HADOOP-16654: Delete hadoop-ozone and hadoop-hdds subprojects from Apache trunk

Signed-off-by: Dinesh Chitlangia <dineshchitlangia@gmail.com>
diff --git a/BUILDING.txt b/BUILDING.txt
index d3c9a1a7..6d2cddf 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -104,8 +104,6 @@
          - hadoop-hdfs-project          (Hadoop HDFS)
          - hadoop-yarn-project          (Hadoop YARN)
          - hadoop-mapreduce-project     (Hadoop MapReduce)
-         - hadoop-ozone                 (Hadoop Ozone)
-         - hadoop-hdds                  (Hadoop Distributed Data Store)
          - hadoop-tools                 (Hadoop tools like Streaming, Distcp, etc.)
          - hadoop-dist                  (Hadoop distribution assembler)
          - hadoop-client-modules        (Hadoop client modules)
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
index b47b4bc..7da999c 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
@@ -56,8 +56,6 @@
         <exclude>**/build/**</exclude>
         <exclude>**/file:/**</exclude>
         <exclude>**/SecurityAuth.audit*</exclude>
-        <exclude>hadoop-ozone/**</exclude>
-        <exclude>hadoop-hdds/**</exclude>
         <exclude>hadoop-submarine/**</exclude>
       </excludes>
     </fileSet>
diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml
deleted file mode 100644
index 673af41..0000000
--- a/hadoop-hdds/client/pom.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdds</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-
-  <artifactId>hadoop-hdds-client</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Client Library</description>
-  <name>Apache Hadoop HDDS Client</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-all</artifactId>
-    </dependency>
-
-  </dependencies>
-</project>
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ClientCredentialInterceptor.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ClientCredentialInterceptor.java
deleted file mode 100644
index 7a15808..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ClientCredentialInterceptor.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.ratis.thirdparty.io.grpc.CallOptions;
-import org.apache.ratis.thirdparty.io.grpc.Channel;
-import org.apache.ratis.thirdparty.io.grpc.ClientCall;
-import org.apache.ratis.thirdparty.io.grpc.ClientInterceptor;
-import org.apache.ratis.thirdparty.io.grpc.ForwardingClientCall;
-import org.apache.ratis.thirdparty.io.grpc.Metadata;
-import org.apache.ratis.thirdparty.io.grpc.MethodDescriptor;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OBT_METADATA_KEY;
-import static org.apache.hadoop.ozone.OzoneConsts.USER_METADATA_KEY;
-
-/**
- * GRPC client interceptor for ozone block token.
- */
-public class ClientCredentialInterceptor implements ClientInterceptor {
-
-  private final String user;
-  private final String token;
-
-  public ClientCredentialInterceptor(String user, String token) {
-    this.user = user;
-    this.token = token;
-  }
-
-  @Override
-  public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(
-      MethodDescriptor<ReqT, RespT> method,
-      CallOptions callOptions,
-      Channel next) {
-
-    return new ForwardingClientCall.SimpleForwardingClientCall<ReqT, RespT>(
-        next.newCall(method, callOptions)) {
-      @Override
-      public void start(Listener<RespT> responseListener, Metadata headers) {
-        if (token != null) {
-          headers.put(OBT_METADATA_KEY, token);
-        }
-        if (user != null) {
-          headers.put(USER_METADATA_KEY, user);
-        }
-        super.start(responseListener, headers);
-      }
-    };
-  }
-}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
deleted file mode 100644
index 04a8a1a..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ /dev/null
@@ -1,466 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc;
-import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceStub;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.tracing.GrpcClientInterceptor;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Time;
-
-import io.opentracing.Scope;
-import io.opentracing.util.GlobalTracer;
-import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
-import org.apache.ratis.thirdparty.io.grpc.Status;
-import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts;
-import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder;
-import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
-import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.security.cert.X509Certificate;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * A client for the storage container protocol, used to read object data.
- */
-public class XceiverClientGrpc extends XceiverClientSpi {
-  static final Logger LOG = LoggerFactory.getLogger(XceiverClientGrpc.class);
-  private static final String COMPONENT = "dn";
-  private final Pipeline pipeline;
-  private final Configuration config;
-  private Map<UUID, XceiverClientProtocolServiceStub> asyncStubs;
-  private XceiverClientMetrics metrics;
-  private Map<UUID, ManagedChannel> channels;
-  private final Semaphore semaphore;
-  private boolean closed = false;
-  private SecurityConfig secConfig;
-  private final boolean topologyAwareRead;
-  private X509Certificate caCert;
-
-  /**
-   * Constructs a client that can communicate with the Container framework on
-   * data nodes.
-   *
-   * @param pipeline - Pipeline that defines the machines.
-   * @param config   -- Ozone Config
-   * @param caCert   - SCM ca certificate.
-   */
-  public XceiverClientGrpc(Pipeline pipeline, Configuration config,
-      X509Certificate caCert) {
-    super();
-    Preconditions.checkNotNull(pipeline);
-    Preconditions.checkNotNull(config);
-    this.pipeline = pipeline;
-    this.config = config;
-    this.secConfig = new SecurityConfig(config);
-    this.semaphore =
-        new Semaphore(HddsClientUtils.getMaxOutstandingRequests(config));
-    this.metrics = XceiverClientManager.getXceiverClientMetrics();
-    this.channels = new HashMap<>();
-    this.asyncStubs = new HashMap<>();
-    this.topologyAwareRead = config.getBoolean(
-        OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY,
-        OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT);
-    this.caCert = caCert;
-  }
-
-  /**
-   * Constructs a client that can communicate with the Container framework on
-   * data nodes.
-   *
-   * @param pipeline - Pipeline that defines the machines.
-   * @param config   -- Ozone Config
-   */
-  public XceiverClientGrpc(Pipeline pipeline, Configuration config) {
-    this(pipeline, config, null);
-  }
-
-  /**
-   * To be used when grpc token is not enabled.
-   */
-  @Override
-  public void connect() throws Exception {
-    // Connect to the closest node; if the closest node doesn't exist,
-    // delegate to the first node, which is usually the leader in the pipeline.
-    DatanodeDetails dn = topologyAwareRead ? this.pipeline.getClosestNode() :
-        this.pipeline.getFirstNode();
-    // just make a connection to the picked datanode at the beginning
-    connectToDatanode(dn, null);
-  }
-
-  /**
- * Passes the encoded token to the GRPC header when security is enabled.
-   */
-  @Override
-  public void connect(String encodedToken) throws Exception {
-    // Connect to the closest node; if the closest node doesn't exist,
-    // delegate to the first node, which is usually the leader in the pipeline.
-    DatanodeDetails dn = topologyAwareRead ? this.pipeline.getClosestNode() :
-        this.pipeline.getFirstNode();
-    // just make a connection to the picked datanode at the beginning
-    connectToDatanode(dn, encodedToken);
-  }
-
-  private void connectToDatanode(DatanodeDetails dn, String encodedToken)
-      throws IOException {
-    // read port from the data node, on failure use default configured
-    // port.
-    int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
-    if (port == 0) {
-      port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-    }
-
-    // Add credential context to the client call
-    String userName = UserGroupInformation.getCurrentUser().getShortUserName();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Nodes in pipeline : {}", pipeline.getNodes().toString());
-      LOG.debug("Connecting to server : {}", dn.getIpAddress());
-    }
-    NettyChannelBuilder channelBuilder =
-        NettyChannelBuilder.forAddress(dn.getIpAddress(), port).usePlaintext()
-            .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)
-            .intercept(new ClientCredentialInterceptor(userName, encodedToken),
-                new GrpcClientInterceptor());
-    if (secConfig.isGrpcTlsEnabled()) {
-      SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient();
-      if (caCert != null) {
-        sslContextBuilder.trustManager(caCert);
-      }
-      if (secConfig.useTestCert()) {
-        channelBuilder.overrideAuthority("localhost");
-      }
-      channelBuilder.useTransportSecurity().
-          sslContext(sslContextBuilder.build());
-    } else {
-      channelBuilder.usePlaintext();
-    }
-    ManagedChannel channel = channelBuilder.build();
-    XceiverClientProtocolServiceStub asyncStub =
-        XceiverClientProtocolServiceGrpc.newStub(channel);
-    asyncStubs.put(dn.getUuid(), asyncStub);
-    channels.put(dn.getUuid(), channel);
-  }
-
-  /**
-   * Checks whether the xceiver client is connected to the given datanode.
-   *
-   * @return True if the connection is alive, false otherwise.
-   */
-  @VisibleForTesting
-  public boolean isConnected(DatanodeDetails details) {
-    return isConnected(channels.get(details.getUuid()));
-  }
-
-  private boolean isConnected(ManagedChannel channel) {
-    return channel != null && !channel.isTerminated() && !channel.isShutdown();
-  }
-
-  @Override
-  public void close() {
-    closed = true;
-    for (ManagedChannel channel : channels.values()) {
-      channel.shutdownNow();
-      try {
-        channel.awaitTermination(60, TimeUnit.MINUTES);
-      } catch (Exception e) {
-        LOG.error("Unexpected exception while waiting for channel termination",
-            e);
-      }
-    }
-  }
-
-  @Override
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  @Override
-  public ContainerCommandResponseProto sendCommand(
-      ContainerCommandRequestProto request) throws IOException {
-    try {
-      XceiverClientReply reply;
-      reply = sendCommandWithTraceIDAndRetry(request, null);
-      ContainerCommandResponseProto responseProto = reply.getResponse().get();
-      return responseProto;
-    } catch (ExecutionException | InterruptedException e) {
-      throw new IOException("Failed to execute command " + request, e);
-    }
-  }
-
-  @Override
-  public ContainerCommandResponseProto sendCommand(
-      ContainerCommandRequestProto request, List<CheckedBiFunction> validators)
-      throws IOException {
-    try {
-      XceiverClientReply reply;
-      reply = sendCommandWithTraceIDAndRetry(request, validators);
-      ContainerCommandResponseProto responseProto = reply.getResponse().get();
-      return responseProto;
-    } catch (ExecutionException | InterruptedException e) {
-      throw new IOException("Failed to execute command " + request, e);
-    }
-  }
-
-  private XceiverClientReply sendCommandWithTraceIDAndRetry(
-      ContainerCommandRequestProto request, List<CheckedBiFunction> validators)
-      throws IOException {
-    try (Scope scope = GlobalTracer.get()
-        .buildSpan("XceiverClientGrpc." + request.getCmdType().name())
-        .startActive(true)) {
-      ContainerCommandRequestProto finalPayload =
-          ContainerCommandRequestProto.newBuilder(request)
-              .setTraceID(TracingUtil.exportCurrentSpan()).build();
-      return sendCommandWithRetry(finalPayload, validators);
-    }
-  }
-
-  private XceiverClientReply sendCommandWithRetry(
-      ContainerCommandRequestProto request, List<CheckedBiFunction> validators)
-      throws IOException {
-    ContainerCommandResponseProto responseProto = null;
-    IOException ioException = null;
-
-    // In case of an exception or an error, we will try to read from the
-    // datanodes in the pipeline in a round robin fashion.
-
-    // TODO: cache the correct leader info in here, so that any subsequent calls
-    // should first go to leader
-    XceiverClientReply reply = new XceiverClientReply(null);
-    List<DatanodeDetails> datanodeList;
-    if ((request.getCmdType() == ContainerProtos.Type.ReadChunk ||
-        request.getCmdType() == ContainerProtos.Type.GetSmallFile) &&
-        topologyAwareRead) {
-      datanodeList = pipeline.getNodesInOrder();
-    } else {
-      datanodeList = pipeline.getNodes();
-      // Shuffle datanode list so that clients do not read in the same order
-      // every time.
-      Collections.shuffle(datanodeList);
-    }
-    for (DatanodeDetails dn : datanodeList) {
-      try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Executing command " + request + " on datanode " + dn);
-        }
-        // In case the command gets retried on a 2nd datanode,
-        // sendCommandAsyncCall will create a new channel and async stub
-        // in case these don't exist for the specific datanode.
-        reply.addDatanode(dn);
-        responseProto = sendCommandAsync(request, dn).getResponse().get();
-        if (validators != null && !validators.isEmpty()) {
-          for (CheckedBiFunction validator : validators) {
-            validator.apply(request, responseProto);
-          }
-        }
-        break;
-      } catch (ExecutionException | InterruptedException | IOException e) {
-        LOG.error("Failed to execute command " + request + " on datanode " + dn
-            .getUuidString(), e);
-        if (!(e instanceof IOException)) {
-          if (Status.fromThrowable(e.getCause()).getCode()
-              == Status.UNAUTHENTICATED.getCode()) {
-            throw new SCMSecurityException("Failed to authenticate with "
-                + "GRPC XceiverServer with Ozone block token.");
-          }
-          ioException = new IOException(e);
-        } else {
-          ioException = (IOException) e;
-        }
-        responseProto = null;
-      }
-    }
-
-    if (responseProto != null) {
-      reply.setResponse(CompletableFuture.completedFuture(responseProto));
-      return reply;
-    } else {
-      Preconditions.checkNotNull(ioException);
-      LOG.error("Failed to execute command {} on the pipeline {}.", request,
-          pipeline);
-      throw ioException;
-    }
-  }
-
-  // TODO: for a true async API, once the waitable future for executing
-  // the command on one channel fails, it should be retried asynchronously
-  // on the future Task for all the remaining datanodes.
-
-  // Note: this async API is not currently used in any active I/O path.
-  // In case it gets used, the asynchronous retry logic needs to be plugged
-  // in here.
-  /**
-   * Sends a given command to the server and gets a waitable future back.
-   *
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  @Override
-  public XceiverClientReply sendCommandAsync(
-      ContainerCommandRequestProto request)
-      throws IOException, ExecutionException, InterruptedException {
-    try (Scope scope = GlobalTracer.get()
-        .buildSpan("XceiverClientGrpc." + request.getCmdType().name())
-        .startActive(true)) {
-
-      ContainerCommandRequestProto finalPayload =
-          ContainerCommandRequestProto.newBuilder(request)
-              .setTraceID(TracingUtil.exportCurrentSpan())
-              .build();
-      XceiverClientReply asyncReply =
-          sendCommandAsync(finalPayload, pipeline.getFirstNode());
-      // TODO : for now make this API sync in nature as async requests are
-      // served out of order over XceiverClientGrpc. This needs to be fixed
-      // if this API is to be used for I/O path. Currently, this is not
-      // used for Read/Write Operation but for tests.
-      if (!HddsUtils.isReadOnly(request)) {
-        asyncReply.getResponse().get();
-      }
-      return asyncReply;
-    }
-  }
-
-  private XceiverClientReply sendCommandAsync(
-      ContainerCommandRequestProto request, DatanodeDetails dn)
-      throws IOException, ExecutionException, InterruptedException {
-    if (closed) {
-      throw new IOException("This channel is not connected.");
-    }
-
-    UUID dnId = dn.getUuid();
-    ManagedChannel channel = channels.get(dnId);
-    // If the channel doesn't exist for this specific datanode or the channel
-    // is closed, just reconnect
-    String token = request.getEncodedToken();
-    if (!isConnected(channel)) {
-      reconnect(dn, token);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Send command {} to datanode {}",
-          request.getCmdType().toString(), dn.getNetworkFullPath());
-    }
-    final CompletableFuture<ContainerCommandResponseProto> replyFuture =
-        new CompletableFuture<>();
-    semaphore.acquire();
-    long requestTime = Time.monotonicNowNanos();
-    metrics.incrPendingContainerOpsMetrics(request.getCmdType());
-    // create a new grpc stream for each non-async call.
-
-    // TODO: for async calls, we should reuse StreamObserver resources.
-    final StreamObserver<ContainerCommandRequestProto> requestObserver =
-        asyncStubs.get(dnId)
-            .send(new StreamObserver<ContainerCommandResponseProto>() {
-              @Override
-              public void onNext(ContainerCommandResponseProto value) {
-                replyFuture.complete(value);
-                metrics.decrPendingContainerOpsMetrics(request.getCmdType());
-                metrics.addContainerOpsLatency(request.getCmdType(),
-                    Time.monotonicNowNanos() - requestTime);
-                semaphore.release();
-              }
-
-              @Override
-              public void onError(Throwable t) {
-                replyFuture.completeExceptionally(t);
-                metrics.decrPendingContainerOpsMetrics(request.getCmdType());
-                metrics.addContainerOpsLatency(request.getCmdType(),
-                    Time.monotonicNowNanos() - requestTime);
-                semaphore.release();
-              }
-
-              @Override
-              public void onCompleted() {
-                if (!replyFuture.isDone()) {
-                  replyFuture.completeExceptionally(new IOException(
-                      "Stream completed but no reply for request " + request));
-                }
-              }
-            });
-    requestObserver.onNext(request);
-    requestObserver.onCompleted();
-    return new XceiverClientReply(replyFuture);
-  }
-
-  private void reconnect(DatanodeDetails dn, String encodedToken)
-      throws IOException {
-    ManagedChannel channel;
-    try {
-      connectToDatanode(dn, encodedToken);
-      channel = channels.get(dn.getUuid());
-    } catch (Exception e) {
-      LOG.error("Error while connecting: ", e);
-      throw new IOException(e);
-    }
-
-    if (channel == null || !isConnected(channel)) {
-      throw new IOException("This channel is not connected.");
-    }
-  }
-
-  @Override
-  public XceiverClientReply watchForCommit(long index, long timeout)
-      throws InterruptedException, ExecutionException, TimeoutException,
-      IOException {
-    // there is no notion of watch for commit index in standalone pipeline
-    return null;
-  };
-
-  public long getReplicatedMinCommitIndex() {
-    return 0;
-  }
-  /**
-   * Returns the pipeline type.
-   *
-   * @return - Stand Alone as the type.
-   */
-  @Override
-  public HddsProtos.ReplicationType getPipelineType() {
-    return HddsProtos.ReplicationType.STAND_ALONE;
-  }
-}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
deleted file mode 100644
index b15828a..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.Config;
-import org.apache.hadoop.hdds.conf.ConfigGroup;
-import org.apache.hadoop.hdds.conf.ConfigType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.util.concurrent.Callable;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
-
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
-import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE;
-
-/**
- * XceiverClientManager is responsible for the lifecycle of XceiverClient
- * instances.  Callers use this class to acquire an XceiverClient instance
- * connected to the desired container pipeline.  When done, the caller also uses
- * this class to release the previously acquired XceiverClient instance.
- *
- *
- * This class caches connections to containers for reuse, so that frequent
- * accesses to the same container go through the same connection without
- * re-establishing it. A connection is closed if it has not been used for
- * a period of time.
- */
-public class XceiverClientManager implements Closeable {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(XceiverClientManager.class);
-  //TODO : change this to SCM configuration class
-  private final Configuration conf;
-  private final Cache<String, XceiverClientSpi> clientCache;
-  private final boolean useRatis;
-  private X509Certificate caCert;
-
-  private static XceiverClientMetrics metrics;
-  private boolean isSecurityEnabled;
-  private final boolean topologyAwareRead;
-  /**
-   * Creates a new XceiverClientManager for a non-secured Ozone cluster.
-   * For a security-enabled Ozone cluster, clients should use the other
-   * constructor with a valid CA certificate in PEM string format.
-   *
-   * @param conf configuration
-   */
-  public XceiverClientManager(Configuration conf) throws IOException {
-    this(conf, OzoneConfiguration.of(conf).getObject(ScmClientConfig.class),
-        null);
-  }
-
-  public XceiverClientManager(Configuration conf, ScmClientConfig clientConf,
-      String caCertPem) throws IOException {
-    Preconditions.checkNotNull(clientConf);
-    Preconditions.checkNotNull(conf);
-    long staleThresholdMs = clientConf.getStaleThreshold(MILLISECONDS);
-    this.useRatis = conf.getBoolean(
-        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
-        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
-    this.conf = conf;
-    this.isSecurityEnabled = OzoneSecurityUtil.isSecurityEnabled(conf);
-    if (isSecurityEnabled) {
-      Preconditions.checkNotNull(caCertPem);
-      try {
-        this.caCert = CertificateCodec.getX509Cert(caCertPem);
-      } catch (CertificateException ex) {
-        throw new SCMSecurityException("Error: Fail to get SCM CA certificate",
-            ex);
-      }
-    }
-
-    this.clientCache = CacheBuilder.newBuilder()
-        .expireAfterAccess(staleThresholdMs, MILLISECONDS)
-        .maximumSize(clientConf.getMaxSize())
-        .removalListener(
-            new RemovalListener<String, XceiverClientSpi>() {
-            @Override
-            public void onRemoval(
-                RemovalNotification<String, XceiverClientSpi>
-                  removalNotification) {
-              synchronized (clientCache) {
-                // Mark the entry as evicted
-                XceiverClientSpi info = removalNotification.getValue();
-                info.setEvicted();
-              }
-            }
-          }).build();
-    topologyAwareRead = conf.getBoolean(
-        OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY,
-        OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT);
-  }
-
-  @VisibleForTesting
-  public Cache<String, XceiverClientSpi> getClientCache() {
-    return clientCache;
-  }
-
-  /**
-   * Acquires a XceiverClientSpi connected to a container capable of
-   * storing the specified key.
-   *
-   * If there is already a cached XceiverClientSpi, simply return the
-   * cached one; otherwise create a new one.
-   *
-   * @param pipeline the container pipeline for the client connection
-   * @return XceiverClientSpi connected to a container
-   * @throws IOException if a XceiverClientSpi cannot be acquired
-   */
-  public XceiverClientSpi acquireClient(Pipeline pipeline)
-      throws IOException {
-    return acquireClient(pipeline, false);
-  }
-
-  /**
-   * Acquires a XceiverClientSpi connected to a container for read.
-   *
-   * If there is already a cached XceiverClientSpi, simply return the
-   * cached one; otherwise create a new one.
-   *
-   * @param pipeline the container pipeline for the client connection
-   * @return XceiverClientSpi connected to a container
-   * @throws IOException if a XceiverClientSpi cannot be acquired
-   */
-  public XceiverClientSpi acquireClientForReadData(Pipeline pipeline)
-      throws IOException {
-    return acquireClient(pipeline, true);
-  }
-
-  private XceiverClientSpi acquireClient(Pipeline pipeline, boolean read)
-      throws IOException {
-    Preconditions.checkNotNull(pipeline);
-    Preconditions.checkArgument(pipeline.getNodes() != null);
-    Preconditions.checkArgument(!pipeline.getNodes().isEmpty());
-
-    synchronized (clientCache) {
-      XceiverClientSpi info = getClient(pipeline, read);
-      info.incrementReference();
-      return info;
-    }
-  }
-
-  /**
-   * Releases a XceiverClientSpi after use.
-   *
-   * @param client client to release
-   * @param invalidateClient if true, invalidates the client in cache
-   */
-  public void releaseClient(XceiverClientSpi client, boolean invalidateClient) {
-    releaseClient(client, invalidateClient, false);
-  }
-
-  /**
-   * Releases a read XceiverClientSpi after use.
-   *
-   * @param client client to release
-   * @param invalidateClient if true, invalidates the client in cache
-   */
-  public void releaseClientForReadData(XceiverClientSpi client,
-      boolean invalidateClient) {
-    releaseClient(client, invalidateClient, true);
-  }
-
-  private void releaseClient(XceiverClientSpi client, boolean invalidateClient,
-      boolean read) {
-    Preconditions.checkNotNull(client);
-    synchronized (clientCache) {
-      client.decrementReference();
-      if (invalidateClient) {
-        Pipeline pipeline = client.getPipeline();
-        String key = getPipelineCacheKey(pipeline, read);
-        XceiverClientSpi cachedClient = clientCache.getIfPresent(key);
-        if (cachedClient == client) {
-          clientCache.invalidate(key);
-        }
-      }
-    }
-  }
-
-  private XceiverClientSpi getClient(Pipeline pipeline, boolean forRead)
-      throws IOException {
-    HddsProtos.ReplicationType type = pipeline.getType();
-    try {
-      // For reads, create a different client per pipeline node, chosen
-      // based on network topology.
-      String key = getPipelineCacheKey(pipeline, forRead);
-      // Append the user's short name to the key to prevent a different
-      // user from using the same instance of xceiverClient.
-      key = isSecurityEnabled ?
-          key + UserGroupInformation.getCurrentUser().getShortUserName() : key;
-      return clientCache.get(key, new Callable<XceiverClientSpi>() {
-        @Override
-          public XceiverClientSpi call() throws Exception {
-            XceiverClientSpi client = null;
-            switch (type) {
-            case RATIS:
-              client = XceiverClientRatis.newXceiverClientRatis(pipeline, conf,
-                  caCert);
-              client.connect();
-              break;
-            case STAND_ALONE:
-              client = new XceiverClientGrpc(pipeline, conf, caCert);
-              break;
-            case CHAINED:
-            default:
-              throw new IOException("not implemented: " + pipeline.getType());
-            }
-            return client;
-          }
-        });
-    } catch (Exception e) {
-      throw new IOException(
-          "Exception getting XceiverClient: " + e.toString(), e);
-    }
-  }
-
-  private String getPipelineCacheKey(Pipeline pipeline, boolean forRead) {
-    String key = pipeline.getId().getId().toString() + pipeline.getType();
-    if (topologyAwareRead && forRead) {
-      try {
-        key += pipeline.getClosestNode().getHostName();
-      } catch (IOException e) {
-        LOG.error("Failed to get closest node to create pipeline cache key:" +
-            e.getMessage());
-      }
-    }
-    return key;
-  }
-
-  /**
-   * Close and remove all the cached clients.
-   */
-  @Override
-  public void close() {
-    //closing is done through RemovalListener
-    clientCache.invalidateAll();
-    clientCache.cleanUp();
-
-    if (metrics != null) {
-      metrics.unRegister();
-    }
-  }
-
-  /**
-   * Tells us if Ratis is enabled for this cluster.
-   * @return True if Ratis is enabled.
-   */
-  public boolean isUseRatis() {
-    return useRatis;
-  }
-
-  /**
-   * Returns the replication factor based on whether Ratis is enabled.
-   * @return THREE if Ratis is enabled, ONE otherwise
-   */
-  public  HddsProtos.ReplicationFactor getFactor() {
-    if(isUseRatis()) {
-      return HddsProtos.ReplicationFactor.THREE;
-    }
-    return HddsProtos.ReplicationFactor.ONE;
-  }
-
-  /**
-   * Returns the default replication type.
-   * @return Ratis or Standalone
-   */
-  public HddsProtos.ReplicationType getType() {
-    // TODO : Fix me and make Ratis default before release.
-    // TODO: Remove this as replication factor and type are pipeline properties
-    if(isUseRatis()) {
-      return HddsProtos.ReplicationType.RATIS;
-    }
-    return HddsProtos.ReplicationType.STAND_ALONE;
-  }
-
-  public Function<ByteBuffer, ByteString> byteBufferToByteStringConversion(){
-    return ByteStringConversion.createByteBufferConversion(conf);
-  }
-
-  /**
-   * Get xceiver client metric.
-   */
-  public synchronized static XceiverClientMetrics getXceiverClientMetrics() {
-    if (metrics == null) {
-      metrics = XceiverClientMetrics.create();
-    }
-
-    return metrics;
-  }
-
-  /**
-   * Configuration for HDDS client.
-   */
-  @ConfigGroup(prefix = "scm.container.client")
-  public static class ScmClientConfig {
-
-    private int maxSize;
-    private long staleThreshold;
-    private int maxOutstandingRequests;
-
-    public long getStaleThreshold(TimeUnit unit) {
-      return unit.convert(staleThreshold, MILLISECONDS);
-    }
-
-    @Config(key = "idle.threshold",
-        type = ConfigType.TIME, timeUnit = MILLISECONDS,
-        defaultValue = "10s",
-        tags = { OZONE, PERFORMANCE },
-        description =
-            "In the standalone pipelines, the SCM clients use netty to "
-            + " communicate with the container. It also uses connection pooling"
-            + " to reduce client side overheads. This allows a connection to"
-            + " stay idle for a while before the connection is closed."
-    )
-    public void setStaleThreshold(long staleThreshold) {
-      this.staleThreshold = staleThreshold;
-    }
-
-    public int getMaxSize() {
-      return maxSize;
-    }
-
-    @Config(key = "max.size",
-        defaultValue = "256",
-        tags = { OZONE, PERFORMANCE },
-        description =
-            "Controls the maximum number of connections that are cached via"
-            + " client connection pooling. If the number of connections"
-            + " exceed this count, then the oldest idle connection is evicted."
-    )
-    public void setMaxSize(int maxSize) {
-      this.maxSize = maxSize;
-    }
-
-    public int getMaxOutstandingRequests() {
-      return maxOutstandingRequests;
-    }
-
-    @Config(key = "max.outstanding.requests",
-        defaultValue = "100",
-        tags = { OZONE, PERFORMANCE },
-        description =
-            "Controls the maximum number of outstanding async requests that can"
-            + " be handled by the Standalone as well as Ratis client."
-    )
-    public void setMaxOutstandingRequests(int maxOutstandingRequests) {
-      this.maxOutstandingRequests = maxOutstandingRequests;
-    }
-  }
-
-}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
deleted file mode 100644
index 5d43c5e..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-
-/**
- * The client metrics for the Storage Container protocol.
- */
-@InterfaceAudience.Private
-@Metrics(about = "Storage Container Client Metrics", context = "dfs")
-public class XceiverClientMetrics {
-  public static final String SOURCE_NAME = XceiverClientMetrics.class
-      .getSimpleName();
-
-  private @Metric MutableCounterLong pendingOps;
-  private @Metric MutableCounterLong totalOps;
-  private MutableCounterLong[] pendingOpsArray;
-  private MutableCounterLong[] opsArray;
-  private MutableRate[] containerOpsLatency;
-  private MetricsRegistry registry;
-
-  public XceiverClientMetrics() {
-    int numEnumEntries = ContainerProtos.Type.values().length;
-    this.registry = new MetricsRegistry(SOURCE_NAME);
-
-    this.pendingOpsArray = new MutableCounterLong[numEnumEntries];
-    this.opsArray = new MutableCounterLong[numEnumEntries];
-    this.containerOpsLatency = new MutableRate[numEnumEntries];
-    for (int i = 0; i < numEnumEntries; i++) {
-      pendingOpsArray[i] = registry.newCounter(
-          "numPending" + ContainerProtos.Type.forNumber(i + 1),
-          "number of pending" + ContainerProtos.Type.forNumber(i + 1) + " ops",
-          (long) 0);
-      opsArray[i] = registry
-          .newCounter("opCount" + ContainerProtos.Type.forNumber(i + 1),
-              "number of" + ContainerProtos.Type.forNumber(i + 1) + " ops",
-              (long) 0);
-
-      containerOpsLatency[i] = registry.newRate(
-          ContainerProtos.Type.forNumber(i + 1) + "Latency",
-          "latency of " + ContainerProtos.Type.forNumber(i + 1)
-          + " ops");
-    }
-  }
-
-  public static XceiverClientMetrics create() {
-    DefaultMetricsSystem.initialize(SOURCE_NAME);
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME, "Storage Container Client Metrics",
-        new XceiverClientMetrics());
-  }
-
-  public void incrPendingContainerOpsMetrics(ContainerProtos.Type type) {
-    pendingOps.incr();
-    totalOps.incr();
-    opsArray[type.ordinal()].incr();
-    pendingOpsArray[type.ordinal()].incr();
-  }
-
-  public void decrPendingContainerOpsMetrics(ContainerProtos.Type type) {
-    pendingOps.incr(-1);
-    pendingOpsArray[type.ordinal()].incr(-1);
-  }
-
-  public void addContainerOpsLatency(ContainerProtos.Type type,
-      long latencyNanos) {
-    containerOpsLatency[type.ordinal()].add(latencyNanos);
-  }
-
-  public long getContainerOpsMetrics(ContainerProtos.Type type) {
-    return pendingOpsArray[type.ordinal()].value();
-  }
-
-  @VisibleForTesting
-  public long getTotalOpCount() {
-    return totalOps.value();
-  }
-
-  @VisibleForTesting
-  public long getContainerOpCountMetrics(ContainerProtos.Type type) {
-    return opsArray[type.ordinal()].value();
-  }
-
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE_NAME);
-  }
-}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
deleted file mode 100644
index 04fabab..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import java.io.IOException;
-import java.security.cert.X509Certificate;
-import java.util.Collection;
-import java.util.List;
-import java.util.Objects;
-import java.util.OptionalLong;
-import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionException;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.grpc.GrpcTlsConfig;
-import org.apache.ratis.proto.RaftProtos;
-import org.apache.ratis.protocol.GroupMismatchException;
-import org.apache.ratis.protocol.RaftClientReply;
-import org.apache.ratis.protocol.RaftException;
-import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.ratis.util.TimeDuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-import io.opentracing.Scope;
-import io.opentracing.util.GlobalTracer;
-
-/**
- * An implementation of {@link XceiverClientSpi} using Ratis.
- * The underlying RPC mechanism can be chosen via the constructor.
- */
-public final class XceiverClientRatis extends XceiverClientSpi {
-  public static final Logger LOG =
-      LoggerFactory.getLogger(XceiverClientRatis.class);
-
-  public static XceiverClientRatis newXceiverClientRatis(
-      org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline,
-      Configuration ozoneConf) {
-    return newXceiverClientRatis(pipeline, ozoneConf, null);
-  }
-
-  public static XceiverClientRatis newXceiverClientRatis(
-      org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline,
-      Configuration ozoneConf, X509Certificate caCert) {
-    final String rpcType = ozoneConf
-        .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
-            ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
-    final TimeDuration clientRequestTimeout =
-        RatisHelper.getClientRequestTimeout(ozoneConf);
-    final int maxOutstandingRequests =
-        HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
-    final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
-    final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new
-        SecurityConfig(ozoneConf), caCert);
-    return new XceiverClientRatis(pipeline,
-        SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests,
-        retryPolicy, tlsConfig, clientRequestTimeout);
-  }
-
-  private final Pipeline pipeline;
-  private final RpcType rpcType;
-  private final AtomicReference<RaftClient> client = new AtomicReference<>();
-  private final int maxOutstandingRequests;
-  private final RetryPolicy retryPolicy;
-  private final GrpcTlsConfig tlsConfig;
-  private final TimeDuration clientRequestTimeout;
-
-  // Map to track commit index at every server
-  private final ConcurrentHashMap<UUID, Long> commitInfoMap;
-
-  private XceiverClientMetrics metrics;
-
-  /**
-   * Constructs a client.
-   */
-  private XceiverClientRatis(Pipeline pipeline, RpcType rpcType,
-      int maxOutStandingChunks, RetryPolicy retryPolicy,
-      GrpcTlsConfig tlsConfig, TimeDuration timeout) {
-    super();
-    this.pipeline = pipeline;
-    this.rpcType = rpcType;
-    this.maxOutstandingRequests = maxOutStandingChunks;
-    this.retryPolicy = retryPolicy;
-    commitInfoMap = new ConcurrentHashMap<>();
-    this.tlsConfig = tlsConfig;
-    this.clientRequestTimeout = timeout;
-    metrics = XceiverClientManager.getXceiverClientMetrics();
-  }
-
-  private void updateCommitInfosMap(
-      Collection<RaftProtos.CommitInfoProto> commitInfoProtos) {
-    // if the commitInfo map is empty, just update the commit indexes for each
-    // of the servers
-    if (commitInfoMap.isEmpty()) {
-      commitInfoProtos.forEach(proto -> commitInfoMap
-          .put(RatisHelper.toDatanodeId(proto.getServer()),
-              proto.getCommitIndex()));
-      // In case the commit is happening two-way, just update the commitIndex
-      // for the servers which have been successfully updating the commit
-      // indexes. This is important because getReplicatedMinCommitIndex()
-      // should always return the min commit index out of the nodes which have
-      // been replicating data successfully.
-    } else {
-      commitInfoProtos.forEach(proto -> commitInfoMap
-          .computeIfPresent(RatisHelper.toDatanodeId(proto.getServer()),
-              (address, index) -> {
-                index = proto.getCommitIndex();
-                return index;
-              }));
-    }
-  }
-
-  /**
-   * Returns Ratis as the pipeline type.
-   *
-   * @return - Ratis
-   */
-  @Override
-  public HddsProtos.ReplicationType getPipelineType() {
-    return HddsProtos.ReplicationType.RATIS;
-  }
-
-  @Override
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  @Override
-  public void connect() throws Exception {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Connecting to pipeline:{} datanode:{}", getPipeline().getId(),
-          RatisHelper.toRaftPeerId(pipeline.getFirstNode()));
-    }
-    // TODO : XceiverClient ratis should pass the config value of
-    // maxOutstandingRequests so as to set the upper bound on max no of async
-    // requests to be handled by raft client
-    if (!client.compareAndSet(null,
-        RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy,
-            maxOutstandingRequests, tlsConfig, clientRequestTimeout))) {
-      throw new IllegalStateException("Client is already connected.");
-    }
-  }
-
-  @Override
-  public void connect(String encodedToken) throws Exception {
-    throw new UnsupportedOperationException("Block tokens are not " +
-        "implemented for Ratis clients.");
-  }
-
-  @Override
-  public void close() {
-    final RaftClient c = client.getAndSet(null);
-    if (c != null) {
-      closeRaftClient(c);
-    }
-  }
-
-  private void closeRaftClient(RaftClient raftClient) {
-    try {
-      raftClient.close();
-    } catch (IOException e) {
-      throw new IllegalStateException(e);
-    }
-  }
-
-  private RaftClient getClient() {
-    return Objects.requireNonNull(client.get(), "client is null");
-  }
-
-
-  @VisibleForTesting
-  public ConcurrentHashMap<UUID, Long> getCommitInfoMap() {
-    return commitInfoMap;
-  }
-
-  private CompletableFuture<RaftClientReply> sendRequestAsync(
-      ContainerCommandRequestProto request) {
-    try (Scope scope = GlobalTracer.get()
-        .buildSpan("XceiverClientRatis." + request.getCmdType().name())
-        .startActive(true)) {
-      final ContainerCommandRequestMessage message
-          = ContainerCommandRequestMessage.toMessage(
-              request, TracingUtil.exportCurrentSpan());
-      if (HddsUtils.isReadOnly(request)) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("sendCommandAsync ReadOnly {}", message);
-        }
-        return getClient().sendReadOnlyAsync(message);
-      } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("sendCommandAsync {}", message);
-        }
-        return getClient().sendAsync(message);
-      }
-    }
-  }
-
-  // gets the minimum log index replicated to all servers
-  @Override
-  public long getReplicatedMinCommitIndex() {
-    OptionalLong minIndex =
-        commitInfoMap.values().parallelStream().mapToLong(v -> v).min();
-    return minIndex.isPresent() ? minIndex.getAsLong() : 0;
-  }
-
-  private void addDatanodetoReply(UUID address, XceiverClientReply reply) {
-    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
-    builder.setUuid(address.toString());
-    reply.addDatanode(builder.build());
-  }
-
-  @Override
-  public XceiverClientReply watchForCommit(long index, long timeout)
-      throws InterruptedException, ExecutionException, TimeoutException,
-      IOException {
-    long commitIndex = getReplicatedMinCommitIndex();
-    XceiverClientReply clientReply = new XceiverClientReply(null);
-    if (commitIndex >= index) {
-      // return the min commit index till which the log has been replicated to
-      // all servers
-      clientReply.setLogIndex(commitIndex);
-      return clientReply;
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("commit index : {} watch timeout : {}", index, timeout);
-    }
-    RaftClientReply reply;
-    try {
-      CompletableFuture<RaftClientReply> replyFuture = getClient()
-          .sendWatchAsync(index, RaftProtos.ReplicationLevel.ALL_COMMITTED);
-      replyFuture.get(timeout, TimeUnit.MILLISECONDS);
-    } catch (Exception e) {
-      Throwable t = HddsClientUtils.checkForException(e);
-      LOG.warn("3 way commit failed on pipeline {}", pipeline, e);
-      if (t instanceof GroupMismatchException) {
-        throw e;
-      }
-      reply = getClient()
-          .sendWatchAsync(index, RaftProtos.ReplicationLevel.MAJORITY_COMMITTED)
-          .get(timeout, TimeUnit.MILLISECONDS);
-      List<RaftProtos.CommitInfoProto> commitInfoProtoList =
-          reply.getCommitInfos().stream()
-              .filter(i -> i.getCommitIndex() < index)
-              .collect(Collectors.toList());
-      commitInfoProtoList.parallelStream().forEach(proto -> {
-        UUID address = RatisHelper.toDatanodeId(proto.getServer());
-        addDatanodetoReply(address, clientReply);
-        // Since the 3-way commit has failed, from now on the map will
-        // only store entries for those datanodes which have had successful
-        // replication.
-        commitInfoMap.remove(address);
-        LOG.info(
-            "Could not commit index {} on pipeline {} to all the nodes. " +
-            "Server {} has failed. Committed by majority.",
-            index, pipeline, address);
-      });
-    }
-    clientReply.setLogIndex(index);
-    return clientReply;
-  }
-
-  /**
-   * Sends a given command to the server and gets a waitable future back.
-   *
-   * @param request Request
-   * @return Response to the command
-   */
-  @Override
-  public XceiverClientReply sendCommandAsync(
-      ContainerCommandRequestProto request) {
-    XceiverClientReply asyncReply = new XceiverClientReply(null);
-    long requestTime = Time.monotonicNowNanos();
-    CompletableFuture<RaftClientReply> raftClientReply =
-        sendRequestAsync(request);
-    metrics.incrPendingContainerOpsMetrics(request.getCmdType());
-    CompletableFuture<ContainerCommandResponseProto> containerCommandResponse =
-        raftClientReply.whenComplete((reply, e) -> {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("received reply {} for request: cmdType={} containerID={}"
-                    + " pipelineID={} traceID={} exception: {}", reply,
-                request.getCmdType(), request.getContainerID(),
-                request.getPipelineID(), request.getTraceID(), e);
-          }
-          metrics.decrPendingContainerOpsMetrics(request.getCmdType());
-          metrics.addContainerOpsLatency(request.getCmdType(),
-              Time.monotonicNowNanos() - requestTime);
-        }).thenApply(reply -> {
-          try {
-            if (!reply.isSuccess()) {
-              // In case of a raft retry failure, the raft client is not
-              // able to connect to the leader, hence the pipeline cannot
-              // be used; but this instance of RaftClient will be closed
-              // and refreshed again. In case the client cannot connect to
-              // the leader, the getClient call will fail.
-
-              // No need to set the failed server ID here. The Ozone client
-              // will directly exclude this pipeline in the next allocate
-              // block call to SCM since, in this case, it is the raft
-              // client that cannot connect to the leader of the pipeline,
-              // though the pipeline itself may still be functional.
-              RaftException exception = reply.getException();
-              Preconditions.checkNotNull(exception, "Raft reply failure but " +
-                  "no exception propagated.");
-              throw new CompletionException(exception);
-            }
-            ContainerCommandResponseProto response =
-                ContainerCommandResponseProto
-                    .parseFrom(reply.getMessage().getContent());
-            UUID serverId = RatisHelper.toDatanodeId(reply.getReplierId());
-            if (response.getResult() == ContainerProtos.Result.SUCCESS) {
-              updateCommitInfosMap(reply.getCommitInfos());
-            }
-            asyncReply.setLogIndex(reply.getLogIndex());
-            addDatanodetoReply(serverId, asyncReply);
-            return response;
-          } catch (InvalidProtocolBufferException e) {
-            throw new CompletionException(e);
-          }
-        });
-    asyncReply.setResponse(containerCommandResponse);
-    return asyncReply;
-  }
-
-}
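
For context on the watch API removed above, a minimal usage sketch in Java. It assumes an already-acquired client for the pipeline, a logIndex returned by an earlier sendCommandAsync call, and an illustrative 30-second timeout; getDatanodes() is the assumed accessor paired with addDatanodetoReply.

    // Hypothetical caller: watchForCommit waits for ALL_COMMITTED
    // replication and falls back to MAJORITY_COMMITTED, recording the
    // lagging datanodes in the reply.
    XceiverClientReply ack = client.watchForCommit(logIndex, 30000);
    long committedIndex = ack.getLogIndex();
    List<DatanodeDetails> lagging = ack.getDatanodes(); // assumed accessor
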
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
deleted file mode 100644
index 982fb8e..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ /dev/null
@@ -1,495 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.client;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerDataProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * This class provides the client-facing APIs of container operations.
- */
-public class ContainerOperationClient implements ScmClient {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerOperationClient.class);
-  private static long containerSizeB = -1;
-  private final StorageContainerLocationProtocol
-      storageContainerLocationClient;
-  private final XceiverClientManager xceiverClientManager;
-
-  public ContainerOperationClient(
-      StorageContainerLocationProtocol
-          storageContainerLocationClient,
-      XceiverClientManager xceiverClientManager) {
-    this.storageContainerLocationClient = storageContainerLocationClient;
-    this.xceiverClientManager = xceiverClientManager;
-  }
-
-  /**
-   * Return the capacity of containers. The current assumption is that all
-   * containers have the same capacity, so one static field is sufficient
-   * for any container.
-   * @return The capacity of one container in number of bytes.
-   */
-  public static long getContainerSizeB() {
-    return containerSizeB;
-  }
-
-  /**
-   * Set the capacity of a container. Should be called exactly once on
-   * system start.
-   * @param size Capacity of one container in number of bytes.
-   */
-  public static void setContainerSizeB(long size) {
-    containerSizeB = size;
-  }
-
-
-  @Override
-  public ContainerWithPipeline createContainer(String owner)
-      throws IOException {
-    XceiverClientSpi client = null;
-    try {
-      ContainerWithPipeline containerWithPipeline =
-          storageContainerLocationClient.allocateContainer(
-              xceiverClientManager.getType(),
-              xceiverClientManager.getFactor(), owner);
-      Pipeline pipeline = containerWithPipeline.getPipeline();
-      client = xceiverClientManager.acquireClient(pipeline);
-
-      Preconditions.checkState(pipeline.isOpen(), String
-          .format("Unexpected state=%s for pipeline=%s, expected state=%s",
-              pipeline.getPipelineState(), pipeline.getId(),
-              Pipeline.PipelineState.OPEN));
-      createContainer(client,
-          containerWithPipeline.getContainerInfo().getContainerID());
-      return containerWithPipeline;
-    } finally {
-      if (client != null) {
-        xceiverClientManager.releaseClient(client, false);
-      }
-    }
-  }
-
-  /**
-   * Create a container over pipeline specified by the SCM.
-   *
-   * @param client - Client to communicate with Datanodes.
-   * @param containerId - Container ID.
-   * @throws IOException
-   */
-  public void createContainer(XceiverClientSpi client,
-      long containerId) throws IOException {
-    ContainerProtocolCalls.createContainer(client, containerId, null);
-
-    // Let us log this info after we let SCM know that we have completed the
-    // creation state.
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Created container " + containerId
-          + " machines:" + client.getPipeline().getNodes());
-    }
-  }
-
-  /**
-   * Creates a pipeline over the machines chosen by the SCM.
-   *
-   * @param client - Client
-   * @param pipeline - pipeline to be created on Datanodes.
-   * @throws IOException
-   */
-  private void createPipeline(XceiverClientSpi client, Pipeline pipeline)
-      throws IOException {
-
-    Preconditions.checkNotNull(pipeline.getId(), "Pipeline " +
-        "name cannot be null when client create flag is set.");
-
-    // Pipeline creation is a three step process.
-    //
-    // 1. Notify SCM that this client is doing a create pipeline on
-    // datanodes.
-    //
-    // 2. Talk to Datanodes to create the pipeline.
-    //
-    // 3. update SCM that pipeline creation was successful.
-
-    // TODO: this has not been fully implemented on server side
-    // SCMClientProtocolServer#notifyObjectStageChange
-    // TODO: when implement the pipeline state machine, change
-    // the pipeline name (string) to pipeline id (long)
-    //storageContainerLocationClient.notifyObjectStageChange(
-    //    ObjectStageChangeRequestProto.Type.pipeline,
-    //    pipeline.getPipelineName(),
-    //    ObjectStageChangeRequestProto.Op.create,
-    //    ObjectStageChangeRequestProto.Stage.begin);
-
-    // client.createPipeline();
-    // TODO: Use PipelineManager to createPipeline
-
-    //storageContainerLocationClient.notifyObjectStageChange(
-    //    ObjectStageChangeRequestProto.Type.pipeline,
-    //    pipeline.getPipelineName(),
-    //    ObjectStageChangeRequestProto.Op.create,
-    //    ObjectStageChangeRequestProto.Stage.complete);
-
-    // TODO : Should we change the state on the client side ??
-    // That makes sense, but it is not needed for the client to work.
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Pipeline creation successful. Pipeline: {}",
-          pipeline.toString());
-    }
-  }
-
-  @Override
-  public ContainerWithPipeline createContainer(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, String owner) throws IOException {
-    XceiverClientSpi client = null;
-    try {
-      // allocate container on SCM.
-      ContainerWithPipeline containerWithPipeline =
-          storageContainerLocationClient.allocateContainer(type, factor,
-              owner);
-      Pipeline pipeline = containerWithPipeline.getPipeline();
-
-      // Connect to the pipeline leader and allocate the container on the
-      // leader datanode. Acquire the client exactly once, so that it pairs
-      // with the single releaseClient in the finally block.
-      client = xceiverClientManager.acquireClient(pipeline);
-      createContainer(client,
-          containerWithPipeline.getContainerInfo().getContainerID());
-      return containerWithPipeline;
-    } finally {
-      if (client != null) {
-        xceiverClientManager.releaseClient(client, false);
-      }
-    }
-  }
-
-  /**
-   * Returns a set of Nodes that meet the query criteria.
-   *
-   * @param nodeStatuses - Criteria that we want the node to have.
-   * @param queryScope - Query scope - Cluster or pool.
-   * @param poolName - if it is pool, a pool name is required.
-   * @return A set of nodes that meet the requested criteria.
-   * @throws IOException
-   */
-  @Override
-  public List<HddsProtos.Node> queryNode(HddsProtos.NodeState
-      nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
-      throws IOException {
-    return storageContainerLocationClient.queryNode(nodeStatuses, queryScope,
-        poolName);
-  }
-
-  /**
-   * Creates a specified replication pipeline.
-   */
-  @Override
-  public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
-      throws IOException {
-    return storageContainerLocationClient.createReplicationPipeline(type,
-        factor, nodePool);
-  }
-
-  @Override
-  public List<Pipeline> listPipelines() throws IOException {
-    return storageContainerLocationClient.listPipelines();
-  }
-
-  @Override
-  public void activatePipeline(HddsProtos.PipelineID pipelineID)
-      throws IOException {
-    storageContainerLocationClient.activatePipeline(pipelineID);
-  }
-
-  @Override
-  public void deactivatePipeline(HddsProtos.PipelineID pipelineID)
-      throws IOException {
-    storageContainerLocationClient.deactivatePipeline(pipelineID);
-  }
-
-  @Override
-  public void closePipeline(HddsProtos.PipelineID pipelineID)
-      throws IOException {
-    storageContainerLocationClient.closePipeline(pipelineID);
-  }
-
-  @Override
-  public void close() {
-    try {
-      xceiverClientManager.close();
-    } catch (Exception ex) {
-      LOG.error("Can't close " + this.getClass().getSimpleName(), ex);
-    }
-  }
-
-  /**
-   * Deletes an existing container.
-   *
-   * @param containerId - ID of the container.
-   * @param pipeline    - Pipeline that represents the container.
-   * @param force       - true to forcibly delete the container.
-   * @throws IOException
-   */
-  @Override
-  public void deleteContainer(long containerId, Pipeline pipeline,
-      boolean force) throws IOException {
-    XceiverClientSpi client = null;
-    try {
-      client = xceiverClientManager.acquireClient(pipeline);
-      ContainerProtocolCalls
-          .deleteContainer(client, containerId, force, null);
-      storageContainerLocationClient
-          .deleteContainer(containerId);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Deleted container {}, machines: {} ", containerId,
-            pipeline.getNodes());
-      }
-    } finally {
-      if (client != null) {
-        xceiverClientManager.releaseClient(client, false);
-      }
-    }
-  }
-
-  /**
-   * Delete the container, this will release any resource it uses.
-   * @param containerID - containerID.
-   * @param force - True to forcibly delete the container.
-   * @throws IOException
-   */
-  @Override
-  public void deleteContainer(long containerID, boolean force)
-      throws IOException {
-    ContainerWithPipeline info = getContainerWithPipeline(containerID);
-    deleteContainer(containerID, info.getPipeline(), force);
-  }
-
-  @Override
-  public List<ContainerInfo> listContainer(long startContainerID,
-      int count) throws IOException {
-    return storageContainerLocationClient.listContainer(
-        startContainerID, count);
-  }
-
-  /**
-   * Get meta data from an existing container.
-   *
-   * @param containerID - ID of the container.
-   * @param pipeline    - Pipeline where the container is located.
-   * @return ContainerInfo
-   * @throws IOException
-   */
-  @Override
-  public ContainerDataProto readContainer(long containerID,
-      Pipeline pipeline) throws IOException {
-    XceiverClientSpi client = null;
-    try {
-      client = xceiverClientManager.acquireClient(pipeline);
-      ReadContainerResponseProto response =
-          ContainerProtocolCalls.readContainer(client, containerID, null);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Read container {}, machines: {} ", containerID,
-            pipeline.getNodes());
-      }
-      return response.getContainerData();
-    } finally {
-      if (client != null) {
-        xceiverClientManager.releaseClient(client, false);
-      }
-    }
-  }
-
-  /**
-   * Get meta data from an existing container.
-   * @param containerID - ID of the container.
-   * @return ContainerInfo - a message of protobuf which has basic info
-   * of a container.
-   * @throws IOException
-   */
-  @Override
-  public ContainerDataProto readContainer(long containerID) throws IOException {
-    ContainerWithPipeline info = getContainerWithPipeline(containerID);
-    return readContainer(containerID, info.getPipeline());
-  }
-
-  /**
-   * Given an id, return the info of the associated container.
-   * @param containerId - long Container ID
-   * @return ContainerInfo of the existing container corresponding to the
-   * given id.
-   * @throws IOException
-   */
-  @Override
-  public ContainerInfo getContainer(long containerId) throws
-      IOException {
-    return storageContainerLocationClient.getContainer(containerId);
-  }
-
-  /**
-   * Gets a container by ID -- Throws if the container does not exist.
-   *
-   * @param containerId - Container ID
-   * @return ContainerWithPipeline
-   * @throws IOException
-   */
-  @Override
-  public ContainerWithPipeline getContainerWithPipeline(long containerId)
-      throws IOException {
-    return storageContainerLocationClient.getContainerWithPipeline(containerId);
-  }
-
-  /**
-   * Close a container.
-   *
-   * @param containerId the ID of the container to be closed.
-   * @param pipeline the pipeline where the container is located.
-   * @throws IOException
-   */
-  @Override
-  public void closeContainer(long containerId, Pipeline pipeline)
-      throws IOException {
-    XceiverClientSpi client = null;
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Close container {}", pipeline);
-      }
-      /*
-      TODO: two orders here, revisit this later:
-      1. close on SCM first, then on data node
-      2. close on data node first, then on SCM
-
-      with 1: if client failed after closing on SCM, then there is a
-      container SCM thinks as closed, but is actually open. Then SCM will no
-      longer allocate block to it, which is fine. But SCM may later try to
-      replicate this "closed" container, which I'm not sure is safe.
-
-      with 2: if client failed after close on datanode, then there is a
-      container SCM thinks as open, but is actually closed. Then SCM will still
-      try to allocate block to it. Which will fail when actually doing the
-      write. No more data can be written, but at least the correctness and
-      consistency of existing data will be maintained.
-
-      For now, take the #2 way.
-       */
-      // Actually close the container on Datanode
-      client = xceiverClientManager.acquireClient(pipeline);
-
-      storageContainerLocationClient.notifyObjectStageChange(
-          ObjectStageChangeRequestProto.Type.container,
-          containerId,
-          ObjectStageChangeRequestProto.Op.close,
-          ObjectStageChangeRequestProto.Stage.begin);
-
-      ContainerProtocolCalls.closeContainer(client, containerId,
-          null);
-      // Notify SCM to close the container
-      storageContainerLocationClient.notifyObjectStageChange(
-          ObjectStageChangeRequestProto.Type.container,
-          containerId,
-          ObjectStageChangeRequestProto.Op.close,
-          ObjectStageChangeRequestProto.Stage.complete);
-    } finally {
-      if (client != null) {
-        xceiverClientManager.releaseClient(client, false);
-      }
-    }
-  }
-
-  /**
-   * Close a container.
-   *
-   * @throws IOException
-   */
-  @Override
-  public void closeContainer(long containerId)
-      throws IOException {
-    ContainerWithPipeline info = getContainerWithPipeline(containerId);
-    Pipeline pipeline = info.getPipeline();
-    closeContainer(containerId, pipeline);
-  }
-
-  /**
-   * Get the current usage information.
-   * @param containerID - ID of the container.
-   * @return the size of the given container.
-   * @throws IOException
-   */
-  @Override
-  public long getContainerSize(long containerID) throws IOException {
-    // TODO : Fix this, it currently returns the capacity
-    // but not the current usage.
-    long size = getContainerSizeB();
-    if (size == -1) {
-      throw new IOException("Container size unknown!");
-    }
-    return size;
-  }
-
-  /**
-   * Check if SCM is in safe mode.
-   *
-   * @return Returns true if SCM is in safe mode else returns false.
-   * @throws IOException
-   */
-  public boolean inSafeMode() throws IOException {
-    return storageContainerLocationClient.inSafeMode();
-  }
-
-  /**
-   * Force SCM out of safe mode.
-   *
-   * @return returns true if operation is successful.
-   * @throws IOException
-   */
-  public boolean forceExitSafeMode() throws IOException {
-    return storageContainerLocationClient.forceExitSafeMode();
-  }
-
-  @Override
-  public void startReplicationManager() throws IOException {
-    storageContainerLocationClient.startReplicationManager();
-  }
-
-  @Override
-  public void stopReplicationManager() throws IOException {
-    storageContainerLocationClient.stopReplicationManager();
-  }
-
-  @Override
-  public boolean getReplicationManagerStatus() throws IOException {
-    return storageContainerLocationClient.getReplicationManagerStatus();
-  }
-
-
-}
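
For reference, a hedged sketch of the container lifecycle this class supported, assuming already-constructed storageContainerLocationClient and xceiverClientManager dependencies, an enclosing method declared to throw IOException, and an illustrative owner string:

    ContainerOperationClient scmClient = new ContainerOperationClient(
        storageContainerLocationClient, xceiverClientManager);
    try {
      ContainerWithPipeline c = scmClient.createContainer("ozone");
      long id = c.getContainerInfo().getContainerID();
      scmClient.closeContainer(id);        // datanode first, then SCM (ordering #2 above)
      scmClient.deleteContainer(id, true); // force delete
    } finally {
      scmClient.close();
    }
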
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
deleted file mode 100644
index d3bb31a..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ /dev/null
@@ -1,350 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.client;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
-import org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClients;
-import org.apache.ratis.protocol.AlreadyClosedException;
-import org.apache.ratis.protocol.GroupMismatchException;
-import org.apache.ratis.protocol.NotReplicatedException;
-import org.apache.ratis.protocol.RaftRetryFailureException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.text.ParseException;
-import java.time.Instant;
-import java.time.ZoneId;
-import java.time.ZonedDateTime;
-import java.time.format.DateTimeFormatter;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * Utility methods for Ozone and Container Clients.
- *
- * The methods to retrieve SCM service endpoints assume there is a single
- * SCM service instance. This will change when we switch to replicated service
- * instances for redundancy.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class HddsClientUtils {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      HddsClientUtils.class);
-
-  private static final int NO_PORT = -1;
-
-  private HddsClientUtils() {
-  }
-
-  private static final List<Class<? extends Exception>> EXCEPTION_LIST =
-      new ArrayList<Class<? extends Exception>>() {{
-        add(TimeoutException.class);
-        add(StorageContainerException.class);
-        add(RaftRetryFailureException.class);
-        add(AlreadyClosedException.class);
-        add(GroupMismatchException.class);
-        // NotReplicatedException will be thrown if watchForCommit
-        // does not succeed
-        add(NotReplicatedException.class);
-      }};
-
-  /**
-   * Date format used in Ozone. The ThreadLocal below makes it thread-safe.
-   */
-  private static final ThreadLocal<DateTimeFormatter> DATE_FORMAT =
-      ThreadLocal.withInitial(() -> {
-        DateTimeFormatter format =
-            DateTimeFormatter.ofPattern(OzoneConsts.OZONE_DATE_FORMAT);
-        return format.withZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE));
-      });
-
-
-  /**
-   * Convert a time in milliseconds to the human-readable format required
-   * in Ozone.
-   * @return a human readable string for the input time
-   */
-  public static String formatDateTime(long millis) {
-    ZonedDateTime dateTime = ZonedDateTime.ofInstant(
-        Instant.ofEpochMilli(millis), DATE_FORMAT.get().getZone());
-    return DATE_FORMAT.get().format(dateTime);
-  }
-
-  /**
-   * Convert a time in the Ozone date format to milliseconds.
-   * @return time in milliseconds
-   */
-  public static long formatDateTime(String date) throws ParseException {
-    Preconditions.checkNotNull(date, "Date string should not be null.");
-    return ZonedDateTime.parse(date, DATE_FORMAT.get())
-        .toInstant().toEpochMilli();
-  }
-
-  /**
-   * Verifies that a bucket or volume name is a valid DNS name.
-   *
-   * @param resName Bucket or volume name to be validated
-   *
-   * @throws IllegalArgumentException
-   */
-  public static void verifyResourceName(String resName)
-      throws IllegalArgumentException {
-    if (resName == null) {
-      throw new IllegalArgumentException("Bucket or Volume name is null");
-    }
-
-    if (resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH ||
-        resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH) {
-      throw new IllegalArgumentException(
-          "Bucket or Volume length is illegal, "
-              + "valid length is 3-63 characters");
-    }
-
-    if (resName.charAt(0) == '.' || resName.charAt(0) == '-') {
-      throw new IllegalArgumentException(
-          "Bucket or Volume name cannot start with a period or dash");
-    }
-
-    if (resName.charAt(resName.length() - 1) == '.' ||
-        resName.charAt(resName.length() - 1) == '-') {
-      throw new IllegalArgumentException("Bucket or Volume name "
-          + "cannot end with a period or dash");
-    }
-
-    boolean isIPv4 = true;
-    char prev = (char) 0;
-
-    for (int index = 0; index < resName.length(); index++) {
-      char currChar = resName.charAt(index);
-      if (currChar != '.') {
-        isIPv4 = ((currChar >= '0') && (currChar <= '9')) && isIPv4;
-      }
-      // Use inclusive bounds so that 'A' and 'Z' themselves are rejected
-      // with the uppercase-specific message.
-      if (currChar >= 'A' && currChar <= 'Z') {
-        throw new IllegalArgumentException(
-            "Bucket or Volume name does not support uppercase characters");
-      }
-      if (currChar != '.' && currChar != '-') {
-        if (currChar < '0' || (currChar > '9' && currChar < 'a') ||
-            currChar > 'z') {
-          throw new IllegalArgumentException("Bucket or Volume name has an " +
-              "unsupported character : " +
-              currChar);
-        }
-      }
-      if (prev == '.' && currChar == '.') {
-        throw new IllegalArgumentException("Bucket or Volume name should not " +
-            "have two contiguous periods");
-      }
-      if (prev == '-' && currChar == '.') {
-        throw new IllegalArgumentException(
-            "Bucket or Volume name should not have period after dash");
-      }
-      if (prev == '.' && currChar == '-') {
-        throw new IllegalArgumentException(
-            "Bucket or Volume name should not have dash after period");
-      }
-      prev = currChar;
-    }
-
-    if (isIPv4) {
-      throw new IllegalArgumentException(
-          "Bucket or Volume name cannot be an IPv4 address or all numeric");
-    }
-  }
-
-  /**
-   * Verifies that each bucket / volume name is a valid DNS name.
-   *
-   * @param resourceNames Array of bucket / volume names to be verified.
-   */
-  public static void verifyResourceName(String... resourceNames) {
-    for (String resourceName : resourceNames) {
-      HddsClientUtils.verifyResourceName(resourceName);
-    }
-  }
-
-  /**
-   * Checks that the object references passed as parameters are not null.
-   *
-   * @param references Array of object references to be checked.
-   * @param <T> type of the references
-   */
-  public static <T> void checkNotNull(T... references) {
-    for (T ref: references) {
-      Preconditions.checkNotNull(ref);
-    }
-  }
-
-  /**
-   * Returns the cache value to be used for list calls.
-   * @param conf Configuration object
-   * @return list cache size
-   */
-  public static int getListCacheSize(Configuration conf) {
-    return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE,
-        OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT);
-  }
-
-  /**
-   * @return a default instance of {@link CloseableHttpClient}.
-   */
-  public static CloseableHttpClient newHttpClient() {
-    return HddsClientUtils.newHttpClient(new Configuration());
-  }
-
-  /**
-   * Returns a {@link CloseableHttpClient} configured by given configuration.
-   * If conf is null, returns a default instance.
-   *
-   * @param conf configuration
-   * @return a {@link CloseableHttpClient} instance.
-   */
-  public static CloseableHttpClient newHttpClient(Configuration conf) {
-    long socketTimeout = OzoneConfigKeys
-        .OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT;
-    long connectionTimeout = OzoneConfigKeys
-        .OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT;
-    if (conf != null) {
-      socketTimeout = conf.getTimeDuration(
-          OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
-          OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
-          TimeUnit.MILLISECONDS);
-      connectionTimeout = conf.getTimeDuration(
-          OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
-          OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
-          TimeUnit.MILLISECONDS);
-    }
-
-    CloseableHttpClient client = HttpClients.custom()
-        .setDefaultRequestConfig(
-            RequestConfig.custom()
-                .setSocketTimeout(Math.toIntExact(socketTimeout))
-                .setConnectTimeout(Math.toIntExact(connectionTimeout))
-                .build())
-        .build();
-    return client;
-  }
-
-  /**
-   * Returns the maximum number of outstanding async requests to be handled
-   * by the Standalone and Ratis clients.
-   */
-  public static int getMaxOutstandingRequests(Configuration config) {
-    return OzoneConfiguration.of(config)
-        .getObject(ScmClientConfig.class)
-        .getMaxOutstandingRequests();
-  }
-
-  /**
-   * Create an SCM security client.
-   *
-   * @return {@link SCMSecurityProtocol}
-   * @throws IOException
-   */
-  public static SCMSecurityProtocol getScmSecurityClient(
-      OzoneConfiguration conf, UserGroupInformation ugi) throws IOException {
-    RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
-    InetSocketAddress scmSecurityProtoAdd =
-        HddsUtils.getScmAddressForSecurityProtocol(conf);
-    SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient =
-        new SCMSecurityProtocolClientSideTranslatorPB(
-            RPC.getProxy(SCMSecurityProtocolPB.class, scmVersion,
-                scmSecurityProtoAdd, ugi, conf,
-                NetUtils.getDefaultSocketFactory(conf),
-                Client.getRpcTimeout(conf)));
-    return scmSecurityClient;
-  }
-
-  public static Throwable checkForException(Exception e) {
-    Throwable t = e;
-    while (t != null) {
-      for (Class<? extends Exception> cls : getExceptionList()) {
-        if (cls.isInstance(t)) {
-          return t;
-        }
-      }
-      t = t.getCause();
-    }
-    return t;
-  }
-
-  public static RetryPolicy createRetryPolicy(int maxRetryCount,
-      long retryInterval) {
-    // retry with fixed sleep between retries
-    return RetryPolicies.retryUpToMaximumCountWithFixedSleep(
-        maxRetryCount, retryInterval, TimeUnit.MILLISECONDS);
-  }
-
-  public static Map<Class<? extends Throwable>,
-      RetryPolicy> getRetryPolicyByException(int maxRetryCount,
-      long retryInterval) {
-    Map<Class<? extends Throwable>, RetryPolicy> policyMap = new HashMap<>();
-    for (Class<? extends Exception> ex : EXCEPTION_LIST) {
-      if (ex == TimeoutException.class
-          || ex == RaftRetryFailureException.class) {
-        // retry without sleep
-        policyMap.put(ex, createRetryPolicy(maxRetryCount, 0));
-      } else {
-        // retry with fixed sleep between retries
-        policyMap.put(ex, createRetryPolicy(maxRetryCount, retryInterval));
-      }
-    }
-    // Default retry policy
-    policyMap
-        .put(Exception.class, createRetryPolicy(maxRetryCount, retryInterval));
-    return policyMap;
-  }
-
-  public static List<Class<? extends Exception>> getExceptionList() {
-    return EXCEPTION_LIST;
-  }
-}
\ No newline at end of file
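
To make the validation and retry helpers above concrete, a small sketch; the method names are those defined in this file, while the resource names and retry counts are illustrative:

    HddsClientUtils.verifyResourceName("my-bucket");  // valid DNS-style name
    // verifyResourceName("Bucket")    would throw: uppercase character
    // verifyResourceName("127.0.0.1") would throw: IPv4-like / all-numeric name

    // One fixed-sleep retry policy per listed exception class;
    // TimeoutException and RaftRetryFailureException retry without sleep.
    Map<Class<? extends Throwable>, RetryPolicy> policies =
        HddsClientUtils.getRetryPolicyByException(5, 1000);
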
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
deleted file mode 100644
index 73ad78c..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.client;
-
-/**
- * Client facing classes for the container operations.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
deleted file mode 100644
index 9390bc1..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-/**
- * Classes for different type of container service client.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
deleted file mode 100644
index 40bbd93..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.fs.Seekable;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-/**
- * An {@link InputStream} called from KeyInputStream to read a block from the
- * container.
- * This class encapsulates all state management for iterating
- * through the sequence of chunks through {@link ChunkInputStream}.
- */
-public class BlockInputStream extends InputStream implements Seekable {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BlockInputStream.class);
-
-  private static final int EOF = -1;
-
-  private final BlockID blockID;
-  private final long length;
-  private Pipeline pipeline;
-  private final Token<OzoneBlockTokenIdentifier> token;
-  private final boolean verifyChecksum;
-  private XceiverClientManager xceiverClientManager;
-  private XceiverClientSpi xceiverClient;
-  private boolean initialized = false;
-
-  // List of ChunkInputStreams, one for each chunk in the block
-  private List<ChunkInputStream> chunkStreams;
-
-  // chunkOffsets[i] stores the index of the first data byte in
-  // chunkStream i w.r.t the block data.
-  // Let's say we have a chunk size of 40 bytes, and the parent
-  // block stores data from index 200 and has length 400.
-  // The first 40 bytes of this block will be stored in chunk[0], the next
-  // 40 in chunk[1], and so on. But since the chunkOffsets are w.r.t. the
-  // block only and not the key, the values in chunkOffsets will be
-  // [0, 40, 80, ...].
-  private long[] chunkOffsets = null;
-
-  // Index of the chunkStream corresponding to the current position of the
-  // BlockInputStream, i.e. the offset of the data to be read next from
-  // this block.
-
-  // Position of the BlockInputStream is maintained by this variable till
-  // the stream is initialized. This position is w.r.t. the block only and
-  // not the key.
-  // For the above example, if we seek to position 240 before the stream is
-  // initialized, then value of blockPosition will be set to 40.
-  // Once, the stream is initialized, the position of the stream
-  // will be determined by the current chunkStream and its position.
-  private long blockPosition = 0;
-
-  // Tracks the chunkIndex corresponding to the last blockPosition so that it
-  // can be reset if a new position is seeked.
-  private int chunkIndexOfPrevPosition;
-
-  public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline,
-      Token<OzoneBlockTokenIdentifier> token, boolean verifyChecksum,
-      XceiverClientManager xceiverClientManager) {
-    this.blockID = blockId;
-    this.length = blockLen;
-    this.pipeline = pipeline;
-    this.token = token;
-    this.verifyChecksum = verifyChecksum;
-    this.xceiverClientManager = xceiverClientManager;
-  }
-
-  /**
-   * Initialize the BlockInputStream. Get the BlockData (list of chunks) from
-   * the Container and create the ChunkInputStreams for each Chunk in the Block.
-   */
-  public synchronized void initialize() throws IOException {
-
-    // Pre-check that the stream has not been initialized already
-    if (initialized) {
-      return;
-    }
-
-    List<ChunkInfo> chunks = getChunkInfos();
-    if (chunks != null && !chunks.isEmpty()) {
-      // For each chunk in the block, create a ChunkInputStream and compute
-      // its chunkOffset
-      this.chunkOffsets = new long[chunks.size()];
-      long tempOffset = 0;
-
-      this.chunkStreams = new ArrayList<>(chunks.size());
-      for (int i = 0; i < chunks.size(); i++) {
-        addStream(chunks.get(i));
-        chunkOffsets[i] = tempOffset;
-        tempOffset += chunks.get(i).getLen();
-      }
-
-      initialized = true;
-      this.chunkIndex = 0;
-
-      if (blockPosition > 0) {
-        // Stream was seeked to blockPosition before initialization. Seek to the
-        // blockPosition now.
-        seek(blockPosition);
-      }
-    }
-  }
-
-  /**
-   * Send RPC call to get the block info from the container.
-   * @return List of chunks in this block.
-   */
-  protected List<ChunkInfo> getChunkInfos() throws IOException {
-    // irrespective of the container state, we will always read via Standalone
-    // protocol.
-    if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
-      pipeline = Pipeline.newBuilder(pipeline)
-          .setType(HddsProtos.ReplicationType.STAND_ALONE).build();
-    }
-    xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline);
-    boolean success = false;
-    List<ChunkInfo> chunks;
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Initializing BlockInputStream for get key to access {}",
-            blockID.getContainerID());
-      }
-
-      if (token != null) {
-        UserGroupInformation.getCurrentUser().addToken(token);
-      }
-      DatanodeBlockID datanodeBlockID = blockID
-          .getDatanodeBlockIDProtobuf();
-      GetBlockResponseProto response = ContainerProtocolCalls
-          .getBlock(xceiverClient, datanodeBlockID);
-
-      chunks = response.getBlockData().getChunksList();
-      success = true;
-    } finally {
-      if (!success) {
-        xceiverClientManager.releaseClientForReadData(xceiverClient, false);
-      }
-    }
-
-    return chunks;
-  }
-
-  /**
-   * Append another ChunkInputStream to the end of the list. Note that the
-   * ChunkInputStream is only created here. The chunk will be read from the
-   * Datanode only when a read operation is performed for that chunk.
-   */
-  protected synchronized void addStream(ChunkInfo chunkInfo) {
-    chunkStreams.add(new ChunkInputStream(chunkInfo, blockID,
-        xceiverClient, verifyChecksum));
-  }
-
-  public synchronized long getRemaining() throws IOException {
-    return length - getPos();
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public synchronized int read() throws IOException {
-    byte[] buf = new byte[1];
-    if (read(buf, 0, 1) == EOF) {
-      return EOF;
-    }
-    return Byte.toUnsignedInt(buf[0]);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public synchronized int read(byte[] b, int off, int len) throws IOException {
-    if (b == null) {
-      throw new NullPointerException();
-    }
-    if (off < 0 || len < 0 || len > b.length - off) {
-      throw new IndexOutOfBoundsException();
-    }
-    if (len == 0) {
-      return 0;
-    }
-
-    if (!initialized) {
-      initialize();
-    }
-
-    checkOpen();
-    int totalReadLen = 0;
-    while (len > 0) {
-      // if we are at the last chunk and have read the entire chunk, return
-      if (chunkStreams.size() == 0 ||
-          (chunkStreams.size() - 1 <= chunkIndex &&
-              chunkStreams.get(chunkIndex)
-                  .getRemaining() == 0)) {
-        return totalReadLen == 0 ? EOF : totalReadLen;
-      }
-
-      // Get the current chunkStream and read data from it
-      ChunkInputStream current = chunkStreams.get(chunkIndex);
-      int numBytesToRead = Math.min(len, (int)current.getRemaining());
-      int numBytesRead = current.read(b, off, numBytesToRead);
-      if (numBytesRead != numBytesToRead) {
-        // This implies that there is either data loss or corruption in the
-        // chunk entries. Even EOF in the current stream would be covered in
-        // this case.
-        throw new IOException(String.format(
-            "Inconsistent read for chunkName=%s length=%d numBytesRead=%d",
-            current.getChunkName(), current.getLength(), numBytesRead));
-      }
-      totalReadLen += numBytesRead;
-      off += numBytesRead;
-      len -= numBytesRead;
-      if (current.getRemaining() <= 0 &&
-          ((chunkIndex + 1) < chunkStreams.size())) {
-        chunkIndex += 1;
-      }
-    }
-    return totalReadLen;
-  }
-
-  /**
-   * Seeks the BlockInputStream to the specified position. If the stream is
-   * not initialized, save the seeked position via blockPosition. Otherwise,
-   * update the position in 2 steps:
-   *    1. Update the chunkIndex to the chunkStream corresponding to the
-   *    seeked position.
-   *    2. Seek the corresponding chunkStream to the adjusted position.
-   *
-   * Let's say we have a chunk size of 40 bytes, and the parent block
-   * stores data from index 200 and has length 400. If the key is seeked to
-   * position 290, this block is seeked to position 90 (= 290 - 200).
-   * When seek(90) is called on this blockStream, then
-   *    1. chunkIndex will be set to 2 (as indices 80 - 120 reside in chunk[2]).
-   *    2. chunkStream[2] will be seeked to position 10
-   *       (= 90 - chunkOffset[2] (= 80)).
-   */
-  @Override
-  public synchronized void seek(long pos) throws IOException {
-    if (!initialized) {
-      // Stream has not been initialized yet. Save the position so that it
-      // can be seeked when the stream is initialized.
-      blockPosition = pos;
-      return;
-    }
-
-    checkOpen();
-    if (pos < 0 || pos >= length) {
-      if (pos == 0) {
-        // It is possible for length and pos to be zero in which case
-        // seek should return instead of throwing exception
-        return;
-      }
-      throw new EOFException(
-          "EOF encountered at pos: " + pos + " for block: " + blockID);
-    }
-
-    if (chunkIndex >= chunkStreams.size()) {
-      chunkIndex = Arrays.binarySearch(chunkOffsets, pos);
-    } else if (pos < chunkOffsets[chunkIndex]) {
-      chunkIndex =
-          Arrays.binarySearch(chunkOffsets, 0, chunkIndex, pos);
-    } else if (pos >= chunkOffsets[chunkIndex] + chunkStreams
-        .get(chunkIndex).getLength()) {
-      chunkIndex = Arrays.binarySearch(chunkOffsets,
-          chunkIndex + 1, chunkStreams.size(), pos);
-    }
-    if (chunkIndex < 0) {
-      // Binary search returns -insertionPoint - 1 if the element is not
-      // present in the array, where insertionPoint is the point at which
-      // the element would be inserted in the sorted array. We need to
-      // adjust the chunkIndex accordingly so that
-      // chunkIndex = insertionPoint - 1.
-      chunkIndex = -chunkIndex - 2;
-    }
-
-    // Reset the previous chunkStream's position
-    chunkStreams.get(chunkIndexOfPrevPosition).resetPosition();
-
-    // seek to the proper offset in the ChunkInputStream
-    chunkStreams.get(chunkIndex).seek(pos - chunkOffsets[chunkIndex]);
-    chunkIndexOfPrevPosition = chunkIndex;
-  }
-
-  @Override
-  public synchronized long getPos() throws IOException {
-    if (length == 0) {
-      return 0;
-    }
-
-    if (!initialized) {
-      // The stream is not initialized yet. Return the blockPosition
-      return blockPosition;
-    } else {
-      return chunkOffsets[chunkIndex] + chunkStreams.get(chunkIndex).getPos();
-    }
-  }
-
-  @Override
-  public boolean seekToNewSource(long targetPos) throws IOException {
-    return false;
-  }
-
-  @Override
-  public synchronized void close() {
-    if (xceiverClientManager != null && xceiverClient != null) {
-      xceiverClientManager.releaseClient(xceiverClient, false);
-      xceiverClientManager = null;
-      xceiverClient = null;
-    }
-  }
-
-  public synchronized void resetPosition() {
-    this.blockPosition = 0;
-  }
-
-  /**
-   * Checks if the stream is open.  If not, throw an exception.
-   *
-   * @throws IOException if stream is closed
-   */
-  protected synchronized void checkOpen() throws IOException {
-    if (xceiverClient == null) {
-      throw new IOException("BlockInputStream has been closed.");
-    }
-  }
-
-  public BlockID getBlockID() {
-    return blockID;
-  }
-
-  public long getLength() {
-    return length;
-  }
-
-  @VisibleForTesting
-  synchronized int getChunkIndex() {
-    return chunkIndex;
-  }
-
-  @VisibleForTesting
-  synchronized long getBlockPosition() {
-    return blockPosition;
-  }
-
-  @VisibleForTesting
-  synchronized List<ChunkInputStream> getChunkStreams() {
-    return chunkStreams;
-  }
-}
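
A worked instance of the seek arithmetic documented above (chunk size 40, seek target 90), showing how a negative binary-search result is normalized to insertionPoint - 1; the variable names are illustrative:

    long[] chunkOffsets = {0, 40, 80, 120}; // first byte of each chunk w.r.t. the block
    long pos = 90;
    int idx = java.util.Arrays.binarySearch(chunkOffsets, pos); // -4: not an exact offset
    if (idx < 0) {
      idx = -idx - 2;                       // insertionPoint (3) - 1 = 2 -> chunk[2]
    }
    long offsetInChunk = pos - chunkOffsets[idx]; // 90 - 80 = 10, matching the javadoc
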
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
deleted file mode 100644
index b15ca3f..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ /dev/null
@@ -1,640 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientReply;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.common.ChecksumData;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicReference;
-
-import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
-    .putBlockAsync;
-import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
-    .writeChunkAsync;
-
-/**
- * An {@link OutputStream} used by the REST service in combination with the
- * SCMClient to write the value of a key to a sequence
- * of container chunks.  Writes are buffered locally and periodically written to
- * the container as a new chunk.  In order to preserve the semantics that
- * replacement of a pre-existing key is atomic, each instance of the stream has
- * an internal unique identifier.  This unique identifier and a monotonically
- * increasing chunk index form a composite key that is used as the chunk name.
- * After all data is written, a putKey call creates or updates the corresponding
- * container key, and this call includes the full list of chunks that make up
- * the key data.  The list of chunks is updated all at once.  Therefore, a
- * concurrent reader can never see an intermediate state in which different
- * chunks of data from different versions of the key data are interleaved.
- * This class encapsulates all state management for buffering and writing
- * through to the container.
- */
-public class BlockOutputStream extends OutputStream {
-  public static final Logger LOG =
-      LoggerFactory.getLogger(BlockOutputStream.class);
-
-  private volatile BlockID blockID;
-
-  private final BlockData.Builder containerBlockData;
-  private XceiverClientManager xceiverClientManager;
-  private XceiverClientSpi xceiverClient;
-  private final ContainerProtos.ChecksumType checksumType;
-  private final int bytesPerChecksum;
-  private int chunkIndex;
-  private int chunkSize;
-  private final long streamBufferFlushSize;
-  private final long streamBufferMaxSize;
-  private BufferPool bufferPool;
-  // The IOException will be set by the response-handling thread if an
-  // exception is received in a response. Once the exception is set, the
-  // next request will fail upfront.
-  private AtomicReference<IOException> ioException;
-  private ExecutorService responseExecutor;
-
-  // the effective length of data flushed so far
-  private long totalDataFlushedLength;
-
-  // effective data write attempted so far for the block
-  private long writtenDataLength;
-
-  // List containing buffers for which the putBlock call will
-  // update the length in the datanodes. This list just maintains
-  // references to the buffers in the BufferPool, which are cleared
-  // when watchForCommit acknowledges that a putBlock logIndex has been
-  // committed on all datanodes. This list is a placeholder for buffers
-  // written between successive putBlock calls.
-  private List<ByteBuffer> bufferList;
-
-  // This object maintains the commitIndexes and byteBufferList in order.
-  // For each acknowledged logIndex, the corresponding list of buffers is
-  // released back to the buffer pool.
-  private final CommitWatcher commitWatcher;
-
-  private List<DatanodeDetails> failedServers;
-
-  /**
-   * Creates a new BlockOutputStream.
-   *
-   * @param blockID              block ID
-   * @param xceiverClientManager client manager that controls client
-   * @param pipeline             pipeline where block will be written
-   * @param chunkSize            chunk size
-   * @param bufferPool           pool of buffers
-   * @param streamBufferFlushSize flush size
-   * @param streamBufferMaxSize   max size of the currentBuffer
-   * @param watchTimeout          watch timeout
-   * @param checksumType          checksum type
-   * @param bytesPerChecksum      Bytes per checksum
-   */
-  @SuppressWarnings("parameternumber")
-  public BlockOutputStream(BlockID blockID,
-      XceiverClientManager xceiverClientManager, Pipeline pipeline,
-      int chunkSize, long streamBufferFlushSize, long streamBufferMaxSize,
-      long watchTimeout, BufferPool bufferPool, ChecksumType checksumType,
-      int bytesPerChecksum)
-      throws IOException {
-    this.blockID = blockID;
-    this.chunkSize = chunkSize;
-    KeyValue keyValue =
-        KeyValue.newBuilder().setKey("TYPE").setValue("KEY").build();
-    this.containerBlockData =
-        BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf())
-            .addMetadata(keyValue);
-    this.xceiverClientManager = xceiverClientManager;
-    this.xceiverClient = xceiverClientManager.acquireClient(pipeline);
-    this.chunkIndex = 0;
-    this.streamBufferFlushSize = streamBufferFlushSize;
-    this.streamBufferMaxSize = streamBufferMaxSize;
-    this.bufferPool = bufferPool;
-    this.checksumType = checksumType;
-    this.bytesPerChecksum = bytesPerChecksum;
-
-    // A single thread executor handles the responses of async requests
-    responseExecutor = Executors.newSingleThreadExecutor();
-    commitWatcher = new CommitWatcher(bufferPool, xceiverClient, watchTimeout);
-    bufferList = null;
-    totalDataFlushedLength = 0;
-    writtenDataLength = 0;
-    failedServers = new ArrayList<>(0);
-    ioException = new AtomicReference<>(null);
-  }
-
-
-  public BlockID getBlockID() {
-    return blockID;
-  }
-
-  public long getTotalAckDataLength() {
-    return commitWatcher.getTotalAckDataLength();
-  }
-
-  public long getWrittenDataLength() {
-    return writtenDataLength;
-  }
-
-  public List<DatanodeDetails> getFailedServers() {
-    return failedServers;
-  }
-
-  @VisibleForTesting
-  public XceiverClientSpi getXceiverClient() {
-    return xceiverClient;
-  }
-
-  @VisibleForTesting
-  public long getTotalDataFlushedLength() {
-    return totalDataFlushedLength;
-  }
-
-  @VisibleForTesting
-  public BufferPool getBufferPool() {
-    return bufferPool;
-  }
-
-  public IOException getIoException() {
-    return ioException.get();
-  }
-
-  @VisibleForTesting
-  public Map<Long, List<ByteBuffer>> getCommitIndex2flushedDataMap() {
-    return commitWatcher.getCommitIndex2flushedDataMap();
-  }
-
-  @Override
-  public void write(int b) throws IOException {
-    checkOpen();
-    byte[] buf = new byte[1];
-    buf[0] = (byte) b;
-    write(buf, 0, 1);
-  }
-
-  @Override
-  public void write(byte[] b, int off, int len) throws IOException {
-    checkOpen();
-    if (b == null) {
-      throw new NullPointerException();
-    }
-    if ((off < 0) || (off > b.length) || (len < 0) || ((off + len) > b.length)
-        || ((off + len) < 0)) {
-      throw new IndexOutOfBoundsException();
-    }
-    if (len == 0) {
-      return;
-    }
-
-    while (len > 0) {
-      int writeLen;
-      // Allocate a buffer if needed. The buffer will be allocated only
-      // once as needed and will be reused for multiple blockOutputStream
-      // entries.
-      ByteBuffer currentBuffer = bufferPool.allocateBufferIfNeeded();
-      int pos = currentBuffer.position();
-      writeLen =
-          Math.min(chunkSize - pos % chunkSize, len);
-      currentBuffer.put(b, off, writeLen);
-      if (!currentBuffer.hasRemaining()) {
-        writeChunk(currentBuffer);
-      }
-      off += writeLen;
-      len -= writeLen;
-      writtenDataLength += writeLen;
-      if (shouldFlush()) {
-        updateFlushLength();
-        executePutBlock();
-      }
-      // Data in the bufferPool can not exceed streamBufferMaxSize
-      if (isBufferPoolFull()) {
-        handleFullBuffer();
-      }
-    }
-  }
-
-  private boolean shouldFlush() {
-    return bufferPool.computeBufferData() % streamBufferFlushSize == 0;
-  }
-
-  private void updateFlushLength() {
-    // everything written so far is now considered flushed
-    totalDataFlushedLength = writtenDataLength;
-  }
-
-  private boolean isBufferPoolFull() {
-    return bufferPool.computeBufferData() == streamBufferMaxSize;
-  }
-  /**
-   * Will be called on the retry path in case of a ClosedContainerException
-   * or a TimeoutException. In this case, the data is already cached in the
-   * currentBuffer.
-   * @param len length of data to write
-   * @throws IOException if an error occurred
-   */
-  public void writeOnRetry(long len) throws IOException {
-    if (len == 0) {
-      return;
-    }
-    int count = 0;
-    Preconditions.checkArgument(len <= streamBufferMaxSize);
-    while (len > 0) {
-      long writeLen;
-      writeLen = Math.min(chunkSize, len);
-      if (writeLen == chunkSize) {
-        writeChunk(bufferPool.getBuffer(count));
-      }
-      len -= writeLen;
-      count++;
-      writtenDataLength += writeLen;
-      // We should not call isBufferFull/shouldFlush here.
-      // The buffer might already be full as the whole data is already cached
-      // in the buffer. We should just check whether we wrote data of size
-      // streamBufferMaxSize/streamBufferFlushSize and, if so, handle the
-      // full buffer/flush buffer condition.
-      if (writtenDataLength % streamBufferFlushSize == 0) {
-        // reset the position to zero as now we will be reading the
-        // next buffer in the list
-        updateFlushLength();
-        executePutBlock();
-      }
-      if (writtenDataLength == streamBufferMaxSize) {
-        handleFullBuffer();
-      }
-    }
-  }
-
-  /**
-   * This is a blocking call. It will wait for the flush till the commit index
-   * at the head of the commitIndex2flushedDataMap gets replicated to all or
-   * a majority of the nodes.
-   * @throws IOException
-   */
-  private void handleFullBuffer() throws IOException {
-    try {
-      checkOpen();
-      if (!commitWatcher.getFutureMap().isEmpty()) {
-        waitOnFlushFutures();
-      }
-    } catch (InterruptedException | ExecutionException e) {
-      setIoException(e);
-      adjustBuffersOnException();
-      throw getIoException();
-    }
-    watchForCommit(true);
-  }
-
-
-  // It may happen that once the exception is encountered, we still might
-  // have successfully flushed up to a certain index. Make sure the buffers
-  // only contain data which has not been sufficiently replicated.
-  private void adjustBuffersOnException() {
-    commitWatcher.releaseBuffersOnException();
-  }
-
-  /**
-   * Calls the watchForCommit API of the Ratis client. For a Standalone
-   * client, it is a no-op.
-   * @param bufferFull flag indicating whether the bufferFull condition was
-   *              hit or whether this is called as part of flush/close
-   * @throws IOException in case the watch gets timed out
-   */
-  private void watchForCommit(boolean bufferFull) throws IOException {
-    checkOpen();
-    try {
-      XceiverClientReply reply = bufferFull ?
-          commitWatcher.watchOnFirstIndex() : commitWatcher.watchOnLastIndex();
-      if (reply != null) {
-        List<DatanodeDetails> dnList = reply.getDatanodes();
-        if (!dnList.isEmpty()) {
-          Pipeline pipe = xceiverClient.getPipeline();
-
-          LOG.warn("Failed to commit BlockId {} on {}. Failed nodes: {}",
-              blockID, pipe, dnList);
-          failedServers.addAll(dnList);
-        }
-      }
-    } catch (IOException ioe) {
-      setIoException(ioe);
-      throw getIoException();
-    }
-  }
-
-  private CompletableFuture<ContainerProtos.
-      ContainerCommandResponseProto> executePutBlock()
-      throws IOException {
-    checkOpen();
-    long flushPos = totalDataFlushedLength;
-    Preconditions.checkNotNull(bufferList);
-    List<ByteBuffer> byteBufferList = bufferList;
-    bufferList = null;
-    Preconditions.checkNotNull(byteBufferList);
-
-    CompletableFuture<ContainerProtos.
-        ContainerCommandResponseProto> flushFuture;
-    try {
-      XceiverClientReply asyncReply =
-          putBlockAsync(xceiverClient, containerBlockData.build());
-      CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
-          asyncReply.getResponse();
-      flushFuture = future.thenApplyAsync(e -> {
-        try {
-          validateResponse(e);
-        } catch (IOException sce) {
-          throw new CompletionException(sce);
-        }
-        // if the ioException is not set, putBlock is successful
-        if (getIoException() == null) {
-          BlockID responseBlockID = BlockID.getFromProtobuf(
-              e.getPutBlock().getCommittedBlockLength().getBlockID());
-          Preconditions.checkState(blockID.getContainerBlockID()
-              .equals(responseBlockID.getContainerBlockID()));
-          // updates the bcsId of the block
-          blockID = responseBlockID;
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(
-                "Adding index " + asyncReply.getLogIndex() + " commitMap size "
-                    + commitWatcher.getCommitInfoMapSize() + " flushLength "
-                    + flushPos + " numBuffers " + byteBufferList.size()
-                    + " blockID " + blockID + " bufferPool size" + bufferPool
-                    .getSize() + " currentBufferIndex " + bufferPool
-                    .getCurrentBufferIndex());
-          }
-          // for standalone protocol, logIndex will always be 0.
-          commitWatcher
-              .updateCommitInfoMap(asyncReply.getLogIndex(), byteBufferList);
-        }
-        return e;
-      }, responseExecutor).exceptionally(e -> {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              "putBlock failed for blockID " + blockID + " with exception " + e
-                  .getLocalizedMessage());
-        }
-        CompletionException ce =  new CompletionException(e);
-        setIoException(ce);
-        throw ce;
-      });
-    } catch (IOException | InterruptedException | ExecutionException e) {
-      throw new IOException(
-          "Unexpected Storage Container Exception: " + e.toString(), e);
-    }
-    commitWatcher.getFutureMap().put(flushPos, flushFuture);
-    return flushFuture;
-  }
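
The response handling above chains validation onto the putBlock reply with thenApplyAsync on a single-thread executor and surfaces failures through the future, so the next request fails upfront. Below is a minimal, self-contained sketch of that CompletableFuture pattern; it is not part of the deleted code, and the names AsyncValidateSketch and validate are hypothetical stand-ins.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AsyncValidateSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService responseExecutor = Executors.newSingleThreadExecutor();
    // stand-in for the async RPC reply future
    CompletableFuture<String> response =
        CompletableFuture.supplyAsync(() -> "OK");

    CompletableFuture<String> flushFuture = response.thenApplyAsync(r -> {
      try {
        validate(r); // throws IOException on a bad response
      } catch (IOException e) {
        // surface the failure through the future, as the stream does
        throw new CompletionException(e);
      }
      return r;
    }, responseExecutor).exceptionally(e -> {
      // record the failure so the next request fails upfront
      System.err.println("putBlock failed: " + e.getMessage());
      throw new CompletionException(e);
    });

    System.out.println(flushFuture.get()); // prints OK
    responseExecutor.shutdown();
  }

  static void validate(String r) throws IOException {
    if (!"OK".equals(r)) {
      throw new IOException("unexpected response: " + r);
    }
  }
}
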
-
-  @Override
-  public void flush() throws IOException {
-    if (xceiverClientManager != null && xceiverClient != null
-        && bufferPool != null && bufferPool.getSize() > 0) {
-      try {
-        handleFlush();
-      } catch (InterruptedException | ExecutionException e) {
-        // just set the exception here as well in order to keep the
-        // ioException field consistent
-        setIoException(e);
-        adjustBuffersOnException();
-        throw getIoException();
-      }
-    }
-  }
-
-
-  private void writeChunk(ByteBuffer buffer)
-      throws IOException {
-    // This data in the buffer will be pushed to datanode and a reference will
-    // be added to the bufferList. Once putBlock gets executed, this list will
-    // be marked null. Hence, during first writeChunk call after every putBlock
-    // call or during the first call to writeChunk here, the list will be null.
-
-    if (bufferList == null) {
-      bufferList = new ArrayList<>();
-    }
-    bufferList.add(buffer);
-    // Please note: we are not flipping the slice when we write since
-    // the slices point to the currentBuffer start and end as needed for
-    // the chunk write. Also please note, Duplicate does not create a
-    // copy of data, it only creates metadata that points to the data
-    // stream.
-    ByteBuffer chunk = buffer.duplicate();
-    chunk.position(0);
-    chunk.limit(buffer.position());
-    writeChunkToContainer(chunk);
-  }
-
-  private void handleFlush()
-      throws IOException, InterruptedException, ExecutionException {
-    checkOpen();
-    // flush the last chunk data residing on the currentBuffer
-    if (totalDataFlushedLength < writtenDataLength) {
-      ByteBuffer currentBuffer = bufferPool.getCurrentBuffer();
-      Preconditions.checkArgument(currentBuffer.position() > 0);
-      if (currentBuffer.position() != chunkSize) {
-        writeChunk(currentBuffer);
-      }
-      // This can be a partially filled chunk. Since we are flushing the
-      // buffer here, we just limit this buffer to the current position so
-      // that the next write happens in a new buffer.
-      updateFlushLength();
-      executePutBlock();
-    }
-    waitOnFlushFutures();
-    watchForCommit(false);
-    // just check again if the exception is hit while waiting for the
-    // futures to ensure flush has indeed succeeded
-
-    // irrespective of whether the commitIndex2flushedDataMap is empty
-    // or not, ensure there is no exception set
-    checkOpen();
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (xceiverClientManager != null && xceiverClient != null
-        && bufferPool != null && bufferPool.getSize() > 0) {
-      try {
-        handleFlush();
-      } catch (InterruptedException | ExecutionException e) {
-        setIoException(e);
-        adjustBuffersOnException();
-        throw getIoException();
-      } finally {
-        cleanup(false);
-      }
-      // TODO: Turn the below buffer empty check on when Standalone pipeline
-      // is removed in the write path in tests
-      // Preconditions.checkArgument(buffer.position() == 0);
-      // bufferPool.checkBufferPoolEmpty();
-
-    }
-  }
-
-  private void waitOnFlushFutures()
-      throws InterruptedException, ExecutionException {
-    CompletableFuture<Void> combinedFuture = CompletableFuture.allOf(
-        commitWatcher.getFutureMap().values().toArray(
-            new CompletableFuture[commitWatcher.getFutureMap().size()]));
-    // wait for all the transactions to complete
-    combinedFuture.get();
-  }
-
-  private void validateResponse(
-      ContainerProtos.ContainerCommandResponseProto responseProto)
-      throws IOException {
-    try {
-      // if the ioException is already set, it means a prev request has failed
-      // just throw the exception. The current operation will fail with the
-      // original error
-      IOException exception = getIoException();
-      if (exception != null) {
-        throw exception;
-      }
-      ContainerProtocolCalls.validateContainerResponse(responseProto);
-    } catch (StorageContainerException sce) {
-      LOG.error("Unexpected Storage Container Exception: ", sce);
-      setIoException(sce);
-      throw sce;
-    }
-  }
-
-
-  private void setIoException(Exception e) {
-    if (getIoException() == null) {
-      IOException exception =  new IOException(
-          "Unexpected Storage Container Exception: " + e.toString(), e);
-      ioException.compareAndSet(null, exception);
-    }
-  }
-
-  public void cleanup(boolean invalidateClient) {
-    if (xceiverClientManager != null) {
-      xceiverClientManager.releaseClient(xceiverClient, invalidateClient);
-    }
-    xceiverClientManager = null;
-    xceiverClient = null;
-    commitWatcher.cleanup();
-    if (bufferList !=  null) {
-      bufferList.clear();
-    }
-    bufferList = null;
-    responseExecutor.shutdown();
-  }
-
-  /**
-   * Checks that the stream is open and that no exception has occurred.
-   * If either check fails, throws an exception.
-   *
-   * @throws IOException if stream is closed
-   */
-  private void checkOpen() throws IOException {
-    if (isClosed()) {
-      throw new IOException("BlockOutputStream has been closed.");
-    } else if (getIoException() != null) {
-      adjustBuffersOnException();
-      throw getIoException();
-    }
-  }
-
-  public boolean isClosed() {
-    return xceiverClient == null;
-  }
-
-  /**
-   * Writes buffered data as a new chunk to the container and saves chunk
-   * information to be used later in putKey call.
-   *
-   * @throws IOException if there is an I/O error while performing the call
-   * @throws OzoneChecksumException if there is an error while computing
-   * checksum
-   */
-  private void writeChunkToContainer(ByteBuffer chunk) throws IOException {
-    int effectiveChunkSize = chunk.remaining();
-    ByteString data = bufferPool.byteStringConversion().apply(chunk);
-    Checksum checksum = new Checksum(checksumType, bytesPerChecksum);
-    ChecksumData checksumData = checksum.computeChecksum(chunk);
-    ChunkInfo chunkInfo = ChunkInfo.newBuilder()
-        .setChunkName(blockID.getLocalID() + "_chunk_" + ++chunkIndex)
-        .setOffset(0)
-        .setLen(effectiveChunkSize)
-        .setChecksumData(checksumData.getProtoBufMessage())
-        .build();
-
-    try {
-      XceiverClientReply asyncReply =
-          writeChunkAsync(xceiverClient, chunkInfo, blockID, data);
-      CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
-          asyncReply.getResponse();
-      future.thenApplyAsync(e -> {
-        try {
-          validateResponse(e);
-        } catch (IOException sce) {
-          future.completeExceptionally(sce);
-        }
-        return e;
-      }, responseExecutor).exceptionally(e -> {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              "writing chunk failed " + chunkInfo.getChunkName() + " blockID "
-                  + blockID + " with exception " + e.getLocalizedMessage());
-        }
-        CompletionException ce = new CompletionException(e);
-        setIoException(ce);
-        throw ce;
-      });
-    } catch (IOException | InterruptedException | ExecutionException e) {
-      throw new IOException(
-          "Unexpected Storage Container Exception: " + e.toString(), e);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(
-          "writing chunk " + chunkInfo.getChunkName() + " blockID " + blockID
-              + " length " + effectiveChunkSize);
-    }
-    containerBlockData.addChunks(chunkInfo);
-  }
-
-  @VisibleForTesting
-  public void setXceiverClient(XceiverClientSpi xceiverClient) {
-    this.xceiverClient = xceiverClient;
-  }
-}
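
As the class comment of BlockOutputStream describes, each stream instance derives chunk names from a unique block identifier plus a monotonically increasing index, and putKey later publishes the full chunk list atomically. The following is a minimal, hypothetical sketch of that naming scheme, not part of the deleted code; the class name, localId, and sizes are illustrative only.

import java.util.ArrayList;
import java.util.List;

public class ChunkNamingSketch {
  public static void main(String[] args) {
    long localId = 42L; // assumed local block id
    int chunkSize = 4;  // tiny chunk size for illustration
    byte[] data = "hello world".getBytes();

    List<String> chunkNames = new ArrayList<>();
    int chunkIndex = 0;
    for (int off = 0; off < data.length; off += chunkSize) {
      // Composite key: unique block id + monotonically increasing index.
      chunkNames.add(localId + "_chunk_" + ++chunkIndex);
    }
    // putKey would publish this whole list at once, so a concurrent reader
    // never observes chunks from different versions of the key interleaved.
    System.out.println(chunkNames); // [42_chunk_1, 42_chunk_2, 42_chunk_3]
  }
}
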
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
deleted file mode 100644
index 6d53457..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.ByteStringConversion;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.function.Function;
-
-/**
- * This class creates and manages pool of n buffers.
- */
-public class BufferPool {
-
-  private List<ByteBuffer> bufferList;
-  private int currentBufferIndex;
-  private final int bufferSize;
-  private final int capacity;
-  private final Function<ByteBuffer, ByteString> byteStringConversion;
-
-  public BufferPool(int bufferSize, int capacity) {
-    this(bufferSize, capacity,
-        ByteStringConversion.createByteBufferConversion(null));
-  }
-
-  public BufferPool(int bufferSize, int capacity,
-      Function<ByteBuffer, ByteString> byteStringConversion){
-    this.capacity = capacity;
-    this.bufferSize = bufferSize;
-    bufferList = new ArrayList<>(capacity);
-    currentBufferIndex = -1;
-    this.byteStringConversion = byteStringConversion;
-  }
-
-  public Function<ByteBuffer, ByteString> byteStringConversion(){
-    return byteStringConversion;
-  }
-
-  public ByteBuffer getCurrentBuffer() {
-    return currentBufferIndex == -1 ? null : bufferList.get(currentBufferIndex);
-  }
-
-  /**
-   * If the currentBufferIndex is less than the buffer list size - 1, the
-   * next buffer in the list has been freed up for rewriting, so reuse the
-   * next available buffer.
-   *
-   * If the currentBufferIndex equals the buffer list size and the list size
-   * is still less than the capacity, allocate a new buffer of the
-   * configured buffer size.
-   */
-  public ByteBuffer allocateBufferIfNeeded() {
-    ByteBuffer buffer = getCurrentBuffer();
-    if (buffer != null && buffer.hasRemaining()) {
-      return buffer;
-    }
-    if (currentBufferIndex < bufferList.size() - 1) {
-      buffer = getBuffer(currentBufferIndex + 1);
-    } else {
-      buffer = ByteBuffer.allocate(bufferSize);
-      bufferList.add(buffer);
-    }
-    Preconditions.checkArgument(bufferList.size() <= capacity);
-    currentBufferIndex++;
-    // TODO: Turn the below precondition check on when Standalone pipeline
-    // is removed in the write path in tests
-    // Preconditions.checkArgument(buffer.position() == 0);
-    return buffer;
-  }
-
-  public void releaseBuffer(ByteBuffer byteBuffer) {
-    // always remove from the head of the list and append at the end
-    ByteBuffer buffer = bufferList.remove(0);
-    // Ensure the buffer to be removed is always at the head of the list.
-    Preconditions.checkArgument(buffer.equals(byteBuffer));
-    buffer.clear();
-    bufferList.add(buffer);
-    Preconditions.checkArgument(currentBufferIndex >= 0);
-    currentBufferIndex--;
-  }
-
-  public void clearBufferPool() {
-    bufferList.clear();
-    currentBufferIndex = -1;
-  }
-
-  public void checkBufferPoolEmpty() {
-    Preconditions.checkArgument(computeBufferData() == 0);
-  }
-
-  public long computeBufferData() {
-    return bufferList.stream().mapToInt(value -> value.position())
-        .sum();
-  }
-
-  public int getSize() {
-    return bufferList.size();
-  }
-
-  public ByteBuffer getBuffer(int index) {
-    return bufferList.get(index);
-  }
-
-  int getCurrentBufferIndex() {
-    return currentBufferIndex;
-  }
-
-}
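
BufferPool grows lazily up to its capacity and recycles released buffers by rotating them from the head of the list to the tail. Below is a minimal, self-contained sketch of that rotation, not part of the deleted code; the class name and the tiny sizes in main are hypothetical, for illustration only.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class BufferPoolSketch {
  private final List<ByteBuffer> buffers = new ArrayList<>();
  private final int bufferSize;
  private final int capacity;
  private int currentIndex = -1;

  BufferPoolSketch(int bufferSize, int capacity) {
    this.bufferSize = bufferSize;
    this.capacity = capacity;
  }

  ByteBuffer allocateIfNeeded() {
    ByteBuffer current =
        currentIndex == -1 ? null : buffers.get(currentIndex);
    if (current != null && current.hasRemaining()) {
      return current; // current buffer still has room
    }
    if (currentIndex < buffers.size() - 1) {
      current = buffers.get(currentIndex + 1); // reuse a recycled buffer
    } else {
      current = ByteBuffer.allocate(bufferSize); // grow the pool
      buffers.add(current);
      if (buffers.size() > capacity) {
        throw new IllegalStateException("pool exceeded capacity");
      }
    }
    currentIndex++;
    return current;
  }

  void releaseOldest() {
    // The oldest (acknowledged) buffer rotates to the tail, cleared.
    ByteBuffer head = buffers.remove(0);
    head.clear();
    buffers.add(head);
    currentIndex--;
  }

  public static void main(String[] args) {
    BufferPoolSketch pool = new BufferPoolSketch(4, 2);
    pool.allocateIfNeeded().put(new byte[4]); // fills buffer 0
    pool.allocateIfNeeded().put(new byte[2]); // allocates buffer 1
    pool.releaseOldest();                     // buffer 0 acked and recycled
    System.out.println(pool.buffers.size());  // prints 2
  }
}
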
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
deleted file mode 100644
index f94d2d8..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
+++ /dev/null
@@ -1,544 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.Seekable;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkResponseProto;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.common.ChecksumData;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteBuffer;
-import java.util.List;
-
-/**
- * An {@link InputStream} called from BlockInputStream to read a chunk from the
- * container. Each chunk may contain multiple underlying {@link ByteBuffer}
- * instances.
- */
-public class ChunkInputStream extends InputStream implements Seekable {
-
-  private ChunkInfo chunkInfo;
-  private final long length;
-  private final BlockID blockID;
-  private XceiverClientSpi xceiverClient;
-  private boolean verifyChecksum;
-  private boolean allocated = false;
-
-  // Buffer to store the chunk data read from the DN container
-  private List<ByteBuffer> buffers;
-
-  // Index of the buffers corresponding to the current position of the buffers
-  private int bufferIndex;
-
-  // The offset of the current data residing in the buffers w.r.t the start
-  // of chunk data
-  private long bufferOffset;
-
-  // The number of bytes of chunk data residing in the buffers currently
-  private long bufferLength;
-
-  // Position of the ChunkInputStream is maintained by this variable (if a
-  // seek is performed). This position is w.r.t. the chunk only and not the
-  // block or key. This variable is set only if either the buffers are not
-  // yet allocated or the allocated buffers do not cover the seeked
-  // position. Once the chunk is read, this variable is reset.
-  private long chunkPosition = -1;
-
-  private static final int EOF = -1;
-
-  ChunkInputStream(ChunkInfo chunkInfo, BlockID blockId, 
-          XceiverClientSpi xceiverClient, boolean verifyChecksum) {
-    this.chunkInfo = chunkInfo;
-    this.length = chunkInfo.getLen();
-    this.blockID = blockId;
-    this.xceiverClient = xceiverClient;
-    this.verifyChecksum = verifyChecksum;
-  }
-
-  public synchronized long getRemaining() throws IOException {
-    return length - getPos();
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public synchronized int read() throws IOException {
-    checkOpen();
-    int available = prepareRead(1);
-    int dataout = EOF;
-
-    if (available == EOF) {
-      // There is no more data in the chunk stream. The buffers should have
-      // been released by now
-      Preconditions.checkState(buffers == null);
-    } else {
-      dataout = Byte.toUnsignedInt(buffers.get(bufferIndex).get());
-    }
-
-    if (chunkStreamEOF()) {
-      // consumer might use getPos to determine EOF,
-      // so release buffers when serving the last byte of data
-      releaseBuffers();
-    }
-
-    return dataout;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public synchronized int read(byte[] b, int off, int len) throws IOException {
-    // According to the JavaDocs for InputStream, it is recommended that
-    // subclasses provide an override of bulk read if possible for performance
-    // reasons.  In addition to performance, we need to do it for correctness
-    // reasons.  The Ozone REST service uses PipedInputStream and
-    // PipedOutputStream to relay HTTP response data between a Jersey thread and
-    // a Netty thread.  It turns out that PipedInputStream/PipedOutputStream
-    // have a subtle dependency (bug?) on the wrapped stream providing separate
-    // implementations of single-byte read and bulk read.  Without this, get key
-    // responses might close the connection before writing all of the bytes
-    // advertised in the Content-Length.
-    if (b == null) {
-      throw new NullPointerException();
-    }
-    if (off < 0 || len < 0 || len > b.length - off) {
-      throw new IndexOutOfBoundsException();
-    }
-    if (len == 0) {
-      return 0;
-    }
-    checkOpen();
-    int total = 0;
-    while (len > 0) {
-      int available = prepareRead(len);
-      if (available == EOF) {
-        // There is no more data in the chunk stream. The buffers should have
-        // been released by now
-        Preconditions.checkState(buffers == null);
-        return total != 0 ? total : EOF;
-      }
-      buffers.get(bufferIndex).get(b, off + total, available);
-      len -= available;
-      total += available;
-    }
-
-    if (chunkStreamEOF()) {
-      // smart consumers determine EOF by calling getPos()
-      // so we release buffers when serving the final bytes of data
-      releaseBuffers();
-    }
-
-    return total;
-  }
-
-  /**
-   * Seeks the ChunkInputStream to the specified position. This is done by
-   * updating the chunkPosition to the seeked position in case the buffers
-   * are not allocated or buffers do not contain the data corresponding to
-   * the seeked position (determined by buffersHavePosition()). Otherwise,
-   * the buffers position is updated to the seeked position.
-   */
-  @Override
-  public synchronized void seek(long pos) throws IOException {
-    if (pos < 0 || pos >= length) {
-      if (pos == 0) {
-        // It is possible for length and pos to be zero in which case
-        // seek should return instead of throwing exception
-        return;
-      }
-      throw new EOFException("EOF encountered at pos: " + pos + " for chunk: "
-          + chunkInfo.getChunkName());
-    }
-
-    if (buffersHavePosition(pos)) {
-      // The bufferPosition is w.r.t the current chunk.
-      // Adjust the bufferIndex and position to the seeked position.
-      adjustBufferPosition(pos - bufferOffset);
-    } else {
-      chunkPosition = pos;
-    }
-  }
-
-  @Override
-  public synchronized long getPos() throws IOException {
-    if (chunkPosition >= 0) {
-      return chunkPosition;
-    }
-    if (chunkStreamEOF()) {
-      return length;
-    }
-    if (buffersHaveData()) {
-      return bufferOffset + buffers.get(bufferIndex).position();
-    }
-    if (buffersAllocated()) {
-      return bufferOffset + bufferLength;
-    }
-    return 0;
-  }
-
-  @Override
-  public boolean seekToNewSource(long targetPos) throws IOException {
-    return false;
-  }
-
-  @Override
-  public synchronized void close() {
-    if (xceiverClient != null) {
-      xceiverClient = null;
-    }
-  }
-
-  /**
-   * Checks if the stream is open.  If not, throw an exception.
-   *
-   * @throws IOException if stream is closed
-   */
-  protected synchronized void checkOpen() throws IOException {
-    if (xceiverClient == null) {
-      throw new IOException("BlockInputStream has been closed.");
-    }
-  }
-
-  /**
-   * Prepares to read by advancing through buffers or allocating new buffers,
-   * as needed until it finds data to return, or encounters EOF.
-   * @param len desired length of data to read
-   * @return length of data available to read, possibly less than desired length
-   */
-  private synchronized int prepareRead(int len) throws IOException {
-    for (;;) {
-      if (chunkPosition >= 0) {
-        if (buffersHavePosition(chunkPosition)) {
-          // The current buffers have the seeked position. Adjust the buffer
-          // index and position to point to the chunkPosition.
-          adjustBufferPosition(chunkPosition - bufferOffset);
-        } else {
-          // Read the required chunk data so that the buffers contain data
-          // for the seeked position
-          readChunkFromContainer(len);
-        }
-      }
-      if (buffersHaveData()) {
-        // Data is available from buffers
-        ByteBuffer bb = buffers.get(bufferIndex);
-        return Math.min(len, bb.remaining());
-      } else  if (dataRemainingInChunk()) {
-        // There is more data in the chunk stream which has not
-        // been read into the buffers yet.
-        readChunkFromContainer(len);
-      } else {
-        // All available input from this chunk stream has been consumed.
-        return EOF;
-      }
-    }
-  }
-
-  /**
-   * Reads full or partial Chunk from DN Container based on the current
-   * position of the ChunkInputStream, the number of bytes of data to read
-   * and the checksum boundaries.
-   * If successful, then the read data is saved in the buffers so that
-   * subsequent read calls can utilize it.
-   * @param len number of bytes of data to be read
-   * @throws IOException if there is an I/O error while performing the call
-   * to Datanode
-   */
-  private synchronized void readChunkFromContainer(int len) throws IOException {
-
-    // index of first byte to be read from the chunk
-    long startByteIndex;
-    if (chunkPosition >= 0) {
-      // If seek operation was called to advance the buffer position, the
-      // chunk should be read from that position onwards.
-      startByteIndex = chunkPosition;
-    } else {
-      // Start reading the chunk from the last chunkPosition onwards.
-      startByteIndex = bufferOffset + bufferLength;
-    }
-
-    if (verifyChecksum) {
-      // Update the bufferOffset and bufferLength as per the checksum
-      // boundary requirement.
-      computeChecksumBoundaries(startByteIndex, len);
-    } else {
-      // Read from the startByteIndex
-      bufferOffset = startByteIndex;
-      bufferLength = len;
-    }
-
-    // Adjust the chunkInfo so that only the required bytes are read from
-    // the chunk.
-    final ChunkInfo adjustedChunkInfo = ChunkInfo.newBuilder(chunkInfo)
-        .setOffset(bufferOffset)
-        .setLen(bufferLength)
-        .build();
-
-    ByteString byteString = readChunk(adjustedChunkInfo);
-
-    buffers = byteString.asReadOnlyByteBufferList();
-    bufferIndex = 0;
-    allocated = true;
-
-    // If the stream was seeked to position before, then the buffer
-    // position should be adjusted as the reads happen at checksum boundaries.
-    // The buffers position might need to be adjusted for the following
-    // scenarios:
-    //    1. Stream was seeked to a position before the chunk was read
-    //    2. Chunk was read from index < the current position to account for
-    //    checksum boundaries.
-    adjustBufferPosition(startByteIndex - bufferOffset);
-  }
-
-  /**
-   * Send RPC call to get the chunk from the container.
-   */
-  @VisibleForTesting
-  protected ByteString readChunk(ChunkInfo readChunkInfo) throws IOException {
-    ReadChunkResponseProto readChunkResponse;
-
-    try {
-      List<CheckedBiFunction> validators =
-          ContainerProtocolCalls.getValidatorList();
-      validators.add(validator);
-
-      readChunkResponse = ContainerProtocolCalls.readChunk(xceiverClient,
-          readChunkInfo, blockID, validators);
-
-    } catch (IOException e) {
-      if (e instanceof StorageContainerException) {
-        throw e;
-      }
-      throw new IOException("Unexpected OzoneException: " + e.toString(), e);
-    }
-
-    return readChunkResponse.getData();
-  }
-
-  private CheckedBiFunction<ContainerCommandRequestProto,
-      ContainerCommandResponseProto, IOException> validator =
-          (request, response) -> {
-            final ChunkInfo reqChunkInfo =
-                request.getReadChunk().getChunkData();
-
-            ReadChunkResponseProto readChunkResponse = response.getReadChunk();
-            ByteString byteString = readChunkResponse.getData();
-
-            if (byteString.size() != reqChunkInfo.getLen()) {
-              // Bytes read from chunk should be equal to chunk size.
-              throw new OzoneChecksumException(String
-                  .format("Inconsistent read for chunk=%s len=%d bytesRead=%d",
-                      reqChunkInfo.getChunkName(), reqChunkInfo.getLen(),
-                      byteString.size()));
-            }
-
-            if (verifyChecksum) {
-              ChecksumData checksumData = ChecksumData.getFromProtoBuf(
-                  chunkInfo.getChecksumData());
-
-              // ChecksumData stores checksum for each 'numBytesPerChecksum'
-              // number of bytes in a list. Compute the index of the first
-              // checksum to match with the read data
-
-              int checksumStartIndex = (int) (reqChunkInfo.getOffset() /
-                  checksumData.getBytesPerChecksum());
-              Checksum.verifyChecksum(
-                  byteString, checksumData, checksumStartIndex);
-            }
-          };
-
-  /**
-   * Return the offset and length of bytes that need to be read from the
-   * chunk file to cover the checksum boundaries covering the actual start and
-   * end of the chunk index to be read.
-   * For example, let's say the client is reading from index 120 to 450 in the
-   * chunk. And let's say checksum is stored for every 100 bytes in the chunk
-   * i.e. the first checksum is for bytes from index 0 to 99, the next for
-   * bytes from index 100 to 199 and so on. To verify bytes from 120 to 450,
-   * we would need to read from bytes 100 to 499 so that checksum
-   * verification can be done.
-   *
-   * @param startByteIndex the first byte index to be read by client
-   * @param dataLen number of bytes to be read from the chunk
-   */
-  private void computeChecksumBoundaries(long startByteIndex, int dataLen) {
-
-    int bytesPerChecksum = chunkInfo.getChecksumData().getBytesPerChecksum();
-    // index of the last byte to be read from chunk, inclusively.
-    final long endByteIndex = startByteIndex + dataLen - 1;
-
-    bufferOffset =  (startByteIndex / bytesPerChecksum)
-        * bytesPerChecksum; // inclusive
-    final long endIndex = ((endByteIndex / bytesPerChecksum) + 1)
-        * bytesPerChecksum; // exclusive
-    bufferLength = Math.min(endIndex, length) - bufferOffset;
-  }
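
Plugging the javadoc's example into the arithmetic above: reading bytes 120..450 with a checksum stored per 100 bytes expands the read to bytes 100..499. The following is a standalone sketch of the same computation, not part of the deleted code; the class name and the assumed chunkLength are hypothetical.

public class ChecksumBoundarySketch {
  public static void main(String[] args) {
    long startByteIndex = 120;  // first byte the client wants
    int dataLen = 331;          // bytes 120..450 inclusive
    int bytesPerChecksum = 100; // one checksum per 100 bytes
    long chunkLength = 1000;    // assumed chunk length

    long endByteIndex = startByteIndex + dataLen - 1;                   // 450
    long bufferOffset = (startByteIndex / bytesPerChecksum)
        * bytesPerChecksum;                                             // 100
    long endIndex = ((endByteIndex / bytesPerChecksum) + 1)
        * bytesPerChecksum;                                             // 500
    long bufferLength = Math.min(endIndex, chunkLength) - bufferOffset; // 400

    // Reads bytes 100..499 so every covering checksum can be verified.
    System.out.println(bufferOffset + ".." + (bufferOffset + bufferLength - 1));
  }
}
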
-
-  /**
-   * Adjust the buffers' position to account for a seeked position and/or
-   * checksum boundary reads.
-   * @param bufferPosition the position to which the buffers must be advanced
-   */
-  private void adjustBufferPosition(long bufferPosition) {
-    // The bufferPosition is w.r.t the current chunk.
-    // Adjust the bufferIndex and position to the seeked chunkPosition.
-    long tempOffset = 0;
-    for (int i = 0; i < buffers.size(); i++) {
-      if (bufferPosition - tempOffset >= buffers.get(i).capacity()) {
-        tempOffset += buffers.get(i).capacity();
-      } else {
-        bufferIndex = i;
-        break;
-      }
-    }
-    buffers.get(bufferIndex).position((int) (bufferPosition - tempOffset));
-
-    // Reset the chunkPosition as chunk stream has been initialized i.e. the
-    // buffers have been allocated.
-    resetPosition();
-  }
-
-  /**
-   * Return true if the buffers have been allocated and false otherwise.
-   */
-  private boolean buffersAllocated() {
-    return buffers != null && !buffers.isEmpty();
-  }
-
-  /**
-   * Check if the buffers have any data remaining between the current
-   * position and the limit.
-   */
-  private boolean buffersHaveData() {
-    boolean hasData = false;
-
-    if (buffersAllocated()) {
-      while (bufferIndex < (buffers.size())) {
-        if (buffers.get(bufferIndex).hasRemaining()) {
-          // current buffer has data
-          hasData = true;
-          break;
-        } else {
-          if (buffersRemaining()) {
-            // move to next available buffer
-            ++bufferIndex;
-            Preconditions.checkState(bufferIndex < buffers.size());
-          } else {
-            // no more buffers remaining
-            break;
-          }
-        }
-      }
-    }
-
-    return hasData;
-  }
-
-  private boolean buffersRemaining() {
-    return (bufferIndex < (buffers.size() - 1));
-  }
-
-  /**
-   * Check if current buffers have the data corresponding to the input position.
-   */
-  private boolean buffersHavePosition(long pos) {
-    // Check if buffers have been allocated
-    if (buffersAllocated()) {
-      // Check if the current buffers cover the input position
-      return pos >= bufferOffset &&
-          pos < bufferOffset + bufferLength;
-    }
-    return false;
-  }
-
-  /**
-   * Check if there is more data in the chunk which has not yet been read
-   * into the buffers.
-   */
-  private boolean dataRemainingInChunk() {
-    long bufferPos;
-    if (chunkPosition >= 0) {
-      bufferPos = chunkPosition;
-    } else {
-      bufferPos = bufferOffset + bufferLength;
-    }
-
-    return bufferPos < length;
-  }
-
-  /**
-   * Check if end of chunkStream has been reached.
-   */
-  private boolean chunkStreamEOF() {
-    if (!allocated) {
-      // Chunk data has not been read yet
-      return false;
-    }
-
-    if (buffersHaveData() || dataRemainingInChunk()) {
-      return false;
-    } else {
-      Preconditions.checkState(bufferOffset + bufferLength == length,
-          "EOF detected, but not at the last byte of the chunk");
-      return true;
-    }
-  }
-
-  /**
-   * If EOF is reached, release the buffers.
-   */
-  private void releaseBuffers() {
-    buffers = null;
-    bufferIndex = 0;
-  }
-
-  /**
-   * Reset the chunkPosition once the buffers are allocated.
-   */
-  void resetPosition() {
-    this.chunkPosition = -1;
-  }
-
-  String getChunkName() {
-    return chunkInfo.getChunkName();
-  }
-
-  protected long getLength() {
-    return length;
-  }
-
-  @VisibleForTesting
-  protected long getChunkPosition() {
-    return chunkPosition;
-  }
-}
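
ChunkInputStream defers a seek whose target lies outside the buffered range by recording it in chunkPosition and letting the next read fetch the chunk from there. Below is a minimal, self-contained sketch of that lazy-seek bookkeeping, not part of the deleted code; the class and field names are hypothetical stand-ins.

public class LazySeekSketch {
  private long bufferOffset = 0;   // start of the buffered byte range
  private long bufferLength = 0;   // number of bytes currently buffered
  private long chunkPosition = -1; // pending seek target, -1 when none

  void seek(long pos) {
    if (bufferLength > 0 && pos >= bufferOffset
        && pos < bufferOffset + bufferLength) {
      // Buffered data already covers the target; a real stream would just
      // move the ByteBuffer position here.
      chunkPosition = -1;
    } else {
      // Nothing buffered for that range: record the target and let the
      // next read fetch the chunk starting from it.
      chunkPosition = pos;
    }
  }

  long getPos() {
    return chunkPosition >= 0 ? chunkPosition : bufferOffset + bufferLength;
  }

  public static void main(String[] args) {
    LazySeekSketch s = new LazySeekSketch();
    s.seek(250);                    // no data buffered yet
    System.out.println(s.getPos()); // 250, served from chunkPosition
  }
}
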
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java
deleted file mode 100644
index 1d9d55b..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/**
- * This class maintains the map of the commitIndexes to be watched for
- * successful replication in the datanodes in a given pipeline. It also releases
- * the buffers associated with the user data back to {@link BufferPool} once
- * the minimum replication criteria are achieved during an ozone key write.
- */
-package org.apache.hadoop.hdds.scm.storage;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientReply;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.ExecutionException;
-
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.stream.Collectors;
-
-/**
- * This class executes watchForCommit on ratis pipeline and releases
- * buffers once data successfully gets replicated.
- */
-public class CommitWatcher {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(CommitWatcher.class);
-
-  // A reference to the pool of buffers holding the data
-  private BufferPool bufferPool;
-
-  // The map should maintain the keys (logIndexes) in order so that while
-  // removing we always end up updating incremented data flushed length.
-  // Also, corresponding to the logIndex, the corresponding list of buffers will
-  // be released from the buffer pool.
-  private ConcurrentSkipListMap<Long, List<ByteBuffer>>
-      commitIndex2flushedDataMap;
-
-  // future Map to hold up all putBlock futures
-  private ConcurrentHashMap<Long,
-      CompletableFuture<ContainerProtos.ContainerCommandResponseProto>>
-      futureMap;
-
-  private XceiverClientSpi xceiverClient;
-
-  private final long watchTimeout;
-
-  // total data which has been successfully flushed and acknowledged
-  // by all servers
-  private long totalAckDataLength;
-
-  public CommitWatcher(BufferPool bufferPool, XceiverClientSpi xceiverClient,
-      long watchTimeout) {
-    this.bufferPool = bufferPool;
-    this.xceiverClient = xceiverClient;
-    this.watchTimeout = watchTimeout;
-    commitIndex2flushedDataMap = new ConcurrentSkipListMap<>();
-    totalAckDataLength = 0;
-    futureMap = new ConcurrentHashMap<>();
-  }
-
-  /**
-   * Updates the totalAckDataLength and releases the acknowledged buffers.
-   * In case of failure, data will be resent starting from totalAckDataLength.
-   */
-  private long releaseBuffers(List<Long> indexes) {
-    Preconditions.checkArgument(!commitIndex2flushedDataMap.isEmpty());
-    for (long index : indexes) {
-      Preconditions.checkState(commitIndex2flushedDataMap.containsKey(index));
-      List<ByteBuffer> buffers = commitIndex2flushedDataMap.remove(index);
-      long length = buffers.stream().mapToLong(ByteBuffer::position).sum();
-      totalAckDataLength += length;
-      // clear the future object from the future Map
-      Preconditions.checkNotNull(futureMap.remove(totalAckDataLength));
-      for (ByteBuffer byteBuffer : buffers) {
-        bufferPool.releaseBuffer(byteBuffer);
-      }
-    }
-    return totalAckDataLength;
-  }
-
-  public void updateCommitInfoMap(long index, List<ByteBuffer> byteBufferList) {
-    commitIndex2flushedDataMap
-        .put(index, byteBufferList);
-  }
-
-  int getCommitInfoMapSize() {
-    return commitIndex2flushedDataMap.size();
-  }
-
-  /**
-   * Calls watchForCommit for the first (lowest) index in
-   * commitIndex2flushedDataMap on the Ratis client.
-   * @return reply from the raft client
-   * @throws IOException in case watchForCommit fails
-   */
-  public XceiverClientReply watchOnFirstIndex() throws IOException {
-    if (!commitIndex2flushedDataMap.isEmpty()) {
-      // wait for the first commit index in the commitIndex2flushedDataMap
-      // to get committed to all or a majority of nodes before a timeout
-      // happens.
-      long index =
-          commitIndex2flushedDataMap.keySet().stream().mapToLong(v -> v).min()
-              .getAsLong();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("waiting for first index " + index + " to catch up");
-      }
-      return watchForCommit(index);
-    } else {
-      return null;
-    }
-  }
-
-  /**
-   * Calls watchForCommit for the last (highest) index in
-   * commitIndex2flushedDataMap on the Ratis client.
-   * @return reply from the raft client
-   * @throws IOException in case watchForCommit fails
-   */
-  public XceiverClientReply watchOnLastIndex()
-      throws IOException {
-    if (!commitIndex2flushedDataMap.isEmpty()) {
-      // wait for the last commit index in the commitIndex2flushedDataMap
-      // to get committed to all or a majority of nodes before a timeout
-      // happens.
-      long index =
-          commitIndex2flushedDataMap.keySet().stream().mapToLong(v -> v).max()
-              .getAsLong();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("waiting for last flush Index " + index + " to catch up");
-      }
-      return watchForCommit(index);
-    } else {
-      return null;
-    }
-  }
-
-
-  private void adjustBuffers(long commitIndex) {
-    List<Long> keyList = commitIndex2flushedDataMap.keySet().stream()
-        .filter(p -> p <= commitIndex).collect(Collectors.toList());
-    if (keyList.isEmpty()) {
-      return;
-    } else {
-      releaseBuffers(keyList);
-    }
-  }
-
-  // It may happen that once the exception is encountered, we still might
-  // have successfully flushed up to a certain index. Make sure the buffers
-  // only contain data which has not been sufficiently replicated.
-  void releaseBuffersOnException() {
-    adjustBuffers(xceiverClient.getReplicatedMinCommitIndex());
-  }
-
-
-  /**
-   * Calls the watchForCommit API of the Ratis client. For a Standalone
-   * client, it is a no-op.
-   * @param commitIndex log index to watch for
-   * @return minimum commit index replicated to all nodes
-   * @throws IOException in case the watch gets timed out
-   */
-  public XceiverClientReply watchForCommit(long commitIndex)
-      throws IOException {
-    long index;
-    try {
-      XceiverClientReply reply =
-          xceiverClient.watchForCommit(commitIndex, watchTimeout);
-      if (reply == null) {
-        index = 0;
-      } else {
-        index = reply.getLogIndex();
-      }
-      adjustBuffers(index);
-      return reply;
-    } catch (TimeoutException | InterruptedException | ExecutionException e) {
-      LOG.warn("watchForCommit failed for index " + commitIndex, e);
-      IOException ioException = new IOException(
-          "Unexpected Storage Container Exception: " + e.toString(), e);
-      releaseBuffersOnException();
-      throw ioException;
-    }
-  }
-
-  @VisibleForTesting
-  public ConcurrentSkipListMap<Long,
-      List<ByteBuffer>> getCommitIndex2flushedDataMap() {
-    return commitIndex2flushedDataMap;
-  }
-
-  public ConcurrentHashMap<Long,
-      CompletableFuture<ContainerProtos.
-          ContainerCommandResponseProto>> getFutureMap() {
-    return futureMap;
-  }
-
-  public long getTotalAckDataLength() {
-    return totalAckDataLength;
-  }
-
-  public void cleanup() {
-    if (commitIndex2flushedDataMap != null) {
-      commitIndex2flushedDataMap.clear();
-    }
-    if (futureMap != null) {
-      futureMap.clear();
-    }
-    commitIndex2flushedDataMap = null;
-  }
-}
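
CommitWatcher keeps flushed buffers keyed by Raft log index in a sorted map and, once a commit index is replicated, releases every entry at or below it while accumulating the acknowledged byte count. Below is a minimal, self-contained sketch of that release logic, not part of the deleted code; the class name is hypothetical, and a real watcher would also return the buffers to the BufferPool.

import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentSkipListMap;

public class CommitWatcherSketch {
  private final ConcurrentSkipListMap<Long, List<ByteBuffer>> pending =
      new ConcurrentSkipListMap<>();
  private long totalAckDataLength = 0;

  void track(long logIndex, List<ByteBuffer> buffers) {
    pending.put(logIndex, buffers);
  }

  /** Release every entry whose log index has been replicated. */
  long adjustBuffers(long committedIndex) {
    while (!pending.isEmpty() && pending.firstKey() <= committedIndex) {
      List<ByteBuffer> released = pending.pollFirstEntry().getValue();
      for (ByteBuffer b : released) {
        totalAckDataLength += b.position(); // bytes written into the buffer
        // a real watcher would hand b back to the BufferPool here
      }
    }
    return totalAckDataLength;
  }

  public static void main(String[] args) {
    CommitWatcherSketch w = new CommitWatcherSketch();
    ByteBuffer b1 = ByteBuffer.allocate(8);
    b1.put(new byte[8]);
    w.track(5L, Collections.singletonList(b1));
    System.out.println(w.adjustBuffers(7L)); // prints 8: index 5 <= 7
  }
}
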
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
deleted file mode 100644
index 6e7ce94..0000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-/**
- * Low level IO streams to upload/download chunks from container service.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
deleted file mode 100644
index 042bfd9..0000000
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import com.google.common.primitives.Bytes;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.client.ContainerBlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.security.token.Token;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-
-import static org.apache.hadoop.hdds.scm.storage.TestChunkInputStream.generateRandomData;
-
-/**
- * Tests for {@link BlockInputStream}'s functionality.
- */
-public class TestBlockInputStream {
-
-  private static final int CHUNK_SIZE = 100;
-  private static Checksum checksum;
-
-  private BlockInputStream blockStream;
-  private byte[] blockData;
-  private int blockSize;
-  private List<ChunkInfo> chunks;
-  private Map<String, byte[]> chunkDataMap;
-
-  @Before
-  public void setup() throws Exception {
-    BlockID blockID = new BlockID(new ContainerBlockID(1, 1));
-    checksum = new Checksum(ChecksumType.NONE, CHUNK_SIZE);
-    createChunkList(5);
-
-    blockStream = new DummyBlockInputStream(blockID, blockSize, null, null,
-        false, null);
-  }
-
-  /**
-   * Create a mock list of chunks. The first n-1 chunks are of length
-   * CHUNK_SIZE and the last chunk has length CHUNK_SIZE/2.
-   */
-  private void createChunkList(int numChunks)
-      throws Exception {
-
-    chunks = new ArrayList<>(numChunks);
-    chunkDataMap = new HashMap<>();
-    blockData = new byte[0];
-    int i, chunkLen;
-    byte[] byteData;
-    String chunkName;
-
-    for (i = 0; i < numChunks; i++) {
-      chunkName = "chunk-" + i;
-      chunkLen = CHUNK_SIZE;
-      if (i == numChunks - 1) {
-        chunkLen = CHUNK_SIZE / 2;
-      }
-      byteData = generateRandomData(chunkLen);
-      ChunkInfo chunkInfo = ChunkInfo.newBuilder()
-          .setChunkName(chunkName)
-          .setOffset(0)
-          .setLen(chunkLen)
-          .setChecksumData(checksum.computeChecksum(
-              byteData, 0, chunkLen).getProtoBufMessage())
-          .build();
-
-      chunkDataMap.put(chunkName, byteData);
-      chunks.add(chunkInfo);
-
-      blockSize += chunkLen;
-      blockData = Bytes.concat(blockData, byteData);
-    }
-  }
-
-  /**
-   * A dummy BlockInputStream to mock read block call to DN.
-   */
-  private class DummyBlockInputStream extends BlockInputStream {
-
-    DummyBlockInputStream(BlockID blockId,
-        long blockLen,
-        Pipeline pipeline,
-        Token<OzoneBlockTokenIdentifier> token,
-        boolean verifyChecksum,
-        XceiverClientManager xceiverClientManager) {
-      super(blockId, blockLen, pipeline, token, verifyChecksum,
-          xceiverClientManager);
-    }
-
-    @Override
-    protected List<ChunkInfo> getChunkInfos() {
-      return chunks;
-    }
-
-    @Override
-    protected void addStream(ChunkInfo chunkInfo) {
-      TestChunkInputStream testChunkInputStream = new TestChunkInputStream();
-      getChunkStreams().add(testChunkInputStream.new DummyChunkInputStream(
-          chunkInfo, null, null, false,
-          chunkDataMap.get(chunkInfo.getChunkName()).clone()));
-    }
-
-    @Override
-    protected synchronized void checkOpen() throws IOException {
-      // No action needed
-    }
-  }
-
-  private void seekAndVerify(int pos) throws Exception {
-    blockStream.seek(pos);
-    Assert.assertEquals("Current position of buffer does not match with the " +
-        "seeked position", pos, blockStream.getPos());
-  }
-
-  /**
-   * Match readData with the chunkData byte-wise.
-   * @param readData Data read through ChunkInputStream
-   * @param inputDataStartIndex first index (inclusive) in chunkData to compare
-   *                            with read data
-   * @param length the number of bytes of data to match starting from
-   *               inputDataStartIndex
-   */
-  private void matchWithInputData(byte[] readData, int inputDataStartIndex,
-      int length) {
-    for (int i = inputDataStartIndex; i < inputDataStartIndex + length; i++) {
-      Assert.assertEquals(blockData[i], readData[i - inputDataStartIndex]);
-    }
-  }
-
-  @Test
-  public void testSeek() throws Exception {
-    // Seek to position 0
-    int pos = 0;
-    seekAndVerify(pos);
-    Assert.assertEquals("ChunkIndex is incorrect", 0,
-        blockStream.getChunkIndex());
-
-    // Before BlockInputStream is initialized (initialization happens during
-    // read operation), seek should update the BlockInputStream#blockPosition
-    pos = CHUNK_SIZE;
-    seekAndVerify(pos);
-    Assert.assertEquals("ChunkIndex is incorrect", 0,
-        blockStream.getChunkIndex());
-    Assert.assertEquals(pos, blockStream.getBlockPosition());
-
-    // Initialize the BlockInputStream. After initialization, the chunkIndex
-    // should be updated to correspond to the seeked position.
-    blockStream.initialize();
-    Assert.assertEquals("ChunkIndex is incorrect", 1,
-        blockStream.getChunkIndex());
-
-    pos = (CHUNK_SIZE * 4) + 5;
-    seekAndVerify(pos);
-    Assert.assertEquals("ChunkIndex is incorrect", 4,
-        blockStream.getChunkIndex());
-
-    try {
-      // Try seeking beyond the blockSize.
-      pos = blockSize + 10;
-      seekAndVerify(pos);
-      Assert.fail("Seek to position beyond block size should fail.");
-    } catch (EOFException e) {
-      System.out.println(e);
-    }
-
-    // Seek to random positions between 0 and the block size.
-    Random random = new Random();
-    for (int i = 0; i < 10; i++) {
-      pos = random.nextInt(blockSize);
-      seekAndVerify(pos);
-    }
-  }
-
-  @Test
-  public void testRead() throws Exception {
-    // Read 200 bytes of data starting from position 50. Chunk0 contains
-    // indices 0 to 99, chunk1 from 100 to 199 and chunk2 from 200 to 299. So
-    // the read should result in 3 ChunkInputStream reads.
-    seekAndVerify(50);
-    byte[] b = new byte[200];
-    blockStream.read(b, 0, 200);
-    matchWithInputData(b, 50, 200);
-
-    // The new position of the blockInputStream should be the last index read
-    // + 1.
-    Assert.assertEquals(250, blockStream.getPos());
-    Assert.assertEquals(2, blockStream.getChunkIndex());
-  }
-
-  @Test
-  public void testSeekAndRead() throws Exception {
-    // Seek to a position and read data
-    seekAndVerify(50);
-    byte[] b1 = new byte[100];
-    blockStream.read(b1, 0, 100);
-    matchWithInputData(b1, 50, 100);
-
-    // Next read should start from the position of the last read + 1 i.e. 150
-    byte[] b2 = new byte[100];
-    blockStream.read(b2, 0, 100);
-    matchWithInputData(b2, 150, 100);
-  }
-}
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
deleted file mode 100644
index a5fe26b..0000000
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.EOFException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-/**
- * Tests for {@link ChunkInputStream}'s functionality.
- */
-public class TestChunkInputStream {
-
-  private static final int CHUNK_SIZE = 100;
-  private static final int BYTES_PER_CHECKSUM = 20;
-  private static final String CHUNK_NAME = "dummyChunk";
-  private static final Random RANDOM = new Random();
-  private static Checksum checksum;
-
-  private DummyChunkInputStream chunkStream;
-  private ChunkInfo chunkInfo;
-  private byte[] chunkData;
-
-  @Before
-  public void setup() throws Exception {
-    checksum = new Checksum(ChecksumType.valueOf(
-        OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT),
-        BYTES_PER_CHECKSUM);
-
-    chunkData = generateRandomData(CHUNK_SIZE);
-
-    chunkInfo = ChunkInfo.newBuilder()
-        .setChunkName(CHUNK_NAME)
-        .setOffset(0)
-        .setLen(CHUNK_SIZE)
-        .setChecksumData(checksum.computeChecksum(
-            chunkData, 0, CHUNK_SIZE).getProtoBufMessage())
-        .build();
-
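-    // blockId and xceiverClient can be null: the dummy stream overrides the
-    // calls that would use them. verifyChecksum is true so that reads
-    // exercise the checksum verification path.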
-    chunkStream = new DummyChunkInputStream(chunkInfo, null, null, true);
-  }
-
-  static byte[] generateRandomData(int length) {
-    byte[] bytes = new byte[length];
-    RANDOM.nextBytes(bytes);
-    return bytes;
-  }
-
-  /**
-   * A dummy ChunkInputStream to mock read chunk calls to the DN.
-   */
-  public class DummyChunkInputStream extends ChunkInputStream {
-
-    // Stores the read chunk data in each readChunk call
-    private List<ByteString> readByteBuffers = new ArrayList<>();
-
-    DummyChunkInputStream(ChunkInfo chunkInfo,
-        BlockID blockId,
-        XceiverClientSpi xceiverClient,
-        boolean verifyChecksum) {
-      super(chunkInfo, blockId, xceiverClient, verifyChecksum);
-    }
-
-    public DummyChunkInputStream(ChunkInfo chunkInfo,
-        BlockID blockId,
-        XceiverClientSpi xceiverClient,
-        boolean verifyChecksum,
-        byte[] data) {
-      super(chunkInfo, blockId, xceiverClient, verifyChecksum);
-      chunkData = data;
-    }
-
-    @Override
-    protected ByteString readChunk(ChunkInfo readChunkInfo) {
-      ByteString byteString = ByteString.copyFrom(chunkData,
-          (int) readChunkInfo.getOffset(),
-          (int) readChunkInfo.getLen());
-      readByteBuffers.add(byteString);
-      return byteString;
-    }
-
-    @Override
-    protected void checkOpen() {
-      // No action needed
-    }
-  }
-
-  /**
-   * Match readData with the chunkData byte-wise.
-   * @param readData Data read through ChunkInputStream
-   * @param inputDataStartIndex first index (inclusive) in chunkData to compare
-   *                            with read data
-   * @param length the number of bytes of data to match starting from
-   *               inputDataStartIndex
-   */
-  private void matchWithInputData(byte[] readData, int inputDataStartIndex,
-      int length) {
-    for (int i = inputDataStartIndex; i < inputDataStartIndex + length; i++) {
-      Assert.assertEquals(chunkData[i], readData[i - inputDataStartIndex]);
-    }
-  }
-
-  /**
-   * Seek to a position and verify through getPos().
-   */
-  private void seekAndVerify(int pos) throws Exception {
-    chunkStream.seek(pos);
-    Assert.assertEquals("Current position of buffer does not match with the " +
-        "seeked position", pos, chunkStream.getPos());
-  }
-
-  @Test
-  public void testFullChunkRead() throws Exception {
-    byte[] b = new byte[CHUNK_SIZE];
-    chunkStream.read(b, 0, CHUNK_SIZE);
-
-    matchWithInputData(b, 0, CHUNK_SIZE);
-  }
-
-  @Test
-  public void testPartialChunkRead() throws Exception {
-    int len = CHUNK_SIZE / 2;
-    byte[] b = new byte[len];
-
-    chunkStream.read(b, 0, len);
-
-    matchWithInputData(b, 0, len);
-
-    // To read chunk data from index 0 to 49 (len = 50), we need to read the
-    // chunk from offset 0 to 59 as the checksum boundary is at every 20
-    // bytes. Verify that 60 bytes of chunk data are read and stored in the
-    // buffers.
-    matchWithInputData(chunkStream.readByteBuffers.get(0).toByteArray(),
-        0, 60);
-
-  }
-
-  @Test
-  public void testSeek() throws Exception {
-    seekAndVerify(0);
-
-    try {
-      seekAndVerify(CHUNK_SIZE);
-      Assert.fail("Seeking to Chunk Length should fail.");
-    } catch (EOFException e) {
-      GenericTestUtils.assertExceptionContains("EOF encountered at pos: "
-          + CHUNK_SIZE + " for chunk: " + CHUNK_NAME, e);
-    }
-
-    // Seek before read should update the ChunkInputStream#chunkPosition
-    seekAndVerify(25);
-    Assert.assertEquals(25, chunkStream.getChunkPosition());
-
-    // Read from the seeked position.
-    // Reading from index 25 to 54 should result in the ChunkInputStream
-    // copying chunk data from index 20 to 59 into the buffers (checksum
-    // boundaries).
-    byte[] b = new byte[30];
-    chunkStream.read(b, 0, 30);
-    matchWithInputData(b, 25, 30);
-    matchWithInputData(chunkStream.readByteBuffers.get(0).toByteArray(),
-        20, 40);
-
-    // After read, the position of the chunkStream is evaluated from the
-    // buffers and the chunkPosition should be reset to -1.
-    Assert.assertEquals(-1, chunkStream.getChunkPosition());
-
-    // Seek to a position within the current buffers. Current buffers contain
-    // data from index 20 to 59. ChunkPosition should still not be used to
-    // set the position.
-    seekAndVerify(35);
-    Assert.assertEquals(-1, chunkStream.getChunkPosition());
-
-    // Seek to a position outside the current buffers. In this case, the
-    // chunkPosition should be updated to the seeked position.
-    seekAndVerify(75);
-    Assert.assertEquals(75, chunkStream.getChunkPosition());
-  }
-
-  @Test
-  public void testSeekAndRead() throws Exception {
-    // Seek to a position and read data
-    seekAndVerify(50);
-    byte[] b1 = new byte[20];
-    chunkStream.read(b1, 0, 20);
-    matchWithInputData(b1, 50, 20);
-
-    // Next read should start from the position of the last read + 1 i.e. 70
-    byte[] b2 = new byte[20];
-    chunkStream.read(b2, 0, 20);
-    matchWithInputData(b2, 70, 20);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java
deleted file mode 100644
index abdd04e..0000000
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- * This package contains Ozone InputStream related tests.
- */
-package org.apache.hadoop.hdds.scm.storage;
\ No newline at end of file
diff --git a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
deleted file mode 100644
index 4441b69..0000000
--- a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<FindBugsFilter>
-  <Match>
-    <Package name="org.apache.hadoop.hdds.protocol.proto"/>
-  </Match>
-  <Match>
-    <Package name="org.apache.hadoop.hdds.protocol.datanode.proto"/>
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.hdds.cli.GenericCli"></Class>
-    <Bug pattern="DM_EXIT" />
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.ozone.common.ChecksumByteBuffer$CrcIntTable" />
-    <Method name="update" />
-    <Bug pattern="SF_SWITCH_FALLTHROUGH,SF_SWITCH_NO_DEFAULT" />
-  </Match>
-</FindBugsFilter>
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
deleted file mode 100644
index 9af807f..0000000
--- a/hadoop-hdds/common/pom.xml
+++ /dev/null
@@ -1,285 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdds</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-hdds-common</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Common</description>
-  <name>Apache Hadoop HDDS Common</name>
-  <packaging>jar</packaging>
-
-  <properties>
-    <hdds.version>0.5.0-SNAPSHOT</hdds.version>
-    <log4j2.version>2.11.0</log4j2.version>
-    <disruptor.version>3.4.2</disruptor.version>
-    <declared.hdds.version>${hdds.version}</declared.hdds.version>
-  </properties>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-config</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>javax.annotation</groupId>
-      <artifactId>javax.annotation-api</artifactId>
-      <version>1.2</version>
-    </dependency>
-
-    <dependency>
-      <groupId>org.fusesource.leveldbjni</groupId>
-      <artifactId>leveldbjni-all</artifactId>
-    </dependency>
-
-    <dependency>
-      <artifactId>ratis-server</artifactId>
-      <groupId>org.apache.ratis</groupId>
-      <exclusions>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.dropwizard.metrics</groupId>
-          <artifactId>metrics-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.bouncycastle</groupId>
-          <artifactId>bcprov-jdk15on</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <artifactId>ratis-netty</artifactId>
-      <groupId>org.apache.ratis</groupId>
-    </dependency>
-    <dependency>
-      <artifactId>ratis-grpc</artifactId>
-      <groupId>org.apache.ratis</groupId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.errorprone</groupId>
-      <artifactId>error_prone_annotations</artifactId>
-      <version>2.2.0</version>
-      <optional>true</optional>
-    </dependency>
-
-    <dependency>
-      <groupId>org.rocksdb</groupId>
-      <artifactId>rocksdbjni</artifactId>
-      <version>6.0.1</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.logging.log4j</groupId>
-      <artifactId>log4j-api</artifactId>
-      <version>${log4j2.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.logging.log4j</groupId>
-      <artifactId>log4j-core</artifactId>
-      <version>${log4j2.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>com.lmax</groupId>
-      <artifactId>disruptor</artifactId>
-      <version>${disruptor.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-pool2</artifactId>
-      <version>2.6.0</version>
-    </dependency>
-    <dependency>
-      <groupId>org.bouncycastle</groupId>
-      <artifactId>bcpkix-jdk15on</artifactId>
-      <version>${bouncycastle.version}</version>
-    </dependency>
-    <!-- https://mvnrepository.com/artifact/commons-validator/commons-validator -->
-    <dependency>
-      <groupId>commons-validator</groupId>
-      <artifactId>commons-validator</artifactId>
-      <version>1.6</version>
-    </dependency>
-    <dependency>
-      <groupId>org.junit.jupiter</groupId>
-      <artifactId>junit-jupiter-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>io.jaegertracing</groupId>
-      <artifactId>jaeger-client</artifactId>
-      <version>${jaeger.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>io.opentracing</groupId>
-      <artifactId>opentracing-util</artifactId>
-      <version>0.31.0</version>
-    </dependency>
-    <dependency>
-      <groupId>org.yaml</groupId>
-      <artifactId>snakeyaml</artifactId>
-      <version>1.16</version>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <resources>
-      <resource>
-        <directory>${basedir}/src/main/resources</directory>
-        <excludes>
-          <exclude>hdds-version-info.properties</exclude>
-        </excludes>
-        <filtering>false</filtering>
-      </resource>
-      <resource>
-        <directory>${basedir}/src/main/resources</directory>
-        <includes>
-          <include>hdds-version-info.properties</include>
-        </includes>
-        <filtering>true</filtering>
-      </resource>
-    </resources>
-    <extensions>
-      <extension>
-        <groupId>kr.motd.maven</groupId>
-        <artifactId>os-maven-plugin</artifactId>
-        <version>${os-maven-plugin.version}</version>
-      </extension>
-    </extensions>
-    <plugins>
-      <plugin>
-        <groupId>org.xolstice.maven.plugins</groupId>
-        <artifactId>protobuf-maven-plugin</artifactId>
-        <version>${protobuf-maven-plugin.version}</version>
-        <extensions>true</extensions>
-        <configuration>
-          <protocArtifact>
-            com.google.protobuf:protoc:${protobuf-compile.version}:exe:${os.detected.classifier}
-          </protocArtifact>
-          <protoSourceRoot>${basedir}/src/main/proto/</protoSourceRoot>
-          <includes>
-            <include>DatanodeContainerProtocol.proto</include>
-          </includes>
-          <outputDirectory>target/generated-sources/java</outputDirectory>
-          <clearOutputDirectory>false</clearOutputDirectory>
-        </configuration>
-        <executions>
-          <execution>
-            <id>compile-protoc</id>
-              <goals>
-                <goal>compile</goal>
-                <goal>test-compile</goal>
-                <goal>compile-custom</goal>
-                <goal>test-compile-custom</goal>
-              </goals>
-              <configuration>
-                <pluginId>grpc-java</pluginId>
-                <pluginArtifact>
-                  io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
-                </pluginArtifact>
-              </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <artifactId>maven-antrun-plugin</artifactId>
-        <executions>
-          <execution>
-            <phase>generate-sources</phase>
-            <configuration>
-              <tasks>
-                <replace token="com.google.protobuf" value="org.apache.ratis.thirdparty.com.google.protobuf"
-                  dir="target/generated-sources/java/org/apache/hadoop/hdds/protocol/datanode/proto">
-                </replace>
-                <replace token="io.grpc" value="org.apache.ratis.thirdparty.io.grpc"
-                  dir="target/generated-sources/java/org/apache/hadoop/hdds/protocol/datanode/proto">
-                </replace>
-              </tasks>
-            </configuration>
-            <goals>
-              <goal>run</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
-        <executions>
-          <execution>
-            <id>version-info</id>
-            <phase>generate-resources</phase>
-            <goals>
-              <goal>version-info</goal>
-            </goals>
-            <configuration>
-              <source>
-                <directory>${basedir}/../</directory>
-                <includes>
-                  <include>*/src/main/java/**/*.java</include>
-                  <include>*/src/main/proto/*.proto</include>
-                </includes>
-              </source>
-            </configuration>
-          </execution>
-          <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
-            <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>StorageContainerLocationProtocol.proto</include>
-                  <include>hdds.proto</include>
-                  <include>ScmBlockLocationProtocol.proto</include>
-                  <include>SCMSecurityProtocol.proto</include>
-                </includes>
-              </source>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/hadoop-hdds/common/src/main/bin/hadoop-config.cmd b/hadoop-hdds/common/src/main/bin/hadoop-config.cmd
deleted file mode 100644
index d77dc53..0000000
--- a/hadoop-hdds/common/src/main/bin/hadoop-config.cmd
+++ /dev/null
@@ -1,317 +0,0 @@
-@echo off
-@rem Licensed to the Apache Software Foundation (ASF) under one or more
-@rem contributor license agreements.  See the NOTICE file distributed with
-@rem this work for additional information regarding copyright ownership.
-@rem The ASF licenses this file to You under the Apache License, Version 2.0
-@rem (the "License"); you may not use this file except in compliance with
-@rem the License.  You may obtain a copy of the License at
-@rem
-@rem     http://www.apache.org/licenses/LICENSE-2.0
-@rem
-@rem Unless required by applicable law or agreed to in writing, software
-@rem distributed under the License is distributed on an "AS IS" BASIS,
-@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@rem See the License for the specific language governing permissions and
-@rem limitations under the License.
-
-@rem included in all the hadoop scripts with source command
-@rem should not be executable directly
-@rem also should not be passed any arguments, since we need original %*
-
-if not defined HADOOP_COMMON_DIR (
-  set HADOOP_COMMON_DIR=share\hadoop\common
-)
-if not defined HADOOP_COMMON_LIB_JARS_DIR (
-  set HADOOP_COMMON_LIB_JARS_DIR=share\hadoop\common\lib
-)
-if not defined HADOOP_COMMON_LIB_NATIVE_DIR (
-  set HADOOP_COMMON_LIB_NATIVE_DIR=lib\native
-)
-if not defined HDFS_DIR (
-  set HDFS_DIR=share\hadoop\hdfs
-)
-if not defined HDFS_LIB_JARS_DIR (
-  set HDFS_LIB_JARS_DIR=share\hadoop\hdfs\lib
-)
-if not defined YARN_DIR (
-  set YARN_DIR=share\hadoop\yarn
-)
-if not defined YARN_LIB_JARS_DIR (
-  set YARN_LIB_JARS_DIR=share\hadoop\yarn\lib
-)
-if not defined MAPRED_DIR (
-  set MAPRED_DIR=share\hadoop\mapreduce
-)
-if not defined MAPRED_LIB_JARS_DIR (
-  set MAPRED_LIB_JARS_DIR=share\hadoop\mapreduce\lib
-)
-
-@rem the root of the Hadoop installation
-set HADOOP_HOME=%~dp0
-for %%i in (%HADOOP_HOME%.) do (
-  set HADOOP_HOME=%%~dpi
-)
-if "%HADOOP_HOME:~-1%" == "\" (
-  set HADOOP_HOME=%HADOOP_HOME:~0,-1%
-)
-
-if not exist %HADOOP_HOME%\share\hadoop\common\hadoop-common-*.jar (
-    @echo +================================================================+
-    @echo ^|      Error: HADOOP_HOME is not set correctly                   ^|
-    @echo +----------------------------------------------------------------+
-    @echo ^| Please set your HADOOP_HOME variable to the absolute path of   ^|
-    @echo ^| the directory that contains the hadoop distribution            ^|
-    @echo +================================================================+
-    exit /b 1
-)
-
-if not defined HADOOP_CONF_DIR (
-  set HADOOP_CONF_DIR=%HADOOP_HOME%\etc\hadoop
-)
-
-@rem
-@rem Allow alternate conf dir location.
-@rem
-
-if "%1" == "--config" (
-  set HADOOP_CONF_DIR=%2
-  shift
-  shift
-)
-
-@rem
-@rem check to see whether the workers or the masters file is specified
-@rem
-
-if "%1" == "--hosts" (
-  set HADOOP_WORKERS=%HADOOP_CONF_DIR%\%2
-  shift
-  shift
-)
-
-@rem
-@rem Set log level. Default to INFO.
-@rem
-
-if "%1" == "--loglevel" (
-  set HADOOP_LOGLEVEL=%2
-  shift
-  shift
-)
-
-if exist %HADOOP_CONF_DIR%\hadoop-env.cmd (
-  call %HADOOP_CONF_DIR%\hadoop-env.cmd
-)
-
-@rem
-@rem setup java environment variables
-@rem
-
-if not defined JAVA_HOME (
-  echo Error: JAVA_HOME is not set.
-  goto :eof
-)
-
-if not exist %JAVA_HOME%\bin\java.exe (
-  echo Error: JAVA_HOME is incorrectly set.
-  echo        Please update %HADOOP_CONF_DIR%\hadoop-env.cmd
-  goto :eof
-)
-
-set JAVA=%JAVA_HOME%\bin\java
-@rem some Java parameters
-set JAVA_HEAP_MAX=-Xmx1000m
-
-@rem
-@rem check envvars which might override default args
-@rem
-
-if defined HADOOP_HEAPSIZE (
-  set JAVA_HEAP_MAX=-Xmx%HADOOP_HEAPSIZE%m
-)
-
-@rem
-@rem CLASSPATH initially contains %HADOOP_CONF_DIR%
-@rem
-
-set CLASSPATH=%HADOOP_CONF_DIR%
-
-if not defined HADOOP_COMMON_HOME (
-  if exist %HADOOP_HOME%\share\hadoop\common (
-    set HADOOP_COMMON_HOME=%HADOOP_HOME%
-  )
-)
-
-@rem
-@rem for releases, add core hadoop jar & webapps to CLASSPATH
-@rem
-
-if exist %HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\webapps (
-  set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%
-)
-
-if exist %HADOOP_COMMON_HOME%\%HADOOP_COMMON_LIB_JARS_DIR% (
-  set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_LIB_JARS_DIR%\*
-)
-
-set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\*
-
-@rem
-@rem default log directory and file
-@rem
-
-if not defined HADOOP_LOG_DIR (
-  set HADOOP_LOG_DIR=%HADOOP_HOME%\logs
-)
-
-if not defined HADOOP_LOGFILE (
-  set HADOOP_LOGFILE=hadoop.log
-)
-
-if not defined HADOOP_LOGLEVEL (
-  set HADOOP_LOGLEVEL=INFO
-)
-
-if not defined HADOOP_ROOT_LOGGER (
-  set HADOOP_ROOT_LOGGER=%HADOOP_LOGLEVEL%,console
-)
-
-@rem
-@rem default policy file for service-level authorization
-@rem
-
-if not defined HADOOP_POLICYFILE (
-  set HADOOP_POLICYFILE=hadoop-policy.xml
-)
-
-@rem
-@rem Determine the JAVA_PLATFORM
-@rem
-
-for /f "delims=" %%A in ('%JAVA% -Xmx32m %HADOOP_JAVA_PLATFORM_OPTS% -classpath "%CLASSPATH%" org.apache.hadoop.util.PlatformName') do set JAVA_PLATFORM=%%A
-@rem replace space with underscore
-set JAVA_PLATFORM=%JAVA_PLATFORM: =_%
-
-@rem
-@rem setup 'java.library.path' for native hadoop code if necessary
-@rem
-
-@rem Check if we're running hadoop directly from the build
-if exist %HADOOP_COMMON_HOME%\target\bin (
-  if defined JAVA_LIBRARY_PATH (
-    set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;%HADOOP_COMMON_HOME%\target\bin
-   ) else (
-    set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\target\bin
-   )
-)
-
-@rem For the distro case, check the bin folder
-if exist %HADOOP_COMMON_HOME%\bin (
-  if defined JAVA_LIBRARY_PATH (
-    set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;%HADOOP_COMMON_HOME%\bin
-  ) else (
-    set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\bin
-  )
-)
-
-@rem
-@rem setup a default TOOL_PATH
-@rem
-set TOOL_PATH=%HADOOP_HOME%\share\hadoop\tools\lib\*
-
-set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.log.dir=%HADOOP_LOG_DIR%
-set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.log.file=%HADOOP_LOGFILE%
-set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.home.dir=%HADOOP_HOME%
-set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.id.str=%HADOOP_IDENT_STRING%
-set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.root.logger=%HADOOP_ROOT_LOGGER%
-
-if defined JAVA_LIBRARY_PATH (
-  set HADOOP_OPTS=%HADOOP_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
-)
-set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.policy.file=%HADOOP_POLICYFILE%
-
-@rem
-@rem Disable IPv6 as it can cause issues
-@rem
-
-set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
-
-@rem
-@rem put hdfs in classpath if present
-@rem
-
-if not defined HADOOP_HDFS_HOME (
-  if exist %HADOOP_HOME%\%HDFS_DIR% (
-    set HADOOP_HDFS_HOME=%HADOOP_HOME%
-  )
-)
-
-if exist %HADOOP_HDFS_HOME%\%HDFS_DIR%\webapps (
-  set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR%
-)
-
-if exist %HADOOP_HDFS_HOME%\%HDFS_LIB_JARS_DIR% (
-  set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_LIB_JARS_DIR%\*
-)
-
-set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR%\*
-
-@rem
-@rem put yarn in classpath if present
-@rem
-
-if not defined HADOOP_YARN_HOME (
-  if exist %HADOOP_HOME%\%YARN_DIR% (
-    set HADOOP_YARN_HOME=%HADOOP_HOME%
-  )
-)
-
-if exist %HADOOP_YARN_HOME%\%YARN_DIR%\webapps (
-  set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR%
-)
-
-if exist %HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR% (
-  set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\*
-)
-
-set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR%\*
-
-@rem
-@rem put mapred in classpath if present AND different from YARN
-@rem
-
-if not defined HADOOP_MAPRED_HOME (
-  if exist %HADOOP_HOME%\%MAPRED_DIR% (
-    set HADOOP_MAPRED_HOME=%HADOOP_HOME%
-  )
-)
-
-if not "%HADOOP_MAPRED_HOME%\%MAPRED_DIR%" == "%HADOOP_YARN_HOME%\%YARN_DIR%" (
-
-  if exist %HADOOP_MAPRED_HOME%\%MAPRED_DIR%\webapps (
-    set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR%
-  )
-
-  if exist %HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR% (
-    set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR%\*
-  )
-
-  set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR%\*
-)
-
-@rem
-@rem add user-specified CLASSPATH last
-@rem
-
-if defined HADOOP_CLASSPATH (
-  if not defined HADOOP_USE_CLIENT_CLASSLOADER (
-    if defined HADOOP_USER_CLASSPATH_FIRST (
-      set CLASSPATH=%HADOOP_CLASSPATH%;%CLASSPATH%;
-    ) else (
-      set CLASSPATH=%CLASSPATH%;%HADOOP_CLASSPATH%;
-    )
-  )
-)
-
-:eof
diff --git a/hadoop-hdds/common/src/main/bin/hadoop-config.sh b/hadoop-hdds/common/src/main/bin/hadoop-config.sh
deleted file mode 100755
index 444b79a..0000000
--- a/hadoop-hdds/common/src/main/bin/hadoop-config.sh
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-####
-# IMPORTANT
-####
-
-## hadoop-config.sh tends to get executed by non-Hadoop scripts.
-## Those scripts expect this script to parse/manipulate $@. In order
-## to maintain backward compatibility, this means a surprising
-## lack of functions for bits that would be much better off in
-## a function.
-##
-## In other words, yes, there are some bad things happening here,
-## and unless we break the rest of the ecosystem, we can't change
-## them. :(
-
-
-# included in all the hadoop scripts with source command
-# should not be executable directly
-# also should not be passed any arguments, since we need original $*
-#
-# after doing more config, caller should also exec finalize
-# function to finish last minute/default configs for
-# settings that might be different between daemons & interactive
-
-# you must be this high to ride the ride
-if [[ -z "${BASH_VERSINFO[0]}" ]] \
-   || [[ "${BASH_VERSINFO[0]}" -lt 3 ]] \
-   || [[ "${BASH_VERSINFO[0]}" -eq 3 && "${BASH_VERSINFO[1]}" -lt 2 ]]; then
-  echo "bash v3.2+ is required. Sorry."
-  exit 1
-fi
-
-# In order to get partially bootstrapped, we need to figure out where
-# we are located. Chances are good that our caller has already done
-# this work for us, but just in case...
-
-if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
-  _hadoop_common_this="${BASH_SOURCE-$0}"
-  HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hadoop_common_this}")" >/dev/null && pwd -P)
-fi
-
-# get our functions defined for usage later
-if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
-   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh" ]]; then
-  # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
-  . "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh"
-elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" ]]; then
-  # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
-  . "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh"
-else
-  echo "ERROR: Unable to exec ${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh." 1>&2
-  exit 1
-fi
-
-hadoop_deprecate_envvar HADOOP_PREFIX HADOOP_HOME
-
-# allow overrides of the above and pre-defines of the below
-if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
-   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh" ]]; then
-  # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example
-  . "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh"
-elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" ]]; then
-  # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example
-  . "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh"
-fi
-
-#
-# IMPORTANT! We are not executing user provided code yet!
-#
-
-# Let's go!  Base definitions so we can move forward
-hadoop_bootstrap
-
-# let's find our conf.
-#
-# first, check and process params passed to us
-# we process this in-line so that we can directly modify $@
-# if something downstream is processing that directly,
-# we need to make sure our params have been ripped out
-# note that we do many of them here for various utilities.
-# this provides consistency and forces a more consistent
-# user experience
-
-
-# save these off in case our caller needs them
-# shellcheck disable=SC2034
-HADOOP_USER_PARAMS=("$@")
-
-hadoop_parse_args "$@"
-shift "${HADOOP_PARSE_COUNTER}"
-
-#
-# Setup the base-line environment
-#
-hadoop_find_confdir
-hadoop_exec_hadoopenv
-hadoop_import_shellprofiles
-hadoop_exec_userfuncs
-
-#
-# IMPORTANT! User provided code is now available!
-#
-
-hadoop_exec_user_hadoopenv
-hadoop_verify_confdir
-
-hadoop_deprecate_envvar HADOOP_SLAVES HADOOP_WORKERS
-hadoop_deprecate_envvar HADOOP_SLAVE_NAMES HADOOP_WORKER_NAMES
-hadoop_deprecate_envvar HADOOP_SLAVE_SLEEP HADOOP_WORKER_SLEEP
-
-# do all the OS-specific startup bits here
-# this allows us to get a decent JAVA_HOME,
-# call crle for LD_LIBRARY_PATH, etc.
-hadoop_os_tricks
-
-hadoop_java_setup
-
-hadoop_basic_init
-
-# inject any sub-project overrides, defaults, etc.
-if declare -F hadoop_subproject_init >/dev/null ; then
-  hadoop_subproject_init
-fi
-
-hadoop_shellprofiles_init
-
-# get the native libs in there pretty quick
-hadoop_add_javalibpath "${HADOOP_HOME}/build/native"
-hadoop_add_javalibpath "${HADOOP_HOME}/${HADOOP_COMMON_LIB_NATIVE_DIR}"
-
-hadoop_shellprofiles_nativelib
-
-# get the basic java class path for these subprojects
-# in as quickly as possible since other stuff
-# will definitely depend upon it.
-
-hadoop_add_common_to_classpath
-hadoop_shellprofiles_classpath
-
-# user API commands can now be run since the runtime
-# environment has been configured
-hadoop_exec_hadooprc
-
-#
-# backwards compatibility. new stuff should
-# call this when they are ready
-#
-if [[ -z "${HADOOP_NEW_CONFIG}" ]]; then
-  hadoop_finalize
-fi
diff --git a/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh b/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh
deleted file mode 100755
index 5530491..0000000
--- a/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Run a Hadoop command on all worker hosts.
-
-function hadoop_usage
-{
-  echo "Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] (start|stop|status) <hadoop-command> <args...>"
-}
-
-this="${BASH_SOURCE-$0}"
-bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
-
-# let's locate libexec...
-if [[ -n "${HADOOP_HOME}" ]]; then
-  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
-else
-  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
-fi
-
-HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
-# shellcheck disable=SC2034
-HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
-  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
-else
-  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
-  exit 1
-fi
-
-if [[ $# = 0 ]]; then
-  hadoop_exit_with_usage 1
-fi
-
-daemonmode=$1
-shift
-
-if [[ -z "${HADOOP_HDFS_HOME}" ]]; then
-  hdfsscript="${HADOOP_HOME}/bin/hdfs"
-else
-  hdfsscript="${HADOOP_HDFS_HOME}/bin/hdfs"
-fi
-
-hadoop_error "WARNING: Use of this script to ${daemonmode} HDFS daemons is deprecated."
-hadoop_error "WARNING: Attempting to execute replacement \"hdfs --workers --daemon ${daemonmode}\" instead."
-
-#
-# Original input was usually:
-#  hadoop-daemons.sh (shell options) (start|stop) (datanode|...) (daemon options)
-# we're going to turn this into
-#  hdfs --workers --daemon (start|stop) (rest of options)
-#
-for (( i = 0; i < ${#HADOOP_USER_PARAMS[@]}; i++ ))
-do
-  if [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^start$ ]] ||
-     [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^stop$ ]] ||
-     [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^status$ ]]; then
-    unset HADOOP_USER_PARAMS[$i]
-  fi
-done
-
-${hdfsscript} --workers --daemon "${daemonmode}" "${HADOOP_USER_PARAMS[@]}"
diff --git a/hadoop-hdds/common/src/main/bin/hadoop-functions.sh b/hadoop-hdds/common/src/main/bin/hadoop-functions.sh
deleted file mode 100755
index 484fe23..0000000
--- a/hadoop-hdds/common/src/main/bin/hadoop-functions.sh
+++ /dev/null
@@ -1,2732 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# we need to declare this globally as an array, which can only
-# be done outside of a function
-declare -a HADOOP_SUBCMD_USAGE
-declare -a HADOOP_OPTION_USAGE
-declare -a HADOOP_SUBCMD_USAGE_TYPES
-
-## @description  Print a message to stderr
-## @audience     public
-## @stability    stable
-## @replaceable  no
-## @param        string
-function hadoop_error
-{
-  echo "$*" 1>&2
-}
-
-## @description  Print a message to stderr if --debug is turned on
-## @audience     public
-## @stability    stable
-## @replaceable  no
-## @param        string
-function hadoop_debug
-{
-  if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then
-    echo "DEBUG: $*" 1>&2
-  fi
-}
-
-## @description  Given a filename or dir, return the absolute version of it
-## @description  This works as an alternative to readlink, which isn't
-## @description  portable.
-## @audience     public
-## @stability    stable
-## @param        fsobj
-## @replaceable  no
-## @return       0 success
-## @return       1 failure
-## @return       stdout abspath
-function hadoop_abs
-{
-  declare obj=$1
-  declare dir
-  declare fn
-  declare dirret
-
-  if [[ ! -e ${obj} ]]; then
-    return 1
-  elif [[ -d ${obj} ]]; then
-    dir=${obj}
-  else
-    dir=$(dirname -- "${obj}")
-    fn=$(basename -- "${obj}")
-    fn="/${fn}"
-  fi
-
-  dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P)
-  dirret=$?
-  if [[ ${dirret} = 0 ]]; then
-    echo "${dir}${fn}"
-    return 0
-  fi
-  return 1
-}
-
-## @description  Given variable $1 delete $2 from it
-## @audience     public
-## @stability    stable
-## @replaceable  no
-function hadoop_delete_entry
-{
-  if [[ ${!1} =~ \ ${2}\  ]] ; then
-    hadoop_debug "Removing ${2} from ${1}"
-    eval "${1}"=\""${!1// ${2} }"\"
-  fi
-}
-
-## @description  Given variable $1 add $2 to it
-## @audience     public
-## @stability    stable
-## @replaceable  no
-function hadoop_add_entry
-{
-  if [[ ! ${!1} =~ \ ${2}\  ]] ; then
-    hadoop_debug "Adding ${2} to ${1}"
-    #shellcheck disable=SC2140
-    eval "${1}"=\""${!1} ${2} "\"
-  fi
-}
-
-## @description  Given variable $1 determine if $2 is in it
-## @audience     public
-## @stability    stable
-## @replaceable  no
-## @return       0 = yes, 1 = no
-function hadoop_verify_entry
-{
-  # this unfortunately can't really be tested by bats. :(
-  # so if this changes, be aware that unit tests effectively
-  # do this function in them
-  [[ ${!1} =~ \ ${2}\  ]]
-}
-
-## @description  Check if an array has a given value
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        element
-## @param        array
-## @returns      0 = yes
-## @returns      1 = no
-function hadoop_array_contains
-{
-  declare element=$1
-  shift
-  declare val
-
-  if [[ "$#" -eq 0 ]]; then
-    return 1
-  fi
-
-  for val in "${@}"; do
-    if [[ "${val}" == "${element}" ]]; then
-      return 0
-    fi
-  done
-  return 1
-}
-
-## @description  Add the `appendstring` if it is not already
-## @description  present in the given array
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        envvar
-## @param        appendstring
-function hadoop_add_array_param
-{
-  declare arrname=$1
-  declare add=$2
-
-  declare arrref="${arrname}[@]"
-  declare array=("${!arrref}")
-
-  if ! hadoop_array_contains "${add}" "${array[@]}"; then
-    #shellcheck disable=SC1083,SC2086
-    eval ${arrname}=\(\"\${array[@]}\" \"${add}\" \)
-    hadoop_debug "$1 accepted $2"
-  else
-    hadoop_debug "$1 declined $2"
-  fi
-}
-
-## @description  Sort an array (must not contain regexps)
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        arrayvar
-function hadoop_sort_array
-{
-  declare arrname=$1
-  declare arrref="${arrname}[@]"
-  declare array=("${!arrref}")
-  declare oifs
-
-  declare globstatus
-  declare -a sa
-
-  globstatus=$(set -o | grep noglob | awk '{print $NF}')
-
-  set -f
-  oifs=${IFS}
-
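-  # split the sorted output on newlines; globbing was disabled above so
-  # nothing in the data is expanded as a pattern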
-  # shellcheck disable=SC2034
-  IFS=$'\n' sa=($(sort <<<"${array[*]}"))
-
-  # shellcheck disable=SC1083
-  eval "${arrname}"=\(\"\${sa[@]}\"\)
-
-  IFS=${oifs}
-  if [[ "${globstatus}" = off ]]; then
-    set +f
-  fi
-}
-
-## @description  Check if we are running with priv
-## @description  by default, this implementation looks for
-## @description  EUID=0.  For OSes that have true priv
-## @description  separation, this should be something more complex
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @return       1 = no priv
-## @return       0 = priv
-function hadoop_privilege_check
-{
-  [[ "${EUID}" = 0 ]]
-}
-
-## @description  Execute a command via su when running as root
-## @description  if the given user is found, or exit with
-## @description  failure if not. Otherwise just run the
-## @description  command.  (This is intended to
-## @description  be used by the start-*/stop-* scripts.)
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        user
-## @param        commandstring
-## @return       exitstatus
-function hadoop_su
-{
-  declare user=$1
-  shift
-
-  if hadoop_privilege_check; then
-    if hadoop_verify_user_resolves user; then
-       su -l "${user}" -- "$@"
-    else
-      hadoop_error "ERROR: Refusing to run as root: ${user} account is not found. Aborting."
-      return 1
-    fi
-  else
-    "$@"
-  fi
-}
-
-## @description  Execute a command via su when running as root
-## @description  with extra support for commands that might
-## @description  legitimately start as root (e.g., datanode)
-## @description  (This is intended to
-## @description  be used by the start-*/stop-* scripts.)
-## @audience     private
-## @stability    evolving
-## @replaceable  no
-## @param        user
-## @param        commandstring
-## @return       exitstatus
-function hadoop_uservar_su
-{
-
-  ## startup matrix:
-  #
-  # if $EUID != 0, then exec
-  # if $EUID =0 then
-  #    if hdfs_subcmd_user is defined, call hadoop_su to exec
-  #    if hdfs_subcmd_user is not defined, error
-  #
-  # For secure daemons, this means both the secure and insecure env vars need to be
-  # defined.  e.g., HDFS_DATANODE_USER=root HDFS_DATANODE_SECURE_USER=hdfs
-  # This function will pick up the "normal" var, switch to that user, then
-  # execute the command which will then pick up the "secure" version.
-  #
-
-  declare program=$1
-  declare command=$2
-  shift 2
-
-  declare uprogram
-  declare ucommand
-  declare uvar
-  declare svar
-
-  if hadoop_privilege_check; then
-    uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)
-
-    svar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER)
-
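-    # e.g., for "hdfs datanode" these resolve to HDFS_DATANODE_USER and
-    # HDFS_DATANODE_SECURE_USER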
-    if [[ -n "${!uvar}" ]]; then
-      hadoop_su "${!uvar}" "$@"
-    elif [[ -n "${!svar}" ]]; then
-      ## if we are here, then SECURE_USER with no USER defined
-      ## we are already privileged, so just run the command and hope
-      ## for the best
-      "$@"
-    else
-      hadoop_error "ERROR: Attempting to operate on ${program} ${command} as root"
-      hadoop_error "ERROR: but there is no ${uvar} defined. Aborting operation."
-      return 1
-    fi
-  else
-    "$@"
-  fi
-}
-
-## @description  Add a subcommand to the usage output
-## @audience     private
-## @stability    evolving
-## @replaceable  no
-## @param        subcommand
-## @param        subcommandtype
-## @param        subcommanddesc
-function hadoop_add_subcommand
-{
-  declare subcmd=$1
-  declare subtype=$2
-  declare text=$3
-
-  hadoop_debug "${subcmd} as a ${subtype}"
-
-  hadoop_add_array_param HADOOP_SUBCMD_USAGE_TYPES "${subtype}"
-
-  # done in this order so that sort works later
-  HADOOP_SUBCMD_USAGE[${HADOOP_SUBCMD_USAGE_COUNTER}]="${subcmd}@${subtype}@${text}"
-  ((HADOOP_SUBCMD_USAGE_COUNTER=HADOOP_SUBCMD_USAGE_COUNTER+1))
-}
-
-## @description  Add an option to the usage output
-## @audience     private
-## @stability    evolving
-## @replaceable  no
-## @param        subcommand
-## @param        subcommanddesc
-function hadoop_add_option
-{
-  local option=$1
-  local text=$2
-
-  HADOOP_OPTION_USAGE[${HADOOP_OPTION_USAGE_COUNTER}]="${option}@${text}"
-  ((HADOOP_OPTION_USAGE_COUNTER=HADOOP_OPTION_USAGE_COUNTER+1))
-}
-
-## @description  Reset the usage information to blank
-## @audience     private
-## @stability    evolving
-## @replaceable  no
-function hadoop_reset_usage
-{
-  HADOOP_SUBCMD_USAGE=()
-  HADOOP_OPTION_USAGE=()
-  HADOOP_SUBCMD_USAGE_TYPES=()
-  HADOOP_SUBCMD_USAGE_COUNTER=0
-  HADOOP_OPTION_USAGE_COUNTER=0
-}
-
-## @description  Print a screen-size aware two-column output
-## @description  if reqtype is not null, only print those requested
-## @audience     private
-## @stability    evolving
-## @replaceable  no
-## @param        reqtype
-## @param        array
-function hadoop_generic_columnprinter
-{
-  declare reqtype=$1
-  shift
-  declare -a input=("$@")
-  declare -i i=0
-  declare -i counter=0
-  declare line
-  declare text
-  declare option
-  declare giventext
-  declare -i maxoptsize=0
-  declare -i foldsize
-  declare -a tmpa
-  declare numcols
-  declare brup
-
-  if [[ -n "${COLUMNS}" ]]; then
-    numcols=${COLUMNS}
-  else
-    numcols=$(tput cols 2>/dev/null)
-    COLUMNS=${numcols}
-  fi
-
-  if [[ -z "${numcols}"
-     || ! "${numcols}" =~ ^[0-9]+$ ]]; then
-    numcols=75
-  else
-    ((numcols=numcols-5))
-  fi
-
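-  # first pass: collect the sorted entries and measure the widest option
-  # name so that the description column can be folded to fit the terminal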
-  while read -r line; do
-    tmpa[${counter}]=${line}
-    ((counter=counter+1))
-    IFS='@' read -ra brup <<< "${line}"
-    option="${brup[0]}"
-    if [[ ${#option} -gt ${maxoptsize} ]]; then
-      maxoptsize=${#option}
-    fi
-  done < <(for text in "${input[@]}"; do
-    echo "${text}"
-  done | sort)
-
-  i=0
-  ((foldsize=numcols-maxoptsize))
-
-  until [[ $i -eq ${#tmpa[@]} ]]; do
-    IFS='@' read -ra brup <<< "${tmpa[$i]}"
-
-    option="${brup[0]}"
-    cmdtype="${brup[1]}"
-    giventext="${brup[2]}"
-
-    if [[ -n "${reqtype}" ]]; then
-      if [[ "${cmdtype}" != "${reqtype}" ]]; then
-        ((i=i+1))
-        continue
-      fi
-    fi
-
-    if [[ -z "${giventext}" ]]; then
-      giventext=${cmdtype}
-    fi
-
-    while read -r line; do
-      printf "%-${maxoptsize}s   %-s\n" "${option}" "${line}"
-      option=" "
-    done < <(echo "${giventext}"| fold -s -w ${foldsize})
-    ((i=i+1))
-  done
-}
-
-## @description  Generate standard usage output,
-## @description  optionally taking a class
-## @audience     private
-## @stability    evolving
-## @replaceable  no
-## @param        execname
-## @param        true|false
-## @param        [text to use in place of SUBCOMMAND]
-function hadoop_generate_usage
-{
-  declare cmd=$1
-  declare takesclass=$2
-  declare subcmdtext=${3:-"SUBCOMMAND"}
-  declare haveoptions
-  declare optstring
-  declare havesubs
-  declare subcmdstring
-  declare cmdtype
-
-  cmd=${cmd##*/}
-
-  if [[ -n "${HADOOP_OPTION_USAGE_COUNTER}"
-        && "${HADOOP_OPTION_USAGE_COUNTER}" -gt 0 ]]; then
-    haveoptions=true
-    optstring=" [OPTIONS]"
-  fi
-
-  if [[ -n "${HADOOP_SUBCMD_USAGE_COUNTER}"
-        && "${HADOOP_SUBCMD_USAGE_COUNTER}" -gt 0 ]]; then
-    havesubs=true
-    subcmdstring=" ${subcmdtext} [${subcmdtext} OPTIONS]"
-  fi
-
-  echo "Usage: ${cmd}${optstring}${subcmdstring}"
-  if [[ ${takesclass} = true ]]; then
-    echo " or    ${cmd}${optstring} CLASSNAME [CLASSNAME OPTIONS]"
-    echo "  where CLASSNAME is a user-provided Java class"
-  fi
-
-  if [[ "${haveoptions}" = true ]]; then
-    echo ""
-    echo "  OPTIONS is none or any of:"
-    echo ""
-
-    hadoop_generic_columnprinter "" "${HADOOP_OPTION_USAGE[@]}"
-  fi
-
-  if [[ "${havesubs}" = true ]]; then
-    echo ""
-    echo "  ${subcmdtext} is one of:"
-    echo ""
-
-    if [[ "${#HADOOP_SUBCMD_USAGE_TYPES[@]}" -gt 0 ]]; then
-
-      hadoop_sort_array HADOOP_SUBCMD_USAGE_TYPES
-      for subtype in "${HADOOP_SUBCMD_USAGE_TYPES[@]}"; do
-        #shellcheck disable=SC2086
-        cmdtype="$(tr '[:lower:]' '[:upper:]' <<< ${subtype:0:1})${subtype:1}"
-        printf "\n    %s Commands:\n\n" "${cmdtype}"
-        hadoop_generic_columnprinter "${subtype}" "${HADOOP_SUBCMD_USAGE[@]}"
-      done
-    else
-      hadoop_generic_columnprinter "" "${HADOOP_SUBCMD_USAGE[@]}"
-    fi
-    echo ""
-    echo "${subcmdtext} may print help when invoked w/o parameters or with -h."
-  fi
-}
-
-## @description  Replace `oldvar` with `newvar` if `oldvar` exists.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        oldvar
-## @param        newvar
-function hadoop_deprecate_envvar
-{
-  local oldvar=$1
-  local newvar=$2
-  local oldval=${!oldvar}
-  local newval=${!newvar}
-
-  if [[ -n "${oldval}" ]]; then
-    hadoop_error "WARNING: ${oldvar} has been replaced by ${newvar}. Using value of ${oldvar}."
-    # shellcheck disable=SC2086
-    eval ${newvar}=\"${oldval}\"
-
-    # shellcheck disable=SC2086
-    newval=${oldval}
-
-    # shellcheck disable=SC2086
-    eval ${newvar}=\"${newval}\"
-  fi
-}
-
-## @description  Declare that `var` is being used and print its value.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        var
-function hadoop_using_envvar
-{
-  local var=$1
-  local val=${!var}
-
-  if [[ -n "${val}" ]]; then
-    hadoop_debug "${var} = ${val}"
-  fi
-}
-
-## @description  Create the directory 'dir'.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        dir
-function hadoop_mkdir
-{
-  local dir=$1
-
-  if [[ ! -w "${dir}" ]] && [[ ! -d "${dir}" ]]; then
-    hadoop_error "WARNING: ${dir} does not exist. Creating."
-    if ! mkdir -p "${dir}"; then
-      hadoop_error "ERROR: Unable to create ${dir}. Aborting."
-      exit 1
-    fi
-  fi
-}
-
-## @description  Bootstraps the Hadoop shell environment
-## @audience     private
-## @stability    evolving
-## @replaceable  no
-function hadoop_bootstrap
-{
-  # the root of the Hadoop installation
-  # See HADOOP-6255 for the expected directory structure layout
-
-  if [[ -n "${DEFAULT_LIBEXEC_DIR}" ]]; then
-    hadoop_error "WARNING: DEFAULT_LIBEXEC_DIR ignored. It has been replaced by HADOOP_DEFAULT_LIBEXEC_DIR."
-  fi
-
-  # By now, HADOOP_LIBEXEC_DIR should have been defined upstream
-  # We can piggyback off of that to figure out where the default
-  # HADOOP_PREFIX should be.  This allows us to run without
-  # HADOOP_HOME ever being defined by a human! As a consequence
-  # HADOOP_LIBEXEC_DIR now becomes perhaps the single most powerful
-  # env var within Hadoop.
-  if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
-    hadoop_error "HADOOP_LIBEXEC_DIR is not defined.  Exiting."
-    exit 1
-  fi
-  HADOOP_DEFAULT_PREFIX=$(cd -P -- "${HADOOP_LIBEXEC_DIR}/.." >/dev/null && pwd -P)
-  HADOOP_HOME=${HADOOP_HOME:-$HADOOP_DEFAULT_PREFIX}
-  export HADOOP_HOME
-
-  #
-  # short-cuts. vendors may redefine these as well, preferably
-  # in hadoop-layout.sh
-  #
-  HADOOP_COMMON_DIR=${HADOOP_COMMON_DIR:-"share/hadoop/common"}
-  HADOOP_COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR:-"share/hadoop/common/lib"}
-  HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_COMMON_LIB_NATIVE_DIR:-"lib/native"}
-  HDFS_DIR=${HDFS_DIR:-"share/hadoop/hdfs"}
-  HDFS_LIB_JARS_DIR=${HDFS_LIB_JARS_DIR:-"share/hadoop/hdfs/lib"}
-  YARN_DIR=${YARN_DIR:-"share/hadoop/yarn"}
-  YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
-  MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
-  MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
-  HDDS_DIR=${HDDS_DIR:-"share/hadoop/hdds"}
-  HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"}
-  OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"}
-  OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"}
-  OZONEFS_DIR=${OZONEFS_DIR:-"share/hadoop/ozonefs"}
-
-  HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}}
-  HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
-  HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}
-
-  # by default, whatever we are about to run doesn't support
-  # daemonization
-  HADOOP_SUBCMD_SUPPORTDAEMONIZATION=false
-
-  # by default, we have not been self-re-execed
-  HADOOP_REEXECED_CMD=false
-
-  HADOOP_SUBCMD_SECURESERVICE=false
-
-  # This is the default we claim in hadoop-env.sh
-  JSVC_HOME=${JSVC_HOME:-"/usr/bin"}
-
-  # usage output set to zero
-  hadoop_reset_usage
-
-  export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
-
-  # defaults
-  export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
-  hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}"
-}
-
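-# An illustrative sketch, not part of the original file: given only the
-# libexec location, hadoop_bootstrap derives the install root:
-#
-#   HADOOP_LIBEXEC_DIR=/opt/hadoop/libexec
-#   hadoop_bootstrap
-#   # HADOOP_HOME=/opt/hadoop; HADOOP_COMMON_DIR=share/hadoop/common; etc.
-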
-## @description  Locate Hadoop's configuration directory
-## @audience     private
-## @stability    evolving
-## @replaceable  no
-function hadoop_find_confdir
-{
-  local conf_dir
-
-  # An attempt at compatibility with some Hadoop 1.x
-  # installs.
-  if [[ -e "${HADOOP_HOME}/conf/hadoop-env.sh" ]]; then
-    conf_dir="conf"
-  else
-    conf_dir="etc/hadoop"
-  fi
-  export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_HOME}/${conf_dir}}"
-
-  hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}"
-}
-
-## @description  Validate ${HADOOP_CONF_DIR}
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @return       will exit on failure conditions
-function hadoop_verify_confdir
-{
-  # Check only log4j.properties by default.
-  # --loglevel does not work without logger settings in log4j.properties.
-  if [[ ! -f "${HADOOP_CONF_DIR}/log4j.properties" ]]; then
-    hadoop_error "WARNING: log4j.properties is not found. HADOOP_CONF_DIR may be incomplete."
-  fi
-}
-
-## @description  Import the hadoop-env.sh settings
-## @audience     private
-## @stability    evolving
-## @replaceable  no
-function hadoop_exec_hadoopenv
-{
-  if [[ -z "${HADOOP_ENV_PROCESSED}" ]]; then
-    if [[ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]]; then
-      export HADOOP_ENV_PROCESSED=true
-      # shellcheck source=./hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
-      . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-    fi
-  fi
-}
-
-## @description  Import the replaced functions
-## @audience     private
-## @stability    evolving
-## @replaceable  no
-function hadoop_exec_userfuncs
-{
-  if [[ -e "${HADOOP_CONF_DIR}/hadoop-user-functions.sh" ]]; then
-    # shellcheck disable=SC1090
-    . "${HADOOP_CONF_DIR}/hadoop-user-functions.sh"
-  fi
-}
-
-## @description  Read the user's settings.  This provides for users to
-## @description  override and/or append hadoop-env.sh. It is not meant
-## @description  as a complete system override.
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_exec_user_hadoopenv
-{
-  if [[ -f "${HOME}/.hadoop-env" ]]; then
-    hadoop_debug "Applying the user's .hadoop-env"
-    # shellcheck disable=SC1090
-    . "${HOME}/.hadoop-env"
-  fi
-}
-
-## @description  Read the user's settings.  This provides for users to
-## @description  run Hadoop Shell API after system bootstrap
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_exec_hadooprc
-{
-  if [[ -f "${HOME}/.hadooprc" ]]; then
-    hadoop_debug "Applying the user's .hadooprc"
-    # shellcheck disable=SC1090
-    . "${HOME}/.hadooprc"
-  fi
-}
-
-## @description  Import shellprofile.d content
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_import_shellprofiles
-{
-  local i
-  local files1
-  local files2
-
-  if [[ -d "${HADOOP_LIBEXEC_DIR}/shellprofile.d" ]]; then
-    files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*.sh)
-    hadoop_debug "shellprofiles: ${files1[*]}"
-  else
-    hadoop_error "WARNING: ${HADOOP_LIBEXEC_DIR}/shellprofile.d doesn't exist. Functionality may not work."
-  fi
-
-  if [[ -d "${HADOOP_CONF_DIR}/shellprofile.d" ]]; then
-    files2=(${HADOOP_CONF_DIR}/shellprofile.d/*.sh)
-  fi
-
-  # enable bundled shellprofiles that come
-  # from hadoop-tools.  This converts the user-facing HADOOP_OPTIONAL_TOOLS
-  # to the HADOOP_TOOLS_OPTIONS that the shell profiles expect.
-  # See dist-tools-hooks-maker for how the example HADOOP_OPTIONAL_TOOLS
-  # gets populated into hadoop-env.sh
-
-  for i in ${HADOOP_OPTIONAL_TOOLS//,/ }; do
-    hadoop_add_entry HADOOP_TOOLS_OPTIONS "${i}"
-  done
-
-  for i in "${files1[@]}" "${files2[@]}"
-  do
-    if [[ -n "${i}"
-      && -f "${i}" ]]; then
-      hadoop_debug "Profiles: importing ${i}"
-      # shellcheck disable=SC1090
-      . "${i}"
-    fi
-  done
-}
-
-## @description  Initialize the registered shell profiles
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_shellprofiles_init
-{
-  local i
-
-  for i in ${HADOOP_SHELL_PROFILES}
-  do
-    if declare -F _${i}_hadoop_init >/dev/null ; then
-       hadoop_debug "Profiles: ${i} init"
-       # shellcheck disable=SC2086
-       _${i}_hadoop_init
-    fi
-  done
-}
-
-## @description  Apply the shell profile classpath additions
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_shellprofiles_classpath
-{
-  local i
-
-  for i in ${HADOOP_SHELL_PROFILES}
-  do
-    if declare -F _${i}_hadoop_classpath >/dev/null ; then
-       hadoop_debug "Profiles: ${i} classpath"
-       # shellcheck disable=SC2086
-       _${i}_hadoop_classpath
-    fi
-  done
-}
-
-## @description  Apply the shell profile native library additions
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_shellprofiles_nativelib
-{
-  local i
-
-  for i in ${HADOOP_SHELL_PROFILES}
-  do
-    if declare -F _${i}_hadoop_nativelib >/dev/null ; then
-       hadoop_debug "Profiles: ${i} nativelib"
-       # shellcheck disable=SC2086
-       _${i}_hadoop_nativelib
-    fi
-  done
-}
-
-## @description  Apply the shell profile final configuration
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_shellprofiles_finalize
-{
-  local i
-
-  for i in ${HADOOP_SHELL_PROFILES}
-  do
-    if declare -F _${i}_hadoop_finalize >/dev/null ; then
-       hadoop_debug "Profiles: ${i} finalize"
-       # shellcheck disable=SC2086
-       _${i}_hadoop_finalize
-    fi
-  done
-}
-
-## @description  Initialize the Hadoop shell environment, now that
-## @description  user settings have been imported
-## @audience     private
-## @stability    evolving
-## @replaceable  no
-function hadoop_basic_init
-{
-  # Some of these are also set in hadoop-env.sh.
-  # we still set them here just in case hadoop-env.sh is
-  # broken in some way, set up defaults, etc.
-  #
-  # but it is important to note that if you update these
-  # you also need to update hadoop-env.sh as well!!!
-
-  CLASSPATH=""
-  hadoop_debug "Initialize CLASSPATH"
-
-  if [[ -z "${HADOOP_COMMON_HOME}" ]] &&
-  [[ -d "${HADOOP_HOME}/${HADOOP_COMMON_DIR}" ]]; then
-    export HADOOP_COMMON_HOME="${HADOOP_HOME}"
-  fi
-
-  # default policy file for service-level authorization
-  HADOOP_POLICYFILE=${HADOOP_POLICYFILE:-"hadoop-policy.xml"}
-
-  # define HADOOP_HDFS_HOME
-  if [[ -z "${HADOOP_HDFS_HOME}" ]] &&
-     [[ -d "${HADOOP_HOME}/${HDFS_DIR}" ]]; then
-    export HADOOP_HDFS_HOME="${HADOOP_HOME}"
-  fi
-
-  # define HADOOP_YARN_HOME
-  if [[ -z "${HADOOP_YARN_HOME}" ]] &&
-     [[ -d "${HADOOP_HOME}/${YARN_DIR}" ]]; then
-    export HADOOP_YARN_HOME="${HADOOP_HOME}"
-  fi
-
-  # define HADOOP_MAPRED_HOME
-  if [[ -z "${HADOOP_MAPRED_HOME}" ]] &&
-     [[ -d "${HADOOP_HOME}/${MAPRED_DIR}" ]]; then
-    export HADOOP_MAPRED_HOME="${HADOOP_HOME}"
-  fi
-
-  if [[ ! -d "${HADOOP_COMMON_HOME}" ]]; then
-    hadoop_error "ERROR: Invalid HADOOP_COMMON_HOME"
-    exit 1
-  fi
-
-  if [[ ! -d "${HADOOP_HDFS_HOME}" ]]; then
-    hadoop_error "ERROR: Invalid HADOOP_HDFS_HOME"
-    exit 1
-  fi
-
-  if [[ ! -d "${HADOOP_YARN_HOME}" ]]; then
-    hadoop_error "ERROR: Invalid HADOOP_YARN_HOME"
-    exit 1
-  fi
-
-  if [[ ! -d "${HADOOP_MAPRED_HOME}" ]]; then
-    hadoop_error "ERROR: Invalid HADOOP_MAPRED_HOME"
-    exit 1
-  fi
-
-  # if for some reason the shell doesn't have $USER defined
-  # (e.g., ssh'd in to execute a command)
-  # let's get the effective username and use that
-  USER=${USER:-$(id -nu)}
-  HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
-  HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_HOME}/logs"}
-  HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
-  HADOOP_LOGLEVEL=${HADOOP_LOGLEVEL:-INFO}
-  HADOOP_NICENESS=${HADOOP_NICENESS:-0}
-  HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
-  HADOOP_PID_DIR=${HADOOP_PID_DIR:-/tmp}
-  HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}
-  HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_DAEMON_ROOT_LOGGER:-${HADOOP_LOGLEVEL},RFA}
-  HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}
-  HADOOP_SSH_OPTS=${HADOOP_SSH_OPTS-"-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"}
-  HADOOP_SECURE_LOG_DIR=${HADOOP_SECURE_LOG_DIR:-${HADOOP_LOG_DIR}}
-  HADOOP_SECURE_PID_DIR=${HADOOP_SECURE_PID_DIR:-${HADOOP_PID_DIR}}
-  HADOOP_SSH_PARALLEL=${HADOOP_SSH_PARALLEL:-10}
-}
-
-## @description  Set the worker support information to the contents
-## @description  of `filename`
-## @audience     public
-## @stability    stable
-## @replaceable  no
-## @param        filename
-## @return       will exit if file does not exist
-function hadoop_populate_workers_file
-{
-  local workersfile=$1
-  shift
-  if [[ -f "${workersfile}" ]]; then
-    HADOOP_WORKERS="${workersfile}"
-  elif [[ -f "${HADOOP_CONF_DIR}/${workersfile}" ]]; then
-    HADOOP_WORKERS="${HADOOP_CONF_DIR}/${workersfile}"
-  else
-    hadoop_error "ERROR: Cannot find hosts file \"${workersfile}\""
-    hadoop_exit_with_usage 1
-  fi
-}
-
-## @description  Rotates the given `file` until `number` of
-## @description  files exist.
-## @audience     public
-## @stability    stable
-## @replaceable  no
-## @param        filename
-## @param        [number]
-## @return       $? will contain last mv's return value
-function hadoop_rotate_log
-{
-  #
-  # Users are likely to replace this one for something
-  # that gzips or uses dates or who knows what.
-  #
-  # be aware that &1 and &2 might go through here
-  # so don't do anything too crazy...
-  #
-  local log=$1;
-  local num=${2:-5};
-
-  if [[ -f "${log}" ]]; then # rotate logs
-    while [[ ${num} -gt 1 ]]; do
-      #shellcheck disable=SC2086
-      let prev=${num}-1
-      if [[ -f "${log}.${prev}" ]]; then
-        mv "${log}.${prev}" "${log}.${num}"
-      fi
-      num=${prev}
-    done
-    mv "${log}" "${log}.${num}"
-  fi
-}
-
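-# An illustrative sketch, not part of the original file: with a limit of 3,
-# older logs shift up by one and the live log becomes .1:
-#
-#   hadoop_rotate_log /var/log/hadoop/namenode.out 3
-#   # namenode.out.2 -> namenode.out.3
-#   # namenode.out.1 -> namenode.out.2
-#   # namenode.out   -> namenode.out.1
-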
-## @description  Via ssh, log into `hostname` and run `command`
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        hostname
-## @param        command
-## @param        [...]
-function hadoop_actual_ssh
-{
-  # we are passing this function to xargs
-  # should get hostname followed by rest of command line
-  local worker=$1
-  shift
-
-  # shellcheck disable=SC2086
-  ssh ${HADOOP_SSH_OPTS} ${worker} $"${@// /\\ }" 2>&1 | sed "s/^/$worker: /"
-}
-
-## @description  Connect to ${HADOOP_WORKERS} or ${HADOOP_WORKER_NAMES}
-## @description  and execute command.
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        command
-## @param        [...]
-function hadoop_connect_to_hosts
-{
-  # shellcheck disable=SC2124
-  local params="$@"
-  local worker_file
-  local tmpslvnames
-
-  #
-  # ssh (or whatever) to a host
-  #
-  # User can specify hostnames or a file where the hostnames are (not both)
-  if [[ -n "${HADOOP_WORKERS}" && -n "${HADOOP_WORKER_NAMES}" ]] ; then
-    hadoop_error "ERROR: Both HADOOP_WORKERS and HADOOP_WORKER_NAMES were defined. Aborting."
-    exit 1
-  elif [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
-    if [[ -n "${HADOOP_WORKERS}" ]]; then
-      worker_file=${HADOOP_WORKERS}
-    elif [[ -f "${HADOOP_CONF_DIR}/workers" ]]; then
-      worker_file=${HADOOP_CONF_DIR}/workers
-    elif [[ -f "${HADOOP_CONF_DIR}/slaves" ]]; then
-      hadoop_error "WARNING: 'slaves' file has been deprecated. Please use 'workers' file instead."
-      worker_file=${HADOOP_CONF_DIR}/slaves
-    fi
-  fi
-
-  # if pdsh is available, let's use it.  otherwise default
-  # to a loop around ssh.  (ugh)
-  if [[ -e '/usr/bin/pdsh' ]]; then
-    if [[ -z "${HADOOP_WORKER_NAMES}" ]] ; then
-      # if we were given a file, just let pdsh deal with it.
-      # shellcheck disable=SC2086
-      PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
-      -f "${HADOOP_SSH_PARALLEL}" -w ^"${worker_file}" $"${@// /\\ }" 2>&1
-    else
-      # no spaces allowed in the pdsh arg host list
-      # shellcheck disable=SC2086
-      tmpslvnames=$(echo ${HADOOP_WORKER_NAMES} | tr -s ' ' ,)
-      PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
-        -f "${HADOOP_SSH_PARALLEL}" \
-        -w "${tmpslvnames}" $"${@// /\\ }" 2>&1
-    fi
-  else
-    if [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
-      HADOOP_WORKER_NAMES=$(sed 's/#.*$//;/^$/d' "${worker_file}")
-    fi
-    hadoop_connect_to_hosts_without_pdsh "${params}"
-  fi
-}
-
-## @description  Connect to ${HADOOP_WORKER_NAMES} and execute command
-## @description  under the environment which does not support pdsh.
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        command
-## @param        [...]
-function hadoop_connect_to_hosts_without_pdsh
-{
-  # shellcheck disable=SC2124
-  local params="$@"
-  local workers=(${HADOOP_WORKER_NAMES})
-  for (( i = 0; i < ${#workers[@]}; i++ ))
-  do
-    if (( i != 0 && i % HADOOP_SSH_PARALLEL == 0 )); then
-      wait
-    fi
-    # shellcheck disable=SC2086
-    hadoop_actual_ssh "${workers[$i]}" ${params} &
-  done
-  wait
-}
-
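-# An illustrative sketch, not part of the original file: hosts are ssh'd
-# in batches of HADOOP_SSH_PARALLEL (default 10), waiting between batches:
-#
-#   HADOOP_WORKER_NAMES="host1 host2 host3"
-#   hadoop_connect_to_hosts_without_pdsh "uptime"
-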
-## @description  Utility routine to handle --workers mode
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        commandarray
-function hadoop_common_worker_mode_execute
-{
-  #
-  # input should be the command line as given by the user
-  # in the form of an array
-  #
-  local argv=("$@")
-
-  # if --workers is still on the command line, remove it
-  # to prevent loops
-  # Also remove --hostnames and --hosts along with arg values
-  local argsSize=${#argv[@]};
-  for (( i = 0; i < argsSize; i++ ))
-  do
-    if [[ "${argv[$i]}" =~ ^--workers$ ]]; then
-      unset argv[$i]
-    elif [[ "${argv[$i]}" =~ ^--hostnames$ ]] ||
-      [[ "${argv[$i]}" =~ ^--hosts$ ]]; then
-      unset argv[$i];
-      let i++;
-      unset argv[$i];
-    fi
-  done
-  if [[ ${QATESTMODE} = true ]]; then
-    echo "${argv[@]}"
-    return
-  fi
-  hadoop_connect_to_hosts -- "${argv[@]}"
-}
-
-## @description  Verify that a shell command was passed a valid
-## @description  class name
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        classname
-## @return       0 = success
-## @return       1 = failure w/user message
-function hadoop_validate_classname
-{
-  local class=$1
-  shift 1
-
-  if [[ ! ${class} =~ \. ]]; then
-    # assuming the arg is a typo of a command if it does not contain ".".
-    # class belonging to no package is not allowed as a result.
-    hadoop_error "ERROR: ${class} is not COMMAND nor fully qualified CLASSNAME."
-    return 1
-  fi
-  return 0
-}
-
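-# An illustrative sketch, not part of the original file: a bare word is
-# rejected as a probable mistyped subcommand:
-#
-#   hadoop_validate_classname org.apache.hadoop.util.RunJar   # returns 0
-#   hadoop_validate_classname runjar                          # error, returns 1
-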
-## @description  Append the `appendstring` if `checkstring` is not
-## @description  present in the given `envvar`
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        envvar
-## @param        checkstring
-## @param        appendstring
-function hadoop_add_param
-{
-  #
-  # general param dedupe..
-  # $1 is what we are adding to
-  # $2 is the name of what we want to add (key)
-  # $3 is the key+value of what we're adding
-  #
-  # doing it this way allows us to support all sorts of
-  # different syntaxes, just so long as they are space
-  # delimited
-  #
-  if [[ ! ${!1} =~ $2 ]] ; then
-    #shellcheck disable=SC2140
-    eval "$1"="'${!1} $3'"
-    if [[ ${!1:0:1} = ' ' ]]; then
-      #shellcheck disable=SC2140
-      eval "$1"="'${!1# }'"
-    fi
-    hadoop_debug "$1 accepted $3"
-  else
-    hadoop_debug "$1 declined $3"
-  fi
-}
-
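-# An illustrative sketch, not part of the original file: the key argument
-# deduplicates repeated additions to a space-delimited string:
-#
-#   hadoop_add_param HADOOP_OPTS Xmx "-Xmx2g"   # accepted
-#   hadoop_add_param HADOOP_OPTS Xmx "-Xmx4g"   # declined; Xmx already present
-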
-## @description  Register the given `shellprofile` to the Hadoop
-## @description  shell subsystem
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        shellprofile
-function hadoop_add_profile
-{
-  # shellcheck disable=SC2086
-  hadoop_add_param HADOOP_SHELL_PROFILES $1 $1
-}
-
-## @description  Add a file system object (directory, file,
-## @description  wildcard, ...) to the classpath. Optionally provide
-## @description  a hint as to where in the classpath it should go.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        object
-## @param        [before|after]
-## @return       0 = success (added or duplicate)
-## @return       1 = failure (doesn't exist or some other reason)
-function hadoop_add_classpath
-{
-  # However, with classpath (& JLP), we can do dedupe
-  # along with some sanity checking (e.g., missing directories)
-  # since we have a better idea of what is legal
-  #
-  # for wildcard at end, we can
-  # at least check the dir exists
-  if [[ $1 =~ ^.*\*$ ]]; then
-    local mp
-    mp=$(dirname "$1")
-    if [[ ! -d "${mp}" ]]; then
-      hadoop_debug "Rejected CLASSPATH: $1 (not a dir)"
-      return 1
-    fi
-
-    # no wildcard in the middle, so check existence
-    # (doesn't matter *what* it is)
-  elif [[ ! $1 =~ ^.*\*.*$ ]] && [[ ! -e "$1" ]]; then
-    hadoop_debug "Rejected CLASSPATH: $1 (does not exist)"
-    return 1
-  fi
-  if [[ -z "${CLASSPATH}" ]]; then
-    CLASSPATH=$1
-    hadoop_debug "Initial CLASSPATH=$1"
-  elif [[ ":${CLASSPATH}:" != *":$1:"* ]]; then
-    if [[ "$2" = "before" ]]; then
-      CLASSPATH="$1:${CLASSPATH}"
-      hadoop_debug "Prepend CLASSPATH: $1"
-    else
-      CLASSPATH+=:$1
-      hadoop_debug "Append CLASSPATH: $1"
-    fi
-  else
-    hadoop_debug "Dupe CLASSPATH: $1"
-  fi
-  return 0
-}
-
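-# An illustrative sketch, not part of the original file:
-#
-#   hadoop_add_classpath "${HADOOP_CONF_DIR}" before    # prepend if it exists
-#   hadoop_add_classpath "/opt/extra/lib/*"             # wildcard: dir must exist
-#   hadoop_add_classpath "/no/such.jar" || echo "rejected"
-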
-## @description  Add a file system object (directory, file,
-## @description  wildcard, ...) to the colonpath.  Optionally provide
-## @description  a hint as to where in the colonpath it should go.
-## @description  Prior to adding, objects are checked for duplication
-## @description  and for existence.  Many other functions use
-## @description  this function as their base implementation
-## @description  including `hadoop_add_javalibpath` and `hadoop_add_ldlibpath`.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        envvar
-## @param        object
-## @param        [before|after]
-## @return       0 = success (added or duplicate)
-## @return       1 = failure (doesn't exist or some other reason)
-function hadoop_add_colonpath
-{
-  # this is CLASSPATH, JLP, etc but with dedupe but no
-  # other checking
-  if [[ -d "${2}" ]] && [[ ":${!1}:" != *":$2:"* ]]; then
-    if [[ -z "${!1}" ]]; then
-      # shellcheck disable=SC2086
-      eval $1="'$2'"
-      hadoop_debug "Initial colonpath($1): $2"
-    elif [[ "$3" = "before" ]]; then
-      # shellcheck disable=SC2086
-      eval $1="'$2:${!1}'"
-      hadoop_debug "Prepend colonpath($1): $2"
-    else
-      # shellcheck disable=SC2086
-      eval $1+=":'$2'"
-      hadoop_debug "Append colonpath($1): $2"
-    fi
-    return 0
-  fi
-  hadoop_debug "Rejected colonpath($1): $2"
-  return 1
-}
-
-## @description  Add a file system object (directory, file,
-## @description  wildcard, ...) to the Java JNI path.  Optionally
-## @description  provide a hint as to where in the Java JNI path
-## @description  it should go.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        object
-## @param        [before|after]
-## @return       0 = success (added or duplicate)
-## @return       1 = failure (doesn't exist or some other reason)
-function hadoop_add_javalibpath
-{
-  # specialized function for a common use case
-  hadoop_add_colonpath JAVA_LIBRARY_PATH "$1" "$2"
-}
-
-## @description  Add a file system object (directory, file,
-## @description  wildcard, ...) to the LD_LIBRARY_PATH.  Optionally
-## @description  provide a hint as to where in the LD_LIBRARY_PATH
-## @description  it should go.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        object
-## @param        [before|after]
-## @return       0 = success (added or duplicate)
-## @return       1 = failure (doesn't exist or some other reason)
-function hadoop_add_ldlibpath
-{
-  local status
-  # specialized function for a common use case
-  hadoop_add_colonpath LD_LIBRARY_PATH "$1" "$2"
-  status=$?
-
-  # note that we export this
-  export LD_LIBRARY_PATH
-  return ${status}
-}
-
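-# An illustrative sketch, not part of the original file: native libraries
-# are typically added from the common native directory:
-#
-#   hadoop_add_ldlibpath "${HADOOP_HOME}/${HADOOP_COMMON_LIB_NATIVE_DIR}"
-#   # LD_LIBRARY_PATH is exported whether or not the add succeeded
-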
-## @description  Add the common/core Hadoop components to the
-## @description  environment
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @returns      1 on failure, may exit
-## @returns      0 on success
-function hadoop_add_common_to_classpath
-{
-  #
-  # get all of the common jars+config in the path
-  #
-
-  if [[ -z "${HADOOP_COMMON_HOME}"
-    || -z "${HADOOP_COMMON_DIR}"
-    || -z "${HADOOP_COMMON_LIB_JARS_DIR}" ]]; then
-    hadoop_debug "COMMON_HOME=${HADOOP_COMMON_HOME}"
-    hadoop_debug "COMMON_DIR=${HADOOP_COMMON_DIR}"
-    hadoop_debug "COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR}"
-    hadoop_error "ERROR: HADOOP_COMMON_HOME or related vars are not configured."
-    exit 1
-  fi
-
-  # developers
-  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
-    hadoop_add_classpath "${HADOOP_COMMON_HOME}/hadoop-common/target/classes"
-  fi
-
-  hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"'/*'
-  hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*'
-}
-
-## @description  Run libexec/tools/module.sh to add to the classpath
-## @description  environment
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        module
-function hadoop_add_to_classpath_tools
-{
-  declare module=$1
-
-  if [[ -f "${HADOOP_LIBEXEC_DIR}/tools/${module}.sh" ]]; then
-    # shellcheck disable=SC1090
-    . "${HADOOP_LIBEXEC_DIR}/tools/${module}.sh"
-  else
-    hadoop_error "ERROR: Tools helper ${HADOOP_LIBEXEC_DIR}/tools/${module}.sh was not found."
-  fi
-
-  if declare -f hadoop_classpath_tools_${module} >/dev/null 2>&1; then
-    "hadoop_classpath_tools_${module}"
-  fi
-}
-
-## @description  Add the user's custom classpath settings to the
-## @description  environment
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_add_to_classpath_userpath
-{
-  # Add the user-specified HADOOP_CLASSPATH to the
-  # official CLASSPATH env var if HADOOP_USE_CLIENT_CLASSLOADER
-  # is not set.
-  # Add it first or last depending on if user has
-  # set env-var HADOOP_USER_CLASSPATH_FIRST
-  # we'll also dedupe it, because we're cool like that.
-  #
-  declare -a array
-  declare -i c=0
-  declare -i j
-  declare -i i
-  declare idx
-
-  if [[ -n "${HADOOP_CLASSPATH}" ]]; then
-    # I wonder if Java runs on VMS.
-    for idx in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do
-      array[${c}]=${idx}
-      ((c=c+1))
-    done
-
-    # bats gets confused by j getting set to 0
-    ((j=c-1)) || ${QATESTMODE}
-
-    if [[ -z "${HADOOP_USE_CLIENT_CLASSLOADER}" ]]; then
-      if [[ -z "${HADOOP_USER_CLASSPATH_FIRST}" ]]; then
-        for ((i=0; i<=j; i++)); do
-          hadoop_add_classpath "${array[$i]}" after
-        done
-      else
-        for ((i=j; i>=0; i--)); do
-          hadoop_add_classpath "${array[$i]}" before
-        done
-      fi
-    fi
-  fi
-}
-
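-# An illustrative sketch, not part of the original file: ordering control
-# with the user classpath:
-#
-#   export HADOOP_CLASSPATH="/opt/a.jar:/opt/b.jar"
-#   export HADOOP_USER_CLASSPATH_FIRST=true
-#   hadoop_add_to_classpath_userpath
-#   # /opt/a.jar:/opt/b.jar now precede the framework jars in CLASSPATH
-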
-## @description  Routine to configure any OS-specific settings.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @return       may exit on failure conditions
-function hadoop_os_tricks
-{
-  local bindv6only
-
-  HADOOP_IS_CYGWIN=false
-  case ${HADOOP_OS_TYPE} in
-    Darwin)
-      if [[ -z "${JAVA_HOME}" ]]; then
-        if [[ -x /usr/libexec/java_home ]]; then
-          JAVA_HOME="$(/usr/libexec/java_home)"
-          export JAVA_HOME
-        else
-          JAVA_HOME=/Library/Java/Home
-          export JAVA_HOME
-        fi
-      fi
-    ;;
-    Linux)
-
-      # Newer versions of glibc use an arena memory allocator that
-      # causes virtual memory usage to explode. This interacts badly
-      # with the many threads that we use in Hadoop. Tune the variable
-      # down to prevent vmem explosion.
-      export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
-      # we put this in QA test mode off so that non-Linux can test
-      if [[ "${QATESTMODE}" = true ]]; then
-        return
-      fi
-
-      # NOTE! HADOOP_ALLOW_IPV6 is a developer hook.  We leave it
-      # undocumented in hadoop-env.sh because we don't want users to
-      # shoot themselves in the foot while devs make IPv6 work.
-
-      bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
-
-      if [[ -n "${bindv6only}" ]] &&
-         [[ "${bindv6only}" -eq "1" ]] &&
-         [[ "${HADOOP_ALLOW_IPV6}" != "yes" ]]; then
-        hadoop_error "ERROR: \"net.ipv6.bindv6only\" is set to 1 "
-        hadoop_error "ERROR: Hadoop networking could be broken. Aborting."
-        hadoop_error "ERROR: For more info: http://wiki.apache.org/hadoop/HadoopIPv6"
-        exit 1
-      fi
-    ;;
-    CYGWIN*)
-      # Flag that we're running on Cygwin to trigger path translation later.
-      HADOOP_IS_CYGWIN=true
-    ;;
-  esac
-}
-
-## @description  Configure/verify ${JAVA_HOME}
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @return       may exit on failure conditions
-function hadoop_java_setup
-{
-  # Bail if we did not detect it
-  if [[ -z "${JAVA_HOME}" ]]; then
-    hadoop_error "ERROR: JAVA_HOME is not set and could not be found."
-    exit 1
-  fi
-
-  if [[ ! -d "${JAVA_HOME}" ]]; then
-    hadoop_error "ERROR: JAVA_HOME ${JAVA_HOME} does not exist."
-    exit 1
-  fi
-
-  JAVA="${JAVA_HOME}/bin/java"
-
-  if [[ ! -x "$JAVA" ]]; then
-    hadoop_error "ERROR: $JAVA is not executable."
-    exit 1
-  fi
-}
-
-## @description  Finish Java JNI paths prior to execution
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_finalize_libpaths
-{
-  if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
-    hadoop_translate_cygwin_path JAVA_LIBRARY_PATH
-    hadoop_add_param HADOOP_OPTS java.library.path \
-      "-Djava.library.path=${JAVA_LIBRARY_PATH}"
-    export LD_LIBRARY_PATH
-  fi
-}
-
-## @description  Finish Java heap parameters prior to execution
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_finalize_hadoop_heap
-{
-  if [[ -n "${HADOOP_HEAPSIZE_MAX}" ]]; then
-    if [[ "${HADOOP_HEAPSIZE_MAX}" =~ ^[0-9]+$ ]]; then
-      HADOOP_HEAPSIZE_MAX="${HADOOP_HEAPSIZE_MAX}m"
-    fi
-    hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE_MAX}"
-  fi
-
-  # backwards compatibility
-  if [[ -n "${HADOOP_HEAPSIZE}" ]]; then
-    if [[ "${HADOOP_HEAPSIZE}" =~ ^[0-9]+$ ]]; then
-      HADOOP_HEAPSIZE="${HADOOP_HEAPSIZE}m"
-    fi
-    hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE}"
-  fi
-
-  if [[ -n "${HADOOP_HEAPSIZE_MIN}" ]]; then
-    if [[ "${HADOOP_HEAPSIZE_MIN}" =~ ^[0-9]+$ ]]; then
-      HADOOP_HEAPSIZE_MIN="${HADOOP_HEAPSIZE_MIN}m"
-    fi
-    hadoop_add_param HADOOP_OPTS Xms "-Xms${HADOOP_HEAPSIZE_MIN}"
-  fi
-}
-
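-# An illustrative sketch, not part of the original file: bare numbers gain
-# a trailing "m"; unit-suffixed values pass through untouched:
-#
-#   HADOOP_HEAPSIZE_MAX=4096   # becomes -Xmx4096m
-#   HADOOP_HEAPSIZE_MAX=4g     # becomes -Xmx4g
-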
-## @description  Converts the contents of the variable name
-## @description  `varnameref` into the equivalent Windows path.
-## @description  If the second parameter is true, then `varnameref`
-## @description  is treated as though it was a path list.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        varnameref
-## @param        [true]
-function hadoop_translate_cygwin_path
-{
-  if [[ "${HADOOP_IS_CYGWIN}" = "true" ]]; then
-    if [[ "$2" = "true" ]]; then
-      #shellcheck disable=SC2016
-      eval "$1"='$(cygpath -p -w "${!1}" 2>/dev/null)'
-    else
-      #shellcheck disable=SC2016
-      eval "$1"='$(cygpath -w "${!1}" 2>/dev/null)'
-    fi
-  fi
-}
-
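-# An illustrative sketch, not part of the original file; only has an
-# effect when running under Cygwin:
-#
-#   HADOOP_LOG_DIR=/cygdrive/c/hadoop/logs
-#   hadoop_translate_cygwin_path HADOOP_LOG_DIR     # -> C:\hadoop\logs
-#   hadoop_translate_cygwin_path CLASSPATH true     # path-list form (cygpath -p)
-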
-## @description  Adds the HADOOP_CLIENT_OPTS variable to
-## @description  HADOOP_OPTS if HADOOP_SUBCMD_SUPPORTDAEMONIZATION is false
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-function hadoop_add_client_opts
-{
-  if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = false
-     || -z "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" ]]; then
-    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
-  fi
-}
-
-## @description  Finish configuring Hadoop specific system properties
-## @description  prior to executing Java
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_finalize_hadoop_opts
-{
-  hadoop_translate_cygwin_path HADOOP_LOG_DIR
-  hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR}"
-  hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE}"
-  hadoop_translate_cygwin_path HADOOP_HOME
-  export HADOOP_HOME
-  hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_HOME}"
-  hadoop_add_param HADOOP_OPTS hadoop.id.str "-Dhadoop.id.str=${HADOOP_IDENT_STRING}"
-  hadoop_add_param HADOOP_OPTS hadoop.root.logger "-Dhadoop.root.logger=${HADOOP_ROOT_LOGGER}"
-  hadoop_add_param HADOOP_OPTS hadoop.policy.file "-Dhadoop.policy.file=${HADOOP_POLICYFILE}"
-  hadoop_add_param HADOOP_OPTS hadoop.security.logger "-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER}"
-}
-
-## @description  Finish Java classpath prior to execution
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_finalize_classpath
-{
-  hadoop_add_classpath "${HADOOP_CONF_DIR}" before
-
-  # user classpath gets added at the last minute. this allows
-  # override of CONF dirs and more
-  hadoop_add_to_classpath_userpath
-  hadoop_translate_cygwin_path CLASSPATH true
-}
-
-## @description  Finish all the remaining environment settings prior
-## @description  to executing Java.  This is a wrapper that calls
-## @description  the other `finalize` routines.
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_finalize
-{
-  hadoop_shellprofiles_finalize
-
-  hadoop_finalize_classpath
-  hadoop_finalize_libpaths
-  hadoop_finalize_hadoop_heap
-  hadoop_finalize_hadoop_opts
-
-  hadoop_translate_cygwin_path HADOOP_HOME
-  hadoop_translate_cygwin_path HADOOP_CONF_DIR
-  hadoop_translate_cygwin_path HADOOP_COMMON_HOME
-  hadoop_translate_cygwin_path HADOOP_HDFS_HOME
-  hadoop_translate_cygwin_path HADOOP_YARN_HOME
-  hadoop_translate_cygwin_path HADOOP_MAPRED_HOME
-}
-
-## @description  Print usage information and exit with the passed
-## @description  `exitcode`
-## @audience     public
-## @stability    stable
-## @replaceable  no
-## @param        exitcode
-## @return       This function will always exit.
-function hadoop_exit_with_usage
-{
-  local exitcode=$1
-  if [[ -z $exitcode ]]; then
-    exitcode=1
-  fi
-  # shellcheck disable=SC2034
-  if declare -F hadoop_usage >/dev/null ; then
-    hadoop_usage
-  elif [[ -x /usr/bin/cowsay ]]; then
-    /usr/bin/cowsay -f elephant "Sorry, no help available."
-  else
-    hadoop_error "Sorry, no help available."
-  fi
-  exit $exitcode
-}
-
-## @description  Verify that prerequisites have been met prior to
-## @description  executing a privileged program.
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @return       This routine may exit.
-function hadoop_verify_secure_prereq
-{
-  # if you are on an OS like Illumos that has functional roles
-  # and you are using pfexec, you'll probably want to change
-  # this.
-
-  if ! hadoop_privilege_check && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
-    hadoop_error "ERROR: You must be a privileged user in order to run a secure service."
-    exit 1
-  else
-    return 0
-  fi
-}
-
-## @description  Set up the environment for a secure service (by default,
-## @description  just point the pid and log dirs at their secure variants)
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_setup_secure_service
-{
-  # need a more complicated setup? replace me!
-
-  HADOOP_PID_DIR=${HADOOP_SECURE_PID_DIR}
-  HADOOP_LOG_DIR=${HADOOP_SECURE_LOG_DIR}
-}
-
-## @description  Verify that ${HADOOP_PID_DIR} is defined and writable,
-## @description  creating it if necessary
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_verify_piddir
-{
-  if [[ -z "${HADOOP_PID_DIR}" ]]; then
-    hadoop_error "No pid directory defined."
-    exit 1
-  fi
-  hadoop_mkdir "${HADOOP_PID_DIR}"
-  touch "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
-  if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR: Unable to write in ${HADOOP_PID_DIR}. Aborting."
-    exit 1
-  fi
-  rm "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
-}
-
-## @description  Verify that ${HADOOP_LOG_DIR} is defined and writable,
-## @description  creating it if necessary
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_verify_logdir
-{
-  if [[ -z "${HADOOP_LOG_DIR}" ]]; then
-    hadoop_error "No log directory defined."
-    exit 1
-  fi
-  hadoop_mkdir "${HADOOP_LOG_DIR}"
-  touch "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
-  if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR: Unable to write in ${HADOOP_LOG_DIR}. Aborting."
-    exit 1
-  fi
-  rm "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
-}
-
-## @description  Determine the status of the daemon referenced
-## @description  by `pidfile`
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        pidfile
-## @return       (mostly) LSB 4.1.0 compatible status
-function hadoop_status_daemon
-{
-  #
-  # LSB 4.1.0 compatible status command (1)
-  #
-  # 0 = program is running
-  # 1 = dead, but still a pid (2)
-  # 2 = (not used by us)
-  # 3 = not running
-  #
-  # 1 - this is not an endorsement of the LSB
-  #
-  # 2 - technically, the specification says /var/run/pid, so
-  #     we should never return this value, but we're giving
-  #     them the benefit of the doubt and returning 1 even if
-  #     our pid is not in /var/run .
-  #
-
-  local pidfile=$1
-  shift
-
-  local pid
-  local pspid
-
-  if [[ -f "${pidfile}" ]]; then
-    pid=$(cat "${pidfile}")
-    if pspid=$(ps -o args= -p"${pid}" 2>/dev/null); then
-      # this is to check that the running process we found is actually the same
-      # daemon that we're interested in
-      if [[ ${pspid} =~ -Dproc_${daemonname} ]]; then
-        return 0
-      fi
-    fi
-    return 1
-  fi
-  return 3
-}
-
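-# An illustrative sketch, not part of the original file. Note that the
-# -Dproc_ check above reads ${daemonname} from the caller's scope:
-#
-#   hadoop_status_daemon "${HADOOP_PID_DIR}/hadoop-${USER}-namenode.pid"
-#   case $? in
-#     0) echo "running" ;;
-#     1) echo "dead, but pid file exists" ;;
-#     3) echo "not running" ;;
-#   esac
-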
-## @description  Execute the Java `class`, passing along any `options`.
-## @description  Additionally, set the Java property -Dproc_`command`.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        command
-## @param        class
-## @param        [options]
-function hadoop_java_exec
-{
-  # run a java command.  this is used for
-  # non-daemons
-
-  local command=$1
-  local class=$2
-  shift 2
-
-  hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
-  hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
-  hadoop_debug "Final JAVA_HOME: ${JAVA_HOME}"
-  hadoop_debug "java: ${JAVA}"
-  hadoop_debug "Class name: ${class}"
-  hadoop_debug "Command line options: $*"
-
-  export CLASSPATH
-  #shellcheck disable=SC2086
-  exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
-}
-
-## @description  Start a non-privileged daemon in the foreground.
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        command
-## @param        class
-## @param        pidfile
-## @param        [options]
-function hadoop_start_daemon
-{
-  # this is our non-privileged daemon starter
-  # that fires up a daemon in the *foreground*
-  # so complex! so wow! much java!
-  local command=$1
-  local class=$2
-  local pidfile=$3
-  shift 3
-
-  hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
-  hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
-  hadoop_debug "Final JAVA_HOME: ${JAVA_HOME}"
-  hadoop_debug "java: ${JAVA}"
-  hadoop_debug "Class name: ${class}"
-  hadoop_debug "Command line options: $*"
-
-  # this is for the non-daemon pid creation
-  #shellcheck disable=SC2086
-  echo $$ > "${pidfile}" 2>/dev/null
-  if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR:  Cannot write ${command} pid ${pidfile}."
-  fi
-
-  export CLASSPATH
-  #shellcheck disable=SC2086
-  exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
-}
-
-## @description  Start a non-privileged daemon in the background.
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        command
-## @param        class
-## @param        pidfile
-## @param        outfile
-## @param        [options]
-function hadoop_start_daemon_wrapper
-{
-  local daemonname=$1
-  local class=$2
-  local pidfile=$3
-  local outfile=$4
-  shift 4
-
-  local counter
-
-  hadoop_rotate_log "${outfile}"
-
-  hadoop_start_daemon "${daemonname}" \
-    "$class" \
-    "${pidfile}" \
-    "$@" >> "${outfile}" 2>&1 < /dev/null &
-
-  # we need to avoid a race condition here
-  # so let's wait for the fork to finish
-  # before overriding with the daemonized pid
-  (( counter=0 ))
-  while [[ ! -f ${pidfile} && ${counter} -le 5 ]]; do
-    sleep 1
-    (( counter++ ))
-  done
-
-  # this is for daemon pid creation
-  #shellcheck disable=SC2086
-  echo $! > "${pidfile}" 2>/dev/null
-  if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR:  Cannot write ${daemonname} pid ${pidfile}."
-  fi
-
-  # shellcheck disable=SC2086
-  renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
-  if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR: Cannot set priority of ${daemonname} process $!"
-  fi
-
-  # shellcheck disable=SC2086
-  disown %+ >/dev/null 2>&1
-  if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!"
-  fi
-  sleep 1
-
-  # capture the ulimit output
-  ulimit -a >> "${outfile}" 2>&1
-
-  # shellcheck disable=SC2086
-  if ! ps -p $! >/dev/null 2>&1; then
-    return 1
-  fi
-  return 0
-}
-
-## @description  Start a privileged daemon in the foreground.
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        command
-## @param        class
-## @param        daemonpidfile
-## @param        daemonoutfile
-## @param        daemonerrfile
-## @param        wrapperpidfile
-## @param        [options]
-function hadoop_start_secure_daemon
-{
-  # this is used to launch a secure daemon in the *foreground*
-  #
-  local daemonname=$1
-  local class=$2
-
-  # pid file to create for our daemon
-  local daemonpidfile=$3
-
-  # where to send stdout. jsvc has bad habits so this *may* be &1
-  # which means you send it to stdout!
-  local daemonoutfile=$4
-
-  # where to send stderr.  same thing, except &2 = stderr
-  local daemonerrfile=$5
-  local privpidfile=$6
-  shift 6
-
-  hadoop_rotate_log "${daemonoutfile}"
-  hadoop_rotate_log "${daemonerrfile}"
-
-  # shellcheck disable=SC2153
-  jsvc="${JSVC_HOME}/jsvc"
-  if [[ ! -f "${jsvc}" ]]; then
-    hadoop_error "JSVC_HOME is not set or set incorrectly. jsvc is required to run secure"
-    hadoop_error "or privileged daemons. Please download and install jsvc from "
-    hadoop_error "http://archive.apache.org/dist/commons/daemon/binaries/ "
-    hadoop_error "and set JSVC_HOME to the directory containing the jsvc binary."
-    exit 1
-  fi
-
-  # note that shellcheck will throw an SC2086 warning here that is
-  # bogus for our use case; it doesn't properly support
-  # multi-line situations
-
-  hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
-  hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
-  hadoop_debug "Final JSVC_HOME: ${JSVC_HOME}"
-  hadoop_debug "jsvc: ${jsvc}"
-  hadoop_debug "Final HADOOP_DAEMON_JSVC_EXTRA_OPTS: ${HADOOP_DAEMON_JSVC_EXTRA_OPTS}"
-  hadoop_debug "Class name: ${class}"
-  hadoop_debug "Command line options: $*"
-
-  #shellcheck disable=SC2086
-  echo $$ > "${privpidfile}" 2>/dev/null
-  if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR:  Cannot write ${daemonname} pid ${privpidfile}."
-  fi
-
-  # shellcheck disable=SC2086
-  exec "${jsvc}" \
-    "-Dproc_${daemonname}" \
-    ${HADOOP_DAEMON_JSVC_EXTRA_OPTS} \
-    -outfile "${daemonoutfile}" \
-    -errfile "${daemonerrfile}" \
-    -pidfile "${daemonpidfile}" \
-    -nodetach \
-    -user "${HADOOP_SECURE_USER}" \
-    -cp "${CLASSPATH}" \
-    ${HADOOP_OPTS} \
-    "${class}" "$@"
-}
-
-## @description  Start a privileged daemon in the background.
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        command
-## @param        class
-## @param        daemonpidfile
-## @param        daemonoutfile
-## @param        wrapperpidfile
-## @param        wrapperoutfile
-## @param        daemonerrfile
-## @param        [options]
-function hadoop_start_secure_daemon_wrapper
-{
-  # this wraps hadoop_start_secure_daemon to take care
-  # of the dirty work to launch a daemon in the background!
-  local daemonname=$1
-  local class=$2
-
-  # same rules as hadoop_start_secure_daemon except we
-  # have some additional parameters
-
-  local daemonpidfile=$3
-
-  local daemonoutfile=$4
-
-  # the pid file of the subprocess that spawned our
-  # secure launcher
-  local jsvcpidfile=$5
-
-  # the output of the subprocess that spawned our secure
-  # launcher
-  local jsvcoutfile=$6
-
-  local daemonerrfile=$7
-  shift 7
-
-  local counter
-
-  hadoop_rotate_log "${jsvcoutfile}"
-
-  hadoop_start_secure_daemon \
-    "${daemonname}" \
-    "${class}" \
-    "${daemonpidfile}" \
-    "${daemonoutfile}" \
-    "${daemonerrfile}" \
-    "${jsvcpidfile}"  "$@" >> "${jsvcoutfile}" 2>&1 < /dev/null &
-
-  # we need to avoid a race condition here
-  # so let's wait for the fork to finish
-  # before overriding with the daemonized pid
-  (( counter=0 ))
-  while [[ ! -f ${daemonpidfile} && ${counter} -le 5 ]]; do
-    sleep 1
-    (( counter++ ))
-  done
-
-  #shellcheck disable=SC2086
-  if ! echo $! > "${jsvcpidfile}"; then
-    hadoop_error "ERROR:  Cannot write ${daemonname} pid ${jsvcpidfile}."
-  fi
-
-  sleep 1
-  #shellcheck disable=SC2086
-  renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
-  if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR: Cannot set priority of ${daemonname} process $!"
-  fi
-  if [[ -f "${daemonpidfile}" ]]; then
-    #shellcheck disable=SC2046
-    renice "${HADOOP_NICENESS}" $(cat "${daemonpidfile}" 2>/dev/null) >/dev/null 2>&1
-    if [[ $? -gt 0 ]]; then
-      hadoop_error "ERROR: Cannot set priority of ${daemonname} process $(cat "${daemonpidfile}" 2>/dev/null)"
-    fi
-  fi
-  #shellcheck disable=SC2046
-  disown %+ >/dev/null 2>&1
-  if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!"
-  fi
-  # capture the ulimit output
-  su "${HADOOP_SECURE_USER}" -c 'bash -c "ulimit -a"' >> "${jsvcoutfile}" 2>&1
-  #shellcheck disable=SC2086
-  if ! ps -p $! >/dev/null 2>&1; then
-    return 1
-  fi
-  return 0
-}
-
-## @description  Wait until the process dies or the timeout expires
-## @audience     private
-## @stability    evolving
-## @param        pid
-## @param        timeout
-function wait_process_to_die_or_timeout
-{
-  local pid=$1
-  local timeout=$2
-
-  # Normalize timeout
-  # Round up or down
-  timeout=$(printf "%.0f\n" "${timeout}")
-  if [[ ${timeout} -lt 1  ]]; then
-    # minimum 1 second
-    timeout=1
-  fi
-
-  # Wait to see if it's still alive
-  for (( i=0; i < "${timeout}"; i++ ))
-  do
-    if kill -0 "${pid}" > /dev/null 2>&1; then
-      sleep 1
-    else
-      break
-    fi
-  done
-}
-
-## @description  Stop the non-privileged `command` daemon that is
-## @description  running at `pidfile`.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        command
-## @param        pidfile
-function hadoop_stop_daemon
-{
-  local cmd=$1
-  local pidfile=$2
-  shift 2
-
-  local pid
-  local cur_pid
-
-  if [[ -f "${pidfile}" ]]; then
-    pid=$(cat "$pidfile")
-
-    kill "${pid}" >/dev/null 2>&1
-
-    wait_process_to_die_or_timeout "${pid}" "${HADOOP_STOP_TIMEOUT}"
-
-    if kill -0 "${pid}" > /dev/null 2>&1; then
-      hadoop_error "WARNING: ${cmd} did not stop gracefully after ${HADOOP_STOP_TIMEOUT} seconds: Trying to kill with kill -9"
-      kill -9 "${pid}" >/dev/null 2>&1
-    fi
-    wait_process_to_die_or_timeout "${pid}" "${HADOOP_STOP_TIMEOUT}"
-    if ps -p "${pid}" > /dev/null 2>&1; then
-      hadoop_error "ERROR: Unable to kill ${pid}"
-    else
-      cur_pid=$(cat "$pidfile")
-      if [[ "${pid}" = "${cur_pid}" ]]; then
-        rm -f "${pidfile}" >/dev/null 2>&1
-      else
-        hadoop_error "WARNING: pid has changed for ${cmd}, skip deleting pid file"
-      fi
-    fi
-  fi
-}
-
-## @description  Stop the privileged `command` daemon that is running
-## @description  at `daemonpidfile` and was launched with the wrapper
-## @description  at `wrapperpidfile`.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        command
-## @param        daemonpidfile
-## @param        wrapperpidfile
-function hadoop_stop_secure_daemon
-{
-  local command=$1
-  local daemonpidfile=$2
-  local privpidfile=$3
-  shift 3
-  local ret
-
-  local daemon_pid
-  local priv_pid
-  local cur_daemon_pid
-  local cur_priv_pid
-
-  daemon_pid=$(cat "$daemonpidfile")
-  priv_pid=$(cat "$privpidfile")
-
-  hadoop_stop_daemon "${command}" "${daemonpidfile}"
-  ret=$?
-
-  cur_daemon_pid=$(cat "$daemonpidfile")
-  cur_priv_pid=$(cat "$privpidfile")
-
-  if [[ "${daemon_pid}" = "${cur_daemon_pid}" ]]; then
-    rm -f "${daemonpidfile}" >/dev/null 2>&1
-  else
-    hadoop_error "WARNING: daemon pid has changed for ${command}, skip deleting daemon pid file"
-  fi
-
-  if [[ "${priv_pid}" = "${cur_priv_pid}" ]]; then
-    rm -f "${privpidfile}" >/dev/null 2>&1
-  else
-    hadoop_error "WARNING: priv pid has changed for ${command}, skip deleting priv pid file"
-  fi
-  return ${ret}
-}
-
-## @description  Manage a non-privileged daemon.
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        [start|stop|status|default]
-## @param        command
-## @param        class
-## @param        daemonpidfile
-## @param        daemonoutfile
-## @param        [options]
-function hadoop_daemon_handler
-{
-  local daemonmode=$1
-  local daemonname=$2
-  local class=$3
-  local daemon_pidfile=$4
-  local daemon_outfile=$5
-  shift 5
-
-  case ${daemonmode} in
-    status)
-      hadoop_status_daemon "${daemon_pidfile}"
-      exit $?
-    ;;
-
-    stop)
-      hadoop_stop_daemon "${daemonname}" "${daemon_pidfile}"
-      exit $?
-    ;;
-
-    ##COMPAT  -- older hadoops would also start daemons by default
-    start|default)
-      hadoop_verify_piddir
-      hadoop_verify_logdir
-      hadoop_status_daemon "${daemon_pidfile}"
-      if [[ $? == 0  ]]; then
-        hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}").  Stop it first."
-        exit 1
-      else
-        # stale pid file, so just remove it and continue on
-        rm -f "${daemon_pidfile}" >/dev/null 2>&1
-      fi
-      ##COMPAT  - differentiate between --daemon start and nothing
-      # "nothing" shouldn't detach
-      if [[ "$daemonmode" = "default" ]]; then
-        hadoop_start_daemon "${daemonname}" "${class}" "${daemon_pidfile}" "$@"
-      else
-        hadoop_start_daemon_wrapper "${daemonname}" \
-        "${class}" "${daemon_pidfile}" "${daemon_outfile}" "$@"
-      fi
-    ;;
-  esac
-}
-
-## @description  Manage a privileged daemon.
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        [start|stop|status|default]
-## @param        command
-## @param        class
-## @param        daemonpidfile
-## @param        daemonoutfile
-## @param        wrapperpidfile
-## @param        wrapperoutfile
-## @param        wrappererrfile
-## @param        [options]
-function hadoop_secure_daemon_handler
-{
-  local daemonmode=$1
-  local daemonname=$2
-  local classname=$3
-  local daemon_pidfile=$4
-  local daemon_outfile=$5
-  local priv_pidfile=$6
-  local priv_outfile=$7
-  local priv_errfile=$8
-  shift 8
-
-  case ${daemonmode} in
-    status)
-      hadoop_status_daemon "${daemon_pidfile}"
-      exit $?
-    ;;
-
-    stop)
-      hadoop_stop_secure_daemon "${daemonname}" \
-      "${daemon_pidfile}" "${priv_pidfile}"
-      exit $?
-    ;;
-
-    ##COMPAT  -- older hadoops would also start daemons by default
-    start|default)
-      hadoop_verify_piddir
-      hadoop_verify_logdir
-      hadoop_status_daemon "${daemon_pidfile}"
-      if [[ $? == 0  ]]; then
-        hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}").  Stop it first."
-        exit 1
-      else
-        # stale pid file, so just remove it and continue on
-        rm -f "${daemon_pidfile}" >/dev/null 2>&1
-      fi
-
-      ##COMPAT  - differentiate between --daemon start and nothing
-      # "nothing" shouldn't detach
-      if [[ "${daemonmode}" = "default" ]]; then
-        hadoop_start_secure_daemon "${daemonname}" "${classname}" \
-        "${daemon_pidfile}" "${daemon_outfile}" \
-        "${priv_errfile}" "${priv_pidfile}" "$@"
-      else
-        hadoop_start_secure_daemon_wrapper "${daemonname}" "${classname}" \
-        "${daemon_pidfile}" "${daemon_outfile}" \
-        "${priv_pidfile}" "${priv_outfile}" "${priv_errfile}"  "$@"
-      fi
-    ;;
-  esac
-}
-
-## @description  Autodetect whether this is a privileged subcommand,
-## @description  based on whether a per-subcommand secure user var
-## @description  exists and HADOOP_SECURE_CLASSNAME is defined
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        command
-## @param        subcommand
-## @return       1 = not priv
-## @return       0 = priv
-function hadoop_detect_priv_subcmd
-{
-  declare program=$1
-  declare command=$2
-  declare uvar
-
-  if [[ -z "${HADOOP_SECURE_CLASSNAME}" ]]; then
-    hadoop_debug "No secure classname defined."
-    return 1
-  fi
-
-  uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER)
-  if [[ -z "${!uvar}" ]]; then
-    hadoop_debug "No secure user defined."
-    return 1
-  fi
-  return 0
-}
-
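-# An illustrative sketch, not part of the original file (the classname
-# value here is a hypothetical placeholder): a subcommand is treated as
-# privileged only when both pieces are present:
-#
-#   HADOOP_SECURE_CLASSNAME="org.example.SecureStarter"
-#   export HDFS_DATANODE_SECURE_USER=hdfs
-#   hadoop_detect_priv_subcmd hdfs datanode && echo "privileged"
-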
-## @description  Build custom subcommand var
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        command
-## @param        subcommand
-## @param        customid
-## @return       string
-function hadoop_build_custom_subcmd_var
-{
-  declare program=$1
-  declare command=$2
-  declare custom=$3
-  declare uprogram
-  declare ucommand
-
-  if [[ -z "${BASH_VERSINFO[0]}" ]] \
-     || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
-    uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
-    ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
-  else
-    uprogram=${program^^}
-    ucommand=${command^^}
-  fi
-
-  echo "${uprogram}_${ucommand}_${custom}"
-}
-
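-# An illustrative sketch, not part of the original file:
-#
-#   hadoop_build_custom_subcmd_var hdfs datanode SECURE_USER
-#   # echoes HDFS_DATANODE_SECURE_USER
-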
-## @description  Verify that username in a var converts to user id
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        userstring
-## @return       0 for success
-## @return       1 for failure
-function hadoop_verify_user_resolves
-{
-  declare userstr=$1
-
-  if [[ -z ${userstr} || -z ${!userstr} ]] ; then
-    return 1
-  fi
-
-  id -u "${!userstr}" >/dev/null 2>&1
-}
-
-## @description  Verify that ${USER} is allowed to execute the
-## @description  given subcommand.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        command
-## @param        subcommand
-## @return       return 0 on success
-## @return       exit 1 on failure
-function hadoop_verify_user_perm
-{
-  declare program=$1
-  declare command=$2
-  declare uvar
-
-  if [[ ${command} =~ \. ]]; then
-    return 1
-  fi
-
-  uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)
-
-  if [[ -n ${!uvar} ]]; then
-    if [[ ${!uvar} !=  "${USER}" ]]; then
-      hadoop_error "ERROR: ${command} can only be executed by ${!uvar}."
-      exit 1
-    fi
-  fi
-  return 0
-}
-
-## @description  Determine whether the given subcommand needs to be
-## @description  re-executed as the configured privileged user.
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        subcommand
-## @return       1 on no re-exec needed
-## @return       0 on need to re-exec
-function hadoop_need_reexec
-{
-  declare program=$1
-  declare command=$2
-  declare uvar
-
-  # we've already been re-execed, bail
-
-  if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then
-    return 1
-  fi
-
-  if [[ ${command} =~ \. ]]; then
-    return 1
-  fi
-
-  # if we have privilege, and the _USER is defined, and _USER is
-  # set to someone who isn't us, then yes, we should re-exec.
-  # otherwise no, don't re-exec and let the system deal with it.
-
-  if hadoop_privilege_check; then
-    uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)
-    if [[ -n ${!uvar} ]]; then
-      if [[ ${!uvar} !=  "${USER}" ]]; then
-        return 0
-      fi
-    fi
-  fi
-  return 1
-}
-
-## @description  Add custom (program)_(command)_OPTS to HADOOP_OPTS.
-## @description  Also handles the deprecated cases from pre-3.x.
-## @audience     public
-## @stability    evolving
-## @replaceable  yes
-## @param        program
-## @param        subcommand
-## @return       will exit on failure conditions
-function hadoop_subcommand_opts
-{
-  declare program=$1
-  declare command=$2
-  declare uvar
-  declare depvar
-  declare uprogram
-  declare ucommand
-
-  if [[ -z "${program}" || -z "${command}" ]]; then
-    return 1
-  fi
-
-  if [[ ${command} =~ \. ]]; then
-    return 1
-  fi
-
-  # bash 4 and up have built-in ways to upper and lower
-  # case the contents of vars.  This is faster than
-  # calling tr.
-
-  ## We don't call hadoop_build_custom_subcmd_var here
-  ## since we need to construct this for the deprecation
-  ## cases. For Hadoop 4.x, this needs to get cleaned up.
-
-  if [[ -z "${BASH_VERSINFO[0]}" ]] \
-     || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
-    uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
-    ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
-  else
-    uprogram=${program^^}
-    ucommand=${command^^}
-  fi
-
-  uvar="${uprogram}_${ucommand}_OPTS"
-
-  # Let's handle all of the deprecation cases early
-  # HADOOP_NAMENODE_OPTS -> HDFS_NAMENODE_OPTS
-
-  depvar="HADOOP_${ucommand}_OPTS"
-
-  if [[ "${depvar}" != "${uvar}" ]]; then
-    if [[ -n "${!depvar}" ]]; then
-      hadoop_deprecate_envvar "${depvar}" "${uvar}"
-    fi
-  fi
-
-  if [[ -n ${!uvar} ]]; then
-    hadoop_debug "Appending ${uvar} onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
-    return 0
-  fi
-}
-
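-# An illustrative sketch, not part of the original file:
-#
-#   export HDFS_NAMENODE_OPTS="-XX:+UseG1GC"
-#   hadoop_subcommand_opts hdfs namenode    # appends onto HADOOP_OPTS
-#   # a legacy HADOOP_NAMENODE_OPTS would first be migrated with a WARNING
-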
-## @description  Add custom (program)_(command)_SECURE_EXTRA_OPTS to HADOOP_OPTS.
-## @description  This *does not* handle the pre-3.x deprecated cases
-## @audience     public
-## @stability    stable
-## @replaceable  yes
-## @param        program
-## @param        subcommand
-## @return       will exit on failure conditions
-function hadoop_subcommand_secure_opts
-{
-  declare program=$1
-  declare command=$2
-  declare uvar
-  declare uprogram
-  declare ucommand
-
-  if [[ -z "${program}" || -z "${command}" ]]; then
-    return 1
-  fi
-
-  # HDFS_DATANODE_SECURE_EXTRA_OPTS
-  # HDFS_NFS3_SECURE_EXTRA_OPTS
-  # ...
-  uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_EXTRA_OPTS)
-
-  if [[ -n ${!uvar} ]]; then
-    hadoop_debug "Appending ${uvar} onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
-    return 0
-  fi
-}
-
-## @description  Perform the 'hadoop classpath', etc subcommand with the given
-## @description  parameters
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        [parameters]
-## @return       will print & exit with no params
-function hadoop_do_classpath_subcommand
-{
-  if [[ "$#" -gt 1 ]]; then
-    eval "$1"=org.apache.hadoop.util.Classpath
-  else
-    hadoop_finalize
-    echo "${CLASSPATH}"
-    exit 0
-  fi
-}
-
-## @description  generic shell script option parser.  sets
-## @description  HADOOP_PARSE_COUNTER to the number of arguments
-## @description  the caller should shift
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-## @param        [parameters, typically "$@"]
-function hadoop_parse_args
-{
-  HADOOP_DAEMON_MODE="default"
-  HADOOP_PARSE_COUNTER=0
-
-  # not all of the options supported here are supported by all commands
-  # however these are:
-  hadoop_add_option "--config dir" "Hadoop config directory"
-  hadoop_add_option "--debug" "turn on shell script debug mode"
-  hadoop_add_option "--help" "usage information"
-
-  while true; do
-    hadoop_debug "hadoop_parse_args: processing $1"
-    case $1 in
-      --buildpaths)
-        HADOOP_ENABLE_BUILD_PATHS=true
-        shift
-        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
-      ;;
-      --config)
-        shift
-        confdir=$1
-        shift
-        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
-        if [[ -d "${confdir}" ]]; then
-          HADOOP_CONF_DIR="${confdir}"
-        elif [[ -z "${confdir}" ]]; then
-          hadoop_error "ERROR: No parameter provided for --config "
-          hadoop_exit_with_usage 1
-        else
-          hadoop_error "ERROR: Cannot find configuration directory \"${confdir}\""
-          hadoop_exit_with_usage 1
-        fi
-      ;;
-      --daemon)
-        shift
-        HADOOP_DAEMON_MODE=$1
-        shift
-        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
-        if [[ -z "${HADOOP_DAEMON_MODE}" || \
-          ! "${HADOOP_DAEMON_MODE}" =~ ^st(art|op|atus)$ ]]; then
-          hadoop_error "ERROR: --daemon must be followed by either \"start\", \"stop\", or \"status\"."
-          hadoop_exit_with_usage 1
-        fi
-      ;;
-      --debug)
-        shift
-        HADOOP_SHELL_SCRIPT_DEBUG=true
-        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
-      ;;
-      --help|-help|-h|help|--h|--\?|-\?|\?)
-        hadoop_exit_with_usage 0
-      ;;
-      --hostnames)
-        shift
-        HADOOP_WORKER_NAMES="$1"
-        shift
-        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
-      ;;
-      --hosts)
-        shift
-        hadoop_populate_workers_file "$1"
-        shift
-        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
-      ;;
-      --loglevel)
-        shift
-        # shellcheck disable=SC2034
-        HADOOP_LOGLEVEL="$1"
-        shift
-        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
-      ;;
-      --reexec)
-        shift
-        if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then
-          hadoop_error "ERROR: re-exec fork bomb prevention: --reexec already called"
-          exit 1
-        fi
-        HADOOP_REEXECED_CMD=true
-        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
-      ;;
-      --workers)
-        shift
-        # shellcheck disable=SC2034
-        HADOOP_WORKER_MODE=true
-        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
-      ;;
-      *)
-        break
-      ;;
-    esac
-  done
-
-  hadoop_debug "hadoop_parse: asking caller to skip ${HADOOP_PARSE_COUNTER}"
-}
-
-## @description Handle subcommands from main program entries
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_generic_java_subcmd_handler
-{
-  declare priv_outfile
-  declare priv_errfile
-  declare priv_pidfile
-  declare daemon_outfile
-  declare daemon_pidfile
-  declare secureuser
-
-  # The default/expected way to determine if a daemon is going to run in secure
-  # mode is defined by hadoop_detect_priv_subcmd.  If this returns true
-  # then set up the secure user var and tell the world we're in secure mode
-
-  if hadoop_detect_priv_subcmd "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"; then
-    HADOOP_SUBCMD_SECURESERVICE=true
-    secureuser=$(hadoop_build_custom_subcmd_var "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" SECURE_USER)
-
-    if ! hadoop_verify_user_resolves "${secureuser}"; then
-      hadoop_error "ERROR: User defined in ${secureuser} (${!secureuser}) does not exist. Aborting."
-      exit 1
-    fi
-
-    HADOOP_SECURE_USER="${!secureuser}"
-  fi
-
-  # check if we're running in secure mode.
-  # breaking this up from the above lets 3rd parties
-  # do things a bit differently
-  # secure services require some extra setup
-  # if yes, then we need to define all of the priv and daemon stuff
-  # if not, then we just need to define daemon stuff.
-  # note the daemon vars are purposefully different between the two
-
-  if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
-
-    hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
-
-    hadoop_verify_secure_prereq
-    hadoop_setup_secure_service
-    priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
-    priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.err"
-    priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
-    daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
-    daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
-  else
-    daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
-    daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
-  fi
-
-  # are we actually in daemon mode?
-  # if yes, use the daemon logger and the appropriate log file.
-  if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
-    HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
-    if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
-      HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
-    else
-      HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
-    fi
-  fi
-
-  # finish defining the environment: system properties, env vars, class paths, etc.
-  hadoop_finalize
-
-  # do the hard work of launching a daemon or just executing our interactive
-  # java class
-  if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = true ]]; then
-    if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
-      hadoop_secure_daemon_handler \
-        "${HADOOP_DAEMON_MODE}" \
-        "${HADOOP_SUBCMD}" \
-        "${HADOOP_SECURE_CLASSNAME}" \
-        "${daemon_pidfile}" \
-        "${daemon_outfile}" \
-        "${priv_pidfile}" \
-        "${priv_outfile}" \
-        "${priv_errfile}" \
-        "${HADOOP_SUBCMD_ARGS[@]}"
-    else
-      hadoop_daemon_handler \
-        "${HADOOP_DAEMON_MODE}" \
-        "${HADOOP_SUBCMD}" \
-        "${HADOOP_CLASSNAME}" \
-        "${daemon_pidfile}" \
-        "${daemon_outfile}" \
-        "${HADOOP_SUBCMD_ARGS[@]}"
-    fi
-    exit $?
-  else
-    hadoop_java_exec "${HADOOP_SUBCMD}" "${HADOOP_CLASSNAME}" "${HADOOP_SUBCMD_ARGS[@]}"
-  fi
-}
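Note: the functions above all hinge on a dynamic-variable convention,
(PROGRAM)_(SUBCOMMAND)_(SUFFIX), resolved via bash indirect expansion. A
minimal, self-contained sketch of the idea (the helper below is a simplified
stand-in for hadoop_build_custom_subcmd_var, not the real implementation):

    #!/usr/bin/env bash
    # Simplified sketch of the (program)_(subcommand)_(suffix) convention,
    # e.g. HDFS_NAMENODE_OPTS or HDFS_DATANODE_SECURE_USER.
    build_subcmd_var() {
      local program=$1 subcommand=$2 suffix=$3
      # bash 4+ uppercasing, mirroring the BASH_VERSINFO branch above
      echo "${program^^}_${subcommand^^}_${suffix}"
    }

    export HDFS_NAMENODE_OPTS="-Xmx4g"            # hypothetical site setting
    uvar=$(build_subcmd_var hdfs namenode OPTS)   # -> HDFS_NAMENODE_OPTS
    # indirect expansion, as in [[ -n ${!uvar} ]] in the functions above
    echo "${uvar} = ${!uvar}"                     # -> HDFS_NAMENODE_OPTS = -Xmx4g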
diff --git a/hadoop-hdds/common/src/main/bin/workers.sh b/hadoop-hdds/common/src/main/bin/workers.sh
deleted file mode 100755
index 05bc5fd..0000000
--- a/hadoop-hdds/common/src/main/bin/workers.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Run a shell command on all worker hosts.
-#
-# Environment Variables
-#
-#   HADOOP_WORKERS    File naming remote hosts.
-#     Default is ${HADOOP_CONF_DIR}/workers.
-#   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_HOME}/conf.
-#   HADOOP_WORKER_SLEEP Seconds to sleep between spawning remote commands.
-#   HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
-##
-
-function hadoop_usage
-{
-  echo "Usage: workers.sh [--config confdir] command..."
-}
-
-# let's locate libexec...
-if [[ -n "${HADOOP_HOME}" ]]; then
-  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
-else
-  this="${BASH_SOURCE-$0}"
-  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
-  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
-fi
-
-HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
-# shellcheck disable=SC2034
-HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
-  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
-else
-  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
-  exit 1
-fi
-
-# if no args specified, show usage
-if [[ $# -le 0 ]]; then
-  hadoop_exit_with_usage 1
-fi
-
-hadoop_connect_to_hosts "$@"
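For context, the deleted workers.sh wrapper was invoked with an arbitrary
command to run on every worker host; a usage sketch based on the environment
variables documented in its header (the paths below are hypothetical):

    # run 'uptime' on every host listed in ${HADOOP_CONF_DIR}/workers
    workers.sh uptime

    # use an alternate hosts file and config directory
    HADOOP_WORKERS=/tmp/my-workers workers.sh --config /etc/hadoop/conf uptime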
diff --git a/hadoop-hdds/common/src/main/conf/core-site.xml b/hadoop-hdds/common/src/main/conf/core-site.xml
deleted file mode 100644
index d2ddf89..0000000
--- a/hadoop-hdds/common/src/main/conf/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-</configuration>
diff --git a/hadoop-hdds/common/src/main/conf/hadoop-env.cmd b/hadoop-hdds/common/src/main/conf/hadoop-env.cmd
deleted file mode 100644
index 9718695..0000000
--- a/hadoop-hdds/common/src/main/conf/hadoop-env.cmd
+++ /dev/null
@@ -1,90 +0,0 @@
-@echo off
-@rem Licensed to the Apache Software Foundation (ASF) under one or more
-@rem contributor license agreements.  See the NOTICE file distributed with
-@rem this work for additional information regarding copyright ownership.
-@rem The ASF licenses this file to You under the Apache License, Version 2.0
-@rem (the "License"); you may not use this file except in compliance with
-@rem the License.  You may obtain a copy of the License at
-@rem
-@rem     http://www.apache.org/licenses/LICENSE-2.0
-@rem
-@rem Unless required by applicable law or agreed to in writing, software
-@rem distributed under the License is distributed on an "AS IS" BASIS,
-@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@rem See the License for the specific language governing permissions and
-@rem limitations under the License.
-
-@rem Set Hadoop-specific environment variables here.
-
-@rem The only required environment variable is JAVA_HOME.  All others are
-@rem optional.  When running a distributed configuration it is best to
-@rem set JAVA_HOME in this file, so that it is correctly defined on
-@rem remote nodes.
-
-@rem The java implementation to use.  Required.
-set JAVA_HOME=%JAVA_HOME%
-
-@rem The jsvc implementation to use. Jsvc is required to run secure datanodes.
-@rem set JSVC_HOME=%JSVC_HOME%
-
-@rem set HADOOP_CONF_DIR=
-
-@rem Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
-if exist %HADOOP_HOME%\contrib\capacity-scheduler (
-  if not defined HADOOP_CLASSPATH (
-    set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
-  ) else (
-    set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
-  )
-)
-
-@rem The maximum amount of heap to use, in MB. Default is 1000.
-@rem set HADOOP_HEAPSIZE=
-@rem set HADOOP_NAMENODE_INIT_HEAPSIZE=""
-
-@rem Extra Java runtime options.  Empty by default.
-@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
-
-@rem Command specific options appended to HADOOP_OPTS when specified
-if not defined HADOOP_SECURITY_LOGGER (
-  set HADOOP_SECURITY_LOGGER=INFO,RFAS
-)
-if not defined HDFS_AUDIT_LOGGER (
-  set HDFS_AUDIT_LOGGER=INFO,NullAppender
-)
-
-set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS%
-set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS%
-set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
-
-@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS%
-@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
-
-@rem On secure datanodes, user to run the datanode as after dropping privileges
-set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER%
-
-@rem Where log files are stored.  %HADOOP_HOME%/logs by default.
-@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME%
-
-@rem Where log files are stored in the secure data environment.
-set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER%
-
-@rem
-@rem Router-based HDFS Federation specific parameters
-@rem Specify the JVM options to be used when starting the RBF Routers.
-@rem These options will be appended to the options specified as HADOOP_OPTS
-@rem and therefore may override any similar flags set in HADOOP_OPTS
-@rem
-@rem set HADOOP_DFSROUTER_OPTS=""
-@rem
-
-@rem The directory where pid files are stored. /tmp by default.
-@rem NOTE: this should be set to a directory that can only be written to by
-@rem       the user that will run the hadoop daemons.  Otherwise there is the
-@rem       potential for a symlink attack.
-set HADOOP_PID_DIR=%HADOOP_PID_DIR%
-set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR%
-
-@rem A string representing this instance of hadoop. %USERNAME% by default.
-set HADOOP_IDENT_STRING=%USERNAME%
diff --git a/hadoop-hdds/common/src/main/conf/hadoop-env.sh b/hadoop-hdds/common/src/main/conf/hadoop-env.sh
deleted file mode 100644
index e43cd95..0000000
--- a/hadoop-hdds/common/src/main/conf/hadoop-env.sh
+++ /dev/null
@@ -1,439 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hadoop-specific environment variables here.
-
-##
-## THIS FILE ACTS AS THE MASTER FILE FOR ALL HADOOP PROJECTS.
-## SETTINGS HERE WILL BE READ BY ALL HADOOP COMMANDS.  THEREFORE,
-## ONE CAN USE THIS FILE TO SET YARN, HDFS, AND MAPREDUCE
-## CONFIGURATION OPTIONS INSTEAD OF xxx-env.sh.
-##
-## Precedence rules:
-##
-## {yarn-env.sh|hdfs-env.sh} > hadoop-env.sh > hard-coded defaults
-##
-## {YARN_xyz|HDFS_xyz} > HADOOP_xyz > hard-coded defaults
-##
-
-# Many of the options here are built from the perspective that users
-# may want to provide OVERWRITING values on the command line.
-# For example:
-#
-#  JAVA_HOME=/usr/java/testing hdfs dfs -ls
-#
-# Therefore, the vast majority (BUT NOT ALL!) of these defaults
-# are configured for substitution and not append.  If append
-# is preferable, modify this file accordingly.
-
-###
-# Generic settings for HADOOP
-###
-
-# Technically, the only required environment variable is JAVA_HOME.
-# All others are optional.  However, the defaults are probably not
-# preferred.  Many sites configure these options outside of Hadoop,
-# such as in /etc/profile.d
-
-# The java implementation to use. By default, this environment
-# variable is REQUIRED on ALL platforms except OS X!
-# export JAVA_HOME=
-
-# Location of Hadoop.  By default, Hadoop will attempt to determine
-# this location based upon its execution path.
-# export HADOOP_HOME=
-
-# Location of Hadoop's configuration information.  i.e., where this
-# file is living. If this is not defined, Hadoop will attempt to
-# locate it based upon its execution path.
-#
-# NOTE: It is recommended that this variable not be set here but in
-# /etc/profile.d or equivalent.  Some options (such as
-# --config) may react strangely otherwise.
-#
-# export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
-
-# The maximum amount of heap to use (Java -Xmx).  If no unit
-# is provided, it will be converted to MB.  Daemons will
-# prefer any Xmx setting in their respective _OPT variable.
-# There is no default; the JVM will autoscale based upon machine
-# memory size.
-# export HADOOP_HEAPSIZE_MAX=
-
-# The minimum amount of heap to use (Java -Xms).  If no unit
-# is provided, it will be converted to MB.  Daemons will
-# prefer any Xms setting in their respective _OPT variable.
-# There is no default; the JVM will autoscale based upon machine
-# memory size.
-# export HADOOP_HEAPSIZE_MIN=
-
-# Enable extra debugging of Hadoop's JAAS binding, used to set up
-# Kerberos security.
-# export HADOOP_JAAS_DEBUG=true
-
-# Extra Java runtime options for all Hadoop commands. We don't support
-# IPv6 yet/still, so by default the preference is set to IPv4.
-# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
-# For Kerberos debugging, an extended option set logs more information
-# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
-
-# Some parts of the shell code may do special things dependent upon
-# the operating system.  We have to set this here. See the next
-# section as to why....
-export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
-
-# Extra Java runtime options for some Hadoop commands
-# and clients (i.e., hdfs dfs -blah).  These get appended to HADOOP_OPTS for
-# such commands.  In most cases, this should be left empty,
-# letting users supply it on the command line.
-# export HADOOP_CLIENT_OPTS=""
-
-#
-# A note about classpaths.
-#
-# By default, Apache Hadoop overrides Java's CLASSPATH
-# environment variable.  It is configured such
-# that it starts out blank with new entries added after passing
-# a series of checks (file/dir exists, not already listed aka
-# de-duplication).  During de-duplication, wildcards and/or
-# directories are *NOT* expanded to keep it simple. Therefore,
-# if the computed classpath has two specific mentions of
-# awesome-methods-1.0.jar, only the first one added will be seen.
-# If two directories are in the classpath that both contain
-# awesome-methods-1.0.jar, then Java will pick up both versions.
-
-# An additional, custom CLASSPATH. Site-wide configs should be
-# handled via the shellprofile functionality, utilizing the
-# hadoop_add_classpath function for greater control, making it
-# much harder for apps/end-users to accidentally override.
-# Similarly, end users should utilize ${HOME}/.hadooprc .
-# This variable should ideally only be used as a short-cut,
-# interactive way for temporary additions on the command line.
-# export HADOOP_CLASSPATH="/some/cool/path/on/your/machine"
-
-# Should HADOOP_CLASSPATH be first in the official CLASSPATH?
-# export HADOOP_USER_CLASSPATH_FIRST="yes"
-
-# If HADOOP_USE_CLIENT_CLASSLOADER is set, the classpath along
-# with the main jar are handled by a separate isolated
-# client classloader when 'hadoop jar', 'yarn jar', or 'mapred job'
-# is utilized. If it is set, HADOOP_CLASSPATH and
-# HADOOP_USER_CLASSPATH_FIRST are ignored.
-# export HADOOP_USE_CLIENT_CLASSLOADER=true
-
-# HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES overrides the default definition of
-# system classes for the client classloader when HADOOP_USE_CLIENT_CLASSLOADER
-# is enabled. Names ending in '.' (period) are treated as package names, and
-# names starting with a '-' are treated as negative matches. For example,
-# export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop."
-
-# Enable optional, bundled Hadoop features
-# This is a comma delimited list.  It may NOT be overridden via .hadooprc
-# Entries may be added/removed as needed.
-# export HADOOP_OPTIONAL_TOOLS="@@@HADOOP_OPTIONAL_TOOLS@@@"
-
-###
-# Options for remote shell connectivity
-###
-
-# There are some optional components of hadoop that allow for
-# command and control of remote hosts.  For example,
-# start-dfs.sh will attempt to bring up all NNs, DNs, etc.
-
-# Options to pass to SSH when one of the "log into a host and
-# start/stop daemons" scripts is executed
-# export HADOOP_SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"
-
-# The built-in ssh handler will limit itself to 10 simultaneous connections.
-# For pdsh users, this sets the fanout size ( -f )
-# Change this to increase/decrease as necessary.
-# export HADOOP_SSH_PARALLEL=10
-
-# Filename which contains all of the hosts for any remote execution
-# helper scripts such as workers.sh, start-dfs.sh, etc.
-# export HADOOP_WORKERS="${HADOOP_CONF_DIR}/workers"
-
-###
-# Options for all daemons
-###
-#
-
-#
-# Many options may also be specified as Java properties.  It is
-# very common, and in many cases, desirable, to hard-set these
-# in daemon _OPTS variables.  Where applicable, the appropriate
-# Java property is also identified.  Note that many are re-used
-# or set differently in certain contexts (e.g., secure vs
-# non-secure)
-#
-
-# Where (primarily) daemon log files are stored.
-# ${HADOOP_HOME}/logs by default.
-# Java property: hadoop.log.dir
-# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
-
-# A string representing this instance of hadoop. $USER by default.
-# This is used in writing log and pid files, so keep that in mind!
-# Java property: hadoop.id.str
-# export HADOOP_IDENT_STRING=$USER
-
-# How many seconds to pause after stopping a daemon
-# export HADOOP_STOP_TIMEOUT=5
-
-# Where pid files are stored.  /tmp by default.
-# export HADOOP_PID_DIR=/tmp
-
-# Default log4j setting for interactive commands
-# Java property: hadoop.root.logger
-# export HADOOP_ROOT_LOGGER=INFO,console
-
-# Default log4j setting for daemons spawned explicitly by
-# --daemon option of hadoop, hdfs, mapred and yarn command.
-# Java property: hadoop.root.logger
-# export HADOOP_DAEMON_ROOT_LOGGER=INFO,RFA
-
-# Default log level and output location for security-related messages.
-# You will almost certainly want to change this on a per-daemon basis via
-# the Java property (i.e., -Dhadoop.security.logger=foo). (Note that the
-# defaults for the NN and 2NN override this by default.)
-# Java property: hadoop.security.logger
-# export HADOOP_SECURITY_LOGGER=INFO,NullAppender
-
-# Default process priority level
-# Note that sub-processes will also run at this level!
-# export HADOOP_NICENESS=0
-
-# Default name for the service level authorization file
-# Java property: hadoop.policy.file
-# export HADOOP_POLICYFILE="hadoop-policy.xml"
-
-#
-# NOTE: this is not used by default!  <-----
-# You can define variables right here and then re-use them later on.
-# For example, it is common to use the same garbage collection settings
-# for all the daemons.  So one could define:
-#
-# export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
-#
-# ... and then use it as in option (b) under the NameNode section below.
-
-###
-# Secure/privileged execution
-###
-
-#
-# Out of the box, Hadoop uses jsvc from Apache Commons to launch daemons
-# on privileged ports.  This functionality can be replaced by providing
-# custom functions.  See hadoop-functions.sh for more information.
-#
-
-# The jsvc implementation to use. Jsvc is required to run secure datanodes
-# that bind to privileged ports to provide authentication of data transfer
-# protocol.  Jsvc is not required if SASL is configured for authentication of
-# data transfer protocol using non-privileged ports.
-# export JSVC_HOME=/usr/bin
-
-#
-# This directory contains pids for secure and privileged processes.
-#export HADOOP_SECURE_PID_DIR=${HADOOP_PID_DIR}
-
-#
-# This directory contains the logs for secure and privileged processes.
-# Java property: hadoop.log.dir
-# export HADOOP_SECURE_LOG=${HADOOP_LOG_DIR}
-
-#
-# When running a secure daemon, the default value of HADOOP_IDENT_STRING
-# ends up being a bit bogus.  Therefore, by default, the code will
-# replace HADOOP_IDENT_STRING with HADOOP_xx_SECURE_USER.  If one wants
-# to keep HADOOP_IDENT_STRING untouched, then uncomment this line.
-# export HADOOP_SECURE_IDENT_PRESERVE="true"
-
-###
-# NameNode specific parameters
-###
-
-# Default log level and output location for file system related change
-# messages. For non-namenode daemons, the Java property must be set in
-# the appropriate _OPTS if one wants something other than INFO,NullAppender
-# Java property: hdfs.audit.logger
-# export HDFS_AUDIT_LOGGER=INFO,NullAppender
-
-# Specify the JVM options to be used when starting the NameNode.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# a) Set JMX options
-# export HDFS_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
-#
-# b) Set garbage collection logs
-# export HDFS_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
-#
-# c) ... or set them directly
-# export HDFS_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
-
-# This is the default:
-# export HDFS_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
-
-###
-# SecondaryNameNode specific parameters
-###
-# Specify the JVM options to be used when starting the SecondaryNameNode.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# This is the default:
-# export HDFS_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
-
-###
-# DataNode specific parameters
-###
-# Specify the JVM options to be used when starting the DataNode.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# This is the default:
-# export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
-
-# On secure datanodes, user to run the datanode as after dropping privileges.
-# This **MUST** be uncommented to enable secure HDFS if using privileged ports
-# to provide authentication of data transfer protocol.  This **MUST NOT** be
-# defined if SASL is configured for authentication of data transfer protocol
-# using non-privileged ports.
-# This will replace the hadoop.id.str Java property in secure mode.
-# export HDFS_DATANODE_SECURE_USER=hdfs
-
-# Supplemental options for secure datanodes
-# By default, Hadoop uses jsvc, which needs to be told to launch a
-# server jvm.
-# export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server"
-
-###
-# NFS3 Gateway specific parameters
-###
-# Specify the JVM options to be used when starting the NFS3 Gateway.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_NFS3_OPTS=""
-
-# Specify the JVM options to be used when starting the Hadoop portmapper.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_PORTMAP_OPTS="-Xmx512m"
-
-# Supplemental options for privileged gateways
-# By default, Hadoop uses jsvc, which needs to be told to launch a
-# server jvm.
-# export HDFS_NFS3_SECURE_EXTRA_OPTS="-jvm server"
-
-# On privileged gateways, user to run the gateway as after dropping privileges
-# This will replace the hadoop.id.str Java property in secure mode.
-# export HDFS_NFS3_SECURE_USER=nfsserver
-
-###
-# ZKFailoverController specific parameters
-###
-# Specify the JVM options to be used when starting the ZKFailoverController.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_ZKFC_OPTS=""
-
-###
-# QuorumJournalNode specific parameters
-###
-# Specify the JVM options to be used when starting the QuorumJournalNode.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_JOURNALNODE_OPTS=""
-
-###
-# HDFS Balancer specific parameters
-###
-# Specify the JVM options to be used when starting the HDFS Balancer.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_BALANCER_OPTS=""
-
-###
-# HDFS Mover specific parameters
-###
-# Specify the JVM options to be used when starting the HDFS Mover.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_MOVER_OPTS=""
-
-###
-# Router-based HDFS Federation specific parameters
-###
-# Specify the JVM options to be used when starting the RBF Routers.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_DFSROUTER_OPTS=""
-
-###
-# Ozone Manager specific parameters
-###
-# Specify the JVM options to be used when starting the Ozone Manager.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_OM_OPTS=""
-
-###
-# HDFS StorageContainerManager specific parameters
-###
-# Specify the JVM options to be used when starting the HDFS Storage Container Manager.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_STORAGECONTAINERMANAGER_OPTS=""
-
-###
-# Advanced Users Only!
-###
-
-#
-# When building Hadoop, one can add the class paths to the commands
-# via this special env var:
-# export HADOOP_ENABLE_BUILD_PATHS="true"
-
-#
-# To prevent accidents, shell commands can be (superficially) locked
-# to only allow certain users to execute certain subcommands.
-# It uses the format of (command)_(subcommand)_USER.
-#
-# For example, to limit who can execute the namenode command,
-# export HDFS_NAMENODE_USER=hdfs
-
-
-###
-# Registry DNS specific parameters
-###
-# For privileged registry DNS, user to run as after dropping privileges
-# This will replace the hadoop.id.str Java property in secure mode.
-# export HADOOP_REGISTRYDNS_SECURE_USER=yarn
-
-# Supplemental options for privileged registry DNS
-# By default, Hadoop uses jsvc, which needs to be told to launch a
-# server jvm.
-# export HADOOP_REGISTRYDNS_SECURE_EXTRA_OPTS="-jvm server"
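For reference, the conventions this file documents would typically be
exercised with a few exports; a sketch with hypothetical users, paths, and
heap sizes (not settings from this change):

    # lock daemon subcommands to service accounts via (command)_(subcommand)_USER
    export HDFS_NAMENODE_USER=hdfs
    export HDFS_DATANODE_USER=hdfs

    # define shared GC settings once and reuse them, per the note above
    export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails"
    export HDFS_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xms4g -Xmx4g"

    # site-wide classpath additions belong in a shellprofile, e.g. a
    # (hypothetical) ${HADOOP_CONF_DIR}/shellprofile.d/site.sh containing:
    #   hadoop_add_classpath "/opt/site/lib/extra.jar"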
diff --git a/hadoop-hdds/common/src/main/conf/hadoop-metrics2.properties b/hadoop-hdds/common/src/main/conf/hadoop-metrics2.properties
deleted file mode 100644
index f67bf8e..0000000
--- a/hadoop-hdds/common/src/main/conf/hadoop-metrics2.properties
+++ /dev/null
@@ -1,99 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink].[instance].[options]
-# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
-
-*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
-# default sampling period, in seconds
-*.period=10
-
-# The namenode-metrics.out will contain metrics from all contexts
-#namenode.sink.file.filename=namenode-metrics.out
-# Specifying a special sampling period for namenode:
-#namenode.sink.*.period=8
-
-#datanode.sink.file.filename=datanode-metrics.out
-
-#resourcemanager.sink.file.filename=resourcemanager-metrics.out
-
-#nodemanager.sink.file.filename=nodemanager-metrics.out
-
-#mrappmaster.sink.file.filename=mrappmaster-metrics.out
-
-#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
-
-# the following example splits metrics of different
-# contexts to different sinks (in this case files)
-#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
-#nodemanager.sink.file_jvm.context=jvm
-#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
-#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
-#nodemanager.sink.file_mapred.context=mapred
-#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
-
-#
-# Below are for sending metrics to Ganglia
-#
-# for Ganglia 3.0 support
-# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
-#
-# for Ganglia 3.1 support
-# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-
-# *.sink.ganglia.period=10
-
-# default for supportsparse is false
-# *.sink.ganglia.supportsparse=true
-
-#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Tag values to use for the ganglia prefix. If not defined no tags are used.
-# If '*' all tags are used. If specifying multiple tags separate them with
-# commas. Note that the last segment of the property name is the context name.
-#
-# A typical use of tags is separating the metrics by the HDFS rpc port
-# and HDFS service rpc port.
-# For example:
-#   With following HDFS configuration:
-#       dfs.namenode.rpc-address is set as namenodeAddress:9110
-#       dfs.namenode.servicerpc-address is set as namenodeAddress:9111
-#   If no tags are used, the following metric would be gathered:
-#       rpc.rpc.NumOpenConnections
-#   If using "*.sink.ganglia.tagsForPrefix.rpc=port",
-#   the following metrics would be gathered:
-#       rpc.rpc.port=9110.NumOpenConnections
-#       rpc.rpc.port=9111.NumOpenConnections
-#
-#*.sink.ganglia.tagsForPrefix.jvm=ProcessName
-#*.sink.ganglia.tagsForPrefix.dfs=HAState,IsOutOfSync
-#*.sink.ganglia.tagsForPrefix.rpc=port
-#*.sink.ganglia.tagsForPrefix.rpcdetailed=port
-#*.sink.ganglia.tagsForPrefix.metricssystem=*
-#*.sink.ganglia.tagsForPrefix.ugi=*
-#*.sink.ganglia.tagsForPrefix.mapred=
-
-#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
diff --git a/hadoop-hdds/common/src/main/conf/hadoop-policy.xml b/hadoop-hdds/common/src/main/conf/hadoop-policy.xml
deleted file mode 100644
index 85e4975a..0000000
--- a/hadoop-hdds/common/src/main/conf/hadoop-policy.xml
+++ /dev/null
@@ -1,275 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.user.mappings.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    user mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.ha.service.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HAService protocol used by HAAdmin to manage the
-      active and stand-by states of namenode.</description>
-  </property>
-
-  <property>
-    <name>security.router.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RouterAdmin Protocol. The ACL is a comma-separated
-    list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.
-    </description>
-  </property>
-
-  <property>
-    <name>security.zkfc.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for access to the ZK Failover Controller
-    </description>
-  </property>
-
-  <property>
-    <name>security.qjournal.service.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for QJournalProtocol, used by the NN to communicate with
-    JNs when using the QuorumJournalManager for edit logs.</description>
-  </property>
-
-  <property>
-    <name>security.interqjournal.service.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterQJournalProtocol, used by the JN to
-    communicate with other JNs
-    </description>
-  </property>
-
-  <property>
-    <name>security.mrhs.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HSClientProtocol, used by job clients to
-    communicate with the MR History Server to query job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <!-- YARN Protocols -->
-
-  <property>
-    <name>security.resourcetracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceTrackerProtocol, used by the
-    ResourceManager and NodeManager to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.resourcemanager-administration.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceManagerAdministrationProtocol, for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationclient.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationClientProtocol, used by the ResourceManager
-    and applications submission clients to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationmaster.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationMasterProtocol, used by the ResourceManager
-    and ApplicationMasters to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.containermanagement.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager
-    and ApplicationMasters to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.resourcelocalizer.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceLocalizer protocol, used by the NodeManager
-    and ResourceLocalizer to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for MRClientProtocol, used by job clients to
-    communicate with the MR ApplicationMaster to query job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationhistory.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationHistoryProtocol, used by the timeline
-    server and the generic history service client to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.collector-nodemanager.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for CollectorNodemanagerProtocol, used by nodemanager
-    if timeline service v2 is enabled, for the timeline collector and nodemanager
-    to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationmaster-nodemanager.applicationmaster.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationMasterProtocol, used by the Nodemanager
-        and ApplicationMasters to communicate.
-        The ACL is a comma-separated list of user and group names. The user and
-        group list is separated by a blank. For e.g. "alice,bob users,wheel".
-        A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.distributedscheduling.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DistributedSchedulingAMProtocol, used by the Nodemanager
-        and Resourcemanager to communicate.
-        The ACL is a comma-separated list of user and group names. The user and
-        group list is separated by a blank. For e.g. "alice,bob users,wheel".
-        A special value of "*" means all users are allowed.</description>
-    </property>
-</configuration>
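A usage note, not part of the deleted file: on a cluster with
hadoop.security.authorization enabled, edits to these ACLs are picked up
without a daemon restart via the refresh commands the descriptions above
allude to:

    # re-read hadoop-policy.xml on the NameNode and ResourceManager
    hdfs dfsadmin -refreshServiceAcl
    yarn rmadmin -refreshServiceAcls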
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
deleted file mode 100644
index 99972ae..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds;
-
-import org.apache.hadoop.hdds.utils.db.DBProfile;
-
-/**
- * This class contains constants for configuration keys and default values
- * used in hdds.
- */
-public final class HddsConfigKeys {
-
-  public static final String HDDS_HEARTBEAT_INTERVAL =
-      "hdds.heartbeat.interval";
-  public static final String HDDS_HEARTBEAT_INTERVAL_DEFAULT =
-      "30s";
-  public static final String HDDS_NODE_REPORT_INTERVAL =
-      "hdds.node.report.interval";
-  public static final String HDDS_NODE_REPORT_INTERVAL_DEFAULT =
-      "60s";
-  public static final String HDDS_CONTAINER_REPORT_INTERVAL =
-      "hdds.container.report.interval";
-  public static final String HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT =
-      "60s";
-  public static final String HDDS_PIPELINE_REPORT_INTERVAL =
-          "hdds.pipeline.report.interval";
-  public static final String HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT =
-          "60s";
-  public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL =
-      "hdds.command.status.report.interval";
-  public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT =
-      "60s";
-  public static final String HDDS_CONTAINER_ACTION_MAX_LIMIT =
-      "hdds.container.action.max.limit";
-  public static final int HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT =
-      20;
-  public static final String HDDS_PIPELINE_ACTION_MAX_LIMIT =
-      "hdds.pipeline.action.max.limit";
-  public static final int HDDS_PIPELINE_ACTION_MAX_LIMIT_DEFAULT =
-      20;
-  // Configuration to allow volume choosing policy.
-  public static final String HDDS_DATANODE_VOLUME_CHOOSING_POLICY =
-      "hdds.datanode.volume.choosing.policy";
-  // DB PKIProfile used by ROCKDB instances.
-  public static final String HDDS_DB_PROFILE = "hdds.db.profile";
-  public static final DBProfile HDDS_DEFAULT_DB_PROFILE = DBProfile.DISK;
-  // Once a container usage crosses this threshold, it is eligible for
-  // closing.
-  public static final String HDDS_CONTAINER_CLOSE_THRESHOLD =
-      "hdds.container.close.threshold";
-  public static final float HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
-  public static final String HDDS_SCM_SAFEMODE_ENABLED =
-      "hdds.scm.safemode.enabled";
-
-  public static final boolean HDDS_SCM_SAFEMODE_ENABLED_DEFAULT = true;
-  public static final String HDDS_SCM_SAFEMODE_MIN_DATANODE =
-      "hdds.scm.safemode.min.datanode";
-  public static final int HDDS_SCM_SAFEMODE_MIN_DATANODE_DEFAULT = 1;
-
-  public static final String
-      HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT =
-      "hdds.scm.wait.time.after.safemode.exit";
-
-  public static final String
-      HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT_DEFAULT = "5m";
-
-  public static final String HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK =
-      "hdds.scm.safemode.pipeline-availability.check";
-  public static final boolean
-      HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT = false;
-
-  // % of containers which should have at least one reported replica
-  // before SCM comes out of safe mode.
-  public static final String HDDS_SCM_SAFEMODE_THRESHOLD_PCT =
-      "hdds.scm.safemode.threshold.pct";
-  public static final double HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT = 0.99;
-
-
-  // percentage of healthy pipelines, where all 3 datanodes are reported in the
-  // pipeline.
-  public static final String HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT =
-      "hdds.scm.safemode.healthy.pipelie.pct";
-  public static final double
-      HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT_DEFAULT = 0.10;
-
-  public static final String HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT =
-      "hdds.scm.safemode.atleast.one.node.reported.pipeline.pct";
-  public static final double
-      HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT_DEFAULT = 0.90;
-
-  public static final String HDDS_LOCK_MAX_CONCURRENCY =
-      "hdds.lock.max.concurrency";
-  public static final int HDDS_LOCK_MAX_CONCURRENCY_DEFAULT = 100;
-  // This configuration setting is used as a fallback location by all
-  // Ozone/HDDS services for their metadata. It is useful as a single
-  // config point for test/PoC clusters.
-  //
-  // In any real cluster where performance matters, the SCM, OM and DN
-  // metadata locations must be configured explicitly.
-  public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs";
-
-  public static final String HDDS_PROMETHEUS_ENABLED =
-      "hdds.prometheus.endpoint.enabled";
-
-  public static final String HDDS_PROFILER_ENABLED =
-      "hdds.profiler.endpoint.enabled";
-
-  public static final String HDDS_KEY_LEN = "hdds.key.len";
-  public static final int HDDS_DEFAULT_KEY_LEN = 2048;
-  public static final String HDDS_KEY_ALGORITHM = "hdds.key.algo";
-  public static final String HDDS_DEFAULT_KEY_ALGORITHM = "RSA";
-  public static final String HDDS_SECURITY_PROVIDER = "hdds.security.provider";
-  public static final String HDDS_DEFAULT_SECURITY_PROVIDER = "BC";
-  public static final String HDDS_KEY_DIR_NAME = "hdds.key.dir.name";
-  public static final String HDDS_KEY_DIR_NAME_DEFAULT = "keys";
-  // TODO : Talk to StorageIO classes and see if they can return a secure
-  // storage location for each node.
-  public static final String HDDS_METADATA_DIR_NAME = "hdds.metadata.dir";
-  public static final String HDDS_PRIVATE_KEY_FILE_NAME =
-      "hdds.priv.key.file.name";
-  public static final String HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT = "private.pem";
-  public static final String HDDS_PUBLIC_KEY_FILE_NAME = "hdds.public.key.file"
-      + ".name";
-  public static final String HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT = "public.pem";
-
-  public static final String HDDS_BLOCK_TOKEN_EXPIRY_TIME =
-      "hdds.block.token.expiry.time";
-  public static final String HDDS_BLOCK_TOKEN_EXPIRY_TIME_DEFAULT = "1d";
-  /**
-   * Maximum duration of certificates issued by SCM, including self-signed
-   * roots. The formats accepted are based on the ISO-8601 duration format
-   * PnDTnHnMn.nS. The default value is P1865D, roughly five years.
-   */
-  public static final String HDDS_X509_MAX_DURATION = "hdds.x509.max.duration";
-  // Limit certificate duration to a max value of roughly 5 years.
-  public static final String HDDS_X509_MAX_DURATION_DEFAULT = "P1865D";
-  public static final String HDDS_X509_SIGNATURE_ALGO =
-      "hdds.x509.signature.algorithm";
-  public static final String HDDS_X509_SIGNATURE_ALGO_DEFAULT = "SHA256withRSA";
-  public static final String HDDS_BLOCK_TOKEN_ENABLED =
-      "hdds.block.token.enabled";
-  public static final boolean HDDS_BLOCK_TOKEN_ENABLED_DEFAULT = false;
-
-  public static final String HDDS_X509_DIR_NAME = "hdds.x509.dir.name";
-  public static final String HDDS_X509_DIR_NAME_DEFAULT = "certs";
-  public static final String HDDS_X509_FILE_NAME = "hdds.x509.file.name";
-  public static final String HDDS_X509_FILE_NAME_DEFAULT = "certificate.crt";
-
-  /**
-   * Default duration of certificates issued by the SCM CA.
-   * The formats accepted are based on the ISO-8601 duration format
-   * PnDTnHnMn.nS. The default value is one year, written as P365D.
-   */
-  public static final String HDDS_X509_DEFAULT_DURATION = "hdds.x509.default" +
-      ".duration";
-  // Default certificate duration is one year.
-  public static final String HDDS_X509_DEFAULT_DURATION_DEFAULT = "P365D";
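-  // Illustration (editor's sketch; the production parser may differ, but the
-  // format matches java.time semantics):
-  //   java.time.Duration.parse("P365D").toDays()  == 365
-  //   java.time.Duration.parse("P1865D").toDays() == 1865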
-
-  /**
-   * Do not instantiate.
-   */
-  private HddsConfigKeys() {
-  }
-
-  // Enable TLS for GRPC clients/server in ozone.
-  public static final String HDDS_GRPC_TLS_ENABLED = "hdds.grpc.tls.enabled";
-  public static final boolean HDDS_GRPC_TLS_ENABLED_DEFAULT = false;
-
-  // TLS provider for gRPC; the default is OPENSSL for better performance.
-  public static final String HDDS_GRPC_TLS_PROVIDER = "hdds.grpc.tls.provider";
-  public static final String HDDS_GRPC_TLS_PROVIDER_DEFAULT = "OPENSSL";
-
-  // Test-only setting for using a test signed certificate; the authority is
-  // assumed to be localhost.
-  public static final String HDDS_GRPC_TLS_TEST_CERT = "hdds.grpc.tls" +
-      ".test.cert";
-  public static final boolean HDDS_GRPC_TLS_TEST_CERT_DEFAULT = false;
-
-  // Comma-separated ACLs (users, groups) allowing clients to access the
-  // datanode container protocol. When hadoop.security.authorization is true,
-  // this needs to be set in hadoop-policy.xml; "*" allows all users/groups.
-  public static final String
-      HDDS_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL =
-      "hdds.security.client.datanode.container.protocol.acl";
-
-  // Comma-separated ACLs (users, groups) allowing clients to access the
-  // SCM container protocol. When hadoop.security.authorization is true,
-  // this needs to be set in hadoop-policy.xml; "*" allows all users/groups.
-  public static final String HDDS_SECURITY_CLIENT_SCM_CONTAINER_PROTOCOL_ACL =
-      "hdds.security.client.scm.container.protocol.acl";
-
-  // Comma-separated ACLs (users, groups) allowing clients to access the
-  // SCM block protocol. When hadoop.security.authorization is true,
-  // this needs to be set in hadoop-policy.xml; "*" allows all users/groups.
-  public static final String HDDS_SECURITY_CLIENT_SCM_BLOCK_PROTOCOL_ACL =
-      "hdds.security.client.scm.block.protocol.acl";
-
-  // Comma-separated ACLs (users, groups) allowing clients to access the
-  // SCM certificate protocol. When hadoop.security.authorization is true,
-  // this needs to be set in hadoop-policy.xml; "*" allows all users/groups.
-  public static final String HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL =
-      "hdds.security.client.scm.certificate.protocol.acl";
-
-  // Determines whether the Container Chunk Manager writes user data to disk.
-  // Set to false only for specific performance tests.
-  public static final String HDDS_CONTAINER_PERSISTDATA =
-      "hdds.container.chunk.persistdata";
-  public static final boolean HDDS_CONTAINER_PERSISTDATA_DEFAULT = true;
-
-  public static final String HDDS_CONTAINER_SCRUB_ENABLED =
-      "hdds.container.scrub.enabled";
-  public static final boolean HDDS_CONTAINER_SCRUB_ENABLED_DEFAULT = false;
-
-  public static final String HDDS_DATANODE_HTTP_ENABLED_KEY =
-      "hdds.datanode.http.enabled";
-  public static final String HDDS_DATANODE_HTTP_BIND_HOST_KEY =
-      "hdds.datanode.http-bind-host";
-  public static final String HDDS_DATANODE_HTTPS_BIND_HOST_KEY =
-      "hdds.datanode.https-bind-host";
-  public static final String HDDS_DATANODE_HTTP_ADDRESS_KEY =
-      "hdds.datanode.http-address";
-  public static final String HDDS_DATANODE_HTTPS_ADDRESS_KEY =
-      "hdds.datanode.https-address";
-
-  public static final String HDDS_DATANODE_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
-  public static final int HDDS_DATANODE_HTTP_BIND_PORT_DEFAULT = 9882;
-  public static final int HDDS_DATANODE_HTTPS_BIND_PORT_DEFAULT = 9883;
-  public static final String
-      HDDS_DATANODE_HTTP_KERBEROS_PRINCIPAL_KEY =
-      "hdds.datanode.http.kerberos.principal";
-  public static final String
-      HDDS_DATANODE_HTTP_KERBEROS_KEYTAB_FILE_KEY =
-      "hdds.datanode.http.kerberos.keytab";
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
deleted file mode 100644
index b244b8c..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds;
-
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * HDDS Id generator.
- */
-public final class HddsIdFactory {
-  private HddsIdFactory() {
-  }
-
-  private static final AtomicLong LONG_COUNTER = new AtomicLong(
-      System.currentTimeMillis());
-
-  /**
-   * Returns an incrementing long. This class doesn't persist the initial
-   * value, so ids generated after a restart may collide with previously
-   * generated ids.
-   *
-   * @return long
-   */
-  public static long getLongId() {
-    return LONG_COUNTER.incrementAndGet();
-  }
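-
-  // Usage sketch (editor's illustration, not part of the original file):
-  //   long requestId = HddsIdFactory.getLongId(); // unique within a process
-  //   UUID traceId = HddsIdFactory.getUUId();     // globally unique
-  // Note the javadoc caveat: long ids are not persisted, so values generated
-  // after a restart may collide.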
-
-  /**
-   * Returns a uuid.
-   *
-   * @return UUID.
-   */
-  public static UUID getUUId() {
-    return UUID.randomUUID();
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
deleted file mode 100644
index d7b20fd..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ /dev/null
@@ -1,505 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds;
-
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.util.Calendar;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Optional;
-import java.util.TimeZone;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.source.JvmMetrics;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.net.DNS;
-import org.apache.hadoop.net.NetUtils;
-
-import com.google.common.net.HostAndPort;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
-
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * HDDS specific stateless utility functions.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Stable
-public final class HddsUtils {
-
-
-  private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class);
-
-  /**
-   * The service ID of the solitary Ozone SCM service.
-   */
-  public static final String OZONE_SCM_SERVICE_ID = "OzoneScmService";
-  public static final String OZONE_SCM_SERVICE_INSTANCE_ID =
-      "OzoneScmServiceInstance";
-  private static final TimeZone UTC_ZONE = TimeZone.getTimeZone("UTC");
-
-
-  private static final int NO_PORT = -1;
-
-  private HddsUtils() {
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM client endpoint.
-   */
-  public static InetSocketAddress getScmAddressForClients(Configuration conf) {
-    Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      // Fallback to Ozone SCM names.
-      Collection<InetSocketAddress> scmAddresses = getSCMAddresses(conf);
-      if (scmAddresses.size() > 1) {
-        throw new IllegalArgumentException(
-            ScmConfigKeys.OZONE_SCM_NAMES +
-                " must contain a single hostname. Multiple SCM hosts are " +
-                "currently unsupported");
-      }
-      host = Optional.of(scmAddresses.iterator().next().getHostName());
-    }
-
-    if (!host.isPresent()) {
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY + " must be defined. See"
-              + " https://wiki.apache.org/hadoop/Ozone#Configuration for "
-              + "details"
-              + " on configuring Ozone.");
-    }
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(host.get() + ":" + port
-        .orElse(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
-  }
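-
-  // Editor's illustration (hostnames hypothetical):
-  //   ozone.scm.client.address = scm1.example.com       -> host plus the
-  //       default client port (OZONE_SCM_CLIENT_PORT_DEFAULT)
-  //   ozone.scm.client.address = scm1.example.com:9876  -> explicit port wins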
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM for block service. If
-   * {@link ScmConfigKeys#OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY} is not defined
-   * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used. If neither
-   * is defined then {@link ScmConfigKeys#OZONE_SCM_NAMES} is used.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM block client endpoint.
-   * @throws IllegalArgumentException if configuration is not defined.
-   */
-  public static InetSocketAddress getScmAddressForBlockClients(
-      Configuration conf) {
-    Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      host = getHostNameFromConfigKeys(conf,
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-    }
-
-    if (!host.isPresent()) {
-      // Fallback to Ozone SCM names.
-      Collection<InetSocketAddress> scmAddresses = getSCMAddresses(conf);
-      if (scmAddresses.size() > 1) {
-        throw new IllegalArgumentException(
-            ScmConfigKeys.OZONE_SCM_NAMES +
-                " must contain a single hostname. Multiple SCM hosts are " +
-                "currently unsupported");
-      }
-      host = Optional.of(scmAddresses.iterator().next().getHostName());
-    }
-
-    if (!host.isPresent()) {
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY
-              + " must be defined. See"
-              + " https://wiki.apache.org/hadoop/Ozone#Configuration"
-              + " for details on configuring Ozone.");
-    }
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(host.get() + ":" + port
-        .orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT));
-  }
-
-  /**
-   * Create an SCM security client.
-   * @param conf    - Ozone configuration.
-   *
-   * @return {@link SCMSecurityProtocol}
-   * @throws IOException
-   */
-  public static SCMSecurityProtocolClientSideTranslatorPB getScmSecurityClient(
-      OzoneConfiguration conf) throws IOException {
-    RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(SCMSecurityProtocolPB.class);
-    InetSocketAddress address =
-        getScmAddressForSecurityProtocol(conf);
-    RetryPolicy retryPolicy =
-        RetryPolicies.retryForeverWithFixedSleep(
-            1000, TimeUnit.MILLISECONDS);
-    SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient =
-        new SCMSecurityProtocolClientSideTranslatorPB(
-            RPC.getProtocolProxy(SCMSecurityProtocolPB.class, scmVersion,
-                address, UserGroupInformation.getCurrentUser(),
-                conf, NetUtils.getDefaultSocketFactory(conf),
-                Client.getRpcTimeout(conf), retryPolicy).getProxy());
-    return scmSecurityClient;
-  }
-
-  /**
-   * Retrieve the hostname, trying the supplied config keys in order.
-   * Each config value may be absent; if present, it is expected to be in
-   * host:port format (the :port part is optional).
-   *
-   * @param conf  - Conf
-   * @param keys a list of configuration key names.
-   *
-   * @return first hostname component found from the given keys, or absent.
-   * @throws IllegalArgumentException if any values are not in the 'host'
-   *             or host:port format.
-   */
-  public static Optional<String> getHostNameFromConfigKeys(Configuration conf,
-      String... keys) {
-    for (final String key : keys) {
-      final String value = conf.getTrimmed(key);
-      final Optional<String> hostName = getHostName(value);
-      if (hostName.isPresent()) {
-        return hostName;
-      }
-    }
-    return Optional.empty();
-  }
-
-  /**
-   * Gets the hostname, or indicates that it is absent.
-   * @param value host or host:port
-   * @return hostname
-   */
-  public static Optional<String> getHostName(String value) {
-    if ((value == null) || value.isEmpty()) {
-      return Optional.empty();
-    }
-    String hostname = value.replaceAll("\\:[0-9]+$", "");
-    if (hostname.length() == 0) {
-      return Optional.empty();
-    } else {
-      return Optional.of(hostname);
-    }
-  }
-
-  /**
-   * Gets the port if there is one, or absent otherwise.
-   * @param value  String in host:port format.
-   * @return Port
-   */
-  public static Optional<Integer> getHostPort(String value) {
-    if ((value == null) || value.isEmpty()) {
-      return Optional.empty();
-    }
-    int port = HostAndPort.fromString(value).getPortOrDefault(NO_PORT);
-    if (port == NO_PORT) {
-      return Optional.empty();
-    } else {
-      return Optional.of(port);
-    }
-  }
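-
-  // Parsing examples (editor's sketch; hostnames hypothetical):
-  //   getHostName("scm1.example.com:9876") -> Optional.of("scm1.example.com")
-  //   getHostPort("scm1.example.com:9876") -> Optional.of(9876)
-  //   getHostPort("scm1.example.com")      -> Optional.empty()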
-
-  /**
-   * Retrieve the port number, trying the supplied config keys in order.
-   * Each config value may be absent; if present, it is expected to be in
-   * host:port format (the :port part is optional).
-   *
-   * @param conf Conf
-   * @param keys a list of configuration key names.
-   *
-   * @return first port number component found from the given keys, or absent.
-   * @throws IllegalArgumentException if any values are not in the 'host'
-   *             or host:port format.
-   */
-  public static Optional<Integer> getPortNumberFromConfigKeys(
-      Configuration conf, String... keys) {
-    for (final String key : keys) {
-      final String value = conf.getTrimmed(key);
-      final Optional<Integer> hostPort = getHostPort(value);
-      if (hostPort.isPresent()) {
-        return hostPort;
-      }
-    }
-    return Optional.empty();
-  }
-
-  /**
-   * Retrieve the socket addresses of all storage container managers.
-   *
-   * @param conf
-   * @return A collection of SCM addresses
-   * @throws IllegalArgumentException If the configuration is invalid
-   */
-  public static Collection<InetSocketAddress> getSCMAddresses(
-      Configuration conf) throws IllegalArgumentException {
-    Collection<InetSocketAddress> addresses =
-        new HashSet<InetSocketAddress>();
-    Collection<String> names =
-        conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES);
-    if (names == null || names.isEmpty()) {
-      throw new IllegalArgumentException(ScmConfigKeys.OZONE_SCM_NAMES
-          + " need to be a set of valid DNS names or IP addresses."
-          + " Null or empty address list found.");
-    }
-
-    final Optional<Integer> defaultPort = Optional
-        .of(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT);
-    for (String address : names) {
-      Optional<String> hostname = getHostName(address);
-      if (!hostname.isPresent()) {
-        throw new IllegalArgumentException("Invalid hostname for SCM: "
-            + hostname);
-      }
-      Optional<Integer> port = getHostPort(address);
-      InetSocketAddress addr = NetUtils.createSocketAddr(hostname.get(),
-          port.orElse(defaultPort.get()));
-      addresses.add(addr);
-    }
-    return addresses;
-  }
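-
-  // Editor's illustration (hostname hypothetical):
-  //   ozone.scm.names = scm1.example.com:9876
-  // yields a single address; an entry without a port falls back to
-  // ScmConfigKeys.OZONE_SCM_DEFAULT_PORT.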
-
-  public static boolean isHddsEnabled(Configuration conf) {
-    return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT);
-  }
-
-
-  /**
-   * Returns the hostname for this datanode. If the hostname is not
-   * explicitly configured in the given config, then it is determined
-   * via the DNS class.
-   *
-   * @param conf Configuration
-   *
-   * @return the hostname (NB: may not be a FQDN)
-   * @throws UnknownHostException if the dfs.datanode.dns.interface
-   *    option is used and the hostname can not be determined
-   */
-  public static String getHostName(Configuration conf)
-      throws UnknownHostException {
-    String name = conf.get(DFS_DATANODE_HOST_NAME_KEY);
-    if (name == null) {
-      String dnsInterface = conf.get(
-          CommonConfigurationKeys.HADOOP_SECURITY_DNS_INTERFACE_KEY);
-      String nameServer = conf.get(
-          CommonConfigurationKeys.HADOOP_SECURITY_DNS_NAMESERVER_KEY);
-      boolean fallbackToHosts = false;
-
-      if (dnsInterface == null) {
-        // Try the legacy configuration keys.
-        dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY);
-        nameServer = conf.get(DFS_DATANODE_DNS_NAMESERVER_KEY);
-      } else {
-        // If HADOOP_SECURITY_DNS_* is set then also attempt hosts file
-        // resolution if DNS fails. We will not use hosts file resolution
-        // by default to avoid breaking existing clusters.
-        fallbackToHosts = true;
-      }
-
-      name = DNS.getDefaultHost(dnsInterface, nameServer, fallbackToHosts);
-    }
-    return name;
-  }
-
-  /**
-   * Checks if the container command is read only or not.
-   * @param proto ContainerCommand Request proto
-   * @return true if it is read-only, false otherwise.
-   */
-  public static boolean isReadOnly(
-      ContainerProtos.ContainerCommandRequestProto proto) {
-    switch (proto.getCmdType()) {
-    case ReadContainer:
-    case ReadChunk:
-    case ListBlock:
-    case GetBlock:
-    case GetSmallFile:
-    case ListContainer:
-    case ListChunk:
-    case GetCommittedBlockLength:
-      return true;
-    case CloseContainer:
-    case WriteChunk:
-    case UpdateContainer:
-    case CompactChunk:
-    case CreateContainer:
-    case DeleteChunk:
-    case DeleteContainer:
-    case DeleteBlock:
-    case PutBlock:
-    case PutSmallFile:
-    default:
-      return false;
-    }
-  }
-
-  /**
-   * Register the provided MBean with additional JMX ObjectName properties.
-   * If additional properties are not supported then fallback to registering
-   * without properties.
-   *
-   * @param serviceName - see {@link MBeans#register}
-   * @param mBeanName - see {@link MBeans#register}
-   * @param jmxProperties - additional JMX ObjectName properties.
-   * @param mBean - the MBean to register.
-   * @return the name used to register the MBean.
-   */
-  public static ObjectName registerWithJmxProperties(
-      String serviceName, String mBeanName, Map<String, String> jmxProperties,
-      Object mBean) {
-    try {
-
-      // Check support for registering with additional properties.
-      final Method registerMethod = MBeans.class.getMethod(
-          "register", String.class, String.class,
-          Map.class, Object.class);
-
-      return (ObjectName) registerMethod.invoke(
-          null, serviceName, mBeanName, jmxProperties, mBean);
-
-    } catch (NoSuchMethodException | IllegalAccessException |
-        InvocationTargetException e) {
-
-      // Fallback
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Registering MBean {} without additional properties {}",
-            mBeanName, jmxProperties);
-      }
-      return MBeans.register(serviceName, mBeanName, mBean);
-    }
-  }
-
-  /**
-   * Get the current UTC time in milliseconds.
-   * @return the current UTC time in milliseconds.
-   */
-  public static long getUtcTime() {
-    return Calendar.getInstance(UTC_ZONE).getTimeInMillis();
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM for
-   * {@link org.apache.hadoop.hdds.protocol.SCMSecurityProtocol}. If
-   * {@link ScmConfigKeys#OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY} is not defined
-   * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used. If neither
-   * is defined then {@link ScmConfigKeys#OZONE_SCM_NAMES} is used.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM security service endpoint.
-   * @throws IllegalArgumentException if configuration is not defined.
-   */
-  public static InetSocketAddress getScmAddressForSecurityProtocol(
-      Configuration conf) {
-    Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      host = getHostNameFromConfigKeys(conf,
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-    }
-
-    if (!host.isPresent()) {
-      // Fallback to Ozone SCM names.
-      Collection<InetSocketAddress> scmAddresses = getSCMAddresses(conf);
-      if (scmAddresses.size() > 1) {
-        throw new IllegalArgumentException(
-            ScmConfigKeys.OZONE_SCM_NAMES +
-                " must contain a single hostname. Multiple SCM hosts are " +
-                "currently unsupported");
-      }
-      host = Optional.of(scmAddresses.iterator().next().getHostName());
-    }
-
-    if (!host.isPresent()) {
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY
-              + " must be defined. See"
-              + " https://wiki.apache.org/hadoop/Ozone#Configuration"
-              + " for details on configuring Ozone.");
-    }
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY);
-
-    return NetUtils.createSocketAddr(host.get() + ":" + port
-        .orElse(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT));
-  }
-
-  /**
-   * Initialize the Hadoop metrics system for Ozone servers.
-   * @param configuration OzoneConfiguration to use.
-   * @param serverName    The logical name of the server components.
-   * @return the initialized MetricsSystem.
-   */
-  public static MetricsSystem initializeMetrics(
-      OzoneConfiguration configuration, String serverName) {
-    MetricsSystem metricsSystem = DefaultMetricsSystem.initialize(serverName);
-    JvmMetrics.create(serverName,
-        configuration.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
-        DefaultMetricsSystem.instance());
-    return metricsSystem;
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
deleted file mode 100644
index 372828b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.fs.Path;
-
-import com.google.common.annotations.VisibleForTesting;
-import picocli.CommandLine;
-import picocli.CommandLine.ExecutionException;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.RunLast;
-
-/**
- * This is a generic parent class for all the ozone related cli tools.
- */
-public class GenericCli implements Callable<Void>, GenericParentCommand {
-
-  @Option(names = {"--verbose"},
-      description = "More verbose output. Show the stack trace of the errors.")
-  private boolean verbose;
-
-  @Option(names = {"-D", "--set"})
-  private Map<String, String> configurationOverrides = new HashMap<>();
-
-  @Option(names = {"-conf"})
-  private String configurationPath;
-
-  private final CommandLine cmd;
-
-  public GenericCli() {
-    cmd = new CommandLine(this);
-  }
-
-  public void run(String[] argv) {
-    try {
-      execute(argv);
-    } catch (ExecutionException ex) {
-      printError(ex.getCause() == null ? ex : ex.getCause());
-      System.exit(-1);
-    }
-  }
-
-  @VisibleForTesting
-  public void execute(String[] argv) {
-    cmd.parseWithHandler(new RunLast(), argv);
-  }
-
-  protected void printError(Throwable error) {
-    //The message could be null in case of an NPE. This is unexpected, so we
-    //print out the stack trace.
-    if (verbose || error.getMessage() == null
-        || error.getMessage().length() == 0) {
-      error.printStackTrace(System.err);
-    } else {
-      System.err.println(error.getMessage().split("\n")[0]);
-    }
-  }
-
-  @Override
-  public Void call() throws Exception {
-    throw new MissingSubcommandException(cmd);
-  }
-
-  @Override
-  public OzoneConfiguration createOzoneConfiguration() {
-    OzoneConfiguration ozoneConf = new OzoneConfiguration();
-    if (configurationPath != null) {
-      ozoneConf.addResource(new Path(configurationPath));
-    }
-    if (configurationOverrides != null) {
-      for (Entry<String, String> entry : configurationOverrides.entrySet()) {
-        ozoneConf.set(entry.getKey(), entry.getValue());
-      }
-    }
-    return ozoneConf;
-  }
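-
-  // Invocation sketch (editor's illustration; launcher and subcommand names
-  // are hypothetical):
-  //   ozone sometool --verbose -conf /etc/hadoop/ozone-site.xml \
-  //       -D ozone.metadata.dirs=/var/lib/ozone/meta
-  // -conf adds an extra resource file; each -D/--set entry overrides one key.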
-
-  @VisibleForTesting
-  public picocli.CommandLine getCmd() {
-    return cmd;
-  }
-
-  @Override
-  public boolean isVerbose() {
-    return verbose;
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
deleted file mode 100644
index 6abad3e..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-/**
- * Interface to access the higher level parameters.
- */
-public interface GenericParentCommand {
-
-  boolean isVerbose();
-
-  OzoneConfiguration createOzoneConfiguration();
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java
deleted file mode 100644
index 2f4ac4f..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import org.apache.hadoop.hdds.utils.HddsVersionInfo;
-
-import picocli.CommandLine.IVersionProvider;
-
-/**
- * Version provider for the CLI interface.
- */
-public class HddsVersionProvider implements IVersionProvider {
-  @Override
-  public String[] getVersion() throws Exception {
-    String[] result = new String[] {
-        HddsVersionInfo.HDDS_VERSION_INFO.getBuildVersion()
-    };
-    return result;
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
deleted file mode 100644
index 7594765..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import picocli.CommandLine;
-
-/**
- * Exception to throw if a subcommand is required but not selected.
- */
-public class MissingSubcommandException extends CommandLine.ParameterException {
-
-  public MissingSubcommandException(CommandLine cmd) {
-    super(cmd, "Incomplete command");
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java
deleted file mode 100644
index 8dcc1d1..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Generic helper classes to instantiate picocli-based CLI tools.
- */
-package org.apache.hadoop.hdds.cli;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
deleted file mode 100644
index 07aa536..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.client;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.util.Objects;
-
-/**
- * BlockID of Ozone (containerID + localID + blockCommitSequenceId).
- */
-public class BlockID {
-
-  private ContainerBlockID containerBlockID;
-  private long blockCommitSequenceId;
-
-  public BlockID(long containerID, long localID) {
-    this(containerID, localID, 0);
-  }
-
-  private BlockID(long containerID, long localID, long bcsID) {
-    containerBlockID = new ContainerBlockID(containerID, localID);
-    blockCommitSequenceId = bcsID;
-  }
-
-  public BlockID(ContainerBlockID containerBlockID) {
-    this(containerBlockID, 0);
-  }
-
-  private BlockID(ContainerBlockID containerBlockID, long bcsId) {
-    this.containerBlockID = containerBlockID;
-    blockCommitSequenceId = bcsId;
-  }
-
-  public long getContainerID() {
-    return containerBlockID.getContainerID();
-  }
-
-  public long getLocalID() {
-    return containerBlockID.getLocalID();
-  }
-
-  public long getBlockCommitSequenceId() {
-    return blockCommitSequenceId;
-  }
-
-  public void setBlockCommitSequenceId(long blockCommitSequenceId) {
-    this.blockCommitSequenceId = blockCommitSequenceId;
-  }
-
-  public ContainerBlockID getContainerBlockID() {
-    return containerBlockID;
-  }
-
-  public void setContainerBlockID(ContainerBlockID containerBlockID) {
-    this.containerBlockID = containerBlockID;
-  }
-
-  @Override
-  public String toString() {
-    return new StringBuilder().append(getContainerBlockID().toString())
-        .append(" bcsId: ")
-        .append(blockCommitSequenceId)
-        .toString();
-  }
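-
-  // Example rendering (editor's sketch): a block in container 5 with local id
-  // 11 and bcsId 0 prints as "conID: 5 locID: 11 bcsId: 0".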
-
-  public ContainerProtos.DatanodeBlockID getDatanodeBlockIDProtobuf() {
-    return ContainerProtos.DatanodeBlockID.newBuilder().
-        setContainerID(containerBlockID.getContainerID())
-        .setLocalID(containerBlockID.getLocalID())
-        .setBlockCommitSequenceId(blockCommitSequenceId).build();
-  }
-
-  public static BlockID getFromProtobuf(
-      ContainerProtos.DatanodeBlockID blockID) {
-    return new BlockID(blockID.getContainerID(),
-        blockID.getLocalID(), blockID.getBlockCommitSequenceId());
-  }
-
-  public HddsProtos.BlockID getProtobuf() {
-    return HddsProtos.BlockID.newBuilder()
-        .setContainerBlockID(containerBlockID.getProtobuf())
-        .setBlockCommitSequenceId(blockCommitSequenceId).build();
-  }
-
-  public static BlockID getFromProtobuf(HddsProtos.BlockID blockID) {
-    return new BlockID(
-        ContainerBlockID.getFromProtobuf(blockID.getContainerBlockID()),
-        blockID.getBlockCommitSequenceId());
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    BlockID blockID = (BlockID) o;
-    return containerBlockID.equals(blockID.getContainerBlockID())
-        && blockCommitSequenceId == blockID.getBlockCommitSequenceId();
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects
-        .hash(containerBlockID.getContainerID(), containerBlockID.getLocalID(),
-            blockCommitSequenceId);
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java
deleted file mode 100644
index 1e30cc3..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.client;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.util.Objects;
-
-/**
- * BlockID returned by SCM during allocation of block (containerID + localID).
- */
-public class ContainerBlockID {
-  private long containerID;
-  private long localID;
-
-  public ContainerBlockID(long containerID, long localID) {
-    this.containerID = containerID;
-    this.localID = localID;
-  }
-
-  public long getContainerID() {
-    return containerID;
-  }
-
-  public long getLocalID() {
-    return localID;
-  }
-
-  @Override
-  public String toString() {
-    return new StringBuilder()
-        .append("conID: ")
-        .append(containerID)
-        .append(" locID: ")
-        .append(localID).toString();
-  }
-
-  public HddsProtos.ContainerBlockID getProtobuf() {
-    return HddsProtos.ContainerBlockID.newBuilder().
-        setContainerID(containerID).setLocalID(localID).build();
-  }
-
-  public static ContainerBlockID getFromProtobuf(
-      HddsProtos.ContainerBlockID containerBlockID) {
-    return new ContainerBlockID(containerBlockID.getContainerID(),
-        containerBlockID.getLocalID());
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    ContainerBlockID blockID = (ContainerBlockID) o;
-    return containerID == blockID.containerID && localID == blockID.localID;
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(containerID, localID);
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
deleted file mode 100644
index 59708a9..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-
-
-/**
- * Represents an OzoneQuota object that can be applied to
- * a storage volume.
- */
-public class OzoneQuota {
-
-  public static final String OZONE_QUOTA_BYTES = "BYTES";
-  public static final String OZONE_QUOTA_MB = "MB";
-  public static final String OZONE_QUOTA_GB = "GB";
-  public static final String OZONE_QUOTA_TB = "TB";
-
-  private Units unit;
-  private long size;
-
-  /** Quota Units.*/
-  public enum Units {UNDEFINED, BYTES, KB, MB, GB, TB}
-
-  /**
-   * Returns size.
-   *
-   * @return long
-   */
-  public long getSize() {
-    return size;
-  }
-
-  /**
-   * Returns Units.
-   *
-   * @return Unit (BYTES, KB, MB, GB, TB or UNDEFINED)
-   */
-  public Units getUnit() {
-    return unit;
-  }
-
-  /**
-   * Constructs a default Quota object.
-   */
-  public OzoneQuota() {
-    this.size = 0;
-    this.unit = Units.UNDEFINED;
-  }
-
-  /**
-   * Constructor for Ozone Quota.
-   *
-   * @param size Long Size
-   * @param unit MB, GB  or TB
-   */
-  public OzoneQuota(long size, Units unit) {
-    this.size = size;
-    this.unit = unit;
-  }
-
-  /**
-   * Formats a quota as a string.
-   *
-   * @param quota the quota to format
-   * @return string representation of quota
-   */
-  public static String formatQuota(OzoneQuota quota) {
-    return String.valueOf(quota.size) + quota.unit;
-  }
-
-  /**
-   * Parses a user provided string and returns the
-   * Quota Object.
-   *
-   * @param quotaString Quota String
-   *
-   * @return OzoneQuota object
-   *
-   * @throws IllegalArgumentException
-   */
-  public static OzoneQuota parseQuota(String quotaString)
-      throws IllegalArgumentException {
-
-    if ((quotaString == null) || (quotaString.isEmpty())) {
-      throw new IllegalArgumentException(
-          "Quota string cannot be null or empty.");
-    }
-
-    String uppercase = quotaString.toUpperCase().replaceAll("\\s+", "");
-    String size = "";
-    int nSize;
-    Units currUnit = Units.MB;
-    boolean found = false;
-    if (uppercase.endsWith(OZONE_QUOTA_MB)) {
-      size = uppercase
-          .substring(0, uppercase.length() - OZONE_QUOTA_MB.length());
-      currUnit = Units.MB;
-      found = true;
-    }
-
-    if (uppercase.endsWith(OZONE_QUOTA_GB)) {
-      size = uppercase
-          .substring(0, uppercase.length() - OZONE_QUOTA_GB.length());
-      currUnit = Units.GB;
-      found = true;
-    }
-
-    if (uppercase.endsWith(OZONE_QUOTA_TB)) {
-      size = uppercase
-          .substring(0, uppercase.length() - OZONE_QUOTA_TB.length());
-      currUnit = Units.TB;
-      found = true;
-    }
-
-    if (uppercase.endsWith(OZONE_QUOTA_BYTES)) {
-      size = uppercase
-          .substring(0, uppercase.length() - OZONE_QUOTA_BYTES.length());
-      currUnit = Units.BYTES;
-      found = true;
-    }
-
-    if (!found) {
-      throw new IllegalArgumentException(
-          "Quota unit not recognized. Supported values are BYTES, MB, GB and " +
-              "TB.");
-    }
-
-    nSize = Integer.parseInt(size);
-    if (nSize < 0) {
-      throw new IllegalArgumentException("Quota cannot be negative.");
-    }
-
-    return new OzoneQuota(nSize, currUnit);
-  }
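-
-  // Usage sketch (editor's illustration):
-  //   OzoneQuota q = OzoneQuota.parseQuota("10 GB"); // whitespace stripped,
-  //                                                  // case-insensitive
-  //   q.getSize() == 10 && q.getUnit() == Units.GB
-  //   q.sizeInBytes() == 10L * OzoneConsts.GB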
-
-
-  /**
-   * Returns size in Bytes or -1 if there is no Quota.
-   */
-  public long sizeInBytes() {
-    switch (this.unit) {
-    case BYTES:
-      return this.getSize();
-    case MB:
-      return this.getSize() * OzoneConsts.MB;
-    case GB:
-      return this.getSize() * OzoneConsts.GB;
-    case TB:
-      return this.getSize() * OzoneConsts.TB;
-    case UNDEFINED:
-    default:
-      return -1;
-    }
-  }
-
-  /**
-   * Returns OzoneQuota corresponding to size in bytes.
-   *
-   * @param sizeInBytes size in bytes to be converted
-   *
-   * @return OzoneQuota object
-   */
-  public static OzoneQuota getOzoneQuota(long sizeInBytes) {
-    long size;
-    Units unit;
-    if (sizeInBytes % OzoneConsts.TB == 0) {
-      size = sizeInBytes / OzoneConsts.TB;
-      unit = Units.TB;
-    } else if (sizeInBytes % OzoneConsts.GB == 0) {
-      size = sizeInBytes / OzoneConsts.GB;
-      unit = Units.GB;
-    } else if (sizeInBytes % OzoneConsts.MB == 0) {
-      size = sizeInBytes / OzoneConsts.MB;
-      unit = Units.MB;
-    } else {
-      size = sizeInBytes;
-      unit = Units.BYTES;
-    }
-    return new OzoneQuota(size, unit);
-  }
-
-  @Override
-  public String toString() {
-    return size + " " + unit;
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
deleted file mode 100644
index 044bd6f..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-/**
- * The replication factor to be used while writing a key into Ozone.
- */
-public enum ReplicationFactor {
-  ONE(1),
-  THREE(3);
-
-  /**
-   * Integer representation of replication.
-   */
-  private int value;
-
-  /**
-   * Initializes ReplicationFactor with value.
-   * @param value replication value
-   */
-  ReplicationFactor(int value) {
-    this.value = value;
-  }
-
-  /**
-   * Returns enum value corresponding to the int value.
-   * @param value replication value
-   * @return ReplicationFactor
-   */
-  public static ReplicationFactor valueOf(int value) {
-    if (value == 1) {
-      return ONE;
-    }
-    if (value == 3) {
-      return THREE;
-    }
-    throw new IllegalArgumentException("Unsupported value: " + value);
-  }
-
-  public static ReplicationFactor fromProto(
-      HddsProtos.ReplicationFactor replicationFactor) {
-    if (replicationFactor == null) {
-      return null;
-    }
-    switch (replicationFactor) {
-    case ONE:
-      return ReplicationFactor.ONE;
-    case THREE:
-      return ReplicationFactor.THREE;
-    default:
-      throw new IllegalArgumentException(
-          "Unsupported ProtoBuf replication factor: " + replicationFactor);
-    }
-  }
-
-  /**
-   * Returns integer representation of ReplicationFactor.
-   * @return replication value
-   */
-  public int getValue() {
-    return value;
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java
deleted file mode 100644
index c63896e..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-/**
- * The replication type to be used while writing a key into Ozone.
- */
-public enum ReplicationType {
-  RATIS,
-  STAND_ALONE,
-  CHAINED;
-
-  public static ReplicationType fromProto(
-      HddsProtos.ReplicationType replicationType) {
-    if (replicationType == null) {
-      return null;
-    }
-    switch (replicationType) {
-    case RATIS:
-      return ReplicationType.RATIS;
-    case STAND_ALONE:
-      return ReplicationType.STAND_ALONE;
-    case CHAINED:
-      return ReplicationType.CHAINED;
-    default:
-      throw new IllegalArgumentException(
-          "Unsupported ProtoBuf replication type: " + replicationType);
-    }
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java
deleted file mode 100644
index e81f134..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-/**
- * Base property types for HDDS containers and replications.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
deleted file mode 100644
index 8beac16..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-import com.google.gson.Gson;
-import java.io.IOException;
-import java.io.Writer;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.HttpHeaders;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpServer2;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY;
-
-/**
- * A servlet to print out the running configuration data.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-@InterfaceStability.Unstable
-public class HddsConfServlet extends HttpServlet {
-
-  private static final long serialVersionUID = 1L;
-
-  protected static final String FORMAT_JSON = "json";
-  protected static final String FORMAT_XML = "xml";
-  private static final String COMMAND = "cmd";
-  private static final OzoneConfiguration OZONE_CONFIG =
-      new OzoneConfiguration();
-  private static final Logger LOG =
-      LoggerFactory.getLogger(HddsConfServlet.class);
-
-
-  /**
-   * Return the Configuration of the daemon hosting this servlet.
-   * This is populated when the HttpServer starts.
-   */
-  private Configuration getConfFromContext() {
-    Configuration conf = (Configuration) getServletContext().getAttribute(
-        HttpServer2.CONF_CONTEXT_ATTRIBUTE);
-    assert conf != null;
-    return conf;
-  }
-
-  @Override
-  public void doGet(HttpServletRequest request, HttpServletResponse response)
-      throws ServletException, IOException {
-
-    if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
-        request, response)) {
-      return;
-    }
-
-    String format = parseAcceptHeader(request);
-    if (FORMAT_XML.equals(format)) {
-      response.setContentType("text/xml; charset=utf-8");
-    } else if (FORMAT_JSON.equals(format)) {
-      response.setContentType("application/json; charset=utf-8");
-    }
-
-    String name = request.getParameter("name");
-    Writer out = response.getWriter();
-    String cmd = request.getParameter(COMMAND);
-
-    processCommand(cmd, format, request, response, out, name);
-    out.close();
-  }
-
-  private void processCommand(String cmd, String format,
-      HttpServletRequest request, HttpServletResponse response, Writer out,
-      String name)
-      throws IOException {
-    try {
-      if (cmd == null) {
-        if (FORMAT_XML.equals(format)) {
-          response.setContentType("text/xml; charset=utf-8");
-        } else if (FORMAT_JSON.equals(format)) {
-          response.setContentType("application/json; charset=utf-8");
-        }
-
-        writeResponse(getConfFromContext(), out, format, name);
-      } else {
-        processConfigTagRequest(request, out);
-      }
-    } catch (BadFormatException bfe) {
-      response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage());
-    } catch (IllegalArgumentException iae) {
-      response.sendError(HttpServletResponse.SC_NOT_FOUND, iae.getMessage());
-    }
-  }
-
-  @VisibleForTesting
-  static String parseAcceptHeader(HttpServletRequest request) {
-    String format = request.getHeader(HttpHeaders.ACCEPT);
-    return format != null && format.contains(FORMAT_JSON) ?
-        FORMAT_JSON : FORMAT_XML;
-  }
-
-  /**
-   * Guts of the servlet - extracted for easy testing.
-   */
-  static void writeResponse(Configuration conf,
-      Writer out, String format, String propertyName)
-      throws IOException, IllegalArgumentException, BadFormatException {
-    if (FORMAT_JSON.equals(format)) {
-      Configuration.dumpConfiguration(conf, propertyName, out);
-    } else if (FORMAT_XML.equals(format)) {
-      conf.writeXml(propertyName, out);
-    } else {
-      throw new BadFormatException("Bad format: " + format);
-    }
-  }
-
-  /**
-   * Exception to signal a bad content type.
-   */
-  public static class BadFormatException extends Exception {
-
-    private static final long serialVersionUID = 1L;
-
-    public BadFormatException(String msg) {
-      super(msg);
-    }
-  }
-
-  private void processConfigTagRequest(HttpServletRequest request,
-      Writer out) throws IOException {
-    String cmd = request.getParameter(COMMAND);
-    Gson gson = new Gson();
-    Configuration config = getOzoneConfig();
-
-    switch (cmd) {
-    case "getOzoneTags":
-      out.write(gson.toJson(config.get(OZONE_TAGS_SYSTEM_KEY)
-          .split(",")));
-      break;
-    case "getPropertyByTag":
-      String tags = request.getParameter("tags");
-      Map<String, Properties> propMap = new HashMap<>();
-
-      for (String tag : tags.split(",")) {
-        if (config.isPropertyTag(tag)) {
-          Properties properties = config.getAllPropertiesByTag(tag);
-          propMap.put(tag, properties);
-        } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Not a valid tag" + tag);
-          }
-        }
-      }
-      out.write(gson.toJsonTree(propMap).toString());
-      break;
-    default:
-      throw new IllegalArgumentException(cmd + " is not a valid command.");
-    }
-
-  }
-
-  private static Configuration getOzoneConfig() {
-    return OZONE_CONFIG;
-  }
-}
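
For reference, the tag-query surface of the servlet above is driven entirely by the cmd and tags request parameters, with the response format chosen from the Accept header. A minimal client sketch (the host, port and /conf mount point are assumptions; only the cmd/tags parameters and the Accept handling come from the servlet code):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class HddsConfClientSketch {
      public static void main(String[] args) throws Exception {
        // JSON is returned because the Accept header contains "json"
        // (see parseAcceptHeader); cmd/tags drive processConfigTagRequest.
        URL url = new URL(
            "http://localhost:9874/conf?cmd=getPropertyByTag&tags=OZONE");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream()))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line);
          }
        }
      }
    }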
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
deleted file mode 100644
index c0486335..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ /dev/null
@@ -1,328 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.conf;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.List;
-import java.util.Properties;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Configuration for Ozone.
- */
-@InterfaceAudience.Private
-public class OzoneConfiguration extends Configuration {
-  static {
-    activate();
-  }
-
-  public static OzoneConfiguration of(Configuration conf) {
-    Preconditions.checkNotNull(conf);
-
-    return conf instanceof OzoneConfiguration
-        ? (OzoneConfiguration) conf
-        : new OzoneConfiguration(conf);
-  }
-
-  public OzoneConfiguration() {
-    OzoneConfiguration.activate();
-    loadDefaults();
-  }
-
-  public OzoneConfiguration(Configuration conf) {
-    super(conf);
-    //load the configuration from the classloader of the original conf.
-    setClassLoader(conf.getClassLoader());
-    if (!(conf instanceof OzoneConfiguration)) {
-      loadDefaults();
-    }
-  }
-
-  private void loadDefaults() {
-    try {
-      //there could be multiple ozone-default-generated.xml files on the
-      // classpath, which are generated by the annotation processor.
-      // Here we add all of them to the list of available configuration resources.
-      Enumeration<URL> generatedDefaults =
-          OzoneConfiguration.class.getClassLoader().getResources(
-              "ozone-default-generated.xml");
-      while (generatedDefaults.hasMoreElements()) {
-        addResource(generatedDefaults.nextElement());
-      }
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
-    addResource("ozone-site.xml");
-  }
-
-  public List<Property> readPropertyFromXml(URL url) throws JAXBException {
-    JAXBContext context = JAXBContext.newInstance(XMLConfiguration.class);
-    Unmarshaller um = context.createUnmarshaller();
-
-    XMLConfiguration config = (XMLConfiguration) um.unmarshal(url);
-    return config.getProperties();
-  }
-
-  /**
-   * Create an instance of the configuration class and inject the required configuration values.
-   *
-   * @param configurationClass The class where the fields are annotated with
-   *                           the configuration.
-   * @return Instantiated Java object with the config fields injected.
-   */
-  public <T> T getObject(Class<T> configurationClass) {
-
-    T configuration;
-
-    try {
-      configuration = configurationClass.newInstance();
-    } catch (InstantiationException | IllegalAccessException e) {
-      throw new ConfigurationException(
-          "Configuration class can't be created: " + configurationClass, e);
-    }
-    ConfigGroup configGroup =
-        configurationClass.getAnnotation(ConfigGroup.class);
-    String prefix = configGroup.prefix();
-
-    for (Method setterMethod : configurationClass.getMethods()) {
-      if (setterMethod.isAnnotationPresent(Config.class)) {
-
-        String methodLocation =
-            configurationClass + "." + setterMethod.getName();
-
-        Config configAnnotation = setterMethod.getAnnotation(Config.class);
-
-        String key = prefix + "." + configAnnotation.key();
-
-        Class<?>[] parameterTypes = setterMethod.getParameterTypes();
-        if (parameterTypes.length != 1) {
-          throw new ConfigurationException(
-              "@Config annotation should be used on simple setter: "
-                  + methodLocation);
-        }
-
-        ConfigType type = configAnnotation.type();
-
-        if (type == ConfigType.AUTO) {
-          type = detectConfigType(parameterTypes[0], methodLocation);
-        }
-
-        //Note: default value is handled by ozone-default.xml. Here we can
-        //use any default.
-        try {
-          switch (type) {
-          case STRING:
-            setterMethod.invoke(configuration, get(key));
-            break;
-          case INT:
-            setterMethod.invoke(configuration,
-                getInt(key, 0));
-            break;
-          case BOOLEAN:
-            setterMethod.invoke(configuration,
-                getBoolean(key, false));
-            break;
-          case LONG:
-            setterMethod.invoke(configuration,
-                getLong(key, 0));
-            break;
-          case TIME:
-            setterMethod.invoke(configuration,
-                getTimeDuration(key, 0, configAnnotation.timeUnit()));
-            break;
-          default:
-            throw new ConfigurationException(
-                "Unsupported ConfigType " + type + " on " + methodLocation);
-          }
-        } catch (InvocationTargetException | IllegalAccessException e) {
-          throw new ConfigurationException(
-              "Can't inject configuration to " + methodLocation, e);
-        }
-
-      }
-    }
-    return configuration;
-
-  }
-
-  private ConfigType detectConfigType(Class<?> parameterType,
-      String methodLocation) {
-    ConfigType type;
-    if (parameterType == String.class) {
-      type = ConfigType.STRING;
-    } else if (parameterType == Integer.class || parameterType == int.class) {
-      type = ConfigType.INT;
-    } else if (parameterType == Long.class || parameterType == long.class) {
-      type = ConfigType.LONG;
-    } else if (parameterType == Boolean.class
-        || parameterType == boolean.class) {
-      type = ConfigType.BOOLEAN;
-    } else {
-      throw new ConfigurationException(
-          "Unsupported configuration type " + parameterType + " in "
-              + methodLocation);
-    }
-    return type;
-  }
-
-  /**
-   * Class to marshal/unmarshal configuration from XML files.
-   */
-  @XmlAccessorType(XmlAccessType.FIELD)
-  @XmlRootElement(name = "configuration")
-  public static class XMLConfiguration {
-
-    @XmlElement(name = "property", type = Property.class)
-    private List<Property> properties = new ArrayList<>();
-
-    public XMLConfiguration() {
-    }
-
-    public XMLConfiguration(List<Property> properties) {
-      this.properties = properties;
-    }
-
-    public List<Property> getProperties() {
-      return properties;
-    }
-
-    public void setProperties(List<Property> properties) {
-      this.properties = properties;
-    }
-  }
-
-  /**
-   * Class to marshal/unmarshal configuration properties from XML files.
-   */
-  @XmlAccessorType(XmlAccessType.FIELD)
-  @XmlRootElement(name = "property")
-  public static class Property implements Comparable<Property> {
-
-    private String name;
-    private String value;
-    private String tag;
-    private String description;
-
-    public String getName() {
-      return name;
-    }
-
-    public void setName(String name) {
-      this.name = name;
-    }
-
-    public String getValue() {
-      return value;
-    }
-
-    public void setValue(String value) {
-      this.value = value;
-    }
-
-    public String getTag() {
-      return tag;
-    }
-
-    public void setTag(String tag) {
-      this.tag = tag;
-    }
-
-    public String getDescription() {
-      return description;
-    }
-
-    public void setDescription(String description) {
-      this.description = description;
-    }
-
-    @Override
-    public int compareTo(Property o) {
-      if (this == o) {
-        return 0;
-      }
-      return this.getName().compareTo(o.getName());
-    }
-
-    @Override
-    public String toString() {
-      return this.getName() + " " + this.getValue() + " " + this.getTag();
-    }
-
-    @Override
-    public int hashCode() {
-      return this.getName().hashCode();
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      return (obj instanceof Property) && (((Property) obj).getName())
-          .equals(this.getName());
-    }
-  }
-
-  public static void activate() {
-    // adds the default resources
-    Configuration.addDefaultResource("hdfs-default.xml");
-    Configuration.addDefaultResource("hdfs-site.xml");
-    Configuration.addDefaultResource("ozone-default.xml");
-  }
-
-  /**
-   * The superclass method getAllPropertiesByTag does not refresh
-   * property values when newly added resources do not carry the tag,
-   * so the resources are loaded first and the tagged properties are
-   * returned with their current values.
-   *
-   * @param tag tag to look up properties by
-   * @return Properties that belong to the tag
-   */
-  @Override
-  public Properties getAllPropertiesByTag(String tag) {
-    // Call getProps first to load the newly added resources
-    // before calling super.getAllPropertiesByTag
-    Properties updatedProps = getProps();
-    Properties propertiesByTag = super.getAllPropertiesByTag(tag);
-    Properties props = new Properties();
-    Enumeration properties = propertiesByTag.propertyNames();
-    while (properties.hasMoreElements()) {
-      Object propertyName = properties.nextElement();
-      // get the current value of the property
-      Object value = updatedProps.getProperty(propertyName.toString());
-      if (value != null) {
-        props.put(propertyName, value);
-      }
-    }
-    return props;
-  }
-}
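
The getObject() injection above pairs with annotated setter classes. A hypothetical example (the attributes shown — prefix, key, type, timeUnit — are exactly the ones getObject() reads; the real @Config annotation may require further attributes such as a default value, so treat this as a sketch):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hdds.conf.Config;
    import org.apache.hadoop.hdds.conf.ConfigGroup;
    import org.apache.hadoop.hdds.conf.ConfigType;

    // Hypothetical config class, named for illustration only.
    @ConfigGroup(prefix = "ozone.scm.client")
    public class ScmClientConfigSketch {

      private String address;
      private long timeoutMs;

      // Injected from the key "ozone.scm.client.address" as a string.
      @Config(key = "address", type = ConfigType.STRING)
      public void setAddress(String address) {
        this.address = address;
      }

      // TIME values are resolved via getTimeDuration() in the given unit.
      @Config(key = "timeout", type = ConfigType.TIME,
          timeUnit = TimeUnit.MILLISECONDS)
      public void setTimeoutMs(long timeoutMs) {
        this.timeoutMs = timeoutMs;
      }
    }

    // Usage: ScmClientConfigSketch c =
    //     new OzoneConfiguration().getObject(ScmClientConfigSketch.class);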
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
deleted file mode 100644
index 948057e..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java
deleted file mode 100644
index b9d7bce..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.function;
-
-import com.google.protobuf.ServiceException;
-
-/**
- * Functional interface like java.util.function.Function but with
- * checked exception.
- */
-@FunctionalInterface
-public interface FunctionWithServiceException<T, R> {
-
-  /**
-   * Applies this function to the given argument.
-   *
-   * @param t the function argument
-   * @return the function result
-   */
-  R apply(T t) throws ServiceException;
-}
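
In practice this lets a blocking protobuf stub call, which throws ServiceException, be handed around as a plain function. A sketch (the commented capture mirrors the SCMSecurityProtocol translator further below; the helper name is illustrative):

    // 'fn' can capture e.g. a generated BlockingInterface stub:
    //   FunctionWithServiceException<SCMSecurityRequest, SCMSecurityResponse>
    //       fn = req -> rpcProxy.submitRequest(null, req);
    static <T, R> R invoke(FunctionWithServiceException<T, R> fn, T arg)
        throws ServiceException {
      return fn.apply(arg);
    }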
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java
deleted file mode 100644
index 915fe35..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Functional interfaces for ozone, similar to java.util.function.
- */
-package org.apache.hadoop.hdds.function;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java
deleted file mode 100644
index f8894e6..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds;
-
-/**
- * Generic HDDS specific configurator and helper classes.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
deleted file mode 100644
index 698a443..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ /dev/null
@@ -1,493 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.protocol;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.net.NetConstants;
-import org.apache.hadoop.hdds.scm.net.NodeImpl;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-/**
- * DatanodeDetails class contains details about DataNode like:
- * - UUID of the DataNode.
- * - IP and Hostname details.
- * - Port details to which the DataNode will be listening.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class DatanodeDetails extends NodeImpl implements
-    Comparable<DatanodeDetails> {
-  /**
-   * DataNode's unique identifier in the cluster.
-   */
-  private final UUID uuid;
-
-  private String ipAddress;
-  private String hostName;
-  private List<Port> ports;
-  private String certSerialId;
-
-  /**
-   * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used
-   * for instantiating DatanodeDetails.
-   * @param uuid DataNode's UUID
-   * @param ipAddress IP Address of this DataNode
-   * @param hostName DataNode's hostname
-   * @param networkLocation DataNode's network location path
-   * @param ports Ports used by the DataNode
-   * @param certSerialId serial id from SCM issued certificate.
-   */
-  private DatanodeDetails(String uuid, String ipAddress, String hostName,
-      String networkLocation, List<Port> ports, String certSerialId) {
-    super(hostName, networkLocation, NetConstants.NODE_COST_DEFAULT);
-    this.uuid = UUID.fromString(uuid);
-    this.ipAddress = ipAddress;
-    this.hostName = hostName;
-    this.ports = ports;
-    this.certSerialId = certSerialId;
-  }
-
-  protected DatanodeDetails(DatanodeDetails datanodeDetails) {
-    super(datanodeDetails.getHostName(), datanodeDetails.getNetworkLocation(),
-        datanodeDetails.getCost());
-    this.uuid = datanodeDetails.uuid;
-    this.ipAddress = datanodeDetails.ipAddress;
-    this.hostName = datanodeDetails.hostName;
-    this.ports = datanodeDetails.ports;
-    this.setNetworkName(datanodeDetails.getNetworkName());
-  }
-
-  /**
-   * Returns the DataNode UUID.
-   *
-   * @return UUID of DataNode
-   */
-  public UUID getUuid() {
-    return uuid;
-  }
-
-  /**
-   * Returns the string representation of DataNode UUID.
-   *
-   * @return UUID of DataNode
-   */
-  public String getUuidString() {
-    return uuid.toString();
-  }
-
-  /**
-   * Sets the IP address of Datanode.
-   *
-   * @param ip IP Address
-   */
-  public void setIpAddress(String ip) {
-    this.ipAddress = ip;
-  }
-
-  /**
-   * Returns IP address of DataNode.
-   *
-   * @return IP address
-   */
-  public String getIpAddress() {
-    return ipAddress;
-  }
-
-  /**
-   * Sets the Datanode hostname.
-   *
-   * @param host hostname
-   */
-  public void setHostName(String host) {
-    this.hostName = host;
-  }
-
-  /**
-   * Returns Hostname of DataNode.
-   *
-   * @return Hostname
-   */
-  public String getHostName() {
-    return hostName;
-  }
-
-  /**
-   * Sets a DataNode Port.
-   *
-   * @param port DataNode port
-   */
-  public void setPort(Port port) {
-    // If the port is already in the list remove it first and add the
-    // new/updated port value.
-    ports.remove(port);
-    ports.add(port);
-  }
-
-  /**
-   * Returns all the Ports used by DataNode.
-   *
-   * @return DataNode Ports
-   */
-  public List<Port> getPorts() {
-    return ports;
-  }
-
-  /**
-   * Given a name, returns the matching Port, or null if no such port is found.
-   *
-   * @param name Name of the port
-   *
-   * @return Port, or null if not found
-   */
-  public Port getPort(Port.Name name) {
-    for (Port port : ports) {
-      if (port.getName().equals(name)) {
-        return port;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Returns a DatanodeDetails from the protocol buffers.
-   *
-   * @param datanodeDetailsProto - protoBuf Message
-   * @return DatanodeDetails
-   */
-  public static DatanodeDetails getFromProtoBuf(
-      HddsProtos.DatanodeDetailsProto datanodeDetailsProto) {
-    DatanodeDetails.Builder builder = newBuilder();
-    builder.setUuid(datanodeDetailsProto.getUuid());
-    if (datanodeDetailsProto.hasIpAddress()) {
-      builder.setIpAddress(datanodeDetailsProto.getIpAddress());
-    }
-    if (datanodeDetailsProto.hasHostName()) {
-      builder.setHostName(datanodeDetailsProto.getHostName());
-    }
-    if (datanodeDetailsProto.hasCertSerialId()) {
-      builder.setCertSerialId(datanodeDetailsProto.getCertSerialId());
-    }
-    for (HddsProtos.Port port : datanodeDetailsProto.getPortsList()) {
-      builder.addPort(newPort(
-          Port.Name.valueOf(port.getName().toUpperCase()), port.getValue()));
-    }
-    if (datanodeDetailsProto.hasNetworkName()) {
-      builder.setNetworkName(datanodeDetailsProto.getNetworkName());
-    }
-    if (datanodeDetailsProto.hasNetworkLocation()) {
-      builder.setNetworkLocation(datanodeDetailsProto.getNetworkLocation());
-    }
-    return builder.build();
-  }
-
-  /**
-   * Returns the protobuf message representation of this DatanodeDetails.
-   * @return HddsProtos.DatanodeDetailsProto
-   */
-  public HddsProtos.DatanodeDetailsProto getProtoBufMessage() {
-    HddsProtos.DatanodeDetailsProto.Builder builder =
-        HddsProtos.DatanodeDetailsProto.newBuilder()
-            .setUuid(getUuidString());
-    if (ipAddress != null) {
-      builder.setIpAddress(ipAddress);
-    }
-    if (hostName != null) {
-      builder.setHostName(hostName);
-    }
-    if (certSerialId != null) {
-      builder.setCertSerialId(certSerialId);
-    }
-    if (!Strings.isNullOrEmpty(getNetworkName())) {
-      builder.setNetworkName(getNetworkName());
-    }
-    if (!Strings.isNullOrEmpty(getNetworkLocation())) {
-      builder.setNetworkLocation(getNetworkLocation());
-    }
-
-    for (Port port : ports) {
-      builder.addPorts(HddsProtos.Port.newBuilder()
-          .setName(port.getName().toString())
-          .setValue(port.getValue())
-          .build());
-    }
-    return builder.build();
-  }
-
-  @Override
-  public String toString() {
-    return uuid.toString() + "{" +
-        "ip: " +
-        ipAddress +
-        ", host: " +
-        hostName +
-        ", networkLocation: " +
-        getNetworkLocation() +
-        ", certSerialId: " + certSerialId +
-        "}";
-  }
-
-  @Override
-  public int compareTo(DatanodeDetails that) {
-    return this.getUuid().compareTo(that.getUuid());
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    return obj instanceof DatanodeDetails &&
-        uuid.equals(((DatanodeDetails) obj).uuid);
-  }
-
-  @Override
-  public int hashCode() {
-    return uuid.hashCode();
-  }
-
-  /**
-   * Returns DatanodeDetails.Builder instance.
-   *
-   * @return DatanodeDetails.Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder class for building DatanodeDetails.
-   */
-  public static final class Builder {
-    private String id;
-    private String ipAddress;
-    private String hostName;
-    private String networkName;
-    private String networkLocation;
-    private List<Port> ports;
-    private String certSerialId;
-
-    /**
-     * Default private constructor. To create Builder instance use
-     * DatanodeDetails#newBuilder.
-     */
-    private Builder() {
-      ports = new ArrayList<>();
-    }
-
-    /**
-     * Sets the DatanodeUuid.
-     *
-     * @param uuid DatanodeUuid
-     * @return DatanodeDetails.Builder
-     */
-    public Builder setUuid(String uuid) {
-      this.id = uuid;
-      return this;
-    }
-
-    /**
-     * Sets the IP address of DataNode.
-     *
-     * @param ip address
-     * @return DatanodeDetails.Builder
-     */
-    public Builder setIpAddress(String ip) {
-      this.ipAddress = ip;
-      return this;
-    }
-
-    /**
-     * Sets the hostname of DataNode.
-     *
-     * @param host hostname
-     * @return DatanodeDetails.Builder
-     */
-    public Builder setHostName(String host) {
-      this.hostName = host;
-      return this;
-    }
-
-    /**
-     * Sets the network name of DataNode.
-     *
-     * @param name network name
-     * @return DatanodeDetails.Builder
-     */
-    public Builder setNetworkName(String name) {
-      this.networkName = name;
-      return this;
-    }
-
-    /**
-     * Sets the network location of DataNode.
-     *
-     * @param loc location
-     * @return DatanodeDetails.Builder
-     */
-    public Builder setNetworkLocation(String loc) {
-      this.networkLocation = loc;
-      return this;
-    }
-
-    /**
-     * Adds a DataNode Port.
-     *
-     * @param port DataNode port
-     *
-     * @return DatanodeDetails.Builder
-     */
-    public Builder addPort(Port port) {
-      this.ports.add(port);
-      return this;
-    }
-
-    /**
-     * Adds certificate serial id.
-     *
-     * @param certId Serial id of SCM issued certificate.
-     *
-     * @return DatanodeDetails.Builder
-     */
-    public Builder setCertSerialId(String certId) {
-      this.certSerialId = certId;
-      return this;
-    }
-
-    /**
-     * Builds and returns DatanodeDetails instance.
-     *
-     * @return DatanodeDetails
-     */
-    public DatanodeDetails build() {
-      Preconditions.checkNotNull(id);
-      if (networkLocation == null) {
-        networkLocation = NetConstants.DEFAULT_RACK;
-      }
-      DatanodeDetails dn = new DatanodeDetails(id, ipAddress, hostName,
-          networkLocation, ports, certSerialId);
-      if (networkName != null) {
-        dn.setNetworkName(networkName);
-      }
-      return dn;
-    }
-  }
-
-  /**
-   * Constructs a new Port with name and value.
-   *
-   * @param name Name of the port
-   * @param value Port number
-   *
-   * @return {@code Port} instance
-   */
-  public static Port newPort(Port.Name name, Integer value) {
-    return new Port(name, value);
-  }
-
-  /**
-   * Container to hold DataNode Port details.
-   */
-  public static final class Port {
-
-    /**
-     * Ports that are supported in DataNode.
-     */
-    public enum Name {
-      STANDALONE, RATIS, REST
-    }
-
-    private Name name;
-    private Integer value;
-
-    /**
-     * Private constructor for constructing Port object. Use
-     * DatanodeDetails#newPort to create a new Port object.
-     *
-     * @param name name of the port
-     * @param value port number
-     */
-    private Port(Name name, Integer value) {
-      this.name = name;
-      this.value = value;
-    }
-
-    /**
-     * Returns the name of the port.
-     *
-     * @return Port name
-     */
-    public Name getName() {
-      return name;
-    }
-
-    /**
-     * Returns the port number.
-     *
-     * @return Port number
-     */
-    public Integer getValue() {
-      return value;
-    }
-
-    @Override
-    public int hashCode() {
-      return name.hashCode();
-    }
-
-    /**
-     * Ports are considered equal if they have the same name.
-     *
-     * @param anObject
-     *          The object to compare this {@code Port} against
-     * @return {@code true} if the given object represents a {@code Port}
-     *         and has the same name, {@code false} otherwise
-     */
-    @Override
-    public boolean equals(Object anObject) {
-      if (this == anObject) {
-        return true;
-      }
-      if (anObject instanceof Port) {
-        return name.equals(((Port) anObject).name);
-      }
-      return false;
-    }
-  }
-
-  /**
-   * Returns serial id of SCM issued certificate.
-   *
-   * @return certificate serial id
-   */
-  public String getCertSerialId() {
-    return certSerialId;
-  }
-
-  /**
-   * Set certificate serial id of SCM issued certificate.
-   * @param certSerialId certificate serial id.
-   */
-  public void setCertSerialId(String certSerialId) {
-    this.certSerialId = certSerialId;
-  }
-}
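
Putting the builder above to use — a sketch (the address, hostname and RATIS port number are arbitrary example values; the UUID string must parse because the private constructor calls UUID.fromString()):

    import java.util.UUID;

    DatanodeDetails dn = DatanodeDetails.newBuilder()
        .setUuid(UUID.randomUUID().toString())
        .setIpAddress("10.0.0.1")
        .setHostName("dn1.example.com")
        .addPort(DatanodeDetails.newPort(
            DatanodeDetails.Port.Name.RATIS, 9858))
        .build();  // networkLocation falls back to NetConstants.DEFAULT_RACK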
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java
deleted file mode 100644
index 4036cb1..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.protocol;
-
-import java.io.IOException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.security.KerberosInfo;
-
-/**
- * The protocol used to perform security related operations with SCM.
- */
-@KerberosInfo(
-    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
-@InterfaceAudience.Private
-public interface SCMSecurityProtocol {
-
-  @SuppressWarnings("checkstyle:ConstantName")
-  /**
-   * Version 1: Initial version.
-   */
-  long versionID = 1L;
-
-  /**
-   * Get SCM signed certificate for DataNode.
-   *
-   * @param dataNodeDetails - DataNode Details.
-   * @param certSignReq     - Certificate signing request.
-   * @return String         - pem encoded SCM signed certificate.
-   */
-  String getDataNodeCertificate(
-      DatanodeDetailsProto dataNodeDetails,
-      String certSignReq) throws IOException;
-
-  /**
-   * Get SCM signed certificate for OM.
-   *
-   * @param omDetails       - OzoneManager Details.
-   * @param certSignReq     - Certificate signing request.
-   * @return String         - pem encoded SCM signed
-   *                          certificate.
-   */
-  String getOMCertificate(OzoneManagerDetailsProto omDetails,
-      String certSignReq) throws IOException;
-
-  /**
-   * Get SCM signed certificate for given certificate serial id if it exists.
-   * Throws exception if it's not found.
-   *
-   * @param certSerialId    - Certificate serial id.
-   * @return String         - pem encoded SCM signed
-   *                          certificate with given cert id if it
-   *                          exists.
-   */
-  String getCertificate(String certSerialId) throws IOException;
-
-  /**
-   * Get CA certificate.
-   *
-   * @return String         - pem encoded CA certificate.
-   */
-  String getCACertificate() throws IOException;
-
-}
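
Callers hold this interface through the client-side translator that follows. A minimal sketch (obtaining the SCMSecurityProtocolPB proxy over Hadoop RPC is assumed wiring, not shown here):

    SCMSecurityProtocol scmSecurityClient =
        new SCMSecurityProtocolClientSideTranslatorPB(rpcProxy);
    String caCertPem = scmSecurityClient.getCACertificate();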
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
deleted file mode 100644
index 7dae0fc..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains HDDS protocol related classes.
- */
-package org.apache.hadoop.hdds.protocol;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
deleted file mode 100644
index efe79a76..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.protocolPB;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.function.Consumer;
-
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest.Builder;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityResponse;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.Type;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import static org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetOMCertRequestProto;
-
-/**
- * This class is the client-side translator that forwards requests for
- * {@link SCMSecurityProtocol} to the {@link SCMSecurityProtocolPB} proxy.
- */
-public class SCMSecurityProtocolClientSideTranslatorPB implements
-    SCMSecurityProtocol, ProtocolTranslator, Closeable {
-
-  /**
-   * RpcController is not used and hence is set to null.
-   */
-  private static final RpcController NULL_RPC_CONTROLLER = null;
-  private final SCMSecurityProtocolPB rpcProxy;
-
-  public SCMSecurityProtocolClientSideTranslatorPB(
-      SCMSecurityProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
-  }
-
-  /**
-   * Helper method to wrap the request and send the message.
-   */
-  private SCMSecurityResponse submitRequest(
-      SCMSecurityProtocolProtos.Type type,
-      Consumer<Builder> builderConsumer) throws IOException {
-    final SCMSecurityResponse response;
-    try {
-
-      Builder builder = SCMSecurityRequest.newBuilder()
-          .setCmdType(type)
-          .setTraceID(TracingUtil.exportCurrentSpan());
-      builderConsumer.accept(builder);
-      SCMSecurityRequest wrapper = builder.build();
-
-      response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
-    } catch (ServiceException ex) {
-      throw ProtobufHelper.getRemoteException(ex);
-    }
-    return response;
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated
-   * with it. If the stream is already closed then invoking this
-   * method has no effect.
-   *
-   * <p> As noted in {@link AutoCloseable#close()}, cases where the
-   * close may fail require careful attention. It is strongly advised
-   * to relinquish the underlying resources and to internally
-   * <em>mark</em> the {@code Closeable} as closed, prior to throwing
-   * the {@code IOException}.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-    RPC.stopProxy(rpcProxy);
-  }
-
-  /**
-   * Get SCM signed certificate for DataNode.
-   *
-   * @param dataNodeDetails - DataNode Details.
-   * @param certSignReq     - Certificate signing request.
-   * @return String         - pem encoded SCM signed certificate.
-   */
-  @Override
-  public String getDataNodeCertificate(DatanodeDetailsProto dataNodeDetails,
-      String certSignReq) throws IOException {
-    return getDataNodeCertificateChain(dataNodeDetails, certSignReq)
-        .getX509Certificate();
-  }
-
-  /**
-   * Get SCM signed certificate for OM.
-   *
-   * @param omDetails   - OzoneManager Details.
-   * @param certSignReq - Certificate signing request.
-   * @return String         - pem encoded SCM signed certificate.
-   */
-  @Override
-  public String getOMCertificate(OzoneManagerDetailsProto omDetails,
-      String certSignReq) throws IOException {
-    return getOMCertChain(omDetails, certSignReq).getX509Certificate();
-  }
-
-  /**
-   * Get SCM signed certificate chain for OM.
-   *
-   * @param omDetails   - OzoneManager Details.
-   * @param certSignReq - Certificate signing request.
-   * @return SCMGetCertResponseProto - response with the signed certificate.
-   */
-  public SCMGetCertResponseProto getOMCertChain(
-      OzoneManagerDetailsProto omDetails, String certSignReq)
-      throws IOException {
-    SCMGetOMCertRequestProto request = SCMGetOMCertRequestProto
-        .newBuilder()
-        .setCSR(certSignReq)
-        .setOmDetails(omDetails)
-        .build();
-    return submitRequest(Type.GetOMCertificate,
-        builder -> builder.setGetOMCertRequest(request))
-        .getGetCertResponseProto();
-  }
-
-  /**
-   * Get SCM signed certificate with given serial id. Throws exception if
-   * certificate is not found.
-   *
-   * @param certSerialId - Certificate serial id.
-   * @return string         - pem encoded certificate.
-   */
-  @Override
-  public String getCertificate(String certSerialId) throws IOException {
-    SCMGetCertificateRequestProto request = SCMGetCertificateRequestProto
-        .newBuilder()
-        .setCertSerialId(certSerialId)
-        .build();
-    return submitRequest(Type.GetCertificate,
-        builder -> builder.setGetCertificateRequest(request))
-        .getGetCertResponseProto()
-        .getX509Certificate();
-  }
-
-  /**
-   * Get SCM signed certificate chain for Datanode.
-   *
-   * @param dnDetails   - Datanode Details.
-   * @param certSignReq - Certificate signing request.
-   * @return SCMGetCertResponseProto - response with the signed certificate.
-   */
-  public SCMGetCertResponseProto getDataNodeCertificateChain(
-      DatanodeDetailsProto dnDetails, String certSignReq)
-      throws IOException {
-
-    SCMGetDataNodeCertRequestProto request =
-        SCMGetDataNodeCertRequestProto.newBuilder()
-            .setCSR(certSignReq)
-            .setDatanodeDetails(dnDetails)
-            .build();
-    return submitRequest(Type.GetDataNodeCertificate,
-        builder -> builder.setGetDataNodeCertRequest(request))
-        .getGetCertResponseProto();
-  }
-
-  /**
-   * Get CA certificate.
-   *
-   * @return String   - pem encoded CA certificate.
-   */
-  @Override
-  public String getCACertificate() throws IOException {
-    SCMGetCACertificateRequestProto protoIns = SCMGetCACertificateRequestProto
-        .getDefaultInstance();
-    return submitRequest(Type.GetCACertificate,
-        builder -> builder.setGetCACertificateRequest(protoIns))
-        .getGetCertResponseProto().getX509Certificate();
-
-  }
-
-  /**
-   * Return the proxy object underlying this protocol translator.
-   *
-   * @return the proxy object underlying this protocol translator.
-   */
-  @Override
-  public Object getUnderlyingProxyObject() {
-    return rpcProxy;
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolPB.java
deleted file mode 100644
index 41b0332..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolPB.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.protocolPB;
-
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityProtocolService;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.security.KerberosInfo;
-
-/**
- * Protocol for security related operations on SCM.
- */
-
-@ProtocolInfo(protocolName =
-    "org.apache.hadoop.hdds.protocol.SCMSecurityProtocol",
-    protocolVersion = 1)
-@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
-public interface SCMSecurityProtocolPB extends
-    SCMSecurityProtocolService.BlockingInterface {
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java
deleted file mode 100644
index 4496019..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.protocolPB;
-/**
- * This package contains classes for wiring HDDS protobuf calls to rpc.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java
deleted file mode 100644
index 07a886a..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.ratis;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto;
-import org.apache.ratis.protocol.Message;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.ratis.util.JavaUtils;
-
-import java.util.Objects;
-import java.util.function.Supplier;
-
-/**
- * Implementing the {@link Message} interface
- * for {@link ContainerCommandRequestProto}.
- */
-public final class ContainerCommandRequestMessage implements Message {
-  public static ContainerCommandRequestMessage toMessage(
-      ContainerCommandRequestProto request, String traceId) {
-    final ContainerCommandRequestProto.Builder b
-        = ContainerCommandRequestProto.newBuilder(request);
-    if (traceId != null) {
-      b.setTraceID(traceId);
-    }
-
-    ByteString data = ByteString.EMPTY;
-    if (request.getCmdType() == Type.WriteChunk) {
-      final WriteChunkRequestProto w = request.getWriteChunk();
-      data = w.getData();
-      b.setWriteChunk(w.toBuilder().clearData());
-    } else if (request.getCmdType() == Type.PutSmallFile) {
-      final PutSmallFileRequestProto p = request.getPutSmallFile();
-      data = p.getData();
-      b.setPutSmallFile(p.toBuilder().setData(ByteString.EMPTY));
-    }
-    return new ContainerCommandRequestMessage(b.build(), data);
-  }
-
-  public static ContainerCommandRequestProto toProto(
-      ByteString bytes, RaftGroupId groupId)
-      throws InvalidProtocolBufferException {
-    final int i = 4 + bytes.asReadOnlyByteBuffer().getInt();
-    final ContainerCommandRequestProto header
-        = ContainerCommandRequestProto.parseFrom(bytes.substring(4, i));
-    // TODO: setting pipeline id can be avoided if the client is sending it.
-    //       In such case, just have to validate the pipeline id.
-    final ContainerCommandRequestProto.Builder b = header.toBuilder();
-    if (groupId != null) {
-      b.setPipelineID(groupId.getUuid().toString());
-    }
-    final ByteString data = bytes.substring(i);
-    if (header.getCmdType() == Type.WriteChunk) {
-      b.setWriteChunk(b.getWriteChunkBuilder().setData(data));
-    } else if (header.getCmdType() == Type.PutSmallFile) {
-      b.setPutSmallFile(b.getPutSmallFileBuilder().setData(data));
-    }
-    return b.build();
-  }
-
-  private final ContainerCommandRequestProto header;
-  private final ByteString data;
-  private final Supplier<ByteString> contentSupplier
-      = JavaUtils.memoize(this::buildContent);
-
-  private ContainerCommandRequestMessage(
-      ContainerCommandRequestProto header, ByteString data) {
-    this.header = Objects.requireNonNull(header, "header == null");
-    this.data = Objects.requireNonNull(data, "data == null");
-  }
-
-  private ByteString buildContent() {
-    final ByteString headerBytes = header.toByteString();
-    return RatisHelper.int2ByteString(headerBytes.size())
-        .concat(headerBytes)
-        .concat(data);
-  }
-
-  @Override
-  public ByteString getContent() {
-    return contentSupplier.get();
-  }
-
-  @Override
-  public String toString() {
-    return header + ", data.size=" + data.size();
-  }
-}
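
The content built above is a simple length-prefixed frame: a 4-byte big-endian length of the serialized header proto, then the header itself (with its data field cleared), then the raw WriteChunk or PutSmallFile payload. A decoding sketch mirroring the arithmetic in toProto() (class and method names here are illustrative):

    import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

    final class FrameSketch {
      // Splits a frame produced by getContent() into header and payload.
      static ByteString[] split(ByteString frame) {
        int headerEnd = 4 + frame.asReadOnlyByteBuffer().getInt();
        return new ByteString[] {
            frame.substring(4, headerEnd),  // serialized header proto
            frame.substring(headerEnd)      // chunk / small-file data
        };
      }
    }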
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
deleted file mode 100644
index 081b4fb..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.ratis;
-
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import org.apache.ratis.RaftConfigKeys;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.client.RaftClientConfigKeys;
-import org.apache.ratis.conf.RaftProperties;
-import org.apache.ratis.grpc.GrpcConfigKeys;
-import org.apache.ratis.grpc.GrpcFactory;
-import org.apache.ratis.grpc.GrpcTlsConfig;
-import org.apache.ratis.proto.RaftProtos;
-import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.protocol.RaftPeer;
-import org.apache.ratis.protocol.RaftPeerId;
-import org.apache.ratis.retry.RetryPolicies;
-import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.util.SizeInBytes;
-import org.apache.ratis.util.TimeDuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Ratis helper methods.
- */
-public interface RatisHelper {
-  Logger LOG = LoggerFactory.getLogger(RatisHelper.class);
-
-  static String toRaftPeerIdString(DatanodeDetails id) {
-    return id.getUuidString();
-  }
-
-  static UUID toDatanodeId(String peerIdString) {
-    return UUID.fromString(peerIdString);
-  }
-
-  static UUID toDatanodeId(RaftPeerId peerId) {
-    return toDatanodeId(peerId.toString());
-  }
-
-  static UUID toDatanodeId(RaftProtos.RaftPeerProto peerId) {
-    return toDatanodeId(RaftPeerId.valueOf(peerId.getId()));
-  }
-
-  static String toRaftPeerAddressString(DatanodeDetails id) {
-    return id.getIpAddress() + ":" +
-        id.getPort(DatanodeDetails.Port.Name.RATIS).getValue();
-  }
-
-  static RaftPeerId toRaftPeerId(DatanodeDetails id) {
-    return RaftPeerId.valueOf(toRaftPeerIdString(id));
-  }
-
-  static RaftPeer toRaftPeer(DatanodeDetails id) {
-    return new RaftPeer(toRaftPeerId(id), toRaftPeerAddressString(id));
-  }
-
-  static List<RaftPeer> toRaftPeers(Pipeline pipeline) {
-    return toRaftPeers(pipeline.getNodes());
-  }
-
-  static <E extends DatanodeDetails> List<RaftPeer> toRaftPeers(
-      List<E> datanodes) {
-    return datanodes.stream().map(RatisHelper::toRaftPeer)
-        .collect(Collectors.toList());
-  }
-
-  /* TODO: use a dummy id for all groups for the moment.
-   *       It should be changed to a unique id for each group.
-   */
-  RaftGroupId DUMMY_GROUP_ID =
-      RaftGroupId.valueOf(ByteString.copyFromUtf8("AOzoneRatisGroup"));
-
-  RaftGroup EMPTY_GROUP = RaftGroup.valueOf(DUMMY_GROUP_ID,
-      Collections.emptyList());
-
-  static RaftGroup emptyRaftGroup() {
-    return EMPTY_GROUP;
-  }
-
-  static RaftGroup newRaftGroup(Collection<RaftPeer> peers) {
-    return peers.isEmpty() ? emptyRaftGroup()
-        : RaftGroup.valueOf(DUMMY_GROUP_ID, peers);
-  }
-
-  static RaftGroup newRaftGroup(RaftGroupId groupId,
-      Collection<DatanodeDetails> peers) {
-    final List<RaftPeer> newPeers = peers.stream()
-        .map(RatisHelper::toRaftPeer)
-        .collect(Collectors.toList());
-    return peers.isEmpty() ? RaftGroup.valueOf(groupId, Collections.emptyList())
-        : RaftGroup.valueOf(groupId, newPeers);
-  }
-
-  static RaftGroup newRaftGroup(Pipeline pipeline) {
-    return RaftGroup.valueOf(RaftGroupId.valueOf(pipeline.getId().getId()),
-        toRaftPeers(pipeline));
-  }
-
-  static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline,
-      RetryPolicy retryPolicy, int maxOutStandingRequest,
-      GrpcTlsConfig tlsConfig, TimeDuration timeout) throws IOException {
-    return newRaftClient(rpcType, toRaftPeerId(pipeline.getFirstNode()),
-        newRaftGroup(RaftGroupId.valueOf(pipeline.getId().getId()),
-            pipeline.getNodes()), retryPolicy, maxOutStandingRequest, tlsConfig,
-        timeout);
-  }
-
-  static TimeDuration getClientRequestTimeout(Configuration conf) {
-    // Set the client requestTimeout
-    final TimeUnit timeUnit =
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
-            .getUnit();
-    final long duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
-            .getDuration(), timeUnit);
-    final TimeDuration clientRequestTimeout =
-        TimeDuration.valueOf(duration, timeUnit);
-    return clientRequestTimeout;
-  }
-
-  static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,
-      RetryPolicy retryPolicy, int maxOutstandingRequests,
-      GrpcTlsConfig tlsConfig, TimeDuration clientRequestTimeout) {
-    return newRaftClient(rpcType, leader.getId(),
-        newRaftGroup(new ArrayList<>(Arrays.asList(leader))), retryPolicy,
-        maxOutstandingRequests, tlsConfig, clientRequestTimeout);
-  }
-
-  static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,
-      RetryPolicy retryPolicy, int maxOutstandingRequests,
-      TimeDuration clientRequestTimeout) {
-    return newRaftClient(rpcType, leader.getId(),
-        newRaftGroup(new ArrayList<>(Arrays.asList(leader))), retryPolicy,
-        maxOutstandingRequests, null, clientRequestTimeout);
-  }
-
-  static RaftClient newRaftClient(RpcType rpcType, RaftPeerId leader,
-      RaftGroup group, RetryPolicy retryPolicy, int maxOutStandingRequest,
-      GrpcTlsConfig tlsConfig, TimeDuration clientRequestTimeout) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("newRaftClient: {}, leader={}, group={}",
-          rpcType, leader, group);
-    }
-    final RaftProperties properties = new RaftProperties();
-    RaftConfigKeys.Rpc.setType(properties, rpcType);
-    RaftClientConfigKeys.Rpc
-        .setRequestTimeout(properties, clientRequestTimeout);
-
-    GrpcConfigKeys.setMessageSizeMax(properties,
-        SizeInBytes.valueOf(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE));
-    GrpcConfigKeys.OutputStream.setOutstandingAppendsMax(properties,
-        maxOutStandingRequest);
-
-    RaftClient.Builder builder =  RaftClient.newBuilder()
-        .setRaftGroup(group)
-        .setLeaderId(leader)
-        .setProperties(properties)
-        .setRetryPolicy(retryPolicy);
-
-    // TODO: GRPC TLS only for now, netty/hadoop RPC TLS support later.
-    if (tlsConfig != null && rpcType == SupportedRpcType.GRPC) {
-      builder.setParameters(GrpcFactory.newRaftParameters(tlsConfig));
-    }
-    return builder.build();
-  }
-
-  // For an external gRPC client connecting to the server with gRPC TLS.
-  // No mTLS for external clients, as the SCM CA does not issue
-  // certificates for them.
-  static GrpcTlsConfig createTlsClientConfig(SecurityConfig conf,
-      X509Certificate caCert) {
-    GrpcTlsConfig tlsConfig = null;
-    if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) {
-      tlsConfig = new GrpcTlsConfig(null, null,
-          caCert, false);
-    }
-    return tlsConfig;
-  }
-
-  // For the internal gRPC client from SCM to DN with gRPC TLS.
-  static GrpcTlsConfig createTlsClientConfigForSCM(SecurityConfig conf,
-      CertificateServer certificateServer) throws IOException {
-    if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) {
-      try {
-        X509Certificate caCert =
-            CertificateCodec.getX509Certificate(
-                certificateServer.getCACertificate());
-        return new GrpcTlsConfig(null, null,
-            caCert, false);
-      } catch (CertificateException ex) {
-        throw new SCMSecurityException("Fail to find SCM CA certificate.", ex);
-      }
-    }
-    return null;
-  }
-
-  // For the gRPC server running the DN container service with gRPC TLS.
-  // No mTLS, as the channel is shared with external clients, which
-  // do not have SCM CA issued certificates.
-  // In summary:
-  // authentication from server to client is via TLS;
-  // authentication from client to server is via block token (or container token).
-  static GrpcTlsConfig createTlsServerConfigForDN(SecurityConfig conf,
-      CertificateClient caClient)  {
-    if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) {
-      return new GrpcTlsConfig(
-          caClient.getPrivateKey(), caClient.getCertificate(),
-          null, false);
-    }
-    return null;
-  }
-
-  static RetryPolicy createRetryPolicy(Configuration conf) {
-    int maxRetryCount =
-        conf.getInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY,
-            OzoneConfigKeys.
-                DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT);
-    long retryInterval = conf.getTimeDuration(OzoneConfigKeys.
-        DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY, OzoneConfigKeys.
-        DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT
-        .toIntExact(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
-    TimeDuration sleepDuration =
-        TimeDuration.valueOf(retryInterval, TimeUnit.MILLISECONDS);
-    RetryPolicy retryPolicy = RetryPolicies
-        .retryUpToMaximumCountWithFixedSleep(maxRetryCount, sleepDuration);
-    return retryPolicy;
-  }
-
-  static Long getMinReplicatedIndex(
-      Collection<RaftProtos.CommitInfoProto> commitInfos) {
-    return commitInfos.stream().map(RaftProtos.CommitInfoProto::getCommitIndex)
-        .min(Long::compareTo).orElse(null);
-  }
-
-  static ByteString int2ByteString(int n) {
-    final ByteString.Output out = ByteString.newOutput();
-    try(DataOutputStream dataOut = new DataOutputStream(out)) {
-      dataOut.writeInt(n);
-    } catch (IOException e) {
-      throw new IllegalStateException(
-          "Failed to write integer n = " + n + " to a ByteString.", e);
-    }
-    return out.toByteString();
-  }
-}
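Among the helpers deleted above, createRetryPolicy pairs a maximum attempt count with a fixed sleep between retries. A hedged sketch of building the same Ratis policy directly, with the 180-attempt / 1000 ms defaults hard-coded in place of the Configuration lookups (values match the defaults in the deleted config-key classes):

    import java.util.concurrent.TimeUnit;
    import org.apache.ratis.retry.RetryPolicies;
    import org.apache.ratis.retry.RetryPolicy;
    import org.apache.ratis.util.TimeDuration;

    public class RetryPolicyExample {
      public static void main(String[] args) {
        // 180 attempts with a fixed 1-second sleep between them -- the
        // defaults createRetryPolicy() read from OzoneConfigKeys.
        RetryPolicy policy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            180, TimeDuration.valueOf(1000, TimeUnit.MILLISECONDS));
        System.out.println(policy);
      }
    }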
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java
deleted file mode 100644
index e52dc7f..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.ratis;
-
-/**
- * This package contains classes related to Apache Ratis.
- */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
deleted file mode 100644
index 4608df7..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations;
-
-import java.nio.ByteBuffer;
-import java.util.function.Function;
-
-/**
- * Helper class to create a conversion function from ByteBuffer to ByteString
- * based on the property
- * {@link OzoneConfigKeys#OZONE_UNSAFEBYTEOPERATIONS_ENABLED} in the
- * Ozone configuration.
- */
-public final class ByteStringConversion {
-  private ByteStringConversion(){} // no instantiation.
-
-  /**
-   * Creates the conversion function used to convert ByteBuffers to
-   * ByteString instances for use in protobuf messages.
-   *
-   * @param config the Ozone configuration
-   * @return the conversion function defined by
-   *          {@link OzoneConfigKeys#OZONE_UNSAFEBYTEOPERATIONS_ENABLED}
-   * @see <pre>ByteBuffer</pre>
-   */
-  public static Function<ByteBuffer, ByteString> createByteBufferConversion(
-      Configuration config){
-    boolean unsafeEnabled =
-        config!=null && config.getBoolean(
-            OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED,
-            OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT);
-    if (unsafeEnabled) {
-      return buffer -> UnsafeByteOperations.unsafeWrap(buffer);
-    } else {
-      return buffer -> {
-        ByteString retval = ByteString.copyFrom(buffer);
-        buffer.flip();
-        return retval;
-      };
-    }
-  }
-}
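The deleted factory toggles between zero-copy wrapping (fast, but unsafe if the caller later mutates the buffer) and a defensive copy that also flips the buffer so it remains readable. A hedged sketch of the same pattern against stock com.google.protobuf rather than the Ratis-shaded classes (class name illustrative):

    import java.nio.ByteBuffer;
    import java.util.function.Function;

    import com.google.protobuf.ByteString;
    import com.google.protobuf.UnsafeByteOperations;

    final class ByteStringConversionSketch {
      private ByteStringConversionSketch() { }

      // Pick the ByteBuffer -> ByteString strategy once, as
      // createByteBufferConversion() did from the Ozone configuration.
      static Function<ByteBuffer, ByteString> create(boolean unsafeEnabled) {
        if (unsafeEnabled) {
          // Zero-copy: wraps the buffer; the caller must not mutate it
          // while the ByteString is in use.
          return UnsafeByteOperations::unsafeWrap;
        }
        return buffer -> {
          // Defensive copy; copyFrom() advances the position, so flip()
          // rewinds the buffer for any subsequent reader.
          ByteString copy = ByteString.copyFrom(buffer);
          buffer.flip();
          return copy;
        };
      }
    }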
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
deleted file mode 100644
index 1617806..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.ratis.proto.RaftProtos.ReplicationLevel;
-import org.apache.ratis.util.TimeDuration;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * This class contains constants for configuration keys used in SCM.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class ScmConfigKeys {
-
-  // Location of SCM DB files. For now we just support a single
-  // metadata dir but in future we may support multiple for redundancy or
-  // performance.
-  public static final String OZONE_SCM_DB_DIRS = "ozone.scm.db.dirs";
-
-  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
-      = "dfs.container.ratis.enabled";
-  public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
-      = false;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
-      = "dfs.container.ratis.rpc.type";
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
-      = "GRPC";
-  public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY
-      = "dfs.container.ratis.num.write.chunk.threads";
-  public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT
-      = 60;
-  public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
-      = "dfs.container.ratis.replication.level";
-  public static final ReplicationLevel
-      DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY;
-  public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
-      = "dfs.container.ratis.num.container.op.executors";
-  public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT
-      = 10;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
-      "dfs.container.ratis.segment.size";
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
-      "1MB";
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
-      "dfs.container.ratis.segment.preallocated.size";
-  public static final String
-      DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "16KB";
-  public static final String
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
-      "dfs.container.ratis.statemachinedata.sync.timeout";
-  public static final TimeDuration
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
-      TimeDuration.valueOf(10, TimeUnit.SECONDS);
-  public static final String
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
-      "dfs.container.ratis.statemachinedata.sync.retries";
-  public static final int
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT = -1;
-  public static final String
-      DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS =
-      "dfs.container.ratis.statemachine.max.pending.apply-transactions";
-  // The default value of the maximum number of pending state machine apply
-  // transactions is kept the same as the default snapshot threshold.
-  public static final int
-      DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT =
-      100000;
-  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS =
-      "dfs.container.ratis.log.queue.num-elements";
-  public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT =
-      1024;
-  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT =
-      "dfs.container.ratis.log.queue.byte-limit";
-  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT =
-      "4GB";
-  public static final String
-      DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS =
-      "dfs.container.ratis.log.appender.queue.num-elements";
-  public static final int
-      DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1;
-  public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT =
-      "dfs.container.ratis.log.appender.queue.byte-limit";
-  public static final String
-      DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB";
-  public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP =
-      "dfs.container.ratis.log.purge.gap";
-  // TODO: Set to 1024 once RATIS issue around purge is fixed.
-  public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT =
-      1000000;
-
-  public static final String DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS =
-      "dfs.container.ratis.leader.num.pending.requests";
-  public static final int
-      DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT = 4096;
-  // Expiry interval for stateMachineData cache entries inside containerStateMachine.
-  public static final String
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL =
-      "dfs.container.ratis.statemachine.cache.expiry.interval";
-  public static final String
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT =
-      "10s";
-  public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.client.request.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
-  public static final String DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY =
-      "dfs.ratis.client.request.max.retries";
-  public static final int DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT = 180;
-  public static final String DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY =
-      "dfs.ratis.client.request.retry.interval";
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT =
-      TimeDuration.valueOf(1000, TimeUnit.MILLISECONDS);
-  public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.server.retry-cache.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS);
-  public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.server.request.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
-  public static final String
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.leader.election.minimum.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(5, TimeUnit.SECONDS);
-
-  public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY =
-      "dfs.ratis.snapshot.threshold";
-  public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000;
-
-  public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY =
-      "dfs.ratis.server.failure.duration";
-  public static final TimeDuration
-      DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT =
-      TimeDuration.valueOf(120, TimeUnit.SECONDS);
-
-  // TODO: this is copied from OzoneConsts; may need to move to a better place.
-  public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size";
-  // 16 MB by default
-  public static final String OZONE_SCM_CHUNK_SIZE_DEFAULT = "16MB";
-
-  public static final String OZONE_SCM_CLIENT_PORT_KEY =
-      "ozone.scm.client.port";
-  public static final int OZONE_SCM_CLIENT_PORT_DEFAULT = 9860;
-
-  public static final String OZONE_SCM_DATANODE_PORT_KEY =
-      "ozone.scm.datanode.port";
-  public static final int OZONE_SCM_DATANODE_PORT_DEFAULT = 9861;
-
-  // OZONE_OM_PORT_DEFAULT = 9862
-  public static final String OZONE_SCM_BLOCK_CLIENT_PORT_KEY =
-      "ozone.scm.block.client.port";
-  public static final int OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT = 9863;
-
-  public static final String OZONE_SCM_SECURITY_SERVICE_PORT_KEY =
-      "ozone.scm.security.service.port";
-  public static final int OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT = 9961;
-
-  // Container service client
-  public static final String OZONE_SCM_CLIENT_ADDRESS_KEY =
-      "ozone.scm.client.address";
-  public static final String OZONE_SCM_CLIENT_BIND_HOST_KEY =
-      "ozone.scm.client.bind.host";
-  public static final String OZONE_SCM_CLIENT_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-
-  // Block service client
-  public static final String OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY =
-      "ozone.scm.block.client.address";
-  public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY =
-      "ozone.scm.block.client.bind.host";
-  public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-
-  // SCM Security service address.
-  public static final String OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY =
-      "ozone.scm.security.service.address";
-  public static final String OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY =
-      "ozone.scm.security.service.bind.host";
-  public static final String OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-
-  public static final String OZONE_SCM_DATANODE_ADDRESS_KEY =
-      "ozone.scm.datanode.address";
-  public static final String OZONE_SCM_DATANODE_BIND_HOST_KEY =
-      "ozone.scm.datanode.bind.host";
-  public static final String OZONE_SCM_DATANODE_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-
-  public static final String OZONE_SCM_HTTP_ENABLED_KEY =
-      "ozone.scm.http.enabled";
-  public static final String OZONE_SCM_HTTP_BIND_HOST_KEY =
-      "ozone.scm.http-bind-host";
-  public static final String OZONE_SCM_HTTPS_BIND_HOST_KEY =
-      "ozone.scm.https-bind-host";
-  public static final String OZONE_SCM_HTTP_ADDRESS_KEY =
-      "ozone.scm.http-address";
-  public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
-      "ozone.scm.https-address";
-  public static final String HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY =
-      "hdds.scm.kerberos.keytab.file";
-  public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY =
-      "hdds.scm.kerberos.principal";
-  public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
-  public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
-  public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
-
-  public static final String HDDS_REST_HTTP_ADDRESS_KEY =
-      "hdds.rest.http-address";
-  public static final String HDDS_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880";
-  public static final String HDDS_DATANODE_DIR_KEY = "hdds.datanode.dir";
-  public static final String HDDS_REST_CSRF_ENABLED_KEY =
-      "hdds.rest.rest-csrf.enabled";
-  public static final boolean HDDS_REST_CSRF_ENABLED_DEFAULT = false;
-  public static final String HDDS_REST_NETTY_HIGH_WATERMARK =
-      "hdds.rest.netty.high.watermark";
-  public static final int HDDS_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536;
-  public static final int HDDS_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768;
-  public static final String HDDS_REST_NETTY_LOW_WATERMARK =
-      "hdds.rest.netty.low.watermark";
-
-  public static final String OZONE_SCM_HANDLER_COUNT_KEY =
-      "ozone.scm.handler.count.key";
-  public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10;
-
-  public static final String OZONE_SCM_SECURITY_HANDLER_COUNT_KEY =
-      "ozone.scm.security.handler.count.key";
-  public static final int OZONE_SCM_SECURITY_HANDLER_COUNT_DEFAULT = 2;
-
-  public static final String OZONE_SCM_DEADNODE_INTERVAL =
-      "ozone.scm.dead.node.interval";
-  public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT =
-      "10m";
-
-  public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL =
-      "ozone.scm.heartbeat.thread.interval";
-  public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT =
-      "3s";
-
-  public static final String OZONE_SCM_STALENODE_INTERVAL =
-      "ozone.scm.stale.node.interval";
-  public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT =
-      "5m";
-
-  public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT =
-      "ozone.scm.heartbeat.rpc-timeout";
-  public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT =
-      "1s";
-
-  /**
-   * Defines how frequently we log missed heartbeats to a specific SCM.
-   * In the default case we write a warning message for every 10 sequential
-   * heartbeats missed to a specific SCM. This avoids overrunning the log
-   * with a flood of missed-heartbeat statements.
-   */
-  public static final String OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT =
-      "ozone.scm.heartbeat.log.warn.interval.count";
-  public static final int OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT =
-      10;
-
-  // ozone.scm.names key is a set of DNS | DNS:PORT | IP Address | IP:PORT.
-  // Written as a comma separated string. e.g. scm1, scm2:8020, 7.7.7.7:7777
-  //
-  // If this key is not specified, datanodes will not be able to find the
-  // SCM. The SCM membership can be dynamic, so this key should contain
-  // all possible SCM names. Once the SCM leader is discovered, datanodes
-  // will get the right list of SCMs to heartbeat to from the leader.
-  // While it is good for the datanodes to know the names of all SCM nodes,
-  // it is sufficient to know the name of one working SCM. That SCM
-  // will be able to return information about the other SCMs that are part
-  // of the SCM replicated log.
-  //
-  // In case of a membership change, any one of the SCM machines will be
-  // able to send back a new list to the datanodes.
-  public static final String OZONE_SCM_NAMES = "ozone.scm.names";
-
-  public static final int OZONE_SCM_DEFAULT_PORT =
-      OZONE_SCM_DATANODE_PORT_DEFAULT;
-  // The path where the datanode ID is to be written.
-  // If this value is not set, container startup will fail.
-  public static final String OZONE_SCM_DATANODE_ID_DIR =
-      "ozone.scm.datanode.id.dir";
-
-  public static final String OZONE_SCM_DB_CACHE_SIZE_MB =
-      "ozone.scm.db.cache.size.mb";
-  public static final int OZONE_SCM_DB_CACHE_SIZE_DEFAULT = 128;
-
-  public static final String OZONE_SCM_CONTAINER_SIZE =
-      "ozone.scm.container.size";
-  public static final String OZONE_SCM_CONTAINER_SIZE_DEFAULT = "5GB";
-
-  public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY =
-      "ozone.scm.container.placement.impl";
-
-  public static final String OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT =
-      "ozone.scm.pipeline.owner.container.count";
-  public static final int OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT = 3;
-
-  public static final String
-      OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY =
-      "ozone.scm.keyvalue.container.deletion-choosing.policy";
-
-  public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT =
-      "ozone.scm.container.creation.lease.timeout";
-
-  public static final String
-      OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
-
-  public static final String OZONE_SCM_PIPELINE_DESTROY_TIMEOUT =
-      "ozone.scm.pipeline.destroy.timeout";
-
-  public static final String OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT =
-      "66s";
-
-  public static final String OZONE_SCM_PIPELINE_CREATION_INTERVAL =
-      "ozone.scm.pipeline.creation.interval";
-  public static final String OZONE_SCM_PIPELINE_CREATION_INTERVAL_DEFAULT =
-      "120s";
-
-  public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
-      "ozone.scm.block.deletion.max.retry";
-  public static final int OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT = 4096;
-
-  public static final String HDDS_SCM_WATCHER_TIMEOUT =
-      "hdds.scm.watcher.timeout";
-
-  public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT =
-      "10m";
-
-  public static final String
-      HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY =
-      "hdds.scm.http.kerberos.principal";
-  public static final String
-      HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY =
-      "hdds.scm.http.kerberos.keytab";
-
-  // Network topology
-  public static final String OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE =
-      "ozone.scm.network.topology.schema.file";
-  public static final String OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT =
-      "network-topology-default.xml";
-
-  public static final String HDDS_TRACING_ENABLED = "hdds.tracing.enabled";
-  public static final boolean HDDS_TRACING_ENABLED_DEFAULT = true;
-
-  /**
-   * Never constructed.
-   */
-  private ScmConfigKeys() {
-
-  }
-}
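Time-valued keys like those above are read through Hadoop's Configuration#getTimeDuration, which parses suffixed values such as "3s" or "10m" into a caller-chosen unit (the deleted RatisHelper#getClientRequestTimeout shows the same call). A small hedged example using one key from the deleted class:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.conf.Configuration;

    public class ScmConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // OZONE_SCM_STALENODE_INTERVAL_DEFAULT was "5m" in the deleted class.
        conf.set("ozone.scm.stale.node.interval", "5m");

        // getTimeDuration parses the "5m" suffix and converts the value
        // to the requested unit.
        long staleMillis = conf.getTimeDuration(
            "ozone.scm.stale.node.interval", 300_000L, TimeUnit.MILLISECONDS);
        System.out.println("stale node interval = " + staleMillis + " ms");
      }
    }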
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
deleted file mode 100644
index 6236feb..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-/**
- * ScmInfo wraps the result returned from SCM#getScmInfo which
- * contains clusterId and the SCM Id.
- */
-public final class ScmInfo {
-  private String clusterId;
-  private String scmId;
-
-  /**
-   * Builder for ScmInfo.
-   */
-  public static class Builder {
-    private String clusterId;
-    private String scmId;
-
-    /**
-     * sets the cluster id.
-     * @param cid clusterId to be set
-     * @return Builder for ScmInfo
-     */
-    public Builder setClusterId(String cid) {
-      this.clusterId = cid;
-      return this;
-    }
-
-    /**
-     * sets the scmId.
-     * @param id scmId
-     * @return Builder for scmInfo
-     */
-    public Builder setScmId(String id) {
-      this.scmId = id;
-      return this;
-    }
-
-    public ScmInfo build() {
-      return new ScmInfo(clusterId, scmId);
-    }
-  }
-
-  private ScmInfo(String clusterId, String scmId) {
-    this.clusterId = clusterId;
-    this.scmId = scmId;
-  }
-
-  /**
-   * Gets the clusterId from the Version file.
-   * @return ClusterId
-   */
-  public String getClusterId() {
-    return clusterId;
-  }
-
-  /**
-   * Gets the SCM Id from the Version file.
-   * @return SCM Id
-   */
-  public String getScmId() {
-    return scmId;
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java
deleted file mode 100644
index bae0758..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-
-/**
- * This class represents the reply from XceiverClient.
- */
-public class XceiverClientReply {
-
-  private CompletableFuture<ContainerCommandResponseProto> response;
-  private Long logIndex;
-
-  /**
-   * List of datanodes where the command was executed and a reply was
-   * received. If the reply carries an exception, these datanodes identify
-   * the servers where the failure occurred.
-   */
-  private List<DatanodeDetails> datanodes;
-
-  public XceiverClientReply(
-      CompletableFuture<ContainerCommandResponseProto> response) {
-    this(response, null);
-  }
-
-  public XceiverClientReply(
-      CompletableFuture<ContainerCommandResponseProto> response,
-      List<DatanodeDetails> datanodes) {
-    this.logIndex = (long) 0;
-    this.response = response;
-    this.datanodes = datanodes == null ? new ArrayList<>() : datanodes;
-  }
-
-  public CompletableFuture<ContainerCommandResponseProto> getResponse() {
-    return response;
-  }
-
-  public long getLogIndex() {
-    return logIndex;
-  }
-
-  public void setLogIndex(Long logIndex) {
-    this.logIndex = logIndex;
-  }
-
-  public List<DatanodeDetails> getDatanodes() {
-    return datanodes;
-  }
-
-  public void addDatanode(DatanodeDetails dn) {
-    datanodes.add(dn);
-  }
-
-  public void setResponse(
-      CompletableFuture<ContainerCommandResponseProto> response) {
-    this.response = response;
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
deleted file mode 100644
index 5631bad..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction;
-
-/**
- * A Client for the storageContainer protocol.
- */
-public abstract class XceiverClientSpi implements Closeable {
-
-  final private AtomicInteger referenceCount;
-  private boolean isEvicted;
-
-  XceiverClientSpi() {
-    this.referenceCount = new AtomicInteger(0);
-    this.isEvicted = false;
-  }
-
-  void incrementReference() {
-    this.referenceCount.incrementAndGet();
-  }
-
-  void decrementReference() {
-    this.referenceCount.decrementAndGet();
-    cleanup();
-  }
-
-  void setEvicted() {
-    isEvicted = true;
-    cleanup();
-  }
-
-  // Close the xceiverClient only if:
-  // 1) there is no refcount on the client, and
-  // 2) it has been evicted from the cache.
-  private void cleanup() {
-    if (referenceCount.get() == 0 && isEvicted) {
-      close();
-    }
-  }
-
-  @VisibleForTesting
-  public int getRefcount() {
-    return referenceCount.get();
-  }
-
-  /**
-   * Connects to the leader in the pipeline.
-   */
-  public abstract void connect() throws Exception;
-
-  /**
-   * Connects to the leader in the pipeline using encoded token. To be used
-   * in a secure cluster.
-   */
-  public abstract void connect(String encodedToken) throws Exception;
-
-  @Override
-  public abstract void close();
-
-  /**
-   * Returns the pipeline of machines that host the container used by this
-   * client.
-   *
-   * @return pipeline of machines that host the container
-   */
-  public abstract Pipeline getPipeline();
-
-  /**
-   * Sends a given command to the server and gets the reply back.
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  public ContainerCommandResponseProto sendCommand(
-      ContainerCommandRequestProto request) throws IOException {
-    try {
-      XceiverClientReply reply;
-      reply = sendCommandAsync(request);
-      ContainerCommandResponseProto responseProto = reply.getResponse().get();
-      return responseProto;
-    } catch (ExecutionException | InterruptedException e) {
-      throw new IOException("Failed to command " + request, e);
-    }
-  }
-
-  /**
-   * Sends a given command to the server and gets the reply back along with
-   * the associated server info.
-   * @param request Request
-   * @param validators functions to validate the response
-   * @return Response to the command
-   * @throws IOException
-   */
-  public ContainerCommandResponseProto sendCommand(
-      ContainerCommandRequestProto request, List<CheckedBiFunction> validators)
-      throws IOException {
-    try {
-      XceiverClientReply reply;
-      reply = sendCommandAsync(request);
-      ContainerCommandResponseProto responseProto = reply.getResponse().get();
-      for (CheckedBiFunction function : validators) {
-        function.apply(request, responseProto);
-      }
-      return responseProto;
-    } catch (ExecutionException | InterruptedException e) {
-      throw new IOException("Failed to command " + request, e);
-    }
-  }
-
-  /**
-   * Sends a given command to the server and gets a waitable future back.
-   *
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  public abstract XceiverClientReply
-      sendCommandAsync(ContainerCommandRequestProto request)
-      throws IOException, ExecutionException, InterruptedException;
-
-  /**
-   * Returns pipeline Type.
-   *
-   * @return - {Stand_Alone, Ratis or Chained}
-   */
-  public abstract HddsProtos.ReplicationType getPipelineType();
-
-  /**
-   * Check if a specific commitIndex is replicated to majority/all servers.
-   * @param index index to watch for
-   * @param timeout timeout provided for the watch operation to complete
-   * @return reply containing the min commit index replicated to all or majority
-   *         servers in case of a failure
-   * @throws InterruptedException
-   * @throws ExecutionException
-   * @throws TimeoutException
-   * @throws IOException
-   */
-  public abstract XceiverClientReply watchForCommit(long index, long timeout)
-      throws InterruptedException, ExecutionException, TimeoutException,
-      IOException;
-
-  /**
-   * Returns the min commit index replicated to all servers.
-   * @return min commit index replicated to all servers.
-   */
-  public abstract long getReplicatedMinCommitIndex();
-}
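The SPI above derives its blocking sendCommand() variants from the single abstract sendCommandAsync(), waiting on the returned future and then running the validators. A dependency-free sketch of that sync-over-async pattern, with the protobuf types simplified to String (all names here are hypothetical):

    import java.io.IOException;
    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;

    abstract class AsyncCommandClient {
      // The one primitive subclasses must provide, as with sendCommandAsync().
      abstract CompletableFuture<String> sendAsync(String request);

      // Blocking wrapper: wait on the future, then validate the response,
      // mirroring sendCommand(request, validators) above.
      String send(String request, List<Validator> validators)
          throws IOException {
        try {
          String response = sendAsync(request).get();
          for (Validator v : validators) {
            v.validate(request, response); // may throw IOException
          }
          return response;
        } catch (ExecutionException | InterruptedException e) {
          throw new IOException("Failed to execute command " + request, e);
        }
      }

      interface Validator {
        void validate(String request, String response) throws IOException;
      }
    }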
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
deleted file mode 100644
index 226ceda..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.client;
-
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerDataProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * The interface to call into underlying container layer.
- *
- * Written as an interface to allow easy testing: implement a mock container
- * layer for standalone testing of the CBlock API without actually calling
- * into remote containers. The actual container layer can simply re-implement
- * this.
- *
- * NOTE: this class is only needed temporarily. When SCM containers are
- * full-fledged, this interface will likely be removed.
- */
-@InterfaceStability.Unstable
-public interface ScmClient extends Closeable {
-  /**
-   * Creates a Container on SCM and returns the pipeline.
-   * @return ContainerWithPipeline
-   * @throws IOException
-   */
-  ContainerWithPipeline createContainer(String owner) throws IOException;
-
-  /**
-   * Gets a container by ID -- Throws if the container does not exist.
-   * @param containerId - Container ID
-   * @return ContainerInfo
-   * @throws IOException
-   */
-  ContainerInfo getContainer(long containerId) throws IOException;
-
-  /**
-   * Gets a container by ID -- Throws if the container does not exist.
-   * @param containerId - Container ID
-   * @return ContainerWithPipeline
-   * @throws IOException
-   */
-  ContainerWithPipeline getContainerWithPipeline(long containerId)
-      throws IOException;
-
-  /**
-   * Close a container.
-   *
-   * @param containerId - ID of the container.
-   * @param pipeline - Pipeline where the container is located.
-   * @throws IOException
-   */
-  void closeContainer(long containerId, Pipeline pipeline) throws IOException;
-
-  /**
-   * Close a container.
-   *
-   * @param containerId - ID of the container.
-   * @throws IOException
-   */
-  void closeContainer(long containerId) throws IOException;
-
-  /**
-   * Deletes an existing container.
-   * @param containerId - ID of the container.
-   * @param pipeline - Pipeline that represents the container.
-   * @param force - true to forcibly delete the container.
-   * @throws IOException
-   */
-  void deleteContainer(long containerId, Pipeline pipeline, boolean force)
-      throws IOException;
-
-  /**
-   * Deletes an existing container.
-   * @param containerId - ID of the container.
-   * @param force - true to forcibly delete the container.
-   * @throws IOException
-   */
-  void deleteContainer(long containerId, boolean force) throws IOException;
-
-  /**
-   * Lists a range of containers and gets their info.
-   *
-   * @param startContainerID start containerID.
-   * @param count count must be {@literal >} 0.
-   *
-   * @return a list of ContainerInfo.
-   * @throws IOException
-   */
-  List<ContainerInfo> listContainer(long startContainerID,
-      int count) throws IOException;
-
-  /**
-   * Read meta data from an existing container.
-   * @param containerID - ID of the container.
-   * @param pipeline - Pipeline where the container is located.
-   * @return ContainerInfo
-   * @throws IOException
-   */
-  ContainerDataProto readContainer(long containerID, Pipeline pipeline)
-      throws IOException;
-
-  /**
-   * Read meta data from an existing container.
-   * @param containerID - ID of the container.
-   * @return ContainerInfo
-   * @throws IOException
-   */
-  ContainerDataProto readContainer(long containerID)
-      throws IOException;
-
-  /**
-   * Gets the container size -- Computed by SCM from Container Reports.
-   * @param containerID - ID of the container.
-   * @return number of bytes used by this container.
-   * @throws IOException
-   */
-  long getContainerSize(long containerID) throws IOException;
-
-  /**
-   * Creates a Container on SCM and returns the pipeline.
-   * @param type - Replication Type.
-   * @param replicationFactor - Replication Factor
-   * @return ContainerWithPipeline
-   * @throws IOException - in case of error.
-   */
-  ContainerWithPipeline createContainer(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor replicationFactor,
-      String owner) throws IOException;
-
-  /**
-   * Returns a set of nodes that meet the query criteria.
-   * @param nodeStatuses - Criteria that we want the node to have.
-   * @param queryScope - Query scope - Cluster or pool.
-   * @param poolName - if it is pool, a pool name is required.
-   * @return A set of nodes that meet the requested criteria.
-   * @throws IOException
-   */
-  List<HddsProtos.Node> queryNode(HddsProtos.NodeState nodeStatuses,
-      HddsProtos.QueryScope queryScope, String poolName) throws IOException;
-
-  /**
-   * Creates a specified replication pipeline.
-   * @param type - Type
-   * @param factor - Replication factor
-   * @param nodePool - Set of machines.
-   * @throws IOException
-   */
-  Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
-      throws IOException;
-
-  /**
-   * Returns the list of active Pipelines.
-   *
-   * @return list of Pipeline
-   * @throws IOException in case of any exception
-   */
-  List<Pipeline> listPipelines() throws IOException;
-
-  /**
-   * Activates the pipeline given a pipeline ID.
-   *
-   * @param pipelineID PipelineID to activate.
-   * @throws IOException In case of exception while activating the pipeline
-   */
-  void activatePipeline(HddsProtos.PipelineID pipelineID) throws IOException;
-
-  /**
-   * Deactivates the pipeline given a pipeline ID.
-   *
-   * @param pipelineID PipelineID to deactivate.
-   * @throws IOException In case of exception while deactivating the pipeline
-   */
-  void deactivatePipeline(HddsProtos.PipelineID pipelineID) throws IOException;
-
-  /**
-   * Closes the pipeline given a pipeline ID.
-   *
-   * @param pipelineID PipelineID to close.
-   * @throws IOException In case of exception while closing the pipeline
-   */
-  void closePipeline(HddsProtos.PipelineID pipelineID) throws IOException;
-
-  /**
-   * Check if SCM is in safe mode.
-   *
-   * @return Returns true if SCM is in safe mode else returns false.
-   * @throws IOException
-   */
-  boolean inSafeMode() throws IOException;
-
-  /**
-   * Force SCM out of safe mode.
-   *
-   * @return returns true if operation is successful.
-   * @throws IOException
-   */
-  boolean forceExitSafeMode() throws IOException;
-
-  /**
-   * Start ReplicationManager.
-   */
-  void startReplicationManager() throws IOException;
-
-  /**
-   * Stop ReplicationManager.
-   */
-  void stopReplicationManager() throws IOException;
-
-  /**
-   * Returns ReplicationManager status.
-   *
-   * @return True if ReplicationManager is running, false otherwise.
-   */
-  boolean getReplicationManagerStatus() throws IOException;
-
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
deleted file mode 100644
index e2f7033..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.client;
-
-/**
- * This package contains classes for the client of the storage container
- * protocol.
- */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerException.java
deleted file mode 100644
index 9d37dfb..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerException.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import java.io.IOException;
-
-/**
- * Signals that a container exception of some sort has occurred. This is the
- * parent of all the exceptions thrown by ContainerManager.
- */
-public class ContainerException extends IOException {
-
-  /**
-   * Constructs a {@code ContainerException} with {@code null}
-   * as its error detail message.
-   */
-  public ContainerException() {
-    super();
-  }
-
-  /**
-   * Constructs a {@code ContainerException} with the specified detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public ContainerException(String message) {
-    super(message);
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
deleted file mode 100644
index bb44da4..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-import org.apache.commons.lang3.builder.CompareToBuilder;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-
-/**
- * Container ID is an integer value between 1..MAX_CONTAINER_ID.
- * <p>
- * We create a specific type for this to avoid mixing it up with
- * normal integers in code.
- */
-public final class ContainerID implements Comparable<ContainerID> {
-
-  private final long id;
-
-  // TODO: make this private.
-  /**
-   * Constructs ContainerID.
-   *
-   * @param id long
-   */
-  public ContainerID(long id) {
-    this.id = id;
-  }
-
-  /**
-   * Factory method for creation of ContainerID.
-   * @param containerID  long
-   * @return ContainerID.
-   */
-  public static ContainerID valueof(final long containerID) {
-    Preconditions.checkState(containerID > 0,
-        "Container ID should be a positive long. "+ containerID);
-    return new ContainerID(containerID);
-  }
-
-  /**
-   * Returns the long representation of this ID.
-   *
-   * @return long
-   */
-  public long getId() {
-    return id;
-  }
-
-  public byte[] getBytes() {
-    return Longs.toByteArray(id);
-  }
-
-  @Override
-  public boolean equals(final Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    final ContainerID that = (ContainerID) o;
-
-    return new EqualsBuilder()
-        .append(getId(), that.getId())
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(61, 71)
-        .append(getId())
-        .toHashCode();
-  }
-
-  @Override
-  public int compareTo(final ContainerID that) {
-    Preconditions.checkNotNull(that);
-    return new CompareToBuilder()
-        .append(this.getId(), that.getId())
-        .build();
-  }
-
-  @Override
-  public String toString() {
-    return "#" + id;
-  }
-}
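[Reference sketch, not part of the patch] ContainerID above is a small value type
around a long; a minimal usage sketch against only the API defined in the deleted
file:

    ContainerID id = ContainerID.valueof(42L);           // factory; rejects non-positive values
    byte[] key = id.getBytes();                          // big-endian long, e.g. usable as a store key
    boolean same = id.equals(ContainerID.valueof(42L));  // value equality, not identity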
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
deleted file mode 100644
index 5c58e92..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import static java.lang.Math.max;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.google.common.base.Preconditions;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.Arrays;
-import java.util.Comparator;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.util.Time;
-
-/**
- * Class that wraps Ozone container info.
- */
-public class ContainerInfo implements Comparator<ContainerInfo>,
-    Comparable<ContainerInfo>, Externalizable {
-
-  private static final ObjectWriter WRITER;
-  private static final String SERIALIZATION_ERROR_MSG = "Java serialization not"
-      + " supported. Use protobuf instead.";
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);
-    mapper
-        .setVisibility(PropertyAccessor.GETTER, JsonAutoDetect.Visibility.NONE);
-    WRITER = mapper.writerWithDefaultPrettyPrinter();
-  }
-
-  private HddsProtos.LifeCycleState state;
-  @JsonIgnore
-  private PipelineID pipelineID;
-  private ReplicationFactor replicationFactor;
-  private ReplicationType replicationType;
-  private long usedBytes;
-  private long numberOfKeys;
-  private long lastUsed;
-  // Wall-clock ms since the epoch at which the container entered its current state.
-  private long stateEnterTime;
-  private String owner;
-  private long containerID;
-  private long deleteTransactionId;
-  // The sequenceId of a closed container cannot change, and all the
-  // container replicas should have the same sequenceId.
-  private long sequenceId;
-
-  /**
-   * Allows callers to maintain private data on ContainerInfo. This data is
-   * not serialized via protobuf; it simply lets us attach some private data.
-   */
-  @JsonIgnore
-  private byte[] data;
-
-  @SuppressWarnings("parameternumber")
-  ContainerInfo(
-      long containerID,
-      HddsProtos.LifeCycleState state,
-      PipelineID pipelineID,
-      long usedBytes,
-      long numberOfKeys,
-      long stateEnterTime,
-      String owner,
-      long deleteTransactionId,
-      long sequenceId,
-      ReplicationFactor replicationFactor,
-      ReplicationType repType) {
-    this.containerID = containerID;
-    this.pipelineID = pipelineID;
-    this.usedBytes = usedBytes;
-    this.numberOfKeys = numberOfKeys;
-    this.lastUsed = Time.monotonicNow();
-    this.state = state;
-    this.stateEnterTime = stateEnterTime;
-    this.owner = owner;
-    this.deleteTransactionId = deleteTransactionId;
-    this.sequenceId = sequenceId;
-    this.replicationFactor = replicationFactor;
-    this.replicationType = repType;
-  }
-
-  /**
-   * No-arg constructor needed for deserialization and to satisfy findbugs.
-   */
-  public ContainerInfo() {
-  }
-
-  public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) {
-    ContainerInfo.Builder builder = new ContainerInfo.Builder();
-    return builder.setPipelineID(
-        PipelineID.getFromProtobuf(info.getPipelineID()))
-        .setUsedBytes(info.getUsedBytes())
-        .setNumberOfKeys(info.getNumberOfKeys())
-        .setState(info.getState())
-        .setStateEnterTime(info.getStateEnterTime())
-        .setOwner(info.getOwner())
-        .setContainerID(info.getContainerID())
-        .setDeleteTransactionId(info.getDeleteTransactionId())
-        .setReplicationFactor(info.getReplicationFactor())
-        .setReplicationType(info.getReplicationType())
-        .build();
-  }
-
-  public long getContainerID() {
-    return containerID;
-  }
-
-  public HddsProtos.LifeCycleState getState() {
-    return state;
-  }
-
-  public void setState(HddsProtos.LifeCycleState state) {
-    this.state = state;
-  }
-
-  public long getStateEnterTime() {
-    return stateEnterTime;
-  }
-
-  public ReplicationFactor getReplicationFactor() {
-    return replicationFactor;
-  }
-
-  public PipelineID getPipelineID() {
-    return pipelineID;
-  }
-
-  public long getUsedBytes() {
-    return usedBytes;
-  }
-
-  public void setUsedBytes(long value) {
-    usedBytes = value;
-  }
-
-  public long getNumberOfKeys() {
-    return numberOfKeys;
-  }
-
-  public void setNumberOfKeys(long value) {
-    numberOfKeys = value;
-  }
-
-  public long getDeleteTransactionId() {
-    return deleteTransactionId;
-  }
-
-  public long getSequenceId() {
-    return sequenceId;
-  }
-
-  public void updateDeleteTransactionId(long transactionId) {
-    deleteTransactionId = max(transactionId, deleteTransactionId);
-  }
-
-  public void updateSequenceId(long sequenceID) {
-    assert (isOpen() || state == HddsProtos.LifeCycleState.QUASI_CLOSED);
-    sequenceId = max(sequenceID, sequenceId);
-  }
-
-  public ContainerID containerID() {
-    return new ContainerID(getContainerID());
-  }
-
-  /**
-   * Gets the last used time from SCM's perspective.
-   *
-   * @return time in milliseconds.
-   */
-  public long getLastUsed() {
-    return lastUsed;
-  }
-
-  public ReplicationType getReplicationType() {
-    return replicationType;
-  }
-
-  public void updateLastUsedTime() {
-    lastUsed = Time.monotonicNow();
-  }
-
-  public HddsProtos.ContainerInfoProto getProtobuf() {
-    HddsProtos.ContainerInfoProto.Builder builder =
-        HddsProtos.ContainerInfoProto.newBuilder();
-    Preconditions.checkState(containerID > 0);
-    return builder.setContainerID(getContainerID())
-        .setUsedBytes(getUsedBytes())
-        .setNumberOfKeys(getNumberOfKeys()).setState(getState())
-        .setStateEnterTime(getStateEnterTime())
-        .setDeleteTransactionId(getDeleteTransactionId())
-        .setPipelineID(getPipelineID().getProtobuf())
-        .setReplicationFactor(getReplicationFactor())
-        .setReplicationType(getReplicationType())
-        .setOwner(getOwner())
-        .build();
-  }
-
-  public String getOwner() {
-    return owner;
-  }
-
-  public void setOwner(String owner) {
-    this.owner = owner;
-  }
-
-  @Override
-  public String toString() {
-    return "ContainerInfo{"
-        + "id=" + containerID
-        + ", state=" + state
-        + ", pipelineID=" + pipelineID
-        + ", stateEnterTime=" + stateEnterTime
-        + ", owner=" + owner
-        + '}';
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    ContainerInfo that = (ContainerInfo) o;
-
-    return new EqualsBuilder()
-        .append(getContainerID(), that.getContainerID())
-
-        // TODO: Fix this later. If we add these factors, some tests fail.
-        // Commenting this out for now; it will be enforced once the
-        // pipeline changes move Container Name out of Pipeline and into
-        // SCMContainerInfo.
-        // .append(pipeline.getFactor(), that.pipeline.getFactor())
-        // .append(pipeline.getType(), that.pipeline.getType())
-        .append(owner, that.owner)
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(11, 811)
-        .append(getContainerID())
-        .append(getOwner())
-        .toHashCode();
-  }
-
-  /**
-   * Compares its two arguments for order.  Returns a negative integer, zero, or
-   * a positive integer as the first argument is less than, equal to, or greater
-   * than the second.<p>
-   *
-   * @param o1 the first object to be compared.
-   * @param o2 the second object to be compared.
-   * @return a negative integer, zero, or a positive integer as the first
-   * argument is less than, equal to, or greater than the second.
-   * @throws NullPointerException if an argument is null and this comparator
-   *                              does not permit null arguments
-   * @throws ClassCastException   if the arguments' types prevent them from
-   *                              being compared by this comparator.
-   */
-  @Override
-  public int compare(ContainerInfo o1, ContainerInfo o2) {
-    return Long.compare(o1.getLastUsed(), o2.getLastUsed());
-  }
-
-  /**
-   * Compares this object with the specified object for order.  Returns a
-   * negative integer, zero, or a positive integer as this object is less than,
-   * equal to, or greater than the specified object.
-   *
-   * @param o the object to be compared.
-   * @return a negative integer, zero, or a positive integer as this object is
-   * less than, equal to, or greater than the specified object.
-   * @throws NullPointerException if the specified object is null
-   * @throws ClassCastException   if the specified object's type prevents it
-   *                              from being compared to this object.
-   */
-  @Override
-  public int compareTo(ContainerInfo o) {
-    return this.compare(this, o);
-  }
-
-  /**
-   * Returns a JSON string of this object.
-   *
-   * @return String - json string
-   * @throws IOException if the object cannot be serialized to JSON
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * Returns private data that is set on this containerInfo.
-   *
-   * @return blob, the user can interpret it any way they like.
-   */
-  public byte[] getData() {
-    if (this.data != null) {
-      return Arrays.copyOf(this.data, this.data.length);
-    } else {
-      return null;
-    }
-  }
-
-  /**
-   * Set private data on ContainerInfo object.
-   *
-   * @param data -- private data.
-   */
-  public void setData(byte[] data) {
-    if (data != null) {
-      this.data = Arrays.copyOf(data, data.length);
-    }
-  }
-
-  /**
-   * Throws IOException as default java serialization is not supported. Use
-   * serialization via protobuf instead.
-   *
-   * @param out the stream to write the object to
-   * @throws IOException Includes any I/O exceptions that may occur
-   * @serialData Overriding methods should use this tag to describe
-   * the data layout of this Externalizable object.
-   * List the sequence of element types and, if possible,
-   * relate the element to a public/protected field and/or
-   * method of this Externalizable class.
-   */
-  @Override
-  public void writeExternal(ObjectOutput out) throws IOException {
-    throw new IOException(SERIALIZATION_ERROR_MSG);
-  }
-
-  /**
-   * Throws IOException as default java serialization is not supported. Use
-   * serialization via protobuf instead.
-   *
-   * @param in the stream to read data from in order to restore the object
-   * @throws IOException            if I/O errors occur
-   * @throws ClassNotFoundException If the class for an object being
-   *                                restored cannot be found.
-   */
-  @Override
-  public void readExternal(ObjectInput in)
-      throws IOException, ClassNotFoundException {
-    throw new IOException(SERIALIZATION_ERROR_MSG);
-  }
-
-  /**
-   * Builder class for ContainerInfo.
-   */
-  public static class Builder {
-    private HddsProtos.LifeCycleState state;
-    private long used;
-    private long keys;
-    private long stateEnterTime;
-    private String owner;
-    private long containerID;
-    private long deleteTransactionId;
-    private long sequenceId;
-    private PipelineID pipelineID;
-    private ReplicationFactor replicationFactor;
-    private ReplicationType replicationType;
-
-    public Builder setReplicationType(
-        ReplicationType repType) {
-      this.replicationType = repType;
-      return this;
-    }
-
-    public Builder setPipelineID(PipelineID pipelineId) {
-      this.pipelineID = pipelineId;
-      return this;
-    }
-
-    public Builder setReplicationFactor(ReplicationFactor repFactor) {
-      this.replicationFactor = repFactor;
-      return this;
-    }
-
-    public Builder setContainerID(long id) {
-      Preconditions.checkState(id >= 0);
-      this.containerID = id;
-      return this;
-    }
-
-    public Builder setState(HddsProtos.LifeCycleState lifeCycleState) {
-      this.state = lifeCycleState;
-      return this;
-    }
-
-    public Builder setUsedBytes(long bytesUsed) {
-      this.used = bytesUsed;
-      return this;
-    }
-
-    public Builder setNumberOfKeys(long keyCount) {
-      this.keys = keyCount;
-      return this;
-    }
-
-    public Builder setStateEnterTime(long time) {
-      this.stateEnterTime = time;
-      return this;
-    }
-
-    public Builder setOwner(String containerOwner) {
-      this.owner = containerOwner;
-      return this;
-    }
-
-    public Builder setDeleteTransactionId(long deleteTransactionID) {
-      this.deleteTransactionId = deleteTransactionID;
-      return this;
-    }
-
-    public Builder setSequenceId(long sequenceID) {
-      this.sequenceId = sequenceID;
-      return this;
-    }
-
-    public ContainerInfo build() {
-      return new ContainerInfo(containerID, state, pipelineID,
-          used, keys, stateEnterTime, owner, deleteTransactionId,
-          sequenceId, replicationFactor, replicationType);
-    }
-  }
-
-  /**
-   * Check if a container is in an open state; that is, either the OPEN or
-   * CLOSING state. Any container in these states is managed as an open
-   * container by SCM.
-   */
-  public boolean isOpen() {
-    return state == HddsProtos.LifeCycleState.OPEN
-        || state == HddsProtos.LifeCycleState.CLOSING;
-  }
-
-}
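[Reference sketch, not part of the patch] A minimal sketch of building a
ContainerInfo through the Builder defined above; pipelineId is assumed to be a
valid PipelineID obtained elsewhere:

    ContainerInfo info = new ContainerInfo.Builder()
        .setContainerID(1L)
        .setState(HddsProtos.LifeCycleState.OPEN)
        .setPipelineID(pipelineId)                                // assumed in scope
        .setReplicationType(HddsProtos.ReplicationType.RATIS)
        .setReplicationFactor(HddsProtos.ReplicationFactor.THREE)
        .setOwner("ozone")
        .build();
    HddsProtos.ContainerInfoProto proto = info.getProtobuf();    // protobuf, not Java serialization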
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerNotFoundException.java
deleted file mode 100644
index 3eebcce..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerNotFoundException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-/**
- * Signals that a container is missing from ContainerManager.
- */
-public class ContainerNotFoundException extends ContainerException {
-
-  /**
-   * Constructs a {@code ContainerNotFoundException} with {@code null}
-   * as its error detail message.
-   */
-  public ContainerNotFoundException() {
-    super();
-  }
-
-  /**
-   * Constructs a {@code ContainerNotFoundException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public ContainerNotFoundException(String message) {
-    super(message);
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaNotFoundException.java
deleted file mode 100644
index fdbc18b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaNotFoundException.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-/**
- * Signals that a ContainerReplica is missing from the Container in
- * ContainerManager.
- */
-public class ContainerReplicaNotFoundException extends ContainerException {
-
-  /**
-   * Constructs a {@code ContainerReplicaNotFoundException} with {@code null}
-   * as its error detail message.
-   */
-  public ContainerReplicaNotFoundException() {
-    super();
-  }
-
-  /**
-   * Constructs a {@code ContainerReplicaNotFoundException} with the
-   * specified detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public ContainerReplicaNotFoundException(String message) {
-    super(message);
-  }
-}
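[Reference sketch, not part of the patch] ContainerException is the common parent
of both lookup failures above, so callers can catch the specific or the general
type; containerManager and id are assumed to be in scope:

    try {
      containerManager.getContainer(id);   // hypothetical ContainerManager call
    } catch (ContainerNotFoundException e) {
      // the container itself is missing
    } catch (ContainerException e) {
      // any other ContainerManager failure, e.g. a missing replica
    }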
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
deleted file mode 100644
index 7ac0401..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.client.ContainerBlockID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-
-/**
- * AllocatedBlock wraps the result returned from SCM#allocateBlock, which
- * contains a Pipeline and the container block ID.
- */
-public final class AllocatedBlock {
-  private Pipeline pipeline;
-  private ContainerBlockID containerBlockID;
-
-  /**
-   * Builder for AllocatedBlock.
-   */
-  public static class Builder {
-    private Pipeline pipeline;
-    private ContainerBlockID containerBlockID;
-
-    public Builder setPipeline(Pipeline p) {
-      this.pipeline = p;
-      return this;
-    }
-
-    public Builder setContainerBlockID(ContainerBlockID blockId) {
-      this.containerBlockID = blockId;
-      return this;
-    }
-
-    public AllocatedBlock build() {
-      return new AllocatedBlock(pipeline, containerBlockID);
-    }
-  }
-
-  private AllocatedBlock(Pipeline pipeline, ContainerBlockID containerBlockID) {
-    this.pipeline = pipeline;
-    this.containerBlockID = containerBlockID;
-  }
-
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  public ContainerBlockID getBlockID() {
-    return containerBlockID;
-  }
-}
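[Reference sketch, not part of the patch] AllocatedBlock has a private
constructor, so instances come from the Builder; pipeline and blockId are assumed
to arrive in an SCM allocateBlock response:

    AllocatedBlock block = new AllocatedBlock.Builder()
        .setPipeline(pipeline)
        .setContainerBlockID(blockId)
        .build();
    Pipeline p = block.getPipeline();            // where to write
    ContainerBlockID cb = block.getBlockID();    // what to write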
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java
deleted file mode 100644
index 86f5a66..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-/**
- * Exception thrown when a block is yet to be committed on the datanode.
- */
-public class BlockNotCommittedException extends StorageContainerException {
-
-  /**
-   * Constructs a {@code BlockNotCommittedException} with the specified
-   * detail message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the {@link #getMessage()} method)
-   */
-  public BlockNotCommittedException(String message) {
-    super(message, ContainerProtos.Result.BLOCK_NOT_COMMITTED);
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
deleted file mode 100644
index 4e406e6..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-/**
- * Exception thrown when a write/update operation is performed on a non-open
- * container.
- */
-public class ContainerNotOpenException extends StorageContainerException {
-
-  /**
-   * Constructs a {@code ContainerNotOpenException} with the specified
-   * detail message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the {@link #getMessage()} method)
-   */
-  public ContainerNotOpenException(String message) {
-    super(message, ContainerProtos.Result.CONTAINER_NOT_OPEN);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
deleted file mode 100644
index 5b01bd2..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import java.util.Comparator;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException;
-
-/**
- * Class that wraps a ContainerInfo together with its Pipeline.
- */
-public class ContainerWithPipeline implements Comparator<ContainerWithPipeline>,
-    Comparable<ContainerWithPipeline> {
-
-  private final ContainerInfo containerInfo;
-  private final Pipeline pipeline;
-
-  public ContainerWithPipeline(ContainerInfo containerInfo, Pipeline pipeline) {
-    this.containerInfo = containerInfo;
-    this.pipeline = pipeline;
-  }
-
-  public ContainerInfo getContainerInfo() {
-    return containerInfo;
-  }
-
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  public static ContainerWithPipeline fromProtobuf(
-      HddsProtos.ContainerWithPipeline allocatedContainer)
-      throws UnknownPipelineStateException {
-    return new ContainerWithPipeline(
-        ContainerInfo.fromProtobuf(allocatedContainer.getContainerInfo()),
-        Pipeline.getFromProtobuf(allocatedContainer.getPipeline()));
-  }
-
-  public HddsProtos.ContainerWithPipeline getProtobuf()
-      throws UnknownPipelineStateException {
-    HddsProtos.ContainerWithPipeline.Builder builder =
-        HddsProtos.ContainerWithPipeline.newBuilder();
-    builder.setContainerInfo(getContainerInfo().getProtobuf())
-        .setPipeline(getPipeline().getProtobufMessage());
-
-    return builder.build();
-  }
-
-
-  @Override
-  public String toString() {
-    return containerInfo.toString() + " | " + pipeline.toString();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    ContainerWithPipeline that = (ContainerWithPipeline) o;
-
-    return new EqualsBuilder()
-        .append(getContainerInfo(), that.getContainerInfo())
-        .append(getPipeline(), that.getPipeline())
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(11, 811)
-        .append(getContainerInfo())
-        .append(getPipeline())
-        .toHashCode();
-  }
-
-  /**
-   * Compares its two arguments for order.  Returns a negative integer, zero, or
-   * a positive integer as the first argument is less than, equal to, or greater
-   * than the second.<p>
-   *
-   * @param o1 the first object to be compared.
-   * @param o2 the second object to be compared.
-   * @return a negative integer, zero, or a positive integer as the first
-   * argument is less than, equal to, or greater than the second.
-   * @throws NullPointerException if an argument is null and this comparator
-   *                              does not permit null arguments
-   * @throws ClassCastException   if the arguments' types prevent them from
-   *                              being compared by this comparator.
-   */
-  @Override
-  public int compare(ContainerWithPipeline o1, ContainerWithPipeline o2) {
-    return o1.getContainerInfo().compareTo(o2.getContainerInfo());
-  }
-
-  /**
-   * Compares this object with the specified object for order.  Returns a
-   * negative integer, zero, or a positive integer as this object is less than,
-   * equal to, or greater than the specified object.
-   *
-   * @param o the object to be compared.
-   * @return a negative integer, zero, or a positive integer as this object is
-   * less than, equal to, or greater than the specified object.
-   * @throws NullPointerException if the specified object is null
-   * @throws ClassCastException   if the specified object's type prevents it
-   *                              from being compared to this object.
-   */
-  @Override
-  public int compareTo(ContainerWithPipeline o) {
-    return this.compare(this, o);
-  }
-
-}
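[Reference sketch, not part of the patch] The protobuf round trip supported by
the methods above; cwp is an assumed ContainerWithPipeline, and both conversions
can throw UnknownPipelineStateException:

    HddsProtos.ContainerWithPipeline proto = cwp.getProtobuf();
    ContainerWithPipeline back = ContainerWithPipeline.fromProtobuf(proto);
    int cmp = cwp.compareTo(back);   // ordering delegates to the wrapped ContainerInfo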
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java
deleted file mode 100644
index 5f5aace..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.client.BlockID;
-
-import static org.apache.hadoop.hdds.protocol.proto
-    .ScmBlockLocationProtocolProtos.DeleteScmBlockResult;
-
-/**
- * Class wraps storage container manager block deletion results.
- */
-public class DeleteBlockResult {
-  private BlockID blockID;
-  private DeleteScmBlockResult.Result result;
-
-  public DeleteBlockResult(final BlockID blockID,
-      final DeleteScmBlockResult.Result result) {
-    this.blockID = blockID;
-    this.result = result;
-  }
-
-  /**
-   * Get the id of the deleted block.
-   * @return block id.
-   */
-  public BlockID getBlockID() {
-    return blockID;
-  }
-
-  /**
-   * Get the block deletion result.
-   * @return block deletion result.
-   */
-  public DeleteScmBlockResult.Result getResult() {
-    return result;
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
deleted file mode 100644
index eb215d6..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Collection;
-
-/**
- * This class contains the set of datanodes, containers and pipelines which
- * the Ozone client provides to SCM with a block allocation request.
- */
-public class ExcludeList {
-
-  private final List<DatanodeDetails> datanodes;
-  private final List<ContainerID> containerIds;
-  private final List<PipelineID> pipelineIds;
-
-
-  public ExcludeList() {
-    datanodes = new ArrayList<>();
-    containerIds = new ArrayList<>();
-    pipelineIds = new ArrayList<>();
-  }
-
-  public List<ContainerID> getContainerIds() {
-    return containerIds;
-  }
-
-  public List<DatanodeDetails> getDatanodes() {
-    return datanodes;
-  }
-
-  public void addDatanodes(Collection<DatanodeDetails> dns) {
-    datanodes.addAll(dns);
-  }
-
-  public void addDatanode(DatanodeDetails dn) {
-    datanodes.add(dn);
-  }
-
-  public void addConatinerId(ContainerID containerId) {
-    containerIds.add(containerId);
-  }
-
-  public void addPipeline(PipelineID pipelineId) {
-    pipelineIds.add(pipelineId);
-  }
-
-  public List<PipelineID> getPipelineIds() {
-    return pipelineIds;
-  }
-
-  public HddsProtos.ExcludeListProto getProtoBuf() {
-    HddsProtos.ExcludeListProto.Builder builder =
-        HddsProtos.ExcludeListProto.newBuilder();
-    containerIds
-        .forEach(id -> builder.addContainerIds(id.getId()));
-    datanodes.forEach(dn -> {
-      builder.addDatanodes(dn.getUuidString());
-    });
-    pipelineIds.forEach(pipelineID -> {
-      builder.addPipelineIds(pipelineID.getProtobuf());
-    });
-    return builder.build();
-  }
-
-  public static ExcludeList getFromProtoBuf(
-      HddsProtos.ExcludeListProto excludeListProto) {
-    ExcludeList excludeList = new ExcludeList();
-    excludeListProto.getContainerIdsList().forEach(id -> {
-      excludeList.addConatinerId(ContainerID.valueof(id));
-    });
-    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
-    excludeListProto.getDatanodesList().forEach(dn -> {
-      builder.setUuid(dn);
-      excludeList.addDatanode(builder.build());
-    });
-    excludeListProto.getPipelineIdsList().forEach(pipelineID -> {
-      excludeList.addPipeline(PipelineID.getFromProtobuf(pipelineID));
-    });
-    return excludeList;
-  }
-
-  public void clear() {
-    datanodes.clear();
-    containerIds.clear();
-    pipelineIds.clear();
-  }
-}
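[Reference sketch, not part of the patch] How an Ozone client might populate an
ExcludeList before retrying an allocation; deadDn and stalePipelineId are assumed
to be in scope, and the addConatinerId spelling is the API's own:

    ExcludeList exclude = new ExcludeList();
    exclude.addDatanode(deadDn);                        // DatanodeDetails that failed a write
    exclude.addConatinerId(ContainerID.valueof(7L));    // container to avoid
    exclude.addPipeline(stalePipelineId);               // PipelineID to avoid
    HddsProtos.ExcludeListProto proto = exclude.getProtoBuf();
    ExcludeList copy = ExcludeList.getFromProtoBuf(proto);  // round trip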
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
deleted file mode 100644
index 1378d1a..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-/**
- * Exception thrown when a container is in an invalid state during I/O.
- */
-public class InvalidContainerStateException extends StorageContainerException {
-
-  /**
-   * Constructs an {@code InvalidContainerStateException} with the specified
-   * detail message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the {@link #getMessage()} method)
-   */
-  public InvalidContainerStateException(String message) {
-    super(message, ContainerProtos.Result.INVALID_CONTAINER_STATE);
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
deleted file mode 100644
index f1405ff..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-import java.io.IOException;
-
-/**
- * Exception thrown from the Storage Container.
- */
-public class StorageContainerException extends IOException {
-  private ContainerProtos.Result result;
-
-  /**
-   * Constructs a {@code StorageContainerException} with {@code null}
-   * as its error detail message.
-   */
-  public StorageContainerException(ContainerProtos.Result result) {
-    this.result = result;
-  }
-
-  /**
-   * Constructs a {@code StorageContainerException} with the specified
-   * detail message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the {@link #getMessage()} method)
-   * @param result - The result code
-   */
-  public StorageContainerException(String message,
-      ContainerProtos.Result result) {
-    super(message);
-    this.result = result;
-  }
-
-  /**
-   * Constructs a {@code StorageContainerException} with the specified
-   * detail message
-   * and cause.
-   * <p>
-   * <p> Note that the detail message associated with {@code cause} is
-   * <i>not</i> automatically incorporated into this exception's detail
-   * message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the {@link #getMessage()} method)
-   *
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   *
-   * @param result - The result code
-   * @since 1.6
-   */
-  public StorageContainerException(String message, Throwable cause,
-      ContainerProtos.Result result) {
-    super(message, cause);
-    this.result = result;
-  }
-
-  /**
-   * Constructs a {@code StorageContainerException} with the specified cause
-   * and a
-   * detail message of {@code (cause==null ? null : cause.toString())}
-   * (which typically contains the class and detail message of {@code cause}).
-   * This constructor is useful for IO exceptions that are little more
-   * than wrappers for other throwables.
-   *
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   * @param result - The result code
-   * @since 1.6
-   */
-  public StorageContainerException(Throwable cause, ContainerProtos.Result
-      result) {
-    super(cause);
-    this.result = result;
-  }
-
-  /**
-   * Returns Result.
-   *
-   * @return Result.
-   */
-  public ContainerProtos.Result getResult() {
-    return result;
-  }
-
-
-}
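[Reference sketch, not part of the patch] The result code travels with
StorageContainerException, so callers can branch without parsing messages;
writeChunk is a hypothetical datanode I/O call:

    try {
      writeChunk();
    } catch (StorageContainerException e) {
      if (e.getResult() == ContainerProtos.Result.CONTAINER_NOT_OPEN) {
        // e.g. refresh the pipeline and retry on another replica
      }
    }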
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java
deleted file mode 100644
index ffe0d3d..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-/**
- Contains protocol buffer helper classes and utilities used in
- the implementation.
- **/
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
deleted file mode 100644
index d13dcb1..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java
deleted file mode 100644
index 52ce796..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * A ContainerPlacementPolicy supports choosing datanodes to build a
- * replication pipeline under specified constraints.
- */
-public interface ContainerPlacementPolicy {
-
-  /**
-   * Given the number of nodes and the size required, return a set of
-   * datanodes that satisfies the node and size requirements.
-   *
-   * @param excludedNodes - list of nodes to be excluded.
-   * @param favoredNodes - list of nodes preferred.
-   * @param nodesRequired - number of datanodes required.
-   * @param sizeRequired - size required for the container or block.
-   * @return list of datanodes chosen.
-   * @throws IOException
-   */
-  List<DatanodeDetails> chooseDatanodes(List<DatanodeDetails> excludedNodes,
-      List<DatanodeDetails> favoredNodes, int nodesRequired, long sizeRequired)
-      throws IOException;
-}
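[Reference sketch, not part of the patch] A deliberately naive implementation of
the interface above, just to show the contract; it ignores favoredNodes and
sizeRequired, and every name other than the interface's is hypothetical:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;

    public class FirstFitPlacementPolicy implements ContainerPlacementPolicy {
      private final List<DatanodeDetails> healthyNodes;  // assumed injected

      public FirstFitPlacementPolicy(List<DatanodeDetails> healthyNodes) {
        this.healthyNodes = healthyNodes;
      }

      @Override
      public List<DatanodeDetails> chooseDatanodes(
          List<DatanodeDetails> excludedNodes, List<DatanodeDetails> favoredNodes,
          int nodesRequired, long sizeRequired) throws IOException {
        List<DatanodeDetails> chosen = new ArrayList<>();
        for (DatanodeDetails dn : healthyNodes) {
          if (excludedNodes == null || !excludedNodes.contains(dn)) {
            chosen.add(dn);
          }
          if (chosen.size() == nodesRequired) {
            return chosen;   // first nodesRequired non-excluded nodes
          }
        }
        throw new IOException("Could not find " + nodesRequired + " datanodes");
      }
    }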
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java
deleted file mode 100644
index dac4752..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-/**
- Contains container placement policy interface definition.
- **/
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
deleted file mode 100644
index db1f82a..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.exceptions;
-
-import java.io.IOException;
-
-/**
- * Exception thrown by SCM.
- */
-public class SCMException extends IOException {
-  private final ResultCodes result;
-
-  /**
-   * Constructs an {@code SCMException} with {@code null}
-   * as its error detail message.
-   */
-  public SCMException(ResultCodes result) {
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code SCMException} with the specified detail message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the
-   * {@link #getMessage()} method)
-   */
-  public SCMException(String message, ResultCodes result) {
-    super(message);
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code SCMException} with the specified detail message
-   * and cause.
-   * <p>
-   * <p> Note that the detail message associated with {@code cause} is
-   * <i>not</i> automatically incorporated into this exception's detail
-   * message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the
-   * {@link #getMessage()} method)
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   * @since 1.6
-   */
-  public SCMException(String message, Throwable cause, ResultCodes result) {
-    super(message, cause);
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code SCMException} with the specified cause and a
-   * detail message of {@code (cause==null ? null : cause.toString())}
-   * (which typically contains the class and detail message of {@code cause}).
-   * This constructor is useful for IO exceptions that are little more
-   * than wrappers for other throwables.
-   *
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   * @since 1.6
-   */
-  public SCMException(Throwable cause, ResultCodes result) {
-    super(cause);
-    this.result = result;
-  }
-
-  /**
-   * Returns resultCode.
-   * @return ResultCode
-   */
-  public ResultCodes getResult() {
-    return result;
-  }
-
-  /**
-   * Error codes to make it easy to decode these exceptions.
-   */
-  public enum ResultCodes {
-    OK,
-    FAILED_TO_LOAD_NODEPOOL,
-    FAILED_TO_FIND_NODE_IN_POOL,
-    FAILED_TO_FIND_HEALTHY_NODES,
-    FAILED_TO_FIND_NODES_WITH_SPACE,
-    FAILED_TO_FIND_SUITABLE_NODE,
-    INVALID_CAPACITY,
-    INVALID_BLOCK_SIZE,
-    SAFE_MODE_EXCEPTION,
-    FAILED_TO_LOAD_OPEN_CONTAINER,
-    FAILED_TO_ALLOCATE_CONTAINER,
-    FAILED_TO_CHANGE_CONTAINER_STATE,
-    FAILED_TO_CHANGE_PIPELINE_STATE,
-    CONTAINER_EXISTS,
-    FAILED_TO_FIND_CONTAINER,
-    FAILED_TO_FIND_CONTAINER_WITH_SPACE,
-    BLOCK_EXISTS,
-    FAILED_TO_FIND_BLOCK,
-    IO_EXCEPTION,
-    UNEXPECTED_CONTAINER_STATE,
-    SCM_NOT_INITIALIZED,
-    DUPLICATE_DATANODE,
-    NO_SUCH_DATANODE,
-    NO_REPLICA_FOUND,
-    FAILED_TO_FIND_ACTIVE_PIPELINE,
-    FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY,
-    FAILED_TO_ALLOCATE_ENOUGH_BLOCKS,
-    INTERNAL_ERROR
-  }
-}
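[Reference sketch, not part of the patch] Decoding an SCM failure via ResultCodes
rather than message strings; scmClient.allocateContainer is a hypothetical call:

    try {
      scmClient.allocateContainer();
    } catch (SCMException e) {
      if (e.getResult() == SCMException.ResultCodes.SAFE_MODE_EXCEPTION) {
        // SCM is still in safe mode; back off and retry later
      }
    }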
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java
deleted file mode 100644
index 721a32b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.exceptions;
-/**
- Exception objects for the SCM Server.
- */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java
deleted file mode 100644
index 6cf73bf..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import java.util.Collection;
-import java.util.List;
-
-/**
- * The interface defines an inner node in a network topology.
- * An inner node represents network topology entities, such as data center,
- * rack, switch or logical group.
- */
-public interface InnerNode extends Node {
-  /** A factory interface to get new InnerNode instance. */
-  interface Factory<N extends InnerNode> {
-    /** Construct an InnerNode from name, location, parent, level and cost. */
-    N newInnerNode(String name, String location, InnerNode parent, int level,
-        int cost);
-  }
-
-  /**
-   * Add node <i>n</i> to the subtree of this node.
-   * @param n node to be added
-   * @return true if the node is added; false otherwise
-   */
-  boolean add(Node n);
-
-  /**
-   * Remove node <i>n</i> from the subtree of this node.
-   * @param n node to be deleted
-   */
-  void remove(Node n);
-
-  /**
-   * Given a node's string representation, return a reference to the node.
-   * @param loc string location of the format /dc/rack/nodegroup/node
-   * @return null if the node is not found
-   */
-  Node getNode(String loc);
-
-  /**
-   * @return the number of all its nodes at level <i>level</i>. Here level is
-   * a relative level: level 1 means the node itself, level 2 means its direct
-   * children, and so on.
-   **/
-  int getNumOfNodes(int level);
-
-  /**
-   * Get <i>leafIndex</i> leaf of this subtree.
-   *
-   * @param leafIndex an indexed leaf of the node
-   * @return the leaf node corresponding to the given index.
-   */
-  Node getLeaf(int leafIndex);
-
-  /**
-   * Get <i>leafIndex</i> leaf of this subtree.
-   *
-   * @param leafIndex node's index, starting from 0, skipping the nodes in
-   *                  excludedScope and excludedNodes with ancestorGen
-   * @param excludedScopes the excluded scopes
-   * @param excludedNodes nodes to be excluded. If ancestorGen is not 0,
-   *                      the chosen node will not share same ancestor with
-   *                      those in excluded nodes at the specified generation
-   * @param ancestorGen ignored when the value is 0
-   * @return the leaf node corresponding to the given index
-   */
-  Node getLeaf(int leafIndex, List<String> excludedScopes,
-      Collection<Node> excludedNodes, int ancestorGen);
-}
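
Note: getNumOfLeaves() is inherited from the Node interface, so the indexed
getLeaf(int) above is enough to enumerate a whole subtree. A hedged sketch
(helper name hypothetical, not part of the deleted code):

    static java.util.List<Node> leavesOf(InnerNode root) {
      java.util.List<Node> leaves = new java.util.ArrayList<>();
      for (int i = 0; i < root.getNumOfLeaves(); i++) {
        leaves.add(root.getLeaf(i));   // plain index walk, no filtering
      }
      return leaves;
    }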
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
deleted file mode 100644
index f2183fc..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
+++ /dev/null
@@ -1,509 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import com.google.common.base.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR;
-
-/**
- * A thread safe class that implements InnerNode interface.
- */
-public class InnerNodeImpl extends NodeImpl implements InnerNode {
-  protected static class Factory implements InnerNode.Factory<InnerNodeImpl> {
-    protected Factory() {}
-
-    public InnerNodeImpl newInnerNode(String name, String location,
-        InnerNode parent, int level, int cost) {
-      return new InnerNodeImpl(name, location, parent, level, cost);
-    }
-  }
-
-  static final Factory FACTORY = new Factory();
-  // a map from a node's network name to the Node, for quick lookup while
-  // keeping insertion order
-  private final Map<String, Node> childrenMap = new LinkedHashMap<>();
-  // number of descendant leaves under this node
-  private int numOfLeaves;
-  // LOGGER
-  public static final Logger LOG = LoggerFactory.getLogger(InnerNodeImpl.class);
-
-  /**
-   * Construct an InnerNode from its name, network location, parent, level and
-   * its cost.
-   **/
-  protected InnerNodeImpl(String name, String location, InnerNode parent,
-      int level, int cost) {
-    super(name, location, parent, level, cost);
-  }
-
-  /** @return the number of children this node has */
-  private int getNumOfChildren() {
-    return childrenMap.size();
-  }
-
-  /** @return the number of its leaf nodes */
-  @Override
-  public int getNumOfLeaves() {
-    return numOfLeaves;
-  }
-
-  /**
-   * @return the number of all its nodes at level <i>level</i>. Here level is
-   * a relative level: level 1 means the node itself, level 2 means its direct
-   * children, and so on.
-   **/
-  public int getNumOfNodes(int level) {
-    Preconditions.checkArgument(level > 0);
-    int count = 0;
-    if (level == 1) {
-      count += 1;
-    } else if (level == 2) {
-      count += getNumOfChildren();
-    } else {
-      for (Node node: childrenMap.values()) {
-        if (node instanceof InnerNode) {
-          count += ((InnerNode) node).getNumOfNodes(level - 1);
-        } else {
-          throw new RuntimeException("Cannot support Level:" + level +
-              " on this node " + this.toString());
-        }
-      }
-    }
-    return count;
-  }
-
-  /**
-   * Judge if this node is the direct parent of leaf nodes.
-   * @return true if this node's children are leaf nodes
-   */
-  private boolean isLeafParent() {
-    if (childrenMap.isEmpty()) {
-      return true;
-    }
-    Node child = childrenMap.values().iterator().next();
-    return !(child instanceof InnerNode);
-  }
-
-  /**
-   * Judge if this node is the parent of node <i>node</i>.
-   * @param node a node
-   * @return true if this node is the parent of <i>node</i>
-   */
-  private boolean isParent(Node node) {
-    return node.getNetworkLocation().equals(this.getNetworkFullPath());
-  }
-
-  /**
-   * Add node <i>node</i> to the subtree of this node.
-   * @param node node to be added
-   * @return true if the node is added, false if it is only updated
-   */
-  public boolean add(Node node) {
-    if (!isAncestor(node)) {
-      throw new IllegalArgumentException(node.getNetworkName()
-          + ", which is located at " + node.getNetworkLocation()
-          + ", is not a descendant of " + this.getNetworkFullPath());
-    }
-    if (isParent(node)) {
-      // this node is the parent, then add it directly
-      node.setParent(this);
-      node.setLevel(this.getLevel() + 1);
-      Node current = childrenMap.put(node.getNetworkName(), node);
-      if (current != null) {
-        return false;
-      }
-    } else {
-      // find the next level ancestor node
-      String ancestorName = getNextLevelAncestorName(node);
-      InnerNode childNode = (InnerNode)childrenMap.get(ancestorName);
-      if (childNode == null) {
-        // create a new InnerNode for this ancestor node
-        childNode = createChildNode(ancestorName);
-        childrenMap.put(childNode.getNetworkName(), childNode);
-      }
-      // add node to the subtree of the next ancestor node
-      if (!childNode.add(node)) {
-        return false;
-      }
-    }
-    numOfLeaves++;
-    return true;
-  }
-
-  /**
-   * Remove node <i>node</i> from the subtree of this node.
-   * @param node node to be deleted
-   */
-  public void remove(Node node) {
-    if (!isAncestor(node)) {
-      throw new IllegalArgumentException(node.getNetworkName()
-          + ", which is located at " + node.getNetworkLocation()
-          + ", is not a descendant of " + this.getNetworkFullPath());
-    }
-    if (isParent(node)) {
-      // this node is the parent, remove it directly
-      if (childrenMap.containsKey(node.getNetworkName())) {
-        childrenMap.remove(node.getNetworkName());
-        node.setParent(null);
-      } else {
-        throw new RuntimeException("Should not come to here. Node:" +
-            node.getNetworkFullPath() + ", Parent:" +
-            this.getNetworkFullPath());
-      }
-    } else {
-      // find the next ancestor node
-      String ancestorName = getNextLevelAncestorName(node);
-      InnerNodeImpl childNode = (InnerNodeImpl)childrenMap.get(ancestorName);
-      Preconditions.checkNotNull(childNode, "InnerNode is deleted before leaf");
-      // remove node from the parent node
-      childNode.remove(node);
-      // if the parent node has no children, remove the parent node too
-      if (childNode.getNumOfChildren() == 0) {
-        childrenMap.remove(ancestorName);
-      }
-    }
-    numOfLeaves--;
-  }
-
-  /**
-   * Given a node's string representation, return a reference to the node.
-   * Node can be leaf node or inner node.
-   *
-   * @param loc string location of a node. If loc starts with "/", it's an
-   *            absolute path, otherwise a relative path. The following
-   *            examples are all accepted:
-   *            1.  /dc1/rm1/rack1          -> an inner node
-   *            2.  /dc1/rm1/rack1/node1    -> a leaf node
-   *            3.  rack1/node1             -> a relative path to this node
-   *
-   * @return null if the node is not found
-   */
-  public Node getNode(String loc) {
-    if (loc == null) {
-      return null;
-    }
-
-    String fullPath = this.getNetworkFullPath();
-    if (loc.equalsIgnoreCase(fullPath)) {
-      return this;
-    }
-
-    // remove current node's location from loc when it's an absolute path
-    if (fullPath.equals(NetConstants.PATH_SEPARATOR_STR)) {
-      // current node is ROOT
-      if (loc.startsWith(PATH_SEPARATOR_STR)) {
-        loc = loc.substring(1);
-      }
-    } else if (loc.startsWith(fullPath)) {
-      loc = loc.substring(fullPath.length());
-      // skip the separator "/"
-      loc = loc.substring(1);
-    }
-
-    String[] path = loc.split(PATH_SEPARATOR_STR, 2);
-    Node child = childrenMap.get(path[0]);
-    if (child == null) {
-      return null;
-    }
-    if (path.length == 1) {
-      return child;
-    }
-    if (child instanceof InnerNode) {
-      return ((InnerNode)child).getNode(path[1]);
-    } else {
-      return null;
-    }
-  }
-
-  /**
-   * Get <i>leafIndex</i> leaf of this subtree.
-   *
-   * @param leafIndex an indexed leaf of the node
-   * @return the leaf node corresponding to the given index.
-   */
-  public Node getLeaf(int leafIndex) {
-    Preconditions.checkArgument(leafIndex >= 0);
-    // children are leaves
-    if (isLeafParent()) {
-      // range check
-      if (leafIndex >= getNumOfChildren()) {
-        return null;
-      }
-      return getChildNode(leafIndex);
-    } else {
-      for (Node node : childrenMap.values()) {
-        InnerNodeImpl child = (InnerNodeImpl)node;
-        int leafCount = child.getNumOfLeaves();
-        if (leafIndex < leafCount) {
-          return child.getLeaf(leafIndex);
-        } else {
-          leafIndex -= leafCount;
-        }
-      }
-      return null;
-    }
-  }
-
-  /**
-   * Get <i>leafIndex</i> leaf of this subtree.
-   *
-   * @param leafIndex node's index, starting from 0, skipping the nodes in
-   *                 excludedScope and excludedNodes with ancestorGen
-   * @param excludedScopes the excluded scopes
-   * @param excludedNodes nodes to be excluded. If ancestorGen is not 0,
-   *                      the chosen node will not share the same ancestor with
-   *                      those in excluded nodes at the specified generation
-   * @param ancestorGen  applies to excludedNodes; when the value is 0, no same
-   *                    ancestor enforcement on excludedNodes
-   * @return the leaf node corresponding to the given index.
-   * Example:
-   *
-   *                                /  --- root
-   *                              /  \
-   *                             /    \
-   *                            /      \
-   *                           /        \
-   *                         dc1         dc2
-   *                        / \         / \
-   *                       /   \       /   \
-   *                      /     \     /     \
-   *                    rack1 rack2  rack1  rack2
-   *                   / \     / \  / \     / \
-   *                 n1  n2  n3 n4 n5  n6  n7 n8
-   *
-   *   Input:
-   *   leafIndex = 2
-   *   excludedScope = /dc2/rack2
-   *   excludedNodes = {/dc1/rack1/n1}
-   *   ancestorGen = 1
-   *
-   *   Output:
-   *   node /dc2/rack1/n5
-   *
-   *   Explanation:
-   *   Since excludedNodes is n1 and ancestorGen is 1, nodes under
-   *   /dc1/rack1 are excluded. Given that leafIndex starts from 0, leafIndex 2
-   *   means picking the 3rd available node, which is n5.
-   *
-   */
-  public Node getLeaf(int leafIndex, List<String> excludedScopes,
-      Collection<Node> excludedNodes, int ancestorGen) {
-    Preconditions.checkArgument(leafIndex >= 0 && ancestorGen >= 0);
-    // come to leaf parent layer
-    if (isLeafParent()) {
-      return getLeafOnLeafParent(leafIndex, excludedScopes, excludedNodes);
-    }
-
-    int maxLevel = NodeSchemaManager.getInstance().getMaxLevel();
-    // this node's children, its generation as the ancestor of the leaf node
-    int currentGen = maxLevel - this.getLevel() - 1;
-    // build an ancestor(children) to exclude node count map
-    Map<Node, Integer> countMap =
-        getAncestorCountMap(excludedNodes, ancestorGen, currentGen);
-    // nodes covered by excluded scope
-    Map<String, Integer> excludedNodeCount =
-        getExcludedScopeNodeCount(excludedScopes);
-
-    for (Node child : childrenMap.values()) {
-      int leafCount = child.getNumOfLeaves();
-      // skip nodes covered by excluded scopes
-      for (Map.Entry<String, Integer> entry: excludedNodeCount.entrySet()) {
-        if (entry.getKey().startsWith(child.getNetworkFullPath())) {
-          leafCount -= entry.getValue();
-        }
-      }
-      // skip nodes covered by excluded nodes and ancestorGen
-      Integer count = countMap.get(child);
-      if (count != null) {
-        leafCount -= count;
-      }
-      if (leafIndex < leafCount) {
-        return ((InnerNode)child).getLeaf(leafIndex, excludedScopes,
-            excludedNodes, ancestorGen);
-      } else {
-        leafIndex -= leafCount;
-      }
-    }
-    return null;
-  }
-
-  @Override
-  public boolean equals(Object to) {
-    if (to == null) {
-      return false;
-    }
-    if (this == to) {
-      return true;
-    }
-    return this.toString().equals(to.toString());
-  }
-
-  @Override
-  public int hashCode() {
-    return super.hashCode();
-  }
-
-  /**
-   * Get an ancestor-to-excluded-node-count map.
-   *
-   * @param nodes a collection of leaf nodes to exclude
-   * @param genToExclude  the ancestor generation to exclude
-   * @param genToReturn  the ancestor generation to return the count map
-   * @return the map.
-   * example:
-   *
-   *                *  --- root
-   *              /    \
-   *             *      *   -- genToReturn =2
-   *            / \    / \
-   *          *   *   *   *  -- genToExclude = 1
-   *         /\  /\  /\  /\
-   *       *  * * * * * * *  -- nodes
-   */
-  private Map<Node, Integer> getAncestorCountMap(Collection<Node> nodes,
-      int genToExclude, int genToReturn) {
-    Preconditions.checkState(genToExclude >= 0);
-    Preconditions.checkState(genToReturn >= 0);
-
-    if (nodes == null || nodes.isEmpty()) {
-      return Collections.emptyMap();
-    }
-    // with the recursive call, genToReturn can be smaller than genToExclude
-    if (genToReturn < genToExclude) {
-      genToExclude = genToReturn;
-    }
-    // ancestorToExclude to ancestorToReturn map
-    HashMap<Node, Node> ancestorMap = new HashMap<>();
-    for (Node node: nodes) {
-      Node ancestorToExclude = node.getAncestor(genToExclude);
-      Node ancestorToReturn = node.getAncestor(genToReturn);
-      if (ancestorToExclude == null || ancestorToReturn == null) {
-        LOG.warn("Ancestor not found, node: " + node.getNetworkFullPath() +
-            ", generation to exclude: " + genToExclude +
-            ", generation to return:" + genToReturn);
-        continue;
-      }
-      ancestorMap.put(ancestorToExclude, ancestorToReturn);
-    }
-    // ancestorToReturn to exclude node count map
-    HashMap<Node, Integer> countMap = new HashMap<>();
-    for (Map.Entry<Node, Node> entry : ancestorMap.entrySet()) {
-      countMap.compute(entry.getValue(),
-          (key, n) -> (n == null ? 0 : n) + entry.getKey().getNumOfLeaves());
-    }
-
-    return countMap;
-  }
-
-  /**
-   *  Get the node with leafIndex, considering skip nodes in excludedScope
-   *  and in excludeNodes list.
-   */
-  private Node getLeafOnLeafParent(int leafIndex, List<String> excludedScopes,
-      Collection<Node> excludedNodes) {
-    Preconditions.checkArgument(isLeafParent() && leafIndex >= 0);
-    if (leafIndex >= getNumOfChildren()) {
-      return null;
-    }
-    for (Node node : childrenMap.values()) {
-      if (excludedNodes != null && excludedNodes.contains(node)) {
-        continue;
-      }
-      if (excludedScopes != null && !excludedScopes.isEmpty()) {
-        if (excludedScopes.stream().anyMatch(scope ->
-            node.getNetworkFullPath().startsWith(scope))) {
-          continue;
-        }
-      }
-      if (leafIndex == 0) {
-        return node;
-      }
-      leafIndex--;
-    }
-    return null;
-  }
-
-  /**
-   *  Return the name of this node's child that is an ancestor of node <i>n</i>.
-   */
-  private String getNextLevelAncestorName(Node n) {
-    int parentPathLen = this.getNetworkFullPath().length();
-    String name = n.getNetworkLocation().substring(parentPathLen);
-    if (name.charAt(0) == PATH_SEPARATOR) {
-      name = name.substring(1);
-    }
-    int index = name.indexOf(PATH_SEPARATOR);
-    if (index != -1) {
-      name = name.substring(0, index);
-    }
-    return name;
-  }
-
-  /**
-   * Creates a child node to be added to the list of children.
-   * @param name The name of the child node
-   * @return A new inner node
-   * @see #InnerNodeImpl(String, String, InnerNode, int, int)
-   */
-  private InnerNodeImpl createChildNode(String name) {
-    int childLevel = this.getLevel() + 1;
-    int cost = NodeSchemaManager.getInstance().getCost(childLevel);
-    return new InnerNodeImpl(name, this.getNetworkFullPath(), this, childLevel,
-        cost);
-  }
-
-  /** Get the node at index <i>index</i>. */
-  private Node getChildNode(int index) {
-    Iterator<Node> iterator = childrenMap.values().iterator();
-    Node node = null;
-    while (index >= 0 && iterator.hasNext()) {
-      node = iterator.next();
-      index--;
-    }
-    return node;
-  }
-
-  /** Get how many leaf nodes are covered by the excludedScopes(no overlap). */
-  private Map<String, Integer> getExcludedScopeNodeCount(
-      List<String> excludedScopes) {
-    HashMap<String, Integer> nodeCounts = new HashMap<>();
-    if (excludedScopes == null || excludedScopes.isEmpty()) {
-      return nodeCounts;
-    }
-
-    for (String scope: excludedScopes) {
-      Node excludedScopeNode = getNode(scope);
-      nodeCounts.put(scope, excludedScopeNode == null ? 0 :
-          excludedScopeNode.getNumOfLeaves());
-    }
-    return nodeCounts;
-  }
-}
\ No newline at end of file
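
Note: both getLeaf overloads use one index-walk pattern: subtract each child's
(filtered) leaf count from leafIndex until the index falls inside a child. A
standalone sketch of that arithmetic, detached from the class (hypothetical
helper, not part of the deleted code):

    // Given per-child counts of still-available leaves, return the child that
    // owns leafIndex; the caller recurses into it with the reduced index.
    static int ownerChild(int[] availableLeaves, int leafIndex) {
      for (int child = 0; child < availableLeaves.length; child++) {
        if (leafIndex < availableLeaves[child]) {
          return child;
        }
        leafIndex -= availableLeaves[child];
      }
      return -1;  // fewer available leaves than leafIndex + 1
    }

In the javadoc example above, the root sees available counts {dc1: 2, dc2: 2}
after exclusions, so leafIndex 2 lands in dc2 with a reduced index of 0, and
dc2's rack1 then yields n5.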
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java
deleted file mode 100644
index 0e1b076..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import org.apache.hadoop.hdds.scm.net.NodeSchema.LayerType;
-
-/**
- * Class to hold network topology related constants and configurations.
- */
-public final class NetConstants {
-  private NetConstants() {
-    // Prevent instantiation
-  }
-  public static final char PATH_SEPARATOR = '/';
-  /** Path separator as a string. */
-  public static final String PATH_SEPARATOR_STR = "/";
-  public static final String SCOPE_REVERSE_STR = "~";
-  /** String representation of root. */
-  public static final String ROOT = "";
-  public static final int INNER_NODE_COST_DEFAULT = 1;
-  public static final int NODE_COST_DEFAULT = 0;
-  public static final int ANCESTOR_GENERATION_DEFAULT = 0;
-  public static final int ROOT_LEVEL = 1;
-  public static final String NODE_COST_PREFIX = "$";
-  public static final String DEFAULT_RACK = "/default-rack";
-  public static final String DEFAULT_NODEGROUP = "/default-nodegroup";
-  public static final String DEFAULT_DATACENTER = "/default-datacenter";
-  public static final String DEFAULT_REGION = "/default-dataregion";
-
-  // Built-in network topology node schemas
-  public static final NodeSchema ROOT_SCHEMA =
-      new NodeSchema.Builder().setType(LayerType.ROOT).build();
-
-  public static final NodeSchema REGION_SCHEMA =
-      new NodeSchema.Builder().setType(LayerType.INNER_NODE)
-          .setDefaultName(DEFAULT_REGION).build();
-
-  public static final NodeSchema DATACENTER_SCHEMA =
-      new NodeSchema.Builder().setType(LayerType.INNER_NODE)
-          .setDefaultName(DEFAULT_DATACENTER).build();
-
-  public static final NodeSchema RACK_SCHEMA =
-      new NodeSchema.Builder().setType(LayerType.INNER_NODE)
-          .setDefaultName(DEFAULT_RACK).build();
-
-  public static final NodeSchema NODEGROUP_SCHEMA =
-      new NodeSchema.Builder().setType(LayerType.INNER_NODE)
-          .setDefaultName(DEFAULT_NODEGROUP).build();
-
-  public static final NodeSchema LEAF_SCHEMA =
-      new NodeSchema.Builder().setType(LayerType.LEAF_NODE).build();
-}
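
Note: the *_SCHEMA constants above are prebuilt with the same Builder that a
custom layer would use. A hedged echo of that construction (values mirror the
deleted RACK_SCHEMA):

    NodeSchema rack = new NodeSchema.Builder()
        .setType(NodeSchema.LayerType.INNER_NODE)
        .setDefaultName("/default-rack")
        .build();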
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java
deleted file mode 100644
index 4019b13..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import org.apache.commons.collections.CollectionUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * Utility class to facilitate network topology functions.
- */
-public final class NetUtils {
-  public static final Logger LOG = LoggerFactory.getLogger(NetUtils.class);
-  private NetUtils() {
-    // Prevent instantiation
-  }
-  /**
-   * Normalize a path by stripping off any trailing
-   * {@link NetConstants#PATH_SEPARATOR}.
-   * @param path path to normalize.
-   * @return the normalized path.
-   * If <i>path</i> is empty or null, then {@link NetConstants#ROOT} is returned
-   */
-  public static String normalize(String path) {
-    if (path == null || path.length() == 0) {
-      return NetConstants.ROOT;
-    }
-
-    if (path.charAt(0) != NetConstants.PATH_SEPARATOR) {
-      throw new IllegalArgumentException(
-          "Network Location path does not start with "
-              + NetConstants.PATH_SEPARATOR_STR + ": " + path);
-    }
-
-    // Remove any trailing NetConstants.PATH_SEPARATOR
-    return path.length() == 1 ? path :
-        path.replaceAll(NetConstants.PATH_SEPARATOR_STR + "+$", "");
-  }
-
-  /**
-   *  Given a network topology location string, return its network topology
-   *  depth, e.g. the depth of /dc1/rack1/ng1/node1 is 5.
-   */
-  public static int locationToDepth(String location) {
-    String newLocation = normalize(location);
-    return newLocation.equals(NetConstants.PATH_SEPARATOR_STR) ? 1 :
-        newLocation.split(NetConstants.PATH_SEPARATOR_STR).length;
-  }
-
-
-  /**
-   *  Remove node from mutableExcludedNodes if it's covered by excludedScope.
-   *  Please note that the mutableExcludedNodes content might be changed after
-   *  the function call.
-   */
-  public static void removeDuplicate(NetworkTopology topology,
-      Collection<Node> mutableExcludedNodes, List<String> mutableExcludedScopes,
-      int ancestorGen) {
-    if (CollectionUtils.isEmpty(mutableExcludedNodes) ||
-        CollectionUtils.isEmpty(mutableExcludedScopes) || topology == null) {
-      return;
-    }
-
-    Iterator<Node> iterator = mutableExcludedNodes.iterator();
-    while (iterator.hasNext() && (!mutableExcludedScopes.isEmpty())) {
-      Node node = iterator.next();
-      Node ancestor = topology.getAncestor(node, ancestorGen);
-      if (ancestor == null) {
-        LOG.warn("Fail to get ancestor generation " + ancestorGen +
-            " of node :" + node);
-        continue;
-      }
-      // excludedScope is child of ancestor
-      List<String> duplicateList = mutableExcludedScopes.stream()
-          .filter(scope -> scope.startsWith(ancestor.getNetworkFullPath()))
-          .collect(Collectors.toList());
-      mutableExcludedScopes.removeAll(duplicateList);
-
-      // ancestor is covered by excludedScope
-      mutableExcludedScopes.stream().forEach(scope -> {
-        if (ancestor.getNetworkFullPath().startsWith(scope)) {
-          // remove exclude node if it's covered by excludedScope
-          iterator.remove();
-        }
-      });
-    }
-  }
-
-  /**
-   *  Remove node from mutableExcludedNodes if it's not part of scope.
-   *  Please note that the mutableExcludedNodes content might be changed after
-   *  the function call.
-   */
-  public static void removeOutscope(Collection<Node> mutableExcludedNodes,
-      String scope) {
-    if (CollectionUtils.isEmpty(mutableExcludedNodes) || scope == null) {
-      return;
-    }
-    synchronized (mutableExcludedNodes) {
-      Iterator<Node> iterator = mutableExcludedNodes.iterator();
-      while (iterator.hasNext()) {
-        Node next = iterator.next();
-        if (!next.getNetworkFullPath().startsWith(scope)) {
-          iterator.remove();
-        }
-      }
-    }
-  }
-
-  /**
-   * Get an ancestor list for nodes on generation <i>generation</i>.
-   *
-   * @param nodes a collection of leaf nodes
-   * @param generation  the ancestor generation
-   * @return the ancestor list. If no ancestor is found, then an empty list is
-   * returned.
-   */
-  public static List<Node> getAncestorList(NetworkTopology topology,
-      Collection<Node> nodes, int generation) {
-    List<Node> ancestorList = new ArrayList<>();
-    if (topology == null || CollectionUtils.isEmpty(nodes) ||
-        generation == 0) {
-      return ancestorList;
-    }
-    Iterator<Node> iterator = nodes.iterator();
-    while (iterator.hasNext()) {
-      Node node = iterator.next();
-      Node ancestor = topology.getAncestor(node, generation);
-      if (ancestor == null) {
-        LOG.warn("Fail to get ancestor generation " + generation +
-            " of node :" + node);
-        continue;
-      }
-      if (!ancestorList.contains(ancestor)) {
-        ancestorList.add(ancestor);
-      }
-    }
-    return ancestorList;
-  }
-}
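
Note: expected behavior of the two path helpers, per the javadoc above (return
values follow the deleted implementations):

    String p = NetUtils.normalize("/dc1/rack1///");   // -> "/dc1/rack1"
    String r = NetUtils.normalize("");                // -> NetConstants.ROOT
    int d = NetUtils.locationToDepth("/dc1/rack1/ng1/node1");   // -> 5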
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java
deleted file mode 100644
index 3a2c7c0..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import java.util.Collection;
-import java.util.List;
-
-/**
- * The interface defines a network topology.
- */
-public interface NetworkTopology {
-  /** Exception for invalid network topology detection. */
-  class InvalidTopologyException extends RuntimeException {
-    private static final long serialVersionUID = 1L;
-    public InvalidTopologyException(String msg) {
-      super(msg);
-    }
-  }
-  /**
-   * Add a leaf node. This will be called when a new datanode is added.
-   * @param node node to be added; cannot be null
-   * @exception IllegalArgumentException if the node is added under a leaf, or
-   * the node to be added is not a leaf itself
-   */
-  void add(Node node);
-
-  /**
-   * Remove a node from the network topology. This will be called when an
-   * existing datanode is removed from the system.
-   * @param node node to be removed; cannot be null
-   */
-  void remove(Node node);
-
-  /**
-   * Check if the tree already contains node <i>node</i>.
-   * @param node a node
-   * @return true if <i>node</i> is already in the tree; false otherwise
-   */
-  boolean contains(Node node);
-
-  /**
-   * Compare the direct parent of each node for equality.
-   * @return true if their parents are the same
-   */
-  boolean isSameParent(Node node1, Node node2);
-
-  /**
-   * Compare the specified ancestor generation of each node for equality.
-   * ancestorGen 1 means parent.
-   * @return true if their specified generation ancestors are equal
-   */
-  boolean isSameAncestor(Node node1, Node node2, int ancestorGen);
-
-  /**
-   * Get the ancestor for node on generation <i>ancestorGen</i>.
-   *
-   * @param node the node to get ancestor
-   * @param ancestorGen  the ancestor generation
-   * @return the ancestor. If no ancestor is found, then null is returned.
-   */
-  Node getAncestor(Node node, int ancestorGen);
-
-  /**
-   * Return the max level of this topology, start from 1 for ROOT. For example,
-   * topology like "/rack/node" has the max level '3'.
-   */
-  int getMaxLevel();
-
-  /**
-   * Given a string representation of a node, return its reference.
-   * @param loc a path string representing a node, can be leaf or inner node
-   * @return a reference to the node; null if the node is not in the tree
-   */
-  Node getNode(String loc);
-
-  /**
-   * Given a string representation of an InnerNode, return its leaf node count.
-   * @param loc a path-like string representation of an InnerNode
-   * @return the number of leaf nodes, 0 if it's not an InnerNode or the node
-   * doesn't exist
-   */
-  int getNumOfLeafNode(String loc);
-
-  /**
-   * Return the node numbers at level <i>level</i>.
-   * @param level topology level, start from 1, which means ROOT
-   * @return the number of nodes on the level
-   */
-  int getNumOfNodes(int level);
-
-  /**
-   * Randomly choose a node in the scope.
-   * @param scope range of nodes from which a node will be chosen. If scope
-   *              starts with ~, choose one from all nodes except those in
-   *              <i>scope</i>; otherwise, choose one from <i>scope</i>.
-   * @return the chosen node
-   */
-  Node chooseRandom(String scope);
-
-  /**
-   * Randomly choose a node in the scope, and not in the excluded scopes.
-   * @param scope range of nodes from which a node will be chosen. cannot start
-   *              with ~
-   * @param excludedScopes the chosen nodes cannot be in these ranges. cannot
-   *                      start with ~
-   * @return the chosen node
-   */
-  Node chooseRandom(String scope, List<String>  excludedScopes);
-
-  /**
-   * Randomly choose a leaf node from <i>scope</i>.
-   *
-   * If scope starts with ~, choose one from all nodes except those in
-   * <i>scope</i>; otherwise, choose nodes from <i>scope</i>.
-   * If excludedNodes is given, choose a node that's not in excludedNodes.
-   *
-   * @param scope range of nodes from which a node will be chosen
-   * @param excludedNodes nodes to be excluded
-   *
-   * @return the chosen node
-   */
-  Node chooseRandom(String scope, Collection<Node> excludedNodes);
-
-  /**
-   * Randomly choose a leaf node from <i>scope</i>.
-   *
-   * If scope starts with ~, choose one from all nodes except those in
-   * <i>scope</i>; otherwise, choose nodes from <i>scope</i>.
-   * If excludedNodes is given, choose a node that's not in excludedNodes.
-   *
-   * @param scope range of nodes from which a node will be chosen
-   * @param excludedNodes nodes to be excluded.
-   * @param ancestorGen matters when excludedNodes is not null. It is the
-   * ancestor generation that the chosen node is not allowed to share with the
-   * excludedNodes. For example, if ancestorGen is 1, the chosen node cannot
-   * share the same parent with the excludedNodes; if the value is 2, it cannot
-   * share the same grandparent, and so on. If ancestorGen is 0, it has no
-   * effect.
-   *
-   * @return the chosen node
-   */
-  Node chooseRandom(String scope, Collection<Node> excludedNodes,
-      int ancestorGen);
-
-  /**
-   * Randomly choose one node from <i>scope</i>, sharing the same generation
-   * ancestor with <i>affinityNode</i>, and excluding nodes in
-   * <i>excludeScope</i> and <i>excludeNodes</i>.
-   *
-   * @param scope range of nodes from which a node will be chosen, cannot start
-   *              with ~
-   * @param excludedScopes ranges of nodes to be excluded, cannot start with ~
-   * @param excludedNodes nodes to be excluded
-   * @param affinityNode  when not null, the chosen node should share the same
-   *                     ancestor with this node at generation ancestorGen.
-   *                      Ignored when value is null
-   * @param ancestorGen If 0, then no same generation ancestor enforcement on
-   *                     both excludedNodes and affinityNode. If greater than 0,
-   *                     then apply to affinityNode(if not null), or apply to
-   *                     excludedNodes if affinityNode is null
-   * @return the chosen node
-   */
-  Node chooseRandom(String scope, List<String>  excludedScopes,
-      Collection<Node> excludedNodes, Node affinityNode, int ancestorGen);
-
-  /**
-   * Choose the node at index <i>index</i> from <i>scope</i>, sharing the same
-   * generation ancestor with <i>affinityNode</i>, and excluding nodes in
-   * <i>excludeScope</i> and <i>excludeNodes</i>.
-   *
-   * @param leafIndex node index, exclude nodes in excludedScope and
-   *                  excludedNodes
-   * @param scope range of nodes from which a node will be chosen, cannot start
-   *              with ~
-   * @param excludedScopes ranges of nodes to be excluded, cannot start with ~
-   * @param excludedNodes nodes to be excluded
-   * @param affinityNode  when not null, the chosen node should share the same
-   *                     ancestor with this node at generation ancestorGen.
-   *                      Ignored when value is null
-   * @param ancestorGen If 0, then no same generation ancestor enforcement on
-   *                     both excludedNodes and affinityNode. If greater than 0,
-   *                     then apply to affinityNode(if not null), or apply to
-   *                     excludedNodes if affinityNode is null
-   * @return the chosen node
-   */
-  Node getNode(int leafIndex, String scope, List<String> excludedScopes,
-      Collection<Node> excludedNodes, Node affinityNode, int ancestorGen);
-
-  /** Return the distance cost between two nodes.
-   * The distance cost from one node to its parent is its parent's cost.
-   * The distance cost between two nodes is calculated by summing up their
-   * distance costs to their closest common ancestor.
-   * @param node1 one node
-   * @param node2 another node
-   * @return the distance cost between node1 and node2 which is zero if they
-   * are the same or {@link Integer#MAX_VALUE} if node1 or node2 do not belong
-   * to the cluster
-   */
-  int getDistanceCost(Node node1, Node node2);
-
-  /**
-   * Sort the nodes array by network distance to <i>reader</i> to reduce
-   * network traffic and improve performance.
-   *
-   * As an additional twist, we also randomize the nodes at each network
-   * distance. This helps with load balancing when there is data skew.
-   *
-   * @param reader    Node that needs the data
-   * @param nodes     Available replicas with the requested data
-   * @param activeLen Number of active nodes at the front of the array
-   */
-  List<? extends Node> sortByDistanceCost(Node reader,
-      List<? extends Node> nodes, int activeLen);
-}
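
Note: the "~" prefix on a scope inverts it, and the five-argument chooseRandom
combines scope, exclusions, and affinity. A hedged usage sketch against a
populated NetworkTopology named topology (variable names hypothetical):

    Node any = topology.chooseRandom("/dc1");      // any leaf under /dc1
    Node out = topology.chooseRandom("~/dc1");     // any leaf not under /dc1
    // a leaf sharing any's parent (ancestorGen = 1), excluding any itself:
    Node peer = topology.chooseRandom("/dc1", null,
        java.util.Collections.singleton(any), any, 1);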
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
deleted file mode 100644
index 579e5f7..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
+++ /dev/null
@@ -1,798 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import org.apache.commons.collections.CollectionUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.TreeMap;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.conf.Configuration;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.SCOPE_REVERSE_STR;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.ANCESTOR_GENERATION_DEFAULT;
-
-/**
- * The class represents a cluster of computers with a tree hierarchical
- * network topology. In the network topology, leaves represent data nodes
- * (computers) and inner nodes represent datacenter/core-switches/routers that
- * manage traffic in/out of data centers or racks.
- */
-public class NetworkTopologyImpl implements NetworkTopology {
-  public static final Logger LOG =
-      LoggerFactory.getLogger(NetworkTopology.class);
-
-  /** The InnerNode creation factory. */
-  private final InnerNode.Factory factory;
-  /** The root cluster tree. */
-  private final InnerNode clusterTree;
-  /** Depth of all leaf nodes. */
-  private final int maxLevel;
-  /** Schema manager. */
-  private final NodeSchemaManager schemaManager;
-  /** Lock to coordinate cluster tree access. */
-  private final ReadWriteLock netlock = new ReentrantReadWriteLock(true);
-
-  public NetworkTopologyImpl(Configuration conf) {
-    schemaManager = NodeSchemaManager.getInstance();
-    schemaManager.init(conf);
-    maxLevel = schemaManager.getMaxLevel();
-    factory = InnerNodeImpl.FACTORY;
-    clusterTree = factory.newInnerNode(ROOT, null, null,
-        NetConstants.ROOT_LEVEL,
-        schemaManager.getCost(NetConstants.ROOT_LEVEL));
-  }
-
-  @VisibleForTesting
-  public NetworkTopologyImpl(NodeSchemaManager manager) {
-    schemaManager = manager;
-    maxLevel = schemaManager.getMaxLevel();
-    factory = InnerNodeImpl.FACTORY;
-    clusterTree = factory.newInnerNode(ROOT, null, null,
-        NetConstants.ROOT_LEVEL,
-        schemaManager.getCost(NetConstants.ROOT_LEVEL));
-  }
-
-  /**
-   * Add a leaf node. This will be called when a new datanode is added.
-   * @param node node to be added; cannot be null
-   * @exception IllegalArgumentException if the node is added under a leaf, or
-   * the node to be added is not a leaf itself
-   */
-  public void add(Node node) {
-    Preconditions.checkArgument(node != null, "node cannot be null");
-    if (node instanceof InnerNode) {
-      throw new IllegalArgumentException(
-          "Not allowed to add an inner node: "+ node.getNetworkFullPath());
-    }
-    int newDepth = NetUtils.locationToDepth(node.getNetworkLocation()) + 1;
-
-    // Check depth
-    if (maxLevel != newDepth) {
-      throw new InvalidTopologyException("Failed to add " +
-          node.getNetworkFullPath() + ": Its path depth is not " + maxLevel);
-    }
-    netlock.writeLock().lock();
-    boolean add;
-    try {
-      add = clusterTree.add(node);
-    } finally {
-      netlock.writeLock().unlock();
-    }
-
-    if (add) {
-      LOG.info("Added a new node: " + node.getNetworkFullPath());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("NetworkTopology became:\n{}", this);
-      }
-    }
-  }
-
-  /**
-   * Remove a node from the network topology. This will be called when an
-   * existing datanode is removed from the system.
-   * @param node node to be removed; cannot be null
-   */
-  public void remove(Node node) {
-    Preconditions.checkArgument(node != null, "node cannot be null");
-    if (node instanceof InnerNode) {
-      throw new IllegalArgumentException(
-          "Not allowed to remove an inner node: "+ node.getNetworkFullPath());
-    }
-    netlock.writeLock().lock();
-    try {
-      clusterTree.remove(node);
-    } finally {
-      netlock.writeLock().unlock();
-    }
-    LOG.info("Removed a node: " + node.getNetworkFullPath());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("NetworkTopology became:\n{}", this);
-    }
-  }
-
-  /**
-   * Check if the tree already contains node <i>node</i>.
-   * @param node a node
-   * @return true if <i>node</i> is already in the tree; false otherwise
-   */
-  public boolean contains(Node node) {
-    Preconditions.checkArgument(node != null, "node cannot be null");
-    netlock.readLock().lock();
-    try {
-      Node parent = node.getParent();
-      while (parent != null && parent != clusterTree) {
-        parent = parent.getParent();
-      }
-      if (parent == clusterTree) {
-        return true;
-      }
-    } finally {
-      netlock.readLock().unlock();
-    }
-    return false;
-  }
-
-  /**
-   * Compare the specified ancestor generation of each node for equality.
-   * @return true if their specified generation ancestors are equal
-   */
-  public boolean isSameAncestor(Node node1, Node node2, int ancestorGen) {
-    if (node1 == null || node2 == null || ancestorGen <= 0) {
-      return false;
-    }
-    netlock.readLock().lock();
-    try {
-      return node1.getAncestor(ancestorGen) == node2.getAncestor(ancestorGen);
-    } finally {
-      netlock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Compare the direct parent of each node for equality.
-   * @return true if their parents are the same
-   */
-  public boolean isSameParent(Node node1, Node node2) {
-    if (node1 == null || node2 == null) {
-      return false;
-    }
-    netlock.readLock().lock();
-    try {
-      node1 = node1.getParent();
-      node2 = node2.getParent();
-      return node1 == node2;
-    } finally {
-      netlock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Get the ancestor for node on generation <i>ancestorGen</i>.
-   *
-   * @param node the node to get ancestor
-   * @param ancestorGen  the ancestor generation
-   * @return the ancestor. If no ancestor is found, then null is returned.
-   */
-  public Node getAncestor(Node node, int ancestorGen) {
-    if (node == null) {
-      return null;
-    }
-    netlock.readLock().lock();
-    try {
-      return node.getAncestor(ancestorGen);
-    } finally {
-      netlock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Given a string representation of a node (leaf or inner), return its
-   * reference.
-   * @param loc a path string representing a node, can be leaf or inner node
-   * @return a reference to the node, null if the node is not in the tree
-   */
-  public Node getNode(String loc) {
-    loc = NetUtils.normalize(loc);
-    netlock.readLock().lock();
-    try {
-      if (!ROOT.equals(loc)) {
-        return clusterTree.getNode(loc);
-      } else {
-        return clusterTree;
-      }
-    } finally {
-      netlock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Given a string representation of Node, return its leaf nodes count.
-   * @param loc a path-like string representation of Node
-   * @return the number of leaf nodes for InnerNode, 1 for leaf node, 0 if node
-   * doesn't exist
-   */
-  public int getNumOfLeafNode(String loc) {
-    netlock.readLock().lock();
-    try {
-      Node node = getNode(loc);
-      if (node != null) {
-        return node.getNumOfLeaves();
-      }
-    } finally {
-      netlock.readLock().unlock();
-    }
-    return 0;
-  }
-
-  /**
-   * Return the max level of this tree, start from 1 for ROOT. For example,
-   * topology like "/rack/node" has the max level '3'.
-   */
-  public int getMaxLevel() {
-    return maxLevel;
-  }
-
-  /**
-   * Return the node numbers at level <i>level</i>.
-   * @param level topology level, start from 1, which means ROOT
-   * @return the number of nodes on the level
-   */
-  public int getNumOfNodes(int level) {
-    Preconditions.checkArgument(level > 0 && level <= maxLevel,
-        "Invalid level");
-    netlock.readLock().lock();
-    try {
-      return clusterTree.getNumOfNodes(level);
-    } finally {
-      netlock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Randomly choose a node in the scope.
-   * @param scope range of nodes from which a node will be chosen. If scope
-   *              starts with ~, choose one from all nodes except those in
-   *              <i>scope</i>; otherwise, choose one from <i>scope</i>.
-   * @return the chosen node
-   */
-  public Node chooseRandom(String scope) {
-    if (scope == null) {
-      scope = ROOT;
-    }
-    if (scope.startsWith(SCOPE_REVERSE_STR)) {
-      ArrayList<String> excludedScopes = new ArrayList<>();
-      excludedScopes.add(scope.substring(1));
-      return chooseRandom(ROOT, excludedScopes, null, null,
-          ANCESTOR_GENERATION_DEFAULT);
-    } else {
-      return chooseRandom(scope, null, null, null, ANCESTOR_GENERATION_DEFAULT);
-    }
-  }
-
-  /**
-   * Randomly choose a node in the scope, and not in the excluded scopes.
-   * @param scope range of nodes from which a node will be chosen. cannot start
-   *              with ~
-   * @param excludedScopes the chosen node cannot be in these ranges. cannot
-   *                      start with ~
-   * @return the chosen node
-   */
-  public Node chooseRandom(String scope, List<String> excludedScopes) {
-    return chooseRandom(scope, excludedScopes, null, null,
-        ANCESTOR_GENERATION_DEFAULT);
-  }
-
-  /**
-   * Randomly choose a leaf node from <i>scope</i>.
-   *
-   * If scope starts with ~, choose one from all nodes except those in
-   * <i>scope</i>; otherwise, choose nodes from <i>scope</i>.
-   * If excludedNodes is given, choose a node that's not in excludedNodes.
-   *
-   * @param scope range of nodes from which a node will be chosen
-   * @param excludedNodes nodes to be excluded
-   *
-   * @return the chosen node
-   */
-  public Node chooseRandom(String scope, Collection<Node> excludedNodes) {
-    if (scope == null) {
-      scope = ROOT;
-    }
-    if (scope.startsWith(SCOPE_REVERSE_STR)) {
-      ArrayList<String> excludedScopes = new ArrayList<>();
-      excludedScopes.add(scope.substring(1));
-      return chooseRandom(ROOT, excludedScopes, excludedNodes, null,
-          ANCESTOR_GENERATION_DEFAULT);
-    } else {
-      return chooseRandom(scope, null, excludedNodes, null,
-          ANCESTOR_GENERATION_DEFAULT);
-    }
-  }
-
-  /**
-   * Randomly choose a leaf node from <i>scope</i>.
-   *
-   * If scope starts with ~, choose one from all nodes except those in
-   * <i>scope</i>; otherwise, choose nodes from <i>scope</i>.
-   * If excludedNodes is given, choose a node that's not in excludedNodes.
-   *
-   * @param scope range of nodes from which a node will be chosen
-   * @param excludedNodes nodes to be excluded.
-   * @param ancestorGen matters when excludedNodes is not null. It is the
-   * ancestor generation that the chosen node is not allowed to share with the
-   * excludedNodes. For example, if ancestorGen is 1, the chosen node cannot
-   * share the same parent with the excludedNodes; if the value is 2, it cannot
-   * share the same grandparent, and so on. If ancestorGen is 0, it has no
-   * effect.
-   *
-   * @return the chosen node
-   */
-  public Node chooseRandom(String scope, Collection<Node> excludedNodes,
-      int ancestorGen) {
-    if (scope == null) {
-      scope = ROOT;
-    }
-    if (scope.startsWith(SCOPE_REVERSE_STR)) {
-      ArrayList<String> excludedScopes = new ArrayList<>();
-      excludedScopes.add(scope.substring(1));
-      return chooseRandom(ROOT, excludedScopes, excludedNodes, null,
-          ancestorGen);
-    } else {
-      return chooseRandom(scope, null, excludedNodes, null, ancestorGen);
-    }
-  }
-
-  /**
-   * Randomly choose one leaf node from <i>scope</i>, sharing the same
-   * generation ancestor with <i>affinityNode</i>, and excluding nodes in
-   * <i>excludeScope</i> and <i>excludeNodes</i>.
-   *
-   * @param scope range of nodes from which a node will be chosen, cannot start
-   *              with ~
-   * @param excludedScopes ranges of nodes to be excluded, cannot start with ~
-   * @param excludedNodes nodes to be excluded
-   * @param affinityNode  when not null, the chosen node should share the same
-   *                     ancestor with this node at generation ancestorGen.
-   *                      Ignored when value is null
-   * @param ancestorGen If 0, then no same generation ancestor enforcement on
-   *                     both excludedNodes and affinityNode. If greater than 0,
-   *                     then apply to affinityNode(if not null), or apply to
-   *                     excludedNodes if affinityNode is null
-   * @return the chosen node
-   */
-  public Node chooseRandom(String scope, List<String> excludedScopes,
-      Collection<Node> excludedNodes, Node affinityNode, int ancestorGen) {
-    if (scope == null) {
-      scope = ROOT;
-    }
-
-    checkScope(scope);
-    checkExcludedScopes(excludedScopes);
-    checkAffinityNode(affinityNode);
-    checkAncestorGen(ancestorGen);
-
-    netlock.readLock().lock();
-    try {
-      return chooseNodeInternal(scope, -1, excludedScopes,
-          excludedNodes, affinityNode, ancestorGen);
-    } finally {
-      netlock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Choose the leaf node at index <i>index</i> from <i>scope</i>, sharing the
-   * same generation ancestor with <i>affinityNode</i>, and excluding nodes in
-   * <i>excludeScope</i> and <i>excludeNodes</i>.
-   *
-   * @param leafIndex node index, exclude nodes in excludedScope and
-   *                  excludedNodes
-   * @param scope range of nodes from which a node will be chosen, cannot start
-   *              with ~
-   * @param excludedScopes ranges of nodes to be excluded, cannot start with ~
-   * @param excludedNodes nodes to be excluded
-   * @param affinityNode  when not null, the chosen node should share the same
-   *                     ancestor with this node at generation ancestorGen.
-   *                      Ignored when value is null
-   * @param ancestorGen If 0, then no same generation ancestor enforcement on
-   *                     both excludedNodes and affinityNode. If greater than 0,
-   *                     then apply to affinityNode(if not null), or apply to
-   *                     excludedNodes if affinityNode is null
-   * @return the chosen node
-   * Example:
-   *
-   *                                /  --- root
-   *                              /  \
-   *                             /    \
-   *                            /      \
-   *                           /        \
-   *                         dc1         dc2
-   *                        / \         / \
-   *                       /   \       /   \
-   *                      /     \     /     \
-   *                    rack1 rack2  rack1  rack2
-   *                   / \     / \  / \     / \
-   *                 n1  n2  n3 n4 n5  n6  n7 n8
-   *
-   *   Input:
-   *   leafIndex = 1
-   *   excludedScope = /dc2
-   *   excludedNodes = {/dc1/rack1/n1}
-   *   affinityNode = /dc1/rack2/n2
-   *   ancestorGen = 2
-   *
-   *   Output:
-   *   node /dc1/rack2/n4
-   *
-   *   Explanation:
-   *   With affinityNode n2 and ancestorGen 2, it means we can only pick node
-   *   from subtree /dc1. LeafIndex 1, so we pick the 2nd available node n4.
-   *
-   */
-  public Node getNode(int leafIndex, String scope, List<String> excludedScopes,
-      Collection<Node> excludedNodes, Node affinityNode, int ancestorGen) {
-    Preconditions.checkArgument(leafIndex >= 0);
-    if (scope == null) {
-      scope = ROOT;
-    }
-    checkScope(scope);
-    checkExcludedScopes(excludedScopes);
-    checkAffinityNode(affinityNode);
-    checkAncestorGen(ancestorGen);
-
-    netlock.readLock().lock();
-    try {
-      return chooseNodeInternal(scope, leafIndex, excludedScopes,
-          excludedNodes, affinityNode, ancestorGen);
-    } finally {
-      netlock.readLock().unlock();
-    }
-  }
-
-  private Node chooseNodeInternal(String scope, int leafIndex,
-      List<String> excludedScopes, Collection<Node> excludedNodes,
-      Node affinityNode, int ancestorGen) {
-    Preconditions.checkArgument(scope != null);
-
-    String finalScope = scope;
-    if (affinityNode != null && ancestorGen > 0) {
-      Node affinityAncestor = affinityNode.getAncestor(ancestorGen);
-      if (affinityAncestor == null) {
-        throw new IllegalArgumentException("affinityNode " +
-            affinityNode.getNetworkFullPath() + " doesn't have an ancestor " +
-            "at generation " + ancestorGen);
-      }
-      // the affinity ancestor should have overlap with scope
-      if (affinityAncestor.getNetworkFullPath().startsWith(scope)){
-        finalScope = affinityAncestor.getNetworkFullPath();
-      } else if (!scope.startsWith(affinityAncestor.getNetworkFullPath())) {
-        return null;
-      }
-      // reset ancestor generation since the new scope is identified now
-      ancestorGen = 0;
-    }
-
-    // check overlap of excludedScopes and finalScope
-    List<String> mutableExcludedScopes = null;
-    if (excludedScopes != null && !excludedScopes.isEmpty()) {
-      mutableExcludedScopes = new ArrayList<>();
-      for (String s: excludedScopes) {
-        // excludeScope covers finalScope
-        if (finalScope.startsWith(s)) {
-          return null;
-        }
-        // excludeScope falls inside finalScope; keep it if not already covered
-        if (s.startsWith(finalScope)) {
-          if (mutableExcludedScopes.stream().noneMatch(
-              e -> s.startsWith(e))) {
-            mutableExcludedScopes.add(s);
-          }
-        }
-      }
-    }
-
-    // clone excludedNodes before removing duplicates from it
-    Collection<Node> mutableExNodes = null;
-
-    // remove duplicates from excludedNodes
-    if (excludedNodes != null) {
-      mutableExNodes =
-          excludedNodes.stream().distinct().collect(Collectors.toList());
-    }
-
-    // remove overlap between mutableExNodes and mutableExcludedScopes
-    NetUtils.removeDuplicate(this, mutableExNodes, mutableExcludedScopes,
-        ancestorGen);
-
-    // calculate available node count
-    Node scopeNode = getNode(finalScope);
-    int availableNodes = getAvailableNodesCount(
-        scopeNode.getNetworkFullPath(), mutableExcludedScopes, mutableExNodes,
-        ancestorGen);
-
-    if (availableNodes <= 0) {
-      LOG.warn("No available node in (scope=\"{}\" excludedScope=\"{}\" " +
-              "excludedNodes=\"{}\"  ancestorGen=\"{}\").",
-          scopeNode.getNetworkFullPath(), excludedScopes, excludedNodes,
-          ancestorGen);
-      return null;
-    }
-
-    // scope is a Leaf node
-    if (!(scopeNode instanceof InnerNode)) {
-      return scopeNode;
-    }
-
-    Node ret;
-    int nodeIndex;
-    if (leafIndex >= 0) {
-      nodeIndex = leafIndex % availableNodes;
-      ret = ((InnerNode)scopeNode).getLeaf(nodeIndex, mutableExcludedScopes,
-          mutableExNodes, ancestorGen);
-    } else {
-      nodeIndex = ThreadLocalRandom.current().nextInt(availableNodes);
-      ret = ((InnerNode)scopeNode).getLeaf(nodeIndex, mutableExcludedScopes,
-          mutableExNodes, ancestorGen);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Choosing node[index={},random={}] from \"{}\" available " +
-              "nodes, scope=\"{}\", excludedScope=\"{}\", excludeNodes=\"{}\".",
-          nodeIndex, (leafIndex == -1 ? "true" : "false"), availableNodes,
-          scopeNode.getNetworkFullPath(), excludedScopes, excludedNodes);
-      LOG.debug("Chosen node = {}", (ret == null ? "not found" :
-          ret.toString()));
-    }
-    return ret;
-  }
-
-  /** Return the distance cost between two nodes.
-   * The distance cost from a node to its parent is the parent's cost.
-   * The distance cost between two nodes is calculated by summing up their
-   * distance costs to their closest common ancestor.
-   * @param node1 one node
-   * @param node2 another node
-   * @return the distance cost between node1 and node2 which is zero if they
-   * are the same or {@link Integer#MAX_VALUE} if node1 or node2 do not belong
-   * to the cluster
-   */
-  public int getDistanceCost(Node node1, Node node2) {
-    if ((node1 != null && node2 != null && node1.equals(node2)) ||
-        (node1 == null && node2 == null))  {
-      return 0;
-    }
-    if (node1 == null || node2 == null) {
-      LOG.warn("One of the nodes is a null pointer");
-      return Integer.MAX_VALUE;
-    }
-    int cost = 0;
-    netlock.readLock().lock();
-    try {
-      if ((node1.getAncestor(maxLevel - 1) != clusterTree) ||
-          (node2.getAncestor(maxLevel - 1) != clusterTree)) {
-        LOG.debug("One of the nodes is outside of network topology");
-        return Integer.MAX_VALUE;
-      }
-      int level1 = node1.getLevel();
-      int level2 = node2.getLevel();
-      if (level1 > maxLevel || level2 > maxLevel) {
-        return Integer.MAX_VALUE;
-      }
-      while(level1 > level2 && node1 != null) {
-        node1 = node1.getParent();
-        level1--;
-        cost += node1 == null? 0 : node1.getCost();
-      }
-      while(level2 > level1 && node2 != null) {
-        node2 = node2.getParent();
-        level2--;
-        cost += node2 == null? 0 : node2.getCost();
-      }
-      while(node1 != null && node2 != null && node1 != node2) {
-        node1 = node1.getParent();
-        node2 = node2.getParent();
-        cost += node1 == null? 0 : node1.getCost();
-        cost += node2 == null? 0 : node2.getCost();
-      }
-      return cost;
-    } finally {
-      netlock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Sort nodes array by network distance to <i>reader</i> to reduces network
-   * traffic and improves performance.
-   *
-   * As an additional twist, we also randomize the nodes at each network
-   * distance. This helps with load balancing when there is data skew.
-   *
-   * @param reader    Node that needs the data
-   * @param nodes     Available replicas with the requested data
-   * @param activeLen Number of active nodes at the front of the array
-   */
-  public List<? extends Node> sortByDistanceCost(Node reader,
-      List<? extends Node> nodes, int activeLen) {
-    // sort weights for the nodes array
-    if (reader == null) {
-      return nodes;
-    }
-    int[] costs = new int[activeLen];
-    for (int i = 0; i < activeLen; i++) {
-      costs[i] = getDistanceCost(reader, nodes.get(i));
-    }
-    // Add cost/node pairs to a TreeMap to sort
-    TreeMap<Integer, List<Node>> tree = new TreeMap<Integer, List<Node>>();
-    for (int i = 0; i < activeLen; i++) {
-      int cost = costs[i];
-      Node node = nodes.get(i);
-      List<Node> list = tree.get(cost);
-      if (list == null) {
-        list = Lists.newArrayListWithExpectedSize(1);
-        tree.put(cost, list);
-      }
-      list.add(node);
-    }
-
-    List<Node> ret = new ArrayList<>();
-    for (List<Node> list: tree.values()) {
-      if (list != null) {
-        Collections.shuffle(list);
-        for (Node n: list) {
-          ret.add(n);
-        }
-      }
-    }
-
-    Preconditions.checkState(ret.size() == activeLen,
-        "Wrong number of nodes sorted!");
-    return ret;
-  }
-
-  /**
-   * Return the number of leaves in <i>scope</i> but not in
-   * <i>excludedNodes</i> or <i>excludedScopes</i>.
-   * @param scope the scope
-   * @param excludedScopes excluded scopes
-   * @param mutableExcludedNodes a list of excluded nodes, content might be
-   *                            changed after the call
-   * @param ancestorGen same-generation ancestor restriction applied to
-   *                    excludedNodes
-   * @return number of available nodes
-   */
-  private int getAvailableNodesCount(String scope, List<String> excludedScopes,
-      Collection<Node> mutableExcludedNodes, int ancestorGen) {
-    Preconditions.checkArgument(scope != null);
-
-    Node scopeNode = getNode(scope);
-    if (scopeNode == null) {
-      return 0;
-    }
-    NetUtils.removeOutscope(mutableExcludedNodes, scope);
-    List<Node> excludedAncestorList =
-        NetUtils.getAncestorList(this, mutableExcludedNodes, ancestorGen);
-    for (Node ancestor : excludedAncestorList) {
-      if (scope.startsWith(ancestor.getNetworkFullPath())){
-        return 0;
-      }
-    }
-    // number of nodes to exclude
-    int excludedCount = 0;
-    if (excludedScopes != null) {
-      for (String excludedScope: excludedScopes) {
-        Node excludedScopeNode = getNode(excludedScope);
-        if (excludedScopeNode != null) {
-          if (excludedScope.startsWith(scope)) {
-            excludedCount += excludedScopeNode.getNumOfLeaves();
-          } else if (scope.startsWith(excludedScope)) {
-            return 0;
-          }
-        }
-      }
-    }
-    // count excluded nodes when excludedNodes is not empty
-    if (mutableExcludedNodes != null && (!mutableExcludedNodes.isEmpty())) {
-      if (ancestorGen == 0) {
-        for (Node node: mutableExcludedNodes) {
-          if (contains(node)) {
-            excludedCount++;
-          }
-        }
-      } else {
-        for (Node ancestor : excludedAncestorList) {
-          if (ancestor.getNetworkFullPath().startsWith(scope)) {
-            excludedCount += ancestor.getNumOfLeaves();
-          }
-        }
-      }
-    }
-
-    int availableCount = scopeNode.getNumOfLeaves() - excludedCount;
-    Preconditions.checkState(availableCount >= 0);
-    return availableCount;
-  }
-
-  @Override
-  public String toString() {
-    // print max level
-    StringBuilder tree = new StringBuilder();
-    tree.append("Level: ");
-    tree.append(maxLevel);
-    tree.append("\n");
-    netlock.readLock().lock();
-    try {
-      // print the number of leaves
-      int numOfLeaves = clusterTree.getNumOfLeaves();
-      tree.append("Number of leaves:");
-      tree.append(numOfLeaves);
-      tree.append("\n");
-      // print all nodes
-      for (int i = 0; i < numOfLeaves; i++) {
-        tree.append(clusterTree.getLeaf(i).getNetworkFullPath());
-        tree.append("\n");
-      }
-    } finally {
-      netlock.readLock().unlock();
-    }
-    return tree.toString();
-  }
-
-  private void checkScope(String scope) {
-    if (scope != null && scope.startsWith(SCOPE_REVERSE_STR)) {
-      throw new IllegalArgumentException("scope " + scope +
-          " should not start with " + SCOPE_REVERSE_STR);
-    }
-  }
-
-  private void checkExcludedScopes(List<String> excludedScopes) {
-    if (!CollectionUtils.isEmpty(excludedScopes)) {
-      excludedScopes.stream().forEach(scope -> {
-        if (scope.startsWith(SCOPE_REVERSE_STR)) {
-          throw new IllegalArgumentException("excludedScope " + scope +
-              " cannot start with " + SCOPE_REVERSE_STR);
-        }
-      });
-    }
-  }
-
-  private void checkAffinityNode(Node affinityNode) {
-    if (affinityNode != null && (!contains(affinityNode))) {
-      throw new IllegalArgumentException("Affinity node " +
-          affinityNode.getNetworkFullPath() + " is not a member of topology");
-    }
-  }
-
-  private void checkAncestorGen(int ancestorGen) {
-    if (ancestorGen > (maxLevel - 1) || ancestorGen < 0) {
-      throw new IllegalArgumentException("ancestorGen " + ancestorGen +
-          " exceeds this network topology acceptable level [0, " +
-          (maxLevel - 1) + "]");
-    }
-  }
-}
\ No newline at end of file
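
For context, the chooser and sorter deleted above were the topology-aware placement entry points of the removed net package. The fragment below is a minimal usage sketch, not part of the removed sources: the populated NetworkTopology instance, the scope strings, and the busy node are illustrative assumptions, and NetworkTopology is the interface these methods were declared on in the deleted tree.

    import java.util.Arrays;
    import java.util.List;

    // Sketch only: assumes a topology that was built and populated elsewhere.
    final class TopologySketch {
      // Pick a random leaf under /dc1, avoiding /dc1/rack1 and one busy node.
      static Node pickTarget(NetworkTopology topology, Node busy) {
        return topology.chooseRandom("/dc1",
            Arrays.asList("/dc1/rack1"), // excludedScopes
            Arrays.asList(busy),         // excludedNodes
            null,                        // no affinity node
            0);                          // no ancestor-generation constraint
      }

      // Order replicas closest-first for a reader; equal-cost ties are
      // shuffled by sortByDistanceCost for load balancing.
      static List<? extends Node> orderForRead(NetworkTopology topology,
          Node reader, List<? extends Node> replicas) {
        return topology.sortByDistanceCost(reader, replicas, replicas.size());
      }
    }
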
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java
deleted file mode 100644
index 0007e546..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-/**
- * The interface defines a node in a network topology.
- * A node may be a leaf representing a data node or an inner
- * node representing a data center or rack.
- * Each node has a name and its location in the network is
- * decided by a string with syntax similar to a file name.
- * For example, a data node's name is hostname:port# and if it's located at
- * rack "orange" in data center "dog", the string representation of its
- * network location will be /dog/orange.
- */
-public interface Node {
-  /** @return the string representation of this node's network location path,
-   *  excluding itself. In other words, its parent's full network location. */
-  String getNetworkLocation();
-
-  /**
-   * Set this node's network location.
-   * @param location its network location
-   */
-  void setNetworkLocation(String location);
-
-  /** @return this node's own name in the network topology. This should be
-   * the node's IP address or hostname.
-   * */
-  String getNetworkName();
-
-  /**
-   * Set this node's name; it can be a hostname or an IP address.
-   * @param name its network name
-   */
-  void setNetworkName(String name);
-
-  /** @return this node's full path in network topology. It's the concatenation
-   *  of location and name.
-   * */
-  String getNetworkFullPath();
-
-  /** @return this node's parent */
-  InnerNode getParent();
-
-  /**
-   * Set this node's parent.
-   * @param parent the parent
-   */
-  void setParent(InnerNode parent);
-
-  /** @return this node's ancestor, generation 0 is itself, generation 1 is
-   *  node's parent, and so on.*/
-  Node getAncestor(int generation);
-
-  /**
-   * @return this node's level in the tree.
-   * E.g. the root of a tree returns 1 and root's children return 2
-   */
-  int getLevel();
-
-  /**
-   * Set this node's level in the tree.
-   * @param i the level
-   */
-  void setLevel(int i);
-
-  /**
-   * @return this node's cost when network traffic goes through it.
-   * E.g. the cost of going across a switch is 1, and the cost of going
-   * through a datacenter can be 5.
-   * By default the cost of a leaf datanode is 0 and of any other node is 1.
-   */
-  int getCost();
-
-  /** @return the number of leaf nodes under this node. */
-  int getNumOfLeaves();
-
-  /**
-   * Check whether this node is an ancestor of node <i>n</i>.
-   * A node counts as an ancestor of itself, in addition to its parents.
-   *
-   * @param n a node
-   * @return true if this node is an ancestor of <i>n</i>
-   */
-  boolean isAncestor(Node n);
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java
deleted file mode 100644
index 53b05ea..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import com.google.common.base.Preconditions;
-
-import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR;
-
-/**
- * A thread safe class that implements interface Node.
- */
-public class NodeImpl implements Node {
-  // host:port#
-  private String name;
-  // string representation of this node's location, such as /dc1/rack1
-  private String location;
-  // location + "/" + name
-  private String path;
-  // the level of the tree at which this node resides, starting from 1 for root
-  private int level;
-  // node's parent
-  private InnerNode parent;
-  // the cost to go through this node
-  private final int cost;
-
-  /**
-   * Construct a node from its name and its location.
-   * @param name this node's name (can be null, must not contain
-   * {@link NetConstants#PATH_SEPARATOR})
-   * @param location this node's location
-   * @param cost this node's cost if traffic goes through it
-   */
-  public NodeImpl(String name, String location, int cost) {
-    if (name != null && name.contains(PATH_SEPARATOR_STR)) {
-      throw new IllegalArgumentException(
-          "Network location name:" + name + " should not contain " +
-              PATH_SEPARATOR_STR);
-    }
-    this.name = (name == null) ? ROOT : name;
-    this.location = NetUtils.normalize(location);
-    this.path = getPath();
-    this.cost = cost;
-  }
-
-  /**
-   * Construct a node from its name and its location.
-   *
-   * @param name     this node's name (can be null, must not contain
-   *                 {@link NetConstants#PATH_SEPARATOR})
-   * @param location this node's location
-   * @param parent   this node's parent node
-   * @param level    this node's level in the tree
-   * @param cost     this node's cost if traffic goes through it
-   */
-  public NodeImpl(String name, String location, InnerNode parent, int level,
-      int cost) {
-    this(name, location, cost);
-    this.parent = parent;
-    this.level = level;
-  }
-
-  /**
-   * @return this node's name
-   */
-  public String getNetworkName() {
-    return name;
-  }
-
-  /**
-   * Set this node's name; it can be a hostname or an IP address.
-   * @param networkName its network name
-   */
-  public void setNetworkName(String networkName) {
-    this.name = networkName;
-    this.path = getPath();
-  }
-
-  /**
-   * @return this node's network location
-   */
-  public String getNetworkLocation() {
-    return location;
-  }
-
-  /**
-   * Set this node's network location.
-   * @param networkLocation its network location
-   */
-  @Override
-  public void setNetworkLocation(String networkLocation) {
-    this.location = networkLocation;
-    this.path = getPath();
-  }
-
-  /**
-   * @return this node's full path in network topology. It's the concatenation
-   * of location and name.
-   */
-  public String getNetworkFullPath() {
-    return path;
-  }
-
-  /**
-   * @return this node's parent
-   */
-  public InnerNode getParent() {
-    return parent;
-  }
-
-  /**
-   * @return this node's ancestor, generation 0 is itself, generation 1 is
-   * node's parent, and so on.
-   */
-  public Node getAncestor(int generation) {
-    Preconditions.checkArgument(generation >= 0);
-    Node current = this;
-    while (generation > 0 && current != null) {
-      current = current.getParent();
-      generation--;
-    }
-    return current;
-  }
-
-  /**
-   * Set this node's parent.
-   *
-   * @param parent the parent
-   */
-  public void setParent(InnerNode parent) {
-    this.parent = parent;
-  }
-
-  /**
-   * @return this node's level in the tree.
-   * E.g. the root of a tree returns 1 and its children return 2
-   */
-  public int getLevel() {
-    return this.level;
-  }
-
-  /**
-   * Set this node's level in the tree.
-   *
-   * @param level the level
-   */
-  public void setLevel(int level) {
-    this.level = level;
-  }
-
-  /**
-   * @return this node's cost when network traffic goes through it.
-   * E.g. the cost of going across a switch is 1, and the cost of going
-   * through a datacenter is 5.
-   * By default the cost of a leaf datanode is 0 and of any inner node is 1.
-   */
-  public int getCost() {
-    return this.cost;
-  }
-
-  /** @return the leaf nodes number under this node. */
-  public int getNumOfLeaves() {
-    return 1;
-  }
-
-  /**
-   * Check if this node is an ancestor of node <i>node</i>. A node counts
-   * as an ancestor of itself, in addition to its parents.
-   * @param node a node
-   * @return true if this node is an ancestor of <i>node</i>
-   */
-  public boolean isAncestor(Node node) {
-    return this.getNetworkFullPath().equals(PATH_SEPARATOR_STR) ||
-        node.getNetworkLocation().startsWith(this.getNetworkFullPath()) ||
-            node.getNetworkFullPath().equalsIgnoreCase(
-                this.getNetworkFullPath());
-  }
-
-  @Override
-  public boolean equals(Object to) {
-    if (to == null) {
-      return false;
-    }
-    if (this == to) {
-      return true;
-    }
-    return this.toString().equals(to.toString());
-  }
-
-  @Override
-  public int hashCode() {
-    return toString().hashCode();
-  }
-
-  /**
-   * @return this node's path as its string representation
-   */
-  @Override
-  public String toString() {
-    return getNetworkFullPath();
-  }
-
-  private String getPath() {
-    return this.location.equals(PATH_SEPARATOR_STR) ?
-        this.location + this.name :
-        this.location + PATH_SEPARATOR_STR + this.name;
-  }
-}
\ No newline at end of file
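
The NodeImpl removed above is the concrete building block behind the Node interface. A minimal sketch of its contract, assuming NetUtils.normalize leaves an already-normalized location unchanged:

    // Sketch only: a datanode at /dc1/rack1 with the default leaf cost of 0.
    Node n1 = new NodeImpl("n1", "/dc1/rack1", 0);
    n1.getNetworkFullPath();   // "/dc1/rack1/n1"
    n1.getNetworkLocation();   // "/dc1/rack1"
    n1.getAncestor(0);         // the node itself
    n1.getAncestor(1);         // null until a parent is set via setParent()
    n1.getNumOfLeaves();       // 1 -- a NodeImpl is always a single leaf
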
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
deleted file mode 100644
index 47e5de8..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
+++ /dev/null
@@ -1,183 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import org.apache.hadoop.HadoopIllegalArgumentException;
-
-import java.util.List;
-
-/**
- * Network topology schema that keeps the relevant layer information.
- */
-public final class NodeSchema {
-  /**
-   * Network topology layer type enum definition.
-   */
-  public enum LayerType{
-    ROOT("Root", NetConstants.INNER_NODE_COST_DEFAULT),
-    INNER_NODE("InnerNode", NetConstants.INNER_NODE_COST_DEFAULT),
-    LEAF_NODE("Leaf", NetConstants.NODE_COST_DEFAULT);
-
-    private final String description;
-    // default cost
-    private final int cost;
-
-    LayerType(String description, int cost) {
-      this.description = description;
-      this.cost = cost;
-    }
-
-    @Override
-    public String toString() {
-      return description;
-    }
-
-    public int getCost(){
-      return cost;
-    }
-    public static LayerType getType(String typeStr) {
-      for (LayerType type: LayerType.values()) {
-        if (typeStr.equalsIgnoreCase(type.toString())) {
-          return type;
-        }
-      }
-      return null;
-    }
-  }
-
-  // default cost
-  private int cost;
-  // layer Type, mandatory property
-  private LayerType type;
-  // default name, can be null or ""
-  private String defaultName;
-  // layer prefix, can be null or ""
-  private String prefix;
-  // sublayer
-  private List<NodeSchema> sublayer;
-
-  /**
-   * Builder for NodeSchema.
-   */
-  public static class Builder {
-    private int cost = -1;
-    private LayerType type;
-    private String defaultName;
-    private String prefix;
-
-    public Builder setCost(int nodeCost) {
-      this.cost = nodeCost;
-      return this;
-    }
-
-    public Builder setPrefix(String nodePrefix) {
-      this.prefix = nodePrefix;
-      return this;
-    }
-
-    public Builder setType(LayerType nodeType) {
-      this.type = nodeType;
-      return this;
-    }
-
-    public Builder setDefaultName(String nodeDefaultName) {
-      this.defaultName = nodeDefaultName;
-      return this;
-    }
-
-    public NodeSchema build() {
-      if (type == null) {
-        throw new HadoopIllegalArgumentException("Type is mandatory for a " +
-            "network topology node layer definition");
-      }
-      if (cost == -1) {
-        cost = type.getCost();
-      }
-      return new NodeSchema(type, cost, prefix, defaultName);
-    }
-  }
-
-  /**
-   * Constructor.
-   * @param type layer type
-   * @param cost layer's default cost
-   * @param prefix layer's prefix
-   * @param defaultName layer's default name, if specified
-   */
-  public NodeSchema(LayerType type, int cost, String prefix,
-      String defaultName) {
-    this.type = type;
-    this.cost = cost;
-    this.prefix = prefix;
-    this.defaultName = defaultName;
-  }
-
-  /**
-   * Constructor. This constructor is only used when building a NodeSchema
-   * from a YAML file.
-   */
-  public NodeSchema() {
-    this.type = LayerType.INNER_NODE;
-  }
-
-  public boolean matchPrefix(String name) {
-    if (name == null || name.isEmpty() || prefix == null || prefix.isEmpty()) {
-      return false;
-    }
-    return name.trim().toLowerCase().startsWith(prefix.toLowerCase());
-  }
-
-  public LayerType getType() {
-    return this.type;
-  }
-
-  public void setType(LayerType type) {
-    this.type = type;
-  }
-
-  public String getPrefix() {
-    return this.prefix;
-  }
-
-  public void setPrefix(String prefix) {
-    this.prefix = prefix;
-  }
-
-  public String getDefaultName() {
-    return this.defaultName;
-  }
-
-  public void setDefaultName(String name) {
-    this.defaultName = name;
-  }
-
-  public int getCost() {
-    return this.cost;
-  }
-  public void setCost(int cost) {
-    this.cost = cost;
-  }
-
-  public void setSublayer(List<NodeSchema> sublayer) {
-    this.sublayer = sublayer;
-  }
-
-  public List<NodeSchema> getSublayer() {
-    return sublayer;
-  }
-}
\ No newline at end of file
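
The Builder removed above is how layer schemas were assembled in code (its loader counterpart follows below). A minimal sketch; the "rack" prefix is an illustrative assumption:

    // Sketch only: an inner layer whose members are named rack*, e.g. rack1.
    NodeSchema rack = new NodeSchema.Builder()
        .setType(NodeSchema.LayerType.INNER_NODE)
        .setPrefix("rack")
        .build();
    // cost was not set, so build() falls back to the layer type's default
    rack.getCost();            // NetConstants.INNER_NODE_COST_DEFAULT
    rack.matchPrefix("Rack1"); // true -- the match is case-insensitive
    rack.matchPrefix("dc1");   // false
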
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
deleted file mode 100644
index 8d7abed..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
+++ /dev/null
@@ -1,489 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import org.apache.commons.io.FilenameUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.w3c.dom.Text;
-import org.xml.sax.SAXException;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import  org.apache.hadoop.hdds.scm.net.NodeSchema.LayerType;
-import org.yaml.snakeyaml.Yaml;
-
-/**
- * A network topology layer schema loading tool that loads user-defined
- * network layer schema data from an XML or YAML configuration file.
- */
-public final class NodeSchemaLoader {
-  private static final Logger LOG
-      = LoggerFactory.getLogger(NodeSchemaLoader.class);
-  private static final String CONFIGURATION_TAG = "configuration";
-  private static final String LAYOUT_VERSION_TAG = "layoutversion";
-  private static final String TOPOLOGY_TAG = "topology";
-  private static final String TOPOLOGY_PATH = "path";
-  private static final String TOPOLOGY_ENFORCE_PREFIX = "enforceprefix";
-  private static final String LAYERS_TAG = "layers";
-  private static final String LAYER_TAG = "layer";
-  private static final String LAYER_ID = "id";
-  private static final String LAYER_TYPE = "type";
-  private static final String LAYER_COST = "cost";
-  private static final String LAYER_PREFIX = "prefix";
-  private static final String LAYER_DEFAULT_NAME = "default";
-
-  private static final int LAYOUT_VERSION = 1;
-  private volatile static NodeSchemaLoader instance = null;
-  private NodeSchemaLoader() {}
-
-  public static NodeSchemaLoader getInstance() {
-    if (instance == null) {
-      instance = new NodeSchemaLoader();
-    }
-    return instance;
-  }
-
-  /**
-   * Class to hold the result of parsing a network topology schema file.
-   */
-  public static class NodeSchemaLoadResult {
-    private List<NodeSchema> schemaList;
-    private boolean enforcePrefix;
-
-    NodeSchemaLoadResult(List<NodeSchema> schemaList, boolean enforcePrefix) {
-      this.schemaList = schemaList;
-      this.enforcePrefix = enforcePrefix;
-    }
-
-    public boolean isEnforePrefix() {
-      return enforcePrefix;
-    }
-
-    public List<NodeSchema> getSchemaList() {
-      return schemaList;
-    }
-  }
-
-  /**
-   * Load user-defined network layer schemas from an XML/YAML configuration
-   * file.
-   * @param schemaFilePath path of schema file
-   * @return all valid node schemas defined in schema file
-   */
-  public NodeSchemaLoadResult loadSchemaFromFile(String schemaFilePath)
-      throws IllegalArgumentException, FileNotFoundException {
-    try {
-      File schemaFile = new File(schemaFilePath);
-
-      if (schemaFile.exists()) {
-        LOG.info("Load network topology schema file " +
-            schemaFile.getAbsolutePath());
-        try (FileInputStream inputStream = new FileInputStream(schemaFile)) {
-          return loadSchemaFromStream(schemaFilePath, inputStream);
-        }
-      } else {
-        // try to load with classloader
-        ClassLoader classloader =
-            Thread.currentThread().getContextClassLoader();
-        if (classloader == null) {
-          classloader = NodeSchemaLoader.class.getClassLoader();
-        }
-        if (classloader != null) {
-          try (InputStream stream = classloader
-              .getResourceAsStream(schemaFilePath)) {
-            if (stream != null) {
-              LOG.info("Loading file from " + classloader
-                  .getResources(schemaFilePath));
-              return loadSchemaFromStream(schemaFilePath, stream);
-            }
-          }
-        }
-
-      }
-
-      String msg = "Network topology layer schema file " +
-          schemaFilePath + "[" + schemaFile.getAbsolutePath() +
-          "] is not found.";
-      LOG.warn(msg);
-      throw new FileNotFoundException(msg);
-
-    } catch (FileNotFoundException e) {
-      throw e;
-    } catch (ParserConfigurationException | IOException | SAXException e) {
-      throw new IllegalArgumentException("Failed to load network topology node"
-          + " schema file: " + schemaFilePath + " , error:" + e.getMessage(),
-          e);
-    }
-  }
-
-  private NodeSchemaLoadResult loadSchemaFromStream(String schemaFilePath,
-      InputStream stream)
-      throws ParserConfigurationException, SAXException, IOException {
-    if (FilenameUtils.getExtension(schemaFilePath).toLowerCase()
-        .compareTo("yaml") == 0) {
-      return loadSchemaFromYaml(stream);
-    } else {
-      return loadSchema(stream);
-    }
-  }
-
-  /**
-   * Load network topology layer schemas from an XML configuration file.
-   * @param inputStream schema file as an inputStream
-   * @return all valid node schemas defined in schema file
-   * @throws ParserConfigurationException if a document builder cannot be
-   * created
-   * @throws IOException no such schema file
-   * @throws SAXException xml file has some invalid elements
-   * @throws IllegalArgumentException xml file content is logically invalid
-   */
-  private NodeSchemaLoadResult loadSchema(InputStream inputStream) throws
-      ParserConfigurationException, SAXException, IOException {
-    LOG.info("Loading network topology layer schema file");
-    // Read and parse the schema file.
-    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
-    dbf.setIgnoringComments(true);
-    DocumentBuilder builder = dbf.newDocumentBuilder();
-    Document doc = builder.parse(inputStream);
-    Element root = doc.getDocumentElement();
-
-    if (!CONFIGURATION_TAG.equals(root.getTagName())) {
-      throw new IllegalArgumentException("Bad network topology layer schema " +
-          "configuration file: top-level element not <" + CONFIGURATION_TAG +
-          ">");
-    }
-    NodeSchemaLoadResult schemaList;
-    if (root.getElementsByTagName(LAYOUT_VERSION_TAG).getLength() == 1) {
-      if (loadLayoutVersion(root) == LAYOUT_VERSION) {
-        if (root.getElementsByTagName(LAYERS_TAG).getLength() == 1) {
-          Map<String, NodeSchema> schemas = loadLayersSection(root);
-          if (root.getElementsByTagName(TOPOLOGY_TAG).getLength() == 1) {
-            schemaList = loadTopologySection(root, schemas);
-          } else {
-            throw new IllegalArgumentException("Bad network topology layer " +
-                "schema configuration file: no or multiple <" + TOPOLOGY_TAG +
-                "> element");
-          }
-        } else {
-          throw new IllegalArgumentException("Bad network topology layer schema"
-              + " configuration file: no or multiple <" + LAYERS_TAG +
-              ">element");
-        }
-      } else {
-        throw new IllegalArgumentException("The parse failed because of bad "
-            + LAYOUT_VERSION_TAG + " value, expected:" + LAYOUT_VERSION);
-      }
-    } else {
-      throw new IllegalArgumentException("Bad network topology layer schema " +
-          "configuration file: no or multiple <" + LAYOUT_VERSION_TAG +
-          "> elements");
-    }
-    return schemaList;
-  }
-
-  /**
-   * Load network topology layer schemas from a YAML configuration file.
-   * @param schemaFile schema file as an inputStream
-   * @return all valid node schemas defined in schema file
-   * @throws IllegalArgumentException yaml file content is logically invalid
-   * or cannot be parsed
-   */
-  private NodeSchemaLoadResult loadSchemaFromYaml(InputStream schemaFile) {
-    LOG.info("Loading network topology layer schema file {}", schemaFile);
-    NodeSchemaLoadResult finalSchema;
-
-    try {
-      Yaml yaml = new Yaml();
-      NodeSchema nodeTree;
-
-      nodeTree = yaml.loadAs(schemaFile, NodeSchema.class);
-
-      List<NodeSchema> schemaList = new ArrayList<>();
-      if (nodeTree.getType() != LayerType.ROOT) {
-        throw new IllegalArgumentException("First layer is not a ROOT node."
-            + " schema file.");
-      }
-      schemaList.add(nodeTree);
-      if (nodeTree.getSublayer() != null) {
-        nodeTree = nodeTree.getSublayer().get(0);
-      }
-
-      while (nodeTree != null) {
-        if (nodeTree.getType() == LayerType.LEAF_NODE
-                && nodeTree.getSublayer() != null) {
-          throw new IllegalArgumentException("Leaf node in the middle of path."
-              + " schema file.");
-        }
-        if (nodeTree.getType() == LayerType.ROOT) {
-          throw new IllegalArgumentException("Multiple root nodes are defined."
-              + " schema file.");
-        }
-        schemaList.add(nodeTree);
-        if (nodeTree.getSublayer() != null) {
-          nodeTree = nodeTree.getSublayer().get(0);
-        } else {
-          break;
-        }
-      }
-      finalSchema = new NodeSchemaLoadResult(schemaList, true);
-    } catch (Exception e) {
-      throw new IllegalArgumentException("Fail to load network topology node"
-          + " schema file: " + schemaFile + " , error:"
-          + e.getMessage(), e);
-    }
-
-    return finalSchema;
-  }
-
-  /**
-   * Load layoutVersion from root element in the XML configuration file.
-   * @param root root element
-   * @return layout version
-   */
-  private int loadLayoutVersion(Element root) {
-    int layoutVersion;
-    Text text = (Text) root.getElementsByTagName(LAYOUT_VERSION_TAG)
-        .item(0).getFirstChild();
-    if (text != null) {
-      String value = text.getData().trim();
-      try {
-        layoutVersion = Integer.parseInt(value);
-      } catch (NumberFormatException e) {
-        throw new IllegalArgumentException("Bad " + LAYOUT_VERSION_TAG +
-            " value " + value + " is found. It should be an integer.");
-      }
-    } else {
-      throw new IllegalArgumentException("Value of <" + LAYOUT_VERSION_TAG +
-          "> is null");
-    }
-    return layoutVersion;
-  }
-
-  /**
-   * Load layers from root element in the XML configuration file.
-   * @param root root element
-   * @return A map of node schemas with layer ID and layer schema
-   */
-  private Map<String, NodeSchema> loadLayersSection(Element root) {
-    NodeList elements = root.getElementsByTagName(LAYER_TAG);
-    Map<String, NodeSchema> schemas = new HashMap<String, NodeSchema>();
-    for (int i = 0; i < elements.getLength(); i++) {
-      Node node = elements.item(i);
-      if (node instanceof Element) {
-        Element element = (Element) node;
-        if (LAYER_TAG.equals(element.getTagName())) {
-          String layerId = element.getAttribute(LAYER_ID);
-          NodeSchema schema = parseLayerElement(element);
-          if (!schemas.containsValue(schema)) {
-            schemas.put(layerId, schema);
-          } else {
-            throw new IllegalArgumentException("Repetitive layer in network " +
-                "topology node schema configuration file: " + layerId);
-          }
-        } else {
-          throw new IllegalArgumentException("Bad element in network topology "
-              + "node schema configuration file: " + element.getTagName());
-        }
-      }
-    }
-
-    // Integrity check, only one ROOT and one LEAF is allowed
-    boolean foundRoot = false;
-    boolean foundLeaf = false;
-    for(NodeSchema schema: schemas.values()) {
-      if (schema.getType() == LayerType.ROOT) {
-        if (foundRoot) {
-          throw new IllegalArgumentException("Multiple ROOT layers are found" +
-              " in network topology schema configuration file");
-        } else {
-          foundRoot = true;
-        }
-      }
-      if (schema.getType() == LayerType.LEAF_NODE) {
-        if (foundLeaf) {
-          throw new IllegalArgumentException("Multiple LEAF layers are found" +
-              " in network topology schema configuration file");
-        } else {
-          foundLeaf = true;
-        }
-      }
-    }
-    if (!foundRoot) {
-      throw new IllegalArgumentException("No ROOT layer is found" +
-          " in network topology schema configuration file");
-    }
-    if (!foundLeaf) {
-      throw new IllegalArgumentException("No LEAF layer is found" +
-          " in network topology schema configuration file");
-    }
-    return schemas;
-  }
-
-  /**
-   * Load network topology from root element in the XML configuration file and
-   * sort node schemas according to the topology path.
-   * @param root root element
-   * @param schemas schema map
-   * @return all valid node schemas defined in schema file
-   */
-  private NodeSchemaLoadResult loadTopologySection(Element root,
-      Map<String, NodeSchema> schemas) {
-    NodeList elements = root.getElementsByTagName(TOPOLOGY_TAG)
-        .item(0).getChildNodes();
-    List<NodeSchema> schemaList = new ArrayList<NodeSchema>();
-    boolean enforcePrefix = false;
-    for (int i = 0; i < elements.getLength(); i++) {
-      Node node = elements.item(i);
-      if (node instanceof Element) {
-        Element element = (Element) node;
-        String tagName = element.getTagName();
-        // Get the nonnull text value.
-        Text text = (Text) element.getFirstChild();
-        String value;
-        if (text != null) {
-          value = text.getData().trim();
-          if (value.isEmpty()) {
-            // Element with empty value is ignored
-            continue;
-          }
-        } else {
-          throw new IllegalArgumentException("Value of <" + tagName
-              + "> is null");
-        }
-        if (TOPOLOGY_PATH.equals(tagName)) {
-          if (value.startsWith(NetConstants.PATH_SEPARATOR_STR)) {
-            value = value.substring(1);
-          }
-          String[] layerIDs = value.split(NetConstants.PATH_SEPARATOR_STR);
-          if (layerIDs == null || layerIDs.length != schemas.size()) {
-            throw new IllegalArgumentException("Topology path depth doesn't "
-                + "match layer element numbers");
-          }
-          for (int j = 0; j < layerIDs.length; j++) {
-            if (schemas.get(layerIDs[j]) == null) {
-              throw new IllegalArgumentException("No layer found for id " +
-                  layerIDs[j]);
-            }
-          }
-          if (schemas.get(layerIDs[0]).getType() != LayerType.ROOT) {
-            throw new IllegalArgumentException("Topology path doesn't start "
-                + "with ROOT layer");
-          }
-          if (schemas.get(layerIDs[layerIDs.length -1]).getType() !=
-              LayerType.LEAF_NODE) {
-            throw new IllegalArgumentException("Topology path doesn't end "
-                + "with LEAF layer");
-          }
-          for (int j = 0; j < layerIDs.length; j++) {
-            schemaList.add(schemas.get(layerIDs[j]));
-          }
-        } else if (TOPOLOGY_ENFORCE_PREFIX.equalsIgnoreCase(tagName)) {
-          enforcePrefix = Boolean.parseBoolean(value);
-        } else {
-          throw new IllegalArgumentException("Unsupported Element <" +
-              tagName + ">");
-        }
-      }
-    }
-    // Integrity check
-    if (enforcePrefix) {
-      // Every InnerNode should have prefix defined
-      for (NodeSchema schema: schemas.values()) {
-        if (schema.getType() == LayerType.INNER_NODE &&
-            schema.getPrefix() == null) {
-          throw new IllegalArgumentException("There is layer without prefix " +
-              "defined while prefix is enforced.");
-        }
-      }
-    }
-    return new NodeSchemaLoadResult(schemaList, enforcePrefix);
-  }
-
-  /**
-   * Load a layer from a layer element in the XML configuration file.
-   * @param element network topology node layer element
-   * @return NodeSchema
-   */
-  private NodeSchema parseLayerElement(Element element) {
-    NodeList fields = element.getChildNodes();
-    LayerType type = null;
-    int cost = 0;
-    String prefix = null;
-    String defaultName = null;
-    for (int i = 0; i < fields.getLength(); i++) {
-      Node fieldNode = fields.item(i);
-      if (fieldNode instanceof Element) {
-        Element field = (Element) fieldNode;
-        String tagName = field.getTagName();
-        // Get the nonnull text value.
-        Text text = (Text) field.getFirstChild();
-        String value;
-        if (text != null) {
-          value = text.getData().trim();
-          if (value.isEmpty()) {
-            // Element with empty value is ignored
-            continue;
-          }
-        } else {
-          continue;
-        }
-        if (LAYER_COST.equalsIgnoreCase(tagName)) {
-          cost = Integer.parseInt(value);
-          if (cost < 0) {
-            throw new IllegalArgumentException(
-                "Cost should be positive number or 0");
-          }
-        } else if (LAYER_TYPE.equalsIgnoreCase(tagName)) {
-          type = NodeSchema.LayerType.getType(value);
-          if (type == null) {
-            throw new IllegalArgumentException(
-                "Unsupported layer type:" + value);
-          }
-        } else if (LAYER_PREFIX.equalsIgnoreCase(tagName)) {
-          prefix = value;
-        } else if (LAYER_DEFAULT_NAME.equalsIgnoreCase(tagName)) {
-          defaultName = value;
-        } else {
-          throw new IllegalArgumentException("Unsupported Element <" + tagName
-              + ">");
-        }
-      }
-    }
-    // type is a mandatory property
-    if (type == null) {
-      throw new IllegalArgumentException("Missing type Element");
-    }
-    return new NodeSchema(type, cost, prefix, defaultName);
-  }
-}
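
The loader removed above accepted both XML and YAML schema files. Below is a minimal sketch of an XML file it would parse, assembled from the tag constants in the deleted class; the three-layer root/rack/node layout and file handling are illustrative assumptions, not part of the removed sources:

    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public final class SchemaLoadSketch {
      public static void main(String[] args) throws Exception {
        // A hypothetical minimal schema: one ROOT, one prefixed inner
        // layer, one LEAF, wired together by the <topology> path.
        String xml =
            "<configuration>\n"
            + "  <layoutversion>1</layoutversion>\n"
            + "  <layers>\n"
            + "    <layer id=\"root\"><type>Root</type></layer>\n"
            + "    <layer id=\"rack\"><type>InnerNode</type>"
            + "<prefix>rack</prefix></layer>\n"
            + "    <layer id=\"node\"><type>Leaf</type></layer>\n"
            + "  </layers>\n"
            + "  <topology>\n"
            + "    <path>/root/rack/node</path>\n"
            + "    <enforceprefix>true</enforceprefix>\n"
            + "  </topology>\n"
            + "</configuration>\n";
        Path tmp = Files.createTempFile("topology", ".xml");
        Files.write(tmp, xml.getBytes(StandardCharsets.UTF_8));
        NodeSchemaLoader.NodeSchemaLoadResult result =
            NodeSchemaLoader.getInstance().loadSchemaFromFile(tmp.toString());
        System.out.println(result.getSchemaList().size()); // 3
      }
    }
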
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java
deleted file mode 100644
index c60c2c8..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.net.NodeSchemaLoader.NodeSchemaLoadResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.conf.Configuration;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-/** The class manages all network topology schemas. */
-
-public final class NodeSchemaManager {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      NodeSchemaManager.class);
-
-  // All schema saved and sorted from ROOT to LEAF node
-  private List<NodeSchema> allSchema;
-  // enforcePrefix only applies to INNER_NODE
-  private boolean enforcePrefix;
-  // max level, includes ROOT level
-  private int maxLevel = -1;
-
-  private volatile static NodeSchemaManager instance = null;
-
-  private NodeSchemaManager() {
-  }
-
-  public static NodeSchemaManager getInstance() {
-    if (instance == null) {
-      instance = new NodeSchemaManager();
-    }
-    return instance;
-  }
-
-  public void init(Configuration conf) {
-    // load schemas from the network topology schema configuration file
-    String schemaFile = conf.get(
-        ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE,
-        ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT);
-    NodeSchemaLoadResult result;
-    try {
-      result = NodeSchemaLoader.getInstance().loadSchemaFromFile(schemaFile);
-      allSchema = result.getSchemaList();
-      enforcePrefix = result.isEnforePrefix();
-      maxLevel = allSchema.size();
-    } catch (Throwable e) {
-      String msg = "Failed to load schema file:" + schemaFile
-          + ", error: " + e.getMessage();
-      LOG.error(msg, e);
-      throw new RuntimeException(msg, e);
-    }
-  }
-
-  @VisibleForTesting
-  public void init(NodeSchema[] schemas, boolean enforce) {
-    allSchema = new ArrayList<>();
-    allSchema.addAll(Arrays.asList(schemas));
-    enforcePrefix = enforce;
-    maxLevel = schemas.length;
-  }
-
-  public int getMaxLevel() {
-    return maxLevel;
-  }
-
-  public int getCost(int level) {
-    Preconditions.checkArgument(level <= maxLevel &&
-        level >= (NetConstants.ROOT_LEVEL));
-    return allSchema.get(level - NetConstants.ROOT_LEVEL).getCost();
-  }
-
-  /**
-   * Given an incomplete network path, return its complete network path if
-   * possible. E.g. input is 'node1', output is '/rack-default/node1' if this
-   * schema manages ROOT, RACK and LEAF, with prefix defined and enforce prefix
-   * enabled.
-   *
-   * @param path the incomplete input path
-   * @return the complete path, or null if the path cannot be completed
-   */
-  public String complete(String path) {
-    if (!enforcePrefix) {
-      return null;
-    }
-    String normalizedPath = NetUtils.normalize(path);
-    String[] subPath = normalizedPath.split(NetConstants.PATH_SEPARATOR_STR);
-    if ((subPath.length) == maxLevel) {
-      return path;
-    }
-    StringBuilder newPath = new StringBuilder(NetConstants.ROOT);
-    // skip the ROOT and LEAF layer
-    int i, j;
-    for (i = 1, j = 1; i < subPath.length && j < (allSchema.size() - 1);) {
-      if (allSchema.get(j).matchPrefix(subPath[i])) {
-        newPath.append(NetConstants.PATH_SEPARATOR_STR + subPath[i]);
-        i++;
-        j++;
-      } else {
-        newPath.append(allSchema.get(j).getDefaultName());
-        j++;
-      }
-    }
-    if (i == (subPath.length - 1)) {
-      newPath.append(NetConstants.PATH_SEPARATOR_STR + subPath[i]);
-      return newPath.toString();
-    }
-    return null;
-  }
-}
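
The complete() method above splices layer default names into partial paths. A minimal sketch using the test-only init overload; the layer set and the /rack-default name mirror the javadoc example above, while the exact separator handling follows the removed NetConstants/NetUtils helpers and is assumed here:

    // Sketch only: ROOT -> RACK(prefix "rack") -> LEAF, prefix enforced.
    NodeSchemaManager manager = NodeSchemaManager.getInstance();
    manager.init(new NodeSchema[] {
        new NodeSchema.Builder().setType(NodeSchema.LayerType.ROOT).build(),
        new NodeSchema.Builder().setType(NodeSchema.LayerType.INNER_NODE)
            .setPrefix("rack").setDefaultName("/rack-default").build(),
        new NodeSchema.Builder().setType(NodeSchema.LayerType.LEAF_NODE).build()
    }, true);

    // "node1" does not match the "rack" prefix, so the rack layer's default
    // name is spliced in; per the javadoc example: "/rack-default/node1".
    String full = manager.complete("node1");
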
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java
deleted file mode 100644
index 375af7f..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-/**
- The network topology supported by Ozone.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
deleted file mode 100644
index 3c544db..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-/**
- * This package contains classes for the client of the storage container
- * protocol.
- */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
deleted file mode 100644
index 2828f6e..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ /dev/null
@@ -1,390 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-/**
- * Represents a group of datanodes which store a container.
- */
-public final class Pipeline {
-
-  private static final Logger LOG = LoggerFactory.getLogger(Pipeline.class);
-  private final PipelineID id;
-  private final ReplicationType type;
-  private final ReplicationFactor factor;
-
-  private PipelineState state;
-  private Map<DatanodeDetails, Long> nodeStatus;
-  // nodes with ordered distance to client
-  private ThreadLocal<List<DatanodeDetails>> nodesInOrder = new ThreadLocal<>();
-
-  /**
-   * The immutable properties of the pipeline object are used in
-   * ContainerStateManager#getMatchingContainerByPipeline to take a lock on
-   * the container allocations for a particular pipeline.
-   */
-  private Pipeline(PipelineID id, ReplicationType type,
-      ReplicationFactor factor, PipelineState state,
-      Map<DatanodeDetails, Long> nodeStatus) {
-    this.id = id;
-    this.type = type;
-    this.factor = factor;
-    this.state = state;
-    this.nodeStatus = nodeStatus;
-  }
-
-  /**
-   * Returns the ID of this pipeline.
-   *
-   * @return PipelineID
-   */
-  public PipelineID getId() {
-    return id;
-  }
-
-  /**
-   * Returns the type.
-   *
-   * @return type - Simple or Ratis.
-   */
-  public ReplicationType getType() {
-    return type;
-  }
-
-  /**
-   * Returns the replication factor.
-   *
-   * @return factor - ONE or THREE.
-   */
-  public ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  /**
-   * Returns the state of the pipeline.
-   *
-   * @return PipelineState
-   */
-  public PipelineState getPipelineState() {
-    return state;
-  }
-
-  /**
-   * Returns the list of nodes which form this pipeline.
-   *
-   * @return List of DatanodeDetails
-   */
-  public List<DatanodeDetails> getNodes() {
-    return new ArrayList<>(nodeStatus.keySet());
-  }
-
-  public DatanodeDetails getFirstNode() throws IOException {
-    if (nodeStatus.isEmpty()) {
-      throw new IOException(String.format("Pipeline=%s is empty", id));
-    }
-    return nodeStatus.keySet().iterator().next();
-  }
-
-  public DatanodeDetails getClosestNode() throws IOException {
-    if (nodesInOrder.get() == null || nodesInOrder.get().isEmpty()) {
-      LOG.debug("Nodes in order is empty, delegate to getFirstNode");
-      return getFirstNode();
-    }
-    return nodesInOrder.get().get(0);
-  }
-
-  public boolean isClosed() {
-    return state == PipelineState.CLOSED;
-  }
-
-  public boolean isOpen() {
-    return state == PipelineState.OPEN;
-  }
-
-  public void setNodesInOrder(List<DatanodeDetails> nodes) {
-    nodesInOrder.set(nodes);
-  }
-
-  public List<DatanodeDetails> getNodesInOrder() {
-    if (nodesInOrder.get() == null || nodesInOrder.get().isEmpty()) {
-      LOG.debug("Nodes in order is empty, delegate to getNodes");
-      return getNodes();
-    }
-    return nodesInOrder.get();
-  }
-
-  void reportDatanode(DatanodeDetails dn) throws IOException {
-    if (nodeStatus.get(dn) == null) {
-      throw new IOException(
-          String.format("Datanode=%s not part of pipeline=%s", dn, id));
-    }
-    nodeStatus.put(dn, System.currentTimeMillis());
-  }
-
-  boolean isHealthy() {
-    for (Long reportedTime : nodeStatus.values()) {
-      if (reportedTime < 0) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  public boolean isEmpty() {
-    return nodeStatus.isEmpty();
-  }
-
-  public HddsProtos.Pipeline getProtobufMessage()
-      throws UnknownPipelineStateException {
-    HddsProtos.Pipeline.Builder builder = HddsProtos.Pipeline.newBuilder()
-        .setId(id.getProtobuf())
-        .setType(type)
-        .setFactor(factor)
-        .setState(PipelineState.getProtobuf(state))
-        .setLeaderID("")
-        .addAllMembers(nodeStatus.keySet().stream()
-            .map(DatanodeDetails::getProtoBufMessage)
-            .collect(Collectors.toList()));
-    // To save the message size on wire, only transfer the node order based on
-    // network topology
-    List<DatanodeDetails> nodes = nodesInOrder.get();
-    if (nodes != null && !nodes.isEmpty()) {
-      for (int i = 0; i < nodes.size(); i++) {
-        Iterator<DatanodeDetails> it = nodeStatus.keySet().iterator();
-        for (int j = 0; j < nodeStatus.keySet().size(); j++) {
-          if (it.next().equals(nodes.get(i))) {
-            builder.addMemberOrders(j);
-            break;
-          }
-        }
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Serialize pipeline {} with nodesInOrder{ }", id.toString(),
-            nodes);
-      }
-    }
-    return builder.build();
-  }
-
-  public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline)
-      throws UnknownPipelineStateException {
-    Preconditions.checkNotNull(pipeline, "Pipeline is null");
-    return new Builder().setId(PipelineID.getFromProtobuf(pipeline.getId()))
-        .setFactor(pipeline.getFactor())
-        .setType(pipeline.getType())
-        .setState(PipelineState.fromProtobuf(pipeline.getState()))
-        .setNodes(pipeline.getMembersList().stream()
-            .map(DatanodeDetails::getFromProtoBuf).collect(Collectors.toList()))
-        .setNodesInOrder(pipeline.getMemberOrdersList())
-        .build();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    Pipeline that = (Pipeline) o;
-
-    return new EqualsBuilder()
-        .append(id, that.id)
-        .append(type, that.type)
-        .append(factor, that.factor)
-        .append(getNodes(), that.getNodes())
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder()
-        .append(id)
-        .append(type)
-        .append(factor)
-        .append(nodeStatus)
-        .toHashCode();
-  }
-
-  @Override
-  public String toString() {
-    final StringBuilder b =
-        new StringBuilder(getClass().getSimpleName()).append("[");
-    b.append(" Id: ").append(id.getId());
-    b.append(", Nodes: ");
-    nodeStatus.keySet().forEach(b::append);
-    b.append(", Type:").append(getType());
-    b.append(", Factor:").append(getFactor());
-    b.append(", State:").append(getPipelineState());
-    b.append("]");
-    return b.toString();
-  }
-
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  public static Builder newBuilder(Pipeline pipeline) {
-    return new Builder(pipeline);
-  }
-
-  /**
-   * Builder class for Pipeline.
-   */
-  public static class Builder {
-    private PipelineID id = null;
-    private ReplicationType type = null;
-    private ReplicationFactor factor = null;
-    private PipelineState state = null;
-    private Map<DatanodeDetails, Long> nodeStatus = null;
-    private List<Integer> nodeOrder = null;
-    private List<DatanodeDetails> nodesInOrder = null;
-
-    public Builder() {}
-
-    public Builder(Pipeline pipeline) {
-      this.id = pipeline.id;
-      this.type = pipeline.type;
-      this.factor = pipeline.factor;
-      this.state = pipeline.state;
-      this.nodeStatus = pipeline.nodeStatus;
-      this.nodesInOrder = pipeline.nodesInOrder.get();
-    }
-
-    public Builder setId(PipelineID id1) {
-      this.id = id1;
-      return this;
-    }
-
-    public Builder setType(ReplicationType type1) {
-      this.type = type1;
-      return this;
-    }
-
-    public Builder setFactor(ReplicationFactor factor1) {
-      this.factor = factor1;
-      return this;
-    }
-
-    public Builder setState(PipelineState state1) {
-      this.state = state1;
-      return this;
-    }
-
-    public Builder setNodes(List<DatanodeDetails> nodes) {
-      this.nodeStatus = new LinkedHashMap<>();
-      nodes.forEach(node -> nodeStatus.put(node, -1L));
-      return this;
-    }
-
-    public Builder setNodesInOrder(List<Integer> orders) {
-      this.nodeOrder = orders;
-      return this;
-    }
-
-    public Pipeline build() {
-      Preconditions.checkNotNull(id);
-      Preconditions.checkNotNull(type);
-      Preconditions.checkNotNull(factor);
-      Preconditions.checkNotNull(state);
-      Preconditions.checkNotNull(nodeStatus);
-      Pipeline pipeline = new Pipeline(id, type, factor, state, nodeStatus);
-
-      if (nodeOrder != null && !nodeOrder.isEmpty()) {
-        // This branch handles building from protobuf
-        List<DatanodeDetails> nodesWithOrder = new ArrayList<>();
-        for (int i = 0; i < nodeOrder.size(); i++) {
-          int nodeIndex = nodeOrder.get(i);
-          Iterator<DatanodeDetails> it = nodeStatus.keySet().iterator();
-          while (it.hasNext() && nodeIndex >= 0) {
-            DatanodeDetails node = it.next();
-            if (nodeIndex == 0) {
-              nodesWithOrder.add(node);
-              break;
-            }
-            nodeIndex--;
-          }
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Deserialize nodesInOrder {} in pipeline {}",
-              nodesWithOrder, id.toString());
-        }
-        pipeline.setNodesInOrder(nodesWithOrder);
-      } else if (nodesInOrder != null) {
-        // This branch is for pipeline clone
-        pipeline.setNodesInOrder(nodesInOrder);
-      }
-      return pipeline;
-    }
-  }
-
-  /**
-   * Possible Pipeline states in SCM.
-   */
-  public enum PipelineState {
-    ALLOCATED, OPEN, DORMANT, CLOSED;
-
-    public static PipelineState fromProtobuf(HddsProtos.PipelineState state)
-        throws UnknownPipelineStateException {
-      Preconditions.checkNotNull(state, "Pipeline state is null");
-      switch (state) {
-      case PIPELINE_ALLOCATED: return ALLOCATED;
-      case PIPELINE_OPEN: return OPEN;
-      case PIPELINE_DORMANT: return DORMANT;
-      case PIPELINE_CLOSED: return CLOSED;
-      default:
-        throw new UnknownPipelineStateException(
-            "Pipeline state: " + state + " is not recognized.");
-      }
-    }
-
-    public static HddsProtos.PipelineState getProtobuf(PipelineState state)
-        throws UnknownPipelineStateException {
-      Preconditions.checkNotNull(state, "Pipeline state is null");
-      switch (state) {
-      case ALLOCATED: return HddsProtos.PipelineState.PIPELINE_ALLOCATED;
-      case OPEN: return HddsProtos.PipelineState.PIPELINE_OPEN;
-      case DORMANT: return HddsProtos.PipelineState.PIPELINE_DORMANT;
-      case CLOSED: return HddsProtos.PipelineState.PIPELINE_CLOSED;
-      default:
-        throw new UnknownPipelineStateException(
-            "Pipeline state: " + state + " is not recognized.");
-      }
-    }
-  }
-}
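
For reference, a minimal sketch of driving the Builder above; it assumes the deleted hadoop-hdds-common classes (now maintained in the separate Apache Ozone project) are still on a classpath, and the class name and datanode parameters are purely illustrative:

    import java.util.Arrays;

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
    import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

    public final class PipelineBuilderSketch {
      private PipelineBuilderSketch() { }

      /** Builds an OPEN three-node Ratis pipeline from existing datanodes. */
      public static Pipeline buildRatisPipeline(DatanodeDetails d1,
          DatanodeDetails d2, DatanodeDetails d3) {
        // build() requires id, type, factor, state and nodes to be non-null.
        return Pipeline.newBuilder()
            .setId(PipelineID.randomId())
            .setType(ReplicationType.RATIS)
            .setFactor(ReplicationFactor.THREE)
            .setState(Pipeline.PipelineState.OPEN)
            .setNodes(Arrays.asList(d1, d2, d3))
            .build();
      }
    }

Note that setNodes initializes every node's reported time to -1L, so a freshly built pipeline is not considered healthy until each datanode has been reported at least once.
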
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java
deleted file mode 100644
index 76cf55e..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.util.UUID;
-
-/**
- * ID for the pipeline, the ID is based on UUID.
- */
-public final class PipelineID {
-
-  private UUID id;
-
-  private PipelineID(UUID id) {
-    this.id = id;
-  }
-
-  public static PipelineID randomId() {
-    return new PipelineID(UUID.randomUUID());
-  }
-
-  public static PipelineID valueOf(UUID id) {
-    return new PipelineID(id);
-  }
-
-  public UUID getId() {
-    return id;
-  }
-
-  public HddsProtos.PipelineID getProtobuf() {
-    return HddsProtos.PipelineID.newBuilder().setId(id.toString()).build();
-  }
-
-  public static PipelineID getFromProtobuf(HddsProtos.PipelineID protos) {
-    return new PipelineID(UUID.fromString(protos.getId()));
-  }
-
-  @Override
-  public String toString() {
-    return "PipelineID=" + id;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    PipelineID that = (PipelineID) o;
-
-    return id.equals(that.id);
-  }
-
-  @Override
-  public int hashCode() {
-    return id.hashCode();
-  }
-
-}
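
A quick round-trip sketch for the UUID-backed ID above (hypothetical class name, same classpath assumption as before):

    import java.util.UUID;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

    public final class PipelineIdRoundTrip {
      public static void main(String[] args) {
        PipelineID original = PipelineID.valueOf(UUID.randomUUID());
        // The wire form carries the UUID as a string.
        HddsProtos.PipelineID proto = original.getProtobuf();
        PipelineID restored = PipelineID.getFromProtobuf(proto);
        // equals() and hashCode() delegate to the underlying UUID.
        System.out.println(original.equals(restored)); // true
      }
    }
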
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java
deleted file mode 100644
index 2a89aab..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import java.io.IOException;
-
-/**
- * Signals that a pipeline is missing from PipelineManager.
- */
-public class PipelineNotFoundException extends IOException {
-  /**
-   * Constructs a {@code PipelineNotFoundException} with {@code null}
-   * as its error detail message.
-   */
-  public PipelineNotFoundException() {
-    super();
-  }
-
-  /**
-   * Constructs a {@code PipelineNotFoundException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public PipelineNotFoundException(String message) {
-    super(message);
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java
deleted file mode 100644
index 7c75fc0..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import java.io.IOException;
-
-/**
- * Signals that a pipeline state is not recognized.
- */
-public class UnknownPipelineStateException extends IOException {
-  /**
-   * Constructs an {@code UnknownPipelineStateException} with {@code null}
-   * as its error detail message.
-   */
-  public UnknownPipelineStateException() {
-    super();
-  }
-
-  /**
-   * Constructs an {@code UnknownPipelineStateException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public UnknownPipelineStateException(String message) {
-    super(message);
-  }
-}
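
Both exception types above are plain IOException subclasses, so callers can distinguish them with instanceof checks. A small hedged sketch (the handler method is hypothetical):

    import java.io.IOException;

    import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
    import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException;

    public final class PipelineErrorHandling {
      static void handle(IOException e) {
        if (e instanceof PipelineNotFoundException) {
          // The pipeline is missing from PipelineManager; re-resolve it.
        } else if (e instanceof UnknownPipelineStateException) {
          // A wire value did not map to any known PipelineState constant.
        } else {
          // Ordinary I/O failure.
        }
      }
    }
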
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
deleted file mode 100644
index 51adc88..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.pipeline;
-/**
- Ozone supports the notion of different kinds of pipelines. That means a
- replication pipeline can be built on Ratis, Simple, or some other protocol.
- All pipeline managers, the entities in charge of pipelines, reside in this
- package.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java
deleted file mode 100644
index 10a9b1b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-
-import java.util.Set;
-
-/**
- * Holds the nodes that currently host the container for an object key hash.
- */
-@InterfaceAudience.Private
-public final class LocatedContainer {
-  private final String key;
-  private final String matchedKeyPrefix;
-  private final String containerName;
-  private final Set<DatanodeInfo> locations;
-  private final DatanodeInfo leader;
-
-  /**
-   * Creates a LocatedContainer.
-   *
-   * @param key object key
-   * @param matchedKeyPrefix prefix of key that was used to find the location
-   * @param containerName container name
-   * @param locations nodes that currently host the container
-   * @param leader node that currently acts as pipeline leader
-   */
-  public LocatedContainer(String key, String matchedKeyPrefix,
-      String containerName, Set<DatanodeInfo> locations, DatanodeInfo leader) {
-    this.key = key;
-    this.matchedKeyPrefix = matchedKeyPrefix;
-    this.containerName = containerName;
-    this.locations = locations;
-    this.leader = leader;
-  }
-
-  /**
-   * Returns the container name.
-   *
-   * @return container name
-   */
-  public String getContainerName() {
-    return this.containerName;
-  }
-
-  /**
-   * Returns the object key.
-   *
-   * @return object key
-   */
-  public String getKey() {
-    return this.key;
-  }
-
-  /**
-   * Returns the node that currently acts as pipeline leader.
-   *
-   * @return node that currently acts as pipeline leader
-   */
-  public DatanodeInfo getLeader() {
-    return this.leader;
-  }
-
-  /**
-   * Returns the nodes that currently host the container.
-   *
-   * @return {@code Set<DatanodeInfo>} nodes that currently host the container
-   */
-  public Set<DatanodeInfo> getLocations() {
-    return this.locations;
-  }
-
-  /**
-   * Returns the prefix of the key that was used to find the location.
-   *
-   * @return prefix of the key that was used to find the location
-   */
-  public String getMatchedKeyPrefix() {
-    return this.matchedKeyPrefix;
-  }
-
-  @Override
-  public boolean equals(Object otherObj) {
-    if (otherObj == null) {
-      return false;
-    }
-    if (!(otherObj instanceof LocatedContainer)) {
-      return false;
-    }
-    LocatedContainer other = (LocatedContainer)otherObj;
-    return this.key == null ? other.key == null : this.key.equals(other.key);
-  }
-
-  @Override
-  public int hashCode() {
-    return key.hashCode();
-  }
-
-  @Override
-  public String toString() {
-    return getClass().getSimpleName()
-        + "{key=" + key
-        + "; matchedKeyPrefix=" + matchedKeyPrefix
-        + "; containerName=" + containerName
-        + "; locations=" + locations
-        + "; leader=" + leader
-        + "}";
-  }
-}
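
Note that equals() and hashCode() above consider only the object key, so two LocatedContainer instances for the same key compare equal even when the container placement differs. An illustrative sketch (class and parameter names are hypothetical):

    import java.util.Collections;

    import org.apache.hadoop.hdds.scm.protocol.LocatedContainer;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    public final class LocatedContainerEquality {
      /** Returns true: only the key participates in equals()/hashCode(). */
      public static boolean sameKeyMeansEqual(DatanodeInfo dn1,
          DatanodeInfo dn2) {
        LocatedContainer a = new LocatedContainer("key1", "key",
            "container-1", Collections.singleton(dn1), dn1);
        LocatedContainer b = new LocatedContainer("key1", "key",
            "container-2", Collections.singleton(dn2), dn2);
        return a.equals(b);
      }
    }
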
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
deleted file mode 100644
index 18045f8..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.protocol;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
- * to read/write a block.
- */
-@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
-public interface ScmBlockLocationProtocol extends Closeable {
-
-  @SuppressWarnings("checkstyle:ConstantName")
-  /**
-   * Version 1: Initial version.
-   */
-  long versionID = 1L;
-
-  /**
-   * Asks SCM where a block should be allocated. SCM responds with the
-   * set of datanodes that should be used for creating this block.
-   * @param size - size of the block.
-   * @param numBlocks - number of blocks.
-   * @param type - replication type of the blocks.
-   * @param factor - replication factor of the blocks.
-   * @param owner - owner of the blocks.
-   * @param excludeList List of datanodes/containers to exclude during block
-   *                    allocation.
-   * @return allocated block accessing info (key, pipeline).
-   * @throws IOException
-   */
-  List<AllocatedBlock> allocateBlock(long size, int numBlocks,
-      ReplicationType type, ReplicationFactor factor, String owner,
-      ExcludeList excludeList) throws IOException;
-
-  /**
-   * Delete blocks for a set of object keys.
-   *
-   * @param keyBlocksInfoList list of object keys and their blocks.
-   * @return list of block deletion results.
-   * @throws IOException if there is any failure.
-   */
-  List<DeleteBlockGroupResult>
-      deleteKeyBlocks(List<BlockGroup> keyBlocksInfoList) throws IOException;
-
-  /**
-   * Gets the cluster ID and SCM ID from SCM.
-   */
-  ScmInfo getScmInfo() throws IOException;
-
-  /**
-   * Sorts datanodes by distance to the client.
-   * @param nodes list of network names of the nodes.
-   * @param clientMachine client address; may be a hostname or an IP address.
-   */
-  List<DatanodeDetails> sortDatanodes(List<String> nodes,
-      String clientMachine) throws IOException;
-}
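
A minimal caller sketch for the interface above; the block size, owner string, and the no-argument ExcludeList constructor are assumptions made for illustration:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
    import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
    import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;

    public final class AllocateBlocksSketch {
      /** Asks SCM for three 256 MB Ratis blocks owned by "om". */
      static List<AllocatedBlock> allocate(ScmBlockLocationProtocol scm)
          throws IOException {
        return scm.allocateBlock(256L * 1024 * 1024, 3,
            ReplicationType.RATIS, ReplicationFactor.THREE,
            "om", new ExcludeList());
      }
    }

Each returned AllocatedBlock carries the block's key and the pipeline to write it through.
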
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java
deleted file mode 100644
index 0d2ecf7..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * Holds the nodes that currently host the block for a block key.
- */
-@InterfaceAudience.Private
-public final class ScmLocatedBlock {
-  private final String key;
-  private final List<DatanodeInfo> locations;
-  private final DatanodeInfo leader;
-
-  /**
-   * Creates a ScmLocatedBlock.
-   *
-   * @param key object key
-   * @param locations nodes that currently host the block
-   * @param leader node that currently acts as pipeline leader
-   */
-  public ScmLocatedBlock(final String key, final List<DatanodeInfo> locations,
-      final DatanodeInfo leader) {
-    this.key = key;
-    this.locations = locations;
-    this.leader = leader;
-  }
-
-  /**
-   * Returns the object key.
-   *
-   * @return object key
-   */
-  public String getKey() {
-    return this.key;
-  }
-
-  /**
-   * Returns the node that currently acts as pipeline leader.
-   *
-   * @return node that currently acts as pipeline leader
-   */
-  public DatanodeInfo getLeader() {
-    return this.leader;
-  }
-
-  /**
-   * Returns the nodes that currently host the block.
-   *
-   * @return {@literal List<DatanodeInfo>} nodes that currently host the block
-   */
-  public List<DatanodeInfo> getLocations() {
-    return this.locations;
-  }
-
-  @Override
-  public boolean equals(Object otherObj) {
-    if (otherObj == null) {
-      return false;
-    }
-    if (!(otherObj instanceof ScmLocatedBlock)) {
-      return false;
-    }
-    ScmLocatedBlock other = (ScmLocatedBlock)otherObj;
-    return this.key == null ? other.key == null : this.key.equals(other.key);
-  }
-
-  @Override
-  public int hashCode() {
-    return key.hashCode();
-  }
-
-  @Override
-  public String toString() {
-    return getClass().getSimpleName() + "{key=" + key + "; locations="
-        + locations.stream().map(loc -> loc.toString()).collect(Collectors
-            .joining(",")) + "; leader=" + leader + "}";
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
deleted file mode 100644
index 88db820..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.protocol;
-
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import org.apache.hadoop.security.KerberosInfo;
-
-/**
- * ContainerLocationProtocol is used by an HDFS node to find the set of nodes
- * that currently host a container.
- */
-@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
-public interface StorageContainerLocationProtocol extends Closeable {
-
-  @SuppressWarnings("checkstyle:ConstantName")
-  /**
-   * Version 1: Initial version.
-   */
-  long versionID = 1L;
-
-  /**
-   * Asks SCM where a container should be allocated. SCM responds with the
-   * set of datanodes that should be used for creating this container.
-   *
-   */
-  ContainerWithPipeline allocateContainer(
-      HddsProtos.ReplicationType replicationType,
-      HddsProtos.ReplicationFactor factor, String owner)
-      throws IOException;
-
-  /**
-   * Ask SCM the location of the container. SCM responds with a group of
-   * nodes where this container and its replicas are located.
-   *
-   * @param containerID - ID of the container.
-   * @return ContainerInfo - the container info such as where the pipeline
-   *                         is located.
-   * @throws IOException
-   */
-  ContainerInfo getContainer(long containerID) throws IOException;
-
-  /**
-   * Ask SCM the location of the container. SCM responds with a group of
-   * nodes where this container and its replicas are located.
-   *
-   * @param containerID - ID of the container.
-   * @return ContainerWithPipeline - the container info with the pipeline.
-   * @throws IOException
-   */
-  ContainerWithPipeline getContainerWithPipeline(long containerID)
-      throws IOException;
-
-  /**
-   * Asks SCM for a list of containers, starting from the given container ID
-   * (exclusive) and limited in size by count. The size of the returned list
-   * cannot exceed the value of count.
-   *
-   * @param startContainerID start container ID (exclusive).
-   * @param count maximum number of containers to return; if count
-   *              {@literal <} 0, the size is unlimited. (Usually count is
-   *              replaced with a very large value instead of being unlimited,
-   *              in case the db is very big.)
-   *
-   * @return a list of containers.
-   * @throws IOException
-   */
-  List<ContainerInfo> listContainer(long startContainerID, int count)
-      throws IOException;
-
-  /**
-   * Deletes a container in SCM.
-   *
-   * @param containerID ID of the container to delete.
-   * @throws IOException
-   *   if the container mapping cannot be deleted from the db store
-   *   or the container doesn't exist.
-   */
-  void deleteContainer(long containerID) throws IOException;
-
-  /**
-   * Queries a list of node statuses.
-   * @param state node state to filter on.
-   * @return List of Datanodes.
-   */
-  List<HddsProtos.Node> queryNode(HddsProtos.NodeState state,
-      HddsProtos.QueryScope queryScope, String poolName) throws IOException;
-
-  /**
-   * Notification from the client when it begins or finishes creating objects
-   * such as pipelines or containers on datanodes.
-   * The container will be in Operational state after that.
-   * @param type object type
-   * @param id object id
-   * @param op operation type (e.g., create, close, delete)
-   * @param stage creation stage
-   */
-  void notifyObjectStageChange(
-      ObjectStageChangeRequestProto.Type type, long id,
-      ObjectStageChangeRequestProto.Op op,
-      ObjectStageChangeRequestProto.Stage stage) throws IOException;
-
-  /**
-   * Creates a replication pipeline of a specified type.
-   * @param type - replication type
-   * @param factor - factor 1 or 3
-   * @param nodePool - optional machine list to build a pipeline.
-   * @throws IOException
-   */
-  Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
-      throws IOException;
-
-  /**
-   * Returns the list of active Pipelines.
-   *
-   * @return list of Pipeline
-   *
-   * @throws IOException in case of any exception
-   */
-  List<Pipeline> listPipelines() throws IOException;
-
-  /**
-   * Activates a dormant pipeline.
-   *
-   * @param pipelineID ID of the pipeline to activate.
-   * @throws IOException in case of any Exception
-   */
-  void activatePipeline(HddsProtos.PipelineID pipelineID) throws IOException;
-
-  /**
-   * Deactivates an active pipeline.
-   *
-   * @param pipelineID ID of the pipeline to deactivate.
-   * @throws IOException in case of any Exception
-   */
-  void deactivatePipeline(HddsProtos.PipelineID pipelineID) throws IOException;
-
-  /**
-   * Closes a pipeline given the pipelineID.
-   *
-   * @param pipelineID ID of the pipeline to demolish
-   * @throws IOException
-   */
-  void closePipeline(HddsProtos.PipelineID pipelineID) throws IOException;
-
-  /**
-   * Returns information about SCM.
-   *
-   * @return {@link ScmInfo}
-   * @throws IOException
-   */
-  ScmInfo getScmInfo() throws IOException;
-
-  /**
-   * Check if SCM is in safe mode.
-   *
-   * @return true if SCM is in safe mode, false otherwise.
-   * @throws IOException
-   */
-  boolean inSafeMode() throws IOException;
-
-  /**
-   * Force SCM out of Safe mode.
-   *
-   * @return true if the operation is successful.
-   * @throws IOException
-   */
-  boolean forceExitSafeMode() throws IOException;
-
-  /**
-   * Start ReplicationManager.
-   */
-  void startReplicationManager() throws IOException;
-
-  /**
-   * Stop ReplicationManager.
-   */
-  void stopReplicationManager() throws IOException;
-
-  /**
-   * Returns ReplicationManager status.
-   *
-   * @return True if ReplicationManager is running, false otherwise.
-   */
-  boolean getReplicationManagerStatus() throws IOException;
-
-}
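
Since listContainer pages by an exclusive start ID, a caller can walk the full container set batch by batch. A hedged sketch (assuming ContainerInfo exposes its numeric ID via getContainerID(); the class name is illustrative):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;

    public final class ListAllContainersSketch {
      static List<ContainerInfo> listAll(StorageContainerLocationProtocol scm,
          int batchSize) throws IOException {
        List<ContainerInfo> all = new ArrayList<>();
        long startId = 0;
        while (true) {
          List<ContainerInfo> batch = scm.listContainer(startId, batchSize);
          if (batch.isEmpty()) {
            break;
          }
          all.addAll(batch);
          // The start ID is exclusive, so resume from the last ID seen.
          startId = batch.get(batch.size() - 1).getContainerID();
        }
        return all;
      }
    }
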
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java
deleted file mode 100644
index b56a749..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.protocol;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
deleted file mode 100644
index a262bb5..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
+++ /dev/null
@@ -1,273 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.client.ContainerBlockID;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Type;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateBlockResponse;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .SortDatanodesRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .SortDatanodesResponseProto;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
-import static org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Status.OK;
-
-/**
- * This class is the client-side translator to translate the requests made on
- * the {@link ScmBlockLocationProtocol} interface to the RPC server
- * implementing {@link ScmBlockLocationProtocolPB}.
- */
-@InterfaceAudience.Private
-public final class ScmBlockLocationProtocolClientSideTranslatorPB
-    implements ScmBlockLocationProtocol, ProtocolTranslator, Closeable {
-
-  /**
-   * RpcController is not used and hence is set to null.
-   */
-  private static final RpcController NULL_RPC_CONTROLLER = null;
-
-  private final ScmBlockLocationProtocolPB rpcProxy;
-
-  /**
-   * Creates a new ScmBlockLocationProtocolClientSideTranslatorPB.
-   *
-   * @param rpcProxy {@link ScmBlockLocationProtocolPB} RPC proxy
-   */
-  public ScmBlockLocationProtocolClientSideTranslatorPB(
-      ScmBlockLocationProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
-  }
-
-  /**
-   * Returns a SCMBlockLocationRequest builder with specified type.
-   * @param cmdType type of the request
-   */
-  private SCMBlockLocationRequest.Builder createSCMBlockRequest(Type cmdType) {
-    return SCMBlockLocationRequest.newBuilder()
-        .setCmdType(cmdType)
-        .setTraceID(TracingUtil.exportCurrentSpan());
-  }
-
-  /**
-   * Submits client request to SCM server.
-   * @param req client request
-   * @return response from SCM
-   * @throws IOException thrown if any Protobuf service exception occurs
-   */
-  private SCMBlockLocationResponse submitRequest(
-      SCMBlockLocationRequest req) throws IOException {
-    try {
-      SCMBlockLocationResponse response =
-          rpcProxy.send(NULL_RPC_CONTROLLER, req);
-      return response;
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  private SCMBlockLocationResponse handleError(SCMBlockLocationResponse resp)
-      throws SCMException {
-    if (resp.getStatus() != OK) {
-      throw new SCMException(resp.getMessage(),
-          SCMException.ResultCodes.values()[resp.getStatus().ordinal()]);
-    }
-    return resp;
-  }
-
-  /**
-   * Asks SCM where a block should be allocated. SCM responds with the
-   * set of datanodes that should be used for creating this block.
-   * @param size - size of the block.
-   * @param num - number of blocks.
-   * @param type - replication type of the blocks.
-   * @param factor - replication factor of the blocks.
-   * @param excludeList - exclude list while allocating blocks.
-   * @return allocated block accessing info (key, pipeline).
-   * @throws IOException
-   */
-  @Override
-  public List<AllocatedBlock> allocateBlock(long size, int num,
-      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
-      String owner, ExcludeList excludeList) throws IOException {
-    Preconditions.checkArgument(size > 0, "block size must be greater than 0");
-
-    AllocateScmBlockRequestProto request =
-        AllocateScmBlockRequestProto.newBuilder()
-            .setSize(size)
-            .setNumBlocks(num)
-            .setType(type)
-            .setFactor(factor)
-            .setOwner(owner)
-            .setExcludeList(excludeList.getProtoBuf())
-            .build();
-
-    SCMBlockLocationRequest wrapper = createSCMBlockRequest(
-        Type.AllocateScmBlock)
-        .setAllocateScmBlockRequest(request)
-        .build();
-
-    final SCMBlockLocationResponse wrappedResponse =
-        handleError(submitRequest(wrapper));
-    final AllocateScmBlockResponseProto response =
-        wrappedResponse.getAllocateScmBlockResponse();
-
-    List<AllocatedBlock> blocks = new ArrayList<>(response.getBlocksCount());
-    for (AllocateBlockResponse resp : response.getBlocksList()) {
-      AllocatedBlock.Builder builder = new AllocatedBlock.Builder()
-          .setContainerBlockID(
-              ContainerBlockID.getFromProtobuf(resp.getContainerBlockID()))
-          .setPipeline(Pipeline.getFromProtobuf(resp.getPipeline()));
-      blocks.add(builder.build());
-    }
-
-    return blocks;
-  }
-
-  /**
-   * Delete the set of keys specified.
-   *
-   * @param keyBlocksInfoList batch of block keys to delete.
-   * @return list of block deletion results.
-   * @throws IOException if there is any failure.
-   *
-   */
-  @Override
-  public List<DeleteBlockGroupResult> deleteKeyBlocks(
-      List<BlockGroup> keyBlocksInfoList) throws IOException {
-    List<KeyBlocks> keyBlocksProto = keyBlocksInfoList.stream()
-        .map(BlockGroup::getProto).collect(Collectors.toList());
-    DeleteScmKeyBlocksRequestProto request = DeleteScmKeyBlocksRequestProto
-        .newBuilder()
-        .addAllKeyBlocks(keyBlocksProto)
-        .build();
-
-    SCMBlockLocationRequest wrapper = createSCMBlockRequest(
-        Type.DeleteScmKeyBlocks)
-        .setDeleteScmKeyBlocksRequest(request)
-        .build();
-
-    final SCMBlockLocationResponse wrappedResponse =
-        handleError(submitRequest(wrapper));
-    final DeleteScmKeyBlocksResponseProto resp =
-        wrappedResponse.getDeleteScmKeyBlocksResponse();
-
-    List<DeleteBlockGroupResult> results =
-        new ArrayList<>(resp.getResultsCount());
-    results.addAll(resp.getResultsList().stream().map(
-        result -> new DeleteBlockGroupResult(result.getObjectKey(),
-            DeleteBlockGroupResult
-                .convertBlockResultProto(result.getBlockResultsList())))
-        .collect(Collectors.toList()));
-    return results;
-  }
-
-  /**
-   * Gets the cluster ID and SCM ID from SCM.
-   * @return ScmInfo
-   * @throws IOException
-   */
-  @Override
-  public ScmInfo getScmInfo() throws IOException {
-    HddsProtos.GetScmInfoRequestProto request =
-        HddsProtos.GetScmInfoRequestProto.getDefaultInstance();
-    HddsProtos.GetScmInfoResponseProto resp;
-
-    SCMBlockLocationRequest wrapper = createSCMBlockRequest(
-        Type.GetScmInfo)
-        .setGetScmInfoRequest(request)
-        .build();
-
-    final SCMBlockLocationResponse wrappedResponse =
-        handleError(submitRequest(wrapper));
-    resp = wrappedResponse.getGetScmInfoResponse();
-    ScmInfo.Builder builder = new ScmInfo.Builder()
-        .setClusterId(resp.getClusterId())
-        .setScmId(resp.getScmId());
-    return builder.build();
-  }
-
-  /**
-   * Sorts the datanodes based on distance from the client.
-   * @return {@literal List<DatanodeDetails>} sorted by distance, closest first.
-   * @throws IOException
-   */
-  @Override
-  public List<DatanodeDetails> sortDatanodes(List<String> nodes,
-      String clientMachine) throws IOException {
-    SortDatanodesRequestProto request = SortDatanodesRequestProto
-        .newBuilder()
-        .addAllNodeNetworkName(nodes)
-        .setClient(clientMachine)
-        .build();
-    SCMBlockLocationRequest wrapper = createSCMBlockRequest(
-        Type.SortDatanodes)
-        .setSortDatanodesRequest(request)
-        .build();
-
-    final SCMBlockLocationResponse wrappedResponse =
-        handleError(submitRequest(wrapper));
-    SortDatanodesResponseProto resp =
-        wrappedResponse.getSortDatanodesResponse();
-    List<DatanodeDetails> results = new ArrayList<>(resp.getNodeCount());
-    results.addAll(resp.getNodeList().stream()
-        .map(node -> DatanodeDetails.getFromProtoBuf(node))
-        .collect(Collectors.toList()));
-    return results;
-  }
-
-  @Override
-  public Object getUnderlyingProxyObject() {
-    return rpcProxy;
-  }
-
-  @Override
-  public void close() {
-    RPC.stopProxy(rpcProxy);
-  }
-}
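
Because the translator implements Closeable and close() stops the RPC proxy, it fits try-with-resources. A short usage sketch, assuming ScmInfo's getClusterId()/getScmId() accessors; obtaining the ScmBlockLocationProtocolPB proxy itself is out of scope here:

    import java.io.IOException;

    import org.apache.hadoop.hdds.scm.ScmInfo;
    import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
    import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;

    public final class ScmInfoSketch {
      static void printScmInfo(ScmBlockLocationProtocolPB rpcProxy)
          throws IOException {
        // The translator owns the proxy; closing it stops the RPC proxy.
        try (ScmBlockLocationProtocolClientSideTranslatorPB client =
            new ScmBlockLocationProtocolClientSideTranslatorPB(rpcProxy)) {
          ScmInfo info = client.getScmInfo();
          System.out.println(info.getClusterId() + " / " + info.getScmId());
        }
      }
    }
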
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
deleted file mode 100644
index 1ba698b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .ScmBlockLocationProtocolService;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.security.KerberosInfo;
-
-/**
- * Protocol used from an HDFS node to StorageContainerManager.  This extends the
- * Protocol Buffers service interface to add Hadoop-specific annotations.
- */
-@ProtocolInfo(protocolName =
-    "org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol",
-    protocolVersion = 1)
-@InterfaceAudience.Private
-@KerberosInfo(
-    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
-public interface ScmBlockLocationProtocolPB
-    extends ScmBlockLocationProtocolService.BlockingInterface {
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
deleted file mode 100644
index 01db597..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ /dev/null
@@ -1,475 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.function.Consumer;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.GetScmInfoResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ActivatePipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ClosePipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest.Builder;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
-/**
- * This class is the client-side translator to translate the requests made on
- * the {@link StorageContainerLocationProtocol} interface to the RPC server
- * implementing {@link StorageContainerLocationProtocolPB}.
- */
-@InterfaceAudience.Private
-public final class StorageContainerLocationProtocolClientSideTranslatorPB
-    implements StorageContainerLocationProtocol, ProtocolTranslator, Closeable {
-
-  /**
-   * RpcController is not used and hence is set to null.
-   */
-  private static final RpcController NULL_RPC_CONTROLLER = null;
-
-  private final StorageContainerLocationProtocolPB rpcProxy;
-
-  /**
-   * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB.
-   *
-   * @param rpcProxy {@link StorageContainerLocationProtocolPB} RPC proxy
-   */
-  public StorageContainerLocationProtocolClientSideTranslatorPB(
-      StorageContainerLocationProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
-  }
-
-  /**
-   * Helper method to wrap the request and send the message.
-   */
-  private ScmContainerLocationResponse submitRequest(
-      StorageContainerLocationProtocolProtos.Type type,
-      Consumer<Builder> builderConsumer) throws IOException {
-    final ScmContainerLocationResponse response;
-    try {
-
-      Builder builder = ScmContainerLocationRequest.newBuilder()
-          .setCmdType(type)
-          .setTraceID(TracingUtil.exportCurrentSpan());
-      builderConsumer.accept(builder);
-      ScmContainerLocationRequest wrapper = builder.build();
-
-      response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
-    } catch (ServiceException ex) {
-      throw ProtobufHelper.getRemoteException(ex);
-    }
-    return response;
-  }
-
-  /**
-   * Asks SCM where a container should be allocated. SCM responds with the set
-   * of datanodes that should be used for creating this container. Ozone/SCM
-   * only supports a replication factor of either 1 or 3.
-   *
-   * @param type   - Replication Type
-   * @param factor - Replication Count
-   */
-  @Override
-  public ContainerWithPipeline allocateContainer(
-      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
-      String owner) throws IOException {
-
-    ContainerRequestProto request = ContainerRequestProto.newBuilder()
-        .setTraceID(TracingUtil.exportCurrentSpan())
-        .setReplicationFactor(factor)
-        .setReplicationType(type)
-        .setOwner(owner)
-        .build();
-
-    ContainerResponseProto response =
-        submitRequest(Type.AllocateContainer,
-            builder -> builder.setContainerRequest(request))
-            .getContainerResponse();
-    //TODO should be migrated to use the top level status structure.
-    if (response.getErrorCode() != ContainerResponseProto.Error.success) {
-      throw new IOException(response.hasErrorMessage() ?
-          response.getErrorMessage() : "Allocate container failed.");
-    }
-    return ContainerWithPipeline.fromProtobuf(
-        response.getContainerWithPipeline());
-  }
-
-  public ContainerInfo getContainer(long containerID) throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative");
-    GetContainerRequestProto request = GetContainerRequestProto
-        .newBuilder()
-        .setContainerID(containerID)
-        .setTraceID(TracingUtil.exportCurrentSpan())
-        .build();
-    ScmContainerLocationResponse response =
-        submitRequest(Type.GetContainer,
-            (builder) -> builder.setGetContainerRequest(request));
-    return ContainerInfo
-        .fromProtobuf(response.getGetContainerResponse().getContainerInfo());
-
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  public ContainerWithPipeline getContainerWithPipeline(long containerID)
-      throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative");
-    GetContainerWithPipelineRequestProto request =
-        GetContainerWithPipelineRequestProto.newBuilder()
-            .setTraceID(TracingUtil.exportCurrentSpan())
-            .setContainerID(containerID).build();
-
-    ScmContainerLocationResponse response =
-        submitRequest(Type.GetContainerWithPipeline,
-            (builder) -> builder.setGetContainerWithPipelineRequest(request));
-
-    return ContainerWithPipeline.fromProtobuf(
-        response.getGetContainerWithPipelineResponse()
-            .getContainerWithPipeline());
-
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<ContainerInfo> listContainer(long startContainerID, int count)
-      throws IOException {
-    Preconditions.checkState(startContainerID >= 0,
-        "Container ID cannot be negative.");
-    Preconditions.checkState(count > 0,
-        "Container count must be greater than 0.");
-    SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto
-        .newBuilder();
-    builder.setStartContainerID(startContainerID);
-    builder.setCount(count);
-    builder.setTraceID(TracingUtil.exportCurrentSpan());
-    SCMListContainerRequestProto request = builder.build();
-
-    SCMListContainerResponseProto response =
-        submitRequest(Type.ListContainer,
-            builder1 -> builder1.setScmListContainerRequest(request))
-            .getScmListContainerResponse();
-    List<ContainerInfo> containerList = new ArrayList<>();
-    for (HddsProtos.ContainerInfoProto containerInfoProto : response
-        .getContainersList()) {
-      containerList.add(ContainerInfo.fromProtobuf(containerInfoProto));
-    }
-    return containerList;
-
-  }
-
-  /**
-   * Ask SCM to delete a container by name. SCM will remove
-   * the container mapping in its database.
-   */
-  @Override
-  public void deleteContainer(long containerID)
-      throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative");
-    SCMDeleteContainerRequestProto request = SCMDeleteContainerRequestProto
-        .newBuilder()
-        .setTraceID(TracingUtil.exportCurrentSpan())
-        .setContainerID(containerID)
-        .build();
-    submitRequest(Type.DeleteContainer,
-        builder -> builder.setScmDeleteContainerRequest(request));
-
-  }
-
-  /**
-   * Queries a list of Node Statuses.
-   */
-  @Override
-  public List<HddsProtos.Node> queryNode(HddsProtos.NodeState
-      nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
-      throws IOException {
-    // TODO : We support only cluster-wide queries right now, so queryScope
-    // and poolName are ignored.
-    Preconditions.checkNotNull(nodeStatuses);
-    NodeQueryRequestProto request = NodeQueryRequestProto.newBuilder()
-        .setState(nodeStatuses)
-        .setTraceID(TracingUtil.exportCurrentSpan())
-        .setScope(queryScope).setPoolName(poolName).build();
-    NodeQueryResponseProto response = submitRequest(Type.QueryNode,
-        builder -> builder.setNodeQueryRequest(request)).getNodeQueryResponse();
-    return response.getDatanodesList();
-
-  }
-
-  /**
-   * Notifies SCM from the client when an object is created on datanodes.
-   *
-   * @param type  object type
-   * @param id    object id
-   * @param op    operation type (e.g., create, close, delete)
-   * @param stage object creation stage : begin/complete
-   */
-  @Override
-  public void notifyObjectStageChange(
-      ObjectStageChangeRequestProto.Type type, long id,
-      ObjectStageChangeRequestProto.Op op,
-      ObjectStageChangeRequestProto.Stage stage) throws IOException {
-    Preconditions.checkState(id >= 0,
-        "Object id cannot be negative.");
-    ObjectStageChangeRequestProto request =
-        ObjectStageChangeRequestProto.newBuilder()
-            .setTraceID(TracingUtil.exportCurrentSpan())
-            .setType(type)
-            .setId(id)
-            .setOp(op)
-            .setStage(stage)
-            .build();
-    submitRequest(Type.NotifyObjectStageChange,
-        builder -> builder.setObjectStageChangeRequest(request));
-
-  }
-
-  /**
-   * Creates a replication pipeline of a specified type.
-   *
-   * @param replicationType - replication type
-   * @param factor          - factor 1 or 3
-   * @param nodePool        - optional machine list to build a pipeline.
-   */
-  @Override
-  public Pipeline createReplicationPipeline(HddsProtos.ReplicationType
-      replicationType, HddsProtos.ReplicationFactor factor, HddsProtos
-      .NodePool nodePool) throws IOException {
-    PipelineRequestProto request = PipelineRequestProto.newBuilder()
-        .setTraceID(TracingUtil.exportCurrentSpan())
-        .setNodePool(nodePool)
-        .setReplicationFactor(factor)
-        .setReplicationType(replicationType)
-        .build();
-
-    PipelineResponseProto response =
-        submitRequest(Type.AllocatePipeline,
-            builder -> builder.setPipelineRequest(request))
-            .getPipelineResponse();
-    if (response.getErrorCode() ==
-        PipelineResponseProto.Error.success) {
-      Preconditions.checkState(response.hasPipeline(), "With success, " +
-          "must come a pipeline");
-      return Pipeline.getFromProtobuf(response.getPipeline());
-    } else {
-      String errorMessage = String.format("create replication pipeline " +
-              "failed. code : %s Message: %s", response.getErrorCode(),
-          response.hasErrorMessage() ? response.getErrorMessage() : "");
-      throw new IOException(errorMessage);
-    }
-
-  }
-
-  @Override
-  public List<Pipeline> listPipelines() throws IOException {
-    ListPipelineRequestProto request = ListPipelineRequestProto
-        .newBuilder().setTraceID(TracingUtil.exportCurrentSpan())
-        .build();
-
-    ListPipelineResponseProto response = submitRequest(Type.ListPipelines,
-        builder -> builder.setListPipelineRequest(request))
-        .getListPipelineResponse();
-
-    List<Pipeline> list = new ArrayList<>();
-    for (HddsProtos.Pipeline pipeline : response.getPipelinesList()) {
-      Pipeline fromProtobuf = Pipeline.getFromProtobuf(pipeline);
-      list.add(fromProtobuf);
-    }
-    return list;
-
-  }
-
-  @Override
-  public void activatePipeline(HddsProtos.PipelineID pipelineID)
-      throws IOException {
-    ActivatePipelineRequestProto request =
-        ActivatePipelineRequestProto.newBuilder()
-            .setTraceID(TracingUtil.exportCurrentSpan())
-            .setPipelineID(pipelineID)
-            .build();
-    submitRequest(Type.ActivatePipeline,
-        builder -> builder.setActivatePipelineRequest(request));
-
-  }
-
-  @Override
-  public void deactivatePipeline(HddsProtos.PipelineID pipelineID)
-      throws IOException {
-
-    DeactivatePipelineRequestProto request =
-        DeactivatePipelineRequestProto.newBuilder()
-            .setTraceID(TracingUtil.exportCurrentSpan())
-            .setPipelineID(pipelineID)
-            .build();
-    submitRequest(Type.DeactivatePipeline,
-        builder -> builder.setDeactivatePipelineRequest(request));
-  }
-
-  @Override
-  public void closePipeline(HddsProtos.PipelineID pipelineID)
-      throws IOException {
-
-    ClosePipelineRequestProto request =
-        ClosePipelineRequestProto.newBuilder()
-            .setTraceID(TracingUtil.exportCurrentSpan())
-            .setPipelineID(pipelineID)
-            .build();
-    submitRequest(Type.ClosePipeline,
-        builder -> builder.setClosePipelineRequest(request));
-
-  }
-
-  @Override
-  public ScmInfo getScmInfo() throws IOException {
-    HddsProtos.GetScmInfoRequestProto request =
-        HddsProtos.GetScmInfoRequestProto.newBuilder()
-            .setTraceID(TracingUtil.exportCurrentSpan())
-            .build();
-
-    GetScmInfoResponseProto resp = submitRequest(Type.GetScmInfo,
-        builder -> builder.setGetScmInfoRequest(request))
-        .getGetScmInfoResponse();
-    ScmInfo.Builder builder = new ScmInfo.Builder()
-        .setClusterId(resp.getClusterId())
-        .setScmId(resp.getScmId());
-    return builder.build();
-
-  }
-
-  /**
-   * Check if SCM is in safe mode.
-   *
-   * @return true if SCM is in safe mode, false otherwise.
-   */
-  @Override
-  public boolean inSafeMode() throws IOException {
-    InSafeModeRequestProto request =
-        InSafeModeRequestProto.getDefaultInstance();
-
-    return submitRequest(Type.InSafeMode,
-        builder -> builder.setInSafeModeRequest(request))
-        .getInSafeModeResponse().getInSafeMode();
-
-  }
-
-  /**
-   * Force SCM out of Safe mode.
-   *
-   * @return true if the operation is successful.
-   */
-  @Override
-  public boolean forceExitSafeMode() throws IOException {
-    ForceExitSafeModeRequestProto request =
-        ForceExitSafeModeRequestProto.getDefaultInstance();
-    ForceExitSafeModeResponseProto resp =
-        submitRequest(Type.ForceExitSafeMode,
-            builder -> builder.setForceExitSafeModeRequest(request))
-            .getForceExitSafeModeResponse();
-
-    return resp.getExitedSafeMode();
-
-  }
-
-  @Override
-  public void startReplicationManager() throws IOException {
-
-    StartReplicationManagerRequestProto request =
-        StartReplicationManagerRequestProto.getDefaultInstance();
-    submitRequest(Type.StartReplicationManager,
-        builder -> builder.setStartReplicationManagerRequest(request));
-
-  }
-
-  @Override
-  public void stopReplicationManager() throws IOException {
-
-    StopReplicationManagerRequestProto request =
-        StopReplicationManagerRequestProto.getDefaultInstance();
-    submitRequest(Type.StopReplicationManager,
-        builder -> builder.setStopReplicationManagerRequest(request));
-
-  }
-
-  @Override
-  public boolean getReplicationManagerStatus() throws IOException {
-
-    ReplicationManagerStatusRequestProto request =
-        ReplicationManagerStatusRequestProto.getDefaultInstance();
-    ReplicationManagerStatusResponseProto response =
-        submitRequest(Type.GetReplicationManagerStatus,
-            builder -> builder.setSeplicationManagerStatusRequest(request))
-            .getReplicationManagerStatusResponse();
-    return response.getIsRunning();
-
-  }
-
-  @Override
-  public Object getUnderlyingProxyObject() {
-    return rpcProxy;
-  }
-
-  @Override
-  public void close() {
-    RPC.stopProxy(rpcProxy);
-  }
-}
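
For reference, a minimal usage sketch of the translator deleted above. It is
a hypothetical illustration, not code from this patch: it assumes the removed
hadoop-hdds classes are still on the classpath, that the rpcProxy argument was
obtained via Hadoop RPC (see the PB interface in the next file), and that the
container ID is illustrative only.

    import java.io.IOException;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
    import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;

    public class TranslatorUsageSketch {
      public static void demo(StorageContainerLocationProtocolPB rpcProxy)
          throws IOException {
        // The translator implements Closeable, so try-with-resources stops
        // the underlying RPC proxy when done.
        try (StorageContainerLocationProtocolClientSideTranslatorPB scm =
            new StorageContainerLocationProtocolClientSideTranslatorPB(rpcProxy)) {
          // Each call is wrapped into a ScmContainerLocationRequest by
          // submitRequest(), which also attaches the current tracing span.
          ContainerInfo info = scm.getContainer(1L); // illustrative ID
          boolean safeMode = scm.inSafeMode();
        }
      }
    }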
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
deleted file mode 100644
index f0af7aa..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos
-    .StorageContainerLocationProtocolService;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.security.KerberosInfo;
-
-/**
- * Protocol used from an HDFS node to StorageContainerManager.  This extends the
- * Protocol Buffers service interface to add Hadoop-specific annotations.
- */
-@ProtocolInfo(protocolName =
-    "org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol",
-    protocolVersion = 1)
-@KerberosInfo(
-    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
-@InterfaceAudience.Private
-public interface StorageContainerLocationProtocolPB
-    extends StorageContainerLocationProtocolService.BlockingInterface {
-}
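
The interface above carries only annotations; binding it to a live proxy went
through Hadoop's protobuf RPC machinery. A hedged sketch follows; the SCM host
and port are placeholders, not values taken from this patch.

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
    import org.apache.hadoop.ipc.ProtobufRpcEngine;
    import org.apache.hadoop.ipc.RPC;

    public class ProxySketch {
      public static StorageContainerLocationProtocolPB connect()
          throws IOException {
        Configuration conf = new Configuration();
        // Route calls for this protocol through the protobuf engine.
        RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
            ProtobufRpcEngine.class);
        // Placeholder address; the real one came from SCM client config keys.
        InetSocketAddress scm = new InetSocketAddress("scm.example.com", 9860);
        return RPC.getProxy(StorageContainerLocationProtocolPB.class,
            RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class),
            scm, conf);
      }
    }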
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java
deleted file mode 100644
index 652ae60..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-/**
- * This package contains classes for the client of the storage container
- * protocol.
- */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/CheckedBiFunction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/CheckedBiFunction.java
deleted file mode 100644
index df84859..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/CheckedBiFunction.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-
-import java.io.IOException;
-
-/**
- * Defines a functional interface with two inputs that may throw an
- * IOException.
- */
-@FunctionalInterface
-public interface CheckedBiFunction<LEFT, RIGHT,
-    THROWABLE extends IOException> {
-  void apply(LEFT left, RIGHT right) throws THROWABLE;
-}
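
Because CheckedBiFunction is a @FunctionalInterface, validators were written
as lambdas. A minimal sketch, mirroring the validator built by
getValidatorList() in ContainerProtocolCalls (deleted below):

    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction;

    public class ValidatorSketch {
      // Request/response validator: throws on any non-success result.
      static final CheckedBiFunction<
          ContainerProtos.ContainerCommandRequestProto,
          ContainerProtos.ContainerCommandResponseProto,
          IOException> VALIDATOR = (request, response) -> {
            if (response.getResult() != ContainerProtos.Result.SUCCESS) {
              throw new IOException("Command " + request.getCmdType()
                  + " failed: " + response.getMessage());
            }
          };
    }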
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
deleted file mode 100644
index d0ba60d..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ /dev/null
@@ -1,573 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import org.apache.hadoop.hdds.scm.XceiverClientReply;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .BlockNotCommittedException;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSelector;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.common.ChecksumData;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .CloseContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .DatanodeBlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetBlockRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetBlockResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetSmallFileResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .PutBlockRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .PutSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
-    PutSmallFileResponseProto;
-import org.apache.hadoop.hdds.client.BlockID;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-
-/**
- * Implementation of all container protocol calls performed by Container
- * clients.
- */
-public final class ContainerProtocolCalls {
-
-  /**
-   * There is no need to instantiate this class.
-   */
-  private ContainerProtocolCalls() {
-  }
-
-  /**
-   * Calls the container protocol to get a container block.
-   *
-   * @param xceiverClient client to perform call
-   * @param datanodeBlockID blockID to identify container
-   * @return container protocol get block response
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient,
-      DatanodeBlockID datanodeBlockID) throws IOException {
-    GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto
-        .newBuilder()
-        .setBlockID(datanodeBlockID);
-    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
-
-    ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(Type.GetBlock)
-        .setContainerID(datanodeBlockID.getContainerID())
-        .setDatanodeUuid(id)
-        .setGetBlock(readBlockRequest);
-    String encodedToken = getEncodedBlockToken(getService(datanodeBlockID));
-    if (encodedToken != null) {
-      builder.setEncodedToken(encodedToken);
-    }
-
-    ContainerCommandRequestProto request = builder.build();
-    ContainerCommandResponseProto response =
-        xceiverClient.sendCommand(request, getValidatorList());
-    return response.getGetBlock();
-  }
-
-  /**
-   * Calls the container protocol to get the length of a committed block.
-   *
-   * @param xceiverClient client to perform call
-   * @param blockID blockId for the Block
-   * @return container protocol getCommittedBlockLength response
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static ContainerProtos.GetCommittedBlockLengthResponseProto
-      getCommittedBlockLength(
-      XceiverClientSpi xceiverClient, BlockID blockID)
-      throws IOException {
-    ContainerProtos.GetCommittedBlockLengthRequestProto.Builder
-        getBlockLengthRequestBuilder =
-        ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder().
-            setBlockID(blockID.getDatanodeBlockIDProtobuf());
-    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
-    ContainerCommandRequestProto.Builder builder =
-        ContainerCommandRequestProto.newBuilder()
-            .setCmdType(Type.GetCommittedBlockLength)
-            .setContainerID(blockID.getContainerID())
-            .setDatanodeUuid(id)
-            .setGetCommittedBlockLength(getBlockLengthRequestBuilder);
-    String encodedToken = getEncodedBlockToken(new Text(blockID.
-        getContainerBlockID().toString()));
-    if (encodedToken != null) {
-      builder.setEncodedToken(encodedToken);
-    }
-    ContainerCommandRequestProto request = builder.build();
-    ContainerCommandResponseProto response =
-        xceiverClient.sendCommand(request, getValidatorList());
-    return response.getGetCommittedBlockLength();
-  }
-
-  /**
-   * Calls the container protocol to put a container block.
-   *
-   * @param xceiverClient client to perform call
-   * @param containerBlockData block data to identify container
-   * @return putBlockResponse
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static ContainerProtos.PutBlockResponseProto putBlock(
-      XceiverClientSpi xceiverClient, BlockData containerBlockData)
-      throws IOException {
-    PutBlockRequestProto.Builder createBlockRequest =
-        PutBlockRequestProto.newBuilder().setBlockData(containerBlockData);
-    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
-    ContainerCommandRequestProto.Builder builder =
-        ContainerCommandRequestProto.newBuilder().setCmdType(Type.PutBlock)
-            .setContainerID(containerBlockData.getBlockID().getContainerID())
-            .setDatanodeUuid(id)
-            .setPutBlock(createBlockRequest);
-    String encodedToken =
-        getEncodedBlockToken(getService(containerBlockData.getBlockID()));
-    if (encodedToken != null) {
-      builder.setEncodedToken(encodedToken);
-    }
-    ContainerCommandRequestProto request = builder.build();
-    ContainerCommandResponseProto response =
-        xceiverClient.sendCommand(request, getValidatorList());
-    return response.getPutBlock();
-  }
-
-  /**
-   * Calls the container protocol to put a container block asynchronously.
-   *
-   * @param xceiverClient client to perform call
-   * @param containerBlockData block data to identify container
-   * @return reply for the asynchronous putBlock call
-   * @throws IOException if there is an I/O error while performing the call
-   * @throws InterruptedException if the asynchronous call is interrupted
-   * @throws ExecutionException if the asynchronous call fails
-   */
-  public static XceiverClientReply putBlockAsync(
-      XceiverClientSpi xceiverClient, BlockData containerBlockData)
-      throws IOException, InterruptedException, ExecutionException {
-    PutBlockRequestProto.Builder createBlockRequest =
-        PutBlockRequestProto.newBuilder().setBlockData(containerBlockData);
-    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
-    ContainerCommandRequestProto.Builder builder =
-        ContainerCommandRequestProto.newBuilder().setCmdType(Type.PutBlock)
-            .setContainerID(containerBlockData.getBlockID().getContainerID())
-            .setDatanodeUuid(id)
-            .setPutBlock(createBlockRequest);
-    String encodedToken =
-        getEncodedBlockToken(getService(containerBlockData.getBlockID()));
-    if (encodedToken != null) {
-      builder.setEncodedToken(encodedToken);
-    }
-    ContainerCommandRequestProto request = builder.build();
-    return xceiverClient.sendCommandAsync(request);
-  }
-
-  /**
-   * Calls the container protocol to read a chunk.
-   *
-   * @param xceiverClient client to perform call
-   * @param chunk information about chunk to read
-   * @param blockID ID of the block
-   * @param validators functions to validate the response
-   * @return container protocol read chunk response
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static ContainerProtos.ReadChunkResponseProto readChunk(
-      XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID,
-      List<CheckedBiFunction> validators) throws IOException {
-    ReadChunkRequestProto.Builder readChunkRequest =
-        ReadChunkRequestProto.newBuilder()
-            .setBlockID(blockID.getDatanodeBlockIDProtobuf())
-            .setChunkData(chunk);
-    String id = xceiverClient.getPipeline().getClosestNode().getUuidString();
-    ContainerCommandRequestProto.Builder builder =
-        ContainerCommandRequestProto.newBuilder().setCmdType(Type.ReadChunk)
-            .setContainerID(blockID.getContainerID())
-            .setDatanodeUuid(id).setReadChunk(readChunkRequest);
-    String encodedToken = getEncodedBlockToken(new Text(blockID.
-        getContainerBlockID().toString()));
-    if (encodedToken != null) {
-      builder.setEncodedToken(encodedToken);
-    }
-    ContainerCommandRequestProto request = builder.build();
-    ContainerCommandResponseProto reply =
-        xceiverClient.sendCommand(request, validators);
-    return reply.getReadChunk();
-  }
-
-  /**
-   * Calls the container protocol to write a chunk.
-   *
-   * @param xceiverClient client to perform call
-   * @param chunk information about chunk to write
-   * @param blockID ID of the block
-   * @param data the data of the chunk to write
-   * @throws IOException if there is an error while performing the call
-   */
-  public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk,
-      BlockID blockID, ByteString data)
-      throws IOException {
-    WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
-        .newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
-        .setChunkData(chunk)
-        .setData(data);
-    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
-    ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(Type.WriteChunk)
-        .setContainerID(blockID.getContainerID())
-        .setDatanodeUuid(id)
-        .setWriteChunk(writeChunkRequest);
-    String encodedToken = getEncodedBlockToken(new Text(blockID.
-        getContainerBlockID().toString()));
-    if (encodedToken != null) {
-      builder.setEncodedToken(encodedToken);
-    }
-    ContainerCommandRequestProto request = builder.build();
-    xceiverClient.sendCommand(request, getValidatorList());
-  }
-
-  /**
-   * Calls the container protocol to write a chunk asynchronously.
-   *
-   * @param xceiverClient client to perform call
-   * @param chunk information about chunk to write
-   * @param blockID ID of the block
-   * @param data the data of the chunk to write
-   * @return reply for the asynchronous writeChunk call
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static XceiverClientReply writeChunkAsync(
-      XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID,
-      ByteString data)
-      throws IOException, ExecutionException, InterruptedException {
-    WriteChunkRequestProto.Builder writeChunkRequest =
-        WriteChunkRequestProto.newBuilder()
-            .setBlockID(blockID.getDatanodeBlockIDProtobuf())
-            .setChunkData(chunk).setData(data);
-    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
-    ContainerCommandRequestProto.Builder builder =
-        ContainerCommandRequestProto.newBuilder().setCmdType(Type.WriteChunk)
-            .setContainerID(blockID.getContainerID())
-            .setDatanodeUuid(id).setWriteChunk(writeChunkRequest);
-    String encodedToken = getEncodedBlockToken(new Text(blockID.
-        getContainerBlockID().toString()));
-    if (encodedToken != null) {
-      builder.setEncodedToken(encodedToken);
-    }
-    ContainerCommandRequestProto request = builder.build();
-    return xceiverClient.sendCommandAsync(request);
-  }
-
-  /**
-   * Allows writing a small file using a single RPC. This takes the container
-   * name, block name, and data to write, and sends all of it to the container
-   * in a single RPC. This API is designed for files smaller than 1 MB.
-   *
-   * @param client - client that communicates with the container.
-   * @param blockID - ID of the block
-   * @param data - Data to be written into the container.
-   * @return container protocol writeSmallFile response
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static PutSmallFileResponseProto writeSmallFile(
-      XceiverClientSpi client, BlockID blockID, byte[] data)
-      throws IOException {
-
-    BlockData containerBlockData =
-        BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf())
-            .build();
-    PutBlockRequestProto.Builder createBlockRequest =
-        PutBlockRequestProto.newBuilder()
-            .setBlockData(containerBlockData);
-
-    KeyValue keyValue =
-        KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true")
-            .build();
-    Checksum checksum = new Checksum();
-    ChecksumData checksumData = checksum.computeChecksum(data, 0, data.length);
-    ChunkInfo chunk =
-        ChunkInfo.newBuilder()
-            .setChunkName(blockID.getLocalID() + "_chunk")
-            .setOffset(0)
-            .setLen(data.length)
-            .addMetadata(keyValue)
-            .setChecksumData(checksumData.getProtoBufMessage())
-            .build();
-
-    PutSmallFileRequestProto putSmallFileRequest =
-        PutSmallFileRequestProto.newBuilder().setChunkInfo(chunk)
-            .setBlock(createBlockRequest).setData(ByteString.copyFrom(data))
-            .build();
-
-    String id = client.getPipeline().getFirstNode().getUuidString();
-    ContainerCommandRequestProto.Builder builder =
-        ContainerCommandRequestProto.newBuilder()
-            .setCmdType(Type.PutSmallFile)
-            .setContainerID(blockID.getContainerID())
-            .setDatanodeUuid(id)
-            .setPutSmallFile(putSmallFileRequest);
-    String encodedToken = getEncodedBlockToken(new Text(blockID.
-        getContainerBlockID().toString()));
-    if (encodedToken != null) {
-      builder.setEncodedToken(encodedToken);
-    }
-    ContainerCommandRequestProto request = builder.build();
-    ContainerCommandResponseProto response =
-        client.sendCommand(request, getValidatorList());
-    return response.getPutSmallFile();
-  }
-
-  /**
-   * createContainer call that creates a container on the datanode.
-   * @param client  - client
-   * @param containerID - ID of container
-   * @param encodedToken - encodedToken if security is enabled
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static void createContainer(XceiverClientSpi client, long containerID,
-      String encodedToken) throws IOException {
-    ContainerProtos.CreateContainerRequestProto.Builder createRequest =
-        ContainerProtos.CreateContainerRequestProto
-            .newBuilder();
-    createRequest.setContainerType(ContainerProtos.ContainerType
-        .KeyValueContainer);
-
-    String id = client.getPipeline().getFirstNode().getUuidString();
-    ContainerCommandRequestProto.Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    if (encodedToken != null) {
-      request.setEncodedToken(encodedToken);
-    }
-    request.setCmdType(ContainerProtos.Type.CreateContainer);
-    request.setContainerID(containerID);
-    request.setCreateContainer(createRequest.build());
-    request.setDatanodeUuid(id);
-    client.sendCommand(request.build(), getValidatorList());
-  }
-
-  /**
-   * Deletes a container from a pipeline.
-   *
-   * @param client client to perform call
-   * @param containerID ID of the container to delete
-   * @param force whether or not to forcibly delete the container
-   * @param encodedToken - encodedToken if security is enabled
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static void deleteContainer(XceiverClientSpi client, long containerID,
-      boolean force, String encodedToken) throws IOException {
-    ContainerProtos.DeleteContainerRequestProto.Builder deleteRequest =
-        ContainerProtos.DeleteContainerRequestProto.newBuilder();
-    deleteRequest.setForceDelete(force);
-    String id = client.getPipeline().getFirstNode().getUuidString();
-
-    ContainerCommandRequestProto.Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.DeleteContainer);
-    request.setContainerID(containerID);
-    request.setDeleteContainer(deleteRequest);
-    request.setDatanodeUuid(id);
-    if (encodedToken != null) {
-      request.setEncodedToken(encodedToken);
-    }
-    client.sendCommand(request.build(), getValidatorList());
-  }
-
-  /**
-   * Close a container.
-   *
-   * @param client client to perform call
-   * @param containerID ID of the container to close
-   * @param encodedToken - encodedToken if security is enabled
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static void closeContainer(XceiverClientSpi client,
-      long containerID, String encodedToken)
-      throws IOException {
-    String id = client.getPipeline().getFirstNode().getUuidString();
-
-    ContainerCommandRequestProto.Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(Type.CloseContainer);
-    request.setContainerID(containerID);
-    request.setCloseContainer(CloseContainerRequestProto.getDefaultInstance());
-    request.setDatanodeUuid(id);
-    if (encodedToken != null) {
-      request.setEncodedToken(encodedToken);
-    }
-    client.sendCommand(request.build(), getValidatorList());
-  }
-
-  /**
-   * readContainer call that gets metadata from an existing container.
-   *
-   * @param client       - client to perform call
-   * @param containerID  - ID of the container to read
-   * @param encodedToken - encodedToken if security is enabled
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static ReadContainerResponseProto readContainer(
-      XceiverClientSpi client, long containerID, String encodedToken)
-      throws IOException {
-    String id = client.getPipeline().getFirstNode().getUuidString();
-
-    ContainerCommandRequestProto.Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(Type.ReadContainer);
-    request.setContainerID(containerID);
-    request.setReadContainer(ReadContainerRequestProto.getDefaultInstance());
-    request.setDatanodeUuid(id);
-    if (encodedToken != null) {
-      request.setEncodedToken(encodedToken);
-    }
-    ContainerCommandResponseProto response =
-        client.sendCommand(request.build(), getValidatorList());
-
-    return response.getReadContainer();
-  }
-
-  /**
-   * Reads the data given the blockID.
-   *
-   * @param client client to perform call
-   * @param blockID - ID of the block
-   * @return GetSmallFileResponseProto
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client,
-      BlockID blockID) throws IOException {
-    GetBlockRequestProto.Builder getBlock = GetBlockRequestProto
-        .newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf());
-    ContainerProtos.GetSmallFileRequestProto getSmallFileRequest =
-        GetSmallFileRequestProto
-            .newBuilder().setBlock(getBlock)
-            .build();
-    String id = client.getPipeline().getClosestNode().getUuidString();
-
-    ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(Type.GetSmallFile)
-        .setContainerID(blockID.getContainerID())
-        .setDatanodeUuid(id)
-        .setGetSmallFile(getSmallFileRequest);
-    String encodedToken = getEncodedBlockToken(new Text(blockID.
-        getContainerBlockID().toString()));
-    if (encodedToken != null) {
-      builder.setEncodedToken(encodedToken);
-    }
-    ContainerCommandRequestProto request = builder.build();
-    ContainerCommandResponseProto response =
-        client.sendCommand(request, getValidatorList());
-    return response.getGetSmallFile();
-  }
-
-  /**
-   * Validates a response from a container protocol call.  Any non-successful
-   * return code is mapped to a corresponding exception and thrown.
-   *
-   * @param response container protocol call response
-   * @throws StorageContainerException if the container protocol call failed
-   */
-  public static void validateContainerResponse(
-      ContainerCommandResponseProto response
-  ) throws StorageContainerException {
-    if (response.getResult() == ContainerProtos.Result.SUCCESS) {
-      return;
-    } else if (response.getResult()
-        == ContainerProtos.Result.BLOCK_NOT_COMMITTED) {
-      throw new BlockNotCommittedException(response.getMessage());
-    } else if (response.getResult()
-        == ContainerProtos.Result.CLOSED_CONTAINER_IO) {
-      throw new ContainerNotOpenException(response.getMessage());
-    }
-    throw new StorageContainerException(
-        response.getMessage(), response.getResult());
-  }
-
-  /**
-   * Returns a URL-encoded block token. The service param should match the
-   * service field of the token.
-   *
-   * @param service the token service
-   */
-  private static String getEncodedBlockToken(Text service)
-      throws IOException {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    Token<OzoneBlockTokenIdentifier> token =
-        OzoneBlockTokenSelector.selectBlockToken(service, ugi.getTokens());
-    if (token != null) {
-      return token.encodeToUrlString();
-    }
-    return null;
-  }
-
-  private static Text getService(DatanodeBlockID blockId) {
-    return new Text(new StringBuilder()
-        .append("conID: ")
-        .append(blockId.getContainerID())
-        .append(" locID: ")
-        .append(blockId.getLocalID())
-        .toString());
-  }
-
-  public static List<CheckedBiFunction> getValidatorList() {
-    List<CheckedBiFunction> validators = new ArrayList<>(1);
-    CheckedBiFunction<ContainerProtos.ContainerCommandRequestProto,
-        ContainerProtos.ContainerCommandResponseProto, IOException>
-        validator = (request, response) -> validateContainerResponse(response);
-    validators.add(validator);
-    return validators;
-  }
-}
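
A hedged end-to-end sketch of the static helpers deleted above. It assumes an
already-connected XceiverClientSpi (pipeline setup happens outside this file)
and security disabled, so the encoded-token arguments are null; the container
and local IDs are illustrative.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetSmallFileResponseProto;
    import org.apache.hadoop.hdds.scm.XceiverClientSpi;
    import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

    public class SmallFileSketch {
      public static void demo(XceiverClientSpi client) throws Exception {
        long containerID = 1L; // illustrative
        // Create the container, then write and read back a sub-1MB payload.
        ContainerProtocolCalls.createContainer(client, containerID, null);
        BlockID blockID = new BlockID(containerID, 1L); // (containerID, localID)
        byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
        ContainerProtocolCalls.writeSmallFile(client, blockID, data);
        GetSmallFileResponseProto file =
            ContainerProtocolCalls.readSmallFile(client, blockID);
      }
    }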
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
deleted file mode 100644
index 8e98158..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-/**
- * This package contains StorageContainerManager classes.
- */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java
deleted file mode 100644
index bbe25a9..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.security.exception;
-
-import java.io.IOException;
-
-/**
- * Root security exception class for all certificate-related exceptions.
- */
-public class SCMSecurityException extends IOException {
-  private final ErrorCode errorCode;
-
-  /**
-   * Ctor.
-   * @param message - Error Message.
-   */
-  public SCMSecurityException(String message) {
-    super(message);
-    this.errorCode = ErrorCode.DEFAULT;
-  }
-
-  /**
-   * Ctor.
-   * @param message - Message.
-   * @param cause  - Actual cause.
-   */
-  public SCMSecurityException(String message, Throwable cause) {
-    super(message, cause);
-    this.errorCode = ErrorCode.DEFAULT;
-  }
-
-  /**
-   * Ctor.
-   * @param message - Message.
-   * @param error   - error code.
-   */
-  public SCMSecurityException(String message, ErrorCode error) {
-    super(message);
-    this.errorCode = error;
-  }
-
-  /**
-   * Ctor.
-   * @param cause - Base Exception.
-   */
-  public SCMSecurityException(Throwable cause) {
-    super(cause);
-    this.errorCode = ErrorCode.DEFAULT;
-  }
-
-  public ErrorCode getErrorCode() {
-    return errorCode;
-  }
-
-  /**
-   * Error codes to make it easy to decode these exceptions.
-   */
-  public enum ErrorCode {
-    DEFAULT,
-    MISSING_BLOCK_TOKEN,
-    BLOCK_TOKEN_VERIFICATION_FAILED
-  }
-}
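
The typed error codes above let callers branch on getErrorCode() instead of
parsing message text; a short sketch using only the names defined in this
file:

    import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
    import org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode;

    public class ErrorCodeSketch {
      static void handle() {
        try {
          throw new SCMSecurityException("Block token missing",
              ErrorCode.MISSING_BLOCK_TOKEN);
        } catch (SCMSecurityException e) {
          // Branch on the machine-readable code, not the message text.
          if (e.getErrorCode() == ErrorCode.MISSING_BLOCK_TOKEN) {
            // e.g. ask the client to fetch a fresh token and retry
          }
        }
      }
    }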
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java
deleted file mode 100644
index b980592..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Exceptions thrown by SCM security classes.
- */
-package org.apache.hadoop.hdds.security.exception;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenException.java
deleted file mode 100644
index 7ea0ebc..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenException.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.token;
-
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-
-/**
- * Block Token Exceptions from the SCM Security layer.
- */
-public class BlockTokenException extends SCMSecurityException {
-
-  /**
-   * Ctor.
-   * @param message - Error Message.
-   */
-  public BlockTokenException(String message) {
-    super(message);
-  }
-
-  /**
-   * Ctor.
-   * @param message - Message.
-   * @param cause  - Actual cause.
-   */
-  public BlockTokenException(String message, Throwable cause) {
-    super(message, cause);
-  }
-
-  /**
-   * Ctor.
-   * @param cause - Base Exception.
-   */
-  public BlockTokenException(Throwable cause) {
-    super(cause);
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java
deleted file mode 100644
index e94808a..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.security.token;
-
-import com.google.common.base.Strings;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.security.cert.X509Certificate;
-
-
-/**
- * Verify token and return a UGI with token if authenticated.
- */
-public class BlockTokenVerifier implements TokenVerifier {
-
-  private final CertificateClient caClient;
-  private final SecurityConfig conf;
-  private static boolean testStub = false;
-  private static final Logger LOGGER =
-      LoggerFactory.getLogger(BlockTokenVerifier.class);
-
-  public BlockTokenVerifier(SecurityConfig conf, CertificateClient caClient) {
-    this.conf = conf;
-    this.caClient = caClient;
-  }
-
-  private boolean isExpired(long expiryDate) {
-    return Time.now() > expiryDate;
-  }
-
-  @Override
-  public UserGroupInformation verify(String user, String tokenStr)
-      throws SCMSecurityException {
-    if (conf.isBlockTokenEnabled()) {
-      // TODO: add audit logs.
-
-      if (Strings.isNullOrEmpty(tokenStr)) {
-        throw new BlockTokenException("Fail to find any token (empty or " +
-            "null.)");
-      }
-      final Token<OzoneBlockTokenIdentifier> token = new Token<>();
-      OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier();
-      try {
-        token.decodeFromUrlString(tokenStr);
-        if (LOGGER.isDebugEnabled()) {
-          LOGGER.debug("Verifying token:{} for user:{} ", token, user);
-        }
-        ByteArrayInputStream buf = new ByteArrayInputStream(
-            token.getIdentifier());
-        DataInputStream in = new DataInputStream(buf);
-        tokenId.readFields(in);
-
-      } catch (IOException ex) {
-        throw new BlockTokenException("Failed to decode token : " + tokenStr);
-      }
-
-      if (caClient == null) {
-        throw new SCMSecurityException("Certificate client not available " +
-            "to validate token");
-      }
-
-      X509Certificate signerCert;
-      signerCert = caClient.getCertificate(tokenId.getOmCertSerialId());
-
-      if (signerCert == null) {
-        throw new BlockTokenException("Can't find signer certificate " +
-            "(OmCertSerialId: " + tokenId.getOmCertSerialId() +
-            ") of the block token for user: " + tokenId.getUser());
-      }
-      boolean validToken = caClient.verifySignature(tokenId.getBytes(),
-          token.getPassword(), signerCert);
-      if (!validToken) {
-        throw new BlockTokenException("Invalid block token for user: " +
-            tokenId.getUser());
-      }
-
-      // check expiration
-      if (isExpired(tokenId.getExpiryDate())) {
-        UserGroupInformation tokenUser = tokenId.getUser();
-        tokenUser.setAuthenticationMethod(
-            UserGroupInformation.AuthenticationMethod.TOKEN);
-        throw new BlockTokenException("Expired block token for user: " +
-            tokenUser);
-      }
-      // defer access mode, bcsid and maxLength check to container dispatcher
-      UserGroupInformation ugi = tokenId.getUser();
-      ugi.addToken(token);
-      ugi.setAuthenticationMethod(UserGroupInformation
-          .AuthenticationMethod.TOKEN);
-      return ugi;
-    } else {
-      return UserGroupInformation.createRemoteUser(user);
-    }
-  }
-
-  public static boolean isTestStub() {
-    return testStub;
-  }
-
-  // For testing purpose only.
-  public static void setTestStub(boolean isTestStub) {
-    BlockTokenVerifier.testStub = isTestStub;
-  }
-}
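
A hedged sketch of the verifier's server-side call pattern; the
SecurityConfig, CertificateClient, user name, and token string are assumed
inputs wired up elsewhere in the datanode.

    import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
    import org.apache.hadoop.hdds.security.token.BlockTokenVerifier;
    import org.apache.hadoop.hdds.security.x509.SecurityConfig;
    import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
    import org.apache.hadoop.security.UserGroupInformation;

    public class VerifySketch {
      static UserGroupInformation check(SecurityConfig conf,
          CertificateClient caClient, String user, String tokenStr) {
        BlockTokenVerifier verifier = new BlockTokenVerifier(conf, caClient);
        try {
          // Returns a UGI carrying the token when block tokens are enabled,
          // or a plain remote user when they are not.
          return verifier.verify(user, tokenStr);
        } catch (SCMSecurityException e) {
          // Expired, unsigned, or malformed token: reject the request.
          return null;
        }
      }
    }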
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java
deleted file mode 100644
index 54cf180..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java
+++ /dev/null
@@ -1,212 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.security.token;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.Builder;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.Token.TrivialRenewer;
-
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.EnumSet;
-
-/**
- * Block token identifier for Ozone/HDDS. An Ozone block access token is
- * similar to an HDFS block access token: it is meant to be lightweight and
- * short-lived, so there is no need to renew or revoke it. When a cached block
- * access token expires, the client simply gets a new one. Block access tokens
- * should be cached only in memory and never written to disk.
- */
-@InterfaceAudience.Private
-public class OzoneBlockTokenIdentifier extends TokenIdentifier {
-
-  static final Text KIND_NAME = new Text("HDDS_BLOCK_TOKEN");
-  private long expiryDate;
-  private String ownerId;
-  private String blockId;
-  private EnumSet<AccessModeProto> modes;
-  private String omCertSerialId;
-  private long maxLength;
-
-  public OzoneBlockTokenIdentifier() {
-  }
-
-  public OzoneBlockTokenIdentifier(String ownerId, String blockId,
-      EnumSet<AccessModeProto> modes, long expiryDate, String omCertSerialId,
-      long maxLength) {
-    this.ownerId = ownerId;
-    this.blockId = blockId;
-    this.expiryDate = expiryDate;
-    this.modes = modes == null ? EnumSet.noneOf(AccessModeProto.class) : modes;
-    this.omCertSerialId = omCertSerialId;
-    this.maxLength = maxLength;
-  }
-
-  @Override
-  public UserGroupInformation getUser() {
-    if (this.getOwnerId() == null || "".equals(this.getOwnerId())) {
-      return UserGroupInformation.createRemoteUser(blockId);
-    }
-    return UserGroupInformation.createRemoteUser(ownerId);
-  }
-
-  public long getExpiryDate() {
-    return expiryDate;
-  }
-
-  public String getOwnerId() {
-    return ownerId;
-  }
-
-  public String getBlockId() {
-    return blockId;
-  }
-
-  public EnumSet<AccessModeProto> getAccessModes() {
-    return modes;
-  }
-
-  public String getOmCertSerialId() {
-    return omCertSerialId;
-  }
-
-  public long getMaxLength() {
-    return maxLength;
-  }
-
-  @Override
-  public Text getKind() {
-    return KIND_NAME;
-  }
-
-  @Override
-  public String toString() {
-    return "block_token_identifier (expiryDate=" + this.getExpiryDate()
-        + ", ownerId=" + this.getOwnerId()
-        + ", omCertSerialId=" + this.getOmCertSerialId()
-        + ", blockId=" + this.getBlockId() + ", access modes="
-        + this.getAccessModes() + ", maxLength=" + this.getMaxLength() + ")";
-  }
-
-  static boolean isEqual(Object a, Object b) {
-    return a == null ? b == null : a.equals(b);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == this) {
-      return true;
-    }
-
-    if (obj instanceof OzoneBlockTokenIdentifier) {
-      OzoneBlockTokenIdentifier that = (OzoneBlockTokenIdentifier) obj;
-      return new EqualsBuilder()
-          .append(this.expiryDate, that.expiryDate)
-          .append(this.ownerId, that.ownerId)
-          .append(this.blockId, that.blockId)
-          .append(this.modes, that.modes)
-          .append(this.omCertSerialId, that.omCertSerialId)
-          .append(this.maxLength, that.maxLength)
-          .build();
-    }
-    return false;
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(133, 567)
-        .append(this.expiryDate)
-        .append(this.blockId)
-        .append(this.ownerId)
-        .append(this.modes)
-        .append(this.omCertSerialId)
-        .append(this.maxLength)
-        .build();
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    final DataInputStream dis = (DataInputStream) in;
-    if (!dis.markSupported()) {
-      throw new IOException("Could not peek first byte.");
-    }
-    BlockTokenSecretProto tokenProto = BlockTokenSecretProto.parseFrom(dis);
-    this.ownerId = tokenProto.getOwnerId();
-    this.blockId = tokenProto.getBlockId();
-    this.modes = EnumSet.copyOf(tokenProto.getModesList());
-    this.expiryDate = tokenProto.getExpiryDate();
-    this.omCertSerialId = tokenProto.getOmCertSerialId();
-    this.maxLength = tokenProto.getMaxLength();
-  }
-
-  @VisibleForTesting
-  public static OzoneBlockTokenIdentifier readFieldsProtobuf(DataInput in)
-      throws IOException {
-    BlockTokenSecretProto tokenProto =
-        BlockTokenSecretProto.parseFrom((DataInputStream) in);
-    return new OzoneBlockTokenIdentifier(tokenProto.getOwnerId(),
-        tokenProto.getBlockId(), EnumSet.copyOf(tokenProto.getModesList()),
-        tokenProto.getExpiryDate(), tokenProto.getOmCertSerialId(),
-        tokenProto.getMaxLength());
-  }
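-
-  // Illustrative round-trip sketch, not part of the original file; values
-  // and the AccessModeProto constant are assumptions. write() emits the
-  // BlockTokenSecretProto bytes, which readFieldsProtobuf() parses back
-  // into an equal identifier:
-  //
-  //   OzoneBlockTokenIdentifier id = new OzoneBlockTokenIdentifier(
-  //       "owner", "blockId", EnumSet.of(AccessModeProto.READ),
-  //       Time.now() + 3600 * 1000L, "omCertSerialId", 128L << 20);
-  //   DataOutputBuffer out = new DataOutputBuffer();
-  //   id.write(out);
-  //   DataInputBuffer in = new DataInputBuffer();
-  //   in.reset(out.getData(), out.getLength());
-  //   assert id.equals(OzoneBlockTokenIdentifier.readFieldsProtobuf(in));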
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    writeProtobuf(out);
-  }
-
-  @VisibleForTesting
-  void writeProtobuf(DataOutput out) throws IOException {
-    Builder builder = BlockTokenSecretProto.newBuilder()
-        .setBlockId(this.getBlockId())
-        .setOwnerId(this.getOwnerId())
-        .setOmCertSerialId(this.getOmCertSerialId())
-        .setExpiryDate(this.getExpiryDate())
-        .setMaxLength(this.getMaxLength());
-    // Add access mode allowed
-    for (AccessModeProto mode : this.getAccessModes()) {
-      builder.addModes(AccessModeProto.valueOf(mode.name()));
-    }
-    out.write(builder.build().toByteArray());
-  }
-
-  /**
-   * Default TrivialRenewer.
-   */
-  @InterfaceAudience.Private
-  public static class Renewer extends TrivialRenewer {
-
-    @Override
-    protected Text getKind() {
-      return KIND_NAME;
-    }
-  }
-}
-
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java
deleted file mode 100644
index 9acc75a..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.security.token;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenSelector;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collection;
-
-/**
- * A block token selector for Ozone.
- */
-@InterfaceAudience.Private
-public class OzoneBlockTokenSelector implements
-    TokenSelector<OzoneBlockTokenIdentifier> {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(OzoneBlockTokenSelector.class);
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public Token<OzoneBlockTokenIdentifier> selectToken(Text service,
-      Collection<Token<? extends TokenIdentifier>> tokens) {
-    if (service == null) {
-      return null;
-    }
-    for (Token<? extends TokenIdentifier> token : tokens) {
-      if (OzoneBlockTokenIdentifier.KIND_NAME.equals(token.getKind())
-          && token.getService().equals(service)) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Getting token for service:{}", service);
-        }
-        return (Token<OzoneBlockTokenIdentifier>) token;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Static convenience variant that avoids instantiating the selector.
-   */
-  @SuppressWarnings("unchecked")
-  public static Token<OzoneBlockTokenIdentifier> selectBlockToken(Text service,
-      Collection<Token<? extends TokenIdentifier>> tokens) {
-    if (service == null) {
-      return null;
-    }
-    for (Token<? extends TokenIdentifier> token : tokens) {
-      if (OzoneBlockTokenIdentifier.KIND_NAME.equals(token.getKind())
-          && token.getService().equals(service)) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Getting token for service:{}", service);
-        }
-        return (Token<OzoneBlockTokenIdentifier>) token;
-      }
-    }
-    return null;
-  }
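-
-  // Illustrative usage sketch, not part of the original class; the service
-  // name is an assumption:
-  //
-  //   Text service = new Text(datanodeAddress);
-  //   Token<OzoneBlockTokenIdentifier> token =
-  //       OzoneBlockTokenSelector.selectBlockToken(service,
-  //           UserGroupInformation.getCurrentUser().getTokens());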
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java
deleted file mode 100644
index d8170ab..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.security.token;
-
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.security.UserGroupInformation;
-
-/**
- * Ozone gRPC token header verifier.
- */
-public interface TokenVerifier {
-  /**
-   * Given a user and a tokenStr header, returns a UGI object with the token
-   * attached if verification succeeds.
-   * @param user user of the request
-   * @param tokenStr token string of the request
-   * @return UGI
-   * @throws SCMSecurityException if the token cannot be verified
-   */
-  UserGroupInformation verify(String user, String tokenStr)
-      throws SCMSecurityException;
-}
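-
-// Because the interface has a single abstract method, an implementation can
-// be written as a lambda. A minimal illustrative sketch, not from the
-// original file (BlockTokenVerifier is the real implementation):
-//
-//   TokenVerifier trusting = (user, tokenStr) ->
-//       UserGroupInformation.createRemoteUser(user);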
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/package-info.java
deleted file mode 100644
index 885bed5..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains the block token related classes.
- */
-package org.apache.hadoop.hdds.security.token;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java
deleted file mode 100644
index 8aaba5d..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslProvider;
-import org.bouncycastle.jce.provider.BouncyCastleProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.security.Provider;
-import java.security.Security;
-import java.time.Duration;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_KEY_ALGORITHM;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_KEY_LEN;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_SECURITY_PROVIDER;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_ENABLED;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_ENABLED_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_PROVIDER;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_PROVIDER_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT_DEFAULT;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_ALGORITHM;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_LEN;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PRIVATE_KEY_FILE_NAME;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PUBLIC_KEY_FILE_NAME;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_PROVIDER;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DEFAULT_DURATION_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DEFAULT_DURATION;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DIR_NAME;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DIR_NAME_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_FILE_NAME;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_FILE_NAME_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_MAX_DURATION;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_MAX_DURATION_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_SIGNATURE_ALGO;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_SIGNATURE_ALGO_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-
-/**
- * A class that deals with all Security related configs in HDDS.
- * <p>
- * This class allows security configs to be read and used consistently across
- * all of the security-related code base.
- */
-public class SecurityConfig {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SecurityConfig.class);
-  private static volatile Provider provider;
-  private final Configuration configuration;
-  private final int size;
-  private final String keyAlgo;
-  private final String providerString;
-  private final String metadataDir;
-  private final String keyDir;
-  private final String privateKeyFileName;
-  private final String publicKeyFileName;
-  private final Duration certDuration;
-  private final String x509SignatureAlgo;
-  private final boolean blockTokenEnabled;
-  private final String certificateDir;
-  private final String certificateFileName;
-  private final boolean grpcTlsEnabled;
-  private boolean grpcTlsUseTestCert;
-  private final Duration defaultCertDuration;
-  private final boolean isSecurityEnabled;
-
-  /**
-   * Constructs a SecurityConfig.
-   *
-   * @param configuration - HDDS Configuration
-   */
-  public SecurityConfig(Configuration configuration) {
-    Preconditions.checkNotNull(configuration, "Configuration cannot be null");
-    this.configuration = configuration;
-    this.size = this.configuration.getInt(HDDS_KEY_LEN, HDDS_DEFAULT_KEY_LEN);
-    this.keyAlgo = this.configuration.get(HDDS_KEY_ALGORITHM,
-        HDDS_DEFAULT_KEY_ALGORITHM);
-    this.providerString = this.configuration.get(HDDS_SECURITY_PROVIDER,
-        HDDS_DEFAULT_SECURITY_PROVIDER);
-
-    // Please note: to make it easy for our users, we attempt to read the
-    // HDDS metadata dir and, if that is not set, fall back to the Ozone
-    // directories.
-    // TODO: We might want to fix this later.
-    this.metadataDir = this.configuration.get(HDDS_METADATA_DIR_NAME,
-        configuration.get(OZONE_METADATA_DIRS,
-            configuration.get(HDDS_DATANODE_DIR_KEY)));
-    this.keyDir = this.configuration.get(HDDS_KEY_DIR_NAME,
-        HDDS_KEY_DIR_NAME_DEFAULT);
-    this.privateKeyFileName = this.configuration.get(HDDS_PRIVATE_KEY_FILE_NAME,
-        HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT);
-    this.publicKeyFileName = this.configuration.get(HDDS_PUBLIC_KEY_FILE_NAME,
-        HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT);
-
-    String durationString = this.configuration.get(HDDS_X509_MAX_DURATION,
-        HDDS_X509_MAX_DURATION_DEFAULT);
-    this.certDuration = Duration.parse(durationString);
-    this.x509SignatureAlgo = this.configuration.get(HDDS_X509_SIGNATURE_ALGO,
-        HDDS_X509_SIGNATURE_ALGO_DEFAULT);
-    this.certificateDir = this.configuration.get(HDDS_X509_DIR_NAME,
-        HDDS_X509_DIR_NAME_DEFAULT);
-    this.certificateFileName = this.configuration.get(HDDS_X509_FILE_NAME,
-        HDDS_X509_FILE_NAME_DEFAULT);
-
-    this.blockTokenEnabled = this.configuration.getBoolean(
-        HDDS_BLOCK_TOKEN_ENABLED,
-        HDDS_BLOCK_TOKEN_ENABLED_DEFAULT);
-
-    this.grpcTlsEnabled = this.configuration.getBoolean(HDDS_GRPC_TLS_ENABLED,
-        HDDS_GRPC_TLS_ENABLED_DEFAULT);
-
-    if (grpcTlsEnabled) {
-      this.grpcTlsUseTestCert = this.configuration.getBoolean(
-          HDDS_GRPC_TLS_TEST_CERT, HDDS_GRPC_TLS_TEST_CERT_DEFAULT);
-    }
-
-    this.isSecurityEnabled = this.configuration.getBoolean(
-        OZONE_SECURITY_ENABLED_KEY,
-        OZONE_SECURITY_ENABLED_DEFAULT);
-
-    String certDurationString =
-        this.configuration.get(HDDS_X509_DEFAULT_DURATION,
-            HDDS_X509_DEFAULT_DURATION_DEFAULT);
-    defaultCertDuration = Duration.parse(certDurationString);
-
-
-    // First Startup -- if the provider is null, check for the provider.
-    if (SecurityConfig.provider == null) {
-      synchronized (SecurityConfig.class) {
-        provider = Security.getProvider(this.providerString);
-        if (SecurityConfig.provider == null) {
-          // Provider not found, let us try to Dynamically initialize the
-          // provider.
-          provider = initSecurityProvider(this.providerString);
-        }
-      }
-    }
-  }
-
-  /**
-   * Returns true if security is enabled for the Ozone cluster. This is
-   * determined by the value of OZONE_SECURITY_ENABLED_KEY.
-   *
-   * @return true if security is enabled for OzoneCluster.
-   */
-  public boolean isSecurityEnabled() {
-    return isSecurityEnabled;
-  }
-
-  /**
-   * Returns the Default Certificate Duration.
-   *
-   * @return Duration for the default certificate issue.
-   */
-  public Duration getDefaultCertDuration() {
-    return defaultCertDuration;
-  }
-
-  /**
-   * Returns the Standard Certificate file name.
-   *
-   * @return String - Name of the Certificate File.
-   */
-  public String getCertificateFileName() {
-    return certificateFileName;
-  }
-
-  /**
-   * Returns the public key file name. This is used for storing the public
-   * keys on disk.
-   *
-   * @return String, File name used for public keys.
-   */
-  public String getPublicKeyFileName() {
-    return publicKeyFileName;
-  }
-
-  /**
-   * Returns the private key file name. This is used for storing the private
-   * keys on disk.
-   *
-   * @return String, File name used for private keys.
-   */
-  public String getPrivateKeyFileName() {
-    return privateKeyFileName;
-  }
-
-  /**
-   * Returns the File path to where keys are stored with an additional component
-   * name inserted in between.
-   *
-   * @param component - Component Name - String.
-   * @return Path Key location.
-   */
-  public Path getKeyLocation(String component) {
-    Preconditions.checkNotNull(this.metadataDir, "Metadata directory can't be"
-        + " null. Please check configs.");
-    return Paths.get(metadataDir, component, keyDir);
-  }
-
-  /**
-   * Returns the File path to where certificates are stored, with an
-   * additional component name inserted in between.
-   *
-   * @param component - Component Name - String.
-   * @return Path location.
-   */
-  public Path getCertificateLocation(String component) {
-    Preconditions.checkNotNull(this.metadataDir, "Metadata directory can't be"
-        + " null. Please check configs.");
-    return Paths.get(metadataDir, component, certificateDir);
-  }
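-
-  // For example (directory values assumed): with a metadata dir of
-  // "/var/hadoop/meta" and component "scm", keys land under
-  // /var/hadoop/meta/scm/<keyDir> and certificates under
-  // /var/hadoop/meta/scm/<certificateDir>.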
-
-  /**
-   * Gets the key size. The default key size is 2048, since the default
-   * algorithm used is RSA. Users can change this by setting "hdds.key.len"
-   * in the configuration.
-   *
-   * @return key size.
-   */
-  public int getSize() {
-    return size;
-  }
-
-  /**
-   * Returns the Provider name. SCM defaults to using Bouncy Castle and will
-   * return "BC".
-   *
-   * @return String Provider name.
-   */
-  public String getProvider() {
-    return providerString;
-  }
-
-  /**
-   * Returns the key generation algorithm used. Users can change this by
-   * setting "hdds.key.algo" in the configuration.
-   *
-   * @return String Algo.
-   */
-  public String getKeyAlgo() {
-    return keyAlgo;
-  }
-
-  /**
-   * Returns the X.509 Signature Algorithm used. This can be changed by setting
-   * "hdds.x509.signature.algorithm" to the new name. The default algorithm is
-   * SHA256withRSA.
-   *
-   * @return String
-   */
-  public String getSignatureAlgo() {
-    return x509SignatureAlgo;
-  }
-
-  /**
-   * Returns the Configuration used for initializing this SecurityConfig.
-   *
-   * @return Configuration
-   */
-  public Configuration getConfiguration() {
-    return configuration;
-  }
-
-  /**
-   * Returns the maximum duration for which a certificate issued by SCM can be
-   * valid. The default value is 5 years. This can be changed by setting
-   * "hdds.x509.max.duration" in configuration. The formats accepted are based
-   * on the ISO-8601 duration format PnDTnHnMn.nS.
-   * <p>
-   * Default value is 5 years and written as P1865D.
-   *
-   * @return Duration.
-   */
-  public Duration getMaxCertificateDuration() {
-    return this.certDuration;
-  }
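-
-  // For reference, such durations parse directly with java.time, e.g.:
-  //   Duration.parse("P1865D")     // the shipped default maximum
-  //   Duration.parse("P30DT12H")   // an illustrative, assumed value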
-
-  public boolean isBlockTokenEnabled() {
-    return this.blockTokenEnabled;
-  }
-
-  /**
-   * Returns true if TLS is enabled for gRPC services.
-   * @return true if TLS is enabled for gRPC services.
-   */
-  public boolean isGrpcTlsEnabled() {
-    return this.grpcTlsEnabled;
-  }
-
-  /**
-   * Get the gRPC TLS provider.
-   * @return the gRPC TLS Provider.
-   */
-  public SslProvider getGrpcSslProvider() {
-    return SslProvider.valueOf(configuration.get(HDDS_GRPC_TLS_PROVIDER,
-        HDDS_GRPC_TLS_PROVIDER_DEFAULT));
-  }
-
-  /**
-   * Return true if using test certificates with authority as localhost.
-   * This should be used only for unit tests, where certificates are generated
-   * by openssl with localhost as the DN, and should never be used in
-   * production as it bypasses the hostname/IP matching verification.
-   * @return true if using test certificates.
-   */
-  public boolean useTestCert() {
-    return grpcTlsUseTestCert;
-  }
-
-  /**
-   * Adds a security provider dynamically if it is not loaded already.
-   *
-   * @param providerName - name of the provider.
-   */
-  private Provider initSecurityProvider(String providerName) {
-    switch (providerName) {
-    case "BC":
-      Security.addProvider(new BouncyCastleProvider());
-      return Security.getProvider(providerName);
-    default:
-      LOG.error("Security Provider:{} is unknown", provider);
-      throw new SecurityException("Unknown security provider:" + provider);
-    }
-  }
-
-  /**
-   * Returns the maximum lifetime for which S3 tokens will be valid.
-   */
-  public long getS3TokenMaxDate() {
-    return getConfiguration().getTimeDuration(
-        OzoneConfigKeys.OZONE_S3_TOKEN_MAX_LIFETIME_KEY,
-        OzoneConfigKeys.OZONE_S3_TOKEN_MAX_LIFETIME_KEY_DEFAULT,
-        TimeUnit.MICROSECONDS);
-  }
-}
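-
-// Illustrative construction sketch, not part of the original file; the
-// OzoneConfiguration usage is an assumption:
-//
-//   Configuration conf = new OzoneConfiguration();
-//   conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-//   SecurityConfig secConf = new SecurityConfig(conf);
-//   Path scmKeyDir = secConf.getKeyLocation("scm");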
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java
deleted file mode 100644
index 12ececd..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
-
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile;
-import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
-import org.bouncycastle.asn1.ASN1Encodable;
-import org.bouncycastle.asn1.ASN1ObjectIdentifier;
-import org.bouncycastle.asn1.pkcs.Attribute;
-import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
-import org.bouncycastle.asn1.x500.RDN;
-import org.bouncycastle.asn1.x509.Extension;
-import org.bouncycastle.asn1.x509.Extensions;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.operator.ContentVerifierProvider;
-import org.bouncycastle.operator.OperatorCreationException;
-import org.bouncycastle.operator.jcajce.JcaContentVerifierProviderBuilder;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-import org.bouncycastle.pkcs.PKCSException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import java.util.concurrent.CompletableFuture;
-
-/**
- * A base approver class for certificate approvals.
- */
-public abstract class BaseApprover implements CertificateApprover {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(CertificateApprover.class);
-  private final PKIProfile profile;
-  private final SecurityConfig securityConfig;
-
-  public BaseApprover(PKIProfile pkiProfile, SecurityConfig config) {
-    this.profile = Objects.requireNonNull(pkiProfile);
-    this.securityConfig = Objects.requireNonNull(config);
-  }
-
-  /**
-   * Returns the Security config.
-   *
-   * @return SecurityConfig
-   */
-  public SecurityConfig getSecurityConfig() {
-    return securityConfig;
-  }
-
-  /**
-   * Returns the Attribute array that encodes extensions.
-   *
-   * @param request - Certificate Request
-   * @return - An Array of Attributes that encode various extensions requested
-   * in this certificate.
-   */
-  Attribute[] getAttributes(PKCS10CertificationRequest request) {
-    Objects.requireNonNull(request);
-    return
-        request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest);
-  }
-
-  /**
-   * Returns a list of Extensions encoded in a given attribute.
-   *
-   * @param attribute - Attribute to decode.
-   * @return - List of Extensions.
-   */
-  List<Extensions> getExtensionsList(Attribute attribute) {
-    Objects.requireNonNull(attribute);
-    List<Extensions> extensionsList = new ArrayList<>();
-    for (ASN1Encodable value : attribute.getAttributeValues()) {
-      if (value != null) {
-        Extensions extensions = Extensions.getInstance(value);
-        extensionsList.add(extensions);
-      }
-    }
-    return extensionsList;
-  }
-
-  /**
-   * Returns the Extension decoded into a Java Collection.
-   * @param extensions - A set of Extensions in ASN.1.
-   * @return List of Decoded Extensions.
-   */
-  List<Extension> getIndividualExtension(Extensions extensions) {
-    Objects.requireNonNull(extensions);
-    List<Extension> extenList = new ArrayList<>();
-    for (ASN1ObjectIdentifier id : extensions.getExtensionOIDs()) {
-      if (id != null) {
-        Extension ext = extensions.getExtension(id);
-        if (ext != null) {
-          extenList.add(ext);
-        }
-      }
-    }
-    return extenList;
-  }
-
-
-  /**
-   * This function verifies all extensions in the certificate.
-   *
-   * @param request - CSR
-   * @return - true if the extensions are acceptable by the profile, false
-   * otherwise.
-   */
-  boolean verifyExtensions(PKCS10CertificationRequest request) {
-    Objects.requireNonNull(request);
-    /*
-     * Inside a CSR we have
-     *  1. A list of Attributes
-     *    2. Inside each attribute a list of extensions.
-     *      3. We need to walk through each extension and verify that it is
-     *      expected and that we can put it into a certificate.
-     */
-
-    for (Attribute attr : getAttributes(request)) {
-      for (Extensions extensionsList : getExtensionsList(attr)) {
-        for (Extension extension : getIndividualExtension(extensionsList)) {
-          if (!profile.validateExtension(extension)) {
-            LOG.error("Failed to verify extension. {}",
-                extension.getExtnId().getId());
-            return false;
-          }
-        }
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Verifies the Signature on the CSR is valid.
-   *
-   * @param pkcs10Request - PKCS10 Request.
-   * @return True if it is valid, false otherwise.
-   * @throws OperatorCreationException - On Error.
-   * @throws PKCSException             - on Error.
-   */
-  boolean verifyPkcs10Request(PKCS10CertificationRequest pkcs10Request)
-      throws OperatorCreationException, PKCSException {
-    ContentVerifierProvider verifierProvider = new
-        JcaContentVerifierProviderBuilder()
-        .setProvider(this.securityConfig.getProvider())
-        .build(pkcs10Request.getSubjectPublicKeyInfo());
-    return
-        pkcs10Request.isSignatureValid(verifierProvider);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public CompletableFuture<X509CertificateHolder> inspectCSR(String csr)
-      throws IOException {
-    return inspectCSR(CertificateSignRequest.getCertificationRequest(csr));
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public CompletableFuture<X509CertificateHolder>
-        inspectCSR(PKCS10CertificationRequest csr) {
-    /*
-     * The base approver executes the following algorithm to verify that a
-     * CSR meets the PKI Profile criteria.
-     *
-     * 0. For the time being (until we have SCM HA) we will deny all requests
-     * to become an intermediary CA, so we do not need to verify against the
-     * CA profile right now.
-     *
-     * 1. We verify the proof of possession; that is, we verify that the
-     * entity that sends us the CSR indeed has the private key for the said
-     * public key.
-     *
-     * 2. Then we verify that the RDNs meet the format and the syntax that
-     * the PKI profile dictates.
-     *
-     * 3. Then we decode each and every extension and ask if the PKI profile
-     * approves of these extension requests.
-     *
-     * 4. If all of these pass, we return a Future which will point to the
-     * certificate when finished.
-     */
-
-    CompletableFuture<X509CertificateHolder> response =
-        new CompletableFuture<>();
-    try {
-      // Step 0: Verify this is not a CA Certificate.
-      // Will be done by the Ozone PKI profile for time being.
-      // If there are any basicConstraints, they will be flagged as not
-      // supported for the time being.
-
-      // Step 1: Let us verify that the CSR is indeed signed by someone
-      // who has access to the private key.
-      if (!verifyPkcs10Request(csr)) {
-        LOG.error("Failed to verify the signature in CSR.");
-        response.completeExceptionally(new SCMSecurityException("Failed to " +
-            "verify the CSR."));
-      }
-
-      // Step 2: Verify the RDNs are in the correct format.
-      // TODO: Ozone Profile does not verify RDN now, so this call will pass.
-      for (RDN rdn : csr.getSubject().getRDNs()) {
-        if (!profile.validateRDN(rdn)) {
-          LOG.error("Failed in verifying RDNs");
-          response.completeExceptionally(new SCMSecurityException("Failed to " +
-              "verify the RDNs. Please check the subject name."));
-        }
-      }
-
-      // Step 3: Verify the Extensions.
-      if (!verifyExtensions(csr)) {
-        LOG.error("Failed in verification of extensions.");
-        response.completeExceptionally(new SCMSecurityException("Failed to " +
-            "verify extensions."));
-      }
-
-    } catch (OperatorCreationException | PKCSException e) {
-      LOG.error("Approval Failure.", e);
-      response.completeExceptionally(new SCMSecurityException(e));
-    }
-    return response;
-  }
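-
-  // Illustrative caller sketch (assumed): the future returned here only
-  // carries the rejection cases; signing completes the certificate elsewhere.
-  //
-  //   approver.inspectCSR(csrPem)
-  //       .exceptionally(e -> { LOG.error("CSR rejected", e); return null; });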
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateApprover.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateApprover.java
deleted file mode 100644
index 31d0aea..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateApprover.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
-
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.operator.OperatorCreationException;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-
-import java.io.IOException;
-import java.security.PrivateKey;
-import java.util.Date;
-import java.util.concurrent.CompletableFuture;
-
-/**
- * The CertificateApprover interface is used to inspect and approve a
- * certificate signing request (CSR).
- */
-public interface CertificateApprover {
-  /**
-   * Inspects a Certificate Signing Request based on the policies of this
-   * approver.
-   *
-   * @param csr - Certificate Signing Request.
-   * @return - Future that will contain the certificate or an exception.
-   */
-  CompletableFuture<X509CertificateHolder>
-      inspectCSR(PKCS10CertificationRequest csr);
-
-  /**
-   * Inspects a Certificate Signing Request based on the policies of this
-   * approver.
-   *
-   * @param csr - Certificate Signing Request.
-   * @return - Future that will contain the certificate or an exception.
-   * @throws IOException - On Error.
-   */
-  CompletableFuture<X509CertificateHolder>
-      inspectCSR(String csr) throws IOException;
-
-  /**
-   * Sign function signs a Certificate.
-   * @param config - Security Config.
-   * @param caPrivate - CAs private Key.
-   * @param caCertificate - CA Certificate.
-   * @param validFrom - Begin Date
-   * @param validTill - End Date
-   * @param certificationRequest - Certification Request.
-   * @param scmId - SCM id.
-   * @param clusterId - Cluster id.
-   * @return Signed Certificate.
-   * @throws IOException - On Error
-   * @throws OperatorCreationException - on Error.
-   */
-  @SuppressWarnings("ParameterNumber")
-  X509CertificateHolder sign(
-      SecurityConfig config,
-      PrivateKey caPrivate,
-      X509CertificateHolder caCertificate,
-      Date validFrom,
-      Date validTill,
-      PKCS10CertificationRequest certificationRequest,
-      String scmId,
-      String clusterId)
-      throws IOException, OperatorCreationException;
-
-
-  /**
-   * Approval Types for a certificate request.
-   */
-  enum ApprovalType {
-    KERBEROS_TRUSTED, /* The request came from a DN using a Kerberos identity. */
-    MANUAL, /* Wait for a human being to inspect the CSR of this certificate. */
-    TESTING_AUTOMATIC /* For testing purposes, automatic approval. */
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java
deleted file mode 100644
index b1d7d6b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
-
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateApprover.ApprovalType;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-
-import java.io.IOException;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.util.concurrent.Future;
-
-/**
- * Interface for a Certificate Authority. This can be extended later to talk
- * to external CAs or HSMs.
- */
-public interface CertificateServer {
-  /**
-   * Initialize the Certificate Authority.
-   *
-   * @param securityConfig - Security Configuration.
-   * @param type - The type of CertificateServer we are creating; we make this
-   * explicit so that when we read the code it is visible to the users.
-   * @throws SCMSecurityException - Throws if the init fails.
-   */
-  void init(SecurityConfig securityConfig, CAType type)
-      throws SCMSecurityException;
-
-  /**
-   * Returns the CA Certificate for this CA.
-   *
-   * @return X509CertificateHolder - Certificate for this CA.
-   * @throws CertificateException - usually thrown if this CA is not
-   *                              initialized.
-   * @throws IOException          - on Error.
-   */
-  X509CertificateHolder getCACertificate()
-      throws CertificateException, IOException;
-
-  /**
-   * Returns the Certificate corresponding to the given certificate serial id,
-   * if it exists. Returns null otherwise.
-   *
-   * @param certSerialId         - Certificate serial id.
-   * @return X509Certificate     - the certificate, or null if not found.
-   * @throws CertificateException - usually thrown if this CA is not
-   *                              initialized.
-   * @throws IOException          - on Error.
-   */
-  X509Certificate getCertificate(String certSerialId)
-      throws CertificateException, IOException;
-
-  /**
-   * Request a Certificate based on Certificate Signing Request.
-   *
-   * @param csr  - Certificate Signing Request.
-   * @param type - An Enum which says what kind of approval process to follow.
-   * @return A future that will have this certificate when this request is
-   * approved.
-   * @throws SCMSecurityException - on Error.
-   */
-  Future<X509CertificateHolder> requestCertificate(
-      PKCS10CertificationRequest csr,
-      CertificateApprover.ApprovalType type)
-      throws SCMSecurityException;
-
-
-  /**
-   * Request a Certificate based on Certificate Signing Request.
-   *
-   * @param csr - Certificate Signing Request as a PEM encoded String.
-   * @param type - An Enum which says what kind of approval process to follow.
-   * @return A future that will have this certificate when this request is
-   * approved.
-   * @throws SCMSecurityException - on Error.
-   */
-  Future<X509CertificateHolder> requestCertificate(String csr,
-      ApprovalType type) throws IOException;
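-
-  // Illustrative usage sketch, not part of the original interface:
-  //
-  //   Future<X509CertificateHolder> pending = certServer.requestCertificate(
-  //       csrPem, ApprovalType.KERBEROS_TRUSTED);
-  //   X509CertificateHolder issued = pending.get();  // blocks until approved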
-
-  /**
-   * Revokes a Certificate issued by this CertificateServer.
-   *
-   * @param certificate - Certificate to revoke
-   * @param approver - Approval process to follow.
-   * @return Future that tells us what happened.
-   * @throws SCMSecurityException - on Error.
-   */
-  Future<Boolean> revokeCertificate(X509Certificate certificate,
-      ApprovalType approver) throws SCMSecurityException;
-
-  /**
-   * TODO : CRL, OCSP etc. Later. This is the start of a CertificateServer
-   * framework.
-   */
-
-
-  /**
-   * Make it explicit what type of CertificateServer we are creating here.
-   */
-  enum CAType {
-    SELF_SIGNED_CA,
-    INTERMEDIARY_CA
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java
deleted file mode 100644
index 961d048..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
-
-import java.io.IOException;
-import java.math.BigInteger;
-import java.security.cert.X509Certificate;
-
-/**
- * This interface allows the DefaultCA to be portable and to use different DB
- * interfaces later. It also allows us to define this interface in the SCM
- * layer, so we don't have to take a circular dependency between hdds-common
- * and the SCM.
- *
- * With this interface, the DefaultCA server can read from and write to any DB
- * or persistence layer, and we can write to SCM's metadata DB.
- */
-public interface CertificateStore {
-
-  /**
-   * Writes a new certificate that was issued to the persistent store.
-   * @param serialID - Certificate Serial Number.
-   * @param certificate - Certificate to persist.
-   * @throws IOException - on Failure.
-   */
-  void storeValidCertificate(BigInteger serialID,
-                             X509Certificate certificate) throws IOException;
-
-  /**
-   * Moves a certificate in a transactional manner from valid certificate to
-   * revoked certificate state.
-   * @param serialID - Serial ID of the certificate.
-   * @throws IOException - on failure.
-   */
-  void revokeCertificate(BigInteger serialID) throws IOException;
-
-  /**
-   * Deletes an expired certificate from the store. Please note: we don't
-   * remove revoked certificates; we need that information to generate the
-   * CRLs.
-   * @param serialID - Certificate ID.
-   */
-  void removeExpiredCertificate(BigInteger serialID) throws IOException;
-
-  /**
-   * Retrieves a Certificate based on the Serial number of that certificate.
-   * @param serialID - ID of the certificate.
-   * @param certType - which store to search: valid or revoked certificates.
-   * @return X509Certificate
-   * @throws IOException - on failure.
-   */
-  X509Certificate getCertificateByID(BigInteger serialID, CertType certType)
-      throws IOException;
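-
-  // Illustrative lifecycle sketch (assumed): issue, look up, then revoke.
-  //
-  //   store.storeValidCertificate(cert.getSerialNumber(), cert);
-  //   X509Certificate found = store.getCertificateByID(
-  //       cert.getSerialNumber(), CertType.VALID_CERTS);
-  //   store.revokeCertificate(cert.getSerialNumber());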
-
-  /**
-   * Different kind of Certificate stores.
-   */
-  enum CertType {
-    VALID_CERTS,
-    REVOKED_CERTS
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java
deleted file mode 100644
index c7f37c1..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
-
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile;
-import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil;
-import org.apache.hadoop.util.Time;
-import org.bouncycastle.asn1.x500.X500Name;
-import org.bouncycastle.asn1.x500.style.BCStyle;
-import org.bouncycastle.asn1.x509.AlgorithmIdentifier;
-import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.cert.X509v3CertificateBuilder;
-import org.bouncycastle.crypto.params.AsymmetricKeyParameter;
-import org.bouncycastle.crypto.params.RSAKeyParameters;
-import org.bouncycastle.crypto.util.PrivateKeyFactory;
-import org.bouncycastle.crypto.util.PublicKeyFactory;
-import org.bouncycastle.operator.ContentSigner;
-import org.bouncycastle.operator.DefaultDigestAlgorithmIdentifierFinder;
-import org.bouncycastle.operator.DefaultSignatureAlgorithmIdentifierFinder;
-import org.bouncycastle.operator.OperatorCreationException;
-import org.bouncycastle.operator.bc.BcRSAContentSignerBuilder;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-
-import java.io.IOException;
-import java.math.BigInteger;
-import java.security.PrivateKey;
-import java.util.Date;
-import java.util.concurrent.CompletableFuture;
-
-/**
- * Default Approver used by the DefaultCA.
- */
-public class DefaultApprover extends BaseApprover {
-
-  /**
-   * Constructs the Default Approver.
-   *
-   * @param pkiProfile - PKI Profile to use.
-   * @param config - Security Config
-   */
-  public DefaultApprover(PKIProfile pkiProfile, SecurityConfig config) {
-    super(pkiProfile, config);
-  }
-
-  /**
-   * Sign function signs a Certificate.
-   * @param config - Security Config.
-   * @param caPrivate - CAs private Key.
-   * @param caCertificate - CA Certificate.
-   * @param validFrom - Begin Date
-   * @param validTill - End Date
-   * @param certificationRequest - Certification Request.
-   * @param scmId - SCM id.
-   * @param clusterId - Cluster id.
-   * @return Signed Certificate.
-   * @throws IOException - On Error
-   * @throws OperatorCreationException - on Error.
-   */
-  @SuppressWarnings("ParameterNumber")
-  public X509CertificateHolder sign(
-      SecurityConfig config,
-      PrivateKey caPrivate,
-      X509CertificateHolder caCertificate,
-      Date validFrom,
-      Date validTill,
-      PKCS10CertificationRequest certificationRequest,
-      String scmId,
-      String clusterId) throws IOException, OperatorCreationException {
-
-    AlgorithmIdentifier sigAlgId = new
-        DefaultSignatureAlgorithmIdentifierFinder().find(
-        config.getSignatureAlgo());
-    AlgorithmIdentifier digAlgId = new DefaultDigestAlgorithmIdentifierFinder()
-        .find(sigAlgId);
-
-    AsymmetricKeyParameter asymmetricKP = PrivateKeyFactory.createKey(caPrivate
-        .getEncoded());
-    SubjectPublicKeyInfo keyInfo =
-        certificationRequest.getSubjectPublicKeyInfo();
-
-    // Get scmId and cluster Id from subject name.
-    X500Name x500Name = certificationRequest.getSubject();
-    String csrScmId = x500Name.getRDNs(BCStyle.OU)[0].getFirst().getValue()
-        .toASN1Primitive().toString();
-    String csrClusterId = x500Name.getRDNs(BCStyle.O)[0].getFirst().getValue()
-        .toASN1Primitive().toString();
-
-    if (!scmId.equals(csrScmId) || !clusterId.equals(csrClusterId)) {
-      if (csrScmId.equalsIgnoreCase("null") &&
-          csrClusterId.equalsIgnoreCase("null")) {
-        // Special case to handle DN certificate generation as DN might not know
-        // scmId and clusterId before registration. In secure mode registration
-        // will succeed only after datanode has a valid certificate.
-        String cn = x500Name.getRDNs(BCStyle.CN)[0].getFirst().getValue()
-            .toASN1Primitive().toString();
-        x500Name = SecurityUtil.getDistinguishedName(cn, scmId, clusterId);
-      } else {
-        // Throw exception if scmId and clusterId don't match.
-        throw new SCMSecurityException("ScmId and ClusterId in CSR subject" +
-            " are incorrect.");
-      }
-    }
-
-    RSAKeyParameters rsa =
-        (RSAKeyParameters) PublicKeyFactory.createKey(keyInfo);
-    if (rsa.getModulus().bitLength() < config.getSize()) {
-      throw new SCMSecurityException("Key size is too small in certificate " +
-          "signing request");
-    }
-    X509v3CertificateBuilder certificateGenerator =
-        new X509v3CertificateBuilder(
-            caCertificate.getSubject(),
-            // Serial is not sequential but it is monotonically increasing.
-            BigInteger.valueOf(Time.monotonicNowNanos()),
-            validFrom,
-            validTill,
-            x500Name, keyInfo);
-
-    ContentSigner sigGen = new BcRSAContentSignerBuilder(sigAlgId, digAlgId)
-        .build(asymmetricKP);
-
-    return certificateGenerator.build(sigGen);
-
-  }
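-
-  // Illustrative call sketch (assumed): pick the validity window from the
-  // configured default certificate duration.
-  //
-  //   Date from = new Date();
-  //   Date till = Date.from(from.toInstant()
-  //       .plus(config.getDefaultCertDuration()));
-  //   X509CertificateHolder holder = approver.sign(config, caKey, caCert,
-  //       from, till, csr, scmId, clusterId);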
-
-  @Override
-  public CompletableFuture<X509CertificateHolder> inspectCSR(String csr)
-      throws IOException {
-    return super.inspectCSR(csr);
-  }
-
-  @Override
-  public CompletableFuture<X509CertificateHolder>
-      inspectCSR(PKCS10CertificationRequest csr) {
-    return super.inspectCSR(csr);
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
deleted file mode 100644
index a5147b3..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
+++ /dev/null
@@ -1,491 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.DefaultProfile;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile;
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate;
-import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
-import org.apache.hadoop.hdds.security.x509.keys.KeyCodec;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.operator.OperatorCreationException;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.math.BigInteger;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.security.KeyPair;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.security.spec.InvalidKeySpecException;
-import java.time.LocalDate;
-import java.time.LocalDateTime;
-import java.time.LocalTime;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.Future;
-import java.util.function.Consumer;
-
-import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.*;
-
-/**
- * The default CertificateServer used by SCM. This has no dependencies on any
- * external system, which allows us to bootstrap a CertificateServer from
- * scratch.
- * <p>
- * Details =======
- * <p>
- * The Default CA server is one of the many possible implementations of an SCM
- * Certificate Authority.
- * <p>
- * A certificate authority needs the Root Certificates and its private key to
- * operate.  The init function of the DefaultCA Server detects four possible
- * states the System can be in.
- * <p>
- * 1.  Success - This means that the expected Certificates and Keys are in
- * place, and the CA was able to read those files into memory.
- * <p>
- * 2. Missing Keys - This means that the private keys are missing. This is an
- * error state which the SCM CA cannot recover from. The cluster might have
- * been initialized earlier and, for some reason, we are not able to find the
- * private keys for the CA. Eventually we will have two ways to recover from
- * this state: the first is to copy the SCM CA private keys from a backup; the
- * second is to rekey the whole cluster. Both of these are improvements we
- * will support in the future.
- * <p>
- * 3. Missing Certificate - Similar to Missing Keys, but the root certificates
- * are missing.
- * <p>
- * 4. Initialize - We don't have keys or certificates. DefaultCA assumes that
- * this is a system bootup and will generate the keys and certificates
- * automatically.
- * <p>
- * The init() follows the following logic,
- * <p>
- * 1. Compute the Verification Status -- Success, Missing Keys, Missing Certs or
- * Initialize.
- * <p>
- * 2. ProcessVerificationStatus - Returns a Lambda, based on the Verification
- * Status.
- * <p>
- * 3. Invoke the Lambda function.
- * <p>
- * At the end of the init function, we have a functional CA. This function can
- * be invoked any number of times, since we regenerate the keys and certs only
- * if both of them are missing.
- */
-public class DefaultCAServer implements CertificateServer {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DefaultCAServer.class);
-  private final String subject;
-  private final String clusterID;
-  private final String scmID;
-  private String componentName = Paths.get("scm", "ca").toString();
-  private Path caKeysPath;
-  private Path caRootX509Path;
-  private SecurityConfig config;
-  /**
-   * TODO: We will make these configurable in the future.
-   */
-  private PKIProfile profile;
-  private CertificateApprover approver;
-  private CertificateStore store;
-
-  /**
-   * Create an Instance of DefaultCAServer.
-   *  @param subject - String Subject
-   * @param clusterID - String ClusterID
-   * @param scmID - String SCMID.
-   * @param certificateStore - A store used to persist Certificates.
-   */
-  public DefaultCAServer(String subject, String clusterID, String scmID,
-                         CertificateStore certificateStore) {
-    this.subject = subject;
-    this.clusterID = clusterID;
-    this.scmID = scmID;
-    this.store = certificateStore;
-  }
-
-  @Override
-  public void init(SecurityConfig securityConfig, CAType type)
-      throws SCMSecurityException {
-    caKeysPath = securityConfig.getKeyLocation(componentName);
-    caRootX509Path = securityConfig.getCertificateLocation(componentName);
-    this.config = securityConfig;
-
-    // TODO: Make these configurable and load different profiles based on
-    // config.
-    profile = new DefaultProfile();
-    this.approver = new DefaultApprover(profile, this.config);
-
-    /* In the future we will split this code to have different kinds of CAs.
-     * Right now, we have only self-signed CertificateServer.
-     */
-
-    if (type == CAType.SELF_SIGNED_CA) {
-      VerificationStatus status = verifySelfSignedCA(securityConfig);
-      Consumer<SecurityConfig> caInitializer =
-          processVerificationStatus(status);
-      caInitializer.accept(securityConfig);
-      return;
-    }
-
-    LOG.error("We support only Self-Signed CAs for now.");
-    throw new IllegalStateException("Not implemented functionality requested.");
-  }
-
-  @Override
-  public X509CertificateHolder getCACertificate() throws IOException {
-    CertificateCodec certificateCodec =
-        new CertificateCodec(config, componentName);
-    try {
-      return certificateCodec.readCertificate();
-    } catch (CertificateException e) {
-      throw new IOException(e);
-    }
-  }
-
-  /**
-   * Returns the Certificate corresponding to the given certificate serial id
-   * if it exists. Returns null if it doesn't exist.
-   *
-   * @param certSerialId         - Certificate for this CA.
-   * @return X509CertificateHolder
-   * @throws CertificateException - usually thrown if this CA is not
-   * initialized.
-   * @throws IOException - on Error.
-   */
-  @Override
-  public X509Certificate getCertificate(String certSerialId) throws
-      IOException {
-    return store.getCertificateByID(new BigInteger(certSerialId),
-        CertificateStore.CertType.VALID_CERTS);
-  }
-
-  private KeyPair getCAKeys() throws IOException {
-    KeyCodec keyCodec = new KeyCodec(config, componentName);
-    try {
-      return new KeyPair(keyCodec.readPublicKey(), keyCodec.readPrivateKey());
-    } catch (InvalidKeySpecException | NoSuchAlgorithmException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public Future<X509CertificateHolder> requestCertificate(
-      PKCS10CertificationRequest csr,
-      CertificateApprover.ApprovalType approverType) {
-    LocalDate beginDate = LocalDate.now().atStartOfDay().toLocalDate();
-    LocalDateTime temp = LocalDateTime.of(beginDate, LocalTime.MIDNIGHT);
-    LocalDate endDate =
-        temp.plus(config.getDefaultCertDuration()).toLocalDate();
-
-    CompletableFuture<X509CertificateHolder> xcertHolder =
-        approver.inspectCSR(csr);
-
-    if (xcertHolder.isCompletedExceptionally()) {
-      // This means that the approver told us there are things it disagrees
-      // with in this Certificate Request. Since the first set of sanity
-      // checks failed, we just return the future object right here.
-      return xcertHolder;
-    }
-    try {
-      switch (approverType) {
-      case MANUAL:
-        xcertHolder.completeExceptionally(new SCMSecurityException("Manual " +
-            "approval is not yet implemented."));
-        break;
-      case KERBEROS_TRUSTED:
-      case TESTING_AUTOMATIC:
-        X509CertificateHolder xcert;
-        try {
-          xcert = signAndStoreCertificate(beginDate, endDate, csr);
-        } catch (SCMSecurityException e) {
-          // A certificate with a conflicting serial id may have been stored;
-          // retrying may resolve this issue.
-          LOG.error("Certificate storage failed, retrying one more time.", e);
-          xcert = signAndStoreCertificate(beginDate, endDate, csr);
-        }
-
-        xcertHolder.complete(xcert);
-        break;
-      default:
-        return null; // cannot happen, keeping checkstyle happy.
-      }
-    } catch (CertificateException | IOException | OperatorCreationException e) {
-      LOG.error("Unable to issue a certificate. {}", e);
-      xcertHolder.completeExceptionally(new SCMSecurityException(e));
-    }
-    return xcertHolder;
-  }
-
-  private X509CertificateHolder signAndStoreCertificate(LocalDate beginDate,
-      LocalDate endDate, PKCS10CertificationRequest csr) throws IOException,
-      OperatorCreationException, CertificateException {
-    X509CertificateHolder xcert = approver.sign(config,
-        getCAKeys().getPrivate(),
-        getCACertificate(), java.sql.Date.valueOf(beginDate),
-        java.sql.Date.valueOf(endDate), csr, scmID, clusterID);
-    store.storeValidCertificate(xcert.getSerialNumber(),
-        CertificateCodec.getX509Certificate(xcert));
-    return xcert;
-  }
-
-  @Override
-  public Future<X509CertificateHolder> requestCertificate(String csr,
-      CertificateApprover.ApprovalType type) throws IOException {
-    PKCS10CertificationRequest request =
-        getCertificationRequest(csr);
-    return requestCertificate(request, type);
-  }
-
-  @Override
-  public Future<Boolean> revokeCertificate(X509Certificate certificate,
-      CertificateApprover.ApprovalType approverType)
-      throws SCMSecurityException {
-    CompletableFuture<Boolean> revoked = new CompletableFuture<>();
-    if (certificate == null) {
-      revoked.completeExceptionally(new SCMSecurityException(
-          "Certificate cannot be null"));
-      return revoked;
-    }
-    try {
-      store.revokeCertificate(certificate.getSerialNumber());
-      revoked.complete(true);
-    } catch (IOException ex) {
-      LOG.error("Revoking the certificate failed.", ex);
-      throw new SCMSecurityException(ex);
-    }
-    return revoked;
-  }
-
-  /**
-   * Generates a Self-Signed CertificateServer. These are the steps in
-   * generating a Self-Signed CertificateServer:
-   * <p>
-   * 1. Generate a Private/Public Key Pair. 2. Persist it to a protected
-   * location. 3. Generate a Self-Signed Root CertificateServer certificate.
-   *
-   * @param securityConfig - Config.
-   */
-  private void generateSelfSignedCA(SecurityConfig securityConfig) throws
-      NoSuchAlgorithmException, NoSuchProviderException, IOException {
-    KeyPair keyPair = generateKeys(securityConfig);
-    generateRootCertificate(securityConfig, keyPair);
-  }
-
-  /**
-   * Verifies the Self-Signed CertificateServer. 1. Check if the Certificate
-   * exists. 2. Check if the key pair exists.
-   *
-   * @param securityConfig -- Config
-   * @return Verification Status
-   */
-  private VerificationStatus verifySelfSignedCA(SecurityConfig securityConfig) {
-    /*
-    The following is the truth table for the States.
-    True means we have that file False means it is missing.
-    +--------------+--------+--------+--------------+
-    | Certificates |  Keys  | Result |   Function   |
-    +--------------+--------+--------+--------------+
-    | True         | True   | True   | Success      |
-    | False        | False  | True   | Initialize   |
-    | True         | False  | False  | Missing Key  |
-    | False        | True   | False  | Missing Cert |
-    +--------------+--------+--------+--------------+
-
-    This truth table maps to ~(certs xor keys) or certs == keys
-     */
-    boolean keyStatus = checkIfKeysExist();
-    boolean certStatus = checkIfCertificatesExist();
-
-    if ((certStatus == keyStatus) && (certStatus)) {
-      return VerificationStatus.SUCCESS;
-    }
-
-    if ((certStatus == keyStatus) && (!certStatus)) {
-      return VerificationStatus.INITIALIZE;
-    }
-
-    // At this point certStatus is not equal to keyStatus.
-    if (certStatus) {
-      return VerificationStatus.MISSING_KEYS;
-    }
-
-    return VerificationStatus.MISSING_CERTIFICATE;
-  }
-
-  /**
-   * Returns Keys status.
-   *
-   * @return True if the key files exist.
-   */
-  private boolean checkIfKeysExist() {
-    if (!Files.exists(caKeysPath)) {
-      return false;
-    }
-
-    return Files.exists(Paths.get(caKeysPath.toString(),
-        this.config.getPrivateKeyFileName()));
-  }
-
-  /**
-   * Returns certificate Status.
-   *
-   * @return True if the Certificate files exist.
-   */
-  private boolean checkIfCertificatesExist() {
-    if (!Files.exists(caRootX509Path)) {
-      return false;
-    }
-    return Files.exists(Paths.get(caRootX509Path.toString(),
-        this.config.getCertificateFileName()));
-  }
-
-  /**
-   * Based on the Status of the verification, we return a lambda that gets
-   * executed by the init function of the CA.
-   *
-   * @param status - Verification Status.
-   */
-  @VisibleForTesting
-  Consumer<SecurityConfig> processVerificationStatus(
-      VerificationStatus status) {
-    Consumer<SecurityConfig> consumer = null;
-    switch (status) {
-    case SUCCESS:
-      consumer = (arg) -> LOG.info("CertificateServer validation is " +
-          "successful");
-      break;
-    case MISSING_KEYS:
-      consumer = (arg) -> {
-        LOG.error("We have found the Certificate for this CertificateServer, " +
-            "but keys used by this CertificateServer is missing. This is a " +
-            "non-recoverable error. Please restart the system after locating " +
-            "the Keys used by the CertificateServer.");
-        LOG.error("Exiting due to unrecoverable CertificateServer error.");
-        throw new IllegalStateException("Missing Keys, cannot continue.");
-      };
-      break;
-    case MISSING_CERTIFICATE:
-      consumer = (arg) -> {
-        LOG.error("We found the keys, but the root certificate for this " +
-            "CertificateServer is missing. Please restart SCM after locating " +
-            "the " +
-            "Certificates.");
-        LOG.error("Exiting due to unrecoverable CertificateServer error.");
-        throw new IllegalStateException("Missing Root Certs, cannot continue.");
-      };
-      break;
-    case INITIALIZE:
-      consumer = (arg) -> {
-        try {
-          generateSelfSignedCA(arg);
-        } catch (NoSuchProviderException | NoSuchAlgorithmException
-            | IOException e) {
-          LOG.error("Unable to initialize CertificateServer.", e);
-        }
-        VerificationStatus newStatus = verifySelfSignedCA(arg);
-        if (newStatus != VerificationStatus.SUCCESS) {
-          LOG.error("Unable to initialize CertificateServer, failed in " +
-              "verification.");
-        }
-      };
-      break;
-    default:
-      /* Make CheckStyle happy */
-      break;
-    }
-    return consumer;
-  }
-
-  /**
-   * Generates a KeyPair for the Certificate.
-   *
-   * @param securityConfig - SecurityConfig.
-   * @return Key Pair.
-   * @throws NoSuchProviderException  - on Error.
-   * @throws NoSuchAlgorithmException - on Error.
-   * @throws IOException              - on Error.
-   */
-  private KeyPair generateKeys(SecurityConfig securityConfig)
-      throws NoSuchProviderException, NoSuchAlgorithmException, IOException {
-    HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(securityConfig);
-    KeyPair keys = keyGenerator.generateKey();
-    KeyCodec keyPEMWriter = new KeyCodec(securityConfig,
-        componentName);
-    keyPEMWriter.writeKey(keys);
-    return keys;
-  }
-
-  /**
-   * Generates a self-signed Root Certificate for CA.
-   *
-   * @param securityConfig - SecurityConfig
-   * @param key - KeyPair.
-   * @throws IOException          - on Error.
-   * @throws SCMSecurityException - on Error.
-   */
-  private void generateRootCertificate(SecurityConfig securityConfig,
-      KeyPair key) throws IOException, SCMSecurityException {
-    Preconditions.checkNotNull(this.config);
-    LocalDate beginDate = LocalDate.now().atStartOfDay().toLocalDate();
-    LocalDateTime temp = LocalDateTime.of(beginDate, LocalTime.MIDNIGHT);
-    LocalDate endDate =
-        temp.plus(securityConfig.getMaxCertificateDuration()).toLocalDate();
-    X509CertificateHolder selfSignedCertificate =
-        SelfSignedCertificate
-            .newBuilder()
-            .setSubject(this.subject)
-            .setScmID(this.scmID)
-            .setClusterID(this.clusterID)
-            .setBeginDate(beginDate)
-            .setEndDate(endDate)
-            .makeCA()
-            .setConfiguration(securityConfig.getConfiguration())
-            .setKey(key)
-            .build();
-
-    CertificateCodec certCodec =
-        new CertificateCodec(config, componentName);
-    certCodec.writeCertificate(selfSignedCertificate);
-  }
-
-  /**
-   * This represents the verification status of the CA. Based on this enum
-   * appropriate action is taken in the Init.
-   */
-  @VisibleForTesting
-  enum VerificationStatus {
-    SUCCESS, /* All artifacts needed by CertificateServer are present */
-    MISSING_KEYS, /* Private key is missing, certificate Exists.*/
-    MISSING_CERTIFICATE, /* Keys exist, but root certificate missing.*/
-    INITIALIZE /* All artifacts are missing, we should init the system. */
-  }
-}
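
The verifySelfSignedCA() truth table above collapses to a single comparison: the CA is healthy or bootstrappable exactly when certs == keys, and any mismatch names the missing artifact. A runnable restatement follows; CaVerificationSketch and its enum are illustrative stand-ins, not the removed package-private types.

    public final class CaVerificationSketch {
      enum VerificationStatus {
        SUCCESS, MISSING_KEYS, MISSING_CERTIFICATE, INITIALIZE
      }

      // certs == keys is equivalent to ~(certs xor keys), which is exactly
      // the truth table documented in verifySelfSignedCA.
      static VerificationStatus classify(boolean certsExist,
          boolean keysExist) {
        if (certsExist == keysExist) {
          return certsExist ? VerificationStatus.SUCCESS
                            : VerificationStatus.INITIALIZE;
        }
        return certsExist ? VerificationStatus.MISSING_KEYS
                          : VerificationStatus.MISSING_CERTIFICATE;
      }

      public static void main(String[] args) {
        System.out.println(classify(true, true));   // SUCCESS
        System.out.println(classify(false, false)); // INITIALIZE
        System.out.println(classify(true, false));  // MISSING_KEYS
        System.out.println(classify(false, true));  // MISSING_CERTIFICATE
      }
    }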
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java
deleted file mode 100644
index 53eb98f..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles;
-
-import org.bouncycastle.asn1.x509.Extension;
-
-import java.util.function.BiFunction;
-
-import static java.lang.Boolean.TRUE;
-
-/**
- * CA Profile; this is needed when SCM does HA.
- * A placeholder class indicating what we need to do when we support issuing
- * CA certificates to other SCMs in HA mode.
- */
-public class DefaultCAProfile extends DefaultProfile {
-  static final BiFunction<Extension, PKIProfile, Boolean>
-      VALIDATE_BASIC_CONSTRAINTS = (e, b) -> TRUE;
-  static final BiFunction<Extension, PKIProfile, Boolean>
-      VALIDATE_CRL_NUMBER = (e, b) -> TRUE;
-  static final BiFunction<Extension, PKIProfile, Boolean>
-      VALIDATE_REASON_CODE = (e, b) -> TRUE;
-  static final BiFunction<Extension, PKIProfile, Boolean>
-      VALIDATE_DELTA_CRL_INDICATOR = (e, b) -> TRUE;
-  static final BiFunction<Extension, PKIProfile, Boolean>
-      VALIDATE_NAME_CONSTRAINTS = (e, b) -> TRUE;
-  static final BiFunction<Extension, PKIProfile, Boolean>
-      VALIDATE_CRL_DISTRIBUTION_POINTS = (e, b) -> TRUE;
-}
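
Each validator above is a placeholder returning TRUE; a real CA profile would substitute checks that inspect the extension contents. Below is a hypothetical replacement for VALIDATE_BASIC_CONSTRAINTS, assuming BouncyCastle's BasicConstraints ASN.1 wrapper and the PKIProfile interface deleted further down.

    import java.util.function.BiFunction;

    import org.bouncycastle.asn1.x509.BasicConstraints;
    import org.bouncycastle.asn1.x509.Extension;

    final class BasicConstraintsValidatorSketch {
      // Accept the basicConstraints extension only when its CA flag agrees
      // with what this profile is willing to issue.
      static final BiFunction<Extension, PKIProfile, Boolean>
          VALIDATE_BASIC_CONSTRAINTS = (ext, profile) -> {
            BasicConstraints bc =
                BasicConstraints.getInstance(ext.getParsedValue());
            return bc.isCA() == profile.isCA();
          };
    }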
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java
deleted file mode 100644
index 5fdb6f7..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.codec.DecoderException;
-import org.apache.commons.codec.binary.Hex;
-import org.apache.commons.validator.routines.DomainValidator;
-import org.bouncycastle.asn1.ASN1ObjectIdentifier;
-import org.bouncycastle.asn1.x500.RDN;
-import org.bouncycastle.asn1.x509.ExtendedKeyUsage;
-import org.bouncycastle.asn1.x509.Extension;
-import org.bouncycastle.asn1.x509.GeneralName;
-import org.bouncycastle.asn1.x509.GeneralNames;
-import org.bouncycastle.asn1.x509.KeyPurposeId;
-import org.bouncycastle.asn1.x509.KeyUsage;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.AbstractMap.SimpleEntry;
-import java.util.Arrays;
-import java.util.BitSet;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.function.BiFunction;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static java.lang.Boolean.TRUE;
-import static org.bouncycastle.asn1.x509.KeyPurposeId.id_kp_clientAuth;
-import static org.bouncycastle.asn1.x509.KeyPurposeId.id_kp_serverAuth;
-
-/**
- * Ozone PKI profile.
- * <p>
- * This PKI profile is invoked by the SCM CA to make sure that certificates
- * issued by the SCM CA are constrained.
- */
-public class DefaultProfile implements PKIProfile {
-  static final BiFunction<Extension, PKIProfile, Boolean>
-      VALIDATE_KEY_USAGE = DefaultProfile::validateKeyUsage;
-  static final BiFunction<Extension, PKIProfile, Boolean>
-      VALIDATE_AUTHORITY_KEY_IDENTIFIER = (e, b) -> TRUE;
-  static final BiFunction<Extension, PKIProfile, Boolean>
-      VALIDATE_LOGO_TYPE = (e, b) -> TRUE;
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DefaultProfile.class);
-  static final BiFunction<Extension, PKIProfile, Boolean>
-      VALIDATE_SAN = DefaultProfile::validateSubjectAlternativeName;
-  static final BiFunction<Extension, PKIProfile, Boolean>
-      VALIDATE_EXTENDED_KEY_USAGE = DefaultProfile::validateExtendedKeyUsage;
-  // If we decide to add more General Names, we should add those here and
-  // also update the logic in validateGeneralName function.
-  private static final int[] GENERAL_NAMES = {
-      GeneralName.dNSName,
-      GeneralName.iPAddress,
-  };
-  // Map that handles all the Extensions lookup and validations.
-  private static final Map<ASN1ObjectIdentifier, BiFunction<Extension,
-      PKIProfile, Boolean>> EXTENSIONS_MAP = Stream.of(
-      new SimpleEntry<>(Extension.keyUsage, VALIDATE_KEY_USAGE),
-      new SimpleEntry<>(Extension.subjectAlternativeName, VALIDATE_SAN),
-      new SimpleEntry<>(Extension.authorityKeyIdentifier,
-          VALIDATE_AUTHORITY_KEY_IDENTIFIER),
-      new SimpleEntry<>(Extension.extendedKeyUsage,
-          VALIDATE_EXTENDED_KEY_USAGE),
-      // Ozone certs are issued only for the use of Ozone.
-      // However, some users will discover that this is a full-scale CA
-      // and decide to misuse these certs for other purposes.
-      // To discourage usage of these certs for other purposes, we can leave
-      // the Ozone Logo inside these certs. So if a browser is used to
-      // connect, these logos will show up.
-      // https://www.ietf.org/rfc/rfc3709.txt
-      new SimpleEntry<>(Extension.logoType, VALIDATE_LOGO_TYPE))
-      .collect(Collectors.toMap(SimpleEntry::getKey,
-          SimpleEntry::getValue));
-  // If we decide to add more Extended Key Usage purposes, we should add
-  // those here and also update the logic in validateExtendedKeyUsage.
-  private static final KeyPurposeId[] EXTENDED_KEY_USAGE = {
-      id_kp_serverAuth, // TLS Web server authentication
-      id_kp_clientAuth, // TLS Web client authentication
-
-  };
-  private final Set<KeyPurposeId> extendKeyPurposeSet;
-  private Set<Integer> generalNameSet;
-
-  /**
-   * Construct DefaultProfile.
-   */
-  public DefaultProfile() {
-    generalNameSet = new HashSet<>();
-    for (int val : GENERAL_NAMES) {
-      generalNameSet.add(val);
-    }
-    extendKeyPurposeSet =
-        new HashSet<>(Arrays.asList(EXTENDED_KEY_USAGE));
-
-  }
-
-  /**
-   * This function validates that the KeyUsage Bits are a subset of the Bits
-   * permitted by the ozone profile.
-   *
-   * @param ext - KeyUsage Extension.
-   * @param profile - PKI Profile - In this case this profile.
-   * @return True, if the requested key usage is a subset, false otherwise.
-   */
-  private static Boolean validateKeyUsage(Extension ext, PKIProfile profile) {
-    KeyUsage keyUsage = profile.getKeyUsage();
-    KeyUsage requestedUsage = KeyUsage.getInstance(ext.getParsedValue());
-    BitSet profileBitSet = BitSet.valueOf(keyUsage.getBytes());
-    BitSet requestBitSet = BitSet.valueOf(requestedUsage.getBytes());
-    // Check if the requestBitSet is a subset of profileBitSet
-    //  p & r == r should be equal if it is a subset.
-    profileBitSet.and(requestBitSet);
-    return profileBitSet.equals(requestBitSet);
-  }
-
-  /**
-   * Validates the SubjectAlternative names in the Certificate.
-   *
-   * @param ext - Extension - SAN, which allows us to get the SAN names.
-   * @param profile - This profile.
-   * @return - True if the request contains only SANs and General Names that
-   * we support; false otherwise.
-   */
-  private static Boolean validateSubjectAlternativeName(Extension ext,
-      PKIProfile profile) {
-    if (ext.isCritical()) {
-      // SAN extensions should not be marked critical under the ozone profile.
-      LOG.error("SAN extension marked as critical in the Extension. {}",
-          GeneralNames.getInstance(ext.getParsedValue()).toString());
-      return false;
-    }
-    GeneralNames generalNames = GeneralNames.getInstance(ext.getParsedValue());
-    for (GeneralName name : generalNames.getNames()) {
-      try {
-        if (!profile.validateGeneralName(name.getTagNo(),
-            name.getName().toString())) {
-          return false;
-        }
-      } catch (UnknownHostException e) {
-        LOG.error("IP address validation failed."
-            + name.getName().toString(), e);
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * This function validates that the Extended Key Usage purposes are a
-   * subset of the purposes permitted by the ozone profile.
-   *
-   * @param ext - ExtendedKeyUsage Extension.
-   * @param profile - PKI Profile - In this case this profile.
-   * @return True, if the requested extended key usage is a subset, false
-   * otherwise.
-   */
-  private static Boolean validateExtendedKeyUsage(Extension ext,
-      PKIProfile profile) {
-    if (ext.isCritical()) {
-      // https://tools.ietf.org/html/rfc5280#section-4.2.1.12
-      // Ozone profile opts to mark this extension as non-critical.
-      LOG.error("Extended Key usage marked as critical.");
-      return false;
-    }
-    ExtendedKeyUsage extendedKeyUsage =
-        ExtendedKeyUsage.getInstance(ext.getParsedValue());
-    for (KeyPurposeId id : extendedKeyUsage.getUsages()) {
-      if (!profile.validateExtendedKeyUsage(id)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public int[] getGeneralNames() {
-    return Arrays.copyOfRange(GENERAL_NAMES, 0, GENERAL_NAMES.length);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean isSupportedGeneralName(int generalName) {
-    return generalNameSet.contains(generalName);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean validateGeneralName(int type, String value) {
-    // TODO : We should add more validation for IP addresses, for example
-    //  that the address matches the local network, and that the domain
-    //  matches where the cluster exists.
-    if (!isSupportedGeneralName(type)) {
-      return false;
-    }
-    switch (type) {
-    case GeneralName.iPAddress:
-
-      // We need a hex conversion, since the original CSR encodes
-      // an IP address into a Hex String; for example 8.8.8.8 is encoded as
-      // #08080808. The value string is always preceded by "#", which we
-      // strip before passing it on.
-
-      // The getByAddress call converts the IP address to hostname/ipAddress
-      // format; if the hostname cannot be determined then it will be
-      // /ipAddress.
-
-      // TODO: Fail? if we cannot resolve the Hostname?
-      try {
-        final InetAddress byAddress = InetAddress.getByAddress(
-            Hex.decodeHex(value.substring(1)));
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Host Name/IP Address : {}", byAddress.toString());
-        }
-        return true;
-      } catch (UnknownHostException | DecoderException e) {
-        return false;
-      }
-    case GeneralName.dNSName:
-      return DomainValidator.getInstance().isValid(value);
-    default:
-      // This should not happen, since it is guarded by
-      // isSupportedGeneralName.
-      LOG.error("Unexpected type in General Name (int value): {}", type);
-      return false;
-    }
-  }
-
-  @Override
-  public boolean validateExtendedKeyUsage(KeyPurposeId id) {
-    return extendKeyPurposeSet.contains(id);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public ASN1ObjectIdentifier[] getSupportedExtensions() {
-    return EXTENSIONS_MAP.keySet().toArray(new ASN1ObjectIdentifier[0]);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean isSupportedExtension(Extension extension) {
-    return EXTENSIONS_MAP.containsKey(extension.getExtnId());
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean validateExtension(Extension extension) {
-    Preconditions.checkNotNull(extension, "Extension cannot be null");
-
-    if (!isSupportedExtension(extension)) {
-      LOG.error("Unsupported Extension found: {} ",
-          extension.getExtnId().getId());
-      return false;
-    }
-
-    BiFunction<Extension, PKIProfile, Boolean> func =
-        EXTENSIONS_MAP.get(extension.getExtnId());
-
-    if (func != null) {
-      return func.apply(extension, this);
-    }
-    return false;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public KeyUsage getKeyUsage() {
-    return new KeyUsage(KeyUsage.digitalSignature | KeyUsage.keyEncipherment
-        | KeyUsage.dataEncipherment | KeyUsage.keyAgreement);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public RDN[] getRDNs() {
-    return new RDN[0];
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean isValidRDN(RDN distinguishedName) {
-    // TODO: Right now we just approve all strings.
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean validateRDN(RDN name) {
-    return true;
-  }
-
-  @Override
-  public boolean isCA() {
-    return false;
-  }
-}
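
The "p & r == r" subset test in validateKeyUsage is easy to verify in isolation. Here is a small runnable sketch against the same KeyUsage mask this profile grants; KeyUsageSubsetDemo is a hypothetical test harness, not part of the deleted code.

    import java.util.BitSet;

    import org.bouncycastle.asn1.x509.KeyUsage;

    public final class KeyUsageSubsetDemo {
      static boolean isSubset(KeyUsage profileUsage, KeyUsage requested) {
        BitSet profileBits = BitSet.valueOf(profileUsage.getBytes());
        BitSet requestBits = BitSet.valueOf(requested.getBytes());
        profileBits.and(requestBits);           // p & r
        return profileBits.equals(requestBits); // == r, i.e. r is a subset
      }

      public static void main(String[] args) {
        KeyUsage profile = new KeyUsage(KeyUsage.digitalSignature
            | KeyUsage.keyEncipherment | KeyUsage.dataEncipherment
            | KeyUsage.keyAgreement);
        // digitalSignature alone is within the profile: prints true.
        System.out.println(
            isSubset(profile, new KeyUsage(KeyUsage.digitalSignature)));
        // keyCertSign is not granted by the profile: prints false.
        System.out.println(
            isSubset(profile, new KeyUsage(KeyUsage.keyCertSign)));
      }
    }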
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java
deleted file mode 100644
index c3ff198..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles;
-
-import org.bouncycastle.asn1.ASN1ObjectIdentifier;
-import org.bouncycastle.asn1.x500.RDN;
-import org.bouncycastle.asn1.x509.Extension;
-import org.bouncycastle.asn1.x509.KeyPurposeId;
-import org.bouncycastle.asn1.x509.KeyUsage;
-
-import java.net.UnknownHostException;
-
-/**
- * Base class for profile rules. Generally, profiles are documents that
- * define the PKI policy. In the HDDS/Ozone world, we have chosen to make PKI
- * profiles executable code. So end-users who want a custom profile, or one
- * of the existing profiles like those listed below, are free to implement
- * one.
- *
- *     PKIX - Internet PKI profile.
- *     FPKI - (US) Federal PKI profile.
- *     MISSI - US DoD profile.
- *     ISO 15782 - Banking - Certificate Management Part 1: Public Key
- *         Certificates.
- *     TeleTrust/MailTrusT - German MailTrusT profile for TeleTrusT (it
- *     really is
- *         capitalised that way).
- *     German SigG Profile - Profile to implement the German digital
- *     signature law
- *     ISIS Profile - Another German profile.
- *     Australian Profile - Profile for the Australian PKAF
- *     SS 61 43 31 Electronic ID Certificate - Swedish profile.
- *     FINEID S3 - Finnish profile.
- *     ANX Profile - Automotive Network Exchange profile.
- *     Microsoft Profile - This isn't a real profile, but windows uses this.
- */
-public interface PKIProfile {
-
-  /**
-   * Returns the list of General Names supported by this profile.
-   * @return - an Array of supported General Names by this certificate profile.
-   */
-  int[] getGeneralNames();
-
-  /**
-   * Checks if a given General Name is permitted in this profile.
-   * @param generalName - General name.
-   * @return true if it is allowed, false otherwise.
-   */
-  boolean isSupportedGeneralName(int generalName);
-
-  /**
-   * Allows the profile to dictate what value ranges are valid.
-   * @param type - Type of the General Name.
-   * @param value - Value of the General Name.
-   * @return - true if the value is permitted, false otherwise.
-   * @throws UnknownHostException - on Error in IP validation.
-   */
-  boolean validateGeneralName(int type, String value)
-      throws UnknownHostException;
-
-  /**
-   * Returns an array of Object identifiers for extensions supported by this
-   * profile.
-   * @return an Array of ASN1ObjectIdentifier for the supported extensions.
-   */
-  ASN1ObjectIdentifier[] getSupportedExtensions();
-
-  /**
-   * Checks if this extension is permitted in this profile.
-   * @param extension - Extension to check for.
-   * @return - true if this extension is supported, false otherwise.
-   */
-  boolean isSupportedExtension(Extension extension);
-
-  /**
-   * Checks if the extension has the value which this profile approves.
-   * @param extension - Extension to validate.
-   * @return - True if the extension is acceptable, false otherwise.
-   */
-  boolean validateExtension(Extension extension);
-
-  /**
-   * Validate the Extended Key Usage.
-   * @param id - KeyPurpose ID
-   * @return true, if this is a supported Purpose, false otherwise.
-   */
-  boolean validateExtendedKeyUsage(KeyPurposeId id);
-
-  /**
-   * Returns the permitted Key usage mask while using this profile.
-   * @return KeyUsage
-   */
-  KeyUsage getKeyUsage();
-
-  /**
-   * Gets the list of RDNs supported by this profile.
-   * @return Array of RDNs.
-   */
-  RDN[] getRDNs();
-
-  /**
-   * Returns true if this Relative Distinguished Name component is allowed in
-   * this profile.
-   * @param distinguishedName - RDN to check.
-   * @return boolean, True if this RDN is allowed, false otherwise.
-   */
-  boolean isValidRDN(RDN distinguishedName);
-
-  /**
-   * Allows the profile to control the value set of the RDN. The profile can
-   * reject an RDN name if needed.
-   * @param name - RDN.
-   * @return true if the name is acceptable to this profile, false otherwise.
-   */
-  boolean validateRDN(RDN name);
-
-  /**
-   * True if the profile we are checking is for issuing a CA certificate.
-   * @return  True, if the profile used is for CA, false otherwise.
-   */
-  boolean isCA();
-}
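
Because profiles are executable code, tightening policy is just subclassing. The following is a hypothetical DnsOnlyProfile that narrows DefaultProfile to DNS-name SANs; it works because DefaultProfile.validateGeneralName consults isSupportedGeneralName before any type-specific checks.

    import org.bouncycastle.asn1.x509.GeneralName;

    public class DnsOnlyProfile extends DefaultProfile {
      @Override
      public boolean isSupportedGeneralName(int generalName) {
        // Only dNSName SANs are acceptable; iPAddress and all other
        // General Name types are rejected.
        return generalName == GeneralName.dNSName;
      }
    }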
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/package-info.java
deleted file mode 100644
index 36c885d..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/package-info.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * PKI PKIProfile package supports different kind of profiles that certificates
- * can support. If you are not familiar with PKI profiles, there is an
- * excellent introduction at
- *
- * https://www.cs.auckland.ac.nz/~pgut001/pubs/x509guide.txt
- *
- * At high level, the profiles in this directory define what kinds of
- * Extensions, General names , Key usage and critical extensions are
- * permitted when the CA is functional.
- *
- * An excellent example of a profile would be ozone profile if you would
- * like to see a reference to create your own profiles.
- */
-package org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java
deleted file mode 100644
index af53904..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Classes related to Certificate Life Cycle or Certificate Authority Server.
- */
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java
deleted file mode 100644
index 34b4930..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.client;
-
-import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
-import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
-
-import java.io.InputStream;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.cert.CertStore;
-import java.security.cert.X509Certificate;
-import java.util.List;
-
-/**
- * Certificate client provides an interface to certificate operations that
- * need to be performed by all clients in the Ozone eco-system.
- */
-public interface CertificateClient {
-
-  /**
-   * Returns the private key of the specified component if it exists on the
-   * local system.
-   *
-   * @return private key or Null if there is no data.
-   */
-  PrivateKey getPrivateKey();
-
-  /**
-   * Returns the public key of the specified component if it exists on the local
-   * system.
-   *
-   * @return public key or Null if there is no data.
-   */
-  PublicKey getPublicKey();
-
-  /**
-   * Returns the certificate of the specified component if it exists on the
-   * local system.
-   * @param certSerialId
-   *
-   * @return certificate or Null if there is no data.
-   */
-  X509Certificate getCertificate(String certSerialId)
-      throws CertificateException;
-
-  /**
-   * Returns the certificate of the specified component if it exists on the
-   * local system.
-   *
-   * @return certificate or Null if there is no data.
-   */
-  X509Certificate getCertificate();
-
-  /**
-   * Returns the latest CA certificate known to the client.
-   * @return latest ca certificate known to the client.
-   */
-  X509Certificate getCACertificate();
-
-  /**
-   * Verifies if this certificate is part of a trusted chain.
-   * @param certificate - certificate.
-   * @return true if it is trusted, false otherwise.
-   */
-  boolean verifyCertificate(X509Certificate certificate);
-
-  /**
-   * Creates a digital signature over the data stream using the component's
-   * private key.
-   *
-   * @param stream - Data stream to sign.
-   * @return byte array - containing the signature.
-   * @throws CertificateException - on Error.
-   */
-  byte[] signDataStream(InputStream stream)
-      throws CertificateException;
-
-  byte[] signData(byte[] data) throws CertificateException;
-
-  /**
-   * Verifies a digital Signature, given the signature and the certificate of
-   * the signer.
-   *
-   * @param stream - Data Stream.
-   * @param signature - Byte Array containing the signature.
-   * @param cert - Certificate of the Signer.
-   * @return true if verified, false if not.
-   */
-  boolean verifySignature(InputStream stream, byte[] signature,
-      X509Certificate cert) throws CertificateException;
-
-  /**
-   * Verifies a digital Signature, given the signature and the certificate of
-   * the signer.
-   * @param data - Data in byte array.
-   * @param signature - Byte Array containing the signature.
-   * @param cert - Certificate of the Signer.
-   * @return true if verified, false if not.
-   */
-  boolean verifySignature(byte[] data, byte[] signature,
-      X509Certificate cert) throws CertificateException;
-
-  /**
-   * Returns a CSR builder that can be used to create a Certificate signing
-   * request.
-   *
-   * @return CertificateSignRequest.Builder
-   */
-  CertificateSignRequest.Builder getCSRBuilder() throws CertificateException;
-
-  /**
-   * Get the certificate of well-known entity from SCM.
-   *
-   * @param query - String Query, please see the implementation for the
-   * discussion on the query formats.
-   * @return X509Certificate or null if not found.
-   */
-  X509Certificate queryCertificate(String query);
-
-  /**
-   * Stores the Certificate for this client. Don't use this API to add
-   * trusted certificates of others.
-   *
-   * @param pemEncodedCert        - pem encoded X509 Certificate
-   * @param force                 - override any existing file
-   * @throws CertificateException - on Error.
-   *
-   */
-  void storeCertificate(String pemEncodedCert, boolean force)
-      throws CertificateException;
-
-  /**
-   * Stores the Certificate for this client. Don't use this API to add
-   * trusted certificates of others.
-   *
-   * @param pemEncodedCert        - pem encoded X509 Certificate
-   * @param force                 - override any existing file
-   * @param caCert                - Is CA certificate.
-   * @throws CertificateException - on Error.
-   *
-   */
-  void storeCertificate(String pemEncodedCert, boolean force, boolean caCert)
-      throws CertificateException;
-
-  /**
-   * Stores the trusted chain of certificates.
-   *
-   * @param certStore - Cert Store.
-   * @throws CertificateException - on Error.
-   */
-  void storeTrustChain(CertStore certStore) throws CertificateException;
-
-  /**
-   * Stores the trusted chain of certificates.
-   *
-   * @param certificates - List of Certificates.
-   *
-   * @throws CertificateException - on Error.
-   */
-  void storeTrustChain(List<X509Certificate> certificates)
-      throws CertificateException;
-
-  /**
-   * Initializes the certificate client.
-   */
-  InitResponse init() throws CertificateException;
-
-  /**
-   * Represents initialization response of client.
-   * 1. SUCCESS: Means client is initialized successfully and all required
-   *              files are in expected state.
-   * 2. FAILURE: Initialization failed due to some unrecoverable error.
-   * 3. GETCERT: Bootstrap of keypair is successful but certificate is not
-   *             found. Client should request SCM signed certificate.
-   *
-   */
-  enum InitResponse {
-    SUCCESS,
-    FAILURE,
-    GETCERT,
-    RECOVER
-  }
-
-}
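
Below is a hedged sketch of the bootstrap handshake the InitResponse contract implies. requestScmSignedCertificate() is a hypothetical stand-in for the SCM RPC that signs the CSR and returns a PEM-encoded certificate, and the RECOVER handling is an assumption, since the interface above does not document that state.

    import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
    import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;

    public final class CertificateBootstrapSketch {
      void bootstrap(CertificateClient client) throws CertificateException {
        switch (client.init()) {
        case SUCCESS:
          break; // keys and certificate are already in the expected state
        case GETCERT:
          // Keypair exists but no certificate yet: build a CSR, have SCM
          // sign it, then persist the result as this client's certificate.
          CertificateSignRequest.Builder csrBuilder = client.getCSRBuilder();
          String pemEncodedCert = requestScmSignedCertificate(csrBuilder);
          client.storeCertificate(pemEncodedCert, true);
          break;
        case RECOVER: // assumption: treated like FAILURE, see note above
        case FAILURE:
        default:
          throw new CertificateException(
              "Certificate client is in an unrecoverable state.");
        }
      }

      // Hypothetical RPC wrapper; the real call goes through
      // SCMSecurityProtocol and is out of scope for this sketch.
      private String requestScmSignedCertificate(
          CertificateSignRequest.Builder csrBuilder) {
        throw new UnsupportedOperationException("illustrative stub");
      }
    }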
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java
deleted file mode 100644
index 7698658..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.client;
-
-import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
-import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-
-/**
- * Certificate client for DataNodes.
- */
-public class DNCertificateClient extends DefaultCertificateClient {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DNCertificateClient.class);
-
-  public static final String COMPONENT_NAME = "dn";
-
-  public DNCertificateClient(SecurityConfig securityConfig,
-      String certSerialId) {
-    super(securityConfig, LOG, certSerialId, COMPONENT_NAME);
-  }
-
-  public DNCertificateClient(SecurityConfig securityConfig) {
-    super(securityConfig, LOG, null, COMPONENT_NAME);
-  }
-
-  /**
-   * Returns a CSR builder that can be used to create a Certificate signing
-   * request.
-   *
-   * @return CertificateSignRequest.Builder
-   */
-  @Override
-  public CertificateSignRequest.Builder getCSRBuilder()
-      throws CertificateException {
-    return super.getCSRBuilder()
-        .setDigitalEncryption(false)
-        .setDigitalSignature(false);
-  }
-
-  public Logger getLogger() {
-    return LOG;
-  }
-}
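
Wiring this client up on a datanode is short. A sketch follows, assuming a default OzoneConfiguration is acceptable and that SecurityConfig wraps the ozone security settings used throughout this package; DnClientWiringSketch is a hypothetical harness.

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.security.x509.SecurityConfig;

    public final class DnClientWiringSketch {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        SecurityConfig secConfig = new SecurityConfig(conf);
        // Before first registration there is no issued certificate yet, so
        // the single-argument constructor (null serial id) is the starting
        // point; pass the persisted serial id on later restarts.
        DNCertificateClient dnClient = new DNCertificateClient(secConfig);
        // The getCSRBuilder() override above strips the encryption and
        // signature flags from the generated CSR.
        dnClient.getCSRBuilder();
      }
    }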
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
deleted file mode 100644
index ff99e08..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
+++ /dev/null
@@ -1,828 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.client;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.io.FilenameUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.math.NumberUtils;
-import org.apache.commons.validator.routines.DomainValidator;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
-import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
-import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
-import org.apache.hadoop.hdds.security.x509.keys.KeyCodec;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.slf4j.Logger;
-
-import java.io.ByteArrayInputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.InetSocketAddress;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.security.InvalidKeyException;
-import java.security.KeyPair;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.Signature;
-import java.security.SignatureException;
-import java.security.cert.CertStore;
-import java.security.cert.X509Certificate;
-import java.security.spec.InvalidKeySpecException;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.concurrent.ConcurrentHashMap;
-
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS;
-import static org.apache.hadoop.hdds.security.x509.exceptions.CertificateException.ErrorCode.*;
-
-/**
- * Default Certificate client implementation. It provides certificate
- * operations that need to be performed by certificate clients in the Ozone
- * eco-system.
- */
-public abstract class DefaultCertificateClient implements CertificateClient {
-
-  private static final String CERT_FILE_NAME_FORMAT = "%s.crt";
-  private static final String CA_CERT_PREFIX = "CA-";
-  private static final int CA_CERT_PREFIX_LEN = 3;
-  private final Logger logger;
-  private final SecurityConfig securityConfig;
-  private final KeyCodec keyCodec;
-  private PrivateKey privateKey;
-  private PublicKey publicKey;
-  private X509Certificate x509Certificate;
-  private Map<String, X509Certificate> certificateMap;
-  private String certSerialId;
-  private String caCertId;
-  private String component;
-
-  DefaultCertificateClient(SecurityConfig securityConfig, Logger log,
-      String certSerialId, String component) {
-    Objects.requireNonNull(securityConfig);
-    this.securityConfig = securityConfig;
-    keyCodec = new KeyCodec(securityConfig, component);
-    this.logger = log;
-    this.certificateMap = new ConcurrentHashMap<>();
-    this.certSerialId = certSerialId;
-    this.component = component;
-
-    loadAllCertificates();
-  }
-
-  /**
-   * Loads all certificates from the configured location.
-   */
-  private void loadAllCertificates() {
-    // See if certs directory exists in file system.
-    Path certPath = securityConfig.getCertificateLocation(component);
-    if (Files.exists(certPath) && Files.isDirectory(certPath)) {
-      getLogger().info("Loading certificate from location:{}.",
-          certPath);
-      File[] certFiles = certPath.toFile().listFiles();
-
-      if (certFiles != null) {
-        CertificateCodec certificateCodec =
-            new CertificateCodec(securityConfig, component);
-        long latestCaCertSerialId = -1L;
-        for (File file : certFiles) {
-          if (file.isFile()) {
-            try {
-              X509CertificateHolder x509CertificateHolder = certificateCodec
-                  .readCertificate(certPath, file.getName());
-              X509Certificate cert =
-                  CertificateCodec.getX509Certificate(x509CertificateHolder);
-              if (cert != null && cert.getSerialNumber() != null) {
-                if (cert.getSerialNumber().toString().equals(certSerialId)) {
-                  x509Certificate = cert;
-                }
-                certificateMap.putIfAbsent(cert.getSerialNumber().toString(),
-                    cert);
-                if (file.getName().startsWith(CA_CERT_PREFIX)) {
-                  String certFileName = FilenameUtils.getBaseName(
-                      file.getName());
-                  long tmpCaCertSerialId = NumberUtils.toLong(
-                      certFileName.substring(CA_CERT_PREFIX_LEN));
-                  if (tmpCaCertSerialId > latestCaCertSerialId) {
-                    latestCaCertSerialId = tmpCaCertSerialId;
-                  }
-                }
-                getLogger().info("Added certificate from file:{}.",
-                    file.getAbsolutePath());
-              } else {
-                getLogger().error("Error reading certificate from file:{}",
-                    file);
-              }
-            } catch (java.security.cert.CertificateException | IOException e) {
-              getLogger().error("Error reading certificate from file:{}.",
-                  file.getAbsolutePath(), e);
-            }
-          }
-        }
-        if (latestCaCertSerialId != -1) {
-          caCertId = Long.toString(latestCaCertSerialId);
-        }
-      }
-    }
-  }
-
-  /**
-   * Returns the private key of this client if it exists on the local
-   * system.
-   *
-   * @return private key or Null if there is no data.
-   */
-  @Override
-  public PrivateKey getPrivateKey() {
-    if (privateKey != null) {
-      return privateKey;
-    }
-
-    Path keyPath = securityConfig.getKeyLocation(component);
-    if (OzoneSecurityUtil.checkIfFileExist(keyPath,
-        securityConfig.getPrivateKeyFileName())) {
-      try {
-        privateKey = keyCodec.readPrivateKey();
-      } catch (InvalidKeySpecException | NoSuchAlgorithmException
-          | IOException e) {
-        getLogger().error("Error while getting private key.", e);
-      }
-    }
-    return privateKey;
-  }
-
-  /**
-   * Returns the public key of this client if it exists on the local system.
-   *
-   * @return public key or Null if there is no data.
-   */
-  @Override
-  public PublicKey getPublicKey() {
-    if (publicKey != null) {
-      return publicKey;
-    }
-
-    Path keyPath = securityConfig.getKeyLocation(component);
-    if (OzoneSecurityUtil.checkIfFileExist(keyPath,
-        securityConfig.getPublicKeyFileName())) {
-      try {
-        publicKey = keyCodec.readPublicKey();
-      } catch (InvalidKeySpecException | NoSuchAlgorithmException
-          | IOException e) {
-        getLogger().error("Error while getting public key.", e);
-      }
-    }
-    return publicKey;
-  }
-
-  /**
-   * Returns the default certificate of given client if it exists.
-   *
-   * @return certificate or Null if there is no data.
-   */
-  @Override
-  public X509Certificate getCertificate() {
-    if (x509Certificate != null) {
-      return x509Certificate;
-    }
-
-    if (certSerialId == null) {
-      getLogger().error("Default certificate serial id is not set. Can't " +
-          "locate the default certificate for this client.");
-      return null;
-    }
-    // Refresh the cache from file system.
-    loadAllCertificates();
-    if (certificateMap.containsKey(certSerialId)) {
-      x509Certificate = certificateMap.get(certSerialId);
-    }
-    return x509Certificate;
-  }
-
-  /**
-   * Return the latest CA certificate known to the client.
-   * @return latest ca certificate known to the client.
-   */
-  @Override
-  public X509Certificate getCACertificate() {
-    if (caCertId != null) {
-      return certificateMap.get(caCertId);
-    }
-    return null;
-  }
-
-  /**
-   * Returns the certificate with the specified certificate serial id if it
-   * exists locally; otherwise tries to get it from SCM.
-   * @param certId - certificate serial id.
-   *
-   * @return certificate or Null if there is no data.
-   */
-  @Override
-  public X509Certificate getCertificate(String certId)
-      throws CertificateException {
-    // Check if it is in cache.
-    if (certificateMap.containsKey(certId)) {
-      return certificateMap.get(certId);
-    }
-    // Try to get it from SCM.
-    return this.getCertificateFromScm(certId);
-  }
-
-  /**
-   * Get certificate from SCM and store it in local file system.
-   * @param certId - certificate serial id.
-   * @return certificate
-   */
-  private X509Certificate getCertificateFromScm(String certId)
-      throws CertificateException {
-
-    getLogger().info("Getting certificate with certSerialId:{}.",
-        certId);
-    try {
-      SCMSecurityProtocol scmSecurityProtocolClient = getScmSecurityClient(
-          (OzoneConfiguration) securityConfig.getConfiguration());
-      String pemEncodedCert =
-          scmSecurityProtocolClient.getCertificate(certId);
-      this.storeCertificate(pemEncodedCert, true);
-      return CertificateCodec.getX509Certificate(pemEncodedCert);
-    } catch (Exception e) {
-      getLogger().error("Error while getting Certificate with " +
-          "certSerialId:{} from scm.", certId, e);
-      throw new CertificateException("Error while getting certificate for " +
-          "certSerialId:" + certId, e, CERTIFICATE_ERROR);
-    }
-  }
-
-  /**
-   * Verifies if this certificate is part of a trusted chain.
-   *
-   * @param certificate - certificate.
-   * @return true if it is trusted, false otherwise.
-   */
-  @Override
-  public boolean verifyCertificate(X509Certificate certificate) {
-    throw new UnsupportedOperationException("Operation not supported.");
-  }
-
-  /**
-   * Creates a digital signature over the data stream using the client's
-   * private key.
-   *
-   * @param stream - Data stream to sign.
-   * @throws CertificateException - on Error.
-   */
-  @Override
-  public byte[] signDataStream(InputStream stream)
-      throws CertificateException {
-    try {
-      Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(),
-          securityConfig.getProvider());
-      sign.initSign(getPrivateKey());
-      byte[] buffer = new byte[1024 * 4];
-
-      int len;
-      while (-1 != (len = stream.read(buffer))) {
-        sign.update(buffer, 0, len);
-      }
-      return sign.sign();
-    } catch (NoSuchAlgorithmException | NoSuchProviderException
-        | InvalidKeyException | SignatureException | IOException e) {
-      getLogger().error("Error while signing the stream", e);
-      throw new CertificateException("Error while signing the stream", e,
-          CRYPTO_SIGN_ERROR);
-    }
-  }
-
-  /**
-   * Creates a digital signature over the given data using the client's
-   * private key.
-   *
-   * @param data - Data to sign.
-   * @throws CertificateException - on Error.
-   */
-  @Override
-  public byte[] signData(byte[] data) throws CertificateException {
-    try {
-      Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(),
-          securityConfig.getProvider());
-
-      sign.initSign(getPrivateKey());
-      sign.update(data);
-
-      return sign.sign();
-    } catch (NoSuchAlgorithmException | NoSuchProviderException
-        | InvalidKeyException | SignatureException e) {
-      getLogger().error("Error while signing the stream", e);
-      throw new CertificateException("Error while signing the stream", e,
-          CRYPTO_SIGN_ERROR);
-    }
-  }
-
-  /**
-   * Verifies a digital Signature, given the signature and the certificate of
-   * the signer.
-   *
-   * @param stream - Data Stream.
-   * @param signature - Byte Array containing the signature.
-   * @param cert - Certificate of the Signer.
-   * @return true if verified, false if not.
-   */
-  @Override
-  public boolean verifySignature(InputStream stream, byte[] signature,
-      X509Certificate cert) throws CertificateException {
-    try {
-      Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(),
-          securityConfig.getProvider());
-      sign.initVerify(cert);
-      byte[] buffer = new byte[1024 * 4];
-
-      int len;
-      while (-1 != (len = stream.read(buffer))) {
-        sign.update(buffer, 0, len);
-      }
-      return sign.verify(signature);
-    } catch (NoSuchAlgorithmException | NoSuchProviderException
-        | InvalidKeyException | SignatureException | IOException e) {
-      getLogger().error("Error while signing the stream", e);
-      throw new CertificateException("Error while signing the stream", e,
-          CRYPTO_SIGNATURE_VERIFICATION_ERROR);
-    }
-  }
-
-  /**
-   * Verifies a digital Signature, given the signature and the certificate of
-   * the signer.
-   *
-   * @param data - Data in byte array.
-   * @param signature - Byte Array containing the signature.
-   * @param cert - Certificate of the Signer.
-   * @return true if verified, false if not.
-   */
-  @Override
-  public boolean verifySignature(byte[] data, byte[] signature,
-      X509Certificate cert) throws CertificateException {
-    try {
-      Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(),
-          securityConfig.getProvider());
-      sign.initVerify(cert);
-      sign.update(data);
-      return sign.verify(signature);
-    } catch (NoSuchAlgorithmException | NoSuchProviderException
-        | InvalidKeyException | SignatureException e) {
-      getLogger().error("Error while signing the stream", e);
-      throw new CertificateException("Error while signing the stream", e,
-          CRYPTO_SIGNATURE_VERIFICATION_ERROR);
-    }
-  }
-
-  /**
-   * Verifies a digital Signature, given the signature and the certificate of
-   * the signer.
-   *
-   * @param data - Data in byte array.
-   * @param signature - Byte Array containing the signature.
-   * @param pubKey - Certificate of the Signer.
-   * @return true if verified, false if not.
-   */
-  private boolean verifySignature(byte[] data, byte[] signature,
-      PublicKey pubKey) throws CertificateException {
-    try {
-      Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(),
-          securityConfig.getProvider());
-      sign.initVerify(pubKey);
-      sign.update(data);
-      return sign.verify(signature);
-    } catch (NoSuchAlgorithmException | NoSuchProviderException
-        | InvalidKeyException | SignatureException e) {
-      getLogger().error("Error while signing the stream", e);
-      throw new CertificateException("Error while signing the stream", e,
-          CRYPTO_SIGNATURE_VERIFICATION_ERROR);
-    }
-  }
-
-  /**
-   * Returns a CSR builder that can be used to create a Certificate signing
-   * request.
-   *
-   * @return CertificateSignRequest.Builder
-   */
-  @Override
-  public CertificateSignRequest.Builder getCSRBuilder()
-      throws CertificateException {
-    CertificateSignRequest.Builder builder =
-        new CertificateSignRequest.Builder()
-        .setConfiguration(securityConfig.getConfiguration());
-    try {
-      DomainValidator validator = DomainValidator.getInstance();
-      // Add all valid ips.
-      OzoneSecurityUtil.getValidInetsForCurrentHost().forEach(
-          ip -> {
-            builder.addIpAddress(ip.getHostAddress());
-            if(validator.isValid(ip.getCanonicalHostName())) {
-              builder.addDnsName(ip.getCanonicalHostName());
-            }
-          });
-    } catch (IOException e) {
-      throw new CertificateException("Error while adding ip to CSR builder",
-          e, CSR_ERROR);
-    }
-    return builder;
-  }
-
-  /**
-   * Get the certificate of well-known entity from SCM.
-   *
-   * @param query - String Query, please see the implementation for the
-   * discussion on the query formats.
-   * @return X509Certificate or null if not found.
-   */
-  @Override
-  public X509Certificate queryCertificate(String query) {
-    // TODO:
-    throw new UnsupportedOperationException("Operation not supported");
-  }
-
-  /**
-   * Stores the Certificate for this client. Don't use this API to add trusted
-   * certificates of others.
-   *
-   * @param pemEncodedCert        - pem encoded X509 Certificate
-   * @param force                 - override any existing file
-   * @throws CertificateException - on Error.
-   *
-   */
-  @Override
-  public void storeCertificate(String pemEncodedCert, boolean force)
-      throws CertificateException {
-    this.storeCertificate(pemEncodedCert, force, false);
-  }
-
-  /**
-   * Stores the Certificate for this client. Don't use this API to add trusted
-   * certificates of others.
-   *
-   * @param pemEncodedCert        - pem encoded X509 Certificate
-   * @param force                 - override any existing file
-   * @param caCert                - Is CA certificate.
-   * @throws CertificateException - on Error.
-   *
-   */
-  @Override
-  public void storeCertificate(String pemEncodedCert, boolean force,
-      boolean caCert) throws CertificateException {
-    CertificateCodec certificateCodec = new CertificateCodec(securityConfig,
-        component);
-    try {
-      Path basePath = securityConfig.getCertificateLocation(component);
-
-      X509Certificate cert =
-          CertificateCodec.getX509Certificate(pemEncodedCert);
-      String certName = String.format(CERT_FILE_NAME_FORMAT,
-          cert.getSerialNumber().toString());
-
-      if(caCert) {
-        certName = CA_CERT_PREFIX + certName;
-        caCertId = cert.getSerialNumber().toString();
-      }
-
-      certificateCodec.writeCertificate(basePath, certName,
-          pemEncodedCert, force);
-      certificateMap.putIfAbsent(cert.getSerialNumber().toString(), cert);
-    } catch (IOException | java.security.cert.CertificateException e) {
-      throw new CertificateException("Error while storing certificate.", e,
-          CERTIFICATE_ERROR);
-    }
-  }
-
-  /**
-   * Stores the trusted chain of certificates for a specific client.
-   *
-   * @param ks - Key Store.
-   * @throws CertificateException - on Error.
-   */
-  @Override
-  public synchronized void storeTrustChain(CertStore ks)
-      throws CertificateException {
-    throw new UnsupportedOperationException("Operation not supported.");
-  }
-
-
-  /**
-   * Stores the trusted chain of certificates for a specific client.
-   *
-   * @param certificates - List of Certificates.
-   * @throws CertificateException - on Error.
-   */
-  @Override
-  public synchronized void storeTrustChain(List<X509Certificate> certificates)
-      throws CertificateException {
-    throw new UnsupportedOperationException("Operation not supported.");
-  }
-
-  /**
-   * Defines 8 cases of initialization.
-   * Each case specifies objects found.
-   * 0. NONE                  Keypair as well as certificate not found.
-   * 1. CERT                  Certificate found but keypair missing.
-   * 2. PUBLIC_KEY            Public key found but private key and
-   *                          certificate is missing.
-   * 3. PUBLICKEY_CERT        Only public key and certificate is present.
-   * 4. PRIVATE_KEY           Only private key is present.
-   * 5. PRIVATEKEY_CERT       Only private key and certificate is present.
-   * 6. PUBLICKEY_PRIVATEKEY  Private and public key were read
-   *                          successfully from configured location but
-   *                          certificate is missing.
-   * 7. ALL                   Keypair as well as certificate is present.
-   *
-   * */
-  protected enum InitCase {
-    NONE,
-    CERT,
-    PUBLIC_KEY,
-    PUBLICKEY_CERT,
-    PRIVATE_KEY,
-    PRIVATEKEY_CERT,
-    PUBLICKEY_PRIVATEKEY,
-    ALL
-  }
-
-  /**
-   *
-   * Initializes the client by performing the following actions.
-   * 1. Create key dir if not created already.
-   * 2. Generate and store a keypair.
-   * 3. Try to recover the public key if the private key and certificate are
-   *    present but the public key is missing.
-   *
-   * Truth table:
-   *  +--------------+-----------------+--------------+----------------+
-   *  | Private Key  | Public Keys     | Certificate  |   Result       |
-   *  +--------------+-----------------+--------------+----------------+
-   *  | False  (0)   | False   (0)     | False  (0)   |   GETCERT  000 |
-   *  | False  (0)   | False   (0)     | True   (1)   |   FAILURE  001 |
-   *  | False  (0)   | True    (1)     | False  (0)   |   FAILURE  010 |
-   *  | False  (0)   | True    (1)     | True   (1)   |   FAILURE  011 |
-   *  | True   (1)   | False   (0)     | False  (0)   |   FAILURE  100 |
-   *  | True   (1)   | False   (0)     | True   (1)   |   SUCCESS  101 |
-   *  | True   (1)   | True    (1)     | False  (0)   |   GETCERT  110 |
-   *  | True   (1)   | True    (1)     | True   (1)   |   SUCCESS  111 |
-   *  +--------------+-----------------+--------------+----------------+
-   *
-   * @return InitResponse
-   * Returns FAILURE in the following cases:
-   * 1. If the private key is missing but the public key or certificate is
-   *    available.
-   * 2. If the public key and certificate are missing.
-   *
-   * Returns SUCCESS in the following cases:
-   * 1. If the keypair as well as the certificate is available.
-   * 2. If the private key and certificate are available and the public key
-   *    is recovered successfully.
-   *
-   * Returns GETCERT in the following cases:
-   * 1. The first time, when neither keypair nor certificate is available;
-   *    a keypair will be generated and stored at the configured location.
-   * 2. When the keypair (public/private key) is available but the
-   *    certificate is missing.
-   *
-   */
-  @Override
-  public synchronized InitResponse init() throws CertificateException {
-    int initCase = 0;
-    PrivateKey pvtKey= getPrivateKey();
-    PublicKey pubKey = getPublicKey();
-    X509Certificate certificate = getCertificate();
-
-    if(pvtKey != null){
-      initCase = initCase | 1<<2;
-    }
-    if(pubKey != null){
-      initCase = initCase | 1<<1;
-    }
-    if(certificate != null){
-      initCase = initCase | 1;
-    }
-    getLogger().info("Certificate client init case: {}", initCase);
-    Preconditions.checkArgument(initCase < 8, "Not a " +
-        "valid case.");
-    InitCase init = InitCase.values()[initCase];
-    return handleCase(init);
-  }
-
-  /**
-   * Default handling of each {@link InitCase}.
-   * */
-  protected InitResponse handleCase(InitCase init)
-      throws CertificateException {
-    switch (init) {
-    case NONE:
-      getLogger().info("Creating keypair for client as keypair and " +
-          "certificate not found.");
-      bootstrapClientKeys();
-      return GETCERT;
-    case CERT:
-      getLogger().error("Private key not found, while certificate is still" +
-          " present. Delete keypair and try again.");
-      return FAILURE;
-    case PUBLIC_KEY:
-      getLogger().error("Found public key but private key and certificate " +
-          "missing.");
-      return FAILURE;
-    case PRIVATE_KEY:
-      getLogger().info("Found private key but public key and certificate " +
-          "is missing.");
-      // TODO: Recovering public key from private might be possible in some
-      //  cases.
-      return FAILURE;
-    case PUBLICKEY_CERT:
-      getLogger().error("Found public key and certificate but private " +
-          "key is missing.");
-      return FAILURE;
-    case PRIVATEKEY_CERT:
-      getLogger().info("Found private key and certificate but public key" +
-          " missing.");
-      if (recoverPublicKey()) {
-        return SUCCESS;
-      } else {
-        getLogger().error("Public key recovery failed.");
-        return FAILURE;
-      }
-    case PUBLICKEY_PRIVATEKEY:
-      getLogger().info("Found private and public key but certificate is" +
-          " missing.");
-      if (validateKeyPair(getPublicKey())) {
-        return GETCERT;
-      } else {
-        getLogger().info("Keypair validation failed.");
-        return FAILURE;
-      }
-    case ALL:
-      getLogger().info("Found certificate file along with KeyPair.");
-      if (validateKeyPairAndCertificate()) {
-        return SUCCESS;
-      } else {
-        return FAILURE;
-      }
-    default:
-      getLogger().error("Unexpected case: {} (private/public/cert)",
-          Integer.toBinaryString(init.ordinal()));
-
-      return FAILURE;
-    }
-  }
-
-  /**
-   * Validate keypair and certificate.
-   * */
-  protected boolean validateKeyPairAndCertificate() throws
-      CertificateException {
-    if (validateKeyPair(getPublicKey())) {
-      getLogger().info("Keypair validated.");
-      // TODO: Certificates cryptographic validity can be checked as well.
-      if (validateKeyPair(getCertificate().getPublicKey())) {
-        getLogger().info("Keypair validated with certificate.");
-      } else {
-        getLogger().error("Stored certificate is generated with different " +
-            "private key.");
-        return false;
-      }
-    } else {
-      getLogger().error("Keypair validation failed.");
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * Tries to recover public key from certificate. Also validates recovered
-   * public key.
-   * */
-  protected boolean recoverPublicKey() throws CertificateException {
-    PublicKey pubKey = getCertificate().getPublicKey();
-    try {
-
-      if(validateKeyPair(pubKey)){
-        keyCodec.writePublicKey(pubKey);
-        publicKey = pubKey;
-      } else {
-        getLogger().error("Can't recover public key " +
-            "corresponding to private key.", BOOTSTRAP_ERROR);
-        return false;
-      }
-    } catch (IOException e) {
-      throw new CertificateException("Error while trying to recover " +
-          "public key.", e, BOOTSTRAP_ERROR);
-    }
-    return true;
-  }
-
-  /**
-   * Validates the public and private key of the certificate client.
-   *
-   * @param pubKey - public key to validate against the private key.
-   * */
-  protected boolean validateKeyPair(PublicKey pubKey)
-      throws CertificateException {
-    byte[] challenge = RandomStringUtils.random(1000).getBytes(
-        StandardCharsets.UTF_8);
-    byte[]  sign = signDataStream(new ByteArrayInputStream(challenge));
-    return verifySignature(challenge, sign, pubKey);
-  }
-
-  /**
-   * Bootstrap the client by creating keypair and storing it in configured
-   * location.
-   * */
-  protected void bootstrapClientKeys() throws CertificateException {
-    Path keyPath = securityConfig.getKeyLocation(component);
-    if (Files.notExists(keyPath)) {
-      try {
-        Files.createDirectories(keyPath);
-      } catch (IOException e) {
-        throw new CertificateException("Error while creating directories " +
-            "for certificate storage.", BOOTSTRAP_ERROR);
-      }
-    }
-    KeyPair keyPair = createKeyPair();
-    privateKey = keyPair.getPrivate();
-    publicKey = keyPair.getPublic();
-  }
-
-  protected KeyPair createKeyPair() throws CertificateException {
-    HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(securityConfig);
-    KeyPair keyPair = null;
-    try {
-      keyPair = keyGenerator.generateKey();
-      keyCodec.writePublicKey(keyPair.getPublic());
-      keyCodec.writePrivateKey(keyPair.getPrivate());
-    } catch (NoSuchProviderException | NoSuchAlgorithmException
-        | IOException e) {
-      getLogger().error("Error while bootstrapping certificate client.", e);
-      throw new CertificateException("Error while bootstrapping certificate.",
-          e, BOOTSTRAP_ERROR);
-    }
-    return keyPair;
-  }
-
-  public Logger getLogger() {
-    return logger;
-  }
-
-  /**
-   * Create a scm security client, used to get SCM signed certificate.
-   *
-   * @return {@link SCMSecurityProtocol}
-   */
-  private static SCMSecurityProtocol getScmSecurityClient(
-      OzoneConfiguration conf) throws IOException {
-    RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
-    InetSocketAddress scmSecurityProtoAdd =
-        HddsUtils.getScmAddressForSecurityProtocol(conf);
-    SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient =
-        new SCMSecurityProtocolClientSideTranslatorPB(
-            RPC.getProxy(SCMSecurityProtocolPB.class, scmVersion,
-                scmSecurityProtoAdd, UserGroupInformation.getCurrentUser(),
-                conf, NetUtils.getDefaultSocketFactory(conf),
-                Client.getRpcTimeout(conf)));
-    return scmSecurityClient;
-  }
-}
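For reviewers tracing the init() logic removed above: the following is a minimal, standalone sketch (plain JDK, none of the deleted Ozone classes) of how the three on-disk artifacts are folded into a bit flag whose value indexes the InitCase ordinals, matching the truth table in the javadoc.

    // Hedged sketch: bit 2 = private key, bit 1 = public key, bit 0 = cert.
    public class InitCaseDemo {
      enum InitCase { NONE, CERT, PUBLIC_KEY, PUBLICKEY_CERT,
          PRIVATE_KEY, PRIVATEKEY_CERT, PUBLICKEY_PRIVATEKEY, ALL }

      static InitCase caseOf(boolean priv, boolean pub, boolean cert) {
        int c = 0;
        if (priv) { c |= 1 << 2; }
        if (pub)  { c |= 1 << 1; }
        if (cert) { c |= 1; }
        return InitCase.values()[c];  // ordinal order matches the truth table
      }

      public static void main(String[] args) {
        // 110 -> PUBLICKEY_PRIVATEKEY: keypair present, cert missing (GETCERT)
        System.out.println(caseOf(true, true, false));
        // 101 -> PRIVATEKEY_CERT: public key recoverable from the certificate
        System.out.println(caseOf(true, false, true));
      }
    }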
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
deleted file mode 100644
index cb3ce75..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.client;
-
-import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
-
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.RECOVER;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS;
-
-/**
- * Certificate client for OzoneManager.
- */
-public class OMCertificateClient extends DefaultCertificateClient {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMCertificateClient.class);
-
-  public static final String COMPONENT_NAME = "om";
-
-  public OMCertificateClient(SecurityConfig securityConfig,
-      String certSerialId) {
-    super(securityConfig, LOG, certSerialId, COMPONENT_NAME);
-  }
-
-  public OMCertificateClient(SecurityConfig securityConfig) {
-    super(securityConfig, LOG, null, COMPONENT_NAME);
-  }
-
-  protected InitResponse handleCase(InitCase init) throws
-      CertificateException {
-    switch (init) {
-    case NONE:
-      LOG.info("Creating keypair for client as keypair and certificate not " +
-          "found.");
-      bootstrapClientKeys();
-      return GETCERT;
-    case CERT:
-      LOG.error("Private key not found, while certificate is still present." +
-          "Delete keypair and try again.");
-      return FAILURE;
-    case PUBLIC_KEY:
-      LOG.error("Found public key but private key and certificate missing.");
-      return FAILURE;
-    case PRIVATE_KEY:
-      LOG.info("Found private key but public key and certificate is missing.");
-      // TODO: Recovering public key from private might be possible in some
-      //  cases.
-      return FAILURE;
-    case PUBLICKEY_CERT:
-      LOG.error("Found public key and certificate but private key is " +
-          "missing.");
-      return FAILURE;
-    case PRIVATEKEY_CERT:
-      LOG.info("Found private key and certificate but public key missing.");
-      if (recoverPublicKey()) {
-        return SUCCESS;
-      } else {
-        LOG.error("Public key recovery failed.");
-        return FAILURE;
-      }
-    case PUBLICKEY_PRIVATEKEY:
-      LOG.info("Found private and public key but certificate is missing.");
-      if (validateKeyPair(getPublicKey())) {
-        return RECOVER;
-      } else {
-        LOG.error("Keypair validation failed.");
-        return FAILURE;
-      }
-    case ALL:
-      LOG.info("Found certificate file along with KeyPair.");
-      if (validateKeyPairAndCertificate()) {
-        return SUCCESS;
-      } else {
-        return FAILURE;
-      }
-    default:
-      LOG.error("Unexpected case: {} (private/public/cert)",
-          Integer.toBinaryString(init.ordinal()));
-      return FAILURE;
-    }
-  }
-
-  /**
-   * Returns a CSR builder that can be used to create a Certificate signing
-   * request.
-   *
-   * @return CertificateSignRequest.Builder
-   */
-  @Override
-  public CertificateSignRequest.Builder getCSRBuilder()
-      throws CertificateException {
-    return super.getCSRBuilder()
-        .setDigitalEncryption(true)
-        .setDigitalSignature(true);
-  }
-
-
-  public Logger getLogger() {
-    return LOG;
-  }
-}
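The sign/verify pair deleted in DefaultCertificateClient above reduces to the standard java.security.Signature flow. A self-contained sketch follows; "SHA256withRSA" is an assumption standing in for securityConfig.getSignatureAlgo(), which the removed code reads from configuration.

    import java.nio.charset.StandardCharsets;
    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.Signature;

    public class SignVerifyDemo {
      public static void main(String[] args) throws Exception {
        KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
        gen.initialize(2048);
        KeyPair pair = gen.generateKeyPair();

        byte[] data = "challenge".getBytes(StandardCharsets.UTF_8);

        Signature signer = Signature.getInstance("SHA256withRSA");
        signer.initSign(pair.getPrivate());     // sign with the private key
        signer.update(data);
        byte[] signature = signer.sign();

        Signature verifier = Signature.getInstance("SHA256withRSA");
        verifier.initVerify(pair.getPublic());  // verify with the public key
        verifier.update(data);
        System.out.println("verified: " + verifier.verify(signature));
      }
    }

This is also the shape of validateKeyPair above: sign a random challenge with the private key, then verify it with the candidate public key.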
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java
deleted file mode 100644
index dea609b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Classes related to creating and using certificates.
- */
-package org.apache.hadoop.hdds.security.x509.certificate.client;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java
deleted file mode 100644
index 2c8721b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.utils;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
-import org.bouncycastle.openssl.jcajce.JcaPEMWriter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.StringWriter;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.nio.file.attribute.PosixFilePermission;
-import java.security.cert.CertificateEncodingException;
-import java.security.cert.CertificateException;
-import java.security.cert.CertificateFactory;
-import java.security.cert.X509Certificate;
-import java.util.Set;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE;
-import static java.nio.file.attribute.PosixFilePermission.OWNER_READ;
-import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE;
-
-/**
- * A class used to read and write X.509 certificates as PEM encoded streams.
- */
-public class CertificateCodec {
-  public static final String BEGIN_CERT = "-----BEGIN CERTIFICATE-----";
-  public static final String END_CERT = "-----END CERTIFICATE-----";
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(CertificateCodec.class);
-  private static final JcaX509CertificateConverter CERTIFICATE_CONVERTER
-      = new JcaX509CertificateConverter();
-  private final SecurityConfig securityConfig;
-  private final Path location;
-  private Set<PosixFilePermission> permissionSet =
-      Stream.of(OWNER_READ, OWNER_WRITE, OWNER_EXECUTE)
-          .collect(Collectors.toSet());
-  /**
-   * Creates a CertificateCodec with component name.
-   *
-   * @param config - Security Config.
-   * @param component - Component String.
-   */
-  public CertificateCodec(SecurityConfig config, String component) {
-    this.securityConfig = config;
-    this.location = securityConfig.getCertificateLocation(component);
-  }
-
-  /**
-   * Returns an X509 Certificate from the Certificate Holder.
-   *
-   * @param holder - Holder
-   * @return X509Certificate.
-   * @throws CertificateException - on Error.
-   */
-  public static X509Certificate getX509Certificate(X509CertificateHolder holder)
-      throws CertificateException {
-    return CERTIFICATE_CONVERTER.getCertificate(holder);
-  }
-
-  /**
-   * Returns the Certificate as a PEM encoded String.
-   *
-   * @param x509CertHolder - X.509 Certificate Holder.
-   * @return PEM Encoded Certificate String.
-   * @throws SCMSecurityException - On failure to create a PEM String.
-   */
-  public static String getPEMEncodedString(X509CertificateHolder x509CertHolder)
-      throws SCMSecurityException {
-    try {
-      return getPEMEncodedString(getX509Certificate(x509CertHolder));
-    } catch (CertificateException exp) {
-      throw new SCMSecurityException(exp);
-    }
-  }
-
-  /**
-   * Returns the Certificate as a PEM encoded String.
-   *
-   * @param certificate - X.509 Certificate.
-   * @return PEM Encoded Certificate String.
-   * @throws SCMSecurityException - On failure to create a PEM String.
-   */
-  public static String getPEMEncodedString(X509Certificate certificate)
-      throws SCMSecurityException {
-    try {
-      StringWriter stringWriter = new StringWriter();
-      try (JcaPEMWriter pemWriter = new JcaPEMWriter(stringWriter)) {
-        pemWriter.writeObject(certificate);
-      }
-      return stringWriter.toString();
-    } catch (IOException e) {
-      LOG.error("Error in encoding certificate." + certificate
-          .getSubjectDN().toString(), e);
-      throw new SCMSecurityException("PEM Encoding failed for certificate." +
-          certificate.getSubjectDN().toString(), e);
-    }
-  }
-
-  /**
-   * Gets the X.509 Certificate from PEM encoded String.
-   *
-   * @param pemEncodedString - PEM encoded String.
-   * @return X509Certificate  - Certificate.
-   * @throws CertificateException - Thrown on Failure.
-   * @throws IOException          - Thrown on Failure.
-   */
-  public static X509Certificate getX509Certificate(String pemEncodedString)
-      throws CertificateException, IOException {
-    CertificateFactory fact = CertificateFactory.getInstance("X.509");
-    try (InputStream input = IOUtils.toInputStream(pemEncodedString, UTF_8)) {
-      return (X509Certificate) fact.generateCertificate(input);
-    }
-  }
-
-  /**
-   * Get Certificate location.
-   *
-   * @return Path
-   */
-  public Path getLocation() {
-    return location;
-  }
-
-  /**
-   * Gets the X.509 Certificate from PEM encoded String.
-   *
-   * @param pemEncodedString - PEM encoded String.
-   * @return X509Certificate  - Certificate.
-   * @throws CertificateException - Thrown on Failure.
-   * @throws IOException          - Thrown on Failure.
-   */
-  public static X509Certificate getX509Cert(String pemEncodedString)
-      throws CertificateException, IOException {
-    CertificateFactory fact = CertificateFactory.getInstance("X.509");
-    try (InputStream input = IOUtils.toInputStream(pemEncodedString, UTF_8)) {
-      return (X509Certificate) fact.generateCertificate(input);
-    }
-  }
-
-  /**
-   * Write the Certificate to the location pointed to by the configs.
-   *
-   * @param xCertificate - Certificate to write.
-   * @throws SCMSecurityException - on Error.
-   * @throws IOException - on Error.
-   */
-  public void writeCertificate(X509CertificateHolder xCertificate)
-      throws SCMSecurityException, IOException {
-    String pem = getPEMEncodedString(xCertificate);
-    writeCertificate(location.toAbsolutePath(),
-        this.securityConfig.getCertificateFileName(), pem, false);
-  }
-
-  /**
-   * Write the Certificate to the specific file.
-   *
-   * @param xCertificate - Certificate to write.
-   * @param fileName - file name to write to.
-   * @param overwrite - boolean value, true means overwrite an existing
-   * certificate.
-   * @throws SCMSecurityException - On Error.
-   * @throws IOException          - On Error.
-   */
-  public void writeCertificate(X509CertificateHolder xCertificate,
-      String fileName, boolean overwrite)
-      throws SCMSecurityException, IOException {
-    String pem = getPEMEncodedString(xCertificate);
-    writeCertificate(location.toAbsolutePath(), fileName, pem, overwrite);
-  }
-
-  /**
-   * Helper function that writes data to the file.
-   *
-   * @param basePath - Base Path where the file needs to be written.
-   * @param fileName - Certificate file name.
-   * @param pemEncodedCertificate - pemEncoded Certificate file.
-   * @param force - Overwrite if the file exists.
-   * @throws IOException - on Error.
-   */
-  public synchronized void writeCertificate(Path basePath, String fileName,
-      String pemEncodedCertificate, boolean force)
-      throws IOException {
-    File certificateFile =
-        Paths.get(basePath.toString(), fileName).toFile();
-    if (certificateFile.exists() && !force) {
-      throw new SCMSecurityException("Specified certificate file already " +
-          "exists.Please use force option if you want to overwrite it.");
-    }
-    if (!basePath.toFile().exists()) {
-      if (!basePath.toFile().mkdirs()) {
-        LOG.error("Unable to create file path. Path: {}", basePath);
-        throw new IOException("Creation of the directories failed."
-            + basePath.toString());
-      }
-    }
-    try (FileOutputStream file = new FileOutputStream(certificateFile)) {
-      IOUtils.write(pemEncodedCertificate, file, UTF_8);
-    }
-
-    Files.setPosixFilePermissions(certificateFile.toPath(), permissionSet);
-  }
-
-  /**
-   * Returns a default certificate using the default paths for this component.
-   *
-   * @return X509CertificateHolder.
-   * @throws SCMSecurityException - on Error.
-   * @throws CertificateException - on Error.
-   * @throws IOException          - on Error.
-   */
-  public X509CertificateHolder readCertificate() throws
-      CertificateException, IOException {
-    return readCertificate(this.location.toAbsolutePath(),
-        this.securityConfig.getCertificateFileName());
-  }
-
-  /**
-   * Returns the certificate from the specific PEM encoded file.
-   *
-   * @param basePath - base path
-   * @param fileName - fileName
-   * @return X509 Certificate
-   * @throws IOException          - on Error.
-   * @throws SCMSecurityException - on Error.
-   * @throws CertificateException - on Error.
-   */
-  public synchronized X509CertificateHolder readCertificate(Path basePath,
-      String fileName) throws IOException, CertificateException {
-    File certificateFile = Paths.get(basePath.toString(), fileName).toFile();
-    return getX509CertificateHolder(certificateFile);
-  }
-
-  /**
-   * Helper function to read certificate.
-   *
-   * @param certificateFile - Full path to certificate file.
-   * @return X509CertificateHolder
-   * @throws IOException          - On Error.
-   * @throws CertificateException - On Error.
-   */
-  private X509CertificateHolder getX509CertificateHolder(File certificateFile)
-      throws IOException, CertificateException {
-    if (!certificateFile.exists()) {
-      throw new IOException("Unable to find the requested certificate. Path: "
-          + certificateFile.toString());
-    }
-    CertificateFactory fact = CertificateFactory.getInstance("X.509");
-    try (FileInputStream is = new FileInputStream(certificateFile)) {
-      return getCertificateHolder(
-          (X509Certificate) fact.generateCertificate(is));
-    }
-  }
-
-  /**
-   * Returns the Certificate holder from the X509Certificate class.
-   *
-   * @param x509cert - Certificate class.
-   * @return X509CertificateHolder
-   * @throws CertificateEncodingException - on Error.
-   * @throws IOException                  - on Error.
-   */
-  public X509CertificateHolder getCertificateHolder(X509Certificate x509cert)
-      throws CertificateEncodingException, IOException {
-    return new X509CertificateHolder(x509cert.getEncoded());
-  }
-}
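CertificateCodec above pairs BouncyCastle's JcaPEMWriter for encoding with the JDK CertificateFactory for decoding. Below is a minimal round-trip sketch under those same APIs; the input path "cert.pem" is a made-up placeholder, not a path the project uses.

    import java.io.FileInputStream;
    import java.io.StringWriter;
    import java.security.cert.CertificateFactory;
    import java.security.cert.X509Certificate;
    import org.bouncycastle.openssl.jcajce.JcaPEMWriter;

    public class PemRoundTrip {
      public static void main(String[] args) throws Exception {
        // PEM -> object, as in readCertificate/getX509Certificate above.
        CertificateFactory fact = CertificateFactory.getInstance("X.509");
        X509Certificate cert;
        try (FileInputStream in = new FileInputStream("cert.pem")) {
          cert = (X509Certificate) fact.generateCertificate(in);
        }

        // Object -> PEM, as in getPEMEncodedString above.
        StringWriter out = new StringWriter();
        try (JcaPEMWriter pemWriter = new JcaPEMWriter(out)) {
          pemWriter.writeObject(cert);
        }
        System.out.println(out);  // -----BEGIN CERTIFICATE----- ...
      }
    }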
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java
deleted file mode 100644
index 4971d4a..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Certificate Utils.
- */
-package org.apache.hadoop.hdds.security.x509.certificate.utils;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
deleted file mode 100644
index 28f853a..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.security.x509.certificates.utils;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
-import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil;
-import org.apache.logging.log4j.util.Strings;
-import org.bouncycastle.asn1.DEROctetString;
-import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
-import org.bouncycastle.asn1.x500.X500Name;
-import org.bouncycastle.asn1.x509.BasicConstraints;
-import org.bouncycastle.asn1.x509.Extension;
-import org.bouncycastle.asn1.x509.Extensions;
-import org.bouncycastle.asn1.x509.GeneralName;
-import org.bouncycastle.asn1.x509.GeneralNames;
-import org.bouncycastle.asn1.x509.KeyUsage;
-import org.bouncycastle.openssl.jcajce.JcaPEMWriter;
-import org.bouncycastle.operator.ContentSigner;
-import org.bouncycastle.operator.OperatorCreationException;
-import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-import org.bouncycastle.pkcs.PKCS10CertificationRequestBuilder;
-import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder;
-import org.bouncycastle.util.io.pem.PemObject;
-import org.bouncycastle.util.io.pem.PemReader;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.security.KeyPair;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
-
-/**
- * A certificate sign request object that wraps operations to build a
- * PKCS10CertificationRequest for the CertificateServer.
- */
-public final class CertificateSignRequest {
-  private final KeyPair keyPair;
-  private final SecurityConfig config;
-  private final Extensions extensions;
-  private String subject;
-  private String clusterID;
-  private String scmID;
-
-  /**
-   * Private Ctor for CSR.
-   *
-   * @param subject - Subject
-   * @param scmID - SCM ID
-   * @param clusterID - Cluster ID
-   * @param keyPair - KeyPair
-   * @param config - SCM Config
-   * @param extensions - CSR extensions
-   */
-  private CertificateSignRequest(String subject, String scmID, String clusterID,
-                                 KeyPair keyPair, SecurityConfig config,
-                                 Extensions extensions) {
-    this.subject = subject;
-    this.clusterID = clusterID;
-    this.scmID = scmID;
-    this.keyPair = keyPair;
-    this.config = config;
-    this.extensions = extensions;
-  }
-
-  private PKCS10CertificationRequest generateCSR() throws
-      OperatorCreationException {
-    X500Name dnName = SecurityUtil.getDistinguishedName(subject, scmID,
-        clusterID);
-    PKCS10CertificationRequestBuilder p10Builder =
-        new JcaPKCS10CertificationRequestBuilder(dnName, keyPair.getPublic());
-
-    ContentSigner contentSigner =
-        new JcaContentSignerBuilder(config.getSignatureAlgo())
-            .setProvider(config.getProvider())
-            .build(keyPair.getPrivate());
-
-    if (extensions != null) {
-      p10Builder.addAttribute(
-          PKCSObjectIdentifiers.pkcs_9_at_extensionRequest, extensions);
-    }
-    return p10Builder.build(contentSigner);
-  }
-  public static String getEncodedString(PKCS10CertificationRequest request)
-      throws IOException {
-    PemObject pemObject =
-        new PemObject("CERTIFICATE REQUEST", request.getEncoded());
-    StringWriter str = new StringWriter();
-    try(JcaPEMWriter pemWriter = new JcaPEMWriter(str)) {
-      pemWriter.writeObject(pemObject);
-    }
-    return str.toString();
-  }
-
-
-  /**
-   * Gets a CertificateRequest Object from PEM encoded CSR.
-   *
-   * @param csr - PEM Encoded Certificate Request String.
-   * @return PKCS10CertificationRequest
-   * @throws IOException - On Error.
-   */
-  public static PKCS10CertificationRequest getCertificationRequest(String csr)
-      throws IOException {
-    try (PemReader reader = new PemReader(new StringReader(csr))) {
-      PemObject pemObject = reader.readPemObject();
-      if(pemObject.getContent() == null) {
-        throw new SCMSecurityException("Invalid Certificate signing request");
-      }
-      return new PKCS10CertificationRequest(pemObject.getContent());
-    }
-  }
-
-  /**
-   * Builder class for Certificate Sign Request.
-   */
-  public static class Builder {
-    private String subject;
-    private String clusterID;
-    private String scmID;
-    private KeyPair key;
-    private SecurityConfig config;
-    private List<GeneralName> altNames;
-    private Boolean ca = false;
-    private boolean digitalSignature;
-    private boolean digitalEncryption;
-
-    public CertificateSignRequest.Builder setConfiguration(
-        Configuration configuration) {
-      this.config = new SecurityConfig(configuration);
-      return this;
-    }
-
-    public CertificateSignRequest.Builder setKey(KeyPair keyPair) {
-      this.key = keyPair;
-      return this;
-    }
-
-    public CertificateSignRequest.Builder setSubject(String subjectString) {
-      this.subject = subjectString;
-      return this;
-    }
-
-    public CertificateSignRequest.Builder setClusterID(String s) {
-      this.clusterID = s;
-      return this;
-    }
-
-    public CertificateSignRequest.Builder setScmID(String s) {
-      this.scmID = s;
-      return this;
-    }
-
-    public Builder setDigitalSignature(boolean dSign) {
-      this.digitalSignature = dSign;
-      return this;
-    }
-
-    public Builder setDigitalEncryption(boolean dEncryption) {
-      this.digitalEncryption = dEncryption;
-      return this;
-    }
-
-    // Support SAN extension with DNS and RFC822 Name;
-    // other name types will be added as needed.
-    public CertificateSignRequest.Builder addDnsName(String dnsName) {
-      Preconditions.checkNotNull(dnsName, "dnsName cannot be null");
-      this.addAltName(GeneralName.dNSName, dnsName);
-      return this;
-    }
-
-    // IP address is subject to change, so it is optional for now.
-    public CertificateSignRequest.Builder addIpAddress(String ip) {
-      Preconditions.checkNotNull(ip, "Ip address cannot be null");
-      this.addAltName(GeneralName.iPAddress, ip);
-      return this;
-    }
-
-    private CertificateSignRequest.Builder addAltName(int tag, String name) {
-      if (altNames == null) {
-        altNames = new ArrayList<>();
-      }
-      altNames.add(new GeneralName(tag, name));
-      return this;
-    }
-
-    public CertificateSignRequest.Builder setCA(Boolean isCA) {
-      this.ca = isCA;
-      return this;
-    }
-
-    private Extension getKeyUsageExtension() throws IOException {
-      int keyUsageFlag = KeyUsage.keyAgreement;
-      if(digitalEncryption){
-        keyUsageFlag |= KeyUsage.keyEncipherment | KeyUsage.dataEncipherment;
-      }
-      if(digitalSignature) {
-        keyUsageFlag |= KeyUsage.digitalSignature;
-      }
-
-      if (ca) {
-        keyUsageFlag |= KeyUsage.keyCertSign | KeyUsage.cRLSign;
-      }
-      KeyUsage keyUsage = new KeyUsage(keyUsageFlag);
-      return new Extension(Extension.keyUsage, true,
-          new DEROctetString(keyUsage));
-    }
-
-    private Optional<Extension> getSubjectAltNameExtension() throws
-        IOException {
-      if (altNames != null) {
-        return Optional.of(new Extension(Extension.subjectAlternativeName,
-            false, new DEROctetString(new GeneralNames(
-            altNames.toArray(new GeneralName[altNames.size()])))));
-      }
-      return Optional.empty();
-    }
-
-    private Extension getBasicExtension() throws IOException {
-      // Not setting pathLenConstraint means no limit is imposed.
-      return new Extension(Extension.basicConstraints,
-          true, new DEROctetString(new BasicConstraints(ca)));
-    }
-
-    private Extensions createExtensions() throws IOException {
-      List<Extension> extensions = new ArrayList<>();
-
-      // Add basic extension
-      if(ca) {
-        extensions.add(getBasicExtension());
-      }
-
-      // Add key usage extension
-      extensions.add(getKeyUsageExtension());
-
-      // Add subject alternate name extension
-      Optional<Extension> san = getSubjectAltNameExtension();
-      if (san.isPresent()) {
-        extensions.add(san.get());
-      }
-
-      return new Extensions(
-          extensions.toArray(new Extension[extensions.size()]));
-    }
-
-    public PKCS10CertificationRequest build() throws SCMSecurityException {
-      Preconditions.checkNotNull(key, "KeyPair cannot be null");
-      Preconditions.checkArgument(Strings.isNotBlank(subject), "Subject " +
-          "cannot be blank");
-
-      try {
-        CertificateSignRequest csr = new CertificateSignRequest(subject, scmID,
-            clusterID, key, config, createExtensions());
-        return csr.generateCSR();
-      } catch (IOException ioe) {
-        throw new CertificateException(String.format("Unable to create " +
-            "extension for certificate sign request for %s.", SecurityUtil
-            .getDistinguishedName(subject, scmID, clusterID)), ioe);
-      } catch (OperatorCreationException ex) {
-        throw new CertificateException(String.format("Unable to create " +
-            "certificate sign request for %s.", SecurityUtil
-            .getDistinguishedName(subject, scmID, clusterID)),
-            ex);
-      }
-    }
-  }
-}
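A hedged usage sketch of the Builder API deleted above (not taken from the project's own tests): a throwaway RSA keypair and a default Hadoop Configuration stand in for keys from HDDSKeyGenerator and a real OzoneConfiguration, and all names and ids are placeholders.

    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
    import org.bouncycastle.pkcs.PKCS10CertificationRequest;

    public class CsrBuilderDemo {
      public static void main(String[] args) throws Exception {
        KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
        gen.initialize(2048);
        KeyPair keyPair = gen.generateKeyPair();

        PKCS10CertificationRequest csr = new CertificateSignRequest.Builder()
            .setSubject("datanode-1")               // hypothetical subject
            .setScmID("scm-1")                      // hypothetical SCM id
            .setClusterID("cluster-1")              // hypothetical cluster id
            .setKey(keyPair)
            .setConfiguration(new Configuration())  // real callers pass an OzoneConfiguration
            .setDigitalSignature(true)
            .addDnsName("datanode-1.example.com")   // SAN: DNS name
            .addIpAddress("10.0.0.1")               // SAN: IP address
            .build();

        // PEM encode the request, as DefaultCertificateClient callers do.
        System.out.println(CertificateSignRequest.getEncodedString(csr));
      }
    }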
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java
deleted file mode 100644
index 1fd6d7c..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificates.utils;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
-import org.apache.hadoop.util.Time;
-import org.apache.logging.log4j.util.Strings;
-import org.bouncycastle.asn1.DEROctetString;
-import org.bouncycastle.asn1.x500.X500Name;
-import org.bouncycastle.asn1.x509.BasicConstraints;
-import org.bouncycastle.asn1.x509.Extension;
-import org.bouncycastle.asn1.x509.KeyUsage;
-import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo;
-import org.bouncycastle.cert.CertIOException;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.cert.X509v3CertificateBuilder;
-import org.bouncycastle.operator.ContentSigner;
-import org.bouncycastle.operator.OperatorCreationException;
-import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
-
-import java.io.IOException;
-import java.math.BigInteger;
-import java.security.KeyPair;
-import java.time.Duration;
-import java.time.LocalDate;
-import java.time.LocalTime;
-import java.time.ZoneOffset;
-import java.util.Date;
-
-/**
- * A self-signed certificate with the CA basic constraint set can be used to
- * bootstrap a certificate infrastructure when no external certificate is
- * provided.
- */
-public final class SelfSignedCertificate {
-  private static final String NAME_FORMAT = "CN=%s,OU=%s,O=%s";
-  private String subject;
-  private String clusterID;
-  private String scmID;
-  private LocalDate beginDate;
-  private LocalDate endDate;
-  private KeyPair key;
-  private SecurityConfig config;
-
-  /**
-   * Private Ctor invoked only via Builder Interface.
-   *
-   * @param subject - Subject
-   * @param scmID - SCM ID
-   * @param clusterID - Cluster ID
-   * @param beginDate - NotBefore
-   * @param endDate - Not After
-   * @param configuration - SCM Config
-   * @param keyPair - KeyPair
-   */
-  private SelfSignedCertificate(String subject, String scmID, String clusterID,
-      LocalDate beginDate, LocalDate endDate, SecurityConfig configuration,
-      KeyPair keyPair) {
-    this.subject = subject;
-    this.clusterID = clusterID;
-    this.scmID = scmID;
-    this.beginDate = beginDate;
-    this.endDate = endDate;
-    config = configuration;
-    this.key = keyPair;
-  }
-
-  @VisibleForTesting
-  public static String getNameFormat() {
-    return NAME_FORMAT;
-  }
-
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  private X509CertificateHolder generateCertificate(boolean isCA)
-      throws OperatorCreationException, IOException {
-    // For the Root Certificate we form the name from Subject, SCM ID and
-    // Cluster ID.
-    String dnName = String.format(getNameFormat(), subject, scmID, clusterID);
-    X500Name name = new X500Name(dnName);
-    byte[] encoded = key.getPublic().getEncoded();
-    SubjectPublicKeyInfo publicKeyInfo =
-        SubjectPublicKeyInfo.getInstance(encoded);
-
-
-    ContentSigner contentSigner =
-        new JcaContentSignerBuilder(config.getSignatureAlgo())
-            .setProvider(config.getProvider()).build(key.getPrivate());
-
-    // Please note: since this is a root certificate we use "ONE" as the
-    // serial number. Also note that we skip enforcing locale or UTC; we are
-    // trying to operate at the day level, hence the time zone is also
-    // skipped for now.
-    BigInteger serial = BigInteger.ONE;
-    if (!isCA) {
-      serial = new BigInteger(Long.toString(Time.monotonicNow()));
-    }
-
-    ZoneOffset zoneOffset =
-        beginDate.atStartOfDay(ZoneOffset.systemDefault()).getOffset();
-
-    // Valid from the Start of the day when we generate this Certificate.
-    Date validFrom =
-        Date.from(beginDate.atTime(LocalTime.MIN).toInstant(zoneOffset));
-
-    // Valid till end day finishes.
-    Date validTill =
-        Date.from(endDate.atTime(LocalTime.MAX).toInstant(zoneOffset));
-
-    X509v3CertificateBuilder builder = new X509v3CertificateBuilder(name,
-        serial, validFrom, validTill, name, publicKeyInfo);
-
-    if (isCA) {
-      builder.addExtension(Extension.basicConstraints, true,
-          new BasicConstraints(true));
-      int keyUsageFlag = KeyUsage.keyCertSign | KeyUsage.cRLSign;
-      KeyUsage keyUsage = new KeyUsage(keyUsageFlag);
-      builder.addExtension(Extension.keyUsage, false, keyUsage);
-    }
-    return builder.build(contentSigner);
-  }
-
-  /**
-   * Builder class for Root Certificates.
-   */
-  public static class Builder {
-    private String subject;
-    private String clusterID;
-    private String scmID;
-    private LocalDate beginDate;
-    private LocalDate endDate;
-    private KeyPair key;
-    private SecurityConfig config;
-    private boolean isCA;
-
-    public Builder setConfiguration(Configuration configuration) {
-      this.config = new SecurityConfig(configuration);
-      return this;
-    }
-
-    public Builder setKey(KeyPair keyPair) {
-      this.key = keyPair;
-      return this;
-    }
-
-    public Builder setSubject(String subjectString) {
-      this.subject = subjectString;
-      return this;
-    }
-
-    public Builder setClusterID(String s) {
-      this.clusterID = s;
-      return this;
-    }
-
-    public Builder setScmID(String s) {
-      this.scmID = s;
-      return this;
-    }
-
-    public Builder setBeginDate(LocalDate date) {
-      this.beginDate = date;
-      return this;
-    }
-
-    public Builder setEndDate(LocalDate date) {
-      this.endDate = date;
-      return this;
-    }
-
-    public Builder makeCA() {
-      isCA = true;
-      return this;
-    }
-
-    public X509CertificateHolder build()
-        throws SCMSecurityException, IOException {
-      Preconditions.checkNotNull(key, "Key cannot be null");
-      Preconditions.checkArgument(Strings.isNotBlank(subject), "Subject " +
-          "cannot be blank");
-      Preconditions.checkArgument(Strings.isNotBlank(clusterID), "Cluster ID " +
-          "cannot be blank");
-      Preconditions.checkArgument(Strings.isNotBlank(scmID), "SCM ID cannot " +
-          "be blank");
-
-      Preconditions.checkArgument(beginDate.isBefore(endDate), "Certificate " +
-          "begin date should be before end date");
-
-      // We just read the beginDate and endDate as the start of the day and
-      // confirm that we do not violate the maxDuration config.
-      Duration certDuration = Duration.between(beginDate.atStartOfDay(),
-          endDate.atStartOfDay());
-      Duration maxDuration = config.getMaxCertificateDuration();
-      if (certDuration.compareTo(maxDuration) > 0) {
-        throw new SCMSecurityException("The cert duration violates the " +
-            "maximum configured value. Please check the hdds.x509.max" +
-            ".duration config key. Current Value: " + certDuration +
-            " config: " + maxDuration);
-      }
-
-      SelfSignedCertificate rootCertificate =
-          new SelfSignedCertificate(this.subject,
-              this.scmID, this.clusterID, this.beginDate, this.endDate,
-              this.config, key);
-      try {
-        return rootCertificate.generateCertificate(isCA);
-      } catch (OperatorCreationException | CertIOException e) {
-        throw new CertificateException("Unable to create root certificate.",
-            e.getCause());
-      }
-    }
-  }
-}
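
The class above is essentially a thin wrapper over X509v3CertificateBuilder. A self-contained sketch of the same pattern (serial number, validity window, subject equal to issuer) follows; the names and the one-year validity are illustrative assumptions.

    import java.math.BigInteger;
    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.util.Date;
    import org.bouncycastle.asn1.x500.X500Name;
    import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo;
    import org.bouncycastle.cert.X509CertificateHolder;
    import org.bouncycastle.cert.X509v3CertificateBuilder;
    import org.bouncycastle.operator.ContentSigner;
    import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;

    public final class SelfSignedSketch {
      public static void main(String[] args) throws Exception {
        KeyPair keys = KeyPairGenerator.getInstance("RSA").generateKeyPair();
        // Self-signed: the same name is used as both issuer and subject.
        X500Name name = new X500Name("CN=scm,OU=scm-1,O=cluster-1");
        Date notBefore = new Date();
        Date notAfter = new Date(notBefore.getTime() + 365L * 24 * 3600 * 1000);
        X509v3CertificateBuilder builder = new X509v3CertificateBuilder(
            name, BigInteger.ONE, notBefore, notAfter, name,
            SubjectPublicKeyInfo.getInstance(keys.getPublic().getEncoded()));
        ContentSigner signer = new JcaContentSignerBuilder("SHA256withRSA")
            .build(keys.getPrivate());
        X509CertificateHolder cert = builder.build(signer);
        System.out.println(cert.getSubject());
      }
    }
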
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/package-info.java
deleted file mode 100644
index e7110e3..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- Helpers for Certificates.
- */
-package org.apache.hadoop.hdds.security.x509.certificates.utils;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/CertificateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/CertificateException.java
deleted file mode 100644
index b312128..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/CertificateException.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.exceptions;
-
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-
-/**
- * Certificate Exceptions from the SCM Security layer.
- */
-public class CertificateException extends SCMSecurityException {
-
-  private ErrorCode errorCode;
-  /**
-   * Ctor.
-   * @param message - Error Message.
-   */
-  public CertificateException(String message) {
-    super(message);
-  }
-
-  /**
-   * Ctor.
-   * @param message - Message.
-   * @param cause  - Actual cause.
-   */
-  public CertificateException(String message, Throwable cause) {
-    super(message, cause);
-  }
-
-  /**
-   * Ctor.
-   * @param message - Message.
-   * @param cause  - Actual cause.
-   * @param errorCode - Error code.
-   */
-  public CertificateException(String message, Throwable cause,
-      ErrorCode errorCode) {
-    super(message, cause);
-    this.errorCode = errorCode;
-  }
-
-  /**
-   * Ctor.
-   * @param message - Message.
-   * @param errorCode - Error code.
-   */
-  public CertificateException(String message, ErrorCode errorCode) {
-    super(message);
-    this.errorCode = errorCode;
-  }
-
-  /**
-   * Ctor.
-   * @param cause - Base Exception.
-   */
-  public CertificateException(Throwable cause) {
-    super(cause);
-  }
-
-  /**
-   * Error codes to make it easy to decode these exceptions.
-   */
-  public enum ErrorCode {
-    KEYSTORE_ERROR,
-    CRYPTO_SIGN_ERROR,
-    CERTIFICATE_ERROR,
-    BOOTSTRAP_ERROR,
-    CSR_ERROR,
-    CRYPTO_SIGNATURE_VERIFICATION_ERROR,
-    CERTIFICATE_NOT_FOUND_ERROR
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/package-info.java
deleted file mode 100644
index afcc474..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Exceptions thrown by X.509 security classes.
- */
-package org.apache.hadoop.hdds.security.x509.exceptions;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java
deleted file mode 100644
index 640f5ca..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.security.x509.keys;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.security.KeyPair;
-import java.security.KeyPairGenerator;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-
-/**
- * A class to generate Key Pair for use with Certificates.
- */
-public class HDDSKeyGenerator {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(HDDSKeyGenerator.class);
-  private final SecurityConfig securityConfig;
-
-  /**
-   * Constructor for HDDSKeyGenerator.
-   *
-   * @param configuration - config
-   */
-  public HDDSKeyGenerator(Configuration configuration) {
-    this.securityConfig = new SecurityConfig(configuration);
-  }
-
-  /**
-   * Constructor that takes a SecurityConfig as the Argument.
-   *
-   * @param config - SecurityConfig
-   */
-  public HDDSKeyGenerator(SecurityConfig config) {
-    this.securityConfig = config;
-  }
-
-  /**
-   * Returns the Security config used for this object.
-   *
-   * @return SecurityConfig
-   */
-  public SecurityConfig getSecurityConfig() {
-    return securityConfig;
-  }
-
-  /**
-   * Uses the config to generate a key pair.
-   *
-   * @return KeyPair
-   * @throws NoSuchProviderException  - if the configured security provider
-   *                                  is not available.
-   * @throws NoSuchAlgorithmException - if the configured key algorithm is
-   *                                  not available.
-   */
-  public KeyPair generateKey() throws NoSuchProviderException,
-      NoSuchAlgorithmException {
-    return generateKey(securityConfig.getSize(),
-        securityConfig.getKeyAlgo(), securityConfig.getProvider());
-  }
-
-  /**
-   * Specify the size -- all other parameters are taken from config.
-   *
-   * @param size - int, valid key sizes.
-   * @return KeyPair
-   * @throws NoSuchProviderException  - if the configured security provider
-   *                                  is not available.
-   * @throws NoSuchAlgorithmException - if the configured key algorithm is
-   *                                  not available.
-   */
-  public KeyPair generateKey(int size) throws
-      NoSuchProviderException, NoSuchAlgorithmException {
-    return generateKey(size,
-        securityConfig.getKeyAlgo(), securityConfig.getProvider());
-  }
-
-  /**
-   * Custom key generation, all values are user provided.
-   *
-   * @param size - Key size
-   * @param algorithm - Algorithm to use
-   * @param provider - Security provider.
-   * @return KeyPair.
-   * @throws NoSuchProviderException  - if the given security provider is
-   *                                  not available.
-   * @throws NoSuchAlgorithmException - if the given algorithm is not
-   *                                  available.
-   */
-  public KeyPair generateKey(int size, String algorithm, String provider)
-      throws NoSuchProviderException, NoSuchAlgorithmException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Generating key pair using size:{}, Algorithm:{}, Provider:{}",
-          size, algorithm, provider);
-    }
-    KeyPairGenerator generator = KeyPairGenerator
-        .getInstance(algorithm, provider);
-    generator.initialize(size);
-    return generator.generateKeyPair();
-  }
-}
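
The generator above delegates to the JDK KeyPairGenerator; a minimal sketch of that underlying call, with RSA/2048 as assumed stand-ins for the values SecurityConfig would normally supply:

    import java.security.KeyPair;
    import java.security.KeyPairGenerator;

    public final class KeyGenSketch {
      public static void main(String[] args) throws Exception {
        // Algorithm and size are illustrative; HDDSKeyGenerator reads them
        // from SecurityConfig (getKeyAlgo()/getSize()).
        KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA");
        generator.initialize(2048);
        KeyPair pair = generator.generateKeyPair();
        System.out.println(pair.getPublic().getAlgorithm());
      }
    }
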
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java
deleted file mode 100644
index 82873b0..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.security.x509.keys;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.output.FileWriterWithEncoding;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.bouncycastle.util.io.pem.PemObject;
-import org.bouncycastle.util.io.pem.PemReader;
-import org.bouncycastle.util.io.pem.PemWriter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.StringReader;
-import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.FileSystems;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.nio.file.attribute.PosixFilePermission;
-import java.security.KeyFactory;
-import java.security.KeyPair;
-import java.security.NoSuchAlgorithmException;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.spec.InvalidKeySpecException;
-import java.security.spec.PKCS8EncodedKeySpec;
-import java.security.spec.X509EncodedKeySpec;
-import java.util.Set;
-import java.util.function.Supplier;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE;
-import static java.nio.file.attribute.PosixFilePermission.OWNER_READ;
-import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE;
-
-/**
- * We store all key material in good old PEM files. This helps in avoiding
- * dealing with persistent Java KeyStore issues. Also, when debugging, general
- * tools like OpenSSL can be used to read and decode these files.
- */
-public class KeyCodec {
-  public static final String PRIVATE_KEY = "PRIVATE KEY";
-  public static final String PUBLIC_KEY = "PUBLIC KEY";
-  public static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8;
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeyCodec.class);
-  private final Path location;
-  private final SecurityConfig securityConfig;
-  private Set<PosixFilePermission> permissionSet =
-      Stream.of(OWNER_READ, OWNER_WRITE, OWNER_EXECUTE)
-          .collect(Collectors.toSet());
-  private Supplier<Boolean> isPosixFileSystem;
-
-  /**
-   * Creates a KeyCodec with component name.
-   *
-   * @param config - Security Config.
-   * @param component - Component String.
-   */
-  public KeyCodec(SecurityConfig config, String component) {
-    this.securityConfig = config;
-    isPosixFileSystem = KeyCodec::isPosix;
-    this.location = securityConfig.getKeyLocation(component);
-  }
-
-  /**
-   * Checks if the file system supports POSIX-style security permissions.
-   *
-   * @return True if it supports POSIX.
-   */
-  private static Boolean isPosix() {
-    return FileSystems.getDefault().supportedFileAttributeViews()
-        .contains("posix");
-  }
-
-  /**
-   * Returns the Permission set.
-   *
-   * @return Set
-   */
-  @VisibleForTesting
-  public Set<PosixFilePermission> getPermissionSet() {
-    return permissionSet;
-  }
-
-  /**
-   * Returns the Security config used for this object.
-   *
-   * @return SecurityConfig
-   */
-  public SecurityConfig getSecurityConfig() {
-    return securityConfig;
-  }
-
-  /**
-   * This function is used only for testing.
-   *
-   * @param isPosixFileSystem - Sets a boolean function for mimicking file
-   * systems that are not POSIX.
-   */
-  @VisibleForTesting
-  public void setIsPosixFileSystem(Supplier<Boolean> isPosixFileSystem) {
-    this.isPosixFileSystem = isPosixFileSystem;
-  }
-
-  /**
-   * Writes a given key using the default config options.
-   *
-   * @param keyPair - Key Pair to write to file.
-   * @throws IOException - On I/O failure.
-   */
-  public void writeKey(KeyPair keyPair) throws IOException {
-    writeKey(location, keyPair, securityConfig.getPrivateKeyFileName(),
-        securityConfig.getPublicKeyFileName(), false);
-  }
-
-  /**
-   * Writes a given private key using the default config options.
-   *
-   * @param key - Key to write to file.
-   * @throws IOException - On I/O failure.
-   */
-  public void writePrivateKey(PrivateKey key) throws IOException {
-    File privateKeyFile =
-        Paths.get(location.toString(),
-            securityConfig.getPrivateKeyFileName()).toFile();
-
-    if (Files.exists(privateKeyFile.toPath())) {
-      throw new IOException("Private key already exist.");
-    }
-
-    try (PemWriter privateKeyWriter = new PemWriter(new
-        FileWriterWithEncoding(privateKeyFile, DEFAULT_CHARSET))) {
-      privateKeyWriter.writeObject(
-          new PemObject(PRIVATE_KEY, key.getEncoded()));
-    }
-    Files.setPosixFilePermissions(privateKeyFile.toPath(), permissionSet);
-  }
-
-  /**
-   * Writes a given public key using the default config options.
-   *
-   * @param key - Key to write to file.
-   * @throws IOException - On I/O failure.
-   */
-  public void writePublicKey(PublicKey key) throws IOException {
-    File publicKeyFile = Paths.get(location.toString(),
-        securityConfig.getPublicKeyFileName()).toFile();
-
-    if (Files.exists(publicKeyFile.toPath())) {
-      throw new IOException("Private key already exist.");
-    }
-
-    try (PemWriter keyWriter = new PemWriter(new
-        FileWriterWithEncoding(publicKeyFile, DEFAULT_CHARSET))) {
-      keyWriter.writeObject(
-          new PemObject(PUBLIC_KEY, key.getEncoded()));
-    }
-    Files.setPosixFilePermissions(publicKeyFile.toPath(), permissionSet);
-  }
-
-  /**
-   * Writes a given key using default config options.
-   *
-   * @param keyPair - Key pair to write
-   * @param overwrite - Overwrites the keys if they already exist.
-   * @throws IOException - On I/O failure.
-   */
-  public void writeKey(KeyPair keyPair, boolean overwrite) throws IOException {
-    writeKey(location, keyPair, securityConfig.getPrivateKeyFileName(),
-        securityConfig.getPublicKeyFileName(), overwrite);
-  }
-
-  /**
-   * Writes a given key pair to the given location, using default file names.
-   *
-   * @param basePath - The location to write to, overrides the config value.
-   * @param keyPair - Key pair to write
-   * @param overwrite - Overwrites the keys if they already exist.
-   * @throws IOException - On I/O failure.
-   */
-  public void writeKey(Path basePath, KeyPair keyPair, boolean overwrite)
-      throws IOException {
-    writeKey(basePath, keyPair, securityConfig.getPrivateKeyFileName(),
-        securityConfig.getPublicKeyFileName(), overwrite);
-  }
-
-  /**
-   * Reads a key from a PEM encoded file.
-   *
-   * @param basePath - Base path, directory where the key is stored.
-   * @param keyFileName - File name of the key.
-   * @return PKCS8EncodedKeySpec holding the raw key bytes.
-   * @throws IOException - on Error.
-   */
-  private PKCS8EncodedKeySpec readKey(Path basePath, String keyFileName)
-      throws IOException {
-    File fileName = Paths.get(basePath.toString(), keyFileName).toFile();
-    String keyData = FileUtils.readFileToString(fileName, DEFAULT_CHARSET);
-    final byte[] pemContent;
-    try (PemReader pemReader = new PemReader(new StringReader(keyData))) {
-      PemObject keyObject = pemReader.readPemObject();
-      pemContent = keyObject.getContent();
-    }
-    return new PKCS8EncodedKeySpec(pemContent);
-  }
-
-  /**
-   * Returns a Private Key from a PEM encoded file.
-   *
-   * @param basePath - base path
-   * @param privateKeyFileName - private key file name.
-   * @return PrivateKey
-   * @throws InvalidKeySpecException  - on Error.
-   * @throws NoSuchAlgorithmException - on Error.
-   * @throws IOException              - on Error.
-   */
-  public PrivateKey readPrivateKey(Path basePath, String privateKeyFileName)
-      throws InvalidKeySpecException, NoSuchAlgorithmException, IOException {
-    PKCS8EncodedKeySpec encodedKeySpec = readKey(basePath, privateKeyFileName);
-    final KeyFactory keyFactory =
-        KeyFactory.getInstance(securityConfig.getKeyAlgo());
-    return
-        keyFactory.generatePrivate(encodedKeySpec);
-  }
-
-  /**
-   * Read the Public Key using defaults.
-   * @return PublicKey.
-   * @throws InvalidKeySpecException - On Error.
-   * @throws NoSuchAlgorithmException - On Error.
-   * @throws IOException - On Error.
-   */
-  public PublicKey readPublicKey() throws InvalidKeySpecException,
-      NoSuchAlgorithmException, IOException {
-    return readPublicKey(this.location.toAbsolutePath(),
-        securityConfig.getPublicKeyFileName());
-  }
-
-  /**
-   * Returns a public key from a PEM encoded file.
-   *
-   * @param basePath - base path.
-   * @param publicKeyFileName - public key file name.
-   * @return PublicKey
-   * @throws NoSuchAlgorithmException - on Error.
-   * @throws InvalidKeySpecException  - on Error.
-   * @throws IOException              - on Error.
-   */
-  public PublicKey readPublicKey(Path basePath, String publicKeyFileName)
-      throws NoSuchAlgorithmException, InvalidKeySpecException, IOException {
-    PKCS8EncodedKeySpec encodedKeySpec = readKey(basePath, publicKeyFileName);
-    final KeyFactory keyFactory =
-        KeyFactory.getInstance(securityConfig.getKeyAlgo());
-    return
-        keyFactory.generatePublic(
-            new X509EncodedKeySpec(encodedKeySpec.getEncoded()));
-
-  }
-
-
-  /**
-   * Returns the private key using defaults.
-   * @return PrivateKey.
-   * @throws InvalidKeySpecException - On Error.
-   * @throws NoSuchAlgorithmException - On Error.
-   * @throws IOException - On Error.
-   */
-  public PrivateKey readPrivateKey() throws InvalidKeySpecException,
-      NoSuchAlgorithmException, IOException {
-    return readPrivateKey(this.location.toAbsolutePath(),
-        securityConfig.getPrivateKeyFileName());
-  }
-
-
-  /**
-   * Helper function that actually writes data to the files.
-   *
-   * @param basePath - base path to write key
-   * @param keyPair - Key pair to write to file.
-   * @param privateKeyFileName - private key file name.
-   * @param publicKeyFileName - public key file name.
-   * @param force - forces overwriting the keys.
-   * @throws IOException - On I/O failure.
-   */
-  private synchronized void writeKey(Path basePath, KeyPair keyPair,
-      String privateKeyFileName, String publicKeyFileName, boolean force)
-      throws IOException {
-    checkPreconditions(basePath);
-
-    File privateKeyFile =
-        Paths.get(basePath.toString(), privateKeyFileName).toFile();
-    File publicKeyFile =
-        Paths.get(basePath.toString(), publicKeyFileName).toFile();
-    checkKeyFile(privateKeyFile, force, publicKeyFile);
-
-    try (PemWriter privateKeyWriter = new PemWriter(new
-        FileWriterWithEncoding(privateKeyFile, DEFAULT_CHARSET))) {
-      privateKeyWriter.writeObject(
-          new PemObject(PRIVATE_KEY, keyPair.getPrivate().getEncoded()));
-    }
-
-    try (PemWriter publicKeyWriter = new PemWriter(new
-        FileWriterWithEncoding(publicKeyFile, DEFAULT_CHARSET))) {
-      publicKeyWriter.writeObject(
-          new PemObject(PUBLIC_KEY, keyPair.getPublic().getEncoded()));
-    }
-    Files.setPosixFilePermissions(privateKeyFile.toPath(), permissionSet);
-    Files.setPosixFilePermissions(publicKeyFile.toPath(), permissionSet);
-  }
-
-  /**
-   * Checks if the private and public key files already exist. If the force
-   * flag is set, an existing file is deleted; otherwise an IOException is
-   * thrown when a file already exists.
-   *
-   * @param privateKeyFile - Private key file.
-   * @param force - forces overwriting the keys.
-   * @param publicKeyFile - public key file.
-   * @throws IOException - On I/O failure.
-   */
-  private void checkKeyFile(File privateKeyFile, boolean force,
-                            File publicKeyFile) throws IOException {
-    if (privateKeyFile.exists() && force) {
-      if (!privateKeyFile.delete()) {
-        throw new IOException("Unable to delete private key file.");
-      }
-    }
-
-    if (publicKeyFile.exists() && force) {
-      if (!publicKeyFile.delete()) {
-        throw new IOException("Unable to delete public key file.");
-      }
-    }
-
-    if (privateKeyFile.exists()) {
-      throw new IOException("Private Key file already exists.");
-    }
-
-    if (publicKeyFile.exists()) {
-      throw new IOException("Public Key file already exists.");
-    }
-  }
-
-  /**
-   * Checks if base path exists and sets file permissions.
-   *
-   * @param basePath - base path to write key
-   * @throws IOException - On I/O failure.
-   */
-  private void checkPreconditions(Path basePath) throws IOException {
-    Preconditions.checkNotNull(basePath, "Base path cannot be null");
-    if (!isPosixFileSystem.get()) {
-      LOG.error("Keys cannot be stored securely without POSIX file system "
-          + "support for now.");
-      throw new IOException("Unsupported File System for pem file.");
-    }
-
-    if (Files.exists(basePath)) {
-      // Not the end of the world if we reset the permissions on an existing
-      // directory.
-      Files.setPosixFilePermissions(basePath, permissionSet);
-    } else {
-      boolean success = basePath.toFile().mkdirs();
-      if (!success) {
-        LOG.error("Unable to create the directory for the "
-            + "location. Location: {}", basePath);
-        throw new IOException("Unable to create the directory for the "
-            + "location. Location:" + basePath);
-      }
-      Files.setPosixFilePermissions(basePath, permissionSet);
-    }
-  }
-
-}
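
A sketch of the PEM write/read round trip the codec performs, kept in memory instead of files so it stays self-contained; the RSA algorithm is an assumption standing in for the configured key algorithm:

    import java.io.StringReader;
    import java.io.StringWriter;
    import java.security.KeyFactory;
    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.PrivateKey;
    import java.security.spec.PKCS8EncodedKeySpec;
    import org.bouncycastle.util.io.pem.PemObject;
    import org.bouncycastle.util.io.pem.PemReader;
    import org.bouncycastle.util.io.pem.PemWriter;

    public final class PemRoundTrip {
      public static void main(String[] args) throws Exception {
        KeyPair pair = KeyPairGenerator.getInstance("RSA").generateKeyPair();
        // Write the private key as a "PRIVATE KEY" PEM block.
        StringWriter out = new StringWriter();
        try (PemWriter writer = new PemWriter(out)) {
          writer.writeObject(
              new PemObject("PRIVATE KEY", pair.getPrivate().getEncoded()));
        }
        // Read it back and rebuild the key from the PKCS#8 bytes.
        try (PemReader reader = new PemReader(new StringReader(out.toString()))) {
          byte[] content = reader.readPemObject().getContent();
          PrivateKey key = KeyFactory.getInstance("RSA")
              .generatePrivate(new PKCS8EncodedKeySpec(content));
          System.out.println(key.getFormat()); // prints PKCS#8
        }
      }
    }
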
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java
deleted file mode 100644
index 6147d3a..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.security.x509.keys;
-
-import java.security.KeyFactory;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.spec.InvalidKeySpecException;
-import java.security.spec.PKCS8EncodedKeySpec;
-import java.security.spec.X509EncodedKeySpec;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
-import org.bouncycastle.asn1.ASN1ObjectIdentifier;
-import org.bouncycastle.asn1.ASN1Sequence;
-import org.bouncycastle.asn1.ASN1Set;
-import org.bouncycastle.asn1.pkcs.Attribute;
-import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
-import org.bouncycastle.asn1.x500.X500Name;
-import org.bouncycastle.asn1.x509.Extensions;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-
-/**
- * Utility functions for Security modules for Ozone.
- */
-public final class SecurityUtil {
-
-  // Ozone Certificate distinguished format: (CN=Subject,OU=ScmID,O=ClusterID).
-  private static final String DISTINGUISHED_NAME_FORMAT = "CN=%s,OU=%s,O=%s";
-
-  private SecurityUtil() {
-  }
-
-  public static String getDistinguishedNameFormat() {
-    return DISTINGUISHED_NAME_FORMAT;
-  }
-
-  public static X500Name getDistinguishedName(String subject, String scmID,
-      String clusterID) {
-    return new X500Name(String.format(getDistinguishedNameFormat(), subject,
-        scmID, clusterID));
-  }
-
-  // TODO: move the PKCS10CSRValidator class
-  public static Extensions getPkcs9Extensions(PKCS10CertificationRequest csr)
-      throws CertificateException {
-    ASN1Set pkcs9ExtReq = getPkcs9ExtRequest(csr);
-    Object extReqElement = pkcs9ExtReq.getObjects().nextElement();
-    if (extReqElement instanceof Extensions) {
-      return (Extensions) extReqElement;
-    } else {
-      if (extReqElement instanceof ASN1Sequence) {
-        return Extensions.getInstance((ASN1Sequence) extReqElement);
-      } else {
-        throw new CertificateException("Unknown element type :" + extReqElement
-            .getClass().getSimpleName());
-      }
-    }
-  }
-
-  public static ASN1Set getPkcs9ExtRequest(PKCS10CertificationRequest csr)
-      throws CertificateException {
-    for (Attribute attr : csr.getAttributes()) {
-      ASN1ObjectIdentifier oid = attr.getAttrType();
-      if (oid.equals(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest)) {
-        return attr.getAttrValues();
-      }
-    }
-    throw new CertificateException("No PKCS#9 extension found in CSR");
-  }
-
-  /**
-   * Returns a private key created from the encoded key.
-   *
-   * @return private key if successful, else null.
-   */
-  public static PrivateKey getPrivateKey(byte[] encodedKey,
-      SecurityConfig secureConfig) {
-    if (encodedKey == null || encodedKey.length == 0) {
-      return null;
-    }
-
-    try {
-      KeyFactory kf = KeyFactory.getInstance(secureConfig.getKeyAlgo(),
-          secureConfig.getProvider());
-      return kf.generatePrivate(new PKCS8EncodedKeySpec(encodedKey));
-    } catch (NoSuchAlgorithmException | InvalidKeySpecException |
-        NoSuchProviderException e) {
-      return null;
-    }
-  }
-
-  /**
-   * Returns a public key created from the encoded key.
-   *
-   * @return public key if successful, else null.
-   */
-  public static PublicKey getPublicKey(byte[] encodedKey,
-      SecurityConfig secureConfig) {
-    if (encodedKey == null || encodedKey.length == 0) {
-      return null;
-    }
-
-    try {
-      KeyFactory kf = KeyFactory.getInstance(secureConfig.getKeyAlgo(),
-          secureConfig.getProvider());
-      return kf.generatePublic(new X509EncodedKeySpec(encodedKey));
-    } catch (NoSuchAlgorithmException | InvalidKeySpecException |
-        NoSuchProviderException e) {
-      return null;
-    }
-  }
-
-}
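
The reason getPrivateKey and getPublicKey use different key specs is that getEncoded() on a private key yields PKCS#8 bytes while a public key yields X.509 SubjectPublicKeyInfo bytes. A small round-trip sketch, with RSA assumed for the key algorithm:

    import java.security.KeyFactory;
    import java.security.KeyPairGenerator;
    import java.security.PublicKey;
    import java.security.spec.X509EncodedKeySpec;

    public final class EncodedKeySketch {
      public static void main(String[] args) throws Exception {
        byte[] encoded = KeyPairGenerator.getInstance("RSA")
            .generateKeyPair().getPublic().getEncoded();
        // X.509-encoded public key bytes are rebuilt via X509EncodedKeySpec.
        PublicKey restored = KeyFactory.getInstance("RSA")
            .generatePublic(new X509EncodedKeySpec(encoded));
        System.out.println(restored.getAlgorithm());
      }
    }
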
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java
deleted file mode 100644
index 37a04d6..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Utils for private and public keys.
- */
-package org.apache.hadoop.hdds.security.x509.keys;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
deleted file mode 100644
index a6369c6..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-
-/**
- * This package contains common routines used in creating an X.509-based
- * identity framework for HDDS.
- */
-package org.apache.hadoop.hdds.security.x509;
-/*
-
-Architecture of Certificate Infrastructure for SCM.
-====================================================
-
-The certificate infrastructure has two main parts, the certificate server or
-the Certificate authority and the clients who want certificates. The CA is
-responsible for issuing certificates to participating entities.
-
-To issue a certificate the CA has to verify the identity and the assertions
-in the certificate. The client starts off by making a request to the CA for a
-certificate. This request is called a Certificate Signing Request or CSR
-(PKCS#10).
-
-When a CSR arrives at the CA, the CA will decode the CSR and verify that all
-the fields in the CSR are in line with what the system expects. Since there
-are lots of possible ways to construct an X.509 certificate, we rely on PKI
-profiles.
-
-Generally, PKI profiles are policy documents or general guidelines that get
-followed by the requester and CA. However, most of the PKI profiles that are
-commonly available are general purpose and offer too much surface area.
-
-The SCM CA infrastructure supports the notion of a PKI profile class which
-can codify the RDNs, extensions and other certificate policies. When issuing
-a certificate, the CA will invoke a certificate approver class, based on the
-authentication method used. For example, out of the box, we support manual,
-Kerberos, trusted network and testing authentication mechanisms.
-
-If there is no authentication mechanism in place, then when the CA receives
-the CSR, it runs the standard PKI profile over it to verify that all the
-fields are in expected ranges. Once that is done, the signing request is sent
-for human review and approval. This form of certificate approval is called
-Manual. Of all the certificate approval processes this is the ** most secure
-**. This approval needs to be done once for each data node.
-
-For existing clusters, where data nodes already have a Kerberos keytab, we
-can leverage the Kerberos identity mechanism to identify the data node that
-is requesting the certificate. In this case, users can configure the system
-to leverage Kerberos while issuing certificates and the SCM CA will be able
-to verify the data node's identity and issue certificates automatically.
-
-In environments like Kubernetes, we can leverage the base system services to
-pass on a shared secret securely. In this model too, we can rely on these
-secrets to make sure that it is the right data node that is talking to us.
-This kind of approval is called a Trusted network approval. In this process,
-each data node not only sends the CSR but also signs the request with a
-shared secret known to SCM. SCM can then issue a certificate without the
-intervention of a human administrator.
-
-The last, the TESTING method, which should never be used other than in
-development and testing clusters, is merely a mechanism to bypass all
-identity checks. If this flag is set, then the CA will issue a certificate
-as long as the base profile approves all fields.
-
- * Please do not use this mechanism (TESTING) for any purpose other than
- * testing.
-
-CA - Certificate Approval and Code Layout (as of Dec, 1st, 2018)
-=================================================================
-The CA implementation (as of now it is called DefaultCA) receives a CSR from
- the network layer. The network layer also tells the system which approver
- type to use, that is, whether the Kerberos or shared-secret mechanism is
- used, and reports that to the DefaultCA.
-
-The default CA instantiates the approver based on the type of the approver
-indicated by the network layer. This approver creates an instance of the PKI
-profile and passes each field from the certificate signing request. The PKI
-profile (as of today Dec 1st, 2018, we have one profile called Ozone profile)
- verifies that each field in the CSR meets the approved set of values.
-
-Once the PKI Profile validates the request, it is either auto approved or
-queued for manual review.
-
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcClientInterceptor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcClientInterceptor.java
deleted file mode 100644
index 58270ba..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcClientInterceptor.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.tracing;
-
-import org.apache.ratis.thirdparty.io.grpc.CallOptions;
-import org.apache.ratis.thirdparty.io.grpc.Channel;
-import org.apache.ratis.thirdparty.io.grpc.ClientCall;
-import org.apache.ratis.thirdparty.io.grpc.ClientInterceptor;
-import org.apache.ratis.thirdparty.io.grpc.ForwardingClientCall.SimpleForwardingClientCall;
-import org.apache.ratis.thirdparty.io.grpc.Metadata;
-import org.apache.ratis.thirdparty.io.grpc.Metadata.Key;
-import org.apache.ratis.thirdparty.io.grpc.MethodDescriptor;
-
-/**
- * Interceptor to add the tracing id to the outgoing call header.
- */
-public class GrpcClientInterceptor implements ClientInterceptor {
-
-  public static final Key<String> TRACING_HEADER =
-      Key.of("Tracing", Metadata.ASCII_STRING_MARSHALLER);
-
-  @Override
-  public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(
-      MethodDescriptor<ReqT, RespT> method, CallOptions callOptions,
-      Channel next) {
-
-    return new SimpleForwardingClientCall<ReqT, RespT>(
-        next.newCall(method, callOptions)) {
-
-      @Override
-      public void start(Listener<RespT> responseListener, Metadata headers) {
-
-        Metadata tracingHeaders = new Metadata();
-        tracingHeaders.put(TRACING_HEADER, TracingUtil.exportCurrentSpan());
-
-        headers.merge(tracingHeaders);
-
-        super.start(responseListener, headers);
-      }
-    };
-  }
-}
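
Wiring-wise, the interceptor is attached when the channel is built, so every stub created on that channel carries the "Tracing" header. A sketch under the assumption that the Ratis-shaded gRPC channel builder is used; host and port are placeholders:

    import org.apache.hadoop.hdds.tracing.GrpcClientInterceptor;
    import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
    import org.apache.ratis.thirdparty.io.grpc.ManagedChannelBuilder;

    public final class ClientWiring {
      public static void main(String[] args) {
        ManagedChannel channel = ManagedChannelBuilder
            .forAddress("localhost", 9858) // placeholder endpoint
            .usePlaintext()
            .intercept(new GrpcClientInterceptor())
            .build();
        channel.shutdown();
      }
    }
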
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java
deleted file mode 100644
index b63af12b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.tracing;
-
-import io.opentracing.Scope;
-import org.apache.ratis.thirdparty.io.grpc.ForwardingServerCallListener.SimpleForwardingServerCallListener;
-import org.apache.ratis.thirdparty.io.grpc.Metadata;
-import org.apache.ratis.thirdparty.io.grpc.ServerCall;
-import org.apache.ratis.thirdparty.io.grpc.ServerCall.Listener;
-import org.apache.ratis.thirdparty.io.grpc.ServerCallHandler;
-import org.apache.ratis.thirdparty.io.grpc.ServerInterceptor;
-
-/**
- * Interceptor to read the tracing id from the incoming call header and wrap
- * message handling in a tracing scope.
- */
-public class GrpcServerInterceptor implements ServerInterceptor {
-
-  @Override
-  public <ReqT, RespT> Listener<ReqT> interceptCall(
-      ServerCall<ReqT, RespT> call, Metadata headers,
-      ServerCallHandler<ReqT, RespT> next) {
-
-    return new SimpleForwardingServerCallListener<ReqT>(
-        next.startCall(call, headers)) {
-      @Override
-      public void onMessage(ReqT message) {
-        try (Scope scope = TracingUtil
-            .importAndCreateScope(
-                call.getMethodDescriptor().getFullMethodName(),
-                headers.get(GrpcClientInterceptor.TRACING_HEADER))) {
-          super.onMessage(message);
-        }
-      }
-    };
-  }
-}
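
On the server side the interceptor is attached per service; a sketch of that wiring, again assuming the Ratis-shaded gRPC classes, with the BindableService left as a parameter since any generated service implementation will do:

    import org.apache.hadoop.hdds.tracing.GrpcServerInterceptor;
    import org.apache.ratis.thirdparty.io.grpc.BindableService;
    import org.apache.ratis.thirdparty.io.grpc.Server;
    import org.apache.ratis.thirdparty.io.grpc.ServerBuilder;
    import org.apache.ratis.thirdparty.io.grpc.ServerInterceptors;

    public final class ServerWiring {
      // Each message handled by the wrapped service runs inside a tracing
      // scope created by GrpcServerInterceptor.
      public static Server traced(int port, BindableService service) {
        return ServerBuilder.forPort(port)
            .addService(ServerInterceptors.intercept(service,
                new GrpcServerInterceptor()))
            .build();
      }
    }
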
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java
deleted file mode 100644
index 56d59ea..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.tracing;
-
-import java.math.BigInteger;
-
-import io.jaegertracing.internal.JaegerSpanContext;
-import io.jaegertracing.internal.exceptions.EmptyTracerStateStringException;
-import io.jaegertracing.internal.exceptions.MalformedTracerStateStringException;
-import io.jaegertracing.internal.exceptions.TraceIdOutOfBoundException;
-import io.jaegertracing.spi.Codec;
-import io.opentracing.propagation.Format;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A jaeger codec to save the current tracing context as a string.
- */
-public class StringCodec implements Codec<StringBuilder> {
-
-  public static final Logger LOG  = LoggerFactory.getLogger(StringCodec.class);
-  public static final StringFormat FORMAT = new StringFormat();
-
-  @Override
-  public JaegerSpanContext extract(StringBuilder s) {
-    if (s == null) {
-      throw new EmptyTracerStateStringException();
-    }
-    String value = s.toString();
-    if (value != null && !value.equals("")) {
-      String[] parts = value.split(":");
-      if (parts.length != 4) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("MalformedTracerStateString: {}", value);
-        }
-        throw new MalformedTracerStateStringException(value);
-      } else {
-        String traceId = parts[0];
-        if (traceId.length() <= 32 && traceId.length() >= 1) {
-          return new JaegerSpanContext(high(traceId),
-              (new BigInteger(traceId, 16)).longValue(),
-              (new BigInteger(parts[1], 16)).longValue(),
-              (new BigInteger(parts[2], 16)).longValue(),
-              (new BigInteger(parts[3], 16)).byteValue());
-        } else {
-          throw new TraceIdOutOfBoundException(
-              "Trace id [" + traceId + "] length is not withing 1 and 32");
-        }
-      }
-    } else {
-      throw new EmptyTracerStateStringException();
-    }
-  }
-
-  @Override
-  public void inject(JaegerSpanContext context,
-      StringBuilder string) {
-    int intFlag = context.getFlags() & 255;
-    string.append(context.getTraceId())
-        .append(':').append(Long.toHexString(context.getSpanId()))
-        .append(':').append(Long.toHexString(context.getParentId()))
-        .append(':').append(Integer.toHexString(intFlag));
-  }
-
-  private static long high(String hexString) {
-    if (hexString.length() > 16) {
-      int highLength = hexString.length() - 16;
-      String highString = hexString.substring(0, highLength);
-      return (new BigInteger(highString, 16)).longValue();
-    } else {
-      return 0L;
-    }
-  }
-
-  /**
-   * The format to save the context as text.
-   * <p>
-   * Using the mutable StringBuilder instead of plain String.
-   */
-  public static final class StringFormat implements Format<StringBuilder> {
-  }
-
-}
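
The codec serializes a span context as traceId:spanId:parentId:flags in hex. A round-trip sketch using the jaeger-client internal JaegerSpanContext constructor seen in extract() above; all the ids are illustrative and the constructor signature assumes the jaeger-client version used here:

    import io.jaegertracing.internal.JaegerSpanContext;
    import org.apache.hadoop.hdds.tracing.StringCodec;

    public final class CodecRoundTrip {
      public static void main(String[] args) {
        StringCodec codec = new StringCodec();
        // trace id (high/low), span id, parent id, flags -- illustrative.
        JaegerSpanContext context =
            new JaegerSpanContext(0L, 1L, 2L, 3L, (byte) 1);
        StringBuilder carrier = new StringBuilder();
        codec.inject(context, carrier); // e.g. "1:2:3:1"
        JaegerSpanContext restored = codec.extract(carrier);
        System.out.println(restored.getSpanId() == context.getSpanId());
      }
    }
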
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java
deleted file mode 100644
index 8bdf638..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.tracing;
-
-import java.lang.reflect.InvocationHandler;
-import java.lang.reflect.Method;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import io.opentracing.Scope;
-import io.opentracing.util.GlobalTracer;
-
-/**
- * A Java proxy invocation handler to trace all the methods of the delegate
- * class.
- *
- * @param <T> the type of the delegate class.
- */
-public class TraceAllMethod<T> implements InvocationHandler {
-
-  /**
-   * Cache for all the method objects of the delegate class.
-   */
-  private final Map<String, Map<Class<?>[], Method>> methods = new HashMap<>();
-
-  private T delegate;
-
-  private String name;
-
-  public TraceAllMethod(T delegate, String name) {
-    this.delegate = delegate;
-    this.name = name;
-    for (Method method : delegate.getClass().getDeclaredMethods()) {
-      if (!methods.containsKey(method.getName())) {
-        methods.put(method.getName(), new HashMap<>());
-      }
-      methods.get(method.getName()).put(method.getParameterTypes(), method);
-    }
-  }
-
-  @Override
-  public Object invoke(Object proxy, Method method, Object[] args)
-      throws Throwable {
-    Method delegateMethod = findDelegatedMethod(method);
-    try (Scope scope = GlobalTracer.get().buildSpan(
-        name + "." + method.getName())
-        .startActive(true)) {
-      try {
-        return delegateMethod.invoke(delegate, args);
-      } catch (Exception ex) {
-        if (ex.getCause() != null) {
-          throw ex.getCause();
-        } else {
-          throw ex;
-        }
-      }
-    }
-  }
-
-  private Method findDelegatedMethod(Method method) {
-    for (Entry<Class<?>[], Method> entry : methods.get(method.getName())
-        .entrySet()) {
-      if (Arrays.equals(entry.getKey(), method.getParameterTypes())) {
-        return entry.getValue();
-      }
-    }
-    return null;
-  }
-}
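
The handler is meant to sit behind java.lang.reflect.Proxy; a toy sketch of that use, with a hypothetical Greeter interface (the deleted TracingUtil.createProxy below does the same wiring for real interfaces):

    import java.lang.reflect.Proxy;
    import org.apache.hadoop.hdds.tracing.TraceAllMethod;

    public final class ProxySketch {
      // Toy interface for illustration; any interface works.
      interface Greeter {
        String greet(String who);
      }

      public static void main(String[] args) {
        Greeter real = who -> "hello " + who;
        Greeter traced = (Greeter) Proxy.newProxyInstance(
            Greeter.class.getClassLoader(),
            new Class<?>[] {Greeter.class},
            new TraceAllMethod<>(real, "Greeter"));
        // Each call is traced as "Greeter.greet".
        System.out.println(traced.greet("ozone"));
      }
    }
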
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java
deleted file mode 100644
index 8e82a37..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.tracing;
-
-import java.lang.reflect.Proxy;
-
-import io.jaegertracing.Configuration;
-import io.jaegertracing.internal.JaegerTracer;
-import io.opentracing.Scope;
-import io.opentracing.Span;
-import io.opentracing.SpanContext;
-import io.opentracing.Tracer;
-import io.opentracing.util.GlobalTracer;
-
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-
-/**
- * Utility class to collect all the tracing helper methods.
- */
-public final class TracingUtil {
-
-  private static final String NULL_SPAN_AS_STRING = "";
-
-  private TracingUtil() {
-  }
-
-  /**
-   * Initialize the tracing with the given service name.
-   *
-   * @param serviceName the name of the service to register with the tracer
-   */
-  public static void initTracing(String serviceName) {
-    if (!GlobalTracer.isRegistered()) {
-      Configuration config = Configuration.fromEnv(serviceName);
-      JaegerTracer tracer = config.getTracerBuilder()
-          .registerExtractor(StringCodec.FORMAT, new StringCodec())
-          .registerInjector(StringCodec.FORMAT, new StringCodec())
-          .build();
-      GlobalTracer.register(tracer);
-    }
-  }
-
-  /**
-   * Export the active tracing span as a string.
-   *
-   * @return encoded tracing context.
-   */
-  public static String exportCurrentSpan() {
-    if (GlobalTracer.get().activeSpan() != null) {
-      StringBuilder builder = new StringBuilder();
-      GlobalTracer.get().inject(GlobalTracer.get().activeSpan().context(),
-          StringCodec.FORMAT, builder);
-      return builder.toString();
-    }
-    return NULL_SPAN_AS_STRING;
-  }
-
-  /**
-   * Export the specific span as a string.
-   *
-   * @return encoded tracing context.
-   */
-  public static String exportSpan(Span span) {
-    if (span != null) {
-      StringBuilder builder = new StringBuilder();
-      GlobalTracer.get().inject(span.context(), StringCodec.FORMAT, builder);
-      return builder.toString();
-    }
-    return NULL_SPAN_AS_STRING;
-  }
-
-  /**
-   * Create a new scope and use the imported span as the parent.
-   *
-   * @param name          name of the newly created scope
-   * @param encodedParent Encoded parent span (could be null or empty)
-   *
-   * @return OpenTracing scope.
-   */
-  public static Scope importAndCreateScope(String name, String encodedParent) {
-    Tracer.SpanBuilder spanBuilder;
-    Tracer tracer = GlobalTracer.get();
-    SpanContext parentSpan = null;
-    if (encodedParent != null && encodedParent.length() > 0) {
-      StringBuilder builder = new StringBuilder();
-      builder.append(encodedParent);
-      parentSpan = tracer.extract(StringCodec.FORMAT, builder);
-
-    }
-
-    if (parentSpan == null) {
-      spanBuilder = tracer.buildSpan(name);
-    } else {
-      spanBuilder =
-          tracer.buildSpan(name).asChildOf(parentSpan);
-    }
-    return spanBuilder.startActive(true);
-  }
-
-  /**
-   * Creates a proxy of the implementation and traces all the method calls.
-   *
-   * @param delegate the original class instance
-   * @param interfce the interface which should be implemented by the proxy
-   * @param <T> the type of the interface
-   * @param conf configuration
-   *
-   * @return a new proxy which implements interfce but delegates all the
-   * calls to the delegate instance, with tracing enabled.
-   */
-  public static <T> T createProxy(T delegate, Class<T> interfce,
-                                  org.apache.hadoop.conf.Configuration conf) {
-    boolean isTracingEnabled = conf.getBoolean(
-        ScmConfigKeys.HDDS_TRACING_ENABLED,
-        ScmConfigKeys.HDDS_TRACING_ENABLED_DEFAULT);
-    if (!isTracingEnabled) {
-      return delegate;
-    }
-    Class<?> aClass = delegate.getClass();
-    return (T) Proxy.newProxyInstance(aClass.getClassLoader(),
-        new Class<?>[] {interfce},
-        new TraceAllMethod<T>(delegate, interfce.getSimpleName()));
-  }
-
-}
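
A typical use of the export/import pair above is to carry the trace context
across an RPC boundary. A minimal sketch follows; the request object and its
traceID field are hypothetical, and only exportCurrentSpan and
importAndCreateScope come from the class above:

    // Client side: serialize the active span into a string that can
    // travel inside the RPC message.
    String traceId = TracingUtil.exportCurrentSpan();
    request.setTraceID(traceId);

    // Server side: resume the trace as a child of the imported span.
    try (io.opentracing.Scope scope = TracingUtil.importAndCreateScope(
        "OzoneManager.lookupKey", request.getTraceID())) {
      // handle the request; the span finishes when the scope closes
    }
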
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java
deleted file mode 100644
index 3ead03b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.tracing;
-
-/**
- * Helper classes to use distributed tracing in Ozone components.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java
deleted file mode 100644
index ca8d870..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * An abstract class for a background service in Ozone.
- * A background service schedules multiple child tasks to run in parallel
- * at a fixed interval. In each interval, it waits until all the tasks
- * finish execution and then schedules the next interval.
- */
-public abstract class BackgroundService {
-
-  @VisibleForTesting
-  public static final Logger LOG =
-      LoggerFactory.getLogger(BackgroundService.class);
-
-  // Executor to launch child tasks
-  private final ScheduledExecutorService exec;
-  private final ThreadGroup threadGroup;
-  private final ThreadFactory threadFactory;
-  private final String serviceName;
-  private final long interval;
-  private final long serviceTimeout;
-  private final TimeUnit unit;
-  private final PeriodicalTask service;
-
-  public BackgroundService(String serviceName, long interval,
-      TimeUnit unit, int threadPoolSize, long serviceTimeout) {
-    this.interval = interval;
-    this.unit = unit;
-    this.serviceName = serviceName;
-    this.serviceTimeout = serviceTimeout;
-    threadGroup = new ThreadGroup(serviceName);
-    ThreadFactory tf = r -> new Thread(threadGroup, r);
-    threadFactory = new ThreadFactoryBuilder()
-        .setThreadFactory(tf)
-        .setDaemon(true)
-        .setNameFormat(serviceName + "#%d")
-        .build();
-    exec = Executors.newScheduledThreadPool(threadPoolSize, threadFactory);
-    service = new PeriodicalTask();
-  }
-
-  protected ExecutorService getExecutorService() {
-    return this.exec;
-  }
-
-  @VisibleForTesting
-  public int getThreadCount() {
-    return threadGroup.activeCount();
-  }
-
-  @VisibleForTesting
-  public void triggerBackgroundTaskForTesting() {
-    service.run();
-  }
-
-  // start service
-  public void start() {
-    exec.scheduleWithFixedDelay(service, 0, interval, unit);
-  }
-
-  public abstract BackgroundTaskQueue getTasks();
-
-  /**
-   * Runs one or more background tasks concurrently and
-   * waits until all tasks have returned their results.
-   */
-  public class PeriodicalTask implements Runnable {
-    @Override
-    public synchronized void run() {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Running background service : {}", serviceName);
-      }
-      BackgroundTaskQueue tasks = getTasks();
-      if (tasks.isEmpty()) {
-        // No tasks found, or there was a problem initializing them;
-        // return and retry in the next interval.
-        return;
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Number of background tasks to execute : {}", tasks.size());
-      }
-      CompletionService<BackgroundTaskResult> taskCompletionService =
-          new ExecutorCompletionService<>(exec);
-
-      List<Future<BackgroundTaskResult>> results = Lists.newArrayList();
-      while (tasks.size() > 0) {
-        BackgroundTask task = tasks.poll();
-        Future<BackgroundTaskResult> result =
-            taskCompletionService.submit(task);
-        results.add(result);
-      }
-
-      results.parallelStream().forEach(taskResultFuture -> {
-        try {
-          // Collect task results
-          BackgroundTaskResult result = serviceTimeout > 0
-              ? taskResultFuture.get(serviceTimeout, unit)
-              : taskResultFuture.get();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("task execution result size {}", result.getSize());
-          }
-        } catch (InterruptedException | ExecutionException e) {
-          LOG.warn(
-              "Background task fails to execute, "
-                  + "retrying in next interval", e);
-        } catch (TimeoutException e) {
-          LOG.warn("Background task executes timed out, "
-              + "retrying in next interval", e);
-        }
-      });
-    }
-  }
-
-  // shutdown and make sure all threads are properly released.
-  public void shutdown() {
-    LOG.info("Shutting down service {}", this.serviceName);
-    exec.shutdown();
-    try {
-      if (!exec.awaitTermination(60, TimeUnit.SECONDS)) {
-        exec.shutdownNow();
-      }
-    } catch (InterruptedException e) {
-      exec.shutdownNow();
-    }
-    if (threadGroup.activeCount() == 0 && !threadGroup.isDestroyed()) {
-      threadGroup.destroy();
-    }
-  }
-}
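
A concrete service only has to supply the task queue; scheduling, timeouts
and shutdown are inherited. A minimal sketch (the class name and the work
done in call() are hypothetical):

    import java.util.concurrent.TimeUnit;

    public class ExampleCleanupService extends BackgroundService {

      public ExampleCleanupService() {
        // name, interval, unit, thread pool size, per-task timeout
        super("ExampleCleanup", 60_000, TimeUnit.MILLISECONDS, 2, 300_000);
      }

      @Override
      public BackgroundTaskQueue getTasks() {
        BackgroundTaskQueue queue = new BackgroundTaskQueue();
        queue.add(new BackgroundTask<BackgroundTaskResult>() {
          @Override
          public int getPriority() {
            return 0; // lower values are polled first by the queue below
          }

          @Override
          public BackgroundTaskResult call() {
            // ... one unit of periodic cleanup work goes here ...
            return BackgroundTaskResult.EmptyTaskResult.newResult();
          }
        });
        return queue;
      }
    }

Calling start() schedules getTasks() every interval; shutdown() drains the
executor and destroys the thread group.
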
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java
deleted file mode 100644
index d5ad2a3..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import java.util.concurrent.Callable;
-
-/**
- * A task to be run by a {@link BackgroundService}.
- */
-public interface BackgroundTask<T> extends Callable<T> {
-
-  int getPriority();
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskQueue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskQueue.java
deleted file mode 100644
index 005d14b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskQueue.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import java.util.PriorityQueue;
-
-/**
- * A priority queue that stores a number of {@link BackgroundTask}.
- */
-public class BackgroundTaskQueue {
-
-  private final PriorityQueue<BackgroundTask> tasks;
-
-  public BackgroundTaskQueue() {
-    tasks = new PriorityQueue<>((task1, task2)
-        -> task1.getPriority() - task2.getPriority());
-  }
-
-  /**
-   * @return the head task in this queue.
-   */
-  public synchronized BackgroundTask poll() {
-    return tasks.poll();
-  }
-
-  /**
-   * Adds a {@link BackgroundTask} to the queue;
-   * tasks are ordered by their priority.
-   *
-   * @param task the task to add
-   */
-  public synchronized void add(BackgroundTask task) {
-    tasks.add(task);
-  }
-
-  /**
-   * @return true if the queue contains no task, false otherwise.
-   */
-  public synchronized boolean isEmpty() {
-    return tasks.isEmpty();
-  }
-
-  /**
-   * @return the size of the queue.
-   */
-  public synchronized int size() {
-    return tasks.size();
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskResult.java
deleted file mode 100644
index be8032b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskResult.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-/**
- * Result of a {@link BackgroundTask}.
- */
-public interface BackgroundTaskResult {
-
-  /**
-   * Returns the size of entries included in this result.
-   */
-  int getSize();
-
-  /**
-   * An empty task result implementation.
-   */
-  class EmptyTaskResult implements BackgroundTaskResult {
-
-    public static EmptyTaskResult newResult() {
-      return new EmptyTaskResult();
-    }
-
-    @Override
-    public int getSize() {
-      return 0;
-    }
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BatchOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BatchOperation.java
deleted file mode 100644
index 377c7f6..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BatchOperation.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import com.google.common.collect.Lists;
-
-import java.util.List;
-
-/**
- * A utility class to store a batch of DB write operations.
- */
-public class BatchOperation {
-
-  /**
-   * Enum for write operations.
-   */
-  public enum Operation {
-    DELETE, PUT
-  }
-
-  private List<SingleOperation> operations =
-      Lists.newArrayList();
-
-  /**
-   * Add a PUT operation into the batch.
-   */
-  public void put(byte[] key, byte[] value) {
-    operations.add(new SingleOperation(Operation.PUT, key, value));
-  }
-
-  /**
-   * Add a DELETE operation into the batch.
-   */
-  public void delete(byte[] key) {
-    operations.add(new SingleOperation(Operation.DELETE, key, null));
-
-  }
-
-  public List<SingleOperation> getOperations() {
-    return operations;
-  }
-
-  /**
-   * A SingleOperation represents a PUT or DELETE operation
-   * and the data the operation needs to manipulate.
-   */
-  public static class SingleOperation {
-
-    private Operation opt;
-    private byte[] key;
-    private byte[] value;
-
-    public SingleOperation(Operation opt, byte[] key, byte[] value) {
-      this.opt = opt;
-      if (key == null) {
-        throw new IllegalArgumentException("key cannot be null");
-      }
-      this.key = key.clone();
-      this.value = value == null ? null : value.clone();
-    }
-
-    public Operation getOpt() {
-      return opt;
-    }
-
-    public byte[] getKey() {
-      return key.clone();
-    }
-
-    public byte[] getValue() {
-      return value == null ? null : value.clone();
-    }
-  }
-}
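
Staged operations are applied in one atomic write via
MetadataStore#writeBatch. A sketch; the keys are illustrative and store is
any MetadataStore implementation:

    import java.nio.charset.StandardCharsets;

    BatchOperation batch = new BatchOperation();
    batch.put("/v1/b1/k1".getBytes(StandardCharsets.UTF_8),
        "value1".getBytes(StandardCharsets.UTF_8));
    batch.delete("/v1/b1/k0".getBytes(StandardCharsets.UTF_8));

    // Either both mutations are applied or neither is.
    store.writeBatch(batch);
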
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/EntryConsumer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/EntryConsumer.java
deleted file mode 100644
index dc08c2b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/EntryConsumer.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import java.io.IOException;
-
-/**
- * A consumer for metadata store key-value entries.
- * Used by the {@link MetadataStore} class.
- */
-@FunctionalInterface
-public interface EntryConsumer {
-
-  /**
-   * Consumes a key and value and produces a boolean result.
-   * @param key key
-   * @param value value
-   * @return a boolean value produced by the consumer
-   * @throws IOException
-   */
-  boolean consume(byte[] key, byte[] value) throws IOException;
-}
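
Since EntryConsumer is a functional interface, a lambda can be passed
straight to MetadataStore#iterate; returning false stops the scan. A sketch,
where store is any MetadataStore implementation and the '#' prefix is
illustrative:

    import java.util.concurrent.atomic.AtomicInteger;

    AtomicInteger visited = new AtomicInteger();
    store.iterate(null, (key, value) -> {
      if (key.length > 0 && key[0] == '#') {
        return false; // stop at the first '#'-prefixed key
      }
      visited.incrementAndGet();
      return true;    // keep iterating
    });
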
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java
deleted file mode 100644
index 6a372d1..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.util.ClassUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class returns build information about the HDDS component.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public final class HddsVersionInfo {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      HddsVersionInfo.class);
-
-  public static final VersionInfo HDDS_VERSION_INFO =
-      new VersionInfo("hdds");
-
-  private HddsVersionInfo() {}
-
-  public static void main(String[] args) {
-    System.out.println("Using HDDS " + HDDS_VERSION_INFO.getVersion());
-    System.out.println(
-        "Source code repository " + HDDS_VERSION_INFO.getUrl() + " -r " +
-            HDDS_VERSION_INFO.getRevision());
-    System.out.println("Compiled by " + HDDS_VERSION_INFO.getUser() + " on "
-        + HDDS_VERSION_INFO.getDate());
-    System.out.println(
-        "Compiled with protoc " + HDDS_VERSION_INFO.getProtocVersion());
-    System.out.println(
-        "From source with checksum " + HDDS_VERSION_INFO.getSrcChecksum());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("This command was run using " +
-          ClassUtil.findContainingJar(HddsVersionInfo.class));
-    }
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java
deleted file mode 100644
index 0598987..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter;
-import org.fusesource.leveldbjni.JniDBFactory;
-import org.iq80.leveldb.DB;
-import org.iq80.leveldb.DBIterator;
-import org.iq80.leveldb.Options;
-import org.iq80.leveldb.ReadOptions;
-import org.iq80.leveldb.Snapshot;
-import org.iq80.leveldb.WriteBatch;
-import org.iq80.leveldb.WriteOptions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-/**
- * A {@link MetadataStore} implementation backed by LevelDB.
- */
-public class LevelDBStore implements MetadataStore {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(LevelDBStore.class);
-
-  private DB db;
-  private final File dbFile;
-  private final Options dbOptions;
-  private final WriteOptions writeOptions;
-
-  public LevelDBStore(File dbPath, boolean createIfMissing)
-      throws IOException {
-    dbOptions = new Options();
-    dbOptions.createIfMissing(createIfMissing);
-    this.dbFile = dbPath;
-    this.writeOptions = new WriteOptions().sync(true);
-    openDB(dbPath, dbOptions);
-  }
-
-  /**
-   * Opens a DB file.
-   *
-   * @param dbPath          - DB File path
-   * @param options         - LevelDB options
-   * @throws IOException if the DB cannot be opened
-   */
-  public LevelDBStore(File dbPath, Options options)
-      throws IOException {
-    dbOptions = options;
-    this.dbFile = dbPath;
-    this.writeOptions = new WriteOptions().sync(true);
-    openDB(dbPath, dbOptions);
-  }
-
-  private void openDB(File dbPath, Options options) throws IOException {
-    if (dbPath.getParentFile().mkdirs()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Db path {} created.", dbPath.getParentFile());
-      }
-    }
-    db = JniDBFactory.factory.open(dbPath, options);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("LevelDB successfully opened");
-      LOG.debug("[Option] cacheSize = " + options.cacheSize());
-      LOG.debug("[Option] createIfMissing = " + options.createIfMissing());
-      LOG.debug("[Option] blockSize = " + options.blockSize());
-      LOG.debug("[Option] compressionType= " + options.compressionType());
-      LOG.debug("[Option] maxOpenFiles= " + options.maxOpenFiles());
-      LOG.debug("[Option] writeBufferSize= "+ options.writeBufferSize());
-    }
-  }
-
-  /**
-   * Puts a Key into file.
-   *
-   * @param key   - key
-   * @param value - value
-   */
-  @Override
-  public void put(byte[] key, byte[] value) {
-    db.put(key, value, writeOptions);
-  }
-
-  /**
-   * Get Key.
-   *
-   * @param key key
-   * @return value
-   */
-  @Override
-  public byte[] get(byte[] key) {
-    return db.get(key);
-  }
-
-  /**
-   * Delete Key.
-   *
-   * @param key - Key
-   */
-  @Override
-  public void delete(byte[] key) {
-    db.delete(key);
-  }
-
-  /**
-   * Closes the DB.
-   *
-   * @throws IOException
-   */
-  @Override
-  public void close() throws IOException {
-    if (db != null){
-      db.close();
-    }
-  }
-
-  /**
-   * Returns true if the DB is empty.
-   *
-   * @return boolean
-   * @throws IOException
-   */
-  @Override
-  public boolean isEmpty() throws IOException {
-    try (DBIterator iter = db.iterator()) {
-      iter.seekToFirst();
-      // The store is empty when seeking to the first entry finds nothing.
-      return !iter.hasNext();
-    }
-  }
-
-  /**
-   * Returns the actual levelDB object.
-   * @return DB handle.
-   */
-  public DB getDB() {
-    return db;
-  }
-
-  /**
-   * Returns an iterator on all the key-value pairs in the DB.
-   * @return an iterator on DB entries.
-   */
-  public DBIterator getIterator() {
-    return db.iterator();
-  }
-
-  @Override
-  public void destroy() throws IOException {
-    close();
-    JniDBFactory.factory.destroy(dbFile, dbOptions);
-  }
-
-  @Override
-  public ImmutablePair<byte[], byte[]> peekAround(int offset,
-      byte[] from) throws IOException, IllegalArgumentException {
-    try (DBIterator it = db.iterator()) {
-      if (from == null) {
-        it.seekToFirst();
-      } else {
-        it.seek(from);
-      }
-      if (!it.hasNext()) {
-        return null;
-      }
-      switch (offset) {
-      case 0:
-        Entry<byte[], byte[]> current = it.next();
-        return new ImmutablePair<>(current.getKey(), current.getValue());
-      case 1:
-        if (it.next() != null && it.hasNext()) {
-          Entry<byte[], byte[]> next = it.peekNext();
-          return new ImmutablePair<>(next.getKey(), next.getValue());
-        }
-        break;
-      case -1:
-        if (it.hasPrev()) {
-          Entry<byte[], byte[]> prev = it.peekPrev();
-          return new ImmutablePair<>(prev.getKey(), prev.getValue());
-        }
-        break;
-      default:
-        throw new IllegalArgumentException(
-            "Position can only be -1, 0 " + "or 1, but found " + offset);
-      }
-    }
-    return null;
-  }
-
-  @Override
-  public void iterate(byte[] from, EntryConsumer consumer)
-      throws IOException {
-    try (DBIterator iter = db.iterator()) {
-      if (from != null) {
-        iter.seek(from);
-      } else {
-        iter.seekToFirst();
-      }
-      while (iter.hasNext()) {
-        Entry<byte[], byte[]> current = iter.next();
-        if (!consumer.consume(current.getKey(),
-            current.getValue())) {
-          break;
-        }
-      }
-    }
-  }
-
-  /**
-   * Compacts the DB by removing deleted keys etc.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public void compactDB() throws IOException {
-    if (db != null) {
-      // From LevelDB docs : begin == null and end == null means the whole DB.
-      db.compactRange(null, null);
-    }
-  }
-
-  @Override
-  public void flushDB(boolean sync) {
-    // TODO: Implement flush for level db
-    // do nothing
-  }
-
-  @Override
-  public void writeBatch(BatchOperation operation) throws IOException {
-    List<BatchOperation.SingleOperation> operations =
-        operation.getOperations();
-    if (!operations.isEmpty()) {
-      try (WriteBatch writeBatch = db.createWriteBatch()) {
-        for (BatchOperation.SingleOperation opt : operations) {
-          switch (opt.getOpt()) {
-          case DELETE:
-            writeBatch.delete(opt.getKey());
-            break;
-          case PUT:
-            writeBatch.put(opt.getKey(), opt.getValue());
-            break;
-          default:
-            throw new IllegalArgumentException("Invalid operation "
-                + opt.getOpt());
-          }
-        }
-        db.write(writeBatch);
-      }
-    }
-  }
-
-  @Override
-  public List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    return getRangeKVs(startKey, count, false, filters);
-  }
-
-  @Override
-  public List<Map.Entry<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    return getRangeKVs(startKey, count, true, filters);
-  }
-
-  /**
-   * Returns a certain range of key value pairs as a list based on a
-   * startKey or count. Further a {@link MetadataKeyFilter} can be added to
-   * filter keys if necessary. To prevent race conditions while listing
-   * entries, this implementation takes a snapshot and lists the entries from
-   * the snapshot. This may, on the other hand, cause the range result to
-   * differ slightly from the actual data if the data is updated concurrently.
-   * <p>
-   * If the startKey is specified and found in levelDB, this key and the keys
-   * after this key will be included in the result. If the startKey is null
-   * all entries will be included as long as other conditions are satisfied.
-   * If the given startKey doesn't exist, an empty list will be returned.
-   * <p>
-   * The count argument is to limit number of total entries to return,
-   * the value for count must be an integer greater than 0.
-   * <p>
-   * This method allows the caller to specify one or more
-   * {@link MetadataKeyFilter}s to filter keys by certain conditions. Once
-   * given, only the entries
-   * whose key passes all the filters will be included in the result.
-   *
-   * @param startKey a start key.
-   * @param count max number of entries to return.
-   * @param filters customized one or more {@link MetadataKeyFilter}.
-   * @return a list of entries found in the database or an empty list if the
-   * startKey is invalid.
-   * @throws IOException if there are I/O errors.
-   * @throws IllegalArgumentException if count is less than 0.
-   */
-  private List<Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, boolean sequential, MetadataKeyFilter... filters)
-      throws IOException {
-    List<Entry<byte[], byte[]>> result = new ArrayList<>();
-    long start = System.currentTimeMillis();
-    if (count < 0) {
-      throw new IllegalArgumentException(
-          "Invalid count given " + count + ", count must be greater than 0");
-    }
-    Snapshot snapShot = null;
-    DBIterator dbIter = null;
-    try {
-      snapShot = db.getSnapshot();
-      ReadOptions readOptions = new ReadOptions().snapshot(snapShot);
-      dbIter = db.iterator(readOptions);
-      if (startKey == null) {
-        dbIter.seekToFirst();
-      } else {
-        if (db.get(startKey) == null) {
-          // Key not found, return empty list
-          return result;
-        }
-        dbIter.seek(startKey);
-      }
-      while (dbIter.hasNext() && result.size() < count) {
-        byte[] preKey = dbIter.hasPrev() ? dbIter.peekPrev().getKey() : null;
-        byte[] nextKey = dbIter.hasNext() ? dbIter.peekNext().getKey() : null;
-        Entry<byte[], byte[]> current = dbIter.next();
-
-        if (filters == null) {
-          result.add(current);
-        } else {
-          if (Arrays.asList(filters).stream().allMatch(
-              entry -> entry.filterKey(preKey, current.getKey(), nextKey))) {
-            result.add(current);
-          } else {
-            if (result.size() > 0 && sequential) {
-              // if the caller asks for a sequential range of results,
-              // and we hit a mismatch, abort the iteration here.
-              // if the result is empty, keep looking for the first match.
-              break;
-            }
-          }
-        }
-      }
-    } finally {
-      if (snapShot != null) {
-        snapShot.close();
-      }
-      if (dbIter != null) {
-        dbIter.close();
-      }
-      if (LOG.isDebugEnabled()) {
-        // The outer guard is enough; no need to re-check isDebugEnabled here.
-        if (filters != null) {
-          for (MetadataKeyFilters.MetadataKeyFilter filter : filters) {
-            int scanned = filter.getKeysScannedNum();
-            int hinted = filter.getKeysHintedNum();
-            if (scanned > 0 || hinted > 0) {
-              LOG.debug(
-                  "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}",
-                  filter.getClass().getSimpleName(), scanned, hinted);
-            }
-          }
-        }
-        long timeConsumed = System.currentTimeMillis() - start;
-        LOG.debug("Time consumed for getRangeKVs() is {}ms,"
-            + " result length is {}.", timeConsumed, result.size());
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public MetaStoreIterator<KeyValue> iterator() {
-    return new LevelDBStoreIterator(db.iterator());
-  }
-}
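
Putting the pieces together, opening a store and reading a bounded range
back looks like this sketch (the path and keys are illustrative):

    import java.io.File;
    import java.nio.charset.StandardCharsets;
    import java.util.Map;

    MetadataStore store = new LevelDBStore(new File("/tmp/example-db"), true);
    try {
      store.put("b1/k1".getBytes(StandardCharsets.UTF_8),
          "v1".getBytes(StandardCharsets.UTF_8));

      // Up to 100 entries from the beginning (startKey == null), unfiltered.
      for (Map.Entry<byte[], byte[]> e : store.getRangeKVs(null, 100)) {
        System.out.println(new String(e.getKey(), StandardCharsets.UTF_8));
      }
    } finally {
      store.close();
    }
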
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java
deleted file mode 100644
index f5b6769..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import org.iq80.leveldb.DBIterator;
-import java.util.Map;
-import java.util.NoSuchElementException;
-
-/**
- * LevelDB store iterator.
- */
-public class LevelDBStoreIterator
-    implements MetaStoreIterator<MetadataStore.KeyValue> {
-
-  private DBIterator levelDBIterator;
-
-  public LevelDBStoreIterator(DBIterator iterator) {
-    this.levelDBIterator = iterator;
-    levelDBIterator.seekToFirst();
-  }
-
-  @Override
-  public boolean hasNext() {
-    return levelDBIterator.hasNext();
-  }
-
-  @Override
-  public MetadataStore.KeyValue next() {
-    if (levelDBIterator.hasNext()) {
-      Map.Entry<byte[], byte[]> entry = levelDBIterator.next();
-      return MetadataStore.KeyValue.create(entry.getKey(), entry.getValue());
-    }
-    throw new NoSuchElementException("LevelDB Store has no more elements");
-  }
-
-  @Override
-  public void seekToFirst() {
-    levelDBIterator.seekToFirst();
-  }
-
-  @Override
-  public void seekToLast() {
-    levelDBIterator.seekToLast();
-  }
-
-}
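
The iterator view gives sequential access without loading a whole range into
memory. A sketch, where store is the LevelDBStore opened in the example
above:

    MetaStoreIterator<MetadataStore.KeyValue> it = store.iterator();
    while (it.hasNext()) {
      MetadataStore.KeyValue kv = it.next();
      // kv.getKey() and kv.getValue() return defensive copies
    }
    it.seekToFirst(); // rewind for another pass
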
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetaStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetaStoreIterator.java
deleted file mode 100644
index 2a33de7..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetaStoreIterator.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import java.util.Iterator;
-
-/**
- * Iterator for a {@link MetadataStore} DB.
- * @param <T> the entry type
- */
-public interface MetaStoreIterator<T> extends Iterator<T> {
-
-  /**
-   * seek to first entry.
-   */
-  void seekToFirst();
-
-  /**
-   * seek to last entry.
-   */
-  void seekToLast();
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
deleted file mode 100644
index a88ce47..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.utils;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * A utility class to filter levelDB keys.
- */
-public final class MetadataKeyFilters {
-
-  private static KeyPrefixFilter deletingKeyFilter =
-      new MetadataKeyFilters.KeyPrefixFilter()
-          .addFilter(OzoneConsts.DELETING_KEY_PREFIX);
-
-  private static KeyPrefixFilter deletedKeyFilter =
-      new MetadataKeyFilters.KeyPrefixFilter()
-          .addFilter(OzoneConsts.DELETED_KEY_PREFIX);
-
-  private static KeyPrefixFilter normalKeyFilter =
-      new MetadataKeyFilters.KeyPrefixFilter()
-          .addFilter(OzoneConsts.DELETING_KEY_PREFIX, true)
-          .addFilter(OzoneConsts.DELETED_KEY_PREFIX, true)
-          .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true)
-          .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true);
-
-  private MetadataKeyFilters() {
-  }
-
-  public static KeyPrefixFilter getDeletingKeyFilter() {
-    return deletingKeyFilter;
-  }
-
-  public static KeyPrefixFilter getDeletedKeyFilter() {
-    return deletedKeyFilter;
-  }
-
-  public static KeyPrefixFilter getNormalKeyFilter() {
-    return normalKeyFilter;
-  }
-  /**
-   * Interface for levelDB key filters.
-   */
-  public interface MetadataKeyFilter {
-    /**
-     * Filters a levelDB key with a certain condition.
-     *
-     * @param preKey     previous key.
-     * @param currentKey current key.
-     * @param nextKey    next key.
-     * @return true if the condition is satisfied, false otherwise.
-     */
-    boolean filterKey(byte[] preKey, byte[] currentKey, byte[] nextKey);
-
-    default int getKeysScannedNum() {
-      return 0;
-    }
-
-    default int getKeysHintedNum() {
-      return 0;
-    }
-  }
-
-  /**
-   * Utility class to filter key by a string prefix. This filter
-   * assumes keys can be parsed to a string.
-   */
-  public static class KeyPrefixFilter implements MetadataKeyFilter {
-
-    private List<String> positivePrefixList = new ArrayList<>();
-    private List<String> negativePrefixList = new ArrayList<>();
-    private boolean atleastOnePositiveMatch;
-    private int keysScanned = 0;
-    private int keysHinted = 0;
-
-    public KeyPrefixFilter() {}
-
-    /**
-     * KeyPrefixFilter constructor. The filter is made of a positive and a
-     * negative prefix list: positivePrefixList holds the prefixes that are
-     * accepted, whereas negativePrefixList holds the prefixes that are
-     * rejected.
-     *
-     * @param atleastOnePositiveMatch if true, a key must be accepted by at
-     *                               least one positive filter.
-     */
-    public KeyPrefixFilter(boolean atleastOnePositiveMatch) {
-      this.atleastOnePositiveMatch = atleastOnePositiveMatch;
-    }
-
-    public KeyPrefixFilter addFilter(String keyPrefix) {
-      addFilter(keyPrefix, false);
-      return this;
-    }
-
-    public KeyPrefixFilter addFilter(String keyPrefix, boolean negative) {
-      Preconditions.checkArgument(!Strings.isNullOrEmpty(keyPrefix),
-          "KeyPrefix is null or empty: " + keyPrefix);
-      // keyPrefix which needs to be added should not be prefix of any opposing
-      // filter already present. If keyPrefix is a negative filter it should not
-      // be a prefix of any positive filter. Nor should any opposing filter be
-      // a prefix of keyPrefix.
-      // For example if b0 is accepted b can not be rejected and
-      // if b is accepted b0 can not be rejected. If these scenarios need to be
-      // handled we need to add priorities.
-      if (negative) {
-        Preconditions.checkArgument(positivePrefixList.stream().noneMatch(
-            prefix -> prefix.startsWith(keyPrefix) || keyPrefix
-                .startsWith(prefix)),
-            "KeyPrefix: " + keyPrefix + " already accepted.");
-        this.negativePrefixList.add(keyPrefix);
-      } else {
-        Preconditions.checkArgument(negativePrefixList.stream().noneMatch(
-            prefix -> prefix.startsWith(keyPrefix) || keyPrefix
-                .startsWith(prefix)),
-            "KeyPrefix: " + keyPrefix + " already rejected.");
-        this.positivePrefixList.add(keyPrefix);
-      }
-      return this;
-    }
-
-    @Override
-    public boolean filterKey(byte[] preKey, byte[] currentKey,
-        byte[] nextKey) {
-      keysScanned++;
-      if (currentKey == null) {
-        return false;
-      }
-      boolean accept;
-
-      // There are no filters present
-      if (positivePrefixList.isEmpty() && negativePrefixList.isEmpty()) {
-        return true;
-      }
-
-      accept = !positivePrefixList.isEmpty() && positivePrefixList.stream()
-          .anyMatch(prefix -> {
-            byte[] prefixBytes = DFSUtil.string2Bytes(prefix);
-            return prefixMatch(prefixBytes, currentKey);
-          });
-      if (accept) {
-        keysHinted++;
-        return true;
-      } else if (atleastOnePositiveMatch) {
-        return false;
-      }
-
-      accept = !negativePrefixList.isEmpty() && negativePrefixList.stream()
-          .allMatch(prefix -> {
-            byte[] prefixBytes = DFSUtil.string2Bytes(prefix);
-            return !prefixMatch(prefixBytes, currentKey);
-          });
-      if (accept) {
-        keysHinted++;
-        return true;
-      }
-
-      return false;
-    }
-
-    @Override
-    public int getKeysScannedNum() {
-      return keysScanned;
-    }
-
-    @Override
-    public int getKeysHintedNum() {
-      return keysHinted;
-    }
-
-    private static boolean prefixMatch(byte[] prefix, byte[] key) {
-      Preconditions.checkNotNull(prefix);
-      Preconditions.checkNotNull(key);
-      if (key.length < prefix.length) {
-        return false;
-      }
-      for (int i = 0; i < prefix.length; i++) {
-        if (key[i] != prefix[i]) {
-          return false;
-        }
-      }
-      return true;
-    }
-  }
-}
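
Building a filter and applying it to a key looks like the following sketch
(the prefixes are illustrative, not the real OzoneConsts values):

    MetadataKeyFilters.KeyPrefixFilter filter =
        new MetadataKeyFilters.KeyPrefixFilter()
            .addFilter("#deleting#");

    // Adding "#deleting#" as a negative filter now would throw an
    // IllegalArgumentException, since it conflicts with the positive one.

    boolean accepted = filter.filterKey(null,
        DFSUtil.string2Bytes("#deleting#/vol/bucket/key"), null); // true
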
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStore.java
deleted file mode 100644
index f05e6d2..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStore.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Interface for a key-value store that stores Ozone metadata.
- * Ozone metadata is stored as key-value pairs; both key and value
- * are arbitrary byte arrays.
- */
-@InterfaceStability.Evolving
-public interface MetadataStore extends Closeable {
-
-  /**
-   * Puts a key-value pair into the store.
-   *
-   * @param key metadata key
-   * @param value metadata value
-   */
-  void put(byte[] key, byte[] value) throws IOException;
-
-  /**
-   * @return true if the metadata store is empty.
-   *
-   * @throws IOException
-   */
-  boolean isEmpty() throws IOException;
-
-  /**
-   * Returns the value mapped to the given key in byte array.
-   *
-   * @param key metadata key
-   * @return value in byte array
-   * @throws IOException
-   */
-  byte[] get(byte[] key) throws IOException;
-
-  /**
-   * Deletes a key from the metadata store.
-   *
-   * @param key metadata key
-   * @throws IOException
-   */
-  void delete(byte[] key) throws IOException;
-
-  /**
-   * Returns a certain range of key value pairs as a list based on a
-   * startKey or count. Further a {@link MetadataKeyFilter} can be added to
-   * filter keys if necessary. To prevent race conditions while listing
-   * entries, this implementation takes a snapshot and lists the entries from
-   * the snapshot. This may, on the other hand, cause the range result to
-   * differ slightly from the actual data if the data is updated concurrently.
-   * <p>
-   * If the startKey is specified and found in levelDB, this key and the keys
-   * after this key will be included in the result. If the startKey is null
-   * all entries will be included as long as other conditions are satisfied.
-   * If the given startKey doesn't exist, an empty list will be returned.
-   * <p>
-   * The count argument is to limit number of total entries to return,
-   * the value for count must be an integer greater than 0.
-   * <p>
-   * This method allows the caller to specify one or more
-   * {@link MetadataKeyFilter}s to filter keys by certain conditions. Once
-   * given, only the entries
-   * whose key passes all the filters will be included in the result.
-   *
-   * @param startKey a start key.
-   * @param count max number of entries to return.
-   * @param filters customized one or more {@link MetadataKeyFilter}.
-   * @return a list of entries found in the database or an empty list if the
-   * startKey is invalid.
-   * @throws IOException if there are I/O errors.
-   * @throws IllegalArgumentException if count is less than 0.
-   */
-  List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException;
-
-  /**
-   * This method is very similar to {@link #getRangeKVs}; the only
-   * difference is that this method returns a sequential range
-   * of elements based on the filters. While iterating the elements,
-   * once it meets an entry that does not pass the filter, the iteration
-   * stops at that point without looking for the next match. If no filter is given,
-   * this method behaves just like {@link #getRangeKVs}.
-   *
-   * @param startKey a start key.
-   * @param count max number of entries to return.
-   * @param filters customized one or more {@link MetadataKeyFilter}.
-   * @return a list of entries found in the database.
-   * @throws IOException
-   * @throws IllegalArgumentException
-   */
-  List<Map.Entry<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException;
-
-  /**
-   * A batch of PUT, DELETE operations handled as a single atomic write.
-   *
-   * @throws IOException write fails
-   */
-  void writeBatch(BatchOperation operation) throws IOException;
-
-  /**
-   * Compact the entire database.
-   * @throws IOException
-   */
-  void compactDB() throws IOException;
-
-  /**
-   * Flush the outstanding I/O operations of the DB.
-   * @param sync if true will sync the outstanding I/Os to the disk.
-   */
-  void flushDB(boolean sync) throws IOException;
-
-  /**
-   * Destroys the content of the specified database;
-   * a destroyed database cannot be loaded again.
-   * Be very careful with this method.
-   *
-   * @throws IOException if I/O error happens
-   */
-  void destroy() throws IOException;
-
-  /**
-   * Seeks the database to a certain key and returns the key-value
-   * pair around this key based on the given offset. Note that this method
-   * only supports offsets -1 (left), 0 (current) and 1 (right);
-   * any other offset will cause an {@link IllegalArgumentException}.
-   *
-   * @param offset offset to the key
-   * @param from from which key
-   * @return a key-value pair
-   * @throws IOException
-   */
-  ImmutablePair<byte[], byte[]> peekAround(int offset, byte[] from)
-      throws IOException, IllegalArgumentException;
-
-  /**
-   * Iterates entries in the database from a certain key.
-   * Applies the given {@link EntryConsumer} to the key and value of
-   * each entry; the function produces a boolean result which is used
-   * as the criterion to exit the iteration.
-   *
-   * @param from the start key
-   * @param consumer
-   *   an {@link EntryConsumer} applied to each key and value. If the consumer
-   *   returns true, continues the iteration to next entry; otherwise exits
-   *   the iteration.
-   * @throws IOException
-   */
-  void iterate(byte[] from, EntryConsumer consumer)
-      throws IOException;
-
-  /**
-   * Returns the iterator for this metadata store.
-   * @return MetaStoreIterator
-   */
-  MetaStoreIterator<KeyValue> iterator();
-
-  /**
-   * Class used to represent the key and value pair of a db entry.
-   */
-  class KeyValue {
-
-    private final byte[] key;
-    private final byte[] value;
-
-    /**
-     * KeyValue Constructor, used to represent a key and value of a db entry.
-     * @param key
-     * @param value
-     */
-    private KeyValue(byte[] key, byte[] value) {
-      this.key = key;
-      this.value = value;
-    }
-
-    /**
-     * Return key.
-     * @return byte[]
-     */
-    public byte[] getKey() {
-      byte[] result = new byte[key.length];
-      System.arraycopy(key, 0, result, 0, key.length);
-      return result;
-    }
-
-    /**
-     * Return value.
-     * @return byte[]
-     */
-    public byte[] getValue() {
-      byte[] result = new byte[value.length];
-      System.arraycopy(value, 0, result, 0, value.length);
-      return result;
-    }
-
-    /**
-     * Create a KeyValue pair.
-     * @param key
-     * @param value
-     * @return KeyValue object.
-     */
-    public static KeyValue create(byte[] key, byte[] value) {
-      return new KeyValue(key, value);
-    }
-  }
-}
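
peekAround complements the iterator when only a single neighbouring entry is
needed. A sketch, where store is any MetadataStore implementation and the
key is illustrative:

    import java.nio.charset.StandardCharsets;
    import org.apache.commons.lang3.tuple.ImmutablePair;

    // offset 1 = the entry after the given key; -1 = before; 0 = the key.
    ImmutablePair<byte[], byte[]> next =
        store.peekAround(1, "b1/k1".getBytes(StandardCharsets.UTF_8));
    if (next != null) {
      System.out.println(new String(next.getKey(), StandardCharsets.UTF_8));
    }
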
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java
deleted file mode 100644
index 85bb6aa..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Optional;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-
-import com.google.common.annotations.VisibleForTesting;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
-import org.iq80.leveldb.Options;
-import org.rocksdb.BlockBasedTableConfig;
-import org.rocksdb.Statistics;
-import org.rocksdb.StatsLevel;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Builder for metadata store.
- */
-public class MetadataStoreBuilder {
-
-  @VisibleForTesting
-  static final Logger LOG =
-      LoggerFactory.getLogger(MetadataStoreBuilder.class);
-  private File dbFile;
-  private long cacheSize;
-  private boolean createIfMissing = true;
-  private Optional<Configuration> optionalConf = Optional.empty();
-  private String dbType;
-
-  public static MetadataStoreBuilder newBuilder() {
-    return new MetadataStoreBuilder();
-  }
-
-  public MetadataStoreBuilder setDbFile(File dbPath) {
-    this.dbFile = dbPath;
-    return this;
-  }
-
-  public MetadataStoreBuilder setCacheSize(long cache) {
-    this.cacheSize = cache;
-    return this;
-  }
-
-  public MetadataStoreBuilder setCreateIfMissing(boolean doCreate) {
-    this.createIfMissing = doCreate;
-    return this;
-  }
-
-  public MetadataStoreBuilder setConf(Configuration configuration) {
-    this.optionalConf = Optional.of(configuration);
-    return this;
-  }
-
-  /**
-   * Set the container DB Type.
-   * @param type
-   * @return MetadataStoreBuilder
-   */
-  public MetadataStoreBuilder setDBType(String type) {
-    this.dbType = type;
-    return this;
-  }
-
-
-  public MetadataStore build() throws IOException {
-    if (dbFile == null) {
-      throw new IllegalArgumentException("Failed to build metadata store, "
-          + "dbFile is required but not found");
-    }
-
-    // Build db store based on configuration
-    final Configuration conf = optionalConf.orElseGet(
-        () -> new OzoneConfiguration());
-
-    if (dbType == null) {
-      dbType = conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
-              OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
-      LOG.debug("dbType is null, using dbType {} from ozone configuration",
-          dbType);
-    } else {
-      LOG.debug("Using dbType {} for metastore", dbType);
-    }
-    if (OZONE_METADATA_STORE_IMPL_LEVELDB.equals(dbType)) {
-      Options options = new Options();
-      options.createIfMissing(createIfMissing);
-      if (cacheSize > 0) {
-        options.cacheSize(cacheSize);
-      }
-      return new LevelDBStore(dbFile, options);
-    } else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(dbType)) {
-      org.rocksdb.Options opts = new org.rocksdb.Options();
-      opts.setCreateIfMissing(createIfMissing);
-
-      if (cacheSize > 0) {
-        BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
-        tableConfig.setBlockCacheSize(cacheSize);
-        opts.setTableFormatConfig(tableConfig);
-      }
-
-      String rocksDbStat = conf.getTrimmed(
-          OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
-          OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
-
-      if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
-        Statistics statistics = new Statistics();
-        statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
-        opts = opts.setStatistics(statistics);
-
-      }
-      return new RocksDBStore(dbFile, opts);
-    }
-    
-    throw new IllegalArgumentException("Invalid argument for "
-        + OzoneConfigKeys.OZONE_METADATA_STORE_IMPL
-        + ". Expecting " + OZONE_METADATA_STORE_IMPL_LEVELDB
-        + " or " + OZONE_METADATA_STORE_IMPL_ROCKSDB
-        + ", but met " + dbType);
-  }
-}
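A minimal usage sketch for the builder above (the db path and cache size are
illustrative; when no type is set, dbType falls back to the
OzoneConfigKeys.OZONE_METADATA_STORE_IMPL setting):

    MetadataStore store = MetadataStoreBuilder.newBuilder()
        .setConf(new OzoneConfiguration())
        .setDbFile(new File("/tmp/container.db"))   // hypothetical location
        .setCacheSize(64L * 1024 * 1024)            // 64 MB block cache
        .setCreateIfMissing(true)
        .build();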
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RetriableTask.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RetriableTask.java
deleted file mode 100644
index a3ee1fd..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RetriableTask.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.utils;
-
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.util.ThreadUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.concurrent.Callable;
-
-/**
- * {@code Callable} implementation that retries a delegate task according to
- * the specified {@code RetryPolicy}.  Sleeps between retries in the caller
- * thread.
- *
- * @param <V> the result type of method {@code call}
- */
-public class RetriableTask<V> implements Callable<V> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RetriableTask.class);
-
-  private final String name;
-  private final Callable<V> task;
-  private final RetryPolicy retryPolicy;
-
-  public RetriableTask(RetryPolicy retryPolicy, String name, Callable<V> task) {
-    this.retryPolicy = retryPolicy;
-    this.name = name;
-    this.task = task;
-  }
-
-  @Override
-  public V call() throws Exception {
-    int attempts = 0;
-    Exception cause;
-    while (true) {
-      try {
-        return task.call();
-      } catch (Exception e) {
-        cause = e;
-        RetryPolicy.RetryAction action = retryPolicy.shouldRetry(e, ++attempts,
-             0, true);
-        if (action.action == RetryPolicy.RetryAction.RetryDecision.RETRY) {
-          LOG.info("Execution of task {} failed, will be retried in {} ms",
-              name, action.delayMillis);
-          ThreadUtil.sleepAtLeastIgnoreInterrupts(action.delayMillis);
-        } else {
-          break;
-        }
-      }
-    }
-
-    String msg = String.format(
-        "Execution of task %s failed permanently after %d attempts",
-        name, attempts);
-    LOG.warn(msg, cause);
-    throw new IOException(msg, cause);
-  }
-
-}
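A hedged usage sketch for RetriableTask: it wraps any Callable with a stock
Hadoop RetryPolicy. The policy, task name and "dir" File below are illustrative;
RetryPolicies is org.apache.hadoop.io.retry.RetryPolicies:

    RetryPolicy policy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        3, 1, TimeUnit.SECONDS);                  // up to 3 retries, 1s apart
    RetriableTask<Boolean> task =
        new RetriableTask<>(policy, "createDir", () -> dir.mkdirs());
    boolean created = task.call();                // IOException once retries are exhausted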
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java
deleted file mode 100644
index 7dd1bde..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.ratis.thirdparty.com.google.common.annotations.
-    VisibleForTesting;
-import org.rocksdb.DbPath;
-import org.rocksdb.Options;
-import org.rocksdb.RocksDB;
-import org.rocksdb.RocksDBException;
-import org.rocksdb.RocksIterator;
-import org.rocksdb.WriteBatch;
-import org.rocksdb.WriteOptions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.File;
-import java.io.IOException;
-import java.util.AbstractMap;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * RocksDB implementation of ozone metadata store.
- */
-public class RocksDBStore implements MetadataStore {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RocksDBStore.class);
-
-  private RocksDB db = null;
-  private File dbLocation;
-  private WriteOptions writeOptions;
-  private Options dbOptions;
-  private ObjectName statMBeanName;
-
-  public RocksDBStore(File dbFile, Options options)
-      throws IOException {
-    Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
-    RocksDB.loadLibrary();
-    dbOptions = options;
-    dbLocation = dbFile;
-    writeOptions = new WriteOptions();
-    try {
-
-      db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath());
-      if (dbOptions.statistics() != null) {
-
-        Map<String, String> jmxProperties = new HashMap<String, String>();
-        jmxProperties.put("dbName", dbFile.getName());
-        statMBeanName = HddsUtils.registerWithJmxProperties(
-            "Ozone", "RocksDbStore", jmxProperties,
-            RocksDBStoreMBean.create(dbOptions.statistics(),
-                dbFile.getName()));
-        if (statMBeanName == null) {
-          LOG.warn("jmx registration failed during RocksDB init, db path :{}",
-              dbFile.getAbsolutePath());
-        }
-      }
-    } catch (RocksDBException e) {
-      throw new IOException(
-          "Failed init RocksDB, db path : " + dbFile.getAbsolutePath(), e);
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("RocksDB successfully opened.");
-      LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath());
-      LOG.debug("[Option] createIfMissing = {}", options.createIfMissing());
-      LOG.debug("[Option] compactionPriority= {}", options.compactionStyle());
-      LOG.debug("[Option] compressionType= {}", options.compressionType());
-      LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles());
-      LOG.debug("[Option] writeBufferSize= {}", options.writeBufferSize());
-    }
-  }
-
-  public static IOException toIOException(String msg, RocksDBException e) {
-    String statusCode = e.getStatus() == null ? "N/A" :
-        e.getStatus().getCodeString();
-    String errMessage = e.getMessage() == null ? "Unknown error" :
-        e.getMessage();
-    String output = msg + "; status : " + statusCode
-        + "; message : " + errMessage;
-    return new IOException(output, e);
-  }
-
-  @Override
-  public void put(byte[] key, byte[] value) throws IOException {
-    try {
-      db.put(writeOptions, key, value);
-    } catch (RocksDBException e) {
-      throw toIOException("Failed to put key-value to metadata store", e);
-    }
-  }
-
-  @Override
-  public boolean isEmpty() throws IOException {
-    RocksIterator it = null;
-    try {
-      it = db.newIterator();
-      it.seekToFirst();
-      return !it.isValid();
-    } finally {
-      if (it != null) {
-        it.close();
-      }
-    }
-  }
-
-  @Override
-  public byte[] get(byte[] key) throws IOException {
-    try {
-      return db.get(key);
-    } catch (RocksDBException e) {
-      throw toIOException("Failed to get the value for the given key", e);
-    }
-  }
-
-  @Override
-  public void delete(byte[] key) throws IOException {
-    try {
-      db.delete(key);
-    } catch (RocksDBException e) {
-      throw toIOException("Failed to delete the given key", e);
-    }
-  }
-
-  @Override
-  public List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    return getRangeKVs(startKey, count, false, filters);
-  }
-
-  @Override
-  public List<Map.Entry<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    return getRangeKVs(startKey, count, true, filters);
-  }
-
-  private List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, boolean sequential,
-      MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    List<Map.Entry<byte[], byte[]>> result = new ArrayList<>();
-    long start = System.currentTimeMillis();
-    if (count < 0) {
-      throw new IllegalArgumentException(
-          "Invalid count given " + count + ", count must be greater than 0");
-    }
-    RocksIterator it = null;
-    try {
-      it = db.newIterator();
-      if (startKey == null) {
-        it.seekToFirst();
-      } else {
-        if (get(startKey) == null) {
-          // Key not found, return empty list
-          return result;
-        }
-        it.seek(startKey);
-      }
-      while (it.isValid() && result.size() < count) {
-        byte[] currentKey = it.key();
-        byte[] currentValue = it.value();
-
-        it.prev();
-        final byte[] prevKey = it.isValid() ? it.key() : null;
-
-        it.seek(currentKey);
-        it.next();
-        final byte[] nextKey = it.isValid() ? it.key() : null;
-
-        if (filters == null) {
-          result.add(new AbstractMap.SimpleImmutableEntry<>(currentKey,
-              currentValue));
-        } else {
-          if (Arrays.asList(filters).stream()
-              .allMatch(entry -> entry.filterKey(prevKey,
-                  currentKey, nextKey))) {
-            result.add(new AbstractMap.SimpleImmutableEntry<>(currentKey,
-                currentValue));
-          } else {
-            if (result.size() > 0 && sequential) {
-              // if the caller asks for a sequential range of results,
-              // and we hit a mismatch, abort the iteration here.
-              // if the result is still empty, keep looking for the first match.
-              break;
-            }
-          }
-        }
-      }
-    } finally {
-      if (it != null) {
-        it.close();
-      }
-      long end = System.currentTimeMillis();
-      long timeConsumed = end - start;
-      if (LOG.isDebugEnabled()) {
-        if (filters != null) {
-          for (MetadataKeyFilters.MetadataKeyFilter filter : filters) {
-            int scanned = filter.getKeysScannedNum();
-            int hinted = filter.getKeysHintedNum();
-            if (scanned > 0 || hinted > 0) {
-              LOG.debug(
-                  "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}",
-                  filter.getClass().getSimpleName(), scanned, hinted);
-            }
-          }
-        }
-        LOG.debug("Time consumed for getRangeKVs() is {}ms,"
-            + " result length is {}.", timeConsumed, result.size());
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public void writeBatch(BatchOperation operation)
-      throws IOException {
-    List<BatchOperation.SingleOperation> operations =
-        operation.getOperations();
-    if (!operations.isEmpty()) {
-      try (WriteBatch writeBatch = new WriteBatch()) {
-        for (BatchOperation.SingleOperation opt : operations) {
-          switch (opt.getOpt()) {
-          case DELETE:
-            writeBatch.delete(opt.getKey());
-            break;
-          case PUT:
-            writeBatch.put(opt.getKey(), opt.getValue());
-            break;
-          default:
-            throw new IllegalArgumentException("Invalid operation "
-                + opt.getOpt());
-          }
-        }
-        db.write(writeOptions, writeBatch);
-      } catch (RocksDBException e) {
-        throw toIOException("Batch write operation failed", e);
-      }
-    }
-  }
-
-  @Override
-  public void compactDB() throws IOException {
-    if (db != null) {
-      try {
-        db.compactRange();
-      } catch (RocksDBException e) {
-        throw toIOException("Failed to compact db", e);
-      }
-    }
-  }
-
-  @Override
-  public void flushDB(boolean sync) throws IOException {
-    if (db != null) {
-      try {
-        // for RocksDB it is sufficient to flush the WAL, as the entire db
-        // can be reconstructed from it.
-        db.flushWal(sync);
-      } catch (RocksDBException e) {
-        throw toIOException("Failed to flush db", e);
-      }
-    }
-  }
-
-  private void deleteQuietly(File fileOrDir) {
-    if (fileOrDir != null && fileOrDir.exists()) {
-      try {
-        FileUtils.forceDelete(fileOrDir);
-      } catch (IOException e) {
-        LOG.warn("Failed to delete dir {}", fileOrDir.getAbsolutePath(), e);
-      }
-    }
-  }
-
-  @Override
-  public void destroy() throws IOException {
-    // Make sure db is closed.
-    close();
-
-    // There is no destroyDB Java API available;
-    // equivalently, we delete all db directories.
-    deleteQuietly(dbLocation);
-    deleteQuietly(new File(dbOptions.dbLogDir()));
-    deleteQuietly(new File(dbOptions.walDir()));
-    List<DbPath> dbPaths = dbOptions.dbPaths();
-    if (dbPaths != null) {
-      dbPaths.forEach(dbPath -> {
-        deleteQuietly(new File(dbPath.toString()));
-      });
-    }
-  }
-
-  @Override
-  public ImmutablePair<byte[], byte[]> peekAround(int offset,
-      byte[] from) throws IOException, IllegalArgumentException {
-    RocksIterator it = null;
-    try {
-      it = db.newIterator();
-      if (from == null) {
-        it.seekToFirst();
-      } else {
-        it.seek(from);
-      }
-      if (!it.isValid()) {
-        return null;
-      }
-
-      switch (offset) {
-      case 0:
-        break;
-      case 1:
-        it.next();
-        break;
-      case -1:
-        it.prev();
-        break;
-      default:
-        throw new IllegalArgumentException(
-            "Position can only be -1, 0 " + "or 1, but found " + offset);
-      }
-      return it.isValid() ? new ImmutablePair<>(it.key(), it.value()) : null;
-    } finally {
-      if (it != null) {
-        it.close();
-      }
-    }
-  }
-
-  @Override
-  public void iterate(byte[] from, EntryConsumer consumer)
-      throws IOException {
-    RocksIterator it = null;
-    try {
-      it = db.newIterator();
-      if (from != null) {
-        it.seek(from);
-      } else {
-        it.seekToFirst();
-      }
-      while (it.isValid()) {
-        if (!consumer.consume(it.key(), it.value())) {
-          break;
-        }
-        it.next();
-      }
-    } finally {
-      if (it != null) {
-        it.close();
-      }
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (statMBeanName != null) {
-      MBeans.unregister(statMBeanName);
-      statMBeanName = null;
-    }
-    if (db != null) {
-      db.close();
-    }
-
-  }
-
-  @VisibleForTesting
-  protected ObjectName getStatMBeanName() {
-    return statMBeanName;
-  }
-
-  @Override
-  public MetaStoreIterator<KeyValue> iterator() {
-    return new RocksDBStoreIterator(db.newIterator());
-  }
-
-}
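A sketch of direct RocksDBStore usage (the path and keys are illustrative;
UTF_8 is java.nio.charset.StandardCharsets.UTF_8):

    MetadataStore store = new RocksDBStore(new File("/tmp/scm.db"),
        new org.rocksdb.Options().setCreateIfMissing(true));
    store.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8));
    byte[] v = store.get("k1".getBytes(UTF_8));   // null if the key is absent
    store.close();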
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java
deleted file mode 100644
index e39ec57..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-package org.apache.hadoop.hdds.utils;
-
-import org.rocksdb.RocksIterator;
-
-import java.util.NoSuchElementException;
-
-/**
- * RocksDB store iterator.
- */
-public class RocksDBStoreIterator
-    implements MetaStoreIterator<MetadataStore.KeyValue> {
-
-  private RocksIterator rocksDBIterator;
-
-  public RocksDBStoreIterator(RocksIterator iterator) {
-    this.rocksDBIterator = iterator;
-    rocksDBIterator.seekToFirst();
-  }
-
-  @Override
-  public boolean hasNext() {
-    return rocksDBIterator.isValid();
-  }
-
-  @Override
-  public MetadataStore.KeyValue next() {
-    if (rocksDBIterator.isValid()) {
-      MetadataStore.KeyValue value =
-          MetadataStore.KeyValue.create(rocksDBIterator.key(), rocksDBIterator
-              .value());
-      rocksDBIterator.next();
-      return value;
-    }
-    throw new NoSuchElementException("RocksDB Store has no more elements");
-  }
-
-  @Override
-  public void seekToFirst() {
-    rocksDBIterator.seekToFirst();
-  }
-
-  @Override
-  public void seekToLast() {
-    rocksDBIterator.seekToLast();
-  }
-
-}
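Typical traversal via the iterator above, as a sketch ("store" is an assumed
MetadataStore; handle() is a hypothetical per-entry callback):

    MetaStoreIterator<MetadataStore.KeyValue> it = store.iterator();
    while (it.hasNext()) {
      MetadataStore.KeyValue kv = it.next();
      handle(kv.getKey(), kv.getValue());
    }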
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMBean.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMBean.java
deleted file mode 100644
index 60d4db8..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMBean.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.Interns;
-import org.rocksdb.HistogramData;
-import org.rocksdb.HistogramType;
-import org.rocksdb.Statistics;
-import org.rocksdb.TickerType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.Attribute;
-import javax.management.AttributeList;
-import javax.management.AttributeNotFoundException;
-import javax.management.DynamicMBean;
-import javax.management.InvalidAttributeValueException;
-import javax.management.MBeanAttributeInfo;
-import javax.management.MBeanException;
-import javax.management.MBeanInfo;
-import javax.management.ReflectionException;
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Adapter JMX bean to publish all the RocksDB metrics.
- */
-public class RocksDBStoreMBean implements DynamicMBean, MetricsSource {
-
-  private Statistics statistics;
-
-  private Set<String> histogramAttributes = new HashSet<>();
-
-  private String contextName;
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RocksDBStoreMBean.class);
-
-  public static final String ROCKSDB_CONTEXT_PREFIX = "Rocksdb_";
-
-  public RocksDBStoreMBean(Statistics statistics, String dbName) {
-    this.contextName = ROCKSDB_CONTEXT_PREFIX + dbName;
-    this.statistics = statistics;
-    histogramAttributes.add("Average");
-    histogramAttributes.add("Median");
-    histogramAttributes.add("Percentile95");
-    histogramAttributes.add("Percentile99");
-    histogramAttributes.add("StandardDeviation");
-  }
-
-  public static RocksDBStoreMBean create(Statistics statistics,
-                                         String contextName) {
-
-    RocksDBStoreMBean rocksDBStoreMBean = new RocksDBStoreMBean(
-        statistics, contextName);
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    MetricsSource metricsSource = ms.getSource(rocksDBStoreMBean.contextName);
-    if (metricsSource != null) {
-      return (RocksDBStoreMBean)metricsSource;
-    } else {
-      return ms.register(rocksDBStoreMBean.contextName,
-          "RocksDB Metrics",
-          rocksDBStoreMBean);
-    }
-  }
-
-  @Override
-  public Object getAttribute(String attribute)
-      throws AttributeNotFoundException, MBeanException, ReflectionException {
-    for (String histogramAttribute : histogramAttributes) {
-      if (attribute.endsWith("_" + histogramAttribute.toUpperCase())) {
-        String keyName = attribute
-            .substring(0, attribute.length() - histogramAttribute.length() - 1);
-        try {
-          HistogramData histogram =
-              statistics.getHistogramData(HistogramType.valueOf(keyName));
-          try {
-            Method method =
-                HistogramData.class.getMethod("get" + histogramAttribute);
-            return method.invoke(histogram);
-          } catch (Exception e) {
-            throw new ReflectionException(e,
-                "Can't read attribute " + attribute);
-          }
-        } catch (IllegalArgumentException exception) {
-          throw new AttributeNotFoundException(
-              "No such attribute in RocksDB stats: " + attribute);
-        }
-      }
-    }
-    try {
-      return statistics.getTickerCount(TickerType.valueOf(attribute));
-    } catch (IllegalArgumentException ex) {
-      throw new AttributeNotFoundException(
-          "No such attribute in RocksDB stats: " + attribute);
-    }
-  }
-
-  @Override
-  public void setAttribute(Attribute attribute)
-      throws AttributeNotFoundException, InvalidAttributeValueException,
-      MBeanException, ReflectionException {
-
-  }
-
-  @Override
-  public AttributeList getAttributes(String[] attributes) {
-    AttributeList result = new AttributeList();
-    for (String attributeName : attributes) {
-      try {
-        Object value = getAttribute(attributeName);
-        result.add(value);
-      } catch (Exception e) {
-        //TODO
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public AttributeList setAttributes(AttributeList attributes) {
-    return null;
-  }
-
-  @Override
-  public Object invoke(String actionName, Object[] params, String[] signature)
-      throws MBeanException, ReflectionException {
-    return null;
-  }
-
-  @Override
-  public MBeanInfo getMBeanInfo() {
-
-    List<MBeanAttributeInfo> attributes = new ArrayList<>();
-    for (TickerType tickerType : TickerType.values()) {
-      attributes.add(new MBeanAttributeInfo(tickerType.name(), "long",
-          "RocksDBStat: " + tickerType.name(), true, false, false));
-    }
-    for (HistogramType histogramType : HistogramType.values()) {
-      for (String histogramAttribute : histogramAttributes) {
-        attributes.add(new MBeanAttributeInfo(
-            histogramType.name() + "_" + histogramAttribute.toUpperCase(),
-            "long", "RocksDBStat: " + histogramType.name(), true, false,
-            false));
-      }
-    }
-
-    return new MBeanInfo("", "RocksDBStat",
-        attributes.toArray(new MBeanAttributeInfo[0]), null, null, null);
-
-  }
-
-  @Override
-  public void getMetrics(MetricsCollector metricsCollector, boolean b) {
-    MetricsRecordBuilder rb = metricsCollector.addRecord(contextName);
-    getHistogramData(rb);
-    getTickerTypeData(rb);
-  }
-
-  /**
-   * Collect all histogram metrics from RocksDB statistics.
-   * @param rb Metrics Record Builder.
-   */
-  private void getHistogramData(MetricsRecordBuilder rb) {
-    for (HistogramType histogramType : HistogramType.values()) {
-      HistogramData histogram =
-          statistics.getHistogramData(histogramType);
-      for (String histogramAttribute : histogramAttributes) {
-        try {
-          Method method =
-              HistogramData.class.getMethod("get" + histogramAttribute);
-          double metricValue =  (double) method.invoke(histogram);
-          rb.addGauge(Interns.info(histogramType.name() + "_" +
-                  histogramAttribute.toUpperCase(), "RocksDBStat"),
-              metricValue);
-        } catch (Exception e) {
-          LOG.error("Error reading histogram data {} ", e);
-        }
-      }
-    }
-  }
-
-  /**
-   * Collect all Counter metrics from RocksDB statistics.
-   * @param rb Metrics Record Builder.
-   */
-  private void getTickerTypeData(MetricsRecordBuilder rb) {
-    for (TickerType tickerType : TickerType.values()) {
-      rb.addCounter(Interns.info(tickerType.name(), "RocksDBStat"),
-          statistics.getTickerCount(tickerType));
-    }
-  }
-
-}
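The attribute naming scheme above maps directly onto RocksDB's TickerType and
HistogramType enums, as in this sketch ("statistics" is an assumed
org.rocksdb.Statistics instance; the javax.management checked exceptions are elided):

    RocksDBStoreMBean bean = RocksDBStoreMBean.create(statistics, "containerDB");
    Object bytesWritten = bean.getAttribute("BYTES_WRITTEN");    // TickerType counter
    Object avgGetMicros = bean.getAttribute("DB_GET_AVERAGE");   // HistogramType.DB_GET, "Average"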
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java
deleted file mode 100644
index 9edc104..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.utils;
-
-import org.apache.ratis.util.function.CheckedRunnable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-/**
- * This class encapsulates ScheduledExecutorService.
- */
-public class Scheduler {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(Scheduler.class);
-
-  private ScheduledExecutorService scheduler;
-
-  private volatile boolean isClosed;
-
-  private String threadName;
-
-  /**
-   * Creates a ScheduledExecutorService based on input arguments.
-   * @param threadName - thread name
-   * @param isDaemon - if true the threads in the scheduler are started as
-   *                 daemon
-   * @param numCoreThreads - number of core threads to maintain in the scheduler
-   */
-  public Scheduler(String threadName, boolean isDaemon, int numCoreThreads) {
-    scheduler = Executors.newScheduledThreadPool(numCoreThreads, r -> {
-      Thread t = new Thread(r);
-      t.setName(threadName);
-      t.setDaemon(isDaemon);
-      return t;
-    });
-    this.threadName = threadName;
-    isClosed = false;
-  }
-
-  public void schedule(Runnable runnable, long delay, TimeUnit timeUnit) {
-    scheduler.schedule(runnable, delay, timeUnit);
-  }
-
-  public void schedule(CheckedRunnable runnable, long delay,
-      TimeUnit timeUnit, Logger logger, String errMsg) {
-    scheduler.schedule(() -> {
-      try {
-        runnable.run();
-      } catch (Throwable throwable) {
-        logger.error(errMsg, throwable);
-      }
-    }, delay, timeUnit);
-  }
-
-  public void scheduleWithFixedDelay(Runnable runnable, long initialDelay,
-      long fixedDelay, TimeUnit timeUnit) {
-    scheduler
-        .scheduleWithFixedDelay(runnable, initialDelay, fixedDelay, timeUnit);
-  }
-
-  public boolean isClosed() {
-    return isClosed;
-  }
-
-  /**
-   * Closes the scheduler for further task submission. Any pending tasks not
-   * yet executed are also cancelled. For currently executing tasks the
-   * scheduler waits up to 60 seconds for completion.
-   */
-  public synchronized void close() {
-    isClosed = true;
-    if (scheduler != null) {
-      scheduler.shutdownNow();
-      try {
-        scheduler.awaitTermination(60, TimeUnit.SECONDS);
-      } catch (InterruptedException e) {
-        LOG.info("{} interrupted while waiting for task completion",
-            threadName, e);
-      }
-    }
-    scheduler = null;
-  }
-}
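A hedged sketch of Scheduler usage (the thread name, period, and LOG instance
are illustrative):

    Scheduler scheduler = new Scheduler("DeletionService", true, 1);
    scheduler.scheduleWithFixedDelay(
        () -> LOG.info("periodic sweep"), 0, 60, TimeUnit.SECONDS);
    // ... later, during shutdown:
    scheduler.close();   // cancels pending tasks, waits up to 60s for running ones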
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
deleted file mode 100644
index 0914536..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils;
-
-import org.apache.hadoop.hdds.HddsUtils;
-
-/**
- * This class uses the system time in milliseconds to generate a unique id.
- */
-public final class UniqueId {
-    /*
-     * When we represent time in milliseconds using the 'long' data type,
-     * the LSB bits are used. Currently we only use 44 bits (LSB);
-     * the top 20 bits (MSB) are unused.
-     * We will exhaust these 44 bits only in the year 2525, so until then
-     * we can safely use the 20 MSB bits as an offset to generate unique
-     * ids within the same millisecond.
-     *
-     * Year        : Mon Dec 31 18:49:04 IST 2525
-     * TimeInMillis: 17545641544247
-     * Binary Representation:
-     *   MSB (20 bits): 0000 0000 0000 0000 0000
-     *   LSB (44 bits): 1111 1111 0101 0010 1001 1011 1011 0100 1010 0011 0111
-     *
-     * We have 20 bits to run a counter, but we should exclude the first
-     * bit (MSB) as we don't want to deal with negative values.
-     * To be on the safer side we use the 'short' data type, which is
-     * 16 bits long and gives us 65,536 values for the offset.
-     *
-     */
-
-  private static volatile short offset = 0;
-
-  /**
-   * Private constructor so that no one can instantiate this class.
-   */
-  private UniqueId() {}
-
-  /**
-   * Calculates and returns the next unique id based on System#currentTimeMillis.
-   *
-   * @return unique long value
-   */
-  public static synchronized long next() {
-    long utcTime = HddsUtils.getUtcTime();
-    if ((utcTime & 0xFFFF000000000000L) == 0) {
-      return utcTime << Short.SIZE | (offset++ & 0x0000FFFF);
-    }
-    throw new RuntimeException("Got invalid UTC time," +
-        " cannot generate unique Id. UTC Time: " + utcTime);
-  }
-}
\ No newline at end of file
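To make the bit layout above concrete, an id produced by next() decomposes as
in this sketch (the shift mirrors the utcTime << Short.SIZE in the source):

    long id = UniqueId.next();
    long millis  = id >>> Short.SIZE;      // upper 48 bits: UTC time in ms
    int  counter = (int) (id & 0xFFFF);    // lower 16 bits: per-millisecond offset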
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java
deleted file mode 100644
index ca9f859..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.utils;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.util.ThreadUtil;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Properties;
-
-/**
- * This class returns build information about Hadoop components.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class VersionInfo {
-
-  private final Properties info = new Properties();
-
-  public VersionInfo(String component) {
-    String versionInfoFile = component + "-version-info.properties";
-    InputStream is = null;
-    try {
-      is = ThreadUtil.getResourceAsStream(
-        getClass().getClassLoader(),
-        versionInfoFile);
-      info.load(is);
-    } catch (IOException ex) {
-      LoggerFactory.getLogger(getClass()).warn("Could not read '" +
-          versionInfoFile + "', " + ex.toString(), ex);
-    } finally {
-      IOUtils.closeStream(is);
-    }
-  }
-
-  public String getRelease() {
-    return info.getProperty("release", "Unknown");
-  }
-
-  public String getVersion() {
-    return info.getProperty("version", "Unknown");
-  }
-
-  public String getRevision() {
-    return info.getProperty("revision", "Unknown");
-  }
-
-  public String getBranch() {
-    return info.getProperty("branch", "Unknown");
-  }
-
-  public String getDate() {
-    return info.getProperty("date", "Unknown");
-  }
-
-  public String getUser() {
-    return info.getProperty("user", "Unknown");
-  }
-
-  public String getUrl() {
-    return info.getProperty("url", "Unknown");
-  }
-
-  public String getSrcChecksum() {
-    return info.getProperty("srcChecksum", "Unknown");
-  }
-
-  public String getProtocVersion() {
-    return info.getProperty("protocVersion", "Unknown");
-  }
-
-  public String getBuildVersion() {
-    return getVersion() +
-        " from " + getRevision() +
-        " by " + getUser() +
-        " source checksum " + getSrcChecksum();
-  }
-}
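Usage sketch: the component name selects the properties file, so "hdds" reads
hdds-version-info.properties from the classpath (component name assumed for
illustration):

    VersionInfo version = new VersionInfo("hdds");
    System.out.println(version.getBuildVersion());
    // e.g. "0.5.0-SNAPSHOT from <revision> by <user> source checksum <sum>"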
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BatchOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BatchOperation.java
deleted file mode 100644
index 8ca5d18..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BatchOperation.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.utils.db;
-
-/**
- * Represents a batch operation that collects multiple db operations.
- */
-public interface BatchOperation extends AutoCloseable {
-
-  void close();
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayKeyValue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayKeyValue.java
deleted file mode 100644
index 7c60291..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayKeyValue.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.utils.db;
-
-import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
-
-/**
- * Key value for raw Table implementations.
- */
-public final class ByteArrayKeyValue implements KeyValue<byte[], byte[]> {
-  private byte[] key;
-  private byte[] value;
-
-  private ByteArrayKeyValue(byte[] key, byte[] value) {
-    this.key = key;
-    this.value = value;
-  }
-
-  /**
-   * Create a KeyValue pair.
-   *
-   * @param key   - Key Bytes
-   * @param value - Value bytes
-   * @return KeyValue object.
-   */
-  public static ByteArrayKeyValue create(byte[] key, byte[] value) {
-    return new ByteArrayKeyValue(key, value);
-  }
-
-  /**
-   * Return key.
-   *
-   * @return byte[]
-   */
-  public byte[] getKey() {
-    byte[] result = new byte[key.length];
-    System.arraycopy(key, 0, result, 0, key.length);
-    return result;
-  }
-
-  /**
-   * Return value.
-   *
-   * @return byte[]
-   */
-  public byte[] getValue() {
-    byte[] result = new byte[value.length];
-    System.arraycopy(value, 0, result, 0, value.length);
-    return result;
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java
deleted file mode 100644
index 36ece3e..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-
-/**
- * Codec interface to marshal/unmarshal data to/from a byte[] based
- * key/value store.
- *
- * @param <T> the unserialized type
- */
-public interface Codec<T> {
-
-  /**
-   * Convert object to raw persisted format.
-   * @param object The original java object. Should not be null.
-   */
-  byte[] toPersistedFormat(T object) throws IOException;
-
-  /**
-   * Convert object from raw persisted format.
-   *
-   * @param rawData Byte array from the key/value store. Should not be null.
-   */
-  T fromPersistedFormat(byte[] rawData) throws IOException;
-}
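A minimal Codec implementation sketch for the interface above (an Integer
codec, not one of the registered defaults; requires java.nio.ByteBuffer):

    public class IntegerCodec implements Codec<Integer> {
      @Override
      public byte[] toPersistedFormat(Integer object) {
        // fixed-width big-endian encoding of the int value
        return ByteBuffer.allocate(Integer.BYTES).putInt(object).array();
      }

      @Override
      public Integer fromPersistedFormat(byte[] rawData) {
        return ByteBuffer.wrap(rawData).getInt();
      }
    }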
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java
deleted file mode 100644
index f92189a..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Collection of available codecs.
- */
-public class CodecRegistry {
-
-  private Map<Class, Codec<?>> valueCodecs;
-
-  public CodecRegistry() {
-    valueCodecs = new HashMap<>();
-    valueCodecs.put(String.class, new StringCodec());
-    valueCodecs.put(Long.class, new LongCodec());
-  }
-
-  /**
-   * Convert raw value to strongly typed value/key with the help of a codec.
-   *
-   * @param rawData original byte array from the db.
-   * @param format  Class of the return value
-   * @param <T>     Type of the return value.
-   * @return the object with the parsed field data
-   */
-  public <T> T asObject(byte[] rawData, Class<T> format)
-      throws IOException {
-    if (rawData == null) {
-      return null;
-    }
-    Codec codec = getCodec(format);
-    return (T) codec.fromPersistedFormat(rawData);
-  }
-
-  /**
-   * Convert strongly typed object to raw data to store it in the kv store.
-   *
-   * @param object typed object.
-   * @param <T>    Type of the typed object.
-   * @return byte array to store in the kv store.
-   */
-  public <T> byte[] asRawData(T object) throws IOException {
-    Preconditions.checkNotNull(object,
-        "Null value shouldn't be persisted in the database");
-    Codec<T> codec = getCodec(object);
-    return codec.toPersistedFormat(object);
-  }
-
-  /**
-   * Get codec for the typed object including class and subclass.
-   * @param object typed object.
-   * @return Codec for the typed object.
-   * @throws IOException
-   */
-  private <T> Codec getCodec(T object) throws IOException {
-    Class<T> format = (Class<T>) object.getClass();
-    return getCodec(format);
-  }
-
-
-  /**
-   * Get codec for the given class, falling back to its superclass.
-   * @param format Class of the typed object.
-   * @return Codec for the typed object.
-   * @throws IOException
-   */
-  private <T> Codec getCodec(Class<T> format) throws IOException {
-    Codec<T> codec;
-    if (valueCodecs.containsKey(format)) {
-      codec = (Codec<T>) valueCodecs.get(format);
-    } else if (valueCodecs.containsKey(format.getSuperclass())) {
-      codec = (Codec<T>) valueCodecs.get(format.getSuperclass());
-    } else {
-      throw new IllegalStateException(
-          "Codec is not registered for type: " + format);
-    }
-    return codec;
-  }
-
-  /**
-   * Adds a codec to the internal collection.
-   *
-   * @param type  Type of the codec source/destination object.
-   * @param codec The codec itself.
-   * @param <T>   The type of the codec
-   */
-  public <T> void addCodec(Class<T> type, Codec<T> codec) {
-    valueCodecs.put(type, codec);
-  }
-
-}
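Registering and round-tripping through the registry looks like this sketch
(IntegerCodec is the hypothetical codec sketched earlier):

    CodecRegistry registry = new CodecRegistry();   // String and Long pre-registered
    registry.addCodec(Integer.class, new IntegerCodec());
    byte[] raw = registry.asRawData(42);
    Integer back = registry.asObject(raw, Integer.class);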
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBCheckpoint.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBCheckpoint.java
deleted file mode 100644
index 6a45298..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBCheckpoint.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-import java.nio.file.Path;
-
-/**
- * Generic DB Checkpoint interface.
- */
-public interface DBCheckpoint {
-
-  /**
-   * Get Snapshot location.
-   */
-  Path getCheckpointLocation();
-
-  /**
-   * Get Snapshot creation timestamp.
-   */
-  long getCheckpointTimestamp();
-
-  /**
-   * Get last sequence number of Snapshot.
-   */
-  long getLatestSequenceNumber();
-
-  /**
-   * Time taken in milliseconds for the checkpoint to be created.
-   */
-  long checkpointCreationTimeTaken();
-
-  /**
-   * Destroy the contents of the specified checkpoint to ensure
-   * proper cleanup of the footprint on disk.
-   *
-   * @throws IOException if I/O error happens
-   */
-  void cleanupCheckpoint() throws IOException;
-
-  /**
-   * Set the OM Ratis snapshot index corresponding to the OM DB checkpoint.
-   * The snapshot index is the latest snapshot index saved by ratis
-   * snapshots. It is not guaranteed to be the last ratis index applied to
-   * the OM DB state.
-   * @param omRatisSnapshotIndex the saved ratis snapshot index
-   */
-  void setRatisSnapshotIndex(long omRatisSnapshotIndex);
-
-  /**
-   * Get the OM Ratis snapshot index corresponding to the OM DB checkpoint.
-   * The ratis snapshot index indicates up to which index the DB checkpoint
-   * definitely includes. It is not guaranteed to be the last ratis
-   * log index applied to the DB checkpoint.
-   */
-  long getRatisSnapshotIndex();
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
deleted file mode 100644
index 4375425..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import com.google.common.base.Preconditions;
-import org.eclipse.jetty.util.StringUtil;
-import org.rocksdb.ColumnFamilyDescriptor;
-import org.rocksdb.DBOptions;
-import org.rocksdb.Env;
-import org.rocksdb.OptionsUtil;
-import org.rocksdb.RocksDBException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.List;
-
-/**
- * A Class that controls the standard config options of RocksDB.
- * <p>
- * Important: Some of the functions in this file are magic functions designed
- * for the use of OZONE developers only. Because of that, this information is
- * documented in this file only and is *not* intended for end user consumption.
- * Please do not use this information to tune your production environments.
- * Please remember the Spider-Man principle: with great power comes great
- * responsibility.
- */
-public final class DBConfigFromFile {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DBConfigFromFile.class);
-
-  public static final String CONFIG_DIR = "HADOOP_CONF_DIR";
-
-  private DBConfigFromFile() {
-  }
-
-  public static File getConfigLocation() throws IOException {
-    String path = System.getenv(CONFIG_DIR);
-
-    // Make testing easy.
-    // If there is no env. variable defined, let us try to read the JVM property.
-    if (StringUtil.isBlank(path)) {
-      path = System.getProperty(CONFIG_DIR);
-    }
-
-    if (StringUtil.isBlank(path)) {
-      LOG.debug("Unable to find the configuration directory. "
-          + "Please make sure that HADOOP_CONF_DIR is set up correctly.");
-      return null;
-    }
-    return new File(path);
-
-  }
-
-  /**
-   * This class establishes a magic pattern where we look for DBFile.ini as the
-   * options for RocksDB.
-   *
-   * @param dbFileName - The DBFile Name. For example, OzoneManager.db
-   * @return Name of the DB File options
-   */
-  public static String getOptionsFileNameFromDB(String dbFileName) {
-    Preconditions.checkNotNull(dbFileName);
-    return dbFileName + ".ini";
-  }
-
-  /**
-   * One of the Magic functions designed for the use of Ozone Developers *ONLY*.
-   * This function takes the name of a DB file and looks up a .ini file that
-   * follows the RocksDB config format, and uses that file for DBOptions and
-   * column family options. The format for this file is specified by RocksDB.
-   * <p>
-   * Here is a sample config from RocksDB sample Repo.
-   * <p>
-   * https://github.com/facebook/rocksdb/blob/master/examples
-   * /rocksdb_option_file_example.ini
-   * <p>
-   * We look for a specific pattern, say OzoneManager.db will have its configs
-   * specified in OzoneManager.db.ini. This option is used only by the
-   * performance testing group to allow tuning of all parameters freely.
-   * <p>
-   * For the end users we offer a set of Predefined options that is easy to use
-   * and the user does not need to become an expert in RockDB config.
-   * <p>
-   * This code assumes the .ini file is placed in the same directory as normal
-   * config files. That is in $HADOOP_DIR/etc/hadoop. For example, if we want to
-   * control OzoneManager.db configs from a file, we need to create a file
-   * called OzoneManager.db.ini and place that file in $HADOOP_DIR/etc/hadoop.
-   *
-   * @param dbFileName - The DB File Name, for example, OzoneManager.db.
-   * @param cfDescs - ColumnFamily Handles.
-   * @return DBOptions, Options to be used for opening/creating the DB.
-   * @throws IOException
-   */
-  public static DBOptions readFromFile(String dbFileName,
-      List<ColumnFamilyDescriptor> cfDescs) throws IOException {
-    Preconditions.checkNotNull(dbFileName);
-    Preconditions.checkNotNull(cfDescs);
-    Preconditions.checkArgument(cfDescs.size() > 0);
-
-    //TODO: Add Documentation on how to support RocksDB Mem Env.
-    Env env = Env.getDefault();
-    DBOptions options = null;
-    File configLocation = getConfigLocation();
-    if (configLocation != null &&
-        StringUtil.isNotBlank(configLocation.toString())) {
-      Path optionsFile = Paths.get(configLocation.toString(),
-          getOptionsFileNameFromDB(dbFileName));
-
-      if (optionsFile.toFile().exists()) {
-        options = new DBOptions();
-        try {
-          OptionsUtil.loadOptionsFromFile(optionsFile.toString(),
-              env, options, cfDescs, true);
-
-        } catch (RocksDBException rdEx) {
-          throw RDBTable.toIOException("Unable to find/open Options file.",
-              rdEx);
-        }
-      }
-    }
-    return options;
-  }
-
-}
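
To make the override mechanism above concrete, here is a minimal sketch of how the removed helper is typically driven. The file name OzoneManager.db and the SSD fallback are illustrative assumptions, not part of this change:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdds.utils.db.DBConfigFromFile;
import org.apache.hadoop.hdds.utils.db.DBProfile;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;

public class OptionsFromIniSketch {
  public static void main(String[] args) throws Exception {
    // Looks for OzoneManager.db.ini under HADOOP_CONF_DIR (env var or JVM
    // property); readFromFile returns null when no override file exists.
    List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>();
    cfDescs.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
    DBOptions options = DBConfigFromFile.readFromFile("OzoneManager.db", cfDescs);
    if (options == null) {
      options = DBProfile.SSD.getDBOptions(); // fall back to a predefined profile
    }
    System.out.println("createIfMissing = " + options.createIfMissing());
  }
}
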
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java
deleted file mode 100644
index 57516fd..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.rocksdb.BlockBasedTableConfig;
-import org.rocksdb.BloomFilter;
-import org.rocksdb.ColumnFamilyOptions;
-import org.rocksdb.CompactionStyle;
-import org.rocksdb.DBOptions;
-
-import java.math.BigDecimal;
-
-/**
- * User-visible configs based on the RocksDB tuning page. Documentation for
- * Options:
- * <p>
- * https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h
- * <p>
- * Most tuning parameters are based on this URL.
- * <p>
- * https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
- */
-public enum DBProfile {
-  //TODO : Add more profiles like TEST etc.
-  SSD {
-    @Override
-    public String toString() {
-      return "DBProfile.SSD";
-    }
-
-    @Override
-    public ColumnFamilyOptions getColumnFamilyOptions() {
-
-      // Set BlockCacheSize to 256 MB. This should not be an issue for HADOOP.
-      final long blockCacheSize = toLong(StorageUnit.MB.toBytes(256.00));
-
-      // Set the Default block size to 16KB
-      final long blockSize = toLong(StorageUnit.KB.toBytes(16));
-
-      // Write Buffer Size -- set to 128 MB
-      final long writeBufferSize = toLong(StorageUnit.MB.toBytes(128));
-
-      return new ColumnFamilyOptions()
-          .setLevelCompactionDynamicLevelBytes(true)
-          .setWriteBufferSize(writeBufferSize)
-          .setTableFormatConfig(
-              new BlockBasedTableConfig()
-                  .setBlockCacheSize(blockCacheSize)
-                  .setBlockSize(blockSize)
-                  .setCacheIndexAndFilterBlocks(true)
-                  .setPinL0FilterAndIndexBlocksInCache(true)
-                  .setFilter(new BloomFilter()));
-    }
-
-    @Override
-    public DBOptions getDBOptions() {
-      final int maxBackgroundCompactions = 4;
-      final int maxBackgroundFlushes = 2;
-      final long bytesPerSync = toLong(StorageUnit.MB.toBytes(1.00));
-      final boolean createIfMissing = true;
-      final boolean createMissingColumnFamilies = true;
-      return new DBOptions()
-          .setIncreaseParallelism(Runtime.getRuntime().availableProcessors())
-          .setMaxBackgroundCompactions(maxBackgroundCompactions)
-          .setMaxBackgroundFlushes(maxBackgroundFlushes)
-          .setBytesPerSync(bytesPerSync)
-          .setCreateIfMissing(createIfMissing)
-          .setCreateMissingColumnFamilies(createMissingColumnFamilies);
-    }
-
-
-  },
-  DISK {
-    @Override
-    public String toString() {
-      return "DBProfile.DISK";
-    }
-
-    @Override
-    public DBOptions getDBOptions() {
-      final long readAheadSize = toLong(StorageUnit.MB.toBytes(4.00));
-      return SSD.getDBOptions().setCompactionReadaheadSize(readAheadSize);
-    }
-
-    @Override
-    public ColumnFamilyOptions getColumnFamilyOptions() {
-      ColumnFamilyOptions columnFamilyOptions = SSD.getColumnFamilyOptions();
-      columnFamilyOptions.setCompactionStyle(CompactionStyle.LEVEL);
-      return columnFamilyOptions;
-    }
-
-
-  };
-
-  private static long toLong(double value) {
-    BigDecimal temp = new BigDecimal(value);
-    return temp.longValue();
-  }
-
-  public abstract DBOptions getDBOptions();
-
-  public abstract ColumnFamilyOptions getColumnFamilyOptions();
-}
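
A quick sketch of how the two profiles relate: DISK reuses the SSD tuning and layers compaction read-ahead on top. The comparison below is illustrative only:

import org.apache.hadoop.hdds.utils.db.DBProfile;
import org.rocksdb.DBOptions;

public class ProfileComparisonSketch {
  public static void main(String[] args) {
    // DISK derives its DBOptions from SSD and adds a 4 MB read-ahead.
    DBOptions ssd = DBProfile.SSD.getDBOptions();
    DBOptions disk = DBProfile.DISK.getDBOptions();
    System.out.println("SSD  compactionReadaheadSize = "
        + ssd.compactionReadaheadSize());
    System.out.println("DISK compactionReadaheadSize = "
        + disk.compactionReadaheadSize());
  }
}
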
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
deleted file mode 100644
index b3f5838..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl;
-
-/**
- * The DBStore interface provides the ability to create Tables, which store
- * a specific type of Key-Value pair. Some DB interfaces, like LevelDB, will
- * not be able to do this. In those cases a Table creation will map to the
- * default store.
- *
- */
-@InterfaceStability.Evolving
-public interface DBStore extends AutoCloseable {
-
-  /**
-   * Gets an existing TableStore.
-   *
-   * @param name - Name of the TableStore to get
-   * @return - TableStore.
-   * @throws IOException on Failure
-   */
-  Table<byte[], byte[]> getTable(String name) throws IOException;
-
-
-  /**
-   * Gets an existing TableStore with implicit key/value conversion and
-   * with the default cleanup policy for the cache. The default cache cleanup
-   * policy is manual.
-   *
-   * @param name - Name of the TableStore to get
-   * @param keyType - Class of the key type
-   * @param valueType - Class of the value type
-   * @return - TableStore.
-   * @throws IOException on Failure
-   */
-  <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
-      Class<KEY> keyType, Class<VALUE> valueType) throws IOException;
-
-  /**
-   * Gets an existing TableStore with implicit key/value conversion and
-   * with specified cleanup policy for cache.
-   * @throws IOException
-   */
-  <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
-      Class<KEY> keyType, Class<VALUE> valueType,
-      TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException;
-
-  /**
-   * Lists the known Tables in a DB.
-   *
-   * @return List of Tables; in the case of RocksDB and LevelDB we will return
-   * at least one entry called DEFAULT.
-   * @throws IOException on Failure
-   */
-  ArrayList<Table> listTables() throws IOException;
-
-  /**
-   * Flush the DB buffer onto persistent storage.
-   * @throws IOException
-   */
-  void flush() throws IOException;
-
-  /**
-   * Compact the entire database.
-   *
-   * @throws IOException on Failure
-   */
-  void compactDB() throws IOException;
-
-  /**
-   * Moves a key from the Source Table to the destination Table.
-   *
-   * @param key - Key to move.
-   * @param source - Source Table.
-   * @param dest - Destination Table.
-   * @throws IOException on Failure
-   */
-  <KEY, VALUE> void move(KEY key, Table<KEY, VALUE> source,
-                         Table<KEY, VALUE> dest) throws IOException;
-
-  /**
-   * Moves a key from the Source Table to the destination Table and updates the
-   * destination to the new value.
-   *
-   * @param key - Key to move.
-   * @param value - new value to write to the destination table.
-   * @param source - Source Table.
-   * @param dest - Destination Table.
-   * @throws IOException on Failure
-   */
-  <KEY, VALUE> void move(KEY key, VALUE value, Table<KEY, VALUE> source,
-                         Table<KEY, VALUE> dest)
-      throws IOException;
-
-  /**
-   * Moves a key from the Source Table to the destination Table and updates the
-   * destination with the new key name and value.
-   * This is similar to deleting an entry in one table and adding an entry in
-   * another table, except that here it is done atomically.
-   *
-   * @param sourceKey - Key to move.
-   * @param destKey - Destination key name.
-   * @param value - new value to write to the destination table.
-   * @param source - Source Table.
-   * @param dest - Destination Table.
-   * @throws IOException on Failure
-   */
-  <KEY, VALUE> void move(KEY sourceKey, KEY destKey, VALUE value,
-                         Table<KEY, VALUE> source, Table<KEY, VALUE> dest)
-      throws IOException;
-
-  /**
-   * Returns an estimated count of keys in this DB.
-   *
-   * @return long, estimate of keys in the DB.
-   */
-  long getEstimatedKeyCount() throws IOException;
-
-  /**
-   * Initialize an atomic batch operation which can hold multiple PUT/DELETE
-   * operations and be committed later in one step.
-   *
-   * @return BatchOperation holder which can be used to add or commit batch
-   * operations.
-   */
-  BatchOperation initBatchOperation();
-
-  /**
-   * Commit the batch operations.
-   *
-   * @param operation which contains all the required batch operations.
-   * @throws IOException on Failure.
-   */
-  void commitBatchOperation(BatchOperation operation) throws IOException;
-
-  /**
-   * Get the current snapshot of the OM DB store as an artifact stored on
-   * the local filesystem.
-   * @return An object that encapsulates the checkpoint information along with
-   * location.
-   */
-  DBCheckpoint getCheckpoint(boolean flush) throws IOException;
-
-  /**
-   * Get DB Store location.
-   * @return DB file location.
-   */
-  File getDbLocation();
-
-  /**
-   * Get List of Index to Table Names.
-   * (For decoding table from column family index)
-   * @return Map of Index -> TableName
-   */
-  Map<Integer, String> getTableNames();
-
-  /**
-   * Get Codec registry.
-   * @return codec registry.
-   */
-  CodecRegistry getCodecRegistry();
-
-  /**
-   * Get data written to DB since a specific sequence number.
-   * @param sequenceNumber - sequence number after which updates are requested
-   * @return DBUpdatesWrapper holding the write batches written after the
-   * given sequence number
-   * @throws SequenceNumberNotFoundException
-   */
-  DBUpdatesWrapper getUpdatesSince(long sequenceNumber)
-      throws SequenceNumberNotFoundException;
-}
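
A hedged sketch of the move() contract above, assuming a built DBStore, tables registered under the hypothetical names pendingTable and doneTable, and codecs for the key/value types already in the registry:

import java.io.IOException;

import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.Table;

public final class MoveSketch {
  private MoveSketch() { }

  static void complete(DBStore store, String jobId) throws IOException {
    Table<String, Long> pending =
        store.getTable("pendingTable", String.class, Long.class);
    Table<String, Long> done =
        store.getTable("doneTable", String.class, Long.class);
    // Delete from one table and insert into the other as a single atomic batch.
    store.move(jobId, pending, done);
  }
}
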
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
deleted file mode 100644
index 263864f..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.eclipse.jetty.util.StringUtil;
-import org.rocksdb.ColumnFamilyDescriptor;
-import org.rocksdb.ColumnFamilyOptions;
-import org.rocksdb.DBOptions;
-import org.rocksdb.InfoLogLevel;
-import org.rocksdb.RocksDB;
-import org.rocksdb.Statistics;
-import org.rocksdb.StatsLevel;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_DB_PROFILE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
-
-/**
- * DBStore Builder.
- */
-public final class DBStoreBuilder {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DBStoreBuilder.class);
-  public static final Logger ROCKS_DB_LOGGER =
-      LoggerFactory.getLogger(RocksDB.class);
-  private Set<TableConfig> tables;
-  private DBProfile dbProfile;
-  private DBOptions rocksDBOption;
-  private String dbname;
-  private Path dbPath;
-  private List<String> tableNames;
-  private Configuration configuration;
-  private CodecRegistry registry;
-  private String rocksDbStat;
-  private RocksDBConfiguration rocksDBConfiguration;
-
-  private DBStoreBuilder(OzoneConfiguration configuration) {
-    tables = new HashSet<>();
-    tableNames = new LinkedList<>();
-    this.configuration = configuration;
-    this.registry = new CodecRegistry();
-    this.rocksDbStat = configuration.getTrimmed(
-        OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
-        OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
-    this.rocksDBConfiguration =
-        configuration.getObject(RocksDBConfiguration.class);
-  }
-
-  public static DBStoreBuilder newBuilder(OzoneConfiguration configuration) {
-    return new DBStoreBuilder(configuration);
-  }
-
-  public DBStoreBuilder setProfile(DBProfile profile) {
-    dbProfile = profile;
-    return this;
-  }
-
-  public DBStoreBuilder setName(String name) {
-    dbname = name;
-    return this;
-  }
-
-  public DBStoreBuilder addTable(String tableName) {
-    tableNames.add(tableName);
-    return this;
-  }
-
-  public <T> DBStoreBuilder addCodec(Class<T> type, Codec<T> codec) {
-    registry.addCodec(type, codec);
-    return this;
-  }
-
-  public DBStoreBuilder addTable(String tableName, ColumnFamilyOptions option)
-      throws IOException {
-    TableConfig tableConfig = new TableConfig(tableName, option);
-    if (!tables.add(tableConfig)) {
-      String message = "Unable to add the table: " + tableName +
-          ".  Please check if this table name is already in use.";
-      LOG.error(message);
-      throw new IOException(message);
-    }
-    LOG.info("using custom profile for table: {}", tableName);
-    return this;
-  }
-
-  public DBStoreBuilder setDBOption(DBOptions option) {
-    rocksDBOption = option;
-    return this;
-  }
-
-  public DBStoreBuilder setPath(Path path) {
-    Preconditions.checkNotNull(path);
-    dbPath = path;
-    return this;
-  }
-
-  /**
-   * Builds a DBStore instance and returns that.
-   *
-   * @return DBStore
-   */
-  public DBStore build() throws IOException {
-    if (StringUtil.isBlank(dbname) || (dbPath == null)) {
-      LOG.error("Required parameter missing.");
-      throw new IOException("Required parameter is missing. Please make sure "
-          + "the path and DB name are provided.");
-    }
-    processDBProfile();
-    processTables();
-    DBOptions options = getDbProfile();
-    File dbFile = getDBFile();
-    if (!dbFile.getParentFile().exists()) {
-      throw new IOException("The DB destination directory should exist.");
-    }
-    return new RDBStore(dbFile, options, tables, registry);
-  }
-
-  /**
-   * If the DBProfile is not set, we will default to the profile from the
-   * config file.
-   */
-  private void processDBProfile() {
-    if (dbProfile == null) {
-      dbProfile = this.configuration.getEnum(HDDS_DB_PROFILE,
-          HDDS_DEFAULT_DB_PROFILE);
-    }
-  }
-
-  private void processTables() throws IOException {
-    if (tableNames.size() > 0) {
-      for (String name : tableNames) {
-        addTable(name, dbProfile.getColumnFamilyOptions());
-        LOG.info("Using default column profile:{} for Table:{}",
-            dbProfile.toString(), name);
-      }
-    }
-    addTable(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
-        dbProfile.getColumnFamilyOptions());
-    LOG.info("Using default column profile:{} for Table:{}",
-        dbProfile.toString(),
-        DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY));
-  }
-
-  private DBOptions getDbProfile() {
-    if (rocksDBOption != null) {
-      return rocksDBOption;
-    }
-    DBOptions option = null;
-    if (StringUtil.isNotBlank(dbname)) {
-      List<ColumnFamilyDescriptor> columnFamilyDescriptors = new LinkedList<>();
-
-      for (TableConfig tc : tables) {
-        columnFamilyDescriptors.add(tc.getDescriptor());
-      }
-
-      if (columnFamilyDescriptors.size() > 0) {
-        try {
-          option = DBConfigFromFile.readFromFile(dbname,
-              columnFamilyDescriptors);
-          if (option != null) {
-            LOG.info("Using Configs from {}.ini file", dbname);
-          }
-        } catch (IOException ex) {
-          LOG.info("Unable to read RocksDB config", ex);
-        }
-      }
-    }
-
-    if (option == null) {
-      LOG.info("Using default options. {}", dbProfile.toString());
-      option = dbProfile.getDBOptions();
-    }
-
-    if (rocksDBConfiguration.isRocksdbLoggingEnabled()) {
-      org.rocksdb.Logger logger = new org.rocksdb.Logger(option) {
-        @Override
-        protected void log(InfoLogLevel infoLogLevel, String s) {
-          ROCKS_DB_LOGGER.info(s);
-        }
-      };
-      InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration
-          .getRocksdbLogLevel() + "_LEVEL");
-      logger.setInfoLogLevel(level);
-      option.setLogger(logger);
-    }
-
-    if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
-      Statistics statistics = new Statistics();
-      statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
-      option = option.setStatistics(statistics);
-    }
-    return option;
-  }
-
-  private File getDBFile() throws IOException {
-    if (dbPath == null) {
-      LOG.error("DB path is required.");
-      throw new IOException("A Path to for DB file is needed.");
-    }
-
-    if (StringUtil.isBlank(dbname)) {
-      LOG.error("DBName is a required.");
-      throw new IOException("A valid DB name is required.");
-    }
-    return Paths.get(dbPath.toString(), dbname).toFile();
-  }
-
-}
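
End to end, the builder above is typically used like this. A sketch only: the on-disk path is hypothetical and, as build() enforces, must already exist:

import java.nio.file.Paths;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;

public class BuilderSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Tables added by name pick up the column family options of the
    // profile resolved by processDBProfile().
    try (DBStore store = DBStoreBuilder.newBuilder(conf)
        .setName("OzoneManager.db")
        .setPath(Paths.get("/var/lib/ozone/om")) // hypothetical, must exist
        .addTable("keyTable")
        .build()) {
      System.out.println("Opened DB at " + store.getDbLocation());
    }
  }
}
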
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java
deleted file mode 100644
index aa48c5e..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Wrapper class to hold DB data read from the RocksDB log file.
- */
-public class DBUpdatesWrapper {
-
-  private List<byte[]> dataList = new ArrayList<>();
-  private long currentSequenceNumber = -1;
-
-  public void addWriteBatch(byte[] data, long sequenceNumber) {
-    dataList.add(data);
-    if (currentSequenceNumber < sequenceNumber) {
-      currentSequenceNumber = sequenceNumber;
-    }
-  }
-
-  public List<byte[]> getData() {
-    return dataList;
-  }
-
-  public void setCurrentSequenceNumber(long sequenceNumber) {
-    this.currentSequenceNumber = sequenceNumber;
-  }
-
-  public long getCurrentSequenceNumber() {
-    return currentSequenceNumber;
-  }
-}
-
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java
deleted file mode 100644
index e95e0f1..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-
-import com.google.common.primitives.Ints;
-
-/**
- * Codec to convert Integer to/from byte array.
- */
-public class IntegerCodec implements Codec<Integer> {
-  @Override
-  public byte[] toPersistedFormat(Integer object) throws IOException {
-    return Ints.toByteArray(object);
-  }
-
-  @Override
-  public Integer fromPersistedFormat(byte[] rawData) throws IOException {
-    return Ints.fromByteArray(rawData);
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java
deleted file mode 100644
index 6c95246..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.utils.db;
-
-import com.google.common.primitives.Longs;
-
-
-/**
- * Codec to convert Long to/from byte array.
- */
-public class LongCodec implements Codec<Long> {
-
-  @Override
-  public byte[] toPersistedFormat(Long object) {
-    if (object != null) {
-      return Longs.toByteArray(object);
-    } else {
-      return null;
-    }
-  }
-
-  @Override
-  public Long fromPersistedFormat(byte[] rawData) {
-    if (rawData != null) {
-      return Longs.fromByteArray(rawData);
-    } else {
-      return null;
-    }
-  }
-}
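
These codecs follow a small, repeatable pattern that can be extended to other fixed-width types; a sketch under that assumption (InstantCodec is hypothetical, not an existing class, and would be registered via DBStoreBuilder#addCodec):

import java.io.IOException;
import java.time.Instant;

import com.google.common.primitives.Longs;
import org.apache.hadoop.hdds.utils.db.Codec;

/** Hypothetical codec mapping Instant to epoch millis, LongCodec-style. */
public class InstantCodec implements Codec<Instant> {
  @Override
  public byte[] toPersistedFormat(Instant object) throws IOException {
    return object == null ? null : Longs.toByteArray(object.toEpochMilli());
  }

  @Override
  public Instant fromPersistedFormat(byte[] rawData) throws IOException {
    return rawData == null ? null
        : Instant.ofEpochMilli(Longs.fromByteArray(rawData));
  }
}
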
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
deleted file mode 100644
index 42843b0..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-
-import org.rocksdb.ColumnFamilyHandle;
-import org.rocksdb.RocksDB;
-import org.rocksdb.RocksDBException;
-import org.rocksdb.WriteBatch;
-import org.rocksdb.WriteOptions;
-
-/**
- * Batch operation implementation for rocks db.
- */
-public class RDBBatchOperation implements BatchOperation {
-
-  private final WriteBatch writeBatch;
-
-  public RDBBatchOperation() {
-    writeBatch = new WriteBatch();
-  }
-
-  public RDBBatchOperation(WriteBatch writeBatch) {
-    this.writeBatch = writeBatch;
-  }
-
-  public void commit(RocksDB db, WriteOptions writeOptions) throws IOException {
-    try {
-      db.write(writeOptions, writeBatch);
-    } catch (RocksDBException e) {
-      throw new IOException("Unable to write the batch.", e);
-    }
-  }
-
-  @Override
-  public void close() {
-    writeBatch.close();
-  }
-
-  public void delete(ColumnFamilyHandle handle, byte[] key) throws IOException {
-    try {
-      writeBatch.delete(handle, key);
-    } catch (RocksDBException e) {
-      throw new IOException("Can't record batch delete operation.", e);
-    }
-  }
-
-  public void put(ColumnFamilyHandle handle, byte[] key, byte[] value)
-      throws IOException {
-    try {
-      writeBatch.put(handle, key, value);
-    } catch (RocksDBException e) {
-      throw new IOException("Can't record batch put operation.", e);
-    }
-  }
-}
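
Callers normally reach this class through the DBStore and Table interfaces rather than constructing it directly; a sketch of an atomic two-key update under that assumption:

import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.Table;

public final class BatchSketch {
  private BatchSketch() { }

  static void rename(DBStore store, Table<byte[], byte[]> table,
      byte[] oldKey, byte[] newKey, byte[] value) throws Exception {
    // Both mutations are buffered in one WriteBatch and commit atomically.
    try (BatchOperation batch = store.initBatchOperation()) {
      table.putWithBatch(batch, newKey, value);
      table.deleteWithBatch(batch, oldKey);
      store.commitBatchOperation(batch);
    }
  }
}
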
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java
deleted file mode 100644
index 42b9b77..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.time.Duration;
-import java.time.Instant;
-
-import org.apache.commons.lang3.StringUtils;
-import org.rocksdb.Checkpoint;
-import org.rocksdb.RocksDB;
-import org.rocksdb.RocksDBException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * RocksDB Checkpoint Manager, used to create and cleanup checkpoints.
- */
-public class RDBCheckpointManager {
-
-  private final Checkpoint checkpoint;
-  private final RocksDB db;
-  public static final String RDB_CHECKPOINT_DIR_PREFIX = "rdb_checkpoint_";
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RDBCheckpointManager.class);
-  private String checkpointNamePrefix = "";
-
-  public RDBCheckpointManager(RocksDB rocksDB) {
-    this.db = rocksDB;
-    this.checkpoint = Checkpoint.create(rocksDB);
-  }
-
-  /**
-   * Create a checkpoint manager with a prefix to be added to the
-   * snapshots created.
-   *
-   * @param rocksDB          DB instance
-   * @param checkpointPrefix prefix string.
-   */
-  public RDBCheckpointManager(RocksDB rocksDB, String checkpointPrefix) {
-    this.db = rocksDB;
-    this.checkpointNamePrefix = checkpointPrefix;
-    this.checkpoint = Checkpoint.create(rocksDB);
-  }
-
-  /**
-   * Create RocksDB snapshot by saving a checkpoint to a directory.
-   *
-   * @param parentDir The directory where the checkpoint needs to be created.
-   * @return RocksDB specific Checkpoint information object.
-   */
-  public RocksDBCheckpoint createCheckpoint(String parentDir) {
-    try {
-      long currentTime = System.currentTimeMillis();
-
-      String checkpointDir = StringUtils.EMPTY;
-      if (StringUtils.isNotEmpty(checkpointNamePrefix)) {
-        checkpointDir += checkpointNamePrefix;
-      }
-      checkpointDir += "_" + RDB_CHECKPOINT_DIR_PREFIX + currentTime;
-
-      Path checkpointPath = Paths.get(parentDir, checkpointDir);
-      Instant start = Instant.now();
-      checkpoint.createCheckpoint(checkpointPath.toString());
-      Instant end = Instant.now();
-
-      long duration = Duration.between(start, end).toMillis();
-      LOG.info("Created checkpoint at " + checkpointPath.toString() + " in "
-          + duration + " milliseconds");
-
-      return new RocksDBCheckpoint(
-          checkpointPath,
-          currentTime,
-          db.getLatestSequenceNumber(), //Best guesstimate here. Not accurate.
-          duration);
-
-    } catch (RocksDBException e) {
-      LOG.error("Unable to create RocksDB Snapshot.", e);
-    }
-    return null;
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
deleted file mode 100644
index 53bd424..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ /dev/null
@@ -1,381 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_CHECKPOINTS_DIR_NAME;
-
-import javax.management.ObjectName;
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Hashtable;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.utils.RocksDBStoreMBean;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.metrics2.util.MBeans;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl;
-import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.rocksdb.ColumnFamilyDescriptor;
-import org.rocksdb.ColumnFamilyHandle;
-import org.rocksdb.DBOptions;
-import org.rocksdb.FlushOptions;
-import org.rocksdb.RocksDB;
-import org.rocksdb.RocksDBException;
-import org.rocksdb.TransactionLogIterator;
-import org.rocksdb.WriteOptions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * RocksDB Store that supports creating Tables in DB.
- */
-public class RDBStore implements DBStore {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RDBStore.class);
-  private RocksDB db;
-  private File dbLocation;
-  private final WriteOptions writeOptions;
-  private final DBOptions dbOptions;
-  private final CodecRegistry codecRegistry;
-  private final Hashtable<String, ColumnFamilyHandle> handleTable;
-  private ObjectName statMBeanName;
-  private RDBCheckpointManager checkPointManager;
-  private String checkpointsParentDir;
-  private List<ColumnFamilyHandle> columnFamilyHandles;
-
-  @VisibleForTesting
-  public RDBStore(File dbFile, DBOptions options,
-                  Set<TableConfig> families) throws IOException {
-    this(dbFile, options, families, new CodecRegistry());
-  }
-
-  public RDBStore(File dbFile, DBOptions options, Set<TableConfig> families,
-                  CodecRegistry registry)
-      throws IOException {
-    Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
-    Preconditions.checkNotNull(families);
-    Preconditions.checkArgument(families.size() > 0);
-    handleTable = new Hashtable<>();
-    codecRegistry = registry;
-    final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
-        new ArrayList<>();
-    columnFamilyHandles = new ArrayList<>();
-
-    for (TableConfig family : families) {
-      columnFamilyDescriptors.add(family.getDescriptor());
-    }
-
-    dbOptions = options;
-    dbLocation = dbFile;
-    // TODO: Read from the next Config.
-    writeOptions = new WriteOptions();
-
-    try {
-      db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath(),
-          columnFamilyDescriptors, columnFamilyHandles);
-
-      for (int x = 0; x < columnFamilyHandles.size(); x++) {
-        handleTable.put(
-            DFSUtil.bytes2String(columnFamilyHandles.get(x).getName()),
-            columnFamilyHandles.get(x));
-      }
-
-      if (dbOptions.statistics() != null) {
-        Map<String, String> jmxProperties = new HashMap<>();
-        jmxProperties.put("dbName", dbFile.getName());
-        statMBeanName = HddsUtils.registerWithJmxProperties(
-            "Ozone", "RocksDbStore", jmxProperties,
-            RocksDBStoreMBean.create(dbOptions.statistics(),
-                dbFile.getName()));
-        if (statMBeanName == null) {
-          LOG.warn("jmx registration failed during RocksDB init, db path :{}",
-              dbFile.getAbsolutePath());
-        }
-      }
-
-      // Create the checkpoints directory if it does not exist.
-      checkpointsParentDir = Paths.get(dbLocation.getParent(),
-          OM_DB_CHECKPOINTS_DIR_NAME).toString();
-      File checkpointsDir = new File(checkpointsParentDir);
-      if (!checkpointsDir.exists()) {
-        boolean success = checkpointsDir.mkdir();
-        if (!success) {
-          LOG.warn("Unable to create RocksDB checkpoint directory");
-        }
-      }
-
-      //Initialize checkpoint manager
-      checkPointManager = new RDBCheckpointManager(db, "om");
-
-    } catch (RocksDBException e) {
-      throw toIOException(
-          "Failed init RocksDB, db path : " + dbFile.getAbsolutePath(), e);
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("RocksDB successfully opened.");
-      LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath());
-      LOG.debug("[Option] createIfMissing = {}", options.createIfMissing());
-      LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles());
-    }
-  }
-
-  public static IOException toIOException(String msg, RocksDBException e) {
-    String statusCode = e.getStatus() == null ? "N/A" :
-        e.getStatus().getCodeString();
-    String errMessage = e.getMessage() == null ? "Unknown error" :
-        e.getMessage();
-    String output = msg + "; status : " + statusCode
-        + "; message : " + errMessage;
-    return new IOException(output, e);
-  }
-
-  @Override
-  public void compactDB() throws IOException {
-    if (db != null) {
-      try {
-        db.compactRange();
-      } catch (RocksDBException e) {
-        throw toIOException("Failed to compact db", e);
-      }
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-
-    for (final ColumnFamilyHandle handle : handleTable.values()) {
-      handle.close();
-    }
-
-    if (statMBeanName != null) {
-      MBeans.unregister(statMBeanName);
-      statMBeanName = null;
-    }
-
-    if (db != null) {
-      db.close();
-    }
-
-    if (dbOptions != null) {
-      dbOptions.close();
-    }
-
-    if (writeOptions != null) {
-      writeOptions.close();
-    }
-  }
-
-  @Override
-  public <KEY, VALUE> void move(KEY key, Table<KEY, VALUE> source,
-                                Table<KEY, VALUE> dest) throws IOException {
-    try (BatchOperation batchOperation = initBatchOperation()) {
-
-      VALUE value = source.get(key);
-      dest.putWithBatch(batchOperation, key, value);
-      source.deleteWithBatch(batchOperation, key);
-      commitBatchOperation(batchOperation);
-    }
-  }
-
-  @Override
-  public <KEY, VALUE> void move(KEY key, VALUE value, Table<KEY, VALUE> source,
-                                Table<KEY, VALUE> dest) throws IOException {
-    move(key, key, value, source, dest);
-  }
-
-  @Override
-  public <KEY, VALUE> void move(KEY sourceKey, KEY destKey, VALUE value,
-                                Table<KEY, VALUE> source,
-                                Table<KEY, VALUE> dest) throws IOException {
-    try (BatchOperation batchOperation = initBatchOperation()) {
-      dest.putWithBatch(batchOperation, destKey, value);
-      source.deleteWithBatch(batchOperation, sourceKey);
-      commitBatchOperation(batchOperation);
-    }
-  }
-
-  @Override
-  public long getEstimatedKeyCount() throws IOException {
-    try {
-      return db.getLongProperty("rocksdb.estimate-num-keys");
-    } catch (RocksDBException e) {
-      throw toIOException("Unable to get the estimated count.", e);
-    }
-  }
-
-  @Override
-  public BatchOperation initBatchOperation() {
-    return new RDBBatchOperation();
-  }
-
-  @Override
-  public void commitBatchOperation(BatchOperation operation)
-      throws IOException {
-    ((RDBBatchOperation) operation).commit(db, writeOptions);
-  }
-
-
-  @VisibleForTesting
-  protected ObjectName getStatMBeanName() {
-    return statMBeanName;
-  }
-
-  @Override
-  public Table<byte[], byte[]> getTable(String name) throws IOException {
-    ColumnFamilyHandle handle = handleTable.get(name);
-    if (handle == null) {
-      throw new IOException("No such table in this DB. TableName : " + name);
-    }
-    return new RDBTable(this.db, handle, this.writeOptions);
-  }
-
-  @Override
-  public <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
-      Class<KEY> keyType, Class<VALUE> valueType) throws IOException {
-    return new TypedTable<KEY, VALUE>(getTable(name), codecRegistry, keyType,
-        valueType);
-  }
-
-  @Override
-  public <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
-      Class<KEY> keyType, Class<VALUE> valueType,
-      TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException {
-    return new TypedTable<KEY, VALUE>(getTable(name), codecRegistry, keyType,
-        valueType, cleanupPolicy);
-  }
-
-  @Override
-  public ArrayList<Table> listTables() throws IOException {
-    ArrayList<Table> returnList = new ArrayList<>();
-    for (ColumnFamilyHandle handle : handleTable.values()) {
-      returnList.add(new RDBTable(db, handle, writeOptions));
-    }
-    return returnList;
-  }
-
-  @Override
-  public void flush() throws IOException {
-    final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true);
-    try {
-      db.flush(flushOptions);
-    } catch (RocksDBException e) {
-      LOG.error("Unable to Flush RocksDB data", e);
-      throw toIOException("Unable to Flush RocksDB data", e);
-    }
-  }
-
-  @Override
-  public DBCheckpoint getCheckpoint(boolean flush) {
-    final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(flush);
-    try {
-      db.flush(flushOptions);
-    } catch (RocksDBException e) {
-      LOG.error("Unable to Flush RocksDB data before creating snapshot", e);
-    }
-    return checkPointManager.createCheckpoint(checkpointsParentDir);
-  }
-
-  @Override
-  public File getDbLocation() {
-    return dbLocation;
-  }
-
-  @Override
-  public Map<Integer, String> getTableNames() {
-    Map<Integer, String> tableNames = new HashMap<>();
-    StringCodec stringCodec = new StringCodec();
-
-    for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
-      try {
-        tableNames.put(columnFamilyHandle.getID(), stringCodec
-            .fromPersistedFormat(columnFamilyHandle.getName()));
-      } catch (RocksDBException | IOException e) {
-        LOG.error("Unexpected exception while reading column family handle " +
-            "name", e);
-      }
-    }
-    return tableNames;
-  }
-
-  @Override
-  public CodecRegistry getCodecRegistry() {
-    return codecRegistry;
-  }
-
-  @Override
-  public DBUpdatesWrapper getUpdatesSince(long sequenceNumber)
-      throws SequenceNumberNotFoundException {
-
-    DBUpdatesWrapper dbUpdatesWrapper = new DBUpdatesWrapper();
-    try {
-      TransactionLogIterator transactionLogIterator =
-          db.getUpdatesSince(sequenceNumber);
-
-      // Only the first record needs to be checked: its seq number must be
-      // <= (1 + passed_in_sequence_number). For example, if the seqNumber
-      // passed in is 100, then we can read from the WAL ONLY if the first
-      // sequence number is <= 101. If it is 102, then 101 may already be
-      // flushed to SST. If it is 99, we can skip 99 and 100, and then read
-      // from 101.
-
-      boolean checkValidStartingSeqNumber = true;
-
-      while (transactionLogIterator.isValid()) {
-        TransactionLogIterator.BatchResult result =
-            transactionLogIterator.getBatch();
-        long currSequenceNumber = result.sequenceNumber();
-        if (checkValidStartingSeqNumber &&
-            currSequenceNumber > 1 + sequenceNumber) {
-          throw new SequenceNumberNotFoundException("Unable to read data from" +
-              " RocksDB wal to get delta updates. It may have already been" +
-              "flushed to SSTs.");
-        }
-        // If the above condition was not satisfied, then it is OK to reset
-        // the flag.
-        checkValidStartingSeqNumber = false;
-        if (currSequenceNumber <= sequenceNumber) {
-          transactionLogIterator.next();
-          continue;
-        }
-        dbUpdatesWrapper.addWriteBatch(result.writeBatch().data(),
-            result.sequenceNumber());
-        transactionLogIterator.next();
-      }
-    } catch (RocksDBException e) {
-      LOG.error("Unable to get delta updates since sequenceNumber {} ",
-          sequenceNumber, e);
-    }
-    return dbUpdatesWrapper;
-  }
-
-  @VisibleForTesting
-  public RocksDB getDb() {
-    return db;
-  }
-
-}
\ No newline at end of file
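
getUpdatesSince() above pairs with DBUpdatesWrapper for WAL tailing, e.g. a follower syncing from a leader's DB. A sketch in which the replay step is a placeholder assumption:

import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
import org.apache.hadoop.hdds.utils.db.SequenceNumberNotFoundException;

public final class WalTailSketch {
  private WalTailSketch() { }

  static long replayFrom(DBStore store, long lastAppliedSeq) {
    try {
      DBUpdatesWrapper updates = store.getUpdatesSince(lastAppliedSeq);
      for (byte[] serializedWriteBatch : updates.getData()) {
        // Each entry is a serialized RocksDB WriteBatch; replay it here.
        assert serializedWriteBatch.length > 0;
      }
      return updates.getCurrentSequenceNumber();
    } catch (SequenceNumberNotFoundException e) {
      // The WAL no longer covers lastAppliedSeq + 1; fall back to shipping
      // a full checkpoint instead.
      return lastAppliedSeq;
    }
  }
}
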
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java
deleted file mode 100644
index 784738b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-import java.util.NoSuchElementException;
-import java.util.function.Consumer;
-
-import org.rocksdb.RocksIterator;
-
-/**
- * RocksDB store iterator.
- */
-public class RDBStoreIterator
-    implements TableIterator<byte[], ByteArrayKeyValue> {
-
-  private RocksIterator rocksDBIterator;
-
-  public RDBStoreIterator(RocksIterator iterator) {
-    this.rocksDBIterator = iterator;
-    rocksDBIterator.seekToFirst();
-  }
-
-  @Override
-  public void forEachRemaining(
-      Consumer<? super ByteArrayKeyValue> action) {
-    while (hasNext()) {
-      action.accept(next());
-    }
-  }
-
-  @Override
-  public boolean hasNext() {
-    return rocksDBIterator.isValid();
-  }
-
-  @Override
-  public ByteArrayKeyValue next() {
-    if (rocksDBIterator.isValid()) {
-      ByteArrayKeyValue value =
-          ByteArrayKeyValue.create(rocksDBIterator.key(), rocksDBIterator
-              .value());
-      rocksDBIterator.next();
-      return value;
-    }
-    throw new NoSuchElementException("RocksDB Store has no more elements");
-  }
-
-  @Override
-  public void seekToFirst() {
-    rocksDBIterator.seekToFirst();
-  }
-
-  @Override
-  public void seekToLast() {
-    rocksDBIterator.seekToLast();
-  }
-
-  @Override
-  public ByteArrayKeyValue seek(byte[] key) {
-    rocksDBIterator.seek(key);
-    if (rocksDBIterator.isValid()) {
-      return ByteArrayKeyValue.create(rocksDBIterator.key(),
-          rocksDBIterator.value());
-    }
-    return null;
-  }
-
-  @Override
-  public byte[] key() {
-    if (rocksDBIterator.isValid()) {
-      return rocksDBIterator.key();
-    }
-    return null;
-  }
-
-  @Override
-  public ByteArrayKeyValue value() {
-    if (rocksDBIterator.isValid()) {
-      return ByteArrayKeyValue.create(rocksDBIterator.key(),
-          rocksDBIterator.value());
-    }
-    return null;
-  }
-
-  @Override
-  public void close() throws IOException {
-    rocksDBIterator.close();
-  }
-}
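
A sketch of driving the iterator, mirroring how RDBTable#isEmpty walks a table (counting is just a stand-in workload, and ByteArrayKeyValue is assumed to be visible to the caller as it is to RDBTable):

import org.apache.hadoop.hdds.utils.db.ByteArrayKeyValue;
import org.apache.hadoop.hdds.utils.db.TableIterator;

public final class ScanSketch {
  private ScanSketch() { }

  static int count(TableIterator<byte[], ByteArrayKeyValue> it)
      throws Exception {
    int entries = 0;
    try {
      it.seekToFirst();
      while (it.hasNext()) {
        ByteArrayKeyValue kv = it.next();
        entries++; // kv wraps the raw key/value bytes of the current entry
      }
    } finally {
      it.close(); // releases the underlying RocksIterator
    }
    return entries;
  }
}
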
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
deleted file mode 100644
index 49ccc020..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.DFSUtil;
-
-import org.rocksdb.ColumnFamilyHandle;
-import org.rocksdb.ReadOptions;
-import org.rocksdb.RocksDB;
-import org.rocksdb.RocksDBException;
-import org.rocksdb.WriteOptions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * RocksDB implementation of the Ozone metadata store. This class should only
- * be used as part of TypedTable, as its underlying implementation for
- * accessing the metadata store content. All other users of Table should use
- * TypedTable.
- */
-@InterfaceAudience.Private
-class RDBTable implements Table<byte[], byte[]> {
-
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RDBTable.class);
-
-  private final RocksDB db;
-  private final ColumnFamilyHandle handle;
-  private final WriteOptions writeOptions;
-
-  /**
-   * Constructs a TableStore.
-   *
-   * @param db - RocksDB instance that we are using.
-   * @param handle - ColumnFamily Handle.
-   * @param writeOptions - RocksDB write Options.
-   */
-  RDBTable(RocksDB db, ColumnFamilyHandle handle,
-      WriteOptions writeOptions) {
-    this.db = db;
-    this.handle = handle;
-    this.writeOptions = writeOptions;
-  }
-
-  /**
-   * Converts RocksDB exception to IOE.
-   * @param msg  - Message to add to exception.
-   * @param e - Original Exception.
-   * @return  IOE.
-   */
-  public static IOException toIOException(String msg, RocksDBException e) {
-    String statusCode = e.getStatus() == null ? "N/A" :
-        e.getStatus().getCodeString();
-    String errMessage = e.getMessage() == null ? "Unknown error" :
-        e.getMessage();
-    String output = msg + "; status : " + statusCode
-        + "; message : " + errMessage;
-    return new IOException(output, e);
-  }
-
-  /**
-   * Returns the Column family Handle.
-   *
-   * @return ColumnFamilyHandle.
-   */
-  public ColumnFamilyHandle getHandle() {
-    return handle;
-  }
-
-  @Override
-  public void put(byte[] key, byte[] value) throws IOException {
-    try {
-      db.put(handle, writeOptions, key, value);
-    } catch (RocksDBException e) {
-      LOG.error("Failed to write to DB. Key: {}", new String(key,
-          StandardCharsets.UTF_8));
-      throw toIOException("Failed to put key-value to metadata "
-          + "store", e);
-    }
-  }
-
-  @Override
-  public void putWithBatch(BatchOperation batch, byte[] key, byte[] value)
-      throws IOException {
-    if (batch instanceof RDBBatchOperation) {
-      ((RDBBatchOperation) batch).put(getHandle(), key, value);
-    } else {
-      throw new IllegalArgumentException("batch should be RDBBatchOperation");
-    }
-  }
-
-
-  @Override
-  public boolean isEmpty() throws IOException {
-    try (TableIterator<byte[], ByteArrayKeyValue> keyIter = iterator()) {
-      keyIter.seekToFirst();
-      return !keyIter.hasNext();
-    }
-  }
-
-  @Override
-  public boolean isExist(byte[] key) throws IOException {
-    try {
-      // RocksDB#keyMayExist
-      // If the key definitely does not exist in the database, then this
-      // method returns false, else true.
-      return db.keyMayExist(handle, key, new StringBuilder())
-          && db.get(handle, key) != null;
-    } catch (RocksDBException e) {
-      throw toIOException(
-          "Error in accessing DB. ", e);
-    }
-  }
-
-  @Override
-  public byte[] get(byte[] key) throws IOException {
-    try {
-      return db.get(handle, key);
-    } catch (RocksDBException e) {
-      throw toIOException(
-          "Failed to get the value for the given key", e);
-    }
-  }
-
-  @Override
-  public void delete(byte[] key) throws IOException {
-    try {
-      db.delete(handle, key);
-    } catch (RocksDBException e) {
-      throw toIOException("Failed to delete the given key", e);
-    }
-  }
-
-  @Override
-  public void deleteWithBatch(BatchOperation batch, byte[] key)
-      throws IOException {
-    if (batch instanceof RDBBatchOperation) {
-      ((RDBBatchOperation) batch).delete(getHandle(), key);
-    } else {
-      throw new IllegalArgumentException("batch should be RDBBatchOperation");
-    }
-
-  }
-
-  @Override
-  public TableIterator<byte[], ByteArrayKeyValue> iterator() {
-    ReadOptions readOptions = new ReadOptions();
-    readOptions.setFillCache(false);
-    return new RDBStoreIterator(db.newIterator(handle, readOptions));
-  }
-
-  @Override
-  public String getName() throws IOException {
-    try {
-      return DFSUtil.bytes2String(this.getHandle().getName());
-    } catch (RocksDBException rdbEx) {
-      throw toIOException("Unable to get the table name.", rdbEx);
-    }
-  }
-
-  @Override
-  public void close() throws Exception {
-    // Nothing to do for a Column Family.
-  }
-
-  @Override
-  public long getEstimatedKeyCount() throws IOException {
-    try {
-      return db.getLongProperty(handle, "rocksdb.estimate-num-keys");
-    } catch (RocksDBException e) {
-      throw toIOException(
-          "Failed to get estimated key count of table " + getName(), e);
-    }
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java
deleted file mode 100644
index 1497438..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-import java.nio.file.Path;
-
-import org.apache.commons.io.FileUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class to hold information and location of a RocksDB Checkpoint.
- */
-public class RocksDBCheckpoint implements DBCheckpoint {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RocksDBCheckpoint.class);
-
-  private Path checkpointLocation;
-  private long checkpointTimestamp = System.currentTimeMillis();
-  private long latestSequenceNumber = -1;
-  private long checkpointCreationTimeTaken = 0L;
-  private long ratisSnapshotIndex = 0L;
-
-  public RocksDBCheckpoint(Path checkpointLocation) {
-    this.checkpointLocation = checkpointLocation;
-  }
-
-  public RocksDBCheckpoint(Path checkpointLocation,
-                    long snapshotTimestamp,
-                    long latestSequenceNumber,
-                    long checkpointCreationTimeTaken) {
-    this.checkpointLocation = checkpointLocation;
-    this.checkpointTimestamp = snapshotTimestamp;
-    this.latestSequenceNumber = latestSequenceNumber;
-    this.checkpointCreationTimeTaken = checkpointCreationTimeTaken;
-  }
-
-  @Override
-  public Path getCheckpointLocation() {
-    return this.checkpointLocation;
-  }
-
-  @Override
-  public long getCheckpointTimestamp() {
-    return this.checkpointTimestamp;
-  }
-
-  @Override
-  public long getLatestSequenceNumber() {
-    return this.latestSequenceNumber;
-  }
-
-  @Override
-  public long checkpointCreationTimeTaken() {
-    return checkpointCreationTimeTaken;
-  }
-
-  @Override
-  public void cleanupCheckpoint() throws IOException {
-    LOG.info("Cleaning up RocksDB checkpoint at " +
-        checkpointLocation.toString());
-    FileUtils.deleteDirectory(checkpointLocation.toFile());
-  }
-
-  @Override
-  public void setRatisSnapshotIndex(long omRatisSnapshotIndex) {
-    this.ratisSnapshotIndex = omRatisSnapshotIndex;
-  }
-
-  @Override
-  public long getRatisSnapshotIndex() {
-    return ratisSnapshotIndex;
-  }
-}
\ No newline at end of file
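RocksDBCheckpoint is a passive holder; the checkpoint itself comes from RocksDB's Checkpoint API, which hard-links the live SST files into a new directory. A hedged sketch of how such a holder could be populated (the store-side wiring is not part of this patch, and the paths are illustrative):

import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint;
import org.rocksdb.Checkpoint;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class CheckpointSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (Options opts = new Options().setCreateIfMissing(true);
         RocksDB db = RocksDB.open(opts, "/tmp/cp-demo-db")) {
      Path cpDir = Paths.get("/tmp/cp-demo-checkpoint");
      long start = System.currentTimeMillis();
      long seq = db.getLatestSequenceNumber();
      try (Checkpoint cp = Checkpoint.create(db)) {
        // Hard-links SSTs; the target directory must not already exist.
        cp.createCheckpoint(cpDir.toString());
      }
      // Populate the holder the way the deleted class expects.
      RocksDBCheckpoint holder = new RocksDBCheckpoint(
          cpDir, System.currentTimeMillis(), seq,
          System.currentTimeMillis() - start);
      System.out.println("checkpoint at " + holder.getCheckpointLocation());
    }
  }
}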
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java
deleted file mode 100644
index 1a8c846..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import org.apache.hadoop.hdds.conf.Config;
-import org.apache.hadoop.hdds.conf.ConfigGroup;
-import org.apache.hadoop.hdds.conf.ConfigTag;
-import org.apache.hadoop.hdds.conf.ConfigType;
-
-/**
- * Holds configuration items for OM RocksDB.
- */
-@ConfigGroup(prefix = "hadoop.hdds.db")
-public class RocksDBConfiguration {
-
-  private boolean rocksdbLogEnabled;
-
-  @Config(key = "rocksdb.logging.enabled",
-      type = ConfigType.BOOLEAN,
-      defaultValue = "false",
-      tags = {ConfigTag.OM},
-      description = "Enable/Disable RocksDB logging for OM.")
-  public void setRocksdbLoggingEnabled(boolean enabled) {
-    this.rocksdbLogEnabled = enabled;
-  }
-
-  public boolean isRocksdbLoggingEnabled() {
-    return rocksdbLogEnabled;
-  }
-
-  private String rocksdbLogLevel;
-
-  @Config(key = "rocksdb.logging.level",
-      type = ConfigType.STRING,
-      defaultValue = "INFO",
-      tags = {ConfigTag.OM},
-      description = "OM RocksDB logging level (INFO/DEBUG/WARN/ERROR/FATAL)")
-  public void setRocksdbLogLevel(String level) {
-    this.rocksdbLogLevel = level;
-  }
-
-  public String getRocksdbLogLevel() {
-    return rocksdbLogLevel;
-  }
-
-}
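The @ConfigGroup prefix and each @Config key compose into full property names, so the two setters above bind to hadoop.hdds.db.rocksdb.logging.enabled and hadoop.hdds.db.rocksdb.logging.level. A hedged sketch of reading them back, assuming the OzoneConfiguration#getObject injection helper from the same codebase:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration;

public class RocksDbConfSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // prefix "hadoop.hdds.db" + "." + key "rocksdb.logging.enabled"
    conf.setBoolean("hadoop.hdds.db.rocksdb.logging.enabled", true);
    conf.set("hadoop.hdds.db.rocksdb.logging.level", "DEBUG");
    // getObject(...) instantiates the @ConfigGroup class and calls the
    // annotated setters with the resolved values (assumed helper).
    RocksDBConfiguration dbConf =
        conf.getObject(RocksDBConfiguration.class);
    System.out.println(dbConf.isRocksdbLoggingEnabled()); // true
    System.out.println(dbConf.getRocksdbLogLevel());      // DEBUG
  }
}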
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/SequenceNumberNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/SequenceNumberNotFoundException.java
deleted file mode 100644
index e9b4fa3..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/SequenceNumberNotFoundException.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-
-/**
- * Thrown if RocksDB is unable to find requested data from WAL file.
- */
-public class SequenceNumberNotFoundException extends IOException {
-
-  public SequenceNumberNotFoundException() {
-    super();
-  }
-
-  public SequenceNumberNotFoundException(String message) {
-    super(message);
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java
deleted file mode 100644
index f823736..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-import org.apache.hadoop.hdfs.DFSUtil;
-
-/**
- * Codec to convert String to/from byte array.
- */
-public class StringCodec implements Codec<String> {
-
-  @Override
-  public byte[] toPersistedFormat(String object) throws IOException {
-    if (object != null) {
-      return DFSUtil.string2Bytes(object);
-    } else {
-      return null;
-    }
-  }
-
-  @Override
-  public String fromPersistedFormat(byte[] rawData) throws IOException {
-    if (rawData != null) {
-      return DFSUtil.bytes2String(rawData);
-    } else {
-      return null;
-    }
-  }
-}
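The Codec contract shown by StringCodec is a symmetric, null-preserving pair of conversions. For illustration, a hedged companion codec for Long values under the same contract (this class is not part of the patch):

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hdds.utils.db.Codec;

public class LongCodecSketch implements Codec<Long> {

  @Override
  public byte[] toPersistedFormat(Long object) throws IOException {
    if (object == null) {
      return null; // mirror StringCodec's null handling
    }
    return ByteBuffer.allocate(Long.BYTES).putLong(object).array();
  }

  @Override
  public Long fromPersistedFormat(byte[] rawData) throws IOException {
    if (rawData == null) {
      return null;
    }
    return ByteBuffer.wrap(rawData).getLong();
  }
}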
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
deleted file mode 100644
index 0502541..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.Map;
-
-import org.apache.commons.lang3.NotImplementedException;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-/**
- * Interface for a key-value store that stores Ozone metadata. Metadata is
- * stored as key-value pairs; both key and value are arbitrary byte arrays.
- * Each Table stores one kind of keys and values, which allows a DB to hold
- * different kinds of tables.
- */
-@InterfaceStability.Evolving
-public interface Table<KEY, VALUE> extends AutoCloseable {
-
-  /**
-   * Puts a key-value pair into the store.
-   *
-   * @param key metadata key
-   * @param value metadata value
-   */
-  void put(KEY key, VALUE value) throws IOException;
-
-  /**
-   * Puts a key-value pair into the store as part of a batch operation.
-   *
-   * @param batch the batch operation
-   * @param key metadata key
-   * @param value metadata value
-   */
-  void putWithBatch(BatchOperation batch, KEY key, VALUE value)
-      throws IOException;
-
-  /**
-   * @return true if the metadata store is empty.
-   * @throws IOException on Failure
-   */
-  boolean isEmpty() throws IOException;
-
-  /**
-   * Check if a given key exists in Metadata store.
-   * (Optimization to save on data deserialization)
-   * A lock on the key / bucket needs to be acquired before invoking this API.
-   * @param key metadata key
-   * @return true if the metadata store contains a key.
-   * @throws IOException on Failure
-   */
-  boolean isExist(KEY key) throws IOException;
-
-  /**
-   * Returns the value mapped to the given key, or null if the key is not
-   * found.
-   *
-   * @param key metadata key
-   * @return the value, or null if the key is not found.
-   * @throws IOException on Failure
-   */
-  VALUE get(KEY key) throws IOException;
-
-  /**
-   * Deletes a key from the metadata store.
-   *
-   * @param key metadata key
-   * @throws IOException on Failure
-   */
-  void delete(KEY key) throws IOException;
-
-  /**
-   * Deletes a key from the metadata store as part of a batch operation.
-   *
-   * @param batch the batch operation
-   * @param key metadata key
-   * @throws IOException on Failure
-   */
-  void deleteWithBatch(BatchOperation batch, KEY key) throws IOException;
-
-  /**
-   * Returns the iterator for this metadata store.
-   *
-   * @return MetaStoreIterator
-   */
-  TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> iterator();
-
-  /**
-   * Returns the Name of this Table.
-   * @return - Table Name.
-   * @throws IOException on failure.
-   */
-  String getName() throws IOException;
-
-  /**
-   * Returns the key count of this Table.  Note the result can be inaccurate.
-   * @return Estimated key count of this Table
-   * @throws IOException on failure
-   */
-  long getEstimatedKeyCount() throws IOException;
-
-  /**
-   * Add entry to the table cache.
-   *
-   * If the cacheKey already exists, it will override the entry.
-   * @param cacheKey
-   * @param cacheValue
-   */
-  default void addCacheEntry(CacheKey<KEY> cacheKey,
-      CacheValue<VALUE> cacheValue) {
-    throw new NotImplementedException("addCacheEntry is not implemented");
-  }
-
-  /**
-   * Get the cache value from table cache.
-   * @param cacheKey
-   */
-  default CacheValue<VALUE> getCacheValue(CacheKey<KEY> cacheKey) {
-    throw new NotImplementedException("getCacheValue is not implemented");
-  }
-
-  /**
-   * Removes all entries from the table cache whose epoch value is less than
-   * or equal to the specified epoch value.
-   * @param epoch
-   */
-  default void cleanupCache(long epoch) {
-    throw new NotImplementedException("cleanupCache is not implemented");
-  }
-
-  /**
-   * Return cache iterator maintained for this table.
-   */
-  default Iterator<Map.Entry<CacheKey<KEY>, CacheValue<VALUE>>>
-      cacheIterator() {
-    throw new NotImplementedException("cacheIterator is not implemented");
-  }
-
-  /**
-   * Class used to represent the key and value pair of a db entry.
-   */
-  interface KeyValue<KEY, VALUE> {
-
-    KEY getKey() throws IOException;
-
-    VALUE getValue() throws IOException;
-  }
-}
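The interface keeps single-key writes and batched writes separate: put/delete go straight to the store, while putWithBatch/deleteWithBatch only stage mutations that the owning DB store later commits atomically. A hedged usage sketch against these interfaces (obtaining the table and batch is store-specific and elided here):

import java.io.IOException;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.Table;

public final class TableUsageSketch {

  // Stages two writes and one delete; nothing hits the store until the
  // owning DB store commits the batch (the commit call is not shown).
  static void stageMutations(Table<String, String> table,
      BatchOperation batch) throws IOException {
    table.putWithBatch(batch, "volume1", "meta-a");
    table.putWithBatch(batch, "volume2", "meta-b");
    table.deleteWithBatch(batch, "staleKey");
  }

  private TableUsageSketch() {
  }
}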
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
deleted file mode 100644
index d8eb401..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.rocksdb.ColumnFamilyDescriptor;
-import org.rocksdb.ColumnFamilyOptions;
-
-/**
- * Class that maintains Table Configuration.
- */
-public class TableConfig {
-  private final String name;
-  private final ColumnFamilyOptions columnFamilyOptions;
-
-  /**
-   * Constructs a Table Config.
-   * @param name - Name of the Table.
-   * @param columnFamilyOptions - Column Family options.
-   */
-  public TableConfig(String name, ColumnFamilyOptions columnFamilyOptions) {
-    this.name = name;
-    this.columnFamilyOptions = columnFamilyOptions;
-  }
-
-  /**
-   * Returns the Name for this Table.
-   * @return - Name String
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Returns a ColumnFamilyDescriptor for this table.
-   * @return ColumnFamilyDescriptor
-   */
-  public ColumnFamilyDescriptor getDescriptor() {
-    return  new ColumnFamilyDescriptor(DFSUtil.string2Bytes(name),
-        columnFamilyOptions);
-  }
-
-  /**
-   * Returns Column family options for this Table.
-   * @return  ColumnFamilyOptions used for the Table.
-   */
-  public ColumnFamilyOptions getColumnFamilyOptions() {
-    return columnFamilyOptions;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    TableConfig that = (TableConfig) o;
-    return new EqualsBuilder()
-        .append(getName(), that.getName())
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(17, 37)
-        .append(getName())
-        .toHashCode();
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java
deleted file mode 100644
index a684157..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Iterator;
-
-/**
- * Iterator for a metadata store table.
- *
- * @param <KEY> the key type used for seek operations.
- * @param <T> the value type returned by the iterator.
- */
-public interface TableIterator<KEY, T> extends Iterator<T>, Closeable {
-
-  /**
-   * seek to first entry.
-   */
-  void seekToFirst();
-
-  /**
-   * seek to last entry.
-   */
-  void seekToLast();
-
-  /**
-   * Seek to the specific key.
-   *
-   * @param key - the key to seek to.
-   * @return VALUE.
-   */
-  T seek(KEY key) throws IOException;
-
-  /**
-   * Returns the key value at the current position.
-   * @return KEY
-   */
-  KEY key() throws IOException;
-
-  /**
-   * Returns the VALUE at the current position.
-   * @return VALUE
-   */
-  T value();
-
-}
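Because TableIterator extends both Iterator and Closeable, it owns native RocksDB resources and should always be used in try-with-resources, as the isEmpty() implementation above does. A hedged counting sketch over the raw byte[] table:

import java.io.IOException;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;

public final class IterationSketch {

  static long countEntries(Table<byte[], byte[]> table) throws IOException {
    long count = 0;
    // try-with-resources releases the underlying RocksDB iterator promptly.
    try (TableIterator<byte[], ? extends Table.KeyValue<byte[], byte[]>> it =
             table.iterator()) {
      it.seekToFirst();
      while (it.hasNext()) {
        it.next();
        count++;
      }
    }
    return count;
  }

  private IterationSketch() {
  }
}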
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
deleted file mode 100644
index 597eff1..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.Map;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheResult;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl;
-import org.apache.hadoop.hdds.utils.db.cache.TableCache;
-import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl.CacheCleanupPolicy;
-
-import static org.apache.hadoop.hdds.utils.db.cache.CacheResult.CacheStatus.EXISTS;
-import static org.apache.hadoop.hdds.utils.db.cache.CacheResult.CacheStatus.NOT_EXIST;
-/**
- * Strongly typed table implementation.
- * <p>
- * Automatically converts values and keys using a raw byte[] based table
- * implementation and registered converters.
- *
- * @param <KEY>   type of the keys in the store.
- * @param <VALUE> type of the values in the store.
- */
-public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
-
-  private final Table<byte[], byte[]> rawTable;
-
-  private final CodecRegistry codecRegistry;
-
-  private final Class<KEY> keyType;
-
-  private final Class<VALUE> valueType;
-
-  private final TableCache<CacheKey<KEY>, CacheValue<VALUE>> cache;
-
-  private static final long EPOCH_DEFAULT = -1L;
-
-  /**
-   * Create a TypedTable from the raw table.
-   * Default cleanup policy used for the table is
-   * {@link CacheCleanupPolicy#MANUAL}.
-   * @param rawTable
-   * @param codecRegistry
-   * @param keyType
-   * @param valueType
-   */
-  public TypedTable(
-      Table<byte[], byte[]> rawTable,
-      CodecRegistry codecRegistry, Class<KEY> keyType,
-      Class<VALUE> valueType) throws IOException {
-    this(rawTable, codecRegistry, keyType, valueType,
-        CacheCleanupPolicy.MANUAL);
-  }
-
-  /**
-   * Create a TypedTable from the raw table with the specified cleanup policy
-   * for table cache.
-   * @param rawTable
-   * @param codecRegistry
-   * @param keyType
-   * @param valueType
-   * @param cleanupPolicy
-   */
-  public TypedTable(
-      Table<byte[], byte[]> rawTable,
-      CodecRegistry codecRegistry, Class<KEY> keyType,
-      Class<VALUE> valueType,
-      TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException {
-    this.rawTable = rawTable;
-    this.codecRegistry = codecRegistry;
-    this.keyType = keyType;
-    this.valueType = valueType;
-    cache = new TableCacheImpl<>(cleanupPolicy);
-
-    if (cleanupPolicy == CacheCleanupPolicy.NEVER) {
-      //fill cache
-      try (TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> tableIterator =
-              iterator()) {
-
-        while (tableIterator.hasNext()) {
-          KeyValue<KEY, VALUE> kv = tableIterator.next();
-
-          // Rebuild the cache after an OM restart when the cleanup policy is
-          // NEVER. The epoch is set to -1 so that an entry later marked for
-          // delete is still considered for cleanup.
-          cache.loadInitial(new CacheKey<>(kv.getKey()),
-              new CacheValue<>(Optional.of(kv.getValue()), EPOCH_DEFAULT));
-        }
-      }
-    }
-  }
-
-  @Override
-  public void put(KEY key, VALUE value) throws IOException {
-    byte[] keyData = codecRegistry.asRawData(key);
-    byte[] valueData = codecRegistry.asRawData(value);
-    rawTable.put(keyData, valueData);
-  }
-
-  @Override
-  public void putWithBatch(BatchOperation batch, KEY key, VALUE value)
-      throws IOException {
-    byte[] keyData = codecRegistry.asRawData(key);
-    byte[] valueData = codecRegistry.asRawData(value);
-    rawTable.putWithBatch(batch, keyData, valueData);
-  }
-
-  @Override
-  public boolean isEmpty() throws IOException {
-    return rawTable.isEmpty();
-  }
-
-  @Override
-  public boolean isExist(KEY key) throws IOException {
-
-    CacheResult<CacheValue<VALUE>> cacheResult =
-        cache.lookup(new CacheKey<>(key));
-
-    if (cacheResult.getCacheStatus() == EXISTS) {
-      return true;
-    } else if (cacheResult.getCacheStatus() == NOT_EXIST) {
-      return false;
-    } else {
-      return rawTable.isExist(codecRegistry.asRawData(key));
-    }
-  }
-
-  /**
-   * Returns the value mapped to the given key, or null if the key is not
-   * found.
-   *
-   * Callers of this method should use a synchronization mechanism when
-   * accessing it. The cache is checked first; if it holds an entry, that
-   * value is returned, otherwise the value is read from the RocksDB table.
-   *
-   * @param key metadata key
-   * @return VALUE
-   * @throws IOException
-   */
-  @Override
-  public VALUE get(KEY key) throws IOException {
-    // Here the metadata lock will guarantee that cache is not updated for same
-    // key during get key.
-
-    CacheResult<CacheValue<VALUE>> cacheResult =
-        cache.lookup(new CacheKey<>(key));
-
-    if (cacheResult.getCacheStatus() == EXISTS) {
-      return cacheResult.getValue().getCacheValue();
-    } else if (cacheResult.getCacheStatus() == NOT_EXIST) {
-      return null;
-    } else {
-      return getFromTable(key);
-    }
-  }
-
-  private VALUE getFromTable(KEY key) throws IOException {
-    byte[] keyBytes = codecRegistry.asRawData(key);
-    byte[] valueBytes = rawTable.get(keyBytes);
-    return codecRegistry.asObject(valueBytes, valueType);
-  }
-
-  @Override
-  public void delete(KEY key) throws IOException {
-    rawTable.delete(codecRegistry.asRawData(key));
-  }
-
-  @Override
-  public void deleteWithBatch(BatchOperation batch, KEY key)
-      throws IOException {
-    rawTable.deleteWithBatch(batch, codecRegistry.asRawData(key));
-  }
-
-  @Override
-  public TableIterator<KEY, TypedKeyValue> iterator() {
-    TableIterator<byte[], ? extends KeyValue<byte[], byte[]>> iterator =
-        rawTable.iterator();
-    return new TypedTableIterator(iterator, keyType, valueType);
-  }
-
-  @Override
-  public String getName() throws IOException {
-    return rawTable.getName();
-  }
-
-  @Override
-  public long getEstimatedKeyCount() throws IOException {
-    return rawTable.getEstimatedKeyCount();
-  }
-
-  @Override
-  public void close() throws Exception {
-    rawTable.close();
-  }
-
-  @Override
-  public void addCacheEntry(CacheKey<KEY> cacheKey,
-      CacheValue<VALUE> cacheValue) {
-    // This will override the entry if there is already an entry for this key.
-    cache.put(cacheKey, cacheValue);
-  }
-
-  @Override
-  public CacheValue<VALUE> getCacheValue(CacheKey<KEY> cacheKey) {
-    return cache.get(cacheKey);
-  }
-
-  public Iterator<Map.Entry<CacheKey<KEY>, CacheValue<VALUE>>> cacheIterator() {
-    return cache.iterator();
-  }
-
-  @Override
-  public void cleanupCache(long epoch) {
-    cache.cleanup(epoch);
-  }
-
-  @VisibleForTesting
-  TableCache<CacheKey<KEY>, CacheValue<VALUE>> getCache() {
-    return cache;
-  }
-
-  public Table<byte[], byte[]> getRawTable() {
-    return rawTable;
-  }
-
-  public CodecRegistry getCodecRegistry() {
-    return codecRegistry;
-  }
-
-  public Class<KEY> getKeyType() {
-    return keyType;
-  }
-
-  public Class<VALUE> getValueType() {
-    return valueType;
-  }
-
-  /**
-   * Key value implementation for strongly typed tables.
-   */
-  public class TypedKeyValue implements KeyValue<KEY, VALUE> {
-
-    private KeyValue<byte[], byte[]> rawKeyValue;
-
-    public TypedKeyValue(KeyValue<byte[], byte[]> rawKeyValue) {
-      this.rawKeyValue = rawKeyValue;
-    }
-
-    public TypedKeyValue(KeyValue<byte[], byte[]> rawKeyValue,
-        Class<KEY> keyType, Class<VALUE> valueType) {
-      this.rawKeyValue = rawKeyValue;
-    }
-
-    @Override
-    public KEY getKey() throws IOException {
-      return codecRegistry.asObject(rawKeyValue.getKey(), keyType);
-    }
-
-    @Override
-    public VALUE getValue() throws IOException {
-      return codecRegistry.asObject(rawKeyValue.getValue(), valueType);
-    }
-  }
-
-  /**
-   * Table Iterator implementation for strongly typed tables.
-   */
-  public class TypedTableIterator implements TableIterator<KEY, TypedKeyValue> {
-
-    private TableIterator<byte[], ? extends KeyValue<byte[], byte[]>>
-        rawIterator;
-    private final Class<KEY> keyClass;
-    private final Class<VALUE> valueClass;
-
-    public TypedTableIterator(
-        TableIterator<byte[], ? extends KeyValue<byte[], byte[]>> rawIterator,
-        Class<KEY> keyType,
-        Class<VALUE> valueType) {
-      this.rawIterator = rawIterator;
-      keyClass = keyType;
-      valueClass = valueType;
-    }
-
-    @Override
-    public void seekToFirst() {
-      rawIterator.seekToFirst();
-    }
-
-    @Override
-    public void seekToLast() {
-      rawIterator.seekToLast();
-    }
-
-    @Override
-    public TypedKeyValue seek(KEY key) throws IOException {
-      byte[] keyBytes = codecRegistry.asRawData(key);
-      KeyValue<byte[], byte[]> result = rawIterator.seek(keyBytes);
-      if (result == null) {
-        return null;
-      }
-      return new TypedKeyValue(result);
-    }
-
-    @Override
-    public KEY key() throws IOException {
-      byte[] result = rawIterator.key();
-      if (result == null) {
-        return null;
-      }
-      return codecRegistry.asObject(result, keyClass);
-    }
-
-    @Override
-    public TypedKeyValue value() {
-      KeyValue<byte[], byte[]> keyValue = rawIterator.value();
-      if (keyValue != null) {
-        return new TypedKeyValue(keyValue, keyClass, valueClass);
-      }
-      return null;
-    }
-
-    @Override
-    public void close() throws IOException {
-      rawIterator.close();
-    }
-
-    @Override
-    public boolean hasNext() {
-      return rawIterator.hasNext();
-    }
-
-    @Override
-    public TypedKeyValue next() {
-      return new TypedKeyValue(rawIterator.next(), keyType,
-          valueType);
-    }
-  }
-}
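A TypedTable is the strongly typed facade over a raw byte[] table; keys and values round-trip through the CodecRegistry. A hedged construction sketch, assuming CodecRegistry registers a String codec by default (as the StringCodec above suggests):

import java.io.IOException;
import org.apache.hadoop.hdds.utils.db.CodecRegistry;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TypedTable;

public final class TypedTableSketch {

  static TypedTable<String, String> wrap(Table<byte[], byte[]> rawTable)
      throws IOException {
    // Uses the default MANUAL cache cleanup policy documented above.
    return new TypedTable<>(rawTable, new CodecRegistry(),
        String.class, String.class);
  }

  static void roundTrip(TypedTable<String, String> table) throws IOException {
    table.put("bucket1", "owner=hadoop");
    // The codec converts in both directions, so the value comes back as-is.
    System.out.println(table.get("bucket1")); // owner=hadoop
  }

  private TypedTableSketch() {
  }
}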
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java
deleted file mode 100644
index 7be2921..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils.db.cache;
-
-import java.util.Objects;
-
-/**
- * CacheKey for the RocksDB table.
- * @param <KEY>
- */
-public class CacheKey<KEY> implements Comparable<KEY> {
-
-  private final KEY key;
-
-  public CacheKey(KEY key) {
-    Objects.requireNonNull(key, "Key Should not be null in CacheKey");
-    this.key = key;
-  }
-
-  public KEY getCacheKey() {
-    return key;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    CacheKey<?> cacheKey = (CacheKey<?>) o;
-    return Objects.equals(key, cacheKey.key);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(key);
-  }
-
-  @Override
-  public int compareTo(Object o) {
-    if (Objects.equals(key, ((CacheKey<?>) o).key)) {
-      return 0;
-    } else {
-      return key.toString().compareTo((((CacheKey<?>) o).key).toString());
-    }
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheResult.java
deleted file mode 100644
index 8c5a68b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheResult.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils.db.cache;
-
-import java.util.Objects;
-
-/**
- * CacheResult returned when checking whether a key exists in the cache.
- * @param <CACHEVALUE>
- */
-public class CacheResult<CACHEVALUE extends CacheValue> {
-
-  private CacheStatus cacheStatus;
-  private CACHEVALUE cachevalue;
-
-  public CacheResult(CacheStatus cacheStatus, CACHEVALUE cachevalue) {
-    this.cacheStatus = cacheStatus;
-    this.cachevalue = cachevalue;
-  }
-
-  public CacheStatus getCacheStatus() {
-    return cacheStatus;
-  }
-
-  public CACHEVALUE getValue() {
-    return cachevalue;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    CacheResult<?> that = (CacheResult<?>) o;
-    return cacheStatus == that.cacheStatus &&
-        Objects.equals(cachevalue, that.cachevalue);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(cacheStatus, cachevalue);
-  }
-
-  /**
-   * Status which tells whether key exists in cache or not.
-   */
-  public enum CacheStatus {
-    EXISTS, // When key exists in cache.
-
-    NOT_EXIST, // Guaranteed absent. Returned when the key is not in the
-    // cache and the cache cleanup policy is NEVER (full cache).
-    MAY_EXIST  // Returned when the key is not in the cache and the cleanup
-    // policy is MANUAL, so the caller needs to check whether the key exists
-    // in its RocksDB table.
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java
deleted file mode 100644
index de9fe0d..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils.db.cache;
-
-import com.google.common.base.Optional;
-
-/**
- * CacheValue for the RocksDB Table.
- * @param <VALUE>
- */
-public class CacheValue<VALUE> {
-
-  private Optional<VALUE> value;
-  // This value is used to evict entries from the cache. It is set to the
-  // Ratis transaction context log entry index.
-  private long epoch;
-
-  public CacheValue(Optional<VALUE> value, long epoch) {
-    this.value = value;
-    this.epoch = epoch;
-  }
-
-  public VALUE getCacheValue() {
-    return value.orNull();
-  }
-
-  public long getEpoch() {
-    return epoch;
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java
deleted file mode 100644
index 7235202..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db.cache;
-
-import java.util.Objects;
-
-/**
- * Class that describes an epoch entry. It is used when deleting entries
- * from the cache of a partial table cache.
- * @param <CACHEKEY>
- */
-public class EpochEntry<CACHEKEY> implements Comparable<CACHEKEY> {
-
-  private long epoch;
-  private CACHEKEY cachekey;
-
-  EpochEntry(long epoch, CACHEKEY cachekey) {
-    this.epoch = epoch;
-    this.cachekey = cachekey;
-  }
-
-  public long getEpoch() {
-    return epoch;
-  }
-
-  public CACHEKEY getCachekey() {
-    return cachekey;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    EpochEntry<?> that = (EpochEntry<?>) o;
-    return epoch == that.epoch && cachekey == that.cachekey;
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(epoch, cachekey);
-  }
-
-  public int compareTo(Object o) {
-    if (this.epoch == ((EpochEntry<?>) o).epoch) {
-      return 0;
-    } else if (this.epoch < ((EpochEntry<?>)o).epoch) {
-      return -1;
-    } else {
-      return 1;
-    }
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java
deleted file mode 100644
index de5a079..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db.cache;
-
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
-
-import java.util.Iterator;
-import java.util.Map;
-
-/**
- * Cache used for RocksDB tables.
- * @param <CACHEKEY>
- * @param <CACHEVALUE>
- */
-
-@Private
-@Evolving
-public interface TableCache<CACHEKEY extends CacheKey,
-    CACHEVALUE extends CacheValue> {
-
-  /**
-   * Return the value for the key if it is present, otherwise return null.
-   * @param cacheKey
-   * @return CACHEVALUE
-   */
-  CACHEVALUE get(CACHEKEY cacheKey);
-
-  /**
-   * This method should be called for tables with cache cleanup policy
-   * {@link TableCacheImpl.CacheCleanupPolicy#NEVER} after system restart to
-   * fill up the cache.
-   * @param cacheKey
-   * @param cacheValue
-   */
-  void loadInitial(CACHEKEY cacheKey, CACHEVALUE cacheValue);
-
-  /**
-   * Add an entry to the cache; if the key already exists, it is overridden.
-   * @param cacheKey
-   * @param value
-   */
-  void put(CACHEKEY cacheKey, CACHEVALUE value);
-
-  /**
-   * Removes all entries from the cache whose epoch value is less than or
-   * equal to the specified epoch value.
-   *
-   * If the cleanup policy is NEVER, this is a no-op.
-   * If the cleanup policy is MANUAL, it is the caller's responsibility to
-   * flush the entries to the DB before calling cleanup.
-   * @param epoch
-   */
-  void cleanup(long epoch);
-
-  /**
-   * Return the size of the cache.
-   * @return size
-   */
-  int size();
-
-  /**
-   * Return an iterator for the cache.
-   * @return iterator of the underlying cache for the table.
-   */
-  Iterator<Map.Entry<CACHEKEY, CACHEVALUE>> iterator();
-
-  /**
-   * Check whether the key exists in the cache.
-   *
-   * If it exists return CacheResult with value and status as
-   * {@link CacheResult.CacheStatus#EXISTS}
-   *
-   * If it does not exist:
-   *  If the cache cleanup policy is
-   *  {@link TableCacheImpl.CacheCleanupPolicy#NEVER}, the table cache is a
-   *  full cache; it returns {@link CacheResult} with a null value and
-   *  status {@link CacheResult.CacheStatus#NOT_EXIST}.
-   *
-   *  If the cache cleanup policy is
-   *  {@link TableCacheImpl.CacheCleanupPolicy#MANUAL}, the table cache is a
-   *  partial cache; it returns {@link CacheResult} with a null value and
-   *  status MAY_EXIST.
-   *
-   * @param cachekey
-   */
-  CacheResult<CACHEVALUE> lookup(CACHEKEY cachekey);
-
-}
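lookup deliberately returns three states rather than two, so a caller can skip the DB read whenever the cache answer is authoritative. A hedged sketch of the dispatch that TypedTable.get above performs, written against this interface (the readFromDb function stands in for the store-specific read):

import java.util.function.Function;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheResult;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.hdds.utils.db.cache.TableCache;

public final class CacheLookupSketch {

  static <V> V resolve(TableCache<CacheKey<String>, CacheValue<V>> cache,
      String key, Function<String, V> readFromDb) {
    CacheResult<CacheValue<V>> result = cache.lookup(new CacheKey<>(key));
    switch (result.getCacheStatus()) {
    case EXISTS:
      return result.getValue().getCacheValue(); // authoritative hit
    case NOT_EXIST:
      return null;           // authoritative miss (full cache or tombstone)
    default:                 // MAY_EXIST: partial cache, consult RocksDB
      return readFromDb.apply(key);
    }
  }

  private CacheLookupSketch() {
  }
}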
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java
deleted file mode 100644
index 3e6999a..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db.cache;
-
-import java.util.Iterator;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.ConcurrentSkipListSet;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ThreadFactory;
-
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
-
-/**
- * Cache implementation for the table. Depending on the cache clean up policy
- * this cache will be full cache or partial cache.
- *
- * If cache cleanup policy is set as {@link CacheCleanupPolicy#MANUAL},
- * this will be a partial cache.
- *
- * If cache cleanup policy is set as {@link CacheCleanupPolicy#NEVER},
- * this will be a full cache.
- */
-@Private
-@Evolving
-public class TableCacheImpl<CACHEKEY extends CacheKey,
-    CACHEVALUE extends CacheValue> implements TableCache<CACHEKEY, CACHEVALUE> {
-
-  private final Map<CACHEKEY, CACHEVALUE> cache;
-  private final NavigableSet<EpochEntry<CACHEKEY>> epochEntries;
-  private ExecutorService executorService;
-  private CacheCleanupPolicy cleanupPolicy;
-
-  public TableCacheImpl(CacheCleanupPolicy cleanupPolicy) {
-
-    // Only the full table cache needs its elements kept in sorted order;
-    // the other cleanup policies can use a plain hash map.
-    if (cleanupPolicy == CacheCleanupPolicy.NEVER) {
-      cache = new ConcurrentSkipListMap<>();
-    } else {
-      cache = new ConcurrentHashMap<>();
-    }
-    epochEntries = new ConcurrentSkipListSet<>();
-    // A single-thread executor ensures that only one cleanup runs at a
-    // time.
-    ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true)
-        .setNameFormat("PartialTableCache Cleanup Thread - %d").build();
-    executorService = Executors.newSingleThreadExecutor(build);
-    this.cleanupPolicy = cleanupPolicy;
-  }
-
-  @Override
-  public CACHEVALUE get(CACHEKEY cachekey) {
-    return cache.get(cachekey);
-  }
-
-  @Override
-  public void loadInitial(CACHEKEY cacheKey, CACHEVALUE cacheValue) {
-    // No need to add entry to epochEntries. Adding to cache is required during
-    // normal put operation.
-    cache.put(cacheKey, cacheValue);
-  }
-
-  @Override
-  public void put(CACHEKEY cacheKey, CACHEVALUE value) {
-    cache.put(cacheKey, value);
-    epochEntries.add(new EpochEntry<>(value.getEpoch(), cacheKey));
-  }
-
-  @Override
-  public void cleanup(long epoch) {
-    executorService.submit(() -> evictCache(epoch, cleanupPolicy));
-  }
-
-  @Override
-  public int size() {
-    return cache.size();
-  }
-
-  @Override
-  public Iterator<Map.Entry<CACHEKEY, CACHEVALUE>> iterator() {
-    return cache.entrySet().iterator();
-  }
-
-  private void evictCache(long epoch, CacheCleanupPolicy cacheCleanupPolicy) {
-    EpochEntry<CACHEKEY> currentEntry = null;
-    for (Iterator<EpochEntry<CACHEKEY>> iterator = epochEntries.iterator();
-         iterator.hasNext();) {
-      currentEntry = iterator.next();
-      CACHEKEY cachekey = currentEntry.getCachekey();
-      CacheValue cacheValue = cache.computeIfPresent(cachekey, ((k, v) -> {
-        if (cleanupPolicy == CacheCleanupPolicy.MANUAL) {
-          if (v.getEpoch() <= epoch) {
-            iterator.remove();
-            return null;
-          }
-        } else if (cleanupPolicy == CacheCleanupPolicy.NEVER) {
-          // Remove only entries which are marked for delete.
-          if (v.getEpoch() <= epoch && v.getCacheValue() == null) {
-            iterator.remove();
-            return null;
-          }
-        }
-        return v;
-      }));
-      // If the current entry's epoch is greater than the requested epoch,
-      // all entries below that epoch have been removed, so we can stop.
-      if (cacheValue != null && cacheValue.getEpoch() >= epoch) {
-        break;
-      }
-    }
-  }
-
-  public CacheResult<CACHEVALUE> lookup(CACHEKEY cachekey) {
-
-    CACHEVALUE cachevalue = cache.get(cachekey);
-    if (cachevalue == null) {
-      if (cleanupPolicy == CacheCleanupPolicy.NEVER) {
-        return new CacheResult<>(CacheResult.CacheStatus.NOT_EXIST, null);
-      } else {
-        return new CacheResult<>(CacheResult.CacheStatus.MAY_EXIST,
-            null);
-      }
-    } else {
-      if (cachevalue.getCacheValue() != null) {
-        return new CacheResult<>(CacheResult.CacheStatus.EXISTS, cachevalue);
-      } else {
-        // When entity is marked for delete, cacheValue will be set to null.
-        // In that case we can return NOT_EXIST irrespective of cache cleanup
-        // policy.
-        return new CacheResult<>(CacheResult.CacheStatus.NOT_EXIST, null);
-      }
-    }
-  }
-
-  /**
-   * Cleanup policies for table cache.
-   */
-  public enum CacheCleanupPolicy {
-    NEVER, // Cache will not be cleaned up. This means the table maintains
-    // a full cache.
-    MANUAL // Cache will be cleaned up after flushing to the DB. It is the
-    // caller's responsibility to flush to the DB before calling cleanup.
-  }
-}
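Eviction is epoch-driven and asynchronous: cleanup(epoch) hands the work to the single cleanup thread, which walks epochEntries in order and drops qualifying keys. A hedged demonstration against the class above (the sleep is only to let the background eviction finish in a toy program; epochs are Ratis log indexes in production):

import com.google.common.base.Optional;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl;

public final class CacheEvictionSketch {

  public static void main(String[] args) throws InterruptedException {
    TableCacheImpl<CacheKey<String>, CacheValue<String>> cache =
        new TableCacheImpl<>(TableCacheImpl.CacheCleanupPolicy.MANUAL);
    cache.put(new CacheKey<>("k1"), new CacheValue<>(Optional.of("v1"), 1L));
    cache.put(new CacheKey<>("k2"), new CacheValue<>(Optional.of("v2"), 2L));
    cache.cleanup(1L);        // asynchronous: evicts entries with epoch <= 1
    Thread.sleep(100);        // give the cleanup thread time to run
    System.out.println(cache.size()); // expected: 1 (only "k2" remains)
  }
}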
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java
deleted file mode 100644
index eb9c5b9..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.utils.db.cache;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java
deleted file mode 100644
index 8b56bff..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Database interfaces for Ozone.
- */
-package org.apache.hadoop.hdds.utils.db;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/package-info.java
deleted file mode 100644
index 4576dc8..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.utils;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
deleted file mode 100644
index 3f7d0b9..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ /dev/null
@@ -1,464 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-
-import org.apache.ratis.proto.RaftProtos.ReplicationLevel;
-import org.apache.ratis.util.TimeDuration;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * This class contains constants for configuration keys used in Ozone.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class OzoneConfigKeys {
-  public static final String OZONE_TAGS_SYSTEM_KEY =
-      "ozone.tags.system";
-  public static final String DFS_CONTAINER_IPC_PORT =
-      "dfs.container.ipc";
-  public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859;
-
-  public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs";
-
-  /**
-   *
-   * When set to true, allocate a random free port for ozone container,
-   * so that a mini cluster is able to launch multiple containers on a node.
-   *
-   * When set to false (default), container port is fixed as specified by
-   * DFS_CONTAINER_IPC_PORT_DEFAULT.
-   */
-  public static final String DFS_CONTAINER_IPC_RANDOM_PORT =
-      "dfs.container.ipc.random.port";
-  public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT =
-      false;
-
-  public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY =
-      "dfs.container.chunk.write.sync";
-  public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false;
-  /**
-   * Ratis Port where containers listen to.
-   */
-  public static final String DFS_CONTAINER_RATIS_IPC_PORT =
-      "dfs.container.ratis.ipc";
-  public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858;
-
-  /**
-   * When set to true, allocate a random free port for ozone container, so that
-   * a mini cluster is able to launch multiple containers on a node.
-   */
-  public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT =
-      "dfs.container.ratis.ipc.random.port";
-  public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
-      false;
-  public static final String OZONE_ENABLED =
-      "ozone.enabled";
-  public static final boolean OZONE_ENABLED_DEFAULT = false;
-  public static final String OZONE_TRACE_ENABLED_KEY =
-      "ozone.trace.enabled";
-  public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;
-
-  public static final String OZONE_METADATA_STORE_IMPL =
-      "ozone.metastore.impl";
-  public static final String OZONE_METADATA_STORE_IMPL_LEVELDB =
-      "LevelDB";
-  public static final String OZONE_METADATA_STORE_IMPL_ROCKSDB =
-      "RocksDB";
-  public static final String OZONE_METADATA_STORE_IMPL_DEFAULT =
-      OZONE_METADATA_STORE_IMPL_ROCKSDB;
-
-  public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS =
-      "ozone.metastore.rocksdb.statistics";
-
-  public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT =
-      "OFF";
-  public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF =
-      "OFF";
-
-  public static final String OZONE_UNSAFEBYTEOPERATIONS_ENABLED =
-      "ozone.UnsafeByteOperations.enabled";
-  public static final boolean OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT
-      = true;
-
-  public static final String OZONE_CONTAINER_CACHE_SIZE =
-      "ozone.container.cache.size";
-  public static final int OZONE_CONTAINER_CACHE_DEFAULT = 1024;
-
-  public static final String OZONE_SCM_BLOCK_SIZE =
-      "ozone.scm.block.size";
-  public static final String OZONE_SCM_BLOCK_SIZE_DEFAULT = "256MB";
-
-  /**
-   * Ozone administrator users, delimited by commas.
-   * If not set, only the user who launches an ozone service will be the
-   * admin user. This property must be set if ozone services are started by
-   * different users. Otherwise, the RPC layer will reject calls from
-   * servers started by users not in the list.
-   */
-  public static final String OZONE_ADMINISTRATORS =
-      "ozone.administrators";
-  /**
-   * Used only for testing purposes. Results in making every user an admin.
-   */
-  public static final String OZONE_ADMINISTRATORS_WILDCARD = "*";
-
-  public static final String OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE =
-      "ozone.client.stream.buffer.flush.size";
-
-  public static final String OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE_DEFAULT =
-      "64MB";
-
-  public static final String OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE =
-      "ozone.client.stream.buffer.max.size";
-
-  public static final String OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE_DEFAULT =
-      "128MB";
-
-  public static final String OZONE_CLIENT_WATCH_REQUEST_TIMEOUT =
-      "ozone.client.watch.request.timeout";
-
-  public static final String OZONE_CLIENT_WATCH_REQUEST_TIMEOUT_DEFAULT =
-      "30s";
-
-  public static final String OZONE_CLIENT_MAX_RETRIES =
-      "ozone.client.max.retries";
-  public static final int OZONE_CLIENT_MAX_RETRIES_DEFAULT = 100;
-  public static final String OZONE_CLIENT_RETRY_INTERVAL =
-      "ozone.client.retry.interval";
-  public static final TimeDuration OZONE_CLIENT_RETRY_INTERVAL_DEFAULT =
-      TimeDuration.valueOf(0, TimeUnit.MILLISECONDS);
-
-  // This defines the overall connection limit for the connection pool used in
-  // RestClient.
-  public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_MAX =
-      "ozone.rest.client.http.connection.max";
-  public static final int OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT = 100;
-
-  // This defines the connection limit per one HTTP route/host.
-  public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX =
-      "ozone.rest.client.http.connection.per-route.max";
-
-  public static final int
-      OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT = 20;
-
-  public static final String OZONE_CLIENT_SOCKET_TIMEOUT =
-      "ozone.client.socket.timeout";
-  public static final int OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT = 5000;
-  public static final String OZONE_CLIENT_CONNECTION_TIMEOUT =
-      "ozone.client.connection.timeout";
-  public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT = 5000;
-
-  public static final String OZONE_REPLICATION = "ozone.replication";
-  public static final int OZONE_REPLICATION_DEFAULT =
-      ReplicationFactor.THREE.getValue();
-
-  public static final String OZONE_REPLICATION_TYPE = "ozone.replication.type";
-  public static final String OZONE_REPLICATION_TYPE_DEFAULT =
-      ReplicationType.RATIS.toString();
-
-  /**
-   * Configuration property to configure the cache size of client list calls.
-   */
-  public static final String OZONE_CLIENT_LIST_CACHE_SIZE =
-      "ozone.client.list.cache";
-  public static final int OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT = 1000;
-
-  /**
-   * Configuration properties for Ozone Block Deleting Service.
-   */
-  public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL =
-      "ozone.block.deleting.service.interval";
-  public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT
-      = "60s";
-
-  /**
-   * The interval of open key clean service.
-   */
-  public static final String OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS =
-      "ozone.open.key.cleanup.service.interval.seconds";
-  public static final int
-      OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT
-      = 24 * 3600; // a total of 24 hours
-
-  /**
-   * An open key gets cleaned up when it has been in the open state for too long.
-   */
-  public static final String OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS =
-      "ozone.open.key.expire.threshold";
-  public static final int OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT =
-      24 * 3600;
-
-  public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT =
-      "ozone.block.deleting.service.timeout";
-  public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT
-      = "300s"; // 300s for default
-
-  public static final String OZONE_KEY_PREALLOCATION_BLOCKS_MAX =
-      "ozone.key.preallocation.max.blocks";
-  public static final int OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT
-      = 64;
-
-  public static final String OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER =
-      "ozone.block.deleting.limit.per.task";
-  public static final int OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT
-      = 1000;
-
-  public static final String OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL
-      = "ozone.block.deleting.container.limit.per.interval";
-  public static final int
-      OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
-
-  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
-  public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY;
-  public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY;
-  public static final ReplicationLevel
-      DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY;
-  public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY;
-  public static final String
-      DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT;
-
-  // config settings to enable stateMachineData write timeout
-  public static final String
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT;
-  public static final TimeDuration
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT;
-
-  public static final String
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL =
-      ScmConfigKeys.
-          DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL;
-  public static final String
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT =
-      ScmConfigKeys.
-          DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT;
-
-  public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
-      "dfs.container.ratis.datanode.storage.dir";
-  public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT;
-  public static final String DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY;
-  public static final int DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT;
-  public static final String DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY;
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT;
-  public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT;
-  public static final String
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES;
-  public static final int
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS;
-  public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT;
-  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT;
-  public static final String
-      DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS;
-  public static final int
-      DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT;
-  public static final String
-      DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP;
-  public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS;
-  public static final int
-      DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT;
-  public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT;
-  public static final String
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT;
-  public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY =
-      ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY;
-  public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT;
-
-  public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT;
-
-  public static final String HDDS_DATANODE_PLUGINS_KEY =
-      "hdds.datanode.plugins";
-
-  public static final String
-      HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD =
-      "hdds.datanode.storage.utilization.warning.threshold";
-  public static final double
-      HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD_DEFAULT = 0.75;
-  public static final String
-      HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD =
-      "hdds.datanode.storage.utilization.critical.threshold";
-  public static final double
-      HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT = 0.95;
-
-  public static final String OZONE_SECURITY_ENABLED_KEY =
-      "ozone.security.enabled";
-  public static final boolean OZONE_SECURITY_ENABLED_DEFAULT = false;
-
-  public static final String OZONE_CONTAINER_COPY_WORKDIR =
-      "hdds.datanode.replication.work.dir";
-
-  /**
-   * Config properties to set client side checksum properties.
-   */
-  public static final String OZONE_CLIENT_CHECKSUM_TYPE =
-      "ozone.client.checksum.type";
-  public static final String OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT = "CRC32";
-  public static final String OZONE_CLIENT_BYTES_PER_CHECKSUM =
-      "ozone.client.bytes.per.checksum";
-  public static final String OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT = "1MB";
-  public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT_BYTES =
-      1024 * 1024;
-  public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE = 256 * 1024;
-  public static final String OZONE_CLIENT_VERIFY_CHECKSUM =
-      "ozone.client.verify.checksum";
-  public static final boolean OZONE_CLIENT_VERIFY_CHECKSUM_DEFAULT = true;
-  public static final String OZONE_ACL_AUTHORIZER_CLASS =
-      "ozone.acl.authorizer.class";
-  public static final String OZONE_ACL_AUTHORIZER_CLASS_DEFAULT =
-      "org.apache.hadoop.ozone.security.acl.OzoneAccessAuthorizer";
-  public static final String OZONE_ACL_AUTHORIZER_CLASS_NATIVE =
-      "org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer";
-  public static final String OZONE_ACL_ENABLED =
-      "ozone.acl.enabled";
-  public static final boolean OZONE_ACL_ENABLED_DEFAULT =
-      false;
-  public static final String OZONE_S3_TOKEN_MAX_LIFETIME_KEY =
-      "ozone.s3.token.max.lifetime";
-  public static final String OZONE_S3_TOKEN_MAX_LIFETIME_KEY_DEFAULT = "3m";
-  // For technical reasons this is unused and hardcoded in
-  // OzoneFileSystem.initialize.
-  public static final String OZONE_FS_ISOLATED_CLASSLOADER =
-      "ozone.fs.isolated-classloader";
-
-  // Ozone Client Retry and Failover configurations
-  public static final String OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY =
-      "ozone.client.retry.max.attempts";
-  public static final int OZONE_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT =
-      10;
-  public static final String OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY =
-      "ozone.client.failover.max.attempts";
-  public static final int OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT =
-      15;
-  public static final String OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY =
-      "ozone.client.failover.sleep.base.millis";
-  public static final int OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT =
-      500;
-  public static final String OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY =
-      "ozone.client.failover.sleep.max.millis";
-  public static final int OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT =
-      15000;
-
-  public static final String OZONE_FREON_HTTP_ENABLED_KEY =
-      "ozone.freon.http.enabled";
-  public static final String OZONE_FREON_HTTP_BIND_HOST_KEY =
-      "ozone.freon.http-bind-host";
-  public static final String OZONE_FREON_HTTPS_BIND_HOST_KEY =
-      "ozone.freon.https-bind-host";
-  public static final String OZONE_FREON_HTTP_ADDRESS_KEY =
-      "ozone.freon.http-address";
-  public static final String OZONE_FREON_HTTPS_ADDRESS_KEY =
-      "ozone.freon.https-address";
-
-  public static final String OZONE_FREON_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
-  public static final int OZONE_FREON_HTTP_BIND_PORT_DEFAULT = 9884;
-  public static final int OZONE_FREON_HTTPS_BIND_PORT_DEFAULT = 9885;
-  public static final String
-      OZONE_FREON_HTTP_KERBEROS_PRINCIPAL_KEY =
-      "ozone.freon.http.kerberos.principal";
-  public static final String
-      OZONE_FREON_HTTP_KERBEROS_KEYTAB_FILE_KEY =
-      "ozone.freon.http.kerberos.keytab";
-
-  public static final String OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY =
-      "ozone.network.topology.aware.read";
-  public static final boolean OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT = false;
-
-  public static final String OZONE_MANAGER_FAIR_LOCK = "ozone.om.lock.fair";
-  public static final boolean OZONE_MANAGER_FAIR_LOCK_DEFAULT = false;
-
-  /**
-   * There is no need to instantiate this class.
-   */
-  private OzoneConfigKeys() {
-  }
-}
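For context, the configuration keys deleted above were consumed through the standard Hadoop Configuration API. A minimal sketch (the class name is invented for illustration) pairing each *_KEY constant with its *_DEFAULT companion:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.OzoneConfigKeys;

    public class ConfigKeysUsageSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Every *_KEY constant is read together with its *_DEFAULT companion.
        boolean ozoneEnabled = conf.getBoolean(
            OzoneConfigKeys.OZONE_ENABLED,
            OzoneConfigKeys.OZONE_ENABLED_DEFAULT);
        // Mini clusters flip the random-port switch so several containers
        // can run on one node without colliding on the fixed IPC port.
        boolean randomPort = conf.getBoolean(
            OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
            OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT);
        System.out.println("ozone.enabled=" + ozoneEnabled
            + ", random container port=" + randomPort);
      }
    }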
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
deleted file mode 100644
index 9817d87..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ratis.thirdparty.io.grpc.Context;
-import org.apache.ratis.thirdparty.io.grpc.Metadata;
-
-import static org.apache.ratis.thirdparty.io.grpc.Metadata.ASCII_STRING_MARSHALLER;
-
-/**
- * Set of constants used in Ozone implementation.
- */
-@InterfaceAudience.Private
-public final class OzoneConsts {
-
-
-  public static final String STORAGE_DIR = "scm";
-  public static final String SCM_ID = "scmUuid";
-
-  public static final String OZONE_SIMPLE_ROOT_USER = "root";
-  public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";
-
-  public static final String STORAGE_ID = "storageID";
-  public static final String DATANODE_UUID = "datanodeUuid";
-  public static final String CLUSTER_ID = "clusterID";
-  public static final String LAYOUTVERSION = "layOutVersion";
-  public static final String CTIME = "ctime";
-  /*
-   * The bucket name length limits below are used for both bucket and
-   * volume names.
-   */
-  public static final int OZONE_MIN_BUCKET_NAME_LENGTH = 3;
-  public static final int OZONE_MAX_BUCKET_NAME_LENGTH = 63;
-
-  public static final String OZONE_ACL_USER_TYPE = "user";
-  public static final String OZONE_ACL_GROUP_TYPE = "group";
-  public static final String OZONE_ACL_WORLD_TYPE = "world";
-  public static final String OZONE_ACL_ANONYMOUS_TYPE = "anonymous";
-  public static final String OZONE_ACL_IP_TYPE = "ip";
-
-  public static final String OZONE_ACL_READ = "r";
-  public static final String OZONE_ACL_WRITE = "w";
-  public static final String OZONE_ACL_DELETE = "d";
-  public static final String OZONE_ACL_LIST = "l";
-  public static final String OZONE_ACL_ALL = "a";
-  public static final String OZONE_ACL_NONE = "n";
-  public static final String OZONE_ACL_CREATE = "c";
-  public static final String OZONE_ACL_READ_ACL = "x";
-  public static final String OZONE_ACL_WRITE_ACL = "y";
-
-
-  public static final String OZONE_DATE_FORMAT =
-      "EEE, dd MMM yyyy HH:mm:ss zzz";
-  public static final String OZONE_TIME_ZONE = "GMT";
-
-  public static final String OZONE_COMPONENT = "component";
-  public static final String OZONE_FUNCTION  = "function";
-  public static final String OZONE_RESOURCE = "resource";
-  public static final String OZONE_USER = "user";
-  public static final String OZONE_REQUEST = "request";
-
-  // OM Http server endpoints
-  public static final String OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT =
-      "/serviceList";
-  public static final String OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT =
-      "/dbCheckpoint";
-
-  // Ozone File System scheme
-  public static final String OZONE_URI_SCHEME = "o3fs";
-
-  public static final String OZONE_RPC_SCHEME = "o3";
-  public static final String OZONE_HTTP_SCHEME = "http";
-  public static final String OZONE_URI_DELIMITER = "/";
-
-  public static final String CONTAINER_EXTENSION = ".container";
-  public static final String CONTAINER_META = ".meta";
-
-  // Refer to {@link ContainerReader} for container storage layout on disk.
-  public static final String CONTAINER_PREFIX  = "containers";
-  public static final String CONTAINER_META_PATH = "metadata";
-  public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp";
-  public static final String CONTAINER_CHUNK_NAME_DELIMITER = ".";
-  public static final String CONTAINER_ROOT_PREFIX = "repository";
-
-  public static final String FILE_HASH = "SHA-256";
-  public static final String MD5_HASH = "MD5";
-  public static final String CHUNK_OVERWRITE = "OverWriteRequested";
-
-  public static final int CHUNK_SIZE = 1 * 1024 * 1024; // 1 MB
-  public static final long KB = 1024L;
-  public static final long MB = KB * 1024L;
-  public static final long GB = MB * 1024L;
-  public static final long TB = GB * 1024L;
-
-  /**
-   * LevelDB names used by SCM and datanodes.
-   */
-  public static final String CONTAINER_DB_SUFFIX = "container.db";
-  public static final String PIPELINE_DB_SUFFIX = "pipeline.db";
-  public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
-  public static final String SCM_PIPELINE_DB = "scm-" + PIPELINE_DB_SUFFIX;
-  public static final String DN_CONTAINER_DB = "-dn-" + CONTAINER_DB_SUFFIX;
-  public static final String DELETED_BLOCK_DB = "deletedBlock.db";
-  public static final String OM_DB_NAME = "om.db";
-  public static final String OM_DB_BACKUP_PREFIX = "om.db.backup.";
-  public static final String OM_DB_CHECKPOINTS_DIR_NAME = "om.db.checkpoints";
-  public static final String OZONE_MANAGER_TOKEN_DB_NAME = "om-token.db";
-  public static final String SCM_DB_NAME = "scm.db";
-
-  public static final String STORAGE_DIR_CHUNKS = "chunks";
-  public static final String OZONE_DB_CHECKPOINT_REQUEST_FLUSH =
-      "flushBeforeCheckpoint";
-
-  /**
-   * Supports Bucket Versioning.
-   */
-  public enum Versioning {
-    NOT_DEFINED, ENABLED, DISABLED;
-
-    public static Versioning getVersioning(boolean versioning) {
-      return versioning ? ENABLED : DISABLED;
-    }
-  }
-
-  public static final String DELETING_KEY_PREFIX = "#deleting#";
-  public static final String DELETED_KEY_PREFIX = "#deleted#";
-  public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
-  public static final String BLOCK_COMMIT_SEQUENCE_ID_PREFIX = "#BCSID";
-
-  /**
-   * OM LevelDB prefixes.
-   *
-   * OM DB stores metadata as KV pairs with certain prefixes,
-   * prefix is used to improve the performance to get related
-   * metadata.
-   *
-   * OM DB Schema:
-   *  ----------------------------------------------------------
-   *  |  KEY                                     |     VALUE   |
-   *  ----------------------------------------------------------
-   *  | $userName                                |  VolumeList |
-   *  ----------------------------------------------------------
-   *  | /#volumeName                             |  VolumeInfo |
-   *  ----------------------------------------------------------
-   *  | /#volumeName/#bucketName                 |  BucketInfo |
-   *  ----------------------------------------------------------
-   *  | /volumeName/bucketName/keyName           |  KeyInfo    |
-   *  ----------------------------------------------------------
-   *  | #deleting#/volumeName/bucketName/keyName |  KeyInfo    |
-   *  ----------------------------------------------------------
-   */
-
-  public static final String OM_KEY_PREFIX = "/";
-  public static final String OM_USER_PREFIX = "$";
-  public static final String OM_S3_PREFIX = "S3:";
-  public static final String OM_S3_VOLUME_PREFIX = "s3";
-  public static final String OM_S3_SECRET = "S3Secret:";
-  public static final String OM_PREFIX = "Prefix:";
-
-  /**
-   *   Max chunk size limit.
-   */
-  public static final int OZONE_SCM_CHUNK_MAX_SIZE = 32 * 1024 * 1024;
-
-
-  /**
-   * Max OM Quota size of 1024 PB.
-   */
-  public static final long MAX_QUOTA_IN_BYTES = 1024L * 1024 * TB;
-
-  /**
-   * Max number of keys returned per list buckets operation.
-   */
-  public static final int MAX_LISTBUCKETS_SIZE  = 1024;
-
-  /**
-   * Max number of keys returned per list keys operation.
-   */
-  public static final int MAX_LISTKEYS_SIZE  = 1024;
-
-  /**
-   * Max number of volumes returned per list volumes operation.
-   */
-  public static final int MAX_LISTVOLUMES_SIZE = 1024;
-
-  public static final int INVALID_PORT = -1;
-
-
-  /**
-   * Default SCM Datanode ID file name.
-   */
-  public static final String OZONE_SCM_DATANODE_ID_FILE_DEFAULT = "datanode.id";
-
-  // The ServiceListJSONServlet context attribute where OzoneManager
-  // instance gets stored.
-  public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om";
-
-  private OzoneConsts() {
-    // Never Constructed
-  }
-
-  // YAML fields for .container files
-  public static final String CONTAINER_ID = "containerID";
-  public static final String CONTAINER_TYPE = "containerType";
-  public static final String STATE = "state";
-  public static final String METADATA = "metadata";
-  public static final String MAX_SIZE = "maxSize";
-  public static final String METADATA_PATH = "metadataPath";
-  public static final String CHUNKS_PATH = "chunksPath";
-  public static final String CONTAINER_DB_TYPE = "containerDBType";
-  public static final String CHECKSUM = "checksum";
-  public static final String ORIGIN_PIPELINE_ID = "originPipelineId";
-  public static final String ORIGIN_NODE_ID = "originNodeId";
-
-  // Supported store types.
-  public static final String OZONE = "ozone";
-  public static final String S3 = "s3";
-
-  // For OM Audit usage
-  public static final String VOLUME = "volume";
-  public static final String BUCKET = "bucket";
-  public static final String KEY = "key";
-  public static final String QUOTA = "quota";
-  public static final String QUOTA_IN_BYTES = "quotaInBytes";
-  public static final String OBJECT_ID = "objectID";
-  public static final String UPDATE_ID = "updateID";
-  public static final String CLIENT_ID = "clientID";
-  public static final String OWNER = "owner";
-  public static final String ADMIN = "admin";
-  public static final String USERNAME = "username";
-  public static final String PREV_KEY = "prevKey";
-  public static final String START_KEY = "startKey";
-  public static final String MAX_KEYS = "maxKeys";
-  public static final String PREFIX = "prefix";
-  public static final String KEY_PREFIX = "keyPrefix";
-  public static final String ACL = "acl";
-  public static final String ACLS = "acls";
-  public static final String USER_ACL = "userAcl";
-  public static final String ADD_ACLS = "addAcls";
-  public static final String REMOVE_ACLS = "removeAcls";
-  public static final String MAX_NUM_OF_BUCKETS = "maxNumOfBuckets";
-  public static final String TO_KEY_NAME = "toKeyName";
-  public static final String STORAGE_TYPE = "storageType";
-  public static final String RESOURCE_TYPE = "resourceType";
-  public static final String IS_VERSION_ENABLED = "isVersionEnabled";
-  public static final String CREATION_TIME = "creationTime";
-  public static final String DATA_SIZE = "dataSize";
-  public static final String REPLICATION_TYPE = "replicationType";
-  public static final String REPLICATION_FACTOR = "replicationFactor";
-  public static final String KEY_LOCATION_INFO = "keyLocationInfo";
-  public static final String MULTIPART_LIST = "multipartList";
-  public static final String UPLOAD_ID = "uploadID";
-  public static final String PART_NUMBER_MARKER = "partNumberMarker";
-  public static final String MAX_PARTS = "maxParts";
-  public static final String S3_BUCKET = "s3Bucket";
-  public static final String S3_GETSECRET_USER = "S3GetSecretUser";
-
-
-
-  // For OM metrics saving to a file
-  public static final String OM_METRICS_FILE = "omMetrics";
-  public static final String OM_METRICS_TEMP_FILE = OM_METRICS_FILE + ".tmp";
-
-  // For Multipart upload
-  public static final int OM_MULTIPART_MIN_SIZE = 5 * 1024 * 1024;
-
-  // GRPC block token metadata header and context key
-  public static final String OZONE_BLOCK_TOKEN = "blocktoken";
-  public static final Context.Key<UserGroupInformation> UGI_CTX_KEY =
-      Context.key("UGI");
-
-  public static final Metadata.Key<String> OBT_METADATA_KEY =
-      Metadata.Key.of(OZONE_BLOCK_TOKEN, ASCII_STRING_MARSHALLER);
-  public static final Metadata.Key<String> USER_METADATA_KEY =
-      Metadata.Key.of(OZONE_USER, ASCII_STRING_MARSHALLER);
-
-  public static final String RPC_PORT = "RPC";
-
-  // Default OMServiceID for OM Ratis servers to use as RaftGroupId
-  public static final String OM_SERVICE_ID_DEFAULT = "omServiceIdDefault";
-
-  // Dummy OMNodeID for OM Clients to use for a non-HA OM setup
-  public static final String OM_NODE_ID_DUMMY = "omNodeIdDummy";
-
-  // OM Ratis snapshot file to store the last applied index
-  public static final String OM_RATIS_SNAPSHOT_INDEX = "ratisSnapshotIndex";
-
-  // OM Http request parameter to be used while downloading DB checkpoint
-  // from OM leader to follower
-  public static final String OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT =
-      "snapshotBeforeCheckpoint";
-
-  public static final String JAVA_TMP_DIR = "java.io.tmpdir";
-  public static final String LOCALHOST = "localhost";
-
-
-  public static final int S3_BUCKET_MIN_LENGTH = 3;
-  public static final int S3_BUCKET_MAX_LENGTH = 64;
-
-  //GDPR
-  public static final String GDPR_FLAG = "gdprEnabled";
-  public static final String GDPR_ALGORITHM_NAME = "AES";
-  public static final int GDPR_DEFAULT_RANDOM_SECRET_LENGTH = 16;
-  public static final String GDPR_CHARSET = "UTF-8";
-  public static final String GDPR_LENGTH = "length";
-  public static final String GDPR_SECRET = "secret";
-  public static final String GDPR_ALGORITHM = "algorithm";
-
-
-}
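As a worked illustration of the OM DB schema documented above, an OM key and its deletion marker can be assembled from the prefix constants (the volume, bucket and key values here are invented):

    import org.apache.hadoop.ozone.OzoneConsts;

    public class OmDbKeySketch {
      public static void main(String[] args) {
        String volume = "vol1", bucket = "bkt1", key = "file.txt";
        // Per the schema comment: /volumeName/bucketName/keyName -> KeyInfo
        String dbKey = OzoneConsts.OM_KEY_PREFIX + volume
            + OzoneConsts.OM_KEY_PREFIX + bucket
            + OzoneConsts.OM_KEY_PREFIX + key;
        // Keys queued for deletion carry the #deleting# prefix.
        String deletingKey = OzoneConsts.DELETING_KEY_PREFIX + dbKey;
        System.out.println(dbKey + " -> " + deletingKey);
      }
    }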
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java
deleted file mode 100644
index c1fb893..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-
-import org.apache.commons.validator.routines.InetAddressValidator;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.NetworkInterface;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Enumeration;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Ozone security Util class.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class OzoneSecurityUtil {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneSecurityUtil.class);
-  // List of IPs that should not be added to a CSR.
-  private static final Set<String> INVALID_IPS = new HashSet<>(Arrays.asList(
-      "0.0.0.0", "127.0.0.1"));
-
-  private OzoneSecurityUtil() {
-  }
-
-  public static boolean isSecurityEnabled(Configuration conf) {
-    return conf.getBoolean(OZONE_SECURITY_ENABLED_KEY,
-        OZONE_SECURITY_ENABLED_DEFAULT);
-  }
-
-  /**
-   * Checks whether the given file exists under the given directory.
-   *
-   * @return true if both the directory and the file exist.
-   */
-  public static boolean checkIfFileExist(Path path, String fileName) {
-    return Files.exists(path)
-        && Files.exists(Paths.get(path.toString(), fileName));
-  }
-
-  /**
-   * Iterates through the network interfaces and returns all valid IPs not
-   * listed in INVALID_IPS.
-   *
-   * @return List<InetAddress> of valid local addresses
-   * @throws IOException if no network interfaces are found or if an error
-   * occurs.
-   */
-  public static List<InetAddress> getValidInetsForCurrentHost()
-      throws IOException {
-    List<InetAddress> hostIps = new ArrayList<>();
-    InetAddressValidator ipValidator = InetAddressValidator.getInstance();
-
-    Enumeration<NetworkInterface> enumNI =
-        NetworkInterface.getNetworkInterfaces();
-    if (enumNI != null) {
-      while (enumNI.hasMoreElements()) {
-        NetworkInterface ifc = enumNI.nextElement();
-        if (ifc.isUp()) {
-          Enumeration<InetAddress> enumAdds = ifc.getInetAddresses();
-          while (enumAdds.hasMoreElements()) {
-            InetAddress addr = enumAdds.nextElement();
-
-            if (ipValidator.isValid(addr.getHostAddress())
-                && !INVALID_IPS.contains(addr.getHostAddress())) {
-              LOG.info("Adding ip:{},host:{}", addr.getHostAddress(),
-                  addr.getHostName());
-              hostIps.add(addr);
-            } else {
-              LOG.info("ip:{},host:{} not returned.", addr.getHostAddress(),
-                  addr.getHostName());
-            }
-          }
-        }
-      }
-      return hostIps;
-    } else {
-      throw new IOException("Unable to get network interfaces.");
-    }
-  }
-}
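A brief, hypothetical usage sketch of the two helpers deleted above (the class name is invented):

    import java.net.InetAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.OzoneSecurityUtil;

    public class SecurityUtilUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
          // Collect the addresses eligible for a certificate sign request,
          // skipping 0.0.0.0 and 127.0.0.1 as filtered above.
          for (InetAddress addr :
              OzoneSecurityUtil.getValidInetsForCurrentHost()) {
            System.out.println(addr.getHostAddress());
          }
        }
      }
    }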
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java
deleted file mode 100644
index 8c1d6f0..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-/**
- * Interface to define AuditAction.
- */
-public interface AuditAction {
-  /**
-   * Implementations must override this.
-   * @return the audit action as a String
-   */
-  String getAction();
-}
-
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
deleted file mode 100644
index 098ab6b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.audit;
-
-/**
- * Enum to define AuditEventStatus values.
- */
-public enum AuditEventStatus {
-  SUCCESS("SUCCESS"),
-  FAILURE("FAILURE");
-
-  private String status;
-
-  AuditEventStatus(String status){
-    this.status = status;
-  }
-
-  public String getStatus() {
-    return status;
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
deleted file mode 100644
index ee6f45d..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.logging.log4j.Level;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Marker;
-import org.apache.logging.log4j.spi.ExtendedLogger;
-
-
-/**
- * Class to define Audit Logger for Ozone.
- */
-public class AuditLogger {
-
-  private ExtendedLogger logger;
-  private static final String FQCN = AuditLogger.class.getName();
-  private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker();
-  private static final Marker READ_MARKER = AuditMarker.READ.getMarker();
-
-  /**
-   * Parameterized constructor to initialize the logger.
-   * @param type Audit Logger Type
-   */
-  public AuditLogger(AuditLoggerType type){
-    initializeLogger(type);
-  }
-
-  /**
-   * Initializes the logger with the specified type.
-   * @param loggerType one of the values from the enum AuditLoggerType.
-   */
-  private void initializeLogger(AuditLoggerType loggerType){
-    this.logger = LogManager.getContext(false).getLogger(loggerType.getType());
-  }
-
-  @VisibleForTesting
-  public ExtendedLogger getLogger() {
-    return logger;
-  }
-
-  public void logWriteSuccess(AuditMessage msg) {
-    this.logger.logIfEnabled(FQCN, Level.INFO, WRITE_MARKER, msg, null);
-  }
-
-  public void logWriteFailure(AuditMessage msg) {
-    this.logger.logIfEnabled(FQCN, Level.ERROR, WRITE_MARKER, msg,
-        msg.getThrowable());
-  }
-
-  public void logReadSuccess(AuditMessage msg) {
-    this.logger.logIfEnabled(FQCN, Level.INFO, READ_MARKER, msg, null);
-  }
-
-  public void logReadFailure(AuditMessage msg) {
-    this.logger.logIfEnabled(FQCN, Level.ERROR, READ_MARKER, msg,
-        msg.getThrowable());
-  }
-
-  public void logWrite(AuditMessage auditMessage) {
-    // INFO for clean writes, ERROR when a throwable is attached.
-    Level level = auditMessage.getThrowable() == null
-        ? Level.INFO : Level.ERROR;
-    this.logger.logIfEnabled(FQCN, level, WRITE_MARKER, auditMessage,
-        auditMessage.getThrowable());
-  }
-
-}
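A minimal usage sketch of the logger deleted above (the user, IP and operation values are invented; real callers derive them from the RPC context):

    import java.util.Collections;
    import org.apache.hadoop.ozone.audit.AuditLogger;
    import org.apache.hadoop.ozone.audit.AuditLoggerType;
    import org.apache.hadoop.ozone.audit.AuditMessage;

    public class AuditLoggerUsageSketch {
      public static void main(String[] args) {
        // One logger per component; OMLOGGER maps to the "OMAudit" context.
        AuditLogger audit = new AuditLogger(AuditLoggerType.OMLOGGER);
        AuditMessage msg = new AuditMessage.Builder()
            .setUser("hadoop")                 // invented values for the sketch
            .atIp("10.0.0.1")
            .forOperation("CREATE_VOLUME")
            .withParams(Collections.singletonMap("volume", "vol1"))
            .withResult("SUCCESS")
            .build();
        audit.logWriteSuccess(msg);            // INFO with the WRITE marker
      }
    }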
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
deleted file mode 100644
index 18241c7..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-/**
- * Enumeration for defining types of Audit Loggers in Ozone.
- */
-public enum AuditLoggerType {
-  DNLOGGER("DNAudit"),
-  OMLOGGER("OMAudit"),
-  SCMLOGGER("SCMAudit");
-
-  private String type;
-
-  public String getType() {
-    return type;
-  }
-
-  AuditLoggerType(String type){
-    this.type = type;
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
deleted file mode 100644
index 505b958..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit;
-
-import org.apache.logging.log4j.Marker;
-import org.apache.logging.log4j.MarkerManager;
-
-/**
- * Defines audit marker types.
- */
-public enum AuditMarker {
-  WRITE(MarkerManager.getMarker("WRITE")),
-  READ(MarkerManager.getMarker("READ"));
-
-  private Marker marker;
-
-  AuditMarker(Marker marker){
-    this.marker = marker;
-  }
-
-  public Marker getMarker(){
-    return marker;
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
deleted file mode 100644
index 1569ffe..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit;
-
-import org.apache.logging.log4j.message.Message;
-
-import java.util.Map;
-
-/**
- * Defines audit message structure.
- */
-public class AuditMessage implements Message {
-
-  private String message;
-  private Throwable throwable;
-
-  private static final String MSG_PATTERN =
-      "user=%s | ip=%s | op=%s %s | ret=%s";
-
-  public AuditMessage(){
-
-  }
-
-  @Override
-  public String getFormattedMessage() {
-    return message;
-  }
-
-  @Override
-  public String getFormat() {
-    return null;
-  }
-
-  @Override
-  public Object[] getParameters() {
-    return new Object[0];
-  }
-
-  @Override
-  public Throwable getThrowable() {
-    return throwable;
-  }
-
-  /**
-   * Use when there is a custom string to be appended to the default message.
-   * @param customMessage custom string
-   */
-  private void appendMessage(String customMessage) {
-    this.message += customMessage;
-  }
-
-  public String getMessage() {
-    return message;
-  }
-
-  public void setMessage(String message) {
-    this.message = message;
-  }
-
-  public void setThrowable(Throwable throwable) {
-    this.throwable = throwable;
-  }
-
-  /**
-   * Builder class for AuditMessage.
-   */
-  public static class Builder {
-    private Throwable throwable;
-    private String user;
-    private String ip;
-    private String op;
-    private Map<String, String> params;
-    private String ret;
-
-    public Builder(){
-
-    }
-
-    public Builder setUser(String usr){
-      this.user = usr;
-      return this;
-    }
-
-    public Builder atIp(String ipAddr){
-      this.ip = ipAddr;
-      return this;
-    }
-
-    public Builder forOperation(String operation){
-      this.op = operation;
-      return this;
-    }
-
-    public Builder withParams(Map<String, String> args){
-      this.params = args;
-      return this;
-    }
-
-    public Builder withResult(String result){
-      this.ret = result;
-      return this;
-    }
-
-    public Builder withException(Throwable ex){
-      this.throwable = ex;
-      return this;
-    }
-
-    public AuditMessage build(){
-      AuditMessage auditMessage = new AuditMessage();
-      auditMessage.message = String.format(MSG_PATTERN,
-          this.user, this.ip, this.op, this.params, this.ret);
-      auditMessage.throwable = this.throwable;
-      return auditMessage;
-    }
-  }
-}
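For orientation, MSG_PATTERN renders each built message as one pipe-delimited line; assuming the illustrative builder values user "hadoop", ip "10.0.0.1", op "CREATE_VOLUME", params {volume=vol1} and result "SUCCESS", the formatted message reads:

    user=hadoop | ip=10.0.0.1 | op=CREATE_VOLUME {volume=vol1} | ret=SUCCESS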
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java
deleted file mode 100644
index 9d7dbee..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-import java.util.Map;
-
-/**
- * Interface to make an entity auditable.
- */
-public interface Auditable {
-  /**
-   * Implementations must override this.
-   * @return {@literal Map<String, String>} with values to be logged in audit.
-   */
-  Map<String, String> toAuditMap();
-}
-
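The contract deleted above is small; a minimal sketch of an implementing entity (the class and field names are invented here, real implementations being classes such as OMVolumeArgs):

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.ozone.audit.Auditable;

    public class VolumeArgsSketch implements Auditable {
      private final String volume;
      private final String owner;

      public VolumeArgsSketch(String volume, String owner) {
        this.volume = volume;
        this.owner = owner;
      }

      @Override
      public Map<String, String> toAuditMap() {
        // camelCase keys, plain printable ASCII, no '=', ']' or '"'.
        Map<String, String> auditMap = new LinkedHashMap<>();
        auditMap.put("volume", volume);
        auditMap.put("owner", owner);
        return auditMap;
      }
    }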
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java
deleted file mode 100644
index 51c0298..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-import java.util.Map;
-
-/**
- * Interface to mark an actor as Auditor.
- */
-public interface Auditor {
-
-  AuditMessage buildAuditMessageForSuccess(
-      AuditAction op, Map<String, String> auditMap);
-
-  AuditMessage buildAuditMessageForFailure(
-      AuditAction op, Map<String, String> auditMap, Throwable throwable);
-
-}
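A minimal sketch of an Auditor implementation wiring the pieces together (user and IP are placeholders; real implementations read them from the request context):

    import java.util.Map;
    import org.apache.hadoop.ozone.audit.AuditAction;
    import org.apache.hadoop.ozone.audit.AuditEventStatus;
    import org.apache.hadoop.ozone.audit.AuditMessage;
    import org.apache.hadoop.ozone.audit.Auditor;

    public class AuditorSketch implements Auditor {
      @Override
      public AuditMessage buildAuditMessageForSuccess(
          AuditAction op, Map<String, String> auditMap) {
        return new AuditMessage.Builder()
            .setUser("hadoop").atIp("10.0.0.1")   // placeholder identity
            .forOperation(op.getAction())
            .withParams(auditMap)
            .withResult(AuditEventStatus.SUCCESS.getStatus())
            .build();
      }

      @Override
      public AuditMessage buildAuditMessageForFailure(
          AuditAction op, Map<String, String> auditMap, Throwable throwable) {
        return new AuditMessage.Builder()
            .setUser("hadoop").atIp("10.0.0.1")   // placeholder identity
            .forOperation(op.getAction())
            .withParams(auditMap)
            .withResult(AuditEventStatus.FAILURE.getStatus())
            .withException(throwable)
            .build();
      }
    }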
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java
deleted file mode 100644
index 1c87f2b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit;
-
-/**
- * Enum to define Audit Action types for Datanode.
- */
-public enum DNAction implements AuditAction {
-
-  CREATE_CONTAINER,
-  READ_CONTAINER,
-  UPDATE_CONTAINER,
-  DELETE_CONTAINER,
-  LIST_CONTAINER,
-  PUT_BLOCK,
-  GET_BLOCK,
-  DELETE_BLOCK,
-  LIST_BLOCK,
-  READ_CHUNK,
-  DELETE_CHUNK,
-  WRITE_CHUNK,
-  LIST_CHUNK,
-  COMPACT_CHUNK,
-  PUT_SMALL_FILE,
-  GET_SMALL_FILE,
-  CLOSE_CONTAINER,
-  GET_COMMITTED_BLOCK_LENGTH;
-
-  @Override
-  public String getAction() {
-    return this.toString();
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
deleted file mode 100644
index d03ad15..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit;
-
-/**
- * Enum to define Audit Action types for SCM.
- */
-public enum SCMAction implements AuditAction {
-
-  GET_VERSION,
-  REGISTER,
-  SEND_HEARTBEAT,
-  GET_SCM_INFO,
-  ALLOCATE_BLOCK,
-  DELETE_KEY_BLOCK,
-  ALLOCATE_CONTAINER,
-  GET_CONTAINER,
-  GET_CONTAINER_WITH_PIPELINE,
-  LIST_CONTAINER,
-  LIST_PIPELINE,
-  CLOSE_PIPELINE,
-  ACTIVATE_PIPELINE,
-  DEACTIVATE_PIPELINE,
-  DELETE_CONTAINER,
-  IN_SAFE_MODE,
-  FORCE_EXIT_SAFE_MODE,
-  SORT_DATANODE,
-  START_REPLICATION_MANAGER,
-  STOP_REPLICATION_MANAGER,
-  GET_REPLICATION_MANAGER_STATUS;
-
-  @Override
-  public String getAction() {
-    return this.toString();
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
deleted file mode 100644
index c8284fd..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit;
-/**
- ******************************************************************************
- *                              Important
- * 1. Any changes to classes in this package can render the logging
- * framework broken.
- * 2. The logger framework has been designed keeping in mind future
- * plans to build a log parser.
- * 3. Please exercise great caution when attempting changes in this package.
- ******************************************************************************
- *
- *
- * This package lays the foundation for Audit logging in Ozone.
- * Audit logging in Ozone has been built using log4j2, which brings in new
- * features that facilitate turning selective audit events on or off using
- * MarkerFilter, periodically checking the logging configuration for changes
- * and reloading them, and use of the disruptor framework for improved
- * asynchronous logging.
- *
- * The log4j2 configuration can be specified in XML, YAML, JSON or
- * Properties format. For Ozone, we are using the Properties file for its
- * simplicity, readability and ease of modification.
- *
- * The log4j2 configuration file can be passed to the startup command with the
- * option -Dlog4j.configurationFile, unlike -Dlog4j.configuration in log4j 1.x.
- *
- ******************************************************************************
- *          Understanding the Audit Logging framework in Ozone.
- ******************************************************************************
- * **** Auditable ***
- * This is an interface to mark an entity as auditable.
- * This interface must be implemented by entities requiring audit logging.
- * For example - OMVolumeArgs, OMBucketArgs.
- * The implementing class must override toAuditMap() to return an
- * instance of Map<Key, Value> where both Key and Value are String.
- *
- * Key: must contain printable US ASCII characters
- * May not contain a space, =, ], or "
- * If the key is multi-word, use camel case.
- *
- * Value: if it is a collection/array, then it must be converted to a comma
- * delimited string
- *
- * *** AuditAction ***
- * This is an interface to define the various type of actions to be audited.
- * To ensure separation of concerns, each sub-component must define its own
- * Enum implementing AuditAction.
- * For the structure of such an Enum, refer to the test class DummyAction.
- *
- * For starters, we expect the following 3 implementations of AuditAction:
- * OMAction - to define action types for Ozone Manager
- * SCMAction - to define action types for Storage Container manager
- * DNAction - to define action types for Datanode
- *
- * *** AuditEventStatus ***
- * Enum to define Audit event status like success and failure.
- * This is used in AuditLogger.logXXX() methods.
- *
- * *** AuditLogger ***
- * This is where the audit logging magic unfolds.
- * The class has 2 Markers defined - READ and WRITE.
- * These markers are used to tag when logging events.
- *
- * *** AuditLoggerType ***
- * Enum to define the various AuditLoggers in Ozone
- *
- * *** AuditMarker ***
- * Enum to define various Audit Markers used in AuditLogging.
- *
- * *** AuditMessage ***
- * Entity to define an audit message to be logged
- * It will generate a message formatted as:
- * user=xxx ip=xxx op=XXXX_XXXX {key=val, key1=val1..} ret=XXXXXX
- *
- * *** Auditor ***
- * Interface to mark an actor class as Auditor
- * Must be implemented by classes where we want to log audit events.
- * Implementing class must override and implement methods
- * buildAuditMessageForSuccess and buildAuditMessageForFailure.
- *
- * ****************************************************************************
- *                              Usage
- * ****************************************************************************
- * Using the AuditLogger to log events:
- * 1. Get a logger by specifying the appropriate logger type
- * Example: ExtendedLogger AUDIT = new AuditLogger(AuditLoggerType.OMLogger)
- *
- * 2. Construct an instance of AuditMessage
- *
- * 3. Log Read/Write and Success/Failure event as needed.
- * Example
- * AUDIT.logWriteSuccess(buildAuditMessageForSuccess(params))
- *
- * 4. Log Level implicitly defaults to INFO for xxxxSuccess() and ERROR for
- * xxxxFailure()
- * AUDIT.logWriteSuccess(buildAuditMessageForSuccess(params))
- * AUDIT.logWriteFailure(buildAuditMessageForFailure(params))
- *
- * See sample invocations in src/test in the following class:
- * org.apache.hadoop.ozone.audit.TestOzoneAuditLogger
- *
- * ****************************************************************************
- *                      Defining new Logger types
- * ****************************************************************************
- * A new Logger type can be added with the following steps:
- * 1. Update AuditLoggerType to add the new type
- * 2. Create a new Enum implementing AuditAction, if needed
- * 3. Ensure the required entity implements Auditable
- *
- * ****************************************************************************
- *                      Defining new Marker types
- * ****************************************************************************
- * New Markers can be configured as follows:
- * 1. Define new markers in AuditMarker
- * 2. Get the Marker in AuditLogger for use in the log methods, example:
- * private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker();
- * 3. Define log methods in AuditLogger to use the new Marker type
- * 4. Call these new methods from the required classes to audit with these
- * new markers
- * 5. The marker-based filtering can be configured in the log4j2 configuration.
- * Refer to log4j2.properties in src/test/resources for a sample.
- */
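The numbered usage steps above read most clearly as code. A minimal sketch follows, assuming the documented types (AuditAction, AuditLogger, AuditLoggerType) and an enclosing Auditor implementation that supplies buildAuditMessageForSuccess; the parameter lists here are illustrative, not confirmed signatures:

    // Sketch only: follows the numbered usage steps documented above.
    public enum MyAction implements AuditAction {
      CREATE_VOLUME;  // one constant per auditable operation

      @Override
      public String getAction() {
        return this.toString();
      }
    }

    // Step 1: get a logger of the appropriate type (inside an Auditor class).
    private static final AuditLogger AUDIT =
        new AuditLogger(AuditLoggerType.OMLogger);

    // Steps 2-3: build an AuditMessage and log it. The level defaults to
    // INFO for logWriteSuccess and ERROR for logWriteFailure (step 4).
    void onCreateVolume(Map<String, String> params) {
      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(params));
    }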
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
deleted file mode 100644
index 1925c22..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.common;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .KeyBlocks;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * A group of related blocks, e.g. blocks belonging to the same object key.
- */
-public final class BlockGroup {
-
-  private String groupID;
-  private List<BlockID> blockIDs;
-  private BlockGroup(String groupID, List<BlockID> blockIDs) {
-    this.groupID = groupID;
-    this.blockIDs = blockIDs;
-  }
-
-  public List<BlockID> getBlockIDList() {
-    return blockIDs;
-  }
-
-  public String getGroupID() {
-    return groupID;
-  }
-
-  public KeyBlocks getProto() {
-    KeyBlocks.Builder kbb = KeyBlocks.newBuilder();
-    for (BlockID block : blockIDs) {
-      kbb.addBlocks(block.getProtobuf());
-    }
-    return kbb.setKey(groupID).build();
-  }
-
-  /**
-   * Parses a KeyBlocks proto to a group of blocks.
-   * @param proto KeyBlocks proto.
-   * @return a group of blocks.
-   */
-  public static BlockGroup getFromProto(KeyBlocks proto) {
-    List<BlockID> blockIDs = new ArrayList<>();
-    for (HddsProtos.BlockID block : proto.getBlocksList()) {
-      blockIDs.add(new BlockID(block.getContainerBlockID().getContainerID(),
-          block.getContainerBlockID().getLocalID()));
-    }
-    return BlockGroup.newBuilder().setKeyName(proto.getKey())
-        .addAllBlockIDs(blockIDs).build();
-  }
-
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  @Override
-  public String toString() {
-    return "BlockGroup[" +
-        "groupID='" + groupID + '\'' +
-        ", blockIDs=" + blockIDs +
-        ']';
-  }
-
-  /**
-   * BlockGroup instance builder.
-   */
-  public static class Builder {
-
-    private String groupID;
-    private List<BlockID> blockIDs;
-
-    public Builder setKeyName(String blockGroupID) {
-      this.groupID = blockGroupID;
-      return this;
-    }
-
-    public Builder addAllBlockIDs(List<BlockID> keyBlocks) {
-      this.blockIDs = keyBlocks;
-      return this;
-    }
-
-    public BlockGroup build() {
-      return new BlockGroup(groupID, blockIDs);
-    }
-  }
-
-}
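For reference, the builder and the proto round trip above compose as in the following sketch (the key name and IDs are made up; the two-argument BlockID constructor mirrors the one used in getFromProto, and java.util.Arrays is assumed to be imported):

    BlockGroup group = BlockGroup.newBuilder()
        .setKeyName("volume/bucket/key")
        .addAllBlockIDs(Arrays.asList(
            new BlockID(1L, 100L),    // (containerID, localID)
            new BlockID(1L, 101L)))
        .build();

    KeyBlocks proto = group.getProto();                   // serialize
    BlockGroup parsed = BlockGroup.getFromProto(proto);   // parse back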
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
deleted file mode 100644
index 0e70515..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.primitives.Longs;
-
-import java.nio.ByteBuffer;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ChecksumType;
-import org.apache.hadoop.io.MD5Hash;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.util.PureJavaCrc32;
-import org.apache.hadoop.util.PureJavaCrc32C;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class to compute and verify checksums for chunks.
- *
- * This class is not thread safe.
- */
-public class Checksum {
-
-  public static final Logger LOG = LoggerFactory.getLogger(Checksum.class);
-
-  private final ChecksumType checksumType;
-  private final int bytesPerChecksum;
-
-  private PureJavaCrc32 crc32Checksum;
-  private PureJavaCrc32C crc32cChecksum;
-  private MessageDigest sha;
-
-  /**
-   * Constructs a Checksum object.
-   * @param type type of Checksum
-   * @param bytesPerChecksum number of bytes of data per checksum
-   */
-  public Checksum(ChecksumType type, int bytesPerChecksum) {
-    this.checksumType = type;
-    this.bytesPerChecksum = bytesPerChecksum;
-  }
-
-  /**
-   * Constructs a Checksum object with default ChecksumType and default
-   * BytesPerChecksum.
-   */
-  @VisibleForTesting
-  public Checksum() {
-    this.checksumType = ChecksumType.valueOf(
-        OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT);
-    this.bytesPerChecksum = OzoneConfigKeys
-        .OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT_BYTES; // Default is 1MB
-  }
-
-  /**
-   * Computes checksum for given data.
-   * @param byteBuffer input data in the form of a ByteBuffer.
-   * @return ChecksumData computed for input data.
-   */
-  public ChecksumData computeChecksum(ByteBuffer byteBuffer)
-      throws OzoneChecksumException {
-    return computeChecksum(byteBuffer.array(), byteBuffer.position(),
-        byteBuffer.limit());
-  }
-
-  /**
-   * Computes checksum for given data.
-   * @param data input data in the form of byte array.
-   * @return ChecksumData computed for input data.
-   */
-  public ChecksumData computeChecksum(byte[] data)
-      throws OzoneChecksumException {
-    return computeChecksum(data, 0, data.length);
-  }
-
-  /**
-   * Computes checksum for given data.
-   * @param data input data in the form of byte array.
-   * @return ChecksumData computed for input data.
-   */
-  public ChecksumData computeChecksum(byte[] data, int offset, int len)
-      throws OzoneChecksumException {
-    ChecksumData checksumData = new ChecksumData(this.checksumType, this
-        .bytesPerChecksum);
-    if (checksumType == ChecksumType.NONE) {
-      // Since type is set to NONE, we do not need to compute the checksums
-      return checksumData;
-    }
-
-    switch (checksumType) {
-    case CRC32:
-      crc32Checksum = new PureJavaCrc32();
-      break;
-    case CRC32C:
-      crc32cChecksum = new PureJavaCrc32C();
-      break;
-    case SHA256:
-      try {
-        sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-      } catch (NoSuchAlgorithmException e) {
-        throw new OzoneChecksumException(OzoneConsts.FILE_HASH, e);
-      }
-      break;
-    case MD5:
-      break;
-    default:
-      throw new OzoneChecksumException(checksumType);
-    }
-
-    // Compute number of checksums needed for the given data length based on bytes
-    // per checksum.
-    int dataSize = len - offset;
-    int numChecksums = (dataSize + bytesPerChecksum - 1) / bytesPerChecksum;
-
-    // Checksum is computed for each bytesPerChecksum number of bytes of data
-    // starting at offset 0. The last checksum might be computed for the
-    // remaining data with length less than bytesPerChecksum.
-    List<ByteString> checksumList = new ArrayList<>(numChecksums);
-    for (int index = 0; index < numChecksums; index++) {
-      checksumList.add(computeChecksumAtIndex(data, index, offset, len));
-    }
-    checksumData.setChecksums(checksumList);
-
-    return checksumData;
-  }
-
-  /**
-   * Computes checksum based on checksumType for a data block at given index
-   * and a max length of bytesPerChecksum.
-   * @param data input data
-   * @param index index to compute the offset from where data must be read
-   * @param start start pos of the array where the computation has to start
-   * @param length length of the array up to which the checksum needs to be computed
-   * @return computed checksum ByteString
-   * @throws OzoneChecksumException thrown when ChecksumType is not recognized
-   */
-  private ByteString computeChecksumAtIndex(byte[] data, int index, int start,
-      int length)
-      throws OzoneChecksumException {
-    int offset = start + index * bytesPerChecksum;
-    int dataLength = length - start;
-    int len = bytesPerChecksum;
-    if ((offset + len) > dataLength) {
-      len = dataLength - offset;
-    }
-    byte[] checksumBytes = null;
-    switch (checksumType) {
-    case CRC32:
-      checksumBytes = computeCRC32Checksum(data, offset, len);
-      break;
-    case CRC32C:
-      checksumBytes = computeCRC32CChecksum(data, offset, len);
-      break;
-    case SHA256:
-      checksumBytes = computeSHA256Checksum(data, offset, len);
-      break;
-    case MD5:
-      checksumBytes = computeMD5Checksum(data, offset, len);
-      break;
-    default:
-      throw new OzoneChecksumException(checksumType);
-    }
-
-    return ByteString.copyFrom(checksumBytes);
-  }
-
-  /**
-   * Computes CRC32 checksum.
-   */
-  private byte[] computeCRC32Checksum(byte[] data, int offset, int len) {
-    crc32Checksum.reset();
-    crc32Checksum.update(data, offset, len);
-    return Longs.toByteArray(crc32Checksum.getValue());
-  }
-
-  /**
-   * Computes CRC32C checksum.
-   */
-  private byte[] computeCRC32CChecksum(byte[] data, int offset, int len) {
-    crc32cChecksum.reset();
-    crc32cChecksum.update(data, offset, len);
-    return Longs.toByteArray(crc32cChecksum.getValue());
-  }
-
-  /**
-   * Computes SHA-256 checksum.
-   */
-  private byte[] computeSHA256Checksum(byte[] data, int offset, int len) {
-    sha.reset();
-    sha.update(data, offset, len);
-    return sha.digest();
-  }
-
-  /**
-   * Computes MD5 checksum.
-   */
-  private byte[] computeMD5Checksum(byte[] data, int offset, int len) {
-    MD5Hash md5out = MD5Hash.digest(data, offset, len);
-    return md5out.getDigest();
-  }
-
-  /**
-   * Computes the ChecksumData for the input data and verifies that it
-   * matches with that of the input checksumData, starting from index
-   * startIndex.
-   * @param byteString input data
-   * @param checksumData checksumData to match with
-   * @param startIndex index of first checksum in checksumData to match with
-   *                   data's computed checksum.
-   * @throws OzoneChecksumException is thrown if checksums do not match
-   */
-  public static boolean verifyChecksum(ByteString byteString,
-      ChecksumData checksumData, int startIndex) throws OzoneChecksumException {
-    return verifyChecksum(byteString.toByteArray(), checksumData, startIndex);
-  }
-
-  /**
-   * Computes the ChecksumData for the input data and verifies that it
-   * matches with that of the input checksumData.
-   * @param data input data
-   * @param checksumData checksumData to match with
-   * @throws OzoneChecksumException is thrown if checksums do not match
-   */
-  public static boolean verifyChecksum(byte[] data, ChecksumData checksumData)
-      throws OzoneChecksumException {
-    return verifyChecksum(data, checksumData, 0);
-  }
-
-  /**
-   * Computes the ChecksumData for the input data and verifies that it
-   * matches with that of the input checksumData.
-   * @param data input data
-   * @param checksumData checksumData to match with
-   * @param startIndex index of first checksum in checksumData to match with
-   *                   data's computed checksum.
-   * @throws OzoneChecksumException is thrown if checksums do not match
-   */
-  public static boolean verifyChecksum(byte[] data, ChecksumData checksumData,
-      int startIndex) throws OzoneChecksumException {
-    ChecksumType checksumType = checksumData.getChecksumType();
-    if (checksumType == ChecksumType.NONE) {
-      // Checksum is set to NONE. No further verification is required.
-      return true;
-    }
-
-    int bytesPerChecksum = checksumData.getBytesPerChecksum();
-    Checksum checksum = new Checksum(checksumType, bytesPerChecksum);
-    ChecksumData computedChecksumData =
-        checksum.computeChecksum(data, 0, data.length);
-
-    return checksumData.verifyChecksumDataMatches(computedChecksumData,
-        startIndex);
-  }
-
-  /**
-   * Returns a ChecksumData with type NONE for testing.
-   */
-  @VisibleForTesting
-  public static ContainerProtos.ChecksumData getNoChecksumDataProto() {
-    return new ChecksumData(ChecksumType.NONE, 0).getProtoBufMessage();
-  }
-}
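Taken together, the write path computes a ChecksumData per chunk and the read path recomputes and compares it. Note that numChecksums is the ceiling of dataSize / bytesPerChecksum, so 2.5 MB of data with a 1 MB bytesPerChecksum yields three checksums, the last covering the 0.5 MB remainder. A minimal usage sketch:

    byte[] data = new byte[3 * 1024 * 1024];  // 3 MB chunk (contents elided)

    // Write path: one CRC32 checksum per 1 MB of data => 3 checksums.
    Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
    ChecksumData checksumData = checksum.computeChecksum(data);

    // Read path: recompute and compare; throws OzoneChecksumException on a
    // mismatch and returns true otherwise.
    Checksum.verifyChecksum(data, checksumData);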
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java
deleted file mode 100644
index 7ce643d..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * Some portions of this file are Copyright (c) 2004-2006 Intel Corporation
- * and licensed under the BSD license.
- */
-package org.apache.hadoop.ozone.common;
-
-import org.apache.ratis.util.Preconditions;
-
-import java.nio.ByteBuffer;
-import java.util.zip.Checksum;
-
-/**
- * A sub-interface of {@link Checksum}
- * with a method to update checksum from a {@link ByteBuffer}.
- */
-public interface ChecksumByteBuffer extends Checksum {
-  /**
-   * Updates the current checksum with the specified bytes in the buffer.
-   * Upon return, the buffer's position will be equal to its limit.
-   *
-   * @param buffer the bytes to update the checksum with
-   */
-  void update(ByteBuffer buffer);
-
-  @Override
-  default void update(byte[] b, int off, int len) {
-    update(ByteBuffer.wrap(b, off, len).asReadOnlyBuffer());
-  }
-
-  /**
-   * An abstract class implementing {@link ChecksumByteBuffer}
-   * with a 32-bit checksum and a lookup table.
-   */
-  @SuppressWarnings("innerassignment")
-  abstract class CrcIntTable implements ChecksumByteBuffer {
-    /** Current CRC value, stored bit-flipped. */
-    private int crc;
-
-    CrcIntTable() {
-      reset();
-      Preconditions.assertTrue(getTable().length == 8 * (1 << 8));
-    }
-
-    abstract int[] getTable();
-
-    @Override
-    public final long getValue() {
-      return (~crc) & 0xffffffffL;
-    }
-
-    @Override
-    public final void reset() {
-      crc = 0xffffffff;
-    }
-
-    @Override
-    public final void update(int b) {
-      crc = (crc >>> 8) ^ getTable()[(((crc ^ b) << 24) >>> 24)];
-    }
-
-    @Override
-    public final void update(ByteBuffer b) {
-      crc = update(crc, b, getTable());
-    }
-
-    private static int update(int crc, ByteBuffer b, int[] table) {
-      for(; b.remaining() > 7;) {
-        final int c0 = (b.get() ^ crc) & 0xff;
-        final int c1 = (b.get() ^ (crc >>>= 8)) & 0xff;
-        final int c2 = (b.get() ^ (crc >>>= 8)) & 0xff;
-        final int c3 = (b.get() ^ (crc >>> 8)) & 0xff;
-        crc = (table[0x700 + c0] ^ table[0x600 + c1])
-            ^ (table[0x500 + c2] ^ table[0x400 + c3]);
-
-        final int c4 = b.get() & 0xff;
-        final int c5 = b.get() & 0xff;
-        final int c6 = b.get() & 0xff;
-        final int c7 = b.get() & 0xff;
-
-        crc ^= (table[0x300 + c4] ^ table[0x200 + c5])
-            ^ (table[0x100 + c6] ^ table[c7]);
-      }
-
-      // loop unroll - duff's device style
-      switch (b.remaining()) {
-      case 7:
-        crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)];
-      case 6:
-        crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)];
-      case 5:
-        crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)];
-      case 4:
-        crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)];
-      case 3:
-        crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)];
-      case 2:
-        crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)];
-      case 1:
-        crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)];
-      default: // noop
-      }
-
-      return crc;
-    }
-  }
-}
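The CrcIntTable update above is an eight-entries-per-byte unrolling (hence the 8 * (1 << 8) table-length check) of the classic table-driven CRC. For comparison, a byte-at-a-time version of the same recurrence, using the standard 256-entry table for the reflected polynomial 0xEDB88320 (a self-contained sketch, not part of this codebase):

    final class SimpleCrc32 {
      private static final int[] TABLE = new int[256];
      static {
        for (int i = 0; i < 256; i++) {
          int c = i;
          for (int k = 0; k < 8; k++) {
            // Shift right one bit at a time, XOR-ing in the polynomial on a carry.
            c = (c >>> 1) ^ ((c & 1) != 0 ? 0xEDB88320 : 0);
          }
          TABLE[i] = c;
        }
      }

      static long of(byte[] data) {
        int crc = 0xFFFFFFFF;                    // stored bit-flipped, as above
        for (byte b : data) {
          crc = (crc >>> 8) ^ TABLE[(crc ^ b) & 0xFF];
        }
        return (~crc) & 0xFFFFFFFFL;             // flip back, widen to unsigned
      }
    }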
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java
deleted file mode 100644
index 4a927fb..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import java.util.List;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ChecksumType;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-
-/**
- * Java class that represents the Checksum ProtoBuf class. This helper class
- * allows us to convert between the protobuf representation and plain Java.
- */
-public class ChecksumData {
-
-  private ChecksumType type;
-  // Checksum will be computed for every bytesPerChecksum number of bytes and
-  // stored sequentially in checksumList
-  private int bytesPerChecksum;
-  private List<ByteString> checksums;
-
-  public ChecksumData(ChecksumType checksumType, int bytesPerChecksum) {
-    this(checksumType, bytesPerChecksum, Lists.newArrayList());
-  }
-
-  public ChecksumData(ChecksumType checksumType, int bytesPerChecksum,
-                      List<ByteString> checksums) {
-    this.type = checksumType;
-    this.bytesPerChecksum = bytesPerChecksum;
-    this.checksums = checksums;
-  }
-
-  /**
-   * Getter method for checksumType.
-   */
-  public ChecksumType getChecksumType() {
-    return this.type;
-  }
-
-  /**
-   * Getter method for bytesPerChecksum.
-   */
-  public int getBytesPerChecksum() {
-    return this.bytesPerChecksum;
-  }
-
-  /**
-   * Getter method for checksums.
-   */
-  @VisibleForTesting
-  public List<ByteString> getChecksums() {
-    return this.checksums;
-  }
-
-  /**
-   * Setter method for checksums.
-   * @param checksumList list of checksums
-   */
-  public void setChecksums(List<ByteString> checksumList) {
-    this.checksums.clear();
-    this.checksums.addAll(checksumList);
-  }
-
-  /**
-   * Construct the Checksum ProtoBuf message.
-   * @return Checksum ProtoBuf message
-   */
-  public ContainerProtos.ChecksumData getProtoBufMessage() {
-    ContainerProtos.ChecksumData.Builder checksumProtoBuilder =
-        ContainerProtos.ChecksumData.newBuilder()
-            .setType(this.type)
-            .setBytesPerChecksum(this.bytesPerChecksum);
-
-    checksumProtoBuilder.addAllChecksums(checksums);
-
-    return checksumProtoBuilder.build();
-  }
-
-  /**
-   * Constructs Checksum class object from the Checksum ProtoBuf message.
-   * @param checksumDataProto Checksum ProtoBuf message
-   * @return ChecksumData object representing the proto
-   */
-  public static ChecksumData getFromProtoBuf(
-      ContainerProtos.ChecksumData checksumDataProto) {
-    Preconditions.checkNotNull(checksumDataProto);
-
-    ChecksumData checksumData = new ChecksumData(
-        checksumDataProto.getType(), checksumDataProto.getBytesPerChecksum());
-
-    if (checksumDataProto.getChecksumsCount() != 0) {
-      checksumData.setChecksums(checksumDataProto.getChecksumsList());
-    }
-
-    return checksumData;
-  }
-
-  /**
-   * Verify that this ChecksumData, starting at startIndex, matches the
-   * provided ChecksumData.
-   * The checksum at startIndex of this ChecksumData will be matched with the
-   * checksum at index 0 of the provided ChecksumData, and checksum at
-   * (startIndex + 1) of this ChecksumData with checksum at index 1 of
-   * provided ChecksumData and so on.
-   * @param that the ChecksumData to match with
-   * @param startIndex index of the first checksum from this ChecksumData
-   *                   which will be used to compare checksums
-   * @return true if checksums match
-   * @throws OzoneChecksumException
-   */
-  public boolean verifyChecksumDataMatches(ChecksumData that, int startIndex)
-      throws OzoneChecksumException {
-
-    // pre checks
-    if (this.checksums.size() == 0) {
-      throw new OzoneChecksumException("Original checksumData has no " +
-          "checksums");
-    }
-
-    if (that.checksums.size() == 0) {
-      throw new OzoneChecksumException("Computed checksumData has no " +
-          "checksums");
-    }
-
-    int numChecksums = that.checksums.size();
-
-    try {
-      // Verify that checksum matches at each index
-      for (int index = 0; index < numChecksums; index++) {
-        if (!matchChecksumAtIndex(this.checksums.get(startIndex + index),
-            that.checksums.get(index))) {
-          // checksum mismatch. throw exception.
-          throw new OzoneChecksumException(index);
-        }
-      }
-    } catch (ArrayIndexOutOfBoundsException e) {
-      throw new OzoneChecksumException("Computed checksum has "
-          + numChecksums + " number of checksums. Original checksum has " +
-          (this.checksums.size() - startIndex) + " number of checksums " +
-          "starting from index " + startIndex);
-    }
-    return true;
-  }
-
-  private static boolean matchChecksumAtIndex(
-      ByteString expectedChecksumAtIndex, ByteString computedChecksumAtIndex) {
-    return expectedChecksumAtIndex.equals(computedChecksumAtIndex);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!(obj instanceof ChecksumData)) {
-      return false;
-    }
-
-    ChecksumData that = (ChecksumData) obj;
-
-    if (!this.type.equals(that.getChecksumType())) {
-      return false;
-    }
-    if (this.bytesPerChecksum != that.getBytesPerChecksum()) {
-      return false;
-    }
-    if (this.checksums.size() != that.checksums.size()) {
-      return false;
-    }
-
-    // Match checksum at each index
-    for (int index = 0; index < this.checksums.size(); index++) {
-      if (!matchChecksumAtIndex(this.checksums.get(index),
-          that.checksums.get(index))) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    HashCodeBuilder hc = new HashCodeBuilder();
-    hc.append(type);
-    hc.append(bytesPerChecksum);
-    hc.append(checksums.toArray());
-    return hc.toHashCode();
-  }
-}
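The startIndex parameter of verifyChecksumDataMatches is what lets a read that begins mid-chunk verify only the tail of the stored checksum list. A sketch, assuming ByteString values c0, c1, c2 (e.g. built with ByteString.copyFromUtf8) and java.util.Arrays:

    // Stored checksums cover chunk segments 0, 1 and 2.
    ChecksumData stored = new ChecksumData(ChecksumType.CRC32, 1024,
        Arrays.asList(c0, c1, c2));
    // The read skipped segment 0, so only two checksums were recomputed.
    ChecksumData computed = new ChecksumData(ChecksumType.CRC32, 1024,
        Arrays.asList(c1, c2));

    // Compares stored[1] with computed[0] and stored[2] with computed[1].
    stored.verifyChecksumDataMatches(computed, 1);  // returns true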
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java
deleted file mode 100644
index 892b695..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .DeleteScmBlockResult;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .DeleteScmBlockResult.Result;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * Result to delete a group of blocks.
- */
-public class DeleteBlockGroupResult {
-  private String objectKey;
-  private List<DeleteBlockResult> blockResultList;
-  public DeleteBlockGroupResult(String objectKey,
-      List<DeleteBlockResult> blockResultList) {
-    this.objectKey = objectKey;
-    this.blockResultList = blockResultList;
-  }
-
-  public String getObjectKey() {
-    return objectKey;
-  }
-
-  public List<DeleteBlockResult> getBlockResultList() {
-    return blockResultList;
-  }
-
-  public List<DeleteScmBlockResult> getBlockResultProtoList() {
-    List<DeleteScmBlockResult> resultProtoList =
-        new ArrayList<>(blockResultList.size());
-    for (DeleteBlockResult result : blockResultList) {
-      DeleteScmBlockResult proto = DeleteScmBlockResult.newBuilder()
-          .setBlockID(result.getBlockID().getProtobuf())
-          .setResult(result.getResult()).build();
-      resultProtoList.add(proto);
-    }
-    return resultProtoList;
-  }
-
-  public static List<DeleteBlockResult> convertBlockResultProto(
-      List<DeleteScmBlockResult> results) {
-    List<DeleteBlockResult> protoResults = new ArrayList<>(results.size());
-    for (DeleteScmBlockResult result : results) {
-      protoResults.add(new DeleteBlockResult(BlockID.getFromProtobuf(
-          result.getBlockID()), result.getResult()));
-    }
-    return protoResults;
-  }
-
-  /**
-   * This group is considered successfully executed only if all of its
-   * blocks are successfully deleted.
-   *
-   * @return true if all blocks are successfully deleted, false otherwise.
-   */
-  public boolean isSuccess() {
-    for (DeleteBlockResult result : blockResultList) {
-      if (result.getResult() != Result.success) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * @return A list of deletion failed block IDs.
-   */
-  public List<BlockID> getFailedBlocks() {
-    List<BlockID> failedBlocks = blockResultList.stream()
-        .filter(result -> result.getResult() != Result.success)
-        .map(DeleteBlockResult::getBlockID).collect(Collectors.toList());
-    return failedBlocks;
-  }
-}
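Callers typically key off the all-or-nothing semantics above, retrying only the failed subset. A sketch (the key name is a placeholder, and blockResults would come from the per-block deletion replies):

    List<DeleteBlockResult> blockResults = new ArrayList<>();  // placeholder
    DeleteBlockGroupResult result =
        new DeleteBlockGroupResult("volume/bucket/key", blockResults);
    if (!result.isSuccess()) {
      // Retry only the blocks whose result was not Result.success.
      List<BlockID> retry = result.getFailedBlocks();
    }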
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java
deleted file mode 100644
index 518b519..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
-  * Licensed to the Apache Software Foundation (ASF) under one
-  * or more contributor license agreements.  See the NOTICE file
-  * distributed with this work for additional information
-  * regarding copyright ownership.  The ASF licenses this file
-  * to you under the Apache License, Version 2.0 (the
-  * "License"); you may not use this file except in compliance
-  * with the License.  You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-package org.apache.hadoop.ozone.common;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-import java.io.File;
-import java.io.IOException;
-
-/**
- * This exception is thrown when the file system state is inconsistent
- * and not recoverable.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class InconsistentStorageStateException extends IOException {
-  private static final long serialVersionUID = 1L;
-
-  public InconsistentStorageStateException(String descr) {
-    super(descr);
-  }
-
-  public InconsistentStorageStateException(File dir, String descr) {
-    super("Directory " + getFilePath(dir) + " is in an inconsistent state: "
-        + descr);
-  }
-
-  private static String getFilePath(File dir) {
-    try {
-      return dir.getCanonicalPath();
-    } catch (IOException e) {
-    }
-    return dir.getPath();
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java
deleted file mode 100644
index 20e40af..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
-
-import java.io.IOException;
-import java.security.NoSuchAlgorithmException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-/** Thrown for checksum errors. */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class OzoneChecksumException extends IOException {
-
-  /**
-   * OzoneChecksumException to throw when checksum verification fails.
-   * @param index checksum list index at which checksum match failed
-   */
-  public OzoneChecksumException(int index) {
-    super(String.format("Checksum mismatch at index %d", index));
-  }
-
-  /**
-   * OzoneChecksumException to throw when unrecognized checksumType is given.
-   * @param unrecognizedChecksumType the checksum type that was not recognized
-   */
-  public OzoneChecksumException(
-      ContainerProtos.ChecksumType unrecognizedChecksumType) {
-    super(String.format("Unrecognized ChecksumType: %s",
-        unrecognizedChecksumType));
-  }
-
-  /**
-   * OzoneChecksumException to wrap around NoSuchAlgorithmException.
-   * @param algorithm name of algorithm
-   * @param ex original exception thrown
-   */
-  public OzoneChecksumException(
-      String algorithm, NoSuchAlgorithmException ex) {
-    super(String.format("NoSuchAlgorithmException thrown while computing " +
-        "SHA-256 checksum using algorithm %s", algorithm), ex);
-  }
-
-  /**
-   * OzoneChecksumException to throw with custom message.
-   */
-  public OzoneChecksumException(String message) {
-    super(message);
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java
deleted file mode 100644
index 0d1f630..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java
+++ /dev/null
@@ -1,556 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
-
-/**
- * Similar to {@link org.apache.hadoop.util.PureJavaCrc32}
- * except that this class implement {@link ChecksumByteBuffer}.
- */
-final class PureJavaCrc32ByteBuffer extends ChecksumByteBuffer.CrcIntTable {
-  @Override
-  int[] getTable() {
-    return T;
-  }
-
-  /**
-   * CRC-32 lookup table generated by the polynomial 0xEDB88320.
-   * See also org.apache.hadoop.util.TestPureJavaCrc32.Table.
-   */
-  private static final int[] T = {
-      /* T8_0 */
-      0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
-      0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
-      0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
-      0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
-      0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
-      0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
-      0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
-      0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
-      0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
-      0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
-      0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
-      0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
-      0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
-      0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
-      0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
-      0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
-      0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
-      0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
-      0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
-      0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
-      0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
-      0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
-      0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
-      0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
-      0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
-      0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
-      0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
-      0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
-      0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
-      0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
-      0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
-      0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
-      0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
-      0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
-      0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
-      0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
-      0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
-      0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
-      0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
-      0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
-      0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
-      0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
-      0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
-      0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
-      0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
-      0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
-      0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
-      0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
-      0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
-      0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
-      0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
-      0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
-      0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
-      0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
-      0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
-      0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
-      0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
-      0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
-      0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
-      0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
-      0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
-      0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
-      0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
-      0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D,
-      /* T8_1 */
-      0x00000000, 0x191B3141, 0x32366282, 0x2B2D53C3,
-      0x646CC504, 0x7D77F445, 0x565AA786, 0x4F4196C7,
-      0xC8D98A08, 0xD1C2BB49, 0xFAEFE88A, 0xE3F4D9CB,
-      0xACB54F0C, 0xB5AE7E4D, 0x9E832D8E, 0x87981CCF,
-      0x4AC21251, 0x53D92310, 0x78F470D3, 0x61EF4192,
-      0x2EAED755, 0x37B5E614, 0x1C98B5D7, 0x05838496,
-      0x821B9859, 0x9B00A918, 0xB02DFADB, 0xA936CB9A,
-      0xE6775D5D, 0xFF6C6C1C, 0xD4413FDF, 0xCD5A0E9E,
-      0x958424A2, 0x8C9F15E3, 0xA7B24620, 0xBEA97761,
-      0xF1E8E1A6, 0xE8F3D0E7, 0xC3DE8324, 0xDAC5B265,
-      0x5D5DAEAA, 0x44469FEB, 0x6F6BCC28, 0x7670FD69,
-      0x39316BAE, 0x202A5AEF, 0x0B07092C, 0x121C386D,
-      0xDF4636F3, 0xC65D07B2, 0xED705471, 0xF46B6530,
-      0xBB2AF3F7, 0xA231C2B6, 0x891C9175, 0x9007A034,
-      0x179FBCFB, 0x0E848DBA, 0x25A9DE79, 0x3CB2EF38,
-      0x73F379FF, 0x6AE848BE, 0x41C51B7D, 0x58DE2A3C,
-      0xF0794F05, 0xE9627E44, 0xC24F2D87, 0xDB541CC6,
-      0x94158A01, 0x8D0EBB40, 0xA623E883, 0xBF38D9C2,
-      0x38A0C50D, 0x21BBF44C, 0x0A96A78F, 0x138D96CE,
-      0x5CCC0009, 0x45D73148, 0x6EFA628B, 0x77E153CA,
-      0xBABB5D54, 0xA3A06C15, 0x888D3FD6, 0x91960E97,
-      0xDED79850, 0xC7CCA911, 0xECE1FAD2, 0xF5FACB93,
-      0x7262D75C, 0x6B79E61D, 0x4054B5DE, 0x594F849F,
-      0x160E1258, 0x0F152319, 0x243870DA, 0x3D23419B,
-      0x65FD6BA7, 0x7CE65AE6, 0x57CB0925, 0x4ED03864,
-      0x0191AEA3, 0x188A9FE2, 0x33A7CC21, 0x2ABCFD60,
-      0xAD24E1AF, 0xB43FD0EE, 0x9F12832D, 0x8609B26C,
-      0xC94824AB, 0xD05315EA, 0xFB7E4629, 0xE2657768,
-      0x2F3F79F6, 0x362448B7, 0x1D091B74, 0x04122A35,
-      0x4B53BCF2, 0x52488DB3, 0x7965DE70, 0x607EEF31,
-      0xE7E6F3FE, 0xFEFDC2BF, 0xD5D0917C, 0xCCCBA03D,
-      0x838A36FA, 0x9A9107BB, 0xB1BC5478, 0xA8A76539,
-      0x3B83984B, 0x2298A90A, 0x09B5FAC9, 0x10AECB88,
-      0x5FEF5D4F, 0x46F46C0E, 0x6DD93FCD, 0x74C20E8C,
-      0xF35A1243, 0xEA412302, 0xC16C70C1, 0xD8774180,
-      0x9736D747, 0x8E2DE606, 0xA500B5C5, 0xBC1B8484,
-      0x71418A1A, 0x685ABB5B, 0x4377E898, 0x5A6CD9D9,
-      0x152D4F1E, 0x0C367E5F, 0x271B2D9C, 0x3E001CDD,
-      0xB9980012, 0xA0833153, 0x8BAE6290, 0x92B553D1,
-      0xDDF4C516, 0xC4EFF457, 0xEFC2A794, 0xF6D996D5,
-      0xAE07BCE9, 0xB71C8DA8, 0x9C31DE6B, 0x852AEF2A,
-      0xCA6B79ED, 0xD37048AC, 0xF85D1B6F, 0xE1462A2E,
-      0x66DE36E1, 0x7FC507A0, 0x54E85463, 0x4DF36522,
-      0x02B2F3E5, 0x1BA9C2A4, 0x30849167, 0x299FA026,
-      0xE4C5AEB8, 0xFDDE9FF9, 0xD6F3CC3A, 0xCFE8FD7B,
-      0x80A96BBC, 0x99B25AFD, 0xB29F093E, 0xAB84387F,
-      0x2C1C24B0, 0x350715F1, 0x1E2A4632, 0x07317773,
-      0x4870E1B4, 0x516BD0F5, 0x7A468336, 0x635DB277,
-      0xCBFAD74E, 0xD2E1E60F, 0xF9CCB5CC, 0xE0D7848D,
-      0xAF96124A, 0xB68D230B, 0x9DA070C8, 0x84BB4189,
-      0x03235D46, 0x1A386C07, 0x31153FC4, 0x280E0E85,
-      0x674F9842, 0x7E54A903, 0x5579FAC0, 0x4C62CB81,
-      0x8138C51F, 0x9823F45E, 0xB30EA79D, 0xAA1596DC,
-      0xE554001B, 0xFC4F315A, 0xD7626299, 0xCE7953D8,
-      0x49E14F17, 0x50FA7E56, 0x7BD72D95, 0x62CC1CD4,
-      0x2D8D8A13, 0x3496BB52, 0x1FBBE891, 0x06A0D9D0,
-      0x5E7EF3EC, 0x4765C2AD, 0x6C48916E, 0x7553A02F,
-      0x3A1236E8, 0x230907A9, 0x0824546A, 0x113F652B,
-      0x96A779E4, 0x8FBC48A5, 0xA4911B66, 0xBD8A2A27,
-      0xF2CBBCE0, 0xEBD08DA1, 0xC0FDDE62, 0xD9E6EF23,
-      0x14BCE1BD, 0x0DA7D0FC, 0x268A833F, 0x3F91B27E,
-      0x70D024B9, 0x69CB15F8, 0x42E6463B, 0x5BFD777A,
-      0xDC656BB5, 0xC57E5AF4, 0xEE530937, 0xF7483876,
-      0xB809AEB1, 0xA1129FF0, 0x8A3FCC33, 0x9324FD72,
-      /* T8_2 */
-      0x00000000, 0x01C26A37, 0x0384D46E, 0x0246BE59,
-      0x0709A8DC, 0x06CBC2EB, 0x048D7CB2, 0x054F1685,
-      0x0E1351B8, 0x0FD13B8F, 0x0D9785D6, 0x0C55EFE1,
-      0x091AF964, 0x08D89353, 0x0A9E2D0A, 0x0B5C473D,
-      0x1C26A370, 0x1DE4C947, 0x1FA2771E, 0x1E601D29,
-      0x1B2F0BAC, 0x1AED619B, 0x18ABDFC2, 0x1969B5F5,
-      0x1235F2C8, 0x13F798FF, 0x11B126A6, 0x10734C91,
-      0x153C5A14, 0x14FE3023, 0x16B88E7A, 0x177AE44D,
-      0x384D46E0, 0x398F2CD7, 0x3BC9928E, 0x3A0BF8B9,
-      0x3F44EE3C, 0x3E86840B, 0x3CC03A52, 0x3D025065,
-      0x365E1758, 0x379C7D6F, 0x35DAC336, 0x3418A901,
-      0x3157BF84, 0x3095D5B3, 0x32D36BEA, 0x331101DD,
-      0x246BE590, 0x25A98FA7, 0x27EF31FE, 0x262D5BC9,
-      0x23624D4C, 0x22A0277B, 0x20E69922, 0x2124F315,
-      0x2A78B428, 0x2BBADE1F, 0x29FC6046, 0x283E0A71,
-      0x2D711CF4, 0x2CB376C3, 0x2EF5C89A, 0x2F37A2AD,
-      0x709A8DC0, 0x7158E7F7, 0x731E59AE, 0x72DC3399,
-      0x7793251C, 0x76514F2B, 0x7417F172, 0x75D59B45,
-      0x7E89DC78, 0x7F4BB64F, 0x7D0D0816, 0x7CCF6221,
-      0x798074A4, 0x78421E93, 0x7A04A0CA, 0x7BC6CAFD,
-      0x6CBC2EB0, 0x6D7E4487, 0x6F38FADE, 0x6EFA90E9,
-      0x6BB5866C, 0x6A77EC5B, 0x68315202, 0x69F33835,
-      0x62AF7F08, 0x636D153F, 0x612BAB66, 0x60E9C151,
-      0x65A6D7D4, 0x6464BDE3, 0x662203BA, 0x67E0698D,
-      0x48D7CB20, 0x4915A117, 0x4B531F4E, 0x4A917579,
-      0x4FDE63FC, 0x4E1C09CB, 0x4C5AB792, 0x4D98DDA5,
-      0x46C49A98, 0x4706F0AF, 0x45404EF6, 0x448224C1,
-      0x41CD3244, 0x400F5873, 0x4249E62A, 0x438B8C1D,
-      0x54F16850, 0x55330267, 0x5775BC3E, 0x56B7D609,
-      0x53F8C08C, 0x523AAABB, 0x507C14E2, 0x51BE7ED5,
-      0x5AE239E8, 0x5B2053DF, 0x5966ED86, 0x58A487B1,
-      0x5DEB9134, 0x5C29FB03, 0x5E6F455A, 0x5FAD2F6D,
-      0xE1351B80, 0xE0F771B7, 0xE2B1CFEE, 0xE373A5D9,
-      0xE63CB35C, 0xE7FED96B, 0xE5B86732, 0xE47A0D05,
-      0xEF264A38, 0xEEE4200F, 0xECA29E56, 0xED60F461,
-      0xE82FE2E4, 0xE9ED88D3, 0xEBAB368A, 0xEA695CBD,
-      0xFD13B8F0, 0xFCD1D2C7, 0xFE976C9E, 0xFF5506A9,
-      0xFA1A102C, 0xFBD87A1B, 0xF99EC442, 0xF85CAE75,
-      0xF300E948, 0xF2C2837F, 0xF0843D26, 0xF1465711,
-      0xF4094194, 0xF5CB2BA3, 0xF78D95FA, 0xF64FFFCD,
-      0xD9785D60, 0xD8BA3757, 0xDAFC890E, 0xDB3EE339,
-      0xDE71F5BC, 0xDFB39F8B, 0xDDF521D2, 0xDC374BE5,
-      0xD76B0CD8, 0xD6A966EF, 0xD4EFD8B6, 0xD52DB281,
-      0xD062A404, 0xD1A0CE33, 0xD3E6706A, 0xD2241A5D,
-      0xC55EFE10, 0xC49C9427, 0xC6DA2A7E, 0xC7184049,
-      0xC25756CC, 0xC3953CFB, 0xC1D382A2, 0xC011E895,
-      0xCB4DAFA8, 0xCA8FC59F, 0xC8C97BC6, 0xC90B11F1,
-      0xCC440774, 0xCD866D43, 0xCFC0D31A, 0xCE02B92D,
-      0x91AF9640, 0x906DFC77, 0x922B422E, 0x93E92819,
-      0x96A63E9C, 0x976454AB, 0x9522EAF2, 0x94E080C5,
-      0x9FBCC7F8, 0x9E7EADCF, 0x9C381396, 0x9DFA79A1,
-      0x98B56F24, 0x99770513, 0x9B31BB4A, 0x9AF3D17D,
-      0x8D893530, 0x8C4B5F07, 0x8E0DE15E, 0x8FCF8B69,
-      0x8A809DEC, 0x8B42F7DB, 0x89044982, 0x88C623B5,
-      0x839A6488, 0x82580EBF, 0x801EB0E6, 0x81DCDAD1,
-      0x8493CC54, 0x8551A663, 0x8717183A, 0x86D5720D,
-      0xA9E2D0A0, 0xA820BA97, 0xAA6604CE, 0xABA46EF9,
-      0xAEEB787C, 0xAF29124B, 0xAD6FAC12, 0xACADC625,
-      0xA7F18118, 0xA633EB2F, 0xA4755576, 0xA5B73F41,
-      0xA0F829C4, 0xA13A43F3, 0xA37CFDAA, 0xA2BE979D,
-      0xB5C473D0, 0xB40619E7, 0xB640A7BE, 0xB782CD89,
-      0xB2CDDB0C, 0xB30FB13B, 0xB1490F62, 0xB08B6555,
-      0xBBD72268, 0xBA15485F, 0xB853F606, 0xB9919C31,
-      0xBCDE8AB4, 0xBD1CE083, 0xBF5A5EDA, 0xBE9834ED,
-      /* T8_3 */
-      0x00000000, 0xB8BC6765, 0xAA09C88B, 0x12B5AFEE,
-      0x8F629757, 0x37DEF032, 0x256B5FDC, 0x9DD738B9,
-      0xC5B428EF, 0x7D084F8A, 0x6FBDE064, 0xD7018701,
-      0x4AD6BFB8, 0xF26AD8DD, 0xE0DF7733, 0x58631056,
-      0x5019579F, 0xE8A530FA, 0xFA109F14, 0x42ACF871,
-      0xDF7BC0C8, 0x67C7A7AD, 0x75720843, 0xCDCE6F26,
-      0x95AD7F70, 0x2D111815, 0x3FA4B7FB, 0x8718D09E,
-      0x1ACFE827, 0xA2738F42, 0xB0C620AC, 0x087A47C9,
-      0xA032AF3E, 0x188EC85B, 0x0A3B67B5, 0xB28700D0,
-      0x2F503869, 0x97EC5F0C, 0x8559F0E2, 0x3DE59787,
-      0x658687D1, 0xDD3AE0B4, 0xCF8F4F5A, 0x7733283F,
-      0xEAE41086, 0x525877E3, 0x40EDD80D, 0xF851BF68,
-      0xF02BF8A1, 0x48979FC4, 0x5A22302A, 0xE29E574F,
-      0x7F496FF6, 0xC7F50893, 0xD540A77D, 0x6DFCC018,
-      0x359FD04E, 0x8D23B72B, 0x9F9618C5, 0x272A7FA0,
-      0xBAFD4719, 0x0241207C, 0x10F48F92, 0xA848E8F7,
-      0x9B14583D, 0x23A83F58, 0x311D90B6, 0x89A1F7D3,
-      0x1476CF6A, 0xACCAA80F, 0xBE7F07E1, 0x06C36084,
-      0x5EA070D2, 0xE61C17B7, 0xF4A9B859, 0x4C15DF3C,
-      0xD1C2E785, 0x697E80E0, 0x7BCB2F0E, 0xC377486B,
-      0xCB0D0FA2, 0x73B168C7, 0x6104C729, 0xD9B8A04C,
-      0x446F98F5, 0xFCD3FF90, 0xEE66507E, 0x56DA371B,
-      0x0EB9274D, 0xB6054028, 0xA4B0EFC6, 0x1C0C88A3,
-      0x81DBB01A, 0x3967D77F, 0x2BD27891, 0x936E1FF4,
-      0x3B26F703, 0x839A9066, 0x912F3F88, 0x299358ED,
-      0xB4446054, 0x0CF80731, 0x1E4DA8DF, 0xA6F1CFBA,
-      0xFE92DFEC, 0x462EB889, 0x549B1767, 0xEC277002,
-      0x71F048BB, 0xC94C2FDE, 0xDBF98030, 0x6345E755,
-      0x6B3FA09C, 0xD383C7F9, 0xC1366817, 0x798A0F72,
-      0xE45D37CB, 0x5CE150AE, 0x4E54FF40, 0xF6E89825,
-      0xAE8B8873, 0x1637EF16, 0x048240F8, 0xBC3E279D,
-      0x21E91F24, 0x99557841, 0x8BE0D7AF, 0x335CB0CA,
-      0xED59B63B, 0x55E5D15E, 0x47507EB0, 0xFFEC19D5,
-      0x623B216C, 0xDA874609, 0xC832E9E7, 0x708E8E82,
-      0x28ED9ED4, 0x9051F9B1, 0x82E4565F, 0x3A58313A,
-      0xA78F0983, 0x1F336EE6, 0x0D86C108, 0xB53AA66D,
-      0xBD40E1A4, 0x05FC86C1, 0x1749292F, 0xAFF54E4A,
-      0x322276F3, 0x8A9E1196, 0x982BBE78, 0x2097D91D,
-      0x78F4C94B, 0xC048AE2E, 0xD2FD01C0, 0x6A4166A5,
-      0xF7965E1C, 0x4F2A3979, 0x5D9F9697, 0xE523F1F2,
-      0x4D6B1905, 0xF5D77E60, 0xE762D18E, 0x5FDEB6EB,
-      0xC2098E52, 0x7AB5E937, 0x680046D9, 0xD0BC21BC,
-      0x88DF31EA, 0x3063568F, 0x22D6F961, 0x9A6A9E04,
-      0x07BDA6BD, 0xBF01C1D8, 0xADB46E36, 0x15080953,
-      0x1D724E9A, 0xA5CE29FF, 0xB77B8611, 0x0FC7E174,
-      0x9210D9CD, 0x2AACBEA8, 0x38191146, 0x80A57623,
-      0xD8C66675, 0x607A0110, 0x72CFAEFE, 0xCA73C99B,
-      0x57A4F122, 0xEF189647, 0xFDAD39A9, 0x45115ECC,
-      0x764DEE06, 0xCEF18963, 0xDC44268D, 0x64F841E8,
-      0xF92F7951, 0x41931E34, 0x5326B1DA, 0xEB9AD6BF,
-      0xB3F9C6E9, 0x0B45A18C, 0x19F00E62, 0xA14C6907,
-      0x3C9B51BE, 0x842736DB, 0x96929935, 0x2E2EFE50,
-      0x2654B999, 0x9EE8DEFC, 0x8C5D7112, 0x34E11677,
-      0xA9362ECE, 0x118A49AB, 0x033FE645, 0xBB838120,
-      0xE3E09176, 0x5B5CF613, 0x49E959FD, 0xF1553E98,
-      0x6C820621, 0xD43E6144, 0xC68BCEAA, 0x7E37A9CF,
-      0xD67F4138, 0x6EC3265D, 0x7C7689B3, 0xC4CAEED6,
-      0x591DD66F, 0xE1A1B10A, 0xF3141EE4, 0x4BA87981,
-      0x13CB69D7, 0xAB770EB2, 0xB9C2A15C, 0x017EC639,
-      0x9CA9FE80, 0x241599E5, 0x36A0360B, 0x8E1C516E,
-      0x866616A7, 0x3EDA71C2, 0x2C6FDE2C, 0x94D3B949,
-      0x090481F0, 0xB1B8E695, 0xA30D497B, 0x1BB12E1E,
-      0x43D23E48, 0xFB6E592D, 0xE9DBF6C3, 0x516791A6,
-      0xCCB0A91F, 0x740CCE7A, 0x66B96194, 0xDE0506F1,
-      /* T8_4 */
-      0x00000000, 0x3D6029B0, 0x7AC05360, 0x47A07AD0,
-      0xF580A6C0, 0xC8E08F70, 0x8F40F5A0, 0xB220DC10,
-      0x30704BC1, 0x0D106271, 0x4AB018A1, 0x77D03111,
-      0xC5F0ED01, 0xF890C4B1, 0xBF30BE61, 0x825097D1,
-      0x60E09782, 0x5D80BE32, 0x1A20C4E2, 0x2740ED52,
-      0x95603142, 0xA80018F2, 0xEFA06222, 0xD2C04B92,
-      0x5090DC43, 0x6DF0F5F3, 0x2A508F23, 0x1730A693,
-      0xA5107A83, 0x98705333, 0xDFD029E3, 0xE2B00053,
-      0xC1C12F04, 0xFCA106B4, 0xBB017C64, 0x866155D4,
-      0x344189C4, 0x0921A074, 0x4E81DAA4, 0x73E1F314,
-      0xF1B164C5, 0xCCD14D75, 0x8B7137A5, 0xB6111E15,
-      0x0431C205, 0x3951EBB5, 0x7EF19165, 0x4391B8D5,
-      0xA121B886, 0x9C419136, 0xDBE1EBE6, 0xE681C256,
-      0x54A11E46, 0x69C137F6, 0x2E614D26, 0x13016496,
-      0x9151F347, 0xAC31DAF7, 0xEB91A027, 0xD6F18997,
-      0x64D15587, 0x59B17C37, 0x1E1106E7, 0x23712F57,
-      0x58F35849, 0x659371F9, 0x22330B29, 0x1F532299,
-      0xAD73FE89, 0x9013D739, 0xD7B3ADE9, 0xEAD38459,
-      0x68831388, 0x55E33A38, 0x124340E8, 0x2F236958,
-      0x9D03B548, 0xA0639CF8, 0xE7C3E628, 0xDAA3CF98,
-      0x3813CFCB, 0x0573E67B, 0x42D39CAB, 0x7FB3B51B,
-      0xCD93690B, 0xF0F340BB, 0xB7533A6B, 0x8A3313DB,
-      0x0863840A, 0x3503ADBA, 0x72A3D76A, 0x4FC3FEDA,
-      0xFDE322CA, 0xC0830B7A, 0x872371AA, 0xBA43581A,
-      0x9932774D, 0xA4525EFD, 0xE3F2242D, 0xDE920D9D,
-      0x6CB2D18D, 0x51D2F83D, 0x167282ED, 0x2B12AB5D,
-      0xA9423C8C, 0x9422153C, 0xD3826FEC, 0xEEE2465C,
-      0x5CC29A4C, 0x61A2B3FC, 0x2602C92C, 0x1B62E09C,
-      0xF9D2E0CF, 0xC4B2C97F, 0x8312B3AF, 0xBE729A1F,
-      0x0C52460F, 0x31326FBF, 0x7692156F, 0x4BF23CDF,
-      0xC9A2AB0E, 0xF4C282BE, 0xB362F86E, 0x8E02D1DE,
-      0x3C220DCE, 0x0142247E, 0x46E25EAE, 0x7B82771E,
-      0xB1E6B092, 0x8C869922, 0xCB26E3F2, 0xF646CA42,
-      0x44661652, 0x79063FE2, 0x3EA64532, 0x03C66C82,
-      0x8196FB53, 0xBCF6D2E3, 0xFB56A833, 0xC6368183,
-      0x74165D93, 0x49767423, 0x0ED60EF3, 0x33B62743,
-      0xD1062710, 0xEC660EA0, 0xABC67470, 0x96A65DC0,
-      0x248681D0, 0x19E6A860, 0x5E46D2B0, 0x6326FB00,
-      0xE1766CD1, 0xDC164561, 0x9BB63FB1, 0xA6D61601,
-      0x14F6CA11, 0x2996E3A1, 0x6E369971, 0x5356B0C1,
-      0x70279F96, 0x4D47B626, 0x0AE7CCF6, 0x3787E546,
-      0x85A73956, 0xB8C710E6, 0xFF676A36, 0xC2074386,
-      0x4057D457, 0x7D37FDE7, 0x3A978737, 0x07F7AE87,
-      0xB5D77297, 0x88B75B27, 0xCF1721F7, 0xF2770847,
-      0x10C70814, 0x2DA721A4, 0x6A075B74, 0x576772C4,
-      0xE547AED4, 0xD8278764, 0x9F87FDB4, 0xA2E7D404,
-      0x20B743D5, 0x1DD76A65, 0x5A7710B5, 0x67173905,
-      0xD537E515, 0xE857CCA5, 0xAFF7B675, 0x92979FC5,
-      0xE915E8DB, 0xD475C16B, 0x93D5BBBB, 0xAEB5920B,
-      0x1C954E1B, 0x21F567AB, 0x66551D7B, 0x5B3534CB,
-      0xD965A31A, 0xE4058AAA, 0xA3A5F07A, 0x9EC5D9CA,
-      0x2CE505DA, 0x11852C6A, 0x562556BA, 0x6B457F0A,
-      0x89F57F59, 0xB49556E9, 0xF3352C39, 0xCE550589,
-      0x7C75D999, 0x4115F029, 0x06B58AF9, 0x3BD5A349,
-      0xB9853498, 0x84E51D28, 0xC34567F8, 0xFE254E48,
-      0x4C059258, 0x7165BBE8, 0x36C5C138, 0x0BA5E888,
-      0x28D4C7DF, 0x15B4EE6F, 0x521494BF, 0x6F74BD0F,
-      0xDD54611F, 0xE03448AF, 0xA794327F, 0x9AF41BCF,
-      0x18A48C1E, 0x25C4A5AE, 0x6264DF7E, 0x5F04F6CE,
-      0xED242ADE, 0xD044036E, 0x97E479BE, 0xAA84500E,
-      0x4834505D, 0x755479ED, 0x32F4033D, 0x0F942A8D,
-      0xBDB4F69D, 0x80D4DF2D, 0xC774A5FD, 0xFA148C4D,
-      0x78441B9C, 0x4524322C, 0x028448FC, 0x3FE4614C,
-      0x8DC4BD5C, 0xB0A494EC, 0xF704EE3C, 0xCA64C78C,
-      /* T8_5 */
-      0x00000000, 0xCB5CD3A5, 0x4DC8A10B, 0x869472AE,
-      0x9B914216, 0x50CD91B3, 0xD659E31D, 0x1D0530B8,
-      0xEC53826D, 0x270F51C8, 0xA19B2366, 0x6AC7F0C3,
-      0x77C2C07B, 0xBC9E13DE, 0x3A0A6170, 0xF156B2D5,
-      0x03D6029B, 0xC88AD13E, 0x4E1EA390, 0x85427035,
-      0x9847408D, 0x531B9328, 0xD58FE186, 0x1ED33223,
-      0xEF8580F6, 0x24D95353, 0xA24D21FD, 0x6911F258,
-      0x7414C2E0, 0xBF481145, 0x39DC63EB, 0xF280B04E,
-      0x07AC0536, 0xCCF0D693, 0x4A64A43D, 0x81387798,
-      0x9C3D4720, 0x57619485, 0xD1F5E62B, 0x1AA9358E,
-      0xEBFF875B, 0x20A354FE, 0xA6372650, 0x6D6BF5F5,
-      0x706EC54D, 0xBB3216E8, 0x3DA66446, 0xF6FAB7E3,
-      0x047A07AD, 0xCF26D408, 0x49B2A6A6, 0x82EE7503,
-      0x9FEB45BB, 0x54B7961E, 0xD223E4B0, 0x197F3715,
-      0xE82985C0, 0x23755665, 0xA5E124CB, 0x6EBDF76E,
-      0x73B8C7D6, 0xB8E41473, 0x3E7066DD, 0xF52CB578,
-      0x0F580A6C, 0xC404D9C9, 0x4290AB67, 0x89CC78C2,
-      0x94C9487A, 0x5F959BDF, 0xD901E971, 0x125D3AD4,
-      0xE30B8801, 0x28575BA4, 0xAEC3290A, 0x659FFAAF,
-      0x789ACA17, 0xB3C619B2, 0x35526B1C, 0xFE0EB8B9,
-      0x0C8E08F7, 0xC7D2DB52, 0x4146A9FC, 0x8A1A7A59,
-      0x971F4AE1, 0x5C439944, 0xDAD7EBEA, 0x118B384F,
-      0xE0DD8A9A, 0x2B81593F, 0xAD152B91, 0x6649F834,
-      0x7B4CC88C, 0xB0101B29, 0x36846987, 0xFDD8BA22,
-      0x08F40F5A, 0xC3A8DCFF, 0x453CAE51, 0x8E607DF4,
-      0x93654D4C, 0x58399EE9, 0xDEADEC47, 0x15F13FE2,
-      0xE4A78D37, 0x2FFB5E92, 0xA96F2C3C, 0x6233FF99,
-      0x7F36CF21, 0xB46A1C84, 0x32FE6E2A, 0xF9A2BD8F,
-      0x0B220DC1, 0xC07EDE64, 0x46EAACCA, 0x8DB67F6F,
-      0x90B34FD7, 0x5BEF9C72, 0xDD7BEEDC, 0x16273D79,
-      0xE7718FAC, 0x2C2D5C09, 0xAAB92EA7, 0x61E5FD02,
-      0x7CE0CDBA, 0xB7BC1E1F, 0x31286CB1, 0xFA74BF14,
-      0x1EB014D8, 0xD5ECC77D, 0x5378B5D3, 0x98246676,
-      0x852156CE, 0x4E7D856B, 0xC8E9F7C5, 0x03B52460,
-      0xF2E396B5, 0x39BF4510, 0xBF2B37BE, 0x7477E41B,
-      0x6972D4A3, 0xA22E0706, 0x24BA75A8, 0xEFE6A60D,
-      0x1D661643, 0xD63AC5E6, 0x50AEB748, 0x9BF264ED,
-      0x86F75455, 0x4DAB87F0, 0xCB3FF55E, 0x006326FB,
-      0xF135942E, 0x3A69478B, 0xBCFD3525, 0x77A1E680,
-      0x6AA4D638, 0xA1F8059D, 0x276C7733, 0xEC30A496,
-      0x191C11EE, 0xD240C24B, 0x54D4B0E5, 0x9F886340,
-      0x828D53F8, 0x49D1805D, 0xCF45F2F3, 0x04192156,
-      0xF54F9383, 0x3E134026, 0xB8873288, 0x73DBE12D,
-      0x6EDED195, 0xA5820230, 0x2316709E, 0xE84AA33B,
-      0x1ACA1375, 0xD196C0D0, 0x5702B27E, 0x9C5E61DB,
-      0x815B5163, 0x4A0782C6, 0xCC93F068, 0x07CF23CD,
-      0xF6999118, 0x3DC542BD, 0xBB513013, 0x700DE3B6,
-      0x6D08D30E, 0xA65400AB, 0x20C07205, 0xEB9CA1A0,
-      0x11E81EB4, 0xDAB4CD11, 0x5C20BFBF, 0x977C6C1A,
-      0x8A795CA2, 0x41258F07, 0xC7B1FDA9, 0x0CED2E0C,
-      0xFDBB9CD9, 0x36E74F7C, 0xB0733DD2, 0x7B2FEE77,
-      0x662ADECF, 0xAD760D6A, 0x2BE27FC4, 0xE0BEAC61,
-      0x123E1C2F, 0xD962CF8A, 0x5FF6BD24, 0x94AA6E81,
-      0x89AF5E39, 0x42F38D9C, 0xC467FF32, 0x0F3B2C97,
-      0xFE6D9E42, 0x35314DE7, 0xB3A53F49, 0x78F9ECEC,
-      0x65FCDC54, 0xAEA00FF1, 0x28347D5F, 0xE368AEFA,
-      0x16441B82, 0xDD18C827, 0x5B8CBA89, 0x90D0692C,
-      0x8DD55994, 0x46898A31, 0xC01DF89F, 0x0B412B3A,
-      0xFA1799EF, 0x314B4A4A, 0xB7DF38E4, 0x7C83EB41,
-      0x6186DBF9, 0xAADA085C, 0x2C4E7AF2, 0xE712A957,
-      0x15921919, 0xDECECABC, 0x585AB812, 0x93066BB7,
-      0x8E035B0F, 0x455F88AA, 0xC3CBFA04, 0x089729A1,
-      0xF9C19B74, 0x329D48D1, 0xB4093A7F, 0x7F55E9DA,
-      0x6250D962, 0xA90C0AC7, 0x2F987869, 0xE4C4ABCC,
-      /* T8_6 */
-      0x00000000, 0xA6770BB4, 0x979F1129, 0x31E81A9D,
-      0xF44F2413, 0x52382FA7, 0x63D0353A, 0xC5A73E8E,
-      0x33EF4E67, 0x959845D3, 0xA4705F4E, 0x020754FA,
-      0xC7A06A74, 0x61D761C0, 0x503F7B5D, 0xF64870E9,
-      0x67DE9CCE, 0xC1A9977A, 0xF0418DE7, 0x56368653,
-      0x9391B8DD, 0x35E6B369, 0x040EA9F4, 0xA279A240,
-      0x5431D2A9, 0xF246D91D, 0xC3AEC380, 0x65D9C834,
-      0xA07EF6BA, 0x0609FD0E, 0x37E1E793, 0x9196EC27,
-      0xCFBD399C, 0x69CA3228, 0x582228B5, 0xFE552301,
-      0x3BF21D8F, 0x9D85163B, 0xAC6D0CA6, 0x0A1A0712,
-      0xFC5277FB, 0x5A257C4F, 0x6BCD66D2, 0xCDBA6D66,
-      0x081D53E8, 0xAE6A585C, 0x9F8242C1, 0x39F54975,
-      0xA863A552, 0x0E14AEE6, 0x3FFCB47B, 0x998BBFCF,
-      0x5C2C8141, 0xFA5B8AF5, 0xCBB39068, 0x6DC49BDC,
-      0x9B8CEB35, 0x3DFBE081, 0x0C13FA1C, 0xAA64F1A8,
-      0x6FC3CF26, 0xC9B4C492, 0xF85CDE0F, 0x5E2BD5BB,
-      0x440B7579, 0xE27C7ECD, 0xD3946450, 0x75E36FE4,
-      0xB044516A, 0x16335ADE, 0x27DB4043, 0x81AC4BF7,
-      0x77E43B1E, 0xD19330AA, 0xE07B2A37, 0x460C2183,
-      0x83AB1F0D, 0x25DC14B9, 0x14340E24, 0xB2430590,
-      0x23D5E9B7, 0x85A2E203, 0xB44AF89E, 0x123DF32A,
-      0xD79ACDA4, 0x71EDC610, 0x4005DC8D, 0xE672D739,
-      0x103AA7D0, 0xB64DAC64, 0x87A5B6F9, 0x21D2BD4D,
-      0xE47583C3, 0x42028877, 0x73EA92EA, 0xD59D995E,
-      0x8BB64CE5, 0x2DC14751, 0x1C295DCC, 0xBA5E5678,
-      0x7FF968F6, 0xD98E6342, 0xE86679DF, 0x4E11726B,
-      0xB8590282, 0x1E2E0936, 0x2FC613AB, 0x89B1181F,
-      0x4C162691, 0xEA612D25, 0xDB8937B8, 0x7DFE3C0C,
-      0xEC68D02B, 0x4A1FDB9F, 0x7BF7C102, 0xDD80CAB6,
-      0x1827F438, 0xBE50FF8C, 0x8FB8E511, 0x29CFEEA5,
-      0xDF879E4C, 0x79F095F8, 0x48188F65, 0xEE6F84D1,
-      0x2BC8BA5F, 0x8DBFB1EB, 0xBC57AB76, 0x1A20A0C2,
-      0x8816EAF2, 0x2E61E146, 0x1F89FBDB, 0xB9FEF06F,
-      0x7C59CEE1, 0xDA2EC555, 0xEBC6DFC8, 0x4DB1D47C,
-      0xBBF9A495, 0x1D8EAF21, 0x2C66B5BC, 0x8A11BE08,
-      0x4FB68086, 0xE9C18B32, 0xD82991AF, 0x7E5E9A1B,
-      0xEFC8763C, 0x49BF7D88, 0x78576715, 0xDE206CA1,
-      0x1B87522F, 0xBDF0599B, 0x8C184306, 0x2A6F48B2,
-      0xDC27385B, 0x7A5033EF, 0x4BB82972, 0xEDCF22C6,
-      0x28681C48, 0x8E1F17FC, 0xBFF70D61, 0x198006D5,
-      0x47ABD36E, 0xE1DCD8DA, 0xD034C247, 0x7643C9F3,
-      0xB3E4F77D, 0x1593FCC9, 0x247BE654, 0x820CEDE0,
-      0x74449D09, 0xD23396BD, 0xE3DB8C20, 0x45AC8794,
-      0x800BB91A, 0x267CB2AE, 0x1794A833, 0xB1E3A387,
-      0x20754FA0, 0x86024414, 0xB7EA5E89, 0x119D553D,
-      0xD43A6BB3, 0x724D6007, 0x43A57A9A, 0xE5D2712E,
-      0x139A01C7, 0xB5ED0A73, 0x840510EE, 0x22721B5A,
-      0xE7D525D4, 0x41A22E60, 0x704A34FD, 0xD63D3F49,
-      0xCC1D9F8B, 0x6A6A943F, 0x5B828EA2, 0xFDF58516,
-      0x3852BB98, 0x9E25B02C, 0xAFCDAAB1, 0x09BAA105,
-      0xFFF2D1EC, 0x5985DA58, 0x686DC0C5, 0xCE1ACB71,
-      0x0BBDF5FF, 0xADCAFE4B, 0x9C22E4D6, 0x3A55EF62,
-      0xABC30345, 0x0DB408F1, 0x3C5C126C, 0x9A2B19D8,
-      0x5F8C2756, 0xF9FB2CE2, 0xC813367F, 0x6E643DCB,
-      0x982C4D22, 0x3E5B4696, 0x0FB35C0B, 0xA9C457BF,
-      0x6C636931, 0xCA146285, 0xFBFC7818, 0x5D8B73AC,
-      0x03A0A617, 0xA5D7ADA3, 0x943FB73E, 0x3248BC8A,
-      0xF7EF8204, 0x519889B0, 0x6070932D, 0xC6079899,
-      0x304FE870, 0x9638E3C4, 0xA7D0F959, 0x01A7F2ED,
-      0xC400CC63, 0x6277C7D7, 0x539FDD4A, 0xF5E8D6FE,
-      0x647E3AD9, 0xC209316D, 0xF3E12BF0, 0x55962044,
-      0x90311ECA, 0x3646157E, 0x07AE0FE3, 0xA1D90457,
-      0x579174BE, 0xF1E67F0A, 0xC00E6597, 0x66796E23,
-      0xA3DE50AD, 0x05A95B19, 0x34414184, 0x92364A30,
-      /* T8_7 */
-      0x00000000, 0xCCAA009E, 0x4225077D, 0x8E8F07E3,
-      0x844A0EFA, 0x48E00E64, 0xC66F0987, 0x0AC50919,
-      0xD3E51BB5, 0x1F4F1B2B, 0x91C01CC8, 0x5D6A1C56,
-      0x57AF154F, 0x9B0515D1, 0x158A1232, 0xD92012AC,
-      0x7CBB312B, 0xB01131B5, 0x3E9E3656, 0xF23436C8,
-      0xF8F13FD1, 0x345B3F4F, 0xBAD438AC, 0x767E3832,
-      0xAF5E2A9E, 0x63F42A00, 0xED7B2DE3, 0x21D12D7D,
-      0x2B142464, 0xE7BE24FA, 0x69312319, 0xA59B2387,
-      0xF9766256, 0x35DC62C8, 0xBB53652B, 0x77F965B5,
-      0x7D3C6CAC, 0xB1966C32, 0x3F196BD1, 0xF3B36B4F,
-      0x2A9379E3, 0xE639797D, 0x68B67E9E, 0xA41C7E00,
-      0xAED97719, 0x62737787, 0xECFC7064, 0x205670FA,
-      0x85CD537D, 0x496753E3, 0xC7E85400, 0x0B42549E,
-      0x01875D87, 0xCD2D5D19, 0x43A25AFA, 0x8F085A64,
-      0x562848C8, 0x9A824856, 0x140D4FB5, 0xD8A74F2B,
-      0xD2624632, 0x1EC846AC, 0x9047414F, 0x5CED41D1,
-      0x299DC2ED, 0xE537C273, 0x6BB8C590, 0xA712C50E,
-      0xADD7CC17, 0x617DCC89, 0xEFF2CB6A, 0x2358CBF4,
-      0xFA78D958, 0x36D2D9C6, 0xB85DDE25, 0x74F7DEBB,
-      0x7E32D7A2, 0xB298D73C, 0x3C17D0DF, 0xF0BDD041,
-      0x5526F3C6, 0x998CF358, 0x1703F4BB, 0xDBA9F425,
-      0xD16CFD3C, 0x1DC6FDA2, 0x9349FA41, 0x5FE3FADF,
-      0x86C3E873, 0x4A69E8ED, 0xC4E6EF0E, 0x084CEF90,
-      0x0289E689, 0xCE23E617, 0x40ACE1F4, 0x8C06E16A,
-      0xD0EBA0BB, 0x1C41A025, 0x92CEA7C6, 0x5E64A758,
-      0x54A1AE41, 0x980BAEDF, 0x1684A93C, 0xDA2EA9A2,
-      0x030EBB0E, 0xCFA4BB90, 0x412BBC73, 0x8D81BCED,
-      0x8744B5F4, 0x4BEEB56A, 0xC561B289, 0x09CBB217,
-      0xAC509190, 0x60FA910E, 0xEE7596ED, 0x22DF9673,
-      0x281A9F6A, 0xE4B09FF4, 0x6A3F9817, 0xA6959889,
-      0x7FB58A25, 0xB31F8ABB, 0x3D908D58, 0xF13A8DC6,
-      0xFBFF84DF, 0x37558441, 0xB9DA83A2, 0x7570833C,
-      0x533B85DA, 0x9F918544, 0x111E82A7, 0xDDB48239,
-      0xD7718B20, 0x1BDB8BBE, 0x95548C5D, 0x59FE8CC3,
-      0x80DE9E6F, 0x4C749EF1, 0xC2FB9912, 0x0E51998C,
-      0x04949095, 0xC83E900B, 0x46B197E8, 0x8A1B9776,
-      0x2F80B4F1, 0xE32AB46F, 0x6DA5B38C, 0xA10FB312,
-      0xABCABA0B, 0x6760BA95, 0xE9EFBD76, 0x2545BDE8,
-      0xFC65AF44, 0x30CFAFDA, 0xBE40A839, 0x72EAA8A7,
-      0x782FA1BE, 0xB485A120, 0x3A0AA6C3, 0xF6A0A65D,
-      0xAA4DE78C, 0x66E7E712, 0xE868E0F1, 0x24C2E06F,
-      0x2E07E976, 0xE2ADE9E8, 0x6C22EE0B, 0xA088EE95,
-      0x79A8FC39, 0xB502FCA7, 0x3B8DFB44, 0xF727FBDA,
-      0xFDE2F2C3, 0x3148F25D, 0xBFC7F5BE, 0x736DF520,
-      0xD6F6D6A7, 0x1A5CD639, 0x94D3D1DA, 0x5879D144,
-      0x52BCD85D, 0x9E16D8C3, 0x1099DF20, 0xDC33DFBE,
-      0x0513CD12, 0xC9B9CD8C, 0x4736CA6F, 0x8B9CCAF1,
-      0x8159C3E8, 0x4DF3C376, 0xC37CC495, 0x0FD6C40B,
-      0x7AA64737, 0xB60C47A9, 0x3883404A, 0xF42940D4,
-      0xFEEC49CD, 0x32464953, 0xBCC94EB0, 0x70634E2E,
-      0xA9435C82, 0x65E95C1C, 0xEB665BFF, 0x27CC5B61,
-      0x2D095278, 0xE1A352E6, 0x6F2C5505, 0xA386559B,
-      0x061D761C, 0xCAB77682, 0x44387161, 0x889271FF,
-      0x825778E6, 0x4EFD7878, 0xC0727F9B, 0x0CD87F05,
-      0xD5F86DA9, 0x19526D37, 0x97DD6AD4, 0x5B776A4A,
-      0x51B26353, 0x9D1863CD, 0x1397642E, 0xDF3D64B0,
-      0x83D02561, 0x4F7A25FF, 0xC1F5221C, 0x0D5F2282,
-      0x079A2B9B, 0xCB302B05, 0x45BF2CE6, 0x89152C78,
-      0x50353ED4, 0x9C9F3E4A, 0x121039A9, 0xDEBA3937,
-      0xD47F302E, 0x18D530B0, 0x965A3753, 0x5AF037CD,
-      0xFF6B144A, 0x33C114D4, 0xBD4E1337, 0x71E413A9,
-      0x7B211AB0, 0xB78B1A2E, 0x39041DCD, 0xF5AE1D53,
-      0x2C8E0FFF, 0xE0240F61, 0x6EAB0882, 0xA201081C,
-      0xA8C40105, 0x646E019B, 0xEAE10678, 0x264B06E6
-  };
-}
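For reference, the eight 256-entry blocks above (T8_0 through T8_7) support the standard slicing-by-8 CRC update, which folds eight input bytes per iteration using one table per byte position. Below is a minimal sketch of that inner loop, assuming the blocks are concatenated into a single int[] with T8_0 first, as laid out above; the method is illustrative, not the removed implementation:

    // Sketch of a slicing-by-8 CRC update over a table t of 8 * 256 entries
    // (t[0x000..0x0FF] = T8_0, ..., t[0x700..0x7FF] = T8_7). Callers keep the
    // CRC bit-inverted between calls, per the usual reflected-CRC convention.
    static int update(int crc, byte[] b, int off, int len, int[] t) {
      int localCrc = crc;
      while (len > 7) {
        // Bytes 0-3 are combined with the current CRC; bytes 4-7 are plain
        // lookups. Earlier bytes in the 8-byte block use higher-numbered
        // tables because their contribution must be advanced further.
        final int c0 = (b[off] ^ localCrc) & 0xff;
        final int c1 = (b[off + 1] ^ (localCrc >>>= 8)) & 0xff;
        final int c2 = (b[off + 2] ^ (localCrc >>>= 8)) & 0xff;
        final int c3 = (b[off + 3] ^ (localCrc >>>= 8)) & 0xff;
        localCrc = (t[0x700 + c0] ^ t[0x600 + c1])
            ^ (t[0x500 + c2] ^ t[0x400 + c3]);
        localCrc ^= (t[0x300 + (b[off + 4] & 0xff)]
            ^ t[0x200 + (b[off + 5] & 0xff)])
            ^ (t[0x100 + (b[off + 6] & 0xff)] ^ t[b[off + 7] & 0xff]);
        off += 8;
        len -= 8;
      }
      while (len-- > 0) {
        // Byte-at-a-time tail, using T8_0 only.
        localCrc = (localCrc >>> 8) ^ t[(localCrc ^ b[off++]) & 0xff];
      }
      return localCrc;
    }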
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java
deleted file mode 100644
index 1c44357..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java
+++ /dev/null
@@ -1,559 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * Some portions of this file Copyright (c) 2004-2006 Intel Corporation
- * and licensed under the BSD license.
- */
-package org.apache.hadoop.ozone.common;
-
-/**
- * Similar to {@link org.apache.hadoop.util.PureJavaCrc32C}
- * except that this class implements {@link ChecksumByteBuffer}.
- */
-final class PureJavaCrc32CByteBuffer extends ChecksumByteBuffer.CrcIntTable {
-  @Override
-  int[] getTable() {
-    return T;
-  }
-
-  /**
-   * CRC-32C lookup table generated by the polynomial 0x82F63B78.
-   * See also org.apache.hadoop.util.TestPureJavaCrc32.Table.
-   */
-  private static final int[] T = {
-      /* T8_0 */
-      0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
-      0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
-      0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
-      0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
-      0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
-      0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
-      0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
-      0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
-      0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
-      0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
-      0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
-      0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
-      0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
-      0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
-      0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
-      0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
-      0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
-      0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
-      0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
-      0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
-      0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
-      0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
-      0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
-      0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
-      0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
-      0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
-      0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
-      0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
-      0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
-      0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
-      0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
-      0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
-      0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
-      0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
-      0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
-      0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
-      0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
-      0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
-      0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
-      0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
-      0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
-      0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
-      0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
-      0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
-      0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
-      0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
-      0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
-      0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
-      0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
-      0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
-      0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
-      0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
-      0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
-      0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
-      0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
-      0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
-      0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
-      0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
-      0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
-      0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
-      0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
-      0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
-      0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
-      0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
-      /* T8_1 */
-      0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899,
-      0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945,
-      0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21,
-      0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD,
-      0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918,
-      0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4,
-      0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0,
-      0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C,
-      0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B,
-      0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47,
-      0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823,
-      0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF,
-      0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A,
-      0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6,
-      0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2,
-      0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E,
-      0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D,
-      0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41,
-      0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25,
-      0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9,
-      0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C,
-      0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0,
-      0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4,
-      0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78,
-      0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F,
-      0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43,
-      0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27,
-      0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB,
-      0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E,
-      0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2,
-      0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6,
-      0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A,
-      0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260,
-      0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC,
-      0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8,
-      0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004,
-      0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1,
-      0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D,
-      0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059,
-      0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185,
-      0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162,
-      0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE,
-      0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA,
-      0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306,
-      0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3,
-      0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F,
-      0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B,
-      0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287,
-      0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464,
-      0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8,
-      0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC,
-      0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600,
-      0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5,
-      0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439,
-      0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D,
-      0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781,
-      0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766,
-      0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA,
-      0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE,
-      0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502,
-      0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7,
-      0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B,
-      0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F,
-      0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483,
-      /* T8_2 */
-      0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073,
-      0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469,
-      0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6,
-      0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC,
-      0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9,
-      0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3,
-      0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C,
-      0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726,
-      0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67,
-      0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D,
-      0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2,
-      0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8,
-      0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED,
-      0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7,
-      0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828,
-      0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32,
-      0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA,
-      0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0,
-      0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F,
-      0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75,
-      0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20,
-      0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A,
-      0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5,
-      0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF,
-      0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE,
-      0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4,
-      0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B,
-      0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161,
-      0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634,
-      0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E,
-      0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1,
-      0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB,
-      0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730,
-      0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A,
-      0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5,
-      0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF,
-      0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA,
-      0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0,
-      0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F,
-      0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065,
-      0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24,
-      0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E,
-      0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1,
-      0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB,
-      0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE,
-      0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4,
-      0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B,
-      0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71,
-      0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9,
-      0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3,
-      0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C,
-      0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36,
-      0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63,
-      0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79,
-      0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6,
-      0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC,
-      0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD,
-      0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7,
-      0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238,
-      0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622,
-      0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177,
-      0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D,
-      0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2,
-      0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8,
-      /* T8_3 */
-      0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939,
-      0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA,
-      0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF,
-      0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C,
-      0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804,
-      0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7,
-      0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2,
-      0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11,
-      0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2,
-      0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41,
-      0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54,
-      0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7,
-      0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F,
-      0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C,
-      0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69,
-      0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A,
-      0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE,
-      0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D,
-      0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538,
-      0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB,
-      0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3,
-      0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610,
-      0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405,
-      0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6,
-      0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255,
-      0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6,
-      0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3,
-      0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040,
-      0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368,
-      0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B,
-      0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E,
-      0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D,
-      0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006,
-      0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5,
-      0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0,
-      0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213,
-      0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B,
-      0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8,
-      0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD,
-      0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E,
-      0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D,
-      0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E,
-      0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B,
-      0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698,
-      0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0,
-      0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443,
-      0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656,
-      0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5,
-      0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1,
-      0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12,
-      0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07,
-      0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4,
-      0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC,
-      0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F,
-      0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A,
-      0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9,
-      0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A,
-      0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99,
-      0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C,
-      0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F,
-      0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57,
-      0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4,
-      0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1,
-      0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842,
-      /* T8_4 */
-      0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4,
-      0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44,
-      0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65,
-      0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5,
-      0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127,
-      0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97,
-      0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6,
-      0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406,
-      0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3,
-      0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13,
-      0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32,
-      0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082,
-      0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470,
-      0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0,
-      0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1,
-      0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151,
-      0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A,
-      0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA,
-      0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB,
-      0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B,
-      0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89,
-      0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539,
-      0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018,
-      0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8,
-      0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D,
-      0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD,
-      0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C,
-      0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C,
-      0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE,
-      0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E,
-      0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F,
-      0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF,
-      0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8,
-      0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18,
-      0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39,
-      0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089,
-      0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B,
-      0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB,
-      0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA,
-      0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A,
-      0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF,
-      0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F,
-      0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E,
-      0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE,
-      0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C,
-      0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C,
-      0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD,
-      0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D,
-      0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06,
-      0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6,
-      0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497,
-      0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27,
-      0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5,
-      0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065,
-      0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544,
-      0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4,
-      0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51,
-      0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1,
-      0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0,
-      0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70,
-      0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82,
-      0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532,
-      0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013,
-      0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3,
-      /* T8_5 */
-      0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA,
-      0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD,
-      0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5,
-      0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2,
-      0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4,
-      0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93,
-      0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB,
-      0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C,
-      0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57,
-      0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20,
-      0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548,
-      0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F,
-      0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69,
-      0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E,
-      0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576,
-      0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201,
-      0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031,
-      0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746,
-      0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E,
-      0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59,
-      0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F,
-      0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778,
-      0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810,
-      0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67,
-      0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC,
-      0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB,
-      0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3,
-      0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4,
-      0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682,
-      0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5,
-      0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D,
-      0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA,
-      0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C,
-      0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B,
-      0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413,
-      0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364,
-      0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32,
-      0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45,
-      0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D,
-      0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A,
-      0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81,
-      0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6,
-      0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E,
-      0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9,
-      0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF,
-      0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8,
-      0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0,
-      0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7,
-      0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7,
-      0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090,
-      0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8,
-      0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F,
-      0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9,
-      0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE,
-      0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6,
-      0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1,
-      0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A,
-      0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D,
-      0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975,
-      0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02,
-      0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154,
-      0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623,
-      0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B,
-      0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C,
-      /* T8_6 */
-      0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558,
-      0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089,
-      0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B,
-      0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA,
-      0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE,
-      0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F,
-      0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD,
-      0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C,
-      0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5,
-      0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334,
-      0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6,
-      0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67,
-      0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43,
-      0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992,
-      0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110,
-      0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1,
-      0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222,
-      0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3,
-      0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71,
-      0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0,
-      0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884,
-      0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55,
-      0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7,
-      0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006,
-      0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F,
-      0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E,
-      0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC,
-      0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D,
-      0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39,
-      0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8,
-      0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A,
-      0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB,
-      0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC,
-      0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D,
-      0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF,
-      0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E,
-      0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A,
-      0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB,
-      0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59,
-      0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988,
-      0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811,
-      0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0,
-      0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542,
-      0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093,
-      0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7,
-      0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766,
-      0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4,
-      0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35,
-      0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6,
-      0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907,
-      0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185,
-      0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454,
-      0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670,
-      0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1,
-      0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23,
-      0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2,
-      0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B,
-      0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA,
-      0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238,
-      0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9,
-      0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD,
-      0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C,
-      0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E,
-      0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F,
-      /* T8_7 */
-      0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769,
-      0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504,
-      0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3,
-      0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE,
-      0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD,
-      0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0,
-      0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07,
-      0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A,
-      0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0,
-      0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D,
-      0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A,
-      0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447,
-      0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44,
-      0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929,
-      0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E,
-      0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3,
-      0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B,
-      0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36,
-      0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881,
-      0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC,
-      0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF,
-      0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782,
-      0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135,
-      0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358,
-      0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2,
-      0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF,
-      0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18,
-      0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75,
-      0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076,
-      0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B,
-      0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC,
-      0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1,
-      0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D,
-      0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360,
-      0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7,
-      0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA,
-      0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9,
-      0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4,
-      0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63,
-      0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E,
-      0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494,
-      0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9,
-      0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E,
-      0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223,
-      0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20,
-      0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D,
-      0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA,
-      0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97,
-      0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F,
-      0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852,
-      0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5,
-      0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88,
-      0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B,
-      0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6,
-      0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751,
-      0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C,
-      0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6,
-      0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB,
-      0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C,
-      0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911,
-      0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612,
-      0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F,
-      0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8,
-      0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5
-  };
-}
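The class comment above fixes everything needed to regenerate these tables: T8_0 is the ordinary byte-at-a-time table for the reflected polynomial 0x82F63B78, and each subsequent block T8_k advances a byte's contribution by one additional zero byte. A sketch of an equivalent generator (the project's own generator is TestPureJavaCrc32.Table, referenced above; the class and method names here are illustrative):

    /** Illustrative regeneration of the T8_0..T8_7 blocks for CRC-32C. */
    public final class Crc32cTableGen {
      public static int[] generate() {
        final int poly = 0x82F63B78; // reflected CRC-32C (Castagnoli) polynomial
        final int[] t = new int[8 * 256];
        // T8_0: one reflected table-driven step per input byte.
        for (int i = 0; i < 256; i++) {
          int crc = i;
          for (int bit = 0; bit < 8; bit++) {
            crc = (crc >>> 1) ^ ((crc & 1) != 0 ? poly : 0);
          }
          t[i] = crc;
        }
        // T8_k[i] equals T8_{k-1}[i] advanced through one extra zero byte.
        for (int k = 1; k < 8; k++) {
          for (int i = 0; i < 256; i++) {
            final int prev = t[(k - 1) * 256 + i];
            t[k * 256 + i] = (prev >>> 8) ^ t[prev & 0xff];
          }
        }
        return t;
      }

      public static void main(String[] args) {
        final int[] t = generate();
        // Should print the head of T8_0 above: 00000000 F26B8303 E13B70F7.
        System.out.printf("%08X %08X %08X%n", t[0], t[1], t[2]);
      }
    }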
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
deleted file mode 100644
index 7992dad..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.DirectoryStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.Properties;
-
-/**
- * Storage information file. This class defines the methods to check
- * the consistency of the storage dir and the version file.
- * <p>
- * Local storage information is stored in a separate file VERSION.
- * It contains the type of the node,
- * the storage layout version, the SCM id, and
- * the OM/SCM state creation time.
- *
- */
-@InterfaceAudience.Private
-public abstract class Storage {
-  private static final Logger LOG = LoggerFactory.getLogger(Storage.class);
-
-  public static final String STORAGE_DIR_CURRENT = "current";
-  protected static final String STORAGE_FILE_VERSION = "VERSION";
-  public static final String CONTAINER_DIR = "containerDir";
-
-  private final NodeType nodeType;
-  private final File root;
-  private final File storageDir;
-
-  private StorageState state;
-  private StorageInfo storageInfo;
-
-  /**
-   * Determines the state of the Version file.
-   */
-  public enum StorageState {
-    NON_EXISTENT, NOT_INITIALIZED, INITIALIZED
-  }
-
-  public Storage(NodeType type, File root, String sdName)
-      throws IOException {
-    this.nodeType = type;
-    this.root = root;
-    this.storageDir = new File(root, sdName);
-    this.state = getStorageState();
-    if (state == StorageState.INITIALIZED) {
-      this.storageInfo = new StorageInfo(type, getVersionFile());
-    } else {
-      this.storageInfo = new StorageInfo(
-          nodeType, StorageInfo.newClusterID(), Time.now());
-      setNodeProperties();
-    }
-  }
-
-  /**
-   * Gets the path of the Storage dir.
-   * @return Storage dir path
-   */
-  public String getStorageDir() {
-    return storageDir.getAbsoluteFile().toString();
-  }
-
-  /**
-   * Gets the state of the version file.
-   * @return the state of the Version file
-   */
-  public StorageState getState() {
-    return state;
-  }
-
-  public NodeType getNodeType() {
-    return storageInfo.getNodeType();
-  }
-
-  public String getClusterID() {
-    return storageInfo.getClusterID();
-  }
-
-  public long getCreationTime() {
-    return storageInfo.getCreationTime();
-  }
-
-  public void setClusterId(String clusterId) throws IOException {
-    if (state == StorageState.INITIALIZED) {
-      throw new IOException(
-          "Storage directory " + storageDir + " already initialized.");
-    } else {
-      storageInfo.setClusterId(clusterId);
-    }
-  }
-
-  /**
-   * Retrieves the StorageInfo instance used to read/write the common
-   * version file properties.
-   * @return the StorageInfo instance
-   */
-  protected StorageInfo getStorageInfo() {
-    return storageInfo;
-  }
-
-  protected abstract Properties getNodeProperties();
-
-  /**
-   * Sets the Node properties specific to OM/SCM.
-   */
-  private void setNodeProperties() {
-    Properties nodeProperties = getNodeProperties();
-    if (nodeProperties != null) {
-      for (String key : nodeProperties.stringPropertyNames()) {
-        storageInfo.setProperty(key, nodeProperties.getProperty(key));
-      }
-    }
-  }
-
-  /**
-   * Directory {@code current} contains latest files defining
-   * the file system meta-data.
-   *
-   * @return the directory path
-   */
-  public File getCurrentDir() {
-    return new File(storageDir, STORAGE_DIR_CURRENT);
-  }
-
-  /**
-   * File {@code VERSION} contains the following fields:
-   * <ol>
-   * <li>node type</li>
-   * <li>OM/SCM state creation time</li>
-   * <li>other fields specific for this node type</li>
-   * </ol>
-   * The version file is always written last during storage directory updates.
-   * The existence of the version file indicates that all other files have
-   * been successfully written in the storage directory, so the storage is
-   * valid and does not need to be recovered.
-   *
-   * @return the version file path
-   */
-  private File getVersionFile() {
-    return new File(getCurrentDir(), STORAGE_FILE_VERSION);
-  }
-
-  /**
-   * Checks whether the current/ directory is empty. This method is used
-   * before deciding to format the directory.
-   * @throws IOException if unable to list files under the directory.
-   */
-  private void checkEmptyCurrent() throws IOException {
-    File currentDir = getCurrentDir();
-    if (!currentDir.exists()) {
-      // if current/ does not exist, it's safe to format it.
-      return;
-    }
-    try (DirectoryStream<Path> dirStream = Files
-        .newDirectoryStream(currentDir.toPath())) {
-      if (dirStream.iterator().hasNext()) {
-        throw new InconsistentStorageStateException(getCurrentDir(),
-            "Can't initialize the storage directory because the current "
-                + "it is not empty.");
-      }
-    }
-  }
-
-  /**
-   * Check consistency of the storage directory.
-   *
-   * @return state {@link StorageState} of the storage directory
-   * @throws IOException
-   */
-  private StorageState getStorageState() throws IOException {
-    assert root != null : "root is null";
-    String rootPath = root.getCanonicalPath();
-    try { // check that storage exists
-      if (!root.exists()) {
-        // storage directory does not exist
-        LOG.warn("Storage directory " + rootPath + " does not exist");
-        return StorageState.NON_EXISTENT;
-      }
-      // or is inaccessible
-      if (!root.isDirectory()) {
-        LOG.warn(rootPath + "is not a directory");
-        return StorageState.NON_EXISTENT;
-      }
-      if (!FileUtil.canWrite(root)) {
-        LOG.warn("Cannot access storage directory " + rootPath);
-        return StorageState.NON_EXISTENT;
-      }
-    } catch (SecurityException ex) {
-      LOG.warn("Cannot access storage directory " + rootPath, ex);
-      return StorageState.NON_EXISTENT;
-    }
-
-    // check whether current directory is valid
-    File versionFile = getVersionFile();
-    boolean hasCurrent = versionFile.exists();
-
-    if (hasCurrent) {
-      return StorageState.INITIALIZED;
-    } else {
-      checkEmptyCurrent();
-      return StorageState.NOT_INITIALIZED;
-    }
-  }
-
-  /**
-   * Creates the version file if not present,
-   * otherwise throws an IOException.
-   * @throws IOException
-   */
-  public void initialize() throws IOException {
-    if (state == StorageState.INITIALIZED) {
-      throw new IOException("Storage directory already initialized.");
-    }
-    if (!getCurrentDir().mkdirs()) {
-      throw new IOException("Cannot create directory " + getCurrentDir());
-    }
-    storageInfo.writeTo(getVersionFile());
-  }
-
-  /**
-   * Persists the current StorageInfo to the file system.
-   * @throws IOException
-   */
-  public void persistCurrentState() throws IOException {
-    if (!getCurrentDir().exists()) {
-      throw new IOException("Metadata dir doesn't exist, dir: " +
-          getCurrentDir());
-    }
-    storageInfo.writeTo(getVersionFile());
-  }
-
-}
-
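Storage, removed above, was the abstract base class for the OM and SCM on-disk layout (a storage root containing current/VERSION). A minimal sketch of how a subclass drives the NOT_INITIALIZED-to-INITIALIZED transition; the node type, directory, and class name below are hypothetical, standing in for the real OM/SCM storage subclasses:

    import java.io.File;
    import java.io.IOException;
    import java.util.Properties;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
    import org.apache.hadoop.ozone.common.Storage;

    class DemoStorage extends Storage {
      DemoStorage(File root) throws IOException {
        super(NodeType.SCM, root, "demo"); // illustrative type and dir name
      }

      @Override
      protected Properties getNodeProperties() {
        // Real subclasses return node-specific keys (e.g. an SCM id) here,
        // which the base class merges into the VERSION file properties.
        return new Properties();
      }

      public static void main(String[] args) throws IOException {
        Storage storage = new DemoStorage(new File("/tmp/demo-root"));
        if (storage.getState() != Storage.StorageState.INITIALIZED) {
          // Creates current/ and writes VERSION last, so an interrupted
          // initialization leaves a detectably incomplete directory.
          storage.initialize();
        }
        System.out.println("clusterID = " + storage.getClusterID());
      }
    }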
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
deleted file mode 100644
index ad26f77..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.util.Properties;
-import java.util.UUID;
-
-/**
- * Common class for storage information. This class defines the common
- * properties and functions to set them, write them into the version file
- * and read them from the version file.
- *
- */
-@InterfaceAudience.Private
-public class StorageInfo {
-
-  private Properties properties = new Properties();
-
-  /**
-   * Property to hold node type.
-   */
-  private static final String NODE_TYPE = "nodeType";
-  /**
-   * Property to hold ID of the cluster.
-   */
-  private static final String CLUSTER_ID = "clusterID";
-  /**
-   * Property to hold creation time of the storage.
-   */
-  private static final String CREATION_TIME = "cTime";
-
-  /**
-   * Constructs a StorageInfo instance.
-   * @param type
-   *          Type of the node using the storage
-   * @param cid
-   *          Cluster ID
-   * @param cT
-   *          Cluster creation time
-   *
-   * @throws IOException on error.
-   */
-  public StorageInfo(NodeType type, String cid, long cT)
-      throws IOException {
-    Preconditions.checkNotNull(type);
-    Preconditions.checkNotNull(cid);
-    properties.setProperty(NODE_TYPE, type.name());
-    properties.setProperty(CLUSTER_ID, cid);
-    properties.setProperty(CREATION_TIME, String.valueOf(cT));
-  }
-
-  public StorageInfo(NodeType type, File propertiesFile)
-      throws IOException {
-    this.properties = readFrom(propertiesFile);
-    verifyNodeType(type);
-    verifyClusterId();
-    verifyCreationTime();
-  }
-
-  public NodeType getNodeType() {
-    return NodeType.valueOf(properties.getProperty(NODE_TYPE));
-  }
-
-  public String getClusterID() {
-    return properties.getProperty(CLUSTER_ID);
-  }
-
-  public Long getCreationTime() {
-    String creationTime = properties.getProperty(CREATION_TIME);
-    if (creationTime != null) {
-      return Long.parseLong(creationTime);
-    }
-    return null;
-  }
-
-  public String getProperty(String key) {
-    return properties.getProperty(key);
-  }
-
-  public void setProperty(String key, String value) {
-    properties.setProperty(key, value);
-  }
-
-  public void setClusterId(String clusterId) {
-    properties.setProperty(CLUSTER_ID, clusterId);
-  }
-
-  private void verifyNodeType(NodeType type)
-      throws InconsistentStorageStateException {
-    NodeType nodeType = getNodeType();
-    Preconditions.checkNotNull(nodeType);
-    if (type != nodeType) {
-      throw new InconsistentStorageStateException("Expected NodeType: " + type +
-          ", but found: " + nodeType);
-    }
-  }
-
-  private void verifyClusterId()
-      throws InconsistentStorageStateException {
-    String clusterId = getClusterID();
-    Preconditions.checkNotNull(clusterId);
-    if (clusterId.isEmpty()) {
-      throw new InconsistentStorageStateException("Cluster ID not found");
-    }
-  }
-
-  private void verifyCreationTime() {
-    Long creationTime = getCreationTime();
-    Preconditions.checkNotNull(creationTime);
-  }
-
-
-  public void writeTo(File to)
-      throws IOException {
-    try (RandomAccessFile file = new RandomAccessFile(to, "rws");
-         FileOutputStream out = new FileOutputStream(file.getFD())) {
-      file.seek(0);
-    /*
-     * If the server is interrupted before this line,
-     * the version file will remain unchanged.
-     */
-      properties.store(out, null);
-    /*
-     * Now the new fields are flushed to the head of the file, but the file
-     * length can still be larger than required, so the end of the file can
-     * contain whole or corrupted fields from its old contents. If the
-     * server is interrupted here and restarted later, these extra fields
-     * either should not affect server behavior or should be handled by
-     * the server correctly.
-     */
-      file.setLength(out.getChannel().position());
-    }
-  }
-
-  private Properties readFrom(File from) throws IOException {
-    try (RandomAccessFile file = new RandomAccessFile(from, "rws");
-        FileInputStream in = new FileInputStream(file.getFD())) {
-      Properties props = new Properties();
-      file.seek(0);
-      props.load(in);
-      return props;
-    }
-  }
-
-  /**
-   * Generate new clusterID.
-   *
-   * clusterID is a persistent attribute of the cluster.
-   * It is generated when the cluster is created and remains the same
-   * during the life cycle of the cluster.  When a new SCM node is initialized,
-   * if this is a new cluster, a new clusterID is generated and stored.
-   * @return new clusterID
-   */
-  public static String newClusterID() {
-    return "CID-" + UUID.randomUUID().toString();
-  }
-
-}
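For context, a minimal sketch of how a version file round-trips through
StorageInfo; the NodeType constant, the path, and the Time.now() clock source
are illustrative assumptions rather than anything taken from this patch:

    // Hypothetical usage; NodeType.SCM and the path are assumptions.
    File versionFile = new File("/tmp/hdds/current/VERSION");
    StorageInfo info = new StorageInfo(
        NodeType.SCM, StorageInfo.newClusterID(), Time.now());
    info.writeTo(versionFile);   // stores properties, then truncates stale bytes
    StorageInfo reloaded = new StorageInfo(NodeType.SCM, versionFile);
    // The file-based constructor re-verifies node type, cluster ID and cTime.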
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java
deleted file mode 100644
index 6517e58..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java
deleted file mode 100644
index 9aeff24..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.common.statemachine;
-
-/**
- * Exception thrown when an event is not valid for the current state.
- */
-public class InvalidStateTransitionException extends Exception {
-  private Enum<?> currentState;
-  private Enum<?> event;
-
-  public InvalidStateTransitionException(Enum<?> currentState, Enum<?> event) {
-    super("Invalid event: " + event + " at " + currentState + " state.");
-    this.currentState = currentState;
-    this.event = event;
-  }
-
-  public Enum<?> getCurrentState() {
-    return currentState;
-  }
-
-  public Enum<?> getEvent() {
-    return event;
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java
deleted file mode 100644
index bf8cbd5..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.common.statemachine;
-
-import com.google.common.base.Supplier;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Template class that wraps a simple event-driven state machine.
- * @param <STATE> states allowed
- * @param <EVENT> events allowed
- */
-public class StateMachine<STATE extends Enum<?>, EVENT extends Enum<?>> {
-  private STATE initialState;
-  private Set<STATE> finalStates;
-
-  private final LoadingCache<EVENT, Map<STATE, STATE>> transitions =
-      CacheBuilder.newBuilder().build(
-          CacheLoader.from((Supplier<Map<STATE, STATE>>) () -> new HashMap<>()));
-
-  public StateMachine(STATE initState, Set<STATE> finalStates) {
-    this.initialState = initState;
-    this.finalStates = finalStates;
-  }
-
-  public STATE getInitialState() {
-    return initialState;
-  }
-
-  public Set<STATE> getFinalStates() {
-    return finalStates;
-  }
-
-  public STATE getNextState(STATE from, EVENT e)
-      throws InvalidStateTransitionException {
-    STATE target = transitions.getUnchecked(e).get(from);
-    if (target == null) {
-      throw new InvalidStateTransitionException(from, e);
-    }
-    return target;
-  }
-
-  public void addTransition(STATE from, STATE to, EVENT e) {
-    transitions.getUnchecked(e).put(from, to);
-  }
-}
\ No newline at end of file
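A minimal sketch of how this template is typically wired up; the State and
Event enums here are made up for illustration:

    enum State { INIT, RUNNING, CLOSED }
    enum Event { START, CLOSE }

    StateMachine<State, Event> sm =
        new StateMachine<>(State.INIT, EnumSet.of(State.CLOSED));
    sm.addTransition(State.INIT, State.RUNNING, Event.START);
    sm.addTransition(State.RUNNING, State.CLOSED, Event.CLOSE);
    State next = sm.getNextState(State.INIT, Event.START); // RUNNING
    // An unregistered (state, event) pair throws
    // InvalidStateTransitionException, e.g. getNextState(CLOSED, START).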
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java
deleted file mode 100644
index 045409e3e..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.common.statemachine;
-/**
- * State machine template classes for Ozone.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
deleted file mode 100644
index e0cac8b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.commons.lang3.builder.ToStringBuilder;
-import org.apache.commons.lang3.builder.ToStringStyle;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.client.BlockID;
-import com.google.common.base.Preconditions;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.ArrayList;
-
-/**
- * Helper class to convert Protobuf to Java classes.
- */
-public class BlockData {
-  private final BlockID blockID;
-  private final Map<String, String> metadata;
-
-  /**
-   * Represent a list of chunks.
-   * In order to reduce memory usage, chunkList is declared as an
-   * {@link Object}.
-   * When #elements == 0, chunkList is null.
-   * When #elements == 1, chunkList refers to the only element.
-   * When #elements > 1, chunkList refers to the list.
-   *
-   * Please note: when we are working with blocks, we don't care what they
-   * point to, so we neither read nor validate the chunk info; that is the
-   * responsibility of a higher layer such as Ozone. We just read and write
-   * data from the network.
-   */
-  private Object chunkList;
-
-  /**
-   * total size of the key.
-   */
-  private long size;
-
-  /**
-   * Constructs a BlockData Object.
-   *
-   * @param blockID - ID of the block this data describes.
-   */
-  public BlockData(BlockID blockID) {
-    this.blockID = blockID;
-    this.metadata = new TreeMap<>();
-    this.size = 0;
-  }
-
-  public long getBlockCommitSequenceId() {
-    return blockID.getBlockCommitSequenceId();
-  }
-
-  public void setBlockCommitSequenceId(long blockCommitSequenceId) {
-    this.blockID.setBlockCommitSequenceId(blockCommitSequenceId);
-  }
-
-  /**
-   * Returns a blockData object from the protobuf data.
-   *
-   * @param data - Protobuf data.
-   * @return - BlockData
-   * @throws IOException
-   */
-  public static BlockData getFromProtoBuf(ContainerProtos.BlockData data) throws
-      IOException {
-    BlockData blockData = new BlockData(
-        BlockID.getFromProtobuf(data.getBlockID()));
-    for (int x = 0; x < data.getMetadataCount(); x++) {
-      blockData.addMetadata(data.getMetadata(x).getKey(),
-          data.getMetadata(x).getValue());
-    }
-    blockData.setChunks(data.getChunksList());
-    if (data.hasSize()) {
-      Preconditions.checkArgument(data.getSize() == blockData.getSize());
-    }
-    return blockData;
-  }
-
-  /**
-   * Returns a Protobuf message from BlockData.
-   * @return Proto Buf Message.
-   */
-  public ContainerProtos.BlockData getProtoBufMessage() {
-    ContainerProtos.BlockData.Builder builder =
-        ContainerProtos.BlockData.newBuilder();
-    builder.setBlockID(this.blockID.getDatanodeBlockIDProtobuf());
-    for (Map.Entry<String, String> entry : metadata.entrySet()) {
-      ContainerProtos.KeyValue.Builder keyValBuilder =
-          ContainerProtos.KeyValue.newBuilder();
-      builder.addMetadata(keyValBuilder.setKey(entry.getKey())
-          .setValue(entry.getValue()).build());
-    }
-    builder.addAllChunks(getChunks());
-    builder.setSize(size);
-    return builder.build();
-  }
-
-  /**
-   * Adds metadata.
-   *
-   * @param key   - Key
-   * @param value - Value
-   * @throws IOException
-   */
-  public synchronized void addMetadata(String key, String value) throws
-      IOException {
-    if (this.metadata.containsKey(key)) {
-      throw new IOException("This key already exists. Key " + key);
-    }
-    metadata.put(key, value);
-  }
-
-  public synchronized Map<String, String> getMetadata() {
-    return Collections.unmodifiableMap(this.metadata);
-  }
-
-  /**
-   * Returns value of a key.
-   */
-  public synchronized String getValue(String key) {
-    return metadata.get(key);
-  }
-
-  /**
-   * Deletes a metadata entry from the map.
-   *
-   * @param key - Key
-   */
-  public synchronized void deleteKey(String key) {
-    metadata.remove(key);
-  }
-
-  @SuppressWarnings("unchecked")
-  private List<ContainerProtos.ChunkInfo> castChunkList() {
-    return (List<ContainerProtos.ChunkInfo>)chunkList;
-  }
-
-  /**
-   * Returns chunks list.
-   *
-   * @return list of chunkinfo.
-   */
-  public List<ContainerProtos.ChunkInfo> getChunks() {
-    return chunkList == null? Collections.emptyList()
-        : chunkList instanceof ContainerProtos.ChunkInfo?
-            Collections.singletonList((ContainerProtos.ChunkInfo)chunkList)
-        : Collections.unmodifiableList(castChunkList());
-  }
-
-  /**
-   * Adds a chunkInfo to the list.
-   */
-  public void addChunk(ContainerProtos.ChunkInfo chunkInfo) {
-    if (chunkList == null) {
-      chunkList = chunkInfo;
-    } else {
-      final List<ContainerProtos.ChunkInfo> list;
-      if (chunkList instanceof ContainerProtos.ChunkInfo) {
-        list = new ArrayList<>(2);
-        list.add((ContainerProtos.ChunkInfo)chunkList);
-        chunkList = list;
-      } else {
-        list = castChunkList();
-      }
-      list.add(chunkInfo);
-    }
-    size += chunkInfo.getLen();
-  }
-
-  /**
-   * removes the chunk.
-   */
-  public boolean removeChunk(ContainerProtos.ChunkInfo chunkInfo) {
-    final boolean removed;
-    if (chunkList instanceof List) {
-      final List<ContainerProtos.ChunkInfo> list = castChunkList();
-      removed = list.remove(chunkInfo);
-      if (list.size() == 1) {
-        chunkList = list.get(0);
-      }
-    } else if (chunkInfo.equals(chunkList)) {
-      chunkList = null;
-      removed = true;
-    } else {
-      removed = false;
-    }
-
-    if (removed) {
-      size -= chunkInfo.getLen();
-    }
-    return removed;
-  }
-
-  /**
-   * Returns container ID.
-   *
-   * @return long.
-   */
-  public long getContainerID() {
-    return blockID.getContainerID();
-  }
-
-  /**
-   * Returns LocalID.
-   * @return long.
-   */
-  public long getLocalID() {
-    return blockID.getLocalID();
-  }
-
-  /**
-   * Return Block ID.
-   * @return BlockID.
-   */
-  public BlockID getBlockID() {
-    return blockID;
-  }
-
-  /**
-   * Sets Chunk list.
-   *
-   * @param chunks - List of chunks.
-   */
-  public void setChunks(List<ContainerProtos.ChunkInfo> chunks) {
-    if (chunks == null) {
-      chunkList = null;
-      size = 0L;
-    } else {
-      final int n = chunks.size();
-      chunkList = n == 0? null: n == 1? chunks.get(0): chunks;
-      size = chunks.parallelStream().mapToLong(
-          ContainerProtos.ChunkInfo::getLen).sum();
-    }
-  }
-
-  /**
-   * Get the total size of chunks allocated for the key.
-   * @return total size of the key.
-   */
-  public long getSize() {
-    return size;
-  }
-
-  @Override
-  public String toString() {
-    return new ToStringBuilder(this, ToStringStyle.NO_CLASS_NAME_STYLE)
-        .append("blockId", blockID.toString())
-        .append("size", this.size)
-        .toString();
-  }
-}
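A short sketch of the single-element optimization in chunkList; the BlockID
values and the chunk proto are illustrative, and the two-argument
BlockID(containerID, localID) constructor is assumed:

    BlockData block = new BlockData(new BlockID(1L, 101L));
    ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo.newBuilder()
        .setChunkName("chunk_0").setOffset(0).setLen(1024)
        .setChecksumData(Checksum.getNoChecksumDataProto())
        .build();
    block.addChunk(chunk);    // one element: held directly, no list allocated
    block.getChunks();        // a singleton list view of that element
    block.removeChunk(chunk); // back to an empty block; size drops to 0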
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
deleted file mode 100644
index 1c73a31..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.TreeMap;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.common.ChecksumData;
-
-/**
- * Java class that represents the ChunkInfo ProtoBuf class. This helper class
- * allows us to convert between protobuf and plain Java objects.
- */
-public class ChunkInfo {
-  private final String chunkName;
-  private final long offset;
-  private final long len;
-  private ChecksumData checksumData;
-  private final Map<String, String> metadata;
-
-
-  /**
-   * Constructs a ChunkInfo.
-   *
-   * @param chunkName - File Name where chunk lives.
-   * @param offset    - offset where Chunk Starts.
-   * @param len       - Length of the Chunk.
-   */
-  public ChunkInfo(String chunkName, long offset, long len) {
-    this.chunkName = chunkName;
-    this.offset = offset;
-    this.len = len;
-    this.metadata = new TreeMap<>();
-  }
-
-  /**
-   * Adds metadata.
-   *
-   * @param key   - Key Name.
-   * @param value - Value.
-   * @throws IOException
-   */
-  public void addMetadata(String key, String value) throws IOException {
-    synchronized (this.metadata) {
-      if (this.metadata.containsKey(key)) {
-        throw new IOException("This key already exists. Key " + key);
-      }
-      metadata.put(key, value);
-    }
-  }
-
-  /**
-   * Gets a Chunkinfo class from the protobuf definitions.
-   *
-   * @param info - Protobuf class
-   * @return ChunkInfo
-   * @throws IOException
-   */
-  public static ChunkInfo getFromProtoBuf(ContainerProtos.ChunkInfo info)
-      throws IOException {
-    Preconditions.checkNotNull(info);
-
-    ChunkInfo chunkInfo = new ChunkInfo(info.getChunkName(), info.getOffset(),
-        info.getLen());
-
-    for (int x = 0; x < info.getMetadataCount(); x++) {
-      chunkInfo.addMetadata(info.getMetadata(x).getKey(),
-          info.getMetadata(x).getValue());
-    }
-
-    chunkInfo.setChecksumData(
-        ChecksumData.getFromProtoBuf(info.getChecksumData()));
-
-    return chunkInfo;
-  }
-
-  /**
-   * Returns a ProtoBuf Message from ChunkInfo.
-   *
-   * @return Protocol Buffer Message
-   */
-  public ContainerProtos.ChunkInfo getProtoBufMessage() {
-    ContainerProtos.ChunkInfo.Builder builder = ContainerProtos
-        .ChunkInfo.newBuilder();
-
-    builder.setChunkName(this.getChunkName());
-    builder.setOffset(this.getOffset());
-    builder.setLen(this.getLen());
-    if (checksumData == null) {
-      // ChecksumData cannot be null while computing the protobufMessage.
-      // Set it to NONE type (equivalent to non checksum).
-      builder.setChecksumData(Checksum.getNoChecksumDataProto());
-    } else {
-      builder.setChecksumData(this.checksumData.getProtoBufMessage());
-    }
-
-    for (Map.Entry<String, String> entry : metadata.entrySet()) {
-      ContainerProtos.KeyValue.Builder keyValBuilder =
-          ContainerProtos.KeyValue.newBuilder();
-      builder.addMetadata(keyValBuilder.setKey(entry.getKey())
-          .setValue(entry.getValue()).build());
-    }
-
-    return builder.build();
-  }
-
-  /**
-   * Returns the chunkName.
-   *
-   * @return - String
-   */
-  public String getChunkName() {
-    return chunkName;
-  }
-
-  /**
-   * Gets the start offset of the given chunk in the physical file.
-   *
-   * @return - long
-   */
-  public long getOffset() {
-    return offset;
-  }
-
-  /**
-   * Returns the length of the Chunk.
-   *
-   * @return long
-   */
-  public long getLen() {
-    return len;
-  }
-
-  /**
-   * Returns the checksumData of this chunk.
-   */
-  public ChecksumData getChecksumData() {
-    return checksumData;
-  }
-
-  /**
-   * Sets the checksums of this chunk.
-   */
-  public void setChecksumData(ChecksumData cData) {
-    this.checksumData = cData;
-  }
-
-  /**
-   * Returns Metadata associated with this Chunk.
-   *
-   * @return - Map of Key,values.
-   */
-  public Map<String, String> getMetadata() {
-    return metadata;
-  }
-
-  @Override
-  public String toString() {
-    return "ChunkInfo{" +
-        "chunkName='" + chunkName +
-        ", offset=" + offset +
-        ", len=" + len +
-        '}';
-  }
-}
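A small round-trip sketch; the chunk name and metadata entry are illustrative:

    ChunkInfo chunk = new ChunkInfo("block_101_chunk_0", 0, 4096);
    chunk.addMetadata("owner", "ozone");  // hypothetical key/value
    ContainerProtos.ChunkInfo proto = chunk.getProtoBufMessage();
    // checksumData was never set, so the proto carries the NONE checksum.
    ChunkInfo restored = ChunkInfo.getFromProtoBuf(proto);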
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java
deleted file mode 100644
index 11d9028f..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
-    ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
-import org.apache.hadoop.ozone.audit.DNAction;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.TreeMap;
-
-/**
- * Utilities for converting protobuf classes to Java classes.
- */
-public final class ContainerCommandRequestPBHelper {
-
-  static final Logger LOG =
-      LoggerFactory.getLogger(ContainerCommandRequestPBHelper.class);
-
-  private ContainerCommandRequestPBHelper() {
-  }
-
-  public static Map<String, String> getAuditParams(
-      ContainerCommandRequestProto msg) {
-    Map<String, String> auditParams = new TreeMap<>();
-    Type cmdType = msg.getCmdType();
-    String containerID = String.valueOf(msg.getContainerID());
-    switch(cmdType) {
-    case CreateContainer:
-      auditParams.put("containerID", containerID);
-      auditParams.put("containerType",
-          msg.getCreateContainer().getContainerType().toString());
-      return auditParams;
-
-    case ReadContainer:
-      auditParams.put("containerID", containerID);
-      return auditParams;
-
-    case UpdateContainer:
-      auditParams.put("containerID", containerID);
-      auditParams.put("forceUpdate",
-          String.valueOf(msg.getUpdateContainer().getForceUpdate()));
-      return auditParams;
-
-    case DeleteContainer:
-      auditParams.put("containerID", containerID);
-      auditParams.put("forceDelete",
-          String.valueOf(msg.getDeleteContainer().getForceDelete()));
-      return auditParams;
-
-    case ListContainer:
-      auditParams.put("startContainerID", containerID);
-      auditParams.put("count",
-          String.valueOf(msg.getListContainer().getCount()));
-      return auditParams;
-
-    case PutBlock:
-      try{
-        auditParams.put("blockData",
-            BlockData.getFromProtoBuf(msg.getPutBlock().getBlockData())
-                .toString());
-      } catch (IOException ex){
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Encountered error parsing BlockData from protobuf: "
-              + ex.getMessage());
-        }
-        return null;
-      }
-      return auditParams;
-
-    case GetBlock:
-      auditParams.put("blockData",
-          BlockID.getFromProtobuf(msg.getGetBlock().getBlockID()).toString());
-      return auditParams;
-
-    case DeleteBlock:
-      auditParams.put("blockData",
-          BlockID.getFromProtobuf(msg.getDeleteBlock().getBlockID())
-              .toString());
-      return auditParams;
-
-    case ListBlock:
-      auditParams.put("startLocalID",
-          String.valueOf(msg.getListBlock().getStartLocalID()));
-      auditParams.put("count", String.valueOf(msg.getListBlock().getCount()));
-      return auditParams;
-
-    case ReadChunk:
-      auditParams.put("blockData",
-          BlockID.getFromProtobuf(msg.getReadChunk().getBlockID()).toString());
-      return auditParams;
-
-    case DeleteChunk:
-      auditParams.put("blockData",
-          BlockID.getFromProtobuf(msg.getDeleteChunk().getBlockID())
-              .toString());
-      return auditParams;
-
-    case WriteChunk:
-      auditParams.put("blockData",
-          BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID())
-              .toString());
-      return auditParams;
-
-    case ListChunk:
-      auditParams.put("blockData",
-          BlockID.getFromProtobuf(msg.getListChunk().getBlockID()).toString());
-      auditParams.put("prevChunkName", msg.getListChunk().getPrevChunkName());
-      auditParams.put("count", String.valueOf(msg.getListChunk().getCount()));
-      return auditParams;
-
-    case CompactChunk: return null; //CompactChunk operation
-
-    case PutSmallFile:
-      try{
-        auditParams.put("blockData",
-            BlockData.getFromProtoBuf(msg.getPutSmallFile()
-                .getBlock().getBlockData()).toString());
-      } catch (IOException ex){
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Encountered error parsing BlockData from protobuf: "
-              + ex.getMessage());
-        }
-      }
-      return auditParams;
-
-    case GetSmallFile:
-      auditParams.put("blockData",
-          BlockID.getFromProtobuf(msg.getGetSmallFile().getBlock().getBlockID())
-              .toString());
-      return auditParams;
-
-    case CloseContainer:
-      auditParams.put("containerID", containerID);
-      return auditParams;
-
-    case GetCommittedBlockLength:
-      auditParams.put("blockData",
-          BlockID.getFromProtobuf(msg.getGetCommittedBlockLength().getBlockID())
-              .toString());
-      return auditParams;
-
-    default :
-      LOG.debug("Invalid command type - {}", cmdType);
-      return null;
-    }
-
-  }
-
-  public static DNAction getAuditAction(Type cmdType) {
-    switch (cmdType) {
-    case CreateContainer  : return DNAction.CREATE_CONTAINER;
-    case ReadContainer    : return DNAction.READ_CONTAINER;
-    case UpdateContainer  : return DNAction.UPDATE_CONTAINER;
-    case DeleteContainer  : return DNAction.DELETE_CONTAINER;
-    case ListContainer    : return DNAction.LIST_CONTAINER;
-    case PutBlock         : return DNAction.PUT_BLOCK;
-    case GetBlock         : return DNAction.GET_BLOCK;
-    case DeleteBlock      : return DNAction.DELETE_BLOCK;
-    case ListBlock        : return DNAction.LIST_BLOCK;
-    case ReadChunk        : return DNAction.READ_CHUNK;
-    case DeleteChunk      : return DNAction.DELETE_CHUNK;
-    case WriteChunk       : return DNAction.WRITE_CHUNK;
-    case ListChunk        : return DNAction.LIST_CHUNK;
-    case CompactChunk     : return DNAction.COMPACT_CHUNK;
-    case PutSmallFile     : return DNAction.PUT_SMALL_FILE;
-    case GetSmallFile     : return DNAction.GET_SMALL_FILE;
-    case CloseContainer   : return DNAction.CLOSE_CONTAINER;
-    case GetCommittedBlockLength : return DNAction.GET_COMMITTED_BLOCK_LENGTH;
-    default :
-      LOG.debug("Invalid command type - {}", cmdType);
-      return null;
-    }
-  }
-
-}
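For illustration, mapping a command type to its audit action; Type.PutBlock
and DNAction.PUT_BLOCK are values handled by the switch above:

    DNAction action =
        ContainerCommandRequestPBHelper.getAuditAction(Type.PutBlock);
    // action == DNAction.PUT_BLOCK; an unhandled type is logged at debug
    // level and yields null, as does getAuditParams on a parse failure.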
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
deleted file mode 100644
index fa5df11..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-/**
- * Helper classes for the container protocol communication.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java
deleted file mode 100644
index dfa9315..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-import org.apache.hadoop.util.Time;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.Callable;
-
-/**
- * This class represents a lease created on a resource. Callbacks can be
- * registered on the lease; they will be executed in case of timeout.
- *
- * @param <T> Resource type for which the lease can be associated
- */
-public class Lease<T> {
-
-  /**
-   * The resource for which this lease is created.
-   */
-  private final T resource;
-
-  private final long creationTime;
-
-  /**
-   * Lease lifetime in milliseconds.
-   */
-  private volatile long leaseTimeout;
-
-  private boolean expired;
-
-  /**
-   * Functions to be called in case of timeout.
-   */
-  private List<Callable<Void>> callbacks;
-
-
-  /**
-   * Creates a lease on the specified resource with given timeout.
-   *
-   * @param resource
-   *        Resource for which the lease has to be created
-   * @param timeout
-   *        Lease lifetime in milliseconds
-   */
-  public Lease(T resource, long timeout) {
-    this.resource = resource;
-    this.leaseTimeout = timeout;
-    this.callbacks = Collections.synchronizedList(new ArrayList<>());
-    this.creationTime = Time.monotonicNow();
-    this.expired = false;
-  }
-
-  /**
-   * Returns true if the lease has expired, else false.
-   *
-   * @return true if expired, else false
-   */
-  public boolean hasExpired() {
-    return expired;
-  }
-
-  /**
-   * Registers a callback which will be executed in case of timeout. Callbacks
-   * are executed in a separate Thread.
-   *
-   * @param callback
-   *        The Callable which has to be executed
-   * @throws LeaseExpiredException
-   *         If the lease has already timed out
-   */
-  public void registerCallBack(Callable<Void> callback)
-      throws LeaseExpiredException {
-    if(hasExpired()) {
-      throw new LeaseExpiredException("Resource: " + resource);
-    }
-    callbacks.add(callback);
-  }
-
-  /**
-   * Returns the time elapsed since the creation of lease.
-   *
-   * @return elapsed time in milliseconds
-   * @throws LeaseExpiredException
-   *         If the lease has already timed out
-   */
-  public long getElapsedTime() throws LeaseExpiredException {
-    if(hasExpired()) {
-      throw new LeaseExpiredException("Resource: " + resource);
-    }
-    return Time.monotonicNow() - creationTime;
-  }
-
-  /**
-   * Returns the time available before timeout.
-   *
-   * @return remaining time in milliseconds
-   * @throws LeaseExpiredException
-   *         If the lease has already timed out
-   */
-  public long getRemainingTime() throws LeaseExpiredException {
-    if(hasExpired()) {
-      throw new LeaseExpiredException("Resource: " + resource);
-    }
-    return leaseTimeout - getElapsedTime();
-  }
-
-  /**
-   * Returns total lease lifetime.
-   *
-   * @return total lifetime of lease in milliseconds
-   * @throws LeaseExpiredException
-   *         If the lease has already timed out
-   */
-  public long getLeaseLifeTime() throws LeaseExpiredException {
-    if(hasExpired()) {
-      throw new LeaseExpiredException("Resource: " + resource);
-    }
-    return leaseTimeout;
-  }
-
-  /**
-   * Renews the lease timeout period.
-   *
-   * @param timeout
-   *        Time to be added to the lease in milliseconds
-   * @throws LeaseExpiredException
-   *         If the lease has already timed out
-   */
-  public void renew(long timeout) throws LeaseExpiredException {
-    if(hasExpired()) {
-      throw new LeaseExpiredException("Resource: " + resource);
-    }
-    leaseTimeout += timeout;
-  }
-
-  @Override
-  public int hashCode() {
-    return resource.hashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if(obj instanceof Lease) {
-      return resource.equals(((Lease) obj).resource);
-    }
-    return false;
-  }
-
-  @Override
-  public String toString() {
-    return "Lease<" + resource.toString() + ">";
-  }
-
-  /**
-   * Returns the callbacks to be executed for the lease in case of timeout.
-   *
-   * @return callbacks to be executed
-   */
-  List<Callable<Void>> getCallbacks() {
-    return callbacks;
-  }
-
-  /**
-   * Expires/Invalidates the lease.
-   */
-  void invalidate() {
-    callbacks = null;
-    expired = true;
-  }
-
-}
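A brief sketch of the callback and renewal API; the resource name and
timeouts are illustrative, and in practice leases are obtained through
LeaseManager rather than constructed directly:

    Lease<String> lease = new Lease<>("container-42", 10_000L);
    lease.registerCallBack(() -> {
      // Runs on a LeaseCallbackExecutor thread if the lease times out.
      System.out.println("lease on container-42 expired");
      return null;
    });
    lease.renew(5_000L);  // total lifetime becomes 15 seconds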
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java
deleted file mode 100644
index a39ea22..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-/**
- * This exception represents that there is already a lease acquired on the
- * same resource.
- */
-public class LeaseAlreadyExistException extends LeaseException {
-
-  /**
-   * Constructs an {@code LeaseAlreadyExistException} with {@code null}
-   * as its error detail message.
-   */
-  public LeaseAlreadyExistException() {
-    super();
-  }
-
-  /**
-   * Constructs an {@code LeaseAlreadyExistException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public LeaseAlreadyExistException(String message) {
-    super(message);
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java
deleted file mode 100644
index e2ca455..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.concurrent.Callable;
-
-/**
- * This class is responsible for executing the callbacks of a lease in case of
- * timeout.
- */
-public class LeaseCallbackExecutor<T> implements Runnable {
-
-  private static final Logger LOG = LoggerFactory.getLogger(Lease.class);
-
-  private final T resource;
-  private final List<Callable<Void>> callbacks;
-
-  /**
-   * Constructs LeaseCallbackExecutor instance with list of callbacks.
-   *
-   * @param resource
-   *        The resource for which the callbacks are executed
-   * @param callbacks
-   *        Callbacks to be executed by this executor
-   */
-  public LeaseCallbackExecutor(T resource, List<Callable<Void>> callbacks) {
-    this.resource = resource;
-    this.callbacks = callbacks;
-  }
-
-  @Override
-  public void run() {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Executing callbacks for lease on {}", resource);
-    }
-    for(Callable<Void> callback : callbacks) {
-      try {
-        callback.call();
-      } catch (Exception e) {
-        LOG.warn("Exception while executing callback for lease on {}",
-            resource, e);
-      }
-    }
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java
deleted file mode 100644
index 418f412..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-/**
- * This exception represents all lease related exceptions.
- */
-public class LeaseException extends Exception {
-
-  /**
-   * Constructs an {@code LeaseException} with {@code null}
-   * as its error detail message.
-   */
-  public LeaseException() {
-    super();
-  }
-
-  /**
-   * Constructs an {@code LeaseException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public LeaseException(String message) {
-    super(message);
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java
deleted file mode 100644
index 440a023..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-/**
- * This exception represents that the lease that is being accessed has expired.
- */
-public class LeaseExpiredException extends LeaseException {
-
-  /**
-   * Constructs an {@code LeaseExpiredException} with {@code null}
-   * as its error detail message.
-   */
-  public LeaseExpiredException() {
-    super();
-  }
-
-  /**
-   * Constructs an {@code LeaseExpiredException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public LeaseExpiredException(String message) {
-    super(message);
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
deleted file mode 100644
index 02befae..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
+++ /dev/null
@@ -1,251 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-/**
- * LeaseManager provides leases on resources and is responsible for the
- * lifecycle of those leases. A lease can be returned to the LeaseManager
- * before it expires. The resource for which a lease is created should have
- * a proper {@code equals} method implementation; resource equality is
- * checked when the lease is created.
- *
- * @param <T> Type of leases that this lease manager can create
- */
-public class LeaseManager<T> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(LeaseManager.class);
-
-  private final String name;
-  private final long defaultTimeout;
-  private Map<T, Lease<T>> activeLeases;
-  private LeaseMonitor leaseMonitor;
-  private Thread leaseMonitorThread;
-  private boolean isRunning;
-
-  /**
-   * Creates an instance of lease manager.
-   *
-   * @param name
-   *        Name for the LeaseManager instance.
-   * @param defaultTimeout
-   *        Default timeout in milliseconds to be used for lease creation.
-   */
-  public LeaseManager(String name, long defaultTimeout) {
-    this.name = name;
-    this.defaultTimeout = defaultTimeout;
-  }
-
-  /**
-   * Starts the lease manager service.
-   */
-  public void start() {
-    LOG.debug("Starting {} LeaseManager service", name);
-    activeLeases = new ConcurrentHashMap<>();
-    leaseMonitor = new LeaseMonitor();
-    leaseMonitorThread = new Thread(leaseMonitor);
-    leaseMonitorThread.setName(name + "-LeaseManager#LeaseMonitor");
-    leaseMonitorThread.setDaemon(true);
-    leaseMonitorThread.setUncaughtExceptionHandler((thread, throwable) -> {
-      // Restart the monitor after logging an error; without a running
-      // monitor we cannot handle lease expiry. A terminated thread cannot
-      // be started again, so the monitor is restarted on a fresh thread.
-      LOG.error("LeaseMonitor thread encountered an error. Thread: {}",
-          thread.toString(), throwable);
-      leaseMonitorThread = new Thread(leaseMonitor);
-      leaseMonitorThread.setName(name + "-LeaseManager#LeaseMonitor");
-      leaseMonitorThread.setDaemon(true);
-      leaseMonitorThread.start();
-    });
-    LOG.debug("Starting {}-LeaseManager#LeaseMonitor Thread", name);
-    leaseMonitorThread.start();
-    isRunning = true;
-  }
-
-  /**
-   * Returns a lease for the specified resource with default timeout.
-   *
-   * @param resource
-   *        Resource for which lease has to be created
-   * @throws LeaseAlreadyExistException
-   *         If there is already a lease on the resource
-   */
-  public synchronized Lease<T> acquire(T resource)
-      throws LeaseAlreadyExistException {
-    return acquire(resource, defaultTimeout);
-  }
-
-  /**
-   * Returns a lease for the specified resource with the timeout provided.
-   *
-   * @param resource
-   *        Resource for which lease has to be created
-   * @param timeout
-   *        The timeout in milliseconds which has to be set on the lease
-   * @throws LeaseAlreadyExistException
-   *         If there is already a lease on the resource
-   */
-  public synchronized Lease<T> acquire(T resource, long timeout)
-      throws LeaseAlreadyExistException {
-    checkStatus();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Acquiring lease on {} for {} milliseconds", resource, timeout);
-    }
-    if(activeLeases.containsKey(resource)) {
-      throw new LeaseAlreadyExistException("Resource: " + resource);
-    }
-    Lease<T> lease = new Lease<>(resource, timeout);
-    activeLeases.put(resource, lease);
-    leaseMonitorThread.interrupt();
-    return lease;
-  }
-
-  /**
-   * Returns a lease associated with the specified resource.
-   *
-   * @param resource
-   *        Resource for which the lease has to be returned
-   * @throws LeaseNotFoundException
-   *         If there is no active lease on the resource
-   */
-  public Lease<T> get(T resource) throws LeaseNotFoundException {
-    checkStatus();
-    Lease<T> lease = activeLeases.get(resource);
-    if(lease != null) {
-      return lease;
-    }
-    throw new LeaseNotFoundException("Resource: " + resource);
-  }
-
-  /**
-   * Releases the lease associated with the specified resource.
-   *
-   * @param resource
-   *        The resource for which the lease has to be released
-   * @throws LeaseNotFoundException
-   *         If there is no active lease on the resource
-   */
-  public synchronized void release(T resource)
-      throws LeaseNotFoundException {
-    checkStatus();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Releasing lease on {}", resource);
-    }
-    Lease<T> lease = activeLeases.remove(resource);
-    if(lease == null) {
-      throw new LeaseNotFoundException("Resource: " + resource);
-    }
-    lease.invalidate();
-  }
-
-  /**
-   * Shuts down the LeaseManager and releases the resources. All the active
-   * {@link Lease} will be released (callbacks on leases will not be
-   * executed).
-   */
-  public void shutdown() {
-    checkStatus();
-    LOG.debug("Shutting down LeaseManager service");
-    leaseMonitor.disable();
-    leaseMonitorThread.interrupt();
-    for(T resource : activeLeases.keySet()) {
-      try {
-        release(resource);
-      }  catch(LeaseNotFoundException ex) {
-        //Ignore the exception, someone might have released the lease
-      }
-    }
-    isRunning = false;
-  }
-
-  /**
-   * Throws {@link LeaseManagerNotRunningException} if the service is not
-   * running.
-   */
-  private void checkStatus() {
-    if(!isRunning) {
-      throw new LeaseManagerNotRunningException("LeaseManager not running.");
-    }
-  }
-
-  /**
-   * Monitors the leases and expires them based on the timeout; it is also
-   * responsible for executing the callbacks of expired leases.
-   */
-  private final class LeaseMonitor implements Runnable {
-
-    private boolean monitor = true;
-    private ExecutorService executorService;
-
-    private LeaseMonitor() {
-      this.monitor = true;
-      this.executorService = Executors.newCachedThreadPool();
-    }
-
-    @Override
-    public void run() {
-      while (monitor) {
-        LOG.debug("{}-LeaseMonitor: checking for lease expiry", name);
-        long sleepTime = Long.MAX_VALUE;
-
-        for (T resource : activeLeases.keySet()) {
-          try {
-            Lease<T> lease = get(resource);
-            long remainingTime = lease.getRemainingTime();
-            if (remainingTime <= 0) {
-              //Lease has timed out
-              List<Callable<Void>> leaseCallbacks = lease.getCallbacks();
-              release(resource);
-              executorService.execute(
-                  new LeaseCallbackExecutor(resource, leaseCallbacks));
-            } else {
-              sleepTime = remainingTime > sleepTime ?
-                  sleepTime : remainingTime;
-            }
-          } catch (LeaseNotFoundException | LeaseExpiredException ex) {
-            //Ignore the exception, someone might have released the lease
-          }
-        }
-
-        try {
-          if(!Thread.interrupted()) {
-            Thread.sleep(sleepTime);
-          }
-        } catch (InterruptedException ignored) {
-          // This means a new lease is added to activeLeases.
-        }
-      }
-    }
-
-    /**
-     * Disables the lease monitor; the next interrupt call on the
-     * thread will stop it.
-     */
-    public void disable() {
-      monitor = false;
-    }
-  }
-
-}
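End to end, a minimal sketch of the manager lifecycle; the name, resource,
and timeout are illustrative:

    LeaseManager<String> manager = new LeaseManager<>("CommandWatcher", 10_000L);
    manager.start();                  // spawns the LeaseMonitor daemon thread
    Lease<String> lease = manager.acquire("container-42");
    lease.registerCallBack(() -> {
      return null;                    // invoked only if the lease times out
    });
    manager.release("container-42");  // returned early, so callbacks never run
    manager.shutdown();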
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java
deleted file mode 100644
index ced31de..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-/**
- * This exception represents that the LeaseManager service is not running.
- */
-public class LeaseManagerNotRunningException extends RuntimeException {
-
-  /**
-   * Constructs an {@code LeaseManagerNotRunningException} with {@code null}
-   * as its error detail message.
-   */
-  public LeaseManagerNotRunningException() {
-    super();
-  }
-
-  /**
-   * Constructs an {@code LeaseManagerNotRunningException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public LeaseManagerNotRunningException(String message) {
-    super(message);
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java
deleted file mode 100644
index c292d33..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-/**
- * This exception represents that the lease that is being accessed does not
- * exist.
- */
-public class LeaseNotFoundException extends LeaseException {
-
-  /**
-   * Constructs an {@code LeaseNotFoundException} with {@code null}
-   * as its error detail message.
-   */
-  public LeaseNotFoundException() {
-    super();
-  }
-
-  /**
-   * Constructs an {@code LeaseNotFoundException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public LeaseNotFoundException(String message) {
-    super(message);
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java
deleted file mode 100644
index 48ee2e1..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- * A generic lease management API which can be used if a service
- * needs any kind of lease management.
- */
-
-package org.apache.hadoop.ozone.lease;
-/*
- This package contains lease management related classes.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java
deleted file mode 100644
index 95dfd6c..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-/**
- * Lock implementation which also maintains counter.
- */
-public final class ActiveLock {
-
-  private ReadWriteLock lock;
-  private AtomicInteger count;
-
-  /**
-   * Use ActiveLock#newInstance to create instance.
-   *
-   * @param fairness - if true the lock uses a fair ordering policy, else
-   * non-fair ordering.
-   */
-  private ActiveLock(boolean fairness) {
-    this.lock = new ReentrantReadWriteLock(fairness);
-    this.count = new AtomicInteger(0);
-  }
-
-  /**
-   * Creates a new instance of ActiveLock.
-   *
-   * @return new ActiveLock
-   */
-  public static ActiveLock newInstance(boolean fairness) {
-    return new ActiveLock(fairness);
-  }
-
-  /**
-   * Acquires read lock.
-   *
-   * <p>Acquires the read lock if the write lock is not held by
-   * another thread and returns immediately.
-   *
-   * <p>If the write lock is held by another thread then
-   * the current thread becomes disabled for thread scheduling
-   * purposes and lies dormant until the read lock has been acquired.
-   */
-  void readLock() {
-    lock.readLock().lock();
-  }
-
-  /**
-   * Attempts to release the read lock.
-   *
-   * <p>If the number of readers is now zero then the lock
-   * is made available for write lock attempts.
-   */
-  void readUnlock() {
-    lock.readLock().unlock();
-  }
-
-  /**
-   * Acquires write lock.
-   *
-   * <p>Acquires the write lock if neither the read nor write lock
-   * are held by another thread
-   * and returns immediately, setting the write lock hold count to
-   * one.
-   *
-   * <p>If the current thread already holds the write lock then the
-   * hold count is incremented by one and the method returns
-   * immediately.
-   *
-   * <p>If the lock is held by another thread then the current
-   * thread becomes disabled for thread scheduling purposes and
-   * lies dormant until the write lock has been acquired.
-   */
-  void writeLock() {
-    lock.writeLock().lock();
-  }
-
-  /**
-   * Attempts to release the write lock.
-   *
-   * <p>If the current thread is the holder of this lock then
-   * the hold count is decremented. If the hold count is now
-   * zero then the lock is released.
-   */
-  void writeUnlock() {
-    lock.writeLock().unlock();
-  }
-
-  /**
-   * Increment the active count of the lock.
-   */
-  void incrementActiveCount() {
-    count.incrementAndGet();
-  }
-
-  /**
-   * Decrement the active count of the lock.
-   */
-  void decrementActiveCount() {
-    count.decrementAndGet();
-  }
-
-  /**
-   * Returns the active count on the lock.
-   *
-   * @return Number of active leases on the lock.
-   */
-  int getActiveLockCount() {
-    return count.get();
-  }
-
-  /**
-   * Resets the active count on the lock.
-   */
-  void resetCounter() {
-    count.set(0);
-  }
-
-  @Override
-  public String toString() {
-    return lock.toString();
-  }
-}
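
A minimal usage sketch of the deleted ActiveLock follows. Its lock methods are
package-private, so this code would have to sit in org.apache.hadoop.ozone.lock;
the protected resource is illustrative.

    ActiveLock lock = ActiveLock.newInstance(true); // fair ordering

    lock.incrementActiveCount(); // mark the lock as in use
    lock.writeLock();
    try {
      // ... mutate the protected resource exclusively ...
    } finally {
      lock.writeUnlock();
      lock.decrementActiveCount(); // a count of 0 lets the pool reclaim it
    }
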
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
deleted file mode 100644
index 3c2b5d4..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-
-import org.apache.commons.pool2.impl.GenericObjectPool;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.function.Consumer;
-
-/**
- * Manages the locks on a given resource. A new lock is created for each
- * and every unique resource. Uniqueness of resource depends on the
- * {@code equals} implementation of it.
- */
-public class LockManager<R> {
-
-  private static final Logger LOG = LoggerFactory.getLogger(LockManager.class);
-
-  private final Map<R, ActiveLock> activeLocks = new ConcurrentHashMap<>();
-  private final GenericObjectPool<ActiveLock> lockPool;
-
-  /**
-   * Creates a new LockManager instance with the given Configuration and uses
-   * non-fair mode for locks.
-   *
-   * @param conf Configuration object
-   */
-  public LockManager(final Configuration conf) {
-    this(conf, false);
-  }
-
-
-  /**
-   * Creates a new LockManager instance with the given Configuration.
-   *
-   * @param conf Configuration object
-   * @param fair - true to use fair lock ordering, else non-fair lock ordering.
-   */
-  public LockManager(final Configuration conf, boolean fair) {
-    final int maxPoolSize = conf.getInt(
-        HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY,
-        HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY_DEFAULT);
-    lockPool =
-        new GenericObjectPool<>(new PooledLockFactory(fair));
-    lockPool.setMaxTotal(maxPoolSize);
-  }
-
-  /**
-   * Acquires the lock on given resource.
-   *
-   * <p>If the lock is not available then the current thread becomes
-   * disabled for thread scheduling purposes and lies dormant until the
-   * lock has been acquired.
-   *
-   * @param resource on which the lock has to be acquired
-   * @deprecated Use {@link LockManager#writeLock} instead
-   */
-  public void lock(final R resource) {
-    writeLock(resource);
-  }
-
-  /**
-   * Releases the lock on given resource.
-   *
-   * @param resource for which the lock has to be released
-   * @deprecated Use {@link LockManager#writeUnlock} instead
-   */
-  public void unlock(final R resource) {
-    writeUnlock(resource);
-  }
-
-  /**
-   * Acquires the read lock on given resource.
-   *
-   * <p>Acquires the read lock on resource if the write lock is not held by
-   * another thread and returns immediately.
-   *
-   * <p>If the write lock on resource is held by another thread then
-   * the current thread becomes disabled for thread scheduling
-   * purposes and lies dormant until the read lock has been acquired.
-   *
-   * @param resource on which the read lock has to be acquired
-   */
-  public void readLock(final R resource) {
-    acquire(resource, ActiveLock::readLock);
-  }
-
-  /**
-   * Releases the read lock on given resource.
-   *
-   * @param resource for which the read lock has to be released
-   * @throws IllegalMonitorStateException if the current thread does not
-   *                                      hold this lock
-   */
-  public void readUnlock(final R resource) throws IllegalMonitorStateException {
-    release(resource, ActiveLock::readUnlock);
-  }
-
-  /**
-   * Acquires the write lock on given resource.
-   *
-   * <p>Acquires the write lock on resource if neither the read nor write lock
-   * are held by another thread and returns immediately.
-   *
-   * <p>If the current thread already holds the write lock then the
-   * hold count is incremented by one and the method returns
-   * immediately.
-   *
-   * <p>If the lock is held by another thread then the current
-   * thread becomes disabled for thread scheduling purposes and
-   * lies dormant until the write lock has been acquired.
-   *
-   * @param resource on which the lock has to be acquired
-   */
-  public void writeLock(final R resource) {
-    acquire(resource, ActiveLock::writeLock);
-  }
-
-  /**
-   * Releases the write lock on given resource.
-   *
-   * @param resource for which the lock has to be released
-   * @throws IllegalMonitorStateException if the current thread does not
-   *                                      hold this lock
-   */
-  public void writeUnlock(final R resource)
-      throws IllegalMonitorStateException {
-    release(resource, ActiveLock::writeUnlock);
-  }
-
-  /**
-   * Acquires the lock on given resource using the provided lock function.
-   *
-   * @param resource on which the lock has to be acquired
-   * @param lockFn function to acquire the lock
-   */
-  private void acquire(final R resource, final Consumer<ActiveLock> lockFn) {
-    lockFn.accept(getLockForLocking(resource));
-  }
-
-  /**
-   * Releases the lock on given resource using the provided release function.
-   *
-   * @param resource for which the lock has to be released
-   * @param releaseFn function to release the lock
-   */
-  private void release(final R resource, final Consumer<ActiveLock> releaseFn) {
-    final ActiveLock lock = getLockForReleasing(resource);
-    releaseFn.accept(lock);
-    decrementActiveLockCount(resource);
-  }
-
-  /**
-   * Returns {@link ActiveLock} instance for the given resource,
-   * on which the lock can be acquired.
-   *
-   * @param resource on which the lock has to be acquired
-   * @return {@link ActiveLock} instance
-   */
-  private ActiveLock getLockForLocking(final R resource) {
-    /*
-     * While getting a lock object for locking we should
-     * atomically increment the active count of the lock.
-     *
-     * This is to avoid cases where the selected lock could
-     * be removed from the activeLocks map and returned to
-     * the object pool.
-     */
-    return activeLocks.compute(resource, (k, v) -> {
-      final ActiveLock lock;
-      try {
-        if (v == null) {
-          lock = lockPool.borrowObject();
-        } else {
-          lock = v;
-        }
-        lock.incrementActiveCount();
-      } catch (Exception ex) {
-        LOG.error("Unable to obtain lock.", ex);
-        throw new RuntimeException(ex);
-      }
-      return lock;
-    });
-  }
-
-  /**
-   * Returns {@link ActiveLock} instance for the given resource,
-   * for which the lock has to be released.
-   *
-   * @param resource for which the lock has to be released
-   * @return {@link ActiveLock} instance
-   */
-  private ActiveLock getLockForReleasing(final R resource) {
-    if (activeLocks.containsKey(resource)) {
-      return activeLocks.get(resource);
-    }
-    // Someone is releasing a lock which was never acquired.
-    LOG.error("Trying to release the lock on {}, which was never acquired.",
-        resource);
-    throw new IllegalMonitorStateException("Releasing lock on resource "
-        + resource + " without acquiring lock");
-  }
-
-  /**
-   * Decrements the active lock count and returns the {@link ActiveLock}
-   * object to pool if the active count is 0.
-   *
-   * @param resource resource to which the ActiveLock is associated
-   */
-  private void decrementActiveLockCount(final R resource) {
-    activeLocks.computeIfPresent(resource, (k, v) -> {
-      v.decrementActiveCount();
-      if (v.getActiveLockCount() != 0) {
-        return v;
-      }
-      lockPool.returnObject(v);
-      return null;
-    });
-  }
-
-}
\ No newline at end of file
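
A minimal sketch of how the deleted LockManager was typically driven, assuming
a String-keyed resource; the resource name is illustrative.

    Configuration conf = new Configuration(); // org.apache.hadoop.conf
    LockManager<String> manager = new LockManager<>(conf); // non-fair locks

    manager.writeLock("volume1");
    try {
      // ... exclusive work on volume1 ...
    } finally {
      manager.writeUnlock("volume1");
    }

    manager.readLock("volume1");
    try {
      // ... shared reads on volume1 ...
    } finally {
      manager.readUnlock("volume1");
    }
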
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java
deleted file mode 100644
index 1e3ba05..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-
-import org.apache.commons.pool2.BasePooledObjectFactory;
-import org.apache.commons.pool2.PooledObject;
-import org.apache.commons.pool2.impl.DefaultPooledObject;
-
-/**
- * Pool factory to create {@code ActiveLock} instances.
- */
-public class PooledLockFactory extends BasePooledObjectFactory<ActiveLock> {
-
-  private boolean fairness;
-
-  PooledLockFactory(boolean fair) {
-    this.fairness = fair;
-  }
-  @Override
-  public ActiveLock create() throws Exception {
-    return ActiveLock.newInstance(fairness);
-  }
-
-  @Override
-  public PooledObject<ActiveLock> wrap(ActiveLock activeLock) {
-    return new DefaultPooledObject<>(activeLock);
-  }
-
-  @Override
-  public void activateObject(PooledObject<ActiveLock> pooledObject) {
-    pooledObject.getObject().resetCounter();
-  }
-}
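
For context, a sketch of how this factory plugs into commons-pool2, mirroring
the pool setup in the LockManager above. The constructor is package-private, so
this would live in org.apache.hadoop.ozone.lock; the pool size is illustrative.

    GenericObjectPool<ActiveLock> pool =
        new GenericObjectPool<>(new PooledLockFactory(false)); // non-fair
    pool.setMaxTotal(100);

    // borrowObject() declares "throws Exception"; handling elided here.
    ActiveLock lock = pool.borrowObject(); // activateObject() resets counter
    try {
      // ... hand the lock to a caller ...
    } finally {
      pool.returnObject(lock); // recycled once no resource references it
    }
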
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java
deleted file mode 100644
index 5c677ce..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-/*
- This package contains the lock related classes.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java
deleted file mode 100644
index db399db..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-/**
- This package contains classes that support the Ozone implementation on the
- datanode side.
-
- Main parts of ozone on datanode are:
-
- 1. REST Interface - This code lives under the web directory and listens to the
- WebHDFS port.
-
- 2. Datanode container classes: These support persistence of Ozone objects on
- the datanode. These classes live under the container directory.
-
- 3. Client and Shell: We also support an Ozone REST client lib; these live
- under web/client and web/ozShell.
-
- */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ProtocolMessageMetrics.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ProtocolMessageMetrics.java
deleted file mode 100644
index 96725f2..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ProtocolMessageMetrics.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsInfo;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-
-import com.google.protobuf.ProtocolMessageEnum;
-
-/**
- * Metrics to count all the subtypes of a specific message.
- */
-public class ProtocolMessageMetrics implements MetricsSource {
-
-  private String name;
-
-  private String description;
-
-  private Map<ProtocolMessageEnum, AtomicLong> counters =
-      new ConcurrentHashMap<>();
-
-  public static ProtocolMessageMetrics create(String name,
-      String description, ProtocolMessageEnum[] types) {
-    ProtocolMessageMetrics protocolMessageMetrics =
-        new ProtocolMessageMetrics(name, description,
-            types);
-    return protocolMessageMetrics;
-  }
-
-  public ProtocolMessageMetrics(String name, String description,
-      ProtocolMessageEnum[] values) {
-    this.name = name;
-    this.description = description;
-    for (ProtocolMessageEnum value : values) {
-      counters.put(value, new AtomicLong(0));
-    }
-  }
-
-  public void increment(ProtocolMessageEnum key) {
-    counters.get(key).incrementAndGet();
-  }
-
-  public void register() {
-    DefaultMetricsSystem.instance()
-        .register(name, description, this);
-  }
-
-  public void unregister() {
-    DefaultMetricsSystem.instance().unregisterSource(name);
-  }
-
-  @Override
-  public void getMetrics(MetricsCollector collector, boolean all) {
-    MetricsRecordBuilder builder = collector.addRecord(name);
-    counters.forEach((key, value) -> {
-      builder.addCounter(new MetricName(key.toString(), ""), value.longValue());
-    });
-    builder.endRecord();
-  }
-
-  /**
-   * Simple metrics info implementation.
-   */
-  public static class MetricName implements MetricsInfo {
-    private String name;
-    private String description;
-
-    public MetricName(String name, String description) {
-      this.name = name;
-      this.description = description;
-    }
-
-    @Override
-    public String name() {
-      return name;
-    }
-
-    @Override
-    public String description() {
-      return description;
-    }
-  }
-}
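
A usage sketch for the deleted ProtocolMessageMetrics. The enum Type stands in
for any protobuf-generated enum (they all implement ProtocolMessageEnum), and
the metric names are illustrative.

    ProtocolMessageMetrics metrics = ProtocolMessageMetrics.create(
        "OzoneManagerProtocol",
        "Counters for each OM protocol message type",
        Type.values()); // one AtomicLong counter per enum value
    metrics.register(); // publish via DefaultMetricsSystem

    metrics.increment(Type.CreateVolume); // count one message of this type

    metrics.unregister(); // on shutdown
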
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
deleted file mode 100644
index 860386d..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.protocolPB;
-
-/**
- * This package contains classes for the Protocol Buffers binding of Ozone
- * protocols.
- */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java
deleted file mode 100644
index 4177b96..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.utils;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.type.CollectionType;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * JSON Utility functions used in ozone.
- */
-public final class JsonUtils {
-
-  // Reuse ObjectMapper instance for improving performance.
-  // ObjectMapper is thread-safe as long as we always configure the instance
-  // before use.
-  private static final ObjectMapper MAPPER = new ObjectMapper();
-  private static final ObjectReader READER = MAPPER.readerFor(Object.class);
-  private static final ObjectWriter WRITER =
-      MAPPER.writerWithDefaultPrettyPrinter();
-
-  private JsonUtils() {
-    // Never constructed
-  }
-
-  public static String toJsonStringWithDefaultPrettyPrinter(Object obj)
-      throws IOException {
-    return WRITER.writeValueAsString(obj);
-  }
-
-  public static String toJsonString(Object obj) throws IOException {
-    return MAPPER.writeValueAsString(obj);
-  }
-
-  /**
-   * Deserialize a list of elements from a given string,
-   * each element in the list is in the given type.
-   *
-   * @param str json string.
-   * @param elementType element type.
-   * @return List of elements of type elementType
-   * @throws IOException if the string cannot be parsed as JSON.
-   */
-  public static List<?> toJsonList(String str, Class<?> elementType)
-      throws IOException {
-    CollectionType type = MAPPER.getTypeFactory()
-        .constructCollectionType(List.class, elementType);
-    return MAPPER.readValue(str, type);
-  }
-}
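
A short sketch of the deleted helpers in use; both methods declare IOException,
and the payloads are illustrative (Collections is java.util.Collections).

    String pretty = JsonUtils.toJsonStringWithDefaultPrettyPrinter(
        Collections.singletonMap("volume", "vol1")); // pretty-printed JSON

    List<?> names = JsonUtils.toJsonList("[\"a\",\"b\"]", String.class);
    // names -> [a, b], each element deserialized as a String
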
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java
deleted file mode 100644
index e5812c0..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.utils;
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
deleted file mode 100644
index 1bfe4d1..0000000
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ /dev/null
@@ -1,469 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and Unstable.
- * Please see http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/InterfaceClassification.html
- * for what changes are allowed for an *Unstable* .proto interface.
- */
-
-// This file contains protocol buffers that are used to transfer data
-// to and from the datanode.
-syntax = "proto2";
-option java_package = "org.apache.hadoop.hdds.protocol.datanode.proto";
-option java_outer_classname = "ContainerProtos";
-option java_generate_equals_and_hash = true;
-package hadoop.hdds.datanode;
-
-/**
- * Commands that are used to manipulate the state of containers on a datanode.
- *
- * These commands allow us to work against the datanode - from
- * StorageContainer Manager as well as clients.
- *
- *  1. CreateContainer - This call is usually made by Storage Container
- *     manager, when we need to create a new container on a given datanode.
- *
- *  2. ReadContainer - Allows end user to stat a container. For example
- *     this allows us to return the metadata of a container.
- *
- *  3. UpdateContainer - Updates a container metadata.
-
- *  4. DeleteContainer - This call is made to delete a container.
- *
- *  5. ListContainer - Returns the list of containers on this
- *     datanode. This will be used by tests and tools.
- *
- *  6. PutBlock - Given a valid container, creates a block.
- *
- *  7. GetBlock - Allows user to read the metadata of a block.
- *
- *  8. DeleteBlock - Deletes a given block.
- *
- *  9. ListBlock - Returns a list of blocks that are present inside
- *      a given container.
- *
- *  10. ReadChunk - Allows us to read a chunk.
- *
- *  11. DeleteChunk - Delete an unused chunk.
- *
- *  12. WriteChunk - Allows us to write a chunk.
- *
- *  13. ListChunk - Given a Container/Block returns the list of Chunks.
- *
- *  14. CompactChunk - Re-writes a chunk based on Offsets.
- *
- *  15. PutSmallFile - A single RPC that combines both putBlock and WriteChunk.
- *
- *  16. GetSmallFile - A single RPC that combines both getBlock and ReadChunk.
- *
- *  17. CloseContainer - Closes an open container and makes it immutable.
- *
- *  18. CopyContainer - Copies a container from a remote machine.
- */
-
-enum Type {
-  CreateContainer = 1;
-  ReadContainer = 2;
-  UpdateContainer = 3;
-  DeleteContainer = 4;
-  ListContainer = 5;
-
-  PutBlock = 6;
-  GetBlock = 7;
-  DeleteBlock = 8;
-  ListBlock = 9;
-
-  ReadChunk = 10;
-  DeleteChunk = 11;
-  WriteChunk = 12;
-  ListChunk = 13;
-  CompactChunk = 14;
-
-  /** Combines Block and Chunk Operation into Single RPC. */
-  PutSmallFile = 15;
-  GetSmallFile = 16;
-  CloseContainer = 17;
-  GetCommittedBlockLength = 18;
-}
-
-
-enum Result {
-  SUCCESS = 1;
-  UNSUPPORTED_REQUEST = 2;
-  MALFORMED_REQUEST = 3;
-  CONTAINER_INTERNAL_ERROR = 4;
-  INVALID_CONFIG = 5;
-  INVALID_FILE_HASH_FOUND = 6;
-  CONTAINER_EXISTS = 7;
-  NO_SUCH_ALGORITHM = 8;
-  CONTAINER_NOT_FOUND = 9;
-  IO_EXCEPTION = 10;
-  UNABLE_TO_READ_METADATA_DB = 11;
-  NO_SUCH_BLOCK = 12;
-  OVERWRITE_FLAG_REQUIRED = 13;
-  UNABLE_TO_FIND_DATA_DIR = 14;
-  INVALID_WRITE_SIZE = 15;
-  CHECKSUM_MISMATCH = 16;
-  UNABLE_TO_FIND_CHUNK = 17;
-  PROTOC_DECODING_ERROR = 18;
-  INVALID_ARGUMENT = 19;
-  PUT_SMALL_FILE_ERROR = 20;
-  GET_SMALL_FILE_ERROR = 21;
-  CLOSED_CONTAINER_IO = 22;
-  ERROR_IN_COMPACT_DB = 24;
-  UNCLOSED_CONTAINER_IO = 25;
-  DELETE_ON_OPEN_CONTAINER = 26;
-  CLOSED_CONTAINER_RETRY = 27;
-  INVALID_CONTAINER_STATE = 28;
-  DISK_OUT_OF_SPACE = 29;
-  CONTAINER_ALREADY_EXISTS = 30;
-  CONTAINER_METADATA_ERROR = 31;
-  CONTAINER_FILES_CREATE_ERROR = 32;
-  CONTAINER_CHECKSUM_ERROR = 33;
-  UNKNOWN_CONTAINER_TYPE = 34;
-  BLOCK_NOT_COMMITTED = 35;
-  CONTAINER_UNHEALTHY = 36;
-  UNKNOWN_BCSID = 37;
-  BCSID_MISMATCH = 38;
-  CONTAINER_NOT_OPEN = 39;
-  CONTAINER_MISSING = 40;
-  BLOCK_TOKEN_VERIFICATION_FAILED = 41;
-  ERROR_IN_DB_SYNC = 42;
-}
-
-/**
- * Block ID that uniquely identify a block in Datanode.
- */
-message DatanodeBlockID {
-  required int64 containerID = 1;
-  required int64 localID = 2;
-  optional uint64 blockCommitSequenceId = 3 [default = 0];
-}
-
-message KeyValue {
-  required string key = 1;
-  optional string value = 2;
-}
-
-message ContainerCommandRequestProto {
-  required   Type cmdType = 1; // Type of the command
-
-  // A string that identifies this command; we generate a Trace ID in the
-  // Ozone frontend, and this allows us to trace the command all over Ozone.
-  optional   string traceID = 2;
-
-  required   int64 containerID = 3;
-  required   string datanodeUuid = 4;
-  optional   string pipelineID = 5;
-
-  // One of the following commands is available when the corresponding
-  // cmdType is set. At the protocol level we allow only
-  // one command in each packet.
-  // TODO : Upgrade to Protobuf 2.6 or later.
-  optional   CreateContainerRequestProto createContainer = 6;
-  optional   ReadContainerRequestProto readContainer = 7;
-  optional   UpdateContainerRequestProto updateContainer = 8;
-  optional   DeleteContainerRequestProto deleteContainer = 9;
-  optional   ListContainerRequestProto listContainer = 10;
-  optional   CloseContainerRequestProto closeContainer = 11;
-
-  optional   PutBlockRequestProto putBlock = 12;
-  optional   GetBlockRequestProto getBlock = 13;
-  optional   DeleteBlockRequestProto deleteBlock = 14;
-  optional   ListBlockRequestProto listBlock = 15;
-
-  optional   ReadChunkRequestProto readChunk = 16;
-  optional   WriteChunkRequestProto writeChunk = 17;
-  optional   DeleteChunkRequestProto deleteChunk = 18;
-  optional   ListChunkRequestProto listChunk = 19;
-
-  optional   PutSmallFileRequestProto putSmallFile = 20;
-  optional   GetSmallFileRequestProto getSmallFile = 21;
-  optional   GetCommittedBlockLengthRequestProto getCommittedBlockLength = 22;
-  optional   string encodedToken = 23;
-}
-
-message ContainerCommandResponseProto {
-  required   Type cmdType = 1;
-  optional   string traceID = 2;
-
-  required   Result result = 3;
-  optional   string message = 4;
-
-  optional   CreateContainerResponseProto createContainer = 5;
-  optional   ReadContainerResponseProto readContainer = 6;
-  optional   UpdateContainerResponseProto updateContainer = 7;
-  optional   DeleteContainerResponseProto deleteContainer = 8;
-  optional   ListContainerResponseProto listContainer = 9;
-  optional   CloseContainerResponseProto closeContainer = 10;
-
-  optional   PutBlockResponseProto putBlock = 11;
-  optional   GetBlockResponseProto getBlock = 12;
-  optional   DeleteBlockResponseProto deleteBlock = 13;
-  optional   ListBlockResponseProto listBlock = 14;
-
-  optional   WriteChunkResponseProto writeChunk = 15;
-  optional   ReadChunkResponseProto readChunk = 16;
-  optional   DeleteChunkResponseProto deleteChunk = 17;
-  optional   ListChunkResponseProto listChunk = 18;
-
-  optional   PutSmallFileResponseProto putSmallFile = 19;
-  optional   GetSmallFileResponseProto getSmallFile = 20;
-
-  optional GetCommittedBlockLengthResponseProto getCommittedBlockLength = 21;
-}
-
-message ContainerDataProto {
-  enum State {
-    OPEN = 1;
-    CLOSING = 2;
-    QUASI_CLOSED =3;
-    CLOSED = 4;
-    UNHEALTHY = 5;
-    INVALID = 6;
-  }
-  required int64 containerID = 1;
-  repeated KeyValue metadata = 2;
-  optional string containerPath = 4;
-  optional int64 bytesUsed = 6;
-  optional int64 size = 7;
-  optional int64 blockCount = 8;
-  optional State state = 9 [default = OPEN];
-  optional ContainerType containerType = 10 [default = KeyValueContainer];
-}
-
-message Container2BCSIDMapProto {
-   // repeated Container2BCSIDMapEntryProto container2BCSID = 1;
-    map <int64, int64> container2BCSID = 1;
-}
-
-enum ContainerType {
-  KeyValueContainer = 1;
-}
-
-
-// Container Messages.
-message  CreateContainerRequestProto {
-  repeated KeyValue metadata = 2;
-  optional ContainerType containerType = 3 [default = KeyValueContainer];
-}
-
-message  CreateContainerResponseProto {
-}
-
-message  ReadContainerRequestProto {
-}
-
-message  ReadContainerResponseProto {
-  optional ContainerDataProto containerData = 1;
-}
-
-message  UpdateContainerRequestProto {
-  repeated KeyValue metadata = 2;
-  optional bool forceUpdate = 3 [default = false];
-}
-
-message  UpdateContainerResponseProto {
-}
-
-message  DeleteContainerRequestProto {
-  optional bool forceDelete = 2 [default = false];
-}
-
-message  DeleteContainerResponseProto {
-}
-
-message  ListContainerRequestProto {
-  optional uint32 count = 2; // Max Results to return
-}
-
-message  ListContainerResponseProto {
-  repeated ContainerDataProto containerData = 1;
-}
-
-message CloseContainerRequestProto {
-}
-
-message CloseContainerResponseProto {
-  optional string hash = 1;
-  optional int64 containerID = 2;
-}
-
-message BlockData {
-  required DatanodeBlockID blockID = 1;
-  optional int64 flags = 2; // for future use.
-  repeated KeyValue metadata = 3;
-  repeated ChunkInfo chunks = 4;
-  optional int64 size = 5;
-}
-
-// Block Messages.
-message  PutBlockRequestProto {
-  required BlockData blockData = 1;
-}
-
-message  PutBlockResponseProto {
-  required GetCommittedBlockLengthResponseProto committedBlockLength = 1;
-}
-
-message  GetBlockRequestProto  {
-  required DatanodeBlockID blockID = 1;
-}
-
-message  GetBlockResponseProto  {
-  required BlockData blockData = 1;
-}
-
-
-message  DeleteBlockRequestProto {
-  required DatanodeBlockID blockID = 1;
-}
-
-message  GetCommittedBlockLengthRequestProto {
-  required DatanodeBlockID blockID = 1;
-}
-
-message  GetCommittedBlockLengthResponseProto {
-  required DatanodeBlockID blockID = 1;
-  required int64 blockLength = 2;
-}
-
-message   DeleteBlockResponseProto {
-}
-
-message  ListBlockRequestProto {
-  optional int64 startLocalID = 2;
-  required uint32 count = 3;
-
-}
-
-message  ListBlockResponseProto {
-  repeated BlockData blockData = 1;
-}
-
-// Chunk Operations
-
-message ChunkInfo {
-  required string chunkName = 1;
-  required uint64 offset = 2;
-  required uint64 len = 3;
-  repeated KeyValue metadata = 4;
-  required ChecksumData checksumData =5;
-}
-
-message ChecksumData {
-  required ChecksumType type = 1;
-  required uint32 bytesPerChecksum = 2;
-  repeated bytes checksums = 3;
-}
-
-enum ChecksumType {
-    NONE = 1;
-    CRC32 = 2;
-    CRC32C = 3;
-    SHA256 = 4;
-    MD5 = 5;
-}
-
-message  WriteChunkRequestProto  {
-  required DatanodeBlockID blockID = 1;
-  required ChunkInfo chunkData = 2;
-  optional bytes data = 3;
-}
-
-message  WriteChunkResponseProto {
-}
-
-message  ReadChunkRequestProto  {
-  required DatanodeBlockID blockID = 1;
-  required ChunkInfo chunkData = 2;
-}
-
-message  ReadChunkResponseProto {
-  required DatanodeBlockID blockID = 1;
-  required ChunkInfo chunkData = 2;
-  required bytes data = 3;
-}
-
-message  DeleteChunkRequestProto {
-  required DatanodeBlockID blockID = 1;
-  required ChunkInfo chunkData = 2;
-}
-
-message  DeleteChunkResponseProto {
-}
-
-message  ListChunkRequestProto {
-  required DatanodeBlockID blockID = 1;
-  required string prevChunkName = 2;
-  required uint32 count = 3;
-}
-
-message  ListChunkResponseProto {
-  repeated ChunkInfo chunkData = 1;
-}
-
-/** For small file access, this combines WriteChunk and PutBlock into a
-single RPC. */
-
-message PutSmallFileRequestProto {
-  required PutBlockRequestProto block = 1;
-  required ChunkInfo chunkInfo = 2;
-  required bytes data = 3;
-}
-
-
-message PutSmallFileResponseProto {
-  required GetCommittedBlockLengthResponseProto committedBlockLength = 1;
-}
-
-message GetSmallFileRequestProto {
-  required GetBlockRequestProto block = 1;
-}
-
-message GetSmallFileResponseProto {
-  required ReadChunkResponseProto data = 1;
-}
-
-message CopyContainerRequestProto {
-  required int64 containerID = 1;
-  required uint64 readOffset = 2;
-  optional uint64 len = 3;
-}
-
-message CopyContainerResponseProto {
-  required int64 containerID = 1;
-  required uint64 readOffset = 2;
-  required uint64 len = 3;
-  required bool eof = 4;
-  required bytes data = 5;
-  optional int64 checksum = 6;
-}
-
-service XceiverClientProtocolService {
-  // A client-to-datanode RPC to send container commands
-  rpc send(stream ContainerCommandRequestProto) returns
-    (stream ContainerCommandResponseProto) {};
-
-}
-
-service IntraDatanodeProtocolService {
-  // An intradatanode service to copy the raw container data between nodes
-  rpc download (CopyContainerRequestProto) returns (stream CopyContainerResponseProto);
-}
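
For reference, a hedged sketch of assembling one of the commands above with the
protobuf-generated Java builders (outer class ContainerProtos per the
java_outer_classname option in the file); the IDs and UUID are illustrative.

    ContainerProtos.ContainerCommandRequestProto request =
        ContainerProtos.ContainerCommandRequestProto.newBuilder()
            .setCmdType(ContainerProtos.Type.GetBlock)
            .setContainerID(1L)
            .setDatanodeUuid("datanode-uuid")   // required by the schema
            .setGetBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
                .setBlockID(ContainerProtos.DatanodeBlockID.newBuilder()
                    .setContainerID(1L)
                    .setLocalID(100L)))
            .build();
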
diff --git a/hadoop-hdds/common/src/main/proto/FSProtos.proto b/hadoop-hdds/common/src/main/proto/FSProtos.proto
deleted file mode 100644
index c3b768a..0000000
--- a/hadoop-hdds/common/src/main/proto/FSProtos.proto
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and stable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *stable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.fs";
-option java_outer_classname = "FSProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.fs;
-
-message FsPermissionProto {
-  required uint32 perm = 1; // UNIX-style mode bits
-}
-
-/*
- * FileStatus encoding. Field IDs match those from HdfsFileStatusProto, but
- * cross-serialization is not an explicitly supported use case. Unlike HDFS,
- * most fields are optional and do not define defaults.
- */
-message FileStatusProto {
-  enum FileType {
-    FT_DIR     = 1;
-    FT_FILE    = 2;
-    FT_SYMLINK = 3;
-  }
-  enum Flags {
-    HAS_ACL           = 0x01; // has ACLs
-    HAS_CRYPT         = 0x02; // encrypted
-    HAS_EC            = 0x04; // erasure coded
-    SNAPSHOT_ENABLED  = 0x08; // snapshot enabled
-  }
-  required FileType fileType            = 1;
-  required string path                  = 2;
-  optional uint64 length                = 3;
-  optional FsPermissionProto permission = 4;
-  optional string owner                 = 5;
-  optional string group                 = 6;
-  optional uint64 modification_time     = 7;
-  optional uint64 access_time           = 8;
-  optional string symlink               = 9;
-  optional uint32 block_replication     = 10;
-  optional uint64 block_size            = 11;
-  // locations                          = 12
-  // alias                              = 13
-  // childrenNum                        = 14
-  optional bytes encryption_data        = 15;
-  // storagePolicy                      = 16
-  optional bytes ec_data                = 17;
-  optional uint32 flags                 = 18 [default = 0];
-}
-
-/**
- * Placeholder type for consistent basic FileSystem operations.
- */
-message LocalFileSystemPathHandleProto {
-    optional uint64 mtime = 1;
-    optional string path = 2;
-}
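
Note that the Flags enum above is a bitmask packed into the uint32 flags field
rather than a plain enum value. A sketch of testing membership with the
*_VALUE constants, assuming the standard protobuf-Java codegen:

    int flags = FSProtos.FileStatusProto.Flags.HAS_ACL_VALUE
        | FSProtos.FileStatusProto.Flags.HAS_EC_VALUE;

    boolean hasAcl =
        (flags & FSProtos.FileStatusProto.Flags.HAS_ACL_VALUE) != 0;   // true
    boolean crypt =
        (flags & FSProtos.FileStatusProto.Flags.HAS_CRYPT_VALUE) != 0; // false
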
diff --git a/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto b/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto
deleted file mode 100644
index 72e0e9f6..0000000
--- a/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for an *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.hdds.protocol.proto";
-
-option java_outer_classname = "SCMSecurityProtocolProtos";
-
-option java_generic_services = true;
-
-option java_generate_equals_and_hash = true;
-
-package hadoop.hdds.security;
-
-import "hdds.proto";
-
-/**
-All commands is send as request and all response come back via
-Response class. If adding new functions please follow this protocol, since
-our tracing and visibility tools depend on this pattern.
-*/
-message SCMSecurityRequest {
-    required Type cmdType = 1; // Type of the command
-
-    optional string traceID = 2;
-
-    optional SCMGetDataNodeCertRequestProto getDataNodeCertRequest = 3;
-    optional SCMGetOMCertRequestProto getOMCertRequest = 4;
-    optional SCMGetCertificateRequestProto getCertificateRequest = 5;
-    optional SCMGetCACertificateRequestProto getCACertificateRequest = 6;
-
-}
-
-message SCMSecurityResponse {
-    required Type cmdType = 1; // Type of the command
-
-    // A string that identifies this command; we generate a Trace ID in the
-    // Ozone frontend, and this allows us to trace the command all over Ozone.
-    optional string traceID = 2;
-
-    optional bool success = 3 [default = true];
-
-    optional string message = 4;
-
-    required Status status = 5;
-
-    optional SCMGetCertResponseProto getCertResponseProto = 6;
-
-}
-
-enum Type {
-    GetDataNodeCertificate = 1;
-    GetOMCertificate = 2;
-    GetCertificate = 3;
-    GetCACertificate = 4;
-}
-
-enum Status {
-    OK = 1;
-}
-/**
-* This message is sent by a datanode to prove its identity and get an
-* SCM-signed certificate.
-*/
-message SCMGetDataNodeCertRequestProto {
-    required DatanodeDetailsProto datanodeDetails = 1;
-    required string CSR = 2;
-}
-
-/**
-* This message is sent by OzoneManager to prove its identity and get an
-* SCM-signed certificate.
-*/
-message SCMGetOMCertRequestProto {
-    required OzoneManagerDetailsProto omDetails = 1;
-    required string CSR = 2;
-}
-
-/**
-* Proto request to get a certificate with given serial id.
-*/
-message SCMGetCertificateRequestProto {
-    required string certSerialId = 1;
-}
-
-/**
-* Proto request to get CA certificate.
-*/
-message SCMGetCACertificateRequestProto {
-}
-
-/**
- * Returns a certificate signed by SCM.
- */
-message SCMGetCertResponseProto {
-    enum ResponseCode {
-        success = 1;
-        authenticationFailed = 2;
-        invalidCSR = 3;
-    }
-    required ResponseCode responseCode = 1;
-    required string x509Certificate = 2; // Base64 encoded X509 certificate.
-    optional string x509CACertificate = 3; // Base64 encoded CA X509 certificate.
-}
-
-
-service SCMSecurityProtocolService {
-    rpc submitRequest (SCMSecurityRequest) returns (SCMSecurityResponse);
-}
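
A sketch of the request envelope pattern the comment above describes: every
call sets cmdType plus exactly one sub-request field. The generated outer
class is SCMSecurityProtocolProtos per the options above; the trace ID is
illustrative.

    SCMSecurityProtocolProtos.SCMSecurityRequest request =
        SCMSecurityProtocolProtos.SCMSecurityRequest.newBuilder()
            .setCmdType(SCMSecurityProtocolProtos.Type.GetCACertificate)
            .setTraceID("trace-1")
            .setGetCACertificateRequest(
                SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto
                    .getDefaultInstance())
            .build();
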
diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
deleted file mode 100644
index fc7a598..0000000
--- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
+++ /dev/null
@@ -1,212 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.hdds.protocol.proto";
-option java_outer_classname = "ScmBlockLocationProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.hdds.block;
-
-import "hdds.proto";
-
-
-// SCM Block protocol
-
-enum Type {
-  AllocateScmBlock   = 11;
-  DeleteScmKeyBlocks = 12;
-  GetScmInfo         = 13;
-  SortDatanodes      = 14;
-}
-
-message SCMBlockLocationRequest {
-  required Type cmdType = 1; // Type of the command
-
-  // A string that identifies this command; we generate a Trace ID in the
-  // Ozone frontend, and this allows us to trace the command all over Ozone.
-  optional string traceID = 2;
-
-  optional UserInfo userInfo = 3;
-
-  optional AllocateScmBlockRequestProto       allocateScmBlockRequest   = 11;
-  optional DeleteScmKeyBlocksRequestProto     deleteScmKeyBlocksRequest = 12;
-  optional hadoop.hdds.GetScmInfoRequestProto getScmInfoRequest         = 13;
-  optional SortDatanodesRequestProto          sortDatanodesRequest      = 14;
-}
-
-message SCMBlockLocationResponse {
-  required Type cmdType = 1; // Type of the command
-
-  // A string that identifies this command; we generate a Trace ID in the
-  // Ozone frontend, and this allows us to trace the command all over Ozone.
-  optional string traceID = 2;
-
-  optional bool success = 3 [default=true];
-
-  optional string message = 4;
-
-  required Status status = 5;
-
-  optional string leaderOMNodeId = 6;
-
-  optional AllocateScmBlockResponseProto       allocateScmBlockResponse   = 11;
-  optional DeleteScmKeyBlocksResponseProto     deleteScmKeyBlocksResponse = 12;
-  optional hadoop.hdds.GetScmInfoResponseProto getScmInfoResponse         = 13;
-  optional SortDatanodesResponseProto          sortDatanodesResponse      = 14;
-}
-
-/**
-    User information which will be extracted from the RPC context and used
-    during ACL validation.
-*/
-message UserInfo {
-  optional string userName = 1;
-  optional string remoteAddress = 3;
-}
-
-enum Status {
-  OK = 1;
-  FAILED_TO_LOAD_NODEPOOL = 2;
-  FAILED_TO_FIND_NODE_IN_POOL = 3;
-  FAILED_TO_FIND_HEALTHY_NODES = 4;
-  FAILED_TO_FIND_NODES_WITH_SPACE = 5;
-  FAILED_TO_FIND_SUITABLE_NODE = 6;
-  INVALID_CAPACITY = 7;
-  INVALID_BLOCK_SIZE = 8;
-  SAFE_MODE_EXCEPTION = 9;
-  FAILED_TO_LOAD_OPEN_CONTAINER = 10;
-  FAILED_TO_ALLOCATE_CONTAINER = 11;
-  FAILED_TO_CHANGE_CONTAINER_STATE = 12;
-  FAILED_TO_CHANGE_PIPELINE_STATE = 13;
-  CONTAINER_EXISTS = 14;
-  FAILED_TO_FIND_CONTAINER = 15;
-  FAILED_TO_FIND_CONTAINER_WITH_SPACE = 16;
-  BLOCK_EXISTS = 17;
-  FAILED_TO_FIND_BLOCK = 18;
-  IO_EXCEPTION = 19;
-  UNEXPECTED_CONTAINER_STATE = 20;
-  SCM_NOT_INITIALIZED = 21;
-  DUPLICATE_DATANODE = 22;
-  NO_SUCH_DATANODE = 23;
-  NO_REPLICA_FOUND = 24;
-  FAILED_TO_FIND_ACTIVE_PIPELINE = 25;
-  FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY = 26;
-  FAILED_TO_ALLOCATE_ENOUGH_BLOCKS = 27;
-  INTERNAL_ERROR = 29;
-}
-
-/**
-* Request sent to SCM asking it to allocate blocks of the specified size.
-*/
-message AllocateScmBlockRequestProto {
-  required uint64 size = 1;
-  required uint32 numBlocks = 2;
-  required ReplicationType type = 3;
-  required hadoop.hdds.ReplicationFactor factor = 4;
-  required string owner = 5;
-  optional ExcludeListProto excludeList = 7;
-}
-
-/**
- * A delete-key request sent by OM to SCM; it may contain
- * multiple keys (and their blocks).
- */
-message DeleteScmKeyBlocksRequestProto {
-  repeated KeyBlocks keyBlocks = 1;
-}
-
-/**
- * An object key and all its associated blocks.
- * We need to encapsulate the object key name plus the blocks in this protocol
- * because SCM needs to respond to OM with the keys it has deleted.
- * If the response only contained blocks, it would be very expensive for
- * OM to figure out which keys have been deleted.
- */
-message KeyBlocks {
-  required string key = 1;
-  repeated BlockID blocks = 2;
-}
-
-/**
- * A delete-key response from SCM to OM; it contains multiple child results.
- * Each child result represents a key deletion result; only if all blocks of
- * a key are successfully deleted is the key result considered a success.
- */
-message DeleteScmKeyBlocksResponseProto {
-  repeated DeleteKeyBlocksResultProto results = 1;
-}
-
-/**
- * A key deletion result. It contains all the block deletion results.
- */
-message DeleteKeyBlocksResultProto {
-  required string objectKey = 1;
-  repeated DeleteScmBlockResult blockResults = 2;
-}
-
-message DeleteScmBlockResult {
-  enum Result {
-    success = 1;
-    safeMode = 2;
-    errorNotFound = 3;
-    unknownFailure = 4;
-  }
-  required Result result = 1;
-  required BlockID blockID = 2;
-}
-
-message AllocateBlockResponse {
-  optional ContainerBlockID containerBlockID = 1;
-  optional hadoop.hdds.Pipeline pipeline = 2;
-}
-
-/**
- * Reply from SCM containing the allocated blocks.
- */
-message AllocateScmBlockResponseProto {
-  repeated AllocateBlockResponse blocks = 3;
-}
-
-/**
- * Datanode sort request sent by OM to SCM; it may contain
- * multiple datanodes.
- */
-message SortDatanodesRequestProto{
-  required string client = 1;
-  repeated string nodeNetworkName = 2;
-}
-
-message SortDatanodesResponseProto{
-  repeated DatanodeDetailsProto node = 1;
-}
-
-/**
- * Protocol used from OzoneManager to StorageContainerManager.
- * See request and response messages for details of the RPC calls.
- */
-service ScmBlockLocationProtocolService {
-
-  rpc send(SCMBlockLocationRequest)
-      returns (SCMBlockLocationResponse);
-}
diff --git a/hadoop-hdds/common/src/main/proto/Security.proto b/hadoop-hdds/common/src/main/proto/Security.proto
deleted file mode 100644
index a3ce739..0000000
--- a/hadoop-hdds/common/src/main/proto/Security.proto
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and stable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *stable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.security.proto";
-option java_outer_classname = "SecurityProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.common;
-
-/**
- * Security token identifier
- */
-message TokenProto {
-  required bytes identifier = 1;
-  required bytes password = 2;
-  required string kind = 3;
-  required string service = 4;
-}
-
-message CredentialsKVProto {
-  required string alias = 1;
-  optional hadoop.common.TokenProto token = 2;
-  optional bytes secret = 3;
-}
-
-message CredentialsProto {
-  repeated hadoop.common.CredentialsKVProto tokens = 1;
-  repeated hadoop.common.CredentialsKVProto secrets = 2;
-}
-
-message GetDelegationTokenRequestProto {
-  required string renewer = 1;
-}
-
-message GetDelegationTokenResponseProto {
-  optional hadoop.common.TokenProto token = 1;
-}
-
-message RenewDelegationTokenRequestProto {
-  required hadoop.common.TokenProto token = 1;
-}
-
-message RenewDelegationTokenResponseProto {
-  required uint64 newExpiryTime = 1;
-}
-
-message CancelDelegationTokenRequestProto {
-  required hadoop.common.TokenProto token = 1;
-}
-
-message CancelDelegationTokenResponseProto { // void response
-}
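As a rough illustration of the delegation token round trip defined above (a sketch only; the RPC transport and the principal name are assumptions, and the builders follow the standard protobuf-java conventions):

    // SecurityProtos is the generated outer class per the java_outer_classname option.
    SecurityProtos.GetDelegationTokenRequestProto request =
        SecurityProtos.GetDelegationTokenRequestProto.newBuilder()
            .setRenewer("scm/scm-host@EXAMPLE.COM")   // hypothetical renewer principal
            .build();
    // After sending it over RPC, the response optionally carries a token:
    // if (response.hasToken()) {
    //   SecurityProtos.TokenProto token = response.getToken();
    //   // token.getIdentifier(), token.getPassword(), token.getKind(), token.getService()
    // }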
diff --git a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
deleted file mode 100644
index 8ea72b6..0000000
--- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
+++ /dev/null
@@ -1,330 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for an *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.hdds.protocol.proto";
-option java_outer_classname = "StorageContainerLocationProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.hdds.container;
-
-import "hdds.proto";
-
-/**
-  All functions are dispatched as Request/Response under Ozone.
-  If you add new functions, please add them using the same pattern.
-*/
-message ScmContainerLocationRequest {
-  required Type cmdType = 1; // Type of the command
-
-  // A string that identifies this command; we generate a trace ID in the Ozone
-  // frontend, and this allows us to trace that command all over Ozone.
-  optional string traceID = 2;
-
-  optional ContainerRequestProto containerRequest = 6;
-  optional GetContainerRequestProto getContainerRequest = 7;
-  optional GetContainerWithPipelineRequestProto getContainerWithPipelineRequest = 8;
-  optional SCMListContainerRequestProto scmListContainerRequest = 9;
-  optional SCMDeleteContainerRequestProto scmDeleteContainerRequest = 10;
-  optional NodeQueryRequestProto nodeQueryRequest = 11;
-  optional ObjectStageChangeRequestProto objectStageChangeRequest = 12;
-  optional PipelineRequestProto pipelineRequest = 13;
-  optional ListPipelineRequestProto listPipelineRequest = 14;
-  optional ActivatePipelineRequestProto activatePipelineRequest = 15;
-  optional DeactivatePipelineRequestProto deactivatePipelineRequest = 16;
-  optional ClosePipelineRequestProto closePipelineRequest = 17;
-  optional GetScmInfoRequestProto getScmInfoRequest = 18;
-  optional InSafeModeRequestProto inSafeModeRequest = 19;
-  optional ForceExitSafeModeRequestProto forceExitSafeModeRequest = 20;
-  optional StartReplicationManagerRequestProto startReplicationManagerRequest = 21;
-  optional StopReplicationManagerRequestProto stopReplicationManagerRequest = 22;
-  optional ReplicationManagerStatusRequestProto replicationManagerStatusRequest = 23;
-
-}
-
-message ScmContainerLocationResponse {
-  required Type cmdType = 1; // Type of the command
-
-  optional string traceID = 2;
-
-  optional bool success = 3 [default = true];
-
-  optional string message = 4;
-
-  required Status status = 5;
-
-  optional ContainerResponseProto containerResponse = 6;
-  optional GetContainerResponseProto getContainerResponse = 7;
-  optional GetContainerWithPipelineResponseProto getContainerWithPipelineResponse = 8;
-  optional SCMListContainerResponseProto scmListContainerResponse = 9;
-  optional SCMDeleteContainerResponseProto scmDeleteContainerResponse = 10;
-  optional NodeQueryResponseProto nodeQueryResponse = 11;
-  optional ObjectStageChangeResponseProto objectStageChangeResponse = 12;
-  optional PipelineResponseProto pipelineResponse = 13;
-  optional ListPipelineResponseProto listPipelineResponse = 14;
-  optional ActivatePipelineResponseProto activatePipelineResponse = 15;
-  optional DeactivatePipelineResponseProto deactivatePipelineResponse = 16;
-  optional ClosePipelineResponseProto closePipelineResponse = 17;
-  optional GetScmInfoResponseProto getScmInfoResponse = 18;
-  optional InSafeModeResponseProto inSafeModeResponse = 19;
-  optional ForceExitSafeModeResponseProto forceExitSafeModeResponse = 20;
-  optional StartReplicationManagerResponseProto startReplicationManagerResponse = 21;
-  optional StopReplicationManagerResponseProto stopReplicationManagerResponse = 22;
-  optional ReplicationManagerStatusResponseProto replicationManagerStatusResponse = 23;
-  enum Status {
-    OK = 1;
-    CONTAINER_ALREADY_EXISTS = 2;
-    CONTAINER_IS_MISSING = 3;
-  }
-}
-
-enum Type {
-
-  AllocateContainer = 1;
-  GetContainer = 2;
-  GetContainerWithPipeline = 3;
-  ListContainer = 4;
-  DeleteContainer = 5;
-  QueryNode = 6;
-  NotifyObjectStageChange = 7;
-  AllocatePipeline = 8;
-  ListPipelines = 9;
-  ActivatePipeline = 10;
-  DeactivatePipeline = 11;
-  ClosePipeline = 12;
-  GetScmInfo = 13;
-  InSafeMode = 14;
-  ForceExitSafeMode = 15;
-  StartReplicationManager = 16;
-  StopReplicationManager = 17;
-  GetReplicationManagerStatus = 18;
-}
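A sketch of the dispatch pattern the header comment describes: every call is one wrapper message whose cmdType says which optional sub-request is populated (generated class names are assumed from the java_outer_classname option above):

    ScmContainerLocationRequest request = ScmContainerLocationRequest.newBuilder()
        .setCmdType(Type.GetContainer)          // selects the sub-request below
        .setTraceID("trace-42")                 // hypothetical trace id
        .setGetContainerRequest(
            GetContainerRequestProto.newBuilder().setContainerID(42L).build())
        .build();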
-
-/**
-* Request sent to SCM asking where the container should be created.
-*/
-message ContainerRequestProto {
-  // Ozone only supports replication factors of 1 or 3.
-  required ReplicationFactor replicationFactor = 2;
-  required ReplicationType  replicationType = 3;
-  required string owner = 4;
-  optional string traceID = 5;
-}
-
-/**
- * Reply from SCM indicating the result of the container request.
- */
-message ContainerResponseProto {
-  enum Error {
-    success = 1;
-    errorContainerAlreadyExists = 2;
-    errorContainerMissing = 3;
-  }
-  required Error errorCode = 1;
-  required ContainerWithPipeline containerWithPipeline = 2;
-  optional string errorMessage = 3;
-}
-
-message GetContainerRequestProto {
-  required int64 containerID = 1;
-  optional string traceID = 2;
-
-}
-
-message GetContainerResponseProto {
-  required ContainerInfoProto containerInfo = 1;
-}
-
-message GetContainerWithPipelineRequestProto {
-  required int64 containerID = 1;
-  optional string traceID = 2;
-
-}
-
-message GetContainerWithPipelineResponseProto {
-  required ContainerWithPipeline containerWithPipeline = 1;
-}
-
-message SCMListContainerRequestProto {
-  required uint32 count = 1;
-  optional uint64 startContainerID = 2;
-  optional string traceID = 3;
-}
-
-message SCMListContainerResponseProto {
-  repeated ContainerInfoProto containers = 1;
-}
-
-message SCMDeleteContainerRequestProto {
-  required int64 containerID = 1;
-  optional string traceID = 2;
-
-}
-
-message SCMDeleteContainerResponseProto {
-  // Empty response
-}
-
-message ObjectStageChangeRequestProto {
-  enum Type {
-    container = 1;
-    pipeline = 2;
-  }
-  // delete/copy operation may be added later
-  enum Op {
-    create = 1;
-    close = 2;
-  }
-  enum Stage {
-    begin = 1;
-    complete = 2;
-  }
-  required int64 id = 1;
-  required Type type = 2;
-  required Op op = 3;
-  required Stage stage = 4;
-  optional string traceID = 5;
-}
-
-message ObjectStageChangeResponseProto {
-  // Empty response
-}
-
-/*
- NodeQueryRequest sends a request to SCM asking for a list of nodes that
- match the NodeState that we are requesting.
-*/
-message NodeQueryRequestProto {
-  required NodeState state = 1;
-  required QueryScope scope = 2;
-  optional string poolName = 3; // if scope is pool, then pool name is needed.
-  optional string traceID = 4;
-}
-
-message NodeQueryResponseProto {
-  repeated Node datanodes = 1;
-}
-
-/**
-  Request to create a replication pipeline.
- */
-message PipelineRequestProto {
-  required ReplicationType replicationType = 1;
-  required ReplicationFactor replicationFactor = 2;
-
-  // if datanodes are specified then pipelines are created using those
-  // datanodes.
-  optional NodePool nodePool = 3;
-  optional string pipelineID = 4;
-  optional string traceID = 5;
-}
-
-message PipelineResponseProto {
-  enum Error {
-    success = 1;
-    errorPipelineAlreadyExists = 2;
-  }
-  required Error errorCode = 1;
-  optional Pipeline pipeline = 2;
-  optional string errorMessage = 3;
-}
-
-message ListPipelineRequestProto {
-  optional string traceID = 1;
-}
-
-message ListPipelineResponseProto {
-  repeated Pipeline pipelines = 1;
-}
-
-message ActivatePipelineRequestProto {
-  required PipelineID pipelineID = 1;
-  optional string traceID = 2;
-}
-
-message ActivatePipelineResponseProto {
-}
-
-message DeactivatePipelineRequestProto {
-  required PipelineID pipelineID = 1;
-  optional string traceID = 2;
-}
-
-message DeactivatePipelineResponseProto {
-}
-
-message ClosePipelineRequestProto {
-  required PipelineID pipelineID = 1;
-  optional string traceID = 2;
-
-}
-
-message ClosePipelineResponseProto {
-}
-
-message InSafeModeRequestProto {
-  optional string traceID = 1;
-}
-
-message InSafeModeResponseProto {
-  required bool inSafeMode = 1;
-}
-
-message ForceExitSafeModeRequestProto {
-  optional string traceID = 1;
-}
-
-message ForceExitSafeModeResponseProto {
-  required bool exitedSafeMode = 1;
-}
-
-message StartReplicationManagerRequestProto {
-  optional string traceID = 1;
-}
-
-message StartReplicationManagerResponseProto {
-}
-
-message StopReplicationManagerRequestProto {
-  optional string traceID = 1;
-}
-
-message StopReplicationManagerResponseProto {
-}
-
-message ReplicationManagerStatusRequestProto {
-  optional string traceID = 1;
-}
-
-message ReplicationManagerStatusResponseProto {
-  required bool isRunning = 1;
-}
-
-/**
- * Protocol used from an HDFS node to StorageContainerManager.  See the request
- * and response messages for details of the RPC calls.
- */
-service StorageContainerLocationProtocolService {
-  rpc submitRequest (ScmContainerLocationRequest) returns (ScmContainerLocationResponse);
-
-}
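With java_generic_services = true, protoc also emits a service stub; a hedged server-side sketch of handling the single dispatch RPC (the BlockingInterface shape follows protobuf's generic services; the handler body is illustrative only):

    class Dispatcher implements StorageContainerLocationProtocolService.BlockingInterface {
      @Override
      public ScmContainerLocationResponse submitRequest(
          com.google.protobuf.RpcController controller,
          ScmContainerLocationRequest request)
          throws com.google.protobuf.ServiceException {
        switch (request.getCmdType()) {
        case GetContainer:
          // Look up the container and fill getContainerResponse here.
          return ScmContainerLocationResponse.newBuilder()
              .setCmdType(Type.GetContainer)
              .setStatus(ScmContainerLocationResponse.Status.OK)
              .build();
        default:
          throw new com.google.protobuf.ServiceException(
              "Unhandled cmdType: " + request.getCmdType());
        }
      }
    }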
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto
deleted file mode 100644
index d2bb355..0000000
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ /dev/null
@@ -1,249 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for an *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.hdds.protocol.proto";
-option java_outer_classname = "HddsProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.hdds;
-
-message DatanodeDetailsProto {
-    required string uuid = 1;  // UUID assigned to the Datanode.
-    required string ipAddress = 2;     // IP address
-    required string hostName = 3;      // hostname
-    repeated Port ports = 4;
-    optional string certSerialId = 5;   // Certificate serial id.
-    // Network name; can be an IP address or host name, depending on configuration.
-    optional string networkName = 6;
-    optional string networkLocation = 7; // Network topology location
-}
-
-/**
- Proto message encapsulating information required to uniquely identify an
- OzoneManager.
-*/
-message OzoneManagerDetailsProto {
-    required string uuid = 1;          // UUID assigned to the OzoneManager.
-    required string ipAddress = 2;     // IP address of OM.
-    required string hostName = 3;      // Hostname of OM.
-    repeated Port ports = 4;
-}
-
-message Port {
-    required string name = 1;
-    required uint32 value = 2;
-}
-
-message PipelineID {
-  required string id = 1;
-}
-
-enum PipelineState {
-    PIPELINE_ALLOCATED = 1;
-    PIPELINE_OPEN = 2;
-    PIPELINE_DORMANT = 3;
-    PIPELINE_CLOSED = 4;
-}
-
-message Pipeline {
-    required string leaderID = 1;
-    repeated DatanodeDetailsProto members = 2;
-    // TODO: remove the state and leaderID from this class
-    optional PipelineState state = 3 [default = PIPELINE_ALLOCATED];
-    optional ReplicationType type = 4 [default = STAND_ALONE];
-    optional ReplicationFactor factor = 5 [default = ONE];
-    required PipelineID id = 6;
-    repeated uint32 memberOrders = 7;
-}
-
-message KeyValue {
-    required string key = 1;
-    optional string value = 2;
-}
-
-/**
- * Type of the node.
- */
-enum NodeType {
-    OM = 1;         // Ozone Manager
-    SCM = 2;        // Storage Container Manager
-    DATANODE = 3;   // DataNode
-}
-
-// Should we rename NodeState to DatanodeState?
-/**
- * Enum that represents the Node State. This is used in calls to getNodeList
- * and getNodeCount.
- */
-enum NodeState {
-    HEALTHY = 1;
-    STALE = 2;
-    DEAD = 3;
-    DECOMMISSIONING = 4;
-    DECOMMISSIONED = 5;
-}
-
-enum QueryScope {
-    CLUSTER = 1;
-    POOL = 2;
-}
-
-message Node {
-    required DatanodeDetailsProto nodeID = 1;
-    repeated NodeState nodeStates = 2;
-}
-
-message NodePool {
-    repeated Node nodes = 1;
-}
-
-/**
- * LifeCycleState for SCM object creation state machine:
- *    ->Allocated: allocated on SCM but the client has not started creating it yet.
- *    ->Creating: allocated and assigned to a client to create but not ack-ed yet.
- *    ->Open: allocated on SCM and created on datanodes and ack-ed by a client.
- *    ->Close: container closed because all its space is used, or due to an error.
- *    ->Timeout: container failed to be created on datanodes or ack-ed by the client.
- *    ->Deleting(TBD): container will be deleted after a timeout.
- * 1. ALLOCATE-ed containers on SCM can't serve key/block related operation
- *    until ACK-ed explicitly which changes the state to OPEN.
- * 2. Only OPEN/CLOSED containers can serve key/block related operation.
- * 3. ALLOCATE-ed containers that are not ACK-ed timely will be TIMEOUT and
- *    CLEANUP asynchronously.
- */
-
-enum LifeCycleState {
-    OPEN = 1;
-    CLOSING = 2;
-    QUASI_CLOSED = 3;
-    CLOSED = 4;
-    DELETING = 5;
-    DELETED = 6; // object is deleted.
-}
-
-enum LifeCycleEvent {
-    FINALIZE = 1;
-    QUASI_CLOSE = 2;
-    CLOSE = 3; // !!Events after this one have not been used yet.
-    FORCE_CLOSE = 4;
-    DELETE = 5;
-    CLEANUP = 6;
-}
-
-message ContainerInfoProto {
-    required int64 containerID = 1;
-    required LifeCycleState state = 2;
-    optional PipelineID pipelineID = 3;
-    required uint64 usedBytes = 4;
-    required uint64 numberOfKeys = 5;
-    optional int64 stateEnterTime = 6;
-    required string owner = 7;
-    optional int64 deleteTransactionId = 8;
-    optional int64 sequenceId = 9;
-    required ReplicationFactor replicationFactor = 10;
-    required ReplicationType replicationType = 11;
-}
-
-message ContainerWithPipeline {
-  required ContainerInfoProto containerInfo = 1;
-  required Pipeline pipeline = 2;
-}
-
-message GetScmInfoRequestProto {
-    optional string traceID = 1;
-}
-
-message GetScmInfoResponseProto {
-    required string clusterId = 1;
-    required string scmId = 2;
-}
-
-
-enum ReplicationType {
-    RATIS = 1;
-    STAND_ALONE = 2;
-    CHAINED = 3;
-}
-
-enum ReplicationFactor {
-    ONE = 1;
-    THREE = 3;
-}
-
-enum ScmOps {
-    allocateBlock = 1;
-    keyBlocksInfoList = 2;
-    getScmInfo = 3;
-    deleteBlock = 4;
-    createReplicationPipeline = 5;
-    allocateContainer = 6;
-    getContainer = 7;
-    getContainerWithPipeline = 8;
-    listContainer = 9;
-    deleteContainer = 10;
-    queryNode = 11;
-}
-
-message ExcludeListProto {
-    repeated string datanodes = 1;
-    repeated int64 containerIds = 2;
-    repeated PipelineID pipelineIds = 3;
-}
-
-/**
- * Block ID that uniquely identifies a block in SCM.
- */
-message ContainerBlockID {
-    required int64 containerID = 1;
-    required int64 localID = 2;
-}
-
-
-/**
- * Information for the Hdds block token.
- * When adding further fields, make sure they are optional as they would
- * otherwise not be backwards compatible.
- */
-message BlockTokenSecretProto {
-    /**
-     * File access permissions mode.
-     */
-    enum AccessModeProto {
-        READ = 1;
-        WRITE = 2;
-        COPY = 3;
-        DELETE = 4;
-    }
-    required string ownerId = 1;
-    required string blockId = 2;
-    required uint64 expiryDate = 3;
-    required string omCertSerialId = 4;
-    repeated AccessModeProto modes = 5;
-    required uint64 maxLength = 6;
-}
-
-message BlockID {
-    required ContainerBlockID containerBlockID = 1;
-    optional uint64 blockCommitSequenceId = 2 [default = 0];
-}
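To see how the pieces above compose, a sketch of constructing a block identity (HddsProtos is the generated outer class named by the options at the top of this file; the ids are illustrative):

    HddsProtos.BlockID blockId = HddsProtos.BlockID.newBuilder()
        .setContainerBlockID(HddsProtos.ContainerBlockID.newBuilder()
            .setContainerID(7L)        // the container holding the block
            .setLocalID(1001L)         // id unique within that container
            .build())
        .setBlockCommitSequenceId(5L)  // optional; defaults to 0
        .build();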
diff --git a/hadoop-hdds/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor b/hadoop-hdds/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor
deleted file mode 100644
index f29efda..0000000
--- a/hadoop-hdds/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.hdds.conf.ConfigFileGenerator
diff --git a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties
deleted file mode 100644
index 2cbd817..0000000
--- a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-version=${declared.hdds.version}
-revision=${version-info.scm.commit}
-branch=${version-info.scm.branch}
-user=${user.name}
-date=${version-info.build.time}
-url=${version-info.scm.uri}
-srcChecksum=${version-info.source.md5}
-protocVersion=${protobuf.version}
diff --git a/hadoop-hdds/common/src/main/resources/network-topology-default.xml b/hadoop-hdds/common/src/main/resources/network-topology-default.xml
deleted file mode 100644
index f86597c..0000000
--- a/hadoop-hdds/common/src/main/resources/network-topology-default.xml
+++ /dev/null
@@ -1,68 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<!--
-This is the default network topology configuration. It defines the
-level prefix key names, the level default costs and the max levels.
--->
-<configuration>
-    <!-- The version of network topology configuration file format, it must be an integer -->
-    <layoutversion>1</layoutversion>
-    <layers>
-        <layer id="datacenter">
-            <prefix></prefix>
-            <cost>1</cost>
-            <type>Root</type>
-        </layer>
-        <!-- layer id is only used as the reference internally in this document -->
-        <layer id="rack">
-            <!-- Prefix of the name of this layer. For example, if the prefix is "dc", then every
-            name in this layer should start with "dc", such as "dc1", "dc2"; otherwise the
-            NetworkTopology class reports an error when adding a node path that doesn't follow this
-            rule. This field is case insensitive. It is optional and can be empty or "",
-            in which case the prefix check will not be enforced.
-            -->
-            <prefix>rack</prefix>
-            <!-- The default cost of this layer, a positive integer or 0. Can be overridden by the
-            "${cost}" value in a specific path. This field is also optional. When it's not defined,
-            its value defaults to "1".
-            -->
-            <cost>1</cost>
-            <!-- Layer type, optional field, case insensitive, default value InnerNode.
-            Current value range : {Root, InnerNode, Leaf}
-            A Leaf node can only appear at the end of the "path" field of the "topology" section.
-            The Root node is a special node. It doesn't have a name. It's represented by "/" at the beginning of the path.
-            -->
-            <type>InnerNode</type>
-            <!-- Default name if this layer is missing. Only applies to InnerNode. Ignored for Leaf node and Root. -->
-            <default>/default-rack</default>
-        </layer>
-        <layer id="node">
-            <prefix></prefix>
-            <cost>0</cost>
-            <type>Leaf</type>
-        </layer>
-    </layers>
-    <topology>
-        <path>/datacenter/rack/node</path>
-        <!-- When this field is true, each InnerNode layer should have its prefix defined with a non-empty value,
-         otherwise the content is not valid. Default value is false.
-         -->
-        <enforceprefix>false</enforceprefix>
-    </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/resources/network-topology-default.yaml b/hadoop-hdds/common/src/main/resources/network-topology-default.yaml
deleted file mode 100644
index 561869f..0000000
--- a/hadoop-hdds/common/src/main/resources/network-topology-default.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
----
-# Cost: The cost of crossing this layer.
-# The value should be a positive integer or 0. This field is optional.
-# When it's not defined, its value defaults to "1".
-cost: 1
-
-# The prefix of this layer.
-# If the prefix is "dc", then every name in this layer should start with "dc",
-# such as "dc1", "dc2".
-# Note that unlike XML schema, the prefix must be specified explicitly if the type is InnerNode.
-prefix: /
-
-# Layer type, optional field, default value InnerNode.
-# Current value range : {ROOT, INNER_NODE, LEAF_NODE}
-type: ROOT
-
-# Layer name
-defaultName: root
-
-# Sub layer
-# The sublayer property is defined as a list which can reflect a node tree, though
-# in schema template it always has only one child.
-sublayer:
-  -
-    cost: 1
-    prefix: dc
-    defaultName: datacenter
-    type: INNER_NODE
-    sublayer:
-      -
-        cost: 1
-        prefix: rack
-        defaultName: rack
-        type: INNER_NODE
-        sublayer:
-            -
-              cost: 1
-              prefix: ng
-              defaultName: nodegroup
-              type: INNER_NODE
-              sublayer:
-                -
-                  defaultName: node
-                  type: LEAF_NODE
-                  prefix: node
-...
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml b/hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml
deleted file mode 100644
index b43ebd5..0000000
--- a/hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml
+++ /dev/null
@@ -1,74 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<!--
-This is the default network topology configuration. It defines the level
-prefix key names, the level default costs and the max levels.
--->
-<configuration>
-    <!-- The version of network topology configuration file format, it must be an integer -->
-    <layoutversion>1</layoutversion>
-    <layers>
-        <layer id="datacenter">
-            <prefix></prefix>
-            <cost>1</cost>
-            <type>Root</type>
-        </layer>
-        <!-- layer id is only used as the reference internally in this document -->
-        <layer id="rack">
-            <!-- Prefix of the name of this layer. For example, if the prefix is "dc", then every
-            name in this layer should start with "dc", such as "dc1", "dc2"; otherwise the
-            NetworkTopology class reports an error when adding a node path that doesn't follow this
-            rule. This field is case insensitive. It is optional and can be empty or "",
-            in which case the prefix check will not be enforced.
-            -->
-            <prefix>rack</prefix>
-            <!-- The default cost of this layer, a positive integer or 0. Can be overridden by the
-            "${cost}" value in a specific path. This field is also optional. When it's not defined,
-            its value defaults to "1".
-            -->
-            <cost>1</cost>
-            <!-- Layer type, optional field, case insensitive, default value InnerNode.
-            Current value range : {Root, InnerNode, Leaf}
-            A Leaf node can only appear at the end of the "path" field of the "topology" section.
-            The Root node is a special node. It doesn't have a name. It's represented by "/" at the beginning of the path.
-            -->
-            <type>InnerNode</type>
-            <!-- Default name if this layer is missing. Only applies to InnerNode. Ignored for Leaf node and Root -->
-            <default>/default-rack</default>
-        </layer>
-        <layer id="nodegroup">
-            <prefix>ng</prefix>
-            <cost>1</cost>
-            <type>InnerNode</type>
-            <default>/default-nodegroup</default>
-        </layer>
-        <layer id="node">
-            <prefix></prefix>
-            <cost>0</cost>
-            <type>Leaf</type>
-        </layer>
-    </layers>
-    <topology>
-        <path>/datacenter/rack/nodegroup/node</path>
-        <!-- When this field is true, each InnerNode layer should have its prefix defined with a non-empty value,
-         otherwise the content is not valid. Default value is false.
-        -->
-        <enforceprefix>false</enforceprefix>
-    </topology>
-</configuration>
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
deleted file mode 100644
index b0a59fa..0000000
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ /dev/null
@@ -1,2504 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Do not modify this file directly.  Instead, copy entries that you -->
-<!-- wish to modify from this file into ozone-site.xml and change them -->
-<!-- there.  If ozone-site.xml does not already exist, create it.      -->
-
-<!--Tags supported are OZONE, CBLOCK, MANAGEMENT, SECURITY, PERFORMANCE,   -->
-<!--DEBUG, CLIENT, SERVER, OM, SCM, CRITICAL, RATIS, CONTAINER, REQUIRED, -->
-<!--REST, STORAGE, PIPELINE, STANDALONE                                    -->
-
-<configuration>
-
-  <!--Container Settings used by Datanode-->
-  <property>
-    <name>ozone.container.cache.size</name>
-    <value>1024</value>
-    <tag>PERFORMANCE, CONTAINER, STORAGE</tag>
-    <description>Open containers are cached on the datanode side. We maintain
-      an LRU cache of recently used containers. This setting controls the
-      size of that cache.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ipc</name>
-    <value>9859</value>
-    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
-    <description>The ipc port number of the container.</description>
-  </property>
-  <property>
-    <name>dfs.container.ipc.random.port</name>
-    <value>false</value>
-    <tag>OZONE, DEBUG, CONTAINER</tag>
-    <description>Allocates a random free port for the ozone container. This is
-      used only while running unit tests.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.chunk.write.sync</name>
-    <value>false</value>
-    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
-    <description>Determines whether the chunk writes in the container happen as
-      sync I/O or buffered I/O operations.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.statemachinedata.sync.timeout</name>
-    <value>10s</value>
-    <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
-    <description>Timeout for StateMachine data writes by Ratis.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.statemachinedata.sync.retries</name>
-    <value>-1</value>
-    <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
-    <description>Number of times the WriteStateMachineData op will be tried
-      before failing; if this value is -1, it retries indefinitely.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.log.queue.num-elements</name>
-    <value>1024</value>
-    <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
-    <description>Limit for the number of operations in Ratis Log Worker.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.log.queue.byte-limit</name>
-    <value>4GB</value>
-    <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
-    <description>Byte limit for Ratis Log Worker queue.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.log.appender.queue.num-elements</name>
-    <value>1</value>
-    <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
-    <description>Limit for number of append entries in ratis leader's
-      log appender queue.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.log.appender.queue.byte-limit</name>
-    <value>32MB</value>
-    <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
-    <description>Byte limit for ratis leader's log appender queue.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.log.purge.gap</name>
-    <value>1000000</value>
-    <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
-    <description>Purge gap between the last purged commit index
-      and the current index, when the leader decides to purge its log.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.datanode.storage.dir</name>
-    <value/>
-    <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS</tag>
-    <description>This directory is used for storing Ratis metadata like logs.
-      If this is not set then the default metadata dirs are used. A warning
-      will be logged if this is not set. Ideally, this should be mapped to a
-      fast disk like an SSD.
-    </description>
-  </property>
-  <property>
-    <name>hdds.datanode.dir</name>
-    <value/>
-    <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
-    <description>Determines where on the local filesystem HDDS data will be
-      stored. Defaults to dfs.datanode.data.dir if not specified.
-      The directories should be tagged with corresponding storage types
-      ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for storage policies. The default
-      storage type will be DISK if the directory does not have a storage type
-      tagged explicitly.
-    </description>
-  </property>
-  <property>
-    <name>hdds.datanode.volume.choosing.policy</name>
-    <value/>
-    <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
-    <description>
-      The class name of the policy for choosing volumes in the list of
-      directories.  Defaults to
-      org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy.
-      This volume choosing policy selects volumes in a round-robin order.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.enabled</name>
-    <value>false</value>
-    <tag>OZONE, MANAGEMENT, PIPELINE, RATIS</tag>
-    <description>Ozone supports different kinds of replication pipelines.
-      Ratis is one of the replication pipelines supported by Ozone.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.ipc</name>
-    <value>9858</value>
-    <tag>OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT</tag>
-    <description>The ipc port number of the container.</description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.ipc.random.port</name>
-    <value>false</value>
-    <tag>OZONE,DEBUG</tag>
-    <description>Allocates a random free port for the ozone ratis port of the
-      container. This is used only while running unit tests.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.rpc.type</name>
-    <value>GRPC</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>Ratis supports different kinds of transports like netty, GRPC,
-      Hadoop RPC etc. This picks one of those for this cluster.
-    </description>
-  </property>
-  <property>
-    <name>dfs.ratis.snapshot.threshold</name>
-    <value>10000</value>
-    <tag>OZONE, RATIS</tag>
-    <description>Number of transactions after which a ratis snapshot should be
-      taken.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.statemachine.max.pending.apply-transactions</name>
-    <value>10000</value>
-    <tag>OZONE, RATIS</tag>
-    <description>Maximum number of pending apply transactions in a data
-      pipeline. The default value is kept the same as the default snapshot threshold
-      dfs.ratis.snapshot.threshold.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.num.write.chunk.threads</name>
-    <value>60</value>
-    <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>Maximum number of threads in the thread pool that Ratis
-      will use for writing chunks (60 by default).
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.leader.num.pending.requests</name>
-    <value>4096</value>
-    <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>Maximum number of pending requests after which the leader
-      starts rejecting requests from client.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.replication.level</name>
-    <value>MAJORITY</value>
-    <tag>OZONE, RATIS</tag>
-    <description>Replication level to be used by datanode for submitting a
-      container command to ratis. Available replication levels are ALL and
-      MAJORITY; MAJORITY is used as the default replication level.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.num.container.op.executors</name>
-    <value>10</value>
-    <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>Number of executors that will be used by Ratis to execute
-      container ops (10 by default).
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.segment.size</name>
-    <value>1MB</value>
-    <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>The size of the raft segment used by Apache Ratis on datanodes.
-      (1 MB by default)
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.segment.preallocated.size</name>
-    <value>16KB</value>
-    <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>The size of the buffer which is preallocated for raft segment
-      used by Apache Ratis on datanodes (16 KB by default).
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.statemachine.cache.expiry.interval</name>
-    <value>10s</value>
-    <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>The interval for which the stateMachine data in ratis
-      will be cached inside the ContainerStateMachine.
-    </description>
-  </property>
-  <property>
-    <name>dfs.ratis.client.request.timeout.duration</name>
-    <value>3s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for a ratis client request. It should be
-        set greater than leader election timeout in Ratis.
-    </description>
-  </property>
-  <property>
-    <name>dfs.ratis.client.request.max.retries</name>
-    <value>180</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>Number of retries for ratis client request.</description>
-  </property>
-  <property>
-    <name>dfs.ratis.client.request.retry.interval</name>
-    <value>1000ms</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>Interval between successive retries for a ratis client request.
-    </description>
-  </property>
-  <property>
-    <name>dfs.ratis.server.retry-cache.timeout.duration</name>
-    <value>600000ms</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>Retry Cache entry timeout for ratis server.</description>
-  </property>
-  <property>
-    <name>dfs.ratis.server.request.timeout.duration</name>
-    <value>3s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis server request.</description>
-  </property>
-  <property>
-    <name>dfs.ratis.leader.election.minimum.timeout.duration</name>
-    <value>5s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The minimum timeout duration for ratis leader election.
-        Default is 5s.
-    </description>
-  </property>
-  <property>
-    <name>dfs.ratis.server.failure.duration</name>
-    <value>120s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis server failure detection,
-      once the threshold has been reached, the ratis state machine will be
-      informed about the failure in the ratis ring.
-    </description>
-  </property>
-  <property>
-    <name>hdds.node.report.interval</name>
-    <value>60000ms</value>
-    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
-    <description>Time interval of the datanode to send node report. Each
-      datanode periodically sends a node report to SCM. Unit could be
-      defined with postfix (ns,ms,s,m,h,d)</description>
-  </property>
-  <property>
-    <name>hdds.container.report.interval</name>
-    <value>60000ms</value>
-    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
-    <description>Time interval of the datanode to send container report. Each
-      datanode periodically sends a container report to SCM. Unit could be
-      defined with postfix (ns,ms,s,m,h,d)</description>
-  </property>
-  <property>
-    <name>hdds.command.status.report.interval</name>
-    <value>60000ms</value>
-    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
-    <description>Time interval of the datanode to send the status of command
-      execution. Each datanode periodically sends the execution status of
-      commands received from SCM back to SCM. Unit could be defined with postfix
-      (ns,ms,s,m,h,d)</description>
-  </property>
-  <property>
-    <name>hdds.pipeline.report.interval</name>
-    <value>60000ms</value>
-    <tag>OZONE, PIPELINE, MANAGEMENT</tag>
-    <description>Time interval of the datanode to send pipeline report. Each
-      datanode periodically sends a pipeline report to SCM. Unit could be
-      defined with postfix (ns,ms,s,m,h,d)</description>
-  </property>
-
-
-  <property>
-    <name>hdds.prometheus.endpoint.enabled</name>
-    <value>true</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>Enable prometheus compatible metric page on the HTTP
-      servers.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.profiler.endpoint.enabled</name>
-    <value>false</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>Enable /prof java profiler servlet page on HTTP server.
-    </description>
-  </property>
-
-  <!--Ozone Settings-->
-  <property>
-    <name>ozone.administrators</name>
-    <value/>
-    <tag>OZONE, SECURITY</tag>
-    <description>Ozone administrator users, delimited by commas.
-      If not set, only the user who launches an ozone service will be the admin
-      user. This property must be set if ozone services are started by different
-      users. Otherwise, the RPC layer will reject calls from other servers which
-      are started by users not in the list.
-    </description>
-  </property>
-  <property>
-    <name>ozone.block.deleting.container.limit.per.interval</name>
-    <value>10</value>
-    <tag>OZONE, PERFORMANCE, SCM</tag>
-    <description>The maximum number of containers to be scanned by the block
-      deleting service per time interval. The block deleting service spawns a
-      thread to handle block
-      deletions in a container. This property is used to throttle the number of
-      threads spawned for block deletions.
-    </description>
-  </property>
-  <property>
-    <name>ozone.block.deleting.limit.per.task</name>
-    <value>1000</value>
-    <tag>OZONE, PERFORMANCE, SCM</tag>
-    <description>The maximum number of blocks to be deleted by the block
-      deleting service per time interval. This property is used to throttle
-      the actual number of
-      block deletions on a data node per container.
-    </description>
-  </property>
-  <property>
-    <name>ozone.block.deleting.service.interval</name>
-    <value>1m</value>
-    <tag>OZONE, PERFORMANCE, SCM</tag>
-    <description>Time interval of the block deleting service.
-      The block deleting service runs on each datanode periodically and
-      deletes blocks queued for deletion. Unit could be defined with
-      postfix (ns,ms,s,m,h,d)
-    </description>
-  </property>
-  <property>
-    <name>ozone.block.deleting.service.timeout</name>
-    <value>300000ms</value>
-    <tag>OZONE, PERFORMANCE, SCM</tag>
-    <description>A timeout value for the block deletion service. If this is
-      set greater than 0, the service will stop waiting for block deletion
-      completion after this
-      time. If timeout happens to a large proportion of block deletion, this
-      needs to be increased with ozone.block.deleting.limit.per.task. This
-      setting supports multiple time unit suffixes as described in
-      dfs.heartbeat.interval. If no suffix is specified, then milliseconds is
-      assumed.
-    </description>
-  </property>
-  <property>
-    <name>ozone.UnsafeByteOperations.enabled</name>
-    <value>true</value>
-    <tag>OZONE, PERFORMANCE, CLIENT</tag>
-    <description>It specifies whether to use an unsafe or safe buffer for
-      byteString copies.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.connection.timeout</name>
-    <value>5000ms</value>
-    <tag>OZONE, PERFORMANCE, CLIENT</tag>
-    <description>Connection timeout for Ozone client in milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.stream.buffer.flush.size</name>
-    <value>64MB</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>Size which determines at what buffer position a partial
-      flush will be initiated during write. Ideally it should be a multiple
-      of chunkSize.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.stream.buffer.max.size</name>
-    <value>128MB</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>Size which determines at what buffer position the write call
-      is blocked until acknowledgement of the first partial flush by all
-      servers.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.watch.request.timeout</name>
-    <value>30s</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>Timeout for the watch API in Ratis client to acknowledge
-      a particular request getting replayed to all servers.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.max.retries</name>
-    <value>100</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>Maximum number of retries by Ozone Client on encountering
-      exception while writing a key.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.retry.interval</name>
-    <value>0ms</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>Indicates the time duration a client will wait before
-      retrying a write key request on encountering an exception. By default
-      there is no wait.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.socket.timeout</name>
-    <value>5000ms</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>Socket timeout for Ozone client. Unit could be defined with
-      postfix (ns,ms,s,m,h,d)</description>
-  </property>
-  <property>
-    <name>ozone.enabled</name>
-    <value>false</value>
-    <tag>OZONE, REQUIRED</tag>
-    <description>
-      Controls whether the Ozone Object Storage service is enabled.
-      Set to true to enable Ozone.
-      Set to false to disable Ozone.
-      Unless this value is set to true, Ozone services will not be started in
-      the cluster.
-
-      Please note: By default ozone is disabled on a hadoop cluster.
-    </description>
-  </property>
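For a quick sanity check in code (a sketch only: the key and default mirror the property above, org.apache.hadoop.conf.Configuration is the stock Hadoop API, and in a real deployment the override belongs in ozone-site.xml instead):

    // Sketch; in practice this override lives in ozone-site.xml.
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    conf.setBoolean("ozone.enabled", true);                    // disabled by default
    boolean enabled = conf.getBoolean("ozone.enabled", false);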
-  <property>
-    <name>ozone.key.deleting.limit.per.task</name>
-    <value>1000</value>
-    <tag>OM, PERFORMANCE</tag>
-    <description>
-      The maximum number of keys to be scanned by the key deleting service
-      per time interval in OM. Those keys are sent to SCM to delete metadata
-      and generate transactions for the next async deletion between SCM
-      and DataNode.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.service.ids</name>
-    <value></value>
-    <tag>OM, HA</tag>
-    <description>
-      Comma-separated list of OM service Ids.
-
-      If not set, the default value of "om-service-value" is assigned as the
-      OM service ID.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.nodes.EXAMPLEOMSERVICEID</name>
-    <value></value>
-    <tag>OM, HA</tag>
-    <description>
-      Comma-separated list of OM node Ids for a given OM service ID (eg.
-      EXAMPLEOMSERVICEID). The OM service ID should be the value (one of the
-      values if there are multiple) set for the parameter ozone.om.service.ids.
-
-      Unique identifiers for each OM Node, delimited by commas. This will be
-      used by OzoneManagers in HA setup to determine all the OzoneManagers
-      belonging to the same OMservice in the cluster. For example, if you
-      used “omService1” as the OM service ID previously, and you wanted to
-      use “om1”, “om2” and "om3" as the individual IDs of the OzoneManagers,
-      you would configure a property ozone.om.nodes.omService1, and its value
-      "om1,om2,om3".
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.node.id</name>
-    <value></value>
-    <tag>OM, HA</tag>
-    <description>
-      The ID of this OM node. If the OM node ID is not configured it
-      is determined automatically by matching the local node's address
-      with the configured address.
-
-      If node ID is not deterministic from the configuration, then it is set
-      to the OmId from the OM version file.
-    </description>
-  </property>
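Tying the three HA properties above together, a hedged sketch using the stock Hadoop Configuration API (the service and node ids are the illustrative ones from the descriptions, not defaults):

    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    conf.set("ozone.om.service.ids", "omService1");
    conf.set("ozone.om.nodes.omService1", "om1,om2,om3");
    conf.set("ozone.om.node.id", "om1");   // optional; otherwise matched by local address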
-  <property>
-    <name>ozone.om.address</name>
-    <value>0.0.0.0:9862</value>
-    <tag>OM, REQUIRED</tag>
-    <description>
-      The address of the Ozone OM service. This allows clients to discover
-      the address of the OM.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.handler.count.key</name>
-    <value>20</value>
-    <tag>OM, PERFORMANCE</tag>
-    <description>
-      The number of RPC handler threads for OM service endpoints.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.http-address</name>
-    <value>0.0.0.0:9874</value>
-    <tag>OM, MANAGEMENT</tag>
-    <description>
-      The address and the base port where the OM web UI will listen on.
-
-      If the port is 0, then the server will start on a free port. However, it
-      is best to specify a well-known port, so it is easy to connect and see
-      the OM management UI.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.http-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OM, MANAGEMENT</tag>
-    <description>
-      The actual address the OM web server will bind to. If this optional
-      address is set, it overrides only the hostname portion of
-      ozone.om.http-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.http.enabled</name>
-    <value>true</value>
-    <tag>OM, MANAGEMENT</tag>
-    <description>
-      Property to enable or disable OM web user interface.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.https-address</name>
-    <value>0.0.0.0:9875</value>
-    <tag>OM, MANAGEMENT, SECURITY</tag>
-    <description>
-      The address and the base port where the OM web UI will listen
-      on using HTTPS.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.https-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OM, MANAGEMENT, SECURITY</tag>
-    <description>
-      The actual address the OM web server will bind to using HTTPS.
-      If this optional address is set, it overrides only the hostname portion of
-      ozone.om.https-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.keytab.file</name>
-    <value/>
-    <tag>OM, SECURITY</tag>
-    <description>
-      The keytab file for Kerberos authentication in OM.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.db.cache.size.mb</name>
-    <value>128</value>
-    <tag>OM, PERFORMANCE</tag>
-    <description>
-      The size of the OM DB cache in MB that is used for caching files.
-      This value is set to an abnormally low value in the default configuration.
-      That is to make unit testing easy. Generally, this value should be set to
-      something like 16GB or more, if you intend to use Ozone at scale.
-
-      A large value for this key allows a proportionally larger amount of OM
-      metadata to be cached in memory. This makes OM operations faster.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.user.max.volume</name>
-    <value>1024</value>
-    <tag>OM, MANAGEMENT</tag>
-    <description>
-      The maximum number of volumes a user can have on a cluster. Increasing
-      or decreasing this number has no real impact on the ozone cluster. This is
-      defined only for operational purposes. Only an administrator can create a
-      volume, once a volume is created there are no restrictions on the number
-      of buckets or keys inside each bucket a user can create.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.db.dirs</name>
-    <value/>
-    <tag>OZONE, OM, STORAGE, PERFORMANCE</tag>
-    <description>
-      Directory where the OzoneManager stores its metadata. This should
-      be specified as a single directory. If the directory does not
-      exist then the OM will attempt to create it.
-
-      If undefined, then the OM will log a warning and fall back to
-      ozone.metadata.dirs. This fallback approach is not recommended for
-      production environments.
-    </description>
-  </property>
-  <property>
-    <name>ozone.metadata.dirs</name>
-    <value/>
-    <tag>OZONE, OM, SCM, CONTAINER, STORAGE, REQUIRED</tag>
-    <description>
-      This setting is the fallback location for SCM, OM, Recon and DataNodes
-      to store their metadata. This setting may be used only in test/PoC
-      clusters to simplify configuration.
-
-      For production clusters or any time you care about performance, it is
-      recommended that ozone.om.db.dirs, ozone.scm.db.dirs and
-      dfs.container.ratis.datanode.storage.dir be configured separately.
-    </description>
-  </property>
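-  <!-- A sketch of the production layout recommended above (paths are
-       illustrative, not shipped defaults): each metadata store gets its own
-       directory instead of relying on the ozone.metadata.dirs fallback.
-
-       <property>
-         <name>ozone.om.db.dirs</name>
-         <value>/data/disk1/om/db</value>
-       </property>
-       <property>
-         <name>ozone.scm.db.dirs</name>
-         <value>/data/disk1/scm/db</value>
-       </property>
-       <property>
-         <name>dfs.container.ratis.datanode.storage.dir</name>
-         <value>/data/disk2/ratis</value>
-       </property>
-  -->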
-  <property>
-    <name>ozone.metastore.impl</name>
-    <value>RocksDB</value>
-    <tag>OZONE, OM, SCM, CONTAINER, STORAGE</tag>
-    <description>
-      Ozone metadata store implementation. Ozone metadata is distributed
-      across multiple services such as OzoneManager and SCM, each of which
-      stores it in a local key-value database. This property determines which
-      database library to use. Supported values are LevelDB and RocksDB.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.metastore.rocksdb.statistics</name>
-    <value>OFF</value>
-    <tag>OZONE, OM, SCM, STORAGE, PERFORMANCE</tag>
-    <description>
-      The statistics level of the rocksdb store. If you use any value from
-      org.rocksdb.StatsLevel (e.g. ALL or EXCEPT_DETAILED_TIMERS), the rocksdb
-      statistics will be exposed over a JMX bean with the chosen setting. Set
-      it to OFF to not initialize rocksdb statistics at all. Please note that
-      collecting statistics can incur a 5-10% performance penalty.
-      Check the rocksdb documentation for more details.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.db.dirs</name>
-    <value/>
-    <tag>OZONE, SCM, STORAGE, PERFORMANCE</tag>
-    <description>
-      Directory where the StorageContainerManager stores its metadata.
-      This should be specified as a single directory. If the directory
-      does not exist then the SCM will attempt to create it.
-
-      If undefined, then the SCM will log a warning and fall back to
-      ozone.metadata.dirs. This fallback approach is not recommended for
-      production environments.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.block.client.address</name>
-    <value/>
-    <tag>OZONE, SCM</tag>
-    <description>The address of the Ozone SCM block client service. If not
-      defined, the value of ozone.scm.client.address is used.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.block.client.bind.host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      The hostname or IP address used by the SCM block client
-      endpoint to bind.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.block.client.port</name>
-    <value>9863</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      The port number of the Ozone SCM block client service.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.block.deletion.max.retry</name>
-    <value>4096</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      SCM wraps up many blocks in a deletion transaction and sends that to data
-      node for physical deletion periodically. This property determines how many
-      times SCM is going to retry sending a deletion operation to the data node.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.block.size</name>
-    <value>256MB</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      The default size of an SCM block. This maps to the default
-      Ozone block size.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.chunk.size</name>
-    <value>16MB</value>
-    <tag>OZONE, SCM, CONTAINER, PERFORMANCE</tag>
-    <description>
-      The chunk size for reading/writing chunk operations in bytes.
-
-      The chunk size defaults to 16MB. If the configured value is more than
-      the maximum size (16MB), it will be reset to the maximum size. This
-      maps to the network packet sizes and file write operations in the
-      client-to-datanode protocol.
-    </description>
-  </property>
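-  <!-- Sizing note: with the defaults above, one 256MB SCM block is written as
-       256MB / 16MB = 16 chunks, so each block maps to 16 client-to-datanode
-       chunk write operations. Halving ozone.scm.chunk.size doubles the number
-       of chunks per block. -->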
-  <property>
-    <name>ozone.scm.client.address</name>
-    <value/>
-    <tag>OZONE, SCM, REQUIRED</tag>
-    <description>
-      The address of the Ozone SCM client service. This is a required setting.
-
-      It is a string in the host:port format. The port number is optional
-      and defaults to 9860.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.client.bind.host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, SCM, MANAGEMENT</tag>
-    <description>The hostname or IP address used by the SCM client endpoint to
-      bind.
-      This setting is used by the SCM only and never used by clients.
-
-      The setting can be useful in multi-homed setups to restrict the
-      availability of the SCM client service to a specific interface.
-
-      The default is appropriate for most clusters.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.client.port</name>
-    <value>9860</value>
-    <tag>OZONE, SCM, MANAGEMENT</tag>
-    <description>The port number of the Ozone SCM client service.</description>
-  </property>
-  <property>
-    <name>ozone.scm.keyvalue.container.deletion-choosing.policy</name>
-    <value>
-      org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy
-    </value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The policy used for choosing desired keyvalue containers for block deletion.
-      The datanode selects some containers to process block deletion
-      in a certain interval defined by ozone.block.deleting.service.interval.
-      The number of containers to process in each interval is defined
-      by ozone.block.deleting.container.limit.per.interval. This property
-      configures the policy applied while selecting those containers.
-      Two policies are currently supported:
-      RandomContainerDeletionChoosingPolicy and
-      TopNOrderedContainerDeletionChoosingPolicy.
-      org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy
-      implements a simple random policy that returns a random list of
-      containers.
-      org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy
-      implements a policy that chooses the containers with the highest number
-      of pending deletion blocks, in descending order.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.container.placement.impl</name>
-    <value>
-      org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom
-    </value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The fully qualified name of a class that implements
-      org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy.
-      The class decides which datanode will host each container replica. If
-      not set,
-      org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom
-      is used by default.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.pipeline.owner.container.count</name>
-    <value>3</value>
-    <tag>OZONE, SCM, PIPELINE</tag>
-    <description>Number of containers per owner in a pipeline.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.container.size</name>
-    <value>5GB</value>
-    <tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
-    <description>
-      Default container size used by Ozone.
-      Two considerations drive this number: the speed at which a container
-      can be replicated, which is determined by the network speed, and the
-      metadata that each container generates. Selecting a large size
-      creates less SCM metadata, but increases recovery time. 5GB is a size
-      that maps to quick replication times on gigabit networks while still
-      balancing the amount of metadata.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.address</name>
-    <value/>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The address of the Ozone SCM service used for internal
-      communication between the DataNodes and the SCM.
-
-      It is a string in the host:port format. The port number is optional
-      and defaults to 9861.
-
-      This setting is optional. If unspecified then the hostname portion
-      is picked from the ozone.scm.client.address setting and the
-      default service port of 9861 is chosen.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.bind.host</name>
-    <value/>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The hostname or IP address used by the SCM service endpoint to
-      bind.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.id.dir</name>
-    <value/>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>The path that datanodes will use to store the datanode ID.
-      If this value is not set, then the datanode ID is created under the
-      metadata directory.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.port</name>
-    <value>9861</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The port number of the Ozone SCM service.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.db.cache.size.mb</name>
-    <value>128</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>SCM keeps track of the Containers in the cluster. This DB
-      holds the container metadata. This value is set to a small value to make
-      unit testing run smoothly. In production, we recommend a value of 16GB
-      or higher. This allows SCM to avoid disk I/O while looking up container
-      locations.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.dead.node.interval</name>
-    <value>10m</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The duration without heartbeats after which a node is tagged as dead.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.handler.count.key</name>
-    <value>10</value>
-    <tag>OZONE, MANAGEMENT, PERFORMANCE</tag>
-    <description>
-      The number of RPC handler threads for each SCM service
-      endpoint.
-
-      The default is appropriate for small clusters (tens of nodes).
-
-      Set a value that is appropriate for the cluster size. Generally, HDFS
-      recommends that the RPC handler count be set to 20 * log2(cluster size),
-      with an upper limit of 200. However, SCM will not have the same amount
-      of traffic as a Namenode, so a much smaller value will work well too.
-    </description>
-  </property>
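-  <!-- Worked example for the guideline above: 20 * log2(cluster size) for a
-       100-node cluster gives 20 * log2(100), roughly 133 handlers, with the
-       cap at 200. Since SCM sees far less traffic than a Namenode, a much
-       smaller value (such as the default of 10) is usually sufficient. -->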
-  <property>
-    <name>hdds.heartbeat.interval</name>
-    <value>30s</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The heartbeat interval from a data node to SCM. Yes,
-      it is not 3 but 30 seconds, since most data nodes will be heartbeating
-      via Ratis heartbeats. If a client is not able to talk to a data node,
-      it will notify OM/SCM eventually, so a 30-second heartbeat works. This
-      assumes that the replication strategy used is Ratis; if not, this value
-      should be set to something smaller, like 3 seconds.
-      ozone.scm.pipeline.close.timeout should also be adjusted accordingly
-      if the default value for this config is not used.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.heartbeat.log.warn.interval.count</name>
-    <value>10</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      Defines how frequently we log a missed heartbeat to SCM.
-      For example, in the default case we write one warning message for every
-      ten consecutive heartbeats to SCM that we miss. This helps reduce
-      clutter in a data node log, but the trade-off is that the logs will
-      contain fewer of these statements.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.heartbeat.rpc-timeout</name>
-    <value>1s</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      Timeout value for the RPC from Datanode to SCM.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.heartbeat.thread.interval</name>
-    <value>3s</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      When a heartbeat from the data node arrives at SCM, it is queued for
-      processing with the time stamp of when the heartbeat arrived. There is a
-      heartbeat processing thread inside SCM that runs at a specified interval.
-      This value controls how frequently this thread is run.
-
-      There are some assumptions built into SCM, such as that this value
-      should allow the heartbeat processing thread to run at least three times
-      more frequently than heartbeats and at least five times more frequently
-      than stale node detection. If you specify an invalid value, SCM will
-      gracefully refuse to run. For more info, look at the node manager tests
-      in SCM.
-
-      In short, you don't need to change this.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.http-address</name>
-    <value>0.0.0.0:9876</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The address and the base port where the SCM web ui will listen on.
-
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.http-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The actual address the SCM web server will bind to. If this
-      optional address is set, it overrides only the hostname portion of
-      ozone.scm.http-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.http.enabled</name>
-    <value>true</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      Property to enable or disable SCM web ui.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.https-address</name>
-    <value>0.0.0.0:9877</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The address and the base port where the SCM web UI will listen
-      on using HTTPS.
-
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.https-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The actual address the SCM web server will bind to using HTTPS.
-      If this optional address is set, it overrides only the hostname portion of
-      ozone.scm.https-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.names</name>
-    <value/>
-    <tag>OZONE, REQUIRED</tag>
-    <description>
-      The value of this property is a comma-separated list of entries of the
-      form DNS | DNS:PORT | IP address | IP:PORT, e.g. scm1,
-      scm2:8020, 7.7.7.7:7777.
-      This property allows datanodes to discover where SCM is, so that
-      datanodes can send heartbeats to SCM.
-    </description>
-  </property>
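-  <!-- A minimal sketch (hostname is illustrative): a cluster with a single
-       SCM reachable on the default port only needs the host name here.
-
-       <property>
-         <name>ozone.scm.names</name>
-         <value>scm.example.com</value>
-       </property>
-  -->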
-  <property>
-    <name>ozone.scm.stale.node.interval</name>
-    <value>5m</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The interval for stale node flagging. Please
-      see ozone.scm.heartbeat.thread.interval before changing this value.
-    </description>
-  </property>
-  <property>
-    <name>ozone.trace.enabled</name>
-    <value>false</value>
-    <tag>OZONE, DEBUG</tag>
-    <description>
-      Setting this flag to true dumps the HTTP request/response in
-      the logs. Very useful when debugging the REST protocol.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.scm.container.creation.lease.timeout</name>
-    <value>60s</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      Container creation timeout to be used by SCM. When the
-      BEGIN_CREATE event happens, the container is moved from the ALLOCATED to
-      the CREATING state. SCM then waits for the configured amount of time
-      for a COMPLETE_CREATE event; if it is not received, SCM moves the
-      container to DELETING.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.key.preallocation.max.blocks</name>
-    <value>64</value>
-    <tag>OZONE, OM, PERFORMANCE</tag>
-    <description>
-      While allocating blocks from OM, this configuration limits the maximum
-      number of blocks being allocated. It ensures that the
-      allocated block response does not exceed the RPC payload limit. If the
-      client needs more space for the write, separate block allocation
-      requests will be made.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.client.list.cache</name>
-    <value>1000</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      Configuration property to configure the cache size of client list calls.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.replication</name>
-    <value>3</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>
-      Default replication value. The actual number of replicas can be
-      specified when writing the key. The default is used if replication
-      is not specified. Supported values: 1 and 3.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.replication.type</name>
-    <value>RATIS</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>
-      Default replication type to be used while writing keys into ozone. The
-      value can be specified when writing the key; the default is used when
-      nothing is specified. Supported values: RATIS, STAND_ALONE and CHAINED.
-    </description>
-  </property>
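-  <!-- A sketch for a single-datanode test setup (assumption: no fault
-       tolerance is needed), overriding the two defaults above so keys can be
-       written without three-way Ratis replication:
-
-       <property>
-         <name>ozone.replication</name>
-         <value>1</value>
-       </property>
-       <property>
-         <name>ozone.replication.type</name>
-         <value>STAND_ALONE</value>
-       </property>
-  -->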
-  <property>
-    <name>hdds.container.close.threshold</name>
-    <value>0.9f</value>
-    <tag>OZONE, DATANODE</tag>
-    <description>
-      This determines the threshold to be used for closing a container.
-      When the container used percentage reaches this threshold,
-      the container will be closed. Value should be a positive, non-zero
-      percentage in float notation (X.Yf), with 1.0f meaning 100%.
-    </description>
-  </property>
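-  <!-- Worked example: with the 0.9f threshold above and the default 5GB
-       ozone.scm.container.size, a container becomes eligible for closing once
-       roughly 4.5GB of it has been used. -->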
-  <property>
-    <name>ozone.rest.client.http.connection.max</name>
-    <value>100</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>
-      This defines the overall connection limit for the connection pool used in
-      RestClient.
-    </description>
-  </property>
-  <property>
-    <name>ozone.rest.client.http.connection.per-route.max</name>
-    <value>20</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>
-      This defines the connection limit per one HTTP route/host. Total max
-      connection is limited by ozone.rest.client.http.connection.max property.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.open.key.cleanup.service.interval.seconds</name>
-    <value>86400</value>
-    <tag>OZONE, OM, PERFORMANCE</tag>
-    <description>
-      A background job periodically checks open key entries and deletes the
-      expired ones. This entry controls the interval of this cleanup check.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.open.key.expire.threshold</name>
-    <value>86400</value>
-    <tag>OZONE, OM, PERFORMANCE</tag>
-    <description>
-      Controls how long an open key operation is considered active.
-      Specifically, if a key has been open longer than the value of this
-      config entry, that open key is considered expired (e.g. due to a client
-      crash). Defaults to 24 hours.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.tags.custom</name>
-    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,
-      CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,RECON</value>
-  </property>
-
-  <property>
-    <name>ozone.tags.system</name>
-    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,
-      CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,TOKEN,TLS,RECON</value>
-  </property>
-
-
-  <property>
-    <name>hdds.rest.rest-csrf.enabled</name>
-    <value>false</value>
-    <description>
-      If true, then enables Object Store REST server protection against
-      cross-site request forgery (CSRF).
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.rest.http-address</name>
-    <value>0.0.0.0:9880</value>
-    <description>The http address of Object Store REST server inside the
-      datanode.</description>
-  </property>
-
-
-  <property>
-    <name>hdds.rest.netty.high.watermark</name>
-    <value>65535</value>
-    <description>
-      High watermark configuration for Netty in the Object Store REST server.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.rest.netty.low.watermark</name>
-    <value>32768</value>
-    <description>
-      Low watermark configuration for Netty in the Object Store REST server.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.datanode.plugins</name>
-    <value></value>
-    <description>
-      Comma-separated list of HDDS datanode plug-ins to be activated when
-      HDDS service starts as part of datanode.
-    </description>
-  </property>
-  <property>
-    <name>hdds.datanode.storage.utilization.warning.threshold</name>
-    <value>0.75</value>
-    <tag>OZONE, SCM, MANAGEMENT</tag>
-    <description>
-      If a datanode's overall storage utilization exceeds this
-      value, a warning will be logged while processing the nodeReport in SCM.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.datanode.storage.utilization.critical.threshold</name>
-    <value>0.95</value>
-    <tag>OZONE, SCM, MANAGEMENT</tag>
-    <description>
-      If a datanode's overall storage utilization exceeds this
-      value, the datanode will be marked out of space.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.command.status.report.interval</name>
-    <value>30s</value>
-    <tag>OZONE, DATANODE, MANAGEMENT</tag>
-    <description>Time interval at which the datanode sends the status of
-      commands executed since the last report. The unit can be defined with a
-      postfix (ns,ms,s,m,h,d).</description>
-  </property>
-  <property>
-    <name>ozone.scm.pipeline.destroy.timeout</name>
-    <value>66s</value>
-    <tag>OZONE, SCM, PIPELINE</tag>
-    <description>
-      Once a pipeline is closed, SCM waits for the configured amount of time
-      before destroying the pipeline.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.pipeline.creation.interval</name>
-    <value>120s</value>
-    <tag>OZONE, SCM, PIPELINE</tag>
-    <description>
-      SCM schedules a fixed-interval background job at the configured
-      interval to create pipelines.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.scm.safemode.threshold.pct</name>
-    <value>0.99</value>
-    <tag>HDDS,SCM,OPERATION</tag>
-    <description> % of containers which should have at least one
-      reported replica before SCM comes out of safe mode.
-    </description>
-  </property>
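-  <!-- Worked example: with the 0.99 threshold above, a cluster that
-       previously had 1000 containers stays in safe mode until at least 990 of
-       them have one replica reported. -->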
-
-  <property>
-    <name>hdds.scm.wait.time.after.safemode.exit</name>
-    <value>5m</value>
-    <tag>HDDS,SCM,OPERATION</tag>
-    <description> After exiting safemode, wait for the configured interval of
-      time before starting the replication monitor and cleanup activities for
-      unhealthy pipelines.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.scm.safemode.enabled</name>
-    <value>true</value>
-    <tag>HDDS,SCM,OPERATION</tag>
-    <description>Boolean value to enable or disable SCM safe mode.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.scm.safemode.min.datanode</name>
-    <value>1</value>
-    <tag>HDDS,SCM,OPERATION</tag>
-    <description>Minimum number of DataNodes that must be registered for SCM
-      to come out of safe mode.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.scm.safemode.pipeline-availability.check</name>
-    <value>false</value>
-    <tag>HDDS,SCM,OPERATION</tag>
-    <description>
-      Boolean value to enable pipeline availability check during SCM safe mode.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.scm.safemode.healthy.pipelie.pct</name>
-    <value>0.10</value>
-    <tag>HDDS,SCM,OPERATION</tag>
-    <description>
-      Percentage of healthy pipelines, where all 3 datanodes are reported in the
-      pipeline.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.scm.safemode.atleast.one.node.reported.pipeline.pct</name>
-    <value>0.90</value>
-    <tag>HDDS,SCM,OPERATION</tag>
-    <description>
-      Percentage of pipelines, where at least one datanode is reported in the
-      pipeline.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.container.scrub.enabled</name>
-    <value>false</value>
-    <tag>DATANODE</tag>
-    <description>
-      Boolean value to enable data and metadata scrubbing in the containers
-      running on each datanode.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.container.action.max.limit</name>
-    <value>20</value>
-    <tag>DATANODE</tag>
-    <description>
-      Maximum number of Container Actions sent by the datanode to SCM in a
-      single heartbeat.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.pipeline.action.max.limit</name>
-    <value>20</value>
-    <tag>DATANODE</tag>
-    <description>
-      Maximum number of Pipeline Actions sent by the datanode to SCM in a
-      single heartbeat.
-    </description>
-  </property>
-  <property>
-    <name>hdds.scm.watcher.timeout</name>
-    <value>10m</value>
-    <tag>OZONE, SCM, MANAGEMENT</tag>
-    <description>
-      Timeout for the watchers of the HDDS SCM CommandWatchers. After this
-      duration the Copy/Delete container commands will be sent again to the
-      datanode unless the datanode confirms the completion.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.db.profile</name>
-    <value>DISK</value>
-    <tag>OZONE, OM, PERFORMANCE</tag>
-    <description>This property allows the user to pick a configuration
-    that tunes the RocksDB settings for the hardware it is running
-    on. Right now, we have SSD and DISK as profile options.</description>
-  </property>
-
-  <property>
-    <name>hdds.datanode.replication.work.dir</name>
-    <tag>DATANODE</tag>
-    <description>Temporary directory used during container replication
-      between datanodes. It should have enough space to store multiple
-      containers (in compressed format), but doesn't require fast I/O access
-      such as an SSD.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.lock.max.concurrency</name>
-    <value>100</value>
-    <tag>HDDS</tag>
-    <description>Locks in HDDS/Ozone use an object pool to maintain the active
-      locks in the system; this property defines the maximum number of locks
-      that will be maintained in the pool.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.authentication.kerberos.principal</name>
-    <value/>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>The server principal used by the Ozone S3Gateway server. This
-      is typically set to HTTP/_HOST@REALM.TLD. The SPNEGO server principal
-      begins with the prefix HTTP/ by convention.</description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.domain.name</name>
-    <value/>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>List of Ozone S3Gateway domain names. If multiple
-      domain names are provided, they should be "," separated.
-      This parameter is only required when the virtual host style pattern is
-      followed.</description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.http-address</name>
-    <value>0.0.0.0:9878</value>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>The address and the base port where the Ozone S3Gateway
-      server will listen on.</description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.http-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>The actual address the HTTP server will bind to. If this optional address
-      is set, it overrides only the hostname portion of ozone.s3g.http-address.
-      This is useful for making the Ozone S3Gateway HTTP server listen on all
-      interfaces by setting it to 0.0.0.0.</description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.http.enabled</name>
-    <value>true</value>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>The boolean which enables the Ozone S3Gateway server.</description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.https-address</name>
-    <value/>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>Ozone S3Gateway HTTPS server address and port.</description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.https-bind-host</name>
-    <value/>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>The actual address the HTTPS server will bind to. If this optional address
-      is set, it overrides only the hostname portion of ozone.s3g.https-address.
-      This is useful for making the Ozone S3Gateway HTTPS server listen on all
-      interfaces by setting it to 0.0.0.0.</description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.keytab.file</name>
-    <value/>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>The keytab file used by the S3Gateway server to login as its
-      service principal. </description>
-  </property>
-
-  <property>
-    <name>ozone.om.save.metrics.interval</name>
-    <value>5m</value>
-    <tag>OZONE, OM</tag>
-    <description>Time interval used to store the omMetrics into a
-      file. A background thread periodically stores the OM metrics into a
-      file. The unit can be defined with a postfix (ns,ms,s,m,h,d).
-    </description>
-  </property>
-  <property>
-    <name>ozone.security.enabled</name>
-    <value>false</value>
-    <tag> OZONE, SECURITY</tag>
-    <description>True if security is enabled for ozone. When this property is
-     true, hadoop.security.authentication should be Kerberos.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.client.checksum.type</name>
-    <value>CRC32</value>
-    <tag>OZONE, CLIENT, MANAGEMENT</tag>
-    <description>The checksum type [NONE/ CRC32/ CRC32C/ SHA256/ MD5]
-      determines which algorithm is used to compute the checksum for chunk
-      data. The default checksum type is CRC32.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.client.bytes.per.checksum</name>
-    <value>1MB</value>
-    <tag>OZONE, CLIENT, MANAGEMENT</tag>
-    <description>A checksum will be computed for every "bytes per checksum"
-      number of bytes and stored sequentially. The minimum value for this
-      config is 256KB.
-    </description>
-  </property>
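-  <!-- Worked example: with the defaults above, one 16MB chunk is covered by
-       16MB / 1MB = 16 checksums. Lowering ozone.client.bytes.per.checksum
-       toward the 256KB minimum detects corruption at a finer granularity, at
-       the cost of more checksum entries per chunk. -->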
-
-  <property>
-    <name>ozone.client.verify.checksum</name>
-    <value>true</value>
-    <tag>OZONE, CLIENT, MANAGEMENT</tag>
-    <description>
-      Whether the Ozone client verifies the checksum of each checksum-sized
-      block of data it reads.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.lock.fair</name>
-    <value>false</value>
-    <description>If this is true, the Ozone Manager lock will be used in Fair
-      mode, which will schedule threads in the order received/queued. If this is
-      false, non-fair ordering is used. See
-      java.util.concurrent.locks.ReentrantReadWriteLock
-      for more information on fair/non-fair locks.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.enable</name>
-    <value>false</value>
-    <tag>OZONE, OM, RATIS, MANAGEMENT</tag>
-    <description>Property to enable or disable Ratis server on OM.
-    Please note - this is a temporary property to disable OM Ratis server.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.port</name>
-    <value>9872</value>
-    <tag>OZONE, OM, RATIS</tag>
-    <description>
-      The port number of the OzoneManager's Ratis server.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.rpc.type</name>
-    <value>GRPC</value>
-    <tag>OZONE, OM, RATIS, MANAGEMENT</tag>
-    <description>Ratis supports different kinds of transports like netty, GRPC,
-      Hadoop RPC etc. This picks one of those for this cluster.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.storage.dir</name>
-    <value/>
-    <tag>OZONE, OM, STORAGE, MANAGEMENT, RATIS</tag>
-    <description>This directory is used for storing OM's Ratis metadata, such
-      as logs. Ideally, this should be mapped to a fast disk like an SSD.
-      If undefined, the OM will log a warning and the Ratis storage dir will
-      fall back to ozone.metadata.dirs.
-      This fallback approach is not recommended for production environments.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.segment.size</name>
-    <value>16KB</value>
-    <tag>OZONE, OM, RATIS, PERFORMANCE</tag>
-    <description>The size of the raft segment used by Apache Ratis on OM.
-      (16 KB by default)
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.segment.preallocated.size</name>
-    <value>16KB</value>
-    <tag>OZONE, OM, RATIS, PERFORMANCE</tag>
-    <description>The size of the buffer which is preallocated for the raft
-      segment used by Apache Ratis on OM. (16 KB by default)
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.log.appender.queue.num-elements</name>
-    <value>1024</value>
-    <tag>OZONE, DEBUG, OM, RATIS</tag>
-    <description>Number of operations that may be pending with Raft's Log
-      Worker.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.ratis.log.appender.queue.byte-limit</name>
-    <value>32MB</value>
-    <tag>OZONE, DEBUG, OM, RATIS</tag>
-    <description>Byte limit for Raft's Log Worker queue.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.ratis.log.purge.gap</name>
-    <value>1000000</value>
-    <tag>OZONE, OM, RATIS</tag>
-    <description>The minimum gap between log indices for Raft server to purge
-      its log segments after taking snapshot.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.snapshot.auto.trigger.threshold</name>
-    <value>400000</value>
-    <tag>OZONE, DEBUG, OM, RATIS</tag>
-    <description>The log index threshold after which Ratis will automatically
-      trigger a snapshot on the OM state machine.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.server.request.timeout</name>
-    <value>3s</value>
-    <tag>OZONE, OM, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for OM's Ratis server requests.</description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.server.retry.cache.timeout</name>
-    <value>600000ms</value>
-    <tag>OZONE, OM, RATIS, MANAGEMENT</tag>
-    <description>Retry Cache entry timeout for OM's ratis server.</description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.minimum.timeout</name>
-    <value>1s</value>
-    <tag>OZONE, OM, RATIS, MANAGEMENT</tag>
-    <description>The minimum timeout duration for OM's Ratis server rpc.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.client.request.timeout.duration</name>
-    <value>3s</value>
-    <tag>OZONE, OM, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for OM Ratis client request.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.ratis.client.request.max.retries</name>
-    <value>180</value>
-    <tag>OZONE, OM, RATIS, MANAGEMENT</tag>
-    <description>Number of retries for OM client request.</description>
-  </property>
-  <property>
-    <name>ozone.om.ratis.client.request.retry.interval</name>
-    <value>100ms</value>
-    <tag>OZONE, OM, RATIS, MANAGEMENT</tag>
-    <description>Interval between successive retries for an OM client request.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.leader.election.minimum.timeout.duration</name>
-    <value>1s</value>
-    <tag>OZONE, OM, RATIS, MANAGEMENT</tag>
-    <description>The minimum timeout duration for OM ratis leader election.
-      Default is 1s.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.server.failure.timeout.duration</name>
-    <value>120s</value>
-    <tag>OZONE, OM, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for Ratis server failure detection.
-      Once the threshold is reached, the Ratis state machine will be informed
-      about the failure in the Ratis ring.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.server.role.check.interval</name>
-    <value>15s</value>
-    <tag>OZONE, OM, RATIS, MANAGEMENT</tag>
-    <description>The interval at which the OM leader performs a role
-      check on its Ratis server. The Ratis server informs OM if it
-      loses the leader role. The scheduled check is a secondary
-      check to ensure that the leader role is updated periodically.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.om.ratis.snapshot.dir</name>
-    <value/>
-    <tag>OZONE, OM, STORAGE, MANAGEMENT, RATIS</tag>
-    <description>This directory is used for storing OM's snapshot-related
-      files, like the ratisSnapshotIndex and the DB checkpoint from the
-      leader OM.
-      If undefined, the OM snapshot dir will fall back to
-      ozone.om.ratis.storage.dir.
-      This fallback approach is not recommended for production environments.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.snapshot.provider.socket.timeout</name>
-    <value>5000s</value>
-    <tag>OZONE, OM, HA, MANAGEMENT</tag>
-    <description>
-      Socket timeout for HTTP call made by OM Snapshot Provider to request
-      OM snapshot from OM Leader.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.snapshot.provider.connection.timeout</name>
-    <value>5000s</value>
-    <tag>OZONE, OM, HA, MANAGEMENT</tag>
-    <description>
-      Connection timeout for HTTP call made by OM Snapshot Provider to request
-      OM snapshot from OM Leader.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.snapshot.provider.request.timeout</name>
-    <value>5000ms</value>
-    <tag>OZONE, OM, HA, MANAGEMENT</tag>
-    <description>
-      Connection request timeout for HTTP call made by OM Snapshot Provider to
-      request OM snapshot from OM Leader.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.acl.authorizer.class</name>
-    <value>org.apache.hadoop.ozone.security.acl.OzoneAccessAuthorizer</value>
-    <tag>OZONE, SECURITY, ACL</tag>
-    <description>Acl authorizer for Ozone.
-    </description>
-  </property>
-  <property>
-    <name>ozone.acl.enabled</name>
-    <value>false</value>
-    <tag>OZONE, SECURITY, ACL</tag>
-    <description>Key to enable/disable ozone acls.</description>
-  </property>
-  <property>
-    <name>hdds.scm.kerberos.keytab.file</name>
-    <value></value>
-    <tag> OZONE, SECURITY</tag>
-    <description> The keytab file used by each SCM daemon to login as its
-      service principal. The principal name is configured with
-      hdds.scm.kerberos.principal.
-    </description>
-  </property>
-  <property>
-    <name>hdds.scm.kerberos.principal</name>
-    <value></value>
-    <tag> OZONE, SECURITY</tag>
-    <description>The SCM service principal, e.g. scm/_HOST@REALM.COM.</description>
-  </property>
-
-  <property>
-    <name>ozone.om.kerberos.keytab.file</name>
-    <value></value>
-    <tag> OZONE, SECURITY</tag>
-    <description> The keytab file used by OzoneManager daemon to login as its
-      service principal. The principal name is configured with
-      ozone.om.kerberos.principal.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.kerberos.principal</name>
-    <value></value>
-    <tag> OZONE, SECURITY</tag>
-    <description>The OzoneManager service principal, e.g. om/_HOST@REALM.COM.</description>
-  </property>
-
-  <property>
-    <name>hdds.scm.http.kerberos.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-  </property>
-  <property>
-    <name>hdds.scm.http.kerberos.keytab</name>
-    <value>/etc/security/keytabs/HTTP.keytab</value>
-  </property>
-
-  <property>
-    <name>ozone.om.http.kerberos.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <description>
-      OzoneManager http server kerberos principal.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.http.kerberos.keytab</name>
-    <value>/etc/security/keytabs/HTTP.keytab</value>
-    <description>
-      OzoneManager http server kerberos keytab.
-    </description>
-  </property>
-  <property>
-    <name>hdds.key.len</name>
-    <value>2048</value>
-    <tag>SCM, HDDS, X509, SECURITY</tag>
-    <description>
-      SCM CA key length. This is an algorithm-specific metric, such as modulus length, specified in number of bits.
-    </description>
-  </property>
-  <property>
-    <name>hdds.key.dir.name</name>
-    <value>keys</value>
-    <tag>SCM, HDDS, X509, SECURITY</tag>
-    <description>
-      Directory to store public/private key for SCM CA. This is relative to the ozone/hdds metadata dir.
-    </description>
-  </property>
-  <property>
-    <name>hdds.block.token.expiry.time</name>
-    <value>1d</value>
-    <tag>OZONE, HDDS, SECURITY, TOKEN</tag>
-    <description>
-      Default value for expiry time of block token. This
-      setting supports multiple time unit suffixes as described in
-      dfs.heartbeat.interval. If no suffix is specified, then milliseconds is
-      assumed.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.block.token.enabled</name>
-    <value>false</value>
-    <tag>OZONE, HDDS, SECURITY, TOKEN</tag>
-    <description>True if block tokens are enabled, else false.</description>
-  </property>
-  <property>
-    <name>hdds.x509.file.name</name>
-    <value>certificate.crt</value>
-    <tag>OZONE, HDDS, SECURITY</tag>
-    <description>Certificate file name.</description>
-  </property>
-  <property>
-    <name>hdds.grpc.tls.provider</name>
-    <value>OPENSSL</value>
-    <tag>OZONE, HDDS, SECURITY, TLS</tag>
-    <description>HDDS GRPC server TLS provider.</description>
-  </property>
-  <property>
-    <name>hdds.grpc.tls.enabled</name>
-    <value>false</value>
-    <tag>OZONE, HDDS, SECURITY, TLS</tag>
-    <description>If HDDS GRPC server TLS is enabled.</description>
-  </property>
-  <property>
-    <name>hdds.x509.default.duration</name>
-    <value>P365D</value>
-    <tag>OZONE, HDDS, SECURITY</tag>
-    <description>Default duration for which x509 certificates issued by SCM are
-      valid. The formats accepted are based on the ISO-8601 duration format
-      PnDTnHnMn.nS</description>
-  </property>
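-  <!-- Format note: in the ISO-8601 duration format PnDTnHnMn.nS used here,
-       P365D is 365 days, P30D is 30 days, and P1DT12H is one day and twelve
-       hours. -->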
-  <property>
-    <name>hdds.x509.dir.name</name>
-    <value>certs</value>
-    <tag>OZONE, HDDS, SECURITY</tag>
-    <description>X509 certificate directory name.</description>
-  </property>
-  <property>
-    <name>hdds.x509.max.duration</name>
-    <value>P1865D</value>
-    <tag>OZONE, HDDS, SECURITY</tag>
-    <description>Max time for which certificates issued by the SCM CA are
-      valid. The formats accepted are based on the ISO-8601 duration format
-      PnDTnHnMn.nS</description>
-  </property>
-  <property>
-    <name>hdds.x509.signature.algorithm</name>
-    <value>SHA256withRSA</value>
-    <tag>OZONE, HDDS, SECURITY</tag>
-    <description>The signature algorithm for X509 certificates.</description>
-  </property>
-  <property>
-    <name>ozone.scm.security.handler.count.key</name>
-    <value>2</value>
-    <tag>OZONE, HDDS, SECURITY</tag>
-    <description>Threads configured for SCMSecurityProtocolServer.</description>
-  </property>
-  <property>
-    <name>ozone.scm.security.service.address</name>
-    <value/>
-    <tag>OZONE, HDDS, SECURITY</tag>
-    <description>Address of SCMSecurityProtocolServer.</description>
-  </property>
-  <property>
-    <name>ozone.scm.security.service.bind.host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, HDDS, SECURITY</tag>
-    <description>SCM security server host.</description>
-  </property>
-  <property>
-    <name>ozone.scm.security.service.port</name>
-    <value>9961</value>
-    <tag>OZONE, HDDS, SECURITY</tag>
-    <description>SCM security server port.</description>
-  </property>
-
-  <property>
-    <name>hdds.metadata.dir</name>
-    <value/>
-    <tag>X509, SECURITY</tag>
-    <description>
-      Absolute path to HDDS metadata dir.
-    </description>
-  </property>
-  <property>
-    <name>hdds.priv.key.file.name</name>
-    <value>private.pem</value>
-    <tag>X509, SECURITY</tag>
-    <description>
-      Name of file which stores private key generated for SCM CA.
-    </description>
-  </property>
-  <property>
-    <name>hdds.public.key.file.name</name>
-    <value>public.pem</value>
-    <tag>X509, SECURITY</tag>
-    <description>
-      Name of file which stores public key generated for SCM CA.
-    </description>
-  </property>
-  <property>
-    <name>ozone.manager.delegation.remover.scan.interval</name>
-    <value>3600000</value>
-    <description>
-      Time interval after which the ozone secret manager scans for expired
-      delegation tokens.
-    </description>
-  </property>
-  <property>
-    <name>ozone.manager.delegation.token.renew-interval</name>
-    <value>1d</value>
-    <description>
-      Default time interval after which ozone delegation token will
-      require renewal before any further use.
-    </description>
-  </property>
-  <property>
-    <name>ozone.manager.delegation.token.max-lifetime</name>
-    <value>7d</value>
-    <description>
-      Default max time interval after which ozone delegation token will
-      not be renewed.
-    </description>
-  </property>
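-  <!-- Worked example (assuming standard Hadoop delegation token semantics):
-       with the defaults above, a token must be renewed at least once a day to
-       stay usable, and no amount of renewal extends it beyond 7 days from
-       issue. -->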
-
-  <property>
-    <name>ozone.fs.isolated-classloader</name>
-    <value></value>
-    <tag>OZONE, OZONEFS</tag>
-    <description>
-      Enable this for older hadoop versions to separate the classloading of
-      all the Ozone classes. With the value 'true', ozonefs can be used with
-      older hadoop versions, as the hadoop3/ozone related classes are loaded
-      by an isolated classloader.
-
-      The default depends on the jar used: true for the
-      ozone-filesystem-lib-legacy jar and false for
-      ozone-filesystem-lib-current.jar.
-    </description>
-  </property>
-  <property>
-    <name>ozone.manager.db.checkpoint.transfer.bandwidthPerSec</name>
-    <value>0</value>
-    <tag>OZONE</tag>
-    <description>
-      Maximum bandwidth used for Ozone Manager DB checkpoint download through
-      the servlet.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.freon.http-address</name>
-    <value>0.0.0.0:9884</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The address and the base port where the FREON web ui will listen on.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>ozone.freon.http-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The actual address the Freon web server will bind to. If this
-      optional address is set, it overrides only the hostname portion of
-      ozone.freon.http-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.freon.http.enabled</name>
-    <value>true</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      Property to enable or disable FREON web ui.
-    </description>
-  </property>
-  <property>
-    <name>ozone.freon.https-address</name>
-    <value>0.0.0.0:9885</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The address and the base port where the Freon web server will listen
-      on using HTTPS.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>ozone.freon.https-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The actual address the Freon web server will bind to using HTTPS.
-      If this optional address is set, it overrides only the hostname portion of
-      ozone.freon.https-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.freon.http.kerberos.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <tag>SECURITY</tag>
-    <description>
-     Security principal used by freon.
-    </description>
-  </property>
-  <property>
-    <name>ozone.freon.http.kerberos.keytab</name>
-    <value>/etc/security/keytabs/HTTP.keytab</value>
-    <tag>SECURITY</tag>
-    <description>
-       Keytab used by Freon.
-    </description>
-  </property>
-  <property>
-    <name>hdds.security.client.datanode.container.protocol.acl</name>
-    <value>*</value>
-    <tag>SECURITY</tag>
-    <description>
-      Comma separated list of users and groups allowed to access
-      client datanode container protocol.
-    </description>
-  </property>
-  <property>
-    <name>hdds.security.client.scm.block.protocol.acl</name>
-    <value>*</value>
-    <tag>SECURITY</tag>
-    <description>
-      Comma separated list of users and groups allowed to access
-      client scm block protocol.
-    </description>
-  </property>
-  <property>
-    <name>hdds.security.client.scm.certificate.protocol.acl</name>
-    <value>*</value>
-    <tag>SECURITY</tag>
-    <description>
-      Comma separated list of users and groups allowed to access
-      client scm certificate protocol.
-    </description>
-  </property>
-  <property>
-    <name>hdds.security.client.scm.container.protocol.acl</name>
-    <value>*</value>
-    <tag>SECURITY</tag>
-    <description>
-      Comma separated list of users and groups allowed to access
-      client scm container protocol.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.security.client.protocol.acl</name>
-    <value>*</value>
-    <tag>SECURITY</tag>
-    <description>
-      Comma separated list of users and groups allowed to access
-      client ozone manager protocol.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.datanode.http.kerberos.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <tag>HDDS, SECURITY, MANAGEMENT</tag>
-    <description>
-      The kerberos principal for the datanode http server.
-    </description>
-  </property>
-  <property>
-    <name>hdds.datanode.http.kerberos.keytab</name>
-    <value>/etc/security/keytabs/HTTP.keytab</value>
-    <tag>HDDS, SECURITY, MANAGEMENT</tag>
-    <description>
-      The kerberos keytab file for the datanode http server.
-    </description>
-  </property>
-  <property>
-    <name>hdds.datanode.http-address</name>
-    <value>0.0.0.0:9882</value>
-    <tag>HDDS, MANAGEMENT</tag>
-    <description>
-      The address and the base port where the Datanode web ui will listen on.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>hdds.datanode.http-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>HDDS, MANAGEMENT</tag>
-    <description>
-      The actual address the Datanode web server will bind to. If this
-      optional address is set, it overrides only the hostname portion of
-      hdds.datanode.http-address.
-    </description>
-  </property>
-  <property>
-    <name>hdds.datanode.http.enabled</name>
-    <value>true</value>
-    <tag>HDDS, MANAGEMENT</tag>
-    <description>
-      Property to enable or disable Datanode web ui.
-    </description>
-  </property>
-  <property>
-    <name>hdds.datanode.https-address</name>
-    <value>0.0.0.0:9883</value>
-    <tag>HDDS, MANAGEMENT, SECURITY</tag>
-    <description>
-      The address and the base port where the Datanode web UI will listen
-      on using HTTPS.
-
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>hdds.datanode.https-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>HDDS, MANAGEMENT, SECURITY</tag>
-    <description>
-      The actual address the Datanode web server will bind to using HTTPS.
-      If this optional address is set, it overrides only the hostname portion of
-      hdds.datanode.https-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.retry.max.attempts</name>
-    <value>10</value>
-    <description>
-      Max retry attempts for Ozone RpcClient talking to OzoneManagers.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.failover.max.attempts</name>
-    <value>15</value>
-    <description>
-      Expert only. The number of client failover attempts that should be
-      made before the failover is considered failed.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.failover.sleep.base.millis</name>
-    <value>500</value>
-    <description>
-      Expert only. The time to wait, in milliseconds, between failover
-      attempts increases exponentially as a function of the number of
-      attempts made so far, with a random factor of +/- 50%. This option
-      specifies the base value used in the failover calculation. The
-      first failover will retry immediately. The 2nd failover attempt
-      will delay at least ozone.client.failover.sleep.base.millis
-      milliseconds. And so on.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.failover.sleep.max.millis</name>
-    <value>15000</value>
-    <description>
-      Expert only. The time to wait, in milliseconds, between failover
-      attempts increases exponentially as a function of the number of
-      attempts made so far, with a random factor of +/- 50%. This option
-      specifies the maximum value to wait between failovers.
-      Specifically, the time between two failover attempts will not
-      exceed +/- 50% of ozone.client.failover.sleep.max.millis
-      milliseconds.
-    </description>
-  </property>
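-  <!-- Worked example: with base 500ms and max 15000ms as above, the waits
-       before successive failover attempts grow roughly as 0ms, 500ms, 1000ms,
-       2000ms, 4000ms and so on, each perturbed by a random factor of +/- 50%
-       and capped near 15000ms. -->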
-  <property>
-    <name>ozone.recon.http.enabled</name>
-    <value>true</value>
-    <tag>RECON, MANAGEMENT</tag>
-    <description>
-      Property to enable or disable Recon web user interface.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.http-address</name>
-    <value>0.0.0.0:9888</value>
-    <tag>RECON, MANAGEMENT</tag>
-    <description>
-      The address and the base port where the Recon web UI will listen on.
-
-      If the port is 0, then the server will start on a free port. However, it
-      is best to specify a well-known port, so it is easy to connect and see
-      the Recon management UI.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.http-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>RECON, MANAGEMENT</tag>
-    <description>
-      The actual address the Recon server will bind to. If this optional
-      address is set, it overrides only the hostname portion of
-      ozone.recon.http-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.https-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>RECON, MANAGEMENT, SECURITY</tag>
-    <description>
-      The actual address the Recon web server will bind to using HTTPS.
-      If this optional address is set, it overrides only the hostname portion of
-      ozone.recon.https-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.https-address</name>
-    <value>0.0.0.0:9889</value>
-    <tag>RECON, MANAGEMENT, SECURITY</tag>
-    <description>
-      The address and the base port on which the Recon web UI will listen
-      using HTTPS. If the port is 0, the server will start on a free
-      port.
-    </description>
-  </property>
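
The *-bind-host keys above share one override rule: when set, they replace only the hostname portion of the corresponding *-address and keep its port. A small illustrative sketch of that rule (hypothetical helper, not from the deleted sources):

```java
import java.net.InetSocketAddress;

// Hypothetical helper showing the shared semantics of the *-bind-host
// keys: replace only the hostname portion, keep the configured port.
final class BindHostSketch {
  static InetSocketAddress effectiveBindAddress(
      InetSocketAddress configuredAddress, String bindHost) {
    if (bindHost == null || bindHost.isEmpty()) {
      return configuredAddress;
    }
    return new InetSocketAddress(bindHost, configuredAddress.getPort());
  }
}
```
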
-  <property>
-    <name>ozone.recon.keytab.file</name>
-    <value/>
-    <tag>RECON, SECURITY</tag>
-    <description>
-      The keytab file for Kerberos authentication in Recon.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.authentication.kerberos.principal</name>
-    <value/>
-    <tag>RECON</tag>
-    <description>The server principal used by the Ozone Recon server. This is
-      typically set to HTTP/_HOST@REALM.TLD. The SPNEGO server principal
-      begins with the prefix HTTP/ by convention.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.container.db.cache.size.mb</name>
-    <value>128</value>
-    <tag>RECON, PERFORMANCE</tag>
-    <description>
-      The size of the Recon DB cache in MB used for caching files.
-      This value is set to an abnormally low value in the default
-      configuration to make unit testing easy. Generally, this value
-      should be set to something like 16GB or more if you intend to use
-      Recon at scale.
-
-      A large value for this key allows a proportionally larger amount of Recon
-      container DB to be cached in memory. This makes Recon Container-Key
-      operations faster.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.db.dir</name>
-    <value/>
-    <tag>OZONE, RECON, STORAGE, PERFORMANCE</tag>
-    <description>
-      Directory where the Recon Server stores its metadata. This should
-      be specified as a single directory. If the directory does not
-      exist, Recon will attempt to create it.
-
-      If undefined, Recon will log a warning and fall back to
-      ozone.metadata.dirs. This fallback approach is not recommended for
-      production environments.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.network.topology.schema.file</name>
-    <value>network-topology-default.xml</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The schema file defines the ozone network topology. XML (the default)
-      and YAML formats are currently supported. Refer to the topology
-      awareness document for XML and YAML topology definition samples.
-    </description>
-  </property>
-  <property>
-    <name>ozone.network.topology.aware.read</name>
-    <value>false</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      Whether to enable topology-aware reads to improve read performance.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.container.db.impl</name>
-    <value>RocksDB</value>
-    <tag>OZONE, RECON, STORAGE</tag>
-    <description>
-      Ozone Recon container DB store implementation. Supported values are
-      LevelDB and RocksDB.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.om.db.dir</name>
-    <value/>
-    <tag>OZONE, RECON, STORAGE</tag>
-    <description>
-      Directory where the Recon Server stores its OM snapshot DB. This should
-      be specified as a single directory. If the directory does not
-      exist, Recon will attempt to create it.
-
-      If undefined, Recon will log a warning and fall back to
-      ozone.metadata.dirs. This fallback approach is not recommended for
-      production environments.
-    </description>
-  </property>
-  <property>
-    <name>recon.om.connection.request.timeout</name>
-    <value>5000</value>
-    <tag>OZONE, RECON, OM</tag>
-    <description>
-      Connection request timeout in milliseconds for the HTTP call made by
-      Recon to request the OM DB snapshot.
-    </description>
-  </property>
-  <property>
-    <name>recon.om.connection.timeout</name>
-    <value>5s</value>
-    <tag>OZONE, RECON, OM</tag>
-    <description>
-      Connection timeout for the HTTP call made by Recon to request the
-      OM snapshot, specified as a duration (for example, 5s).
-    </description>
-  </property>
-  <property>
-    <name>recon.om.socket.timeout</name>
-    <value>5s</value>
-    <tag>OZONE, RECON, OM</tag>
-    <description>
-      Socket timeout for the HTTP call made by Recon to request the
-      OM snapshot, specified as a duration (for example, 5s).
-    </description>
-  </property>
-  <property>
-    <name>recon.om.snapshot.task.initial.delay</name>
-    <value>1m</value>
-    <tag>OZONE, RECON, OM</tag>
-    <description>
-      Initial delay before Recon requests an OM DB snapshot, specified
-      as a duration (for example, 1m).
-    </description>
-  </property>
-  <property>
-    <name>recon.om.snapshot.task.interval.delay</name>
-    <value>10m</value>
-    <tag>OZONE, RECON, OM</tag>
-    <description>
-      Interval at which Recon requests an OM DB snapshot, specified as
-      a duration (for example, 10m).
-    </description>
-  </property>
-  <property>
-    <name>recon.om.snapshot.task.flush.param</name>
-    <value>false</value>
-    <tag>OZONE, RECON, OM</tag>
-    <description>
-      Whether to request a flush of the OM DB before taking the
-      checkpoint snapshot.
-    </description>
-  </property>
-  <property>
-    <name>hdds.tracing.enabled</name>
-    <value>true</value>
-    <tag>OZONE, HDDS</tag>
-    <description>
-      If enabled, tracing information is sent to the tracing server.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.sql.db.driver</name>
-    <value>org.sqlite.JDBC</value>
-    <tag>OZONE, RECON</tag>
-    <description>
-      Database driver class name available on the
-      Ozone Recon classpath.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.sql.db.jdbc.url</name>
-    <value>jdbc:sqlite:/${ozone.recon.db.dir}/ozone_recon_sqlite.db</value>
-    <tag>OZONE, RECON</tag>
-    <description>
-      Ozone Recon SQL database JDBC URL.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.sql.db.username</name>
-    <value/>
-    <tag>OZONE, RECON</tag>
-    <description>
-      Ozone Recon SQL database username.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.sql.db.password</name>
-    <value/>
-    <tag>OZONE, RECON</tag>
-    <description>
-      Ozone Recon SQL database password.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.sql.db.auto.commit</name>
-    <value>false</value>
-    <tag>OZONE, RECON</tag>
-    <description>
-      Sets the auto-commit property of Ozone Recon database connections
-      to true/false.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.sql.db.conn.timeout</name>
-    <value>30000</value>
-    <tag>OZONE, RECON</tag>
-    <description>
-      Sets the time in milliseconds before a call to getConnection times out.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.sql.db.conn.max.active</name>
-    <value>1</value>
-    <tag>OZONE, RECON</tag>
-    <description>
-      The maximum number of active connections to the SQL database. The
-      default SQLite database only allows a single active connection; set
-      this to a reasonable value, such as 10, for an external production
-      database.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.sql.db.conn.max.age</name>
-    <value>1800</value>
-    <tag>OZONE, RECON</tag>
-    <description>
-      Sets the maximum time, in seconds, that a connection can be active.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.sql.db.conn.idle.max.age</name>
-    <value>3600</value>
-    <tag>OZONE, RECON</tag>
-    <description>
-      Sets the maximum time to live, in seconds, for an idle connection.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.sql.db.conn.idle.test.period</name>
-    <value>60</value>
-    <tag>OZONE, RECON</tag>
-    <description>
-      Sets the time, in seconds, that a connection may remain idle before
-      a test query is sent to the DB. This is useful to prevent the DB
-      from timing out connections on its end.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.sql.db.conn.idle.test</name>
-    <value>SELECT 1</value>
-    <tag>OZONE, RECON</tag>
-    <description>
-      The query to send to the DB to maintain keep-alives and test for dead
-      connections.
-    </description>
-  </property>
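
The idle-test settings above describe a standard connection-pool keep-alive: once a connection has been idle for the configured period, the pool runs the test query and evicts the connection if it fails. A minimal JDBC sketch of such a check (illustrative, not from the deleted sources):

```java
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

// Illustrative keep-alive check a pool might run once a connection has
// been idle for ozone.recon.sql.db.conn.idle.test.period seconds.
final class IdleConnectionTestSketch {
  static boolean isAlive(Connection conn, String testQuery) {
    try (Statement stmt = conn.createStatement()) {
      stmt.execute(testQuery); // e.g. the configured "SELECT 1"
      return true;             // connection is healthy, keep it
    } catch (SQLException e) {
      return false;            // treat as dead; the pool evicts it
    }
  }
}
```
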
-  <property>
-    <name>ozone.recon.task.thread.count</name>
-    <value>1</value>
-    <tag>OZONE, RECON</tag>
-    <description>
-      The number of threads for Recon tasks that wait on updates from OM.
-    </description>
-  </property>
-</configuration>
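
For reference, these keys are consumed through the standard Hadoop Configuration API; duration-valued keys such as recon.om.snapshot.task.interval.delay carry unit suffixes (10m), so getTimeDuration rather than getInt is the natural accessor. A short sketch with a hypothetical class name:

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

// Hypothetical reader for a few of the keys defined above.
final class OzoneDefaultsReadSketch {
  static void read(Configuration conf) {
    int maxRetries = conf.getInt("ozone.client.retry.max.attempts", 10);
    boolean reconHttp = conf.getBoolean("ozone.recon.http.enabled", true);
    // "10m" parses as ten minutes; the result is returned in the given unit.
    long snapshotIntervalMin = conf.getTimeDuration(
        "recon.om.snapshot.task.interval.delay", 10, TimeUnit.MINUTES);
    System.out.println(maxRetries + " " + reconHttp + " "
        + snapshotIntervalMin);
  }
}
```
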
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
deleted file mode 100644
index 7563610..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds;
-
-import java.util.Optional;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Testing HddsUtils.
- */
-public class TestHddsUtils {
-
-  @Test
-  public void testGetHostName() {
-    Assert.assertEquals(Optional.of("localhost"),
-        HddsUtils.getHostName("localhost:1234"));
-
-    Assert.assertEquals(Optional.of("localhost"),
-        HddsUtils.getHostName("localhost"));
-
-    Assert.assertEquals(Optional.empty(),
-        HddsUtils.getHostName(":1234"));
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
deleted file mode 100644
index f18fd5e..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * Example configuration to test the configuration injection.
- */
-@ConfigGroup(prefix = "ozone.scm.client")
-public class SimpleConfiguration {
-
-  private String clientAddress;
-
-  private String bindHost;
-
-  private boolean enabled;
-
-  private int port = 1234;
-
-  private long waitTime = 1;
-
-  @Config(key = "address", defaultValue = "localhost", description = "Just "
-      + "for testing", tags = ConfigTag.MANAGEMENT)
-  public void setClientAddress(String clientAddress) {
-    this.clientAddress = clientAddress;
-  }
-
-  @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Just "
-      + "for testing", tags = ConfigTag.MANAGEMENT)
-  public void setBindHost(String bindHost) {
-    this.bindHost = bindHost;
-  }
-
-  @Config(key = "enabled", defaultValue = "true", description = "Just for "
-      + "testing", tags = ConfigTag.MANAGEMENT)
-  public void setEnabled(boolean enabled) {
-    this.enabled = enabled;
-  }
-
-  @Config(key = "port", defaultValue = "9878", description = "Just for "
-      + "testing", tags = ConfigTag.MANAGEMENT)
-  public void setPort(int port) {
-    this.port = port;
-  }
-
-  @Config(key = "wait", type = ConfigType.TIME, timeUnit =
-      TimeUnit.SECONDS, defaultValue = "10m", description = "Just for "
-      + "testing", tags = ConfigTag.MANAGEMENT)
-  public void setWaitTime(long waitTime) {
-    this.waitTime = waitTime;
-  }
-
-  public String getClientAddress() {
-    return clientAddress;
-  }
-
-  public String getBindHost() {
-    return bindHost;
-  }
-
-  public boolean isEnabled() {
-    return enabled;
-  }
-
-  public int getPort() {
-    return port;
-  }
-
-  public long getWaitTime() {
-    return waitTime;
-  }
-}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
deleted file mode 100644
index 0a80478..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.junit.Rule;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.Assert;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Test class for OzoneConfiguration.
- */
-public class TestOzoneConfiguration {
-
-  private Configuration conf;
-
-  @Rule
-  public TemporaryFolder tempConfigs = new TemporaryFolder();
-
-  @Before
-  public void setUp() throws Exception {
-    conf = new OzoneConfiguration();
-  }
-
-  private void startConfig(BufferedWriter out) throws IOException {
-    out.write("<?xml version=\"1.0\"?>\n");
-    out.write("<configuration>\n");
-  }
-
-  private void endConfig(BufferedWriter out) throws IOException {
-    out.write("</configuration>\n");
-    out.flush();
-    out.close();
-  }
-
-  @Test
-  public void testGetAllPropertiesByTags() throws Exception {
-    File coreDefault = tempConfigs.newFile("core-default-test.xml");
-    File coreSite = tempConfigs.newFile("core-site-test.xml");
-    try (BufferedWriter out = new BufferedWriter(new FileWriter(coreDefault))) {
-      startConfig(out);
-      appendProperty(out, "hadoop.tags.system", "YARN,HDFS,NAMENODE");
-      appendProperty(out, "hadoop.tags.custom", "MYCUSTOMTAG");
-      appendPropertyByTag(out, "dfs.cblock.trace.io", "false", "YARN");
-      appendPropertyByTag(out, "dfs.replication", "1", "HDFS");
-      appendPropertyByTag(out, "dfs.namenode.logging.level", "INFO",
-          "NAMENODE");
-      appendPropertyByTag(out, "dfs.random.key", "XYZ", "MYCUSTOMTAG");
-      endConfig(out);
-
-      Path fileResource = new Path(coreDefault.getAbsolutePath());
-      conf.addResource(fileResource);
-      Assert.assertEquals(conf.getAllPropertiesByTag("MYCUSTOMTAG")
-          .getProperty("dfs.random.key"), "XYZ");
-    }
-
-    try (BufferedWriter out = new BufferedWriter(new FileWriter(coreSite))) {
-      startConfig(out);
-      appendProperty(out, "dfs.random.key", "ABC");
-      appendProperty(out, "dfs.replication", "3");
-      appendProperty(out, "dfs.cblock.trace.io", "true");
-      endConfig(out);
-
-      Path fileResource = new Path(coreSite.getAbsolutePath());
-      conf.addResource(fileResource);
-    }
-
-    // Test if values are getting overridden even without tags being present
-    Assert.assertEquals("3", conf.getAllPropertiesByTag("HDFS")
-        .getProperty("dfs.replication"));
-    Assert.assertEquals("ABC", conf.getAllPropertiesByTag("MYCUSTOMTAG")
-        .getProperty("dfs.random.key"));
-    Assert.assertEquals("true", conf.getAllPropertiesByTag("YARN")
-        .getProperty("dfs.cblock.trace.io"));
-  }
-
-  @Test
-  public void getConfigurationObject() {
-    OzoneConfiguration ozoneConfig = new OzoneConfiguration();
-    ozoneConfig.set("ozone.scm.client.address", "address");
-    ozoneConfig.set("ozone.scm.client.bind.host", "host");
-    ozoneConfig.setBoolean("ozone.scm.client.enabled", true);
-    ozoneConfig.setInt("ozone.scm.client.port", 5555);
-    ozoneConfig.setTimeDuration("ozone.scm.client.wait", 10, TimeUnit.MINUTES);
-
-    SimpleConfiguration configuration =
-        ozoneConfig.getObject(SimpleConfiguration.class);
-
-    Assert.assertEquals("host", configuration.getBindHost());
-    Assert.assertEquals("address", configuration.getClientAddress());
-    Assert.assertEquals(true, configuration.isEnabled());
-    Assert.assertEquals(5555, configuration.getPort());
-    Assert.assertEquals(600, configuration.getWaitTime());
-  }
-
-  @Test
-  public void getConfigurationObjectWithDefault() {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-
-    SimpleConfiguration configuration =
-        ozoneConfiguration.getObject(SimpleConfiguration.class);
-
-    Assert.assertEquals(true, configuration.isEnabled());
-    Assert.assertEquals(9878, configuration.getPort());
-  }
-
-
-  private void appendProperty(BufferedWriter out, String name, String val)
-      throws IOException {
-    this.appendProperty(out, name, val, false);
-  }
-
-  private void appendProperty(BufferedWriter out, String name, String val,
-                              boolean isFinal) throws IOException {
-    out.write("<property>");
-    out.write("<name>");
-    out.write(name);
-    out.write("</name>");
-    out.write("<value>");
-    out.write(val);
-    out.write("</value>");
-    if (isFinal) {
-      out.write("<final>true</final>");
-    }
-    out.write("</property>\n");
-  }
-
-  private void appendPropertyByTag(BufferedWriter out, String name, String val,
-                                   String tags) throws IOException {
-    this.appendPropertyByTag(out, name, val, false, tags);
-  }
-
-  private void appendPropertyByTag(BufferedWriter out, String name, String val,
-                                   boolean isFinal,
-                                   String tag) throws IOException {
-    out.write("<property>");
-    out.write("<name>");
-    out.write(name);
-    out.write("</name>");
-    out.write("<value>");
-    out.write(val);
-    out.write("</value>");
-    if (isFinal) {
-      out.write("<final>true</final>");
-    }
-    out.write("<tag>");
-    out.write(tag);
-    out.write("</tag>");
-    out.write("</property>\n");
-  }
-}
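
Read together with SimpleConfiguration above, this test shows the annotation-driven injection these deleted classes provided: @ConfigGroup contributes the key prefix, each @Config contributes the suffix, and OzoneConfiguration.getObject wires matching values into the setters (so ozone.scm.client.address lands in setClientAddress). A minimal sketch under the same API, with hypothetical names:

```java
import org.apache.hadoop.hdds.conf.Config;
import org.apache.hadoop.hdds.conf.ConfigGroup;
import org.apache.hadoop.hdds.conf.ConfigTag;

// Hypothetical group; the key below resolves to "my.service.endpoint".
@ConfigGroup(prefix = "my.service")
public class MyServiceConfiguration {

  private String endpoint;

  @Config(key = "endpoint", defaultValue = "localhost:9878",
      description = "Just for illustration", tags = ConfigTag.MANAGEMENT)
  public void setEndpoint(String endpoint) {
    this.endpoint = endpoint;
  }

  public String getEndpoint() {
    return endpoint;
  }
}

// Usage: MyServiceConfiguration cfg =
//     new OzoneConfiguration().getObject(MyServiceConfiguration.class);
```
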
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
deleted file mode 100644
index e72c902..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains the OzoneConfiguration related tests.
- */
-package org.apache.hadoop.hdds.conf;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java
deleted file mode 100644
index bbe6ab7..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.ratis;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.common.ChecksumData;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.Random;
-import java.util.UUID;
-import java.util.function.BiFunction;
-
-/** Testing {@link ContainerCommandRequestMessage}. */
-public class TestContainerCommandRequestMessage {
-  static final Random RANDOM = new Random();
-
-  static ByteString newData(int length, Random random) {
-    final ByteString.Output out = ByteString.newOutput();
-    for(int i = 0; i < length; i++) {
-      out.write(random.nextInt());
-    }
-    return out.toByteString();
-  }
-
-  static ChecksumData checksum(ByteString data) {
-    try {
-      return new Checksum().computeChecksum(data.toByteArray());
-    } catch (OzoneChecksumException e) {
-      throw new IllegalStateException(e);
-    }
-  }
-
-  static ContainerCommandRequestProto newPutSmallFile(
-      BlockID blockID, ByteString data) {
-    final BlockData.Builder blockData
-        = BlockData.newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf());
-    final PutBlockRequestProto.Builder putBlockRequest
-        = PutBlockRequestProto.newBuilder()
-        .setBlockData(blockData);
-    final KeyValue keyValue = KeyValue.newBuilder()
-        .setKey("OverWriteRequested")
-        .setValue("true")
-        .build();
-    final ChunkInfo chunk = ChunkInfo.newBuilder()
-        .setChunkName(blockID.getLocalID() + "_chunk")
-        .setOffset(0)
-        .setLen(data.size())
-        .addMetadata(keyValue)
-        .setChecksumData(checksum(data).getProtoBufMessage())
-        .build();
-    final PutSmallFileRequestProto putSmallFileRequest
-        = PutSmallFileRequestProto.newBuilder()
-        .setChunkInfo(chunk)
-        .setBlock(putBlockRequest)
-        .setData(data)
-        .build();
-    return ContainerCommandRequestProto.newBuilder()
-        .setCmdType(Type.PutSmallFile)
-        .setContainerID(blockID.getContainerID())
-        .setDatanodeUuid(UUID.randomUUID().toString())
-        .setPutSmallFile(putSmallFileRequest)
-        .build();
-  }
-
-  static ContainerCommandRequestProto newWriteChunk(
-      BlockID blockID, ByteString data) {
-    final ChunkInfo chunk = ChunkInfo.newBuilder()
-        .setChunkName(blockID.getLocalID() + "_chunk_" + 1)
-        .setOffset(0)
-        .setLen(data.size())
-        .setChecksumData(checksum(data).getProtoBufMessage())
-        .build();
-
-    final WriteChunkRequestProto.Builder writeChunkRequest
-        = WriteChunkRequestProto.newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
-        .setChunkData(chunk)
-        .setData(data);
-    return ContainerCommandRequestProto.newBuilder()
-        .setCmdType(Type.WriteChunk)
-        .setContainerID(blockID.getContainerID())
-        .setDatanodeUuid(UUID.randomUUID().toString())
-        .setWriteChunk(writeChunkRequest)
-        .build();
-  }
-
-  @Test
-  public void testPutSmallFile() throws Exception {
-    runTest(TestContainerCommandRequestMessage::newPutSmallFile);
-  }
-
-  @Test
-  public void testWriteChunk() throws Exception {
-    runTest(TestContainerCommandRequestMessage::newWriteChunk);
-  }
-
-  static void runTest(
-      BiFunction<BlockID, ByteString, ContainerCommandRequestProto> method)
-      throws Exception {
-    for(int i = 0; i < 2; i++) {
-      runTest(i, method);
-    }
-    for(int i = 2; i < 1 << 10;) {
-      runTest(i + 1 + RANDOM.nextInt(i - 1), method);
-      i <<= 1;
-      runTest(i, method);
-    }
-  }
-
-  static void runTest(int length,
-      BiFunction<BlockID, ByteString, ContainerCommandRequestProto> method)
-      throws Exception {
-    System.out.println("length=" + length);
-    final BlockID blockID = new BlockID(RANDOM.nextLong(), RANDOM.nextLong());
-    final ByteString data = newData(length, RANDOM);
-
-    final ContainerCommandRequestProto original = method.apply(blockID, data);
-    final ContainerCommandRequestMessage message
-        = ContainerCommandRequestMessage.toMessage(original, null);
-    final ContainerCommandRequestProto computed
-        = ContainerCommandRequestMessage.toProto(message.getContent(), null);
-    Assert.assertEquals(original, computed);
-  }
-}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/exceptions/TestSCMExceptionResultCodes.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/exceptions/TestSCMExceptionResultCodes.java
deleted file mode 100644
index b5b4684..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/exceptions/TestSCMExceptionResultCodes.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.exceptions;
-
-import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
-import org.apache.hadoop.hdds.protocol.proto.
-    ScmBlockLocationProtocolProtos.Status;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Tests the result code mapping between SCMException and the protobuf definitions.
- */
-public class TestSCMExceptionResultCodes {
-
-  @Test
-  public void codeMapping() {
-    // ResultCode = SCMException definition
-    // Status = protobuf definition
-    Assert.assertEquals(ResultCodes.values().length, Status.values().length);
-    for (int i = 0; i < ResultCodes.values().length; i++) {
-      ResultCodes codeValue = ResultCodes.values()[i];
-      Status protoBufValue = Status.values()[i];
-      Assert.assertTrue(String
-          .format("Protobuf/Enum constant name mismatch %s %s", codeValue,
-              protoBufValue), sameName(codeValue.name(), protoBufValue.name()));
-      ResultCodes converted = ResultCodes.values()[protoBufValue.ordinal()];
-      Assert.assertEquals(codeValue, converted);
-    }
-  }
-
-  private boolean sameName(String codeValue, String protoBufValue) {
-    return codeValue.equals(protoBufValue);
-  }
-
-}
-
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
deleted file mode 100644
index b31e4a8..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
+++ /dev/null
@@ -1,953 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import org.apache.hadoop.conf.Configuration;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.REGION_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.DATACENTER_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.NODEGROUP_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
-
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.stream.Collectors;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-import org.junit.runner.RunWith;
-
-/** Test the network topology functions. */
-@RunWith(Parameterized.class)
-public class TestNetworkTopologyImpl {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      TestNetworkTopologyImpl.class);
-  private NetworkTopology cluster;
-  private Node[] dataNodes;
-  private Random random = new Random();
-
-  public TestNetworkTopologyImpl(NodeSchema[] schemas, Node[] nodeArray) {
-    NodeSchemaManager.getInstance().init(schemas, true);
-    cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
-    dataNodes = nodeArray;
-    for (int i = 0; i < dataNodes.length; i++) {
-      cluster.add(dataNodes[i]);
-    }
-  }
-
-  @Rule
-  public Timeout testTimeout = new Timeout(3000000);
-
-  @Parameters
-  public static Collection<Object[]> setupDatanodes() {
-    Object[][] topologies = new Object[][]{
-        {new NodeSchema[] {ROOT_SCHEMA, LEAF_SCHEMA},
-            new Node[]{
-                createDatanode("1.1.1.1", "/"),
-                createDatanode("2.2.2.2", "/"),
-                createDatanode("3.3.3.3", "/"),
-                createDatanode("4.4.4.4", "/"),
-                createDatanode("5.5.5.5", "/"),
-                createDatanode("6.6.6.6", "/"),
-                createDatanode("7.7.7.7", "/"),
-                createDatanode("8.8.8.8", "/"),
-            }},
-        {new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA},
-            new Node[]{
-                createDatanode("1.1.1.1", "/r1"),
-                createDatanode("2.2.2.2", "/r1"),
-                createDatanode("3.3.3.3", "/r2"),
-                createDatanode("4.4.4.4", "/r2"),
-                createDatanode("5.5.5.5", "/r2"),
-                createDatanode("6.6.6.6", "/r3"),
-                createDatanode("7.7.7.7", "/r3"),
-                createDatanode("8.8.8.8", "/r3"),
-            }},
-        {new NodeSchema[]
-            {ROOT_SCHEMA, DATACENTER_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA},
-            new Node[]{
-                createDatanode("1.1.1.1", "/d1/r1"),
-                createDatanode("2.2.2.2", "/d1/r1"),
-                createDatanode("3.3.3.3", "/d1/r2"),
-                createDatanode("4.4.4.4", "/d1/r2"),
-                createDatanode("5.5.5.5", "/d1/r2"),
-                createDatanode("6.6.6.6", "/d2/r3"),
-                createDatanode("7.7.7.7", "/d2/r3"),
-                createDatanode("8.8.8.8", "/d2/r3"),
-            }},
-        {new NodeSchema[] {ROOT_SCHEMA, DATACENTER_SCHEMA, RACK_SCHEMA,
-            NODEGROUP_SCHEMA, LEAF_SCHEMA},
-            new Node[]{
-                createDatanode("1.1.1.1", "/d1/r1/ng1"),
-                createDatanode("2.2.2.2", "/d1/r1/ng1"),
-                createDatanode("3.3.3.3", "/d1/r2/ng2"),
-                createDatanode("4.4.4.4", "/d1/r2/ng2"),
-                createDatanode("5.5.5.5", "/d1/r2/ng3"),
-                createDatanode("6.6.6.6", "/d2/r3/ng3"),
-                createDatanode("7.7.7.7", "/d2/r3/ng3"),
-                createDatanode("8.8.8.8", "/d2/r3/ng3"),
-                createDatanode("9.9.9.9", "/d3/r1/ng1"),
-                createDatanode("10.10.10.10", "/d3/r1/ng1"),
-                createDatanode("11.11.11.11", "/d3/r1/ng1"),
-                createDatanode("12.12.12.12", "/d3/r2/ng2"),
-                createDatanode("13.13.13.13", "/d3/r2/ng2"),
-                createDatanode("14.14.14.14", "/d4/r1/ng1"),
-                createDatanode("15.15.15.15", "/d4/r1/ng1"),
-                createDatanode("16.16.16.16", "/d4/r1/ng1"),
-                createDatanode("17.17.17.17", "/d4/r1/ng2"),
-                createDatanode("18.18.18.18", "/d4/r1/ng2"),
-                createDatanode("19.19.19.19", "/d4/r1/ng3"),
-                createDatanode("20.20.20.20", "/d4/r1/ng3"),
-            }},
-        {new NodeSchema[] {ROOT_SCHEMA, REGION_SCHEMA, DATACENTER_SCHEMA,
-            RACK_SCHEMA, NODEGROUP_SCHEMA, LEAF_SCHEMA},
-            new Node[]{
-                createDatanode("1.1.1.1", "/d1/rg1/r1/ng1"),
-                createDatanode("2.2.2.2", "/d1/rg1/r1/ng1"),
-                createDatanode("3.3.3.3", "/d1/rg1/r1/ng2"),
-                createDatanode("4.4.4.4", "/d1/rg1/r1/ng1"),
-                createDatanode("5.5.5.5", "/d1/rg1/r1/ng1"),
-                createDatanode("6.6.6.6", "/d1/rg1/r1/ng2"),
-                createDatanode("7.7.7.7", "/d1/rg1/r1/ng2"),
-                createDatanode("8.8.8.8", "/d1/rg1/r1/ng2"),
-                createDatanode("9.9.9.9", "/d1/rg1/r1/ng2"),
-                createDatanode("10.10.10.10", "/d1/rg1/r1/ng2"),
-                createDatanode("11.11.11.11", "/d1/rg1/r2/ng1"),
-                createDatanode("12.12.12.12", "/d1/rg1/r2/ng1"),
-                createDatanode("13.13.13.13", "/d1/rg1/r2/ng1"),
-                createDatanode("14.14.14.14", "/d1/rg1/r2/ng1"),
-                createDatanode("15.15.15.15", "/d1/rg1/r2/ng1"),
-                createDatanode("16.16.16.16", "/d1/rg1/r2/ng2"),
-                createDatanode("17.17.17.17", "/d1/rg1/r2/ng2"),
-                createDatanode("18.18.18.18", "/d1/rg1/r2/ng2"),
-                createDatanode("19.19.19.19", "/d1/rg1/r2/ng2"),
-                createDatanode("20.20.20.20", "/d1/rg1/r2/ng2"),
-                createDatanode("21.21.21.21", "/d2/rg1/r2/ng1"),
-                createDatanode("22.22.22.22", "/d2/rg1/r2/ng1"),
-                createDatanode("23.23.23.23", "/d2/rg2/r2/ng1"),
-                createDatanode("24.24.24.24", "/d2/rg2/r2/ng1"),
-                createDatanode("25.25.25.25", "/d2/rg2/r2/ng1"),
-            }}
-    };
-    return Arrays.asList(topologies);
-  }
-
-  @Test
-  public void testContains() {
-    Node nodeNotInMap = createDatanode("8.8.8.8", "/d2/r4");
-    for (int i=0; i < dataNodes.length; i++) {
-      assertTrue(cluster.contains(dataNodes[i]));
-    }
-    assertFalse(cluster.contains(nodeNotInMap));
-  }
-
-  @Test
-  public void testNumOfChildren() {
-    assertEquals(dataNodes.length, cluster.getNumOfLeafNode(null));
-    assertEquals(0, cluster.getNumOfLeafNode("/switch1/node1"));
-  }
-
-  @Test
-  public void testGetNode() {
-    assertEquals(cluster.getNode(""), cluster.getNode(null));
-    assertEquals(cluster.getNode(""), cluster.getNode("/"));
-    assertEquals(null, cluster.getNode("/switch1/node1"));
-    assertEquals(null, cluster.getNode("/switch1"));
-  }
-
-  @Test
-  public void testCreateInvalidTopology() {
-    List<NodeSchema> schemas = new ArrayList<NodeSchema>();
-    schemas.add(ROOT_SCHEMA);
-    schemas.add(RACK_SCHEMA);
-    schemas.add(LEAF_SCHEMA);
-    NodeSchemaManager.getInstance().init(schemas.toArray(new NodeSchema[0]),
-        true);
-    NetworkTopology newCluster = new NetworkTopologyImpl(
-        NodeSchemaManager.getInstance());
-    Node[] invalidDataNodes = new Node[] {
-        createDatanode("1.1.1.1", "/r1"),
-        createDatanode("2.2.2.2", "/r2"),
-        createDatanode("3.3.3.3", "/d1/r2")
-    };
-    newCluster.add(invalidDataNodes[0]);
-    newCluster.add(invalidDataNodes[1]);
-    try {
-      newCluster.add(invalidDataNodes[2]);
-      fail("expected InvalidTopologyException");
-    } catch (NetworkTopology.InvalidTopologyException e) {
-      assertTrue(e.getMessage().contains("Failed to add"));
-      assertTrue(e.getMessage().contains("Its path depth is not " +
-          newCluster.getMaxLevel()));
-    }
-  }
-
-  @Test
-  public void testInitWithConfigFile() {
-    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-    Configuration conf = new Configuration();
-    try {
-      String filePath = classLoader.getResource(
-          "./networkTopologyTestFiles/good.xml").getPath();
-      conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath);
-      NetworkTopology newCluster = new NetworkTopologyImpl(conf);
-      LOG.info("network topology max level = " + newCluster.getMaxLevel());
-    } catch (Throwable e) {
-      fail("should succeed");
-    }
-  }
-
-  @Test
-  public void testAncestor() {
-    assumeTrue(cluster.getMaxLevel() > 2);
-    int maxLevel = cluster.getMaxLevel();
-    assertTrue(cluster.isSameParent(dataNodes[0], dataNodes[1]));
-    while(maxLevel > 1) {
-      assertTrue(cluster.isSameAncestor(dataNodes[0], dataNodes[1],
-          maxLevel - 1));
-      maxLevel--;
-    }
-    assertFalse(cluster.isSameParent(dataNodes[1], dataNodes[2]));
-    assertFalse(cluster.isSameParent(null, dataNodes[2]));
-    assertFalse(cluster.isSameParent(dataNodes[1], null));
-    assertFalse(cluster.isSameParent(null, null));
-
-    assertFalse(cluster.isSameAncestor(dataNodes[1], dataNodes[2], 0));
-    assertFalse(cluster.isSameAncestor(dataNodes[1], null, 1));
-    assertFalse(cluster.isSameAncestor(null, dataNodes[2], 1));
-    assertFalse(cluster.isSameAncestor(null, null, 1));
-
-    maxLevel = cluster.getMaxLevel();
-    assertTrue(cluster.isSameAncestor(
-        dataNodes[random.nextInt(cluster.getNumOfLeafNode(null))],
-        dataNodes[random.nextInt(cluster.getNumOfLeafNode(null))],
-        maxLevel - 1));
-  }
-
-  @Test
-  public void testAddRemove() {
-    for(int i = 0; i < dataNodes.length; i++) {
-      cluster.remove(dataNodes[i]);
-    }
-    for(int i = 0; i < dataNodes.length; i++) {
-      assertFalse(cluster.contains(dataNodes[i]));
-    }
-    // no leaf nodes
-    assertEquals(0, cluster.getNumOfLeafNode(null));
-    // no inner nodes
-    assertEquals(0, cluster.getNumOfNodes(2));
-    for(int i = 0; i < dataNodes.length; i++) {
-      cluster.add(dataNodes[i]);
-    }
-    // Inner nodes are created automatically
-    assertTrue(cluster.getNumOfNodes(2) > 0);
-
-    try {
-      cluster.add(cluster.chooseRandom(null).getParent());
-      fail("Inner node can not be added manually");
-    } catch (Exception e) {
-      assertTrue(e.getMessage().startsWith(
-          "Not allowed to add an inner node"));
-    }
-
-    try {
-      cluster.remove(cluster.chooseRandom(null).getParent());
-      fail("Inner node can not be removed manually");
-    } catch (Exception e) {
-      assertTrue(e.getMessage().startsWith(
-          "Not allowed to remove an inner node"));
-    }
-  }
-
-  @Test
-  public void testGetNodesWithLevel() {
-    int maxLevel = cluster.getMaxLevel();
-    try {
-      assertEquals(1, cluster.getNumOfNodes(0));
-      fail("level 0 is not supported");
-    } catch (IllegalArgumentException e) {
-      assertTrue(e.getMessage().startsWith("Invalid level"));
-    }
-
-    try {
-      assertEquals(1, cluster.getNumOfNodes(0));
-      fail("level 0 is not supported");
-    } catch (IllegalArgumentException e) {
-      assertTrue(e.getMessage().startsWith("Invalid level"));
-    }
-
-    try {
-      assertEquals(1, cluster.getNumOfNodes(maxLevel + 1));
-      fail("level out of scope");
-    } catch (IllegalArgumentException e) {
-      assertTrue(e.getMessage().startsWith("Invalid level"));
-    }
-
-    try {
-      assertEquals(1, cluster.getNumOfNodes(maxLevel + 1));
-      fail("level out of scope");
-    } catch (IllegalArgumentException e) {
-      assertTrue(e.getMessage().startsWith("Invalid level"));
-    }
-    // root node
-    assertEquals(1, cluster.getNumOfNodes(1));
-    assertEquals(1, cluster.getNumOfNodes(1));
-    // leaf nodes
-    assertEquals(dataNodes.length, cluster.getNumOfNodes(maxLevel));
-    assertEquals(dataNodes.length, cluster.getNumOfNodes(maxLevel));
-  }
-
-  @Test
-  public void testChooseRandomSimple() {
-    String path =
-        dataNodes[random.nextInt(dataNodes.length)].getNetworkFullPath();
-    assertEquals(path, cluster.chooseRandom(path).getNetworkFullPath());
-    path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR));
-    // test chooseRandom(String scope)
-    while (!path.equals(ROOT)) {
-      assertTrue(cluster.chooseRandom(path).getNetworkLocation()
-          .startsWith(path));
-      Node node = cluster.chooseRandom("~" + path);
-      assertTrue(!node.getNetworkLocation()
-          .startsWith(path));
-      path = path.substring(0,
-          path.lastIndexOf(PATH_SEPARATOR_STR));
-    }
-    assertNotNull(cluster.chooseRandom(null));
-    assertNotNull(cluster.chooseRandom(""));
-    assertNotNull(cluster.chooseRandom("/"));
-    assertNull(cluster.chooseRandom("~"));
-    assertNull(cluster.chooseRandom("~/"));
-
-    // test chooseRandom(String scope, String excludedScope)
-    path = dataNodes[random.nextInt(dataNodes.length)].getNetworkFullPath();
-    List<String> pathList = new ArrayList<>();
-    pathList.add(path);
-    assertNull(cluster.chooseRandom(path, pathList));
-    assertNotNull(cluster.chooseRandom(null, pathList));
-    assertNotNull(cluster.chooseRandom("", pathList));
-
-    // test chooseRandom(String scope, Collection<Node> excludedNodes)
-    assertNull(cluster.chooseRandom("", Arrays.asList(dataNodes)));
-    assertNull(cluster.chooseRandom("/", Arrays.asList(dataNodes)));
-    assertNull(cluster.chooseRandom("~", Arrays.asList(dataNodes)));
-    assertNull(cluster.chooseRandom("~/", Arrays.asList(dataNodes)));
-    assertNull(cluster.chooseRandom(null, Arrays.asList(dataNodes)));
-  }
-
-  /**
-   * Following test checks that chooseRandom works for an excluded scope.
-   */
-  @Test
-  public void testChooseRandomExcludedScope() {
-    int[] excludedNodeIndexs = {0, dataNodes.length - 1,
-        random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)};
-    String scope;
-    Map<Node, Integer> frequency;
-    for (int i : excludedNodeIndexs) {
-      String path = dataNodes[i].getNetworkFullPath();
-      while (!path.equals(ROOT)) {
-        scope = "~" + path;
-        frequency = pickNodesAtRandom(100, scope, null, 0);
-        for (Node key : dataNodes) {
-          if (key.getNetworkFullPath().startsWith(path)) {
-            assertTrue(frequency.get(key) == 0);
-          }
-        }
-        path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR));
-      }
-    }
-
-    // null excludedScope, every node should be chosen
-    frequency = pickNodes(100, null, null, null, 0);
-    for (Node key : dataNodes) {
-      assertTrue(frequency.get(key) != 0);
-    }
-
-    // "" excludedScope,  no node will ever be chosen
-    List<String> pathList = new ArrayList<>();
-    pathList.add("");
-    frequency = pickNodes(100, pathList, null, null, 0);
-    for (Node key : dataNodes) {
-      assertTrue(frequency.get(key) == 0);
-    }
-
-    // "~" scope, no node will ever be chosen
-    scope = "~";
-    frequency = pickNodesAtRandom(100, scope, null, 0);
-    for (Node key : dataNodes) {
-      assertTrue(frequency.get(key) == 0);
-    }
-    // out network topology excluded scope, every node should be chosen
-    pathList.clear();
-    pathList.add("/city1");
-    frequency = pickNodes(
-        cluster.getNumOfLeafNode(null), pathList, null, null, 0);
-    for (Node key : dataNodes) {
-      assertTrue(frequency.get(key) != 0);
-    }
-  }
-
-  /**
- * Following test checks that chooseRandom works for excluded nodes.
-   */
-  @Test
-  public void testChooseRandomExcludedNode() {
-    Node[][] excludedNodeLists = {
-        {},
-        {dataNodes[0]},
-        {dataNodes[dataNodes.length - 1]},
-        {dataNodes[random.nextInt(dataNodes.length)]},
-        {dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)]
-        },
-        {dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-        }};
-    int leafNum = cluster.getNumOfLeafNode(null);
-    Map<Node, Integer> frequency;
-    for(Node[] list : excludedNodeLists) {
-      List<Node> excludedList = Arrays.asList(list);
-      int ancestorGen = 0;
-      while(ancestorGen < cluster.getMaxLevel()) {
-        frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen);
-        List<Node> ancestorList = NetUtils.getAncestorList(cluster,
-            excludedList, ancestorGen);
-        for (Node key : dataNodes) {
-          if (excludedList.contains(key) ||
-              (ancestorList.size() > 0 &&
-                  ancestorList.stream()
-                      .map(a -> (InnerNode) a)
-                      .filter(a -> a.isAncestor(key))
-                      .collect(Collectors.toList()).size() > 0)) {
-            assertTrue(frequency.get(key) == 0);
-          }
-        }
-        ancestorGen++;
-      }
-    }
-    // all nodes excluded, no node will be picked
-    List<Node> excludedList = Arrays.asList(dataNodes);
-    int ancestorGen = 0;
-    while(ancestorGen < cluster.getMaxLevel()) {
-      frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen);
-      for (Node key : dataNodes) {
-        assertTrue(frequency.get(key) == 0);
-      }
-      ancestorGen++;
-    }
-    // out scope excluded nodes, each node will be picked
-    excludedList = Arrays.asList(createDatanode("1.1.1.1.", "/city1/rack1"));
-    ancestorGen = 0;
-    while(ancestorGen < cluster.getMaxLevel()) {
-      frequency = pickNodes(leafNum, null, excludedList, null, ancestorGen);
-      for (Node key : dataNodes) {
-        assertTrue(frequency.get(key) != 0);
-      }
-      ancestorGen++;
-    }
-  }
-
-  /**
-   * Following test checks that chooseRandom works for excluded nodes and scope.
-   */
-  @Test
-  public void testChooseRandomExcludedNodeAndScope() {
-    int[] excludedNodeIndexs = {0, dataNodes.length - 1,
-        random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)};
-    Node[][] excludedNodeLists = {
-        {},
-        {dataNodes[0]},
-        {dataNodes[dataNodes.length - 1]},
-        {dataNodes[random.nextInt(dataNodes.length)]},
-        {dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)]
-        },
-        {dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-        }};
-    int leafNum = cluster.getNumOfLeafNode(null);
-    Map<Node, Integer> frequency;
-    String scope;
-    for (int i : excludedNodeIndexs) {
-      String path = dataNodes[i].getNetworkFullPath();
-      while (!path.equals(ROOT)) {
-        scope = "~" + path;
-        int ancestorGen = 0;
-        while(ancestorGen < cluster.getMaxLevel()) {
-          for (Node[] list : excludedNodeLists) {
-            List<Node> excludedList = Arrays.asList(list);
-            frequency =
-                pickNodesAtRandom(leafNum, scope, excludedList, ancestorGen);
-            List<Node> ancestorList = NetUtils.getAncestorList(cluster,
-                excludedList, ancestorGen);
-            for (Node key : dataNodes) {
-              if (excludedList.contains(key) ||
-                  key.getNetworkFullPath().startsWith(path) ||
-                  (ancestorList.size() > 0 &&
-                      ancestorList.stream()
-                          .map(a -> (InnerNode) a)
-                          .filter(a -> a.isAncestor(key))
-                          .collect(Collectors.toList()).size() > 0)) {
-                assertTrue(frequency.get(key) == 0);
-              }
-            }
-          }
-          ancestorGen++;
-        }
-        path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR));
-      }
-    }
-    // all nodes excluded, no node will be picked
-    List<Node> excludedList = Arrays.asList(dataNodes);
-    for (int i : excludedNodeIndexs) {
-      String path = dataNodes[i].getNetworkFullPath();
-      while (!path.equals(ROOT)) {
-        scope = "~" + path;
-        int ancestorGen = 0;
-        while (ancestorGen < cluster.getMaxLevel()) {
-          frequency =
-              pickNodesAtRandom(leafNum, scope, excludedList, ancestorGen);
-          for (Node key : dataNodes) {
-            assertTrue(frequency.get(key) == 0);
-          }
-          ancestorGen++;
-        }
-        path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR));
-      }
-    }
-
-    // no node excluded and no excluded scope, each node will be picked
-    int ancestorGen = 0;
-    while (ancestorGen < cluster.getMaxLevel()) {
-      frequency = pickNodes(leafNum, null, null, null, ancestorGen);
-      for (Node key : dataNodes) {
-        assertTrue(frequency.get(key) != 0);
-      }
-      ancestorGen++;
-    }
-  }
-
-  /**
- * Following test checks that chooseRandom works with an affinity node,
- * together with excluded nodes, scope and ancestor generation.
-   */
-  @Test
-  public void testChooseRandomWithAffinityNode() {
-    int[] excludedNodeIndexs = {0, dataNodes.length - 1,
-        random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)};
-    Node[][] excludedNodeLists = {
-        {},
-        {dataNodes[0]},
-        {dataNodes[dataNodes.length - 1]},
-        {dataNodes[random.nextInt(dataNodes.length)]},
-        {dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)]
-        },
-        {dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-        }};
-    int[] affinityNodeIndexs = {0, dataNodes.length - 1,
-        random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)};
-    Node[][] excludedScopeIndexs = {{dataNodes[0]},
-        {dataNodes[dataNodes.length - 1]},
-        {dataNodes[random.nextInt(dataNodes.length)]},
-        {dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)]
-        },
-        {dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-        }};
-    int leafNum = cluster.getNumOfLeafNode(null);
-    Map<Node, Integer> frequency;
-    List<String> pathList = new ArrayList<>();
-    for (int k : affinityNodeIndexs) {
-      for (Node[] excludedScopes : excludedScopeIndexs) {
-        pathList.clear();
-        pathList.addAll(Arrays.stream(excludedScopes)
-            .map(node -> node.getNetworkFullPath())
-            .collect(Collectors.toList()));
-        while (!pathList.get(0).equals(ROOT)) {
-          int ancestorGen = cluster.getMaxLevel() - 1;
-          while (ancestorGen > 0) {
-            for (Node[] list : excludedNodeLists) {
-              List<Node> excludedList = Arrays.asList(list);
-              frequency = pickNodes(leafNum, pathList, excludedList,
-                  dataNodes[k], ancestorGen);
-              Node affinityAncestor = dataNodes[k].getAncestor(ancestorGen);
-              for (Node key : dataNodes) {
-                if (affinityAncestor != null) {
-                  if (frequency.get(key) > 0) {
-                    assertTrue(affinityAncestor.isAncestor(key));
-                  } else if (!affinityAncestor.isAncestor(key)) {
-                    continue;
-                  } else if (excludedList != null &&
-                      excludedList.contains(key)) {
-                    continue;
-                  } else if (pathList != null &&
-                      pathList.stream().anyMatch(path ->
-                          key.getNetworkFullPath().startsWith(path))) {
-                    continue;
-                  } else {
-                    fail("Node is not picked when sequentially going " +
-                        "through ancestor node's leaf nodes. node:" +
-                        key.getNetworkFullPath() + ", ancestor node:" +
-                        affinityAncestor.getNetworkFullPath() +
-                        ", excludedScope: " + pathList.toString() + ", " +
-                        "excludedList:" + (excludedList == null ? "" :
-                        excludedList.toString()));
-                  }
-                }
-              }
-            }
-            ancestorGen--;
-          }
-          pathList = pathList.stream().map(path ->
-              path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)))
-              .collect(Collectors.toList());
-        }
-      }
-    }
-
-    // all nodes excluded, no node will be picked
-    String scope;
-    List<Node> excludedList = Arrays.asList(dataNodes);
-    for (int k : affinityNodeIndexs) {
-      for (int i : excludedNodeIndexs) {
-        String path = dataNodes[i].getNetworkFullPath();
-        while (!path.equals(ROOT)) {
-          scope = "~" + path;
-          int ancestorGen = 0;
-          while (ancestorGen < cluster.getMaxLevel()) {
-            frequency = pickNodesAtRandom(leafNum, scope, excludedList,
-                dataNodes[k], ancestorGen);
-            for (Node key : dataNodes) {
-              assertTrue(frequency.get(key) == 0);
-            }
-            ancestorGen++;
-          }
-          path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR));
-        }
-      }
-    }
-    // no node excluded and no excluded scope, each node will be picked
-    int ancestorGen = cluster.getMaxLevel() - 1;
-    for (int k : affinityNodeIndexs) {
-      while (ancestorGen > 0) {
-        frequency =
-            pickNodes(leafNum, null, null, dataNodes[k], ancestorGen);
-        Node affinityAncestor = dataNodes[k].getAncestor(ancestorGen);
-        for (Node key : dataNodes) {
-          if (frequency.get(key) > 0) {
-            if (affinityAncestor != null) {
-              assertTrue(affinityAncestor.isAncestor(key));
-            }
-          }
-        }
-        ancestorGen--;
-      }
-    }
-    // check invalid ancestor generation
-    try {
-      cluster.chooseRandom(null, null, null, dataNodes[0],
-          cluster.getMaxLevel());
-      fail("ancestor generation exceeds max level, should fail");
-    } catch (Exception e) {
-      assertTrue(e.getMessage().startsWith("ancestorGen " +
-          cluster.getMaxLevel() +
-          " exceeds this network topology acceptable level"));
-    }
-  }
-
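
The assertions above hinge on a frequency map that is seeded with zero for
every datanode, so "never picked" is distinguishable from "absent". A minimal
generic sketch of that counting pattern, with hypothetical names
(FrequencyCheck, pick) standing in for the pickNodes* helpers defined further
below:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Supplier;

    final class FrequencyCheck {
      /** Count how often a picker returns each candidate; unpicked ones stay at 0. */
      static <T> Map<T, Integer> frequencies(List<T> candidates,
          Supplier<T> pick, int rounds) {
        Map<T, Integer> freq = new HashMap<>();
        candidates.forEach(c -> freq.put(c, 0));
        for (int i = 0; i < rounds; i++) {
          T chosen = pick.get();      // may be null when nothing qualifies
          if (chosen != null) {
            freq.merge(chosen, 1, Integer::sum);
          }
        }
        return freq;
      }
    }
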
-  @Test
-  public void testCost() {
-    // network topology with default cost
-    List<NodeSchema> schemas = new ArrayList<>();
-    schemas.add(ROOT_SCHEMA);
-    schemas.add(RACK_SCHEMA);
-    schemas.add(NODEGROUP_SCHEMA);
-    schemas.add(LEAF_SCHEMA);
-
-    NodeSchemaManager manager = NodeSchemaManager.getInstance();
-    manager.init(schemas.toArray(new NodeSchema[0]), true);
-    NetworkTopology newCluster =
-        new NetworkTopologyImpl(manager);
-    Node[] nodeList = new Node[] {
-        createDatanode("1.1.1.1", "/r1/ng1"),
-        createDatanode("2.2.2.2", "/r1/ng1"),
-        createDatanode("3.3.3.3", "/r1/ng2"),
-        createDatanode("4.4.4.4", "/r2/ng1"),
-    };
-    for (Node node: nodeList) {
-      newCluster.add(node);
-    }
-    Node outScopeNode1 = createDatanode("5.5.5.5", "/r2/ng2");
-    Node outScopeNode2 = createDatanode("6.6.6.6", "/r2/ng2");
-    assertEquals(Integer.MAX_VALUE,
-        newCluster.getDistanceCost(nodeList[0], null));
-    assertEquals(Integer.MAX_VALUE,
-        newCluster.getDistanceCost(null, nodeList[0]));
-    assertEquals(Integer.MAX_VALUE,
-        newCluster.getDistanceCost(outScopeNode1, nodeList[0]));
-    assertEquals(Integer.MAX_VALUE,
-        newCluster.getDistanceCost(nodeList[0], outScopeNode1));
-    assertEquals(Integer.MAX_VALUE,
-        newCluster.getDistanceCost(outScopeNode1, outScopeNode2));
-
-    assertEquals(0, newCluster.getDistanceCost(null, null));
-    assertEquals(0, newCluster.getDistanceCost(nodeList[0], nodeList[0]));
-    assertEquals(2, newCluster.getDistanceCost(nodeList[0], nodeList[1]));
-    assertEquals(4, newCluster.getDistanceCost(nodeList[0], nodeList[2]));
-    assertEquals(6, newCluster.getDistanceCost(nodeList[0], nodeList[3]));
-
-    // network topology with customized cost
-    schemas.clear();
-    schemas.add(new NodeSchema.Builder()
-        .setType(NodeSchema.LayerType.ROOT).setCost(5).build());
-    schemas.add(new NodeSchema.Builder()
-        .setType(NodeSchema.LayerType.INNER_NODE).setCost(3).build());
-    schemas.add(new NodeSchema.Builder()
-        .setType(NodeSchema.LayerType.INNER_NODE).setCost(1).build());
-    schemas.add(new NodeSchema.Builder()
-        .setType(NodeSchema.LayerType.LEAF_NODE).build());
-    manager = NodeSchemaManager.getInstance();
-    manager.init(schemas.toArray(new NodeSchema[0]), true);
-    newCluster = new NetworkTopologyImpl(manager);
-    for (Node node: nodeList) {
-      newCluster.add(node);
-    }
-    assertEquals(Integer.MAX_VALUE,
-        newCluster.getDistanceCost(nodeList[0], null));
-    assertEquals(Integer.MAX_VALUE,
-        newCluster.getDistanceCost(null, nodeList[0]));
-    assertEquals(Integer.MAX_VALUE,
-        newCluster.getDistanceCost(outScopeNode1, nodeList[0]));
-    assertEquals(Integer.MAX_VALUE,
-        newCluster.getDistanceCost(nodeList[0], outScopeNode1));
-    assertEquals(Integer.MAX_VALUE,
-        newCluster.getDistanceCost(outScopeNode1, outScopeNode2));
-
-    assertEquals(0, newCluster.getDistanceCost(null, null));
-    assertEquals(0, newCluster.getDistanceCost(nodeList[0], nodeList[0]));
-    assertEquals(2, newCluster.getDistanceCost(nodeList[0], nodeList[1]));
-    assertEquals(8, newCluster.getDistanceCost(nodeList[0], nodeList[2]));
-    assertEquals(18, newCluster.getDistanceCost(nodeList[0], nodeList[3]));
-  }
-
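
The cost figures asserted in testCost (2/4/6 with unit layer costs, then
2/8/18 once root=5, rack=3 and nodegroup=1) are consistent with a model in
which each side is charged the cost of every ancestor it climbs into on the
way to the nearest common ancestor. A hedged sketch of that computation over
a simplified, hypothetical Node type rather than the real interface:

    final class CostModel {
      /** Minimal node: a parent link plus the cost charged when climbing into it. */
      static final class Node {
        final Node parent;
        final int cost;
        Node(Node parent, int cost) { this.parent = parent; this.cost = cost; }
      }

      /** Climb both leaves in lockstep, charging each side the cost of the
       *  ancestor it climbs into, until both sides meet. */
      static int distanceCost(Node a, Node b) {
        if (a == b) {
          return 0;                      // also covers the (null, null) case
        }
        if (a == null || b == null) {
          return Integer.MAX_VALUE;      // the test's "unreachable" sentinel
        }
        int cost = 0;
        while (a != b) {                 // assumes leaves at the same depth
          if (a.parent == null || b.parent == null) {
            return Integer.MAX_VALUE;    // disjoint trees, e.g. out-of-scope nodes
          }
          a = a.parent;
          cost += a.cost;
          b = b.parent;
          cost += b.cost;
        }
        return cost;
      }
    }

With root=5, rack=3 and nodegroup=1 this yields 1+1=2 within a nodegroup,
(1+3)*2=8 across nodegroups, and (1+3+5)*2=18 across racks, matching the
assertions above.
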
-  @Test
-  public void testSortByDistanceCost() {
-    Node[][] nodes = {
-        {},
-        {dataNodes[0]},
-        {dataNodes[dataNodes.length - 1]},
-        {dataNodes[random.nextInt(dataNodes.length)]},
-        {dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)]
-        },
-        {dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-        },
-        {dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-        },
-        {dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-            dataNodes[random.nextInt(dataNodes.length)],
-        }};
-    Node[] readers = {null, dataNodes[0], dataNodes[dataNodes.length - 1],
-        dataNodes[random.nextInt(dataNodes.length)],
-        dataNodes[random.nextInt(dataNodes.length)],
-        dataNodes[random.nextInt(dataNodes.length)]
-    };
-    for (Node reader : readers) {
-      for (Node[] nodeList : nodes) {
-        int length = nodeList.length;
-        while (length > 0) {
-          List<? extends Node> ret = cluster.sortByDistanceCost(reader,
-              Arrays.asList(nodeList), length);
-          for (int i = 0; i < ret.size(); i++) {
-            if ((i + 1) < ret.size()) {
-              int cost1 = cluster.getDistanceCost(reader, ret.get(i));
-              int cost2 = cluster.getDistanceCost(reader, ret.get(i + 1));
-              assertTrue("reader:" + (reader != null ?
-                  reader.getNetworkFullPath() : "null") +
-                  ",node1:" + ret.get(i).getNetworkFullPath() +
-                  ",node2:" + ret.get(i + 1).getNetworkFullPath() +
-                  ",cost1:" + cost1 + ",cost2:" + cost2,
-                  cost1 == Integer.MAX_VALUE || cost1 <= cost2);
-            }
-          }
-          length--;
-        }
-      }
-    }
-
-    // sort all nodes
-    List<Node> nodeList = Arrays.asList(dataNodes.clone());
-    for (Node reader : readers) {
-      int length = nodeList.size();
-      while (length >= 0) {
-        List<? extends Node> sortedNodeList =
-            cluster.sortByDistanceCost(reader, nodeList, length);
-        for (int i = 0; i < sortedNodeList.size(); i++) {
-          if ((i + 1) < sortedNodeList.size()) {
-            int cost1 = cluster.getDistanceCost(reader, sortedNodeList.get(i));
-            int cost2 = cluster.getDistanceCost(
-                reader, sortedNodeList.get(i + 1));
-            // node can be removed when called in testConcurrentAccess
-            assertTrue("reader:" + (reader != null ?
-                reader.getNetworkFullPath() : "null") +
-                ",node1:" + sortedNodeList.get(i).getNetworkFullPath() +
-                ",node2:" + sortedNodeList.get(i + 1).getNetworkFullPath() +
-                ",cost1:" + cost1 + ",cost2:" + cost2,
-                cost1 == Integer.MAX_VALUE || cost1 <= cost2);
-          }
-        }
-        length--;
-      }
-    }
-  }
-
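
The ordering property checked here (each node's cost is at most the next
one's, with Integer.MAX_VALUE treated as "unreachable, last") is exactly what
a comparator over reader-relative cost produces. A small sketch reusing the
hypothetical CostModel above:

    import java.util.Comparator;
    import java.util.List;
    import java.util.stream.Collectors;

    final class DistanceSort {
      /** Order candidates by cost from the reader; unreachable nodes sort last. */
      static List<CostModel.Node> sortByCost(CostModel.Node reader,
          List<CostModel.Node> candidates) {
        return candidates.stream()
            .sorted(Comparator.comparingInt(
                (CostModel.Node n) -> CostModel.distanceCost(reader, n)))
            .collect(Collectors.toList());
      }
    }

A null reader makes every candidate equally unreachable, which is consistent
with the assertion tolerating cost1 == Integer.MAX_VALUE rather than
demanding a strict ordering.
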
-  private static Node createDatanode(String name, String path) {
-    return new NodeImpl(name, path, NetConstants.NODE_COST_DEFAULT);
-  }
-
-  /**
-   * This picks a large number of nodes at random in order to ensure coverage.
-   *
-   * @param numNodes the number of nodes
-   * @param excludedScope the excluded scope
-   * @param excludedNodes the excluded node list
-   * @param ancestorGen the chosen node cannot share the same ancestor at
-   *                    this generation with excludedNodes
-   * @return the frequency with which each node was chosen
-   */
-  private Map<Node, Integer> pickNodesAtRandom(int numNodes,
-      String excludedScope, Collection<Node> excludedNodes, int ancestorGen) {
-    Map<Node, Integer> frequency = new HashMap<Node, Integer>();
-    for (Node dnd : dataNodes) {
-      frequency.put(dnd, 0);
-    }
-    for (int j = 0; j < numNodes; j++) {
-      Node node = cluster.chooseRandom(excludedScope, excludedNodes,
-          ancestorGen);
-      if (node != null) {
-        frequency.put(node, frequency.get(node) + 1);
-      }
-    }
-    LOG.info("Result:" + frequency);
-    return frequency;
-  }
-
-  /**
-   * This picks a large number of nodes at random in order to ensure coverage.
-   *
-   * @param numNodes the number of nodes
-   * @param excludedScope the excluded scope
-   * @param excludedNodes the excluded node list
-   * @param affinityNode the chosen node should share the same ancestor at
-   *                     generation "ancestorGen" with this node
-   * @param ancestorGen  the chosen node cannot share the same ancestor at
-   *                     this generation with excludedNodes
-   * @return the frequency with which each node was chosen
-   */
-  private Map<Node, Integer> pickNodesAtRandom(int numNodes,
-      String excludedScope, Collection<Node> excludedNodes, Node affinityNode,
-      int ancestorGen) {
-    Map<Node, Integer> frequency = new HashMap<Node, Integer>();
-    for (Node dnd : dataNodes) {
-      frequency.put(dnd, 0);
-    }
-
-    List<String> pathList = new ArrayList<>();
-    pathList.add(excludedScope.substring(1));
-    for (int j = 0; j < numNodes; j++) {
-
-      Node node = cluster.chooseRandom("", pathList, excludedNodes,
-          affinityNode, ancestorGen);
-      if (node != null) {
-        frequency.put(node, frequency.get(node) + 1);
-      }
-    }
-    LOG.info("Result:" + frequency);
-    return frequency;
-  }
-
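
One convention shared by these helpers: a scope string beginning with "~"
denotes an excluded scope, which is why the helper above strips the leading
character before handing the path to chooseRandom. A tiny sketch of that
convention (hypothetical class and method names):

    final class ScopeConvention {
      /** "~" marks an excluded scope; anything else is an inclusive scope. */
      static boolean isExcludedScope(String scope) {
        return scope != null && scope.startsWith("~");
      }

      /** Strip the "~" prefix to recover the excluded network path. */
      static String excludedPath(String scope) {
        return isExcludedScope(scope) ? scope.substring(1) : null;
      }
    }
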
-  /**
-   * This picks a large number of nodes sequentially.
-   *
-   * @param numNodes the number of nodes
-   * @param excludedScopes the excluded scopes, should not start with "~"
-   * @param excludedNodes the excluded node list
-   * @param affinityNode the chosen node should share the same ancestor at
-   *                     generation "ancestorGen" with this node
-   * @param ancestorGen  the chosen node cannot share the same ancestor at
-   *                     this generation with excludedNodes
-   * @return the frequency with which each node was chosen
-   */
-  private Map<Node, Integer> pickNodes(int numNodes,
-      List<String> excludedScopes, Collection<Node> excludedNodes,
-      Node affinityNode, int ancestorGen) {
-    Map<Node, Integer> frequency = new HashMap<>();
-    for (Node dnd : dataNodes) {
-      frequency.put(dnd, 0);
-    }
-    excludedNodes = excludedNodes == null ? null :
-        excludedNodes.stream().distinct().collect(Collectors.toList());
-    for (int j = 0; j < numNodes; j++) {
-      Node node = cluster.getNode(j, null, excludedScopes, excludedNodes,
-          affinityNode, ancestorGen);
-      if (node != null) {
-        frequency.put(node, frequency.get(node) + 1);
-      }
-    }
-
-    LOG.info("Result:" + frequency);
-    return frequency;
-  }
-}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java
deleted file mode 100644
index 0c20353..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Arrays;
-import java.util.Collection;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/** Test the node schema loader. */
-@RunWith(Parameterized.class)
-public class TestNodeSchemaLoader {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestNodeSchemaLoader.class);
-  private ClassLoader classLoader =
-      Thread.currentThread().getContextClassLoader();
-
-  public TestNodeSchemaLoader(String schemaFile, String errMsg) {
-    try {
-      String filePath = classLoader.getResource(
-          "./networkTopologyTestFiles/" + schemaFile).getPath();
-      NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
-      fail("expect exceptions");
-    } catch (Throwable e) {
-      assertTrue(e.getMessage().contains(errMsg));
-    }
-  }
-
-  @Rule
-  public Timeout testTimeout = new Timeout(30000);
-
-  @Parameters
-  public static Collection<Object[]> getSchemaFiles() {
-    Object[][] schemaFiles = new Object[][]{
-        {"enforce-error.xml", "layer without prefix defined"},
-        {"invalid-cost.xml", "Cost should be positive number or 0"},
-        {"multiple-leaf.xml", "Multiple LEAF layers are found"},
-        {"multiple-root.xml", "Multiple ROOT layers are found"},
-        {"no-leaf.xml", "No LEAF layer is found"},
-        {"no-root.xml", "No ROOT layer is found"},
-        {"path-layers-size-mismatch.xml",
-            "Topology path depth doesn't match layer element numbers"},
-        {"path-with-id-reference-failure.xml",
-            "No layer found for id"},
-        {"unknown-layer-type.xml", "Unsupported layer type"},
-        {"wrong-path-order-1.xml",
-            "Topology path doesn't start with ROOT layer"},
-        {"wrong-path-order-2.xml", "Topology path doesn't end with LEAF layer"},
-        {"no-topology.xml", "no or multiple <topology> element"},
-        {"multiple-topology.xml", "no or multiple <topology> element"},
-        {"invalid-version.xml", "Bad layoutversion value"},
-    };
-    return Arrays.asList(schemaFiles);
-  }
-
-  @Test
-  public void testGood() {
-    try {
-      String filePath = classLoader.getResource(
-          "./networkTopologyTestFiles/good.xml").getPath();
-      NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
-    } catch (Throwable e) {
-      fail("should succeed");
-    }
-  }
-
-  @Test
-  public void testNotExist() {
-    String filePath = classLoader.getResource(
-        "./networkTopologyTestFiles/good.xml").getPath() + ".backup";
-    try {
-      NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
-      fail("should fail");
-    } catch (Throwable e) {
-      assertTrue(e.getMessage().contains("not found"));
-    }
-  }
-}
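
TestNodeSchemaLoader above is a classic JUnit 4 Parameterized test: the
@Parameters factory returns Object[] rows, and the runner invokes the
constructor once per row. A stripped-down, self-contained sketch of the same
pattern with hypothetical names:

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    import static org.junit.Assert.assertEquals;

    @RunWith(Parameterized.class)
    public class SquareTest {
      private final int input;
      private final int expected;

      // Each Object[] row becomes one constructor call and one test run.
      public SquareTest(int input, int expected) {
        this.input = input;
        this.expected = expected;
      }

      @Parameters
      public static Collection<Object[]> data() {
        return Arrays.asList(new Object[][]{{2, 4}, {3, 9}});
      }

      @Test
      public void squares() {
        assertEquals(expected, input * input);
      }
    }
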
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java
deleted file mode 100644
index 6698043..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.hdds.scm.net.NetConstants.DEFAULT_NODEGROUP;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.DEFAULT_RACK;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/** Test the node schema manager. */
-public class TestNodeSchemaManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestNodeSchemaManager.class);
-  private ClassLoader classLoader =
-      Thread.currentThread().getContextClassLoader();
-  private NodeSchemaManager manager;
-  private Configuration conf;
-
-  public TestNodeSchemaManager() {
-    conf = new Configuration();
-    String filePath = classLoader.getResource(
-        "./networkTopologyTestFiles/good.xml").getPath();
-    conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath);
-    manager = NodeSchemaManager.getInstance();
-    manager.init(conf);
-  }
-
-  @Rule
-  public Timeout testTimeout = new Timeout(30000);
-
-  @Test(expected = IllegalArgumentException.class)
-  public void testFailure1() {
-    manager.getCost(0);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void testFailure2() {
-    manager.getCost(manager.getMaxLevel() + 1);
-  }
-
-  @Test
-  public void testPass() {
-    assertEquals(4, manager.getMaxLevel());
-    for (int i  = 1; i <= manager.getMaxLevel(); i++) {
-      assertTrue(manager.getCost(i) == 1 || manager.getCost(i) == 0);
-    }
-  }
-
-  @Test
-  public void testInitFailure() {
-    String filePath = classLoader.getResource(
-        "./networkTopologyTestFiles/good.xml").getPath() + ".backup";
-    conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath);
-    try {
-      manager.init(conf);
-      fail("should fail");
-    } catch (Throwable e) {
-      assertTrue(e.getMessage().contains("Failed to load schema file:" +
-          filePath));
-    }
-  }
-
-  @Test
-  public void testComplete() {
-    // successful complete action
-    String path = "/node1";
-    assertEquals(DEFAULT_RACK + DEFAULT_NODEGROUP + path,
-        manager.complete(path));
-    assertEquals("/rack" + DEFAULT_NODEGROUP + path,
-        manager.complete("/rack" + path));
-    assertEquals(DEFAULT_RACK + "/nodegroup" + path,
-        manager.complete("/nodegroup" + path));
-
-    // failed complete action
-    assertEquals(null, manager.complete("/dc" + path));
-  }
-}
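
The testComplete case exercises a normalization step that pads a partial
topology path with per-layer defaults (so "/node1" becomes
DEFAULT_RACK + DEFAULT_NODEGROUP + "/node1") and returns null for a prefix
that fits no layer. Purely as illustration, since the real NodeSchemaManager
derives defaults from the loaded schema, here is a sketch for a fixed
rack/nodegroup/leaf layout with hypothetical default names:

    final class PathCompletion {
      /** Pad a partial path (assumed to start with "/") against a fixed
       *  rack/nodegroup/leaf schema; null when the prefix fits no layer. */
      static String complete(String path) {
        String[] parts = path.substring(1).split("/");
        switch (parts.length) {
          case 1:   // only the leaf given: pad both middle layers
            return "/default-rack/default-nodegroup/" + parts[0];
          case 2:   // one middle layer given: classify it by its prefix
            if (parts[0].startsWith("rack")) {
              return "/" + parts[0] + "/default-nodegroup/" + parts[1];
            }
            if (parts[0].startsWith("nodegroup")) {
              return "/default-rack/" + parts[0] + "/" + parts[1];
            }
            return null;                  // e.g. "/dc/node1" fits no layer
          case 3:
            return path;                  // already complete
          default:
            return null;
        }
      }
    }
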
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java
deleted file mode 100644
index c38bf38..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.net;
-
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Arrays;
-import java.util.Collection;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/** Test the YAML node schema loader. */
-@RunWith(Parameterized.class)
-public class TestYamlSchemaLoader {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestYamlSchemaLoader.class);
-  private ClassLoader classLoader =
-      Thread.currentThread().getContextClassLoader();
-
-  public TestYamlSchemaLoader(String schemaFile, String errMsg) {
-    try {
-      String filePath = classLoader.getResource(
-          "./networkTopologyTestFiles/" + schemaFile).getPath();
-      NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
-      fail("expect exceptions");
-    } catch (Throwable e) {
-      assertTrue(e.getMessage().contains(errMsg));
-    }
-  }
-
-  @Rule
-  public Timeout testTimeout = new Timeout(30000);
-
-  @Parameters
-  public static Collection<Object[]> getSchemaFiles() {
-    Object[][] schemaFiles = new Object[][]{
-        {"multiple-root.yaml", "Multiple root"},
-        {"middle-leaf.yaml", "Leaf node in the middle"},
-    };
-    return Arrays.asList(schemaFiles);
-  }
-
-
-  @Test
-  public void testGood() {
-    try {
-      String filePath = classLoader.getResource(
-              "./networkTopologyTestFiles/good.yaml").getPath();
-      NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
-    } catch (Throwable e) {
-      fail("should succeed");
-    }
-  }
-
-  @Test
-  public void testNotExist() {
-    String filePath = classLoader.getResource(
-        "./networkTopologyTestFiles/good.yaml").getPath() + ".backup";
-    try {
-      NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
-      fail("should fail");
-    } catch (Throwable e) {
-      assertTrue(e.getMessage().contains("not found"));
-    }
-  }
-
-}
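
The schema tests above all resolve their fixtures through the thread context
ClassLoader, which searches the test classpath rather than the working
directory; getResource returns null instead of throwing when a file is
absent. A minimal sketch of that resource-resolution idiom (hypothetical
helper name):

    import java.net.URL;

    final class TestResources {
      /** Resolve a fixture from the test classpath; null when it is absent. */
      static String pathOf(String resource) {
        URL url = Thread.currentThread().getContextClassLoader()
            .getResource(resource);
        return url == null ? null : url.getPath();
      }
    }
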
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java
deleted file mode 100644
index 7966941..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-/**
- * Test cases for SCM client classes.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java
deleted file mode 100644
index 77a2cec..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java
+++ /dev/null
@@ -1,313 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.security.token;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.IOException;
-import java.security.GeneralSecurityException;
-import java.security.InvalidKeyException;
-import java.security.KeyPair;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-import java.security.PrivateKey;
-import java.security.Signature;
-import java.security.SignatureException;
-import java.security.cert.Certificate;
-import java.security.cert.CertificateEncodingException;
-import java.security.cert.X509Certificate;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import javax.crypto.KeyGenerator;
-import javax.crypto.Mac;
-import javax.crypto.SecretKey;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Test class for OzoneBlockTokenIdentifier.
- */
-public class TestOzoneBlockTokenIdentifier {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(TestOzoneBlockTokenIdentifier.class);
-  private static final String BASEDIR = GenericTestUtils
-      .getTempPath(TestOzoneBlockTokenIdentifier.class.getSimpleName());
-  private static final String KEYSTORES_DIR =
-      new File(BASEDIR).getAbsolutePath();
-  private static long expiryTime;
-  private static KeyPair keyPair;
-  private static X509Certificate cert;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    File base = new File(BASEDIR);
-    FileUtil.fullyDelete(base);
-    base.mkdirs();
-    expiryTime = Time.monotonicNow() + 60 * 60 * 24;
-
-    // Create Ozone Master key pair.
-    keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
-    // Create Ozone Master certificate (SCM CA issued cert) and key store.
-    cert = KeyStoreTestUtil
-        .generateCertificate("CN=OzoneMaster", keyPair, 30, "SHA256withRSA");
-  }
-
-  @After
-  public void cleanUp() throws Exception {
-    // KeyStoreTestUtil.cleanupSSLConfig(KEYSTORES_DIR, sslConfsDir);
-  }
-
-  @Test
-  public void testSignToken() throws GeneralSecurityException, IOException {
-    String keystore = new File(KEYSTORES_DIR, "keystore.jks")
-        .getAbsolutePath();
-    String truststore = new File(KEYSTORES_DIR, "truststore.jks")
-        .getAbsolutePath();
-    String trustPassword = "trustPass";
-    String keyStorePassword = "keyStorePass";
-    String keyPassword = "keyPass";
-
-
-    KeyStoreTestUtil.createKeyStore(keystore, keyStorePassword, keyPassword,
-        "OzoneMaster", keyPair.getPrivate(), cert);
-
-    // Create trust store and put the certificate in the trust store
-    Map<String, X509Certificate> certs = Collections.singletonMap("server",
-        cert);
-    KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs);
-
-    // Sign the OzoneMaster Token with Ozone Master private key
-    PrivateKey privateKey = keyPair.getPrivate();
-    OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier(
-        "testUser", "84940",
-        EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class),
-        expiryTime, cert.getSerialNumber().toString(), 128L);
-    byte[] signedToken = signTokenAsymmetric(tokenId, privateKey);
-
-    // Verify a valid signed OzoneMaster Token with Ozone Master
-    // public key (certificate)
-    boolean isValidToken = verifyTokenAsymmetric(tokenId, signedToken, cert);
-    LOG.info("{} is {}", tokenId, isValidToken ? "valid." : "invalid.");
-
-    // Verify an invalid signed OzoneMaster Token with Ozone Master
-    // public key (certificate)
-    tokenId = new OzoneBlockTokenIdentifier("", "",
-        EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class),
-        expiryTime, cert.getSerialNumber().toString(), 128L);
-    LOG.info("Unsigned token {} is {}", tokenId,
-        verifyTokenAsymmetric(tokenId, RandomUtils.nextBytes(128), cert));
-
-  }
-
-  @Test
-  public void testTokenSerialization() throws GeneralSecurityException,
-      IOException {
-    String keystore = new File(KEYSTORES_DIR, "keystore.jks")
-        .getAbsolutePath();
-    String truststore = new File(KEYSTORES_DIR, "truststore.jks")
-        .getAbsolutePath();
-    String trustPassword = "trustPass";
-    String keyStorePassword = "keyStorePass";
-    String keyPassword = "keyPass";
-    long maxLength = 128L;
-
-    KeyStoreTestUtil.createKeyStore(keystore, keyStorePassword, keyPassword,
-        "OzoneMaster", keyPair.getPrivate(), cert);
-
-    // Create trust store and put the certificate in the trust store
-    Map<String, X509Certificate> certs = Collections.singletonMap("server",
-        cert);
-    KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs);
-
-    // Sign the OzoneMaster Token with Ozone Master private key
-    PrivateKey privateKey = keyPair.getPrivate();
-    OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier(
-        "testUser", "84940",
-        EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class),
-        expiryTime, cert.getSerialNumber().toString(), maxLength);
-    byte[] signedToken = signTokenAsymmetric(tokenId, privateKey);
-
-
-    Token<BlockTokenIdentifier> token = new Token(tokenId.getBytes(),
-        signedToken, tokenId.getKind(), new Text("host:port"));
-
-    String encodeToUrlString = token.encodeToUrlString();
-
-    Token<BlockTokenIdentifier> decodedToken = new Token();
-    decodedToken.decodeFromUrlString(encodeToUrlString);
-
-    OzoneBlockTokenIdentifier decodedTokenId = new OzoneBlockTokenIdentifier();
-    decodedTokenId.readFields(new DataInputStream(
-        new ByteArrayInputStream(decodedToken.getIdentifier())));
-
-    Assert.assertEquals(decodedTokenId, tokenId);
-    Assert.assertEquals(decodedTokenId.getMaxLength(), maxLength);
-
-    // Verify a decoded signed Token with public key(certificate)
-    boolean isValidToken = verifyTokenAsymmetric(decodedTokenId, decodedToken
-        .getPassword(), cert);
-    LOG.info("{} is {}", tokenId, isValidToken ? "valid." : "invalid.");
-  }
-
-
-  public byte[] signTokenAsymmetric(OzoneBlockTokenIdentifier tokenId,
-      PrivateKey privateKey) throws NoSuchAlgorithmException,
-      InvalidKeyException, SignatureException {
-    Signature rsaSignature = Signature.getInstance("SHA256withRSA");
-    rsaSignature.initSign(privateKey);
-    rsaSignature.update(tokenId.getBytes());
-    byte[] signature = rsaSignature.sign();
-    return signature;
-  }
-
-  public boolean verifyTokenAsymmetric(OzoneBlockTokenIdentifier tokenId,
-      byte[] signature, Certificate certificate) throws InvalidKeyException,
-      NoSuchAlgorithmException, SignatureException {
-    Signature rsaSignature = Signature.getInstance("SHA256withRSA");
-    rsaSignature.initVerify(certificate);
-    rsaSignature.update(tokenId.getBytes());
-    boolean isValid = rsaSignature.verify(signature);
-    return isValid;
-  }
-
-  private byte[] signTokenSymmetric(OzoneBlockTokenIdentifier identifier,
-      Mac mac, SecretKey key) {
-    try {
-      mac.init(key);
-    } catch (InvalidKeyException ike) {
-      throw new IllegalArgumentException("Invalid key to HMAC computation",
-          ike);
-    }
-    return mac.doFinal(identifier.getBytes());
-  }
-
-  OzoneBlockTokenIdentifier generateTestToken() {
-    return new OzoneBlockTokenIdentifier(RandomStringUtils.randomAlphabetic(6),
-        RandomStringUtils.randomAlphabetic(5),
-        EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class),
-        expiryTime, cert.getSerialNumber().toString(), 1024768L);
-  }
-
-  @Test
-  public void testAsymmetricTokenPerf() throws NoSuchAlgorithmException,
-      CertificateEncodingException, NoSuchProviderException,
-      InvalidKeyException, SignatureException {
-    final int testTokenCount = 1000;
-    List<OzoneBlockTokenIdentifier> tokenIds = new ArrayList<>();
-    List<byte[]> tokenPasswordAsym = new ArrayList<>();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenIds.add(generateTestToken());
-    }
-
-    KeyPair kp = KeyStoreTestUtil.generateKeyPair("RSA");
-
-    // Create Ozone Master certificate (SCM CA issued cert) and key store
-    X509Certificate certificate;
-    certificate = KeyStoreTestUtil.generateCertificate("CN=OzoneMaster",
-        kp, 30, "SHA256withRSA");
-
-    long startTime = Time.monotonicNowNanos();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenPasswordAsym.add(
-          signTokenAsymmetric(tokenIds.get(i), kp.getPrivate()));
-    }
-    long duration = Time.monotonicNowNanos() - startTime;
-    LOG.info("Average token sign time with HmacSha256(RSA/1024 key) is {} ns",
-        duration / testTokenCount);
-
-    startTime = Time.monotonicNowNanos();
-    for (int i = 0; i < testTokenCount; i++) {
-      verifyTokenAsymmetric(tokenIds.get(i), tokenPasswordAsym.get(i),
-          certificate);
-    }
-    duration = Time.monotonicNowNanos() - startTime;
-    LOG.info("Average token verify time with HmacSha256(RSA/1024 key) "
-        + "is {} ns", duration / testTokenCount);
-  }
-
-  @Test
-  public void testSymmetricTokenPerf() {
-    String hmacSHA1 = "HmacSHA1";
-    String hmacSHA256 = "HmacSHA256";
-
-    testSymmetricTokenPerfHelper(hmacSHA1, 64);
-    testSymmetricTokenPerfHelper(hmacSHA256, 1024);
-  }
-
-  public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) {
-    final int testTokenCount = 1000;
-    List<OzoneBlockTokenIdentifier> tokenIds = new ArrayList<>();
-    List<byte[]> tokenPasswordSym = new ArrayList<>();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenIds.add(generateTestToken());
-    }
-
-    KeyGenerator keyGen;
-    try {
-      keyGen = KeyGenerator.getInstance(hmacAlgorithm);
-      keyGen.init(keyLen);
-    } catch (NoSuchAlgorithmException nsa) {
-      throw new IllegalArgumentException("Can't find " + hmacAlgorithm +
-          " algorithm.");
-    }
-
-    Mac mac;
-    try {
-      mac = Mac.getInstance(hmacAlgorithm);
-    } catch (NoSuchAlgorithmException nsa) {
-      throw new IllegalArgumentException("Can't find " + hmacAlgorithm +
-          " algorithm.");
-    }
-
-    SecretKey secretKey = keyGen.generateKey();
-
-    long startTime = Time.monotonicNowNanos();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenPasswordSym.add(
-          signTokenSymmetric(tokenIds.get(i), mac, secretKey));
-    }
-    long duration = Time.monotonicNowNanos() - startTime;
-    LOG.info("Average token sign time with {}({} symmetric key) is {} ns",
-        hmacAlgorithm, keyLen, duration / testTokenCount);
-  }
-
-  // TODO: verify certificate with a trust store
-  public boolean verifyCert(Certificate certificate) {
-    return true;
-  }
-}
\ No newline at end of file
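
The two perf tests above contrast RSA signing with HMAC computation for
token passwords; the symmetric path is typically orders of magnitude
cheaper. A self-contained sketch timing both over the same payload, using
only JDK APIs (key sizes and round counts are illustrative):

    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.Signature;

    import javax.crypto.KeyGenerator;
    import javax.crypto.Mac;
    import javax.crypto.SecretKey;

    public final class SignPerfSketch {
      public static void main(String[] args) throws Exception {
        byte[] payload = new byte[128];
        int rounds = 1000;

        // Asymmetric: RSA-2048 with SHA-256, one signature per round.
        KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
        kpg.initialize(2048);
        KeyPair kp = kpg.generateKeyPair();
        Signature rsa = Signature.getInstance("SHA256withRSA");
        long t0 = System.nanoTime();
        for (int i = 0; i < rounds; i++) {
          rsa.initSign(kp.getPrivate());
          rsa.update(payload);
          rsa.sign();
        }
        System.out.println("RSA sign avg ns: "
            + (System.nanoTime() - t0) / rounds);

        // Symmetric: HmacSHA256 over the same payload.
        SecretKey key = KeyGenerator.getInstance("HmacSHA256").generateKey();
        Mac mac = Mac.getInstance("HmacSHA256");
        long t1 = System.nanoTime();
        for (int i = 0; i < rounds; i++) {
          mac.init(key);
          mac.doFinal(payload);
        }
        System.out.println("HMAC avg ns: "
            + (System.nanoTime() - t1) / rounds);
      }
    }
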
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/package-info.java
deleted file mode 100644
index d056655..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains the block token related classes.
- */
-package org.apache.hadoop.hdds.security.token;
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockApprover.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockApprover.java
deleted file mode 100644
index a8fa0af..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockApprover.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
-
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.operator.OperatorCreationException;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-
-import java.io.IOException;
-import java.security.PrivateKey;
-import java.util.Date;
-import java.util.concurrent.CompletableFuture;
-
-/**
- * A test approver class that makes testing easier.
- */
-public class MockApprover extends BaseApprover {
-
-  public MockApprover(PKIProfile pkiProfile, SecurityConfig config) {
-    super(pkiProfile, config);
-  }
-
-  @Override
-  public CompletableFuture<X509CertificateHolder>
-      inspectCSR(PKCS10CertificationRequest csr) {
-    return super.inspectCSR(csr);
-  }
-
-  @Override
-  public X509CertificateHolder sign(SecurityConfig config, PrivateKey caPrivate,
-      X509CertificateHolder caCertificate,
-      Date validFrom, Date validTill,
-      PKCS10CertificationRequest request,
-      String scmId, String clusterId)
-      throws IOException, OperatorCreationException {
-    return null;
-  }
-
-}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java
deleted file mode 100644
index 1dea512..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
-
-import java.io.IOException;
-import java.math.BigInteger;
-import java.security.cert.X509Certificate;
-
-/**
- * A mock CertificateStore implementation whose operations are no-ops,
- * for use in tests.
- */
-public class MockCAStore implements CertificateStore {
-  @Override
-  public void storeValidCertificate(BigInteger serialID,
-                                    X509Certificate certificate)
-      throws IOException {
-
-  }
-
-  @Override
-  public void revokeCertificate(BigInteger serialID) throws IOException {
-
-  }
-
-  @Override
-  public void removeExpiredCertificate(BigInteger serialID)
-      throws IOException {
-
-  }
-
-  @Override
-  public X509Certificate getCertificateByID(BigInteger serialID,
-                                            CertType certType)
-      throws IOException {
-    return null;
-  }
-}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java
deleted file mode 100644
index 64eb4ba..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
-import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.IOException;
-import java.security.KeyPair;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-import java.security.cert.CertificateException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.function.Consumer;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Tests the Default CA Server.
- */
-public class TestDefaultCAServer {
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-  @Rule
-  public TemporaryFolder temporaryFolder = new TemporaryFolder();
-  private MockCAStore caStore;
-
-  @Before
-  public void init() throws IOException {
-    conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().toString());
-    caStore = new MockCAStore();
-  }
-
-  @Test
-  public void testInit() throws SCMSecurityException, CertificateException,
-      IOException {
-    SecurityConfig securityConfig = new SecurityConfig(conf);
-    CertificateServer testCA = new DefaultCAServer("testCA",
-        RandomStringUtils.randomAlphabetic(4),
-        RandomStringUtils.randomAlphabetic(4), caStore);
-    testCA.init(securityConfig, CertificateServer.CAType.SELF_SIGNED_CA);
-    X509CertificateHolder first = testCA.getCACertificate();
-    assertNotNull(first);
-    //Init is idempotent.
-    testCA.init(securityConfig, CertificateServer.CAType.SELF_SIGNED_CA);
-    X509CertificateHolder second = testCA.getCACertificate();
-    assertEquals(first, second);
-
-    // we only support Self Signed CA for now.
-    try {
-      testCA.init(securityConfig, CertificateServer.CAType.INTERMEDIARY_CA);
-      fail("code should not reach here, exception should have been thrown.");
-    } catch (IllegalStateException e) {
-      // This is a runtime exception, so it is not caught by JUnit's
-      // expected-exception mechanism.
-      assertTrue(e.toString().contains("Not implemented"));
-    }
-  }
-
-  @Test
-  public void testMissingCertificate() {
-    SecurityConfig securityConfig = new SecurityConfig(conf);
-    CertificateServer testCA = new DefaultCAServer("testCA",
-        RandomStringUtils.randomAlphabetic(4),
-        RandomStringUtils.randomAlphabetic(4), caStore);
-    Consumer<SecurityConfig> caInitializer =
-        ((DefaultCAServer) testCA).processVerificationStatus(
-        DefaultCAServer.VerificationStatus.MISSING_CERTIFICATE);
-    try {
-
-      caInitializer.accept(securityConfig);
-      fail("code should not reach here, exception should have been thrown.");
-    } catch (IllegalStateException e) {
-      // This is also a runtime exception, so it is not caught by JUnit's
-      // expected-exception mechanism.
-      assertTrue(e.toString().contains("Missing Root Certs"));
-    }
-  }
-
-  @Test
-  public void testMissingKey() {
-    SecurityConfig securityConfig = new SecurityConfig(conf);
-    CertificateServer testCA = new DefaultCAServer("testCA",
-        RandomStringUtils.randomAlphabetic(4),
-        RandomStringUtils.randomAlphabetic(4), caStore);
-    Consumer<SecurityConfig> caInitializer =
-        ((DefaultCAServer) testCA).processVerificationStatus(
-            DefaultCAServer.VerificationStatus.MISSING_KEYS);
-    try {
-
-      caInitializer.accept(securityConfig);
-      fail("code should not reach here, exception should have been thrown.");
-    } catch (IllegalStateException e) {
-      // This is also a runtime exception, so it is not caught by JUnit's
-      // expected-exception mechanism.
-      assertTrue(e.toString().contains("Missing Keys"));
-    }
-  }
-
-  /**
-   * The most important test of this test suite. This tests that we are able
-   * to create a test CA that creates its own self-signed CA certificate and
-   * then issues a certificate based on a CSR.
-   * @throws SCMSecurityException - on ERROR.
-   * @throws ExecutionException - on ERROR.
-   * @throws InterruptedException - on ERROR.
-   * @throws NoSuchProviderException - on ERROR.
-   * @throws NoSuchAlgorithmException - on ERROR.
-   */
-  @Test
-  public void testRequestCertificate() throws IOException,
-      ExecutionException, InterruptedException,
-      NoSuchProviderException, NoSuchAlgorithmException {
-    String scmId =  RandomStringUtils.randomAlphabetic(4);
-    String clusterId =  RandomStringUtils.randomAlphabetic(4);
-    KeyPair keyPair =
-        new HDDSKeyGenerator(conf).generateKey();
-    PKCS10CertificationRequest csr = new CertificateSignRequest.Builder()
-        .addDnsName("hadoop.apache.org")
-        .addIpAddress("8.8.8.8")
-        .setCA(false)
-        .setClusterID(clusterId)
-        .setScmID(scmId)
-        .setSubject("Ozone Cluster")
-        .setConfiguration(conf)
-        .setKey(keyPair)
-        .build();
-
-    // Let us convert this to a string to mimic the common use case.
-    String csrString = CertificateSignRequest.getEncodedString(csr);
-
-    CertificateServer testCA = new DefaultCAServer("testCA",
-        clusterId, scmId, caStore);
-    testCA.init(new SecurityConfig(conf),
-        CertificateServer.CAType.SELF_SIGNED_CA);
-
-    Future<X509CertificateHolder> holder = testCA.requestCertificate(csrString,
-        CertificateApprover.ApprovalType.TESTING_AUTOMATIC);
-    // Right now our calls are synchronous. Eventually this will have to wait.
-    assertTrue(holder.isDone());
-    assertNotNull(holder.get());
-  }
-
-  /**
-   * Tests that we are able to create a test CA that creates its own
-   * self-signed CA certificate and then issues a certificate based on a
-   * CSR when scmId and clusterId are not set in the CSR subject.
-   * @throws SCMSecurityException - on ERROR.
-   * @throws ExecutionException - on ERROR.
-   * @throws InterruptedException - on ERROR.
-   * @throws NoSuchProviderException - on ERROR.
-   * @throws NoSuchAlgorithmException - on ERROR.
-   */
-  @Test
-  public void testRequestCertificateWithInvalidSubject() throws IOException,
-      ExecutionException, InterruptedException,
-      NoSuchProviderException, NoSuchAlgorithmException {
-    KeyPair keyPair =
-        new HDDSKeyGenerator(conf).generateKey();
-    PKCS10CertificationRequest csr = new CertificateSignRequest.Builder()
-        .addDnsName("hadoop.apache.org")
-        .addIpAddress("8.8.8.8")
-        .setCA(false)
-        .setSubject("Ozone Cluster")
-        .setConfiguration(conf)
-        .setKey(keyPair)
-        .build();
-
-    // Let us convert this to a string to mimic the common use case.
-    String csrString = CertificateSignRequest.getEncodedString(csr);
-
-    CertificateServer testCA = new DefaultCAServer("testCA",
-        RandomStringUtils.randomAlphabetic(4),
-        RandomStringUtils.randomAlphabetic(4), caStore);
-    testCA.init(new SecurityConfig(conf),
-        CertificateServer.CAType.SELF_SIGNED_CA);
-
-    Future<X509CertificateHolder> holder = testCA.requestCertificate(csrString,
-        CertificateApprover.ApprovalType.TESTING_AUTOMATIC);
-    // Right now our calls are synchronous. Eventually this will have to wait.
-    assertTrue(holder.isDone());
-    assertNotNull(holder.get());
-  }
-
-  @Test
-  public void testRequestCertificateWithInvalidSubjectFailure()
-      throws Exception {
-    KeyPair keyPair =
-        new HDDSKeyGenerator(conf).generateKey();
-    PKCS10CertificationRequest csr = new CertificateSignRequest.Builder()
-        .addDnsName("hadoop.apache.org")
-        .addIpAddress("8.8.8.8")
-        .setCA(false)
-        .setScmID("wrong one")
-        .setClusterID("223432rf")
-        .setSubject("Ozone Cluster")
-        .setConfiguration(conf)
-        .setKey(keyPair)
-        .build();
-
-    // Let us convert this to a string to mimic the common use case.
-    String csrString = CertificateSignRequest.getEncodedString(csr);
-
-    CertificateServer testCA = new DefaultCAServer("testCA",
-        RandomStringUtils.randomAlphabetic(4),
-        RandomStringUtils.randomAlphabetic(4), caStore);
-    testCA.init(new SecurityConfig(conf),
-        CertificateServer.CAType.SELF_SIGNED_CA);
-
-    LambdaTestUtils.intercept(ExecutionException.class, "ScmId and " +
-            "ClusterId in CSR subject are incorrect",
-        () -> {
-          Future<X509CertificateHolder> holder =
-              testCA.requestCertificate(csrString,
-                  CertificateApprover.ApprovalType.TESTING_AUTOMATIC);
-          holder.isDone();
-          holder.get();
-        });
-  }
-
-}
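
The final test asserts both the exception type and a message fragment via
LambdaTestUtils.intercept. A generic, library-independent sketch of that
assertion shape (hypothetical class name):

    import java.util.concurrent.Callable;

    final class Intercept {
      /** Run the body, expecting `expected` with `fragment` in its message;
       *  anything else (including no exception at all) fails the assertion. */
      static <E extends Throwable> E intercept(Class<E> expected,
          String fragment, Callable<?> body) {
        try {
          body.call();
        } catch (Throwable t) {
          if (expected.isInstance(t)
              && String.valueOf(t.getMessage()).contains(fragment)) {
            return expected.cast(t);
          }
          throw new AssertionError("unexpected exception", t);
        }
        throw new AssertionError(
            "expected " + expected.getSimpleName() + " was not thrown");
      }
    }
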
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultProfile.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultProfile.java
deleted file mode 100644
index f892b8d..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultProfile.java
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.DefaultProfile;
-import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
-import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
-import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
-import org.bouncycastle.asn1.x500.X500Name;
-import org.bouncycastle.asn1.x500.X500NameBuilder;
-import org.bouncycastle.asn1.x500.style.BCStyle;
-import org.bouncycastle.asn1.x509.ExtendedKeyUsage;
-import org.bouncycastle.asn1.x509.Extension;
-import org.bouncycastle.asn1.x509.Extensions;
-import org.bouncycastle.asn1.x509.ExtensionsGenerator;
-import org.bouncycastle.asn1.x509.GeneralName;
-import org.bouncycastle.asn1.x509.GeneralNames;
-import org.bouncycastle.asn1.x509.KeyPurposeId;
-import org.bouncycastle.operator.ContentSigner;
-import org.bouncycastle.operator.OperatorCreationException;
-import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-import org.bouncycastle.pkcs.PKCS10CertificationRequestBuilder;
-import org.bouncycastle.pkcs.PKCSException;
-import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.IOException;
-import java.security.KeyPair;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Tests for the default PKI Profile.
- */
-public class TestDefaultProfile {
-  @Rule
-  public TemporaryFolder temporaryFolder = new TemporaryFolder();
-
-  private OzoneConfiguration configuration;
-  private SecurityConfig securityConfig;
-  private DefaultProfile defaultProfile;
-  private MockApprover testApprover;
-  private KeyPair keyPair;
-
-  @Before
-  public void setUp() throws Exception {
-    configuration = new OzoneConfiguration();
-    configuration.set(OZONE_METADATA_DIRS,
-        temporaryFolder.newFolder().toString());
-    securityConfig = new SecurityConfig(configuration);
-    defaultProfile = new DefaultProfile();
-    testApprover = new MockApprover(defaultProfile,
-        securityConfig);
-    keyPair = new HDDSKeyGenerator(securityConfig).generateKey();
-  }
-
-  /**
-   * Tests the General Names that we support. The default profile supports only
-   * two names right now.
-   */
-  @Test
-  public void testisSupportedGeneralName() {
-    // Positive tests
-    assertTrue(defaultProfile.isSupportedGeneralName(GeneralName.iPAddress));
-    assertTrue(defaultProfile.isSupportedGeneralName(GeneralName.dNSName));
-    // Negative tests
-    assertFalse(defaultProfile.isSupportedGeneralName(
-        GeneralName.directoryName));
-    assertFalse(defaultProfile.isSupportedGeneralName(GeneralName.rfc822Name));
-    assertFalse(defaultProfile.isSupportedGeneralName(GeneralName.otherName));
-  }
-
-  /**
-   * Test valid keys are validated correctly.
-   *
-   * @throws SCMSecurityException      - on Error.
-   * @throws PKCSException             - on Error.
-   * @throws OperatorCreationException - on Error.
-   */
-  @Test
-  public void testVerifyCertificate() throws SCMSecurityException,
-      PKCSException, OperatorCreationException {
-    PKCS10CertificationRequest csr = new CertificateSignRequest.Builder()
-        .addDnsName("hadoop.apache.org")
-        .addIpAddress("8.8.8.8")
-        .setCA(false)
-        .setClusterID("ClusterID")
-        .setScmID("SCMID")
-        .setSubject("Ozone Cluster")
-        .setConfiguration(configuration)
-        .setKey(keyPair)
-        .build();
-    assertTrue(testApprover.verifyPkcs10Request(csr));
-  }
-
-  /**
-   * Test invalid keys fail in the validation.
-   *
-   * @throws SCMSecurityException      - on Error.
-   * @throws PKCSException             - on Error.
-   * @throws OperatorCreationException - on Error.
-   * @throws NoSuchProviderException   - on Error.
-   * @throws NoSuchAlgorithmException  - on Error.
-   */
-  @Test
-  public void testVerifyCertificateInvalidKeys() throws SCMSecurityException,
-      PKCSException, OperatorCreationException,
-      NoSuchProviderException, NoSuchAlgorithmException {
-    KeyPair newKeyPair = new HDDSKeyGenerator(securityConfig).generateKey();
-    KeyPair wrongKey = new KeyPair(keyPair.getPublic(),
-        newKeyPair.getPrivate());
-    PKCS10CertificationRequest csr = new CertificateSignRequest.Builder()
-        .addDnsName("hadoop.apache.org")
-        .addIpAddress("8.8.8.8")
-        .setCA(false)
-        .setClusterID("ClusterID")
-        .setScmID("SCMID")
-        .setSubject("Ozone Cluster")
-        .setConfiguration(configuration)
-        .setKey(wrongKey)
-        .build();
-    // Signature verification should fail here, since the public and
-    // private keys do not match.
-    assertFalse(testApprover.verifyPkcs10Request(csr));
-  }
-
-  /**
-   * Tests that normal valid extensions work with the default profile.
-   *
-   * @throws SCMSecurityException - on Error.
-   */
-  @Test
-  public void testExtensions() throws SCMSecurityException {
-    PKCS10CertificationRequest csr = new CertificateSignRequest.Builder()
-        .addDnsName("hadoop.apache.org")
-        .addIpAddress("192.10.234.6")
-        .setCA(false)
-        .setClusterID("ClusterID")
-        .setScmID("SCMID")
-        .setSubject("Ozone Cluster")
-        .setConfiguration(configuration)
-        .setKey(keyPair)
-        .build();
-    assertTrue(testApprover.verfiyExtensions(csr));
-  }
-
-  /**
-   * Tests that invalid extensions cause a failure in validation. We will
-   * fail if the CA extension is enabled.
-   *
-   * @throws SCMSecurityException - on Error.
-   */
-  @Test
-  public void testInvalidExtensionsWithCA() throws SCMSecurityException {
-    PKCS10CertificationRequest csr = new CertificateSignRequest.Builder()
-        .addDnsName("hadoop.apache.org")
-        .addIpAddress("192.10.234.6")
-        .setCA(true)
-        .setClusterID("ClusterID")
-        .setScmID("SCMID")
-        .setSubject("Ozone Cluster")
-        .setConfiguration(configuration)
-        .setKey(keyPair)
-        .build();
-    assertFalse(testApprover.verfiyExtensions(csr));
-  }
-
-  /**
-   * Tests that invalid extensions cause a failure in validation. We will
-   * fail if rfc822 type names are added. We also add the extension in both
-   * critical and non-critical fashion to verify that we catch both cases.
-   *
-   * @throws IOException               - on Error.
-   * @throws OperatorCreationException - on Error.
-   */
-  @Test
-  public void testInvalidExtensionsWithEmail()
-      throws IOException, OperatorCreationException {
-    Extensions emailExtension = getSANExtension(GeneralName.rfc822Name,
-        "bilbo@apache.org", false);
-    PKCS10CertificationRequest csr = getInvalidCSR(keyPair, emailExtension);
-    assertFalse(testApprover.verfiyExtensions(csr));
-
-    emailExtension = getSANExtension(GeneralName.rfc822Name, "bilbo" +
-        "@apache.org", true);
-    csr = getInvalidCSR(keyPair, emailExtension);
-    assertFalse(testApprover.verfiyExtensions(csr));
-
-  }
-
-  /**
-   * Same test as above, but for URI type names.
-   * @throws IOException - on Error.
-   * @throws OperatorCreationException - on Error.
-   */
-  @Test
-  public void testInvalidExtensionsWithURI() throws IOException,
-      OperatorCreationException {
-    Extensions oExtension = getSANExtension(
-        GeneralName.uniformResourceIdentifier, "s3g.ozone.org", false);
-    PKCS10CertificationRequest csr = getInvalidCSR(keyPair, oExtension);
-    assertFalse(testApprover.verfiyExtensions(csr));
-    // Repeat with the extension marked critical.
-    oExtension = getSANExtension(GeneralName.uniformResourceIdentifier,
-        "s3g.ozone.org", true);
-    csr = getInvalidCSR(keyPair, oExtension);
-    assertFalse(testApprover.verfiyExtensions(csr));
-  }
-
-  /**
-   * Assert that if DNS is marked critical our PKI profile will reject it.
-   * @throws IOException - on Error.
-   * @throws OperatorCreationException - on Error.
-   */
-  @Test
-  public void testInvalidExtensionsWithCriticalDNS() throws IOException,
-      OperatorCreationException {
-    Extensions dnsExtension = getSANExtension(GeneralName.dNSName,
-        "ozone.hadoop.org",
-        true);
-    PKCS10CertificationRequest csr = getInvalidCSR(keyPair, dnsExtension);
-    assertFalse(testApprover.verfiyExtensions(csr));
-    // This case should pass, hence the assertTrue.
-    dnsExtension = getSANExtension(GeneralName.dNSName,
-        "ozone.hadoop.org",
-        false);
-    csr = getInvalidCSR(keyPair, dnsExtension);
-    assertTrue(testApprover.verfiyExtensions(csr));
-  }
-
-
-  /**
-   * Verify that valid Extended Key usage works as expected.
-   * @throws IOException - on Error.
-   * @throws OperatorCreationException - on Error.
-   */
-  @Test
-  public void testValidExtendedKeyUsage() throws IOException,
-      OperatorCreationException {
-    Extensions extendedExtension =
-        getKeyUsageExtension(KeyPurposeId.id_kp_clientAuth, false);
-    PKCS10CertificationRequest csr = getInvalidCSR(keyPair, extendedExtension);
-    assertTrue(testApprover.verfiyExtensions(csr));
-
-    extendedExtension = getKeyUsageExtension(KeyPurposeId.id_kp_serverAuth,
-        false);
-    csr = getInvalidCSR(keyPair, extendedExtension);
-    assertTrue(testApprover.verfiyExtensions(csr));
-  }
-
-
-  /**
-   * Verify that invalid Extended Key Usage is rejected as expected.
-   * @throws IOException - on Error.
-   * @throws OperatorCreationException - on Error.
-   */
-  @Test
-  public void testInValidExtendedKeyUsage() throws IOException,
-      OperatorCreationException {
-    Extensions extendedExtension =
-        getKeyUsageExtension(KeyPurposeId.id_kp_clientAuth, true);
-    PKCS10CertificationRequest csr = getInvalidCSR(keyPair, extendedExtension);
-    assertFalse(testApprover.verfiyExtensions(csr));
-
-    extendedExtension = getKeyUsageExtension(KeyPurposeId.id_kp_OCSPSigning,
-        false);
-    csr = getInvalidCSR(keyPair, extendedExtension);
-    assertFalse(testApprover.verfiyExtensions(csr));
-  }
-
-
-
-  /**
-   * Generates a CSR with the extensions specified. This function is used to
-   * create an invalid CSR (hence the function name) and verify that our PKI
-   * profile rejects these invalid extensions; by itself it is a well-formed
-   * CSR, but the profile will treat it as invalid.
-   *
-   * @param kPair      - Key Pair.
-   * @param extensions - Extensions to add to the CSR.
-   * @return CSR - PKCS10CertificationRequest
-   * @throws OperatorCreationException - on Error.
-   */
-  private PKCS10CertificationRequest getInvalidCSR(KeyPair kPair,
-      Extensions extensions) throws OperatorCreationException {
-    X500NameBuilder namebuilder =
-        new X500NameBuilder(X500Name.getDefaultStyle());
-    namebuilder.addRDN(BCStyle.CN, "invalidCert");
-    PKCS10CertificationRequestBuilder p10Builder =
-        new JcaPKCS10CertificationRequestBuilder(namebuilder.build(),
-            kPair.getPublic());
-    p10Builder.addAttribute(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest,
-        extensions);
-    JcaContentSignerBuilder csBuilder =
-        new JcaContentSignerBuilder(this.securityConfig.getSignatureAlgo());
-    ContentSigner signer = csBuilder.build(kPair.getPrivate());
-    return p10Builder.build(signer);
-  }
-
-  /**
-   * Generates a SubjectAlternativeName extension with the given general
-   * name type.
-   * @param extensionCode - GeneralName type code (e.g. rfc822Name).
-   * @param value - value to be added to the certificate.
-   * @param critical - boolean value that marks the extension as critical.
-   * @return - An Extensions list with the subject alternative name.
-   * @throws IOException - on Error.
-   */
-  private Extensions getSANExtension(int extensionCode, String value,
-      boolean critical) throws IOException {
-    GeneralName extn = new GeneralName(extensionCode,
-        value);
-    ExtensionsGenerator extensionsGenerator = new ExtensionsGenerator();
-    extensionsGenerator.addExtension(Extension.subjectAlternativeName, critical,
-        new GeneralNames(extn));
-    return extensionsGenerator.generate();
-  }
-
-  /**
-   * Returns an extension with Extended Key Usage.
-   * @param purposeId - Usage that we want to encode.
-   * @param critical - makes the extension critical.
-   * @return Extensions.
-   * @throws IOException - on Error.
-   */
-  private Extensions getKeyUsageExtension(KeyPurposeId purposeId,
-      boolean critical) throws IOException {
-    ExtendedKeyUsage extendedKeyUsage = new ExtendedKeyUsage(purposeId);
-    ExtensionsGenerator extensionsGenerator = new ExtensionsGenerator();
-    extensionsGenerator.addExtension(
-        Extension.extendedKeyUsage, critical, extendedKeyUsage);
-    return extensionsGenerator.generate();
-  }
-}
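The removed getSANExtension and getKeyUsageExtension helpers above are thin wrappers over BouncyCastle's ExtensionsGenerator. A minimal sketch of the same pattern, assuming BouncyCastle (bcprov) is on the classpath; the class name and DNS value are illustrative only:

    import org.bouncycastle.asn1.x509.Extension;
    import org.bouncycastle.asn1.x509.Extensions;
    import org.bouncycastle.asn1.x509.ExtensionsGenerator;
    import org.bouncycastle.asn1.x509.GeneralName;
    import org.bouncycastle.asn1.x509.GeneralNames;

    import java.io.IOException;

    public final class SanExtensionDemo {
      public static void main(String[] args) throws IOException {
        // Build a non-critical SubjectAlternativeName extension carrying a
        // DNS name, mirroring getSANExtension() in the removed test.
        GeneralName dns =
            new GeneralName(GeneralName.dNSName, "ozone.hadoop.org");
        ExtensionsGenerator gen = new ExtensionsGenerator();
        gen.addExtension(Extension.subjectAlternativeName, false,
            new GeneralNames(dns));
        Extensions extensions = gen.generate();
        System.out.println(
            extensions.getExtension(Extension.subjectAlternativeName) != null);
      }
    }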
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java
deleted file mode 100644
index 1d20a78..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Tests for Default CA.
- */
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java
deleted file mode 100644
index dcd9898..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.security.x509.certificate.client;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
-import org.apache.hadoop.hdds.security.x509.keys.KeyCodec;
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameter;
-
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.security.KeyPair;
-import java.security.cert.X509Certificate;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.RECOVER;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test class for certificate client initialization.
- */
-@RunWith(Parameterized.class)
-@SuppressWarnings("visibilitymodifier")
-public class TestCertificateClientInit {
-
-  private KeyPair keyPair;
-  private String certSerialId = "3284792342234";
-  private CertificateClient dnCertificateClient;
-  private CertificateClient omCertificateClient;
-  private HDDSKeyGenerator keyGenerator;
-  private Path metaDirPath;
-  private SecurityConfig securityConfig;
-  private KeyCodec dnKeyCodec;
-  private KeyCodec omKeyCodec;
-  private X509Certificate x509Certificate;
-  private final static String DN_COMPONENT = DNCertificateClient.COMPONENT_NAME;
-  private final static String OM_COMPONENT = OMCertificateClient.COMPONENT_NAME;
-
-  @Parameter
-  public boolean pvtKeyPresent;
-  @Parameter(1)
-  public boolean pubKeyPresent;
-  @Parameter(2)
-  public boolean certPresent;
-  @Parameter(3)
-  public InitResponse expectedResult;
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> initData() {
-    return Arrays.asList(new Object[][]{
-        {false, false, false, GETCERT},
-        {false, false, true, FAILURE},
-        {false, true, false, FAILURE},
-        {true, false, false, FAILURE},
-        {false, true, true, FAILURE},
-        {true, true, false, GETCERT},
-        {true, false, true, SUCCESS},
-        {true, true, true, SUCCESS}});
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    OzoneConfiguration config = new OzoneConfiguration();
-    final String path = GenericTestUtils
-        .getTempPath(UUID.randomUUID().toString());
-    metaDirPath = Paths.get(path, "test");
-    config.set(HDDS_METADATA_DIR_NAME, metaDirPath.toString());
-    securityConfig = new SecurityConfig(config);
-    keyGenerator = new HDDSKeyGenerator(securityConfig);
-    keyPair = keyGenerator.generateKey();
-    x509Certificate = getX509Certificate();
-    certSerialId = x509Certificate.getSerialNumber().toString();
-    dnCertificateClient = new DNCertificateClient(securityConfig,
-        certSerialId);
-    omCertificateClient = new OMCertificateClient(securityConfig,
-        certSerialId);
-    dnKeyCodec = new KeyCodec(securityConfig, DN_COMPONENT);
-    omKeyCodec = new KeyCodec(securityConfig, OM_COMPONENT);
-
-    Files.createDirectories(securityConfig.getKeyLocation(DN_COMPONENT));
-    Files.createDirectories(securityConfig.getKeyLocation(OM_COMPONENT));
-  }
-
-  @After
-  public void tearDown() {
-    dnCertificateClient = null;
-    omCertificateClient = null;
-    FileUtils.deleteQuietly(metaDirPath.toFile());
-  }
-
-
-  @Test
-  public void testInitDatanode() throws Exception {
-    if (pvtKeyPresent) {
-      dnKeyCodec.writePrivateKey(keyPair.getPrivate());
-    } else {
-      FileUtils.deleteQuietly(Paths.get(
-          securityConfig.getKeyLocation(DN_COMPONENT).toString(),
-          securityConfig.getPrivateKeyFileName()).toFile());
-    }
-
-    if (pubKeyPresent) {
-      if (dnCertificateClient.getPublicKey() == null) {
-        dnKeyCodec.writePublicKey(keyPair.getPublic());
-      }
-    } else {
-      FileUtils.deleteQuietly(
-          Paths.get(securityConfig.getKeyLocation(DN_COMPONENT).toString(),
-              securityConfig.getPublicKeyFileName()).toFile());
-    }
-
-    if (certPresent) {
-      CertificateCodec codec = new CertificateCodec(securityConfig,
-          DN_COMPONENT);
-      codec.writeCertificate(new X509CertificateHolder(
-          x509Certificate.getEncoded()));
-    } else {
-      FileUtils.deleteQuietly(Paths.get(
-          securityConfig.getKeyLocation(DN_COMPONENT).toString(),
-          securityConfig.getCertificateFileName()).toFile());
-    }
-    InitResponse response = dnCertificateClient.init();
-
-    assertTrue(response.equals(expectedResult));
-
-    if (!response.equals(FAILURE)) {
-      assertTrue(OzoneSecurityUtil.checkIfFileExist(
-          securityConfig.getKeyLocation(DN_COMPONENT),
-          securityConfig.getPrivateKeyFileName()));
-      assertTrue(OzoneSecurityUtil.checkIfFileExist(
-          securityConfig.getKeyLocation(DN_COMPONENT),
-          securityConfig.getPublicKeyFileName()));
-    }
-  }
-
-  @Test
-  public void testInitOzoneManager() throws Exception {
-    if (pvtKeyPresent) {
-      omKeyCodec.writePrivateKey(keyPair.getPrivate());
-    } else {
-      FileUtils.deleteQuietly(Paths.get(
-          securityConfig.getKeyLocation(OM_COMPONENT).toString(),
-          securityConfig.getPrivateKeyFileName()).toFile());
-    }
-
-    if (pubKeyPresent) {
-      if (omCertificateClient.getPublicKey() == null) {
-        omKeyCodec.writePublicKey(keyPair.getPublic());
-      }
-    } else {
-      FileUtils.deleteQuietly(Paths.get(
-          securityConfig.getKeyLocation(OM_COMPONENT).toString(),
-          securityConfig.getPublicKeyFileName()).toFile());
-    }
-
-    if (certPresent) {
-      CertificateCodec codec = new CertificateCodec(securityConfig,
-          OM_COMPONENT);
-      codec.writeCertificate(new X509CertificateHolder(
-          x509Certificate.getEncoded()));
-    } else {
-      FileUtils.deleteQuietly(Paths.get(
-          securityConfig.getKeyLocation(OM_COMPONENT).toString(),
-          securityConfig.getCertificateFileName()).toFile());
-    }
-    InitResponse response = omCertificateClient.init();
-
-    if (pvtKeyPresent && pubKeyPresent && !certPresent) {
-      assertTrue(response.equals(RECOVER));
-    } else {
-      assertTrue(response.equals(expectedResult));
-    }
-
-    if (!response.equals(FAILURE)) {
-      assertTrue(OzoneSecurityUtil.checkIfFileExist(
-          securityConfig.getKeyLocation(OM_COMPONENT),
-          securityConfig.getPrivateKeyFileName()));
-      assertTrue(OzoneSecurityUtil.checkIfFileExist(
-          securityConfig.getKeyLocation(OM_COMPONENT),
-          securityConfig.getPublicKeyFileName()));
-    }
-  }
-
-  private X509Certificate getX509Certificate() throws Exception {
-    return KeyStoreTestUtil.generateCertificate(
-        "CN=Test", keyPair, 10, securityConfig.getSignatureAlgo());
-  }
-}
\ No newline at end of file
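The removed test above drives a single truth table (private key, public key, certificate present -> expected InitResponse) through JUnit 4's Parameterized runner: each Object[] row is injected into the @Parameter fields and every @Test method runs once per row. A minimal, self-contained sketch of that runner pattern, with a hypothetical two-input AND table:

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameter;

    import java.util.Arrays;
    import java.util.Collection;

    import static org.junit.Assert.assertEquals;

    @RunWith(Parameterized.class)
    public class TruthTableDemo {
      @Parameter
      public boolean a;
      @Parameter(1)
      public boolean b;
      @Parameter(2)
      public boolean expected;

      // Each row becomes one run of every @Test method in this class.
      @Parameterized.Parameters
      public static Collection<Object[]> data() {
        return Arrays.asList(new Object[][]{
            {false, false, false},
            {true, false, false},
            {true, true, true}});
      }

      @Test
      public void andMatchesTable() {
        assertEquals(expected, a && b);
      }
    }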
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
deleted file mode 100644
index f389cdb..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
+++ /dev/null
@@ -1,480 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.security.x509.certificate.client;
-
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
-import org.apache.hadoop.hdds.security.x509.keys.KeyCodec;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.security.KeyPair;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.Signature;
-import java.security.cert.X509Certificate;
-import java.util.UUID;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE;
-import static org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec.getPEMEncodedString;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test class for {@link DefaultCertificateClient}.
- */
-public class TestDefaultCertificateClient {
-
-  private String certSerialId;
-  private X509Certificate x509Certificate;
-  private OMCertificateClient omCertClient;
-  private DNCertificateClient dnCertClient;
-  private HDDSKeyGenerator keyGenerator;
-  private Path omMetaDirPath;
-  private Path dnMetaDirPath;
-  private SecurityConfig omSecurityConfig;
-  private SecurityConfig dnSecurityConfig;
-  private final static String UTF = "UTF-8";
-  private final static String DN_COMPONENT = DNCertificateClient.COMPONENT_NAME;
-  private final static String OM_COMPONENT = OMCertificateClient.COMPONENT_NAME;
-  private KeyCodec omKeyCodec;
-  private KeyCodec dnKeyCodec;
-
-  @Before
-  public void setUp() throws Exception {
-    OzoneConfiguration config = new OzoneConfiguration();
-    config.setStrings(OZONE_SCM_NAMES, "localhost");
-    config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 2);
-    final String omPath = GenericTestUtils
-        .getTempPath(UUID.randomUUID().toString());
-    final String dnPath = GenericTestUtils
-        .getTempPath(UUID.randomUUID().toString());
-
-    omMetaDirPath = Paths.get(omPath, "test");
-    dnMetaDirPath = Paths.get(dnPath, "test");
-
-    config.set(HDDS_METADATA_DIR_NAME, omMetaDirPath.toString());
-    omSecurityConfig = new SecurityConfig(config);
-    config.set(HDDS_METADATA_DIR_NAME, dnMetaDirPath.toString());
-    dnSecurityConfig = new SecurityConfig(config);
-
-
-    keyGenerator = new HDDSKeyGenerator(omSecurityConfig);
-    omKeyCodec = new KeyCodec(omSecurityConfig, OM_COMPONENT);
-    dnKeyCodec = new KeyCodec(dnSecurityConfig, DN_COMPONENT);
-
-    Files.createDirectories(omSecurityConfig.getKeyLocation(OM_COMPONENT));
-    Files.createDirectories(dnSecurityConfig.getKeyLocation(DN_COMPONENT));
-    x509Certificate = generateX509Cert(null);
-    certSerialId = x509Certificate.getSerialNumber().toString();
-    getCertClient();
-  }
-
-  private void getCertClient() {
-    omCertClient = new OMCertificateClient(omSecurityConfig, certSerialId);
-    dnCertClient = new DNCertificateClient(dnSecurityConfig, certSerialId);
-  }
-
-  @After
-  public void tearDown() {
-    omCertClient = null;
-    dnCertClient = null;
-    FileUtils.deleteQuietly(omMetaDirPath.toFile());
-    FileUtils.deleteQuietly(dnMetaDirPath.toFile());
-  }
-
-  /**
-   * Tests: 1. getPrivateKey 2. getPublicKey 3. storePrivateKey 4.
-   * storePublicKey
-   */
-  @Test
-  public void testKeyOperations() throws Exception {
-    cleanupOldKeyPair();
-    PrivateKey pvtKey = omCertClient.getPrivateKey();
-    PublicKey publicKey = omCertClient.getPublicKey();
-    assertNull(publicKey);
-    assertNull(pvtKey);
-
-    KeyPair keyPair = generateKeyPairFiles();
-    pvtKey = omCertClient.getPrivateKey();
-    assertNotNull(pvtKey);
-    assertEquals(pvtKey, keyPair.getPrivate());
-
-    publicKey = dnCertClient.getPublicKey();
-    assertNotNull(publicKey);
-    assertEquals(publicKey, keyPair.getPublic());
-  }
-
-  private KeyPair generateKeyPairFiles() throws Exception {
-    cleanupOldKeyPair();
-    KeyPair keyPair = keyGenerator.generateKey();
-    omKeyCodec.writePrivateKey(keyPair.getPrivate());
-    omKeyCodec.writePublicKey(keyPair.getPublic());
-
-    dnKeyCodec.writePrivateKey(keyPair.getPrivate());
-    dnKeyCodec.writePublicKey(keyPair.getPublic());
-    return keyPair;
-  }
-
-  private void cleanupOldKeyPair() {
-    FileUtils.deleteQuietly(Paths.get(
-        omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(),
-        omSecurityConfig.getPrivateKeyFileName()).toFile());
-    FileUtils.deleteQuietly(Paths.get(
-        omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(),
-        omSecurityConfig.getPublicKeyFileName()).toFile());
-    FileUtils.deleteQuietly(Paths.get(
-        dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(),
-        dnSecurityConfig.getPrivateKeyFileName()).toFile());
-    FileUtils.deleteQuietly(Paths.get(
-        dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(),
-        dnSecurityConfig.getPublicKeyFileName()).toFile());
-  }
-
-  /**
-   * Tests: 1. storeCertificate 2. getCertificate 3. verifyCertificate
-   */
-  @Test
-  public void testCertificateOps() throws Exception {
-    X509Certificate cert = omCertClient.getCertificate();
-    assertNull(cert);
-    omCertClient.storeCertificate(getPEMEncodedString(x509Certificate),
-        true);
-
-    cert = omCertClient.getCertificate(
-        x509Certificate.getSerialNumber().toString());
-    assertNotNull(cert);
-    assertTrue(cert.getEncoded().length > 0);
-    assertEquals(cert, x509Certificate);
-
-    // TODO: test verifyCertificate once implemented.
-  }
-
-  private X509Certificate generateX509Cert(KeyPair keyPair) throws Exception {
-    if (keyPair == null) {
-      keyPair = generateKeyPairFiles();
-    }
-    return KeyStoreTestUtil.generateCertificate("CN=Test", keyPair, 30,
-        omSecurityConfig.getSignatureAlgo());
-  }
-
-  @Test
-  public void testSignDataStream() throws Exception {
-    String data = RandomStringUtils.random(100, UTF);
-    FileUtils.deleteQuietly(Paths.get(
-        omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(),
-        omSecurityConfig.getPrivateKeyFileName()).toFile());
-    FileUtils.deleteQuietly(Paths.get(
-        omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(),
-        omSecurityConfig.getPublicKeyFileName()).toFile());
-
-    // Expect error when there is no private key to sign.
-    LambdaTestUtils.intercept(IOException.class, "Error while " +
-            "signing the stream",
-        () -> omCertClient.signDataStream(IOUtils.toInputStream(data,
-            UTF)));
-
-    generateKeyPairFiles();
-    byte[] sign = omCertClient.signDataStream(IOUtils.toInputStream(data,
-        UTF));
-    validateHash(sign, data.getBytes());
-  }
-
-  /**
-   * Validate hash using public key of KeyPair.
-   */
-  private void validateHash(byte[] hash, byte[] data)
-      throws Exception {
-    Signature rsaSignature =
-        Signature.getInstance(omSecurityConfig.getSignatureAlgo(),
-            omSecurityConfig.getProvider());
-    rsaSignature.initVerify(omCertClient.getPublicKey());
-    rsaSignature.update(data);
-    Assert.assertTrue(rsaSignature.verify(hash));
-  }
-
-  /**
-   * Tests: 1. verifySignature
-   */
-  @Test
-  public void verifySignatureStream() throws Exception {
-    String data = RandomStringUtils.random(500, UTF);
-    byte[] sign = omCertClient.signDataStream(IOUtils.toInputStream(data,
-        UTF));
-
-    // Positive tests.
-    assertTrue(omCertClient.verifySignature(data.getBytes(), sign,
-        x509Certificate));
-    assertTrue(omCertClient.verifySignature(IOUtils.toInputStream(data, UTF),
-        sign, x509Certificate));
-
-    // Negative tests.
-    assertFalse(omCertClient.verifySignature(data.getBytes(),
-        "abc".getBytes(), x509Certificate));
-    assertFalse(omCertClient.verifySignature(IOUtils.toInputStream(data,
-        UTF), "abc".getBytes(), x509Certificate));
-
-  }
-
-  /**
-   * Tests: 1. verifySignature
-   */
-  @Test
-  public void verifySignatureDataArray() throws Exception {
-    String data = RandomStringUtils.random(500, UTF);
-    byte[] sign = omCertClient.signData(data.getBytes());
-
-    // Positive tests.
-    assertTrue(omCertClient.verifySignature(data.getBytes(), sign,
-        x509Certificate));
-    assertTrue(omCertClient.verifySignature(IOUtils.toInputStream(data, UTF),
-        sign, x509Certificate));
-
-    // Negative tests.
-    assertFalse(omCertClient.verifySignature(data.getBytes(),
-        "abc".getBytes(), x509Certificate));
-    assertFalse(omCertClient.verifySignature(IOUtils.toInputStream(data,
-        UTF), "abc".getBytes(), x509Certificate));
-
-  }
-
-  @Test
-  public void queryCertificate() throws Exception {
-    LambdaTestUtils.intercept(UnsupportedOperationException.class,
-        "Operation not supported",
-        () -> omCertClient.queryCertificate(""));
-  }
-
-  @Test
-  public void testCertificateLoadingOnInit() throws Exception {
-    KeyPair keyPair = keyGenerator.generateKey();
-    X509Certificate cert1 = generateX509Cert(keyPair);
-    X509Certificate cert2 = generateX509Cert(keyPair);
-    X509Certificate cert3 = generateX509Cert(keyPair);
-
-    Path certPath = dnSecurityConfig.getCertificateLocation(DN_COMPONENT);
-    CertificateCodec codec = new CertificateCodec(dnSecurityConfig,
-        DN_COMPONENT);
-
-    // Certificate not found.
-    LambdaTestUtils.intercept(CertificateException.class, "Error while" +
-            " getting certificate",
-        () -> dnCertClient.getCertificate(cert1.getSerialNumber()
-            .toString()));
-    LambdaTestUtils.intercept(CertificateException.class, "Error while" +
-            " getting certificate",
-        () -> dnCertClient.getCertificate(cert2.getSerialNumber()
-            .toString()));
-    LambdaTestUtils.intercept(CertificateException.class, "Error while" +
-            " getting certificate",
-        () -> dnCertClient.getCertificate(cert3.getSerialNumber()
-            .toString()));
-    codec.writeCertificate(certPath, "1.crt",
-        getPEMEncodedString(cert1), true);
-    codec.writeCertificate(certPath, "2.crt",
-        getPEMEncodedString(cert2), true);
-    codec.writeCertificate(certPath, "3.crt",
-        getPEMEncodedString(cert3), true);
-
-    // Re-instantiate the DN client, which will load certificates from the
-    // filesystem.
-    dnCertClient = new DNCertificateClient(dnSecurityConfig, certSerialId);
-
-    assertNotNull(dnCertClient.getCertificate(cert1.getSerialNumber()
-        .toString()));
-    assertNotNull(dnCertClient.getCertificate(cert2.getSerialNumber()
-        .toString()));
-    assertNotNull(dnCertClient.getCertificate(cert3.getSerialNumber()
-        .toString()));
-
-  }
-
-  @Test
-  public void testStoreCertificate() throws Exception {
-    KeyPair keyPair = keyGenerator.generateKey();
-    X509Certificate cert1 = generateX509Cert(keyPair);
-    X509Certificate cert2 = generateX509Cert(keyPair);
-    X509Certificate cert3 = generateX509Cert(keyPair);
-
-    dnCertClient.storeCertificate(getPEMEncodedString(cert1), true);
-    dnCertClient.storeCertificate(getPEMEncodedString(cert2), true);
-    dnCertClient.storeCertificate(getPEMEncodedString(cert3), true);
-
-    assertNotNull(dnCertClient.getCertificate(cert1.getSerialNumber()
-        .toString()));
-    assertNotNull(dnCertClient.getCertificate(cert2.getSerialNumber()
-        .toString()));
-    assertNotNull(dnCertClient.getCertificate(cert3.getSerialNumber()
-        .toString()));
-  }
-
-  @Test
-  public void testInitCertAndKeypairValidationFailures() throws Exception {
-
-    GenericTestUtils.LogCapturer dnClientLog = GenericTestUtils.LogCapturer
-        .captureLogs(dnCertClient.getLogger());
-    GenericTestUtils.LogCapturer omClientLog = GenericTestUtils.LogCapturer
-        .captureLogs(omCertClient.getLogger());
-    KeyPair keyPair = keyGenerator.generateKey();
-    KeyPair keyPair2 = keyGenerator.generateKey();
-    dnClientLog.clearOutput();
-    omClientLog.clearOutput();
-
-    // Case 1. Expect failure when keypair validation fails.
-    FileUtils.deleteQuietly(Paths.get(
-        omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(),
-        omSecurityConfig.getPrivateKeyFileName()).toFile());
-    FileUtils.deleteQuietly(Paths.get(
-        omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(),
-        omSecurityConfig.getPublicKeyFileName()).toFile());
-
-
-    FileUtils.deleteQuietly(Paths.get(
-        dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(),
-        dnSecurityConfig.getPrivateKeyFileName()).toFile());
-    FileUtils.deleteQuietly(Paths.get(
-        dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(),
-        dnSecurityConfig.getPublicKeyFileName()).toFile());
-
-    omKeyCodec.writePrivateKey(keyPair.getPrivate());
-    omKeyCodec.writePublicKey(keyPair2.getPublic());
-
-    dnKeyCodec.writePrivateKey(keyPair.getPrivate());
-    dnKeyCodec.writePublicKey(keyPair2.getPublic());
-
-
-    // Check for DN.
-    assertEquals(dnCertClient.init(), FAILURE);
-    assertTrue(dnClientLog.getOutput().contains("Keypair validation " +
-        "failed"));
-    dnClientLog.clearOutput();
-    omClientLog.clearOutput();
-
-    // Check for OM.
-    assertEquals(omCertClient.init(), FAILURE);
-    assertTrue(omClientLog.getOutput().contains("Keypair validation " +
-        "failed"));
-    dnClientLog.clearOutput();
-    omClientLog.clearOutput();
-
-    // Case 2. Expect failure when certificate is generated from different
-    // private key and keypair validation fails.
-    getCertClient();
-    FileUtils.deleteQuietly(Paths.get(
-        omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(),
-        omSecurityConfig.getCertificateFileName()).toFile());
-    FileUtils.deleteQuietly(Paths.get(
-        dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(),
-        dnSecurityConfig.getCertificateFileName()).toFile());
-
-    CertificateCodec omCertCodec = new CertificateCodec(omSecurityConfig,
-        OM_COMPONENT);
-    omCertCodec.writeCertificate(new X509CertificateHolder(
-        x509Certificate.getEncoded()));
-
-    CertificateCodec dnCertCodec = new CertificateCodec(dnSecurityConfig,
-        DN_COMPONENT);
-    dnCertCodec.writeCertificate(new X509CertificateHolder(
-        x509Certificate.getEncoded()));
-    // Check for DN.
-    assertEquals(dnCertClient.init(), FAILURE);
-    assertTrue(dnClientLog.getOutput().contains("Keypair validation " +
-        "failed"));
-    dnClientLog.clearOutput();
-    omClientLog.clearOutput();
-
-    // Check for OM.
-    assertEquals(omCertClient.init(), FAILURE);
-    assertTrue(omClientLog.getOutput().contains("Keypair validation failed"));
-    dnClientLog.clearOutput();
-    omClientLog.clearOutput();
-
-    // Case 3. Expect failure when certificate is generated from different
-    // private key and certificate validation fails.
-
-    // Re write the correct public key.
-    FileUtils.deleteQuietly(Paths.get(
-        omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(),
-        omSecurityConfig.getPublicKeyFileName()).toFile());
-    FileUtils.deleteQuietly(Paths.get(
-        dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(),
-        dnSecurityConfig.getPublicKeyFileName()).toFile());
-    getCertClient();
-    omKeyCodec.writePublicKey(keyPair.getPublic());
-    dnKeyCodec.writePublicKey(keyPair.getPublic());
-
-    // Check for DN.
-    assertEquals(dnCertClient.init(), FAILURE);
-    assertTrue(dnClientLog.getOutput().contains("Stored certificate is " +
-        "generated with different"));
-    dnClientLog.clearOutput();
-    omClientLog.clearOutput();
-
-    // Check for OM.
-    assertEquals(omCertClient.init(), FAILURE);
-    assertTrue(omClientLog.getOutput().contains("Stored certificate is " +
-        "generated with different"));
-    dnClientLog.clearOutput();
-    omClientLog.clearOutput();
-
-    // Case 4. Failure when public key recovery fails.
-    getCertClient();
-    FileUtils.deleteQuietly(Paths.get(
-        omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(),
-        omSecurityConfig.getPublicKeyFileName()).toFile());
-    FileUtils.deleteQuietly(Paths.get(
-        dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(),
-        dnSecurityConfig.getPublicKeyFileName()).toFile());
-
-    // Check for DN.
-    assertEquals(dnCertClient.init(), FAILURE);
-    assertTrue(dnClientLog.getOutput().contains("Can't recover public key"));
-
-    // Check for OM.
-    assertEquals(omCertClient.init(), FAILURE);
-    assertTrue(omClientLog.getOutput().contains("Can't recover public key"));
-    dnClientLog.clearOutput();
-    omClientLog.clearOutput();
-  }
-
-}
\ No newline at end of file
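The removed signDataStream/validateHash pair above reduces to a plain JCA sign-then-verify round trip: sign with the private key, verify with the matching public key. A minimal sketch using only java.security; the algorithm and sample data are chosen for illustration:

    import java.nio.charset.StandardCharsets;
    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.Signature;

    public final class SignVerifyDemo {
      public static void main(String[] args) throws Exception {
        KeyPair keys = KeyPairGenerator.getInstance("RSA").generateKeyPair();
        byte[] data = "hello ozone".getBytes(StandardCharsets.UTF_8);

        // Sign with the private key.
        Signature signer = Signature.getInstance("SHA256withRSA");
        signer.initSign(keys.getPrivate());
        signer.update(data);
        byte[] sig = signer.sign();

        // Verify with the public key, as validateHash() did.
        Signature verifier = Signature.getInstance("SHA256withRSA");
        verifier.initVerify(keys.getPublic());
        verifier.update(data);
        System.out.println("valid = " + verifier.verify(sig));
      }
    }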
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java
deleted file mode 100644
index ded5206..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificate.utils;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate;
-import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.io.IOException;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.time.LocalDate;
-import java.time.temporal.ChronoUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Tests the Certificate codecs.
- */
-public class TestCertificateCodec {
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-  private static final String COMPONENT = "test";
-  private SecurityConfig securityConfig;
-  @Rule
-  public TemporaryFolder temporaryFolder = new TemporaryFolder();
-
-  @Before
-  public void init() throws IOException {
-    conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().toString());
-    securityConfig = new SecurityConfig(conf);
-  }
-
-  /**
-   * This test converts an X509CertificateHolder object to a PEM encoded
-   * string, then creates a new X509CertificateHolder object to verify that
-   * we are able to serialize and deserialize correctly. We follow up by
-   * converting these objects to standard JCA X509Certificate objects.
-   *
-   * @throws NoSuchProviderException  - on Error.
-   * @throws NoSuchAlgorithmException - on Error.
-   * @throws IOException              - on Error.
-   * @throws SCMSecurityException     - on Error.
-   * @throws CertificateException     - on Error.
-   */
-  @Test
-  public void testGetPEMEncodedString()
-      throws NoSuchProviderException, NoSuchAlgorithmException,
-      IOException, SCMSecurityException, CertificateException {
-    HDDSKeyGenerator keyGenerator =
-        new HDDSKeyGenerator(conf);
-    X509CertificateHolder cert =
-        SelfSignedCertificate.newBuilder()
-            .setSubject(RandomStringUtils.randomAlphabetic(4))
-            .setClusterID(RandomStringUtils.randomAlphabetic(4))
-            .setScmID(RandomStringUtils.randomAlphabetic(4))
-            .setBeginDate(LocalDate.now())
-            .setEndDate(LocalDate.now().plus(1, ChronoUnit.DAYS))
-            .setConfiguration(keyGenerator.getSecurityConfig()
-                .getConfiguration())
-            .setKey(keyGenerator.generateKey())
-            .makeCA()
-            .build();
-    CertificateCodec codec = new CertificateCodec(securityConfig, COMPONENT);
-    String pemString = codec.getPEMEncodedString(cert);
-    assertTrue(pemString.startsWith(CertificateCodec.BEGIN_CERT));
-    assertTrue(pemString.endsWith(CertificateCodec.END_CERT + "\n"));
-
-    // Read back the certificate and verify that all the comparisons pass.
-    X509CertificateHolder newCert =
-        codec.getCertificateHolder(codec.getX509Certificate(pemString));
-    assertEquals(cert, newCert);
-
-    // Just make sure we can decode both of these to standard JCA classes.
-    X509Certificate firstCert = CertificateCodec.getX509Certificate(cert);
-    X509Certificate secondCert = CertificateCodec.getX509Certificate(newCert);
-    assertEquals(firstCert, secondCert);
-  }
-
-  /**
-   * Tests writing and reading certificates in PEM encoded form.
-   *
-   * @throws NoSuchProviderException  - on Error.
-   * @throws NoSuchAlgorithmException - on Error.
-   * @throws IOException              - on Error.
-   * @throws SCMSecurityException     - on Error.
-   * @throws CertificateException     - on Error.
-   */
-  @Test
-  public void testwriteCertificate() throws NoSuchProviderException,
-      NoSuchAlgorithmException, IOException, SCMSecurityException,
-      CertificateException {
-    HDDSKeyGenerator keyGenerator =
-        new HDDSKeyGenerator(conf);
-    X509CertificateHolder cert =
-        SelfSignedCertificate.newBuilder()
-            .setSubject(RandomStringUtils.randomAlphabetic(4))
-            .setClusterID(RandomStringUtils.randomAlphabetic(4))
-            .setScmID(RandomStringUtils.randomAlphabetic(4))
-            .setBeginDate(LocalDate.now())
-            .setEndDate(LocalDate.now().plus(1, ChronoUnit.DAYS))
-            .setConfiguration(keyGenerator.getSecurityConfig()
-                .getConfiguration())
-            .setKey(keyGenerator.generateKey())
-            .makeCA()
-            .build();
-    CertificateCodec codec = new CertificateCodec(securityConfig, COMPONENT);
-    String pemString = codec.getPEMEncodedString(cert);
-    File basePath = temporaryFolder.newFolder();
-    if (!basePath.exists()) {
-      Assert.assertTrue(basePath.mkdirs());
-    }
-    codec.writeCertificate(basePath.toPath(), "pemcertificate.crt",
-        pemString, false);
-    X509CertificateHolder certHolder =
-        codec.readCertificate(basePath.toPath(), "pemcertificate.crt");
-    assertNotNull(certHolder);
-    assertEquals(cert.getSerialNumber(), certHolder.getSerialNumber());
-  }
-
-  /**
-   * Tests reading and writing certificates in DER form.
-   *
-   * @throws IOException              - on Error.
-   * @throws SCMSecurityException     - on Error.
-   * @throws CertificateException     - on Error.
-   * @throws NoSuchProviderException  - on Error.
-   * @throws NoSuchAlgorithmException - on Error.
-   */
-  @Test
-  public void testwriteCertificateDefault()
-      throws IOException, SCMSecurityException, CertificateException,
-      NoSuchProviderException, NoSuchAlgorithmException {
-    HDDSKeyGenerator keyGenerator =
-        new HDDSKeyGenerator(conf);
-    X509CertificateHolder cert =
-        SelfSignedCertificate.newBuilder()
-            .setSubject(RandomStringUtils.randomAlphabetic(4))
-            .setClusterID(RandomStringUtils.randomAlphabetic(4))
-            .setScmID(RandomStringUtils.randomAlphabetic(4))
-            .setBeginDate(LocalDate.now())
-            .setEndDate(LocalDate.now().plus(1, ChronoUnit.DAYS))
-            .setConfiguration(keyGenerator.getSecurityConfig()
-                .getConfiguration())
-            .setKey(keyGenerator.generateKey())
-            .makeCA()
-            .build();
-    CertificateCodec codec = new CertificateCodec(securityConfig, COMPONENT);
-    codec.writeCertificate(cert);
-    X509CertificateHolder certHolder = codec.readCertificate();
-    assertNotNull(certHolder);
-    assertEquals(cert.getSerialNumber(), certHolder.getSerialNumber());
-  }
-
-  /**
-   * Tests writing to non-default certificate file name.
-   *
-   * @throws IOException              - on Error.
-   * @throws SCMSecurityException     - on Error.
-   * @throws NoSuchProviderException  - on Error.
-   * @throws NoSuchAlgorithmException - on Error.
-   * @throws CertificateException     - on Error.
-   */
-  @Test
-  public void writeCertificate2() throws IOException, SCMSecurityException,
-      NoSuchProviderException, NoSuchAlgorithmException, CertificateException {
-    HDDSKeyGenerator keyGenerator =
-        new HDDSKeyGenerator(conf);
-    X509CertificateHolder cert =
-        SelfSignedCertificate.newBuilder()
-            .setSubject(RandomStringUtils.randomAlphabetic(4))
-            .setClusterID(RandomStringUtils.randomAlphabetic(4))
-            .setScmID(RandomStringUtils.randomAlphabetic(4))
-            .setBeginDate(LocalDate.now())
-            .setEndDate(LocalDate.now().plus(1, ChronoUnit.DAYS))
-            .setConfiguration(keyGenerator.getSecurityConfig()
-                .getConfiguration())
-            .setKey(keyGenerator.generateKey())
-            .makeCA()
-            .build();
-    CertificateCodec codec =
-        new CertificateCodec(keyGenerator.getSecurityConfig(), "ca");
-    codec.writeCertificate(cert, "newcert.crt", false);
-    // Rewrite with force enabled.
-    codec.writeCertificate(cert, "newcert.crt", true);
-    X509CertificateHolder x509CertificateHolder =
-        codec.readCertificate(codec.getLocation(), "newcert.crt");
-    assertNotNull(x509CertificateHolder);
-
-  }
-}
\ No newline at end of file
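The removed codec tests above PEM-encode an X509CertificateHolder and read it back. A minimal round-trip sketch using BouncyCastle's JcaPEMWriter and PEMParser, assuming bcprov/bcpkix on the classpath; all names are illustrative:

    import org.bouncycastle.asn1.x500.X500Name;
    import org.bouncycastle.cert.X509CertificateHolder;
    import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder;
    import org.bouncycastle.openssl.PEMParser;
    import org.bouncycastle.openssl.jcajce.JcaPEMWriter;
    import org.bouncycastle.operator.ContentSigner;
    import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;

    import java.io.StringReader;
    import java.io.StringWriter;
    import java.math.BigInteger;
    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.util.Date;

    public final class PemRoundTripDemo {
      public static void main(String[] args) throws Exception {
        // Throwaway self-signed certificate.
        KeyPair keys = KeyPairGenerator.getInstance("RSA").generateKeyPair();
        X500Name name = new X500Name("CN=Test");
        Date now = new Date();
        Date later = new Date(now.getTime() + 86_400_000L);
        ContentSigner signer = new JcaContentSignerBuilder("SHA256withRSA")
            .build(keys.getPrivate());
        X509CertificateHolder cert = new JcaX509v3CertificateBuilder(
            name, BigInteger.ONE, now, later, name, keys.getPublic())
            .build(signer);

        // PEM-encode, then parse back, as the removed codec test does.
        StringWriter pem = new StringWriter();
        try (JcaPEMWriter writer = new JcaPEMWriter(pem)) {
          writer.writeObject(cert);
        }
        try (PEMParser parser =
                 new PEMParser(new StringReader(pem.toString()))) {
          X509CertificateHolder parsed =
              (X509CertificateHolder) parser.readObject();
          System.out.println("round trip ok = " + cert.equals(parsed));
        }
      }
    }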
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java
deleted file mode 100644
index 4551f29..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- Tests for Certificate helpers.
- */
-package org.apache.hadoop.hdds.security.x509.certificate.utils;
-
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java
deleted file mode 100644
index 5720d27..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.security.x509.certificates;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
-import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
-import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil;
-import org.bouncycastle.asn1.ASN1Sequence;
-import org.bouncycastle.asn1.x509.Extension;
-import org.bouncycastle.asn1.x509.Extensions;
-import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo;
-import org.bouncycastle.operator.ContentVerifierProvider;
-import org.bouncycastle.operator.OperatorCreationException;
-import org.bouncycastle.operator.jcajce.JcaContentVerifierProviderBuilder;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-import org.bouncycastle.pkcs.PKCSException;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.IOException;
-import java.security.KeyPair;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-
-/**
- * Tests for the Certificate Sign Request builder.
- */
-public class TestCertificateSignRequest {
-
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-  @Rule
-  public TemporaryFolder temporaryFolder = new TemporaryFolder();
-  private SecurityConfig securityConfig;
-
-  @Before
-  public void init() throws IOException {
-    conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().toString());
-    securityConfig = new SecurityConfig(conf);
-  }
-
-  @Test
-  public void testGenerateCSR() throws NoSuchProviderException,
-      NoSuchAlgorithmException, SCMSecurityException,
-      OperatorCreationException, PKCSException {
-    String clusterID = UUID.randomUUID().toString();
-    String scmID = UUID.randomUUID().toString();
-    String subject = "DN001";
-    HDDSKeyGenerator keyGen =
-        new HDDSKeyGenerator(securityConfig.getConfiguration());
-    KeyPair keyPair = keyGen.generateKey();
-
-    CertificateSignRequest.Builder builder =
-        new CertificateSignRequest.Builder()
-            .setSubject(subject)
-            .setScmID(scmID)
-            .setClusterID(clusterID)
-            .setKey(keyPair)
-            .setConfiguration(conf);
-    PKCS10CertificationRequest csr = builder.build();
-
-    // Check the Subject Name is in the expected format.
-    String dnName = String.format(SecurityUtil.getDistinguishedNameFormat(),
-        subject, scmID, clusterID);
-    Assert.assertEquals(csr.getSubject().toString(), dnName);
-
-    // Verify the public key info match
-    byte[] encoded = keyPair.getPublic().getEncoded();
-    SubjectPublicKeyInfo subjectPublicKeyInfo =
-        SubjectPublicKeyInfo.getInstance(ASN1Sequence.getInstance(encoded));
-    SubjectPublicKeyInfo csrPublicKeyInfo = csr.getSubjectPublicKeyInfo();
-    Assert.assertEquals(csrPublicKeyInfo, subjectPublicKeyInfo);
-
-    // Verify CSR with attribute for extensions
-    Assert.assertEquals(1, csr.getAttributes().length);
-    Extensions extensions = SecurityUtil.getPkcs9Extensions(csr);
-
-    // Verify key usage extension
-    Extension keyUsageExt = extensions.getExtension(Extension.keyUsage);
-    Assert.assertTrue(keyUsageExt.isCritical());
-
-
-    // Verify San extension not set
-    Assert.assertNull(
-        extensions.getExtension(Extension.subjectAlternativeName));
-
-    // Verify signature in CSR
-    ContentVerifierProvider verifierProvider =
-        new JcaContentVerifierProviderBuilder().setProvider(securityConfig
-            .getProvider()).build(csr.getSubjectPublicKeyInfo());
-    Assert.assertTrue(csr.isSignatureValid(verifierProvider));
-  }
-
-  @Test
-  public void testGenerateCSRWithSan() throws NoSuchProviderException,
-      NoSuchAlgorithmException, SCMSecurityException,
-      OperatorCreationException, PKCSException {
-    String clusterID = UUID.randomUUID().toString();
-    String scmID = UUID.randomUUID().toString();
-    String subject = "DN001";
-    HDDSKeyGenerator keyGen =
-        new HDDSKeyGenerator(securityConfig.getConfiguration());
-    KeyPair keyPair = keyGen.generateKey();
-
-    CertificateSignRequest.Builder builder =
-        new CertificateSignRequest.Builder()
-            .setSubject(subject)
-            .setScmID(scmID)
-            .setClusterID(clusterID)
-            .setKey(keyPair)
-            .setConfiguration(conf);
-
-    // Multi-home
-    builder.addIpAddress("192.168.1.1");
-    builder.addIpAddress("192.168.2.1");
-
-    builder.addDnsName("dn1.abc.com");
-
-    PKCS10CertificationRequest csr = builder.build();
-
-    // Check the Subject Name is in the expected format.
-    String dnName = String.format(SecurityUtil.getDistinguishedNameFormat(),
-        subject, scmID, clusterID);
-    Assert.assertEquals(csr.getSubject().toString(), dnName);
-
-    // Verify the public key info match
-    byte[] encoded = keyPair.getPublic().getEncoded();
-    SubjectPublicKeyInfo subjectPublicKeyInfo =
-        SubjectPublicKeyInfo.getInstance(ASN1Sequence.getInstance(encoded));
-    SubjectPublicKeyInfo csrPublicKeyInfo = csr.getSubjectPublicKeyInfo();
-    Assert.assertEquals(csrPublicKeyInfo, subjectPublicKeyInfo);
-
-    // Verify CSR with attribute for extensions
-    Assert.assertEquals(1, csr.getAttributes().length);
-    Extensions extensions = SecurityUtil.getPkcs9Extensions(csr);
-
-    // Verify key usage extension
-    Extension keyUsageExt = extensions.getExtension(Extension.keyUsage);
-    Assert.assertTrue(keyUsageExt.isCritical());
-
-
-    // Verify signature in CSR
-    ContentVerifierProvider verifierProvider =
-        new JcaContentVerifierProviderBuilder().setProvider(securityConfig
-            .getProvider()).build(csr.getSubjectPublicKeyInfo());
-    Assert.assertTrue(csr.isSignatureValid(verifierProvider));
-  }
-
-  @Test
-  public void testGenerateCSRWithInvalidParams() throws NoSuchProviderException,
-      NoSuchAlgorithmException, SCMSecurityException {
-    String clusterID = UUID.randomUUID().toString();
-    String scmID = UUID.randomUUID().toString();
-    String subject = "DN001";
-    HDDSKeyGenerator keyGen =
-        new HDDSKeyGenerator(securityConfig.getConfiguration());
-    KeyPair keyPair = keyGen.generateKey();
-
-    CertificateSignRequest.Builder builder =
-        new CertificateSignRequest.Builder()
-            .setSubject(subject)
-            .setScmID(scmID)
-            .setClusterID(clusterID)
-            .setKey(keyPair)
-            .setConfiguration(conf);
-
-    try {
-      builder.setKey(null);
-      builder.build();
-      Assert.fail("Null Key should have failed.");
-    } catch (NullPointerException | IllegalArgumentException e) {
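-      // Expected: restore a valid key so the builder can be reused below.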
-      builder.setKey(keyPair);
-    }
-
-    // Now try with blank/null Subject.
-    try {
-      builder.setSubject(null);
-      builder.build();
-      Assert.fail("Null/Blank Subject should have thrown.");
-    } catch (IllegalArgumentException e) {
-      builder.setSubject(subject);
-    }
-
-    try {
-      builder.setSubject("");
-      builder.build();
-      Assert.fail("Null/Blank Subject should have thrown.");
-    } catch (IllegalArgumentException e) {
-      builder.setSubject(subject);
-    }
-
-    // Now try with invalid IP address
-    try {
-      builder.addIpAddress("255.255.255.*");
-      builder.build();
-      Assert.fail("Invalid ip address");
-    } catch (IllegalArgumentException e) {
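-      // Expected: the builder must reject a malformed IP address.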
-    }
-
-    PKCS10CertificationRequest csr = builder.build();
-
-    // Check the Subject Name is in the expected format.
-    String dnName = String.format(SecurityUtil.getDistinguishedNameFormat(),
-        subject, scmID, clusterID);
-    Assert.assertEquals(csr.getSubject().toString(), dnName);
-
-    // Verify the public key info match
-    byte[] encoded = keyPair.getPublic().getEncoded();
-    SubjectPublicKeyInfo subjectPublicKeyInfo =
-        SubjectPublicKeyInfo.getInstance(ASN1Sequence.getInstance(encoded));
-    SubjectPublicKeyInfo csrPublicKeyInfo = csr.getSubjectPublicKeyInfo();
-    Assert.assertEquals(csrPublicKeyInfo, subjectPublicKeyInfo);
-
-    // Verify CSR with attribute for extensions
-    Assert.assertEquals(1, csr.getAttributes().length);
-  }
-
-  @Test
-  public void testCsrSerialization() throws NoSuchProviderException,
-      NoSuchAlgorithmException, SCMSecurityException, IOException {
-    String clusterID = UUID.randomUUID().toString();
-    String scmID = UUID.randomUUID().toString();
-    String subject = "DN001";
-    HDDSKeyGenerator keyGen =
-        new HDDSKeyGenerator(securityConfig.getConfiguration());
-    KeyPair keyPair = keyGen.generateKey();
-
-    CertificateSignRequest.Builder builder =
-        new CertificateSignRequest.Builder()
-            .setSubject(subject)
-            .setScmID(scmID)
-            .setClusterID(clusterID)
-            .setKey(keyPair)
-            .setConfiguration(conf);
-
-    PKCS10CertificationRequest csr = builder.build();
-    byte[] csrBytes = csr.getEncoded();
-
-    // Verify de-serialized CSR matches with the original CSR
-    PKCS10CertificationRequest dsCsr = new PKCS10CertificationRequest(csrBytes);
-    Assert.assertEquals(csr, dsCsr);
-  }
-}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java
deleted file mode 100644
index 02d0078..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.certificates;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate;
-import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
-import org.bouncycastle.asn1.x509.Extension;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.IOException;
-import java.math.BigInteger;
-import java.security.InvalidKeyException;
-import java.security.KeyPair;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-import java.security.SignatureException;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.time.LocalDate;
-import java.time.temporal.ChronoUnit;
-import java.util.Date;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-
-/**
- * Test Class for Root Certificate generation.
- */
-public class TestRootCertificate {
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-  @Rule
-  public TemporaryFolder temporaryFolder = new TemporaryFolder();
-  private SecurityConfig securityConfig;
-
-  @Before
-  public void init() throws IOException {
-    conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().toString());
-    securityConfig = new SecurityConfig(conf);
-  }
-
-  @Test
-  public void testAllFieldsAreExpected()
-      throws SCMSecurityException, NoSuchProviderException,
-      NoSuchAlgorithmException, CertificateException,
-      SignatureException, InvalidKeyException, IOException {
-    LocalDate notBefore = LocalDate.now();
-    LocalDate notAfter = notBefore.plus(365, ChronoUnit.DAYS);
-    String clusterID = UUID.randomUUID().toString();
-    String scmID = UUID.randomUUID().toString();
-    String subject = "testRootCert";
-    HDDSKeyGenerator keyGen =
-        new HDDSKeyGenerator(securityConfig.getConfiguration());
-    KeyPair keyPair = keyGen.generateKey();
-
-    SelfSignedCertificate.Builder builder =
-        SelfSignedCertificate.newBuilder()
-            .setBeginDate(notBefore)
-            .setEndDate(notAfter)
-            .setClusterID(clusterID)
-            .setScmID(scmID)
-            .setSubject(subject)
-            .setKey(keyPair)
-            .setConfiguration(conf);
-
-    X509CertificateHolder certificateHolder = builder.build();
-
-    // Assert that we indeed have a self-signed certificate.
-    Assert.assertEquals(certificateHolder.getIssuer(),
-        certificateHolder.getSubject());
-
-
-    // Make sure NotBefore is not earlier than the requested begin date.
-    Date invalidDate = java.sql.Date.valueOf(
-        notBefore.minus(1, ChronoUnit.DAYS));
-    Assert.assertFalse(
-        certificateHolder.getNotBefore()
-            .before(invalidDate));
-
-    // Make sure the end date is honored.
-    invalidDate = java.sql.Date.valueOf(
-        notAfter.plus(1, ChronoUnit.DAYS));
-    Assert.assertFalse(
-        certificateHolder.getNotAfter()
-            .after(invalidDate));
-
-    // Check the Subject Name and Issuer Name is in the expected format.
-    String dnName = String.format(SelfSignedCertificate.getNameFormat(),
-        subject, scmID, clusterID);
-    Assert.assertEquals(certificateHolder.getIssuer().toString(), dnName);
-    Assert.assertEquals(certificateHolder.getSubject().toString(), dnName);
-
-    // We did not ask for this certificate to be a CertificateServer
-    // certificate, hence the basicConstraints extension should be null.
-    Assert.assertNull(
-        certificateHolder.getExtension(Extension.basicConstraints));
-
-    // Extract the Certificate and verify that certificate matches the public
-    // key.
-    X509Certificate cert =
-        new JcaX509CertificateConverter().getCertificate(certificateHolder);
-    cert.verify(keyPair.getPublic());
-  }
-
-  @Test
-  public void testCACert()
-      throws SCMSecurityException, NoSuchProviderException,
-      NoSuchAlgorithmException, IOException {
-    LocalDate notBefore = LocalDate.now();
-    LocalDate notAfter = notBefore.plus(365, ChronoUnit.DAYS);
-    String clusterID = UUID.randomUUID().toString();
-    String scmID = UUID.randomUUID().toString();
-    String subject = "testRootCert";
-    HDDSKeyGenerator keyGen =
-        new HDDSKeyGenerator(securityConfig.getConfiguration());
-    KeyPair keyPair = keyGen.generateKey();
-
-    SelfSignedCertificate.Builder builder =
-        SelfSignedCertificate.newBuilder()
-            .setBeginDate(notBefore)
-            .setEndDate(notAfter)
-            .setClusterID(clusterID)
-            .setScmID(scmID)
-            .setSubject(subject)
-            .setKey(keyPair)
-            .setConfiguration(conf)
-            .makeCA();
-
-    X509CertificateHolder certificateHolder = builder.build();
-    // This time we asked for a CertificateServer certificate, so make sure
-    // the basicConstraints extension is present and valid.
-    Extension basicExt =
-        certificateHolder.getExtension(Extension.basicConstraints);
-
-    Assert.assertNotNull(basicExt);
-    Assert.assertTrue(basicExt.isCritical());
-
-    // Since this code assigns ONE for the root certificate, we check if the
-    // serial number is the expected number.
-    Assert.assertEquals(certificateHolder.getSerialNumber(), BigInteger.ONE);
-  }
-
-  @Test
-  public void testInvalidParamFails()
-      throws SCMSecurityException, NoSuchProviderException,
-      NoSuchAlgorithmException, IOException {
-    LocalDate notBefore = LocalDate.now();
-    LocalDate notAfter = notBefore.plus(365, ChronoUnit.DAYS);
-    String clusterID = UUID.randomUUID().toString();
-    String scmID = UUID.randomUUID().toString();
-    String subject = "testRootCert";
-    HDDSKeyGenerator keyGen =
-        new HDDSKeyGenerator(securityConfig.getConfiguration());
-    KeyPair keyPair = keyGen.generateKey();
-
-    SelfSignedCertificate.Builder builder =
-        SelfSignedCertificate.newBuilder()
-            .setBeginDate(notBefore)
-            .setEndDate(notAfter)
-            .setClusterID(clusterID)
-            .setScmID(scmID)
-            .setSubject(subject)
-            .setConfiguration(conf)
-            .setKey(keyPair)
-            .makeCA();
-    try {
-      builder.setKey(null);
-      builder.build();
-      Assert.fail("Null Key should have failed.");
-    } catch (NullPointerException | IllegalArgumentException e) {
-      builder.setKey(keyPair);
-    }
-
-    // Now try with Blank Subject.
-    try {
-      builder.setSubject("");
-      builder.build();
-      Assert.fail("Null/Blank Subject should have thrown.");
-    } catch (IllegalArgumentException e) {
-      builder.setSubject(subject);
-    }
-
-    // Now try with blank/null SCM ID
-    try {
-      builder.setScmID(null);
-      builder.build();
-      Assert.fail("Null/Blank SCM ID should have thrown.");
-    } catch (IllegalArgumentException e) {
-      builder.setScmID(scmID);
-    }
-
-
-    // Now try with blank/null Cluster ID
-    try {
-      builder.setClusterID(null);
-      builder.build();
-      Assert.fail("Null/Blank Cluster ID should have thrown.");
-    } catch (IllegalArgumentException e) {
-      builder.setClusterID(clusterID);
-    }
-
-
-    // Swap the Begin and End Date and verify that we cannot create a
-    // certificate like that.
-    try {
-      builder.setBeginDate(notAfter);
-      builder.setEndDate(notBefore);
-      builder.build();
-      Assert.fail("Illegal dates should have thrown.");
-    } catch (IllegalArgumentException e) {
-      builder.setBeginDate(notBefore);
-      builder.setEndDate(notAfter);
-    }
-
-    try {
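-      // Build a mismatched pair: a fresh public key with the old private key.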
-      KeyPair newKey = keyGen.generateKey();
-      KeyPair wrongKey = new KeyPair(newKey.getPublic(), keyPair.getPrivate());
-      builder.setKey(wrongKey);
-      X509CertificateHolder certificateHolder = builder.build();
-      X509Certificate cert =
-          new JcaX509CertificateConverter().getCertificate(certificateHolder);
-      cert.verify(wrongKey.getPublic());
-      Assert.fail("Invalid Key, should have thrown.");
-    } catch (SCMSecurityException | CertificateException
-        | SignatureException | InvalidKeyException e) {
-      builder.setKey(keyPair);
-    }
-    // Assert that we can create a certificate with all sane params.
-    Assert.assertNotNull(builder.build());
-  }
-}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/package-info.java
deleted file mode 100644
index fffe1e5..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Test classes for Certificate utilities.
- */
-package org.apache.hadoop.hdds.security.x509.certificates;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
deleted file mode 100644
index 08761f4..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.keys;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-import java.security.KeyPair;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-import java.security.PublicKey;
-import java.security.interfaces.RSAPublicKey;
-import java.security.spec.PKCS8EncodedKeySpec;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Test class for HDDS Key Generator.
- */
-public class TestHDDSKeyGenerator {
-  private SecurityConfig config;
-
-  @Before
-  public void init() {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OZONE_METADATA_DIRS,  GenericTestUtils.getTempPath("testpath"));
-    config = new SecurityConfig(conf);
-  }
-  /**
-   * In this test we verify that we are able to create a key pair, get the
-   * encoded bytes of the private key, and use an ASN.1 parser to parse them
-   * back into a private key.
-   * @throws NoSuchProviderException - On error, due to missing Java
-   * dependencies.
-   * @throws NoSuchAlgorithmException - On error, due to missing Java
-   * dependencies.
-   */
-  @Test
-  public void testGenerateKey()
-      throws NoSuchProviderException, NoSuchAlgorithmException {
-    HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration());
-    KeyPair keyPair = keyGen.generateKey();
-    Assert.assertEquals(config.getKeyAlgo(),
-        keyPair.getPrivate().getAlgorithm());
-    PKCS8EncodedKeySpec keySpec =
-        new PKCS8EncodedKeySpec(keyPair.getPrivate().getEncoded());
-    Assert.assertEquals("PKCS#8", keySpec.getFormat());
-  }
-
-  /**
-   * In this test we assert that the size we specified is used for key
-   * generation.
-   * @throws NoSuchProviderException - On error, due to missing Java
-   * dependencies.
-   * @throws NoSuchAlgorithmException - On error, due to missing Java
-   * dependencies.
-   */
-  @Test
-  public void testGenerateKeyWithSize() throws NoSuchProviderException,
-      NoSuchAlgorithmException {
-    HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration());
-    KeyPair keyPair = keyGen.generateKey(4096);
-    PublicKey publicKey = keyPair.getPublic();
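-    // Only RSA public keys expose a modulus; skip the size check otherwise.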
-    if (publicKey instanceof RSAPublicKey) {
-      Assert.assertEquals(4096,
-          ((RSAPublicKey)(publicKey)).getModulus().bitLength());
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java
deleted file mode 100644
index d82b02f..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.security.x509.keys;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
-import static org.junit.Assert.assertNotNull;
-
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.nio.file.attribute.PosixFilePermission;
-import java.security.KeyFactory;
-import java.security.KeyPair;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.spec.InvalidKeySpecException;
-import java.security.spec.PKCS8EncodedKeySpec;
-import java.security.spec.X509EncodedKeySpec;
-import java.util.Set;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-/**
- * Test class for HDDS pem writer.
- */
-public class TestKeyCodec {
-
-  @Rule
-  public TemporaryFolder temporaryFolder = new TemporaryFolder();
-  private OzoneConfiguration configuration;
-  private SecurityConfig securityConfig;
-  private String component;
-  private HDDSKeyGenerator keyGenerator;
-  private String prefix;
-
-  @Before
-  public void init() throws IOException {
-    configuration = new OzoneConfiguration();
-    prefix = temporaryFolder.newFolder().toString();
-    configuration.set(HDDS_METADATA_DIR_NAME, prefix);
-    keyGenerator = new HDDSKeyGenerator(configuration);
-    securityConfig = new SecurityConfig(configuration);
-    component = "test_component";
-  }
-
-  /**
-   * Assert basic things: that we are able to create the key files, that the
-   * names are in the expected format, and so on.
-   *
-   * @throws NoSuchProviderException - On Error, due to missing Java
-   * dependencies.
-   * @throws NoSuchAlgorithmException - On Error,  due to missing Java
-   * dependencies.
-   * @throws IOException - On I/O failure.
-   */
-  @Test
-  public void testWriteKey()
-      throws NoSuchProviderException, NoSuchAlgorithmException,
-      IOException, InvalidKeySpecException {
-    KeyPair keys = keyGenerator.generateKey();
-    KeyCodec pemWriter = new KeyCodec(securityConfig, component);
-    pemWriter.writeKey(keys);
-
-    // Assert that locations have been created.
-    Path keyLocation = pemWriter.getSecurityConfig().getKeyLocation(component);
-    Assert.assertTrue(keyLocation.toFile().exists());
-
-    // Assert that locations are created in the locations that we specified
-    // using the Config.
-    Assert.assertTrue(keyLocation.toString().startsWith(prefix));
-    Path privateKeyPath = Paths.get(keyLocation.toString(),
-        pemWriter.getSecurityConfig().getPrivateKeyFileName());
-    Assert.assertTrue(privateKeyPath.toFile().exists());
-    Path publicKeyPath = Paths.get(keyLocation.toString(),
-        pemWriter.getSecurityConfig().getPublicKeyFileName());
-    Assert.assertTrue(publicKeyPath.toFile().exists());
-
-    // Read the private key and test if the expected String in the PEM file
-    // format exists.
-    byte[] privateKey = Files.readAllBytes(privateKeyPath);
-    String privateKeydata = new String(privateKey, StandardCharsets.UTF_8);
-    Assert.assertTrue(privateKeydata.contains("PRIVATE KEY"));
-
-    // Read the public key and test if the expected String in the PEM file
-    // format exists.
-    byte[] publicKey = Files.readAllBytes(publicKeyPath);
-    String publicKeydata = new String(publicKey, StandardCharsets.UTF_8);
-    Assert.assertTrue(publicKeydata.contains("PUBLIC KEY"));
-
-    // Let us decode the PEM file and parse it back into binary.
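-    // The KeyFactory must use the same algorithm the keys were generated with.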
-    KeyFactory kf = KeyFactory.getInstance(
-        pemWriter.getSecurityConfig().getKeyAlgo());
-
-    // Strip the human-readable PEM guard lines.
-    privateKeydata =
-        privateKeydata.replace("-----BEGIN PRIVATE KEY-----\n", "");
-    privateKeydata =
-        privateKeydata.replace("-----END PRIVATE KEY-----", "");
-
-    // Decode the base64 to binary format and then use an ASN.1 parser to
-    // parse the binary format.
-
-    byte[] keyBytes = Base64.decodeBase64(privateKeydata);
-    PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(keyBytes);
-    PrivateKey privateKeyDecoded = kf.generatePrivate(spec);
-    assertNotNull("Private Key should not be null",
-        privateKeyDecoded);
-
-    // Let us decode the public key and verify that we can parse it back into
-    // binary.
-    publicKeydata =
-        publicKeydata.replace("-----BEGIN PUBLIC KEY-----\n", "");
-    publicKeydata =
-        publicKeydata.replace("-----END PUBLIC KEY-----", "");
-
-    keyBytes = Base64.decodeBase64(publicKeydata);
-    X509EncodedKeySpec pubKeyspec = new X509EncodedKeySpec(keyBytes);
-    PublicKey publicKeyDecoded = kf.generatePublic(pubKeyspec);
-    assertNotNull("Public Key should not be null",
-        publicKeyDecoded);
-
-    // Now let us assert the permissions on the directories and files are as
-    // expected.
-    Set<PosixFilePermission> expectedSet = pemWriter.getPermissionSet();
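-    // Set difference: any permission left after removeAll was not expected.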
-    Set<PosixFilePermission> currentSet =
-        Files.getPosixFilePermissions(privateKeyPath);
-    currentSet.removeAll(expectedSet);
-    Assert.assertEquals(0, currentSet.size());
-
-    currentSet =
-        Files.getPosixFilePermissions(publicKeyPath);
-    currentSet.removeAll(expectedSet);
-    Assert.assertEquals(0, currentSet.size());
-
-    currentSet =
-        Files.getPosixFilePermissions(keyLocation);
-    currentSet.removeAll(expectedSet);
-    Assert.assertEquals(0, currentSet.size());
-  }
-
-  /**
-   * Assert key rewrite fails without force option.
-   *
-   * @throws IOException - on I/O failure.
-   */
-  @Test
-  public void testReWriteKey()
-      throws Exception {
-    KeyPair kp = keyGenerator.generateKey();
-    KeyCodec pemWriter = new KeyCodec(securityConfig, component);
-    SecurityConfig secConfig = pemWriter.getSecurityConfig();
-    pemWriter.writeKey(kp);
-
-    // Assert that rewriting of keys throws exception with valid messages.
-    LambdaTestUtils
-        .intercept(IOException.class, "Private Key file already exists.",
-            () -> pemWriter.writeKey(kp));
-    FileUtils.deleteQuietly(Paths.get(
-        secConfig.getKeyLocation(component).toString() + "/" + secConfig
-            .getPrivateKeyFileName()).toFile());
-    LambdaTestUtils
-        .intercept(IOException.class, "Public Key file already exists.",
-            () -> pemWriter.writeKey(kp));
-    FileUtils.deleteQuietly(Paths.get(
-        secConfig.getKeyLocation(component).toString() + "/" + secConfig
-            .getPublicKeyFileName()).toFile());
-
-    // Should succeed now as both public and private key are deleted.
-    pemWriter.writeKey(kp);
-    // Should succeed with overwrite flag as true.
-    pemWriter.writeKey(kp, true);
-
-  }
-
-  /**
-   * Assert key write fails in a non-POSIX file system.
-   *
-   * @throws IOException - on I/O failure.
-   */
-  @Test
-  public void testWriteKeyInNonPosixFS()
-      throws Exception {
-    KeyPair kp = keyGenerator.generateKey();
-    KeyCodec pemWriter = new KeyCodec(securityConfig, component);
-    pemWriter.setIsPosixFileSystem(() -> false);
-
-    // Assert key write fails in a non-POSIX file system.
-    LambdaTestUtils
-        .intercept(IOException.class, "Unsupported File System for pem file.",
-            () -> pemWriter.writeKey(kp));
-  }
-
-  @Test
-  public void testReadWritePublicKeyWithoutArgs()
-      throws NoSuchProviderException, NoSuchAlgorithmException, IOException,
-      InvalidKeySpecException {
-
-    KeyPair kp = keyGenerator.generateKey();
-    KeyCodec keycodec = new KeyCodec(securityConfig, component);
-    keycodec.writeKey(kp);
-
-    PublicKey pubKey = keycodec.readPublicKey();
-    assertNotNull(pubKey);
-
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java
deleted file mode 100644
index 49e40b4..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Test package for keys used in an X.509 environment.
- */
-package org.apache.hadoop.hdds.security.x509.keys;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java
deleted file mode 100644
index f541468..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * X.509 Certificate and keys related tests.
- */
-package org.apache.hadoop.hdds.security.x509;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java
deleted file mode 100644
index 10724ab..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.tracing;
-
-import io.jaegertracing.internal.JaegerSpanContext;
-import io.jaegertracing.internal.exceptions.EmptyTracerStateStringException;
-import io.jaegertracing.internal.exceptions.MalformedTracerStateStringException;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.jupiter.api.Test;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-class TestStringCodec {
-
-  @Test
-  void testExtract() throws Exception {
-    StringCodec codec = new StringCodec();
-
-    LambdaTestUtils.intercept(EmptyTracerStateStringException.class,
-        () -> codec.extract(null));
-
-    StringBuilder sb = new StringBuilder().append("123");
-    LambdaTestUtils.intercept(MalformedTracerStateStringException.class,
-        "String does not match tracer state format",
-        () -> codec.extract(sb));
-
-    sb.append(":456:789");
-    LambdaTestUtils.intercept(MalformedTracerStateStringException.class,
-        "String does not match tracer state format",
-        () -> codec.extract(sb));
-    sb.append(":66");
-    JaegerSpanContext context = codec.extract(sb);
-    String expectedContextString = "123:456:789:66";
-    assertEquals("123", context.getTraceId());
-    assertEquals(expectedContextString, context.toString());
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java
deleted file mode 100644
index 18e1200..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Test cases for Ozone tracing.
- */
-package org.apache.hadoop.hdds.tracing;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java
deleted file mode 100644
index 11d0fad5..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.utils;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import org.apache.hadoop.hdds.HddsIdFactory;
-import org.junit.After;
-import static org.junit.Assert.assertEquals;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test that HddsIdFactory generates unique IDs across threads.
- */
-public class TestHddsIdFactory {
-
-  private static final Set<Long> ID_SET = ConcurrentHashMap.newKeySet();
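-  // Concurrent set shared by all worker threads, used to detect duplicates.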
-  private static final int IDS_PER_THREAD = 10000;
-  private static final int NUM_OF_THREADS = 5;
-
-  @After
-  public void cleanup() {
-    ID_SET.clear();
-  }
-
-  @Test
-  public void testGetLongId() throws Exception {
-
-    ExecutorService executor = Executors.newFixedThreadPool(NUM_OF_THREADS);
-    List<Callable<Integer>> tasks = new ArrayList<>(NUM_OF_THREADS);
-    addTasks(tasks);
-    List<Future<Integer>> result = executor.invokeAll(tasks);
-    assertEquals(IDS_PER_THREAD * NUM_OF_THREADS, ID_SET.size());
-    for (Future<Integer> r : result) {
-      assertEquals(IDS_PER_THREAD, r.get().intValue());
-    }
-  }
-
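-  // Each task generates IDS_PER_THREAD ids and records them in the shared set.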
-  private void addTasks(List<Callable<Integer>> tasks) {
-    for (int i = 0; i < NUM_OF_THREADS; i++) {
-      Callable<Integer> task = () -> {
-        for (int idNum = 0; idNum < IDS_PER_THREAD; idNum++) {
-          long var = HddsIdFactory.getLongId();
-          if (!ID_SET.add(var)) {
-            Assert.fail("Duplicate id found");
-          }
-        }
-        return IDS_PER_THREAD;
-      };
-      tasks.add(task);
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java
deleted file mode 100644
index d24fcf5..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java
+++ /dev/null
@@ -1,590 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.utils;
-
-import com.google.common.collect.Lists;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.slf4j.event.Level;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.runners.Parameterized.Parameters;
-
-/**
- * Test class for ozone metadata store.
- */
-@RunWith(Parameterized.class)
-public class TestMetadataStore {
-
-  private static final int MAX_GETRANGE_LENGTH = 100;
-  private final String storeImpl;
-  @Rule
-  public ExpectedException expectedException = ExpectedException.none();
-  private MetadataStore store;
-  private File testDir;
-
-  public TestMetadataStore(String metadataImpl) {
-    this.storeImpl = metadataImpl;
-  }
-
-  @Parameters
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {
-        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
-        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
-    });
-  }
-
-  @Before
-  public void init() throws IOException {
-    if (OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(storeImpl)) {
-      // The initialization of RocksDB fails on Windows
-      assumeNotWindows();
-    }
-
-    testDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
-        + "-" + storeImpl.toLowerCase());
-
-    Configuration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
-
-    store = MetadataStoreBuilder.newBuilder()
-        .setConf(conf)
-        .setCreateIfMissing(true)
-        .setDbFile(testDir)
-        .build();
-
-    // Add 20 entries.
-    // {a0 : a-value0} to {a9 : a-value9}
-    // {b0 : b-value0} to {b9 : b-value9}
-    for (int i = 0; i < 10; i++) {
-      store.put(getBytes("a" + i), getBytes("a-value" + i));
-      store.put(getBytes("b" + i), getBytes("b-value" + i));
-    }
-  }
-
-  @Test
-  public void testIterator() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
-    File dbDir = GenericTestUtils.getRandomizedTestDir();
-    MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
-        .setConf(conf)
-        .setCreateIfMissing(true)
-        .setDbFile(dbDir)
-        .build();
-
-    // As the database is empty, check that the iterator behaves as expected.
-    MetaStoreIterator<MetadataStore.KeyValue> metaStoreIterator =
-        dbStore.iterator();
-    assertFalse(metaStoreIterator.hasNext());
-    try {
-      metaStoreIterator.next();
-      fail("testIterator failed");
-    } catch (NoSuchElementException ex) {
-      GenericTestUtils.assertExceptionContains("Store has no more elements",
-          ex);
-    }
-
-    for (int i = 0; i < 10; i++) {
-      store.put(getBytes("a" + i), getBytes("a-value" + i));
-    }
-
-    metaStoreIterator = dbStore.iterator();
-
-    int i = 0;
-    while (metaStoreIterator.hasNext()) {
-      MetadataStore.KeyValue val = metaStoreIterator.next();
-      assertEquals("a" + i, getString(val.getKey()));
-      assertEquals("a-value" + i, getString(val.getValue()));
-      i++;
-    }
-
-    // As we have iterated all the keys in the database, hasNext should return
-    // false and next() should throw NoSuchElementException.
-
-    assertFalse(metaStoreIterator.hasNext());
-    try {
-      metaStoreIterator.next();
-      fail("testIterator failed");
-    } catch (NoSuchElementException ex) {
-      GenericTestUtils.assertExceptionContains("Store has no more elements",
-          ex);
-    }
-    dbStore.close();
-    dbStore.destroy();
-    FileUtils.deleteDirectory(dbDir);
-
-  }
-
-  @Test
-  public void testMetaStoreConfigDifferentFromType() throws IOException {
-
-    Configuration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
-    String dbType;
-    GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG);
-    GenericTestUtils.LogCapturer logCapturer =
-        GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG);
-    if (storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) {
-      dbType = "RocksDB";
-    } else {
-      dbType = "LevelDB";
-    }
-
-    File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
-        + "-" + dbType.toLowerCase() + "-test");
-    MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf)
-        .setCreateIfMissing(true).setDbFile(dbDir).setDBType(dbType).build();
-    assertTrue(logCapturer.getOutput().contains("Using dbType " + dbType + "" +
-        " for metastore"));
-    dbStore.close();
-    dbStore.destroy();
-    FileUtils.deleteDirectory(dbDir);
-
-  }
-
-  @Test
-  public void testDbTypeNotSet() throws IOException {
-
-    Configuration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
-    GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG);
-    GenericTestUtils.LogCapturer logCapturer =
-        GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG);
-
-    File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
-        + "-" + storeImpl.toLowerCase() + "-test");
-    MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf)
-        .setCreateIfMissing(true).setDbFile(dbDir).build();
-    assertTrue(logCapturer.getOutput().contains("dbType is null, using dbType" +
-        " " + storeImpl));
-    dbStore.close();
-    dbStore.destroy();
-    FileUtils.deleteDirectory(dbDir);
-
-  }
-
-  @After
-  public void cleanup() throws IOException {
-    if (store != null) {
-      store.close();
-      store.destroy();
-    }
-    if (testDir != null) {
-      FileUtils.deleteDirectory(testDir);
-    }
-  }
-
-  private byte[] getBytes(String str) {
-    return str == null ? null :
-        DFSUtilClient.string2Bytes(str);
-  }
-
-  private String getString(byte[] bytes) {
-    return bytes == null ? null :
-        DFSUtilClient.bytes2String(bytes);
-  }
-
-  @Test
-  public void testGetDelete() throws IOException {
-    for (int i = 0; i < 10; i++) {
-      byte[] va = store.get(getBytes("a" + i));
-      assertEquals("a-value" + i, getString(va));
-
-      byte[] vb = store.get(getBytes("b" + i));
-      assertEquals("b-value" + i, getString(vb));
-    }
-
-    String keyToDel = "del-" + UUID.randomUUID().toString();
-    store.put(getBytes(keyToDel), getBytes(keyToDel));
-    assertEquals(keyToDel, getString(store.get(getBytes(keyToDel))));
-    store.delete(getBytes(keyToDel));
-    assertEquals(null, store.get(getBytes(keyToDel)));
-  }
-
-  @Test
-  public void testPeekFrom() throws IOException {
-    // Test peek from an element that has prev as well as next
-    testPeek("a3", "a2", "a4");
-
-    // Test peek from an element that only has prev
-    testPeek("b9", "b8", null);
-
-    // Test peek from an element that only has next
-    testPeek("a0", null, "a1");
-  }
-
-  private String getExpectedValue(String key) {
-    if (key == null) {
-      return null;
-    }
-    char[] arr = key.toCharArray();
-    return new StringBuilder().append(arr[0]).append("-value")
-        .append(arr[arr.length - 1]).toString();
-  }
-
-  private void testPeek(String peekKey, String prevKey, String nextKey)
-      throws IOException {
-    // Look for current
-    String k = null;
-    String v = null;
-    ImmutablePair<byte[], byte[]> current =
-        store.peekAround(0, getBytes(peekKey));
-    if (current != null) {
-      k = getString(current.getKey());
-      v = getString(current.getValue());
-    }
-    assertEquals(peekKey, k);
-    assertEquals(v, getExpectedValue(peekKey));
-
-    // Look for prev
-    k = null;
-    v = null;
-    ImmutablePair<byte[], byte[]> prev =
-        store.peekAround(-1, getBytes(peekKey));
-    if (prev != null) {
-      k = getString(prev.getKey());
-      v = getString(prev.getValue());
-    }
-    assertEquals(prevKey, k);
-    assertEquals(v, getExpectedValue(prevKey));
-
-    // Look for next
-    k = null;
-    v = null;
-    ImmutablePair<byte[], byte[]> next =
-        store.peekAround(1, getBytes(peekKey));
-    if (next != null) {
-      k = getString(next.getKey());
-      v = getString(next.getValue());
-    }
-    assertEquals(nextKey, k);
-    assertEquals(v, getExpectedValue(nextKey));
-  }
-
-  @Test
-  public void testIterateKeys() throws IOException {
-    // iterate keys from b0
-    ArrayList<String> result = Lists.newArrayList();
-    store.iterate(getBytes("b0"), (k, v) -> {
-      // b-value{i}
-      String value = getString(v);
-      char num = value.charAt(value.length() - 1);
-      // increment the trailing digit of each value by one
-      int i = Character.getNumericValue(num) + 1;
-      value = value.substring(0, value.length() - 1) + i;
-      result.add(value);
-      return true;
-    });
-
-    assertFalse(result.isEmpty());
-    for (int i = 0; i < result.size(); i++) {
-      assertEquals("b-value" + (i + 1), result.get(i));
-    }
-
-    // iterate from a non-existent key
-    result.clear();
-    store.iterate(getBytes("xyz"), (k, v) -> {
-      result.add(getString(v));
-      return true;
-    });
-    assertTrue(result.isEmpty());
-
-    // iterate from the beginning
-    result.clear();
-    store.iterate(null, (k, v) -> {
-      result.add(getString(v));
-      return true;
-    });
-    assertEquals(20, result.size());
-  }
-
-  @Test
-  public void testGetRangeKVs() throws IOException {
-    List<Map.Entry<byte[], byte[]>> result = null;
-
-    // A null/empty startKey returns values from the beginning.
-    result = store.getRangeKVs(null, 5);
-    assertEquals(5, result.size());
-    assertEquals("a-value2", getString(result.get(2).getValue()));
-
-    // Empty list if startKey doesn't exist.
-    result = store.getRangeKVs(getBytes("a12"), 5);
-    assertEquals(0, result.size());
-
-    // Returns max available entries after a valid startKey.
-    result = store.getRangeKVs(getBytes("b0"), MAX_GETRANGE_LENGTH);
-    assertEquals(10, result.size());
-    assertEquals("b0", getString(result.get(0).getKey()));
-    assertEquals("b-value0", getString(result.get(0).getValue()));
-    result = store.getRangeKVs(getBytes("b0"), 5);
-    assertEquals(5, result.size());
-
-    // Both startKey and count are honored.
-    result = store.getRangeKVs(getBytes("a9"), 2);
-    assertEquals(2, result.size());
-    assertEquals("a9", getString(result.get(0).getKey()));
-    assertEquals("a-value9", getString(result.get(0).getValue()));
-    assertEquals("b0", getString(result.get(1).getKey()));
-    assertEquals("b-value0", getString(result.get(1).getValue()));
-
-    // Filter keys by prefix.
-    // It should return all "b*" entries.
-    MetadataKeyFilter filter1 = new KeyPrefixFilter().addFilter("b");
-    result = store.getRangeKVs(null, 100, filter1);
-    assertEquals(10, result.size());
-    assertTrue(result.stream().allMatch(entry ->
-        new String(entry.getKey(), UTF_8).startsWith("b")
-    ));
-    assertEquals(20, filter1.getKeysScannedNum());
-    assertEquals(10, filter1.getKeysHintedNum());
-    result = store.getRangeKVs(null, 3, filter1);
-    assertEquals(3, result.size());
-    result = store.getRangeKVs(getBytes("b3"), 1, filter1);
-    assertEquals("b-value3", getString(result.get(0).getValue()));
-
-    // Define a customized filter that filters keys by suffix.
-    // Returns all "*2" entries.
-    MetadataKeyFilter filter2 = (preKey, currentKey, nextKey)
-        -> getString(currentKey).endsWith("2");
-    result = store.getRangeKVs(null, MAX_GETRANGE_LENGTH, filter2);
-    assertEquals(2, result.size());
-    assertEquals("a2", getString(result.get(0).getKey()));
-    assertEquals("b2", getString(result.get(1).getKey()));
-    result = store.getRangeKVs(null, 1, filter2);
-    assertEquals(1, result.size());
-    assertEquals("a2", getString(result.get(0).getKey()));
-
-    // Apply multiple filters.
-    result = store.getRangeKVs(null, MAX_GETRANGE_LENGTH, filter1, filter2);
-    assertEquals(1, result.size());
-    assertEquals("b2", getString(result.get(0).getKey()));
-    assertEquals("b-value2", getString(result.get(0).getValue()));
-
-    // If the filter is null, it has no effect.
-    result = store.getRangeKVs(null, 1, (MetadataKeyFilter[]) null);
-    assertEquals(1, result.size());
-    assertEquals("a0", getString(result.get(0).getKey()));
-  }
-
-  @Test
-  public void testGetSequentialRangeKVs() throws IOException {
-    MetadataKeyFilter suffixFilter = (preKey, currentKey, nextKey)
-        -> DFSUtil.bytes2String(currentKey).endsWith("2");
-    // Supposed to return a2 and b2
-    List<Map.Entry<byte[], byte[]>> result =
-        store.getRangeKVs(null, MAX_GETRANGE_LENGTH, suffixFilter);
-    assertEquals(2, result.size());
-    assertEquals("a2", DFSUtil.bytes2String(result.get(0).getKey()));
-    assertEquals("b2", DFSUtil.bytes2String(result.get(1).getKey()));
-
-    // Supposed to return just a2, because when it iterates to a3 the
-    // filter no longer matches and iteration should stop there.
-    result = store.getSequentialRangeKVs(null,
-        MAX_GETRANGE_LENGTH, suffixFilter);
-    assertEquals(1, result.size());
-    assertEquals("a2", DFSUtil.bytes2String(result.get(0).getKey()));
-  }
-
-  @Test
-  public void testGetRangeLength() throws IOException {
-    List<Map.Entry<byte[], byte[]>> result = null;
-
-    result = store.getRangeKVs(null, 0);
-    assertEquals(0, result.size());
-
-    result = store.getRangeKVs(null, 1);
-    assertEquals(1, result.size());
-
-    // Count less than zero is invalid.
-    expectedException.expect(IllegalArgumentException.class);
-    expectedException.expectMessage("Invalid count given");
-    store.getRangeKVs(null, -1);
-  }
-
-  @Test
-  public void testInvalidStartKey() throws IOException {
-    // If startKey is invalid, the returned list should be empty.
-    List<Map.Entry<byte[], byte[]>> kvs =
-        store.getRangeKVs(getBytes("unknownKey"), MAX_GETRANGE_LENGTH);
-    assertEquals(0, kvs.size());
-  }
-
-  @Test
-  public void testDestroyDB() throws IOException {
-    // Create a new DB to test destroy().
-    Configuration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
-
-    File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
-        + "-" + storeImpl.toLowerCase() + "-toDestroy");
-    MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
-        .setConf(conf)
-        .setCreateIfMissing(true)
-        .setDbFile(dbDir)
-        .build();
-
-    dbStore.put(getBytes("key1"), getBytes("value1"));
-    dbStore.put(getBytes("key2"), getBytes("value2"));
-
-    assertFalse(dbStore.isEmpty());
-    assertTrue(dbDir.exists());
-    assertTrue(dbDir.listFiles().length > 0);
-
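-    // destroy() closes the store and removes its directory from disk.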
-    dbStore.destroy();
-
-    assertFalse(dbDir.exists());
-  }
-
-  @Test
-  public void testBatchWrite() throws IOException {
-    Configuration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
-
-    File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
-        + "-" + storeImpl.toLowerCase() + "-batchWrite");
-    MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
-        .setConf(conf)
-        .setCreateIfMissing(true)
-        .setDbFile(dbDir)
-        .build();
-
-    List<String> expectedResult = Lists.newArrayList();
-    for (int i = 0; i < 10; i++) {
-      dbStore.put(getBytes("batch-" + i), getBytes("batch-value-" + i));
-      expectedResult.add("batch-" + i);
-    }
-
-    BatchOperation batch = new BatchOperation();
-    batch.delete(getBytes("batch-2"));
-    batch.delete(getBytes("batch-3"));
-    batch.delete(getBytes("batch-4"));
-    batch.put(getBytes("batch-new-2"), getBytes("batch-new-value-2"));
-
-    expectedResult.remove("batch-2");
-    expectedResult.remove("batch-3");
-    expectedResult.remove("batch-4");
-    expectedResult.add("batch-new-2");
-
-    dbStore.writeBatch(batch);
-
-    Iterator<String> it = expectedResult.iterator();
-    AtomicInteger count = new AtomicInteger(0);
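-    // Iterate the whole DB in key order, checking each entry against
-    // the expected key list.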
-    dbStore.iterate(null, (key, value) -> {
-      count.incrementAndGet();
-      return it.hasNext() && it.next().equals(getString(key));
-    });
-
-    assertEquals(8, count.get());
-  }
-
-  @Test
-  public void testKeyPrefixFilter() throws IOException {
-    List<Map.Entry<byte[], byte[]>> result = null;
-    RuntimeException exception = null;
-
-    try {
-      new KeyPrefixFilter().addFilter("b0", true).addFilter("b");
-    } catch (IllegalArgumentException e) {
-      exception = e;
-    }
-    assertTrue(exception != null && exception.getMessage()
-        .contains("KeyPrefix: b already rejected"));
-
-    exception = null;
-    try {
-      new KeyPrefixFilter().addFilter("b0").addFilter("b", true);
-    } catch (IllegalArgumentException e) {
-      exception = e;
-    }
-    assertTrue(exception != null && exception.getMessage()
-        .contains("KeyPrefix: b already accepted"));
-
-    exception = null;
-    try {
-      new KeyPrefixFilter().addFilter("b", true).addFilter("b0");
-    } catch (IllegalArgumentException e) {
-      exception = e;
-    }
-    assertTrue(exception != null && exception.getMessage()
-        .contains("KeyPrefix: b0 already rejected"));
-
-    exception = null;
-    try {
-      new KeyPrefixFilter().addFilter("b").addFilter("b0", true);
-    } catch (IllegalArgumentException e) {
-      exception = e;
-    }
-    assertTrue(exception != null && exception.getMessage()
-        .contains("KeyPrefix: b0 already accepted"));
-
-    MetadataKeyFilter filter1 = new KeyPrefixFilter(true)
-        .addFilter("a0")
-        .addFilter("a1")
-        .addFilter("b", true);
-    result = store.getRangeKVs(null, 100, filter1);
-    assertEquals(2, result.size());
-    assertTrue(result.stream().anyMatch(entry ->
-        new String(entry.getKey(), UTF_8).startsWith("a0")));
-    assertTrue(result.stream().anyMatch(entry ->
-        new String(entry.getKey(), UTF_8).startsWith("a1")));
-
-    filter1 = new KeyPrefixFilter(true).addFilter("b", true);
-    result = store.getRangeKVs(null, 100, filter1);
-    assertEquals(0, result.size());
-
-    filter1 = new KeyPrefixFilter().addFilter("b", true);
-    result = store.getRangeKVs(null, 100, filter1);
-    assertEquals(10, result.size());
-    assertTrue(result.stream().allMatch(entry ->
-        new String(entry.getKey(), UTF_8).startsWith("a")));
-  }
-}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRetriableTask.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRetriableTask.java
deleted file mode 100644
index 148ccf9..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRetriableTask.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.utils;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.zip.ZipException;
-
-/**
- * Tests for {@link RetriableTask}.
- */
-public class TestRetriableTask {
-
-  @Test
-  public void returnsSuccessfulResult() throws Exception {
-    String result = "bilbo";
-    RetriableTask<String> task = new RetriableTask<>(
-        RetryPolicies.RETRY_FOREVER, "test", () -> result);
-    assertEquals(result, task.call());
-  }
-
-  @Test
-  public void returnsSuccessfulResultAfterFailures() throws Exception {
-    String result = "gandalf";
-    AtomicInteger attempts = new AtomicInteger();
-    RetriableTask<String> task = new RetriableTask<>(
-        RetryPolicies.RETRY_FOREVER, "test",
-        () -> {
-          if (attempts.incrementAndGet() <= 2) {
-            throw new Exception("testing");
-          }
-          return result;
-        });
-    assertEquals(result, task.call());
-  }
-
-  @Test
-  public void respectsRetryPolicy() {
-    int expectedAttempts = 3;
-    AtomicInteger attempts = new AtomicInteger();
-    RetryPolicy retryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
-        expectedAttempts, 1, TimeUnit.MILLISECONDS);
-    RetriableTask<String> task = new RetriableTask<>(retryPolicy, "thr", () -> {
-      attempts.incrementAndGet();
-      throw new ZipException("testing");
-    });
-
-    IOException e = assertThrows(IOException.class, task::call);
-    assertEquals(ZipException.class, e.getCause().getClass());
-    assertEquals(expectedAttempts, attempts.get());
-  }
-
-}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java
deleted file mode 100644
index 29c7803..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.utils;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.metrics2.AbstractMetric;
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsInfo;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.MetricsTag;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import javax.management.MBeanServer;
-import java.io.File;
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.util.HashMap;
-import java.util.Map;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test the JMX interface for the rocksdb metastore implementation.
- */
-public class TestRocksDBStoreMBean {
-  
-  private Configuration conf;
-  
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
-        OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB);
-  }
-  
-
-  @Test
-  public void testJmxBeans() throws Exception {
-
-    RocksDBStore metadataStore = getTestRocksDBStoreWithData();
-
-    MBeanServer platformMBeanServer =
-        ManagementFactory.getPlatformMBeanServer();
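-    // Sleep briefly so the RocksDB statistics can be updated before reading.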
-    Thread.sleep(2000);
-
-    Object keysWritten = platformMBeanServer
-        .getAttribute(metadataStore.getStatMBeanName(), "NUMBER_KEYS_WRITTEN");
-
-    assertEquals(10L, keysWritten);
-
-    Object dbWriteAverage = platformMBeanServer
-        .getAttribute(metadataStore.getStatMBeanName(), "DB_WRITE_AVERAGE");
-    assertTrue((double) dbWriteAverage > 0);
-
-    metadataStore.close();
-
-  }
-
-  @Test
-  public void testDisabledStat() throws Exception {
-    File testDir = GenericTestUtils
-        .getTestDir(getClass().getSimpleName() + "-withoutstat");
-
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
-        OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF);
-
-    RocksDBStore metadataStore =
-        (RocksDBStore) MetadataStoreBuilder.newBuilder().setConf(conf)
-            .setCreateIfMissing(true).setDbFile(testDir).build();
-
-    Assert.assertNull(metadataStore.getStatMBeanName());
-  }
-
-  @Test
-  public void testMetricsSystemIntegration() throws Exception {
-
-    RocksDBStore metadataStore = getTestRocksDBStoreWithData();
-    Thread.sleep(2000);
-
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    MetricsSource rdbSource =
-        ms.getSource("Rocksdb_TestRocksDBStoreMBean-withstat");
-
-    BufferedMetricsCollector metricsCollector = new BufferedMetricsCollector();
-    rdbSource.getMetrics(metricsCollector, true);
-
-    Map<String, Double> metrics = metricsCollector.getMetricsRecordBuilder()
-        .getMetrics();
-    assertEquals(10.0, metrics.get("NUMBER_KEYS_WRITTEN"), 0.0);
-    assertTrue(metrics.get("DB_WRITE_AVERAGE") > 0);
-    metadataStore.close();
-  }
-
-  private RocksDBStore getTestRocksDBStoreWithData() throws IOException {
-    File testDir =
-        GenericTestUtils.getTestDir(getClass().getSimpleName() + "-withstat");
-
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL");
-
-    RocksDBStore metadataStore =
-        (RocksDBStore) MetadataStoreBuilder.newBuilder().setConf(conf)
-            .setCreateIfMissing(true).setDbFile(testDir).build();
-
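-    // The same key is written 10 times; NUMBER_KEYS_WRITTEN counts write
-    // operations rather than distinct keys, so the tests expect 10.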
-    for (int i = 0; i < 10; i++) {
-      metadataStore.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
-    }
-
-    return metadataStore;
-  }
-}
-
-/**
- * Test class to buffer a single MetricsRecordBuilder instance.
- */
-class BufferedMetricsCollector implements MetricsCollector {
-
-  private BufferedMetricsRecordBuilderImpl metricsRecordBuilder;
-
-  BufferedMetricsCollector() {
-    metricsRecordBuilder = new BufferedMetricsRecordBuilderImpl();
-  }
-
-  public BufferedMetricsRecordBuilderImpl getMetricsRecordBuilder() {
-    return metricsRecordBuilder;
-  }
-
-  @Override
-  public MetricsRecordBuilder addRecord(String s) {
-    metricsRecordBuilder.setContext(s);
-    return metricsRecordBuilder;
-  }
-
-  @Override
-  public MetricsRecordBuilder addRecord(MetricsInfo metricsInfo) {
-    return metricsRecordBuilder;
-  }
-
-  /**
-   * Test class to buffer a single snapshot of metrics.
-   */
-  class BufferedMetricsRecordBuilderImpl extends MetricsRecordBuilder {
-
-    private Map<String, Double> metrics = new HashMap<>();
-    private String contextName;
-
-    public Map<String, Double> getMetrics() {
-      return metrics;
-    }
-
-    @Override
-    public MetricsRecordBuilder tag(MetricsInfo metricsInfo, String s) {
-      return null;
-    }
-
-    @Override
-    public MetricsRecordBuilder add(MetricsTag metricsTag) {
-      return null;
-    }
-
-    @Override
-    public MetricsRecordBuilder add(AbstractMetric abstractMetric) {
-      return null;
-    }
-
-    @Override
-    public MetricsRecordBuilder setContext(String s) {
-      this.contextName = s;
-      return this;
-    }
-
-    @Override
-    public MetricsRecordBuilder addCounter(MetricsInfo metricsInfo, int i) {
-      return null;
-    }
-
-    @Override
-    public MetricsRecordBuilder addCounter(MetricsInfo metricsInfo, long l) {
-      metrics.put(metricsInfo.name(), (double)l);
-      return this;
-    }
-
-    @Override
-    public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, int i) {
-      return null;
-    }
-
-    @Override
-    public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, long l) {
-      return null;
-    }
-
-    @Override
-    public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, float v) {
-      return null;
-    }
-
-    @Override
-    public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, double v) {
-      metrics.put(metricsInfo.name(), v);
-      return this;
-    }
-
-    @Override
-    public MetricsCollector parent() {
-      return null;
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java
deleted file mode 100644
index 4ba54e9..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.rocksdb.ColumnFamilyDescriptor;
-import org.rocksdb.ColumnFamilyOptions;
-import org.rocksdb.DBOptions;
-import org.rocksdb.RocksDB;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import static org.apache.hadoop.hdds.utils.db.DBConfigFromFile.getOptionsFileNameFromDB;
-
-/**
- * DBConf tests.
- */
-public class TestDBConfigFromFile {
-  private static final String DB_FILE = "test.db";
-  private static final String INI_FILE = getOptionsFileNameFromDB(DB_FILE);
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Before
-  public void setUp() throws Exception {
-    System.setProperty(DBConfigFromFile.CONFIG_DIR,
-        folder.newFolder().toString());
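-    // Stage the bundled test.db.ini where DBConfigFromFile will look for it.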
-    ClassLoader classLoader = getClass().getClassLoader();
-    File testData = new File(classLoader.getResource(INI_FILE).getFile());
-    File dest = Paths.get(
-        System.getProperty(DBConfigFromFile.CONFIG_DIR), INI_FILE).toFile();
-    FileUtils.copyFile(testData, dest);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-  }
-
-  @Test
-  public void readFromFile() throws IOException {
-    final List<String> families =
-        Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
-            "First", "Second", "Third",
-            "Fourth", "Fifth",
-            "Sixth");
-    final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
-        new ArrayList<>();
-    for (String family : families) {
-      columnFamilyDescriptors.add(
-          new ColumnFamilyDescriptor(family.getBytes(StandardCharsets.UTF_8),
-              new ColumnFamilyOptions()));
-    }
-
-    final DBOptions options = DBConfigFromFile.readFromFile(DB_FILE,
-        columnFamilyDescriptors);
-
-    // Verify that we can read back some values defined in test.db.ini.
-    Assert.assertNotNull(options);
-    Assert.assertEquals(551615L, options.maxManifestFileSize());
-    Assert.assertEquals(1000L, options.keepLogFileNum());
-    Assert.assertEquals(1048576, options.writableFileMaxBufferSize());
-  }
-
-  @Test
-  public void readFromFileInvalidConfig() throws IOException {
-    final List<String> families =
-        Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
-            "First", "Second", "Third",
-            "Fourth", "Fifth",
-            "Sixth");
-    final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
-        new ArrayList<>();
-    for (String family : families) {
-      columnFamilyDescriptors.add(
-          new ColumnFamilyDescriptor(family.getBytes(StandardCharsets.UTF_8),
-              new ColumnFamilyOptions()));
-    }
-
-    final DBOptions options = DBConfigFromFile.readFromFile("badfile.db.ini",
-        columnFamilyDescriptors);
-
-    // This has to return null, since we have no config defined for badfile.db.
-    Assert.assertNull(options);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
deleted file mode 100644
index d406060..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-
-/**
- * Tests RDBStore creation.
- */
-public class TestDBStoreBuilder {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  @Before
-  public void setUp() throws Exception {
-    System.setProperty(DBConfigFromFile.CONFIG_DIR,
-        folder.newFolder().toString());
-  }
-
-  @Test
-  public void builderWithoutAnyParams() throws IOException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    thrown.expect(IOException.class);
-    DBStoreBuilder.newBuilder(conf).build();
-  }
-
-  @Test
-  public void builderWithOneParamV1() throws IOException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    thrown.expect(IOException.class);
-    DBStoreBuilder.newBuilder(conf)
-        .setName("Test.db")
-        .build();
-  }
-
-  @Test
-  public void builderWithOneParamV2() throws IOException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
-      Assert.assertTrue(newFolder.mkdirs());
-    }
-    thrown.expect(IOException.class);
-    DBStoreBuilder.newBuilder(conf)
-        .setPath(newFolder.toPath())
-        .build();
-  }
-
-  @Test
-  public void builderWithOpenClose() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
-      Assert.assertTrue(newFolder.mkdirs());
-    }
-    DBStore dbStore = DBStoreBuilder.newBuilder(conf)
-        .setName("Test.db")
-        .setPath(newFolder.toPath())
-        .build();
-    // Nothing to do; just open and close.
-    dbStore.close();
-  }
-
-  @Test
-  public void builderWithDoubleTableName() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
-      Assert.assertTrue(newFolder.mkdirs());
-    }
-    thrown.expect(IOException.class);
-    DBStoreBuilder.newBuilder(conf)
-        .setName("Test.db")
-        .setPath(newFolder.toPath())
-        .addTable("FIRST")
-        .addTable("FIRST")
-        .build();
-    // Nothing to do; this will throw, so there is nothing to close.
-  }
-
-  @Test
-  public void builderWithDataWrites() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
-      Assert.assertTrue(newFolder.mkdirs());
-    }
-    try (DBStore dbStore = DBStoreBuilder.newBuilder(conf)
-        .setName("Test.db")
-        .setPath(newFolder.toPath())
-        .addTable("First")
-        .addTable("Second")
-        .build()) {
-      try (Table<byte[], byte[]> firstTable = dbStore.getTable("First")) {
-        byte[] key =
-            RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8);
-        byte[] value =
-            RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8);
-        firstTable.put(key, value);
-        byte[] temp = firstTable.get(key);
-        Assert.assertArrayEquals(value, temp);
-      }
-
-      try (Table secondTable = dbStore.getTable("Second")) {
-        Assert.assertTrue(secondTable.isEmpty());
-      }
-    }
-  }
-
-  @Test
-  public void builderWithDiskProfileWrites() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
-      Assert.assertTrue(newFolder.mkdirs());
-    }
-    try (DBStore dbStore = DBStoreBuilder.newBuilder(conf)
-        .setName("Test.db")
-        .setPath(newFolder.toPath())
-        .addTable("First")
-        .addTable("Second")
-        .setProfile(DBProfile.DISK)
-        .build()) {
-      try (Table<byte[], byte[]> firstTable = dbStore.getTable("First")) {
-        byte[] key =
-            RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8);
-        byte[] value =
-            RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8);
-        firstTable.put(key, value);
-        byte[] temp = firstTable.get(key);
-        Assert.assertArrayEquals(value, temp);
-      }
-
-      try (Table secondTable = dbStore.getTable("Second")) {
-        Assert.assertTrue(secondTable.isEmpty());
-      }
-    }
-  }
-
-
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
deleted file mode 100644
index 6084ae9..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import javax.management.MBeanServer;
-
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.codec.binary.StringUtils;
-import org.apache.hadoop.hdfs.DFSUtil;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.TemporaryFolder;
-import org.rocksdb.ColumnFamilyOptions;
-import org.rocksdb.DBOptions;
-import org.rocksdb.RocksDB;
-import org.rocksdb.Statistics;
-import org.rocksdb.StatsLevel;
-
-/**
- * RDBStore Tests.
- */
-public class TestRDBStore {
-  private final List<String> families =
-      Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
-          "First", "Second", "Third",
-          "Fourth", "Fifth",
-          "Sixth");
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-  private RDBStore rdbStore = null;
-  private DBOptions options = null;
-  private Set<TableConfig> configSet;
-
-  @Before
-  public void setUp() throws Exception {
-    options = new DBOptions();
-    options.setCreateIfMissing(true);
-    options.setCreateMissingColumnFamilies(true);
-
-    Statistics statistics = new Statistics();
-    statistics.setStatsLevel(StatsLevel.ALL);
-    options = options.setStatistics(statistics);
-    configSet = new HashSet<>();
-    for(String name : families) {
-      TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
-      configSet.add(newConfig);
-    }
-    rdbStore = new RDBStore(folder.newFolder(), options, configSet);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (rdbStore != null) {
-      rdbStore.close();
-    }
-  }
-
-  private void insertRandomData(RDBStore dbStore, int familyIndex)
-      throws Exception {
-    try (Table firstTable = dbStore.getTable(families.get(familyIndex))) {
-      Assert.assertNotNull("Table cannot be null", firstTable);
-      for (int x = 0; x < 100; x++) {
-        byte[] key =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-        byte[] value =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-        firstTable.put(key, value);
-      }
-    }
-  }
-
-  @Test
-  public void compactDB() throws Exception {
-    try (RDBStore newStore =
-             new RDBStore(folder.newFolder(), options, configSet)) {
-      Assert.assertNotNull("DB Store cannot be null", newStore);
-      insertRandomData(newStore, 1);
-      // This test does not assert anything; if there is any error, it
-      // will throw and fail.
-      newStore.compactDB();
-    }
-  }
-
-  @Test
-  public void close() throws Exception {
-    RDBStore newStore =
-        new RDBStore(folder.newFolder(), options, configSet);
-    Assert.assertNotNull("DBStore cannot be null", newStore);
-    // This test does not assert anything; if there is any error, it
-    // will throw and fail.
-    newStore.close();
-  }
-
-  @Test
-  public void moveKey() throws Exception {
-    byte[] key =
-        RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-    byte[] value =
-        RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-
-    try (Table firstTable = rdbStore.getTable(families.get(1))) {
-      firstTable.put(key, value);
-      try (Table<byte[], byte[]> secondTable = rdbStore
-          .getTable(families.get(2))) {
-        rdbStore.move(key, firstTable, secondTable);
-        byte[] newvalue = secondTable.get(key);
-        // Make sure we have value in the second table
-        Assert.assertNotNull(newvalue);
-        // and it is the same as what we wrote to the first table.
-        Assert.assertArrayEquals(value, newvalue);
-      }
-      // After move this key must not exist in the first table.
-      Assert.assertNull(firstTable.get(key));
-    }
-  }
-
-  @Test
-  public void moveWithValue() throws Exception {
-    byte[] key =
-        RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-    byte[] value =
-        RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-
-    byte[] nextValue =
-        RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-    try (Table firstTable = rdbStore.getTable(families.get(1))) {
-      firstTable.put(key, value);
-      try (Table<byte[], byte[]> secondTable = rdbStore
-          .getTable(families.get(2))) {
-        rdbStore.move(key, nextValue, firstTable, secondTable);
-        byte[] newvalue = secondTable.get(key);
-        // Make sure we have value in the second table
-        Assert.assertNotNull(newvalue);
-        // and that it is not what we wrote to the first table, but equals
-        // the new value.
-        Assert.assertArrayEquals(nextValue, newvalue);
-      }
-    }
-
-  }
-
-  @Test
-  public void getEstimatedKeyCount() throws Exception {
-    try (RDBStore newStore =
-             new RDBStore(folder.newFolder(), options, configSet)) {
-      Assert.assertNotNull("DB Store cannot be null", newStore);
-
-      // Write 100 keys to the first table.
-      insertRandomData(newStore, 1);
-
-      // Write 100 keys to the second table.
-      insertRandomData(newStore, 2);
-
-      // Let us make sure that our estimate is not off by more than 10%.
-      Assert.assertTrue(newStore.getEstimatedKeyCount() > 180
-          && newStore.getEstimatedKeyCount() < 220);
-    }
-  }
-
-  @Test
-  public void getStatMBeanName() throws Exception {
-
-    try (Table firstTable = rdbStore.getTable(families.get(1))) {
-      for (int y = 0; y < 100; y++) {
-        byte[] key =
-            RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-        byte[] value =
-            RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-        firstTable.put(key, value);
-      }
-    }
-    MBeanServer platformMBeanServer =
-        ManagementFactory.getPlatformMBeanServer();
-    Thread.sleep(2000);
-
-    Object keysWritten = platformMBeanServer
-        .getAttribute(rdbStore.getStatMBeanName(), "NUMBER_KEYS_WRITTEN");
-
-    Assert.assertTrue(((Long) keysWritten) >= 99L);
-
-    Object dbWriteAverage = platformMBeanServer
-        .getAttribute(rdbStore.getStatMBeanName(), "DB_WRITE_AVERAGE");
-    Assert.assertTrue((double) dbWriteAverage > 0);
-  }
-
-  @Test
-  public void getTable() throws Exception {
-    for (String tableName : families) {
-      try (Table table = rdbStore.getTable(tableName)) {
-        Assert.assertNotNull(tableName + " is null", table);
-      }
-    }
-    thrown.expect(IOException.class);
-    rdbStore.getTable("ATableWithNoName");
-  }
-
-  @Test
-  public void listTables() throws Exception {
-    List<Table> tableList = rdbStore.listTables();
-    Assert.assertNotNull("Table list cannot be null", tableList);
-    Map<String, Table> hashTable = new HashMap<>();
-
-    for (Table t : tableList) {
-      hashTable.put(t.getName(), t);
-    }
-
-    int count = families.size();
-    // Assert that we have all the tables in the list and no more.
-    for (String name : families) {
-      Assert.assertTrue(hashTable.containsKey(name));
-      count--;
-    }
-    Assert.assertEquals(0, count);
-  }
-
-  @Test
-  public void testRocksDBCheckpoint() throws Exception {
-    try (RDBStore newStore =
-             new RDBStore(folder.newFolder(), options, configSet)) {
-      Assert.assertNotNull("DB Store cannot be null", newStore);
-
-      insertRandomData(newStore, 1);
-      DBCheckpoint checkpoint =
-          newStore.getCheckpoint(true);
-      Assert.assertNotNull(checkpoint);
-
-      RDBStore restoredStoreFromCheckPoint =
-          new RDBStore(checkpoint.getCheckpointLocation().toFile(),
-              options, configSet);
-
-      // Let us make sure that our estimate is not off by more than 10%.
-      Assert.assertTrue(
-          restoredStoreFromCheckPoint.getEstimatedKeyCount() > 90
-          && restoredStoreFromCheckPoint.getEstimatedKeyCount() < 110);
-      checkpoint.cleanupCheckpoint();
-    }
-
-  }
-
-  @Test
-  public void testRocksDBCheckpointCleanup() throws Exception {
-    try (RDBStore newStore =
-             new RDBStore(folder.newFolder(), options, configSet)) {
-      Assert.assertNotNull("DB Store cannot be null", newStore);
-
-      insertRandomData(newStore, 1);
-      DBCheckpoint checkpoint =
-          newStore.getCheckpoint(true);
-      Assert.assertNotNull(checkpoint);
-
-      Assert.assertTrue(Files.exists(
-          checkpoint.getCheckpointLocation()));
-      checkpoint.cleanupCheckpoint();
-      Assert.assertFalse(Files.exists(
-          checkpoint.getCheckpointLocation()));
-    }
-  }
-
-  /**
-   * Not strictly a unit test. Just a confirmation of the expected behavior
-   * of RocksDB keyMayExist API.
-   * Expected behavior: on average, keyMayExist() latency < db.get() latency
-   * for invalid keys.
-   * @throws Exception if unable to read from RocksDB.
-   */
-  @Test
-  public void testRocksDBKeyMayExistApi() throws Exception {
-    try (RDBStore newStore =
-             new RDBStore(folder.newFolder(), options, configSet)) {
-      RocksDB db = newStore.getDb();
-
-      // Test with 50 invalid keys.
-      long start = System.nanoTime();
-      for (int i = 0; i < 50; i++) {
-        Assert.assertNull(db.get(
-            StringUtils.getBytesUtf16("key" + i)));
-      }
-      long end = System.nanoTime();
-      long keyGetLatency = end - start;
-
-      start = System.nanoTime();
-      for (int i = 0; i < 50; i++) {
-        Assert.assertFalse(db.keyMayExist(
-            StringUtils.getBytesUtf16("key" + i), new StringBuilder()));
-      }
-      end = System.nanoTime();
-      long keyMayExistLatency = end - start;
-
-      Assert.assertTrue(keyMayExistLatency < keyGetLatency);
-    }
-  }
-
-  @Test
-  public void testGetDBUpdatesSince() throws Exception {
-
-    try (RDBStore newStore =
-             new RDBStore(folder.newFolder(), options, configSet)) {
-
-      try (Table firstTable = newStore.getTable(families.get(1))) {
-        firstTable.put(StringUtils.getBytesUtf16("Key1"), StringUtils
-            .getBytesUtf16("Value1"));
-        firstTable.put(StringUtils.getBytesUtf16("Key2"), StringUtils
-            .getBytesUtf16("Value2"));
-      }
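-      // Two puts produce two WAL entries, so the latest sequence number is 2.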
-      Assert.assertEquals(2,
-          newStore.getDb().getLatestSequenceNumber());
-
-      DBUpdatesWrapper dbUpdatesSince = newStore.getUpdatesSince(0);
-      Assert.assertEquals(2, dbUpdatesSince.getData().size());
-    }
-  }
-
-
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
deleted file mode 100644
index 788883d..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hdfs.DFSUtil;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.rocksdb.ColumnFamilyOptions;
-import org.rocksdb.DBOptions;
-import org.rocksdb.RocksDB;
-import org.rocksdb.Statistics;
-import org.rocksdb.StatsLevel;
-
-/**
- * Tests for the RocksDB-backed table store.
- */
-public class TestRDBTableStore {
-  private static int count = 0;
-  private final List<String> families =
-      Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
-          "First", "Second", "Third",
-          "Fourth", "Fifth",
-          "Sixth", "Seventh",
-          "Eighth");
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-  private RDBStore rdbStore = null;
-  private DBOptions options = null;
-
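-  // Iterator consumer for forEachAndIterator(); counts entries and checks keys.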
-  private static boolean consume(Table.KeyValue keyValue) {
-    count++;
-    try {
-      Assert.assertNotNull(keyValue.getKey());
-    } catch(IOException ex) {
-      Assert.fail("Unexpected Exception " + ex.toString());
-    }
-    return true;
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    options = new DBOptions();
-    options.setCreateIfMissing(true);
-    options.setCreateMissingColumnFamilies(true);
-
-    Statistics statistics = new Statistics();
-    statistics.setStatsLevel(StatsLevel.ALL);
-    options = options.setStatistics(statistics);
-
-    Set<TableConfig> configSet = new HashSet<>();
-    for(String name : families) {
-      TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
-      configSet.add(newConfig);
-    }
-    rdbStore = new RDBStore(folder.newFolder(), options, configSet);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (rdbStore != null) {
-      rdbStore.close();
-    }
-  }
-
-  @Test
-  public void toIOException() {
-  }
-
-  @Test
-  public void getHandle() throws Exception {
-    try (Table testTable = rdbStore.getTable("First")) {
-      Assert.assertNotNull(testTable);
-      Assert.assertNotNull(((RDBTable) testTable).getHandle());
-    }
-  }
-
-  @Test
-  public void putGetAndEmpty() throws Exception {
-    try (Table<byte[], byte[]> testTable = rdbStore.getTable("First")) {
-      byte[] key =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-      byte[] value =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-      testTable.put(key, value);
-      Assert.assertFalse(testTable.isEmpty());
-      byte[] readValue = testTable.get(key);
-      Assert.assertArrayEquals(value, readValue);
-    }
-    try (Table secondTable = rdbStore.getTable("Second")) {
-      Assert.assertTrue(secondTable.isEmpty());
-    }
-  }
-
-  @Test
-  public void delete() throws Exception {
-    List<byte[]> deletedKeys = new ArrayList<>();
-    List<byte[]> validKeys = new ArrayList<>();
-    byte[] value =
-        RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-    for (int x = 0; x < 100; x++) {
-      deletedKeys.add(
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8));
-    }
-
-    for (int x = 0; x < 100; x++) {
-      validKeys.add(
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8));
-    }
-
-    // Write all the keys and delete the keys scheduled for delete.
-    // Assert that we find only the expected keys in the table.
-    try (Table testTable = rdbStore.getTable("Fourth")) {
-      for (int x = 0; x < deletedKeys.size(); x++) {
-        testTable.put(deletedKeys.get(x), value);
-        testTable.delete(deletedKeys.get(x));
-      }
-
-      for (int x = 0; x < validKeys.size(); x++) {
-        testTable.put(validKeys.get(x), value);
-      }
-
-      for (int x = 0; x < validKeys.size(); x++) {
-        Assert.assertNotNull(testTable.get(validKeys.get(x)));
-      }
-
-      for (int x = 0; x < deletedKeys.size(); x++) {
-        Assert.assertNull(testTable.get(deletedKeys.get(x)));
-      }
-    }
-  }
-
-  @Test
-  public void batchPut() throws Exception {
-    try (Table testTable = rdbStore.getTable("Fifth");
-        BatchOperation batch = rdbStore.initBatchOperation()) {
-      //given
-      byte[] key =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-      byte[] value =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-      Assert.assertNull(testTable.get(key));
-
-      //when
-      testTable.putWithBatch(batch, key, value);
-      rdbStore.commitBatchOperation(batch);
-
-      //then
-      Assert.assertNotNull(testTable.get(key));
-    }
-  }
-
-  @Test
-  public void batchDelete() throws Exception {
-    try (Table testTable = rdbStore.getTable("Fifth");
-        BatchOperation batch = rdbStore.initBatchOperation()) {
-
-      //given
-      byte[] key =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-      byte[] value =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-      testTable.put(key, value);
-      Assert.assertNotNull(testTable.get(key));
-
-
-      //when
-      testTable.deleteWithBatch(batch, key);
-      rdbStore.commitBatchOperation(batch);
-
-      //then
-      Assert.assertNull(testTable.get(key));
-    }
-  }
-
-  @Test
-  public void forEachAndIterator() throws Exception {
-    final int iterCount = 100;
-    try (Table testTable = rdbStore.getTable("Sixth")) {
-      for (int x = 0; x < iterCount; x++) {
-        byte[] key =
-            RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-        byte[] value =
-            RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-        testTable.put(key, value);
-      }
-      int localCount = 0;
-      try (TableIterator<byte[], Table.KeyValue> iter = testTable.iterator()) {
-        while (iter.hasNext()) {
-          Table.KeyValue keyValue = iter.next();
-          localCount++;
-        }
-
-        Assert.assertEquals(iterCount, localCount);
-        iter.seekToFirst();
-        iter.forEachRemaining(TestRDBTableStore::consume);
-        Assert.assertEquals(iterCount, count);
-
-      }
-    }
-  }
-
-  @Test
-  public void testIsExist() throws Exception {
-    try (Table<byte[], byte[]> testTable = rdbStore.getTable("Seventh")) {
-      byte[] key =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-      byte[] value =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-      testTable.put(key, value);
-      Assert.assertTrue(testTable.isExist(key));
-
-      testTable.delete(key);
-      Assert.assertFalse(testTable.isExist(key));
-
-      byte[] invalidKey =
-          RandomStringUtils.random(5).getBytes(StandardCharsets.UTF_8);
-      Assert.assertFalse(testTable.isExist(invalidKey));
-    }
-  }
-
-  @Test
-  public void testCountEstimatedRowsInTable() throws Exception {
-    try (Table<byte[], byte[]> testTable = rdbStore.getTable("Eighth")) {
-      // Add a few keys
-      final int numKeys = 12345;
-      for (int i = 0; i < numKeys; i++) {
-        byte[] key =
-            RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-        byte[] value =
-            RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-        testTable.put(key, value);
-      }
-      long keyCount = testTable.getEstimatedKeyCount();
-      // The estimate should be larger than zero and should not exceed numKeys.
-      Assert.assertTrue(keyCount > 0 && keyCount <= numKeys);
-    }
-  }
-}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
deleted file mode 100644
index 9ee0d19..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.rocksdb.ColumnFamilyOptions;
-import org.rocksdb.DBOptions;
-import org.rocksdb.RocksDB;
-import org.rocksdb.Statistics;
-import org.rocksdb.StatsLevel;
-
-/**
- * Tests for the typed RocksDB-backed table store.
- */
-public class TestTypedRDBTableStore {
-  private static int count = 0;
-  private final List<String> families =
-      Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
-          "First", "Second", "Third",
-          "Fourth", "Fifth",
-          "Sixth", "Seven", "Eighth",
-          "Ninth");
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-  private RDBStore rdbStore = null;
-  private DBOptions options = null;
-  private CodecRegistry codecRegistry;
-
-  @Before
-  public void setUp() throws Exception {
-    options = new DBOptions();
-    options.setCreateIfMissing(true);
-    options.setCreateMissingColumnFamilies(true);
-
-    Statistics statistics = new Statistics();
-    statistics.setStatsLevel(StatsLevel.ALL);
-    options = options.setStatistics(statistics);
-
-    Set<TableConfig> configSet = new HashSet<>();
-    for (String name : families) {
-      TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
-      configSet.add(newConfig);
-    }
-    rdbStore = new RDBStore(folder.newFolder(), options, configSet);
-
-    codecRegistry = new CodecRegistry();
-
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (rdbStore != null) {
-      rdbStore.close();
-    }
-  }
-
-  @Test
-  public void toIOException() {
-  }
-
-  @Test
-  public void putGetAndEmpty() throws Exception {
-    try (Table<String, String> testTable = createTypedTable(
-        "First")) {
-      String key =
-          RandomStringUtils.random(10);
-      String value = RandomStringUtils.random(10);
-      testTable.put(key, value);
-      Assert.assertFalse(testTable.isEmpty());
-      String readValue = testTable.get(key);
-      Assert.assertEquals(value, readValue);
-    }
-    try (Table secondTable = rdbStore.getTable("Second")) {
-      Assert.assertTrue(secondTable.isEmpty());
-    }
-  }
-
-  private Table<String, String> createTypedTable(String name)
-      throws IOException {
-    return new TypedTable<String, String>(
-        rdbStore.getTable(name),
-        codecRegistry,
-        String.class, String.class);
-  }
-
-  @Test
-  public void delete() throws Exception {
-    List<String> deletedKeys = new LinkedList<>();
-    List<String> validKeys = new LinkedList<>();
-    String value =
-        RandomStringUtils.random(10);
-    for (int x = 0; x < 100; x++) {
-      deletedKeys.add(
-          RandomStringUtils.random(10));
-    }
-
-    for (int x = 0; x < 100; x++) {
-      validKeys.add(
-          RandomStringUtils.random(10));
-    }
-
-    // Write all the keys and delete the keys scheduled for delete.
-    // Assert that we find only the expected keys in the table.
-    try (Table<String, String> testTable = createTypedTable(
-        "Fourth")) {
-      for (int x = 0; x < deletedKeys.size(); x++) {
-        testTable.put(deletedKeys.get(x), value);
-        testTable.delete(deletedKeys.get(x));
-      }
-
-      for (int x = 0; x < validKeys.size(); x++) {
-        testTable.put(validKeys.get(x), value);
-      }
-
-      for (int x = 0; x < validKeys.size(); x++) {
-        Assert.assertNotNull(testTable.get(validKeys.get(x)));
-      }
-
-      for (int x = 0; x < deletedKeys.size(); x++) {
-        Assert.assertNull(testTable.get(deletedKeys.get(x)));
-      }
-    }
-  }
-
-  @Test
-  public void batchPut() throws Exception {
-
-    try (Table<String, String> testTable = createTypedTable(
-        "Fourth");
-        BatchOperation batch = rdbStore.initBatchOperation()) {
-      //given
-      String key =
-          RandomStringUtils.random(10);
-      String value =
-          RandomStringUtils.random(10);
-
-      //when
-      testTable.putWithBatch(batch, key, value);
-      rdbStore.commitBatchOperation(batch);
-
-      //then
-      Assert.assertNotNull(testTable.get(key));
-    }
-  }
-
-  @Test
-  public void batchDelete() throws Exception {
-    try (Table<String, String> testTable = createTypedTable(
-        "Fourth");
-        BatchOperation batch = rdbStore.initBatchOperation()) {
-
-      //given
-      String key =
-          RandomStringUtils.random(10);
-      String value =
-          RandomStringUtils.random(10);
-      testTable.put(key, value);
-
-      //when
-      testTable.deleteWithBatch(batch, key);
-      rdbStore.commitBatchOperation(batch);
-
-      //then
-      Assert.assertNull(testTable.get(key));
-    }
-  }
-
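-  // Iterator consumer for forEachAndIterator(); counts entries and checks keys.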
-  private static boolean consume(Table.KeyValue keyValue) {
-    count++;
-    try {
-      Assert.assertNotNull(keyValue.getKey());
-    } catch (IOException ex) {
-      Assert.fail(ex.toString());
-    }
-    return true;
-  }
-
-  @Test
-  public void forEachAndIterator() throws Exception {
-    final int iterCount = 100;
-    try (Table<String, String> testTable = createTypedTable(
-        "Sixth")) {
-      for (int x = 0; x < iterCount; x++) {
-        String key =
-            RandomStringUtils.random(10);
-        String value =
-            RandomStringUtils.random(10);
-        testTable.put(key, value);
-      }
-      int localCount = 0;
-
-      try (TableIterator<String, ? extends KeyValue<String, String>> iter =
-          testTable.iterator()) {
-        while (iter.hasNext()) {
-          Table.KeyValue keyValue = iter.next();
-          localCount++;
-        }
-
-        Assert.assertEquals(iterCount, localCount);
-        iter.seekToFirst();
-        iter.forEachRemaining(TestTypedRDBTableStore::consume);
-        Assert.assertEquals(iterCount, count);
-
-      }
-    }
-  }
-
-  @Test
-  public void testTypedTableWithCache() throws Exception {
-    int iterCount = 10;
-    try (Table<String, String> testTable = createTypedTable(
-        "Seven")) {
-
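-      // Populate only the table cache; nothing is written to the underlying DB.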
-      for (int x = 0; x < iterCount; x++) {
-        String key = Integer.toString(x);
-        String value = Integer.toString(x);
-        testTable.addCacheEntry(new CacheKey<>(key),
-            new CacheValue<>(Optional.of(value),
-            x));
-      }
-
-      // As we have added entries to the cache, get should return the value
-      // even if it does not exist in the DB.
-      for (int x = 0; x < iterCount; x++) {
-        Assert.assertEquals(Integer.toString(x),
-            testTable.get(Integer.toString(x)));
-      }
-
-    }
-  }
-
-  @Test
-  public void testTypedTableWithCacheWithFewDeletedOperationType()
-      throws Exception {
-    int iterCount = 10;
-    try (Table<String, String> testTable = createTypedTable(
-        "Seven")) {
-
-      for (int x = 0; x < iterCount; x++) {
-        String key = Integer.toString(x);
-        String value = Integer.toString(x);
-        if (x % 2 == 0) {
-          testTable.addCacheEntry(new CacheKey<>(key),
-              new CacheValue<>(Optional.of(value), x));
-        } else {
-          testTable.addCacheEntry(new CacheKey<>(key),
-              new CacheValue<>(Optional.absent(),
-              x));
-        }
-      }
-
-      // As we have added entries to the cache, get should return the value
-      // even if it does not exist in the DB.
-      for (int x = 0; x < iterCount; x++) {
-        if (x % 2 == 0) {
-          Assert.assertEquals(Integer.toString(x),
-              testTable.get(Integer.toString(x)));
-        } else {
-          Assert.assertNull(testTable.get(Integer.toString(x)));
-        }
-      }
-
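-      // Evict cache entries up to epoch 5; the four entries for epochs 6-9
-      // should remain after cleanup completes.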
-      testTable.cleanupCache(5);
-
-      GenericTestUtils.waitFor(() ->
-          ((TypedTable<String, String>) testTable).getCache().size() == 4,
-          100, 5000);
-
-      // Check the remaining values.
-      for (int x = 6; x < iterCount; x++) {
-        if (x % 2 == 0) {
-          Assert.assertEquals(Integer.toString(x),
-              testTable.get(Integer.toString(x)));
-        } else {
-          Assert.assertNull(testTable.get(Integer.toString(x)));
-        }
-      }
-
-
-    }
-  }
-
-  @Test
-  public void testIsExist() throws Exception {
-    try (Table<String, String> testTable = createTypedTable(
-        "Eighth")) {
-      String key =
-          RandomStringUtils.random(10);
-      String value = RandomStringUtils.random(10);
-      testTable.put(key, value);
-      Assert.assertTrue(testTable.isExist(key));
-
-      String invalidKey = key + RandomStringUtils.random(1);
-      Assert.assertFalse(testTable.isExist(invalidKey));
-
-      testTable.delete(key);
-      Assert.assertFalse(testTable.isExist(key));
-    }
-  }
-
-  @Test
-  public void testIsExistCache() throws Exception {
-    try (Table<String, String> testTable = createTypedTable(
-        "Eighth")) {
-      String key =
-          RandomStringUtils.random(10);
-      String value = RandomStringUtils.random(10);
-      testTable.addCacheEntry(new CacheKey<>(key),
-          new CacheValue<>(Optional.of(value), 1L));
-      Assert.assertTrue(testTable.isExist(key));
-
-      testTable.addCacheEntry(new CacheKey<>(key),
-          new CacheValue<>(Optional.absent(), 1L));
-      Assert.assertFalse(testTable.isExist(key));
-    }
-  }
-
-  @Test
-  public void testCountEstimatedRowsInTable() throws Exception {
-    try (Table<String, String> testTable = createTypedTable(
-        "Ninth")) {
-      // Add a few keys
-      final int numKeys = 12345;
-      for (int i = 0; i < numKeys; i++) {
-        String key =
-            RandomStringUtils.random(10);
-        String value = RandomStringUtils.random(10);
-        testTable.put(key, value);
-      }
-      long keyCount = testTable.getEstimatedKeyCount();
-      // The result should be larger than zero but should not exceed numKeys.
-      Assert.assertTrue(keyCount > 0 && keyCount <= numKeys);
-    }
-  }
-}
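
The cache behavior exercised by the tests above follows one pattern: each entry carries an epoch, an absent value marks a delete, a get() that resolves to an absent value returns null, and cleanup evicts everything written at or before a given epoch. A minimal standalone sketch of that pattern, assuming nothing from the Ozone codebase (EpochCache and all names below are illustrative, and java.util.Optional stands in for Guava's):

import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;

final class EpochCache<K, V> {
  private static final class Entry<V> {
    final Optional<V> value; // empty marks a deleted key
    final long epoch;        // sequence number of the write
    Entry(Optional<V> value, long epoch) {
      this.value = value;
      this.epoch = epoch;
    }
  }

  private final Map<K, Entry<V>> map = new ConcurrentHashMap<>();

  void put(K key, Optional<V> value, long epoch) {
    map.put(key, new Entry<>(value, epoch));
  }

  /** Returns null for unknown or deleted keys, mirroring table.get(). */
  V get(K key) {
    Entry<V> e = map.get(key);
    return (e == null) ? null : e.value.orElse(null);
  }

  /** Evicts every entry written at or before the given epoch. */
  void cleanup(long epoch) {
    map.values().removeIf(e -> e.epoch <= epoch);
  }

  int size() {
    return map.size();
  }
}

With entries written at epochs 0 through 9, cleanup(5) leaves the four entries written at epochs 6 through 9, which matches the cache size the test above waits for.
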
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCacheImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCacheImpl.java
deleted file mode 100644
index 4239129..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCacheImpl.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.utils.db.cache;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.concurrent.CompletableFuture;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import static org.junit.Assert.fail;
-
-/**
- * Class tests partial table cache.
- */
-@RunWith(value = Parameterized.class)
-public class TestTableCacheImpl {
-  private TableCache<CacheKey<String>, CacheValue<String>> tableCache;
-
-  private final TableCacheImpl.CacheCleanupPolicy cacheCleanupPolicy;
-
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> policy() {
-    Object[][] params = new Object[][] {
-        {TableCacheImpl.CacheCleanupPolicy.NEVER},
-        {TableCacheImpl.CacheCleanupPolicy.MANUAL}
-    };
-    return Arrays.asList(params);
-  }
-
-  public TestTableCacheImpl(
-      TableCacheImpl.CacheCleanupPolicy cacheCleanupPolicy) {
-    this.cacheCleanupPolicy = cacheCleanupPolicy;
-  }
-
-
-  @Before
-  public void create() {
-    tableCache =
-        new TableCacheImpl<>(cacheCleanupPolicy);
-  }
-  @Test
-  public void testPartialTableCache() {
-
-
-    for (int i = 0; i < 10; i++) {
-      tableCache.put(new CacheKey<>(Integer.toString(i)),
-          new CacheValue<>(Optional.of(Integer.toString(i)), i));
-    }
-
-
-    for (int i = 0; i < 10; i++) {
-      Assert.assertEquals(Integer.toString(i),
-          tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue());
-    }
-
-    // On a full table cache, a cleanup call is a no-op.
-    tableCache.cleanup(4);
-
-    for (int i = 5; i < 10; i++) {
-      Assert.assertEquals(Integer.toString(i),
-          tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue());
-    }
-  }
-
-
-  @Test
-  public void testPartialTableCacheParallel() throws Exception {
-
-    int totalCount = 0;
-    CompletableFuture<Integer> future =
-        CompletableFuture.supplyAsync(() -> {
-          try {
-            return writeToCache(10, 1, 0);
-          } catch (InterruptedException ex) {
-            fail("writeToCache got interrupt exception");
-          }
-          return 0;
-        });
-    int value = future.get();
-    Assert.assertEquals(10, value);
-
-    totalCount += value;
-
-    future =
-        CompletableFuture.supplyAsync(() -> {
-          try {
-            return writeToCache(10, 11, 100);
-          } catch (InterruptedException ex) {
-            fail("writeToCache got interrupt exception");
-          }
-          return 0;
-        });
-
-    // Check we have first 10 entries in cache.
-    for (int i = 1; i <= 10; i++) {
-      Assert.assertEquals(Integer.toString(i),
-          tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue());
-    }
-
-
-    value = future.get();
-    Assert.assertEquals(10, value);
-
-    totalCount += value;
-
-    if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.MANUAL) {
-      int deleted = 5;
-
-      // clean up the first 5 entries
-      tableCache.cleanup(deleted);
-
-      // We should have totalCount - deleted entries in the cache.
-      final int tc = totalCount;
-      GenericTestUtils.waitFor(() -> (tc - deleted == tableCache.size()), 100,
-          5000);
-      // Check if we have remaining entries.
-      for (int i = 6; i <= totalCount; i++) {
-        Assert.assertEquals(Integer.toString(i), tableCache.get(
-            new CacheKey<>(Integer.toString(i))).getCacheValue());
-      }
-      tableCache.cleanup(10);
-
-      tableCache.cleanup(totalCount);
-
-      // Cleaned up all entries, so cache size should be zero.
-      GenericTestUtils.waitFor(() -> (0 == tableCache.size()), 100,
-          5000);
-    } else {
-      tableCache.cleanup(totalCount);
-      Assert.assertEquals(totalCount, tableCache.size());
-    }
-
-
-  }
-
-  private int writeToCache(int count, int startVal, long sleep)
-      throws InterruptedException {
-    int counter = 1;
-    while (counter <= count){
-      tableCache.put(new CacheKey<>(Integer.toString(startVal)),
-          new CacheValue<>(Optional.of(Integer.toString(startVal)), startVal));
-      startVal++;
-      counter++;
-      Thread.sleep(sleep);
-    }
-    return count;
-  }
-}
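
testPartialTableCacheParallel above drives the cache from two asynchronous writers and then joins on their futures. The concurrency skeleton is plain CompletableFuture fan-out; a self-contained sketch against an ordinary ConcurrentHashMap (all names here are illustrative, not the Ozone API):

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

public final class ParallelWriteSketch {
  public static void main(String[] args) throws Exception {
    Map<String, String> cache = new ConcurrentHashMap<>();

    // Two writers fill disjoint key ranges, as in the test (1..10 and 11..20).
    CompletableFuture<Integer> first =
        CompletableFuture.supplyAsync(() -> write(cache, 10, 1));
    CompletableFuture<Integer> second =
        CompletableFuture.supplyAsync(() -> write(cache, 10, 11));

    int total = first.get() + second.get();
    System.out.println(total == cache.size()); // true: 20 distinct keys
  }

  private static int write(Map<String, String> cache, int count, int startVal) {
    for (int i = 0; i < count; i++, startVal++) {
      cache.put(Integer.toString(startVal), Integer.toString(startVal));
    }
    return count;
  }
}
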
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java
deleted file mode 100644
index f97fda2..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Tests for the DB Cache Utilities.
- */
-package org.apache.hadoop.hdds.utils.db.cache;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java
deleted file mode 100644
index f1c7ce1..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Tests for the DB Utilities.
- */
-package org.apache.hadoop.hdds.utils.db;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java
deleted file mode 100644
index f93e3fd..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- *     http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-/**
- * DB test Utils.
- */
-package org.apache.hadoop.hdds.utils;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
deleted file mode 100644
index 789560a..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-/**
- * Enum to define Dummy AuditAction Type for test.
- */
-public enum DummyAction implements AuditAction {
-
-  CREATE_VOLUME,
-  CREATE_BUCKET,
-  READ_VOLUME,
-  READ_BUCKET,
-  READ_KEY,
-  UPDATE_VOLUME,
-  UPDATE_BUCKET,
-  UPDATE_KEY,
-  DELETE_VOLUME,
-  DELETE_BUCKET,
-  DELETE_KEY,
-  SET_OWNER,
-  SET_QUOTA;
-
-  @Override
-  public String getAction() {
-    return this.toString();
-  }
-
-}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
deleted file mode 100644
index 0c2d98f..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * DummyEntity that implements Auditable for test purpose.
- */
-public class DummyEntity implements Auditable {
-
-  private String key1;
-  private String key2;
-
-  public DummyEntity() {
-    this.key1 = "value1";
-    this.key2 = "value2";
-  }
-  public String getKey1() {
-    return key1;
-  }
-
-  public void setKey1(String key1) {
-    this.key1 = key1;
-  }
-
-  public String getKey2() {
-    return key2;
-  }
-
-  public void setKey2(String key2) {
-    this.key2 = key2;
-  }
-
-  @Override
-  public Map<String, String> toAuditMap() {
-    Map<String, String> auditMap = new HashMap<>();
-    auditMap.put("key1", this.key1);
-    auditMap.put("key2", this.key2);
-    return auditMap;
-  }
-}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
deleted file mode 100644
index 518ddae..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.audit;
-
-import org.apache.commons.io.FileUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test Ozone Audit Logger.
- */
-public class TestOzoneAuditLogger {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestOzoneAuditLogger.class.getName());
-
-  private static final AuditLogger AUDIT =
-      new AuditLogger(AuditLoggerType.OMLOGGER);
-
-  private static final String SUCCESS = AuditEventStatus.SUCCESS.name();
-  private static final String FAILURE = AuditEventStatus.FAILURE.name();
-
-  private static final Map<String, String> PARAMS =
-      new DummyEntity().toAuditMap();
-
-  private static final AuditMessage WRITE_FAIL_MSG =
-      new AuditMessage.Builder()
-          .setUser("john")
-          .atIp("192.168.0.1")
-          .forOperation(DummyAction.CREATE_VOLUME.name())
-          .withParams(PARAMS)
-          .withResult(FAILURE)
-          .withException(null).build();
-
-  private static final AuditMessage WRITE_SUCCESS_MSG =
-      new AuditMessage.Builder()
-          .setUser("john")
-          .atIp("192.168.0.1")
-          .forOperation(DummyAction.CREATE_VOLUME.name())
-          .withParams(PARAMS)
-          .withResult(SUCCESS)
-          .withException(null).build();
-
-  private static final AuditMessage READ_FAIL_MSG =
-      new AuditMessage.Builder()
-          .setUser("john")
-          .atIp("192.168.0.1")
-          .forOperation(DummyAction.READ_VOLUME.name())
-          .withParams(PARAMS)
-          .withResult(FAILURE)
-          .withException(null).build();
-
-  private static final AuditMessage READ_SUCCESS_MSG =
-      new AuditMessage.Builder()
-          .setUser("john")
-          .atIp("192.168.0.1")
-          .forOperation(DummyAction.READ_VOLUME.name())
-          .withParams(PARAMS)
-          .withResult(SUCCESS)
-          .withException(null).build();
-
-  @BeforeClass
-  public static void setUp() {
-    System.setProperty("log4j.configurationFile", "log4j2.properties");
-  }
-
-  @AfterClass
-  public static void tearDown() {
-    File file = new File("audit.log");
-    if (FileUtils.deleteQuietly(file)) {
-      LOG.info(file.getName() +
-          " has been deleted as all tests have completed.");
-    } else {
-      LOG.info("audit.log could not be deleted.");
-    }
-  }
-
-  /**
-   * Test to verify default log level is INFO when logging success events.
-   */
-  @Test
-  public void verifyDefaultLogLevelForSuccess() throws IOException {
-    AUDIT.logWriteSuccess(WRITE_SUCCESS_MSG);
-    String expected =
-        "INFO  | OMAudit | " + WRITE_SUCCESS_MSG.getFormattedMessage();
-    verifyLog(expected);
-  }
-
-  /**
-   * Test to verify default log level is ERROR when logging failure events.
-   */
-  @Test
-  public void verifyDefaultLogLevelForFailure() throws IOException {
-    AUDIT.logWriteFailure(WRITE_FAIL_MSG);
-    String expected =
-        "ERROR | OMAudit | " + WRITE_FAIL_MSG.getFormattedMessage();
-    verifyLog(expected);
-  }
-
-  /**
-   * Test to verify no READ event is logged.
-   */
-  @Test
-  public void notLogReadEvents() throws IOException {
-    AUDIT.logReadSuccess(READ_SUCCESS_MSG);
-    AUDIT.logReadFailure(READ_FAIL_MSG);
-    verifyNoLog();
-  }
-
-  private void verifyLog(String expected) throws IOException {
-    File file = new File("audit.log");
-    List<String> lines = FileUtils.readLines(file, (String)null);
-    final int retry = 5;
-    int i = 0;
-    while (lines.isEmpty() && i < retry) {
-      lines = FileUtils.readLines(file, (String)null);
-      try {
-        Thread.sleep(500 * (i + 1));
-      } catch(InterruptedException ie) {
-        Thread.currentThread().interrupt();
-        break;
-      }
-      i++;
-    }
-
-    // When a log entry is expected, the log file must contain at least one
-    // line, and the first line must match the expected string.
-    assertTrue(lines.size() != 0);
-    assertTrue(expected.equalsIgnoreCase(lines.get(0)));
-    //empty the file
-    lines.clear();
-    FileUtils.writeLines(file, lines, false);
-  }
-
-  private void verifyNoLog() throws IOException {
-    File file = new File("audit.log");
-    List<String> lines = FileUtils.readLines(file, (String)null);
-    // When no log entry is expected, the log file must be empty
-    assertTrue(lines.size() == 0);
-  }
-}
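
verifyLog() above has to tolerate the asynchronous flush of the audit appender, so it re-reads the file with a linearly growing backoff until a line appears or the retry budget runs out. The same bounded-polling idiom in isolation; file name and timings below are illustrative:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;

final class LogPoller {
  /** Re-reads the file until it is non-empty or retries are exhausted. */
  static List<String> awaitLines(Path file, int retries, long baseSleepMs)
      throws IOException, InterruptedException {
    List<String> lines = Files.readAllLines(file);
    for (int i = 0; lines.isEmpty() && i < retries; i++) {
      Thread.sleep(baseSleepMs * (i + 1)); // linear backoff, as in verifyLog()
      lines = Files.readAllLines(file);
    }
    return lines;
  }

  public static void main(String[] args) throws Exception {
    List<String> lines = awaitLines(Paths.get("audit.log"), 5, 500);
    System.out.println(lines.isEmpty() ? "no log entry" : lines.get(0));
  }
}
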
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java
deleted file mode 100644
index 1222ad0..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.audit;
-/**
- * Unit tests of Ozone Audit Logger.
- * For test purpose, the log4j2 configuration is loaded from file at:
- * src/test/resources/log4j2.properties
- */
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java
deleted file mode 100644
index 819c29f..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Tests for {@link Checksum} class.
- */
-public class TestChecksum {
-
-  private static final int BYTES_PER_CHECKSUM = 10;
-  private static final ContainerProtos.ChecksumType CHECKSUM_TYPE_DEFAULT =
-      ContainerProtos.ChecksumType.SHA256;
-
-  private Checksum getChecksum(ContainerProtos.ChecksumType type) {
-    if (type == null) {
-      type = CHECKSUM_TYPE_DEFAULT;
-    }
-    return new Checksum(type, BYTES_PER_CHECKSUM);
-  }
-
-  /**
-   * Tests {@link Checksum#verifyChecksum(byte[], ChecksumData)}.
-   */
-  @Test
-  public void testVerifyChecksum() throws Exception {
-    Checksum checksum = getChecksum(null);
-    int dataLen = 55;
-    byte[] data = RandomStringUtils.randomAlphabetic(dataLen).getBytes();
-
-    ChecksumData checksumData = checksum.computeChecksum(data);
-
-    // A checksum is calculated for each bytesPerChecksum number of bytes in
-    // the data. Since that value is 10 here and the data length is 55, we
-    // should have 6 checksums in checksumData.
-    Assert.assertEquals(6, checksumData.getChecksums().size());
-
-    // Checksum verification should pass
-    Assert.assertTrue("Checksum mismatch",
-        Checksum.verifyChecksum(data, checksumData));
-  }
-
-  /**
-   * Tests that if data is modified, then the checksums should not match.
-   */
-  @Test
-  public void testIncorrectChecksum() throws Exception {
-    Checksum checksum = getChecksum(null);
-    byte[] data = RandomStringUtils.randomAlphabetic(55).getBytes();
-    ChecksumData originalChecksumData = checksum.computeChecksum(data);
-
-    // Change the data and check that the new checksum no longer matches the
-    // original. Modifying one byte of data should be enough for the checksum
-    // data to differ.
-    data[50] = (byte) (data[50]+1);
-    ChecksumData newChecksumData = checksum.computeChecksum(data);
-    Assert.assertNotEquals("Checksums should not match for different data",
-        originalChecksumData, newChecksumData);
-  }
-
-  /**
-   * Tests that checksum calculated using two different checksumTypes should
-   * not match.
-   */
-  @Test
-  public void testChecksumMismatchForDifferentChecksumTypes() throws Exception {
-    byte[] data = RandomStringUtils.randomAlphabetic(55).getBytes();
-
-    // Checksum1 of type SHA-256
-    Checksum checksum1 = getChecksum(null);
-    ChecksumData checksumData1 = checksum1.computeChecksum(data);
-
-    // Checksum2 of type CRC32
-    Checksum checksum2 = getChecksum(ContainerProtos.ChecksumType.CRC32);
-    ChecksumData checksumData2 = checksum2.computeChecksum(data);
-
-    // The two checksums should not match, as they use different types.
-    Assert.assertNotEquals(
-        "Checksums should not match for different checksum types",
-        checksumData1, checksumData2);
-  }
-}
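
testVerifyChecksum relies on one checksum being computed per bytesPerChecksum bytes, so 55 bytes at 10 bytes per checksum yields ceil(55/10) = 6 checksums. A standalone sketch of that chunking using only the JDK's MessageDigest; the Ozone Checksum/ChecksumData classes are not reproduced here, and the names below are illustrative:

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class ChunkedChecksum {
  /** One SHA-256 digest per bytesPerChecksum-sized chunk of the data. */
  static List<byte[]> compute(byte[] data, int bytesPerChecksum)
      throws NoSuchAlgorithmException {
    List<byte[]> checksums = new ArrayList<>();
    for (int off = 0; off < data.length; off += bytesPerChecksum) {
      MessageDigest md = MessageDigest.getInstance("SHA-256");
      int len = Math.min(bytesPerChecksum, data.length - off);
      md.update(data, off, len);
      checksums.add(md.digest());
    }
    return checksums;
  }

  /** Recomputes chunk checksums and compares them against the expected list. */
  static boolean verify(byte[] data, int bytesPerChecksum, List<byte[]> expected)
      throws NoSuchAlgorithmException {
    List<byte[]> actual = compute(data, bytesPerChecksum);
    if (actual.size() != expected.size()) {
      return false;
    }
    for (int i = 0; i < actual.size(); i++) {
      if (!Arrays.equals(actual.get(i), expected.get(i))) {
        return false;
      }
    }
    return true;
  }
}
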
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
deleted file mode 100644
index 2f466377..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
-
-import org.apache.hadoop.util.PureJavaCrc32;
-import org.apache.hadoop.util.PureJavaCrc32C;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.nio.charset.StandardCharsets;
-import java.util.Random;
-import java.util.zip.Checksum;
-
-/**
- * Test {@link ChecksumByteBuffer} implementations.
- */
-public class TestChecksumByteBuffer {
-  @Test
-  public void testPureJavaCrc32ByteBuffer() {
-    final Checksum expected = new PureJavaCrc32();
-    final ChecksumByteBuffer testee = new PureJavaCrc32ByteBuffer();
-    new VerifyChecksumByteBuffer(expected, testee).testCorrectness();
-  }
-
-  @Test
-  public void testPureJavaCrc32CByteBuffer() {
-    final Checksum expected = new PureJavaCrc32C();
-    final ChecksumByteBuffer testee = new PureJavaCrc32CByteBuffer();
-    new VerifyChecksumByteBuffer(expected, testee).testCorrectness();
-  }
-
-  static class VerifyChecksumByteBuffer {
-    private final Checksum expected;
-    private final ChecksumByteBuffer testee;
-
-    VerifyChecksumByteBuffer(Checksum expected, ChecksumByteBuffer testee) {
-      this.expected = expected;
-      this.testee = testee;
-    }
-
-    void testCorrectness() {
-      checkSame();
-
-      checkBytes("hello world!".getBytes(StandardCharsets.UTF_8));
-
-      final Random random = new Random();
-      final byte[] bytes = new byte[1 << 10];
-      for (int i = 0; i < 1000; i++) {
-        random.nextBytes(bytes);
-        checkBytes(bytes, random.nextInt(bytes.length));
-      }
-    }
-
-    void checkBytes(byte[] bytes) {
-      checkBytes(bytes, bytes.length);
-    }
-
-    void checkBytes(byte[] bytes, int length) {
-      expected.reset();
-      testee.reset();
-      checkSame();
-
-      for (byte b : bytes) {
-        expected.update(b);
-        testee.update(b);
-        checkSame();
-      }
-
-      expected.reset();
-      testee.reset();
-
-      for (int i = 0; i < length; i++) {
-        expected.update(bytes, 0, i);
-        testee.update(bytes, 0, i);
-        checkSame();
-      }
-
-      expected.reset();
-      testee.reset();
-      checkSame();
-    }
-
-    private void checkSame() {
-      Assert.assertEquals(expected.getValue(), testee.getValue());
-    }
-  }
-}
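
The VerifyChecksumByteBuffer helper above cross-checks a new checksum implementation against a reference by feeding both the same input, incrementally and in bulk, and demanding identical running values. The same strategy shown with only the JDK's CRC32 (a standalone illustration, not the classes under test):

import java.nio.charset.StandardCharsets;
import java.util.Random;
import java.util.zip.CRC32;

final class CrcCrossCheck {
  public static void main(String[] args) {
    check("hello world!".getBytes(StandardCharsets.UTF_8));
    byte[] bytes = new byte[1 << 10];
    new Random().nextBytes(bytes);
    check(bytes);
  }

  /** Byte-at-a-time and bulk updates must agree on the final value. */
  static void check(byte[] bytes) {
    CRC32 incremental = new CRC32();
    CRC32 bulk = new CRC32();
    for (byte b : bytes) {
      incremental.update(b);
    }
    bulk.update(bytes, 0, bytes.length);
    if (incremental.getValue() != bulk.getValue()) {
      throw new AssertionError("CRC mismatch");
    }
  }
}
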
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java
deleted file mode 100644
index c1470bb..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.common;
-
-import org.apache.commons.collections.SetUtils;
-import org.apache.hadoop.ozone.common.statemachine
-    .InvalidStateTransitionException;
-import org.apache.hadoop.ozone.common.statemachine.StateMachine;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CLEANUP;
-import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CLOSED;
-import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CREATING;
-import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.FINAL;
-import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.INIT;
-import static org.apache.hadoop.ozone.common.TestStateMachine.STATES
-    .OPERATIONAL;
-
-/**
- * This class is to test ozone common state machine.
- */
-public class TestStateMachine {
-
-  /**
-   * STATES used by the test state machine.
-   */
-  public enum STATES {INIT, CREATING, OPERATIONAL, CLOSED, CLEANUP, FINAL}
-
-  /**
-   * EVENTS used by the test state machine.
-   */
-  public enum EVENTS {ALLOCATE, CREATE, UPDATE, CLOSE, DELETE, TIMEOUT}
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @Test
-  public void testStateMachineStates() throws InvalidStateTransitionException {
-    Set<STATES> finals = new HashSet<>();
-    finals.add(FINAL);
-
-    StateMachine<STATES, EVENTS> stateMachine =
-        new StateMachine<>(INIT, finals);
-
-    stateMachine.addTransition(INIT, CREATING, EVENTS.ALLOCATE);
-    stateMachine.addTransition(CREATING, OPERATIONAL, EVENTS.CREATE);
-    stateMachine.addTransition(OPERATIONAL, OPERATIONAL, EVENTS.UPDATE);
-    stateMachine.addTransition(OPERATIONAL, CLEANUP, EVENTS.DELETE);
-    stateMachine.addTransition(OPERATIONAL, CLOSED, EVENTS.CLOSE);
-    stateMachine.addTransition(CREATING, CLEANUP, EVENTS.TIMEOUT);
-
-    // Initial and Final states
-    Assert.assertEquals("Initial State", INIT, stateMachine.getInitialState());
-    Assert.assertTrue("Final States", SetUtils.isEqualSet(finals,
-        stateMachine.getFinalStates()));
-
-    // Valid state transitions
-    Assert.assertEquals("STATE should be OPERATIONAL after being created",
-        OPERATIONAL, stateMachine.getNextState(CREATING, EVENTS.CREATE));
-    Assert.assertEquals("STATE should be OPERATIONAL after being updated",
-        OPERATIONAL, stateMachine.getNextState(OPERATIONAL, EVENTS.UPDATE));
-    Assert.assertEquals("STATE should be CLEANUP after being deleted",
-        CLEANUP, stateMachine.getNextState(OPERATIONAL, EVENTS.DELETE));
-    Assert.assertEquals("STATE should be CLEANUP after being timeout",
-        CLEANUP, stateMachine.getNextState(CREATING, EVENTS.TIMEOUT));
-    Assert.assertEquals("STATE should be CLOSED after being closed",
-        CLOSED, stateMachine.getNextState(OPERATIONAL, EVENTS.CLOSE));
-
-    // Negative cases: invalid transition
-    expectException();
-    stateMachine.getNextState(OPERATIONAL, EVENTS.CREATE);
-
-    expectException();
-    stateMachine.getNextState(CREATING, EVENTS.CLOSE);
-  }
-
-  /**
-   * We expect an InvalidStateTransitionException.
-   */
-  private void expectException() {
-    exception.expect(InvalidStateTransitionException.class);
-    exception.expectMessage("Invalid event");
-  }
-
-}
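
The state machine under test is at heart a transition table keyed by (state, event): addTransition registers a (from, event) -> to mapping, and getNextState looks it up, failing on unregistered pairs. A minimal generic sketch of that lookup, not the Ozone StateMachine class itself (TinyStateMachine and its exception message are illustrative):

import java.util.EnumMap;
import java.util.Map;

final class TinyStateMachine<S extends Enum<S>, E extends Enum<E>> {
  private final Map<S, Map<E, S>> transitions;
  private final Class<E> eventType;

  TinyStateMachine(Class<S> stateType, Class<E> eventType) {
    this.transitions = new EnumMap<>(stateType);
    this.eventType = eventType;
  }

  /** Registers the transition (from, event) -> to. */
  void addTransition(S from, S to, E event) {
    transitions.computeIfAbsent(from, s -> new EnumMap<>(eventType))
        .put(event, to);
  }

  /** Looks up the next state, failing on an unregistered pair. */
  S getNextState(S from, E event) {
    Map<E, S> byEvent = transitions.get(from);
    S to = (byEvent == null) ? null : byEvent.get(event);
    if (to == null) {
      throw new IllegalStateException(
          "Invalid event " + event + " in state " + from);
    }
    return to;
  }
}
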
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
deleted file mode 100644
index 3887833..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
+++ /dev/null
@@ -1,388 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- * A generic lease management API which can be used if a service
- * needs any kind of lease management.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Test class to check functionality and consistency of LeaseManager.
- */
-public class TestLeaseManager {
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Dummy resource on which leases can be acquired.
-   */
-  private static final class DummyResource {
-
-    private final String name;
-
-    private DummyResource(String name) {
-      this.name = name;
-    }
-
-    @Override
-    public int hashCode() {
-      return name.hashCode();
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (obj instanceof DummyResource) {
-        return name.equals(((DummyResource) obj).name);
-      }
-      return false;
-    }
-
-    /**
-     * Overrides toString to address the ErrorProne warning that this object
-     * is later used in string concatenation, which would otherwise print
-     * `org.apache.hadoop.ozone.lease.TestLeaseManager.DummyResource@
-     * 4488aabb` instead of useful information.
-     *
-     * @return Name of the Dummy object.
-     */
-    @Override
-    public String toString() {
-      return "DummyResource{" +
-          "name='" + name + '\'' +
-          '}';
-    }
-  }
-
-  @Test
-  public void testLeaseAcquireAndRelease() throws LeaseException {
-    // It is assumed that the test case execution won't take more than 5
-    // seconds; if it takes longer, increase the defaultTimeout value of
-    // LeaseManager.
-    LeaseManager<DummyResource> manager = new LeaseManager<>("Test", 5000);
-    manager.start();
-    DummyResource resourceOne = new DummyResource("one");
-    DummyResource resourceTwo = new DummyResource("two");
-    DummyResource resourceThree = new DummyResource("three");
-    Lease<DummyResource> leaseOne = manager.acquire(resourceOne);
-    Lease<DummyResource> leaseTwo = manager.acquire(resourceTwo);
-    Lease<DummyResource> leaseThree = manager.acquire(resourceThree);
-    Assert.assertEquals(leaseOne, manager.get(resourceOne));
-    Assert.assertEquals(leaseTwo, manager.get(resourceTwo));
-    Assert.assertEquals(leaseThree, manager.get(resourceThree));
-    Assert.assertFalse(leaseOne.hasExpired());
-    Assert.assertFalse(leaseTwo.hasExpired());
-    Assert.assertFalse(leaseThree.hasExpired());
-    //The below releases should not throw LeaseNotFoundException.
-    manager.release(resourceOne);
-    manager.release(resourceTwo);
-    manager.release(resourceThree);
-    Assert.assertTrue(leaseOne.hasExpired());
-    Assert.assertTrue(leaseTwo.hasExpired());
-    Assert.assertTrue(leaseThree.hasExpired());
-    manager.shutdown();
-  }
-
-  @Test
-  public void testLeaseAlreadyExist() throws LeaseException {
-    LeaseManager<DummyResource> manager = new LeaseManager<>("Test", 5000);
-    manager.start();
-    DummyResource resourceOne = new DummyResource("one");
-    DummyResource resourceTwo = new DummyResource("two");
-    Lease<DummyResource> leaseOne = manager.acquire(resourceOne);
-    Lease<DummyResource> leaseTwo = manager.acquire(resourceTwo);
-    Assert.assertEquals(leaseOne, manager.get(resourceOne));
-    Assert.assertEquals(leaseTwo, manager.get(resourceTwo));
-
-    exception.expect(LeaseAlreadyExistException.class);
-    exception.expectMessage("Resource: " + resourceOne);
-    manager.acquire(resourceOne);
-
-    manager.release(resourceOne);
-    manager.release(resourceTwo);
-    manager.shutdown();
-  }
-
-  @Test
-  public void testLeaseNotFound() throws LeaseException, InterruptedException {
-    LeaseManager<DummyResource> manager = new LeaseManager<>("Test", 5000);
-    manager.start();
-    DummyResource resourceOne = new DummyResource("one");
-    DummyResource resourceTwo = new DummyResource("two");
-    DummyResource resourceThree = new DummyResource("three");
-
-    //Case 1: lease was never acquired.
-    exception.expect(LeaseNotFoundException.class);
-    exception.expectMessage("Resource: " + resourceOne);
-    manager.get(resourceOne);
-
-    //Case 2: lease is acquired and released.
-    Lease<DummyResource> leaseTwo = manager.acquire(resourceTwo);
-    Assert.assertEquals(leaseTwo, manager.get(resourceTwo));
-    Assert.assertFalse(leaseTwo.hasExpired());
-    manager.release(resourceTwo);
-    Assert.assertTrue(leaseTwo.hasExpired());
-    exception.expect(LeaseNotFoundException.class);
-    exception.expectMessage("Resource: " + resourceTwo);
-    manager.get(resourceTwo);
-
-    //Case 3: lease acquired and timed out.
-    Lease<DummyResource> leaseThree = manager.acquire(resourceThree);
-    Assert.assertEquals(leaseThree, manager.get(resourceThree));
-    Assert.assertFalse(leaseThree.hasExpired());
-    long sleepTime = leaseThree.getRemainingTime() + 1000;
-    try {
-      Thread.sleep(sleepTime);
-    } catch (InterruptedException ex) {
-      // Even if interrupted, we have to wait until the lease times out.
-      Thread.sleep(sleepTime);
-    }
-    Assert.assertTrue(leaseThree.hasExpired());
-    exception.expect(LeaseNotFoundException.class);
-    exception.expectMessage("Resource: " + resourceThree);
-    manager.get(resourceThree);
-    manager.shutdown();
-  }
-
-  @Test
-  public void testCustomLeaseTimeout() throws LeaseException {
-    LeaseManager<DummyResource> manager = new LeaseManager<>("Test", 5000);
-    manager.start();
-    DummyResource resourceOne = new DummyResource("one");
-    DummyResource resourceTwo = new DummyResource("two");
-    DummyResource resourceThree = new DummyResource("three");
-    Lease<DummyResource> leaseOne = manager.acquire(resourceOne);
-    Lease<DummyResource> leaseTwo = manager.acquire(resourceTwo, 10000);
-    Lease<DummyResource> leaseThree = manager.acquire(resourceThree, 50000);
-    Assert.assertEquals(leaseOne, manager.get(resourceOne));
-    Assert.assertEquals(leaseTwo, manager.get(resourceTwo));
-    Assert.assertEquals(leaseThree, manager.get(resourceThree));
-    Assert.assertFalse(leaseOne.hasExpired());
-    Assert.assertFalse(leaseTwo.hasExpired());
-    Assert.assertFalse(leaseThree.hasExpired());
-    Assert.assertEquals(5000, leaseOne.getLeaseLifeTime());
-    Assert.assertEquals(10000, leaseTwo.getLeaseLifeTime());
-    Assert.assertEquals(50000, leaseThree.getLeaseLifeTime());
-    // Leases are released during shutdown, so we don't need to release
-    // them explicitly here.
-    manager.shutdown();
-  }
-
-  @Test
-  public void testLeaseCallback() throws LeaseException, InterruptedException {
-    Map<DummyResource, String> leaseStatus = new HashMap<>();
-    LeaseManager<DummyResource> manager = new LeaseManager<>("Test", 5000);
-    manager.start();
-    DummyResource resourceOne = new DummyResource("one");
-    Lease<DummyResource> leaseOne = manager.acquire(resourceOne);
-    leaseStatus.put(resourceOne, "lease in use");
-    leaseOne.registerCallBack(() -> {
-      leaseStatus.put(resourceOne, "lease expired");
-      return null;
-    });
-    // wait for lease to expire
-    long sleepTime = leaseOne.getRemainingTime() + 1000;
-    try {
-      Thread.sleep(sleepTime);
-    } catch (InterruptedException ex) {
-      // Even if interrupted, we have to wait until the lease times out.
-      Thread.sleep(sleepTime);
-    }
-    Assert.assertTrue(leaseOne.hasExpired());
-    exception.expect(LeaseNotFoundException.class);
-    exception.expectMessage("Resource: " + resourceOne);
-    manager.get(resourceOne);
-    // check if callback has been executed
-    Assert.assertEquals("lease expired", leaseStatus.get(resourceOne));
-  }
-
-  @Test
-  public void testCallbackExecutionInCaseOfLeaseRelease()
-      throws LeaseException, InterruptedException {
-    // Callbacks should not be executed in case of lease release
-    Map<DummyResource, String> leaseStatus = new HashMap<>();
-    LeaseManager<DummyResource> manager = new LeaseManager<>("Test", 5000);
-    manager.start();
-    DummyResource resourceOne = new DummyResource("one");
-    Lease<DummyResource> leaseOne = manager.acquire(resourceOne);
-    leaseStatus.put(resourceOne, "lease in use");
-    leaseOne.registerCallBack(() -> {
-      leaseStatus.put(resourceOne, "lease expired");
-      return null;
-    });
-    leaseStatus.put(resourceOne, "lease released");
-    manager.release(resourceOne);
-    Assert.assertTrue(leaseOne.hasExpired());
-    exception.expect(LeaseNotFoundException.class);
-    exception.expectMessage("Resource: " + resourceOne);
-    manager.get(resourceOne);
-    Assert.assertEquals("lease released", leaseStatus.get(resourceOne));
-  }
-
-  @Test
-  public void testLeaseCallbackWithMultipleLeases()
-      throws LeaseException, InterruptedException {
-    Map<DummyResource, String> leaseStatus = new HashMap<>();
-    LeaseManager<DummyResource> manager = new LeaseManager<>("Test", 5000);
-    manager.start();
-    DummyResource resourceOne = new DummyResource("one");
-    DummyResource resourceTwo = new DummyResource("two");
-    DummyResource resourceThree = new DummyResource("three");
-    DummyResource resourceFour = new DummyResource("four");
-    DummyResource resourceFive = new DummyResource("five");
-    Lease<DummyResource> leaseOne = manager.acquire(resourceOne);
-    Lease<DummyResource> leaseTwo = manager.acquire(resourceTwo);
-    Lease<DummyResource> leaseThree = manager.acquire(resourceThree);
-    Lease<DummyResource> leaseFour = manager.acquire(resourceFour);
-    Lease<DummyResource> leaseFive = manager.acquire(resourceFive);
-    leaseStatus.put(resourceOne, "lease in use");
-    leaseStatus.put(resourceTwo, "lease in use");
-    leaseStatus.put(resourceThree, "lease in use");
-    leaseStatus.put(resourceFour, "lease in use");
-    leaseStatus.put(resourceFive, "lease in use");
-    leaseOne.registerCallBack(() -> {
-      leaseStatus.put(resourceOne, "lease expired");
-      return null;
-    });
-    leaseTwo.registerCallBack(() -> {
-      leaseStatus.put(resourceTwo, "lease expired");
-      return null;
-    });
-    leaseThree.registerCallBack(() -> {
-      leaseStatus.put(resourceThree, "lease expired");
-      return null;
-    });
-    leaseFour.registerCallBack(() -> {
-      leaseStatus.put(resourceFour, "lease expired");
-      return null;
-    });
-    leaseFive.registerCallBack(() -> {
-      leaseStatus.put(resourceFive, "lease expired");
-      return null;
-    });
-
-    // release lease one, two and three
-    leaseStatus.put(resourceOne, "lease released");
-    manager.release(resourceOne);
-    leaseStatus.put(resourceTwo, "lease released");
-    manager.release(resourceTwo);
-    leaseStatus.put(resourceThree, "lease released");
-    manager.release(resourceThree);
-
-    // wait for other leases to expire
-    long sleepTime = leaseFive.getRemainingTime() + 1000;
-
-    try {
-      Thread.sleep(sleepTime);
-    } catch (InterruptedException ex) {
-      // Even if interrupted, we have to wait until the lease times out.
-      Thread.sleep(sleepTime);
-    }
-    Assert.assertTrue(leaseOne.hasExpired());
-    Assert.assertTrue(leaseTwo.hasExpired());
-    Assert.assertTrue(leaseThree.hasExpired());
-    Assert.assertTrue(leaseFour.hasExpired());
-    Assert.assertTrue(leaseFive.hasExpired());
-
-    Assert.assertEquals("lease released", leaseStatus.get(resourceOne));
-    Assert.assertEquals("lease released", leaseStatus.get(resourceTwo));
-    Assert.assertEquals("lease released", leaseStatus.get(resourceThree));
-    Assert.assertEquals("lease expired", leaseStatus.get(resourceFour));
-    Assert.assertEquals("lease expired", leaseStatus.get(resourceFive));
-    manager.shutdown();
-  }
-
-  @Test
-  public void testReuseReleasedLease() throws LeaseException {
-    LeaseManager<DummyResource> manager = new LeaseManager<>("Test", 5000);
-    manager.start();
-    DummyResource resourceOne = new DummyResource("one");
-    Lease<DummyResource> leaseOne = manager.acquire(resourceOne);
-    Assert.assertEquals(leaseOne, manager.get(resourceOne));
-    Assert.assertFalse(leaseOne.hasExpired());
-
-    manager.release(resourceOne);
-    Assert.assertTrue(leaseOne.hasExpired());
-
-    Lease<DummyResource> sameResourceLease = manager.acquire(resourceOne);
-    Assert.assertEquals(sameResourceLease, manager.get(resourceOne));
-    Assert.assertFalse(sameResourceLease.hasExpired());
-
-    manager.release(resourceOne);
-    Assert.assertTrue(sameResourceLease.hasExpired());
-    manager.shutdown();
-  }
-
-  @Test
-  public void testReuseTimedOutLease()
-      throws LeaseException, InterruptedException {
-    LeaseManager<DummyResource> manager = new LeaseManager<>("Test", 5000);
-    manager.start();
-    DummyResource resourceOne = new DummyResource("one");
-    Lease<DummyResource> leaseOne = manager.acquire(resourceOne);
-    Assert.assertEquals(leaseOne, manager.get(resourceOne));
-    Assert.assertFalse(leaseOne.hasExpired());
-    // wait for lease to expire
-    long sleepTime = leaseOne.getRemainingTime() + 1000;
-    try {
-      Thread.sleep(sleepTime);
-    } catch (InterruptedException ex) {
-      // Even if interrupted, we have to wait until the lease times out.
-      Thread.sleep(sleepTime);
-    }
-    Assert.assertTrue(leaseOne.hasExpired());
-
-    Lease<DummyResource> sameResourceLease = manager.acquire(resourceOne);
-    Assert.assertEquals(sameResourceLease, manager.get(resourceOne));
-    Assert.assertFalse(sameResourceLease.hasExpired());
-
-    manager.release(resourceOne);
-    Assert.assertTrue(sameResourceLease.hasExpired());
-    manager.shutdown();
-  }
-
-  @Test
-  public void testRenewLease() throws LeaseException, InterruptedException {
-    LeaseManager<DummyResource> manager = new LeaseManager<>("Test", 5000);
-    manager.start();
-    DummyResource resourceOne = new DummyResource("one");
-    Lease<DummyResource> leaseOne = manager.acquire(resourceOne);
-    Assert.assertEquals(leaseOne, manager.get(resourceOne));
-    Assert.assertFalse(leaseOne.hasExpired());
-
-    // add 5 more seconds to the lease
-    leaseOne.renew(5000);
-
-    Thread.sleep(5000);
-
-    // lease should still be active
-    Assert.assertEquals(leaseOne, manager.get(resourceOne));
-    Assert.assertFalse(leaseOne.hasExpired());
-    manager.release(resourceOne);
-    manager.shutdown();
-  }
-
-}
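
The timeout-versus-release behavior checked above (the callback fires on expiry, never on an explicit release) can be sketched with a ScheduledExecutorService. This is a standalone illustration under that assumption, not the LeaseManager API; renewal and the LeaseNotFound bookkeeping are omitted:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

final class TinyLease {
  private final AtomicBoolean expired = new AtomicBoolean(false);
  private final ScheduledFuture<?> timeout;

  TinyLease(ScheduledExecutorService scheduler, long timeoutMs, Runnable callback) {
    this.timeout = scheduler.schedule(() -> {
      if (expired.compareAndSet(false, true)) {
        callback.run(); // fires only on timeout, as in testLeaseCallback
      }
    }, timeoutMs, TimeUnit.MILLISECONDS);
  }

  /** Cancels the timer, so the callback never runs on an explicit release. */
  void release() {
    if (expired.compareAndSet(false, true)) {
      timeout.cancel(false);
    }
  }

  boolean hasExpired() {
    return expired.get();
  }

  public static void main(String[] args) throws Exception {
    ScheduledExecutorService scheduler =
        Executors.newSingleThreadScheduledExecutor();
    TinyLease lease = new TinyLease(scheduler, 5000,
        () -> System.out.println("lease expired"));
    lease.release();                        // no callback: released, not timed out
    System.out.println(lease.hasExpired()); // true
    scheduler.shutdown();
  }
}
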
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java
deleted file mode 100644
index 1071309..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-/*
- This package contains lease management unit test classes.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java
deleted file mode 100644
index e88b1bb1..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * Test-cases to test LockManager.
- */
-public class TestLockManager {
-
-  @Test(timeout = 1000)
-  public void testWriteLockWithDifferentResource() {
-    final LockManager<String> manager =
-        new LockManager<>(new OzoneConfiguration());
-    manager.writeLock("/resourceOne");
-    // This should work, as they are different resources.
-    manager.writeLock("/resourceTwo");
-    manager.writeUnlock("/resourceOne");
-    manager.writeUnlock("/resourceTwo");
-    Assert.assertTrue(true);
-  }
-
-  @Test
-  public void testWriteLockWithSameResource() throws Exception {
-    final LockManager<String> manager =
-        new LockManager<>(new OzoneConfiguration());
-    final AtomicBoolean gotLock = new AtomicBoolean(false);
-    manager.writeLock("/resourceOne");
-    new Thread(() -> {
-      manager.writeLock("/resourceOne");
-      gotLock.set(true);
-      manager.writeUnlock("/resourceOne");
-    }).start();
-    // Let's give some time for the other thread to run
-    Thread.sleep(100);
-    // Since the other thread is trying to get a write lock on the same
-    // resource, it will wait.
-    Assert.assertFalse(gotLock.get());
-    manager.writeUnlock("/resourceOne");
-    // Since we have released the write lock, the other thread should have
-    // the lock now
-    // Let's give some time for the other thread to run
-    Thread.sleep(100);
-    Assert.assertTrue(gotLock.get());
-  }
-
-  @Test(timeout = 1000)
-  public void testReadLockWithDifferentResource() {
-    final LockManager<String> manager =
-        new LockManager<>(new OzoneConfiguration());
-    manager.readLock("/resourceOne");
-    manager.readLock("/resourceTwo");
-    manager.readUnlock("/resourceOne");
-    manager.readUnlock("/resourceTwo");
-    Assert.assertTrue(true);
-  }
-
-  @Test
-  public void testReadLockWithSameResource() throws Exception {
-    final LockManager<String> manager =
-        new LockManager<>(new OzoneConfiguration());
-    final AtomicBoolean gotLock = new AtomicBoolean(false);
-    manager.readLock("/resourceOne");
-    new Thread(() -> {
-      manager.readLock("/resourceOne");
-      gotLock.set(true);
-      manager.readUnlock("/resourceOne");
-    }).start();
-    // Let's give some time for the other thread to run
-    Thread.sleep(100);
-    // Since the new thread is trying to get a read lock, it should succeed.
-    Assert.assertTrue(gotLock.get());
-    manager.readUnlock("/resourceOne");
-  }
-
-  @Test
-  public void testWriteReadLockWithSameResource() throws Exception {
-    final LockManager<String> manager =
-        new LockManager<>(new OzoneConfiguration());
-    final AtomicBoolean gotLock = new AtomicBoolean(false);
-    manager.writeLock("/resourceOne");
-    new Thread(() -> {
-      manager.readLock("/resourceOne");
-      gotLock.set(true);
-      manager.readUnlock("/resourceOne");
-    }).start();
-    // Let's give some time for the other thread to run
-    Thread.sleep(100);
-    // Since the other thread is trying to get a read lock on the same
-    // resource, it will wait.
-    Assert.assertFalse(gotLock.get());
-    manager.writeUnlock("/resourceOne");
-    // Since we have released the write lock, the other thread should have
-    // the lock now
-    // Let's give some time for the other thread to run
-    Thread.sleep(100);
-    Assert.assertTrue(gotLock.get());
-  }
-
-  @Test
-  public void testReadWriteLockWithSameResource() throws Exception {
-    final LockManager<String> manager =
-        new LockManager<>(new OzoneConfiguration());
-    final AtomicBoolean gotLock = new AtomicBoolean(false);
-    manager.readLock("/resourceOne");
-    new Thread(() -> {
-      manager.writeLock("/resourceOne");
-      gotLock.set(true);
-      manager.writeUnlock("/resourceOne");
-    }).start();
-    // Let's give some time for the other thread to run
-    Thread.sleep(100);
-    // Since the other thread is trying to get a write lock on the same
-    // resource, it will wait.
-    Assert.assertFalse(gotLock.get());
-    manager.readUnlock("/resourceOne");
-    // Since we have released the read lock, the other thread should have
-    // the lock now
-    // Let's give some time for the other thread to run
-    Thread.sleep(100);
-    Assert.assertTrue(gotLock.get());
-  }
-
-  @Test
-  public void testMultiReadWriteLockWithSameResource() throws Exception {
-    final LockManager<String> manager =
-        new LockManager<>(new OzoneConfiguration());
-    final AtomicBoolean gotLock = new AtomicBoolean(false);
-    manager.readLock("/resourceOne");
-    manager.readLock("/resourceOne");
-    new Thread(() -> {
-      manager.writeLock("/resourceOne");
-      gotLock.set(true);
-      manager.writeUnlock("/resourceOne");
-    }).start();
-    // Let's give some time for the other thread to run
-    Thread.sleep(100);
-    // Since the other thread is trying to get write lock on same object,
-    // it will wait.
-    Assert.assertFalse(gotLock.get());
-    manager.readUnlock("/resourceOne");
-    //We have only released one read lock, we still hold another read lock.
-    Thread.sleep(100);
-    Assert.assertFalse(gotLock.get());
-    manager.readUnlock("/resourceOne");
-    // Since we have released the read lock, the other thread should have
-    // the lock now
-    // Let's give some time for the other thread to run
-    Thread.sleep(100);
-    Assert.assertTrue(gotLock.get());
-  }
-
-}
\ No newline at end of file
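The tests above exercise per-resource read/write lock semantics: concurrent readers on the same key proceed, a writer blocks until every read hold (including reentrant ones) is released, and readers block while a writer holds the lock. A minimal sketch of a lock manager with those semantics, assuming one ReentrantReadWriteLock per resource key (the real LockManager also pools and reference-counts its lock objects, which this sketch omits):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    /** Sketch only: one reentrant read/write lock per resource key. */
    public class SimpleLockManager<R> {

      // computeIfAbsent creates the lock for a resource on first use.
      private final ConcurrentMap<R, ReentrantReadWriteLock> locks =
          new ConcurrentHashMap<>();

      private ReentrantReadWriteLock lockFor(R resource) {
        return locks.computeIfAbsent(resource, r -> new ReentrantReadWriteLock());
      }

      public void readLock(R resource) {
        lockFor(resource).readLock().lock();
      }

      public void readUnlock(R resource) {
        lockFor(resource).readLock().unlock();
      }

      public void writeLock(R resource) {
        lockFor(resource).writeLock().lock();
      }

      public void writeUnlock(R resource) {
        lockFor(resource).writeLock().unlock();
      }
    }

Note that testMultiReadWriteLockWithSameResource depends on read reentrancy: the writer stays blocked until both read holds on "/resourceOne" are released, which ReentrantReadWriteLock provides out of the box.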
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java
deleted file mode 100644
index a96bc162..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-/*
- This package contains the lock-related test classes.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/package-info.java
deleted file mode 100644
index 0030d2e..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone;
-/**
- * Ozone-related test helper classes and tests of common utilities.
- */
diff --git a/hadoop-hdds/common/src/test/resources/log4j2.properties b/hadoop-hdds/common/src/test/resources/log4j2.properties
deleted file mode 100644
index cef69e1..0000000
--- a/hadoop-hdds/common/src/test/resources/log4j2.properties
+++ /dev/null
@@ -1,76 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with this
-# work for additional information regarding copyright ownership.  The ASF
-# licenses this file to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# <p>
-# http://www.apache.org/licenses/LICENSE-2.0
-# <p>
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-name=PropertiesConfig
-
-# Checks for config change periodically and reloads
-monitorInterval=5
-
-filter=read, write
-# filter.read.onMatch = DENY avoids logging all READ events
-# filter.read.onMatch = ACCEPT permits logging all READ events
-# The above two settings ignore the log levels in configuration
-# filter.read.onMatch = NEUTRAL permits logging of only those READ events
-# which are attempted at log level equal or greater than log level specified
-# in the configuration
-filter.read.type = MarkerFilter
-filter.read.marker = READ
-filter.read.onMatch = DENY
-filter.read.onMismatch = NEUTRAL
-
-# filter.write.onMatch = DENY avoids logging all WRITE events
-# filter.write.onMatch = ACCEPT permits logging all WRITE events
-# The above two settings ignore the log levels in configuration
-# filter.write.onMatch = NEUTRAL permits logging of only those WRITE events
-# which are attempted at log level equal or greater than log level specified
-# in the configuration
-filter.write.type = MarkerFilter
-filter.write.marker = WRITE
-filter.write.onMatch = NEUTRAL
-filter.write.onMismatch = NEUTRAL
-
-# Log Levels are organized from most specific to least:
-# OFF (most specific, no logging)
-# FATAL (most specific, little data)
-# ERROR
-# WARN
-# INFO
-# DEBUG
-# TRACE (least specific, a lot of data)
-# ALL (least specific, all data)
-
-appenders = console, audit
-appender.console.type = Console
-appender.console.name = STDOUT
-appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = %-5level | %c{1} | %msg%n
-
-appender.audit.type = File
-appender.audit.name = AUDITLOG
-appender.audit.fileName=audit.log
-appender.audit.layout.type=PatternLayout
-appender.audit.layout.pattern= %-5level | %c{1} | %msg%n
-
-loggers=audit
-logger.audit.type=AsyncLogger
-logger.audit.name=OMAudit
-logger.audit.level = INFO
-logger.audit.appenderRefs = audit
-logger.audit.appenderRef.file.ref = AUDITLOG
-
-rootLogger.level = INFO
-rootLogger.appenderRefs = stdout
-rootLogger.appenderRef.stdout.ref = STDOUT
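The READ and WRITE markers that this test configuration filters on are attached at the logging call site. A hedged sketch of emitting events against the OMAudit logger so the MarkerFilters above can drop or keep them (logger and marker names follow the configuration; Ozone's real audit API wraps this in its own helper types):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.Marker;
    import org.apache.logging.log4j.MarkerManager;

    public final class AuditMarkerExample {

      // Matches logger.audit.name=OMAudit above.
      private static final Logger AUDIT = LogManager.getLogger("OMAudit");

      // Matched by filter.read.marker / filter.write.marker.
      private static final Marker READ = MarkerManager.getMarker("READ");
      private static final Marker WRITE = MarkerManager.getMarker("WRITE");

      public static void main(String[] args) {
        // Dropped entirely: filter.read.onMatch = DENY.
        AUDIT.info(READ, "user=alice | op=READ_KEY | key=/vol/buck/k1");
        // Kept if at or above the configured level: onMatch = NEUTRAL.
        AUDIT.info(WRITE, "user=alice | op=CREATE_KEY | key=/vol/buck/k1");
      }
    }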
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/enforce-error.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/enforce-error.xml
deleted file mode 100644
index 58c5802..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/enforce-error.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-    </layer>
-    <layer id="nodegroup">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>Leaf</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/datacenter/rack/nodegroup/node</path>
-    <enforceprefix>true</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.xml
deleted file mode 100644
index 25be9c2..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.xml
+++ /dev/null
@@ -1,49 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>Root</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-      <default>/default-rack</default>
-    </layer>
-    <layer id="nodegroup">
-      <prefix>nodegroup</prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-      <default>/default-nodegroup</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>Leaf</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/datacenter/rack/nodegroup/node</path>
-    <enforceprefix>true</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
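good.xml is the baseline the schema loader should accept; the fixtures that follow each break one validation rule (duplicate ROOT or Leaf layers, a negative cost, a non-numeric layoutversion, multiple topology elements, a missing topology, bad path order or unknown path ids, prefix enforcement). A self-contained sketch of one such check using only the JDK DOM API (an illustration, not the project's actual loader):

    import java.util.ArrayList;
    import java.util.List;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;

    /** Sketch: require exactly one ROOT and one Leaf layer in a schema file. */
    public final class TopologySchemaCheck {
      public static void main(String[] args) throws Exception {
        Document doc = DocumentBuilderFactory.newInstance()
            .newDocumentBuilder().parse(args[0]);
        List<String> types = new ArrayList<>();
        NodeList layers = doc.getElementsByTagName("layer");
        for (int i = 0; i < layers.getLength(); i++) {
          Element layer = (Element) layers.item(i);
          types.add(layer.getElementsByTagName("type").item(0)
              .getTextContent().trim().toUpperCase());
        }
        long roots = types.stream().filter("ROOT"::equals).count();
        long leaves = types.stream().filter("LEAF"::equals).count();
        if (roots != 1 || leaves != 1) {
          throw new IllegalArgumentException("expected exactly one ROOT and "
              + "one Leaf layer, found " + roots + " and " + leaves);
        }
        System.out.println("layer types ok: " + types);
      }
    }

Run against multiple-root.xml or no-leaf.xml further down, this check fails in exactly the way the loader's tests expect.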
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.yaml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.yaml
deleted file mode 100644
index d5092ad..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
----
-# Cost: The cost of crossing this layer.
-# The value should be a positive integer or 0. This field is optional;
-# when it is not defined, its value defaults to "1".
-cost: 1
-
-# The prefix of this layer.
-# If the prefix is "dc", then every name in this layer should start with "dc",
-# such as "dc1", "dc2".
-# Note that, unlike the XML schema, the prefix must be specified explicitly
-# if the type is InnerNode.
-prefix: /
-
-# Layer type, optional field, default value InnerNode.
-# Current value range : {ROOT, INNER_NODE, LEAF_NODE}
-type: ROOT
-
-# Layer name
-defaultName: root
-
-# The sublayers of the current layer, expressed as a list.
-sublayer:
-  -
-    cost: 1
-    prefix: dc
-    defaultName: datacenter
-    type: INNER_NODE
-    sublayer:
-      -
-        cost: 1
-        prefix: rack
-        defaultName: rack
-        type: INNER_NODE
-        sublayer:
-            -
-              cost: 1
-              prefix: ng
-              defaultName: nodegroup
-              type: INNER_NODE
-              sublayer:
-                -
-                  defaultName: node
-                  type: LEAF_NODE
-                  prefix: node
-...
\ No newline at end of file
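The YAML form encodes the same schema as a nested tree: each sublayer list holds the next layer down, ending at a LEAF_NODE. A hedged sketch of walking such a file generically with SnakeYAML (the project's real loader binds it to its own schema classes instead):

    import java.io.FileReader;
    import java.util.List;
    import java.util.Map;
    import org.yaml.snakeyaml.Yaml;

    public final class TopologyYamlWalk {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) throws Exception {
        try (FileReader reader = new FileReader(args[0])) {
          Map<String, Object> layer =
              (Map<String, Object>) new Yaml().load(reader);
          // Follow the first sublayer at each level, root to leaf.
          while (layer != null) {
            System.out.println(
                layer.get("defaultName") + " (" + layer.get("type") + ")");
            List<Map<String, Object>> sub =
                (List<Map<String, Object>>) layer.get("sublayer");
            layer = (sub == null || sub.isEmpty()) ? null : sub.get(0);
          }
        }
      }
    }

For good.yaml this prints root, datacenter, rack, nodegroup, node; middle-leaf.yaml below deliberately places a LEAF_NODE in the middle of the tree so the loader can reject it.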
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-cost.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-cost.xml
deleted file mode 100644
index cf934bc..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-cost.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>-1</cost>
-      <type>InnerNode</type>
-      <default>default-rack</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>leaf</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/datacenter/rack/node</path>
-    <enforceprefix>false</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-version.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-version.xml
deleted file mode 100644
index d69aab1..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-version.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>a</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>-1</cost>
-      <type>InnerNode</type>
-      <default>default-rack</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>leaf</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/datacenter/rack/node</path>
-    <enforceprefix>false</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/middle-leaf.yaml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/middle-leaf.yaml
deleted file mode 100644
index 0a2d490d..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/middle-leaf.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
----
-# Cost: The cost of crossing this layer.
-# The value should be a positive integer or 0. This field is optional;
-# when it is not defined, its value defaults to "1".
-cost: 1
-
-# The prefix of this layer.
-# If the prefix is "dc", then every name in this layer should start with "dc",
-# such as "dc1", "dc2".
-# Note that, unlike the XML schema, the prefix must be specified explicitly
-# if the type is InnerNode.
-prefix: /
-
-# Layer type, optional field, default value InnerNode.
-# Current value range : {ROOT, INNER_NODE, LEAF_NODE}
-type: ROOT
-
-# Layer name
-defaultName: root
-
-# The sublayers of the current layer, expressed as a list.
-sublayer:
-  -
-    cost: 1
-    prefix: dc
-    defaultName: datacenter
-    type: INNER_NODE
-    sublayer:
-      -
-        cost: 1
-        prefix: node
-        defaultName: rack
-        type: LEAF_NODE
-        sublayer:
-            -
-              cost: 1
-              prefix: ng
-              defaultName: nodegroup
-              type: INNER_NODE
-              sublayer:
-                -
-                  defaultName: node
-                  type: LEAF_NODE
-                  prefix: node
-...
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-leaf.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-leaf.xml
deleted file mode 100644
index a4297af..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-leaf.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>1</cost>
-      <type>Leaf</type>
-      <default>default-rack</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>Leaf</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/datacenter/rack/node</path>
-    <enforceprefix>false</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.xml
deleted file mode 100644
index afc7816..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-      <default>default-rack</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>Leaf</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/datacenter/rack/node</path>
-    <enforceprefix>false</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.yaml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.yaml
deleted file mode 100644
index 536ed23..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
----
-# Cost: The cost of crossing this layer.
-# The value should be a positive integer or 0. This field is optional;
-# when it is not defined, its value defaults to "1".
-cost: 1
-
-# The prefix of this layer.
-# If the prefix is "dc", then every name in this layer should start with "dc",
-# such as "dc1", "dc2".
-# Note that, unlike the XML schema, the prefix must be specified explicitly
-# if the type is InnerNode.
-prefix: /
-
-# Layer type, optional field, default value InnerNode.
-# Current value range : {ROOT, INNER_NODE, LEAF_NODE}
-type: ROOT
-
-# Layer name
-defaultName: root
-
-# The sublayers of the current layer, expressed as a list.
-sublayer:
-  -
-    cost: 1
-    prefix: root
-    defaultName: root
-    type: ROOT
-    sublayer:
-      -
-        cost: 1
-        prefix: rack
-        defaultName: rack
-        type: INNER_NODE
-        sublayer:
-            -
-              cost: 1
-              prefix: ng
-              defaultName: nodegroup
-              type: INNER_NODE
-              sublayer:
-                -
-                  defaultName: node
-                  type: LEAF_NODE
-                  prefix: node
-...
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-topology.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-topology.xml
deleted file mode 100644
index a7322ca..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-topology.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-      <default>default-rack</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>Leaf</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/datacenter/rack/node</path>
-    <enforceprefix>false</enforceprefix>
-  </topology>
-  <topology>
-    <path>/datacenter/rack/node</path>
-    <enforceprefix>false</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-leaf.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-leaf.xml
deleted file mode 100644
index fcc697c..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-leaf.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-      <default>default-rack</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>InnerNode</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/datacenter/rack/node</path>
-    <enforceprefix>false</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-root.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-root.xml
deleted file mode 100644
index 940696c..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-root.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-      <default>default-rack</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>Leaf</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/datacenter/rack/node</path>
-    <enforceprefix>false</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-topology.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-topology.xml
deleted file mode 100644
index c16e216..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-topology.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-      <default>default-rack</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>LEAF</type>
-    </layer>
-  </layers>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-layers-size-mismatch.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-layers-size-mismatch.xml
deleted file mode 100644
index 2c30219..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-layers-size-mismatch.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-      <default>default-rack</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>Leaf</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/datacenter/node</path>
-    <enforceprefix>false</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-with-id-reference-failure.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-with-id-reference-failure.xml
deleted file mode 100644
index fac224b..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-with-id-reference-failure.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-      <default>default-rack</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>Leaf</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/datacenter/room/node</path>
-    <enforceprefix>false</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/unknown-layer-type.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/unknown-layer-type.xml
deleted file mode 100644
index d228eec..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/unknown-layer-type.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-      <default>default-rack</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>leaves</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/datacenter/rack/node</path>
-    <enforceprefix>false</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-1.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-1.xml
deleted file mode 100644
index 221e10b..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-1.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-      <default>default-rack</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>Leaf</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/rack/datacenter/node</path>
-    <enforceprefix>false</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-2.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-2.xml
deleted file mode 100644
index 51e579e..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-2.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-  <layoutversion>1</layoutversion>
-  <layers>
-    <layer id="datacenter">
-      <prefix></prefix>
-      <cost>1</cost>
-      <type>ROOT</type>
-    </layer>
-    <layer id="rack">
-      <prefix>rack</prefix>
-      <cost>1</cost>
-      <type>InnerNode</type>
-      <default>default-rack</default>
-    </layer>
-    <layer id="node">
-      <prefix></prefix>
-      <cost>0</cost>
-      <type>Leaf</type>
-    </layer>
-  </layers>
-  <topology>
-    <path>/datacenter/node/rack</path>
-    <enforceprefix>false</enforceprefix>
-  </topology>
-</configuration>
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/resources/test.db.ini b/hadoop-hdds/common/src/test/resources/test.db.ini
deleted file mode 100644
index 6666cd2..0000000
--- a/hadoop-hdds/common/src/test/resources/test.db.ini
+++ /dev/null
@@ -1,145 +0,0 @@
-# This is a RocksDB option file.
-#
-# A typical RocksDB options file has four sections, which are
-# Version section, DBOptions section, at least one CFOptions
-# section, and one TableOptions section for each column family.
-# The RocksDB options file in general follows the basic INI
-# file format with the following extensions / modifications:
-#
-#  * Escaped characters
-#    We escape the following characters:
-#     - \n -- line feed - new line
-#     - \r -- carriage return
-#     - \\ -- backslash \
-#     - \: -- colon symbol :
-#     - \# -- hash tag #
-#  * Comments
-#    We support # style comments.  Comments can appear at the end
-#    of a line.
-#  * Statements
-#    A statement is of the form option_name = value.
-#    Each statement contains a '=', around which extra whitespace
-#    is allowed. However, we don't support multi-line statements;
-#    each line can contain at most one statement.
-#  * Sections
-#    Sections are of the form [SectionTitle "SectionArgument"],
-#    where the section argument is optional.
-#  * List
-#    We use colon-separated string to represent a list.
-#    For instance, n1:n2:n3:n4 is a list containing four values.
-#
-# Below is an example of a RocksDB options file:
-
-
-#----------------------IMPORTANT------------------------------------#
-### FAKE VALUES FOR TESTING ONLY ### DO NOT USE THESE FOR PRODUCTION.
-#----------------------IMPORTANT------------------------------------#
-[DBOptions]
-  stats_dump_period_sec=600
-  max_manifest_file_size=551615
-  bytes_per_sync=8388608
-  delayed_write_rate=2097152
-  WAL_ttl_seconds=0
-  WAL_size_limit_MB=0
-  max_subcompactions=1
-  wal_dir=
-  wal_bytes_per_sync=0
-  db_write_buffer_size=0
-  keep_log_file_num=1000
-  table_cache_numshardbits=4
-  max_file_opening_threads=1
-  writable_file_max_buffer_size=1048576
-  random_access_max_buffer_size=1048576
-  use_fsync=false
-  max_total_wal_size=0
-  max_open_files=-1
-  skip_stats_update_on_db_open=false
-  max_background_compactions=16
-  manifest_preallocation_size=4194304
-  max_background_flushes=7
-  is_fd_close_on_exec=true
-  max_log_file_size=0
-  advise_random_on_open=true
-  create_missing_column_families=false
-  paranoid_checks=true
-  delete_obsolete_files_period_micros=21600000000
-  log_file_time_to_roll=0
-  compaction_readahead_size=0
-  create_if_missing=false
-  use_adaptive_mutex=false
-  enable_thread_tracking=false
-  allow_fallocate=true
-  error_if_exists=false
-  recycle_log_file_num=0
-  skip_log_error_on_recovery=false
-  db_log_dir=
-  new_table_reader_for_compaction_inputs=true
-  allow_mmap_reads=false
-  allow_mmap_writes=false
-  use_direct_reads=false
-  use_direct_writes=false
-
-
-[CFOptions "default"]
-  compaction_style=kCompactionStyleLevel
-  compaction_filter=nullptr
-  num_levels=6
-  table_factory=BlockBasedTable
-  comparator=leveldb.BytewiseComparator
-  max_sequential_skip_in_iterations=8
-  soft_rate_limit=0.000000
-  max_bytes_for_level_base=1073741824
-  memtable_prefix_bloom_probes=6
-  memtable_prefix_bloom_bits=0
-  memtable_prefix_bloom_huge_page_tlb_size=0
-  max_successive_merges=0
-  arena_block_size=16777216
-  min_write_buffer_number_to_merge=1
-  target_file_size_multiplier=1
-  source_compaction_factor=1
-  max_bytes_for_level_multiplier=8
-  max_bytes_for_level_multiplier_additional=2:3:5
-  compaction_filter_factory=nullptr
-  max_write_buffer_number=8
-  level0_stop_writes_trigger=20
-  compression=kSnappyCompression
-  level0_file_num_compaction_trigger=4
-  purge_redundant_kvs_while_flush=true
-  max_write_buffer_number_to_maintain=0
-  memtable_factory=SkipListFactory
-  max_grandparent_overlap_factor=8
-  expanded_compaction_factor=25
-  hard_pending_compaction_bytes_limit=137438953472
-  inplace_update_num_locks=10000
-  level_compaction_dynamic_level_bytes=true
-  level0_slowdown_writes_trigger=12
-  filter_deletes=false
-  verify_checksums_in_compaction=true
-  min_partial_merge_operands=2
-  paranoid_file_checks=false
-  target_file_size_base=134217728
-  optimize_filters_for_hits=false
-  merge_operator=PutOperator
-  compression_per_level=kNoCompression:kNoCompression:kNoCompression:kSnappyCompression:kSnappyCompression:kSnappyCompression
-  compaction_measure_io_stats=false
-  prefix_extractor=nullptr
-  bloom_locality=0
-  write_buffer_size=134217728
-  disable_auto_compactions=false
-  inplace_update_support=false
-
-[TableOptions/BlockBasedTable "default"]
-  format_version=2
-  whole_key_filtering=true
-  no_block_cache=false
-  checksum=kCRC32c
-  filter_policy=rocksdb.BuiltinBloomFilter
-  block_size_deviation=10
-  block_size=8192
-  block_restart_interval=16
-  cache_index_and_filter_blocks=false
-  pin_l0_filter_and_index_blocks_in_cache=false
-  pin_top_level_index_and_filter=false
-  index_type=kBinarySearch
-  hash_index_allow_collision=true
-  flush_block_policy_factory=FlushBlockBySizePolicyFactory
\ No newline at end of file
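This RocksDB options file (fake values aside) is in the same format the RocksDB Java bindings can load back. A hedged sketch, assuming the rocksdbjni OptionsUtil API of roughly this era (newer releases change the signature to take a ConfigOptions argument):

    import java.util.ArrayList;
    import java.util.List;
    import org.rocksdb.ColumnFamilyDescriptor;
    import org.rocksdb.DBOptions;
    import org.rocksdb.Env;
    import org.rocksdb.OptionsUtil;
    import org.rocksdb.RocksDB;

    public final class LoadOptionsFileExample {
      public static void main(String[] args) throws Exception {
        RocksDB.loadLibrary();
        DBOptions dbOptions = new DBOptions();
        List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>();
        // Fills dbOptions from [DBOptions] and adds one descriptor
        // per [CFOptions "..."] section in the file.
        OptionsUtil.loadOptionsFromFile(
            "test.db.ini", Env.getDefault(), dbOptions, cfDescs);
        System.out.println("column families: " + cfDescs.size());
      }
    }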
diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml
deleted file mode 100644
index a595549..0000000
--- a/hadoop-hdds/config/pom.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdds</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-hdds-config</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Config Tools</description>
-  <name>Apache Hadoop HDDS Config</name>
-  <packaging>jar</packaging>
-
-  <properties>
-
-  </properties>
-
-  <dependencies>
-
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-  </dependencies>
-
-</project>
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java
deleted file mode 100644
index 70aa58d..0000000
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Marks a setter as configurable from ozone-site.xml.
- */
-@Retention(RetentionPolicy.RUNTIME)
-@Target(ElementType.METHOD)
-public @interface Config {
-
-  /**
-   * Configuration fragment relative to the prefix defined with @ConfigGroup.
-   */
-  String key();
-
-  /**
-   * Default value to use if not set.
-   */
-  String defaultValue();
-
-  /**
-   * Custom description as a help.
-   */
-  String description();
-
-  /**
- * Type of configuration. Use AUTO to derive it from the Java type.
-   */
-  ConfigType type() default ConfigType.AUTO;
-
-  /**
-   * If type == TIME the unit should be defined with this attribute.
-   */
-  TimeUnit timeUnit() default TimeUnit.MILLISECONDS;
-
-  ConfigTag[] tags();
-}
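A usage sketch of the annotation (the group prefix and key below are hypothetical; ConfigFileGenerator further down joins @ConfigGroup.prefix() and @Config.key() with a dot to form the final property name):

    import java.util.concurrent.TimeUnit;

    // Hypothetical example: would surface as
    // "ozone.scm.client.rpc.timeout" in ozone-default-generated.xml.
    @ConfigGroup(prefix = "ozone.scm.client")
    public class ScmClientConfig {

      private long rpcTimeout;

      @Config(key = "rpc.timeout",
          defaultValue = "15m",
          type = ConfigType.TIME,
          timeUnit = TimeUnit.MILLISECONDS,
          tags = {ConfigTag.CLIENT},
          description = "RPC timeout for SCM client calls.")
      public void setRpcTimeout(long rpcTimeout) {
        this.rpcTimeout = rpcTimeout;
      }

      public long getRpcTimeout() {
        return rpcTimeout;
      }
    }

ConfigType, ConfigTag, and @ConfigGroup are siblings of @Config in this package; the CLIENT tag value here is assumed for illustration.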
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java
deleted file mode 100644
index 9463f42..0000000
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.transform.OutputKeys;
-import javax.xml.transform.Transformer;
-import javax.xml.transform.TransformerException;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.dom.DOMSource;
-import javax.xml.transform.stream.StreamResult;
-import java.io.InputStream;
-import java.io.Writer;
-import java.util.Arrays;
-import java.util.stream.Collectors;
-
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-
-/**
- * Simple DOM-based config file writer.
- * <p>
- * This class can initialize or load existing ozone-default-generated.xml
- * fragments, append new entries, and write the result back to the file system.
- */
-public class ConfigFileAppender {
-
-  private Document document;
-
-  private final DocumentBuilder builder;
-
-  public ConfigFileAppender() {
-    try {
-      DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
-      builder = factory.newDocumentBuilder();
-    } catch (Exception ex) {
-      throw new ConfigurationException("Can initialize new configuration", ex);
-    }
-  }
-
-  /**
-   * Initialize a new ozone-site.xml structure with empty content.
-   */
-  public void init() {
-    try {
-      document = builder.newDocument();
-      document.appendChild(document.createElement("configuration"));
-    } catch (Exception ex) {
-      throw new ConfigurationException("Can initialize new configuration", ex);
-    }
-  }
-
-  /**
-   * Load existing ozone-site.xml content and parse the DOM tree.
-   */
-  public void load(InputStream stream) {
-    try {
-      document = builder.parse(stream);
-    } catch (Exception ex) {
-      throw new ConfigurationException("Can't load existing configuration", ex);
-    }
-  }
-
-  /**
-   * Add configuration fragment.
-   */
-  public void addConfig(String key, String defaultValue, String description,
-      ConfigTag[] tags) {
-    Element root = document.getDocumentElement();
-    Element propertyElement = document.createElement("property");
-
-    addXmlElement(propertyElement, "name", key);
-
-    addXmlElement(propertyElement, "value", defaultValue);
-
-    addXmlElement(propertyElement, "description", description);
-
-    String tagsAsString = Arrays.stream(tags).map(tag -> tag.name())
-        .collect(Collectors.joining(", "));
-
-    addXmlElement(propertyElement, "tag", tagsAsString);
-
-    root.appendChild(propertyElement);
-  }
-
-  private void addXmlElement(Element parentElement, String tagValue,
-      String textValue) {
-    Element element = document.createElement(tagValue);
-    element.appendChild(document.createTextNode(textValue));
-    parentElement.appendChild(element);
-  }
-
-  /**
-   * Write out the XML content to a writer.
-   */
-  public void write(Writer writer) {
-    try {
-      TransformerFactory transformerFactory = TransformerFactory.newInstance();
-      Transformer transf = transformerFactory.newTransformer();
-
-      transf.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
-      transf.setOutputProperty(OutputKeys.INDENT, "yes");
-      transf
-          .setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
-
-      transf.transform(new DOMSource(document), new StreamResult(writer));
-    } catch (TransformerException e) {
-      throw new ConfigurationException("Can't write the configuration xml", e);
-    }
-  }
-}
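A short usage sketch of the appender deleted above, round-tripping an empty document (the OZONE tag value is assumed for illustration):

    import java.io.StringWriter;

    public final class AppenderExample {
      public static void main(String[] args) {
        ConfigFileAppender appender = new ConfigFileAppender();
        appender.init();  // start from an empty <configuration/> document
        appender.addConfig("ozone.example.enabled", "true",
            "Example entry.", new ConfigTag[] {ConfigTag.OZONE});
        StringWriter out = new StringWriter();
        appender.write(out);
        System.out.println(out);  // pretty-printed <configuration> XML
      }
    }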
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
deleted file mode 100644
index 471b679..0000000
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-import javax.annotation.processing.AbstractProcessor;
-import javax.annotation.processing.Filer;
-import javax.annotation.processing.RoundEnvironment;
-import javax.annotation.processing.SupportedAnnotationTypes;
-import javax.lang.model.element.Element;
-import javax.lang.model.element.ElementKind;
-import javax.lang.model.element.TypeElement;
-import javax.tools.Diagnostic.Kind;
-import javax.tools.FileObject;
-import javax.tools.StandardLocation;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.NoSuchFileException;
-import java.util.Set;
-
-/**
- * Annotation processor to generate config fragments from Config annotations.
- */
-@SupportedAnnotationTypes("org.apache.hadoop.hdds.conf.ConfigGroup")
-public class ConfigFileGenerator extends AbstractProcessor {
-
-  public static final String OUTPUT_FILE_NAME = "ozone-default-generated.xml";
-
-  @Override
-  public boolean process(Set<? extends TypeElement> annotations,
-      RoundEnvironment roundEnv) {
-    if (roundEnv.processingOver()) {
-      return false;
-    }
-
-    Filer filer = processingEnv.getFiler();
-
-    try {
-
-      //load the existing generated config (if it exists)
-      ConfigFileAppender appender = new ConfigFileAppender();
-      try (InputStream input = filer
-          .getResource(StandardLocation.CLASS_OUTPUT, "",
-              OUTPUT_FILE_NAME).openInputStream()) {
-        appender.load(input);
-      } catch (FileNotFoundException | NoSuchFileException ex) {
-        appender.init();
-      }
-
-      Set<? extends Element> annotatedElements =
-          roundEnv.getElementsAnnotatedWith(ConfigGroup.class);
-      for (Element annotatedElement : annotatedElements) {
-        TypeElement configGroup = (TypeElement) annotatedElement;
-
-        //check if any of the setters are annotated with @Config
-        for (Element element : configGroup.getEnclosedElements()) {
-          if (element.getKind() == ElementKind.METHOD) {
-            if (element.getSimpleName().toString().startsWith("set")
-                && element.getAnnotation(Config.class) != null) {
-
-              //update ozone-default-generated.xml (OUTPUT_FILE_NAME)
-              Config configAnnotation = element.getAnnotation(Config.class);
-              ConfigGroup configGroupAnnotation =
-                  configGroup.getAnnotation(ConfigGroup.class);
-
-              String key = configGroupAnnotation.prefix() + "."
-                  + configAnnotation.key();
-
-              appender.addConfig(key,
-                  configAnnotation.defaultValue(),
-                  configAnnotation.description(),
-                  configAnnotation.tags());
-            }
-          }
-
-        }
-      }
-      FileObject resource = filer
-          .createResource(StandardLocation.CLASS_OUTPUT, "",
-              OUTPUT_FILE_NAME);
-
-      try (Writer writer = new OutputStreamWriter(
-          resource.openOutputStream(), StandardCharsets.UTF_8)) {
-        appender.write(writer);
-      }
-
-    } catch (IOException e) {
-      processingEnv.getMessager().printMessage(Kind.ERROR,
-          "Can't generate the config file from annotation: " + e);
-    }
-    return false;
-  }
-
-}
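
Note the key naming rule implemented above: a generated property name is the @ConfigGroup prefix joined to the @Config key with a dot, and only setters whose names start with "set" are picked up. A hypothetical pair (not part of this tree) to make that concrete:

    import org.apache.hadoop.hdds.conf.Config;
    import org.apache.hadoop.hdds.conf.ConfigGroup;
    import org.apache.hadoop.hdds.conf.ConfigTag;

    @ConfigGroup(prefix = "ozone.example")
    public class ExampleConfig {
      private int port;

      @Config(key = "port", defaultValue = "9876",
          description = "Example port.", tags = ConfigTag.MANAGEMENT)
      public void setPort(int port) {
        this.port = port;
      }
    }
    // The processor writes a property named "ozone.example.port" with the
    // default value "9876" into ozone-default-generated.xml.
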
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java
deleted file mode 100644
index dd24ccb..0000000
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * Marks a POJO that holds configuration variables.
- */
-@Retention(RetentionPolicy.RUNTIME)
-@Target(ElementType.TYPE)
-public @interface ConfigGroup {
-  String prefix();
-}
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
deleted file mode 100644
index de50d2a..0000000
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-/**
- * Available config tags.
- * <p>
- * Note: the values are defined in ozone-default.xml by hadoop.tags.custom.
- */
-public enum ConfigTag {
-  OZONE,
-  MANAGEMENT,
-  SECURITY,
-  PERFORMANCE,
-  DEBUG,
-  CLIENT,
-  SERVER,
-  OM,
-  SCM,
-  CRITICAL,
-  RATIS,
-  CONTAINER,
-  REQUIRED,
-  REST,
-  STORAGE,
-  PIPELINE,
-  STANDALONE,
-  S3GATEWAY
-}
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
deleted file mode 100644
index 23a8104..0000000
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-/**
- * Possible type of injected configuration.
- * <p>
- * AUTO means that the exact type will be identified based on the Java type of
- * the configuration field.
- */
-public enum ConfigType {
-  AUTO,
-  STRING,
-  BOOLEAN,
-  INT,
-  LONG,
-  TIME,
-  SIZE
-}
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java
deleted file mode 100644
index 2e68012..0000000
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-/**
- * Exception to throw in case of a configuration problem.
- */
-public class ConfigurationException extends RuntimeException {
-  public ConfigurationException() {
-  }
-
-  public ConfigurationException(String message) {
-    super(message);
-  }
-
-  public ConfigurationException(String message, Throwable cause) {
-    super(message, cause);
-  }
-}
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
deleted file mode 100644
index e789040..0000000
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Generic configuration annotations, tools and generators.
- */
-package org.apache.hadoop.hdds.conf;
diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java
deleted file mode 100644
index 2dd26696..0000000
--- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * Example configuration to test the configuration injection.
- */
-@ConfigGroup(prefix = "ozone.scm.client")
-public class ConfigurationExample {
-
-  private String clientAddress;
-
-  private String bindHost;
-
-  private boolean compressionEnabled;
-
-  private int port = 1234;
-
-  private long waitTime = 1;
-
-  @Config(key = "address", defaultValue = "localhost", description = "Client "
-      + "addres (To test string injection).", tags = ConfigTag.MANAGEMENT)
-  public void setClientAddress(String clientAddress) {
-    this.clientAddress = clientAddress;
-  }
-
-  @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Bind "
-      + "host(To test string injection).", tags = ConfigTag.MANAGEMENT)
-  public void setBindHost(String bindHost) {
-    this.bindHost = bindHost;
-  }
-
-  @Config(key = "compression.enabled", defaultValue = "true", description =
-      "Compression enabled. (Just to test boolean flag)", tags =
-      ConfigTag.MANAGEMENT)
-  public void setCompressionEnabled(boolean compressionEnabled) {
-    this.compressionEnabled = compressionEnabled;
-  }
-
-  @Config(key = "port", defaultValue = "1234", description = "Port number "
-      + "config (To test in injection)", tags = ConfigTag.MANAGEMENT)
-  public void setPort(int port) {
-    this.port = port;
-  }
-
-  @Config(key = "wait", type = ConfigType.TIME, timeUnit =
-      TimeUnit.SECONDS, defaultValue = "30m", description = "Wait time (To "
-      + "test TIME config type)", tags = ConfigTag.MANAGEMENT)
-  public void setWaitTime(long waitTime) {
-    this.waitTime = waitTime;
-  }
-
-  public String getClientAddress() {
-    return clientAddress;
-  }
-
-  public String getBindHost() {
-    return bindHost;
-  }
-
-  public boolean isCompressionEnabled() {
-    return compressionEnabled;
-  }
-
-  public int getPort() {
-    return port;
-  }
-
-  public long getWaitTime() {
-    return waitTime;
-  }
-}
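
The group above was consumed through configuration injection: every @Config setter is called with the resolved value, falling back to the declared default. A sketch, assuming the OzoneConfiguration#getObject entry point of the removed framework (that class is not part of this hunk):

    import org.apache.hadoop.hdds.conf.ConfigurationExample;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public class InjectionSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.set("ozone.scm.client.address", "scm.example.com");
        ConfigurationExample c = conf.getObject(ConfigurationExample.class);
        System.out.println(c.getClientAddress()); // scm.example.com
        System.out.println(c.getPort());          // 1234 (default)
      }
    }
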
diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java
deleted file mode 100644
index 0edb01a..0000000
--- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-import java.io.StringWriter;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test the utility which loads/writes the config file fragments.
- */
-public class TestConfigFileAppender {
-
-  @Test
-  public void testInit() {
-    ConfigFileAppender appender = new ConfigFileAppender();
-
-    appender.init();
-
-    appender.addConfig("hadoop.scm.enabled", "true", "desc",
-        new ConfigTag[] {ConfigTag.OZONE, ConfigTag.SECURITY});
-
-    StringWriter builder = new StringWriter();
-    appender.write(builder);
-
-    Assert.assertTrue("Generated config should contain property key entry",
-        builder.toString().contains("<name>hadoop.scm.enabled</name>"));
-
-    Assert.assertTrue("Generated config should contain tags",
-        builder.toString().contains("<tag>OZONE, SECURITY</tag>"));
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
deleted file mode 100644
index e8b310d..0000000
--- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Testing configuration tools.
- */
-package org.apache.hadoop.hdds.conf;
diff --git a/hadoop-hdds/config/src/test/resources/META-INF/services/javax.annotation.processing.Processor b/hadoop-hdds/config/src/test/resources/META-INF/services/javax.annotation.processing.Processor
deleted file mode 100644
index f29efda..0000000
--- a/hadoop-hdds/config/src/test/resources/META-INF/services/javax.annotation.processing.Processor
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.hdds.conf.ConfigFileGenerator
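
This ServiceLoader entry is what lets plain javac pick up ConfigFileGenerator automatically during test compilation. The same processor can also be named explicitly; a sketch using the javax.tools API (paths are hypothetical):

    import javax.tools.JavaCompiler;
    import javax.tools.ToolProvider;

    public class RunProcessorSketch {
      public static void main(String[] args) {
        JavaCompiler javac = ToolProvider.getSystemJavaCompiler();
        // Equivalent to the ServiceLoader discovery above, made explicit:
        int rc = javac.run(null, null, null,
            "-processor", "org.apache.hadoop.hdds.conf.ConfigFileGenerator",
            "-d", "target/classes",
            "src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java");
        System.out.println(rc == 0 ? "generated" : "compilation failed");
      }
    }
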
diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
deleted file mode 100644
index 18128e8..0000000
--- a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<FindBugsFilter>
-  <Match>
-    <Package name="org.apache.hadoop.hdds.protocol.proto"/>
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.ozone.container.common.volume.AbstractFuture" />
-    <Bug pattern="DLS_DEAD_STORE_OF_CLASS_LITERAL" />
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.ozone.container.common.volume.AbstractFuture" />
-    <Bug pattern="DLS_DEAD_LOCAL_STORE" />
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.ozone.container.common.volume.AbstractFuture" />
-    <Bug pattern="NS_DANGEROUS_NON_SHORT_CIRCUIT" />
-  </Match>
-</FindBugsFilter>
diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml
deleted file mode 100644
index 0eef961..0000000
--- a/hadoop-hdds/container-service/pom.xml
+++ /dev/null
@@ -1,103 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdds</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-hdds-container-service</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Container Service</description>
-  <name>Apache Hadoop HDDS Container Service</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-framework</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>io.dropwizard.metrics</groupId>
-      <artifactId>metrics-core</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <version>2.2.0</version>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.yaml</groupId>
-      <artifactId>snakeyaml</artifactId>
-      <version>1.16</version>
-    </dependency>
-    <dependency>
-      <groupId>com.github.spotbugs</groupId>
-      <artifactId>spotbugs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
-        <executions>
-          <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
-            <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>
-                  ${basedir}/../../hadoop-hdds/common/src/main/proto/
-                </param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>StorageContainerDatanodeProtocol.proto</include>
-                </includes>
-              </source>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
deleted file mode 100644
index c1997d6..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
+++ /dev/null
@@ -1,384 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.base.Strings;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_HEARTBEAT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_HEARTBEAT_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DEADNODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DEADNODE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_STALENODE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdds.HddsUtils.*;
-import static org.apache.hadoop.hdds.server.ServerUtils.sanitizeUserArgs;
-
-/**
- * Stateless HDDS helper functions for server-side components.
- */
-public final class HddsServerUtil {
-
-  private HddsServerUtil() {
-  }
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      HddsServerUtil.class);
-
-  /**
-   * Retrieve the socket address that should be used by DataNodes to connect
-   * to the SCM.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM service endpoint.
-   */
-  public static InetSocketAddress getScmAddressForDataNodes(
-      Configuration conf) {
-    // We try the following settings in decreasing priority to retrieve the
-    // target host.
-    // - OZONE_SCM_DATANODE_ADDRESS_KEY
-    // - OZONE_SCM_CLIENT_ADDRESS_KEY
-    // - OZONE_SCM_NAMES
-    //
-    Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      // Fallback to Ozone SCM names.
-      Collection<InetSocketAddress> scmAddresses = getSCMAddresses(conf);
-      if (scmAddresses.isEmpty()) {
-        throw new IllegalArgumentException(
-            ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY +
-                " must be defined. See" +
-                " https://wiki.apache.org/hadoop/Ozone#Configuration "
-                + "for details on configuring Ozone.");
-      }
-      if (scmAddresses.size() > 1) {
-        throw new IllegalArgumentException(
-            ScmConfigKeys.OZONE_SCM_NAMES +
-                " must contain a single hostname. Multiple SCM hosts are " +
-                "currently unsupported");
-      }
-      host = Optional.of(scmAddresses.iterator().next().getHostName());
-    }
-
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY);
-
-    InetSocketAddress addr = NetUtils.createSocketAddr(host.get() + ":" +
-        port.orElse(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-
-    return addr;
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM client endpoint.
-   */
-  public static InetSocketAddress getScmClientBindAddress(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY);
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.orElse(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT) + ":" +
-            port.orElse(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM Block service.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM block client endpoint.
-   */
-  public static InetSocketAddress getScmBlockClientBindAddress(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY);
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT)
-            + ":"
-            + port.orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by scm security server to
-   * service clients.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM security service.
-   */
-  public static InetSocketAddress getScmSecurityInetAddress(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY);
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.orElse(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT)
-            + ":" + port
-            .orElse(conf.getInt(ScmConfigKeys
-                    .OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
-                ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT)));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by DataNodes to connect
-   * to the SCM.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM service endpoint.
-   */
-  public static InetSocketAddress getScmDataNodeBindAddress(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY);
-
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.orElse(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_DEFAULT) + ":" +
-            port.orElse(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-  }
-
-
-  /**
-   * Returns the interval in which the heartbeat processor thread runs.
-   *
-   * @param conf - Configuration
-   * @return long in Milliseconds.
-   */
-  public static long getScmheartbeatCheckerInterval(Configuration conf) {
-    return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
-        ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Heartbeat Interval - Defines the heartbeat frequency from a datanode to
-   * SCM.
-   *
-   * @param conf - Ozone Config
- * @return - HB interval in milliseconds.
-   */
-  public static long getScmHeartbeatInterval(Configuration conf) {
-    return conf.getTimeDuration(HDDS_HEARTBEAT_INTERVAL,
-        HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Get the Stale Node interval, which is used by SCM to flag a datanode as
- * stale if the heartbeat from that node has been missing for this duration.
-   *
-   * @param conf - Configuration.
-   * @return - Long, Milliseconds to wait before flagging a node as stale.
-   */
-  public static long getStaleNodeInterval(Configuration conf) {
-
-    long staleNodeIntervalMs =
-        conf.getTimeDuration(OZONE_SCM_STALENODE_INTERVAL,
-            OZONE_SCM_STALENODE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
-
-    long heartbeatThreadFrequencyMs = getScmheartbeatCheckerInterval(conf);
-
-    long heartbeatIntervalMs = getScmHeartbeatInterval(conf);
-
-
-    // Make sure that StaleNodeInterval is configured way above the frequency
-    // at which we run the heartbeat thread.
-    //
-    // Here we check that staleNodeInterval is at least five times more than the
-    // frequency at which the accounting thread is going to run.
-    staleNodeIntervalMs = sanitizeUserArgs(OZONE_SCM_STALENODE_INTERVAL,
-        staleNodeIntervalMs, OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
-        heartbeatThreadFrequencyMs, 5, 1000);
-
-    // Make sure that stale node value is greater than configured value that
-    // datanodes are going to send HBs.
-    staleNodeIntervalMs = sanitizeUserArgs(OZONE_SCM_STALENODE_INTERVAL,
-        staleNodeIntervalMs, HDDS_HEARTBEAT_INTERVAL, heartbeatIntervalMs, 3,
-        1000);
-    return staleNodeIntervalMs;
-  }
-
-  /**
-   * Gets the interval for dead node flagging. This has to be a value that is
- * greater than the stale node value, and by transitivity we also know
- * that this value is greater than both the heartbeat interval and the
- * heartbeat processing interval.
-   *
-   * @param conf - Configuration.
-   * @return - the interval for dead node flagging.
-   */
-  public static long getDeadNodeInterval(Configuration conf) {
-    long staleNodeIntervalMs = getStaleNodeInterval(conf);
-    long deadNodeIntervalMs = conf.getTimeDuration(OZONE_SCM_DEADNODE_INTERVAL,
-        OZONE_SCM_DEADNODE_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
-
-    // Make sure that dead nodes Ms is at least twice the time for staleNodes
-    // with a max of 1000 times the staleNodes.
-    return sanitizeUserArgs(OZONE_SCM_DEADNODE_INTERVAL, deadNodeIntervalMs,
-        OZONE_SCM_STALENODE_INTERVAL, staleNodeIntervalMs, 2, 1000);
-  }
-
-  /**
-   * Timeout value for the RPC from Datanode to SCM, primarily used for
-   * Heartbeats and container reports.
-   *
-   * @param conf - Ozone Config
-   * @return - Rpc timeout in Milliseconds.
-   */
-  public static long getScmRpcTimeOutInMilliseconds(Configuration conf) {
-    return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT,
-        OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Log Warn interval.
-   *
-   * @param conf - Ozone Config
-   * @return - Log warn interval.
-   */
-  public static int getLogWarnInterval(Configuration conf) {
-    return conf.getInt(OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT,
-        OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT);
-  }
-
-  /**
- * Returns the container port.
-   * @param conf - Conf
-   * @return port number.
-   */
-  public static int getContainerPort(Configuration conf) {
-    return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-  }
-
-
-  /**
-   * Return the list of service addresses for the Ozone SCM. This method is used
-   * by the DataNodes to determine the service instances to connect to.
-   *
-   * @param conf
-   * @return list of SCM service addresses.
-   */
-  public static Map<String, ? extends Map<String, InetSocketAddress>>
-      getScmServiceRpcAddresses(Configuration conf) {
-
-    final Map<String, InetSocketAddress> serviceInstances = new HashMap<>();
-    serviceInstances.put(OZONE_SCM_SERVICE_INSTANCE_ID,
-        getScmAddressForDataNodes(conf));
-
-    final Map<String, Map<String, InetSocketAddress>> services =
-        new HashMap<>();
-    services.put(OZONE_SCM_SERVICE_ID, serviceInstances);
-    return services;
-  }
-
-  public static String getOzoneDatanodeRatisDirectory(Configuration conf) {
-    String storageDir = conf.get(
-            OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
-
-    if (Strings.isNullOrEmpty(storageDir)) {
-      storageDir = getDefaultRatisDirectory(conf);
-    }
-    return storageDir;
-  }
-
-  public static String getDefaultRatisDirectory(Configuration conf) {
-    LOG.warn("Storage directory for Ratis is not configured. It is a good " +
-            "idea to map this to an SSD disk. Falling back to {}",
-        HddsConfigKeys.OZONE_METADATA_DIRS);
-    File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf);
-    return (new File(metaDirPath, "ratis")).getPath();
-  }
-
-  /**
-   * Get the path for datanode id file.
-   *
-   * @param conf - Configuration
-   * @return the path of datanode id as string
-   */
-  public static String getDatanodeIdFilePath(Configuration conf) {
-    String dataNodeIDDirPath =
-        conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR);
-    if (dataNodeIDDirPath == null) {
-      File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf);
-      if (metaDirPath == null) {
-        // This means the metadata directory was not found; in theory this
-        // should not happen, because startup should have failed earlier.
-        throw new IllegalArgumentException("Unable to locate meta data " +
-            "directory when getting datanode id path");
-      }
-      dataNodeIDDirPath = metaDirPath.toString();
-    }
-    // Use default datanode id file name for file path
-    return new File(dataNodeIDDirPath,
-        OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT).toString();
-  }
-}
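
All of the address helpers above share one fallback pattern: take the host from the most specific key that is set, then fill in the default port when none is configured. A brief usage sketch (key names from ScmConfigKeys; the host is illustrative):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.HddsServerUtil;
    import org.apache.hadoop.hdds.scm.ScmConfigKeys;

    public class ScmAddressSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Host from the datanode key; no port given, so the default
        // datanode port from ScmConfigKeys is applied.
        conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY,
            "scm.example.com");
        InetSocketAddress scm = HddsServerUtil.getScmAddressForDataNodes(conf);
        System.out.println(scm); // roughly scm.example.com:<default port>
      }
    }
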
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java
deleted file mode 100644
index 4e52046..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-/**
- * This is a class that tracks versions of SCM.
- */
-public final class VersionInfo {
-
-  // We will just be normal and use positive counting numbers for versions.
-  private final static VersionInfo[] VERSION_INFOS =
-      {new VersionInfo("First version of SCM", 1)};
-
-
-  public static final String DESCRIPTION_KEY = "Description";
-  private final String description;
-  private final int version;
-
-  /**
-   * Never created outside this class.
-   *
-   * @param description -- description
-   * @param version     -- version number
-   */
-  private VersionInfo(String description, int version) {
-    this.description = description;
-    this.version = version;
-  }
-
-  /**
-   * Returns all versions.
-   *
-   * @return Version info array.
-   */
-  public static VersionInfo[] getAllVersions() {
-    return VERSION_INFOS.clone();
-  }
-
-  /**
-   * Returns the latest version.
-   *
-   * @return versionInfo
-   */
-  public static VersionInfo getLatestVersion() {
-    return VERSION_INFOS[VERSION_INFOS.length - 1];
-  }
-
-  /**
-   * Return description.
-   *
-   * @return String
-   */
-  public String getDescription() {
-    return description;
-  }
-
-  /**
-   * Return the version.
-   *
-   * @return int.
-   */
-  public int getVersion() {
-    return version;
-  }
-
-}
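
For completeness, the class was consumed as a simple lookup:

    import org.apache.hadoop.hdds.scm.VersionInfo;

    public class VersionSketch {
      public static void main(String[] args) {
        // With the table above this prints "1: First version of SCM".
        VersionInfo latest = VersionInfo.getLatestVersion();
        System.out.println(
            latest.getVersion() + ": " + latest.getDescription());
      }
    }
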
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
deleted file mode 100644
index 5905468..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java
deleted file mode 100644
index 3dcfcfe..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.server.BaseHttpServer;
-
-/**
- * Simple http server to provide basic monitoring for hdds datanode.
- * <p>
- * This server is used to access default /conf /prom /prof endpoints.
- */
-public class HddsDatanodeHttpServer extends BaseHttpServer {
-
-  public HddsDatanodeHttpServer(Configuration conf) throws IOException {
-    super(conf, "hddsDatanode");
-  }
-
-  @Override
-  protected String getHttpAddressKey() {
-    return HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY;
-  }
-
-  @Override
-  protected String getHttpBindHostKey() {
-    return HddsConfigKeys.HDDS_DATANODE_HTTP_BIND_HOST_KEY;
-  }
-
-  @Override
-  protected String getHttpsAddressKey() {
-    return HddsConfigKeys.HDDS_DATANODE_HTTPS_ADDRESS_KEY;
-  }
-
-  @Override
-  protected String getHttpsBindHostKey() {
-    return HddsConfigKeys.HDDS_DATANODE_HTTPS_BIND_HOST_KEY;
-  }
-
-  @Override
-  protected String getBindHostDefault() {
-    return HddsConfigKeys.HDDS_DATANODE_HTTP_BIND_HOST_DEFAULT;
-  }
-
-  @Override
-  protected int getHttpBindPortDefault() {
-    return HddsConfigKeys.HDDS_DATANODE_HTTP_BIND_PORT_DEFAULT;
-  }
-
-  @Override
-  protected int getHttpsBindPortDefault() {
-    return HddsConfigKeys.HDDS_DATANODE_HTTPS_BIND_PORT_DEFAULT;
-  }
-
-  @Override
-  protected String getKeytabFile() {
-    return HddsConfigKeys.HDDS_DATANODE_HTTP_KERBEROS_KEYTAB_FILE_KEY;
-  }
-
-  @Override
-  protected String getSpnegoPrincipal() {
-    return HddsConfigKeys.HDDS_DATANODE_HTTP_KERBEROS_PRINCIPAL_KEY;
-  }
-
-  @Override
-  protected String getEnabledKey() {
-    return HddsConfigKeys.HDDS_DATANODE_HTTP_ENABLED_KEY;
-  }
-}
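
As seen later in HddsDatanodeService#start(), the server is constructed with the live configuration and started; all endpoint wiring comes from the HddsConfigKeys overrides above. A minimal sketch (the stop() call assumes the usual BaseHttpServer counterpart to start()):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.HddsDatanodeHttpServer;

    public class HttpServerSketch {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        HddsDatanodeHttpServer httpServer = new HddsDatanodeHttpServer(conf);
        httpServer.start(); // serves the default /conf, /prom, /prof pages
        // ... run until shutdown, then:
        httpServer.stop();
      }
    }
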
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
deleted file mode 100644
index b13c37d..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ /dev/null
@@ -1,495 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.util.ServicePlugin;
-import org.apache.hadoop.util.StringUtils;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.security.KeyPair;
-import java.security.cert.CertificateException;
-import java.util.Arrays;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec.getX509Certificate;
-import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
-/**
- * Datanode service plugin to start the HDDS container services.
- */
-
-@Command(name = "ozone datanode",
-    hidden = true, description = "Start the datanode for ozone",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true)
-public class HddsDatanodeService extends GenericCli implements ServicePlugin {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      HddsDatanodeService.class);
-
-  private OzoneConfiguration conf;
-  private DatanodeDetails datanodeDetails;
-  private DatanodeStateMachine datanodeStateMachine;
-  private List<ServicePlugin> plugins;
-  private CertificateClient dnCertClient;
-  private String component;
-  private HddsDatanodeHttpServer httpServer;
-  private boolean printBanner;
-  private String[] args;
-  private volatile AtomicBoolean isStopped = new AtomicBoolean(false);
-
-  public HddsDatanodeService(boolean printBanner, String[] args) {
-    this.printBanner = printBanner;
-    this.args = args != null ? Arrays.copyOf(args, args.length) : null;
-  }
-
-  /**
- * Create a Datanode instance based on the supplied command-line arguments.
-   * <p>
-   * This method is intended for unit tests only. It suppresses the
-   * startup/shutdown message and skips registering Unix signal handlers.
-   *
-   * @param args      command line arguments.
-   * @return Datanode instance
-   */
-  @VisibleForTesting
-  public static HddsDatanodeService createHddsDatanodeService(
-      String[] args) {
-    return createHddsDatanodeService(args, false);
-  }
-
-  /**
- * Create a Datanode instance based on the supplied command-line arguments.
-   *
-   * @param args        command line arguments.
-   * @param printBanner if true, then log a verbose startup message.
-   * @return Datanode instance
-   */
-  private static HddsDatanodeService createHddsDatanodeService(
-      String[] args, boolean printBanner) {
-    return new HddsDatanodeService(printBanner, args);
-  }
-
-  public static void main(String[] args) {
-    try {
-      HddsDatanodeService hddsDatanodeService =
-          createHddsDatanodeService(args, true);
-      hddsDatanodeService.run(args);
-    } catch (Throwable e) {
-      LOG.error("Exception in HddsDatanodeService.", e);
-      terminate(1, e);
-    }
-  }
-
-  public static Logger getLogger() {
-    return LOG;
-  }
-
-  @Override
-  public Void call() throws Exception {
-    if (printBanner) {
-      StringUtils
-          .startupShutdownMessage(HddsDatanodeService.class, args, LOG);
-    }
-    start(createOzoneConfiguration());
-    join();
-    return null;
-  }
-
-  public void setConfiguration(OzoneConfiguration configuration) {
-    this.conf = configuration;
-  }
-
-  /**
-   * Starts HddsDatanode services.
-   *
-   * @param service The service instance invoking this method
-   */
-  @Override
-  public void start(Object service) {
-    if (service instanceof Configurable) {
-      start(new OzoneConfiguration(((Configurable) service).getConf()));
-    } else {
-      start(new OzoneConfiguration());
-    }
-  }
-
-  public void start(OzoneConfiguration configuration) {
-    setConfiguration(configuration);
-    start();
-  }
-
-  public void start() {
-    OzoneConfiguration.activate();
-    HddsUtils.initializeMetrics(conf, "HddsDatanode");
-    if (HddsUtils.isHddsEnabled(conf)) {
-      try {
-        String hostname = HddsUtils.getHostName(conf);
-        String ip = InetAddress.getByName(hostname).getHostAddress();
-        datanodeDetails = initializeDatanodeDetails();
-        datanodeDetails.setHostName(hostname);
-        datanodeDetails.setIpAddress(ip);
-        TracingUtil.initTracing(
-            "HddsDatanodeService." + datanodeDetails.getUuidString()
-                .substring(0, 8));
-        LOG.info("HddsDatanodeService host:{} ip:{}", hostname, ip);
-        // Authenticate Hdds Datanode service if security is enabled
-        if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
-          component = "dn-" + datanodeDetails.getUuidString();
-
-          dnCertClient = new DNCertificateClient(new SecurityConfig(conf),
-              datanodeDetails.getCertSerialId());
-
-          if (SecurityUtil.getAuthenticationMethod(conf).equals(
-              UserGroupInformation.AuthenticationMethod.KERBEROS)) {
-            LOG.info("Ozone security is enabled. Attempting login for Hdds " +
-                    "Datanode user. Principal: {},keytab: {}", conf.get(
-                DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY),
-                conf.get(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY));
-
-            UserGroupInformation.setConfiguration(conf);
-
-            SecurityUtil.login(conf, DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY,
-                DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hostname);
-          } else {
-            throw new AuthenticationException(SecurityUtil.
-                getAuthenticationMethod(conf) + " authentication method not " +
-                "supported. Datanode user login failed.");
-          }
-          LOG.info("Hdds Datanode login successful.");
-        }
-        if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
-          initializeCertificateClient(conf);
-        }
-        datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, conf,
-            dnCertClient, this::terminateDatanode);
-        try {
-          httpServer = new HddsDatanodeHttpServer(conf);
-          httpServer.start();
-        } catch (Exception ex) {
-          LOG.error("HttpServer failed to start.", ex);
-        }
-        startPlugins();
-        // Starting HDDS Daemons
-        datanodeStateMachine.startDaemon();
-      } catch (IOException e) {
-        throw new RuntimeException("Can't start the HDDS datanode plugin", e);
-      } catch (AuthenticationException ex) {
-        throw new RuntimeException("Fail to authentication when starting" +
-            " HDDS datanode plugin", ex);
-      }
-    }
-  }
-
-  /**
-   * Initializes secure Datanode.
-   * */
-  @VisibleForTesting
-  public void initializeCertificateClient(OzoneConfiguration config)
-      throws IOException {
-    LOG.info("Initializing secure Datanode.");
-
-    CertificateClient.InitResponse response = dnCertClient.init();
-    LOG.info("Init response: {}", response);
-    switch (response) {
-    case SUCCESS:
-      LOG.info("Initialization successful, case:{}.", response);
-      break;
-    case GETCERT:
-      getSCMSignedCert(config);
-      LOG.info("Successfully stored SCM signed certificate, case:{}.",
-          response);
-      break;
-    case FAILURE:
-      LOG.error("DN security initialization failed, case:{}.", response);
-      throw new RuntimeException("DN security initialization failed.");
-    case RECOVER:
-      LOG.error("DN security initialization failed, case:{}. OM certificate " +
-          "is missing.", response);
-      throw new RuntimeException("DN security initialization failed.");
-    default:
-      LOG.error("DN security initialization failed. Init response: {}",
-          response);
-      throw new RuntimeException("DN security initialization failed.");
-    }
-  }
-
-  /**
-   * Get SCM signed certificate and store it using certificate client.
-   * @param config
-   * */
-  private void getSCMSignedCert(OzoneConfiguration config) {
-    try {
-      PKCS10CertificationRequest csr = getCSR(config);
-      // TODO: For SCM CA we should fetch certificate from multiple SCMs.
-      SCMSecurityProtocolClientSideTranslatorPB secureScmClient =
-          HddsUtils.getScmSecurityClient(config);
-      SCMGetCertResponseProto response = secureScmClient.
-          getDataNodeCertificateChain(datanodeDetails.getProtoBufMessage(),
-              getEncodedString(csr));
-      // Persist certificates.
-      if (response.hasX509CACertificate()) {
-        String pemEncodedCert = response.getX509Certificate();
-        dnCertClient.storeCertificate(pemEncodedCert, true);
-        dnCertClient.storeCertificate(response.getX509CACertificate(), true,
-            true);
-        datanodeDetails.setCertSerialId(getX509Certificate(pemEncodedCert).
-            getSerialNumber().toString());
-        persistDatanodeDetails(datanodeDetails);
-      } else {
-        throw new RuntimeException("Unable to retrieve datanode certificate " +
-            "chain");
-      }
-    } catch (IOException | CertificateException e) {
-      LOG.error("Error while storing SCM signed certificate.", e);
-      throw new RuntimeException(e);
-    }
-  }
-
-  /**
-   * Creates a certificate signing request (CSR) for this datanode.
-   * @param config ozone configuration
-   */
-  @VisibleForTesting
-  public PKCS10CertificationRequest getCSR(Configuration config)
-      throws IOException {
-    CertificateSignRequest.Builder builder = dnCertClient.getCSRBuilder();
-    KeyPair keyPair = new KeyPair(dnCertClient.getPublicKey(),
-        dnCertClient.getPrivateKey());
-
-    String hostname = InetAddress.getLocalHost().getCanonicalHostName();
-    String subject = UserGroupInformation.getCurrentUser()
-        .getShortUserName() + "@" + hostname;
-
-    builder.setCA(false)
-        .setKey(keyPair)
-        .setConfiguration(config)
-        .setSubject(subject);
-
-    LOG.info("Creating csr for DN-> subject:{}", subject);
-    return builder.build();
-  }
-
-  /**
-   * Reads DatanodeDetails from the local ID file, or builds a new one with
-   * a random UUID if no ID file exists yet.
-   *
-   * @return DatanodeDetails
-   */
-  private DatanodeDetails initializeDatanodeDetails()
-      throws IOException {
-    String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf);
-    if (idFilePath == null || idFilePath.isEmpty()) {
-      LOG.error("A valid path is needed for config setting {}",
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR);
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR +
-          " must be defined. See" +
-          " https://wiki.apache.org/hadoop/Ozone#Configuration" +
-          " for details on configuring Ozone.");
-    }
-
-    Preconditions.checkNotNull(idFilePath);
-    File idFile = new File(idFilePath);
-    if (idFile.exists()) {
-      return ContainerUtils.readDatanodeDetailsFrom(idFile);
-    } else {
-      // There is no datanode.id file; this might be the first time the
-      // datanode is started.
-      String datanodeUuid = UUID.randomUUID().toString();
-      return DatanodeDetails.newBuilder().setUuid(datanodeUuid).build();
-    }
-  }
-
-  /**
-   * Persists DatanodeDetails to the local file system.
-   * @param dnDetails DatanodeDetails to persist
-   *
-   * @throws IOException if the ID file cannot be written
-   */
-  private void persistDatanodeDetails(DatanodeDetails dnDetails)
-      throws IOException {
-    String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf);
-    if (idFilePath == null || idFilePath.isEmpty()) {
-      LOG.error("A valid path is needed for config setting {}",
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR);
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR +
-          " must be defined. See" +
-          " https://wiki.apache.org/hadoop/Ozone#Configuration" +
-          " for details on configuring Ozone.");
-    }
-
-    Preconditions.checkNotNull(idFilePath);
-    File idFile = new File(idFilePath);
-    ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile);
-  }
-
-  /**
-   * Starts all the service plugins which are configured using
-   * OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY.
-   */
-  private void startPlugins() {
-    try {
-      plugins = conf.getInstances(HDDS_DATANODE_PLUGINS_KEY,
-          ServicePlugin.class);
-    } catch (RuntimeException e) {
-      String pluginsValue = conf.get(HDDS_DATANODE_PLUGINS_KEY);
-      LOG.error("Unable to load HDDS DataNode plugins. " +
-              "Specified list of plugins: {}",
-          pluginsValue, e);
-      throw e;
-    }
-    for (ServicePlugin plugin : plugins) {
-      try {
-        plugin.start(this);
-        LOG.info("Started plug-in {}", plugin);
-      } catch (Throwable t) {
-        LOG.warn("ServicePlugin {} could not be started", plugin, t);
-      }
-    }
-  }
-
-  /**
-   * Returns the OzoneConfiguration used by this HddsDatanodeService.
-   *
-   * @return OzoneConfiguration
-   */
-  public OzoneConfiguration getConf() {
-    return conf;
-  }
-
-  /**
-   * Return DatanodeDetails if set, return null otherwise.
-   *
-   * @return DatanodeDetails
-   */
-  @VisibleForTesting
-  public DatanodeDetails getDatanodeDetails() {
-    return datanodeDetails;
-  }
-
-  @VisibleForTesting
-  public DatanodeStateMachine getDatanodeStateMachine() {
-    return datanodeStateMachine;
-  }
-
-  public void join() {
-    if (datanodeStateMachine != null) {
-      try {
-        datanodeStateMachine.join();
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        LOG.info("Interrupted during StorageContainerManager join.");
-      }
-    }
-  }
-
-  public void terminateDatanode() {
-    stop();
-    terminate(1);
-  }
-
-
-  @Override
-  public void stop() {
-    if (!isStopped.get()) {
-      isStopped.set(true);
-      if (plugins != null) {
-        for (ServicePlugin plugin : plugins) {
-          try {
-            plugin.stop();
-            LOG.info("Stopped plug-in {}", plugin);
-          } catch (Throwable t) {
-            LOG.warn("ServicePlugin {} could not be stopped", plugin, t);
-          }
-        }
-      }
-      if (datanodeStateMachine != null) {
-        datanodeStateMachine.stopDaemon();
-      }
-      if (httpServer != null) {
-        try {
-          httpServer.stop();
-        } catch (Exception e) {
-          LOG.error("Stopping HttpServer is failed.", e);
-        }
-      }
-    }
-  }
-
-  @Override
-  public void close() {
-    if (plugins != null) {
-      for (ServicePlugin plugin : plugins) {
-        try {
-          plugin.close();
-        } catch (Throwable t) {
-          LOG.warn("ServicePlugin {} could not be closed", plugin, t);
-        }
-      }
-    }
-  }
-
-  @VisibleForTesting
-  public String getComponent() {
-    return component;
-  }
-
-  public CertificateClient getCertificateClient() {
-    return dnCertClient;
-  }
-
-  @VisibleForTesting
-  public void setCertificateClient(CertificateClient client) {
-    dnCertClient = client;
-  }
-}
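The certificate bootstrap removed above reduces to a four-way decision on the
init response. A minimal sketch, assuming the CertificateClient.InitResponse
enum from the deleted sources; fetchAndStoreCert() is a hypothetical stand-in
for getSCMSignedCert() above:

    void bootstrapCertificates(CertificateClient client) throws IOException {
      CertificateClient.InitResponse response = client.init();
      switch (response) {
      case SUCCESS:
        break;                  // keys and certificate already usable
      case GETCERT:
        fetchAndStoreCert();    // keys exist; fetch an SCM-signed certificate
        break;
      case FAILURE:             // keys or certificate are unusable
      case RECOVER:             // certificate state cannot be repaired locally
      default:
        throw new RuntimeException("DN security initialization failed.");
      }
    }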
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeStopService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeStopService.java
deleted file mode 100644
index 02c1431..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeStopService.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-/**
- * Interface which declares a method to stop HddsDatanodeService.
- */
-public interface HddsDatanodeStopService {
-
-  void stopService();
-}
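Because this is a single-method interface, it accepts method references, which
is presumably how this::terminateDatanode is wired into DatanodeStateMachine
in the service above. A minimal sketch of a caller that holds only the
interface (ShutdownHook is illustrative, not from the deleted sources):

    class ShutdownHook {
      private final HddsDatanodeStopService stopService;
      ShutdownHook(HddsDatanodeStopService stopService) {
        this.stopService = stopService;
      }
      void onFatalError() {
        stopService.stopService(); // delegates to whatever service registered
      }
    }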
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
deleted file mode 100644
index 2d58c39..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common;
-
-/**
- * Datanode layout version which describes information about the layout version
- * on the datanode.
- */
-public final class DataNodeLayoutVersion {
-
-  // Versions are plain positive integers, starting from 1.
-  private final static DataNodeLayoutVersion[] VERSION_INFOS =
-      {new DataNodeLayoutVersion(1, "HDDS Datanode LayOut Version 1")};
-
-  private final String description;
-  private final int version;
-
-  /**
-   * Never created outside this class.
-   *
-   * @param description -- description
-   * @param version     -- version number
-   */
-  private DataNodeLayoutVersion(int version, String description) {
-    this.description = description;
-    this.version = version;
-  }
-
-  /**
-   * Returns all versions.
-   *
-   * @return Version info array.
-   */
-  public static DataNodeLayoutVersion[] getAllVersions() {
-    return VERSION_INFOS.clone();
-  }
-
-  /**
-   * Returns the latest version.
-   *
-   * @return versionInfo
-   */
-  public static DataNodeLayoutVersion getLatestVersion() {
-    return VERSION_INFOS[VERSION_INFOS.length - 1];
-  }
-
-  /**
-   * Return description.
-   *
-   * @return String
-   */
-  public String getDescription() {
-    return description;
-  }
-
-  /**
-   * Return the version.
-   *
-   * @return int.
-   */
-  public int getVersion() {
-    return version;
-  }
-
-}
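A brief usage sketch for the version table above; with only one entry
registered, getAllVersions() and getLatestVersion() both resolve to version 1:

    // Assumes only the DataNodeLayoutVersion class above.
    DataNodeLayoutVersion latest = DataNodeLayoutVersion.getLatestVersion();
    System.out.println(latest.getVersion() + ": " + latest.getDescription());
    // -> 1: HDDS Datanode LayOut Version 1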
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
deleted file mode 100644
index 9ea4adf..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableQuantiles;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-
-/**
- *
- * This class maintains the various Storage Container DataNode statistics
- * and publishes them through the metrics interfaces.
- * This also registers the JMX MBean for RPC.
- * <p>
- * This class has a number of metrics variables that are publicly accessible;
- * these variables (objects) have methods to update their values;
- * for example:
- * <p> {@link #numOps}.incr()
- *
- */
-@InterfaceAudience.Private
-@Metrics(about="Storage Container DataNode Metrics", context="dfs")
-public class ContainerMetrics {
-  public static final String STORAGE_CONTAINER_METRICS =
-      "StorageContainerMetrics";
-  @Metric private MutableCounterLong numOps;
-  private MutableCounterLong[] numOpsArray;
-  private MutableCounterLong[] opsBytesArray;
-  private MutableRate[] opsLatency;
-  private MutableQuantiles[][] opsLatQuantiles;
-  private MetricsRegistry registry = null;
-
-  public ContainerMetrics(int[] intervals) {
-    int numEnumEntries = ContainerProtos.Type.values().length;
-    final int len = intervals.length;
-    this.numOpsArray = new MutableCounterLong[numEnumEntries];
-    this.opsBytesArray = new MutableCounterLong[numEnumEntries];
-    this.opsLatency = new MutableRate[numEnumEntries];
-    this.opsLatQuantiles = new MutableQuantiles[numEnumEntries][len];
-    this.registry = new MetricsRegistry("StorageContainerMetrics");
-    for (int i = 0; i < numEnumEntries; i++) {
-      numOpsArray[i] = registry.newCounter(
-          "num" + ContainerProtos.Type.forNumber(i + 1),
-          "number of " + ContainerProtos.Type.forNumber(i + 1) + " ops",
-          (long) 0);
-      opsBytesArray[i] = registry.newCounter(
-          "bytes" + ContainerProtos.Type.forNumber(i + 1),
-          "bytes used by " + ContainerProtos.Type.forNumber(i + 1) + "op",
-          (long) 0);
-      opsLatency[i] = registry.newRate(
-          "latency" + ContainerProtos.Type.forNumber(i + 1),
-          ContainerProtos.Type.forNumber(i + 1) + " op");
-
-      for (int j = 0; j < len; j++) {
-        int interval = intervals[j];
-        String quantileName = ContainerProtos.Type.forNumber(i + 1) + "Nanos"
-            + interval + "s";
-        opsLatQuantiles[i][j] = registry.newQuantiles(quantileName,
-            "latency of Container ops", "ops", "latency", interval);
-      }
-    }
-  }
-
-  public static ContainerMetrics create(Configuration conf) {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    // Percentile measurement is off by default (no intervals configured).
-    int[] intervals =
-             conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
-    return ms.register(STORAGE_CONTAINER_METRICS,
-                       "Storage Container Node Metrics",
-                       new ContainerMetrics(intervals));
-  }
-
-  public static void remove() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(STORAGE_CONTAINER_METRICS);
-  }
-
-  public void incContainerOpsMetrics(ContainerProtos.Type type) {
-    numOps.incr();
-    numOpsArray[type.ordinal()].incr();
-  }
-
-  public long getContainerOpsMetrics(ContainerProtos.Type type){
-    return numOpsArray[type.ordinal()].value();
-  }
-
-  public void incContainerOpsLatencies(ContainerProtos.Type type,
-                                       long latencyNanos) {
-    opsLatency[type.ordinal()].add(latencyNanos);
-    for (MutableQuantiles q: opsLatQuantiles[type.ordinal()]) {
-      q.add(latencyNanos);
-    }
-  }
-
-  public void incContainerBytesStats(ContainerProtos.Type type, long bytes) {
-    opsBytesArray[type.ordinal()].incr(bytes);
-  }
-
-  public long getContainerBytesMetrics(ContainerProtos.Type type){
-    return opsBytesArray[type.ordinal()].value();
-  }
-}
\ No newline at end of file
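A hedged sketch of the metrics lifecycle defined above; conf stands for any
Hadoop Configuration and chunkLen for the size of the chunk just written, and
it assumes ContainerProtos.Type defines WriteChunk as the protobuf suggests:

    ContainerMetrics metrics = ContainerMetrics.create(conf);
    long start = System.nanoTime();
    // ... serve one WriteChunk request ...
    metrics.incContainerOpsMetrics(ContainerProtos.Type.WriteChunk);
    metrics.incContainerBytesStats(ContainerProtos.Type.WriteChunk, chunkLen);
    metrics.incContainerOpsLatencies(ContainerProtos.Type.WriteChunk,
        System.nanoTime() - start);
    ContainerMetrics.remove();   // unregister the source on shutdown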
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
deleted file mode 100644
index ff6dec8..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import static org.apache.commons.io.FilenameUtils.removeExtension;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_CHECKSUM_ERROR;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_ALGORITHM;
-import static org.apache.hadoop.ozone.container.common.impl.ContainerData.CHARSET_ENCODING;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.nio.file.Paths;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.yaml.snakeyaml.Yaml;
-
-import com.google.common.base.Preconditions;
-
-/**
- * A set of helper functions to create proper responses.
- */
-public final class ContainerUtils {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerUtils.class);
-
-  private ContainerUtils() {
-    //never constructed.
-  }
-
-  /**
-   * Returns a Container Command Response Builder with the specified result
-   * and message.
-   * @param request requestProto message.
-   * @param result result of the command.
-   * @param message response message.
-   * @return ContainerCommand Response Builder.
-   */
-  public static ContainerCommandResponseProto.Builder
-      getContainerCommandResponse(
-          ContainerCommandRequestProto request, Result result, String message) {
-    return ContainerCommandResponseProto.newBuilder()
-        .setCmdType(request.getCmdType())
-        .setTraceID(request.getTraceID())
-        .setResult(result)
-        .setMessage(message);
-  }
-
-  /**
-   * Returns a Container Command Response Builder. This call is used to build
-   * success responses. Calling function can add other fields to the response
-   * as required.
-   * @param request requestProto message.
-   * @return ContainerCommand Response Builder with result as SUCCESS.
-   */
-  public static ContainerCommandResponseProto.Builder getSuccessResponseBuilder(
-      ContainerCommandRequestProto request) {
-    return
-        ContainerCommandResponseProto.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setTraceID(request.getTraceID())
-            .setResult(Result.SUCCESS);
-  }
-
-  /**
-   * Returns a Container Command Response. This call is used for creating null
-   * success responses.
-   * @param request requestProto message.
-   * @return ContainerCommand Response with result as SUCCESS.
-   */
-  public static ContainerCommandResponseProto getSuccessResponse(
-      ContainerCommandRequestProto request) {
-    ContainerCommandResponseProto.Builder builder =
-        getContainerCommandResponse(request, Result.SUCCESS, "");
-    return builder.build();
-  }
-
-  /**
-   * We found a command type but no associated payload for the command. Hence
-   * return malformed Command as response.
-   *
-   * @param request - Protobuf message.
-   * @return ContainerCommandResponseProto - MALFORMED_REQUEST.
-   */
-  public static ContainerCommandResponseProto malformedRequest(
-      ContainerCommandRequestProto request) {
-    return getContainerCommandResponse(request, Result.MALFORMED_REQUEST,
-        "Cmd type does not match the payload.").build();
-  }
-
-  /**
-   * We found a command type that is not supported yet.
-   *
-   * @param request - Protobuf message.
-   * @return ContainerCommandResponseProto - UNSUPPORTED_REQUEST.
-   */
-  public static ContainerCommandResponseProto unsupportedRequest(
-      ContainerCommandRequestProto request) {
-    return getContainerCommandResponse(request, Result.UNSUPPORTED_REQUEST,
-        "Server does not support this command yet.").build();
-  }
-
-  /**
-   * Logs the error and returns a response to the caller.
-   *
-   * @param log - Logger
-   * @param ex - Exception
-   * @param request - Request Object
-   * @return Response
-   */
-  public static ContainerCommandResponseProto logAndReturnError(
-      Logger log, StorageContainerException ex,
-      ContainerCommandRequestProto request) {
-    log.info("Operation: {} : Trace ID: {} : Message: {} : Result: {}",
-        request.getCmdType().name(), request.getTraceID(),
-        ex.getMessage(), ex.getResult().getValueDescriptor().getName());
-    return getContainerCommandResponse(request, ex.getResult(), ex.getMessage())
-        .build();
-  }
-
-  /**
-   * Gets the container name from a container file.
-   *
-   * @param containerFile - File
-   * @return Name of the container.
-   */
-  public static String getContainerNameFromFile(File containerFile) {
-    Preconditions.checkNotNull(containerFile);
-    return Paths.get(containerFile.getParent()).resolve(
-        removeExtension(containerFile.getName())).toString();
-  }
-
-  public static long getContainerIDFromFile(File containerFile) {
-    Preconditions.checkNotNull(containerFile);
-    String containerID = getContainerNameFromFile(containerFile);
-    return Long.parseLong(containerID);
-  }
-
-  /**
-   * Verifies that this is indeed a new container.
-   *
-   * @param containerFile - Container File to verify
-   * @throws FileAlreadyExistsException
-   */
-  public static void verifyIsNewContainer(File containerFile) throws
-      FileAlreadyExistsException {
-    Logger log = LoggerFactory.getLogger(ContainerSet.class);
-    Preconditions.checkNotNull(containerFile,
-        "containerFile should not be null");
-    if (containerFile.getParentFile().exists()) {
-      log.error("Container already exists on disk. File: {}", containerFile
-          .toPath());
-      throw new FileAlreadyExistsException("container already exists on " +
-          "disk.");
-    }
-  }
-
-  public static String getContainerDbFileName(String containerName) {
-    return containerName + OzoneConsts.DN_CONTAINER_DB;
-  }
-
-  /**
-   * Persists a {@link DatanodeDetails} to a local file.
-   *
-   * @throws IOException when read/write error occurs
-   */
-  public synchronized static void writeDatanodeDetailsTo(
-      DatanodeDetails datanodeDetails, File path) throws IOException {
-    if (path.exists()) {
-      if (!path.delete() || !path.createNewFile()) {
-        throw new IOException("Unable to overwrite the datanode ID file.");
-      }
-    } else {
-      if (!path.getParentFile().exists() &&
-          !path.getParentFile().mkdirs()) {
-        throw new IOException("Unable to create datanode ID directories.");
-      }
-    }
-    DatanodeIdYaml.createDatanodeIdFile(datanodeDetails, path);
-  }
-
-  /**
-   * Read {@link DatanodeDetails} from a local ID file.
-   *
-   * @param path ID file local path
-   * @return {@link DatanodeDetails}
-   * @throws IOException If the id file is malformed or other I/O exceptions
-   */
-  public synchronized static DatanodeDetails readDatanodeDetailsFrom(File path)
-      throws IOException {
-    if (!path.exists()) {
-      throw new IOException("Datanode ID file not found.");
-    }
-    try {
-      return DatanodeIdYaml.readDatanodeIdFile(path);
-    } catch (IOException e) {
-      LOG.warn("Error loading DatanodeDetails yaml from " +
-          path.getAbsolutePath(), e);
-      // Try to load as protobuf before giving up
-      try (FileInputStream in = new FileInputStream(path)) {
-        return DatanodeDetails.getFromProtoBuf(
-            HddsProtos.DatanodeDetailsProto.parseFrom(in));
-      } catch (IOException io) {
-        throw new IOException("Failed to parse DatanodeDetails from "
-            + path.getAbsolutePath(), io);
-      }
-    }
-  }
-
-  /**
-   * Verify that the checksum stored in containerData is equal to the
-   * computed checksum.
-   * @param containerData
-   * @throws IOException
-   */
-  public static void verifyChecksum(ContainerData containerData)
-      throws IOException {
-    String storedChecksum = containerData.getChecksum();
-
-    Yaml yaml = ContainerDataYaml.getYamlForContainerType(
-        containerData.getContainerType());
-    containerData.computeAndSetChecksum(yaml);
-    String computedChecksum = containerData.getChecksum();
-
-    if (storedChecksum == null || !storedChecksum.equals(computedChecksum)) {
-      throw new StorageContainerException("Container checksum error for " +
-          "ContainerID: " + containerData.getContainerID() + ". " +
-          "\nStored Checksum: " + storedChecksum +
-          "\nExpected Checksum: " + computedChecksum,
-          CONTAINER_CHECKSUM_ERROR);
-    }
-  }
-
-  /**
-   * Return the SHA-256 checksum of the containerData.
-   * @param containerDataYamlStr ContainerData as a Yaml String
-   * @return Checksum of the container data
-   * @throws StorageContainerException
-   */
-  public static String getChecksum(String containerDataYamlStr)
-      throws StorageContainerException {
-    MessageDigest sha;
-    try {
-      sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-      sha.update(containerDataYamlStr.getBytes(CHARSET_ENCODING));
-      return DigestUtils.sha256Hex(sha.digest());
-    } catch (NoSuchAlgorithmException e) {
-      throw new StorageContainerException("Unable to create Message Digest, " +
-          "usually this is a java configuration issue.", NO_SUCH_ALGORITHM);
-    }
-  }
-
-  /**
-   * Get the .container file from the containerBaseDir.
-   * @param containerBaseDir container base directory. The name of this
-   *                         directory is the same as the containerID
-   * @return the .container file
-   */
-  public static File getContainerFile(File containerBaseDir) {
-    // Container file layout is
-    // .../<<containerID>>/metadata/<<containerID>>.container
-    String containerFilePath = OzoneConsts.CONTAINER_META_PATH + File.separator
-        + getContainerID(containerBaseDir) + OzoneConsts.CONTAINER_EXTENSION;
-    return new File(containerBaseDir, containerFilePath);
-  }
-
-  /**
-   * ContainerID can be decoded from the container base directory name.
-   */
-  public static long getContainerID(File containerBaseDir) {
-    return Long.parseLong(containerBaseDir.getName());
-  }
-}
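A round-trip sketch for the datanode ID helpers above; the path is
illustrative and the imports (File, UUID) are assumed:

    File idFile = new File("/var/lib/hadoop/datanode.id");   // illustrative
    DatanodeDetails details = DatanodeDetails.newBuilder()
        .setUuid(UUID.randomUUID().toString()).build();
    ContainerUtils.writeDatanodeDetailsTo(details, idFile);  // YAML on disk
    DatanodeDetails reloaded = ContainerUtils.readDatanodeDetailsFrom(idFile);
    // readDatanodeDetailsFrom falls back to protobuf for pre-YAML ID files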
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java
deleted file mode 100644
index d3efa98..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.collections.MapUtils;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.yaml.snakeyaml.DumperOptions;
-import org.yaml.snakeyaml.Yaml;
-
-/**
- * Class for creating datanode.id file in yaml format.
- */
-public final class DatanodeIdYaml {
-
-  private DatanodeIdYaml() {
-    // static helper methods only, no state.
-  }
-
-  /**
-   * Creates a yaml file using DatanodeDetails. This method expects the path
-   * validation to be performed by the caller.
-   *
-   * @param datanodeDetails {@link DatanodeDetails}
-   * @param path            Path to datanode.id file
-   */
-  public static void createDatanodeIdFile(DatanodeDetails datanodeDetails,
-                                          File path) throws IOException {
-    DumperOptions options = new DumperOptions();
-    options.setPrettyFlow(true);
-    options.setDefaultFlowStyle(DumperOptions.FlowStyle.FLOW);
-    Yaml yaml = new Yaml(options);
-
-    try (Writer writer = new OutputStreamWriter(
-        new FileOutputStream(path), "UTF-8")) {
-      yaml.dump(getDatanodeDetailsYaml(datanodeDetails), writer);
-    }
-  }
-
-  /**
-   * Read datanode.id from file.
-   */
-  public static DatanodeDetails readDatanodeIdFile(File path)
-      throws IOException {
-    DatanodeDetails datanodeDetails;
-    try (FileInputStream inputFileStream = new FileInputStream(path)) {
-      Yaml yaml = new Yaml();
-      DatanodeDetailsYaml datanodeDetailsYaml;
-      try {
-        datanodeDetailsYaml =
-            yaml.loadAs(inputFileStream, DatanodeDetailsYaml.class);
-      } catch (Exception e) {
-        throw new IOException("Unable to parse yaml file.", e);
-      }
-
-      DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
-      builder.setUuid(datanodeDetailsYaml.getUuid())
-          .setIpAddress(datanodeDetailsYaml.getIpAddress())
-          .setHostName(datanodeDetailsYaml.getHostName())
-          .setCertSerialId(datanodeDetailsYaml.getCertSerialId());
-
-      if (!MapUtils.isEmpty(datanodeDetailsYaml.getPortDetails())) {
-        for (Map.Entry<String, Integer> portEntry :
-            datanodeDetailsYaml.getPortDetails().entrySet()) {
-          builder.addPort(DatanodeDetails.newPort(
-              DatanodeDetails.Port.Name.valueOf(portEntry.getKey()),
-              portEntry.getValue()));
-        }
-      }
-      datanodeDetails = builder.build();
-    }
-
-    return datanodeDetails;
-  }
-
-  /**
-   * Datanode details bean to be written to the yaml file.
-   */
-  public static class DatanodeDetailsYaml {
-    private String uuid;
-    private String ipAddress;
-    private String hostName;
-    private String certSerialId;
-    private Map<String, Integer> portDetails;
-
-    public DatanodeDetailsYaml() {
-      // Needed for snake-yaml introspection.
-    }
-
-    private DatanodeDetailsYaml(String uuid, String ipAddress,
-                                String hostName, String certSerialId,
-                                Map<String, Integer> portDetails) {
-      this.uuid = uuid;
-      this.ipAddress = ipAddress;
-      this.hostName = hostName;
-      this.certSerialId = certSerialId;
-      this.portDetails = portDetails;
-    }
-
-    public String getUuid() {
-      return uuid;
-    }
-
-    public String getIpAddress() {
-      return ipAddress;
-    }
-
-    public String getHostName() {
-      return hostName;
-    }
-
-    public String getCertSerialId() {
-      return certSerialId;
-    }
-
-    public Map<String, Integer> getPortDetails() {
-      return portDetails;
-    }
-
-    public void setUuid(String uuid) {
-      this.uuid = uuid;
-    }
-
-    public void setIpAddress(String ipAddress) {
-      this.ipAddress = ipAddress;
-    }
-
-    public void setHostName(String hostName) {
-      this.hostName = hostName;
-    }
-
-    public void setCertSerialId(String certSerialId) {
-      this.certSerialId = certSerialId;
-    }
-
-    public void setPortDetails(Map<String, Integer> portDetails) {
-      this.portDetails = portDetails;
-    }
-  }
-
-  private static DatanodeDetailsYaml getDatanodeDetailsYaml(
-      DatanodeDetails datanodeDetails) {
-
-    Map<String, Integer> portDetails = new LinkedHashMap<>();
-    if (!CollectionUtils.isEmpty(datanodeDetails.getPorts())) {
-      for (DatanodeDetails.Port port : datanodeDetails.getPorts()) {
-        portDetails.put(port.getName().toString(), port.getValue());
-      }
-    }
-
-    return new DatanodeDetailsYaml(
-        datanodeDetails.getUuid().toString(),
-        datanodeDetails.getIpAddress(),
-        datanodeDetails.getHostName(),
-        datanodeDetails.getCertSerialId(),
-        portDetails);
-  }
-}
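The file written by createDatanodeIdFile() is flow-style YAML of the bean
above. Roughly, with invented values (exact tagging, quoting, and key order
are up to snakeyaml):

    {uuid: 4f7a21c2-0000-0000-0000-000000000000, ipAddress: 10.0.0.5,
     hostName: dn1.example.com, certSerialId: null,
     portDetails: {STANDALONE: 9859, RATIS: 9858, REST: 9880}}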
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java
deleted file mode 100644
index 4db6d31..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.util.Properties;
-
-/**
- * This is a utility class which helps to create the version file on datanode
- * and also validate the content of the version file.
- */
-public class DatanodeVersionFile {
-
-  private final String storageId;
-  private final String clusterId;
-  private final String datanodeUuid;
-  private final long cTime;
-  private final int layOutVersion;
-
-  public DatanodeVersionFile(String storageId, String clusterId,
-      String datanodeUuid, long cTime, int layOutVersion) {
-    this.storageId = storageId;
-    this.clusterId = clusterId;
-    this.datanodeUuid = datanodeUuid;
-    this.cTime = cTime;
-    this.layOutVersion = layOutVersion;
-  }
-
-  private Properties createProperties() {
-    Properties properties = new Properties();
-    properties.setProperty(OzoneConsts.STORAGE_ID, storageId);
-    properties.setProperty(OzoneConsts.CLUSTER_ID, clusterId);
-    properties.setProperty(OzoneConsts.DATANODE_UUID, datanodeUuid);
-    properties.setProperty(OzoneConsts.CTIME, String.valueOf(cTime));
-    properties.setProperty(OzoneConsts.LAYOUTVERSION, String.valueOf(
-        layOutVersion));
-    return properties;
-  }
-
-  /**
-   * Creates a version File in specified path.
-   * @param path
-   * @throws IOException
-   */
-  public void createVersionFile(File path) throws
-      IOException {
-    try (RandomAccessFile file = new RandomAccessFile(path, "rws");
-         FileOutputStream out = new FileOutputStream(file.getFD())) {
-      file.getChannel().truncate(0);
-      Properties properties = createProperties();
-      /*
-       * If server is interrupted before this line,
-       * the version file will remain unchanged.
-       */
-      properties.store(out, null);
-    }
-  }
-
-
-  /**
-   * Creates a property object from the specified file content.
-   * @param  versionFile
-   * @return Properties
-   * @throws IOException
-   */
-  public static Properties readFrom(File versionFile) throws IOException {
-    try (RandomAccessFile file = new RandomAccessFile(versionFile, "rws");
-         FileInputStream in = new FileInputStream(file.getFD())) {
-      Properties props = new Properties();
-      props.load(in);
-      return props;
-    }
-  }
-}
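A create-then-read sketch pairing the two helpers above; all values are
invented, metadataDir is a hypothetical directory, and Time is Hadoop's
org.apache.hadoop.util.Time:

    DatanodeVersionFile versionFile = new DatanodeVersionFile(
        "storage-1", "cluster-1", UUID.randomUUID().toString(),
        Time.now(), DataNodeLayoutVersion.getLatestVersion().getVersion());
    File path = new File(metadataDir, "VERSION");
    versionFile.createVersionFile(path);
    Properties props = DatanodeVersionFile.readFrom(path);
    // props now carries STORAGE_ID, CLUSTER_ID, DATANODE_UUID, CTIME and
    // LAYOUTVERSION, keyed by the OzoneConsts constants used above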
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java
deleted file mode 100644
index 9d0ec95..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.collect.Maps;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.util.StringUtils;
-
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-/**
- * A helper class that summarizes container blocks pending deletion.
- */
-public final class DeletedContainerBlocksSummary {
-
-  private final List<DeletedBlocksTransaction> blocks;
-  // key : txID
-  // value : number of times this tx has been processed
-  private final Map<Long, Integer> txSummary;
-  // key : container name
-  // value : the number of blocks that need to be deleted in this container;
-  // if the message contains multiple entries for the same container,
-  // the block counts are merged
-  private final Map<Long, Integer> blockSummary;
-  // total number of blocks in this message
-  private int numOfBlocks;
-
-  private DeletedContainerBlocksSummary(List<DeletedBlocksTransaction> blocks) {
-    this.blocks = blocks;
-    txSummary = Maps.newHashMap();
-    blockSummary = Maps.newHashMap();
-    blocks.forEach(entry -> {
-      txSummary.put(entry.getTxID(), entry.getCount());
-      if (blockSummary.containsKey(entry.getContainerID())) {
-        blockSummary.put(entry.getContainerID(),
-            blockSummary.get(entry.getContainerID())
-                + entry.getLocalIDCount());
-      } else {
-        blockSummary.put(entry.getContainerID(), entry.getLocalIDCount());
-      }
-      numOfBlocks += entry.getLocalIDCount();
-    });
-  }
-
-  public static DeletedContainerBlocksSummary getFrom(
-      List<DeletedBlocksTransaction> blocks) {
-    return new DeletedContainerBlocksSummary(blocks);
-  }
-
-  public int getNumOfBlocks() {
-    return numOfBlocks;
-  }
-
-  public int getNumOfContainers() {
-    return blockSummary.size();
-  }
-
-  public String getTXIDs() {
-    return String.join(",", txSummary.keySet()
-        .stream().map(String::valueOf).collect(Collectors.toList()));
-  }
-
-  public String getTxIDSummary() {
-    List<String> txSummaryEntry = txSummary.entrySet().stream()
-        .map(entry -> entry.getKey() + "(" + entry.getValue() + ")")
-        .collect(Collectors.toList());
-    return "[" + String.join(",", txSummaryEntry) + "]";
-  }
-
-  @Override public String toString() {
-    StringBuffer sb = new StringBuffer();
-    for (DeletedBlocksTransaction blks : blocks) {
-      sb.append(" ")
-          .append("TXID=")
-          .append(blks.getTxID())
-          .append(", ")
-          .append("TimesProceed=")
-          .append(blks.getCount())
-          .append(", ")
-          .append(blks.getContainerID())
-          .append(" : [")
-          .append(StringUtils.join(',', blks.getLocalIDList())).append("]")
-          .append("\n");
-    }
-    return sb.toString();
-  }
-}
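A usage sketch for the summary above, assuming txList arrives with a
block-deletion command (the cmd accessor name is hypothetical) and LOG is an
slf4j logger:

    List<DeletedBlocksTransaction> txList = cmd.getBlocksTobeDeleted();
    DeletedContainerBlocksSummary summary =
        DeletedContainerBlocksSummary.getFrom(txList);
    LOG.info("Deleting {} blocks across {} containers, TXs: {}",
        summary.getNumOfBlocks(), summary.getNumOfContainers(),
        summary.getTxIDSummary());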
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
deleted file mode 100644
index 21f31e1..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-/**
- * Contains protocol buffer helper classes and utilities used by the
- * container implementation.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
deleted file mode 100644
index d1b1bd6..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.impl;
-
-
-import com.google.common.base.Preconditions;
-
-/**
- * Defines layout versions for the Chunks.
- */
-
-public final class ChunkLayOutVersion {
-
-  private final static ChunkLayOutVersion[] CHUNK_LAYOUT_VERSION_INFOS =
-      {new ChunkLayOutVersion(1, "Data without checksums.")};
-
-  private int version;
-  private String description;
-
-
-  /**
-   * Never created outside this class.
-   *
-   * @param description -- description
-   * @param version     -- version number
-   */
-  private ChunkLayOutVersion(int version, String description) {
-    this.version = version;
-    this.description = description;
-  }
-
-  /**
-   * Return ChunkLayOutVersion object for the chunkVersion.
-   * @param chunkVersion
-   * @return ChunkLayOutVersion
-   */
-  public static ChunkLayOutVersion getChunkLayOutVersion(int chunkVersion) {
-    Preconditions.checkArgument((chunkVersion <= ChunkLayOutVersion
-        .getLatestVersion().getVersion()));
-    for(ChunkLayOutVersion chunkLayOutVersion : CHUNK_LAYOUT_VERSION_INFOS) {
-      if(chunkLayOutVersion.getVersion() == chunkVersion) {
-        return chunkLayOutVersion;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Returns all versions.
-   *
-   * @return Version info array.
-   */
-  public static ChunkLayOutVersion[] getAllVersions() {
-    return CHUNK_LAYOUT_VERSION_INFOS.clone();
-  }
-
-  /**
-   * Returns the latest version.
-   *
-   * @return versionInfo
-   */
-  public static ChunkLayOutVersion getLatestVersion() {
-    return CHUNK_LAYOUT_VERSION_INFOS[CHUNK_LAYOUT_VERSION_INFOS.length - 1];
-  }
-
-  /**
-   * Return version.
-   *
-   * @return int
-   */
-  public int getVersion() {
-    return version;
-  }
-
-  /**
-   * Returns description.
-   * @return String
-   */
-  public String getDescription() {
-    return description;
-  }
-
-}
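A lookup sketch against the single registered chunk layout above; note that
getChunkLayOutVersion() precondition-checks the argument against the latest
version before scanning the table:

    ChunkLayOutVersion layout = ChunkLayOutVersion.getChunkLayOutVersion(1);
    assert layout != null && layout.getVersion() == 1;
    // any version greater than getLatestVersion().getVersion() trips the
    // Preconditions check rather than returning null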
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
deleted file mode 100644
index 85738e2..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ /dev/null
@@ -1,560 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import java.io.IOException;
-import java.nio.charset.Charset;
-import java.util.List;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
-    ContainerType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerDataProto;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-
-import java.util.Collections;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicLong;
-import org.yaml.snakeyaml.Yaml;
-
-import static org.apache.hadoop.ozone.OzoneConsts.CHECKSUM;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_TYPE;
-import static org.apache.hadoop.ozone.OzoneConsts.LAYOUTVERSION;
-import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE;
-import static org.apache.hadoop.ozone.OzoneConsts.METADATA;
-import static org.apache.hadoop.ozone.OzoneConsts.ORIGIN_NODE_ID;
-import static org.apache.hadoop.ozone.OzoneConsts.ORIGIN_PIPELINE_ID;
-import static org.apache.hadoop.ozone.OzoneConsts.STATE;
-
-/**
- * ContainerData is the in-memory representation of container metadata and is
- * represented on disk by the .container file.
- */
-public abstract class ContainerData {
-
-  //Type of the container.
-  // For now, we support only KeyValueContainer.
-  private final ContainerType containerType;
-
-  // Unique identifier for the container
-  private final long containerID;
-
-  // Layout version of the container data
-  private final int layOutVersion;
-
-  // Metadata of the container will be a key value pair.
-  // This can hold information like volume name, owner etc.,
-  private final Map<String, String> metadata;
-
-  // State of the Container
-  private ContainerDataProto.State state;
-
-  private final long maxSize;
-
-  private boolean committedSpace;
-
-  //ID of the pipeline where this container is created
-  private String originPipelineId;
-  //ID of the datanode where this container is created
-  private String originNodeId;
-
-  /** parameters for read/write statistics on the container. */
-  private final AtomicLong readBytes;
-  private final AtomicLong writeBytes;
-  private final AtomicLong readCount;
-  private final AtomicLong writeCount;
-  private final AtomicLong bytesUsed;
-  private final AtomicLong keyCount;
-
-  private HddsVolume volume;
-
-  private String checksum;
-  public static final Charset CHARSET_ENCODING = Charset.forName("UTF-8");
-  private static final String DUMMY_CHECKSUM = new String(new byte[64],
-      CHARSET_ENCODING);
-
-  // Common Fields need to be stored in .container file.
-  protected static final List<String> YAML_FIELDS =
-      Collections.unmodifiableList(Lists.newArrayList(
-      CONTAINER_TYPE,
-      CONTAINER_ID,
-      LAYOUTVERSION,
-      STATE,
-      METADATA,
-      MAX_SIZE,
-      CHECKSUM,
-      ORIGIN_PIPELINE_ID,
-      ORIGIN_NODE_ID));
-
-  /**
-   * Creates a ContainerData Object, which holds metadata of the container.
-   * @param type - ContainerType
-   * @param containerId - ContainerId
-   * @param size - container maximum size in bytes
-   * @param originPipelineId - Pipeline Id where this container is/was created
-   * @param originNodeId - Node Id where this container is/was created
-   */
-  protected ContainerData(ContainerType type, long containerId, long size,
-                          String originPipelineId, String originNodeId) {
-    this(type, containerId, ChunkLayOutVersion.getLatestVersion().getVersion(),
-        size, originPipelineId, originNodeId);
-  }
-
-  /**
-   * Creates a ContainerData Object, which holds metadata of the container.
-   * @param type - ContainerType
-   * @param containerId - ContainerId
-   * @param layOutVersion - Container layOutVersion
-   * @param size - Container maximum size in bytes
-   * @param originPipelineId - Pipeline Id where this container is/was created
-   * @param originNodeId - Node Id where this container is/was created
-   */
-  protected ContainerData(ContainerType type, long containerId,
-      int layOutVersion, long size, String originPipelineId,
-      String originNodeId) {
-    Preconditions.checkNotNull(type);
-
-    this.containerType = type;
-    this.containerID = containerId;
-    this.layOutVersion = layOutVersion;
-    this.metadata = new TreeMap<>();
-    this.state = ContainerDataProto.State.OPEN;
-    this.readCount = new AtomicLong(0L);
-    this.readBytes =  new AtomicLong(0L);
-    this.writeCount =  new AtomicLong(0L);
-    this.writeBytes =  new AtomicLong(0L);
-    this.bytesUsed = new AtomicLong(0L);
-    this.keyCount = new AtomicLong(0L);
-    this.maxSize = size;
-    this.originPipelineId = originPipelineId;
-    this.originNodeId = originNodeId;
-    setChecksumTo0ByteArray();
-  }
-
-  /**
-   * Returns the containerID.
-   */
-  public long getContainerID() {
-    return containerID;
-  }
-
-  /**
-   * Returns the path to base dir of the container.
-   * @return Path to base dir.
-   */
-  public abstract String getContainerPath();
-
-  /**
-   * Returns the type of the container.
-   * @return ContainerType
-   */
-  public ContainerType getContainerType() {
-    return containerType;
-  }
-
-
-  /**
-   * Returns the state of the container.
-   * @return ContainerLifeCycleState
-   */
-  public synchronized ContainerDataProto.State getState() {
-    return state;
-  }
-
-  /**
-   * Set the state of the container.
-   * @param state
-   */
-  public synchronized void setState(ContainerDataProto.State state) {
-    ContainerDataProto.State oldState = this.state;
-    this.state = state;
-
-    if ((oldState == ContainerDataProto.State.OPEN) &&
-        (state != oldState)) {
-      releaseCommitSpace();
-    }
-
-    /*
-     * Commit space when the container transitions back to OPEN,
-     * e.g. when closing a container threw an exception.
-     */
-    if ((state == ContainerDataProto.State.OPEN) &&
-        (state != oldState)) {
-      Preconditions.checkState(getMaxSize() > 0);
-      commitSpace();
-    }
-  }
-
-  /**
-   * Returns the maximum size of the container in bytes.
-   * @return maxSize in bytes
-   */
-  public long getMaxSize() {
-    return maxSize;
-  }
-
-  /**
-   * Returns the layOutVersion of the actual container data format.
-   * @return layOutVersion
-   */
-  public int getLayOutVersion() {
-    return ChunkLayOutVersion.getChunkLayOutVersion(layOutVersion).getVersion();
-  }
-
-  /**
-   * Add/Update metadata.
-   * We should hold the container lock before updating the metadata, as it
-   * will be persisted on disk, unless we are reconstructing ContainerData
-   * from protobuf or from the on-disk .container file, in which case the
-   * lock is not required.
-   */
-  public void addMetadata(String key, String value) {
-    metadata.put(key, value);
-  }
-
-  /**
-   * Returns the metadata of the container.
-   * @return metadata
-   */
-  public Map<String, String> getMetadata() {
-    return Collections.unmodifiableMap(this.metadata);
-  }
-
-  /**
-   * Set metadata.
-   * We should hold the container lock before updating the metadata, as it
-   * will be persisted on disk, unless we are reconstructing ContainerData
-   * from protobuf or from the on-disk .container file, in which case the
-   * lock is not required.
-   */
-  public void setMetadata(Map<String, String> metadataMap) {
-    metadata.clear();
-    metadata.putAll(metadataMap);
-  }
-
-  /**
-   * checks if the container is open.
-   * @return - boolean
-   */
-  public synchronized boolean isOpen() {
-    return ContainerDataProto.State.OPEN == state;
-  }
-
-  /**
-   * checks if the container is invalid.
-   * @return - boolean
-   */
-  public synchronized boolean isValid() {
-    return !(ContainerDataProto.State.INVALID == state);
-  }
-
-  /**
-   * checks if the container is closed.
-   * @return - boolean
-   */
-  public synchronized boolean isClosed() {
-    return ContainerDataProto.State.CLOSED == state;
-  }
-
-  /**
-   * checks if the container is quasi closed.
-   * @return - boolean
-   */
-  public synchronized boolean isQuasiClosed() {
-    return ContainerDataProto.State.QUASI_CLOSED == state;
-  }
-
-  /**
-   * checks if the container is unhealthy.
-   * @return - boolean
-   */
-  public synchronized boolean isUnhealthy() {
-    return ContainerDataProto.State.UNHEALTHY == state;
-  }
-
-  /**
-   * Marks this container as quasi closed.
-   */
-  public synchronized void quasiCloseContainer() {
-    setState(ContainerDataProto.State.QUASI_CLOSED);
-  }
-
-  /**
-   * Marks this container as closed.
-   */
-  public synchronized void closeContainer() {
-    setState(ContainerDataProto.State.CLOSED);
-  }
-
-  private void releaseCommitSpace() {
-    long unused = getMaxSize() - getBytesUsed();
-
-    // only if container size < max size
-    if (unused > 0 && committedSpace) {
-      getVolume().incCommittedBytes(0 - unused);
-    }
-    committedSpace = false;
-  }
-
-  /**
-   * Add the available space in the container to the committed space in the
-   * volume. Available space is the number of bytes remaining till max
-   * capacity.
-   */
-  public void commitSpace() {
-    long unused = getMaxSize() - getBytesUsed();
-    ContainerDataProto.State myState = getState();
-    HddsVolume cVol;
-
-    //we don't expect duplicate calls
-    Preconditions.checkState(!committedSpace);
-
-    // Only Open Containers have Committed Space
-    if (myState != ContainerDataProto.State.OPEN) {
-      return;
-    }
-
-    // junit tests do not always set up volume
-    cVol = getVolume();
-    if (unused > 0 && (cVol != null)) {
-      cVol.incCommittedBytes(unused);
-      committedSpace = true;
-    }
-  }
-
-  /**
-   * Get the number of bytes read from the container.
-   * @return the number of bytes read from the container.
-   */
-  public long getReadBytes() {
-    return readBytes.get();
-  }
-
-  /**
-   * Increase the number of bytes read from the container.
-   * @param bytes number of bytes read.
-   */
-  public void incrReadBytes(long bytes) {
-    this.readBytes.addAndGet(bytes);
-  }
-
-  /**
-   * Get the number of times the container is read.
-   * @return the number of times the container is read.
-   */
-  public long getReadCount() {
-    return readCount.get();
-  }
-
-  /**
-   * Increase the container read count by 1.
-   */
-  public void incrReadCount() {
-    this.readCount.incrementAndGet();
-  }
-
-  /**
-   * Get the number of bytes written into the container.
-   * @return the number of bytes written into the container.
-   */
-  public long getWriteBytes() {
-    return writeBytes.get();
-  }
-
-  /**
-   * Increase the number of bytes written into the container.
-   * Also decrement committed bytes against the bytes written.
-   * @param bytes the number of bytes written into the container.
-   */
-  public void incrWriteBytes(long bytes) {
-    long unused = getMaxSize() - getBytesUsed();
-
-    this.writeBytes.addAndGet(bytes);
-
-    // only if container size < max size
-    if (committedSpace && unused > 0) {
-      //with this write, container size might breach max size
-      long decrement = Math.min(bytes, unused);
-      this.getVolume().incCommittedBytes(0 - decrement);
-    }
-  }
-
-  /**
-   * Get the number of writes into the container.
-   * @return the number of writes into the container.
-   */
-  public long getWriteCount() {
-    return writeCount.get();
-  }
-
-  /**
-   * Increase the number of writes into the container by 1.
-   */
-  public void incrWriteCount() {
-    this.writeCount.incrementAndGet();
-  }
-
-  /**
-   * Sets the number of bytes used by the container.
-   * @param used number of bytes used by the container
-   */
-  public void setBytesUsed(long used) {
-    this.bytesUsed.set(used);
-  }
-
-  /**
-   * Get the number of bytes used by the container.
-   * @return the number of bytes used by the container.
-   */
-  public long getBytesUsed() {
-    return bytesUsed.get();
-  }
-
-  /**
-   * Increase the number of bytes used by the container.
-   * @param used number of bytes used by the container.
-   * @return the current number of bytes used by the container after the
-   * increase.
-   */
-  public long incrBytesUsed(long used) {
-    return this.bytesUsed.addAndGet(used);
-  }
-
-  /**
-   * Decrease the number of bytes used by the container.
-   * @param reclaimed the number of bytes reclaimed from the container.
-   * @return the current number of bytes used by the container after decrease.
-   */
-  public long decrBytesUsed(long reclaimed) {
-    return this.bytesUsed.addAndGet(-1L * reclaimed);
-  }
-
-  /**
-   * Set the Volume for the Container.
-   * This should be called only from createContainer.
-   * @param hddsVolume volume on which this container resides
-   */
-  public void setVolume(HddsVolume hddsVolume) {
-    this.volume = hddsVolume;
-  }
-
-  /**
-   * Returns the volume of the Container.
-   * @return HddsVolume
-   */
-  public HddsVolume getVolume() {
-    return volume;
-  }
-
-  /**
-   * Increments the number of keys in the container.
-   */
-  public void incrKeyCount() {
-    this.keyCount.incrementAndGet();
-  }
-
-  /**
-   * Decrements number of keys in the container.
-   */
-  public void decrKeyCount() {
-    this.keyCount.decrementAndGet();
-  }
-
-  /**
-   * Returns number of keys in the container.
-   * @return key count
-   */
-  public long getKeyCount() {
-    return this.keyCount.get();
-  }
-
-  /**
-   * Sets the number of keys in the container.
-   * @param count number of keys
-   */
-  public void setKeyCount(long count) {
-    this.keyCount.set(count);
-  }
-
-  public void setChecksumTo0ByteArray() {
-    this.checksum = DUMMY_CHECKSUM;
-  }
-
-  public void setChecksum(String checkSum) {
-    this.checksum = checkSum;
-  }
-
-  public String getChecksum() {
-    return this.checksum;
-  }
-
-  /**
-   * Returns the origin pipeline Id of this container.
-   * @return origin pipeline Id
-   */
-  public String getOriginPipelineId() {
-    return originPipelineId;
-  }
-
-  /**
-   * Returns the origin node Id of this container.
-   * @return origin node Id
-   */
-  public String getOriginNodeId() {
-    return originNodeId;
-  }
-
-  /**
-   * Compute the checksum for ContainerData using the specified Yaml (based
-   * on ContainerType) and set the checksum.
-   *
-   * Checksum of ContainerData is calculated by setting the
-   * {@link ContainerData#checksum} field to a 64-byte array with all 0's -
-   * {@link ContainerData#DUMMY_CHECKSUM}. After the checksum is calculated,
-   * the checksum field is updated with this value.
-   *
-   * @param yaml Yaml for ContainerType to get the ContainerData as Yaml String
-   * @throws IOException
-   */
-  public void computeAndSetChecksum(Yaml yaml) throws IOException {
-    // Set checksum to dummy value - 0 byte array, to calculate the checksum
-    // of rest of the data.
-    setChecksumTo0ByteArray();
-
-    // Dump yaml data into a string to compute its checksum
-    String containerDataYamlStr = yaml.dump(this);
-
-    this.checksum = ContainerUtils.getChecksum(containerDataYamlStr);
-  }
-
-  /**
-   * Returns a ProtoBuf Message from ContainerData.
-   *
-   * @return Protocol Buffer Message
-   */
-  public abstract ContainerProtos.ContainerDataProto getProtoBufMessage();
-
-  /**
-   * Returns the blockCommitSequenceId.
-   */
-  public abstract long getBlockCommitSequenceId();
-}
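The setState/commitSpace/releaseCommitSpace methods above keep a volume's
committed-bytes counter in step with the container state: a container's unused
capacity is reserved against its volume while the container is OPEN, shrunk as
writes land, and released on any transition out of OPEN. A minimal standalone
sketch of that invariant, where the Volume class and the on* method names are
illustrative stand-ins rather than the deleted HddsVolume API:

import java.util.concurrent.atomic.AtomicLong;

// Illustrative stand-in for the HddsVolume committed-bytes counter.
class Volume {
  final AtomicLong committedBytes = new AtomicLong();

  void incCommittedBytes(long delta) {
    committedBytes.addAndGet(delta);
  }
}

class SpaceTracker {
  private final Volume volume;
  private final long maxSize;
  private long bytesUsed;
  private boolean committedSpace;

  SpaceTracker(Volume volume, long maxSize) {
    this.volume = volume;
    this.maxSize = maxSize;
  }

  // Mirrors commitSpace(): reserve the unused capacity on entering OPEN.
  void onOpen() {
    long unused = maxSize - bytesUsed;
    if (unused > 0 && !committedSpace) {
      volume.incCommittedBytes(unused);
      committedSpace = true;
    }
  }

  // Mirrors incrWriteBytes(): each write shrinks the reservation.
  void onWrite(long bytes) {
    long unused = maxSize - bytesUsed;
    bytesUsed += bytes;
    if (committedSpace && unused > 0) {
      volume.incCommittedBytes(-Math.min(bytes, unused));
    }
  }

  // Mirrors releaseCommitSpace(): return the reservation on leaving OPEN.
  void onLeaveOpen() {
    long unused = maxSize - bytesUsed;
    if (unused > 0 && committedSpace) {
      volume.incCommittedBytes(-unused);
    }
    committedSpace = false;
  }
}

The deleted code additionally guards against duplicate commits with
Preconditions.checkState(!committedSpace) and tolerates a missing volume,
since some unit tests never attach one.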
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
deleted file mode 100644
index 1f9966c..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import java.beans.IntrospectionException;
-import java.io.ByteArrayInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerType;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-
-import com.google.common.base.Preconditions;
-import static org.apache.hadoop.ozone.container.keyvalue
-    .KeyValueContainerData.KEYVALUE_YAML_TAG;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.yaml.snakeyaml.Yaml;
-import org.yaml.snakeyaml.constructor.AbstractConstruct;
-import org.yaml.snakeyaml.constructor.Constructor;
-import org.yaml.snakeyaml.introspector.BeanAccess;
-import org.yaml.snakeyaml.introspector.Property;
-import org.yaml.snakeyaml.introspector.PropertyUtils;
-import org.yaml.snakeyaml.nodes.MappingNode;
-import org.yaml.snakeyaml.nodes.Node;
-import org.yaml.snakeyaml.nodes.ScalarNode;
-import org.yaml.snakeyaml.nodes.Tag;
-import org.yaml.snakeyaml.representer.Representer;
-
-/**
- * Class for creating and reading .container files.
- */
-public final class ContainerDataYaml {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerDataYaml.class);
-
-  private ContainerDataYaml() {
-
-  }
-
-  /**
-   * Creates a .container file in yaml format.
-   *
-   * @param containerType type of the container
-   * @param containerData container data to write out
-   * @param containerFile target .container file
-   * @throws IOException
-   */
-  public static void createContainerFile(ContainerType containerType,
-      ContainerData containerData, File containerFile) throws IOException {
-    Writer writer = null;
-    FileOutputStream out = null;
-    try {
-      // Create Yaml for given container type
-      Yaml yaml = getYamlForContainerType(containerType);
-      // Compute Checksum and update ContainerData
-      containerData.computeAndSetChecksum(yaml);
-
-      // Write the ContainerData with checksum to Yaml file.
-      out = new FileOutputStream(
-          containerFile);
-      writer = new OutputStreamWriter(out, "UTF-8");
-      yaml.dump(containerData, writer);
-
-    } finally {
-      try {
-        if (writer != null) {
-          writer.flush();
-          // make sure the container metadata is synced to disk.
-          out.getFD().sync();
-          writer.close();
-        }
-      } catch (IOException ex) {
-        LOG.warn("Error occurred during closing the writer. ContainerID: " +
-            containerData.getContainerID());
-      }
-    }
-  }
-
-  /**
-   * Read the yaml file, and return containerData.
-   *
-   * @throws IOException
-   */
-  public static ContainerData readContainerFile(File containerFile)
-      throws IOException {
-    Preconditions.checkNotNull(containerFile, "containerFile cannot be null");
-    try (FileInputStream inputFileStream = new FileInputStream(containerFile)) {
-      return readContainer(inputFileStream);
-    }
-
-  }
-
-  /**
-   * Read the yaml file content, and return containerData.
-   *
-   * @throws IOException
-   */
-  public static ContainerData readContainer(byte[] containerFileContent)
-      throws IOException {
-    return readContainer(
-        new ByteArrayInputStream(containerFileContent));
-  }
-
-  /**
-   * Read the yaml content, and return containerData.
-   *
-   * @throws IOException
-   */
-  public static ContainerData readContainer(InputStream input)
-      throws IOException {
-
-    ContainerData containerData;
-    PropertyUtils propertyUtils = new PropertyUtils();
-    propertyUtils.setBeanAccess(BeanAccess.FIELD);
-    propertyUtils.setAllowReadOnlyProperties(true);
-
-    Representer representer = new ContainerDataRepresenter();
-    representer.setPropertyUtils(propertyUtils);
-
-    Constructor containerDataConstructor = new ContainerDataConstructor();
-
-    Yaml yaml = new Yaml(containerDataConstructor, representer);
-    yaml.setBeanAccess(BeanAccess.FIELD);
-
-    containerData = (ContainerData)
-        yaml.load(input);
-
-    return containerData;
-  }
-
-  /**
-   * Given a ContainerType this method returns a Yaml representation of
-   * the container properties.
-   *
-   * @param containerType type of container
-   * @return Yaml representation of container properties
-   *
-   * @throws StorageContainerException if the type is unrecognized
-   */
-  public static Yaml getYamlForContainerType(ContainerType containerType)
-      throws StorageContainerException {
-    PropertyUtils propertyUtils = new PropertyUtils();
-    propertyUtils.setBeanAccess(BeanAccess.FIELD);
-    propertyUtils.setAllowReadOnlyProperties(true);
-
-    switch (containerType) {
-    case KeyValueContainer:
-      Representer representer = new ContainerDataRepresenter();
-      representer.setPropertyUtils(propertyUtils);
-      representer.addClassTag(
-          KeyValueContainerData.class,
-          KeyValueContainerData.KEYVALUE_YAML_TAG);
-
-      Constructor keyValueDataConstructor = new ContainerDataConstructor();
-
-      return new Yaml(keyValueDataConstructor, representer);
-    default:
-      throw new StorageContainerException("Unrecognized container Type " +
-          "format " + containerType, ContainerProtos.Result
-          .UNKNOWN_CONTAINER_TYPE);
-    }
-  }
-
-  /**
-   * Representer class to define which fields need to be stored in yaml file.
-   */
-  private static class ContainerDataRepresenter extends Representer {
-    @Override
-    protected Set<Property> getProperties(Class<? extends Object> type)
-        throws IntrospectionException {
-      Set<Property> set = super.getProperties(type);
-      Set<Property> filtered = new TreeSet<Property>();
-
-      // When a new Container type is added, we need to add what fields need
-      // to be filtered here
-      if (type.equals(KeyValueContainerData.class)) {
-        List<String> yamlFields = KeyValueContainerData.getYamlFields();
-        // filter properties
-        for (Property prop : set) {
-          String name = prop.getName();
-          if (yamlFields.contains(name)) {
-            filtered.add(prop);
-          }
-        }
-      }
-      return filtered;
-    }
-  }
-
-  /**
-   * Constructor class for KeyValueData, which will be used by Yaml.
-   */
-  private static class ContainerDataConstructor extends Constructor {
-    ContainerDataConstructor() {
-      //Adding our own specific constructors for tags.
-      // When a new Container type is added, we need to add yamlConstructor
-      // for that
-      this.yamlConstructors.put(
-          KEYVALUE_YAML_TAG, new ConstructKeyValueContainerData());
-      this.yamlConstructors.put(Tag.INT, new ConstructLong());
-    }
-
-    private class ConstructKeyValueContainerData extends AbstractConstruct {
-      public Object construct(Node node) {
-        MappingNode mnode = (MappingNode) node;
-        Map<Object, Object> nodes = constructMapping(mnode);
-
-        // Needed because ConstructLong returns all Tag.INT values as Long.
-        long layOutVersion = (long) nodes.get(OzoneConsts.LAYOUTVERSION);
-        int lv = (int) layOutVersion;
-
-        long size = (long) nodes.get(OzoneConsts.MAX_SIZE);
-
-        String originPipelineId = (String) nodes.get(
-            OzoneConsts.ORIGIN_PIPELINE_ID);
-        String originNodeId = (String) nodes.get(OzoneConsts.ORIGIN_NODE_ID);
-
-        //When a new field is added, it needs to be added here.
-        KeyValueContainerData kvData = new KeyValueContainerData(
-            (long) nodes.get(OzoneConsts.CONTAINER_ID), lv, size,
-            originPipelineId, originNodeId);
-
-        kvData.setContainerDBType((String)nodes.get(
-            OzoneConsts.CONTAINER_DB_TYPE));
-        kvData.setMetadataPath((String) nodes.get(
-            OzoneConsts.METADATA_PATH));
-        kvData.setChunksPath((String) nodes.get(OzoneConsts.CHUNKS_PATH));
-        Map<String, String> meta = (Map) nodes.get(OzoneConsts.METADATA);
-        kvData.setMetadata(meta);
-        kvData.setChecksum((String) nodes.get(OzoneConsts.CHECKSUM));
-        String state = (String) nodes.get(OzoneConsts.STATE);
-        kvData
-            .setState(ContainerProtos.ContainerDataProto.State.valueOf(state));
-        return kvData;
-      }
-    }
-
-    // The code below is adapted from SnakeYAML. SnakeYAML returns an Integer
-    // when the number fits in one, and a Long otherwise; it is modified
-    // slightly here to return a Long in all cases.
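-    // e.g. "0x10" parses as 16, and the base-60 "1:30" as 1*60 + 30 = 90.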
-    private class ConstructLong extends AbstractConstruct {
-      public Object construct(Node node) {
-        String value = constructScalar((ScalarNode) node).toString()
-            .replaceAll("_", "");
-        int sign = +1;
-        char first = value.charAt(0);
-        if (first == '-') {
-          sign = -1;
-          value = value.substring(1);
-        } else if (first == '+') {
-          value = value.substring(1);
-        }
-        int base = 10;
-        if ("0".equals(value)) {
-          return Long.valueOf(0);
-        } else if (value.startsWith("0b")) {
-          value = value.substring(2);
-          base = 2;
-        } else if (value.startsWith("0x")) {
-          value = value.substring(2);
-          base = 16;
-        } else if (value.startsWith("0")) {
-          value = value.substring(1);
-          base = 8;
-        } else if (value.indexOf(':') != -1) {
-          String[] digits = value.split(":");
-          int bes = 1;
-          int val = 0;
-          for (int i = 0, j = digits.length; i < j; i++) {
-            val += (Long.parseLong(digits[(j - i) - 1]) * bes);
-            bes *= 60;
-          }
-          return createNumber(sign, String.valueOf(val), 10);
-        } else {
-          return createNumber(sign, value, 10);
-        }
-        return createNumber(sign, value, base);
-      }
-    }
-
-    private Number createNumber(int sign, String number, int radix) {
-      Number result;
-      if (sign < 0) {
-        number = "-" + number;
-      }
-      result = Long.valueOf(number, radix);
-      return result;
-    }
-  }
-
-}
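getYamlForContainerType above wires a custom class Tag into both the
Representer (for dumping) and the Constructor (for loading), which is what
lets a .container file round-trip as a tagged YAML document. A minimal
SnakeYAML sketch of the same pattern, assuming a SnakeYAML version that still
offers the no-arg Representer/Constructor used above; the Box bean and the
!box tag are illustrative, not part of this patch:

import org.yaml.snakeyaml.TypeDescription;
import org.yaml.snakeyaml.Yaml;
import org.yaml.snakeyaml.constructor.Constructor;
import org.yaml.snakeyaml.nodes.Tag;
import org.yaml.snakeyaml.representer.Representer;

public class YamlTagRoundTrip {

  // Illustrative bean standing in for a ContainerData subclass.
  public static class Box {
    private long maxSize;
    private String state;

    public long getMaxSize() { return maxSize; }
    public void setMaxSize(long maxSize) { this.maxSize = maxSize; }
    public String getState() { return state; }
    public void setState(String state) { this.state = state; }
  }

  public static void main(String[] args) {
    Tag boxTag = new Tag("!box");

    // Dump side: emit Box instances under the custom !box tag.
    Representer representer = new Representer();
    representer.addClassTag(Box.class, boxTag);

    // Load side: construct a Box whenever the !box tag is seen.
    Constructor constructor = new Constructor();
    constructor.addTypeDescription(new TypeDescription(Box.class, boxTag));

    Yaml yaml = new Yaml(constructor, representer);

    Box box = new Box();
    box.setMaxSize(5368709120L);
    box.setState("OPEN");

    String doc = yaml.dump(box);   // document starts with the !box tag
    Box loaded = yaml.load(doc);   // tag resolves back to a Box instance
    System.out.println(loaded.getMaxSize() + " " + loaded.getState());
  }
}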
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
deleted file mode 100644
index 41415eb..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.Set;
-import java.util.List;
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.ConcurrentNavigableMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.ConcurrentSkipListSet;
-
-
-/**
- * Class that manages Containers created on the datanode.
- */
-public class ContainerSet {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ContainerSet.class);
-
-  private final ConcurrentSkipListMap<Long, Container<?>> containerMap = new
-      ConcurrentSkipListMap<>();
-  private final ConcurrentSkipListSet<Long> missingContainerSet =
-      new ConcurrentSkipListSet<>();
-  /**
-   * Add Container to container map.
-   * @param container container to be added
-   * @return true if the container was added to containerMap, false otherwise
-   */
-  public boolean addContainer(Container<?> container) throws
-      StorageContainerException {
-    Preconditions.checkNotNull(container, "container cannot be null");
-
-    long containerId = container.getContainerData().getContainerID();
-    if (containerMap.putIfAbsent(containerId, container) == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Container with container Id {} is added to containerMap",
-            containerId);
-      }
-      // wish we could have done this from ContainerData.setState
-      container.getContainerData().commitSpace();
-      return true;
-    } else {
-      LOG.warn("Container already exists with container Id {}", containerId);
-      throw new StorageContainerException("Container already exists with " +
-          "container Id " + containerId,
-          ContainerProtos.Result.CONTAINER_EXISTS);
-    }
-  }
-
-  /**
-   * Returns the Container with specified containerId.
-   * @param containerId ID of the container to get
-   * @return Container
-   */
-  public Container<?> getContainer(long containerId) {
-    Preconditions.checkState(containerId >= 0,
-        "Container Id cannot be negative.");
-    return containerMap.get(containerId);
-  }
-
-  /**
-   * Removes the Container matching with specified containerId.
-   * @param containerId ID of the container to remove
-   * @return true if the container was removed from containerMap,
-   * false otherwise
-   */
-  public boolean removeContainer(long containerId) {
-    Preconditions.checkState(containerId >= 0,
-        "Container Id cannot be negative.");
-    Container<?> removed = containerMap.remove(containerId);
-    if (removed == null) {
-      LOG.debug("Container with containerId {} is not present in " +
-          "containerMap", containerId);
-      return false;
-    } else {
-      LOG.debug("Container with containerId {} is removed from containerMap",
-          containerId);
-      return true;
-    }
-  }
-
-  /**
-   * Return number of containers in container map.
-   * @return container count
-   */
-  @VisibleForTesting
-  public int containerCount() {
-    return containerMap.size();
-  }
-
-  /**
-   * Return a container Iterator over {@link ContainerSet#containerMap}.
-   * @return {@literal Iterator<Container<?>>}
-   */
-  public Iterator<Container<?>> getContainerIterator() {
-    return containerMap.values().iterator();
-  }
-
-  /**
-   * Return an iterator of containers associated with the specified volume.
-   *
-   * @param  volume the HDDS volume which should be used to filter containers
-   * @return {@literal Iterator<Container<?>>}
-   */
-  public Iterator<Container<?>> getContainerIterator(HddsVolume volume) {
-    Preconditions.checkNotNull(volume);
-    Preconditions.checkNotNull(volume.getStorageID());
-    String volumeUuid = volume.getStorageID();
-    return containerMap.values().stream()
-        .filter(x -> volumeUuid.equals(x.getContainerData().getVolume()
-            .getStorageID()))
-        .iterator();
-  }
-
-  /**
-   * Return an iterator over the entries of {@link ContainerSet#containerMap}.
-   * @return containerMap Iterator
-   */
-  public Iterator<Map.Entry<Long, Container<?>>> getContainerMapIterator() {
-    return containerMap.entrySet().iterator();
-  }
-
-  /**
-   * Return a copy of the containerMap.
-   * @return containerMap
-   */
-  @VisibleForTesting
-  public Map<Long, Container<?>> getContainerMapCopy() {
-    return ImmutableMap.copyOf(containerMap);
-  }
-
-  public Map<Long, Container<?>> getContainerMap() {
-    return Collections.unmodifiableMap(containerMap);
-  }
-
-  /**
-   * A simple method for listing containers.
-   * <p>
-   * This call makes no guarantees about consistency of the data between
-   * different list calls. It just returns the best known data at that point
-   * in time. It is possible that this iteration misses certain containers
-   * from the listing.
-   *
-   * @param startContainerId - Return containers with Id &gt;= startContainerId.
-   * @param count - how many to return
-   * @param data - Actual containerData
-   */
-  public void listContainer(long startContainerId, long count,
-                            List<ContainerData> data) throws
-      StorageContainerException {
-    Preconditions.checkNotNull(data,
-        "Internal assertion: data cannot be null");
-    Preconditions.checkState(startContainerId >= 0,
-        "Start container Id cannot be negative");
-    Preconditions.checkState(count > 0,
-        "max number of containers returned " +
-            "must be positive");
-    LOG.debug("listContainer returns containerData starting from {} of count " +
-        "{}", startContainerId, count);
-    ConcurrentNavigableMap<Long, Container<?>> map;
-    if (startContainerId == 0) {
-      // tailMap(firstKey()) would throw NoSuchElementException on an empty
-      // map, so iterate over the whole map instead.
-      map = containerMap;
-    } else {
-      map = containerMap.tailMap(startContainerId, true);
-    }
-    int currentCount = 0;
-    for (Container<?> entry : map.values()) {
-      if (currentCount < count) {
-        data.add(entry.getContainerData());
-        currentCount++;
-      } else {
-        return;
-      }
-    }
-  }
-
-  /**
-   * Get container report.
-   *
-   * @return The container report.
-   */
-  public ContainerReportsProto getContainerReport() throws IOException {
-    LOG.debug("Starting container report iteration.");
-
-    // No need for locking since containerMap is a ConcurrentSkipListMap
-    // and we can never get the exact state since a close might happen
-    // after we have iterated past a point.
-    List<Container<?>> containers = new ArrayList<>(containerMap.values());
-
-    ContainerReportsProto.Builder crBuilder =
-        ContainerReportsProto.newBuilder();
-
-    for (Container<?> container: containers) {
-      crBuilder.addReports(container.getContainerReport());
-    }
-
-    return crBuilder.build();
-  }
-
-  public Set<Long> getMissingContainerSet() {
-    return missingContainerSet;
-  }
-
-  /**
-   * Builds the missing container set by taking a diff between the containers
-   * recorded in the Ratis snapshot and the containers actually found on this
-   * datanode. It also validates the BCSID stored in the snapshot file for
-   * each container against what the container itself reports.
-   * This will only be called during the initialization of the Datanode
-   * Service, when it is not yet a part of any write Pipeline.
-   * @param container2BCSIDMap Map of containerId to BCSID persisted in the
-   *                           Ratis snapshot
-   */
-  public void buildMissingContainerSetAndValidate(
-      Map<Long, Long> container2BCSIDMap) {
-    container2BCSIDMap.entrySet().parallelStream().forEach((mapEntry) -> {
-      long id = mapEntry.getKey();
-      if (!containerMap.containsKey(id)) {
-        LOG.warn("Adding container {} to missing container set.", id);
-        missingContainerSet.add(id);
-      } else {
-        Container<?> container = containerMap.get(id);
-        long containerBCSID = container.getBlockCommitSequenceId();
-        long snapshotBCSID = mapEntry.getValue();
-        if (containerBCSID < snapshotBCSID) {
-          LOG.warn(
-              "Marking container {} unhealthy as reported BCSID {} is smaller"
-                  + " than ratis snapshot recorded value {}", id,
-              containerBCSID, snapshotBCSID);
-          // just mark the container unhealthy. Once the DatanodeStateMachine
-          // thread starts it will send container report to SCM where these
-          // unhealthy containers would be detected
-          try {
-            container.markContainerUnhealthy();
-          } catch (StorageContainerException sce) {
-            // The container will still be marked unhealthy in memory even if
-            // exception occurs. It won't accept any new transactions and will
-            // be handled by SCM. Even if the DN restarts, it will still be
-            // detected as unhealthy as its BCSID won't change.
-            LOG.error("Unable to persist unhealthy state for container {}", id);
-          }
-        }
-      }
-    });
-
-  }
-}
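listContainer above pages through the sorted containerMap in batches; because
tailMap is inclusive, a caller drains every container by restarting each call
one past the last ID it received. A hypothetical usage sketch, inside a method
that propagates StorageContainerException; containerSet and process() are
assumptions for illustration:

List<ContainerData> batch = new ArrayList<>();
long nextId = 0;
while (true) {
  batch.clear();
  containerSet.listContainer(nextId, 100, batch);
  if (batch.isEmpty()) {
    break;                   // no containers at or above nextId
  }
  for (ContainerData data : batch) {
    process(data);           // hypothetical per-container handling
  }
  // tailMap is inclusive, so resume strictly after the last ID seen
  nextId = batch.get(batch.size() - 1).getContainerID() + 1;
}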
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
deleted file mode 100644
index 76f6b3c..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ /dev/null
@@ -1,597 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerDataProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .ContainerNotOpenException;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .InvalidContainerStateException;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ozone.audit.AuditAction;
-import org.apache.hadoop.ozone.audit.AuditEventStatus;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.AuditLoggerType;
-import org.apache.hadoop.ozone.audit.AuditMarker;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.audit.Auditor;
-import org.apache.hadoop.ozone.container.common.helpers
-    .ContainerCommandRequestPBHelper;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis
-    .DispatcherContext;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
-    ContainerDataProto.State;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-
-import io.opentracing.Scope;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-
-/**
- * Ozone Container dispatcher takes a call from the netty server and routes it
- * to the right handler function.
- */
-public class HddsDispatcher implements ContainerDispatcher, Auditor {
-
-  static final Logger LOG = LoggerFactory.getLogger(HddsDispatcher.class);
-  private static final AuditLogger AUDIT =
-      new AuditLogger(AuditLoggerType.DNLOGGER);
-  private final Map<ContainerType, Handler> handlers;
-  private final Configuration conf;
-  private final ContainerSet containerSet;
-  private final VolumeSet volumeSet;
-  private final StateContext context;
-  private final float containerCloseThreshold;
-  private String scmID;
-  private ContainerMetrics metrics;
-
-  /**
-   * Constructs an HddsDispatcher that receives calls from
-   * XceiverServerHandler.
-   */
-  public HddsDispatcher(Configuration config, ContainerSet contSet,
-      VolumeSet volumes, Map<ContainerType, Handler> handlers,
-      StateContext context, ContainerMetrics metrics) {
-    this.conf = config;
-    this.containerSet = contSet;
-    this.volumeSet = volumes;
-    this.context = context;
-    this.handlers = handlers;
-    this.metrics = metrics;
-    this.containerCloseThreshold = conf.getFloat(
-        HddsConfigKeys.HDDS_CONTAINER_CLOSE_THRESHOLD,
-        HddsConfigKeys.HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT);
-  }
-
-  @Override
-  public void init() {
-  }
-
-  @Override
-  public void shutdown() {
-  }
-
-  /**
-   * Returns true for exceptions which can be ignored for marking the container
-   * unhealthy.
-   * @param result ContainerCommandResponse error code.
-   * @return true if exception can be ignored, false otherwise.
-   */
-  private boolean canIgnoreException(Result result) {
-    switch (result) {
-    case SUCCESS:
-    case CONTAINER_UNHEALTHY:
-    case CLOSED_CONTAINER_IO:
-    case DELETE_ON_OPEN_CONTAINER:
-      return true;
-    default:
-      return false;
-    }
-  }
-
-  @Override
-  public void buildMissingContainerSetAndValidate(
-      Map<Long, Long> container2BCSIDMap) {
-    containerSet
-        .buildMissingContainerSetAndValidate(container2BCSIDMap);
-  }
-
-  @Override
-  public ContainerCommandResponseProto dispatch(
-      ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) {
-    String spanName = "HddsDispatcher." + msg.getCmdType().name();
-    try (Scope scope = TracingUtil
-        .importAndCreateScope(spanName, msg.getTraceID())) {
-      return dispatchRequest(msg, dispatcherContext);
-    }
-  }
-
-  @SuppressWarnings("methodlength")
-  private ContainerCommandResponseProto dispatchRequest(
-      ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) {
-    Preconditions.checkNotNull(msg);
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Command {}, trace ID: {} ", msg.getCmdType().toString(),
-          msg.getTraceID());
-    }
-
-    AuditAction action = ContainerCommandRequestPBHelper.getAuditAction(
-        msg.getCmdType());
-    EventType eventType = getEventType(msg);
-    Map<String, String> params =
-        ContainerCommandRequestPBHelper.getAuditParams(msg);
-
-    Container container;
-    ContainerType containerType;
-    ContainerCommandResponseProto responseProto = null;
-    long startTime = System.nanoTime();
-    ContainerProtos.Type cmdType = msg.getCmdType();
-    long containerID = msg.getContainerID();
-    metrics.incContainerOpsMetrics(cmdType);
-    container = getContainer(containerID);
-    boolean isWriteStage =
-        (cmdType == ContainerProtos.Type.WriteChunk && dispatcherContext != null
-            && dispatcherContext.getStage()
-            == DispatcherContext.WriteChunkStage.WRITE_DATA);
-    boolean isWriteCommitStage =
-        (cmdType == ContainerProtos.Type.WriteChunk && dispatcherContext != null
-            && dispatcherContext.getStage()
-            == DispatcherContext.WriteChunkStage.COMMIT_DATA);
-
-    // if the command gets executed outside of Ratis, the default write stage
-    // is WriteChunkStage.COMBINED
-    boolean isCombinedStage =
-        cmdType == ContainerProtos.Type.WriteChunk && (dispatcherContext == null
-            || dispatcherContext.getStage()
-            == DispatcherContext.WriteChunkStage.COMBINED);
-    Map<Long, Long> container2BCSIDMap = null;
-    if (dispatcherContext != null) {
-      container2BCSIDMap = dispatcherContext.getContainer2BCSIDMap();
-    }
-    if (isWriteCommitStage) {
-      // Check if the container Id exists in the loaded snapshot file.
-      // If it does not, this is a restart of the DN where we are reapplying
-      // a transaction which was not captured in the snapshot.
-      // Just add it to the map, and remove it from the missing container set
-      // as it might have been added there during "init".
-      Preconditions.checkNotNull(container2BCSIDMap);
-      if (container2BCSIDMap.get(containerID) == null) {
-        container2BCSIDMap
-            .put(containerID, container.getBlockCommitSequenceId());
-        containerSet.getMissingContainerSet().remove(containerID);
-      }
-    }
-    if (getMissingContainerSet().contains(containerID)) {
-      StorageContainerException sce = new StorageContainerException(
-          "ContainerID " + containerID
-              + " has been lost and and cannot be recreated on this DataNode",
-          ContainerProtos.Result.CONTAINER_MISSING);
-      audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
-      return ContainerUtils.logAndReturnError(LOG, sce, msg);
-    }
-
-    if (cmdType != ContainerProtos.Type.CreateContainer) {
-      /*
-       * Create Container should happen only as part of the WRITE_DATA phase
-       * of writeChunk.
-       */
-      if (container == null && ((isWriteStage || isCombinedStage)
-          || cmdType == ContainerProtos.Type.PutSmallFile)) {
-        // If container does not exist, create one for WriteChunk and
-        // PutSmallFile request
-        responseProto = createContainer(msg);
-        if (responseProto.getResult() != Result.SUCCESS) {
-          StorageContainerException sce = new StorageContainerException(
-              "ContainerID " + containerID + " creation failed",
-              responseProto.getResult());
-          audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
-          return ContainerUtils.logAndReturnError(LOG, sce, msg);
-        }
-        Preconditions.checkArgument(isWriteStage && container2BCSIDMap != null
-            || dispatcherContext == null);
-        if (container2BCSIDMap != null) {
-          // adds this container to list of containers created in the pipeline
-          // with initial BCSID recorded as 0.
-          container2BCSIDMap.putIfAbsent(containerID, Long.valueOf(0));
-        }
-        container = getContainer(containerID);
-      }
-
-      // if container not found return error
-      if (container == null) {
-        StorageContainerException sce = new StorageContainerException(
-            "ContainerID " + containerID + " does not exist",
-            ContainerProtos.Result.CONTAINER_NOT_FOUND);
-        audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
-        return ContainerUtils.logAndReturnError(LOG, sce, msg);
-      }
-      containerType = getContainerType(container);
-    } else {
-      if (!msg.hasCreateContainer()) {
-        audit(action, eventType, params, AuditEventStatus.FAILURE,
-            new Exception("MALFORMED_REQUEST"));
-        return ContainerUtils.malformedRequest(msg);
-      }
-      containerType = msg.getCreateContainer().getContainerType();
-    }
-    // Small performance optimization. We check if the operation is of type
-    // write before trying to send CloseContainerAction.
-    if (!HddsUtils.isReadOnly(msg)) {
-      sendCloseContainerActionIfNeeded(container);
-    }
-    Handler handler = getHandler(containerType);
-    if (handler == null) {
-      StorageContainerException ex = new StorageContainerException("Invalid " +
-          "ContainerType " + containerType,
-          ContainerProtos.Result.CONTAINER_INTERNAL_ERROR);
-      // log failure
-      audit(action, eventType, params, AuditEventStatus.FAILURE, ex);
-      return ContainerUtils.logAndReturnError(LOG, ex, msg);
-    }
-    responseProto = handler.handle(msg, container, dispatcherContext);
-    if (responseProto != null) {
-      metrics.incContainerOpsLatencies(cmdType, System.nanoTime() - startTime);
-
-      // If the request is of Write Type and the container operation
-      // is unsuccessful, it implies the applyTransaction on the container
-      // failed. All subsequent transactions on the container should fail and
-      // hence replica will be marked unhealthy here. In this case, a close
-      // container action will be sent to SCM to close the container.
-
-      // ApplyTransaction called on closed Container will fail with Closed
-      // container exception. In such cases, ignore the exception here
-      // If the container is already marked unhealthy, no need to change the
-      // state here.
-
-      Result result = responseProto.getResult();
-      if (cmdType == ContainerProtos.Type.CreateContainer
-          && result == Result.SUCCESS && dispatcherContext != null) {
-        Preconditions.checkNotNull(dispatcherContext.getContainer2BCSIDMap());
-        container2BCSIDMap.putIfAbsent(containerID, Long.valueOf(0));
-      }
-      if (!HddsUtils.isReadOnly(msg) && !canIgnoreException(result)) {
-        // If the container is open/closing and the container operation
-        // has failed, it should first be marked unhealthy and then the close
-        // container action initiated. This also implies this is the first
-        // transaction which has failed, so the container is marked unhealthy
-        // right here.
-        // Once container is marked unhealthy, all the subsequent write
-        // transactions will fail with UNHEALTHY_CONTAINER exception.
-
-        // For container to be moved to unhealthy state here, the container can
-        // only be in open or closing state.
-        State containerState = container.getContainerData().getState();
-        Preconditions.checkState(
-            containerState == State.OPEN || containerState == State.CLOSING);
-        // mark and persist the container state to be unhealthy
-        try {
-          handler.markContainerUnhealthy(container);
-        } catch (IOException ioe) {
-          // just log the error here in case marking the container fails,
-          // Return the actual failure response to the client
-          LOG.error("Failed to mark container " + containerID + " UNHEALTHY. ",
-              ioe);
-        }
-        // in any case, the in memory state of the container should be unhealthy
-        Preconditions.checkArgument(
-            container.getContainerData().getState() == State.UNHEALTHY);
-        sendCloseContainerActionIfNeeded(container);
-      }
-
-      if (result == Result.SUCCESS) {
-        updateBCSID(container, dispatcherContext, cmdType);
-        audit(action, eventType, params, AuditEventStatus.SUCCESS, null);
-      } else {
-        audit(action, eventType, params, AuditEventStatus.FAILURE,
-            new Exception(responseProto.getMessage()));
-      }
-
-      return responseProto;
-    } else {
-      // log failure
-      audit(action, eventType, params, AuditEventStatus.FAILURE,
-          new Exception("UNSUPPORTED_REQUEST"));
-      return ContainerUtils.unsupportedRequest(msg);
-    }
-  }
-
-  private void updateBCSID(Container container,
-      DispatcherContext dispatcherContext, ContainerProtos.Type cmdType) {
-    if (dispatcherContext != null && (cmdType == ContainerProtos.Type.PutBlock
-        || cmdType == ContainerProtos.Type.PutSmallFile)) {
-      Preconditions.checkNotNull(container);
-      long bcsID = container.getBlockCommitSequenceId();
-      long containerId = container.getContainerData().getContainerID();
-      Map<Long, Long> container2BCSIDMap;
-      container2BCSIDMap = dispatcherContext.getContainer2BCSIDMap();
-      Preconditions.checkNotNull(container2BCSIDMap);
-      Preconditions.checkArgument(container2BCSIDMap.containsKey(containerId));
-      // updates the latest BCSID on every putBlock or putSmallFile
-      // transaction over Ratis.
-      container2BCSIDMap.computeIfPresent(containerId, (u, v) -> v = bcsID);
-    }
-  }
-
-  /**
-   * Create a container using the input container request.
-   * @param containerRequest - the container request which requires container
-   *                         to be created.
-   * @return ContainerCommandResponseProto container command response.
-   */
-  @VisibleForTesting
-  ContainerCommandResponseProto createContainer(
-      ContainerCommandRequestProto containerRequest) {
-    ContainerProtos.CreateContainerRequestProto.Builder createRequest =
-        ContainerProtos.CreateContainerRequestProto.newBuilder();
-    ContainerType containerType =
-        ContainerProtos.ContainerType.KeyValueContainer;
-    createRequest.setContainerType(containerType);
-
-    ContainerCommandRequestProto.Builder requestBuilder =
-        ContainerCommandRequestProto.newBuilder()
-            .setCmdType(ContainerProtos.Type.CreateContainer)
-            .setContainerID(containerRequest.getContainerID())
-            .setCreateContainer(createRequest.build())
-            .setPipelineID(containerRequest.getPipelineID())
-            .setDatanodeUuid(containerRequest.getDatanodeUuid())
-            .setTraceID(containerRequest.getTraceID());
-
-    // TODO: Assuming the container type to be KeyValueContainer for now.
-    // We need to get container type from the containerRequest.
-    Handler handler = getHandler(containerType);
-    return handler.handle(requestBuilder.build(), null, null);
-  }
-
-  /**
-   * This will be called as a part of creating the log entry during
-   * startTransaction in Ratis on the leader node. In such cases, if the
-   * container is not in open state for writing we should just fail.
-   * Leader will propagate the exception to client.
-   * @param msg  container command proto
-   * @throws StorageContainerException if the container is not open for write
-   *         requests, or is in an invalid state for read requests.
-   */
-  @Override
-  public void validateContainerCommand(
-      ContainerCommandRequestProto msg) throws StorageContainerException {
-    long containerID = msg.getContainerID();
-    Container container = getContainer(containerID);
-    if (container == null) {
-      return;
-    }
-    ContainerType containerType = container.getContainerType();
-    ContainerProtos.Type cmdType = msg.getCmdType();
-    AuditAction action =
-        ContainerCommandRequestPBHelper.getAuditAction(cmdType);
-    EventType eventType = getEventType(msg);
-    Map<String, String> params =
-        ContainerCommandRequestPBHelper.getAuditParams(msg);
-    Handler handler = getHandler(containerType);
-    if (handler == null) {
-      StorageContainerException ex = new StorageContainerException(
-          "Invalid " + "ContainerType " + containerType,
-          ContainerProtos.Result.CONTAINER_INTERNAL_ERROR);
-      audit(action, eventType, params, AuditEventStatus.FAILURE, ex);
-      throw ex;
-    }
-
-    State containerState = container.getContainerState();
-    if (!HddsUtils.isReadOnly(msg) && containerState != State.OPEN) {
-      switch (cmdType) {
-      case CreateContainer:
-        // Create Container is idempotent. There is nothing to validate.
-        break;
-      case CloseContainer:
-        // If the container is unhealthy, closeContainer will be rejected
-        // while execution. Nothing to validate here.
-        break;
-      default:
-        // if the container is not open, no updates can happen. Just throw
-        // an exception
-        ContainerNotOpenException cex = new ContainerNotOpenException(
-            "Container " + containerID + " in " + containerState + " state");
-        audit(action, eventType, params, AuditEventStatus.FAILURE, cex);
-        throw cex;
-      }
-    } else if (HddsUtils.isReadOnly(msg) && containerState == State.INVALID) {
-      InvalidContainerStateException iex = new InvalidContainerStateException(
-          "Container " + containerID + " in " + containerState + " state");
-      audit(action, eventType, params, AuditEventStatus.FAILURE, iex);
-      throw iex;
-    }
-  }
-
-  /**
-   * If the container usage reaches the close threshold, or the container is
-   * marked unhealthy, we send a close ContainerAction to SCM.
-   * @param container current state of container
-   */
-  private void sendCloseContainerActionIfNeeded(Container container) {
-    // We have to find a more efficient way to close a container.
-    boolean isSpaceFull = isContainerFull(container);
-    boolean shouldClose = isSpaceFull || isContainerUnhealthy(container);
-    if (shouldClose) {
-      ContainerData containerData = container.getContainerData();
-      ContainerAction.Reason reason =
-          isSpaceFull ? ContainerAction.Reason.CONTAINER_FULL :
-              ContainerAction.Reason.CONTAINER_UNHEALTHY;
-      ContainerAction action = ContainerAction.newBuilder()
-          .setContainerID(containerData.getContainerID())
-          .setAction(ContainerAction.Action.CLOSE).setReason(reason).build();
-      context.addContainerActionIfAbsent(action);
-    }
-  }
-
-  private boolean isContainerFull(Container container) {
-    boolean isOpen = Optional.ofNullable(container)
-        .map(cont -> cont.getContainerState() == ContainerDataProto.State.OPEN)
-        .orElse(Boolean.FALSE);
-    if (isOpen) {
-      ContainerData containerData = container.getContainerData();
-      double containerUsedPercentage =
-          1.0f * containerData.getBytesUsed() / containerData.getMaxSize();
-      return containerUsedPercentage >= containerCloseThreshold;
-    } else {
-      return false;
-    }
-  }
-
-  private boolean isContainerUnhealthy(Container container) {
-    return Optional.ofNullable(container).map(
-        cont -> (cont.getContainerState() ==
-            ContainerDataProto.State.UNHEALTHY))
-        .orElse(Boolean.FALSE);
-  }
-
-  @Override
-  public Handler getHandler(ContainerProtos.ContainerType containerType) {
-    return handlers.get(containerType);
-  }
-
-  @Override
-  public void setScmId(String scmId) {
-    Preconditions.checkNotNull(scmId, "scmId Cannot be null");
-    if (this.scmID == null) {
-      this.scmID = scmId;
-      for (Map.Entry<ContainerType, Handler> handlerMap : handlers.entrySet()) {
-        handlerMap.getValue().setScmID(scmID);
-      }
-    }
-  }
-
-  @VisibleForTesting
-  public Container getContainer(long containerID) {
-    return containerSet.getContainer(containerID);
-  }
-
-  @VisibleForTesting
-  public Set<Long> getMissingContainerSet() {
-    return containerSet.getMissingContainerSet();
-  }
-
-  private ContainerType getContainerType(Container container) {
-    return container.getContainerType();
-  }
-
-  @VisibleForTesting
-  public void setMetricsForTesting(ContainerMetrics containerMetrics) {
-    this.metrics = containerMetrics;
-  }
-
-  private EventType getEventType(ContainerCommandRequestProto msg) {
-    return HddsUtils.isReadOnly(msg) ? EventType.READ : EventType.WRITE;
-  }
-
-  private void audit(AuditAction action, EventType eventType,
-      Map<String, String> params, AuditEventStatus result, Throwable exception){
-    AuditMessage amsg;
-    switch (result) {
-    case SUCCESS:
-      if(eventType == EventType.READ &&
-          AUDIT.getLogger().isInfoEnabled(AuditMarker.READ.getMarker())) {
-        amsg = buildAuditMessageForSuccess(action, params);
-        AUDIT.logReadSuccess(amsg);
-      } else if(eventType == EventType.WRITE &&
-          AUDIT.getLogger().isInfoEnabled(AuditMarker.WRITE.getMarker())) {
-        amsg = buildAuditMessageForSuccess(action, params);
-        AUDIT.logWriteSuccess(amsg);
-      }
-      break;
-
-    case FAILURE:
-      if(eventType == EventType.READ &&
-          AUDIT.getLogger().isErrorEnabled(AuditMarker.READ.getMarker())) {
-        amsg = buildAuditMessageForFailure(action, params, exception);
-        AUDIT.logReadFailure(amsg);
-      } else if(eventType == EventType.WRITE &&
-          AUDIT.getLogger().isErrorEnabled(AuditMarker.WRITE.getMarker())) {
-        amsg = buildAuditMessageForFailure(action, params, exception);
-        AUDIT.logWriteFailure(amsg);
-      }
-      break;
-
-    default:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Invalid audit event status - " + result);
-      }
-    }
-  }
-
-  //TODO: use GRPC to fetch user and ip details
-  @Override
-  public AuditMessage buildAuditMessageForSuccess(AuditAction op,
-      Map<String, String> auditMap) {
-    return new AuditMessage.Builder()
-        .setUser(null)
-        .atIp(null)
-        .forOperation(op.getAction())
-        .withParams(auditMap)
-        .withResult(AuditEventStatus.SUCCESS.toString())
-        .withException(null)
-        .build();
-  }
-
-  //TODO: use GRPC to fetch user and ip details
-  @Override
-  public AuditMessage buildAuditMessageForFailure(AuditAction op,
-      Map<String, String> auditMap, Throwable throwable) {
-    return new AuditMessage.Builder()
-        .setUser(null)
-        .atIp(null)
-        .forOperation(op.getAction())
-        .withParams(auditMap)
-        .withResult(AuditEventStatus.FAILURE.toString())
-        .withException(throwable)
-        .build();
-  }
-
-  enum EventType {
-    READ,
-    WRITE
-  }
-}
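isContainerFull above reduces to a single ratio test against the configured
containerCloseThreshold. A small worked sketch of that check; the 0.9
threshold in the comment is illustrative rather than a documented HDDS
default:

// True once the used fraction of the container reaches the threshold.
static boolean shouldClose(long bytesUsed, long maxSize, float threshold) {
  double usedFraction = 1.0 * bytesUsed / maxSize;
  return usedFraction >= threshold;
}

// Example: a 5 GB container with 4.6 GB written and a threshold of 0.9:
// 4.6 / 5.0 = 0.92 >= 0.9, so a CLOSE ContainerAction would be queued.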
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
deleted file mode 100644
index b736eb5..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.function.Function;
-
-/**
- * Map: containerId {@literal ->} (localId {@literal ->} {@link BlockData}).
- * The outer container map does not use locking, for better performance.
- * The inner {@link BlockDataMap} is synchronized.
- *
- * This class maintains the list of open keys per container. When a
- * closeContainer command arrives, it should autocommit all open keys of an
- * open container before marking the container as closed.
- */
-public class OpenContainerBlockMap {
-  /**
-   * Map: localId {@literal ->} BlockData.
-   *
-   * In order to support {@link #getAll()}, the update operations are
-   * synchronized.
-   */
-  static class BlockDataMap {
-    private final ConcurrentMap<Long, BlockData> blocks =
-        new ConcurrentHashMap<>();
-
-    BlockData get(long localId) {
-      return blocks.get(localId);
-    }
-
-    synchronized int removeAndGetSize(long localId) {
-      blocks.remove(localId);
-      return blocks.size();
-    }
-
-    synchronized BlockData computeIfAbsent(
-        long localId, Function<Long, BlockData> f) {
-      return blocks.computeIfAbsent(localId, f);
-    }
-
-    synchronized List<BlockData> getAll() {
-      return new ArrayList<>(blocks.values());
-    }
-  }
-
-  /**
-   * TODO : We may construct the openBlockMap by reading the Block Layout
-   * for each block inside a container listing all chunk files and reading the
-   * sizes. This will help to recreate the openKeys Map once the DataNode
-   * restarts.
-   *
-   * For now, we will track all open blocks of a container in the blockMap.
-   */
-  private final ConcurrentMap<Long, BlockDataMap> containers =
-      new ConcurrentHashMap<>();
-
-  /**
-   * Removes the Container matching with specified containerId.
-   * @param containerId containerId
-   */
-  public void removeContainer(long containerId) {
-    Preconditions
-        .checkState(containerId >= 0, "Container Id cannot be negative.");
-    containers.remove(containerId);
-  }
-
-  public void addChunk(BlockID blockID, ChunkInfo info) {
-    Preconditions.checkNotNull(info);
-    containers.computeIfAbsent(blockID.getContainerID(),
-        id -> new BlockDataMap()).computeIfAbsent(blockID.getLocalID(),
-          id -> new BlockData(blockID)).addChunk(info);
-  }
-
-  /**
-   * Removes the chunk from the chunkInfo list for the given block.
-   * @param blockID id of the block
-   * @param chunkInfo chunk info.
-   */
-  public void removeChunk(BlockID blockID, ChunkInfo chunkInfo) {
-    Preconditions.checkNotNull(chunkInfo);
-    Preconditions.checkNotNull(blockID);
-    Optional.ofNullable(containers.get(blockID.getContainerID()))
-        .map(blocks -> blocks.get(blockID.getLocalID()))
-        .ifPresent(keyData -> keyData.removeChunk(chunkInfo));
-  }
-
-  /**
-   * Returns the list of open blocks for the given container.
-   * @param containerId container id
-   * @return List of open blocks
-   */
-  public List<BlockData> getOpenBlocks(long containerId) {
-    return Optional.ofNullable(containers.get(containerId))
-        .map(BlockDataMap::getAll)
-        .orElseGet(Collections::emptyList);
-  }
-
-  /**
-   * removes the block from the block map.
-   * @param blockID - block ID
-   */
-  public void removeFromBlockMap(BlockID blockID) {
-    Preconditions.checkNotNull(blockID);
-    containers.computeIfPresent(blockID.getContainerID(), (containerId, blocks)
-        -> blocks.removeAndGetSize(blockID.getLocalID()) == 0 ? null : blocks);
-  }
-
-  /**
-   * Returns true if the block exists in the map, false otherwise.
-   *
-   * @param blockID  - Block ID.
-   * @return True, if it exists, false otherwise
-   */
-  public boolean checkIfBlockExists(BlockID blockID) {
-    BlockDataMap keyDataMap = containers.get(blockID.getContainerID());
-    return keyDataMap != null && keyDataMap.get(blockID.getLocalID()) != null;
-  }
-
-  @VisibleForTesting
-  BlockDataMap getBlockDataMap(long containerId) {
-    return containers.get(containerId);
-  }
-}
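
The deleted OpenContainerBlockMap pairs a lock-free outer ConcurrentHashMap
with a synchronized inner map so that getAll() sees a consistent snapshot. A
self-contained sketch of that two-level design, with plain strings standing in
for the Ozone BlockData type:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public final class TwoLevelBlockMapSketch {
  // Inner map: updates are synchronized so getAll() returns a consistent copy.
  static final class BlockMap {
    private final ConcurrentMap<Long, String> blocks = new ConcurrentHashMap<>();

    synchronized String computeIfAbsent(long localId) {
      return blocks.computeIfAbsent(localId, id -> "block-" + id);
    }

    synchronized int removeAndGetSize(long localId) {
      blocks.remove(localId);
      return blocks.size();
    }

    synchronized List<String> getAll() {
      return new ArrayList<>(blocks.values());
    }
  }

  // Outer map: no explicit locking; ConcurrentHashMap's atomic
  // computeIfAbsent/computeIfPresent provide the guarantees.
  private final ConcurrentMap<Long, BlockMap> containers =
      new ConcurrentHashMap<>();

  void addBlock(long containerId, long localId) {
    containers.computeIfAbsent(containerId, id -> new BlockMap())
        .computeIfAbsent(localId);
  }

  // Drop the inner map once its last block is gone, as removeFromBlockMap
  // does in the deleted class.
  void removeBlock(long containerId, long localId) {
    containers.computeIfPresent(containerId,
        (id, blocks) -> blocks.removeAndGetSize(localId) == 0 ? null : blocks);
  }

  public static void main(String[] args) {
    TwoLevelBlockMapSketch map = new TwoLevelBlockMapSketch();
    map.addBlock(1L, 100L);
    map.addBlock(1L, 101L);
    map.removeBlock(1L, 100L);
    System.out.println(map.containers.get(1L).getAll()); // [block-101]
  }
}
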
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java
deleted file mode 100644
index 4dde3d6c..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.impl;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.container.common.interfaces
-    .ContainerDeletionChoosingPolicy;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Randomly choosing containers for block deletion.
- */
-public class RandomContainerDeletionChoosingPolicy
-    implements ContainerDeletionChoosingPolicy {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RandomContainerDeletionChoosingPolicy.class);
-
-  @Override
-  public List<ContainerData> chooseContainerForBlockDeletion(int count,
-      Map<Long, ContainerData> candidateContainers)
-      throws StorageContainerException {
-    Preconditions.checkNotNull(candidateContainers,
-        "Internal assertion: candidate containers cannot be null");
-
-    int currentCount = 0;
-    List<ContainerData> result = new LinkedList<>();
-    ContainerData[] values = new ContainerData[candidateContainers.size()];
-    // to get a shuffle list
-    for (ContainerData entry : DFSUtil.shuffle(
-        candidateContainers.values().toArray(values))) {
-      if (currentCount < count) {
-        result.add(entry);
-        currentCount++;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Select container {} for block deletion, "
-                  + "pending deletion blocks num: {}.",
-              entry.getContainerID(),
-              ((KeyValueContainerData) entry).getNumPendingDeletionBlocks());
-        }
-      } else {
-        break;
-      }
-    }
-
-    return result;
-  }
-}
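
Stripped of the Ozone types, the random policy reduces to "shuffle the
candidates, keep the first count". A self-contained sketch; Collections.shuffle
stands in for the DFSUtil.shuffle call used above:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public final class RandomChoosingSketch {
  // Shuffle a copy of the candidates and keep at most `count` of them.
  static <T> List<T> chooseRandom(int count, List<T> candidates) {
    List<T> shuffled = new ArrayList<>(candidates);
    Collections.shuffle(shuffled);
    return shuffled.subList(0, Math.min(count, shuffled.size()));
  }

  public static void main(String[] args) {
    System.out.println(chooseRandom(2, List.of("c1", "c2", "c3", "c4")));
  }
}
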
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
deleted file mode 100644
index 061d09b..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
+++ /dev/null
@@ -1,300 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.StorageTypeProto;
-import org.apache.hadoop.ozone.container.common.interfaces
-    .StorageLocationReportMXBean;
-
-import java.io.IOException;
-
-/**
- * Storage location stats of datanodes that provide the backing store for
- * containers.
- *
- */
-public final class StorageLocationReport implements
-    StorageLocationReportMXBean {
-
-  private final String id;
-  private final boolean failed;
-  private final long capacity;
-  private final long scmUsed;
-  private final long remaining;
-  private final StorageType storageType;
-  private final String storageLocation;
-
-  private StorageLocationReport(String id, boolean failed, long capacity,
-      long scmUsed, long remaining, StorageType storageType,
-      String storageLocation) {
-    this.id = id;
-    this.failed = failed;
-    this.capacity = capacity;
-    this.scmUsed = scmUsed;
-    this.remaining = remaining;
-    this.storageType = storageType;
-    this.storageLocation = storageLocation;
-  }
-
-  public String getId() {
-    return id;
-  }
-
-  public boolean isFailed() {
-    return failed;
-  }
-
-  public long getCapacity() {
-    return capacity;
-  }
-
-  public long getScmUsed() {
-    return scmUsed;
-  }
-
-  public long getRemaining() {
-    return remaining;
-  }
-
-  public String getStorageLocation() {
-    return storageLocation;
-  }
-
-  @Override
-  public String getStorageTypeName() {
-    return storageType.name();
-  }
-
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-
-  private StorageTypeProto getStorageTypeProto() throws
-      IOException {
-    StorageTypeProto storageTypeProto;
-    switch (getStorageType()) {
-    case SSD:
-      storageTypeProto = StorageTypeProto.SSD;
-      break;
-    case DISK:
-      storageTypeProto = StorageTypeProto.DISK;
-      break;
-    case ARCHIVE:
-      storageTypeProto = StorageTypeProto.ARCHIVE;
-      break;
-    case PROVIDED:
-      storageTypeProto = StorageTypeProto.PROVIDED;
-      break;
-    case RAM_DISK:
-      storageTypeProto = StorageTypeProto.RAM_DISK;
-      break;
-    default:
-      throw new IOException("Illegal Storage Type specified");
-    }
-    return storageTypeProto;
-  }
-
-  private static StorageType getStorageType(StorageTypeProto proto) throws
-      IOException {
-    StorageType storageType;
-    switch (proto) {
-    case SSD:
-      storageType = StorageType.SSD;
-      break;
-    case DISK:
-      storageType = StorageType.DISK;
-      break;
-    case ARCHIVE:
-      storageType = StorageType.ARCHIVE;
-      break;
-    case PROVIDED:
-      storageType = StorageType.PROVIDED;
-      break;
-    case RAM_DISK:
-      storageType = StorageType.RAM_DISK;
-      break;
-    default:
-      throw new IOException("Illegal Storage Type specified");
-    }
-    return storageType;
-  }
-
-  /**
-   * Returns the SCMStorageReport protoBuf message for the Storage Location
-   * report.
-   * @return SCMStorageReport
-   * @throws IOException In case, the storage type specified is invalid.
-   */
-  public StorageReportProto getProtoBufMessage() throws IOException {
-    StorageReportProto.Builder srb = StorageReportProto.newBuilder();
-    return srb.setStorageUuid(getId())
-        .setCapacity(getCapacity())
-        .setScmUsed(getScmUsed())
-        .setRemaining(getRemaining())
-        .setStorageType(getStorageTypeProto())
-        .setStorageLocation(getStorageLocation())
-        .setFailed(isFailed())
-        .build();
-  }
-
-  /**
-   * Returns the StorageLocationReport from the protoBuf message.
-   * @param report SCMStorageReport
-   * @return StorageLocationReport
-   * @throws IOException in case of invalid storage type
-   */
-  public static StorageLocationReport getFromProtobuf(StorageReportProto report)
-      throws IOException {
-    StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
-    builder.setId(report.getStorageUuid())
-        .setStorageLocation(report.getStorageLocation());
-    if (report.hasCapacity()) {
-      builder.setCapacity(report.getCapacity());
-    }
-    if (report.hasScmUsed()) {
-      builder.setScmUsed(report.getScmUsed());
-    }
-    if (report.hasStorageType()) {
-      builder.setStorageType(getStorageType(report.getStorageType()));
-    }
-    if (report.hasRemaining()) {
-      builder.setRemaining(report.getRemaining());
-    }
-
-    if (report.hasFailed()) {
-      builder.setFailed(report.getFailed());
-    }
-    return builder.build();
-  }
-
-  /**
-   * Returns StorageLocation.Builder instance.
-   *
-   * @return StorageLocation.Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder class for building StorageLocationReport.
-   */
-  public static class Builder {
-    private String id;
-    private boolean failed;
-    private long capacity;
-    private long scmUsed;
-    private long remaining;
-    private StorageType storageType;
-    private String storageLocation;
-
-    /**
-     * Sets the storageId.
-     *
-     * @param idValue storageId
-     * @return StorageLocationReport.Builder
-     */
-    public Builder setId(String idValue) {
-      this.id = idValue;
-      return this;
-    }
-
-    /**
-     * Sets whether the volume failed or not.
-     *
-     * @param failedValue whether volume failed or not
-     * @return StorageLocationReport.Builder
-     */
-    public Builder setFailed(boolean failedValue) {
-      this.failed = failedValue;
-      return this;
-    }
-
-    /**
-     * Sets the capacity of volume.
-     *
-     * @param capacityValue capacity
-     * @return StorageLocationReport.Builder
-     */
-    public Builder setCapacity(long capacityValue) {
-      this.capacity = capacityValue;
-      return this;
-    }
-
-    /**
-     * Sets the scmUsed Value.
-     *
-     * @param scmUsedValue storage space used by scm
-     * @return StorageLocationReport.Builder
-     */
-    public Builder setScmUsed(long scmUsedValue) {
-      this.scmUsed = scmUsedValue;
-      return this;
-    }
-
-    /**
-     * Sets the remaining free space value.
-     *
-     * @param remainingValue remaining free space
-     * @return StorageLocationReport.Builder
-     */
-    public Builder setRemaining(long remainingValue) {
-      this.remaining = remainingValue;
-      return this;
-    }
-
-    /**
-     * Sets the storageType.
-     *
-     * @param storageTypeValue type of the storage used
-     * @return StorageLocationReport.Builder
-     */
-    public Builder setStorageType(StorageType storageTypeValue) {
-      this.storageType = storageTypeValue;
-      return this;
-    }
-
-    /**
-     * Sets the storageLocation.
-     *
-     * @param storageLocationValue location of the volume
-     * @return StorageLocationReport.Builder
-     */
-    public Builder setStorageLocation(String storageLocationValue) {
-      this.storageLocation = storageLocationValue;
-      return this;
-    }
-
-    /**
-     * Builds and returns StorageLocationReport instance.
-     *
-     * @return StorageLocationReport
-     */
-    public StorageLocationReport build() {
-      return new StorageLocationReport(id, failed, capacity, scmUsed,
-          remaining, storageType, storageLocation);
-    }
-
-  }
-
-}
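
The two switch statements above maintain a hand-written bidirectional mapping
between StorageType and StorageTypeProto. A sketch of an alternative that
derives both directions from a single table; the enums here are hypothetical
stand-ins for the real types:

import java.util.EnumMap;
import java.util.Map;

public final class EnumMappingSketch {
  enum StorageType { SSD, DISK, ARCHIVE }
  enum StorageTypeProto { SSD, DISK, ARCHIVE }

  private static final Map<StorageType, StorageTypeProto> TO_PROTO =
      new EnumMap<>(StorageType.class);
  private static final Map<StorageTypeProto, StorageType> FROM_PROTO =
      new EnumMap<>(StorageTypeProto.class);
  static {
    TO_PROTO.put(StorageType.SSD, StorageTypeProto.SSD);
    TO_PROTO.put(StorageType.DISK, StorageTypeProto.DISK);
    TO_PROTO.put(StorageType.ARCHIVE, StorageTypeProto.ARCHIVE);
    // Derive the reverse direction from the forward table.
    TO_PROTO.forEach((k, v) -> FROM_PROTO.put(v, k));
  }

  static StorageTypeProto toProto(StorageType t) {
    StorageTypeProto p = TO_PROTO.get(t);
    if (p == null) {
      throw new IllegalArgumentException("Illegal storage type: " + t);
    }
    return p;
  }

  static StorageType fromProto(StorageTypeProto p) {
    StorageType t = FROM_PROTO.get(p);
    if (t == null) {
      throw new IllegalArgumentException("Illegal storage type: " + p);
    }
    return t;
  }

  public static void main(String[] args) {
    System.out.println(toProto(StorageType.DISK));            // DISK
    System.out.println(fromProto(StorageTypeProto.ARCHIVE));  // ARCHIVE
  }
}

Keeping one table means the two directions cannot drift apart when a new
storage type is added.
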
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java
deleted file mode 100644
index 41fc267..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.impl;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.ozone.container.common.interfaces
-    .ContainerDeletionChoosingPolicy;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-/**
- * TopN ordered choosing policy that chooses containers based on the number
- * of pending deletion blocks.
- */
-public class TopNOrderedContainerDeletionChoosingPolicy
-    implements ContainerDeletionChoosingPolicy {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TopNOrderedContainerDeletionChoosingPolicy.class);
-
-  /** Customized comparator ordering container data by descending pending
-   *  deletion blocks. **/
-  private static final Comparator<KeyValueContainerData>
-        KEY_VALUE_CONTAINER_DATA_COMPARATOR = (KeyValueContainerData c1,
-                                               KeyValueContainerData c2) ->
-              Integer.compare(c2.getNumPendingDeletionBlocks(),
-                  c1.getNumPendingDeletionBlocks());
-
-  @Override
-  public List<ContainerData> chooseContainerForBlockDeletion(int count,
-      Map<Long, ContainerData> candidateContainers)
-      throws StorageContainerException {
-    Preconditions.checkNotNull(candidateContainers,
-        "Internal assertion: candidate containers cannot be null");
-
-    List<ContainerData> result = new LinkedList<>();
-    List<KeyValueContainerData> orderedList = new LinkedList<>();
-    for (ContainerData entry : candidateContainers.values()) {
-      orderedList.add((KeyValueContainerData)entry);
-    }
-    Collections.sort(orderedList, KEY_VALUE_CONTAINER_DATA_COMPARATOR);
-
-    // get top N list ordered by pending deletion blocks' number
-    int currentCount = 0;
-    for (KeyValueContainerData entry : orderedList) {
-      if (currentCount < count) {
-        if (entry.getNumPendingDeletionBlocks() > 0) {
-          result.add(entry);
-          currentCount++;
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(
-                "Select container {} for block deletion, "
-                    + "pending deletion blocks num: {}.",
-                entry.getContainerID(),
-                entry.getNumPendingDeletionBlocks());
-          }
-        } else {
-          LOG.debug("Stop looking for next container, there is no"
-              + " pending deletion block contained in remaining containers.");
-          break;
-        }
-      } else {
-        break;
-      }
-    }
-
-    return result;
-  }
-}
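
The ordering logic above is sort-descending-by-pending-blocks, then take a
prefix, stopping early once a candidate has nothing pending. A standalone
sketch with a hypothetical record in place of KeyValueContainerData:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public final class TopNChoosingSketch {
  record Candidate(long id, int pendingDeletionBlocks) {}

  static List<Candidate> chooseTopN(int count, List<Candidate> candidates) {
    List<Candidate> ordered = new ArrayList<>(candidates);
    ordered.sort(
        Comparator.comparingInt(Candidate::pendingDeletionBlocks).reversed());
    List<Candidate> result = new ArrayList<>();
    for (Candidate c : ordered) {
      if (result.size() >= count || c.pendingDeletionBlocks() == 0) {
        break; // sorted descending, so the rest have nothing pending either
      }
      result.add(c);
    }
    return result;
  }

  public static void main(String[] args) {
    System.out.println(chooseTopN(2, List.of(
        new Candidate(1, 5), new Candidate(2, 0), new Candidate(3, 9))));
  }
}
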
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
deleted file mode 100644
index 16da5d9..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.impl;
-
-/**
- This package contains the Ozone container implementation.
-**/
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java
deleted file mode 100644
index f6931e3..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-
-import java.io.IOException;
-import java.util.NoSuchElementException;
-
-/**
- * Block iterator for a container. Each container type needs to implement
- * this interface.
- * @param <T>
- */
-public interface BlockIterator<T> {
-
-  /**
-   * Checks whether the iterator has a next element. Returns true if it does,
-   * false otherwise.
-   * @return boolean
-   */
-  boolean hasNext() throws IOException;
-
-  /**
-   * Seek to first entry.
-   */
-  void seekToFirst();
-
-  /**
-   * Seek to last entry.
-   */
-  void seekToLast();
-
-  /**
-   * Get next block in the container.
-   * @return next block or null if there are no blocks
-   * @throws IOException
-   */
-  T nextBlock() throws IOException, NoSuchElementException;
-
-
-}
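
The intended consumption pattern for this interface is a hasNext/nextBlock
loop, with seekToFirst to rewind. A trivial in-memory analogue over a List,
purely for illustration:

import java.util.List;
import java.util.NoSuchElementException;

public final class ListBlockIterator<T> {
  private final List<T> blocks;
  private int next;

  ListBlockIterator(List<T> blocks) {
    this.blocks = blocks;
  }

  boolean hasNext() {
    return next < blocks.size();
  }

  T nextBlock() {
    if (!hasNext()) {
      throw new NoSuchElementException("no more blocks");
    }
    return blocks.get(next++);
  }

  void seekToFirst() {
    next = 0;
  }

  void seekToLast() {
    next = blocks.isEmpty() ? 0 : blocks.size() - 1;
  }

  public static void main(String[] args) {
    ListBlockIterator<String> it = new ListBlockIterator<>(List.of("b1", "b2"));
    while (it.hasNext()) {
      System.out.println(it.nextBlock());
    }
  }
}
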
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
deleted file mode 100644
index 7f7deaf..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.Map;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-
-import org.apache.hadoop.hdfs.util.Canceler;
-import org.apache.hadoop.hdfs.util.DataTransferThrottler;
-import org.apache.hadoop.hdfs.util.RwLock;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-
-/**
- * Interface for Container Operations.
- */
-public interface Container<CONTAINERDATA extends ContainerData> extends RwLock {
-
-  /**
-   * Creates a container.
-   *
-   * @throws StorageContainerException
-   */
-  void create(VolumeSet volumeSet, VolumeChoosingPolicy volumeChoosingPolicy,
-              String scmId) throws StorageContainerException;
-
-  /**
-   * Deletes the container.
-   *
-   * @throws StorageContainerException
-   */
-  void delete() throws StorageContainerException;
-
-  /**
-   * Update the container.
-   *
-   * @param metaData
-   * @param forceUpdate if true, update container forcibly.
-   * @throws StorageContainerException
-   */
-  void update(Map<String, String> metaData, boolean forceUpdate)
-      throws StorageContainerException;
-
-  /**
-   * Get metadata about the container.
-   *
-   * @return ContainerData - Container Data.
-   */
-  CONTAINERDATA getContainerData();
-
-  /**
-   * Get the Container Lifecycle state.
-   *
-   * @return ContainerLifeCycleState - Container State.
-   */
-  ContainerProtos.ContainerDataProto.State getContainerState();
-
-  /**
-   * Marks the container for closing. Moves the container to CLOSING state.
-   */
-  void markContainerForClose() throws StorageContainerException;
-
-  /**
-   * Marks the container replica as unhealthy.
-   */
-  void markContainerUnhealthy() throws StorageContainerException;
-
-  /**
-   * Quasi closes an open container. If it is already closed or does not
-   * exist, a StorageContainerException is thrown.
-   *
-   * @throws StorageContainerException
-   */
-  void quasiClose() throws StorageContainerException;
-
-  /**
-   * Closes an open or quasi-closed container. If it is already closed or
-   * does not exist, a StorageContainerException is thrown.
-   *
-   * @throws StorageContainerException
-   */
-  void close() throws StorageContainerException;
-
-  /**
-   * Return the ContainerType for the container.
-   */
-  ContainerProtos.ContainerType getContainerType();
-
-  /**
-   * Returns containerFile.
-   */
-  File getContainerFile();
-
-  /**
-   * updates the DeleteTransactionId.
-   * @param deleteTransactionId
-   */
-  void updateDeleteTransactionId(long deleteTransactionId);
-
-  /**
-   * Returns blockIterator for the container.
-   * @return BlockIterator
-   * @throws IOException
-   */
-  BlockIterator blockIterator() throws IOException;
-
-  /**
-   * Import the container from an external archive.
-   */
-  void importContainerData(InputStream stream,
-      ContainerPacker<CONTAINERDATA> packer) throws IOException;
-
-  /**
-   * Export all the data of the container to one output archive with the help
-   * of the packer.
-   *
-   */
-  void exportContainerData(OutputStream stream,
-      ContainerPacker<CONTAINERDATA> packer) throws IOException;
-
-  /**
-   * Returns containerReport for the container.
-   */
-  ContainerReplicaProto getContainerReport()
-      throws StorageContainerException;
-
-  /**
-   * updates the blockCommitSequenceId.
-   */
-  void updateBlockCommitSequenceId(long blockCommitSequenceId);
-
-  /**
-   * Returns the blockCommitSequenceId.
-   */
-  long getBlockCommitSequenceId();
-
-  /**
-   * Checks and reports the structural integrity of the container by
-   * scanning the container metadata to detect corruption.
-   * @return true if the integrity checks pass
-   */
-  boolean scanMetaData();
-
-  /**
-   * Return if the container data should be checksum verified to detect
-   * corruption. The result depends upon the current state of the container
-   * (e.g. if a container is accepting writes, it may not be a good idea to
-   * perform checksum verification to avoid concurrency issues).
-   */
-  boolean shouldScanData();
-
-  /**
-   * Perform checksum verification for the container data.
-   *
-   * @param throttler A reference of {@link DataTransferThrottler} used to
-   *                  perform I/O bandwidth throttling
-   * @param canceler  A reference of {@link Canceler} used to cancel the
-   *                  I/O bandwidth throttling (e.g. for shutdown purpose).
-   * @return true if the checksum verification succeeds
-   *         false otherwise
-   */
-  boolean scanData(DataTransferThrottler throttler, Canceler canceler);
-}
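
The close-related methods of the interface imply a small lifecycle:
markContainerForClose moves to CLOSING, quasiClose to QUASI_CLOSED, close to
CLOSED, and markContainerUnhealthy to UNHEALTHY. The transition table below is
one reading of the Javadoc above, not verified against the deleted
implementation:

import java.util.EnumMap;
import java.util.EnumSet;
import java.util.Map;
import java.util.Set;

public final class ContainerLifecycleSketch {
  enum State { OPEN, CLOSING, QUASI_CLOSED, CLOSED, UNHEALTHY }

  private static final Map<State, Set<State>> ALLOWED =
      new EnumMap<>(State.class);
  static {
    ALLOWED.put(State.OPEN,
        EnumSet.of(State.CLOSING, State.QUASI_CLOSED, State.CLOSED,
            State.UNHEALTHY));
    ALLOWED.put(State.CLOSING,
        EnumSet.of(State.QUASI_CLOSED, State.CLOSED, State.UNHEALTHY));
    ALLOWED.put(State.QUASI_CLOSED, EnumSet.of(State.CLOSED, State.UNHEALTHY));
    ALLOWED.put(State.CLOSED, EnumSet.noneOf(State.class));
    ALLOWED.put(State.UNHEALTHY, EnumSet.noneOf(State.class));
  }

  private State state = State.OPEN;

  void transition(State target) {
    if (!ALLOWED.get(state).contains(target)) {
      throw new IllegalStateException(state + " -> " + target + " not allowed");
    }
    state = target;
  }

  public static void main(String[] args) {
    ContainerLifecycleSketch c = new ContainerLifecycleSketch();
    c.transition(State.CLOSING);
    c.transition(State.QUASI_CLOSED);
    c.transition(State.CLOSED);
    System.out.println("final state: " + c.state);
  }
}
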
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java
deleted file mode 100644
index 84c4f90..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-
-import java.util.List;
-import java.util.Map;
-
-/**
- * This interface is used for choosing desired containers for
- * block deletion.
- */
-public interface ContainerDeletionChoosingPolicy {
-
-  /**
-   * Chooses desired containers for block deletion.
-   * @param count
-   *          how many to return
-   * @param candidateContainers
-   *          candidate containers collection
-   * @return container data list
-   * @throws StorageContainerException
-   */
-  List<ContainerData> chooseContainerForBlockDeletion(int count,
-      Map<Long, ContainerData> candidateContainers)
-      throws StorageContainerException;
-
-  /**
-   * Determine if the container has suitable type for this policy.
-   * @param type  type of the container
-   * @return whether the container type suitable for this policy.
-   */
-  default boolean isValidContainerType(ContainerProtos.ContainerType type) {
-    if (type == ContainerProtos.ContainerType.KeyValueContainer) {
-      return true;
-    }
-    return false;
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
deleted file mode 100644
index ee0b6bc..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-
-import java.util.Map;
-
-/**
- * Dispatcher acts as the bridge between the transport layer and
- * the actual container layer. This layer is capable of transforming
- * protobuf objects into the corresponding classes and issuing the function
- * calls into the lower layers.
- *
- * The reply from the request is dispatched to the client.
- */
-public interface ContainerDispatcher {
-  /**
-   * Dispatches commands to container layer.
-   * @param msg - Command Request
-   * @param context - Context info related to ContainerStateMachine
-   * @return Command Response
-   */
-  ContainerCommandResponseProto dispatch(ContainerCommandRequestProto msg,
-      DispatcherContext context);
-
-  /**
-   * Validates whether the container command should be executed on the pipeline
- * or not. Will be invoked by the leader node in the Ratis pipeline.
-   * @param msg containerCommand
-   * @throws StorageContainerException
-   */
-  void validateContainerCommand(
-      ContainerCommandRequestProto msg) throws StorageContainerException;
-
-  /**
-   * Initialize the Dispatcher.
-   */
-  void init();
-
-  /**
- * Finds and builds the missing containers in the ContainerSet, e.g. in
- * case of a lost disk. It also validates the BCSID of the containers found.
-   */
-  void buildMissingContainerSetAndValidate(Map<Long, Long> container2BCSIDMap);
-
-  /**
-   * Shutdown Dispatcher services.
-   */
-  void shutdown();
-
-  /**
-   * Returns the handler for the specified containerType.
-   * @param containerType
-   * @return
-   */
-  Handler getHandler(ContainerProtos.ContainerType containerType);
-
-  /**
-   * If scmId is not set, this will set scmId, otherwise it is a no-op.
-   * @param scmId
-   */
-  void setScmId(String scmId);
-}
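
getHandler(containerType) suggests a registry keyed by container type behind
the dispatch call. A minimal sketch of that shape; the names are hypothetical,
and the real dispatcher additionally validates commands and rebuilds missing
containers:

import java.util.Map;
import java.util.function.Function;

public final class DispatcherSketch {
  enum ContainerType { KEY_VALUE }
  record Request(ContainerType type, String payload) {}

  // One handler per container type, looked up on every dispatch.
  private final Map<ContainerType, Function<Request, String>> handlers =
      Map.of(ContainerType.KEY_VALUE, r -> "handled " + r.payload());

  String dispatch(Request msg) {
    Function<Request, String> handler = handlers.get(msg.type());
    if (handler == null) {
      throw new IllegalArgumentException("No handler for " + msg.type());
    }
    return handler.apply(msg);
  }

  public static void main(String[] args) {
    System.out.println(new DispatcherSketch()
        .dispatch(new Request(ContainerType.KEY_VALUE, "putBlock")));
  }
}
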
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java
deleted file mode 100644
index 9c5fcea..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
-
-import java.io.IOException;
-import java.nio.file.Path;
-
-/**
- * Returns physical path locations where the containers will be created.
- */
-public interface ContainerLocationManager {
-  /**
-   * Returns the path where the container should be placed from a set of
-   * locations.
-   *
-   * @return A path where we should place this container and metadata.
-   * @throws IOException
-   */
-  Path getContainerPath() throws IOException;
-
-  /**
-   * Returns the path where the container data files are stored.
-   *
-   * @return a path where we place the LevelDB and data files of a container.
-   * @throws IOException
-   */
-  Path getDataPath(String containerName) throws IOException;
-
-  /**
-   * Returns an array of storage location usage report.
-   * @return storage location usage report.
-   */
-  StorageLocationReport[] getLocationReport() throws IOException;
-
-  /**
-   * Supports clean shutdown of container.
-   *
-   * @throws IOException
-   */
-  void shutdown() throws IOException;
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java
deleted file mode 100644
index 97d2dc3..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-import java.io.IOException;
-
-/**
- * Returns physical path locations where the containers will be created.
- */
-public interface ContainerLocationManagerMXBean {
-
-  /**
-   * Returns an array of storage location usage report.
-   *
-   * @return storage location usage report.
-   */
-  StorageLocationReportMXBean[] getLocationReport() throws IOException;
-
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerPacker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerPacker.java
deleted file mode 100644
index 8308c23..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerPacker.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-
-/**
- * Service to pack/unpack container data to/from a single byte
- * stream.
- */
-public interface ContainerPacker<CONTAINERDATA extends ContainerData> {
-
-  /**
-   * Extract the container data to the path defined by the container.
-   * <p>
-   * This doesn't contain the extraction of the container descriptor file.
-   *
-   * @return the byte content of the descriptor (which won't be written to a
-   * file but returned).
-   */
-  byte[] unpackContainerData(Container<CONTAINERDATA> container,
-      InputStream inputStream)
-      throws IOException;
-
-  /**
-   * Compress all the container data (chunk data, metadata db AND container
-   * descriptor) to one single archive.
-   */
-  void pack(Container<CONTAINERDATA> container, OutputStream destination)
-      throws IOException;
-
-  /**
-   * Read the descriptor from the finished archive to get the data before
-   * importing the container.
-   */
-  byte[] unpackContainerDescriptor(InputStream inputStream)
-      throws IOException;
-}
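
The contract is to stream a container's files into one archive and to read the
descriptor back out before importing. A simplified sketch using java.util.zip
in place of the removed tar-based packer (the zip format and entry name are my
substitutions, for illustration only):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import java.util.zip.ZipOutputStream;

public final class ZipPackerSketch {
  // Pack named file contents into one archive stream.
  static byte[] pack(Map<String, byte[]> files) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    try (ZipOutputStream zip = new ZipOutputStream(out)) {
      for (Map.Entry<String, byte[]> e : files.entrySet()) {
        zip.putNextEntry(new ZipEntry(e.getKey()));
        zip.write(e.getValue());
        zip.closeEntry();
      }
    }
    return out.toByteArray();
  }

  // Scan the archive for one entry and return its bytes, mirroring
  // unpackContainerDescriptor's read-before-import role.
  static byte[] readEntry(byte[] archive, String name) throws IOException {
    try (ZipInputStream zip =
        new ZipInputStream(new ByteArrayInputStream(archive))) {
      for (ZipEntry e; (e = zip.getNextEntry()) != null; ) {
        if (e.getName().equals(name)) {
          return zip.readAllBytes(); // reads to the end of the current entry
        }
      }
    }
    throw new IOException("missing entry: " + name);
  }

  public static void main(String[] args) throws IOException {
    byte[] archive = pack(Map.of(
        "descriptor", "container metadata".getBytes(StandardCharsets.UTF_8)));
    System.out.println(new String(
        readEntry(archive, "descriptor"), StandardCharsets.UTF_8));
  }
}
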
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
deleted file mode 100644
index 8c3b981..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerType;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
-import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
-
-/**
- * Dispatcher sends ContainerCommandRequests to Handler. Each container type
- * should have its own Handler implementation.
- */
-@SuppressWarnings("visibilitymodifier")
-public abstract class Handler {
-
-  protected final Configuration conf;
-  protected final ContainerSet containerSet;
-  protected final VolumeSet volumeSet;
-  protected String scmID;
-  protected final ContainerMetrics metrics;
-
-  private final StateContext context;
-  private final DatanodeDetails datanodeDetails;
-
-  protected Handler(Configuration config, StateContext context,
-      ContainerSet contSet, VolumeSet volumeSet,
-      ContainerMetrics containerMetrics) {
-    this.conf = config;
-    this.context = context;
-    this.containerSet = contSet;
-    this.volumeSet = volumeSet;
-    this.metrics = containerMetrics;
-    this.datanodeDetails = context.getParent().getDatanodeDetails();
-  }
-
-  public static Handler getHandlerForContainerType(
-      final ContainerType containerType, final Configuration config,
-      final StateContext context, final ContainerSet contSet,
-      final VolumeSet volumeSet, final ContainerMetrics metrics) {
-    switch (containerType) {
-    case KeyValueContainer:
-      return new KeyValueHandler(config, context, contSet, volumeSet, metrics);
-    default:
-      throw new IllegalArgumentException("Handler for ContainerType: " +
-        containerType + " doesn't exist.");
-    }
-  }
-
-  /**
-   * Returns the Id of this datanode.
-   * @return datanode Id
-   */
-  protected DatanodeDetails getDatanodeDetails() {
-    return datanodeDetails;
-  }
-
-  /**
-   * This should be called whenever there is state change. It will trigger
-   * an ICR to SCM.
-   *
-   * @param container Container for which ICR has to be sent
-   */
-  protected void sendICR(final Container container)
-      throws StorageContainerException {
-    IncrementalContainerReportProto icr = IncrementalContainerReportProto
-        .newBuilder()
-        .addReport(container.getContainerReport())
-        .build();
-    context.addReport(icr);
-    context.getParent().triggerHeartbeat();
-  }
-
-  public abstract ContainerCommandResponseProto handle(
-      ContainerCommandRequestProto msg, Container container,
-      DispatcherContext dispatcherContext);
-
-  /**
-   * Imports container from a raw input stream.
-   */
-  public abstract Container importContainer(
-      long containerID,
-      long maxSize,
-      String originPipelineId,
-      String originNodeId,
-      InputStream rawContainerStream,
-      TarContainerPacker packer)
-      throws IOException;
-
-  /**
-   * Exports container to the output stream.
-   */
-  public abstract void exportContainer(
-      Container container,
-      OutputStream outputStream,
-      TarContainerPacker packer)
-      throws IOException;
-
-  /**
-   * Stop the Handler.
-   */
-  public abstract void stop();
-
-  /**
-   * Marks the container for closing. Moves the container to CLOSING state.
-   *
-   * @param container container to update
-   * @throws IOException in case of exception
-   */
-  public abstract void markContainerForClose(Container container)
-      throws IOException;
-
-  /**
-   * Marks the container unhealthy. Moves the container to UNHEALTHY state.
-   *
-   * @param container container to update
-   * @throws IOException in case of exception
-   */
-  public abstract void markContainerUnhealthy(Container container)
-      throws IOException;
-
-  /**
-   * Moves the Container to QUASI_CLOSED state.
-   *
-   * @param container container to be quasi closed
-   * @throws IOException
-   */
-  public abstract void quasiCloseContainer(Container container)
-      throws IOException;
-
-  /**
-   * Moves the Container to CLOSED state.
-   *
-   * @param container container to be closed
-   * @throws IOException
-   */
-  public abstract void closeContainer(Container container)
-      throws IOException;
-
-  /**
-   * Deletes the given container.
-   *
-   * @param container container to be deleted
-   * @param force if this is set to true, we delete container without checking
-   * state of the container.
-   * @throws IOException
-   */
-  public abstract void deleteContainer(Container container, boolean force)
-      throws IOException;
-
-  public void setScmID(String scmId) {
-    this.scmID = scmId;
-  }
-
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java
deleted file mode 100644
index fd06367..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-/**
- * Contract to define properties available on the JMX interface.
- */
-public interface StorageLocationReportMXBean {
-
-  String getId();
-
-  boolean isFailed();
-
-  long getCapacity();
-
-  long getScmUsed();
-
-  long getRemaining();
-
-  String getStorageLocation();
-
-  String getStorageTypeName();
-
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/VolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/VolumeChoosingPolicy.java
deleted file mode 100644
index 7de0e2a..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/VolumeChoosingPolicy.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * This interface specifies the policy for choosing volumes to store replicas.
- */
-@InterfaceAudience.Private
-public interface VolumeChoosingPolicy {
-
-  /**
-   * Choose a volume to place a container,
-   * given a list of volumes and the max container size sought for storage.
-   *
-   * The implementations of this interface must be thread-safe.
-   *
-   * @param volumes - a list of available volumes.
-   * @param maxContainerSize - the maximum size of the container for which a
-   *                         volume is sought.
-   * @return the chosen volume.
-   * @throws IOException when disks are unavailable or are full.
-   */
-  HddsVolume chooseVolume(List<HddsVolume> volumes, long maxContainerSize)
-      throws IOException;
-}
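
A common shape for such a policy is "pick the volume with the most free space
that still fits the container". A stateless, and therefore trivially
thread-safe, standalone sketch; this is an assumed policy, not one of the
deleted implementations:

import java.util.Comparator;
import java.util.List;

public final class MostFreeSpaceSketch {
  record Volume(String path, long availableBytes) {}

  // Stateless, so thread-safe as the interface contract requires.
  static Volume chooseVolume(List<Volume> volumes, long maxContainerSize) {
    return volumes.stream()
        .filter(v -> v.availableBytes() >= maxContainerSize)
        .max(Comparator.comparingLong(Volume::availableBytes))
        .orElseThrow(() -> new IllegalStateException(
            "No volume has " + maxContainerSize + " bytes free"));
  }

  public static void main(String[] args) {
    List<Volume> volumes = List.of(
        new Volume("/data1", 10_000L), new Volume("/data2", 50_000L));
    System.out.println(chooseVolume(volumes, 20_000L).path()); // /data2
  }
}
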
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java
deleted file mode 100644
index d83bf95..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.interfaces;
-/**
- This package contains common Ozone container interfaces.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java
deleted file mode 100644
index 1638a36..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common;
-/**
-  Common Container Layer. At this layer the abstractions are:
-
- 1. Containers - Both data and metadata containers.
- 2. Keys - Key/Value pairs that live inside a container.
- 3. Chunks - Keys can be composed of many chunks.
-
- Ozone uses these abstractions to build Volumes, Buckets and Keys.
-
- **/
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java
deleted file mode 100644
index f52387b..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.report;
-
-import java.util.Iterator;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_COMMAND_STATUS_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT;
-
-/**
- * Publishes CommandStatusReport which will be sent to SCM as part of
- * heartbeat. CommandStatusReport consists of the following information:
- * - type       : type of command.
- * - status     : status of command execution (PENDING, EXECUTED, FAILURE).
- * - cmdId      : Command id.
- * - msg        : optional message.
- */
-public class CommandStatusReportPublisher extends
-    ReportPublisher<CommandStatusReportsProto> {
-
-  private long cmdStatusReportInterval = -1;
-
-  @Override
-  protected long getReportFrequency() {
-    if (cmdStatusReportInterval == -1) {
-      cmdStatusReportInterval = getConf().getTimeDuration(
-          HDDS_COMMAND_STATUS_REPORT_INTERVAL,
-          HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT,
-          TimeUnit.MILLISECONDS);
-
-      long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
-          getConf());
-
-      Preconditions.checkState(
-          heartbeatFrequency <= cmdStatusReportInterval,
-          HDDS_COMMAND_STATUS_REPORT_INTERVAL +
-              " cannot be configured lower than heartbeat frequency.");
-    }
-    return cmdStatusReportInterval;
-  }
-
-  @Override
-  protected CommandStatusReportsProto getReport() {
-    Map<Long, CommandStatus> map = this.getContext()
-        .getCommandStatusMap();
-    Iterator<Long> iterator = map.keySet().iterator();
-    CommandStatusReportsProto.Builder builder = CommandStatusReportsProto
-        .newBuilder();
-
-    iterator.forEachRemaining(key -> {
-      CommandStatus cmdStatus = map.get(key);
-      // If status is still pending then don't remove it from map as
-      // CommandHandler will change its status when it works on this command.
-      if (!cmdStatus.getStatus().equals(Status.PENDING)) {
-        builder.addCmdStatus(cmdStatus.getProtoBufMessage());
-        map.remove(key);
-      }
-    });
-    return builder.getCmdStatusCount() > 0 ? builder.build() : null;
-  }
-}
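The checkState() above ties the report interval to the heartbeat interval: reports piggyback on heartbeats, so a report can never usefully be produced more often than heartbeats are sent. A minimal configuration sketch; the heartbeat key name "hdds.heartbeat.interval" is an assumption here, since only HDDS_COMMAND_STATUS_REPORT_INTERVAL appears in this file:

OzoneConfiguration conf = new OzoneConfiguration();
// 30s heartbeat, 60s command-status report: valid, since 30s <= 60s.
conf.setTimeDuration("hdds.heartbeat.interval", 30, TimeUnit.SECONDS);
conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL,
    60, TimeUnit.SECONDS);
// Swapping the two values would make getReportFrequency() fail its
// Preconditions.checkState() with an IllegalStateException.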
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java
deleted file mode 100644
index b92e3b0..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.report;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT;
-
-
-/**
- * Publishes ContainerReport which will be sent to SCM as part of heartbeat.
- * ContainerReport consists of the following information about each container:
- *   - containerID
- *   - size
- *   - used
- *   - keyCount
- *   - readCount
- *   - writeCount
- *   - readBytes
- *   - writeBytes
- *   - finalHash
- *   - LifeCycleState
- *
- */
-public class ContainerReportPublisher extends
-    ReportPublisher<ContainerReportsProto> {
-
-  private Long containerReportInterval = null;
-
-  @Override
-  protected long getReportFrequency() {
-    if (containerReportInterval == null) {
-      containerReportInterval = getConf().getTimeDuration(
-          HDDS_CONTAINER_REPORT_INTERVAL,
-          HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT,
-          TimeUnit.MILLISECONDS);
-
-      long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
-          getConf());
-
-      Preconditions.checkState(
-          heartbeatFrequency <= containerReportInterval,
-          HDDS_CONTAINER_REPORT_INTERVAL +
-              " cannot be configured lower than heartbeat frequency.");
-    }
-    // Add a random delay (0~30s) on top of the container report
-    // interval (60s) so that the SCM is not overwhelmed by container
-    // reports arriving in sync from every datanode.
-    return containerReportInterval + getRandomReportDelay();
-  }
-
-  private long getRandomReportDelay() {
-    return RandomUtils.nextLong(0, containerReportInterval);
-  }
-
-  @Override
-  protected ContainerReportsProto getReport() throws IOException {
-    return getContext().getParent().getContainer()
-        .getController().getContainerReport();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java
deleted file mode 100644
index 6ac99dd..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.report;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_NODE_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_NODE_REPORT_INTERVAL_DEFAULT;
-
-/**
- * Publishes NodeReport which will be sent to SCM as part of heartbeat.
- * NodeReport consists of:
- *   - NodeIOStats
- *   - VolumeReports
- */
-public class NodeReportPublisher extends ReportPublisher<NodeReportProto> {
-
-  private Long nodeReportInterval;
-
-  @Override
-  protected long getReportFrequency() {
-    if (nodeReportInterval == null) {
-      nodeReportInterval = getConf().getTimeDuration(
-          HDDS_NODE_REPORT_INTERVAL,
-          HDDS_NODE_REPORT_INTERVAL_DEFAULT,
-          TimeUnit.MILLISECONDS);
-
-      long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
-          getConf());
-
-      Preconditions.checkState(
-          heartbeatFrequency <= nodeReportInterval,
-          HDDS_NODE_REPORT_INTERVAL +
-              " cannot be configured lower than heartbeat frequency.");
-    }
-    return nodeReportInterval;
-  }
-
-  @Override
-  protected NodeReportProto getReport() throws IOException {
-    return getContext().getParent().getContainer().getNodeReport();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java
deleted file mode 100644
index e7f4347..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.report;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT;
-
-
-/**
- * Publishes PipelineReport which will be sent to SCM as part of heartbeat.
- * PipelineReport consists of the following information about each pipeline:
- *   - pipelineID
- *
- */
-public class PipelineReportPublisher extends
-    ReportPublisher<PipelineReportsProto> {
-
-  private Long pipelineReportInterval = null;
-
-  @Override
-  protected long getReportFrequency() {
-    if (pipelineReportInterval == null) {
-      pipelineReportInterval = getConf().getTimeDuration(
-          HDDS_PIPELINE_REPORT_INTERVAL,
-          HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT,
-          TimeUnit.MILLISECONDS);
-
-      long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
-          getConf());
-
-      Preconditions.checkState(
-          heartbeatFrequency <= pipelineReportInterval,
-              HDDS_PIPELINE_REPORT_INTERVAL +
-              " cannot be configured lower than heartbeat frequency.");
-    }
-    // Add a random delay (0~30s) on top of the pipeline report
-    // interval (60s) so that the SCM is not overwhelmed by pipeline
-    // reports arriving in sync from every datanode.
-    return pipelineReportInterval + getRandomReportDelay();
-  }
-
-  private long getRandomReportDelay() {
-    return RandomUtils.nextLong(0, pipelineReportInterval);
-  }
-
-  @Override
-  protected PipelineReportsProto getReport() {
-    return getContext().getParent().getContainer().getPipelineReport();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
deleted file mode 100644
index 536d4cc..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.report;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-/**
- * ReportManager is responsible for managing all the {@link ReportPublisher}s
- * and also provides the {@link ScheduledExecutorService} that each
- * ReportPublisher should use for scheduling its reports.
- */
-public final class ReportManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ReportManager.class);
-
-  private final StateContext context;
-  private final List<ReportPublisher> publishers;
-  private final ScheduledExecutorService executorService;
-
-  /**
-   * Construction of {@link ReportManager} should be done via
-   * {@link ReportManager.Builder}.
-   *
-   * @param context StateContext which holds the report
-   * @param publishers List of publishers which generate reports
-   */
-  private ReportManager(StateContext context,
-                        List<ReportPublisher> publishers) {
-    this.context = context;
-    this.publishers = publishers;
-    this.executorService = HadoopExecutors.newScheduledThreadPool(
-        publishers.size(),
-        new ThreadFactoryBuilder().setDaemon(true)
-            .setNameFormat("Datanode ReportManager Thread - %d").build());
-  }
-
-  /**
-   * Initializes ReportManager, also initializes all the configured
-   * report publishers.
-   */
-  public void init() {
-    for (ReportPublisher publisher : publishers) {
-      publisher.init(context, executorService);
-    }
-  }
-
-  /**
-   * Shutdown the ReportManager.
-   */
-  public void shutdown() {
-    executorService.shutdown();
-    try {
-      executorService.awaitTermination(5, TimeUnit.SECONDS);
-    } catch (Exception e) {
-      LOG.error("Failed to shutdown Report Manager", e);
-    }
-  }
-
-  /**
-   * Returns a new {@link ReportManager.Builder} which can be used to
-   * construct a {@link ReportManager}.
-   * @param conf  - Conf
-   * @return builder - Builder.
-   */
-  public static Builder newBuilder(Configuration conf) {
-    return new Builder(conf);
-  }
-
-  /**
-   * Builder to construct {@link ReportManager}.
-   */
-  public static final class Builder {
-
-    private StateContext stateContext;
-    private List<ReportPublisher> reportPublishers;
-    private ReportPublisherFactory publisherFactory;
-
-
-    private Builder(Configuration conf) {
-      this.reportPublishers = new ArrayList<>();
-      this.publisherFactory = new ReportPublisherFactory(conf);
-    }
-
-    /**
-     * Sets the {@link StateContext}.
-     *
-     * @param context StateContext
-     *
-     * @return ReportManager.Builder
-     */
-    public Builder setStateContext(StateContext context) {
-      stateContext = context;
-      return this;
-    }
-
-    /**
-     * Adds publisher for the corresponding report.
-     *
-     * @param report report for which publisher needs to be added
-     *
-     * @return ReportManager.Builder
-     */
-    public Builder addPublisherFor(Class<? extends GeneratedMessage> report) {
-      reportPublishers.add(publisherFactory.getPublisherFor(report));
-      return this;
-    }
-
-    /**
-     * Adds new ReportPublisher to the ReportManager.
-     *
-     * @param publisher ReportPublisher
-     *
-     * @return ReportManager.Builder
-     */
-    public Builder addPublisher(ReportPublisher publisher) {
-      reportPublishers.add(publisher);
-      return this;
-    }
-
-    /**
-     * Build and returns ReportManager.
-     *
-     * @return {@link ReportManager}
-     */
-    public ReportManager build() {
-      Preconditions.checkNotNull(stateContext);
-      return new ReportManager(stateContext, reportPublishers);
-    }
-
-  }
-}
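Putting the builder together follows the pattern used by DatanodeStateMachine further down in this patch; a condensed usage sketch:

ReportManager reportManager = ReportManager.newBuilder(conf)
    .setStateContext(context)
    .addPublisherFor(NodeReportProto.class)
    .addPublisherFor(ContainerReportsProto.class)
    .build();
reportManager.init();      // schedules every registered publisher
// ... datanode runs, publishers push reports into the StateContext ...
reportManager.shutdown();  // waits up to 5 seconds for the executor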
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java
deleted file mode 100644
index e3910db..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.report;
-
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine.DatanodeStates;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Abstract class responsible for scheduling the reports based on the
- * configured interval. All the ReportPublishers should extend this class.
- */
-public abstract class ReportPublisher<T extends GeneratedMessage>
-    implements Configurable, Runnable {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      ReportPublisher.class);
-
-  private Configuration config;
-  private StateContext context;
-  private ScheduledExecutorService executor;
-
-  /**
-   * Initializes ReportPublisher with stateContext and executorService.
-   *
-   * @param stateContext Datanode state context
-   * @param executorService ScheduledExecutorService to schedule reports
-   */
-  public void init(StateContext stateContext,
-                   ScheduledExecutorService executorService) {
-    this.context = stateContext;
-    this.executor = executorService;
-    this.executor.schedule(this,
-        getReportFrequency(), TimeUnit.MILLISECONDS);
-  }
-
-  @Override
-  public void setConf(Configuration conf) {
-    config = conf;
-  }
-
-  @Override
-  public Configuration getConf() {
-    return config;
-  }
-
-  @Override
-  public void run() {
-    publishReport();
-    if (!executor.isShutdown() &&
-        context.getState() != DatanodeStates.SHUTDOWN) {
-      executor.schedule(this,
-          getReportFrequency(), TimeUnit.MILLISECONDS);
-    }
-  }
-
-  /**
-   * Generates and publishes the report to datanode state context.
-   */
-  private void publishReport() {
-    try {
-      context.addReport(getReport());
-    } catch (IOException e) {
-      LOG.error("Exception while publishing report.", e);
-    }
-  }
-
-  /**
-   * Returns the frequency at which this particular report has to be scheduled.
-   *
-   * @return report interval in milliseconds
-   */
-  protected abstract long getReportFrequency();
-
-  /**
-   * Generates and returns the report which has to be sent as part of heartbeat.
-   *
-   * @return datanode report
-   */
-  protected abstract T getReport() throws IOException;
-
-  /**
-   * Returns {@link StateContext}.
-   *
-   * @return the StateContext
-   */
-  protected StateContext getContext() {
-    return context;
-  }
-
-}
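Concrete publishers only have to supply a frequency and a report; scheduling, rescheduling, and error handling all live in the base class above. A hedged sketch with a made-up report type, DiskUsageReportProto, used purely for illustration:

/** Hypothetical publisher for an invented DiskUsageReportProto. */
public class DiskUsageReportPublisher
    extends ReportPublisher<DiskUsageReportProto> {

  @Override
  protected long getReportFrequency() {
    // A real publisher reads this from configuration and verifies it is
    // not lower than the heartbeat interval, as the publishers in this
    // patch do; a fixed one-minute interval keeps the sketch short.
    return TimeUnit.MINUTES.toMillis(1);
  }

  @Override
  protected DiskUsageReportProto getReport() throws IOException {
    return DiskUsageReportProto.newBuilder().build();
  }
}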
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java
deleted file mode 100644
index 1c456a0..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.report;
-
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto.
-        StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.util.ReflectionUtils;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Factory class to construct {@link ReportPublisher} for a report.
- */
-public class ReportPublisherFactory {
-
-  private final Configuration conf;
-  private final Map<Class<? extends GeneratedMessage>,
-      Class<? extends ReportPublisher>> report2publisher;
-
-  /**
-   * Constructs {@link ReportPublisherFactory} instance.
-   *
-   * @param conf Configuration to be passed to the {@link ReportPublisher}
-   */
-  public ReportPublisherFactory(Configuration conf) {
-    this.conf = conf;
-    this.report2publisher = new HashMap<>();
-
-    report2publisher.put(NodeReportProto.class, NodeReportPublisher.class);
-    report2publisher.put(ContainerReportsProto.class,
-        ContainerReportPublisher.class);
-    report2publisher.put(CommandStatusReportsProto.class,
-        CommandStatusReportPublisher.class);
-    report2publisher.put(PipelineReportsProto.class,
-            PipelineReportPublisher.class);
-  }
-
-  /**
-   * Returns the ReportPublisher for the corresponding report.
-   *
-   * @param report report
-   *
-   * @return report publisher
-   */
-  public ReportPublisher getPublisherFor(
-      Class<? extends GeneratedMessage> report) {
-    Class<? extends ReportPublisher> publisherClass =
-        report2publisher.get(report);
-    if (publisherClass == null) {
-      throw new RuntimeException("No publisher found for report " + report);
-    }
-    return ReflectionUtils.newInstance(publisherClass, conf);
-  }
-
-}
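Resolution is a plain map lookup, so an unregistered report type fails fast; a short usage sketch (UnmappedProto is a placeholder name):

ReportPublisherFactory factory = new ReportPublisherFactory(conf);
ReportPublisher publisher =
    factory.getPublisherFor(NodeReportProto.class); // NodeReportPublisher
// factory.getPublisherFor(UnmappedProto.class) would throw
// RuntimeException("No publisher found for report ...").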
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java
deleted file mode 100644
index 404b37a..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.report;
-/**
- * Datanode Reports: As part of heartbeat, datanode has to share its current
- * state with SCM. The state of the datanode is split into multiple reports
- * which are sent along with the heartbeat at a configured frequency.
- *
- * This package contains code which is responsible for sending reports from
- * datanode to SCM.
- *
- * ReportPublisherFactory: Given a report this constructs corresponding
- * {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher}.
- *
- * ReportManager: Manages and initializes all the available ReportPublishers.
- *
- * ReportPublisher: Abstract class responsible for scheduling the reports
- * based on the configured interval. All the ReportPublishers should extend
- * {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher}
- *
- * How to add new report:
- *
- * 1. Create a new ReportPublisher class which extends
- * {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher}.
- *
- * 2. Add a mapping Report to ReportPublisher entry in ReportPublisherFactory.
- *
- * 3. In DatanodeStateMachine add the report to ReportManager instance.
- *
- *
- *
- * Datanode Reports State Diagram:
- *
- *   DatanodeStateMachine  ReportManager  ReportPublisher           SCM
- *            |                  |              |                    |
- *            |                  |              |                    |
- *            |    construct     |              |                    |
- *            |----------------->|              |                    |
- *            |                  |              |                    |
- *            |     init         |              |                    |
- *            |----------------->|              |                    |
- *            |                  |     init     |                    |
- *            |                  |------------->|                    |
- *            |                  |              |                    |
- *   +--------+------------------+--------------+--------------------+------+
- *   |loop    |                  |              |                    |      |
- *   |        |                  |   publish    |                    |      |
- *   |        |<-----------------+--------------|                    |      |
- *   |        |                  |   report     |                    |      |
- *   |        |                  |              |                    |      |
- *   |        |                  |              |                    |      |
- *   |        |   heartbeat(rpc) |              |                    |      |
- *   |        |------------------+--------------+------------------->|      |
- *   |        |                  |              |                    |      |
- *   |        |                  |              |                    |      |
- *   +--------+------------------+--------------+--------------------+------+
- *            |                  |              |                    |
- *            |                  |              |                    |
- *            |                  |              |                    |
- *            |     shutdown     |              |                    |
- *            |----------------->|              |                    |
- *            |                  |              |                    |
- *            |                  |              |                    |
- *            -                  -              -                    -
- */
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
deleted file mode 100644
index c9eb702..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ /dev/null
@@ -1,489 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.ozone.HddsDatanodeStopService;
-import org.apache.hadoop.ozone.container.common.report.ReportManager;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .CloseContainerCommandHandler;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .CommandDispatcher;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .DeleteBlocksCommandHandler;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .DeleteContainerCommandHandler;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .ReplicateContainerCommandHandler;
-import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.container.replication.ContainerReplicator;
-import org.apache.hadoop.ozone.container.replication.DownloadAndImportReplicator;
-import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor;
-import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.util.JvmPauseMonitor;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * State Machine Class.
- */
-public class DatanodeStateMachine implements Closeable {
-  @VisibleForTesting
-  static final Logger LOG =
-      LoggerFactory.getLogger(DatanodeStateMachine.class);
-  private final ExecutorService executorService;
-  private final Configuration conf;
-  private final SCMConnectionManager connectionManager;
-  private StateContext context;
-  private final OzoneContainer container;
-  private DatanodeDetails datanodeDetails;
-  private final CommandDispatcher commandDispatcher;
-  private final ReportManager reportManager;
-  private long commandsHandled;
-  private AtomicLong nextHB;
-  private Thread stateMachineThread = null;
-  private Thread cmdProcessThread = null;
-  private final ReplicationSupervisor supervisor;
-
-  private JvmPauseMonitor jvmPauseMonitor;
-  private CertificateClient dnCertClient;
-  private final HddsDatanodeStopService hddsDatanodeStopService;
-
-  /**
-   * Constructs a datanode state machine.
-   * @param datanodeDetails - DatanodeDetails used to identify a datanode
-   * @param conf - Configuration.
-   * @param certClient - Datanode Certificate client, required if security is
-   *                     enabled
-   */
-  public DatanodeStateMachine(DatanodeDetails datanodeDetails,
-      Configuration conf, CertificateClient certClient,
-      HddsDatanodeStopService hddsDatanodeStopService) throws IOException {
-    this.hddsDatanodeStopService = hddsDatanodeStopService;
-    this.conf = conf;
-    this.datanodeDetails = datanodeDetails;
-    executorService = HadoopExecutors.newCachedThreadPool(
-                new ThreadFactoryBuilder().setDaemon(true)
-            .setNameFormat("Datanode State Machine Thread - %d").build());
-    connectionManager = new SCMConnectionManager(conf);
-    context = new StateContext(this.conf, DatanodeStates.getInitState(), this);
-    container = new OzoneContainer(this.datanodeDetails,
-        new OzoneConfiguration(conf), context, certClient);
-    dnCertClient = certClient;
-    nextHB = new AtomicLong(Time.monotonicNow());
-
-    ContainerReplicator replicator =
-        new DownloadAndImportReplicator(container.getContainerSet(),
-            container.getController(),
-            new SimpleContainerDownloader(conf), new TarContainerPacker());
-
-    supervisor =
-        new ReplicationSupervisor(container.getContainerSet(), replicator, 10);
-
-    // When we add new handlers, just adding a new handler here should do
-    // the trick.
-    commandDispatcher = CommandDispatcher.newBuilder()
-        .addHandler(new CloseContainerCommandHandler())
-        .addHandler(new DeleteBlocksCommandHandler(container.getContainerSet(),
-            conf))
-        .addHandler(new ReplicateContainerCommandHandler(conf, supervisor))
-        .addHandler(new DeleteContainerCommandHandler())
-        .setConnectionManager(connectionManager)
-        .setContainer(container)
-        .setContext(context)
-        .build();
-
-    reportManager = ReportManager.newBuilder(conf)
-        .setStateContext(context)
-        .addPublisherFor(NodeReportProto.class)
-        .addPublisherFor(ContainerReportsProto.class)
-        .addPublisherFor(CommandStatusReportsProto.class)
-        .addPublisherFor(PipelineReportsProto.class)
-        .build();
-  }
-
-  /**
-   *
-   * Returns DatanodeDetails if set, null otherwise.
-   *
-   * @return DatanodeDetails
-   */
-  public DatanodeDetails getDatanodeDetails() {
-    return datanodeDetails;
-  }
-
-
-  /**
-   * Returns the Connection manager for this state machine.
-   *
-   * @return - SCMConnectionManager.
-   */
-  public SCMConnectionManager getConnectionManager() {
-    return connectionManager;
-  }
-
-  public OzoneContainer getContainer() {
-    return this.container;
-  }
-
-  /**
-   * Runs the state machine at a fixed frequency.
-   */
-  private void start() throws IOException {
-    long now = 0;
-
-    reportManager.init();
-    initCommandHandlerThread(conf);
-
-    // Start jvm monitor
-    jvmPauseMonitor = new JvmPauseMonitor();
-    jvmPauseMonitor.init(conf);
-    jvmPauseMonitor.start();
-
-    while (context.getState() != DatanodeStates.SHUTDOWN) {
-      try {
-        LOG.debug("Executing cycle Number : {}", context.getExecutionCount());
-        long heartbeatFrequency = context.getHeartbeatFrequency();
-        nextHB.set(Time.monotonicNow() + heartbeatFrequency);
-        context.execute(executorService, heartbeatFrequency,
-            TimeUnit.MILLISECONDS);
-        now = Time.monotonicNow();
-        if (now < nextHB.get()) {
-          if (!Thread.interrupted()) {
-            Thread.sleep(nextHB.get() - now);
-          }
-        }
-      } catch (InterruptedException e) {
-        // Someone has sent an interrupt signal; this could be because
-        // 1. a heartbeat was triggered immediately, or
-        // 2. shutdown has been initiated.
-      } catch (Exception e) {
-        LOG.error("Unable to finish the execution.", e);
-      }
-    }
-
-    // If the state machine hit an exception, the state is set to SHUTDOWN
-    // to stop the state machine thread. Along with this we should also
-    // stop the datanode itself.
-    if (context.getShutdownOnError()) {
-      LOG.error("DatanodeStateMachine Shutdown due to a critical error");
-      hddsDatanodeStopService.stopService();
-    }
-  }
-
-  /**
-   * Gets the current context.
-   *
-   * @return StateContext
-   */
-  public StateContext getContext() {
-    return context;
-  }
-
-  /**
-   * Sets the current context.
-   *
-   * @param context - Context
-   */
-  public void setContext(StateContext context) {
-    this.context = context;
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated with it. If
-   * the stream is already closed then invoking this method has no effect.
-   * <p>
-   * <p> As noted in {@link AutoCloseable#close()}, cases where the close may
-   * fail require careful attention. It is strongly advised to relinquish the
-   * underlying resources and to internally <em>mark</em> the {@code Closeable}
-   * as closed, prior to throwing the {@code IOException}.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-    if (stateMachineThread != null) {
-      stateMachineThread.interrupt();
-    }
-    if (cmdProcessThread != null) {
-      cmdProcessThread.interrupt();
-    }
-    context.setState(DatanodeStates.getLastState());
-    executorService.shutdown();
-    try {
-      if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
-        executorService.shutdownNow();
-      }
-
-      if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
-        LOG.error("Unable to shutdown state machine properly.");
-      }
-    } catch (InterruptedException e) {
-      LOG.error("Error attempting to shutdown.", e);
-      executorService.shutdownNow();
-      Thread.currentThread().interrupt();
-    }
-
-    if (connectionManager != null) {
-      connectionManager.close();
-    }
-
-    if (container != null) {
-      container.stop();
-    }
-
-    if (jvmPauseMonitor != null) {
-      jvmPauseMonitor.stop();
-    }
-  }
-
-  /**
-   * States that a datanode can be in. GetNextState will move this enum from
-   * getInitState to getLastState.
-   */
-  public enum DatanodeStates {
-    INIT(1),
-    RUNNING(2),
-    SHUTDOWN(3);
-    private final int value;
-
-    /**
-     * Constructs states.
-     *
-     * @param value  Enum Value
-     */
-    DatanodeStates(int value) {
-      this.value = value;
-    }
-
-    /**
-     * Returns the first State.
-     *
-     * @return First State.
-     */
-    public static DatanodeStates getInitState() {
-      return INIT;
-    }
-
-    /**
-     * The last state of endpoint states.
-     *
-     * @return last state.
-     */
-    public static DatanodeStates getLastState() {
-      return SHUTDOWN;
-    }
-
-    /**
-     * Returns the numeric value associated with this state.
-     *
-     * @return int.
-     */
-    public int getValue() {
-      return value;
-    }
-
-    /**
-     * Returns the next logical state that the state machine should move to.
-     * This function assumes the states are sequentially numbered.
-     *
-     * @return NextState.
-     */
-    public DatanodeStates getNextState() {
-      if (this.value < getLastState().getValue()) {
-        int stateValue = this.getValue() + 1;
-        for (DatanodeStates iter : values()) {
-          if (stateValue == iter.getValue()) {
-            return iter;
-          }
-        }
-      }
-      return getLastState();
-    }
-  }
-
-  /**
-   * Start datanode state machine as a single thread daemon.
-   */
-  public void startDaemon() {
-    Runnable startStateMachineTask = () -> {
-      try {
-        start();
-        LOG.info("Ozone container server started.");
-      } catch (Exception ex) {
-        LOG.error("Unable to start the DatanodeState Machine", ex);
-      }
-    };
-    stateMachineThread =  new ThreadFactoryBuilder()
-        .setDaemon(true)
-        .setNameFormat("Datanode State Machine Thread - %d")
-        .build().newThread(startStateMachineTask);
-    stateMachineThread.start();
-  }
-
-  /**
-   * Calling this will immediately trigger a heartbeat to the SCMs.
-   * This heartbeat will also include all the reports which are ready to
-   * be sent by datanode.
-   */
-  public void triggerHeartbeat() {
-    stateMachineThread.interrupt();
-  }
-
-  /**
-   * Waits for DatanodeStateMachine to exit.
-   *
-   * @throws InterruptedException
-   */
-  public void join() throws InterruptedException {
-    if (stateMachineThread != null) {
-      stateMachineThread.join();
-    }
-
-    if (cmdProcessThread != null) {
-      cmdProcessThread.join();
-    }
-  }
-
-  /**
-   * Stop the daemon thread of the datanode state machine.
-   */
-  public synchronized void stopDaemon() {
-    try {
-      supervisor.stop();
-      context.setState(DatanodeStates.SHUTDOWN);
-      reportManager.shutdown();
-      this.close();
-      LOG.info("Ozone container server stopped.");
-    } catch (IOException e) {
-      LOG.error("Stop ozone container server failed.", e);
-    }
-  }
-
-  /**
-   *
-   * Check if the datanode state machine daemon is stopped.
-   *
-   * @return True if datanode state machine daemon is stopped
-   * and false otherwise.
-   */
-  @VisibleForTesting
-  public boolean isDaemonStopped() {
-    return this.executorService.isShutdown()
-        && this.getContext().getState() == DatanodeStates.SHUTDOWN;
-  }
-
-  /**
-   * Create a command handler thread.
-   *
-   * @param config
-   */
-  private void initCommandHandlerThread(Configuration config) {
-
-    /**
-     * Task that periodically checks if we have any outstanding commands.
-     * It is assumed that commands can be processed slowly and in order.
-     * This assumption might change in the future. Right now, because of this
-     * assumption, we have a single command-queue processing thread.
-     */
-    Runnable processCommandQueue = () -> {
-      long now;
-      while (getContext().getState() != DatanodeStates.SHUTDOWN) {
-        SCMCommand command = getContext().getNextCommand();
-        if (command != null) {
-          commandDispatcher.handle(command);
-          commandsHandled++;
-        } else {
-          try {
-            // Sleep till the next HB + 1 second.
-            now = Time.monotonicNow();
-            if (nextHB.get() > now) {
-              Thread.sleep((nextHB.get() - now) + 1000L);
-            }
-          } catch (InterruptedException e) {
-            // Ignore this exception.
-          }
-        }
-      }
-    };
-
-    // We will have only one thread for command processing in a datanode.
-    cmdProcessThread = getCommandHandlerThread(processCommandQueue);
-    cmdProcessThread.start();
-  }
-
-  private Thread getCommandHandlerThread(Runnable processCommandQueue) {
-    Thread handlerThread = new Thread(processCommandQueue);
-    handlerThread.setDaemon(true);
-    handlerThread.setName("Command processor thread");
-    handlerThread.setUncaughtExceptionHandler((Thread t, Throwable e) -> {
-      // Let us just restart this thread after logging a critical error.
-      // if this thread is not running we cannot handle commands from SCM.
-      LOG.error("Critical Error : Command processor thread encountered an " +
-          "error. Thread: {}", t.toString(), e);
-      getCommandHandlerThread(processCommandQueue).start();
-    });
-    return handlerThread;
-  }
-
-  /**
-   * Returns the number of commands handled by the datanode.
-   * @return count
-   */
-  @VisibleForTesting
-  public long getCommandHandled() {
-    return commandsHandled;
-  }
-
-  /**
-   * Returns the Command Dispatcher.
-   * @return CommandDispatcher
-   */
-  @VisibleForTesting
-  public CommandDispatcher getCommandDispatcher() {
-    return commandDispatcher;
-  }
-
-  @VisibleForTesting
-  public ReplicationSupervisor getSupervisor() {
-    return supervisor;
-  }
-}
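Reconstructed from the public methods above, the intended lifecycle is roughly the sketch below; passing null for the certificate client (insecure mode, per the constructor javadoc) and for the stop service is an assumption suitable only for a sketch:

DatanodeStateMachine stateMachine = new DatanodeStateMachine(
    datanodeDetails, conf, null /* certClient */, null /* stopService */);
stateMachine.startDaemon();      // runs start() on a daemon thread
stateMachine.triggerHeartbeat(); // interrupt => immediate heartbeat
// ...
stateMachine.stopDaemon();       // sets SHUTDOWN, stops reports, close()s
stateMachine.join();             // waits for both internal threads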
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java
deleted file mode 100644
index f0064ec..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java
+++ /dev/null
@@ -1,296 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.protocol.VersionResponse;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolClientSideTranslatorPB;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.time.ZonedDateTime;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import static org.apache.hadoop.hdds.scm.HddsServerUtil.getLogWarnInterval;
-import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmHeartbeatInterval;
-
-/**
- * EndpointStateMachine is a holder class that keeps state for an RPC endpoint.
- */
-public class EndpointStateMachine
-    implements Closeable, EndpointStateMachineMBean {
-  static final Logger
-      LOG = LoggerFactory.getLogger(EndpointStateMachine.class);
-  private final StorageContainerDatanodeProtocolClientSideTranslatorPB endPoint;
-  private final AtomicLong missedCount;
-  private final InetSocketAddress address;
-  private final Lock lock;
-  private final Configuration conf;
-  private EndPointStates state;
-  private VersionResponse version;
-  private ZonedDateTime lastSuccessfulHeartbeat;
-
-  /**
-   * Constructs RPC Endpoints.
-   *
-   * @param endPoint - RPC endPoint.
-   */
-  public EndpointStateMachine(InetSocketAddress address,
-      StorageContainerDatanodeProtocolClientSideTranslatorPB endPoint,
-      Configuration conf) {
-    this.endPoint = endPoint;
-    this.missedCount = new AtomicLong(0);
-    this.address = address;
-    state = EndPointStates.getInitState();
-    lock = new ReentrantLock();
-    this.conf = conf;
-  }
-
-  /**
-   * Takes a lock on this EndPoint so that other threads don't use this while we
-   * are trying to communicate via this endpoint.
-   */
-  public void lock() {
-    lock.lock();
-  }
-
-  /**
-   * Unlocks this endpoint.
-   */
-  public void unlock() {
-    lock.unlock();
-  }
-
-  /**
-   * Returns the version that we read from the server.
-   *
-   * @return - Version Response.
-   */
-  public VersionResponse getVersion() {
-    return version;
-  }
-
-  /**
-   * Sets the version response we received from the SCM.
-   *
-   * @param version VersionResponse
-   */
-  public void setVersion(VersionResponse version) {
-    this.version = version;
-  }
-
-  /**
-   * Returns the current state this endpoint is in.
-   *
-   * @return - the current state.
-   */
-  public EndPointStates getState() {
-    return state;
-  }
-
-  @Override
-  public int getVersionNumber() {
-    if (version != null) {
-      return version.getProtobufMessage().getSoftwareVersion();
-    } else {
-      return -1;
-    }
-  }
-
-  /**
-   * Sets the endpoint state.
-   *
-   * @param epState - end point state.
-   */
-  public EndPointStates setState(EndPointStates epState) {
-    this.state = epState;
-    return this.state;
-  }
-
-  /**
-   * Closes the connection.
-   *
-   * @throws IOException
-   */
-  @Override
-  public void close() throws IOException {
-    if (endPoint != null) {
-      endPoint.close();
-    }
-  }
-
-  /**
-   * We maintain a count of how many times we missed communicating with a
-   * specific SCM. Updates are serialized by the endpoint lock, but the count
-   * is kept in an AtomicLong so it can also be read safely (e.g. over JMX).
-   */
-  public void incMissed() {
-    this.missedCount.incrementAndGet();
-  }
-
-  /**
-   * Returns the value of the missed count.
-   *
-   * @return long
-   */
-  public long getMissedCount() {
-    return this.missedCount.get();
-  }
-
-  @Override
-  public String getAddressString() {
-    return getAddress().toString();
-  }
-
-  public void zeroMissedCount() {
-    this.missedCount.set(0);
-  }
-
-  /**
-   * Returns the InetSocketAddress of the endPoint.
-   *
-   * @return - the address.
-   */
-  public InetSocketAddress getAddress() {
-    return this.address;
-  }
-
-  /**
-   * Returns real RPC endPoint.
-   *
-   * @return rpc client.
-   */
-  public StorageContainerDatanodeProtocolClientSideTranslatorPB
-      getEndPoint() {
-    return endPoint;
-  }
-
-  /**
-   * Returns the string that represents this endpoint.
-   *
-   * @return - String
-   */
-  public String toString() {
-    return address.toString();
-  }
-
-  /**
-   * Logs the exception if needed.
-   * @param ex - Exception
-   */
-  public void logIfNeeded(Exception ex) {
-    if (this.getMissedCount() % getLogWarnInterval(conf) == 0) {
-      LOG.error(
-          "Unable to communicate to SCM server at {} for past {} seconds.",
-          this.getAddress().getHostString() + ":" + this.getAddress().getPort(),
-          TimeUnit.MILLISECONDS.toSeconds(
-              this.getMissedCount() * getScmHeartbeatInterval(this.conf)), ex);
-    }
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Incrementing the Missed count. Ex : {}", ex);
-    }
-    this.incMissed();
-  }
-
-
-  /**
-   * States that an Endpoint can be in.
-   * <p>
-   * This is a sorted list of states that EndPoint will traverse.
-   * <p>
-   * GetNextState will move this enum from getInitState to getLastState.
-   */
-  public enum EndPointStates {
-    GETVERSION(1),
-    REGISTER(2),
-    HEARTBEAT(3),
-    SHUTDOWN(4); // if you add value after this please edit getLastState too.
-    private final int value;
-
-    /**
-     * Constructs endPointStates.
-     *
-     * @param value  state.
-     */
-    EndPointStates(int value) {
-      this.value = value;
-    }
-
-    /**
-     * Returns the first State.
-     *
-     * @return First State.
-     */
-    public static EndPointStates getInitState() {
-      return GETVERSION;
-    }
-
-    /**
-     * The last state of endpoint states.
-     *
-     * @return last state.
-     */
-    public static EndPointStates getLastState() {
-      return SHUTDOWN;
-    }
-
-    /**
-     * Returns the numeric value associated with the endPoint.
-     *
-     * @return int.
-     */
-    public int getValue() {
-      return value;
-    }
-
-    /**
-     * Returns the next logical state that endPoint should move to.
-     * The next state is computed by adding 1 to the current state.
-     *
-     * @return NextState.
-     */
-    public EndPointStates getNextState() {
-      if (this.getValue() < getLastState().getValue()) {
-        int stateValue = this.getValue() + 1;
-        for (EndPointStates iter : values()) {
-          if (stateValue == iter.getValue()) {
-            return iter;
-          }
-        }
-      }
-      return getLastState();
-    }
-  }
-
-  public long getLastSuccessfulHeartbeat() {
-    return lastSuccessfulHeartbeat == null ?
-        0 :
-        lastSuccessfulHeartbeat.toEpochSecond();
-  }
-
-  public void setLastSuccessfulHeartbeat(
-      ZonedDateTime lastSuccessfulHeartbeat) {
-    this.lastSuccessfulHeartbeat = lastSuccessfulHeartbeat;
-  }
-}
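
For reference, a minimal sketch of how the removed EndPointStates enum walked
its states: getNextState() simply adds one to the current value and looks the
result up, so the traversal is strictly linear from getInitState() to
getLastState(). Everything outside the enum itself is hypothetical here.

    EndpointStateMachine.EndPointStates state =
        EndpointStateMachine.EndPointStates.getInitState();   // GETVERSION
    while (state != EndpointStateMachine.EndPointStates.getLastState()) {
      EndpointStateMachine.EndPointStates next = state.getNextState();
      System.out.println(state + " -> " + next);
      state = next;
    }
    // Walks GETVERSION -> REGISTER -> HEARTBEAT -> SHUTDOWN and stops.
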
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java
deleted file mode 100644
index 4f64bde..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine;
-
-
-/**
- * JMX representation of an EndpointStateMachine.
- */
-public interface EndpointStateMachineMBean {
-
-  long getMissedCount();
-
-  String getAddressString();
-
-  EndpointStateMachine.EndPointStates getState();
-
-  int getVersionNumber();
-
-  long getLastSuccessfulHeartbeat();
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
deleted file mode 100644
index ce31ebd..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
+++ /dev/null
@@ -1,221 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.Closeable;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import static java.util.Collections.unmodifiableList;
-import static org.apache.hadoop.hdds.scm.HddsServerUtil
-    .getScmRpcTimeOutInMilliseconds;
-
-/**
- * SCMConnectionManager manages the membership information of the SCMs
- * that we are working with.
- */
-public class SCMConnectionManager
-    implements Closeable, SCMConnectionManagerMXBean {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SCMConnectionManager.class);
-
-  private final ReadWriteLock mapLock;
-  private final Map<InetSocketAddress, EndpointStateMachine> scmMachines;
-
-  private final int rpcTimeout;
-  private final Configuration conf;
-  private ObjectName jmxBean;
-
-  public SCMConnectionManager(Configuration conf) {
-    this.mapLock = new ReentrantReadWriteLock();
-    Long timeOut = getScmRpcTimeOutInMilliseconds(conf);
-    this.rpcTimeout = timeOut.intValue();
-    this.scmMachines = new HashMap<>();
-    this.conf = conf;
-    jmxBean = MBeans.register("HddsDatanode",
-        "SCMConnectionManager",
-        this);
-  }
-
-
-  /**
-   * Returns the configuration.
-   *
-   * @return Configuration.
-   */
-  public Configuration getConf() {
-    return conf;
-  }
-
-  /**
-   * Get RpcTimeout.
-   *
-   * @return - Return RPC timeout.
-   */
-  public int getRpcTimeout() {
-    return rpcTimeout;
-  }
-
-
-  /**
-   * Takes a read lock.
-   */
-  public void readLock() {
-    this.mapLock.readLock().lock();
-  }
-
-  /**
-   * Releases the read lock.
-   */
-  public void readUnlock() {
-    this.mapLock.readLock().unlock();
-  }
-
-  /**
-   * Takes the write lock.
-   */
-  public void writeLock() {
-    this.mapLock.writeLock().lock();
-  }
-
-  /**
-   * Releases the write lock.
-   */
-  public void writeUnlock() {
-    this.mapLock.writeLock().unlock();
-  }
-
-  /**
-   * Adds a new SCM machine to the target set.
-   *
-   * @param address - Address of the SCM machine to send heartbeats to.
-   * @throws IOException
-   */
-  public void addSCMServer(InetSocketAddress address) throws IOException {
-    writeLock();
-    try {
-      if (scmMachines.containsKey(address)) {
-        LOG.warn("Trying to add an existing SCM Machine to Machines group. " +
-            "Ignoring the request.");
-        return;
-      }
-      RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
-          ProtobufRpcEngine.class);
-      long version =
-          RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class);
-
-      RetryPolicy retryPolicy =
-          RetryPolicies.retryForeverWithFixedSleep(
-              1000, TimeUnit.MILLISECONDS);
-      StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy(
-          StorageContainerDatanodeProtocolPB.class, version,
-          address, UserGroupInformation.getCurrentUser(), conf,
-          NetUtils.getDefaultSocketFactory(conf), getRpcTimeout(),
-          retryPolicy).getProxy();
-
-      StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient =
-          new StorageContainerDatanodeProtocolClientSideTranslatorPB(rpcProxy);
-
-      EndpointStateMachine endPoint =
-          new EndpointStateMachine(address, rpcClient, conf);
-      scmMachines.put(address, endPoint);
-    } finally {
-      writeUnlock();
-    }
-  }
-
-  /**
-   * Removes an SCM machine from the target set.
-   *
-   * @param address - Address of the SCM machine to remove.
-   * @throws IOException
-   */
-  public void removeSCMServer(InetSocketAddress address) throws IOException {
-    writeLock();
-    try {
-      if (!scmMachines.containsKey(address)) {
-        LOG.warn("Trying to remove a non-existent SCM machine. " +
-            "Ignoring the request.");
-        return;
-      }
-
-      EndpointStateMachine endPoint = scmMachines.get(address);
-      endPoint.close();
-      scmMachines.remove(address);
-    } finally {
-      writeUnlock();
-    }
-  }
-
-  /**
-   * Returns all known RPCEndpoints.
-   *
-   * @return - List of RPC Endpoints.
-   */
-  public Collection<EndpointStateMachine> getValues() {
-    readLock();
-    try {
-      return unmodifiableList(new ArrayList<>(scmMachines.values()));
-    } finally {
-      readUnlock();
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    getValues().forEach(endpointStateMachine
-        -> IOUtils.cleanupWithLogger(LOG, endpointStateMachine));
-    if (jmxBean != null) {
-      MBeans.unregister(jmxBean);
-      jmxBean = null;
-    }
-  }
-
-  @Override
-  public List<EndpointStateMachineMBean> getSCMServers() {
-    readLock();
-    try {
-      return unmodifiableList(new ArrayList<>(scmMachines.values()));
-    } finally {
-      readUnlock();
-    }
-  }
-}
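
For reference, an illustrative sketch of the removed SCMConnectionManager's
lifecycle; the SCM host/port and the enclosing method are hypothetical
stand-ins, not part of this patch.

    static void sketch(Configuration conf) throws IOException {
      SCMConnectionManager connectionManager = new SCMConnectionManager(conf);
      InetSocketAddress scm = NetUtils.createSocketAddr("scm.example.com:9861");
      connectionManager.addSCMServer(scm);  // builds the RPC proxy and EndpointStateMachine
      for (EndpointStateMachine endpoint : connectionManager.getValues()) {
        System.out.println(endpoint.getAddressString());
      }
      connectionManager.close();            // closes endpoints, unregisters the JMX bean
    }
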
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java
deleted file mode 100644
index 25ef163..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine;
-
-import java.util.List;
-
-/**
- * JMX information about the connected SCM servers.
- */
-public interface SCMConnectionManagerMXBean {
-
-  List<EndpointStateMachineMBean> getSCMServers();
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
deleted file mode 100644
index 2c01f3a..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ /dev/null
@@ -1,502 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.GeneratedMessage;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineAction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
-import org.apache.hadoop.ozone.container.common.states.DatanodeState;
-import org.apache.hadoop.ozone.container.common.states.datanode
-    .InitDatanodeState;
-import org.apache.hadoop.ozone.container.common.states.datanode
-    .RunningDatanodeState;
-import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
-import org.apache.hadoop.ozone.protocol.commands
-    .DeleteBlockCommandStatus.DeleteBlockCommandStatusBuilder;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-
-import static java.lang.Math.min;
-import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmHeartbeatInterval;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Queue;
-import java.util.ArrayList;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.function.Consumer;
-
-/**
- * Current Context of State Machine.
- */
-public class StateContext {
-  static final Logger LOG =
-      LoggerFactory.getLogger(StateContext.class);
-  private final Queue<SCMCommand> commandQueue;
-  private final Map<Long, CommandStatus> cmdStatusMap;
-  private final Lock lock;
-  private final DatanodeStateMachine parent;
-  private final AtomicLong stateExecutionCount;
-  private final Configuration conf;
-  private final List<GeneratedMessage> reports;
-  private final Queue<ContainerAction> containerActions;
-  private final Queue<PipelineAction> pipelineActions;
-  private DatanodeStateMachine.DatanodeStates state;
-  private boolean shutdownOnError = false;
-
-  /**
-   * Starting with a 2 second heartbeat frequency, which is updated to the
-   * real heartbeat frequency after SCM registration. This makes the
-   * initial registration significantly faster.
-   */
-  private AtomicLong heartbeatFrequency = new AtomicLong(2000);
-
-  /**
-   * Constructs a StateContext.
-   *
-   * @param conf   - Configuration
-   * @param state  - State
-   * @param parent Parent State Machine
-   */
-  public StateContext(Configuration conf, DatanodeStateMachine.DatanodeStates
-      state, DatanodeStateMachine parent) {
-    this.conf = conf;
-    this.state = state;
-    this.parent = parent;
-    commandQueue = new LinkedList<>();
-    cmdStatusMap = new ConcurrentHashMap<>();
-    reports = new LinkedList<>();
-    containerActions = new LinkedList<>();
-    pipelineActions = new LinkedList<>();
-    lock = new ReentrantLock();
-    stateExecutionCount = new AtomicLong(0);
-  }
-
-  /**
-   * Returns the DatanodeStateMachine that owns this context.
-   *
-   * @return DatanodeStateMachine.
-   */
-  public DatanodeStateMachine getParent() {
-    return parent;
-  }
-
-  /**
-   * Returns true if we are entering a new state.
-   *
-   * @return boolean
-   */
-  boolean isEntering() {
-    return stateExecutionCount.get() == 0;
-  }
-
-  /**
-   * Returns true if we are exiting from the current state.
-   *
-   * @param newState - newState.
-   * @return boolean
-   */
-  boolean isExiting(DatanodeStateMachine.DatanodeStates newState) {
-    boolean isExiting = state != newState && stateExecutionCount.get() > 0;
-    if(isExiting) {
-      stateExecutionCount.set(0);
-    }
-    return isExiting;
-  }
-
-  /**
-   * Returns the current state the machine is in.
-   *
-   * @return state.
-   */
-  public DatanodeStateMachine.DatanodeStates getState() {
-    return state;
-  }
-
-  /**
-   * Sets the current state of the machine.
-   *
-   * @param state state.
-   */
-  public void setState(DatanodeStateMachine.DatanodeStates state) {
-    this.state = state;
-  }
-
-  /**
-   * Sets shutdownOnError. This method needs to be called when the state
-   * is set to SHUTDOWN while executing a DatanodeState task.
-   * @param value - true if the datanode should shut down on error.
-   */
-  private void setShutdownOnError(boolean value) {
-    this.shutdownOnError = value;
-  }
-
-  /**
-   * Returns shutdownOnError.
-   * @return boolean
-   */
-  public boolean getShutdownOnError() {
-    return shutdownOnError;
-  }
-
-  /**
-   * Adds the report to report queue.
-   *
-   * @param report report to be added
-   */
-  public void addReport(GeneratedMessage report) {
-    if (report != null) {
-      synchronized (reports) {
-        reports.add(report);
-      }
-    }
-  }
-
-  /**
-   * Adds the reports which could not be sent by heartbeat back to the
-   * reports list.
-   *
-   * @param reportsToPutBack list of reports which failed to be sent by
-   *                         heartbeat.
-   */
-  public void putBackReports(List<GeneratedMessage> reportsToPutBack) {
-    synchronized (reports) {
-      reports.addAll(0, reportsToPutBack);
-    }
-  }
-
-  /**
-   * Returns all the available reports from the report queue, or empty list if
-   * the queue is empty.
-   *
-   * @return List of reports
-   */
-  public List<GeneratedMessage> getAllAvailableReports() {
-    return getReports(Integer.MAX_VALUE);
-  }
-
-  /**
-   * Returns available reports from the report queue with a max limit on
-   * list size, or empty list if the queue is empty.
-   *
-   * @param maxLimit - maximum number of reports to return
-   * @return List of reports
-   */
-  public List<GeneratedMessage> getReports(int maxLimit) {
-    List<GeneratedMessage> reportsToReturn = new LinkedList<>();
-    synchronized (reports) {
-      List<GeneratedMessage> tempList = reports.subList(
-          0, min(reports.size(), maxLimit));
-      reportsToReturn.addAll(tempList);
-      tempList.clear();
-    }
-    return reportsToReturn;
-  }
-
-
-  /**
-   * Adds the ContainerAction to ContainerAction queue.
-   *
-   * @param containerAction ContainerAction to be added
-   */
-  public void addContainerAction(ContainerAction containerAction) {
-    synchronized (containerActions) {
-      containerActions.add(containerAction);
-    }
-  }
-
-  /**
-   * Add ContainerAction to ContainerAction queue if it's not present.
-   *
-   * @param containerAction ContainerAction to be added
-   */
-  public void addContainerActionIfAbsent(ContainerAction containerAction) {
-    synchronized (containerActions) {
-      if (!containerActions.contains(containerAction)) {
-        containerActions.add(containerAction);
-      }
-    }
-  }
-
-  /**
-   * Returns all the pending ContainerActions from the ContainerAction queue,
-   * or empty list if the queue is empty.
-   *
-   * @return {@literal List<ContainerAction>}
-   */
-  public List<ContainerAction> getAllPendingContainerActions() {
-    return getPendingContainerAction(Integer.MAX_VALUE);
-  }
-
-  /**
-   * Returns pending ContainerActions from the ContainerAction queue with a
-   * max limit on list size, or empty list if the queue is empty.
-   *
-   * @param maxLimit - maximum number of actions to return
-   * @return {@literal List<ContainerAction>}
-   */
-  public List<ContainerAction> getPendingContainerAction(int maxLimit) {
-    List<ContainerAction> containerActionList = new ArrayList<>();
-    synchronized (containerActions) {
-      if (!containerActions.isEmpty()) {
-        int size = containerActions.size();
-        int limit = min(size, maxLimit);
-        for (int count = 0; count < limit; count++) {
-          // we need to remove the action from the containerAction queue
-          // as well
-          ContainerAction action = containerActions.poll();
-          Preconditions.checkNotNull(action);
-          containerActionList.add(action);
-        }
-      }
-      return containerActionList;
-    }
-  }
-
-  /**
-   * Add PipelineAction to PipelineAction queue if it's not present.
-   *
-   * @param pipelineAction PipelineAction to be added
-   */
-  public void addPipelineActionIfAbsent(PipelineAction pipelineAction) {
-    synchronized (pipelineActions) {
-      /*
-       * If the pipelineActions queue already contains an entry for this
-       * pipeline id with the same action, just return.
-       * Note: We should not use pipelineActions.contains(pipelineAction) here
-       * because a PipelineAction carries a msg string, so two actions that
-       * are identical for a given pipeline but differ in msg would otherwise
-       * be added multiple times.
-       */
-      for (PipelineAction pipelineActionIter : pipelineActions) {
-        if (pipelineActionIter.getAction() == pipelineAction.getAction()
-            && pipelineActionIter.hasClosePipeline() && pipelineAction
-            .hasClosePipeline()
-            && pipelineActionIter.getClosePipeline().getPipelineID()
-            .equals(pipelineAction.getClosePipeline().getPipelineID())) {
-          return;
-        }
-      }
-      pipelineActions.add(pipelineAction);
-    }
-  }
-
-  /**
-   * Returns pending PipelineActions from the PipelineAction queue with a
-   * max limit on list size, or empty list if the queue is empty.
-   *
-   * @param maxLimit - maximum number of actions to return
-   * @return {@literal List<PipelineAction>}
-   */
-  public List<PipelineAction> getPendingPipelineAction(int maxLimit) {
-    List<PipelineAction> pipelineActionList = new ArrayList<>();
-    synchronized (pipelineActions) {
-      if (!pipelineActions.isEmpty()) {
-        int size = pipelineActions.size();
-        int limit = min(size, maxLimit);
-        for (int count = 0; count < limit; count++) {
-          pipelineActionList.add(pipelineActions.poll());
-        }
-      }
-      return pipelineActionList;
-    }
-  }
-
-  /**
-   * Returns the next task to get executed by the datanode state machine.
-   * @return the {@link DatanodeState} task that the
-   * {@link DatanodeStateMachine} will execute next
-   */
-  @SuppressWarnings("unchecked")
-  public DatanodeState<DatanodeStateMachine.DatanodeStates> getTask() {
-    switch (this.state) {
-    case INIT:
-      return new InitDatanodeState(this.conf, parent.getConnectionManager(),
-          this);
-    case RUNNING:
-      return new RunningDatanodeState(this.conf, parent.getConnectionManager(),
-          this);
-    case SHUTDOWN:
-      return null;
-    default:
-      throw new IllegalArgumentException("Not Implemented yet.");
-    }
-  }
-
-  /**
-   * Executes the required state function.
-   *
-   * @param service - Executor Service
-   * @param time    - amount of time to wait
-   * @param unit    - TimeUnit of {@code time}
-   * @throws InterruptedException
-   * @throws ExecutionException
-   * @throws TimeoutException
-   */
-  public void execute(ExecutorService service, long time, TimeUnit unit)
-      throws InterruptedException, ExecutionException, TimeoutException {
-    stateExecutionCount.incrementAndGet();
-    DatanodeState<DatanodeStateMachine.DatanodeStates> task = getTask();
-
-    // Null check: if the datanode is still starting up but
-    // DatanodeStateMachine.stop() has already been called, the state is set
-    // to SHUTDOWN and getTask() can return null.
-    if (task != null) {
-      if (this.isEntering()) {
-        task.onEnter();
-      }
-      task.execute(service);
-      DatanodeStateMachine.DatanodeStates newState = task.await(time, unit);
-      if (this.state != newState) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Task {} executed, state transited from {} to {}",
-              task.getClass().getSimpleName(), this.state, newState);
-        }
-        if (isExiting(newState)) {
-          task.onExit();
-        }
-        this.setState(newState);
-      }
-
-      if (this.state == DatanodeStateMachine.DatanodeStates.SHUTDOWN) {
-        LOG.error("Critical error occurred in StateMachine, setting " +
-            "shutDownMachine");
-        // When some exception occurred, set shutdownStateMachine to true, so
-        // that we can terminate the datanode.
-        setShutdownOnError(true);
-      }
-    }
-  }
-
-  /**
-   * Returns the next command or null if it is empty.
-   *
-   * @return SCMCommand or Null.
-   */
-  public SCMCommand getNextCommand() {
-    lock.lock();
-    try {
-      return commandQueue.poll();
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * Adds a command to the State Machine queue.
-   *
-   * @param command - SCMCommand.
-   */
-  public void addCommand(SCMCommand command) {
-    lock.lock();
-    try {
-      commandQueue.add(command);
-    } finally {
-      lock.unlock();
-    }
-    this.addCmdStatus(command);
-  }
-
-  /**
-   * Returns the state execution count.
-   * @return long
-   */
-  public long getExecutionCount() {
-    return stateExecutionCount.get();
-  }
-
-  /**
-   * Returns the {@link CommandStatus} for the given command id, or null if
-   * none is tracked.
-   *
-   * @param key - command id.
-   * @return {@link CommandStatus} or null.
-   */
-  public CommandStatus getCmdStatus(Long key) {
-    return cmdStatusMap.get(key);
-  }
-
-  /**
-   * Adds a {@link CommandStatus} to the State Machine.
-   *
-   * @param key - command id.
-   * @param status - {@link CommandStatus}.
-   */
-  public void addCmdStatus(Long key, CommandStatus status) {
-    cmdStatusMap.put(key, status);
-  }
-
-  /**
-   * Adds a {@link CommandStatus} to the State Machine for given SCMCommand.
-   *
-   * @param cmd - {@link SCMCommand}.
-   */
-  public void addCmdStatus(SCMCommand cmd) {
-    if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) {
-      addCmdStatus(cmd.getId(),
-          DeleteBlockCommandStatusBuilder.newBuilder()
-              .setCmdId(cmd.getId())
-              .setStatus(Status.PENDING)
-              .setType(cmd.getType())
-              .build());
-    }
-  }
-
-  /**
-   * Returns the map holding all {@link CommandStatus} objects.
-   */
-  public Map<Long, CommandStatus> getCommandStatusMap() {
-    return cmdStatusMap;
-  }
-
-  /**
-   * Updates the status of a pending command.
-   * @param cmdId       command id
-   * @param cmdStatusUpdater Consumer to update command status.
-   * @return true if the command status was updated successfully, else false.
-   */
-  public boolean updateCommandStatus(Long cmdId,
-      Consumer<CommandStatus> cmdStatusUpdater) {
-    if(cmdStatusMap.containsKey(cmdId)) {
-      cmdStatusUpdater.accept(cmdStatusMap.get(cmdId));
-      return true;
-    }
-    return false;
-  }
-
-  public void configureHeartbeatFrequency() {
-    heartbeatFrequency.set(getScmHeartbeatInterval(conf));
-  }
-
-  /**
-   * Returns the current heartbeat frequency in ms.
-   */
-  public long getHeartbeatFrequency() {
-    return heartbeatFrequency.get();
-  }
-}
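
For reference, an illustrative sketch of the removed StateContext's
report-queue contract: getReports(n) drains at most n queued reports, and
putBackReports() re-inserts unsent reports at the head so ordering is
preserved. The conf, stateMachine and containerReport variables are
hypothetical stand-ins.

    StateContext ctx = new StateContext(
        conf, DatanodeStateMachine.DatanodeStates.RUNNING, stateMachine);
    ctx.addReport(containerReport);            // any GeneratedMessage report
    List<GeneratedMessage> batch = ctx.getReports(100);
    boolean heartbeatSucceeded = false;        // suppose the heartbeat RPC failed
    if (!heartbeatSucceeded) {
      ctx.putBackReports(batch);               // retried on the next heartbeat
    }
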
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
deleted file mode 100644
index 2dec08f..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto
-    .ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .SCMConnectionManager;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.util.Time;
-import org.apache.ratis.protocol.NotLeaderException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Handler for close container command received from SCM.
- */
-public class CloseContainerCommandHandler implements CommandHandler {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(CloseContainerCommandHandler.class);
-
-  private int invocationCount;
-  private long totalTime;
-
-  /**
-   * Constructs a CloseContainerCommandHandler.
-   */
-  public CloseContainerCommandHandler() {
-  }
-
-  /**
-   * Handles a given SCM command.
-   *
-   * @param command           - SCM Command
-   * @param ozoneContainer         - Ozone Container.
-   * @param context           - Current Context.
-   * @param connectionManager - The SCMs that we are talking to.
-   */
-  @Override
-  public void handle(SCMCommand command, OzoneContainer ozoneContainer,
-      StateContext context, SCMConnectionManager connectionManager) {
-    LOG.debug("Processing Close Container command.");
-    invocationCount++;
-    final long startTime = Time.monotonicNow();
-    final DatanodeDetails datanodeDetails = context.getParent()
-        .getDatanodeDetails();
-    final CloseContainerCommandProto closeCommand =
-        ((CloseContainerCommand)command).getProto();
-    final ContainerController controller = ozoneContainer.getController();
-    final long containerId = closeCommand.getContainerID();
-    try {
-      final Container container = controller.getContainer(containerId);
-
-      if (container == null) {
-        LOG.error("Container #{} does not exist in datanode. "
-            + "Container close failed.", containerId);
-        return;
-      }
-
-      // move the container to CLOSING if in OPEN state
-      controller.markContainerForClose(containerId);
-
-      switch (container.getContainerState()) {
-      case OPEN:
-      case CLOSING:
-        // If the container is part of open pipeline, close it via write channel
-        if (ozoneContainer.getWriteChannel()
-            .isExist(closeCommand.getPipelineID())) {
-          ContainerCommandRequestProto request =
-              getContainerCommandRequestProto(datanodeDetails,
-                  closeCommand.getContainerID());
-          ozoneContainer.getWriteChannel()
-              .submitRequest(request, closeCommand.getPipelineID());
-        } else {
-          // Container should not exist in CLOSING state without a pipeline
-          controller.markContainerUnhealthy(containerId);
-        }
-        break;
-      case QUASI_CLOSED:
-        if (closeCommand.getForce()) {
-          controller.closeContainer(containerId);
-          break;
-        }
-        // fall through: a non-forced close leaves a QUASI_CLOSED container
-        // as-is, just like CLOSED.
-      case CLOSED:
-        break;
-      case UNHEALTHY:
-      case INVALID:
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Cannot close the container #{}, the container is"
-              + " in {} state.", containerId, container.getContainerState());
-        }
-      default:
-        break;
-      }
-    } catch (NotLeaderException e) {
-      LOG.debug("Follower cannot close container #{}.", containerId);
-    } catch (IOException e) {
-      LOG.error("Can't close container #{}", containerId, e);
-    } finally {
-      long endTime = Time.monotonicNow();
-      totalTime += endTime - startTime;
-    }
-  }
-
-  private ContainerCommandRequestProto getContainerCommandRequestProto(
-      final DatanodeDetails datanodeDetails, final long containerId) {
-    final ContainerCommandRequestProto.Builder command =
-        ContainerCommandRequestProto.newBuilder();
-    command.setCmdType(ContainerProtos.Type.CloseContainer);
-    command.setTraceID(TracingUtil.exportCurrentSpan());
-    command.setContainerID(containerId);
-    command.setCloseContainer(
-        ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
-    command.setDatanodeUuid(datanodeDetails.getUuidString());
-    return command.build();
-  }
-
-  /**
-   * Returns the command type that this command handler handles.
-   *
-   * @return Type
-   */
-  @Override
-  public SCMCommandProto.Type getCommandType() {
-    return SCMCommandProto.Type.closeContainerCommand;
-  }
-
-  /**
-   * Returns number of times this handler has been invoked.
-   *
-   * @return int
-   */
-  @Override
-  public int getInvocationCount() {
-    return invocationCount;
-  }
-
-  /**
-   * Returns the average time this function takes to run.
-   *
-   * @return long
-   */
-  @Override
-  public long getAverageRunTime() {
-    if (invocationCount > 0) {
-      return totalTime / invocationCount;
-    }
-    return 0;
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
deleted file mode 100644
index af854ec..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Dispatches command to the correct handler.
- */
-public final class CommandDispatcher {
-  static final Logger LOG =
-      LoggerFactory.getLogger(CommandDispatcher.class);
-  private final StateContext context;
-  private final Map<Type, CommandHandler> handlerMap;
-  private final OzoneContainer container;
-  private final SCMConnectionManager connectionManager;
-
-  /**
-   * Constructs a command dispatcher.
-   *
-   * @param container - Ozone Container
-   * @param connectionManager - SCM connection manager
-   * @param context - Context
-   * @param handlers - Set of handlers.
-   */
-  private CommandDispatcher(OzoneContainer container, SCMConnectionManager
-      connectionManager, StateContext context,
-      CommandHandler... handlers) {
-    Preconditions.checkNotNull(context);
-    Preconditions.checkNotNull(handlers);
-    Preconditions.checkArgument(handlers.length > 0);
-    Preconditions.checkNotNull(container);
-    Preconditions.checkNotNull(connectionManager);
-    this.context = context;
-    this.container = container;
-    this.connectionManager = connectionManager;
-    handlerMap = new HashMap<>();
-    for (CommandHandler h : handlers) {
-      if (handlerMap.containsKey(h.getCommandType())) {
-        LOG.error("Duplicate handler for the same command. Exiting. Handler " +
-            "key: {}", h.getCommandType().getDescriptorForType().getName());
-        throw new IllegalArgumentException("Duplicate handler for the same " +
-            "command.");
-      }
-      handlerMap.put(h.getCommandType(), h);
-    }
-  }
-
-  public CommandHandler getCloseContainerHandler() {
-    return handlerMap.get(Type.closeContainerCommand);
-  }
-
-  @VisibleForTesting
-  public CommandHandler getDeleteBlocksCommandHandler() {
-    return handlerMap.get(Type.deleteBlocksCommand);
-  }
-
-  /**
-   * Dispatch the command to the correct handler.
-   *
-   * @param command - SCM Command.
-   */
-  public void handle(SCMCommand command) {
-    Preconditions.checkNotNull(command);
-    CommandHandler handler = handlerMap.get(command.getType());
-    if (handler != null) {
-      handler.handle(command, container, context, connectionManager);
-    } else {
-      LOG.error("Unknown SCM Command queued. There is no handler for this " +
-          "command. Command: {}", command.getType().getDescriptorForType()
-          .getName());
-    }
-  }
-
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Helper class to construct command dispatcher.
-   */
-  public static class Builder {
-    private final List<CommandHandler> handlerList;
-    private OzoneContainer container;
-    private StateContext context;
-    private SCMConnectionManager connectionManager;
-
-    public Builder() {
-      handlerList = new LinkedList<>();
-    }
-
-    /**
-     * Adds a handler.
-     *
-     * @param handler - handler
-     * @return Builder
-     */
-    public Builder addHandler(CommandHandler handler) {
-      Preconditions.checkNotNull(handler);
-      handlerList.add(handler);
-      return this;
-    }
-
-    /**
-     * Add the OzoneContainer.
-     *
-     * @param ozoneContainer - ozone container.
-     * @return Builder
-     */
-    public Builder setContainer(OzoneContainer ozoneContainer) {
-      Preconditions.checkNotNull(ozoneContainer);
-      this.container = ozoneContainer;
-      return this;
-    }
-
-    /**
-     * Set the Connection Manager.
-     *
-     * @param scmConnectionManager - SCM connection manager.
-     * @return this
-     */
-    public Builder setConnectionManager(SCMConnectionManager
-        scmConnectionManager) {
-      Preconditions.checkNotNull(scmConnectionManager);
-      this.connectionManager = scmConnectionManager;
-      return this;
-    }
-
-    /**
-     * Sets the Context.
-     *
-     * @param stateContext - StateContext
-     * @return this
-     */
-    public Builder setContext(StateContext stateContext) {
-      Preconditions.checkNotNull(stateContext);
-      this.context = stateContext;
-      return this;
-    }
-
-    /**
-     * Builds a command Dispatcher.
-     * @return Command Dispatcher.
-     */
-    public CommandDispatcher build() {
-      Preconditions.checkNotNull(this.connectionManager, "Missing connection" +
-          " manager.");
-      Preconditions.checkNotNull(this.container, "Missing container.");
-      Preconditions.checkNotNull(this.context, "Missing context.");
-      Preconditions.checkArgument(this.handlerList.size() > 0);
-      return new CommandDispatcher(this.container, this.connectionManager,
-          this.context, handlerList.toArray(
-              new CommandHandler[handlerList.size()]));
-    }
-  }
-}
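
For reference, an illustrative sketch of wiring the removed CommandDispatcher
through its builder; ozoneContainer, connectionManager, context and command
stand in for instances built elsewhere in the datanode.

    CommandDispatcher dispatcher = CommandDispatcher.newBuilder()
        .addHandler(new CloseContainerCommandHandler())
        .addHandler(new DeleteContainerCommandHandler())
        .setContainer(ozoneContainer)
        .setConnectionManager(connectionManager)
        .setContext(context)
        .build();
    // Routed by command.getType(); unknown command types are logged as errors.
    dispatcher.handle(command);
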
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java
deleted file mode 100644
index 1ea0ea8..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .SCMConnectionManager;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.slf4j.Logger;
-
-import java.util.function.Consumer;
-
-/**
- * Generic interface for handlers.
- */
-public interface CommandHandler {
-
-  /**
-   * Handles a given SCM command.
-   * @param command - SCM Command
-   * @param container - Ozone Container.
-   * @param context - Current Context.
-   * @param connectionManager - The SCMs that we are talking to.
-   */
-  void handle(SCMCommand command, OzoneContainer container,
-      StateContext context, SCMConnectionManager connectionManager);
-
-  /**
-   * Returns the command type that this command handler handles.
-   * @return Type
-   */
-  SCMCommandProto.Type getCommandType();
-
-  /**
-   * Returns number of times this handler has been invoked.
-   * @return int
-   */
-  int getInvocationCount();
-
-  /**
-   * Returns the average time this function takes to run.
-   * @return  long
-   */
-  long getAverageRunTime();
-
-  /**
-   * Default implementation for updating command status.
-   */
-  default void updateCommandStatus(StateContext context, SCMCommand command,
-      Consumer<CommandStatus> cmdStatusUpdater, Logger log) {
-    if (!context.updateCommandStatus(command.getId(), cmdStatusUpdater)) {
-      log.debug("{} with Id:{} not found.", command.getType(),
-          command.getId());
-    }
-  }
-}
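
For reference, an illustrative sketch of the smallest handler the removed
CommandHandler interface permitted; the class name and the chosen command
type are placeholders.

    public class NoOpCommandHandler implements CommandHandler {
      private int invocationCount;

      @Override
      public void handle(SCMCommand command, OzoneContainer container,
          StateContext context, SCMConnectionManager connectionManager) {
        invocationCount++;  // a real handler would act on the command here
      }

      @Override
      public SCMCommandProto.Type getCommandType() {
        return SCMCommandProto.Type.closeContainerCommand;  // placeholder type
      }

      @Override
      public int getInvocationCount() {
        return invocationCount;
      }

      @Override
      public long getAverageRunTime() {
        return 0;
      }
    }
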
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
deleted file mode 100644
index cdecf5d..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ /dev/null
@@ -1,281 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto
-    .DeleteBlockTransactionResult;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers
-    .DeletedContainerBlocksSummary;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .SCMConnectionManager;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
-import org.apache.hadoop.ozone.protocol.commands.DeleteBlockCommandStatus;
-import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.BatchOperation;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.function.Consumer;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.CONTAINER_NOT_FOUND;
-
-/**
- * Handle block deletion commands.
- */
-public class DeleteBlocksCommandHandler implements CommandHandler {
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(DeleteBlocksCommandHandler.class);
-
-  private final ContainerSet containerSet;
-  private final Configuration conf;
-  private int invocationCount;
-  private long totalTime;
-  private boolean cmdExecuted;
-
-  public DeleteBlocksCommandHandler(ContainerSet cset,
-      Configuration conf) {
-    this.containerSet = cset;
-    this.conf = conf;
-  }
-
-  @Override
-  public void handle(SCMCommand command, OzoneContainer container,
-      StateContext context, SCMConnectionManager connectionManager) {
-    cmdExecuted = false;
-    long startTime = Time.monotonicNow();
-    ContainerBlocksDeletionACKProto blockDeletionACK = null;
-    try {
-      if (command.getType() != SCMCommandProto.Type.deleteBlocksCommand) {
-        LOG.warn("Skipping handling command, expected command "
-                + "type {} but found {}",
-            SCMCommandProto.Type.deleteBlocksCommand, command.getType());
-        return;
-      }
-      LOG.debug("Processing block deletion command.");
-      invocationCount++;
-
-      // move blocks to deleting state.
-      // this is a metadata update, the actual deletion happens in another
-      // recycling thread.
-      DeleteBlocksCommand cmd = (DeleteBlocksCommand) command;
-      List<DeletedBlocksTransaction> containerBlocks = cmd.blocksTobeDeleted();
-
-      DeletedContainerBlocksSummary summary =
-          DeletedContainerBlocksSummary.getFrom(containerBlocks);
-      LOG.info("Start to delete container blocks, TXIDs={}, "
-              + "numOfContainers={}, numOfBlocks={}",
-          summary.getTxIDSummary(),
-          summary.getNumOfContainers(),
-          summary.getNumOfBlocks());
-
-      ContainerBlocksDeletionACKProto.Builder resultBuilder =
-          ContainerBlocksDeletionACKProto.newBuilder();
-      containerBlocks.forEach(entry -> {
-        DeleteBlockTransactionResult.Builder txResultBuilder =
-            DeleteBlockTransactionResult.newBuilder();
-        txResultBuilder.setTxID(entry.getTxID());
-        long containerId = entry.getContainerID();
-        try {
-          Container cont = containerSet.getContainer(containerId);
-          if (cont == null) {
-            throw new StorageContainerException("Unable to find the container "
-                + containerId, CONTAINER_NOT_FOUND);
-          }
-          ContainerProtos.ContainerType containerType = cont.getContainerType();
-          switch (containerType) {
-          case KeyValueContainer:
-            KeyValueContainerData containerData = (KeyValueContainerData)
-                cont.getContainerData();
-            cont.writeLock();
-            try {
-              deleteKeyValueContainerBlocks(containerData, entry);
-            } finally {
-              cont.writeUnlock();
-            }
-            txResultBuilder.setContainerID(containerId)
-                .setSuccess(true);
-            break;
-          default:
-            LOG.error(
-                "Delete Blocks Command Handler is not implemented for " +
-                    "containerType {}", containerType);
-          }
-        } catch (IOException e) {
-          LOG.warn("Failed to delete blocks for container={}, TXID={}",
-              entry.getContainerID(), entry.getTxID(), e);
-          txResultBuilder.setContainerID(containerId)
-              .setSuccess(false);
-        }
-        resultBuilder.addResults(txResultBuilder.build())
-            .setDnId(context.getParent().getDatanodeDetails()
-                .getUuid().toString());
-      });
-      blockDeletionACK = resultBuilder.build();
-
-      // Send ACK back to SCM as long as meta updated
-      // TODO Or we should wait until the blocks are actually deleted?
-      if (!containerBlocks.isEmpty()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Sending following block deletion ACK to SCM");
-          for (DeleteBlockTransactionResult result : blockDeletionACK
-              .getResultsList()) {
-            LOG.debug(result.getTxID() + " : " + result.getSuccess());
-          }
-        }
-      }
-      cmdExecuted = true;
-    } finally {
-      final ContainerBlocksDeletionACKProto deleteAck =
-          blockDeletionACK;
-      Consumer<CommandStatus> statusUpdater = (cmdStatus) -> {
-        cmdStatus.setStatus(cmdExecuted);
-        ((DeleteBlockCommandStatus) cmdStatus).setBlocksDeletionAck(deleteAck);
-      };
-      updateCommandStatus(context, command, statusUpdater, LOG);
-      long endTime = Time.monotonicNow();
-      totalTime += endTime - startTime;
-    }
-  }
-
-  /**
-   * Moves a batch of blocks in a container to the deleting state. This is a
-   * metadata update; the actual deletes happen asynchronously.
-   *
-   * @param containerData - KeyValueContainerData
-   * @param delTX a block deletion transaction.
-   * @throws IOException if I/O error occurs.
-   */
-  private void deleteKeyValueContainerBlocks(
-      KeyValueContainerData containerData, DeletedBlocksTransaction delTX)
-      throws IOException {
-    long containerId = delTX.getContainerID();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Processing Container : {}, DB path : {}", containerId,
-          containerData.getMetadataPath());
-    }
-
-    if (delTX.getTxID() < containerData.getDeleteTransactionId()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(String.format("Ignoring delete blocks for containerId: %d."
-                + " Outdated delete transactionId %d < %d", containerId,
-            delTX.getTxID(), containerData.getDeleteTransactionId()));
-      }
-      return;
-    }
-
-    int newDeletionBlocks = 0;
-    try(ReferenceCountedDB containerDB =
-            BlockUtils.getDB(containerData, conf)) {
-      for (Long blk : delTX.getLocalIDList()) {
-        BatchOperation batch = new BatchOperation();
-        byte[] blkBytes = Longs.toByteArray(blk);
-        byte[] blkInfo = containerDB.getStore().get(blkBytes);
-        if (blkInfo != null) {
-          byte[] deletingKeyBytes =
-              DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk);
-          byte[] deletedKeyBytes =
-              DFSUtil.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk);
-          if (containerDB.getStore().get(deletingKeyBytes) != null
-              || containerDB.getStore().get(deletedKeyBytes) != null) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug(String.format(
-                  "Ignoring delete for block %d in container %d."
-                      + " Entry already added.", blk, containerId));
-            }
-            continue;
-          }
-          // Found the block in container db,
-          // use an atomic update to change its state to deleting.
-          batch.put(deletingKeyBytes, blkInfo);
-          batch.delete(blkBytes);
-          try {
-            containerDB.getStore().writeBatch(batch);
-            newDeletionBlocks++;
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Transited Block {} to DELETING state in container {}",
-                  blk, containerId);
-            }
-          } catch (IOException e) {
-            // if some blocks failed to delete, we fail this TX,
-            // without sending this ACK to SCM, SCM will resend the TX
-            // with a certain number of retries.
-            throw new IOException(
-                "Failed to delete blocks for TXID = " + delTX.getTxID(), e);
-          }
-        } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Block {} not found or already under deletion in"
-                + " container {}, skip deleting it.", blk, containerId);
-          }
-        }
-      }
-
-      containerDB.getStore()
-          .put(DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX),
-              Longs.toByteArray(delTX.getTxID()));
-      containerData
-          .updateDeleteTransactionId(delTX.getTxID());
-      // update pending deletion blocks count in in-memory container status
-      containerData.incrPendingDeletionBlocks(newDeletionBlocks);
-    }
-  }
-
-  @Override
-  public SCMCommandProto.Type getCommandType() {
-    return SCMCommandProto.Type.deleteBlocksCommand;
-  }
-
-  @Override
-  public int getInvocationCount() {
-    return this.invocationCount;
-  }
-
-  @Override
-  public long getAverageRunTime() {
-    if (invocationCount > 0) {
-      return totalTime / invocationCount;
-    }
-    return 0;
-  }
-}
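
For reference, an illustrative sketch of the atomic mark-as-deleting step
deleteKeyValueContainerBlocks() performed per block: the block key is
rewritten under the deleting prefix and the original key deleted in one
batch, so either both operations apply or neither does. The store and
localId variables are hypothetical stand-ins.

    byte[] blkBytes = Longs.toByteArray(localId);
    byte[] blkInfo = store.get(blkBytes);
    if (blkInfo != null) {
      BatchOperation batch = new BatchOperation();
      batch.put(DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + localId),
          blkInfo);
      batch.delete(blkBytes);
      store.writeBatch(batch);  // the block is now only visible as "deleting"
    }
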
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java
deleted file mode 100644
index b54fb1a..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .SCMConnectionManager;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Handler to process the DeleteContainerCommand from SCM.
- */
-public class DeleteContainerCommandHandler implements CommandHandler {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DeleteContainerCommandHandler.class);
-
-  private int invocationCount;
-  private long totalTime;
-
-  @Override
-  public void handle(final SCMCommand command,
-                     final OzoneContainer ozoneContainer,
-                     final StateContext context,
-                     final SCMConnectionManager connectionManager) {
-    final long startTime = Time.monotonicNow();
-    invocationCount++;
-    try {
-      final DeleteContainerCommand deleteContainerCommand =
-          (DeleteContainerCommand) command;
-      final ContainerController controller = ozoneContainer.getController();
-      controller.deleteContainer(deleteContainerCommand.getContainerID(),
-          deleteContainerCommand.isForce());
-    } catch (IOException e) {
-      LOG.error("Exception occurred while deleting the container.", e);
-    } finally {
-      totalTime += Time.monotonicNow() - startTime;
-    }
-
-  }
-
-  @Override
-  public SCMCommandProto.Type getCommandType() {
-    return SCMCommandProto.Type.deleteContainerCommand;
-  }
-
-  @Override
-  public int getInvocationCount() {
-    return this.invocationCount;
-  }
-
-  @Override
-  public long getAverageRunTime() {
-    return invocationCount == 0 ? 0 : totalTime / invocationCount;
-  }
-}
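
Both this handler and DeleteBlockCommandHandler keep the same pair of
counters and derive an average on demand. A self-contained sketch of that
metrics idiom (class and method names here are illustrative, not part of
the deleted API):

import java.util.concurrent.TimeUnit;

public class TimedHandler {
  private int invocationCount;
  private long totalTimeMs;

  public void handle(Runnable work) {
    final long start = System.nanoTime();
    invocationCount++;
    try {
      work.run();                       // the actual command processing
    } finally {
      // Accumulate wall-clock time even when the command fails.
      totalTimeMs += TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    }
  }

  /** Average run time per invocation in ms, or 0 before the first call. */
  public long getAverageRunTime() {
    return invocationCount == 0 ? 0 : totalTimeMs / invocationCount;
  }
}
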
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
deleted file mode 100644
index a028041..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor;
-import org.apache.hadoop.ozone.container.replication.ReplicationTask;
-import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-
-import com.google.common.base.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Command handler to copy containers from sources.
- */
-public class ReplicateContainerCommandHandler implements CommandHandler {
-
-  static final Logger LOG =
-      LoggerFactory.getLogger(ReplicateContainerCommandHandler.class);
-
-  private int invocationCount;
-
-  private long totalTime;
-
-  private Configuration conf;
-
-  private ReplicationSupervisor supervisor;
-
-  public ReplicateContainerCommandHandler(
-      Configuration conf,
-      ReplicationSupervisor supervisor) {
-    this.conf = conf;
-    this.supervisor = supervisor;
-  }
-
-  @Override
-  public void handle(SCMCommand command, OzoneContainer container,
-      StateContext context, SCMConnectionManager connectionManager) {
-
-    final ReplicateContainerCommand replicateCommand =
-        (ReplicateContainerCommand) command;
-    final List<DatanodeDetails> sourceDatanodes =
-        replicateCommand.getSourceDatanodes();
-    final long containerID = replicateCommand.getContainerID();
-
-    Preconditions.checkArgument(sourceDatanodes.size() > 0,
-        String.format("Replication command is received for container %d "
-            + "but the size of source datanodes was 0.", containerID));
-
-    supervisor.addTask(new ReplicationTask(containerID, sourceDatanodes));
-  }
-
-  @Override
-  public SCMCommandProto.Type getCommandType() {
-    return Type.replicateContainerCommand;
-  }
-
-  @Override
-  public int getInvocationCount() {
-    return this.invocationCount;
-  }
-
-  @Override
-  public long getAverageRunTime() {
-    if (invocationCount > 0) {
-      return totalTime / invocationCount;
-    }
-    return 0;
-  }
-}
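
The replicate handler never copies container data itself: it validates the
command and enqueues a task for a supervisor to run on its own threads. A
hedged sketch of that producer/consumer split; Supervisor and Task are
stand-ins, not the deleted ReplicationSupervisor/ReplicationTask API.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class ReplicationSketch {

  static final class Task {
    final long containerId;
    final List<String> sources;
    Task(long containerId, List<String> sources) {
      this.containerId = containerId;
      this.sources = sources;
    }
  }

  static final class Supervisor {
    private final ExecutorService pool = Executors.newFixedThreadPool(4);
    void addTask(Task t) {
      // The handler thread returns immediately; the copy runs here later.
      pool.submit(() -> System.out.println(
          "replicating container " + t.containerId + " from " + t.sources));
    }
  }

  // Mirrors the handler above: validate, enqueue, return.
  static void handle(Supervisor supervisor, long containerId,
      List<String> sources) {
    if (sources.isEmpty()) {
      throw new IllegalArgumentException(String.format(
          "Replication command received for container %d but the source "
              + "datanode list is empty.", containerId));
    }
    supervisor.addTask(new Task(containerId, sources));
  }
}
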
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
deleted file mode 100644
index 1e9c8dc..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java
deleted file mode 100644
index feb2f81..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine;
-/**
-
- State machine class is used by the container to denote various states a
- container can be in and also is used for command processing.
-
- Container has the following states.
-
- Start - > getVersion -> Register -> Running  -> Shutdown
-
- */
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java
deleted file mode 100644
index 25be207..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.states;
-
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * State Interface that allows tasks to maintain states.
- */
-public interface DatanodeState<T> {
-  /**
-   * Called before entering this state.
-   */
-  void onEnter();
-
-  /**
-   * Called after exiting this state.
-   */
-  void onExit();
-
-  /**
-   * Executes one or more tasks that are needed by this state.
-   *
-   * @param executor -  ExecutorService
-   */
-  void execute(ExecutorService executor);
-
-  /**
-   * Wait for execute to finish.
-   *
-   * @param time - Time
-   * @param timeUnit - Unit of time.
-   * @throws InterruptedException
-   * @throws ExecutionException
-   * @throws TimeoutException
-   */
-  T await(long time, TimeUnit timeUnit)
-      throws InterruptedException, ExecutionException, TimeoutException;
-
-}
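
Every concrete state below follows the same shape: execute() submits the
state's work to a shared executor and await() blocks on the resulting
Future under a caller-supplied deadline. A minimal implementation against
the DatanodeState interface above, using String as the state token for
brevity (SleepState is an illustrative name, not deleted code):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

class SleepState implements DatanodeState<String>, Callable<String> {
  private Future<String> result;

  @Override
  public void onEnter() { }             // e.g. LOG.trace("entering")

  @Override
  public void onExit() { }              // e.g. LOG.trace("exiting")

  @Override
  public String call() throws Exception {
    Thread.sleep(100);                  // the state's actual work
    return "NEXT_STATE";
  }

  @Override
  public void execute(ExecutorService executor) {
    result = executor.submit(this);     // non-blocking hand-off
  }

  @Override
  public String await(long time, TimeUnit timeUnit)
      throws InterruptedException, ExecutionException, TimeoutException {
    return result.get(time, timeUnit);  // caller enforces the deadline
  }
}
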
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
deleted file mode 100644
index 2738862..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.states.datanode;
-
-import com.google.common.base.Strings;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .SCMConnectionManager;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.states.DatanodeState;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.Collection;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses;
-
-/**
- * Init Datanode State is the task that gets run when we are in Init State.
- */
-public class InitDatanodeState implements DatanodeState,
-    Callable<DatanodeStateMachine.DatanodeStates> {
-  static final Logger LOG = LoggerFactory.getLogger(InitDatanodeState.class);
-  private final SCMConnectionManager connectionManager;
-  private final Configuration conf;
-  private final StateContext context;
-  private Future<DatanodeStateMachine.DatanodeStates> result;
-
-  /**
-   *  Create InitDatanodeState Task.
-   *
-   * @param conf - Conf
-   * @param connectionManager - Connection Manager
-   * @param context - Current Context
-   */
-  public InitDatanodeState(Configuration conf,
-                           SCMConnectionManager connectionManager,
-                           StateContext context) {
-    this.conf = conf;
-    this.connectionManager = connectionManager;
-    this.context = context;
-  }
-
-  /**
-   * Computes a result, or throws an exception if unable to do so.
-   *
-   * @return computed result
-   * @throws Exception if unable to compute a result
-   */
-  @Override
-  public DatanodeStateMachine.DatanodeStates call() throws Exception {
-    Collection<InetSocketAddress> addresses = null;
-    try {
-      addresses = getSCMAddresses(conf);
-    } catch (IllegalArgumentException e) {
-      if (!Strings.isNullOrEmpty(e.getMessage())) {
-        LOG.error("Failed to get SCM addresses: " + e.getMessage());
-      }
-      return DatanodeStateMachine.DatanodeStates.SHUTDOWN;
-    }
-
-    if (addresses == null || addresses.isEmpty()) {
-      LOG.error("Null or empty SCM address list found.");
-      return DatanodeStateMachine.DatanodeStates.SHUTDOWN;
-    } else {
-      for (InetSocketAddress addr : addresses) {
-        if (addr.isUnresolved()) {
-          LOG.warn("One SCM address ({}) can't (yet?) be resolved. Postpone "
-              + "initialization.", addr);
-
-          //skip any further initialization. DatanodeStateMachine will try it
-          // again after the hb frequency
-          return this.context.getState();
-        }
-      }
-      for (InetSocketAddress addr : addresses) {
-        connectionManager.addSCMServer(addr);
-      }
-    }
-
-    // If datanode ID is set, persist it to the ID file.
-    persistContainerDatanodeDetails();
-
-    return this.context.getState().getNextState();
-  }
-
-  /**
-   * Persist DatanodeDetails to datanode.id file.
-   */
-  private void persistContainerDatanodeDetails() {
-    String dataNodeIDPath = HddsServerUtil.getDatanodeIdFilePath(conf);
-    if (Strings.isNullOrEmpty(dataNodeIDPath)) {
-      LOG.error("A valid path is needed for config setting {}",
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR);
-      this.context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN);
-      return;
-    }
-    File idPath = new File(dataNodeIDPath);
-    DatanodeDetails datanodeDetails = this.context.getParent()
-        .getDatanodeDetails();
-    if (datanodeDetails != null && !idPath.exists()) {
-      try {
-        ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath);
-      } catch (IOException ex) {
-        // Writing DatanodeDetails to the datanode.id file failed, which
-        // is critical, so shut down the state machine.
-        LOG.error("Writing to {} failed {}", dataNodeIDPath, ex.getMessage());
-        this.context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN);
-        return;
-      }
-      LOG.info("DatanodeDetails is persisted to {}", dataNodeIDPath);
-    }
-  }
-
-  /**
-   * Called before entering this state.
-   */
-  @Override
-  public void onEnter() {
-    LOG.trace("Entering init container state");
-  }
-
-  /**
-   * Called after exiting this state.
-   */
-  @Override
-  public void onExit() {
-    LOG.trace("Exiting init container state");
-  }
-
-  /**
-   * Executes one or more tasks that are needed by this state.
-   *
-   * @param executor -  ExecutorService
-   */
-  @Override
-  public void execute(ExecutorService executor) {
-    result = executor.submit(this);
-  }
-
-  /**
-   * Wait for execute to finish.
-   *
-   * @param time     - Time
-   * @param timeUnit - Unit of time.
-   */
-  @Override
-  public DatanodeStateMachine.DatanodeStates await(long time,
-      TimeUnit timeUnit) throws InterruptedException,
-      ExecutionException, TimeoutException {
-    return result.get(time, timeUnit);
-  }
-}
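
call() above deliberately backs off when any SCM address is still
unresolved instead of registering against a partial list; the datanode
stays in its current state and retries after the next heartbeat interval.
The guard reduces to a few lines (ScmAddressGuard is a hypothetical
helper, not part of the deleted API):

import java.net.InetSocketAddress;
import java.util.Collection;

final class ScmAddressGuard {
  /** @return true only when every SCM address resolved to an IP. */
  static boolean allResolved(Collection<InetSocketAddress> addresses) {
    if (addresses == null || addresses.isEmpty()) {
      return false;                     // nothing to connect to
    }
    for (InetSocketAddress addr : addresses) {
      if (addr.isUnresolved()) {
        return false;                   // retry after the next heartbeat
      }
    }
    return true;
  }
}
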
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
deleted file mode 100644
index 6b596fe..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
+++ /dev/null
@@ -1,187 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.states.datanode;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.states.DatanodeState;
-import org.apache.hadoop.ozone.container.common.states.endpoint.HeartbeatEndpointTask;
-import org.apache.hadoop.ozone.container.common.states.endpoint.RegisterEndpointTask;
-import org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * Class that implements handshake with SCM.
- */
-public class RunningDatanodeState implements DatanodeState {
-  static final Logger
-      LOG = LoggerFactory.getLogger(RunningDatanodeState.class);
-  private final SCMConnectionManager connectionManager;
-  private final Configuration conf;
-  private final StateContext context;
-  private CompletionService<EndpointStateMachine.EndPointStates> ecs;
-
-  public RunningDatanodeState(Configuration conf,
-      SCMConnectionManager connectionManager,
-      StateContext context) {
-    this.connectionManager = connectionManager;
-    this.conf = conf;
-    this.context = context;
-  }
-
-  /**
-   * Called before entering this state.
-   */
-  @Override
-  public void onEnter() {
-    LOG.trace("Entering handshake task.");
-  }
-
-  /**
-   * Called after exiting this state.
-   */
-  @Override
-  public void onExit() {
-    LOG.trace("Exiting handshake task.");
-  }
-
-  /**
-   * Executes one or more tasks that are needed by this state.
-   *
-   * @param executor -  ExecutorService
-   */
-  @Override
-  public void execute(ExecutorService executor) {
-    ecs = new ExecutorCompletionService<>(executor);
-    for (EndpointStateMachine endpoint : connectionManager.getValues()) {
-      Callable<EndpointStateMachine.EndPointStates> endpointTask
-          = getEndPointTask(endpoint);
-      if (endpointTask != null) {
-        ecs.submit(endpointTask);
-      } else {
-        // This can happen if a task takes longer than the timeout
-        // specified for it in await and sets the state to Shutdown on
-        // completion; we may then observe the Shutdown state here, so we
-        // need to shut down the DatanodeStateMachine.
-        LOG.error("State is Shutdown in RunningDatanodeState");
-        context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN);
-      }
-    }
-  }
-  // TODO: Cache some of these tasks instead of creating them
-  // all the time.
-  private Callable<EndpointStateMachine.EndPointStates>
-      getEndPointTask(EndpointStateMachine endpoint) {
-    switch (endpoint.getState()) {
-    case GETVERSION:
-      return new VersionEndpointTask(endpoint, conf, context.getParent()
-          .getContainer());
-    case REGISTER:
-      return  RegisterEndpointTask.newBuilder()
-          .setConfig(conf)
-          .setEndpointStateMachine(endpoint)
-          .setContext(context)
-          .setDatanodeDetails(context.getParent().getDatanodeDetails())
-          .setOzoneContainer(context.getParent().getContainer())
-          .build();
-    case HEARTBEAT:
-      return HeartbeatEndpointTask.newBuilder()
-          .setConfig(conf)
-          .setEndpointStateMachine(endpoint)
-          .setDatanodeDetails(context.getParent().getDatanodeDetails())
-          .setContext(context)
-          .build();
-    case SHUTDOWN:
-      break;
-    default:
-      throw new IllegalArgumentException("Illegal Argument.");
-    }
-    return null;
-  }
-
-  /**
-   * Computes the next state the datanode state machine must move to by
-   * looking at the states of all endpoints.
-   * <p>
-   * If any endpoint has moved to Shutdown, we either hit an unrecoverable
-   * error or we were told to shut down. In either case the datanode state
-   * machine should move to the Shutdown state; otherwise it remains in
-   * the Running state.
-   *
-   * @return next container state.
-   */
-  private DatanodeStateMachine.DatanodeStates
-      computeNextContainerState(
-      List<Future<EndpointStateMachine.EndPointStates>> results) {
-    for (Future<EndpointStateMachine.EndPointStates> state : results) {
-      try {
-        if (state.get() == EndpointStateMachine.EndPointStates.SHUTDOWN) {
-          // if any endpoint tells us to shutdown we move to shutdown state.
-          return DatanodeStateMachine.DatanodeStates.SHUTDOWN;
-        }
-      } catch (InterruptedException | ExecutionException e) {
-        LOG.error("Error in executing end point task.", e);
-      }
-    }
-    return DatanodeStateMachine.DatanodeStates.RUNNING;
-  }
-
-  /**
-   * Wait for execute to finish.
-   *
-   * @param duration - Time
-   * @param timeUnit - Unit of duration.
-   */
-  @Override
-  public DatanodeStateMachine.DatanodeStates
-      await(long duration, TimeUnit timeUnit)
-      throws InterruptedException, ExecutionException, TimeoutException {
-    int count = connectionManager.getValues().size();
-    int returned = 0;
-    long timeLeft = timeUnit.toMillis(duration);
-    long startTime = Time.monotonicNow();
-    List<Future<EndpointStateMachine.EndPointStates>> results = new
-        LinkedList<>();
-
-    while (returned < count && timeLeft > 0) {
-      Future<EndpointStateMachine.EndPointStates> result =
-          ecs.poll(timeLeft, TimeUnit.MILLISECONDS);
-      if (result != null) {
-        results.add(result);
-        returned++;
-      }
-      timeLeft = timeLeft - (Time.monotonicNow() - startTime);
-    }
-    return computeNextContainerState(results);
-  }
-}
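
await() above is a bounded drain of an ExecutorCompletionService: poll
with whatever time remains until every endpoint has reported or the budget
lapses. The same loop in isolation, written against an absolute deadline
(a deliberate choice: deriving the remaining wait from one fixed deadline
avoids repeatedly subtracting the total elapsed time from an already
reduced budget). DeadlinePoll is an illustrative name.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

final class DeadlinePoll {
  /** Collect up to {@code count} results within the given time budget. */
  static <T> List<Future<T>> drain(CompletionService<T> ecs, int count,
      long duration, TimeUnit unit) throws InterruptedException {
    List<Future<T>> results = new ArrayList<>();
    long deadline = System.nanoTime() + unit.toNanos(duration);
    while (results.size() < count) {
      long leftMs = TimeUnit.NANOSECONDS.toMillis(deadline - System.nanoTime());
      if (leftMs <= 0) {
        break;                          // budget exhausted: return what we have
      }
      Future<T> f = ecs.poll(leftMs, TimeUnit.MILLISECONDS);
      if (f != null) {
        results.add(f);
      }
    }
    return results;
  }
}
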
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java
deleted file mode 100644
index 6b8d16c..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.states.datanode;
-/**
- This package contians files that guide the state transitions from
- Init->Running->Shutdown for the datanode.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
deleted file mode 100644
index c50f457..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.states.endpoint;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.Descriptors;
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineActionsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineAction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerActionsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.ozone.container.common.helpers
-    .DeletedContainerBlocksSummary;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine.EndPointStates;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
-import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.time.ZonedDateTime;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.Callable;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_CONTAINER_ACTION_MAX_LIMIT;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_PIPELINE_ACTION_MAX_LIMIT;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_PIPELINE_ACTION_MAX_LIMIT_DEFAULT;
-
-/**
- * Heartbeat class for SCMs.
- */
-public class HeartbeatEndpointTask
-    implements Callable<EndpointStateMachine.EndPointStates> {
-  static final Logger LOG =
-      LoggerFactory.getLogger(HeartbeatEndpointTask.class);
-  private final EndpointStateMachine rpcEndpoint;
-  private final Configuration conf;
-  private DatanodeDetailsProto datanodeDetailsProto;
-  private StateContext context;
-  private int maxContainerActionsPerHB;
-  private int maxPipelineActionsPerHB;
-
-  /**
-   * Constructs an SCM heartbeat task.
-   *
-   * @param conf Config.
-   */
-  public HeartbeatEndpointTask(EndpointStateMachine rpcEndpoint,
-      Configuration conf, StateContext context) {
-    this.rpcEndpoint = rpcEndpoint;
-    this.conf = conf;
-    this.context = context;
-    this.maxContainerActionsPerHB = conf.getInt(HDDS_CONTAINER_ACTION_MAX_LIMIT,
-        HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT);
-    this.maxPipelineActionsPerHB = conf.getInt(HDDS_PIPELINE_ACTION_MAX_LIMIT,
-        HDDS_PIPELINE_ACTION_MAX_LIMIT_DEFAULT);
-  }
-
-  /**
-   * Get the datanode details proto.
-   *
-   * @return DatanodeDetailsProto
-   */
-  public DatanodeDetailsProto getDatanodeDetailsProto() {
-    return datanodeDetailsProto;
-  }
-
-  /**
-   * Set container node ID proto.
-   *
-   * @param datanodeDetailsProto - the node id.
-   */
-  public void setDatanodeDetailsProto(DatanodeDetailsProto
-      datanodeDetailsProto) {
-    this.datanodeDetailsProto = datanodeDetailsProto;
-  }
-
-  /**
-   * Computes a result, or throws an exception if unable to do so.
-   *
-   * @return computed result
-   * @throws Exception if unable to compute a result
-   */
-  @Override
-  public EndpointStateMachine.EndPointStates call() throws Exception {
-    rpcEndpoint.lock();
-    SCMHeartbeatRequestProto.Builder requestBuilder = null;
-    try {
-      Preconditions.checkState(this.datanodeDetailsProto != null);
-
-      requestBuilder = SCMHeartbeatRequestProto.newBuilder()
-          .setDatanodeDetails(datanodeDetailsProto);
-      addReports(requestBuilder);
-      addContainerActions(requestBuilder);
-      addPipelineActions(requestBuilder);
-      SCMHeartbeatRequestProto request = requestBuilder.build();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Sending heartbeat message :: {}", request.toString());
-      }
-      SCMHeartbeatResponseProto response = rpcEndpoint.getEndPoint()
-          .sendHeartbeat(request);
-      processResponse(response, datanodeDetailsProto);
-      rpcEndpoint.setLastSuccessfulHeartbeat(ZonedDateTime.now());
-      rpcEndpoint.zeroMissedCount();
-    } catch (IOException ex) {
-      // put back the reports which failed to be sent
-      if (requestBuilder != null) {
-        putBackReports(requestBuilder);
-      }
-      rpcEndpoint.logIfNeeded(ex);
-    } finally {
-      rpcEndpoint.unlock();
-    }
-    return rpcEndpoint.getState();
-  }
-
-  // TODO: Make it generic.
-  private void putBackReports(SCMHeartbeatRequestProto.Builder requestBuilder) {
-    List<GeneratedMessage> reports = new LinkedList<>();
-    if (requestBuilder.hasContainerReport()) {
-      reports.add(requestBuilder.getContainerReport());
-    }
-    if (requestBuilder.hasNodeReport()) {
-      reports.add(requestBuilder.getNodeReport());
-    }
-    if (requestBuilder.getCommandStatusReportsCount() != 0) {
-      reports.addAll(requestBuilder.getCommandStatusReportsList());
-    }
-    if (requestBuilder.getIncrementalContainerReportCount() != 0) {
-      reports.addAll(requestBuilder.getIncrementalContainerReportList());
-    }
-    context.putBackReports(reports);
-  }
-
-  /**
-   * Adds all the available reports to heartbeat.
-   *
-   * @param requestBuilder builder to which the report has to be added.
-   */
-  private void addReports(SCMHeartbeatRequestProto.Builder requestBuilder) {
-    for (GeneratedMessage report : context.getAllAvailableReports()) {
-      String reportName = report.getDescriptorForType().getFullName();
-      for (Descriptors.FieldDescriptor descriptor :
-          SCMHeartbeatRequestProto.getDescriptor().getFields()) {
-        String heartbeatFieldName = descriptor.getMessageType().getFullName();
-        if (heartbeatFieldName.equals(reportName)) {
-          if (descriptor.isRepeated()) {
-            requestBuilder.addRepeatedField(descriptor, report);
-          } else {
-            requestBuilder.setField(descriptor, report);
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Adds all the pending ContainerActions to the heartbeat.
-   *
-   * @param requestBuilder builder to which the report has to be added.
-   */
-  private void addContainerActions(
-      SCMHeartbeatRequestProto.Builder requestBuilder) {
-    List<ContainerAction> actions = context.getPendingContainerAction(
-        maxContainerActionsPerHB);
-    if (!actions.isEmpty()) {
-      ContainerActionsProto cap = ContainerActionsProto.newBuilder()
-          .addAllContainerActions(actions)
-          .build();
-      requestBuilder.setContainerActions(cap);
-    }
-  }
-
-  /**
-   * Adds all the pending PipelineActions to the heartbeat.
-   *
-   * @param requestBuilder builder to which the report has to be added.
-   */
-  private void addPipelineActions(
-      SCMHeartbeatRequestProto.Builder requestBuilder) {
-    List<PipelineAction> actions = context.getPendingPipelineAction(
-        maxPipelineActionsPerHB);
-    if (!actions.isEmpty()) {
-      PipelineActionsProto pap = PipelineActionsProto.newBuilder()
-          .addAllPipelineActions(actions)
-          .build();
-      requestBuilder.setPipelineActions(pap);
-    }
-  }
-
-  /**
-   * Returns a builder class for HeartbeatEndpointTask task.
-   * @return   Builder.
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Adds each command from the heartbeat response to the processing queue.
-   *
-   * @param response - SCMHeartbeat response.
-   */
-  private void processResponse(SCMHeartbeatResponseProto response,
-      final DatanodeDetailsProto datanodeDetails) {
-    // Verify the response is indeed for this datanode.
-    Preconditions.checkState(response.getDatanodeUUID()
-            .equalsIgnoreCase(datanodeDetails.getUuid()),
-        "Unexpected datanode ID in the response.");
-    for (SCMCommandProto commandResponseProto : response
-        .getCommandsList()) {
-      switch (commandResponseProto.getCommandType()) {
-      case reregisterCommand:
-        if (rpcEndpoint.getState() == EndPointStates.HEARTBEAT) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Received SCM notification to register."
-                + " Interrupt HEARTBEAT and transit to REGISTER state.");
-          }
-          rpcEndpoint.setState(EndPointStates.REGISTER);
-        } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Illegal state {} found, expecting {}.",
-                rpcEndpoint.getState().name(), EndPointStates.HEARTBEAT);
-          }
-        }
-        break;
-      case deleteBlocksCommand:
-        DeleteBlocksCommand db = DeleteBlocksCommand
-            .getFromProtobuf(
-                commandResponseProto.getDeleteBlocksCommandProto());
-        if (!db.blocksTobeDeleted().isEmpty()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(DeletedContainerBlocksSummary
-                .getFrom(db.blocksTobeDeleted())
-                .toString());
-          }
-          this.context.addCommand(db);
-        }
-        break;
-      case closeContainerCommand:
-        CloseContainerCommand closeContainer =
-            CloseContainerCommand.getFromProtobuf(
-                commandResponseProto.getCloseContainerCommandProto());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Received SCM container close request for container {}",
-              closeContainer.getContainerID());
-        }
-        this.context.addCommand(closeContainer);
-        break;
-      case replicateContainerCommand:
-        ReplicateContainerCommand replicateContainerCommand =
-            ReplicateContainerCommand.getFromProtobuf(
-                commandResponseProto.getReplicateContainerCommandProto());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Received SCM container replicate request for container {}",
-              replicateContainerCommand.getContainerID());
-        }
-        this.context.addCommand(replicateContainerCommand);
-        break;
-      case deleteContainerCommand:
-        DeleteContainerCommand deleteContainerCommand =
-            DeleteContainerCommand.getFromProtobuf(
-                commandResponseProto.getDeleteContainerCommandProto());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Received SCM delete container request for container {}",
-              deleteContainerCommand.getContainerID());
-        }
-        this.context.addCommand(deleteContainerCommand);
-        break;
-      default:
-        throw new IllegalArgumentException("Unknown response : "
-            + commandResponseProto.getCommandType().name());
-      }
-    }
-  }
-
-  /**
-   * Builder class for HeartbeatEndpointTask.
-   */
-  public static class Builder {
-    private EndpointStateMachine endPointStateMachine;
-    private Configuration conf;
-    private DatanodeDetails datanodeDetails;
-    private StateContext context;
-
-    /**
-     * Constructs the builder class.
-     */
-    public Builder() {
-    }
-
-    /**
-     * Sets the endpoint state machine.
-     *
-     * @param rpcEndPoint - Endpoint state machine.
-     * @return Builder
-     */
-    public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) {
-      this.endPointStateMachine = rpcEndPoint;
-      return this;
-    }
-
-    /**
-     * Sets the Config.
-     *
-     * @param config - config
-     * @return Builder
-     */
-    public Builder setConfig(Configuration config) {
-      this.conf = config;
-      return this;
-    }
-
-    /**
-     * Sets the NodeID.
-     *
-     * @param dnDetails - NodeID proto
-     * @return Builder
-     */
-    public Builder setDatanodeDetails(DatanodeDetails dnDetails) {
-      this.datanodeDetails = dnDetails;
-      return this;
-    }
-
-    /**
-     * Sets the context.
-     * @param stateContext - State context.
-     * @return this.
-     */
-    public Builder setContext(StateContext stateContext) {
-      this.context = stateContext;
-      return this;
-    }
-
-    public HeartbeatEndpointTask build() {
-      if (endPointStateMachine == null) {
-        LOG.error("No endpoint specified.");
-        throw new IllegalArgumentException("A valid endpoint state machine is" +
-            " needed to construct HeartbeatEndpointTask task");
-      }
-
-      if (conf == null) {
-        LOG.error("No config specified.");
-        throw new IllegalArgumentException("A valid configration is needed to" +
-            " construct HeartbeatEndpointTask task");
-      }
-
-      if (datanodeDetails == null) {
-        LOG.error("No datanode specified.");
-        throw new IllegalArgumentException("A vaild Node ID is needed to " +
-            "construct HeartbeatEndpointTask task");
-      }
-
-      HeartbeatEndpointTask task = new HeartbeatEndpointTask(this
-          .endPointStateMachine, this.conf, this.context);
-      task.setDatanodeDetailsProto(datanodeDetails.getProtoBufMessage());
-      return task;
-    }
-  }
-}
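
A detail worth noting in call() and putBackReports(): when the heartbeat
RPC fails, the reports already folded into the half-built request are
handed back to the StateContext so the next heartbeat carries them instead
of dropping them. A stripped-down sketch of that salvage idiom; ReportBuffer
and its String payloads are hypothetical stand-ins for the protobuf reports.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

final class ReportBuffer {
  private final Deque<String> pending = new ArrayDeque<>();

  void add(String report) {
    pending.add(report);
  }

  /** Take everything queued for the next heartbeat. */
  List<String> drain() {
    List<String> out = new ArrayList<>(pending);
    pending.clear();
    return out;
  }

  /** Re-queue reports whose heartbeat failed so nothing is dropped. */
  void putBack(List<String> unsent) {
    // Add to the front so older reports still go out first.
    for (int i = unsent.size() - 1; i >= 0; i--) {
      pending.addFirst(unsent.get(i));
    }
  }
}
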
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
deleted file mode 100644
index b94b1cf..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.states.endpoint;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.UUID;
-import java.util.concurrent.Callable;
-import java.util.concurrent.Future;
-
-/**
- * Register a datanode with SCM.
- */
-public final class RegisterEndpointTask implements
-    Callable<EndpointStateMachine.EndPointStates> {
-  static final Logger LOG = LoggerFactory.getLogger(RegisterEndpointTask.class);
-
-  private final EndpointStateMachine rpcEndPoint;
-  private final Configuration conf;
-  private Future<EndpointStateMachine.EndPointStates> result;
-  private DatanodeDetails datanodeDetails;
-  private final OzoneContainer datanodeContainerManager;
-  private StateContext stateContext;
-
-  /**
-   * Creates a register endpoint task.
-   *
-   * @param rpcEndPoint - endpoint
-   * @param conf - conf
-   * @param ozoneContainer - container
-   */
-  @VisibleForTesting
-  public RegisterEndpointTask(EndpointStateMachine rpcEndPoint,
-      Configuration conf, OzoneContainer ozoneContainer,
-      StateContext context) {
-    this.rpcEndPoint = rpcEndPoint;
-    this.conf = conf;
-    this.datanodeContainerManager = ozoneContainer;
-    this.stateContext = context;
-
-  }
-
-  /**
-   * Get the DatanodeDetails.
-   *
-   * @return DatanodeDetails
-   */
-  public DatanodeDetails getDatanodeDetails() {
-    return datanodeDetails;
-  }
-
-  /**
-   * Set the DatanodeDetails.
-   *
-   * @param datanodeDetails - the datanode details.
-   */
-  public void setDatanodeDetails(
-      DatanodeDetails datanodeDetails) {
-    this.datanodeDetails = datanodeDetails;
-  }
-
-  /**
-   * Computes a result, or throws an exception if unable to do so.
-   *
-   * @return computed result
-   * @throws Exception if unable to compute a result
-   */
-  @Override
-  public EndpointStateMachine.EndPointStates call() throws Exception {
-
-    if (getDatanodeDetails() == null) {
-      LOG.error("DatanodeDetails cannot be null in RegisterEndpoint task, " +
-          "shutting down the endpoint.");
-      return rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN);
-    }
-
-    rpcEndPoint.lock();
-    try {
-
-      ContainerReportsProto containerReport = datanodeContainerManager
-          .getController().getContainerReport();
-      NodeReportProto nodeReport = datanodeContainerManager.getNodeReport();
-      PipelineReportsProto pipelineReportsProto =
-              datanodeContainerManager.getPipelineReport();
-      // TODO : Add responses to the command Queue.
-      SCMRegisteredResponseProto response = rpcEndPoint.getEndPoint()
-          .register(datanodeDetails.getProtoBufMessage(), nodeReport,
-                  containerReport, pipelineReportsProto);
-      Preconditions.checkState(UUID.fromString(response.getDatanodeUUID())
-              .equals(datanodeDetails.getUuid()),
-          "Unexpected datanode ID in the response.");
-      Preconditions.checkState(!StringUtils.isBlank(response.getClusterID()),
-          "Invalid cluster ID in the response.");
-      if (response.hasHostname() && response.hasIpAddress()) {
-        datanodeDetails.setHostName(response.getHostname());
-        datanodeDetails.setIpAddress(response.getIpAddress());
-      }
-      if (response.hasNetworkName() && response.hasNetworkLocation()) {
-        datanodeDetails.setNetworkName(response.getNetworkName());
-        datanodeDetails.setNetworkLocation(response.getNetworkLocation());
-      }
-      EndpointStateMachine.EndPointStates nextState =
-          rpcEndPoint.getState().getNextState();
-      rpcEndPoint.setState(nextState);
-      rpcEndPoint.zeroMissedCount();
-      this.stateContext.configureHeartbeatFrequency();
-    } catch (IOException ex) {
-      rpcEndPoint.logIfNeeded(ex);
-    } finally {
-      rpcEndPoint.unlock();
-    }
-
-    return rpcEndPoint.getState();
-  }
-
-  /**
-   * Returns a builder class for RegisterEndPoint task.
-   *
-   * @return Builder.
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder class for RegisterEndPoint task.
-   */
-  public static class Builder {
-    private EndpointStateMachine endPointStateMachine;
-    private Configuration conf;
-    private DatanodeDetails datanodeDetails;
-    private OzoneContainer container;
-    private StateContext context;
-
-    /**
-     * Constructs the builder class.
-     */
-    public Builder() {
-    }
-
-    /**
-     * Sets the endpoint state machine.
-     *
-     * @param rpcEndPoint - Endpoint state machine.
-     * @return Builder
-     */
-    public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) {
-      this.endPointStateMachine = rpcEndPoint;
-      return this;
-    }
-
-    /**
-     * Sets the Config.
-     *
-     * @param config - config
-     * @return Builder.
-     */
-    public Builder setConfig(Configuration config) {
-      this.conf = config;
-      return this;
-    }
-
-    /**
-     * Sets the NodeID.
-     *
-     * @param dnDetails - NodeID proto
-     * @return Builder
-     */
-    public Builder setDatanodeDetails(DatanodeDetails dnDetails) {
-      this.datanodeDetails = dnDetails;
-      return this;
-    }
-
-    /**
-     * Sets the OzoneContainer.
-     * @param ozoneContainer - the datanode container service.
-     * @return Builder
-     */
-    public Builder setOzoneContainer(OzoneContainer ozoneContainer) {
-      this.container = ozoneContainer;
-      return this;
-    }
-
-    public Builder setContext(StateContext stateContext) {
-      this.context = stateContext;
-      return this;
-    }
-
-    public RegisterEndpointTask build() {
-      if (endPointStateMachine == null) {
-        LOG.error("No endpoint specified.");
-        throw new IllegalArgumentException("A valid endpoint state machine is" +
-            " needed to construct RegisterEndPoint task");
-      }
-
-      if (conf == null) {
-        LOG.error("No config specified.");
-        throw new IllegalArgumentException(
-            "A valid configuration is needed to construct RegisterEndpoint "
-                + "task");
-      }
-
-      if (datanodeDetails == null) {
-        LOG.error("No datanode specified.");
-        throw new IllegalArgumentException("A vaild Node ID is needed to " +
-            "construct RegisterEndpoint task");
-      }
-
-      if (container == null) {
-        LOG.error("Container is not specified");
-        throw new IllegalArgumentException("Container is not specified to " +
-            "construct RegisterEndpoint task");
-      }
-
-      if (context == null) {
-        LOG.error("StateContext is not specified");
-        throw new IllegalArgumentException("Container is not specified to " +
-            "construct RegisterEndpoint task");
-      }
-
-      RegisterEndpointTask task = new RegisterEndpointTask(this
-          .endPointStateMachine, this.conf, this.container, this.context);
-      task.setDatanodeDetails(datanodeDetails);
-      return task;
-    }
-
-  }
-}
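
Registration treats the SCM reply defensively: the datanode UUID must echo
back and the cluster ID must be non-blank before the endpoint advances to
heartbeating. The same checks in plain Java, without Guava's Preconditions
(RegisterChecks and its parameters are illustrative):

import java.util.UUID;

final class RegisterChecks {
  static void validate(UUID localId, String replyUuid, String replyClusterId) {
    // UUID.fromString also rejects malformed IDs with an exception.
    if (!localId.equals(UUID.fromString(replyUuid))) {
      throw new IllegalStateException("Unexpected datanode ID in the response.");
    }
    if (replyClusterId == null || replyClusterId.trim().isEmpty()) {
      throw new IllegalStateException("Invalid cluster ID in the response.");
    }
  }
}
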
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
deleted file mode 100644
index 04eaa05..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.states.endpoint;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.protocol.VersionResponse;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.concurrent.Callable;
-
-/**
- * Task that returns version.
- */
-public class VersionEndpointTask implements
-    Callable<EndpointStateMachine.EndPointStates> {
-  public static final Logger LOG = LoggerFactory.getLogger(VersionEndpointTask
-      .class);
-  private final EndpointStateMachine rpcEndPoint;
-  private final Configuration configuration;
-  private final OzoneContainer ozoneContainer;
-
-  public VersionEndpointTask(EndpointStateMachine rpcEndPoint,
-                             Configuration conf, OzoneContainer container) {
-    this.rpcEndPoint = rpcEndPoint;
-    this.configuration = conf;
-    this.ozoneContainer = container;
-  }
-
-  /**
-   * Computes a result, or throws an exception if unable to do so.
-   *
-   * @return computed result
-   * @throws Exception if unable to compute a result
-   */
-  @Override
-  public EndpointStateMachine.EndPointStates call() throws Exception {
-    rpcEndPoint.lock();
-    try {
-      if (rpcEndPoint.getState().equals(
-          EndpointStateMachine.EndPointStates.GETVERSION)) {
-        SCMVersionResponseProto versionResponse =
-            rpcEndPoint.getEndPoint().getVersion(null);
-        VersionResponse response = VersionResponse.getFromProtobuf(
-            versionResponse);
-        rpcEndPoint.setVersion(response);
-
-        String scmId = response.getValue(OzoneConsts.SCM_ID);
-        String clusterId = response.getValue(OzoneConsts.CLUSTER_ID);
-
-        // Check volumes
-        VolumeSet volumeSet = ozoneContainer.getVolumeSet();
-        volumeSet.writeLock();
-        try {
-          Map<String, HddsVolume> volumeMap = volumeSet.getVolumeMap();
-
-          Preconditions.checkNotNull(scmId, "Reply from SCM: scmId cannot be " +
-              "null");
-          Preconditions.checkNotNull(clusterId, "Reply from SCM: clusterId " +
-              "cannot be null");
-
-          // If the version file does not exist on a volume, create it and
-          // also set the scmId.
-
-          for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
-            HddsVolume hddsVolume = entry.getValue();
-            boolean result = HddsVolumeUtil.checkVolume(hddsVolume, scmId,
-                clusterId, LOG);
-            if (!result) {
-              volumeSet.failVolume(hddsVolume.getHddsRootDir().getPath());
-            }
-          }
-          if (volumeSet.getVolumesList().isEmpty()) {
-            // All volumes are in an inconsistent state
-            throw new DiskOutOfSpaceException("All configured volumes are in " +
-                "an inconsistent state");
-          }
-        } finally {
-          volumeSet.writeUnlock();
-        }
-
-        // Start the container services after getting the version information
-        ozoneContainer.start(scmId);
-
-        EndpointStateMachine.EndPointStates nextState =
-            rpcEndPoint.getState().getNextState();
-        rpcEndPoint.setState(nextState);
-        rpcEndPoint.zeroMissedCount();
-      } else {
-        LOG.debug("Cannot execute GetVersion task as endpoint state machine " +
-            "is in {} state", rpcEndPoint.getState());
-      }
-    } catch (DiskOutOfSpaceException ex) {
-      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN);
-    } catch (IOException ex) {
-      rpcEndPoint.logIfNeeded(ex);
-    } finally {
-      rpcEndPoint.unlock();
-    }
-    return rpcEndPoint.getState();
-  }
-}
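
The deleted task above follows the lock / check-state / act / advance-state pattern shared by the endpoint tasks in this package. Below is a minimal, self-contained sketch of that pattern; every type in it is a simplified stand-in, not one of the removed HDDS classes.

    import java.util.concurrent.Callable;
    import java.util.concurrent.locks.ReentrantLock;

    // Simplified stand-in for EndpointStateMachine.EndPointStates.
    enum EndPointState { GETVERSION, REGISTER, SHUTDOWN }

    public class VersionTaskSketch implements Callable<EndPointState> {
      private final ReentrantLock lock = new ReentrantLock();
      private EndPointState state = EndPointState.GETVERSION;

      @Override
      public EndPointState call() throws Exception {
        lock.lock();                        // serialize with other endpoint tasks
        try {
          if (state == EndPointState.GETVERSION) {
            // 1. fetch the version info from the server (omitted)
            // 2. validate local volumes against the reply; a fatal
            //    inconsistency would move the endpoint to SHUTDOWN
            state = EndPointState.REGISTER; // advance to the next state
          }
        } catch (Exception fatal) {
          state = EndPointState.SHUTDOWN;   // give up on unrecoverable errors
        } finally {
          lock.unlock();
        }
        return state;
      }
    }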
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
deleted file mode 100644
index 1122598..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.states.endpoint;
-/**
- This package contains code for RPC endpoint state transitions.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java
deleted file mode 100644
index 92c953f..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.states;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java
deleted file mode 100644
index dc5f5bc..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto
-    .XceiverClientProtocolServiceGrpc;
-import org.apache.hadoop.hdds.security.token.TokenVerifier;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * Grpc Service for handling Container Commands on datanode.
- */
-public class GrpcXceiverService extends
-    XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceImplBase {
-  public static final Logger
-      LOG = LoggerFactory.getLogger(GrpcXceiverService.class);
-
-  private final ContainerDispatcher dispatcher;
-  private final boolean isGrpcTokenEnabled;
-  private final TokenVerifier tokenVerifier;
-
-  public GrpcXceiverService(ContainerDispatcher dispatcher) {
-    this(dispatcher, false, null);
-  }
-
-  public GrpcXceiverService(ContainerDispatcher dispatcher,
-      boolean grpcTokenEnabled, TokenVerifier tokenVerifier) {
-    this.dispatcher = dispatcher;
-    this.isGrpcTokenEnabled = grpcTokenEnabled;
-    this.tokenVerifier = tokenVerifier;
-  }
-
-  @Override
-  public StreamObserver<ContainerCommandRequestProto> send(
-      StreamObserver<ContainerCommandResponseProto> responseObserver) {
-    return new StreamObserver<ContainerCommandRequestProto>() {
-      private final AtomicBoolean isClosed = new AtomicBoolean(false);
-
-      @Override
-      public void onNext(ContainerCommandRequestProto request) {
-        try {
-          if (isGrpcTokenEnabled) {
-            // ServerInterceptors intercepts incoming request and creates ugi.
-            tokenVerifier.verify(UserGroupInformation.getCurrentUser()
-                .getShortUserName(), request.getEncodedToken());
-          }
-          ContainerCommandResponseProto resp =
-              dispatcher.dispatch(request, null);
-          responseObserver.onNext(resp);
-        } catch (Throwable e) {
-          LOG.error("{} got exception when processing"
-                    + " ContainerCommandRequestProto {}: {}", request, e);
-          responseObserver.onError(e);
-        }
-      }
-
-      @Override
-      public void onError(Throwable t) {
-        // for now we just log a message
-        LOG.error("ContainerCommand send on error", t);
-      }
-
-      @Override
-      public void onCompleted() {
-        if (isClosed.compareAndSet(false, true)) {
-          LOG.debug("{}: ContainerCommand send completed");
-          responseObserver.onCompleted();
-        }
-      }
-    };
-  }
-}
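
The deleted send() method has the standard shape of a bidirectional-streaming gRPC handler: return a request-side StreamObserver whose callbacks push results into the response-side observer. Below is a hedged sketch of that shape using hypothetical Req/Resp message types; a real service would extend a protoc-generated ImplBase instead.

    import io.grpc.stub.StreamObserver;
    import java.util.concurrent.atomic.AtomicBoolean;

    public final class EchoStreamSketch {
      // Hypothetical message types standing in for the protobuf classes.
      static final class Req { final String payload; Req(String p) { payload = p; } }
      static final class Resp { final String payload; Resp(String p) { payload = p; } }

      public StreamObserver<Req> send(StreamObserver<Resp> responseObserver) {
        return new StreamObserver<Req>() {
          private final AtomicBoolean isClosed = new AtomicBoolean(false);

          @Override
          public void onNext(Req request) {
            try {
              responseObserver.onNext(new Resp(request.payload)); // echo back
            } catch (RuntimeException e) {
              responseObserver.onError(e); // surface processing failures
            }
          }

          @Override
          public void onError(Throwable t) {
            // the inbound stream failed; nothing more can be sent
          }

          @Override
          public void onCompleted() {
            if (isClosed.compareAndSet(false, true)) { // complete exactly once
              responseObserver.onCompleted();
            }
          }
        };
      }
    }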
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ServerCredentialInterceptor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ServerCredentialInterceptor.java
deleted file mode 100644
index 968f0c8..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ServerCredentialInterceptor.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server;
-
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.token.TokenVerifier;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ratis.thirdparty.io.grpc.Context;
-import org.apache.ratis.thirdparty.io.grpc.Contexts;
-import org.apache.ratis.thirdparty.io.grpc.Metadata;
-import org.apache.ratis.thirdparty.io.grpc.ServerCall;
-import org.apache.ratis.thirdparty.io.grpc.ServerCallHandler;
-import org.apache.ratis.thirdparty.io.grpc.ServerInterceptor;
-import org.apache.ratis.thirdparty.io.grpc.Status;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OBT_METADATA_KEY;
-import static org.apache.hadoop.ozone.OzoneConsts.USER_METADATA_KEY;
-import static org.apache.hadoop.ozone.OzoneConsts.UGI_CTX_KEY;
-/**
- * Grpc Server Interceptor for Ozone Block token.
- */
-public class ServerCredentialInterceptor implements ServerInterceptor {
-
-
-  private static final ServerCall.Listener NOOP_LISTENER =
-      new ServerCall.Listener() {
-  };
-
-  private final TokenVerifier verifier;
-
-  ServerCredentialInterceptor(TokenVerifier verifier) {
-    this.verifier = verifier;
-  }
-
-  @Override
-  public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(
-      ServerCall<ReqT, RespT> call, Metadata headers,
-      ServerCallHandler<ReqT, RespT> next) {
-    String token = headers.get(OBT_METADATA_KEY);
-    String user = headers.get(USER_METADATA_KEY);
-    Context ctx = Context.current();
-    try {
-      UserGroupInformation ugi = verifier.verify(user, token);
-      if (ugi == null) {
-        call.close(Status.UNAUTHENTICATED.withDescription("Missing Block " +
-            "Token from headers when block token is required."), headers);
-        return NOOP_LISTENER;
-      } else {
-        ctx = ctx.withValue(UGI_CTX_KEY, ugi);
-      }
-    } catch (SCMSecurityException e) {
-      call.close(Status.UNAUTHENTICATED.withDescription(e.getMessage())
-          .withCause(e), headers);
-      return NOOP_LISTENER;
-    }
-    return Contexts.interceptCall(ctx, call, headers, next);
-  }
-}
\ No newline at end of file
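
The deleted interceptor is the usual gRPC recipe for header-based authentication: read credentials from the call Metadata, close the call with UNAUTHENTICATED on failure, and otherwise stash the verified identity in the gRPC Context. A minimal sketch against the plain io.grpc API follows; the header name, context key, and verify() body are illustrative, not the Ozone constants.

    import io.grpc.Context;
    import io.grpc.Contexts;
    import io.grpc.Metadata;
    import io.grpc.ServerCall;
    import io.grpc.ServerCallHandler;
    import io.grpc.ServerInterceptor;
    import io.grpc.Status;

    public final class TokenAuthInterceptor implements ServerInterceptor {
      // Illustrative header and context keys.
      private static final Metadata.Key<String> TOKEN_HEADER =
          Metadata.Key.of("auth-token", Metadata.ASCII_STRING_MARSHALLER);
      static final Context.Key<String> USER_CTX_KEY = Context.key("user");

      @Override
      public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(
          ServerCall<ReqT, RespT> call, Metadata headers,
          ServerCallHandler<ReqT, RespT> next) {
        String token = headers.get(TOKEN_HEADER);
        if (token == null) {
          // Reject before any handler code runs; hand back a no-op listener.
          call.close(Status.UNAUTHENTICATED
              .withDescription("Missing auth token"), new Metadata());
          return new ServerCall.Listener<ReqT>() { };
        }
        // Make the verified identity visible to the service implementation.
        Context ctx = Context.current().withValue(USER_CTX_KEY, verify(token));
        return Contexts.interceptCall(ctx, call, headers, next);
      }

      private String verify(String token) {
        return "user-for-" + token; // placeholder for real verification
      }
    }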
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
deleted file mode 100644
index c6b0d92..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.token.BlockTokenVerifier;
-import org.apache.hadoop.hdds.security.token.TokenVerifier;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.MISSING_BLOCK_TOKEN;
-
-/**
- * A server endpoint that acts as the communication layer for Ozone containers.
- */
-public abstract class XceiverServer implements XceiverServerSpi {
-
-  private final SecurityConfig secConfig;
-  private final TokenVerifier tokenVerifier;
-  private final CertificateClient caClient;
-
-  public XceiverServer(Configuration conf, CertificateClient client) {
-    Preconditions.checkNotNull(conf);
-    this.secConfig = new SecurityConfig(conf);
-    this.caClient = client;
-    tokenVerifier = new BlockTokenVerifier(secConfig, getCaClient());
-  }
-
-  /**
-   * Default implementation that just validates the security token if
-   * security is enabled.
-   *
-   * @param request ContainerCommandRequest
-   */
-  @Override
-  public void submitRequest(ContainerCommandRequestProto request,
-      HddsProtos.PipelineID pipelineID) throws IOException {
-    if (secConfig.isSecurityEnabled()) {
-      String encodedToken = request.getEncodedToken();
-      if (encodedToken == null) {
-        throw new SCMSecurityException("Security is enabled but client " +
-            "request is missing block token.", MISSING_BLOCK_TOKEN);
-      }
-      tokenVerifier.verify(encodedToken, encodedToken);
-    }
-  }
-
-  @VisibleForTesting
-  protected CertificateClient getCaClient() {
-    return caClient;
-  }
-
-  protected SecurityConfig getSecurityConfig() {
-    return secConfig;
-  }
-
-  protected TokenVerifier getBlockTokenVerifier() {
-    return tokenVerifier;
-  }
-
-  public SecurityConfig getSecConfig() {
-    return secConfig;
-  }
-
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
deleted file mode 100644
index bb352ea..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReport;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.
-    StorageContainerException;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdds.tracing.GrpcServerInterceptor;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-
-import io.opentracing.Scope;
-import org.apache.ratis.thirdparty.io.grpc.BindableService;
-import org.apache.ratis.thirdparty.io.grpc.Server;
-import org.apache.ratis.thirdparty.io.grpc.ServerBuilder;
-import org.apache.ratis.thirdparty.io.grpc.ServerInterceptors;
-import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts;
-import org.apache.ratis.thirdparty.io.grpc.netty.NettyServerBuilder;
-import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Creates a Grpc server endpoint that acts as the communication layer for
- * Ozone containers.
- */
-public final class XceiverServerGrpc extends XceiverServer {
-  private static final Logger
-      LOG = LoggerFactory.getLogger(XceiverServerGrpc.class);
-  private static final String COMPONENT = "dn";
-  private int port;
-  private UUID id;
-  private Server server;
-  private final ContainerDispatcher storageContainer;
-  private boolean isStarted;
-  private DatanodeDetails datanodeDetails;
-
-
-  /**
-   * Constructs a Grpc server class.
-   *
-   * @param datanodeDetails - details of the local datanode
-   * @param conf - Configuration
-   * @param dispatcher - dispatcher that handles container commands
-   * @param caClient - certificate client used for the TLS setup
-   * @param additionalServices - extra gRPC services to register
-   */
-  public XceiverServerGrpc(DatanodeDetails datanodeDetails, Configuration conf,
-      ContainerDispatcher dispatcher, CertificateClient caClient,
-      BindableService... additionalServices) {
-    super(conf, caClient);
-    Preconditions.checkNotNull(conf);
-
-    this.id = datanodeDetails.getUuid();
-    this.datanodeDetails = datanodeDetails;
-    this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-
-    if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
-        OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) {
-      this.port = 0;
-    }
-
-    NettyServerBuilder nettyServerBuilder =
-        ((NettyServerBuilder) ServerBuilder.forPort(port))
-            .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE);
-
-    ServerCredentialInterceptor credInterceptor =
-        new ServerCredentialInterceptor(getBlockTokenVerifier());
-    GrpcServerInterceptor tracingInterceptor = new GrpcServerInterceptor();
-    nettyServerBuilder.addService(ServerInterceptors.intercept(
-        new GrpcXceiverService(dispatcher,
-            getSecurityConfig().isBlockTokenEnabled(),
-            getBlockTokenVerifier()), credInterceptor,
-        tracingInterceptor));
-
-    for (BindableService service : additionalServices) {
-      nettyServerBuilder.addService(service);
-    }
-
-    if (getSecConfig().isGrpcTlsEnabled()) {
-      try {
-        SslContextBuilder sslClientContextBuilder = SslContextBuilder.forServer(
-            caClient.getPrivateKey(), caClient.getCertificate());
-        SslContextBuilder sslContextBuilder = GrpcSslContexts.configure(
-            sslClientContextBuilder, getSecurityConfig().getGrpcSslProvider());
-        nettyServerBuilder.sslContext(sslContextBuilder.build());
-      } catch (Exception ex) {
-        LOG.error("Unable to setup TLS for secure datanode GRPC endpoint.", ex);
-      }
-    }
-    server = nettyServerBuilder.build();
-    storageContainer = dispatcher;
-  }
-
-  @Override
-  public int getIPCPort() {
-    return this.port;
-  }
-
-  /**
-   * Returns the Replication type supported by this end-point.
-   *
-   * @return enum -- {Stand_Alone, Ratis, Chained}
-   */
-  @Override
-  public HddsProtos.ReplicationType getServerType() {
-    return HddsProtos.ReplicationType.STAND_ALONE;
-  }
-
-  @Override
-  public void start() throws IOException {
-    if (!isStarted) {
-      server.start();
-      int realPort = server.getPort();
-
-      if (port == 0) {
-        LOG.info("{} {} is started using port {}", getClass().getSimpleName(),
-            this.id, realPort);
-        port = realPort;
-      }
-
-      // Register the real port with the datanode details.
-      datanodeDetails.setPort(DatanodeDetails
-          .newPort(Name.STANDALONE,
-              realPort));
-
-      isStarted = true;
-    }
-  }
-
-  @Override
-  public void stop() {
-    if (isStarted) {
-      server.shutdown();
-      try {
-        server.awaitTermination(5, TimeUnit.SECONDS);
-      } catch (Exception e) {
-        LOG.error("failed to shutdown XceiverServerGrpc", e);
-      }
-      isStarted = false;
-    }
-  }
-
-  @Override
-  public void submitRequest(ContainerCommandRequestProto request,
-      HddsProtos.PipelineID pipelineID) throws IOException {
-    try (Scope scope = TracingUtil
-        .importAndCreateScope(
-            "XceiverServerGrpc." + request.getCmdType().name(),
-            request.getTraceID())) {
-
-      super.submitRequest(request, pipelineID);
-      ContainerProtos.ContainerCommandResponseProto response =
-          storageContainer.dispatch(request, null);
-      if (response.getResult() != ContainerProtos.Result.SUCCESS) {
-        throw new StorageContainerException(response.getMessage(),
-            response.getResult());
-      }
-    }
-  }
-
-  @Override
-  public boolean isExist(HddsProtos.PipelineID pipelineId) {
-    return PipelineID.valueOf(id).getProtobuf().equals(pipelineId);
-  }
-
-  @Override
-  public List<PipelineReport> getPipelineReport() {
-    return Collections.singletonList(
-            PipelineReport.newBuilder()
-                    .setPipelineID(PipelineID.valueOf(id).getProtobuf())
-                    .build());
-  }
-}
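
The deleted XceiverServerGrpc combines three independent grpc-java features: a NettyServerBuilder, interceptor chaining through ServerInterceptors.intercept, and optional TLS. A stripped-down sketch of that wiring, with the service, interceptor, and size limit as placeholders:

    import io.grpc.BindableService;
    import io.grpc.Server;
    import io.grpc.ServerInterceptor;
    import io.grpc.ServerInterceptors;
    import io.grpc.netty.NettyServerBuilder;

    import java.io.File;

    public final class GrpcServerWiringSketch {
      public static Server build(int port, BindableService service,
          ServerInterceptor auth, File certChain, File privateKey) {
        NettyServerBuilder builder = NettyServerBuilder.forPort(port)
            .maxInboundMessageSize(16 * 1024 * 1024)           // cap request size
            .addService(ServerInterceptors.intercept(service, auth));
        if (certChain != null && privateKey != null) {
          builder.useTransportSecurity(certChain, privateKey); // enable TLS
        }
        return builder.build();                                // call start() later
      }
    }

Passing port 0, as the deleted class does when random ports are configured, lets the kernel pick a free port; server.getPort() then reports the actual bound port after start().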
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java
deleted file mode 100644
index 4e0d343..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReport;
-
-import java.io.IOException;
-import java.util.List;
-
-/** A server endpoint that acts as the communication layer for Ozone
- * containers. */
-public interface XceiverServerSpi {
-  /** Starts the server. */
-  void start() throws IOException;
-
-  /** Stops a running server. */
-  void stop();
-
-  /** Get server IPC port. */
-  int getIPCPort();
-
-  /**
-   * Returns the Replication type supported by this end-point.
-   * @return enum -- {Stand_Alone, Ratis, Chained}
-   */
-  HddsProtos.ReplicationType getServerType();
-
-  /**
-   * submits a containerRequest to be performed by the replication pipeline.
-   * @param request ContainerCommandRequest
-   */
-  void submitRequest(ContainerCommandRequestProto request,
-      HddsProtos.PipelineID pipelineID)
-      throws IOException;
-
-  /**
-   * Returns true if the given pipeline exists.
-   *
-   * @return true if pipeline present, else false
-   */
-  boolean isExist(HddsProtos.PipelineID pipelineId);
-
-  /**
-   * Get pipeline report for the XceiverServer instance.
-   * @return list of report for each pipeline.
-   */
-  List<PipelineReport> getPipelineReport();
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java
deleted file mode 100644
index 59c96f1..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server;
-
-/**
- * This package contains classes for the server of the storage container
- * protocol.
- */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
deleted file mode 100644
index 9893ae4..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
+++ /dev/null
@@ -1,221 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.transport.server.ratis;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.ratis.protocol.RaftGroupId;
-
-/**
- * This class is for maintaining Container State Machine statistics.
- */
-@InterfaceAudience.Private
-@Metrics(about="Container State Machine Metrics", context="dfs")
-public class CSMMetrics {
-  public static final String SOURCE_NAME =
-      CSMMetrics.class.getSimpleName();
-
-  // Ratis op metrics
-  private @Metric MutableCounterLong numWriteStateMachineOps;
-  private @Metric MutableCounterLong numQueryStateMachineOps;
-  private @Metric MutableCounterLong numApplyTransactionOps;
-  private @Metric MutableCounterLong numReadStateMachineOps;
-  private @Metric MutableCounterLong numBytesWrittenCount;
-  private @Metric MutableCounterLong numBytesCommittedCount;
-
-  private @Metric MutableRate transactionLatency;
-  private MutableRate[] opsLatency;
-  private MetricsRegistry registry = null;
-
-  // Failure Metrics
-  private @Metric MutableCounterLong numWriteStateMachineFails;
-  private @Metric MutableCounterLong numWriteDataFails;
-  private @Metric MutableCounterLong numQueryStateMachineFails;
-  private @Metric MutableCounterLong numApplyTransactionFails;
-  private @Metric MutableCounterLong numReadStateMachineFails;
-  private @Metric MutableCounterLong numReadStateMachineMissCount;
-  private @Metric MutableCounterLong numStartTransactionVerifyFailures;
-  private @Metric MutableCounterLong numContainerNotOpenVerifyFailures;
-
-  private @Metric MutableRate applyTransaction;
-  private @Metric MutableRate writeStateMachineData;
-
-  public CSMMetrics() {
-    int numCmdTypes = ContainerProtos.Type.values().length;
-    this.opsLatency = new MutableRate[numCmdTypes];
-    this.registry = new MetricsRegistry(CSMMetrics.class.getSimpleName());
-    for (int i = 0; i < numCmdTypes; i++) {
-      opsLatency[i] = registry.newRate(
-          ContainerProtos.Type.forNumber(i + 1).toString(),
-          ContainerProtos.Type.forNumber(i + 1) + " op");
-    }
-  }
-
-  public static CSMMetrics create(RaftGroupId gid) {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME + gid.toString(),
-        "Container State Machine",
-        new CSMMetrics());
-  }
-
-  public void incNumWriteStateMachineOps() {
-    numWriteStateMachineOps.incr();
-  }
-
-  public void incNumQueryStateMachineOps() {
-    numQueryStateMachineOps.incr();
-  }
-
-  public void incNumReadStateMachineOps() {
-    numReadStateMachineOps.incr();
-  }
-
-  public void incNumApplyTransactionsOps() {
-    numApplyTransactionOps.incr();
-  }
-
-  public void incNumWriteStateMachineFails() {
-    numWriteStateMachineFails.incr();
-  }
-
-  public void incNumWriteDataFails() {
-    numWriteDataFails.incr();
-  }
-
-  public void incNumQueryStateMachineFails() {
-    numQueryStateMachineFails.incr();
-  }
-
-  public void incNumBytesWrittenCount(long value) {
-    numBytesWrittenCount.incr(value);
-  }
-
-  public void incNumBytesCommittedCount(long value) {
-    numBytesCommittedCount.incr(value);
-  }
-
-  public void incNumReadStateMachineFails() {
-    numReadStateMachineFails.incr();
-  }
-
-  public void incNumReadStateMachineMissCount() {
-    numReadStateMachineMissCount.incr();
-  }
-
-  public void incNumApplyTransactionsFails() {
-    numApplyTransactionFails.incr();
-  }
-
-  @VisibleForTesting
-  public long getNumWriteStateMachineOps() {
-    return numWriteStateMachineOps.value();
-  }
-
-  @VisibleForTesting
-  public long getNumQueryStateMachineOps() {
-    return numQueryStateMachineOps.value();
-  }
-
-  @VisibleForTesting
-  public long getNumApplyTransactionsOps() {
-    return numApplyTransactionOps.value();
-  }
-
-  @VisibleForTesting
-  public long getNumWriteStateMachineFails() {
-    return numWriteStateMachineFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumWriteDataFails() {
-    return numWriteDataFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumQueryStateMachineFails() {
-    return numQueryStateMachineFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumApplyTransactionsFails() {
-    return numApplyTransactionFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumReadStateMachineFails() {
-    return numReadStateMachineFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumReadStateMachineMissCount() {
-    return numReadStateMachineMissCount.value();
-  }
-
-  @VisibleForTesting
-  public long getNumReadStateMachineOps() {
-    return numReadStateMachineOps.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBytesWrittenCount() {
-    return numBytesWrittenCount.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBytesCommittedCount() {
-    return numBytesCommittedCount.value();
-  }
-
-  public MutableRate getApplyTransactionLatency() {
-    return applyTransaction;
-  }
-
-  public void incPipelineLatency(ContainerProtos.Type type, long latencyNanos) {
-    opsLatency[type.ordinal()].add(latencyNanos);
-    transactionLatency.add(latencyNanos);
-  }
-
-  public void incNumStartTransactionVerifyFailures() {
-    numStartTransactionVerifyFailures.incr();
-  }
-
-  public void incNumContainerNotOpenVerifyFailures() {
-    numContainerNotOpenVerifyFailures.incr();
-  }
-
-  public void recordApplyTransactionCompletion(long latencyNanos) {
-    applyTransaction.add(latencyNanos);
-  }
-
-  public void recordWriteStateMachineCompletion(long latencyNanos) {
-    writeStateMachineData.add(latencyNanos);
-  }
-
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    // Note: create() registers each source as SOURCE_NAME + gid, so a
-    // per-group source must be unregistered under that same suffixed name.
-    ms.unregisterSource(SOURCE_NAME);
-  }
-}
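
The deleted CSMMetrics is a plain Hadoop metrics2 source: annotate the class with @Metrics, declare @Metric fields, and register an instance with the default MetricsSystem. A minimal sketch of that registration pattern, with illustrative names:

    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;
    import org.apache.hadoop.metrics2.lib.MutableRate;

    @Metrics(about = "Example metrics source", context = "dfs")
    public class ExampleMetrics {
      static final String SOURCE_NAME = ExampleMetrics.class.getSimpleName();

      @Metric private MutableCounterLong numOps; // monotonically increasing
      @Metric private MutableRate opLatency;     // tracks count and average

      public static ExampleMetrics create() {
        MetricsSystem ms = DefaultMetricsSystem.instance();
        // The @Metric annotations are processed at registration time.
        return ms.register(SOURCE_NAME, "Example metrics", new ExampleMetrics());
      }

      public void recordOp(long latencyNanos) {
        numOps.incr();
        opLatency.add(latencyNanos);
      }

      public void unRegister() {
        // Must match the exact name used at registration.
        DefaultMetricsSystem.instance().unregisterSource(SOURCE_NAME);
      }
    }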
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
deleted file mode 100644
index b89ec73..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ /dev/null
@@ -1,871 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server.ratis;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
-import org.apache.hadoop.util.Time;
-import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.protocol.StateMachineException;
-import org.apache.ratis.server.RaftServer;
-import org.apache.ratis.server.impl.RaftServerProxy;
-import org.apache.ratis.server.protocol.TermIndex;
-import org.apache.ratis.server.raftlog.RaftLog;
-import org.apache.ratis.statemachine.impl.SingleFileSnapshotInfo;
-import org.apache.ratis.thirdparty.com.google.protobuf
-    .InvalidProtocolBufferException;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
-    Container2BCSIDMapProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadChunkResponseProto;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.hdds.security.token.TokenVerifier;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ratis.protocol.Message;
-import org.apache.ratis.protocol.RaftClientRequest;
-import org.apache.ratis.server.storage.RaftStorage;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.proto.RaftProtos.RoleInfoProto;
-import org.apache.ratis.proto.RaftProtos.LogEntryProto;
-import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto;
-import org.apache.ratis.statemachine.StateMachineStorage;
-import org.apache.ratis.statemachine.TransactionContext;
-import org.apache.ratis.statemachine.impl.BaseStateMachine;
-import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.stream.Collectors;
-import java.util.concurrent.Executors;
-import java.io.FileOutputStream;
-import java.io.FileInputStream;
-import java.io.OutputStream;
-
-/** A {@link org.apache.ratis.statemachine.StateMachine} for containers.
- *
- * The stateMachine is responsible for handling different types of container
- * requests. The container requests can be divided into readonly and write
- * requests.
- *
- * Read only requests are classified by
- * {@link org.apache.hadoop.hdds.HddsUtils#isReadOnly}
- * and these readonly requests are served from {@link #query(Message)}.
- *
- * The write requests can be divided into requests with user data
- * (WriteChunkRequest) and other requests without user data.
- *
- * In order to optimize write throughput, the writeChunk request is
- * processed in two phases, split in
- * {@link #startTransaction(RaftClientRequest)}: in the first phase the user
- * data is written directly into the state machine via
- * {@link #writeStateMachineData}, and in the second phase the
- * transaction is committed via {@link #applyTransaction(TransactionContext)}.
- *
- * For requests with no stateMachine data, the transaction is directly
- * committed through
- * {@link #applyTransaction(TransactionContext)}.
- *
- * Two ordering constraints are currently enforced in the code:
- * 1) A write chunk operation is executed after the create container
- * operation; otherwise the write chunk would fail because the container has
- * not been created yet. Hence the create container operation is split in
- * {@link #startTransaction(RaftClientRequest)}, which helps in synchronizing
- * the calls in {@link #writeStateMachineData}.
- *
- * 2) A write chunk commit operation is executed after the write chunk state
- * machine operation. This ensures that the commit operation is synchronized
- * with the state machine operation, for example the synchronization between
- * writeChunk and createContainer in {@link ContainerStateMachine}.
- **/
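
A schematic of the two-phase write described in the javadoc above, reduced to the future bookkeeping that links the phases; the types are hypothetical, not the removed implementation:

    import java.util.Map;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentHashMap;

    // Phase 1 streams the chunk data off the Raft log path; phase 2 commits
    // the (data-free) log entry only after phase 1 has finished.
    public final class TwoPhaseWriteSketch {
      private final Map<Long, CompletableFuture<Void>> pendingWrites =
          new ConcurrentHashMap<>();

      // Phase 1 (cf. writeStateMachineData): persist user data asynchronously.
      public CompletableFuture<Void> writeData(long logIndex, byte[] chunk) {
        CompletableFuture<Void> f =
            CompletableFuture.runAsync(() -> storeChunk(chunk));
        pendingWrites.put(logIndex, f);
        return f;
      }

      // Phase 2 (cf. applyTransaction): commit strictly after the write.
      public CompletableFuture<Void> commit(long logIndex) {
        CompletableFuture<Void> write = pendingWrites
            .getOrDefault(logIndex, CompletableFuture.completedFuture(null));
        return write.thenRun(() -> markCommitted(logIndex));
      }

      private void storeChunk(byte[] chunk) { /* write to disk */ }
      private void markCommitted(long logIndex) { pendingWrites.remove(logIndex); }
    }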
-
-public class ContainerStateMachine extends BaseStateMachine {
-  static final Logger LOG =
-      LoggerFactory.getLogger(ContainerStateMachine.class);
-  private final SimpleStateMachineStorage storage =
-      new SimpleStateMachineStorage();
-  private final RaftGroupId gid;
-  private final ContainerDispatcher dispatcher;
-  private final ContainerController containerController;
-  private ThreadPoolExecutor chunkExecutor;
-  private final XceiverServerRatis ratisServer;
-  private final ConcurrentHashMap<Long,
-      CompletableFuture<ContainerCommandResponseProto>> writeChunkFutureMap;
-
-  // keeps track of the containers created per pipeline
-  private final Map<Long, Long> container2BCSIDMap;
-  private ExecutorService[] executors;
-  private final Map<Long, Long> applyTransactionCompletionMap;
-  private final Cache<Long, ByteString> stateMachineDataCache;
-  private final boolean isBlockTokenEnabled;
-  private final TokenVerifier tokenVerifier;
-  private final AtomicBoolean stateMachineHealthy;
-
-  private final Semaphore applyTransactionSemaphore;
-  /**
-   * CSM metrics.
-   */
-  private final CSMMetrics metrics;
-
-  @SuppressWarnings("parameternumber")
-  public ContainerStateMachine(RaftGroupId gid, ContainerDispatcher dispatcher,
-      ContainerController containerController, ThreadPoolExecutor chunkExecutor,
-      XceiverServerRatis ratisServer, long expiryInterval,
-      boolean isBlockTokenEnabled, TokenVerifier tokenVerifier,
-      Configuration conf) {
-    this.gid = gid;
-    this.dispatcher = dispatcher;
-    this.containerController = containerController;
-    this.chunkExecutor = chunkExecutor;
-    this.ratisServer = ratisServer;
-    metrics = CSMMetrics.create(gid);
-    this.writeChunkFutureMap = new ConcurrentHashMap<>();
-    applyTransactionCompletionMap = new ConcurrentHashMap<>();
-    stateMachineDataCache = CacheBuilder.newBuilder()
-        .expireAfterAccess(expiryInterval, TimeUnit.MILLISECONDS)
-        // limit the number of cached entries to the maximum number of
-        // threads executing writeStateMachineData
-        .maximumSize(chunkExecutor.getCorePoolSize()).build();
-    this.isBlockTokenEnabled = isBlockTokenEnabled;
-    this.tokenVerifier = tokenVerifier;
-    this.container2BCSIDMap = new ConcurrentHashMap<>();
-
-    final int numContainerOpExecutors = conf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT);
-    int maxPendingApplyTransactions = conf.getInt(
-        ScmConfigKeys.
-            DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS,
-        ScmConfigKeys.
-            DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT);
-    applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions);
-    stateMachineHealthy = new AtomicBoolean(true);
-    this.executors = new ExecutorService[numContainerOpExecutors];
-    for (int i = 0; i < numContainerOpExecutors; i++) {
-      final int index = i;
-      this.executors[index] = Executors.newSingleThreadExecutor(r -> {
-        Thread t = new Thread(r);
-        t.setName("RatisApplyTransactionExecutor " + index);
-        return t;
-      });
-    }
-  }
-
-  @Override
-  public StateMachineStorage getStateMachineStorage() {
-    return storage;
-  }
-
-  public CSMMetrics getMetrics() {
-    return metrics;
-  }
-
-  @Override
-  public void initialize(
-      RaftServer server, RaftGroupId id, RaftStorage raftStorage)
-      throws IOException {
-    super.initialize(server, id, raftStorage);
-    storage.init(raftStorage);
-    ratisServer.notifyGroupAdd(gid);
-
-    loadSnapshot(storage.getLatestSnapshot());
-  }
-
-  private long loadSnapshot(SingleFileSnapshotInfo snapshot)
-      throws IOException {
-    if (snapshot == null) {
-      TermIndex empty =
-          TermIndex.newTermIndex(0, RaftLog.INVALID_LOG_INDEX);
-      LOG.info("{}: The snapshot info is null. Setting the last applied index" +
-              "to:{}", gid, empty);
-      setLastAppliedTermIndex(empty);
-      return empty.getIndex();
-    }
-
-    final File snapshotFile = snapshot.getFile().getPath().toFile();
-    final TermIndex last =
-        SimpleStateMachineStorage.getTermIndexFromSnapshotFile(snapshotFile);
-    LOG.info("{}: Setting the last applied index to {}", gid, last);
-    setLastAppliedTermIndex(last);
-
-    // initialize the dispatcher with the snapshot so that it builds the
-    // missing container list
-    try (FileInputStream fin = new FileInputStream(snapshotFile)) {
-      byte[] container2BCSIDData = IOUtils.toByteArray(fin);
-      ContainerProtos.Container2BCSIDMapProto proto =
-          ContainerProtos.Container2BCSIDMapProto
-              .parseFrom(container2BCSIDData);
-      // read the created containers list from the snapshot file and add it to
-      // the container2BCSIDMap here.
-      // container2BCSIDMap will further grow as and when containers get created
-      container2BCSIDMap.putAll(proto.getContainer2BCSIDMap());
-      dispatcher.buildMissingContainerSetAndValidate(container2BCSIDMap);
-    }
-    return last.getIndex();
-  }
-
-  /**
-   * As a part of taking snapshot with Ratis StateMachine, it will persist
-   * the existing container set in the snapshotFile.
-   * @param out OutputStream mapped to the Ratis snapshot file
-   * @throws IOException
-   */
-  public void persistContainerSet(OutputStream out) throws IOException {
-    Container2BCSIDMapProto.Builder builder =
-        Container2BCSIDMapProto.newBuilder();
-    builder.putAllContainer2BCSID(container2BCSIDMap);
-    // TODO: while a snapshot is being taken, deleteContainer calls should
-    // not happen. Lock protection will be required if delete container
-    // happens outside of Ratis.
-    IOUtils.write(builder.build().toByteArray(), out);
-  }
-
-  public boolean isStateMachineHealthy() {
-    return stateMachineHealthy.get();
-  }
-
-  @Override
-  public long takeSnapshot() throws IOException {
-    TermIndex ti = getLastAppliedTermIndex();
-    long startTime = Time.monotonicNow();
-    if (!isStateMachineHealthy()) {
-      String msg =
-          "Failed to take snapshot " + " for " + gid + " as the stateMachine"
-              + " is unhealthy. The last applied index is at " + ti;
-      StateMachineException sme = new StateMachineException(msg);
-      LOG.error(msg);
-      throw sme;
-    }
-    if (ti != null && ti.getIndex() != RaftLog.INVALID_LOG_INDEX) {
-      final File snapshotFile =
-          storage.getSnapshotFile(ti.getTerm(), ti.getIndex());
-      LOG.info("{}: Taking a snapshot at:{} file {}", gid, ti, snapshotFile);
-      try (FileOutputStream fos = new FileOutputStream(snapshotFile)) {
-        persistContainerSet(fos);
-        fos.flush();
-        // make sure the snapshot file is synced
-        fos.getFD().sync();
-      } catch (IOException ioe) {
-        LOG.error("{}: Failed to write snapshot at:{} file {}", gid, ti,
-            snapshotFile);
-        throw ioe;
-      }
-      LOG.info("{}: Finished taking a snapshot at:{} file:{} time:{}", gid, ti,
-          snapshotFile, (Time.monotonicNow() - startTime));
-      return ti.getIndex();
-    }
-    return -1;
-  }
-
-  @Override
-  public TransactionContext startTransaction(RaftClientRequest request)
-      throws IOException {
-    long startTime = Time.monotonicNowNanos();
-    final ContainerCommandRequestProto proto =
-        message2ContainerCommandRequestProto(request.getMessage());
-    Preconditions.checkArgument(request.getRaftGroupId().equals(gid));
-    try {
-      dispatcher.validateContainerCommand(proto);
-    } catch (IOException ioe) {
-      if (ioe instanceof ContainerNotOpenException) {
-        metrics.incNumContainerNotOpenVerifyFailures();
-      } else {
-        metrics.incNumStartTransactionVerifyFailures();
-        LOG.error("startTransaction validation failed on leader", ioe);
-      }
-      TransactionContext ctxt = TransactionContext.newBuilder()
-          .setClientRequest(request)
-          .setStateMachine(this)
-          .setServerRole(RaftPeerRole.LEADER)
-          .build();
-      ctxt.setException(ioe);
-      return ctxt;
-    }
-    if (proto.getCmdType() == Type.WriteChunk) {
-      final WriteChunkRequestProto write = proto.getWriteChunk();
-      // create the log entry proto
-      final WriteChunkRequestProto commitWriteChunkProto =
-          WriteChunkRequestProto.newBuilder()
-              .setBlockID(write.getBlockID())
-              .setChunkData(write.getChunkData())
-              // skipping the data field as it is
-              // already set in statemachine data proto
-              .build();
-      ContainerCommandRequestProto commitContainerCommandProto =
-          ContainerCommandRequestProto
-              .newBuilder(proto)
-              .setWriteChunk(commitWriteChunkProto)
-              .setTraceID(proto.getTraceID())
-              .build();
-
-      return TransactionContext.newBuilder()
-          .setClientRequest(request)
-          .setStateMachine(this)
-          .setServerRole(RaftPeerRole.LEADER)
-          .setStateMachineContext(startTime)
-          .setStateMachineData(write.getData())
-          .setLogData(commitContainerCommandProto.toByteString())
-          .build();
-    } else {
-      return TransactionContext.newBuilder()
-          .setClientRequest(request)
-          .setStateMachine(this)
-          .setServerRole(RaftPeerRole.LEADER)
-          .setStateMachineContext(startTime)
-          .setLogData(proto.toByteString())
-          .build();
-    }
-
-  }
-
-  private ByteString getStateMachineData(StateMachineLogEntryProto entryProto) {
-    return entryProto.getStateMachineEntry().getStateMachineData();
-  }
-
-  private ContainerCommandRequestProto getContainerCommandRequestProto(
-      ByteString request) throws InvalidProtocolBufferException {
-    // TODO: We can avoid creating new builder and set pipeline Id if
-    // the client is already sending the pipeline id, then we just have to
-    // validate the pipeline Id.
-    return ContainerCommandRequestProto.newBuilder(
-        ContainerCommandRequestProto.parseFrom(request))
-        .setPipelineID(gid.getUuid().toString()).build();
-  }
-
-  private ContainerCommandRequestProto message2ContainerCommandRequestProto(
-      Message message) throws InvalidProtocolBufferException {
-    return ContainerCommandRequestMessage.toProto(message.getContent(), gid);
-  }
-
-  private ContainerCommandResponseProto dispatchCommand(
-      ContainerCommandRequestProto requestProto, DispatcherContext context) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", gid,
-          requestProto.getCmdType(), requestProto.getContainerID(),
-          requestProto.getPipelineID(), requestProto.getTraceID());
-    }
-    if (isBlockTokenEnabled) {
-      try {
-        // ServerInterceptors intercepts incoming request and creates ugi.
-        tokenVerifier
-            .verify(UserGroupInformation.getCurrentUser().getShortUserName(),
-                requestProto.getEncodedToken());
-      } catch (IOException ioe) {
-        StorageContainerException sce = new StorageContainerException(
-            "Block token verification failed. " + ioe.getMessage(), ioe,
-            ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED);
-        return ContainerUtils.logAndReturnError(LOG, sce, requestProto);
-      }
-    }
-    ContainerCommandResponseProto response =
-        dispatcher.dispatch(requestProto, context);
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("{}: response {}", gid, response);
-    }
-    return response;
-  }
-
-  private ContainerCommandResponseProto runCommand(
-      ContainerCommandRequestProto requestProto,
-      DispatcherContext context) {
-    return dispatchCommand(requestProto, context);
-  }
-
-  private ExecutorService getCommandExecutor(
-      ContainerCommandRequestProto requestProto) {
-    int executorId = (int)(requestProto.getContainerID() % executors.length);
-    return executors[executorId];
-  }
-
-  private CompletableFuture<Message> handleWriteChunk(
-      ContainerCommandRequestProto requestProto, long entryIndex, long term,
-      long startTime) {
-    final WriteChunkRequestProto write = requestProto.getWriteChunk();
-    RaftServer server = ratisServer.getServer();
-    Preconditions.checkState(server instanceof RaftServerProxy);
-    try {
-      if (((RaftServerProxy) server).getImpl(gid).isLeader()) {
-        stateMachineDataCache.put(entryIndex, write.getData());
-      }
-    } catch (IOException ioe) {
-      return completeExceptionally(ioe);
-    }
-    DispatcherContext context =
-        new DispatcherContext.Builder()
-            .setTerm(term)
-            .setLogIndex(entryIndex)
-            .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
-            .setContainer2BCSIDMap(container2BCSIDMap)
-            .build();
-    CompletableFuture<Message> raftFuture = new CompletableFuture<>();
-    // ensure the write chunk happens asynchronously in writeChunkExecutor pool
-    // thread.
-    CompletableFuture<ContainerCommandResponseProto> writeChunkFuture =
-        CompletableFuture.supplyAsync(() -> {
-          try {
-            return runCommand(requestProto, context);
-          } catch (Exception e) {
-            LOG.error(gid + ": writeChunk writeStateMachineData failed: blockId"
-                + write.getBlockID() + " logIndex " + entryIndex + " chunkName "
-                + write.getChunkData().getChunkName() + e);
-            raftFuture.completeExceptionally(e);
-            throw e;
-          }
-        }, chunkExecutor);
-
-    writeChunkFutureMap.put(entryIndex, writeChunkFuture);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(gid + ": writeChunk writeStateMachineData: blockId " +
-          write.getBlockID() + " logIndex " + entryIndex + " chunkName "
-          + write.getChunkData().getChunkName());
-    }
-    // Remove the future once it finishes execution from the
-    // writeChunkFutureMap.
-    writeChunkFuture.thenApply(r -> {
-      if (r.getResult() != ContainerProtos.Result.SUCCESS) {
-        StorageContainerException sce =
-            new StorageContainerException(r.getMessage(), r.getResult());
-        LOG.error(gid + ": writeChunk writeStateMachineData failed: blockId " +
-            write.getBlockID() + " logIndex " + entryIndex + " chunkName " +
-            write.getChunkData().getChunkName() + " Error message: " +
-            r.getMessage() + " Container Result: " + r.getResult());
-        metrics.incNumWriteDataFails();
-        raftFuture.completeExceptionally(sce);
-      } else {
-        metrics.incNumBytesWrittenCount(
-            requestProto.getWriteChunk().getChunkData().getLen());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(gid +
-              ": writeChunk writeStateMachineData  completed: blockId" +
-              write.getBlockID() + " logIndex " + entryIndex + " chunkName " +
-              write.getChunkData().getChunkName());
-        }
-        raftFuture.complete(r::toByteString);
-        metrics.recordWriteStateMachineCompletion(
-            Time.monotonicNowNanos() - startTime);
-      }
-
-      writeChunkFutureMap.remove(entryIndex);
-      return r;
-    });
-    return raftFuture;
-  }
-
-  /*
-   * writeStateMachineData calls are not synchronized with each other,
-   * nor with applyTransaction.
-   */
-  @Override
-  public CompletableFuture<Message> writeStateMachineData(LogEntryProto entry) {
-    try {
-      metrics.incNumWriteStateMachineOps();
-      long writeStateMachineStartTime = Time.monotonicNowNanos();
-      ContainerCommandRequestProto requestProto =
-          getContainerCommandRequestProto(
-              entry.getStateMachineLogEntry().getLogData());
-      WriteChunkRequestProto writeChunk =
-          WriteChunkRequestProto.newBuilder(requestProto.getWriteChunk())
-              .setData(getStateMachineData(entry.getStateMachineLogEntry()))
-              .build();
-      requestProto = ContainerCommandRequestProto.newBuilder(requestProto)
-          .setWriteChunk(writeChunk).build();
-      Type cmdType = requestProto.getCmdType();
-
-      // writeStateMachineData is only invoked for WriteChunk requests;
-      // CreateContainer happens as part of the WriteChunk path.
-      switch (cmdType) {
-      case WriteChunk:
-        return handleWriteChunk(requestProto, entry.getIndex(),
-            entry.getTerm(), writeStateMachineStartTime);
-      default:
-        throw new IllegalStateException("Cmd Type:" + cmdType
-            + " should not have state machine data");
-      }
-    } catch (IOException e) {
-      metrics.incNumWriteStateMachineFails();
-      return completeExceptionally(e);
-    }
-  }
-
-  @Override
-  public CompletableFuture<Message> query(Message request) {
-    try {
-      metrics.incNumQueryStateMachineOps();
-      final ContainerCommandRequestProto requestProto =
-          message2ContainerCommandRequestProto(request);
-      return CompletableFuture
-          .completedFuture(runCommand(requestProto, null)::toByteString);
-    } catch (IOException e) {
-      metrics.incNumQueryStateMachineFails();
-      return completeExceptionally(e);
-    }
-  }
-
-  private ByteString readStateMachineData(
-      ContainerCommandRequestProto requestProto, long term, long index)
-      throws IOException {
-    // The stateMachine data is not present in the stateMachine cache,
-    // so increment the stateMachine cache miss count.
-    metrics.incNumReadStateMachineMissCount();
-    WriteChunkRequestProto writeChunkRequestProto =
-        requestProto.getWriteChunk();
-    ContainerProtos.ChunkInfo chunkInfo = writeChunkRequestProto.getChunkData();
-    // prepare the chunk to be read
-    ReadChunkRequestProto.Builder readChunkRequestProto =
-        ReadChunkRequestProto.newBuilder()
-            .setBlockID(writeChunkRequestProto.getBlockID())
-            .setChunkData(chunkInfo);
-    ContainerCommandRequestProto dataContainerCommandProto =
-        ContainerCommandRequestProto.newBuilder(requestProto)
-            .setCmdType(Type.ReadChunk).setReadChunk(readChunkRequestProto)
-            .build();
-    DispatcherContext context =
-        new DispatcherContext.Builder().setTerm(term).setLogIndex(index)
-            .setReadFromTmpFile(true).build();
-    // read the chunk
-    ContainerCommandResponseProto response =
-        dispatchCommand(dataContainerCommandProto, context);
-    if (response.getResult() != ContainerProtos.Result.SUCCESS) {
-      StorageContainerException sce =
-          new StorageContainerException(response.getMessage(),
-              response.getResult());
-      LOG.error("gid {} : ReadStateMachine failed. cmd {} logIndex {} msg : "
-              + "{} Container Result: {}", gid, response.getCmdType(), index,
-          response.getMessage(), response.getResult());
-      throw sce;
-    }
-
-    ReadChunkResponseProto responseProto = response.getReadChunk();
-
-    ByteString data = responseProto.getData();
-    // assert that the response has data in it.
-    Preconditions
-        .checkNotNull(data, "read chunk data is null for chunk:" + chunkInfo);
-    Preconditions.checkState(data.size() == chunkInfo.getLen(), String.format(
-        "read chunk len=%d does not match chunk expected len=%d for chunk:%s",
-        data.size(), chunkInfo.getLen(), chunkInfo));
-    return data;
-  }
-
-  /**
-   * Reads the Entry from the Cache or loads it back by reading from disk.
-   */
-  private ByteString getCachedStateMachineData(Long logIndex, long term,
-      ContainerCommandRequestProto requestProto) throws ExecutionException {
-    return stateMachineDataCache.get(logIndex,
-        () -> readStateMachineData(requestProto, term, logIndex));
-  }
-
-  /**
-   * Returns the combined future of all the writeChunks till the given log
-   * index. The Raft log worker will wait for the stateMachineData to complete
-   * flush as well.
-   *
-   * @param index log index till which the stateMachine data needs to be flushed
-   * @return Combined future of all writeChunks till the log index given.
-   */
-  @Override
-  public CompletableFuture<Void> flushStateMachineData(long index) {
-    List<CompletableFuture<ContainerCommandResponseProto>> futureList =
-        writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index)
-            .map(Map.Entry::getValue).collect(Collectors.toList());
-    return CompletableFuture.allOf(
-        futureList.toArray(new CompletableFuture[futureList.size()]));
-  }
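For context, the flushStateMachineData implementation above combines every
pending write future at or below the requested log index. A minimal,
self-contained sketch of that pattern (all names here are illustrative and
not part of the deleted class):

    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.stream.Collectors;

    class WriteFutureTracker {
      private final Map<Long, CompletableFuture<Void>> pending =
          new ConcurrentHashMap<>();

      void track(long index, CompletableFuture<Void> write) {
        pending.put(index, write);
        // Drop the entry once the write completes, mirroring
        // writeChunkFutureMap.remove(entryIndex) above.
        write.whenComplete((r, t) -> pending.remove(index));
      }

      // Combined future of all pending writes at or below the given index.
      CompletableFuture<Void> flushUpTo(long index) {
        List<CompletableFuture<Void>> futures = pending.entrySet().stream()
            .filter(e -> e.getKey() <= index)
            .map(Map.Entry::getValue)
            .collect(Collectors.toList());
        return CompletableFuture.allOf(
            futures.toArray(new CompletableFuture[0]));
      }
    }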
-  /*
-   * This API is used by the leader while appending log entries to a
-   * follower. It allows the leader to read the state machine data from the
-   * state machine implementation in case the cached state machine data has
-   * been evicted.
-   */
-  @Override
-  public CompletableFuture<ByteString> readStateMachineData(
-      LogEntryProto entry) {
-    StateMachineLogEntryProto smLogEntryProto = entry.getStateMachineLogEntry();
-    metrics.incNumReadStateMachineOps();
-    if (!getStateMachineData(smLogEntryProto).isEmpty()) {
-      return CompletableFuture.completedFuture(ByteString.EMPTY);
-    }
-    try {
-      final ContainerCommandRequestProto requestProto =
-          getContainerCommandRequestProto(
-              entry.getStateMachineLogEntry().getLogData());
-      // readStateMachineData should only be called for "write" to Ratis.
-      Preconditions.checkArgument(!HddsUtils.isReadOnly(requestProto));
-      if (requestProto.getCmdType() == Type.WriteChunk) {
-        final CompletableFuture<ByteString> future = new CompletableFuture<>();
-        CompletableFuture.supplyAsync(() -> {
-          try {
-            future.complete(
-                getCachedStateMachineData(entry.getIndex(), entry.getTerm(),
-                    requestProto));
-          } catch (ExecutionException e) {
-            metrics.incNumReadStateMachineFails();
-            future.completeExceptionally(e);
-          }
-          return future;
-        }, chunkExecutor);
-        return future;
-      } else {
-        throw new IllegalStateException("Cmd type:" + requestProto.getCmdType()
-            + " cannot have state machine data");
-      }
-    } catch (Exception e) {
-      metrics.incNumReadStateMachineFails();
-      LOG.error("{} unable to read stateMachineData:", gid, e);
-      return completeExceptionally(e);
-    }
-  }
-
-  private synchronized void updateLastApplied() {
-    Long appliedTerm = null;
-    long appliedIndex = -1;
-    for (long i = getLastAppliedTermIndex().getIndex() + 1;; i++) {
-      final Long removed = applyTransactionCompletionMap.remove(i);
-      if (removed == null) {
-        break;
-      }
-      appliedTerm = removed;
-      appliedIndex = i;
-    }
-    if (appliedTerm != null) {
-      updateLastAppliedTermIndex(appliedTerm, appliedIndex);
-    }
-  }
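The updateLastApplied method above advances the applied term/index only over
a contiguous run of completed indices; any gap (an index whose transaction
has not yet completed) stalls the advance. A hypothetical, self-contained
sketch of the same logic:

    import java.util.concurrent.ConcurrentSkipListMap;

    class AppliedIndexTracker {
      // Completed log entries, keyed by index, mapping to their term.
      private final ConcurrentSkipListMap<Long, Long> completed =
          new ConcurrentSkipListMap<>();
      private long lastApplied = -1;

      synchronized void markCompleted(long index, long term) {
        completed.put(index, term);
        // Advance over the contiguous run starting right after lastApplied;
        // stop at the first missing index.
        for (long i = lastApplied + 1; completed.remove(i) != null; i++) {
          lastApplied = i;
        }
      }

      synchronized long getLastApplied() {
        return lastApplied;
      }
    }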
-
-  /**
-   * Notifies the state machine about index updates for entries that do not
-   * cause a state machine update, i.e. conf entries and metadata entries.
-   *
-   * @param term term of the log entry
-   * @param index index of the log entry
-   */
-  @Override
-  public void notifyIndexUpdate(long term, long index) {
-    applyTransactionCompletionMap.put(index, term);
-  }
-
-  /*
-   * ApplyTransaction calls in Ratis are sequential.
-   */
-  @Override
-  public CompletableFuture<Message> applyTransaction(TransactionContext trx) {
-    long index = trx.getLogEntry().getIndex();
-    DispatcherContext.Builder builder =
-        new DispatcherContext.Builder()
-            .setTerm(trx.getLogEntry().getTerm())
-            .setLogIndex(index);
-
-    long applyTxnStartTime = Time.monotonicNowNanos();
-    try {
-      applyTransactionSemaphore.acquire();
-      metrics.incNumApplyTransactionsOps();
-      ContainerCommandRequestProto requestProto =
-          getContainerCommandRequestProto(
-              trx.getStateMachineLogEntry().getLogData());
-      Type cmdType = requestProto.getCmdType();
-      // Make sure that in write chunk, the user data is not set
-      if (cmdType == Type.WriteChunk) {
-        Preconditions
-            .checkArgument(requestProto.getWriteChunk().getData().isEmpty());
-        builder
-            .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA);
-      }
-      if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile
-          || cmdType == Type.PutBlock || cmdType == Type.CreateContainer) {
-        builder.setContainer2BCSIDMap(container2BCSIDMap);
-      }
-      CompletableFuture<Message> applyTransactionFuture =
-          new CompletableFuture<>();
-      // Ensure the command gets executed in a separate thread from the
-      // stateMachineUpdater thread, which is calling applyTransaction here.
-      CompletableFuture<ContainerCommandResponseProto> future =
-          CompletableFuture.supplyAsync(() -> {
-            try {
-              return runCommand(requestProto, builder.build());
-            } catch (Exception e) {
-              LOG.error("gid {} : ApplyTransaction failed. cmd {} logIndex "
-                      + "{} exception {}", gid, requestProto.getCmdType(),
-                  index, e);
-              applyTransactionFuture.completeExceptionally(e);
-              throw e;
-            }
-          }, getCommandExecutor(requestProto));
-      future.thenApply(r -> {
-        if (trx.getServerRole() == RaftPeerRole.LEADER) {
-          long startTime = (long) trx.getStateMachineContext();
-          metrics.incPipelineLatency(cmdType,
-              Time.monotonicNowNanos() - startTime);
-        }
-        // ignore close container exception while marking the stateMachine
-        // unhealthy
-        if (r.getResult() != ContainerProtos.Result.SUCCESS
-            && r.getResult() != ContainerProtos.Result.CONTAINER_NOT_OPEN
-            && r.getResult() != ContainerProtos.Result.CLOSED_CONTAINER_IO) {
-          StorageContainerException sce =
-              new StorageContainerException(r.getMessage(), r.getResult());
-          LOG.error(
-              "gid {} : ApplyTransaction failed. cmd {} logIndex {} msg : "
-                  + "{} Container Result: {}", gid, r.getCmdType(), index,
-              r.getMessage(), r.getResult());
-          metrics.incNumApplyTransactionsFails();
-          // Since applyTransaction is now completed exceptionally, the
-          // exception will be caught by the stateMachineUpdater in Ratis
-          // before any further snapshot is taken, and the Ratis server
-          // will shut down.
-          applyTransactionFuture.completeExceptionally(sce);
-          stateMachineHealthy.compareAndSet(true, false);
-          ratisServer.handleApplyTransactionFailure(gid, trx.getServerRole());
-        } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(
-                "gid {} : ApplyTransaction completed. cmd {} logIndex {} msg : "
-                    + "{} Container Result: {}", gid, r.getCmdType(), index,
-                r.getMessage(), r.getResult());
-          }
-          applyTransactionFuture.complete(r::toByteString);
-          if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile) {
-            metrics.incNumBytesCommittedCount(
-                requestProto.getWriteChunk().getChunkData().getLen());
-          }
-          // Add the entry to the applyTransactionCompletionMap only if the
-          // stateMachine is healthy, i.e. there have been no applyTransaction
-          // failures before.
-          if (isStateMachineHealthy()) {
-            final Long previous = applyTransactionCompletionMap
-                .put(index, trx.getLogEntry().getTerm());
-            Preconditions.checkState(previous == null);
-            updateLastApplied();
-          }
-        }
-        return applyTransactionFuture;
-      }).whenComplete((r, t) ->  {
-        applyTransactionSemaphore.release();
-        metrics.recordApplyTransactionCompletion(
-            Time.monotonicNowNanos() - applyTxnStartTime);
-      });
-      return applyTransactionFuture;
-    } catch (IOException | InterruptedException e) {
-      metrics.incNumApplyTransactionsFails();
-      return completeExceptionally(e);
-    }
-  }
-
-  private static <T> CompletableFuture<T> completeExceptionally(Exception e) {
-    final CompletableFuture<T> future = new CompletableFuture<>();
-    future.completeExceptionally(e);
-    return future;
-  }
-
-  @VisibleForTesting
-  public void evictStateMachineCache() {
-    stateMachineDataCache.invalidateAll();
-    stateMachineDataCache.cleanUp();
-  }
-
-  @Override
-  public void notifySlowness(RoleInfoProto roleInfoProto) {
-    ratisServer.handleNodeSlowness(gid, roleInfoProto);
-  }
-
-  @Override
-  public void notifyExtendedNoLeader(RoleInfoProto roleInfoProto) {
-    ratisServer.handleNoLeader(gid, roleInfoProto);
-  }
-
-  @Override
-  public void notifyNotLeader(Collection<TransactionContext> pendingEntries)
-      throws IOException {
-    evictStateMachineCache();
-  }
-
-  @Override
-  public void notifyLogFailed(Throwable t, LogEntryProto failedEntry) {
-    ratisServer.handleNodeLogFailure(gid, t);
-  }
-
-  @Override
-  public CompletableFuture<TermIndex> notifyInstallSnapshotFromLeader(
-      RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) {
-    ratisServer.handleInstallSnapshotFromLeader(gid, roleInfoProto,
-        firstTermIndexInLog);
-    final CompletableFuture<TermIndex> future = new CompletableFuture<>();
-    future.complete(firstTermIndexInLog);
-    return future;
-  }
-
-  @Override
-  public void notifyGroupRemove() {
-    ratisServer.notifyGroupRemove(gid);
-    // Make best effort to quasi-close all the containers on group removal.
-    // Containers already in terminal state like CLOSED or UNHEALTHY will not
-    // be affected.
-    for (Long cid : container2BCSIDMap.keySet()) {
-      try {
-        containerController.markContainerForClose(cid);
-        containerController.quasiCloseContainer(cid);
-      } catch (IOException e) {
-        // Best-effort close: failures for individual containers are ignored.
-      }
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    evictStateMachineCache();
-    for (ExecutorService executor : executors) {
-      executor.shutdown();
-    }
-  }
-}
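One design choice worth noting in the deleted ContainerStateMachine:
applyTransaction bounds the number of in-flight transactions with a
semaphore that is acquired before submitting work to the command executors
and released in whenComplete. A generic sketch of that backpressure pattern
(the permit limit and all names are assumptions, not values from this code):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Semaphore;
    import java.util.function.Supplier;

    class BoundedApplier {
      private final Semaphore permits = new Semaphore(128); // assumed limit
      private final ExecutorService executor =
          Executors.newFixedThreadPool(4);

      <T> CompletableFuture<T> apply(Supplier<T> command)
          throws InterruptedException {
        permits.acquire(); // blocks the caller when too many are in flight
        return CompletableFuture.supplyAsync(command, executor)
            .whenComplete((r, t) -> permits.release());
      }
    }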
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java
deleted file mode 100644
index 7d46910..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.transport.server.ratis;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-import java.util.Map;
-
-/**
- * DispatcherContext class holds transport protocol specific context info
- * required for execution of container commands over the container dispatcher.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class DispatcherContext {
-  /**
-   * Determines which stage of writeChunk a write chunk request is for.
-   */
-  public enum WriteChunkStage {
-    WRITE_DATA, COMMIT_DATA, COMBINED
-  }
-
-  // whether the chunk data needs to be written or committed or both
-  private final WriteChunkStage stage;
-  // indicates whether the read from tmp chunk files is allowed
-  private final boolean readFromTmpFile;
-  // which term the request is being served in Ratis
-  private final long term;
-  // the log index in Ratis log to which the request belongs to
-  private final long logIndex;
-
-  private final Map<Long, Long> container2BCSIDMap;
-
-  private DispatcherContext(long term, long index, WriteChunkStage stage,
-      boolean readFromTmpFile, Map<Long, Long> container2BCSIDMap) {
-    this.term = term;
-    this.logIndex = index;
-    this.stage = stage;
-    this.readFromTmpFile = readFromTmpFile;
-    this.container2BCSIDMap = container2BCSIDMap;
-  }
-
-  public long getLogIndex() {
-    return logIndex;
-  }
-
-  public boolean isReadFromTmpFile() {
-    return readFromTmpFile;
-  }
-
-  public long getTerm() {
-    return term;
-  }
-
-  public WriteChunkStage getStage() {
-    return stage;
-  }
-
-  public Map<Long, Long> getContainer2BCSIDMap() {
-    return container2BCSIDMap;
-  }
-
-  /**
-   * Builder class for building DispatcherContext.
-   */
-  public static final class Builder {
-    private WriteChunkStage stage = WriteChunkStage.COMBINED;
-    private boolean readFromTmpFile = false;
-    private long term;
-    private long logIndex;
-    private Map<Long, Long> container2BCSIDMap;
-
-    /**
-     * Sets the WriteChunkStage.
-     *
-     * @param writeChunkStage WriteChunk Stage
-     * @return DispatcherContext.Builder
-     */
-    public Builder setStage(WriteChunkStage writeChunkStage) {
-      this.stage = writeChunkStage;
-      return this;
-    }
-
-    /**
-     * Sets the flag for reading from tmp chunk files.
-     *
-     * @param setReadFromTmpFile whether to read from tmp chunk file or not
-     * @return DispatcherContext.Builder
-     */
-    public Builder setReadFromTmpFile(boolean setReadFromTmpFile) {
-      this.readFromTmpFile = setReadFromTmpFile;
-      return this;
-    }
-
-    /**
-     * Sets the current term for the container request from Ratis.
-     *
-     * @param currentTerm current term
-     * @return DispatcherContext.Builder
-     */
-    public Builder setTerm(long currentTerm) {
-      this.term = currentTerm;
-      return this;
-    }
-
-    /**
-     * Sets the logIndex for the container request from Ratis.
-     *
-     * @param index log index
-     * @return DispatcherContext.Builder
-     */
-    public Builder setLogIndex(long index) {
-      this.logIndex = index;
-      return this;
-    }
-
-    /**
-     * Sets the container2BCSIDMap to contain all the containerIds per
-     * RaftGroup.
-     * @param map container2BCSIDMap
-     * @return Builder
-     */
-    public Builder setContainer2BCSIDMap(Map<Long, Long> map) {
-      this.container2BCSIDMap = map;
-      return this;
-    }
-
-    /**
-     * Builds and returns DispatcherContext instance.
-     *
-     * @return DispatcherContext
-     */
-    public DispatcherContext build() {
-      return new DispatcherContext(term, logIndex, stage, readFromTmpFile,
-          container2BCSIDMap);
-    }
-
-  }
-}
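For illustration, a short usage sketch of the builder above, mirroring how
ContainerStateMachine constructs a context for the commit phase of a write
chunk; the term, index, and map arguments are assumed to be supplied by the
caller:

    import java.util.Map;

    // Assumed to live in (or import) the same package as DispatcherContext.
    final class DispatcherContextExample {
      static DispatcherContext commitContext(long term, long index,
          Map<Long, Long> container2BCSIDMap) {
        return new DispatcherContext.Builder()
            .setTerm(term)
            .setLogIndex(index)
            .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
            .setContainer2BCSIDMap(container2BCSIDMap)
            .build();
      }
    }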
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java
deleted file mode 100644
index 7f112ea..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server.ratis;
-
-import org.apache.hadoop.hdds.conf.Config;
-import org.apache.hadoop.hdds.conf.ConfigGroup;
-import org.apache.hadoop.hdds.conf.ConfigTag;
-import org.apache.hadoop.hdds.conf.ConfigType;
-
-/**
- * Holds configuration items for Ratis/Raft server.
- */
-@ConfigGroup(prefix = "hdds.ratis.server")
-public class RatisServerConfiguration {
-
-  private int numSnapshotsRetained;
-
-  @Config(key = "num.snapshots.retained",
-      type = ConfigType.INT,
-      defaultValue = "5",
-      tags = {ConfigTag.STORAGE},
-      description = "Config parameter to specify number of old snapshots " +
-          "retained at the Ratis leader.")
-  public void setNumSnapshotsRetained(int numSnapshotsRetained) {
-    this.numSnapshotsRetained = numSnapshotsRetained;
-  }
-
-  public int getNumSnapshotsRetained() {
-    return numSnapshotsRetained;
-  }
-
-}
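The @ConfigGroup prefix combines with the @Config key, so the effective
property name is hdds.ratis.server.num.snapshots.retained (default 5). A
small sketch of how the object is materialized from configuration,
mirroring the conf.getObject(...) call in XceiverServerRatis below:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public final class RatisServerConfigurationDemo {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Effective key = @ConfigGroup prefix + "." + @Config key.
        conf.setInt("hdds.ratis.server.num.snapshots.retained", 10);
        RatisServerConfiguration ratisConf =
            conf.getObject(RatisServerConfiguration.class);
        System.out.println(ratisConf.getNumSnapshotsRetained()); // prints 10
      }
    }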
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
deleted file mode 100644
index 80e91cd..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ /dev/null
@@ -1,689 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server.ratis;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction;
-import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServer;
-
-import io.opentracing.Scope;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
-import org.apache.ratis.RaftConfigKeys;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
-import org.apache.ratis.conf.RaftProperties;
-import org.apache.ratis.grpc.GrpcConfigKeys;
-import org.apache.ratis.grpc.GrpcFactory;
-import org.apache.ratis.grpc.GrpcTlsConfig;
-import org.apache.ratis.netty.NettyConfigKeys;
-import org.apache.ratis.protocol.ClientId;
-import org.apache.ratis.protocol.GroupInfoReply;
-import org.apache.ratis.protocol.GroupInfoRequest;
-import org.apache.ratis.protocol.NotLeaderException;
-import org.apache.ratis.protocol.RaftClientReply;
-import org.apache.ratis.protocol.RaftClientRequest;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.protocol.RaftPeerId;
-import org.apache.ratis.protocol.StateMachineException;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.server.RaftServer;
-import org.apache.ratis.server.RaftServerConfigKeys;
-import org.apache.ratis.proto.RaftProtos;
-import org.apache.ratis.proto.RaftProtos.RoleInfoProto;
-import org.apache.ratis.proto.RaftProtos.ReplicationLevel;
-import org.apache.ratis.server.protocol.TermIndex;
-import org.apache.ratis.server.impl.RaftServerProxy;
-import org.apache.ratis.util.SizeInBytes;
-import org.apache.ratis.util.TimeDuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Objects;
-import java.util.Collections;
-import java.util.Set;
-import java.util.UUID;
-import java.util.ArrayList;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * Creates a ratis server endpoint that acts as the communication layer for
- * Ozone containers.
- */
-public final class XceiverServerRatis extends XceiverServer {
-  private static final Logger LOG = LoggerFactory
-      .getLogger(XceiverServerRatis.class);
-  private static final AtomicLong CALL_ID_COUNTER = new AtomicLong();
-
-  private static long nextCallId() {
-    return CALL_ID_COUNTER.getAndIncrement() & Long.MAX_VALUE;
-  }
-
-  private int port;
-  private final RaftServer server;
-  private ThreadPoolExecutor chunkExecutor;
-  private final ContainerDispatcher dispatcher;
-  private final ContainerController containerController;
-  private ClientId clientId = ClientId.randomId();
-  private final StateContext context;
-  private final ReplicationLevel replicationLevel;
-  private long nodeFailureTimeoutMs;
-  private final long cacheEntryExpiryInterval;
-  private boolean isStarted = false;
-  private DatanodeDetails datanodeDetails;
-  private final OzoneConfiguration conf;
-  // TODO: Remove the gids set when Ratis supports an API to query active
-  // pipelines.
-  private final Set<RaftGroupId> raftGids = new HashSet<>();
-
-  @SuppressWarnings("parameternumber")
-  private XceiverServerRatis(DatanodeDetails dd, int port,
-      ContainerDispatcher dispatcher, ContainerController containerController,
-      StateContext context, GrpcTlsConfig tlsConfig, CertificateClient caClient,
-      OzoneConfiguration conf)
-      throws IOException {
-    super(conf, caClient);
-    this.conf = conf;
-    Objects.requireNonNull(dd, "datanode details == null");
-    datanodeDetails = dd;
-    this.port = port;
-    RaftProperties serverProperties = newRaftProperties();
-    final int numWriteChunkThreads = conf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT);
-    chunkExecutor =
-        new ThreadPoolExecutor(numWriteChunkThreads, numWriteChunkThreads,
-            100, TimeUnit.SECONDS,
-            new ArrayBlockingQueue<>(1024),
-            new ThreadPoolExecutor.CallerRunsPolicy());
-    this.context = context;
-    this.replicationLevel =
-        conf.getEnum(OzoneConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY,
-            OzoneConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT);
-    cacheEntryExpiryInterval = conf.getTimeDuration(OzoneConfigKeys.
-            DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL,
-        OzoneConfigKeys.
-            DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    this.dispatcher = dispatcher;
-    this.containerController = containerController;
-
-    RaftServer.Builder builder =
-        RaftServer.newBuilder().setServerId(RatisHelper.toRaftPeerId(dd))
-            .setProperties(serverProperties)
-            .setStateMachineRegistry(this::getStateMachine);
-    if (tlsConfig != null) {
-      builder.setParameters(GrpcFactory.newRaftParameters(tlsConfig));
-    }
-    this.server = builder.build();
-  }
-
-  private ContainerStateMachine getStateMachine(RaftGroupId gid) {
-    return new ContainerStateMachine(gid, dispatcher, containerController,
-        chunkExecutor, this, cacheEntryExpiryInterval,
-        getSecurityConfig().isBlockTokenEnabled(), getBlockTokenVerifier(),
-        conf);
-  }
-
-  private RaftProperties newRaftProperties() {
-    final RaftProperties properties = new RaftProperties();
-
-    // Set rpc type
-    final RpcType rpc = setRpcType(properties);
-
-    // set raft segment size
-    setRaftSegmentSize(properties);
-
-    // set raft segment pre-allocated size
-    final int raftSegmentPreallocatedSize =
-        setRaftSegmentPreallocatedSize(properties);
-
-    // Set max write buffer size, which is the scm chunk size
-    final int maxChunkSize = setMaxWriteBuffer(properties);
-    TimeUnit timeUnit;
-    long duration;
-
-    // Enable stateMachineData sync and set its timeout.
-    RaftServerConfigKeys.Log.StateMachineData.setSync(properties, true);
-    timeUnit = OzoneConfigKeys.
-        DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit();
-    duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT,
-        OzoneConfigKeys.
-            DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT
-            .getDuration(), timeUnit);
-    final TimeDuration dataSyncTimeout =
-        TimeDuration.valueOf(duration, timeUnit);
-    RaftServerConfigKeys.Log.StateMachineData
-        .setSyncTimeout(properties, dataSyncTimeout);
-
-    // Set the server Request timeout
-    setServerRequestTimeout(properties);
-
-    // set timeout for a retry cache entry
-    setTimeoutForRetryCache(properties);
-
-    // Set the ratis leader election timeout
-    setRatisLeaderElectionTimeout(properties);
-
-    // Set the maximum cache segments
-    RaftServerConfigKeys.Log.setMaxCachedSegmentNum(properties, 2);
-
-    // set the node failure timeout
-    setNodeFailureTimeout(properties);
-
-    // Set the ratis storage directory
-    String storageDir = HddsServerUtil.getOzoneDatanodeRatisDirectory(conf);
-    RaftServerConfigKeys.setStorageDirs(properties,
-        Collections.singletonList(new File(storageDir)));
-
-    // For grpc set the maximum message size
-    GrpcConfigKeys.setMessageSizeMax(properties,
-        SizeInBytes.valueOf(maxChunkSize + raftSegmentPreallocatedSize));
-
-    // Set the ratis port number
-    if (rpc == SupportedRpcType.GRPC) {
-      GrpcConfigKeys.Server.setPort(properties, port);
-    } else if (rpc == SupportedRpcType.NETTY) {
-      NettyConfigKeys.Server.setPort(properties, port);
-    }
-
-    long snapshotThreshold =
-        conf.getLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY,
-            OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT);
-    RaftServerConfigKeys.Snapshot.setAutoTriggerEnabled(properties, true);
-    RaftServerConfigKeys.Snapshot.setAutoTriggerThreshold(
-        properties, snapshotThreshold);
-    int maxPendingRequests = conf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT
-    );
-    RaftServerConfigKeys.Write.setElementLimit(properties, maxPendingRequests);
-    int logQueueNumElements =
-        conf.getInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS,
-            OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT);
-    final int logQueueByteLimit = (int) conf.getStorageSize(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT,
-        StorageUnit.BYTES);
-    RaftServerConfigKeys.Log.setQueueElementLimit(
-        properties, logQueueNumElements);
-    RaftServerConfigKeys.Log.setQueueByteLimit(properties, logQueueByteLimit);
-
-    int numSyncRetries = conf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES,
-        OzoneConfigKeys.
-            DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT);
-    RaftServerConfigKeys.Log.StateMachineData.setSyncTimeoutRetry(properties,
-        numSyncRetries);
-
-    // Enable the StateMachineCaching
-    RaftServerConfigKeys.Log.StateMachineData.setCachingEnabled(
-        properties, true);
-
-    RaftServerConfigKeys.Log.Appender.setInstallSnapshotEnabled(properties,
-        false);
-
-    int purgeGap = conf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT);
-    RaftServerConfigKeys.Log.setPurgeGap(properties, purgeGap);
-
-    // Set the number of snapshots retained.
-    RatisServerConfiguration ratisServerConfiguration =
-        conf.getObject(RatisServerConfiguration.class);
-    int numSnapshotsRetained =
-        ratisServerConfiguration.getNumSnapshotsRetained();
-    RaftServerConfigKeys.Snapshot.setRetentionFileNum(properties,
-        numSnapshotsRetained);
-    return properties;
-  }
-
-  private void setNodeFailureTimeout(RaftProperties properties) {
-    TimeUnit timeUnit;
-    long duration;
-    timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT
-        .getUnit();
-    duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT
-            .getDuration(), timeUnit);
-    final TimeDuration nodeFailureTimeout =
-        TimeDuration.valueOf(duration, timeUnit);
-    RaftServerConfigKeys.Notification.setNoLeaderTimeout(properties,
-        nodeFailureTimeout);
-    RaftServerConfigKeys.Rpc.setSlownessTimeout(properties,
-        nodeFailureTimeout);
-    nodeFailureTimeoutMs = nodeFailureTimeout.toLong(TimeUnit.MILLISECONDS);
-  }
-
-  private void setRatisLeaderElectionTimeout(RaftProperties properties) {
-    long duration;
-    TimeUnit leaderElectionMinTimeoutUnit =
-        OzoneConfigKeys.
-            DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
-            .getUnit();
-    duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
-        OzoneConfigKeys.
-            DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
-            .getDuration(), leaderElectionMinTimeoutUnit);
-    final TimeDuration leaderElectionMinTimeout =
-        TimeDuration.valueOf(duration, leaderElectionMinTimeoutUnit);
-    RaftServerConfigKeys.Rpc
-        .setTimeoutMin(properties, leaderElectionMinTimeout);
-    long leaderElectionMaxTimeout =
-        leaderElectionMinTimeout.toLong(TimeUnit.MILLISECONDS) + 200;
-    RaftServerConfigKeys.Rpc.setTimeoutMax(properties,
-        TimeDuration.valueOf(leaderElectionMaxTimeout, TimeUnit.MILLISECONDS));
-  }
-
-  private void setTimeoutForRetryCache(RaftProperties properties) {
-    TimeUnit timeUnit;
-    long duration;
-    timeUnit =
-        OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT
-            .getUnit();
-    duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT
-            .getDuration(), timeUnit);
-    final TimeDuration retryCacheTimeout =
-        TimeDuration.valueOf(duration, timeUnit);
-    RaftServerConfigKeys.RetryCache
-        .setExpiryTime(properties, retryCacheTimeout);
-  }
-
-  private void setServerRequestTimeout(RaftProperties properties) {
-    TimeUnit timeUnit;
-    long duration;
-    timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT
-        .getUnit();
-    duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT
-            .getDuration(), timeUnit);
-    final TimeDuration serverRequestTimeout =
-        TimeDuration.valueOf(duration, timeUnit);
-    RaftServerConfigKeys.Rpc
-        .setRequestTimeout(properties, serverRequestTimeout);
-  }
-
-  private int setMaxWriteBuffer(RaftProperties properties) {
-    final int maxChunkSize = OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE;
-    RaftServerConfigKeys.Log.setWriteBufferSize(properties,
-        SizeInBytes.valueOf(maxChunkSize));
-    return maxChunkSize;
-  }
-
-  private int setRaftSegmentPreallocatedSize(RaftProperties properties) {
-    final int raftSegmentPreallocatedSize = (int) conf.getStorageSize(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT,
-        StorageUnit.BYTES);
-    int logAppenderQueueNumElements = conf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS,
-        OzoneConfigKeys
-            .DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT);
-    final int logAppenderQueueByteLimit = (int) conf.getStorageSize(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT,
-        OzoneConfigKeys
-            .DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT,
-        StorageUnit.BYTES);
-    RaftServerConfigKeys.Log.Appender
-        .setBufferElementLimit(properties, logAppenderQueueNumElements);
-    RaftServerConfigKeys.Log.Appender.setBufferByteLimit(properties,
-        SizeInBytes.valueOf(logAppenderQueueByteLimit));
-    RaftServerConfigKeys.Log.setPreallocatedSize(properties,
-        SizeInBytes.valueOf(raftSegmentPreallocatedSize));
-    return raftSegmentPreallocatedSize;
-  }
-
-  private void setRaftSegmentSize(RaftProperties properties) {
-    final int raftSegmentSize = (int)conf.getStorageSize(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT,
-        StorageUnit.BYTES);
-    RaftServerConfigKeys.Log.setSegmentSizeMax(properties,
-        SizeInBytes.valueOf(raftSegmentSize));
-  }
-
-  private RpcType setRpcType(RaftProperties properties) {
-    final String rpcType = conf.get(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
-    final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType);
-    RaftConfigKeys.Rpc.setType(properties, rpc);
-    return rpc;
-  }
-
-  public static XceiverServerRatis newXceiverServerRatis(
-      DatanodeDetails datanodeDetails, OzoneConfiguration ozoneConf,
-      ContainerDispatcher dispatcher, ContainerController containerController,
-      CertificateClient caClient, StateContext context) throws IOException {
-    int localPort = ozoneConf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT);
-
-    // Get an available port on current node and
-    // use that as the container port
-    if (ozoneConf.getBoolean(OzoneConfigKeys
-            .DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT)) {
-      localPort = 0;
-    }
-    GrpcTlsConfig tlsConfig = RatisHelper.createTlsServerConfigForDN(
-          new SecurityConfig(ozoneConf), caClient);
-
-    return new XceiverServerRatis(datanodeDetails, localPort, dispatcher,
-        containerController, context, tlsConfig, caClient, ozoneConf);
-  }
-
-  @Override
-  public void start() throws IOException {
-    if (!isStarted) {
-      LOG.info("Starting {} {} at port {}", getClass().getSimpleName(),
-          server.getId(), getIPCPort());
-      chunkExecutor.prestartAllCoreThreads();
-      server.start();
-
-      int realPort =
-          ((RaftServerProxy) server).getServerRpc().getInetSocketAddress()
-              .getPort();
-
-      if (port == 0) {
-        LOG.info("{} {} is started using port {}", getClass().getSimpleName(),
-            server.getId(), realPort);
-        port = realPort;
-      }
-
-      // Register the real port in the datanode details.
-      datanodeDetails.setPort(DatanodeDetails
-          .newPort(DatanodeDetails.Port.Name.RATIS,
-              realPort));
-
-      isStarted = true;
-    }
-  }
-
-  @Override
-  public void stop() {
-    if (isStarted) {
-      try {
-        // Shut down the server before the executors, since some tasks are
-        // still executed on the executors while the server is shutting down.
-        server.close();
-        chunkExecutor.shutdown();
-        isStarted = false;
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-  }
-
-  @Override
-  public int getIPCPort() {
-    return port;
-  }
-
-  /**
-   * Returns the Replication type supported by this end-point.
-   *
-   * @return enum -- {Stand_Alone, Ratis, Chained}
-   */
-  @Override
-  public HddsProtos.ReplicationType getServerType() {
-    return HddsProtos.ReplicationType.RATIS;
-  }
-
-  @VisibleForTesting
-  public RaftServer getServer() {
-    return server;
-  }
-
-  private void processReply(RaftClientReply reply) throws IOException {
-    // A NotLeaderException is thrown only when the raft server to which the
-    // request is submitted is not the leader. The request will be rejected
-    // and will eventually be executed once it is resubmitted via the leader
-    // node.
-    NotLeaderException notLeaderException = reply.getNotLeaderException();
-    if (notLeaderException != null) {
-      throw notLeaderException;
-    }
-    StateMachineException stateMachineException =
-        reply.getStateMachineException();
-    if (stateMachineException != null) {
-      throw stateMachineException;
-    }
-  }
-
-  @Override
-  public void submitRequest(ContainerCommandRequestProto request,
-      HddsProtos.PipelineID pipelineID) throws IOException {
-    super.submitRequest(request, pipelineID);
-    RaftClientReply reply;
-    try (Scope scope = TracingUtil
-        .importAndCreateScope(
-            "XceiverServerRatis." + request.getCmdType().name(),
-            request.getTraceID())) {
-
-      RaftClientRequest raftClientRequest =
-          createRaftClientRequest(request, pipelineID,
-              RaftClientRequest.writeRequestType());
-      try {
-        reply = server.submitClientRequestAsync(raftClientRequest).get();
-      } catch (Exception e) {
-        throw new IOException(e.getMessage(), e);
-      }
-      processReply(reply);
-    }
-  }
-
-  private RaftClientRequest createRaftClientRequest(
-      ContainerCommandRequestProto request, HddsProtos.PipelineID pipelineID,
-      RaftClientRequest.Type type) {
-    return new RaftClientRequest(clientId, server.getId(),
-        RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineID).getId()),
-        nextCallId(), ContainerCommandRequestMessage.toMessage(request, null),
-        type, null);
-  }
-
-  private GroupInfoRequest createGroupInfoRequest(
-      HddsProtos.PipelineID pipelineID) {
-    return new GroupInfoRequest(clientId, server.getId(),
-        RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineID).getId()),
-        nextCallId());
-  }
-
-  private void handlePipelineFailure(RaftGroupId groupId,
-      RoleInfoProto roleInfoProto) {
-    String msg;
-    UUID datanode = RatisHelper.toDatanodeId(roleInfoProto.getSelf());
-    RaftPeerId id = RaftPeerId.valueOf(roleInfoProto.getSelf().getId());
-    switch (roleInfoProto.getRole()) {
-    case CANDIDATE:
-      msg = datanode + " is in candidate state for " +
-          roleInfoProto.getCandidateInfo().getLastLeaderElapsedTimeMs() + "ms";
-      break;
-    case LEADER:
-      StringBuilder sb = new StringBuilder();
-      sb.append(datanode).append(" has not seen follower/s");
-      for (RaftProtos.ServerRpcProto follower : roleInfoProto.getLeaderInfo()
-          .getFollowerInfoList()) {
-        if (follower.getLastRpcElapsedTimeMs() > nodeFailureTimeoutMs) {
-          sb.append(" ").append(RatisHelper.toDatanodeId(follower.getId()))
-              .append(" for ").append(follower.getLastRpcElapsedTimeMs())
-              .append("ms");
-        }
-      }
-      msg = sb.toString();
-      break;
-    default:
-      LOG.error("unknown state:" + roleInfoProto.getRole());
-      throw new IllegalStateException("node" + id + " is in illegal role "
-          + roleInfoProto.getRole());
-    }
-
-    triggerPipelineClose(groupId, msg,
-        ClosePipelineInfo.Reason.PIPELINE_FAILED, false);
-  }
-
-  private void triggerPipelineClose(RaftGroupId groupId, String detail,
-      ClosePipelineInfo.Reason reasonCode, boolean triggerHB) {
-    PipelineID pipelineID = PipelineID.valueOf(groupId.getUuid());
-    ClosePipelineInfo.Builder closePipelineInfo =
-        ClosePipelineInfo.newBuilder()
-            .setPipelineID(pipelineID.getProtobuf())
-            .setReason(reasonCode)
-            .setDetailedReason(detail);
-
-    PipelineAction action = PipelineAction.newBuilder()
-        .setClosePipeline(closePipelineInfo)
-        .setAction(PipelineAction.Action.CLOSE)
-        .build();
-    context.addPipelineActionIfAbsent(action);
-    // wait for the next HB timeout or right away?
-    if (triggerHB) {
-      context.getParent().triggerHeartbeat();
-    }
-    LOG.error(
-        "Pipeline action " + action.getAction() + " on pipeline " + pipelineID
-            + ". Reason: " + action.getClosePipeline().getDetailedReason());
-  }
-
-  @Override
-  public boolean isExist(HddsProtos.PipelineID pipelineId) {
-    return raftGids.contains(
-        RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineId).getId()));
-  }
-
-  @Override
-  public List<PipelineReport> getPipelineReport() {
-    try {
-      Iterable<RaftGroupId> gids = server.getGroupIds();
-      List<PipelineReport> reports = new ArrayList<>();
-      for (RaftGroupId groupId : gids) {
-        reports.add(PipelineReport.newBuilder()
-            .setPipelineID(PipelineID.valueOf(groupId.getUuid()).getProtobuf())
-            .build());
-      }
-      return reports;
-    } catch (Exception e) {
-      // If the group ids cannot be retrieved, report no pipelines.
-      return null;
-    }
-  }
-
-  @VisibleForTesting
-  public List<PipelineID> getPipelineIds() {
-    Iterable<RaftGroupId> gids = server.getGroupIds();
-    List<PipelineID> pipelineIDs = new ArrayList<>();
-    for (RaftGroupId groupId : gids) {
-      pipelineIDs.add(PipelineID.valueOf(groupId.getUuid()));
-      LOG.info("pipeline id {}", PipelineID.valueOf(groupId.getUuid()));
-    }
-    return pipelineIDs;
-  }
-
-  void handleNodeSlowness(RaftGroupId groupId, RoleInfoProto roleInfoProto) {
-    handlePipelineFailure(groupId, roleInfoProto);
-  }
-
-  void handleNoLeader(RaftGroupId groupId, RoleInfoProto roleInfoProto) {
-    handlePipelineFailure(groupId, roleInfoProto);
-  }
-
-  void handleApplyTransactionFailure(RaftGroupId groupId,
-      RaftProtos.RaftPeerRole role) {
-    UUID dnId = RatisHelper.toDatanodeId(getServer().getId());
-    String msg =
-        "Ratis transaction failure in datanode " + dnId + " with role " + role
-            + ". Triggering pipeline close action.";
-    triggerPipelineClose(groupId, msg,
-        ClosePipelineInfo.Reason.STATEMACHINE_TRANSACTION_FAILED, true);
-  }
-
-  /**
-   * Since the snapshot contents cannot be used to actually catch up the
-   * follower, a pipeline close is initiated instead of installing the
-   * snapshot; the follower would otherwise never be able to catch up.
-   *
-   * @param groupId raft group information
-   * @param roleInfoProto information about the current node role and
-   *                      rpc delay information.
-   * @param firstTermIndexInLog the term index of the last entry included
-   *                            in the leader's snapshot.
-   */
-  void handleInstallSnapshotFromLeader(RaftGroupId groupId,
-                                       RoleInfoProto roleInfoProto,
-                                       TermIndex firstTermIndexInLog) {
-    LOG.warn("Install snapshot notification received from Leader with " +
-        "termIndex: {}, terminating pipeline: {}",
-        firstTermIndexInLog, groupId);
-    handlePipelineFailure(groupId, roleInfoProto);
-  }
-
-  /**
-   * Notify the Datanode Ratis endpoint of Ratis log failure.
-   * Expected to be invoked from the Container StateMachine
-   * @param groupId the Ratis group/pipeline for which log has failed
-   * @param t exception encountered at the time of the failure
-   *
-   */
-  @VisibleForTesting
-  public void handleNodeLogFailure(RaftGroupId groupId, Throwable t) {
-    String msg = (t == null) ? "Unspecified failure reported in Ratis log"
-        : t.getMessage();
-
-    triggerPipelineClose(groupId, msg,
-        ClosePipelineInfo.Reason.PIPELINE_LOG_FAILED, true);
-  }
-
-  public long getMinReplicatedIndex(PipelineID pipelineID) throws IOException {
-    GroupInfoReply reply = getServer()
-        .getGroupInfo(createGroupInfoRequest(pipelineID.getProtobuf()));
-    Long minIndex = RatisHelper.getMinReplicatedIndex(reply.getCommitInfos());
-    return minIndex == null ? -1 : minIndex.longValue();
-  }
-
-  void notifyGroupRemove(RaftGroupId gid) {
-    raftGids.remove(gid);
-  }
-
-  void notifyGroupAdd(RaftGroupId gid) {
-    raftGids.add(gid);
-  }
-}
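Several of the setters above (server request timeout, retry cache expiry,
node failure timeout, leader election timeout) repeat one pattern: take the
TimeUnit from the default TimeDuration, read the configured duration in that
unit, and rebuild a TimeDuration. A sketch of that pattern with a
hypothetical key and default:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.ratis.util.TimeDuration;

    final class TimeoutConfigExample {
      // Hypothetical default; the real code takes these from OzoneConfigKeys.
      static final TimeDuration EXAMPLE_DEFAULT =
          TimeDuration.valueOf(10, TimeUnit.SECONDS);

      static TimeDuration readTimeout(OzoneConfiguration conf) {
        TimeUnit unit = EXAMPLE_DEFAULT.getUnit();
        long duration = conf.getTimeDuration(
            "example.ratis.request.timeout", // hypothetical key
            EXAMPLE_DEFAULT.getDuration(), unit);
        return TimeDuration.valueOf(duration, unit);
      }
    }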
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java
deleted file mode 100644
index 8debfe0..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.transport.server.ratis;
-
-/**
- * This package contains classes for the server implementation
- * using Apache Ratis.
- */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
deleted file mode 100644
index 4ddb4e4..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.utils;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.collections.MapIterator;
-import org.apache.commons.collections.map.LRUMap;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * ContainerCache is an LRUMap that maintains the container DB handles.
- */
-public final class ContainerCache extends LRUMap {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerCache.class);
-  private final Lock lock = new ReentrantLock();
-  private static ContainerCache cache;
-  private static final float LOAD_FACTOR = 0.75f;
-  /**
-   * Constructs a cache that holds DBHandle references.
-   */
-  private ContainerCache(int maxSize, float loadFactor, boolean
-      scanUntilRemovable) {
-    super(maxSize, loadFactor, scanUntilRemovable);
-  }
-
-  /**
-   * Returns the singleton instance of {@link ContainerCache}
-   * that holds the DB handles.
-   *
-   * @param conf - Configuration.
-   * @return An instance of {@link ContainerCache}.
-   */
-  public static synchronized ContainerCache getInstance(Configuration conf) {
-    if (cache == null) {
-      int cacheSize = conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE,
-          OzoneConfigKeys.OZONE_CONTAINER_CACHE_DEFAULT);
-      cache = new ContainerCache(cacheSize, LOAD_FACTOR, true);
-    }
-    return cache;
-  }
-
-  /**
-   * Closes all the db instances and resets the cache.
-   */
-  public void shutdownCache() {
-    lock.lock();
-    try {
-      // iterate the cache and close each db
-      MapIterator iterator = cache.mapIterator();
-      while (iterator.hasNext()) {
-        iterator.next();
-        ReferenceCountedDB db = (ReferenceCountedDB) iterator.getValue();
-        Preconditions.checkArgument(db.cleanup(), "refCount:",
-            db.getReferenceCount());
-      }
-      // reset the cache
-      cache.clear();
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected boolean removeLRU(LinkEntry entry) {
-    ReferenceCountedDB db = (ReferenceCountedDB) entry.getValue();
-    lock.lock();
-    try {
-      return db.cleanup();
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * Returns a DB handle if available, creating the handle otherwise.
-   *
-   * @param containerID - ID of the container.
-   * @param containerDBType - DB type of the container.
-   * @param containerDBPath - DB path of the container.
-   * @param conf - Hadoop Configuration.
-   * @return ReferenceCountedDB.
-   */
-  public ReferenceCountedDB getDB(long containerID, String containerDBType,
-                             String containerDBPath, Configuration conf)
-      throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative.");
-    lock.lock();
-    try {
-      ReferenceCountedDB db = (ReferenceCountedDB) this.get(containerDBPath);
-
-      if (db == null) {
-        MetadataStore metadataStore =
-            MetadataStoreBuilder.newBuilder()
-            .setDbFile(new File(containerDBPath))
-            .setCreateIfMissing(false)
-            .setConf(conf)
-            .setDBType(containerDBType)
-            .build();
-        db = new ReferenceCountedDB(metadataStore, containerDBPath);
-        this.put(containerDBPath, db);
-      }
-      // increment the reference before returning the object
-      db.incrementReference();
-      return db;
-    } catch (Exception e) {
-      LOG.error("Error opening DB. Container:{} ContainerPath:{}",
-          containerID, containerDBPath, e);
-      throw e;
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * Removes a DB handle from the cache.
-   *
-   * @param containerDBPath - path of the container db file.
-   */
-  public void removeDB(String containerDBPath) {
-    lock.lock();
-    try {
-      ReferenceCountedDB db = (ReferenceCountedDB)this.get(containerDBPath);
-      if (db != null) {
-        Preconditions.checkArgument(db.cleanup(), "refCount:",
-            db.getReferenceCount());
-      }
-      this.remove(containerDBPath);
-    } finally {
-      lock.unlock();
-    }
-  }
-}
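
For context on what is being removed here: ContainerCache pairs LRU eviction with reference counting, so a DB handle is only really closed once no caller still holds it. getDB() increments the count before handing out the handle, and ReferenceCountedDB's close() decrements it, which makes try-with-resources the natural calling pattern. A minimal sketch of that pattern, assuming a Configuration named conf and purely hypothetical container id, DB type and path values:

    // Sketch only: getDB() bumps the reference count before returning, and
    // try-with-resources releases it again via ReferenceCountedDB.close().
    ContainerCache cache = ContainerCache.getInstance(conf);
    try (ReferenceCountedDB db = cache.getDB(7L, "RocksDB",
        "/data/disk1/hdds/scm-uuid/current/7/metadata", conf)) {
      MetadataStore store = db.getStore();
      // ... read or write container metadata while the reference is held ...
    }

Once the count is back to zero, the LRU policy is free to evict the entry, at which point removeLRU() calls cleanup() and closes the underlying store.
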
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
deleted file mode 100644
index cb356da..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.utils;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
-import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Properties;
-import java.util.UUID;
-
-/**
- * A util class for {@link HddsVolume}.
- */
-public final class HddsVolumeUtil {
-
-  // Private constructor for Utility class. Unused.
-  private HddsVolumeUtil() {
-  }
-
-  private static final String VERSION_FILE   = "VERSION";
-  private static final String STORAGE_ID_PREFIX = "DS-";
-
-  public static File getVersionFile(File rootDir) {
-    return new File(rootDir, VERSION_FILE);
-  }
-
-  public static String generateUuid() {
-    return STORAGE_ID_PREFIX + UUID.randomUUID();
-  }
-
-  /**
-   * Get hddsRoot from volume root. If volumeRoot points to hddsRoot, it is
-   * returned as is.
-   * For a volumeRoot /data/disk1, the hddsRoot is /data/disk1/hdds.
-   * @param volumeRoot root of the volume.
-   * @return hddsRoot of the volume.
-   */
-  public static String getHddsRoot(String volumeRoot) {
-    if (volumeRoot.endsWith(HddsVolume.HDDS_VOLUME_DIR)) {
-      return volumeRoot;
-    } else {
-      File hddsRoot = new File(volumeRoot, HddsVolume.HDDS_VOLUME_DIR);
-      return hddsRoot.getPath();
-    }
-  }
-
-  /**
-   * Returns storageID if it is valid. Throws an exception otherwise.
-   */
-  @VisibleForTesting
-  public static String getStorageID(Properties props, File versionFile)
-      throws InconsistentStorageStateException {
-    return getProperty(props, OzoneConsts.STORAGE_ID, versionFile);
-  }
-
-  /**
-   * Returns clusterID if it is valid. It should match the clusterID from the
-   * Datanode. Throws an exception otherwise.
-   */
-  @VisibleForTesting
-  public static String getClusterID(Properties props, File versionFile,
-      String clusterID) throws InconsistentStorageStateException {
-    String cid = getProperty(props, OzoneConsts.CLUSTER_ID, versionFile);
-
-    if (clusterID == null) {
-      return cid;
-    }
-    if (!clusterID.equals(cid)) {
-      throw new InconsistentStorageStateException("Mismatched " +
-          "ClusterIDs. Version File : " + versionFile + " has clusterID: " +
-          cid + " and Datanode has clusterID: " + clusterID);
-    }
-    return cid;
-  }
-
-  /**
-   * Returns datanodeUuid if it is valid. It should match the UUID of the
-   * Datanode. Throws an exception otherwise.
-   */
-  @VisibleForTesting
-  public static String getDatanodeUUID(Properties props, File versionFile,
-      String datanodeUuid)
-      throws InconsistentStorageStateException {
-    String datanodeID = getProperty(props, OzoneConsts.DATANODE_UUID,
-        versionFile);
-
-    if (datanodeUuid != null && !datanodeUuid.equals(datanodeID)) {
-      throw new InconsistentStorageStateException("Mismatched " +
-          "DatanodeUUIDs. Version File : " + versionFile + " has datanodeUuid: "
-          + datanodeID + " and Datanode has datanodeUuid: " + datanodeUuid);
-    }
-    return datanodeID;
-  }
-
-  /**
-   * Returns creationTime if it is valid. Throws an exception otherwise.
-   */
-  @VisibleForTesting
-  public static long getCreationTime(Properties props, File versionFile)
-      throws InconsistentStorageStateException {
-    String cTimeStr = getProperty(props, OzoneConsts.CTIME, versionFile);
-
-    long cTime = Long.parseLong(cTimeStr);
-    long currentTime = Time.now();
-    if (cTime > currentTime || cTime < 0) {
-      throw new InconsistentStorageStateException("Invalid Creation time in " +
-          "Version File : " + versionFile + " - " + cTime + ". Current system" +
-          " time is " + currentTime);
-    }
-    return cTime;
-  }
-
-  /**
-   * Returns layOutVersion if it is valid. Throws an exception otherwise.
-   */
-  @VisibleForTesting
-  public static int getLayOutVersion(Properties props, File versionFile) throws
-      InconsistentStorageStateException {
-    String lvStr = getProperty(props, OzoneConsts.LAYOUTVERSION, versionFile);
-
-    int lv = Integer.parseInt(lvStr);
-    if (DataNodeLayoutVersion.getLatestVersion().getVersion() != lv) {
-      throw new InconsistentStorageStateException("Invalid layOutVersion. " +
-          "Version file has layOutVersion as " + lv + " and latest Datanode " +
-          "layOutVersion is " +
-          DataNodeLayoutVersion.getLatestVersion().getVersion());
-    }
-    return lv;
-  }
-
-  private static String getProperty(Properties props, String propName, File
-      versionFile)
-      throws InconsistentStorageStateException {
-    String value = props.getProperty(propName);
-    if (StringUtils.isBlank(value)) {
-      throw new InconsistentStorageStateException("Invalid " + propName +
-          ". Version File : " + versionFile + " has null or empty " + propName);
-    }
-    return value;
-  }
-
-  /**
-   * Checks whether the volume is in a consistent state.
-   * @param hddsVolume the volume to check.
-   * @param scmId the SCM id expected on the volume.
-   * @param clusterId the cluster id used to format the volume.
-   * @param logger the logger to report inconsistencies to.
-   * @return true - if volume is in consistent state, otherwise false.
-   */
-  public static boolean checkVolume(HddsVolume hddsVolume, String scmId, String
-      clusterId, Logger logger) {
-    File hddsRoot = hddsVolume.getHddsRootDir();
-    String volumeRoot = hddsRoot.getPath();
-    File scmDir = new File(hddsRoot, scmId);
-
-    try {
-      hddsVolume.format(clusterId);
-    } catch (IOException ex) {
-      logger.error("Error during formatting volume {}, exception is {}",
-          volumeRoot, ex);
-      return false;
-    }
-
-    File[] hddsFiles = hddsRoot.listFiles();
-
-    if (hddsFiles == null) {
-      // This is the case for IOException, where listFiles returns null.
-      // So, we fail the volume.
-      return false;
-    } else if (hddsFiles.length == 1) {
-      // DN started for first time or this is a newly added volume.
-      // So we create scm directory.
-      if (!scmDir.mkdir()) {
-        logger.error("Unable to create scmDir {}", scmDir);
-        return false;
-      }
-      return true;
-    } else if (hddsFiles.length == 2) {
-      // The files should be Version and SCM directory
-      if (scmDir.exists()) {
-        return true;
-      } else {
-        logger.error("Volume {} is in Inconsistent state, expected scm " +
-                "directory {} does not exist", volumeRoot, scmDir
-            .getAbsolutePath());
-        return false;
-      }
-    } else {
-      // The hdds root dir should always have 2 entries. One is the Version
-      // file and the other is the SCM directory.
-      return false;
-    }
-
-  }
-}
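
The helpers deleted above all follow one pattern: load the VERSION file into java.util.Properties, fail fast on any blank property, and cross-check values the datanode already knows (cluster id, datanode uuid, layout version). A self-contained sketch of that pattern, with a hypothetical volume path and property key:

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Properties;

    public final class VersionFileCheck {
      public static void main(String[] args) throws IOException {
        // Hypothetical layout: the VERSION file sits at the hdds root,
        // next to the per-SCM directory.
        File versionFile = new File("/data/disk1/hdds", "VERSION");
        Properties props = new Properties();
        try (InputStream in = new FileInputStream(versionFile)) {
          props.load(in);
        }
        // Same blank-check as the removed getProperty(): a missing or
        // empty value means the volume is in an inconsistent state.
        String clusterId = props.getProperty("clusterID");
        if (clusterId == null || clusterId.trim().isEmpty()) {
          throw new IOException("Invalid clusterID in " + versionFile);
        }
        System.out.println("clusterID = " + clusterId);
      }
    }
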
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
deleted file mode 100644
index fb143a4..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.utils;
-
-import com.google.common.base.Preconditions;
-
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * Class to implement reference counting over instances handed by Container
- * Cache.
- * Enabling TRACE logging below lets us quickly locate a leaked reference
- * from the caller's stack. Once the JDK 9 StackWalker is available, we can
- * switch to StackWalker instead of new Exception().printStackTrace().
- */
-public class ReferenceCountedDB implements Closeable {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ReferenceCountedDB.class);
-  private final AtomicInteger referenceCount;
-  private final MetadataStore store;
-  private final String containerDBPath;
-
-  public ReferenceCountedDB(MetadataStore store, String containerDBPath) {
-    this.referenceCount = new AtomicInteger(0);
-    this.store = store;
-    this.containerDBPath = containerDBPath;
-  }
-
-  public long getReferenceCount() {
-    return referenceCount.get();
-  }
-
-  public void incrementReference() {
-    this.referenceCount.incrementAndGet();
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("IncRef {} to refCnt {}, stackTrace: {}", containerDBPath,
-          referenceCount.get(), ExceptionUtils.getStackTrace(new Throwable()));
-    }
-  }
-
-  public void decrementReference() {
-    int refCount = this.referenceCount.decrementAndGet();
-    Preconditions.checkArgument(refCount >= 0, "refCount:", refCount);
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("DecRef {} to refCnt {}, stackTrace: {}", containerDBPath,
-          referenceCount.get(), ExceptionUtils.getStackTrace(new Throwable()));
-    }
-  }
-
-  public boolean cleanup() {
-    if (referenceCount.get() == 0 && store != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Close {} refCnt {}", containerDBPath,
-            referenceCount.get());
-      }
-      try {
-        store.close();
-        return true;
-      } catch (Exception e) {
-        LOG.error("Error closing DB. Container: " + containerDBPath, e);
-        return false;
-      }
-    } else {
-      return false;
-    }
-  }
-
-  public MetadataStore getStore() {
-    return store;
-  }
-
-  public void close() {
-    decrementReference();
-  }
-}
\ No newline at end of file
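
Stripped of the logging, ReferenceCountedDB is a small reusable idiom: an AtomicInteger guards a shared Closeable so that the owning cache can only really close it once every borrower has returned it. A stripped-down, self-contained sketch of that contract (the names here are illustrative, not the removed API):

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicInteger;

    final class RefCounted<T extends Closeable> implements Closeable {
      private final AtomicInteger refs = new AtomicInteger();
      private final T resource;

      RefCounted(T resource) {
        this.resource = resource;
      }

      T acquire() {
        refs.incrementAndGet();         // borrow: mirrors incrementReference()
        return resource;
      }

      @Override
      public void close() {
        int n = refs.decrementAndGet(); // return: mirrors decrementReference()
        if (n < 0) {
          throw new IllegalStateException("refCount went negative: " + n);
        }
      }

      // Only the owning cache calls this, and it only succeeds when the
      // resource is idle: the same contract as the removed cleanup().
      boolean cleanup() throws IOException {
        if (refs.get() == 0) {
          resource.close();
          return true;
        }
        return false;
      }
    }
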
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java
deleted file mode 100644
index 08264f0..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.utils;
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
deleted file mode 100644
index c0c719b..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
+++ /dev/null
@@ -1,1298 +0,0 @@
-/*
- * Copyright (C) 2007 The Guava Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- * Some portions of this class have been modified to make it functional in this
- * package.
- */
-package org.apache.hadoop.ozone.container.common.volume;
-
-import com.google.common.annotations.Beta;
-import com.google.common.annotations.GwtCompatible;
-import com.google.common.base.Preconditions;
-import static com.google.common.base.Preconditions.checkNotNull;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import com.google.common.util.concurrent.Uninterruptibles;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
-    .newUpdater;
-
-import javax.annotation.Nullable;
-import java.security.AccessController;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executor;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy;
-import java.util.concurrent.locks.LockSupport;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-/**
- * An abstract implementation of {@link ListenableFuture}, intended for
- * advanced users only. More common ways to create a {@code ListenableFuture}
- * include instantiating a {@link SettableFuture}, submitting a task to a
- * {@link ListeningExecutorService}, and deriving a {@code Future} from an
- * existing one, typically using methods like
- * {@link Futures#transform(ListenableFuture, com.google.common.base.Function)
- * Futures.transform} and its overloaded versions.
- * <p>
- * <p>This class implements all methods in {@code ListenableFuture}.
- * Subclasses should provide a way to set the result of the computation
- * through the protected methods {@link #set(Object)},
- * {@link #setFuture(ListenableFuture)} and {@link #setException(Throwable)}.
- * Subclasses may also override {@link #interruptTask()}, which will be
- * invoked automatically if a call to {@link #cancel(boolean) cancel(true)}
- * succeeds in canceling the future. Subclasses should rarely override other
- * methods.
- */
-
-@GwtCompatible(emulated = true)
-public abstract class AbstractFuture<V> implements ListenableFuture<V> {
-  // NOTE: Whenever both tests are cheap and functional, it's faster to use &,
-  // | instead of &&, ||
-
-  private static final boolean GENERATE_CANCELLATION_CAUSES =
-      Boolean.parseBoolean(
-          System.getProperty("guava.concurrent.generate_cancellation_cause",
-              "false"));
-
-  /**
-   * A less abstract subclass of AbstractFuture. This can be used to optimize
-   * setFuture by ensuring that {@link #get} calls exactly the implementation
-   * of {@link AbstractFuture#get}.
-   */
-  abstract static class TrustedFuture<V> extends AbstractFuture<V> {
-    @Override
-    public final V get() throws InterruptedException, ExecutionException {
-      return super.get();
-    }
-
-    @Override
-    public final V get(long timeout, TimeUnit unit)
-        throws InterruptedException, ExecutionException, TimeoutException {
-      return super.get(timeout, unit);
-    }
-
-    @Override
-    public final boolean isDone() {
-      return super.isDone();
-    }
-
-    @Override
-    public final boolean isCancelled() {
-      return super.isCancelled();
-    }
-
-    @Override
-    public final void addListener(Runnable listener, Executor executor) {
-      super.addListener(listener, executor);
-    }
-
-    @Override
-    public final boolean cancel(boolean mayInterruptIfRunning) {
-      return super.cancel(mayInterruptIfRunning);
-    }
-  }
-
-  // Logger to log exceptions caught when running listeners.
-  private static final Logger LOG = Logger
-      .getLogger(AbstractFuture.class.getName());
-
-  // A heuristic for timed gets. If the remaining timeout is less than this,
-  // spin instead of
-  // blocking. This value is what AbstractQueuedSynchronizer uses.
-  private static final long SPIN_THRESHOLD_NANOS = 1000L;
-
-  private static final AtomicHelper ATOMIC_HELPER;
-
-  static {
-    AtomicHelper helper;
-
-    try {
-      helper = new UnsafeAtomicHelper();
-    } catch (Throwable unsafeFailure) {
-      // catch absolutely everything and fall through to our 'SafeAtomicHelper'
-      // The access control checks that ARFU does mean the caller class has
-      // to be AbstractFuture
-      // instead of SafeAtomicHelper, so we annoyingly define these here
-      try {
-        helper =
-            new SafeAtomicHelper(
-                newUpdater(Waiter.class, Thread.class, "thread"),
-                newUpdater(Waiter.class, Waiter.class, "next"),
-                newUpdater(AbstractFuture.class, Waiter.class, "waiters"),
-                newUpdater(AbstractFuture.class, Listener.class, "listeners"),
-                newUpdater(AbstractFuture.class, Object.class, "value"));
-      } catch (Throwable atomicReferenceFieldUpdaterFailure) {
-        // Some Android 5.0.x Samsung devices have bugs in JDK reflection APIs
-        // that cause getDeclaredField to throw a NoSuchFieldException when
-        // the field is definitely there.
-        // For these users fallback to a suboptimal implementation, based on
-        // synchronized. This will be a definite performance hit to those users.
-        LOG.log(Level.SEVERE, "UnsafeAtomicHelper is broken!", unsafeFailure);
-        LOG.log(
-            Level.SEVERE, "SafeAtomicHelper is broken!",
-            atomicReferenceFieldUpdaterFailure);
-        helper = new SynchronizedHelper();
-      }
-    }
-    ATOMIC_HELPER = helper;
-
-    // Prevent rare disastrous classloading in first call to LockSupport.park.
-    // See: https://bugs.openjdk.java.net/browse/JDK-8074773
-    @SuppressWarnings("unused")
-    @SuppressFBWarnings
-    Class<?> ensureLoaded = LockSupport.class;
-  }
-
-  /**
-   * Waiter links form a Treiber stack, in the {@link #waiters} field.
-   */
-  @SuppressWarnings("visibilitymodifier")
-  private static final class Waiter {
-    static final Waiter TOMBSTONE = new Waiter(false /* ignored param */);
-
-    @Nullable volatile Thread thread;
-    @Nullable volatile Waiter next;
-
-    /**
-     * Constructor for the TOMBSTONE, avoids use of ATOMIC_HELPER in case this
-     * class is loaded before the ATOMIC_HELPER. Apparently this is possible
-     * on some android platforms.
-     */
-    Waiter(boolean unused) {
-    }
-
-    Waiter() {
-      // avoid volatile write, write is made visible by subsequent CAS on
-      // waiters field
-      ATOMIC_HELPER.putThread(this, Thread.currentThread());
-    }
-
-    // non-volatile write to the next field. Should be made visible by
-    // subsequent CAS on waiters field.
-    void setNext(Waiter next) {
-      ATOMIC_HELPER.putNext(this, next);
-    }
-
-    void unpark() {
-      // This is racy with removeWaiter. The consequence of the race is that
-      // we may spuriously call unpark even though the thread has already
-      // removed itself from the list. But even if we did use a CAS, that
-      // race would still exist (it would just be ever so slightly smaller).
-      Thread w = thread;
-      if (w != null) {
-        thread = null;
-        LockSupport.unpark(w);
-      }
-    }
-  }
-
-  /**
-   * Marks the given node as 'deleted' (null waiter) and then scans the list
-   * to unlink all deleted nodes. This is an O(n) operation in the common
-   * case (and O(n^2) in the worst), but we are saved by two things.
-   * <ul>
-   * <li>This is only called when a waiting thread times out or is
-   * interrupted. Both of which should be rare.
-   * <li>The waiters list should be very short.
-   * </ul>
-   */
-  private void removeWaiter(Waiter node) {
-    node.thread = null; // mark as 'deleted'
-    restart:
-    while (true) {
-      Waiter pred = null;
-      Waiter curr = waiters;
-      if (curr == Waiter.TOMBSTONE) {
-        return; // give up if someone is calling complete
-      }
-      Waiter succ;
-      while (curr != null) {
-        succ = curr.next;
-        if (curr.thread != null) { // we aren't unlinking this node, update
-          // pred.
-          pred = curr;
-        } else if (pred != null) { // We are unlinking this node and it has a
-          // predecessor.
-          pred.next = succ;
-          if (pred.thread == null) { // We raced with another node that
-            // unlinked pred. Restart.
-            continue restart;
-          }
-        } else if (!ATOMIC_HELPER
-            .casWaiters(this, curr, succ)) { // We are unlinking head
-          continue restart; // We raced with an add or complete
-        }
-        curr = succ;
-      }
-      break;
-    }
-  }
-
-  /**
-   * Listeners also form a stack through the {@link #listeners} field.
-   */
-  @SuppressWarnings("visibilitymodifier")
-  private static final class Listener {
-    static final Listener TOMBSTONE = new Listener(null, null);
-    final Runnable task;
-    final Executor executor;
-
-    // writes to next are made visible by subsequent CAS's on the listeners
-    // field
-    @Nullable Listener next;
-
-    Listener(Runnable task, Executor executor) {
-      this.task = task;
-      this.executor = executor;
-    }
-  }
-
-  /**
-   * A special value to represent {@code null}.
-   */
-  private static final Object NULL = new Object();
-
-  /**
-   * A special value to represent failure, when {@link #setException} is
-   * called successfully.
-   */
-  @SuppressWarnings("visibilitymodifier")
-  private static final class Failure {
-    static final Failure FALLBACK_INSTANCE =
-        new Failure(
-          new Throwable("Failure occurred while trying to finish a future" +
-              ".") {
-            @Override
-            public synchronized Throwable fillInStackTrace() {
-              return this; // no stack trace
-            }
-          });
-    final Throwable exception;
-
-    Failure(Throwable exception) {
-      this.exception = checkNotNull(exception);
-    }
-  }
-
-  /**
-   * A special value to represent cancellation and the 'wasInterrupted' bit.
-   */
-  @SuppressWarnings("visibilitymodifier")
-  private static final class Cancellation {
-    final boolean wasInterrupted;
-    @Nullable final Throwable cause;
-
-    Cancellation(boolean wasInterrupted, @Nullable Throwable cause) {
-      this.wasInterrupted = wasInterrupted;
-      this.cause = cause;
-    }
-  }
-
-  /**
-   * A special value that encodes the 'setFuture' state.
-   */
-  @SuppressWarnings("visibilitymodifier")
-  private static final class SetFuture<V> implements Runnable {
-    final AbstractFuture<V> owner;
-    final ListenableFuture<? extends V> future;
-
-    SetFuture(AbstractFuture<V> owner, ListenableFuture<? extends V> future) {
-      this.owner = owner;
-      this.future = future;
-    }
-
-    @Override
-    public void run() {
-      if (owner.value != this) {
-        // nothing to do, we must have been cancelled, don't bother inspecting
-        // the future.
-        return;
-      }
-      Object valueToSet = getFutureValue(future);
-      if (ATOMIC_HELPER.casValue(owner, this, valueToSet)) {
-        complete(owner);
-      }
-    }
-  }
-
-  /**
-   * This field encodes the current state of the future.
-   * <p>
-   * <p>The valid values are:
-   * <ul>
-   * <li>{@code null} initial state, nothing has happened.
-   * <li>{@link Cancellation} terminal state, {@code cancel} was called.
-   * <li>{@link Failure} terminal state, {@code setException} was called.
-   * <li>{@link SetFuture} intermediate state, {@code setFuture} was called.
-   * <li>{@link #NULL} terminal state, {@code set(null)} was called.
-   * <li>Any other non-null value, terminal state, {@code set} was called with
-   * a non-null argument.
-   * </ul>
-   */
-  private volatile Object value;
-
-  /**
-   * All listeners.
-   */
-  private volatile Listener listeners;
-
-  /**
-   * All waiting threads.
-   */
-  private volatile Waiter waiters;
-
-  /**
-   * Constructor for use by subclasses.
-   */
-  protected AbstractFuture() {
-  }
-
-  // Gets and Timed Gets
-  //
-  // * Be responsive to interruption
-  // * Don't create Waiter nodes if you aren't going to park, this helps
-  // reduce contention on the waiters field.
-  // * Future completion is defined by when #value becomes non-null/non
-  // SetFuture
-  // * Future completion can be observed if the waiters field contains a
-  // TOMBSTONE
-
-  // Timed Get
-  // There are a few design constraints to consider
-  // * We want to be responsive to small timeouts, unpark() has non-trivial
-  // latency overheads (I have observed 12 micros on 64 bit linux systems to
-  // wake up a parked thread). So if the timeout is small we shouldn't park().
-  // This needs to be traded off with the cpu overhead of spinning, so we use
-  // SPIN_THRESHOLD_NANOS which is what AbstractQueuedSynchronizer uses for
-  // similar purposes.
-  // * We want to behave reasonably for timeouts of 0
-  // * We are more responsive to completion than timeouts. This is because
-  // parkNanos depends on system scheduling and as such we could either miss
-  // our deadline, or unpark() could be delayed so that it looks like we
-  // timed out even though we didn't. For comparison FutureTask respects
-  // completion preferably and AQS is non-deterministic (depends on where in
-  // the queue the waiter is). If we wanted to be strict about it, we could
-  // store the unpark() time in the Waiter node and we could use that to make
-  // a decision about whether or not we timed out prior to being unparked.
-
-  /*
-   * Improve the documentation of when InterruptedException is thrown. Our
-   * behavior matches the JDK's, but the JDK's documentation is misleading.
-   */
-
-  /**
-   * {@inheritDoc}
-   * <p>
-   * <p>The default {@link AbstractFuture} implementation throws {@code
-   * InterruptedException} if the current thread is interrupted before or
-   * during the call, even if the value is already available.
-   *
-   * @throws InterruptedException  if the current thread was interrupted
-   * before or during the call
-   *                               (optional but recommended).
-   * @throws CancellationException {@inheritDoc}
-   */
-  @Override
-  public V get(long timeout, TimeUnit unit)
-      throws InterruptedException, TimeoutException, ExecutionException {
-    // NOTE: if timeout < 0, remainingNanos will be < 0 and we will fall into
-    // the while(true) loop at the bottom and throw a TimeoutException.
-    long remainingNanos = unit
-        .toNanos(timeout); // we rely on the implicit null check on unit.
-    if (Thread.interrupted()) {
-      throw new InterruptedException();
-    }
-    Object localValue = value;
-    if (localValue != null & !(localValue instanceof SetFuture)) {
-      return getDoneValue(localValue);
-    }
-    // we delay calling nanoTime until we know we will need to either park or
-    // spin
-    final long endNanos = remainingNanos > 0 ? System
-        .nanoTime() + remainingNanos : 0;
-    long_wait_loop:
-    if (remainingNanos >= SPIN_THRESHOLD_NANOS) {
-      Waiter oldHead = waiters;
-      if (oldHead != Waiter.TOMBSTONE) {
-        Waiter node = new Waiter();
-        do {
-          node.setNext(oldHead);
-          if (ATOMIC_HELPER.casWaiters(this, oldHead, node)) {
-            while (true) {
-              LockSupport.parkNanos(this, remainingNanos);
-              // Check interruption first, if we woke up due to interruption
-              // we need to honor that.
-              if (Thread.interrupted()) {
-                removeWaiter(node);
-                throw new InterruptedException();
-              }
-
-              // Otherwise re-read and check doneness. If we loop then it must
-              // have been a spurious wakeup
-              localValue = value;
-              if (localValue != null & !(localValue instanceof SetFuture)) {
-                return getDoneValue(localValue);
-              }
-
-              // timed out?
-              remainingNanos = endNanos - System.nanoTime();
-              if (remainingNanos < SPIN_THRESHOLD_NANOS) {
-                // Remove the waiter, one way or another we are done parking
-                // this thread.
-                removeWaiter(node);
-                break long_wait_loop; // jump down to the busy wait loop
-              }
-            }
-          }
-          oldHead = waiters; // re-read and loop.
-        } while (oldHead != Waiter.TOMBSTONE);
-      }
-      // re-read value, if we get here then we must have observed a TOMBSTONE
-      // while trying to add a waiter.
-      return getDoneValue(value);
-    }
-    // If we get here then we have remainingNanos < SPIN_THRESHOLD_NANOS and
-    // there is no node on the waiters list
-    while (remainingNanos > 0) {
-      localValue = value;
-      if (localValue != null & !(localValue instanceof SetFuture)) {
-        return getDoneValue(localValue);
-      }
-      if (Thread.interrupted()) {
-        throw new InterruptedException();
-      }
-      remainingNanos = endNanos - System.nanoTime();
-    }
-    throw new TimeoutException();
-  }
-
-  /*
-   * Improve the documentation of when InterruptedException is thrown. Our
-   * behavior matches the JDK's, but the JDK's documentation is misleading.
-   */
-
-  /**
-   * {@inheritDoc}
-   * <p>
-   * <p>The default {@link AbstractFuture} implementation throws {@code
-   * InterruptedException} if the current thread is interrupted before or
-   * during the call, even if the value is already available.
-   *
-   * @throws InterruptedException  if the current thread was interrupted
-   * before or during the call
-   *                               (optional but recommended).
-   * @throws CancellationException {@inheritDoc}
-   */
-  @Override
-  public V get() throws InterruptedException, ExecutionException {
-    if (Thread.interrupted()) {
-      throw new InterruptedException();
-    }
-    Object localValue = value;
-    if (localValue != null & !(localValue instanceof SetFuture)) {
-      return getDoneValue(localValue);
-    }
-    Waiter oldHead = waiters;
-    if (oldHead != Waiter.TOMBSTONE) {
-      Waiter node = new Waiter();
-      do {
-        node.setNext(oldHead);
-        if (ATOMIC_HELPER.casWaiters(this, oldHead, node)) {
-          // we are on the stack, now wait for completion.
-          while (true) {
-            LockSupport.park(this);
-            // Check interruption first, if we woke up due to interruption we
-            // need to honor that.
-            if (Thread.interrupted()) {
-              removeWaiter(node);
-              throw new InterruptedException();
-            }
-            // Otherwise re-read and check doneness. If we loop then it must
-            // have been a spurious wakeup
-            localValue = value;
-            if (localValue != null & !(localValue instanceof SetFuture)) {
-              return getDoneValue(localValue);
-            }
-          }
-        }
-        oldHead = waiters; // re-read and loop.
-      } while (oldHead != Waiter.TOMBSTONE);
-    }
-    // re-read value, if we get here then we must have observed a TOMBSTONE
-    // while trying to add a waiter.
-    return getDoneValue(value);
-  }
-
-  /**
-   * Unboxes {@code obj}. Assumes that obj is not {@code null} or a
-   * {@link SetFuture}.
-   */
-  private V getDoneValue(Object obj) throws ExecutionException {
-    // While this seems like it might be too branch-y, simple benchmarking
-    // proves it to be unmeasurable (comparing done AbstractFutures with
-    // immediateFuture)
-    if (obj instanceof Cancellation) {
-      throw cancellationExceptionWithCause(
-          "Task was cancelled.", ((Cancellation) obj).cause);
-    } else if (obj instanceof Failure) {
-      throw new ExecutionException(((Failure) obj).exception);
-    } else if (obj == NULL) {
-      return null;
-    } else {
-      @SuppressWarnings("unchecked") // this is the only other option
-          V asV = (V) obj;
-      return asV;
-    }
-  }
-
-  @Override
-  public boolean isDone() {
-    final Object localValue = value;
-    return localValue != null & !(localValue instanceof SetFuture);
-  }
-
-  @Override
-  public boolean isCancelled() {
-    final Object localValue = value;
-    return localValue instanceof Cancellation;
-  }
-
-  /**
-   * {@inheritDoc}
-   * <p>
-   * <p>If a cancellation attempt succeeds on a {@code Future} that had
-   * previously been {@linkplain #setFuture set asynchronously}, then the
-   * cancellation will also be propagated to the delegate {@code Future} that
-   * was supplied in the {@code setFuture} call.
-   */
-  @Override
-  public boolean cancel(boolean mayInterruptIfRunning) {
-    Object localValue = value;
-    boolean rValue = false;
-    if (localValue == null | localValue instanceof SetFuture) {
-      // Try to delay allocating the exception. At this point we may still
-      // lose the CAS, but it is certainly less likely.
-      Throwable cause =
-          GENERATE_CANCELLATION_CAUSES
-              ? new CancellationException("Future.cancel() was called.")
-              : null;
-      Object valueToSet = new Cancellation(mayInterruptIfRunning, cause);
-      AbstractFuture<?> abstractFuture = this;
-      while (true) {
-        if (ATOMIC_HELPER.casValue(abstractFuture, localValue, valueToSet)) {
-          rValue = true;
-          // We call interruptTask before calling complete(), which is
-          // consistent with FutureTask
-          if (mayInterruptIfRunning) {
-            abstractFuture.interruptTask();
-          }
-          complete(abstractFuture);
-          if (localValue instanceof SetFuture) {
-            // propagate cancellation to the future set in setfuture, this is
-            // racy, and we don't care if we are successful or not.
-            ListenableFuture<?> futureToPropagateTo = ((SetFuture) localValue)
-                .future;
-            if (futureToPropagateTo instanceof TrustedFuture) {
-              // If the future is a TrustedFuture then we specifically avoid
-              // calling cancel() this has 2 benefits
-              // 1. for long chains of futures strung together with setFuture
-              // we consume less stack
-              // 2. we avoid allocating Cancellation objects at every level of
-              // the cancellation chain
-              // We can only do this for TrustedFuture, because
-              // TrustedFuture.cancel is final and does nothing but delegate
-              // to this method.
-              AbstractFuture<?> trusted = (AbstractFuture<?>)
-                  futureToPropagateTo;
-              localValue = trusted.value;
-              if (localValue == null | localValue instanceof SetFuture) {
-                abstractFuture = trusted;
-                continue;  // loop back up and try to complete the new future
-              }
-            } else {
-              // not a TrustedFuture, call cancel directly.
-              futureToPropagateTo.cancel(mayInterruptIfRunning);
-            }
-          }
-          break;
-        }
-        // obj changed, reread
-        localValue = abstractFuture.value;
-        if (!(localValue instanceof SetFuture)) {
-          // obj cannot be null at this point, because value can only change
-          // from null to non-null. So if value changed (and it did since we
-          // lost the CAS), then it cannot be null and since it isn't a
-          // SetFuture, then the future must be done and we should exit the loop
-          break;
-        }
-      }
-    }
-    return rValue;
-  }
-
-  /**
-   * Subclasses can override this method to implement interruption of the
-   * future's computation. The method is invoked automatically by a
-   * successful call to {@link #cancel(boolean) cancel(true)}.
-   * <p>
-   * <p>The default implementation does nothing.
-   *
-   * @since 10.0
-   */
-  protected void interruptTask() {
-  }
-
-  /**
-   * Returns true if this future was cancelled with {@code
-   * mayInterruptIfRunning} set to {@code true}.
-   *
-   * @since 14.0
-   */
-  protected final boolean wasInterrupted() {
-    final Object localValue = value;
-    return (localValue instanceof Cancellation) && ((Cancellation) localValue)
-        .wasInterrupted;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @since 10.0
-   */
-  @Override
-  public void addListener(Runnable listener, Executor executor) {
-    checkNotNull(listener, "Runnable was null.");
-    checkNotNull(executor, "Executor was null.");
-    Listener oldHead = listeners;
-    if (oldHead != Listener.TOMBSTONE) {
-      Listener newNode = new Listener(listener, executor);
-      do {
-        newNode.next = oldHead;
-        if (ATOMIC_HELPER.casListeners(this, oldHead, newNode)) {
-          return;
-        }
-        oldHead = listeners; // re-read
-      } while (oldHead != Listener.TOMBSTONE);
-    }
-    // If we get here then the Listener TOMBSTONE was set, which means the
-    // future is done, call the listener.
-    executeListener(listener, executor);
-  }
-
-  /**
-   * Sets the result of this {@code Future} unless this {@code Future} has
-   * already been cancelled or set (including
-   * {@linkplain #setFuture set asynchronously}). When a call to this method
-   * returns, the {@code Future} is guaranteed to be
-   * {@linkplain #isDone done} <b>only if</b> the call was accepted (in which
-   * case it returns {@code true}). If it returns {@code false}, the {@code
-   * Future} may have previously been set asynchronously, in which case its
-   * result may not be known yet. That result, though not yet known, cannot
-   * be overridden by a call to a {@code set*} method, only by a call to
-   * {@link #cancel}.
-   *
-   * @param val the value to be used as the result
-   * @return true if the attempt was accepted, completing the {@code Future}
-   */
-  protected boolean set(@Nullable V val) {
-    Object valueToSet = val == null ? NULL : val;
-    if (ATOMIC_HELPER.casValue(this, null, valueToSet)) {
-      complete(this);
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * Sets the failed result of this {@code Future} unless this {@code Future}
-   * has already been cancelled or set (including
-   * {@linkplain #setFuture set asynchronously}). When a call to this method
-   * returns, the {@code Future} is guaranteed to be
-   * {@linkplain #isDone done} <b>only if</b> the call was accepted (in which
-   * case it returns {@code true}). If it returns {@code false}, the
-   * {@code Future} may have previously been set asynchronously, in which case
-   * its result may not be known yet. That result, though not yet known,
-   * cannot be overridden by a call to a {@code set*} method, only by a call
-   * to {@link #cancel}.
-   *
-   * @param throwable the exception to be used as the failed result
-   * @return true if the attempt was accepted, completing the {@code Future}
-   */
-  protected boolean setException(Throwable throwable) {
-    Object valueToSet = new Failure(checkNotNull(throwable));
-    if (ATOMIC_HELPER.casValue(this, null, valueToSet)) {
-      complete(this);
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * Sets the result of this {@code Future} to match the supplied input
-   * {@code Future} once the supplied {@code Future} is done, unless this
-   * {@code Future} has already been cancelled or set (including "set
-   * asynchronously," defined below).
-   * <p>
-   * <p>If the supplied future is {@linkplain #isDone done} when this method
-   * is called and the call is accepted, then this future is guaranteed to
-   * have been completed with the supplied future by the time this method
-   * returns. If the supplied future is not done and the call is accepted, then
-   * the future will be <i>set asynchronously</i>. Note that such a result,
-   * though not yet known, cannot be overridden by a call to a {@code set*}
-   * method, only by a call to {@link #cancel}.
-   * <p>
-   * <p>If the call {@code setFuture(delegate)} is accepted and this {@code
-   * Future} is later cancelled, cancellation will be propagated to {@code
-   * delegate}. Additionally, any call to {@code setFuture} after any
-   * cancellation will propagate cancellation to the supplied {@code Future}.
-   *
-   * @param future the future to delegate to
-   * @return true if the attempt was accepted, indicating that the {@code
-   * Future} was not previously cancelled or set.
-   * @since 19.0
-   */
-  @Beta
-  @SuppressWarnings("deadstore")
-  protected boolean setFuture(ListenableFuture<? extends V> future) {
-    checkNotNull(future);
-    Object localValue = value;
-    if (localValue == null) {
-      if (future.isDone()) {
-        Object val = getFutureValue(future);
-        if (ATOMIC_HELPER.casValue(this, null, val)) {
-          complete(this);
-          return true;
-        }
-        return false;
-      }
-      SetFuture valueToSet = new SetFuture<V>(this, future);
-      if (ATOMIC_HELPER.casValue(this, null, valueToSet)) {
-        // the listener is responsible for calling completeWithFuture,
-        // directExecutor is appropriate since all we are doing is unpacking
-        // a completed future which should be fast.
-        try {
-          future.addListener(valueToSet, directExecutor());
-        } catch (Throwable t) {
-          // addListener has thrown an exception! SetFuture.run can't throw
-          // any exceptions so this must have been caused by addListener
-          // itself. The most likely explanation is a misconfigured mock. Try
-          // to switch to Failure.
-          Failure failure;
-          try {
-            failure = new Failure(t);
-          } catch (Throwable oomMostLikely) {
-            failure = Failure.FALLBACK_INSTANCE;
-          }
-          // Note: The only way this CAS could fail is if cancel() has raced
-          // with us. That is ok.
-          boolean unused = ATOMIC_HELPER.casValue(this, valueToSet, failure);
-        }
-        return true;
-      }
-      localValue = value; // we lost the cas, fall through and maybe cancel
-    }
-    // The future has already been set to something. If it is cancellation we
-    // should cancel the incoming future.
-    if (localValue instanceof Cancellation) {
-      // we don't care if it fails, this is best-effort.
-      future.cancel(((Cancellation) localValue).wasInterrupted);
-    }
-    return false;
-  }
-
-  /**
-   * Returns a value suitable for storing in the {@link #value} field, from
-   * the given future, which is assumed to be done.
-   * <p>
-   * <p>This is approximately the inverse of {@link #getDoneValue(Object)}
-   */
-  private static Object getFutureValue(ListenableFuture<?> future) {
-    Object valueToSet;
-    if (future instanceof TrustedFuture) {
-      // Break encapsulation for TrustedFuture instances since we know that
-      // subclasses cannot override .get() (since it is final) and therefore
-      // this is equivalent to calling .get() and unpacking the exceptions
-      // like we do below (just much faster because it is a single field read
-      // instead of a read, several branches and possibly creating exceptions).
-      return ((AbstractFuture<?>) future).value;
-    } else {
-      // Otherwise calculate valueToSet by calling .get()
-      try {
-        Object v = getDone(future);
-        valueToSet = v == null ? NULL : v;
-      } catch (ExecutionException exception) {
-        valueToSet = new Failure(exception.getCause());
-      } catch (CancellationException cancellation) {
-        valueToSet = new Cancellation(false, cancellation);
-      } catch (Throwable t) {
-        valueToSet = new Failure(t);
-      }
-    }
-    return valueToSet;
-  }
-
-  /**
-   * Unblocks all threads and runs all listeners.
-   */
-  private static void complete(AbstractFuture<?> future) {
-    Listener next = null;
-    outer:
-    while (true) {
-      future.releaseWaiters();
-      // We call this before the listeners in order to avoid needing to manage
-      // a separate stack data structure for them. afterDone() should be
-      // generally fast and only used for cleanup work... but in theory can
-      // also be recursive and create StackOverflowErrors
-      future.afterDone();
-      // push the current set of listeners onto next
-      next = future.clearListeners(next);
-      future = null;
-      while (next != null) {
-        Listener curr = next;
-        next = next.next;
-        Runnable task = curr.task;
-        if (task instanceof SetFuture) {
-          SetFuture<?> setFuture = (SetFuture<?>) task;
-          // We unwind setFuture specifically to avoid StackOverflowErrors in
-          // the case of long chains of SetFutures
-          // Handling this special case is important because there is no way
-          // to pass an executor to setFuture, so a user couldn't break the
-          // chain by doing this themselves.  It is also potentially common
-          // if someone writes a recursive Futures.transformAsync transformer.
-          future = setFuture.owner;
-          if (future.value == setFuture) {
-            Object valueToSet = getFutureValue(setFuture.future);
-            if (ATOMIC_HELPER.casValue(future, setFuture, valueToSet)) {
-              continue outer;
-            }
-          }
-          // Otherwise the future we were trying to set is already done.
-        } else {
-          executeListener(task, curr.executor);
-        }
-      }
-      break;
-    }
-  }
-
-  public static <V> V getDone(Future<V> future) throws ExecutionException {
-    /*
-     * We throw IllegalStateException, since the call could succeed later.
-     * Perhaps we "should" throw IllegalArgumentException, since the call
-     * could succeed with a different argument. Those exceptions' docs
-     * suggest that either is acceptable. Google's Java Practices page
-     * recommends IllegalArgumentException here, in part to keep its
-     * recommendation simple: Static methods should throw
-     * IllegalStateException only when they use static state.
-     *
-     * Why do we deviate here? The answer: We want fluentFuture.getDone()
-     * to throw the same exception as Futures.getDone(fluentFuture).
-     */
-    Preconditions.checkState(future.isDone(), "Future was expected to be " +
-        "done:" +
-        " %s", future);
-    return Uninterruptibles.getUninterruptibly(future);
-  }
-
-  /**
-   * Callback method that is called exactly once after the future is completed.
-   * <p>
-   * <p>If {@link #interruptTask} is also run during completion,
-   * {@link #afterDone} runs after it.
-   * <p>
-   * <p>The default implementation of this method in {@code AbstractFuture}
-   * does nothing.  This is intended for very lightweight cleanup work, for
-   * example, timing statistics or clearing fields.
-   * If your task does anything heavier, consider just using a listener with
-   * an executor.
-   *
-   * @since 20.0
-   */
-  @Beta
-  protected void afterDone() {
-  }
-
-  /**
-   * If this future has been cancelled (and possibly interrupted), cancels
-   * (and possibly interrupts) the given future (if available).
-   * <p>
-   * <p>This method should be used only when this future is completed. It is
-   * designed to be called from {@code done}.
-   */
-  final void maybePropagateCancellation(@Nullable Future<?> related) {
-    if (related != null & isCancelled()) {
-      related.cancel(wasInterrupted());
-    }
-  }
-
-  /**
-   * Releases all threads in the {@link #waiters} list, and clears the list.
-   */
-  private void releaseWaiters() {
-    Waiter head;
-    do {
-      head = waiters;
-    } while (!ATOMIC_HELPER.casWaiters(this, head, Waiter.TOMBSTONE));
-    for (Waiter currentWaiter = head;
-         currentWaiter != null; currentWaiter = currentWaiter.next) {
-      currentWaiter.unpark();
-    }
-  }
-
-  /**
-   * Clears the {@link #listeners} list and prepends its contents to {@code
-   * onto}, least recently added first.
-   */
-  private Listener clearListeners(Listener onto) {
-    // We need to
-    // 1. atomically swap the listeners with TOMBSTONE, this is because
-    // addListener uses that to synchronize with us
-    // 2. reverse the linked list, because despite our rather clear contract,
-    // people depend on us executing listeners in the order they were added
-    // 3. push all the items onto 'onto' and return the new head of the stack
-    Listener head;
-    do {
-      head = listeners;
-    } while (!ATOMIC_HELPER.casListeners(this, head, Listener.TOMBSTONE));
-    Listener reversedList = onto;
-    while (head != null) {
-      Listener tmp = head;
-      head = head.next;
-      tmp.next = reversedList;
-      reversedList = tmp;
-    }
-    return reversedList;
-  }
-
-  /**
-   * Submits the given runnable to the given {@link Executor} catching and
-   * logging all {@linkplain RuntimeException runtime exceptions} thrown by
-   * the executor.
-   */
-  private static void executeListener(Runnable runnable, Executor executor) {
-    try {
-      executor.execute(runnable);
-    } catch (RuntimeException e) {
-      // Log it and keep going -- bad runnable and/or executor. Don't punish
-      // the other runnables if we're given a bad one. We only catch
-      // RuntimeException because we want Errors to propagate up.
-      LOG.log(
-          Level.SEVERE,
-          "RuntimeException while executing runnable " + runnable + " with " +
-              "executor " + executor,
-          e);
-    }
-  }
-
-  private abstract static class AtomicHelper {
-    /**
-     * Non volatile write of the thread to the {@link Waiter#thread} field.
-     */
-    abstract void putThread(Waiter waiter, Thread newValue);
-
-    /**
-     * Non volatile write of the waiter to the {@link Waiter#next} field.
-     */
-    abstract void putNext(Waiter waiter, Waiter newValue);
-
-    /**
-     * Performs a CAS operation on the {@link #waiters} field.
-     */
-    abstract boolean casWaiters(
-        AbstractFuture<?> future, Waiter expect,
-        Waiter update);
-
-    /**
-     * Performs a CAS operation on the {@link #listeners} field.
-     */
-    abstract boolean casListeners(
-        AbstractFuture<?> future, Listener expect,
-        Listener update);
-
-    /**
-     * Performs a CAS operation on the {@link #value} field.
-     */
-    abstract boolean casValue(
-        AbstractFuture<?> future, Object expect, Object update);
-  }
-
-  /**
-   * {@link AtomicHelper} based on {@link sun.misc.Unsafe}.
-   * <p>
-   * <p>Static initialization of this class will fail if the
-   * {@link sun.misc.Unsafe} object cannot be accessed.
-   */
-  private static final class UnsafeAtomicHelper extends AtomicHelper {
-    static final sun.misc.Unsafe UNSAFE;
-    static final long LISTENERS_OFFSET;
-    static final long WAITERS_OFFSET;
-    static final long VALUE_OFFSET;
-    static final long WAITER_THREAD_OFFSET;
-    static final long WAITER_NEXT_OFFSET;
-
-    static {
-      sun.misc.Unsafe unsafe = null;
-      try {
-        unsafe = sun.misc.Unsafe.getUnsafe();
-      } catch (SecurityException tryReflectionInstead) {
-        try {
-          unsafe =
-              AccessController.doPrivileged(
-                  new PrivilegedExceptionAction<sun.misc.Unsafe>() {
-                    @Override
-                    public sun.misc.Unsafe run() throws Exception {
-                      Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
-                      for (java.lang.reflect.Field f : k.getDeclaredFields()) {
-                        f.setAccessible(true);
-                        Object x = f.get(null);
-                        if (k.isInstance(x)) {
-                          return k.cast(x);
-                        }
-                      }
-                      throw new NoSuchFieldError("the Unsafe");
-                    }
-                  });
-        } catch (PrivilegedActionException e) {
-          throw new RuntimeException(
-              "Could not initialize intrinsics", e.getCause());
-        }
-      }
-      try {
-        Class<?> abstractFuture = AbstractFuture.class;
-        WAITERS_OFFSET = unsafe
-            .objectFieldOffset(abstractFuture.getDeclaredField("waiters"));
-        LISTENERS_OFFSET = unsafe
-            .objectFieldOffset(abstractFuture.getDeclaredField("listeners"));
-        VALUE_OFFSET = unsafe
-            .objectFieldOffset(abstractFuture.getDeclaredField("value"));
-        WAITER_THREAD_OFFSET = unsafe
-            .objectFieldOffset(Waiter.class.getDeclaredField("thread"));
-        WAITER_NEXT_OFFSET = unsafe
-            .objectFieldOffset(Waiter.class.getDeclaredField("next"));
-        UNSAFE = unsafe;
-      } catch (Exception e) {
-        throwIfUnchecked(e);
-        throw new RuntimeException(e);
-      }
-    }
-
-    public static void throwIfUnchecked(Throwable throwable) {
-      checkNotNull(throwable);
-      if (throwable instanceof RuntimeException) {
-        throw (RuntimeException) throwable;
-      }
-      if (throwable instanceof Error) {
-        throw (Error) throwable;
-      }
-    }
-
-    @Override
-    void putThread(Waiter waiter, Thread newValue) {
-      UNSAFE.putObject(waiter, WAITER_THREAD_OFFSET, newValue);
-    }
-
-    @Override
-    void putNext(Waiter waiter, Waiter newValue) {
-      UNSAFE.putObject(waiter, WAITER_NEXT_OFFSET, newValue);
-    }
-
-    /**
-     * Performs a CAS operation on the {@link #waiters} field.
-     */
-    @Override
-    boolean casWaiters(AbstractFuture<?> future, Waiter expect, Waiter
-        update) {
-      return UNSAFE
-          .compareAndSwapObject(future, WAITERS_OFFSET, expect, update);
-    }
-
-    /**
-     * Performs a CAS operation on the {@link #listeners} field.
-     */
-    @Override
-    boolean casListeners(
-        AbstractFuture<?> future, Listener expect, Listener update) {
-      return UNSAFE
-          .compareAndSwapObject(future, LISTENERS_OFFSET, expect, update);
-    }
-
-    /**
-     * Performs a CAS operation on the {@link #value} field.
-     */
-    @Override
-    boolean casValue(AbstractFuture<?> future, Object expect, Object update) {
-      return UNSAFE.compareAndSwapObject(future, VALUE_OFFSET, expect, update);
-    }
-  }
-
-  /**
-   * {@link AtomicHelper} based on {@link AtomicReferenceFieldUpdater}.
-   */
-  @SuppressWarnings("visibilitymodifier")
-  private static final class SafeAtomicHelper extends AtomicHelper {
-    final AtomicReferenceFieldUpdater<Waiter, Thread> waiterThreadUpdater;
-    final AtomicReferenceFieldUpdater<Waiter, Waiter> waiterNextUpdater;
-    final AtomicReferenceFieldUpdater<AbstractFuture, Waiter> waitersUpdater;
-    final AtomicReferenceFieldUpdater<AbstractFuture, Listener>
-        listenersUpdater;
-    final AtomicReferenceFieldUpdater<AbstractFuture, Object> valueUpdater;
-
-    SafeAtomicHelper(
-        AtomicReferenceFieldUpdater<Waiter, Thread> waiterThreadUpdater,
-        AtomicReferenceFieldUpdater<Waiter, Waiter> waiterNextUpdater,
-        AtomicReferenceFieldUpdater<AbstractFuture, Waiter> waitersUpdater,
-        AtomicReferenceFieldUpdater<AbstractFuture, Listener> listenersUpdater,
-        AtomicReferenceFieldUpdater<AbstractFuture, Object> valueUpdater) {
-      this.waiterThreadUpdater = waiterThreadUpdater;
-      this.waiterNextUpdater = waiterNextUpdater;
-      this.waitersUpdater = waitersUpdater;
-      this.listenersUpdater = listenersUpdater;
-      this.valueUpdater = valueUpdater;
-    }
-
-    @Override
-    void putThread(Waiter waiter, Thread newValue) {
-      waiterThreadUpdater.lazySet(waiter, newValue);
-    }
-
-    @Override
-    void putNext(Waiter waiter, Waiter newValue) {
-      waiterNextUpdater.lazySet(waiter, newValue);
-    }
-
-    @Override
-    boolean casWaiters(AbstractFuture<?> future, Waiter expect, Waiter
-        update) {
-      return waitersUpdater.compareAndSet(future, expect, update);
-    }
-
-    @Override
-    boolean casListeners(
-        AbstractFuture<?> future, Listener expect, Listener update) {
-      return listenersUpdater.compareAndSet(future, expect, update);
-    }
-
-    @Override
-    boolean casValue(AbstractFuture<?> future, Object expect, Object update) {
-      return valueUpdater.compareAndSet(future, expect, update);
-    }
-  }
-
-  /**
-   * {@link AtomicHelper} based on {@code synchronized} and volatile writes.
-   *
-   * <p>This is an implementation of last resort for when certain basic VM
-   * features are broken (like AtomicReferenceFieldUpdater).
-   */
-  private static final class SynchronizedHelper extends AtomicHelper {
-    @Override
-    void putThread(Waiter waiter, Thread newValue) {
-      waiter.thread = newValue;
-    }
-
-    @Override
-    void putNext(Waiter waiter, Waiter newValue) {
-      waiter.next = newValue;
-    }
-
-    @Override
-    boolean casWaiters(AbstractFuture<?> future, Waiter expect, Waiter
-        update) {
-      synchronized (future) {
-        if (future.waiters == expect) {
-          future.waiters = update;
-          return true;
-        }
-        return false;
-      }
-    }
-
-    @Override
-    boolean casListeners(
-        AbstractFuture<?> future, Listener expect, Listener update) {
-      synchronized (future) {
-        if (future.listeners == expect) {
-          future.listeners = update;
-          return true;
-        }
-        return false;
-      }
-    }
-
-    @Override
-    boolean casValue(AbstractFuture<?> future, Object expect, Object update) {
-      synchronized (future) {
-        if (future.value == expect) {
-          future.value = update;
-          return true;
-        }
-        return false;
-      }
-    }
-  }
-
-  private static CancellationException cancellationExceptionWithCause(
-      @Nullable String message, @Nullable Throwable cause) {
-    CancellationException exception = new CancellationException(message);
-    exception.initCause(cause);
-    return exception;
-  }
-
-  /**
-   * Returns an {@link Executor} that runs each task in the thread that invokes
-   * {@link Executor#execute execute}, as in {@link CallerRunsPolicy}.
-   *
-   * <p>This instance is equivalent to: <pre>   {@code
-   *   final class DirectExecutor implements Executor {
-   *     public void execute(Runnable r) {
-   *       r.run();
-   *     }
-   *   }}</pre>
-   */
-  public static Executor directExecutor() {
-    return DirectExecutor.INSTANCE;
-  }
-
-  /**
-   * See {@link #directExecutor} for behavioral notes.
-   */
-  private enum DirectExecutor implements Executor {
-    INSTANCE;
-
-    @Override
-    public void execute(Runnable command) {
-      command.run();
-    }
-
-    @Override
-    public String toString() {
-      return "MoreExecutors.directExecutor()";
-    }
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AsyncChecker.java
deleted file mode 100644
index f7391e3..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AsyncChecker.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import com.google.common.util.concurrent.ListenableFuture;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
-
-import java.util.Optional;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-
-/**
- * A class that can be used to schedule an asynchronous check on a given
- * {@link Checkable}. If the check is successfully scheduled then a
- * {@link ListenableFuture} is returned.
- *
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public interface AsyncChecker<K, V> {
-
-  /**
-   * Schedule an asynchronous check for the given object.
-   *
-   * @param target object to be checked.
-   *
-   * @param context the interpretation of the context depends on the
-   *                target.
-   *
-   * @return an {@link Optional} of {@link ListenableFuture} that can be
-   *         used to retrieve the result of the asynchronous check.
-   */
-  Optional<ListenableFuture<V>> schedule(Checkable<K, V> target, K context);
-
-  /**
-   * Cancel all executing checks and wait for them to complete.
-   * First attempts a graceful cancellation, then cancels forcefully.
-   * Waits for the supplied timeout after both attempts.
-   *
-   * See {@link ExecutorService#awaitTermination} for a description of
-   * the parameters.
-   *
-   * @throws InterruptedException if the wait is interrupted.
-   */
-  void shutdownAndWait(long timeout, TimeUnit timeUnit)
-      throws InterruptedException;
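-
-  // A minimal, hypothetical usage sketch (names like "myChecker", "volume",
-  // "callback" and "executor" are illustrative only, not part of this
-  // interface):
-  //
-  //   Optional<ListenableFuture<VolumeCheckResult>> olf =
-  //       myChecker.schedule(volume, null);
-  //   if (olf.isPresent()) {
-  //     Futures.addCallback(olf.get(), callback, executor);
-  //   }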
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
deleted file mode 100644
index 3e89f90..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ /dev/null
@@ -1,455 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import javax.annotation.Nullable;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.GetSpaceUsed;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
-import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
-import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
-import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
-import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
-import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-
-import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.util.Time;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Properties;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * HddsVolume represents volume in a datanode. {@link VolumeSet} maintains a
- * list of HddsVolumes, one for each volume in the Datanode.
- * {@link VolumeInfo} is encompassed by this class.
- * <p>
- * The disk layout per volume is as follows:
- * <p>../hdds/VERSION
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID
- * >>/metadata}
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID
- * >>/<<dataDir>>}
- * <p>
- * Each hdds volume has its own VERSION file. The hdds volume will have one
- * scmUuid directory for each SCM it is a part of (currently only one SCM is
- * supported).
- *
- * During DN startup, if the VERSION file exists, we verify that the
- * clusterID in the version file matches the clusterID from SCM.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-@SuppressWarnings("finalclass")
-public class HddsVolume
-    implements Checkable<Boolean, VolumeCheckResult> {
-
-  private static final Logger LOG = LoggerFactory.getLogger(HddsVolume.class);
-
-  public static final String HDDS_VOLUME_DIR = "hdds";
-
-  private final File hddsRootDir;
-  private final VolumeInfo volumeInfo;
-  private VolumeState state;
-  private final VolumeIOStats volumeIOStats;
-
-  // VERSION file properties
-  private String storageID;       // id of the file system
-  private String clusterID;       // id of the cluster
-  private String datanodeUuid;    // id of the DataNode
-  private long cTime;             // creation time of the file system state
-  private int layoutVersion;      // layout version of the storage data
-  private final AtomicLong committedBytes; // space reserved until open containers become full
-
-  /**
-   * Run a check on the current volume to determine if it is healthy.
-   * @param unused context for the check, ignored.
-   * @return result of checking the volume.
-   * @throws Exception if an exception was encountered while running
-   *            the volume check.
-   */
-  @Override
-  public VolumeCheckResult check(@Nullable Boolean unused) throws Exception {
-    DiskChecker.checkDir(hddsRootDir);
-    return VolumeCheckResult.HEALTHY;
-  }
-
-  /**
-   * Builder for HddsVolume.
-   */
-  public static class Builder {
-    private final String volumeRootStr;
-    private Configuration conf;
-    private StorageType storageType;
-    private long configuredCapacity;
-
-    private String datanodeUuid;
-    private String clusterID;
-    private boolean failedVolume = false;
-
-    public Builder(String rootDirStr) {
-      this.volumeRootStr = rootDirStr;
-    }
-
-    public Builder conf(Configuration config) {
-      this.conf = config;
-      return this;
-    }
-
-    public Builder storageType(StorageType st) {
-      this.storageType = st;
-      return this;
-    }
-
-    public Builder configuredCapacity(long capacity) {
-      this.configuredCapacity = capacity;
-      return this;
-    }
-
-    public Builder datanodeUuid(String datanodeUUID) {
-      this.datanodeUuid = datanodeUUID;
-      return this;
-    }
-
-    public Builder clusterID(String cid) {
-      this.clusterID = cid;
-      return this;
-    }
-
-    // This is used to create failed HddsVolume objects when an exception
-    // is thrown while constructing a regular HddsVolume.
-    public Builder failedVolume(boolean failed) {
-      this.failedVolume = failed;
-      return this;
-    }
-
-    public HddsVolume build() throws IOException {
-      return new HddsVolume(this);
-    }
-  }
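-
-  // A minimal, hypothetical sketch of how a volume is typically built
-  // (the root directory and configuration values are illustrative
-  // assumptions, not defaults of this class):
-  //
-  //   HddsVolume volume = new HddsVolume.Builder("/data/disk1")
-  //       .conf(new Configuration())
-  //       .storageType(StorageType.DISK)
-  //       .datanodeUuid(UUID.randomUUID().toString())
-  //       .build();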
-
-  private HddsVolume(Builder b) throws IOException {
-    if (!b.failedVolume) {
-      StorageLocation location = StorageLocation.parse(b.volumeRootStr);
-      hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR);
-      this.state = VolumeState.NOT_INITIALIZED;
-      this.clusterID = b.clusterID;
-      this.datanodeUuid = b.datanodeUuid;
-      this.volumeIOStats = new VolumeIOStats();
-
-      VolumeInfo.Builder volumeBuilder =
-          new VolumeInfo.Builder(b.volumeRootStr, b.conf)
-              .storageType(b.storageType)
-              .configuredCapacity(b.configuredCapacity);
-      this.volumeInfo = volumeBuilder.build();
-      this.committedBytes = new AtomicLong(0);
-
-      LOG.info("Creating Volume: " + this.hddsRootDir + " of  storage type : " +
-          b.storageType + " and capacity : " + volumeInfo.getCapacity());
-
-      initialize();
-    } else {
-      // Builder is called with failedVolume set, so create a failed
-      // HddsVolume object.
-      hddsRootDir = new File(b.volumeRootStr);
-      volumeIOStats = null;
-      volumeInfo = null;
-      storageID = UUID.randomUUID().toString();
-      state = VolumeState.FAILED;
-      committedBytes = null;
-    }
-  }
-
-  public VolumeInfo getVolumeInfo() {
-    return volumeInfo;
-  }
-
-  /**
-   * Initializes the volume.
-   * Creates the VERSION file if not present; otherwise reads and verifies
-   * the existing file.
-   * @throws IOException if the volume is in an inconsistent state or
-   *                     cannot be initialized.
-   */
-  private void initialize() throws IOException {
-    VolumeState initialVolumeState = analyzeVolumeState();
-    switch (initialVolumeState) {
-    case NON_EXISTENT:
-      // Root directory does not exist. Create it.
-      if (!hddsRootDir.mkdirs()) {
-        throw new IOException("Cannot create directory " + hddsRootDir);
-      }
-      setState(VolumeState.NOT_FORMATTED);
-      createVersionFile();
-      break;
-    case NOT_FORMATTED:
-      // Version File does not exist. Create it.
-      createVersionFile();
-      break;
-    case NOT_INITIALIZED:
-      // Version File exists. Verify its correctness and update property fields.
-      readVersionFile();
-      setState(VolumeState.NORMAL);
-      break;
-    case INCONSISTENT:
-      // Volume Root is in an inconsistent state. Skip loading this volume.
-      throw new IOException("Volume is in an " + VolumeState.INCONSISTENT +
-          " state. Skipped loading volume: " + hddsRootDir.getPath());
-    default:
-      throw new IOException("Unrecognized initial state : " +
-          intialVolumeState + "of volume : " + hddsRootDir);
-    }
-  }
-
-  private VolumeState analyzeVolumeState() {
-    if (!hddsRootDir.exists()) {
-      // Volume Root does not exist.
-      return VolumeState.NON_EXISTENT;
-    }
-    if (!hddsRootDir.isDirectory()) {
-      // Volume Root exists but is not a directory.
-      return VolumeState.INCONSISTENT;
-    }
-    File[] files = hddsRootDir.listFiles();
-    if (files == null || files.length == 0) {
-      // Volume Root exists and is empty.
-      return VolumeState.NOT_FORMATTED;
-    }
-    if (!getVersionFile().exists()) {
-      // Volume Root is non empty but VERSION file does not exist.
-      return VolumeState.INCONSISTENT;
-    }
-    // Volume Root and VERSION file exist.
-    return VolumeState.NOT_INITIALIZED;
-  }
-
-  public void format(String cid) throws IOException {
-    Preconditions.checkNotNull(cid, "clusterID cannot be null while " +
-        "formatting Volume");
-    this.clusterID = cid;
-    initialize();
-  }
-
-  /**
-   * Create Version File and write property fields into it.
-   * @throws IOException if the version file cannot be written.
-   */
-  private void createVersionFile() throws IOException {
-    this.storageID = HddsVolumeUtil.generateUuid();
-    this.cTime = Time.now();
-    this.layoutVersion = ChunkLayOutVersion.getLatestVersion().getVersion();
-
-    if (this.clusterID == null || datanodeUuid == null) {
-      // HddsDatanodeService does not have the cluster information yet. Wait
-      // for registration with SCM.
-      LOG.debug("ClusterID not available. Cannot format the volume {}",
-          this.hddsRootDir.getPath());
-      setState(VolumeState.NOT_FORMATTED);
-    } else {
-      // Write the version file to disk.
-      writeVersionFile();
-      setState(VolumeState.NORMAL);
-    }
-  }
-
-  private void writeVersionFile() throws IOException {
-    Preconditions.checkNotNull(this.storageID,
-        "StorageID cannot be null in Version File");
-    Preconditions.checkNotNull(this.clusterID,
-        "ClusterID cannot be null in Version File");
-    Preconditions.checkNotNull(this.datanodeUuid,
-        "DatanodeUUID cannot be null in Version File");
-    Preconditions.checkArgument(this.cTime > 0,
-        "Creation Time should be positive");
-    Preconditions.checkArgument(this.layoutVersion ==
-            DataNodeLayoutVersion.getLatestVersion().getVersion(),
-        "Version File should have the latest LayOutVersion");
-
-    File versionFile = getVersionFile();
-    LOG.debug("Writing Version file to disk, {}", versionFile);
-
-    DatanodeVersionFile dnVersionFile = new DatanodeVersionFile(this.storageID,
-        this.clusterID, this.datanodeUuid, this.cTime, this.layoutVersion);
-    dnVersionFile.createVersionFile(versionFile);
-  }
-
-  /**
-   * Read Version File and update property fields.
-   * Get common storage fields.
-   * Should be overridden if additional fields need to be read.
-   *
-   * @throws IOException on error
-   */
-  private void readVersionFile() throws IOException {
-    File versionFile = getVersionFile();
-    Properties props = DatanodeVersionFile.readFrom(versionFile);
-    if (props.isEmpty()) {
-      throw new InconsistentStorageStateException(
-          "Version file " + versionFile + " is missing");
-    }
-
-    LOG.debug("Reading Version file from disk, {}", versionFile);
-    this.storageID = HddsVolumeUtil.getStorageID(props, versionFile);
-    this.clusterID = HddsVolumeUtil.getClusterID(props, versionFile,
-        this.clusterID);
-    this.datanodeUuid = HddsVolumeUtil.getDatanodeUUID(props, versionFile,
-        this.datanodeUuid);
-    this.cTime = HddsVolumeUtil.getCreationTime(props, versionFile);
-    this.layoutVersion = HddsVolumeUtil.getLayOutVersion(props, versionFile);
-  }
-
-  private File getVersionFile() {
-    return HddsVolumeUtil.getVersionFile(hddsRootDir);
-  }
-
-  public File getHddsRootDir() {
-    return hddsRootDir;
-  }
-
-  public StorageType getStorageType() {
-    if (volumeInfo != null) {
-      return volumeInfo.getStorageType();
-    }
-    return StorageType.DEFAULT;
-  }
-
-  public String getStorageID() {
-    return storageID;
-  }
-
-  public String getClusterID() {
-    return clusterID;
-  }
-
-  public String getDatanodeUuid() {
-    return datanodeUuid;
-  }
-
-  public long getCTime() {
-    return cTime;
-  }
-
-  public int getLayoutVersion() {
-    return layoutVersion;
-  }
-
-  public VolumeState getStorageState() {
-    return state;
-  }
-
-  public long getCapacity() throws IOException {
-    if (volumeInfo != null) {
-      return volumeInfo.getCapacity();
-    }
-    return 0;
-  }
-
-  public long getAvailable() throws IOException {
-    if (volumeInfo != null) {
-      return volumeInfo.getAvailable();
-    }
-    return 0;
-  }
-
-  public void setState(VolumeState state) {
-    this.state = state;
-  }
-
-  public boolean isFailed() {
-    return (state == VolumeState.FAILED);
-  }
-
-  public VolumeIOStats getVolumeIOStats() {
-    return volumeIOStats;
-  }
-
-  public void failVolume() {
-    setState(VolumeState.FAILED);
-    if (volumeInfo != null) {
-      volumeInfo.shutdownUsageThread();
-    }
-  }
-
-  public void shutdown() {
-    this.state = VolumeState.NON_EXISTENT;
-    if (volumeInfo != null) {
-      volumeInfo.shutdownUsageThread();
-    }
-  }
-
-  /**
-   * VolumeState represents the different states a HddsVolume can be in.
-   * NORMAL          =&gt; Volume can be used for storage
-   * FAILED          =&gt; Volume has failed and can no longer be used for
-   *                    storing containers.
-   * NON_EXISTENT    =&gt; Volume Root dir does not exist
-   * INCONSISTENT    =&gt; Volume Root dir is not empty but VERSION file is
-   *                    missing or Volume Root dir is not a directory
-   * NOT_FORMATTED   =&gt; Volume Root exists but not formatted (no VERSION
-   *                    file)
-   * NOT_INITIALIZED =&gt; VERSION file exists but has not been verified for
-   *                    correctness.
-   */
-  public enum VolumeState {
-    NORMAL,
-    FAILED,
-    NON_EXISTENT,
-    INCONSISTENT,
-    NOT_FORMATTED,
-    NOT_INITIALIZED
-  }
-
-  /**
-   * add "delta" bytes to committed space in the volume.
-   * @param delta bytes to add to committed space counter
-   * @return bytes of committed space
-   */
-  public long incCommittedBytes(long delta) {
-    return committedBytes.addAndGet(delta);
-  }
-
-  /**
-   * Return the committed space in the volume.
-   * @return bytes of committed space
-   */
-  public long getCommittedBytes() {
-    return committedBytes.get();
-  }
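-
-  // A hedged sketch of the intended accounting: a caller reserves space
-  // when opening a container and releases what went unused when it closes
-  // ("maxContainerSize" and "bytesStillUnused" are illustrative names, not
-  // fields of this class):
-  //
-  //   volume.incCommittedBytes(maxContainerSize);   // on container open
-  //   ...
-  //   volume.incCommittedBytes(-bytesStillUnused);  // when it closes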
-
-  /**
-   * Only for testing. Do not use otherwise.
-   */
-  @VisibleForTesting
-  public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
-    if (volumeInfo != null) {
-      volumeInfo.setScmUsageForTesting(scmUsageForTest);
-    }
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java
deleted file mode 100644
index 800789f..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java
+++ /dev/null
@@ -1,424 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.Timer;
-
-import static org.apache.hadoop.hdfs.server.datanode.DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Optional;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY;
-
-
-/**
- * A class that encapsulates running disk checks against each HDDS volume and
- * allows retrieving a list of failed volumes.
- */
-public class HddsVolumeChecker {
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(HddsVolumeChecker.class);
-
-  private AsyncChecker<Boolean, VolumeCheckResult> delegateChecker;
-
-  private final AtomicLong numVolumeChecks = new AtomicLong(0);
-  private final AtomicLong numAllVolumeChecks = new AtomicLong(0);
-  private final AtomicLong numSkippedChecks = new AtomicLong(0);
-
-  /**
-   * Max allowed time for a disk check in milliseconds. If the check
-   * doesn't complete within this time we declare the disk as dead.
-   */
-  private final long maxAllowedTimeForCheckMs;
-
-  /**
-   * Minimum time between two successive disk checks of a volume.
-   */
-  private final long minDiskCheckGapMs;
-
-  /**
-   * Timestamp of the last check of all volumes.
-   */
-  private long lastAllVolumesCheck;
-
-  private final Timer timer;
-
-  private final ExecutorService checkVolumeResultHandlerExecutorService;
-
-  /**
-   * @param conf Configuration object.
-   * @param timer {@link Timer} object used for throttling checks.
-   */
-  public HddsVolumeChecker(Configuration conf, Timer timer)
-      throws DiskErrorException {
-    maxAllowedTimeForCheckMs = conf.getTimeDuration(
-        DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY,
-        DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT,
-        TimeUnit.MILLISECONDS);
-
-    if (maxAllowedTimeForCheckMs <= 0) {
-      throw new DiskErrorException("Invalid value configured for "
-          + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
-          + maxAllowedTimeForCheckMs + " (should be > 0)");
-    }
-
-    this.timer = timer;
-
-    // Maximum number of volume failures that can be tolerated without
-    // declaring a fatal error.
-    int maxVolumeFailuresTolerated = conf.getInt(
-        DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
-        DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);
-
-    minDiskCheckGapMs = conf.getTimeDuration(
-        DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
-        DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_DEFAULT,
-        TimeUnit.MILLISECONDS);
-
-    if (minDiskCheckGapMs < 0) {
-      throw new DiskErrorException("Invalid value configured for "
-          + DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY + " - "
-          + minDiskCheckGapMs + " (should be >= 0)");
-    }
-
-    long diskCheckTimeout = conf.getTimeDuration(
-        DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY,
-        DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT,
-        TimeUnit.MILLISECONDS);
-
-    if (diskCheckTimeout < 0) {
-      throw new DiskErrorException("Invalid value configured for "
-          + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
-          + diskCheckTimeout + " (should be >= 0)");
-    }
-
-    lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs;
-
-    if (maxVolumeFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
-      throw new DiskErrorException("Invalid value configured for "
-          + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
-          + maxVolumeFailuresTolerated + " "
-          + DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG);
-    }
-
-    delegateChecker = new ThrottledAsyncChecker<>(
-        timer, minDiskCheckGapMs, diskCheckTimeout,
-        Executors.newCachedThreadPool(
-            new ThreadFactoryBuilder()
-                .setNameFormat("DataNode DiskChecker thread %d")
-                .setDaemon(true)
-                .build()));
-
-    checkVolumeResultHandlerExecutorService = Executors.newCachedThreadPool(
-        new ThreadFactoryBuilder()
-            .setNameFormat("VolumeCheck ResultHandler thread %d")
-            .setDaemon(true)
-            .build());
-  }
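-
-  // A minimal construction sketch (a hedged example; Timer here is
-  // org.apache.hadoop.util.Timer, and the constructor throws
-  // DiskErrorException on invalid configuration):
-  //
-  //   HddsVolumeChecker checker =
-  //       new HddsVolumeChecker(new Configuration(), new Timer());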
-
-  /**
-   * Run checks against all HDDS volumes.
-   *
-   * This check may be performed at service startup and subsequently at
-   * regular intervals to detect and handle failed volumes.
-   *
-   * @param volumes - Set of volumes to be checked. This set must not be
-   *                mutated for the duration of the check, otherwise the
-   *                results are undefined.
-   *
-   * @return set of failed volumes.
-   */
-  public Set<HddsVolume> checkAllVolumes(Collection<HddsVolume> volumes)
-      throws InterruptedException {
-    final long gap = timer.monotonicNow() - lastAllVolumesCheck;
-    if (gap < minDiskCheckGapMs) {
-      numSkippedChecks.incrementAndGet();
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(
-            "Skipped checking all volumes, time since last check {} is less " +
-                "than the minimum gap between checks ({} ms).",
-            gap, minDiskCheckGapMs);
-      }
-      return Collections.emptySet();
-    }
-
-    lastAllVolumesCheck = timer.monotonicNow();
-    final Set<HddsVolume> healthyVolumes = new HashSet<>();
-    final Set<HddsVolume> failedVolumes = new HashSet<>();
-    final Set<HddsVolume> allVolumes = new HashSet<>();
-
-    final AtomicLong numVolumes = new AtomicLong(volumes.size());
-    final CountDownLatch latch = new CountDownLatch(1);
-
-    for (HddsVolume v : volumes) {
-      Optional<ListenableFuture<VolumeCheckResult>> olf =
-          delegateChecker.schedule(v, null);
-      LOG.info("Scheduled health check for volume {}", v);
-      if (olf.isPresent()) {
-        allVolumes.add(v);
-        Futures.addCallback(olf.get(),
-            new ResultHandler(v, healthyVolumes, failedVolumes,
-                numVolumes, (ignored1, ignored2) -> latch.countDown()));
-      } else {
-        if (numVolumes.decrementAndGet() == 0) {
-          latch.countDown();
-        }
-      }
-    }
-
-    // Wait until our timeout elapses, after which we give up on
-    // the remaining volumes.
-    if (!latch.await(maxAllowedTimeForCheckMs, TimeUnit.MILLISECONDS)) {
-      LOG.warn("checkAllVolumes timed out after {} ms" +
-          maxAllowedTimeForCheckMs);
-    }
-
-    numAllVolumeChecks.incrementAndGet();
-    synchronized (this) {
-      // All volumes that have not been detected as healthy should be
-      // considered failed. This is a superset of 'failedVolumes'.
-      //
-      // Make a copy under the mutex as Sets.difference() returns a view
-      // of a potentially changing set.
-      return new HashSet<>(Sets.difference(allVolumes, healthyVolumes));
-    }
-  }
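-
-  // A minimal, hypothetical usage sketch ("volumeSet" is an assumed caller
-  // with illustrative accessors, not part of this class):
-  //
-  //   Set<HddsVolume> failed =
-  //       checker.checkAllVolumes(volumeSet.getVolumesList());
-  //   for (HddsVolume v : failed) {
-  //     volumeSet.failVolume(v.getHddsRootDir().getPath());
-  //   }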
-
-  /**
-   * A callback interface that is supplied the result of running an
-   * async disk check on multiple volumes.
-   */
-  public interface Callback {
-    /**
-     * @param healthyVolumes set of volumes that passed disk checks.
-     * @param failedVolumes set of volumes that failed disk checks.
-     */
-    void call(Set<HddsVolume> healthyVolumes,
-              Set<HddsVolume> failedVolumes);
-  }
-
-  /**
-   * Check a single volume asynchronously, returning a {@link ListenableFuture}
-   * that can be used to retrieve the final result.
-   *
-   * If the volume cannot be referenced then it is already closed and
-   * cannot be checked. No error is propagated to the callback.
-   *
-   * @param volume the volume that is to be checked.
-   * @param callback callback to be invoked when the volume check completes.
-   * @return true if the check was scheduled and the callback will be invoked.
-   *         false otherwise.
-   */
-  public boolean checkVolume(final HddsVolume volume, Callback callback) {
-    if (volume == null) {
-      LOG.debug("Cannot schedule check on null volume");
-      return false;
-    }
-
-    Optional<ListenableFuture<VolumeCheckResult>> olf =
-        delegateChecker.schedule(volume, null);
-    if (olf.isPresent()) {
-      numVolumeChecks.incrementAndGet();
-      Futures.addCallback(olf.get(),
-          new ResultHandler(volume, new HashSet<>(), new HashSet<>(),
-              new AtomicLong(1), callback),
-          checkVolumeResultHandlerExecutorService
-      );
-      return true;
-    }
-    return false;
-  }
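-
-  // A minimal sketch of a single-volume check using the Callback interface
-  // below (purely illustrative):
-  //
-  //   boolean scheduled = checker.checkVolume(volume,
-  //       (healthy, failed) -> {
-  //         if (!failed.isEmpty()) {
-  //           LOG.warn("Volume {} failed its health check", volume);
-  //         }
-  //       });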
-
-  /**
-   * A callback to process the results of checking a volume.
-   */
-  private class ResultHandler
-      implements FutureCallback<VolumeCheckResult> {
-    private final HddsVolume volume;
-    private final Set<HddsVolume> failedVolumes;
-    private final Set<HddsVolume> healthyVolumes;
-    private final AtomicLong volumeCounter;
-
-    @Nullable
-    private final Callback callback;
-
-    /**
-     * @param volume the volume whose check result is being handled.
-     * @param healthyVolumes set of healthy volumes. If the disk check is
-     *                       successful, add the volume here.
-     * @param failedVolumes set of failed volumes. If the disk check fails,
-     *                      add the volume here.
-     * @param volumeCounter volumeCounter used to trigger callback invocation.
-     * @param callback invoked when the volumeCounter reaches 0.
-     */
-    ResultHandler(HddsVolume volume,
-                  Set<HddsVolume> healthyVolumes,
-                  Set<HddsVolume> failedVolumes,
-                  AtomicLong volumeCounter,
-                  @Nullable Callback callback) {
-      this.volume = volume;
-      this.healthyVolumes = healthyVolumes;
-      this.failedVolumes = failedVolumes;
-      this.volumeCounter = volumeCounter;
-      this.callback = callback;
-    }
-
-    @Override
-    public void onSuccess(@Nonnull VolumeCheckResult result) {
-      switch (result) {
-      case HEALTHY:
-      case DEGRADED:
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Volume {} is {}.", volume, result);
-        }
-        markHealthy();
-        break;
-      case FAILED:
-        LOG.warn("Volume {} detected as being unhealthy", volume);
-        markFailed();
-        break;
-      default:
-        LOG.error("Unexpected health check result {} for volume {}",
-            result, volume);
-        markHealthy();
-        break;
-      }
-      cleanup();
-    }
-
-    @Override
-    public void onFailure(@Nonnull Throwable t) {
-      Throwable exception = (t instanceof ExecutionException) ?
-          t.getCause() : t;
-      LOG.warn("Exception running disk checks against volume " +
-          volume, exception);
-      markFailed();
-      cleanup();
-    }
-
-    private void markHealthy() {
-      synchronized (HddsVolumeChecker.this) {
-        healthyVolumes.add(volume);
-      }
-    }
-
-    private void markFailed() {
-      synchronized (HddsVolumeChecker.this) {
-        failedVolumes.add(volume);
-      }
-    }
-
-    private void cleanup() {
-      invokeCallback();
-    }
-
-    private void invokeCallback() {
-      try {
-        final long remaining = volumeCounter.decrementAndGet();
-        if (callback != null && remaining == 0) {
-          callback.call(healthyVolumes, failedVolumes);
-        }
-      } catch (Exception e) {
-        // Propagating this exception is unlikely to be helpful.
-        LOG.warn("Unexpected exception", e);
-      }
-    }
-  }
-
-  /**
-   * Shutdown the checker and its associated ExecutorService.
-   *
-   * See {@link ExecutorService#awaitTermination} for the interpretation
-   * of the parameters.
-   */
-  void shutdownAndWait(int gracePeriod, TimeUnit timeUnit) {
-    try {
-      delegateChecker.shutdownAndWait(gracePeriod, timeUnit);
-    } catch (InterruptedException e) {
-      LOG.warn("{} interrupted during shutdown.",
-          this.getClass().getSimpleName());
-      Thread.currentThread().interrupt();
-    }
-  }
-
-  /**
-   * This method is for testing only.
-   *
-   * @param testDelegate the delegate checker to substitute in tests.
-   */
-  @VisibleForTesting
-  void setDelegateChecker(
-      AsyncChecker<Boolean, VolumeCheckResult> testDelegate) {
-    delegateChecker = testDelegate;
-  }
-
-  /**
-   * Return the number of {@link #checkVolume} invocations.
-   */
-  public long getNumVolumeChecks() {
-    return numVolumeChecks.get();
-  }
-
-  /**
-   * Return the number of {@link #checkAllVolumes} invocations.
-   */
-  public long getNumAllVolumeChecks() {
-    return numAllVolumeChecks.get();
-  }
-
-  /**
-   * Return the number of checks skipped because the minimum gap since the
-   * last check had not elapsed.
-   */
-  public long getNumSkippedChecks() {
-    return numSkippedChecks.get();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
deleted file mode 100644
index f503149..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * Choose volumes in round-robin order.
- * The caller should synchronize access to the list of volumes.
- */
-public class RoundRobinVolumeChoosingPolicy implements VolumeChoosingPolicy {
-
-  public static final Log LOG = LogFactory.getLog(
-      RoundRobinVolumeChoosingPolicy.class);
-
-  // Stores the index of the next volume to be returned.
-  private AtomicInteger nextVolumeIndex = new AtomicInteger(0);
-
-  @Override
-  public HddsVolume chooseVolume(List<HddsVolume> volumes,
-      long maxContainerSize) throws IOException {
-
-    // No volumes available to choose from
-    if (volumes.isEmpty()) {
-      throw new DiskOutOfSpaceException("No more available volumes");
-    }
-
-    // since volumes could've been removed because of the failure
-    // make sure we are not out of bounds
-    int nextIndex = nextVolumeIndex.get();
-    int currentVolumeIndex = nextIndex < volumes.size() ? nextIndex : 0;
-
-    int startVolumeIndex = currentVolumeIndex;
-    long maxAvailable = 0;
-
-    while (true) {
-      final HddsVolume volume = volumes.get(currentVolumeIndex);
-      // adjust for remaining capacity in Open containers
-      long availableVolumeSize = volume.getAvailable()
-          - volume.getCommittedBytes();
-
-      currentVolumeIndex = (currentVolumeIndex + 1) % volumes.size();
-
-      if (availableVolumeSize > maxContainerSize) {
-        nextVolumeIndex.compareAndSet(nextIndex, currentVolumeIndex);
-        return volume;
-      }
-
-      if (availableVolumeSize > maxAvailable) {
-        maxAvailable = availableVolumeSize;
-      }
-
-      if (currentVolumeIndex == startVolumeIndex) {
-        throw new DiskOutOfSpaceException("Out of space: "
-            + "The volume with the most available space (=" + maxAvailable
-            + " B) is less than the container size (=" + maxContainerSize
-            + " B).");
-      }
-
-    }
-  }
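-
-  // A minimal usage sketch (the 5 GB container size is an assumption for
-  // illustration):
-  //
-  //   VolumeChoosingPolicy policy = new RoundRobinVolumeChoosingPolicy();
-  //   HddsVolume chosen =
-  //       policy.chooseVolume(volumes, 5L * 1024 * 1024 * 1024);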
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java
deleted file mode 100644
index 836fdf3..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
-import org.apache.hadoop.util.Timer;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-import java.util.WeakHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-/**
- * An implementation of {@link AsyncChecker} that skips checking recently
- * checked objects. It will enforce at least minMsBetweenChecks
- * milliseconds between two successive checks of any one object.
- *
- * It is assumed that the total number of Checkable objects in the system
- * is small (not more than a few dozen), since the checker uses O(Checkables)
- * storage and also potentially O(Checkables) threads.
- *
- * minMsBetweenChecks should be configured reasonably
- * by the caller to avoid spinning up too many threads frequently.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class ThrottledAsyncChecker<K, V> implements AsyncChecker<K, V> {
-  public static final Logger LOG =
-      LoggerFactory.getLogger(ThrottledAsyncChecker.class);
-
-  private final Timer timer;
-
-  /**
-   * The ExecutorService used to schedule asynchronous checks.
-   */
-  private final ListeningExecutorService executorService;
-  private final ScheduledExecutorService scheduledExecutorService;
-
-  /**
-   * The minimum gap in milliseconds between two successive checks
-   * of the same object. This is the throttle.
-   */
-  private final long minMsBetweenChecks;
-  private final long diskCheckTimeout;
-
-  /**
-   * Map of checks that are currently in progress. Protected by the object
-   * lock.
-   */
-  private final Map<Checkable, ListenableFuture<V>> checksInProgress;
-
-  /**
-   * Maps Checkable objects to a future that can be used to retrieve
-   * the results of the operation.
-   * Protected by the object lock.
-   */
-  private final Map<Checkable, ThrottledAsyncChecker.LastCheckResult<V>>
-      completedChecks;
-
-  public ThrottledAsyncChecker(final Timer timer,
-                               final long minMsBetweenChecks,
-                               final long diskCheckTimeout,
-                               final ExecutorService executorService) {
-    this.timer = timer;
-    this.minMsBetweenChecks = minMsBetweenChecks;
-    this.diskCheckTimeout = diskCheckTimeout;
-    this.executorService = MoreExecutors.listeningDecorator(executorService);
-    this.checksInProgress = new HashMap<>();
-    this.completedChecks = new WeakHashMap<>();
-
-    if (this.diskCheckTimeout > 0) {
-      ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = new
-          ScheduledThreadPoolExecutor(1);
-      this.scheduledExecutorService = MoreExecutors
-          .getExitingScheduledExecutorService(scheduledThreadPoolExecutor);
-    } else {
-      this.scheduledExecutorService = null;
-    }
-  }
-
-  /**
-   * See {@link AsyncChecker#schedule}
-   *
-   * If the object has been checked recently then the check will
-   * be skipped. Multiple concurrent checks for the same object
-   * will receive the same Future.
-   */
-  @Override
-  public synchronized Optional<ListenableFuture<V>> schedule(
-      Checkable<K, V> target, K context) {
-    if (checksInProgress.containsKey(target)) {
-      return Optional.empty();
-    }
-
-    if (completedChecks.containsKey(target)) {
-      final ThrottledAsyncChecker.LastCheckResult<V> result =
-          completedChecks.get(target);
-      final long msSinceLastCheck = timer.monotonicNow() - result.completedAt;
-      if (msSinceLastCheck < minMsBetweenChecks) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Skipped checking {}. Time since last check {}ms " +
-                  "is less than the min gap {}ms.",
-              target, msSinceLastCheck, minMsBetweenChecks);
-        }
-        return Optional.empty();
-      }
-    }
-
-    LOG.info("Scheduling a check for {}", target);
-    final ListenableFuture<V> lfWithoutTimeout = executorService.submit(
-        () -> target.check(context));
-    final ListenableFuture<V> lf;
-
-    if (diskCheckTimeout > 0) {
-      lf = TimeoutFuture
-          .create(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS,
-              scheduledExecutorService);
-    } else {
-      lf = lfWithoutTimeout;
-    }
-
-    checksInProgress.put(target, lf);
-    addResultCachingCallback(target, lf);
-    return Optional.of(lf);
-  }
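-
-  // Illustrative throttling behavior, assuming minMsBetweenChecks = 10_000
-  // (an example value, not a default):
-  //
-  //   Optional<ListenableFuture<V>> first = checker.schedule(target, ctx);
-  //   // first.isPresent() == true: a check is started.
-  //   Optional<ListenableFuture<V>> second = checker.schedule(target, ctx);
-  //   // second.isPresent() == false: the target was checked too recently
-  //   // (or a check is still in progress), so the request is skipped.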
-
-  /**
-   * Register a callback to cache the result of a check.
-   * @param target the object that was checked.
-   * @param lf future tracking the in-flight check.
-   */
-  private void addResultCachingCallback(
-      Checkable<K, V> target, ListenableFuture<V> lf) {
-    Futures.addCallback(lf, new FutureCallback<V>() {
-      @Override
-      public void onSuccess(@Nullable V result) {
-        synchronized (ThrottledAsyncChecker.this) {
-          checksInProgress.remove(target);
-          completedChecks.put(target, new LastCheckResult<>(
-              result, timer.monotonicNow()));
-        }
-      }
-
-      @Override
-      public void onFailure(@Nonnull Throwable t) {
-        synchronized (ThrottledAsyncChecker.this) {
-          checksInProgress.remove(target);
-          completedChecks.put(target, new LastCheckResult<>(
-              t, timer.monotonicNow()));
-        }
-      }
-    });
-  }
-
-  /**
-   * {@inheritDoc}.
-   *
-   * The results of in-progress checks are not useful during shutdown,
-   * so we optimize for faster shutdown by interrupting all actively
-   * executing checks.
-   */
-  @Override
-  public void shutdownAndWait(long timeout, TimeUnit timeUnit)
-      throws InterruptedException {
-    if (scheduledExecutorService != null) {
-      scheduledExecutorService.shutdownNow();
-      scheduledExecutorService.awaitTermination(timeout, timeUnit);
-    }
-
-    executorService.shutdownNow();
-    executorService.awaitTermination(timeout, timeUnit);
-  }
-
-  /**
-   * Status of running a check. It can either be a result or an
-   * exception, depending on whether the check completed or threw.
-   */
-  private static final class LastCheckResult<V> {
-    /**
-     * Timestamp at which the check completed.
-     */
-    private final long completedAt;
-
-    /**
-     * Result of running the check if it completed. null if it threw.
-     */
-    @Nullable
-    private final V result;
-
-    /**
-     * Exception thrown by the check. null if it returned a result.
-     */
-    private final Throwable exception; // null on success.
-
-    /**
-     * Initialize with a result.
-     * @param result result returned by the check.
-     * @param completedAt timestamp at which the check completed.
-     */
-    private LastCheckResult(V result, long completedAt) {
-      this.result = result;
-      this.exception = null;
-      this.completedAt = completedAt;
-    }
-
-    /**
-     * Initialize with an exception.
-     * @param t exception thrown by the check.
-     * @param completedAt timestamp at which the check completed.
-     */
-    private LastCheckResult(Throwable t, long completedAt) {
-      this.result = null;
-      this.exception = t;
-      this.completedAt = completedAt;
-    }
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java
deleted file mode 100644
index 626814e..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (C) 2007 The Guava Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- * Some portions of this class have been modified to make it functional in this
- * package.
- */
-package org.apache.hadoop.ozone.container.common.volume;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.annotation.Nullable;
-import java.util.concurrent.Future;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * Implementation of {@code Futures#withTimeout}.
- *
- * <p>Future that delegates to another but will finish early (via a
- * {@link TimeoutException} wrapped in an
- * {@link java.util.concurrent.ExecutionException}) if the
- * specified duration expires. The delegate future is interrupted and
- * cancelled if it times out.
- */
-final class TimeoutFuture<V> extends AbstractFuture.TrustedFuture<V> {
-  public static final Logger LOG = LoggerFactory.getLogger(
-      TimeoutFuture.class);
-
-  static <V> ListenableFuture<V> create(
-      ListenableFuture<V> delegate,
-      long time,
-      TimeUnit unit,
-      ScheduledExecutorService scheduledExecutor) {
-    TimeoutFuture<V> result = new TimeoutFuture<V>(delegate);
-    TimeoutFuture.Fire<V> fire = new TimeoutFuture.Fire<V>(result);
-    result.timer = scheduledExecutor.schedule(fire, time, unit);
-    delegate.addListener(fire, directExecutor());
-    return result;
-  }
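-
-  // A minimal usage sketch (illustrative, not part of the original class):
-  // wrap a delegate future so it fails once 5 seconds elapse. "delegate" and
-  // "scheduler" are hypothetical caller-side variables.
-  //
-  //   ScheduledExecutorService scheduler =
-  //       Executors.newSingleThreadScheduledExecutor();
-  //   ListenableFuture<String> guarded =
-  //       TimeoutFuture.create(delegate, 5, TimeUnit.SECONDS, scheduler);
-  //   // guarded completes with the delegate's result, or get() throws an
-  //   // ExecutionException wrapping a TimeoutException after 5 seconds.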
-
-  /*
-   * Memory visibility of these fields. There are two cases to consider.
-   *
-   * 1. visibility of the writes to these fields to Fire.run:
-   *
-   * The initial write to delegateRef is made definitely visible via the
-   * semantics of addListener/SES.schedule. The later racy write in cancel()
-   * is not guaranteed to be observed, however that is fine since the
-   * correctness is based on the atomic state in our base class. The initial
-   * write to timer is never definitely visible to Fire.run since it is
-   * assigned after SES.schedule is called. Therefore Fire.run has to check
-   * for null. However, it should be visible if Fire.run is called by
-   * delegate.addListener since addListener is called after the assignment
-   * to timer, and importantly this is the main situation in which we need to
-   * be able to see the write.
-   *
-   * 2. visibility of the writes to an afterDone() call triggered by cancel():
-   *
-   * Since these fields are non-final that means that TimeoutFuture is not
-   * being 'safely published', thus a motivated caller may be able to expose
-   * the reference to another thread that would then call cancel() and be
-   * unable to cancel the delegate. There are a number of ways to solve this,
-   * none of which are very pretty, and it is currently believed to be a
-   * purely theoretical problem (since the other actions should supply
-   * sufficient write-barriers).
-   */
-
-  @Nullable private ListenableFuture<V> delegateRef;
-  @Nullable private Future<?> timer;
-
-  private TimeoutFuture(ListenableFuture<V> delegate) {
-    this.delegateRef = Preconditions.checkNotNull(delegate);
-  }
-
-  /**
-   * A runnable that is called when the delegate or the timer completes.
-   */
-  private static final class Fire<V> implements Runnable {
-    @Nullable
-    private TimeoutFuture<V> timeoutFutureRef;
-
-    Fire(
-        TimeoutFuture<V> timeoutFuture) {
-      this.timeoutFutureRef = timeoutFuture;
-    }
-
-    @Override
-    public void run() {
-      // If either of these reads return null then we must be after a
-      // successful cancel or another call to this method.
-      TimeoutFuture<V> timeoutFuture = timeoutFutureRef;
-      if (timeoutFuture == null) {
-        return;
-      }
-      ListenableFuture<V> delegate = timeoutFuture.delegateRef;
-      if (delegate == null) {
-        return;
-      }
-
-      /*
-       * If we're about to complete the TimeoutFuture, we want to release our
-       * reference to it. Otherwise, we'll pin it (and its result) in memory
-       * until the timeout task is GCed. (The need to clear our reference to
-       * the TimeoutFuture is the reason we use a *static* nested class with
-       * a manual reference back to the "containing" class.)
-       *
-       * This has the nice-ish side effect of limiting reentrancy: run() calls
-       * timeoutFuture.setException() calls run(). That reentrancy would
-       * already be harmless, since timeoutFuture can be set (and delegate
-       * cancelled) only once. (And "set only once" is important for other
-       * reasons: run() can still be invoked concurrently in different threads,
-       * even with the above null checks.)
-       */
-      timeoutFutureRef = null;
-      if (delegate.isDone()) {
-        timeoutFuture.setFuture(delegate);
-      } else {
-        try {
-          timeoutFuture.setException(
-              new TimeoutException("Future timed out: " + delegate));
-        } finally {
-          delegate.cancel(true);
-        }
-      }
-    }
-  }
-
-  @Override
-  protected void afterDone() {
-    maybePropagateCancellation(delegateRef);
-
-    Future<?> localTimer = timer;
-    // Try to cancel the timer as an optimization.
-    // timer may be null if this call to run was by the timer task since there
-    // is no happens-before edge between the assignment to timer and an
-    // execution of the timer task.
-    if (localTimer != null) {
-      localTimer.cancel(false);
-    }
-
-    delegateRef = null;
-    timer = null;
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java
deleted file mode 100644
index 9e2eb22..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * This class is used to track Volume IO stats for each HDDS Volume.
- */
-public class VolumeIOStats {
-
-  private final AtomicLong readBytes;
-  private final AtomicLong readOpCount;
-  private final AtomicLong writeBytes;
-  private final AtomicLong writeOpCount;
-  private final AtomicLong readTime;
-  private final AtomicLong writeTime;
-
-  public VolumeIOStats() {
-    readBytes = new AtomicLong(0);
-    readOpCount = new AtomicLong(0);
-    writeBytes = new AtomicLong(0);
-    writeOpCount = new AtomicLong(0);
-    readTime = new AtomicLong(0);
-    writeTime = new AtomicLong(0);
-  }
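-
-  // A minimal usage sketch (illustrative, not part of the original class):
-  // a write path would typically record an operation like this, where "len"
-  // and "volumeIOStats" are hypothetical caller-side variables.
-  //
-  //   long start = Time.monotonicNow();
-  //   // ... write len bytes to the volume ...
-  //   volumeIOStats.incWriteBytes(len);
-  //   volumeIOStats.incWriteOpCount();
-  //   volumeIOStats.incWriteTime(Time.monotonicNow() - start);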
-
-  /**
-   * Increment number of bytes read from the volume.
-   * @param bytesRead number of bytes read
-   */
-  public void incReadBytes(long bytesRead) {
-    readBytes.addAndGet(bytesRead);
-  }
-
-  /**
-   * Increment the read operations performed on the volume.
-   */
-  public void incReadOpCount() {
-    readOpCount.incrementAndGet();
-  }
-
-  /**
-   * Increment number of bytes written to the volume.
-   * @param bytesWritten number of bytes written
-   */
-  public void incWriteBytes(long bytesWritten) {
-    writeBytes.addAndGet(bytesWritten);
-  }
-
-  /**
-   * Increment the write operations performed on the volume.
-   */
-  public void incWriteOpCount() {
-    writeOpCount.incrementAndGet();
-  }
-
-  /**
-   * Increment the time taken by a read operation on the volume.
-   * @param time time taken by the read operation
-   */
-  public void incReadTime(long time) {
-    readTime.addAndGet(time);
-  }
-
-  /**
-   * Increment the time taken by a write operation on the volume.
-   * @param time time taken by the write operation
-   */
-  public void incWriteTime(long time) {
-    writeTime.addAndGet(time);
-  }
-
-  /**
-   * Returns total number of bytes read from the volume.
-   * @return long
-   */
-  public long getReadBytes() {
-    return readBytes.get();
-  }
-
-  /**
-   * Returns total number of bytes written to the volume.
-   * @return long
-   */
-  public long getWriteBytes() {
-    return writeBytes.get();
-  }
-
-  /**
-   * Returns total number of read operations performed on the volume.
-   * @return long
-   */
-  public long getReadOpCount() {
-    return readOpCount.get();
-  }
-
-  /**
-   * Returns total number of write operations performed on the volume.
-   * @return long
-   */
-  public long getWriteOpCount() {
-    return writeOpCount.get();
-  }
-
-  /**
-   * Returns total read operations time on the volume.
-   * @return long
-   */
-  public long getReadTime() {
-    return readTime.get();
-  }
-
-  /**
-   * Returns total write operations time on the volume.
-   * @return long
-   */
-  public long getWriteTime() {
-    return writeTime.get();
-  }
-
-
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
deleted file mode 100644
index 31f83ec..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.GetSpaceUsed;
-import org.apache.hadoop.fs.StorageType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-
-/**
- * Stores information about a disk/volume.
- */
-public final class VolumeInfo {
-
-  private static final Logger LOG = LoggerFactory.getLogger(VolumeInfo.class);
-
-  private final String rootDir;
-  private final StorageType storageType;
-
-  // Space usage calculator
-  private final VolumeUsage usage;
-
-  // Capacity configured. This is useful when we want to
-  // limit the visible capacity for tests. If negative, then we just
-  // query from the filesystem.
-  private long configuredCapacity;
-
-  /**
-   * Builder for VolumeInfo.
-   */
-  public static class Builder {
-    private final Configuration conf;
-    private final String rootDir;
-    private StorageType storageType;
-    private long configuredCapacity;
-
-    public Builder(String root, Configuration config) {
-      this.rootDir = root;
-      this.conf = config;
-    }
-
-    public Builder storageType(StorageType st) {
-      this.storageType = st;
-      return this;
-    }
-
-    public Builder configuredCapacity(long capacity) {
-      this.configuredCapacity = capacity;
-      return this;
-    }
-
-    public VolumeInfo build() throws IOException {
-      return new VolumeInfo(this);
-    }
-  }
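-
-  // A minimal builder sketch (illustrative, not part of the original class):
-  //
-  //   VolumeInfo info = new VolumeInfo.Builder("/data/disk1", conf)
-  //       .storageType(StorageType.DISK)
-  //       .configuredCapacity(0) // 0 means: query capacity from the filesystem
-  //       .build();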
-
-  private VolumeInfo(Builder b) throws IOException {
-
-    this.rootDir = b.rootDir;
-    File root = new File(this.rootDir);
-
-    boolean succeeded = root.isDirectory() || root.mkdirs();
-
-    if (!succeeded) {
-      LOG.error("Unable to create the volume root dir at: {}", root);
-      throw new IOException("Unable to create the volume root dir at " + root);
-    }
-
-    this.storageType = (b.storageType != null ?
-        b.storageType : StorageType.DEFAULT);
-
-    this.configuredCapacity = (b.configuredCapacity != 0 ?
-        b.configuredCapacity : -1);
-
-    this.usage = new VolumeUsage(root, b.conf);
-  }
-
-  public long getCapacity() throws IOException {
-    if (configuredCapacity < 0) {
-      return usage.getCapacity();
-    }
-    return configuredCapacity;
-  }
-
-  public long getAvailable() throws IOException {
-    return usage.getAvailable();
-  }
-
-  public long getScmUsed() throws IOException {
-    return usage.getScmUsed();
-  }
-
-  protected void shutdownUsageThread() {
-    usage.shutdown();
-  }
-
-  public String getRootDir() {
-    return this.rootDir;
-  }
-
-  public StorageType getStorageType() {
-    return this.storageType;
-  }
-
-  /**
-   * Only for testing. Do not use otherwise.
-   */
-  @VisibleForTesting
-  public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
-    usage.setScmUsageForTesting(scmUsageForTest);
-  }
-
-  /**
-   * Only for testing. Do not use otherwise.
-   */
-  @VisibleForTesting
-  public VolumeUsage getUsageForTesting() {
-    return usage;
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
deleted file mode 100644
index 875e96a..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ /dev/null
@@ -1,519 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
-import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume.VolumeState;
-import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.util.ShutdownHookManager;
-import org.apache.hadoop.util.Timer;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * VolumeSet to manage HDDS volumes in a DataNode.
- */
-public class VolumeSet {
-
-  private static final Logger LOG = LoggerFactory.getLogger(VolumeSet.class);
-
-  private Configuration conf;
-
-  /**
-   * {@link VolumeSet#volumeMap} maintains a map of all active volumes in the
-   * DataNode. Each volume has a one-to-one mapping with a VolumeInfo object.
-   */
-  private Map<String, HddsVolume> volumeMap;
-  /**
-   * {@link VolumeSet#failedVolumeMap} maintains a map of volumes which have
-   * failed. The keys in this map and {@link VolumeSet#volumeMap} are
-   * mutually exclusive.
-   */
-  private Map<String, HddsVolume> failedVolumeMap;
-
-  /**
-   * {@link VolumeSet#volumeStateMap} maintains a list of active volumes per
-   * StorageType.
-   */
-  private EnumMap<StorageType, List<HddsVolume>> volumeStateMap;
-
-  /**
-   * An executor for periodic disk checks.
-   */
-  private final ScheduledExecutorService diskCheckerService;
-  private final ScheduledFuture<?> periodicDiskChecker;
-
-  private static final long DISK_CHECK_INTERVAL_MINUTES = 15;
-
-  /**
-   * A Reentrant Read Write Lock to synchronize volume operations in VolumeSet.
-   * Any update to {@link VolumeSet#volumeMap},
-   * {@link VolumeSet#failedVolumeMap}, or {@link VolumeSet#volumeStateMap}
-   * should be done after acquiring the write lock.
-   */
-  private final ReentrantReadWriteLock volumeSetRWLock;
-
-  private final String datanodeUuid;
-  private String clusterID;
-
-  private Runnable shutdownHook;
-  private final HddsVolumeChecker volumeChecker;
-
-  public VolumeSet(String dnUuid, Configuration conf)
-      throws IOException {
-    this(dnUuid, null, conf);
-  }
-
-  public VolumeSet(String dnUuid, String clusterID, Configuration conf)
-      throws IOException {
-    this.datanodeUuid = dnUuid;
-    this.clusterID = clusterID;
-    this.conf = conf;
-    this.volumeSetRWLock = new ReentrantReadWriteLock();
-    this.volumeChecker = getVolumeChecker(conf);
-    this.diskCheckerService = Executors.newScheduledThreadPool(
-        1, r -> new Thread(r, "Periodic HDDS volume checker"));
-    this.periodicDiskChecker =
-      diskCheckerService.scheduleWithFixedDelay(() -> {
-        try {
-          checkAllVolumes();
-        } catch (IOException e) {
-          LOG.warn("Exception while checking disks", e);
-        }
-      }, DISK_CHECK_INTERVAL_MINUTES, DISK_CHECK_INTERVAL_MINUTES,
-        TimeUnit.MINUTES);
-    initializeVolumeSet();
-  }
-
-  @VisibleForTesting
-  HddsVolumeChecker getVolumeChecker(Configuration configuration)
-      throws DiskChecker.DiskErrorException {
-    return new HddsVolumeChecker(configuration, new Timer());
-  }
-
-  /**
-   * Add DN volumes configured through ConfigKeys to volumeMap.
-   */
-  private void initializeVolumeSet() throws IOException {
-    volumeMap = new ConcurrentHashMap<>();
-    failedVolumeMap = new ConcurrentHashMap<>();
-    volumeStateMap = new EnumMap<>(StorageType.class);
-
-    Collection<String> rawLocations = conf.getTrimmedStringCollection(
-        HDDS_DATANODE_DIR_KEY);
-    if (rawLocations.isEmpty()) {
-      rawLocations = conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
-    }
-    if (rawLocations.isEmpty()) {
-      throw new IllegalArgumentException("No location configured in either "
-          + HDDS_DATANODE_DIR_KEY + " or " + DFS_DATANODE_DATA_DIR_KEY);
-    }
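-    // For example (illustrative values only), hdds.datanode.dir may be set to
-    // "/data/disk1/hdds,/data/disk2/hdds"; when it is absent, the volumes fall
-    // back to the HDFS data directories in dfs.datanode.data.dir.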
-
-    for (StorageType storageType : StorageType.values()) {
-      volumeStateMap.put(storageType, new ArrayList<>());
-    }
-
-    for (String locationString : rawLocations) {
-      try {
-        StorageLocation location = StorageLocation.parse(locationString);
-
-        HddsVolume hddsVolume = createVolume(location.getUri().getPath(),
-            location.getStorageType());
-
-        checkAndSetClusterID(hddsVolume.getClusterID());
-
-        LOG.info("Added Volume : {} to VolumeSet",
-            hddsVolume.getHddsRootDir().getPath());
-
-        if (!hddsVolume.getHddsRootDir().mkdirs() &&
-            !hddsVolume.getHddsRootDir().exists()) {
-          throw new IOException("Failed to create HDDS storage dir " +
-              hddsVolume.getHddsRootDir());
-        }
-        volumeMap.put(hddsVolume.getHddsRootDir().getPath(), hddsVolume);
-        volumeStateMap.get(hddsVolume.getStorageType()).add(hddsVolume);
-      } catch (IOException e) {
-        HddsVolume volume = new HddsVolume.Builder(locationString)
-            .failedVolume(true).build();
-        failedVolumeMap.put(locationString, volume);
-        LOG.error("Failed to parse the storage location: " + locationString, e);
-      }
-    }
-
-    // First check that we have at least one usable volume. If all volumes
-    // failed, the volumeMap will be empty, and we throw an exception.
-    if (volumeMap.isEmpty()) {
-      throw new DiskOutOfSpaceException("No storage locations configured");
-    }
-
-    checkAllVolumes();
-
-    // Ensure volume threads are stopped and scm df is saved during shutdown.
-    shutdownHook = () -> {
-      saveVolumeSetUsed();
-    };
-    ShutdownHookManager.get().addShutdownHook(shutdownHook,
-        SHUTDOWN_HOOK_PRIORITY);
-  }
-
-  /**
-   * Run a synchronous parallel check of all HDDS volumes, removing
-   * failed volumes.
-   */
-  private void checkAllVolumes() throws IOException {
-    List<HddsVolume> allVolumes = getVolumesList();
-    Set<HddsVolume> failedVolumes;
-    try {
-      failedVolumes = volumeChecker.checkAllVolumes(allVolumes);
-    } catch (InterruptedException e) {
-      throw new IOException("Interrupted while running disk check", e);
-    }
-
-    if (failedVolumes.size() > 0) {
-      LOG.warn("checkAllVolumes got {} failed volumes - {}",
-          failedVolumes.size(), failedVolumes);
-      handleVolumeFailures(failedVolumes);
-    } else {
-      LOG.debug("checkAllVolumes encountered no failures");
-    }
-  }
-
-  /**
-   * Handle one or more failed volumes.
-   * @param failedVolumes set of volumes that failed the disk check
-   */
-  private void handleVolumeFailures(Set<HddsVolume> failedVolumes) {
-    for (HddsVolume v: failedVolumes) {
-      this.writeLock();
-      try {
-        // Immediately mark the volume as failed so it is unavailable
-        // for new containers.
-        volumeMap.remove(v.getHddsRootDir().getPath());
-        failedVolumeMap.putIfAbsent(v.getHddsRootDir().getPath(), v);
-      } finally {
-        this.writeUnlock();
-      }
-
-      // TODO:
-      // 1. Mark all closed containers on the volume as unhealthy.
-      // 2. Consider stopping IO on open containers and tearing down
-      //    active pipelines.
-      // 3. Handle Ratis log disk failure.
-    }
-  }
-
-  /**
-   * If Version file exists and the {@link VolumeSet#clusterID} is not set yet,
-   * assign it the value from Version file. Otherwise, check that the given
-   * id matches with the id from version file.
-   * @param idFromVersionFile value of the property from Version file
-   * @throws InconsistentStorageStateException if the given id does not match
-   *         the id from the version file
-   */
-  private void checkAndSetClusterID(String idFromVersionFile)
-      throws InconsistentStorageStateException {
-    // If the clusterID is null (not set), assign it the value
-    // from version file.
-    if (this.clusterID == null) {
-      this.clusterID = idFromVersionFile;
-      return;
-    }
-
-    // If the clusterID is already set, it should match with the value from the
-    // version file.
-    if (!idFromVersionFile.equals(this.clusterID)) {
-      throw new InconsistentStorageStateException(
-          "Mismatched ClusterIDs. VolumeSet has: " + this.clusterID +
-              ", and version file has: " + idFromVersionFile);
-    }
-  }
-
-  /**
-   * Acquire Volume Set Read lock.
-   */
-  public void readLock() {
-    volumeSetRWLock.readLock().lock();
-  }
-
-  /**
-   * Release Volume Set Read lock.
-   */
-  public void readUnlock() {
-    volumeSetRWLock.readLock().unlock();
-  }
-
-  /**
-   * Acquire Volume Set Write lock.
-   */
-  public void writeLock() {
-    volumeSetRWLock.writeLock().lock();
-  }
-
-  /**
-   * Release Volume Set Write lock.
-   */
-  public void writeUnlock() {
-    volumeSetRWLock.writeLock().unlock();
-  }
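-
-  // A minimal locking sketch (illustrative, not part of the original class):
-  // readers of the volume maps take the read lock; mutations take the write
-  // lock, mirroring the pattern used by getNodeReport() below.
-  //
-  //   volumeSet.readLock();
-  //   try {
-  //     // read volumeMap / failedVolumeMap / volumeStateMap
-  //   } finally {
-  //     volumeSet.readUnlock();
-  //   }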
-
-
-  private HddsVolume createVolume(String locationString,
-      StorageType storageType) throws IOException {
-    HddsVolume.Builder volumeBuilder = new HddsVolume.Builder(locationString)
-        .conf(conf)
-        .datanodeUuid(datanodeUuid)
-        .clusterID(clusterID)
-        .storageType(storageType);
-    return volumeBuilder.build();
-  }
-
-
-  // Add a volume to VolumeSet
-  boolean addVolume(String dataDir) {
-    return addVolume(dataDir, StorageType.DEFAULT);
-  }
-
-  // Add a volume to VolumeSet
-  private boolean addVolume(String volumeRoot, StorageType storageType) {
-    String hddsRoot = HddsVolumeUtil.getHddsRoot(volumeRoot);
-    boolean success;
-
-    this.writeLock();
-    try {
-      if (volumeMap.containsKey(hddsRoot)) {
-        LOG.warn("Volume : {} already exists in VolumeMap", hddsRoot);
-        success = false;
-      } else {
-        if (failedVolumeMap.containsKey(hddsRoot)) {
-          failedVolumeMap.remove(hddsRoot);
-        }
-
-        HddsVolume hddsVolume = createVolume(volumeRoot, storageType);
-        volumeMap.put(hddsVolume.getHddsRootDir().getPath(), hddsVolume);
-        volumeStateMap.get(hddsVolume.getStorageType()).add(hddsVolume);
-
-        LOG.info("Added Volume : {} to VolumeSet",
-            hddsVolume.getHddsRootDir().getPath());
-        success = true;
-      }
-    } catch (IOException ex) {
-      LOG.error("Failed to add volume " + volumeRoot + " to VolumeSet", ex);
-      success = false;
-    } finally {
-      this.writeUnlock();
-    }
-    return success;
-  }
-
-  // Mark a volume as failed
-  public void failVolume(String dataDir) {
-    String hddsRoot = HddsVolumeUtil.getHddsRoot(dataDir);
-
-    this.writeLock();
-    try {
-      if (volumeMap.containsKey(hddsRoot)) {
-        HddsVolume hddsVolume = volumeMap.get(hddsRoot);
-        hddsVolume.failVolume();
-
-        volumeMap.remove(hddsRoot);
-        volumeStateMap.get(hddsVolume.getStorageType()).remove(hddsVolume);
-        failedVolumeMap.put(hddsRoot, hddsVolume);
-
-        LOG.info("Moving Volume : {} to failed Volumes", hddsRoot);
-      } else if (failedVolumeMap.containsKey(hddsRoot)) {
-        LOG.info("Volume : {} is not active", hddsRoot);
-      } else {
-        LOG.warn("Volume : {} does not exist in VolumeSet", hddsRoot);
-      }
-    } finally {
-      this.writeUnlock();
-    }
-  }
-
-  // Remove a volume from the VolumeSet completely.
-  public void removeVolume(String dataDir) throws IOException {
-    String hddsRoot = HddsVolumeUtil.getHddsRoot(dataDir);
-
-    this.writeLock();
-    try {
-      if (volumeMap.containsKey(hddsRoot)) {
-        HddsVolume hddsVolume = volumeMap.get(hddsRoot);
-        hddsVolume.shutdown();
-
-        volumeMap.remove(hddsRoot);
-        volumeStateMap.get(hddsVolume.getStorageType()).remove(hddsVolume);
-
-        LOG.info("Removed Volume : {} from VolumeSet", hddsRoot);
-      } else if (failedVolumeMap.containsKey(hddsRoot)) {
-        HddsVolume hddsVolume = failedVolumeMap.get(hddsRoot);
-        hddsVolume.setState(VolumeState.NON_EXISTENT);
-
-        failedVolumeMap.remove(hddsRoot);
-        LOG.info("Removed Volume : {} from failed VolumeSet", hddsRoot);
-      } else {
-        LOG.warn("Volume : {} does not exist in VolumeSet", hddsRoot);
-      }
-    } finally {
-      this.writeUnlock();
-    }
-  }
-
-  /**
-   * Calls shutdown on each volume to stop the volume usage thread and
-   * persist scmUsed for each volume.
-   */
-  private void saveVolumeSetUsed() {
-    for (HddsVolume hddsVolume : volumeMap.values()) {
-      try {
-        hddsVolume.shutdown();
-      } catch (Exception ex) {
-        LOG.error("Failed to shutdown volume : " + hddsVolume.getHddsRootDir(),
-            ex);
-      }
-    }
-  }
-
-  /**
-   * Shutdown the volumeset.
-   */
-  public void shutdown() {
-    saveVolumeSetUsed();
-    stopDiskChecker();
-    if (shutdownHook != null) {
-      ShutdownHookManager.get().removeShutdownHook(shutdownHook);
-    }
-  }
-
-  private void stopDiskChecker() {
-    periodicDiskChecker.cancel(true);
-    volumeChecker.shutdownAndWait(0, TimeUnit.SECONDS);
-    diskCheckerService.shutdownNow();
-  }
-
-  @VisibleForTesting
-  public List<HddsVolume> getVolumesList() {
-    return ImmutableList.copyOf(volumeMap.values());
-  }
-
-  @VisibleForTesting
-  public List<HddsVolume> getFailedVolumesList() {
-    return ImmutableList.copyOf(failedVolumeMap.values());
-  }
-
-  @VisibleForTesting
-  public Map<String, HddsVolume> getVolumeMap() {
-    return ImmutableMap.copyOf(volumeMap);
-  }
-
-  @VisibleForTesting
-  public Map<StorageType, List<HddsVolume>> getVolumeStateMap() {
-    return ImmutableMap.copyOf(volumeStateMap);
-  }
-
-  public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport()
-      throws IOException {
-    boolean failed;
-    this.readLock();
-    try {
-      StorageLocationReport[] reports = new StorageLocationReport[volumeMap
-          .size() + failedVolumeMap.size()];
-      int counter = 0;
-      HddsVolume hddsVolume;
-      for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
-        hddsVolume = entry.getValue();
-        VolumeInfo volumeInfo = hddsVolume.getVolumeInfo();
-        long scmUsed;
-        long remaining;
-        long capacity;
-        failed = false;
-        try {
-          scmUsed = volumeInfo.getScmUsed();
-          remaining = volumeInfo.getAvailable();
-          capacity = volumeInfo.getCapacity();
-        } catch (IOException ex) {
-          LOG.warn("Failed to get scmUsed and remaining for container " +
-              "storage location {}", volumeInfo.getRootDir(), ex);
-          // reset scmUsed and remaining if df/du failed.
-          scmUsed = 0;
-          remaining = 0;
-          capacity = 0;
-          failed = true;
-        }
-
-        StorageLocationReport.Builder builder =
-            StorageLocationReport.newBuilder();
-        builder.setStorageLocation(volumeInfo.getRootDir())
-            .setId(hddsVolume.getStorageID())
-            .setFailed(failed)
-            .setCapacity(capacity)
-            .setRemaining(remaining)
-            .setScmUsed(scmUsed)
-            .setStorageType(hddsVolume.getStorageType());
-        StorageLocationReport r = builder.build();
-        reports[counter++] = r;
-      }
-      for (Map.Entry<String, HddsVolume> entry : failedVolumeMap.entrySet()) {
-        hddsVolume = entry.getValue();
-        StorageLocationReport.Builder builder = StorageLocationReport
-            .newBuilder();
-        builder.setStorageLocation(hddsVolume.getHddsRootDir()
-            .getAbsolutePath()).setId(hddsVolume.getStorageID()).setFailed(true)
-            .setCapacity(0).setRemaining(0).setScmUsed(0).setStorageType(
-            hddsVolume.getStorageType());
-        StorageLocationReport r = builder.build();
-        reports[counter++] = r;
-      }
-      NodeReportProto.Builder nrb = NodeReportProto.newBuilder();
-      for (int i = 0; i < reports.length; i++) {
-        nrb.addStorageReport(reports[i].getProtoBufMessage());
-      }
-      return nrb.build();
-    } finally {
-      this.readUnlock();
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
deleted file mode 100644
index 693bcb5..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import com.google.common.annotations.VisibleForTesting;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CachingGetSpaceUsed;
-import org.apache.hadoop.fs.DF;
-import org.apache.hadoop.fs.GetSpaceUsed;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.nio.charset.StandardCharsets;
-import java.util.Scanner;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * Class that tracks the disk space usage (df/du) of a Datanode volume
- * used by SCM containers.
- */
-public class VolumeUsage {
-  private static final Logger LOG = LoggerFactory.getLogger(VolumeUsage.class);
-
-  private final File rootDir;
-  private final DF df;
-  private final File scmUsedFile;
-  private AtomicReference<GetSpaceUsed> scmUsage;
-  private boolean shutdownComplete;
-
-  private static final String DU_CACHE_FILE = "scmUsed";
-  private volatile boolean scmUsedSaved = false;
-
-  VolumeUsage(File dataLoc, Configuration conf)
-      throws IOException {
-    this.rootDir = dataLoc;
-
-    // SCM used cache file
-    scmUsedFile = new File(rootDir, DU_CACHE_FILE);
-    // get overall disk df
-    this.df = new DF(rootDir, conf);
-
-    startScmUsageThread(conf);
-  }
-
-  void startScmUsageThread(Configuration conf) throws IOException {
-    // get SCM specific df
-    scmUsage = new AtomicReference<>(
-        new CachingGetSpaceUsed.Builder().setPath(rootDir)
-            .setConf(conf)
-            .setInitialUsed(loadScmUsed())
-            .build());
-  }
-
-  long getCapacity() {
-    long capacity = df.getCapacity();
-    return (capacity > 0) ? capacity : 0;
-  }
-
-  /*
-   * Calculate the available space in the volume.
-   */
-  long getAvailable() throws IOException {
-    long remaining = getCapacity() - getScmUsed();
-    long available = df.getAvailable();
-    if (remaining > available) {
-      remaining = available;
-    }
-    return (remaining > 0) ? remaining : 0;
-  }
-
-  long getScmUsed() throws IOException {
-    return scmUsage.get().getUsed();
-  }
-
-  public synchronized void shutdown() {
-    if (!shutdownComplete) {
-      saveScmUsed();
-
-      if (scmUsage.get() instanceof CachingGetSpaceUsed) {
-        IOUtils.cleanupWithLogger(
-            null, ((CachingGetSpaceUsed) scmUsage.get()));
-      }
-      shutdownComplete = true;
-    }
-  }
-
-  /**
-   * Read in the cached DU value and return it if it is less than 600 seconds
-   * old (DU update interval). Slight imprecision of scmUsed is not critical
-   * and skipping DU can significantly shorten the startup time.
-   * If the cached value is not available or too old, -1 is returned.
-   */
-  long loadScmUsed() {
-    long cachedScmUsed;
-    long mtime;
-    Scanner sc;
-
-    try {
-      sc = new Scanner(scmUsedFile, "UTF-8");
-    } catch (FileNotFoundException fnfe) {
-      return -1;
-    }
-
-    try {
-      // Get the recorded scmUsed from the file.
-      if (sc.hasNextLong()) {
-        cachedScmUsed = sc.nextLong();
-      } else {
-        return -1;
-      }
-      // Get the recorded mtime from the file.
-      if (sc.hasNextLong()) {
-        mtime = sc.nextLong();
-      } else {
-        return -1;
-      }
-
-      // Return the cached value if mtime is okay.
-      if (mtime > 0 && (Time.now() - mtime < 600000L)) {
-        LOG.info("Cached ScmUsed found for {} : {} ", rootDir,
-            cachedScmUsed);
-        return cachedScmUsed;
-      }
-      return -1;
-    } finally {
-      sc.close();
-    }
-  }
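-
-  // The cache file written by saveScmUsed() holds two longs separated by a
-  // space, "<scmUsed> <mtime>", e.g. "1048576 1571234567000" (illustrative
-  // values). mtime is written second so a truncated write never parses as a
-  // valid cache entry.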
-
-  /**
-   * Write the current scmUsed to the cache file.
-   */
-  void saveScmUsed() {
-    if (scmUsedFile.exists() && !scmUsedFile.delete()) {
-      LOG.warn("Failed to delete old scmUsed file in {}.", rootDir);
-    }
-    OutputStreamWriter out = null;
-    try {
-      long used = getScmUsed();
-      if (used > 0) {
-        out = new OutputStreamWriter(new FileOutputStream(scmUsedFile),
-            StandardCharsets.UTF_8);
-        // mtime is written last, so that truncated writes won't be valid.
-        out.write(Long.toString(used) + " " + Long.toString(Time.now()));
-        out.flush();
-        out.close();
-        out = null;
-      }
-    } catch (IOException ioe) {
-      // If write failed, the volume might be bad. Since the cache file is
-      // not critical, log the error and continue.
-      LOG.warn("Failed to write scmUsed to " + scmUsedFile, ioe);
-    } finally {
-      IOUtils.cleanupWithLogger(null, out);
-    }
-  }
-
-  /**
-   * Only for testing. Do not use otherwise.
-   */
-  @VisibleForTesting
-  @SuppressFBWarnings(
-      value = "IS2_INCONSISTENT_SYNC",
-      justification = "scmUsage is an AtomicReference. No additional " +
-          "synchronization is needed.")
-  public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
-    scmUsage.set(scmUsageForTest);
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
deleted file mode 100644
index 86093c6..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-/**
- This package contains volume/ disk related classes.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
deleted file mode 100644
index ad68c4d..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
-import org.apache.hadoop.hdds.utils.MetaStoreIterator;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.apache.hadoop.hdds.utils.MetadataStore.KeyValue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.io.File;
-import java.io.IOException;
-import java.util.NoSuchElementException;
-
-
-/**
- * Block Iterator for KeyValue Container. This block iterator returns blocks
- * that match the {@link MetadataKeyFilters.KeyPrefixFilter}. If no filter
- * is specified, the default filter used is
- * {@link MetadataKeyFilters#getNormalKeyFilter()}.
- */
-@InterfaceAudience.Public
-public class KeyValueBlockIterator implements BlockIterator<BlockData>,
-    Closeable {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      KeyValueBlockIterator.class);
-
-  private MetaStoreIterator<KeyValue> blockIterator;
-  private final ReferenceCountedDB db;
-  private static final KeyPrefixFilter defaultBlockFilter = MetadataKeyFilters
-      .getNormalKeyFilter();
-  private KeyPrefixFilter blockFilter;
-  private BlockData nextBlock;
-  private long containerId;
-
-  /**
-   * KeyValueBlockIterator to iterate blocks in a container.
-   * @param id - container id
-   * @param path - container base path
-   * @throws IOException if the container could not be opened
-   */
-  public KeyValueBlockIterator(long id, File path)
-      throws IOException {
-    this(id, path, defaultBlockFilter);
-  }
-
-  /**
-   * KeyValueBlockIterator to iterate blocks in a container.
-   * @param id - container id
-   * @param path - container base path
-   * @param filter - Block filter, filter to be applied for blocks
-   * @throws IOException if the container could not be opened
-   */
-  public KeyValueBlockIterator(long id, File path, KeyPrefixFilter filter)
-      throws IOException {
-    containerId = id;
-    File metadataPath = new File(path, OzoneConsts.METADATA);
-    File containerFile = ContainerUtils.getContainerFile(metadataPath
-        .getParentFile());
-    ContainerData containerData = ContainerDataYaml.readContainerFile(
-        containerFile);
-    KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
-        containerData;
-    keyValueContainerData.setDbFile(KeyValueContainerLocationUtil
-        .getContainerDBFile(metadataPath, containerId));
-    db = BlockUtils.getDB(keyValueContainerData, new
-        OzoneConfiguration());
-    blockIterator = db.getStore().iterator();
-    blockFilter = filter;
-  }
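-
-  // A minimal usage sketch (illustrative, not part of the original class):
-  // iterate all normal (non-deleted-prefix) blocks of a container, where
-  // "containerId" and "containerBaseDir" are hypothetical caller variables.
-  //
-  //   try (KeyValueBlockIterator it =
-  //       new KeyValueBlockIterator(containerId, containerBaseDir)) {
-  //     while (it.hasNext()) {
-  //       BlockData block = it.nextBlock();
-  //       // ... process block ...
-  //     }
-  //   }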
-
-  /**
-   * Returns the next block matching the filter.
-   * @return next block
-   * @throws IOException if reading from the container DB fails
-   * @throws NoSuchElementException if there are no more matching blocks
-   */
-  @Override
-  public BlockData nextBlock() throws IOException, NoSuchElementException {
-    if (nextBlock != null) {
-      BlockData currentBlock = nextBlock;
-      nextBlock = null;
-      return currentBlock;
-    }
-    if (hasNext()) {
-      return nextBlock();
-    }
-    throw new NoSuchElementException("Block Iterator reached end for " +
-        "ContainerID " + containerId);
-  }
-
-  @Override
-  public boolean hasNext() throws IOException {
-    if (nextBlock != null) {
-      return true;
-    }
-    if (blockIterator.hasNext()) {
-      KeyValue block = blockIterator.next();
-      if (blockFilter.filterKey(null, block.getKey(), null)) {
-        nextBlock = BlockUtils.getBlockData(block.getValue());
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Block matching with filter found: blockID is : {} for " +
-              "containerID {}", nextBlock.getLocalID(), containerId);
-        }
-        return true;
-      }
-      // Skip the non-matching key and keep scanning for the next match.
-      return hasNext();
-    }
-    return false;
-  }
-
-  @Override
-  public void seekToFirst() {
-    nextBlock = null;
-    blockIterator.seekToFirst();
-  }
-
-  @Override
-  public void seekToLast() {
-    nextBlock = null;
-    blockIterator.seekToLast();
-  }
-
-  public void close() {
-    db.close();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
deleted file mode 100644
index a6e914b..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ /dev/null
@@ -1,730 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.file.Files;
-import java.nio.file.StandardCopyOption;
-import java.util.Map;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerDataProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerType;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdfs.util.Canceler;
-import org.apache.hadoop.hdfs.util.DataTransferThrottler;
-import org.apache.hadoop.io.nativeio.NativeIO;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker;
-import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers
-    .KeyValueContainerLocationUtil;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.io.FileUtils;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.CONTAINER_ALREADY_EXISTS;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.CONTAINER_FILES_CREATE_ERROR;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.CONTAINER_INTERNAL_ERROR;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_OPEN;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.DISK_OUT_OF_SPACE;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.ERROR_IN_COMPACT_DB;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.ERROR_IN_DB_SYNC;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.INVALID_CONTAINER_STATE;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.UNSUPPORTED_REQUEST;
-
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class to perform KeyValue Container operations. Any modifications to a
- * KeyValueContainer object should ideally be done via the API exposed by
- * the KeyValueHandler class.
- */
-public class KeyValueContainer implements Container<KeyValueContainerData> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeyValueContainer.class);
-
-  // Use a non-fair RW lock for better throughput; we may revisit this
-  // decision if it causes fairness issues.
-  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-
-  private final KeyValueContainerData containerData;
-  private Configuration config;
-
-  public KeyValueContainer(KeyValueContainerData containerData, Configuration
-      ozoneConfig) {
-    Preconditions.checkNotNull(containerData, "KeyValueContainerData cannot " +
-        "be null");
-    Preconditions.checkNotNull(ozoneConfig, "Ozone configuration cannot " +
-        "be null");
-    this.config = ozoneConfig;
-    this.containerData = containerData;
-  }
-
-  @Override
-  public void create(VolumeSet volumeSet, VolumeChoosingPolicy
-      volumeChoosingPolicy, String scmId) throws StorageContainerException {
-    Preconditions.checkNotNull(volumeChoosingPolicy, "VolumeChoosingPolicy " +
-        "cannot be null");
-    Preconditions.checkNotNull(volumeSet, "VolumeSet cannot be null");
-    Preconditions.checkNotNull(scmId, "scmId cannot be null");
-
-    File containerMetaDataPath = null;
-    //acquiring volumeset read lock
-    long maxSize = containerData.getMaxSize();
-    volumeSet.readLock();
-    try {
-      HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
-          .getVolumesList(), maxSize);
-      String hddsVolumeDir = containerVolume.getHddsRootDir().toString();
-
-      long containerID = containerData.getContainerID();
-
-      containerMetaDataPath = KeyValueContainerLocationUtil
-          .getContainerMetaDataPath(hddsVolumeDir, scmId, containerID);
-      containerData.setMetadataPath(containerMetaDataPath.getPath());
-
-      File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath(
-          hddsVolumeDir, scmId, containerID);
-
-      // Check if it is new Container.
-      ContainerUtils.verifyIsNewContainer(containerMetaDataPath);
-
-      // Create metadata path, chunks path, and metadata DB.
-      File dbFile = getContainerDBFile();
-      KeyValueContainerUtil.createContainerMetaData(containerMetaDataPath,
-          chunksPath, dbFile, config);
-
-      String impl = config.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
-          OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
-
-      //Set containerData for the KeyValueContainer.
-      containerData.setChunksPath(chunksPath.getPath());
-      containerData.setContainerDBType(impl);
-      containerData.setDbFile(dbFile);
-      containerData.setVolume(containerVolume);
-
-      // Create .container file
-      File containerFile = getContainerFile();
-      createContainerFile(containerFile);
-
-    } catch (StorageContainerException ex) {
-      if (containerMetaDataPath != null && containerMetaDataPath.getParentFile()
-          .exists()) {
-        FileUtil.fullyDelete(containerMetaDataPath.getParentFile());
-      }
-      throw ex;
-    } catch (DiskOutOfSpaceException ex) {
-      throw new StorageContainerException("Container creation failed, due to " +
-          "disk out of space", ex, DISK_OUT_OF_SPACE);
-    } catch (FileAlreadyExistsException ex) {
-      throw new StorageContainerException("Container creation failed because " +
-          "ContainerFile already exists", ex, CONTAINER_ALREADY_EXISTS);
-    } catch (IOException ex) {
-      if (containerMetaDataPath != null && containerMetaDataPath.getParentFile()
-          .exists()) {
-        FileUtil.fullyDelete(containerMetaDataPath.getParentFile());
-      }
-      throw new StorageContainerException("Container creation failed.", ex,
-          CONTAINER_INTERNAL_ERROR);
-    } finally {
-      volumeSet.readUnlock();
-    }
-  }
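-
-  // A minimal creation sketch (illustrative, not part of the original class;
-  // RoundRobinVolumeChoosingPolicy is assumed to be the policy used by the
-  // caller):
-  //
-  //   KeyValueContainer container =
-  //       new KeyValueContainer(containerData, conf);
-  //   container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId);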
-
-  /**
-   * Set all of the path-related container data fields based on the naming
-   * conventions.
-   *
-   * @param scmId id of the SCM the container belongs to
-   * @param containerVolume volume the container resides on
-   * @param hddsVolumeDir root directory of the HDDS volume
-   */
-  public void populatePathFields(String scmId,
-      HddsVolume containerVolume, String hddsVolumeDir) {
-
-    long containerId = containerData.getContainerID();
-
-    File containerMetaDataPath = KeyValueContainerLocationUtil
-        .getContainerMetaDataPath(hddsVolumeDir, scmId, containerId);
-
-    File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath(
-        hddsVolumeDir, scmId, containerId);
-    File dbFile = KeyValueContainerLocationUtil.getContainerDBFile(
-        containerMetaDataPath, containerId);
-
-    //Set containerData for the KeyValueContainer.
-    containerData.setMetadataPath(containerMetaDataPath.getPath());
-    containerData.setChunksPath(chunksPath.getPath());
-    containerData.setDbFile(dbFile);
-    containerData.setVolume(containerVolume);
-  }
-
-  /**
-   * Writes to the .container file.
-   *
-   * @param containerFile container file name
-   * @param isCreate true if creating a new file, false if updating an
-   *                 existing container file
-   * @throws StorageContainerException if the file cannot be written
-   */
-  private void writeToContainerFile(File containerFile, boolean isCreate)
-      throws StorageContainerException {
-    File tempContainerFile = null;
-    long containerId = containerData.getContainerID();
-    try {
-      tempContainerFile = createTempFile(containerFile);
-      ContainerDataYaml.createContainerFile(
-          ContainerType.KeyValueContainer, containerData, tempContainerFile);
-
-      // NativeIO.renameTo is an atomic function. But it might fail if the
-      // container file already exists. Hence, we handle the two cases
-      // separately.
-      if (isCreate) {
-        NativeIO.renameTo(tempContainerFile, containerFile);
-      } else {
-        Files.move(tempContainerFile.toPath(), containerFile.toPath(),
-            StandardCopyOption.REPLACE_EXISTING);
-      }
-
-    } catch (IOException ex) {
-      throw new StorageContainerException("Error while creating/ updating " +
-          ".container file. ContainerID: " + containerId, ex,
-          CONTAINER_FILES_CREATE_ERROR);
-    } finally {
-      if (tempContainerFile != null && tempContainerFile.exists()) {
-        if (!tempContainerFile.delete()) {
-          LOG.warn("Unable to delete container temporary file: {}.",
-              tempContainerFile.getAbsolutePath());
-        }
-      }
-    }
-  }
-
-  private void createContainerFile(File containerFile)
-      throws StorageContainerException {
-    writeToContainerFile(containerFile, true);
-  }
-
-  private void updateContainerFile(File containerFile)
-      throws StorageContainerException {
-    writeToContainerFile(containerFile, false);
-  }
-
-
-  @Override
-  public void delete() throws StorageContainerException {
-    long containerId = containerData.getContainerID();
-    try {
-      KeyValueContainerUtil.removeContainer(containerData, config);
-    } catch (StorageContainerException ex) {
-      throw ex;
-    } catch (IOException ex) {
-      // TODO : An I/O error during delete can leave partial artifacts on the
-      // disk. We will need the cleaner thread to cleanup this information.
-      String errMsg = String.format("Failed to cleanup container. ID: %d",
-          containerId);
-      LOG.error(errMsg, ex);
-      throw new StorageContainerException(errMsg, ex, CONTAINER_INTERNAL_ERROR);
-    }
-  }
-
-  @Override
-  public void markContainerForClose() throws StorageContainerException {
-    writeLock();
-    try {
-      if (getContainerState() != ContainerDataProto.State.OPEN) {
-        throw new StorageContainerException(
-            "Attempting to close a " + getContainerState() + " container.",
-            CONTAINER_NOT_OPEN);
-      }
-      updateContainerData(() ->
-          containerData.setState(ContainerDataProto.State.CLOSING));
-    } finally {
-      writeUnlock();
-    }
-  }
-
-  @Override
-  public void markContainerUnhealthy() throws StorageContainerException {
-    writeLock();
-    try {
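-      // Unlike markContainerForClose, there is no precondition on the
-      // current state: a container in any state may be marked UNHEALTHY.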
-      updateContainerData(() ->
-          containerData.setState(ContainerDataProto.State.UNHEALTHY));
-    } finally {
-      writeUnlock();
-    }
-  }
-
-  @Override
-  public void quasiClose() throws StorageContainerException {
-    // The DB must be synced during close operation
-    flushAndSyncDB();
-
-    writeLock();
-    try {
-      // Second sync should be a very light operation as sync has already
-      // been done outside the lock.
-      flushAndSyncDB();
-      updateContainerData(containerData::quasiCloseContainer);
-    } finally {
-      writeUnlock();
-    }
-  }
-
-  @Override
-  public void close() throws StorageContainerException {
-    // The DB must be synced during close operation
-    flushAndSyncDB();
-
-    writeLock();
-    try {
-      // Second sync should be a very light operation as sync has already
-      // been done outside the lock.
-      flushAndSyncDB();
-      updateContainerData(containerData::closeContainer);
-    } finally {
-      writeUnlock();
-    }
-    LOG.info("Container {} is closed with bcsId {}.",
-        containerData.getContainerID(),
-        containerData.getBlockCommitSequenceId());
-  }
-
-  /**
-   * Applies a state-changing update to the container data and persists it
-   * to the .container file. Must be invoked with the writeLock held.
-   *
-   * @param update the state mutation to apply before persisting
-   * @throws StorageContainerException if persisting the update fails
-   */
-  private void updateContainerData(Runnable update)
-      throws StorageContainerException {
-    Preconditions.checkState(hasWriteLock());
-    ContainerDataProto.State oldState = null;
-    try {
-      oldState = containerData.getState();
-      update.run();
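-      // The in-memory state is mutated first; if persisting it below fails,
-      // the catch block rolls the state back (unless the container has
-      // become unhealthy in the meantime).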
-      File containerFile = getContainerFile();
-      // update the new container data to .container File
-      updateContainerFile(containerFile);
-
-    } catch (StorageContainerException ex) {
-      if (oldState != null
-          && containerData.getState() != ContainerDataProto.State.UNHEALTHY) {
-        // Failed to update .container file. Reset the state to old state only
-        // if the current state is not unhealthy.
-        containerData.setState(oldState);
-      }
-      throw ex;
-    }
-  }
-
-  private void compactDB() throws StorageContainerException {
-    try {
-      try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
-        db.getStore().compactDB();
-      }
-    } catch (StorageContainerException ex) {
-      throw ex;
-    } catch (IOException ex) {
-      LOG.error("Error in DB compaction while closing container", ex);
-      throw new StorageContainerException(ex, ERROR_IN_COMPACT_DB);
-    }
-  }
-
-  private void flushAndSyncDB() throws StorageContainerException {
-    try {
-      try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
-        db.getStore().flushDB(true);
-        LOG.info("Container {} is synced with bcsId {}.",
-            containerData.getContainerID(),
-            containerData.getBlockCommitSequenceId());
-      }
-    } catch (StorageContainerException ex) {
-      throw ex;
-    } catch (IOException ex) {
-      LOG.error("Error in DB sync while closing container", ex);
-      throw new StorageContainerException(ex, ERROR_IN_DB_SYNC);
-    }
-  }
-
-  @Override
-  public KeyValueContainerData getContainerData()  {
-    return containerData;
-  }
-
-  @Override
-  public ContainerProtos.ContainerDataProto.State getContainerState() {
-    return containerData.getState();
-  }
-
-  @Override
-  public ContainerType getContainerType() {
-    return ContainerType.KeyValueContainer;
-  }
-
-  @Override
-  public void update(
-      Map<String, String> metadata, boolean forceUpdate)
-      throws StorageContainerException {
-
-    // TODO: Now, when writing the updated data to .container file, we are
-    // holding lock and writing data to disk. We can have async implementation
-    // to flush the update container data to disk.
-    long containerId = containerData.getContainerID();
-    if (!containerData.isValid()) {
-      LOG.debug("Invalid container data. ContainerID: {}", containerId);
-      throw new StorageContainerException("Invalid container data. " +
-          "ContainerID: " + containerId, INVALID_CONTAINER_STATE);
-    }
-    if (!forceUpdate && !containerData.isOpen()) {
-      throw new StorageContainerException(
-          "Updating a closed container without force option is not allowed. " +
-              "ContainerID: " + containerId, UNSUPPORTED_REQUEST);
-    }
-
-    Map<String, String> oldMetadata = containerData.getMetadata();
-    try {
-      writeLock();
-      for (Map.Entry<String, String> entry : metadata.entrySet()) {
-        containerData.addMetadata(entry.getKey(), entry.getValue());
-      }
-
-      File containerFile = getContainerFile();
-      // update the new container data to .container File
-      updateContainerFile(containerFile);
-    } catch (StorageContainerException  ex) {
-      containerData.setMetadata(oldMetadata);
-      throw ex;
-    } finally {
-      writeUnlock();
-    }
-  }
-
-  @Override
-  public void updateDeleteTransactionId(long deleteTransactionId) {
-    containerData.updateDeleteTransactionId(deleteTransactionId);
-  }
-
-  @Override
-  public KeyValueBlockIterator blockIterator() throws IOException {
-    return new KeyValueBlockIterator(containerData.getContainerID(), new File(
-        containerData.getContainerPath()));
-  }
-
-  @Override
-  public void importContainerData(InputStream input,
-      ContainerPacker<KeyValueContainerData> packer) throws IOException {
-    writeLock();
-    try {
-      if (getContainerFile().exists()) {
-        String errorMessage = String.format(
-            "Can't import container (cid=%d) data to a specific location"
-                + " as the container descriptor (%s) has already been exist.",
-            getContainerData().getContainerID(),
-            getContainerFile().getAbsolutePath());
-        throw new IOException(errorMessage);
-      }
-      //copy the values from the input stream to the final destination
-      // directory.
-      byte[] descriptorContent = packer.unpackContainerData(this, input);
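-      // The packer writes the chunk and metadata files straight to their
-      // destination directories and returns only the raw bytes of the
-      // .container descriptor, which are handled separately below.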
-
-      Preconditions.checkNotNull(descriptorContent,
-          "Container descriptor is missing from the container archive: "
-              + getContainerData().getContainerID());
-
-      //now, we have extracted the container descriptor from the previous
-      //datanode. We can load it and update it with the current data
-      // (original metadata + current filepath fields)
-      KeyValueContainerData originalContainerData =
-          (KeyValueContainerData) ContainerDataYaml
-              .readContainer(descriptorContent);
-
-      containerData.setState(originalContainerData.getState());
-      containerData
-          .setContainerDBType(originalContainerData.getContainerDBType());
-      containerData.setBytesUsed(originalContainerData.getBytesUsed());
-
-      //rewriting the yaml file with new checksum calculation.
-      update(originalContainerData.getMetadata(), true);
-
-      //fill in memory stat counter (keycount, byte usage)
-      KeyValueContainerUtil.parseKVContainerData(containerData, config);
-
-    } catch (Exception ex) {
-      //delete all the temporary data in case of any exception.
-      try {
-        FileUtils.deleteDirectory(new File(containerData.getMetadataPath()));
-        FileUtils.deleteDirectory(new File(containerData.getChunksPath()));
-        FileUtils.deleteDirectory(getContainerFile());
-      } catch (Exception deleteex) {
-        LOG.error(
-            "Cannot cleanup destination directories after a container import"
-                + " error (cid=" +
-                containerData.getContainerID() + ")", deleteex);
-      }
-      throw ex;
-    } finally {
-      writeUnlock();
-    }
-  }
-
-  @Override
-  public void exportContainerData(OutputStream destination,
-      ContainerPacker<KeyValueContainerData> packer) throws IOException {
-    if (getContainerData().getState() !=
-        ContainerProtos.ContainerDataProto.State.CLOSED) {
-      throw new IllegalStateException(
-          "Only closed containers could be exported: ContainerId="
-              + getContainerData().getContainerID());
-    }
-    compactDB();
-    packer.pack(this, destination);
-  }
-
-  /**
-   * Acquire read lock.
-   */
-  public void readLock() {
-    this.lock.readLock().lock();
-  }
-
-  /**
-   * Release read lock.
-   */
-  public void readUnlock() {
-    this.lock.readLock().unlock();
-  }
-
-  /**
-   * Check if the current thread holds read lock.
-   */
-  public boolean hasReadLock() {
-    // tryLock() would acquire (and leak) the lock rather than test it; use
-    // the reentrant read hold count of the current thread instead.
-    return this.lock.getReadHoldCount() > 0;
-  }
-
-  /**
-   * Acquire write lock.
-   */
-  public void writeLock() {
-    // TODO: The lock for KeyValueContainer object should not be exposed
-    // publicly.
-    this.lock.writeLock().lock();
-  }
-
-  /**
-   * Release write lock.
-   */
-  public void writeUnlock() {
-    this.lock.writeLock().unlock();
-  }
-
-  /**
-   * Check if the current thread holds write lock.
-   */
-  public boolean hasWriteLock() {
-    return this.lock.writeLock().isHeldByCurrentThread();
-  }
-
-  /**
-   * Acquire read lock, unless interrupted while waiting.
-   * @throws InterruptedException
-   */
-  @Override
-  public void readLockInterruptibly() throws InterruptedException {
-    this.lock.readLock().lockInterruptibly();
-  }
-
-  /**
-   * Acquire write lock, unless interrupted while waiting.
-   * @throws InterruptedException
-   */
-  @Override
-  public void writeLockInterruptibly() throws InterruptedException {
-    this.lock.writeLock().lockInterruptibly();
-  }
-
-  /**
-   * Returns containerFile.
-   * @return .container File name
-   */
-  @Override
-  public File getContainerFile() {
-    return getContainerFile(containerData.getMetadataPath(),
-            containerData.getContainerID());
-  }
-
-  static File getContainerFile(String metadataPath, long containerId) {
-    return new File(metadataPath,
-        containerId + OzoneConsts.CONTAINER_EXTENSION);
-  }
-
-  @Override
-  public void updateBlockCommitSequenceId(long blockCommitSequenceId) {
-    containerData.updateBlockCommitSequenceId(blockCommitSequenceId);
-  }
-
-  @Override
-  public long getBlockCommitSequenceId() {
-    return containerData.getBlockCommitSequenceId();
-  }
-
-  /**
-   * Returns KeyValueContainerReport for the KeyValueContainer.
-   */
-  @Override
-  public ContainerReplicaProto getContainerReport()
-      throws StorageContainerException {
-    ContainerReplicaProto.Builder ciBuilder =
-        ContainerReplicaProto.newBuilder();
-    ciBuilder.setContainerID(containerData.getContainerID())
-        .setReadCount(containerData.getReadCount())
-        .setWriteCount(containerData.getWriteCount())
-        .setReadBytes(containerData.getReadBytes())
-        .setWriteBytes(containerData.getWriteBytes())
-        .setKeyCount(containerData.getKeyCount())
-        .setUsed(containerData.getBytesUsed())
-        .setState(getHddsState())
-        .setDeleteTransactionId(containerData.getDeleteTransactionId())
-        .setBlockCommitSequenceId(containerData.getBlockCommitSequenceId())
-        .setOriginNodeId(containerData.getOriginNodeId());
-    return ciBuilder.build();
-  }
-
-  /**
-   * Returns LifeCycle State of the container.
-   * @return LifeCycle State of the container in HddsProtos format
-   * @throws StorageContainerException
-   */
-  private ContainerReplicaProto.State getHddsState()
-      throws StorageContainerException {
-    ContainerReplicaProto.State state;
-    switch (containerData.getState()) {
-    case OPEN:
-      state = ContainerReplicaProto.State.OPEN;
-      break;
-    case CLOSING:
-      state = ContainerReplicaProto.State.CLOSING;
-      break;
-    case QUASI_CLOSED:
-      state = ContainerReplicaProto.State.QUASI_CLOSED;
-      break;
-    case CLOSED:
-      state = ContainerReplicaProto.State.CLOSED;
-      break;
-    case UNHEALTHY:
-      state = ContainerReplicaProto.State.UNHEALTHY;
-      break;
-    default:
-      throw new StorageContainerException("Invalid Container state found: " +
-          containerData.getContainerID(), INVALID_CONTAINER_STATE);
-    }
-    return state;
-  }
-
-  /**
-   * Returns container DB file.
-   * @return the container DB file.
-   */
-  public File getContainerDBFile() {
-    return new File(containerData.getMetadataPath(), containerData
-        .getContainerID() + OzoneConsts.DN_CONTAINER_DB);
-  }
-
-  public boolean scanMetaData() {
-    long containerId = containerData.getContainerID();
-    KeyValueContainerCheck checker =
-        new KeyValueContainerCheck(containerData.getMetadataPath(), config,
-            containerId);
-    return checker.fastCheck();
-  }
-
-  @Override
-  public boolean shouldScanData() {
-    return containerData.getState() == ContainerDataProto.State.CLOSED
-        || containerData.getState() == ContainerDataProto.State.QUASI_CLOSED;
-  }
-
-  public boolean scanData(DataTransferThrottler throttler, Canceler canceler) {
-    if (!shouldScanData()) {
-      throw new IllegalStateException("The checksum verification can not be" +
-          " done for container in state "
-          + containerData.getState());
-    }
-
-    long containerId = containerData.getContainerID();
-    KeyValueContainerCheck checker =
-        new KeyValueContainerCheck(containerData.getMetadataPath(), config,
-            containerId);
-
-    return checker.fullCheck(throttler, canceler);
-  }
-
-  private enum ContainerCheckLevel {
-    NO_CHECK, FAST_CHECK, FULL_CHECK
-  }
-
-  /**
-   * Creates a temporary file next to the given file, in the same directory.
-   * @param file the target file whose name and parent seed the temp file
-   * @return the newly created temporary file
-   * @throws IOException if the temp file cannot be created
-   */
-  private File createTempFile(File file) throws IOException {
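-    // The temp file is created in the same directory as the target file so
-    // that the subsequent rename stays on one filesystem and can be atomic.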
-    return File.createTempFile("tmp_" + System.currentTimeMillis() + "_",
-        file.getName(), file.getParentFile());
-  }
-
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
deleted file mode 100644
index a4bd376..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.util.Canceler;
-import org.apache.hadoop.hdfs.util.DataTransferThrottler;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.common.ChecksumData;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Arrays;
-
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB;
-
-/**
- * Class to run integrity checks on Datanode Containers.
- * Provides infrastructure for data scrubbing.
- */
-public class KeyValueContainerCheck {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeyValueContainerCheck.class);
-
-  private long containerID;
-  private KeyValueContainerData onDiskContainerData; //loaded from fs/disk
-  private Configuration checkConfig;
-
-  private String metadataPath;
-
-  public KeyValueContainerCheck(String metadataPath, Configuration conf,
-      long containerID) {
-    Preconditions.checkArgument(metadataPath != null);
-
-    this.checkConfig = conf;
-    this.containerID = containerID;
-    this.onDiskContainerData = null;
-    this.metadataPath = metadataPath;
-  }
-
-  /**
-   * Run basic integrity checks on container metadata.
-   * These checks do not look inside the metadata files.
-   * Applicable for OPEN containers.
-   *
-   * @return true : integrity checks pass, false : otherwise.
-   */
-  public boolean fastCheck() {
-    LOG.info("Running basic checks for container {};", containerID);
-    boolean valid = false;
-    try {
-      loadContainerData();
-      checkLayout();
-      checkContainerFile();
-      valid = true;
-
-    } catch (IOException e) {
-      handleCorruption(e);
-    }
-
-    return valid;
-  }
-
-  /**
-   * Full checks comprise scanning all metadata inside the container,
-   * including the KV database. These checks are intrusive and consume more
-   * resources than fast checks, so they should only be done on Closed or
-   * Quasi-closed containers, with concurrency limited to delete workflows.
-   * <p>
-   * fullCheck is a superset of fastCheck.
-   *
-   * @return true : integrity checks pass, false : otherwise.
-   */
-  public boolean fullCheck(DataTransferThrottler throttler,
-      Canceler canceler) {
-    boolean valid;
-
-    try {
-      valid = fastCheck();
-      if (valid) {
-        scanData(throttler, canceler);
-      }
-    } catch (IOException e) {
-      handleCorruption(e);
-      valid = false;
-    }
-
-    return valid;
-  }
-
-  /**
-   * Check the integrity of the directory structure of the container.
-   */
-  private void checkLayout() throws IOException {
-
-    // is metadataPath accessible as a directory?
-    checkDirPath(metadataPath);
-
-    // is chunksPath accessible as a directory?
-    String chunksPath = onDiskContainerData.getChunksPath();
-    checkDirPath(chunksPath);
-  }
-
-  private void checkDirPath(String path) throws IOException {
-
-    File dirPath = new File(path);
-    String errStr;
-
-    try {
-      if (!dirPath.isDirectory()) {
-        errStr = "Not a directory [" + path + "]";
-        throw new IOException(errStr);
-      }
-    } catch (SecurityException se) {
-      throw new IOException("Security exception checking dir ["
-          + path + "]", se);
-    }
-
-    String[] ls = dirPath.list();
-    if (ls == null) {
-      // null result implies operation failed
-      errStr = "null listing for directory [" + path + "]";
-      throw new IOException(errStr);
-    }
-  }
-
-  private void checkContainerFile() throws IOException {
-    /*
-     * compare the values in the container file loaded from disk,
-     * with the values we are expecting
-     */
-    String dbType;
-    Preconditions
-        .checkState(onDiskContainerData != null, "Container File not loaded");
-
-    ContainerUtils.verifyChecksum(onDiskContainerData);
-
-    if (onDiskContainerData.getContainerType()
-        != ContainerProtos.ContainerType.KeyValueContainer) {
-      String errStr = "Bad Container type in Containerdata for " + containerID;
-      throw new IOException(errStr);
-    }
-
-    if (onDiskContainerData.getContainerID() != containerID) {
-      String errStr =
-          "Bad ContainerID field in ContainerData for " + containerID;
-      throw new IOException(errStr);
-    }
-
-    dbType = onDiskContainerData.getContainerDBType();
-    if (!dbType.equals(OZONE_METADATA_STORE_IMPL_ROCKSDB) &&
-        !dbType.equals(OZONE_METADATA_STORE_IMPL_LEVELDB)) {
-      String errStr = "Unknown DBType [" + dbType
-          + "] in Container File for  [" + containerID + "]";
-      throw new IOException(errStr);
-    }
-
-    KeyValueContainerData kvData = onDiskContainerData;
-    if (!metadataPath.equals(kvData.getMetadataPath())) {
-      String errStr =
-          "Bad metadata path in ContainerData for " + containerID
-              + ". Expected [" + metadataPath + "] Got ["
-              + kvData.getMetadataPath() + "]";
-      throw new IOException(errStr);
-    }
-  }
-
-  private void scanData(DataTransferThrottler throttler, Canceler canceler)
-      throws IOException {
-    /*
-     * Check the integrity of the DB inside each container.
-     * 1. iterate over each key (Block) and locate the chunks for the block
-     * 2. garbage detection (TBD): chunks which exist in the filesystem,
-     *    but not in the DB. This function will be implemented in HDDS-1202
-     * 3. chunk checksum verification.
-     */
-    Preconditions.checkState(onDiskContainerData != null,
-        "invoke loadContainerData prior to calling this function");
-    File dbFile;
-    File metaDir = new File(metadataPath);
-
-    dbFile = KeyValueContainerLocationUtil
-        .getContainerDBFile(metaDir, containerID);
-
-    if (!dbFile.exists() || !dbFile.canRead()) {
-      String dbFileErrorMsg = "Unable to access DB File [" + dbFile.toString()
-          + "] for Container [" + containerID + "] metadata path ["
-          + metadataPath + "]";
-      throw new IOException(dbFileErrorMsg);
-    }
-
-    onDiskContainerData.setDbFile(dbFile);
-    try (ReferenceCountedDB db =
-            BlockUtils.getDB(onDiskContainerData, checkConfig);
-        KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID,
-            new File(onDiskContainerData.getContainerPath()))) {
-
-      while (kvIter.hasNext()) {
-        BlockData block = kvIter.nextBlock();
-        for (ContainerProtos.ChunkInfo chunk : block.getChunks()) {
-          File chunkFile = ChunkUtils.getChunkFile(onDiskContainerData,
-              ChunkInfo.getFromProtoBuf(chunk));
-          if (!chunkFile.exists()) {
-            // concurrent mutation in Block DB? lookup the block again.
-            byte[] bdata = db.getStore().get(
-                Longs.toByteArray(block.getBlockID().getLocalID()));
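-            // A null result means the block was deleted concurrently, so the
-            // missing chunk file is expected; only a block still referenced
-            // in the DB with a missing file indicates corruption.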
-            if (bdata != null) {
-              throw new IOException("Missing chunk file "
-                  + chunkFile.getAbsolutePath());
-            }
-          } else if (chunk.getChecksumData().getType()
-              != ContainerProtos.ChecksumType.NONE) {
-            int length = chunk.getChecksumData().getChecksumsList().size();
-            ChecksumData cData = new ChecksumData(
-                chunk.getChecksumData().getType(),
-                chunk.getChecksumData().getBytesPerChecksum(),
-                chunk.getChecksumData().getChecksumsList());
-            Checksum cal = new Checksum(cData.getChecksumType(),
-                cData.getBytesPerChecksum());
-            long bytesRead = 0;
-            byte[] buffer = new byte[cData.getBytesPerChecksum()];
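-            // Re-read the chunk in bytesPerChecksum-sized slices, recompute
-            // the checksum of each slice and compare it with the stored
-            // value at the same index.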
-            try (InputStream fs = new FileInputStream(chunkFile)) {
-              for (int i = 0; i < length; i++) {
-                int v = fs.read(buffer);
-                if (v == -1) {
-                  break;
-                }
-                bytesRead += v;
-                throttler.throttle(v, canceler);
-                ByteString expected = cData.getChecksums().get(i);
-                ByteString actual = cal.computeChecksum(buffer, 0, v)
-                    .getChecksums().get(0);
-                if (!Arrays.equals(expected.toByteArray(),
-                    actual.toByteArray())) {
-                  throw new OzoneChecksumException(String
-                      .format("Inconsistent read for chunk=%s len=%d expected" +
-                              " checksum %s actual checksum %s for block %s",
-                          chunk.getChunkName(), chunk.getLen(),
-                          Arrays.toString(expected.toByteArray()),
-                          Arrays.toString(actual.toByteArray()),
-                          block.getBlockID()));
-                }
-
-              }
-              if (bytesRead != chunk.getLen()) {
-                throw new OzoneChecksumException(String
-                    .format("Inconsistent read for chunk=%s expected length=%d"
-                            + " actual length=%d for block %s",
-                        chunk.getChunkName(),
-                        chunk.getLen(), bytesRead, block.getBlockID()));
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  private void loadContainerData() throws IOException {
-    File containerFile = KeyValueContainer
-        .getContainerFile(metadataPath, containerID);
-
-    onDiskContainerData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
-  }
-
-  private void handleCorruption(IOException e) {
-    String errStr =
-        "Corruption detected in container: [" + containerID + "] ";
-    String logMessage = errStr + "Exception: [" + e.getMessage() + "]";
-    LOG.error(logMessage);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
deleted file mode 100644
index 2a9eedc..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import com.google.common.collect.Lists;
-import java.util.Collections;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerDataProto;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.yaml.snakeyaml.nodes.Tag;
-
-
-import java.io.File;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static java.lang.Math.max;
-import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE;
-import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH;
-
-/**
- * This class represents the KeyValueContainer metadata, which is the
- * in-memory representation of container metadata and is represented on disk
- * by the .container file.
- */
-public class KeyValueContainerData extends ContainerData {
-
-  // Yaml Tag used for KeyValueContainerData.
-  public static final Tag KEYVALUE_YAML_TAG = new Tag("KeyValueContainerData");
-
-  // Fields need to be stored in .container file.
-  private static final List<String> KV_YAML_FIELDS;
-
-  // Path to Container metadata Level DB/RocksDB Store and .container file.
-  private String metadataPath;
-
-  // Path to Physical file system where chunks are stored.
-  private String chunksPath;
-
-  //Type of DB used to store key to chunks mapping
-  private String containerDBType;
-
-  private File dbFile = null;
-
-  /**
-   * Number of pending deletion blocks in KeyValueContainer.
-   */
-  private final AtomicInteger numPendingDeletionBlocks;
-
-  private long deleteTransactionId;
-
-  private long blockCommitSequenceId;
-
-  static {
-    // Initialize YAML fields
-    KV_YAML_FIELDS = Lists.newArrayList();
-    KV_YAML_FIELDS.addAll(YAML_FIELDS);
-    KV_YAML_FIELDS.add(METADATA_PATH);
-    KV_YAML_FIELDS.add(CHUNKS_PATH);
-    KV_YAML_FIELDS.add(CONTAINER_DB_TYPE);
-  }
-
-  /**
-   * Constructs KeyValueContainerData object.
-   * @param id - ContainerId
-   * @param size - maximum size of the container in bytes
-   * @param originPipelineId - id of the pipeline where this container was
-   *                           first created
-   * @param originNodeId - id of the datanode where this container was first
-   *                       created
-   */
-  public KeyValueContainerData(long id, long size,
-      String originPipelineId, String originNodeId) {
-    super(ContainerProtos.ContainerType.KeyValueContainer, id, size,
-        originPipelineId, originNodeId);
-    this.numPendingDeletionBlocks = new AtomicInteger(0);
-    this.deleteTransactionId = 0;
-  }
-
-  /**
-   * Constructs KeyValueContainerData object.
-   * @param id - ContainerId
-   * @param layOutVersion - container layout version
-   * @param size - maximum size of the container in bytes
-   * @param originPipelineId - id of the pipeline where this container was
-   *                           first created
-   * @param originNodeId - id of the datanode where this container was first
-   *                       created
-   */
-  public KeyValueContainerData(long id, int layOutVersion, long size,
-      String originPipelineId, String originNodeId) {
-    super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion,
-        size, originPipelineId, originNodeId);
-    this.numPendingDeletionBlocks = new AtomicInteger(0);
-    this.deleteTransactionId = 0;
-  }
-
-  /**
-   * Sets Container dbFile. This should be called only during creation of
-   * KeyValue container.
-   * @param containerDbFile - container DB file
-   */
-  public void setDbFile(File containerDbFile) {
-    dbFile = containerDbFile;
-  }
-
-  /**
-   * Returns container DB file.
-   * @return dbFile
-   */
-  public File getDbFile() {
-    return dbFile;
-  }
-
-  /**
-   * Returns container metadata path.
-   * @return - Physical path where the container file and checksum are stored.
-   */
-  public String getMetadataPath() {
-    return metadataPath;
-  }
-
-  /**
-   * Sets container metadata path.
-   *
-   * @param path - String.
-   */
-  public void setMetadataPath(String path) {
-    this.metadataPath = path;
-  }
-
-  /**
-   * Returns the path to base dir of the container.
-   * @return Path to base dir
-   */
-  public String getContainerPath() {
-    if (metadataPath == null) {
-      return null;
-    }
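-    // The metadata dir sits directly under the container base dir, so its
-    // parent is the container path.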
-    return new File(metadataPath).getParent();
-  }
-
-  /**
-   * Returns the blockCommitSequenceId.
-   */
-  public long getBlockCommitSequenceId() {
-    return blockCommitSequenceId;
-  }
-
-  /**
-   * updates the blockCommitSequenceId.
-   */
-  public void updateBlockCommitSequenceId(long id) {
-    this.blockCommitSequenceId = id;
-  }
-
-  /**
-   * Get chunks path.
-   * @return - Path where chunks are stored
-   */
-  public String getChunksPath() {
-    return chunksPath;
-  }
-
-  /**
-   * Set chunks Path.
-   * @param chunkPath - File path.
-   */
-  public void setChunksPath(String chunkPath) {
-    this.chunksPath = chunkPath;
-  }
-
-  /**
-   * Returns the DBType used for the container.
-   * @return containerDBType
-   */
-  public String getContainerDBType() {
-    return containerDBType;
-  }
-
-  /**
-   * Sets the DBType used for the container.
-   * @param containerDBType - type of the DB (e.g. RocksDB or LevelDB)
-   */
-  public void setContainerDBType(String containerDBType) {
-    this.containerDBType = containerDBType;
-  }
-
-  /**
-   * Increase the count of pending deletion blocks.
-   *
-   * @param numBlocks increment number
-   */
-  public void incrPendingDeletionBlocks(int numBlocks) {
-    this.numPendingDeletionBlocks.addAndGet(numBlocks);
-  }
-
-  /**
-   * Decrease the count of pending deletion blocks.
-   *
-   * @param numBlocks decrement number
-   */
-  public void decrPendingDeletionBlocks(int numBlocks) {
-    this.numPendingDeletionBlocks.addAndGet(-1 * numBlocks);
-  }
-
-  /**
-   * Get the number of pending deletion blocks.
-   */
-  public int getNumPendingDeletionBlocks() {
-    return this.numPendingDeletionBlocks.get();
-  }
-
-  /**
-   * Sets deleteTransactionId to latest delete transactionId for the container.
-   *
-   * @param transactionId latest transactionId of the container.
-   */
-  public void updateDeleteTransactionId(long transactionId) {
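-    // max() keeps the id monotonically non-decreasing even if transactions
-    // are replayed out of order.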
-    deleteTransactionId = max(transactionId, deleteTransactionId);
-  }
-
-  /**
-   * Return the latest deleteTransactionId of the container.
-   */
-  public long getDeleteTransactionId() {
-    return deleteTransactionId;
-  }
-
-  /**
-   * Returns a ProtoBuf Message from ContainerData.
-   *
-   * @return Protocol Buffer Message
-   */
-  public ContainerDataProto getProtoBufMessage() {
-    ContainerDataProto.Builder builder = ContainerDataProto.newBuilder();
-    builder.setContainerID(this.getContainerID());
-    builder.setContainerPath(this.getMetadataPath());
-    builder.setState(this.getState());
-
-    for (Map.Entry<String, String> entry : getMetadata().entrySet()) {
-      ContainerProtos.KeyValue.Builder keyValBuilder =
-          ContainerProtos.KeyValue.newBuilder();
-      builder.addMetadata(keyValBuilder.setKey(entry.getKey())
-          .setValue(entry.getValue()).build());
-    }
-
-    if (this.getBytesUsed() >= 0) {
-      builder.setBytesUsed(this.getBytesUsed());
-    }
-
-    if (this.getContainerType() != null) {
-      builder.setContainerType(ContainerProtos.ContainerType.KeyValueContainer);
-    }
-
-    return builder.build();
-  }
-
-  public static List<String> getYamlFields() {
-    return Collections.unmodifiableList(KV_YAML_FIELDS);
-  }
-
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
deleted file mode 100644
index bc41883..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ /dev/null
@@ -1,1043 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.function.Function;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerDataProto.State;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .PutSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
-import org.apache.hadoop.hdds.scm.ByteStringConversion;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis
-    .DispatcherContext;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis
-    .DispatcherContext.WriteChunkStage;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume
-    .RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.SmallFileUtils;
-import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory;
-import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
-import org.apache.hadoop.util.AutoCloseableLock;
-import org.apache.hadoop.util.ReflectionUtils;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
-    Result.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Handler for KeyValue Container type.
- */
-public class KeyValueHandler extends Handler {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      KeyValueHandler.class);
-
-  private final ContainerType containerType;
-  private final BlockManager blockManager;
-  private final ChunkManager chunkManager;
-  private final VolumeChoosingPolicy volumeChoosingPolicy;
-  private final long maxContainerSize;
-  private final Function<ByteBuffer, ByteString> byteBufferToByteString;
-
-  // A lock that is held during container creation.
-  private final AutoCloseableLock containerCreationLock;
-  private final boolean doSyncWrite;
-
-  public KeyValueHandler(Configuration config, StateContext context,
-      ContainerSet contSet, VolumeSet volSet, ContainerMetrics metrics) {
-    super(config, context, contSet, volSet, metrics);
-    containerType = ContainerType.KeyValueContainer;
-    blockManager = new BlockManagerImpl(config);
-    doSyncWrite =
-        conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY,
-            OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT);
-    chunkManager = ChunkManagerFactory.getChunkManager(config, doSyncWrite);
-    volumeChoosingPolicy = ReflectionUtils.newInstance(conf.getClass(
-        HDDS_DATANODE_VOLUME_CHOOSING_POLICY, RoundRobinVolumeChoosingPolicy
-            .class, VolumeChoosingPolicy.class), conf);
-    maxContainerSize = (long) config.getStorageSize(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
-    // this handler lock is used for synchronizing createContainer Requests,
-    // so using a fair lock here.
-    containerCreationLock = new AutoCloseableLock(new ReentrantLock(true));
-    byteBufferToByteString =
-        ByteStringConversion.createByteBufferConversion(conf);
-  }
-
-  @VisibleForTesting
-  public VolumeChoosingPolicy getVolumeChoosingPolicyForTesting() {
-    return volumeChoosingPolicy;
-  }
-
-  @Override
-  public void stop() {
-  }
-
-  @Override
-  public ContainerCommandResponseProto handle(
-      ContainerCommandRequestProto request, Container container,
-      DispatcherContext dispatcherContext) {
-
-    Type cmdType = request.getCmdType();
-    KeyValueContainer kvContainer = (KeyValueContainer) container;
-    switch(cmdType) {
-    case CreateContainer:
-      return handleCreateContainer(request, kvContainer);
-    case ReadContainer:
-      return handleReadContainer(request, kvContainer);
-    case UpdateContainer:
-      return handleUpdateContainer(request, kvContainer);
-    case DeleteContainer:
-      return handleDeleteContainer(request, kvContainer);
-    case ListContainer:
-      return handleUnsupportedOp(request);
-    case CloseContainer:
-      return handleCloseContainer(request, kvContainer);
-    case PutBlock:
-      return handlePutBlock(request, kvContainer, dispatcherContext);
-    case GetBlock:
-      return handleGetBlock(request, kvContainer);
-    case DeleteBlock:
-      return handleDeleteBlock(request, kvContainer);
-    case ListBlock:
-      return handleUnsupportedOp(request);
-    case ReadChunk:
-      return handleReadChunk(request, kvContainer, dispatcherContext);
-    case DeleteChunk:
-      return handleDeleteChunk(request, kvContainer);
-    case WriteChunk:
-      return handleWriteChunk(request, kvContainer, dispatcherContext);
-    case ListChunk:
-      return handleUnsupportedOp(request);
-    case CompactChunk:
-      return handleUnsupportedOp(request);
-    case PutSmallFile:
-      return handlePutSmallFile(request, kvContainer, dispatcherContext);
-    case GetSmallFile:
-      return handleGetSmallFile(request, kvContainer);
-    case GetCommittedBlockLength:
-      return handleGetCommittedBlockLength(request, kvContainer);
-    default:
-      return null;
-    }
-  }
-
-  @VisibleForTesting
-  public ChunkManager getChunkManager() {
-    return this.chunkManager;
-  }
-
-  @VisibleForTesting
-  public BlockManager getBlockManager() {
-    return this.blockManager;
-  }
-
-  /**
-   * Handles Create Container Request. If successful, adds the container to
-   * ContainerSet and sends an ICR to the SCM.
-   */
-  ContainerCommandResponseProto handleCreateContainer(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
-    if (!request.hasCreateContainer()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Create Container request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-    // Create Container request should be passed a null container as the
-    // container would be created here.
-    Preconditions.checkArgument(kvContainer == null);
-
-    long containerID = request.getContainerID();
-
-    KeyValueContainerData newContainerData = new KeyValueContainerData(
-        containerID, maxContainerSize, request.getPipelineID(),
-        getDatanodeDetails().getUuidString());
-    // TODO: Add support to add metadataList to ContainerData. Add metadata
-    // to container during creation.
-    KeyValueContainer newContainer = new KeyValueContainer(
-        newContainerData, conf);
-
-    boolean created = false;
-    try (AutoCloseableLock l = containerCreationLock.acquire()) {
-      if (containerSet.getContainer(containerID) == null) {
-        newContainer.create(volumeSet, volumeChoosingPolicy, scmID);
-        created = containerSet.addContainer(newContainer);
-      } else {
-        // The create container request for an already existing container can
-        // arrive in case the ContainerStateMachine reapplies the transaction
-        // on datanode restart. Just log a debug msg here.
-        LOG.debug("Container already exists. Container Id: {}", containerID);
-      }
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    }
-
-    if (created) {
-      try {
-        sendICR(newContainer);
-      } catch (StorageContainerException ex) {
-        return ContainerUtils.logAndReturnError(LOG, ex, request);
-      }
-    }
-    return ContainerUtils.getSuccessResponse(request);
-  }
-
-  public void populateContainerPathFields(KeyValueContainer container,
-      long maxSize) throws IOException {
-    volumeSet.readLock();
-    try {
-      HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
-          .getVolumesList(), maxSize);
-      String hddsVolumeDir = containerVolume.getHddsRootDir().toString();
-      container.populatePathFields(scmID, containerVolume, hddsVolumeDir);
-    } finally {
-      volumeSet.readUnlock();
-    }
-  }
-
-  /**
-   * Handles Read Container Request. Returns the ContainerData as response.
-   */
-  ContainerCommandResponseProto handleReadContainer(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
-    if (!request.hasReadContainer()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Read Container request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-
-    // The container can become unhealthy after the lock is released.
-    // The operation will likely fail/timeout if that happens.
-    try {
-      checkContainerIsHealthy(kvContainer);
-    } catch (StorageContainerException sce) {
-      return ContainerUtils.logAndReturnError(LOG, sce, request);
-    }
-
-    KeyValueContainerData containerData = kvContainer.getContainerData();
-    return KeyValueContainerUtil.getReadContainerResponse(
-        request, containerData);
-  }
-
-
-  /**
-   * Handles Update Container Request. If successful, the container metadata
-   * is updated.
-   */
-  ContainerCommandResponseProto handleUpdateContainer(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
-
-    if (!request.hasUpdateContainer()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Update Container request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-
-    boolean forceUpdate = request.getUpdateContainer().getForceUpdate();
-    List<KeyValue> keyValueList =
-        request.getUpdateContainer().getMetadataList();
-    Map<String, String> metadata = new HashMap<>();
-    for (KeyValue keyValue : keyValueList) {
-      metadata.put(keyValue.getKey(), keyValue.getValue());
-    }
-
-    try {
-      if (!metadata.isEmpty()) {
-        kvContainer.update(metadata, forceUpdate);
-      }
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    }
-    return ContainerUtils.getSuccessResponse(request);
-  }
-
-  /**
-   * Handles Delete Container Request.
-   * Open containers cannot be deleted.
-   * Holds writeLock on ContainerSet till the container is removed from
-   * containerMap. On disk deletion of container files will happen
-   * asynchronously without the lock.
-   */
-  ContainerCommandResponseProto handleDeleteContainer(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
-
-    if (!request.hasDeleteContainer()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Delete container request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-
-    boolean forceDelete = request.getDeleteContainer().getForceDelete();
-    try {
-      deleteInternal(kvContainer, forceDelete);
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    }
-    return ContainerUtils.getSuccessResponse(request);
-  }
-
-  /**
-   * Handles Close Container Request. An open container is closed.
-   * Close Container call is idempotent.
-   */
-  ContainerCommandResponseProto handleCloseContainer(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
-
-    if (!request.hasCloseContainer()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Update Container request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-    try {
-      markContainerForClose(kvContainer);
-      closeContainer(kvContainer);
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    } catch (IOException ex) {
-      return ContainerUtils.logAndReturnError(LOG,
-          new StorageContainerException("Close Container failed", ex,
-              IO_EXCEPTION), request);
-    }
-
-    return ContainerUtils.getSuccessResponse(request);
-  }
-
-  /**
-   * Handle Put Block operation. Calls BlockManager to process the request.
-   */
-  ContainerCommandResponseProto handlePutBlock(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer,
-      DispatcherContext dispatcherContext) {
-
-    if (!request.hasPutBlock()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Put Key request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-
-    BlockData blockData;
-    try {
-      checkContainerOpen(kvContainer);
-
-      blockData = BlockData.getFromProtoBuf(
-          request.getPutBlock().getBlockData());
-      Preconditions.checkNotNull(blockData);
-      long bcsId =
-          dispatcherContext == null ? 0 : dispatcherContext.getLogIndex();
-      blockData.setBlockCommitSequenceId(bcsId);
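-      // The bcsId is taken from the log index of the applied transaction
-      // (presumably the Ratis log index); a null dispatcherContext falls
-      // back to 0.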
-      long numBytes = blockData.getProtoBufMessage().toByteArray().length;
-      blockManager.putBlock(kvContainer, blockData);
-      metrics.incContainerBytesStats(Type.PutBlock, numBytes);
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    } catch (IOException ex) {
-      return ContainerUtils.logAndReturnError(LOG,
-          new StorageContainerException("Put Key failed", ex, IO_EXCEPTION),
-          request);
-    }
-
-    return BlockUtils.putBlockResponseSuccess(request, blockData);
-  }
-
-  /**
-   * Handle Get Block operation. Calls BlockManager to process the request.
-   */
-  ContainerCommandResponseProto handleGetBlock(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
-
-    if (!request.hasGetBlock()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Get Key request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-
-    // The container can become unhealthy after the lock is released.
-    // The operation will likely fail/timeout if that happens.
-    try {
-      checkContainerIsHealthy(kvContainer);
-    } catch (StorageContainerException sce) {
-      return ContainerUtils.logAndReturnError(LOG, sce, request);
-    }
-
-    BlockData responseData;
-    try {
-      BlockID blockID = BlockID.getFromProtobuf(
-          request.getGetBlock().getBlockID());
-      responseData = blockManager.getBlock(kvContainer, blockID);
-      long numBytes = responseData.getProtoBufMessage().toByteArray().length;
-      metrics.incContainerBytesStats(Type.GetBlock, numBytes);
-
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    } catch (IOException ex) {
-      return ContainerUtils.logAndReturnError(LOG,
-          new StorageContainerException("Get Key failed", ex, IO_EXCEPTION),
-          request);
-    }
-
-    return BlockUtils.getBlockDataResponse(request, responseData);
-  }
-
-  /**
-   * Handles GetCommittedBlockLength operation.
-   * Calls BlockManager to process the request.
-   */
-  ContainerCommandResponseProto handleGetCommittedBlockLength(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
-    if (!request.hasGetCommittedBlockLength()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Get Key request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-
-    // The container can become unhealthy after the lock is released.
-    // The operation will likely fail/timeout if that happens.
-    try {
-      checkContainerIsHealthy(kvContainer);
-    } catch (StorageContainerException sce) {
-      return ContainerUtils.logAndReturnError(LOG, sce, request);
-    }
-
-    long blockLength;
-    try {
-      BlockID blockID = BlockID
-          .getFromProtobuf(request.getGetCommittedBlockLength().getBlockID());
-      blockLength = blockManager.getCommittedBlockLength(kvContainer, blockID);
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    } catch (IOException ex) {
-      return ContainerUtils.logAndReturnError(LOG,
-          new StorageContainerException("GetCommittedBlockLength failed", ex,
-              IO_EXCEPTION), request);
-    }
-
-    return BlockUtils.getBlockLengthResponse(request, blockLength);
-  }
-
-  /**
-   * Handle Delete Block operation. Calls BlockManager to process the request.
-   */
-  ContainerCommandResponseProto handleDeleteBlock(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
-
-    if (!request.hasDeleteBlock()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Delete Key request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-
-    try {
-      checkContainerOpen(kvContainer);
-
-      BlockID blockID = BlockID.getFromProtobuf(
-          request.getDeleteBlock().getBlockID());
-
-      blockManager.deleteBlock(kvContainer, blockID);
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    } catch (IOException ex) {
-      return ContainerUtils.logAndReturnError(LOG,
-          new StorageContainerException("Delete Key failed", ex, IO_EXCEPTION),
-          request);
-    }
-
-    return BlockUtils.getBlockResponseSuccess(request);
-  }
-
-  /**
-   * Handle Read Chunk operation. Calls ChunkManager to process the request.
-   */
-  ContainerCommandResponseProto handleReadChunk(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer,
-      DispatcherContext dispatcherContext) {
-
-    if (!request.hasReadChunk()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Read Chunk request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-
-    // The container can become unhealthy after the lock is released.
-    // The operation will likely fail/timeout if that happens.
-    try {
-      checkContainerIsHealthy(kvContainer);
-    } catch (StorageContainerException sce) {
-      return ContainerUtils.logAndReturnError(LOG, sce, request);
-    }
-
-    ChunkInfo chunkInfo;
-    ByteBuffer data;
-    try {
-      BlockID blockID = BlockID.getFromProtobuf(
-          request.getReadChunk().getBlockID());
-      chunkInfo = ChunkInfo.getFromProtoBuf(request.getReadChunk()
-          .getChunkData());
-      Preconditions.checkNotNull(chunkInfo);
-
-      if (dispatcherContext == null) {
-        dispatcherContext = new DispatcherContext.Builder().build();
-      }
-
-      data = chunkManager
-          .readChunk(kvContainer, blockID, chunkInfo, dispatcherContext);
-      metrics.incContainerBytesStats(Type.ReadChunk, chunkInfo.getLen());
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    } catch (IOException ex) {
-      return ContainerUtils.logAndReturnError(LOG,
-          new StorageContainerException("Read Chunk failed", ex, IO_EXCEPTION),
-          request);
-    }
-
-    Preconditions.checkNotNull(data, "Chunk data is null");
-
-    ContainerProtos.ReadChunkResponseProto.Builder response =
-        ContainerProtos.ReadChunkResponseProto.newBuilder();
-    response.setChunkData(chunkInfo.getProtoBufMessage());
-    response.setData(byteBufferToByteString.apply(data));
-    response.setBlockID(request.getReadChunk().getBlockID());
-
-    ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getSuccessResponseBuilder(request);
-    builder.setReadChunk(response);
-    return builder.build();
-  }
-
-  /**
-   * Throw an exception if the container is unhealthy.
-   *
-   * @param kvContainer the container to check.
-   * @throws StorageContainerException if the container is unhealthy.
-   */
-  @VisibleForTesting
-  void checkContainerIsHealthy(KeyValueContainer kvContainer)
-      throws StorageContainerException {
-    kvContainer.readLock();
-    try {
-      if (kvContainer.getContainerData().getState() == State.UNHEALTHY) {
-        throw new StorageContainerException(
-            "The container replica is unhealthy.",
-            CONTAINER_UNHEALTHY);
-      }
-    } finally {
-      kvContainer.readUnlock();
-    }
-  }
-
-  /**
-   * Handle Delete Chunk operation. Calls ChunkManager to process the request.
-   */
-  ContainerCommandResponseProto handleDeleteChunk(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
-
-    if (!request.hasDeleteChunk()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Delete Chunk request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-
-    // The container can become unhealthy after the lock is released.
-    // The operation will likely fail/timeout if that happens.
-    try {
-      checkContainerIsHealthy(kvContainer);
-    } catch (StorageContainerException sce) {
-      return ContainerUtils.logAndReturnError(LOG, sce, request);
-    }
-
-    try {
-      checkContainerOpen(kvContainer);
-
-      BlockID blockID = BlockID.getFromProtobuf(
-          request.getDeleteChunk().getBlockID());
-      ContainerProtos.ChunkInfo chunkInfoProto = request.getDeleteChunk()
-          .getChunkData();
-      ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
-      Preconditions.checkNotNull(chunkInfo);
-
-      chunkManager.deleteChunk(kvContainer, blockID, chunkInfo);
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    } catch (IOException ex) {
-      return ContainerUtils.logAndReturnError(LOG,
-          new StorageContainerException("Delete Chunk failed", ex,
-              IO_EXCEPTION), request);
-    }
-
-    return ChunkUtils.getChunkResponseSuccess(request);
-  }
-
-  /**
-   * Handle Write Chunk operation. Calls ChunkManager to process the request.
-   */
-  ContainerCommandResponseProto handleWriteChunk(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer,
-      DispatcherContext dispatcherContext) {
-
-    if (!request.hasWriteChunk()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Write Chunk request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-
-    try {
-      checkContainerOpen(kvContainer);
-
-      BlockID blockID = BlockID.getFromProtobuf(
-          request.getWriteChunk().getBlockID());
-      ContainerProtos.ChunkInfo chunkInfoProto =
-          request.getWriteChunk().getChunkData();
-      ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
-      Preconditions.checkNotNull(chunkInfo);
-
-      ByteBuffer data = null;
-      if (dispatcherContext == null) {
-        dispatcherContext = new DispatcherContext.Builder().build();
-      }
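-      // When replicated via Ratis, a WriteChunk arrives in two stages:
-      // WRITE_DATA persists the chunk bytes (log append) and COMMIT_DATA
-      // updates the metadata (log apply); COMBINED performs both in one
-      // call.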
-      WriteChunkStage stage = dispatcherContext.getStage();
-      if (stage == WriteChunkStage.WRITE_DATA ||
-          stage == WriteChunkStage.COMBINED) {
-        data = request.getWriteChunk().getData().asReadOnlyByteBuffer();
-      }
-
-      chunkManager
-          .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext);
-
-      // We should increment stats after writeChunk
-      if (stage == WriteChunkStage.WRITE_DATA ||
-          stage == WriteChunkStage.COMBINED) {
-        metrics.incContainerBytesStats(Type.WriteChunk, request.getWriteChunk()
-            .getChunkData().getLen());
-      }
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    } catch (IOException ex) {
-      return ContainerUtils.logAndReturnError(LOG,
-          new StorageContainerException("Write Chunk failed", ex, IO_EXCEPTION),
-          request);
-    }
-
-    return ChunkUtils.getChunkResponseSuccess(request);
-  }
-
-  /**
-   * Handle Put Small File operation. Writes the chunk and associated key
-   * using a single RPC. Calls BlockManager and ChunkManager to process the
-   * request.
-   */
-  ContainerCommandResponseProto handlePutSmallFile(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer,
-      DispatcherContext dispatcherContext) {
-
-    if (!request.hasPutSmallFile()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Put Small File request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-    PutSmallFileRequestProto putSmallFileReq =
-        request.getPutSmallFile();
-    BlockData blockData;
-
-    try {
-      checkContainerOpen(kvContainer);
-
-      BlockID blockID = BlockID.getFromProtobuf(putSmallFileReq.getBlock()
-          .getBlockData().getBlockID());
-      blockData = BlockData.getFromProtoBuf(
-          putSmallFileReq.getBlock().getBlockData());
-      Preconditions.checkNotNull(blockData);
-
-      ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(
-          putSmallFileReq.getChunkInfo());
-      Preconditions.checkNotNull(chunkInfo);
-      ByteBuffer data = putSmallFileReq.getData().asReadOnlyByteBuffer();
-      if (dispatcherContext == null) {
-        dispatcherContext = new DispatcherContext.Builder().build();
-      }
-
-      // chunks will be committed as a part of handling putSmallFile
-      // here. There is no need to maintain this info in openContainerBlockMap.
-      chunkManager
-          .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext);
-
-      List<ContainerProtos.ChunkInfo> chunks = new LinkedList<>();
-      chunks.add(chunkInfo.getProtoBufMessage());
-      blockData.setChunks(chunks);
-      blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex());
-
-      blockManager.putBlock(kvContainer, blockData);
-      metrics.incContainerBytesStats(Type.PutSmallFile, data.capacity());
-
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    } catch (IOException ex) {
-      return ContainerUtils.logAndReturnError(LOG,
-          new StorageContainerException("Read Chunk failed", ex,
-              PUT_SMALL_FILE_ERROR), request);
-    }
-
-    return SmallFileUtils.getPutFileResponseSuccess(request, blockData);
-  }
-
-  /**
-   * Handle Get Small File operation. Gets a data stream using a key. This
-   * helps in reducing the RPC overhead for small files. Calls BlockManager and
-   * ChunkManager to process the request.
-   */
-  ContainerCommandResponseProto handleGetSmallFile(
-      ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
-
-    if (!request.hasGetSmallFile()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Get Small File request. trace ID: {}",
-            request.getTraceID());
-      }
-      return ContainerUtils.malformedRequest(request);
-    }
-
-    // The container can become unhealthy after the lock is released.
-    // The operation will likely fail/timeout if that happens.
-    try {
-      checkContainerIsHealthy(kvContainer);
-    } catch (StorageContainerException sce) {
-      return ContainerUtils.logAndReturnError(LOG, sce, request);
-    }
-
-    GetSmallFileRequestProto getSmallFileReq = request.getGetSmallFile();
-
-    try {
-      BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getBlock()
-          .getBlockID());
-      BlockData responseData = blockManager.getBlock(kvContainer, blockID);
-
-      ContainerProtos.ChunkInfo chunkInfo = null;
-      ByteString dataBuf = ByteString.EMPTY;
-      DispatcherContext dispatcherContext =
-          new DispatcherContext.Builder().build();
-      for (ContainerProtos.ChunkInfo chunk : responseData.getChunks()) {
-        // if the block is committed, all chunks must have been committed.
-        // Tmp chunk files won't exist here.
-        ByteBuffer data = chunkManager.readChunk(kvContainer, blockID,
-            ChunkInfo.getFromProtoBuf(chunk), dispatcherContext);
-        ByteString current = byteBufferToByteString.apply(data);
-        dataBuf = dataBuf.concat(current);
-        chunkInfo = chunk;
-      }
-      metrics.incContainerBytesStats(Type.GetSmallFile, dataBuf.size());
-      return SmallFileUtils.getGetSmallFileResponseSuccess(request, dataBuf
-          .toByteArray(), ChunkInfo.getFromProtoBuf(chunkInfo));
-    } catch (StorageContainerException e) {
-      return ContainerUtils.logAndReturnError(LOG, e, request);
-    } catch (IOException ex) {
-      return ContainerUtils.logAndReturnError(LOG,
-          new StorageContainerException("Write Chunk failed", ex,
-              GET_SMALL_FILE_ERROR), request);
-    }
-  }
-
-  /**
-   * Handle unsupported operation.
-   */
-  ContainerCommandResponseProto handleUnsupportedOp(
-      ContainerCommandRequestProto request) {
-    // TODO : remove all unsupported operations or handle them.
-    return ContainerUtils.unsupportedRequest(request);
-  }
-
-  /**
-   * Check if container is open. Throw an exception otherwise.
-   * @param kvContainer the container to check.
-   * @throws StorageContainerException if the container is not open.
-   */
-  private void checkContainerOpen(KeyValueContainer kvContainer)
-      throws StorageContainerException {
-
-    final State containerState = kvContainer.getContainerState();
-
-    /*
-     * In the closing state, the follower still receives transactions from
-     * the leader. Once the leader enters the closing state it rejects
-     * further client requests, so only transactions issued before the
-     * leader's container started closing can arrive here, even though the
-     * local container may already be in the closing state.
-     */
-    if (containerState == State.OPEN || containerState == State.CLOSING) {
-      return;
-    }
-
-    final ContainerProtos.Result result;
-    switch (containerState) {
-    case QUASI_CLOSED:
-    case CLOSED:
-      result = CLOSED_CONTAINER_IO;
-      break;
-    case UNHEALTHY:
-      result = CONTAINER_UNHEALTHY;
-      break;
-    case INVALID:
-      result = INVALID_CONTAINER_STATE;
-      break;
-    default:
-      result = CONTAINER_INTERNAL_ERROR;
-    }
-    String msg = "Requested operation not allowed as ContainerState is " +
-        containerState;
-    throw new StorageContainerException(msg, result);
-  }
-
-  @Override
-  public Container importContainer(final long containerID,
-      final long maxSize, final String originPipelineId,
-      final String originNodeId, final InputStream rawContainerStream,
-      final TarContainerPacker packer)
-      throws IOException {
-
-    // TODO: Add layout version!
-    KeyValueContainerData containerData =
-        new KeyValueContainerData(containerID,
-            maxSize, originPipelineId, originNodeId);
-
-    KeyValueContainer container = new KeyValueContainer(containerData,
-        conf);
-
-    populateContainerPathFields(container, maxSize);
-    container.importContainerData(rawContainerStream, packer);
-    sendICR(container);
-    return container;
-
-  }
-
-  @Override
-  public void exportContainer(final Container container,
-      final OutputStream outputStream,
-      final TarContainerPacker packer)
-      throws IOException {
-    container.readLock();
-    try {
-      final KeyValueContainer kvc = (KeyValueContainer) container;
-      kvc.exportContainerData(outputStream, packer);
-    } finally {
-      container.readUnlock();
-    }
-  }
-
-  @Override
-  public void markContainerForClose(Container container)
-      throws IOException {
-    container.writeLock();
-    try {
-      // Move the container to CLOSING state only if it's OPEN
-      if (container.getContainerState() == State.OPEN) {
-        container.markContainerForClose();
-        sendICR(container);
-      }
-    } finally {
-      container.writeUnlock();
-    }
-  }
-
-  @Override
-  public void markContainerUnhealthy(Container container)
-      throws IOException {
-    container.writeLock();
-    try {
-      if (container.getContainerState() != State.UNHEALTHY) {
-        try {
-          container.markContainerUnhealthy();
-        } catch (IOException ex) {
-          // explicitly catch IOException here since this operation
-          // will fail if the RocksDB metadata is corrupted.
-          long id = container.getContainerData().getContainerID();
-          LOG.warn("Unexpected error while marking container " + id
-              + " as unhealthy", ex);
-        } finally {
-          sendICR(container);
-        }
-      }
-    } finally {
-      container.writeUnlock();
-    }
-  }
-
-  @Override
-  public void quasiCloseContainer(Container container)
-      throws IOException {
-    container.writeLock();
-    try {
-      final State state = container.getContainerState();
-      // Quasi close call is idempotent.
-      if (state == State.QUASI_CLOSED) {
-        return;
-      }
-      // The container has to be in CLOSING state.
-      if (state != State.CLOSING) {
-        ContainerProtos.Result error =
-            state == State.INVALID ? INVALID_CONTAINER_STATE :
-                CONTAINER_INTERNAL_ERROR;
-        throw new StorageContainerException(
-            "Cannot quasi close container #" + container.getContainerData()
-                .getContainerID() + " while in " + state + " state.", error);
-      }
-      container.quasiClose();
-      sendICR(container);
-    } finally {
-      container.writeUnlock();
-    }
-  }
-
-  @Override
-  public void closeContainer(Container container)
-      throws IOException {
-    container.writeLock();
-    try {
-      final State state = container.getContainerState();
-      // Close call is idempotent.
-      if (state == State.CLOSED) {
-        return;
-      }
-      if (state == State.UNHEALTHY) {
-        throw new StorageContainerException(
-            "Cannot close container #" + container.getContainerData()
-                .getContainerID() + " while in " + state + " state.",
-            ContainerProtos.Result.CONTAINER_UNHEALTHY);
-      }
-      // The container has to be either in CLOSING or in QUASI_CLOSED state.
-      if (state != State.CLOSING && state != State.QUASI_CLOSED) {
-        ContainerProtos.Result error =
-            state == State.INVALID ? INVALID_CONTAINER_STATE :
-                CONTAINER_INTERNAL_ERROR;
-        throw new StorageContainerException(
-            "Cannot close container #" + container.getContainerData()
-                .getContainerID() + " while in " + state + " state.", error);
-      }
-      container.close();
-      sendICR(container);
-    } finally {
-      container.writeUnlock();
-    }
-  }
-
-  @Override
-  public void deleteContainer(Container container, boolean force)
-      throws IOException {
-    deleteInternal(container, force);
-  }
-
-  private void deleteInternal(Container container, boolean force)
-      throws StorageContainerException {
-    container.writeLock();
-    try {
-      // If force is false, we check container state.
-      if (!force) {
-        // Check if container is open
-        if (container.getContainerData().isOpen()) {
-          throw new StorageContainerException(
-              "Deletion of Open Container is not allowed.",
-              DELETE_ON_OPEN_CONTAINER);
-        }
-      }
-      long containerId = container.getContainerData().getContainerID();
-      containerSet.removeContainer(containerId);
-    } finally {
-      container.writeUnlock();
-    }
-    // Avoid holding write locks for disk operations
-    container.delete();
-  }
-}
\ No newline at end of file
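Context for the removal: KeyValueHandler gated every mutating request on the
container state. A minimal, self-contained sketch of that gate (illustrative
only, not part of the deleted sources; the enum mirrors the State values used
above):

    enum State { OPEN, CLOSING, QUASI_CLOSED, CLOSED, UNHEALTHY, INVALID }

    // OPEN serves client writes; CLOSING still applies replicated
    // transactions that were issued before the leader began closing.
    static boolean acceptsWrites(State s) {
      return s == State.OPEN || s == State.CLOSING;
    }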
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java
deleted file mode 100644
index 13689a7..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java
+++ /dev/null
@@ -1,249 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import java.io.BufferedOutputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveOutputStream;
-import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
-import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
-import org.apache.commons.compress.compressors.CompressorException;
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-import org.apache.commons.compress.compressors.CompressorStreamFactory;
-import org.apache.commons.io.IOUtils;
-
-/**
- * Compress/uncompress KeyValueContainer data to a tar.gz archive.
- */
-public class TarContainerPacker
-    implements ContainerPacker<KeyValueContainerData> {
-
-  private static final String CHUNKS_DIR_NAME = OzoneConsts.STORAGE_DIR_CHUNKS;
-
-  private static final String DB_DIR_NAME = "db";
-
-  private static final String CONTAINER_FILE_NAME = "container.yaml";
-
-  /**
-   * Given an input stream (tar file), extract the data to the specified
-   * directories.
-   *
-   * @param container container which defines the destination structure.
-   * @param inputStream the input stream.
-   * @throws IOException
-   */
-  @Override
-  public byte[] unpackContainerData(Container<KeyValueContainerData> container,
-      InputStream inputStream)
-      throws IOException {
-    byte[] descriptorFileContent = null;
-    try {
-      KeyValueContainerData containerData = container.getContainerData();
-      CompressorInputStream compressorInputStream =
-          new CompressorStreamFactory()
-              .createCompressorInputStream(CompressorStreamFactory.GZIP,
-                  inputStream);
-
-      TarArchiveInputStream tarInput =
-          new TarArchiveInputStream(compressorInputStream);
-
-      TarArchiveEntry entry = tarInput.getNextTarEntry();
-      while (entry != null) {
-        String name = entry.getName();
-        if (name.startsWith(DB_DIR_NAME + "/")) {
-          Path destinationPath = containerData.getDbFile().toPath()
-              .resolve(name.substring(DB_DIR_NAME.length() + 1));
-          extractEntry(tarInput, entry.getSize(), destinationPath);
-        } else if (name.startsWith(CHUNKS_DIR_NAME + "/")) {
-          Path destinationPath = Paths.get(containerData.getChunksPath())
-              .resolve(name.substring(CHUNKS_DIR_NAME.length() + 1));
-          extractEntry(tarInput, entry.getSize(), destinationPath);
-        } else if (name.equals(CONTAINER_FILE_NAME)) {
-          // Capture the descriptor content for the caller. The .container
-          // file itself is written out in a separate step by the
-          // unpackContainerDescriptor call.
-          descriptorFileContent = readEntry(tarInput, entry);
-        } else {
-          throw new IllegalArgumentException(
-              "Unknown entry in the tar file: " + "" + name);
-        }
-        entry = tarInput.getNextTarEntry();
-      }
-      return descriptorFileContent;
-
-    } catch (CompressorException e) {
-      throw new IOException(
-          "Can't uncompress the given container: " + container
-              .getContainerData().getContainerID(),
-          e);
-    }
-  }
-
-  private void extractEntry(TarArchiveInputStream tarInput, long size,
-      Path path) throws IOException {
-    Preconditions.checkNotNull(path, "Path element should not be null");
-    Path parent = Preconditions.checkNotNull(path.getParent(),
-        "Path element should have a parent directory");
-    Files.createDirectories(parent);
-    try (BufferedOutputStream bos = new BufferedOutputStream(
-        new FileOutputStream(path.toAbsolutePath().toString()))) {
-      int bufferSize = 1024;
-      byte[] buffer = new byte[bufferSize + 1];
-      long remaining = size;
-      while (remaining > 0) {
-        int read =
-            tarInput.read(buffer, 0, (int) Math.min(remaining, bufferSize));
-        if (read >= 0) {
-          remaining -= read;
-          bos.write(buffer, 0, read);
-        } else {
-          remaining = 0;
-        }
-      }
-    }
-
-  }
-
-  /**
-   * Given a containerData, include all the required container data/metadata
-   * in a tar file.
-   *
-   * @param container Container to archive (data + metadata).
-   * @param destination   Destination tar file/stream.
-   * @throws IOException
-   */
-  @Override
-  public void pack(Container<KeyValueContainerData> container,
-      OutputStream destination)
-      throws IOException {
-
-    KeyValueContainerData containerData = container.getContainerData();
-
-    try (CompressorOutputStream gzippedOut = new CompressorStreamFactory()
-          .createCompressorOutputStream(CompressorStreamFactory.GZIP,
-              destination)) {
-
-      try (ArchiveOutputStream archiveOutputStream = new TarArchiveOutputStream(
-          gzippedOut)) {
-
-        includePath(containerData.getDbFile().toString(), DB_DIR_NAME,
-            archiveOutputStream);
-
-        includePath(containerData.getChunksPath(), CHUNKS_DIR_NAME,
-            archiveOutputStream);
-
-        includeFile(container.getContainerFile(),
-            CONTAINER_FILE_NAME,
-            archiveOutputStream);
-      }
-    } catch (CompressorException e) {
-      throw new IOException(
-          "Can't compress the container: " + containerData.getContainerID(),
-          e);
-    }
-
-  }
-
-  @Override
-  public byte[] unpackContainerDescriptor(InputStream inputStream)
-      throws IOException {
-    try {
-      CompressorInputStream compressorInputStream =
-          new CompressorStreamFactory()
-              .createCompressorInputStream(CompressorStreamFactory.GZIP,
-                  inputStream);
-
-      TarArchiveInputStream tarInput =
-          new TarArchiveInputStream(compressorInputStream);
-
-      TarArchiveEntry entry = tarInput.getNextTarEntry();
-      while (entry != null) {
-        String name = entry.getName();
-        if (name.equals(CONTAINER_FILE_NAME)) {
-          return readEntry(tarInput, entry);
-        }
-        entry = tarInput.getNextTarEntry();
-      }
-
-    } catch (CompressorException e) {
-      throw new IOException(
-          "Can't read the container descriptor from the container archive",
-          e);
-    }
-    throw new IOException(
-        "Container descriptor is missing from the container archive.");
-  }
-
-  private byte[] readEntry(TarArchiveInputStream tarInput,
-      TarArchiveEntry entry) throws IOException {
-    ByteArrayOutputStream bos = new ByteArrayOutputStream();
-    int bufferSize = 1024;
-    byte[] buffer = new byte[bufferSize + 1];
-    long remaining = entry.getSize();
-    while (remaining > 0) {
-      int read =
-          tarInput.read(buffer, 0, (int) Math.min(remaining, bufferSize));
-      if (read < 0) {
-        // stop on unexpected EOF instead of looping forever
-        break;
-      }
-      remaining -= read;
-      bos.write(buffer, 0, read);
-    }
-    return bos.toByteArray();
-  }
-
-  private void includePath(String containerPath, String subdir,
-      ArchiveOutputStream archiveOutputStream) throws IOException {
-
-    // Close the listing stream to avoid leaking a directory handle.
-    try (Stream<Path> dirEntries = Files.list(Paths.get(containerPath))) {
-      for (Path path : dirEntries.collect(Collectors.toList())) {
-        includeFile(path.toFile(), subdir + "/" + path.getFileName(),
-            archiveOutputStream);
-      }
-    }
-  }
-
-  private void includeFile(File file, String entryName,
-      ArchiveOutputStream archiveOutputStream) throws IOException {
-    ArchiveEntry archiveEntry =
-        archiveOutputStream.createArchiveEntry(file, entryName);
-    archiveOutputStream.putArchiveEntry(archiveEntry);
-    try (FileInputStream fis = new FileInputStream(file)) {
-      IOUtils.copy(fis, archiveOutputStream);
-    }
-    archiveOutputStream.closeArchiveEntry();
-  }
-
-}
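Context for the removal: TarContainerPacker layered gzip over tar via
commons-compress. A self-contained sketch of the same read path, walking
entries the way unpackContainerData did ("archive.tar.gz" is a placeholder
path):

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
    import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
    import org.apache.commons.compress.compressors.CompressorStreamFactory;

    public class ListContainerArchive {
      public static void main(String[] args) throws Exception {
        try (InputStream in = Files.newInputStream(Paths.get("archive.tar.gz"));
             TarArchiveInputStream tar = new TarArchiveInputStream(
                 new CompressorStreamFactory().createCompressorInputStream(
                     CompressorStreamFactory.GZIP, in))) {
          for (TarArchiveEntry e = tar.getNextTarEntry(); e != null;
               e = tar.getNextTarEntry()) {
            // db/..., chunks/... and container.yaml entries are expected
            System.out.println(e.getName() + " (" + e.getSize() + " bytes)");
          }
        }
      }
    }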
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
deleted file mode 100644
index da7c857..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetBlockResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
-    GetCommittedBlockLengthResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
-    PutBlockResponseProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-
-import java.io.IOException;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.NO_SUCH_BLOCK;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.UNABLE_TO_READ_METADATA_DB;
-
-/**
- * Utils functions to help block functions.
- */
-public final class BlockUtils {
-
-  /** Never constructed. **/
-  private BlockUtils() {
-
-  }
-  /**
-   * Get a DB handler for a given container.
-   * If the handler doesn't exist in cache yet, first create one and
-   * add into cache. This function is called with containerManager
-   * ReadLock held.
-   *
-   * @param containerData containerData.
-   * @param conf configuration.
-   * @return MetadataStore handle.
-   * @throws StorageContainerException
-   */
-  public static ReferenceCountedDB getDB(KeyValueContainerData containerData,
-                                    Configuration conf) throws
-      StorageContainerException {
-    Preconditions.checkNotNull(containerData);
-    ContainerCache cache = ContainerCache.getInstance(conf);
-    Preconditions.checkNotNull(cache);
-    Preconditions.checkNotNull(containerData.getDbFile());
-    try {
-      return cache.getDB(containerData.getContainerID(), containerData
-          .getContainerDBType(), containerData.getDbFile().getAbsolutePath(),
-          conf);
-    } catch (IOException ex) {
-      String message = String.format("Error opening DB. Container:%s " +
-          "ContainerPath:%s", containerData.getContainerID(), containerData
-          .getDbFile().getPath());
-      throw new StorageContainerException(message, ex,
-          UNABLE_TO_READ_METADATA_DB);
-    }
-  }
-  /**
-   * Remove a DB handler from cache.
-   *
-   * @param container - Container data.
-   * @param conf - Configuration.
-   */
-  public static void removeDB(KeyValueContainerData container, Configuration
-      conf) {
-    Preconditions.checkNotNull(container);
-    ContainerCache cache = ContainerCache.getInstance(conf);
-    Preconditions.checkNotNull(cache);
-    cache.removeDB(container.getDbFile().getAbsolutePath());
-  }
-
-  /**
-   * Shutdown all DB Handles.
-   *
-   * @param cache - Cache for DB Handles.
-   */
-  @SuppressWarnings("unchecked")
-  public static void shutdownCache(ContainerCache cache)  {
-    cache.shutdownCache();
-  }
-
-  /**
-   * Parses the {@link BlockData} from a bytes array.
-   *
-   * @param bytes Block data in bytes.
-   * @return Block data.
-   * @throws IOException if the bytes array is malformed or invalid.
-   */
-  public static BlockData getBlockData(byte[] bytes) throws IOException {
-    try {
-      ContainerProtos.BlockData blockData =
-          ContainerProtos.BlockData.parseFrom(bytes);
-      return BlockData.getFromProtoBuf(blockData);
-    } catch (IOException e) {
-      throw new StorageContainerException("Failed to parse block data from " +
-          "the bytes array.", e, NO_SUCH_BLOCK);
-    }
-  }
-
-  /**
-   * Returns putBlock response success.
-   * @param msg - Request.
-   * @return Response.
-   */
-  public static ContainerCommandResponseProto putBlockResponseSuccess(
-      ContainerCommandRequestProto msg, BlockData blockData) {
-    ContainerProtos.BlockData blockDataProto = blockData.getProtoBufMessage();
-    GetCommittedBlockLengthResponseProto.Builder
-        committedBlockLengthResponseBuilder =
-        getCommittedBlockLengthResponseBuilder(blockData.getSize(),
-            blockDataProto.getBlockID());
-    PutBlockResponseProto.Builder putKeyResponse =
-        PutBlockResponseProto.newBuilder();
-    putKeyResponse
-        .setCommittedBlockLength(committedBlockLengthResponseBuilder);
-    ContainerProtos.ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getSuccessResponseBuilder(msg);
-    builder.setPutBlock(putKeyResponse);
-    return builder.build();
-  }
-  /**
-   * Returns successful blockResponse.
-   * @param msg - Request.
-   * @return Response.
-   */
-  public static ContainerCommandResponseProto getBlockResponseSuccess(
-      ContainerCommandRequestProto msg) {
-    return ContainerUtils.getSuccessResponse(msg);
-  }
-
-
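-  /**
-   * Returns successful getBlock response with the requested block data.
-   * @param msg - Request.
-   * @return Response.
-   */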
-  public static ContainerCommandResponseProto getBlockDataResponse(
-      ContainerCommandRequestProto msg, BlockData data) {
-    GetBlockResponseProto.Builder getBlock = ContainerProtos
-        .GetBlockResponseProto
-        .newBuilder();
-    getBlock.setBlockData(data.getProtoBufMessage());
-    ContainerProtos.ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getSuccessResponseBuilder(msg);
-    builder.setGetBlock(getBlock);
-    return builder.build();
-  }
-
-  /**
-   * Returns successful getCommittedBlockLength Response.
-   * @param msg - Request.
-   * @return Response.
-   */
-  public static ContainerCommandResponseProto getBlockLengthResponse(
-          ContainerCommandRequestProto msg, long blockLength) {
-    GetCommittedBlockLengthResponseProto.Builder
-        committedBlockLengthResponseBuilder =
-        getCommittedBlockLengthResponseBuilder(blockLength,
-            msg.getGetCommittedBlockLength().getBlockID());
-    ContainerProtos.ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getSuccessResponseBuilder(msg);
-    builder.setGetCommittedBlockLength(committedBlockLengthResponseBuilder);
-    return builder.build();
-  }
-
-  public static GetCommittedBlockLengthResponseProto.Builder
-          getCommittedBlockLengthResponseBuilder(long blockLength,
-      ContainerProtos.DatanodeBlockID blockID) {
-    ContainerProtos.GetCommittedBlockLengthResponseProto.Builder
-        getCommittedBlockLengthResponseBuilder = ContainerProtos.
-        GetCommittedBlockLengthResponseProto.newBuilder();
-    getCommittedBlockLengthResponseBuilder.setBlockLength(blockLength);
-    getCommittedBlockLengthResponseBuilder.setBlockID(blockID);
-    return getCommittedBlockLengthResponseBuilder;
-  }
-}
\ No newline at end of file
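Context for the removal: getDB handed out reference-counted handles from a
process-wide cache. A sketch of the intended calling pattern (containerData,
conf and key stand for values the caller already holds); closing the handle
releases the cache reference, not the shared store:

    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, conf)) {
      byte[] value = db.getStore().get(key);
      // use value; other callers may still hold references to the same DB
    }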
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
deleted file mode 100644
index 8ca59b5..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.helpers;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
-import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
-import org.apache.hadoop.util.Time;
-import org.apache.ratis.util.function.CheckedSupplier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.channels.FileLock;
-import java.nio.file.Path;
-import java.nio.file.StandardOpenOption;
-import java.security.NoSuchAlgorithmException;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutionException;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.*;
-
-/**
- * Utility methods for chunk operations for KeyValue container.
- */
-public final class ChunkUtils {
-
-  private static final Set<Path> LOCKS = ConcurrentHashMap.newKeySet();
-
-  /** Never constructed. **/
-  private ChunkUtils() {
-
-  }
-
-  /**
-   * Writes the data in chunk Info to the specified location in the chunkfile.
-   *
-   * @param chunkFile - File to write data to.
-   * @param chunkInfo - Data stream to write.
-   * @param data - The data buffer.
-   * @param volumeIOStats statistics collector
-   * @param sync whether to do fsync or not
-   */
-  public static void writeData(File chunkFile, ChunkInfo chunkInfo,
-      ByteBuffer data, VolumeIOStats volumeIOStats, boolean sync)
-      throws StorageContainerException, ExecutionException,
-      InterruptedException, NoSuchAlgorithmException {
-    final int bufferSize = data.remaining();
-    Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class);
-    if (bufferSize != chunkInfo.getLen()) {
-      String err = String.format("data array does not match the length " +
-              "specified. DataLen: %d Byte Array: %d",
-          chunkInfo.getLen(), bufferSize);
-      log.error(err);
-      throw new StorageContainerException(err, INVALID_WRITE_SIZE);
-    }
-
-    Path path = chunkFile.toPath();
-    long startTime = Time.monotonicNow();
-    processFileExclusively(path, () -> {
-      FileChannel file = null;
-      try {
-        // skip SYNC and DSYNC to reduce contention on file.lock
-        file = FileChannel.open(path,
-            StandardOpenOption.CREATE,
-            StandardOpenOption.WRITE,
-            StandardOpenOption.SPARSE);
-
-        int size;
-        try (FileLock ignored = file.lock()) {
-          size = file.write(data, chunkInfo.getOffset());
-        }
-
-        // Increment volumeIO stats here.
-        volumeIOStats.incWriteTime(Time.monotonicNow() - startTime);
-        volumeIOStats.incWriteOpCount();
-        volumeIOStats.incWriteBytes(size);
-        if (size != bufferSize) {
-          log.error("Invalid write size found. Size:{}  Expected: {} ", size,
-              bufferSize);
-          throw new StorageContainerException("Invalid write size found. " +
-              "Size: " + size + " Expected: " + bufferSize, INVALID_WRITE_SIZE);
-        }
-      } catch (StorageContainerException ex) {
-        throw ex;
-      } catch (IOException e) {
-        throw new StorageContainerException(e, IO_EXCEPTION);
-      } finally {
-        closeFile(file, sync);
-      }
-
-      return null;
-    });
-
-    if (log.isDebugEnabled()) {
-      log.debug("Write Chunk completed for chunkFile: {}, size {}", chunkFile,
-          bufferSize);
-    }
-  }
-
-  /**
-   * Reads data from an existing chunk file.
-   *
-   * @param chunkFile - file where data lives.
-   * @param data - chunk definition.
-   * @param volumeIOStats statistics collector
-   * @return ByteBuffer
-   */
-  public static ByteBuffer readData(File chunkFile, ChunkInfo data,
-      VolumeIOStats volumeIOStats) throws StorageContainerException {
-    Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class);
-
-    if (!chunkFile.exists()) {
-      log.error("Unable to find the chunk file. chunk info : {}",
-          data.toString());
-      throw new StorageContainerException("Unable to find the chunk file. " +
-          "chunk info " +
-          data.toString(), UNABLE_TO_FIND_CHUNK);
-    }
-
-    long offset = data.getOffset();
-    long len = data.getLen();
-    ByteBuffer buf = ByteBuffer.allocate((int) len);
-
-    Path path = chunkFile.toPath();
-    long startTime = Time.monotonicNow();
-    return processFileExclusively(path, () -> {
-      FileChannel file = null;
-
-      try {
-        file = FileChannel.open(path, StandardOpenOption.READ);
-
-        try (FileLock ignored = file.lock(offset, len, true)) {
-          file.read(buf, offset);
-          buf.flip();
-        }
-
-        // Increment volumeIO stats here.
-        volumeIOStats.incReadTime(Time.monotonicNow() - startTime);
-        volumeIOStats.incReadOpCount();
-        volumeIOStats.incReadBytes(len);
-
-        return buf;
-      } catch (IOException e) {
-        throw new StorageContainerException(e, IO_EXCEPTION);
-      } finally {
-        if (file != null) {
-          IOUtils.closeStream(file);
-        }
-      }
-    });
-  }
-
-  /**
-   * Checks whether a write request would overwrite an existing range of the
-   * chunk file, logging a warning if the overwrite was not explicitly
-   * requested.
-   *
-   * @param chunkFile - chunkFile to write data into.
-   * @param info - chunk info.
-   * @return true if the chunkFile exists and chunkOffset &lt; chunkFile length,
-   *         false otherwise.
-   */
-  public static boolean validateChunkForOverwrite(File chunkFile,
-      ChunkInfo info) {
-
-    Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class);
-
-    if (isOverWriteRequested(chunkFile, info)) {
-      if (!isOverWritePermitted(info)) {
-        log.warn("Duplicate write chunk request. Chunk overwrite " +
-            "without explicit request. {}", info.toString());
-      }
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * Returns the chunk File for the given chunk info, validating that the
-   * container's chunks directory exists.
-   *
-   * @param containerData - Container Data
-   * @param info - Chunk info
-   * @return - File pointing at the chunk inside the chunks directory.
-   * @throws StorageContainerException if the chunks directory is missing.
-   */
-  public static File getChunkFile(KeyValueContainerData containerData,
-                                  ChunkInfo info) throws
-      StorageContainerException {
-
-    Preconditions.checkNotNull(containerData, "Container data can't be null");
-    Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class);
-
-    String chunksPath = containerData.getChunksPath();
-    if (chunksPath == null) {
-      log.error("Chunks path is null in the container data");
-      throw new StorageContainerException("Unable to get Chunks directory.",
-          UNABLE_TO_FIND_DATA_DIR);
-    }
-    File chunksLoc = new File(chunksPath);
-    if (!chunksLoc.exists()) {
-      log.error("Chunks path does not exist");
-      throw new StorageContainerException("Unable to get Chunks directory.",
-          UNABLE_TO_FIND_DATA_DIR);
-    }
-
-    return chunksLoc.toPath().resolve(info.getChunkName()).toFile();
-  }
-
-  /**
-   * Checks if we are getting a request to overwrite an existing range of
-   * chunk.
-   *
-   * @param chunkFile - File
-   * @param chunkInfo - Buffer to write
-   * @return bool
-   */
-  public static boolean isOverWriteRequested(File chunkFile, ChunkInfo
-      chunkInfo) {
-
-    if (!chunkFile.exists()) {
-      return false;
-    }
-
-    long offset = chunkInfo.getOffset();
-    return offset < chunkFile.length();
-  }
-
-  /**
-   * Overwrite is permitted if and only if the user explicitly asks for it:
-   * the chunk metadata must carry the overwrite flag
-   * (OzoneConsts.CHUNK_OVERWRITE) with the value true.
-   *
-   * @param chunkInfo - Chunk info
-   * @return true if the user asks for it.
-   */
-  public static boolean isOverWritePermitted(ChunkInfo chunkInfo) {
-    String overWrite = chunkInfo.getMetadata().get(OzoneConsts.CHUNK_OVERWRITE);
-    return Boolean.parseBoolean(overWrite);
-  }
-
-  /**
-   * Returns a generic success response. This call is used by chunk
-   * operations (such as write and delete chunk) whose success responses
-   * carry no payload.
-   *
-   * @param msg Request
-   * @return Response.
-   */
-  public static ContainerCommandResponseProto getChunkResponseSuccess(
-      ContainerCommandRequestProto msg) {
-    return ContainerUtils.getSuccessResponse(msg);
-  }
-
-  @VisibleForTesting
-  static <T, E extends Exception> T processFileExclusively(
-      Path path, CheckedSupplier<T, E> op
-  ) throws E {
-    // Spin until this thread wins the per-path entry.
-    while (!LOCKS.add(path)) {
-      // busy-wait; contention on a single chunk file is expected to be rare
-    }
-
-    try {
-      return op.get();
-    } finally {
-      LOCKS.remove(path);
-    }
-  }
-
-  private static void closeFile(FileChannel file, boolean sync)
-      throws StorageContainerException {
-    if (file != null) {
-      try {
-        if (sync) {
-          // ensure data and metadata is persisted
-          file.force(true);
-        }
-        file.close();
-      } catch (IOException e) {
-        throw new StorageContainerException("Error closing chunk file",
-            e, CONTAINER_INTERNAL_ERROR);
-      }
-    }
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
deleted file mode 100644
index 0c7a04e..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.keyvalue.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.Storage;
-
-import java.io.File;
-
-/**
- * Class which provides utility methods for container locations.
- */
-public final class KeyValueContainerLocationUtil {
-
-  /* Never constructed. */
-  private KeyValueContainerLocationUtil() {
-
-  }
-  /**
-   * Returns Container Metadata Location.
-   * @param hddsVolumeDir base dir of the hdds volume where scm directories
-   *                      are stored
-   * @param scmId
-   * @param containerId
-   * @return containerMetadata Path to container metadata location where
-   * .container file will be stored.
-   */
-  public static File getContainerMetaDataPath(String hddsVolumeDir,
-                                              String scmId,
-                                              long containerId) {
-    String containerMetaDataPath =
-        getBaseContainerLocation(hddsVolumeDir, scmId,
-            containerId);
-    containerMetaDataPath = containerMetaDataPath + File.separator +
-        OzoneConsts.CONTAINER_META_PATH;
-    return new File(containerMetaDataPath);
-  }
-
-
-  /**
-   * Returns Container Chunks Location.
-   * @param baseDir
-   * @param scmId
-   * @param containerId
-   * @return chunksPath
-   */
-  public static File getChunksLocationPath(String baseDir, String scmId,
-                                           long containerId) {
-    String chunksPath = getBaseContainerLocation(baseDir, scmId, containerId)
-        + File.separator + OzoneConsts.STORAGE_DIR_CHUNKS;
-    return new File(chunksPath);
-  }
-
-  /**
-   * Returns base directory for specified container.
-   * @param hddsVolumeDir
-   * @param scmId
-   * @param containerId
-   * @return base directory for container.
-   */
-  private static String getBaseContainerLocation(String hddsVolumeDir,
-                                                 String scmId,
-                                                 long containerId) {
-    Preconditions.checkNotNull(hddsVolumeDir, "Base Directory cannot be null");
-    Preconditions.checkNotNull(scmId, "scmUuid cannot be null");
-    Preconditions.checkState(containerId >= 0,
-        "Container Id cannot be negative.");
-
-    String containerSubDirectory = getContainerSubDirectory(containerId);
-
-    String containerMetaDataPath = hddsVolumeDir  + File.separator + scmId +
-        File.separator + Storage.STORAGE_DIR_CURRENT + File.separator +
-        containerSubDirectory + File.separator + containerId;
-
-    return containerMetaDataPath;
-  }
-
-  /**
-   * Returns subdirectory, where this container needs to be placed.
-   * @param containerId
-   * @return container sub directory
-   */
-  private static String getContainerSubDirectory(long containerId) {
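-    // Shard containers across 256 subdirectories using bits 9..16 of the
-    // container id so no single directory grows too large.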
-    int directory = (int) ((containerId >> 9) & 0xFF);
-    return Storage.CONTAINER_DIR + directory;
-  }
-
-  /**
-   * Return containerDB File.
-   */
-  public static File getContainerDBFile(File containerMetaDataPath,
-      long containerID) {
-    return new File(containerMetaDataPath, containerID + OzoneConsts
-        .DN_CONTAINER_DB);
-  }
-}
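Context for the removal: these helpers mapped a container id
deterministically to its on-disk location, sharding ids into 256 buckets. A
worked example, assuming Storage.CONTAINER_DIR resolves to "containerDir",
Storage.STORAGE_DIR_CURRENT to "current" and OzoneConsts.CONTAINER_META_PATH
to "metadata" (the volume path and scm id are placeholders):

    long containerId = 100001L;
    int bucket = (int) ((containerId >> 9) & 0xFF);  // 100001 >> 9 = 195
    File metaDir = KeyValueContainerLocationUtil.getContainerMetaDataPath(
        "/data/hdds", "scm-uuid", containerId);
    // => /data/hdds/scm-uuid/current/containerDir195/100001/metadata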
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
deleted file mode 100644
index 3733b06..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.keyvalue.helpers;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.List;
-import java.util.Map;
-
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class which defines utility methods for KeyValueContainer.
- */
-
-public final class KeyValueContainerUtil {
-
-  /* Never constructed. */
-  private KeyValueContainerUtil() {
-
-  }
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      KeyValueContainerUtil.class);
-
-  /**
-   * Creates the metadata path, chunks path and metadata DB for the
-   * specified container.
-   *
-   * @param containerMetaDataPath path for the container metadata directory.
-   * @throws IOException if any of the directories or the DB cannot be created.
-   */
-  public static void createContainerMetaData(File containerMetaDataPath, File
-      chunksPath, File dbFile, Configuration conf) throws IOException {
-    Preconditions.checkNotNull(containerMetaDataPath);
-    Preconditions.checkNotNull(conf);
-
-    if (!containerMetaDataPath.mkdirs()) {
-      LOG.error("Unable to create directory for metadata storage. Path: {}",
-          containerMetaDataPath);
-      throw new IOException("Unable to create directory for metadata storage." +
-          " Path: " + containerMetaDataPath);
-    }
-    MetadataStore store = MetadataStoreBuilder.newBuilder().setConf(conf)
-        .setCreateIfMissing(true).setDbFile(dbFile).build();
-
-    // we close since the SCM pre-creates containers.
-    // we will open and put Db handle into a cache when keys are being created
-    // in a container.
-
-    store.close();
-
-    if (!chunksPath.mkdirs()) {
-      LOG.error("Unable to create chunks directory Container {}",
-          chunksPath);
-      //clean up container metadata path and metadata db
-      FileUtils.deleteDirectory(containerMetaDataPath);
-      FileUtils.deleteDirectory(containerMetaDataPath.getParentFile());
-      throw new IOException("Unable to create directory for data storage." +
-          " Path: " + chunksPath);
-    }
-  }
-
-  /**
-   * Removes a container's on-disk state.
-   * <p>
-   * There are three things we need to delete:
-   * <p>
-   * 1. The container file and metadata directory. 2. The level DB file.
-   * 3. The path that we created on the data location.
-   *
-   * @param containerData - Data of the container to remove.
-   * @param conf - configuration of the cluster.
-   * @throws IOException
-   */
-  public static void removeContainer(KeyValueContainerData containerData,
-                                     Configuration conf)
-      throws IOException {
-    Preconditions.checkNotNull(containerData);
-    File containerMetaDataPath = new File(containerData
-        .getMetadataPath());
-    File chunksPath = new File(containerData.getChunksPath());
-
-    // Close the DB connection and remove the DB handler from cache
-    BlockUtils.removeDB(containerData, conf);
-
-    // Delete the Container MetaData path.
-    FileUtils.deleteDirectory(containerMetaDataPath);
-
-    //Delete the Container Chunks Path.
-    FileUtils.deleteDirectory(chunksPath);
-
-    //Delete Container directory
-    FileUtils.deleteDirectory(containerMetaDataPath.getParentFile());
-  }
-
-  /**
-   * Returns a ReadContainer Response.
-   *
-   * @param request Request
-   * @param containerData - data
-   * @return Response.
-   */
-  public static ContainerCommandResponseProto getReadContainerResponse(
-      ContainerCommandRequestProto request,
-      KeyValueContainerData containerData) {
-    Preconditions.checkNotNull(containerData);
-
-    ContainerProtos.ReadContainerResponseProto.Builder response =
-        ContainerProtos.ReadContainerResponseProto.newBuilder();
-    response.setContainerData(containerData.getProtoBufMessage());
-
-    ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getSuccessResponseBuilder(request);
-    builder.setReadContainer(response);
-    return builder.build();
-  }
-
-  /**
-   * Parses KeyValueContainerData and verifies its checksum.
-   * @param kvContainerData - container data to parse
-   * @param config - configuration
-   * @throws IOException
-   */
-  public static void parseKVContainerData(KeyValueContainerData kvContainerData,
-      Configuration config) throws IOException {
-
-    long containerID = kvContainerData.getContainerID();
-    File metadataPath = new File(kvContainerData.getMetadataPath());
-
-    // Verify Checksum
-    ContainerUtils.verifyChecksum(kvContainerData);
-
-    File dbFile = KeyValueContainerLocationUtil.getContainerDBFile(
-        metadataPath, containerID);
-    if (!dbFile.exists()) {
-      LOG.error("Container DB file is missing for ContainerID {}. " +
-          "Skipping loading of this container.", containerID);
-      // Don't process this container further, as its db file is missing.
-      return;
-    }
-    kvContainerData.setDbFile(dbFile);
-
-    try(ReferenceCountedDB metadata =
-            BlockUtils.getDB(kvContainerData, config)) {
-      long bytesUsed = 0;
-      List<Map.Entry<byte[], byte[]>> liveKeys = metadata.getStore()
-          .getRangeKVs(null, Integer.MAX_VALUE,
-              MetadataKeyFilters.getNormalKeyFilter());
-
-      bytesUsed = liveKeys.parallelStream().mapToLong(e-> {
-        BlockData blockData;
-        try {
-          blockData = BlockUtils.getBlockData(e.getValue());
-          return blockData.getSize();
-        } catch (IOException ex) {
-          return 0L;
-        }
-      }).sum();
-      kvContainerData.setBytesUsed(bytesUsed);
-      kvContainerData.setKeyCount(liveKeys.size());
-      byte[] bcsId = metadata.getStore().get(DFSUtil.string2Bytes(
-          OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
-      if (bcsId != null) {
-        kvContainerData.updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
-      }
-    }
-  }
-
-  /**
-   * Returns the path where data or chunks live for a given container.
-   *
-   * @param kvContainerData - KeyValueContainerData
-   * @return - Path to the chunks directory
-   */
-  public static Path getDataDirectory(KeyValueContainerData kvContainerData) {
-
-    String chunksPath = kvContainerData.getChunksPath();
-    Preconditions.checkNotNull(chunksPath);
-
-    return Paths.get(chunksPath);
-  }
-
-  /**
-   * Container metadata directory -- here is where the level DB and
-   * .container file lives.
-   *
-   * @param kvContainerData - KeyValueContainerData
-   * @return Path to the metadata directory
-   */
-  public static Path getMetadataDirectory(
-      KeyValueContainerData kvContainerData) {
-
-    String metadataPath = kvContainerData.getMetadataPath();
-    Preconditions.checkNotNull(metadataPath);
-
-    return Paths.get(metadataPath);
-
-  }
-}
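
The createContainerMetaData method above follows a create-then-roll-back pattern: if the chunks directory cannot be created, the metadata tree created just before it is deleted, so no half-initialized container is left on disk. A minimal standalone sketch of that pattern in plain Java (the class and method names here are illustrative, not part of the deleted API):

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Comparator;
    import java.util.stream.Stream;

    /** Illustrative sketch: create two sibling directories, rolling back on
     *  partial failure. Not the deleted Ozone API. */
    public final class ContainerDirs {

      private ContainerDirs() { }

      public static void create(File metadataDir, File chunksDir) throws IOException {
        if (!metadataDir.mkdirs()) {
          throw new IOException("Unable to create metadata directory: " + metadataDir);
        }
        if (!chunksDir.mkdirs()) {
          // Roll back the metadata directory so no half-created container remains.
          deleteRecursively(metadataDir.toPath());
          throw new IOException("Unable to create chunks directory: " + chunksDir);
        }
      }

      private static void deleteRecursively(Path root) throws IOException {
        try (Stream<Path> walk = Files.walk(root)) {
          // Delete children before parents by sorting paths deepest-first.
          walk.sorted(Comparator.reverseOrder()).forEach(p -> p.toFile().delete());
        }
      }
    }

The same cleanup idea appears in the deleted code via FileUtils.deleteDirectory on both the metadata path and its parent.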
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
deleted file mode 100644
index ba2b02c..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.keyvalue.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-
-/**
- * Helper routines used by the putSmallFile and getSmallFile RPCs.
- */
-public final class SmallFileUtils {
-  /**
-   * Never Constructed.
-   */
-  private SmallFileUtils() {
-  }
-
-  /**
-   * Gets a response for the putSmallFile RPC.
-   * @param msg - ContainerCommandRequestProto
-   * @param blockData - committed block data for the response
-   * @return - ContainerCommandResponseProto
-   */
-  public static ContainerCommandResponseProto getPutFileResponseSuccess(
-      ContainerCommandRequestProto msg, BlockData blockData) {
-    ContainerProtos.PutSmallFileResponseProto.Builder getResponse =
-        ContainerProtos.PutSmallFileResponseProto.newBuilder();
-    ContainerProtos.BlockData blockDataProto = blockData.getProtoBufMessage();
-    ContainerProtos.GetCommittedBlockLengthResponseProto.Builder
-        committedBlockLengthResponseBuilder = BlockUtils
-        .getCommittedBlockLengthResponseBuilder(blockDataProto.getSize(),
-            blockDataProto.getBlockID());
-    getResponse.setCommittedBlockLength(committedBlockLengthResponseBuilder);
-    ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getSuccessResponseBuilder(msg);
-    builder.setCmdType(ContainerProtos.Type.PutSmallFile);
-    builder.setPutSmallFile(getResponse);
-    return builder.build();
-  }
-
-  /**
-   * Gets a response to the read small file call.
-   * @param msg - Msg
-   * @param data  - Data
-   * @param info  - Info
-   * @return    Response.
-   */
-  public static ContainerCommandResponseProto getGetSmallFileResponseSuccess(
-      ContainerCommandRequestProto msg, byte[] data, ChunkInfo info) {
-    Preconditions.checkNotNull(msg);
-
-    ContainerProtos.ReadChunkResponseProto.Builder readChunkresponse =
-        ContainerProtos.ReadChunkResponseProto.newBuilder();
-    readChunkresponse.setChunkData(info.getProtoBufMessage());
-    readChunkresponse.setData(ByteString.copyFrom(data));
-    readChunkresponse.setBlockID(msg.getGetSmallFile().getBlock().getBlockID());
-
-    ContainerProtos.GetSmallFileResponseProto.Builder getSmallFile =
-        ContainerProtos.GetSmallFileResponseProto.newBuilder();
-    getSmallFile.setData(readChunkresponse.build());
-    ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getSuccessResponseBuilder(msg);
-    builder.setCmdType(ContainerProtos.Type.GetSmallFile);
-    builder.setGetSmallFile(getSmallFile);
-    return builder.build();
-  }
-
-}
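
getGetSmallFileResponseSuccess above wraps the chunk bytes with ByteString.copyFrom, which copies rather than aliases the caller's array. A minimal JDK-only sketch of why that defensive copy matters (this class is illustrative, not part of the deleted API):

    import java.util.Arrays;

    /** Illustrative sketch: pairing chunk metadata with its bytes for a
     *  small-file read, with a defensive copy of the data. */
    public final class SmallFileResponse {

      private final String chunkName;
      private final long offset;
      private final byte[] data;

      public SmallFileResponse(String chunkName, long offset, byte[] data) {
        this.chunkName = chunkName;
        this.offset = offset;
        // Copy so later mutation of the caller's array cannot change the
        // response, mirroring what ByteString.copyFrom does in the deleted code.
        this.data = Arrays.copyOf(data, data.length);
      }

      public byte[] data() {
        return Arrays.copyOf(data, data.length);
      }

      @Override
      public String toString() {
        return "SmallFileResponse{" + chunkName + ", offset=" + offset
            + ", length=" + data.length + "}";
      }
    }

Without the copies, a caller reusing its byte[] buffer after building the response would silently corrupt the payload.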
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java
deleted file mode 100644
index 041f485..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.keyvalue.helpers;
-/**
- This package contains utility classes for KeyValue container type.
- **/
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
deleted file mode 100644
index 4272861..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
-import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
-import org.apache.hadoop.hdds.utils.BatchOperation;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNKNOWN_BCSID;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BCSID_MISMATCH;
-/**
- * This class is for performing block related operations on the KeyValue
- * Container.
- */
-public class BlockManagerImpl implements BlockManager {
-
-  static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class);
-  private static byte[] blockCommitSequenceIdKey =
-          DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
-
-  private Configuration config;
-
-  /**
-   * Constructs a Block Manager.
-   *
-   * @param conf - Ozone configuration
-   */
-  public BlockManagerImpl(Configuration conf) {
-    Preconditions.checkNotNull(conf, "Config cannot be null");
-    this.config = conf;
-  }
-
-  /**
-   * Puts or overwrites a block.
-   *
-   * @param container - Container to which the block needs to be added.
-   * @param data     - BlockData.
-   * @return length of the block.
-   * @throws IOException
-   */
-  public long putBlock(Container container, BlockData data) throws IOException {
-    Preconditions.checkNotNull(data, "BlockData cannot be null for put " +
-        "operation.");
-    Preconditions.checkState(data.getContainerID() >= 0, "Container Id " +
-        "cannot be negative");
-    // We are not locking the key manager since LevelDb serializes all actions
-    // against a single DB. We rely on DB level locking to avoid conflicts.
-    try(ReferenceCountedDB db = BlockUtils.
-        getDB((KeyValueContainerData) container.getContainerData(), config)) {
-      // This is a post condition that acts as a hint to the user.
-      // Should never fail.
-      Preconditions.checkNotNull(db, "DB cannot be null here");
-
-      long bcsId = data.getBlockCommitSequenceId();
-      long containerBCSId = ((KeyValueContainerData) container.
-          getContainerData()).getBlockCommitSequenceId();
-
-      // The default blockCommitSequenceId for any block is 0. If the putBlock
-      // request does not come via Ratis (as in test scenarios), it stays 0.
-      // In such cases, we should overwrite the block as well.
-      if (bcsId != 0) {
-        if (bcsId <= containerBCSId) {
-          // Since the blockCommitSequenceId stored in the db is greater than
-          // or equal to the blockCommitSequenceId to be updated, the putBlock
-          // transaction is being reapplied in the ContainerStateMachine on
-          // restart. It also implies that the given block must already exist
-          // in the db. Just log and return.
-          LOG.warn("blockCommitSequenceId " + containerBCSId
-              + " in the Container Db is greater than the supplied value "
-              + bcsId + ". Ignoring it.");
-          return data.getSize();
-        }
-      }
-      // update the blockData as well as BlockCommitSequenceId here
-      BatchOperation batch = new BatchOperation();
-      batch.put(Longs.toByteArray(data.getLocalID()),
-          data.getProtoBufMessage().toByteArray());
-      batch.put(blockCommitSequenceIdKey,
-          Longs.toByteArray(bcsId));
-      db.getStore().writeBatch(batch);
-      container.updateBlockCommitSequenceId(bcsId);
-      // Increment keycount here
-      container.getContainerData().incrKeyCount();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Block " + data.getBlockID() + " successfully committed with bcsId "
-                + bcsId + " chunk size " + data.getChunks().size());
-      }
-      return data.getSize();
-    }
-  }
-
-  /**
-   * Gets an existing block.
-   *
-   * @param container - Container from which the block needs to be fetched.
-   * @param blockID - BlockID of the block.
-   * @return Key Data.
-   * @throws IOException
-   */
-  @Override
-  public BlockData getBlock(Container container, BlockID blockID)
-      throws IOException {
-    long bcsId = blockID.getBlockCommitSequenceId();
-    Preconditions.checkNotNull(blockID,
-        "BlockID cannot be null in GetBlock request");
-    Preconditions.checkNotNull(container,
-        "Container cannot be null");
-
-    KeyValueContainerData containerData = (KeyValueContainerData) container
-        .getContainerData();
-    try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
-      // This is a post condition that acts as a hint to the user.
-      // Should never fail.
-      Preconditions.checkNotNull(db, "DB cannot be null here");
-
-      long containerBCSId = containerData.getBlockCommitSequenceId();
-      if (containerBCSId < bcsId) {
-        throw new StorageContainerException(
-            "Unable to find the block with bcsID " + bcsId + " .Container "
-                + container.getContainerData().getContainerID() + " bcsId is "
-                + containerBCSId + ".", UNKNOWN_BCSID);
-      }
-      byte[] kData = db.getStore().get(Longs.toByteArray(blockID.getLocalID()));
-      if (kData == null) {
-        throw new StorageContainerException("Unable to find the block." +
-            blockID, NO_SUCH_BLOCK);
-      }
-      ContainerProtos.BlockData blockData =
-          ContainerProtos.BlockData.parseFrom(kData);
-      long id = blockData.getBlockID().getBlockCommitSequenceId();
-      if (id < bcsId) {
-        throw new StorageContainerException(
-            "bcsId " + bcsId + " mismatches with existing block Id "
-                + id + " for block " + blockID + ".", BCSID_MISMATCH);
-      }
-      return BlockData.getFromProtoBuf(blockData);
-    }
-  }
-
-  /**
-   * Returns the length of the committed block.
-   *
-   * @param container - Container from which the block needs to be fetched.
-   * @param blockID - BlockID of the block.
-   * @return length of the block.
-   * @throws IOException in case, the block key does not exist in db.
-   */
-  @Override
-  public long getCommittedBlockLength(Container container, BlockID blockID)
-      throws IOException {
-    KeyValueContainerData containerData = (KeyValueContainerData) container
-        .getContainerData();
-    try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
-      // This is a post condition that acts as a hint to the user.
-      // Should never fail.
-      Preconditions.checkNotNull(db, "DB cannot be null here");
-      byte[] kData = db.getStore().get(Longs.toByteArray(blockID.getLocalID()));
-      if (kData == null) {
-        throw new StorageContainerException("Unable to find the block.",
-            NO_SUCH_BLOCK);
-      }
-      ContainerProtos.BlockData blockData =
-          ContainerProtos.BlockData.parseFrom(kData);
-      return blockData.getSize();
-    }
-  }
-
-  /**
-   * Deletes an existing block.
-   *
-   * @param container - Container from which the block needs to be deleted.
-   * @param blockID - ID of the block.
-   * @throws StorageContainerException
-   */
-  public void deleteBlock(Container container, BlockID blockID) throws
-      IOException {
-    Preconditions.checkNotNull(blockID, "block ID cannot be null.");
-    Preconditions.checkState(blockID.getContainerID() >= 0,
-        "Container ID cannot be negative.");
-    Preconditions.checkState(blockID.getLocalID() >= 0,
-        "Local ID cannot be negative.");
-
-    KeyValueContainerData cData = (KeyValueContainerData) container
-        .getContainerData();
-    try(ReferenceCountedDB db = BlockUtils.getDB(cData, config)) {
-      // This is a post condition that acts as a hint to the user.
-      // Should never fail.
-      Preconditions.checkNotNull(db, "DB cannot be null here");
-      // Note : There is a race condition here, since get and delete
-      // are not atomic. Leaving it here since the impact is refusing
-      // to delete a Block which might have just gotten inserted after
-      // the get check.
-      byte[] kKey = Longs.toByteArray(blockID.getLocalID());
-
-      byte[] kData = db.getStore().get(kKey);
-      if (kData == null) {
-        throw new StorageContainerException("Unable to find the block.",
-            NO_SUCH_BLOCK);
-      }
-      db.getStore().delete(kKey);
-      // Decrement blockcount here
-      container.getContainerData().decrKeyCount();
-    }
-  }
-
-  /**
-   * List blocks in a container.
-   *
-   * @param container - Container from which blocks need to be listed.
-   * @param startLocalID  - Key to start from, 0 to begin.
-   * @param count    - Number of blocks to return.
-   * @return List of Blocks that match the criteria.
-   */
-  @Override
-  public List<BlockData> listBlock(Container container, long startLocalID, int
-      count) throws IOException {
-    Preconditions.checkNotNull(container, "container cannot be null");
-    Preconditions.checkState(startLocalID >= 0, "startLocal ID cannot be " +
-        "negative");
-    Preconditions.checkArgument(count > 0,
-        "Count must be a positive number.");
-    container.readLock();
-    try {
-      List<BlockData> result = null;
-      KeyValueContainerData cData =
-          (KeyValueContainerData) container.getContainerData();
-      try (ReferenceCountedDB db = BlockUtils.getDB(cData, config)) {
-        result = new ArrayList<>();
-        byte[] startKeyInBytes = Longs.toByteArray(startLocalID);
-        List<Map.Entry<byte[], byte[]>> range = db.getStore()
-            .getSequentialRangeKVs(startKeyInBytes, count,
-                MetadataKeyFilters.getNormalKeyFilter());
-        for (Map.Entry<byte[], byte[]> entry : range) {
-          BlockData value = BlockUtils.getBlockData(entry.getValue());
-          BlockData data = new BlockData(value.getBlockID());
-          result.add(data);
-        }
-        return result;
-      }
-    } finally {
-      container.readUnlock();
-    }
-  }
-
-  /**
-   * Shutdown KeyValueContainerManager.
-   */
-  public void shutdown() {
-    BlockUtils.shutdownCache(ContainerCache.getInstance(config));
-  }
-}
\ No newline at end of file
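
The heart of putBlock above is the blockCommitSequenceId (bcsId) guard: a put whose bcsId is at or below the container's current bcsId is a replayed Ratis transaction and is skipped, while the block data and the new bcsId are otherwise written in a single batch. A minimal in-memory sketch of that guard, assuming a map-backed store in place of LevelDB (names are illustrative, not the deleted API):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    /** Illustrative in-memory sketch of the bcsId guard in putBlock. */
    public final class SequencedBlockStore {

      private final Map<Long, byte[]> blocks = new ConcurrentHashMap<>();
      private long containerBcsId;

      /** @return true if written, false if the put was a replayed transaction. */
      public synchronized boolean putBlock(long localId, byte[] blockData, long bcsId) {
        // bcsId == 0 marks a put that did not come through Ratis (e.g. tests);
        // such puts always overwrite, matching the deleted implementation.
        if (bcsId != 0 && bcsId <= containerBcsId) {
          return false; // replay: the block must already exist in the store
        }
        // The deleted code wrote the block and the new bcsId in one LevelDB
        // BatchOperation so they advance atomically; 'synchronized' stands in
        // for that atomicity here.
        blocks.put(localId, blockData.clone());
        if (bcsId != 0) {
          containerBcsId = bcsId;
        }
        return true;
      }
    }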
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
deleted file mode 100644
index fa9e205..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.impl;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.*;
-
-/**
- * Implementation of ChunkManager built for running performance tests.
- * Chunks are not written to disk; reads return zero-filled buffers.
- */
-public class ChunkManagerDummyImpl extends ChunkManagerImpl {
-  static final Logger LOG = LoggerFactory.getLogger(
-      ChunkManagerDummyImpl.class);
-
-  public ChunkManagerDummyImpl(boolean sync) {
-    super(sync);
-  }
-
-  /**
-   * writes a given chunk.
-   *
-   * @param container - Container for the chunk
-   * @param blockID - ID of the block
-   * @param info - ChunkInfo
-   * @param data - data of the chunk
-   * @param dispatcherContext - dispatcherContextInfo
-   * @throws StorageContainerException
-   */
-  @Override
-  public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
-      ByteBuffer data, DispatcherContext dispatcherContext)
-      throws StorageContainerException {
-    long writeTimeStart = Time.monotonicNow();
-
-    Preconditions.checkNotNull(dispatcherContext);
-    DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();
-
-    try {
-      KeyValueContainerData containerData =
-          (KeyValueContainerData) container.getContainerData();
-      HddsVolume volume = containerData.getVolume();
-      VolumeIOStats volumeIOStats = volume.getVolumeIOStats();
-      int bufferSize;
-
-      switch (stage) {
-      case WRITE_DATA:
-        bufferSize = data.capacity();
-        if (bufferSize != info.getLen()) {
-          String err = String.format("data array does not match the length "
-                  + "specified. DataLen: %d Byte Array: %d",
-              info.getLen(), bufferSize);
-          LOG.error(err);
-          throw new StorageContainerException(err, INVALID_WRITE_SIZE);
-        }
-
-        // Increment volumeIO stats here.
-        volumeIOStats.incWriteTime(Time.monotonicNow() - writeTimeStart);
-        volumeIOStats.incWriteOpCount();
-        volumeIOStats.incWriteBytes(info.getLen());
-        break;
-      case COMMIT_DATA:
-        updateContainerWriteStats(container, info, false);
-        break;
-      case COMBINED:
-        updateContainerWriteStats(container, info, false);
-        break;
-      default:
-        throw new IOException("Can not identify write operation.");
-      }
-    } catch (IOException ex) {
-      LOG.error("write data failed. error: {}", ex);
-      throw new StorageContainerException("Internal error: ", ex,
-          CONTAINER_INTERNAL_ERROR);
-    }
-  }
-
-  /**
-   * Returns a zero-filled buffer.
-   *
-   * @param container - Container for the chunk
-   * @param blockID - ID of the block.
-   * @param info - ChunkInfo.
-   * @param dispatcherContext dispatcher context info.
-   * @return byte array
-   * TODO: Right now we do not support partial reads and writes of chunks.
-   * TODO: Explore if we need to do that for ozone.
-   */
-  @Override
-  public ByteBuffer readChunk(Container container, BlockID blockID,
-      ChunkInfo info, DispatcherContext dispatcherContext) {
-
-    long readStartTime = Time.monotonicNow();
-
-    KeyValueContainerData containerData = (KeyValueContainerData) container
-        .getContainerData();
-    ByteBuffer data;
-    HddsVolume volume = containerData.getVolume();
-    VolumeIOStats volumeIOStats = volume.getVolumeIOStats();
-
-    data = ByteBuffer.allocate((int) info.getLen());
-
-    // Increment volumeIO stats here.
-    volumeIOStats.incReadTime(Time.monotonicNow() - readStartTime);
-    volumeIOStats.incReadOpCount();
-    volumeIOStats.incReadBytes(info.getLen());
-
-    return data;
-  }
-
-  /**
-   * Deletes a given chunk; only updates stats.
-   *
-   * @param container - Container for the chunk
-   * @param blockID - ID of the block
-   * @param info - Chunk Info
-   */
-  @Override
-  public void deleteChunk(Container container, BlockID blockID,
-      ChunkInfo info) {
-    Preconditions.checkNotNull(blockID, "Block ID cannot be null.");
-    KeyValueContainerData containerData =
-        (KeyValueContainerData) container.getContainerData();
-
-    if (info.getOffset() == 0) {
-      containerData.decrBytesUsed(info.getLen());
-    }
-  }
-}
\ No newline at end of file
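
Even though it discards the data, the dummy manager still exercises the accounting path: every read and write updates per-volume counters, using a monotonic clock for durations. A standalone sketch of such a stats holder, with LongAdder and System.nanoTime standing in for the deleted VolumeIOStats and Time.monotonicNow (names are illustrative):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.LongAdder;

    /** Illustrative sketch of per-volume IO accounting. */
    public final class IoStats {

      private final LongAdder readOps = new LongAdder();
      private final LongAdder readBytes = new LongAdder();
      private final LongAdder readTimeMs = new LongAdder();

      public void recordRead(long bytes, long startNanos) {
        // A monotonic clock (nanoTime) is used for durations; wall-clock time
        // can jump backwards and would corrupt the counters.
        readTimeMs.add(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
        readOps.increment();
        readBytes.add(bytes);
      }

      public long getReadOps()   { return readOps.sum(); }
      public long getReadBytes() { return readBytes.sum(); }
      public long getReadTimeMs() { return readTimeMs.sum(); }
    }

Usage mirrors the deleted readChunk: take a timestamp before the operation, then record once it completes, e.g. long t0 = System.nanoTime(); /* read */ stats.recordRead(len, t0);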
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
deleted file mode 100644
index 8549578..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.impl;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_SCRUB_ENABLED;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_SCRUB_ENABLED_DEFAULT;
-
-/**
- * Select an appropriate ChunkManager implementation as per config setting.
- * The Ozone ChunkManager is a singleton.
- */
-public final class ChunkManagerFactory {
-  static final Logger LOG = LoggerFactory.getLogger(ChunkManagerFactory.class);
-
-  private static volatile ChunkManager instance = null;
-  private static boolean syncChunks = false;
-
-  private ChunkManagerFactory() {
-  }
-
-  public static ChunkManager getChunkManager(Configuration config,
-      boolean sync) {
-    if (instance == null) {
-      synchronized (ChunkManagerFactory.class) {
-        if (instance == null) {
-          instance = createChunkManager(config, sync);
-          syncChunks = sync;
-        }
-      }
-    }
-
-    Preconditions.checkArgument((syncChunks == sync),
-        "value of sync conflicts with previous invocation");
-    return instance;
-  }
-
-  private static ChunkManager createChunkManager(Configuration config,
-      boolean sync) {
-    ChunkManager manager = null;
-    boolean persist = config.getBoolean(HDDS_CONTAINER_PERSISTDATA,
-        HDDS_CONTAINER_PERSISTDATA_DEFAULT);
-
-    if (!persist) {
-      boolean scrubber = config.getBoolean(
-          HDDS_CONTAINER_SCRUB_ENABLED,
-          HDDS_CONTAINER_SCRUB_ENABLED_DEFAULT);
-      if (scrubber) {
-        // Data Scrubber needs to be disabled for non-persistent chunks.
-        LOG.warn("Failed to set " + HDDS_CONTAINER_PERSISTDATA + " to false."
-            + " Please set " + HDDS_CONTAINER_SCRUB_ENABLED
-            + " also to false to enable non-persistent containers.");
-        persist = true;
-      }
-    }
-
-    if (persist) {
-      manager = new ChunkManagerImpl(sync);
-    } else {
-      LOG.warn(HDDS_CONTAINER_PERSISTDATA
-          + " is set to false. This should be used only for testing."
-          + " All user data will be discarded.");
-      manager = new ChunkManagerDummyImpl(sync);
-    }
-
-    return manager;
-  }
-}
\ No newline at end of file
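
getChunkManager above uses double-checked locking on a volatile field to create the singleton lazily. A distilled sketch of that idiom (illustrative class, not the deleted factory):

    /** Illustrative sketch of double-checked locking. The field must be
     *  volatile: without it a thread could observe a non-null reference to a
     *  partially constructed instance. */
    public final class LazySingleton {

      private static volatile LazySingleton instance;

      private LazySingleton() { }

      public static LazySingleton getInstance() {
        LazySingleton result = instance;   // single volatile read on the fast path
        if (result == null) {
          synchronized (LazySingleton.class) {
            result = instance;
            if (result == null) {          // re-check under the lock
              instance = result = new LazySingleton();
            }
          }
        }
        return result;
      }
    }

The deleted factory adds one more wrinkle: after the instance exists, every call still verifies that the sync flag matches the one used to build it, failing fast on conflicting configuration.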
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java
deleted file mode 100644
index e22841e..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.impl;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
-import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.file.Files;
-import java.nio.file.StandardCopyOption;
-import java.security.NoSuchAlgorithmException;
-import java.util.concurrent.ExecutionException;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.CONTAINER_INTERNAL_ERROR;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.NO_SUCH_ALGORITHM;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
-
-/**
- * This class is for performing chunk related operations.
- */
-public class ChunkManagerImpl implements ChunkManager {
-  static final Logger LOG = LoggerFactory.getLogger(ChunkManagerImpl.class);
-  private final boolean doSyncWrite;
-
-  public ChunkManagerImpl(boolean sync) {
-    doSyncWrite = sync;
-  }
-
-  /**
-   * writes a given chunk.
-   *
-   * @param container - Container for the chunk
-   * @param blockID - ID of the block
-   * @param info - ChunkInfo
-   * @param data - data of the chunk
-   * @param dispatcherContext - dispatcherContextInfo
-   * @throws StorageContainerException
-   */
-  public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
-      ByteBuffer data, DispatcherContext dispatcherContext)
-      throws StorageContainerException {
-    Preconditions.checkNotNull(dispatcherContext);
-    DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();
-    try {
-
-      KeyValueContainerData containerData = (KeyValueContainerData) container
-          .getContainerData();
-      HddsVolume volume = containerData.getVolume();
-      VolumeIOStats volumeIOStats = volume.getVolumeIOStats();
-
-      File chunkFile = ChunkUtils.getChunkFile(containerData, info);
-
-      boolean isOverwrite = ChunkUtils.validateChunkForOverwrite(
-          chunkFile, info);
-      File tmpChunkFile = getTmpChunkFile(chunkFile, dispatcherContext);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "writing chunk:{} chunk stage:{} chunk file:{} tmp chunk file:{}",
-            info.getChunkName(), stage, chunkFile, tmpChunkFile);
-      }
-
-      switch (stage) {
-      case WRITE_DATA:
-        if (isOverwrite) {
-          // If the actual chunk file already exists while the temp chunk file
-          // is being written, the same ozone client request has generated two
-          // raft log entries. This can happen because the retryCache expired
-          // in Ratis, or because of a log index mismatch/corruption in Ratis.
-          // Two approaches can address this as of now:
-          // 1. Read the complete data in the actual chunk file and verify its
-          //    integrity, rewriting only on a mismatch; or
-          // 2. Delete the chunk file and write the chunk again. For now,
-          //    let's rewrite the chunk file.
-          // TODO: once checksum support for write chunks gets plugged in,
-          // verify the checksum of the actual chunk file against the data to
-          // be written here; if they match we can safely return without
-          // rewriting.
-          LOG.warn("ChunkFile already exists: " + chunkFile
-              + ". Deleting it.");
-          FileUtil.fullyDelete(chunkFile);
-        }
-        if (tmpChunkFile.exists()) {
-          // If the tmp chunk file already exists it means the raft log got
-          // appended, but later on the log entry got truncated in Ratis leaving
-          // behind garbage.
-          // TODO: once the checksum support for data chunks gets plugged in,
-          // instead of rewriting the chunk here, let's compare the checkSums
-          LOG.warn("tmpChunkFile already exists: " + tmpChunkFile
-              + ". Overwriting it.");
-        }
-        // Initially writes to temporary chunk file.
-        ChunkUtils
-            .writeData(tmpChunkFile, info, data, volumeIOStats, doSyncWrite);
-        // No need to increment container stats here, as the data is not
-        // committed yet.
-        break;
-      case COMMIT_DATA:
-        // commit the data, means move chunk data from temporary chunk file
-        // to actual chunk file.
-        if (isOverwrite) {
-          // if the actual chunk file already exists , it implies the write
-          // chunk transaction in the containerStateMachine is getting
-          // reapplied. This can happen when a node restarts.
-          // TODO: verify the checkSums for the existing chunkFile and the
-          // chunkInfo to be committed here
-          LOG.warn("ChunkFile already exists" + chunkFile);
-          return;
-        }
-        // While committing a chunk, just rename the tmp chunk file, which has
-        // the same term and log index appended as the current transaction.
-        commitChunk(tmpChunkFile, chunkFile);
-        // Increment container stats here, as we commit the data.
-        updateContainerWriteStats(container, info, isOverwrite);
-        break;
-      case COMBINED:
-        // directly write to the chunk file
-        ChunkUtils.writeData(chunkFile, info, data, volumeIOStats, doSyncWrite);
-        updateContainerWriteStats(container, info, isOverwrite);
-        break;
-      default:
-        throw new IOException("Can not identify write operation.");
-      }
-    } catch (StorageContainerException ex) {
-      throw ex;
-    } catch (NoSuchAlgorithmException ex) {
-      LOG.error("write data failed. error: {}", ex);
-      throw new StorageContainerException("Internal error: ", ex,
-          NO_SUCH_ALGORITHM);
-    } catch (ExecutionException  | IOException ex) {
-      LOG.error("write data failed. error: {}", ex);
-      throw new StorageContainerException("Internal error: ", ex,
-          CONTAINER_INTERNAL_ERROR);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      LOG.error("write data failed. error: {}", e);
-      throw new StorageContainerException("Internal error: ", e,
-          CONTAINER_INTERNAL_ERROR);
-    }
-  }
-
-  protected void updateContainerWriteStats(Container container, ChunkInfo info,
-      boolean isOverwrite) {
-    KeyValueContainerData containerData = (KeyValueContainerData) container
-        .getContainerData();
-
-    if (!isOverwrite) {
-      containerData.incrBytesUsed(info.getLen());
-    }
-    containerData.incrWriteCount();
-    containerData.incrWriteBytes(info.getLen());
-  }
-
-  /**
-   * reads the data defined by a chunk.
-   *
-   * @param container - Container for the chunk
-   * @param blockID - ID of the block.
-   * @param info - ChunkInfo.
-   * @param dispatcherContext dispatcher context info.
-   * @return byte array
-   * @throws StorageContainerException
-   * TODO: Right now we do not support partial reads and writes of chunks.
-   * TODO: Explore if we need to do that for ozone.
-   */
-  public ByteBuffer readChunk(Container container, BlockID blockID,
-      ChunkInfo info, DispatcherContext dispatcherContext)
-      throws StorageContainerException {
-    KeyValueContainerData containerData = (KeyValueContainerData) container
-        .getContainerData();
-    ByteBuffer data;
-    HddsVolume volume = containerData.getVolume();
-    VolumeIOStats volumeIOStats = volume.getVolumeIOStats();
-
-    // Check which layout version the container has, and read the chunk file
-    // in that format.
-    // In version 1, we verify the checksum if it is available and return the
-    // data of the chunk file.
-    if (containerData.getLayOutVersion() == ChunkLayOutVersion
-        .getLatestVersion().getVersion()) {
-      File chunkFile = ChunkUtils.getChunkFile(containerData, info);
-
-      // In case the chunk file does not exist but tmp chunk file exist,
-      // read from tmp chunk file if readFromTmpFile is set to true
-      if (!chunkFile.exists() && dispatcherContext != null
-          && dispatcherContext.isReadFromTmpFile()) {
-        chunkFile = getTmpChunkFile(chunkFile, dispatcherContext);
-      }
-      data = ChunkUtils.readData(chunkFile, info, volumeIOStats);
-      containerData.incrReadCount();
-      long length = chunkFile.length();
-      containerData.incrReadBytes(length);
-      return data;
-    }
-    return null;
-  }
-
-  /**
-   * Deletes a given chunk.
-   *
-   * @param container - Container for the chunk
-   * @param blockID - ID of the block
-   * @param info - Chunk Info
-   * @throws StorageContainerException
-   */
-  public void deleteChunk(Container container, BlockID blockID, ChunkInfo info)
-      throws StorageContainerException {
-    Preconditions.checkNotNull(blockID, "Block ID cannot be null.");
-    KeyValueContainerData containerData = (KeyValueContainerData) container
-        .getContainerData();
-    // Check which layout version the container has, and perform the delete
-    // chunk operation accordingly.
-    // In version 1, we have only the chunk file.
-    if (containerData.getLayOutVersion() == ChunkLayOutVersion
-        .getLatestVersion().getVersion()) {
-      File chunkFile = ChunkUtils.getChunkFile(containerData, info);
-
-      // if the chunk file does not exist, it might have already been deleted.
-      // The call might be because of reapply of transactions on datanode
-      // restart.
-      if (!chunkFile.exists()) {
-        LOG.warn("Chunk file doe not exist. chunk info :" + info.toString());
-        return;
-      }
-      if ((info.getOffset() == 0) && (info.getLen() == chunkFile.length())) {
-        FileUtil.fullyDelete(chunkFile);
-        containerData.decrBytesUsed(chunkFile.length());
-      } else {
-        LOG.error("Not Supported Operation. Trying to delete a " +
-            "chunk that is in shared file. chunk info : " + info.toString());
-        throw new StorageContainerException("Not Supported Operation. " +
-            "Trying to delete a chunk that is in shared file. chunk info : "
-            + info.toString(), UNSUPPORTED_REQUEST);
-      }
-    }
-  }
-
-  /**
-   * Shutdown the chunkManager.
-   *
-   * In the chunkManager we haven't acquired any resources, so nothing to do
-   * here.
-   */
-  public void shutdown() {
-    //TODO: need to revisit this during integration of container IO.
-  }
-
-  /**
-   * Returns the temporary chunkFile path.
-   * @param chunkFile - actual chunk file
-   * @param dispatcherContext - dispatcher context info
-   * @return temporary chunkFile path
-   */
-  private File getTmpChunkFile(File chunkFile,
-      DispatcherContext dispatcherContext)  {
-    return new File(chunkFile.getParent(),
-        chunkFile.getName() +
-            OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER +
-            OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX +
-            OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER +
-            dispatcherContext.getTerm() +
-            OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER +
-            dispatcherContext.getLogIndex());
-  }
-
-  /**
-   * Commit the chunk by renaming the temporary chunk file to chunk file.
-   * @param tmpChunkFile - temporary chunk file to rename.
-   * @param chunkFile - final chunk file name.
-   * @throws IOException
-   */
-  private void commitChunk(File tmpChunkFile, File chunkFile) throws
-      IOException {
-    Files.move(tmpChunkFile.toPath(), chunkFile.toPath(),
-        StandardCopyOption.REPLACE_EXISTING);
-  }
-
-}
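
The WRITE_DATA / COMMIT_DATA split above stages each chunk in a temp file named with the Ratis term and log index, then publishes it in commitChunk with a single rename. A minimal sketch of that stage-then-rename commit using only java.nio (names are illustrative, not the deleted API):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;
    import java.nio.file.StandardOpenOption;

    /** Illustrative sketch: stage data in a sibling temp file, then publish
     *  it with one rename so readers never see a half-written chunk. */
    public final class AtomicChunkWriter {

      private AtomicChunkWriter() { }

      public static void writeAndCommit(Path chunkFile, byte[] data) throws IOException {
        Path tmp = chunkFile.resolveSibling(chunkFile.getFileName() + ".tmp");
        Files.write(tmp, data, StandardOpenOption.CREATE,
            StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE);
        // REPLACE_EXISTING mirrors the deleted commitChunk; on POSIX
        // filesystems a same-directory rename is atomic, so readers observe
        // either the old file or the complete new one.
        Files.move(tmp, chunkFile, StandardCopyOption.REPLACE_EXISTING);
      }
    }

Encoding the term and log index in the temp file name, as the deleted getTmpChunkFile does, additionally lets a replayed Ratis transaction find exactly the staging file it wrote earlier.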
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
deleted file mode 100644
index 564b50e..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.keyvalue.impl;
-/**
- * Chunk manager and block manager implementations for keyvalue container type.
- */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
deleted file mode 100644
index 6812b0d..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.interfaces;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * BlockManager is for performing block related operations on the container.
- */
-public interface BlockManager {
-
-  /**
-   * Puts or overwrites a block.
-   *
-   * @param container - Container to which the block needs to be added.
-   * @param data     - Block Data.
-   * @return length of the Block.
-   * @throws IOException
-   */
-  long putBlock(Container container, BlockData data) throws IOException;
-
-  /**
-   * Gets an existing block.
-   *
-   * @param container - Container from which the block needs to be fetched.
-   * @param blockID - BlockID of the Block.
-   * @return Block Data.
-   * @throws IOException
-   */
-  BlockData getBlock(Container container, BlockID blockID)
-      throws IOException;
-
-  /**
-   * Deletes an existing block.
-   *
-   * @param container - Container from which the block needs to be deleted.
-   * @param blockID - ID of the block.
-   * @throws StorageContainerException
-   */
-  void deleteBlock(Container container, BlockID blockID) throws IOException;
-
-  /**
-   * List blocks in a container.
-   *
-   * @param container - Container from which blocks need to be listed.
-   * @param startLocalID  - Block to start from, 0 to begin.
-   * @param count    - Number of blocks to return.
-   * @return List of Blocks that match the criteria.
-   */
-  List<BlockData> listBlock(Container container, long startLocalID, int count)
-      throws IOException;
-
-  /**
-   * Returns the last committed block length for the block.
-   * @param container - Container from which the block length is read.
-   * @param blockID - ID of the block.
-   * @return length of the last committed block.
-   */
-  long getCommittedBlockLength(Container container, BlockID blockID)
-      throws IOException;
-
-  /**
-   * Shutdown ContainerManager.
-   */
-  void shutdown();
-}
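
listBlock's contract, per the javadoc above, is ordered pagination: up to count blocks whose local ID is at or above startLocalID. A minimal in-memory sketch of that contract over a sorted map, standing in for the deleted LevelDB range scan (names are illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;

    /** Illustrative sketch of listBlock pagination over a sorted key space. */
    public final class BlockLister {

      private final TreeMap<Long, byte[]> blocks = new TreeMap<>();

      public void put(long localId, byte[] data) {
        blocks.put(localId, data);
      }

      /** Returns up to 'count' block IDs with localID >= startLocalId, in order. */
      public List<Long> listBlock(long startLocalId, int count) {
        if (count <= 0) {
          throw new IllegalArgumentException("Count must be a positive number.");
        }
        List<Long> result = new ArrayList<>(count);
        // tailMap(key, true) gives the inclusive range scan the deleted
        // getSequentialRangeKVs performed against LevelDB.
        for (Map.Entry<Long, byte[]> e : blocks.tailMap(startLocalId, true).entrySet()) {
          if (result.size() == count) {
            break;
          }
          result.add(e.getKey());
        }
        return result;
      }
    }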
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java
deleted file mode 100644
index 5adb641..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java
+++ /dev/null
@@ -1,83 +0,0 @@
-package org.apache.hadoop.ozone.container.keyvalue.interfaces;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-
-import java.nio.ByteBuffer;
-
-/**
- * ChunkManager allows reading, writing, deleting and listing of chunks in
- * a container.
- */
-public interface ChunkManager {
-
-  /**
-   * writes a given chunk.
-   *
-   * @param container - Container for the chunk
-   * @param blockID - ID of the block.
-   * @param info - ChunkInfo.
-   * @param data - data of the chunk.
-   * @param dispatcherContext - dispatcher context info.
-   * @throws StorageContainerException
-   */
-  void writeChunk(Container container, BlockID blockID, ChunkInfo info,
-      ByteBuffer data, DispatcherContext dispatcherContext)
-      throws StorageContainerException;
-
-  /**
-   * reads the data defined by a chunk.
-   *
-   * @param container - Container for the chunk
-   * @param blockID - ID of the block.
-   * @param info - ChunkInfo.
-   * @param dispatcherContext - dispatcher context info.
-   * @return  byte array
-   * @throws StorageContainerException
-   *
-   * TODO: Right now we do not support partial reads and writes of chunks.
-   * TODO: Explore if we need to do that for ozone.
-   */
-  ByteBuffer readChunk(Container container, BlockID blockID, ChunkInfo info,
-      DispatcherContext dispatcherContext) throws StorageContainerException;
-
-  /**
-   * Deletes a given chunk.
-   *
-   * @param container - Container for the chunk
-   * @param blockID - ID of the block.
-   * @param info  - Chunk Info
-   * @throws StorageContainerException
-   */
-  void deleteChunk(Container container, BlockID blockID, ChunkInfo info) throws
-      StorageContainerException;
-
-  // TODO : Support list operations.
-
-  /**
-   * Shutdown the chunkManager.
-   */
-  void shutdown();
-
-}
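
readChunk in this interface returns whole chunks only (no partial reads, per the TODO), but every implementation ultimately reduces to a positional read of an (offset, length) range from the chunk file. A standalone sketch of that read using FileChannel (illustrative helper, not part of the deleted API):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    /** Illustrative sketch: read one chunk's byte range from a chunk file. */
    public final class ChunkReader {

      private ChunkReader() { }

      public static ByteBuffer readChunk(Path chunkFile, long offset, int length)
          throws IOException {
        ByteBuffer buf = ByteBuffer.allocate(length);
        try (FileChannel ch = FileChannel.open(chunkFile, StandardOpenOption.READ)) {
          while (buf.hasRemaining()) {
            // Positional read: does not move a shared channel position, so
            // concurrent readers of the same file do not interfere.
            int n = ch.read(buf, offset + buf.position());
            if (n < 0) {
              throw new IOException("Chunk file shorter than expected: " + chunkFile);
            }
          }
        }
        buf.flip();
        return buf;
      }
    }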
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java
deleted file mode 100644
index 5129094..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.keyvalue.interfaces;
-/**
- * Chunk manager and block manager interfaces for keyvalue container type.
- */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
deleted file mode 100644
index 53c9f1e..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.keyvalue;
-/**
- This package contains classes for KeyValue container type.
- **/
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
deleted file mode 100644
index bc3f51a..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ /dev/null
@@ -1,332 +0,0 @@
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.statemachine.background;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.ratis.thirdparty.com.google.protobuf
-    .InvalidProtocolBufferException;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.BackgroundService;
-import org.apache.hadoop.hdds.utils.BackgroundTask;
-import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
-import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
-import org.apache.hadoop.hdds.utils.BatchOperation;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT;
-
-/**
- * A per-datanode container block deleting service that takes charge
- * of deleting stale ozone blocks.
- */
-// TODO: Fix BlockDeletingService to work with new StorageLayer
-public class BlockDeletingService extends BackgroundService {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BlockDeletingService.class);
-
-  private OzoneContainer ozoneContainer;
-  private ContainerDeletionChoosingPolicy containerDeletionPolicy;
-  private final Configuration conf;
-
-  // Throttle the number of blocks to delete per task
-  // (set to 1 for testing).
-  private final int blockLimitPerTask;
-
-  // Throttle the number of containers to process per interval.
-  private final int containerLimitPerInterval;
-
-  // Task priority is useful when a to-delete block has weight.
-  private final static int TASK_PRIORITY_DEFAULT = 1;
-  // Core pool size for container tasks
-  private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10;
-
-  public BlockDeletingService(OzoneContainer ozoneContainer,
-      long serviceInterval, long serviceTimeout, TimeUnit timeUnit,
-      Configuration conf) {
-    super("BlockDeletingService", serviceInterval, timeUnit,
-        BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
-    this.ozoneContainer = ozoneContainer;
-    containerDeletionPolicy = ReflectionUtils.newInstance(conf.getClass(
-        ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
-        TopNOrderedContainerDeletionChoosingPolicy.class,
-        ContainerDeletionChoosingPolicy.class), conf);
-    this.conf = conf;
-    this.blockLimitPerTask =
-        conf.getInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER,
-            OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT);
-    this.containerLimitPerInterval =
-        conf.getInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL,
-            OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT);
-  }
-
-
-  @Override
-  public BackgroundTaskQueue getTasks() {
-    BackgroundTaskQueue queue = new BackgroundTaskQueue();
-    List<ContainerData> containers = Lists.newArrayList();
-    try {
-      // We list at most a limited number of containers per interval,
-      // in case there are too many containers, to avoid starting too
-      // many workers. We must ensure there is no empty container in
-      // this result. Which containers are chosen depends on the
-      // configured container deletion policy.
-      containers = chooseContainerForBlockDeletion(containerLimitPerInterval,
-              containerDeletionPolicy);
-      if (containers.size() > 0) {
-        LOG.info("Plan to choose {} containers for block deletion, "
-                + "actually returns {} valid containers.",
-            containerLimitPerInterval, containers.size());
-      }
-
-      for(ContainerData container : containers) {
-        BlockDeletingTask containerTask =
-            new BlockDeletingTask(container, TASK_PRIORITY_DEFAULT);
-        queue.add(containerTask);
-      }
-    } catch (StorageContainerException e) {
-      LOG.warn("Failed to initiate block deleting tasks, "
-          + "caused by unable to get containers info. "
-          + "Retry in next interval. ", e);
-    } catch (Exception e) {
-      // In case the listContainer call throws an uncaught RuntimeException.
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Unexpected error occurs during deleting blocks.", e);
-      }
-    }
-    return queue;
-  }
-
-  public List<ContainerData> chooseContainerForBlockDeletion(int count,
-      ContainerDeletionChoosingPolicy deletionPolicy)
-      throws StorageContainerException {
-    Map<Long, ContainerData> containerDataMap =
-        ozoneContainer.getContainerSet().getContainerMap().entrySet().stream()
-            .filter(e -> isDeletionAllowed(e.getValue().getContainerData(),
-                deletionPolicy)).collect(Collectors
-            .toMap(Map.Entry::getKey, e -> e.getValue().getContainerData()));
-    return deletionPolicy
-        .chooseContainerForBlockDeletion(count, containerDataMap);
-  }
-
-  private boolean isDeletionAllowed(ContainerData containerData,
-      ContainerDeletionChoosingPolicy deletionPolicy) {
-    if (!deletionPolicy
-        .isValidContainerType(containerData.getContainerType())) {
-      return false;
-    } else if (!containerData.isClosed()) {
-      return false;
-    } else {
-      if (ozoneContainer.getWriteChannel() instanceof XceiverServerRatis) {
-        XceiverServerRatis ratisServer =
-            (XceiverServerRatis) ozoneContainer.getWriteChannel();
-        PipelineID pipelineID = PipelineID
-            .valueOf(UUID.fromString(containerData.getOriginPipelineId()));
-        // In case the Ratis group does not exist, just mark it for deletion.
-        if (!ratisServer.isExist(pipelineID.getProtobuf())) {
-          return true;
-        }
-        try {
-          long minReplicatedIndex =
-              ratisServer.getMinReplicatedIndex(pipelineID);
-          long containerBCSID = containerData.getBlockCommitSequenceId();
-          if (minReplicatedIndex >= 0 && minReplicatedIndex < containerBCSID) {
-            LOG.warn("Close Container log Index {} is not replicated across all"
-                    + "the servers in the pipeline {} as the min replicated "
-                    + "index is {}. Deletion is not allowed in this container "
-                    + "yet.", containerBCSID,
-                containerData.getOriginPipelineId(), minReplicatedIndex);
-            return false;
-          } else {
-            return true;
-          }
-        } catch (IOException ioe) {
-          // In case of any exception, check again whether the pipeline exists;
-          // if the pipeline has been destroyed, just mark it for deletion.
-          if (!ratisServer.isExist(pipelineID.getProtobuf())) {
-            return true;
-          } else {
-            LOG.info(ioe.getMessage());
-            return false;
-          }
-        }
-      }
-      return true;
-    }
-  }
-
-  private static class ContainerBackgroundTaskResult
-      implements BackgroundTaskResult {
-    private List<String> deletedBlockIds;
-
-    ContainerBackgroundTaskResult() {
-      deletedBlockIds = new LinkedList<>();
-    }
-
-    public void addBlockId(String blockId) {
-      deletedBlockIds.add(blockId);
-    }
-
-    public void addAll(List<String> blockIds) {
-      deletedBlockIds.addAll(blockIds);
-    }
-
-    public List<String> getDeletedBlocks() {
-      return deletedBlockIds;
-    }
-
-    @Override
-    public int getSize() {
-      return deletedBlockIds.size();
-    }
-  }
-
-  private class BlockDeletingTask
-      implements BackgroundTask<BackgroundTaskResult> {
-
-    private final int priority;
-    private final KeyValueContainerData containerData;
-
-    BlockDeletingTask(ContainerData containerName, int priority) {
-      this.priority = priority;
-      this.containerData = (KeyValueContainerData) containerName;
-    }
-
-    @Override
-    public BackgroundTaskResult call() throws Exception {
-      ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult();
-      final Container container = ozoneContainer.getContainerSet()
-          .getContainer(containerData.getContainerID());
-      container.writeLock();
-      long startTime = Time.monotonicNow();
-      // Scan container's db and get list of under deletion blocks
-      try (ReferenceCountedDB meta = BlockUtils.getDB(containerData, conf)) {
-        // # of blocks to delete is throttled
-        KeyPrefixFilter filter =
-            new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX);
-        List<Map.Entry<byte[], byte[]>> toDeleteBlocks =
-            meta.getStore().getSequentialRangeKVs(null, blockLimitPerTask,
-                filter);
-        if (toDeleteBlocks.isEmpty()) {
-          LOG.debug("No under deletion block found in container : {}",
-              containerData.getContainerID());
-        }
-
-        List<String> succeedBlocks = new LinkedList<>();
-        LOG.debug("Container : {}, To-Delete blocks : {}",
-            containerData.getContainerID(), toDeleteBlocks.size());
-        File dataDir = new File(containerData.getChunksPath());
-        if (!dataDir.exists() || !dataDir.isDirectory()) {
-          LOG.error("Invalid container data dir {} : "
-              + "does not exist or not a directory", dataDir.getAbsolutePath());
-          return crr;
-        }
-
-        toDeleteBlocks.forEach(entry -> {
-          String blockName = DFSUtil.bytes2String(entry.getKey());
-          LOG.debug("Deleting block {}", blockName);
-          try {
-            ContainerProtos.BlockData data =
-                ContainerProtos.BlockData.parseFrom(entry.getValue());
-            for (ContainerProtos.ChunkInfo chunkInfo : data.getChunksList()) {
-              File chunkFile = dataDir.toPath()
-                  .resolve(chunkInfo.getChunkName()).toFile();
-              if (FileUtils.deleteQuietly(chunkFile)) {
-                if (LOG.isDebugEnabled()) {
-                  LOG.debug("block {} chunk {} deleted", blockName,
-                      chunkFile.getAbsolutePath());
-                }
-              }
-            }
-            succeedBlocks.add(blockName);
-          } catch (InvalidProtocolBufferException e) {
-            LOG.error("Failed to parse block info for block {}", blockName, e);
-          }
-        });
-
-        // Once the files are deleted, replace the "deleting" entries with
-        // "deleted" entries.
-        BatchOperation batch = new BatchOperation();
-        succeedBlocks.forEach(entry -> {
-          String blockId =
-              entry.substring(OzoneConsts.DELETING_KEY_PREFIX.length());
-          String deletedEntry = OzoneConsts.DELETED_KEY_PREFIX + blockId;
-          batch.put(DFSUtil.string2Bytes(deletedEntry),
-              DFSUtil.string2Bytes(blockId));
-          batch.delete(DFSUtil.string2Bytes(entry));
-        });
-        meta.getStore().writeBatch(batch);
-        // Update the count of pending deletion blocks in the in-memory
-        // container status.
-        containerData.decrPendingDeletionBlocks(succeedBlocks.size());
-
-        if (!succeedBlocks.isEmpty()) {
-          LOG.info("Container: {}, deleted blocks: {}, task elapsed time: {}ms",
-              containerData.getContainerID(), succeedBlocks.size(),
-              Time.monotonicNow() - startTime);
-        }
-        crr.addAll(succeedBlocks);
-        return crr;
-      } finally {
-        container.writeUnlock();
-      }
-    }
-
-    @Override
-    public int getPriority() {
-      return priority;
-    }
-  }
-}
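The service above throttles on two axes: at most containerLimitPerInterval containers are queued per getTasks() call, and each task deletes at most blockLimitPerTask blocks. A standalone sketch of that two-level throttle, with hypothetical names:

```java
import java.util.List;

// Two-level throttling: N containers per interval, M blocks per container.
public final class TwoLevelThrottle {

  private TwoLevelThrottle() { }

  static int deleteOneInterval(List<List<String>> pendingByContainer,
      int containerLimitPerInterval, int blockLimitPerTask) {
    int deleted = 0;
    int containers =
        Math.min(containerLimitPerInterval, pendingByContainer.size());
    for (int c = 0; c < containers; c++) {
      List<String> pending = pendingByContainer.get(c);
      int toDelete = Math.min(blockLimitPerTask, pending.size());
      for (int i = 0; i < toDelete; i++) {
        pending.remove(0); // delete from the head, up to the per-task limit
        deleted++;
      }
    }
    return deleted;
  }
}
```

Blocks beyond the per-task limit stay pending and are picked up in a later interval, which matches how BlockDeletingService bounds the work done per run.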
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java
deleted file mode 100644
index 69d8042..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.keyvalue.statemachine.background;
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
deleted file mode 100644
index 8bbdec9..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto
-    .ContainerProtos.ContainerType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerDataProto.State;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.Iterator;
-import java.util.Map;
-
-/**
- * Control plane for container management in datanode.
- */
-public class ContainerController {
-
-  private final ContainerSet containerSet;
-  private final Map<ContainerType, Handler> handlers;
-
-  public ContainerController(final ContainerSet containerSet,
-      final Map<ContainerType, Handler> handlers) {
-    this.containerSet = containerSet;
-    this.handlers = handlers;
-  }
-
-  /**
-   * Returns the Container given a container id.
-   *
-   * @param containerId ID of the container
-   * @return Container
-   */
-  public Container getContainer(final long containerId) {
-    return containerSet.getContainer(containerId);
-  }
-
-  /**
-   * Marks the container for closing. Moves the container to CLOSING state.
-   *
-   * @param containerId Id of the container to update
-   * @throws IOException in case of exception
-   */
-  public void markContainerForClose(final long containerId)
-      throws IOException {
-    Container container = containerSet.getContainer(containerId);
-
-    if (container.getContainerState() == State.OPEN) {
-      getHandler(container).markContainerForClose(container);
-    }
-  }
-
-  /**
-   * Marks the container as UNHEALTHY.
-   *
-   * @param containerId Id of the container to update
-   * @throws IOException in case of exception
-   */
-  public void markContainerUnhealthy(final long containerId)
-          throws IOException {
-    Container container = containerSet.getContainer(containerId);
-    getHandler(container).markContainerUnhealthy(container);
-  }
-
-  /**
-   * Returns the container report.
-   *
-   * @return ContainerReportsProto
-   * @throws IOException in case of exception
-   */
-  public ContainerReportsProto getContainerReport()
-      throws IOException {
-    return containerSet.getContainerReport();
-  }
-
-  /**
-   * Quasi closes a container given its id.
-   *
-   * @param containerId Id of the container to quasi close
-   * @throws IOException in case of exception
-   */
-  public void quasiCloseContainer(final long containerId) throws IOException {
-    final Container container = containerSet.getContainer(containerId);
-    getHandler(container).quasiCloseContainer(container);
-  }
-
-  /**
-   * Closes a container given its Id.
-   *
-   * @param containerId Id of the container to close
-   * @throws IOException in case of exception
-   */
-  public void closeContainer(final long containerId) throws IOException {
-    final Container container = containerSet.getContainer(containerId);
-    getHandler(container).closeContainer(container);
-  }
-
-  public Container importContainer(final ContainerType type,
-      final long containerId, final long maxSize, final String originPipelineId,
-      final String originNodeId, final InputStream rawContainerStream,
-      final TarContainerPacker packer)
-      throws IOException {
-    return handlers.get(type).importContainer(containerId, maxSize,
-        originPipelineId, originNodeId, rawContainerStream, packer);
-  }
-
-  public void exportContainer(final ContainerType type,
-      final long containerId, final OutputStream outputStream,
-      final TarContainerPacker packer) throws IOException {
-    handlers.get(type).exportContainer(
-        containerSet.getContainer(containerId), outputStream, packer);
-  }
-
-  /**
-   * Deletes a container given its Id.
-   * @param containerId Id of the container to be deleted
-   * @param force if this is set to true, the container is deleted without
-   * checking its state.
-   */
-  public void deleteContainer(final long containerId, boolean force)
-      throws IOException {
-    final Container container = containerSet.getContainer(containerId);
-    if (container != null) {
-      getHandler(container).deleteContainer(container, force);
-    }
-  }
-
-  /**
-   * Given a container, returns its handler instance.
-   *
-   * @param container Container
-   * @return handler of the container
-   */
-  private Handler getHandler(final Container container) {
-    return handlers.get(container.getContainerType());
-  }
-
-  public Iterator<Container<?>> getContainers() {
-    return containerSet.getContainerIterator();
-  }
-
-  /**
-   * Return an iterator of containers which are associated with the specified
-   * <code>volume</code>.
-   *
-   * @param  volume the HDDS volume which should be used to filter containers
-   * @return {@literal Iterator<Container>}
-   */
-  public Iterator<Container<?>> getContainers(HddsVolume volume) {
-    return containerSet.getContainerIterator(volume);
-  }
-
-}
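ContainerController is essentially a dispatcher: each operation looks up the Handler registered for the container's type and delegates to it. A minimal sketch of that pattern with hypothetical types:

```java
import java.util.EnumMap;
import java.util.Map;

public class TypeDispatcher {

  enum ContainerType { KEY_VALUE }

  interface Handler {
    void close(long containerId);
  }

  private final Map<ContainerType, Handler> handlers =
      new EnumMap<>(ContainerType.class);

  void register(ContainerType type, Handler handler) {
    handlers.put(type, handler);
  }

  // Delegates to the handler registered for this container type,
  // as ContainerController.getHandler(container) does above.
  void closeContainer(ContainerType type, long containerId) {
    handlers.get(type).close(containerId);
  }
}
```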
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
deleted file mode 100644
index 1141951..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
+++ /dev/null
@@ -1,178 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdfs.util.Canceler;
-import org.apache.hadoop.hdfs.util.DataTransferThrottler;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * ContainerDataScanner scans the container data on a single volume.
- * Each scanner runs in its own thread.
- */
-public class ContainerDataScanner extends Thread {
-  public static final Logger LOG =
-      LoggerFactory.getLogger(ContainerDataScanner.class);
-
-  /**
-   * The volume that we're scanning.
-   */
-  private final HddsVolume volume;
-  private final ContainerController controller;
-  private final DataTransferThrottler throttler;
-  private final Canceler canceler;
-  private final ContainerDataScrubberMetrics metrics;
-  private final long dataScanInterval;
-
-  /**
-   * True if the thread is stopping.<p/>
-   * Protected by this object's lock.
-   */
-  private volatile boolean stopping = false;
-
-
-  public ContainerDataScanner(ContainerScrubberConfiguration conf,
-                              ContainerController controller,
-                              HddsVolume volume) {
-    this.controller = controller;
-    this.volume = volume;
-    dataScanInterval = conf.getDataScanInterval();
-    throttler = new HddsDataTransferThrottler(conf.getBandwidthPerVolume());
-    canceler = new Canceler();
-    metrics = ContainerDataScrubberMetrics.create(volume.toString());
-    setName("ContainerDataScanner(" + volume + ")");
-    setDaemon(true);
-  }
-
-  @Override
-  public void run() {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("{}: thread starting.", this);
-    }
-    try {
-      while (!stopping) {
-        runIteration();
-        metrics.resetNumContainersScanned();
-        metrics.resetNumUnhealthyContainers();
-      }
-      LOG.info("{} exiting.", this);
-    } catch (Throwable e) {
-      LOG.error("{} exiting because of exception ", this, e);
-    } finally {
-      if (metrics != null) {
-        metrics.unregister();
-      }
-    }
-  }
-
-  @VisibleForTesting
-  public void runIteration() {
-    long startTime = System.nanoTime();
-    Iterator<Container<?>> itr = controller.getContainers(volume);
-    while (!stopping && itr.hasNext()) {
-      Container c = itr.next();
-      if (c.shouldScanData()) {
-        try {
-          if (!c.scanData(throttler, canceler)) {
-            metrics.incNumUnHealthyContainers();
-            controller.markContainerUnhealthy(
-                c.getContainerData().getContainerID());
-          }
-        } catch (IOException ex) {
-          long containerId = c.getContainerData().getContainerID();
-          LOG.warn("Unexpected exception while scanning container "
-              + containerId, ex);
-        } finally {
-          metrics.incNumContainersScanned();
-        }
-      }
-    }
-    long totalDuration = System.nanoTime() - startTime;
-    if (!stopping) {
-      if (metrics.getNumContainersScanned() > 0) {
-        metrics.incNumScanIterations();
-        LOG.info("Completed an iteration of container data scrubber in" +
-                " {} minutes." +
-                " Number of iterations (since the data-node restart) : {}" +
-                ", Number of containers scanned in this iteration : {}" +
-                ", Number of unhealthy containers found in this iteration : {}",
-            TimeUnit.NANOSECONDS.toMinutes(totalDuration),
-            metrics.getNumScanIterations(),
-            metrics.getNumContainersScanned(),
-            metrics.getNumUnHealthyContainers());
-      }
-      long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(totalDuration);
-      long remainingSleep = dataScanInterval - elapsedMillis;
-      if (remainingSleep > 0) {
-        try {
-          Thread.sleep(remainingSleep);
-        } catch (InterruptedException ignored) {
-        }
-      }
-    }
-  }
-
-  public synchronized void shutdown() {
-    this.stopping = true;
-    this.canceler.cancel("ContainerDataScanner("+volume+") is shutting down");
-    this.interrupt();
-    try {
-      this.join();
-    } catch (InterruptedException ex) {
-      LOG.warn("Unexpected exception while stopping data scanner for volume "
-          + volume, ex);
-    }
-  }
-
-  @VisibleForTesting
-  public ContainerDataScrubberMetrics getMetrics() {
-    return metrics;
-  }
-
-  @Override
-  public String toString() {
-    return "ContainerDataScanner(" + volume +
-        ", " + volume.getStorageID() + ")";
-  }
-
-  private class HddsDataTransferThrottler extends DataTransferThrottler {
-    HddsDataTransferThrottler(long bandwidthPerSec) {
-      super(bandwidthPerSec);
-    }
-
-    @Override
-    public synchronized void throttle(long numOfBytes) {
-      ContainerDataScanner.this.metrics.incNumBytesScanned(numOfBytes);
-      super.throttle(numOfBytes);
-    }
-
-    @Override
-    public synchronized void throttle(long numOfBytes, Canceler c) {
-      ContainerDataScanner.this.metrics.incNumBytesScanned(numOfBytes);
-      super.throttle(numOfBytes, c);
-    }
-  }
-}
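HddsDataTransferThrottler above is a small decorator: it counts every throttled byte into the scrubber metrics before delegating to the HDFS DataTransferThrottler. A standalone sketch of the same decorator idea, with a simple fixed-window limiter standing in for the real throttler (all names hypothetical):

```java
import java.util.concurrent.atomic.AtomicLong;

public class CountingThrottler {
  private final AtomicLong bytesSeen = new AtomicLong(); // metrics hook
  private final long bytesPerSecond;
  private long windowStartMs = System.currentTimeMillis();
  private long bytesThisWindow;

  public CountingThrottler(long bytesPerSecond) {
    this.bytesPerSecond = bytesPerSecond;
  }

  // Count the bytes first (like incNumBytesScanned), then throttle.
  public synchronized void throttle(long numBytes) throws InterruptedException {
    bytesSeen.addAndGet(numBytes);
    bytesThisWindow += numBytes;
    long elapsed = System.currentTimeMillis() - windowStartMs;
    if (elapsed >= 1000) {
      windowStartMs += elapsed; // start a new one-second window
      bytesThisWindow = numBytes;
    } else if (bytesThisWindow > bytesPerSecond) {
      Thread.sleep(1000 - elapsed); // wait out the rest of the window
      windowStartMs = System.currentTimeMillis();
      bytesThisWindow = 0;
    }
  }

  public long totalBytes() {
    return bytesSeen.get();
  }
}
```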
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java
deleted file mode 100644
index 3cf4f58..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterInt;
-import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-
-import java.util.concurrent.ThreadLocalRandom;
-
-/**
- * This class captures the container data scrubber metrics on the data-node.
- **/
-@InterfaceAudience.Private
-@Metrics(about="DataNode container data scrubber metrics", context="dfs")
-public final class ContainerDataScrubberMetrics {
-  private final String name;
-  private final MetricsSystem ms;
-  @Metric("number of containers scanned in the current iteration")
-  private MutableGaugeInt numContainersScanned;
-  @Metric("number of unhealthy containers found in the current iteration")
-  private MutableGaugeInt numUnHealthyContainers;
-  @Metric("number of iterations of scanner completed since the restart")
-  private MutableCounterInt numScanIterations;
-  @Metric("disk bandwidth used by the container data scrubber per volume")
-  private MutableRate numBytesScanned;
-
-  public int getNumContainersScanned() {
-    return numContainersScanned.value();
-  }
-
-  public void incNumContainersScanned() {
-    numContainersScanned.incr();
-  }
-
-  public void resetNumContainersScanned() {
-    numContainersScanned.decr(getNumContainersScanned());
-  }
-
-  public int getNumUnHealthyContainers() {
-    return numUnHealthyContainers.value();
-  }
-
-  public void incNumUnHealthyContainers() {
-    numUnHealthyContainers.incr();
-  }
-
-  public void resetNumUnhealthyContainers() {
-    numUnHealthyContainers.decr(getNumUnHealthyContainers());
-  }
-
-  public int getNumScanIterations() {
-    return numScanIterations.value();
-  }
-
-  public void incNumScanIterations() {
-    numScanIterations.incr();
-  }
-
-  public double getNumBytesScannedMean() {
-    return numBytesScanned.lastStat().mean();
-  }
-
-  public long getNumBytesScannedSampleCount() {
-    return numBytesScanned.lastStat().numSamples();
-  }
-
-  public double getNumBytesScannedStdDev() {
-    return numBytesScanned.lastStat().stddev();
-  }
-
-  public void incNumBytesScanned(long bytes) {
-    numBytesScanned.add(bytes);
-  }
-
-  public void unregister() {
-    ms.unregisterSource(name);
-  }
-
-  private ContainerDataScrubberMetrics(String name, MetricsSystem ms) {
-    this.name = name;
-    this.ms = ms;
-  }
-
-  public static ContainerDataScrubberMetrics create(final String volumeName) {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    String name = "ContainerDataScrubberMetrics-"+ (volumeName.isEmpty()
-        ? "UndefinedDataNodeVolume"+ ThreadLocalRandom.current().nextInt()
-        : volumeName.replace(':', '-'));
-
-    return ms.register(name, null, new ContainerDataScrubberMetrics(name, ms));
-  }
-}
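The reset methods above clear a MutableGaugeInt by decrementing it by its current value. The same idiom in standalone form, with a hypothetical atomic counter in place of the metrics2 gauge:

```java
import java.util.concurrent.atomic.AtomicInteger;

public final class ResettableGauge {
  private final AtomicInteger value = new AtomicInteger();

  public void incr() {
    value.incrementAndGet();
  }

  public int get() {
    return value.get();
  }

  // Reset by decrementing the current value, mirroring
  // numContainersScanned.decr(getNumContainersScanned()) above.
  public void reset() {
    value.addAndGet(-value.get());
  }
}
```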
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
deleted file mode 100644
index 46aaf73..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.concurrent.TimeUnit;
-
-/**
- * This class is responsible to perform metadata verification of the
- * containers.
- */
-public class ContainerMetadataScanner extends Thread {
-  public static final Logger LOG =
-      LoggerFactory.getLogger(ContainerMetadataScanner.class);
-
-  private final ContainerController controller;
-  private final long metadataScanInterval;
-  private final ContainerMetadataScrubberMetrics metrics;
-  /**
-   * True if the thread is stopping.<p/>
-   * Protected by this object's lock.
-   */
-  private volatile boolean stopping = false;
-
-  public ContainerMetadataScanner(ContainerScrubberConfiguration conf,
-                                  ContainerController controller) {
-    this.controller = controller;
-    this.metadataScanInterval = conf.getMetadataScanInterval();
-    this.metrics = ContainerMetadataScrubberMetrics.create();
-    setName("ContainerMetadataScanner");
-    setDaemon(true);
-  }
-
-  @Override
-  public void run() {
-    /*
-     * the outer daemon loop exits on shutdown()
-     */
-    LOG.info("Background ContainerMetadataScanner starting up");
-    while (!stopping) {
-      runIteration();
-      if (!stopping) {
-        metrics.resetNumUnhealthyContainers();
-        metrics.resetNumContainersScanned();
-      }
-    }
-  }
-
-  @VisibleForTesting
-  void runIteration() {
-    long start = System.nanoTime();
-    Iterator<Container<?>> containerIt = controller.getContainers();
-    while (!stopping && containerIt.hasNext()) {
-      Container container = containerIt.next();
-      try {
-        scrub(container);
-      } catch (IOException e) {
-        LOG.info("Unexpected error while scrubbing container {}",
-            container.getContainerData().getContainerID());
-      } finally {
-        metrics.incNumContainersScanned();
-      }
-    }
-    long interval = System.nanoTime() - start;
-    if (!stopping) {
-      metrics.incNumScanIterations();
-      LOG.info("Completed an iteration of container metadata scrubber in" +
-              " {} minutes." +
-              " Number of  iterations (since the data-node restart) : {}" +
-              ", Number of containers scanned in this iteration : {}" +
-              ", Number of unhealthy containers found in this iteration : {}",
-          TimeUnit.NANOSECONDS.toMinutes(interval),
-          metrics.getNumScanIterations(),
-          metrics.getNumContainersScanned(),
-          metrics.getNumUnHealthyContainers());
-      // Delay the next metadata scan according to the configured interval.
-      if (interval < metadataScanInterval) {
-        try {
-          Thread.sleep(metadataScanInterval - interval);
-        } catch (InterruptedException e) {
-          LOG.info("Background ContainerMetadataScanner interrupted." +
-              " Going to exit");
-        }
-      }
-    }
-  }
-
-  @VisibleForTesting
-  public void scrub(Container container) throws IOException {
-    if (!container.scanMetaData()) {
-      metrics.incNumUnHealthyContainers();
-      controller.markContainerUnhealthy(
-          container.getContainerData().getContainerID());
-    }
-  }
-
-  @VisibleForTesting
-  public ContainerMetadataScrubberMetrics getMetrics() {
-    return metrics;
-  }
-
-  public synchronized void shutdown() {
-    this.stopping = true;
-    this.interrupt();
-    try {
-      this.join();
-    } catch (InterruptedException ex) {
-      LOG.warn("Unexpected exception while stopping metadata scanner.", ex);
-    }
-  }
-}
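Both scanners pace themselves the same way: measure how long an iteration took and sleep for whatever remains of the configured interval. A sketch of that pacing loop (hypothetical names):

```java
import java.util.concurrent.TimeUnit;

public final class PacedLoop {

  private PacedLoop() { }

  // Runs work repeatedly, starting iterations at most once per intervalMillis.
  static void run(Runnable work, long intervalMillis, int iterations)
      throws InterruptedException {
    for (int i = 0; i < iterations; i++) {
      long start = System.nanoTime();
      work.run();
      long elapsedMillis =
          TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
      long remaining = intervalMillis - elapsedMillis;
      if (remaining > 0) {
        Thread.sleep(remaining); // delay the next scan to honor the interval
      }
    }
  }
}
```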
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java
deleted file mode 100644
index 3effc35..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterInt;
-import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
-
-/**
- * This class captures the container meta-data scrubber metrics on the
- * data-node.
- **/
-@InterfaceAudience.Private
-@Metrics(about="DataNode container metadata scrubber metrics", context="dfs")
-public final class ContainerMetadataScrubberMetrics {
-  private final String name;
-  private final MetricsSystem ms;
-  @Metric("number of containers scanned in the current iteration")
-  private MutableGaugeInt numContainersScanned;
-  @Metric("number of unhealthy containers found in the current iteration")
-  private MutableGaugeInt numUnHealthyContainers;
-  @Metric("number of iterations of scanner completed since the restart")
-  private MutableCounterInt numScanIterations;
-
-  public int getNumContainersScanned() {
-    return numContainersScanned.value();
-  }
-
-  public void incNumContainersScanned() {
-    numContainersScanned.incr();
-  }
-
-  public void resetNumContainersScanned() {
-    numContainersScanned.decr(getNumContainersScanned());
-  }
-
-  public int getNumUnHealthyContainers() {
-    return numUnHealthyContainers.value();
-  }
-
-  public void incNumUnHealthyContainers() {
-    numUnHealthyContainers.incr();
-  }
-
-  public void resetNumUnhealthyContainers() {
-    numUnHealthyContainers.decr(getNumUnHealthyContainers());
-  }
-
-  public int getNumScanIterations() {
-    return numScanIterations.value();
-  }
-
-  public void incNumScanIterations() {
-    numScanIterations.incr();
-  }
-
-  public void unregister() {
-    ms.unregisterSource(name);
-  }
-
-  private ContainerMetadataScrubberMetrics(String name, MetricsSystem ms) {
-    this.name = name;
-    this.ms = ms;
-  }
-
-  public static ContainerMetadataScrubberMetrics create() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    String name = "ContainerMetadataScrubberMetrics";
-    return ms.register(name, null,
-        new ContainerMetadataScrubberMetrics(name, ms));
-  }
-
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
deleted file mode 100644
index 621da70..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileFilter;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Class used to read .container files from Volume and build container map.
- *
- * Layout of the container directory on disk is as follows:
- *
- * <p>../hdds/VERSION
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID
- * >/metadata/<<containerID>>.container}
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID
- * >/<<dataPath>>}
- * <p>
- * Some ContainerTypes will have extra metadata other than the .container
- * file. For example, KeyValueContainer will have a .db file. This .db file
- * will also be stored in the metadata folder along with the .container file.
- * <p>
- * {@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<KVcontainerID
- * >/metadata/<<KVcontainerID>>.db}
- * <p>
- * Note that the {@literal <<dataPath>>} is dependent on the ContainerType.
- * For KeyValueContainers, the data is stored in a "chunks" folder. As such,
- * the {@literal <<dataPath>>} layout for KeyValueContainers is:
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<KVcontainerID
- * >/chunks/<<chunksFile>>}
- *
- */
-public class ContainerReader implements Runnable {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      ContainerReader.class);
-  private HddsVolume hddsVolume;
-  private final ContainerSet containerSet;
-  private final OzoneConfiguration config;
-  private final File hddsVolumeDir;
-  private final VolumeSet volumeSet;
-
-  ContainerReader(VolumeSet volSet, HddsVolume volume, ContainerSet cset,
-                  OzoneConfiguration conf) {
-    Preconditions.checkNotNull(volume);
-    this.hddsVolume = volume;
-    this.hddsVolumeDir = hddsVolume.getHddsRootDir();
-    this.containerSet = cset;
-    this.config = conf;
-    this.volumeSet = volSet;
-  }
-
-  @Override
-  public void run() {
-    try {
-      readVolume(hddsVolumeDir);
-    } catch (RuntimeException ex) {
-      LOG.error("Caught a Run time exception during reading container files" +
-          " from Volume {} {}", hddsVolumeDir, ex);
-    }
-  }
-
-  public void readVolume(File hddsVolumeRootDir) {
-    Preconditions.checkNotNull(hddsVolumeRootDir, "hddsVolumeRootDir " +
-        "cannot be null");
-
-    // Find the SCM directory (the only subdirectory expected under the root).
-    File[] scmDir = hddsVolumeRootDir.listFiles(new FileFilter() {
-      @Override
-      public boolean accept(File pathname) {
-        return pathname.isDirectory();
-      }
-    });
-
-    if (scmDir == null) {
-      LOG.error("IO error for the volume {}, skipped loading",
-          hddsVolumeRootDir);
-      volumeSet.failVolume(hddsVolumeRootDir.getPath());
-      return;
-    }
-
-    if (scmDir.length > 1) {
-      LOG.error("Volume {} is in Inconsistent state", hddsVolumeRootDir);
-      volumeSet.failVolume(hddsVolumeRootDir.getPath());
-      return;
-    }
-
-    for (File scmLoc : scmDir) {
-      File currentDir = new File(scmLoc, Storage.STORAGE_DIR_CURRENT);
-      File[] containerTopDirs = currentDir.listFiles();
-      if (containerTopDirs != null) {
-        for (File containerTopDir : containerTopDirs) {
-          if (containerTopDir.isDirectory()) {
-            File[] containerDirs = containerTopDir.listFiles();
-            if (containerDirs != null) {
-              for (File containerDir : containerDirs) {
-                File containerFile = ContainerUtils.getContainerFile(
-                    containerDir);
-                long containerID = ContainerUtils.getContainerID(containerDir);
-                if (containerFile.exists()) {
-                  verifyContainerFile(containerID, containerFile);
-                } else {
-                  LOG.error("Missing .container file for ContainerID: {}",
-                      containerDir.getName());
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  private void verifyContainerFile(long containerID, File containerFile) {
-    try {
-      ContainerData containerData = ContainerDataYaml.readContainerFile(
-          containerFile);
-      if (containerID != containerData.getContainerID()) {
-        LOG.error("Invalid ContainerID in file {}. " +
-            "Skipping loading of this container.", containerFile);
-        return;
-      }
-      verifyAndFixupContainerData(containerData);
-    } catch (IOException ex) {
-      LOG.error("Failed to parse ContainerFile for ContainerID: {}",
-          containerID, ex);
-    }
-  }
-
-  /**
-   * Verifies the ContainerData loaded from disk and fixes up stale members,
-   * specifically blockCommitSequenceId, deletion-related metadata,
-   * and bytesUsed.
-   * @param containerData the container data to verify and fix up
-   * @throws IOException
-   */
-  public void verifyAndFixupContainerData(ContainerData containerData)
-      throws IOException {
-    switch (containerData.getContainerType()) {
-    case KeyValueContainer:
-      if (containerData instanceof KeyValueContainerData) {
-        KeyValueContainerData kvContainerData = (KeyValueContainerData)
-            containerData;
-        containerData.setVolume(hddsVolume);
-
-        KeyValueContainerUtil.parseKVContainerData(kvContainerData, config);
-        KeyValueContainer kvContainer = new KeyValueContainer(
-            kvContainerData, config);
-        try(ReferenceCountedDB containerDB = BlockUtils.getDB(kvContainerData,
-            config)) {
-          MetadataKeyFilters.KeyPrefixFilter filter =
-              new MetadataKeyFilters.KeyPrefixFilter()
-                  .addFilter(OzoneConsts.DELETING_KEY_PREFIX);
-          int numPendingDeletionBlocks =
-              containerDB.getStore().getSequentialRangeKVs(null,
-                  Integer.MAX_VALUE, filter)
-                  .size();
-          kvContainerData.incrPendingDeletionBlocks(numPendingDeletionBlocks);
-          byte[] delTxnId = containerDB.getStore().get(
-              DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX));
-          if (delTxnId != null) {
-            kvContainerData
-                .updateDeleteTransactionId(Longs.fromByteArray(delTxnId));
-          }
-          // sets the BlockCommitSequenceId.
-          byte[] bcsId = containerDB.getStore().get(DFSUtil.string2Bytes(
-              OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
-          if (bcsId != null) {
-            kvContainerData
-                .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
-          }
-          if (kvContainer.getContainerState()
-              == ContainerProtos.ContainerDataProto.State.OPEN) {
-            // commitSpace for Open Containers relies on usedBytes
-            initializeUsedBytes(kvContainer);
-          }
-          containerSet.addContainer(kvContainer);
-        }
-      } else {
-        throw new StorageContainerException("Container File is corrupted. " +
-            "ContainerType is KeyValueContainer but cast to " +
-            "KeyValueContainerData failed. ",
-            ContainerProtos.Result.CONTAINER_METADATA_ERROR);
-      }
-      break;
-    default:
-      throw new StorageContainerException("Unrecognized ContainerType " +
-          containerData.getContainerType(),
-          ContainerProtos.Result.UNKNOWN_CONTAINER_TYPE);
-    }
-  }
-
-  private void initializeUsedBytes(KeyValueContainer container)
-      throws IOException {
-    try (KeyValueBlockIterator blockIter = new KeyValueBlockIterator(
-        container.getContainerData().getContainerID(),
-        new File(container.getContainerData().getContainerPath()))) {
-      long usedBytes = 0;
-
-      while (blockIter.hasNext()) {
-        BlockData block = blockIter.nextBlock();
-        long blockLen = 0;
-
-        List<ContainerProtos.ChunkInfo> chunkInfoList = block.getChunks();
-        for (ContainerProtos.ChunkInfo chunk : chunkInfoList) {
-          ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk);
-          blockLen += info.getLen();
-        }
-
-        usedBytes += blockLen;
-      }
-
-      container.getContainerData().setBytesUsed(usedBytes);
-    }
-  }
-}
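Following the on-disk layout documented in the class javadoc above, a simplified standalone walk that checks each container directory for its .container file (paths and checks are illustrative only; it assumes the directory name equals the container ID, which ContainerUtils derives in the real code):

```java
import java.io.File;

public final class ContainerFileWalk {

  private ContainerFileWalk() { }

  // Walks <volumeRoot>/<scmUuid>/current/<containerTopDir>/<containerDir>
  // and reports whether each container directory has a metadata/<id>.container
  // file, mirroring the checks in ContainerReader.readVolume above.
  static void walk(File volumeRoot) {
    File[] scmDirs = volumeRoot.listFiles(File::isDirectory);
    if (scmDirs == null || scmDirs.length != 1) {
      System.err.println("Volume missing or inconsistent: " + volumeRoot);
      return;
    }
    File[] topDirs = new File(scmDirs[0], "current").listFiles(File::isDirectory);
    if (topDirs == null) {
      return;
    }
    for (File top : topDirs) {
      File[] containerDirs = top.listFiles(File::isDirectory);
      if (containerDirs == null) {
        continue;
      }
      for (File dir : containerDirs) {
        File containerFile =
            new File(new File(dir, "metadata"), dir.getName() + ".container");
        System.out.println(dir.getName() + " -> "
            + (containerFile.exists() ? "ok" : "missing .container file"));
      }
    }
  }
}
```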
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java
deleted file mode 100644
index 454ce84..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import org.apache.hadoop.hdds.conf.Config;
-import org.apache.hadoop.hdds.conf.ConfigGroup;
-import org.apache.hadoop.hdds.conf.ConfigTag;
-import org.apache.hadoop.hdds.conf.ConfigType;
-
-/**
- * This class defines configuration parameters for container scrubber.
- **/
-@ConfigGroup(prefix = "hdds.containerscrub")
-public class ContainerScrubberConfiguration {
-  private boolean enabled;
-  private long metadataScanInterval;
-  private long dataScanInterval;
-  private long bandwidthPerVolume;
-
-  @Config(key = "enabled",
-      type = ConfigType.BOOLEAN,
-      defaultValue = "false",
-      tags = {ConfigTag.STORAGE},
-      description = "Config parameter to enable container scrubber.")
-  public void setEnabled(boolean enabled) {
-    this.enabled = enabled;
-  }
-
-  public boolean isEnabled() {
-    return enabled;
-  }
-
-  @Config(key = "metadata.scan.interval",
-      type = ConfigType.TIME,
-      defaultValue = "3h",
-      tags = {ConfigTag.STORAGE},
-      description = "Config parameter define time interval in milliseconds" +
-          " between two metadata scans by container scrubber.")
-  public void setMetadataScanInterval(long metadataScanInterval) {
-    this.metadataScanInterval = metadataScanInterval;
-  }
-
-  public long getMetadataScanInterval() {
-    return metadataScanInterval;
-  }
-
-  @Config(key = "data.scan.interval",
-      type = ConfigType.TIME,
-      defaultValue = "1m",
-      tags = { ConfigTag.STORAGE },
-      description = "Minimum time interval between two iterations of container"
-          + " data scanning.  If an iteration takes less time than this, the"
-          + " scanner will wait before starting the next iteration."
-  )
-  public void setDataScanInterval(long dataScanInterval) {
-    this.dataScanInterval = dataScanInterval;
-  }
-
-  public long getDataScanInterval() {
-    return dataScanInterval;
-  }
-
-  @Config(key = "volume.bytes.per.second",
-      type = ConfigType.LONG,
-      defaultValue = "1048576",
-      tags = {ConfigTag.STORAGE},
-      description = "Config parameter to throttle I/O bandwidth used"
-          + " by scrubber per volume.")
-  public void setBandwidthPerVolume(long bandwidthPerVolume) {
-    this.bandwidthPerVolume = bandwidthPerVolume;
-  }
-
-  public long getBandwidthPerVolume() {
-    return bandwidthPerVolume;
-  }
-}
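
For context, the @ConfigGroup/@Config annotations above composed configuration keys as "<prefix>.<key>". A minimal sketch of how a caller resolved the typed object before this removal, assuming the hadoop-hdds config framework is still on the classpath and the class is importable:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public final class ScrubberConfigExample {
      private ScrubberConfigExample() { }

      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Keys compose as "<prefix>.<key>" from the annotations above.
        conf.setBoolean("hdds.containerscrub.enabled", true);
        conf.set("hdds.containerscrub.metadata.scan.interval", "6h");

        // The framework injects the values through the annotated setters.
        ContainerScrubberConfiguration sc =
            conf.getObject(ContainerScrubberConfiguration.class);
        System.out.println("scrubber enabled: " + sc.isEnabled());
      }
    }
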
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
deleted file mode 100644
index a026f0e..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto
-    .ContainerProtos.ContainerType;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-
-import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService;
-import org.apache.hadoop.ozone.container.replication.GrpcReplicationService;
-import org.apache.hadoop.ozone.container.replication
-    .OnDemandContainerReplicationSource;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
-
-/**
- * Ozone main class sets up the network servers and initializes the container
- * layer.
- */
-public class OzoneContainer {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      OzoneContainer.class);
-
-  private final HddsDispatcher hddsDispatcher;
-  private final Map<ContainerType, Handler> handlers;
-  private final OzoneConfiguration config;
-  private final VolumeSet volumeSet;
-  private final ContainerSet containerSet;
-  private final XceiverServerSpi writeChannel;
-  private final XceiverServerSpi readChannel;
-  private final ContainerController controller;
-  private ContainerMetadataScanner metadataScanner;
-  private List<ContainerDataScanner> dataScanners;
-  private final BlockDeletingService blockDeletingService;
-
-  /**
-   * Constructs the OzoneContainer object.
-   *
-   * @param datanodeDetails details of the datanode hosting this container layer
-   * @param conf ozone configuration
-   * @param context datanode state machine context
-   * @param certClient certificate client for the secure gRPC/Ratis channels
-   * @throws DiskOutOfSpaceException
-   * @throws IOException
-   */
-  public OzoneContainer(DatanodeDetails datanodeDetails, OzoneConfiguration
-      conf, StateContext context, CertificateClient certClient)
-      throws IOException {
-    this.config = conf;
-    this.volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
-    this.containerSet = new ContainerSet();
-    this.metadataScanner = null;
-
-    buildContainerSet();
-    final ContainerMetrics metrics = ContainerMetrics.create(conf);
-    this.handlers = Maps.newHashMap();
-    for (ContainerType containerType : ContainerType.values()) {
-      handlers.put(containerType,
-          Handler.getHandlerForContainerType(
-              containerType, conf, context, containerSet, volumeSet, metrics));
-    }
-    this.hddsDispatcher = new HddsDispatcher(config, containerSet, volumeSet,
-        handlers, context, metrics);
-
-    /*
-     * ContainerController is the control plane
-     * XceiverServerRatis is the write channel
-     * XceiverServerGrpc is the read channel
-     */
-    this.controller = new ContainerController(containerSet, handlers);
-    this.writeChannel = XceiverServerRatis.newXceiverServerRatis(
-        datanodeDetails, config, hddsDispatcher, controller, certClient,
-        context);
-    this.readChannel = new XceiverServerGrpc(
-        datanodeDetails, config, hddsDispatcher, certClient,
-        createReplicationService());
-    long svcInterval = config
-        .getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-            OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
-            TimeUnit.MILLISECONDS);
-    long serviceTimeout = config
-        .getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
-            OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
-            TimeUnit.MILLISECONDS);
-    this.blockDeletingService =
-        new BlockDeletingService(this, svcInterval, serviceTimeout,
-            TimeUnit.MILLISECONDS, config);
-  }
-
-  private GrpcReplicationService createReplicationService() {
-    return new GrpcReplicationService(
-        new OnDemandContainerReplicationSource(controller));
-  }
-
-  /**
-   * Builds the container set by reading the containers from all volumes.
-   */
-  private void buildContainerSet() {
-    Iterator<HddsVolume> volumeSetIterator = volumeSet.getVolumesList()
-        .iterator();
-    ArrayList<Thread> volumeThreads = new ArrayList<>();
-
-    //TODO: the disk checker should run before this, to see how the disks are.
-    // Handling of disk failure tolerance also needs to be added.
-    while (volumeSetIterator.hasNext()) {
-      HddsVolume volume = volumeSetIterator.next();
-      Thread thread = new Thread(new ContainerReader(volumeSet, volume,
-          containerSet, config));
-      thread.start();
-      volumeThreads.add(thread);
-    }
-
-    try {
-      for (Thread volumeThread : volumeThreads) {
-        volumeThread.join();
-      }
-    } catch (InterruptedException ex) {
-      LOG.info("Volume reader threads were interrupted", ex);
-      // Restore the interrupt status so callers can observe it.
-      Thread.currentThread().interrupt();
-    }
-
-  }
-
-
-  /**
-   * Start background daemon thread for performing container integrity checks.
-   */
-  private void startContainerScrub() {
-    ContainerScrubberConfiguration c = config.getObject(
-        ContainerScrubberConfiguration.class);
-    boolean enabled = c.isEnabled();
-
-    if (!enabled) {
-      LOG.info("Background container scanner has been disabled.");
-    } else {
-      if (this.metadataScanner == null) {
-        this.metadataScanner = new ContainerMetadataScanner(c, controller);
-      }
-      this.metadataScanner.start();
-
-      dataScanners = new ArrayList<>();
-      for (HddsVolume v : volumeSet.getVolumesList()) {
-        ContainerDataScanner s = new ContainerDataScanner(c, controller, v);
-        s.start();
-        dataScanners.add(s);
-      }
-    }
-  }
-
-  /**
-   * Stop the scanner thread and wait for thread to die.
-   */
-  private void stopContainerScrub() {
-    if (metadataScanner == null) {
-      return;
-    }
-    metadataScanner.shutdown();
-    metadataScanner = null;
-    for (ContainerDataScanner s : dataScanners) {
-      s.shutdown();
-    }
-  }
-
-  /**
-   * Starts serving requests to ozone container.
-   *
-   * @throws IOException
-   */
-  public void start(String scmId) throws IOException {
-    LOG.info("Attempting to start container services.");
-    startContainerScrub();
-    writeChannel.start();
-    readChannel.start();
-    hddsDispatcher.init();
-    hddsDispatcher.setScmId(scmId);
-    blockDeletingService.start();
-  }
-
-  /**
-   * Stop Container Service on the datanode.
-   */
-  public void stop() {
-    //TODO: at end of container IO integration work.
-    LOG.info("Attempting to stop container services.");
-    stopContainerScrub();
-    writeChannel.stop();
-    readChannel.stop();
-    this.handlers.values().forEach(Handler::stop);
-    hddsDispatcher.shutdown();
-    volumeSet.shutdown();
-    blockDeletingService.shutdown();
-    ContainerMetrics.remove();
-  }
-
-
-  @VisibleForTesting
-  public ContainerSet getContainerSet() {
-    return containerSet;
-  }
-
-  /**
-   * Returns the pipeline report of the write channel.
-   * @return - pipeline report.
-   */
-  public PipelineReportsProto getPipelineReport() {
-    PipelineReportsProto.Builder pipelineReportsProto =
-        PipelineReportsProto.newBuilder();
-    pipelineReportsProto.addAllPipelineReport(writeChannel.getPipelineReport());
-    return pipelineReportsProto.build();
-  }
-
-  public XceiverServerSpi getWriteChannel() {
-    return writeChannel;
-  }
-
-  public XceiverServerSpi getReadChannel() {
-    return readChannel;
-  }
-
-  public ContainerController getController() {
-    return controller;
-  }
-
-  /**
-   * Returns node report of container storage usage.
-   */
-  public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport()
-      throws IOException {
-    return volumeSet.getNodeReport();
-  }
-
-  @VisibleForTesting
-  public ContainerDispatcher getDispatcher() {
-    return this.hddsDispatcher;
-  }
-
-  public VolumeSet getVolumeSet() {
-    return volumeSet;
-  }
-}
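
Taken together, the class above owned the datanode's whole container stack. A minimal lifecycle sketch, assuming the caller already has the datanode details, state context and certificate client from the datanode startup path (names here are illustrative, not project code):

    import java.io.IOException;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
    import org.apache.hadoop.ozone.container.common.statemachine.StateContext;

    public final class OzoneContainerLifecycle {
      private OzoneContainerLifecycle() { }

      /** Bring up the container services and hand back the handle for stop(). */
      static OzoneContainer startServices(DatanodeDetails dn,
          OzoneConfiguration conf, StateContext context,
          CertificateClient certClient, String scmId) throws IOException {
        OzoneContainer container =
            new OzoneContainer(dn, conf, context, certClient);
        container.start(scmId); // scrubbers, Ratis write and gRPC read channels
        return container;
      }
    }
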
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java
deleted file mode 100644
index c99c038..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.ozoneimpl;
-/**
- * Ozone main that calls into the container layer.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerDownloader.java
deleted file mode 100644
index 9511241..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerDownloader.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import java.io.Closeable;
-import java.nio.file.Path;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-
-/**
- * Service to download container data from other datanodes.
- * <p>
- * The implementation of this interface should copy the raw container data in
- * compressed form to a working directory.
- * <p>
- * A smart implementation would use multiple sources to do parallel downloads.
- */
-public interface ContainerDownloader extends Closeable {
-
-  CompletableFuture<Path> getContainerDataFromReplicas(long containerId,
-      List<DatanodeDetails> sources);
-
-}
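
The future-based contract reads naturally with a blocking helper; a sketch under the assumption that the caller is allowed to block (as DownloadAndImportReplicator, later in this diff, does):

    import java.io.IOException;
    import java.nio.file.Path;
    import java.util.List;
    import java.util.concurrent.ExecutionException;

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;

    public final class DownloadExample {
      private DownloadExample() { }

      /** Block until the tarball for one container has been fetched. */
      static Path fetch(ContainerDownloader downloader, long containerId,
          List<DatanodeDetails> sources) throws IOException {
        try {
          return downloader
              .getContainerDataFromReplicas(containerId, sources).get();
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new IOException("Interrupted while downloading " + containerId, e);
        } catch (ExecutionException e) {
          throw new IOException("Download failed for " + containerId,
              e.getCause());
        }
      }
    }
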
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicationSource.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicationSource.java
deleted file mode 100644
index 69582f7..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicationSource.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * Contract to prepare and provide the container in binary form.
- * <p>
- * Prepare will be called when a container is closed. An implementation could
- * pre-cache a binary representation of the container and store the pre-packed
- * images.
- */
-public interface ContainerReplicationSource {
-
-  /**
-   * Prepare for the replication.
-   *
-   * @param containerId The ID of the container to package.
-   */
-  void prepare(long containerId);
-
-  /**
-   * Copy the container data to an output stream.
-   *
-   * @param containerId Container to replicate
-   * @param destination The destination stream to copy all the container data to.
-   * @throws IOException
-   */
-  void copyData(long containerId, OutputStream destination)
-      throws IOException;
-
-}
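
The "pre-cache" idea in the javadoc can be illustrated with a hypothetical file-backed source; the cache directory layout and file naming below are assumptions for the sketch, not the project's own:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    /** Hypothetical pre-packing source: prepare() would write the archive
     *  once, copyData() just streams the cached file. */
    public class CachedReplicationSource implements ContainerReplicationSource {

      private final Path cacheDir = Paths.get(
          System.getProperty("java.io.tmpdir"), "container-cache");

      @Override
      public void prepare(long containerId) {
        // A real implementation would export the closed container to
        // cacheDir.resolve(containerId + ".tar.gz") here.
      }

      @Override
      public void copyData(long containerId, OutputStream destination)
          throws IOException {
        Files.copy(cacheDir.resolve(containerId + ".tar.gz"), destination);
      }
    }
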
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java
deleted file mode 100644
index 827b9d6..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-/**
- * Service to do the real replication task.
- *
- * An implementation should download the container and import it into the
- * local container set.
- */
-public interface ContainerReplicator {
-  void replicate(ReplicationTask task);
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerStreamingOutput.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerStreamingOutput.java
deleted file mode 100644
index f7fd8a4..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerStreamingOutput.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.StreamingOutput;
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * JAX-RS streaming output to return the binary container data.
- */
-public class ContainerStreamingOutput implements StreamingOutput {
-
-  private long containerId;
-
-  private ContainerReplicationSource containerReplicationSource;
-
-  public ContainerStreamingOutput(long containerId,
-      ContainerReplicationSource containerReplicationSource) {
-    this.containerId = containerId;
-    this.containerReplicationSource = containerReplicationSource;
-  }
-
-  @Override
-  public void write(OutputStream outputStream)
-      throws IOException, WebApplicationException {
-    containerReplicationSource.copyData(containerId, outputStream);
-  }
-}
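
ContainerStreamingOutput is a plain JAX-RS StreamingOutput, so wiring it into a resource follows the usual pattern; a hedged sketch where the @Path values and resource class are invented for illustration:

    import javax.ws.rs.GET;
    import javax.ws.rs.Path;
    import javax.ws.rs.PathParam;
    import javax.ws.rs.core.Response;

    /** Hypothetical JAX-RS resource exposing container downloads. */
    @Path("/container")
    public class ContainerDownloadResource {

      private final ContainerReplicationSource source;

      public ContainerDownloadResource(ContainerReplicationSource source) {
        this.source = source;
      }

      @GET
      @Path("/{id}")
      public Response download(@PathParam("id") long containerId) {
        // The StreamingOutput pulls the bytes lazily when JAX-RS writes
        // the response body.
        return Response.ok(new ContainerStreamingOutput(containerId, source),
            "application/octet-stream").build();
      }
    }
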
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java
deleted file mode 100644
index eef01a1..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import java.io.FileInputStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
-import org.apache.hadoop.ozone.container.replication.ReplicationTask.Status;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Default replication implementation.
- * <p>
- * This class does the real job: it executes the download and imports the
- * container into the container set.
- */
-public class DownloadAndImportReplicator implements ContainerReplicator {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DownloadAndImportReplicator.class);
-
-  private final ContainerSet containerSet;
-
-  private final ContainerController controller;
-
-  private final ContainerDownloader downloader;
-
-  private final TarContainerPacker packer;
-
-  public DownloadAndImportReplicator(
-      ContainerSet containerSet,
-      ContainerController controller,
-      ContainerDownloader downloader,
-      TarContainerPacker packer) {
-    this.containerSet = containerSet;
-    this.controller = controller;
-    this.downloader = downloader;
-    this.packer = packer;
-  }
-
-  public void importContainer(long containerID, Path tarFilePath) {
-    try {
-      ContainerData originalContainerData;
-      try (FileInputStream tempContainerTarStream = new FileInputStream(
-          tarFilePath.toFile())) {
-        byte[] containerDescriptorYaml =
-            packer.unpackContainerDescriptor(tempContainerTarStream);
-        originalContainerData = ContainerDataYaml.readContainer(
-            containerDescriptorYaml);
-      }
-
-      try (FileInputStream tempContainerTarStream = new FileInputStream(
-          tarFilePath.toFile())) {
-
-        Container container = controller.importContainer(
-            originalContainerData.getContainerType(),
-            containerID,
-            originalContainerData.getMaxSize(),
-            originalContainerData.getOriginPipelineId(),
-            originalContainerData.getOriginNodeId(),
-            tempContainerTarStream,
-            packer);
-
-        containerSet.addContainer(container);
-      }
-
-    } catch (Exception e) {
-      LOG.error(
-          "Can't import the downloaded container data id=" + containerID,
-          e);
-    } finally {
-      try {
-        Files.delete(tarFilePath);
-      } catch (Exception ex) {
-        LOG.error("Got exception while deleting downloaded container file: "
-            + tarFilePath.toAbsolutePath().toString(), ex);
-      }
-    }
-  }
-
-  @Override
-  public void replicate(ReplicationTask task) {
-    long containerID = task.getContainerId();
-
-    List<DatanodeDetails> sourceDatanodes = task.getSources();
-
-    LOG.info("Starting replication of container {} from {}", containerID,
-        sourceDatanodes);
-
-    CompletableFuture<Path> tempTarFile = downloader
-        .getContainerDataFromReplicas(containerID,
-            sourceDatanodes);
-
-    try {
-      //wait for the download. This thread pool is limiting the parallel
-      //downloads, so it's ok to block here and wait for the full download.
-      Path path = tempTarFile.get();
-      LOG.info("Container {} is downloaded, starting to import.",
-          containerID);
-      importContainer(containerID, path);
-      LOG.info("Container {} is replicated successfully", containerID);
-      task.setStatus(Status.DONE);
-    } catch (Exception e) {
-      LOG.error("Container replication was unsuccessful .", e);
-      task.setStatus(Status.FAILED);
-    }
-  }
-}
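
How these collaborators fit together: a wiring sketch using the downloader and packer types named in the imports above. The collaborators themselves are assumed to come from the datanode startup path:

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
    import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
    import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;

    public final class ReplicatorWiring {
      private ReplicatorWiring() { }

      /** Assemble the default replicator and run one task synchronously. */
      static void replicateOnce(ContainerSet containerSet,
          ContainerController controller, Configuration conf,
          long containerId, List<DatanodeDetails> sources) {
        ContainerReplicator replicator = new DownloadAndImportReplicator(
            containerSet, controller,
            new SimpleContainerDownloader(conf), // defined later in this diff
            new TarContainerPacker());           // tar.gz (de)compression
        replicator.replicate(new ReplicationTask(containerId, sources));
      }
    }
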
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
deleted file mode 100644
index 8494a15..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.replication;
-
-import java.io.BufferedOutputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .CopyContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .CopyContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto
-    .IntraDatanodeProtocolServiceGrpc;
-import org.apache.hadoop.hdds.protocol.datanode.proto
-    .IntraDatanodeProtocolServiceGrpc.IntraDatanodeProtocolServiceStub;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
-import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder;
-import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Client to read container data from Grpc.
- */
-public class GrpcReplicationClient {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(GrpcReplicationClient.class);
-
-  private final ManagedChannel channel;
-
-  private final IntraDatanodeProtocolServiceStub client;
-
-  private final Path workingDirectory;
-
-  public GrpcReplicationClient(String host,
-      int port, Path workingDir) {
-
-    channel = NettyChannelBuilder.forAddress(host, port)
-        .usePlaintext()
-        .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)
-        .build();
-    client = IntraDatanodeProtocolServiceGrpc.newStub(channel);
-    this.workingDirectory = workingDir;
-
-  }
-
-  public CompletableFuture<Path> download(long containerId) {
-    CopyContainerRequestProto request =
-        CopyContainerRequestProto.newBuilder()
-            .setContainerID(containerId)
-            .setLen(-1)
-            .setReadOffset(0)
-            .build();
-
-    CompletableFuture<Path> response = new CompletableFuture<>();
-
-    Path destinationPath =
-        getWorkingDirectory().resolve("container-" + containerId + ".tar.gz");
-
-    client.download(request,
-        new StreamDownloader(containerId, response, destinationPath));
-    return response;
-  }
-
-  private Path getWorkingDirectory() {
-    return workingDirectory;
-  }
-
-  public void shutdown() {
-    channel.shutdown();
-    try {
-      channel.awaitTermination(5, TimeUnit.SECONDS);
-    } catch (Exception e) {
-      LOG.error("failed to shutdown replication channel", e);
-    }
-  }
-
-  /**
-   * Grpc stream observer to CompletableFuture adapter.
-   */
-  public static class StreamDownloader
-      implements StreamObserver<CopyContainerResponseProto> {
-
-    private final CompletableFuture<Path> response;
-
-    private final long containerId;
-
-    private BufferedOutputStream stream;
-
-    private Path outputPath;
-
-    public StreamDownloader(long containerId, CompletableFuture<Path> response,
-        Path outputPath) {
-      this.response = response;
-      this.containerId = containerId;
-      this.outputPath = outputPath;
-      try {
-        Preconditions.checkNotNull(outputPath, "Output path cannot be null");
-        Path parentPath = Preconditions.checkNotNull(outputPath.getParent());
-        Files.createDirectories(parentPath);
-        stream =
-            new BufferedOutputStream(new FileOutputStream(outputPath.toFile()));
-      } catch (IOException e) {
-        throw new RuntimeException("OutputPath can't be used: " + outputPath,
-            e);
-      }
-
-    }
-
-    @Override
-    public void onNext(CopyContainerResponseProto chunk) {
-      try {
-        stream.write(chunk.getData().toByteArray());
-      } catch (IOException e) {
-        response.completeExceptionally(e);
-      }
-    }
-
-    @Override
-    public void onError(Throwable throwable) {
-      try {
-        stream.close();
-        LOG.error("Container download was unsuccessfull", throwable);
-        try {
-          Files.delete(outputPath);
-        } catch (IOException ex) {
-          LOG.error(
-              "Error happened during the download but can't delete the "
-                  + "temporary destination.", ex);
-        }
-        response.completeExceptionally(throwable);
-      } catch (IOException e) {
-        response.completeExceptionally(e);
-      }
-    }
-
-    @Override
-    public void onCompleted() {
-      try {
-        stream.close();
-        LOG.info("Container is downloaded to {}", outputPath);
-        response.complete(outputPath);
-      } catch (IOException e) {
-        response.completeExceptionally(e);
-      }
-
-    }
-  }
-
-}
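
A usage sketch for the client above; host, port and container id are placeholders (the real code resolves the STANDALONE port from DatanodeDetails, as SimpleContainerDownloader later in this diff shows):

    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.concurrent.CompletableFuture;

    public final class GrpcDownloadExample {
      private GrpcDownloadExample() { }

      public static void main(String[] args) throws Exception {
        // Placeholder endpoint of a source datanode's standalone port.
        GrpcReplicationClient client = new GrpcReplicationClient(
            "datanode-1.example.com", 9859, Paths.get("/tmp/container-copy"));
        CompletableFuture<Path> tarball = client.download(42L);
        try {
          System.out.println("downloaded to " + tarball.get());
        } finally {
          client.shutdown();
        }
      }
    }
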
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java
deleted file mode 100644
index 7919e54..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.replication;
-
-import java.io.ByteArrayOutputStream;
-import java.io.Closeable;
-import java.io.IOException;
-import java.io.OutputStream;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .CopyContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .CopyContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto
-    .IntraDatanodeProtocolServiceGrpc;
-
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Service to make containers available for replication.
- */
-public class GrpcReplicationService extends
-    IntraDatanodeProtocolServiceGrpc.IntraDatanodeProtocolServiceImplBase {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(GrpcReplicationService.class);
-
-  private final ContainerReplicationSource containerReplicationSource;
-
-  public GrpcReplicationService(
-      ContainerReplicationSource containerReplicationSource) {
-    this.containerReplicationSource = containerReplicationSource;
-  }
-
-  @Override
-  public void download(CopyContainerRequestProto request,
-      StreamObserver<CopyContainerResponseProto> responseObserver) {
-    LOG.info("Streaming container data ({}) to other datanode",
-        request.getContainerID());
-    try {
-      GrpcOutputStream outputStream =
-          new GrpcOutputStream(responseObserver, request.getContainerID());
-      containerReplicationSource
-          .copyData(request.getContainerID(), outputStream);
-    } catch (IOException e) {
-      LOG.error("Can't stream the container data", e);
-      responseObserver.onError(e);
-    }
-  }
-
-  private static class GrpcOutputStream extends OutputStream
-      implements Closeable {
-
-    private static final int BUFFER_SIZE_IN_BYTES = 1024 * 1024;
-
-    private final StreamObserver<CopyContainerResponseProto> responseObserver;
-
-    private final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
-
-    private long containerId;
-
-    private int readOffset = 0;
-
-    private int writtenBytes;
-
-    GrpcOutputStream(
-        StreamObserver<CopyContainerResponseProto> responseObserver,
-        long containerId) {
-      this.responseObserver = responseObserver;
-      this.containerId = containerId;
-    }
-
-    @Override
-    public void write(int b) throws IOException {
-      try {
-        buffer.write(b);
-        if (buffer.size() > BUFFER_SIZE_IN_BYTES) {
-          flushBuffer(false);
-        }
-      } catch (Exception ex) {
-        responseObserver.onError(ex);
-      }
-    }
-
-    private void flushBuffer(boolean eof) {
-      if (buffer.size() > 0) {
-        CopyContainerResponseProto response =
-            CopyContainerResponseProto.newBuilder()
-                .setContainerID(containerId)
-                .setData(ByteString.copyFrom(buffer.toByteArray()))
-                .setEof(eof)
-                .setReadOffset(readOffset)
-                .setLen(buffer.size())
-                .build();
-        responseObserver.onNext(response);
-        readOffset += buffer.size();
-        writtenBytes += buffer.size();
-        buffer.reset();
-      }
-    }
-
-    @Override
-    public void close() throws IOException {
-      flushBuffer(true);
-      LOG.info("{} bytes written to the rpc stream from container {}",
-          writtenBytes, containerId);
-      responseObserver.onCompleted();
-    }
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java
deleted file mode 100644
index d318ffa..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A naive implementation of the replication source which creates a tar file
- * on-demand, without pre-creating the compressed archives.
- */
-public class OnDemandContainerReplicationSource
-    implements ContainerReplicationSource {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OnDemandContainerReplicationSource.class);
-
-  private ContainerController controller;
-
-  private TarContainerPacker packer = new TarContainerPacker();
-
-  public OnDemandContainerReplicationSource(
-      ContainerController controller) {
-    this.controller = controller;
-  }
-
-  @Override
-  public void prepare(long containerId) {
-
-  }
-
-  @Override
-  public void copyData(long containerId, OutputStream destination)
-      throws IOException {
-
-    Container container = controller.getContainer(containerId);
-
-    Preconditions.checkNotNull(
-        container, "Container is not found " + containerId);
-
-    controller.exportContainer(
-        container.getContainerType(), containerId, destination, packer);
-
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
deleted file mode 100644
index 7a07c4d..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentHashMap.KeySetView;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.replication.ReplicationTask.Status;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Single point to schedule the downloading tasks based on priorities.
- */
-public class ReplicationSupervisor {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ReplicationSupervisor.class);
-
-  private final ContainerSet containerSet;
-  private final ContainerReplicator replicator;
-  private final ThreadPoolExecutor executor;
-  private final AtomicLong replicationCounter;
-
-  /**
-   * A set of container IDs that are currently being downloaded
-   * or queued for download. Tracked so we don't schedule > 1
-   * concurrent download for the same container.
-   */
-  private final KeySetView<Object, Boolean> containersInFlight;
-
-  public ReplicationSupervisor(
-      ContainerSet containerSet,
-      ContainerReplicator replicator, int poolSize) {
-    this.containerSet = containerSet;
-    this.replicator = replicator;
-    this.containersInFlight = ConcurrentHashMap.newKeySet();
-    replicationCounter = new AtomicLong();
-    this.executor = new ThreadPoolExecutor(
-        0, poolSize, 60, TimeUnit.SECONDS,
-        new LinkedBlockingQueue<>(),
-        new ThreadFactoryBuilder().setDaemon(true)
-            .setNameFormat("ContainerReplicationThread-%d")
-            .build());
-  }
-
-  /**
-   * Queue an asynchronous download of the given container.
-   */
-  public void addTask(ReplicationTask task) {
-    if (containersInFlight.add(task.getContainerId())) {
-      executor.submit(new TaskRunner(task));
-    }
-  }
-
-  public void stop() {
-    try {
-      executor.shutdown();
-      if (!executor.awaitTermination(3, TimeUnit.SECONDS)) {
-        executor.shutdownNow();
-      }
-    } catch (InterruptedException ie) {
-      // Ignore, we don't really care about the failure.
-      Thread.currentThread().interrupt();
-    }
-  }
-
-  /**
-   * Get the number of containers currently being downloaded
-   * or scheduled for download.
-   * @return Count of in-flight replications.
-   */
-  @VisibleForTesting
-  public int getInFlightReplications() {
-    return containersInFlight.size();
-  }
-
-  private final class TaskRunner implements Runnable {
-    private final ReplicationTask task;
-
-    private TaskRunner(ReplicationTask task) {
-      this.task = task;
-    }
-
-    @Override
-    public void run() {
-      try {
-        if (containerSet.getContainer(task.getContainerId()) != null) {
-          LOG.debug("Container {} has already been downloaded.",
-              task.getContainerId());
-          return;
-        }
-
-        task.setStatus(Status.DOWNLOADING);
-        replicator.replicate(task);
-
-        if (task.getStatus() == Status.FAILED) {
-          LOG.error(
-              "Container {} can't be downloaded from any of the datanodes.",
-              task.getContainerId());
-        } else if (task.getStatus() == Status.DONE) {
-          LOG.info("Container {} is replicated.", task.getContainerId());
-        }
-      } finally {
-        containersInFlight.remove(task.getContainerId());
-        replicationCounter.incrementAndGet();
-      }
-    }
-  }
-
-  public long getReplicationCounter() {
-    return replicationCounter.get();
-  }
-}
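
A short sketch of the supervisor's deduplication behaviour; the pool size and the task are arbitrary values for illustration:

    import org.apache.hadoop.ozone.container.common.impl.ContainerSet;

    public final class SupervisorExample {
      private SupervisorExample() { }

      /** Queue a download; duplicates for the same container are dropped. */
      static void schedule(ContainerSet containerSet,
          ContainerReplicator replicator, ReplicationTask task) {
        ReplicationSupervisor supervisor =
            new ReplicationSupervisor(containerSet, replicator, 10);
        supervisor.addTask(task); // scheduled, id enters the in-flight set
        supervisor.addTask(task); // second submit is silently deduplicated
      }
    }
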
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java
deleted file mode 100644
index 9019811..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import java.time.Instant;
-import java.util.List;
-import java.util.Objects;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-
-/**
- * The task to download a container from the sources.
- */
-public class ReplicationTask {
-
-  private volatile Status status = Status.QUEUED;
-
-  private final long containerId;
-
-  private List<DatanodeDetails> sources;
-
-  private final Instant queued = Instant.now();
-
-  public ReplicationTask(long containerId,
-      List<DatanodeDetails> sources) {
-    this.containerId = containerId;
-    this.sources = sources;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    ReplicationTask that = (ReplicationTask) o;
-    return containerId == that.containerId;
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(containerId);
-  }
-
-  public long getContainerId() {
-    return containerId;
-  }
-
-  public List<DatanodeDetails> getSources() {
-    return sources;
-  }
-
-  public Status getStatus() {
-    return status;
-  }
-
-  public void setStatus(
-      Status status) {
-    this.status = status;
-  }
-
-  @Override
-  public String toString() {
-    return "ReplicationTask{" +
-        "status=" + status +
-        ", containerId=" + containerId +
-        ", sources=" + sources +
-        ", queued=" + queued +
-        '}';
-  }
-
-  public Instant getQueued() {
-    return queued;
-  }
-
-  /**
-   * Status of the replication.
-   */
-  public enum Status {
-    QUEUED,
-    DOWNLOADING,
-    FAILED,
-    DONE
-  }
-}
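
Task identity is the container id alone, which is what makes the supervisor's in-flight set work; a tiny demonstration:

    import java.util.Collections;

    public final class TaskIdentityExample {
      private TaskIdentityExample() { }

      public static void main(String[] args) {
        ReplicationTask a = new ReplicationTask(7L, Collections.emptyList());
        ReplicationTask b = new ReplicationTask(7L, Collections.emptyList());
        // equals()/hashCode() use only the container id, so a set (or the
        // supervisor's in-flight tracking) treats these as the same task.
        System.out.println(a.equals(b) + " " + (a.hashCode() == b.hashCode()));
      }
    }
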
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
deleted file mode 100644
index 37a44ac..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.replication;
-
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-import java.util.function.Function;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Simple ContainerDownloader implementation that downloads the missing
- * container from the first available datanode.
- * <p>
- * This is not the most efficient implementation, as it uses only one source
- * for the container download.
- */
-public class SimpleContainerDownloader implements ContainerDownloader {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SimpleContainerDownloader.class);
-
-  private final Path workingDirectory;
-
-  public SimpleContainerDownloader(Configuration conf) {
-
-    String workDirString =
-        conf.get(OzoneConfigKeys.OZONE_CONTAINER_COPY_WORKDIR);
-
-    if (workDirString == null) {
-      workingDirectory = Paths.get(System.getProperty("java.io.tmpdir"))
-          .resolve("container-copy");
-    } else {
-      workingDirectory = Paths.get(workDirString);
-    }
-  }
-
-  @Override
-  public CompletableFuture<Path> getContainerDataFromReplicas(long containerId,
-      List<DatanodeDetails> sourceDatanodes) {
-
-    CompletableFuture<Path> result = null;
-    for (DatanodeDetails datanode : sourceDatanodes) {
-      try {
-
-        if (result == null) {
-          GrpcReplicationClient grpcReplicationClient =
-              new GrpcReplicationClient(datanode.getIpAddress(),
-                  datanode.getPort(Name.STANDALONE).getValue(),
-                  workingDirectory);
-          result = grpcReplicationClient.download(containerId);
-        } else {
-          result = result.thenApply(CompletableFuture::completedFuture)
-              .exceptionally(t -> {
-                LOG.error("Error on replicating container: " + containerId, t);
-                GrpcReplicationClient grpcReplicationClient =
-                    new GrpcReplicationClient(datanode.getIpAddress(),
-                        datanode.getPort(Name.STANDALONE).getValue(),
-                        workingDirectory);
-                return grpcReplicationClient.download(containerId);
-              }).thenCompose(Function.identity());
-        }
-      } catch (Exception ex) {
-        LOG.error(String.format(
-            "Container %s download from datanode %s was unsuccessful. "
-                + "Trying the next datanode", containerId, datanode), ex);
-      }
-
-    }
-    return result;
-
-  }
-
-  @Override
-  public void close() {
-    // noop
-  }
-}
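
The loop above builds a fallback chain: each failed future triggers a fresh download from the next datanode. The same pattern, distilled into a generic helper (a sketch, not project code):

    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.function.Function;
    import java.util.function.Supplier;

    /** Try each attempt in order; only start the next one if the
     *  previous future completed exceptionally. */
    public final class FallbackChain {
      private FallbackChain() { }

      static <T> CompletableFuture<T> firstSuccessful(
          List<Supplier<CompletableFuture<T>>> attempts) {
        CompletableFuture<T> result = null;
        for (Supplier<CompletableFuture<T>> attempt : attempts) {
          if (result == null) {
            result = attempt.get();
          } else {
            // Wrap the value, swap in a new attempt on failure, unwrap.
            result = result.thenApply(CompletableFuture::completedFuture)
                .exceptionally(t -> attempt.get())
                .thenCompose(Function.identity());
          }
        }
        return result; // null if the attempts list was empty
      }
    }
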
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
deleted file mode 100644
index 38a853c..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-/**
- * Classes to replicate container data between datanodes.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java
deleted file mode 100644
index 1a51012..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-/**
- * Generic Ozone-specific classes.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
deleted file mode 100644
index 61bdb27..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-
-import java.io.IOException;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.security.KerberosInfo;
-
-/**
- * The protocol spoken between datanodes and SCM. For specifics, please see
- * the .proto file that defines this protocol.
- */
-@KerberosInfo(
-    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
-@InterfaceAudience.Private
-public interface StorageContainerDatanodeProtocol {
-
-  /**
-   * Version 1: Initial version.
-   */
-  @SuppressWarnings("checkstyle:ConstantName")
-  long versionID = 1L;
-
-  /**
-   * Returns SCM version.
-   * @return Version info.
-   */
-  SCMVersionResponseProto getVersion(SCMVersionRequestProto versionRequest)
-      throws IOException;
-
-  /**
-   * Used by a datanode to send a heartbeat.
-   * @param heartbeat Heartbeat
-   * @return - SCMHeartbeatResponseProto
-   * @throws IOException if the RPC call fails
-   */
-  SCMHeartbeatResponseProto sendHeartbeat(SCMHeartbeatRequestProto heartbeat)
-      throws IOException;
-
-  /**
-   * Register Datanode.
-   * @param datanodeDetails - Datanode Details.
-   * @param nodeReport - Node Report.
-   * @param containerReportsRequestProto - Container Reports.
-   * @return SCM Command.
-   */
-  SCMRegisteredResponseProto register(
-          DatanodeDetailsProto datanodeDetails,
-          NodeReportProto nodeReport,
-          ContainerReportsProto containerReportsRequestProto,
-          PipelineReportsProto pipelineReports) throws IOException;
-
-}
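
For context on the interface removed above, here is a minimal sketch of the handshake it supported: a datanode first negotiates the version, then registers, and only then heartbeats. This is a hypothetical illustration against the deleted API; the empty default-instance reports stand in for the populated reports a real datanode would send.

    import java.io.IOException;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
    import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;

    /** Hypothetical walk-through of the datanode-to-SCM handshake. */
    final class HandshakeSketch {
      static void handshake(StorageContainerDatanodeProtocol scm,
          DatanodeDetailsProto datanode) throws IOException {
        // 1. Negotiate the protocol version first.
        scm.getVersion(SCMVersionRequestProto.newBuilder().build());

        // 2. Register; real datanodes send populated reports here.
        scm.register(datanode,
            NodeReportProto.getDefaultInstance(),
            ContainerReportsProto.getDefaultInstance(),
            PipelineReportsProto.getDefaultInstance());

        // 3. Heartbeat periodically once registered.
        scm.sendHeartbeat(SCMHeartbeatRequestProto.newBuilder()
            .setDatanodeDetails(datanode)
            .build());
      }
    }
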
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
deleted file mode 100644
index b5d75ef..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-
-import java.util.List;
-
-/**
- * The protocol spoken between datanodes and SCM.
- *
- * Please note that the full protocol spoken between a datanode and SCM is
- * split across two interfaces: one that deals with node state and another
- * that deals with containers.
- *
- * This interface has the functions that deal with the state of a datanode.
- */
-@InterfaceAudience.Private
-public interface StorageContainerNodeProtocol {
-  /**
-   * Gets the version info from SCM.
-   * @param versionRequest - version Request.
-   * @return - SCM version info and other information needed by the
-   * datanode.
-   */
-  VersionResponse getVersion(SCMVersionRequestProto versionRequest);
-
-  /**
-   * Register the node if the node finds that it is not registered with any SCM.
-   * @param datanodeDetails DatanodeDetails
-   * @param nodeReport NodeReportProto
-   * @param pipelineReport PipelineReportsProto
-   * @return RegisteredCommand with the registration result
-   */
-  RegisteredCommand register(DatanodeDetails datanodeDetails,
-                             NodeReportProto nodeReport,
-                             PipelineReportsProto pipelineReport);
-
-  /**
-   * Sends a heartbeat to indicate that the datanode is alive and doing well.
-   * @param datanodeDetails - Datanode ID.
-   * @return list of SCMCommands for the datanode to execute
-   */
-  List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails);
-
-  /**
-   * Checks whether the node is registered.
-   * @param datanodeDetails - Datanode ID.
-   * @return true if the node is registered, false otherwise
-   */
-  Boolean isNodeRegistered(DatanodeDetails datanodeDetails);
-
-}
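
On the SCM side, the node-state half of the protocol above was typically driven as: register once, then answer each heartbeat with the commands queued for that datanode. A hedged sketch, where `nodeManager` is assumed to be some implementation of the interface:

    import java.util.List;

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol;
    import org.apache.hadoop.ozone.protocol.commands.SCMCommand;

    /** Illustrative SCM-side heartbeat flow; nodeManager and dn are assumptions. */
    final class NodeFlowSketch {
      static void onHeartbeat(StorageContainerNodeProtocol nodeManager,
          DatanodeDetails dn) {
        if (!nodeManager.isNodeRegistered(dn)) {
          // An unregistered node would normally get a ReregisterCommand
          // instead of having its heartbeat processed.
          return;
        }
        List<SCMCommand> commands = nodeManager.processHeartbeat(dn);
        // Each command is then shipped back in the heartbeat response.
      }
    }
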
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java
deleted file mode 100644
index 4d328d3..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java
+++ /dev/null
@@ -1,154 +0,0 @@
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.protocol;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-/**
- * Version response class.
- */
-public class VersionResponse {
-  private final int version;
-  private final Map<String, String> values;
-
-  /**
-   * Creates a version response.
-   * @param version - version number
-   * @param values - additional key/value pairs
-   */
-  public VersionResponse(int version, Map<String, String> values) {
-    this.version = version;
-    this.values = values;
-  }
-
-  /**
-   * Creates a version response with no additional values.
-   * @param version - version number
-   */
-  public VersionResponse(int version) {
-    this.version = version;
-    this.values = new HashMap<>();
-  }
-
-  /**
-   * Returns a new Builder.
-   * @return - Builder.
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Constructs a VersionResponse from a protobuf message.
-   * @param response - SCMVersionResponseProto
-   * @return VersionResponse
-   */
-  public static VersionResponse getFromProtobuf(SCMVersionResponseProto
-                                                    response) {
-    return new VersionResponse(response.getSoftwareVersion(),
-        response.getKeysList().stream()
-            .collect(Collectors.toMap(KeyValue::getKey,
-                KeyValue::getValue)));
-  }
-
-  /**
-   * Adds a value to the version response.
-   * @param key - String
-   * @param value - String
-   */
-  public void put(String key, String value) {
-    if (this.values.containsKey(key)) {
-      throw new IllegalArgumentException("Duplicate key in version response");
-    }
-    values.put(key, value);
-  }
-
-  /**
-   * Returns a protobuf message.
-   * @return SCMVersionResponseProto.
-   */
-  public SCMVersionResponseProto getProtobufMessage() {
-
-    List<KeyValue> list = new LinkedList<>();
-    for (Map.Entry<String, String> entry : values.entrySet()) {
-      list.add(KeyValue.newBuilder().setKey(entry.getKey()).
-          setValue(entry.getValue()).build());
-    }
-    return
-        SCMVersionResponseProto.newBuilder()
-            .setSoftwareVersion(this.version)
-            .addAllKeys(list).build();
-  }
-
-  public String getValue(String key) {
-    return this.values.get(key);
-  }
-
-  /**
-   * Builder class.
-   */
-  public static class Builder {
-    private int version;
-    private Map<String, String> values;
-
-    Builder() {
-      values = new HashMap<>();
-    }
-
-    /**
-     * Sets the version.
-     * @param ver - version
-     * @return Builder
-     */
-    public Builder setVersion(int ver) {
-      this.version = ver;
-      return this;
-    }
-
-    /**
-     * Adds a value to the version response.
-     * @param key - String
-     * @param value - String
-     * @return Builder
-     */
-    public Builder addValue(String key, String value) {
-      if (this.values.containsKey(key)) {
-        throw new IllegalArgumentException("Duplicate key in version response");
-      }
-      values.put(key, value);
-      return this;
-    }
-
-    /**
-     * Builds the version response.
-     * @return VersionResponse.
-     */
-    public VersionResponse build() {
-      return new VersionResponse(this.version, this.values);
-    }
-  }
-}
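
A short usage note on the class above: values round-trip losslessly through the protobuf form, and duplicate keys are rejected by put() and addValue(). A minimal sketch (the key and value are illustrative):

    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
    import org.apache.hadoop.ozone.protocol.VersionResponse;

    final class VersionResponseSketch {
      static void roundTrip() {
        VersionResponse response = VersionResponse.newBuilder()
            .setVersion(1)
            .addValue("clusterID", "CID-example")  // illustrative key/value
            .build();
        // Convert to protobuf and back; the value survives the round trip.
        SCMVersionResponseProto proto = response.getProtobufMessage();
        VersionResponse parsed = VersionResponse.getFromProtobuf(proto);
        assert "CID-example".equals(parsed.getValue("clusterID"));
      }
    }
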
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
deleted file mode 100644
index ded0464..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-
-/**
- * Asks datanode to close a container.
- */
-public class CloseContainerCommand
-    extends SCMCommand<CloseContainerCommandProto> {
-
-  private final PipelineID pipelineID;
-  private boolean force;
-
-  public CloseContainerCommand(final long containerID,
-      final PipelineID pipelineID) {
-    this(containerID, pipelineID, false);
-  }
-
-  public CloseContainerCommand(final long containerID,
-      final PipelineID pipelineID, boolean force) {
-    super(containerID);
-    this.pipelineID = pipelineID;
-    this.force = force;
-  }
-
-  /**
-   * Returns the type of this command.
-   *
-   * @return Type
-   */
-  @Override
-  public SCMCommandProto.Type getType() {
-    return SCMCommandProto.Type.closeContainerCommand;
-  }
-
-  @Override
-  public CloseContainerCommandProto getProto() {
-    return CloseContainerCommandProto.newBuilder()
-        .setContainerID(getId())
-        .setCmdId(getId())
-        .setPipelineID(pipelineID.getProtobuf())
-        .setForce(force)
-        .build();
-  }
-
-  public static CloseContainerCommand getFromProtobuf(
-      CloseContainerCommandProto closeContainerProto) {
-    Preconditions.checkNotNull(closeContainerProto);
-    return new CloseContainerCommand(closeContainerProto.getCmdId(),
-        PipelineID.getFromProtobuf(closeContainerProto.getPipelineID()),
-        closeContainerProto.getForce());
-  }
-
-  public long getContainerID() {
-    return getId();
-  }
-
-  public PipelineID getPipelineID() {
-    return pipelineID;
-  }
-}
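
Note that the command's cmdId doubles as the container id here (the constructor passes containerID to super), so a protobuf round trip preserves both. A sketch, assuming PipelineID.randomId() as used elsewhere in the deleted tree:

    import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
    import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;

    final class CloseContainerSketch {
      static void roundTrip() {
        CloseContainerCommand cmd =
            new CloseContainerCommand(42L, PipelineID.randomId(), true);
        // Encode to protobuf and decode, as done on either side of the wire.
        CloseContainerCommand decoded =
            CloseContainerCommand.getFromProtobuf(cmd.getProto());
        assert decoded.getContainerID() == 42L;
        assert decoded.getPipelineID().equals(cmd.getPipelineID());
      }
    }
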
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java
deleted file mode 100644
index 66bf623..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import java.util.UUID;
-
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload;
-
-/**
- * A command for a datanode, paired with the id of the destination datanode.
- */
-public class CommandForDatanode<T extends GeneratedMessage> implements
-    IdentifiableEventPayload {
-
-  private final UUID datanodeId;
-
-  private final SCMCommand<T> command;
-
-  // TODO: Command for datanode should take DatanodeDetails as parameter.
-  public CommandForDatanode(UUID datanodeId, SCMCommand<T> command) {
-    this.datanodeId = datanodeId;
-    this.command = command;
-  }
-
-  public UUID getDatanodeId() {
-    return datanodeId;
-  }
-
-  public SCMCommand<T> getCommand() {
-    return command;
-  }
-
-  public long getId() {
-    return command.getId();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandStatus.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandStatus.java
deleted file mode 100644
index 4b3ce84..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandStatus.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-
-/**
- * A class that is used to communicate status of datanode commands.
- */
-public class CommandStatus {
-
-  private SCMCommandProto.Type type;
-  private Long cmdId;
-  private Status status;
-  private String msg;
-
-  CommandStatus(Type type, Long cmdId, Status status, String msg) {
-    this.type = type;
-    this.cmdId = cmdId;
-    this.status = status;
-    this.msg = msg;
-  }
-
-  public Type getType() {
-    return type;
-  }
-
-  public Long getCmdId() {
-    return cmdId;
-  }
-
-  public Status getStatus() {
-    return status;
-  }
-
-  public String getMsg() {
-    return msg;
-  }
-
-  /**
-   * Allows the status to be changed after the CommandStatus is initialized.
-   *
-   * @param status - new status
-   */
-  public void setStatus(Status status) {
-    this.status = status;
-  }
-
-  public void setStatus(boolean cmdExecuted) {
-    setStatus(cmdExecuted ? Status.EXECUTED : Status.FAILED);
-  }
-
-  /**
-   * Returns a CommandStatus from the protocol buffers.
-   *
-   * @param cmdStatusProto - protoBuf Message
-   * @return CommandStatus
-   */
-  public CommandStatus getFromProtoBuf(
-      StorageContainerDatanodeProtocolProtos.CommandStatus cmdStatusProto) {
-    return CommandStatusBuilder.newBuilder()
-        .setCmdId(cmdStatusProto.getCmdId())
-        .setStatus(cmdStatusProto.getStatus())
-        .setType(cmdStatusProto.getType())
-        .setMsg(cmdStatusProto.getMsg())
-        .build();
-  }
-
-  /**
-   * Returns the protocol buffer message for this command status.
-   *
-   * @return StorageContainerDatanodeProtocolProtos.CommandStatus
-   */
-  public StorageContainerDatanodeProtocolProtos.CommandStatus
-      getProtoBufMessage() {
-    StorageContainerDatanodeProtocolProtos.CommandStatus.Builder builder =
-        StorageContainerDatanodeProtocolProtos.CommandStatus.newBuilder()
-            .setCmdId(this.getCmdId())
-            .setStatus(this.getStatus())
-            .setType(this.getType());
-    if (this.getMsg() != null) {
-      builder.setMsg(this.getMsg());
-    }
-    return builder.build();
-  }
-
-  /**
-   * Builder class for CommandStatus.
-   */
-  public static class CommandStatusBuilder {
-
-    private SCMCommandProto.Type type;
-    private Long cmdId;
-    private StorageContainerDatanodeProtocolProtos.CommandStatus.Status status;
-    private String msg;
-
-    CommandStatusBuilder() {
-    }
-
-    public static CommandStatusBuilder newBuilder() {
-      return new CommandStatusBuilder();
-    }
-
-    public Type getType() {
-      return type;
-    }
-
-    public Long getCmdId() {
-      return cmdId;
-    }
-
-    public Status getStatus() {
-      return status;
-    }
-
-    public String getMsg() {
-      return msg;
-    }
-
-    public CommandStatusBuilder setType(Type commandType) {
-      this.type = commandType;
-      return this;
-    }
-
-    public CommandStatusBuilder setCmdId(Long commandId) {
-      this.cmdId = commandId;
-      return this;
-    }
-
-    public CommandStatusBuilder setStatus(Status commandStatus) {
-      this.status = commandStatus;
-      return this;
-    }
-
-    public CommandStatusBuilder setMsg(String message) {
-      this.msg = message;
-      return this;
-    }
-
-    public CommandStatus build() {
-      return new CommandStatus(type, cmdId, status, msg);
-    }
-  }
-}
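
For reference, the builder above was the usual way a datanode recorded command progress, with setStatus(boolean) flipping the status once execution finished. A sketch; the PENDING constant is assumed from the proto enum (EXECUTED and FAILED are confirmed by setStatus above):

    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
    import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
    import org.apache.hadoop.ozone.protocol.commands.CommandStatus.CommandStatusBuilder;

    final class CommandStatusSketch {
      static CommandStatus track() {
        CommandStatus status = CommandStatusBuilder.newBuilder()
            .setType(Type.closeContainerCommand)
            .setCmdId(42L)
            .setStatus(Status.PENDING)  // assumed initial state in the enum
            .build();
        status.setStatus(true);  // EXECUTED on success, FAILED otherwise
        return status;
      }
    }
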
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java
deleted file mode 100644
index e9ccb08..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
-
-/**
- * Command status to report about block deletion.
- */
-public class DeleteBlockCommandStatus extends CommandStatus {
-
-  private ContainerBlocksDeletionACKProto blocksDeletionAck = null;
-
-  public DeleteBlockCommandStatus(Type type, Long cmdId,
-      StorageContainerDatanodeProtocolProtos.CommandStatus.Status status,
-      String msg,
-      ContainerBlocksDeletionACKProto blocksDeletionAck) {
-    super(type, cmdId, status, msg);
-    this.blocksDeletionAck = blocksDeletionAck;
-  }
-
-  public void setBlocksDeletionAck(
-      ContainerBlocksDeletionACKProto deletionAck) {
-    blocksDeletionAck = deletionAck;
-  }
-
-  @Override
-  public CommandStatus getFromProtoBuf(
-      StorageContainerDatanodeProtocolProtos.CommandStatus cmdStatusProto) {
-    return DeleteBlockCommandStatusBuilder.newBuilder()
-        .setBlockDeletionAck(cmdStatusProto.getBlockDeletionAck())
-        .setCmdId(cmdStatusProto.getCmdId())
-        .setStatus(cmdStatusProto.getStatus())
-        .setType(cmdStatusProto.getType())
-        .setMsg(cmdStatusProto.getMsg())
-        .build();
-  }
-
-  @Override
-  public StorageContainerDatanodeProtocolProtos.CommandStatus
-      getProtoBufMessage() {
-    StorageContainerDatanodeProtocolProtos.CommandStatus.Builder builder =
-        StorageContainerDatanodeProtocolProtos.CommandStatus.newBuilder()
-            .setCmdId(this.getCmdId())
-            .setStatus(this.getStatus())
-            .setType(this.getType());
-    if (blocksDeletionAck != null) {
-      builder.setBlockDeletionAck(blocksDeletionAck);
-    }
-    if (this.getMsg() != null) {
-      builder.setMsg(this.getMsg());
-    }
-    return builder.build();
-  }
-
-  /**
-   * Builder for DeleteBlockCommandStatus.
-   */
-  public static final class DeleteBlockCommandStatusBuilder
-      extends CommandStatusBuilder {
-    private ContainerBlocksDeletionACKProto blocksDeletionAck = null;
-
-    public static DeleteBlockCommandStatusBuilder newBuilder() {
-      return new DeleteBlockCommandStatusBuilder();
-    }
-
-    public DeleteBlockCommandStatusBuilder setBlockDeletionAck(
-        ContainerBlocksDeletionACKProto deletionAck) {
-      this.blocksDeletionAck = deletionAck;
-      return this;
-    }
-
-    @Override
-    public CommandStatus build() {
-      return new DeleteBlockCommandStatus(getType(), getCmdId(), getStatus(),
-          getMsg(), blocksDeletionAck);
-    }
-
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
deleted file mode 100644
index 03a876c..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeleteBlocksCommandProto;
-
-import java.util.List;
-
-/**
- * An SCM command that asks a datanode to delete a number of blocks.
- */
-public class DeleteBlocksCommand extends
-    SCMCommand<DeleteBlocksCommandProto> {
-
-  private List<DeletedBlocksTransaction> blocksTobeDeleted;
-
-  public DeleteBlocksCommand(List<DeletedBlocksTransaction> blocks) {
-    super();
-    this.blocksTobeDeleted = blocks;
-  }
-
-  // Should be called only for protobuf conversion
-  private DeleteBlocksCommand(List<DeletedBlocksTransaction> blocks,
-      long id) {
-    super(id);
-    this.blocksTobeDeleted = blocks;
-  }
-
-  public List<DeletedBlocksTransaction> blocksTobeDeleted() {
-    return this.blocksTobeDeleted;
-  }
-
-  @Override
-  public SCMCommandProto.Type getType() {
-    return SCMCommandProto.Type.deleteBlocksCommand;
-  }
-
-  public static DeleteBlocksCommand getFromProtobuf(
-      DeleteBlocksCommandProto deleteBlocksProto) {
-    return new DeleteBlocksCommand(deleteBlocksProto
-        .getDeletedBlocksTransactionsList(), deleteBlocksProto.getCmdId());
-  }
-
-  @Override
-  public DeleteBlocksCommandProto getProto() {
-    return DeleteBlocksCommandProto.newBuilder()
-        .setCmdId(getId())
-        .addAllDeletedBlocksTransactions(blocksTobeDeleted).build();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java
deleted file mode 100644
index 48aa83b..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.protocol.commands;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeleteContainerCommandProto;
-
-/**
- * SCM command which tells the datanode to delete a container.
- */
-public class DeleteContainerCommand extends
-    SCMCommand<DeleteContainerCommandProto> {
-
-  private final long containerId;
-  private final boolean force;
-
-  /**
-   * Creates a DeleteContainerCommand that asks a datanode to delete a
-   * container.
-   * @param containerId - ID of the container to delete
-   */
-  public DeleteContainerCommand(long containerId) {
-    this(containerId, false);
-  }
-
-  /**
-   * Creates a DeleteContainerCommand that asks a datanode to delete a
-   * container.
-   * @param containerId - ID of the container to delete
-   * @param forceFlag if set to true, the container is deleted without
-   * checking its state.
-   */
-  public DeleteContainerCommand(long containerId, boolean forceFlag) {
-    this.containerId = containerId;
-    this.force = forceFlag;
-  }
-
-  @Override
-  public SCMCommandProto.Type getType() {
-    return SCMCommandProto.Type.deleteContainerCommand;
-  }
-
-  @Override
-  public DeleteContainerCommandProto getProto() {
-    DeleteContainerCommandProto.Builder builder =
-        DeleteContainerCommandProto.newBuilder();
-    builder.setCmdId(getId())
-        .setContainerID(getContainerID()).setForce(force);
-    return builder.build();
-  }
-
-  public long getContainerID() {
-    return containerId;
-  }
-
-  public boolean isForce() {
-    return force;
-  }
-
-  public static DeleteContainerCommand getFromProtobuf(
-      DeleteContainerCommandProto protoMessage) {
-    Preconditions.checkNotNull(protoMessage);
-    return new DeleteContainerCommand(protoMessage.getContainerID(),
-        protoMessage.getForce());
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
deleted file mode 100644
index 42778cb..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import com.google.common.base.Strings;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto
-    .ErrorCode;
-
-/**
- * Response to Datanode Register call.
- */
-public class RegisteredCommand {
-  private String clusterID;
-  private ErrorCode error;
-  private DatanodeDetails datanode;
-
-  public RegisteredCommand(final ErrorCode error, final DatanodeDetails node,
-      final String clusterID) {
-    this.datanode = node;
-    this.clusterID = clusterID;
-    this.error = error;
-  }
-
-  /**
-   * Returns a new builder.
-   *
-   * @return - Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Returns datanode.
-   *
-   * @return - Datanode.
-   */
-  public DatanodeDetails getDatanode() {
-    return datanode;
-  }
-
-  /**
-   * Returns cluster ID.
-   *
-   * @return - ClusterID
-   */
-  public String getClusterID() {
-    return clusterID;
-  }
-
-  /**
-   * Returns ErrorCode.
-   *
-   * @return - ErrorCode
-   */
-  public ErrorCode getError() {
-    return error;
-  }
-
-  /**
-   * Gets the protobuf message of this object.
-   *
-   * @return A protobuf message.
-   */
-  public SCMRegisteredResponseProto getProtoBufMessage() {
-    SCMRegisteredResponseProto.Builder builder =
-        SCMRegisteredResponseProto.newBuilder()
-            // TODO : Fix this later when we have multiple SCM support.
-            // .setAddressList(addressList)
-            .setClusterID(this.clusterID)
-            .setDatanodeUUID(this.datanode.getUuidString())
-            .setErrorCode(this.error);
-    if (!Strings.isNullOrEmpty(datanode.getHostName())) {
-      builder.setHostname(datanode.getHostName());
-    }
-    if (!Strings.isNullOrEmpty(datanode.getIpAddress())) {
-      builder.setIpAddress(datanode.getIpAddress());
-    }
-    if (!Strings.isNullOrEmpty(datanode.getNetworkName())) {
-      builder.setNetworkName(datanode.getNetworkName());
-    }
-    if (!Strings.isNullOrEmpty(datanode.getNetworkLocation())) {
-      builder.setNetworkLocation(datanode.getNetworkLocation());
-    }
-
-    return builder.build();
-  }
-
-  /**
-   * A builder class to verify all values are sane.
-   */
-  public static class Builder {
-    private DatanodeDetails datanode;
-    private String clusterID;
-    private ErrorCode error;
-
-    /**
-     * Sets datanode details.
-     *
-     * @param node - datanode details
-     * @return Builder
-     */
-    public Builder setDatanode(DatanodeDetails node) {
-      this.datanode = node;
-      return this;
-    }
-
-    /**
-     * Sets cluster ID.
-     *
-     * @param cluster - clusterID
-     * @return Builder
-     */
-    public Builder setClusterID(String cluster) {
-      this.clusterID = cluster;
-      return this;
-    }
-
-    /**
-     * Sets Error code.
-     *
-     * @param errorCode - error code
-     * @return Builder
-     */
-    public Builder setErrorCode(ErrorCode errorCode) {
-      this.error = errorCode;
-      return this;
-    }
-
-    /**
-     * Build the command object.
-     *
-     * @return RegisteredCommand
-     */
-    public RegisteredCommand build() {
-      if ((this.error == ErrorCode.success) && (this.datanode == null
-          || Strings.isNullOrEmpty(this.datanode.getUuidString())
-          || Strings.isNullOrEmpty(this.clusterID))) {
-        throw new IllegalArgumentException("On success, RegisteredCommand "
-            + "needs datanodeUUID and ClusterID.");
-      }
-      return new RegisteredCommand(this.error, this.datanode, this.clusterID);
-    }
-  }
-}
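
The builder's validation above is worth noting: on ErrorCode.success it requires both a datanode UUID and a cluster id, and throws IllegalArgumentException otherwise. A sketch, with `dn` assumed to be a fully populated DatanodeDetails and the cluster id illustrative:

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode;
    import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;

    final class RegisteredCommandSketch {
      static RegisteredCommand ack(DatanodeDetails dn) {
        return RegisteredCommand.newBuilder()
            .setErrorCode(ErrorCode.success)
            .setDatanode(dn)
            .setClusterID("CID-example")  // illustrative cluster id
            .build();
      }
    }
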
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
deleted file mode 100644
index e663bed..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import java.util.List;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto
-    .Builder;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-
-import com.google.common.base.Preconditions;
-
-/**
- * SCM command to request replication of a container.
- */
-public class ReplicateContainerCommand
-    extends SCMCommand<ReplicateContainerCommandProto> {
-
-  private final long containerID;
-  private final List<DatanodeDetails> sourceDatanodes;
-
-  public ReplicateContainerCommand(long containerID,
-      List<DatanodeDetails> sourceDatanodes) {
-    super();
-    this.containerID = containerID;
-    this.sourceDatanodes = sourceDatanodes;
-  }
-
-  // Should be called only for protobuf conversion
-  public ReplicateContainerCommand(long containerID,
-      List<DatanodeDetails> sourceDatanodes, long id) {
-    super(id);
-    this.containerID = containerID;
-    this.sourceDatanodes = sourceDatanodes;
-  }
-
-  @Override
-  public Type getType() {
-    return SCMCommandProto.Type.replicateContainerCommand;
-  }
-
-  @Override
-  public ReplicateContainerCommandProto getProto() {
-    Builder builder = ReplicateContainerCommandProto.newBuilder()
-        .setCmdId(getId())
-        .setContainerID(containerID);
-    for (DatanodeDetails dd : sourceDatanodes) {
-      builder.addSources(dd.getProtoBufMessage());
-    }
-    return builder.build();
-  }
-
-  public static ReplicateContainerCommand getFromProtobuf(
-      ReplicateContainerCommandProto protoMessage) {
-    Preconditions.checkNotNull(protoMessage);
-
-    List<DatanodeDetails> datanodeDetails =
-        protoMessage.getSourcesList()
-            .stream()
-            .map(DatanodeDetails::getFromProtoBuf)
-            .collect(Collectors.toList());
-
-    return new ReplicateContainerCommand(protoMessage.getContainerID(),
-        datanodeDetails, protoMessage.getCmdId());
-
-  }
-
-  public long getContainerID() {
-    return containerID;
-  }
-
-  public List<DatanodeDetails> getSourceDatanodes() {
-    return sourceDatanodes;
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
deleted file mode 100644
index e3ea4ae..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-
-import static org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ReregisterCommandProto;
-
-/**
- * Informs a datanode to register itself with SCM again.
- */
-public class ReregisterCommand extends
-    SCMCommand<ReregisterCommandProto> {
-
-  /**
-   * Returns the type of this command.
-   *
-   * @return Type
-   */
-  @Override
-  public SCMCommandProto.Type getType() {
-    return SCMCommandProto.Type.reregisterCommand;
-  }
-
-  /**
-   * ReregisterCommand does not carry a command id; this always returns 0.
-   *
-   * @return 0
-   */
-  @Override
-  public long getId() {
-    return 0;
-  }
-
-  @Override
-  public ReregisterCommandProto getProto() {
-    return ReregisterCommandProto
-        .newBuilder()
-        .build();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
deleted file mode 100644
index 3c4e05b..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.hdds.HddsIdFactory;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload;
-
-/**
- * A class that acts as the base class to convert between Java and SCM
- * commands in protobuf format.
- * @param <T>
- */
-public abstract class SCMCommand<T extends GeneratedMessage> implements
-    IdentifiableEventPayload {
-  private long id;
-
-  SCMCommand() {
-    this.id = HddsIdFactory.getLongId();
-  }
-
-  SCMCommand(long id) {
-    this.id = id;
-  }
-
-  /**
-   * Returns the type of this command.
-   * @return Type
-   */
-  public abstract SCMCommandProto.Type getType();
-
-  /**
-   * Gets the protobuf message of this object.
-   * @return A protobuf message.
-   */
-  public abstract T getProto();
-
-  /**
-   * Gets the commandId of this object.
-   * @return the command id as a long.
-   */
-  public long getId() {
-    return id;
-  }
-
-}
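
One subtlety of the base class above: the no-arg constructor draws a fresh id from HddsIdFactory, while the long-arg constructor preserves an id received over the wire. The deleted DeleteBlocksCommand shows the pattern; a sketch of the round trip:

    import java.util.Collections;

    import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;

    final class CommandIdSketch {
      static void roundTrip() {
        // Fresh command: id allocated by HddsIdFactory.
        DeleteBlocksCommand fresh =
            new DeleteBlocksCommand(Collections.emptyList());
        // Decoded command: id taken from the protobuf cmdId field.
        DeleteBlocksCommand decoded =
            DeleteBlocksCommand.getFromProtobuf(fresh.getProto());
        assert decoded.getId() == fresh.getId();
      }
    }
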
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java
deleted file mode 100644
index 7083c1b..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-/**
- Set of classes that help with protobuf conversions.
- **/
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java
deleted file mode 100644
index a718fa7..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.protocol;
-
-/**
- * This package contains classes for HDDS protocol definitions.
- */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
deleted file mode 100644
index 9b44666..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest.Builder;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeResponse;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Type;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.function.Consumer;
-
-/**
- * This class is the client-side translator to translate the requests made on
- * the {@link StorageContainerDatanodeProtocol} interface to the RPC server
- * implementing {@link StorageContainerDatanodeProtocolPB}.
- */
-public class StorageContainerDatanodeProtocolClientSideTranslatorPB
-    implements StorageContainerDatanodeProtocol, ProtocolTranslator, Closeable {
-
-  /**
-   * RpcController is not used and hence is set to null.
-   */
-  private static final RpcController NULL_RPC_CONTROLLER = null;
-  private final StorageContainerDatanodeProtocolPB rpcProxy;
-
-  /**
-   * Constructs a Client side interface that calls into SCM datanode protocol.
-   *
-   * @param rpcProxy - Proxy for RPC.
-   */
-  public StorageContainerDatanodeProtocolClientSideTranslatorPB(
-      StorageContainerDatanodeProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated with it. If
-   * the stream is already closed then invoking this method has no effect.
-   * <p>
-   * <p> As noted in {@link AutoCloseable#close()}, cases where the close may
-   * fail require careful attention. It is strongly advised to relinquish the
-   * underlying resources and to internally <em>mark</em> the {@code Closeable}
-   * as closed, prior to throwing the {@code IOException}.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-    RPC.stopProxy(rpcProxy);
-  }
-
-  /**
-   * Return the proxy object underlying this protocol translator.
-   *
-   * @return the proxy object underlying this protocol translator.
-   */
-  @Override
-  public Object getUnderlyingProxyObject() {
-    return rpcProxy;
-  }
-
-  /**
-   * Helper method to wrap the request and send the message.
-   */
-  private SCMDatanodeResponse submitRequest(Type type,
-      Consumer<SCMDatanodeRequest.Builder> builderConsumer) throws IOException {
-    final SCMDatanodeResponse response;
-    try {
-      Builder builder = SCMDatanodeRequest.newBuilder()
-          .setCmdType(type);
-      builderConsumer.accept(builder);
-      SCMDatanodeRequest wrapper = builder.build();
-
-      response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
-    } catch (ServiceException ex) {
-      throw ProtobufHelper.getRemoteException(ex);
-    }
-    return response;
-  }
-
-  /**
-   * Returns SCM version.
-   *
-   * @param request - ignored; a fresh version request is built and sent.
-   * @return Version info.
-   */
-  @Override
-  public SCMVersionResponseProto getVersion(SCMVersionRequestProto
-      request) throws IOException {
-    return submitRequest(Type.GetVersion,
-        (builder) -> builder
-            .setGetVersionRequest(SCMVersionRequestProto.newBuilder().build()))
-        .getGetVersionResponse();
-  }
-
-  /**
-   * Sent by the datanode to SCM.
-   *
-   * @param heartbeat node heartbeat
-   * @throws IOException if the RPC call fails
-   */
-  @Override
-  public SCMHeartbeatResponseProto sendHeartbeat(
-      SCMHeartbeatRequestProto heartbeat) throws IOException {
-    return submitRequest(Type.SendHeartbeat,
-        (builder) -> builder.setSendHeartbeatRequest(heartbeat))
-        .getSendHeartbeatResponse();
-  }
-
-  /**
-   * Register Datanode.
-   *
-   * @param datanodeDetailsProto - Datanode Details
-   * @param nodeReport - Node Report.
-   * @param containerReportsRequestProto - Container Reports.
-   * @return SCM Command.
-   */
-  @Override
-  public SCMRegisteredResponseProto register(
-      DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport,
-      ContainerReportsProto containerReportsRequestProto,
-      PipelineReportsProto pipelineReportsProto)
-      throws IOException {
-    SCMRegisterRequestProto.Builder req =
-        SCMRegisterRequestProto.newBuilder();
-    req.setDatanodeDetails(datanodeDetailsProto);
-    req.setContainerReport(containerReportsRequestProto);
-    req.setPipelineReports(pipelineReportsProto);
-    req.setNodeReport(nodeReport);
-    return submitRequest(Type.Register,
-        (builder) -> builder.setRegisterRequest(req))
-        .getRegisterResponse();
-  }
-}
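
For completeness, wiring up the client-side translator above followed the standard Hadoop protobuf-RPC recipe. A hedged sketch; the SCM address and port are illustrative:

    import java.io.IOException;
    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.ProtobufRpcEngine;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolClientSideTranslatorPB;
    import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;

    final class ClientWiringSketch {
      static void connect() throws IOException {
        Configuration conf = new Configuration();
        RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
            ProtobufRpcEngine.class);
        StorageContainerDatanodeProtocolPB proxy = RPC.getProxy(
            StorageContainerDatanodeProtocolPB.class,
            RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class),
            new InetSocketAddress("scm.example.com", 9861), conf);
        try (StorageContainerDatanodeProtocolClientSideTranslatorPB client =
            new StorageContainerDatanodeProtocolClientSideTranslatorPB(proxy)) {
          client.getVersion(null);  // request contents are ignored
        }
      }
    }
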
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
deleted file mode 100644
index 9006e91..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos
-    .StorageContainerDatanodeProtocolService;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.security.KerberosInfo;
-
-/**
- * Protocol used from a datanode to StorageContainerManager.  This extends
- * the Protocol Buffers service interface to add Hadoop-specific annotations.
- */
-@ProtocolInfo(protocolName =
-    "org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol",
-    protocolVersion = 1)
-@KerberosInfo(
-    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
-    clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
-public interface StorageContainerDatanodeProtocolPB extends
-    StorageContainerDatanodeProtocolService.BlockingInterface {
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
deleted file mode 100644
index ed704eb..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeResponse;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Status;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Type;
-import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
-import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is the server-side translator that forwards requests received on
- * {@link StorageContainerDatanodeProtocolPB} to the {@link
- * StorageContainerDatanodeProtocol} server implementation.
- */
-public class StorageContainerDatanodeProtocolServerSideTranslatorPB
-    implements StorageContainerDatanodeProtocolPB {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(StorageContainerDatanodeProtocolServerSideTranslatorPB.class);
-
-  private final StorageContainerDatanodeProtocol impl;
-  private final OzoneProtocolMessageDispatcher<SCMDatanodeRequest,
-      SCMDatanodeResponse> dispatcher;
-
-  public StorageContainerDatanodeProtocolServerSideTranslatorPB(
-      StorageContainerDatanodeProtocol impl,
-      ProtocolMessageMetrics protocolMessageMetrics) {
-    this.impl = impl;
-    dispatcher =
-        new OzoneProtocolMessageDispatcher<>("SCMDatanodeProtocol",
-            protocolMessageMetrics,
-            LOG);
-  }
-
-  public SCMRegisteredResponseProto register(
-      SCMRegisterRequestProto request) throws IOException {
-    ContainerReportsProto containerRequestProto = request
-        .getContainerReport();
-    NodeReportProto dnNodeReport = request.getNodeReport();
-    PipelineReportsProto pipelineReport = request.getPipelineReports();
-    return impl.register(request.getDatanodeDetails(), dnNodeReport,
-        containerRequestProto, pipelineReport);
-
-  }
-
-  @Override
-  public SCMDatanodeResponse submitRequest(RpcController controller,
-      SCMDatanodeRequest request) throws ServiceException {
-    return dispatcher.processRequest(request, this::processMessage,
-        request.getCmdType(), request.getTraceID());
-  }
-
-  public SCMDatanodeResponse processMessage(SCMDatanodeRequest request)
-      throws ServiceException {
-    try {
-      Type cmdType = request.getCmdType();
-      switch (cmdType) {
-      case GetVersion:
-        return SCMDatanodeResponse.newBuilder()
-            .setCmdType(cmdType)
-            .setStatus(Status.OK)
-            .setGetVersionResponse(
-                impl.getVersion(request.getGetVersionRequest()))
-            .build();
-      case SendHeartbeat:
-        return SCMDatanodeResponse.newBuilder()
-            .setCmdType(cmdType)
-            .setStatus(Status.OK)
-            .setSendHeartbeatResponse(
-                impl.sendHeartbeat(request.getSendHeartbeatRequest()))
-            .build();
-      case Register:
-        return SCMDatanodeResponse.newBuilder()
-            .setCmdType(cmdType)
-            .setStatus(Status.OK)
-            .setRegisterResponse(register(request.getRegisterRequest()))
-            .build();
-      default:
-        throw new ServiceException("Unknown command type: " + cmdType);
-      }
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-}
\ No newline at end of file
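The translator above delegates every call to an OzoneProtocolMessageDispatcher.
A hypothetical, minimal stand-in with the same shape, to show what the
dispatcher is expected to do; the real class also updates the
ProtocolMessageMetrics passed to the translator's constructor, which is elided
here.

    import com.google.protobuf.ServiceException;
    import org.slf4j.Logger;

    final class MiniDispatcher<REQUEST, RESPONSE> {

      @FunctionalInterface
      interface CheckedFunction<T, R> {
        R apply(T t) throws ServiceException;
      }

      private final String serviceName;
      private final Logger log;

      MiniDispatcher(String serviceName, Logger log) {
        this.serviceName = serviceName;
        this.log = log;
      }

      RESPONSE processRequest(REQUEST request,
          CheckedFunction<REQUEST, RESPONSE> handler,
          Object cmdType, String traceId) throws ServiceException {
        if (log.isTraceEnabled()) {
          log.trace("[{}] processing {} (traceID={})",
              serviceName, cmdType, traceId);
        }
        // Delegate to the handler, e.g. processMessage above.
        return handler.apply(request);
      }
    }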
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
deleted file mode 100644
index 378a8f3..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.protocolPB;
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
deleted file mode 100644
index a975cd5..0000000
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ /dev/null
@@ -1,429 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for an *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.hdds.protocol.proto";
-
-option java_outer_classname = "StorageContainerDatanodeProtocolProtos";
-
-option java_generic_services = true;
-
-option java_generate_equals_and_hash = true;
-
-package hadoop.hdds;
-
-import "hdds.proto";
-
-
-message SCMDatanodeRequest {
-  required Type cmdType = 1; // Type of the command
-
-  optional string traceID = 2;
-
-  optional SCMVersionRequestProto getVersionRequest = 3;
-  optional SCMRegisterRequestProto registerRequest = 4;
-  optional SCMHeartbeatRequestProto sendHeartbeatRequest = 5;
-}
-
-message SCMDatanodeResponse {
-  required Type cmdType = 1; // Type of the command
-
-  optional string traceID = 2;
-
-  optional bool success = 3 [default = true];
-
-  optional string message = 4;
-
-  required Status status = 5;
-
-  optional SCMVersionResponseProto getVersionResponse = 6;
-  optional SCMRegisteredResponseProto registerResponse = 7;
-  optional SCMHeartbeatResponseProto sendHeartbeatResponse = 8;
-
-}
-
-enum Type {
-  GetVersion = 1;
-  Register = 2;
-  SendHeartbeat = 3;
-}
-
-enum Status {
-  OK = 1;
-  ERROR = 2;
-}
-
-/**
- * Request for version info of the software stack on the server.
- */
-message SCMVersionRequestProto {}
-
-/**
-* Generic response that is sent to a version request. This allows keys to be
-* added on the fly and the protocol to remain stable.
-*/
-message SCMVersionResponseProto {
-  required uint32 softwareVersion = 1;
-  repeated hadoop.hdds.KeyValue keys = 2;
-}
-
-message SCMRegisterRequestProto {
-  required DatanodeDetailsProto datanodeDetails = 1;
-  required NodeReportProto nodeReport = 2;
-  required ContainerReportsProto containerReport = 3;
-  required PipelineReportsProto pipelineReports = 4;
-}
-
-/**
- * Datanode ID returned by the SCM. This is similar to NameNode
- * registration of a datanode.
- */
-message SCMRegisteredResponseProto {
-  enum ErrorCode {
-    success = 1;
-    errorNodeNotPermitted = 2;
-  }
-  required ErrorCode errorCode = 1;
-  required string datanodeUUID = 2;
-  required string clusterID = 3;
-  optional SCMNodeAddressList addressList = 4;
-  optional string hostname = 5;
-  optional string ipAddress = 6;
-  optional string networkName = 7;
-  optional string networkLocation = 8;
-}
-
-/**
-* This message is sent by the datanode to indicate that it is alive or that
-* it is registering with the node manager.
-*/
-message SCMHeartbeatRequestProto {
-  required DatanodeDetailsProto datanodeDetails = 1;
-  optional NodeReportProto nodeReport = 2;
-  optional ContainerReportsProto containerReport = 3;
-  repeated IncrementalContainerReportProto incrementalContainerReport = 4;
-  repeated CommandStatusReportsProto commandStatusReports = 5;
-  optional ContainerActionsProto containerActions = 6;
-  optional PipelineActionsProto pipelineActions = 7;
-  optional PipelineReportsProto pipelineReports = 8;
-}
-
-/*
- * A group of commands for the datanode to execute
- */
-message SCMHeartbeatResponseProto {
-  required string datanodeUUID = 1;
-  repeated SCMCommandProto commands = 2;
-}
-
-message SCMNodeAddressList {
-  repeated string addressList = 1;
-}
-
-/**
-* This message is sent along with the heartbeat to report datanode
-* storage utilization to SCM.
-*/
-message NodeReportProto {
-  repeated StorageReportProto storageReport = 1;
-}
-
-message StorageReportProto {
-  required string storageUuid = 1;
-  required string storageLocation = 2;
-  optional uint64 capacity = 3 [default = 0];
-  optional uint64 scmUsed = 4 [default = 0];
-  optional uint64 remaining = 5 [default = 0];
-  optional StorageTypeProto storageType = 6 [default = DISK];
-  optional bool failed = 7 [default = false];
-}
-
-/**
- * Types of recognized storage media.
- */
-enum StorageTypeProto {
-  DISK = 1;
-  SSD = 2;
-  ARCHIVE = 3;
-  RAM_DISK = 4;
-  PROVIDED = 5;
-}
-
-message ContainerReportsProto {
-  repeated ContainerReplicaProto reports = 1;
-}
-
-message IncrementalContainerReportProto {
-  repeated ContainerReplicaProto report = 1;
-}
-
-message ContainerReplicaProto {
-  enum State {
-    OPEN = 1;
-    CLOSING = 2;
-    QUASI_CLOSED = 3;
-    CLOSED = 4;
-    UNHEALTHY = 5;
-    INVALID = 6;
-  }
-  required int64 containerID = 1;
-  required State state = 2;
-  optional int64 size = 3;
-  optional int64 used = 4;
-  optional int64 keyCount = 5;
-  optional int64 readCount = 6;
-  optional int64 writeCount = 7;
-  optional int64 readBytes = 8;
-  optional int64 writeBytes = 9;
-  optional string finalhash = 10;
-  optional int64 deleteTransactionId = 11;
-  optional uint64 blockCommitSequenceId = 12;
-  optional string originNodeId = 13;
-}
-
-message CommandStatusReportsProto {
-  repeated CommandStatus cmdStatus = 1;
-}
-
-message CommandStatus {
-  enum Status {
-    PENDING = 1;
-    EXECUTED = 2;
-    FAILED = 3;
-  }
-  required int64 cmdId = 1;
-  required Status status = 2 [default = PENDING];
-  required SCMCommandProto.Type type = 3;
-  optional string msg = 4;
-  optional ContainerBlocksDeletionACKProto blockDeletionAck = 5;
-}
-
-message ContainerActionsProto {
-  repeated ContainerAction containerActions = 1;
-}
-
-message ContainerAction {
-  enum Action {
-    CLOSE = 1;
-  }
-
-  enum Reason {
-    CONTAINER_FULL = 1;
-    CONTAINER_UNHEALTHY = 2;
-  }
-
-  required int64 containerID = 1;
-  required Action action = 2;
-  optional Reason reason = 3;
-}
-
-message PipelineReport {
-  required PipelineID pipelineID = 1;
-}
-
-message PipelineReportsProto {
-  repeated PipelineReport pipelineReport = 1;
-}
-
-message PipelineActionsProto {
-  repeated PipelineAction pipelineActions = 1;
-}
-
-message ClosePipelineInfo {
-  enum Reason {
-    PIPELINE_FAILED = 1;
-    PIPELINE_LOG_FAILED = 2;
-    STATEMACHINE_TRANSACTION_FAILED = 3;
-  }
-  required PipelineID pipelineID = 1;
-  optional Reason reason = 3;
-  optional string detailedReason = 4;
-}
-
-message PipelineAction {
-  enum Action {
-    CLOSE = 1;
-  }
-
-  /**
-   * Action will be used to identify the correct pipeline action.
-   */
-  required Action action = 1;
-  optional ClosePipelineInfo closePipeline = 2;
-}
-
-/*
- * These are commands returned by SCM to the datanode for execution.
- */
-message SCMCommandProto {
-  enum Type {
-    reregisterCommand = 1;
-    deleteBlocksCommand = 2;
-    closeContainerCommand = 3;
-    deleteContainerCommand = 4;
-    replicateContainerCommand = 5;
-  }
-  // TODO: once we start using protoc 3.x, refactor this message using "oneof"
-  required Type commandType = 1;
-  optional ReregisterCommandProto reregisterCommandProto = 2;
-  optional DeleteBlocksCommandProto deleteBlocksCommandProto = 3;
-  optional CloseContainerCommandProto closeContainerCommandProto = 4;
-  optional DeleteContainerCommandProto deleteContainerCommandProto = 5;
-  optional ReplicateContainerCommandProto replicateContainerCommandProto = 6;
-}
-
-/**
- * SCM informs a datanode to register itself again.
- * On receiving this command, the datanode will transition to REGISTER state.
- */
-message ReregisterCommandProto {}
-
-
-// Command carried in the heartbeat response from SCM; contains a list of
-// block deletion transactions.
-message DeleteBlocksCommandProto {
-  repeated DeletedBlocksTransaction deletedBlocksTransactions = 1;
-  required int64 cmdId = 3;
-}
-
-// The deleted blocks which are stored in deletedBlock.db of SCM.
-// We don't use BlockID because this only contains multiple localIDs
-// of the same containerID.
-message DeletedBlocksTransaction {
-  required int64 txID = 1;
-  required int64 containerID = 2;
-  repeated int64 localID = 3;
-  // the retry count for sending the delete command to the datanode.
-  required int32 count = 4;
-}
-
-// ACK message datanode sent to SCM, contains the result of
-// block deletion transactions.
-message ContainerBlocksDeletionACKProto {
-  message DeleteBlockTransactionResult {
-    required int64 txID = 1;
-    required int64 containerID = 2;
-    required bool success = 3;
-  }
-  repeated DeleteBlockTransactionResult results = 1;
-  required string dnId = 2;
-}
-
-/**
-This command asks the datanode to close a specific container.
-*/
-message CloseContainerCommandProto {
-  required int64 containerID = 1;
-  required PipelineID pipelineID = 2;
-  // cmdId will be removed
-  required int64 cmdId = 3;
-  // Force will be used when closing a container outside of Ratis.
-  optional bool force = 4 [default = false];
-}
-
-/**
-This command asks the datanode to delete a specific container.
-*/
-message DeleteContainerCommandProto {
-  required int64 containerID = 1;
-  required int64 cmdId = 2;
-  required bool force = 3 [default = false];
-}
-
-/**
-This command asks the datanode to replicate a container from specific sources.
-*/
-message ReplicateContainerCommandProto {
-  required int64 containerID = 1;
-  repeated DatanodeDetailsProto sources = 2;
-  required int64 cmdId = 3;
-}
-
-/**
- * Protocol used from a datanode to StorageContainerManager.
- *
- * Please see the request and response messages for details of the RPC calls.
- *
- * Here is a simple state diagram that shows how a datanode would boot up and
- * communicate with SCM.
- *
- *           -----------------------
- *          |         Start         |
- *           ---------- ------------
- *                     |
- *                     |
- *                     |
- *                     |
- *                     |
- *                     |
- *                     |
- *           ----------v-------------
- *          |   Searching for  SCM    ------------
- *           ---------- -------------             |
- *                     |                          |
- *                     |                          |
- *                     |                ----------v-------------
- *                     |               | Register if needed     |
- *                     |                ----------- ------------
- *                     |                           |
- *                     v                           |
- *            ----------- ----------------         |
- *  ---------   Heartbeat state           <--------
- * |          --------^-------------------
- * |                  |
- * |                  |
- * |                  |
- * |                  |
- * |                  |
- * |                  |
- * |                  |
- *  ------------------
- *
- *
- *
- * Here is how this protocol is used by the datanode. When a datanode boots up
- * it moves into a state called SEARCHING_SCM. In this state the datanode is
- * trying to establish communication with the SCM. The addresses of the SCMs
- * are retrieved from the configuration information.
- *
- * In the SEARCHING_SCM state, the only RPC call made by the datanode is a
- * getVersion call to SCM. Once any of the SCMs reply, the datanode checks
- * whether it has a locally persisted datanode ID. If it has one, this
- * datanode is already registered with some SCM. If this file is not found,
- * the datanode assumes that it needs to register.
- *
- * If registration is needed, the datanode moves into the REGISTER state. It
- * will send a register call with the DatanodeDetailsProto data structure and
- * persist that info.
- *
- * The response to the register command contains the clusterID. This info is
- * also persisted by the datanode, which then moves into the heartbeat state.
- *
- * Once in the heartbeat state, the datanode sends heartbeats and container
- * reports to SCM and processes commands issued by SCM until it is shut down.
- *
- */
-service StorageContainerDatanodeProtocolService {
-
-  //Message sent from Datanode to SCM as a heartbeat.
-  rpc submitRequest (SCMDatanodeRequest) returns (SCMDatanodeResponse);
-}
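The messages above compose into the single request envelope served by
submitRequest. A minimal Java sketch of assembling a heartbeat, assuming the
generated protobuf classes from this file; the storage values and class name
are illustrative.

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Type;

    final class HeartbeatEnvelopeSketch {
      static SCMDatanodeRequest buildHeartbeat(DatanodeDetailsProto dn) {
        // Only datanodeDetails is required; every report field is optional
        // or repeated, so a bare liveness ping is a valid heartbeat.
        SCMHeartbeatRequestProto heartbeat =
            SCMHeartbeatRequestProto.newBuilder()
                .setDatanodeDetails(dn)
                .setNodeReport(NodeReportProto.newBuilder()
                    .addStorageReport(StorageReportProto.newBuilder()
                        .setStorageUuid("storage-uuid")   // illustrative
                        .setStorageLocation("/data/disk1")
                        .setCapacity(1_000_000L)
                        .setScmUsed(250_000L)
                        .setRemaining(750_000L)))
                .build();
        // Wrap it in the generic request envelope dispatched by submitRequest.
        return SCMDatanodeRequest.newBuilder()
            .setCmdType(Type.SendHeartbeat)
            .setSendHeartbeatRequest(heartbeat)
            .build();
      }
    }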
diff --git a/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider b/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider
deleted file mode 100644
index 2e103fe..0000000
--- a/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainerProvider
diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep
deleted file mode 100644
index ff1232e..0000000
--- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep
+++ /dev/null
@@ -1,17 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
deleted file mode 100644
index af56d06..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.ServicePlugin;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Test class for {@link HddsDatanodeService}.
- */
-public class TestHddsDatanodeService {
-  private File testDir;
-  private OzoneConfiguration conf;
-  private HddsDatanodeService service;
-  private String[] args = new String[] {};
-
-  @Before
-  public void setUp() {
-    testDir = GenericTestUtils.getRandomizedTestDir();
-    conf = new OzoneConfiguration();
-    conf.setBoolean(OZONE_ENABLED, true);
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
-    conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY, MockService.class,
-        ServicePlugin.class);
-
-    String volumeDir = testDir + "/disk1";
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, volumeDir);
-  }
-
-  @After
-  public void tearDown() {
-    FileUtil.fullyDelete(testDir);
-  }
-
-  @Test
-  public void testStartup() throws IOException {
-    service = HddsDatanodeService.createHddsDatanodeService(args);
-    service.start(conf);
-
-    assertNotNull(service.getDatanodeDetails());
-    assertNotNull(service.getDatanodeDetails().getHostName());
-    assertFalse(service.getDatanodeStateMachine().isDaemonStopped());
-
-    service.stop();
-    service.join();
-    service.close();
-  }
-
-  static class MockService implements ServicePlugin {
-
-    @Override
-    public void close() throws IOException {
-      // Do nothing
-    }
-
-    @Override
-    public void start(Object arg0) {
-      // Do nothing
-    }
-
-    @Override
-    public void stop() {
-      // Do nothing
-    }
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
deleted file mode 100644
index 04fd3a4..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
+++ /dev/null
@@ -1,274 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.hdds.security.x509.keys.KeyCodec;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.util.ServicePlugin;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.File;
-import java.nio.file.Paths;
-import java.security.KeyPair;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.cert.X509Certificate;
-import java.util.concurrent.Callable;
-
-import static org.apache.hadoop.ozone.HddsDatanodeService.getLogger;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-
-/**
- * Test class for {@link HddsDatanodeService} secure startup and certificate
- * client initialization.
- */
-public class TestHddsSecureDatanodeInit {
-
-  private static File testDir;
-  private static OzoneConfiguration conf;
-  private static HddsDatanodeService service;
-  private static String[] args = new String[]{};
-  private static PrivateKey privateKey;
-  private static PublicKey publicKey;
-  private static GenericTestUtils.LogCapturer dnLogs;
-  private static CertificateClient client;
-  private static SecurityConfig securityConfig;
-  private static KeyCodec keyCodec;
-  private static CertificateCodec certCodec;
-  private static X509CertificateHolder certHolder;
-  private final static String DN_COMPONENT = DNCertificateClient.COMPONENT_NAME;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    testDir = GenericTestUtils.getRandomizedTestDir();
-    conf = new OzoneConfiguration();
-    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
-    //conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost");
-    String volumeDir = testDir + "/disk1";
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, volumeDir);
-
-    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-    conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY,
-        TestHddsDatanodeService.MockService.class,
-        ServicePlugin.class);
-    securityConfig = new SecurityConfig(conf);
-
-    service = HddsDatanodeService.createHddsDatanodeService(args);
-    dnLogs = GenericTestUtils.LogCapturer.captureLogs(getLogger());
-    callQuietly(() -> {
-      service.start(conf);
-      return null;
-    });
-    callQuietly(() -> {
-      service.initializeCertificateClient(conf);
-      return null;
-    });
-    certCodec = new CertificateCodec(securityConfig, DN_COMPONENT);
-    keyCodec = new KeyCodec(securityConfig, DN_COMPONENT);
-    dnLogs.clearOutput();
-    privateKey = service.getCertificateClient().getPrivateKey();
-    publicKey = service.getCertificateClient().getPublicKey();
-    X509Certificate x509Certificate = null;
-
-    x509Certificate = KeyStoreTestUtil.generateCertificate(
-        "CN=Test", new KeyPair(publicKey, privateKey), 10,
-        securityConfig.getSignatureAlgo());
-    certHolder = new X509CertificateHolder(x509Certificate.getEncoded());
-
-  }
-
-  @AfterClass
-  public static void tearDown() {
-    FileUtil.fullyDelete(testDir);
-  }
-
-  @Before
-  public void setUpDNCertClient(){
-
-    FileUtils.deleteQuietly(Paths.get(
-        securityConfig.getKeyLocation(DN_COMPONENT).toString(),
-        securityConfig.getPrivateKeyFileName()).toFile());
-    FileUtils.deleteQuietly(Paths.get(
-        securityConfig.getKeyLocation(DN_COMPONENT).toString(),
-        securityConfig.getPublicKeyFileName()).toFile());
-    FileUtils.deleteQuietly(Paths.get(securityConfig
-        .getCertificateLocation(DN_COMPONENT).toString(),
-        securityConfig.getCertificateFileName()).toFile());
-    dnLogs.clearOutput();
-    client = new DNCertificateClient(securityConfig,
-        certHolder.getSerialNumber().toString());
-    service.setCertificateClient(client);
-  }
-
-  @Test
-  public void testSecureDnStartupCase0() throws Exception {
-
-    // Case 0: When keypair as well as certificate is missing. Initial keypair
-    // boot-up. Getting the certificate will fail as no SCM is running.
-    LambdaTestUtils.intercept(Exception.class, "",
-        () -> service.initializeCertificateClient(conf));
-
-    Assert.assertNotNull(client.getPrivateKey());
-    Assert.assertNotNull(client.getPublicKey());
-    Assert.assertNull(client.getCertificate());
-    Assert.assertTrue(dnLogs.getOutput().contains("Init response: GETCERT"));
-  }
-
-  @Test
-  public void testSecureDnStartupCase1() throws Exception {
-    // Case 1: When only certificate is present.
-
-    certCodec.writeCertificate(certHolder);
-    LambdaTestUtils.intercept(RuntimeException.class, "DN security" +
-            " initialization failed",
-        () -> service.initializeCertificateClient(conf));
-    Assert.assertNull(client.getPrivateKey());
-    Assert.assertNull(client.getPublicKey());
-    Assert.assertNotNull(client.getCertificate());
-    Assert.assertTrue(dnLogs.getOutput().contains("Init response: FAILURE"));
-  }
-
-  @Test
-  public void testSecureDnStartupCase2() throws Exception {
-    // Case 2: When private key and certificate are missing.
-    keyCodec.writePublicKey(publicKey);
-    LambdaTestUtils.intercept(RuntimeException.class, "DN security" +
-            " initialization failed",
-        () -> service.initializeCertificateClient(conf));
-    Assert.assertNull(client.getPrivateKey());
-    Assert.assertNotNull(client.getPublicKey());
-    Assert.assertNull(client.getCertificate());
-    Assert.assertTrue(dnLogs.getOutput().contains("Init response: FAILURE"));
-  }
-
-  @Test
-  public void testSecureDnStartupCase3() throws Exception {
-    // Case 3: When only public key and certificate are present.
-    keyCodec.writePublicKey(publicKey);
-    certCodec.writeCertificate(certHolder);
-    LambdaTestUtils.intercept(RuntimeException.class, "DN security" +
-            " initialization failed",
-        () -> service.initializeCertificateClient(conf));
-    Assert.assertNull(client.getPrivateKey());
-    Assert.assertNotNull(client.getPublicKey());
-    Assert.assertNotNull(client.getCertificate());
-    Assert.assertTrue(dnLogs.getOutput().contains("Init response: FAILURE"));
-  }
-
-  @Test
-  public void testSecureDnStartupCase4() throws Exception {
-    // Case 4: When public key as well as certificate is missing.
-    keyCodec.writePrivateKey(privateKey);
-    LambdaTestUtils.intercept(RuntimeException.class, " DN security" +
-            " initialization failed",
-        () -> service.initializeCertificateClient(conf));
-    Assert.assertNotNull(client.getPrivateKey());
-    Assert.assertNull(client.getPublicKey());
-    Assert.assertNull(client.getCertificate());
-    Assert.assertTrue(dnLogs.getOutput().contains("Init response: FAILURE"));
-    dnLogs.clearOutput();
-  }
-
-  @Test
-  public void testSecureDnStartupCase5() throws Exception {
-    // Case 5: When private key and certificate are present.
-    certCodec.writeCertificate(certHolder);
-    keyCodec.writePrivateKey(privateKey);
-    service.initializeCertificateClient(conf);
-    Assert.assertNotNull(client.getPrivateKey());
-    Assert.assertNotNull(client.getPublicKey());
-    Assert.assertNotNull(client.getCertificate());
-    Assert.assertTrue(dnLogs.getOutput().contains("Init response: SUCCESS"));
-  }
-
-  @Test
-  public void testSecureDnStartupCase6() throws Exception {
-    // Case 6: If the key pair already exists, then the response should be GETCERT.
-    keyCodec.writePublicKey(publicKey);
-    keyCodec.writePrivateKey(privateKey);
-    LambdaTestUtils.intercept(Exception.class, "",
-        () -> service.initializeCertificateClient(conf));
-    Assert.assertNotNull(client.getPrivateKey());
-    Assert.assertNotNull(client.getPublicKey());
-    Assert.assertNull(client.getCertificate());
-    Assert.assertTrue(dnLogs.getOutput().contains("Init response: GETCERT"));
-  }
-
-  @Test
-  public void testSecureDnStartupCase7() throws Exception {
-    // Case 7: When keypair and certificate are present.
-    keyCodec.writePublicKey(publicKey);
-    keyCodec.writePrivateKey(privateKey);
-    certCodec.writeCertificate(certHolder);
-
-    service.initializeCertificateClient(conf);
-    Assert.assertNotNull(client.getPrivateKey());
-    Assert.assertNotNull(client.getPublicKey());
-    Assert.assertNotNull(client.getCertificate());
-    Assert.assertTrue(dnLogs.getOutput().contains("Init response: SUCCESS"));
-  }
-
-  /**
-   * Invokes a callable, ignoring all exceptions.
-   * @param closure closure to execute
-   */
-  public static void callQuietly(Callable closure) {
-    try {
-      closure.call();
-    } catch (Throwable e) {
-      // Ignore all Throwables.
-    }
-  }
-
-  @Test
-  public void testGetCSR() throws Exception {
-    keyCodec.writePublicKey(publicKey);
-    keyCodec.writePrivateKey(privateKey);
-    service.setCertificateClient(client);
-    PKCS10CertificationRequest csr =
-        service.getCSR(conf);
-    Assert.assertNotNull(csr);
-
-    csr = service.getCSR(conf);
-    Assert.assertNotNull(csr);
-
-    csr = service.getCSR(conf);
-    Assert.assertNotNull(csr);
-
-    csr = service.getCSR(conf);
-    Assert.assertNotNull(csr);
-  }
-
-}
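For reference, the startup matrix these cases exercise, reconstructed from the
assertions above (presence of key material on disk versus the expected Init
response):

    Case | private key | public key | certificate | Init response
    -----+-------------+------------+-------------+--------------
      0  |   absent    |   absent   |   absent    | GETCERT
      1  |   absent    |   absent   |   present   | FAILURE
      2  |   absent    |   present  |   absent    | FAILURE
      3  |   absent    |   present  |   present   | FAILURE
      4  |   present   |   absent   |   absent    | FAILURE
      5  |   present   |   absent   |   present   | SUCCESS
      6  |   present   |   present  |   absent    | GETCERT
      7  |   present   |   present  |   present   | SUCCESS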
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
deleted file mode 100644
index 923440e..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import java.net.InetSocketAddress;
-
-/**
- * Helper utility to test containers.
- */
-public final class ContainerTestUtils {
-
-  private ContainerTestUtils() {
-  }
-
-  /**
-   * Creates an endpoint state machine for testing purposes.
-   *
-   * @param conf - configuration
-   * @param address - InetSocketAddress of the server
-   * @param rpcTimeout - RPC timeout in milliseconds
-   * @return EndpointStateMachine
-   * @throws Exception
-   */
-  public static EndpointStateMachine createEndpoint(Configuration conf,
-      InetSocketAddress address, int rpcTimeout) throws Exception {
-    RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long version =
-        RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class);
-
-    StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy(
-        StorageContainerDatanodeProtocolPB.class, version,
-        address, UserGroupInformation.getCurrentUser(), conf,
-        NetUtils.getDefaultSocketFactory(conf), rpcTimeout,
-        RetryPolicies.TRY_ONCE_THEN_FAIL).getProxy();
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient =
-        new StorageContainerDatanodeProtocolClientSideTranslatorPB(rpcProxy);
-    return new EndpointStateMachine(address, rpcClient, conf);
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
deleted file mode 100644
index 5a7c30c..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageContainerDatanodeProtocolService;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
-import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
-import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
-import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import com.google.protobuf.BlockingService;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import org.mockito.Mockito;
-
-/**
- * Test utilities for standing up SCM RPC endpoints.
- */
-public final class SCMTestUtils {
-  /**
-   * Never constructed.
-   */
-  private SCMTestUtils() {
-  }
-
-  /**
-   * Starts an RPC server.
-   *
-   * @param conf configuration
-   * @param addr configured address of RPC server
-   * @param protocol RPC protocol provided by RPC server
-   * @param instance RPC protocol implementation instance
-   * @param handlerCount RPC server handler count
-   * @return RPC server
-   * @throws IOException if there is an I/O error while creating RPC server
-   */
-  private static RPC.Server startRpcServer(Configuration conf,
-      InetSocketAddress addr, Class<?>
-      protocol, BlockingService instance, int handlerCount)
-      throws IOException {
-    RPC.Server rpcServer = new RPC.Builder(conf)
-        .setProtocol(protocol)
-        .setInstance(instance)
-        .setBindAddress(addr.getHostString())
-        .setPort(addr.getPort())
-        .setNumHandlers(handlerCount)
-        .setVerbose(false)
-        .setSecretManager(null)
-        .build();
-
-    DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
-    return rpcServer;
-  }
-
-
-  /**
-   * Start Datanode RPC server.
-   */
-  public static RPC.Server startScmRpcServer(Configuration configuration,
-      StorageContainerDatanodeProtocol server,
-      InetSocketAddress rpcServerAddresss, int handlerCount) throws
-      IOException {
-    RPC.setProtocolEngine(configuration,
-        StorageContainerDatanodeProtocolPB.class,
-        ProtobufRpcEngine.class);
-
-    BlockingService scmDatanodeService =
-        StorageContainerDatanodeProtocolService.
-            newReflectiveBlockingService(
-                new StorageContainerDatanodeProtocolServerSideTranslatorPB(
-                    server, Mockito.mock(ProtocolMessageMetrics.class)));
-
-    RPC.Server scmServer = startRpcServer(configuration, rpcServerAddresss,
-        StorageContainerDatanodeProtocolPB.class, scmDatanodeService,
-        handlerCount);
-
-    scmServer.start();
-    return scmServer;
-  }
-
-  public static InetSocketAddress getReuseableAddress() throws IOException {
-    try (ServerSocket socket = new ServerSocket(0)) {
-      socket.setReuseAddress(true);
-      int port = socket.getLocalPort();
-      String addr = InetAddress.getLoopbackAddress().getHostAddress();
-      return new InetSocketAddress(addr, port);
-    }
-  }
-
-  public static OzoneConfiguration getConf() {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(HDDS_DATANODE_DIR_KEY, GenericTestUtils
-        .getRandomizedTempPath());
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, GenericTestUtils
-        .getRandomizedTempPath());
-    return conf;
-  }
-
-  public static OzoneConfiguration getOzoneConf() {
-    return new OzoneConfiguration();
-  }
-
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
deleted file mode 100644
index c4b29ba..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ /dev/null
@@ -1,355 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CommandStatus;
-import org.apache.hadoop.hdds.scm.VersionInfo;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
-import org.apache.hadoop.ozone.protocol.VersionResponse;
-
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * SCM RPC mock class.
- */
-public class ScmTestMock implements StorageContainerDatanodeProtocol {
-  private int rpcResponseDelay;
-  private AtomicInteger heartbeatCount = new AtomicInteger(0);
-  private AtomicInteger rpcCount = new AtomicInteger(0);
-  private AtomicInteger containerReportsCount = new AtomicInteger(0);
-  private String clusterId;
-  private String scmId;
-
-  public ScmTestMock() {
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-  }
-
-  // Map of datanode to containers
-  private Map<DatanodeDetails,
-      Map<String, ContainerReplicaProto>> nodeContainers =
-      new HashMap<>();
-  private Map<DatanodeDetails, NodeReportProto> nodeReports = new HashMap<>();
-  private AtomicInteger commandStatusReport = new AtomicInteger(0);
-  private List<CommandStatus> cmdStatusList = new ArrayList<>();
-  private List<SCMCommandProto> scmCommandRequests = new ArrayList<>();
-  /**
-   * Returns the number of heartbeats made to this class.
-   *
-   * @return int
-   */
-  public int getHeartbeatCount() {
-    return heartbeatCount.get();
-  }
-
-  /**
-   * Returns the number of RPC calls made to this mock class instance.
-   *
-   * @return - Number of RPC calls serviced by this class.
-   */
-  public int getRpcCount() {
-    return rpcCount.get();
-  }
-
-  /**
-   * Gets the RPC response delay.
-   *
-   * @return delay in milliseconds.
-   */
-  public int getRpcResponseDelay() {
-    return rpcResponseDelay;
-  }
-
-  /**
-   * Sets the RPC response delay.
-   *
-   * @param rpcResponseDelay - delay in milliseconds.
-   */
-  public void setRpcResponseDelay(int rpcResponseDelay) {
-    this.rpcResponseDelay = rpcResponseDelay;
-  }
-
-  /**
-   * Returns the number of container reports the server has seen.
-   * @return int
-   */
-  public int getContainerReportsCount() {
-    return containerReportsCount.get();
-  }
-
-  /**
-   * Returns the number of containers that have been reported so far.
-   * @return - count of reported containers.
-   */
-  public long getContainerCount() {
-    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
-      return containerMap.size();
-    }).sum();
-  }
-
-  /**
-   * Get the number of keys reported from container reports.
-   * @return - number of keys reported.
-   */
-  public long getKeyCount() {
-    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
-      return containerMap.values().parallelStream().mapToLong((container) -> {
-        return container.getKeyCount();
-      }).sum();
-    }).sum();
-  }
-
-  /**
-   * Get the number of bytes used from container reports.
-   * @return - number of bytes used.
-   */
-  public long getBytesUsed() {
-    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
-      return containerMap.values().parallelStream().mapToLong((container) -> {
-        return container.getUsed();
-      }).sum();
-    }).sum();
-  }
-
-  /**
-   * Returns SCM version.
-   *
-   * @return Version info.
-   */
-  @Override
-  public StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto
-      getVersion(StorageContainerDatanodeProtocolProtos
-      .SCMVersionRequestProto unused) throws IOException {
-    rpcCount.incrementAndGet();
-    sleepIfNeeded();
-    VersionInfo versionInfo = VersionInfo.getLatestVersion();
-    return VersionResponse.newBuilder()
-        .setVersion(versionInfo.getVersion())
-        .addValue(VersionInfo.DESCRIPTION_KEY, versionInfo.getDescription())
-        .addValue(OzoneConsts.SCM_ID, scmId)
-        .addValue(OzoneConsts.CLUSTER_ID, clusterId)
-        .build().getProtobufMessage();
-
-  }
-
-  private void sleepIfNeeded() {
-    if (getRpcResponseDelay() > 0) {
-      try {
-        Thread.sleep(getRpcResponseDelay());
-      } catch (InterruptedException ex) {
-        // Just ignore this exception.
-      }
-    }
-  }
-
-  /**
-   * Used by the datanode to send a heartbeat.
-   *
-   * @param heartbeat - node heartbeat.
-   * @return - SCMHeartbeatResponseProto
-   * @throws IOException
-   */
-  @Override
-  public StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto
-      sendHeartbeat(SCMHeartbeatRequestProto heartbeat) throws IOException {
-    rpcCount.incrementAndGet();
-    heartbeatCount.incrementAndGet();
-    if (heartbeat.getCommandStatusReportsCount() != 0) {
-      for (CommandStatusReportsProto statusReport : heartbeat
-          .getCommandStatusReportsList()) {
-        cmdStatusList.addAll(statusReport.getCmdStatusList());
-        commandStatusReport.incrementAndGet();
-      }
-    }
-    sleepIfNeeded();
-    return SCMHeartbeatResponseProto.newBuilder().addAllCommands(
-        scmCommandRequests)
-        .setDatanodeUUID(heartbeat.getDatanodeDetails().getUuid())
-        .build();
-  }
-
-  /**
-   * Register Datanode.
-   *
-   * @param datanodeDetailsProto DatanodeDetailsProto.
-   * @return SCM Command.
-   */
-  @Override
-  public StorageContainerDatanodeProtocolProtos
-      .SCMRegisteredResponseProto register(
-          DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport,
-          ContainerReportsProto containerReportsRequestProto,
-          PipelineReportsProto pipelineReportsProto)
-      throws IOException {
-    rpcCount.incrementAndGet();
-    updateNodeReport(datanodeDetailsProto, nodeReport);
-    updateContainerReport(containerReportsRequestProto, datanodeDetailsProto);
-    sleepIfNeeded();
-    return StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto
-        .newBuilder().setClusterID(UUID.randomUUID().toString())
-        .setDatanodeUUID(datanodeDetailsProto.getUuid()).setErrorCode(
-            StorageContainerDatanodeProtocolProtos
-                .SCMRegisteredResponseProto.ErrorCode.success).build();
-  }
-
-  /**
-   * Update nodeReport.
-   * @param datanodeDetailsProto
-   * @param nodeReport
-   */
-  public void updateNodeReport(DatanodeDetailsProto datanodeDetailsProto,
-      NodeReportProto nodeReport) {
-    DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf(
-        datanodeDetailsProto);
-    NodeReportProto.Builder nodeReportProto = NodeReportProto.newBuilder();
-
-    List<StorageReportProto> storageReports =
-        nodeReport.getStorageReportList();
-
-    for (StorageReportProto report : storageReports) {
-      nodeReportProto.addStorageReport(report);
-    }
-
-    nodeReports.put(datanode, nodeReportProto.build());
-
-  }
-
-  /**
-   * Update the containerReport.
-   *
-   * @param reports Container report
-   * @param datanodeDetails DataNode Info
-   * @throws IOException
-   */
-  public void updateContainerReport(
-      StorageContainerDatanodeProtocolProtos.ContainerReportsProto reports,
-      DatanodeDetailsProto datanodeDetails) throws IOException {
-    Preconditions.checkNotNull(reports);
-    containerReportsCount.incrementAndGet();
-    DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf(
-        datanodeDetails);
-    if (reports.getReportsCount() > 0) {
-      Map containers = nodeContainers.get(datanode);
-      if (containers == null) {
-        containers = new LinkedHashMap();
-        nodeContainers.put(datanode, containers);
-      }
-
-      for (ContainerReplicaProto report : reports
-          .getReportsList()) {
-        containers.put(report.getContainerID(), report);
-      }
-    }
-  }
-
-
-  /**
-   * Return the number of StorageReports of a datanode.
-   * @param datanodeDetails
-   * @return count of storage reports of a datanode
-   */
-  public int getNodeReportsCount(DatanodeDetails datanodeDetails) {
-    return nodeReports.get(datanodeDetails).getStorageReportCount();
-  }
-
-  /**
-   * Returns the number of containers of a datanode.
-   * @param datanodeDetails
-   * @return count of containers of a datanode
-   */
-  public int getContainerCountsForDatanode(DatanodeDetails datanodeDetails) {
-    Map<String, ContainerReplicaProto> cr =
-        nodeContainers.get(datanodeDetails);
-    if (cr != null) {
-      return cr.size();
-    }
-    return 0;
-  }
-
-  /**
-   * Reset the mock SCM so tests can get a fresh start without rebuilding it.
-   */
-  public void reset() {
-    heartbeatCount.set(0);
-    rpcCount.set(0);
-    containerReportsCount.set(0);
-    nodeContainers.clear();
-
-  }
-
-  public int getCommandStatusReportCount() {
-    return commandStatusReport.get();
-  }
-
-  public List<CommandStatus> getCmdStatusList() {
-    return cmdStatusList;
-  }
-
-  public List<SCMCommandProto> getScmCommandRequests() {
-    return scmCommandRequests;
-  }
-
-  public void clearScmCommandRequests() {
-    scmCommandRequests.clear();
-  }
-
-  public void addScmCommandRequest(SCMCommandProto scmCmd) {
-    scmCommandRequests.add(scmCmd);
-  }
-
-  /**
-   * Set scmId.
-   * @param id
-   */
-  public void setScmId(String id) {
-    this.scmId = id;
-  }
-
-  /**
-   * Get scmId.
-   * @return scmId
-   */
-  public String getScmId() {
-    return scmId;
-  }
-}
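Taken together, the three helpers above form a small harness: SCMTestUtils
serves ScmTestMock over real protobuf RPC, and ContainerTestUtils points a
datanode endpoint at it. A minimal sketch of wiring them up, assuming the
signatures shown in this patch; the handler count, timeout, and class name are
illustrative.

    import java.net.InetSocketAddress;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
    import org.apache.hadoop.ozone.container.common.SCMTestUtils;
    import org.apache.hadoop.ozone.container.common.ScmTestMock;
    import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;

    final class MockScmHarnessSketch {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = SCMTestUtils.getConf();
        InetSocketAddress address = SCMTestUtils.getReuseableAddress();
        ScmTestMock mock = new ScmTestMock();
        // Serve the mock over the real RPC plumbing.
        RPC.Server scmServer =
            SCMTestUtils.startScmRpcServer(conf, mock, address, 10);
        try {
          // Point a datanode endpoint at the mock and drive one version call.
          EndpointStateMachine endpoint =
              ContainerTestUtils.createEndpoint(conf, address, 1000);
          endpoint.getEndPoint().getVersion(null);
          assert mock.getRpcCount() == 1;
        } finally {
          scmServer.stop();
        }
      }
    }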
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java
deleted file mode 100644
index a4e0028..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common;
-
-import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * This class tests ChunkLayOutVersion.
- */
-public class TestChunkLayOutVersion {
-
-  @Test
-  public void testChunkLayOutVersion() {
-
-    // Check Latest Version and description
-    Assert.assertEquals(1, ChunkLayOutVersion.getLatestVersion().getVersion());
-    Assert.assertEquals("Data without checksums.", ChunkLayOutVersion
-        .getLatestVersion().getDescription());
-
-    Assert.assertEquals(1, ChunkLayOutVersion.getAllVersions().length);
-
-  }
-
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
deleted file mode 100644
index b6584d1..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common;
-
-import org.apache.hadoop.fs.FileSystemTestHelper;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.File;
-
-
-/**
- * Test ContainerCache with evictions.
- */
-public class TestContainerCache {
-  private static String testRoot = new FileSystemTestHelper().getTestRootDir();
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  private void createContainerDB(OzoneConfiguration conf, File dbFile)
-      throws Exception {
-    MetadataStore store = MetadataStoreBuilder.newBuilder().setConf(conf)
-        .setCreateIfMissing(true).setDbFile(dbFile).build();
-
-    // We close here since the SCM pre-creates containers; we will open the
-    // DB and put its handle into a cache when keys are created in a
-    // container.
-
-    store.close();
-  }
-
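-  // Reference-counting contract exercised by the tests below (an informal
-  // summary of observed behavior, not a documented API guarantee): each
-  // cache.getDB(...) increments the entry's reference count, each close()
-  // decrements it, and an entry only becomes eligible for eviction once
-  // its count drops back to zero.
-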
-  @Test
-  public void testContainerCacheEviction() throws Exception {
-    File root = new File(testRoot);
-    root.mkdirs();
-
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
-
-    ContainerCache cache = ContainerCache.getInstance(conf);
-    File containerDir1 = new File(root, "cont1");
-    File containerDir2 = new File(root, "cont2");
-    File containerDir3 = new File(root, "cont3");
-    File containerDir4 = new File(root, "cont4");
-
-
-    createContainerDB(conf, containerDir1);
-    createContainerDB(conf, containerDir2);
-    createContainerDB(conf, containerDir3);
-    createContainerDB(conf, containerDir4);
-
-    // Get 2 references out of the same db and verify the objects are same.
-    ReferenceCountedDB db1 = cache.getDB(1, "RocksDB",
-        containerDir1.getPath(), conf);
-    Assert.assertEquals(1, db1.getReferenceCount());
-    ReferenceCountedDB db2 = cache.getDB(1, "RocksDB",
-        containerDir1.getPath(), conf);
-    Assert.assertEquals(2, db2.getReferenceCount());
-    Assert.assertEquals(2, db1.getReferenceCount());
-    Assert.assertEquals(db1, db2);
-
-    // Add one more reference to the ContainerCache.
-    ReferenceCountedDB db3 = cache.getDB(2, "RocksDB",
-        containerDir2.getPath(), conf);
-    Assert.assertEquals(1, db3.getReferenceCount());
-
-    // and close the reference
-    db3.close();
-    Assert.assertEquals(0, db3.getReferenceCount());
-
-    Assert.assertTrue(cache.isFull());
-
-    // Add one more reference to the ContainerCache and verify that it will
-    // not evict the least recently used entry, since that entry still holds
-    // a reference.
-    ReferenceCountedDB db4 = cache.getDB(3, "RocksDB",
-        containerDir3.getPath(), conf);
-    Assert.assertEquals(1, db4.getReferenceCount());
-
-    Assert.assertEquals(2, cache.size());
-    Assert.assertNotNull(cache.get(containerDir1.getPath()));
-    Assert.assertNull(cache.get(containerDir2.getPath()));
-
-    // Now close both the references for container1
-    db1.close();
-    db2.close();
-    Assert.assertEquals(0, db1.getReferenceCount());
-    Assert.assertEquals(0, db2.getReferenceCount());
-
-
-    // The reference count for container1 is 0 but it is not evicted.
-    ReferenceCountedDB db5 = cache.getDB(1, "RocksDB",
-        containerDir1.getPath(), conf);
-    Assert.assertEquals(1, db5.getReferenceCount());
-    Assert.assertEquals(db1, db5);
-    db5.close();
-    db4.close();
-
-
-    // Decrementing reference count below zero should fail.
-    thrown.expect(IllegalArgumentException.class);
-    db5.close();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java
deleted file mode 100644
index 5cabef2..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * This class tests DatanodeLayOutVersion.
- */
-public class TestDatanodeLayOutVersion {
-
-  @Test
-  public void testDatanodeLayOutVersion() {
-    // Check Latest Version and description
-    Assert.assertEquals(1, DataNodeLayoutVersion.getLatestVersion()
-        .getVersion());
-    Assert.assertEquals("HDDS Datanode LayOut Version 1", DataNodeLayoutVersion
-        .getLatestVersion().getDescription());
-    Assert.assertEquals(1, DataNodeLayoutVersion.getAllVersions().length);
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
deleted file mode 100644
index 0f3e7d1..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ /dev/null
@@ -1,444 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common;
-
-import com.google.common.collect.Maps;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .SCMConnectionManager;
-import org.apache.hadoop.ozone.container.common.states.DatanodeState;
-import org.apache.hadoop.ozone.container.common.states.datanode
-    .InitDatanodeState;
-import org.apache.hadoop.ozone.container.common.states.datanode
-    .RunningDatanodeState;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Tests the datanode state machine class and its states.
- */
-public class TestDatanodeStateMachine {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestDatanodeStateMachine.class);
-  // Changed to 1, as the current code checks for multiple scm directories
-  // and fails if they already exist.
-  private final int scmServerCount = 1;
-  private List<String> serverAddresses;
-  private List<RPC.Server> scmServers;
-  private List<ScmTestMock> mockServers;
-  private ExecutorService executorService;
-  private Configuration conf;
-  private File testRoot;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = SCMTestUtils.getConf();
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500,
-        TimeUnit.MILLISECONDS);
-    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
-    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true);
-    serverAddresses = new ArrayList<>();
-    scmServers = new ArrayList<>();
-    mockServers = new ArrayList<>();
-    for (int x = 0; x < scmServerCount; x++) {
-      int port = SCMTestUtils.getReuseableAddress().getPort();
-      String address = "127.0.0.1";
-      serverAddresses.add(address + ":" + port);
-      ScmTestMock mock = new ScmTestMock();
-      scmServers.add(SCMTestUtils.startScmRpcServer(conf, mock,
-          new InetSocketAddress(address, port), 10));
-      mockServers.add(mock);
-    }
-
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES,
-        serverAddresses.toArray(new String[0]));
-
-    String path = GenericTestUtils
-        .getTempPath(TestDatanodeStateMachine.class.getSimpleName());
-    testRoot = new File(path);
-    if (!testRoot.mkdirs()) {
-      LOG.info("Required directories {} already exist.", testRoot);
-    }
-
-    File dataDir = new File(testRoot, "data");
-    conf.set(HDDS_DATANODE_DIR_KEY, dataDir.getAbsolutePath());
-    if (!dataDir.mkdirs()) {
-      LOG.info("Data dir create failed.");
-    }
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        new File(testRoot, "scm").getAbsolutePath());
-    path = new File(testRoot, "datanodeID").getAbsolutePath();
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, path);
-    executorService = HadoopExecutors.newCachedThreadPool(
-        new ThreadFactoryBuilder().setDaemon(true)
-            .setNameFormat("Test Data Node State Machine Thread - %d").build());
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    try {
-      if (executorService != null) {
-        executorService.shutdown();
-        try {
-          if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
-            executorService.shutdownNow();
-          }
-
-          if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
-            LOG.error("Unable to shutdown properly.");
-          }
-        } catch (InterruptedException e) {
-          LOG.error("Error attempting to shutdown.", e);
-          executorService.shutdownNow();
-        }
-      }
-      for (RPC.Server s : scmServers) {
-        s.stop();
-      }
-    } catch (Exception e) {
-      // Ignore all exceptions from the shutdown.
-    } finally {
-      FileUtil.fullyDelete(testRoot);
-    }
-  }
-
-  /**
-   * Assert that starting the state machine executes the Init State.
-   */
-  @Test
-  public void testStartStopDatanodeStateMachine() throws IOException,
-      InterruptedException, TimeoutException {
-    try (DatanodeStateMachine stateMachine =
-        new DatanodeStateMachine(getNewDatanodeDetails(), conf, null, null)) {
-      stateMachine.startDaemon();
-      SCMConnectionManager connectionManager =
-          stateMachine.getConnectionManager();
-      GenericTestUtils.waitFor(
-          () -> {
-            int size = connectionManager.getValues().size();
-            LOG.info("connectionManager.getValues().size() is {}", size);
-            return size == 1;
-          }, 1000, 30000);
-
-      stateMachine.stopDaemon();
-      assertTrue(stateMachine.isDaemonStopped());
-    }
-  }
-
-  /**
-   * This test explores the state machine by invoking each call in sequence,
-   * just as if the state machine itself were calling it. Because this is a
-   * test, we are able to verify each of the assumptions.
-   * <p>
-   * Here is what happens at High level.
-   * <p>
-   * 1. We start the datanodeStateMachine in the INIT State.
-   * <p>
-   * 2. We invoke the INIT state task.
-   * <p>
-   * 3. That creates a set of RPC endpoints that are ready to connect to SCMs.
-   * <p>
-   * 4. We assert that we have moved to the running state for the
-   * DatanodeStateMachine.
-   * <p>
-   * 5. We get the task for the Running State. Executing that running state
-   * makes the first network call of the state machine. The Endpoint is in
-   * the GETVERSION State and we invoke the task.
-   * <p>
-   * 6. We assert that this call was a success by checking that each of the
-   * endpoints now has the version response it got from the SCM server it
-   * was talking to, and that each mock server serviced one RPC call.
-   * <p>
-   * 7. Since the Register is done now, subsequent calls to get task will
-   * return HeartbeatTask, which sends heartbeats to SCM. We assert that we
-   * get the right task from the sub-system below.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testDatanodeStateContext() throws IOException,
-      InterruptedException, ExecutionException, TimeoutException {
-    // No mini cluster is started in this test; create an ID file so that
-    // the state machine can load a fake datanode ID.
-    File idPath = new File(
-        conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR),
-        OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT);
-    idPath.delete();
-    DatanodeDetails datanodeDetails = getNewDatanodeDetails();
-    DatanodeDetails.Port port = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE,
-        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-    datanodeDetails.setPort(port);
-    ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath);
-
-    try (DatanodeStateMachine stateMachine =
-             new DatanodeStateMachine(datanodeDetails, conf, null, null)) {
-      DatanodeStateMachine.DatanodeStates currentState =
-          stateMachine.getContext().getState();
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT,
-          currentState);
-
-      DatanodeState<DatanodeStateMachine.DatanodeStates> task =
-          stateMachine.getContext().getTask();
-      Assert.assertEquals(InitDatanodeState.class, task.getClass());
-
-      task.execute(executorService);
-      DatanodeStateMachine.DatanodeStates newState =
-          task.await(2, TimeUnit.SECONDS);
-
-      for (EndpointStateMachine endpoint :
-          stateMachine.getConnectionManager().getValues()) {
-        // We assert that each of the endpoints is in State GETVERSION.
-        Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION,
-            endpoint.getState());
-      }
-
-      // The Datanode has moved into Running State, since endpoints are created.
-      // We move to running state when we are ready to issue RPC calls to SCMs.
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
-          newState);
-
-      // If we had called context.execute instead of calling into each state
-      // this would have happened automatically.
-      stateMachine.getContext().setState(newState);
-      task = stateMachine.getContext().getTask();
-      Assert.assertEquals(RunningDatanodeState.class, task.getClass());
-
-      // This execute will invoke getVersion calls against all SCM endpoints
-      // that we know of.
-
-      task.execute(executorService);
-      newState = task.await(10, TimeUnit.SECONDS);
-
-      // The earlier task.execute called into GetVersion. Wait for that
-      // execution to finish and for the endpoint state to move to the
-      // REGISTER state.
-      GenericTestUtils.waitFor(() -> {
-        for (EndpointStateMachine endpoint :
-            stateMachine.getConnectionManager().getValues()) {
-          if (endpoint.getState() !=
-              EndpointStateMachine.EndPointStates.REGISTER) {
-            return false;
-          }
-        }
-        return true;
-      }, 1000, 50000);
-
-      // The state machine should still be in the RUNNING state.
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
-          newState);
-
-      for (EndpointStateMachine endpoint :
-          stateMachine.getConnectionManager().getValues()) {
-
-        // Since the earlier task.execute called into GetVersion, the
-        // endpoint state machine should have moved to the REGISTER state.
-        Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER,
-            endpoint.getState());
-
-        // We assert that each of the endpoints has gotten a version from
-        // the SCM Server.
-        Assert.assertNotNull(endpoint.getVersion());
-      }
-
-      // We can also assert that all mock servers have received only one RPC
-      // call at this point in time.
-      for (ScmTestMock mock : mockServers) {
-        Assert.assertEquals(1, mock.getRpcCount());
-      }
-
-      // This task is the Running task, but the running task executes
-      // sub-tasks based on the state of the endpoints, so this next call
-      // will be a Register at the endpoint RPC level.
-      task = stateMachine.getContext().getTask();
-      task.execute(executorService);
-      newState = task.await(2, TimeUnit.SECONDS);
-
-      // The state machine should still be in the RUNNING state.
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
-          newState);
-
-      for (ScmTestMock mock : mockServers) {
-        Assert.assertEquals(2, mock.getRpcCount());
-      }
-
-      // This task is the Running task, but the running task executes
-      // sub-tasks based on the state of the endpoints, so this next call
-      // will be a HeartbeatTask at the endpoint RPC level.
-      task = stateMachine.getContext().getTask();
-      task.execute(executorService);
-      newState = task.await(2, TimeUnit.SECONDS);
-
-      // The state machine should still be in the RUNNING state.
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
-          newState);
-
-
-      for (ScmTestMock mock : mockServers) {
-        Assert.assertEquals(1, mock.getHeartbeatCount());
-      }
-    }
-  }
-
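-  // Condensed view of the endpoint transitions exercised above (a sketch
-  // based on the assertions in this test, not an exhaustive state list):
-  //   GETVERSION --getVersion()--> REGISTER --register()--> heartbeat loop
-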
-  @Test
-  public void testDatanodeStateMachineWithIdWriteFail() throws Exception {
-
-    File idPath = new File(
-        conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR),
-        OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT);
-    idPath.delete();
-    DatanodeDetails datanodeDetails = getNewDatanodeDetails();
-    DatanodeDetails.Port port = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE,
-        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-    datanodeDetails.setPort(port);
-
-    try (DatanodeStateMachine stateMachine =
-             new DatanodeStateMachine(datanodeDetails, conf, null, null)) {
-      DatanodeStateMachine.DatanodeStates currentState =
-          stateMachine.getContext().getState();
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT,
-          currentState);
-
-      DatanodeState<DatanodeStateMachine.DatanodeStates> task =
-          stateMachine.getContext().getTask();
-      Assert.assertEquals(InitDatanodeState.class, task.getClass());
-
-      // Set the idPath parent directory to read-only; the state machine
-      // will fail to write the datanodeId file and set the state to
-      // shutdown.
-      idPath.getParentFile().mkdirs();
-      idPath.getParentFile().setReadOnly();
-
-      task.execute(executorService);
-      DatanodeStateMachine.DatanodeStates newState =
-          task.await(2, TimeUnit.SECONDS);
-
-      // As we made the idPath parent read-only, the write fails and the
-      // state is set to SHUTDOWN.
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN,
-          newState);
-
-      // Set the directory back to writable.
-      idPath.getParentFile().setWritable(true);
-    }
-  }
-
-  /**
-   * Test state transition with a list of invalid scm configurations,
-   * and verify the state transits to SHUTDOWN each time.
-   */
-  @Test
-  public void testDatanodeStateMachineWithInvalidConfiguration()
-      throws Exception {
-    List<Map.Entry<String, String>> confList =
-        new ArrayList<>();
-    confList.add(Maps.immutableEntry(ScmConfigKeys.OZONE_SCM_NAMES, ""));
-
-    // Invalid ozone.scm.names
-    /** Empty **/
-    confList.add(Maps.immutableEntry(
-        ScmConfigKeys.OZONE_SCM_NAMES, ""));
-    /** Invalid schema **/
-    confList.add(Maps.immutableEntry(
-        ScmConfigKeys.OZONE_SCM_NAMES, "x..y"));
-    /** Invalid port **/
-    confList.add(Maps.immutableEntry(
-        ScmConfigKeys.OZONE_SCM_NAMES, "scm:xyz"));
-    /** Port out of range **/
-    confList.add(Maps.immutableEntry(
-        ScmConfigKeys.OZONE_SCM_NAMES, "scm:123456"));
-    // Invalid ozone.scm.datanode.id.dir
-    /** Empty **/
-    confList.add(Maps.immutableEntry(
-        ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, ""));
-
-    confList.forEach((entry) -> {
-      Configuration perTestConf = new Configuration(conf);
-      perTestConf.setStrings(entry.getKey(), entry.getValue());
-      LOG.info("Test with {} = {}", entry.getKey(), entry.getValue());
-      try (DatanodeStateMachine stateMachine = new DatanodeStateMachine(
-          getNewDatanodeDetails(), perTestConf, null, null)) {
-        DatanodeStateMachine.DatanodeStates currentState =
-            stateMachine.getContext().getState();
-        Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT,
-            currentState);
-        DatanodeState<DatanodeStateMachine.DatanodeStates> task =
-            stateMachine.getContext().getTask();
-        task.execute(executorService);
-        DatanodeStateMachine.DatanodeStates newState =
-            task.await(2, TimeUnit.SECONDS);
-        Assert.assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN,
-            newState);
-      } catch (Exception e) {
-        Assert.fail("Unexpected exception found");
-      }
-    });
-  }
-
-  private DatanodeDetails getNewDatanodeDetails() {
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, 0);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, 0);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, 0);
-    return DatanodeDetails.newBuilder()
-        .setUuid(UUID.randomUUID().toString())
-        .setHostName("localhost")
-        .setIpAddress("127.0.0.1")
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort)
-        .build();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
deleted file mode 100644
index c6fa8d6..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * This class is used to test the KeyValueContainerData.
- */
-public class TestKeyValueContainerData {
-
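-  // 5 GB in bytes. Hadoop's StorageUnit is binary, so this should equal
-  // 5 * 1024^3 = 5368709120 (stated here as a reader aid, not asserted).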
-  private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5);
-  @Test
-  public void testKeyValueData() {
-    long containerId = 1L;
-    ContainerProtos.ContainerType containerType = ContainerProtos
-        .ContainerType.KeyValueContainer;
-    String path = "/tmp";
-    String containerDBType = "RocksDB";
-    ContainerProtos.ContainerDataProto.State state =
-        ContainerProtos.ContainerDataProto.State.CLOSED;
-    AtomicLong val = new AtomicLong(0);
-    UUID pipelineId = UUID.randomUUID();
-    UUID datanodeId = UUID.randomUUID();
-
-    KeyValueContainerData kvData = new KeyValueContainerData(containerId,
-        MAXSIZE, pipelineId.toString(), datanodeId.toString());
-
-    assertEquals(containerType, kvData.getContainerType());
-    assertEquals(containerId, kvData.getContainerID());
-    assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, kvData
-        .getState());
-    assertEquals(0, kvData.getMetadata().size());
-    assertEquals(0, kvData.getNumPendingDeletionBlocks());
-    assertEquals(val.get(), kvData.getReadBytes());
-    assertEquals(val.get(), kvData.getWriteBytes());
-    assertEquals(val.get(), kvData.getReadCount());
-    assertEquals(val.get(), kvData.getWriteCount());
-    assertEquals(val.get(), kvData.getKeyCount());
-    assertEquals(val.get(), kvData.getNumPendingDeletionBlocks());
-    assertEquals(MAXSIZE, kvData.getMaxSize());
-
-    kvData.setState(state);
-    kvData.setContainerDBType(containerDBType);
-    kvData.setChunksPath(path);
-    kvData.setMetadataPath(path);
-    kvData.incrReadBytes(10);
-    kvData.incrWriteBytes(10);
-    kvData.incrReadCount();
-    kvData.incrWriteCount();
-    kvData.incrKeyCount();
-    kvData.incrPendingDeletionBlocks(1);
-
-    assertEquals(state, kvData.getState());
-    assertEquals(containerDBType, kvData.getContainerDBType());
-    assertEquals(path, kvData.getChunksPath());
-    assertEquals(path, kvData.getMetadataPath());
-
-    assertEquals(10, kvData.getReadBytes());
-    assertEquals(10, kvData.getWriteBytes());
-    assertEquals(1, kvData.getReadCount());
-    assertEquals(1, kvData.getWriteCount());
-    assertEquals(1, kvData.getKeyCount());
-    assertEquals(1, kvData.getNumPendingDeletionBlocks());
-    assertEquals(pipelineId.toString(), kvData.getOriginPipelineId());
-    assertEquals(datanodeId.toString(), kvData.getOriginNodeId());
-  }
-
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
deleted file mode 100644
index 5889222..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
-import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Properties;
-import java.util.UUID;
-
-import static org.junit.Assert.*;
-
-/**
- * This class tests {@link DatanodeVersionFile}.
- */
-public class TestDatanodeVersionFile {
-
-  private File versionFile;
-  private DatanodeVersionFile dnVersionFile;
-  private Properties properties;
-
-  private String storageID;
-  private String clusterID;
-  private String datanodeUUID;
-  private long cTime;
-  private int lv;
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Before
-  public void setup() throws IOException {
-    versionFile = folder.newFile("Version");
-    storageID = UUID.randomUUID().toString();
-    clusterID = UUID.randomUUID().toString();
-    datanodeUUID = UUID.randomUUID().toString();
-    cTime = Time.now();
-    lv = DataNodeLayoutVersion.getLatestVersion().getVersion();
-
-    dnVersionFile = new DatanodeVersionFile(
-        storageID, clusterID, datanodeUUID, cTime, lv);
-
-    dnVersionFile.createVersionFile(versionFile);
-
-    properties = dnVersionFile.readFrom(versionFile);
-  }
-
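-  // The version file round-trips through java.util.Properties (see
-  // readFrom above). A minimal manual inspection might look like this
-  // hypothetical snippet (the property key name is an assumption):
-  //
-  //   Properties p = new Properties();
-  //   try (InputStream in = new FileInputStream(versionFile)) {
-  //     p.load(in);
-  //   }
-  //   assertEquals(storageID, p.getProperty("storageID"));
-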
-  @Test
-  public void testCreateAndReadVersionFile() throws IOException {
-
-    //Check VersionFile exists
-    assertTrue(versionFile.exists());
-
-    assertEquals(storageID, HddsVolumeUtil.getStorageID(
-        properties, versionFile));
-    assertEquals(clusterID, HddsVolumeUtil.getClusterID(
-        properties, versionFile, clusterID));
-    assertEquals(datanodeUUID, HddsVolumeUtil.getDatanodeUUID(
-        properties, versionFile, datanodeUUID));
-    assertEquals(cTime, HddsVolumeUtil.getCreationTime(
-        properties, versionFile));
-    assertEquals(lv, HddsVolumeUtil.getLayOutVersion(
-        properties, versionFile));
-  }
-
-  @Test
-  public void testIncorrectClusterId() throws IOException {
-    try {
-      String randomClusterID = UUID.randomUUID().toString();
-      HddsVolumeUtil.getClusterID(properties, versionFile,
-          randomClusterID);
-      fail("Test failure in testIncorrectClusterId");
-    } catch (InconsistentStorageStateException ex) {
-      GenericTestUtils.assertExceptionContains("Mismatched ClusterIDs", ex);
-    }
-  }
-
-  @Test
-  public void testVerifyCTime() throws IOException {
-    long invalidCTime = -10;
-    dnVersionFile = new DatanodeVersionFile(
-        storageID, clusterID, datanodeUUID, invalidCTime, lv);
-    dnVersionFile.createVersionFile(versionFile);
-    properties = dnVersionFile.readFrom(versionFile);
-
-    try {
-      HddsVolumeUtil.getCreationTime(properties, versionFile);
-      fail("Test failure in testVerifyCTime");
-    } catch (InconsistentStorageStateException ex) {
-      GenericTestUtils.assertExceptionContains("Invalid Creation time in " +
-          "Version File : " + versionFile, ex);
-    }
-  }
-
-  @Test
-  public void testVerifyLayOut() throws IOException {
-    int invalidLayOutVersion = 100;
-    dnVersionFile = new DatanodeVersionFile(
-        storageID, clusterID, datanodeUUID, cTime, invalidLayOutVersion);
-    dnVersionFile.createVersionFile(versionFile);
-    Properties props = dnVersionFile.readFrom(versionFile);
-
-    try {
-      HddsVolumeUtil.getLayOutVersion(props, versionFile);
-      fail("Test failure in testVerifyLayOut");
-    } catch (InconsistentStorageStateException ex) {
-      GenericTestUtils.assertExceptionContains("Invalid layOutVersion.", ex);
-    }
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
deleted file mode 100644
index c611ccb..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.fs.FileSystemTestHelper;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.UUID;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * This class tests create/read .container files.
- */
-public class TestContainerDataYaml {
-
-  private static long testContainerID = 1234;
-
-  private static String testRoot = new FileSystemTestHelper().getTestRootDir();
-
-  private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5);
-
-  /**
-   * Creates a .container file. cleanup() should be called at the end of any
-   * test that creates a container file.
-   */
-  private File createContainerFile(long containerID) throws IOException {
-    new File(testRoot).mkdirs();
-
-    String containerPath = containerID + ".container";
-
-    KeyValueContainerData keyValueContainerData = new KeyValueContainerData(
-        containerID, MAXSIZE, UUID.randomUUID().toString(),
-        UUID.randomUUID().toString());
-    keyValueContainerData.setContainerDBType("RocksDB");
-    keyValueContainerData.setMetadataPath(testRoot);
-    keyValueContainerData.setChunksPath(testRoot);
-
-    File containerFile = new File(testRoot, containerPath);
-
-    // Create .container file with ContainerData
-    ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType
-        .KeyValueContainer, keyValueContainerData, containerFile);
-
-    // Check that the .container file exists.
-    assertTrue(containerFile.exists());
-
-    return containerFile;
-  }
-
-  private void cleanup() {
-    FileUtil.fullyDelete(new File(testRoot));
-  }
-
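-  // For orientation: the .container file written above is a YAML document.
-  // A rough, hypothetical sketch of its shape (field names inferred from
-  // the getters asserted below, serialized key names not guaranteed):
-  //
-  //   containerID: 1234
-  //   containerType: KeyValueContainer
-  //   containerDBType: RocksDB
-  //   state: OPEN
-  //   layOutVersion: 1
-  //   maxSize: 5368709120
-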
-  @Test
-  public void testCreateContainerFile() throws IOException {
-    long containerID = testContainerID++;
-
-    File containerFile = createContainerFile(containerID);
-
-    // Read from .container file, and verify data.
-    KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
-    assertEquals(containerID, kvData.getContainerID());
-    assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
-        .getContainerType());
-    assertEquals("RocksDB", kvData.getContainerDBType());
-    assertEquals(containerFile.getParent(), kvData.getMetadataPath());
-    assertEquals(containerFile.getParent(), kvData.getChunksPath());
-    assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, kvData
-        .getState());
-    assertEquals(1, kvData.getLayOutVersion());
-    assertEquals(0, kvData.getMetadata().size());
-    assertEquals(MAXSIZE, kvData.getMaxSize());
-
-    // Update ContainerData.
-    kvData.addMetadata("VOLUME", "hdfs");
-    kvData.addMetadata("OWNER", "ozone");
-    kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED);
-
-
-    ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType
-            .KeyValueContainer, kvData, containerFile);
-
-    // Reading newly updated data from .container file
-    kvData =  (KeyValueContainerData) ContainerDataYaml.readContainerFile(
-        containerFile);
-
-    // verify data.
-    assertEquals(containerID, kvData.getContainerID());
-    assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
-        .getContainerType());
-    assertEquals("RocksDB", kvData.getContainerDBType());
-    assertEquals(containerFile.getParent(), kvData.getMetadataPath());
-    assertEquals(containerFile.getParent(), kvData.getChunksPath());
-    assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData
-        .getState());
-    assertEquals(1, kvData.getLayOutVersion());
-    assertEquals(2, kvData.getMetadata().size());
-    assertEquals("hdfs", kvData.getMetadata().get("VOLUME"));
-    assertEquals("ozone", kvData.getMetadata().get("OWNER"));
-    assertEquals(MAXSIZE, kvData.getMaxSize());
-  }
-
-  @Test
-  public void testIncorrectContainerFile() throws IOException {
-    try {
-      String containerFile = "incorrect.container";
-      //Get file from resources folder
-      ClassLoader classLoader = getClass().getClassLoader();
-      File file = new File(classLoader.getResource(containerFile).getFile());
-      KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
-          .readContainerFile(file);
-      fail("testIncorrectContainerFile failed");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("No enum constant", ex);
-    }
-  }
-
-
-  @Test
-  public void testCheckBackWardCompatibilityOfContainerFile() throws
-      IOException {
-    // This test covers the rollback case: after an upgrade, the new server
-    // writes .container files with additional fields. If we later roll
-    // back, an older Ozone version must still be able to read the
-    // .container files the new server created.
-
-    try {
-      String containerFile = "additionalfields.container";
-      //Get file from resources folder
-      ClassLoader classLoader = getClass().getClassLoader();
-      File file = new File(classLoader.getResource(containerFile).getFile());
-      KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
-          .readContainerFile(file);
-      ContainerUtils.verifyChecksum(kvData);
-
-      // Check that the container file data is consistent.
-      assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData
-          .getState());
-      assertEquals("RocksDB", kvData.getContainerDBType());
-      assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
-          .getContainerType());
-      assertEquals(9223372036854775807L, kvData.getContainerID());
-      assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
-          .getChunksPath());
-      assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
-          .getMetadataPath());
-      assertEquals(1, kvData.getLayOutVersion());
-      assertEquals(2, kvData.getMetadata().size());
-
-    } catch (Exception ex) {
-      ex.printStackTrace();
-      fail("testCheckBackWardCompatibilityOfContainerFile failed");
-    }
-  }
-
-  /**
-   * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData)}.
-   */
-  @Test
-  public void testChecksumInContainerFile() throws IOException {
-    long containerID = testContainerID++;
-
-    File containerFile = createContainerFile(containerID);
-
-    // Read from .container file, and verify data.
-    KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
-    ContainerUtils.verifyChecksum(kvData);
-
-    cleanup();
-  }
-
-  /**
-   * Test to verify incorrect checksum is detected.
-   */
-  @Test
-  public void testIncorrectChecksum() {
-    try {
-      String containerFile = "incorrect.checksum.container";
-      //Get file from resources folder
-      ClassLoader classLoader = getClass().getClassLoader();
-      File file = new File(classLoader.getResource(containerFile).getFile());
-      KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
-          .readContainerFile(file);
-      ContainerUtils.verifyChecksum(kvData);
-      fail("testIncorrectChecksum failed");
-    } catch (Exception ex) {
-      GenericTestUtils.assertExceptionContains("Container checksum error for " +
-          "ContainerID:", ex);
-    }
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
deleted file mode 100644
index e1e7119..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
+++ /dev/null
@@ -1,227 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Class used to test ContainerSet operations.
- */
-public class TestContainerSet {
-
-  @Test
-  public void testAddGetRemoveContainer() throws StorageContainerException {
-    ContainerSet containerSet = new ContainerSet();
-    long containerId = 100L;
-    ContainerProtos.ContainerDataProto.State state = ContainerProtos
-        .ContainerDataProto.State.CLOSED;
-
-    KeyValueContainerData kvData = new KeyValueContainerData(containerId,
-        (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
-        UUID.randomUUID().toString());
-    kvData.setState(state);
-    KeyValueContainer keyValueContainer = new KeyValueContainer(kvData, new
-        OzoneConfiguration());
-
-    //addContainer
-    boolean result = containerSet.addContainer(keyValueContainer);
-    assertTrue(result);
-    try {
-      result = containerSet.addContainer(keyValueContainer);
-      fail("Adding same container ID twice should fail.");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Container already exists with" +
-          " container Id " + containerId, ex);
-    }
-
-    //getContainer
-    KeyValueContainer container = (KeyValueContainer) containerSet
-        .getContainer(containerId);
-    KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
-        container.getContainerData();
-    assertEquals(containerId, keyValueContainerData.getContainerID());
-    assertEquals(state, keyValueContainerData.getState());
-    assertNull(containerSet.getContainer(1000L));
-
-    //removeContainer
-    assertTrue(containerSet.removeContainer(containerId));
-    assertFalse(containerSet.removeContainer(1000L));
-  }
-
-  @Test
-  public void testIteratorsAndCount() throws StorageContainerException {
-
-    ContainerSet containerSet = createContainerSet();
-
-    assertEquals(10, containerSet.containerCount());
-
-    Iterator<Container<?>> iterator = containerSet.getContainerIterator();
-
-    int count = 0;
-    while (iterator.hasNext()) {
-      Container kv = iterator.next();
-      ContainerData containerData = kv.getContainerData();
-      long containerId = containerData.getContainerID();
-      if (containerId % 2 == 0) {
-        assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
-            containerData.getState());
-      } else {
-        assertEquals(ContainerProtos.ContainerDataProto.State.OPEN,
-            containerData.getState());
-      }
-      count++;
-    }
-    assertEquals(10, count);
-
-    //Using containerMapIterator.
-    Iterator<Map.Entry<Long, Container<?>>> containerMapIterator = containerSet
-        .getContainerMapIterator();
-
-    count = 0;
-    while (containerMapIterator.hasNext()) {
-      Container kv = containerMapIterator.next().getValue();
-      ContainerData containerData = kv.getContainerData();
-      long containerId = containerData.getContainerID();
-      if (containerId % 2 == 0) {
-        assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
-            containerData.getState());
-      } else {
-        assertEquals(ContainerProtos.ContainerDataProto.State.OPEN,
-            containerData.getState());
-      }
-      count++;
-    }
-    assertEquals(10, count);
-
-  }
-
-  @Test
-  public void testIteratorPerVolume() throws StorageContainerException {
-    HddsVolume vol1 = Mockito.mock(HddsVolume.class);
-    Mockito.when(vol1.getStorageID()).thenReturn("uuid-1");
-    HddsVolume vol2 = Mockito.mock(HddsVolume.class);
-    Mockito.when(vol2.getStorageID()).thenReturn("uuid-2");
-
-    ContainerSet containerSet = new ContainerSet();
-    for (int i = 0; i < 10; i++) {
-      KeyValueContainerData kvData = new KeyValueContainerData(i,
-              (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
-              UUID.randomUUID().toString());
-      if (i % 2 == 0) {
-        kvData.setVolume(vol1);
-      } else {
-        kvData.setVolume(vol2);
-      }
-      kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED);
-      KeyValueContainer kv = new KeyValueContainer(kvData, new
-              OzoneConfiguration());
-      containerSet.addContainer(kv);
-    }
-
-    Iterator<Container<?>> iter1 = containerSet.getContainerIterator(vol1);
-    int count1 = 0;
-    while (iter1.hasNext()) {
-      Container c = iter1.next();
-      assertEquals(0, (c.getContainerData().getContainerID() % 2));
-      count1++;
-    }
-    assertEquals(5, count1);
-
-    Iterator<Container<?>> iter2 = containerSet.getContainerIterator(vol2);
-    int count2 = 0;
-    while (iter2.hasNext()) {
-      Container c = iter2.next();
-      assertEquals(1, (c.getContainerData().getContainerID() % 2));
-      count2++;
-    }
-    assertEquals(5, count2);
-  }
-
-  @Test
-  public void testGetContainerReport() throws IOException {
-
-    ContainerSet containerSet = createContainerSet();
-
-    ContainerReportsProto containerReportsRequestProto = containerSet
-        .getContainerReport();
-
-    assertEquals(10, containerReportsRequestProto.getReportsList().size());
-  }
-
-
-
-  @Test
-  public void testListContainer() throws StorageContainerException {
-    ContainerSet containerSet = createContainerSet();
-
-    List<ContainerData> result = new ArrayList<>();
-    containerSet.listContainer(2, 5, result);
-
-    assertEquals(5, result.size());
-
-    for(ContainerData containerData : result) {
-      assertTrue(containerData.getContainerID() >= 2
-          && containerData.getContainerID() <= 6);
-    }
-  }
-
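-  // As the assertions above suggest, listContainer(startId, count, out)
-  // appears to collect up to count containers whose IDs are >= startId
-  // (here IDs 2 through 6). This is an inference from the test, not a
-  // documented contract.
-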
-  private ContainerSet createContainerSet() throws StorageContainerException {
-    ContainerSet containerSet = new ContainerSet();
-    for (int i = 0; i < 10; i++) {
-      KeyValueContainerData kvData = new KeyValueContainerData(i,
-          (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
-          UUID.randomUUID().toString());
-      if (i % 2 == 0) {
-        kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED);
-      } else {
-        kvData.setState(ContainerProtos.ContainerDataProto.State.OPEN);
-      }
-      KeyValueContainer kv = new KeyValueContainer(kvData, new
-          OzoneConfiguration());
-      containerSet.addContainer(kv);
-    }
-    return containerSet;
-  }
-
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
deleted file mode 100644
index fe27eeb..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ /dev/null
@@ -1,300 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import com.google.common.collect.Maps;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto
-    .ContainerProtos.ContainerType;
-import org.apache.hadoop.hdds.protocol.datanode.proto
-    .ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Map;
-import java.util.UUID;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-/**
- * Test-cases to verify the functionality of HddsDispatcher.
- */
-public class TestHddsDispatcher {
-
-  @Test
-  public void testContainerCloseActionWhenFull() throws IOException {
-    String testDir = GenericTestUtils.getTempPath(
-        TestHddsDispatcher.class.getSimpleName());
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(HDDS_DATANODE_DIR_KEY, testDir);
-    DatanodeDetails dd = randomDatanodeDetails();
-    VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
-
-    try {
-      UUID scmId = UUID.randomUUID();
-      ContainerSet containerSet = new ContainerSet();
-
-      DatanodeStateMachine stateMachine = Mockito.mock(
-          DatanodeStateMachine.class);
-      StateContext context = Mockito.mock(StateContext.class);
-      Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
-      Mockito.when(context.getParent()).thenReturn(stateMachine);
-      KeyValueContainerData containerData = new KeyValueContainerData(1L,
-          (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(),
-          dd.getUuidString());
-      Container container = new KeyValueContainer(containerData, conf);
-      container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
-          scmId.toString());
-      containerSet.addContainer(container);
-      ContainerMetrics metrics = ContainerMetrics.create(conf);
-      Map<ContainerType, Handler> handlers = Maps.newHashMap();
-      for (ContainerType containerType : ContainerType.values()) {
-        handlers.put(containerType,
-            Handler.getHandlerForContainerType(containerType, conf, context,
-                containerSet, volumeSet, metrics));
-      }
-      HddsDispatcher hddsDispatcher = new HddsDispatcher(
-          conf, containerSet, volumeSet, handlers, context, metrics);
-      hddsDispatcher.setScmId(scmId.toString());
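-      // The first write chunk should succeed without raising any
-      // container action.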
-      ContainerCommandResponseProto responseOne = hddsDispatcher
-          .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
-          responseOne.getResult());
-      verify(context, times(0))
-          .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
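-      // Mark the 1 GB container as 950 MB full so that the next write chunk
-      // request makes the dispatcher raise a container close action.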
-      containerData.setBytesUsed(
-          (long) StorageUnit.MB.toBytes(950));
-      ContainerCommandResponseProto responseTwo = hddsDispatcher
-          .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
-          responseTwo.getResult());
-      verify(context, times(1))
-          .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
-
-    } finally {
-      volumeSet.shutdown();
-      FileUtils.deleteDirectory(new File(testDir));
-    }
-  }
-
-  @Test
-  public void testCreateContainerWithWriteChunk() throws IOException {
-    String testDir =
-        GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName());
-    try {
-      UUID scmId = UUID.randomUUID();
-      OzoneConfiguration conf = new OzoneConfiguration();
-      conf.set(HDDS_DATANODE_DIR_KEY, testDir);
-      DatanodeDetails dd = randomDatanodeDetails();
-      HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
-      ContainerCommandRequestProto writeChunkRequest =
-          getWriteChunkRequest(dd.getUuidString(), 1L, 1L);
-      // send read chunk request and make sure container does not exist
-      ContainerCommandResponseProto response =
-          hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null);
-      Assert.assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND,
-          response.getResult());
-      // send write chunk request without sending create container
-      response = hddsDispatcher.dispatch(writeChunkRequest, null);
-      // container should be created as part of write chunk request
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-      // send read chunk request to read the chunk written above
-      response =
-          hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-      Assert.assertEquals(writeChunkRequest.getWriteChunk().getData(),
-          response.getReadChunk().getData());
-    } finally {
-      FileUtils.deleteDirectory(new File(testDir));
-    }
-  }
-
-  @Test
-  public void testWriteChunkWithCreateContainerFailure() throws IOException {
-    String testDir = GenericTestUtils.getTempPath(
-        TestHddsDispatcher.class.getSimpleName());
-    try {
-      UUID scmId = UUID.randomUUID();
-      OzoneConfiguration conf = new OzoneConfiguration();
-      conf.set(HDDS_DATANODE_DIR_KEY, testDir);
-      DatanodeDetails dd = randomDatanodeDetails();
-      HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
-      ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest(
-          dd.getUuidString(), 1L, 1L);
-
-      HddsDispatcher mockDispatcher = Mockito.spy(hddsDispatcher);
-      ContainerCommandResponseProto.Builder builder = ContainerUtils
-          .getContainerCommandResponse(writeChunkRequest,
-              ContainerProtos.Result.DISK_OUT_OF_SPACE, "");
-      // Return DISK_OUT_OF_SPACE response when writing chunk
-      // with container creation.
-      Mockito.doReturn(builder.build()).when(mockDispatcher)
-          .createContainer(writeChunkRequest);
-
-      GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-          .captureLogs(HddsDispatcher.LOG);
-      // send write chunk request without sending create container
-      mockDispatcher.dispatch(writeChunkRequest, null);
-      // verify the error log
-      assertTrue(logCapturer.getOutput()
-          .contains("ContainerID " + writeChunkRequest.getContainerID()
-              + " creation failed : Result: DISK_OUT_OF_SPACE"));
-    } finally {
-      FileUtils.deleteDirectory(new File(testDir));
-    }
-  }
-
-  /**
-   * Creates a HddsDispatcher instance with the given parameters.
-   * @param dd datanode details.
-   * @param scmId SCM UUID.
-   * @param conf configuration to be used.
-   * @return HddsDispatcher instance.
-   * @throws IOException in case of failure.
-   */
-  private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
-      OzoneConfiguration conf) throws IOException {
-    ContainerSet containerSet = new ContainerSet();
-    VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
-    DatanodeStateMachine stateMachine = Mockito.mock(
-        DatanodeStateMachine.class);
-    StateContext context = Mockito.mock(StateContext.class);
-    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
-    Mockito.when(context.getParent()).thenReturn(stateMachine);
-    ContainerMetrics metrics = ContainerMetrics.create(conf);
-    Map<ContainerType, Handler> handlers = Maps.newHashMap();
-    for (ContainerType containerType : ContainerType.values()) {
-      handlers.put(containerType,
-          Handler.getHandlerForContainerType(containerType, conf, context,
-              containerSet, volumeSet, metrics));
-    }
-
-    HddsDispatcher hddsDispatcher = new HddsDispatcher(
-        conf, containerSet, volumeSet, handlers, context, metrics);
-    hddsDispatcher.setScmId(scmId.toString());
-    return hddsDispatcher;
-  }
-
-  // This method has to be removed once we move scm/TestUtils.java
-  // from server-scm project to container-service or to common project.
-  private static DatanodeDetails randomDatanodeDetails() {
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, 0);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, 0);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, 0);
-    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
-    builder.setUuid(UUID.randomUUID().toString())
-        .setHostName("localhost")
-        .setIpAddress("127.0.0.1")
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort);
-    return builder.build();
-  }
-
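-  /**
-   * Builds a WriteChunk request for the given block, carrying random
-   * UTF-8 data with no checksum.
-   */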
-  private ContainerCommandRequestProto getWriteChunkRequest(
-      String datanodeId, Long containerId, Long localId) {
-
-    ByteString data = ByteString.copyFrom(
-        UUID.randomUUID().toString().getBytes(UTF_8));
-    ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo
-        .newBuilder()
-        .setChunkName(
-            DigestUtils.md5Hex("dummy-key") + "_stream_"
-                + containerId + "_chunk_" + localId)
-        .setOffset(0)
-        .setLen(data.size())
-        .setChecksumData(Checksum.getNoChecksumDataProto())
-        .build();
-
-    WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
-        .newBuilder()
-        .setBlockID(new BlockID(containerId, localId)
-            .getDatanodeBlockIDProtobuf())
-        .setChunkData(chunk)
-        .setData(data);
-
-    return ContainerCommandRequestProto
-        .newBuilder()
-        .setContainerID(containerId)
-        .setCmdType(ContainerProtos.Type.WriteChunk)
-        .setDatanodeUuid(datanodeId)
-        .setWriteChunk(writeChunkRequest)
-        .build();
-  }
-
-  /**
-   * Creates container read chunk request using input container write chunk
-   * request.
-   *
-   * @param writeChunkRequest - Input container write chunk request
-   * @return container read chunk request
-   */
-  private ContainerCommandRequestProto getReadChunkRequest(
-      ContainerCommandRequestProto writeChunkRequest) {
-    WriteChunkRequestProto writeChunk = writeChunkRequest.getWriteChunk();
-    ContainerProtos.ReadChunkRequestProto.Builder readChunkRequest =
-        ContainerProtos.ReadChunkRequestProto.newBuilder()
-            .setBlockID(writeChunk.getBlockID())
-            .setChunkData(writeChunk.getChunkData());
-    return ContainerCommandRequestProto.newBuilder()
-        .setCmdType(ContainerProtos.Type.ReadChunk)
-        .setContainerID(writeChunk.getBlockID().getContainerID())
-        .setTraceID(writeChunkRequest.getTraceID())
-        .setDatanodeUuid(writeChunkRequest.getDatanodeUuid())
-        .setReadChunk(readChunkRequest)
-        .build();
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
deleted file mode 100644
index 07c78c0..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Datanode container related test-cases.
- */
-package org.apache.hadoop.ozone.container.common.impl;
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
deleted file mode 100644
index a6ba1031..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-import com.google.common.collect.Maps;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.mockito.Mockito;
-
-import java.util.Map;
-
-/**
- * Tests Handler interface.
- */
-public class TestHandler {
-  @Rule
-  public TestRule timeout = new Timeout(300000);
-
-  private Configuration conf;
-  private HddsDispatcher dispatcher;
-  private ContainerSet containerSet;
-  private VolumeSet volumeSet;
-  private Handler handler;
-
-  @Before
-  public void setup() throws Exception {
-    this.conf = new Configuration();
-    this.containerSet = Mockito.mock(ContainerSet.class);
-    this.volumeSet = Mockito.mock(VolumeSet.class);
-    DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class);
-    DatanodeStateMachine stateMachine = Mockito.mock(
-        DatanodeStateMachine.class);
-    StateContext context = Mockito.mock(StateContext.class);
-    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
-    Mockito.when(context.getParent()).thenReturn(stateMachine);
-    ContainerMetrics metrics = ContainerMetrics.create(conf);
-    Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
-    for (ContainerProtos.ContainerType containerType :
-        ContainerProtos.ContainerType.values()) {
-      handlers.put(containerType,
-          Handler.getHandlerForContainerType(
-              containerType, conf, context, containerSet, volumeSet, metrics));
-    }
-    this.dispatcher = new HddsDispatcher(
-        conf, containerSet, volumeSet, handlers, null, metrics);
-  }
-
-  @Test
-  public void testGetKeyValueHandler() throws Exception {
-    Handler kvHandler = dispatcher.getHandler(
-        ContainerProtos.ContainerType.KeyValueContainer);
-
-    Assert.assertTrue("getHandlerForContainerType returned incorrect handler",
-        (kvHandler instanceof KeyValueHandler));
-  }
-
-  @Test
-  public void testGetHandlerForInvalidContainerType() {
-    // When new ContainerProtos.ContainerType values are added, bump the
-    // number below so that it still maps to an invalid (undefined) enum.
-    ContainerProtos.ContainerType invalidContainerType =
-        ContainerProtos.ContainerType.forNumber(2);
-
-    Assert.assertNull("New ContainerType detected. Not an invalid "
-        + "containerType", invalidContainerType);
-
-    Handler dispatcherHandler = dispatcher.getHandler(invalidContainerType);
-    Assert.assertNull("Get Handler for Invalid ContainerType should "
-        + "return null.", dispatcherHandler);
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
deleted file mode 100644
index ca3d29d..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Testing and mocking utils for the datanode container common package.
- */
-package org.apache.hadoop.ozone.container.common;
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java
deleted file mode 100644
index aae388d..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.report;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.util.concurrent.ScheduledExecutorService;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-/**
- * Test cases to test {@link ReportManager}.
- */
-public class TestReportManager {
-
-  @Test
-  public void testReportManagerInit() {
-    Configuration conf = new OzoneConfiguration();
-    StateContext dummyContext = Mockito.mock(StateContext.class);
-    ReportPublisher dummyPublisher = Mockito.mock(ReportPublisher.class);
-    ReportManager.Builder builder = ReportManager.newBuilder(conf);
-    builder.setStateContext(dummyContext);
-    builder.addPublisher(dummyPublisher);
-    ReportManager reportManager = builder.build();
-    reportManager.init();
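-    // init() should initialize each registered publisher exactly once.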
-    verify(dummyPublisher, times(1)).init(eq(dummyContext),
-        any(ScheduledExecutorService.class));
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
deleted file mode 100644
index 03f0cd4..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.report;
-
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.GeneratedMessage;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsIdFactory;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.util.Random;
-import java.util.UUID;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-/**
- * Test cases to test {@link ReportPublisher}.
- */
-public class TestReportPublisher {
-
-  private static Configuration config;
-
-  @BeforeClass
-  public static void setup() {
-    config = new OzoneConfiguration();
-  }
-
-  /**
-   * Dummy report publisher for testing.
-   */
-  private static class DummyReportPublisher extends ReportPublisher {
-
-    private final long frequency;
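-    // Number of times getReport() has been invoked by the scheduler.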
-    private int getReportCount = 0;
-
-    DummyReportPublisher(long frequency) {
-      this.frequency = frequency;
-    }
-
-    @Override
-    protected long getReportFrequency() {
-      return frequency;
-    }
-
-    @Override
-    protected GeneratedMessage getReport() {
-      getReportCount++;
-      return null;
-    }
-  }
-
-  @Test
-  public void testReportPublisherInit() {
-    ReportPublisher publisher = new DummyReportPublisher(0);
-    StateContext dummyContext = Mockito.mock(StateContext.class);
-    ScheduledExecutorService dummyExecutorService = Mockito.mock(
-        ScheduledExecutorService.class);
-    publisher.init(dummyContext, dummyExecutorService);
-    verify(dummyExecutorService, times(1)).schedule(publisher,
-        0, TimeUnit.MILLISECONDS);
-  }
-
-  @Test
-  public void testScheduledReport() throws InterruptedException {
-    ReportPublisher publisher = new DummyReportPublisher(100);
-    StateContext dummyContext = Mockito.mock(StateContext.class);
-    ScheduledExecutorService executorService = HadoopExecutors
-        .newScheduledThreadPool(1,
-            new ThreadFactoryBuilder().setDaemon(true)
-                .setNameFormat("Unit test ReportManager Thread - %d").build());
-    publisher.init(dummyContext, executorService);
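-    // With a 100 ms report frequency, one report is expected after ~150 ms
-    // and a second one after a further 100 ms.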
-    Thread.sleep(150);
-    Assert.assertEquals(1, ((DummyReportPublisher) publisher).getReportCount);
-    Thread.sleep(100);
-    Assert.assertEquals(2, ((DummyReportPublisher) publisher).getReportCount);
-    executorService.shutdown();
-  }
-
-  @Test
-  public void testPublishReport() throws InterruptedException {
-    ReportPublisher publisher = new DummyReportPublisher(100);
-    StateContext dummyContext = Mockito.mock(StateContext.class);
-    ScheduledExecutorService executorService = HadoopExecutors
-        .newScheduledThreadPool(1,
-            new ThreadFactoryBuilder().setDaemon(true)
-                .setNameFormat("Unit test ReportManager Thread - %d").build());
-    publisher.init(dummyContext, executorService);
-    Thread.sleep(150);
-    executorService.shutdown();
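-    // Shutting down after a single interval prevents further scheduled
-    // runs, so exactly one report should have been published.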
-    Assert.assertEquals(1, ((DummyReportPublisher) publisher).getReportCount);
-    verify(dummyContext, times(1)).addReport(null);
-  }
-
-  @Test
-  public void testCommandStatusPublisher() throws InterruptedException {
-    StateContext dummyContext = Mockito.mock(StateContext.class);
-    ReportPublisher publisher = new CommandStatusReportPublisher();
-    final Map<Long, CommandStatus> cmdStatusMap = new ConcurrentHashMap<>();
-    when(dummyContext.getCommandStatusMap()).thenReturn(cmdStatusMap);
-    publisher.setConf(config);
-
-    ScheduledExecutorService executorService = HadoopExecutors
-        .newScheduledThreadPool(1,
-            new ThreadFactoryBuilder().setDaemon(true)
-                .setNameFormat("Unit test ReportManager Thread - %d").build());
-    publisher.init(dummyContext, executorService);
-    Assert.assertNull(((CommandStatusReportPublisher) publisher).getReport());
-
-    // Insert two status objects into the state context map and then get
-    // the report.
-    CommandStatus obj1 = CommandStatus.CommandStatusBuilder.newBuilder()
-        .setCmdId(HddsIdFactory.getLongId())
-        .setType(Type.deleteBlocksCommand)
-        .setStatus(Status.PENDING)
-        .build();
-    CommandStatus obj2 = CommandStatus.CommandStatusBuilder.newBuilder()
-        .setCmdId(HddsIdFactory.getLongId())
-        .setType(Type.closeContainerCommand)
-        .setStatus(Status.EXECUTED)
-        .build();
-    cmdStatusMap.put(obj1.getCmdId(), obj1);
-    cmdStatusMap.put(obj2.getCmdId(), obj2);
-    // Commands whose status is PENDING are not reported, so only one of
-    // the two status objects should be published.
-    Assert.assertEquals("Should publish report with 1 status object", 1,
-        ((CommandStatusReportPublisher) publisher).getReport()
-            .getCmdStatusCount());
-    executorService.shutdown();
-  }
-
-  /**
-   * Creates a DatanodeDetails instance with a random UUID and IP address.
-   *
-   * @return DatanodeDetails
-   */
-  private static DatanodeDetails getDatanodeDetails() {
-    String uuid = UUID.randomUUID().toString();
-    Random random = new Random();
-    String ipAddress =
-        random.nextInt(256) + "." + random.nextInt(256) + "." + random
-            .nextInt(256) + "." + random.nextInt(256);
-
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, 0);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, 0);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, 0);
-    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
-    builder.setUuid(uuid)
-        .setHostName("localhost")
-        .setIpAddress(ipAddress)
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort);
-    return builder.build();
-  }
-
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java
deleted file mode 100644
index f8c5fe5..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.report;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-/**
- * Test cases to test ReportPublisherFactory.
- */
-public class TestReportPublisherFactory {
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @Test
-  public void testGetContainerReportPublisher() {
-    Configuration conf = new OzoneConfiguration();
-    ReportPublisherFactory factory = new ReportPublisherFactory(conf);
-    ReportPublisher publisher = factory
-        .getPublisherFor(ContainerReportsProto.class);
-    Assert.assertEquals(ContainerReportPublisher.class, publisher.getClass());
-    Assert.assertEquals(conf, publisher.getConf());
-  }
-
-  @Test
-  public void testGetNodeReportPublisher() {
-    Configuration conf = new OzoneConfiguration();
-    ReportPublisherFactory factory = new ReportPublisherFactory(conf);
-    ReportPublisher publisher = factory
-        .getPublisherFor(NodeReportProto.class);
-    Assert.assertEquals(NodeReportPublisher.class, publisher.getClass());
-    Assert.assertEquals(conf, publisher.getConf());
-  }
-
-  @Test
-  public void testInvalidReportPublisher() {
-    Configuration conf = new OzoneConfiguration();
-    ReportPublisherFactory factory = new ReportPublisherFactory(conf);
-    exception.expect(RuntimeException.class);
-    exception.expectMessage("No publisher found for report");
-    factory.getPublisherFor(HddsProtos.DatanodeDetailsProto.class);
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java
deleted file mode 100644
index 37615bc..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-/**
- * This package has test cases for all the report publishers which generate
- * reports that are sent to SCM via heartbeat.
- */
-package org.apache.hadoop.ozone.container.common.report;
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
deleted file mode 100644
index a92f236..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ /dev/null
@@ -1,227 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.UUID;
-
-import static java.util.Collections.singletonMap;
-import static org.apache.hadoop.ozone.OzoneConsts.GB;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-/**
- * Test cases to verify CloseContainerCommandHandler in datanode.
- */
-public class TestCloseContainerCommandHandler {
-
-  private static final long CONTAINER_ID = 123L;
-
-  private OzoneContainer ozoneContainer;
-  private StateContext context;
-  private XceiverServerSpi writeChannel;
-  private Container container;
-  private Handler containerHandler;
-  private PipelineID pipelineID;
-  private PipelineID nonExistentPipelineID = PipelineID.randomId();
-
-  private CloseContainerCommandHandler subject =
-      new CloseContainerCommandHandler();
-
-  @Before
-  public void before() throws Exception {
-    context = mock(StateContext.class);
-    DatanodeStateMachine dnStateMachine = mock(DatanodeStateMachine.class);
-    when(dnStateMachine.getDatanodeDetails())
-        .thenReturn(randomDatanodeDetails());
-    when(context.getParent()).thenReturn(dnStateMachine);
-
-    pipelineID = PipelineID.randomId();
-
-    KeyValueContainerData data = new KeyValueContainerData(CONTAINER_ID, GB,
-        pipelineID.getId().toString(), null);
-
-    container = new KeyValueContainer(data, new OzoneConfiguration());
-    ContainerSet containerSet = new ContainerSet();
-    containerSet.addContainer(container);
-
-    containerHandler = mock(Handler.class);
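-    // Register the mocked handler for the KeyValueContainer type only.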
-    ContainerController controller = new ContainerController(containerSet,
-        singletonMap(ContainerProtos.ContainerType.KeyValueContainer,
-            containerHandler));
-
-    writeChannel = mock(XceiverServerSpi.class);
-    ozoneContainer = mock(OzoneContainer.class);
-    when(ozoneContainer.getController()).thenReturn(controller);
-    when(ozoneContainer.getContainerSet()).thenReturn(containerSet);
-    when(ozoneContainer.getWriteChannel()).thenReturn(writeChannel);
-    when(writeChannel.isExist(pipelineID.getProtobuf())).thenReturn(true);
-    when(writeChannel.isExist(nonExistentPipelineID.getProtobuf()))
-        .thenReturn(false);
-  }
-
-  @Test
-  public void closeContainerWithPipeline() throws Exception {
-    // close a container that's associated with an existing pipeline
-    subject.handle(closeWithKnownPipeline(), ozoneContainer, context, null);
-
-    verify(containerHandler)
-        .markContainerForClose(container);
-    verify(writeChannel)
-        .submitRequest(any(), eq(pipelineID.getProtobuf()));
-    verify(containerHandler, never())
-        .quasiCloseContainer(container);
-  }
-
-  @Test
-  public void closeContainerWithoutPipeline() throws IOException {
-    // close a container that's NOT associated with an open pipeline
-    subject.handle(closeWithUnknownPipeline(), ozoneContainer, context, null);
-
-    verify(containerHandler)
-        .markContainerForClose(container);
-    verify(writeChannel, never())
-        .submitRequest(any(), any());
-    // Container in CLOSING state is moved to UNHEALTHY if pipeline does not
-    // exist. Container should not exist in CLOSING state without a pipeline.
-    verify(containerHandler)
-        .markContainerUnhealthy(container);
-  }
-
-  @Test
-  public void forceCloseQuasiClosedContainer() throws Exception {
-    // force-close a container that's already quasi closed
-    container.getContainerData()
-        .setState(ContainerProtos.ContainerDataProto.State.QUASI_CLOSED);
-
-    subject.handle(forceCloseWithoutPipeline(), ozoneContainer, context, null);
-
-    verify(writeChannel, never())
-        .submitRequest(any(), any());
-    verify(containerHandler)
-        .closeContainer(container);
-  }
-
-  @Test
-  public void forceCloseOpenContainer() throws Exception {
-    // force-close a container that's NOT associated with an open pipeline
-    subject.handle(forceCloseWithoutPipeline(), ozoneContainer, context, null);
-
-    verify(writeChannel, never())
-        .submitRequest(any(), any());
-    // Container in CLOSING state is moved to UNHEALTHY if pipeline does not
-    // exist. Container should not exist in CLOSING state without a pipeline.
-    verify(containerHandler)
-        .markContainerUnhealthy(container);
-  }
-
-  @Test
-  public void forceCloseOpenContainerWithPipeline() throws Exception {
-    // force-close a container that's associated with an existing pipeline
-    subject.handle(forceCloseWithPipeline(), ozoneContainer, context, null);
-
-    verify(containerHandler)
-        .markContainerForClose(container);
-    verify(writeChannel)
-        .submitRequest(any(), any());
-    verify(containerHandler, never())
-        .quasiCloseContainer(container);
-    verify(containerHandler, never())
-        .closeContainer(container);
-  }
-
-  @Test
-  public void closeAlreadyClosedContainer() throws Exception {
-    container.getContainerData()
-        .setState(ContainerProtos.ContainerDataProto.State.CLOSED);
-
-    // Since the container is already closed, these commands should do nothing,
-    // neither should they fail
-    subject.handle(closeWithUnknownPipeline(), ozoneContainer, context, null);
-    subject.handle(closeWithKnownPipeline(), ozoneContainer, context, null);
-
-    verify(containerHandler, never())
-        .markContainerForClose(container);
-    verify(containerHandler, never())
-        .quasiCloseContainer(container);
-    verify(containerHandler, never())
-        .closeContainer(container);
-    verify(writeChannel, never())
-        .submitRequest(any(), any());
-  }
-
-  private CloseContainerCommand closeWithKnownPipeline() {
-    return new CloseContainerCommand(CONTAINER_ID, pipelineID);
-  }
-
-  private CloseContainerCommand closeWithUnknownPipeline() {
-    return new CloseContainerCommand(CONTAINER_ID, nonExistentPipelineID);
-  }
-
-  private CloseContainerCommand forceCloseWithPipeline() {
-    return new CloseContainerCommand(CONTAINER_ID, pipelineID, true);
-  }
-
-  private CloseContainerCommand forceCloseWithoutPipeline() {
-    return new CloseContainerCommand(CONTAINER_ID, nonExistentPipelineID, true);
-  }
-
-  /**
-   * Creates a random DatanodeDetails.
-   * @return DatanodeDetails
-   */
-  private static DatanodeDetails randomDatanodeDetails() {
-    String ipAddress = "127.0.0.1";
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, 0);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, 0);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, 0);
-    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
-    builder.setUuid(UUID.randomUUID().toString())
-        .setHostName("localhost")
-        .setIpAddress(ipAddress)
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort);
-    return builder.build();
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
deleted file mode 100644
index 05ac76d..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/**
- * Tests for command handlers.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
deleted file mode 100644
index 606940b..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
+++ /dev/null
@@ -1,295 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.states.endpoint;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine.DatanodeStates;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolClientSideTranslatorPB;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Mockito;
-
-import java.util.UUID;
-
-/**
- * This class tests the functionality of HeartbeatEndpointTask.
- */
-public class TestHeartbeatEndpointTask {
-
-  @Test
-  public void testHeartbeatWithoutReports() throws Exception {
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
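-    // The mocked SCM simply echoes back the datanode UUID from the request.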
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(scm);
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertFalse(heartbeat.hasNodeReport());
-    Assert.assertFalse(heartbeat.hasContainerReport());
-    Assert.assertEquals(0, heartbeat.getCommandStatusReportsCount());
-    Assert.assertFalse(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testHeartbeatWithNodeReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addReport(NodeReportProto.getDefaultInstance());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertTrue(heartbeat.hasNodeReport());
-    Assert.assertFalse(heartbeat.hasContainerReport());
-    Assert.assertEquals(0, heartbeat.getCommandStatusReportsCount());
-    Assert.assertFalse(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testHeartbeatWithContainerReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addReport(ContainerReportsProto.getDefaultInstance());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertFalse(heartbeat.hasNodeReport());
-    Assert.assertTrue(heartbeat.hasContainerReport());
-    Assert.assertEquals(0, heartbeat.getCommandStatusReportsCount());
-    Assert.assertFalse(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testHeartbeatWithCommandStatusReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addReport(CommandStatusReportsProto.getDefaultInstance());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertFalse(heartbeat.hasNodeReport());
-    Assert.assertFalse(heartbeat.hasContainerReport());
-    Assert.assertNotEquals(0, heartbeat.getCommandStatusReportsCount());
-    Assert.assertFalse(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testHeartbeatWithContainerActions() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addContainerAction(getContainerAction());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertFalse(heartbeat.hasNodeReport());
-    Assert.assertFalse(heartbeat.hasContainerReport());
-    Assert.assertEquals(0, heartbeat.getCommandStatusReportsCount());
-    Assert.assertTrue(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testHeartbeatWithAllReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addReport(NodeReportProto.getDefaultInstance());
-    context.addReport(ContainerReportsProto.getDefaultInstance());
-    context.addReport(CommandStatusReportsProto.getDefaultInstance());
-    context.addContainerAction(getContainerAction());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertTrue(heartbeat.hasNodeReport());
-    Assert.assertTrue(heartbeat.hasContainerReport());
-    Assert.assertNotEquals(0, heartbeat.getCommandStatusReportsCount());
-    Assert.assertTrue(heartbeat.hasContainerActions());
-  }
-
-  /**
-   * Creates HeartbeatEndpointTask for the given StorageContainerManager proxy.
-   *
-   * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB
-   *
-   * @return HeartbeatEndpointTask
-   */
-  private HeartbeatEndpointTask getHeartbeatEndpointTask(
-      StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-    return getHeartbeatEndpointTask(conf, context, proxy);
-  }
-
-  /**
-   * Creates HeartbeatEndpointTask with the given conf, context and
-   * StorageContainerManager client side proxy.
-   *
-   * @param conf Configuration
-   * @param context StateContext
-   * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB
-   *
-   * @return HeartbeatEndpointTask
-   */
-  private HeartbeatEndpointTask getHeartbeatEndpointTask(
-      Configuration conf,
-      StateContext context,
-      StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) {
-    DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
-        .setUuid(UUID.randomUUID().toString())
-        .setHostName("localhost")
-        .setIpAddress("127.0.0.1")
-        .build();
-    EndpointStateMachine endpointStateMachine = Mockito
-        .mock(EndpointStateMachine.class);
-    Mockito.when(endpointStateMachine.getEndPoint()).thenReturn(proxy);
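-    // Wire the task to a mocked endpoint so call() talks to the given proxy.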
-    return HeartbeatEndpointTask.newBuilder()
-        .setConfig(conf)
-        .setDatanodeDetails(datanodeDetails)
-        .setContext(context)
-        .setEndpointStateMachine(endpointStateMachine)
-        .build();
-  }
-
-  private ContainerAction getContainerAction() {
-    ContainerAction.Builder builder = ContainerAction.newBuilder();
-    builder.setContainerID(1L)
-        .setAction(ContainerAction.Action.CLOSE)
-        .setReason(ContainerAction.Reason.CONTAINER_FULL);
-    return builder.build();
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
deleted file mode 100644
index d120a5c..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.states.endpoint;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
deleted file mode 100644
index fb2f29b..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.GetSpaceUsed;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import java.io.File;
-import java.util.Properties;
-import java.util.UUID;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Unit tests for {@link HddsVolume}.
- */
-public class TestHddsVolume {
-
-  private static final String DATANODE_UUID = UUID.randomUUID().toString();
-  private static final String CLUSTER_ID = UUID.randomUUID().toString();
-  private static final Configuration CONF = new Configuration();
-  private static final String DU_CACHE_FILE = "scmUsed";
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-  private File rootDir;
-  private HddsVolume volume;
-  private File versionFile;
-
-  @Before
-  public void setup() throws Exception {
-    rootDir = new File(folder.getRoot(), HddsVolume.HDDS_VOLUME_DIR);
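-    // Build the volume without a clusterID; it should remain NOT_FORMATTED
-    // until format() is called by the individual tests.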
-    volume = new HddsVolume.Builder(folder.getRoot().getPath())
-        .datanodeUuid(DATANODE_UUID)
-        .conf(CONF)
-        .build();
-    versionFile = HddsVolumeUtil.getVersionFile(rootDir);
-  }
-
-  @Test
-  public void testHddsVolumeInitialization() throws Exception {
-
-    // The initial state of HddsVolume should be "NOT_FORMATTED" when
-    // clusterID is not specified and the version file should not be written
-    // to disk.
-    assertTrue(volume.getClusterID() == null);
-    assertEquals(StorageType.DEFAULT, volume.getStorageType());
-    assertEquals(HddsVolume.VolumeState.NOT_FORMATTED,
-        volume.getStorageState());
-    assertFalse("Version file should not be created when clusterID is not " +
-        "known.", versionFile.exists());
-
-
-    // Format the volume with clusterID.
-    volume.format(CLUSTER_ID);
-
-    // The state of HddsVolume after formatting with clusterID should be
-    // NORMAL and the version file should exist.
-    assertTrue("Volume format should create Version file",
-        versionFile.exists());
-    assertEquals(CLUSTER_ID, volume.getClusterID());
-    assertEquals(HddsVolume.VolumeState.NORMAL, volume.getStorageState());
-  }
-
-  @Test
-  public void testReadPropertiesFromVersionFile() throws Exception {
-    volume.format(CLUSTER_ID);
-
-    Properties properties = DatanodeVersionFile.readFrom(versionFile);
-
-    String storageID = HddsVolumeUtil.getStorageID(properties, versionFile);
-    String clusterID = HddsVolumeUtil.getClusterID(
-        properties, versionFile, CLUSTER_ID);
-    String datanodeUuid = HddsVolumeUtil.getDatanodeUUID(
-        properties, versionFile, DATANODE_UUID);
-    long cTime = HddsVolumeUtil.getCreationTime(
-        properties, versionFile);
-    int layoutVersion = HddsVolumeUtil.getLayOutVersion(
-        properties, versionFile);
-
-    assertEquals(volume.getStorageID(), storageID);
-    assertEquals(volume.getClusterID(), clusterID);
-    assertEquals(volume.getDatanodeUuid(), datanodeUuid);
-    assertEquals(volume.getCTime(), cTime);
-    assertEquals(volume.getLayoutVersion(), layoutVersion);
-  }
-
-  @Test
-  public void testShutdown() throws Exception {
-    // Return a dummy value > 0 for scmUsed so that the scmUsed cache file
-    // is written during shutdown.
-    GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class);
-    volume.setScmUsageForTesting(scmUsageMock);
-    Mockito.when(scmUsageMock.getUsed()).thenReturn(Long.valueOf(100));
-
-    assertTrue("Available volume should be positive",
-        volume.getAvailable() > 0);
-
-    // Shutdown the volume.
-    volume.shutdown();
-
-    // Volume state should be "NON_EXISTENT" when volume is shutdown.
-    assertEquals(HddsVolume.VolumeState.NON_EXISTENT, volume.getStorageState());
-
-    // Volume should save scmUsed cache file once volume is shutdown
-    File scmUsedFile = new File(folder.getRoot(), DU_CACHE_FILE);
-    System.out.println("scmUsedFile: " + scmUsedFile);
-    assertTrue("scmUsed cache file should be saved on shutdown",
-        scmUsedFile.exists());
-
-    // Volume.getAvailable() should succeed even when usage thread
-    // is shutdown.
-    volume.getAvailable();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java
deleted file mode 100644
index 2e267be..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
-import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.FakeTimer;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Set;
-import java.util.Optional;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*;
-import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.*;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Mockito.*;
-
-
-/**
- * Tests for {@link HddsVolumeChecker}.
- */
-@RunWith(Parameterized.class)
-public class TestHddsVolumeChecker {
-  public static final Logger LOG = LoggerFactory.getLogger(
-      TestHddsVolumeChecker.class);
-
-  @Rule
-  public TestName testName = new TestName();
-
-  @Rule
-  public Timeout globalTimeout = new Timeout(30_000);
-
-  /**
-   * Run each test case for each possible value of {@link VolumeCheckResult},
-   * including null to mean 'throw an exception'.
-   * @return the parameter sets for this test
-   */
-  @Parameters(name="{0}")
-  public static Collection<Object[]> data() {
-    List<Object[]> values = new ArrayList<>();
-    for (VolumeCheckResult result : VolumeCheckResult.values()) {
-      values.add(new Object[] {result});
-    }
-    values.add(new Object[] {null});
-    return values;
-  }
-
-  /**
-   * When null, the check call should throw an exception.
-   */
-  private final VolumeCheckResult expectedVolumeHealth;
-  private static final int NUM_VOLUMES = 2;
-
-
-  public TestHddsVolumeChecker(VolumeCheckResult expectedVolumeHealth) {
-    this.expectedVolumeHealth = expectedVolumeHealth;
-  }
-
-  /**
-   * Test {@link HddsVolumeChecker#checkVolume} propagates the
-   * check to the delegate checker.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testCheckOneVolume() throws Exception {
-    LOG.info("Executing {}", testName.getMethodName());
-    final HddsVolume volume = makeVolumes(1, expectedVolumeHealth).get(0);
-    final HddsVolumeChecker checker =
-        new HddsVolumeChecker(new HdfsConfiguration(), new FakeTimer());
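-    // Swap in DummyChecker so the outcome is driven entirely by
-    // expectedVolumeHealth instead of a real disk probe.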
-    checker.setDelegateChecker(new DummyChecker());
-    final AtomicLong numCallbackInvocations = new AtomicLong(0);
-
-    /**
-     * Request a check and ensure it triggered {@link HddsVolume#check}.
-     */
-    boolean result =
-        checker.checkVolume(volume, (healthyVolumes, failedVolumes) -> {
-          numCallbackInvocations.incrementAndGet();
-          if (expectedVolumeHealth != null &&
-              expectedVolumeHealth != FAILED) {
-            assertThat(healthyVolumes.size(), is(1));
-            assertThat(failedVolumes.size(), is(0));
-          } else {
-            assertThat(healthyVolumes.size(), is(0));
-            assertThat(failedVolumes.size(), is(1));
-          }
-        });
-
-    GenericTestUtils.waitFor(() -> numCallbackInvocations.get() > 0, 5, 10000);
-
-    // Ensure that the check was invoked at least once.
-    verify(volume, times(1)).check(anyObject());
-    if (result) {
-      assertThat(numCallbackInvocations.get(), is(1L));
-    }
-  }
-
-  /**
-   * Test {@link HddsVolumeChecker#checkAllVolumes} propagates
-   * checks for all volumes to the delegate checker.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testCheckAllVolumes() throws Exception {
-    LOG.info("Executing {}", testName.getMethodName());
-
-    final List<HddsVolume> volumes = makeVolumes(
-        NUM_VOLUMES, expectedVolumeHealth);
-    final HddsVolumeChecker checker =
-        new HddsVolumeChecker(new HdfsConfiguration(), new FakeTimer());
-    checker.setDelegateChecker(new DummyChecker());
-
-    Set<HddsVolume> failedVolumes = checker.checkAllVolumes(volumes);
-    LOG.info("Got back {} failed volumes", failedVolumes.size());
-
-    if (expectedVolumeHealth == null || expectedVolumeHealth == FAILED) {
-      assertThat(failedVolumes.size(), is(NUM_VOLUMES));
-    } else {
-      assertTrue(failedVolumes.isEmpty());
-    }
-
-    // Ensure each volume's check() method was called exactly once.
-    for (HddsVolume volume : volumes) {
-      verify(volume, times(1)).check(anyObject());
-    }
-  }
-
-  /**
-   * A checker that wraps the result of {@link HddsVolume#check} in
-   * an ImmediateFuture.
-   */
-  static class DummyChecker
-      implements AsyncChecker<Boolean, VolumeCheckResult> {
-
-    @Override
-    public Optional<ListenableFuture<VolumeCheckResult>> schedule(
-        Checkable<Boolean, VolumeCheckResult> target,
-        Boolean context) {
-      try {
-        LOG.info("Returning success for volume check");
-        return Optional.of(
-            Futures.immediateFuture(target.check(context)));
-      } catch (Exception e) {
-        LOG.info("check routine threw exception " + e);
-        return Optional.of(Futures.immediateFailedFuture(e));
-      }
-    }
-
-    @Override
-    public void shutdownAndWait(long timeout, TimeUnit timeUnit)
-        throws InterruptedException {
-      // Nothing to cancel.
-    }
-  }
-
-  static List<HddsVolume> makeVolumes(
-      int numVolumes, VolumeCheckResult health) throws Exception {
-    final List<HddsVolume> volumes = new ArrayList<>(numVolumes);
-    for (int i = 0; i < numVolumes; ++i) {
-      final HddsVolume volume = mock(HddsVolume.class);
-
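-      // Stub check() for both the Boolean and null contexts: return the
-      // given health, or throw a DiskErrorException when health is null.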
-      if (health != null) {
-        when(volume.check(any(Boolean.class))).thenReturn(health);
-        when(volume.check(isNull())).thenReturn(health);
-      } else {
-        final DiskErrorException de = new DiskErrorException("Fake Exception");
-        when(volume.check(any(Boolean.class))).thenThrow(de);
-        when(volume.check(isNull())).thenThrow(de);
-      }
-      volumes.add(volume);
-    }
-    return volumes;
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
deleted file mode 100644
index d0fbf10..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import org.apache.hadoop.fs.GetSpaceUsed;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.UUID;
-
-/**
- * Tests {@link RoundRobinVolumeChoosingPolicy}.
- */
-public class TestRoundRobinVolumeChoosingPolicy {
-
-  private RoundRobinVolumeChoosingPolicy policy;
-  private List<HddsVolume> volumes;
-  private VolumeSet volumeSet;
-
-  private final String baseDir = MiniDFSCluster.getBaseDirectory();
-  private final String volume1 = baseDir + "disk1";
-  private final String volume2 = baseDir + "disk2";
-
-  private static final String DUMMY_IP_ADDR = "0.0.0.0";
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    String dataDirKey = volume1 + "," + volume2;
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
-    policy = ReflectionUtils.newInstance(
-        RoundRobinVolumeChoosingPolicy.class, null);
-    volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
-    volumes = volumeSet.getVolumesList();
-  }
-
-  @After
-  public void cleanUp() {
-    if (volumeSet != null) {
-      volumeSet.shutdown();
-      volumeSet = null;
-    }
-  }
-
-  @Test
-  public void testRRVolumeChoosingPolicy() throws Exception {
-    HddsVolume hddsVolume1 = volumes.get(0);
-    HddsVolume hddsVolume2 = volumes.get(1);
-
-    // Set available space in volume1 to 100L
-    setAvailableSpace(hddsVolume1, 100L);
-
-    // Set available space in volume2 to 200L
-    setAvailableSpace(hddsVolume2, 200L);
-
-    Assert.assertEquals(100L, hddsVolume1.getAvailable());
-    Assert.assertEquals(200L, hddsVolume2.getAvailable());
-
-    // Test two rounds of round-robin choosing
-    Assert.assertEquals(hddsVolume1, policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(hddsVolume2, policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(hddsVolume1, policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(hddsVolume2, policy.chooseVolume(volumes, 0));
-
-    // The first volume has only 100L space, so the policy should
-    // choose the second one in case we ask for more.
-    Assert.assertEquals(hddsVolume2,
-        policy.chooseVolume(volumes, 150));
-
-    // Fail if no volume has enough space available
-    try {
-      policy.chooseVolume(volumes, Long.MAX_VALUE);
-      Assert.fail();
-    } catch (IOException e) {
-      // Passed.
-    }
-  }
-
-  @Test
-  public void testRRPolicyExceptionMessage() throws Exception {
-    HddsVolume hddsVolume1 = volumes.get(0);
-    HddsVolume hddsVolume2 = volumes.get(1);
-
-    // Set available space in volume1 to 100L
-    setAvailableSpace(hddsVolume1, 100L);
-
-    // Set available space in volume2 to 200L
-    setAvailableSpace(hddsVolume2, 200L);
-
-    int blockSize = 300;
-    try {
-      policy.chooseVolume(volumes, blockSize);
-      Assert.fail("expected to throw DiskOutOfSpaceException");
-    } catch (DiskOutOfSpaceException e) {
-      Assert.assertEquals("Not returning the expected message",
-          "Out of space: The volume with the most available space (=" + 200
-              + " B) is less than the container size (=" + blockSize + " B).",
-          e.getMessage());
-    }
-  }
-
-  private void setAvailableSpace(HddsVolume hddsVolume, long availableSpace)
-      throws IOException {
-    GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class);
-    hddsVolume.setScmUsageForTesting(scmUsageMock);
-    // Set used space to (capacity - availableSpace) so that
-    // getAvailable() returns the specified availableSpace.
-    Mockito.when(scmUsageMock.getUsed()).thenReturn(
-        (hddsVolume.getCapacity() - availableSpace));
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
deleted file mode 100644
index fa280dd..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import java.io.IOException;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.ozone.container.common.volume.HddsVolume
-    .HDDS_VOLUME_DIR;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-/**
- * Tests {@link VolumeSet} operations.
- */
-public class TestVolumeSet {
-
-  private OzoneConfiguration conf;
-  private VolumeSet volumeSet;
-  private final String baseDir = MiniDFSCluster.getBaseDirectory();
-  private final String volume1 = baseDir + "disk1";
-  private final String volume2 = baseDir + "disk2";
-  private final List<String> volumes = new ArrayList<>();
-
-  private static final String DUMMY_IP_ADDR = "0.0.0.0";
-
-  private void initializeVolumeSet() throws Exception {
-    volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
-  }
-
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  @Before
-  public void setup() throws Exception {
-    conf = new OzoneConfiguration();
-    String dataDirKey = volume1 + "," + volume2;
-    volumes.add(volume1);
-    volumes.add(volume2);
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
-    initializeVolumeSet();
-  }
-
-  @After
-  public void shutdown() throws IOException {
-    // Delete the hdds volume root dir
-    List<HddsVolume> hddsVolumes = new ArrayList<>();
-    hddsVolumes.addAll(volumeSet.getVolumesList());
-    hddsVolumes.addAll(volumeSet.getFailedVolumesList());
-
-    for (HddsVolume volume : hddsVolumes) {
-      FileUtils.deleteDirectory(volume.getHddsRootDir());
-    }
-    volumeSet.shutdown();
-
-    FileUtil.fullyDelete(new File(baseDir));
-  }
-
-  private boolean checkVolumeExistsInVolumeSet(String volume) {
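-    // Match on the hdds root path derived from the raw volume directory.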
-    for (HddsVolume hddsVolume : volumeSet.getVolumesList()) {
-      if (hddsVolume.getHddsRootDir().getPath().equals(
-          HddsVolumeUtil.getHddsRoot(volume))) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  @Test
-  public void testVolumeSetInitialization() throws Exception {
-
-    List<HddsVolume> volumesList = volumeSet.getVolumesList();
-
-    // VolumeSet initialization should add volume1 and volume2 to VolumeSet
-    assertEquals("VolumeSet intialization is incorrect",
-        volumesList.size(), volumes.size());
-    assertTrue("VolumeSet not initailized correctly",
-        checkVolumeExistsInVolumeSet(volume1));
-    assertTrue("VolumeSet not initailized correctly",
-        checkVolumeExistsInVolumeSet(volume2));
-  }
-
-  @Test
-  public void testAddVolume() {
-
-    assertEquals(2, volumeSet.getVolumesList().size());
-
-    // Add a volume to VolumeSet
-    String volume3 = baseDir + "disk3";
-    boolean success = volumeSet.addVolume(volume3);
-
-    assertTrue(success);
-    assertEquals(3, volumeSet.getVolumesList().size());
-    assertTrue("AddVolume did not add requested volume to VolumeSet",
-        checkVolumeExistsInVolumeSet(volume3));
-  }
-
-  @Test
-  public void testFailVolume() throws Exception {
-
-    //Fail a volume
-    volumeSet.failVolume(volume1);
-
-    // Failed volume should not show up in the volumeList
-    assertEquals(1, volumeSet.getVolumesList().size());
-
-    // Failed volume should be added to FailedVolumeList
-    assertEquals("Failed volume not present in FailedVolumeMap",
-        1, volumeSet.getFailedVolumesList().size());
-    assertEquals("Failed Volume list did not match",
-        HddsVolumeUtil.getHddsRoot(volume1),
-        volumeSet.getFailedVolumesList().get(0).getHddsRootDir().getPath());
-    assertTrue(volumeSet.getFailedVolumesList().get(0).isFailed());
-
-    // Failed volume should not exist in VolumeMap
-    assertFalse(volumeSet.getVolumeMap().containsKey(volume1));
-  }
-
-  @Test
-  public void testRemoveVolume() throws Exception {
-
-    assertEquals(2, volumeSet.getVolumesList().size());
-
-    // Remove a volume from VolumeSet
-    volumeSet.removeVolume(volume1);
-    assertEquals(1, volumeSet.getVolumesList().size());
-
-    // Attempting to remove a volume which does not exist in VolumeSet should
-    // log a warning.
-    LogCapturer logs = LogCapturer.captureLogs(
-        LogFactory.getLog(VolumeSet.class));
-    volumeSet.removeVolume(volume1);
-    assertEquals(1, volumeSet.getVolumesList().size());
-    String expectedLogMessage = "Volume : " +
-        HddsVolumeUtil.getHddsRoot(volume1) + " does not exist in VolumeSet";
-    assertTrue("Log output does not contain expected log message: "
-        + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
-  }
-
-  @Test
-  public void testVolumeInInconsistentState() throws Exception {
-    assertEquals(2, volumeSet.getVolumesList().size());
-
-    // Add a volume to VolumeSet
-    String volume3 = baseDir + "disk3";
-
-    // Create the root volume dir and create a sub-directory within it.
-    File newVolume = new File(volume3, HDDS_VOLUME_DIR);
-    System.out.println("new volume root: " + newVolume);
-    newVolume.mkdirs();
-    assertTrue("Failed to create new volume root", newVolume.exists());
-    File dataDir = new File(newVolume, "chunks");
-    dataDir.mkdirs();
-    assertTrue(dataDir.exists());
-
-    // The new volume is in an inconsistent state as the root dir is
-    // non-empty but the version file does not exist. Add Volume should
-    // return false.
-    boolean success = volumeSet.addVolume(volume3);
-
-    assertFalse(success);
-    assertEquals(2, volumeSet.getVolumesList().size());
-    assertTrue("AddVolume should fail for an inconsistent volume",
-        !checkVolumeExistsInVolumeSet(volume3));
-
-    // Delete volume3
-    File volume = new File(volume3);
-    FileUtils.deleteDirectory(volume);
-  }
-
-  @Test
-  public void testShutdown() throws Exception {
-    List<HddsVolume> volumesList = volumeSet.getVolumesList();
-
-    volumeSet.shutdown();
-
-    // Verify that volume usage can be queried during shutdown.
-    for (HddsVolume volume : volumesList) {
-      Assert.assertNotNull(volume.getVolumeInfo().getUsageForTesting());
-      volume.getAvailable();
-    }
-  }
-
-  @Test
-  public void testFailVolumes() throws Exception {
-    VolumeSet volSet = null;
-    File readOnlyVolumePath = new File(baseDir);
-    //Set to readonly, so that this volume will be failed
-    readOnlyVolumePath.setReadOnly();
-    File volumePath = GenericTestUtils.getRandomizedTestDir();
-    OzoneConfiguration ozoneConfig = new OzoneConfiguration();
-    ozoneConfig.set(HDDS_DATANODE_DIR_KEY, readOnlyVolumePath.getAbsolutePath()
-        + "," + volumePath.getAbsolutePath());
-    volSet = new VolumeSet(UUID.randomUUID().toString(), ozoneConfig);
-    assertEquals(1, volSet.getFailedVolumesList().size());
-    assertEquals(readOnlyVolumePath, volSet.getFailedVolumesList().get(0)
-        .getHddsRootDir());
-
-    //Set back to writable
-    try {
-      readOnlyVolumePath.setWritable(true);
-      volSet.shutdown();
-    } finally {
-      FileUtil.fullyDelete(volumePath);
-    }
-
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
deleted file mode 100644
index c5deff0..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.Timer;
-
-import com.google.common.collect.Iterables;
-import org.apache.commons.io.FileUtils;
-import org.apache.curator.shaded.com.google.common.collect.ImmutableSet;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.hamcrest.CoreMatchers.is;
-import org.junit.After;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * Verify that {@link VolumeSet} correctly checks for failed disks
- * during initialization.
- */
-public class TestVolumeSetDiskChecks {
-  public static final Logger LOG = LoggerFactory.getLogger(
-      TestVolumeSetDiskChecks.class);
-
-  @Rule
-  public Timeout globalTimeout = new Timeout(30_000);
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  private Configuration conf = null;
-
-  /**
-   * Cleanup volume directories.
-   */
-  @After
-  public void cleanup() {
-    final Collection<String> dirs = conf.getTrimmedStringCollection(
-        DFS_DATANODE_DATA_DIR_KEY);
-
-    for (String d: dirs) {
-      FileUtils.deleteQuietly(new File(d));
-    }
-  }
-
-  /**
-   * Verify that VolumeSet creates volume root directories at startup.
-   * @throws IOException
-   */
-  @Test
-  public void testOzoneDirsAreCreated() throws IOException {
-    final int numVolumes = 2;
-
-    conf = getConfWithDataNodeDirs(numVolumes);
-    final VolumeSet volumeSet =
-        new VolumeSet(UUID.randomUUID().toString(), conf);
-
-    assertThat(volumeSet.getVolumesList().size(), is(numVolumes));
-    assertThat(volumeSet.getFailedVolumesList().size(), is(0));
-
-    // Verify that the Ozone dirs were created during initialization.
-    Collection<String> dirs = conf.getTrimmedStringCollection(
-        DFS_DATANODE_DATA_DIR_KEY);
-    for (String d : dirs) {
-      assertTrue(new File(d).isDirectory());
-    }
-    volumeSet.shutdown();
-  }
-
-  /**
-   * Verify that bad volumes are filtered at startup.
-   * @throws IOException
-   */
-  @Test
-  public void testBadDirectoryDetection() throws IOException {
-    final int numVolumes = 5;
-    final int numBadVolumes = 2;
-
-    conf = getConfWithDataNodeDirs(numVolumes);
-    final VolumeSet volumeSet = new VolumeSet(
-        UUID.randomUUID().toString(), conf) {
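-      // Override the checker factory so exactly numBadVolumes volumes fail.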
-      @Override
-      HddsVolumeChecker getVolumeChecker(Configuration configuration)
-          throws DiskErrorException {
-        return new DummyChecker(configuration, new Timer(), numBadVolumes);
-      }
-    };
-
-    assertThat(volumeSet.getFailedVolumesList().size(), is(numBadVolumes));
-    assertThat(volumeSet.getVolumesList().size(),
-        is(numVolumes - numBadVolumes));
-    volumeSet.shutdown();
-  }
-
-  /**
-   * Verify that all volumes are added to fail list if all volumes are bad.
-   */
-  @Test
-  public void testAllVolumesAreBad() throws IOException {
-    final int numVolumes = 5;
-
-    conf = getConfWithDataNodeDirs(numVolumes);
-
-    final VolumeSet volumeSet = new VolumeSet(
-        UUID.randomUUID().toString(), conf) {
-      @Override
-      HddsVolumeChecker getVolumeChecker(Configuration configuration)
-          throws DiskErrorException {
-        return new DummyChecker(configuration, new Timer(), numVolumes);
-      }
-    };
-
-    assertEquals(numVolumes, volumeSet.getFailedVolumesList().size());
-    assertEquals(0, volumeSet.getVolumesList().size());
-    volumeSet.shutdown();
-  }
-
-  /**
-   * Create a configuration with the specified number of Datanode
-   * storage directories.
-   * @param numDirs number of storage directories
-   * @return the populated configuration
-   */
-  private Configuration getConfWithDataNodeDirs(int numDirs) {
-    final Configuration ozoneConf = new OzoneConfiguration();
-    final List<String> dirs = new ArrayList<>();
-    for (int i = 0; i < numDirs; ++i) {
-      dirs.add(GenericTestUtils.getRandomizedTestDir().getPath());
-    }
-    ozoneConf.set(DFS_DATANODE_DATA_DIR_KEY, String.join(",", dirs));
-    return ozoneConf;
-  }
-
-  /**
-   * A no-op checker that fails the given number of volumes and succeeds
-   * the rest.
-   */
-  static class DummyChecker extends HddsVolumeChecker {
-    private final int numBadVolumes;
-
-    DummyChecker(Configuration conf, Timer timer, int numBadVolumes)
-        throws DiskErrorException {
-      super(conf, timer);
-      this.numBadVolumes = numBadVolumes;
-    }
-
-    @Override
-    public Set<HddsVolume> checkAllVolumes(Collection<HddsVolume> volumes)
-        throws InterruptedException {
-      // Return the first 'numBadVolumes' as failed.
-      return ImmutableSet.copyOf(Iterables.limit(volumes, numBadVolumes));
-    }
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
deleted file mode 100644
index 3328deb..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- *     http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-/**
- * Tests for Container Volumes.
- */
-package org.apache.hadoop.ozone.container.common.volume;
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
deleted file mode 100644
index 1d580a0..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.Mockito.mock;
-
-/**
- * This class is used to test block-related operations on the container.
- */
-public class TestBlockManagerImpl {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-  private OzoneConfiguration config;
-  private String scmId = UUID.randomUUID().toString();
-  private VolumeSet volumeSet;
-  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
-  private KeyValueContainerData keyValueContainerData;
-  private KeyValueContainer keyValueContainer;
-  private BlockData blockData;
-  private BlockManagerImpl blockManager;
-  private BlockID blockID;
-
-  @Before
-  public void setUp() throws Exception {
-    config = new OzoneConfiguration();
-    UUID datanodeId = UUID.randomUUID();
-    HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot()
-        .getAbsolutePath()).conf(config).datanodeUuid(datanodeId
-        .toString()).build();
-
-    volumeSet = mock(VolumeSet.class);
-
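-    // The mocked policy always returns the single temp-folder volume,
-    // regardless of the requested space.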
-    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
-    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
-        .thenReturn(hddsVolume);
-
-    keyValueContainerData = new KeyValueContainerData(1L,
-        (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
-        datanodeId.toString());
-
-    keyValueContainer = new KeyValueContainer(
-        keyValueContainerData, config);
-
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-
-    // Creating BlockData
-    blockID = new BlockID(1L, 1L);
-    blockData = new BlockData(blockID);
-    blockData.addMetadata("VOLUME", "ozone");
-    blockData.addMetadata("OWNER", "hdfs");
-    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
-    ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
-        .getLocalID(), 0), 0, 1024);
-    chunkList.add(info.getProtoBufMessage());
-    blockData.setChunks(chunkList);
-
-    // Create KeyValueContainerManager
-    blockManager = new BlockManagerImpl(config);
-
-  }
-
-  @Test
-  public void testPutAndGetBlock() throws Exception {
-    assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
-    //Put Block
-    blockManager.putBlock(keyValueContainer, blockData);
-
-    assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
-    //Get Block
-    BlockData fromGetBlockData = blockManager.getBlock(keyValueContainer,
-        blockData.getBlockID());
-
-    assertEquals(blockData.getContainerID(), fromGetBlockData.getContainerID());
-    assertEquals(blockData.getLocalID(), fromGetBlockData.getLocalID());
-    assertEquals(blockData.getChunks().size(),
-        fromGetBlockData.getChunks().size());
-    assertEquals(blockData.getMetadata().size(), fromGetBlockData.getMetadata()
-        .size());
-
-  }
-
-  @Test
-  public void testDeleteBlock() throws Exception {
-    assertEquals(0,
-        keyValueContainer.getContainerData().getKeyCount());
-    //Put Block
-    blockManager.putBlock(keyValueContainer, blockData);
-    assertEquals(1,
-        keyValueContainer.getContainerData().getKeyCount());
-    //Delete Block
-    blockManager.deleteBlock(keyValueContainer, blockID);
-    assertEquals(0,
-        keyValueContainer.getContainerData().getKeyCount());
-    try {
-      blockManager.getBlock(keyValueContainer, blockID);
-      fail("testDeleteBlock");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains(
-          "Unable to find the block", ex);
-    }
-  }
-
-  @Test
-  public void testListBlock() throws Exception {
-    blockManager.putBlock(keyValueContainer, blockData);
-    List<BlockData> listBlockData = blockManager.listBlock(
-        keyValueContainer, 1, 10);
-    assertNotNull(listBlockData);
-    assertEquals(1, listBlockData.size());
-
-    for (long i = 2; i <= 10; i++) {
-      blockID = new BlockID(1L, i);
-      blockData = new BlockData(blockID);
-      blockData.addMetadata("VOLUME", "ozone");
-      blockData.addMetadata("OWNER", "hdfs");
-      List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
-      ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
-          .getLocalID(), 0), 0, 1024);
-      chunkList.add(info.getProtoBufMessage());
-      blockData.setChunks(chunkList);
-      blockManager.putBlock(keyValueContainer, blockData);
-    }
-
-    listBlockData = blockManager.listBlock(
-        keyValueContainer, 1, 10);
-    assertNotNull(listBlockData);
-    assertEquals(10, listBlockData.size());
-  }
-
-  @Test
-  public void testGetNoSuchBlock() throws Exception {
-    assertEquals(0,
-        keyValueContainer.getContainerData().getKeyCount());
-    //Put Block
-    blockManager.putBlock(keyValueContainer, blockData);
-    assertEquals(1,
-        keyValueContainer.getContainerData().getKeyCount());
-    //Delete Block
-    blockManager.deleteBlock(keyValueContainer, blockID);
-    assertEquals(0,
-        keyValueContainer.getContainerData().getKeyCount());
-    try {
-      //Since the block has been deleted, we should not be able to find it
-      blockManager.getBlock(keyValueContainer, blockID);
-      fail("testGetNoSuchBlock failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains(
-          "Unable to find the block", ex);
-      assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult());
-    }
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
deleted file mode 100644
index 84ab56d..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import java.io.File;
-import java.nio.ByteBuffer;
-import java.util.UUID;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.*;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.Mockito.mock;
-
-/**
- * This class is used to test ChunkManager operations.
- */
-public class TestChunkManagerImpl {
-
-  private OzoneConfiguration config;
-  private String scmId = UUID.randomUUID().toString();
-  private VolumeSet volumeSet;
-  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
-  private HddsVolume hddsVolume;
-  private KeyValueContainerData keyValueContainerData;
-  private KeyValueContainer keyValueContainer;
-  private BlockID blockID;
-  private ChunkManagerImpl chunkManager;
-  private ChunkInfo chunkInfo;
-  private ByteBuffer data;
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Before
-  public void setUp() throws Exception {
-    config = new OzoneConfiguration();
-    UUID datanodeId = UUID.randomUUID();
-    hddsVolume = new HddsVolume.Builder(folder.getRoot()
-        .getAbsolutePath()).conf(config).datanodeUuid(datanodeId
-        .toString()).build();
-
-    volumeSet = mock(VolumeSet.class);
-
-    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
-    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
-        .thenReturn(hddsVolume);
-
-    keyValueContainerData = new KeyValueContainerData(1L,
-        (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
-        datanodeId.toString());
-
-    keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
-
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-
-    data = ByteBuffer.wrap("testing write chunks".getBytes(UTF_8));
-    // Creating BlockData
-    blockID = new BlockID(1L, 1L);
-    chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
-        .getLocalID(), 0), 0, data.capacity());
-
-    // Create a ChunkManager object.
-    chunkManager = new ChunkManagerImpl(true);
-
-  }
-
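-  // Default dispatcher context: chunk writes go through the combined
-  // write-and-commit stage rather than separate WRITE_DATA/COMMIT_DATA steps.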
-  private DispatcherContext getDispatcherContext() {
-    return new DispatcherContext.Builder().build();
-  }
-
-  @Test
-  public void testWriteChunkStageWriteAndCommit() throws Exception {
-    // The container is created in setUp(), so these paths should exist.
-    assertNotNull(keyValueContainerData.getChunksPath());
-    File chunksPath = new File(keyValueContainerData.getChunksPath());
-    assertTrue(chunksPath.exists());
-    // Initially the chunks folder should be empty.
-    assertEquals(0, chunksPath.listFiles().length);
-
-    // As no chunks are written to the volume writeBytes should be 0
-    checkWriteIOStats(0, 0);
-    chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-        new DispatcherContext.Builder()
-            .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build());
-    // Now a chunk file is being written with Stage WRITE_DATA, so it should
-    // create a temporary chunk file.
-    assertEquals(1, chunksPath.listFiles().length);
-
-    long term = 0;
-    long index = 0;
-    File chunkFile = ChunkUtils.getChunkFile(keyValueContainerData, chunkInfo);
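-    // Reconstruct the temp-file name used while a write is in flight:
-    // chunk name + delimiter + temp prefix + delimiter + term + delimiter
-    // + index.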
-    File tempChunkFile = new File(chunkFile.getParent(),
-        chunkFile.getName() + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER
-            + OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX
-            + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + term
-            + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + index);
-
-    // As chunk write stage is WRITE_DATA, temp chunk file will be created.
-    assertTrue(tempChunkFile.exists());
-
-    checkWriteIOStats(data.capacity(), 1);
-
-    chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-        new DispatcherContext.Builder()
-            .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build());
-
-    checkWriteIOStats(data.capacity(), 1);
-
-    // Old temp file should have been renamed to chunk file.
-    assertEquals(1, chunksPath.listFiles().length);
-
-    // As commit happened, chunk file should exist.
-    assertTrue(chunkFile.exists());
-    assertFalse(tempChunkFile.exists());
-
-  }
-
-  @Test
-  public void testWriteChunkIncorrectLength() throws Exception {
-    try {
-      long randomLength = 200L;
-      chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
-          .getLocalID(), 0), 0, randomLength);
-      chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-          getDispatcherContext());
-      fail("testWriteChunkIncorrectLength failed");
-    } catch (StorageContainerException ex) {
-      // As we got an exception, writeBytes should be 0.
-      checkWriteIOStats(0, 0);
-      GenericTestUtils.assertExceptionContains("data array does not match " +
-          "the length ", ex);
-      assertEquals(ContainerProtos.Result.INVALID_WRITE_SIZE, ex.getResult());
-    }
-  }
-
-  @Test
-  public void testWriteChunkStageCombinedData() throws Exception {
-    // The container is created in setUp(), so these paths should exist.
-    assertNotNull(keyValueContainerData.getChunksPath());
-    File chunksPath = new File(keyValueContainerData.getChunksPath());
-    assertTrue(chunksPath.exists());
-    // Initially the chunks folder should be empty.
-    assertEquals(0, chunksPath.listFiles().length);
-    checkWriteIOStats(0, 0);
-    chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-        getDispatcherContext());
-    // Now a chunk file is being written with Stage COMBINED_DATA, so it should
-    // create a chunk file.
-    assertEquals(1, chunksPath.listFiles().length);
-    File chunkFile = ChunkUtils.getChunkFile(keyValueContainerData, chunkInfo);
-    assertTrue(chunkFile.exists());
-    checkWriteIOStats(data.capacity(), 1);
-  }
-
-  @Test
-  public void testReadChunk() throws Exception {
-    checkWriteIOStats(0, 0);
-    chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-        getDispatcherContext());
-    checkWriteIOStats(data.capacity(), 1);
-    checkReadIOStats(0, 0);
-    ByteBuffer expectedData = chunkManager.readChunk(keyValueContainer, blockID,
-        chunkInfo, getDispatcherContext());
-    assertEquals(expectedData.limit() - expectedData.position(),
-        chunkInfo.getLen());
-    assertEquals(data.rewind(), expectedData.rewind());
-    checkReadIOStats(expectedData.capacity(), 1);
-  }
-
-  @Test
-  public void testDeleteChunk() throws Exception {
-    File chunksPath = new File(keyValueContainerData.getChunksPath());
-    chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-        getDispatcherContext());
-    assertEquals(1, chunksPath.listFiles().length);
-    chunkManager.deleteChunk(keyValueContainer, blockID, chunkInfo);
-    assertEquals(0, chunksPath.listFiles().length);
-  }
-
-  @Test
-  public void testDeleteChunkUnsupportedRequest() throws Exception {
-    try {
-      chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-          getDispatcherContext());
-      long randomLength = 200L;
-      chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
-          .getLocalID(), 0), 0, randomLength);
-      chunkManager.deleteChunk(keyValueContainer, blockID, chunkInfo);
-      fail("testDeleteChunkUnsupportedRequest");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Not Supported Operation.", ex);
-      assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex.getResult());
-    }
-  }
-
-  @Test
-  public void testReadChunkFileNotExists() throws Exception {
-    try {
-      // trying to read a chunk, where chunk file does not exist
-      ByteBuffer expectedData = chunkManager.readChunk(keyValueContainer,
-          blockID, chunkInfo, getDispatcherContext());
-      fail("testReadChunkFileNotExists failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Unable to find the chunk " +
-          "file.", ex);
-      assertEquals(ContainerProtos.Result.UNABLE_TO_FIND_CHUNK, ex.getResult());
-    }
-  }
-
-  @Test
-  public void testWriteAndReadChunkMultipleTimes() throws Exception {
-    for (int i=0; i<100; i++) {
-      chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
-          .getLocalID(), i), 0, data.capacity());
-      chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-          getDispatcherContext());
-      data.rewind();
-    }
-    checkWriteIOStats(data.capacity()*100, 100);
-    assertTrue(hddsVolume.getVolumeIOStats().getWriteTime() > 0);
-
-    for (int i=0; i<100; i++) {
-      chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
-          .getLocalID(), i), 0, data.capacity());
-      chunkManager.readChunk(keyValueContainer, blockID, chunkInfo,
-          getDispatcherContext());
-    }
-    checkReadIOStats(data.capacity()*100, 100);
-    assertTrue(hddsVolume.getVolumeIOStats().getReadTime() > 0);
-  }
-
-
-  /**
-   * Check write IO stats.
-   * @param length expected number of bytes written
-   * @param opCount expected number of write operations
-   */
-  private void checkWriteIOStats(long length, long opCount) {
-    VolumeIOStats volumeIOStats = hddsVolume.getVolumeIOStats();
-    assertEquals(length, volumeIOStats.getWriteBytes());
-    assertEquals(opCount, volumeIOStats.getWriteOpCount());
-  }
-
-  /**
-   * Check read IO stats.
-   * @param length expected number of bytes read
-   * @param opCount expected number of read operations
-   */
-  private void checkReadIOStats(long length, long opCount) {
-    VolumeIOStats volumeIOStats = hddsVolume.getVolumeIOStats();
-    assertEquals(length, volumeIOStats.getReadBytes());
-    assertEquals(opCount, volumeIOStats.getReadOpCount());
-  }
-}
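The chunk-manager test above exercises the two-stage write path: WRITE_DATA materializes a temporary chunk file, and COMMIT_DATA renames it into the final chunk file. A minimal sketch of that protocol, reusing the test's own fields (chunkManager, keyValueContainer, blockID, chunkInfo, data), so it is illustrative rather than standalone:

    // WRITE_DATA: the payload lands in a temporary chunk file.
    chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
        new DispatcherContext.Builder()
            .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build());
    // At this point the temp file exists but the final chunk file does not.
    chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
        new DispatcherContext.Builder()
            .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build());
    // COMMIT_DATA renames the temp file to the final chunk file; write IO
    // stats are counted once, for the WRITE_DATA stage only.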
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
deleted file mode 100644
index 4fdd994..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ /dev/null
@@ -1,284 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.NoSuchElementException;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_IMPL_LEVELDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_IMPL_ROCKSDB;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * This class is used to test KeyValue container block iterator.
- */
-@RunWith(Parameterized.class)
-public class TestKeyValueBlockIterator {
-
-  private KeyValueContainer container;
-  private KeyValueContainerData containerData;
-  private VolumeSet volumeSet;
-  private Configuration conf;
-  private File testRoot;
-
-  private final String storeImpl;
-
-  public TestKeyValueBlockIterator(String metadataImpl) {
-    this.storeImpl = metadataImpl;
-  }
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {
-        {OZONE_METADATA_STORE_IMPL_LEVELDB},
-        {OZONE_METADATA_STORE_IMPL_ROCKSDB}});
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    testRoot = GenericTestUtils.getRandomizedTestDir();
-    conf = new OzoneConfiguration();
-    conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
-    conf.set(OZONE_METADATA_STORE_IMPL, storeImpl);
-    volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
-  }
-
-
-  @After
-  public void tearDown() {
-    volumeSet.shutdown();
-    FileUtil.fullyDelete(testRoot);
-  }
-
-  @Test
-  public void testKeyValueBlockIteratorWithMixedBlocks() throws Exception {
-
-    long containerID = 100L;
-    int deletedBlocks = 5;
-    int normalBlocks = 5;
-    createContainerWithBlocks(containerID, normalBlocks, deletedBlocks);
-    String containerPath = new File(containerData.getMetadataPath())
-        .getParent();
-    try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerID, new File(containerPath))) {
-
-      int counter = 0;
-      while (keyValueBlockIterator.hasNext()) {
-        BlockData blockData = keyValueBlockIterator.nextBlock();
-        assertEquals(blockData.getLocalID(), counter++);
-      }
-
-      assertFalse(keyValueBlockIterator.hasNext());
-
-      keyValueBlockIterator.seekToFirst();
-      counter = 0;
-      while (keyValueBlockIterator.hasNext()) {
-        BlockData blockData = keyValueBlockIterator.nextBlock();
-        assertEquals(blockData.getLocalID(), counter++);
-      }
-      assertFalse(keyValueBlockIterator.hasNext());
-
-      try {
-        keyValueBlockIterator.nextBlock();
-        fail("nextBlock() should throw once the iterator is exhausted");
-      } catch (NoSuchElementException ex) {
-        GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
-            "for ContainerID " + containerID, ex);
-      }
-    }
-  }
-
-  @Test
-  public void testKeyValueBlockIteratorWithNextBlock() throws Exception {
-    long containerID = 101L;
-    createContainerWithBlocks(containerID, 2, 0);
-    String containerPath = new File(containerData.getMetadataPath())
-        .getParent();
-    try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerID, new File(containerPath))) {
-      long blockID = 0L;
-      assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
-      assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
-
-      try {
-        keyValueBlockIterator.nextBlock();
-        fail("nextBlock() should throw once the iterator is exhausted");
-      } catch (NoSuchElementException ex) {
-        GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
-            "for ContainerID " + containerID, ex);
-      }
-    }
-  }
-
-  @Test
-  public void testKeyValueBlockIteratorWithHasNext() throws Exception {
-    long containerID = 102L;
-    createContainerWithBlocks(containerID, 2, 0);
-    String containerPath = new File(containerData.getMetadataPath())
-        .getParent();
-    try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerID, new File(containerPath))) {
-      long blockID = 0L;
-
-      // Calling hasNext() multiple times should not advance the iterator.
-      assertTrue(keyValueBlockIterator.hasNext());
-      assertTrue(keyValueBlockIterator.hasNext());
-      assertTrue(keyValueBlockIterator.hasNext());
-      assertTrue(keyValueBlockIterator.hasNext());
-      assertTrue(keyValueBlockIterator.hasNext());
-      assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
-
-      assertTrue(keyValueBlockIterator.hasNext());
-      assertTrue(keyValueBlockIterator.hasNext());
-      assertTrue(keyValueBlockIterator.hasNext());
-      assertTrue(keyValueBlockIterator.hasNext());
-      assertTrue(keyValueBlockIterator.hasNext());
-      assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
-
-      keyValueBlockIterator.seekToLast();
-      assertTrue(keyValueBlockIterator.hasNext());
-      assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
-
-      keyValueBlockIterator.seekToFirst();
-      blockID = 0L;
-      assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
-      assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
-
-      try {
-        keyValueBlockIterator.nextBlock();
-        fail("nextBlock() should throw once the iterator is exhausted");
-      } catch (NoSuchElementException ex) {
-        GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
-            "for ContainerID " + containerID, ex);
-      }
-    }
-  }
-
-  @Test
-  public void testKeyValueBlockIteratorWithFilter() throws Exception {
-    long containerId = 103L;
-    int deletedBlocks = 5;
-    int normalBlocks = 5;
-    createContainerWithBlocks(containerId, normalBlocks, deletedBlocks);
-    String containerPath = new File(containerData.getMetadataPath())
-        .getParent();
-    try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerId, new File(containerPath), MetadataKeyFilters
-        .getDeletingKeyFilter())) {
-
-      int counter = 5;
-      while (keyValueBlockIterator.hasNext()) {
-        BlockData blockData = keyValueBlockIterator.nextBlock();
-        assertEquals(blockData.getLocalID(), counter++);
-      }
-    }
-  }
-
-  @Test
-  public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws
-      Exception {
-    long containerId = 104L;
-    createContainerWithBlocks(containerId, 0, 5);
-    String containerPath = new File(containerData.getMetadataPath())
-        .getParent();
-    try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerId, new File(containerPath))) {
-      // All blocks are marked for deletion, so none match the normal-key
-      // filter.
-      assertFalse(keyValueBlockIterator.hasNext());
-    }
-  }
-
-  /**
-   * Creates a container with the specified numbers of normal and deleted
-   * blocks. Normal blocks are inserted first, followed by deleted blocks.
-   * @param containerId ID of the container to create
-   * @param normalBlocks number of regular block entries to insert
-   * @param deletedBlocks number of entries keyed with the deleting prefix
-   * @throws Exception if container creation or the DB writes fail
-   */
-  private void createContainerWithBlocks(long containerId, int
-      normalBlocks, int deletedBlocks) throws
-      Exception {
-    containerData = new KeyValueContainerData(containerId,
-        (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(),
-        UUID.randomUUID().toString());
-    container = new KeyValueContainer(containerData, conf);
-    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID
-        .randomUUID().toString());
-    try(ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData,
-        conf)) {
-
-      List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
-      ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
-      chunkList.add(info.getProtoBufMessage());
-
-      for (int i = 0; i < normalBlocks; i++) {
-        BlockID blockID = new BlockID(containerId, i);
-        BlockData blockData = new BlockData(blockID);
-        blockData.setChunks(chunkList);
-        metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()),
-            blockData
-            .getProtoBufMessage().toByteArray());
-      }
-
-      for (int i = normalBlocks; i < deletedBlocks; i++) {
-        BlockID blockID = new BlockID(containerId, i);
-        BlockData blockData = new BlockData(blockID);
-        blockData.setChunks(chunkList);
-        metadataStore.getStore().put(DFSUtil.string2Bytes(OzoneConsts
-            .DELETING_KEY_PREFIX + blockID.getLocalID()), blockData
-            .getProtoBufMessage().toByteArray());
-      }
-    }
-  }
-
-}
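The filter test above depends on deleted blocks being keyed with OzoneConsts.DELETING_KEY_PREFIX, which the default iterator skips. A compact sketch of iterating only those entries, reusing the test's identifiers (containerID, containerPath) rather than defining a standalone program:

    try (KeyValueBlockIterator it = new KeyValueBlockIterator(
        containerID, new File(containerPath),
        MetadataKeyFilters.getDeletingKeyFilter())) {
      while (it.hasNext()) {
        // Only entries whose DB key carries the deleting prefix survive
        // the filter; their local IDs start after the normal blocks.
        BlockData deletingBlock = it.nextBlock();
      }
    }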
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
deleted file mode 100644
index 81d3065..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ /dev/null
@@ -1,394 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume
-    .RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import org.mockito.Mockito;
-
-import java.io.File;
-
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.List;
-import java.util.UUID;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.ratis.util.Preconditions.assertTrue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.Mockito.mock;
-
-/**
- * Class to test KeyValue Container operations.
- */
-public class TestKeyValueContainer {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-
-  private OzoneConfiguration conf;
-  private String scmId = UUID.randomUUID().toString();
-  private VolumeSet volumeSet;
-  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
-  private KeyValueContainerData keyValueContainerData;
-  private KeyValueContainer keyValueContainer;
-  private UUID datanodeId;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = new OzoneConfiguration();
-    datanodeId = UUID.randomUUID();
-    HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot()
-        .getAbsolutePath()).conf(conf).datanodeUuid(datanodeId
-        .toString()).build();
-
-    volumeSet = mock(VolumeSet.class);
-    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
-    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
-        .thenReturn(hddsVolume);
-
-    keyValueContainerData = new KeyValueContainerData(1L,
-        (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
-        datanodeId.toString());
-
-    keyValueContainer = new KeyValueContainer(
-        keyValueContainerData, conf);
-
-  }
-
-  @Test
-  public void testBlockIterator() throws Exception{
-    keyValueContainerData = new KeyValueContainerData(100L,
-        (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(),
-        datanodeId.toString());
-    keyValueContainer = new KeyValueContainer(
-        keyValueContainerData, conf);
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-    KeyValueBlockIterator blockIterator = keyValueContainer.blockIterator();
-    // No blocks have been created yet, so hasNext() should return false.
-    assertFalse(blockIterator.hasNext());
-    int blockCount = 10;
-    addBlocks(blockCount);
-    blockIterator = keyValueContainer.blockIterator();
-    assertTrue(blockIterator.hasNext());
-    BlockData blockData;
-    int blockCounter = 0;
-    while(blockIterator.hasNext()) {
-      blockData = blockIterator.nextBlock();
-      assertEquals(blockCounter++, blockData.getBlockID().getLocalID());
-    }
-    assertEquals(blockCount, blockCounter);
-  }
-
-  private void addBlocks(int count) throws Exception {
-    long containerId = keyValueContainerData.getContainerID();
-
-    try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer
-        .getContainerData(), conf)) {
-      for (int i = 0; i < count; i++) {
-        // Creating BlockData
-        BlockID blockID = new BlockID(containerId, i);
-        BlockData blockData = new BlockData(blockID);
-        blockData.addMetadata("VOLUME", "ozone");
-        blockData.addMetadata("OWNER", "hdfs");
-        List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
-        ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
-            .getLocalID(), 0), 0, 1024);
-        chunkList.add(info.getProtoBufMessage());
-        blockData.setChunks(chunkList);
-        metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()),
-            blockData
-            .getProtoBufMessage().toByteArray());
-      }
-    }
-  }
-
-  @SuppressWarnings("RedundantCast")
-  @Test
-  public void testCreateContainer() throws Exception {
-
-    // Create Container.
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-
-    keyValueContainerData = keyValueContainer
-        .getContainerData();
-
-    String containerMetaDataPath = keyValueContainerData
-        .getMetadataPath();
-    String chunksPath = keyValueContainerData.getChunksPath();
-
-    // Check whether containerMetaDataPath and chunksPath exists or not.
-    assertTrue(containerMetaDataPath != null);
-    assertTrue(chunksPath != null);
-    //Check whether container file and container db file exists or not.
-    assertTrue(keyValueContainer.getContainerFile().exists(),
-        "Container File does not exist");
-    assertTrue(keyValueContainer.getContainerDBFile().exists(), "Container " +
-        "DB does not exist");
-  }
-
-  @Test
-  public void testContainerImportExport() throws Exception {
-
-    long containerId = keyValueContainer.getContainerData().getContainerID();
-    // Create Container.
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-
-
-    keyValueContainerData = keyValueContainer
-        .getContainerData();
-
-    keyValueContainerData.setState(
-        ContainerProtos.ContainerDataProto.State.CLOSED);
-
-    int numberOfKeysToWrite = 12;
-    // write a few keys to check the key count after import
-    try(ReferenceCountedDB metadataStore =
-        BlockUtils.getDB(keyValueContainerData, conf)) {
-      for (int i = 0; i < numberOfKeysToWrite; i++) {
-        metadataStore.getStore().put(("test" + i).getBytes(UTF_8),
-            "test".getBytes(UTF_8));
-      }
-    }
-    BlockUtils.removeDB(keyValueContainerData, conf);
-
-    Map<String, String> metadata = new HashMap<>();
-    metadata.put("key1", "value1");
-    keyValueContainer.update(metadata, true);
-
-    //destination path
-    File folderToExport = folder.newFile("exported.tar.gz");
-
-    TarContainerPacker packer = new TarContainerPacker();
-
-    //export the container
-    try (FileOutputStream fos = new FileOutputStream(folderToExport)) {
-      keyValueContainer
-          .exportContainerData(fos, packer);
-    }
-
-    //delete the original one
-    keyValueContainer.delete();
-
-    //create a new one
-    KeyValueContainerData containerData =
-        new KeyValueContainerData(containerId, 1,
-            keyValueContainerData.getMaxSize(), UUID.randomUUID().toString(),
-            datanodeId.toString());
-    KeyValueContainer container = new KeyValueContainer(containerData, conf);
-
-    HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
-        .getVolumesList(), 1);
-    String hddsVolumeDir = containerVolume.getHddsRootDir().toString();
-
-    container.populatePathFields(scmId, containerVolume, hddsVolumeDir);
-    try (FileInputStream fis = new FileInputStream(folderToExport)) {
-      container.importContainerData(fis, packer);
-    }
-
-    Assert.assertEquals("value1", containerData.getMetadata().get("key1"));
-    Assert.assertEquals(keyValueContainerData.getContainerDBType(),
-        containerData.getContainerDBType());
-    Assert.assertEquals(keyValueContainerData.getState(),
-        containerData.getState());
-    Assert.assertEquals(numberOfKeysToWrite,
-        containerData.getKeyCount());
-    Assert.assertEquals(keyValueContainerData.getLayOutVersion(),
-        containerData.getLayOutVersion());
-    Assert.assertEquals(keyValueContainerData.getMaxSize(),
-        containerData.getMaxSize());
-    Assert.assertEquals(keyValueContainerData.getBytesUsed(),
-        containerData.getBytesUsed());
-
-    //Can't overwrite existing container
-    try {
-      try (FileInputStream fis = new FileInputStream(folderToExport)) {
-        container.importContainerData(fis, packer);
-      }
-      fail("Container is imported twice. Previous files are overwritten");
-    } catch (IOException ex) {
-      //all good
-    }
-
-  }
-
-  @Test
-  public void testDuplicateContainer() throws Exception {
-    try {
-      // Create Container.
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      fail("testDuplicateContainer failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("ContainerFile already " +
-          "exists", ex);
-      assertEquals(ContainerProtos.Result.CONTAINER_ALREADY_EXISTS, ex
-          .getResult());
-    }
-  }
-
-  @Test
-  public void testDiskFullExceptionCreateContainer() throws Exception {
-
-    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
-        .thenThrow(DiskChecker.DiskOutOfSpaceException.class);
-    try {
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      fail("testDiskFullExceptionCreateContainer failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("disk out of space",
-          ex);
-      assertEquals(ContainerProtos.Result.DISK_OUT_OF_SPACE, ex.getResult());
-    }
-  }
-
-  @Test
-  public void testDeleteContainer() throws Exception {
-    keyValueContainerData.setState(ContainerProtos.ContainerDataProto.State
-        .CLOSED);
-    keyValueContainer = new KeyValueContainer(
-        keyValueContainerData, conf);
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-    keyValueContainer.delete();
-
-    String containerMetaDataPath = keyValueContainerData
-        .getMetadataPath();
-    File containerMetaDataLoc = new File(containerMetaDataPath);
-
-    assertFalse("Container directory still exists", containerMetaDataLoc
-        .getParentFile().exists());
-
-    assertFalse("Container File still exists",
-        keyValueContainer.getContainerFile().exists());
-    assertFalse("Container DB file still exists",
-        keyValueContainer.getContainerDBFile().exists());
-  }
-
-
-  @Test
-  public void testCloseContainer() throws Exception {
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-    keyValueContainer.close();
-
-    keyValueContainerData = keyValueContainer
-        .getContainerData();
-
-    assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
-        keyValueContainerData.getState());
-
-    //Check state in the .container file
-    String containerMetaDataPath = keyValueContainerData
-        .getMetadataPath();
-    File containerFile = keyValueContainer.getContainerFile();
-
-    keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
-    assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
-        keyValueContainerData.getState());
-  }
-
-  @Test
-  public void testReportOfUnhealthyContainer() throws Exception {
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-    Assert.assertNotNull(keyValueContainer.getContainerReport());
-    keyValueContainer.markContainerUnhealthy();
-    File containerFile = keyValueContainer.getContainerFile();
-    keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
-    assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY,
-        keyValueContainerData.getState());
-    Assert.assertNotNull(keyValueContainer.getContainerReport());
-  }
-
-  @Test
-  public void testUpdateContainer() throws IOException {
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-    Map<String, String> metadata = new HashMap<>();
-    metadata.put("VOLUME", "ozone");
-    metadata.put("OWNER", "hdfs");
-    keyValueContainer.update(metadata, true);
-
-    keyValueContainerData = keyValueContainer
-        .getContainerData();
-
-    assertEquals(2, keyValueContainerData.getMetadata().size());
-
-    //Check metadata in the .container file
-    File containerFile = keyValueContainer.getContainerFile();
-
-    keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
-    assertEquals(2, keyValueContainerData.getMetadata().size());
-
-  }
-
-  @Test
-  public void testUpdateContainerUnsupportedRequest() throws Exception {
-    try {
-      keyValueContainerData.setState(
-          ContainerProtos.ContainerDataProto.State.CLOSED);
-      keyValueContainer = new KeyValueContainer(keyValueContainerData, conf);
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      Map<String, String> metadata = new HashMap<>();
-      metadata.put("VOLUME", "ozone");
-      keyValueContainer.update(metadata, false);
-      fail("testUpdateContainerUnsupportedRequest failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Updating a closed container " +
-          "without force option is not allowed", ex);
-      assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex
-          .getResult());
-    }
-  }
-
-
-}
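testContainerImportExport above captures the full container round trip. The essential sequence, sketched with the same classes the test uses; sourceContainer, destContainer, exportedTarGz and targetVolume are placeholder names, not fixtures from the test:

    TarContainerPacker packer = new TarContainerPacker();
    // Export: the test sets the container state to CLOSED before packing.
    try (FileOutputStream out = new FileOutputStream(exportedTarGz)) {
      sourceContainer.exportContainerData(out, packer);
    }
    // Import into a freshly constructed container on a chosen volume.
    destContainer.populatePathFields(scmId, targetVolume,
        targetVolume.getHddsRootDir().toString());
    try (FileInputStream in = new FileInputStream(exportedTarGz)) {
      destContainer.importContainerData(in, packer);
    }
    // A second import into the same container fails with an IOException
    // rather than overwriting the files already on disk.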
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
deleted file mode 100644
index fe702fc..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
+++ /dev/null
@@ -1,270 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import com.google.common.primitives.Longs;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.util.DataTransferThrottler;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.common.ChecksumData;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
-import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.File;
-import java.io.RandomAccessFile;
-import java.util.Arrays;
-import java.util.ArrayList;
-import java.nio.ByteBuffer;
-import java.util.Collection;
-import java.util.List;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-
-
-/**
- * Basic sanity test for the KeyValueContainerCheck class.
- */
-@RunWith(Parameterized.class)
-public class TestKeyValueContainerCheck {
-  private final String storeImpl;
-  private KeyValueContainer container;
-  private KeyValueContainerData containerData;
-  private VolumeSet volumeSet;
-  private OzoneConfiguration conf;
-  private File testRoot;
-
-  public TestKeyValueContainerCheck(String metadataImpl) {
-    this.storeImpl = metadataImpl;
-  }
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {{OZONE_METADATA_STORE_IMPL_LEVELDB},
-        {OZONE_METADATA_STORE_IMPL_ROCKSDB}});
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    this.testRoot = GenericTestUtils.getRandomizedTestDir();
-    conf = new OzoneConfiguration();
-    conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
-    conf.set(OZONE_METADATA_STORE_IMPL, storeImpl);
-    volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
-  }
-
-  @After
-  public void teardown() {
-    volumeSet.shutdown();
-    FileUtil.fullyDelete(testRoot);
-  }
-
-  /**
-   * Sanity test, when there are no corruptions induced.
-   */
-  @Test
-  public void testKeyValueContainerCheckNoCorruption() throws Exception {
-    long containerID = 101;
-    int deletedBlocks = 1;
-    int normalBlocks = 3;
-    int chunksPerBlock = 4;
-    ContainerScrubberConfiguration c = conf.getObject(
-        ContainerScrubberConfiguration.class);
-
-    // test Closed Container
-    createContainerWithBlocks(containerID, normalBlocks, deletedBlocks,
-        chunksPerBlock);
-
-    KeyValueContainerCheck kvCheck =
-        new KeyValueContainerCheck(containerData.getMetadataPath(), conf,
-            containerID);
-
-    // first run checks on an Open Container
-    boolean valid = kvCheck.fastCheck();
-    assertTrue(valid);
-
-    container.close();
-
-    // next run checks on a Closed Container
-    valid = kvCheck.fullCheck(new DataTransferThrottler(
-        c.getBandwidthPerVolume()), null);
-    assertTrue(valid);
-  }
-
-  /**
-   * Sanity test, when there are corruptions induced.
-   */
-  @Test
-  public void testKeyValueContainerCheckCorruption() throws Exception {
-    long containerID = 102;
-    int deletedBlocks = 1;
-    int normalBlocks = 3;
-    int chunksPerBlock = 4;
-    ContainerScrubberConfiguration sc = conf.getObject(
-        ContainerScrubberConfiguration.class);
-
-    // test Closed Container
-    createContainerWithBlocks(containerID, normalBlocks, deletedBlocks,
-        chunksPerBlock);
-
-    container.close();
-
-    KeyValueContainerCheck kvCheck =
-        new KeyValueContainerCheck(containerData.getMetadataPath(), conf,
-            containerID);
-
-    File metaDir = new File(containerData.getMetadataPath());
-    File dbFile = KeyValueContainerLocationUtil
-        .getContainerDBFile(metaDir, containerID);
-    containerData.setDbFile(dbFile);
-    try (ReferenceCountedDB ignored =
-            BlockUtils.getDB(containerData, conf);
-        KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID,
-            new File(containerData.getContainerPath()))) {
-      BlockData block = kvIter.nextBlock();
-      assertFalse(block.getChunks().isEmpty());
-      ContainerProtos.ChunkInfo c = block.getChunks().get(0);
-      File chunkFile = ChunkUtils.getChunkFile(containerData,
-          ChunkInfo.getFromProtoBuf(c));
-      long length = chunkFile.length();
-      assertTrue(length > 0);
-      // forcefully truncate the file to induce failure.
-      try (RandomAccessFile file = new RandomAccessFile(chunkFile, "rws")) {
-        file.setLength(length / 2);
-      }
-      assertEquals(length/2, chunkFile.length());
-    }
-
-    // metadata check should pass.
-    boolean valid = kvCheck.fastCheck();
-    assertTrue(valid);
-
-    // checksum validation should fail.
-    valid = kvCheck.fullCheck(new DataTransferThrottler(
-            sc.getBandwidthPerVolume()), null);
-    assertFalse(valid);
-  }
-
-  /**
-   * Creates a container with normal and deleted blocks.
-   * First it will insert normal blocks, and then it will insert
-   * deleted blocks.
-   */
-  private void createContainerWithBlocks(long containerId, int normalBlocks,
-      int deletedBlocks, int chunksPerBlock) throws Exception {
-    String strBlock = "block";
-    String strChunk = "-chunkFile";
-    long totalBlocks = normalBlocks + deletedBlocks;
-    int unitLen = 1024;
-    int chunkLen = 3 * unitLen;
-    int bytesPerChecksum = 2 * unitLen;
-    Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256,
-        bytesPerChecksum);
-    byte[] chunkData = RandomStringUtils.randomAscii(chunkLen).getBytes();
-    ChecksumData checksumData = checksum.computeChecksum(chunkData);
-
-    containerData = new KeyValueContainerData(containerId,
-        (long) StorageUnit.BYTES.toBytes(
-            chunksPerBlock * chunkLen * totalBlocks),
-        UUID.randomUUID().toString(), UUID.randomUUID().toString());
-    container = new KeyValueContainer(containerData, conf);
-    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
-        UUID.randomUUID().toString());
-    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData,
-        conf)) {
-      ChunkManagerImpl chunkManager = new ChunkManagerImpl(true);
-
-      assertNotNull(containerData.getChunksPath());
-      File chunksPath = new File(containerData.getChunksPath());
-      assertTrue(chunksPath.exists());
-      // Initially chunks folder should be empty.
-      File[] chunkFilesBefore = chunksPath.listFiles();
-      assertNotNull(chunkFilesBefore);
-      assertEquals(0, chunkFilesBefore.length);
-
-      List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
-      for (int i = 0; i < totalBlocks; i++) {
-        BlockID blockID = new BlockID(containerId, i);
-        BlockData blockData = new BlockData(blockID);
-
-        chunkList.clear();
-        for (long chunkCount = 0; chunkCount < chunksPerBlock; chunkCount++) {
-          String chunkName = strBlock + i + strChunk + chunkCount;
-          ChunkInfo info = new ChunkInfo(chunkName, 0, chunkLen);
-          info.setChecksumData(checksumData);
-          chunkList.add(info.getProtoBufMessage());
-          chunkManager
-              .writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData),
-                  new DispatcherContext.Builder()
-                      .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
-                      .build());
-          chunkManager
-              .writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData),
-                  new DispatcherContext.Builder()
-                      .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
-                      .build());
-        }
-        blockData.setChunks(chunkList);
-
-        if (i >= normalBlocks) {
-          // deleted key
-          metadataStore.getStore().put(DFSUtil.string2Bytes(
-              OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()),
-              blockData.getProtoBufMessage().toByteArray());
-        } else {
-          // normal key
-          metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()),
-              blockData.getProtoBufMessage().toByteArray());
-        }
-      }
-
-      File[] chunkFilesAfter = chunksPath.listFiles();
-      assertNotNull(chunkFilesAfter);
-      assertEquals((deletedBlocks + normalBlocks) * chunksPerBlock,
-          chunkFilesAfter.length);
-    }
-  }
-
-}
\ No newline at end of file
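The two tests above pin down the scrubber contract: fastCheck() validates only container metadata and works on open containers, while fullCheck() additionally re-reads chunks and verifies checksums on a closed container. A sketch of that flow, reusing the tests' fixtures (containerData, conf, containerID) and the ContainerScrubberConfiguration obtained from conf:

    ContainerScrubberConfiguration scrubberConf =
        conf.getObject(ContainerScrubberConfiguration.class);
    KeyValueContainerCheck kvCheck = new KeyValueContainerCheck(
        containerData.getMetadataPath(), conf, containerID);
    boolean metadataOk = kvCheck.fastCheck();   // metadata only
    boolean dataOk = kvCheck.fullCheck(new DataTransferThrottler(
        scrubberConf.getBandwidthPerVolume()), null);
    // Truncating a chunk file flips dataOk to false while metadataOk
    // remains true, exactly as the corruption test asserts.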
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java
deleted file mode 100644
index c3e67c7..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.TemporaryFolder;
-import org.junit.rules.Timeout;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.OPEN;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY;
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.Mockito.mock;
-
-/**
- * Tests unhealthy container functionality in the {@link KeyValueContainer}
- * class.
- */
-public class TestKeyValueContainerMarkUnhealthy {
-  public static final Logger LOG = LoggerFactory.getLogger(
-      TestKeyValueContainerMarkUnhealthy.class);
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Rule
-  public Timeout timeout = new Timeout(600_000);
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  private OzoneConfiguration conf;
-  private String scmId = UUID.randomUUID().toString();
-  private VolumeSet volumeSet;
-  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
-  private KeyValueContainerData keyValueContainerData;
-  private KeyValueContainer keyValueContainer;
-  private UUID datanodeId;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = new OzoneConfiguration();
-    datanodeId = UUID.randomUUID();
-    HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot()
-        .getAbsolutePath()).conf(conf).datanodeUuid(datanodeId
-        .toString()).build();
-
-    volumeSet = mock(VolumeSet.class);
-    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
-    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
-        .thenReturn(hddsVolume);
-
-    keyValueContainerData = new KeyValueContainerData(1L,
-        (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
-        datanodeId.toString());
-    final File metaDir = GenericTestUtils.getRandomizedTestDir();
-    metaDir.mkdirs();
-    keyValueContainerData.setMetadataPath(metaDir.getPath());
-
-
-    keyValueContainer = new KeyValueContainer(
-        keyValueContainerData, conf);
-  }
-
-  @After
-  public void teardown() {
-    volumeSet = null;
-    keyValueContainer = null;
-    keyValueContainerData = null;
-  }
-
-  /**
-   * Verify that the .container file is correctly updated when a
-   * container is marked as unhealthy.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testMarkContainerUnhealthy() throws IOException {
-    assertThat(keyValueContainerData.getState(), is(OPEN));
-    keyValueContainer.markContainerUnhealthy();
-    assertThat(keyValueContainerData.getState(), is(UNHEALTHY));
-
-    // Check metadata in the .container file
-    File containerFile = keyValueContainer.getContainerFile();
-
-    keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
-    assertThat(keyValueContainerData.getState(), is(UNHEALTHY));
-  }
-
-  /**
-   * Attempting to close an unhealthy container should fail.
-   * @throws IOException
-   */
-  @Test
-  public void testCloseUnhealthyContainer() throws IOException {
-    keyValueContainer.markContainerUnhealthy();
-    thrown.expect(StorageContainerException.class);
-    keyValueContainer.markContainerForClose();
-  }
-
-  /**
-   * Attempting to mark a closed container as unhealthy should succeed.
-   */
-  @Test
-  public void testMarkClosedContainerAsUnhealthy() throws IOException {
-    // We need to create the container so the compact-on-close operation
-    // does not NPE.
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-    keyValueContainer.close();
-    keyValueContainer.markContainerUnhealthy();
-    assertThat(keyValueContainerData.getState(), is(UNHEALTHY));
-  }
-
-  /**
-   * Attempting to mark a quasi-closed container as unhealthy should succeed.
-   */
-  @Test
-  public void testMarkQuasiClosedContainerAsUnhealthy() throws IOException {
-    // We need to create the container so the sync-on-quasi-close operation
-    // does not NPE.
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-    keyValueContainer.quasiClose();
-    keyValueContainer.markContainerUnhealthy();
-    assertThat(keyValueContainerData.getState(), is(UNHEALTHY));
-  }
-
-  /**
-   * Attempting to mark a closing container as unhealthy should succeed.
-   */
-  @Test
-  public void testMarkClosingContainerAsUnhealthy() throws IOException {
-    keyValueContainer.markContainerForClose();
-    keyValueContainer.markContainerUnhealthy();
-    assertThat(keyValueContainerData.getState(), is(UNHEALTHY));
-  }
-}
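Taken together, the tests above describe the UNHEALTHY transitions: open, closing, quasi-closed, and closed containers may all be marked unhealthy, the new state is persisted to the .container file, and closing is no longer permitted afterwards. In outline, reusing keyValueContainer from the fixture:

    keyValueContainer.markContainerUnhealthy();  // state -> UNHEALTHY,
                                                 // persisted to .container file
    keyValueContainer.markContainerForClose();   // throws
                                                 // StorageContainerException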
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
deleted file mode 100644
index 2c71fef..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-
-import org.mockito.Mockito;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.Mockito.doCallRealMethod;
-import static org.mockito.Mockito.times;
-
-
-import java.io.File;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.UUID;
-
-/**
- * Unit tests for {@link KeyValueHandler}.
- */
-public class TestKeyValueHandler {
-
-  @Rule
-  public TestRule timeout = new Timeout(300000);
-
-  private static HddsDispatcher dispatcher;
-  private static KeyValueHandler handler;
-
-  private final static String DATANODE_UUID = UUID.randomUUID().toString();
-
-  private final String baseDir = MiniDFSCluster.getBaseDirectory();
-  private final String volume = baseDir + "disk1";
-
-  private static final long DUMMY_CONTAINER_ID = 9999;
-
-  @BeforeClass
-  public static void setup() throws StorageContainerException {
-    // Create mock HddsDispatcher and KeyValueHandler.
-    handler = Mockito.mock(KeyValueHandler.class);
-    dispatcher = Mockito.mock(HddsDispatcher.class);
-    Mockito.when(dispatcher.getHandler(any())).thenReturn(handler);
-    Mockito.when(dispatcher.dispatch(any(), any())).thenCallRealMethod();
-    Mockito.when(dispatcher.getContainer(anyLong())).thenReturn(
-        Mockito.mock(KeyValueContainer.class));
-    Mockito.when(dispatcher.getMissingContainerSet())
-        .thenReturn(new HashSet<>());
-    Mockito.when(handler.handle(any(), any(), any())).thenCallRealMethod();
-    doCallRealMethod().when(dispatcher).setMetricsForTesting(any());
-    dispatcher.setMetricsForTesting(Mockito.mock(ContainerMetrics.class));
-    Mockito.when(dispatcher.buildAuditMessageForFailure(any(), any(), any()))
-        .thenCallRealMethod();
-    Mockito.when(dispatcher.buildAuditMessageForSuccess(any(), any()))
-        .thenCallRealMethod();
-  }
-
-  /**
-   * Test that Handler handles different command types correctly.
-   */
-  @Test
-  public void testHandlerCommandHandling() throws Exception {
-
-    // Test Create Container Request handling
-    ContainerCommandRequestProto createContainerRequest =
-        ContainerProtos.ContainerCommandRequestProto.newBuilder()
-            .setCmdType(ContainerProtos.Type.CreateContainer)
-            .setContainerID(DUMMY_CONTAINER_ID)
-            .setDatanodeUuid(DATANODE_UUID)
-            .setCreateContainer(ContainerProtos.CreateContainerRequestProto
-                .getDefaultInstance())
-            .build();
-    DispatcherContext context = new DispatcherContext.Builder().build();
-    dispatcher.dispatch(createContainerRequest, context);
-    Mockito.verify(handler, times(1)).handleCreateContainer(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Read Container Request handling
-    ContainerCommandRequestProto readContainerRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.ReadContainer);
-    dispatcher.dispatch(readContainerRequest, context);
-    Mockito.verify(handler, times(1)).handleReadContainer(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Update Container Request handling
-    ContainerCommandRequestProto updateContainerRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.UpdateContainer);
-    dispatcher.dispatch(updateContainerRequest, context);
-    Mockito.verify(handler, times(1)).handleUpdateContainer(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Delete Container Request handling
-    ContainerCommandRequestProto deleteContainerRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.DeleteContainer);
-    dispatcher.dispatch(deleteContainerRequest, null);
-    Mockito.verify(handler, times(1)).handleDeleteContainer(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test List Container Request handling
-    ContainerCommandRequestProto listContainerRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.ListContainer);
-    dispatcher.dispatch(listContainerRequest, context);
-    Mockito.verify(handler, times(1)).handleUnsupportedOp(
-        any(ContainerCommandRequestProto.class));
-
-    // Test Close Container Request handling
-    ContainerCommandRequestProto closeContainerRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.CloseContainer);
-    dispatcher.dispatch(closeContainerRequest, context);
-    Mockito.verify(handler, times(1)).handleCloseContainer(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Put Block Request handling
-    ContainerCommandRequestProto putBlockRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.PutBlock);
-    dispatcher.dispatch(putBlockRequest, context);
-    Mockito.verify(handler, times(1)).handlePutBlock(
-        any(ContainerCommandRequestProto.class), any(), any());
-
-    // Test Get Block Request handling
-    ContainerCommandRequestProto getBlockRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.GetBlock);
-    dispatcher.dispatch(getBlockRequest, context);
-    Mockito.verify(handler, times(1)).handleGetBlock(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Delete Block Request handling
-    ContainerCommandRequestProto deleteBlockRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.DeleteBlock);
-    dispatcher.dispatch(deleteBlockRequest, context);
-    Mockito.verify(handler, times(1)).handleDeleteBlock(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test List Block Request handling
-    ContainerCommandRequestProto listBlockRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.ListBlock);
-    dispatcher.dispatch(listBlockRequest, context);
-    Mockito.verify(handler, times(2)).handleUnsupportedOp(
-        any(ContainerCommandRequestProto.class));
-
-    // Test Read Chunk Request handling
-    ContainerCommandRequestProto readChunkRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.ReadChunk);
-    dispatcher.dispatch(readChunkRequest, context);
-    Mockito.verify(handler, times(1)).handleReadChunk(
-        any(ContainerCommandRequestProto.class), any(), any());
-
-    // Test Delete Chunk Request handling
-    ContainerCommandRequestProto deleteChunkRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.DeleteChunk);
-    dispatcher.dispatch(deleteChunkRequest, context);
-    Mockito.verify(handler, times(1)).handleDeleteChunk(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Write Chunk Request handling
-    ContainerCommandRequestProto writeChunkRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.WriteChunk);
-    dispatcher.dispatch(writeChunkRequest, context);
-    Mockito.verify(handler, times(1)).handleWriteChunk(
-        any(ContainerCommandRequestProto.class), any(), any());
-
-    // Test List Chunk Request handling
-    ContainerCommandRequestProto listChunkRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.ListChunk);
-    dispatcher.dispatch(listChunkRequest, context);
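-    // Third unsupported op overall (after ListContainer and ListBlock).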
-    Mockito.verify(handler, times(3)).handleUnsupportedOp(
-        any(ContainerCommandRequestProto.class));
-
-    // Test Put Small File Request handling
-    ContainerCommandRequestProto putSmallFileRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.PutSmallFile);
-    dispatcher.dispatch(putSmallFileRequest, context);
-    Mockito.verify(handler, times(1)).handlePutSmallFile(
-        any(ContainerCommandRequestProto.class), any(), any());
-
-    // Test Get Small File Request handling
-    ContainerCommandRequestProto getSmallFileRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.GetSmallFile);
-    dispatcher.dispatch(getSmallFileRequest, context);
-    Mockito.verify(handler, times(1)).handleGetSmallFile(
-        any(ContainerCommandRequestProto.class), any());
-  }
-
-  @Test
-  public void testVolumeSetInKeyValueHandler() throws Exception {
-    File path = GenericTestUtils.getRandomizedTestDir();
-    Configuration conf = new OzoneConfiguration();
-    conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath());
-    VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
-    try {
-      ContainerSet cset = new ContainerSet();
-      int[] interval = new int[1];
-      interval[0] = 2;
-      ContainerMetrics metrics = new ContainerMetrics(interval);
-      DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class);
-      DatanodeStateMachine stateMachine = Mockito.mock(
-          DatanodeStateMachine.class);
-      StateContext context = Mockito.mock(StateContext.class);
-      Mockito.when(stateMachine.getDatanodeDetails())
-          .thenReturn(datanodeDetails);
-      Mockito.when(context.getParent()).thenReturn(stateMachine);
-      KeyValueHandler keyValueHandler = new KeyValueHandler(conf, context, cset,
-          volumeSet, metrics);
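-      // The default volume choosing policy is round-robin.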
-      assertEquals("org.apache.hadoop.ozone.container.common" +
-          ".volume.RoundRobinVolumeChoosingPolicy",
-          keyValueHandler.getVolumeChoosingPolicyForTesting()
-              .getClass().getName());
-
-      // Set a class that is not a subclass of VolumeChoosingPolicy
-      conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY,
-          "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher");
-      try {
-        new KeyValueHandler(conf, context, cset, volumeSet, metrics);
-        Assert.fail("Expected RuntimeException for an invalid volume " +
-            "choosing policy class");
-      } catch (RuntimeException ex) {
-        GenericTestUtils.assertExceptionContains("class org.apache.hadoop" +
-            ".ozone.container.common.impl.HddsDispatcher not org.apache" +
-            ".hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy",
-            ex);
-      }
-    } finally {
-      volumeSet.shutdown();
-      FileUtil.fullyDelete(path);
-    }
-  }
-
-  private ContainerCommandRequestProto getDummyCommandRequestProto(
-      ContainerProtos.Type cmdType) {
-    ContainerCommandRequestProto request =
-        ContainerProtos.ContainerCommandRequestProto.newBuilder()
-            .setCmdType(cmdType)
-            .setContainerID(DUMMY_CONTAINER_ID)
-            .setDatanodeUuid(DATANODE_UUID)
-            .build();
-
-    return request;
-  }
-
-  @Test
-  public void testCloseInvalidContainer() throws IOException {
-    long containerID = 1234L;
-    Configuration conf = new Configuration();
-    KeyValueContainerData kvData = new KeyValueContainerData(containerID,
-        (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(),
-        UUID.randomUUID().toString());
-    KeyValueContainer container = new KeyValueContainer(kvData, conf);
-    kvData.setState(ContainerProtos.ContainerDataProto.State.INVALID);
-
-    // Create Close container request
-    ContainerCommandRequestProto closeContainerRequest =
-        ContainerProtos.ContainerCommandRequestProto.newBuilder()
-            .setCmdType(ContainerProtos.Type.CloseContainer)
-            .setContainerID(DUMMY_CONTAINER_ID)
-            .setDatanodeUuid(DATANODE_UUID)
-            .setCloseContainer(ContainerProtos.CloseContainerRequestProto
-                .getDefaultInstance())
-            .build();
-    dispatcher.dispatch(closeContainerRequest, null);
-
-    Mockito.when(handler.handleCloseContainer(any(), any()))
-        .thenCallRealMethod();
-    doCallRealMethod().when(handler).closeContainer(any());
-    // Closing invalid container should return error response.
-    ContainerProtos.ContainerCommandResponseProto response =
-        handler.handleCloseContainer(closeContainerRequest, container);
-
-    Assert.assertTrue("Close container should return Invalid container error",
-        response.getResult().equals(
-            ContainerProtos.Result.INVALID_CONTAINER_STATE));
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
deleted file mode 100644
index e3ae56a..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_UNHEALTHY;
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-/**
- * Test that KeyValueHandler fails certain operations when the
- * container is unhealthy.
- */
-public class TestKeyValueHandlerWithUnhealthyContainer {
-  public static final Logger LOG = LoggerFactory.getLogger(
-      TestKeyValueHandlerWithUnhealthyContainer.class);
-
-  private static final String DATANODE_UUID = UUID.randomUUID().toString();
-  private static final long DUMMY_CONTAINER_ID = 9999;
-
-  @Test
-  public void testRead() throws IOException {
-    KeyValueContainer container = getMockUnhealthyContainer();
-    KeyValueHandler handler = getDummyHandler();
-
-    ContainerProtos.ContainerCommandResponseProto response =
-        handler.handleReadContainer(
-            getDummyCommandRequestProto(ContainerProtos.Type.ReadContainer),
-            container);
-    assertThat(response.getResult(), is(CONTAINER_UNHEALTHY));
-  }
-
-  @Test
-  public void testGetBlock() throws IOException {
-    KeyValueContainer container = getMockUnhealthyContainer();
-    KeyValueHandler handler = getDummyHandler();
-
-    ContainerProtos.ContainerCommandResponseProto response =
-        handler.handleGetBlock(
-            getDummyCommandRequestProto(ContainerProtos.Type.GetBlock),
-            container);
-    assertThat(response.getResult(), is(CONTAINER_UNHEALTHY));
-  }
-
-  @Test
-  public void testGetCommittedBlockLength() throws IOException {
-    KeyValueContainer container = getMockUnhealthyContainer();
-    KeyValueHandler handler = getDummyHandler();
-
-    ContainerProtos.ContainerCommandResponseProto response =
-        handler.handleGetCommittedBlockLength(
-            getDummyCommandRequestProto(
-                ContainerProtos.Type.GetCommittedBlockLength),
-            container);
-    assertThat(response.getResult(), is(CONTAINER_UNHEALTHY));
-  }
-
-  @Test
-  public void testReadChunk() throws IOException {
-    KeyValueContainer container = getMockUnhealthyContainer();
-    KeyValueHandler handler = getDummyHandler();
-
-    ContainerProtos.ContainerCommandResponseProto response =
-        handler.handleReadChunk(
-            getDummyCommandRequestProto(
-                ContainerProtos.Type.ReadChunk),
-            container, null);
-    assertThat(response.getResult(), is(CONTAINER_UNHEALTHY));
-  }
-
-  @Test
-  public void testDeleteChunk() throws IOException {
-    KeyValueContainer container = getMockUnhealthyContainer();
-    KeyValueHandler handler = getDummyHandler();
-
-    ContainerProtos.ContainerCommandResponseProto response =
-        handler.handleDeleteChunk(
-            getDummyCommandRequestProto(
-                ContainerProtos.Type.DeleteChunk),
-            container);
-    assertThat(response.getResult(), is(CONTAINER_UNHEALTHY));
-  }
-
-  @Test
-  public void testGetSmallFile() throws IOException {
-    KeyValueContainer container = getMockUnhealthyContainer();
-    KeyValueHandler handler = getDummyHandler();
-
-    ContainerProtos.ContainerCommandResponseProto response =
-        handler.handleGetSmallFile(
-            getDummyCommandRequestProto(
-                ContainerProtos.Type.GetSmallFile),
-            container);
-    assertThat(response.getResult(), is(CONTAINER_UNHEALTHY));
-  }
-
-  // -- Helper methods below.
-
-  private KeyValueHandler getDummyHandler() throws IOException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    DatanodeDetails dnDetails = DatanodeDetails.newBuilder()
-        .setUuid(DATANODE_UUID)
-        .setHostName("dummyHost")
-        .setIpAddress("1.2.3.4")
-        .build();
-    DatanodeStateMachine stateMachine = mock(DatanodeStateMachine.class);
-    when(stateMachine.getDatanodeDetails()).thenReturn(dnDetails);
-
-    StateContext context = new StateContext(
-        conf, DatanodeStateMachine.DatanodeStates.RUNNING,
-        stateMachine);
-
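-    // The container set, volume set and metrics are all mocked.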
-    return new KeyValueHandler(
-        new OzoneConfiguration(),
-        context,
-        mock(ContainerSet.class),
-        mock(VolumeSet.class),
-        mock(ContainerMetrics.class));
-  }
-
-  private KeyValueContainer getMockUnhealthyContainer() {
-    KeyValueContainerData containerData = mock(KeyValueContainerData.class);
-    when(containerData.getState()).thenReturn(
-        ContainerProtos.ContainerDataProto.State.UNHEALTHY);
-    return new KeyValueContainer(containerData, new OzoneConfiguration());
-  }
-
-  /**
-   * Construct fake protobuf messages for various types of requests.
-   * This is tedious but necessary for testing: protobuf classes are final
-   * and cannot be mocked by Mockito.
-   *
-   * @param cmdType type of the container command.
-   * @return a dummy request of the given command type.
-   */
-  private ContainerCommandRequestProto getDummyCommandRequestProto(
-      ContainerProtos.Type cmdType) {
-    final ContainerCommandRequestProto.Builder builder =
-        ContainerCommandRequestProto.newBuilder()
-            .setCmdType(cmdType)
-            .setContainerID(DUMMY_CONTAINER_ID)
-            .setDatanodeUuid(DATANODE_UUID);
-
-    final ContainerProtos.DatanodeBlockID fakeBlockId =
-        ContainerProtos.DatanodeBlockID.newBuilder()
-            .setContainerID(DUMMY_CONTAINER_ID).setLocalID(1).build();
-
-    final ContainerProtos.ChunkInfo fakeChunkInfo =
-        ContainerProtos.ChunkInfo.newBuilder()
-            .setChunkName("dummy")
-            .setOffset(0)
-            .setLen(100)
-            .setChecksumData(ContainerProtos.ChecksumData.newBuilder()
-                .setBytesPerChecksum(1)
-                .setType(ContainerProtos.ChecksumType.CRC32)
-                .build())
-            .build();
-
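-    // Attach the request body that matches the command type.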
-    switch (cmdType) {
-    case ReadContainer:
-      builder.setReadContainer(
-          ContainerProtos.ReadContainerRequestProto.newBuilder().build());
-      break;
-    case GetBlock:
-      builder.setGetBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
-          .setBlockID(fakeBlockId).build());
-      break;
-    case GetCommittedBlockLength:
-      builder.setGetCommittedBlockLength(
-          ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder()
-              .setBlockID(fakeBlockId).build());
-      break;
-    case ReadChunk:
-      builder.setReadChunk(ContainerProtos.ReadChunkRequestProto.newBuilder()
-          .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
-      break;
-    case DeleteChunk:
-      builder
-          .setDeleteChunk(ContainerProtos.DeleteChunkRequestProto.newBuilder()
-              .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
-      break;
-    case GetSmallFile:
-      builder
-          .setGetSmallFile(ContainerProtos.GetSmallFileRequestProto.newBuilder()
-              .setBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
-                  .setBlockID(fakeBlockId)
-                  .build())
-              .build());
-      break;
-
-    default:
-      Assert.fail("Unhandled request type " + cmdType + " in unit test");
-    }
-
-    return builder.build();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
deleted file mode 100644
index 9e6f653..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.nio.charset.Charset;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker;
-
-import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
-import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.compress.compressors.CompressorException;
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.compressors.CompressorStreamFactory;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-/**
- * Test the tar/untar for a given container.
- */
-public class TestTarContainerPacker {
-
-  private static final String TEST_DB_FILE_NAME = "test1";
-
-  private static final String TEST_DB_FILE_CONTENT = "test1";
-
-  private static final String TEST_CHUNK_FILE_NAME = "chunk1";
-
-  private static final String TEST_CHUNK_FILE_CONTENT = "This is a chunk";
-
-  private static final String TEST_DESCRIPTOR_FILE_CONTENT = "descriptor";
-
-  private ContainerPacker packer = new TarContainerPacker();
-
-  private static final Path SOURCE_CONTAINER_ROOT =
-      Paths.get("target/test/data/packer-source-dir");
-
-  private static final Path DEST_CONTAINER_ROOT =
-      Paths.get("target/test/data/packer-dest-dir");
-
-  @BeforeClass
-  public static void init() throws IOException {
-    initDir(SOURCE_CONTAINER_ROOT);
-    initDir(DEST_CONTAINER_ROOT);
-  }
-
-  private static void initDir(Path path) throws IOException {
-    if (path.toFile().exists()) {
-      FileUtils.deleteDirectory(path.toFile());
-    }
-    path.toFile().mkdirs();
-  }
-
-  private KeyValueContainerData createContainer(long id, Path dir,
-      OzoneConfiguration conf) throws IOException {
-
-    Path containerDir = dir.resolve("container" + id);
-    Path dbDir = containerDir.resolve("db");
-    Path dataDir = containerDir.resolve("data");
-    Files.createDirectories(dbDir);
-    Files.createDirectories(dataDir);
-
-    KeyValueContainerData containerData = new KeyValueContainerData(
-        id, -1, UUID.randomUUID().toString(), UUID.randomUUID().toString());
-    containerData.setChunksPath(dataDir.toString());
-    containerData.setMetadataPath(dbDir.getParent().toString());
-    containerData.setDbFile(dbDir.toFile());
-
-    return containerData;
-  }
-
-  @Test
-  public void pack() throws IOException, CompressorException {
-
-    //GIVEN
-    OzoneConfiguration conf = new OzoneConfiguration();
-
-    KeyValueContainerData sourceContainerData =
-        createContainer(1L, SOURCE_CONTAINER_ROOT, conf);
-
-    KeyValueContainer sourceContainer =
-        new KeyValueContainer(sourceContainerData, conf);
-
-    //sample db file in the metadata directory
-    try (FileWriter writer = new FileWriter(
-        sourceContainerData.getDbFile().toPath()
-            .resolve(TEST_DB_FILE_NAME)
-            .toFile())) {
-      IOUtils.write(TEST_DB_FILE_CONTENT, writer);
-    }
-
-    //sample chunk file in the chunk directory
-    try (FileWriter writer = new FileWriter(
-        Paths.get(sourceContainerData.getChunksPath())
-            .resolve(TEST_CHUNK_FILE_NAME)
-            .toFile())) {
-      IOUtils.write(TEST_CHUNK_FILE_CONTENT, writer);
-    }
-
-    //sample container descriptor file
-    try (FileWriter writer = new FileWriter(
-        sourceContainer.getContainerFile())) {
-      IOUtils.write(TEST_DESCRIPTOR_FILE_CONTENT, writer);
-    }
-
-    Path targetFile =
-        SOURCE_CONTAINER_ROOT.getParent().resolve("container.tar.gz");
-
-    //WHEN: pack it
-    try (FileOutputStream output = new FileOutputStream(targetFile.toFile())) {
-      packer.pack(sourceContainer, output);
-    }
-
-    //THEN: check the result
-    try (FileInputStream input = new FileInputStream(targetFile.toFile())) {
-      CompressorInputStream uncompressed = new CompressorStreamFactory()
-          .createCompressorInputStream(CompressorStreamFactory.GZIP, input);
-      TarArchiveInputStream tarStream = new TarArchiveInputStream(uncompressed);
-
-      TarArchiveEntry entry;
-      Map<String, TarArchiveEntry> entries = new HashMap<>();
-      while ((entry = tarStream.getNextTarEntry()) != null) {
-        entries.put(entry.getName(), entry);
-      }
-
-      Assert.assertTrue(
-          entries.containsKey("container.yaml"));
-
-    }
-
-    //read the container descriptor only
-    try (FileInputStream input = new FileInputStream(targetFile.toFile())) {
-      String containerYaml = new String(packer.unpackContainerDescriptor(input),
-          Charset.forName(UTF_8.name()));
-      Assert.assertEquals(TEST_DESCRIPTOR_FILE_CONTENT, containerYaml);
-    }
-
-    KeyValueContainerData destinationContainerData =
-        createContainer(2L, DEST_CONTAINER_ROOT, conf);
-
-    KeyValueContainer destinationContainer =
-        new KeyValueContainer(destinationContainerData, conf);
-
-    String descriptor = "";
-
-    //unpackContainerData
-    try (FileInputStream input = new FileInputStream(targetFile.toFile())) {
-      descriptor =
-          new String(packer.unpackContainerData(destinationContainer, input),
-              Charset.forName(UTF_8.name()));
-    }
-
-    assertExampleMetadataDbIsGood(
-        destinationContainerData.getDbFile().toPath());
-    assertExampleChunkFileIsGood(
-        Paths.get(destinationContainerData.getChunksPath()));
-    Assert.assertFalse(
-        "Descriptor file should not been exctarcted by the "
-            + "unpackContainerData Call",
-        destinationContainer.getContainerFile().exists());
-    Assert.assertEquals(TEST_DESCRIPTOR_FILE_CONTENT, descriptor);
-  }
-
-  private void assertExampleMetadataDbIsGood(Path dbPath)
-      throws IOException {
-
-    Path dbFile = dbPath.resolve(TEST_DB_FILE_NAME);
-
-    Assert.assertTrue(
-        "example DB file is missing after pack/unpackContainerData: " + dbFile,
-        Files.exists(dbFile));
-
-    try (FileInputStream testFile = new FileInputStream(dbFile.toFile())) {
-      List<String> strings = IOUtils
-          .readLines(testFile, Charset.forName(UTF_8.name()));
-      Assert.assertEquals(1, strings.size());
-      Assert.assertEquals(TEST_DB_FILE_CONTENT, strings.get(0));
-    }
-  }
-
-  private void assertExampleChunkFileIsGood(Path chunkDirPath)
-      throws IOException {
-
-    Path chunkFile = chunkDirPath.resolve(TEST_CHUNK_FILE_NAME);
-
-    Assert.assertTrue(
-        "example chunk file is missing after pack/unpackContainerData: "
-            + chunkFile,
-        Files.exists(chunkFile));
-
-    try (FileInputStream testFile = new FileInputStream(chunkFile.toFile())) {
-      List<String> strings = IOUtils
-          .readLines(testFile, Charset.forName(UTF_8.name()));
-      Assert.assertEquals(1, strings.size());
-      Assert.assertEquals(TEST_CHUNK_FILE_CONTENT, strings.get(0));
-    }
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
deleted file mode 100644
index 4a1637c..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.keyvalue.helpers;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.nio.ByteBuffer;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.Arrays;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-/**
- * Tests for {@link ChunkUtils}.
- */
-public class TestChunkUtils {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestChunkUtils.class);
-
-  private static final String PREFIX = TestChunkUtils.class.getSimpleName();
-
-  @Test
-  public void concurrentReadOfSameFile() throws Exception {
-    String s = "Hello World";
-    byte[] array = s.getBytes();
-    ByteBuffer data = ByteBuffer.wrap(array);
-    Path tempFile = Files.createTempFile(PREFIX, "concurrent");
-    try {
-      ChunkInfo chunkInfo = new ChunkInfo(tempFile.toString(),
-          0, data.capacity());
-      File file = tempFile.toFile();
-      VolumeIOStats stats = new VolumeIOStats();
-      ChunkUtils.writeData(file, chunkInfo, data, stats, true);
-      int threads = 10;
-      ExecutorService executor = new ThreadPoolExecutor(threads, threads,
-          0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
-      AtomicInteger processed = new AtomicInteger();
-      AtomicBoolean failed = new AtomicBoolean();
-      for (int i = 0; i < threads; i++) {
-        final int threadNumber = i;
-        executor.submit(() -> {
-          try {
-            ByteBuffer readBuffer = ChunkUtils.readData(file, chunkInfo, stats);
-            LOG.info("Read data ({}): {}", threadNumber,
-                new String(readBuffer.array()));
-            if (!Arrays.equals(array, readBuffer.array())) {
-              failed.set(true);
-            }
-          } catch (Exception e) {
-            LOG.error("Failed to read data ({})", threadNumber, e);
-            failed.set(true);
-          }
-          processed.incrementAndGet();
-        });
-      }
-      try {
-        GenericTestUtils.waitFor(() -> processed.get() == threads,
-            100, (int) TimeUnit.SECONDS.toMillis(5));
-      } finally {
-        executor.shutdownNow();
-      }
-      assertEquals(threads * stats.getWriteBytes(), stats.getReadBytes());
-      assertFalse(failed.get());
-    } finally {
-      Files.deleteIfExists(tempFile);
-    }
-  }
-
-  @Test
-  public void concurrentProcessing() throws Exception {
-    final int perThreadWait = 1000;
-    final int maxTotalWait = 5000;
-    int threads = 20;
-    List<Path> paths = new LinkedList<>();
-
-    try {
-      ExecutorService executor = new ThreadPoolExecutor(threads, threads,
-          0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
-      AtomicInteger processed = new AtomicInteger();
-      for (int i = 0; i < threads; i++) {
-        Path path = Files.createTempFile(PREFIX, String.valueOf(i));
-        paths.add(path);
-        executor.submit(() -> {
-          ChunkUtils.processFileExclusively(path, () -> {
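-            // Sleep inside the exclusive section to keep the file claimed.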
-            try {
-              Thread.sleep(perThreadWait);
-            } catch (InterruptedException e) {
-              e.printStackTrace();
-            }
-            processed.incrementAndGet();
-            return null;
-          });
-        });
-      }
-      try {
-        GenericTestUtils.waitFor(() -> processed.get() == threads,
-            100, maxTotalWait);
-      } finally {
-        executor.shutdownNow();
-      }
-    } finally {
-      for (Path path : paths) {
-        FileUtils.deleteQuietly(path.toFile());
-      }
-    }
-  }
-
-  @Test
-  public void serialRead() throws Exception {
-    String s = "Hello World";
-    byte[] array = s.getBytes();
-    ByteBuffer data = ByteBuffer.wrap(array);
-    Path tempFile = Files.createTempFile(PREFIX, "serial");
-    try {
-      ChunkInfo chunkInfo = new ChunkInfo(tempFile.toString(),
-          0, data.capacity());
-      File file = tempFile.toFile();
-      VolumeIOStats stats = new VolumeIOStats();
-      ChunkUtils.writeData(file, chunkInfo, data, stats, true);
-      ByteBuffer readBuffer = ChunkUtils.readData(file, chunkInfo, stats);
-      assertArrayEquals(array, readBuffer.array());
-      assertEquals(stats.getWriteBytes(), stats.getReadBytes());
-    } catch (Exception e) {
-      LOG.error("Failed to read data", e);
-      throw e;
-    } finally {
-      Files.deleteIfExists(tempFile);
-    }
-  }
-
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
deleted file mode 100644
index afbf274..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- *     http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-/**
- * Tests for the KeyValue container implementation.
- */
-package org.apache.hadoop.ozone.container.keyvalue;
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
deleted file mode 100644
index b9b1bea..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.util.Canceler;
-import org.apache.hadoop.hdfs.util.DataTransferThrottler;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.util.Arrays;
-import java.util.Collection;
-
-/**
- * This test verifies the container scrubber metrics functionality.
- */
-public class TestContainerScrubberMetrics {
-  @Test
-  public void testContainerMetaDataScrubberMetrics() {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    ContainerScrubberConfiguration c = conf.getObject(
-        ContainerScrubberConfiguration.class);
-    c.setMetadataScanInterval(0);
-    HddsVolume vol = Mockito.mock(HddsVolume.class);
-    ContainerController cntrl = mockContainerController(vol);
-
-    ContainerMetadataScanner mc = new ContainerMetadataScanner(c, cntrl);
-    mc.runIteration();
-
-    Assert.assertEquals(1, mc.getMetrics().getNumScanIterations());
-    Assert.assertEquals(3, mc.getMetrics().getNumContainersScanned());
-    Assert.assertEquals(1, mc.getMetrics().getNumUnHealthyContainers());
-  }
-
-  @Test
-  public void testContainerDataScrubberMetrics() {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    ContainerScrubberConfiguration c = conf.getObject(
-        ContainerScrubberConfiguration.class);
-    c.setDataScanInterval(0);
-    HddsVolume vol = Mockito.mock(HddsVolume.class);
-    ContainerController cntrl = mockContainerController(vol);
-
-    ContainerDataScanner sc = new ContainerDataScanner(c, cntrl, vol);
-    sc.runIteration();
-
-    ContainerDataScrubberMetrics m = sc.getMetrics();
-    Assert.assertEquals(1, m.getNumScanIterations());
-    Assert.assertEquals(2, m.getNumContainersScanned());
-    Assert.assertEquals(1, m.getNumUnHealthyContainers());
-  }
-
-  private ContainerController mockContainerController(HddsVolume vol) {
-    // healthy container
-    Container<ContainerData> c1 = Mockito.mock(Container.class);
-    Mockito.when(c1.shouldScanData()).thenReturn(true);
-    Mockito.when(c1.scanMetaData()).thenReturn(true);
-    Mockito.when(c1.scanData(
-        Mockito.any(DataTransferThrottler.class),
-        Mockito.any(Canceler.class))).thenReturn(true);
-
-    // unhealthy container (corrupt data)
-    ContainerData c2d = Mockito.mock(ContainerData.class);
-    Mockito.when(c2d.getContainerID()).thenReturn(101L);
-    Container<ContainerData> c2 = Mockito.mock(Container.class);
-    Mockito.when(c2.scanMetaData()).thenReturn(true);
-    Mockito.when(c2.shouldScanData()).thenReturn(true);
-    Mockito.when(c2.scanData(
-        Mockito.any(DataTransferThrottler.class),
-        Mockito.any(Canceler.class))).thenReturn(false);
-    Mockito.when(c2.getContainerData()).thenReturn(c2d);
-
-    // unhealthy container (corrupt metadata)
-    ContainerData c3d = Mockito.mock(ContainerData.class);
-    Mockito.when(c3d.getContainerID()).thenReturn(102L);
-    Container<ContainerData> c3 = Mockito.mock(Container.class);
-    Mockito.when(c3.shouldScanData()).thenReturn(false);
-    Mockito.when(c3.scanMetaData()).thenReturn(false);
-    Mockito.when(c3.getContainerData()).thenReturn(c3d);
-
-    Collection<Container<?>> containers = Arrays.asList(c1, c2, c3);
-    ContainerController cntrl = Mockito.mock(ContainerController.class);
-    Mockito.when(cntrl.getContainers(vol))
-        .thenReturn(containers.iterator());
-    Mockito.when(cntrl.getContainers())
-        .thenReturn(containers.iterator());
-
-    return cntrl;
-  }
-
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
deleted file mode 100644
index 2d679a1..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Random;
-import java.util.UUID;
-import java.util.HashMap;
-import java.util.List;
-import java.util.ArrayList;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.DISK_OUT_OF_SPACE;
-import static org.junit.Assert.assertEquals;
-
-/**
- * This class is used to test OzoneContainer.
- */
-public class TestOzoneContainer {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestOzoneContainer.class);
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OzoneConfiguration conf;
-  private String scmId = UUID.randomUUID().toString();
-  private VolumeSet volumeSet;
-  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
-  private KeyValueContainerData keyValueContainerData;
-  private KeyValueContainer keyValueContainer;
-  private final DatanodeDetails datanodeDetails = createDatanodeDetails();
-  private HashMap<String, Long> commitSpaceMap; //RootDir -> committed space
-  private final int numTestContainers = 10;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.getRoot()
-        .getAbsolutePath());
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        folder.newFolder().getAbsolutePath());
-    commitSpaceMap = new HashMap<String, Long>();
-    volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
-    volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
-  }
-
-  @After
-  public void cleanUp() throws Exception {
-    if (volumeSet != null) {
-      volumeSet.shutdown();
-      volumeSet = null;
-    }
-  }
-
-  @Test
-  public void testBuildContainerMap() throws Exception {
-    // Format the volumes
-    for (HddsVolume volume : volumeSet.getVolumesList()) {
-      volume.format(UUID.randomUUID().toString());
-      commitSpaceMap.put(getVolumeKey(volume), Long.valueOf(0));
-    }
-
-    // Add containers to disk
-    for (int i = 0; i < numTestContainers; i++) {
-      long freeBytes = 0;
-      long volCommitBytes;
-      long maxCap = (long) StorageUnit.GB.toBytes(1);
-
-      HddsVolume myVolume;
-
-      keyValueContainerData = new KeyValueContainerData(i,
-          maxCap, UUID.randomUUID().toString(),
-          datanodeDetails.getUuidString());
-      keyValueContainer = new KeyValueContainer(
-          keyValueContainerData, conf);
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      myVolume = keyValueContainer.getContainerData().getVolume();
-
-      freeBytes = addBlocks(keyValueContainer, 2, 3);
-
-      // update our expectation of volume committed space in the map
-      volCommitBytes = commitSpaceMap.get(getVolumeKey(myVolume)).longValue();
-      Preconditions.checkState(freeBytes >= 0);
-      commitSpaceMap.put(getVolumeKey(myVolume),
-          Long.valueOf(volCommitBytes + freeBytes));
-    }
-
-    DatanodeStateMachine stateMachine = Mockito.mock(
-        DatanodeStateMachine.class);
-    StateContext context = Mockito.mock(StateContext.class);
-    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
-    Mockito.when(context.getParent()).thenReturn(stateMachine);
-    // When OzoneContainer is started, the containers from disk should be
-    // loaded into the containerSet.
-    // Also expected to initialize committed space for each volume.
-    OzoneContainer ozoneContainer =
-        new OzoneContainer(datanodeDetails, conf, context, null);
-
-    ContainerSet containerset = ozoneContainer.getContainerSet();
-    assertEquals(numTestContainers, containerset.containerCount());
-
-    verifyCommittedSpace(ozoneContainer);
-  }
-
-  @Test
-  public void testContainerCreateDiskFull() throws Exception {
-    long containerSize = (long) StorageUnit.MB.toBytes(100);
-
-    // Format the volumes
-    for (HddsVolume volume : volumeSet.getVolumesList()) {
-      volume.format(UUID.randomUUID().toString());
-
-      // eat up all available space except size of 1 container
-      volume.incCommittedBytes(volume.getAvailable() - containerSize);
-      // eat up 10 bytes more, now available space is less than 1 container
-      volume.incCommittedBytes(10);
-    }
-    keyValueContainerData = new KeyValueContainerData(99, containerSize,
-        UUID.randomUUID().toString(), datanodeDetails.getUuidString());
-    keyValueContainer = new KeyValueContainer(keyValueContainerData, conf);
-
-    // we expect an out of space Exception
-    StorageContainerException e = LambdaTestUtils.intercept(
-        StorageContainerException.class,
-        () -> keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId)
-    );
-    if (!DISK_OUT_OF_SPACE.equals(e.getResult())) {
-      LOG.info("Unexpected error during container creation", e);
-    }
-    assertEquals(DISK_OUT_OF_SPACE, e.getResult());
-  }
-
-  //verify committed space on each volume
-  private void verifyCommittedSpace(OzoneContainer oc) {
-    for (HddsVolume dnVol : oc.getVolumeSet().getVolumesList()) {
-      String key = getVolumeKey(dnVol);
-      long expectedCommit = commitSpaceMap.get(key).longValue();
-      long volumeCommitted = dnVol.getCommittedBytes();
-      assertEquals("Volume committed space not initialized correctly",
-          expectedCommit, volumeCommitted);
-    }
-  }
-
-  private long addBlocks(KeyValueContainer container,
-      int blocks, int chunksPerBlock) throws Exception {
-    String strBlock = "block";
-    String strChunk = "-chunkFile";
-    int datalen = 65536;
-    long usedBytes = 0;
-
-    long freeBytes = container.getContainerData().getMaxSize();
-    long containerId = container.getContainerData().getContainerID();
-    ReferenceCountedDB db = BlockUtils.getDB(container
-        .getContainerData(), conf);
-
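-    // Persist block metadata for each block directly into the container DB.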
-    for (int bi = 0; bi < blocks; bi++) {
-      // Creating BlockData
-      BlockID blockID = new BlockID(containerId, bi);
-      BlockData blockData = new BlockData(blockID);
-      List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
-
-      chunkList.clear();
-      for (int ci = 0; ci < chunksPerBlock; ci++) {
-        String chunkName = strBlock + bi + strChunk + ci;
-        long offset = ci * datalen;
-        ChunkInfo info = new ChunkInfo(chunkName, offset, datalen);
-        usedBytes += datalen;
-        chunkList.add(info.getProtoBufMessage());
-      }
-      blockData.setChunks(chunkList);
-      db.getStore().put(Longs.toByteArray(blockID.getLocalID()),
-          blockData.getProtoBufMessage().toByteArray());
-    }
-
-    // remaining available capacity of the container
-    return (freeBytes - usedBytes);
-  }
-
-  private String getVolumeKey(HddsVolume volume) {
-    return volume.getHddsRootDir().getPath();
-  }
-
-  private DatanodeDetails createDatanodeDetails() {
-    Random random = new Random();
-    String ipAddress =
-        random.nextInt(256) + "." + random.nextInt(256) + "." + random
-            .nextInt(256) + "." + random.nextInt(256);
-
-    String uuid = UUID.randomUUID().toString();
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, 0);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, 0);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, 0);
-    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
-    builder.setUuid(uuid)
-        .setHostName("localhost")
-        .setIpAddress(ipAddress)
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort);
-    return builder.build();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
deleted file mode 100644
index c3d3b17..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.replication;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-/**
- * Test the replication supervisor.
- */
-public class TestReplicationSupervisor {
-
-  private OzoneConfiguration conf = new OzoneConfiguration();
-
-  @Test
-  public void normal() throws Exception {
-    //GIVEN
-    ContainerSet set = new ContainerSet();
-
-    FakeReplicator replicator = new FakeReplicator(set);
-    ReplicationSupervisor supervisor =
-        new ReplicationSupervisor(set, replicator, 5);
-
-    List<DatanodeDetails> datanodes = IntStream.range(1, 3)
-        .mapToObj(v -> Mockito.mock(DatanodeDetails.class))
-        .collect(Collectors.toList());
-
-    try {
-      //WHEN
-      supervisor.addTask(new ReplicationTask(1L, datanodes));
-      supervisor.addTask(new ReplicationTask(1L, datanodes));
-      supervisor.addTask(new ReplicationTask(1L, datanodes));
-      supervisor.addTask(new ReplicationTask(2L, datanodes));
-      supervisor.addTask(new ReplicationTask(2L, datanodes));
-      supervisor.addTask(new ReplicationTask(3L, datanodes));
-      //THEN
-      LambdaTestUtils.await(200_000, 1000,
-          () -> supervisor.getInFlightReplications() == 0);
-
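-      // Duplicate tasks for the same container ID are deduplicated.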
-      Assert.assertEquals(3, replicator.replicated.size());
-
-    } finally {
-      supervisor.stop();
-    }
-  }
-
-  @Test
-  public void duplicateMessageAfterAWhile() throws Exception {
-    //GIVEN
-    ContainerSet set = new ContainerSet();
-
-    FakeReplicator replicator = new FakeReplicator(set);
-    ReplicationSupervisor supervisor =
-        new ReplicationSupervisor(set, replicator, 2);
-
-    List<DatanodeDetails> datanodes = IntStream.range(1, 3)
-        .mapToObj(v -> Mockito.mock(DatanodeDetails.class))
-        .collect(Collectors.toList());
-
-    try {
-      //WHEN
-      supervisor.addTask(new ReplicationTask(1L, datanodes));
-      LambdaTestUtils.await(200_000, 1000,
-          () -> supervisor.getInFlightReplications() == 0);
-      supervisor.addTask(new ReplicationTask(1L, datanodes));
-      LambdaTestUtils.await(200_000, 1000,
-          () -> supervisor.getInFlightReplications() == 0);
-
-      //THEN
-      System.out.println(replicator.replicated.get(0));
-
-      Assert.assertEquals(1, replicator.replicated.size());
-
-    } finally {
-      supervisor.stop();
-    }
-  }
-
-  private class FakeReplicator implements ContainerReplicator {
-
-    private List<ReplicationTask> replicated = new ArrayList<>();
-
-    private ContainerSet containerSet;
-
-    FakeReplicator(ContainerSet set) {
-      this.containerSet = set;
-    }
-
-    @Override
-    public void replicate(ReplicationTask task) {
-      KeyValueContainerData kvcd =
-          new KeyValueContainerData(task.getContainerId(), 100L,
-              UUID.randomUUID().toString(), UUID.randomUUID().toString());
-      KeyValueContainer kvc =
-          new KeyValueContainer(kvcd, conf);
-      try {
-        // simulate a slow download
-        Thread.sleep(100);
-        replicated.add(task);
-        containerSet.addContainer(kvc);
-      } catch (Exception e) {
-        throw new RuntimeException(e);
-      }
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
deleted file mode 100644
index 5c905e0..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Tests for the container replication.
- */
-package org.apache.hadoop.ozone.container.replication;
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
deleted file mode 100644
index a136983..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.testutils;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.container.keyvalue.statemachine.background
-    .BlockDeletingService;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * A test class implementation for {@link BlockDeletingService}.
- */
-public class BlockDeletingServiceTestImpl
-    extends BlockDeletingService {
-
-  // the service timeout
-  private static final int SERVICE_TIMEOUT_IN_MILLISECONDS = 0;
-
-  // tests only
-  private CountDownLatch latch;
-  private Thread testingThread;
-  private AtomicInteger numOfProcessed = new AtomicInteger(0);
-
-  public BlockDeletingServiceTestImpl(OzoneContainer container,
-      int serviceInterval, Configuration conf) {
-    super(container, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS,
-        TimeUnit.MILLISECONDS, conf);
-  }
-
-  @VisibleForTesting
-  public void runDeletingTasks() {
-    if (latch.getCount() > 0) {
-      this.latch.countDown();
-    } else {
-      throw new IllegalStateException("Count already reaches zero");
-    }
-  }
-
-  @VisibleForTesting
-  public boolean isStarted() {
-    return latch != null && testingThread.isAlive();
-  }
-
-  public int getTimesOfProcessed() {
-    return numOfProcessed.get();
-  }
-
-  // Override the implementation to start a single on-call control thread.
-  @Override
-  public void start() {
-    PeriodicalTask svc = new PeriodicalTask();
-    // In test mode, each iteration waits on a latch that runDeletingTasks()
-    // counts down.
-    Runnable r = () -> {
-      while (true) {
-        latch = new CountDownLatch(1);
-        try {
-          latch.await();
-        } catch (InterruptedException e) {
-          break;
-        }
-        Future<?> future = this.getExecutorService().submit(svc);
-        try {
-          // for tests, we only wait for 3s for completion
-          future.get(3, TimeUnit.SECONDS);
-          numOfProcessed.incrementAndGet();
-        } catch (Exception e) {
-          return;
-        }
-      }
-    };
-
-    testingThread = new ThreadFactoryBuilder()
-        .setDaemon(true)
-        .build()
-        .newThread(r);
-    testingThread.start();
-  }
-
-  @Override
-  public void shutdown() {
-    testingThread.interrupt();
-    super.shutdown();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
deleted file mode 100644
index 4e8a90b..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.testutils;
-// Helper classes for ozone and container tests.
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/resources/additionalfields.container b/hadoop-hdds/container-service/src/test/resources/additionalfields.container
deleted file mode 100644
index faaed06..0000000
--- a/hadoop-hdds/container-service/src/test/resources/additionalfields.container
+++ /dev/null
@@ -1,14 +0,0 @@
-!<KeyValueContainerData>
-containerDBType: RocksDB
-chunksPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1
-containerID: 9223372036854775807
-containerType: KeyValueContainer
-metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1
-layOutVersion: 1
-maxSize: 5368709120
-originPipelineId: 1297e8a9-2850-4ced-b96c-5ae31d2c73ad
-originNodeId: 7f541a06-6c26-476d-9994-c6e1947e11cb
-metadata: {OWNER: ozone, VOLUME: hdfs}
-state: CLOSED
-aclEnabled: true
-checksum: 61db56da7d50798561b5365c123c5fbf7faf99fbbbd571a746af79020b7f79ba
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/resources/incorrect.checksum.container b/hadoop-hdds/container-service/src/test/resources/incorrect.checksum.container
deleted file mode 100644
index ce32947..0000000
--- a/hadoop-hdds/container-service/src/test/resources/incorrect.checksum.container
+++ /dev/null
@@ -1,13 +0,0 @@
-!<KeyValueContainerData>
-containerDBType: RocksDB
-chunksPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
-containerID: 9223372036854775807
-containerType: KeyValueContainer
-metadataPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
-layOutVersion: 1
-maxSize: 5368709120
-originPipelineId: 4d41dd20-6d73-496a-b247-4c6cb483f54e
-originNodeId: 54842560-67a5-48a5-a7d4-4701d9538706
-metadata: {OWNER: ozone, VOLUME: hdfs}
-state: OPEN
-checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/resources/incorrect.container b/hadoop-hdds/container-service/src/test/resources/incorrect.container
deleted file mode 100644
index 38384c8..0000000
--- a/hadoop-hdds/container-service/src/test/resources/incorrect.container
+++ /dev/null
@@ -1,13 +0,0 @@
-!<KeyValueContainerData>
-containerDBType: RocksDB
-chunksPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1
-containerID: 9223372036854775807
-containerType: KeyValueContainer
-metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1
-layOutVersion: 1
-maxSize: 5368709120
-originPipelineId: b2c96aa4-b757-4f97-b286-6fb80a1baf8e
-originNodeId: 6dcfb385-caea-4efb-9ef3-f87fadca0f51
-metadata: {OWNER: ozone, VOLUME: hdfs}
-state: NO_SUCH_STATE
-checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/resources/log4j.properties b/hadoop-hdds/container-service/src/test/resources/log4j.properties
deleted file mode 100644
index bb5cbe5..0000000
--- a/hadoop-hdds/container-service/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#   Licensed to the Apache Software Foundation (ASF) under one or more
-#   contributor license agreements.  See the NOTICE file distributed with
-#   this work for additional information regarding copyright ownership.
-#   The ASF licenses this file to You under the Apache License, Version 2.0
-#   (the "License"); you may not use this file except in compliance with
-#   the License.  You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-#
-# log4j configuration used during build and unit tests
-
-log4j.rootLogger=INFO,stdout
-log4j.threshold=ALL
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle-noframes-sorted.xsl b/hadoop-hdds/dev-support/checkstyle/checkstyle-noframes-sorted.xsl
deleted file mode 100644
index 7f2aedf..0000000
--- a/hadoop-hdds/dev-support/checkstyle/checkstyle-noframes-sorted.xsl
+++ /dev/null
@@ -1,189 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<xsl:stylesheet	xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html" indent="yes"/>
-<xsl:decimal-format decimal-separator="." grouping-separator="," />
-
-<xsl:key name="files" match="file" use="@name" />
-
-<!-- Checkstyle XML Style Sheet by Stephane Bailliez <sbailliez@apache.org>         -->
-<!-- Part of the Checkstyle distribution found at http://checkstyle.sourceforge.net -->
-<!-- Usage (generates checkstyle_report.html):                                      -->
-<!--    <checkstyle failonviolation="false" config="${check.config}">               -->
-<!--      <fileset dir="${src.dir}" includes="**/*.java"/>                          -->
-<!--      <formatter type="xml" toFile="${doc.dir}/checkstyle_report.xml"/>         -->
-<!--    </checkstyle>                                                               -->
-<!--    <style basedir="${doc.dir}" destdir="${doc.dir}"                            -->
-<!--            includes="checkstyle_report.xml"                                    -->
-<!--            style="${doc.dir}/checkstyle-noframes-sorted.xsl"/>                 -->
-
-<xsl:template match="checkstyle">
-  <html>
-    <head>
-      <style type="text/css">
-    .bannercell {
-      border: 0px;
-      padding: 0px;
-    }
-    body {
-      margin-left: 10;
-      margin-right: 10;
-      font:normal 80% arial,helvetica,sanserif;
-      background-color:#FFFFFF;
-      color:#000000;
-    }
-    .a td {
-      background: #efefef;
-    }
-    .b td {
-      background: #fff;
-    }
-    th, td {
-      text-align: left;
-      vertical-align: top;
-    }
-    th {
-      font-weight:bold;
-      background: #ccc;
-      color: black;
-    }
-    table, th, td {
-      font-size:100%;
-      border: none
-    }
-    table.log tr td, tr th {
-
-    }
-    h2 {
-      font-weight:bold;
-      font-size:140%;
-      margin-bottom: 5;
-    }
-    h3 {
-      font-size:100%;
-      font-weight:bold;
-      background: #525D76;
-      color: white;
-      text-decoration: none;
-      padding: 5px;
-      margin-right: 2px;
-      margin-left: 2px;
-      margin-bottom: 0;
-    }
-    </style>
-    </head>
-    <body>
-      <a name="top"></a>
-      <!-- jakarta logo -->
-      <table border="0" cellpadding="0" cellspacing="0" width="100%">
-      <tr>
-        <td class="bannercell" rowspan="2">
-          <!--a href="http://jakarta.apache.org/">
-          <img src="http://jakarta.apache.org/images/jakarta-logo.gif" alt="http://jakarta.apache.org" align="left" border="0"/>
-          </a-->
-        </td>
-        <td class="text-align:right"><h2>CheckStyle Audit</h2></td>
-      </tr>
-        <tr>
-          <td class="text-align:right">Designed for use with <a href='http://checkstyle.sourceforge.net/'>CheckStyle</a> and <a href='http://jakarta.apache.org'>Ant</a>.</td>
-        </tr>
-      </table>
-      <hr size="1"/>
-
-      <!-- Summary part -->
-      <xsl:apply-templates select="." mode="summary"/>
-      <hr size="1" width="100%" align="left"/>
-
-      <!-- Package List part -->
-      <xsl:apply-templates select="." mode="filelist"/>
-      <hr size="1" width="100%" align="left"/>
-
-      <!-- For each package create its part -->
-      <xsl:apply-templates select="file[@name and generate-id(.) = generate-id(key('files', @name))]" />
-
-      <hr size="1" width="100%" align="left"/>
-
-    </body>
-  </html>
-</xsl:template>
-
-  <xsl:template match="checkstyle" mode="filelist">
-    <h3>Files</h3>
-    <table class="log" border="0" cellpadding="5" cellspacing="2" width="100%">
-      <tr>
-        <th>Name</th>
-        <th>Errors</th>
-      </tr>
-      <xsl:for-each select="file[@name and generate-id(.) = generate-id(key('files', @name))]">
-        <xsl:sort data-type="number" order="descending" select="count(key('files', @name)/error)"/>
-        <xsl:variable name="errorCount" select="count(error)"/>
-        <tr>
-          <xsl:call-template name="alternated-row"/>
-          <td><a href="#f-{@name}"><xsl:value-of select="@name"/></a></td>
-          <td><xsl:value-of select="$errorCount"/></td>
-        </tr>
-      </xsl:for-each>
-    </table>
-  </xsl:template>
-
-  <xsl:template match="file">
-    <a name="f-{@name}"></a>
-    <h3>File <xsl:value-of select="@name"/></h3>
-
-    <table class="log" border="0" cellpadding="5" cellspacing="2" width="100%">
-      <tr>
-        <th>Error Description</th>
-        <th>Line</th>
-      </tr>
-      <xsl:for-each select="key('files', @name)/error">
-        <xsl:sort data-type="number" order="ascending" select="@line"/>
-        <tr>
-          <xsl:call-template name="alternated-row"/>
-          <td><xsl:value-of select="@message"/></td>
-          <td><xsl:value-of select="@line"/></td>
-        </tr>
-      </xsl:for-each>
-    </table>
-    <a href="#top">Back to top</a>
-  </xsl:template>
-
-  <xsl:template match="checkstyle" mode="summary">
-    <h3>Summary</h3>
-    <xsl:variable name="fileCount" select="count(file[@name and generate-id(.) = generate-id(key('files', @name))])"/>
-    <xsl:variable name="errorCount" select="count(file/error)"/>
-    <table class="log" border="0" cellpadding="5" cellspacing="2" width="100%">
-      <tr>
-        <th>Files</th>
-        <th>Errors</th>
-      </tr>
-      <tr>
-        <xsl:call-template name="alternated-row"/>
-        <td><xsl:value-of select="$fileCount"/></td>
-        <td><xsl:value-of select="$errorCount"/></td>
-      </tr>
-    </table>
-  </xsl:template>
-
-  <xsl:template name="alternated-row">
-    <xsl:attribute name="class">
-      <xsl:if test="position() mod 2 = 1">a</xsl:if>
-      <xsl:if test="position() mod 2 = 0">b</xsl:if>
-    </xsl:attribute>
-  </xsl:template>
-</xsl:stylesheet>
-
-
diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
deleted file mode 100644
index 1c43741..0000000
--- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
+++ /dev/null
@@ -1,196 +0,0 @@
-<?xml version="1.0"?>
-<!DOCTYPE module PUBLIC
-    "-//Checkstyle//DTD Checkstyle Configuration 1.2//EN"
-    "https://checkstyle.org/dtds/configuration_1_2.dtd">
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!--
-
-  Checkstyle configuration for Hadoop HDDS that is based on the sun_checks.xml file
-  that is bundled with Checkstyle and includes checks for:
-
-    - the Java Language Specification at
-      http://java.sun.com/docs/books/jls/second_edition/html/index.html
-
-    - the Sun Code Conventions at http://java.sun.com/docs/codeconv/
-
-    - the Javadoc guidelines at
-      http://java.sun.com/j2se/javadoc/writingdoccomments/index.html
-
-    - the JDK Api documentation http://java.sun.com/j2se/docs/api/index.html
-
-    - some best practices
-
-  Checkstyle is very configurable. Be sure to read the documentation at
-  http://checkstyle.sf.net (or in your downloaded distribution).
-
-  Most Checks are configurable, be sure to consult the documentation.
-
-  To completely disable a check, just comment it out or delete it from the file.
-
-  Finally, it is worth reading the documentation.
-
--->
-
-<module name="Checker">
-
-    <module name="SuppressWarningsFilter"/>
-
-    <!-- Checks that a package.html file exists for each package.     -->
-    <!-- See http://checkstyle.sf.net/config_javadoc.html#PackageHtml -->
-    <module name="JavadocPackage"/>
-
-    <!-- Checks whether files end with a new line.                        -->
-    <!-- See http://checkstyle.sf.net/config_misc.html#NewlineAtEndOfFile -->
-    <!-- module name="NewlineAtEndOfFile"/-->
-
-    <!-- Checks that property files contain the same keys.         -->
-    <!-- See http://checkstyle.sf.net/config_misc.html#Translation -->
-    <module name="Translation"/>
-
-    <!-- We have many existing long files, this check ends up being spurious -->
-    <!--<module name="FileLength">-->
-    <module name="FileTabCharacter"/>
-
-    <module name="TreeWalker">
-
-        <module name="SuppressWarningsHolder"/>
-        <module name="SuppressionCommentFilter"/>
-        <module name="SuppressWithNearbyCommentFilter"/>
-
-
-        <!-- Checks for Javadoc comments.                     -->
-        <!-- See http://checkstyle.sf.net/config_javadoc.html -->
-        <module name="JavadocType">
-          <property name="scope" value="public"/>
-          <property name="allowMissingParamTags" value="true"/>
-        </module>
-        <module name="JavadocStyle"/>
-
-        <!-- Checks for Naming Conventions.                  -->
-        <!-- See http://checkstyle.sf.net/config_naming.html -->
-        <module name="ConstantName"/>
-        <module name="LocalFinalVariableName"/>
-        <module name="LocalVariableName"/>
-        <module name="MemberName"/>
-        <module name="MethodName"/>
-        <module name="PackageName"/>
-        <module name="ParameterName"/>
-        <module name="StaticVariableName"/>
-        <module name="TypeName"/>
-
-
-        <!-- Checks for Headers                                -->
-        <!-- See http://checkstyle.sf.net/config_header.html   -->
-        <!-- <module name="Header">                            -->
-            <!-- The follow property value demonstrates the ability     -->
-            <!-- to have access to ANT properties. In this case it uses -->
-            <!-- the ${basedir} property to allow Checkstyle to be run  -->
-            <!-- from any directory within a project. See property      -->
-            <!-- expansion,                                             -->
-            <!-- http://checkstyle.sf.net/config.html#properties        -->
-            <!-- <property                                              -->
-            <!--     name="headerFile"                                  -->
-            <!--     value="${basedir}/java.header"/>                   -->
-        <!-- </module> -->
-
-        <!-- Following interprets the header file as regular expressions. -->
-        <!-- <module name="RegexpHeader"/>                                -->
-
-
-        <!-- Checks for imports                              -->
-        <!-- See http://checkstyle.sf.net/config_import.html -->
-        <module name="IllegalImport"/> <!-- defaults to sun.* packages -->
-        <module name="RedundantImport"/>
-        <module name="UnusedImports"/>
-
-
-        <!-- Checks for Size Violations.                    -->
-        <!-- See http://checkstyle.sf.net/config_sizes.html -->
-        <module name="LineLength"/>
-        <module name="MethodLength"/>
-        <module name="ParameterNumber">
-          <property name="ignoreOverriddenMethods" value="true"/>
-        </module>
-
-
-        <!-- Checks for whitespace                               -->
-        <!-- See http://checkstyle.sf.net/config_whitespace.html -->
-        <module name="EmptyForIteratorPad"/>
-        <module name="MethodParamPad"/>
-        <module name="NoWhitespaceAfter"/>
-        <module name="NoWhitespaceBefore"/>
-        <module name="ParenPad"/>
-        <module name="TypecastParenPad"/>
-        <module name="WhitespaceAfter">
-          <property name="tokens" value="COMMA, SEMI"/>
-        </module>
-
-
-        <!-- Modifier Checks                                    -->
-        <!-- See http://checkstyle.sf.net/config_modifiers.html -->
-        <!-- This one is nitty, disable -->
-        <!-- <module name="ModifierOrder"/> -->
-        <module name="RedundantModifier"/>
-
-
-        <!-- Checks for blocks. You know, those {}'s         -->
-        <!-- See http://checkstyle.sf.net/config_blocks.html -->
-        <module name="AvoidNestedBlocks"/>
-        <module name="EmptyBlock"/>
-        <module name="LeftCurly"/>
-        <module name="NeedBraces"/>
-        <module name="RightCurly"/>
-
-
-        <!-- Checks for common coding problems               -->
-        <!-- See http://checkstyle.sf.net/config_coding.html -->
-        <!-- module name="AvoidInlineConditionals"/-->
-        <module name="EmptyStatement"/>
-        <module name="EqualsHashCode"/>
-        <module name="HiddenField">
-          <property name="ignoreConstructorParameter" value="true"/>
-          <property name="ignoreSetter" value="true"/>
-        </module>
-        <module name="IllegalInstantiation"/>
-        <module name="InnerAssignment"/>
-        <module name="MissingSwitchDefault"/>
-        <module name="SimplifyBooleanExpression"/>
-        <module name="SimplifyBooleanReturn"/>
-
-        <!-- Checks for class design                         -->
-        <!-- See http://checkstyle.sf.net/config_design.html -->
-        <module name="FinalClass"/>
-        <module name="HideUtilityClassConstructor"/>
-        <module name="InterfaceIsType"/>
-        <module name="VisibilityModifier"/>
-
-
-        <!-- Miscellaneous other checks.                   -->
-        <!-- See http://checkstyle.sf.net/config_misc.html -->
-        <module name="ArrayTypeStyle"/>
-        <module name="Indentation">
-            <property name="basicOffset" value="2" />
-            <property name="caseIndent" value="0" />
-        </module>
-        <!--<module name="TodoComment"/>-->
-        <module name="UpperEll"/>
-
-    </module>
-
-</module>
diff --git a/hadoop-hdds/dev-support/checkstyle/suppressions.xml b/hadoop-hdds/dev-support/checkstyle/suppressions.xml
deleted file mode 100644
index 7bc9479..0000000
--- a/hadoop-hdds/dev-support/checkstyle/suppressions.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<!DOCTYPE suppressions PUBLIC
-    "-//Checkstyle//DTD SuppressionFilter Configuration 1.1//EN"
-    "https://checkstyle.org/dtds/suppressions_1_1.dtd">
-
-<suppressions>
-  <suppress checks="JavadocPackage" files="[\\/]src[\\/]test[\\/].*"/>
-</suppressions>
diff --git a/hadoop-hdds/docs/README.md b/hadoop-hdds/docs/README.md
deleted file mode 100644
index 8d5cdb7..0000000
--- a/hadoop-hdds/docs/README.md
+++ /dev/null
@@ -1,55 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-# Hadoop Ozone/HDDS docs
-
-This subproject contains the inline documentation for Ozone/HDDS components.
-
-You can create a new page with:
-
-```
-hugo new content/title.md
-```
-
-You can check the rendering with:
-
-```
-hugo serve
-```
-
-This maven project will create the rendered HTML pages during the build (ONLY if hugo is available),
-and the dist project will include the documentation.
-
-You can adjust the menu hierarchy by adjusting the header of the markdown file:
-
-To show it in the main header add the menu entry:
-
-```
----
-menu: main
----
-```
-
-To show it as a subpage, you can set the parent. (The value could be the title of the parent page,
-or you can define an `id: ...` in the parent markdown and use that in the parent reference.)
-
-```
----
-menu:
-   main:
-	   parent: "Getting started"
----
-```
diff --git a/hadoop-hdds/docs/archetypes/default.md b/hadoop-hdds/docs/archetypes/default.md
deleted file mode 100644
index f4cc999..0000000
--- a/hadoop-hdds/docs/archetypes/default.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: "{{ replace .Name "-" " " | title }}"
-menu: main
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
diff --git a/hadoop-hdds/docs/config.yaml b/hadoop-hdds/docs/config.yaml
deleted file mode 100644
index 7b75888..0000000
--- a/hadoop-hdds/docs/config.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-languageCode: "en-us"
-DefaultContentLanguage: "en"
-title: "Ozone"
-theme: "ozonedoc"
-pygmentsCodeFences: true
-uglyurls: true
-relativeURLs: true
-disableKinds:
-- taxonomy
-- taxonomyTerm
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/_index.md b/hadoop-hdds/docs/content/_index.md
deleted file mode 100644
index bb1bf9a..0000000
--- a/hadoop-hdds/docs/content/_index.md
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: Overview
-menu: main
-weight: -10
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-# Apache Hadoop Ozone
-
-<img src="ozone-usage.png" style="max-width: 60%;"/>
-
-*_Ozone is a scalable, redundant, and distributed object store for Hadoop.
-Apart from scaling to billions of objects of varying sizes,
-Ozone can function effectively in containerized environments
-like Kubernetes._*
-
-Applications like Apache Spark, Hive and YARN work without any modifications
-when using Ozone. Ozone comes with a
-[Java client library]({{< ref "JavaApi.md" >}}),
-[S3 protocol support]({{< ref "S3.md" >}}), and a
-[command line interface]({{< ref "shell/_index.md" >}}), which makes it easy to use Ozone.
-
-Ozone consists of volumes, buckets, and keys (see the example below):
-
-* Volumes are similar to user accounts. Only administrators can create or delete volumes.
-* Buckets are similar to directories. A bucket can contain any number of keys, but buckets cannot contain other buckets.
-* Keys are similar to files.
-
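-As a quick, hypothetical taste of the command line interface (the flags and
-paths below are illustrative; see the shell documentation for the
-authoritative syntax):
-
-```bash
-ozone sh volume create /volume1                         # administrators only
-ozone sh bucket create /volume1/bucket1
-ozone sh key put /volume1/bucket1/key1 ./localfile.txt  # store a file as a key
-```
-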
- <a href="{{< ref "start/_index.md" >}}"> <button type="button"
- class="btn btn-success btn-lg">Next >></button>
-</a>
diff --git a/hadoop-hdds/docs/content/beyond/Containers.md b/hadoop-hdds/docs/content/beyond/Containers.md
deleted file mode 100644
index ea7e3b1..0000000
--- a/hadoop-hdds/docs/content/beyond/Containers.md
+++ /dev/null
@@ -1,235 +0,0 @@
----
-title: "Ozone Containers"
-summary: Ozone uses containers extensively for testing. This page documents the usage and best practices of Ozone containers.
-weight: 2
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Docker is used heavily in Ozone development, with three principal use-cases:
-
-* __dev__:
-     * We use docker to start local pseudo-clusters (docker provides a unified environment, but no image creation is required)
-* __test__:
-     * We create docker images from the dev branches to test ozone in kubernetes and other container orchestrator systems
-     * We provide _apache/ozone_ images for each release to make evaluation of Ozone easier.
-     These images are __not__ created __for production__ usage.
-
-<div class="alert alert-warning" role="alert">
-We <b>strongly</b> recommend that you create your own custom images when you
-deploy ozone into production using containers. Please treat all the standard
-shipped container images and k8s resources as examples and guides to help you
- customize your own deployment.
-</div>
-
-* __production__:
-     * We have documentation on how you can create your own docker image for your production cluster.
-
-Let's check out each of the use-cases in more detail:
-
-## Development
-
-The Ozone artifact contains example docker-compose directories to make it easier to start an Ozone cluster on your local machine.
-
-From distribution:
-
-```bash
-cd compose/ozone
-docker-compose up -d
-```
-
-After a local build:
-
-```bash
-cd  hadoop-ozone/dist/target/ozone-*/compose
-docker-compose up -d
-```
-
-These environments are very important tools for starting different types of Ozone clusters at any time.
-
-To be sure that the compose files are up-to-date, we also provide acceptance test suites which start
-the cluster and check the basic behaviour.
-
-The acceptance tests are part of the distribution, and you can find the test definitions in `smoketest` directory.
-
-You can start the tests from any compose directory:
-
-For example:
-
-```bash
-cd compose/ozone
-./test.sh
-```
-
-### Implementation details
-
-`compose` tests are based on the apache/hadoop-runner docker image. The image itself does not contain
-any Ozone jar file or binary, just the helper scripts to start ozone.
-
-hadoop-runner provides a fixed environment to run Ozone everywhere, but the ozone distribution itself
-is mounted from the containing directory:
-
-(Example docker-compose fragment)
-
-```
- scm:
-      image: apache/hadoop-runner:jdk11
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9876:9876
-
-```
-
-The containers are configured based on environment variables, but because the same environment
-variables should be set for each container, we maintain the list of environment variables
-in a separate file:
-
-```
- scm:
-      image: apache/hadoop-runner:jdk11
-      #...
-      env_file:
-          - ./docker-config
-```
-
-The docker-config file contains the list of the required environment variables:
-
-```
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=True
-#...
-```
-
-As you can see, we use a naming convention. Based on the name of the environment variable, the
-appropriate hadoop config XML (`ozone-site.xml` in our case) will be generated by a
-[script](https://github.com/apache/hadoop/tree/docker-hadoop-runner-latest/scripts) which is
-included in the `hadoop-runner` base image.
-
-The [entrypoint](https://github.com/apache/hadoop/blob/docker-hadoop-runner-latest/scripts/starter.sh)
-of the `hadoop-runner` image contains a helper shell script which triggers this transformation and
-can do additional actions (eg. initialize scm/om storage, download required keytabs, etc.)
-based on environment variables.
-
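-As a rough illustration of this convention (a sketch only; the actual
-transformation is performed by the scripts shipped in the `hadoop-runner`
-image, not by this snippet), every `OZONE-SITE.XML_<key>=<value>` variable
-maps to one `<property>` entry of `ozone-site.xml`:
-
-```bash
-# Sketch: render OZONE-SITE.XML_* environment variables as XML properties.
-env | grep '^OZONE-SITE.XML_' | while IFS='=' read -r name value; do
-  key="${name#OZONE-SITE.XML_}"
-  printf '<property><name>%s</name><value>%s</value></property>\n' \
-      "$key" "$value"
-done
-```
-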
-## Test/Staging
-
-The `docker-compose` based approach is recommended only for local testing, not for multi-node clusters.
-To use containers on a multi-node cluster we need a Container Orchestrator like Kubernetes.
-
-Kubernetes example files are included in the `kubernetes` folder.
-
-*Please note*: all the provided images are based on the `hadoop-runner` image, which contains all the
-required tools for testing in staging environments. For production we recommend creating your own
-hardened image from your own base image.
-
-### Test the release
-
-The release can be tested by deploying any of the example clusters:
-
-```bash
-cd kubernetes/examples/ozone
-kubectl apply -f .
-```
-
-Please note that in this case the latest released container image will be downloaded from Docker Hub.
-
-### Test the development build
-
-To test a development build you can create your own image and upload it to your own docker registry:
-
-
-```bash
-mvn clean install -f pom.ozone.xml -DskipTests -Pdocker-build,docker-push -Ddocker.image=myregistry:9000/name/ozone
-```
-
-The configured image will be used in all the generated kubernetes resource files (`image:` keys are adjusted during the build).
-
-```bash
-cd kubernetes/examples/ozone
-kubectl apply -f .
-```
-
-## Production
-
-<div class="alert alert-danger" role="alert">
-We <b>strongly</b> recommend using your own image in your production cluster
-and adjusting the base image, umask, security settings, and user settings
-according to your own requirements.
-</div>
-
-You can use the source of our development images as an example:
-
- * [Base image](https://github.com/apache/hadoop/blob/docker-hadoop-runner-jdk11/Dockerfile)
- * [Docker image](https://github.com/apache/hadoop/blob/trunk/hadoop-ozone/dist/src/main/docker/Dockerfile)
-
- Most of the elements are optional helper functions, but to use the provided example
- kubernetes resources you may need the scripts from
- [here](https://github.com/apache/hadoop/tree/docker-hadoop-runner-jdk11/scripts).
-
-  * The two python scripts convert environment variables to real hadoop XML config files.
-  * The start.sh script executes the python scripts (and other initialization) based on environment variables.
-
-## Containers
-
-Ozone related container images and source locations:
-
-
-<table class="table table-dark">
-  <thead>
-    <tr>
-      <th scope="col">#</th>
-      <th scope="col">Container</th>
-      <th scope="col">Repository</th>
-      <th scope="col">Base</th>
-      <th scope="col">Branch</th>
-      <th scope="col">Tags</th>
-      <th scope="col">Comments</th>
-    </tr>
-  </thead>
-  <tbody>
-    <tr>
-      <th scope="row">1</th>
-      <td>apache/ozone</td>
-      <td>https://github.com/apache/hadoop-docker-ozone</td>
-      <td>ozone-... </td>
-      <td>hadoop-runner</td>
-      <td>0.3.0,0.4.0,0.4.1</td>
-      <td>For each Ozone release we create new release tag.</td>
-    </tr>
-    <tr>
-      <th scope="row">2</th>
-      <td>apache/hadoop-runner </td>
-      <td>https://github.com/apache/hadoop</td>
-      <td>docker-hadoop-runner</td>
-      <td>centos</td>
-      <td>jdk11,jdk8,latest</td>
-      <td>This is the base image used for testing Hadoop Ozone.
-       This is a set of utilities that make it easy for us run ozone.</td>
-    </tr>
-    <!---tr>
-      <th scope="row">3</th>
-      <td>apache/ozone:build (WIP)</td>
-      <td>https://github.com/apache/hadoop-docker-ozone</td>
-      <td>ozone-build </td>
-      <td> </td>
-      <td> </td>
-      <td>TODO: Add more documentation here.</td>
-    </tr-->
-  </tbody>
-</table>
diff --git a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md b/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md
deleted file mode 100644
index f4f5492..0000000
--- a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md
+++ /dev/null
@@ -1,88 +0,0 @@
----
-title: "Docker Cheat Sheet"
-date: 2017-08-10
-summary: Docker Compose cheat sheet to help you remember the common commands to control an Ozone cluster running on top of Docker.
-weight: 4
----
-
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-In the `compose` directory of the ozone distribution there are multiple pseudo-cluster setups which
-can be used to run Ozone in different ways (for example: secure cluster, with tracing enabled,
-with prometheus, etc.).
-
-If the usage is not documented in a specific directory, the default usage is the following:
-
-```bash
-cd compose/ozone
-docker-compose up -d
-```
-
-The data of the containers is ephemeral and is deleted together with the docker volumes when the cluster is shut down:
-```bash
-docker-compose down
-```
-
-## Useful Docker & Ozone Commands
-
-If you make any modifications to ozone, the simplest way to test it is to run freon and unit tests.
-
-Here are the instructions to run freon in a docker-based cluster.
-
-{{< highlight bash >}}
-docker-compose exec datanode bash
-{{< /highlight >}}
-
-This will open a bash shell on the data node container.
-Now we can execute freon for load generation.
-
-{{< highlight bash >}}
-ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10
-{{< /highlight >}}
-
-Here is a set of helpful commands for working with docker for ozone.
-To check the status of the components:
-
-{{< highlight bash >}}
-docker-compose ps
-{{< /highlight >}}
-
-To get logs from a specific node/service:
-
-{{< highlight bash >}}
-docker-compose logs scm
-{{< /highlight >}}
-
-
-As the WebUI ports are forwarded to the external machine, you can check the web UI:
-
-* For the Storage Container Manager: http://localhost:9876
-* For the Ozone Manager: http://localhost:9874
-* For the Datanode: check the port with `docker ps` (as there could be multiple data nodes, ports are mapped to the ephemeral port range); see the sketch below
-
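-One way to look up the mapped port (a sketch; it assumes the datanode web UI
-listens on container port 9882, which may differ in your compose file):
-
-{{< highlight bash >}}
-# Print the host address mapped to the datanode's container port 9882.
-docker-compose port datanode 9882
-{{< /highlight >}}
-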
-You can start multiple data nodes with:
-
-{{< highlight bash >}}
-docker-compose scale datanode=3
-{{< /highlight >}}
-
-You can test the commands from the [Ozone CLI]({{< ref "shell/_index.md" >}}) after opening a new bash shell in one of the containers:
-
-{{< highlight bash >}}
-docker-compose exec datanode bash
-{{< /highlight >}}
diff --git a/hadoop-hdds/docs/content/beyond/RunningWithHDFS.md b/hadoop-hdds/docs/content/beyond/RunningWithHDFS.md
deleted file mode 100644
index 154be533..0000000
--- a/hadoop-hdds/docs/content/beyond/RunningWithHDFS.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-title: Running concurrently with HDFS
-linktitle: Running with HDFS
-weight: 1
-summary: Ozone is designed to run concurrently with HDFS. This page explains how to deploy Ozone in an existing HDFS cluster.
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone is designed to work with HDFS, so it is easy to deploy ozone in an
-existing HDFS cluster.
-
-The container manager part of Ozone can run inside DataNodes as a pluggable module
-or as a standalone component. This document describes how it can be started as
-an HDFS datanode plugin.
-
-To activate ozone you should define the service plugin implementation class.
-
-<div class="alert alert-warning" role="alert">
-<b>Important</b>: It should be added to the <b>hdfs-site.xml</b> as the plugin should
-be activated as part of the normal HDFS Datanode bootstrap.
-</div>
-
-{{< highlight xml >}}
-<property>
-   <name>dfs.datanode.plugins</name>
-   <value>org.apache.hadoop.ozone.HddsDatanodeService</value>
-</property>
-{{< /highlight >}}
-
-You also need to add the ozone-datanode-plugin jar file to the classpath:
-
-{{< highlight bash >}}
-export HADOOP_CLASSPATH=/opt/ozone/share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin.jar
-{{< /highlight >}}
-
-
-
-To start ozone with HDFS you should start the following components, as sketched below:
-
- 1. HDFS Namenode (from Hadoop distribution)
- 2. HDFS Datanode (from the Hadoop distribution with the plugin on the
- classpath from the Ozone distribution)
- 3. Ozone Manager (from the Ozone distribution)
- 4. Storage Container Manager (from the Ozone distribution)
-
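-A minimal start sequence under these assumptions (a sketch only; the exact
-commands depend on your Hadoop and Ozone versions and installation layout):
-
-{{< highlight bash >}}
-# From the Hadoop distribution:
-hdfs --daemon start namenode
-hdfs --daemon start datanode   # Ozone plugin jar must be on the classpath
-# From the Ozone distribution (SCM is typically started before OM):
-ozone --daemon start scm
-ozone --daemon start om
-{{< /highlight >}}
-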
-Please check the log of the datanode to verify whether the HDDS/Ozone plugin
-has started. The datanode log should contain something like this:
-
-```
-2018-09-17 16:19:24 INFO  HddsDatanodeService:158 - Started plug-in org.apache.hadoop.ozone.web.OzoneHddsDatanodeService@6f94fb9d
-```
-
-<div class="alert alert-warning" role="alert">
-<b>Note:</b> The current version of Ozone is tested with Hadoop 3.1.
-</div>
diff --git a/hadoop-hdds/docs/content/beyond/_index.md b/hadoop-hdds/docs/content/beyond/_index.md
deleted file mode 100644
index 2a29a58..0000000
--- a/hadoop-hdds/docs/content/beyond/_index.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: "Beyond Basics"
-date: "2017-10-10"
-menu: main
-weight: 7
-
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-{{<jumbotron title="Beyond Basics">}}
-  Beyond Basics pages go into custom configurations of Ozone, including how
-  to run Ozone concurrently with an existing HDFS cluster. These pages also
-  take a deep dive into how to run profilers and leverage the tracing support
-  built into Ozone.
-{{</jumbotron>}}
diff --git a/hadoop-hdds/docs/content/concept/ContainerMetadata.png b/hadoop-hdds/docs/content/concept/ContainerMetadata.png
deleted file mode 100644
index 48bd1c4..0000000
--- a/hadoop-hdds/docs/content/concept/ContainerMetadata.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/content/concept/Datanodes.md b/hadoop-hdds/docs/content/concept/Datanodes.md
deleted file mode 100644
index ea63fe4..0000000
--- a/hadoop-hdds/docs/content/concept/Datanodes.md
+++ /dev/null
@@ -1,75 +0,0 @@
----
-title: "Datanodes"
-date: "2017-09-14"
-weight: 4
-summary: Datanodes are the worker bees of Ozone, where all data is stored. Clients write blocks, and datanodes aggregate these blocks into storage containers.
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Datanodes are the worker bees of Ozone. All data is stored on data nodes.
-Clients write data in terms of blocks. A datanode aggregates these blocks into
-a storage container. A storage container holds the data streams and metadata
-of the blocks written by the clients.
-
-## Storage Containers
-
-![FunctionalOzone](ContainerMetadata.png)
-
-A storage container is a self-contained super block. It has a list of Ozone
-blocks that reside inside it, as well as on-disk files which contain the
-actual data streams. This is the default storage container format. From
-Ozone's perspective, a container is a protocol spec; the actual storage layout
-does not matter. In other words, it is trivial to extend or bring in new
-container layouts. Hence, this should be treated as a reference implementation
-of containers under Ozone.
-
-## Understanding Ozone Blocks and Containers
-
-When a client wants to read a key from Ozone, the client sends the name of
-the key to the Ozone Manager. Ozone manager returns the list of Ozone blocks
-that make up that key.
-
-An Ozone block contains the container ID and a local ID. The figure below
-shows the logical layout of an Ozone block.
-
-![OzoneBlock](OzoneBlock.png)
-
-The container ID lets the clients discover the location of the container. The
-authoritative information about where a container is located is with the
-Storage Container Manager (SCM). In most cases, the container location will be
-cached by Ozone Manager and will be returned along with the Ozone blocks.
-
-
-Once the client is able to locate the container, that is, understand which
-data nodes contain this container, the client will connect to the datanode
-and read the data stream specified by _Container ID:Local ID_. In other
-words, the local ID serves as an index into the container, describing which
-data stream we want to read.
-
-### Discovering the Container Locations
-
-How does SCM know where the containers are located? This is very similar to
-what HDFS does; the data nodes regularly send container reports, much like block
-reports. Container reports are far more concise than block reports. For
-example, an Ozone deployment with a 196 TB data node will have around 40
-thousand containers. Compare that with an HDFS block count of a million and a
-half blocks that get reported. That is a 40x reduction in the block reports.
-
-This extra indirection helps tremendously with scaling Ozone: SCM has far
-less block data to process, and keeping the name node as a separate service
-is also critical to scaling Ozone.
diff --git a/hadoop-hdds/docs/content/concept/FunctionalOzone.png b/hadoop-hdds/docs/content/concept/FunctionalOzone.png
deleted file mode 100644
index 0bc75b5..0000000
--- a/hadoop-hdds/docs/content/concept/FunctionalOzone.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/content/concept/Hdds.md b/hadoop-hdds/docs/content/concept/Hdds.md
deleted file mode 100644
index ad17b54..0000000
--- a/hadoop-hdds/docs/content/concept/Hdds.md
+++ /dev/null
@@ -1,52 +0,0 @@
----
-title: "Storage Container Manager"
-date: "2017-09-14"
-weight: 3
-summary:  Storage Container Manager or SCM is the core metadata service of Ozone. SCM provides a distributed block layer for Ozone.
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Storage container manager provides multiple critical functions for the Ozone
-cluster. SCM acts as the cluster manager, certificate authority, block
-manager, and replica manager.
-
-{{<card title="Cluster Management" icon="tasks">}}
-SCM is in charge of creating an Ozone cluster. When an SCM is booted up via the <kbd>init</kbd> command, SCM creates the cluster identity and root certificates needed for the SCM certificate authority (see the sketch below). SCM manages the life cycle of a data node in the cluster.
-{{</card>}}
-
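-A minimal bootstrap sketch for the above (the commands are assumed from the
-Ozone distribution's `ozone` CLI; check the documentation of your version):
-
-```bash
-ozone scm --init          # create cluster identity and CA root certificates
-ozone --daemon start scm  # start the Storage Container Manager
-```
-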
-{{<card title="Service Identity Management" icon="eye-open">}}
-SCM's certificate authority is in
-charge of issuing identity certificates for each and every
-service in the cluster. This certificate infrastructure makes
-it easy to enable mTLS at the network layer, and the block
-token infrastructure also depends on this certificate infrastructure.
-
-{{<card title="Block Management" icon="th">}}
-SCM is the block manager. SCM
-allocates blocks and assigns them to data nodes. Clients
-read and write these blocks directly.
-{{</card>}}
-
-
-{{<card title="Replica Management" icon="link">}}
-SCM keeps track of all the block
-replicas. If a data node or a disk is lost, SCM
-detects it and instructs data nodes to make copies of the
-missing blocks to ensure high availability.
diff --git a/hadoop-hdds/docs/content/concept/Overview.md b/hadoop-hdds/docs/content/concept/Overview.md
deleted file mode 100644
index 9e5746d..0000000
--- a/hadoop-hdds/docs/content/concept/Overview.md
+++ /dev/null
@@ -1,81 +0,0 @@
----
-title: Overview
-date: "2017-10-10"
-weight: 1
-summary: Ozone's overview and components that make up Ozone.
----
-
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone is a redundant, distributed object store optimized for Big data
-workloads. The primary design point of ozone is scalability, and it aims to
-scale to billions of objects.
-
-Ozone separates namespace management and block space management; this helps
-ozone to scale much better. The namespace is managed by a daemon called
-[Ozone Manager]({{< ref "OzoneManager.md" >}}) (OM), and block space is
-managed by [Storage Container Manager]({{< ref "Hdds.md" >}}) (SCM).
-
-
-Ozone consists of volumes, buckets, and keys.
-A volume is similar to a home directory in the ozone world.
-Only an administrator can create it.
-
-Volumes are used to store buckets.
-Once a volume is created users can create as many buckets as needed.
-Ozone stores data as keys which live inside these buckets.
-
-Ozone namespace is composed of many storage volumes.
-Storage volumes are also used as the basis for storage accounting.
-
-The block diagram shows the core components of Ozone.
-
-![Architecture diagram](ozoneBlockDiagram.png)
-
-The Ozone Manager is the namespace manager, the Storage Container Manager
-manages the physical and data layer, and Recon is the management interface
-for Ozone.
-
-
-## Different Perspectives
-
-![FunctionalOzone](FunctionalOzone.png)
-
-Any distributed system can be viewed from different perspectives. One way to
-look at Ozone is to imagine Ozone Manager as a namespace service built on
-top of HDDS, a distributed block store.
-
-Another way to visualize Ozone is to look at the functional layers; we have a
-metadata management layer, composed of Ozone Manager and Storage
-Container Manager.
-
-We have a data storage layer, which is basically the data nodes, and they are
-managed by SCM.
-
-The replication layer, provided by Ratis, is used to replicate metadata (OM and SCM)
-and is also used for consistency when data is modified at the
-data nodes.
-
-We have a management server called Recon, which talks to all the other components
-of Ozone and provides a unified management API and UX for Ozone.
-
-We have a protocol bus that allows Ozone to be extended via other
-protocols. We currently only have S3 protocol support built via the protocol bus.
-The protocol bus provides a generic way to implement new file system
-or object store protocols that call into the O3 native protocol.
-
diff --git a/hadoop-hdds/docs/content/concept/OzoneBlock.png b/hadoop-hdds/docs/content/concept/OzoneBlock.png
deleted file mode 100644
index 9583bd5..0000000
--- a/hadoop-hdds/docs/content/concept/OzoneBlock.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/content/concept/OzoneManager.md b/hadoop-hdds/docs/content/concept/OzoneManager.md
deleted file mode 100644
index 1ebdd49..0000000
--- a/hadoop-hdds/docs/content/concept/OzoneManager.md
+++ /dev/null
@@ -1,87 +0,0 @@
----
-title: "Ozone Manager"
-date: "2017-09-14"
-weight: 2
-summary: Ozone Manager is the principal name space service of Ozone. OM manages the life cycle of volumes, buckets and Keys.
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone Manager (OM) is the namespace manager for Ozone.
-
-This means that when you want to write some data, you ask Ozone
-Manager for a block and Ozone Manager gives you a block and remembers that
-information. When you want to read that file back, you need to find the
-address of the block, and Ozone Manager returns it to you.
-
-Ozone Manager also allows users to organize keys under a volume and bucket.
-Volumes and buckets are part of the namespace and managed by Ozone Manager.
-
-Each ozone volume is the root of an independent namespace under OM.
-This is very different from HDFS which provides a single rooted file system.
-
-Ozone's namespace is a collection of volumes, a forest rather than the
-single rooted tree of HDFS. This property makes it easy to deploy multiple
-OMs for scaling.
-
-## Ozone Manager Metadata
-
-OM maintains a list of volumes, buckets, and keys.
-For each user, it maintains a list of volumes.
-For each volume, it maintains the list of buckets, and for each bucket, the list of keys.
-
-Ozone Manager will use Apache Ratis (a Raft protocol implementation) to
-replicate Ozone Manager state. This will ensure High Availability for Ozone.
-
-
-## Ozone Manager and Storage Container Manager
-
-The relationship between Ozone Manager and Storage Container Manager is best
-understood if we trace what happens during a key write and key read.
-
-### Key Write
-
-* To write a key to Ozone, a client tells Ozone Manager that it would like to
-write a key into a bucket that lives inside a specific volume. Once Ozone
-Manager determines that you are allowed to write a key to the specified bucket,
-OM needs to allocate a block for the client to write data.
-
-* To allocate a block, Ozone Manager sends a request to Storage Container
-Manager (SCM); SCM is the manager of data nodes. SCM picks three data nodes
-into which the client can write data. SCM allocates the block and returns the
-block ID to Ozone Manager.
-
-* Ozone Manager records this block information in its metadata and returns the
-block and a block token (a security permission to write data to the block)
-to the client.
-
-* The client uses the block token to prove that it is allowed to write data to
-the block and writes data to the data node.
-
-* Once the write is complete on the data node, the client will update the block
-information on Ozone Manager.
-
-
-### Key Reads
-
-* Key reads are simpler: the client requests the block list from the Ozone
-Manager.
-* Ozone Manager returns the block list and block tokens, which
-allow the client to read the data from the data nodes.
-* The client connects to the data node, presents the block token, and reads
-the data from the data node.
diff --git a/hadoop-hdds/docs/content/concept/_index.md b/hadoop-hdds/docs/content/concept/_index.md
deleted file mode 100644
index 8f0aeb0..0000000
--- a/hadoop-hdds/docs/content/concept/_index.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: Concepts
-date: "2017-10-10"
-menu: main
-weight: 6
-
----
-
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-{{<jumbotron title="Ozone Architecture">}}
-
-Ozone's architectural elements are explained in the following pages: the
-metadata layer, data layer, protocol bus, replication layer, and Recon.
-These concepts are useful if you want to understand how Ozone works in depth.
-
-{{</jumbotron>}}
diff --git a/hadoop-hdds/docs/content/concept/ozoneBlockDiagram.png b/hadoop-hdds/docs/content/concept/ozoneBlockDiagram.png
deleted file mode 100644
index 7fb738f..0000000
--- a/hadoop-hdds/docs/content/concept/ozoneBlockDiagram.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/content/design/decommissioning.md b/hadoop-hdds/docs/content/design/decommissioning.md
deleted file mode 100644
index 8d620be..0000000
--- a/hadoop-hdds/docs/content/design/decommissioning.md
+++ /dev/null
@@ -1,624 +0,0 @@
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
----
-title: Decommissioning in Ozone
-summary: Formal process to shut down machines in a safe way after the required replications.
-date: 2019-07-31
-jira: HDDS-1881
-status: current
-author: Anu Engineer, Marton Elek, Stephen O'Donnell
----
-
-# Abstract
-
-The goal of decommissioning is to turn off a selected set of machines without data loss. It may or may not require moving the existing replicas of the containers to other nodes.
-
-There are two main classes of decommissioning:
-
- * __Maintenance mode__: where the node is expected to be back after a while. It may not require replication of containers if enough replicas are available from other nodes (as we expect to have the current replicas back after the restart).
-
- * __Decommissioning__: where the node won't be started again. All the data should be replicated according to the current replication rules.
-
-Goals:
-
- * Decommissioning can be canceled any time
- * The progress of the decommissioning should be trackable
- * The nodes under decommissioning / maintenance mode should not be used for new pipelines / containers
- * The state of the datanodes should be persisted / replicated by the SCM (in HDFS the decommissioning info exclude/include lists are replicated manually by the admin). If a datanode is marked for decommissioning, this state should be available after SCM and/or Datanode restarts.
- * We need to support validations before decommissioning (but the violations can be ignored by the admin).
- * The administrator should be notified when a node can be turned off.
- * The maintenance mode can be time constrained: if the node is marked for maintenance for one week and the node is not up after one week, the containers should be considered lost (DEAD node) and should be replicated.
-
-# Introduction
-
-Ozone is a highly available file system that relies on commodity hardware. In other words, Ozone is designed to handle failures of these nodes all the time.
-
-The Storage Container Manager(SCM) is designed to monitor the node health and replicate blocks and containers as needed.
-
-At times, Operators of the cluster can help the SCM by giving it hints. When removing a datanode, the operator can provide a hint. That is, a planned failure of the node is coming up, and SCM can make sure it reaches a safe state to handle this planned failure.
-
-Sometimes, this failure is transient; that is, the operator is taking down this node temporarily. In that case, we can live with lower replica counts by being optimistic.
-
-Both of these operations, __Maintenance__ and __Decommissioning__, are similar from the replication point of view. In both cases, the user instructs us on how to handle an upcoming failure.
-
-Today, SCM (the *Replication Manager* component inside SCM) understands only one form of failure handling. This paper extends the Replica Manager failure modes to allow users to request which failure handling model should be adopted (optimistic or pessimistic).
-
-Based on physical realities, there are two responses to any perceived failure, to heal the system by taking corrective actions or ignore the failure since the actions in the future will heal the system automatically.
-
-## User Experiences (Decommissioning vs Maintenance mode)
-
-From the user's point of view, there are two kinds of planned failures that the user would like to communicate to Ozone.
-
-The first kind is when a 'real' failure is going to happen in the future. This 'real' failure is the act of decommissioning. We denote this as "decommission" throughout this paper. The response that the user wants is SCM/Ozone to make replicas to deal with the planned failure.
-
-The second kind is when the failure is 'transient.' The user knows that this failure is temporary and the cluster in most cases can safely ignore this issue. However, if the transient failures are going to cause a loss of availability, then the user would like Ozone to take appropriate actions to address it. An example of this case is when the user puts 3 data nodes into maintenance mode and switches them off.
-
-The transient failure can violate the availability guarantees of Ozone, since the user is telling us not to take corrective actions. Many times, the user does not understand the impact on availability when asking Ozone to ignore the failure.
-
-So this paper proposes the following definitions for Decommission and Maintenance of data nodes.
-
-__Decommission__ *of a data node is deemed to be complete when SCM/Ozone completes the replication of all containers on the decommissioned data node to other data nodes. That is, the expected count matches the healthy count of containers in the cluster*.
-
-__Maintenance mode__ of a data node is complete if Ozone can guarantee at *least one copy of every container* is available in other healthy data nodes.
-
-## Examples
-
-Here are some illustrative examples:
-
-1. Let us say we have a container which has only one copy, residing on Machine A. If the user wants to put machine A into maintenance mode, Ozone will make a replica before entering the maintenance mode.
-
-2. Suppose a container has two copies, and the user wants to put Machine A into maintenance mode. In this case, Ozone understands that the availability of the container is not affected and hence can decide to forgo replication.
-
-3. Suppose a container has two copies, and the user wants to put Machine A into maintenance mode. However, the user wants to put the machine into maintenance mode for one month. As the period of maintenance mode increases, the probability of data loss increases; hence, Ozone might choose to make a replica of the container even if we are entering maintenance mode.
-
-4. The semantics of decommissioning mean that as long as we can find copies of containers on other machines, we can technically get away with calling the decommission complete. Hence this clarifying note: in the ordinary course of action, each decommission will create a replication flow for each container we have; however, it is possible to complete a decommission of a data node even if the data node being decommissioned fails. As long as we can find other datanodes to replicate from and get the number of replicas back up to the expected count, we are good.
-
-5. Let us say we have a copy of a container replica on Machines A, B, and C. It is possible to decommission all three machines at the same time, as decommissioning is just a status indicator for the data node until we finish the decommissioning process.
-
-
-The user-visible features for both of these are very similar:
-
-Both Decommission and Maintenance mode can be canceled any time before the operation is marked as completed by SCM.
-
-Decommissioned nodes, if and when added back, shall be treated as new data nodes; if they have blocks or containers on them, they can be used to reconstruct data.
-
-
-## Maintenance mode in HDFS
-
-HDFS supports decommissioning and maintenance mode similar to Ozone. This is a quick description of the HDFS approach.
-
-The usage of HDFS maintenance mode:
-
-  * First, you set a minimum replica count on the cluster, which can be zero, but defaults to 1.
-  * Then you can set a number of nodes into maintenance, with an expiry time or have them remain in maintenance forever, until they are manually removed. Nodes are put into maintenance in much the same way as nodes are decommissioned.
-  * When a set of nodes goes into maintenance, all blocks hosted on them are scanned, and if a node going into maintenance would cause the number of replicas to fall below the minimum replica count, the relevant nodes go into a decommissioning-like state while new replicas are made for the blocks.
-  * Once the node goes into maintenance, it can be stopped, etc., and HDFS will not be concerned about the under-replicated state of the blocks.
-  * When the expiry time passes, the node is put back to normal state (if it is online and heartbeating) or marked as dead, at which time new replicas will start to be made.
-
-This is very similar to decommissioning, and the code to track maintenance mode and ensure the blocks are replicated, etc., is effectively the same code as for decommissioning. The one area that differs is probably the replication monitor, as it must understand that the node is expected to be offline.
-
-The ideal way to use maintenance mode is when you know there is a set of nodes you can stop without having to do any replications. In HDFS, the rack awareness policy states that all blocks should be on two racks, so that means a rack can be put into maintenance safely.
-
-There is another feature in HDFS called "Upgrade Domain", which allows each datanode to be assigned a group. By default there should be at least 3 groups (domains), and then each of the 3 replicas will be stored in a different group, allowing one full group to be put into maintenance at once. That is not yet supported in CDH, but is something we are targeting for CDPD, I believe.
-
-One other difference between maintenance mode and decommissioning is that you must have some sort of monitor thread checking for when maintenance is scheduled to end. HDFS solves this by having a class called the DatanodeAdminManager, which tracks all nodes transitioning state, the under-replicated block count on them, etc.
-
-
-# Implementation
-
-
-## Datanode state machine
-
-`NodeStateManager` maintains the state of the connected datanodes. The possible states:
-
-  state                | description
-  ---------------------|------------
-  HEALTHY              | The node is up and running.
-  STALE                | Some heartbeats were missed for the node.
-  DEAD                 | The stale node has not recovered.
-  ENTERING_MAINTENANCE | The in-progress state; scheduling is disabled, but the node can't be turned off yet due to in-progress replication.
-  IN_MAINTENANCE       | The node can be turned off, but we expect to get it back with all its replicas.
-  DECOMMISSIONING      | The in-progress state; scheduling is disabled, and all the containers should be replicated to other nodes.
-  DECOMMISSIONED       | The node can be turned off; all the containers are replicated to other machines.
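-
-A minimal sketch of how these states could be modeled in code (illustrative only; the actual `NodeStateManager` types may differ):
-
-```java
-// Illustrative sketch; the real NodeStateManager types may differ.
-enum NodeOperationalState {
-  HEALTHY,               // up and running
-  STALE,                 // some heartbeats were missed
-  DEAD,                  // stale node that has not recovered
-  ENTERING_MAINTENANCE,  // scheduling disabled, replication still in progress
-  IN_MAINTENANCE,        // may be turned off, expected to return with its replicas
-  DECOMMISSIONING,       // scheduling disabled, containers being replicated away
-  DECOMMISSIONED         // may be turned off permanently
-}
-```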
-
-
-
-## High level algorithm
-
-The algorithm is pretty simple from the decommission or maintenance point of view:
-
- 1. Mark a data node as DECOMMISSIONING or ENTERING_MAINTENANCE. This implies that the node is NOT healthy anymore; we assume the use of a single flag and the law of the excluded middle.
-
- 2. Pipelines should be shut down, and we wait for confirmation that all pipelines are shut down, so no new I/O or container creation can happen on a datanode that is part of decommissioning or maintenance.
-
- 3. Once the node has been marked as DECOMMISSIONING or ENTERING_MAINTENANCE, the node will generate a list of containers that need replication. This list is generated from the Replica Count decisions for each container; the Replica Count will be computed by the Replica Manager.
-
- 4. Replica Manager will check the stop condition for each node. The following should be true for all the containers to go from DECOMMISSIONING to DECOMMISSIONED or from ENTERING_MAINTENANCE to IN_MAINTENANCE.
-
-   * Container is closed.
-   * We have at least one HEALTHY copy at all times.
-   * For entering DECOMMISSIONED mode, `maintenance + healthy` must equal `expectedCount`.
-
- 5. We will update the node state to DECOMMISSIONED or IN_MAINTENANCE once the corresponding stop condition is reached.
-
-_Replica count_ is a calculated number which represents the number of _missing_ replicas. The number can be negative in case of an over-replicated container.
-
-## Calculation of the _Replica count_ (required replicas)
-
-### Counters / Variables
-
-We have 7 different datanode states, but some of them can be combined. At a high level we can group the existing states into three categories:
-
- * healthy state (when the container is available)
- * maintenance (including IN_MAINTENANCE and ENTERING_MAINTENANCE)
- * all the others.
-
-To calculate the required steps (required replication + stop condition) we need counters for the first two categories.
-
-Each counter should be calculated on a per-container basis.
-
-   Node state                            | Variable (# of containers)      |
-   --------------------------------------|---------------------------------|
-   HEALTHY	                             | `healthy`                       |
-   STALE + DEAD + DECOMMISSIONED	     |                                 |
-   DECOMMISSIONING                       |                                 |
-   ENTERING_MAINTENANCE + IN_MAINTENANCE | `maintenance`                   |
-
-
-### The current replication model
-
-The current replication model in SCM/Ozone is very simplistic. We compute the replication count or the number of replications that we need to do as:
-
-```
-Replica count = expectedCount - currentCount
-```
-
-In case the _Replica count_ is positive, it means that we need to make more replicas. If the number is negative, it means that we are over-replicated and we need to remove some replicas of this container. If the Replica count for a container is zero, it means that we have the expected number of replicas in the cluster.
-
-To support idempotent placement strategies we should subtract the in-flight replications from the result: if there is one in-flight replication process and two replicas, we won't start a new replication command unless the original command has timed out. The timeout is configured with `hdds.scm.replication.event.timeout` and the default value is 10 minutes.
-
-More precisely, the current algorithm is the following:
-
-```java
-int replicaCount = expectedCount - healthy;
-
-if (replicaCount - inflightCopies + inflightDeletes > 0) {
-   // the container is under-replicated: more copies are needed
-} else if (replicaCount - inflightCopies + inflightDeletes < 0) {
-   // the container is over-replicated: extra replicas can be deleted
-}
-```
-
-The handling of in-flight copies and deletes is independent of the decommissioning problem, therefore here we focus only on the core model:
-
-```
-replicaCount = expectedCount - healthy;
-```
-
-### The proposed solution
-
-To support the notion that a user can provide hints to the replication model, we propose to add two variables to the current model.
-
-In the new model, we propose to break the `currentCount` into two separate groups: _healthy nodes_ and _maintenance nodes_. The new model replaces the `currentCount` with these two separate counts. The following function captures the code that drives the logic of computing replica counts in the new model. A later section discusses the inputs and outputs of this model extensively.
-
-```java
-/**
- * Calculate the number of the missing replicas.
- *
- * @return the number of missing replicas. If it's less than zero, the
- *     container is over replicated.
- */
-int getReplicationCount(int expectedCount, int healthy, int maintenance) {
-
-   //for over replication, count only with the healthy replicas
-   if (expectedCount <= healthy) {
-      return expectedCount - healthy;
-   }
-
-   int replicaCount = expectedCount - (healthy + maintenance);
-
-   //at least one HEALTHY replica should be guaranteed!
-   if (replicaCount == 0 && healthy < 1) {
-      replicaCount++;
-   }
-
-   //Over-replication was handled above, so never return a negative value.
-   return Math.max(0, replicaCount);
-}
-
-```
-
-To understand the reasoning behind the two special `if` conditions, check the examples below.
-
-We also need to specify the two end conditions under which the DECOMMISSIONING node can be moved to the DECOMMISSIONED state or the ENTERING_MAINTENANCE node can be moved to the IN_MAINTENANCE state.
-
-The following conditions should be true for all the containers, and all the containers on the specific node should be closed.
-
-**From DECOMMISSIONING to DECOMMISSIONED**:
-
- * There is at least one healthy replica.
- * We have three replicas in total (healthy and maintenance combined).
-
-Which means that our stop condition can be formalized as:
-
-```
-(healthy >= 1) && (healthy + maintenance >= 3)
-```
-
-Both numbers are configurable:
-
-  * 1 is the minimum number of healthy replicas (`decommissioning.minimum.healthy-replicas`)
-  * 3 is the minimum number of existing replicas (`decommissioning.minimum.replicas`)
-
-For example, `decommissioning.minimum.healthy-replicas` can be set to two if the administrator would like to survive an additional node failure during the maintenance period.
-
-**From ENTERING_MAINTENANCE to IN_MAINTENANCE:**
-
- * There is at least one healthy replica.
-
-This is the weaker version of the previous condition:
-
-```
-(healthy >= 1)
-```
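-
-The two stop conditions can be combined into a simple helper, sketched below (illustrative only; the constant names are hypothetical stand-ins for the two configuration keys above):
-
-```java
-// Illustrative sketch of the stop conditions; names are hypothetical.
-class DecommissionStopConditions {
-  // decommissioning.minimum.healthy-replicas
-  static final int MIN_HEALTHY_REPLICAS = 1;
-  // decommissioning.minimum.replicas
-  static final int MIN_REPLICAS = 3;
-
-  /** True when a DECOMMISSIONING node may move to DECOMMISSIONED. */
-  static boolean canDecommission(int healthy, int maintenance) {
-    return healthy >= MIN_HEALTHY_REPLICAS
-        && healthy + maintenance >= MIN_REPLICAS;
-  }
-
-  /** True when an ENTERING_MAINTENANCE node may move to IN_MAINTENANCE. */
-  static boolean canEnterMaintenance(int healthy) {
-    return healthy >= MIN_HEALTHY_REPLICAS;
-  }
-}
-```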
-
-### Examples (normal cases)
-
-In this section we show example use cases together with the output of the proposed algorithm.
-
-#### All healthy
-
-  Node A  | Node B  | Node C | expctd | healthy | mainten | repCount
-  --------|---------|--------|--------|---------|---------|----------
-  HEALTHY | HEALTHY | HEALTHY|  3     | 3       | 0       | 0
-
-The container C1 exists on machines A, B, and C. All the container reports tell us that the container is healthy. Running the above algorithm, we get:
-
-`expected - (healthy + maint.) = 3 - (3 + 0) = 0`
-
-It means _"we don't need any replication"_.
-
-#### One failure
-
-
-  Node A  | Node B  | Node C | expctd | healthy | mainten | repCount
-  --------|---------|--------|--------|---------|---------|----------
-  HEALTHY | HEALTHY | DEAD   |  3     | 2       | 0       | 1
-
-
-Machine C has failed, and as a result, the healthy count has gone down from `3` to `2`. This means that we need to start one replication flow.
-
-`ReplicaCount = expected - (healthy + maint.) = 3 - (2 + 0) = 1`
-
-This means that the new model will handle failure cases just like the current model.
-
-#### One decommissioning
-
-  Node A  | Node B  | Node C | expctd | healthy | mainten | repCount
-  --------|---------|--------|--------|---------|---------|----------
-  HEALTHY | HEALTHY | DECOMM |  3     | 2       | 0       | 1
-
-
-In this case, machine C is being decommissioned. Therefore the healthy count has gone down to `2`, and the decommission count is `1`. Since `ReplicaCount = expected - (healthy + maint.)`, we have `1 = 3 - (2 + 0)`;
-
-this gives us the decommission count implicitly. The trick here is to realize that incrementing decommission automatically causes a decrement in the healthy count, which allows us not to have _decommission_ in the equation explicitly.
-
-**Stop condition**: Note that if this container is the only one on node C, node C can be moved to the DECOMMISSIONED state.
-
-#### Failure + decommissioning
-
-  Node A  | Node B  | Node C | expctd | healthy | mainten | repCount
-  --------|---------|--------|--------|---------|---------|----------
-  HEALTHY | DEAD    | DECOMM |  3     | 1       | 0       | 2
-
-
-Here is a case where we have a failure of a data node and a decommission of another data node. In this case, the container C1 needs two replica flows to heal itself. The equation is the same, and we get
-
-`ReplicaCount(2) = ExpectedCount(3) - healthy(1)`
-
-The maintenance count is still zero, so it is ignored in this equation.
-
-#### 1 failure + 2 decommissioning
-
-  Node A  | Node B  | Node C | expctd | healthy | mainten | repCount
-  --------|---------|--------|--------|---------|---------|----------
-  DEAD    | DECOMM  | DECOMM |  3     | 0       | 0       | 3
-
-In this case, we have one failed data node and two data nodes being decommissioned. We need to get three replica flows in the system. This is achieved by:
-
-```
-ReplicaCount(3) = ExpectedCount(3) - (healthy(0) + maintenance(0))
-```
-
-#### Maintenance mode
-
-  Node A  | Node B  | Node C | expctd | healthy | mainten | repCount
-  --------|---------|--------|--------|---------|---------|----------
-  HEALTHY | HEALTHY | MAINT  |  3     | 2       | 1       | 0
-
-This represents the normal maintenance mode, where a single machine is marked as in maintenance mode. This means the following:
-
-```
-ReplicaCount(0) = ExpectedCount(3) - (healthy(2) + maintenance(1))
-```
-
-There are no replica flows since the user has asked us to move a single node into maintenance mode, and asked us explicitly not to worry about the single missing node.
-
-**Stop condition**: Note that if this container is the only one on node C, node C can be moved to the IN_MAINTENANCE state.
-
-#### Maintenance + decommissioning
-
-  Node A  | Node B  | Node C | expctd | healthy | mainten | repCount
-  --------|---------|--------|--------|---------|---------|----------
-  HEALTHY | DECOMM  | MAINT  |  3     | 1       | 1       | 1
-
-*This is a fascinating case*: we have one good node, one node being decommissioned, and one node in maintenance mode. The expected result is that the replica manager will launch one replication flow to compensate for the node that is being decommissioned, and we also expect that there will be no replication for the node in maintenance mode.
-
-```
-Replica Count (1) = expectedCount(3) - (healthy(1) + maintenance(1))
-```
-So as expected we have one replication flow in the system.
-
-**Stop condition**: Note that if this container is the only one in the system:
-
- * node C can be moved to the IN_MAINTENANCE state
- * node B cannot be decommissioned (we need the three replicas first)
-
-#### Decommissioning all the replicas
-
-  Node A  | Node B  | Node C | expctd | healthy | mainten | repCount
-  --------|---------|--------|--------|---------|---------|----------
-  DECOMM  | DECOMM  | DECOMM |  3     | 0       | 0       | 3
-
-In this case, we deal with all the data nodes being decommissioned. The number of healthy replicas for this container is 0, and hence:
-
-```
-replicaCount(3) = expectedCount(3) - (healthy(0) + maintenance(0))
-```
-
-This provides us with all 3 independent replica flows in the system.
-
-#### Decommissioning the one remaining replica
-
-  Node A  | Node B  | Node C | expctd | healthy | mainten | repCount
-  --------|---------|--------|--------|---------|---------|----------
-  DEAD    | DEAD    | DECOMM |  3     | 0       | 0       | 3
-
-We have two failed nodes and one node being decommissioned. It is the opposite of the earlier "1 failure + 2 decommissioning" case, where we have one failed node and 2 nodes being decommissioned. The expected results are the same: we get 3 flows.
-
-#### Total failure
-
-  Node A  | Node B  | Node C | expctd | healthy | mainten | repCount
-  --------|---------|--------|--------|---------|---------|----------
-  DEAD    | DEAD    | DEAD   |  3     | 0       | 0       | 3
-
-This is really an error condition. We have lost all 3 data nodes. The Replica Manager will compute that we need to rebuild 3 replicas, but we might not have a source to rebuild from.
-
-#### Last replica is on ENTERING_MAINTENANCE
-
-  Node A  | Node B  | Node C | expctd | healthy | mainten | repCount
-  --------|---------|--------|--------|---------|---------|----------
-  DEAD    | MAINT   | DEAD   |  3     | 0       | 1       | 2
-
-This is also an interesting case: we have lost 2 data nodes, and one node is being marked for maintenance. Since we have 2 failed nodes, we need 2 replica flows in the system. However, the maintenance mode cannot be entered, since we would lose the lone replica if we did that.
-
-#### All maintenance
-
-  Node A  | Node B  | Node C | expctd | healthy | mainten | repCount
-  --------|---------|--------|--------|---------|---------|----------
-  MAINT   | MAINT   | MAINT  |  3     | 0       | 3       | *1*
-
-This is also a very special case; this is the case where the user is telling us to ignore the peril of all 3 replicas being offline. This means that the system would not be able to reach that container, which could lead to potential I/O errors. Ozone will strive to avoid that case; this means that Ozone will hit the "if condition" and discover that our ReplicaCount is 0 (since the user asked for it) but that we are also going to lose all replicas. At this point, we make a conscious decision to replicate one copy instead of obeying the user command and getting into a situation where I/O can fail.
-
-**This brings us back to the semantics of Maintenance mode in Ozone**. If going into maintenance mode will not lead to a potential I/O failure, we will enter maintenance mode; otherwise, we will replicate and enter maintenance mode after the replication is done. This is just the core replication algorithm, not the complete Decommission or Maintenance mode algorithm, just how the replica manager would behave. Once we define the behavior of the Replica Manager, the rest of the algorithm is easy to construct.
-
-Note: this case is why we need the second `if` in the model (the numbers in brackets show the actual values):
-
-```
-   replicaCount(0) = expectedCount(3) - ( healthy(0) + maintenance(3) );
-
-   //at least one HEALTHY replica should be guaranteed!
-   if (replicaCount(0) == 0 && healthy(0) < 1) {
-      replicaCount++;
-   }
-```
-
-### Over replication
-
-For over-replicated containers Ozone prefers to keep the replicas on the healthy nodes. We delete replicas only if we have enough replicas on *healthy* nodes.
-
-```
-int getReplicationCount(int expectedCount, int healthy, int maintenance) {
-
-   //for over replication, count only with the healthy replicas
-   if (expectedCount <= healthy) {
-      return expectedCount - healthy;
-   }
-
-   int replicaCount = ... //calculate missing replicas as above
-
-   //Over-replication was handled above, so never return a negative value.
-   return Math.max(0, replicaCount);
-}
-```
-
-Please note that we always assume that the in-flight deletions are applied and the container replica is already deleted.
-
-There is a very rare case where the in-flight deletion times out (and as a result the Replication Manager would assume the container is not deleted) BUT in the meantime the container is finally deleted. This can be survived by including the creation timestamp in the ContainerDeleteCommand.
-
-### Over replication examples
-
-#### 4 replicas
-
-
-  Node A  | Node B  | Node C  | Node D  | expctd | healthy | mainten | repCount
-  --------|---------|---------|---------|--------|---------|---------|----------
-  HEALTHY | HEALTHY | HEALTHY | HEALTHY |  3     | 4       | 0       | 0
-
-This is an easy case: as we have too many replicas, we can safely remove one.
-
-```
-if (expectedCount <= healthy) {
-   return expectedCount - healthy;
-}
-```
-
-#### over replicated with IN_MAINTENANCE
-
-
-  Node A  | Node B  | Node C  | Node D  | expctd | healthy | mainten | repCount
-  --------|---------|---------|---------|--------|---------|---------|----------
-  HEALTHY | HEALTHY | HEALTHY | MAINT   |  3     | 3       | 1       | 0
-
-
-In this case we will delete the fourth replica only after node D is restored and healthy again (expectedCount is not less than healthy). As `expectedCount (3) <= healthy (3)`, the replicaCount will be calculated as `0`.
-
-#### Not over-replicated with IN_MAINTENANCE
-
-
-  Node A  | Node B  | Node C  | Node D  | expctd | healthy | mainten | repCount
-  --------|---------|---------|---------|--------|---------|---------|----------
-  HEALTHY | HEALTHY | MAINT   | MAINT   |  3     | 2       | 2       | 0
-
-Here we are not over-replicated, as we don't have enough healthy replicas. We will calculate the under-replication number as defined in the previous section:
-
-```
-replicaCount(-1) = expectedCount(3) - ( healthy(2) + maintenance(2) );
-```
-
-The main algorithm would return `replicaCount = -1`, but as we return `Math.max(0, replicaCount)`, the real response will be 0: we wait for healthy nodes.
-
-### Handling in-flight replications
-
-In-flight replication requests and deletes are handled by the Replica Manager. The problem is orthogonal to the decommissioning problem, but this section shows that the proposed model does not conflict with the existing approach.
-
-Let's say we have an under-replicated container and we have already selected a new datanode to copy a new replica to.
-
-
-  Node A  | Node B  | Node C  | expctd | healthy | mainten | repCount
-  --------|---------|---------|--------|---------|---------|----------
-  HEALTHY | HEALTHY | (copy)  |  3     | 2       | 0       | 1
-
-
-Here the Replication Manager detects that one replica is missing, but a new copy shouldn't be requested as one is already in progress. The ReplicaManager must not select a new datanode based on the ContainerPlacementPolicy implementation, as the policy may or may not be idempotent.
-
-For example, if the placement policy selected a datanode randomly on each loop, we would select a new datanode to replicate to on every pass.
-
-To avoid such a situation, the Replica Manager maintains a list of the in-flight copies (in-memory) on the SCM side. This list contains all the sent replication requests, and they are removed after a given amount of time (10 minutes by default).
-
-By counting an in-flight copy as a possible replication, the Replication Manager doesn't need to request a new replication.
-
-When a datanode is marked for decommissioning, there may be in-flight replication copies in progress at that time (see the sketch after this list):
-
- * At the datanode we should stop all of the in-flight copies (datanodes should be notified about the DECOMMISSIONING/IN_MAINTENANCE state).
- * We never ask any non-healthy nodes to replicate containers.
- * In SCM, we don't need to do any special action:
-     * In `ReplicationManager` we already have a map of the in-flight replications (`Map<ContainerID, List<InflightAction>>`).
-     * During normal replication, in-flight replications are counted as real replicas (2 real replicas + 1 in-flight replication = replica count 3). During this calculation we need to check the current state of the datanodes and ignore in-flight replications assigned to a node which is in the decommissioning state (or update the in-flight map on node state change).
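-
-A sketch of how the in-flight copies could be folded into the calculation (illustrative only; `isUsableTarget` is a hypothetical helper, and the real `ReplicationManager` bookkeeping differs in detail):
-
-```java
-// Illustrative sketch; assumes the getReplicationCount() defined earlier.
-int containersToCopy(int expectedCount, int healthy, int maintenance,
-    List<InflightAction> inflightCopies) {
-  // Count only in-flight copies whose target can still receive data;
-  // isUsableTarget is a hypothetical helper checking the node state.
-  long validInflight = inflightCopies.stream()
-      .filter(action -> isUsableTarget(action.getDatanode()))
-      .count();
-  int missing = getReplicationCount(expectedCount, healthy, maintenance);
-  return Math.max(0, missing - (int) validInflight);
-}
-```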
-
-### In-flight examples
-
-#### Maintenance + inflight
-
-  Node A  | Node B  | Node C  | expctd | healthy | mainten | repCount | copy |
-  --------|---------|---------|--------|---------|---------|----------|------|
-  HEALTHY | MAINT   | copying |  3     | 1       | 1       | 1        | 1    |
-
-Here we have one node in the ENTERING_MAINTENANCE state; one replica is missing and is already being copied. We don't need to start a new copy, and node B can be moved to the IN_MAINTENANCE state.
-
-```
-Replica Count (1) = expectedCount(3) - (healthy(1) + maintenance(1))
-Containers to copy (0) = Replica Count (1) - inflight copies (1)
-```
-
-#### Decommissioning + inflight
-
-  Node A  | Node B  | Node C  | expctd | healthy | mainten | repCount | copy |
-  --------|---------|---------|--------|---------|---------|----------|------|
-  DECOMM  | copying | copying |  3     | 0       | 0       | 3        | 2    |
-
-
-Node A cannot be DECOMMISSIONED as we have no HEALTHY replicas at all.
-
-
-## Statefulness
-
-SCM stores all node state in memory. After a restart on the SCM side, the datanode state can be lost.
-
-**Ozone doesn't guarantee that decommissioning/maintenance mode state survives the SCM restarts!!!**
-
- * If SCM restarts, DECOMMISSIONED nodes will not send container reports any more and the nodes won't be registered.
- * ENTERING_MAINTENANCE and DECOMMISSIONING nodes will become HEALTHY again, and the decommissioning CLI command should be repeated.
- * IN_MAINTENANCE nodes will become DEAD and all their containers will be replicated.
-
- *Ozone assumes that the maintenance mode is used short-term and SCM is not restarted during this specific period.*
-
-*Reasoning*:
-
-Neither the node state nor the container state is persisted on the SCM side. The decommissioned state could be stored on the SCM side (or on both the SCM side and the datanode side), which would provide a better user experience (and may be implemented).
-
-But to support maintenance mode after a restart, all the container information would have to be persisted (which would be too big an architectural change).
-
-To make a replication decision, the Replication Manager needs the number of healthy replicas (they are reported via heartbeats) AND the number of containers on the node which is in maintenance mode. The latter is not available if SCM is restarted, as the container map exists only in memory and a node which is turned off can't send any more container reports. Therefore the information about the existing containers on a node in maintenance mode **can't be available**.
-
-## Throttling
-
-SCM should avoid requesting too many replications, to leave enough network bandwidth for regular traffic.
-
-The Replication Manager can easily throttle the replication requests based on the `inflightReplication` map, but this problem is independent of the handling of decommissioning / maintenance mode, because it should be solved for any kind of replication, not just this one.
-
-## User interface
-
-The decommissioning and maintenance mode can be administered with a CLI interface.
-
-Required features:
-
- * Set the state of a datanode (to DECOMMISSIONING or ENTERING_MAINTENANCE)
- * Undo the decommissioning process
- * Check the current progress:
-   * This can be a table with the nodes, the status of the nodes, the number of containers, the containers under replication, and the containers which don't yet match the stop condition (required replications)
- * All the commands can support topology-related filters (e.g. display the nodes only for a specific rack, or show the status of the nodes of a specific rack)
- * Check the current replication information of one specific container (required to debug why the decommissioning is stuck)
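-
-A hypothetical CLI session illustrating these features (the command names and flags below are illustrative proposals, not an existing interface):
-
-```
-# Hypothetical commands; names and flags are illustrative only.
-ozone admin datanode decommission node-a.example.com
-ozone admin datanode maintenance --end-in 7d node-b.example.com
-ozone admin datanode status --rack /rack1
-ozone admin datanode recommission node-a.example.com
-```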
-
-## Checks before the decommissioning
-
-Decommissioning is requested via a new RPC call with the help of a new CLI tool. The server should check the current state of the cluster and deny the decommissioning if it's not possible. Possible violations:
-
- * Not enough space to store the new replicas.
- * Not enough nodes to create all kinds of pipelines
-
- In case of any violation the request will fail, but any of these rules can be turned off with a subsequent request, and the decommissioning can be forced.
-
-## Maintain progress
-
-We need to show the progress of the decommissioning process per node and cluster-wide. We already have the information about the under-replicated containers, but we don't know the number of containers before decommissioning.
-
-Instead of saving the original number of required replications beforehand (which is very fragile), we don't provide an absolute progress, just the number of remaining replications:
-
-
-```
- Node            | Status                 | # containers |  in-progress replications| required replication
- ----------------|------------------------|--------------|--------------------------|------------------------
- Node A          | ENTERING_MAINTENANCE   | 2837         | 12                       | 402
- Node B          | HEALTHY                | 1239         | 0                        | 0
- Node C          | IN_MAINTENANCE         | 2348         | 0                        | 0
-```
-
-`# containers` means the total number of containers on the specific datanode. To show the original number of planned copies, we could save the original 'container-to-node' map in memory, show progress, and provide more information to the users.
diff --git a/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md b/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md
deleted file mode 100644
index cc7569eb..0000000
--- a/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md
+++ /dev/null
@@ -1,197 +0,0 @@
----
-title: Ozone Enhancement Proposals
-summary: Definition of the process to share new technical proposals with the Ozone community.
-date: 2019-06-07
-jira: HDDS-1659
-status: accepted
-author: Anu Engineer, Marton Elek
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-## Problem statement
-
-Some of the bigger features require well-defined plans before the implementation. Until now this was managed by uploading PDF design docs to a selected JIRA. There are multiple problems with the current practice.
-
- 1. There is no easy way to find existing up-to-date and outdated design docs.
- 2. Design docs usually have a better description of the problem than the user docs.
- 3. We need better tools to discuss the design docs during the development phase of the doc.
-
-We propose to follow the same process we have now, but instead of uploading a PDF to the JIRA, create a PR to merge the proposal document into the documentation project.
-
-## Non-goals
-
- * Modify the existing workflow or approval process
- * Migrate existing documents
- * Make it harder to create design docs (it should be easy to support the creation of proposals for any kind of tasks)
- * Define how the design docs are handled/created *before* the publication (this proposal is about the publishing process)
-
-## Proposed solution
-
- * Open a dedicated Jira (`HDDS-*` but with specific component)
- * Use a standard name prefix in the jira (easy to filter on the mailing list): `[OEP]`
- * Create a PR to add the design doc to the current documentation
-   * The content of the design can be added to the documentation (Recommended)
-   * Or can be added as external reference
- * The design doc (or the summary with the reference) will be merged to the design doc folder of `hadoop-hdds/docs/content/design` (will be part of the docs)
- * Discuss it as before (lazy consensus, except if somebody calls for a real vote)
- * Design docs can be updated according to the changes during the implementation
- * Only the implemented design docs will be visible as part of the design docs
-
-
-As a result, all the design docs can be listed under the documentation page.
-
-A good design doc has the following properties:
-
- 1. Publicly available for anybody (please try to avoid services which are available only with registration, e.g. Google Docs)
- 2. Archived for the future (Commit it to the source OR use apache jira or wiki)
- 3. Editable later (Best format is markdown, RTF is also good. PDF has a limitation, it's very hard to reuse the text, or create an updated design doc)
- 4. Well structured to make it easy to comment any part of the document (Markdown files which are part of the pull request can be commented in the PR line by line)
-
-
-### Example 1: Design doc as a markdown file
-
-The easiest way to create a design doc is to create a new markdown file in a PR and merge it to `hadoop-hdds/docs/content/design`.
-
- 1. Publicly available: YES, it can be linked from Apache git or github
- 2. Archived: YES, and it's also versioned. All the change history can be tracked.
- 3. Editable later: YES, as it's just a simple text file
- 4. Commentable: YES, comment can be added to each line.
-
-### Example 2: Design doc as a PDF
-
-A very common practice today is to create a design doc on Google Docs and upload it to the JIRA.
-
- 1. Publicly available: YES, anybody can download it from the Jira.
- 2. Archived: YES, it's available from Apache infra.
- 3. Editable: NO, It's harder to reuse the text to import to the docs or create a new design doc.
- 4. Commentable: PARTIAL, Not as easy as a text file or the original google docs, but a good structure with numbered section may help
-
-
-### The format
-
-While the first version (markdown files) is the most powerful, the second version (the existing practice) is also acceptable. In this case we propose to create a PR adding a reference page *without* the content but including the link.
-
-For example:
-
-```yaml
----
-title: Ozone Security Design
-summary: A comprehensive description of the security flow between server and client components.
-date: 2018-02-22
-jira: HDDS-4
-status: implemented
-author: Sanjay Radia, Jitendra Pandey, Xiaoyu Yao, Anu Engineer
----
-
-## Summary
-
-Ozone's security model is based on Kerberos and is similar to Hadoop security, but some of the parts are improved: for example, the SCM works as a Certificate Authority and PKI-based solutions are widely used.
-
-## Reference
-
-For more details please check the [uploaded design doc](https://issues.apache.org/jira/secure/attachment/12911638/HadoopStorageLayerSecurity.pdf).
-
-```
-
-Obviously with the first approach the design doc itself can be included in this markdown file.
-
-## Migration
-
-It's not a hard requirement to migrate all the design docs, but the process is always open:
-
- 1. To create reference pages for any of the old design docs
- 2. To migrate any new design docs to markdown formats (by anybody not just by the author)
- 3. To update any of the old design docs based on the current state of the code (We have versioning!)
-
-## Document template
-
-This is the proposed template to document any proposal. It's recommended, but not required, to use exactly the same structure. Some proposals may require a different structure, but we need the following information.
-
-1. Summary
-
-> Give a one-sentence summary, like the jira title. It will be displayed on the documentation page. It should be enough to understand the scope of the proposal.
-
-2. Status
-
-Defined in the markdown header. Proposed statuses:
-
- * `accepted`: (Use this by default. If not accepted, it won't be merged.)
-
- * `implemented`: The discussed technical solution is implemented (maybe with some minor implementation difference)
-
- * `replaced`: Replaced by a new design doc
-
- * `outdated`: The code has been changed and the design doc no longer reflects the state of the current code.
-
- Note: the _accepted_ design docs won't be visible as part of the documentation, or only under a dedicated section, to clearly communicate that they are not implemented yet.
-
-3. Problem statement (Motivation / Abstract)
-
-> What is the problem and how would you solve it? Think about an abstract of a paper: one paragraph overview. Why will the world be better with this change?
-
-4. Non-goals
-
- > Very important to define what is outside of the scope of this proposal
-
-5.   Technical Description (Architecture and implementation details)
-
- > Explain the problem in more detail. How can it be reproduced? What is the current solution? What is the limitation of the current solution?
-
- > How would the new proposed solution solve the problem? Architectural design.
-
- > Implementation details. What should be changed in the code. Is it a huge change? Do we need to change wire protocol? Backward compatibility?
-
-6. Alternatives
-
- > What are the other alternatives you considered, and why do you prefer the proposed solution? The goal of this section is to help people understand why this is the best solution now, and also to prevent churn in the future when old alternatives are reconsidered.
-
-Note: In some cases 4/5 can be combined. For example, if you have multiple proposals, the first version may include multiple solutions. At the end of the discussion we can move the alternatives to 5. and explain why the community decided to use the selected option.
-
-7. Plan
-
- > Planning to implement the feature. Estimated size of the work? Do we need a feature branch? Any migration plan or dependency? If it's not a big new feature, this can be one sentence or optional.
-
-8. References
-
-## Workflows from other projects
-
-There are similar processes in other open source projects. This document and the template are inspired by the following projects:
-
- * [Apache Kafka Improvement Proposals](https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals)
- * [Apache Spark Project Improvement Proposals](https://spark.apache.org/improvement-proposals.html)
- * [Kubernetes Enhancement Proposals](https://github.com/kubernetes/enhancements/tree/master/keps)
-
-Short summary of the processes:
-
-__Kafka__ process:
-
- * Create wiki page
- * Start discussion on mail thread
- * Vote on mail thread
-
-__Spark__ process:
-
- * Create JIRA (dedicated label)
- * Discuss on the jira page
- * Vote on dev list
-
-__Kubernetes__ process:
-
- * Dedicated git repository
- * KEPs are committed to the repo
- * Well defined approval process managed by SIGs (KEPs are assigned to SIGs)
-
diff --git a/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md b/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md
deleted file mode 100644
index dd23e04..0000000
--- a/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: "GDPR in Ozone"
-date: "2019-September-17"
-weight: 5
-summary: GDPR in Ozone
-icon: user
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-
-Enabling GDPR compliance in Ozone is very straightforward. During bucket
-creation, you can specify `--enforcegdpr=true` or `-g=true` and this will
-ensure the bucket is GDPR compliant. Thus, any key created under this bucket
-will automatically be GDPR compliant.
-
-GDPR can only be enabled on a new bucket. For existing buckets, you would
-have to create a new GDPR compliant bucket and copy data from the old bucket
-into the new bucket to take advantage of GDPR.
-
-Example to create a GDPR compliant bucket:
-
-`ozone sh bucket create --enforcegdpr=true /hive/jan`
-
-`ozone sh bucket create -g=true /hive/jan`
-
-If you want to create an ordinary bucket then you can skip `--enforcegdpr`
-and `-g` flags.
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/gdpr/_index.md b/hadoop-hdds/docs/content/gdpr/_index.md
deleted file mode 100644
index 9888369..0000000
--- a/hadoop-hdds/docs/content/gdpr/_index.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: GDPR
-name: GDPR
-identifier: gdpr
-menu: main
-weight: 5
----
-<!---
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-        http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-    -->
-
-{{<jumbotron title="GDPR compliance in Ozone">}}
-          The General Data Protection Regulation (GDPR) is a law that governs how personal data should be handled. It is a European Union law, but due to the nature of software it oftentimes spills into other geographies.
-          Ozone supports GDPR's Right to Erasure (Right to be Forgotten).
-{{</jumbotron>}}
-
-<div class="alert alert-warning" role="alert">
-If you would like to understand Ozone's GDPR framework at a greater
-depth, please take a look at <a href="https://issues.apache.org/jira/secure/attachment/12978992/Ozone%20GDPR%20Framework.pdf">Ozone GDPR Framework.</a>
-</div>
-
-Once you create a GDPR compliant bucket, any key created in that bucket will
-automatically be GDPR compliant.
-
-
diff --git a/hadoop-hdds/docs/content/interface/JavaApi.md b/hadoop-hdds/docs/content/interface/JavaApi.md
deleted file mode 100644
index bb18068..0000000
--- a/hadoop-hdds/docs/content/interface/JavaApi.md
+++ /dev/null
@@ -1,156 +0,0 @@
----
-title: "Java API"
-date: "2017-09-14"
-weight: 1
-summary: Ozone has a set of native RPC based APIs. These are the lowest-level APIs on which all other protocols are built, and the most performant and feature-rich of all Ozone protocols.
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone ships with its own client library that supports RPC. For generic use cases, the
-S3-compatible REST interface can also be used instead of the Ozone client.
-
-
-## Creating an Ozone client
-The Ozone client factory creates the Ozone client. To get an RPC client we can call:
-
-{{< highlight java >}}
-OzoneClient ozClient = OzoneClientFactory.getRpcClient();
-{{< /highlight >}}
-
-If the user wants to create a client based on the configuration, they can
-call:
-
-{{< highlight java >}}
-OzoneClient ozClient = OzoneClientFactory.getClient();
-{{< /highlight >}}
-
-and an appropriate client based on configuration will be returned.
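-
-For example, a minimal sketch assuming the configuration-based factory overload (the `ozone.om.address` key and port below are assumptions; verify them against your version):
-
-{{< highlight java >}}
-OzoneConfiguration conf = new OzoneConfiguration();
-// Point the client at the Ozone Manager; key name assumed from OMConfigKeys.
-conf.set("ozone.om.address", "om-host:9862");
-OzoneClient ozClient = OzoneClientFactory.getRpcClient(conf);
-{{< /highlight >}}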
-
-## Writing data using Ozone Client
-
-The hierarchy of data inside Ozone is a volume, a bucket, and a key. A volume
-is a collection of buckets. A bucket is a collection of keys. To write data
-to Ozone, you need a volume, a bucket, and a key.
-
-### Creating a Volume
-
-Once we have a client, we need to get a reference to the ObjectStore. This
-is done via:
-
-{{< highlight java >}}
-ObjectStore objectStore = ozClient.getObjectStore();
-{{< /highlight >}}
-
-An object store represents an active cluster against which the client is working.
-
-{{< highlight java >}}
-// Let us create a volume to store our game assets.
-// This uses default arguments for creating that volume.
-objectStore.createVolume("assets");
-
-// Let us verify that the volume got created.
-OzoneVolume assets = objectStore.getVolume("assets");
-{{< /highlight >}}
-
-
-It is possible to pass additional arguments to `createVolume` by building a volume arguments object, as sketched below.
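-
-For example, a minimal sketch assuming the `VolumeArgs` builder (the exact setter names may vary between versions, and the owner/admin values are hypothetical):
-
-{{< highlight java >}}
-VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-    .setOwner("bilbo")     // assumed setter: volume owner
-    .setAdmin("gandalf")   // assumed setter: volume admin
-    .build();
-objectStore.createVolume("assets", volumeArgs);
-{{< /highlight >}}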
-
-### Creating a Bucket
-
-Once you have a volume, you can create buckets inside the volume.
-
-{{< highlight java >}}
-// Let us create a bucket called videos.
-assets.createBucket("videos");
-OzoneBucket video = assets.getBucket("videos");
-{{< /highlight >}}
-
-At this point we have a usable volume and a bucket. Our volume is called _assets_ and bucket is called _videos_.
-
-Now we can create a Key.
-
-### Reading and Writing a Key
-
-With a bucket object, users can now read and write keys. The following code reads a video called intro.mp4 from the local disk and stores it in the _videos_ bucket that we just created.
-
-{{< highlight java >}}
-// read data from the file, this is a user provided function.
-byte [] videoData = readFile("intro.mp4");
-
-// Create an output stream and write data.
-OzoneOutputStream videoStream = video.createKey("intro.mp4", 1048576);
-videoStream.write(videoData);
-
-// Close the stream when it is done.
-videoStream.close();
-
-
-// We can use the same bucket to read the file that we just wrote, by creating an input Stream.
-// Let us allocate a byte array to hold the video first.
-byte[] data = new byte[1048576];
-OzoneInputStream introStream = video.readKey("intro.mp4");
-// read intro.mp4 into the data buffer
-introStream.read(data);
-introStream.close();
-{{< /highlight >}}
-
-
-Here is a complete example of the code that we just wrote. Please note the close() calls in this program.
-
-{{< highlight java >}}
-// Let us create a client
-OzoneClient ozClient = OzoneClientFactory.getClient();
-
-// Get a reference to the ObjectStore using the client
-ObjectStore objectStore = ozClient.getObjectStore();
-
-// Let us create a volume to store our game assets.
-// This uses default arguments for creating that volume.
-objectStore.createVolume("assets");
-
-// Let us verify that the volume got created.
-OzoneVolume assets = objectStore.getVolume("assets");
-
-// Let us create a bucket called videos.
-assets.createBucket("videos");
-OzoneBucket video = assets.getBucket("videos");
-
-// read data from the file, this is assumed to be a user provided function.
-byte [] videoData = readFile("intro.mp4");
-
-// Create an output stream and write data.
-OzoneOutputStream videoStream = video.createKey("intro.mp4", 1048576);
-videoStream.write(videoData);
-
-// Close the stream when it is done.
-videoStream.close();
-
-
-// We can use the same bucket to read the file that we just wrote, by creating an input Stream.
-// Let us allocate a byte array to hold the video first.
-
-byte[] data = new byte[1048576];
-OzoneInputStream introStream = video.readKey("intro.mp4");
-introStream.read(data);
-
-// Close the stream when it is done.
-introStream.close();
-
-// Close the client.
-ozClient.close();
-{{< /highlight >}}
diff --git a/hadoop-hdds/docs/content/interface/OzoneFS.md b/hadoop-hdds/docs/content/interface/OzoneFS.md
deleted file mode 100644
index fcfef6d..0000000
--- a/hadoop-hdds/docs/content/interface/OzoneFS.md
+++ /dev/null
@@ -1,155 +0,0 @@
----
-title: Ozone File System
-date: 2017-09-14
-weight: 2
-summary: The Hadoop compatible file system interface allows any application that expects an HDFS-like interface to work against Ozone with zero changes. Frameworks like Apache Spark, YARN and Hive work against Ozone without needing any changes.
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-The Hadoop compatible file system interface allows storage backends like Ozone
-to be easily integrated into the Hadoop ecosystem. The Ozone file system is a
-Hadoop compatible file system.
-
-## Setting up the Ozone file system
-
-To create an Ozone file system, we have to choose a bucket where the file system would live. This bucket will be used as the backend store for OzoneFileSystem. All the files and directories will be stored as keys in this bucket.
-
-Please run the following commands to create a volume and bucket, if you don't have them already.
-
-{{< highlight bash >}}
-ozone sh volume create /volume
-ozone sh bucket create /volume/bucket
-{{< /highlight >}}
-
-Once this is created, please verify that the bucket exists via the _list volume_ or _list bucket_ commands.
-
-Please add the following entries to core-site.xml:
-
-{{< highlight xml >}}
-<property>
-  <name>fs.o3fs.impl</name>
-  <value>org.apache.hadoop.fs.ozone.OzoneFileSystem</value>
-</property>
-<property>
-  <name>fs.AbstractFileSystem.o3fs.impl</name>
-  <value>org.apache.hadoop.fs.ozone.OzFs</value>
-</property>
-<property>
-  <name>fs.defaultFS</name>
-  <value>o3fs://bucket.volume</value>
-</property>
-{{< /highlight >}}
-
-This makes the bucket the default file system for HDFS dfs commands and registers the o3fs file system type.
-
-You also need to add the ozone-filesystem.jar file to the classpath:
-
-{{< highlight bash >}}
-export HADOOP_CLASSPATH=/opt/ozone/share/ozonefs/lib/hadoop-ozone-filesystem-lib-current*.jar:$HADOOP_CLASSPATH
-{{< /highlight >}}
-
-Once the default file system has been set up, users can run commands like ls, put, mkdir, etc.
-For example,
-
-{{< highlight bash >}}
-hdfs dfs -ls /
-{{< /highlight >}}
-
-or
-
-{{< highlight bash >}}
-hdfs dfs -mkdir /users
-{{< /highlight >}}
-
-
-In other words, programs like Hive, Spark, and Distcp will work against this file system unchanged.
-Please note that any keys created or deleted in the bucket using methods other than OzoneFileSystem will also show up as directories and files in the Ozone File System.
-
-Note: Bucket and volume names are not allowed to have a period in them.
-Moreover, the filesystem URI can take a fully qualified form with the OM host and an optional port as a part of the path following the volume name.
-For example, you can specify both host and port:
-
-{{< highlight bash>}}
-hdfs dfs -ls o3fs://bucket.volume.om-host.example.com:5678/key
-{{< /highlight >}}
-
-When the port number is not specified, it will be retrieved from the config key `ozone.om.address`
-if defined; otherwise it falls back to the default port `9862`.
-For example, with `ozone.om.address` configured as follows in `ozone-site.xml`:
-
-{{< highlight xml >}}
-  <property>
-    <name>ozone.om.address</name>
-    <value>0.0.0.0:6789</value>
-  </property>
-{{< /highlight >}}
-
-When we run the command:
-
-{{< highlight bash>}}
-hdfs dfs -ls o3fs://bucket.volume.om-host.example.com/key
-{{< /highlight >}}
-
-The above command is essentially equivalent to:
-
-{{< highlight bash>}}
-hdfs dfs -ls o3fs://bucket.volume.om-host.example.com:6789/key
-{{< /highlight >}}
-
-Note: only the port number from the config is used in this case;
-the host name in the config `ozone.om.address` is ignored.
-
-
-## Supporting older Hadoop version (Legacy jar, BasicOzoneFilesystem)
-
-There are two ozonefs jar files; both of them include all the dependencies:
-
- * share/ozone/lib/hadoop-ozone-filesystem-lib-current-VERSION.jar
- * share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-VERSION.jar
-
-The first one contains all the required dependencies to use ozonefs with a
- compatible Hadoop version (Hadoop 3.2).
-
-The second one contains all the dependencies in an internal, separate directory,
- and a special class loader is used to load all the classes from that location.
-
-With this method the hadoop-ozone-filesystem-lib-legacy.jar can be used from
- any older Hadoop version (e.g. Hadoop 3.1, Hadoop 2.7 or Spark with Hadoop 2.7).
-
-Similar to the dependency jars, there are two OzoneFileSystem implementations.
-
-For hadoop 3.0 and newer, you can use `org.apache.hadoop.fs.ozone.OzoneFileSystem`
- which is a full implementation of the Hadoop compatible File System API.
-
-For Hadoop 2.x you should use the Basic version: `org.apache.hadoop.fs.ozone.BasicOzoneFileSystem`.
-
-This is the same implementation, but it doesn't include the features/dependencies which are added with
- Hadoop 3.0 (e.g. FS statistics, encryption zones).
-
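-For example, to use the legacy jar with an older Hadoop client, add it to the classpath the same way as the current jar (the path below is illustrative; adjust it to your distribution layout):
-
-{{< highlight bash >}}
-export HADOOP_CLASSPATH=/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-*.jar:$HADOOP_CLASSPATH
-{{< /highlight >}}
-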
-### Summary
-
-The following table summarizes which jar file and implementation should be used:
-
-Hadoop version | Required jar            | OzoneFileSystem implementation
----------------|-------------------------|----------------------------------------------------
-3.2            | filesystem-lib-current  | org.apache.hadoop.fs.ozone.OzoneFileSystem
-3.1            | filesystem-lib-legacy   | org.apache.hadoop.fs.ozone.OzoneFileSystem
-2.9            | filesystem-lib-legacy   | org.apache.hadoop.fs.ozone.BasicOzoneFileSystem
-2.7            | filesystem-lib-legacy   | org.apache.hadoop.fs.ozone.BasicOzoneFileSystem
diff --git a/hadoop-hdds/docs/content/interface/S3.md b/hadoop-hdds/docs/content/interface/S3.md
deleted file mode 100644
index 6a8e2d7..0000000
--- a/hadoop-hdds/docs/content/interface/S3.md
+++ /dev/null
@@ -1,150 +0,0 @@
----
-title: S3 Protocol
-weight: 3
-summary: Ozone supports Amazon's Simple Storage Service (S3) protocol. In fact, you can use S3 clients and S3 SDK based applications with Ozone without any modifications.
----
-
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-
-Ozone provides an S3 compatible REST interface, so the object store data can be used with any S3 compatible tools.
-
-## Getting started
-
-S3 Gateway is a separate component which provides the S3 compatible APIs. It must be started in addition to the regular Ozone components.
-
-You can start a docker based cluster, including the S3 gateway, from the release package.
-
-Go to the `compose/ozones3` directory, and start the server:
-
-```bash
-docker-compose up -d
-```
-
-You can access the S3 gateway at `http://localhost:9878`
-
-## URL Scheme
-
-Ozone S3 gateway supports both virtual-host-style S3 bucket addresses (e.g. http://bucketname.host:9878) and path-style addresses (e.g. http://host:9878/bucketname).
-
-By default it uses path-style addressing. To use virtual-host-style URLs, set your main domain name in your `ozone-site.xml`:
-
-```xml
-<property>
-   <name>ozone.s3g.domain.name</name>
-   <value>s3g.internal</value>
-</property>
-```
-
-## Bucket browser
-
-Buckets can be browsed from a web browser by adding `?browser=true` to the bucket URL.
-
-For example, the content of 'testbucket' can be checked from the browser using the URL http://localhost:9878/testbucket?browser=true
-
-
-## Implemented REST endpoints
-
-Operations on S3Gateway service:
-
-Endpoint    | Status      |
-------------|-------------|
-GET service | implemented |
-
-Operations on Bucket:
-
-Endpoint                            | Status      | Notes
-------------------------------------|-------------|---------------
-GET Bucket (List Objects) Version 2 | implemented |
-HEAD Bucket                         | implemented |
-DELETE Bucket                       | implemented |
-PUT Bucket (Create bucket)          | implemented |
-Delete Multiple Objects (POST)      | implemented |
-
-Operations on Objects:
-
-Endpoint                            | Status          | Notes
-------------------------------------|-----------------|---------------
-PUT Object                          | implemented     |
-GET Object                          | implemented     |
-Multipart Upload                    | implemented     | Except the listing of the current MultiPartUploads.
-DELETE Object                       | implemented     |
-HEAD Object                         | implemented     |
-
-
-## Security
-
-If security is not enabled, you can *use* **any** AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
-
-If security is enabled, you can get the key and the secret with the `ozone s3 getsecret` command (Kerberos based authentication is required).
-
-```bash
-kinit -kt /etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM
-ozone s3 getsecret
-awsAccessKey=testuser/scm@EXAMPLE.COM
-awsSecret=c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999
-
-```
-
-Now, you can use the key and the secret to access the S3 endpoint:
-
-```bash
-export AWS_ACCESS_KEY_ID=testuser/scm@EXAMPLE.COM
-export AWS_SECRET_ACCESS_KEY=c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999
-aws s3api --endpoint http://localhost:9878 create-bucket --bucket bucket1
-```
-
-
-## S3 bucket name mapping to Ozone buckets
-
-**Note**: Ozone has a notion of 'volumes', which is missing from the S3 REST endpoint. Under the hood, S3 bucket names are mapped to Ozone 'volume/bucket' locations (depending on the given authentication information).
-
-To show the storage location of an S3 bucket, use the `ozone s3 path <bucketname>` command.
-
-```bash
-aws s3api --endpoint-url http://localhost:9878 create-bucket --bucket=bucket1
-
-ozone s3 path bucket1
-Volume name for S3Bucket is : s3thisisakey
-Ozone FileSystem Uri is : o3fs://bucket1.s3thisisakey
-```
-
-## Clients
-
-### AWS Cli
-
-The `aws` CLI can be used by specifying the custom REST endpoint.
-
-```bash
-aws s3api --endpoint http://localhost:9878 create-bucket --bucket buckettest
-```
-
-Or
-
-```bash
-aws s3 ls --endpoint http://localhost:9878 s3://buckettest
-```
-
-### S3 Fuse driver (goofys)
-
-Goofys is an S3 FUSE driver. It can be used to mount any Ozone bucket as a POSIX file system.
-
-
-```bash
-goofys --endpoint http://localhost:9878 bucket1 /mount/bucket1
-```
diff --git a/hadoop-hdds/docs/content/interface/_index.md b/hadoop-hdds/docs/content/interface/_index.md
deleted file mode 100644
index 2548647..0000000
--- a/hadoop-hdds/docs/content/interface/_index.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-title: "Programming Interfaces"
-menu:
-   main:
-      weight: 4
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-{{<jumbotron title="Multi-Protocol Support">}}
-Ozone is a multi-protocol file system. There are different protocols by which
- users can access data on Ozone.
-{{</jumbotron>}}
diff --git a/hadoop-hdds/docs/content/recipe/Prometheus.md b/hadoop-hdds/docs/content/recipe/Prometheus.md
deleted file mode 100644
index 310d078..0000000
--- a/hadoop-hdds/docs/content/recipe/Prometheus.md
+++ /dev/null
@@ -1,95 +0,0 @@
----
-title: Monitoring with Prometheus
-summary: A simple recipe to monitor Ozone using Prometheus
-linktitle: Prometheus
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-[Prometheus](https://prometheus.io/) is an open-source monitoring server developed under the [Cloud Native Computing Foundation](https://www.cncf.io/).
-
-Ozone supports Prometheus out of the box. The servers start a Prometheus
-compatible metrics endpoint where all the available Hadoop metrics are published in the Prometheus exporter format.
-
-## Prerequisites
-
 1. [Install and start]({{< ref "start/RunningViaDocker.md" >}}) an Ozone cluster.
 2. [Download](https://prometheus.io/download/#prometheus) the Prometheus binary.
-
-## Monitoring with Prometheus
-
-* To enable the Prometheus metrics endpoint, you need to add a new configuration property to the `ozone-site.xml` file.
-
- ```xml
-  <property>
-    <name>hdds.prometheus.endpoint.enabled</name>
-    <value>true</value>
-  </property>
-```
-
-_Note_: for a Docker Compose based pseudo cluster, add the \
-`OZONE-SITE.XML_hdds.prometheus.endpoint.enabled=true` line to the `docker-config` file.
-
-* Restart the Ozone Manager and Storage Container Manager and check the Prometheus endpoints:
-
 * http://scm:9876/prom

 * http://ozoneManager:9874/prom
-
-* Create a prometheus.yaml configuration with the previous endpoints:
-
-```yaml
-global:
-  scrape_interval: 15s
-
-scrape_configs:
-  - job_name: ozone
-    metrics_path: /prom
-    static_configs:
-     - targets:
-        - "scm:9876"
-        - "ozoneManager:9874"
-```
-
-* Start Prometheus from the directory where you have the prometheus.yaml file:
-
-```bash
-prometheus
-```
-
-* Check the active targets in the Prometheus web UI:
-
-http://localhost:9090/targets
-
-![Prometheus target page example](prometheus.png)
-
-
-* Check any metrics on the Prometheus web UI.\
-For example:
-
-http://localhost:9090/graph?g0.range_input=1h&g0.expr=om_metrics_num_key_allocate&g0.tab=1
-
-![Prometheus metrics page example](prometheus-key-allocate.png)
-
-## Note
-
-The Ozone distribution contains a ready-to-use, dockerized environment to try out Ozone and Prometheus. It can be found under the `compose/ozoneperf` directory.
-
-```bash
-cd compose/ozoneperf
-docker-compose up -d
-```
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md b/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md
deleted file mode 100644
index 9f9d347..0000000
--- a/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md
+++ /dev/null
@@ -1,188 +0,0 @@
----
-title: Spark in Kubernetes with OzoneFS
-linktitle: Spark
-summary: How to use Apache Spark with Ozone on K8s?
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-This recipe shows how Ozone object store can be used from Spark using:
-
- - OzoneFS (Hadoop compatible file system)
- - Hadoop 2.7 (included in the Spark distribution)
- - Kubernetes Spark scheduler
- - Local spark client
-
-
-## Requirements
-
-Download the latest Spark and Ozone distributions and extract them. This method is
-tested with the `spark-2.4.0-bin-hadoop2.7` distribution.
-
-You also need the following:
-
 * A container repository to push and pull the spark+ozone images (in this recipe we will use Docker Hub)
- * A repo/name for the custom containers (in this recipe _myrepo/ozone-spark_)
- * A dedicated namespace in kubernetes (we use _yournamespace_ in this recipe)
-
-## Create the docker image for drivers
-
-### Create the base Spark driver/executor image
-
-First, create a docker image with the Spark image creator.
-Execute the following from the Spark distribution:
-
-```bash
-./bin/docker-image-tool.sh -r myrepo -t 2.4.0 build
-```
-
-_Note_: if you use Minikube add the `-m` flag to use the docker daemon of the Minikube image:
-
-```bash
-./bin/docker-image-tool.sh -m -r myrepo -t 2.4.0 build
-```
-
-`./bin/docker-image-tool.sh` is an official Spark tool to create container images and this step will create multiple Spark container images with the name _myrepo/spark_. The first container will be used as a base container in the following steps.
-
-### Customize the docker image
-
-Create a new directory for customizing the created docker image.
-
-Copy the `ozone-site.xml` from the cluster:
-
-```bash
-kubectl cp om-0:/opt/hadoop/etc/hadoop/ozone-site.xml .
-```
-
-Then create a custom `core-site.xml`:
-
-```xml
-<configuration>
-    <property>
-        <name>fs.o3fs.impl</name>
-        <value>org.apache.hadoop.fs.ozone.BasicOzoneFileSystem</value>
-    </property>
-    <property>
-        <name>fs.AbstractFileSystem.o3fs.impl</name>
-        <value>org.apache.hadoop.fs.ozone.OzFs</value>
-     </property>
-</configuration>
-```
-
-_Note_: You may also use `org.apache.hadoop.fs.ozone.OzoneFileSystem` without the `Basic` prefix. The `Basic` version doesn't support FS statistics and encryption zones but can work together with older hadoop versions.
-
-Copy the `ozonefs.jar` file from an ozone distribution (__use the legacy version!__)
-
-```bash
-kubectl cp om-0:/opt/hadoop/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-0.4.0-SNAPSHOT.jar .
-```
-
-
-Create a new Dockerfile and build the image:
-```
-FROM myrepo/spark:2.4.0
-ADD core-site.xml /opt/hadoop/conf/core-site.xml
-ADD ozone-site.xml /opt/hadoop/conf/ozone-site.xml
-ENV HADOOP_CONF_DIR=/opt/hadoop/conf
-ENV SPARK_EXTRA_CLASSPATH=/opt/hadoop/conf
-ADD hadoop-ozone-filesystem-lib-legacy-0.4.0-SNAPSHOT.jar /opt/hadoop-ozone-filesystem-lib-legacy.jar
-```
-
-```bash
-docker build -t myrepo/spark-ozone .
-```
-
-For a remote Kubernetes cluster, you may need to push it:
-
-```bash
-docker push myrepo/spark-ozone
-```
-
-## Create a bucket and identify the ozonefs path
-
-Download any text file and save it as `/tmp/alice.txt` first.
-
-```bash
-kubectl port-forward s3g-0 9878:9878
-aws s3api --endpoint http://localhost:9878 create-bucket --bucket=test
-aws s3api --endpoint http://localhost:9878 put-object --bucket test --key alice.txt --body /tmp/alice.txt
-kubectl exec -it scm-0 ozone s3 path test
-```
-
-The output of the last command is something like this:
-
-```
-Volume name for S3Bucket is : s3asdlkjqiskjdsks
-Ozone FileSystem Uri is : o3fs://test.s3asdlkjqiskjdsks
-```
-
-Write down the Ozone file system URI, as it will be used with the spark-submit command.
-
-## Create service account to use
-
-```bash
-kubectl create serviceaccount spark -n yournamespace
-kubectl create clusterrolebinding spark-role --clusterrole=edit --serviceaccount=yournamespace:spark --namespace=yournamespace
-```
-
-## Execute the job
-
-Execute the following spark-submit command, but change at least the following values:
-
 * the Kubernetes master URL (you can check your _~/.kube/config_ to find the actual value)
 * the Kubernetes namespace (_yournamespace_ in this example)
 * serviceAccountName (you can use the _spark_ value if you followed the previous steps)
 * container.image (in this example this is _myrepo/spark-ozone_, which was pushed to the registry in the previous steps)
 * location of the input file (o3fs://...); use the string identified earlier with the \
 `ozone s3 path <bucketname>` command
-
-```bash
-bin/spark-submit \
-    --master k8s://https://kubernetes:6443 \
-    --deploy-mode cluster \
-    --name spark-word-count \
-    --class org.apache.spark.examples.JavaWordCount \
-    --conf spark.executor.instances=1 \
-    --conf spark.kubernetes.namespace=yournamespace \
-    --conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \
-    --conf spark.kubernetes.container.image=myrepo/spark-ozone \
-    --conf spark.kubernetes.container.image.pullPolicy=Always \
-    --jars /opt/hadoop-ozone-filesystem-lib-legacy.jar \
-    local:///opt/spark/examples/jars/spark-examples_2.11-2.4.0.jar \
-    o3fs://bucket.volume/alice.txt
-```
-
-Check the available `spark-word-count-...` pods with `kubectl get pod`
-
-Check the output of the calculation with \
-`kubectl logs spark-word-count-1549973913699-driver`
-
-You should see the output of the wordcount job. For example:
-
-```
-...
-name: 8
-William: 3
-this,': 1
-SOUP!': 1
-`Silence: 1
-`Mine: 1
-ordered.: 1
-considering: 3
-muttering: 3
-candle: 2
-...
-```
diff --git a/hadoop-hdds/docs/content/recipe/_index.md b/hadoop-hdds/docs/content/recipe/_index.md
deleted file mode 100644
index 47053ab..0000000
--- a/hadoop-hdds/docs/content/recipe/_index.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-title: Recipes
-date: "2017-10-10"
-menu: main
-weight: 9
-
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-
-{{<jumbotron title="Recipes of Ozone">}}
-   Standard how-to documents which describe how to use Ozone with other software.
-   For example, how to use Ozone with Apache Spark.
-{{</jumbotron>}}
diff --git a/hadoop-hdds/docs/content/recipe/prometheus-key-allocate.png b/hadoop-hdds/docs/content/recipe/prometheus-key-allocate.png
deleted file mode 100644
index c934fc0..0000000
--- a/hadoop-hdds/docs/content/recipe/prometheus-key-allocate.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/content/recipe/prometheus.png b/hadoop-hdds/docs/content/recipe/prometheus.png
deleted file mode 100644
index 12bbe55..0000000
--- a/hadoop-hdds/docs/content/recipe/prometheus.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/content/security/SecuityWithRanger.md b/hadoop-hdds/docs/content/security/SecuityWithRanger.md
deleted file mode 100644
index cbbd53e..0000000
--- a/hadoop-hdds/docs/content/security/SecuityWithRanger.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: "Apache Ranger"
-date: "2019-April-03"
-weight: 5
-summary: Apache Ranger is a framework to enable, monitor and manage comprehensive data security across the Hadoop platform.
-icon: user
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-
-Apache Ranger™ is a framework to enable, monitor and manage comprehensive data
-security across the Hadoop platform. Any version of Apache Ranger greater
-than 1.2.0 is aware of Ozone, and can manage an Ozone cluster.
-
-
-To use Apache Ranger, you must have Apache Ranger installed in your Hadoop
-cluster. For Apache Ranger installation instructions, please take a look
-at the [Apache Ranger website](https://ranger.apache.org/index.html).
-
-If you have a working Apache Ranger installation that is aware of Ozone, then
-configuring Ozone to work with Apache Ranger is trivial. You have to enable
-ACL support and set the ACL authorizer class inside Ozone to the Ranger
-authorizer. Please add the following properties to ozone-site.xml:
-
-Property|Value
---------|------------------------------------------------------------
-ozone.acl.enabled         | true
-ozone.acl.authorizer.class| org.apache.ranger.authorization.ozone.authorizer.RangerOzoneAuthorizer
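-
-For reference, the corresponding ozone-site.xml entries would look like the following (a snippet that simply restates the table above):
-
-```xml
-<property>
-  <name>ozone.acl.enabled</name>
-  <value>true</value>
-</property>
-<property>
-  <name>ozone.acl.authorizer.class</name>
-  <value>org.apache.ranger.authorization.ozone.authorizer.RangerOzoneAuthorizer</value>
-</property>
-```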
diff --git a/hadoop-hdds/docs/content/security/SecureOzone.md b/hadoop-hdds/docs/content/security/SecureOzone.md
deleted file mode 100644
index d4d836f..0000000
--- a/hadoop-hdds/docs/content/security/SecureOzone.md
+++ /dev/null
@@ -1,178 +0,0 @@
----
-title: "Securing Ozone"
-date: "2019-April-03"
-summary: Overview of Ozone security concepts and steps to secure Ozone Manager and SCM.
-weight: 1
-icon: tower
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-
-# Kerberos
-
-Ozone depends on [Kerberos](https://web.mit.edu/kerberos/) to make the
-clusters secure. Historically, HDFS has supported running in isolated
-secure networks where it is possible to deploy without securing the cluster.
-
-This release of Ozone follows that model, but will soon move to _secure by
-default._ Today, to enable security in an Ozone cluster, we need to set the
-configuration **ozone.security.enabled** to _true_ and **hadoop.security.authentication**
-to _kerberos_.
-
-Property|Value
-----------------------|---------
-ozone.security.enabled| _true_
-hadoop.security.authentication| _kerberos_
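-
-Expressed as Hadoop-style XML configuration, this is (a snippet restating the table above):
-
-```xml
-<property>
-  <name>ozone.security.enabled</name>
-  <value>true</value>
-</property>
-<property>
-  <name>hadoop.security.authentication</name>
-  <value>kerberos</value>
-</property>
-```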
-
-# Tokens #
-
-Ozone uses a notion of tokens to avoid overburdening the Kerberos server.
-When you serve thousands of requests per second, involving Kerberos might not
-work well. Hence, once authentication is done, Ozone issues delegation
-tokens and block tokens to the clients. These tokens allow applications to do
-specified operations against the cluster, as if they had Kerberos tickets
-with them. Ozone supports the following kinds of tokens.
-
-### Delegation Token ###
-Delegation tokens allow an application to impersonate a user's Kerberos
-credentials. This token is based on verification of Kerberos identity and is
-issued by the Ozone Manager. Delegation tokens are enabled by default when
-security is enabled.
-
-### Block Token ###
-
-Block tokens allow a client to read or write a block. This is needed so that
-data nodes know that the user/client has permission to read or make
-modifications to the block.
-
-### S3Token ###
-
-S3 uses a very different shared secret security scheme. Ozone supports the AWS Signature Version 4 protocol,
-and from the end user's perspective Ozone's S3 feels exactly like AWS S3.
-
-The S3 credential tokens are called S3 tokens in the code. These tokens are
-also enabled by default when security is enabled.
-
-
-Each of the service daemons that make up Ozone needs a Kerberos service
-principal name and a corresponding [Kerberos keytab](https://web.mit.edu/kerberos/krb5-latest/doc/basic/keytab_def.html) file.
-
-All these settings should be made in ozone-site.xml.
-
-<div class="card-group">
-  <div class="card">
-    <div class="card-body">
-      <h3 class="card-title">Storage Container Manager</h3>
-      <p class="card-text">
-      <br>
-        SCM requires two Kerberos principals, and the corresponding keytab files
-        for both of these principals.
-      <br>
-      <table class="table table-dark">
-        <thead>
-          <tr>
-            <th scope="col">Property</th>
-            <th scope="col">Description</th>
-          </tr>
-        </thead>
-        <tbody>
-          <tr>
-            <td>hdds.scm.kerberos.principal</td>
-            <td>The SCM service principal. <br/> e.g. scm/_HOST@REALM.COM</td>
-          </tr>
-          <tr>
-            <td>hdds.scm.kerberos.keytab.file</td>
-            <td>The keytab file used by SCM daemon to login as its service principal.</td>
-          </tr>
-          <tr>
-            <td>hdds.scm.http.kerberos.principal</td>
-            <td>SCM http server service principal.</td>
-          </tr>
-          <tr>
-            <td>hdds.scm.http.kerberos.keytab</td>
-            <td>The keytab file used by SCM http server to login as its service principal.</td>
-          </tr>
-        </tbody>
-      </table>
-    </div>
-  </div>
-  <div class="card">
-    <div class="card-body">
-      <h3 class="card-title">Ozone Manager</h3>
-      <p class="card-text">
-      <br>
-        Like SCM, OM also requires two Kerberos principals, and the
-        corresponding keytab files for both of these principals.
-      <br>
-      <table class="table table-dark">
-        <thead>
-          <tr>
-            <th scope="col">Property</th>
-            <th scope="col">Description</th>
-          </tr>
-        </thead>
-        <tbody>
-          <tr>
-            <td>ozone.om.kerberos.principal</td>
-            <td>The OzoneManager service principal. <br/> e.g. om/_HOST@REALM.COM</td>
-          </tr>
-          <tr>
-            <td>ozone.om.kerberos.keytab.file</td>
-            <td>The keytab file used by the OM daemon to login as its service principal.</td>
-          </tr>
-          <tr>
-            <td>ozone.om.http.kerberos.principal</td>
-            <td>Ozone Manager http server service principal.</td>
-          </tr>
-          <tr>
-            <td>ozone.om.http.kerberos.keytab</td>
-            <td>The keytab file used by OM http server to login as its service principal.</td>
-          </tr>
-        </tbody>
-      </table>
-    </div>
-  </div>
-  <div class="card">
-    <div class="card-body">
-      <h3 class="card-title">S3 Gateway</h3>
-      <p class="card-text">
-      <br>
-        S3 gateway requires one service principal; the configuration values
-        needed in ozone-site.xml are listed below.
-      <br>
-      <table class="table table-dark">
-        <thead>
-          <tr>
-            <th scope="col">Property</th>
-            <th scope="col">Description</th>
-          </tr>
-        </thead>
-        <tbody>
-          <tr>
-            <td>ozone.s3g.authentication.kerberos.principal</td>
-            <td>S3 Gateway principal. <br/> e.g. HTTP/_HOST@EXAMPLE.COM</td>
-          </tr>
-          <tr>
-            <td>ozone.s3g.keytab.file</td>
-            <td>The keytab file used by S3 gateway</td>
-          </tr>
-        </tbody>
-      </table>
-    </div>
-  </div>
-</div>
diff --git a/hadoop-hdds/docs/content/security/SecuringDatanodes.md b/hadoop-hdds/docs/content/security/SecuringDatanodes.md
deleted file mode 100644
index 6b7d823..0000000
--- a/hadoop-hdds/docs/content/security/SecuringDatanodes.md
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: "Securing Datanodes"
-date: "2019-April-03"
-weight: 2
-summary: Explains different modes of securing data nodes, ranging from Kerberos to auto approval.
-icon: th
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-
-Datanodes under Hadoop are traditionally secured by creating a keytab file on
-the data nodes. With Ozone, we have moved to using data node
-certificates. That is, Kerberos on data nodes is not needed for a
-secure Ozone cluster.
-
-However, we support the legacy Kerberos based authentication to make it easy
-for the current set of users. The following HDFS configuration keys are set
-up in hdfs-site.xml.
-
-Property|Description
---------|--------------
-dfs.datanode.kerberos.principal|The datanode service principal. <br/> e.g. dn/_HOST@REALM.COM
-dfs.datanode.keytab.file| The keytab file used by datanode daemon to login as its service principal.
-hdds.datanode.http.kerberos.principal| Datanode http server service principal.
-hdds.datanode.http.kerberos.keytab| The keytab file used by datanode http server to login as its service principal.
-
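-Expressed as hdfs-site.xml entries, this would look like the following (a sketch restating the table above; the principal and keytab path values are illustrative):
-
-```xml
-<property>
-  <name>dfs.datanode.kerberos.principal</name>
-  <value>dn/_HOST@REALM.COM</value>
-</property>
-<property>
-  <name>dfs.datanode.keytab.file</name>
-  <value>/etc/security/keytabs/dn.keytab</value>
-</property>
-```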
-
-## How a data node becomes secure
-
-Under Ozone, when a data node boots up and discovers SCM's address, the first
-thing the data node does is create a private key and send a certificate
-request to the SCM.
-
-<h3>Certificate Approval via Kerberos <span class="badge badge-secondary">Current Model</span></h3>
-SCM has a built-in CA, and SCM has to approve this request. If the data node
-already has a Kerberos keytab, then SCM will trust the Kerberos credentials and
-issue a certificate automatically.
-
-
-<h3>Manual Approval <span class="badge badge-primary">In Progress</span></h3>
-If these are brand new data nodes and Kerberos keytabs are not present at the
-data nodes, then this request for the data node's identity certificate is
-queued up for approval from the administrator (this is work in progress,
-not committed in Ozone yet). In other words, the web of trust is established
-by the administrator of the cluster.
-
-<h3>Automatic Approval <span class="badge badge-secondary">In Progress</span></h3>
-If you are running under a container orchestrator like Kubernetes, we rely on
-Kubernetes to create a one-time token that will be given to the data node during
-boot time to prove the identity of the data node container (this is also work
-in progress).
-
-
-Once a certificate is issued, a data node is secure and the Ozone Manager can
-issue block tokens. If there are no data node certificates or the SCM's root
-certificate is not present on the data node, then the data node will register
-itself and download the SCM's root certificate, as well as obtain the
-certificates for itself.
diff --git a/hadoop-hdds/docs/content/security/SecuringS3.md b/hadoop-hdds/docs/content/security/SecuringS3.md
deleted file mode 100644
index 1cb0c80..0000000
--- a/hadoop-hdds/docs/content/security/SecuringS3.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-title: "Securing S3"
-date: "2019-April-03"
-summary: Ozone supports S3 protocol, and uses AWS Signature Version 4 protocol which allows a seamless S3 experience.
-weight: 4
-icon: cloud
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-To access an S3 bucket, users need an AWS access key ID and an AWS secret. Both of
-these are normally generated on the AWS website. When you use Ozone's S3
-protocol, you need the same kind of AWS access key and secret.
-
-Under Ozone, clients can download the access key directly from Ozone.
-The user needs to `kinit` first, and once they have authenticated via Kerberos
- they can download the S3 access key ID and AWS secret. Just like with AWS S3,
- both of these are secrets that need to be protected by the client since they
- give full access to the S3 buckets.
-
-
-* S3 clients can get the access key ID and user secret from the OzoneManager.
-
-```bash
-ozone s3 getsecret
-```
-This command will talk to Ozone, validate the user via Kerberos and generate
-the AWS credentials. The values will be printed to the screen. You can
-set these values in your _.aws_ file for automatic access while working
-against Ozone S3 buckets.
-
-<div class="alert alert-danger" role="alert">
- Please note: These S3 credentials are like your Kerberos passwords
- that give complete access to your buckets.
-</div>
-
-
-* Now you can proceed to set up these secrets in the AWS config:
-
-```bash
-aws configure set default.s3.signature_version s3v4
-aws configure set aws_access_key_id ${accessId}
-aws configure set aws_secret_access_key ${secret}
-aws configure set region us-west-1
-```
-Please refer to the AWS S3 documentation on how to use S3 via the command line or via
-the S3 API.
diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.md b/hadoop-hdds/docs/content/security/SecuringTDE.md
deleted file mode 100644
index 3e8f2d1..0000000
--- a/hadoop-hdds/docs/content/security/SecuringTDE.md
+++ /dev/null
@@ -1,65 +0,0 @@
----
-title: "Transparent Data Encryption"
-date: "2019-April-03"
-summary: TDE allows data on the disks to be encrypted-at-rest and automatically decrypted during access. You can enable this per key or per bucket.
-weight: 3
-icon: lock
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-The Ozone TDE setup process and usage are very similar to HDFS TDE.
-The major difference is that Ozone TDE is enabled at the Ozone bucket level
-when a bucket is created.
-
-### Setting up the Key Management Server
-
-To use TDE, clients must set up a Key Management Server and provide its URI to
-Ozone/HDFS. Since Ozone and HDFS can use the same Key Management Server, this
- configuration can be provided via *hdfs-site.xml*.
-
-Property| Value
------------------------------------|-----------------------------------------
-hadoop.security.key.provider.path  | KMS uri. <br> e.g. kms://http@kms-host:9600/kms
-
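-Expressed as an hdfs-site.xml entry, this would look like the following (a snippet restating the table above; the KMS host and port are illustrative):
-
-```xml
-<property>
-  <name>hadoop.security.key.provider.path</name>
-  <value>kms://http@kms-host:9600/kms</value>
-</property>
-```
-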
-### Using Transparent Data Encryption
-If this is already configured for your cluster, then you can simply proceed
-to create the encryption key and enable encrypted buckets.
-
-To create an encrypted bucket, the client needs to:
-
-   * Create a bucket encryption key with the hadoop key CLI, similar to
-  how you would use HDFS encryption zones.
-
-  ```bash
-  hadoop key create encKey
-  ```
-  The above command creates an encryption key for the bucket you want to protect.
-  Once the key is created, you can tell Ozone to use that key when you are
-  reading and writing data into a bucket.
-
-   * Assign the encryption key to a bucket.
-
-  ```bash
-  ozone sh bucket create -k encKey /vol/encryptedBucket
-  ```
-
-After this command, all data written to _encryptedBucket_ will be encrypted
-with encKey, and while reading, the clients will talk to the Key Management
-Server to fetch the key and decrypt the data. In other words, the data stored
-inside Ozone is always encrypted. The fact that data is encrypted at rest
-is completely transparent to the clients and end users.
diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.md b/hadoop-hdds/docs/content/security/SecurityAcls.md
deleted file mode 100644
index 31bbb0a..0000000
--- a/hadoop-hdds/docs/content/security/SecurityAcls.md
+++ /dev/null
@@ -1,85 +0,0 @@
----
-title: "Ozone ACLs"
-date: "2019-April-03"
-weight: 6
-summary: Native Ozone Authorizer provides Access Control List (ACL) support for Ozone without Ranger integration.
-icon: transfer
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone supports a set of native ACLs. These ACLs can be used independently or
-along with Ranger. If Apache Ranger is enabled, then the ACL will be checked
-first with Ranger and then Ozone's internal ACLs will be evaluated.
-
-Ozone ACLs are a superset of POSIX and S3 ACLs.
-
-The general format of an ACL is _object_:_who_:_rights_.
-
-Where an _object_ can be:
-
-1. **Volume** - An Ozone volume.  e.g. _/volume_
-2. **Bucket** - An Ozone bucket. e.g. _/volume/bucket_
-3. **Key** - An object key or an object. e.g. _/volume/bucket/key_
-4. **Prefix** - A path prefix for a specific key. e.g. _/volume/bucket/prefix1/prefix2_
-
-Where a _who_ can be:
-
-1. **User** - A user in the Kerberos domain. As in the POSIX world, a user can be
-named or unnamed.
-2. **Group** - A group in the Kerberos domain. As in the POSIX world, a group
-can be named or unnamed.
-3. **World** - All authenticated users in the Kerberos domain. This maps to
-others in the POSIX domain.
-4. **Anonymous** - Ignore the user field completely. This is an extension to
-the POSIX semantics, needed for the S3 protocol, where we express that
-we have no way of knowing who the user is or we don't care.
-
-
-<div class="alert alert-success" role="alert">
-  An S3 user accessing Ozone via the AWS v4 signature protocol will be translated
-  to the appropriate Kerberos user by the Ozone Manager.
-</div>
-
-Where a _right_ can be:
-
-1. **Create** – This ACL provides a user the ability to create buckets in a
-volume and keys in a bucket. Please note: under Ozone, only admins can create volumes.
-2. **List** – This ACL allows listing of buckets and keys. This ACL is attached
- to the volume and buckets, which allows listing of the child objects. Please note: the user and admins can list the volumes owned by the user.
-3. **Delete** – Allows the user to delete a volume, bucket or key.
-4. **Read** – Allows the user to read the metadata of a volume or bucket, and
-the data stream and metadata of a key.
-5. **Write** - Allows the user to write the metadata of a volume or bucket, and
-to overwrite an existing Ozone key.
-6. **Read_ACL** – Allows a user to read the ACL on a specific object.
-7. **Write_ACL** – Allows a user to write the ACL on a specific object.
-
-<h3>Ozone Native ACL APIs</h3>
-
-The ACLs can be manipulated by a set of APIs supported by Ozone. The APIs
-supported are:
-
-1. **SetAcl** – This API will take the user principal, the name and type
-of the Ozone object, and a list of ACLs.
-2. **GetAcl** – This API will take the name and type of the Ozone object
-and will return a list of ACLs.
-3. **AddAcl** - This API will take the name and type of the Ozone object and an
-ACL, and add it to the existing ACL entries of the Ozone object.
-4. **RemoveAcl** - This API will take the name and type of the
-Ozone object and the ACL that has to be removed.
diff --git a/hadoop-hdds/docs/content/security/_index.md b/hadoop-hdds/docs/content/security/_index.md
deleted file mode 100644
index 20967e3..0000000
--- a/hadoop-hdds/docs/content/security/_index.md
+++ /dev/null
@@ -1,36 +0,0 @@
----
-title: Security
-name: Security
-identifier: SecureOzone
-menu: main
-weight: 5
----
-<!---
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-        http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-    -->
-
-{{<jumbotron title="Securing Ozone">}}
-          Ozone is an enterprise class, secure storage system. There are many
-          optional security features in Ozone. Following pages discuss how
-          you can leverage the security features of Ozone.
-{{</jumbotron>}}
-
-<div class="alert alert-warning" role="alert">
-If you would like to understand Ozone's security architecture at a greater
-depth, please take a look at <a href="https://issues.apache.org/jira/secure/attachment/12911638/HadoopStorageLayerSecurity.pdf">Ozone security architecture.</a>
-</div>
-
-Depending on your needs, there are multiple optional steps in securing ozone.
diff --git a/hadoop-hdds/docs/content/shell/BucketCommands.md b/hadoop-hdds/docs/content/shell/BucketCommands.md
deleted file mode 100644
index e817349..0000000
--- a/hadoop-hdds/docs/content/shell/BucketCommands.md
+++ /dev/null
@@ -1,99 +0,0 @@
----
-title: Bucket Commands
-summary: Bucket commands help you to manage the life cycle of a bucket.
-weight: 3
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone shell supports the following bucket commands.
-
-  * [create](#create)
-  * [delete](#delete)
-  * [info](#info)
-  * [list](#list)
-
-### Create
-
-The `bucket create` command allows users to create a bucket.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-| -g, \-\-enforcegdpr            | Optional; if set to true it creates a GDPR compliant bucket, and if not specified or set to false, it creates an ordinary bucket.
-|  Uri                           | The name of the bucket in **/volume/bucket** format.
-
-
-{{< highlight bash >}}
-ozone sh bucket create /hive/jan
-{{< /highlight >}}
-
-The above command will create a bucket called _jan_ in the _hive_ volume.
-Since no scheme was specified, this command defaults to the O3 (RPC) protocol.
-
-### Delete
-
-The `bucket delete` command allows users to delete a bucket. If the
-bucket is not empty, this command will fail.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the bucket
-
-{{< highlight bash >}}
-ozone sh bucket delete /hive/jan
-{{< /highlight >}}
-
-The above command will delete the _jan_ bucket if it is empty.
-
-### Info
-
-The `bucket info` command returns information about the bucket.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the bucket.
-
-{{< highlight bash >}}
-ozone sh bucket info /hive/jan
-{{< /highlight >}}
-
-The above command will print out the information about the _jan_ bucket.
-
-### List
-
-The `bucket list` command allows users to list the buckets in a volume.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-| -l, \-\-length                   | Maximum number of results to return. Default: 100
-| -p, \-\-prefix                   | Optional, Only buckets that match this prefix will be returned.
-| -s, \-\-start                    | The listing will start from the key after the given start key.
-|  Uri                           | The name of the _volume_.
-
-{{< highlight bash >}}
-ozone sh bucket list /hive
-{{< /highlight >}}
-
-This command will list all buckets on the volume _hive_.
diff --git a/hadoop-hdds/docs/content/shell/Format.md b/hadoop-hdds/docs/content/shell/Format.md
deleted file mode 100644
index 72174c9..0000000
--- a/hadoop-hdds/docs/content/shell/Format.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-title: Shell Overview
-summary: Explains the command syntax used by the shell commands.
-weight: 1
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone shell help can be invoked at _object_ level or at _action_ level.
-For example:
-
-{{< highlight bash >}}
-ozone sh volume --help
-{{< /highlight >}}
-
-This will show all possible actions for volumes.
-
-or it can be invoked to explain a specific action like
-{{< highlight bash >}}
-ozone sh volume create --help
-{{< /highlight >}}
-This command will give you command line options of the create command.
-
-
-
-### General Command Format
-
-The Ozone shell commands take the following format.
-
-> _ozone sh object action url_
-
-The **ozone** script is used to invoke all Ozone sub-commands. The ozone shell is
-invoked via the ```sh``` command.
-
-The object can be a volume, bucket or a key. The action is various verbs like
-create, list, delete etc.
-
-
-Ozone URL can point to a volume, bucket or keys in the following format:
-
-_\[scheme\]\[server:port\]/volume/bucket/key_
-
-
-Where,
-
-1. **Scheme** - This should be `o3`, the native RPC protocol used to access
-  the Ozone API. The usage of the scheme is optional.
-
-2. **Server:Port** - This is the address of the Ozone Manager. If the port is
-omitted, the default port from ozone-site.xml will be used.
-
-Depending on the call, the volume/bucket/key names will be part of the URL.
-Please see volume commands, bucket commands, and key commands section for more
-detail.
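-
-For example, a fully qualified URL form of a command, assuming a hypothetical Ozone Manager host and relying on the default port:
-
-{{< highlight bash >}}
-ozone sh key info o3://om.example.com/hive/jan/sales.orc
-{{< /highlight >}}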
diff --git a/hadoop-hdds/docs/content/shell/KeyCommands.md b/hadoop-hdds/docs/content/shell/KeyCommands.md
deleted file mode 100644
index b4a38c8..0000000
--- a/hadoop-hdds/docs/content/shell/KeyCommands.md
+++ /dev/null
@@ -1,139 +0,0 @@
----
-title: Key Commands
-summary: Key commands help you to manage the life cycle of
-     Keys / Objects.
-weight: 4
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-
-Ozone shell supports the following key commands.
-
-  * [get](#get)
-  * [put](#put)
-  * [delete](#delete)
-  * [info](#info)
-  * [list](#list)
-  * [rename](#rename)
-
-
-### Get
-
-The `key get` command downloads a key from Ozone cluster to local file system.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the key in **/volume/bucket/key** format.
-|  FileName                      | Local file to download the key to.
-
-
-{{< highlight bash >}}
-ozone sh key get /hive/jan/sales.orc sales.orc
-{{< /highlight >}}
-Downloads the file sales.orc from the _/hive/jan_ bucket and writes it to the
-local file sales.orc.
-
-### Put
-
-The `key put` command uploads a file from the local file system to the specified bucket.
-
-***Params:***
-
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the key in **/volume/bucket/key** format.
-|  FileName                      | Local file to upload.
-| -r, \-\-replication              | Optional. Number of copies; ONE and THREE are the options. Defaults to the cluster configuration.
-
-{{< highlight bash >}}
-ozone sh key put /hive/jan/corrected-sales.orc sales.orc
-{{< /highlight >}}
-The above command will put sales.orc as a new key at _/hive/jan/corrected-sales.orc_.
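-
-The replication factor can also be set explicitly via the flag from the table above. A sketch:
-
-{{< highlight bash >}}
-ozone sh key put --replication=THREE /hive/jan/corrected-sales.orc sales.orc
-{{< /highlight >}}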
-
-### Delete
-
-The `key delete` command removes the key from the bucket.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the key.
-
-{{< highlight bash >}}
-ozone sh key delete /hive/jan/corrected-sales.orc
-{{< /highlight >}}
-
-The above command deletes the key _/hive/jan/corrected-sales.orc_.
-
-
-### Info
-
-The `key info` command returns information about the key.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the key.
-
-{{< highlight bash >}}
-ozone sh key info /hive/jan/sales.orc
-{{< /highlight >}}
-
-The above command will print out the information about _/hive/jan/sales.orc_
-key.
-
-### List
-
-The `key list` command allows users to list all keys in a bucket.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-| -l, \-\-length                   | Maximum number of results to return. Default: 1000
-| -p, \-\-prefix                   | Optional, Only keys that match this prefix will be returned.
-| -s, \-\-start                    | The listing will start from the key after the given start key.
-|  Uri                           | The name of the _bucket_.
-
-{{< highlight bash >}}
-ozone sh key list /hive/jan
-{{< /highlight >}}
-
-This command will list all keys in the bucket _/hive/jan_.
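-
-As with bucket listing, the pagination flags can be combined. A sketch (the prefix value is illustrative):
-
-{{< highlight bash >}}
-ozone sh key list --length 100 --prefix sales /hive/jan
-{{< /highlight >}}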
-
-### Rename
-
-The `key rename` command changes the name of an existing key in the specified bucket.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the bucket in **/volume/bucket** format.
-|  FromKey                       | The existing key to be renamed.
-|  ToKey                         | The new desired name of the key.
-
-{{< highlight bash >}}
-ozone sh key rename /hive/jan sales.orc new_name.orc
-{{< /highlight >}}
-The above command will rename _sales.orc_ to _new\_name.orc_ in the bucket _/hive/jan_.
diff --git a/hadoop-hdds/docs/content/shell/VolumeCommands.md b/hadoop-hdds/docs/content/shell/VolumeCommands.md
deleted file mode 100644
index 47fb985..0000000
--- a/hadoop-hdds/docs/content/shell/VolumeCommands.md
+++ /dev/null
@@ -1,112 +0,0 @@
----
-title: Volume Commands
-weight: 2
-summary: Volume commands help you to manage the life cycle of a volume.
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Volume commands generally need administrator privileges. The ozone shell supports the following volume commands.
-
-  * [create](#create)
-  * [delete](#delete)
-  * [info](#info)
-  * [list](#list)
-  * [update](#update)
-
-### Create
-
-The `volume create` command allows an administrator to create a volume and
-assign it to a user.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-| -q, \-\-quota                    | Optional. Specifies the maximum size this volume can use in the Ozone cluster.                    |
-| -u, \-\-user                     | Required. The name of the user who owns this volume. This user can create buckets and keys on this volume.                                       |
-|  Uri                           | The name of the volume.                                        |
-
-{{< highlight bash >}}
-ozone sh volume create --quota=1TB --user=bilbo /hive
-{{< /highlight >}}
-
-The above command will create a volume called _hive_ on the ozone cluster. This
-volume has a quota of 1TB, and the owner is _bilbo_.
-
-### Delete
-
-The `volume delete` command allows an administrator to delete a volume. If the
-volume is not empty then this command will fail.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the volume.
-
-{{< highlight bash >}}
-ozone sh volume delete /hive
-{{< /highlight >}}
-
-The above command will delete the volume _hive_ if the volume has no buckets
-inside it.
-
-### Info
-
-The `volume info` command returns information about the volume including
-quota and owner information.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the volume.
-
-{{< highlight bash >}}
-ozone sh volume info /hive
-{{< /highlight >}}
-
-The above command will print out the information about the _hive_ volume.
-
-### List
-
-The `volume list` command will list the volumes owned by a user.
-
-{{< highlight bash >}}
-ozone sh volume list --user hadoop
-{{< /highlight >}}
-
-The above command will print out all the volumes owned by the user hadoop.
-
-### Update
-
-The `volume update` command allows changing the owner and quota of a given volume.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-| -q, \-\-quota                    | Optional. Specifies the maximum size this volume can use in the Ozone cluster.                    |
-| -u, \-\-user                     | Optional. The name of the user who owns this volume. This user can create buckets and keys on this volume.                                       |
-|  Uri                           | The name of the volume.                                        |
-
-{{< highlight bash >}}
-ozone sh volume update --quota=10TB /hive
-{{< /highlight >}}
-
-The above command updates the volume quota to 10TB.
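-
-Ownership can be reassigned in the same way. A sketch using the `--user` flag (the user name is illustrative):
-
-{{< highlight bash >}}
-ozone sh volume update --user=hadoop /hive
-{{< /highlight >}}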
diff --git a/hadoop-hdds/docs/content/shell/_index.md b/hadoop-hdds/docs/content/shell/_index.md
deleted file mode 100644
index 3cb1a9f..0000000
--- a/hadoop-hdds/docs/content/shell/_index.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-title: Command Line Interface
-menu:
-   main:
-      weight: 3
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-
-{{<jumbotron title="OzoneShell">}}
-    Ozone  shell is the primary interface to interact with Ozone.
-    It provides a command shell interface to work against Ozone.
-{{</jumbotron>}}
diff --git a/hadoop-hdds/docs/content/start/FromSource.md b/hadoop-hdds/docs/content/start/FromSource.md
deleted file mode 100644
index 1e920d9..0000000
--- a/hadoop-hdds/docs/content/start/FromSource.md
+++ /dev/null
@@ -1,68 +0,0 @@
----
-title: From Source
-weight: 30
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-{{< requirements >}}
- * Java 1.8
- * Maven
- * Protoc (2.5)
-{{< /requirements >}}
-
-<div class="alert alert-info" role="alert">This is a guide on how to build
-the ozone sources. If you are <font color="red">not</font> planning to build
-sources yourself, you can safely skip this page.</div>
-
-If you are a Hadoop ninja, and wise in the ways of Apache, you already know
-that a real Apache release is a source release.
-
-If you want to build from sources, please untar the source tarball and run
-the ozone build command. These instructions assume that you have all the
-dependencies to build Hadoop on your build machine. If you need instructions
-on how to build Hadoop, please look at the Apache Hadoop website.
-
-```bash
-mvn -f pom.ozone.xml clean package -DskipTests=true
-```
-
-This will build an ozone-\<version\>.tar.gz in your `hadoop-ozone/dist/target` directory.
-
-You can copy this tarball and use this instead of binary artifacts that are
-provided along with the official release.
-
-## How to test the build
-
-You can run the acceptance tests in the hadoop-ozone directory to make sure
-that your build is functional. To launch the acceptance tests, please follow
-the instructions in the **README.md** in the `smoketest` directory.
-
-```bash
-cd smoketest
-./test.sh
-```
-
-You can also execute only a minimal subset of the tests:
-
-```bash
-cd smoketest
-./test.sh --env ozone basic
-```
-
-Acceptance tests will start a small ozone cluster and verify that the ozone
-shell and ozone file system are fully functional.
diff --git a/hadoop-hdds/docs/content/start/Kubernetes.md b/hadoop-hdds/docs/content/start/Kubernetes.md
deleted file mode 100644
index ad85534..0000000
--- a/hadoop-hdds/docs/content/start/Kubernetes.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: Ozone on Kubernetes
-weight: 22
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-
-{{< requirements >}}
- * Working kubernetes cluster (LoadBalancer, PersistentVolume are not required)
- * kubectl
-{{< /requirements >}}
-
-
-As the _apache/ozone_ docker images are available from the dockerhub, the deployment process is very similar to the Minikube deployment. The only big difference is that we have a dedicated set of k8s files for hosted clusters (for example, we can use one datanode per host).
-
-## Deploy to Kubernetes
-
-The `kubernetes/examples` folder of the ozone distribution contains kubernetes deployment resource files for multiple use cases.
-
-To deploy to a hosted cluster use the ozone subdirectory:
-
-```
-cd kubernetes/examples/ozone
-kubectl apply -f .
-```
-
-And you can check the results with
-
-```
-kubectl get pod
-```
-
-## Access the services
-
-Now you can access any of the services. By default, the services are not published, but you can access them with port-forward rules.
-
-```
-kubectl port-forward s3g-0 9878:9878
-kubectl port-forward scm-0 9876:9876
-```
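-
-With the S3 gateway forwarded to localhost, you can sanity-check the cluster with the AWS CLI. A sketch, assuming the AWS CLI is installed (the bucket name is illustrative):
-
-```
-aws s3api --endpoint http://localhost:9878/ create-bucket --bucket=bucket1
-aws s3api --endpoint http://localhost:9878/ list-buckets
-```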
diff --git a/hadoop-hdds/docs/content/start/Minikube.md b/hadoop-hdds/docs/content/start/Minikube.md
deleted file mode 100644
index ebb249d..0000000
--- a/hadoop-hdds/docs/content/start/Minikube.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-title: Minikube & Ozone
-weight: 21
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-
-{{< requirements >}}
- * Working minikube setup
- * kubectl
-{{< /requirements >}}
-
-The `kubernetes/examples` folder of the ozone distribution contains kubernetes deployment resource files for multiple use cases. By default, the kubernetes resource files are configured to use the `apache/ozone` image from the dockerhub.
-
-To deploy it to minikube use the minikube configuration set:
-
-```
-cd kubernetes/examples/minikube
-kubectl apply -f .
-```
-
-And you can check the results with
-
-```
-kubectl get pod
-```
-
-Note: the kubernetes/examples/minikube resource set is optimized for minikube usage:
-
- * You can have multiple datanodes even if you have only one host (in a real production cluster you usually need one datanode per physical host)
- * The services are published with node port
-
-## Access the services
-
-Now you can access any of the services. For each web endpoint an additional NodePort service is defined in the minikube k8s resource set. NodePort services are available via a generated port on any of the host nodes:
-
-```bash
-kubectl get svc
-NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
-datanode     ClusterIP   None            <none>        <none>           27s
-kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP          118m
-om           ClusterIP   None            <none>        9874/TCP         27s
-om-public    NodePort    10.108.48.148   <none>        9874:32649/TCP   27s
-s3g          ClusterIP   None            <none>        9878/TCP         27s
-s3g-public   NodePort    10.97.133.137   <none>        9878:31880/TCP   27s
-scm          ClusterIP   None            <none>        9876/TCP         27s
-scm-public   NodePort    10.105.231.28   <none>        9876:32171/TCP   27s
-```
-
-Minikube contains a convenience command to access any of the NodePort services:
-
-```
-minikube service s3g-public
-Opening kubernetes service default/s3g-public in default browser...
-```
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/start/OnPrem.md b/hadoop-hdds/docs/content/start/OnPrem.md
deleted file mode 100644
index 3bf40a6..0000000
--- a/hadoop-hdds/docs/content/start/OnPrem.md
+++ /dev/null
@@ -1,187 +0,0 @@
----
-title: Ozone On Premise Installation
-weight: 20
-
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-If you are feeling adventurous, you can set up ozone in a real cluster.
-Setting up a real cluster requires us to understand the components of Ozone.
-Ozone is designed to work concurrently with HDFS. However, Ozone is also
-capable of running independently. The components of ozone are the same in both approaches.
-
-## Ozone Components
-
-1. Ozone Manager - The server that is in charge of Ozone's namespace. The Ozone Manager is responsible for all volume, bucket and key operations.
-2. Storage Container Manager - Acts as the block manager. The Ozone Manager
-requests blocks from SCM, to which clients can write data.
-3. Datanodes - The Ozone datanode code either runs inside the HDFS datanode or, in the independent deployment case, as a standalone ozone datanode daemon.
-
-## Setting up an Ozone only cluster
-
-* Please untar ozone-\<version\> into the directory from which you are going
-to run Ozone. The Ozone jars are needed on every machine in the cluster, so
-repeat this step on each machine.
-
-* Ozone relies on a configuration file called ```ozone-site.xml```. To
-generate a template that you can replace with proper values, please run the
-following command. This will generate a template called ```ozone-site.xml``` at
-the specified path (directory).
-
-{{< highlight bash >}}
-ozone genconf <path>
-{{< /highlight >}}
-
-Let us look at the settings inside the generated file (ozone-site.xml) and
-how they control ozone. Once the right values are defined, this file
-needs to be copied to ```ozone directory/etc/hadoop```.
-
-
-* **ozone.enabled** This is the most critical setting for ozone.
-Ozone is a work in progress and users have to enable this service explicitly.
-By default, Ozone is disabled. Setting this flag to `true` enables ozone in the
-HDFS or Ozone cluster.
-
-Here is an example,
-
-{{< highlight xml >}}
-    <property>
-       <name>ozone.enabled</name>
-       <value>true</value>
-    </property>
-{{< /highlight >}}
-
-* **ozone.metadata.dirs** Allows administrators to specify where the
- metadata must reside. Usually you pick your fastest disk (SSD if
- you have them on your nodes). OzoneManager, SCM and datanode will write the
- metadata to this path. This is a required setting; if it is missing, Ozone
- will fail to come up.
-
-  Here is an example,
-
-{{< highlight xml >}}
-   <property>
-      <name>ozone.metadata.dirs</name>
-      <value>/data/disk1/meta</value>
-   </property>
-{{< /highlight >}}
-
-*  **ozone.scm.names**  The Storage Container Manager (SCM) is a distributed block
-  service which is used by ozone. This property allows data nodes to discover
-   SCM's address. Data nodes send heartbeats to SCM.
-   Until the HA feature is complete, we configure ozone.scm.names to be a
-   single machine.
-
-  Here is an example,
-
-  {{< highlight xml >}}
-      <property>
-        <name>ozone.scm.names</name>
-        <value>scm.hadoop.apache.org</value>
-      </property>
-  {{< /highlight >}}
-
- * **ozone.scm.datanode.id.dir** Data nodes generate a unique ID called the Datanode
- ID. This identity is written to the file datanode.id in the directory specified by this path. *Data nodes
-    will create this path if it doesn't exist already.*
-
-Here is an  example,
-{{< highlight xml >}}
-   <property>
-      <name>ozone.scm.datanode.id.dir</name>
-      <value>/data/disk1/meta/node</value>
-   </property>
-{{< /highlight >}}
-
-* **ozone.om.address** OM server address. This is used by OzoneClient and
-Ozone File System.
-
-Here is an  example,
-{{< highlight xml >}}
-    <property>
-       <name>ozone.om.address</name>
-       <value>ozonemanager.hadoop.apache.org</value>
-    </property>
-{{< /highlight >}}
-
-
-## Ozone Settings Summary
-
-| Setting                        | Value                        | Comment |
-|--------------------------------|------------------------------|------------------------------------------------------------------|
-| ozone.enabled                  | true                         | This enables SCM and containers in the HDFS cluster.             |
-| ozone.metadata.dirs            | file path                    | The metadata will be stored here.                                |
-| ozone.scm.names                | SCM server name              | Hostname:port or IP:port address of SCM.                      |
-| ozone.scm.block.client.address | SCM server name and port     | Used by services like OM                                         |
-| ozone.scm.client.address       | SCM server name and port     | Used by client-side                                              |
-| ozone.scm.datanode.address     | SCM server name and port     | Used by datanode to talk to SCM                                  |
-| ozone.om.address               | OM server name               | Used by Ozone handler and Ozone file system.                     |
-
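-Putting the settings above together, a minimal `ozone-site.xml` sketch (the hostnames and paths are the placeholder values from the examples above):
-
-{{< highlight xml >}}
-<configuration>
-   <property>
-      <name>ozone.enabled</name>
-      <value>true</value>
-   </property>
-   <property>
-      <name>ozone.metadata.dirs</name>
-      <value>/data/disk1/meta</value>
-   </property>
-   <property>
-      <name>ozone.scm.names</name>
-      <value>scm.hadoop.apache.org</value>
-   </property>
-   <property>
-      <name>ozone.om.address</name>
-      <value>ozonemanager.hadoop.apache.org</value>
-   </property>
-</configuration>
-{{< /highlight >}}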
-
-## Startup the cluster
-
-Before we boot up the Ozone cluster, we need to initialize both SCM and Ozone Manager.
-
-{{< highlight bash >}}
-ozone scm --init
-{{< /highlight >}}
-This allows SCM to create the cluster identity and initialize its state.
-The ```init``` command is similar to Namenode format. The init command is executed only once; it allows SCM to create all the on-disk structures it requires to work correctly.
-{{< highlight bash >}}
-ozone --daemon start scm
-{{< /highlight >}}
-
-Once we know SCM is up and running, we can create an Object Store for our use. This is done by running the following command.
-
-{{< highlight bash >}}
-ozone om --init
-{{< /highlight >}}
-
-
-Once Ozone manager is initialized, we are ready to run the name service.
-
-{{< highlight bash >}}
-ozone --daemon start om
-{{< /highlight >}}
-
-At this point Ozone's name service (the Ozone Manager) and the block service (SCM) are both running.
-**Please note**: If SCM is not running, the
-```om --init``` command will fail. SCM start will fail if on-disk data structures are missing. So please make sure you have run both the ```scm --init``` and ```om --init``` commands.
-
-Now we need to start the data nodes. Please run the following command on each datanode.
-{{< highlight bash >}}
-ozone --daemon start datanode
-{{< /highlight >}}
-
-At this point SCM, Ozone Manager and data nodes are up and running.
-
-***Congratulations! You have set up a functional ozone cluster.***
-
-## Shortcut
-
-If you want to make your life simpler, you can just run
-{{< highlight bash >}}
-ozone scm --init
-ozone om --init
-start-ozone.sh
-{{< /highlight >}}
-
-This assumes that you have set up the slaves file correctly and an ssh
-configuration that allows ssh-ing to all data nodes. This is the same as the
-HDFS configuration, so please refer to the HDFS documentation on how to set
-this up.
diff --git a/hadoop-hdds/docs/content/start/RunningViaDocker.md b/hadoop-hdds/docs/content/start/RunningViaDocker.md
deleted file mode 100644
index 9e1e361..0000000
--- a/hadoop-hdds/docs/content/start/RunningViaDocker.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-title: Pseudo-cluster
-weight: 23
-
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-{{< requirements >}}
- * docker and docker-compose
-{{< /requirements >}}
-
-* Download the Ozone binary tarball and untar it.
-
-* Go to the directory where the docker compose files exist and tell
-`docker-compose` to start Ozone in the background. This will start a small
-ozone instance on your machine.
-
-{{< highlight bash >}}
-cd compose/ozone/
-
-docker-compose up -d
-{{< /highlight >}}
-
-To verify that ozone is working as expected, let us log into a data node and
-run _freon_, the load generator for Ozone. The ```exec datanode bash``` command
-will open a bash shell on the datanode.
-
-The `ozone freon` command is executed within the datanode container. You can quit freon via CTRL-C any time. The
-```rk``` profile instructs freon to generate random keys.
-
-{{< highlight bash >}}
-docker-compose exec datanode bash
-ozone freon rk
-{{< /highlight >}}
-
-You can check out the **OzoneManager UI** at http://localhost:9874/ to see the
-activity generated by freon.
-While you are there, please don't forget to check out the ozone configuration explorer.
-
-***Congratulations! You have just run your first ozone cluster.***
-
-To shut down the cluster, please run
-{{< highlight bash >}}
-docker-compose down
-{{< /highlight >}}
-
diff --git a/hadoop-hdds/docs/content/start/StartFromDockerHub.md b/hadoop-hdds/docs/content/start/StartFromDockerHub.md
deleted file mode 100644
index e3e7d41c..0000000
--- a/hadoop-hdds/docs/content/start/StartFromDockerHub.md
+++ /dev/null
@@ -1,111 +0,0 @@
----
-title: Simple Single Ozone
-weight: 10
-
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-{{< requirements >}}
- * Working docker setup
- * AWS CLI (optional)
-{{< /requirements >}}
-
-# Ozone in a Single Container
-
-The easiest way to start up an all-in-one ozone container is to use the latest
-docker image from docker hub:
-
-```bash
-docker run -p 9878:9878 -p 9876:9876 apache/ozone
-```
-This command will pull down the ozone image from docker hub and start all
-ozone services in a single container. <br>
-This container will run the required metadata servers (Ozone Manager, Storage
-Container Manager), one data node, and the S3 compatible REST server
-(S3 Gateway).
-
-# Local multi-container cluster
-
-If you would like to use a more realistic pseudo-cluster where each component
-runs in its own container, you can start it with a docker-compose file.
-
-We ship a docker-compose file and an environment file as part of the
-container image that is uploaded to docker hub.
-
-The following commands can be used to extract these files from the image on docker hub.
-```bash
-docker run apache/ozone cat docker-compose.yaml > docker-compose.yaml
-docker run apache/ozone cat docker-config > docker-config
-```
-
-Now you can start the cluster with docker-compose:
-
-```bash
-docker-compose up -d
-```
-
-If you need multiple datanodes, you can just scale it up:
-
-```bash
-docker-compose scale datanode=3
-```
-
-# Running S3 Clients
-
-Once the cluster is booted up and ready, you can verify its status by
-connecting to the SCM's UI at [http://localhost:9876](http://localhost:9876).
-
-The S3 gateway endpoint will be exposed at port 9878. You can use Ozone's S3
-support as if you are working against the real S3.
-
-
-Here is how you create buckets from command line:
-
-```bash
-aws s3api --endpoint http://localhost:9878/ create-bucket --bucket=bucket1
-```
-
-The only notable difference in the above command line is that you have
-to pass the _endpoint_ address to the aws s3api command.
-
-Now let us put a simple file into the S3 Bucket hosted by Ozone. We will
-start by creating a temporary file that we can upload to Ozone via S3 support.
-```bash
-ls -1 > /tmp/testfile
-```
-This command creates a temporary file that we can upload to Ozone. The next
-command uploads it to Ozone's S3 bucket using the standard aws s3 command
-line interface.
-
-```bash
-aws s3 --endpoint http://localhost:9878 cp --storage-class REDUCED_REDUNDANCY  /tmp/testfile  s3://bucket1/testfile
-```
-<div class="alert alert-info" role="alert">
-Note: REDUCED_REDUNDANCY is required for the single container ozone, since it
- has a single datanode. </div>
-We can now verify that the file was uploaded by running the list command
-against our bucket.
-
-```bash
-aws s3 --endpoint http://localhost:9878 ls s3://bucket1/testfile
-```
-
-<div class="alert alert-info" role="alert"> You can also check the internal
-bucket browser supported by the Ozone S3 interface by following the link below.
-<br>
-</div>
-http://localhost:9878/bucket1?browser
diff --git a/hadoop-hdds/docs/content/start/_index.md b/hadoop-hdds/docs/content/start/_index.md
deleted file mode 100644
index 5529661..0000000
--- a/hadoop-hdds/docs/content/start/_index.md
+++ /dev/null
@@ -1,88 +0,0 @@
----
-title: Getting Started
-name: Getting Started
-identifier: Starting
-menu: main
-weight: 1
-cards: "false"
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-
-{{<jumbotron title="Installing Ozone">}}
-There are many ways to install and run Ozone. Starting from simple docker
-deployments on
-local nodes, to full scale multi-node cluster deployment on
-Kubernetes or bare-metal.
-{{</jumbotron>}}
-
-<section class="row cardgroup">
-
-<span class="label label-warning label-">Easy Start</span>
-
-<h2>Running Ozone from Docker Hub</h2>
-
-You can try out Ozone using docker hub without downloading the official release. This makes it easy to explore Ozone.
-<br />
-  {{<card title="Starting ozone inside a single container" link="start/StartFromDockerHub.md" link-text="Ozone In Docker" image="start/docker.png">}}
-  The simplest and easiest way to start an ozone cluster
-      to explore what it can do is to start ozone via docker.
-  {{</card>}}
-
-</section>
-
-<section class="row cardgroup">
-
-<span class="label label-success">Recommended</span>
-
-
-<h2>Running Ozone from an Official Release</h2>
-
- Apache Ozone can also be run from the official release packages. Along with the official source releases, we also release a set of convenience binary packages. It is easy to run these binaries in different configurations.
-<br />
-  {{<card title="Ozone on a physical cluster" link="start/OnPrem" link-text="On-Prem Ozone Cluster" image="start/hadoop.png">}}
-Ozone is designed to work concurrently with HDFS. The physical cluster instructions explain each component of Ozone and how to deploy with maximum control.
-  {{</card>}}
-
-  {{<card title="Ozone on K8s" link="start/Kubernetes" link-text="Kubernetes" image="start/k8s.png">}}
-Ozone is designed to work well under Kubernetes. These are instructions to deploy Ozone on K8s. Ozone provides a replicated storage solution for K8s based apps.
-  {{</card>}}
-
-  {{<card title="Ozone using MiniKube" link="start/Minikube" link-text="Minikube cluster" image="start/minikube.png">}}
-Ozone comes with a standard set of K8s resources. You can deploy them to MiniKube and experiment with the K8s based deployments.
-  {{</card>}}
-
-  {{<card title="Ozone cluster in Local Node" link="start/RunningViaDocker.md" link-text="docker-compose" image="start/docker.png">}}
- We also ship standard docker files with the official release. These are part of the official release and do not depend upon Docker Hub.
-  {{</card>}}
-
-</section>
-
-<section class="row cardgroup">
-
-<span class="label label-danger">Hadoop Ninja</span>
-
-<h2>Building From Sources </h2>
-
- Instructions to build Ozone from source to create deployment packages.
-
-  {{<card title="Building From Sources" link="start/FromSource.md" link-text="Build ozone from source" image="start/hadoop.png">}}
-If you are a Hadoop ninja, and wise in the ways of Apache, you already know that a real Apache release is a source release. We believe that even ninjas need help at times.
-  {{</card>}}
-
-</section>
diff --git a/hadoop-hdds/docs/content/start/docker.png b/hadoop-hdds/docs/content/start/docker.png
deleted file mode 100644
index 048730b..0000000
--- a/hadoop-hdds/docs/content/start/docker.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/content/start/hadoop.png b/hadoop-hdds/docs/content/start/hadoop.png
deleted file mode 100644
index 183867c..0000000
--- a/hadoop-hdds/docs/content/start/hadoop.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/content/start/k8s.png b/hadoop-hdds/docs/content/start/k8s.png
deleted file mode 100644
index 5fa2e9a..0000000
--- a/hadoop-hdds/docs/content/start/k8s.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/content/start/minikube.png b/hadoop-hdds/docs/content/start/minikube.png
deleted file mode 100644
index 0609ecc..0000000
--- a/hadoop-hdds/docs/content/start/minikube.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/content/tools/AuditParser.md b/hadoop-hdds/docs/content/tools/AuditParser.md
deleted file mode 100644
index e4da208..0000000
--- a/hadoop-hdds/docs/content/tools/AuditParser.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-title: "Audit Parser"
-date: 2018-12-17
-summary: Audit Parser tool can be used for querying the ozone audit logs.
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-The Audit Parser tool can be used for querying the ozone audit logs.
-This tool creates a sqlite database at the specified path. If the database
-already exists, it will be reused instead of creating a new one.
-
-The database contains only one table called `audit`, defined as:
-
-{{< highlight sql >}}
-CREATE TABLE IF NOT EXISTS audit (
-datetime text,
-level varchar(7),
-logger varchar(7),
-user text,
-ip text,
-op text,
-params text,
-result varchar(7),
-exception text,
-UNIQUE(datetime,level,logger,user,ip,op,params,result))
-{{< /highlight >}}
-
-Usage:
-{{< highlight bash >}}
-ozone auditparser <path to db file> [COMMAND] [PARAM]
-{{< /highlight >}}
-
-To load an audit log to database:
-{{< highlight bash >}}
-ozone auditparser <path to db file> load <path to audit log>
-{{< /highlight >}}
-Load command creates the audit table described above.
-
-To run a custom read-only query:
-{{< highlight bash >}}
-ozone auditparser <path to db file> query <select query enclosed within double quotes>
-{{< /highlight >}}
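-
-For example, a sketch of a custom query (the database path is illustrative, and this assumes results are recorded as SUCCESS or FAILURE):
-
-{{< highlight bash >}}
-ozone auditparser /tmp/audit.db query "select user,op,params from audit where result='FAILURE'"
-{{< /highlight >}}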
-
-Audit Parser comes with a set of templates (the most commonly used queries).
-
-To run a template query:
-{{< highlight bash >}}
-ozone auditparser <path to db file> template <templateName>
-{{< /highlight >}}
-
-The following templates are available:
-
-|Template Name|Description|SQL|
-|----------------|----------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------|
-|top5users|Top 5 users|select user,count(*) as total from audit group by user order by total DESC limit 5|
-|top5cmds|Top 5 commands|select op,count(*) as total from audit group by op order by total DESC limit 5|
-|top5activetimebyseconds|Top 5 active times, grouped by seconds|select substr(datetime,1,charindex(',',datetime)-1) as dt,count(*) as thecount from audit group by dt order by thecount DESC limit 5|
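-
-For example, to list the five most active users (the database path is illustrative):
-
-{{< highlight bash >}}
-ozone auditparser /tmp/audit.db template top5users
-{{< /highlight >}}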
diff --git a/hadoop-hdds/docs/content/tools/Genconf.md b/hadoop-hdds/docs/content/tools/Genconf.md
deleted file mode 100644
index 35d5e3d..0000000
--- a/hadoop-hdds/docs/content/tools/Genconf.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-title: "Generate Configurations"
-date: 2018-12-18
-summary: Tool to generate default configuration
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-The Genconf tool generates a template ozone-site.xml file at the specified path.
-This template file can then be edited to fill in proper values.
-
-`ozone genconf <path>`
diff --git a/hadoop-hdds/docs/content/tools/SCMCLI.md b/hadoop-hdds/docs/content/tools/SCMCLI.md
deleted file mode 100644
index 04950c2..0000000
--- a/hadoop-hdds/docs/content/tools/SCMCLI.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-title: "SCMCLI"
-date: 2017-08-10
-summary: Admin tool for managing SCM
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-SCM is the block service for Ozone. It is also the workhorse of ozone. User processes never talk to SCM directly; however, being able to read the state of SCM is useful.
-
-SCMCLI allows the developer to access SCM directly. Please note: improper usage of this tool can destroy your cluster. Unless you know exactly what you are doing, please do *not* use this tool. In other words, this is a developer-only tool. We might even remove this command in the future to prevent improper use.
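-
-If you do need to inspect SCM state, a read-only sketch (this assumes the `container list` subcommand exists in your build; check `ozone scmcli --help` first):
-
-{{< highlight bash >}}
-ozone scmcli container list
-{{< /highlight >}}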
-
diff --git a/hadoop-hdds/docs/content/tools/TestTools.md b/hadoop-hdds/docs/content/tools/TestTools.md
deleted file mode 100644
index a077f2e..0000000
--- a/hadoop-hdds/docs/content/tools/TestTools.md
+++ /dev/null
@@ -1,228 +0,0 @@
----
-title: "Testing tools"
-summary: Ozone contains multiple test tools for load generation, partitioning test or acceptance tests.
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Testing is one of the most important parts of developing a distributed system. We have the following types of tests.
-
-This page describes our existing test tools which are part of the Ozone source base.
-
-Note: we have more tests (like TPC-DS and TPC-H tests via Spark or Hive) which are not included here because they use external tools only.
-
-## Unit test
-
-As with almost every Java project, we have the good old unit tests inside each of our projects.
-
-## Integration test (JUnit)
-
-Traditional unit tests are supposed to test only one unit, but we also have higher-level unit tests. They use `MiniOzoneCluster`, a helper that starts real daemons (scm, om, datanodes) during the unit test.
-
-From a maven/java point of view they are just simple unit tests (the JUnit library is used), but to separate them (and solve some dependency problems) we moved all of these tests to `hadoop-ozone/integration-test`.
-
-## Smoketest
-
-We use a docker-compose based pseudo-cluster to run different configurations of Ozone. To be sure that the different configurations can be started, we implemented _acceptance_ tests with the help of https://robotframework.org/.
-
-The smoketests are available from the distribution (`./smoketest`), but the robot files define only the tests: usually they start CLI commands and check the output.
-
-To run the tests in different environments (docker-compose, kubernetes) you need a definition to start the containers and execute the right tests in the right containers.
-
-These test definitions are included in the `compose` directory (check `./compose/*/test.sh` or `./compose/test-all.sh`).
-
-For example, a simple way to test the distribution package:
-
-```
-cd compose/ozone
-./test.sh
-```
-
-## Blockade
-
-[Blockade](https://github.com/worstcase/blockade) is a tool to test network failures and partitions (it's inspired by the legendary [Jepsen tests](https://jepsen.io/analyses)).
-
-Blockade tests are implemented with the help of pytest and can be started from the `./blockade` directory of the distribution.
-
-```
-cd blockade
-pip install pytest==2.8.7 blockade
-python -m pytest -s .
-```
-
-See the README in the blockade directory for more details.
-
-## MiniChaosOzoneCluster
-
-This is a way to get [chaos](https://en.wikipedia.org/wiki/Chaos_engineering) in your machine. It can be started from the source code: a MiniOzoneCluster (which starts real daemons) will be started, and its daemons will be killed randomly.
-
-## Freon
-
-Freon is a command line application which is included in the Ozone distribution. It's a load generator which is used in our stress tests.
-
-For example:
-
-```
-ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10  --replicationType=RATIS --factor=THREE
-```
-
-```
-***************************************************
-Status: Success
-Git Base Revision: 48aae081e5afacbb3240657556b26c29e61830c3
-Number of Volumes created: 10
-Number of Buckets created: 100
-Number of Keys added: 1000
-Ratis replication factor: THREE
-Ratis replication type: RATIS
-Average Time spent in volume creation: 00:00:00,035
-Average Time spent in bucket creation: 00:00:00,319
-Average Time spent in key creation: 00:00:03,659
-Average Time spent in key write: 00:00:10,894
-Total bytes written: 10240000
-Total Execution time: 00:00:16,898
-***********************
-```
-
-For more information check the [documentation page](https://hadoop.apache.org/ozone/docs/0.4.0-alpha/freon.html)
-
-## Genesis
-
-Genesis is a microbenchmarking tool. It's also included in the distribution (`ozone genesis`) but it doesn't require a real cluster. It measures different parts of the code in an isolated way (e.g. the code which saves the data to the local RocksDB based key value stores).
-
-Example run:
-
-```
- ozone genesis -benchmark=BenchMarkRocksDbStore
-# JMH version: 1.19
-# VM version: JDK 11.0.1, VM 11.0.1+13-LTS
-# VM invoker: /usr/lib/jvm/java-11-openjdk-11.0.1.13-3.el7_6.x86_64/bin/java
-# VM options: -Dproc_genesis -Djava.net.preferIPv4Stack=true -Dhadoop.log.dir=/var/log/hadoop -Dhadoop.log.file=hadoop.log -Dhadoop.home.dir=/opt/hadoop -Dhadoop.id.str=hadoop -Dhadoop.root.logger=INFO,console -Dhadoop.policy.file=hadoop-policy.xml -Dhadoop.security.logger=INFO,NullAppender
-# Warmup: 2 iterations, 1 s each
-# Measurement: 20 iterations, 1 s each
-# Timeout: 10 min per iteration
-# Threads: 4 threads, will synchronize iterations
-# Benchmark mode: Throughput, ops/time
-# Benchmark: org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test
-# Parameters: (backgroundThreads = 4, blockSize = 8, maxBackgroundFlushes = 4, maxBytesForLevelBase = 512, maxOpenFiles = 5000, maxWriteBufferNumber = 16, writeBufferSize = 64)
-
-# Run progress: 0.00% complete, ETA 00:00:22
-# Fork: 1 of 1
-# Warmup Iteration   1: 213775.360 ops/s
-# Warmup Iteration   2: 32041.633 ops/s
-Iteration   1: 196342.348 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration   2: 41926.816 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration   3: 210433.231 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration   4: 46941.951 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration   5: 212825.884 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration   6: 145914.351 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration   7: 141838.469 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration   8: 205334.438 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration   9: 163709.519 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration  10: 162494.608 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration  11: 199155.793 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration  12: 209679.298 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration  13: 193787.574 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration  14: 127004.147 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration  15: 145511.080 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration  16: 223433.864 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration  17: 169752.665 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration  18: 165217.191 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration  19: 191038.476 ops/s
-                 ?stack: <delayed till summary>
-
-Iteration  20: 196335.579 ops/s
-                 ?stack: <delayed till summary>
-
-
-
-Result "org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test":
-  167433.864 ?(99.9%) 43530.883 ops/s [Average]
-  (min, avg, max) = (41926.816, 167433.864, 223433.864), stdev = 50130.230
-  CI (99.9%): [123902.981, 210964.748] (assumes normal distribution)
-
-Secondary result "org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test:?stack":
-Stack profiler:
-
-....[Thread state distributions]....................................................................
- 78.9%         RUNNABLE
- 20.0%         TIMED_WAITING
-  1.1%         WAITING
-
-....[Thread state: RUNNABLE]........................................................................
- 59.8%  75.8% org.rocksdb.RocksDB.put
- 16.5%  20.9% org.rocksdb.RocksDB.get
-  0.7%   0.9% java.io.UnixFileSystem.delete0
-  0.7%   0.9% org.rocksdb.RocksDB.disposeInternal
-  0.3%   0.4% java.lang.Long.formatUnsignedLong0
-  0.1%   0.2% org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test
-  0.1%   0.1% java.lang.Long.toUnsignedString0
-  0.1%   0.1% org.apache.hadoop.ozone.genesis.generated.BenchMarkRocksDbStore_test_jmhTest.test_thrpt_jmhStub
-  0.0%   0.1% java.lang.Object.clone
-  0.0%   0.0% java.lang.Thread.currentThread
-  0.4%   0.5% <other>
-
-....[Thread state: TIMED_WAITING]...................................................................
- 20.0% 100.0% java.lang.Object.wait
-
-....[Thread state: WAITING].........................................................................
-  1.1% 100.0% jdk.internal.misc.Unsafe.park
-
-
-
-# Run complete. Total time: 00:00:38
-
-Benchmark                          (backgroundThreads)  (blockSize)  (maxBackgroundFlushes)  (maxBytesForLevelBase)  (maxOpenFiles)  (maxWriteBufferNumber)  (writeBufferSize)   Mode  Cnt       Score       Error  Units
-BenchMarkRocksDbStore.test                           4            8                       4                     512            5000                      16                 64  thrpt   20  167433.864 ? 43530.883  ops/s
-BenchMarkRocksDbStore.test:?stack                    4            8                       4                     512            5000                      16                 64  thrpt              NaN                ---
-```
diff --git a/hadoop-hdds/docs/content/tools/_index.md b/hadoop-hdds/docs/content/tools/_index.md
deleted file mode 100644
index d7c9270..0000000
--- a/hadoop-hdds/docs/content/tools/_index.md
+++ /dev/null
@@ -1,65 +0,0 @@
----
-title: "Tools"
-date: "2017-10-10"
-summary: Ozone supports a set of tools that are handy for developers. Here is a quick list of command line tools.
-menu:
-   main:
-      weight: 8
----
-
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone has a set of command line tools that can be used to manage ozone.
-
-All these commands are invoked via the ```ozone``` script.
-
-Daemon commands:
-
-   * **scm** - Storage Container Manager service; can be started or stopped
-   via the daemon command.
-   * **om** - Ozone Manager; can be started or stopped via the daemon command.
-   * **datanode** - The HDDS data nodes; can be started or stopped via the
-   daemon command.
-   * **s3g** - S3 Gateway, the S3 compatible REST server; can be started or
-   stopped via the daemon command.
-
-Client commands:
-
-   * **sh** - Primary command line interface for Ozone; manages volumes,
-   buckets, and keys.
-   * **fs** - Runs a command on the Ozone file system (similar to `hdfs dfs`).
-   * **version** - Prints the version of Ozone and HDDS.
-
-
-Admin commands:
-
-   * **classpath** - Prints the class path needed to get the hadoop jar and the
-    required libraries.
-   * **dtutil** - Operations related to delegation tokens.
-   * **envvars** - Display computed Hadoop environment variables.
-   * **getconf** - Reads Ozone config values from the configuration.
-   * **jmxget**  - Get JMX exported values from NameNode or DataNode.
-   * **scmcli** - Developer-only command line interface for the Storage
-   Container Manager.
-   * **genconf** - Generates the minimally required Ozone configs and writes
-   them to ozone-site.xml.
-
-Test tools:
-
-   * **freon** - Runs the Ozone load generator.
-   * **genesis** - Developer-only Ozone micro-benchmark application.
-
- For more information, see the following subpages:
\ No newline at end of file
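The deleted tools page above enumerates the `ozone` subcommands without showing an invocation. A minimal sketch of a typical client session, assuming a running cluster and the 0.x-era `ozone sh` syntax:

```bash
# Create a volume, a bucket inside it, and upload a local file as a key.
ozone sh volume create /vol1
ozone sh bucket create /vol1/bucket1
ozone sh key put /vol1/bucket1/key1 ./localfile.txt

# List the bucket through the Hadoop-compatible file system interface.
ozone fs -ls o3fs://bucket1.vol1/

# Print the Ozone and HDDS versions.
ozone version
```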
diff --git a/hadoop-hdds/docs/dev-support/bin/generate-site.sh b/hadoop-hdds/docs/dev-support/bin/generate-site.sh
deleted file mode 100755
index d8b5d48..0000000
--- a/hadoop-hdds/docs/dev-support/bin/generate-site.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-DOCDIR="$DIR/../.."
-
-if [ ! "$(which hugo)" ]; then
-   echo "Hugo is not yet installed. Doc generation is skipped."
-   exit 0
-fi
-
-DESTDIR="$DOCDIR/target/classes/docs"
-mkdir -p "$DESTDIR"
-cd "$DOCDIR"
-hugo -d "$DESTDIR" "$@"
-cd -
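The script above is only a guarded wrapper around Hugo: it resolves the docs root, skips silently if Hugo is absent, and renders into the directory the Maven build packages. The manual equivalent, assuming Hugo is on the PATH and the repository layout shown in the diff headers:

```bash
# Render the Ozone docs into the location the Maven build packages from,
# mirroring what generate-site.sh did.
cd hadoop-hdds/docs
mkdir -p target/classes/docs
hugo -d target/classes/docs
```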
diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml
deleted file mode 100644
index 6c6d77f..0000000
--- a/hadoop-hdds/docs/pom.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdds</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-hdds-docs</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop HDDS/Ozone Documentation</description>
-  <name>Apache Hadoop HDDS/Ozone Documentation</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-
-  </dependencies>
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <version>1.6.0</version>
-        <executions>
-          <execution>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <phase>compile</phase>
-          </execution>
-        </executions>
-        <configuration>
-          <executable>dev-support/bin/generate-site.sh</executable>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>themes/ozonedoc/static/js/bootstrap.min.js</exclude>
-            <exclude>themes/ozonedoc/static/js/jquery-3.4.1.min.js</exclude>
-            <exclude>themes/ozonedoc/static/css/bootstrap-theme.min.css
-            </exclude>
-            <exclude>themes/ozonedoc/static/css/bootstrap.min.css.map</exclude>
-            <exclude>themes/ozonedoc/static/css/bootstrap.min.css</exclude>
-            <exclude>themes/ozonedoc/static/css/bootstrap-theme.min.css.map
-            </exclude>
-            <exclude>
-              themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg
-            </exclude>
-            <exclude>themes/ozonedoc/layouts/index.html</exclude>
-            <exclude>themes/ozonedoc/theme.toml</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
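Because the exec-maven-plugin execution in this pom is bound to the `compile` phase, the Hugo site was regenerated whenever the docs module compiled. Before this removal, the trigger was simply the following (a sketch assuming a full multi-module checkout; the `-pl` path matches the diff header):

```bash
# Compiling the docs module ran dev-support/bin/generate-site.sh via
# exec-maven-plugin at the compile phase.
mvn -pl hadoop-hdds/docs compile
```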
diff --git a/hadoop-hdds/docs/static/NOTES.md b/hadoop-hdds/docs/static/NOTES.md
deleted file mode 100644
index b90edcf..0000000
--- a/hadoop-hdds/docs/static/NOTES.md
+++ /dev/null
@@ -1,20 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-The source of Ozone logo is available here:
-
-https://gitbox.apache.org/repos/asf?p=hadoop-site.git;a=tree;f=ozone/static;hb=refs/heads/asf-site
diff --git a/hadoop-hdds/docs/static/OzoneOverview.png b/hadoop-hdds/docs/static/OzoneOverview.png
deleted file mode 100644
index 7e011d5..0000000
--- a/hadoop-hdds/docs/static/OzoneOverview.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/static/OzoneOverview.svg b/hadoop-hdds/docs/static/OzoneOverview.svg
deleted file mode 100644
index 9d4660d..0000000
--- a/hadoop-hdds/docs/static/OzoneOverview.svg
+++ /dev/null
@@ -1,238 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<svg width="703px" height="465px" viewBox="0 0 703 465" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
-    <!-- Generator: Sketch 44.1 (41455) - http://www.bohemiancoding.com/sketch -->
-    <title>Desktop HD</title>
-    <desc>Created with Sketch.</desc>
-    <defs>
-        <rect id="path-1" x="0" y="0" width="131" height="36" rx="8"></rect>
-        <rect id="path-2" x="0" y="0" width="131" height="36" rx="8"></rect>
-        <rect id="path-3" x="9" y="304" width="437" height="144"></rect>
-        <mask id="mask-4" maskContentUnits="userSpaceOnUse" maskUnits="objectBoundingBox" x="0" y="0" width="437" height="144" fill="white">
-            <use xlink:href="#path-3"></use>
-        </mask>
-        <rect id="path-5" x="0" y="0" width="123.06741" height="40.4358016" rx="8"></rect>
-        <rect id="path-6" x="3.26727637" y="4.49286685" width="123.06741" height="40.4358016" rx="8"></rect>
-        <rect id="path-7" x="7.07909881" y="11.2321671" width="123.06741" height="40.4358016" rx="8"></rect>
-        <rect id="path-8" x="0" y="0" width="123.06741" height="40.4358016" rx="8"></rect>
-        <rect id="path-9" x="3.26727637" y="4.49286685" width="123.06741" height="40.4358016" rx="8"></rect>
-        <rect id="path-10" x="7.07909881" y="11.2321671" width="123.06741" height="40.4358016" rx="8"></rect>
-        <rect id="path-11" x="0" y="0" width="123.06741" height="40.4358016" rx="8"></rect>
-        <rect id="path-12" x="3.26727637" y="4.49286685" width="123.06741" height="40.4358016" rx="8"></rect>
-        <rect id="path-13" x="7.07909881" y="11.2321671" width="123.06741" height="40.4358016" rx="8"></rect>
-        <rect id="path-14" x="0" y="14.7446809" width="97" height="36.1914894"></rect>
-        <rect id="path-15" x="0" y="14.7446809" width="97" height="36.1914894"></rect>
-        <rect id="path-16" x="0" y="0" width="131" height="36" rx="8"></rect>
-        <rect id="path-17" x="0" y="0" width="131" height="36" rx="8"></rect>
-        <rect id="path-18" x="0.140758874" y="0" width="142.859241" height="35.1071084" rx="8"></rect>
-        <rect id="path-19" x="0" y="0" width="226" height="36" rx="8"></rect>
-        <rect id="path-20" x="6" y="4" width="226" height="36" rx="8"></rect>
-        <rect id="path-21" x="13" y="10" width="226" height="36" rx="8"></rect>
-        <rect id="path-22" x="0" y="0" width="226" height="36" rx="8"></rect>
-        <rect id="path-23" x="6" y="4" width="226" height="36" rx="8"></rect>
-        <rect id="path-24" x="13" y="10" width="226" height="36" rx="8"></rect>
-        <rect id="path-25" x="0" y="14.7446809" width="97" height="36.1914894"></rect>
-    </defs>
-    <g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
-        <path d="M84.5,51.5 L240.5,130.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-        <path id="Line-decoration-1" d="M240.5,130.5 L232.220366,122.944362 L229.50967,128.29713 L240.5,130.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-        <path d="M142.5,150.5 L177.5,150.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-        <path id="Line-decoration-1" d="M177.5,150.5 L166.7,147.5 L166.7,153.5 L177.5,150.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-        <g id="Desktop-HD">
-            <g id="Client" transform="translate(176.000000, 132.000000)">
-                <g id="Rectangle">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-1"></use>
-                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="130" height="35" rx="8"></rect>
-                </g>
-                <text id="Ozone-Client" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                    <tspan x="24.2107393" y="24">Ozone Client</tspan>
-                </text>
-            </g>
-            <g id="Handler" transform="translate(35.000000, 18.000000)">
-                <g id="Rectangle-2">
-                    <use fill="#E4D6F8" fill-rule="evenodd" xlink:href="#path-2"></use>
-                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="130" height="35" rx="8"></rect>
-                </g>
-                <text id="Rest-Handler" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                    <tspan x="22.3208008" y="22">Rest Handler</tspan>
-                </text>
-            </g>
-            <use id="Rectangle-4" stroke="#979797" mask="url(#mask-4)" stroke-width="2" fill="#FFFFFF" stroke-dasharray="1,3,1,3" xlink:href="#path-3"></use>
-            <g id="Ratis" transform="translate(315.000000, 378.000000)">
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-5"></use>
-                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="122.06741" height="39.4358016" rx="8"></rect>
-                </g>
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-6"></use>
-                    <rect stroke="#000000" stroke-width="1" x="3.76727637" y="4.99286685" width="122.06741" height="39.4358016" rx="8"></rect>
-                </g>
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-7"></use>
-                    <rect stroke="#000000" stroke-width="1" x="7.57909881" y="11.7321671" width="122.06741" height="39.4358016" rx="8"></rect>
-                </g>
-                <text font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                    <tspan x="51.3828125" y="35.9642655">Ratis</tspan>
-                </text>
-            </g>
-            <g id="Ratis" transform="translate(166.000000, 378.000000)">
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-8"></use>
-                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="122.06741" height="39.4358016" rx="8"></rect>
-                </g>
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-9"></use>
-                    <rect stroke="#000000" stroke-width="1" x="3.76727637" y="4.99286685" width="122.06741" height="39.4358016" rx="8"></rect>
-                </g>
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-10"></use>
-                    <rect stroke="#000000" stroke-width="1" x="7.57909881" y="11.7321671" width="122.06741" height="39.4358016" rx="8"></rect>
-                </g>
-                <text font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                    <tspan x="51.3828125" y="35.9642655">Ratis</tspan>
-                </text>
-            </g>
-            <g id="Ratis" transform="translate(10.000000, 378.000000)">
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-11"></use>
-                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="122.06741" height="39.4358016" rx="8"></rect>
-                </g>
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-12"></use>
-                    <rect stroke="#000000" stroke-width="1" x="3.76727637" y="4.99286685" width="122.06741" height="39.4358016" rx="8"></rect>
-                </g>
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-13"></use>
-                    <rect stroke="#000000" stroke-width="1" x="7.57909881" y="11.7321671" width="122.06741" height="39.4358016" rx="8"></rect>
-                </g>
-                <text font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                    <tspan x="51.3828125" y="35.9642655">Ratis</tspan>
-                </text>
-            </g>
-            <path d="M240.5,168.5 L240.5,311.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-            <path id="Line-decoration-1" d="M240.5,311.5 L243.5,300.7 L237.5,300.7 L240.5,311.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-            <path d="M243.5,54.5 L243.5,131.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-            <path id="Line-decoration-1" d="M243.5,131.5 L246.5,120.7 L240.5,120.7 L243.5,131.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-            <g id="Container" transform="translate(328.000000, 313.000000)">
-                <g id="Rectangle-5">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-14"></use>
-                    <rect stroke="#000000" stroke-width="1" x="0.5" y="15.2446809" width="96" height="35.1914894"></rect>
-                </g>
-                <ellipse id="Oval" stroke="#000000" fill="#C6D4F9" cx="48.5" cy="11.393617" rx="48.5" ry="11.393617"></ellipse>
-                <ellipse id="Oval" stroke="#000000" fill="#C6D4F9" cx="48.5" cy="51.606383" rx="48.5" ry="11.393617"></ellipse>
-                <text font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                    <tspan x="14.3310547" y="37">Container</tspan>
-                </text>
-            </g>
-            <g id="Container" transform="translate(176.000000, 312.000000)">
-                <g id="Rectangle-5">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-15"></use>
-                    <rect stroke="#000000" stroke-width="1" x="0.5" y="15.2446809" width="96" height="35.1914894"></rect>
-                </g>
-                <ellipse id="Oval" stroke="#000000" fill="#C6D4F9" cx="48.5" cy="11.393617" rx="48.5" ry="11.393617"></ellipse>
-                <ellipse id="Oval" stroke="#000000" fill="#C6D4F9" cx="48.5" cy="51.606383" rx="48.5" ry="11.393617"></ellipse>
-                <text font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                    <tspan x="14.3310547" y="37">Container</tspan>
-                </text>
-            </g>
-            <g id="FileSystem" transform="translate(11.000000, 133.000000)">
-                <g id="Rectangle-2">
-                    <use fill="#7ED321" fill-rule="evenodd" xlink:href="#path-16"></use>
-                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="130" height="35" rx="8"></rect>
-                </g>
-                <text id="Ozone-File-System" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                    <tspan x="3.25878906" y="22">Ozone File System</tspan>
-                </text>
-            </g>
-            <g id="CLI" transform="translate(179.000000, 18.000000)">
-                <g id="Rectangle-2">
-                    <use fill="#E4D6F8" fill-rule="evenodd" xlink:href="#path-17"></use>
-                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="130" height="35" rx="8"></rect>
-                </g>
-                <text id="Ozone-CLI" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                    <tspan x="26.3896484" y="23">Ozone CLI</tspan>
-                </text>
-            </g>
-            <path d="M333.336323,48.7787611 L248.494492,130.227891" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-            <path id="Line-decoration-1" d="M248.494492,130.227891 L258.363039,124.91265 L254.207822,120.584351 L248.494492,130.227891 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-            <g id="Corona" transform="translate(325.000000, 17.000000)">
-                <g id="Rectangle-2">
-                    <use fill="#E4D6F8" fill-rule="evenodd" xlink:href="#path-18"></use>
-                    <rect stroke="#000000" stroke-width="1" x="0.640758874" y="0.5" width="141.859241" height="34.1071084" rx="8"></rect>
-                </g>
-                <text id="Freon" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                    <tspan x="50.7326544" y="22.8128606">Freon</tspan>
-                </text>
-            </g>
-            <path d="M307.5,148.5 L433.5,148.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-            <path id="Line-decoration-1" d="M433.5,148.5 L422.7,145.5 L422.7,151.5 L433.5,148.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-            <path d="M4,232 L699,232" id="Line" stroke="#000000" stroke-width="2" stroke-linecap="square" stroke-dasharray="5,2,5"></path>
-            <g id="OM" transform="translate(432.000000, 132.000000)">
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-19"></use>
-                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="225" height="35" rx="8"></rect>
-                </g>
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-20"></use>
-                    <rect stroke="#000000" stroke-width="1" x="6.5" y="4.5" width="225" height="35" rx="8"></rect>
-                </g>
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-21"></use>
-                    <rect stroke="#000000" stroke-width="1" x="13.5" y="10.5" width="225" height="35" rx="8"></rect>
-                </g>
-                <text id="Ozone-Manager" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                    <tspan x="67.3793945" y="32">Ozone Manager</tspan>
-                </text>
-            </g>
-            <g id="SCM" transform="translate(450.000000, 281.000000)">
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-22"></use>
-                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="225" height="35" rx="8"></rect>
-                </g>
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-23"></use>
-                    <rect stroke="#000000" stroke-width="1" x="6.5" y="4.5" width="225" height="35" rx="8"></rect>
-                </g>
-                <g id="Rectangle-3">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-24"></use>
-                    <rect stroke="#000000" stroke-width="1" x="13.5" y="10.5" width="225" height="35" rx="8"></rect>
-                </g>
-                <text id="Storage-Container-Manager" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                    <tspan x="28.0932617" y="32">Storage Container Manager</tspan>
-                </text>
-            </g>
-            <path d="M534.5,178.5 L534.5,283.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-            <path id="Line-decoration-1" d="M534.5,283.5 L537.5,272.7 L531.5,272.7 L534.5,283.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
-            <text id="Datanodes" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                <tspan x="196.213867" y="462">Datanodes</tspan>
-            </text>
-            <g id="Container" transform="translate(15.000000, 311.000000)">
-                <g id="Rectangle-5">
-                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-25"></use>
-                    <rect stroke="#000000" stroke-width="1" x="0.5" y="15.2446809" width="96" height="35.1914894"></rect>
-                </g>
-                <ellipse id="Oval" stroke="#000000" fill="#C6D4F9" cx="48.5" cy="11.393617" rx="48.5" ry="11.393617"></ellipse>
-                <ellipse id="Oval" stroke="#000000" fill="#C6D4F9" cx="48.5" cy="51.606383" rx="48.5" ry="11.393617"></ellipse>
-                <text font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
-                    <tspan x="14.3310547" y="37">Container</tspan>
-                </text>
-            </g>
-            <path d="M84.5,137.5 L86,139" id="Line" stroke="#979797" stroke-linecap="square"></path>
-            <text id="Hadoop-Distributed-D" font-family="Helvetica-Bold, Helvetica" font-size="20" font-weight="bold" fill="#000000">
-                <tspan x="205.433594" y="230">Hadoop Distributed Data Store</tspan>
-            </text>
-        </g>
-    </g>
-</svg>
\ No newline at end of file
diff --git a/hadoop-hdds/docs/static/SCMBlockDiagram.png b/hadoop-hdds/docs/static/SCMBlockDiagram.png
deleted file mode 100644
index 04d27ad..0000000
--- a/hadoop-hdds/docs/static/SCMBlockDiagram.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/static/ozone-logo-small.png b/hadoop-hdds/docs/static/ozone-logo-small.png
deleted file mode 100644
index cdc8e4e..0000000
--- a/hadoop-hdds/docs/static/ozone-logo-small.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/static/ozone-usage.png b/hadoop-hdds/docs/static/ozone-usage.png
deleted file mode 100644
index adcbdcf6..0000000
--- a/hadoop-hdds/docs/static/ozone-usage.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html
deleted file mode 100644
index 5c01241..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html
+++ /dev/null
@@ -1,71 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-{{ partial "header.html" . }}
-
-<body>
-
-{{ partial "navbar.html" . }}
-
-<div class="container-fluid">
-    <div class="row">
-        {{ partial "sidebar.html" . }}
-        <div class="col-sm-10 col-sm-offset-2 col-md-10 col-md-offset-2 main">
-            <div class="col-md-9">
-                <h1>{{ .Title }}</h1>
-            </div>
-            <div class="col-md-9">
-                {{ .Content }}
-                {{.Params.card}}
-                {{ if not (eq .Params.cards "false")}}
-                {{ range $page_index, $page_val := .Pages }}
-
-                {{ $page_count := len .Pages }}
-                {{if (eq (mod $page_index 2) 0)}}
-                <div class="row">
-                    {{end}}
-                    <div class="col-sm-6">
-                        <div class="card">
-                            <div class="card-body">
-                                <h2 class="card-title">
-                                    {{ with .Params.Icon}}
-                                    <span class="glyphicon glyphicon-{{.}}"
-                                          aria-hidden="true"></span>
-                                    {{end}}
-                                    {{ .LinkTitle }}
-                                </h2>
-                                <p class="card-text">{{.Summary}}</p>
-                                <a href="{{.Permalink}}"
-                                   class=" btn btn-primary btn-lg">{{.LinkTitle}}</a>
-                            </div>
-                        </div>
-                    </div>
-
-                    {{if (or (eq (mod $page_index 2) 1) (eq $page_index (sub $page_count 1)))}}
-                        </div>
-                    {{end}}
-                {{ end }}
-                {{end}}
-            </div>
-        </div>
-    </div>
-</div>
-
-{{ partial "footer.html" . }}
-
-</body>
-
-</html>
\ No newline at end of file
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html
deleted file mode 100644
index 3679ddb..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html
+++ /dev/null
@@ -1,57 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-{{ partial "header.html" . }}
-
-<body>
-
-  {{ partial "navbar.html" . }}
-
-  <div class="container-fluid">
-    <div class="row">
-      {{ partial "sidebar.html" . }}
-      <div class="col-sm-10 col-sm-offset-2 col-md-10 col-md-offset-2 main">
-
-
-
-        <div class="col-md-9">
-            <nav aria-label="breadcrumb">
-                <ol class="breadcrumb">
-                  <li class="breadcrumb-item"><a href="/">Home</a></li>
-                  <li class="breadcrumb-item" aria-current="page"><a href="{{.CurrentSection.Permalink}}">{{.CurrentSection.Title}}</a></li>
-                  <li class="breadcrumb-item active" aria-current="page">{{ .Title }}</li>
-                </ol>
-              </nav>
-
-          <div class="col-md-9">
-            <h1>{{.Title}}</h1>
-          </div>
-
-          {{ .Content }}
-
-          {{ with .PrevInSection }}
-          <a class="btn  btn-success btn-lg" href="{{ .Permalink }}">Next >></a>
-          {{ end }}
-        </div>
-      </div>
-    </div>
-  </div>
-
-  {{ partial "footer.html" . }}
-
-</body>
-
-</html>
\ No newline at end of file
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/index.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/index.html
deleted file mode 100644
index 045c692..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/index.html
+++ /dev/null
@@ -1,37 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-{{ partial "header.html" . }}
-
-  <body>
-
-{{ partial "navbar.html" . }}
-
-    <div class="container-fluid">
-      <div class="row">
-        {{ partial "sidebar.html" . }}
-        <div class="col-sm-10 col-sm-offset-2 col-md-10 col-md-offset-2 main">
-                    {{ .Content }}
-
-
-        </div>
-      </div>
-    </div>
-
-{{ partial "footer.html" . }}
-
-  </body>
-</html>
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/footer.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/footer.html
deleted file mode 100644
index 0e5ca0f..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/footer.html
+++ /dev/null
@@ -1,22 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<!-- Bootstrap core JavaScript
-================================================== -->
-<!-- Placed at the end of the document so the pages load faster -->
-<script src="{{ "js/jquery-3.4.1.min.js" | relURL}}"></script>
-<script src="{{ "js/ozonedoc.js" | relURL}}"></script>
-<script src="{{ "js/bootstrap.min.js" | relURL}}"></script>
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/header.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/header.html
deleted file mode 100644
index a4e24c9..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/header.html
+++ /dev/null
@@ -1,34 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
-    <meta name="description" content="Hadoop Ozone Documentation">
-
-    <title>Documentation for Apache Hadoop Ozone</title>
-
-    <!-- Bootstrap core CSS -->
-    <link href="{{ "css/bootstrap.min.css" | relURL}}" rel="stylesheet">
-
-    <!-- Custom styles for this template -->
-    <link href="{{ "css/ozonedoc.css" | relURL}}" rel="stylesheet">
-
-  </head>
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html
deleted file mode 100644
index 0f26571..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html
+++ /dev/null
@@ -1,42 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<nav class="navbar navbar-inverse navbar-fixed-top">
-  <div class="container-fluid">
-    <div class="navbar-header">
-      <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#sidebar" aria-expanded="false" aria-controls="navbar">
-        <span class="sr-only">Toggle navigation</span>
-        <span class="icon-bar"></span>
-        <span class="icon-bar"></span>
-        <span class="icon-bar"></span>
-      </button>
-      <a href="#" class="navbar-left" style="height: 50px; padding: 5px 5px 5px 0;">
-        <img src="{{ "ozone-logo-small.png" | relURL }}" width="40"/>
-      </a>
-      <a class="navbar-brand hidden-xs" href="#">
-        Apache Hadoop Ozone/HDDS documentation
-      </a>
-      <a class="navbar-brand visible-xs-inline" href="#">Hadoop Ozone</a>
-    </div>
-    <div id="navbar" class="navbar-collapse collapse">
-      <ul class="nav navbar-nav navbar-right">
-        <li><a href="https://github.com/apache/hadoop">Source</a></li>
-        <li><a href="https://hadoop.apache.org">Apache Hadoop</a></li>
-        <li><a href="https://apache.org">ASF</a></li>
-      </ul>
-    </div>
-  </div>
-</nav>
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/sidebar.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/sidebar.html
deleted file mode 100644
index e065f15..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/sidebar.html
+++ /dev/null
@@ -1,71 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<div class="col-sm-2 col-md-2 sidebar" id="sidebar">
-  <ul class="nav nav-sidebar">
-    {{ $currentPage := . }}
-    {{ range .Site.Menus.main }}
-        {{ if .HasChildren }}
-            <li class="{{ if $currentPage.IsMenuCurrent "main" . }}active{{ end }}">
-                <a href="{{ .URL | relURL}}">
-                    {{ .Pre }}
-                    <span>{{ .Name }}</span>
-                </a>
-                <ul class="nav">
-                    {{ range .Children }}
-                        <li class="{{ if $currentPage.IsMenuCurrent "main" . }}active{{ end }}">
-                           {{ if .HasChildren }}
-                               <a href="{{ .URL | relURL}}">
-                                 {{ .Pre }}
-                                 <span>{{ .Name }}</span>
-                               </a>
-                               <ul class="nav">
-                               {{ range .Children }}
-                                  <li class="{{ if $currentPage.IsMenuCurrent "main" . }}active{{ end }}">
-                                     <a href="{{ .URL | relURL}}">{{ .Name }}</a>
-                                  </li>
-                               {{ end }}
-                               </ul>
-                           {{ else }}
-                           <a href="{{ .URL }}">{{ .Name }}</a>
-                           {{ end }}
-                        </li>
-                    {{ end }}
-                </ul>
-            </li>
-        {{ else }}
-            <li class="{{ if $currentPage.IsMenuCurrent "main" . }}active{{ end }}">
-                {{ if eq .URL "/" }}
-                   <a href="{{ "index.html" | relURL }}">
-                {{ else }}
-                   <a href="{{ .URL | relURL }}">
-                {{ end }}
-
-                    {{ .Pre }}
-                    <span>{{ .Name }}</span>
-                </a>
-            </li>
-        {{ end }}
-    {{ end }}
-    <li class="visible-xs"><a href="#">References</a>
-    <ul class="nav">
-        <li><a href="https://github.com/apache/hadoop"><span class="glyphicon glyphicon-new-window" aria-hidden="true"></span> Source</a></li>
-        <li><a href="https://hadoop.apache.org"><span class="glyphicon glyphicon-new-window" aria-hidden="true"></span> Apache Hadoop</a></li>
-        <li><a href="https://apache.org"><span class="glyphicon glyphicon-new-window" aria-hidden="true"></span> ASF</a></li>
-    </ul></li>
-  </ul>
-
-</div>
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/buttonlink.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/buttonlink.html
deleted file mode 100644
index 9b88cb2..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/buttonlink.html
+++ /dev/null
@@ -1,20 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<a class="btn btn-primary">
-  {{ .Get "ref" }}
-  {{ .Inner }}
-</a>
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/card.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/card.html
deleted file mode 100644
index 16d48a3..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/card.html
+++ /dev/null
@@ -1,40 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-
-<div class="col-sm-6">
-
-    <div class="media">
-        {{with .Get "image"}}
-        <div class="media-left media-top">
-            <img src="{{.}}"></img>
-        </div>
-        {{end}}
-        <div class="media-body">
-            <h4 class="media-title">
-                {{ if .Get "icon" }}
-                   <span class="glyphicon glyphicon-{{ .Get "icon"}}"></span>
-                {{end}}
-                {{ .Get "title" }}
-            </h4>
-            {{  .Inner }}
-            {{ if .Get "link" }}
-            <p><a href="{{ .Get "link" | ref .}}" class=" btn btn-primary btn-lg">{{.Get "link-text" }}</a></p>
-            {{end}}
-        </div>
-    </div>
-</div>
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/jumbotron.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/jumbotron.html
deleted file mode 100644
index d7f1626..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/jumbotron.html
+++ /dev/null
@@ -1,25 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<div class="jumbotron jumbotron-fluid">
-    <div class="container">
-        <h3 class="display-4">{{ .Get "title"}} </h3>
-        <p class="lead">
-            {{ .Inner }}
-        </p>
-    </div>
-</div>
\ No newline at end of file
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/requirements.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/requirements.html
deleted file mode 100644
index a89cd6f..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/requirements.html
+++ /dev/null
@@ -1,22 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<div class="panel panel-default">
-    <div class="panel-heading">Requirements</div>
-    <div class="panel-body">
-        {{ .Inner | markdownify}}
-    </div>
-</div>
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css b/hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css
deleted file mode 100644
index 2a69f48..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css
+++ /dev/null
@@ -1,6 +0,0 @@
-/*!
- * Bootstrap v3.4.1 (https://getbootstrap.com/)
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- */.btn-danger,.btn-default,.btn-info,.btn-primary,.btn-success,.btn-warning{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-danger.active,.btn-danger:active,.btn-default.active,.btn-default:active,.btn-info.active,.btn-info:active,.btn-primary.active,.btn-primary:active,.btn-success.active,.btn-success:active,.btn-warning.active,.btn-warning:active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-danger.disabled,.btn-danger[disabled],.btn-default.disabled,.btn-default[disabled],.btn-info.disabled,.btn-info[disabled],.btn-primary.disabled,.btn-primary[disabled],.btn-success.disabled,.btn-success[disabled],.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-danger,fieldset[disabled] .btn-default,fieldset[disabled] .btn-info,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-success,fieldset[disabled] .btn-warning{-webkit-box-shadow:none;box-shadow:none}.btn-danger .badge,.btn-default .badge,.btn-info .badge,.btn-primary .badge,.btn-success .badge,.btn-warning .badge{text-shadow:none}.btn.active,.btn:active{background-image:none}.btn-default{background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-o-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#e0e0e0));background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;text-shadow:0 1px 0 #fff;border-color:#ccc}.btn-default:focus,.btn-default:hover{background-color:#e0e0e0;background-position:0 -15px}.btn-default.active,.btn-default:active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#e0e0e0;background-image:none}.btn-primary{background-image:-webkit-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-o-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#265a88));background-image:linear-gradient(to bottom,#337ab7 0,#265a88 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#245580}.btn-primary:focus,.btn-primary:hover{background-color:#265a88;background-position:0 
-15px}.btn-primary.active,.btn-primary:active{background-color:#265a88;border-color:#245580}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#265a88;background-image:none}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#419641));background-image:linear-gradient(to bottom,#5cb85c 0,#419641 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:focus,.btn-success:hover{background-color:#419641;background-position:0 -15px}.btn-success.active,.btn-success:active{background-color:#419641;border-color:#3e8f3e}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#419641;background-image:none}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#2aabd2));background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:focus,.btn-info:hover{background-color:#2aabd2;background-position:0 -15px}.btn-info.active,.btn-info:active{background-color:#2aabd2;border-color:#28a4c9}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#2aabd2;background-image:none}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#eb9316));background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 
100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:focus,.btn-warning:hover{background-color:#eb9316;background-position:0 -15px}.btn-warning.active,.btn-warning:active{background-color:#eb9316;border-color:#e38d13}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#eb9316;background-image:none}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c12e2a));background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:focus,.btn-danger:hover{background-color:#c12e2a;background-position:0 -15px}.btn-danger.active,.btn-danger:active{background-color:#c12e2a;border-color:#b92c28}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#c12e2a;background-image:none}.img-thumbnail,.thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x;background-color:#e8e8e8}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x;background-color:#2e6da4}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 
100%);background-image:-o-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#f8f8f8));background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-o-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dbdbdb),to(#e2e2e2));background-image:linear-gradient(to bottom,#dbdbdb 0,#e2e2e2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-o-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#3c3c3c),to(#222));background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);border-radius:4px}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-o-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#080808),to(#0f0f0f));background-image:linear-gradient(to bottom,#080808 0,#0f0f0f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-fixed-bottom,.navbar-fixed-top,.navbar-static-top{border-radius:0}@media (max-width:767px){.navbar .navbar-nav .open .dropdown-menu>.active>a,.navbar .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#c8e5bc 
100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#c8e5bc));background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);background-repeat:repeat-x;border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#b9def0));background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);background-repeat:repeat-x;border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#f8efc0));background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);background-repeat:repeat-x;border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-o-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#e7c3c3));background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);background-repeat:repeat-x;border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#ebebeb),to(#f5f5f5));background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x}.progress-bar{background-image:-webkit-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-o-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#286090));background-image:linear-gradient(to bottom,#337ab7 0,#286090 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);background-repeat:repeat-x}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#449d44));background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);background-repeat:repeat-x}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#31b0d5));background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', 
GradientType=0);background-repeat:repeat-x}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#ec971f));background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);background-repeat:repeat-x}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c9302c));background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);background-repeat:repeat-x}.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{text-shadow:0 -1px 0 #286090;background-image:-webkit-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2b669a));background-image:linear-gradient(to bottom,#337ab7 0,#2b669a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);background-repeat:repeat-x;border-color:#2b669a}.list-group-item.active .badge,.list-group-item.active:focus .badge,.list-group-item.active:hover .badge{text-shadow:none}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-webkit-gradient(linear,left 
top,left bottom,from(#dff0d8),to(#d0e9c6));background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);background-repeat:repeat-x}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#c4e3f3));background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);background-repeat:repeat-x}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#faf2cc));background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);background-repeat:repeat-x}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-o-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#ebcccc));background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);background-repeat:repeat-x}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#e8e8e8),to(#f5f5f5));background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x;border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)}
-/*# sourceMappingURL=bootstrap-theme.min.css.map */
\ No newline at end of file
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css.map b/hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css.map
deleted file mode 100644
index 5d75106..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"sources":["bootstrap-theme.css","dist/css/bootstrap-theme.css","less/theme.less","less/mixins/vendor-prefixes.less","less/mixins/gradients.less","less/mixins/reset-filter.less"],"names":[],"mappings":"AAAA;;;;ACUA,YCWA,aDbA,UAFA,aACA,aAEA,aCkBE,YAAA,EAAA,KAAA,EAAA,eC2CA,mBAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,iBF7CV,mBANA,mBACA,oBCWE,oBDRF,iBANA,iBAIA,oBANA,oBAOA,oBANA,oBAQA,oBANA,oBEmDE,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBFpCV,qBAMA,sBCJE,sBDDF,uBAHA,mBAMA,oBARA,sBAMA,uBALA,sBAMA,uBAJA,sBAMA,uBAOA,+BALA,gCAGA,6BAFA,gCACA,gCAEA,gCEwBE,mBAAA,KACQ,WAAA,KFfV,mBCnCA,oBDiCA,iBAFA,oBACA,oBAEA,oBCXI,YAAA,KDgBJ,YCyBE,YAEE,iBAAA,KAKJ,aEvEI,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QAyCA,YAAA,EAAA,IAAA,EAAA,KACA,aAAA,KDnBF,mBCrBE,mBAEE,iBAAA,QACA,oBAAA,EAAA,MDuBJ,oBCpBE,oBAEE,iBAAA,QACA,aAAA,QAMA,sBD8BJ,6BANA,4BAGA,6BANA,4BAHA,4BAFA,uBAeA,8BANA,6BAGA,8BANA,6BAHA,6BAFA,gCAeA,uCANA,sCAGA,uCANA,sCAHA,sCCdM,iBAAA,QACA,iBAAA,KAoBN,aE5EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QDgEF,mBC9DE,mBAEE,iBAAA,QACA,oBAAA,EAAA,MDgEJ,oBC7DE,oBAEE,iBAAA,QACA,aAAA,QAMA,sBDuEJ,6BANA,4BAGA,6BANA,4BAHA,4BAFA,uBAeA,8BANA,6BAGA,8BANA,6BAHA,6BAFA,gCAeA,uCANA,sCAGA,uCANA,sCAHA,sCCvDM,iBAAA,QACA,iBAAA,KAqBN,aE7EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QDyGF,mBCvGE,mBAEE,iBAAA,QACA,oBAAA,EAAA,MDyGJ,oBCtGE,oBAEE,iBAAA,QACA,aAAA,QAMA,sBDgHJ,6BANA,4BAGA,6BANA,4BAHA,4BAFA,uBAeA,8BANA,6BAGA,8BANA,6BAHA,6BAFA,gCAeA,uCANA,sCAGA,uCANA,sCAHA,sCChGM,iBAAA,QACA,iBAAA,KAsBN,UE9EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QDkJF,gBChJE,gBAEE,iBAAA,QACA,oBAAA,EAAA,MDkJJ,iBC/IE,iBAEE,iBAAA,QACA,aAAA,QAMA,mBDyJJ,0BANA,yBAGA,0BANA,yBAHA,yBAFA,oBAeA,2BANA,0BAGA,2BANA,0BAHA,0BAFA,6BAeA,oCANA,mCAGA,oCANA,mCAHA,mCCzIM,iBAAA,QACA,iBAAA,KAuBN,aE/EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QD2LF,mBCzLE,mBAEE,iBAAA,QACA,oBAAA,EAAA,MD2LJ,oBCxLE,oBAEE,iBAAA,QACA,aAAA,QAMA,sBDkMJ,6BANA,4BAGA,6BANA,4BAHA,4BAFA,uBAeA,8BANA,6BAGA,8BANA,6BAHA,6BAFA,gCAeA,uCANA,sCAGA,uCANA,sCAHA,sCClLM,iBAAA,QACA,iBAAA,KAwBN,YEhFI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QDoOF,kBClOE,kBAEE,iBAAA,QACA,oBAAA,EAAA,MDoOJ,mBCjOE,mBAEE,iBAAA,QACA,aAAA,QAMA,qBD2OJ,4BANA,2BAGA,4BANA,2BAHA,2BAFA,sBAeA,6BANA,4BAGA,6BANA,4BAHA,4BAFA,+BAeA,sCANA,qCAGA,sCANA,qCAHA,qCC3NM,iBAAA,QACA,iBAAA,KD2ON,eC5MA,WCtCE,mBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,EAAA,IAAA,IAAA,iBFsPV,0BCvMA,0BEjGI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFgGF,iBAAA,QAEF,yBD6MA,+BADA,+BGlTI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFsGF,iBAAA,QASF,gBEnHI,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GACA,kBAAA,SCnBF,OAAA,0DHqIA,cAAA,ICrEA,mBAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,iBFuRV,sCCtNA,oCEnHI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SD6CF,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBD8EV,cDoNA,iBClNE,YAAA,EAAA,IAAA,EAAA,sBAIF,gBEtII,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GACA,kBAAA,SCnBF,OAAA,0DHwJA,cAAA,IDyNF,sCC5NA,oCEtII,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SD6CF,mBAAA,MAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,gBDoFV,8BDuOA,iCC3NI
,YAAA,EAAA,KAAA,EAAA,gBDgOJ,qBADA,kBC1NA,mBAGE,cAAA,EAIF,yBAEI,mDDwNF,yDADA,yDCpNI,MAAA,KEnKF,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,UF2KJ,OACE,YAAA,EAAA,IAAA,EAAA,qBC/HA,mBAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,gBD0IV,eE5LI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoLF,aAAA,QAKF,YE7LI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoLF,aAAA,QAMF,eE9LI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoLF,aAAA,QAOF,cE/LI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoLF,aAAA,QAeF,UEvMI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF6MJ,cEjNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8MJ,sBElNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF+MJ,mBEnNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFgNJ,sBEpNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFiNJ,qBErNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFqNJ,sBExLI,iBAAA,yKACA,iBAAA,oKACA,iBAAA,iKF+LJ,YACE,cAAA,IClLA,mBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,EAAA,IAAA,IAAA,iBDoLV,wBDiQA,8BADA,8BC7PE,YAAA,EAAA,KAAA,EAAA,QEzOE,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFuOF,aAAA,QALF,+BD6QA,qCADA,qCCpQI,YAAA,KAUJ,OCvME,mBAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,EAAA,IAAA,IAAA,gBDgNV,8BElQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF+PJ,8BEnQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFgQJ,8BEpQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFiQJ,2BErQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFkQJ,8BEtQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFmQJ,6BEvQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF0QJ,ME9QI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF4QF,aAAA,QC/NA,mBAAA,MAAA,EAAA,IAAA,IAAA,eAAA,CAAA,EAAA,IAAA,EAAA,qBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,eAAA,CAAA,EAAA,IAAA,EAAA","sourcesContent":["/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.btn-default:active,\n.btn-primary:active,\n.btn-success:active,\n.btn-info:active,\n.btn-warning:active,\n.btn-danger:active,\n.btn-default.active,\n.btn-primary.active,\n.btn-success.active,\n.btn-info.active,\n.btn-warning.active,\n.btn-danger.active {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-default.disabled,\n.btn-primary.disabled,\n.btn-success.disabled,\n.btn-info.disabled,\n.btn-warning.disabled,\n.btn-danger.disabled,\n.btn-default[disabled],\n.btn-primary[disabled],\n.btn-success[disabled],\n.btn-info[disabled],\n.btn-warning[disabled],\n.btn-danger[disabled],\nfieldset[disabled] .btn-default,\nfieldset[disabled] .btn-primary,\nfieldset[disabled] .btn-success,\nfieldset[disabled] .btn-info,\nfieldset[disabled] .btn-warning,\nfieldset[disabled] .btn-danger {\n  -webkit-box-shadow: none;\n  box-shadow: 
none;\n}\n.btn-default .badge,\n.btn-primary .badge,\n.btn-success .badge,\n.btn-info .badge,\n.btn-warning .badge,\n.btn-danger .badge {\n  text-shadow: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n}\n.btn-default {\n  background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n  background-image: -o-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n  background-image: linear-gradient(to bottom, #fff 0%, #e0e0e0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #dbdbdb;\n  text-shadow: 0 1px 0 #fff;\n  border-color: #ccc;\n}\n.btn-default:hover,\n.btn-default:focus {\n  background-color: #e0e0e0;\n  background-position: 0 -15px;\n}\n.btn-default:active,\n.btn-default.active {\n  background-color: #e0e0e0;\n  border-color: #dbdbdb;\n}\n.btn-default.disabled,\n.btn-default[disabled],\nfieldset[disabled] .btn-default,\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus,\n.btn-default.disabled:active,\n.btn-default[disabled]:active,\nfieldset[disabled] .btn-default:active,\n.btn-default.disabled.active,\n.btn-default[disabled].active,\nfieldset[disabled] .btn-default.active {\n  background-color: #e0e0e0;\n  background-image: none;\n}\n.btn-primary {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #265a88 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #265a88 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #265a88 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #245580;\n}\n.btn-primary:hover,\n.btn-primary:focus {\n  background-color: #265a88;\n  background-position: 0 -15px;\n}\n.btn-primary:active,\n.btn-primary.active {\n  background-color: #265a88;\n  border-color: #245580;\n}\n.btn-primary.disabled,\n.btn-primary[disabled],\nfieldset[disabled] .btn-primary,\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus,\n.btn-primary.disabled:active,\n.btn-primary[disabled]:active,\nfieldset[disabled] .btn-primary:active,\n.btn-primary.disabled.active,\n.btn-primary[disabled].active,\nfieldset[disabled] .btn-primary.active {\n  background-color: #265a88;\n  background-image: none;\n}\n.btn-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);\n  background-image: -o-linear-gradient(top, #5cb85c 0%, #419641 100%);\n  background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #3e8f3e;\n}\n.btn-success:hover,\n.btn-success:focus {\n  background-color: 
#419641;\n  background-position: 0 -15px;\n}\n.btn-success:active,\n.btn-success.active {\n  background-color: #419641;\n  border-color: #3e8f3e;\n}\n.btn-success.disabled,\n.btn-success[disabled],\nfieldset[disabled] .btn-success,\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus,\n.btn-success.disabled:active,\n.btn-success[disabled]:active,\nfieldset[disabled] .btn-success:active,\n.btn-success.disabled.active,\n.btn-success[disabled].active,\nfieldset[disabled] .btn-success.active {\n  background-color: #419641;\n  background-image: none;\n}\n.btn-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n  background-image: -o-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n  background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #28a4c9;\n}\n.btn-info:hover,\n.btn-info:focus {\n  background-color: #2aabd2;\n  background-position: 0 -15px;\n}\n.btn-info:active,\n.btn-info.active {\n  background-color: #2aabd2;\n  border-color: #28a4c9;\n}\n.btn-info.disabled,\n.btn-info[disabled],\nfieldset[disabled] .btn-info,\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus,\n.btn-info.disabled:active,\n.btn-info[disabled]:active,\nfieldset[disabled] .btn-info:active,\n.btn-info.disabled.active,\n.btn-info[disabled].active,\nfieldset[disabled] .btn-info.active {\n  background-color: #2aabd2;\n  background-image: none;\n}\n.btn-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n  background-image: -o-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n  background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #e38d13;\n}\n.btn-warning:hover,\n.btn-warning:focus {\n  background-color: #eb9316;\n  background-position: 0 -15px;\n}\n.btn-warning:active,\n.btn-warning.active {\n  background-color: #eb9316;\n  border-color: #e38d13;\n}\n.btn-warning.disabled,\n.btn-warning[disabled],\nfieldset[disabled] .btn-warning,\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus,\n.btn-warning.disabled:active,\n.btn-warning[disabled]:active,\nfieldset[disabled] .btn-warning:active,\n.btn-warning.disabled.active,\n.btn-warning[disabled].active,\nfieldset[disabled] .btn-warning.active {\n  background-color: #eb9316;\n  background-image: none;\n}\n.btn-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 
100%);\n  background-image: -o-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n  background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #b92c28;\n}\n.btn-danger:hover,\n.btn-danger:focus {\n  background-color: #c12e2a;\n  background-position: 0 -15px;\n}\n.btn-danger:active,\n.btn-danger.active {\n  background-color: #c12e2a;\n  border-color: #b92c28;\n}\n.btn-danger.disabled,\n.btn-danger[disabled],\nfieldset[disabled] .btn-danger,\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus,\n.btn-danger.disabled:active,\n.btn-danger[disabled]:active,\nfieldset[disabled] .btn-danger:active,\n.btn-danger.disabled.active,\n.btn-danger[disabled].active,\nfieldset[disabled] .btn-danger.active {\n  background-color: #c12e2a;\n  background-image: none;\n}\n.thumbnail,\n.img-thumbnail {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n  background-color: #e8e8e8;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n  background-repeat: repeat-x;\n  background-color: #2e6da4;\n}\n.navbar-default {\n  background-image: -webkit-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n  background-image: -o-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n  background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n  background-image: -o-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n  background-image: linear-gradient(to bottom, #dbdbdb 0%, #e2e2e2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);\n  background-repeat: repeat-x;\n  
-webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n}\n.navbar-brand,\n.navbar-nav > li > a {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25);\n}\n.navbar-inverse {\n  background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);\n  background-image: -o-linear-gradient(top, #3c3c3c 0%, #222 100%);\n  background-image: linear-gradient(to bottom, #3c3c3c 0%, #222 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  border-radius: 4px;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n  background-image: -o-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n  background-image: linear-gradient(to bottom, #080808 0%, #0f0f0f 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);\n  background-repeat: repeat-x;\n  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n  box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n}\n.navbar-inverse .navbar-brand,\n.navbar-inverse .navbar-nav > li > a {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  border-radius: 0;\n}\n@media (max-width: 767px) {\n  .navbar .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n    background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n    background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n    filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n    background-repeat: repeat-x;\n  }\n}\n.alert {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.alert-success {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n  background-image: -o-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n  background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #b2dba1;\n}\n.alert-info {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n  background-image: -o-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n  background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #9acfea;\n}\n.alert-warning {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n  background-image: -o-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n  background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', 
endColorstr='#fff8efc0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #f5e79e;\n}\n.alert-danger {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n  background-image: -o-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n  background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dca7a7;\n}\n.progress {\n  background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n  background-image: -o-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n  background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #286090 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #286090 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #286090 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n  background-image: -o-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n  background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n  background-image: -o-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n  background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n  background-image: -o-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n  background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n  background-image: -o-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n  background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-striped {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 
75%, transparent 75%, transparent);\n}\n.list-group {\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  text-shadow: 0 -1px 0 #286090;\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2b669a 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #2b669a;\n}\n.list-group-item.active .badge,\n.list-group-item.active:hover .badge,\n.list-group-item.active:focus .badge {\n  text-shadow: none;\n}\n.panel {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.panel-default > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-primary > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-success > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n  background-image: -o-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n  background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-info > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n  background-image: -o-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n  background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-warning > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n  background-image: -o-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n  background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-danger > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n  background-image: -o-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n  background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.well {\n  background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n  
background-image: -o-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n  background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dcdcdc;\n  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n  box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n}\n/*# sourceMappingURL=bootstrap-theme.css.map */","/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.btn-default:active,\n.btn-primary:active,\n.btn-success:active,\n.btn-info:active,\n.btn-warning:active,\n.btn-danger:active,\n.btn-default.active,\n.btn-primary.active,\n.btn-success.active,\n.btn-info.active,\n.btn-warning.active,\n.btn-danger.active {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-default.disabled,\n.btn-primary.disabled,\n.btn-success.disabled,\n.btn-info.disabled,\n.btn-warning.disabled,\n.btn-danger.disabled,\n.btn-default[disabled],\n.btn-primary[disabled],\n.btn-success[disabled],\n.btn-info[disabled],\n.btn-warning[disabled],\n.btn-danger[disabled],\nfieldset[disabled] .btn-default,\nfieldset[disabled] .btn-primary,\nfieldset[disabled] .btn-success,\nfieldset[disabled] .btn-info,\nfieldset[disabled] .btn-warning,\nfieldset[disabled] .btn-danger {\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\n.btn-default .badge,\n.btn-primary .badge,\n.btn-success .badge,\n.btn-info .badge,\n.btn-warning .badge,\n.btn-danger .badge {\n  text-shadow: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n}\n.btn-default {\n  background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n  background-image: -o-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#fff), to(#e0e0e0));\n  background-image: linear-gradient(to bottom, #fff 0%, #e0e0e0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #dbdbdb;\n  text-shadow: 0 1px 0 #fff;\n  border-color: #ccc;\n}\n.btn-default:hover,\n.btn-default:focus {\n  background-color: #e0e0e0;\n  background-position: 0 -15px;\n}\n.btn-default:active,\n.btn-default.active {\n  background-color: #e0e0e0;\n  border-color: #dbdbdb;\n}\n.btn-default.disabled,\n.btn-default[disabled],\nfieldset[disabled] .btn-default,\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus,\n.btn-default.disabled:active,\n.btn-default[disabled]:active,\nfieldset[disabled] 
.btn-default:active,\n.btn-default.disabled.active,\n.btn-default[disabled].active,\nfieldset[disabled] .btn-default.active {\n  background-color: #e0e0e0;\n  background-image: none;\n}\n.btn-primary {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #265a88 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #265a88 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#265a88));\n  background-image: linear-gradient(to bottom, #337ab7 0%, #265a88 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #245580;\n}\n.btn-primary:hover,\n.btn-primary:focus {\n  background-color: #265a88;\n  background-position: 0 -15px;\n}\n.btn-primary:active,\n.btn-primary.active {\n  background-color: #265a88;\n  border-color: #245580;\n}\n.btn-primary.disabled,\n.btn-primary[disabled],\nfieldset[disabled] .btn-primary,\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus,\n.btn-primary.disabled:active,\n.btn-primary[disabled]:active,\nfieldset[disabled] .btn-primary:active,\n.btn-primary.disabled.active,\n.btn-primary[disabled].active,\nfieldset[disabled] .btn-primary.active {\n  background-color: #265a88;\n  background-image: none;\n}\n.btn-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);\n  background-image: -o-linear-gradient(top, #5cb85c 0%, #419641 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#419641));\n  background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #3e8f3e;\n}\n.btn-success:hover,\n.btn-success:focus {\n  background-color: #419641;\n  background-position: 0 -15px;\n}\n.btn-success:active,\n.btn-success.active {\n  background-color: #419641;\n  border-color: #3e8f3e;\n}\n.btn-success.disabled,\n.btn-success[disabled],\nfieldset[disabled] .btn-success,\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus,\n.btn-success.disabled:active,\n.btn-success[disabled]:active,\nfieldset[disabled] .btn-success:active,\n.btn-success.disabled.active,\n.btn-success[disabled].active,\nfieldset[disabled] .btn-success.active {\n  background-color: #419641;\n  background-image: none;\n}\n.btn-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n  background-image: -o-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#2aabd2));\n  background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', 
GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #28a4c9;\n}\n.btn-info:hover,\n.btn-info:focus {\n  background-color: #2aabd2;\n  background-position: 0 -15px;\n}\n.btn-info:active,\n.btn-info.active {\n  background-color: #2aabd2;\n  border-color: #28a4c9;\n}\n.btn-info.disabled,\n.btn-info[disabled],\nfieldset[disabled] .btn-info,\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus,\n.btn-info.disabled:active,\n.btn-info[disabled]:active,\nfieldset[disabled] .btn-info:active,\n.btn-info.disabled.active,\n.btn-info[disabled].active,\nfieldset[disabled] .btn-info.active {\n  background-color: #2aabd2;\n  background-image: none;\n}\n.btn-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n  background-image: -o-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#eb9316));\n  background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #e38d13;\n}\n.btn-warning:hover,\n.btn-warning:focus {\n  background-color: #eb9316;\n  background-position: 0 -15px;\n}\n.btn-warning:active,\n.btn-warning.active {\n  background-color: #eb9316;\n  border-color: #e38d13;\n}\n.btn-warning.disabled,\n.btn-warning[disabled],\nfieldset[disabled] .btn-warning,\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus,\n.btn-warning.disabled:active,\n.btn-warning[disabled]:active,\nfieldset[disabled] .btn-warning:active,\n.btn-warning.disabled.active,\n.btn-warning[disabled].active,\nfieldset[disabled] .btn-warning.active {\n  background-color: #eb9316;\n  background-image: none;\n}\n.btn-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n  background-image: -o-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9534f), to(#c12e2a));\n  background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #b92c28;\n}\n.btn-danger:hover,\n.btn-danger:focus {\n  background-color: #c12e2a;\n  background-position: 0 -15px;\n}\n.btn-danger:active,\n.btn-danger.active {\n  background-color: #c12e2a;\n  border-color: #b92c28;\n}\n.btn-danger.disabled,\n.btn-danger[disabled],\nfieldset[disabled] .btn-danger,\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] 
.btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus,\n.btn-danger.disabled:active,\n.btn-danger[disabled]:active,\nfieldset[disabled] .btn-danger:active,\n.btn-danger.disabled.active,\n.btn-danger[disabled].active,\nfieldset[disabled] .btn-danger.active {\n  background-color: #c12e2a;\n  background-image: none;\n}\n.thumbnail,\n.img-thumbnail {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8));\n  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n  background-color: #e8e8e8;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n  background-repeat: repeat-x;\n  background-color: #2e6da4;\n}\n.navbar-default {\n  background-image: -webkit-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n  background-image: -o-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#f8f8f8));\n  background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n  background-image: -o-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#dbdbdb), to(#e2e2e2));\n  background-image: linear-gradient(to bottom, #dbdbdb 0%, #e2e2e2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);\n  background-repeat: repeat-x;\n  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n}\n.navbar-brand,\n.navbar-nav > li > a {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25);\n}\n.navbar-inverse {\n  background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);\n  background-image: -o-linear-gradient(top, #3c3c3c 0%, #222 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#3c3c3c), to(#222));\n  background-image: 
linear-gradient(to bottom, #3c3c3c 0%, #222 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  border-radius: 4px;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n  background-image: -o-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#080808), to(#0f0f0f));\n  background-image: linear-gradient(to bottom, #080808 0%, #0f0f0f 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);\n  background-repeat: repeat-x;\n  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n  box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n}\n.navbar-inverse .navbar-brand,\n.navbar-inverse .navbar-nav > li > a {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  border-radius: 0;\n}\n@media (max-width: 767px) {\n  .navbar .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n    background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n    background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));\n    background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n    filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n    background-repeat: repeat-x;\n  }\n}\n.alert {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.alert-success {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n  background-image: -o-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#c8e5bc));\n  background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #b2dba1;\n}\n.alert-info {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n  background-image: -o-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#b9def0));\n  background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #9acfea;\n}\n.alert-warning {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n  background-image: -o-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#fcf8e3), to(#f8efc0));\n  background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 
100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #f5e79e;\n}\n.alert-danger {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n  background-image: -o-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#e7c3c3));\n  background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dca7a7;\n}\n.progress {\n  background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n  background-image: -o-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#ebebeb), to(#f5f5f5));\n  background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #286090 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #286090 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#286090));\n  background-image: linear-gradient(to bottom, #337ab7 0%, #286090 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n  background-image: -o-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#449d44));\n  background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n  background-image: -o-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#31b0d5));\n  background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n  background-image: -o-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#ec971f));\n  background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n  background-image: -o-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9534f), to(#c9302c));\n  background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 
100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-striped {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.list-group {\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  text-shadow: 0 -1px 0 #286090;\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2b669a));\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2b669a 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #2b669a;\n}\n.list-group-item.active .badge,\n.list-group-item.active:hover .badge,\n.list-group-item.active:focus .badge {\n  text-shadow: none;\n}\n.panel {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.panel-default > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8));\n  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-primary > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-success > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n  background-image: -o-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#d0e9c6));\n  background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-info > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n  background-image: -o-linear-gradient(top, #d9edf7 0%, #c4e3f3 
100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#c4e3f3));\n  background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-warning > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n  background-image: -o-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#fcf8e3), to(#faf2cc));\n  background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-danger > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n  background-image: -o-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#ebcccc));\n  background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.well {\n  background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n  background-image: -o-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#e8e8e8), to(#f5f5f5));\n  background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dcdcdc;\n  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n  box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n}\n/*# sourceMappingURL=bootstrap-theme.css.map */","// stylelint-disable selector-no-qualifying-type, selector-max-compound-selectors\n\n/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n\n//\n// Load core variables and mixins\n// --------------------------------------------------\n\n@import \"variables.less\";\n@import \"mixins.less\";\n\n\n//\n// Buttons\n// --------------------------------------------------\n\n// Common styles\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, .2);\n  @shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);\n  .box-shadow(@shadow);\n\n  // Reset the shadow\n  &:active,\n  &.active {\n    .box-shadow(inset 0 3px 5px rgba(0, 0, 0, .125));\n  }\n\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    .box-shadow(none);\n  }\n\n  .badge {\n    text-shadow: none;\n  }\n}\n\n// Mixin for generating new styles\n.btn-styles(@btn-color: #555) {\n  #gradient > .vertical(@start-color: @btn-color; @end-color: darken(@btn-color, 12%));\n  .reset-filter(); // Disable gradients for IE9 because filter bleeds through rounded corners; see https://github.com/twbs/bootstrap/issues/10620\n  background-repeat: repeat-x;\n  border-color: darken(@btn-color, 14%);\n\n  &:hover,\n  &:focus  {\n    background-color: 
darken(@btn-color, 12%);\n    background-position: 0 -15px;\n  }\n\n  &:active,\n  &.active {\n    background-color: darken(@btn-color, 12%);\n    border-color: darken(@btn-color, 14%);\n  }\n\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    &,\n    &:hover,\n    &:focus,\n    &.focus,\n    &:active,\n    &.active {\n      background-color: darken(@btn-color, 12%);\n      background-image: none;\n    }\n  }\n}\n\n// Common styles\n.btn {\n  // Remove the gradient for the pressed/active state\n  &:active,\n  &.active {\n    background-image: none;\n  }\n}\n\n// Apply the mixin to the buttons\n.btn-default {\n  .btn-styles(@btn-default-bg);\n  text-shadow: 0 1px 0 #fff;\n  border-color: #ccc;\n}\n.btn-primary { .btn-styles(@btn-primary-bg); }\n.btn-success { .btn-styles(@btn-success-bg); }\n.btn-info    { .btn-styles(@btn-info-bg); }\n.btn-warning { .btn-styles(@btn-warning-bg); }\n.btn-danger  { .btn-styles(@btn-danger-bg); }\n\n\n//\n// Images\n// --------------------------------------------------\n\n.thumbnail,\n.img-thumbnail {\n  .box-shadow(0 1px 2px rgba(0, 0, 0, .075));\n}\n\n\n//\n// Dropdowns\n// --------------------------------------------------\n\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  #gradient > .vertical(@start-color: @dropdown-link-hover-bg; @end-color: darken(@dropdown-link-hover-bg, 5%));\n  background-color: darken(@dropdown-link-hover-bg, 5%);\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n  background-color: darken(@dropdown-link-active-bg, 5%);\n}\n\n\n//\n// Navbar\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n  #gradient > .vertical(@start-color: lighten(@navbar-default-bg, 10%); @end-color: @navbar-default-bg);\n  .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered\n  border-radius: @navbar-border-radius;\n  @shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);\n  .box-shadow(@shadow);\n\n  .navbar-nav > .open > a,\n  .navbar-nav > .active > a {\n    #gradient > .vertical(@start-color: darken(@navbar-default-link-active-bg, 5%); @end-color: darken(@navbar-default-link-active-bg, 2%));\n    .box-shadow(inset 0 3px 9px rgba(0, 0, 0, .075));\n  }\n}\n.navbar-brand,\n.navbar-nav > li > a {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, .25);\n}\n\n// Inverted navbar\n.navbar-inverse {\n  #gradient > .vertical(@start-color: lighten(@navbar-inverse-bg, 10%); @end-color: @navbar-inverse-bg);\n  .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered; see https://github.com/twbs/bootstrap/issues/10257\n  border-radius: @navbar-border-radius;\n  .navbar-nav > .open > a,\n  .navbar-nav > .active > a {\n    #gradient > .vertical(@start-color: @navbar-inverse-link-active-bg; @end-color: lighten(@navbar-inverse-link-active-bg, 2.5%));\n    .box-shadow(inset 0 3px 9px rgba(0, 0, 0, .25));\n  }\n\n  .navbar-brand,\n  .navbar-nav > li > a {\n    text-shadow: 0 -1px 0 rgba(0, 0, 0, .25);\n  }\n}\n\n// Undo rounded corners in static and fixed navbars\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  border-radius: 0;\n}\n\n// Fix active state of dropdown items in collapsed mode\n@media (max-width: @grid-float-breakpoint-max) {\n  .navbar .navbar-nav .open .dropdown-menu > .active > a {\n    &,\n    
&:hover,\n    &:focus {\n      color: #fff;\n      #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n    }\n  }\n}\n\n\n//\n// Alerts\n// --------------------------------------------------\n\n// Common styles\n.alert {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, .2);\n  @shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);\n  .box-shadow(@shadow);\n}\n\n// Mixin for generating new styles\n.alert-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 7.5%));\n  border-color: darken(@color, 15%);\n}\n\n// Apply the mixin to the alerts\n.alert-success    { .alert-styles(@alert-success-bg); }\n.alert-info       { .alert-styles(@alert-info-bg); }\n.alert-warning    { .alert-styles(@alert-warning-bg); }\n.alert-danger     { .alert-styles(@alert-danger-bg); }\n\n\n//\n// Progress bars\n// --------------------------------------------------\n\n// Give the progress background some depth\n.progress {\n  #gradient > .vertical(@start-color: darken(@progress-bg, 4%); @end-color: @progress-bg)\n}\n\n// Mixin for generating new styles\n.progress-bar-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 10%));\n}\n\n// Apply the mixin to the progress bars\n.progress-bar            { .progress-bar-styles(@progress-bar-bg); }\n.progress-bar-success    { .progress-bar-styles(@progress-bar-success-bg); }\n.progress-bar-info       { .progress-bar-styles(@progress-bar-info-bg); }\n.progress-bar-warning    { .progress-bar-styles(@progress-bar-warning-bg); }\n.progress-bar-danger     { .progress-bar-styles(@progress-bar-danger-bg); }\n\n// Reset the striped class because our mixins don't do multiple gradients and\n// the above custom styles override the new `.progress-bar-striped` in v3.2.0.\n.progress-bar-striped {\n  #gradient > .striped();\n}\n\n\n//\n// List groups\n// --------------------------------------------------\n\n.list-group {\n  border-radius: @border-radius-base;\n  .box-shadow(0 1px 2px rgba(0, 0, 0, .075));\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  text-shadow: 0 -1px 0 darken(@list-group-active-bg, 10%);\n  #gradient > .vertical(@start-color: @list-group-active-bg; @end-color: darken(@list-group-active-bg, 7.5%));\n  border-color: darken(@list-group-active-border, 7.5%);\n\n  .badge {\n    text-shadow: none;\n  }\n}\n\n\n//\n// Panels\n// --------------------------------------------------\n\n// Common styles\n.panel {\n  .box-shadow(0 1px 2px rgba(0, 0, 0, .05));\n}\n\n// Mixin for generating new styles\n.panel-heading-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 5%));\n}\n\n// Apply the mixin to the panel headings only\n.panel-default > .panel-heading   { .panel-heading-styles(@panel-default-heading-bg); }\n.panel-primary > .panel-heading   { .panel-heading-styles(@panel-primary-heading-bg); }\n.panel-success > .panel-heading   { .panel-heading-styles(@panel-success-heading-bg); }\n.panel-info > .panel-heading      { .panel-heading-styles(@panel-info-heading-bg); }\n.panel-warning > .panel-heading   { .panel-heading-styles(@panel-warning-heading-bg); }\n.panel-danger > .panel-heading    { .panel-heading-styles(@panel-danger-heading-bg); }\n\n\n//\n// Wells\n// --------------------------------------------------\n\n.well {\n  #gradient > .vertical(@start-color: darken(@well-bg, 5%); @end-color: @well-bg);\n  border-color: darken(@well-bg, 
10%);\n  @shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);\n  .box-shadow(@shadow);\n}\n","// stylelint-disable indentation, property-no-vendor-prefix, selector-no-vendor-prefix\n\n// Vendor Prefixes\n//\n// All vendor mixins are deprecated as of v3.2.0 due to the introduction of\n// Autoprefixer in our Gruntfile. They have been removed in v4.\n\n// - Animations\n// - Backface visibility\n// - Box shadow\n// - Box sizing\n// - Content columns\n// - Hyphens\n// - Placeholder text\n// - Transformations\n// - Transitions\n// - User Select\n\n\n// Animations\n.animation(@animation) {\n  -webkit-animation: @animation;\n       -o-animation: @animation;\n          animation: @animation;\n}\n.animation-name(@name) {\n  -webkit-animation-name: @name;\n          animation-name: @name;\n}\n.animation-duration(@duration) {\n  -webkit-animation-duration: @duration;\n          animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n  -webkit-animation-timing-function: @timing-function;\n          animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n  -webkit-animation-delay: @delay;\n          animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n  -webkit-animation-iteration-count: @iteration-count;\n          animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n  -webkit-animation-direction: @direction;\n          animation-direction: @direction;\n}\n.animation-fill-mode(@fill-mode) {\n  -webkit-animation-fill-mode: @fill-mode;\n          animation-fill-mode: @fill-mode;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n\n.backface-visibility(@visibility) {\n  -webkit-backface-visibility: @visibility;\n     -moz-backface-visibility: @visibility;\n          backface-visibility: @visibility;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n// supported browsers that have box shadow capabilities now support it.\n\n.box-shadow(@shadow) {\n  -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n          box-shadow: @shadow;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n  -webkit-box-sizing: @boxmodel;\n     -moz-box-sizing: @boxmodel;\n          box-sizing: @boxmodel;\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n  -webkit-column-count: @column-count;\n     -moz-column-count: @column-count;\n          column-count: @column-count;\n  -webkit-column-gap: @column-gap;\n     -moz-column-gap: @column-gap;\n          column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n  -webkit-hyphens: @mode;\n     -moz-hyphens: @mode;\n      -ms-hyphens: @mode; // IE10+\n       -o-hyphens: @mode;\n          hyphens: @mode;\n  word-wrap: break-word;\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n  // Firefox\n  &::-moz-placeholder {\n    color: @color;\n    opacity: 1; // Override Firefox's unusual default opacity; see https://github.com/twbs/bootstrap/pull/11526\n  }\n  &:-ms-input-placeholder { color: @color; } // Internet Explorer 10+\n  &::-webkit-input-placeholder  { color: @color; } // Safari and Chrome\n}\n\n// Transformations\n.scale(@ratio) {\n  -webkit-transform: scale(@ratio);\n      -ms-transform: scale(@ratio); // IE9 only\n       -o-transform: scale(@ratio);\n          transform: 
scale(@ratio);\n}\n.scale(@ratioX; @ratioY) {\n  -webkit-transform: scale(@ratioX, @ratioY);\n      -ms-transform: scale(@ratioX, @ratioY); // IE9 only\n       -o-transform: scale(@ratioX, @ratioY);\n          transform: scale(@ratioX, @ratioY);\n}\n.scaleX(@ratio) {\n  -webkit-transform: scaleX(@ratio);\n      -ms-transform: scaleX(@ratio); // IE9 only\n       -o-transform: scaleX(@ratio);\n          transform: scaleX(@ratio);\n}\n.scaleY(@ratio) {\n  -webkit-transform: scaleY(@ratio);\n      -ms-transform: scaleY(@ratio); // IE9 only\n       -o-transform: scaleY(@ratio);\n          transform: scaleY(@ratio);\n}\n.skew(@x; @y) {\n  -webkit-transform: skewX(@x) skewY(@y);\n      -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n       -o-transform: skewX(@x) skewY(@y);\n          transform: skewX(@x) skewY(@y);\n}\n.translate(@x; @y) {\n  -webkit-transform: translate(@x, @y);\n      -ms-transform: translate(@x, @y); // IE9 only\n       -o-transform: translate(@x, @y);\n          transform: translate(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n  -webkit-transform: translate3d(@x, @y, @z);\n          transform: translate3d(@x, @y, @z);\n}\n.rotate(@degrees) {\n  -webkit-transform: rotate(@degrees);\n      -ms-transform: rotate(@degrees); // IE9 only\n       -o-transform: rotate(@degrees);\n          transform: rotate(@degrees);\n}\n.rotateX(@degrees) {\n  -webkit-transform: rotateX(@degrees);\n      -ms-transform: rotateX(@degrees); // IE9 only\n       -o-transform: rotateX(@degrees);\n          transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n  -webkit-transform: rotateY(@degrees);\n      -ms-transform: rotateY(@degrees); // IE9 only\n       -o-transform: rotateY(@degrees);\n          transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n  -webkit-perspective: @perspective;\n     -moz-perspective: @perspective;\n          perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n  -webkit-perspective-origin: @perspective;\n     -moz-perspective-origin: @perspective;\n          perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n  -webkit-transform-origin: @origin;\n     -moz-transform-origin: @origin;\n      -ms-transform-origin: @origin; // IE9 only\n          transform-origin: @origin;\n}\n\n\n// Transitions\n\n.transition(@transition) {\n  -webkit-transition: @transition;\n       -o-transition: @transition;\n          transition: @transition;\n}\n.transition-property(@transition-property) {\n  -webkit-transition-property: @transition-property;\n          transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n  -webkit-transition-delay: @transition-delay;\n          transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n  -webkit-transition-duration: @transition-duration;\n          transition-duration: @transition-duration;\n}\n.transition-timing-function(@timing-function) {\n  -webkit-transition-timing-function: @timing-function;\n          transition-timing-function: @timing-function;\n}\n.transition-transform(@transition) {\n  -webkit-transition: -webkit-transform @transition;\n     -moz-transition: -moz-transform @transition;\n       -o-transition: -o-transform @transition;\n          transition: transform @transition;\n}\n\n\n// User select\n// For selecting text on the page\n\n.user-select(@select) {\n  -webkit-user-select: @select;\n     -moz-user-select: @select;\n      -ms-user-select: @select; // IE10+\n          user-select: 
@select;\n}\n","// stylelint-disable value-no-vendor-prefix, selector-max-id\n\n#gradient {\n\n  // Horizontal gradient, from left to right\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Opera 12\n    background-image: linear-gradient(to right, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down\n    background-repeat: repeat-x;\n  }\n\n  // Vertical gradient, from top to bottom\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Opera 12\n    background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down\n    background-repeat: repeat-x;\n  }\n\n  .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n    background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(@deg, @start-color, @end-color); // Opera 12\n    background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n  }\n  .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n    background-repeat: no-repeat;\n  }\n  .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    filter: 
e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n    background-repeat: no-repeat;\n  }\n  .radial(@inner-color: #555; @outer-color: #333) {\n    background-image: -webkit-radial-gradient(circle, @inner-color, @outer-color);\n    background-image: radial-gradient(circle, @inner-color, @outer-color);\n    background-repeat: no-repeat;\n  }\n  .striped(@color: rgba(255, 255, 255, .15); @angle: 45deg) {\n    background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: -o-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n  }\n}\n","// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n\n.reset-filter() {\n  filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n"]}
\ No newline at end of file
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap.min.css b/hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap.min.css
deleted file mode 100644
index 5b96335..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap.min.css
+++ /dev/null
@@ -1,6 +0,0 @@
-/*!
- * Bootstrap v3.4.1 (https://getbootstrap.com/)
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- *//*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:none;text-decoration:underline;-webkit-text-decoration:underline dotted;-moz-text-decoration:underline dotted;text-decoration:underline dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{font-size:2em;margin:.67em 0}mark{background:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;height:0}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{color:inherit;font:inherit;margin:0}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-appearance:textfield;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}legend{border:0;padding:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-collapse:collapse;border-spacing:0}td,th{padding:0}/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */@media print{*,:after,:before{color:#000!important;text-shadow:none!important;background:0 0!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #ddd!important}}@font-face{font-family:"Glyphicons Halflings";src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format("embedded-opentype"),url(../fonts/glyphicons-halflings-regular.woff2) format("woff2"),url(../fonts/glyphicons-halflings-regular.woff) format("woff"),url(../fonts/glyphicons-halflings-regular.ttf) format("truetype"),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format("svg")}.glyphicon{position:relative;top:1px;display:inline-block;font-family:"Glyphicons Halflings";font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\002a"}.glyphicon-plus:before{content:"\002b"}.glyphicon-eur:before,.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon
-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:be
fore{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-cd:before{content:"\e201"}.glyphicon-save-file:before{content:"\e202"}.glyphicon-open-file:before{content:"\e203"}.glyphicon-level-up:before{content:"\e204"}.glyphicon-copy:before{content:"\e205"}.glyphicon-paste:before{content:"\e206"}.glyphicon-alert:before{content:"\e209"}.glyphicon-equalizer:before{content:"\e210"}.glyphicon-king:before{content:"\e211
"}.glyphicon-queen:before{content:"\e212"}.glyphicon-pawn:before{content:"\e213"}.glyphicon-bishop:before{content:"\e214"}.glyphicon-knight:before{content:"\e215"}.glyphicon-baby-formula:before{content:"\e216"}.glyphicon-tent:before{content:"\26fa"}.glyphicon-blackboard:before{content:"\e218"}.glyphicon-bed:before{content:"\e219"}.glyphicon-apple:before{content:"\f8ff"}.glyphicon-erase:before{content:"\e221"}.glyphicon-hourglass:before{content:"\231b"}.glyphicon-lamp:before{content:"\e223"}.glyphicon-duplicate:before{content:"\e224"}.glyphicon-piggy-bank:before{content:"\e225"}.glyphicon-scissors:before{content:"\e226"}.glyphicon-bitcoin:before{content:"\e227"}.glyphicon-btc:before{content:"\e227"}.glyphicon-xbt:before{content:"\e227"}.glyphicon-yen:before{content:"\00a5"}.glyphicon-jpy:before{content:"\00a5"}.glyphicon-ruble:before{content:"\20bd"}.glyphicon-rub:before{content:"\20bd"}.glyphicon-scale:before{content:"\e230"}.glyphicon-ice-lolly:before{content:"\e231"}.glyphicon-ice-lolly-tasted:before{content:"\e232"}.glyphicon-education:before{content:"\e233"}.glyphicon-option-horizontal:before{content:"\e234"}.glyphicon-option-vertical:before{content:"\e235"}.glyphicon-menu-hamburger:before{content:"\e236"}.glyphicon-modal-window:before{content:"\e237"}.glyphicon-oil:before{content:"\e238"}.glyphicon-grain:before{content:"\e239"}.glyphicon-sunglasses:before{content:"\e240"}.glyphicon-text-size:before{content:"\e241"}.glyphicon-text-color:before{content:"\e242"}.glyphicon-text-background:before{content:"\e243"}.glyphicon-object-align-top:before{content:"\e244"}.glyphicon-object-align-bottom:before{content:"\e245"}.glyphicon-object-align-horizontal:before{content:"\e246"}.glyphicon-object-align-left:before{content:"\e247"}.glyphicon-object-align-vertical:before{content:"\e248"}.glyphicon-object-align-right:before{content:"\e249"}.glyphicon-triangle-right:before{content:"\e250"}.glyphicon-triangle-left:before{content:"\e251"}.glyphicon-triangle-bottom:before{content:"\e252"}.glyphicon-triangle-top:before{content:"\e253"}.glyphicon-console:before{content:"\e254"}.glyphicon-superscript:before{content:"\e255"}.glyphicon-subscript:before{content:"\e256"}.glyphicon-menu-left:before{content:"\e257"}.glyphicon-menu-right:before{content:"\e258"}.glyphicon-menu-down:before{content:"\e259"}.glyphicon-menu-up:before{content:"\e260"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:after,:before{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#337ab7;text-decoration:none}a:focus,a:hover{color:#23527c;text-decoration:underline}a:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.carousel-inner>.item>a>img,.carousel-inner>.item>img,.img-responsive,.thumbnail a>img,.thumbnail>img{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out;display:inline-block;max-width:100%;height:auto}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px 
solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role=button]{cursor:pointer}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-weight:400;line-height:1;color:#777}.h1,.h2,.h3,h1,h2,h3{margin-top:20px;margin-bottom:10px}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small{font-size:65%}.h4,.h5,.h6,h4,h5,h6{margin-top:10px;margin-bottom:10px}.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-size:75%}.h1,h1{font-size:36px}.h2,h2{font-size:30px}.h3,h3{font-size:24px}.h4,h4{font-size:18px}.h5,h5{font-size:14px}.h6,h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media (min-width:768px){.lead{font-size:21px}}.small,small{font-size:85%}.mark,mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#337ab7}a.text-primary:focus,a.text-primary:hover{color:#286090}.text-success{color:#3c763d}a.text-success:focus,a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:focus,a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:focus,a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:focus,a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#337ab7}a.bg-primary:focus,a.bg-primary:hover{background-color:#286090}.bg-success{background-color:#dff0d8}a.bg-success:focus,a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:focus,a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:focus,a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:focus,a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ol,ul{margin-top:0;margin-bottom:10px}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none;margin-left:-5px}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dd,dt{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;clear:left;text-align:right;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[data-original-title],abbr[title]{cursor:help}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote ol:last-child,blockquote p:last-child,blockquote ul:last-child{margin-bottom:0}blockquote .small,blockquote footer,blockquote 
small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote .small:before,blockquote footer:before,blockquote small:before{content:"\2014 \00A0"}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse .small:before,.blockquote-reverse footer:before,.blockquote-reverse small:before,blockquote.pull-right .small:before,blockquote.pull-right footer:before,blockquote.pull-right small:before{content:""}.blockquote-reverse .small:after,.blockquote-reverse footer:after,.blockquote-reverse small:after,blockquote.pull-right .small:after,blockquote.pull-right footer:after,blockquote.pull-right small:after{content:"\00A0 \2014"}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;font-weight:700;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media (min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.row-no-gutters{margin-right:0;margin-left:0}.row-no-gutters 
[class*=col-]{padding-right:0;padding-left:0}.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media 
(min-width:768px){.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media (min-width:992px){.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin-left:0}}@media 
(min-width:1200px){.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}table{background-color:transparent}table col[class*=col-]{position:static;display:table-column;float:none}table td[class*=col-],table th[class*=col-]{position:static;display:table-cell;float:none}caption{padding-top:8px;padding-bottom:8px;color:#777;text-align:left}th{text-align:left}.table{width:100%;max-width:100%;margin-bottom:20px}.table>tbody>tr>td,.table>tbody>tr>th,.table>tfoot>tr>td,.table>tfoot>tr>th,.table>thead>tr>td,.table>thead>tr>th{padding:8px;line-height:1.42857143;vertical-align:top;border-top:1px solid #ddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table>caption+thead>tr:first-child>td,.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>td,.table>thead:first-child>tr:first-child>th{border-top:0}.table>tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed>tbody>tr>td,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>td,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>thead>tr>th{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>tbody>tr>td,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>td,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border:1px solid 
#ddd}.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border-bottom-width:2px}.table-striped>tbody>tr:nth-of-type(odd){background-color:#f9f9f9}.table-hover>tbody>tr:hover{background-color:#f5f5f5}.table>tbody>tr.active>td,.table>tbody>tr.active>th,.table>tbody>tr>td.active,.table>tbody>tr>th.active,.table>tfoot>tr.active>td,.table>tfoot>tr.active>th,.table>tfoot>tr>td.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>thead>tr.active>th,.table>thead>tr>td.active,.table>thead>tr>th.active{background-color:#f5f5f5}.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr.active:hover>th,.table-hover>tbody>tr:hover>.active,.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover{background-color:#e8e8e8}.table>tbody>tr.success>td,.table>tbody>tr.success>th,.table>tbody>tr>td.success,.table>tbody>tr>th.success,.table>tfoot>tr.success>td,.table>tfoot>tr.success>th,.table>tfoot>tr>td.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>thead>tr.success>th,.table>thead>tr>td.success,.table>thead>tr>th.success{background-color:#dff0d8}.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr.success:hover>th,.table-hover>tbody>tr:hover>.success,.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover{background-color:#d0e9c6}.table>tbody>tr.info>td,.table>tbody>tr.info>th,.table>tbody>tr>td.info,.table>tbody>tr>th.info,.table>tfoot>tr.info>td,.table>tfoot>tr.info>th,.table>tfoot>tr>td.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>thead>tr.info>th,.table>thead>tr>td.info,.table>thead>tr>th.info{background-color:#d9edf7}.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr.info:hover>th,.table-hover>tbody>tr:hover>.info,.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover{background-color:#c4e3f3}.table>tbody>tr.warning>td,.table>tbody>tr.warning>th,.table>tbody>tr>td.warning,.table>tbody>tr>th.warning,.table>tfoot>tr.warning>td,.table>tfoot>tr.warning>th,.table>tfoot>tr>td.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>thead>tr.warning>th,.table>thead>tr>td.warning,.table>thead>tr>th.warning{background-color:#fcf8e3}.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr.warning:hover>th,.table-hover>tbody>tr:hover>.warning,.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover{background-color:#faf2cc}.table>tbody>tr.danger>td,.table>tbody>tr.danger>th,.table>tbody>tr>td.danger,.table>tbody>tr>th.danger,.table>tfoot>tr.danger>td,.table>tfoot>tr.danger>th,.table>tfoot>tr>td.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>thead>tr.danger>th,.table>thead>tr>td.danger,.table>thead>tr>th.danger{background-color:#f2dede}.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr.danger:hover>th,.table-hover>tbody>tr:hover>.danger,.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover{background-color:#ebcccc}.table-responsive{min-height:.01%;overflow-x:auto}@media screen and (max-width:767px){.table-responsive{width:100%;margin-bottom:15px;overflow-y:hidden;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid 
#ddd}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>td,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>thead>tr>th{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;max-width:100%;margin-bottom:5px;font-weight:700}input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;-webkit-appearance:none;-moz-appearance:none;appearance:none}input[type=checkbox],input[type=radio]{margin:4px 0 0;margin-top:1px\9;line-height:normal}fieldset[disabled] input[type=checkbox],fieldset[disabled] input[type=radio],input[type=checkbox].disabled,input[type=checkbox][disabled],input[type=radio].disabled,input[type=radio][disabled]{cursor:not-allowed}input[type=file]{display:block}input[type=range]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:7px;font-size:14px;line-height:1.42857143;color:#555}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.42857143;color:#555;background-color:#fff;background-image:none;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;-o-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;-webkit-transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s,-webkit-box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6);box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px 
rgba(102,175,233,.6)}.form-control::-moz-placeholder{color:#999;opacity:1}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control::-ms-expand{background-color:transparent;border:0}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{background-color:#eee;opacity:1}.form-control[disabled],fieldset[disabled] .form-control{cursor:not-allowed}textarea.form-control{height:auto}@media screen and (-webkit-min-device-pixel-ratio:0){input[type=date].form-control,input[type=datetime-local].form-control,input[type=month].form-control,input[type=time].form-control{line-height:34px}.input-group-sm input[type=date],.input-group-sm input[type=datetime-local],.input-group-sm input[type=month],.input-group-sm input[type=time],input[type=date].input-sm,input[type=datetime-local].input-sm,input[type=month].input-sm,input[type=time].input-sm{line-height:30px}.input-group-lg input[type=date],.input-group-lg input[type=datetime-local],.input-group-lg input[type=month],.input-group-lg input[type=time],input[type=date].input-lg,input[type=datetime-local].input-lg,input[type=month].input-lg,input[type=time].input-lg{line-height:46px}}.form-group{margin-bottom:15px}.checkbox,.radio{position:relative;display:block;margin-top:10px;margin-bottom:10px}.checkbox.disabled label,.radio.disabled label,fieldset[disabled] .checkbox label,fieldset[disabled] .radio label{cursor:not-allowed}.checkbox label,.radio label{min-height:20px;padding-left:20px;margin-bottom:0;font-weight:400;cursor:pointer}.checkbox input[type=checkbox],.checkbox-inline input[type=checkbox],.radio input[type=radio],.radio-inline input[type=radio]{position:absolute;margin-top:4px\9;margin-left:-20px}.checkbox+.checkbox,.radio+.radio{margin-top:-5px}.checkbox-inline,.radio-inline{position:relative;display:inline-block;padding-left:20px;margin-bottom:0;font-weight:400;vertical-align:middle;cursor:pointer}.checkbox-inline.disabled,.radio-inline.disabled,fieldset[disabled] .checkbox-inline,fieldset[disabled] .radio-inline{cursor:not-allowed}.checkbox-inline+.checkbox-inline,.radio-inline+.radio-inline{margin-top:0;margin-left:10px}.form-control-static{min-height:34px;padding-top:7px;padding-bottom:7px;margin-bottom:0}.form-control-static.input-lg,.form-control-static.input-sm{padding-right:0;padding-left:0}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}select[multiple].input-sm,textarea.input-sm{height:auto}.form-group-sm .form-control{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.form-group-sm select.form-control{height:30px;line-height:30px}.form-group-sm select[multiple].form-control,.form-group-sm textarea.form-control{height:auto}.form-group-sm .form-control-static{height:30px;min-height:32px;padding:6px 10px;font-size:12px;line-height:1.5}.input-lg{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-lg{height:46px;line-height:46px}select[multiple].input-lg,textarea.input-lg{height:auto}.form-group-lg .form-control{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.form-group-lg select.form-control{height:46px;line-height:46px}.form-group-lg select[multiple].form-control,.form-group-lg textarea.form-control{height:auto}.form-group-lg .form-control-static{height:46px;min-height:38px;padding:11px 
16px;font-size:18px;line-height:1.3333333}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.form-control-feedback{position:absolute;top:0;right:0;z-index:2;display:block;width:34px;height:34px;line-height:34px;text-align:center;pointer-events:none}.form-group-lg .form-control+.form-control-feedback,.input-group-lg+.form-control-feedback,.input-lg+.form-control-feedback{width:46px;height:46px;line-height:46px}.form-group-sm .form-control+.form-control-feedback,.input-group-sm+.form-control-feedback,.input-sm+.form-control-feedback{width:30px;height:30px;line-height:30px}.has-success .checkbox,.has-success .checkbox-inline,.has-success .control-label,.has-success .help-block,.has-success .radio,.has-success .radio-inline,.has-success.checkbox label,.has-success.checkbox-inline label,.has-success.radio label,.has-success.radio-inline label{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;background-color:#dff0d8;border-color:#3c763d}.has-success .form-control-feedback{color:#3c763d}.has-warning .checkbox,.has-warning .checkbox-inline,.has-warning .control-label,.has-warning .help-block,.has-warning .radio,.has-warning .radio-inline,.has-warning.checkbox label,.has-warning.checkbox-inline label,.has-warning.radio label,.has-warning.radio-inline label{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;background-color:#fcf8e3;border-color:#8a6d3b}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .checkbox,.has-error .checkbox-inline,.has-error .control-label,.has-error .help-block,.has-error .radio,.has-error .radio-inline,.has-error.checkbox label,.has-error.checkbox-inline label,.has-error.radio label,.has-error.radio-inline label{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;background-color:#f2dede;border-color:#a94442}.has-error .form-control-feedback{color:#a94442}.has-feedback label~.form-control-feedback{top:25px}.has-feedback label.sr-only~.form-control-feedback{top:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-static{display:inline-block}.form-inline .input-group{display:inline-table;vertical-align:middle}.form-inline .input-group .form-control,.form-inline .input-group .input-group-addon,.form-inline .input-group .input-group-btn{width:auto}.form-inline .input-group>.form-control{width:100%}.form-inline 
.control-label{margin-bottom:0;vertical-align:middle}.form-inline .checkbox,.form-inline .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.form-inline .checkbox label,.form-inline .radio label{padding-left:0}.form-inline .checkbox input[type=checkbox],.form-inline .radio input[type=radio]{position:relative;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .checkbox,.form-horizontal .checkbox-inline,.form-horizontal .radio,.form-horizontal .radio-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .checkbox,.form-horizontal .radio{min-height:27px}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.form-horizontal .control-label{padding-top:7px;margin-bottom:0;text-align:right}}.form-horizontal .has-feedback .form-control-feedback{right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:11px;font-size:18px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px;font-size:12px}}.btn{display:inline-block;margin-bottom:0;font-weight:400;text-align:center;white-space:nowrap;vertical-align:middle;-ms-touch-action:manipulation;touch-action:manipulation;cursor:pointer;background-image:none;border:1px solid transparent;padding:6px 12px;font-size:14px;line-height:1.42857143;border-radius:4px;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.btn.active.focus,.btn.active:focus,.btn.focus,.btn:active.focus,.btn:active:focus,.btn:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.focus,.btn:focus,.btn:hover{color:#333;text-decoration:none}.btn.active,.btn:active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{cursor:not-allowed;filter:alpha(opacity=65);opacity:.65;-webkit-box-shadow:none;box-shadow:none}a.btn.disabled,fieldset[disabled] a.btn{pointer-events:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default.focus,.btn-default:focus{color:#333;background-color:#e6e6e6;border-color:#8c8c8c}.btn-default:hover{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{color:#333;background-color:#e6e6e6;background-image:none;border-color:#adadad}.btn-default.active.focus,.btn-default.active:focus,.btn-default.active:hover,.btn-default:active.focus,.btn-default:active:focus,.btn-default:active:hover,.open>.dropdown-toggle.btn-default.focus,.open>.dropdown-toggle.btn-default:focus,.open>.dropdown-toggle.btn-default:hover{color:#333;background-color:#d4d4d4;border-color:#8c8c8c}.btn-default.disabled.focus,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled].focus,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#fff;border-color:#ccc}.btn-default 
.badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#337ab7;border-color:#2e6da4}.btn-primary.focus,.btn-primary:focus{color:#fff;background-color:#286090;border-color:#122b40}.btn-primary:hover{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{color:#fff;background-color:#286090;background-image:none;border-color:#204d74}.btn-primary.active.focus,.btn-primary.active:focus,.btn-primary.active:hover,.btn-primary:active.focus,.btn-primary:active:focus,.btn-primary:active:hover,.open>.dropdown-toggle.btn-primary.focus,.open>.dropdown-toggle.btn-primary:focus,.open>.dropdown-toggle.btn-primary:hover{color:#fff;background-color:#204d74;border-color:#122b40}.btn-primary.disabled.focus,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled].focus,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#337ab7;border-color:#2e6da4}.btn-primary .badge{color:#337ab7;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success.focus,.btn-success:focus{color:#fff;background-color:#449d44;border-color:#255625}.btn-success:hover{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{color:#fff;background-color:#449d44;background-image:none;border-color:#398439}.btn-success.active.focus,.btn-success.active:focus,.btn-success.active:hover,.btn-success:active.focus,.btn-success:active:focus,.btn-success:active:hover,.open>.dropdown-toggle.btn-success.focus,.open>.dropdown-toggle.btn-success:focus,.open>.dropdown-toggle.btn-success:hover{color:#fff;background-color:#398439;border-color:#255625}.btn-success.disabled.focus,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled].focus,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#5cb85c;border-color:#4cae4c}.btn-success .badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info.focus,.btn-info:focus{color:#fff;background-color:#31b0d5;border-color:#1b6d85}.btn-info:hover{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{color:#fff;background-color:#31b0d5;background-image:none;border-color:#269abc}.btn-info.active.focus,.btn-info.active:focus,.btn-info.active:hover,.btn-info:active.focus,.btn-info:active:focus,.btn-info:active:hover,.open>.dropdown-toggle.btn-info.focus,.open>.dropdown-toggle.btn-info:focus,.open>.dropdown-toggle.btn-info:hover{color:#fff;background-color:#269abc;border-color:#1b6d85}.btn-info.disabled.focus,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled].focus,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#5bc0de;border-color:#46b8da}.btn-info 
.badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning.focus,.btn-warning:focus{color:#fff;background-color:#ec971f;border-color:#985f0d}.btn-warning:hover{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{color:#fff;background-color:#ec971f;background-image:none;border-color:#d58512}.btn-warning.active.focus,.btn-warning.active:focus,.btn-warning.active:hover,.btn-warning:active.focus,.btn-warning:active:focus,.btn-warning:active:hover,.open>.dropdown-toggle.btn-warning.focus,.open>.dropdown-toggle.btn-warning:focus,.open>.dropdown-toggle.btn-warning:hover{color:#fff;background-color:#d58512;border-color:#985f0d}.btn-warning.disabled.focus,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled].focus,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#f0ad4e;border-color:#eea236}.btn-warning .badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger.focus,.btn-danger:focus{color:#fff;background-color:#c9302c;border-color:#761c19}.btn-danger:hover{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{color:#fff;background-color:#c9302c;background-image:none;border-color:#ac2925}.btn-danger.active.focus,.btn-danger.active:focus,.btn-danger.active:hover,.btn-danger:active.focus,.btn-danger:active:focus,.btn-danger:active:hover,.open>.dropdown-toggle.btn-danger.focus,.open>.dropdown-toggle.btn-danger:focus,.open>.dropdown-toggle.btn-danger:hover{color:#fff;background-color:#ac2925;border-color:#761c19}.btn-danger.disabled.focus,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled].focus,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{font-weight:400;color:#337ab7;border-radius:0}.btn-link,.btn-link.active,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:active,.btn-link:focus,.btn-link:hover{border-color:transparent}.btn-link:focus,.btn-link:hover{color:#23527c;text-decoration:underline;background-color:transparent}.btn-link[disabled]:focus,.btn-link[disabled]:hover,fieldset[disabled] .btn-link:focus,fieldset[disabled] .btn-link:hover{color:#777;text-decoration:none}.btn-group-lg>.btn,.btn-lg{padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.btn-group-sm>.btn,.btn-sm{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-xs>.btn,.btn-xs{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s 
linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition-property:height,visibility;-o-transition-property:height,visibility;transition-property:height,visibility;-webkit-transition-duration:.35s;-o-transition-duration:.35s;transition-duration:.35s;-webkit-transition-timing-function:ease;-o-transition-timing-function:ease;transition-timing-function:ease}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid\9;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175)}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#333;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{color:#262626;text-decoration:none;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#fff;text-decoration:none;background-color:#337ab7;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#777}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{content:"";border-top:0;border-bottom:4px dashed;border-bottom:4px solid\9}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}.navbar-right .dropdown-menu-left{right:auto;left:0}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;float:left}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:2}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn,.btn-toolbar .btn-group,.btn-toolbar 
.input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-bottom-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-left-radius:0;border-top-right-radius:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-top-right-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{display:table-cell;float:none;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle=buttons]>.btn input[type=checkbox],[data-toggle=buttons]>.btn input[type=radio],[data-toggle=buttons]>.btn-group>.btn input[type=checkbox],[data-toggle=buttons]>.btn-group>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-right:0;padding-left:0}.input-group .form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group 
.form-control:focus{z-index:3}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn,textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn,textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group .form-control,.input-group-addon,.input-group-btn{display:table-cell}.input-group .form-control:not(:first-child):not(:last-child),.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=checkbox],.input-group-addon input[type=radio]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn-group:not(:last-child)>.btn,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:first-child>.btn-group:not(:first-child)>.btn,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:active,.input-group-btn>.btn:focus,.input-group-btn>.btn:hover{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{z-index:2;margin-left:-1px}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 
15px}.nav>li>a:focus,.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#777}.nav>li.disabled>a:focus,.nav>li.disabled>a:hover{color:#777;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:focus,.nav .open>a:hover{background-color:#eee;border-color:#337ab7}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:focus,.nav-tabs>li.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:focus,.nav-pills>li.active>a:hover{color:#fff;background-color:#337ab7}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{padding-right:15px;padding-left:15px;overflow-x:visible;border-top:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1);-webkit-overflow-scrolling:touch}.navbar-collapse.in{overflow-y:auto}@media 
(min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse{padding-right:0;padding-left:0}}.navbar-fixed-bottom,.navbar-fixed-top{position:fixed;right:0;left:0;z-index:1030}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:340px}@media (max-device-width:480px) and (orientation:landscape){.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:200px}}@media (min-width:768px){.navbar-fixed-bottom,.navbar-fixed-top{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-brand{float:left;height:50px;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-brand>img{display:block}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-right:15px;margin-top:8px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu .dropdown-header,.navbar-nav .open .dropdown-menu>li>a{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:focus,.navbar-nav .open .dropdown-menu>li>a:hover{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}.navbar-form{padding:10px 15px;margin-right:-15px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);margin-top:8px;margin-bottom:8px}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .form-control-static{display:inline-block}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .form-control,.navbar-form .input-group .input-group-addon,.navbar-form .input-group 
.input-group-btn{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .checkbox,.navbar-form .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .checkbox label,.navbar-form .radio label{padding-left:0}.navbar-form .checkbox input[type=checkbox],.navbar-form .radio input[type=radio]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}.navbar-form .form-group:last-child{margin-bottom:0}}@media (min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-left-radius:0;border-top-right-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{margin-bottom:0;border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-right:15px;margin-left:15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important;margin-right:-15px}.navbar-right~.navbar-right{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:focus,.navbar-default .navbar-brand:hover{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:focus,.navbar-default .navbar-nav>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:focus,.navbar-default .navbar-nav>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:focus,.navbar-default .navbar-nav>.disabled>a:hover{color:#ccc;background-color:transparent}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:focus,.navbar-default .navbar-nav>.open>a:hover{color:#555;background-color:#e7e7e7}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#ccc;background-color:transparent}}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:focus,.navbar-default .navbar-toggle:hover{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-default .btn-link{color:#777}.navbar-default 
.btn-link:focus,.navbar-default .btn-link:hover{color:#333}.navbar-default .btn-link[disabled]:focus,.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:focus,fieldset[disabled] .navbar-default .btn-link:hover{color:#ccc}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#9d9d9d}.navbar-inverse .navbar-brand:focus,.navbar-inverse .navbar-brand:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a:focus,.navbar-inverse .navbar-nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:focus,.navbar-inverse .navbar-nav>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:focus,.navbar-inverse .navbar-nav>.disabled>a:hover{color:#444;background-color:transparent}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:focus,.navbar-inverse .navbar-nav>.open>a:hover{color:#fff;background-color:#080808}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#444;background-color:transparent}}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:focus,.navbar-inverse .navbar-toggle:hover{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-link{color:#9d9d9d}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .btn-link{color:#9d9d9d}.navbar-inverse .btn-link:focus,.navbar-inverse .btn-link:hover{color:#fff}.navbar-inverse .btn-link[disabled]:focus,.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:focus,fieldset[disabled] .navbar-inverse .btn-link:hover{color:#444}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#777}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.42857143;color:#337ab7;text-decoration:none;background-color:#fff;border:1px solid 
#ddd}.pagination>li>a:focus,.pagination>li>a:hover,.pagination>li>span:focus,.pagination>li>span:hover{z-index:2;color:#23527c;background-color:#eee;border-color:#ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>.active>a,.pagination>.active>a:focus,.pagination>.active>a:hover,.pagination>.active>span,.pagination>.active>span:focus,.pagination>.active>span:hover{z-index:3;color:#fff;cursor:default;background-color:#337ab7;border-color:#337ab7}.pagination>.disabled>a,.pagination>.disabled>a:focus,.pagination>.disabled>a:hover,.pagination>.disabled>span,.pagination>.disabled>span:focus,.pagination>.disabled>span:hover{color:#777;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px;line-height:1.3333333}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-top-left-radius:6px;border-bottom-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px;line-height:1.5}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-top-left-radius:3px;border-bottom-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:focus,.pager li>a:hover{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:focus,.pager .disabled>a:hover,.pager .disabled>span{color:#777;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:focus,a.label:hover{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#777}.label-default[href]:focus,.label-default[href]:hover{background-color:#5e5e5e}.label-primary{background-color:#337ab7}.label-primary[href]:focus,.label-primary[href]:hover{background-color:#286090}.label-success{background-color:#5cb85c}.label-success[href]:focus,.label-success[href]:hover{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:focus,.label-info[href]:hover{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:focus,.label-warning[href]:hover{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:focus,.label-danger[href]:hover{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 7px;font-size:12px;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:middle;background-color:#777;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-group-xs>.btn .badge,.btn-xs .badge{top:0;padding:1px 
5px}a.badge:focus,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#337ab7;background-color:#fff}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding-top:30px;padding-bottom:30px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron .h1,.jumbotron h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.jumbotron>hr{border-top-color:#d5d5d5}.container .jumbotron,.container-fluid .jumbotron{padding-right:15px;padding-left:15px;border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron,.container-fluid .jumbotron{padding-right:60px;padding-left:60px}.jumbotron .h1,.jumbotron h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:border .2s ease-in-out;-o-transition:border .2s ease-in-out;transition:border .2s ease-in-out}.thumbnail a>img,.thumbnail>img{margin-right:auto;margin-left:auto}a.thumbnail.active,a.thumbnail:focus,a.thumbnail:hover{border-color:#337ab7}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0%;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#337ab7;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress-bar-striped,.progress-striped .progress-bar{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;background-size:40px 40px}.progress-bar.active,.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent)}.media{margin-top:15px}.media:first-child{margin-top:0}.media,.media-body{overflow:hidden;zoom:1}.media-body{width:10000px}.media-object{display:block}.media-object.img-thumbnail{max-width:none}.media-right,.media>.pull-right{padding-left:10px}.media-left,.media>.pull-left{padding-right:10px}.media-body,.media-left,.media-right{display:table-cell;vertical-align:top}.media-middle{vertical-align:middle}.media-bottom{vertical-align:bottom}.media-heading{margin-top:0;margin-bottom:5px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-left-radius:4px;border-top-right-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.list-group-item.disabled,.list-group-item.disabled:focus,.list-group-item.disabled:hover{color:#777;cursor:not-allowed;background-color:#eee}.list-group-item.disabled .list-group-item-heading,.list-group-item.disabled:focus .list-group-item-heading,.list-group-item.disabled:hover .list-group-item-heading{color:inherit}.list-group-item.disabled .list-group-item-text,.list-group-item.disabled:focus .list-group-item-text,.list-group-item.disabled:hover .list-group-item-text{color:#777}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{z-index:2;color:#fff;background-color:#337ab7;border-color:#337ab7}.list-group-item.active .list-group-item-heading,.list-group-item.active .list-group-item-heading>.small,.list-group-item.active .list-group-item-heading>small,.list-group-item.active:focus .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading>.small,.list-group-item.active:focus .list-group-item-heading>small,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading>.small,.list-group-item.active:hover .list-group-item-heading>small{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:focus .list-group-item-text,.list-group-item.active:hover .list-group-item-text{color:#c7ddef}a.list-group-item,button.list-group-item{color:#555}a.list-group-item .list-group-item-heading,button.list-group-item .list-group-item-heading{color:#333}a.list-group-item:focus,a.list-group-item:hover,button.list-group-item:focus,button.list-group-item:hover{color:#555;text-decoration:none;background-color:#f5f5f5}button.list-group-item{width:100%;text-align:left}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success,button.list-group-item-success{color:#3c763d}a.list-group-item-success .list-group-item-heading,button.list-group-item-success .list-group-item-heading{color:inherit}a.list-group-item-success:focus,a.list-group-item-success:hover,button.list-group-item-success:focus,button.list-group-item-success:hover{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:focus,a.list-group-item-success.active:hover,button.list-group-item-success.active,button.list-group-item-success.active:focus,button.list-group-item-success.active:hover{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info,button.list-group-item-info{color:#31708f}a.list-group-item-info .list-group-item-heading,button.list-group-item-info 
.list-group-item-heading{color:inherit}a.list-group-item-info:focus,a.list-group-item-info:hover,button.list-group-item-info:focus,button.list-group-item-info:hover{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:focus,a.list-group-item-info.active:hover,button.list-group-item-info.active,button.list-group-item-info.active:focus,button.list-group-item-info.active:hover{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning,button.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading,button.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:focus,a.list-group-item-warning:hover,button.list-group-item-warning:focus,button.list-group-item-warning:hover{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:focus,a.list-group-item-warning.active:hover,button.list-group-item-warning.active,button.list-group-item-warning.active:focus,button.list-group-item-warning.active:hover{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger,button.list-group-item-danger{color:#a94442}a.list-group-item-danger .list-group-item-heading,button.list-group-item-danger .list-group-item-heading{color:inherit}a.list-group-item-danger:focus,a.list-group-item-danger:hover,button.list-group-item-danger:focus,button.list-group-item-danger:hover{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:focus,a.list-group-item-danger.active:hover,button.list-group-item-danger.active,button.list-group-item-danger.active:focus,button.list-group-item-danger.active:hover{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-left-radius:3px;border-top-right-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>.small,.panel-title>.small>a,.panel-title>a,.panel-title>small,.panel-title>small>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid #ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.list-group,.panel>.panel-collapse>.list-group{margin-bottom:0}.panel>.list-group .list-group-item,.panel>.panel-collapse>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group:first-child .list-group-item:first-child,.panel>.panel-collapse>.list-group:first-child .list-group-item:first-child{border-top:0;border-top-left-radius:3px;border-top-right-radius:3px}.panel>.list-group:last-child .list-group-item:last-child,.panel>.panel-collapse>.list-group:last-child .list-group-item:last-child{border-bottom:0;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.panel-heading+.panel-collapse>.list-group .list-group-item:first-child{border-top-left-radius:0;border-top-right-radius:0}.panel-heading+.list-group 
.list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.panel-collapse>.table,.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.panel-collapse>.table caption,.panel>.table caption,.panel>.table-responsive>.table caption{padding-right:15px;padding-left:15px}.panel>.table-responsive:first-child>.table:first-child,.panel>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child,.panel>.table:first-child>thead:first-child>tr:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table-responsive:last-child>.table:last-child,.panel>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child 
th:first-child{border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive,.panel>.table+.panel-body,.panel>.table-responsive+.panel-body{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child td,.panel>.table>tbody:first-child>tr:first-child th{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group 
.panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.list-group,.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#337ab7}.panel-primary>.panel-heading{color:#fff;background-color:#337ab7;border-color:#337ab7}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#337ab7}.panel-primary>.panel-heading .badge{color:#337ab7;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#337ab7}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading .badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;filter:alpha(opacity=20);opacity:.2}.close:focus,.close:hover{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{padding:0;cursor:pointer;background:0 
0;border:0;-webkit-appearance:none;-moz-appearance:none;appearance:none}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);-o-transform:translate(0,-25%);transform:translate(0,-25%);-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:-webkit-transform .3s ease-out;transition:transform .3s ease-out;transition:transform .3s ease-out,-webkit-transform .3s ease-out,-o-transform .3s ease-out}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);-o-transform:translate(0,0);transform:translate(0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5);outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-style:normal;font-weight:400;line-height:1.42857143;line-break:auto;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;font-size:12px;filter:alpha(opacity=0);opacity:0}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px;bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;left:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right 
.tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-style:normal;font-weight:400;line-height:1.42857143;line-break:auto;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;font-size:14px;background-color:#fff;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2)}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover>.arrow{border-width:11px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" ";border-right-width:0;border-left-color:#fff}.popover-title{padding:8px 14px;margin:0;font-size:14px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>a>img,.carousel-inner>.item>img{line-height:1}@media all and (transform-3d),(-webkit-transform-3d){.carousel-inner>.item{-webkit-transition:-webkit-transform .6s ease-in-out;-o-transition:-o-transform .6s ease-in-out;transition:-webkit-transform .6s ease-in-out;transition:transform .6s ease-in-out;transition:transform .6s ease-in-out,-webkit-transform .6s ease-in-out,-o-transform .6s 
ease-in-out;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-inner>.item.active.right,.carousel-inner>.item.next{-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0);left:0}.carousel-inner>.item.active.left,.carousel-inner>.item.prev{-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0);left:0}.carousel-inner>.item.active,.carousel-inner>.item.next.left,.carousel-inner>.item.prev.right{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0);left:0}}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);background-color:rgba(0,0,0,0);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:focus,.carousel-control:hover{color:#fff;text-decoration:none;outline:0;filter:alpha(opacity=90);opacity:.9}.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{position:absolute;top:50%;z-index:5;display:inline-block;margin-top:-10px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{left:50%;margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{right:50%;margin-right:-10px}.carousel-control .icon-next,.carousel-control .icon-prev{width:20px;height:20px;font-family:serif;line-height:1}.carousel-control .icon-prev:before{content:"\2039"}.carousel-control .icon-next:before{content:"\203a"}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000\9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators 
.active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{width:30px;height:30px;margin-top:-10px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-10px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.btn-group-vertical>.btn-group:after,.btn-group-vertical>.btn-group:before,.btn-toolbar:after,.btn-toolbar:before,.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.dl-horizontal dd:after,.dl-horizontal dd:before,.form-horizontal .form-group:after,.form-horizontal .form-group:before,.modal-footer:after,.modal-footer:before,.modal-header:after,.modal-header:before,.nav:after,.nav:before,.navbar-collapse:after,.navbar-collapse:before,.navbar-header:after,.navbar-header:before,.navbar:after,.navbar:before,.pager:after,.pager:before,.panel-body:after,.panel-body:before,.row:after,.row:before{display:table;content:" "}.btn-group-vertical>.btn-group:after,.btn-toolbar:after,.clearfix:after,.container-fluid:after,.container:after,.dl-horizontal dd:after,.form-horizontal .form-group:after,.modal-footer:after,.modal-header:after,.nav:after,.navbar-collapse:after,.navbar-header:after,.navbar:after,.pager:after,.panel-body:after,.row:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-lg,.visible-md,.visible-sm,.visible-xs{display:none!important}.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and 
(max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}}
-/*# sourceMappingURL=bootstrap.min.css.map */
\ No newline at end of file
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap.min.css.map b/hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap.min.css.map
deleted file mode 100644
index 0ae3de5..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap.min.css.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"sources":["bootstrap.css","less/normalize.less","dist/css/bootstrap.css","less/print.less","less/glyphicons.less","less/scaffolding.less","less/mixins/vendor-prefixes.less","less/mixins/tab-focus.less","less/mixins/image.less","less/type.less","less/mixins/text-emphasis.less","less/mixins/background-variant.less","less/mixins/text-overflow.less","less/code.less","less/grid.less","less/mixins/grid.less","less/mixins/grid-framework.less","less/tables.less","less/mixins/table-row.less","less/forms.less","less/mixins/forms.less","less/buttons.less","less/mixins/buttons.less","less/mixins/opacity.less","less/component-animations.less","less/dropdowns.less","less/mixins/nav-divider.less","less/mixins/reset-filter.less","less/button-groups.less","less/mixins/border-radius.less","less/input-groups.less","less/navs.less","less/navbar.less","less/mixins/nav-vertical-align.less","less/utilities.less","less/breadcrumbs.less","less/pagination.less","less/mixins/pagination.less","less/pager.less","less/labels.less","less/mixins/labels.less","less/badges.less","less/jumbotron.less","less/thumbnails.less","less/alerts.less","less/mixins/alerts.less","less/progress-bars.less","less/mixins/gradients.less","less/mixins/progress-bar.less","less/media.less","less/list-group.less","less/mixins/list-group.less","less/panels.less","less/mixins/panels.less","less/responsive-embed.less","less/wells.less","less/close.less","less/modals.less","less/tooltip.less","less/mixins/reset-text.less","less/popovers.less","less/carousel.less","less/mixins/clearfix.less","less/mixins/center-block.less","less/mixins/hide-text.less","less/responsive-utilities.less","less/mixins/responsive-visibility.less"],"names":[],"mappings":"AAAA;;;;AAKA,4ECKA,KACE,YAAA,WACA,qBAAA,KACA,yBAAA,KAOF,KACE,OAAA,EAaF,QCnBA,MACA,QACA,WACA,OACA,OACA,OACA,OACA,KACA,KACA,IACA,QACA,QDqBE,QAAA,MAQF,MCzBA,OACA,SACA,MD2BE,QAAA,aACA,eAAA,SAQF,sBACE,QAAA,KACA,OAAA,EAQF,SCrCA,SDuCE,QAAA,KAUF,EACE,iBAAA,YAQF,SCnDA,QDqDE,QAAA,EAWF,YACE,cAAA,KACA,gBAAA,UACA,wBAAA,UAAA,OAAA,qBAAA,UAAA,OAAA,gBAAA,UAAA,OAOF,EC/DA,ODiEE,YAAA,IAOF,IACE,WAAA,OAQF,GACE,UAAA,IACA,OAAA,MAAA,EAOF,KACE,WAAA,KACA,MAAA,KAOF,MACE,UAAA,IAOF,ICzFA,ID2FE,UAAA,IACA,YAAA,EACA,SAAA,SACA,eAAA,SAGF,IACE,IAAA,MAGF,IACE,OAAA,OAUF,IACE,OAAA,EAOF,eACE,SAAA,OAUF,OACE,OAAA,IAAA,KAOF,GACE,mBAAA,YAAA,gBAAA,YAAA,WAAA,YACA,OAAA,EAOF,IACE,SAAA,KAOF,KC7HA,IACA,IACA,KD+HE,YAAA,SAAA,CAAA,UACA,UAAA,IAkBF,OC7IA,MACA,SACA,OACA,SD+IE,MAAA,QACA,KAAA,QACA,OAAA,EAOF,OACE,SAAA,QAUF,OC1JA,OD4JE,eAAA,KAWF,OCnKA,wBACA,kBACA,mBDqKE,mBAAA,OACA,OAAA,QAOF,iBCxKA,qBD0KE,OAAA,QAOF,yBC7KA,wBD+KE,OAAA,EACA,QAAA,EAQF,MACE,YAAA,OAWF,qBC5LA,kBD8LE,mBAAA,WAAA,gBAAA,WAAA,WAAA,WACA,QAAA,EASF,8CCjMA,8CDmME,OAAA,KAQF,mBACE,mBAAA,UACA,mBAAA,YAAA,gBAAA,YAAA,WAAA,YASF,iDC5MA,8CD8ME,mBAAA,KAOF,SACE,OAAA,IAAA,MAAA,OACA,OAAA,EAAA,IACA,QAAA,MAAA,OAAA,MAQF,OACE,OAAA,EACA,QAAA,EAOF,SACE,SAAA,KAQF,SACE,YAAA,IAUF,MACE,gBAAA,SACA,eAAA,EAGF,GC3OA,GD6OE,QAAA,EDlPF,qFGhLA,aACE,ED2LA,OADA,QCvLE,MAAA,eACA,YAAA,eACA,WAAA,cACA,mBAAA,eAAA,WAAA,eAGF,ED0LA,UCxLE,gBAAA,UAGF,cACE,QAAA,KAAA,WAAA,IAGF,kBACE,QAAA,KAAA,YAAA,IAKF,mBDqLA,6BCnLE,QAAA,GDuLF,WCpLA,IAEE,OAAA,IAAA,MAAA,KACA,kBAAA,MAGF,MACE,QAAA,mBDqLF,IClLA,GAEE,kBAAA,MAGF,IACE,UAAA,eDmLF,GACA,GCjLA,EAGE,QAAA,EACA,OAAA,EAGF,GD+KA,GC7KE,iBAAA,MAMF,QACE,QAAA,KAEF,YD2KA,oBCxKI,iBAAA,eAGJ,OACE,OAAA,IAAA,MAAA,KAGF,OACE,gBAAA,mBADF,UD2KA,UCtKI,iBAAA,eD0KJ,mBCvKA,mBAGI,OAAA,IAAA,MAAA,gBCrFN,WACE,YAAA,uBACA,IAAA,+CACA,IAAA,sDAAA,2BAAA,CAAA,iDAAA,eAAA,CAAA,gDAAA,cAAA,CAAA,+CAAA,kBAAA,CAAA,2EAAA,cAQF,WAC
E,SAAA,SACA,IAAA,IACA,QAAA,aACA,YAAA,uBACA,WAAA,OACA,YAAA,IACA,YAAA,EACA,uBAAA,YACA,wBAAA,UAIkC,2BAAW,QAAA,QACX,uBAAW,QAAA,QF2P/C,sBEzPoC,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,2BAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,6BAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,2BAAW,QAAA,QACX,qBAAW,QAAA,QACX,0BAAW,QAAA,QACX,qBAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,2BAAW,QAAA,QACX,sBAAW,QAAA,QACX,yBAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,+BAAW,QAAA,QACX,2BAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,8BAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,6BAAW,QAAA,QACX,6BAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,sBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,2BAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,yBAAW,QAAA,QACX,8BAAW,QAAA,QACX,6BAAW,QAAA,QACX,6BAAW,QAAA,QACX,+BAAW,QAAA,QACX,8BAAW,QAAA,QACX,gCAAW,QAAA,QACX,uBAAW,QAAA,QACX,8BAAW,QAAA,QACX,+BAAW,QAAA,QACX,iCAAW,QAAA,QACX,0BAAW,QAAA,QACX,6BAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,gCAAW,QAAA,QACX,gCAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,0BAAW,QAAA,QACX,+BAAW,QAAA,QACX,+BAAW,QAAA,QACX,wBAAW,QAAA,QACX,+BAAW,QAAA,QACX,gCAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,8BAAW,QAAA,QACX,0BAAW,QAAA,QACX,gCAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,gCAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,6BAAW,QAAA,QACX,8BAAW,QAAA,QACX,2BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,8BAAW,QAAA,QACX,+BAAW,QAAA,QACX,mCAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,2BAAW,QAAA,QACX,4BAAW,QAAA,QACX,+BAAW,QAAA,QACX,wBAAW,QAAA,QACX,2BAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,yBAAW,QAAA,QACX,6BAAW,QAAA,QACX,+BAAW,QAAA,QACX,0BAAW,QAAA,QACX,gCAAW,QAAA,QACX,+BAAW,QAAA,QACX,8BAAW,QAAA,QACX,kCAAW,QAAA,QACX,oCAAW,QAAA,QACX,sBAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,8BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,0BAAW,QAAA,QACX,4BAAW,QAAA,QACX,qCAAW,QAAA,QACX,oCAAW,QAAA,QACX,kCAAW,QAAA,QACX,oCAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,8BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,0BAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,uBAAW,QAAA,QACX,mCAAW,QAAA,QACX,uCAAW,QAAA,QACX,gCAAW,QAAA,QACX,oCAAW,QAAA,QACX,qCAAW,QAAA,QACX,yCAAW,QAAA,QACX,4BAAW,QAAA,QACX,yBAAW,QAAA,QACX,gCAAW,QAAA,QACX,8BAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,0BAAW,QAAA,QACX,6BAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,8BAAW,QAAA,QACX,+BAAW,QAAA,QACX,gCAAW,QAAA,QACX,8BAAW,QAAA,QACX,8BAAW,QAAA,QACX,8BAAW,QAAA,QACX,2BAAW,QAAA,QACX,0BAAW,QAAA,QACX,yBAAW,QAAA,QACX,6BAAW,QAAA,QACX,2BAAW,QAAA,QACX,4BAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,2BAAW,QAAA,QACX,2BAAW,QAAA,QACX,4BAAW,QAAA,QACX,+BAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,iCAAW,QAAA,QACX,oCAAW,QAAA,QACX,iCAAW,QAAA,QACX,+BAAW,QAAA,QACX,+BAAW,QAAA,QACX,iCAAW,QAAA,QACX,qBAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QASX,wBAAW,QAAA,QACX,4BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,yBAAW,QAAA,QACX,yBAAW,QAAA,QACX,+BAAW,QAAA,QACX,uBAAW,QAAA,QACX,6BAAW,QAA
A,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,4BAAW,QAAA,QACX,uBAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,2BAAW,QAAA,QACX,0BAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,4BAAW,QAAA,QACX,mCAAW,QAAA,QACX,4BAAW,QAAA,QACX,oCAAW,QAAA,QACX,kCAAW,QAAA,QACX,iCAAW,QAAA,QACX,+BAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,kCAAW,QAAA,QACX,mCAAW,QAAA,QACX,sCAAW,QAAA,QACX,0CAAW,QAAA,QACX,oCAAW,QAAA,QACX,wCAAW,QAAA,QACX,qCAAW,QAAA,QACX,iCAAW,QAAA,QACX,gCAAW,QAAA,QACX,kCAAW,QAAA,QACX,+BAAW,QAAA,QACX,0BAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,0BAAW,QAAA,QCxS/C,ECkEE,mBAAA,WACG,gBAAA,WACK,WAAA,WJo+BV,OGriCA,QC+DE,mBAAA,WACG,gBAAA,WACK,WAAA,WDzDV,KACE,UAAA,KACA,4BAAA,cAGF,KACE,YAAA,gBAAA,CAAA,SAAA,CAAA,KAAA,CAAA,WACA,UAAA,KACA,YAAA,WACA,MAAA,KACA,iBAAA,KHoiCF,OGhiCA,MHiiCA,OACA,SG9hCE,YAAA,QACA,UAAA,QACA,YAAA,QAMF,EACE,MAAA,QACA,gBAAA,KH8hCF,QG5hCE,QAEE,MAAA,QACA,gBAAA,UAGF,QEnDA,QAAA,IAAA,KAAA,yBACA,eAAA,KF6DF,OACE,OAAA,EAMF,IACE,eAAA,OHqhCF,4BADA,0BGhhCA,gBH+gCA,iBADA,eMxlCE,QAAA,MACA,UAAA,KACA,OAAA,KH6EF,aACE,cAAA,IAMF,eACE,QAAA,IACA,YAAA,WACA,iBAAA,KACA,OAAA,IAAA,MAAA,KACA,cAAA,IC+FA,mBAAA,IAAA,IAAA,YACK,cAAA,IAAA,IAAA,YACG,WAAA,IAAA,IAAA,YE5LR,QAAA,aACA,UAAA,KACA,OAAA,KHiGF,YACE,cAAA,IAMF,GACE,WAAA,KACA,cAAA,KACA,OAAA,EACA,WAAA,IAAA,MAAA,KAQF,SACE,SAAA,SACA,MAAA,IACA,OAAA,IACA,QAAA,EACA,OAAA,KACA,SAAA,OACA,KAAA,cACA,OAAA,EAQA,0BH8/BF,yBG5/BI,SAAA,OACA,MAAA,KACA,OAAA,KACA,OAAA,EACA,SAAA,QACA,KAAA,KAWJ,cACE,OAAA,QH4/BF,IACA,IACA,IACA,IACA,IACA,IOtpCA,GP4oCA,GACA,GACA,GACA,GACA,GO9oCE,YAAA,QACA,YAAA,IACA,YAAA,IACA,MAAA,QPyqCF,WAZA,UAaA,WAZA,UAaA,WAZA,UAaA,WAZA,UAaA,WAZA,UAaA,WAZA,UACA,UOxqCA,SPyqCA,UAZA,SAaA,UAZA,SAaA,UAZA,SAaA,UAZA,SAaA,UAZA,SOxpCI,YAAA,IACA,YAAA,EACA,MAAA,KP8qCJ,IAEA,IAEA,IO9qCA,GP2qCA,GAEA,GO1qCE,WAAA,KACA,cAAA,KPqrCF,WANA,UAQA,WANA,UAQA,WANA,UACA,UOxrCA,SP0rCA,UANA,SAQA,UANA,SO9qCI,UAAA,IPyrCJ,IAEA,IAEA,IO1rCA,GPurCA,GAEA,GOtrCE,WAAA,KACA,cAAA,KPisCF,WANA,UAQA,WANA,UAQA,WANA,UACA,UOpsCA,SPssCA,UANA,SAQA,UANA,SO1rCI,UAAA,IPqsCJ,IOjsCA,GAAU,UAAA,KPqsCV,IOpsCA,GAAU,UAAA,KPwsCV,IOvsCA,GAAU,UAAA,KP2sCV,IO1sCA,GAAU,UAAA,KP8sCV,IO7sCA,GAAU,UAAA,KPitCV,IOhtCA,GAAU,UAAA,KAMV,EACE,OAAA,EAAA,EAAA,KAGF,MACE,cAAA,KACA,UAAA,KACA,YAAA,IACA,YAAA,IAEA,yBAAA,MACE,UAAA,MPitCJ,OOxsCA,MAEE,UAAA,IP0sCF,MOvsCA,KAEE,QAAA,KACA,iBAAA,QAIF,WAAuB,WAAA,KACvB,YAAuB,WAAA,MACvB,aAAuB,WAAA,OACvB,cAAuB,WAAA,QACvB,aAAuB,YAAA,OAGvB,gBAAuB,eAAA,UACvB,gBAAuB,eAAA,UACvB,iBAAuB,eAAA,WAGvB,YACE,MAAA,KAEF,cCvGE,MAAA,QR2zCF,qBQ1zCE,qBAEE,MAAA,QDuGJ,cC1GE,MAAA,QRk0CF,qBQj0CE,qBAEE,MAAA,QD0GJ,WC7GE,MAAA,QRy0CF,kBQx0CE,kBAEE,MAAA,QD6GJ,cChHE,MAAA,QRg1CF,qBQ/0CE,qBAEE,MAAA,QDgHJ,aCnHE,MAAA,QRu1CF,oBQt1CE,oBAEE,MAAA,QDuHJ,YAGE,MAAA,KE7HA,iBAAA,QT+1CF,mBS91CE,mBAEE,iBAAA,QF6HJ,YEhIE,iBAAA,QTs2CF,mBSr2CE,mBAEE,iBAAA,QFgIJ,SEnIE,iBAAA,QT62CF,gBS52CE,gBAEE,iBAAA,QFmIJ,YEtIE,iBAAA,QTo3CF,mBSn3CE,mBAEE,iBAAA,QFsIJ,WEzIE,iBAAA,QT23CF,kBS13CE,kBAEE,iBAAA,QF8IJ,aACE,eAAA,IACA,OAAA,KAAA,EAAA,KACA,cAAA,IAAA,MAAA,KPgvCF,GOxuCA,GAEE,WAAA,EACA,cAAA,KP4uCF,MAFA,MACA,MO9uCA,MAMI,cAAA,EAOJ,eACE,aAAA,EACA,WAAA,KAIF,aALE,aAAA,EACA,WAAA,KAMA,YAAA,KAFF,gBAKI,QAAA,aACA,cAAA,IACA,aAAA,IAKJ,GACE,WAAA,EACA,cAAA,KPouCF,GOluCA,GAEE,YAAA,WAEF,GACE,YAAA,IAEF,GACE,YAAA,EAaA,yBAAA,kBAEI,MAAA,KACA,MAAA,MACA,MAAA,KACA,WAAA,MGxNJ,SAAA,OACA,cAAA,SACA,YAAA,OHiNA,kBASI,YAAA,OP4tCN,0BOjtCA,YAEE,OAAA,KAGF,YACE,UAAA,IA9IqB,eAAA,UAmJvB,WACE,QAAA,KAAA,KACA,OAAA,EAAA,EAAA,KACA,UAAA,OA
CA,YAAA,IAAA,MAAA,KPitCF,yBO5sCI,wBP2sCJ,yBO1sCM,cAAA,EPgtCN,kBO1tCA,kBPytCA,iBOtsCI,QAAA,MACA,UAAA,IACA,YAAA,WACA,MAAA,KP4sCJ,yBO1sCI,yBPysCJ,wBOxsCM,QAAA,cAQN,oBPqsCA,sBOnsCE,cAAA,KACA,aAAA,EACA,WAAA,MACA,aAAA,IAAA,MAAA,KACA,YAAA,EP0sCF,kCOpsCI,kCPksCJ,iCAGA,oCAJA,oCAEA,mCOnsCe,QAAA,GP4sCf,iCO3sCI,iCPysCJ,gCAGA,mCAJA,mCAEA,kCOzsCM,QAAA,cAMN,QACE,cAAA,KACA,WAAA,OACA,YAAA,WIxSF,KXm/CA,IACA,IACA,KWj/CE,YAAA,KAAA,CAAA,MAAA,CAAA,QAAA,CAAA,aAAA,CAAA,UAIF,KACE,QAAA,IAAA,IACA,UAAA,IACA,MAAA,QACA,iBAAA,QACA,cAAA,IAIF,IACE,QAAA,IAAA,IACA,UAAA,IACA,MAAA,KACA,iBAAA,KACA,cAAA,IACA,mBAAA,MAAA,EAAA,KAAA,EAAA,gBAAA,WAAA,MAAA,EAAA,KAAA,EAAA,gBANF,QASI,QAAA,EACA,UAAA,KACA,YAAA,IACA,mBAAA,KAAA,WAAA,KAKJ,IACE,QAAA,MACA,QAAA,MACA,OAAA,EAAA,EAAA,KACA,UAAA,KACA,YAAA,WACA,MAAA,KACA,WAAA,UACA,UAAA,WACA,iBAAA,QACA,OAAA,IAAA,MAAA,KACA,cAAA,IAXF,SAeI,QAAA,EACA,UAAA,QACA,MAAA,QACA,YAAA,SACA,iBAAA,YACA,cAAA,EAKJ,gBACE,WAAA,MACA,WAAA,OC1DF,WCHE,cAAA,KACA,aAAA,KACA,aAAA,KACA,YAAA,KDGA,yBAAA,WACE,MAAA,OAEF,yBAAA,WACE,MAAA,OAEF,0BAAA,WACE,MAAA,QAUJ,iBCvBE,cAAA,KACA,aAAA,KACA,aAAA,KACA,YAAA,KD6BF,KCvBE,aAAA,MACA,YAAA,MD0BF,gBACE,aAAA,EACA,YAAA,EAFF,8BAKI,cAAA,EACA,aAAA,EZwiDJ,UAoCA,WAIA,WAIA,WAxCA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UAjCA,UAoCA,WAIA,WAIA,WAxCA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UAjCA,UAoCA,WAIA,WAIA,WAxCA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UatnDC,UbynDD,WAIA,WAIA,WAxCA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UcpmDM,SAAA,SAEA,WAAA,IAEA,cAAA,KACA,aAAA,KDtBL,UbmpDD,WACA,WACA,WAVA,UACA,UACA,UACA,UACA,UACA,UACA,UACA,Uc3mDM,MAAA,KDvCL,WC+CG,MAAA,KD/CH,WC+CG,MAAA,aD/CH,WC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,YD/CH,gBC8DG,MAAA,KD9DH,gBC8DG,MAAA,aD9DH,gBC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,YD9DH,eCmEG,MAAA,KDnEH,gBCoDG,KAAA,KDpDH,gBCoDG,KAAA,aDpDH,gBCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,YDpDH,eCyDG,KAAA,KDzDH,kBCwEG,YAAA,KDxEH,kBCwEG,YAAA,aDxEH,kBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,YDxEH,iBCwEG,YAAA,EFCJ,yBCzEC,Ub2zDC,WACA,WACA,WAVA,UACA,UACA,UACA,UACA,UACA,UACA,UACA,UcnxDI,MAAA,KDvCL,WC+CG,MAAA,KD/CH,WC+CG,MAAA,aD/CH,WC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,YD/CH,gBC8DG,MAAA,KD9DH,gBC8DG,MAAA,aD9DH,gBC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,YD9DH,eCmEG,MAAA,KDnEH,gBCoDG,KAAA,KDpDH,gBCoDG,KAAA,aDpDH,gBCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,YDpDH,eCyDG,KAAA,KDzDH,kBCwEG,YAAA,KDxEH,kBCwEG,YAAA,aDxEH,kBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,YDxEH,iBCwEG,YAAA,GFUJ,yBClFC,Ubo+DC,WACA,WACA,WAVA,UACA,UACA,UACA,UACA,UACA,UACA,UACA,Uc57DI,MAAA,KDvCL,WC+CG,MAAA,KD/CH,WC+CG,MAAA,aD/CH,WC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+
CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,YD/CH,gBC8DG,MAAA,KD9DH,gBC8DG,MAAA,aD9DH,gBC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,YD9DH,eCmEG,MAAA,KDnEH,gBCoDG,KAAA,KDpDH,gBCoDG,KAAA,aDpDH,gBCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,YDpDH,eCyDG,KAAA,KDzDH,kBCwEG,YAAA,KDxEH,kBCwEG,YAAA,aDxEH,kBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,YDxEH,iBCwEG,YAAA,GFmBJ,0BC3FC,Ub6oEC,WACA,WACA,WAVA,UACA,UACA,UACA,UACA,UACA,UACA,UACA,UcrmEI,MAAA,KDvCL,WC+CG,MAAA,KD/CH,WC+CG,MAAA,aD/CH,WC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,YD/CH,gBC8DG,MAAA,KD9DH,gBC8DG,MAAA,aD9DH,gBC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,YD9DH,eCmEG,MAAA,KDnEH,gBCoDG,KAAA,KDpDH,gBCoDG,KAAA,aDpDH,gBCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,YDpDH,eCyDG,KAAA,KDzDH,kBCwEG,YAAA,KDxEH,kBCwEG,YAAA,aDxEH,kBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,YDxEH,iBCwEG,YAAA,GCjEJ,MACE,iBAAA,YADF,uBAQI,SAAA,OACA,QAAA,aACA,MAAA,KAKA,sBf+xEJ,sBe9xEM,SAAA,OACA,QAAA,WACA,MAAA,KAKN,QACE,YAAA,IACA,eAAA,IACA,MAAA,KACA,WAAA,KAGF,GACE,WAAA,KAMF,OACE,MAAA,KACA,UAAA,KACA,cAAA,Kf6xEF,mBAHA,mBAIA,mBAHA,mBACA,mBe/xEA,mBAWQ,QAAA,IACA,YAAA,WACA,eAAA,IACA,WAAA,IAAA,MAAA,KAdR,mBAoBI,eAAA,OACA,cAAA,IAAA,MAAA,KfyxEJ,uCe9yEA,uCf+yEA,wCAHA,wCAIA,2CAHA,2Ce/wEQ,WAAA,EA9BR,mBAoCI,WAAA,IAAA,MAAA,KApCJ,cAyCI,iBAAA,KfoxEJ,6BAHA,6BAIA,6BAHA,6BACA,6Be5wEA,6BAOQ,QAAA,IAWR,gBACE,OAAA,IAAA,MAAA,KfqwEF,4BAHA,4BAIA,4BAHA,4BACA,4BerwEA,4BAQQ,OAAA,IAAA,MAAA,KfmwER,4Be3wEA,4BAeM,oBAAA,IAUN,yCAEI,iBAAA,QASJ,4BAEI,iBAAA,QfqvEJ,0BAGA,0BATA,0BAGA,0BAIA,0BAGA,0BATA,0BAGA,0BACA,0BAGA,0BgBt4EE,0BhBg4EF,0BgBz3EM,iBAAA,QhBs4EN,sCAEA,sCADA,oCgBj4EE,sChB+3EF,sCgBz3EM,iBAAA,QhBs4EN,2BAGA,2BATA,2BAGA,2BAIA,2BAGA,2BATA,2BAGA,2BACA,2BAGA,2BgB35EE,2BhBq5EF,2BgB94EM,iBAAA,QhB25EN,uCAEA,uCADA,qCgBt5EE,uChBo5EF,uCgB94EM,iBAAA,QhB25EN,wBAGA,wBATA,wBAGA,wBAIA,wBAGA,wBATA,wBAGA,wBACA,wBAGA,wBgBh7EE,wBhB06EF,wBgBn6EM,iBAAA,QhBg7EN,oCAEA,oCADA,kCgB36EE,oChBy6EF,oCgBn6EM,iBAAA,QhBg7EN,2BAGA,2BATA,2BAGA,2BAIA,2BAGA,2BATA,2BAGA,2BACA,2BAGA,2BgBr8EE,2BhB+7EF,2BgBx7EM,iBAAA,QhBq8EN,uCAEA,uCADA,qCgBh8EE,uChB87EF,uCgBx7EM,iBAAA,QhBq8EN,0BAGA,0BATA,0BAGA,0BAIA,0BAGA,0BATA,0BAGA,0BACA,0BAGA,0BgB19EE,0BhBo9EF,0BgB78EM,iBAAA,QhB09EN,sCAEA,sCADA,oCgBr9EE,sChBm9EF,sCgB78EM,iBAAA,QDoJN,kBACE,WAAA,KACA,WAAA,KAEA,oCAAA,kBACE,MAAA,KACA,cAAA,KACA,WAAA,OACA,mBAAA,yBACA,OAAA,IAAA,MAAA,KALF,yBASI,cAAA,Efq0EJ,qCAHA,qCAIA,qCAHA,qCACA,qCe70EA,qCAkBU,YAAA,OAlBV,kCA0BI,OAAA,Ef+zEJ,0DAHA,0DAIA,0DAHA,0DACA,0Dex1EA,0DAmCU,YAAA,Ef8zEV,yDAHA,yDAIA,yDAHA,yDACA,yDeh2EA,yDAuCU,aAAA,Efg0EV,yDev2EA,yDfw2EA,yDAFA,yDelzEU,cAAA,GEzNZ,SAIE,UAAA,EACA,QAAA,EACA,OAAA,EACA,OAAA,EAGF,OACE,QAAA,MACA,MAAA,KACA,QAAA,EACA,cAAA,KACA,UAAA,KACA,YA
[minified source map (bootstrap.min.css.map) for Bootstrap v3.4.1, Copyright 2011-2019 Twitter, Inc., licensed under MIT; part of a static web asset deleted by this patch. The base64 VLQ "mappings" data and the embedded "sourcesContent" CSS are omitted.]
 .col-lg-8,\n  .col-lg-9,\n  .col-lg-10,\n  .col-lg-11,\n  .col-lg-12 {\n    float: left;\n  }\n  .col-lg-12 {\n    width: 100%;\n  }\n  .col-lg-11 {\n    width: 91.66666667%;\n  }\n  .col-lg-10 {\n    width: 83.33333333%;\n  }\n  .col-lg-9 {\n    width: 75%;\n  }\n  .col-lg-8 {\n    width: 66.66666667%;\n  }\n  .col-lg-7 {\n    width: 58.33333333%;\n  }\n  .col-lg-6 {\n    width: 50%;\n  }\n  .col-lg-5 {\n    width: 41.66666667%;\n  }\n  .col-lg-4 {\n    width: 33.33333333%;\n  }\n  .col-lg-3 {\n    width: 25%;\n  }\n  .col-lg-2 {\n    width: 16.66666667%;\n  }\n  .col-lg-1 {\n    width: 8.33333333%;\n  }\n  .col-lg-pull-12 {\n    right: 100%;\n  }\n  .col-lg-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-lg-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-lg-pull-9 {\n    right: 75%;\n  }\n  .col-lg-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-lg-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-lg-pull-6 {\n    right: 50%;\n  }\n  .col-lg-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-lg-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-lg-pull-3 {\n    right: 25%;\n  }\n  .col-lg-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-lg-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-lg-pull-0 {\n    right: auto;\n  }\n  .col-lg-push-12 {\n    left: 100%;\n  }\n  .col-lg-push-11 {\n    left: 91.66666667%;\n  }\n  .col-lg-push-10 {\n    left: 83.33333333%;\n  }\n  .col-lg-push-9 {\n    left: 75%;\n  }\n  .col-lg-push-8 {\n    left: 66.66666667%;\n  }\n  .col-lg-push-7 {\n    left: 58.33333333%;\n  }\n  .col-lg-push-6 {\n    left: 50%;\n  }\n  .col-lg-push-5 {\n    left: 41.66666667%;\n  }\n  .col-lg-push-4 {\n    left: 33.33333333%;\n  }\n  .col-lg-push-3 {\n    left: 25%;\n  }\n  .col-lg-push-2 {\n    left: 16.66666667%;\n  }\n  .col-lg-push-1 {\n    left: 8.33333333%;\n  }\n  .col-lg-push-0 {\n    left: auto;\n  }\n  .col-lg-offset-12 {\n    margin-left: 100%;\n  }\n  .col-lg-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-lg-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-lg-offset-9 {\n    margin-left: 75%;\n  }\n  .col-lg-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-lg-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-lg-offset-6 {\n    margin-left: 50%;\n  }\n  .col-lg-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-lg-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-lg-offset-3 {\n    margin-left: 25%;\n  }\n  .col-lg-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-lg-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-lg-offset-0 {\n    margin-left: 0%;\n  }\n}\ntable {\n  background-color: transparent;\n}\ntable col[class*=\"col-\"] {\n  position: static;\n  display: table-column;\n  float: none;\n}\ntable td[class*=\"col-\"],\ntable th[class*=\"col-\"] {\n  position: static;\n  display: table-cell;\n  float: none;\n}\ncaption {\n  padding-top: 8px;\n  padding-bottom: 8px;\n  color: #777777;\n  text-align: left;\n}\nth {\n  text-align: left;\n}\n.table {\n  width: 100%;\n  max-width: 100%;\n  margin-bottom: 20px;\n}\n.table > thead > tr > th,\n.table > tbody > tr > th,\n.table > tfoot > tr > th,\n.table > thead > tr > td,\n.table > tbody > tr > td,\n.table > tfoot > tr > td {\n  padding: 8px;\n  line-height: 1.42857143;\n  vertical-align: top;\n  border-top: 1px solid #ddd;\n}\n.table > thead > tr > th {\n  vertical-align: bottom;\n  border-bottom: 2px solid #ddd;\n}\n.table > caption + thead > tr:first-child > th,\n.table > colgroup + thead > tr:first-child > th,\n.table > thead:first-child > tr:first-child > th,\n.table > 
caption + thead > tr:first-child > td,\n.table > colgroup + thead > tr:first-child > td,\n.table > thead:first-child > tr:first-child > td {\n  border-top: 0;\n}\n.table > tbody + tbody {\n  border-top: 2px solid #ddd;\n}\n.table .table {\n  background-color: #fff;\n}\n.table-condensed > thead > tr > th,\n.table-condensed > tbody > tr > th,\n.table-condensed > tfoot > tr > th,\n.table-condensed > thead > tr > td,\n.table-condensed > tbody > tr > td,\n.table-condensed > tfoot > tr > td {\n  padding: 5px;\n}\n.table-bordered {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > tbody > tr > th,\n.table-bordered > tfoot > tr > th,\n.table-bordered > thead > tr > td,\n.table-bordered > tbody > tr > td,\n.table-bordered > tfoot > tr > td {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > thead > tr > td {\n  border-bottom-width: 2px;\n}\n.table-striped > tbody > tr:nth-of-type(odd) {\n  background-color: #f9f9f9;\n}\n.table-hover > tbody > tr:hover {\n  background-color: #f5f5f5;\n}\n.table > thead > tr > td.active,\n.table > tbody > tr > td.active,\n.table > tfoot > tr > td.active,\n.table > thead > tr > th.active,\n.table > tbody > tr > th.active,\n.table > tfoot > tr > th.active,\n.table > thead > tr.active > td,\n.table > tbody > tr.active > td,\n.table > tfoot > tr.active > td,\n.table > thead > tr.active > th,\n.table > tbody > tr.active > th,\n.table > tfoot > tr.active > th {\n  background-color: #f5f5f5;\n}\n.table-hover > tbody > tr > td.active:hover,\n.table-hover > tbody > tr > th.active:hover,\n.table-hover > tbody > tr.active:hover > td,\n.table-hover > tbody > tr:hover > .active,\n.table-hover > tbody > tr.active:hover > th {\n  background-color: #e8e8e8;\n}\n.table > thead > tr > td.success,\n.table > tbody > tr > td.success,\n.table > tfoot > tr > td.success,\n.table > thead > tr > th.success,\n.table > tbody > tr > th.success,\n.table > tfoot > tr > th.success,\n.table > thead > tr.success > td,\n.table > tbody > tr.success > td,\n.table > tfoot > tr.success > td,\n.table > thead > tr.success > th,\n.table > tbody > tr.success > th,\n.table > tfoot > tr.success > th {\n  background-color: #dff0d8;\n}\n.table-hover > tbody > tr > td.success:hover,\n.table-hover > tbody > tr > th.success:hover,\n.table-hover > tbody > tr.success:hover > td,\n.table-hover > tbody > tr:hover > .success,\n.table-hover > tbody > tr.success:hover > th {\n  background-color: #d0e9c6;\n}\n.table > thead > tr > td.info,\n.table > tbody > tr > td.info,\n.table > tfoot > tr > td.info,\n.table > thead > tr > th.info,\n.table > tbody > tr > th.info,\n.table > tfoot > tr > th.info,\n.table > thead > tr.info > td,\n.table > tbody > tr.info > td,\n.table > tfoot > tr.info > td,\n.table > thead > tr.info > th,\n.table > tbody > tr.info > th,\n.table > tfoot > tr.info > th {\n  background-color: #d9edf7;\n}\n.table-hover > tbody > tr > td.info:hover,\n.table-hover > tbody > tr > th.info:hover,\n.table-hover > tbody > tr.info:hover > td,\n.table-hover > tbody > tr:hover > .info,\n.table-hover > tbody > tr.info:hover > th {\n  background-color: #c4e3f3;\n}\n.table > thead > tr > td.warning,\n.table > tbody > tr > td.warning,\n.table > tfoot > tr > td.warning,\n.table > thead > tr > th.warning,\n.table > tbody > tr > th.warning,\n.table > tfoot > tr > th.warning,\n.table > thead > tr.warning > td,\n.table > tbody > tr.warning > td,\n.table > tfoot > tr.warning > td,\n.table > thead > tr.warning > th,\n.table > tbody > tr.warning > th,\n.table > 
tfoot > tr.warning > th {\n  background-color: #fcf8e3;\n}\n.table-hover > tbody > tr > td.warning:hover,\n.table-hover > tbody > tr > th.warning:hover,\n.table-hover > tbody > tr.warning:hover > td,\n.table-hover > tbody > tr:hover > .warning,\n.table-hover > tbody > tr.warning:hover > th {\n  background-color: #faf2cc;\n}\n.table > thead > tr > td.danger,\n.table > tbody > tr > td.danger,\n.table > tfoot > tr > td.danger,\n.table > thead > tr > th.danger,\n.table > tbody > tr > th.danger,\n.table > tfoot > tr > th.danger,\n.table > thead > tr.danger > td,\n.table > tbody > tr.danger > td,\n.table > tfoot > tr.danger > td,\n.table > thead > tr.danger > th,\n.table > tbody > tr.danger > th,\n.table > tfoot > tr.danger > th {\n  background-color: #f2dede;\n}\n.table-hover > tbody > tr > td.danger:hover,\n.table-hover > tbody > tr > th.danger:hover,\n.table-hover > tbody > tr.danger:hover > td,\n.table-hover > tbody > tr:hover > .danger,\n.table-hover > tbody > tr.danger:hover > th {\n  background-color: #ebcccc;\n}\n.table-responsive {\n  min-height: 0.01%;\n  overflow-x: auto;\n}\n@media screen and (max-width: 767px) {\n  .table-responsive {\n    width: 100%;\n    margin-bottom: 15px;\n    overflow-y: hidden;\n    -ms-overflow-style: -ms-autohiding-scrollbar;\n    border: 1px solid #ddd;\n  }\n  .table-responsive > .table {\n    margin-bottom: 0;\n  }\n  .table-responsive > .table > thead > tr > th,\n  .table-responsive > .table > tbody > tr > th,\n  .table-responsive > .table > tfoot > tr > th,\n  .table-responsive > .table > thead > tr > td,\n  .table-responsive > .table > tbody > tr > td,\n  .table-responsive > .table > tfoot > tr > td {\n    white-space: nowrap;\n  }\n  .table-responsive > .table-bordered {\n    border: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:first-child,\n  .table-responsive > .table-bordered > tbody > tr > th:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n  .table-responsive > .table-bordered > thead > tr > td:first-child,\n  .table-responsive > .table-bordered > tbody > tr > td:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n    border-left: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:last-child,\n  .table-responsive > .table-bordered > tbody > tr > th:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n  .table-responsive > .table-bordered > thead > tr > td:last-child,\n  .table-responsive > .table-bordered > tbody > tr > td:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n    border-right: 0;\n  }\n  .table-responsive > .table-bordered > tbody > tr:last-child > th,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > th,\n  .table-responsive > .table-bordered > tbody > tr:last-child > td,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n    border-bottom: 0;\n  }\n}\nfieldset {\n  min-width: 0;\n  padding: 0;\n  margin: 0;\n  border: 0;\n}\nlegend {\n  display: block;\n  width: 100%;\n  padding: 0;\n  margin-bottom: 20px;\n  font-size: 21px;\n  line-height: inherit;\n  color: #333333;\n  border: 0;\n  border-bottom: 1px solid #e5e5e5;\n}\nlabel {\n  display: inline-block;\n  max-width: 100%;\n  margin-bottom: 5px;\n  font-weight: 700;\n}\ninput[type=\"search\"] {\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n  -webkit-appearance: none;\n  appearance: 
none;\n}\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n  margin: 4px 0 0;\n  margin-top: 1px \\9;\n  line-height: normal;\n}\ninput[type=\"radio\"][disabled],\ninput[type=\"checkbox\"][disabled],\ninput[type=\"radio\"].disabled,\ninput[type=\"checkbox\"].disabled,\nfieldset[disabled] input[type=\"radio\"],\nfieldset[disabled] input[type=\"checkbox\"] {\n  cursor: not-allowed;\n}\ninput[type=\"file\"] {\n  display: block;\n}\ninput[type=\"range\"] {\n  display: block;\n  width: 100%;\n}\nselect[multiple],\nselect[size] {\n  height: auto;\n}\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\noutput {\n  display: block;\n  padding-top: 7px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555555;\n}\n.form-control {\n  display: block;\n  width: 100%;\n  height: 34px;\n  padding: 6px 12px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555555;\n  background-color: #fff;\n  background-image: none;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n  -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n  transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n}\n.form-control:focus {\n  border-color: #66afe9;\n  outline: 0;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);\n}\n.form-control::-moz-placeholder {\n  color: #999;\n  opacity: 1;\n}\n.form-control:-ms-input-placeholder {\n  color: #999;\n}\n.form-control::-webkit-input-placeholder {\n  color: #999;\n}\n.form-control::-ms-expand {\n  background-color: transparent;\n  border: 0;\n}\n.form-control[disabled],\n.form-control[readonly],\nfieldset[disabled] .form-control {\n  background-color: #eeeeee;\n  opacity: 1;\n}\n.form-control[disabled],\nfieldset[disabled] .form-control {\n  cursor: not-allowed;\n}\ntextarea.form-control {\n  height: auto;\n}\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n  input[type=\"date\"].form-control,\n  input[type=\"time\"].form-control,\n  input[type=\"datetime-local\"].form-control,\n  input[type=\"month\"].form-control {\n    line-height: 34px;\n  }\n  input[type=\"date\"].input-sm,\n  input[type=\"time\"].input-sm,\n  input[type=\"datetime-local\"].input-sm,\n  input[type=\"month\"].input-sm,\n  .input-group-sm input[type=\"date\"],\n  .input-group-sm input[type=\"time\"],\n  .input-group-sm input[type=\"datetime-local\"],\n  .input-group-sm input[type=\"month\"] {\n    line-height: 30px;\n  }\n  input[type=\"date\"].input-lg,\n  input[type=\"time\"].input-lg,\n  input[type=\"datetime-local\"].input-lg,\n  input[type=\"month\"].input-lg,\n  .input-group-lg input[type=\"date\"],\n  .input-group-lg input[type=\"time\"],\n  .input-group-lg input[type=\"datetime-local\"],\n  .input-group-lg input[type=\"month\"] {\n    line-height: 46px;\n  }\n}\n.form-group {\n  margin-bottom: 15px;\n}\n.radio,\n.checkbox {\n  position: relative;\n  display: block;\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.radio.disabled label,\n.checkbox.disabled label,\nfieldset[disabled] .radio label,\nfieldset[disabled] .checkbox label {\n  cursor: not-allowed;\n}\n.radio label,\n.checkbox label {\n  min-height: 
20px;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: 400;\n  cursor: pointer;\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n  position: absolute;\n  margin-top: 4px \\9;\n  margin-left: -20px;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n  margin-top: -5px;\n}\n.radio-inline,\n.checkbox-inline {\n  position: relative;\n  display: inline-block;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: 400;\n  vertical-align: middle;\n  cursor: pointer;\n}\n.radio-inline.disabled,\n.checkbox-inline.disabled,\nfieldset[disabled] .radio-inline,\nfieldset[disabled] .checkbox-inline {\n  cursor: not-allowed;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n  margin-top: 0;\n  margin-left: 10px;\n}\n.form-control-static {\n  min-height: 34px;\n  padding-top: 7px;\n  padding-bottom: 7px;\n  margin-bottom: 0;\n}\n.form-control-static.input-lg,\n.form-control-static.input-sm {\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-sm {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-sm {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-sm,\nselect[multiple].input-sm {\n  height: auto;\n}\n.form-group-sm .form-control {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.form-group-sm select.form-control {\n  height: 30px;\n  line-height: 30px;\n}\n.form-group-sm textarea.form-control,\n.form-group-sm select[multiple].form-control {\n  height: auto;\n}\n.form-group-sm .form-control-static {\n  height: 30px;\n  min-height: 32px;\n  padding: 6px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n}\n.input-lg {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-lg {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-lg,\nselect[multiple].input-lg {\n  height: auto;\n}\n.form-group-lg .form-control {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\n.form-group-lg select.form-control {\n  height: 46px;\n  line-height: 46px;\n}\n.form-group-lg textarea.form-control,\n.form-group-lg select[multiple].form-control {\n  height: auto;\n}\n.form-group-lg .form-control-static {\n  height: 46px;\n  min-height: 38px;\n  padding: 11px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n}\n.has-feedback {\n  position: relative;\n}\n.has-feedback .form-control {\n  padding-right: 42.5px;\n}\n.form-control-feedback {\n  position: absolute;\n  top: 0;\n  right: 0;\n  z-index: 2;\n  display: block;\n  width: 34px;\n  height: 34px;\n  line-height: 34px;\n  text-align: center;\n  pointer-events: none;\n}\n.input-lg + .form-control-feedback,\n.input-group-lg + .form-control-feedback,\n.form-group-lg .form-control + .form-control-feedback {\n  width: 46px;\n  height: 46px;\n  line-height: 46px;\n}\n.input-sm + .form-control-feedback,\n.input-group-sm + .form-control-feedback,\n.form-group-sm .form-control + .form-control-feedback {\n  width: 30px;\n  height: 30px;\n  line-height: 30px;\n}\n.has-success .help-block,\n.has-success .control-label,\n.has-success .radio,\n.has-success .checkbox,\n.has-success .radio-inline,\n.has-success .checkbox-inline,\n.has-success.radio label,\n.has-success.checkbox label,\n.has-success.radio-inline label,\n.has-success.checkbox-inline label {\n  color: 
#3c763d;\n}\n.has-success .form-control {\n  border-color: #3c763d;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-success .form-control:focus {\n  border-color: #2b542c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n}\n.has-success .input-group-addon {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #3c763d;\n}\n.has-success .form-control-feedback {\n  color: #3c763d;\n}\n.has-warning .help-block,\n.has-warning .control-label,\n.has-warning .radio,\n.has-warning .checkbox,\n.has-warning .radio-inline,\n.has-warning .checkbox-inline,\n.has-warning.radio label,\n.has-warning.checkbox label,\n.has-warning.radio-inline label,\n.has-warning.checkbox-inline label {\n  color: #8a6d3b;\n}\n.has-warning .form-control {\n  border-color: #8a6d3b;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-warning .form-control:focus {\n  border-color: #66512c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n}\n.has-warning .input-group-addon {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #8a6d3b;\n}\n.has-warning .form-control-feedback {\n  color: #8a6d3b;\n}\n.has-error .help-block,\n.has-error .control-label,\n.has-error .radio,\n.has-error .checkbox,\n.has-error .radio-inline,\n.has-error .checkbox-inline,\n.has-error.radio label,\n.has-error.checkbox label,\n.has-error.radio-inline label,\n.has-error.checkbox-inline label {\n  color: #a94442;\n}\n.has-error .form-control {\n  border-color: #a94442;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-error .form-control:focus {\n  border-color: #843534;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n}\n.has-error .input-group-addon {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #a94442;\n}\n.has-error .form-control-feedback {\n  color: #a94442;\n}\n.has-feedback label ~ .form-control-feedback {\n  top: 25px;\n}\n.has-feedback label.sr-only ~ .form-control-feedback {\n  top: 0;\n}\n.help-block {\n  display: block;\n  margin-top: 5px;\n  margin-bottom: 10px;\n  color: #737373;\n}\n@media (min-width: 768px) {\n  .form-inline .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .form-inline .form-control-static {\n    display: inline-block;\n  }\n  .form-inline .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .form-inline .input-group .input-group-addon,\n  .form-inline .input-group .input-group-btn,\n  .form-inline .input-group .form-control {\n    width: auto;\n  }\n  .form-inline .input-group > .form-control {\n    width: 100%;\n  }\n  .form-inline .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio,\n  .form-inline .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio label,\n  .form-inline .checkbox label {\n    padding-left: 0;\n  }\n  .form-inline .radio 
input[type=\"radio\"],\n  .form-inline .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .form-inline .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox,\n.form-horizontal .radio-inline,\n.form-horizontal .checkbox-inline {\n  padding-top: 7px;\n  margin-top: 0;\n  margin-bottom: 0;\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox {\n  min-height: 27px;\n}\n.form-horizontal .form-group {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .control-label {\n    padding-top: 7px;\n    margin-bottom: 0;\n    text-align: right;\n  }\n}\n.form-horizontal .has-feedback .form-control-feedback {\n  right: 15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-lg .control-label {\n    padding-top: 11px;\n    font-size: 18px;\n  }\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-sm .control-label {\n    padding-top: 6px;\n    font-size: 12px;\n  }\n}\n.btn {\n  display: inline-block;\n  margin-bottom: 0;\n  font-weight: normal;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  touch-action: manipulation;\n  cursor: pointer;\n  background-image: none;\n  border: 1px solid transparent;\n  padding: 6px 12px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  border-radius: 4px;\n  -webkit-user-select: none;\n  -moz-user-select: none;\n  -ms-user-select: none;\n  user-select: none;\n}\n.btn:focus,\n.btn:active:focus,\n.btn.active:focus,\n.btn.focus,\n.btn:active.focus,\n.btn.active.focus {\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n.btn:hover,\n.btn:focus,\n.btn.focus {\n  color: #333;\n  text-decoration: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n  outline: 0;\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn.disabled,\n.btn[disabled],\nfieldset[disabled] .btn {\n  cursor: not-allowed;\n  filter: alpha(opacity=65);\n  opacity: 0.65;\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\na.btn.disabled,\nfieldset[disabled] a.btn {\n  pointer-events: none;\n}\n.btn-default {\n  color: #333;\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default:focus,\n.btn-default.focus {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #8c8c8c;\n}\n.btn-default:hover {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #adadad;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n  color: #333;\n  background-color: #e6e6e6;\n  background-image: none;\n  border-color: #adadad;\n}\n.btn-default:active:hover,\n.btn-default.active:hover,\n.open > .dropdown-toggle.btn-default:hover,\n.btn-default:active:focus,\n.btn-default.active:focus,\n.open > .dropdown-toggle.btn-default:focus,\n.btn-default:active.focus,\n.btn-default.active.focus,\n.open > .dropdown-toggle.btn-default.focus {\n  color: #333;\n  background-color: #d4d4d4;\n  border-color: #8c8c8c;\n}\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus {\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default .badge {\n  color: #fff;\n  background-color: #333;\n}\n.btn-primary {\n  color: #fff;\n  
background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary:focus,\n.btn-primary.focus {\n  color: #fff;\n  background-color: #286090;\n  border-color: #122b40;\n}\n.btn-primary:hover {\n  color: #fff;\n  background-color: #286090;\n  border-color: #204d74;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n  color: #fff;\n  background-color: #286090;\n  background-image: none;\n  border-color: #204d74;\n}\n.btn-primary:active:hover,\n.btn-primary.active:hover,\n.open > .dropdown-toggle.btn-primary:hover,\n.btn-primary:active:focus,\n.btn-primary.active:focus,\n.open > .dropdown-toggle.btn-primary:focus,\n.btn-primary:active.focus,\n.btn-primary.active.focus,\n.open > .dropdown-toggle.btn-primary.focus {\n  color: #fff;\n  background-color: #204d74;\n  border-color: #122b40;\n}\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus {\n  background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.btn-success {\n  color: #fff;\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success:focus,\n.btn-success.focus {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #255625;\n}\n.btn-success:hover {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #398439;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n  color: #fff;\n  background-color: #449d44;\n  background-image: none;\n  border-color: #398439;\n}\n.btn-success:active:hover,\n.btn-success.active:hover,\n.open > .dropdown-toggle.btn-success:hover,\n.btn-success:active:focus,\n.btn-success.active:focus,\n.open > .dropdown-toggle.btn-success:focus,\n.btn-success:active.focus,\n.btn-success.active.focus,\n.open > .dropdown-toggle.btn-success.focus {\n  color: #fff;\n  background-color: #398439;\n  border-color: #255625;\n}\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus {\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success .badge {\n  color: #5cb85c;\n  background-color: #fff;\n}\n.btn-info {\n  color: #fff;\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info:focus,\n.btn-info.focus {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #1b6d85;\n}\n.btn-info:hover {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #269abc;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n  color: #fff;\n  background-color: #31b0d5;\n  background-image: none;\n  border-color: #269abc;\n}\n.btn-info:active:hover,\n.btn-info.active:hover,\n.open > .dropdown-toggle.btn-info:hover,\n.btn-info:active:focus,\n.btn-info.active:focus,\n.open > .dropdown-toggle.btn-info:focus,\n.btn-info:active.focus,\n.btn-info.active.focus,\n.open > .dropdown-toggle.btn-info.focus {\n  color: #fff;\n  background-color: #269abc;\n  border-color: #1b6d85;\n}\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] 
.btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus {\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info .badge {\n  color: #5bc0de;\n  background-color: #fff;\n}\n.btn-warning {\n  color: #fff;\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning:focus,\n.btn-warning.focus {\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #985f0d;\n}\n.btn-warning:hover {\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #d58512;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n  color: #fff;\n  background-color: #ec971f;\n  background-image: none;\n  border-color: #d58512;\n}\n.btn-warning:active:hover,\n.btn-warning.active:hover,\n.open > .dropdown-toggle.btn-warning:hover,\n.btn-warning:active:focus,\n.btn-warning.active:focus,\n.open > .dropdown-toggle.btn-warning:focus,\n.btn-warning:active.focus,\n.btn-warning.active.focus,\n.open > .dropdown-toggle.btn-warning.focus {\n  color: #fff;\n  background-color: #d58512;\n  border-color: #985f0d;\n}\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus {\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning .badge {\n  color: #f0ad4e;\n  background-color: #fff;\n}\n.btn-danger {\n  color: #fff;\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger:focus,\n.btn-danger.focus {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #761c19;\n}\n.btn-danger:hover {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #ac2925;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n  color: #fff;\n  background-color: #c9302c;\n  background-image: none;\n  border-color: #ac2925;\n}\n.btn-danger:active:hover,\n.btn-danger.active:hover,\n.open > .dropdown-toggle.btn-danger:hover,\n.btn-danger:active:focus,\n.btn-danger.active:focus,\n.open > .dropdown-toggle.btn-danger:focus,\n.btn-danger:active.focus,\n.btn-danger.active.focus,\n.open > .dropdown-toggle.btn-danger.focus {\n  color: #fff;\n  background-color: #ac2925;\n  border-color: #761c19;\n}\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus {\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger .badge {\n  color: #d9534f;\n  background-color: #fff;\n}\n.btn-link {\n  font-weight: 400;\n  color: #337ab7;\n  border-radius: 0;\n}\n.btn-link,\n.btn-link:active,\n.btn-link.active,\n.btn-link[disabled],\nfieldset[disabled] .btn-link {\n  background-color: transparent;\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\n.btn-link,\n.btn-link:hover,\n.btn-link:focus,\n.btn-link:active {\n  border-color: transparent;\n}\n.btn-link:hover,\n.btn-link:focus {\n  color: #23527c;\n  text-decoration: underline;\n  background-color: transparent;\n}\n.btn-link[disabled]:hover,\nfieldset[disabled] .btn-link:hover,\n.btn-link[disabled]:focus,\nfieldset[disabled] .btn-link:focus {\n  color: 
#777777;\n  text-decoration: none;\n}\n.btn-lg,\n.btn-group-lg > .btn {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\n.btn-sm,\n.btn-group-sm > .btn {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-xs,\n.btn-group-xs > .btn {\n  padding: 1px 5px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-block {\n  display: block;\n  width: 100%;\n}\n.btn-block + .btn-block {\n  margin-top: 5px;\n}\ninput[type=\"submit\"].btn-block,\ninput[type=\"reset\"].btn-block,\ninput[type=\"button\"].btn-block {\n  width: 100%;\n}\n.fade {\n  opacity: 0;\n  -webkit-transition: opacity 0.15s linear;\n  -o-transition: opacity 0.15s linear;\n  transition: opacity 0.15s linear;\n}\n.fade.in {\n  opacity: 1;\n}\n.collapse {\n  display: none;\n}\n.collapse.in {\n  display: block;\n}\ntr.collapse.in {\n  display: table-row;\n}\ntbody.collapse.in {\n  display: table-row-group;\n}\n.collapsing {\n  position: relative;\n  height: 0;\n  overflow: hidden;\n  -webkit-transition-property: height, visibility;\n  transition-property: height, visibility;\n  -webkit-transition-duration: 0.35s;\n  transition-duration: 0.35s;\n  -webkit-transition-timing-function: ease;\n  transition-timing-function: ease;\n}\n.caret {\n  display: inline-block;\n  width: 0;\n  height: 0;\n  margin-left: 2px;\n  vertical-align: middle;\n  border-top: 4px dashed;\n  border-top: 4px solid \\9;\n  border-right: 4px solid transparent;\n  border-left: 4px solid transparent;\n}\n.dropup,\n.dropdown {\n  position: relative;\n}\n.dropdown-toggle:focus {\n  outline: 0;\n}\n.dropdown-menu {\n  position: absolute;\n  top: 100%;\n  left: 0;\n  z-index: 1000;\n  display: none;\n  float: left;\n  min-width: 160px;\n  padding: 5px 0;\n  margin: 2px 0 0;\n  font-size: 14px;\n  text-align: left;\n  list-style: none;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, 0.15);\n  border-radius: 4px;\n  -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n  box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n}\n.dropdown-menu.pull-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu .divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.dropdown-menu > li > a {\n  display: block;\n  padding: 3px 20px;\n  clear: both;\n  font-weight: 400;\n  line-height: 1.42857143;\n  color: #333333;\n  white-space: nowrap;\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  color: #262626;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  color: #fff;\n  text-decoration: none;\n  background-color: #337ab7;\n  outline: 0;\n}\n.dropdown-menu > .disabled > a,\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  color: #777777;\n}\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n  background-image: none;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n}\n.open > .dropdown-menu {\n  display: block;\n}\n.open > a {\n  outline: 0;\n}\n.dropdown-menu-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu-left {\n  right: auto;\n  left: 0;\n}\n.dropdown-header {\n  display: block;\n  padding: 3px 20px;\n  font-size: 12px;\n  line-height: 1.42857143;\n  color: 
#777777;\n  white-space: nowrap;\n}\n.dropdown-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 990;\n}\n.pull-right > .dropdown-menu {\n  right: 0;\n  left: auto;\n}\n.dropup .caret,\n.navbar-fixed-bottom .dropdown .caret {\n  content: \"\";\n  border-top: 0;\n  border-bottom: 4px dashed;\n  border-bottom: 4px solid \\9;\n}\n.dropup .dropdown-menu,\n.navbar-fixed-bottom .dropdown .dropdown-menu {\n  top: auto;\n  bottom: 100%;\n  margin-bottom: 2px;\n}\n@media (min-width: 768px) {\n  .navbar-right .dropdown-menu {\n    right: 0;\n    left: auto;\n  }\n  .navbar-right .dropdown-menu-left {\n    right: auto;\n    left: 0;\n  }\n}\n.btn-group,\n.btn-group-vertical {\n  position: relative;\n  display: inline-block;\n  vertical-align: middle;\n}\n.btn-group > .btn,\n.btn-group-vertical > .btn {\n  position: relative;\n  float: left;\n}\n.btn-group > .btn:hover,\n.btn-group-vertical > .btn:hover,\n.btn-group > .btn:focus,\n.btn-group-vertical > .btn:focus,\n.btn-group > .btn:active,\n.btn-group-vertical > .btn:active,\n.btn-group > .btn.active,\n.btn-group-vertical > .btn.active {\n  z-index: 2;\n}\n.btn-group .btn + .btn,\n.btn-group .btn + .btn-group,\n.btn-group .btn-group + .btn,\n.btn-group .btn-group + .btn-group {\n  margin-left: -1px;\n}\n.btn-toolbar {\n  margin-left: -5px;\n}\n.btn-toolbar .btn,\n.btn-toolbar .btn-group,\n.btn-toolbar .input-group {\n  float: left;\n}\n.btn-toolbar > .btn,\n.btn-toolbar > .btn-group,\n.btn-toolbar > .input-group {\n  margin-left: 5px;\n}\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n  border-radius: 0;\n}\n.btn-group > .btn:first-child {\n  margin-left: 0;\n}\n.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group > .btn-group {\n  float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n  outline: 0;\n}\n.btn-group > .btn + .dropdown-toggle {\n  padding-right: 8px;\n  padding-left: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n  padding-right: 12px;\n  padding-left: 12px;\n}\n.btn-group.open .dropdown-toggle {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-group.open .dropdown-toggle.btn-link {\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\n.btn .caret {\n  margin-left: 0;\n}\n.btn-lg .caret {\n  border-width: 5px 5px 0;\n  border-bottom-width: 0;\n}\n.dropup .btn-lg .caret {\n  border-width: 0 5px 5px;\n}\n.btn-group-vertical > .btn,\n.btn-group-vertical > .btn-group,\n.btn-group-vertical > .btn-group > .btn {\n  display: block;\n  float: none;\n  width: 100%;\n  max-width: 100%;\n}\n.btn-group-vertical > .btn-group > .btn {\n  float: none;\n}\n.btn-group-vertical > .btn + .btn,\n.btn-group-vertical > .btn + .btn-group,\n.btn-group-vertical > .btn-group + 
.btn,\n.btn-group-vertical > .btn-group + .btn-group {\n  margin-top: -1px;\n  margin-left: 0;\n}\n.btn-group-vertical > .btn:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn:first-child:not(:last-child) {\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn:last-child:not(:first-child) {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.btn-group-justified {\n  display: table;\n  width: 100%;\n  table-layout: fixed;\n  border-collapse: separate;\n}\n.btn-group-justified > .btn,\n.btn-group-justified > .btn-group {\n  display: table-cell;\n  float: none;\n  width: 1%;\n}\n.btn-group-justified > .btn-group .btn {\n  width: 100%;\n}\n.btn-group-justified > .btn-group .dropdown-menu {\n  left: auto;\n}\n[data-toggle=\"buttons\"] > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn input[type=\"checkbox\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"checkbox\"] {\n  position: absolute;\n  clip: rect(0, 0, 0, 0);\n  pointer-events: none;\n}\n.input-group {\n  position: relative;\n  display: table;\n  border-collapse: separate;\n}\n.input-group[class*=\"col-\"] {\n  float: none;\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-group .form-control {\n  position: relative;\n  z-index: 2;\n  float: left;\n  width: 100%;\n  margin-bottom: 0;\n}\n.input-group .form-control:focus {\n  z-index: 3;\n}\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-group-lg > .form-control,\nselect.input-group-lg > .input-group-addon,\nselect.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-group-lg > .form-control,\ntextarea.input-group-lg > .input-group-addon,\ntextarea.input-group-lg > .input-group-btn > .btn,\nselect[multiple].input-group-lg > .form-control,\nselect[multiple].input-group-lg > .input-group-addon,\nselect[multiple].input-group-lg > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-group-sm > .form-control,\nselect.input-group-sm > .input-group-addon,\nselect.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-group-sm > .form-control,\ntextarea.input-group-sm > .input-group-addon,\ntextarea.input-group-sm > .input-group-btn > .btn,\nselect[multiple].input-group-sm > .form-control,\nselect[multiple].input-group-sm > .input-group-addon,\nselect[multiple].input-group-sm > 
.input-group-btn > .btn {\n  height: auto;\n}\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n  display: table-cell;\n}\n.input-group-addon:not(:first-child):not(:last-child),\n.input-group-btn:not(:first-child):not(:last-child),\n.input-group .form-control:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.input-group-addon,\n.input-group-btn {\n  width: 1%;\n  white-space: nowrap;\n  vertical-align: middle;\n}\n.input-group-addon {\n  padding: 6px 12px;\n  font-size: 14px;\n  font-weight: 400;\n  line-height: 1;\n  color: #555555;\n  text-align: center;\n  background-color: #eeeeee;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\n.input-group-addon.input-sm {\n  padding: 5px 10px;\n  font-size: 12px;\n  border-radius: 3px;\n}\n.input-group-addon.input-lg {\n  padding: 10px 16px;\n  font-size: 18px;\n  border-radius: 6px;\n}\n.input-group-addon input[type=\"radio\"],\n.input-group-addon input[type=\"checkbox\"] {\n  margin-top: 0;\n}\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.input-group-addon:first-child {\n  border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.input-group-addon:last-child {\n  border-left: 0;\n}\n.input-group-btn {\n  position: relative;\n  font-size: 0;\n  white-space: nowrap;\n}\n.input-group-btn > .btn {\n  position: relative;\n}\n.input-group-btn > .btn + .btn {\n  margin-left: -1px;\n}\n.input-group-btn > .btn:hover,\n.input-group-btn > .btn:focus,\n.input-group-btn > .btn:active {\n  z-index: 2;\n}\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group {\n  margin-right: -1px;\n}\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group {\n  z-index: 2;\n  margin-left: -1px;\n}\n.nav {\n  padding-left: 0;\n  margin-bottom: 0;\n  list-style: none;\n}\n.nav > li {\n  position: relative;\n  display: block;\n}\n.nav > li > a {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n}\n.nav > li > a:hover,\n.nav > li > a:focus {\n  text-decoration: none;\n  background-color: #eeeeee;\n}\n.nav > li.disabled > a {\n  color: #777777;\n}\n.nav > li.disabled > a:hover,\n.nav > li.disabled > a:focus {\n  color: #777777;\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n}\n.nav .open > a,\n.nav .open > a:hover,\n.nav .open > a:focus {\n  background-color: #eeeeee;\n  border-color: #337ab7;\n}\n.nav .nav-divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.nav > li > a > img {\n  max-width: none;\n}\n.nav-tabs {\n  border-bottom: 1px solid #ddd;\n}\n.nav-tabs > li {\n  float: left;\n  margin-bottom: -1px;\n}\n.nav-tabs > li > a {\n  margin-right: 2px;\n  line-height: 1.42857143;\n  border: 1px solid transparent;\n  border-radius: 4px 4px 0 0;\n}\n.nav-tabs > li 
> a:hover {\n  border-color: #eeeeee #eeeeee #ddd;\n}\n.nav-tabs > li.active > a,\n.nav-tabs > li.active > a:hover,\n.nav-tabs > li.active > a:focus {\n  color: #555555;\n  cursor: default;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-bottom-color: transparent;\n}\n.nav-tabs.nav-justified {\n  width: 100%;\n  border-bottom: 0;\n}\n.nav-tabs.nav-justified > li {\n  float: none;\n}\n.nav-tabs.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-tabs.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-tabs.nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs.nav-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs.nav-justified > .active > a,\n.nav-tabs.nav-justified > .active > a:hover,\n.nav-tabs.nav-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs.nav-justified > .active > a,\n  .nav-tabs.nav-justified > .active > a:hover,\n  .nav-tabs.nav-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.nav-pills > li {\n  float: left;\n}\n.nav-pills > li > a {\n  border-radius: 4px;\n}\n.nav-pills > li + li {\n  margin-left: 2px;\n}\n.nav-pills > li.active > a,\n.nav-pills > li.active > a:hover,\n.nav-pills > li.active > a:focus {\n  color: #fff;\n  background-color: #337ab7;\n}\n.nav-stacked > li {\n  float: none;\n}\n.nav-stacked > li + li {\n  margin-top: 2px;\n  margin-left: 0;\n}\n.nav-justified {\n  width: 100%;\n}\n.nav-justified > li {\n  float: none;\n}\n.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs-justified {\n  border-bottom: 0;\n}\n.nav-tabs-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs-justified > .active > a,\n.nav-tabs-justified > .active > a:hover,\n.nav-tabs-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs-justified > .active > a,\n  .nav-tabs-justified > .active > a:hover,\n  .nav-tabs-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.tab-content > .tab-pane {\n  display: none;\n}\n.tab-content > .active {\n  display: block;\n}\n.nav-tabs .dropdown-menu {\n  margin-top: -1px;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar {\n  position: relative;\n  min-height: 50px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n}\n@media (min-width: 768px) {\n  .navbar {\n    border-radius: 4px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-header {\n    float: left;\n  }\n}\n.navbar-collapse {\n  padding-right: 15px;\n  padding-left: 15px;\n  overflow-x: visible;\n  border-top: 1px solid transparent;\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1);\n  -webkit-overflow-scrolling: touch;\n}\n.navbar-collapse.in {\n  overflow-y: auto;\n}\n@media (min-width: 768px) {\n  .navbar-collapse {\n    width: auto;\n    border-top: 0;\n    box-shadow: none;\n  }\n  
.navbar-collapse.collapse {\n    display: block !important;\n    height: auto !important;\n    padding-bottom: 0;\n    overflow: visible !important;\n  }\n  .navbar-collapse.in {\n    overflow-y: visible;\n  }\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-static-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  position: fixed;\n  right: 0;\n  left: 0;\n  z-index: 1030;\n}\n.navbar-fixed-top .navbar-collapse,\n.navbar-fixed-bottom .navbar-collapse {\n  max-height: 340px;\n}\n@media (max-device-width: 480px) and (orientation: landscape) {\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    max-height: 200px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-fixed-top,\n  .navbar-fixed-bottom {\n    border-radius: 0;\n  }\n}\n.navbar-fixed-top {\n  top: 0;\n  border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n  bottom: 0;\n  margin-bottom: 0;\n  border-width: 1px 0 0;\n}\n.container > .navbar-header,\n.container-fluid > .navbar-header,\n.container > .navbar-collapse,\n.container-fluid > .navbar-collapse {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .container > .navbar-header,\n  .container-fluid > .navbar-header,\n  .container > .navbar-collapse,\n  .container-fluid > .navbar-collapse {\n    margin-right: 0;\n    margin-left: 0;\n  }\n}\n.navbar-static-top {\n  z-index: 1000;\n  border-width: 0 0 1px;\n}\n@media (min-width: 768px) {\n  .navbar-static-top {\n    border-radius: 0;\n  }\n}\n.navbar-brand {\n  float: left;\n  height: 50px;\n  padding: 15px 15px;\n  font-size: 18px;\n  line-height: 20px;\n}\n.navbar-brand:hover,\n.navbar-brand:focus {\n  text-decoration: none;\n}\n.navbar-brand > img {\n  display: block;\n}\n@media (min-width: 768px) {\n  .navbar > .container .navbar-brand,\n  .navbar > .container-fluid .navbar-brand {\n    margin-left: -15px;\n  }\n}\n.navbar-toggle {\n  position: relative;\n  float: right;\n  padding: 9px 10px;\n  margin-right: 15px;\n  margin-top: 8px;\n  margin-bottom: 8px;\n  background-color: transparent;\n  background-image: none;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.navbar-toggle:focus {\n  outline: 0;\n}\n.navbar-toggle .icon-bar {\n  display: block;\n  width: 22px;\n  height: 2px;\n  border-radius: 1px;\n}\n.navbar-toggle .icon-bar + .icon-bar {\n  margin-top: 4px;\n}\n@media (min-width: 768px) {\n  .navbar-toggle {\n    display: none;\n  }\n}\n.navbar-nav {\n  margin: 7.5px -15px;\n}\n.navbar-nav > li > a {\n  padding-top: 10px;\n  padding-bottom: 10px;\n  line-height: 20px;\n}\n@media (max-width: 767px) {\n  .navbar-nav .open .dropdown-menu {\n    position: static;\n    float: none;\n    width: auto;\n    margin-top: 0;\n    background-color: transparent;\n    border: 0;\n    box-shadow: none;\n  }\n  .navbar-nav .open .dropdown-menu > li > a,\n  .navbar-nav .open .dropdown-menu .dropdown-header {\n    padding: 5px 15px 5px 25px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a {\n    line-height: 20px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-nav .open .dropdown-menu > li > a:focus {\n    background-image: none;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-nav {\n    float: left;\n    margin: 0;\n  }\n  .navbar-nav > li {\n    float: left;\n  }\n  .navbar-nav > li > a {\n    padding-top: 15px;\n    padding-bottom: 15px;\n  }\n}\n.navbar-form {\n  padding: 10px 15px;\n  margin-right: -15px;\n  margin-left: 
-15px;\n  border-top: 1px solid transparent;\n  border-bottom: 1px solid transparent;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n  margin-top: 8px;\n  margin-bottom: 8px;\n}\n@media (min-width: 768px) {\n  .navbar-form .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control-static {\n    display: inline-block;\n  }\n  .navbar-form .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .navbar-form .input-group .input-group-addon,\n  .navbar-form .input-group .input-group-btn,\n  .navbar-form .input-group .form-control {\n    width: auto;\n  }\n  .navbar-form .input-group > .form-control {\n    width: 100%;\n  }\n  .navbar-form .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio,\n  .navbar-form .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio label,\n  .navbar-form .checkbox label {\n    padding-left: 0;\n  }\n  .navbar-form .radio input[type=\"radio\"],\n  .navbar-form .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .navbar-form .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n@media (max-width: 767px) {\n  .navbar-form .form-group {\n    margin-bottom: 5px;\n  }\n  .navbar-form .form-group:last-child {\n    margin-bottom: 0;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-form {\n    width: auto;\n    padding-top: 0;\n    padding-bottom: 0;\n    margin-right: 0;\n    margin-left: 0;\n    border: 0;\n    -webkit-box-shadow: none;\n    box-shadow: none;\n  }\n}\n.navbar-nav > li > .dropdown-menu {\n  margin-top: 0;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n  margin-bottom: 0;\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.navbar-btn {\n  margin-top: 8px;\n  margin-bottom: 8px;\n}\n.navbar-btn.btn-sm {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.navbar-btn.btn-xs {\n  margin-top: 14px;\n  margin-bottom: 14px;\n}\n.navbar-text {\n  margin-top: 15px;\n  margin-bottom: 15px;\n}\n@media (min-width: 768px) {\n  .navbar-text {\n    float: left;\n    margin-right: 15px;\n    margin-left: 15px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-left {\n    float: left !important;\n  }\n  .navbar-right {\n    float: right !important;\n    margin-right: -15px;\n  }\n  .navbar-right ~ .navbar-right {\n    margin-right: 0;\n  }\n}\n.navbar-default {\n  background-color: #f8f8f8;\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-brand {\n  color: #777;\n}\n.navbar-default .navbar-brand:hover,\n.navbar-default .navbar-brand:focus {\n  color: #5e5e5e;\n  background-color: transparent;\n}\n.navbar-default .navbar-text {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a:hover,\n.navbar-default .navbar-nav > li > a:focus {\n  color: #333;\n  background-color: transparent;\n}\n.navbar-default .navbar-nav > .active > a,\n.navbar-default .navbar-nav > .active > a:hover,\n.navbar-default .navbar-nav > .active > a:focus {\n  color: #555;\n 
 background-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .disabled > a,\n.navbar-default .navbar-nav > .disabled > a:hover,\n.navbar-default .navbar-nav > .disabled > a:focus {\n  color: #ccc;\n  background-color: transparent;\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .open > a:hover,\n.navbar-default .navbar-nav > .open > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n@media (max-width: 767px) {\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a {\n    color: #777;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #333;\n    background-color: transparent;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #555;\n    background-color: #e7e7e7;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #ccc;\n    background-color: transparent;\n  }\n}\n.navbar-default .navbar-toggle {\n  border-color: #ddd;\n}\n.navbar-default .navbar-toggle:hover,\n.navbar-default .navbar-toggle:focus {\n  background-color: #ddd;\n}\n.navbar-default .navbar-toggle .icon-bar {\n  background-color: #888;\n}\n.navbar-default .navbar-collapse,\n.navbar-default .navbar-form {\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-link {\n  color: #777;\n}\n.navbar-default .navbar-link:hover {\n  color: #333;\n}\n.navbar-default .btn-link {\n  color: #777;\n}\n.navbar-default .btn-link:hover,\n.navbar-default .btn-link:focus {\n  color: #333;\n}\n.navbar-default .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-default .btn-link:hover,\n.navbar-default .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-default .btn-link:focus {\n  color: #ccc;\n}\n.navbar-inverse {\n  background-color: #222;\n  border-color: #080808;\n}\n.navbar-inverse .navbar-brand {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-brand:hover,\n.navbar-inverse .navbar-brand:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-text {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a:hover,\n.navbar-inverse .navbar-nav > li > a:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .active > a,\n.navbar-inverse .navbar-nav > .active > a:hover,\n.navbar-inverse .navbar-nav > .active > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n.navbar-inverse .navbar-nav > .disabled > a,\n.navbar-inverse .navbar-nav > .disabled > a:hover,\n.navbar-inverse .navbar-nav > .disabled > a:focus {\n  color: #444;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .open > a:hover,\n.navbar-inverse .navbar-nav > .open > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n@media (max-width: 767px) {\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {\n    border-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu .divider {\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {\n    color: #9d9d9d;\n  }\n  .navbar-inverse .navbar-nav .open 
.dropdown-menu > li > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #fff;\n    background-color: transparent;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #444;\n    background-color: transparent;\n  }\n}\n.navbar-inverse .navbar-toggle {\n  border-color: #333;\n}\n.navbar-inverse .navbar-toggle:hover,\n.navbar-inverse .navbar-toggle:focus {\n  background-color: #333;\n}\n.navbar-inverse .navbar-toggle .icon-bar {\n  background-color: #fff;\n}\n.navbar-inverse .navbar-collapse,\n.navbar-inverse .navbar-form {\n  border-color: #101010;\n}\n.navbar-inverse .navbar-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-link:hover {\n  color: #fff;\n}\n.navbar-inverse .btn-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link:focus {\n  color: #fff;\n}\n.navbar-inverse .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-inverse .btn-link:focus {\n  color: #444;\n}\n.breadcrumb {\n  padding: 8px 15px;\n  margin-bottom: 20px;\n  list-style: none;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n}\n.breadcrumb > li {\n  display: inline-block;\n}\n.breadcrumb > li + li:before {\n  padding: 0 5px;\n  color: #ccc;\n  content: \"/\\00a0\";\n}\n.breadcrumb > .active {\n  color: #777777;\n}\n.pagination {\n  display: inline-block;\n  padding-left: 0;\n  margin: 20px 0;\n  border-radius: 4px;\n}\n.pagination > li {\n  display: inline;\n}\n.pagination > li > a,\n.pagination > li > span {\n  position: relative;\n  float: left;\n  padding: 6px 12px;\n  margin-left: -1px;\n  line-height: 1.42857143;\n  color: #337ab7;\n  text-decoration: none;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.pagination > li > a:hover,\n.pagination > li > span:hover,\n.pagination > li > a:focus,\n.pagination > li > span:focus {\n  z-index: 2;\n  color: #23527c;\n  background-color: #eeeeee;\n  border-color: #ddd;\n}\n.pagination > li:first-child > a,\n.pagination > li:first-child > span {\n  margin-left: 0;\n  border-top-left-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.pagination > li:last-child > a,\n.pagination > li:last-child > span {\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 4px;\n}\n.pagination > .active > a,\n.pagination > .active > span,\n.pagination > .active > a:hover,\n.pagination > .active > span:hover,\n.pagination > .active > a:focus,\n.pagination > .active > span:focus {\n  z-index: 3;\n  color: #fff;\n  cursor: default;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.pagination > .disabled > span,\n.pagination > .disabled > span:hover,\n.pagination > .disabled > span:focus,\n.pagination > .disabled > a,\n.pagination > .disabled > a:hover,\n.pagination > .disabled > a:focus {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #fff;\n  border-color: #ddd;\n}\n.pagination-lg > li > a,\n.pagination-lg > li > span {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n}\n.pagination-lg > 
li:first-child > a,\n.pagination-lg > li:first-child > span {\n  border-top-left-radius: 6px;\n  border-bottom-left-radius: 6px;\n}\n.pagination-lg > li:last-child > a,\n.pagination-lg > li:last-child > span {\n  border-top-right-radius: 6px;\n  border-bottom-right-radius: 6px;\n}\n.pagination-sm > li > a,\n.pagination-sm > li > span {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n}\n.pagination-sm > li:first-child > a,\n.pagination-sm > li:first-child > span {\n  border-top-left-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.pagination-sm > li:last-child > a,\n.pagination-sm > li:last-child > span {\n  border-top-right-radius: 3px;\n  border-bottom-right-radius: 3px;\n}\n.pager {\n  padding-left: 0;\n  margin: 20px 0;\n  text-align: center;\n  list-style: none;\n}\n.pager li {\n  display: inline;\n}\n.pager li > a,\n.pager li > span {\n  display: inline-block;\n  padding: 5px 14px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 15px;\n}\n.pager li > a:hover,\n.pager li > a:focus {\n  text-decoration: none;\n  background-color: #eeeeee;\n}\n.pager .next > a,\n.pager .next > span {\n  float: right;\n}\n.pager .previous > a,\n.pager .previous > span {\n  float: left;\n}\n.pager .disabled > a,\n.pager .disabled > a:hover,\n.pager .disabled > a:focus,\n.pager .disabled > span {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #fff;\n}\n.label {\n  display: inline;\n  padding: 0.2em 0.6em 0.3em;\n  font-size: 75%;\n  font-weight: 700;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  border-radius: 0.25em;\n}\na.label:hover,\na.label:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.label:empty {\n  display: none;\n}\n.btn .label {\n  position: relative;\n  top: -1px;\n}\n.label-default {\n  background-color: #777777;\n}\n.label-default[href]:hover,\n.label-default[href]:focus {\n  background-color: #5e5e5e;\n}\n.label-primary {\n  background-color: #337ab7;\n}\n.label-primary[href]:hover,\n.label-primary[href]:focus {\n  background-color: #286090;\n}\n.label-success {\n  background-color: #5cb85c;\n}\n.label-success[href]:hover,\n.label-success[href]:focus {\n  background-color: #449d44;\n}\n.label-info {\n  background-color: #5bc0de;\n}\n.label-info[href]:hover,\n.label-info[href]:focus {\n  background-color: #31b0d5;\n}\n.label-warning {\n  background-color: #f0ad4e;\n}\n.label-warning[href]:hover,\n.label-warning[href]:focus {\n  background-color: #ec971f;\n}\n.label-danger {\n  background-color: #d9534f;\n}\n.label-danger[href]:hover,\n.label-danger[href]:focus {\n  background-color: #c9302c;\n}\n.badge {\n  display: inline-block;\n  min-width: 10px;\n  padding: 3px 7px;\n  font-size: 12px;\n  font-weight: bold;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  background-color: #777777;\n  border-radius: 10px;\n}\n.badge:empty {\n  display: none;\n}\n.btn .badge {\n  position: relative;\n  top: -1px;\n}\n.btn-xs .badge,\n.btn-group-xs > .btn .badge {\n  top: 0;\n  padding: 1px 5px;\n}\na.badge:hover,\na.badge:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.list-group-item > .badge {\n  float: right;\n}\n.list-group-item > .badge + .badge {\n  margin-right: 5px;\n}\n.nav-pills > li > a > .badge {\n  margin-left: 3px;\n}\n.jumbotron 
{\n  padding-top: 30px;\n  padding-bottom: 30px;\n  margin-bottom: 30px;\n  color: inherit;\n  background-color: #eeeeee;\n}\n.jumbotron h1,\n.jumbotron .h1 {\n  color: inherit;\n}\n.jumbotron p {\n  margin-bottom: 15px;\n  font-size: 21px;\n  font-weight: 200;\n}\n.jumbotron > hr {\n  border-top-color: #d5d5d5;\n}\n.container .jumbotron,\n.container-fluid .jumbotron {\n  padding-right: 15px;\n  padding-left: 15px;\n  border-radius: 6px;\n}\n.jumbotron .container {\n  max-width: 100%;\n}\n@media screen and (min-width: 768px) {\n  .jumbotron {\n    padding-top: 48px;\n    padding-bottom: 48px;\n  }\n  .container .jumbotron,\n  .container-fluid .jumbotron {\n    padding-right: 60px;\n    padding-left: 60px;\n  }\n  .jumbotron h1,\n  .jumbotron .h1 {\n    font-size: 63px;\n  }\n}\n.thumbnail {\n  display: block;\n  padding: 4px;\n  margin-bottom: 20px;\n  line-height: 1.42857143;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: border 0.2s ease-in-out;\n  -o-transition: border 0.2s ease-in-out;\n  transition: border 0.2s ease-in-out;\n}\n.thumbnail > img,\n.thumbnail a > img {\n  margin-right: auto;\n  margin-left: auto;\n}\na.thumbnail:hover,\na.thumbnail:focus,\na.thumbnail.active {\n  border-color: #337ab7;\n}\n.thumbnail .caption {\n  padding: 9px;\n  color: #333333;\n}\n.alert {\n  padding: 15px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.alert h4 {\n  margin-top: 0;\n  color: inherit;\n}\n.alert .alert-link {\n  font-weight: bold;\n}\n.alert > p,\n.alert > ul {\n  margin-bottom: 0;\n}\n.alert > p + p {\n  margin-top: 5px;\n}\n.alert-dismissable,\n.alert-dismissible {\n  padding-right: 35px;\n}\n.alert-dismissable .close,\n.alert-dismissible .close {\n  position: relative;\n  top: -2px;\n  right: -21px;\n  color: inherit;\n}\n.alert-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.alert-success hr {\n  border-top-color: #c9e2b3;\n}\n.alert-success .alert-link {\n  color: #2b542c;\n}\n.alert-info {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.alert-info hr {\n  border-top-color: #a6e1ec;\n}\n.alert-info .alert-link {\n  color: #245269;\n}\n.alert-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.alert-warning hr {\n  border-top-color: #f7e1b5;\n}\n.alert-warning .alert-link {\n  color: #66512c;\n}\n.alert-danger {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.alert-danger hr {\n  border-top-color: #e4b9c0;\n}\n.alert-danger .alert-link {\n  color: #843534;\n}\n@-webkit-keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n@keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n.progress {\n  height: 20px;\n  margin-bottom: 20px;\n  overflow: hidden;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n  box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n}\n.progress-bar {\n  float: left;\n  width: 0%;\n  height: 100%;\n  font-size: 12px;\n  line-height: 20px;\n  color: #fff;\n  text-align: center;\n  background-color: #337ab7;\n  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  -webkit-transition: width 0.6s ease;\n  -o-transition: width 0.6s ease;\n  transition: width 0.6s 
ease;\n}\n.progress-striped .progress-bar,\n.progress-bar-striped {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-size: 40px 40px;\n}\n.progress.active .progress-bar,\n.progress-bar.active {\n  -webkit-animation: progress-bar-stripes 2s linear infinite;\n  -o-animation: progress-bar-stripes 2s linear infinite;\n  animation: progress-bar-stripes 2s linear infinite;\n}\n.progress-bar-success {\n  background-color: #5cb85c;\n}\n.progress-striped .progress-bar-success {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-info {\n  background-color: #5bc0de;\n}\n.progress-striped .progress-bar-info {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-warning {\n  background-color: #f0ad4e;\n}\n.progress-striped .progress-bar-warning {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-danger {\n  background-color: #d9534f;\n}\n.progress-striped .progress-bar-danger {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 
0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.media {\n  margin-top: 15px;\n}\n.media:first-child {\n  margin-top: 0;\n}\n.media,\n.media-body {\n  overflow: hidden;\n  zoom: 1;\n}\n.media-body {\n  width: 10000px;\n}\n.media-object {\n  display: block;\n}\n.media-object.img-thumbnail {\n  max-width: none;\n}\n.media-right,\n.media > .pull-right {\n  padding-left: 10px;\n}\n.media-left,\n.media > .pull-left {\n  padding-right: 10px;\n}\n.media-left,\n.media-right,\n.media-body {\n  display: table-cell;\n  vertical-align: top;\n}\n.media-middle {\n  vertical-align: middle;\n}\n.media-bottom {\n  vertical-align: bottom;\n}\n.media-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.media-list {\n  padding-left: 0;\n  list-style: none;\n}\n.list-group {\n  padding-left: 0;\n  margin-bottom: 20px;\n}\n.list-group-item {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n  margin-bottom: -1px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.list-group-item:first-child {\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n}\n.list-group-item:last-child {\n  margin-bottom: 0;\n  border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.list-group-item.disabled,\n.list-group-item.disabled:hover,\n.list-group-item.disabled:focus {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #eeeeee;\n}\n.list-group-item.disabled .list-group-item-heading,\n.list-group-item.disabled:hover .list-group-item-heading,\n.list-group-item.disabled:focus .list-group-item-heading {\n  color: inherit;\n}\n.list-group-item.disabled .list-group-item-text,\n.list-group-item.disabled:hover .list-group-item-text,\n.list-group-item.disabled:focus .list-group-item-text {\n  color: #777777;\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  z-index: 2;\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.list-group-item.active .list-group-item-heading,\n.list-group-item.active:hover .list-group-item-heading,\n.list-group-item.active:focus .list-group-item-heading,\n.list-group-item.active .list-group-item-heading > small,\n.list-group-item.active:hover .list-group-item-heading > small,\n.list-group-item.active:focus .list-group-item-heading > small,\n.list-group-item.active .list-group-item-heading > .small,\n.list-group-item.active:hover .list-group-item-heading > .small,\n.list-group-item.active:focus .list-group-item-heading > .small {\n  color: inherit;\n}\n.list-group-item.active .list-group-item-text,\n.list-group-item.active:hover .list-group-item-text,\n.list-group-item.active:focus .list-group-item-text {\n  color: #c7ddef;\n}\na.list-group-item,\nbutton.list-group-item {\n  color: #555;\n}\na.list-group-item .list-group-item-heading,\nbutton.list-group-item .list-group-item-heading {\n  color: #333;\n}\na.list-group-item:hover,\nbutton.list-group-item:hover,\na.list-group-item:focus,\nbutton.list-group-item:focus {\n  color: #555;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\nbutton.list-group-item {\n  width: 100%;\n  text-align: left;\n}\n.list-group-item-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n}\na.list-group-item-success,\nbutton.list-group-item-success {\n  color: #3c763d;\n}\na.list-group-item-success 
.list-group-item-heading,\nbutton.list-group-item-success .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-success:hover,\nbutton.list-group-item-success:hover,\na.list-group-item-success:focus,\nbutton.list-group-item-success:focus {\n  color: #3c763d;\n  background-color: #d0e9c6;\n}\na.list-group-item-success.active,\nbutton.list-group-item-success.active,\na.list-group-item-success.active:hover,\nbutton.list-group-item-success.active:hover,\na.list-group-item-success.active:focus,\nbutton.list-group-item-success.active:focus {\n  color: #fff;\n  background-color: #3c763d;\n  border-color: #3c763d;\n}\n.list-group-item-info {\n  color: #31708f;\n  background-color: #d9edf7;\n}\na.list-group-item-info,\nbutton.list-group-item-info {\n  color: #31708f;\n}\na.list-group-item-info .list-group-item-heading,\nbutton.list-group-item-info .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-info:hover,\nbutton.list-group-item-info:hover,\na.list-group-item-info:focus,\nbutton.list-group-item-info:focus {\n  color: #31708f;\n  background-color: #c4e3f3;\n}\na.list-group-item-info.active,\nbutton.list-group-item-info.active,\na.list-group-item-info.active:hover,\nbutton.list-group-item-info.active:hover,\na.list-group-item-info.active:focus,\nbutton.list-group-item-info.active:focus {\n  color: #fff;\n  background-color: #31708f;\n  border-color: #31708f;\n}\n.list-group-item-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n}\na.list-group-item-warning,\nbutton.list-group-item-warning {\n  color: #8a6d3b;\n}\na.list-group-item-warning .list-group-item-heading,\nbutton.list-group-item-warning .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-warning:hover,\nbutton.list-group-item-warning:hover,\na.list-group-item-warning:focus,\nbutton.list-group-item-warning:focus {\n  color: #8a6d3b;\n  background-color: #faf2cc;\n}\na.list-group-item-warning.active,\nbutton.list-group-item-warning.active,\na.list-group-item-warning.active:hover,\nbutton.list-group-item-warning.active:hover,\na.list-group-item-warning.active:focus,\nbutton.list-group-item-warning.active:focus {\n  color: #fff;\n  background-color: #8a6d3b;\n  border-color: #8a6d3b;\n}\n.list-group-item-danger {\n  color: #a94442;\n  background-color: #f2dede;\n}\na.list-group-item-danger,\nbutton.list-group-item-danger {\n  color: #a94442;\n}\na.list-group-item-danger .list-group-item-heading,\nbutton.list-group-item-danger .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-danger:hover,\nbutton.list-group-item-danger:hover,\na.list-group-item-danger:focus,\nbutton.list-group-item-danger:focus {\n  color: #a94442;\n  background-color: #ebcccc;\n}\na.list-group-item-danger.active,\nbutton.list-group-item-danger.active,\na.list-group-item-danger.active:hover,\nbutton.list-group-item-danger.active:hover,\na.list-group-item-danger.active:focus,\nbutton.list-group-item-danger.active:focus {\n  color: #fff;\n  background-color: #a94442;\n  border-color: #a94442;\n}\n.list-group-item-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.list-group-item-text {\n  margin-bottom: 0;\n  line-height: 1.3;\n}\n.panel {\n  margin-bottom: 20px;\n  background-color: #fff;\n  border: 1px solid transparent;\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n  box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.panel-body {\n  padding: 15px;\n}\n.panel-heading {\n  padding: 10px 15px;\n  border-bottom: 1px solid transparent;\n  border-top-left-radius: 3px;\n  
border-top-right-radius: 3px;\n}\n.panel-heading > .dropdown .dropdown-toggle {\n  color: inherit;\n}\n.panel-title {\n  margin-top: 0;\n  margin-bottom: 0;\n  font-size: 16px;\n  color: inherit;\n}\n.panel-title > a,\n.panel-title > small,\n.panel-title > .small,\n.panel-title > small > a,\n.panel-title > .small > a {\n  color: inherit;\n}\n.panel-footer {\n  padding: 10px 15px;\n  background-color: #f5f5f5;\n  border-top: 1px solid #ddd;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .list-group,\n.panel > .panel-collapse > .list-group {\n  margin-bottom: 0;\n}\n.panel > .list-group .list-group-item,\n.panel > .panel-collapse > .list-group .list-group-item {\n  border-width: 1px 0;\n  border-radius: 0;\n}\n.panel > .list-group:first-child .list-group-item:first-child,\n.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {\n  border-top: 0;\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .list-group:last-child .list-group-item:last-child,\n.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {\n  border-bottom: 0;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.panel-heading + .list-group .list-group-item:first-child {\n  border-top-width: 0;\n}\n.list-group + .panel-footer {\n  border-top-width: 0;\n}\n.panel > .table,\n.panel > .table-responsive > .table,\n.panel > .panel-collapse > .table {\n  margin-bottom: 0;\n}\n.panel > .table caption,\n.panel > .table-responsive > .table caption,\n.panel > .panel-collapse > .table caption {\n  padding-right: 15px;\n  padding-left: 15px;\n}\n.panel > .table:first-child,\n.panel > .table-responsive:first-child > .table:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {\n  border-top-left-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > 
tr:first-child td:last-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {\n  border-top-right-radius: 3px;\n}\n.panel > .table:last-child,\n.panel > .table-responsive:last-child > .table:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {\n  border-bottom-right-radius: 3px;\n}\n.panel > .panel-body + .table,\n.panel > .panel-body + .table-responsive,\n.panel > .table + .panel-body,\n.panel > .table-responsive + .panel-body {\n  border-top: 1px solid #ddd;\n}\n.panel > .table > tbody:first-child > tr:first-child th,\n.panel > .table > tbody:first-child > tr:first-child td {\n  border-top: 0;\n}\n.panel > .table-bordered,\n.panel > .table-responsive > .table-bordered {\n  border: 0;\n}\n.panel > .table-bordered > thead > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,\n.panel > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-bordered > thead > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,\n.panel 
> .table-bordered > tbody > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-bordered > tfoot > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n  border-left: 0;\n}\n.panel > .table-bordered > thead > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,\n.panel > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-bordered > thead > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,\n.panel > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-bordered > tfoot > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n  border-right: 0;\n}\n.panel > .table-bordered > thead > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,\n.panel > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-bordered > thead > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,\n.panel > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {\n  border-bottom: 0;\n}\n.panel > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-bordered > tfoot > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {\n  border-bottom: 0;\n}\n.panel > .table-responsive {\n  margin-bottom: 0;\n  border: 0;\n}\n.panel-group {\n  margin-bottom: 20px;\n}\n.panel-group .panel {\n  margin-bottom: 0;\n  border-radius: 4px;\n}\n.panel-group .panel + .panel {\n  margin-top: 5px;\n}\n.panel-group .panel-heading {\n  border-bottom: 0;\n}\n.panel-group .panel-heading + .panel-collapse > .panel-body,\n.panel-group .panel-heading + .panel-collapse > .list-group {\n  border-top: 1px solid #ddd;\n}\n.panel-group .panel-footer {\n  border-top: 0;\n}\n.panel-group .panel-footer + .panel-collapse .panel-body {\n  border-bottom: 1px solid #ddd;\n}\n.panel-default {\n  border-color: #ddd;\n}\n.panel-default > .panel-heading {\n  color: #333333;\n  background-color: #f5f5f5;\n  border-color: #ddd;\n}\n.panel-default > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ddd;\n}\n.panel-default > .panel-heading .badge {\n  color: #f5f5f5;\n  background-color: #333333;\n}\n.panel-default > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ddd;\n}\n.panel-primary {\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading {\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #337ab7;\n}\n.panel-primary > .panel-heading .badge 
{\n  color: #337ab7;\n  background-color: #fff;\n}\n.panel-primary > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #337ab7;\n}\n.panel-success {\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #d6e9c6;\n}\n.panel-success > .panel-heading .badge {\n  color: #dff0d8;\n  background-color: #3c763d;\n}\n.panel-success > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #d6e9c6;\n}\n.panel-info {\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #bce8f1;\n}\n.panel-info > .panel-heading .badge {\n  color: #d9edf7;\n  background-color: #31708f;\n}\n.panel-info > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #bce8f1;\n}\n.panel-warning {\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #faebcc;\n}\n.panel-warning > .panel-heading .badge {\n  color: #fcf8e3;\n  background-color: #8a6d3b;\n}\n.panel-warning > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #faebcc;\n}\n.panel-danger {\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ebccd1;\n}\n.panel-danger > .panel-heading .badge {\n  color: #f2dede;\n  background-color: #a94442;\n}\n.panel-danger > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ebccd1;\n}\n.embed-responsive {\n  position: relative;\n  display: block;\n  height: 0;\n  padding: 0;\n  overflow: hidden;\n}\n.embed-responsive .embed-responsive-item,\n.embed-responsive iframe,\n.embed-responsive embed,\n.embed-responsive object,\n.embed-responsive video {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 100%;\n  height: 100%;\n  border: 0;\n}\n.embed-responsive-16by9 {\n  padding-bottom: 56.25%;\n}\n.embed-responsive-4by3 {\n  padding-bottom: 75%;\n}\n.well {\n  min-height: 20px;\n  padding: 19px;\n  margin-bottom: 20px;\n  background-color: #f5f5f5;\n  border: 1px solid #e3e3e3;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.well blockquote {\n  border-color: #ddd;\n  border-color: rgba(0, 0, 0, 0.15);\n}\n.well-lg {\n  padding: 24px;\n  border-radius: 6px;\n}\n.well-sm {\n  padding: 9px;\n  border-radius: 3px;\n}\n.close {\n  float: right;\n  font-size: 21px;\n  font-weight: bold;\n  line-height: 1;\n  color: #000;\n  text-shadow: 0 1px 0 #fff;\n  filter: alpha(opacity=20);\n  opacity: 0.2;\n}\n.close:hover,\n.close:focus {\n  color: #000;\n  text-decoration: none;\n  cursor: pointer;\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\nbutton.close {\n  padding: 0;\n  cursor: pointer;\n  background: transparent;\n  border: 0;\n  -webkit-appearance: none;\n  appearance: none;\n}\n.modal-open {\n  overflow: hidden;\n}\n.modal {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 1050;\n  display: none;\n  overflow: 
hidden;\n  -webkit-overflow-scrolling: touch;\n  outline: 0;\n}\n.modal.fade .modal-dialog {\n  -webkit-transform: translate(0, -25%);\n  -ms-transform: translate(0, -25%);\n  -o-transform: translate(0, -25%);\n  transform: translate(0, -25%);\n  -webkit-transition: -webkit-transform 0.3s ease-out;\n  -moz-transition: -moz-transform 0.3s ease-out;\n  -o-transition: -o-transform 0.3s ease-out;\n  transition: transform 0.3s ease-out;\n}\n.modal.in .modal-dialog {\n  -webkit-transform: translate(0, 0);\n  -ms-transform: translate(0, 0);\n  -o-transform: translate(0, 0);\n  transform: translate(0, 0);\n}\n.modal-open .modal {\n  overflow-x: hidden;\n  overflow-y: auto;\n}\n.modal-dialog {\n  position: relative;\n  width: auto;\n  margin: 10px;\n}\n.modal-content {\n  position: relative;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #999;\n  border: 1px solid rgba(0, 0, 0, 0.2);\n  border-radius: 6px;\n  -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n  box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n  outline: 0;\n}\n.modal-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 1040;\n  background-color: #000;\n}\n.modal-backdrop.fade {\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.modal-backdrop.in {\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\n.modal-header {\n  padding: 15px;\n  border-bottom: 1px solid #e5e5e5;\n}\n.modal-header .close {\n  margin-top: -2px;\n}\n.modal-title {\n  margin: 0;\n  line-height: 1.42857143;\n}\n.modal-body {\n  position: relative;\n  padding: 15px;\n}\n.modal-footer {\n  padding: 15px;\n  text-align: right;\n  border-top: 1px solid #e5e5e5;\n}\n.modal-footer .btn + .btn {\n  margin-bottom: 0;\n  margin-left: 5px;\n}\n.modal-footer .btn-group .btn + .btn {\n  margin-left: -1px;\n}\n.modal-footer .btn-block + .btn-block {\n  margin-left: 0;\n}\n.modal-scrollbar-measure {\n  position: absolute;\n  top: -9999px;\n  width: 50px;\n  height: 50px;\n  overflow: scroll;\n}\n@media (min-width: 768px) {\n  .modal-dialog {\n    width: 600px;\n    margin: 30px auto;\n  }\n  .modal-content {\n    -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n    box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n  }\n  .modal-sm {\n    width: 300px;\n  }\n}\n@media (min-width: 992px) {\n  .modal-lg {\n    width: 900px;\n  }\n}\n.tooltip {\n  position: absolute;\n  z-index: 1070;\n  display: block;\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1.42857143;\n  line-break: auto;\n  text-align: left;\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n  font-size: 12px;\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.tooltip.in {\n  filter: alpha(opacity=90);\n  opacity: 0.9;\n}\n.tooltip.top {\n  padding: 5px 0;\n  margin-top: -3px;\n}\n.tooltip.right {\n  padding: 0 5px;\n  margin-left: 3px;\n}\n.tooltip.bottom {\n  padding: 5px 0;\n  margin-top: 3px;\n}\n.tooltip.left {\n  padding: 0 5px;\n  margin-left: -3px;\n}\n.tooltip.top .tooltip-arrow {\n  bottom: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-left .tooltip-arrow {\n  right: 5px;\n  bottom: 0;\n  margin-bottom: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-right .tooltip-arrow {\n  bottom: 0;\n  left: 5px;\n  margin-bottom: 
-5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.right .tooltip-arrow {\n  top: 50%;\n  left: 0;\n  margin-top: -5px;\n  border-width: 5px 5px 5px 0;\n  border-right-color: #000;\n}\n.tooltip.left .tooltip-arrow {\n  top: 50%;\n  right: 0;\n  margin-top: -5px;\n  border-width: 5px 0 5px 5px;\n  border-left-color: #000;\n}\n.tooltip.bottom .tooltip-arrow {\n  top: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-left .tooltip-arrow {\n  top: 0;\n  right: 5px;\n  margin-top: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-right .tooltip-arrow {\n  top: 0;\n  left: 5px;\n  margin-top: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip-inner {\n  max-width: 200px;\n  padding: 3px 8px;\n  color: #fff;\n  text-align: center;\n  background-color: #000;\n  border-radius: 4px;\n}\n.tooltip-arrow {\n  position: absolute;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.popover {\n  position: absolute;\n  top: 0;\n  left: 0;\n  z-index: 1060;\n  display: none;\n  max-width: 276px;\n  padding: 1px;\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1.42857143;\n  line-break: auto;\n  text-align: left;\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n  font-size: 14px;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, 0.2);\n  border-radius: 6px;\n  -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);\n  box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);\n}\n.popover.top {\n  margin-top: -10px;\n}\n.popover.right {\n  margin-left: 10px;\n}\n.popover.bottom {\n  margin-top: 10px;\n}\n.popover.left {\n  margin-left: -10px;\n}\n.popover > .arrow {\n  border-width: 11px;\n}\n.popover > .arrow,\n.popover > .arrow:after {\n  position: absolute;\n  display: block;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.popover > .arrow:after {\n  content: \"\";\n  border-width: 10px;\n}\n.popover.top > .arrow {\n  bottom: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-color: #999999;\n  border-top-color: rgba(0, 0, 0, 0.25);\n  border-bottom-width: 0;\n}\n.popover.top > .arrow:after {\n  bottom: 1px;\n  margin-left: -10px;\n  content: \" \";\n  border-top-color: #fff;\n  border-bottom-width: 0;\n}\n.popover.right > .arrow {\n  top: 50%;\n  left: -11px;\n  margin-top: -11px;\n  border-right-color: #999999;\n  border-right-color: rgba(0, 0, 0, 0.25);\n  border-left-width: 0;\n}\n.popover.right > .arrow:after {\n  bottom: -10px;\n  left: 1px;\n  content: \" \";\n  border-right-color: #fff;\n  border-left-width: 0;\n}\n.popover.bottom > .arrow {\n  top: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-width: 0;\n  border-bottom-color: #999999;\n  border-bottom-color: rgba(0, 0, 0, 0.25);\n}\n.popover.bottom > .arrow:after {\n  top: 1px;\n  margin-left: -10px;\n  content: \" \";\n  border-top-width: 0;\n  border-bottom-color: #fff;\n}\n.popover.left > .arrow {\n  top: 50%;\n  right: -11px;\n  margin-top: -11px;\n  border-right-width: 0;\n  border-left-color: #999999;\n  border-left-color: rgba(0, 0, 0, 0.25);\n}\n.popover.left > .arrow:after {\n  right: 1px;\n  bottom: -10px;\n 
 content: \" \";\n  border-right-width: 0;\n  border-left-color: #fff;\n}\n.popover-title {\n  padding: 8px 14px;\n  margin: 0;\n  font-size: 14px;\n  background-color: #f7f7f7;\n  border-bottom: 1px solid #ebebeb;\n  border-radius: 5px 5px 0 0;\n}\n.popover-content {\n  padding: 9px 14px;\n}\n.carousel {\n  position: relative;\n}\n.carousel-inner {\n  position: relative;\n  width: 100%;\n  overflow: hidden;\n}\n.carousel-inner > .item {\n  position: relative;\n  display: none;\n  -webkit-transition: 0.6s ease-in-out left;\n  -o-transition: 0.6s ease-in-out left;\n  transition: 0.6s ease-in-out left;\n}\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n  line-height: 1;\n}\n@media all and (transform-3d), (-webkit-transform-3d) {\n  .carousel-inner > .item {\n    -webkit-transition: -webkit-transform 0.6s ease-in-out;\n    -moz-transition: -moz-transform 0.6s ease-in-out;\n    -o-transition: -o-transform 0.6s ease-in-out;\n    transition: transform 0.6s ease-in-out;\n    -webkit-backface-visibility: hidden;\n    -moz-backface-visibility: hidden;\n    backface-visibility: hidden;\n    -webkit-perspective: 1000px;\n    -moz-perspective: 1000px;\n    perspective: 1000px;\n  }\n  .carousel-inner > .item.next,\n  .carousel-inner > .item.active.right {\n    -webkit-transform: translate3d(100%, 0, 0);\n    transform: translate3d(100%, 0, 0);\n    left: 0;\n  }\n  .carousel-inner > .item.prev,\n  .carousel-inner > .item.active.left {\n    -webkit-transform: translate3d(-100%, 0, 0);\n    transform: translate3d(-100%, 0, 0);\n    left: 0;\n  }\n  .carousel-inner > .item.next.left,\n  .carousel-inner > .item.prev.right,\n  .carousel-inner > .item.active {\n    -webkit-transform: translate3d(0, 0, 0);\n    transform: translate3d(0, 0, 0);\n    left: 0;\n  }\n}\n.carousel-inner > .active,\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  display: block;\n}\n.carousel-inner > .active {\n  left: 0;\n}\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  position: absolute;\n  top: 0;\n  width: 100%;\n}\n.carousel-inner > .next {\n  left: 100%;\n}\n.carousel-inner > .prev {\n  left: -100%;\n}\n.carousel-inner > .next.left,\n.carousel-inner > .prev.right {\n  left: 0;\n}\n.carousel-inner > .active.left {\n  left: -100%;\n}\n.carousel-inner > .active.right {\n  left: 100%;\n}\n.carousel-control {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 15%;\n  font-size: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);\n  background-color: rgba(0, 0, 0, 0);\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\n.carousel-control.left {\n  background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n  background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n  background-image: linear-gradient(to right, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control.right {\n  right: 0;\n  left: auto;\n  background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n  background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n  background-image: linear-gradient(to right, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', 
endColorstr='#80000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control:hover,\n.carousel-control:focus {\n  color: #fff;\n  text-decoration: none;\n  outline: 0;\n  filter: alpha(opacity=90);\n  opacity: 0.9;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-left,\n.carousel-control .glyphicon-chevron-right {\n  position: absolute;\n  top: 50%;\n  z-index: 5;\n  display: inline-block;\n  margin-top: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .glyphicon-chevron-left {\n  left: 50%;\n  margin-left: -10px;\n}\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-right {\n  right: 50%;\n  margin-right: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next {\n  width: 20px;\n  height: 20px;\n  font-family: serif;\n  line-height: 1;\n}\n.carousel-control .icon-prev:before {\n  content: \"\\2039\";\n}\n.carousel-control .icon-next:before {\n  content: \"\\203a\";\n}\n.carousel-indicators {\n  position: absolute;\n  bottom: 10px;\n  left: 50%;\n  z-index: 15;\n  width: 60%;\n  padding-left: 0;\n  margin-left: -30%;\n  text-align: center;\n  list-style: none;\n}\n.carousel-indicators li {\n  display: inline-block;\n  width: 10px;\n  height: 10px;\n  margin: 1px;\n  text-indent: -999px;\n  cursor: pointer;\n  background-color: #000 \\9;\n  background-color: rgba(0, 0, 0, 0);\n  border: 1px solid #fff;\n  border-radius: 10px;\n}\n.carousel-indicators .active {\n  width: 12px;\n  height: 12px;\n  margin: 0;\n  background-color: #fff;\n}\n.carousel-caption {\n  position: absolute;\n  right: 15%;\n  bottom: 20px;\n  left: 15%;\n  z-index: 10;\n  padding-top: 20px;\n  padding-bottom: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);\n}\n.carousel-caption .btn {\n  text-shadow: none;\n}\n@media screen and (min-width: 768px) {\n  .carousel-control .glyphicon-chevron-left,\n  .carousel-control .glyphicon-chevron-right,\n  .carousel-control .icon-prev,\n  .carousel-control .icon-next {\n    width: 30px;\n    height: 30px;\n    margin-top: -10px;\n    font-size: 30px;\n  }\n  .carousel-control .glyphicon-chevron-left,\n  .carousel-control .icon-prev {\n    margin-left: -10px;\n  }\n  .carousel-control .glyphicon-chevron-right,\n  .carousel-control .icon-next {\n    margin-right: -10px;\n  }\n  .carousel-caption {\n    right: 20%;\n    left: 20%;\n    padding-bottom: 30px;\n  }\n  .carousel-indicators {\n    bottom: 20px;\n  }\n}\n.clearfix:before,\n.clearfix:after,\n.dl-horizontal dd:before,\n.dl-horizontal dd:after,\n.container:before,\n.container:after,\n.container-fluid:before,\n.container-fluid:after,\n.row:before,\n.row:after,\n.form-horizontal .form-group:before,\n.form-horizontal .form-group:after,\n.btn-toolbar:before,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:before,\n.btn-group-vertical > .btn-group:after,\n.nav:before,\n.nav:after,\n.navbar:before,\n.navbar:after,\n.navbar-header:before,\n.navbar-header:after,\n.navbar-collapse:before,\n.navbar-collapse:after,\n.pager:before,\n.pager:after,\n.panel-body:before,\n.panel-body:after,\n.modal-header:before,\n.modal-header:after,\n.modal-footer:before,\n.modal-footer:after {\n  display: table;\n  content: \" \";\n}\n.clearfix:after,\n.dl-horizontal dd:after,\n.container:after,\n.container-fluid:after,\n.row:after,\n.form-horizontal .form-group:after,\n.btn-toolbar:after,\n.btn-group-vertical > 
.btn-group:after,\n.nav:after,\n.navbar:after,\n.navbar-header:after,\n.navbar-collapse:after,\n.pager:after,\n.panel-body:after,\n.modal-header:after,\n.modal-footer:after {\n  clear: both;\n}\n.center-block {\n  display: block;\n  margin-right: auto;\n  margin-left: auto;\n}\n.pull-right {\n  float: right !important;\n}\n.pull-left {\n  float: left !important;\n}\n.hide {\n  display: none !important;\n}\n.show {\n  display: block !important;\n}\n.invisible {\n  visibility: hidden;\n}\n.text-hide {\n  font: 0/0 a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n.hidden {\n  display: none !important;\n}\n.affix {\n  position: fixed;\n}\n@-ms-viewport {\n  width: device-width;\n}\n.visible-xs,\n.visible-sm,\n.visible-md,\n.visible-lg {\n  display: none !important;\n}\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n  display: none !important;\n}\n@media (max-width: 767px) {\n  .visible-xs {\n    display: block !important;\n  }\n  table.visible-xs {\n    display: table !important;\n  }\n  tr.visible-xs {\n    display: table-row !important;\n  }\n  th.visible-xs,\n  td.visible-xs {\n    display: table-cell !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-block {\n    display: block !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-inline {\n    display: inline !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm {\n    display: block !important;\n  }\n  table.visible-sm {\n    display: table !important;\n  }\n  tr.visible-sm {\n    display: table-row !important;\n  }\n  th.visible-sm,\n  td.visible-sm {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-block {\n    display: block !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md {\n    display: block !important;\n  }\n  table.visible-md {\n    display: table !important;\n  }\n  tr.visible-md {\n    display: table-row !important;\n  }\n  th.visible-md,\n  td.visible-md {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-block {\n    display: block !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg {\n    display: block !important;\n  }\n  table.visible-lg {\n    display: table !important;\n  }\n  tr.visible-lg {\n    display: table-row !important;\n  }\n  th.visible-lg,\n  td.visible-lg {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-block {\n    display: block !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 1200px) {\n  
.visible-lg-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (max-width: 767px) {\n  .hidden-xs {\n    display: none !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .hidden-sm {\n    display: none !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .hidden-md {\n    display: none !important;\n  }\n}\n@media (min-width: 1200px) {\n  .hidden-lg {\n    display: none !important;\n  }\n}\n.visible-print {\n  display: none !important;\n}\n@media print {\n  .visible-print {\n    display: block !important;\n  }\n  table.visible-print {\n    display: table !important;\n  }\n  tr.visible-print {\n    display: table-row !important;\n  }\n  th.visible-print,\n  td.visible-print {\n    display: table-cell !important;\n  }\n}\n.visible-print-block {\n  display: none !important;\n}\n@media print {\n  .visible-print-block {\n    display: block !important;\n  }\n}\n.visible-print-inline {\n  display: none !important;\n}\n@media print {\n  .visible-print-inline {\n    display: inline !important;\n  }\n}\n.visible-print-inline-block {\n  display: none !important;\n}\n@media print {\n  .visible-print-inline-block {\n    display: inline-block !important;\n  }\n}\n@media print {\n  .hidden-print {\n    display: none !important;\n  }\n}\n/*# sourceMappingURL=bootstrap.css.map */","// stylelint-disable\n\n/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */\n\n//\n// 1. Set default font family to sans-serif.\n// 2. Prevent iOS and IE text size adjust after device orientation change,\n//    without disabling user zoom.\n//\n\nhtml {\n  font-family: sans-serif; // 1\n  -ms-text-size-adjust: 100%; // 2\n  -webkit-text-size-adjust: 100%; // 2\n}\n\n//\n// Remove default margin.\n//\n\nbody {\n  margin: 0;\n}\n\n// HTML5 display definitions\n// ==========================================================================\n\n//\n// Correct `block` display not defined for any HTML5 element in IE 8/9.\n// Correct `block` display not defined for `details` or `summary` in IE 10/11\n// and Firefox.\n// Correct `block` display not defined for `main` in IE 11.\n//\n\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n  display: block;\n}\n\n//\n// 1. Correct `inline-block` display not defined in IE 8/9.\n// 2. Normalize vertical alignment of `progress` in Chrome, Firefox, and Opera.\n//\n\naudio,\ncanvas,\nprogress,\nvideo {\n  display: inline-block; // 1\n  vertical-align: baseline; // 2\n}\n\n//\n// Prevent modern browsers from displaying `audio` without controls.\n// Remove excess height in iOS 5 devices.\n//\n\naudio:not([controls]) {\n  display: none;\n  height: 0;\n}\n\n//\n// Address `[hidden]` styling not present in IE 8/9/10.\n// Hide the `template` element in IE 8/9/10/11, Safari, and Firefox < 22.\n//\n\n[hidden],\ntemplate {\n  display: none;\n}\n\n// Links\n// ==========================================================================\n\n//\n// Remove the gray background color from active links in IE 10.\n//\n\na {\n  background-color: transparent;\n}\n\n//\n// Improve readability of focused elements when they are also in an\n// active/hover state.\n//\n\na:active,\na:hover {\n  outline: 0;\n}\n\n// Text-level semantics\n// ==========================================================================\n\n//\n// 1. Remove the bottom border in Chrome 57- and Firefox 39-.\n// 2. 
Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.\n//\n\nabbr[title] {\n  border-bottom: none; // 1\n  text-decoration: underline; // 2\n  text-decoration: underline dotted; // 2\n}\n\n//\n// Address style set to `bolder` in Firefox 4+, Safari, and Chrome.\n//\n\nb,\nstrong {\n  font-weight: bold;\n}\n\n//\n// Address styling not present in Safari and Chrome.\n//\n\ndfn {\n  font-style: italic;\n}\n\n//\n// Address variable `h1` font-size and margin within `section` and `article`\n// contexts in Firefox 4+, Safari, and Chrome.\n//\n\nh1 {\n  font-size: 2em;\n  margin: 0.67em 0;\n}\n\n//\n// Address styling not present in IE 8/9.\n//\n\nmark {\n  background: #ff0;\n  color: #000;\n}\n\n//\n// Address inconsistent and variable font size in all browsers.\n//\n\nsmall {\n  font-size: 80%;\n}\n\n//\n// Prevent `sub` and `sup` affecting `line-height` in all browsers.\n//\n\nsub,\nsup {\n  font-size: 75%;\n  line-height: 0;\n  position: relative;\n  vertical-align: baseline;\n}\n\nsup {\n  top: -0.5em;\n}\n\nsub {\n  bottom: -0.25em;\n}\n\n// Embedded content\n// ==========================================================================\n\n//\n// Remove border when inside `a` element in IE 8/9/10.\n//\n\nimg {\n  border: 0;\n}\n\n//\n// Correct overflow not hidden in IE 9/10/11.\n//\n\nsvg:not(:root) {\n  overflow: hidden;\n}\n\n// Grouping content\n// ==========================================================================\n\n//\n// Address margin not present in IE 8/9 and Safari.\n//\n\nfigure {\n  margin: 1em 40px;\n}\n\n//\n// Address differences between Firefox and other browsers.\n//\n\nhr {\n  box-sizing: content-box;\n  height: 0;\n}\n\n//\n// Contain overflow in all browsers.\n//\n\npre {\n  overflow: auto;\n}\n\n//\n// Address odd `em`-unit font size rendering in all browsers.\n//\n\ncode,\nkbd,\npre,\nsamp {\n  font-family: monospace, monospace;\n  font-size: 1em;\n}\n\n// Forms\n// ==========================================================================\n\n//\n// Known limitation: by default, Chrome and Safari on OS X allow very limited\n// styling of `select`, unless a `border` property is set.\n//\n\n//\n// 1. Correct color not being inherited.\n//    Known issue: affects color of disabled elements.\n// 2. Correct font properties not being inherited.\n// 3. Address margins set differently in Firefox 4+, Safari, and Chrome.\n//\n\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n  color: inherit; // 1\n  font: inherit; // 2\n  margin: 0; // 3\n}\n\n//\n// Address `overflow` set to `hidden` in IE 8/9/10/11.\n//\n\nbutton {\n  overflow: visible;\n}\n\n//\n// Address inconsistent `text-transform` inheritance for `button` and `select`.\n// All other form control elements do not inherit `text-transform` values.\n// Correct `button` style inheritance in Firefox, IE 8/9/10/11, and Opera.\n// Correct `select` style inheritance in Firefox.\n//\n\nbutton,\nselect {\n  text-transform: none;\n}\n\n//\n// 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio`\n//    and `video` controls.\n// 2. Correct inability to style clickable `input` types in iOS.\n// 3. 
Improve usability and consistency of cursor style between image-type\n//    `input` and others.\n//\n\nbutton,\nhtml input[type=\"button\"], // 1\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n  -webkit-appearance: button; // 2\n  cursor: pointer; // 3\n}\n\n//\n// Re-set default cursor for disabled elements.\n//\n\nbutton[disabled],\nhtml input[disabled] {\n  cursor: default;\n}\n\n//\n// Remove inner padding and border in Firefox 4+.\n//\n\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n  border: 0;\n  padding: 0;\n}\n\n//\n// Address Firefox 4+ setting `line-height` on `input` using `!important` in\n// the UA stylesheet.\n//\n\ninput {\n  line-height: normal;\n}\n\n//\n// It's recommended that you don't attempt to style these elements.\n// Firefox's implementation doesn't respect box-sizing, padding, or width.\n//\n// 1. Address box sizing set to `content-box` in IE 8/9/10.\n// 2. Remove excess padding in IE 8/9/10.\n//\n\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n  box-sizing: border-box; // 1\n  padding: 0; // 2\n}\n\n//\n// Fix the cursor style for Chrome's increment/decrement buttons. For certain\n// `font-size` values of the `input`, it causes the cursor style of the\n// decrement button to change from `default` to `text`.\n//\n\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n  height: auto;\n}\n\n//\n// 1. Address `appearance` set to `searchfield` in Safari and Chrome.\n// 2. Address `box-sizing` set to `border-box` in Safari and Chrome.\n//\n\ninput[type=\"search\"] {\n  -webkit-appearance: textfield; // 1\n  box-sizing: content-box; //2\n}\n\n//\n// Remove inner padding and search cancel button in Safari and Chrome on OS X.\n// Safari (but not Chrome) clips the cancel button when the search input has\n// padding (and `textfield` appearance).\n//\n\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n  -webkit-appearance: none;\n}\n\n//\n// Define consistent border, margin, and padding.\n//\n\nfieldset {\n  border: 1px solid #c0c0c0;\n  margin: 0 2px;\n  padding: 0.35em 0.625em 0.75em;\n}\n\n//\n// 1. Correct `color` not being inherited in IE 8/9/10/11.\n// 2. Remove padding so people aren't caught out if they zero out fieldsets.\n//\n\nlegend {\n  border: 0; // 1\n  padding: 0; // 2\n}\n\n//\n// Remove default vertical scrollbar in IE 8/9/10/11.\n//\n\ntextarea {\n  overflow: auto;\n}\n\n//\n// Don't inherit the `font-weight` (applied by a rule above).\n// NOTE: the default cannot safely be changed in Chrome and Safari on OS X.\n//\n\noptgroup {\n  font-weight: bold;\n}\n\n// Tables\n// ==========================================================================\n\n//\n// Remove most spacing between table cells.\n//\n\ntable {\n  border-collapse: collapse;\n  border-spacing: 0;\n}\n\ntd,\nth {\n  padding: 0;\n}\n","/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n/*! 
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */\nhtml {\n  font-family: sans-serif;\n  -ms-text-size-adjust: 100%;\n  -webkit-text-size-adjust: 100%;\n}\nbody {\n  margin: 0;\n}\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n  display: block;\n}\naudio,\ncanvas,\nprogress,\nvideo {\n  display: inline-block;\n  vertical-align: baseline;\n}\naudio:not([controls]) {\n  display: none;\n  height: 0;\n}\n[hidden],\ntemplate {\n  display: none;\n}\na {\n  background-color: transparent;\n}\na:active,\na:hover {\n  outline: 0;\n}\nabbr[title] {\n  border-bottom: none;\n  text-decoration: underline;\n  -webkit-text-decoration: underline dotted;\n  -moz-text-decoration: underline dotted;\n  text-decoration: underline dotted;\n}\nb,\nstrong {\n  font-weight: bold;\n}\ndfn {\n  font-style: italic;\n}\nh1 {\n  font-size: 2em;\n  margin: 0.67em 0;\n}\nmark {\n  background: #ff0;\n  color: #000;\n}\nsmall {\n  font-size: 80%;\n}\nsub,\nsup {\n  font-size: 75%;\n  line-height: 0;\n  position: relative;\n  vertical-align: baseline;\n}\nsup {\n  top: -0.5em;\n}\nsub {\n  bottom: -0.25em;\n}\nimg {\n  border: 0;\n}\nsvg:not(:root) {\n  overflow: hidden;\n}\nfigure {\n  margin: 1em 40px;\n}\nhr {\n  -webkit-box-sizing: content-box;\n  -moz-box-sizing: content-box;\n  box-sizing: content-box;\n  height: 0;\n}\npre {\n  overflow: auto;\n}\ncode,\nkbd,\npre,\nsamp {\n  font-family: monospace, monospace;\n  font-size: 1em;\n}\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n  color: inherit;\n  font: inherit;\n  margin: 0;\n}\nbutton {\n  overflow: visible;\n}\nbutton,\nselect {\n  text-transform: none;\n}\nbutton,\nhtml input[type=\"button\"],\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n  -webkit-appearance: button;\n  cursor: pointer;\n}\nbutton[disabled],\nhtml input[disabled] {\n  cursor: default;\n}\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n  border: 0;\n  padding: 0;\n}\ninput {\n  line-height: normal;\n}\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n  padding: 0;\n}\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n  height: auto;\n}\ninput[type=\"search\"] {\n  -webkit-appearance: textfield;\n  -webkit-box-sizing: content-box;\n  -moz-box-sizing: content-box;\n  box-sizing: content-box;\n}\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n  -webkit-appearance: none;\n}\nfieldset {\n  border: 1px solid #c0c0c0;\n  margin: 0 2px;\n  padding: 0.35em 0.625em 0.75em;\n}\nlegend {\n  border: 0;\n  padding: 0;\n}\ntextarea {\n  overflow: auto;\n}\noptgroup {\n  font-weight: bold;\n}\ntable {\n  border-collapse: collapse;\n  border-spacing: 0;\n}\ntd,\nth {\n  padding: 0;\n}\n/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n@media print {\n  *,\n  *:before,\n  *:after {\n    color: #000 !important;\n    text-shadow: none !important;\n    background: transparent !important;\n    -webkit-box-shadow: none !important;\n    box-shadow: none !important;\n  }\n  a,\n  a:visited {\n    text-decoration: underline;\n  }\n  a[href]:after {\n    content: \" (\" attr(href) \")\";\n  }\n  abbr[title]:after {\n    content: \" (\" attr(title) \")\";\n  }\n  a[href^=\"#\"]:after,\n  a[href^=\"javascript:\"]:after {\n    content: \"\";\n  }\n  pre,\n  blockquote {\n    border: 1px solid #999;\n    page-break-inside: avoid;\n  }\n  thead {\n    display: table-header-group;\n  }\n  tr,\n  img {\n    page-break-inside: avoid;\n  }\n  img {\n    max-width: 100% !important;\n  }\n  p,\n  h2,\n  h3 {\n    orphans: 3;\n    widows: 3;\n  }\n  h2,\n  h3 {\n    page-break-after: avoid;\n  }\n  .navbar {\n    display: none;\n  }\n  .btn > .caret,\n  .dropup > .btn > .caret {\n    border-top-color: #000 !important;\n  }\n  .label {\n    border: 1px solid #000;\n  }\n  .table {\n    border-collapse: collapse !important;\n  }\n  .table td,\n  .table th {\n    background-color: #fff !important;\n  }\n  .table-bordered th,\n  .table-bordered td {\n    border: 1px solid #ddd !important;\n  }\n}\n@font-face {\n  font-family: \"Glyphicons Halflings\";\n  src: url(\"../fonts/glyphicons-halflings-regular.eot\");\n  src: url(\"../fonts/glyphicons-halflings-regular.eot?#iefix\") format(\"embedded-opentype\"), url(\"../fonts/glyphicons-halflings-regular.woff2\") format(\"woff2\"), url(\"../fonts/glyphicons-halflings-regular.woff\") format(\"woff\"), url(\"../fonts/glyphicons-halflings-regular.ttf\") format(\"truetype\"), url(\"../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular\") format(\"svg\");\n}\n.glyphicon {\n  position: relative;\n  top: 1px;\n  display: inline-block;\n  font-family: \"Glyphicons Halflings\";\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1;\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n.glyphicon-asterisk:before {\n  content: \"\\002a\";\n}\n.glyphicon-plus:before {\n  content: \"\\002b\";\n}\n.glyphicon-euro:before,\n.glyphicon-eur:before {\n  content: \"\\20ac\";\n}\n.glyphicon-minus:before {\n  content: \"\\2212\";\n}\n.glyphicon-cloud:before {\n  content: \"\\2601\";\n}\n.glyphicon-envelope:before {\n  content: \"\\2709\";\n}\n.glyphicon-pencil:before {\n  content: \"\\270f\";\n}\n.glyphicon-glass:before {\n  content: \"\\e001\";\n}\n.glyphicon-music:before {\n  content: \"\\e002\";\n}\n.glyphicon-search:before {\n  content: \"\\e003\";\n}\n.glyphicon-heart:before {\n  content: \"\\e005\";\n}\n.glyphicon-star:before {\n  content: \"\\e006\";\n}\n.glyphicon-star-empty:before {\n  content: \"\\e007\";\n}\n.glyphicon-user:before {\n  content: \"\\e008\";\n}\n.glyphicon-film:before {\n  content: \"\\e009\";\n}\n.glyphicon-th-large:before {\n  content: \"\\e010\";\n}\n.glyphicon-th:before {\n  content: \"\\e011\";\n}\n.glyphicon-th-list:before {\n  content: \"\\e012\";\n}\n.glyphicon-ok:before {\n  content: \"\\e013\";\n}\n.glyphicon-remove:before {\n  content: \"\\e014\";\n}\n.glyphicon-zoom-in:before {\n  content: \"\\e015\";\n}\n.glyphicon-zoom-out:before {\n  content: \"\\e016\";\n}\n.glyphicon-off:before {\n  content: \"\\e017\";\n}\n.glyphicon-signal:before {\n  content: \"\\e018\";\n}\n.glyphicon-cog:before {\n  content: \"\\e019\";\n}\n.glyphicon-trash:before {\n  
content: \"\\e020\";\n}\n.glyphicon-home:before {\n  content: \"\\e021\";\n}\n.glyphicon-file:before {\n  content: \"\\e022\";\n}\n.glyphicon-time:before {\n  content: \"\\e023\";\n}\n.glyphicon-road:before {\n  content: \"\\e024\";\n}\n.glyphicon-download-alt:before {\n  content: \"\\e025\";\n}\n.glyphicon-download:before {\n  content: \"\\e026\";\n}\n.glyphicon-upload:before {\n  content: \"\\e027\";\n}\n.glyphicon-inbox:before {\n  content: \"\\e028\";\n}\n.glyphicon-play-circle:before {\n  content: \"\\e029\";\n}\n.glyphicon-repeat:before {\n  content: \"\\e030\";\n}\n.glyphicon-refresh:before {\n  content: \"\\e031\";\n}\n.glyphicon-list-alt:before {\n  content: \"\\e032\";\n}\n.glyphicon-lock:before {\n  content: \"\\e033\";\n}\n.glyphicon-flag:before {\n  content: \"\\e034\";\n}\n.glyphicon-headphones:before {\n  content: \"\\e035\";\n}\n.glyphicon-volume-off:before {\n  content: \"\\e036\";\n}\n.glyphicon-volume-down:before {\n  content: \"\\e037\";\n}\n.glyphicon-volume-up:before {\n  content: \"\\e038\";\n}\n.glyphicon-qrcode:before {\n  content: \"\\e039\";\n}\n.glyphicon-barcode:before {\n  content: \"\\e040\";\n}\n.glyphicon-tag:before {\n  content: \"\\e041\";\n}\n.glyphicon-tags:before {\n  content: \"\\e042\";\n}\n.glyphicon-book:before {\n  content: \"\\e043\";\n}\n.glyphicon-bookmark:before {\n  content: \"\\e044\";\n}\n.glyphicon-print:before {\n  content: \"\\e045\";\n}\n.glyphicon-camera:before {\n  content: \"\\e046\";\n}\n.glyphicon-font:before {\n  content: \"\\e047\";\n}\n.glyphicon-bold:before {\n  content: \"\\e048\";\n}\n.glyphicon-italic:before {\n  content: \"\\e049\";\n}\n.glyphicon-text-height:before {\n  content: \"\\e050\";\n}\n.glyphicon-text-width:before {\n  content: \"\\e051\";\n}\n.glyphicon-align-left:before {\n  content: \"\\e052\";\n}\n.glyphicon-align-center:before {\n  content: \"\\e053\";\n}\n.glyphicon-align-right:before {\n  content: \"\\e054\";\n}\n.glyphicon-align-justify:before {\n  content: \"\\e055\";\n}\n.glyphicon-list:before {\n  content: \"\\e056\";\n}\n.glyphicon-indent-left:before {\n  content: \"\\e057\";\n}\n.glyphicon-indent-right:before {\n  content: \"\\e058\";\n}\n.glyphicon-facetime-video:before {\n  content: \"\\e059\";\n}\n.glyphicon-picture:before {\n  content: \"\\e060\";\n}\n.glyphicon-map-marker:before {\n  content: \"\\e062\";\n}\n.glyphicon-adjust:before {\n  content: \"\\e063\";\n}\n.glyphicon-tint:before {\n  content: \"\\e064\";\n}\n.glyphicon-edit:before {\n  content: \"\\e065\";\n}\n.glyphicon-share:before {\n  content: \"\\e066\";\n}\n.glyphicon-check:before {\n  content: \"\\e067\";\n}\n.glyphicon-move:before {\n  content: \"\\e068\";\n}\n.glyphicon-step-backward:before {\n  content: \"\\e069\";\n}\n.glyphicon-fast-backward:before {\n  content: \"\\e070\";\n}\n.glyphicon-backward:before {\n  content: \"\\e071\";\n}\n.glyphicon-play:before {\n  content: \"\\e072\";\n}\n.glyphicon-pause:before {\n  content: \"\\e073\";\n}\n.glyphicon-stop:before {\n  content: \"\\e074\";\n}\n.glyphicon-forward:before {\n  content: \"\\e075\";\n}\n.glyphicon-fast-forward:before {\n  content: \"\\e076\";\n}\n.glyphicon-step-forward:before {\n  content: \"\\e077\";\n}\n.glyphicon-eject:before {\n  content: \"\\e078\";\n}\n.glyphicon-chevron-left:before {\n  content: \"\\e079\";\n}\n.glyphicon-chevron-right:before {\n  content: \"\\e080\";\n}\n.glyphicon-plus-sign:before {\n  content: \"\\e081\";\n}\n.glyphicon-minus-sign:before {\n  content: \"\\e082\";\n}\n.glyphicon-remove-sign:before {\n  content: 
\"\\e083\";\n}\n.glyphicon-ok-sign:before {\n  content: \"\\e084\";\n}\n.glyphicon-question-sign:before {\n  content: \"\\e085\";\n}\n.glyphicon-info-sign:before {\n  content: \"\\e086\";\n}\n.glyphicon-screenshot:before {\n  content: \"\\e087\";\n}\n.glyphicon-remove-circle:before {\n  content: \"\\e088\";\n}\n.glyphicon-ok-circle:before {\n  content: \"\\e089\";\n}\n.glyphicon-ban-circle:before {\n  content: \"\\e090\";\n}\n.glyphicon-arrow-left:before {\n  content: \"\\e091\";\n}\n.glyphicon-arrow-right:before {\n  content: \"\\e092\";\n}\n.glyphicon-arrow-up:before {\n  content: \"\\e093\";\n}\n.glyphicon-arrow-down:before {\n  content: \"\\e094\";\n}\n.glyphicon-share-alt:before {\n  content: \"\\e095\";\n}\n.glyphicon-resize-full:before {\n  content: \"\\e096\";\n}\n.glyphicon-resize-small:before {\n  content: \"\\e097\";\n}\n.glyphicon-exclamation-sign:before {\n  content: \"\\e101\";\n}\n.glyphicon-gift:before {\n  content: \"\\e102\";\n}\n.glyphicon-leaf:before {\n  content: \"\\e103\";\n}\n.glyphicon-fire:before {\n  content: \"\\e104\";\n}\n.glyphicon-eye-open:before {\n  content: \"\\e105\";\n}\n.glyphicon-eye-close:before {\n  content: \"\\e106\";\n}\n.glyphicon-warning-sign:before {\n  content: \"\\e107\";\n}\n.glyphicon-plane:before {\n  content: \"\\e108\";\n}\n.glyphicon-calendar:before {\n  content: \"\\e109\";\n}\n.glyphicon-random:before {\n  content: \"\\e110\";\n}\n.glyphicon-comment:before {\n  content: \"\\e111\";\n}\n.glyphicon-magnet:before {\n  content: \"\\e112\";\n}\n.glyphicon-chevron-up:before {\n  content: \"\\e113\";\n}\n.glyphicon-chevron-down:before {\n  content: \"\\e114\";\n}\n.glyphicon-retweet:before {\n  content: \"\\e115\";\n}\n.glyphicon-shopping-cart:before {\n  content: \"\\e116\";\n}\n.glyphicon-folder-close:before {\n  content: \"\\e117\";\n}\n.glyphicon-folder-open:before {\n  content: \"\\e118\";\n}\n.glyphicon-resize-vertical:before {\n  content: \"\\e119\";\n}\n.glyphicon-resize-horizontal:before {\n  content: \"\\e120\";\n}\n.glyphicon-hdd:before {\n  content: \"\\e121\";\n}\n.glyphicon-bullhorn:before {\n  content: \"\\e122\";\n}\n.glyphicon-bell:before {\n  content: \"\\e123\";\n}\n.glyphicon-certificate:before {\n  content: \"\\e124\";\n}\n.glyphicon-thumbs-up:before {\n  content: \"\\e125\";\n}\n.glyphicon-thumbs-down:before {\n  content: \"\\e126\";\n}\n.glyphicon-hand-right:before {\n  content: \"\\e127\";\n}\n.glyphicon-hand-left:before {\n  content: \"\\e128\";\n}\n.glyphicon-hand-up:before {\n  content: \"\\e129\";\n}\n.glyphicon-hand-down:before {\n  content: \"\\e130\";\n}\n.glyphicon-circle-arrow-right:before {\n  content: \"\\e131\";\n}\n.glyphicon-circle-arrow-left:before {\n  content: \"\\e132\";\n}\n.glyphicon-circle-arrow-up:before {\n  content: \"\\e133\";\n}\n.glyphicon-circle-arrow-down:before {\n  content: \"\\e134\";\n}\n.glyphicon-globe:before {\n  content: \"\\e135\";\n}\n.glyphicon-wrench:before {\n  content: \"\\e136\";\n}\n.glyphicon-tasks:before {\n  content: \"\\e137\";\n}\n.glyphicon-filter:before {\n  content: \"\\e138\";\n}\n.glyphicon-briefcase:before {\n  content: \"\\e139\";\n}\n.glyphicon-fullscreen:before {\n  content: \"\\e140\";\n}\n.glyphicon-dashboard:before {\n  content: \"\\e141\";\n}\n.glyphicon-paperclip:before {\n  content: \"\\e142\";\n}\n.glyphicon-heart-empty:before {\n  content: \"\\e143\";\n}\n.glyphicon-link:before {\n  content: \"\\e144\";\n}\n.glyphicon-phone:before {\n  content: \"\\e145\";\n}\n.glyphicon-pushpin:before {\n  content: \"\\e146\";\n}\n.glyphicon-usd:before {\n  content: 
\"\\e148\";\n}\n.glyphicon-gbp:before {\n  content: \"\\e149\";\n}\n.glyphicon-sort:before {\n  content: \"\\e150\";\n}\n.glyphicon-sort-by-alphabet:before {\n  content: \"\\e151\";\n}\n.glyphicon-sort-by-alphabet-alt:before {\n  content: \"\\e152\";\n}\n.glyphicon-sort-by-order:before {\n  content: \"\\e153\";\n}\n.glyphicon-sort-by-order-alt:before {\n  content: \"\\e154\";\n}\n.glyphicon-sort-by-attributes:before {\n  content: \"\\e155\";\n}\n.glyphicon-sort-by-attributes-alt:before {\n  content: \"\\e156\";\n}\n.glyphicon-unchecked:before {\n  content: \"\\e157\";\n}\n.glyphicon-expand:before {\n  content: \"\\e158\";\n}\n.glyphicon-collapse-down:before {\n  content: \"\\e159\";\n}\n.glyphicon-collapse-up:before {\n  content: \"\\e160\";\n}\n.glyphicon-log-in:before {\n  content: \"\\e161\";\n}\n.glyphicon-flash:before {\n  content: \"\\e162\";\n}\n.glyphicon-log-out:before {\n  content: \"\\e163\";\n}\n.glyphicon-new-window:before {\n  content: \"\\e164\";\n}\n.glyphicon-record:before {\n  content: \"\\e165\";\n}\n.glyphicon-save:before {\n  content: \"\\e166\";\n}\n.glyphicon-open:before {\n  content: \"\\e167\";\n}\n.glyphicon-saved:before {\n  content: \"\\e168\";\n}\n.glyphicon-import:before {\n  content: \"\\e169\";\n}\n.glyphicon-export:before {\n  content: \"\\e170\";\n}\n.glyphicon-send:before {\n  content: \"\\e171\";\n}\n.glyphicon-floppy-disk:before {\n  content: \"\\e172\";\n}\n.glyphicon-floppy-saved:before {\n  content: \"\\e173\";\n}\n.glyphicon-floppy-remove:before {\n  content: \"\\e174\";\n}\n.glyphicon-floppy-save:before {\n  content: \"\\e175\";\n}\n.glyphicon-floppy-open:before {\n  content: \"\\e176\";\n}\n.glyphicon-credit-card:before {\n  content: \"\\e177\";\n}\n.glyphicon-transfer:before {\n  content: \"\\e178\";\n}\n.glyphicon-cutlery:before {\n  content: \"\\e179\";\n}\n.glyphicon-header:before {\n  content: \"\\e180\";\n}\n.glyphicon-compressed:before {\n  content: \"\\e181\";\n}\n.glyphicon-earphone:before {\n  content: \"\\e182\";\n}\n.glyphicon-phone-alt:before {\n  content: \"\\e183\";\n}\n.glyphicon-tower:before {\n  content: \"\\e184\";\n}\n.glyphicon-stats:before {\n  content: \"\\e185\";\n}\n.glyphicon-sd-video:before {\n  content: \"\\e186\";\n}\n.glyphicon-hd-video:before {\n  content: \"\\e187\";\n}\n.glyphicon-subtitles:before {\n  content: \"\\e188\";\n}\n.glyphicon-sound-stereo:before {\n  content: \"\\e189\";\n}\n.glyphicon-sound-dolby:before {\n  content: \"\\e190\";\n}\n.glyphicon-sound-5-1:before {\n  content: \"\\e191\";\n}\n.glyphicon-sound-6-1:before {\n  content: \"\\e192\";\n}\n.glyphicon-sound-7-1:before {\n  content: \"\\e193\";\n}\n.glyphicon-copyright-mark:before {\n  content: \"\\e194\";\n}\n.glyphicon-registration-mark:before {\n  content: \"\\e195\";\n}\n.glyphicon-cloud-download:before {\n  content: \"\\e197\";\n}\n.glyphicon-cloud-upload:before {\n  content: \"\\e198\";\n}\n.glyphicon-tree-conifer:before {\n  content: \"\\e199\";\n}\n.glyphicon-tree-deciduous:before {\n  content: \"\\e200\";\n}\n.glyphicon-cd:before {\n  content: \"\\e201\";\n}\n.glyphicon-save-file:before {\n  content: \"\\e202\";\n}\n.glyphicon-open-file:before {\n  content: \"\\e203\";\n}\n.glyphicon-level-up:before {\n  content: \"\\e204\";\n}\n.glyphicon-copy:before {\n  content: \"\\e205\";\n}\n.glyphicon-paste:before {\n  content: \"\\e206\";\n}\n.glyphicon-alert:before {\n  content: \"\\e209\";\n}\n.glyphicon-equalizer:before {\n  content: \"\\e210\";\n}\n.glyphicon-king:before {\n  content: \"\\e211\";\n}\n.glyphicon-queen:before {\n  content: 
\"\\e212\";\n}\n.glyphicon-pawn:before {\n  content: \"\\e213\";\n}\n.glyphicon-bishop:before {\n  content: \"\\e214\";\n}\n.glyphicon-knight:before {\n  content: \"\\e215\";\n}\n.glyphicon-baby-formula:before {\n  content: \"\\e216\";\n}\n.glyphicon-tent:before {\n  content: \"\\26fa\";\n}\n.glyphicon-blackboard:before {\n  content: \"\\e218\";\n}\n.glyphicon-bed:before {\n  content: \"\\e219\";\n}\n.glyphicon-apple:before {\n  content: \"\\f8ff\";\n}\n.glyphicon-erase:before {\n  content: \"\\e221\";\n}\n.glyphicon-hourglass:before {\n  content: \"\\231b\";\n}\n.glyphicon-lamp:before {\n  content: \"\\e223\";\n}\n.glyphicon-duplicate:before {\n  content: \"\\e224\";\n}\n.glyphicon-piggy-bank:before {\n  content: \"\\e225\";\n}\n.glyphicon-scissors:before {\n  content: \"\\e226\";\n}\n.glyphicon-bitcoin:before {\n  content: \"\\e227\";\n}\n.glyphicon-btc:before {\n  content: \"\\e227\";\n}\n.glyphicon-xbt:before {\n  content: \"\\e227\";\n}\n.glyphicon-yen:before {\n  content: \"\\00a5\";\n}\n.glyphicon-jpy:before {\n  content: \"\\00a5\";\n}\n.glyphicon-ruble:before {\n  content: \"\\20bd\";\n}\n.glyphicon-rub:before {\n  content: \"\\20bd\";\n}\n.glyphicon-scale:before {\n  content: \"\\e230\";\n}\n.glyphicon-ice-lolly:before {\n  content: \"\\e231\";\n}\n.glyphicon-ice-lolly-tasted:before {\n  content: \"\\e232\";\n}\n.glyphicon-education:before {\n  content: \"\\e233\";\n}\n.glyphicon-option-horizontal:before {\n  content: \"\\e234\";\n}\n.glyphicon-option-vertical:before {\n  content: \"\\e235\";\n}\n.glyphicon-menu-hamburger:before {\n  content: \"\\e236\";\n}\n.glyphicon-modal-window:before {\n  content: \"\\e237\";\n}\n.glyphicon-oil:before {\n  content: \"\\e238\";\n}\n.glyphicon-grain:before {\n  content: \"\\e239\";\n}\n.glyphicon-sunglasses:before {\n  content: \"\\e240\";\n}\n.glyphicon-text-size:before {\n  content: \"\\e241\";\n}\n.glyphicon-text-color:before {\n  content: \"\\e242\";\n}\n.glyphicon-text-background:before {\n  content: \"\\e243\";\n}\n.glyphicon-object-align-top:before {\n  content: \"\\e244\";\n}\n.glyphicon-object-align-bottom:before {\n  content: \"\\e245\";\n}\n.glyphicon-object-align-horizontal:before {\n  content: \"\\e246\";\n}\n.glyphicon-object-align-left:before {\n  content: \"\\e247\";\n}\n.glyphicon-object-align-vertical:before {\n  content: \"\\e248\";\n}\n.glyphicon-object-align-right:before {\n  content: \"\\e249\";\n}\n.glyphicon-triangle-right:before {\n  content: \"\\e250\";\n}\n.glyphicon-triangle-left:before {\n  content: \"\\e251\";\n}\n.glyphicon-triangle-bottom:before {\n  content: \"\\e252\";\n}\n.glyphicon-triangle-top:before {\n  content: \"\\e253\";\n}\n.glyphicon-console:before {\n  content: \"\\e254\";\n}\n.glyphicon-superscript:before {\n  content: \"\\e255\";\n}\n.glyphicon-subscript:before {\n  content: \"\\e256\";\n}\n.glyphicon-menu-left:before {\n  content: \"\\e257\";\n}\n.glyphicon-menu-right:before {\n  content: \"\\e258\";\n}\n.glyphicon-menu-down:before {\n  content: \"\\e259\";\n}\n.glyphicon-menu-up:before {\n  content: \"\\e260\";\n}\n* {\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n}\n*:before,\n*:after {\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n}\nhtml {\n  font-size: 10px;\n  -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\nbody {\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #333333;\n  background-color: 
#fff;\n}\ninput,\nbutton,\nselect,\ntextarea {\n  font-family: inherit;\n  font-size: inherit;\n  line-height: inherit;\n}\na {\n  color: #337ab7;\n  text-decoration: none;\n}\na:hover,\na:focus {\n  color: #23527c;\n  text-decoration: underline;\n}\na:focus {\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\nfigure {\n  margin: 0;\n}\nimg {\n  vertical-align: middle;\n}\n.img-responsive,\n.thumbnail > img,\n.thumbnail a > img,\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n  display: block;\n  max-width: 100%;\n  height: auto;\n}\n.img-rounded {\n  border-radius: 6px;\n}\n.img-thumbnail {\n  padding: 4px;\n  line-height: 1.42857143;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: all 0.2s ease-in-out;\n  -o-transition: all 0.2s ease-in-out;\n  transition: all 0.2s ease-in-out;\n  display: inline-block;\n  max-width: 100%;\n  height: auto;\n}\n.img-circle {\n  border-radius: 50%;\n}\nhr {\n  margin-top: 20px;\n  margin-bottom: 20px;\n  border: 0;\n  border-top: 1px solid #eeeeee;\n}\n.sr-only {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  padding: 0;\n  margin: -1px;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n}\n.sr-only-focusable:active,\n.sr-only-focusable:focus {\n  position: static;\n  width: auto;\n  height: auto;\n  margin: 0;\n  overflow: visible;\n  clip: auto;\n}\n[role=\"button\"] {\n  cursor: pointer;\n}\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\n.h1,\n.h2,\n.h3,\n.h4,\n.h5,\n.h6 {\n  font-family: inherit;\n  font-weight: 500;\n  line-height: 1.1;\n  color: inherit;\n}\nh1 small,\nh2 small,\nh3 small,\nh4 small,\nh5 small,\nh6 small,\n.h1 small,\n.h2 small,\n.h3 small,\n.h4 small,\n.h5 small,\n.h6 small,\nh1 .small,\nh2 .small,\nh3 .small,\nh4 .small,\nh5 .small,\nh6 .small,\n.h1 .small,\n.h2 .small,\n.h3 .small,\n.h4 .small,\n.h5 .small,\n.h6 .small {\n  font-weight: 400;\n  line-height: 1;\n  color: #777777;\n}\nh1,\n.h1,\nh2,\n.h2,\nh3,\n.h3 {\n  margin-top: 20px;\n  margin-bottom: 10px;\n}\nh1 small,\n.h1 small,\nh2 small,\n.h2 small,\nh3 small,\n.h3 small,\nh1 .small,\n.h1 .small,\nh2 .small,\n.h2 .small,\nh3 .small,\n.h3 .small {\n  font-size: 65%;\n}\nh4,\n.h4,\nh5,\n.h5,\nh6,\n.h6 {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\nh4 small,\n.h4 small,\nh5 small,\n.h5 small,\nh6 small,\n.h6 small,\nh4 .small,\n.h4 .small,\nh5 .small,\n.h5 .small,\nh6 .small,\n.h6 .small {\n  font-size: 75%;\n}\nh1,\n.h1 {\n  font-size: 36px;\n}\nh2,\n.h2 {\n  font-size: 30px;\n}\nh3,\n.h3 {\n  font-size: 24px;\n}\nh4,\n.h4 {\n  font-size: 18px;\n}\nh5,\n.h5 {\n  font-size: 14px;\n}\nh6,\n.h6 {\n  font-size: 12px;\n}\np {\n  margin: 0 0 10px;\n}\n.lead {\n  margin-bottom: 20px;\n  font-size: 16px;\n  font-weight: 300;\n  line-height: 1.4;\n}\n@media (min-width: 768px) {\n  .lead {\n    font-size: 21px;\n  }\n}\nsmall,\n.small {\n  font-size: 85%;\n}\nmark,\n.mark {\n  padding: 0.2em;\n  background-color: #fcf8e3;\n}\n.text-left {\n  text-align: left;\n}\n.text-right {\n  text-align: right;\n}\n.text-center {\n  text-align: center;\n}\n.text-justify {\n  text-align: justify;\n}\n.text-nowrap {\n  white-space: nowrap;\n}\n.text-lowercase {\n  text-transform: lowercase;\n}\n.text-uppercase {\n  text-transform: uppercase;\n}\n.text-capitalize {\n  text-transform: capitalize;\n}\n.text-muted {\n  color: #777777;\n}\n.text-primary {\n  color: #337ab7;\n}\na.text-primary:hover,\na.text-primary:focus {\n  color: #286090;\n}\n.text-success {\n  color: 
#3c763d;\n}\na.text-success:hover,\na.text-success:focus {\n  color: #2b542c;\n}\n.text-info {\n  color: #31708f;\n}\na.text-info:hover,\na.text-info:focus {\n  color: #245269;\n}\n.text-warning {\n  color: #8a6d3b;\n}\na.text-warning:hover,\na.text-warning:focus {\n  color: #66512c;\n}\n.text-danger {\n  color: #a94442;\n}\na.text-danger:hover,\na.text-danger:focus {\n  color: #843534;\n}\n.bg-primary {\n  color: #fff;\n  background-color: #337ab7;\n}\na.bg-primary:hover,\na.bg-primary:focus {\n  background-color: #286090;\n}\n.bg-success {\n  background-color: #dff0d8;\n}\na.bg-success:hover,\na.bg-success:focus {\n  background-color: #c1e2b3;\n}\n.bg-info {\n  background-color: #d9edf7;\n}\na.bg-info:hover,\na.bg-info:focus {\n  background-color: #afd9ee;\n}\n.bg-warning {\n  background-color: #fcf8e3;\n}\na.bg-warning:hover,\na.bg-warning:focus {\n  background-color: #f7ecb5;\n}\n.bg-danger {\n  background-color: #f2dede;\n}\na.bg-danger:hover,\na.bg-danger:focus {\n  background-color: #e4b9b9;\n}\n.page-header {\n  padding-bottom: 9px;\n  margin: 40px 0 20px;\n  border-bottom: 1px solid #eeeeee;\n}\nul,\nol {\n  margin-top: 0;\n  margin-bottom: 10px;\n}\nul ul,\nol ul,\nul ol,\nol ol {\n  margin-bottom: 0;\n}\n.list-unstyled {\n  padding-left: 0;\n  list-style: none;\n}\n.list-inline {\n  padding-left: 0;\n  list-style: none;\n  margin-left: -5px;\n}\n.list-inline > li {\n  display: inline-block;\n  padding-right: 5px;\n  padding-left: 5px;\n}\ndl {\n  margin-top: 0;\n  margin-bottom: 20px;\n}\ndt,\ndd {\n  line-height: 1.42857143;\n}\ndt {\n  font-weight: 700;\n}\ndd {\n  margin-left: 0;\n}\n@media (min-width: 768px) {\n  .dl-horizontal dt {\n    float: left;\n    width: 160px;\n    clear: left;\n    text-align: right;\n    overflow: hidden;\n    text-overflow: ellipsis;\n    white-space: nowrap;\n  }\n  .dl-horizontal dd {\n    margin-left: 180px;\n  }\n}\nabbr[title],\nabbr[data-original-title] {\n  cursor: help;\n}\n.initialism {\n  font-size: 90%;\n  text-transform: uppercase;\n}\nblockquote {\n  padding: 10px 20px;\n  margin: 0 0 20px;\n  font-size: 17.5px;\n  border-left: 5px solid #eeeeee;\n}\nblockquote p:last-child,\nblockquote ul:last-child,\nblockquote ol:last-child {\n  margin-bottom: 0;\n}\nblockquote footer,\nblockquote small,\nblockquote .small {\n  display: block;\n  font-size: 80%;\n  line-height: 1.42857143;\n  color: #777777;\n}\nblockquote footer:before,\nblockquote small:before,\nblockquote .small:before {\n  content: \"\\2014 \\00A0\";\n}\n.blockquote-reverse,\nblockquote.pull-right {\n  padding-right: 15px;\n  padding-left: 0;\n  text-align: right;\n  border-right: 5px solid #eeeeee;\n  border-left: 0;\n}\n.blockquote-reverse footer:before,\nblockquote.pull-right footer:before,\n.blockquote-reverse small:before,\nblockquote.pull-right small:before,\n.blockquote-reverse .small:before,\nblockquote.pull-right .small:before {\n  content: \"\";\n}\n.blockquote-reverse footer:after,\nblockquote.pull-right footer:after,\n.blockquote-reverse small:after,\nblockquote.pull-right small:after,\n.blockquote-reverse .small:after,\nblockquote.pull-right .small:after {\n  content: \"\\00A0 \\2014\";\n}\naddress {\n  margin-bottom: 20px;\n  font-style: normal;\n  line-height: 1.42857143;\n}\ncode,\nkbd,\npre,\nsamp {\n  font-family: Menlo, Monaco, Consolas, \"Courier New\", monospace;\n}\ncode {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: #c7254e;\n  background-color: #f9f2f4;\n  border-radius: 4px;\n}\nkbd {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: #fff;\n  
background-color: #333;\n  border-radius: 3px;\n  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25);\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\nkbd kbd {\n  padding: 0;\n  font-size: 100%;\n  font-weight: 700;\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\npre {\n  display: block;\n  padding: 9.5px;\n  margin: 0 0 10px;\n  font-size: 13px;\n  line-height: 1.42857143;\n  color: #333333;\n  word-break: break-all;\n  word-wrap: break-word;\n  background-color: #f5f5f5;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\npre code {\n  padding: 0;\n  font-size: inherit;\n  color: inherit;\n  white-space: pre-wrap;\n  background-color: transparent;\n  border-radius: 0;\n}\n.pre-scrollable {\n  max-height: 340px;\n  overflow-y: scroll;\n}\n.container {\n  padding-right: 15px;\n  padding-left: 15px;\n  margin-right: auto;\n  margin-left: auto;\n}\n@media (min-width: 768px) {\n  .container {\n    width: 750px;\n  }\n}\n@media (min-width: 992px) {\n  .container {\n    width: 970px;\n  }\n}\n@media (min-width: 1200px) {\n  .container {\n    width: 1170px;\n  }\n}\n.container-fluid {\n  padding-right: 15px;\n  padding-left: 15px;\n  margin-right: auto;\n  margin-left: auto;\n}\n.row {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n.row-no-gutters {\n  margin-right: 0;\n  margin-left: 0;\n}\n.row-no-gutters [class*=\"col-\"] {\n  padding-right: 0;\n  padding-left: 0;\n}\n.col-xs-1,\n.col-sm-1,\n.col-md-1,\n.col-lg-1,\n.col-xs-2,\n.col-sm-2,\n.col-md-2,\n.col-lg-2,\n.col-xs-3,\n.col-sm-3,\n.col-md-3,\n.col-lg-3,\n.col-xs-4,\n.col-sm-4,\n.col-md-4,\n.col-lg-4,\n.col-xs-5,\n.col-sm-5,\n.col-md-5,\n.col-lg-5,\n.col-xs-6,\n.col-sm-6,\n.col-md-6,\n.col-lg-6,\n.col-xs-7,\n.col-sm-7,\n.col-md-7,\n.col-lg-7,\n.col-xs-8,\n.col-sm-8,\n.col-md-8,\n.col-lg-8,\n.col-xs-9,\n.col-sm-9,\n.col-md-9,\n.col-lg-9,\n.col-xs-10,\n.col-sm-10,\n.col-md-10,\n.col-lg-10,\n.col-xs-11,\n.col-sm-11,\n.col-md-11,\n.col-lg-11,\n.col-xs-12,\n.col-sm-12,\n.col-md-12,\n.col-lg-12 {\n  position: relative;\n  min-height: 1px;\n  padding-right: 15px;\n  padding-left: 15px;\n}\n.col-xs-1,\n.col-xs-2,\n.col-xs-3,\n.col-xs-4,\n.col-xs-5,\n.col-xs-6,\n.col-xs-7,\n.col-xs-8,\n.col-xs-9,\n.col-xs-10,\n.col-xs-11,\n.col-xs-12 {\n  float: left;\n}\n.col-xs-12 {\n  width: 100%;\n}\n.col-xs-11 {\n  width: 91.66666667%;\n}\n.col-xs-10 {\n  width: 83.33333333%;\n}\n.col-xs-9 {\n  width: 75%;\n}\n.col-xs-8 {\n  width: 66.66666667%;\n}\n.col-xs-7 {\n  width: 58.33333333%;\n}\n.col-xs-6 {\n  width: 50%;\n}\n.col-xs-5 {\n  width: 41.66666667%;\n}\n.col-xs-4 {\n  width: 33.33333333%;\n}\n.col-xs-3 {\n  width: 25%;\n}\n.col-xs-2 {\n  width: 16.66666667%;\n}\n.col-xs-1 {\n  width: 8.33333333%;\n}\n.col-xs-pull-12 {\n  right: 100%;\n}\n.col-xs-pull-11 {\n  right: 91.66666667%;\n}\n.col-xs-pull-10 {\n  right: 83.33333333%;\n}\n.col-xs-pull-9 {\n  right: 75%;\n}\n.col-xs-pull-8 {\n  right: 66.66666667%;\n}\n.col-xs-pull-7 {\n  right: 58.33333333%;\n}\n.col-xs-pull-6 {\n  right: 50%;\n}\n.col-xs-pull-5 {\n  right: 41.66666667%;\n}\n.col-xs-pull-4 {\n  right: 33.33333333%;\n}\n.col-xs-pull-3 {\n  right: 25%;\n}\n.col-xs-pull-2 {\n  right: 16.66666667%;\n}\n.col-xs-pull-1 {\n  right: 8.33333333%;\n}\n.col-xs-pull-0 {\n  right: auto;\n}\n.col-xs-push-12 {\n  left: 100%;\n}\n.col-xs-push-11 {\n  left: 91.66666667%;\n}\n.col-xs-push-10 {\n  left: 83.33333333%;\n}\n.col-xs-push-9 {\n  left: 75%;\n}\n.col-xs-push-8 {\n  left: 66.66666667%;\n}\n.col-xs-push-7 {\n  left: 58.33333333%;\n}\n.col-xs-push-6 {\n  left: 50%;\n}\n.col-xs-push-5 {\n 
 left: 41.66666667%;\n}\n.col-xs-push-4 {\n  left: 33.33333333%;\n}\n.col-xs-push-3 {\n  left: 25%;\n}\n.col-xs-push-2 {\n  left: 16.66666667%;\n}\n.col-xs-push-1 {\n  left: 8.33333333%;\n}\n.col-xs-push-0 {\n  left: auto;\n}\n.col-xs-offset-12 {\n  margin-left: 100%;\n}\n.col-xs-offset-11 {\n  margin-left: 91.66666667%;\n}\n.col-xs-offset-10 {\n  margin-left: 83.33333333%;\n}\n.col-xs-offset-9 {\n  margin-left: 75%;\n}\n.col-xs-offset-8 {\n  margin-left: 66.66666667%;\n}\n.col-xs-offset-7 {\n  margin-left: 58.33333333%;\n}\n.col-xs-offset-6 {\n  margin-left: 50%;\n}\n.col-xs-offset-5 {\n  margin-left: 41.66666667%;\n}\n.col-xs-offset-4 {\n  margin-left: 33.33333333%;\n}\n.col-xs-offset-3 {\n  margin-left: 25%;\n}\n.col-xs-offset-2 {\n  margin-left: 16.66666667%;\n}\n.col-xs-offset-1 {\n  margin-left: 8.33333333%;\n}\n.col-xs-offset-0 {\n  margin-left: 0%;\n}\n@media (min-width: 768px) {\n  .col-sm-1,\n  .col-sm-2,\n  .col-sm-3,\n  .col-sm-4,\n  .col-sm-5,\n  .col-sm-6,\n  .col-sm-7,\n  .col-sm-8,\n  .col-sm-9,\n  .col-sm-10,\n  .col-sm-11,\n  .col-sm-12 {\n    float: left;\n  }\n  .col-sm-12 {\n    width: 100%;\n  }\n  .col-sm-11 {\n    width: 91.66666667%;\n  }\n  .col-sm-10 {\n    width: 83.33333333%;\n  }\n  .col-sm-9 {\n    width: 75%;\n  }\n  .col-sm-8 {\n    width: 66.66666667%;\n  }\n  .col-sm-7 {\n    width: 58.33333333%;\n  }\n  .col-sm-6 {\n    width: 50%;\n  }\n  .col-sm-5 {\n    width: 41.66666667%;\n  }\n  .col-sm-4 {\n    width: 33.33333333%;\n  }\n  .col-sm-3 {\n    width: 25%;\n  }\n  .col-sm-2 {\n    width: 16.66666667%;\n  }\n  .col-sm-1 {\n    width: 8.33333333%;\n  }\n  .col-sm-pull-12 {\n    right: 100%;\n  }\n  .col-sm-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-sm-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-sm-pull-9 {\n    right: 75%;\n  }\n  .col-sm-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-sm-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-sm-pull-6 {\n    right: 50%;\n  }\n  .col-sm-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-sm-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-sm-pull-3 {\n    right: 25%;\n  }\n  .col-sm-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-sm-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-sm-pull-0 {\n    right: auto;\n  }\n  .col-sm-push-12 {\n    left: 100%;\n  }\n  .col-sm-push-11 {\n    left: 91.66666667%;\n  }\n  .col-sm-push-10 {\n    left: 83.33333333%;\n  }\n  .col-sm-push-9 {\n    left: 75%;\n  }\n  .col-sm-push-8 {\n    left: 66.66666667%;\n  }\n  .col-sm-push-7 {\n    left: 58.33333333%;\n  }\n  .col-sm-push-6 {\n    left: 50%;\n  }\n  .col-sm-push-5 {\n    left: 41.66666667%;\n  }\n  .col-sm-push-4 {\n    left: 33.33333333%;\n  }\n  .col-sm-push-3 {\n    left: 25%;\n  }\n  .col-sm-push-2 {\n    left: 16.66666667%;\n  }\n  .col-sm-push-1 {\n    left: 8.33333333%;\n  }\n  .col-sm-push-0 {\n    left: auto;\n  }\n  .col-sm-offset-12 {\n    margin-left: 100%;\n  }\n  .col-sm-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-sm-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-sm-offset-9 {\n    margin-left: 75%;\n  }\n  .col-sm-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-sm-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-sm-offset-6 {\n    margin-left: 50%;\n  }\n  .col-sm-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-sm-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-sm-offset-3 {\n    margin-left: 25%;\n  }\n  .col-sm-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-sm-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-sm-offset-0 {\n    
margin-left: 0%;\n  }\n}\n@media (min-width: 992px) {\n  .col-md-1,\n  .col-md-2,\n  .col-md-3,\n  .col-md-4,\n  .col-md-5,\n  .col-md-6,\n  .col-md-7,\n  .col-md-8,\n  .col-md-9,\n  .col-md-10,\n  .col-md-11,\n  .col-md-12 {\n    float: left;\n  }\n  .col-md-12 {\n    width: 100%;\n  }\n  .col-md-11 {\n    width: 91.66666667%;\n  }\n  .col-md-10 {\n    width: 83.33333333%;\n  }\n  .col-md-9 {\n    width: 75%;\n  }\n  .col-md-8 {\n    width: 66.66666667%;\n  }\n  .col-md-7 {\n    width: 58.33333333%;\n  }\n  .col-md-6 {\n    width: 50%;\n  }\n  .col-md-5 {\n    width: 41.66666667%;\n  }\n  .col-md-4 {\n    width: 33.33333333%;\n  }\n  .col-md-3 {\n    width: 25%;\n  }\n  .col-md-2 {\n    width: 16.66666667%;\n  }\n  .col-md-1 {\n    width: 8.33333333%;\n  }\n  .col-md-pull-12 {\n    right: 100%;\n  }\n  .col-md-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-md-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-md-pull-9 {\n    right: 75%;\n  }\n  .col-md-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-md-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-md-pull-6 {\n    right: 50%;\n  }\n  .col-md-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-md-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-md-pull-3 {\n    right: 25%;\n  }\n  .col-md-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-md-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-md-pull-0 {\n    right: auto;\n  }\n  .col-md-push-12 {\n    left: 100%;\n  }\n  .col-md-push-11 {\n    left: 91.66666667%;\n  }\n  .col-md-push-10 {\n    left: 83.33333333%;\n  }\n  .col-md-push-9 {\n    left: 75%;\n  }\n  .col-md-push-8 {\n    left: 66.66666667%;\n  }\n  .col-md-push-7 {\n    left: 58.33333333%;\n  }\n  .col-md-push-6 {\n    left: 50%;\n  }\n  .col-md-push-5 {\n    left: 41.66666667%;\n  }\n  .col-md-push-4 {\n    left: 33.33333333%;\n  }\n  .col-md-push-3 {\n    left: 25%;\n  }\n  .col-md-push-2 {\n    left: 16.66666667%;\n  }\n  .col-md-push-1 {\n    left: 8.33333333%;\n  }\n  .col-md-push-0 {\n    left: auto;\n  }\n  .col-md-offset-12 {\n    margin-left: 100%;\n  }\n  .col-md-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-md-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-md-offset-9 {\n    margin-left: 75%;\n  }\n  .col-md-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-md-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-md-offset-6 {\n    margin-left: 50%;\n  }\n  .col-md-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-md-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-md-offset-3 {\n    margin-left: 25%;\n  }\n  .col-md-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-md-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-md-offset-0 {\n    margin-left: 0%;\n  }\n}\n@media (min-width: 1200px) {\n  .col-lg-1,\n  .col-lg-2,\n  .col-lg-3,\n  .col-lg-4,\n  .col-lg-5,\n  .col-lg-6,\n  .col-lg-7,\n  .col-lg-8,\n  .col-lg-9,\n  .col-lg-10,\n  .col-lg-11,\n  .col-lg-12 {\n    float: left;\n  }\n  .col-lg-12 {\n    width: 100%;\n  }\n  .col-lg-11 {\n    width: 91.66666667%;\n  }\n  .col-lg-10 {\n    width: 83.33333333%;\n  }\n  .col-lg-9 {\n    width: 75%;\n  }\n  .col-lg-8 {\n    width: 66.66666667%;\n  }\n  .col-lg-7 {\n    width: 58.33333333%;\n  }\n  .col-lg-6 {\n    width: 50%;\n  }\n  .col-lg-5 {\n    width: 41.66666667%;\n  }\n  .col-lg-4 {\n    width: 33.33333333%;\n  }\n  .col-lg-3 {\n    width: 25%;\n  }\n  .col-lg-2 {\n    width: 16.66666667%;\n  }\n  .col-lg-1 {\n    width: 8.33333333%;\n  }\n  .col-lg-pull-12 {\n    right: 100%;\n  }\n  .col-lg-pull-11 {\n    right: 
91.66666667%;\n  }\n  .col-lg-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-lg-pull-9 {\n    right: 75%;\n  }\n  .col-lg-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-lg-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-lg-pull-6 {\n    right: 50%;\n  }\n  .col-lg-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-lg-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-lg-pull-3 {\n    right: 25%;\n  }\n  .col-lg-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-lg-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-lg-pull-0 {\n    right: auto;\n  }\n  .col-lg-push-12 {\n    left: 100%;\n  }\n  .col-lg-push-11 {\n    left: 91.66666667%;\n  }\n  .col-lg-push-10 {\n    left: 83.33333333%;\n  }\n  .col-lg-push-9 {\n    left: 75%;\n  }\n  .col-lg-push-8 {\n    left: 66.66666667%;\n  }\n  .col-lg-push-7 {\n    left: 58.33333333%;\n  }\n  .col-lg-push-6 {\n    left: 50%;\n  }\n  .col-lg-push-5 {\n    left: 41.66666667%;\n  }\n  .col-lg-push-4 {\n    left: 33.33333333%;\n  }\n  .col-lg-push-3 {\n    left: 25%;\n  }\n  .col-lg-push-2 {\n    left: 16.66666667%;\n  }\n  .col-lg-push-1 {\n    left: 8.33333333%;\n  }\n  .col-lg-push-0 {\n    left: auto;\n  }\n  .col-lg-offset-12 {\n    margin-left: 100%;\n  }\n  .col-lg-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-lg-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-lg-offset-9 {\n    margin-left: 75%;\n  }\n  .col-lg-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-lg-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-lg-offset-6 {\n    margin-left: 50%;\n  }\n  .col-lg-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-lg-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-lg-offset-3 {\n    margin-left: 25%;\n  }\n  .col-lg-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-lg-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-lg-offset-0 {\n    margin-left: 0%;\n  }\n}\ntable {\n  background-color: transparent;\n}\ntable col[class*=\"col-\"] {\n  position: static;\n  display: table-column;\n  float: none;\n}\ntable td[class*=\"col-\"],\ntable th[class*=\"col-\"] {\n  position: static;\n  display: table-cell;\n  float: none;\n}\ncaption {\n  padding-top: 8px;\n  padding-bottom: 8px;\n  color: #777777;\n  text-align: left;\n}\nth {\n  text-align: left;\n}\n.table {\n  width: 100%;\n  max-width: 100%;\n  margin-bottom: 20px;\n}\n.table > thead > tr > th,\n.table > tbody > tr > th,\n.table > tfoot > tr > th,\n.table > thead > tr > td,\n.table > tbody > tr > td,\n.table > tfoot > tr > td {\n  padding: 8px;\n  line-height: 1.42857143;\n  vertical-align: top;\n  border-top: 1px solid #ddd;\n}\n.table > thead > tr > th {\n  vertical-align: bottom;\n  border-bottom: 2px solid #ddd;\n}\n.table > caption + thead > tr:first-child > th,\n.table > colgroup + thead > tr:first-child > th,\n.table > thead:first-child > tr:first-child > th,\n.table > caption + thead > tr:first-child > td,\n.table > colgroup + thead > tr:first-child > td,\n.table > thead:first-child > tr:first-child > td {\n  border-top: 0;\n}\n.table > tbody + tbody {\n  border-top: 2px solid #ddd;\n}\n.table .table {\n  background-color: #fff;\n}\n.table-condensed > thead > tr > th,\n.table-condensed > tbody > tr > th,\n.table-condensed > tfoot > tr > th,\n.table-condensed > thead > tr > td,\n.table-condensed > tbody > tr > td,\n.table-condensed > tfoot > tr > td {\n  padding: 5px;\n}\n.table-bordered {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > tbody > tr > th,\n.table-bordered > tfoot > tr > th,\n.table-bordered > 
thead > tr > td,\n.table-bordered > tbody > tr > td,\n.table-bordered > tfoot > tr > td {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > thead > tr > td {\n  border-bottom-width: 2px;\n}\n.table-striped > tbody > tr:nth-of-type(odd) {\n  background-color: #f9f9f9;\n}\n.table-hover > tbody > tr:hover {\n  background-color: #f5f5f5;\n}\n.table > thead > tr > td.active,\n.table > tbody > tr > td.active,\n.table > tfoot > tr > td.active,\n.table > thead > tr > th.active,\n.table > tbody > tr > th.active,\n.table > tfoot > tr > th.active,\n.table > thead > tr.active > td,\n.table > tbody > tr.active > td,\n.table > tfoot > tr.active > td,\n.table > thead > tr.active > th,\n.table > tbody > tr.active > th,\n.table > tfoot > tr.active > th {\n  background-color: #f5f5f5;\n}\n.table-hover > tbody > tr > td.active:hover,\n.table-hover > tbody > tr > th.active:hover,\n.table-hover > tbody > tr.active:hover > td,\n.table-hover > tbody > tr:hover > .active,\n.table-hover > tbody > tr.active:hover > th {\n  background-color: #e8e8e8;\n}\n.table > thead > tr > td.success,\n.table > tbody > tr > td.success,\n.table > tfoot > tr > td.success,\n.table > thead > tr > th.success,\n.table > tbody > tr > th.success,\n.table > tfoot > tr > th.success,\n.table > thead > tr.success > td,\n.table > tbody > tr.success > td,\n.table > tfoot > tr.success > td,\n.table > thead > tr.success > th,\n.table > tbody > tr.success > th,\n.table > tfoot > tr.success > th {\n  background-color: #dff0d8;\n}\n.table-hover > tbody > tr > td.success:hover,\n.table-hover > tbody > tr > th.success:hover,\n.table-hover > tbody > tr.success:hover > td,\n.table-hover > tbody > tr:hover > .success,\n.table-hover > tbody > tr.success:hover > th {\n  background-color: #d0e9c6;\n}\n.table > thead > tr > td.info,\n.table > tbody > tr > td.info,\n.table > tfoot > tr > td.info,\n.table > thead > tr > th.info,\n.table > tbody > tr > th.info,\n.table > tfoot > tr > th.info,\n.table > thead > tr.info > td,\n.table > tbody > tr.info > td,\n.table > tfoot > tr.info > td,\n.table > thead > tr.info > th,\n.table > tbody > tr.info > th,\n.table > tfoot > tr.info > th {\n  background-color: #d9edf7;\n}\n.table-hover > tbody > tr > td.info:hover,\n.table-hover > tbody > tr > th.info:hover,\n.table-hover > tbody > tr.info:hover > td,\n.table-hover > tbody > tr:hover > .info,\n.table-hover > tbody > tr.info:hover > th {\n  background-color: #c4e3f3;\n}\n.table > thead > tr > td.warning,\n.table > tbody > tr > td.warning,\n.table > tfoot > tr > td.warning,\n.table > thead > tr > th.warning,\n.table > tbody > tr > th.warning,\n.table > tfoot > tr > th.warning,\n.table > thead > tr.warning > td,\n.table > tbody > tr.warning > td,\n.table > tfoot > tr.warning > td,\n.table > thead > tr.warning > th,\n.table > tbody > tr.warning > th,\n.table > tfoot > tr.warning > th {\n  background-color: #fcf8e3;\n}\n.table-hover > tbody > tr > td.warning:hover,\n.table-hover > tbody > tr > th.warning:hover,\n.table-hover > tbody > tr.warning:hover > td,\n.table-hover > tbody > tr:hover > .warning,\n.table-hover > tbody > tr.warning:hover > th {\n  background-color: #faf2cc;\n}\n.table > thead > tr > td.danger,\n.table > tbody > tr > td.danger,\n.table > tfoot > tr > td.danger,\n.table > thead > tr > th.danger,\n.table > tbody > tr > th.danger,\n.table > tfoot > tr > th.danger,\n.table > thead > tr.danger > td,\n.table > tbody > tr.danger > td,\n.table > tfoot > tr.danger > td,\n.table > thead > tr.danger > th,\n.table > tbody > 
tr.danger > th,\n.table > tfoot > tr.danger > th {\n  background-color: #f2dede;\n}\n.table-hover > tbody > tr > td.danger:hover,\n.table-hover > tbody > tr > th.danger:hover,\n.table-hover > tbody > tr.danger:hover > td,\n.table-hover > tbody > tr:hover > .danger,\n.table-hover > tbody > tr.danger:hover > th {\n  background-color: #ebcccc;\n}\n.table-responsive {\n  min-height: 0.01%;\n  overflow-x: auto;\n}\n@media screen and (max-width: 767px) {\n  .table-responsive {\n    width: 100%;\n    margin-bottom: 15px;\n    overflow-y: hidden;\n    -ms-overflow-style: -ms-autohiding-scrollbar;\n    border: 1px solid #ddd;\n  }\n  .table-responsive > .table {\n    margin-bottom: 0;\n  }\n  .table-responsive > .table > thead > tr > th,\n  .table-responsive > .table > tbody > tr > th,\n  .table-responsive > .table > tfoot > tr > th,\n  .table-responsive > .table > thead > tr > td,\n  .table-responsive > .table > tbody > tr > td,\n  .table-responsive > .table > tfoot > tr > td {\n    white-space: nowrap;\n  }\n  .table-responsive > .table-bordered {\n    border: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:first-child,\n  .table-responsive > .table-bordered > tbody > tr > th:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n  .table-responsive > .table-bordered > thead > tr > td:first-child,\n  .table-responsive > .table-bordered > tbody > tr > td:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n    border-left: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:last-child,\n  .table-responsive > .table-bordered > tbody > tr > th:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n  .table-responsive > .table-bordered > thead > tr > td:last-child,\n  .table-responsive > .table-bordered > tbody > tr > td:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n    border-right: 0;\n  }\n  .table-responsive > .table-bordered > tbody > tr:last-child > th,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > th,\n  .table-responsive > .table-bordered > tbody > tr:last-child > td,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n    border-bottom: 0;\n  }\n}\nfieldset {\n  min-width: 0;\n  padding: 0;\n  margin: 0;\n  border: 0;\n}\nlegend {\n  display: block;\n  width: 100%;\n  padding: 0;\n  margin-bottom: 20px;\n  font-size: 21px;\n  line-height: inherit;\n  color: #333333;\n  border: 0;\n  border-bottom: 1px solid #e5e5e5;\n}\nlabel {\n  display: inline-block;\n  max-width: 100%;\n  margin-bottom: 5px;\n  font-weight: 700;\n}\ninput[type=\"search\"] {\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n  -webkit-appearance: none;\n  -moz-appearance: none;\n  appearance: none;\n}\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n  margin: 4px 0 0;\n  margin-top: 1px \\9;\n  line-height: normal;\n}\ninput[type=\"radio\"][disabled],\ninput[type=\"checkbox\"][disabled],\ninput[type=\"radio\"].disabled,\ninput[type=\"checkbox\"].disabled,\nfieldset[disabled] input[type=\"radio\"],\nfieldset[disabled] input[type=\"checkbox\"] {\n  cursor: not-allowed;\n}\ninput[type=\"file\"] {\n  display: block;\n}\ninput[type=\"range\"] {\n  display: block;\n  width: 100%;\n}\nselect[multiple],\nselect[size] {\n  height: auto;\n}\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n  outline: 5px auto -webkit-focus-ring-color;\n  
outline-offset: -2px;\n}\noutput {\n  display: block;\n  padding-top: 7px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555555;\n}\n.form-control {\n  display: block;\n  width: 100%;\n  height: 34px;\n  padding: 6px 12px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555555;\n  background-color: #fff;\n  background-image: none;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n  -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n  -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;\n  transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;\n  transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n  transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;\n}\n.form-control:focus {\n  border-color: #66afe9;\n  outline: 0;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);\n}\n.form-control::-moz-placeholder {\n  color: #999;\n  opacity: 1;\n}\n.form-control:-ms-input-placeholder {\n  color: #999;\n}\n.form-control::-webkit-input-placeholder {\n  color: #999;\n}\n.form-control::-ms-expand {\n  background-color: transparent;\n  border: 0;\n}\n.form-control[disabled],\n.form-control[readonly],\nfieldset[disabled] .form-control {\n  background-color: #eeeeee;\n  opacity: 1;\n}\n.form-control[disabled],\nfieldset[disabled] .form-control {\n  cursor: not-allowed;\n}\ntextarea.form-control {\n  height: auto;\n}\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n  input[type=\"date\"].form-control,\n  input[type=\"time\"].form-control,\n  input[type=\"datetime-local\"].form-control,\n  input[type=\"month\"].form-control {\n    line-height: 34px;\n  }\n  input[type=\"date\"].input-sm,\n  input[type=\"time\"].input-sm,\n  input[type=\"datetime-local\"].input-sm,\n  input[type=\"month\"].input-sm,\n  .input-group-sm input[type=\"date\"],\n  .input-group-sm input[type=\"time\"],\n  .input-group-sm input[type=\"datetime-local\"],\n  .input-group-sm input[type=\"month\"] {\n    line-height: 30px;\n  }\n  input[type=\"date\"].input-lg,\n  input[type=\"time\"].input-lg,\n  input[type=\"datetime-local\"].input-lg,\n  input[type=\"month\"].input-lg,\n  .input-group-lg input[type=\"date\"],\n  .input-group-lg input[type=\"time\"],\n  .input-group-lg input[type=\"datetime-local\"],\n  .input-group-lg input[type=\"month\"] {\n    line-height: 46px;\n  }\n}\n.form-group {\n  margin-bottom: 15px;\n}\n.radio,\n.checkbox {\n  position: relative;\n  display: block;\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.radio.disabled label,\n.checkbox.disabled label,\nfieldset[disabled] .radio label,\nfieldset[disabled] .checkbox label {\n  cursor: not-allowed;\n}\n.radio label,\n.checkbox label {\n  min-height: 20px;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: 400;\n  cursor: pointer;\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n  position: absolute;\n  margin-top: 4px \\9;\n  margin-left: -20px;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n  margin-top: 
-5px;\n}\n.radio-inline,\n.checkbox-inline {\n  position: relative;\n  display: inline-block;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: 400;\n  vertical-align: middle;\n  cursor: pointer;\n}\n.radio-inline.disabled,\n.checkbox-inline.disabled,\nfieldset[disabled] .radio-inline,\nfieldset[disabled] .checkbox-inline {\n  cursor: not-allowed;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n  margin-top: 0;\n  margin-left: 10px;\n}\n.form-control-static {\n  min-height: 34px;\n  padding-top: 7px;\n  padding-bottom: 7px;\n  margin-bottom: 0;\n}\n.form-control-static.input-lg,\n.form-control-static.input-sm {\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-sm {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-sm {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-sm,\nselect[multiple].input-sm {\n  height: auto;\n}\n.form-group-sm .form-control {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.form-group-sm select.form-control {\n  height: 30px;\n  line-height: 30px;\n}\n.form-group-sm textarea.form-control,\n.form-group-sm select[multiple].form-control {\n  height: auto;\n}\n.form-group-sm .form-control-static {\n  height: 30px;\n  min-height: 32px;\n  padding: 6px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n}\n.input-lg {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-lg {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-lg,\nselect[multiple].input-lg {\n  height: auto;\n}\n.form-group-lg .form-control {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\n.form-group-lg select.form-control {\n  height: 46px;\n  line-height: 46px;\n}\n.form-group-lg textarea.form-control,\n.form-group-lg select[multiple].form-control {\n  height: auto;\n}\n.form-group-lg .form-control-static {\n  height: 46px;\n  min-height: 38px;\n  padding: 11px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n}\n.has-feedback {\n  position: relative;\n}\n.has-feedback .form-control {\n  padding-right: 42.5px;\n}\n.form-control-feedback {\n  position: absolute;\n  top: 0;\n  right: 0;\n  z-index: 2;\n  display: block;\n  width: 34px;\n  height: 34px;\n  line-height: 34px;\n  text-align: center;\n  pointer-events: none;\n}\n.input-lg + .form-control-feedback,\n.input-group-lg + .form-control-feedback,\n.form-group-lg .form-control + .form-control-feedback {\n  width: 46px;\n  height: 46px;\n  line-height: 46px;\n}\n.input-sm + .form-control-feedback,\n.input-group-sm + .form-control-feedback,\n.form-group-sm .form-control + .form-control-feedback {\n  width: 30px;\n  height: 30px;\n  line-height: 30px;\n}\n.has-success .help-block,\n.has-success .control-label,\n.has-success .radio,\n.has-success .checkbox,\n.has-success .radio-inline,\n.has-success .checkbox-inline,\n.has-success.radio label,\n.has-success.checkbox label,\n.has-success.radio-inline label,\n.has-success.checkbox-inline label {\n  color: #3c763d;\n}\n.has-success .form-control {\n  border-color: #3c763d;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-success .form-control:focus {\n  border-color: #2b542c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px 
#67b168;\n}\n.has-success .input-group-addon {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #3c763d;\n}\n.has-success .form-control-feedback {\n  color: #3c763d;\n}\n.has-warning .help-block,\n.has-warning .control-label,\n.has-warning .radio,\n.has-warning .checkbox,\n.has-warning .radio-inline,\n.has-warning .checkbox-inline,\n.has-warning.radio label,\n.has-warning.checkbox label,\n.has-warning.radio-inline label,\n.has-warning.checkbox-inline label {\n  color: #8a6d3b;\n}\n.has-warning .form-control {\n  border-color: #8a6d3b;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-warning .form-control:focus {\n  border-color: #66512c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n}\n.has-warning .input-group-addon {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #8a6d3b;\n}\n.has-warning .form-control-feedback {\n  color: #8a6d3b;\n}\n.has-error .help-block,\n.has-error .control-label,\n.has-error .radio,\n.has-error .checkbox,\n.has-error .radio-inline,\n.has-error .checkbox-inline,\n.has-error.radio label,\n.has-error.checkbox label,\n.has-error.radio-inline label,\n.has-error.checkbox-inline label {\n  color: #a94442;\n}\n.has-error .form-control {\n  border-color: #a94442;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-error .form-control:focus {\n  border-color: #843534;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n}\n.has-error .input-group-addon {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #a94442;\n}\n.has-error .form-control-feedback {\n  color: #a94442;\n}\n.has-feedback label ~ .form-control-feedback {\n  top: 25px;\n}\n.has-feedback label.sr-only ~ .form-control-feedback {\n  top: 0;\n}\n.help-block {\n  display: block;\n  margin-top: 5px;\n  margin-bottom: 10px;\n  color: #737373;\n}\n@media (min-width: 768px) {\n  .form-inline .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .form-inline .form-control-static {\n    display: inline-block;\n  }\n  .form-inline .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .form-inline .input-group .input-group-addon,\n  .form-inline .input-group .input-group-btn,\n  .form-inline .input-group .form-control {\n    width: auto;\n  }\n  .form-inline .input-group > .form-control {\n    width: 100%;\n  }\n  .form-inline .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio,\n  .form-inline .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio label,\n  .form-inline .checkbox label {\n    padding-left: 0;\n  }\n  .form-inline .radio input[type=\"radio\"],\n  .form-inline .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .form-inline .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox,\n.form-horizontal .radio-inline,\n.form-horizontal .checkbox-inline {\n  padding-top: 7px;\n  margin-top: 0;\n  margin-bottom: 
0;\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox {\n  min-height: 27px;\n}\n.form-horizontal .form-group {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .control-label {\n    padding-top: 7px;\n    margin-bottom: 0;\n    text-align: right;\n  }\n}\n.form-horizontal .has-feedback .form-control-feedback {\n  right: 15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-lg .control-label {\n    padding-top: 11px;\n    font-size: 18px;\n  }\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-sm .control-label {\n    padding-top: 6px;\n    font-size: 12px;\n  }\n}\n.btn {\n  display: inline-block;\n  margin-bottom: 0;\n  font-weight: normal;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  -ms-touch-action: manipulation;\n  touch-action: manipulation;\n  cursor: pointer;\n  background-image: none;\n  border: 1px solid transparent;\n  padding: 6px 12px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  border-radius: 4px;\n  -webkit-user-select: none;\n  -moz-user-select: none;\n  -ms-user-select: none;\n  user-select: none;\n}\n.btn:focus,\n.btn:active:focus,\n.btn.active:focus,\n.btn.focus,\n.btn:active.focus,\n.btn.active.focus {\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n.btn:hover,\n.btn:focus,\n.btn.focus {\n  color: #333;\n  text-decoration: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n  outline: 0;\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn.disabled,\n.btn[disabled],\nfieldset[disabled] .btn {\n  cursor: not-allowed;\n  filter: alpha(opacity=65);\n  opacity: 0.65;\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\na.btn.disabled,\nfieldset[disabled] a.btn {\n  pointer-events: none;\n}\n.btn-default {\n  color: #333;\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default:focus,\n.btn-default.focus {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #8c8c8c;\n}\n.btn-default:hover {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #adadad;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n  color: #333;\n  background-color: #e6e6e6;\n  background-image: none;\n  border-color: #adadad;\n}\n.btn-default:active:hover,\n.btn-default.active:hover,\n.open > .dropdown-toggle.btn-default:hover,\n.btn-default:active:focus,\n.btn-default.active:focus,\n.open > .dropdown-toggle.btn-default:focus,\n.btn-default:active.focus,\n.btn-default.active.focus,\n.open > .dropdown-toggle.btn-default.focus {\n  color: #333;\n  background-color: #d4d4d4;\n  border-color: #8c8c8c;\n}\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus {\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default .badge {\n  color: #fff;\n  background-color: #333;\n}\n.btn-primary {\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary:focus,\n.btn-primary.focus {\n  color: #fff;\n  background-color: #286090;\n  border-color: #122b40;\n}\n.btn-primary:hover {\n  color: #fff;\n  background-color: #286090;\n  border-color: #204d74;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n  
color: #fff;\n  background-color: #286090;\n  background-image: none;\n  border-color: #204d74;\n}\n.btn-primary:active:hover,\n.btn-primary.active:hover,\n.open > .dropdown-toggle.btn-primary:hover,\n.btn-primary:active:focus,\n.btn-primary.active:focus,\n.open > .dropdown-toggle.btn-primary:focus,\n.btn-primary:active.focus,\n.btn-primary.active.focus,\n.open > .dropdown-toggle.btn-primary.focus {\n  color: #fff;\n  background-color: #204d74;\n  border-color: #122b40;\n}\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus {\n  background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.btn-success {\n  color: #fff;\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success:focus,\n.btn-success.focus {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #255625;\n}\n.btn-success:hover {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #398439;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n  color: #fff;\n  background-color: #449d44;\n  background-image: none;\n  border-color: #398439;\n}\n.btn-success:active:hover,\n.btn-success.active:hover,\n.open > .dropdown-toggle.btn-success:hover,\n.btn-success:active:focus,\n.btn-success.active:focus,\n.open > .dropdown-toggle.btn-success:focus,\n.btn-success:active.focus,\n.btn-success.active.focus,\n.open > .dropdown-toggle.btn-success.focus {\n  color: #fff;\n  background-color: #398439;\n  border-color: #255625;\n}\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus {\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success .badge {\n  color: #5cb85c;\n  background-color: #fff;\n}\n.btn-info {\n  color: #fff;\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info:focus,\n.btn-info.focus {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #1b6d85;\n}\n.btn-info:hover {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #269abc;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n  color: #fff;\n  background-color: #31b0d5;\n  background-image: none;\n  border-color: #269abc;\n}\n.btn-info:active:hover,\n.btn-info.active:hover,\n.open > .dropdown-toggle.btn-info:hover,\n.btn-info:active:focus,\n.btn-info.active:focus,\n.open > .dropdown-toggle.btn-info:focus,\n.btn-info:active.focus,\n.btn-info.active.focus,\n.open > .dropdown-toggle.btn-info.focus {\n  color: #fff;\n  background-color: #269abc;\n  border-color: #1b6d85;\n}\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus {\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info .badge {\n  color: #5bc0de;\n  background-color: #fff;\n}\n.btn-warning {\n  color: #fff;\n  background-color: #f0ad4e;\n  border-color: 
#eea236;\n}\n.btn-warning:focus,\n.btn-warning.focus {\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #985f0d;\n}\n.btn-warning:hover {\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #d58512;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n  color: #fff;\n  background-color: #ec971f;\n  background-image: none;\n  border-color: #d58512;\n}\n.btn-warning:active:hover,\n.btn-warning.active:hover,\n.open > .dropdown-toggle.btn-warning:hover,\n.btn-warning:active:focus,\n.btn-warning.active:focus,\n.open > .dropdown-toggle.btn-warning:focus,\n.btn-warning:active.focus,\n.btn-warning.active.focus,\n.open > .dropdown-toggle.btn-warning.focus {\n  color: #fff;\n  background-color: #d58512;\n  border-color: #985f0d;\n}\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus {\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning .badge {\n  color: #f0ad4e;\n  background-color: #fff;\n}\n.btn-danger {\n  color: #fff;\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger:focus,\n.btn-danger.focus {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #761c19;\n}\n.btn-danger:hover {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #ac2925;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n  color: #fff;\n  background-color: #c9302c;\n  background-image: none;\n  border-color: #ac2925;\n}\n.btn-danger:active:hover,\n.btn-danger.active:hover,\n.open > .dropdown-toggle.btn-danger:hover,\n.btn-danger:active:focus,\n.btn-danger.active:focus,\n.open > .dropdown-toggle.btn-danger:focus,\n.btn-danger:active.focus,\n.btn-danger.active.focus,\n.open > .dropdown-toggle.btn-danger.focus {\n  color: #fff;\n  background-color: #ac2925;\n  border-color: #761c19;\n}\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus {\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger .badge {\n  color: #d9534f;\n  background-color: #fff;\n}\n.btn-link {\n  font-weight: 400;\n  color: #337ab7;\n  border-radius: 0;\n}\n.btn-link,\n.btn-link:active,\n.btn-link.active,\n.btn-link[disabled],\nfieldset[disabled] .btn-link {\n  background-color: transparent;\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\n.btn-link,\n.btn-link:hover,\n.btn-link:focus,\n.btn-link:active {\n  border-color: transparent;\n}\n.btn-link:hover,\n.btn-link:focus {\n  color: #23527c;\n  text-decoration: underline;\n  background-color: transparent;\n}\n.btn-link[disabled]:hover,\nfieldset[disabled] .btn-link:hover,\n.btn-link[disabled]:focus,\nfieldset[disabled] .btn-link:focus {\n  color: #777777;\n  text-decoration: none;\n}\n.btn-lg,\n.btn-group-lg > .btn {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\n.btn-sm,\n.btn-group-sm > .btn {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-xs,\n.btn-group-xs > .btn {\n  padding: 1px 5px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 
3px;\n}\n.btn-block {\n  display: block;\n  width: 100%;\n}\n.btn-block + .btn-block {\n  margin-top: 5px;\n}\ninput[type=\"submit\"].btn-block,\ninput[type=\"reset\"].btn-block,\ninput[type=\"button\"].btn-block {\n  width: 100%;\n}\n.fade {\n  opacity: 0;\n  -webkit-transition: opacity 0.15s linear;\n  -o-transition: opacity 0.15s linear;\n  transition: opacity 0.15s linear;\n}\n.fade.in {\n  opacity: 1;\n}\n.collapse {\n  display: none;\n}\n.collapse.in {\n  display: block;\n}\ntr.collapse.in {\n  display: table-row;\n}\ntbody.collapse.in {\n  display: table-row-group;\n}\n.collapsing {\n  position: relative;\n  height: 0;\n  overflow: hidden;\n  -webkit-transition-property: height, visibility;\n  -o-transition-property: height, visibility;\n  transition-property: height, visibility;\n  -webkit-transition-duration: 0.35s;\n  -o-transition-duration: 0.35s;\n  transition-duration: 0.35s;\n  -webkit-transition-timing-function: ease;\n  -o-transition-timing-function: ease;\n  transition-timing-function: ease;\n}\n.caret {\n  display: inline-block;\n  width: 0;\n  height: 0;\n  margin-left: 2px;\n  vertical-align: middle;\n  border-top: 4px dashed;\n  border-top: 4px solid \\9;\n  border-right: 4px solid transparent;\n  border-left: 4px solid transparent;\n}\n.dropup,\n.dropdown {\n  position: relative;\n}\n.dropdown-toggle:focus {\n  outline: 0;\n}\n.dropdown-menu {\n  position: absolute;\n  top: 100%;\n  left: 0;\n  z-index: 1000;\n  display: none;\n  float: left;\n  min-width: 160px;\n  padding: 5px 0;\n  margin: 2px 0 0;\n  font-size: 14px;\n  text-align: left;\n  list-style: none;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, 0.15);\n  border-radius: 4px;\n  -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n  box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n}\n.dropdown-menu.pull-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu .divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.dropdown-menu > li > a {\n  display: block;\n  padding: 3px 20px;\n  clear: both;\n  font-weight: 400;\n  line-height: 1.42857143;\n  color: #333333;\n  white-space: nowrap;\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  color: #262626;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  color: #fff;\n  text-decoration: none;\n  background-color: #337ab7;\n  outline: 0;\n}\n.dropdown-menu > .disabled > a,\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  color: #777777;\n}\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n  background-image: none;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n}\n.open > .dropdown-menu {\n  display: block;\n}\n.open > a {\n  outline: 0;\n}\n.dropdown-menu-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu-left {\n  right: auto;\n  left: 0;\n}\n.dropdown-header {\n  display: block;\n  padding: 3px 20px;\n  font-size: 12px;\n  line-height: 1.42857143;\n  color: #777777;\n  white-space: nowrap;\n}\n.dropdown-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 990;\n}\n.pull-right > .dropdown-menu {\n  right: 0;\n  left: auto;\n}\n.dropup .caret,\n.navbar-fixed-bottom .dropdown .caret {\n  content: 
\"\";\n  border-top: 0;\n  border-bottom: 4px dashed;\n  border-bottom: 4px solid \\9;\n}\n.dropup .dropdown-menu,\n.navbar-fixed-bottom .dropdown .dropdown-menu {\n  top: auto;\n  bottom: 100%;\n  margin-bottom: 2px;\n}\n@media (min-width: 768px) {\n  .navbar-right .dropdown-menu {\n    right: 0;\n    left: auto;\n  }\n  .navbar-right .dropdown-menu-left {\n    right: auto;\n    left: 0;\n  }\n}\n.btn-group,\n.btn-group-vertical {\n  position: relative;\n  display: inline-block;\n  vertical-align: middle;\n}\n.btn-group > .btn,\n.btn-group-vertical > .btn {\n  position: relative;\n  float: left;\n}\n.btn-group > .btn:hover,\n.btn-group-vertical > .btn:hover,\n.btn-group > .btn:focus,\n.btn-group-vertical > .btn:focus,\n.btn-group > .btn:active,\n.btn-group-vertical > .btn:active,\n.btn-group > .btn.active,\n.btn-group-vertical > .btn.active {\n  z-index: 2;\n}\n.btn-group .btn + .btn,\n.btn-group .btn + .btn-group,\n.btn-group .btn-group + .btn,\n.btn-group .btn-group + .btn-group {\n  margin-left: -1px;\n}\n.btn-toolbar {\n  margin-left: -5px;\n}\n.btn-toolbar .btn,\n.btn-toolbar .btn-group,\n.btn-toolbar .input-group {\n  float: left;\n}\n.btn-toolbar > .btn,\n.btn-toolbar > .btn-group,\n.btn-toolbar > .input-group {\n  margin-left: 5px;\n}\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n  border-radius: 0;\n}\n.btn-group > .btn:first-child {\n  margin-left: 0;\n}\n.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group > .btn-group {\n  float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n  outline: 0;\n}\n.btn-group > .btn + .dropdown-toggle {\n  padding-right: 8px;\n  padding-left: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n  padding-right: 12px;\n  padding-left: 12px;\n}\n.btn-group.open .dropdown-toggle {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-group.open .dropdown-toggle.btn-link {\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\n.btn .caret {\n  margin-left: 0;\n}\n.btn-lg .caret {\n  border-width: 5px 5px 0;\n  border-bottom-width: 0;\n}\n.dropup .btn-lg .caret {\n  border-width: 0 5px 5px;\n}\n.btn-group-vertical > .btn,\n.btn-group-vertical > .btn-group,\n.btn-group-vertical > .btn-group > .btn {\n  display: block;\n  float: none;\n  width: 100%;\n  max-width: 100%;\n}\n.btn-group-vertical > .btn-group > .btn {\n  float: none;\n}\n.btn-group-vertical > .btn + .btn,\n.btn-group-vertical > .btn + .btn-group,\n.btn-group-vertical > .btn-group + .btn,\n.btn-group-vertical > .btn-group + .btn-group {\n  margin-top: -1px;\n  margin-left: 0;\n}\n.btn-group-vertical > .btn:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn:first-child:not(:last-child) {\n  border-top-left-radius: 4px;\n  
border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn:last-child:not(:first-child) {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.btn-group-justified {\n  display: table;\n  width: 100%;\n  table-layout: fixed;\n  border-collapse: separate;\n}\n.btn-group-justified > .btn,\n.btn-group-justified > .btn-group {\n  display: table-cell;\n  float: none;\n  width: 1%;\n}\n.btn-group-justified > .btn-group .btn {\n  width: 100%;\n}\n.btn-group-justified > .btn-group .dropdown-menu {\n  left: auto;\n}\n[data-toggle=\"buttons\"] > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn input[type=\"checkbox\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"checkbox\"] {\n  position: absolute;\n  clip: rect(0, 0, 0, 0);\n  pointer-events: none;\n}\n.input-group {\n  position: relative;\n  display: table;\n  border-collapse: separate;\n}\n.input-group[class*=\"col-\"] {\n  float: none;\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-group .form-control {\n  position: relative;\n  z-index: 2;\n  float: left;\n  width: 100%;\n  margin-bottom: 0;\n}\n.input-group .form-control:focus {\n  z-index: 3;\n}\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-group-lg > .form-control,\nselect.input-group-lg > .input-group-addon,\nselect.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-group-lg > .form-control,\ntextarea.input-group-lg > .input-group-addon,\ntextarea.input-group-lg > .input-group-btn > .btn,\nselect[multiple].input-group-lg > .form-control,\nselect[multiple].input-group-lg > .input-group-addon,\nselect[multiple].input-group-lg > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-group-sm > .form-control,\nselect.input-group-sm > .input-group-addon,\nselect.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-group-sm > .form-control,\ntextarea.input-group-sm > .input-group-addon,\ntextarea.input-group-sm > .input-group-btn > .btn,\nselect[multiple].input-group-sm > .form-control,\nselect[multiple].input-group-sm > .input-group-addon,\nselect[multiple].input-group-sm > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n  display: table-cell;\n}\n.input-group-addon:not(:first-child):not(:last-child),\n.input-group-btn:not(:first-child):not(:last-child),\n.input-group 
.form-control:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.input-group-addon,\n.input-group-btn {\n  width: 1%;\n  white-space: nowrap;\n  vertical-align: middle;\n}\n.input-group-addon {\n  padding: 6px 12px;\n  font-size: 14px;\n  font-weight: 400;\n  line-height: 1;\n  color: #555555;\n  text-align: center;\n  background-color: #eeeeee;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\n.input-group-addon.input-sm {\n  padding: 5px 10px;\n  font-size: 12px;\n  border-radius: 3px;\n}\n.input-group-addon.input-lg {\n  padding: 10px 16px;\n  font-size: 18px;\n  border-radius: 6px;\n}\n.input-group-addon input[type=\"radio\"],\n.input-group-addon input[type=\"checkbox\"] {\n  margin-top: 0;\n}\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.input-group-addon:first-child {\n  border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.input-group-addon:last-child {\n  border-left: 0;\n}\n.input-group-btn {\n  position: relative;\n  font-size: 0;\n  white-space: nowrap;\n}\n.input-group-btn > .btn {\n  position: relative;\n}\n.input-group-btn > .btn + .btn {\n  margin-left: -1px;\n}\n.input-group-btn > .btn:hover,\n.input-group-btn > .btn:focus,\n.input-group-btn > .btn:active {\n  z-index: 2;\n}\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group {\n  margin-right: -1px;\n}\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group {\n  z-index: 2;\n  margin-left: -1px;\n}\n.nav {\n  padding-left: 0;\n  margin-bottom: 0;\n  list-style: none;\n}\n.nav > li {\n  position: relative;\n  display: block;\n}\n.nav > li > a {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n}\n.nav > li > a:hover,\n.nav > li > a:focus {\n  text-decoration: none;\n  background-color: #eeeeee;\n}\n.nav > li.disabled > a {\n  color: #777777;\n}\n.nav > li.disabled > a:hover,\n.nav > li.disabled > a:focus {\n  color: #777777;\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n}\n.nav .open > a,\n.nav .open > a:hover,\n.nav .open > a:focus {\n  background-color: #eeeeee;\n  border-color: #337ab7;\n}\n.nav .nav-divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.nav > li > a > img {\n  max-width: none;\n}\n.nav-tabs {\n  border-bottom: 1px solid #ddd;\n}\n.nav-tabs > li {\n  float: left;\n  margin-bottom: -1px;\n}\n.nav-tabs > li > a {\n  margin-right: 2px;\n  line-height: 1.42857143;\n  border: 1px solid transparent;\n  border-radius: 4px 4px 0 0;\n}\n.nav-tabs > li > a:hover {\n  border-color: #eeeeee #eeeeee #ddd;\n}\n.nav-tabs > li.active > a,\n.nav-tabs > li.active > a:hover,\n.nav-tabs > li.active > a:focus {\n  color: #555555;\n  cursor: default;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-bottom-color: 
transparent;\n}\n.nav-tabs.nav-justified {\n  width: 100%;\n  border-bottom: 0;\n}\n.nav-tabs.nav-justified > li {\n  float: none;\n}\n.nav-tabs.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-tabs.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-tabs.nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs.nav-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs.nav-justified > .active > a,\n.nav-tabs.nav-justified > .active > a:hover,\n.nav-tabs.nav-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs.nav-justified > .active > a,\n  .nav-tabs.nav-justified > .active > a:hover,\n  .nav-tabs.nav-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.nav-pills > li {\n  float: left;\n}\n.nav-pills > li > a {\n  border-radius: 4px;\n}\n.nav-pills > li + li {\n  margin-left: 2px;\n}\n.nav-pills > li.active > a,\n.nav-pills > li.active > a:hover,\n.nav-pills > li.active > a:focus {\n  color: #fff;\n  background-color: #337ab7;\n}\n.nav-stacked > li {\n  float: none;\n}\n.nav-stacked > li + li {\n  margin-top: 2px;\n  margin-left: 0;\n}\n.nav-justified {\n  width: 100%;\n}\n.nav-justified > li {\n  float: none;\n}\n.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs-justified {\n  border-bottom: 0;\n}\n.nav-tabs-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs-justified > .active > a,\n.nav-tabs-justified > .active > a:hover,\n.nav-tabs-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs-justified > .active > a,\n  .nav-tabs-justified > .active > a:hover,\n  .nav-tabs-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.tab-content > .tab-pane {\n  display: none;\n}\n.tab-content > .active {\n  display: block;\n}\n.nav-tabs .dropdown-menu {\n  margin-top: -1px;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar {\n  position: relative;\n  min-height: 50px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n}\n@media (min-width: 768px) {\n  .navbar {\n    border-radius: 4px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-header {\n    float: left;\n  }\n}\n.navbar-collapse {\n  padding-right: 15px;\n  padding-left: 15px;\n  overflow-x: visible;\n  border-top: 1px solid transparent;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1);\n  -webkit-overflow-scrolling: touch;\n}\n.navbar-collapse.in {\n  overflow-y: auto;\n}\n@media (min-width: 768px) {\n  .navbar-collapse {\n    width: auto;\n    border-top: 0;\n    -webkit-box-shadow: none;\n    box-shadow: none;\n  }\n  .navbar-collapse.collapse {\n    display: block !important;\n    height: auto !important;\n    padding-bottom: 0;\n    overflow: visible !important;\n  }\n  .navbar-collapse.in 
{\n    overflow-y: visible;\n  }\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-static-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  position: fixed;\n  right: 0;\n  left: 0;\n  z-index: 1030;\n}\n.navbar-fixed-top .navbar-collapse,\n.navbar-fixed-bottom .navbar-collapse {\n  max-height: 340px;\n}\n@media (max-device-width: 480px) and (orientation: landscape) {\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    max-height: 200px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-fixed-top,\n  .navbar-fixed-bottom {\n    border-radius: 0;\n  }\n}\n.navbar-fixed-top {\n  top: 0;\n  border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n  bottom: 0;\n  margin-bottom: 0;\n  border-width: 1px 0 0;\n}\n.container > .navbar-header,\n.container-fluid > .navbar-header,\n.container > .navbar-collapse,\n.container-fluid > .navbar-collapse {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .container > .navbar-header,\n  .container-fluid > .navbar-header,\n  .container > .navbar-collapse,\n  .container-fluid > .navbar-collapse {\n    margin-right: 0;\n    margin-left: 0;\n  }\n}\n.navbar-static-top {\n  z-index: 1000;\n  border-width: 0 0 1px;\n}\n@media (min-width: 768px) {\n  .navbar-static-top {\n    border-radius: 0;\n  }\n}\n.navbar-brand {\n  float: left;\n  height: 50px;\n  padding: 15px 15px;\n  font-size: 18px;\n  line-height: 20px;\n}\n.navbar-brand:hover,\n.navbar-brand:focus {\n  text-decoration: none;\n}\n.navbar-brand > img {\n  display: block;\n}\n@media (min-width: 768px) {\n  .navbar > .container .navbar-brand,\n  .navbar > .container-fluid .navbar-brand {\n    margin-left: -15px;\n  }\n}\n.navbar-toggle {\n  position: relative;\n  float: right;\n  padding: 9px 10px;\n  margin-right: 15px;\n  margin-top: 8px;\n  margin-bottom: 8px;\n  background-color: transparent;\n  background-image: none;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.navbar-toggle:focus {\n  outline: 0;\n}\n.navbar-toggle .icon-bar {\n  display: block;\n  width: 22px;\n  height: 2px;\n  border-radius: 1px;\n}\n.navbar-toggle .icon-bar + .icon-bar {\n  margin-top: 4px;\n}\n@media (min-width: 768px) {\n  .navbar-toggle {\n    display: none;\n  }\n}\n.navbar-nav {\n  margin: 7.5px -15px;\n}\n.navbar-nav > li > a {\n  padding-top: 10px;\n  padding-bottom: 10px;\n  line-height: 20px;\n}\n@media (max-width: 767px) {\n  .navbar-nav .open .dropdown-menu {\n    position: static;\n    float: none;\n    width: auto;\n    margin-top: 0;\n    background-color: transparent;\n    border: 0;\n    -webkit-box-shadow: none;\n    box-shadow: none;\n  }\n  .navbar-nav .open .dropdown-menu > li > a,\n  .navbar-nav .open .dropdown-menu .dropdown-header {\n    padding: 5px 15px 5px 25px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a {\n    line-height: 20px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-nav .open .dropdown-menu > li > a:focus {\n    background-image: none;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-nav {\n    float: left;\n    margin: 0;\n  }\n  .navbar-nav > li {\n    float: left;\n  }\n  .navbar-nav > li > a {\n    padding-top: 15px;\n    padding-bottom: 15px;\n  }\n}\n.navbar-form {\n  padding: 10px 15px;\n  margin-right: -15px;\n  margin-left: -15px;\n  border-top: 1px solid transparent;\n  border-bottom: 1px solid transparent;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 
0 1px 0 rgba(255, 255, 255, 0.1);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n  margin-top: 8px;\n  margin-bottom: 8px;\n}\n@media (min-width: 768px) {\n  .navbar-form .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control-static {\n    display: inline-block;\n  }\n  .navbar-form .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .navbar-form .input-group .input-group-addon,\n  .navbar-form .input-group .input-group-btn,\n  .navbar-form .input-group .form-control {\n    width: auto;\n  }\n  .navbar-form .input-group > .form-control {\n    width: 100%;\n  }\n  .navbar-form .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio,\n  .navbar-form .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio label,\n  .navbar-form .checkbox label {\n    padding-left: 0;\n  }\n  .navbar-form .radio input[type=\"radio\"],\n  .navbar-form .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .navbar-form .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n@media (max-width: 767px) {\n  .navbar-form .form-group {\n    margin-bottom: 5px;\n  }\n  .navbar-form .form-group:last-child {\n    margin-bottom: 0;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-form {\n    width: auto;\n    padding-top: 0;\n    padding-bottom: 0;\n    margin-right: 0;\n    margin-left: 0;\n    border: 0;\n    -webkit-box-shadow: none;\n    box-shadow: none;\n  }\n}\n.navbar-nav > li > .dropdown-menu {\n  margin-top: 0;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n  margin-bottom: 0;\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.navbar-btn {\n  margin-top: 8px;\n  margin-bottom: 8px;\n}\n.navbar-btn.btn-sm {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.navbar-btn.btn-xs {\n  margin-top: 14px;\n  margin-bottom: 14px;\n}\n.navbar-text {\n  margin-top: 15px;\n  margin-bottom: 15px;\n}\n@media (min-width: 768px) {\n  .navbar-text {\n    float: left;\n    margin-right: 15px;\n    margin-left: 15px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-left {\n    float: left !important;\n  }\n  .navbar-right {\n    float: right !important;\n    margin-right: -15px;\n  }\n  .navbar-right ~ .navbar-right {\n    margin-right: 0;\n  }\n}\n.navbar-default {\n  background-color: #f8f8f8;\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-brand {\n  color: #777;\n}\n.navbar-default .navbar-brand:hover,\n.navbar-default .navbar-brand:focus {\n  color: #5e5e5e;\n  background-color: transparent;\n}\n.navbar-default .navbar-text {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a:hover,\n.navbar-default .navbar-nav > li > a:focus {\n  color: #333;\n  background-color: transparent;\n}\n.navbar-default .navbar-nav > .active > a,\n.navbar-default .navbar-nav > .active > a:hover,\n.navbar-default .navbar-nav > .active > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .disabled > a,\n.navbar-default .navbar-nav > .disabled > a:hover,\n.navbar-default 
.navbar-nav > .disabled > a:focus {\n  color: #ccc;\n  background-color: transparent;\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .open > a:hover,\n.navbar-default .navbar-nav > .open > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n@media (max-width: 767px) {\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a {\n    color: #777;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #333;\n    background-color: transparent;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #555;\n    background-color: #e7e7e7;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #ccc;\n    background-color: transparent;\n  }\n}\n.navbar-default .navbar-toggle {\n  border-color: #ddd;\n}\n.navbar-default .navbar-toggle:hover,\n.navbar-default .navbar-toggle:focus {\n  background-color: #ddd;\n}\n.navbar-default .navbar-toggle .icon-bar {\n  background-color: #888;\n}\n.navbar-default .navbar-collapse,\n.navbar-default .navbar-form {\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-link {\n  color: #777;\n}\n.navbar-default .navbar-link:hover {\n  color: #333;\n}\n.navbar-default .btn-link {\n  color: #777;\n}\n.navbar-default .btn-link:hover,\n.navbar-default .btn-link:focus {\n  color: #333;\n}\n.navbar-default .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-default .btn-link:hover,\n.navbar-default .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-default .btn-link:focus {\n  color: #ccc;\n}\n.navbar-inverse {\n  background-color: #222;\n  border-color: #080808;\n}\n.navbar-inverse .navbar-brand {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-brand:hover,\n.navbar-inverse .navbar-brand:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-text {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a:hover,\n.navbar-inverse .navbar-nav > li > a:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .active > a,\n.navbar-inverse .navbar-nav > .active > a:hover,\n.navbar-inverse .navbar-nav > .active > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n.navbar-inverse .navbar-nav > .disabled > a,\n.navbar-inverse .navbar-nav > .disabled > a:hover,\n.navbar-inverse .navbar-nav > .disabled > a:focus {\n  color: #444;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .open > a:hover,\n.navbar-inverse .navbar-nav > .open > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n@media (max-width: 767px) {\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {\n    border-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu .divider {\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {\n    color: #9d9d9d;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #fff;\n    background-color: 
transparent;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #444;\n    background-color: transparent;\n  }\n}\n.navbar-inverse .navbar-toggle {\n  border-color: #333;\n}\n.navbar-inverse .navbar-toggle:hover,\n.navbar-inverse .navbar-toggle:focus {\n  background-color: #333;\n}\n.navbar-inverse .navbar-toggle .icon-bar {\n  background-color: #fff;\n}\n.navbar-inverse .navbar-collapse,\n.navbar-inverse .navbar-form {\n  border-color: #101010;\n}\n.navbar-inverse .navbar-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-link:hover {\n  color: #fff;\n}\n.navbar-inverse .btn-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link:focus {\n  color: #fff;\n}\n.navbar-inverse .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-inverse .btn-link:focus {\n  color: #444;\n}\n.breadcrumb {\n  padding: 8px 15px;\n  margin-bottom: 20px;\n  list-style: none;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n}\n.breadcrumb > li {\n  display: inline-block;\n}\n.breadcrumb > li + li:before {\n  padding: 0 5px;\n  color: #ccc;\n  content: \"/\\00a0\";\n}\n.breadcrumb > .active {\n  color: #777777;\n}\n.pagination {\n  display: inline-block;\n  padding-left: 0;\n  margin: 20px 0;\n  border-radius: 4px;\n}\n.pagination > li {\n  display: inline;\n}\n.pagination > li > a,\n.pagination > li > span {\n  position: relative;\n  float: left;\n  padding: 6px 12px;\n  margin-left: -1px;\n  line-height: 1.42857143;\n  color: #337ab7;\n  text-decoration: none;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.pagination > li > a:hover,\n.pagination > li > span:hover,\n.pagination > li > a:focus,\n.pagination > li > span:focus {\n  z-index: 2;\n  color: #23527c;\n  background-color: #eeeeee;\n  border-color: #ddd;\n}\n.pagination > li:first-child > a,\n.pagination > li:first-child > span {\n  margin-left: 0;\n  border-top-left-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.pagination > li:last-child > a,\n.pagination > li:last-child > span {\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 4px;\n}\n.pagination > .active > a,\n.pagination > .active > span,\n.pagination > .active > a:hover,\n.pagination > .active > span:hover,\n.pagination > .active > a:focus,\n.pagination > .active > span:focus {\n  z-index: 3;\n  color: #fff;\n  cursor: default;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.pagination > .disabled > span,\n.pagination > .disabled > span:hover,\n.pagination > .disabled > span:focus,\n.pagination > .disabled > a,\n.pagination > .disabled > a:hover,\n.pagination > .disabled > a:focus {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #fff;\n  border-color: #ddd;\n}\n.pagination-lg > li > a,\n.pagination-lg > li > span {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n}\n.pagination-lg > li:first-child > a,\n.pagination-lg > li:first-child > span {\n  border-top-left-radius: 6px;\n  border-bottom-left-radius: 6px;\n}\n.pagination-lg > 
li:last-child > a,\n.pagination-lg > li:last-child > span {\n  border-top-right-radius: 6px;\n  border-bottom-right-radius: 6px;\n}\n.pagination-sm > li > a,\n.pagination-sm > li > span {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n}\n.pagination-sm > li:first-child > a,\n.pagination-sm > li:first-child > span {\n  border-top-left-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.pagination-sm > li:last-child > a,\n.pagination-sm > li:last-child > span {\n  border-top-right-radius: 3px;\n  border-bottom-right-radius: 3px;\n}\n.pager {\n  padding-left: 0;\n  margin: 20px 0;\n  text-align: center;\n  list-style: none;\n}\n.pager li {\n  display: inline;\n}\n.pager li > a,\n.pager li > span {\n  display: inline-block;\n  padding: 5px 14px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 15px;\n}\n.pager li > a:hover,\n.pager li > a:focus {\n  text-decoration: none;\n  background-color: #eeeeee;\n}\n.pager .next > a,\n.pager .next > span {\n  float: right;\n}\n.pager .previous > a,\n.pager .previous > span {\n  float: left;\n}\n.pager .disabled > a,\n.pager .disabled > a:hover,\n.pager .disabled > a:focus,\n.pager .disabled > span {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #fff;\n}\n.label {\n  display: inline;\n  padding: 0.2em 0.6em 0.3em;\n  font-size: 75%;\n  font-weight: 700;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  border-radius: 0.25em;\n}\na.label:hover,\na.label:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.label:empty {\n  display: none;\n}\n.btn .label {\n  position: relative;\n  top: -1px;\n}\n.label-default {\n  background-color: #777777;\n}\n.label-default[href]:hover,\n.label-default[href]:focus {\n  background-color: #5e5e5e;\n}\n.label-primary {\n  background-color: #337ab7;\n}\n.label-primary[href]:hover,\n.label-primary[href]:focus {\n  background-color: #286090;\n}\n.label-success {\n  background-color: #5cb85c;\n}\n.label-success[href]:hover,\n.label-success[href]:focus {\n  background-color: #449d44;\n}\n.label-info {\n  background-color: #5bc0de;\n}\n.label-info[href]:hover,\n.label-info[href]:focus {\n  background-color: #31b0d5;\n}\n.label-warning {\n  background-color: #f0ad4e;\n}\n.label-warning[href]:hover,\n.label-warning[href]:focus {\n  background-color: #ec971f;\n}\n.label-danger {\n  background-color: #d9534f;\n}\n.label-danger[href]:hover,\n.label-danger[href]:focus {\n  background-color: #c9302c;\n}\n.badge {\n  display: inline-block;\n  min-width: 10px;\n  padding: 3px 7px;\n  font-size: 12px;\n  font-weight: bold;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  background-color: #777777;\n  border-radius: 10px;\n}\n.badge:empty {\n  display: none;\n}\n.btn .badge {\n  position: relative;\n  top: -1px;\n}\n.btn-xs .badge,\n.btn-group-xs > .btn .badge {\n  top: 0;\n  padding: 1px 5px;\n}\na.badge:hover,\na.badge:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.list-group-item > .badge {\n  float: right;\n}\n.list-group-item > .badge + .badge {\n  margin-right: 5px;\n}\n.nav-pills > li > a > .badge {\n  margin-left: 3px;\n}\n.jumbotron {\n  padding-top: 30px;\n  padding-bottom: 30px;\n  margin-bottom: 30px;\n  color: inherit;\n  background-color: #eeeeee;\n}\n.jumbotron 
h1,\n.jumbotron .h1 {\n  color: inherit;\n}\n.jumbotron p {\n  margin-bottom: 15px;\n  font-size: 21px;\n  font-weight: 200;\n}\n.jumbotron > hr {\n  border-top-color: #d5d5d5;\n}\n.container .jumbotron,\n.container-fluid .jumbotron {\n  padding-right: 15px;\n  padding-left: 15px;\n  border-radius: 6px;\n}\n.jumbotron .container {\n  max-width: 100%;\n}\n@media screen and (min-width: 768px) {\n  .jumbotron {\n    padding-top: 48px;\n    padding-bottom: 48px;\n  }\n  .container .jumbotron,\n  .container-fluid .jumbotron {\n    padding-right: 60px;\n    padding-left: 60px;\n  }\n  .jumbotron h1,\n  .jumbotron .h1 {\n    font-size: 63px;\n  }\n}\n.thumbnail {\n  display: block;\n  padding: 4px;\n  margin-bottom: 20px;\n  line-height: 1.42857143;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: border 0.2s ease-in-out;\n  -o-transition: border 0.2s ease-in-out;\n  transition: border 0.2s ease-in-out;\n}\n.thumbnail > img,\n.thumbnail a > img {\n  margin-right: auto;\n  margin-left: auto;\n}\na.thumbnail:hover,\na.thumbnail:focus,\na.thumbnail.active {\n  border-color: #337ab7;\n}\n.thumbnail .caption {\n  padding: 9px;\n  color: #333333;\n}\n.alert {\n  padding: 15px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.alert h4 {\n  margin-top: 0;\n  color: inherit;\n}\n.alert .alert-link {\n  font-weight: bold;\n}\n.alert > p,\n.alert > ul {\n  margin-bottom: 0;\n}\n.alert > p + p {\n  margin-top: 5px;\n}\n.alert-dismissable,\n.alert-dismissible {\n  padding-right: 35px;\n}\n.alert-dismissable .close,\n.alert-dismissible .close {\n  position: relative;\n  top: -2px;\n  right: -21px;\n  color: inherit;\n}\n.alert-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.alert-success hr {\n  border-top-color: #c9e2b3;\n}\n.alert-success .alert-link {\n  color: #2b542c;\n}\n.alert-info {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.alert-info hr {\n  border-top-color: #a6e1ec;\n}\n.alert-info .alert-link {\n  color: #245269;\n}\n.alert-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.alert-warning hr {\n  border-top-color: #f7e1b5;\n}\n.alert-warning .alert-link {\n  color: #66512c;\n}\n.alert-danger {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.alert-danger hr {\n  border-top-color: #e4b9c0;\n}\n.alert-danger .alert-link {\n  color: #843534;\n}\n@-webkit-keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n@-o-keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n@keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n.progress {\n  height: 20px;\n  margin-bottom: 20px;\n  overflow: hidden;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n  box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n}\n.progress-bar {\n  float: left;\n  width: 0%;\n  height: 100%;\n  font-size: 12px;\n  line-height: 20px;\n  color: #fff;\n  text-align: center;\n  background-color: #337ab7;\n  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  -webkit-transition: width 0.6s ease;\n  -o-transition: width 0.6s ease;\n  transition: width 0.6s 
ease;\n}\n.progress-striped .progress-bar,\n.progress-bar-striped {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  -webkit-background-size: 40px 40px;\n  background-size: 40px 40px;\n}\n.progress.active .progress-bar,\n.progress-bar.active {\n  -webkit-animation: progress-bar-stripes 2s linear infinite;\n  -o-animation: progress-bar-stripes 2s linear infinite;\n  animation: progress-bar-stripes 2s linear infinite;\n}\n.progress-bar-success {\n  background-color: #5cb85c;\n}\n.progress-striped .progress-bar-success {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-info {\n  background-color: #5bc0de;\n}\n.progress-striped .progress-bar-info {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-warning {\n  background-color: #f0ad4e;\n}\n.progress-striped .progress-bar-warning {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-danger {\n  background-color: #d9534f;\n}\n.progress-striped .progress-bar-danger {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, 
transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.media {\n  margin-top: 15px;\n}\n.media:first-child {\n  margin-top: 0;\n}\n.media,\n.media-body {\n  overflow: hidden;\n  zoom: 1;\n}\n.media-body {\n  width: 10000px;\n}\n.media-object {\n  display: block;\n}\n.media-object.img-thumbnail {\n  max-width: none;\n}\n.media-right,\n.media > .pull-right {\n  padding-left: 10px;\n}\n.media-left,\n.media > .pull-left {\n  padding-right: 10px;\n}\n.media-left,\n.media-right,\n.media-body {\n  display: table-cell;\n  vertical-align: top;\n}\n.media-middle {\n  vertical-align: middle;\n}\n.media-bottom {\n  vertical-align: bottom;\n}\n.media-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.media-list {\n  padding-left: 0;\n  list-style: none;\n}\n.list-group {\n  padding-left: 0;\n  margin-bottom: 20px;\n}\n.list-group-item {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n  margin-bottom: -1px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.list-group-item:first-child {\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n}\n.list-group-item:last-child {\n  margin-bottom: 0;\n  border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.list-group-item.disabled,\n.list-group-item.disabled:hover,\n.list-group-item.disabled:focus {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #eeeeee;\n}\n.list-group-item.disabled .list-group-item-heading,\n.list-group-item.disabled:hover .list-group-item-heading,\n.list-group-item.disabled:focus .list-group-item-heading {\n  color: inherit;\n}\n.list-group-item.disabled .list-group-item-text,\n.list-group-item.disabled:hover .list-group-item-text,\n.list-group-item.disabled:focus .list-group-item-text {\n  color: #777777;\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  z-index: 2;\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.list-group-item.active .list-group-item-heading,\n.list-group-item.active:hover .list-group-item-heading,\n.list-group-item.active:focus .list-group-item-heading,\n.list-group-item.active .list-group-item-heading > small,\n.list-group-item.active:hover .list-group-item-heading > small,\n.list-group-item.active:focus .list-group-item-heading > small,\n.list-group-item.active .list-group-item-heading > .small,\n.list-group-item.active:hover .list-group-item-heading > .small,\n.list-group-item.active:focus .list-group-item-heading > .small {\n  color: inherit;\n}\n.list-group-item.active .list-group-item-text,\n.list-group-item.active:hover .list-group-item-text,\n.list-group-item.active:focus .list-group-item-text {\n  color: #c7ddef;\n}\na.list-group-item,\nbutton.list-group-item {\n  color: #555;\n}\na.list-group-item .list-group-item-heading,\nbutton.list-group-item .list-group-item-heading {\n  color: #333;\n}\na.list-group-item:hover,\nbutton.list-group-item:hover,\na.list-group-item:focus,\nbutton.list-group-item:focus {\n  color: #555;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\nbutton.list-group-item {\n  width: 100%;\n  text-align: left;\n}\n.list-group-item-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n}\na.list-group-item-success,\nbutton.list-group-item-success {\n  color: 
#3c763d;\n}\na.list-group-item-success .list-group-item-heading,\nbutton.list-group-item-success .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-success:hover,\nbutton.list-group-item-success:hover,\na.list-group-item-success:focus,\nbutton.list-group-item-success:focus {\n  color: #3c763d;\n  background-color: #d0e9c6;\n}\na.list-group-item-success.active,\nbutton.list-group-item-success.active,\na.list-group-item-success.active:hover,\nbutton.list-group-item-success.active:hover,\na.list-group-item-success.active:focus,\nbutton.list-group-item-success.active:focus {\n  color: #fff;\n  background-color: #3c763d;\n  border-color: #3c763d;\n}\n.list-group-item-info {\n  color: #31708f;\n  background-color: #d9edf7;\n}\na.list-group-item-info,\nbutton.list-group-item-info {\n  color: #31708f;\n}\na.list-group-item-info .list-group-item-heading,\nbutton.list-group-item-info .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-info:hover,\nbutton.list-group-item-info:hover,\na.list-group-item-info:focus,\nbutton.list-group-item-info:focus {\n  color: #31708f;\n  background-color: #c4e3f3;\n}\na.list-group-item-info.active,\nbutton.list-group-item-info.active,\na.list-group-item-info.active:hover,\nbutton.list-group-item-info.active:hover,\na.list-group-item-info.active:focus,\nbutton.list-group-item-info.active:focus {\n  color: #fff;\n  background-color: #31708f;\n  border-color: #31708f;\n}\n.list-group-item-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n}\na.list-group-item-warning,\nbutton.list-group-item-warning {\n  color: #8a6d3b;\n}\na.list-group-item-warning .list-group-item-heading,\nbutton.list-group-item-warning .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-warning:hover,\nbutton.list-group-item-warning:hover,\na.list-group-item-warning:focus,\nbutton.list-group-item-warning:focus {\n  color: #8a6d3b;\n  background-color: #faf2cc;\n}\na.list-group-item-warning.active,\nbutton.list-group-item-warning.active,\na.list-group-item-warning.active:hover,\nbutton.list-group-item-warning.active:hover,\na.list-group-item-warning.active:focus,\nbutton.list-group-item-warning.active:focus {\n  color: #fff;\n  background-color: #8a6d3b;\n  border-color: #8a6d3b;\n}\n.list-group-item-danger {\n  color: #a94442;\n  background-color: #f2dede;\n}\na.list-group-item-danger,\nbutton.list-group-item-danger {\n  color: #a94442;\n}\na.list-group-item-danger .list-group-item-heading,\nbutton.list-group-item-danger .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-danger:hover,\nbutton.list-group-item-danger:hover,\na.list-group-item-danger:focus,\nbutton.list-group-item-danger:focus {\n  color: #a94442;\n  background-color: #ebcccc;\n}\na.list-group-item-danger.active,\nbutton.list-group-item-danger.active,\na.list-group-item-danger.active:hover,\nbutton.list-group-item-danger.active:hover,\na.list-group-item-danger.active:focus,\nbutton.list-group-item-danger.active:focus {\n  color: #fff;\n  background-color: #a94442;\n  border-color: #a94442;\n}\n.list-group-item-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.list-group-item-text {\n  margin-bottom: 0;\n  line-height: 1.3;\n}\n.panel {\n  margin-bottom: 20px;\n  background-color: #fff;\n  border: 1px solid transparent;\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n  box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.panel-body {\n  padding: 15px;\n}\n.panel-heading {\n  padding: 10px 15px;\n  border-bottom: 1px solid 
transparent;\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel-heading > .dropdown .dropdown-toggle {\n  color: inherit;\n}\n.panel-title {\n  margin-top: 0;\n  margin-bottom: 0;\n  font-size: 16px;\n  color: inherit;\n}\n.panel-title > a,\n.panel-title > small,\n.panel-title > .small,\n.panel-title > small > a,\n.panel-title > .small > a {\n  color: inherit;\n}\n.panel-footer {\n  padding: 10px 15px;\n  background-color: #f5f5f5;\n  border-top: 1px solid #ddd;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .list-group,\n.panel > .panel-collapse > .list-group {\n  margin-bottom: 0;\n}\n.panel > .list-group .list-group-item,\n.panel > .panel-collapse > .list-group .list-group-item {\n  border-width: 1px 0;\n  border-radius: 0;\n}\n.panel > .list-group:first-child .list-group-item:first-child,\n.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {\n  border-top: 0;\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .list-group:last-child .list-group-item:last-child,\n.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {\n  border-bottom: 0;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.panel-heading + .list-group .list-group-item:first-child {\n  border-top-width: 0;\n}\n.list-group + .panel-footer {\n  border-top-width: 0;\n}\n.panel > .table,\n.panel > .table-responsive > .table,\n.panel > .panel-collapse > .table {\n  margin-bottom: 0;\n}\n.panel > .table caption,\n.panel > .table-responsive > .table caption,\n.panel > .panel-collapse > .table caption {\n  padding-right: 15px;\n  padding-left: 15px;\n}\n.panel > .table:first-child,\n.panel > .table-responsive:first-child > .table:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {\n  border-top-left-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > 
.table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {\n  border-top-right-radius: 3px;\n}\n.panel > .table:last-child,\n.panel > .table-responsive:last-child > .table:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {\n  border-bottom-right-radius: 3px;\n}\n.panel > .panel-body + .table,\n.panel > .panel-body + .table-responsive,\n.panel > .table + .panel-body,\n.panel > .table-responsive + .panel-body {\n  border-top: 1px solid #ddd;\n}\n.panel > .table > tbody:first-child > tr:first-child th,\n.panel > .table > tbody:first-child > tr:first-child td {\n  border-top: 0;\n}\n.panel > .table-bordered,\n.panel > .table-responsive > .table-bordered {\n  border: 0;\n}\n.panel > .table-bordered > thead > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,\n.panel > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-bordered > thead > tr > td:first-child,\n.panel > .table-responsive > 
.table-bordered > thead > tr > td:first-child,\n.panel > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-bordered > tfoot > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n  border-left: 0;\n}\n.panel > .table-bordered > thead > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,\n.panel > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-bordered > thead > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,\n.panel > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-bordered > tfoot > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n  border-right: 0;\n}\n.panel > .table-bordered > thead > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,\n.panel > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-bordered > thead > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,\n.panel > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {\n  border-bottom: 0;\n}\n.panel > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-bordered > tfoot > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {\n  border-bottom: 0;\n}\n.panel > .table-responsive {\n  margin-bottom: 0;\n  border: 0;\n}\n.panel-group {\n  margin-bottom: 20px;\n}\n.panel-group .panel {\n  margin-bottom: 0;\n  border-radius: 4px;\n}\n.panel-group .panel + .panel {\n  margin-top: 5px;\n}\n.panel-group .panel-heading {\n  border-bottom: 0;\n}\n.panel-group .panel-heading + .panel-collapse > .panel-body,\n.panel-group .panel-heading + .panel-collapse > .list-group {\n  border-top: 1px solid #ddd;\n}\n.panel-group .panel-footer {\n  border-top: 0;\n}\n.panel-group .panel-footer + .panel-collapse .panel-body {\n  border-bottom: 1px solid #ddd;\n}\n.panel-default {\n  border-color: #ddd;\n}\n.panel-default > .panel-heading {\n  color: #333333;\n  background-color: #f5f5f5;\n  border-color: #ddd;\n}\n.panel-default > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ddd;\n}\n.panel-default > .panel-heading .badge {\n  color: #f5f5f5;\n  background-color: #333333;\n}\n.panel-default > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ddd;\n}\n.panel-primary {\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading {\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: 
#337ab7;\n}\n.panel-primary > .panel-heading .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.panel-primary > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #337ab7;\n}\n.panel-success {\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #d6e9c6;\n}\n.panel-success > .panel-heading .badge {\n  color: #dff0d8;\n  background-color: #3c763d;\n}\n.panel-success > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #d6e9c6;\n}\n.panel-info {\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #bce8f1;\n}\n.panel-info > .panel-heading .badge {\n  color: #d9edf7;\n  background-color: #31708f;\n}\n.panel-info > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #bce8f1;\n}\n.panel-warning {\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #faebcc;\n}\n.panel-warning > .panel-heading .badge {\n  color: #fcf8e3;\n  background-color: #8a6d3b;\n}\n.panel-warning > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #faebcc;\n}\n.panel-danger {\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ebccd1;\n}\n.panel-danger > .panel-heading .badge {\n  color: #f2dede;\n  background-color: #a94442;\n}\n.panel-danger > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ebccd1;\n}\n.embed-responsive {\n  position: relative;\n  display: block;\n  height: 0;\n  padding: 0;\n  overflow: hidden;\n}\n.embed-responsive .embed-responsive-item,\n.embed-responsive iframe,\n.embed-responsive embed,\n.embed-responsive object,\n.embed-responsive video {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 100%;\n  height: 100%;\n  border: 0;\n}\n.embed-responsive-16by9 {\n  padding-bottom: 56.25%;\n}\n.embed-responsive-4by3 {\n  padding-bottom: 75%;\n}\n.well {\n  min-height: 20px;\n  padding: 19px;\n  margin-bottom: 20px;\n  background-color: #f5f5f5;\n  border: 1px solid #e3e3e3;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.well blockquote {\n  border-color: #ddd;\n  border-color: rgba(0, 0, 0, 0.15);\n}\n.well-lg {\n  padding: 24px;\n  border-radius: 6px;\n}\n.well-sm {\n  padding: 9px;\n  border-radius: 3px;\n}\n.close {\n  float: right;\n  font-size: 21px;\n  font-weight: bold;\n  line-height: 1;\n  color: #000;\n  text-shadow: 0 1px 0 #fff;\n  filter: alpha(opacity=20);\n  opacity: 0.2;\n}\n.close:hover,\n.close:focus {\n  color: #000;\n  text-decoration: none;\n  cursor: pointer;\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\nbutton.close {\n  padding: 0;\n  cursor: pointer;\n  background: transparent;\n  border: 0;\n  -webkit-appearance: none;\n  -moz-appearance: none;\n  appearance: none;\n}\n.modal-open {\n  overflow: hidden;\n}\n.modal {\n  position: fixed;\n  top: 0;\n  right: 0;\n  
bottom: 0;\n  left: 0;\n  z-index: 1050;\n  display: none;\n  overflow: hidden;\n  -webkit-overflow-scrolling: touch;\n  outline: 0;\n}\n.modal.fade .modal-dialog {\n  -webkit-transform: translate(0, -25%);\n  -ms-transform: translate(0, -25%);\n  -o-transform: translate(0, -25%);\n  transform: translate(0, -25%);\n  -webkit-transition: -webkit-transform 0.3s ease-out;\n  -o-transition: -o-transform 0.3s ease-out;\n  transition: -webkit-transform 0.3s ease-out;\n  transition: transform 0.3s ease-out;\n  transition: transform 0.3s ease-out, -webkit-transform 0.3s ease-out, -o-transform 0.3s ease-out;\n}\n.modal.in .modal-dialog {\n  -webkit-transform: translate(0, 0);\n  -ms-transform: translate(0, 0);\n  -o-transform: translate(0, 0);\n  transform: translate(0, 0);\n}\n.modal-open .modal {\n  overflow-x: hidden;\n  overflow-y: auto;\n}\n.modal-dialog {\n  position: relative;\n  width: auto;\n  margin: 10px;\n}\n.modal-content {\n  position: relative;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #999;\n  border: 1px solid rgba(0, 0, 0, 0.2);\n  border-radius: 6px;\n  -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n  box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n  outline: 0;\n}\n.modal-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 1040;\n  background-color: #000;\n}\n.modal-backdrop.fade {\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.modal-backdrop.in {\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\n.modal-header {\n  padding: 15px;\n  border-bottom: 1px solid #e5e5e5;\n}\n.modal-header .close {\n  margin-top: -2px;\n}\n.modal-title {\n  margin: 0;\n  line-height: 1.42857143;\n}\n.modal-body {\n  position: relative;\n  padding: 15px;\n}\n.modal-footer {\n  padding: 15px;\n  text-align: right;\n  border-top: 1px solid #e5e5e5;\n}\n.modal-footer .btn + .btn {\n  margin-bottom: 0;\n  margin-left: 5px;\n}\n.modal-footer .btn-group .btn + .btn {\n  margin-left: -1px;\n}\n.modal-footer .btn-block + .btn-block {\n  margin-left: 0;\n}\n.modal-scrollbar-measure {\n  position: absolute;\n  top: -9999px;\n  width: 50px;\n  height: 50px;\n  overflow: scroll;\n}\n@media (min-width: 768px) {\n  .modal-dialog {\n    width: 600px;\n    margin: 30px auto;\n  }\n  .modal-content {\n    -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n    box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n  }\n  .modal-sm {\n    width: 300px;\n  }\n}\n@media (min-width: 992px) {\n  .modal-lg {\n    width: 900px;\n  }\n}\n.tooltip {\n  position: absolute;\n  z-index: 1070;\n  display: block;\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1.42857143;\n  line-break: auto;\n  text-align: left;\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n  font-size: 12px;\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.tooltip.in {\n  filter: alpha(opacity=90);\n  opacity: 0.9;\n}\n.tooltip.top {\n  padding: 5px 0;\n  margin-top: -3px;\n}\n.tooltip.right {\n  padding: 0 5px;\n  margin-left: 3px;\n}\n.tooltip.bottom {\n  padding: 5px 0;\n  margin-top: 3px;\n}\n.tooltip.left {\n  padding: 0 5px;\n  margin-left: -3px;\n}\n.tooltip.top .tooltip-arrow {\n  bottom: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-left .tooltip-arrow {\n  right: 5px;\n  
bottom: 0;\n  margin-bottom: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-right .tooltip-arrow {\n  bottom: 0;\n  left: 5px;\n  margin-bottom: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.right .tooltip-arrow {\n  top: 50%;\n  left: 0;\n  margin-top: -5px;\n  border-width: 5px 5px 5px 0;\n  border-right-color: #000;\n}\n.tooltip.left .tooltip-arrow {\n  top: 50%;\n  right: 0;\n  margin-top: -5px;\n  border-width: 5px 0 5px 5px;\n  border-left-color: #000;\n}\n.tooltip.bottom .tooltip-arrow {\n  top: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-left .tooltip-arrow {\n  top: 0;\n  right: 5px;\n  margin-top: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-right .tooltip-arrow {\n  top: 0;\n  left: 5px;\n  margin-top: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip-inner {\n  max-width: 200px;\n  padding: 3px 8px;\n  color: #fff;\n  text-align: center;\n  background-color: #000;\n  border-radius: 4px;\n}\n.tooltip-arrow {\n  position: absolute;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.popover {\n  position: absolute;\n  top: 0;\n  left: 0;\n  z-index: 1060;\n  display: none;\n  max-width: 276px;\n  padding: 1px;\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1.42857143;\n  line-break: auto;\n  text-align: left;\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n  font-size: 14px;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, 0.2);\n  border-radius: 6px;\n  -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);\n  box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);\n}\n.popover.top {\n  margin-top: -10px;\n}\n.popover.right {\n  margin-left: 10px;\n}\n.popover.bottom {\n  margin-top: 10px;\n}\n.popover.left {\n  margin-left: -10px;\n}\n.popover > .arrow {\n  border-width: 11px;\n}\n.popover > .arrow,\n.popover > .arrow:after {\n  position: absolute;\n  display: block;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.popover > .arrow:after {\n  content: \"\";\n  border-width: 10px;\n}\n.popover.top > .arrow {\n  bottom: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-color: #999999;\n  border-top-color: rgba(0, 0, 0, 0.25);\n  border-bottom-width: 0;\n}\n.popover.top > .arrow:after {\n  bottom: 1px;\n  margin-left: -10px;\n  content: \" \";\n  border-top-color: #fff;\n  border-bottom-width: 0;\n}\n.popover.right > .arrow {\n  top: 50%;\n  left: -11px;\n  margin-top: -11px;\n  border-right-color: #999999;\n  border-right-color: rgba(0, 0, 0, 0.25);\n  border-left-width: 0;\n}\n.popover.right > .arrow:after {\n  bottom: -10px;\n  left: 1px;\n  content: \" \";\n  border-right-color: #fff;\n  border-left-width: 0;\n}\n.popover.bottom > .arrow {\n  top: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-width: 0;\n  border-bottom-color: #999999;\n  border-bottom-color: rgba(0, 0, 0, 0.25);\n}\n.popover.bottom > .arrow:after {\n  top: 1px;\n  margin-left: -10px;\n  content: \" \";\n  border-top-width: 0;\n  border-bottom-color: #fff;\n}\n.popover.left > .arrow {\n  top: 50%;\n  right: -11px;\n  margin-top: 
-11px;\n  border-right-width: 0;\n  border-left-color: #999999;\n  border-left-color: rgba(0, 0, 0, 0.25);\n}\n.popover.left > .arrow:after {\n  right: 1px;\n  bottom: -10px;\n  content: \" \";\n  border-right-width: 0;\n  border-left-color: #fff;\n}\n.popover-title {\n  padding: 8px 14px;\n  margin: 0;\n  font-size: 14px;\n  background-color: #f7f7f7;\n  border-bottom: 1px solid #ebebeb;\n  border-radius: 5px 5px 0 0;\n}\n.popover-content {\n  padding: 9px 14px;\n}\n.carousel {\n  position: relative;\n}\n.carousel-inner {\n  position: relative;\n  width: 100%;\n  overflow: hidden;\n}\n.carousel-inner > .item {\n  position: relative;\n  display: none;\n  -webkit-transition: 0.6s ease-in-out left;\n  -o-transition: 0.6s ease-in-out left;\n  transition: 0.6s ease-in-out left;\n}\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n  line-height: 1;\n}\n@media all and (transform-3d), (-webkit-transform-3d) {\n  .carousel-inner > .item {\n    -webkit-transition: -webkit-transform 0.6s ease-in-out;\n    -o-transition: -o-transform 0.6s ease-in-out;\n    transition: -webkit-transform 0.6s ease-in-out;\n    transition: transform 0.6s ease-in-out;\n    transition: transform 0.6s ease-in-out, -webkit-transform 0.6s ease-in-out, -o-transform 0.6s ease-in-out;\n    -webkit-backface-visibility: hidden;\n    backface-visibility: hidden;\n    -webkit-perspective: 1000px;\n    perspective: 1000px;\n  }\n  .carousel-inner > .item.next,\n  .carousel-inner > .item.active.right {\n    -webkit-transform: translate3d(100%, 0, 0);\n    transform: translate3d(100%, 0, 0);\n    left: 0;\n  }\n  .carousel-inner > .item.prev,\n  .carousel-inner > .item.active.left {\n    -webkit-transform: translate3d(-100%, 0, 0);\n    transform: translate3d(-100%, 0, 0);\n    left: 0;\n  }\n  .carousel-inner > .item.next.left,\n  .carousel-inner > .item.prev.right,\n  .carousel-inner > .item.active {\n    -webkit-transform: translate3d(0, 0, 0);\n    transform: translate3d(0, 0, 0);\n    left: 0;\n  }\n}\n.carousel-inner > .active,\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  display: block;\n}\n.carousel-inner > .active {\n  left: 0;\n}\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  position: absolute;\n  top: 0;\n  width: 100%;\n}\n.carousel-inner > .next {\n  left: 100%;\n}\n.carousel-inner > .prev {\n  left: -100%;\n}\n.carousel-inner > .next.left,\n.carousel-inner > .prev.right {\n  left: 0;\n}\n.carousel-inner > .active.left {\n  left: -100%;\n}\n.carousel-inner > .active.right {\n  left: 100%;\n}\n.carousel-control {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 15%;\n  font-size: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);\n  background-color: rgba(0, 0, 0, 0);\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\n.carousel-control.left {\n  background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n  background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n  background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, 0.5)), to(rgba(0, 0, 0, 0.0001)));\n  background-image: linear-gradient(to right, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control.right {\n  right: 0;\n  left: auto;\n  background-image: -webkit-linear-gradient(left, rgba(0, 
0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n  background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n  background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, 0.0001)), to(rgba(0, 0, 0, 0.5)));\n  background-image: linear-gradient(to right, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control:hover,\n.carousel-control:focus {\n  color: #fff;\n  text-decoration: none;\n  outline: 0;\n  filter: alpha(opacity=90);\n  opacity: 0.9;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-left,\n.carousel-control .glyphicon-chevron-right {\n  position: absolute;\n  top: 50%;\n  z-index: 5;\n  display: inline-block;\n  margin-top: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .glyphicon-chevron-left {\n  left: 50%;\n  margin-left: -10px;\n}\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-right {\n  right: 50%;\n  margin-right: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next {\n  width: 20px;\n  height: 20px;\n  font-family: serif;\n  line-height: 1;\n}\n.carousel-control .icon-prev:before {\n  content: \"\\2039\";\n}\n.carousel-control .icon-next:before {\n  content: \"\\203a\";\n}\n.carousel-indicators {\n  position: absolute;\n  bottom: 10px;\n  left: 50%;\n  z-index: 15;\n  width: 60%;\n  padding-left: 0;\n  margin-left: -30%;\n  text-align: center;\n  list-style: none;\n}\n.carousel-indicators li {\n  display: inline-block;\n  width: 10px;\n  height: 10px;\n  margin: 1px;\n  text-indent: -999px;\n  cursor: pointer;\n  background-color: #000 \\9;\n  background-color: rgba(0, 0, 0, 0);\n  border: 1px solid #fff;\n  border-radius: 10px;\n}\n.carousel-indicators .active {\n  width: 12px;\n  height: 12px;\n  margin: 0;\n  background-color: #fff;\n}\n.carousel-caption {\n  position: absolute;\n  right: 15%;\n  bottom: 20px;\n  left: 15%;\n  z-index: 10;\n  padding-top: 20px;\n  padding-bottom: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);\n}\n.carousel-caption .btn {\n  text-shadow: none;\n}\n@media screen and (min-width: 768px) {\n  .carousel-control .glyphicon-chevron-left,\n  .carousel-control .glyphicon-chevron-right,\n  .carousel-control .icon-prev,\n  .carousel-control .icon-next {\n    width: 30px;\n    height: 30px;\n    margin-top: -10px;\n    font-size: 30px;\n  }\n  .carousel-control .glyphicon-chevron-left,\n  .carousel-control .icon-prev {\n    margin-left: -10px;\n  }\n  .carousel-control .glyphicon-chevron-right,\n  .carousel-control .icon-next {\n    margin-right: -10px;\n  }\n  .carousel-caption {\n    right: 20%;\n    left: 20%;\n    padding-bottom: 30px;\n  }\n  .carousel-indicators {\n    bottom: 20px;\n  }\n}\n.clearfix:before,\n.clearfix:after,\n.dl-horizontal dd:before,\n.dl-horizontal dd:after,\n.container:before,\n.container:after,\n.container-fluid:before,\n.container-fluid:after,\n.row:before,\n.row:after,\n.form-horizontal .form-group:before,\n.form-horizontal .form-group:after,\n.btn-toolbar:before,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:before,\n.btn-group-vertical > 
.btn-group:after,\n.nav:before,\n.nav:after,\n.navbar:before,\n.navbar:after,\n.navbar-header:before,\n.navbar-header:after,\n.navbar-collapse:before,\n.navbar-collapse:after,\n.pager:before,\n.pager:after,\n.panel-body:before,\n.panel-body:after,\n.modal-header:before,\n.modal-header:after,\n.modal-footer:before,\n.modal-footer:after {\n  display: table;\n  content: \" \";\n}\n.clearfix:after,\n.dl-horizontal dd:after,\n.container:after,\n.container-fluid:after,\n.row:after,\n.form-horizontal .form-group:after,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:after,\n.nav:after,\n.navbar:after,\n.navbar-header:after,\n.navbar-collapse:after,\n.pager:after,\n.panel-body:after,\n.modal-header:after,\n.modal-footer:after {\n  clear: both;\n}\n.center-block {\n  display: block;\n  margin-right: auto;\n  margin-left: auto;\n}\n.pull-right {\n  float: right !important;\n}\n.pull-left {\n  float: left !important;\n}\n.hide {\n  display: none !important;\n}\n.show {\n  display: block !important;\n}\n.invisible {\n  visibility: hidden;\n}\n.text-hide {\n  font: 0/0 a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n.hidden {\n  display: none !important;\n}\n.affix {\n  position: fixed;\n}\n@-ms-viewport {\n  width: device-width;\n}\n.visible-xs,\n.visible-sm,\n.visible-md,\n.visible-lg {\n  display: none !important;\n}\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n  display: none !important;\n}\n@media (max-width: 767px) {\n  .visible-xs {\n    display: block !important;\n  }\n  table.visible-xs {\n    display: table !important;\n  }\n  tr.visible-xs {\n    display: table-row !important;\n  }\n  th.visible-xs,\n  td.visible-xs {\n    display: table-cell !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-block {\n    display: block !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-inline {\n    display: inline !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm {\n    display: block !important;\n  }\n  table.visible-sm {\n    display: table !important;\n  }\n  tr.visible-sm {\n    display: table-row !important;\n  }\n  th.visible-sm,\n  td.visible-sm {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-block {\n    display: block !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md {\n    display: block !important;\n  }\n  table.visible-md {\n    display: table !important;\n  }\n  tr.visible-md {\n    display: table-row !important;\n  }\n  th.visible-md,\n  td.visible-md {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-block {\n    display: block !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  
.visible-md-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg {\n    display: block !important;\n  }\n  table.visible-lg {\n    display: table !important;\n  }\n  tr.visible-lg {\n    display: table-row !important;\n  }\n  th.visible-lg,\n  td.visible-lg {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-block {\n    display: block !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (max-width: 767px) {\n  .hidden-xs {\n    display: none !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .hidden-sm {\n    display: none !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .hidden-md {\n    display: none !important;\n  }\n}\n@media (min-width: 1200px) {\n  .hidden-lg {\n    display: none !important;\n  }\n}\n.visible-print {\n  display: none !important;\n}\n@media print {\n  .visible-print {\n    display: block !important;\n  }\n  table.visible-print {\n    display: table !important;\n  }\n  tr.visible-print {\n    display: table-row !important;\n  }\n  th.visible-print,\n  td.visible-print {\n    display: table-cell !important;\n  }\n}\n.visible-print-block {\n  display: none !important;\n}\n@media print {\n  .visible-print-block {\n    display: block !important;\n  }\n}\n.visible-print-inline {\n  display: none !important;\n}\n@media print {\n  .visible-print-inline {\n    display: inline !important;\n  }\n}\n.visible-print-inline-block {\n  display: none !important;\n}\n@media print {\n  .visible-print-inline-block {\n    display: inline-block !important;\n  }\n}\n@media print {\n  .hidden-print {\n    display: none !important;\n  }\n}\n/*# sourceMappingURL=bootstrap.css.map */","// stylelint-disable declaration-no-important, selector-no-qualifying-type\n\n/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n\n// ==========================================================================\n// Print styles.\n// Inlined to avoid the additional HTTP request: h5bp.com/r\n// ==========================================================================\n\n@media print {\n  *,\n  *:before,\n  *:after {\n    color: #000 !important; // Black prints faster: h5bp.com/s\n    text-shadow: none !important;\n    background: transparent !important;\n    box-shadow: none !important;\n  }\n\n  a,\n  a:visited {\n    text-decoration: underline;\n  }\n\n  a[href]:after {\n    content: \" (\" attr(href) \")\";\n  }\n\n  abbr[title]:after {\n    content: \" (\" attr(title) \")\";\n  }\n\n  // Don't show links that are fragment identifiers,\n  // or use the `javascript:` pseudo protocol\n  a[href^=\"#\"]:after,\n  a[href^=\"javascript:\"]:after {\n    content: \"\";\n  }\n\n  pre,\n  blockquote {\n    border: 1px solid #999;\n    page-break-inside: avoid;\n  }\n\n  thead {\n    display: table-header-group; // h5bp.com/t\n  }\n\n  tr,\n  img {\n    page-break-inside: avoid;\n  }\n\n  img {\n    max-width: 100% !important;\n  }\n\n  p,\n  h2,\n  h3 {\n    orphans: 3;\n    widows: 3;\n  }\n\n  h2,\n  h3 {\n    page-break-after: avoid;\n  }\n\n  // Bootstrap specific changes start\n\n  // Bootstrap components\n  .navbar {\n    display: none;\n  }\n  .btn,\n  .dropup > .btn {\n    > .caret {\n      border-top-color: #000 !important;\n    }\n  }\n  .label {\n    border: 1px solid #000;\n  }\n\n  .table {\n    border-collapse: collapse !important;\n\n    td,\n    th {\n      background-color: #fff !important;\n    }\n  }\n  .table-bordered {\n    th,\n    td {\n      border: 1px solid #ddd !important;\n    }\n  }\n}\n","// stylelint-disable value-list-comma-newline-after, value-list-comma-space-after, indentation, declaration-colon-newline-after, font-family-no-missing-generic-family-keyword\n\n//\n// Glyphicons for Bootstrap\n//\n// Since icons are fonts, they can be placed anywhere text is placed and are\n// thus automatically sized to match the surrounding child. 
To use, create an\n// inline element with the appropriate classes, like so:\n//\n// <a href=\"#\"><span class=\"glyphicon glyphicon-star\"></span> Star</a>\n\n// Import the fonts\n@font-face {\n  font-family: \"Glyphicons Halflings\";\n  src: url(\"@{icon-font-path}@{icon-font-name}.eot\");\n  src: url(\"@{icon-font-path}@{icon-font-name}.eot?#iefix\") format(\"embedded-opentype\"),\n       url(\"@{icon-font-path}@{icon-font-name}.woff2\") format(\"woff2\"),\n       url(\"@{icon-font-path}@{icon-font-name}.woff\") format(\"woff\"),\n       url(\"@{icon-font-path}@{icon-font-name}.ttf\") format(\"truetype\"),\n       url(\"@{icon-font-path}@{icon-font-name}.svg#@{icon-font-svg-id}\") format(\"svg\");\n}\n\n// Catchall baseclass\n.glyphicon {\n  position: relative;\n  top: 1px;\n  display: inline-block;\n  font-family: \"Glyphicons Halflings\";\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1;\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n\n// Individual icons\n.glyphicon-asterisk               { &:before { content: \"\\002a\"; } }\n.glyphicon-plus                   { &:before { content: \"\\002b\"; } }\n.glyphicon-euro,\n.glyphicon-eur                    { &:before { content: \"\\20ac\"; } }\n.glyphicon-minus                  { &:before { content: \"\\2212\"; } }\n.glyphicon-cloud                  { &:before { content: \"\\2601\"; } }\n.glyphicon-envelope               { &:before { content: \"\\2709\"; } }\n.glyphicon-pencil                 { &:before { content: \"\\270f\"; } }\n.glyphicon-glass                  { &:before { content: \"\\e001\"; } }\n.glyphicon-music                  { &:before { content: \"\\e002\"; } }\n.glyphicon-search                 { &:before { content: \"\\e003\"; } }\n.glyphicon-heart                  { &:before { content: \"\\e005\"; } }\n.glyphicon-star                   { &:before { content: \"\\e006\"; } }\n.glyphicon-star-empty             { &:before { content: \"\\e007\"; } }\n.glyphicon-user                   { &:before { content: \"\\e008\"; } }\n.glyphicon-film                   { &:before { content: \"\\e009\"; } }\n.glyphicon-th-large               { &:before { content: \"\\e010\"; } }\n.glyphicon-th                     { &:before { content: \"\\e011\"; } }\n.glyphicon-th-list                { &:before { content: \"\\e012\"; } }\n.glyphicon-ok                     { &:before { content: \"\\e013\"; } }\n.glyphicon-remove                 { &:before { content: \"\\e014\"; } }\n.glyphicon-zoom-in                { &:before { content: \"\\e015\"; } }\n.glyphicon-zoom-out               { &:before { content: \"\\e016\"; } }\n.glyphicon-off                    { &:before { content: \"\\e017\"; } }\n.glyphicon-signal                 { &:before { content: \"\\e018\"; } }\n.glyphicon-cog                    { &:before { content: \"\\e019\"; } }\n.glyphicon-trash                  { &:before { content: \"\\e020\"; } }\n.glyphicon-home                   { &:before { content: \"\\e021\"; } }\n.glyphicon-file                   { &:before { content: \"\\e022\"; } }\n.glyphicon-time                   { &:before { content: \"\\e023\"; } }\n.glyphicon-road                   { &:before { content: \"\\e024\"; } }\n.glyphicon-download-alt           { &:before { content: \"\\e025\"; } }\n.glyphicon-download               { &:before { content: \"\\e026\"; } }\n.glyphicon-upload                 { &:before { content: \"\\e027\"; } }\n.glyphicon-inbox                  { &:before { content: \"\\e028\"; } }\n.glyphicon-play-circle         
   { &:before { content: \"\\e029\"; } }\n.glyphicon-repeat                 { &:before { content: \"\\e030\"; } }\n.glyphicon-refresh                { &:before { content: \"\\e031\"; } }\n.glyphicon-list-alt               { &:before { content: \"\\e032\"; } }\n.glyphicon-lock                   { &:before { content: \"\\e033\"; } }\n.glyphicon-flag                   { &:before { content: \"\\e034\"; } }\n.glyphicon-headphones             { &:before { content: \"\\e035\"; } }\n.glyphicon-volume-off             { &:before { content: \"\\e036\"; } }\n.glyphicon-volume-down            { &:before { content: \"\\e037\"; } }\n.glyphicon-volume-up              { &:before { content: \"\\e038\"; } }\n.glyphicon-qrcode                 { &:before { content: \"\\e039\"; } }\n.glyphicon-barcode                { &:before { content: \"\\e040\"; } }\n.glyphicon-tag                    { &:before { content: \"\\e041\"; } }\n.glyphicon-tags                   { &:before { content: \"\\e042\"; } }\n.glyphicon-book                   { &:before { content: \"\\e043\"; } }\n.glyphicon-bookmark               { &:before { content: \"\\e044\"; } }\n.glyphicon-print                  { &:before { content: \"\\e045\"; } }\n.glyphicon-camera                 { &:before { content: \"\\e046\"; } }\n.glyphicon-font                   { &:before { content: \"\\e047\"; } }\n.glyphicon-bold                   { &:before { content: \"\\e048\"; } }\n.glyphicon-italic                 { &:before { content: \"\\e049\"; } }\n.glyphicon-text-height            { &:before { content: \"\\e050\"; } }\n.glyphicon-text-width             { &:before { content: \"\\e051\"; } }\n.glyphicon-align-left             { &:before { content: \"\\e052\"; } }\n.glyphicon-align-center           { &:before { content: \"\\e053\"; } }\n.glyphicon-align-right            { &:before { content: \"\\e054\"; } }\n.glyphicon-align-justify          { &:before { content: \"\\e055\"; } }\n.glyphicon-list                   { &:before { content: \"\\e056\"; } }\n.glyphicon-indent-left            { &:before { content: \"\\e057\"; } }\n.glyphicon-indent-right           { &:before { content: \"\\e058\"; } }\n.glyphicon-facetime-video         { &:before { content: \"\\e059\"; } }\n.glyphicon-picture                { &:before { content: \"\\e060\"; } }\n.glyphicon-map-marker             { &:before { content: \"\\e062\"; } }\n.glyphicon-adjust                 { &:before { content: \"\\e063\"; } }\n.glyphicon-tint                   { &:before { content: \"\\e064\"; } }\n.glyphicon-edit                   { &:before { content: \"\\e065\"; } }\n.glyphicon-share                  { &:before { content: \"\\e066\"; } }\n.glyphicon-check                  { &:before { content: \"\\e067\"; } }\n.glyphicon-move                   { &:before { content: \"\\e068\"; } }\n.glyphicon-step-backward          { &:before { content: \"\\e069\"; } }\n.glyphicon-fast-backward          { &:before { content: \"\\e070\"; } }\n.glyphicon-backward               { &:before { content: \"\\e071\"; } }\n.glyphicon-play                   { &:before { content: \"\\e072\"; } }\n.glyphicon-pause                  { &:before { content: \"\\e073\"; } }\n.glyphicon-stop                   { &:before { content: \"\\e074\"; } }\n.glyphicon-forward                { &:before { content: \"\\e075\"; } }\n.glyphicon-fast-forward           { &:before { content: \"\\e076\"; } }\n.glyphicon-step-forward           { &:before { content: \"\\e077\"; } }\n.glyphicon-eject                  { &:before { content: \"\\e078\"; } 
}\n.glyphicon-chevron-left           { &:before { content: \"\\e079\"; } }\n.glyphicon-chevron-right          { &:before { content: \"\\e080\"; } }\n.glyphicon-plus-sign              { &:before { content: \"\\e081\"; } }\n.glyphicon-minus-sign             { &:before { content: \"\\e082\"; } }\n.glyphicon-remove-sign            { &:before { content: \"\\e083\"; } }\n.glyphicon-ok-sign                { &:before { content: \"\\e084\"; } }\n.glyphicon-question-sign          { &:before { content: \"\\e085\"; } }\n.glyphicon-info-sign              { &:before { content: \"\\e086\"; } }\n.glyphicon-screenshot             { &:before { content: \"\\e087\"; } }\n.glyphicon-remove-circle          { &:before { content: \"\\e088\"; } }\n.glyphicon-ok-circle              { &:before { content: \"\\e089\"; } }\n.glyphicon-ban-circle             { &:before { content: \"\\e090\"; } }\n.glyphicon-arrow-left             { &:before { content: \"\\e091\"; } }\n.glyphicon-arrow-right            { &:before { content: \"\\e092\"; } }\n.glyphicon-arrow-up               { &:before { content: \"\\e093\"; } }\n.glyphicon-arrow-down             { &:before { content: \"\\e094\"; } }\n.glyphicon-share-alt              { &:before { content: \"\\e095\"; } }\n.glyphicon-resize-full            { &:before { content: \"\\e096\"; } }\n.glyphicon-resize-small           { &:before { content: \"\\e097\"; } }\n.glyphicon-exclamation-sign       { &:before { content: \"\\e101\"; } }\n.glyphicon-gift                   { &:before { content: \"\\e102\"; } }\n.glyphicon-leaf                   { &:before { content: \"\\e103\"; } }\n.glyphicon-fire                   { &:before { content: \"\\e104\"; } }\n.glyphicon-eye-open               { &:before { content: \"\\e105\"; } }\n.glyphicon-eye-close              { &:before { content: \"\\e106\"; } }\n.glyphicon-warning-sign           { &:before { content: \"\\e107\"; } }\n.glyphicon-plane                  { &:before { content: \"\\e108\"; } }\n.glyphicon-calendar               { &:before { content: \"\\e109\"; } }\n.glyphicon-random                 { &:before { content: \"\\e110\"; } }\n.glyphicon-comment                { &:before { content: \"\\e111\"; } }\n.glyphicon-magnet                 { &:before { content: \"\\e112\"; } }\n.glyphicon-chevron-up             { &:before { content: \"\\e113\"; } }\n.glyphicon-chevron-down           { &:before { content: \"\\e114\"; } }\n.glyphicon-retweet                { &:before { content: \"\\e115\"; } }\n.glyphicon-shopping-cart          { &:before { content: \"\\e116\"; } }\n.glyphicon-folder-close           { &:before { content: \"\\e117\"; } }\n.glyphicon-folder-open            { &:before { content: \"\\e118\"; } }\n.glyphicon-resize-vertical        { &:before { content: \"\\e119\"; } }\n.glyphicon-resize-horizontal      { &:before { content: \"\\e120\"; } }\n.glyphicon-hdd                    { &:before { content: \"\\e121\"; } }\n.glyphicon-bullhorn               { &:before { content: \"\\e122\"; } }\n.glyphicon-bell                   { &:before { content: \"\\e123\"; } }\n.glyphicon-certificate            { &:before { content: \"\\e124\"; } }\n.glyphicon-thumbs-up              { &:before { content: \"\\e125\"; } }\n.glyphicon-thumbs-down            { &:before { content: \"\\e126\"; } }\n.glyphicon-hand-right             { &:before { content: \"\\e127\"; } }\n.glyphicon-hand-left              { &:before { content: \"\\e128\"; } }\n.glyphicon-hand-up                { &:before { content: \"\\e129\"; } }\n.glyphicon-hand-down              { &:before { 
content: \"\\e130\"; } }\n.glyphicon-circle-arrow-right     { &:before { content: \"\\e131\"; } }\n.glyphicon-circle-arrow-left      { &:before { content: \"\\e132\"; } }\n.glyphicon-circle-arrow-up        { &:before { content: \"\\e133\"; } }\n.glyphicon-circle-arrow-down      { &:before { content: \"\\e134\"; } }\n.glyphicon-globe                  { &:before { content: \"\\e135\"; } }\n.glyphicon-wrench                 { &:before { content: \"\\e136\"; } }\n.glyphicon-tasks                  { &:before { content: \"\\e137\"; } }\n.glyphicon-filter                 { &:before { content: \"\\e138\"; } }\n.glyphicon-briefcase              { &:before { content: \"\\e139\"; } }\n.glyphicon-fullscreen             { &:before { content: \"\\e140\"; } }\n.glyphicon-dashboard              { &:before { content: \"\\e141\"; } }\n.glyphicon-paperclip              { &:before { content: \"\\e142\"; } }\n.glyphicon-heart-empty            { &:before { content: \"\\e143\"; } }\n.glyphicon-link                   { &:before { content: \"\\e144\"; } }\n.glyphicon-phone                  { &:before { content: \"\\e145\"; } }\n.glyphicon-pushpin                { &:before { content: \"\\e146\"; } }\n.glyphicon-usd                    { &:before { content: \"\\e148\"; } }\n.glyphicon-gbp                    { &:before { content: \"\\e149\"; } }\n.glyphicon-sort                   { &:before { content: \"\\e150\"; } }\n.glyphicon-sort-by-alphabet       { &:before { content: \"\\e151\"; } }\n.glyphicon-sort-by-alphabet-alt   { &:before { content: \"\\e152\"; } }\n.glyphicon-sort-by-order          { &:before { content: \"\\e153\"; } }\n.glyphicon-sort-by-order-alt      { &:before { content: \"\\e154\"; } }\n.glyphicon-sort-by-attributes     { &:before { content: \"\\e155\"; } }\n.glyphicon-sort-by-attributes-alt { &:before { content: \"\\e156\"; } }\n.glyphicon-unchecked              { &:before { content: \"\\e157\"; } }\n.glyphicon-expand                 { &:before { content: \"\\e158\"; } }\n.glyphicon-collapse-down          { &:before { content: \"\\e159\"; } }\n.glyphicon-collapse-up            { &:before { content: \"\\e160\"; } }\n.glyphicon-log-in                 { &:before { content: \"\\e161\"; } }\n.glyphicon-flash                  { &:before { content: \"\\e162\"; } }\n.glyphicon-log-out                { &:before { content: \"\\e163\"; } }\n.glyphicon-new-window             { &:before { content: \"\\e164\"; } }\n.glyphicon-record                 { &:before { content: \"\\e165\"; } }\n.glyphicon-save                   { &:before { content: \"\\e166\"; } }\n.glyphicon-open                   { &:before { content: \"\\e167\"; } }\n.glyphicon-saved                  { &:before { content: \"\\e168\"; } }\n.glyphicon-import                 { &:before { content: \"\\e169\"; } }\n.glyphicon-export                 { &:before { content: \"\\e170\"; } }\n.glyphicon-send                   { &:before { content: \"\\e171\"; } }\n.glyphicon-floppy-disk            { &:before { content: \"\\e172\"; } }\n.glyphicon-floppy-saved           { &:before { content: \"\\e173\"; } }\n.glyphicon-floppy-remove          { &:before { content: \"\\e174\"; } }\n.glyphicon-floppy-save            { &:before { content: \"\\e175\"; } }\n.glyphicon-floppy-open            { &:before { content: \"\\e176\"; } }\n.glyphicon-credit-card            { &:before { content: \"\\e177\"; } }\n.glyphicon-transfer               { &:before { content: \"\\e178\"; } }\n.glyphicon-cutlery                { &:before { content: \"\\e179\"; } }\n.glyphicon-header        
         { &:before { content: \"\\e180\"; } }\n.glyphicon-compressed             { &:before { content: \"\\e181\"; } }\n.glyphicon-earphone               { &:before { content: \"\\e182\"; } }\n.glyphicon-phone-alt              { &:before { content: \"\\e183\"; } }\n.glyphicon-tower                  { &:before { content: \"\\e184\"; } }\n.glyphicon-stats                  { &:before { content: \"\\e185\"; } }\n.glyphicon-sd-video               { &:before { content: \"\\e186\"; } }\n.glyphicon-hd-video               { &:before { content: \"\\e187\"; } }\n.glyphicon-subtitles              { &:before { content: \"\\e188\"; } }\n.glyphicon-sound-stereo           { &:before { content: \"\\e189\"; } }\n.glyphicon-sound-dolby            { &:before { content: \"\\e190\"; } }\n.glyphicon-sound-5-1              { &:before { content: \"\\e191\"; } }\n.glyphicon-sound-6-1              { &:before { content: \"\\e192\"; } }\n.glyphicon-sound-7-1              { &:before { content: \"\\e193\"; } }\n.glyphicon-copyright-mark         { &:before { content: \"\\e194\"; } }\n.glyphicon-registration-mark      { &:before { content: \"\\e195\"; } }\n.glyphicon-cloud-download         { &:before { content: \"\\e197\"; } }\n.glyphicon-cloud-upload           { &:before { content: \"\\e198\"; } }\n.glyphicon-tree-conifer           { &:before { content: \"\\e199\"; } }\n.glyphicon-tree-deciduous         { &:before { content: \"\\e200\"; } }\n.glyphicon-cd                     { &:before { content: \"\\e201\"; } }\n.glyphicon-save-file              { &:before { content: \"\\e202\"; } }\n.glyphicon-open-file              { &:before { content: \"\\e203\"; } }\n.glyphicon-level-up               { &:before { content: \"\\e204\"; } }\n.glyphicon-copy                   { &:before { content: \"\\e205\"; } }\n.glyphicon-paste                  { &:before { content: \"\\e206\"; } }\n// The following 2 Glyphicons are omitted for the time being because\n// they currently use Unicode codepoints that are outside the\n// Basic Multilingual Plane (BMP). 
Older buggy versions of WebKit can't handle\n// non-BMP codepoints in CSS string escapes, and thus can't display these two icons.\n// Notably, the bug affects some older versions of the Android Browser.\n// More info: https://github.com/twbs/bootstrap/issues/10106\n// .glyphicon-door                   { &:before { content: \"\\1f6aa\"; } }\n// .glyphicon-key                    { &:before { content: \"\\1f511\"; } }\n.glyphicon-alert                  { &:before { content: \"\\e209\"; } }\n.glyphicon-equalizer              { &:before { content: \"\\e210\"; } }\n.glyphicon-king                   { &:before { content: \"\\e211\"; } }\n.glyphicon-queen                  { &:before { content: \"\\e212\"; } }\n.glyphicon-pawn                   { &:before { content: \"\\e213\"; } }\n.glyphicon-bishop                 { &:before { content: \"\\e214\"; } }\n.glyphicon-knight                 { &:before { content: \"\\e215\"; } }\n.glyphicon-baby-formula           { &:before { content: \"\\e216\"; } }\n.glyphicon-tent                   { &:before { content: \"\\26fa\"; } }\n.glyphicon-blackboard             { &:before { content: \"\\e218\"; } }\n.glyphicon-bed                    { &:before { content: \"\\e219\"; } }\n.glyphicon-apple                  { &:before { content: \"\\f8ff\"; } }\n.glyphicon-erase                  { &:before { content: \"\\e221\"; } }\n.glyphicon-hourglass              { &:before { content: \"\\231b\"; } }\n.glyphicon-lamp                   { &:before { content: \"\\e223\"; } }\n.glyphicon-duplicate              { &:before { content: \"\\e224\"; } }\n.glyphicon-piggy-bank             { &:before { content: \"\\e225\"; } }\n.glyphicon-scissors               { &:before { content: \"\\e226\"; } }\n.glyphicon-bitcoin                { &:before { content: \"\\e227\"; } }\n.glyphicon-btc                    { &:before { content: \"\\e227\"; } }\n.glyphicon-xbt                    { &:before { content: \"\\e227\"; } }\n.glyphicon-yen                    { &:before { content: \"\\00a5\"; } }\n.glyphicon-jpy                    { &:before { content: \"\\00a5\"; } }\n.glyphicon-ruble                  { &:before { content: \"\\20bd\"; } }\n.glyphicon-rub                    { &:before { content: \"\\20bd\"; } }\n.glyphicon-scale                  { &:before { content: \"\\e230\"; } }\n.glyphicon-ice-lolly              { &:before { content: \"\\e231\"; } }\n.glyphicon-ice-lolly-tasted       { &:before { content: \"\\e232\"; } }\n.glyphicon-education              { &:before { content: \"\\e233\"; } }\n.glyphicon-option-horizontal      { &:before { content: \"\\e234\"; } }\n.glyphicon-option-vertical        { &:before { content: \"\\e235\"; } }\n.glyphicon-menu-hamburger         { &:before { content: \"\\e236\"; } }\n.glyphicon-modal-window           { &:before { content: \"\\e237\"; } }\n.glyphicon-oil                    { &:before { content: \"\\e238\"; } }\n.glyphicon-grain                  { &:before { content: \"\\e239\"; } }\n.glyphicon-sunglasses             { &:before { content: \"\\e240\"; } }\n.glyphicon-text-size              { &:before { content: \"\\e241\"; } }\n.glyphicon-text-color             { &:before { content: \"\\e242\"; } }\n.glyphicon-text-background        { &:before { content: \"\\e243\"; } }\n.glyphicon-object-align-top       { &:before { content: \"\\e244\"; } }\n.glyphicon-object-align-bottom    { &:before { content: \"\\e245\"; } }\n.glyphicon-object-align-horizontal{ &:before { content: \"\\e246\"; } }\n.glyphicon-object-align-left      { &:before { content: \"\\e247\"; 
} }\n.glyphicon-object-align-vertical  { &:before { content: \"\\e248\"; } }\n.glyphicon-object-align-right     { &:before { content: \"\\e249\"; } }\n.glyphicon-triangle-right         { &:before { content: \"\\e250\"; } }\n.glyphicon-triangle-left          { &:before { content: \"\\e251\"; } }\n.glyphicon-triangle-bottom        { &:before { content: \"\\e252\"; } }\n.glyphicon-triangle-top           { &:before { content: \"\\e253\"; } }\n.glyphicon-console                { &:before { content: \"\\e254\"; } }\n.glyphicon-superscript            { &:before { content: \"\\e255\"; } }\n.glyphicon-subscript              { &:before { content: \"\\e256\"; } }\n.glyphicon-menu-left              { &:before { content: \"\\e257\"; } }\n.glyphicon-menu-right             { &:before { content: \"\\e258\"; } }\n.glyphicon-menu-down              { &:before { content: \"\\e259\"; } }\n.glyphicon-menu-up                { &:before { content: \"\\e260\"; } }\n","//\n// Scaffolding\n// --------------------------------------------------\n\n\n// Reset the box-sizing\n//\n// Heads up! This reset may cause conflicts with some third-party widgets.\n// For recommendations on resolving such conflicts, see\n// https://getbootstrap.com/docs/3.4/getting-started/#third-box-sizing\n* {\n  .box-sizing(border-box);\n}\n*:before,\n*:after {\n  .box-sizing(border-box);\n}\n\n\n// Body reset\n\nhtml {\n  font-size: 10px;\n  -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\n\nbody {\n  font-family: @font-family-base;\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @text-color;\n  background-color: @body-bg;\n}\n\n// Reset fonts for relevant elements\ninput,\nbutton,\nselect,\ntextarea {\n  font-family: inherit;\n  font-size: inherit;\n  line-height: inherit;\n}\n\n\n// Links\n\na {\n  color: @link-color;\n  text-decoration: none;\n\n  &:hover,\n  &:focus {\n    color: @link-hover-color;\n    text-decoration: @link-hover-decoration;\n  }\n\n  &:focus {\n    .tab-focus();\n  }\n}\n\n\n// Figures\n//\n// We reset this here because previously Normalize had no `figure` margins. This\n// ensures we don't break anyone's use of the element.\n\nfigure {\n  margin: 0;\n}\n\n\n// Images\n\nimg {\n  vertical-align: middle;\n}\n\n// Responsive images (ensure images don't scale beyond their parents)\n.img-responsive {\n  .img-responsive();\n}\n\n// Rounded corners\n.img-rounded {\n  border-radius: @border-radius-large;\n}\n\n// Image thumbnails\n//\n// Heads up! 
This is mixin-ed into thumbnails.less for `.thumbnail`.\n.img-thumbnail {\n  padding: @thumbnail-padding;\n  line-height: @line-height-base;\n  background-color: @thumbnail-bg;\n  border: 1px solid @thumbnail-border;\n  border-radius: @thumbnail-border-radius;\n  .transition(all .2s ease-in-out);\n\n  // Keep them at most 100% wide\n  .img-responsive(inline-block);\n}\n\n// Perfect circle\n.img-circle {\n  border-radius: 50%; // set radius in percents\n}\n\n\n// Horizontal rules\n\nhr {\n  margin-top: @line-height-computed;\n  margin-bottom: @line-height-computed;\n  border: 0;\n  border-top: 1px solid @hr-border;\n}\n\n\n// Only display content to screen readers\n//\n// See: https://a11yproject.com/posts/how-to-hide-content\n\n.sr-only {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  padding: 0;\n  margin: -1px;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n}\n\n// Use in conjunction with .sr-only to only display content when it's focused.\n// Useful for \"Skip to main content\" links; see https://www.w3.org/TR/2013/NOTE-WCAG20-TECHS-20130905/G1\n// Credit: HTML5 Boilerplate\n\n.sr-only-focusable {\n  &:active,\n  &:focus {\n    position: static;\n    width: auto;\n    height: auto;\n    margin: 0;\n    overflow: visible;\n    clip: auto;\n  }\n}\n\n\n// iOS \"clickable elements\" fix for role=\"button\"\n//\n// Fixes \"clickability\" issue (and more generally, the firing of events such as focus as well)\n// for traditionally non-focusable elements with role=\"button\"\n// see https://developer.mozilla.org/en-US/docs/Web/Events/click#Safari_Mobile\n\n[role=\"button\"] {\n  cursor: pointer;\n}\n","// stylelint-disable indentation, property-no-vendor-prefix, selector-no-vendor-prefix\n\n// Vendor Prefixes\n//\n// All vendor mixins are deprecated as of v3.2.0 due to the introduction of\n// Autoprefixer in our Gruntfile. 
They have been removed in v4.\n\n// - Animations\n// - Backface visibility\n// - Box shadow\n// - Box sizing\n// - Content columns\n// - Hyphens\n// - Placeholder text\n// - Transformations\n// - Transitions\n// - User Select\n\n\n// Animations\n.animation(@animation) {\n  -webkit-animation: @animation;\n       -o-animation: @animation;\n          animation: @animation;\n}\n.animation-name(@name) {\n  -webkit-animation-name: @name;\n          animation-name: @name;\n}\n.animation-duration(@duration) {\n  -webkit-animation-duration: @duration;\n          animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n  -webkit-animation-timing-function: @timing-function;\n          animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n  -webkit-animation-delay: @delay;\n          animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n  -webkit-animation-iteration-count: @iteration-count;\n          animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n  -webkit-animation-direction: @direction;\n          animation-direction: @direction;\n}\n.animation-fill-mode(@fill-mode) {\n  -webkit-animation-fill-mode: @fill-mode;\n          animation-fill-mode: @fill-mode;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n\n.backface-visibility(@visibility) {\n  -webkit-backface-visibility: @visibility;\n     -moz-backface-visibility: @visibility;\n          backface-visibility: @visibility;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n// supported browsers that have box shadow capabilities now support it.\n\n.box-shadow(@shadow) {\n  -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n          box-shadow: @shadow;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n  -webkit-box-sizing: @boxmodel;\n     -moz-box-sizing: @boxmodel;\n          box-sizing: @boxmodel;\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n  -webkit-column-count: @column-count;\n     -moz-column-count: @column-count;\n          column-count: @column-count;\n  -webkit-column-gap: @column-gap;\n     -moz-column-gap: @column-gap;\n          column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n  -webkit-hyphens: @mode;\n     -moz-hyphens: @mode;\n      -ms-hyphens: @mode; // IE10+\n       -o-hyphens: @mode;\n          hyphens: @mode;\n  word-wrap: break-word;\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n  // Firefox\n  &::-moz-placeholder {\n    color: @color;\n    opacity: 1; // Override Firefox's unusual default opacity; see https://github.com/twbs/bootstrap/pull/11526\n  }\n  &:-ms-input-placeholder { color: @color; } // Internet Explorer 10+\n  &::-webkit-input-placeholder  { color: @color; } // Safari and Chrome\n}\n\n// Transformations\n.scale(@ratio) {\n  -webkit-transform: scale(@ratio);\n      -ms-transform: scale(@ratio); // IE9 only\n       -o-transform: scale(@ratio);\n          transform: scale(@ratio);\n}\n.scale(@ratioX; @ratioY) {\n  -webkit-transform: scale(@ratioX, @ratioY);\n      -ms-transform: scale(@ratioX, @ratioY); // IE9 only\n       -o-transform: scale(@ratioX, @ratioY);\n          transform: scale(@ratioX, @ratioY);\n}\n.scaleX(@ratio) {\n  -webkit-transform: scaleX(@ratio);\n      -ms-transform: scaleX(@ratio); // IE9 only\n       
-o-transform: scaleX(@ratio);\n          transform: scaleX(@ratio);\n}\n.scaleY(@ratio) {\n  -webkit-transform: scaleY(@ratio);\n      -ms-transform: scaleY(@ratio); // IE9 only\n       -o-transform: scaleY(@ratio);\n          transform: scaleY(@ratio);\n}\n.skew(@x; @y) {\n  -webkit-transform: skewX(@x) skewY(@y);\n      -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n       -o-transform: skewX(@x) skewY(@y);\n          transform: skewX(@x) skewY(@y);\n}\n.translate(@x; @y) {\n  -webkit-transform: translate(@x, @y);\n      -ms-transform: translate(@x, @y); // IE9 only\n       -o-transform: translate(@x, @y);\n          transform: translate(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n  -webkit-transform: translate3d(@x, @y, @z);\n          transform: translate3d(@x, @y, @z);\n}\n.rotate(@degrees) {\n  -webkit-transform: rotate(@degrees);\n      -ms-transform: rotate(@degrees); // IE9 only\n       -o-transform: rotate(@degrees);\n          transform: rotate(@degrees);\n}\n.rotateX(@degrees) {\n  -webkit-transform: rotateX(@degrees);\n      -ms-transform: rotateX(@degrees); // IE9 only\n       -o-transform: rotateX(@degrees);\n          transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n  -webkit-transform: rotateY(@degrees);\n      -ms-transform: rotateY(@degrees); // IE9 only\n       -o-transform: rotateY(@degrees);\n          transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n  -webkit-perspective: @perspective;\n     -moz-perspective: @perspective;\n          perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n  -webkit-perspective-origin: @perspective;\n     -moz-perspective-origin: @perspective;\n          perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n  -webkit-transform-origin: @origin;\n     -moz-transform-origin: @origin;\n      -ms-transform-origin: @origin; // IE9 only\n          transform-origin: @origin;\n}\n\n\n// Transitions\n\n.transition(@transition) {\n  -webkit-transition: @transition;\n       -o-transition: @transition;\n          transition: @transition;\n}\n.transition-property(@transition-property) {\n  -webkit-transition-property: @transition-property;\n          transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n  -webkit-transition-delay: @transition-delay;\n          transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n  -webkit-transition-duration: @transition-duration;\n          transition-duration: @transition-duration;\n}\n.transition-timing-function(@timing-function) {\n  -webkit-transition-timing-function: @timing-function;\n          transition-timing-function: @timing-function;\n}\n.transition-transform(@transition) {\n  -webkit-transition: -webkit-transform @transition;\n     -moz-transition: -moz-transform @transition;\n       -o-transition: -o-transform @transition;\n          transition: transform @transition;\n}\n\n\n// User select\n// For selecting text on the page\n\n.user-select(@select) {\n  -webkit-user-select: @select;\n     -moz-user-select: @select;\n      -ms-user-select: @select; // IE10+\n          user-select: @select;\n}\n","// WebKit-style focus\n\n.tab-focus() {\n  // WebKit-specific. 
Other browsers will keep their default outline style.\n  // (Initially tried to also force default via `outline: initial`,\n  // but that seems to erroneously remove the outline in Firefox altogether.)\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n","// stylelint-disable media-feature-name-no-vendor-prefix, media-feature-parentheses-space-inside, media-feature-name-no-unknown, indentation, at-rule-name-space-after\n\n// Responsive image\n//\n// Keep images from scaling beyond the width of their parents.\n.img-responsive(@display: block) {\n  display: @display;\n  max-width: 100%; // Part 1: Set a maximum relative to the parent\n  height: auto; // Part 2: Scale the height according to the width, otherwise you get stretching\n}\n\n\n// Retina image\n//\n// Short retina mixin for setting background-image and -size. Note that the\n// spelling of `min--moz-device-pixel-ratio` is intentional.\n.img-retina(@file-1x; @file-2x; @width-1x; @height-1x) {\n  background-image: url(\"@{file-1x}\");\n\n  @media\n  only screen and (-webkit-min-device-pixel-ratio: 2),\n  only screen and ( min--moz-device-pixel-ratio: 2),\n  only screen and ( -o-min-device-pixel-ratio: 2/1),\n  only screen and ( min-device-pixel-ratio: 2),\n  only screen and ( min-resolution: 192dpi),\n  only screen and ( min-resolution: 2dppx) {\n    background-image: url(\"@{file-2x}\");\n    background-size: @width-1x @height-1x;\n  }\n}\n","// stylelint-disable selector-list-comma-newline-after, selector-no-qualifying-type\n\n//\n// Typography\n// --------------------------------------------------\n\n\n// Headings\n// -------------------------\n\nh1, h2, h3, h4, h5, h6,\n.h1, .h2, .h3, .h4, .h5, .h6 {\n  font-family: @headings-font-family;\n  font-weight: @headings-font-weight;\n  line-height: @headings-line-height;\n  color: @headings-color;\n\n  small,\n  .small {\n    font-weight: 400;\n    line-height: 1;\n    color: @headings-small-color;\n  }\n}\n\nh1, .h1,\nh2, .h2,\nh3, .h3 {\n  margin-top: @line-height-computed;\n  margin-bottom: (@line-height-computed / 2);\n\n  small,\n  .small {\n    font-size: 65%;\n  }\n}\nh4, .h4,\nh5, .h5,\nh6, .h6 {\n  margin-top: (@line-height-computed / 2);\n  margin-bottom: (@line-height-computed / 2);\n\n  small,\n  .small {\n    font-size: 75%;\n  }\n}\n\nh1, .h1 { font-size: @font-size-h1; }\nh2, .h2 { font-size: @font-size-h2; }\nh3, .h3 { font-size: @font-size-h3; }\nh4, .h4 { font-size: @font-size-h4; }\nh5, .h5 { font-size: @font-size-h5; }\nh6, .h6 { font-size: @font-size-h6; }\n\n\n// Body text\n// -------------------------\n\np {\n  margin: 0 0 (@line-height-computed / 2);\n}\n\n.lead {\n  margin-bottom: @line-height-computed;\n  font-size: floor((@font-size-base * 1.15));\n  font-weight: 300;\n  line-height: 1.4;\n\n  @media (min-width: @screen-sm-min) {\n    font-size: (@font-size-base * 1.5);\n  }\n}\n\n\n// Emphasis & misc\n// -------------------------\n\n// Ex: (12px small font / 14px base font) * 100% = about 85%\nsmall,\n.small {\n  font-size: floor((100% * @font-size-small / @font-size-base));\n}\n\nmark,\n.mark {\n  padding: .2em;\n  background-color: @state-warning-bg;\n}\n\n// Alignment\n.text-left           { text-align: left; }\n.text-right          { text-align: right; }\n.text-center         { text-align: center; }\n.text-justify        { text-align: justify; }\n.text-nowrap         { white-space: nowrap; }\n\n// Transformation\n.text-lowercase      { text-transform: lowercase; }\n.text-uppercase      { text-transform: uppercase; 
}\n.text-capitalize     { text-transform: capitalize; }\n\n// Contextual colors\n.text-muted {\n  color: @text-muted;\n}\n.text-primary {\n  .text-emphasis-variant(@brand-primary);\n}\n.text-success {\n  .text-emphasis-variant(@state-success-text);\n}\n.text-info {\n  .text-emphasis-variant(@state-info-text);\n}\n.text-warning {\n  .text-emphasis-variant(@state-warning-text);\n}\n.text-danger {\n  .text-emphasis-variant(@state-danger-text);\n}\n\n// Contextual backgrounds\n// For now we'll leave these alongside the text classes until v4 when we can\n// safely shift things around (per SemVer rules).\n.bg-primary {\n  // Given the contrast here, this is the only class to have its color inverted\n  // automatically.\n  color: #fff;\n  .bg-variant(@brand-primary);\n}\n.bg-success {\n  .bg-variant(@state-success-bg);\n}\n.bg-info {\n  .bg-variant(@state-info-bg);\n}\n.bg-warning {\n  .bg-variant(@state-warning-bg);\n}\n.bg-danger {\n  .bg-variant(@state-danger-bg);\n}\n\n\n// Page header\n// -------------------------\n\n.page-header {\n  padding-bottom: ((@line-height-computed / 2) - 1);\n  margin: (@line-height-computed * 2) 0 @line-height-computed;\n  border-bottom: 1px solid @page-header-border-color;\n}\n\n\n// Lists\n// -------------------------\n\n// Unordered and Ordered lists\nul,\nol {\n  margin-top: 0;\n  margin-bottom: (@line-height-computed / 2);\n  ul,\n  ol {\n    margin-bottom: 0;\n  }\n}\n\n// List options\n\n// Unstyled keeps list items block level, just removes default browser padding and list-style\n.list-unstyled {\n  padding-left: 0;\n  list-style: none;\n}\n\n// Inline turns list items into inline-block\n.list-inline {\n  .list-unstyled();\n  margin-left: -5px;\n\n  > li {\n    display: inline-block;\n    padding-right: 5px;\n    padding-left: 5px;\n  }\n}\n\n// Description Lists\ndl {\n  margin-top: 0; // Remove browser default\n  margin-bottom: @line-height-computed;\n}\ndt,\ndd {\n  line-height: @line-height-base;\n}\ndt {\n  font-weight: 700;\n}\ndd {\n  margin-left: 0; // Undo browser default\n}\n\n// Horizontal description lists\n//\n// Defaults to being stacked without any of the below styles applied, until the\n// grid breakpoint is reached (default of ~768px).\n\n.dl-horizontal {\n  dd {\n    &:extend(.clearfix all); // Clear the floated `dt` if an empty `dd` is present\n  }\n\n  @media (min-width: @dl-horizontal-breakpoint) {\n    dt {\n      float: left;\n      width: (@dl-horizontal-offset - 20);\n      clear: left;\n      text-align: right;\n      .text-overflow();\n    }\n    dd {\n      margin-left: @dl-horizontal-offset;\n    }\n  }\n}\n\n\n// Misc\n// -------------------------\n\n// Abbreviations and acronyms\n// Add data-* attribute to help out our tooltip plugin, per https://github.com/twbs/bootstrap/issues/5257\nabbr[title],\nabbr[data-original-title] {\n  cursor: help;\n}\n\n.initialism {\n  font-size: 90%;\n  .text-uppercase();\n}\n\n// Blockquotes\nblockquote {\n  padding: (@line-height-computed / 2) @line-height-computed;\n  margin: 0 0 @line-height-computed;\n  font-size: @blockquote-font-size;\n  border-left: 5px solid @blockquote-border-color;\n\n  p,\n  ul,\n  ol {\n    &:last-child {\n      margin-bottom: 0;\n    }\n  }\n\n  // Note: Deprecated small and .small as of v3.1.0\n  // Context: https://github.com/twbs/bootstrap/issues/11660\n  footer,\n  small,\n  .small {\n    display: block;\n    font-size: 80%; // back to default font-size\n    line-height: @line-height-base;\n    color: @blockquote-small-color;\n\n    &:before {\n      content: 
\"\\2014 \\00A0\"; // em dash, nbsp\n    }\n  }\n}\n\n// Opposite alignment of blockquote\n//\n// Heads up: `blockquote.pull-right` has been deprecated as of v3.1.0.\n.blockquote-reverse,\nblockquote.pull-right {\n  padding-right: 15px;\n  padding-left: 0;\n  text-align: right;\n  border-right: 5px solid @blockquote-border-color;\n  border-left: 0;\n\n  // Account for citation\n  footer,\n  small,\n  .small {\n    &:before { content: \"\"; }\n    &:after {\n      content: \"\\00A0 \\2014\"; // nbsp, em dash\n    }\n  }\n}\n\n// Addresses\naddress {\n  margin-bottom: @line-height-computed;\n  font-style: normal;\n  line-height: @line-height-base;\n}\n","// Typography\n\n.text-emphasis-variant(@color) {\n  color: @color;\n  a&:hover,\n  a&:focus {\n    color: darken(@color, 10%);\n  }\n}\n","// Contextual backgrounds\n\n.bg-variant(@color) {\n  background-color: @color;\n  a&:hover,\n  a&:focus {\n    background-color: darken(@color, 10%);\n  }\n}\n","// Text overflow\n// Requires inline-block or block for proper styling\n\n.text-overflow() {\n  overflow: hidden;\n  text-overflow: ellipsis;\n  white-space: nowrap;\n}\n","//\n// Code (inline and block)\n// --------------------------------------------------\n\n\n// Inline and block code styles\ncode,\nkbd,\npre,\nsamp {\n  font-family: @font-family-monospace;\n}\n\n// Inline code\ncode {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: @code-color;\n  background-color: @code-bg;\n  border-radius: @border-radius-base;\n}\n\n// User input typically entered via keyboard\nkbd {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: @kbd-color;\n  background-color: @kbd-bg;\n  border-radius: @border-radius-small;\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);\n\n  kbd {\n    padding: 0;\n    font-size: 100%;\n    font-weight: 700;\n    box-shadow: none;\n  }\n}\n\n// Blocks of code\npre {\n  display: block;\n  padding: ((@line-height-computed - 1) / 2);\n  margin: 0 0 (@line-height-computed / 2);\n  font-size: (@font-size-base - 1); // 14px to 13px\n  line-height: @line-height-base;\n  color: @pre-color;\n  word-break: break-all;\n  word-wrap: break-word;\n  background-color: @pre-bg;\n  border: 1px solid @pre-border-color;\n  border-radius: @border-radius-base;\n\n  // Account for some code outputs that place code tags in pre tags\n  code {\n    padding: 0;\n    font-size: inherit;\n    color: inherit;\n    white-space: pre-wrap;\n    background-color: transparent;\n    border-radius: 0;\n  }\n}\n\n// Enable scrollable blocks of code\n.pre-scrollable {\n  max-height: @pre-scrollable-max-height;\n  overflow-y: scroll;\n}\n","//\n// Grid system\n// --------------------------------------------------\n\n\n// Container widths\n//\n// Set the container width, and override it for fixed navbars in media queries.\n\n.container {\n  .container-fixed();\n\n  @media (min-width: @screen-sm-min) {\n    width: @container-sm;\n  }\n  @media (min-width: @screen-md-min) {\n    width: @container-md;\n  }\n  @media (min-width: @screen-lg-min) {\n    width: @container-lg;\n  }\n}\n\n\n// Fluid container\n//\n// Utilizes the mixin meant for fixed width containers, but without any defined\n// width for fluid, full width layouts.\n\n.container-fluid {\n  .container-fixed();\n}\n\n\n// Row\n//\n// Rows contain and clear the floats of your columns.\n\n.row {\n  .make-row();\n}\n\n.row-no-gutters {\n  margin-right: 0;\n  margin-left: 0;\n\n  [class*=\"col-\"] {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n\n\n// Columns\n//\n// Common styles for small and large grid 
columns\n\n.make-grid-columns();\n\n\n// Extra small grid\n//\n// Columns, offsets, pushes, and pulls for extra small devices like\n// smartphones.\n\n.make-grid(xs);\n\n\n// Small grid\n//\n// Columns, offsets, pushes, and pulls for the small device range, from phones\n// to tablets.\n\n@media (min-width: @screen-sm-min) {\n  .make-grid(sm);\n}\n\n\n// Medium grid\n//\n// Columns, offsets, pushes, and pulls for the desktop device range.\n\n@media (min-width: @screen-md-min) {\n  .make-grid(md);\n}\n\n\n// Large grid\n//\n// Columns, offsets, pushes, and pulls for the large desktop device range.\n\n@media (min-width: @screen-lg-min) {\n  .make-grid(lg);\n}\n","// Grid system\n//\n// Generate semantic grid columns with these mixins.\n\n// Centered container element\n.container-fixed(@gutter: @grid-gutter-width) {\n  padding-right: ceil((@gutter / 2));\n  padding-left: floor((@gutter / 2));\n  margin-right: auto;\n  margin-left: auto;\n  &:extend(.clearfix all);\n}\n\n// Creates a wrapper for a series of columns\n.make-row(@gutter: @grid-gutter-width) {\n  margin-right: floor((@gutter / -2));\n  margin-left: ceil((@gutter / -2));\n  &:extend(.clearfix all);\n}\n\n// Generate the extra small columns\n.make-xs-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  float: left;\n  width: percentage((@columns / @grid-columns));\n  min-height: 1px;\n  padding-right: (@gutter / 2);\n  padding-left: (@gutter / 2);\n}\n.make-xs-column-offset(@columns) {\n  margin-left: percentage((@columns / @grid-columns));\n}\n.make-xs-column-push(@columns) {\n  left: percentage((@columns / @grid-columns));\n}\n.make-xs-column-pull(@columns) {\n  right: percentage((@columns / @grid-columns));\n}\n\n// Generate the small columns\n.make-sm-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-right: (@gutter / 2);\n  padding-left: (@gutter / 2);\n\n  @media (min-width: @screen-sm-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-offset(@columns) {\n  @media (min-width: @screen-sm-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-push(@columns) {\n  @media (min-width: @screen-sm-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-pull(@columns) {\n  @media (min-width: @screen-sm-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n// Generate the medium columns\n.make-md-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-right: (@gutter / 2);\n  padding-left: (@gutter / 2);\n\n  @media (min-width: @screen-md-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-offset(@columns) {\n  @media (min-width: @screen-md-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-push(@columns) {\n  @media (min-width: @screen-md-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-pull(@columns) {\n  @media (min-width: @screen-md-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n// Generate the large columns\n.make-lg-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-right: (@gutter / 2);\n  padding-left: (@gutter / 2);\n\n  @media (min-width: @screen-lg-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-offset(@columns) {\n  @media (min-width: 
@screen-lg-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-push(@columns) {\n  @media (min-width: @screen-lg-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-pull(@columns) {\n  @media (min-width: @screen-lg-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n","// Framework grid generation\n//\n// Used only by Bootstrap to generate the correct number of grid classes given\n// any value of `@grid-columns`.\n\n.make-grid-columns() {\n  // Common styles for all sizes of grid columns, widths 1-12\n  .col(@index) { // initial\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general; \"=<\" isn't a typo\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      position: relative;\n      // Prevent columns from collapsing when empty\n      min-height: 1px;\n      // Inner gutter via padding\n      padding-right: floor((@grid-gutter-width / 2));\n      padding-left: ceil((@grid-gutter-width / 2));\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.float-grid-columns(@class) {\n  .col(@index) { // initial\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      float: left;\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.calc-grid-column(@index, @class, @type) when (@type = width) and (@index > 0) {\n  .col-@{class}-@{index} {\n    width: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = push) and (@index > 0) {\n  .col-@{class}-push-@{index} {\n    left: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = push) and (@index = 0) {\n  .col-@{class}-push-0 {\n    left: auto;\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = pull) and (@index > 0) {\n  .col-@{class}-pull-@{index} {\n    right: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = pull) and (@index = 0) {\n  .col-@{class}-pull-0 {\n    right: auto;\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = offset) {\n  .col-@{class}-offset-@{index} {\n    margin-left: percentage((@index / @grid-columns));\n  }\n}\n\n// Basic looping in LESS\n.loop-grid-columns(@index, @class, @type) when (@index >= 0) {\n  .calc-grid-column(@index, @class, @type);\n  // next iteration\n  .loop-grid-columns((@index - 1), @class, @type);\n}\n\n// Create grid for specific class\n.make-grid(@class) {\n  .float-grid-columns(@class);\n  .loop-grid-columns(@grid-columns, @class, width);\n  .loop-grid-columns(@grid-columns, @class, pull);\n  .loop-grid-columns(@grid-columns, @class, push);\n  .loop-grid-columns(@grid-columns, @class, offset);\n}\n","// stylelint-disable selector-max-type, selector-max-compound-selectors, selector-no-qualifying-type\n\n//\n// Tables\n// --------------------------------------------------\n\n\ntable {\n  background-color: @table-bg;\n\n  // Table cell sizing\n  //\n  // Reset default table behavior\n\n  
col[class*=\"col-\"] {\n    position: static; // Prevent border hiding in Firefox and IE9-11 (see https://github.com/twbs/bootstrap/issues/11623)\n    display: table-column;\n    float: none;\n  }\n\n  td,\n  th {\n    &[class*=\"col-\"] {\n      position: static; // Prevent border hiding in Firefox and IE9-11 (see https://github.com/twbs/bootstrap/issues/11623)\n      display: table-cell;\n      float: none;\n    }\n  }\n}\n\ncaption {\n  padding-top: @table-cell-padding;\n  padding-bottom: @table-cell-padding;\n  color: @text-muted;\n  text-align: left;\n}\n\nth {\n  text-align: left;\n}\n\n\n// Baseline styles\n\n.table {\n  width: 100%;\n  max-width: 100%;\n  margin-bottom: @line-height-computed;\n  // Cells\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        padding: @table-cell-padding;\n        line-height: @line-height-base;\n        vertical-align: top;\n        border-top: 1px solid @table-border-color;\n      }\n    }\n  }\n  // Bottom align for column headings\n  > thead > tr > th {\n    vertical-align: bottom;\n    border-bottom: 2px solid @table-border-color;\n  }\n  // Remove top border from thead by default\n  > caption + thead,\n  > colgroup + thead,\n  > thead:first-child {\n    > tr:first-child {\n      > th,\n      > td {\n        border-top: 0;\n      }\n    }\n  }\n  // Account for multiple tbody instances\n  > tbody + tbody {\n    border-top: 2px solid @table-border-color;\n  }\n\n  // Nesting\n  .table {\n    background-color: @body-bg;\n  }\n}\n\n\n// Condensed table w/ half padding\n\n.table-condensed {\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        padding: @table-condensed-cell-padding;\n      }\n    }\n  }\n}\n\n\n// Bordered version\n//\n// Add borders all around the table and between all the columns.\n\n.table-bordered {\n  border: 1px solid @table-border-color;\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        border: 1px solid @table-border-color;\n      }\n    }\n  }\n  > thead > tr {\n    > th,\n    > td {\n      border-bottom-width: 2px;\n    }\n  }\n}\n\n\n// Zebra-striping\n//\n// Default zebra-stripe styles (alternating gray and transparent backgrounds)\n\n.table-striped {\n  > tbody > tr:nth-of-type(odd) {\n    background-color: @table-bg-accent;\n  }\n}\n\n\n// Hover effect\n//\n// Placed here since it has to come after the potential zebra striping\n\n.table-hover {\n  > tbody > tr:hover {\n    background-color: @table-bg-hover;\n  }\n}\n\n\n// Table backgrounds\n//\n// Exact selectors below required to override `.table-striped` and prevent\n// inheritance to nested tables.\n\n// Generate the contextual variants\n.table-row-variant(active; @table-bg-active);\n.table-row-variant(success; @state-success-bg);\n.table-row-variant(info; @state-info-bg);\n.table-row-variant(warning; @state-warning-bg);\n.table-row-variant(danger; @state-danger-bg);\n\n\n// Responsive tables\n//\n// Wrap your tables in `.table-responsive` and we'll make them mobile friendly\n// by enabling horizontal scrolling. Only applies <768px. 
Everything above that\n// will display normally.\n\n.table-responsive {\n  min-height: .01%; // Workaround for IE9 bug (see https://github.com/twbs/bootstrap/issues/14837)\n  overflow-x: auto;\n\n  @media screen and (max-width: @screen-xs-max) {\n    width: 100%;\n    margin-bottom: (@line-height-computed * .75);\n    overflow-y: hidden;\n    -ms-overflow-style: -ms-autohiding-scrollbar;\n    border: 1px solid @table-border-color;\n\n    // Tighten up spacing\n    > .table {\n      margin-bottom: 0;\n\n      // Ensure the content doesn't wrap\n      > thead,\n      > tbody,\n      > tfoot {\n        > tr {\n          > th,\n          > td {\n            white-space: nowrap;\n          }\n        }\n      }\n    }\n\n    // Special overrides for the bordered tables\n    > .table-bordered {\n      border: 0;\n\n      // Nuke the appropriate borders so that the parent can handle them\n      > thead,\n      > tbody,\n      > tfoot {\n        > tr {\n          > th:first-child,\n          > td:first-child {\n            border-left: 0;\n          }\n          > th:last-child,\n          > td:last-child {\n            border-right: 0;\n          }\n        }\n      }\n\n      // Only nuke the last row's bottom-border in `tbody` and `tfoot` since\n      // chances are there will be only one `tr` in a `thead` and that would\n      // remove the border altogether.\n      > tbody,\n      > tfoot {\n        > tr:last-child {\n          > th,\n          > td {\n            border-bottom: 0;\n          }\n        }\n      }\n\n    }\n  }\n}\n","// Tables\n\n.table-row-variant(@state; @background) {\n  // Exact selectors below required to override `.table-striped` and prevent\n  // inheritance to nested tables.\n  .table > thead > tr,\n  .table > tbody > tr,\n  .table > tfoot > tr {\n    > td.@{state},\n    > th.@{state},\n    &.@{state} > td,\n    &.@{state} > th {\n      background-color: @background;\n    }\n  }\n\n  // Hover states for `.table-hover`\n  // Note: this is not available for cells or rows within `thead` or `tfoot`.\n  .table-hover > tbody > tr {\n    > td.@{state}:hover,\n    > th.@{state}:hover,\n    &.@{state}:hover > td,\n    &:hover > .@{state},\n    &.@{state}:hover > th {\n      background-color: darken(@background, 5%);\n    }\n  }\n}\n","// stylelint-disable selector-no-qualifying-type, property-no-vendor-prefix, media-feature-name-no-vendor-prefix\n\n//\n// Forms\n// --------------------------------------------------\n\n\n// Normalize non-controls\n//\n// Restyle and baseline non-control form elements.\n\nfieldset {\n  // Chrome and Firefox set a `min-width: min-content;` on fieldsets,\n  // so we reset that to ensure it behaves more like a standard block element.\n  // See https://github.com/twbs/bootstrap/issues/12359.\n  min-width: 0;\n  padding: 0;\n  margin: 0;\n  border: 0;\n}\n\nlegend {\n  display: block;\n  width: 100%;\n  padding: 0;\n  margin-bottom: @line-height-computed;\n  font-size: (@font-size-base * 1.5);\n  line-height: inherit;\n  color: @legend-color;\n  border: 0;\n  border-bottom: 1px solid @legend-border-color;\n}\n\nlabel {\n  display: inline-block;\n  max-width: 100%; // Force IE8 to wrap long content (see https://github.com/twbs/bootstrap/issues/13141)\n  margin-bottom: 5px;\n  font-weight: 700;\n}\n\n\n// Normalize form controls\n//\n// While most of our form styles require extra classes, some basic normalization\n// is required to ensure optimum display with or without those classes to better\n// address browser 
inconsistencies.\n\ninput[type=\"search\"] {\n  // Override content-box in Normalize (* isn't specific enough)\n  .box-sizing(border-box);\n\n  // Search inputs in iOS\n  //\n  // This overrides the extra rounded corners on search inputs in iOS so that our\n  // `.form-control` class can properly style them. Note that this cannot simply\n  // be added to `.form-control` as it's not specific enough. For details, see\n  // https://github.com/twbs/bootstrap/issues/11586.\n  -webkit-appearance: none;\n  appearance: none;\n}\n\n// Position radios and checkboxes better\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n  margin: 4px 0 0;\n  margin-top: 1px \\9; // IE8-9\n  line-height: normal;\n\n  // Apply same disabled cursor tweak as for inputs\n  // Some special care is needed because <label>s don't inherit their parent's `cursor`.\n  //\n  // Note: Neither radios nor checkboxes can be readonly.\n  &[disabled],\n  &.disabled,\n  fieldset[disabled] & {\n    cursor: @cursor-disabled;\n  }\n}\n\ninput[type=\"file\"] {\n  display: block;\n}\n\n// Make range inputs behave like textual form controls\ninput[type=\"range\"] {\n  display: block;\n  width: 100%;\n}\n\n// Make multiple select elements height not fixed\nselect[multiple],\nselect[size] {\n  height: auto;\n}\n\n// Focus for file, radio, and checkbox\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n  .tab-focus();\n}\n\n// Adjust output element\noutput {\n  display: block;\n  padding-top: (@padding-base-vertical + 1);\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @input-color;\n}\n\n\n// Common form controls\n//\n// Shared size and type resets for form controls. Apply `.form-control` to any\n// of the following form controls:\n//\n// select\n// textarea\n// input[type=\"text\"]\n// input[type=\"password\"]\n// input[type=\"datetime\"]\n// input[type=\"datetime-local\"]\n// input[type=\"date\"]\n// input[type=\"month\"]\n// input[type=\"time\"]\n// input[type=\"week\"]\n// input[type=\"number\"]\n// input[type=\"email\"]\n// input[type=\"url\"]\n// input[type=\"search\"]\n// input[type=\"tel\"]\n// input[type=\"color\"]\n\n.form-control {\n  display: block;\n  width: 100%;\n  height: @input-height-base; // Make inputs at least the height of their button counterpart (base line-height + padding + border)\n  padding: @padding-base-vertical @padding-base-horizontal;\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @input-color;\n  background-color: @input-bg;\n  background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid @input-border;\n  border-radius: @input-border-radius; // Note: This has no effect on <select>s in some browsers, due to the limited stylability of <select>s in CSS.\n  .box-shadow(inset 0 1px 1px rgba(0, 0, 0, .075));\n  .transition(~\"border-color ease-in-out .15s, box-shadow ease-in-out .15s\");\n\n  // Customize the `:focus` state to imitate native WebKit styles.\n  .form-control-focus();\n\n  // Placeholder\n  .placeholder();\n\n  // Unstyle the caret on `<select>`s in IE10+.\n  &::-ms-expand {\n    background-color: transparent;\n    border: 0;\n  }\n\n  // Disabled and read-only inputs\n  //\n  // HTML5 says that controls under a fieldset > legend:first-child won't be\n  // disabled if the fieldset is disabled. 
Due to implementation difficulty, we\n  // don't honor that edge case; we style them as disabled anyway.\n  &[disabled],\n  &[readonly],\n  fieldset[disabled] & {\n    background-color: @input-bg-disabled;\n    opacity: 1; // iOS fix for unreadable disabled content; see https://github.com/twbs/bootstrap/issues/11655\n  }\n\n  &[disabled],\n  fieldset[disabled] & {\n    cursor: @cursor-disabled;\n  }\n\n  // Reset height for `textarea`s\n  textarea& {\n    height: auto;\n  }\n}\n\n\n// Special styles for iOS temporal inputs\n//\n// In Mobile Safari, setting `display: block` on temporal inputs causes the\n// text within the input to become vertically misaligned. As a workaround, we\n// set a pixel line-height that matches the given height of the input, but only\n// for Safari. See https://bugs.webkit.org/show_bug.cgi?id=139848\n//\n// Note that as of 9.3, iOS doesn't support `week`.\n\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n  input[type=\"date\"],\n  input[type=\"time\"],\n  input[type=\"datetime-local\"],\n  input[type=\"month\"] {\n    &.form-control {\n      line-height: @input-height-base;\n    }\n\n    &.input-sm,\n    .input-group-sm & {\n      line-height: @input-height-small;\n    }\n\n    &.input-lg,\n    .input-group-lg & {\n      line-height: @input-height-large;\n    }\n  }\n}\n\n\n// Form groups\n//\n// Designed to help with the organization and spacing of vertical forms. For\n// horizontal forms, use the predefined grid classes.\n\n.form-group {\n  margin-bottom: @form-group-margin-bottom;\n}\n\n\n// Checkboxes and radios\n//\n// Indent the labels to position radios/checkboxes as hanging controls.\n\n.radio,\n.checkbox {\n  position: relative;\n  display: block;\n  margin-top: 10px;\n  margin-bottom: 10px;\n\n  // These are used on elements with <label> descendants\n  &.disabled,\n  fieldset[disabled] & {\n    label {\n      cursor: @cursor-disabled;\n    }\n  }\n\n  label {\n    min-height: @line-height-computed; // Ensure the input doesn't jump when there is no text\n    padding-left: 20px;\n    margin-bottom: 0;\n    font-weight: 400;\n    cursor: pointer;\n  }\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n  position: absolute;\n  margin-top: 4px \\9;\n  margin-left: -20px;\n}\n\n.radio + .radio,\n.checkbox + .checkbox {\n  margin-top: -5px; // Move up sibling radios or checkboxes for tighter spacing\n}\n\n// Radios and checkboxes on same line\n.radio-inline,\n.checkbox-inline {\n  position: relative;\n  display: inline-block;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: 400;\n  vertical-align: middle;\n  cursor: pointer;\n\n  // These are used directly on <label>s\n  &.disabled,\n  fieldset[disabled] & {\n    cursor: @cursor-disabled;\n  }\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n  margin-top: 0;\n  margin-left: 10px; // space out consecutive inline controls\n}\n\n\n// Static form control text\n//\n// Apply class to a `p` element to make any string of text align with labels in\n// a horizontal form layout.\n\n.form-control-static {\n  min-height: (@line-height-computed + @font-size-base);\n  // Size it appropriately next to real form controls\n  padding-top: (@padding-base-vertical + 1);\n  padding-bottom: (@padding-base-vertical + 1);\n  // Remove default margin from `p`\n  margin-bottom: 0;\n\n  &.input-lg,\n  &.input-sm {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n\n\n// Form control 
sizing\n//\n// Build on `.form-control` with modifier classes to decrease or increase the\n// height and font-size of form controls.\n//\n// The `.form-group-* form-control` variations are sadly duplicated to avoid the\n// issue documented in https://github.com/twbs/bootstrap/issues/15074.\n\n.input-sm {\n  .input-size(@input-height-small; @padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @input-border-radius-small);\n}\n.form-group-sm {\n  .form-control {\n    height: @input-height-small;\n    padding: @padding-small-vertical @padding-small-horizontal;\n    font-size: @font-size-small;\n    line-height: @line-height-small;\n    border-radius: @input-border-radius-small;\n  }\n  select.form-control {\n    height: @input-height-small;\n    line-height: @input-height-small;\n  }\n  textarea.form-control,\n  select[multiple].form-control {\n    height: auto;\n  }\n  .form-control-static {\n    height: @input-height-small;\n    min-height: (@line-height-computed + @font-size-small);\n    padding: (@padding-small-vertical + 1) @padding-small-horizontal;\n    font-size: @font-size-small;\n    line-height: @line-height-small;\n  }\n}\n\n.input-lg {\n  .input-size(@input-height-large; @padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @input-border-radius-large);\n}\n.form-group-lg {\n  .form-control {\n    height: @input-height-large;\n    padding: @padding-large-vertical @padding-large-horizontal;\n    font-size: @font-size-large;\n    line-height: @line-height-large;\n    border-radius: @input-border-radius-large;\n  }\n  select.form-control {\n    height: @input-height-large;\n    line-height: @input-height-large;\n  }\n  textarea.form-control,\n  select[multiple].form-control {\n    height: auto;\n  }\n  .form-control-static {\n    height: @input-height-large;\n    min-height: (@line-height-computed + @font-size-large);\n    padding: (@padding-large-vertical + 1) @padding-large-horizontal;\n    font-size: @font-size-large;\n    line-height: @line-height-large;\n  }\n}\n\n\n// Form control feedback states\n//\n// Apply contextual and semantic states to individual form controls.\n\n.has-feedback {\n  // Enable absolute positioning\n  position: relative;\n\n  // Ensure icons don't overlap text\n  .form-control {\n    padding-right: (@input-height-base * 1.25);\n  }\n}\n// Feedback icon (requires .glyphicon classes)\n.form-control-feedback {\n  position: absolute;\n  top: 0;\n  right: 0;\n  z-index: 2; // Ensure icon is above input groups\n  display: block;\n  width: @input-height-base;\n  height: @input-height-base;\n  line-height: @input-height-base;\n  text-align: center;\n  pointer-events: none;\n}\n.input-lg + .form-control-feedback,\n.input-group-lg + .form-control-feedback,\n.form-group-lg .form-control + .form-control-feedback {\n  width: @input-height-large;\n  height: @input-height-large;\n  line-height: @input-height-large;\n}\n.input-sm + .form-control-feedback,\n.input-group-sm + .form-control-feedback,\n.form-group-sm .form-control + .form-control-feedback {\n  width: @input-height-small;\n  height: @input-height-small;\n  line-height: @input-height-small;\n}\n\n// Feedback states\n.has-success {\n  .form-control-validation(@state-success-text; @state-success-text; @state-success-bg);\n}\n.has-warning {\n  .form-control-validation(@state-warning-text; @state-warning-text; @state-warning-bg);\n}\n.has-error {\n  .form-control-validation(@state-danger-text; @state-danger-text; @state-danger-bg);\n}\n\n// 
Reposition feedback icon if input has visible label above\n.has-feedback label {\n\n  & ~ .form-control-feedback {\n    top: (@line-height-computed + 5); // Height of the `label` and its margin\n  }\n  &.sr-only ~ .form-control-feedback {\n    top: 0;\n  }\n}\n\n\n// Help text\n//\n// Apply to any element you wish to create light text for placement immediately\n// below a form control. Use for general help, formatting, or instructional text.\n\n.help-block {\n  display: block; // account for any element using help-block\n  margin-top: 5px;\n  margin-bottom: 10px;\n  color: lighten(@text-color, 25%); // lighten the text some for contrast\n}\n\n\n// Inline forms\n//\n// Make forms appear inline(-block) by adding the `.form-inline` class. Inline\n// forms begin stacked on extra small (mobile) devices and then go inline when\n// viewports reach <768px.\n//\n// Requires wrapping inputs and labels with `.form-group` for proper display of\n// default HTML form controls and our custom form controls (e.g., input groups).\n//\n// Heads up! This is mixin-ed into `.navbar-form` in navbars.less.\n\n.form-inline {\n\n  // Kick in the inline\n  @media (min-width: @screen-sm-min) {\n    // Inline-block all the things for \"inline\"\n    .form-group {\n      display: inline-block;\n      margin-bottom: 0;\n      vertical-align: middle;\n    }\n\n    // In navbar-form, allow folks to *not* use `.form-group`\n    .form-control {\n      display: inline-block;\n      width: auto; // Prevent labels from stacking above inputs in `.form-group`\n      vertical-align: middle;\n    }\n\n    // Make static controls behave like regular ones\n    .form-control-static {\n      display: inline-block;\n    }\n\n    .input-group {\n      display: inline-table;\n      vertical-align: middle;\n\n      .input-group-addon,\n      .input-group-btn,\n      .form-control {\n        width: auto;\n      }\n    }\n\n    // Input groups need that 100% width though\n    .input-group > .form-control {\n      width: 100%;\n    }\n\n    .control-label {\n      margin-bottom: 0;\n      vertical-align: middle;\n    }\n\n    // Remove default margin on radios/checkboxes that were used for stacking, and\n    // then undo the floating of radios and checkboxes to match.\n    .radio,\n    .checkbox {\n      display: inline-block;\n      margin-top: 0;\n      margin-bottom: 0;\n      vertical-align: middle;\n\n      label {\n        padding-left: 0;\n      }\n    }\n    .radio input[type=\"radio\"],\n    .checkbox input[type=\"checkbox\"] {\n      position: relative;\n      margin-left: 0;\n    }\n\n    // Re-override the feedback icon.\n    .has-feedback .form-control-feedback {\n      top: 0;\n    }\n  }\n}\n\n\n// Horizontal forms\n//\n// Horizontal forms are built on grid classes and allow you to create forms with\n// labels on the left and inputs on the right.\n\n.form-horizontal {\n\n  // Consistent vertical alignment of radios and checkboxes\n  //\n  // Labels also get some reset styles, but that is scoped to a media query below.\n  .radio,\n  .checkbox,\n  .radio-inline,\n  .checkbox-inline {\n    padding-top: (@padding-base-vertical + 1); // Default padding plus a border\n    margin-top: 0;\n    margin-bottom: 0;\n  }\n  // Account for padding we're adding to ensure the alignment and of help text\n  // and other content below items\n  .radio,\n  .checkbox {\n    min-height: (@line-height-computed + (@padding-base-vertical + 1));\n  }\n\n  // Make form groups behave like rows\n  .form-group {\n    .make-row();\n  }\n\n  // Reset spacing 
\n// Form control focus state\n//\n// Generate a customized focus state for any input with the specified color,\n// which defaults to the `@input-border-focus` variable.\n//\n// We highly encourage you to not customize the default value, but instead use\n// this to tweak colors on an as-needed basis. This aesthetic change is based on\n// WebKit's default styles, but applicable to a wider range of browsers. Its\n// usability and accessibility should be taken into account with any change.\n//\n// Example usage: change the default blue border and shadow to white for better\n// contrast against a dark gray background.\n.form-control-focus(@color: @input-border-focus) {\n  @color-rgba: rgba(red(@color), green(@color), blue(@color), .6);\n  &:focus {\n    border-color: @color;\n    outline: 0;\n    .box-shadow(~\"inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px @{color-rgba}\");\n  }\n}\n\n// Form control sizing\n//\n// Relative text size, padding, and border-radii changes for form controls. For\n// horizontal sizing, wrap controls in the predefined grid classes. 
`<select>`\n// element gets special love because it's special, and that's a fact!\n.input-size(@input-height; @padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  height: @input-height;\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n\n  select& {\n    height: @input-height;\n    line-height: @input-height;\n  }\n\n  textarea&,\n  select[multiple]& {\n    height: auto;\n  }\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// Buttons\n// --------------------------------------------------\n\n\n// Base styles\n// --------------------------------------------------\n\n.btn {\n  display: inline-block;\n  margin-bottom: 0; // For input.btn\n  font-weight: @btn-font-weight;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  touch-action: manipulation;\n  cursor: pointer;\n  background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid transparent;\n  .button-size(@padding-base-vertical; @padding-base-horizontal; @font-size-base; @line-height-base; @btn-border-radius-base);\n  .user-select(none);\n\n  &,\n  &:active,\n  &.active {\n    &:focus,\n    &.focus {\n      .tab-focus();\n    }\n  }\n\n  &:hover,\n  &:focus,\n  &.focus {\n    color: @btn-default-color;\n    text-decoration: none;\n  }\n\n  &:active,\n  &.active {\n    background-image: none;\n    outline: 0;\n    .box-shadow(inset 0 3px 5px rgba(0, 0, 0, .125));\n  }\n\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    cursor: @cursor-disabled;\n    .opacity(.65);\n    .box-shadow(none);\n  }\n\n  a& {\n    &.disabled,\n    fieldset[disabled] & {\n      pointer-events: none; // Future-proof disabling of clicks on `<a>` elements\n    }\n  }\n}\n\n\n// Alternate buttons\n// --------------------------------------------------\n\n.btn-default {\n  .button-variant(@btn-default-color; @btn-default-bg; @btn-default-border);\n}\n.btn-primary {\n  .button-variant(@btn-primary-color; @btn-primary-bg; @btn-primary-border);\n}\n// Success appears as green\n.btn-success {\n  .button-variant(@btn-success-color; @btn-success-bg; @btn-success-border);\n}\n// Info appears as blue-green\n.btn-info {\n  .button-variant(@btn-info-color; @btn-info-bg; @btn-info-border);\n}\n// Warning appears as orange\n.btn-warning {\n  .button-variant(@btn-warning-color; @btn-warning-bg; @btn-warning-border);\n}\n// Danger and error appear as red\n.btn-danger {\n  .button-variant(@btn-danger-color; @btn-danger-bg; @btn-danger-border);\n}\n\n\n// Link buttons\n// -------------------------\n\n// Make a button look and behave like a link\n.btn-link {\n  font-weight: 400;\n  color: @link-color;\n  border-radius: 0;\n\n  &,\n  &:active,\n  &.active,\n  &[disabled],\n  fieldset[disabled] & {\n    background-color: transparent;\n    .box-shadow(none);\n  }\n  &,\n  &:hover,\n  &:focus,\n  &:active {\n    border-color: transparent;\n  }\n  &:hover,\n  &:focus {\n    color: @link-hover-color;\n    text-decoration: @link-hover-decoration;\n    background-color: transparent;\n  }\n  &[disabled],\n  fieldset[disabled] & {\n    &:hover,\n    &:focus {\n      color: @btn-link-disabled-color;\n      text-decoration: none;\n    }\n  }\n}\n\n\n// Button Sizes\n// --------------------------------------------------\n\n.btn-lg {\n  // line-height: ensure even-numbered height of button next to large input\n  
.button-size(@padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @btn-border-radius-large);\n}\n.btn-sm {\n  // line-height: ensure proper height of button next to small input\n  .button-size(@padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @btn-border-radius-small);\n}\n.btn-xs {\n  .button-size(@padding-xs-vertical; @padding-xs-horizontal; @font-size-small; @line-height-small; @btn-border-radius-small);\n}\n\n\n// Block button\n// --------------------------------------------------\n\n.btn-block {\n  display: block;\n  width: 100%;\n}\n\n// Vertically space out multiple block buttons\n.btn-block + .btn-block {\n  margin-top: 5px;\n}\n\n// Specificity overrides\ninput[type=\"submit\"],\ninput[type=\"reset\"],\ninput[type=\"button\"] {\n  &.btn-block {\n    width: 100%;\n  }\n}\n","// Button variants\n//\n// Easily pump out default styles, as well as :hover, :focus, :active,\n// and disabled options for all buttons\n\n.button-variant(@color; @background; @border) {\n  color: @color;\n  background-color: @background;\n  border-color: @border;\n\n  &:focus,\n  &.focus {\n    color: @color;\n    background-color: darken(@background, 10%);\n    border-color: darken(@border, 25%);\n  }\n  &:hover {\n    color: @color;\n    background-color: darken(@background, 10%);\n    border-color: darken(@border, 12%);\n  }\n  &:active,\n  &.active,\n  .open > .dropdown-toggle& {\n    color: @color;\n    background-color: darken(@background, 10%);\n    background-image: none;\n    border-color: darken(@border, 12%);\n\n    &:hover,\n    &:focus,\n    &.focus {\n      color: @color;\n      background-color: darken(@background, 17%);\n      border-color: darken(@border, 25%);\n    }\n  }\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    &:hover,\n    &:focus,\n    &.focus {\n      background-color: @background;\n      border-color: @border;\n    }\n  }\n\n  .badge {\n    color: @background;\n    background-color: @color;\n  }\n}\n\n// Button sizes\n.button-size(@padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n}\n","// Opacity\n\n.opacity(@opacity) {\n  @opacity-ie: (@opacity * 100);  // IE8 filter\n  filter: ~\"alpha(opacity=@{opacity-ie})\";\n  opacity: @opacity;\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// Component animations\n// --------------------------------------------------\n\n// Heads up!\n//\n// We don't use the `.opacity()` mixin here since it causes a bug with text\n// fields in IE7-8. 
Source: https://github.com/twbs/bootstrap/pull/3552.\n\n.fade {\n  opacity: 0;\n  .transition(opacity .15s linear);\n\n  &.in {\n    opacity: 1;\n  }\n}\n\n.collapse {\n  display: none;\n\n  &.in      { display: block; }\n  tr&.in    { display: table-row; }\n  tbody&.in { display: table-row-group; }\n}\n\n.collapsing {\n  position: relative;\n  height: 0;\n  overflow: hidden;\n  .transition-property(~\"height, visibility\");\n  .transition-duration(.35s);\n  .transition-timing-function(ease);\n}\n","//\n// Dropdown menus\n// --------------------------------------------------\n\n\n// Dropdown arrow/caret\n.caret {\n  display: inline-block;\n  width: 0;\n  height: 0;\n  margin-left: 2px;\n  vertical-align: middle;\n  border-top: @caret-width-base dashed;\n  border-top: @caret-width-base solid ~\"\\9\"; // IE8\n  border-right: @caret-width-base solid transparent;\n  border-left: @caret-width-base solid transparent;\n}\n\n// The dropdown wrapper (div)\n.dropup,\n.dropdown {\n  position: relative;\n}\n\n// Prevent the focus on the dropdown toggle when closing dropdowns\n.dropdown-toggle:focus {\n  outline: 0;\n}\n\n// The dropdown menu (ul)\n.dropdown-menu {\n  position: absolute;\n  top: 100%;\n  left: 0;\n  z-index: @zindex-dropdown;\n  display: none; // none by default, but block on \"open\" of the menu\n  float: left;\n  min-width: 160px;\n  padding: 5px 0;\n  margin: 2px 0 0; // override default ul\n  font-size: @font-size-base;\n  text-align: left; // Ensures proper alignment if parent has it changed (e.g., modal footer)\n  list-style: none;\n  background-color: @dropdown-bg;\n  background-clip: padding-box;\n  border: 1px solid @dropdown-fallback-border; // IE8 fallback\n  border: 1px solid @dropdown-border;\n  border-radius: @border-radius-base;\n  .box-shadow(0 6px 12px rgba(0, 0, 0, .175));\n\n  // Aligns the dropdown menu to right\n  //\n  // Deprecated as of 3.1.0 in favor of `.dropdown-menu-[dir]`\n  &.pull-right {\n    right: 0;\n    left: auto;\n  }\n\n  // Dividers (basically an hr) within the dropdown\n  .divider {\n    .nav-divider(@dropdown-divider-bg);\n  }\n\n  // Links within the dropdown menu\n  > li > a {\n    display: block;\n    padding: 3px 20px;\n    clear: both;\n    font-weight: 400;\n    line-height: @line-height-base;\n    color: @dropdown-link-color;\n    white-space: nowrap; // prevent links from randomly breaking onto new lines\n\n    &:hover,\n    &:focus {\n      color: @dropdown-link-hover-color;\n      text-decoration: none;\n      background-color: @dropdown-link-hover-bg;\n    }\n  }\n}\n\n// Active state\n.dropdown-menu > .active > a {\n  &,\n  &:hover,\n  &:focus {\n    color: @dropdown-link-active-color;\n    text-decoration: none;\n    background-color: @dropdown-link-active-bg;\n    outline: 0;\n  }\n}\n\n// Disabled state\n//\n// Gray out text and ensure the hover/focus state remains gray\n\n.dropdown-menu > .disabled > a {\n  &,\n  &:hover,\n  &:focus {\n    color: @dropdown-link-disabled-color;\n  }\n\n  // Nuke hover/focus effects\n  &:hover,\n  &:focus {\n    text-decoration: none;\n    cursor: @cursor-disabled;\n    background-color: transparent;\n    background-image: none; // Remove CSS gradient\n    .reset-filter();\n  }\n}\n\n// Open state for the dropdown\n.open {\n  // Show the menu\n  > .dropdown-menu {\n    display: block;\n  }\n\n  // Remove the outline when :focus is triggered\n  > a {\n    outline: 0;\n  }\n}\n\n// Menu positioning\n//\n// Add extra class to `.dropdown-menu` to flip the alignment of the dropdown\n// menu with the 
parent.\n.dropdown-menu-right {\n  right: 0;\n  left: auto; // Reset the default from `.dropdown-menu`\n}\n// With v3, we enabled auto-flipping if you have a dropdown within a right\n// aligned nav component. To enable the undoing of that, we provide an override\n// to restore the default dropdown menu alignment.\n//\n// This is only for left-aligning a dropdown menu within a `.navbar-right` or\n// `.pull-right` nav component.\n.dropdown-menu-left {\n  right: auto;\n  left: 0;\n}\n\n// Dropdown section headers\n.dropdown-header {\n  display: block;\n  padding: 3px 20px;\n  font-size: @font-size-small;\n  line-height: @line-height-base;\n  color: @dropdown-header-color;\n  white-space: nowrap; // as with > li > a\n}\n\n// Backdrop to catch body clicks on mobile, etc.\n.dropdown-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: (@zindex-dropdown - 10);\n}\n\n// Right aligned dropdowns\n.pull-right > .dropdown-menu {\n  right: 0;\n  left: auto;\n}\n\n// Allow for dropdowns to go bottom up (aka, dropup-menu)\n//\n// Just add .dropup after the standard .dropdown class and you're set, bro.\n// TODO: abstract this so that the navbar fixed styles are not placed here?\n\n.dropup,\n.navbar-fixed-bottom .dropdown {\n  // Reverse the caret\n  .caret {\n    content: \"\";\n    border-top: 0;\n    border-bottom: @caret-width-base dashed;\n    border-bottom: @caret-width-base solid ~\"\\9\"; // IE8\n  }\n  // Different positioning for bottom up menu\n  .dropdown-menu {\n    top: auto;\n    bottom: 100%;\n    margin-bottom: 2px;\n  }\n}\n\n\n// Component alignment\n//\n// Reiterate per navbar.less and the modified component alignment there.\n\n@media (min-width: @grid-float-breakpoint) {\n  .navbar-right {\n    .dropdown-menu {\n      .dropdown-menu-right();\n    }\n    // Necessary for overrides of the default right aligned menu.\n    // Will remove come v4 in all likelihood.\n    .dropdown-menu-left {\n      .dropdown-menu-left();\n    }\n  }\n}\n","// Horizontal dividers\n//\n// Dividers (basically an hr) within dropdowns and nav lists\n\n.nav-divider(@color: #e5e5e5) {\n  height: 1px;\n  margin: ((@line-height-computed / 2) - 1) 0;\n  overflow: hidden;\n  background-color: @color;\n}\n
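\n// Usage sketch (assumption: a hypothetical `.my-menu` component, mirroring how\n// `.dropdown-menu .divider` calls this mixin in dropdowns.less above).\n.my-menu .divider {\n  .nav-divider(#ddd);\n}\n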
","// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n\n.reset-filter() {\n  filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// Button groups\n// --------------------------------------------------\n\n// Make the div behave like a button\n.btn-group,\n.btn-group-vertical {\n  position: relative;\n  display: inline-block;\n  vertical-align: middle; // match .btn alignment given font-size hack above\n  > .btn {\n    position: relative;\n    float: left;\n    // Bring the \"active\" button to the front\n    &:hover,\n    &:focus,\n    &:active,\n    &.active {\n      z-index: 2;\n    }\n  }\n}\n\n// Prevent double borders when buttons are next to each other\n.btn-group {\n  .btn + .btn,\n  .btn + .btn-group,\n  .btn-group + .btn,\n  .btn-group + .btn-group {\n    margin-left: -1px;\n  }\n}\n\n// Optional: Group multiple button groups together for a toolbar\n.btn-toolbar {\n  margin-left: -5px; // Offset the first child's margin\n  &:extend(.clearfix all);\n\n  .btn,\n  .btn-group,\n  .input-group {\n    float: left;\n  }\n  > .btn,\n  > .btn-group,\n  > .input-group {\n    margin-left: 5px;\n  }\n}\n\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n  border-radius: 0;\n}\n\n// Set corners individually because sometimes a single button can be in a .btn-group and we need :first-child and :last-child to both match\n.btn-group > .btn:first-child {\n  margin-left: 0;\n  &:not(:last-child):not(.dropdown-toggle) {\n    .border-right-radius(0);\n  }\n}\n// Need .dropdown-toggle since :last-child doesn't apply, given that a .dropdown-menu is used immediately after it\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n  .border-left-radius(0);\n}\n\n// Custom edits for including btn-groups within btn-groups (useful for including dropdown buttons within a btn-group)\n.btn-group > .btn-group {\n  float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) {\n  > .btn:last-child,\n  > .dropdown-toggle {\n    .border-right-radius(0);\n  }\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  .border-left-radius(0);\n}\n\n// On active and open, don't show outline\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n  outline: 0;\n}\n\n\n// Sizing\n//\n// Remix the default button sizing classes into new ones for easier manipulation.\n\n.btn-group-xs > .btn { &:extend(.btn-xs); }\n.btn-group-sm > .btn { &:extend(.btn-sm); }\n.btn-group-lg > .btn { &:extend(.btn-lg); }\n\n\n// Split button dropdowns\n// ----------------------\n\n// Give the line between buttons some depth\n.btn-group > .btn + .dropdown-toggle {\n  padding-right: 8px;\n  padding-left: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n  padding-right: 12px;\n  padding-left: 12px;\n}\n\n// The clickable button for toggling the menu\n// Remove the gradient and set the same inset shadow as the :active state\n.btn-group.open .dropdown-toggle {\n  .box-shadow(inset 0 3px 5px rgba(0, 0, 0, .125));\n\n  // Show no shadow for `.btn-link` since it has no other button styles.\n  &.btn-link {\n    .box-shadow(none);\n  }\n}\n\n\n// Reposition the caret\n.btn .caret {\n  margin-left: 0;\n}\n// Carets in other button sizes\n.btn-lg .caret {\n  border-width: @caret-width-large @caret-width-large 0;\n  border-bottom-width: 0;\n}\n// Upside down carets for .dropup\n.dropup .btn-lg .caret {\n  border-width: 0 @caret-width-large @caret-width-large;\n}\n\n\n// Vertical button groups\n// ----------------------\n\n.btn-group-vertical {\n  > .btn,\n  > .btn-group,\n  > .btn-group > .btn {\n    display: block;\n    float: none;\n    width: 100%;\n    max-width: 100%;\n  }\n\n  // Clear floats so dropdown menus can be properly placed\n  > .btn-group {\n    &:extend(.clearfix all);\n    > .btn {\n      float: none;\n    }\n  }\n\n  > .btn + .btn,\n  > .btn + .btn-group,\n  > .btn-group + .btn,\n  > .btn-group + .btn-group {\n    margin-top: -1px;\n    margin-left: 0;\n  }\n}\n\n.btn-group-vertical > .btn {\n  &:not(:first-child):not(:last-child) {\n    border-radius: 0;\n  }\n  &:first-child:not(:last-child) {\n    .border-top-radius(@btn-border-radius-base);\n    .border-bottom-radius(0);\n  }\n  &:last-child:not(:first-child) {\n    .border-top-radius(0);\n    .border-bottom-radius(@btn-border-radius-base);\n  }\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) {\n  > .btn:last-child,\n  > 
.dropdown-toggle {\n    .border-bottom-radius(0);\n  }\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  .border-top-radius(0);\n}\n\n\n// Justified button groups\n// ----------------------\n\n.btn-group-justified {\n  display: table;\n  width: 100%;\n  table-layout: fixed;\n  border-collapse: separate;\n  > .btn,\n  > .btn-group {\n    display: table-cell;\n    float: none;\n    width: 1%;\n  }\n  > .btn-group .btn {\n    width: 100%;\n  }\n\n  > .btn-group .dropdown-menu {\n    left: auto;\n  }\n}\n\n\n// Checkbox and radio options\n//\n// In order to support the browser's form validation feedback, powered by the\n// `required` attribute, we have to \"hide\" the inputs via `clip`. We cannot use\n// `display: none;` or `visibility: hidden;` as that also hides the popover.\n// Simply visually hiding the inputs via `opacity` would leave them clickable in\n// certain cases which is prevented by using `clip` and `pointer-events`.\n// This way, we ensure a DOM element is visible to position the popover from.\n//\n// See https://github.com/twbs/bootstrap/pull/12794 and\n// https://github.com/twbs/bootstrap/pull/14559 for more information.\n\n[data-toggle=\"buttons\"] {\n  > .btn,\n  > .btn-group > .btn {\n    input[type=\"radio\"],\n    input[type=\"checkbox\"] {\n      position: absolute;\n      clip: rect(0, 0, 0, 0);\n      pointer-events: none;\n    }\n  }\n}\n","// Single side border-radius\n\n.border-top-radius(@radius) {\n  border-top-left-radius: @radius;\n  border-top-right-radius: @radius;\n}\n.border-right-radius(@radius) {\n  border-top-right-radius: @radius;\n  border-bottom-right-radius: @radius;\n}\n.border-bottom-radius(@radius) {\n  border-bottom-right-radius: @radius;\n  border-bottom-left-radius: @radius;\n}\n.border-left-radius(@radius) {\n  border-top-left-radius: @radius;\n  border-bottom-left-radius: @radius;\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// Input groups\n// --------------------------------------------------\n\n// Base styles\n// -------------------------\n.input-group {\n  position: relative; // For dropdowns\n  display: table;\n  border-collapse: separate; // prevent input groups from inheriting border styles from table cells when placed within a table\n\n  // Undo padding and float of grid classes\n  &[class*=\"col-\"] {\n    float: none;\n    padding-right: 0;\n    padding-left: 0;\n  }\n\n  .form-control {\n    // Ensure that the input is always above the *appended* addon button for\n    // proper border colors.\n    position: relative;\n    z-index: 2;\n\n    // IE9 fubars the placeholder attribute in text inputs and the arrows on\n    // select elements in input groups. To fix it, we float the input. 
Details:\n    // https://github.com/twbs/bootstrap/issues/11561#issuecomment-28936855\n    float: left;\n\n    width: 100%;\n    margin-bottom: 0;\n\n    &:focus {\n      z-index: 3;\n    }\n  }\n}\n\n// Sizing options\n//\n// Remix the default form control sizing classes into new ones for easier\n// manipulation.\n\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n  .input-lg();\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n  .input-sm();\n}\n\n\n// Display as table-cell\n// -------------------------\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n  display: table-cell;\n\n  &:not(:first-child):not(:last-child) {\n    border-radius: 0;\n  }\n}\n// Addon and addon wrapper for buttons\n.input-group-addon,\n.input-group-btn {\n  width: 1%;\n  white-space: nowrap;\n  vertical-align: middle; // Match the inputs\n}\n\n// Text input groups\n// -------------------------\n.input-group-addon {\n  padding: @padding-base-vertical @padding-base-horizontal;\n  font-size: @font-size-base;\n  font-weight: 400;\n  line-height: 1;\n  color: @input-color;\n  text-align: center;\n  background-color: @input-group-addon-bg;\n  border: 1px solid @input-group-addon-border-color;\n  border-radius: @input-border-radius;\n\n  // Sizing\n  &.input-sm {\n    padding: @padding-small-vertical @padding-small-horizontal;\n    font-size: @font-size-small;\n    border-radius: @input-border-radius-small;\n  }\n  &.input-lg {\n    padding: @padding-large-vertical @padding-large-horizontal;\n    font-size: @font-size-large;\n    border-radius: @input-border-radius-large;\n  }\n\n  // Nuke default margins from checkboxes and radios to vertically center within.\n  input[type=\"radio\"],\n  input[type=\"checkbox\"] {\n    margin-top: 0;\n  }\n}\n\n// Reset rounded corners\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n  .border-right-radius(0);\n}\n.input-group-addon:first-child {\n  border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n  .border-left-radius(0);\n}\n.input-group-addon:last-child {\n  border-left: 0;\n}\n\n// Button input groups\n// -------------------------\n.input-group-btn {\n  position: relative;\n  // Jankily prevent input button groups from wrapping with `white-space` and\n  // `font-size` in combination with `inline-block` on buttons.\n  font-size: 0;\n  white-space: nowrap;\n\n  // Negative margin for spacing, position for bringing hovered/focused/active\n  // element above the siblings.\n  > .btn {\n    position: relative;\n    + .btn {\n      margin-left: -1px;\n    }\n    // Bring the \"active\" button to the front\n    &:hover,\n    &:focus,\n    &:active {\n      z-index: 2;\n    }\n  }\n\n  // Negative margin to only have a 1px border between the two\n  &:first-child {\n    > .btn,\n    > .btn-group {\n      margin-right: -1px;\n    }\n  }\n  &:last-child {\n    > .btn,\n    > .btn-group {\n      z-index: 2;\n      margin-left: -1px;\n    }\n  }\n}\n
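\n// Usage sketch (assumption: a hypothetical extra-small variant, following the\n// \"Sizing options\" remix pattern above and the `@padding-xs-*` variables that\n// `.btn-xs` uses in buttons.less).\n.input-group-xs > .form-control,\n.input-group-xs > .input-group-addon,\n.input-group-xs > .input-group-btn > .btn {\n  padding: @padding-xs-vertical @padding-xs-horizontal;\n  font-size: @font-size-small;\n}\n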
","// stylelint-disable selector-no-qualifying-type, selector-max-type\n\n//\n// Navs\n// --------------------------------------------------\n\n\n// Base class\n// --------------------------------------------------\n\n.nav {\n  padding-left: 0; // Override default ul/ol\n  margin-bottom: 0;\n  list-style: none;\n  &:extend(.clearfix all);\n\n  > li {\n    position: relative;\n    display: block;\n\n    > a {\n      position: relative;\n      display: block;\n      padding: @nav-link-padding;\n      &:hover,\n      &:focus {\n        text-decoration: none;\n        background-color: @nav-link-hover-bg;\n      }\n    }\n\n    // Disabled state sets text to gray and nukes hover/tab effects\n    &.disabled > a {\n      color: @nav-disabled-link-color;\n\n      &:hover,\n      &:focus {\n        color: @nav-disabled-link-hover-color;\n        text-decoration: none;\n        cursor: @cursor-disabled;\n        background-color: transparent;\n      }\n    }\n  }\n\n  // Open dropdowns\n  .open > a {\n    &,\n    &:hover,\n    &:focus {\n      background-color: @nav-link-hover-bg;\n      border-color: @link-color;\n    }\n  }\n\n  // Nav dividers (deprecated with v3.0.1)\n  //\n  // This should have been removed in v3 with the dropping of `.nav-list`, but\n  // we missed it. We don't currently support this anywhere, but in the interest\n  // of maintaining backward compatibility in case you use it, it's deprecated.\n  .nav-divider {\n    .nav-divider();\n  }\n\n  // Prevent IE8 from misplacing imgs\n  //\n  // See https://github.com/h5bp/html5-boilerplate/issues/984#issuecomment-3985989\n  > li > a > img {\n    max-width: none;\n  }\n}\n\n\n// Tabs\n// -------------------------\n\n// Give the tabs something to sit on\n.nav-tabs {\n  border-bottom: 1px solid @nav-tabs-border-color;\n  > li {\n    float: left;\n    // Make the list-items overlay the bottom border\n    margin-bottom: -1px;\n\n    // Actual tabs (as links)\n    > a {\n      margin-right: 2px;\n      line-height: @line-height-base;\n      border: 1px solid transparent;\n      border-radius: @border-radius-base @border-radius-base 0 0;\n      &:hover {\n        border-color: @nav-tabs-link-hover-border-color @nav-tabs-link-hover-border-color @nav-tabs-border-color;\n      }\n    }\n\n    // Active state, and its :hover to override normal :hover\n    &.active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @nav-tabs-active-link-hover-color;\n        cursor: default;\n        background-color: @nav-tabs-active-link-hover-bg;\n        border: 1px solid @nav-tabs-active-link-hover-border-color;\n        border-bottom-color: transparent;\n      }\n    }\n  }\n  // pulling this in mainly for less shorthand\n  &.nav-justified {\n    .nav-justified();\n    .nav-tabs-justified();\n  }\n}\n\n\n// Pills\n// -------------------------\n.nav-pills {\n  > li {\n    float: left;\n\n    // Links rendered as pills\n    > a {\n      border-radius: @nav-pills-border-radius;\n    }\n    + li {\n      margin-left: 2px;\n    }\n\n    // Active state\n    &.active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @nav-pills-active-link-hover-color;\n        background-color: @nav-pills-active-link-hover-bg;\n      }\n    }\n  }\n}\n\n\n// Stacked pills\n.nav-stacked {\n  > li {\n    float: none;\n    + li {\n      margin-top: 2px;\n      margin-left: 0; // no need for this gap between nav items\n  
}\n}\n\n\n// Nav variations\n// --------------------------------------------------\n\n// Justified nav links\n// -------------------------\n\n.nav-justified {\n  width: 100%;\n\n  > li {\n    float: none;\n    > a {\n      margin-bottom: 5px;\n      text-align: center;\n    }\n  }\n\n  > .dropdown .dropdown-menu {\n    top: auto;\n    left: auto;\n  }\n\n  @media (min-width: @screen-sm-min) {\n    > li {\n      display: table-cell;\n      width: 1%;\n      > a {\n        margin-bottom: 0;\n      }\n    }\n  }\n}\n\n// Move borders to anchors instead of bottom of list\n//\n// Mixin for adding on top the shared `.nav-justified` styles for our tabs\n.nav-tabs-justified {\n  border-bottom: 0;\n\n  > li > a {\n    // Override margin from .nav-tabs\n    margin-right: 0;\n    border-radius: @border-radius-base;\n  }\n\n  > .active > a,\n  > .active > a:hover,\n  > .active > a:focus {\n    border: 1px solid @nav-tabs-justified-link-border-color;\n  }\n\n  @media (min-width: @screen-sm-min) {\n    > li > a {\n      border-bottom: 1px solid @nav-tabs-justified-link-border-color;\n      border-radius: @border-radius-base @border-radius-base 0 0;\n    }\n    > .active > a,\n    > .active > a:hover,\n    > .active > a:focus {\n      border-bottom-color: @nav-tabs-justified-active-link-border-color;\n    }\n  }\n}\n\n\n// Tabbable tabs\n// -------------------------\n\n// Hide tabbable panes to start, show them when `.active`\n.tab-content {\n  > .tab-pane {\n    display: none;\n  }\n  > .active {\n    display: block;\n  }\n}\n\n\n// Dropdowns\n// -------------------------\n\n// Specific dropdowns\n.nav-tabs .dropdown-menu {\n  // make dropdown border overlap tab border\n  margin-top: -1px;\n  // Remove the top rounded corners here since there is a hard edge above the menu\n  .border-top-radius(0);\n}\n","// stylelint-disable selector-max-type, selector-max-compound-selectors, selector-max-combinators, selector-max-class, declaration-no-important, selector-no-qualifying-type\n\n//\n// Navbars\n// --------------------------------------------------\n\n\n// Wrapper and base class\n//\n// Provide a static navbar from which we expand to create full-width, fixed, and\n// other navbar variations.\n\n.navbar {\n  position: relative;\n  min-height: @navbar-height; // Ensure a navbar always shows (e.g., without a .navbar-brand in collapsed mode)\n  margin-bottom: @navbar-margin-bottom;\n  border: 1px solid transparent;\n\n  // Prevent floats from breaking the navbar\n  &:extend(.clearfix all);\n\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: @navbar-border-radius;\n  }\n}\n\n\n// Navbar heading\n//\n// Groups `.navbar-brand` and `.navbar-toggle` into a single component for easy\n// styling of responsive aspects.\n\n.navbar-header {\n  &:extend(.clearfix all);\n\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n  }\n}\n\n\n// Navbar collapse (body)\n//\n// Group your navbar content into this for easy collapsing and expanding across\n// various device sizes. 
By default, this content is collapsed below 768px, but\n// will expand past that for a horizontal display.\n//\n// To start (on mobile devices) the navbar links, forms, and buttons are stacked\n// vertically and include a `max-height` to overflow in case you have too much\n// content for the user's viewport.\n\n.navbar-collapse {\n  padding-right: @navbar-padding-horizontal;\n  padding-left: @navbar-padding-horizontal;\n  overflow-x: visible;\n  border-top: 1px solid transparent;\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);\n  &:extend(.clearfix all);\n  -webkit-overflow-scrolling: touch;\n\n  &.in {\n    overflow-y: auto;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    width: auto;\n    border-top: 0;\n    box-shadow: none;\n\n    &.collapse {\n      display: block !important;\n      height: auto !important;\n      padding-bottom: 0; // Override default setting\n      overflow: visible !important;\n    }\n\n    &.in {\n      overflow-y: visible;\n    }\n\n    // Undo the collapse side padding for navbars with containers to ensure\n    // alignment of right-aligned contents.\n    .navbar-fixed-top &,\n    .navbar-static-top &,\n    .navbar-fixed-bottom & {\n      padding-right: 0;\n      padding-left: 0;\n    }\n  }\n}\n\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  .navbar-collapse {\n    max-height: @navbar-collapse-max-height;\n\n    @media (max-device-width: @screen-xs-min) and (orientation: landscape) {\n      max-height: 200px;\n    }\n  }\n\n  // Fix the top/bottom navbars when screen real estate supports it\n  position: fixed;\n  right: 0;\n  left: 0;\n  z-index: @zindex-navbar-fixed;\n\n  // Undo the rounded corners\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: 0;\n  }\n}\n\n.navbar-fixed-top {\n  top: 0;\n  border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n  bottom: 0;\n  margin-bottom: 0; // override .navbar defaults\n  border-width: 1px 0 0;\n}\n\n\n// Both navbar header and collapse\n//\n// When a container is present, change the behavior of the header and collapse.\n\n.container,\n.container-fluid {\n  > .navbar-header,\n  > .navbar-collapse {\n    margin-right: -@navbar-padding-horizontal;\n    margin-left: -@navbar-padding-horizontal;\n\n    @media (min-width: @grid-float-breakpoint) {\n      margin-right: 0;\n      margin-left: 0;\n    }\n  }\n}\n\n\n//\n// Navbar alignment options\n//\n// Display the navbar across the entirety of the page or fix it to the top or\n// bottom of the page.\n\n// Static top (unfixed, but 100% wide) navbar\n.navbar-static-top {\n  z-index: @zindex-navbar;\n  border-width: 0 0 1px;\n\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: 0;\n  }\n}\n\n\n// Brand/project name\n\n.navbar-brand {\n  float: left;\n  height: @navbar-height;\n  padding: @navbar-padding-vertical @navbar-padding-horizontal;\n  font-size: @font-size-large;\n  line-height: @line-height-computed;\n\n  &:hover,\n  &:focus {\n    text-decoration: none;\n  }\n\n  > img {\n    display: block;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    .navbar > .container &,\n    .navbar > .container-fluid & {\n      margin-left: -@navbar-padding-horizontal;\n    }\n  }\n}\n
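\n// Worked example (assumption: the default @navbar-height of 50px): the\n// `.navbar-vertical-align()` mixin used below computes (50px - element height)\n// / 2 for each margin, so the 34px `.navbar-toggle` gets 8px top and bottom\n// margins, and a hypothetical 30px widget would get 10px:\n// .my-widget { .navbar-vertical-align(30px); }\n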
\n// Navbar toggle\n//\n// Custom button for toggling the `.navbar-collapse`, powered by the collapse\n// JavaScript plugin.\n\n.navbar-toggle {\n  position: relative;\n  float: right;\n  padding: 9px 10px;\n  margin-right: @navbar-padding-horizontal;\n  .navbar-vertical-align(34px);\n  background-color: transparent;\n  background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid transparent;\n  border-radius: @border-radius-base;\n\n  // We remove the `outline` here, but later compensate by attaching `:hover`\n  // styles to `:focus`.\n  &:focus {\n    outline: 0;\n  }\n\n  // Bars\n  .icon-bar {\n    display: block;\n    width: 22px;\n    height: 2px;\n    border-radius: 1px;\n  }\n  .icon-bar + .icon-bar {\n    margin-top: 4px;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    display: none;\n  }\n}\n\n\n// Navbar nav links\n//\n// Builds on top of the `.nav` components with its own modifier class to make\n// the nav the full height of the horizontal nav (above 768px).\n\n.navbar-nav {\n  margin: (@navbar-padding-vertical / 2) -@navbar-padding-horizontal;\n\n  > li > a {\n    padding-top: 10px;\n    padding-bottom: 10px;\n    line-height: @line-height-computed;\n  }\n\n  @media (max-width: @grid-float-breakpoint-max) {\n    // Dropdowns get custom display when collapsed\n    .open .dropdown-menu {\n      position: static;\n      float: none;\n      width: auto;\n      margin-top: 0;\n      background-color: transparent;\n      border: 0;\n      box-shadow: none;\n      > li > a,\n      .dropdown-header {\n        padding: 5px 15px 5px 25px;\n      }\n      > li > a {\n        line-height: @line-height-computed;\n        &:hover,\n        &:focus {\n          background-image: none;\n        }\n      }\n    }\n  }\n\n  // Uncollapse the nav\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n    margin: 0;\n\n    > li {\n      float: left;\n      > a {\n        padding-top: @navbar-padding-vertical;\n        padding-bottom: @navbar-padding-vertical;\n      }\n    }\n  }\n}\n\n\n// Navbar form\n//\n// Extension of the `.form-inline` with some extra flavor for optimum display in\n// our navbars.\n\n.navbar-form {\n  padding: 10px @navbar-padding-horizontal;\n  margin-right: -@navbar-padding-horizontal;\n  margin-left: -@navbar-padding-horizontal;\n  border-top: 1px solid transparent;\n  border-bottom: 1px solid transparent;\n  @shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);\n  .box-shadow(@shadow);\n\n  // Mixin behavior for optimum display\n  .form-inline();\n\n  .form-group {\n    @media (max-width: @grid-float-breakpoint-max) {\n      margin-bottom: 5px;\n\n      &:last-child {\n        margin-bottom: 0;\n      }\n    }\n  }\n\n  // Vertically center in expanded, horizontal navbar\n  .navbar-vertical-align(@input-height-base);\n\n  // Undo 100% width for pull classes\n  @media (min-width: @grid-float-breakpoint) {\n    width: auto;\n    padding-top: 0;\n    padding-bottom: 0;\n    margin-right: 0;\n    margin-left: 0;\n    border: 0;\n    .box-shadow(none);\n  }\n}\n\n\n// Dropdown menus\n\n// Menu position and menu carets\n.navbar-nav > li > .dropdown-menu {\n  margin-top: 0;\n  .border-top-radius(0);\n}\n// Menu position and menu caret support for dropups via extra dropup class\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n  margin-bottom: 0;\n  .border-top-radius(@navbar-border-radius);\n  .border-bottom-radius(0);\n}\n\n\n// Buttons in navbars\n//\n// Vertically center a button within a navbar (when *not* in a form).\n\n.navbar-btn {\n  .navbar-vertical-align(@input-height-base);\n\n  &.btn-sm {\n    .navbar-vertical-align(@input-height-small);\n  }\n  &.btn-xs {\n    .navbar-vertical-align(22);\n  }\n}\n\n\n// Text in navbars\n//\n// Add a class to make any element 
properly align itself vertically within the navbars.\n\n.navbar-text {\n  .navbar-vertical-align(@line-height-computed);\n\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n    margin-right: @navbar-padding-horizontal;\n    margin-left: @navbar-padding-horizontal;\n  }\n}\n\n\n// Component alignment\n//\n// Repurpose the pull utilities as their own navbar utilities to avoid specificity\n// issues with parents and chaining. Only do this when the navbar is uncollapsed\n// though so that navbar contents properly stack and align in mobile.\n//\n// Declared after the navbar components to ensure more specificity on the margins.\n\n@media (min-width: @grid-float-breakpoint) {\n  .navbar-left  { .pull-left(); }\n  .navbar-right {\n    .pull-right();\n    margin-right: -@navbar-padding-horizontal;\n\n    ~ .navbar-right {\n      margin-right: 0;\n    }\n  }\n}\n\n\n// Alternate navbars\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n  background-color: @navbar-default-bg;\n  border-color: @navbar-default-border;\n\n  .navbar-brand {\n    color: @navbar-default-brand-color;\n    &:hover,\n    &:focus {\n      color: @navbar-default-brand-hover-color;\n      background-color: @navbar-default-brand-hover-bg;\n    }\n  }\n\n  .navbar-text {\n    color: @navbar-default-color;\n  }\n\n  .navbar-nav {\n    > li > a {\n      color: @navbar-default-link-color;\n\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-hover-color;\n        background-color: @navbar-default-link-hover-bg;\n      }\n    }\n    > .active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-active-color;\n        background-color: @navbar-default-link-active-bg;\n      }\n    }\n    > .disabled > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-disabled-color;\n        background-color: @navbar-default-link-disabled-bg;\n      }\n    }\n\n    // Dropdown menu items\n    // Remove background color from open dropdown\n    > .open > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-active-color;\n        background-color: @navbar-default-link-active-bg;\n      }\n    }\n\n    @media (max-width: @grid-float-breakpoint-max) {\n      // Dropdowns get custom display when collapsed\n      .open .dropdown-menu {\n        > li > a {\n          color: @navbar-default-link-color;\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-hover-color;\n            background-color: @navbar-default-link-hover-bg;\n          }\n        }\n        > .active > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-active-color;\n            background-color: @navbar-default-link-active-bg;\n          }\n        }\n        > .disabled > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-disabled-color;\n            background-color: @navbar-default-link-disabled-bg;\n          }\n        }\n      }\n    }\n  }\n\n  .navbar-toggle {\n    border-color: @navbar-default-toggle-border-color;\n    &:hover,\n    &:focus {\n      background-color: @navbar-default-toggle-hover-bg;\n    }\n    .icon-bar {\n      background-color: @navbar-default-toggle-icon-bar-bg;\n    }\n  }\n\n  .navbar-collapse,\n  .navbar-form {\n    border-color: @navbar-default-border;\n  }\n\n\n  // Links in navbars\n  //\n  // Add a class to ensure links outside the navbar nav are colored 
correctly.\n\n  .navbar-link {\n    color: @navbar-default-link-color;\n    &:hover {\n      color: @navbar-default-link-hover-color;\n    }\n  }\n\n  .btn-link {\n    color: @navbar-default-link-color;\n    &:hover,\n    &:focus {\n      color: @navbar-default-link-hover-color;\n    }\n    &[disabled],\n    fieldset[disabled] & {\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-disabled-color;\n      }\n    }\n  }\n}\n\n// Inverse navbar\n\n.navbar-inverse {\n  background-color: @navbar-inverse-bg;\n  border-color: @navbar-inverse-border;\n\n  .navbar-brand {\n    color: @navbar-inverse-brand-color;\n    &:hover,\n    &:focus {\n      color: @navbar-inverse-brand-hover-color;\n      background-color: @navbar-inverse-brand-hover-bg;\n    }\n  }\n\n  .navbar-text {\n    color: @navbar-inverse-color;\n  }\n\n  .navbar-nav {\n    > li > a {\n      color: @navbar-inverse-link-color;\n\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-hover-color;\n        background-color: @navbar-inverse-link-hover-bg;\n      }\n    }\n    > .active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-active-color;\n        background-color: @navbar-inverse-link-active-bg;\n      }\n    }\n    > .disabled > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-disabled-color;\n        background-color: @navbar-inverse-link-disabled-bg;\n      }\n    }\n\n    // Dropdowns\n    > .open > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-active-color;\n        background-color: @navbar-inverse-link-active-bg;\n      }\n    }\n\n    @media (max-width: @grid-float-breakpoint-max) {\n      // Dropdowns get custom display\n      .open .dropdown-menu {\n        > .dropdown-header {\n          border-color: @navbar-inverse-border;\n        }\n        .divider {\n          background-color: @navbar-inverse-border;\n        }\n        > li > a {\n          color: @navbar-inverse-link-color;\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-hover-color;\n            background-color: @navbar-inverse-link-hover-bg;\n          }\n        }\n        > .active > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-active-color;\n            background-color: @navbar-inverse-link-active-bg;\n          }\n        }\n        > .disabled > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-disabled-color;\n            background-color: @navbar-inverse-link-disabled-bg;\n          }\n        }\n      }\n    }\n  }\n\n  // Darken the responsive nav toggle\n  .navbar-toggle {\n    border-color: @navbar-inverse-toggle-border-color;\n    &:hover,\n    &:focus {\n      background-color: @navbar-inverse-toggle-hover-bg;\n    }\n    .icon-bar {\n      background-color: @navbar-inverse-toggle-icon-bar-bg;\n    }\n  }\n\n  .navbar-collapse,\n  .navbar-form {\n    border-color: darken(@navbar-inverse-bg, 7%);\n  }\n\n  .navbar-link {\n    color: @navbar-inverse-link-color;\n    &:hover {\n      color: @navbar-inverse-link-hover-color;\n    }\n  }\n\n  .btn-link {\n    color: @navbar-inverse-link-color;\n    &:hover,\n    &:focus {\n      color: @navbar-inverse-link-hover-color;\n    }\n    &[disabled],\n    fieldset[disabled] & {\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-disabled-color;\n      }\n    }\n  }\n}\n","// Navbar vertical align\n//\n// 
Vertically center elements in the navbar.\n// Example: an element has a height of 30px, so write out `.navbar-vertical-align(30px);` to calculate the appropriate top margin.\n\n.navbar-vertical-align(@element-height) {\n  margin-top: ((@navbar-height - @element-height) / 2);\n  margin-bottom: ((@navbar-height - @element-height) / 2);\n}\n","// stylelint-disable declaration-no-important\n\n//\n// Utility classes\n// --------------------------------------------------\n\n\n// Floats\n// -------------------------\n\n.clearfix {\n  .clearfix();\n}\n.center-block {\n  .center-block();\n}\n.pull-right {\n  float: right !important;\n}\n.pull-left {\n  float: left !important;\n}\n\n\n// Toggling content\n// -------------------------\n\n// Note: Deprecated .hide in favor of .hidden or .sr-only (as appropriate) in v3.0.1\n.hide {\n  display: none !important;\n}\n.show {\n  display: block !important;\n}\n.invisible {\n  visibility: hidden;\n}\n.text-hide {\n  .text-hide();\n}\n\n\n// Hide from screenreaders and browsers\n//\n// Credit: HTML5 Boilerplate\n\n.hidden {\n  display: none !important;\n}\n\n\n// For Affix plugin\n// -------------------------\n\n.affix {\n  position: fixed;\n}\n","//\n// Breadcrumbs\n// --------------------------------------------------\n\n\n.breadcrumb {\n  padding: @breadcrumb-padding-vertical @breadcrumb-padding-horizontal;\n  margin-bottom: @line-height-computed;\n  list-style: none;\n  background-color: @breadcrumb-bg;\n  border-radius: @border-radius-base;\n\n  > li {\n    display: inline-block;\n\n    + li:before {\n      padding: 0 5px;\n      color: @breadcrumb-color;\n      content: \"@{breadcrumb-separator}\\00a0\"; // Unicode space added since inline-block means non-collapsing white-space\n    }\n  }\n\n  > .active {\n    color: @breadcrumb-active-color;\n  }\n}\n","//\n// Pagination (multiple pages)\n// --------------------------------------------------\n.pagination {\n  display: inline-block;\n  padding-left: 0;\n  margin: @line-height-computed 0;\n  border-radius: @border-radius-base;\n\n  > li {\n    display: inline; // Remove list-style and block-level defaults\n    > a,\n    > span {\n      position: relative;\n      float: left; // Collapse white-space\n      padding: @padding-base-vertical @padding-base-horizontal;\n      margin-left: -1px;\n      line-height: @line-height-base;\n      color: @pagination-color;\n      text-decoration: none;\n      background-color: @pagination-bg;\n      border: 1px solid @pagination-border;\n\n      &:hover,\n      &:focus {\n        z-index: 2;\n        color: @pagination-hover-color;\n        background-color: @pagination-hover-bg;\n        border-color: @pagination-hover-border;\n      }\n    }\n    &:first-child {\n      > a,\n      > span {\n        margin-left: 0;\n        .border-left-radius(@border-radius-base);\n      }\n    }\n    &:last-child {\n      > a,\n      > span {\n        .border-right-radius(@border-radius-base);\n      }\n    }\n  }\n\n  > .active > a,\n  > .active > span {\n    &,\n    &:hover,\n    &:focus {\n      z-index: 3;\n      color: @pagination-active-color;\n      cursor: default;\n      background-color: @pagination-active-bg;\n      border-color: @pagination-active-border;\n    }\n  }\n\n  > .disabled {\n    > span,\n    > span:hover,\n    > span:focus,\n    > a,\n    > a:hover,\n    > a:focus {\n      color: @pagination-disabled-color;\n      cursor: @cursor-disabled;\n      background-color: @pagination-disabled-bg;\n      border-color: @pagination-disabled-border;\n    }\n  }\n}\n\n// 
Sizing\n// --------------------------------------------------\n\n// Large\n.pagination-lg {\n  .pagination-size(@padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @border-radius-large);\n}\n\n// Small\n.pagination-sm {\n  .pagination-size(@padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @border-radius-small);\n}\n","// Pagination\n\n.pagination-size(@padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  > li {\n    > a,\n    > span {\n      padding: @padding-vertical @padding-horizontal;\n      font-size: @font-size;\n      line-height: @line-height;\n    }\n    &:first-child {\n      > a,\n      > span {\n        .border-left-radius(@border-radius);\n      }\n    }\n    &:last-child {\n      > a,\n      > span {\n        .border-right-radius(@border-radius);\n      }\n    }\n  }\n}\n","//\n// Pager pagination\n// --------------------------------------------------\n\n\n.pager {\n  padding-left: 0;\n  margin: @line-height-computed 0;\n  text-align: center;\n  list-style: none;\n  &:extend(.clearfix all);\n  li {\n    display: inline;\n    > a,\n    > span {\n      display: inline-block;\n      padding: 5px 14px;\n      background-color: @pager-bg;\n      border: 1px solid @pager-border;\n      border-radius: @pager-border-radius;\n    }\n\n    > a:hover,\n    > a:focus {\n      text-decoration: none;\n      background-color: @pager-hover-bg;\n    }\n  }\n\n  .next {\n    > a,\n    > span {\n      float: right;\n    }\n  }\n\n  .previous {\n    > a,\n    > span {\n      float: left;\n    }\n  }\n\n  .disabled {\n    > a,\n    > a:hover,\n    > a:focus,\n    > span {\n      color: @pager-disabled-color;\n      cursor: @cursor-disabled;\n      background-color: @pager-bg;\n    }\n  }\n}\n","//\n// Labels\n// --------------------------------------------------\n\n.label {\n  display: inline;\n  padding: .2em .6em .3em;\n  font-size: 75%;\n  font-weight: 700;\n  line-height: 1;\n  color: @label-color;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  border-radius: .25em;\n\n  // Add hover effects, but only for links\n  a& {\n    &:hover,\n    &:focus {\n      color: @label-link-hover-color;\n      text-decoration: none;\n      cursor: pointer;\n    }\n  }\n\n  // Empty labels collapse automatically (not available in IE8)\n  &:empty {\n    display: none;\n  }\n\n  // Quick fix for labels in buttons\n  .btn & {\n    position: relative;\n    top: -1px;\n  }\n}\n\n// Colors\n// Contextual variations (linked labels get darker on :hover)\n\n.label-default {\n  .label-variant(@label-default-bg);\n}\n\n.label-primary {\n  .label-variant(@label-primary-bg);\n}\n\n.label-success {\n  .label-variant(@label-success-bg);\n}\n\n.label-info {\n  .label-variant(@label-info-bg);\n}\n\n.label-warning {\n  .label-variant(@label-warning-bg);\n}\n\n.label-danger {\n  .label-variant(@label-danger-bg);\n}\n","// Labels\n\n.label-variant(@color) {\n  background-color: @color;\n\n  &[href] {\n    &:hover,\n    &:focus {\n      background-color: darken(@color, 10%);\n    }\n  }\n}\n","//\n// Badges\n// --------------------------------------------------\n\n\n// Base class\n.badge {\n  display: inline-block;\n  min-width: 10px;\n  padding: 3px 7px;\n  font-size: @font-size-small;\n  font-weight: @badge-font-weight;\n  line-height: @badge-line-height;\n  color: @badge-color;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  background-color: @badge-bg;\n  
border-radius: @badge-border-radius;\n\n  // Empty badges collapse automatically (not available in IE8)\n  &:empty {\n    display: none;\n  }\n\n  // Quick fix for badges in buttons\n  .btn & {\n    position: relative;\n    top: -1px;\n  }\n\n  .btn-xs &,\n  .btn-group-xs > .btn & {\n    top: 0;\n    padding: 1px 5px;\n  }\n\n  // Hover state, but only for links\n  a& {\n    &:hover,\n    &:focus {\n      color: @badge-link-hover-color;\n      text-decoration: none;\n      cursor: pointer;\n    }\n  }\n\n  // Account for badges in navs\n  .list-group-item.active > &,\n  .nav-pills > .active > a > & {\n    color: @badge-active-color;\n    background-color: @badge-active-bg;\n  }\n\n  .list-group-item > & {\n    float: right;\n  }\n\n  .list-group-item > & + & {\n    margin-right: 5px;\n  }\n\n  .nav-pills > li > a > & {\n    margin-left: 3px;\n  }\n}\n","//\n// Jumbotron\n// --------------------------------------------------\n\n\n.jumbotron {\n  padding-top: @jumbotron-padding;\n  padding-bottom: @jumbotron-padding;\n  margin-bottom: @jumbotron-padding;\n  color: @jumbotron-color;\n  background-color: @jumbotron-bg;\n\n  h1,\n  .h1 {\n    color: @jumbotron-heading-color;\n  }\n\n  p {\n    margin-bottom: (@jumbotron-padding / 2);\n    font-size: @jumbotron-font-size;\n    font-weight: 200;\n  }\n\n  > hr {\n    border-top-color: darken(@jumbotron-bg, 10%);\n  }\n\n  .container &,\n  .container-fluid & {\n    padding-right: (@grid-gutter-width / 2);\n    padding-left: (@grid-gutter-width / 2);\n    border-radius: @border-radius-large; // Only round corners at higher resolutions if contained in a container\n  }\n\n  .container {\n    max-width: 100%;\n  }\n\n  @media screen and (min-width: @screen-sm-min) {\n    padding-top: (@jumbotron-padding * 1.6);\n    padding-bottom: (@jumbotron-padding * 1.6);\n\n    .container &,\n    .container-fluid & {\n      padding-right: (@jumbotron-padding * 2);\n      padding-left: (@jumbotron-padding * 2);\n    }\n\n    h1,\n    .h1 {\n      font-size: @jumbotron-heading-font-size;\n    }\n  }\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// Thumbnails\n// --------------------------------------------------\n\n\n// Mixin and adjust the regular image class\n.thumbnail {\n  display: block;\n  padding: @thumbnail-padding;\n  margin-bottom: @line-height-computed;\n  line-height: @line-height-base;\n  background-color: @thumbnail-bg;\n  border: 1px solid @thumbnail-border;\n  border-radius: @thumbnail-border-radius;\n  .transition(border .2s ease-in-out);\n\n  > img,\n  a > img {\n    &:extend(.img-responsive);\n    margin-right: auto;\n    margin-left: auto;\n  }\n\n  // Add a hover state for linked versions only\n  a&:hover,\n  a&:focus,\n  a&.active {\n    border-color: @link-color;\n  }\n\n  // Image captions\n  .caption {\n    padding: @thumbnail-caption-padding;\n    color: @thumbnail-caption-color;\n  }\n}\n","//\n// Alerts\n// --------------------------------------------------\n\n\n// Base styles\n// -------------------------\n\n.alert {\n  padding: @alert-padding;\n  margin-bottom: @line-height-computed;\n  border: 1px solid transparent;\n  border-radius: @alert-border-radius;\n\n  // Headings for larger alerts\n  h4 {\n    margin-top: 0;\n    color: inherit; // Specified for the h4 to prevent conflicts of changing @headings-color\n  }\n\n  // Provide class for links that match alerts\n  .alert-link {\n    font-weight: @alert-link-font-weight;\n  }\n\n  // Improve alignment and spacing of inner content\n  > p,\n  > ul {\n    margin-bottom: 
0;\n  }\n\n  > p + p {\n    margin-top: 5px;\n  }\n}\n\n// Dismissible alerts\n//\n// Expand the right padding and account for the close button's positioning.\n\n// The misspelled .alert-dismissable was deprecated in 3.2.0.\n.alert-dismissable,\n.alert-dismissible {\n  padding-right: (@alert-padding + 20);\n\n  // Adjust close link position\n  .close {\n    position: relative;\n    top: -2px;\n    right: -21px;\n    color: inherit;\n  }\n}\n\n// Alternate styles\n//\n// Generate contextual modifier classes for colorizing the alert.\n\n.alert-success {\n  .alert-variant(@alert-success-bg; @alert-success-border; @alert-success-text);\n}\n\n.alert-info {\n  .alert-variant(@alert-info-bg; @alert-info-border; @alert-info-text);\n}\n\n.alert-warning {\n  .alert-variant(@alert-warning-bg; @alert-warning-border; @alert-warning-text);\n}\n\n.alert-danger {\n  .alert-variant(@alert-danger-bg; @alert-danger-border; @alert-danger-text);\n}\n","// Alerts\n\n.alert-variant(@background; @border; @text-color) {\n  color: @text-color;\n  background-color: @background;\n  border-color: @border;\n\n  hr {\n    border-top-color: darken(@border, 5%);\n  }\n\n  .alert-link {\n    color: darken(@text-color, 10%);\n  }\n}\n","// stylelint-disable at-rule-no-vendor-prefix\n\n//\n// Progress bars\n// --------------------------------------------------\n\n\n// Bar animations\n// -------------------------\n\n// WebKit\n@-webkit-keyframes progress-bar-stripes {\n  from  { background-position: 40px 0; }\n  to    { background-position: 0 0; }\n}\n\n// Spec and IE10+\n@keyframes progress-bar-stripes {\n  from  { background-position: 40px 0; }\n  to    { background-position: 0 0; }\n}\n\n\n// Bar itself\n// -------------------------\n\n// Outer container\n.progress {\n  height: @line-height-computed;\n  margin-bottom: @line-height-computed;\n  overflow: hidden;\n  background-color: @progress-bg;\n  border-radius: @progress-border-radius;\n  .box-shadow(inset 0 1px 2px rgba(0, 0, 0, .1));\n}\n\n// Bar of progress\n.progress-bar {\n  float: left;\n  width: 0%;\n  height: 100%;\n  font-size: @font-size-small;\n  line-height: @line-height-computed;\n  color: @progress-bar-color;\n  text-align: center;\n  background-color: @progress-bar-bg;\n  .box-shadow(inset 0 -1px 0 rgba(0, 0, 0, .15));\n  .transition(width .6s ease);\n}\n\n// Striped bars\n//\n// `.progress-striped .progress-bar` is deprecated as of v3.2.0 in favor of the\n// `.progress-bar-striped` class, which you just add to an existing\n// `.progress-bar`.\n.progress-striped .progress-bar,\n.progress-bar-striped {\n  #gradient > .striped();\n  background-size: 40px 40px;\n}\n\n// Call animation for the active one\n//\n// `.progress.active .progress-bar` is deprecated as of v3.2.0 in favor of the\n// `.progress-bar.active` approach.\n.progress.active .progress-bar,\n.progress-bar.active {\n  .animation(progress-bar-stripes 2s linear infinite);\n}\n\n\n// Variations\n// -------------------------\n\n.progress-bar-success {\n  .progress-bar-variant(@progress-bar-success-bg);\n}\n\n.progress-bar-info {\n  .progress-bar-variant(@progress-bar-info-bg);\n}\n\n.progress-bar-warning {\n  .progress-bar-variant(@progress-bar-warning-bg);\n}\n\n.progress-bar-danger {\n  .progress-bar-variant(@progress-bar-danger-bg);\n}\n","// stylelint-disable value-no-vendor-prefix, selector-max-id\n\n#gradient {\n\n  // Horizontal gradient, from left to right\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not 
available in IE9 and below.\n  .horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Opera 12\n    background-image: linear-gradient(to right, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down\n    background-repeat: repeat-x;\n  }\n\n  // Vertical gradient, from top to bottom\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Opera 12\n    background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down\n    background-repeat: repeat-x;\n  }\n\n  .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n    background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(@deg, @start-color, @end-color); // Opera 12\n    background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n  }\n  .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n    background-repeat: no-repeat;\n  }\n  .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n    background-repeat: no-repeat;\n  }\n  .radial(@inner-color: #555; @outer-color: #333) {\n    background-image: 
-webkit-radial-gradient(circle, @inner-color, @outer-color);\n    background-image: radial-gradient(circle, @inner-color, @outer-color);\n    background-repeat: no-repeat;\n  }\n  .striped(@color: rgba(255, 255, 255, .15); @angle: 45deg) {\n    background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: -o-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n  }\n}\n","// Progress bars\n\n.progress-bar-variant(@color) {\n  background-color: @color;\n\n  // Deprecated parent class requirement as of v3.2.0\n  .progress-striped & {\n    #gradient > .striped();\n  }\n}\n",".media {\n  // Proper spacing between instances of .media\n  margin-top: 15px;\n\n  &:first-child {\n    margin-top: 0;\n  }\n}\n\n.media,\n.media-body {\n  overflow: hidden;\n  zoom: 1;\n}\n\n.media-body {\n  width: 10000px;\n}\n\n.media-object {\n  display: block;\n\n  // Fix collapse in webkit from max-width: 100% and display: table-cell.\n  &.img-thumbnail {\n    max-width: none;\n  }\n}\n\n.media-right,\n.media > .pull-right {\n  padding-left: 10px;\n}\n\n.media-left,\n.media > .pull-left {\n  padding-right: 10px;\n}\n\n.media-left,\n.media-right,\n.media-body {\n  display: table-cell;\n  vertical-align: top;\n}\n\n.media-middle {\n  vertical-align: middle;\n}\n\n.media-bottom {\n  vertical-align: bottom;\n}\n\n// Reset margins on headings for tighter default spacing\n.media-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n\n// Media list variation\n//\n// Undo default ul/ol styles\n.media-list {\n  padding-left: 0;\n  list-style: none;\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// List groups\n// --------------------------------------------------\n\n\n// Base class\n//\n// Easily usable on <ul>, <ol>, or <div>.\n\n.list-group {\n  // No need to set list-style: none; since .list-group-item is block level\n  padding-left: 0; // reset padding because ul and ol\n  margin-bottom: 20px;\n}\n\n\n// Individual list items\n//\n// Use on `li`s or `div`s within the `.list-group` parent.\n\n.list-group-item {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n  // Place the border on the list items and negative margin up for better styling\n  margin-bottom: -1px;\n  background-color: @list-group-bg;\n  border: 1px solid @list-group-border;\n\n  // Round the first and last items\n  &:first-child {\n    .border-top-radius(@list-group-border-radius);\n  }\n  &:last-child {\n    margin-bottom: 0;\n    .border-bottom-radius(@list-group-border-radius);\n  }\n\n  // Disabled state\n  &.disabled,\n  &.disabled:hover,\n  &.disabled:focus {\n    color: @list-group-disabled-color;\n    cursor: @cursor-disabled;\n    background-color: @list-group-disabled-bg;\n\n    // Force color to inherit for custom content\n    .list-group-item-heading {\n      color: inherit;\n    }\n    .list-group-item-text {\n      color: @list-group-disabled-text-color;\n    }\n  }\n\n  // Active class on item itself, not parent\n  &.active,\n  &.active:hover,\n  &.active:focus {\n    z-index: 2; // Place active items above their siblings for proper border styling\n    color: @list-group-active-color;\n    background-color: @list-group-active-bg;\n    border-color: @list-group-active-border;\n\n    
// Force color to inherit for custom content\n    .list-group-item-heading,\n    .list-group-item-heading > small,\n    .list-group-item-heading > .small {\n      color: inherit;\n    }\n    .list-group-item-text {\n      color: @list-group-active-text-color;\n    }\n  }\n}\n\n\n// Interactive list items\n//\n// Use anchor or button elements instead of `li`s or `div`s to create interactive items.\n// Includes an extra `.active` modifier class for showing selected items.\n\na.list-group-item,\nbutton.list-group-item {\n  color: @list-group-link-color;\n\n  .list-group-item-heading {\n    color: @list-group-link-heading-color;\n  }\n\n  // Hover state\n  &:hover,\n  &:focus {\n    color: @list-group-link-hover-color;\n    text-decoration: none;\n    background-color: @list-group-hover-bg;\n  }\n}\n\nbutton.list-group-item {\n  width: 100%;\n  text-align: left;\n}\n\n\n// Contextual variants\n//\n// Add modifier classes to change text and background color on individual items.\n// Organizationally, this must come after the `:hover` states.\n\n.list-group-item-variant(success; @state-success-bg; @state-success-text);\n.list-group-item-variant(info; @state-info-bg; @state-info-text);\n.list-group-item-variant(warning; @state-warning-bg; @state-warning-text);\n.list-group-item-variant(danger; @state-danger-bg; @state-danger-text);\n\n\n// Custom content options\n//\n// Extra classes for creating well-formatted content within `.list-group-item`s.\n\n.list-group-item-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.list-group-item-text {\n  margin-bottom: 0;\n  line-height: 1.3;\n}\n","// List Groups\n\n.list-group-item-variant(@state; @background; @color) {\n  .list-group-item-@{state} {\n    color: @color;\n    background-color: @background;\n\n    a&,\n    button& {\n      color: @color;\n\n      .list-group-item-heading {\n        color: inherit;\n      }\n\n      &:hover,\n      &:focus {\n        color: @color;\n        background-color: darken(@background, 5%);\n      }\n      &.active,\n      &.active:hover,\n      &.active:focus {\n        color: #fff;\n        background-color: @color;\n        border-color: @color;\n      }\n    }\n  }\n}\n","// stylelint-disable selector-max-type, selector-max-compound-selectors, selector-max-combinators, no-duplicate-selectors\n\n//\n// Panels\n// --------------------------------------------------\n\n\n// Base class\n.panel {\n  margin-bottom: @line-height-computed;\n  background-color: @panel-bg;\n  border: 1px solid transparent;\n  border-radius: @panel-border-radius;\n  .box-shadow(0 1px 1px rgba(0, 0, 0, .05));\n}\n\n// Panel contents\n.panel-body {\n  padding: @panel-body-padding;\n  &:extend(.clearfix all);\n}\n\n// Optional heading\n.panel-heading {\n  padding: @panel-heading-padding;\n  border-bottom: 1px solid transparent;\n  .border-top-radius((@panel-border-radius - 1));\n\n  > .dropdown .dropdown-toggle {\n    color: inherit;\n  }\n}\n\n// Within heading, strip any `h*` tag of its default margins for spacing.\n.panel-title {\n  margin-top: 0;\n  margin-bottom: 0;\n  font-size: ceil((@font-size-base * 1.125));\n  color: inherit;\n\n  > a,\n  > small,\n  > .small,\n  > small > a,\n  > .small > a {\n    color: inherit;\n  }\n}\n\n// Optional footer (stays gray in every modifier class)\n.panel-footer {\n  padding: @panel-footer-padding;\n  background-color: @panel-footer-bg;\n  border-top: 1px solid @panel-inner-border;\n  .border-bottom-radius((@panel-border-radius - 1));\n}\n\n\n// List groups in panels\n//\n// By default, space out 
list group content from panel headings to account for\n// any kind of custom content between the two.\n\n.panel {\n  > .list-group,\n  > .panel-collapse > .list-group {\n    margin-bottom: 0;\n\n    .list-group-item {\n      border-width: 1px 0;\n      border-radius: 0;\n    }\n\n    // Add border top radius for first one\n    &:first-child {\n      .list-group-item:first-child {\n        border-top: 0;\n        .border-top-radius((@panel-border-radius - 1));\n      }\n    }\n\n    // Add border bottom radius for last one\n    &:last-child {\n      .list-group-item:last-child {\n        border-bottom: 0;\n        .border-bottom-radius((@panel-border-radius - 1));\n      }\n    }\n  }\n  > .panel-heading + .panel-collapse > .list-group {\n    .list-group-item:first-child {\n      .border-top-radius(0);\n    }\n  }\n}\n// Collapse space between when there's no additional content.\n.panel-heading + .list-group {\n  .list-group-item:first-child {\n    border-top-width: 0;\n  }\n}\n.list-group + .panel-footer {\n  border-top-width: 0;\n}\n\n// Tables in panels\n//\n// Place a non-bordered `.table` within a panel (not within a `.panel-body`) and\n// watch it go full width.\n\n.panel {\n  > .table,\n  > .table-responsive > .table,\n  > .panel-collapse > .table {\n    margin-bottom: 0;\n\n    caption {\n      padding-right: @panel-body-padding;\n      padding-left: @panel-body-padding;\n    }\n  }\n  // Add border top radius for first one\n  > .table:first-child,\n  > .table-responsive:first-child > .table:first-child {\n    .border-top-radius((@panel-border-radius - 1));\n\n    > thead:first-child,\n    > tbody:first-child {\n      > tr:first-child {\n        border-top-left-radius: (@panel-border-radius - 1);\n        border-top-right-radius: (@panel-border-radius - 1);\n\n        td:first-child,\n        th:first-child {\n          border-top-left-radius: (@panel-border-radius - 1);\n        }\n        td:last-child,\n        th:last-child {\n          border-top-right-radius: (@panel-border-radius - 1);\n        }\n      }\n    }\n  }\n  // Add border bottom radius for last one\n  > .table:last-child,\n  > .table-responsive:last-child > .table:last-child {\n    .border-bottom-radius((@panel-border-radius - 1));\n\n    > tbody:last-child,\n    > tfoot:last-child {\n      > tr:last-child {\n        border-bottom-right-radius: (@panel-border-radius - 1);\n        border-bottom-left-radius: (@panel-border-radius - 1);\n\n        td:first-child,\n        th:first-child {\n          border-bottom-left-radius: (@panel-border-radius - 1);\n        }\n        td:last-child,\n        th:last-child {\n          border-bottom-right-radius: (@panel-border-radius - 1);\n        }\n      }\n    }\n  }\n  > .panel-body + .table,\n  > .panel-body + .table-responsive,\n  > .table + .panel-body,\n  > .table-responsive + .panel-body {\n    border-top: 1px solid @table-border-color;\n  }\n  > .table > tbody:first-child > tr:first-child th,\n  > .table > tbody:first-child > tr:first-child td {\n    border-top: 0;\n  }\n  > .table-bordered,\n  > .table-responsive > .table-bordered {\n    border: 0;\n    > thead,\n    > tbody,\n    > tfoot {\n      > tr {\n        > th:first-child,\n        > td:first-child {\n          border-left: 0;\n        }\n        > th:last-child,\n        > td:last-child {\n          border-right: 0;\n        }\n      }\n    }\n    > thead,\n    > tbody {\n      > tr:first-child {\n        > td,\n        > th {\n          border-bottom: 0;\n        }\n      }\n    }\n    > tbody,\n    > 
tfoot {\n      > tr:last-child {\n        > td,\n        > th {\n          border-bottom: 0;\n        }\n      }\n    }\n  }\n  > .table-responsive {\n    margin-bottom: 0;\n    border: 0;\n  }\n}\n\n\n// Collapsible panels (aka, accordion)\n//\n// Wrap a series of panels in `.panel-group` to turn them into an accordion with\n// the help of our collapse JavaScript plugin.\n\n.panel-group {\n  margin-bottom: @line-height-computed;\n\n  // Tighten up margin so it's only between panels\n  .panel {\n    margin-bottom: 0;\n    border-radius: @panel-border-radius;\n\n    + .panel {\n      margin-top: 5px;\n    }\n  }\n\n  .panel-heading {\n    border-bottom: 0;\n\n    + .panel-collapse > .panel-body,\n    + .panel-collapse > .list-group {\n      border-top: 1px solid @panel-inner-border;\n    }\n  }\n\n  .panel-footer {\n    border-top: 0;\n    + .panel-collapse .panel-body {\n      border-bottom: 1px solid @panel-inner-border;\n    }\n  }\n}\n\n\n// Contextual variations\n.panel-default {\n  .panel-variant(@panel-default-border; @panel-default-text; @panel-default-heading-bg; @panel-default-border);\n}\n.panel-primary {\n  .panel-variant(@panel-primary-border; @panel-primary-text; @panel-primary-heading-bg; @panel-primary-border);\n}\n.panel-success {\n  .panel-variant(@panel-success-border; @panel-success-text; @panel-success-heading-bg; @panel-success-border);\n}\n.panel-info {\n  .panel-variant(@panel-info-border; @panel-info-text; @panel-info-heading-bg; @panel-info-border);\n}\n.panel-warning {\n  .panel-variant(@panel-warning-border; @panel-warning-text; @panel-warning-heading-bg; @panel-warning-border);\n}\n.panel-danger {\n  .panel-variant(@panel-danger-border; @panel-danger-text; @panel-danger-heading-bg; @panel-danger-border);\n}\n","// Panels\n\n.panel-variant(@border; @heading-text-color; @heading-bg-color; @heading-border) {\n  border-color: @border;\n\n  & > .panel-heading {\n    color: @heading-text-color;\n    background-color: @heading-bg-color;\n    border-color: @heading-border;\n\n    + .panel-collapse > .panel-body {\n      border-top-color: @border;\n    }\n    .badge {\n      color: @heading-bg-color;\n      background-color: @heading-text-color;\n    }\n  }\n  & > .panel-footer {\n    + .panel-collapse > .panel-body {\n      border-bottom-color: @border;\n    }\n  }\n}\n","// Embeds responsive\n//\n// Credit: Nicolas Gallagher and SUIT CSS.\n\n.embed-responsive {\n  position: relative;\n  display: block;\n  height: 0;\n  padding: 0;\n  overflow: hidden;\n\n  .embed-responsive-item,\n  iframe,\n  embed,\n  object,\n  video {\n    position: absolute;\n    top: 0;\n    bottom: 0;\n    left: 0;\n    width: 100%;\n    height: 100%;\n    border: 0;\n  }\n}\n\n// Modifier class for 16:9 aspect ratio\n.embed-responsive-16by9 {\n  padding-bottom: 56.25%;\n}\n\n// Modifier class for 4:3 aspect ratio\n.embed-responsive-4by3 {\n  padding-bottom: 75%;\n}\n","//\n// Wells\n// --------------------------------------------------\n\n\n// Base class\n.well {\n  min-height: 20px;\n  padding: 19px;\n  margin-bottom: 20px;\n  background-color: @well-bg;\n  border: 1px solid @well-border;\n  border-radius: @border-radius-base;\n  .box-shadow(inset 0 1px 1px rgba(0, 0, 0, .05));\n  blockquote {\n    border-color: #ddd;\n    border-color: rgba(0, 0, 0, .15);\n  }\n}\n\n// Sizes\n.well-lg {\n  padding: 24px;\n  border-radius: @border-radius-large;\n}\n.well-sm {\n  padding: 9px;\n  border-radius: @border-radius-small;\n}\n","// stylelint-disable property-no-vendor-prefix\n\n//\n// Close icons\n// 
--------------------------------------------------\n\n\n.close {\n  float: right;\n  font-size: (@font-size-base * 1.5);\n  font-weight: @close-font-weight;\n  line-height: 1;\n  color: @close-color;\n  text-shadow: @close-text-shadow;\n  .opacity(.2);\n\n  &:hover,\n  &:focus {\n    color: @close-color;\n    text-decoration: none;\n    cursor: pointer;\n    .opacity(.5);\n  }\n\n  // Additional properties for button version\n  // iOS requires the button element instead of an anchor tag.\n  // If you want the anchor version, it requires `href=\"#\"`.\n  // See https://developer.mozilla.org/en-US/docs/Web/Events/click#Safari_Mobile\n  button& {\n    padding: 0;\n    cursor: pointer;\n    background: transparent;\n    border: 0;\n    -webkit-appearance: none;\n    appearance: none;\n  }\n}\n","//\n// Modals\n// --------------------------------------------------\n\n// .modal-open      - body class for killing the scroll\n// .modal           - container to scroll within\n// .modal-dialog    - positioning shell for the actual modal\n// .modal-content   - actual modal w/ bg and corners and shit\n\n// Kill the scroll on the body\n.modal-open {\n  overflow: hidden;\n}\n\n// Container that the modal scrolls within\n.modal {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: @zindex-modal;\n  display: none;\n  overflow: hidden;\n  -webkit-overflow-scrolling: touch;\n\n  // Prevent Chrome on Windows from adding a focus outline. For details, see\n  // https://github.com/twbs/bootstrap/pull/10951.\n  outline: 0;\n\n  // When fading in the modal, animate it to slide down\n  &.fade .modal-dialog {\n    .translate(0, -25%);\n    .transition-transform(~\"0.3s ease-out\");\n  }\n  &.in .modal-dialog { .translate(0, 0); }\n}\n.modal-open .modal {\n  overflow-x: hidden;\n  overflow-y: auto;\n}\n\n// Shell div to position the modal with bottom padding\n.modal-dialog {\n  position: relative;\n  width: auto;\n  margin: 10px;\n}\n\n// Actual modal\n.modal-content {\n  position: relative;\n  background-color: @modal-content-bg;\n  background-clip: padding-box;\n  border: 1px solid @modal-content-fallback-border-color; //old browsers fallback (ie8 etc)\n  border: 1px solid @modal-content-border-color;\n  border-radius: @border-radius-large;\n  .box-shadow(0 3px 9px rgba(0, 0, 0, .5));\n  // Remove focus outline from opened modal\n  outline: 0;\n}\n\n// Modal background\n.modal-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: @zindex-modal-background;\n  background-color: @modal-backdrop-bg;\n  // Fade for backdrop\n  &.fade { .opacity(0); }\n  &.in { .opacity(@modal-backdrop-opacity); }\n}\n\n// Modal header\n// Top section of the modal w/ title and dismiss\n.modal-header {\n  padding: @modal-title-padding;\n  border-bottom: 1px solid @modal-header-border-color;\n  &:extend(.clearfix all);\n}\n// Close icon\n.modal-header .close {\n  margin-top: -2px;\n}\n\n// Title text within header\n.modal-title {\n  margin: 0;\n  line-height: @modal-title-line-height;\n}\n\n// Modal body\n// Where all modal content resides (sibling of .modal-header and .modal-footer)\n.modal-body {\n  position: relative;\n  padding: @modal-inner-padding;\n}\n\n// Footer (for actions)\n.modal-footer {\n  padding: @modal-inner-padding;\n  text-align: right; // right align buttons\n  border-top: 1px solid @modal-footer-border-color;\n  &:extend(.clearfix all); // clear it in case folks use .pull-* classes on buttons\n\n  // Properly space out buttons\n  .btn + .btn {\n    
margin-bottom: 0; // account for input[type=\"submit\"] which gets the bottom margin like all other inputs\n    margin-left: 5px;\n  }\n  // but override that for button groups\n  .btn-group .btn + .btn {\n    margin-left: -1px;\n  }\n  // and override it for block buttons as well\n  .btn-block + .btn-block {\n    margin-left: 0;\n  }\n}\n\n// Measure scrollbar width for padding body during modal show/hide\n.modal-scrollbar-measure {\n  position: absolute;\n  top: -9999px;\n  width: 50px;\n  height: 50px;\n  overflow: scroll;\n}\n\n// Scale up the modal\n@media (min-width: @screen-sm-min) {\n  // Automatically set modal's width for larger viewports\n  .modal-dialog {\n    width: @modal-md;\n    margin: 30px auto;\n  }\n  .modal-content {\n    .box-shadow(0 5px 15px rgba(0, 0, 0, .5));\n  }\n\n  // Modal sizes\n  .modal-sm { width: @modal-sm; }\n}\n\n@media (min-width: @screen-md-min) {\n  .modal-lg { width: @modal-lg; }\n}\n","//\n// Tooltips\n// --------------------------------------------------\n\n\n// Base class\n.tooltip {\n  position: absolute;\n  z-index: @zindex-tooltip;\n  display: block;\n  // Our parent element can be arbitrary since tooltips are by default inserted as a sibling of their target element.\n  // So reset our font and text properties to avoid inheriting weird values.\n  .reset-text();\n  font-size: @font-size-small;\n\n  .opacity(0);\n\n  &.in { .opacity(@tooltip-opacity); }\n  &.top {\n    padding: @tooltip-arrow-width 0;\n    margin-top: -3px;\n  }\n  &.right {\n    padding: 0 @tooltip-arrow-width;\n    margin-left: 3px;\n  }\n  &.bottom {\n    padding: @tooltip-arrow-width 0;\n    margin-top: 3px;\n  }\n  &.left {\n    padding: 0 @tooltip-arrow-width;\n    margin-left: -3px;\n  }\n\n  // Note: Deprecated .top-left, .top-right, .bottom-left, and .bottom-right as of v3.3.1\n  &.top .tooltip-arrow {\n    bottom: 0;\n    left: 50%;\n    margin-left: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.top-left .tooltip-arrow {\n    right: @tooltip-arrow-width;\n    bottom: 0;\n    margin-bottom: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.top-right .tooltip-arrow {\n    bottom: 0;\n    left: @tooltip-arrow-width;\n    margin-bottom: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.right .tooltip-arrow {\n    top: 50%;\n    left: 0;\n    margin-top: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-right-color: @tooltip-arrow-color;\n  }\n  &.left .tooltip-arrow {\n    top: 50%;\n    right: 0;\n    margin-top: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-left-color: @tooltip-arrow-color;\n  }\n  &.bottom .tooltip-arrow {\n    top: 0;\n    left: 50%;\n    margin-left: -@tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n  &.bottom-left .tooltip-arrow {\n    top: 0;\n    right: @tooltip-arrow-width;\n    margin-top: -@tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n  &.bottom-right .tooltip-arrow {\n    top: 0;\n    left: @tooltip-arrow-width;\n    margin-top: 
-@tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n}\n\n// Wrapper for the tooltip content\n.tooltip-inner {\n  max-width: @tooltip-max-width;\n  padding: 3px 8px;\n  color: @tooltip-color;\n  text-align: center;\n  background-color: @tooltip-bg;\n  border-radius: @border-radius-base;\n}\n\n// Arrows\n.tooltip-arrow {\n  position: absolute;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n",".reset-text() {\n  font-family: @font-family-base;\n  // We deliberately do NOT reset font-size.\n  font-style: normal;\n  font-weight: 400;\n  line-height: @line-height-base;\n  line-break: auto;\n  text-align: left; // Fallback for where `start` is not supported\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n}\n","//\n// Popovers\n// --------------------------------------------------\n\n\n.popover {\n  position: absolute;\n  top: 0;\n  left: 0;\n  z-index: @zindex-popover;\n  display: none;\n  max-width: @popover-max-width;\n  padding: 1px;\n  // Our parent element can be arbitrary since popovers are by default inserted as a sibling of their target element.\n  // So reset our font and text properties to avoid inheriting weird values.\n  .reset-text();\n  font-size: @font-size-base;\n  background-color: @popover-bg;\n  background-clip: padding-box;\n  border: 1px solid @popover-fallback-border-color;\n  border: 1px solid @popover-border-color;\n  border-radius: @border-radius-large;\n  .box-shadow(0 5px 10px rgba(0, 0, 0, .2));\n\n  // Offset the popover to account for the popover arrow\n  &.top { margin-top: -@popover-arrow-width; }\n  &.right { margin-left: @popover-arrow-width; }\n  &.bottom { margin-top: @popover-arrow-width; }\n  &.left { margin-left: -@popover-arrow-width; }\n\n  // Arrows\n  // .arrow is outer, .arrow:after is inner\n  > .arrow {\n    border-width: @popover-arrow-outer-width;\n\n    &,\n    &:after {\n      position: absolute;\n      display: block;\n      width: 0;\n      height: 0;\n      border-color: transparent;\n      border-style: solid;\n    }\n\n    &:after {\n      content: \"\";\n      border-width: @popover-arrow-width;\n    }\n  }\n\n  &.top > .arrow {\n    bottom: -@popover-arrow-outer-width;\n    left: 50%;\n    margin-left: -@popover-arrow-outer-width;\n    border-top-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-top-color: @popover-arrow-outer-color;\n    border-bottom-width: 0;\n    &:after {\n      bottom: 1px;\n      margin-left: -@popover-arrow-width;\n      content: \" \";\n      border-top-color: @popover-arrow-color;\n      border-bottom-width: 0;\n    }\n  }\n  &.right > .arrow {\n    top: 50%;\n    left: -@popover-arrow-outer-width;\n    margin-top: -@popover-arrow-outer-width;\n    border-right-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-right-color: @popover-arrow-outer-color;\n    border-left-width: 0;\n    &:after {\n      bottom: -@popover-arrow-width;\n      left: 1px;\n      content: \" \";\n      border-right-color: @popover-arrow-color;\n      border-left-width: 0;\n    }\n  }\n  &.bottom > .arrow {\n    top: -@popover-arrow-outer-width;\n    left: 50%;\n    margin-left: -@popover-arrow-outer-width;\n    border-top-width: 0;\n    border-bottom-color: @popover-arrow-outer-fallback-color; // IE8 
fallback\n    border-bottom-color: @popover-arrow-outer-color;\n    &:after {\n      top: 1px;\n      margin-left: -@popover-arrow-width;\n      content: \" \";\n      border-top-width: 0;\n      border-bottom-color: @popover-arrow-color;\n    }\n  }\n\n  &.left > .arrow {\n    top: 50%;\n    right: -@popover-arrow-outer-width;\n    margin-top: -@popover-arrow-outer-width;\n    border-right-width: 0;\n    border-left-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-left-color: @popover-arrow-outer-color;\n    &:after {\n      right: 1px;\n      bottom: -@popover-arrow-width;\n      content: \" \";\n      border-right-width: 0;\n      border-left-color: @popover-arrow-color;\n    }\n  }\n}\n\n.popover-title {\n  padding: 8px 14px;\n  margin: 0; // reset heading margin\n  font-size: @font-size-base;\n  background-color: @popover-title-bg;\n  border-bottom: 1px solid darken(@popover-title-bg, 5%);\n  border-radius: (@border-radius-large - 1) (@border-radius-large - 1) 0 0;\n}\n\n.popover-content {\n  padding: 9px 14px;\n}\n","// stylelint-disable media-feature-name-no-unknown\n\n//\n// Carousel\n// --------------------------------------------------\n\n\n// Wrapper for the slide container and indicators\n.carousel {\n  position: relative;\n}\n\n.carousel-inner {\n  position: relative;\n  width: 100%;\n  overflow: hidden;\n\n  > .item {\n    position: relative;\n    display: none;\n    .transition(.6s ease-in-out left);\n\n    // Account for jankitude on images\n    > img,\n    > a > img {\n      &:extend(.img-responsive);\n      line-height: 1;\n    }\n\n    // WebKit CSS3 transforms for supported devices\n    @media all and (transform-3d), (-webkit-transform-3d) {\n      .transition-transform(~\"0.6s ease-in-out\");\n      .backface-visibility(~\"hidden\");\n      .perspective(1000px);\n\n      &.next,\n      &.active.right {\n        .translate3d(100%, 0, 0);\n        left: 0;\n      }\n      &.prev,\n      &.active.left {\n        .translate3d(-100%, 0, 0);\n        left: 0;\n      }\n      &.next.left,\n      &.prev.right,\n      &.active {\n        .translate3d(0, 0, 0);\n        left: 0;\n      }\n    }\n  }\n\n  > .active,\n  > .next,\n  > .prev {\n    display: block;\n  }\n\n  > .active {\n    left: 0;\n  }\n\n  > .next,\n  > .prev {\n    position: absolute;\n    top: 0;\n    width: 100%;\n  }\n\n  > .next {\n    left: 100%;\n  }\n  > .prev {\n    left: -100%;\n  }\n  > .next.left,\n  > .prev.right {\n    left: 0;\n  }\n\n  > .active.left {\n    left: -100%;\n  }\n  > .active.right {\n    left: 100%;\n  }\n\n}\n\n// Left/right controls for nav\n// ---------------------------\n\n.carousel-control {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: @carousel-control-width;\n  font-size: @carousel-control-font-size;\n  color: @carousel-control-color;\n  text-align: center;\n  text-shadow: @carousel-text-shadow;\n  background-color: rgba(0, 0, 0, 0); // Fix IE9 click-thru bug\n  .opacity(@carousel-control-opacity);\n  // We can't have this transition here because WebKit cancels the carousel\n  // animation if you trip this while in the middle of another animation.\n\n  // Set gradients for backgrounds\n  &.left {\n    #gradient > .horizontal(@start-color: rgba(0, 0, 0, .5); @end-color: rgba(0, 0, 0, .0001));\n  }\n  &.right {\n    right: 0;\n    left: auto;\n    #gradient > .horizontal(@start-color: rgba(0, 0, 0, .0001); @end-color: rgba(0, 0, 0, .5));\n  }\n\n  // Hover/focus state\n  &:hover,\n  &:focus {\n    color: @carousel-control-color;\n   
 text-decoration: none;\n    outline: 0;\n    .opacity(.9);\n  }\n\n  // Toggles\n  .icon-prev,\n  .icon-next,\n  .glyphicon-chevron-left,\n  .glyphicon-chevron-right {\n    position: absolute;\n    top: 50%;\n    z-index: 5;\n    display: inline-block;\n    margin-top: -10px;\n  }\n  .icon-prev,\n  .glyphicon-chevron-left {\n    left: 50%;\n    margin-left: -10px;\n  }\n  .icon-next,\n  .glyphicon-chevron-right {\n    right: 50%;\n    margin-right: -10px;\n  }\n  .icon-prev,\n  .icon-next {\n    width: 20px;\n    height: 20px;\n    font-family: serif;\n    line-height: 1;\n  }\n\n  .icon-prev {\n    &:before {\n      content: \"\\2039\";// SINGLE LEFT-POINTING ANGLE QUOTATION MARK (U+2039)\n    }\n  }\n  .icon-next {\n    &:before {\n      content: \"\\203a\";// SINGLE RIGHT-POINTING ANGLE QUOTATION MARK (U+203A)\n    }\n  }\n}\n\n// Optional indicator pips\n//\n// Add an unordered list with the following class and add a list item for each\n// slide your carousel holds.\n\n.carousel-indicators {\n  position: absolute;\n  bottom: 10px;\n  left: 50%;\n  z-index: 15;\n  width: 60%;\n  padding-left: 0;\n  margin-left: -30%;\n  text-align: center;\n  list-style: none;\n\n  li {\n    display: inline-block;\n    width: 10px;\n    height: 10px;\n    margin: 1px;\n    text-indent: -999px;\n    cursor: pointer;\n    // IE8-9 hack for event handling\n    //\n    // Internet Explorer 8-9 does not support clicks on elements without a set\n    // `background-color`. We cannot use `filter` since that's not viewed as a\n    // background color by the browser. Thus, a hack is needed.\n    // See https://developer.mozilla.org/en-US/docs/Web/Events/click#Internet_Explorer\n    //\n    // For IE8, we set solid black as it doesn't support `rgba()`. For IE9, we\n    // set alpha transparency for the best results possible.\n    background-color: #000 \\9; // IE8\n    background-color: rgba(0, 0, 0, 0); // IE9\n\n    border: 1px solid @carousel-indicator-border-color;\n    border-radius: 10px;\n  }\n\n  .active {\n    width: 12px;\n    height: 12px;\n    margin: 0;\n    background-color: @carousel-indicator-active-bg;\n  }\n}\n\n// Optional captions\n// -----------------------------\n// Hidden by default for smaller viewports\n.carousel-caption {\n  position: absolute;\n  right: 15%;\n  bottom: 20px;\n  left: 15%;\n  z-index: 10;\n  padding-top: 20px;\n  padding-bottom: 20px;\n  color: @carousel-caption-color;\n  text-align: center;\n  text-shadow: @carousel-text-shadow;\n\n  & .btn {\n    text-shadow: none; // No shadow for button elements in carousel-caption\n  }\n}\n\n\n// Scale up controls for tablets and up\n@media screen and (min-width: @screen-sm-min) {\n\n  // Scale up the controls a smidge\n  .carousel-control {\n    .glyphicon-chevron-left,\n    .glyphicon-chevron-right,\n    .icon-prev,\n    .icon-next {\n      width: (@carousel-control-font-size * 1.5);\n      height: (@carousel-control-font-size * 1.5);\n      margin-top: (@carousel-control-font-size / -2);\n      font-size: (@carousel-control-font-size * 1.5);\n    }\n    .glyphicon-chevron-left,\n    .icon-prev {\n      margin-left: (@carousel-control-font-size / -2);\n    }\n    .glyphicon-chevron-right,\n    .icon-next {\n      margin-right: (@carousel-control-font-size / -2);\n    }\n  }\n\n  // Show and left align the captions\n  .carousel-caption {\n    right: 20%;\n    left: 20%;\n    padding-bottom: 30px;\n  }\n\n  // Move up the indicators\n  .carousel-indicators {\n    bottom: 20px;\n  }\n}\n","// Clearfix\n//\n// For modern browsers\n// 
1. The space content is one way to avoid an Opera bug when the\n//    contenteditable attribute is included anywhere else in the document.\n//    Otherwise it causes space to appear at the top and bottom of elements\n//    that are clearfixed.\n// 2. The use of `table` rather than `block` is only necessary if using\n//    `:before` to contain the top-margins of child elements.\n//\n// Source: http://nicolasgallagher.com/micro-clearfix-hack/\n\n.clearfix() {\n  &:before,\n  &:after {\n    display: table; // 2\n    content: \" \"; // 1\n  }\n  &:after {\n    clear: both;\n  }\n}\n","// Center-align a block level element\n\n.center-block() {\n  display: block;\n  margin-right: auto;\n  margin-left: auto;\n}\n","// stylelint-disable font-family-name-quotes, font-family-no-missing-generic-family-keyword\n\n// CSS image replacement\n//\n// Heads up! v3 launched with only `.hide-text()`, but per our pattern for\n// mixins being reused as classes with the same name, this doesn't hold up. As\n// of v3.0.1 we have added `.text-hide()` and deprecated `.hide-text()`.\n//\n// Source: https://github.com/h5bp/html5-boilerplate/commit/aa0396eae757\n\n// Deprecated as of v3.0.1 (has been removed in v4)\n.hide-text() {\n  font: ~\"0/0\" a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n\n// New mixin to use as of v3.0.1\n.text-hide() {\n  .hide-text();\n}\n","// stylelint-disable declaration-no-important, at-rule-no-vendor-prefix\n\n//\n// Responsive: Utility classes\n// --------------------------------------------------\n\n\n// IE10 in Windows (Phone) 8\n//\n// Support for responsive views via media queries is kind of borked in IE10, for\n// Surface/desktop in split view and for Windows Phone 8. This particular fix\n// must be accompanied by a snippet of JavaScript to sniff the user agent and\n// apply some conditional CSS to *only* the Surface/desktop Windows 8. 
Look at\n// our Getting Started page for more information on this bug.\n//\n// For more information, see the following:\n//\n// Issue: https://github.com/twbs/bootstrap/issues/10497\n// Docs: https://getbootstrap.com/docs/3.4/getting-started/#support-ie10-width\n// Source: https://timkadlec.com/2013/01/windows-phone-8-and-device-width/\n// Source: https://timkadlec.com/2012/10/ie10-snap-mode-and-responsive-design/\n\n@-ms-viewport {\n  width: device-width;\n}\n\n\n// Visibility utilities\n// Note: Deprecated .visible-xs, .visible-sm, .visible-md, and .visible-lg as of v3.2.0\n.visible-xs,\n.visible-sm,\n.visible-md,\n.visible-lg {\n  .responsive-invisibility();\n}\n\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n  display: none !important;\n}\n\n.visible-xs {\n  @media (max-width: @screen-xs-max) {\n    .responsive-visibility();\n  }\n}\n.visible-xs-block {\n  @media (max-width: @screen-xs-max) {\n    display: block !important;\n  }\n}\n.visible-xs-inline {\n  @media (max-width: @screen-xs-max) {\n    display: inline !important;\n  }\n}\n.visible-xs-inline-block {\n  @media (max-width: @screen-xs-max) {\n    display: inline-block !important;\n  }\n}\n\n.visible-sm {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    .responsive-visibility();\n  }\n}\n.visible-sm-block {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    display: block !important;\n  }\n}\n.visible-sm-inline {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    display: inline !important;\n  }\n}\n.visible-sm-inline-block {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    display: inline-block !important;\n  }\n}\n\n.visible-md {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    .responsive-visibility();\n  }\n}\n.visible-md-block {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    display: block !important;\n  }\n}\n.visible-md-inline {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    display: inline !important;\n  }\n}\n.visible-md-inline-block {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    display: inline-block !important;\n  }\n}\n\n.visible-lg {\n  @media (min-width: @screen-lg-min) {\n    .responsive-visibility();\n  }\n}\n.visible-lg-block {\n  @media (min-width: @screen-lg-min) {\n    display: block !important;\n  }\n}\n.visible-lg-inline {\n  @media (min-width: @screen-lg-min) {\n    display: inline !important;\n  }\n}\n.visible-lg-inline-block {\n  @media (min-width: @screen-lg-min) {\n    display: inline-block !important;\n  }\n}\n\n.hidden-xs {\n  @media (max-width: @screen-xs-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-sm {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-md {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-lg {\n  @media (min-width: @screen-lg-min) {\n    .responsive-invisibility();\n  }\n}\n\n\n// Print utilities\n//\n// Media queries are placed on the inside to be mixin-friendly.\n\n// Note: Deprecated .visible-print as of v3.2.0\n.visible-print {\n  .responsive-invisibility();\n\n  @media print 
{\n    .responsive-visibility();\n  }\n}\n.visible-print-block {\n  display: none !important;\n\n  @media print {\n    display: block !important;\n  }\n}\n.visible-print-inline {\n  display: none !important;\n\n  @media print {\n    display: inline !important;\n  }\n}\n.visible-print-inline-block {\n  display: none !important;\n\n  @media print {\n    display: inline-block !important;\n  }\n}\n\n.hidden-print {\n  @media print {\n    .responsive-invisibility();\n  }\n}\n","// stylelint-disable declaration-no-important\n\n.responsive-visibility() {\n  display: block !important;\n  table&  { display: table !important; }\n  tr&     { display: table-row !important; }\n  th&,\n  td&     { display: table-cell !important; }\n}\n\n.responsive-invisibility() {\n  display: none !important;\n}\n"]}
\ No newline at end of file
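[Editor's note: the blob above is the tail of the `sourcesContent` array of the ozonedoc theme's bundled Bootstrap 3.4 source map — the original Less sources (badges, jumbotron, thumbnails, alerts, progress bars, gradient mixins, media, list groups, panels, embeds, wells, close icons, modals, tooltips, popovers, carousel, and the responsive utilities) inlined as JSON strings. As a reminder of how these parenthesized Less mixins behave once compiled, here is a minimal sketch of the `.clearfix()` mixin from the deleted sources; the `.demo` selector is hypothetical, and the expansion shown in comments follows standard Less semantics:

// The micro-clearfix mixin exactly as it appears in the deleted sources.
.clearfix() {
  &:before,
  &:after {
    display: table; // `table` (not `block`) so :before contains child top-margins
    content: " ";   // a space character avoids an Opera bug with contenteditable
  }
  &:after {
    clear: both;
  }
}

// Hypothetical usage:
.demo {
  .clearfix();
}

// Compiled CSS output:
// .demo:before, .demo:after { display: table; content: " "; }
// .demo:after { clear: both; }

Because the mixin is defined with parentheses, Less emits no `.clearfix` rule of its own; only call sites such as `.demo` produce output, which is why the deleted sources can ship dozens of mixins without bloating the compiled CSS.]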
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css b/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css
deleted file mode 100644
index 6f812c8..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css
+++ /dev/null
@@ -1,169 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Base structure
- */
-
-/* Move down content because we have a fixed navbar that is 50px tall */
-body {
-  padding-top: 50px;
-  font-size: 14pt;
-}
-
-pre {
-  font-size: 14pt;
-}
-
-a {
-  color: #448628;
-}
-
-a:hover {
-  color: #245F0B;
-}
-/*
- * Global add-ons
- */
-
-.sub-header {
-  padding-bottom: 10px;
-  border-bottom: 1px solid #eee;
-}
-
-/*
- * Top navigation
- * Hide default border to remove 1px line.
- */
-.navbar-fixed-top {
-  border: 0;
-}
-
-/*
- * Sidebar
- */
-
-/* Hide for mobile, show later */
-.sidebar {
-  display: none;
-}
-@media (min-width: 768px) {
-  .sidebar {
-    position: fixed;
-    top: 51px;
-    bottom: 0;
-    left: 0;
-    z-index: 1000;
-    display: block;
-    padding: 20px;
-    overflow-x: hidden;
-    overflow-y: auto; /* Scrollable contents if viewport is shorter than content. */
-    background-color: #f5f5f5;
-    border-right: 1px solid #eee;
-  }
-}
-
-/* Sidebar navigation */
-.nav-sidebar {
-  margin-right: -21px; /* 20px padding + 1px border */
-  margin-bottom: 20px;
-  margin-left: -20px;
-}
-.nav-sidebar > li > a {
-  padding-right: 20px;
-  padding-left: 20px;
-}
-.nav-sidebar > li > ul > li > a {
-  padding-right: 40px;
-  padding-left: 40px;
-}
-.nav-sidebar > li > ul > li > ul > li > a {
-  padding-right: 60px;
-  padding-left: 60px;
-}
-.nav-sidebar  .active > a,
-.nav-sidebar  .active > a:hover,
-.nav-sidebar  .active > a:focus {
-  color: #fff;
-  background-color: #73B148;
-}
-
-
-/*
- * Main content
- */
-
-.main {
-  padding: 20px;
-}
-@media (min-width: 768px) {
-  .main {
-    padding-right: 40px;
-    padding-left: 40px;
-  }
-}
-.main .page-header {
-  margin-top: 0;
-}
-
-
-/*
- * Placeholder dashboard ideas
- */
-
-.placeholders {
-  margin-bottom: 30px;
-  text-align: center;
-}
-.placeholders h4 {
-  margin-bottom: 0;
-}
-.placeholder {
-  margin-bottom: 20px;
-}
-.placeholder img {
-  display: inline-block;
-  border-radius: 50%;
-}
-
-
-
-
-h4 {
-  font-weight: bold;
-}
-
-.cardgroup {
-   margin-bottom: 50px;
-}
-
-.cardgroup .card {
-  padding: 20px;
-}
-
-.cardgroup .media {
-  padding: 30px;
-}
-
-h1 {
-  margin-bottom: 20px;
-}
-
-.card {
-  padding: 20px;
-}
\ No newline at end of file
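[Editor's note: the deleted `ozonedoc.css` is a small overlay on Bootstrap 3 — a green link palette, a fixed top navbar, and a fixed, independently scrolling sidebar shown from 768px up. The vertical offsets form one chain worth noting when porting the theme; the sketch below restates it with values copied from the hunk above (the 50px figure matches Bootstrap 3's default @navbar-height, which is an assumption about the theme's navbar configuration):

/* Offset chain used by the deleted stylesheet (values copied from above). */
body {
  padding-top: 50px;    /* clear the 50px fixed navbar */
}
.sidebar {
  position: fixed;
  top: 51px;            /* one pixel below the 50px fixed navbar */
  bottom: 0;
  overflow-y: auto;     /* sidebar scrolls independently of the page */
}
.nav-sidebar {
  margin-right: -21px;  /* 20px sidebar padding + 1px border */
  margin-left: -20px;   /* pull links out to the sidebar's full width */
}

Changing the navbar height therefore means touching all three rules, since the stylesheet hard-codes the offsets rather than deriving them from a shared variable.]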
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.eot b/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.eot
deleted file mode 100644
index b93a495..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.eot
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg b/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg
deleted file mode 100644
index f155876..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg
+++ /dev/null
@@ -1,288 +0,0 @@
-<?xml version="1.0" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
-<svg xmlns="http://www.w3.org/2000/svg">
-<metadata></metadata>
-<defs>
-<font id="glyphicons_halflingsregular" horiz-adv-x="1200" >
-<font-face units-per-em="1200" ascent="960" descent="-240" />
-<missing-glyph horiz-adv-x="500" />
-<glyph horiz-adv-x="0" />
-<glyph horiz-adv-x="400" />
-<glyph unicode=" " />
-<glyph unicode="*" d="M600 1100q15 0 34 -1.5t30 -3.5l11 -1q10 -2 17.5 -10.5t7.5 -18.5v-224l158 158q7 7 18 8t19 -6l106 -106q7 -8 6 -19t-8 -18l-158 -158h224q10 0 18.5 -7.5t10.5 -17.5q6 -41 6 -75q0 -15 -1.5 -34t-3.5 -30l-1 -11q-2 -10 -10.5 -17.5t-18.5 -7.5h-224l158 -158 q7 -7 8 -18t-6 -19l-106 -106q-8 -7 -19 -6t-18 8l-158 158v-224q0 -10 -7.5 -18.5t-17.5 -10.5q-41 -6 -75 -6q-15 0 -34 1.5t-30 3.5l-11 1q-10 2 -17.5 10.5t-7.5 18.5v224l-158 -158q-7 -7 -18 -8t-19 6l-106 106q-7 8 -6 19t8 18l158 158h-224q-10 0 -18.5 7.5 t-10.5 17.5q-6 41 -6 75q0 15 1.5 34t3.5 30l1 11q2 10 10.5 17.5t18.5 7.5h224l-158 158q-7 7 -8 18t6 19l106 106q8 7 19 6t18 -8l158 -158v224q0 10 7.5 18.5t17.5 10.5q41 6 75 6z" />
-<glyph unicode="+" d="M450 1100h200q21 0 35.5 -14.5t14.5 -35.5v-350h350q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-350v-350q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v350h-350q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5 h350v350q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xa0;" />
-<glyph unicode="&#xa5;" d="M825 1100h250q10 0 12.5 -5t-5.5 -13l-364 -364q-6 -6 -11 -18h268q10 0 13 -6t-3 -14l-120 -160q-6 -8 -18 -14t-22 -6h-125v-100h275q10 0 13 -6t-3 -14l-120 -160q-6 -8 -18 -14t-22 -6h-125v-174q0 -11 -7.5 -18.5t-18.5 -7.5h-148q-11 0 -18.5 7.5t-7.5 18.5v174 h-275q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h125v100h-275q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h118q-5 12 -11 18l-364 364q-8 8 -5.5 13t12.5 5h250q25 0 43 -18l164 -164q8 -8 18 -8t18 8l164 164q18 18 43 18z" />
-<glyph unicode="&#x2000;" horiz-adv-x="650" />
-<glyph unicode="&#x2001;" horiz-adv-x="1300" />
-<glyph unicode="&#x2002;" horiz-adv-x="650" />
-<glyph unicode="&#x2003;" horiz-adv-x="1300" />
-<glyph unicode="&#x2004;" horiz-adv-x="433" />
-<glyph unicode="&#x2005;" horiz-adv-x="325" />
-<glyph unicode="&#x2006;" horiz-adv-x="216" />
-<glyph unicode="&#x2007;" horiz-adv-x="216" />
-<glyph unicode="&#x2008;" horiz-adv-x="162" />
-<glyph unicode="&#x2009;" horiz-adv-x="260" />
-<glyph unicode="&#x200a;" horiz-adv-x="72" />
-<glyph unicode="&#x202f;" horiz-adv-x="260" />
-<glyph unicode="&#x205f;" horiz-adv-x="325" />
-<glyph unicode="&#x20ac;" d="M744 1198q242 0 354 -189q60 -104 66 -209h-181q0 45 -17.5 82.5t-43.5 61.5t-58 40.5t-60.5 24t-51.5 7.5q-19 0 -40.5 -5.5t-49.5 -20.5t-53 -38t-49 -62.5t-39 -89.5h379l-100 -100h-300q-6 -50 -6 -100h406l-100 -100h-300q9 -74 33 -132t52.5 -91t61.5 -54.5t59 -29 t47 -7.5q22 0 50.5 7.5t60.5 24.5t58 41t43.5 61t17.5 80h174q-30 -171 -128 -278q-107 -117 -274 -117q-206 0 -324 158q-36 48 -69 133t-45 204h-217l100 100h112q1 47 6 100h-218l100 100h134q20 87 51 153.5t62 103.5q117 141 297 141z" />
-<glyph unicode="&#x20bd;" d="M428 1200h350q67 0 120 -13t86 -31t57 -49.5t35 -56.5t17 -64.5t6.5 -60.5t0.5 -57v-16.5v-16.5q0 -36 -0.5 -57t-6.5 -61t-17 -65t-35 -57t-57 -50.5t-86 -31.5t-120 -13h-178l-2 -100h288q10 0 13 -6t-3 -14l-120 -160q-6 -8 -18 -14t-22 -6h-138v-175q0 -11 -5.5 -18 t-15.5 -7h-149q-10 0 -17.5 7.5t-7.5 17.5v175h-267q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h117v100h-267q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h117v475q0 10 7.5 17.5t17.5 7.5zM600 1000v-300h203q64 0 86.5 33t22.5 119q0 84 -22.5 116t-86.5 32h-203z" />
-<glyph unicode="&#x2212;" d="M250 700h800q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#x231b;" d="M1000 1200v-150q0 -21 -14.5 -35.5t-35.5 -14.5h-50v-100q0 -91 -49.5 -165.5t-130.5 -109.5q81 -35 130.5 -109.5t49.5 -165.5v-150h50q21 0 35.5 -14.5t14.5 -35.5v-150h-800v150q0 21 14.5 35.5t35.5 14.5h50v150q0 91 49.5 165.5t130.5 109.5q-81 35 -130.5 109.5 t-49.5 165.5v100h-50q-21 0 -35.5 14.5t-14.5 35.5v150h800zM400 1000v-100q0 -60 32.5 -109.5t87.5 -73.5q28 -12 44 -37t16 -55t-16 -55t-44 -37q-55 -24 -87.5 -73.5t-32.5 -109.5v-150h400v150q0 60 -32.5 109.5t-87.5 73.5q-28 12 -44 37t-16 55t16 55t44 37 q55 24 87.5 73.5t32.5 109.5v100h-400z" />
-<glyph unicode="&#x25fc;" horiz-adv-x="500" d="M0 0z" />
-<glyph unicode="&#x2601;" d="M503 1089q110 0 200.5 -59.5t134.5 -156.5q44 14 90 14q120 0 205 -86.5t85 -206.5q0 -121 -85 -207.5t-205 -86.5h-750q-79 0 -135.5 57t-56.5 137q0 69 42.5 122.5t108.5 67.5q-2 12 -2 37q0 153 108 260.5t260 107.5z" />
-<glyph unicode="&#x26fa;" d="M774 1193.5q16 -9.5 20.5 -27t-5.5 -33.5l-136 -187l467 -746h30q20 0 35 -18.5t15 -39.5v-42h-1200v42q0 21 15 39.5t35 18.5h30l468 746l-135 183q-10 16 -5.5 34t20.5 28t34 5.5t28 -20.5l111 -148l112 150q9 16 27 20.5t34 -5zM600 200h377l-182 112l-195 534v-646z " />
-<glyph unicode="&#x2709;" d="M25 1100h1150q10 0 12.5 -5t-5.5 -13l-564 -567q-8 -8 -18 -8t-18 8l-564 567q-8 8 -5.5 13t12.5 5zM18 882l264 -264q8 -8 8 -18t-8 -18l-264 -264q-8 -8 -13 -5.5t-5 12.5v550q0 10 5 12.5t13 -5.5zM918 618l264 264q8 8 13 5.5t5 -12.5v-550q0 -10 -5 -12.5t-13 5.5 l-264 264q-8 8 -8 18t8 18zM818 482l364 -364q8 -8 5.5 -13t-12.5 -5h-1150q-10 0 -12.5 5t5.5 13l364 364q8 8 18 8t18 -8l164 -164q8 -8 18 -8t18 8l164 164q8 8 18 8t18 -8z" />
-<glyph unicode="&#x270f;" d="M1011 1210q19 0 33 -13l153 -153q13 -14 13 -33t-13 -33l-99 -92l-214 214l95 96q13 14 32 14zM1013 800l-615 -614l-214 214l614 614zM317 96l-333 -112l110 335z" />
-<glyph unicode="&#xe001;" d="M700 650v-550h250q21 0 35.5 -14.5t14.5 -35.5v-50h-800v50q0 21 14.5 35.5t35.5 14.5h250v550l-500 550h1200z" />
-<glyph unicode="&#xe002;" d="M368 1017l645 163q39 15 63 0t24 -49v-831q0 -55 -41.5 -95.5t-111.5 -63.5q-79 -25 -147 -4.5t-86 75t25.5 111.5t122.5 82q72 24 138 8v521l-600 -155v-606q0 -42 -44 -90t-109 -69q-79 -26 -147 -5.5t-86 75.5t25.5 111.5t122.5 82.5q72 24 138 7v639q0 38 14.5 59 t53.5 34z" />
-<glyph unicode="&#xe003;" d="M500 1191q100 0 191 -39t156.5 -104.5t104.5 -156.5t39 -191l-1 -2l1 -5q0 -141 -78 -262l275 -274q23 -26 22.5 -44.5t-22.5 -42.5l-59 -58q-26 -20 -46.5 -20t-39.5 20l-275 274q-119 -77 -261 -77l-5 1l-2 -1q-100 0 -191 39t-156.5 104.5t-104.5 156.5t-39 191 t39 191t104.5 156.5t156.5 104.5t191 39zM500 1022q-88 0 -162 -43t-117 -117t-43 -162t43 -162t117 -117t162 -43t162 43t117 117t43 162t-43 162t-117 117t-162 43z" />
-<glyph unicode="&#xe005;" d="M649 949q48 68 109.5 104t121.5 38.5t118.5 -20t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-150 152.5t-126.5 127.5t-93.5 124.5t-33.5 117.5q0 64 28 123t73 100.5t104 64t119 20 t120.5 -38.5t104.5 -104z" />
-<glyph unicode="&#xe006;" d="M407 800l131 353q7 19 17.5 19t17.5 -19l129 -353h421q21 0 24 -8.5t-14 -20.5l-342 -249l130 -401q7 -20 -0.5 -25.5t-24.5 6.5l-343 246l-342 -247q-17 -12 -24.5 -6.5t-0.5 25.5l130 400l-347 251q-17 12 -14 20.5t23 8.5h429z" />
-<glyph unicode="&#xe007;" d="M407 800l131 353q7 19 17.5 19t17.5 -19l129 -353h421q21 0 24 -8.5t-14 -20.5l-342 -249l130 -401q7 -20 -0.5 -25.5t-24.5 6.5l-343 246l-342 -247q-17 -12 -24.5 -6.5t-0.5 25.5l130 400l-347 251q-17 12 -14 20.5t23 8.5h429zM477 700h-240l197 -142l-74 -226 l193 139l195 -140l-74 229l192 140h-234l-78 211z" />
-<glyph unicode="&#xe008;" d="M600 1200q124 0 212 -88t88 -212v-250q0 -46 -31 -98t-69 -52v-75q0 -10 6 -21.5t15 -17.5l358 -230q9 -5 15 -16.5t6 -21.5v-93q0 -10 -7.5 -17.5t-17.5 -7.5h-1150q-10 0 -17.5 7.5t-7.5 17.5v93q0 10 6 21.5t15 16.5l358 230q9 6 15 17.5t6 21.5v75q-38 0 -69 52 t-31 98v250q0 124 88 212t212 88z" />
-<glyph unicode="&#xe009;" d="M25 1100h1150q10 0 17.5 -7.5t7.5 -17.5v-1050q0 -10 -7.5 -17.5t-17.5 -7.5h-1150q-10 0 -17.5 7.5t-7.5 17.5v1050q0 10 7.5 17.5t17.5 7.5zM100 1000v-100h100v100h-100zM875 1000h-550q-10 0 -17.5 -7.5t-7.5 -17.5v-350q0 -10 7.5 -17.5t17.5 -7.5h550 q10 0 17.5 7.5t7.5 17.5v350q0 10 -7.5 17.5t-17.5 7.5zM1000 1000v-100h100v100h-100zM100 800v-100h100v100h-100zM1000 800v-100h100v100h-100zM100 600v-100h100v100h-100zM1000 600v-100h100v100h-100zM875 500h-550q-10 0 -17.5 -7.5t-7.5 -17.5v-350q0 -10 7.5 -17.5 t17.5 -7.5h550q10 0 17.5 7.5t7.5 17.5v350q0 10 -7.5 17.5t-17.5 7.5zM100 400v-100h100v100h-100zM1000 400v-100h100v100h-100zM100 200v-100h100v100h-100zM1000 200v-100h100v100h-100z" />
-<glyph unicode="&#xe010;" d="M50 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM650 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400 q0 21 14.5 35.5t35.5 14.5zM50 500h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM650 500h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe011;" d="M50 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200 q0 21 14.5 35.5t35.5 14.5zM850 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM50 700h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200 q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 700h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM850 700h200q21 0 35.5 -14.5t14.5 -35.5v-200 q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM50 300h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 300h200 q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM850 300h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5 t35.5 14.5z" />
-<glyph unicode="&#xe012;" d="M50 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 1100h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v200 q0 21 14.5 35.5t35.5 14.5zM50 700h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 700h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700 q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM50 300h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 300h700q21 0 35.5 -14.5t14.5 -35.5v-200 q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe013;" d="M465 477l571 571q8 8 18 8t17 -8l177 -177q8 -7 8 -17t-8 -18l-783 -784q-7 -8 -17.5 -8t-17.5 8l-384 384q-8 8 -8 18t8 17l177 177q7 8 17 8t18 -8l171 -171q7 -7 18 -7t18 7z" />
-<glyph unicode="&#xe014;" d="M904 1083l178 -179q8 -8 8 -18.5t-8 -17.5l-267 -268l267 -268q8 -7 8 -17.5t-8 -18.5l-178 -178q-8 -8 -18.5 -8t-17.5 8l-268 267l-268 -267q-7 -8 -17.5 -8t-18.5 8l-178 178q-8 8 -8 18.5t8 17.5l267 268l-267 268q-8 7 -8 17.5t8 18.5l178 178q8 8 18.5 8t17.5 -8 l268 -267l268 268q7 7 17.5 7t18.5 -7z" />
-<glyph unicode="&#xe015;" d="M507 1177q98 0 187.5 -38.5t154.5 -103.5t103.5 -154.5t38.5 -187.5q0 -141 -78 -262l300 -299q8 -8 8 -18.5t-8 -18.5l-109 -108q-7 -8 -17.5 -8t-18.5 8l-300 299q-119 -77 -261 -77q-98 0 -188 38.5t-154.5 103t-103 154.5t-38.5 188t38.5 187.5t103 154.5 t154.5 103.5t188 38.5zM506.5 1023q-89.5 0 -165.5 -44t-120 -120.5t-44 -166t44 -165.5t120 -120t165.5 -44t166 44t120.5 120t44 165.5t-44 166t-120.5 120.5t-166 44zM425 900h150q10 0 17.5 -7.5t7.5 -17.5v-75h75q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5 t-17.5 -7.5h-75v-75q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v75h-75q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h75v75q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe016;" d="M507 1177q98 0 187.5 -38.5t154.5 -103.5t103.5 -154.5t38.5 -187.5q0 -141 -78 -262l300 -299q8 -8 8 -18.5t-8 -18.5l-109 -108q-7 -8 -17.5 -8t-18.5 8l-300 299q-119 -77 -261 -77q-98 0 -188 38.5t-154.5 103t-103 154.5t-38.5 188t38.5 187.5t103 154.5 t154.5 103.5t188 38.5zM506.5 1023q-89.5 0 -165.5 -44t-120 -120.5t-44 -166t44 -165.5t120 -120t165.5 -44t166 44t120.5 120t44 165.5t-44 166t-120.5 120.5t-166 44zM325 800h350q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-350q-10 0 -17.5 7.5 t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe017;" d="M550 1200h100q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM800 975v166q167 -62 272 -209.5t105 -331.5q0 -117 -45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5 t-184.5 123t-123 184.5t-45.5 224q0 184 105 331.5t272 209.5v-166q-103 -55 -165 -155t-62 -220q0 -116 57 -214.5t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5q0 120 -62 220t-165 155z" />
-<glyph unicode="&#xe018;" d="M1025 1200h150q10 0 17.5 -7.5t7.5 -17.5v-1150q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v1150q0 10 7.5 17.5t17.5 7.5zM725 800h150q10 0 17.5 -7.5t7.5 -17.5v-750q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v750 q0 10 7.5 17.5t17.5 7.5zM425 500h150q10 0 17.5 -7.5t7.5 -17.5v-450q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v450q0 10 7.5 17.5t17.5 7.5zM125 300h150q10 0 17.5 -7.5t7.5 -17.5v-250q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5 v250q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe019;" d="M600 1174q33 0 74 -5l38 -152l5 -1q49 -14 94 -39l5 -2l134 80q61 -48 104 -105l-80 -134l3 -5q25 -44 39 -93l1 -6l152 -38q5 -43 5 -73q0 -34 -5 -74l-152 -38l-1 -6q-15 -49 -39 -93l-3 -5l80 -134q-48 -61 -104 -105l-134 81l-5 -3q-44 -25 -94 -39l-5 -2l-38 -151 q-43 -5 -74 -5q-33 0 -74 5l-38 151l-5 2q-49 14 -94 39l-5 3l-134 -81q-60 48 -104 105l80 134l-3 5q-25 45 -38 93l-2 6l-151 38q-6 42 -6 74q0 33 6 73l151 38l2 6q13 48 38 93l3 5l-80 134q47 61 105 105l133 -80l5 2q45 25 94 39l5 1l38 152q43 5 74 5zM600 815 q-89 0 -152 -63t-63 -151.5t63 -151.5t152 -63t152 63t63 151.5t-63 151.5t-152 63z" />
-<glyph unicode="&#xe020;" d="M500 1300h300q41 0 70.5 -29.5t29.5 -70.5v-100h275q10 0 17.5 -7.5t7.5 -17.5v-75h-1100v75q0 10 7.5 17.5t17.5 7.5h275v100q0 41 29.5 70.5t70.5 29.5zM500 1200v-100h300v100h-300zM1100 900v-800q0 -41 -29.5 -70.5t-70.5 -29.5h-700q-41 0 -70.5 29.5t-29.5 70.5 v800h900zM300 800v-700h100v700h-100zM500 800v-700h100v700h-100zM700 800v-700h100v700h-100zM900 800v-700h100v700h-100z" />
-<glyph unicode="&#xe021;" d="M18 618l620 608q8 7 18.5 7t17.5 -7l608 -608q8 -8 5.5 -13t-12.5 -5h-175v-575q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v375h-300v-375q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v575h-175q-10 0 -12.5 5t5.5 13z" />
-<glyph unicode="&#xe022;" d="M600 1200v-400q0 -41 29.5 -70.5t70.5 -29.5h300v-650q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v1100q0 21 14.5 35.5t35.5 14.5h450zM1000 800h-250q-21 0 -35.5 14.5t-14.5 35.5v250z" />
-<glyph unicode="&#xe023;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM525 900h50q10 0 17.5 -7.5t7.5 -17.5v-275h175q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe024;" d="M1300 0h-538l-41 400h-242l-41 -400h-538l431 1200h209l-21 -300h162l-20 300h208zM515 800l-27 -300h224l-27 300h-170z" />
-<glyph unicode="&#xe025;" d="M550 1200h200q21 0 35.5 -14.5t14.5 -35.5v-450h191q20 0 25.5 -11.5t-7.5 -27.5l-327 -400q-13 -16 -32 -16t-32 16l-327 400q-13 16 -7.5 27.5t25.5 11.5h191v450q0 21 14.5 35.5t35.5 14.5zM1125 400h50q10 0 17.5 -7.5t7.5 -17.5v-350q0 -10 -7.5 -17.5t-17.5 -7.5 h-1050q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h50q10 0 17.5 -7.5t7.5 -17.5v-175h900v175q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe026;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM525 900h150q10 0 17.5 -7.5t7.5 -17.5v-275h137q21 0 26 -11.5t-8 -27.5l-223 -275q-13 -16 -32 -16t-32 16l-223 275q-13 16 -8 27.5t26 11.5h137v275q0 10 7.5 17.5t17.5 7.5z " />
-<glyph unicode="&#xe027;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM632 914l223 -275q13 -16 8 -27.5t-26 -11.5h-137v-275q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v275h-137q-21 0 -26 11.5t8 27.5l223 275q13 16 32 16 t32 -16z" />
-<glyph unicode="&#xe028;" d="M225 1200h750q10 0 19.5 -7t12.5 -17l186 -652q7 -24 7 -49v-425q0 -12 -4 -27t-9 -17q-12 -6 -37 -6h-1100q-12 0 -27 4t-17 8q-6 13 -6 38l1 425q0 25 7 49l185 652q3 10 12.5 17t19.5 7zM878 1000h-556q-10 0 -19 -7t-11 -18l-87 -450q-2 -11 4 -18t16 -7h150 q10 0 19.5 -7t11.5 -17l38 -152q2 -10 11.5 -17t19.5 -7h250q10 0 19.5 7t11.5 17l38 152q2 10 11.5 17t19.5 7h150q10 0 16 7t4 18l-87 450q-2 11 -11 18t-19 7z" />
-<glyph unicode="&#xe029;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM540 820l253 -190q17 -12 17 -30t-17 -30l-253 -190q-16 -12 -28 -6.5t-12 26.5v400q0 21 12 26.5t28 -6.5z" />
-<glyph unicode="&#xe030;" d="M947 1060l135 135q7 7 12.5 5t5.5 -13v-362q0 -10 -7.5 -17.5t-17.5 -7.5h-362q-11 0 -13 5.5t5 12.5l133 133q-109 76 -238 76q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5h150q0 -117 -45.5 -224 t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5q192 0 347 -117z" />
-<glyph unicode="&#xe031;" d="M947 1060l135 135q7 7 12.5 5t5.5 -13v-361q0 -11 -7.5 -18.5t-18.5 -7.5h-361q-11 0 -13 5.5t5 12.5l134 134q-110 75 -239 75q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5h-150q0 117 45.5 224t123 184.5t184.5 123t224 45.5q192 0 347 -117zM1027 600h150 q0 -117 -45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5q-192 0 -348 118l-134 -134q-7 -8 -12.5 -5.5t-5.5 12.5v360q0 11 7.5 18.5t18.5 7.5h360q10 0 12.5 -5.5t-5.5 -12.5l-133 -133q110 -76 240 -76q116 0 214.5 57t155.5 155.5t57 214.5z" />
-<glyph unicode="&#xe032;" d="M125 1200h1050q10 0 17.5 -7.5t7.5 -17.5v-1150q0 -10 -7.5 -17.5t-17.5 -7.5h-1050q-10 0 -17.5 7.5t-7.5 17.5v1150q0 10 7.5 17.5t17.5 7.5zM1075 1000h-850q-10 0 -17.5 -7.5t-7.5 -17.5v-850q0 -10 7.5 -17.5t17.5 -7.5h850q10 0 17.5 7.5t7.5 17.5v850 q0 10 -7.5 17.5t-17.5 7.5zM325 900h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 900h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5zM325 700h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 700h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5zM325 500h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 500h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5zM325 300h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 300h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe033;" d="M900 800v200q0 83 -58.5 141.5t-141.5 58.5h-300q-82 0 -141 -59t-59 -141v-200h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-600q0 -41 29.5 -70.5t70.5 -29.5h900q41 0 70.5 29.5t29.5 70.5v600q0 41 -29.5 70.5t-70.5 29.5h-100zM400 800v150q0 21 15 35.5t35 14.5h200 q20 0 35 -14.5t15 -35.5v-150h-300z" />
-<glyph unicode="&#xe034;" d="M125 1100h50q10 0 17.5 -7.5t7.5 -17.5v-1075h-100v1075q0 10 7.5 17.5t17.5 7.5zM1075 1052q4 0 9 -2q16 -6 16 -23v-421q0 -6 -3 -12q-33 -59 -66.5 -99t-65.5 -58t-56.5 -24.5t-52.5 -6.5q-26 0 -57.5 6.5t-52.5 13.5t-60 21q-41 15 -63 22.5t-57.5 15t-65.5 7.5 q-85 0 -160 -57q-7 -5 -15 -5q-6 0 -11 3q-14 7 -14 22v438q22 55 82 98.5t119 46.5q23 2 43 0.5t43 -7t32.5 -8.5t38 -13t32.5 -11q41 -14 63.5 -21t57 -14t63.5 -7q103 0 183 87q7 8 18 8z" />
-<glyph unicode="&#xe035;" d="M600 1175q116 0 227 -49.5t192.5 -131t131 -192.5t49.5 -227v-300q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v300q0 127 -70.5 231.5t-184.5 161.5t-245 57t-245 -57t-184.5 -161.5t-70.5 -231.5v-300q0 -10 -7.5 -17.5t-17.5 -7.5h-50 q-10 0 -17.5 7.5t-7.5 17.5v300q0 116 49.5 227t131 192.5t192.5 131t227 49.5zM220 500h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14v460q0 8 6 14t14 6zM820 500h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14v460 q0 8 6 14t14 6z" />
-<glyph unicode="&#xe036;" d="M321 814l258 172q9 6 15 2.5t6 -13.5v-750q0 -10 -6 -13.5t-15 2.5l-258 172q-21 14 -46 14h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h250q25 0 46 14zM900 668l120 120q7 7 17 7t17 -7l34 -34q7 -7 7 -17t-7 -17l-120 -120l120 -120q7 -7 7 -17 t-7 -17l-34 -34q-7 -7 -17 -7t-17 7l-120 119l-120 -119q-7 -7 -17 -7t-17 7l-34 34q-7 7 -7 17t7 17l119 120l-119 120q-7 7 -7 17t7 17l34 34q7 8 17 8t17 -8z" />
-<glyph unicode="&#xe037;" d="M321 814l258 172q9 6 15 2.5t6 -13.5v-750q0 -10 -6 -13.5t-15 2.5l-258 172q-21 14 -46 14h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h250q25 0 46 14zM766 900h4q10 -1 16 -10q96 -129 96 -290q0 -154 -90 -281q-6 -9 -17 -10l-3 -1q-9 0 -16 6 l-29 23q-7 7 -8.5 16.5t4.5 17.5q72 103 72 229q0 132 -78 238q-6 8 -4.5 18t9.5 17l29 22q7 5 15 5z" />
-<glyph unicode="&#xe038;" d="M967 1004h3q11 -1 17 -10q135 -179 135 -396q0 -105 -34 -206.5t-98 -185.5q-7 -9 -17 -10h-3q-9 0 -16 6l-42 34q-8 6 -9 16t5 18q111 150 111 328q0 90 -29.5 176t-84.5 157q-6 9 -5 19t10 16l42 33q7 5 15 5zM321 814l258 172q9 6 15 2.5t6 -13.5v-750q0 -10 -6 -13.5 t-15 2.5l-258 172q-21 14 -46 14h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h250q25 0 46 14zM766 900h4q10 -1 16 -10q96 -129 96 -290q0 -154 -90 -281q-6 -9 -17 -10l-3 -1q-9 0 -16 6l-29 23q-7 7 -8.5 16.5t4.5 17.5q72 103 72 229q0 132 -78 238 q-6 8 -4.5 18.5t9.5 16.5l29 22q7 5 15 5z" />
-<glyph unicode="&#xe039;" d="M500 900h100v-100h-100v-100h-400v-100h-100v600h500v-300zM1200 700h-200v-100h200v-200h-300v300h-200v300h-100v200h600v-500zM100 1100v-300h300v300h-300zM800 1100v-300h300v300h-300zM300 900h-100v100h100v-100zM1000 900h-100v100h100v-100zM300 500h200v-500 h-500v500h200v100h100v-100zM800 300h200v-100h-100v-100h-200v100h-100v100h100v200h-200v100h300v-300zM100 400v-300h300v300h-300zM300 200h-100v100h100v-100zM1200 200h-100v100h100v-100zM700 0h-100v100h100v-100zM1200 0h-300v100h300v-100z" />
-<glyph unicode="&#xe040;" d="M100 200h-100v1000h100v-1000zM300 200h-100v1000h100v-1000zM700 200h-200v1000h200v-1000zM900 200h-100v1000h100v-1000zM1200 200h-200v1000h200v-1000zM400 0h-300v100h300v-100zM600 0h-100v91h100v-91zM800 0h-100v91h100v-91zM1100 0h-200v91h200v-91z" />
-<glyph unicode="&#xe041;" d="M500 1200l682 -682q8 -8 8 -18t-8 -18l-464 -464q-8 -8 -18 -8t-18 8l-682 682l1 475q0 10 7.5 17.5t17.5 7.5h474zM319.5 1024.5q-29.5 29.5 -71 29.5t-71 -29.5t-29.5 -71.5t29.5 -71.5t71 -29.5t71 29.5t29.5 71.5t-29.5 71.5z" />
-<glyph unicode="&#xe042;" d="M500 1200l682 -682q8 -8 8 -18t-8 -18l-464 -464q-8 -8 -18 -8t-18 8l-682 682l1 475q0 10 7.5 17.5t17.5 7.5h474zM800 1200l682 -682q8 -8 8 -18t-8 -18l-464 -464q-8 -8 -18 -8t-18 8l-56 56l424 426l-700 700h150zM319.5 1024.5q-29.5 29.5 -71 29.5t-71 -29.5 t-29.5 -71.5t29.5 -71.5t71 -29.5t71 29.5t29.5 71.5t-29.5 71.5z" />
-<glyph unicode="&#xe043;" d="M300 1200h825q75 0 75 -75v-900q0 -25 -18 -43l-64 -64q-8 -8 -13 -5.5t-5 12.5v950q0 10 -7.5 17.5t-17.5 7.5h-700q-25 0 -43 -18l-64 -64q-8 -8 -5.5 -13t12.5 -5h700q10 0 17.5 -7.5t7.5 -17.5v-950q0 -10 -7.5 -17.5t-17.5 -7.5h-850q-10 0 -17.5 7.5t-7.5 17.5v975 q0 25 18 43l139 139q18 18 43 18z" />
-<glyph unicode="&#xe044;" d="M250 1200h800q21 0 35.5 -14.5t14.5 -35.5v-1150l-450 444l-450 -445v1151q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe045;" d="M822 1200h-444q-11 0 -19 -7.5t-9 -17.5l-78 -301q-7 -24 7 -45l57 -108q6 -9 17.5 -15t21.5 -6h450q10 0 21.5 6t17.5 15l62 108q14 21 7 45l-83 301q-1 10 -9 17.5t-19 7.5zM1175 800h-150q-10 0 -21 -6.5t-15 -15.5l-78 -156q-4 -9 -15 -15.5t-21 -6.5h-550 q-10 0 -21 6.5t-15 15.5l-78 156q-4 9 -15 15.5t-21 6.5h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-650q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h750q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5 t7.5 17.5v650q0 10 -7.5 17.5t-17.5 7.5zM850 200h-500q-10 0 -19.5 -7t-11.5 -17l-38 -152q-2 -10 3.5 -17t15.5 -7h600q10 0 15.5 7t3.5 17l-38 152q-2 10 -11.5 17t-19.5 7z" />
-<glyph unicode="&#xe046;" d="M500 1100h200q56 0 102.5 -20.5t72.5 -50t44 -59t25 -50.5l6 -20h150q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5h150q2 8 6.5 21.5t24 48t45 61t72 48t102.5 21.5zM900 800v-100 h100v100h-100zM600 730q-95 0 -162.5 -67.5t-67.5 -162.5t67.5 -162.5t162.5 -67.5t162.5 67.5t67.5 162.5t-67.5 162.5t-162.5 67.5zM600 603q43 0 73 -30t30 -73t-30 -73t-73 -30t-73 30t-30 73t30 73t73 30z" />
-<glyph unicode="&#xe047;" d="M681 1199l385 -998q20 -50 60 -92q18 -19 36.5 -29.5t27.5 -11.5l10 -2v-66h-417v66q53 0 75 43.5t5 88.5l-82 222h-391q-58 -145 -92 -234q-11 -34 -6.5 -57t25.5 -37t46 -20t55 -6v-66h-365v66q56 24 84 52q12 12 25 30.5t20 31.5l7 13l399 1006h93zM416 521h340 l-162 457z" />
-<glyph unicode="&#xe048;" d="M753 641q5 -1 14.5 -4.5t36 -15.5t50.5 -26.5t53.5 -40t50.5 -54.5t35.5 -70t14.5 -87q0 -67 -27.5 -125.5t-71.5 -97.5t-98.5 -66.5t-108.5 -40.5t-102 -13h-500v89q41 7 70.5 32.5t29.5 65.5v827q0 24 -0.5 34t-3.5 24t-8.5 19.5t-17 13.5t-28 12.5t-42.5 11.5v71 l471 -1q57 0 115.5 -20.5t108 -57t80.5 -94t31 -124.5q0 -51 -15.5 -96.5t-38 -74.5t-45 -50.5t-38.5 -30.5zM400 700h139q78 0 130.5 48.5t52.5 122.5q0 41 -8.5 70.5t-29.5 55.5t-62.5 39.5t-103.5 13.5h-118v-350zM400 200h216q80 0 121 50.5t41 130.5q0 90 -62.5 154.5 t-156.5 64.5h-159v-400z" />
-<glyph unicode="&#xe049;" d="M877 1200l2 -57q-83 -19 -116 -45.5t-40 -66.5l-132 -839q-9 -49 13 -69t96 -26v-97h-500v97q186 16 200 98l173 832q3 17 3 30t-1.5 22.5t-9 17.5t-13.5 12.5t-21.5 10t-26 8.5t-33.5 10q-13 3 -19 5v57h425z" />
-<glyph unicode="&#xe050;" d="M1300 900h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-200v-850q0 -22 25 -34.5t50 -13.5l25 -2v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v850h-200q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h1000v-300zM175 1000h-75v-800h75l-125 -167l-125 167h75v800h-75l125 167z" />
-<glyph unicode="&#xe051;" d="M1100 900h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-200v-650q0 -22 25 -34.5t50 -13.5l25 -2v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v650h-200q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h1000v-300zM1167 50l-167 -125v75h-800v-75l-167 125l167 125v-75h800v75z" />
-<glyph unicode="&#xe052;" d="M50 1100h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 800h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM50 500h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe053;" d="M250 1100h700q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 800h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM250 500h700q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe054;" d="M500 950v100q0 21 14.5 35.5t35.5 14.5h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5zM100 650v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000 q-21 0 -35.5 14.5t-14.5 35.5zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5zM0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100 q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5z" />
-<glyph unicode="&#xe055;" d="M50 1100h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 800h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM50 500h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe056;" d="M50 1100h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 1100h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM50 800h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 800h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 500h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 500h800q21 0 35.5 -14.5t14.5 -35.5v-100 q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 200h800 q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe057;" d="M400 0h-100v1100h100v-1100zM550 1100h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM550 800h500q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-500 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM267 550l-167 -125v75h-200v100h200v75zM550 500h300q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM550 200h600 q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe058;" d="M50 1100h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM900 0h-100v1100h100v-1100zM50 800h500q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-500 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM1100 600h200v-100h-200v-75l-167 125l167 125v-75zM50 500h300q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h600 q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe059;" d="M75 1000h750q31 0 53 -22t22 -53v-650q0 -31 -22 -53t-53 -22h-750q-31 0 -53 22t-22 53v650q0 31 22 53t53 22zM1200 300l-300 300l300 300v-600z" />
-<glyph unicode="&#xe060;" d="M44 1100h1112q18 0 31 -13t13 -31v-1012q0 -18 -13 -31t-31 -13h-1112q-18 0 -31 13t-13 31v1012q0 18 13 31t31 13zM100 1000v-737l247 182l298 -131l-74 156l293 318l236 -288v500h-1000zM342 884q56 0 95 -39t39 -94.5t-39 -95t-95 -39.5t-95 39.5t-39 95t39 94.5 t95 39z" />
-<glyph unicode="&#xe062;" d="M648 1169q117 0 216 -60t156.5 -161t57.5 -218q0 -115 -70 -258q-69 -109 -158 -225.5t-143 -179.5l-54 -62q-9 8 -25.5 24.5t-63.5 67.5t-91 103t-98.5 128t-95.5 148q-60 132 -60 249q0 88 34 169.5t91.5 142t137 96.5t166.5 36zM652.5 974q-91.5 0 -156.5 -65 t-65 -157t65 -156.5t156.5 -64.5t156.5 64.5t65 156.5t-65 157t-156.5 65z" />
-<glyph unicode="&#xe063;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 173v854q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57z" />
-<glyph unicode="&#xe064;" d="M554 1295q21 -72 57.5 -143.5t76 -130t83 -118t82.5 -117t70 -116t49.5 -126t18.5 -136.5q0 -71 -25.5 -135t-68.5 -111t-99 -82t-118.5 -54t-125.5 -23q-84 5 -161.5 34t-139.5 78.5t-99 125t-37 164.5q0 69 18 136.5t49.5 126.5t69.5 116.5t81.5 117.5t83.5 119 t76.5 131t58.5 143zM344 710q-23 -33 -43.5 -70.5t-40.5 -102.5t-17 -123q1 -37 14.5 -69.5t30 -52t41 -37t38.5 -24.5t33 -15q21 -7 32 -1t13 22l6 34q2 10 -2.5 22t-13.5 19q-5 4 -14 12t-29.5 40.5t-32.5 73.5q-26 89 6 271q2 11 -6 11q-8 1 -15 -10z" />
-<glyph unicode="&#xe065;" d="M1000 1013l108 115q2 1 5 2t13 2t20.5 -1t25 -9.5t28.5 -21.5q22 -22 27 -43t0 -32l-6 -10l-108 -115zM350 1100h400q50 0 105 -13l-187 -187h-368q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v182l200 200v-332 q0 -165 -93.5 -257.5t-256.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5zM1009 803l-362 -362l-161 -50l55 170l355 355z" />
-<glyph unicode="&#xe066;" d="M350 1100h361q-164 -146 -216 -200h-195q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5l200 153v-103q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5z M824 1073l339 -301q8 -7 8 -17.5t-8 -17.5l-340 -306q-7 -6 -12.5 -4t-6.5 11v203q-26 1 -54.5 0t-78.5 -7.5t-92 -17.5t-86 -35t-70 -57q10 59 33 108t51.5 81.5t65 58.5t68.5 40.5t67 24.5t56 13.5t40 4.5v210q1 10 6.5 12.5t13.5 -4.5z" />
-<glyph unicode="&#xe067;" d="M350 1100h350q60 0 127 -23l-178 -177h-349q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v69l200 200v-219q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5z M643 639l395 395q7 7 17.5 7t17.5 -7l101 -101q7 -7 7 -17.5t-7 -17.5l-531 -532q-7 -7 -17.5 -7t-17.5 7l-248 248q-7 7 -7 17.5t7 17.5l101 101q7 7 17.5 7t17.5 -7l111 -111q8 -7 18 -7t18 7z" />
-<glyph unicode="&#xe068;" d="M318 918l264 264q8 8 18 8t18 -8l260 -264q7 -8 4.5 -13t-12.5 -5h-170v-200h200v173q0 10 5 12t13 -5l264 -260q8 -7 8 -17.5t-8 -17.5l-264 -265q-8 -7 -13 -5t-5 12v173h-200v-200h170q10 0 12.5 -5t-4.5 -13l-260 -264q-8 -8 -18 -8t-18 8l-264 264q-8 8 -5.5 13 t12.5 5h175v200h-200v-173q0 -10 -5 -12t-13 5l-264 265q-8 7 -8 17.5t8 17.5l264 260q8 7 13 5t5 -12v-173h200v200h-175q-10 0 -12.5 5t5.5 13z" />
-<glyph unicode="&#xe069;" d="M250 1100h100q21 0 35.5 -14.5t14.5 -35.5v-438l464 453q15 14 25.5 10t10.5 -25v-1000q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v1000q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe070;" d="M50 1100h100q21 0 35.5 -14.5t14.5 -35.5v-438l464 453q15 14 25.5 10t10.5 -25v-438l464 453q15 14 25.5 10t10.5 -25v-1000q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5 t-14.5 35.5v1000q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe071;" d="M1200 1050v-1000q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -10.5 -25t-25.5 10l-492 480q-15 14 -15 35t15 35l492 480q15 14 25.5 10t10.5 -25v-438l464 453q15 14 25.5 10t10.5 -25z" />
-<glyph unicode="&#xe072;" d="M243 1074l814 -498q18 -11 18 -26t-18 -26l-814 -498q-18 -11 -30.5 -4t-12.5 28v1000q0 21 12.5 28t30.5 -4z" />
-<glyph unicode="&#xe073;" d="M250 1000h200q21 0 35.5 -14.5t14.5 -35.5v-800q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v800q0 21 14.5 35.5t35.5 14.5zM650 1000h200q21 0 35.5 -14.5t14.5 -35.5v-800q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v800 q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe074;" d="M1100 950v-800q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v800q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5z" />
-<glyph unicode="&#xe075;" d="M500 612v438q0 21 10.5 25t25.5 -10l492 -480q15 -14 15 -35t-15 -35l-492 -480q-15 -14 -25.5 -10t-10.5 25v438l-464 -453q-15 -14 -25.5 -10t-10.5 25v1000q0 21 10.5 25t25.5 -10z" />
-<glyph unicode="&#xe076;" d="M1048 1102l100 1q20 0 35 -14.5t15 -35.5l5 -1000q0 -21 -14.5 -35.5t-35.5 -14.5l-100 -1q-21 0 -35.5 14.5t-14.5 35.5l-2 437l-463 -454q-14 -15 -24.5 -10.5t-10.5 25.5l-2 437l-462 -455q-15 -14 -25.5 -9.5t-10.5 24.5l-5 1000q0 21 10.5 25.5t25.5 -10.5l466 -450 l-2 438q0 20 10.5 24.5t25.5 -9.5l466 -451l-2 438q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe077;" d="M850 1100h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438l-464 -453q-15 -14 -25.5 -10t-10.5 25v1000q0 21 10.5 25t25.5 -10l464 -453v438q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe078;" d="M686 1081l501 -540q15 -15 10.5 -26t-26.5 -11h-1042q-22 0 -26.5 11t10.5 26l501 540q15 15 36 15t36 -15zM150 400h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe079;" d="M885 900l-352 -353l352 -353l-197 -198l-552 552l552 550z" />
-<glyph unicode="&#xe080;" d="M1064 547l-551 -551l-198 198l353 353l-353 353l198 198z" />
-<glyph unicode="&#xe081;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM650 900h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-150h-150 q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -21 14.5 -35.5t35.5 -14.5h150v-150q0 -21 14.5 -35.5t35.5 -14.5h100q21 0 35.5 14.5t14.5 35.5v150h150q21 0 35.5 14.5t14.5 35.5v100q0 21 -14.5 35.5t-35.5 14.5h-150v150q0 21 -14.5 35.5t-35.5 14.5z" />
-<glyph unicode="&#xe082;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM850 700h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -21 14.5 -35.5 t35.5 -14.5h500q21 0 35.5 14.5t14.5 35.5v100q0 21 -14.5 35.5t-35.5 14.5z" />
-<glyph unicode="&#xe083;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM741.5 913q-12.5 0 -21.5 -9l-120 -120l-120 120q-9 9 -21.5 9 t-21.5 -9l-141 -141q-9 -9 -9 -21.5t9 -21.5l120 -120l-120 -120q-9 -9 -9 -21.5t9 -21.5l141 -141q9 -9 21.5 -9t21.5 9l120 120l120 -120q9 -9 21.5 -9t21.5 9l141 141q9 9 9 21.5t-9 21.5l-120 120l120 120q9 9 9 21.5t-9 21.5l-141 141q-9 9 -21.5 9z" />
-<glyph unicode="&#xe084;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM546 623l-84 85q-7 7 -17.5 7t-18.5 -7l-139 -139q-7 -8 -7 -18t7 -18 l242 -241q7 -8 17.5 -8t17.5 8l375 375q7 7 7 17.5t-7 18.5l-139 139q-7 7 -17.5 7t-17.5 -7z" />
-<glyph unicode="&#xe085;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM588 941q-29 0 -59 -5.5t-63 -20.5t-58 -38.5t-41.5 -63t-16.5 -89.5 q0 -25 20 -25h131q30 -5 35 11q6 20 20.5 28t45.5 8q20 0 31.5 -10.5t11.5 -28.5q0 -23 -7 -34t-26 -18q-1 0 -13.5 -4t-19.5 -7.5t-20 -10.5t-22 -17t-18.5 -24t-15.5 -35t-8 -46q-1 -8 5.5 -16.5t20.5 -8.5h173q7 0 22 8t35 28t37.5 48t29.5 74t12 100q0 47 -17 83 t-42.5 57t-59.5 34.5t-64 18t-59 4.5zM675 400h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5z" />
-<glyph unicode="&#xe086;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM675 1000h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5 t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5zM675 700h-250q-10 0 -17.5 -7.5t-7.5 -17.5v-50q0 -10 7.5 -17.5t17.5 -7.5h75v-200h-75q-10 0 -17.5 -7.5t-7.5 -17.5v-50q0 -10 7.5 -17.5t17.5 -7.5h350q10 0 17.5 7.5t7.5 17.5v50q0 10 -7.5 17.5 t-17.5 7.5h-75v275q0 10 -7.5 17.5t-17.5 7.5z" />
-<glyph unicode="&#xe087;" d="M525 1200h150q10 0 17.5 -7.5t7.5 -17.5v-194q103 -27 178.5 -102.5t102.5 -178.5h194q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-194q-27 -103 -102.5 -178.5t-178.5 -102.5v-194q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v194 q-103 27 -178.5 102.5t-102.5 178.5h-194q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h194q27 103 102.5 178.5t178.5 102.5v194q0 10 7.5 17.5t17.5 7.5zM700 893v-168q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v168q-68 -23 -119 -74 t-74 -119h168q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-168q23 -68 74 -119t119 -74v168q0 10 7.5 17.5t17.5 7.5h150q10 0 17.5 -7.5t7.5 -17.5v-168q68 23 119 74t74 119h-168q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h168 q-23 68 -74 119t-119 74z" />
-<glyph unicode="&#xe088;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM759 823l64 -64q7 -7 7 -17.5t-7 -17.5l-124 -124l124 -124q7 -7 7 -17.5t-7 -17.5l-64 -64q-7 -7 -17.5 -7t-17.5 7l-124 124l-124 -124q-7 -7 -17.5 -7t-17.5 7l-64 64 q-7 7 -7 17.5t7 17.5l124 124l-124 124q-7 7 -7 17.5t7 17.5l64 64q7 7 17.5 7t17.5 -7l124 -124l124 124q7 7 17.5 7t17.5 -7z" />
-<glyph unicode="&#xe089;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM782 788l106 -106q7 -7 7 -17.5t-7 -17.5l-320 -321q-8 -7 -18 -7t-18 7l-202 203q-8 7 -8 17.5t8 17.5l106 106q7 8 17.5 8t17.5 -8l79 -79l197 197q7 7 17.5 7t17.5 -7z" />
-<glyph unicode="&#xe090;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5q0 -120 65 -225 l587 587q-105 65 -225 65zM965 819l-584 -584q104 -62 219 -62q116 0 214.5 57t155.5 155.5t57 214.5q0 115 -62 219z" />
-<glyph unicode="&#xe091;" d="M39 582l522 427q16 13 27.5 8t11.5 -26v-291h550q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-550v-291q0 -21 -11.5 -26t-27.5 8l-522 427q-16 13 -16 32t16 32z" />
-<glyph unicode="&#xe092;" d="M639 1009l522 -427q16 -13 16 -32t-16 -32l-522 -427q-16 -13 -27.5 -8t-11.5 26v291h-550q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h550v291q0 21 11.5 26t27.5 -8z" />
-<glyph unicode="&#xe093;" d="M682 1161l427 -522q13 -16 8 -27.5t-26 -11.5h-291v-550q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v550h-291q-21 0 -26 11.5t8 27.5l427 522q13 16 32 16t32 -16z" />
-<glyph unicode="&#xe094;" d="M550 1200h200q21 0 35.5 -14.5t14.5 -35.5v-550h291q21 0 26 -11.5t-8 -27.5l-427 -522q-13 -16 -32 -16t-32 16l-427 522q-13 16 -8 27.5t26 11.5h291v550q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe095;" d="M639 1109l522 -427q16 -13 16 -32t-16 -32l-522 -427q-16 -13 -27.5 -8t-11.5 26v291q-94 -2 -182 -20t-170.5 -52t-147 -92.5t-100.5 -135.5q5 105 27 193.5t67.5 167t113 135t167 91.5t225.5 42v262q0 21 11.5 26t27.5 -8z" />
-<glyph unicode="&#xe096;" d="M850 1200h300q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -10.5 -25t-24.5 10l-94 94l-249 -249q-8 -7 -18 -7t-18 7l-106 106q-7 8 -7 18t7 18l249 249l-94 94q-14 14 -10 24.5t25 10.5zM350 0h-300q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 10.5 25t24.5 -10l94 -94l249 249 q8 7 18 7t18 -7l106 -106q7 -8 7 -18t-7 -18l-249 -249l94 -94q14 -14 10 -24.5t-25 -10.5z" />
-<glyph unicode="&#xe097;" d="M1014 1120l106 -106q7 -8 7 -18t-7 -18l-249 -249l94 -94q14 -14 10 -24.5t-25 -10.5h-300q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 10.5 25t24.5 -10l94 -94l249 249q8 7 18 7t18 -7zM250 600h300q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -10.5 -25t-24.5 10l-94 94 l-249 -249q-8 -7 -18 -7t-18 7l-106 106q-7 8 -7 18t7 18l249 249l-94 94q-14 14 -10 24.5t25 10.5z" />
-<glyph unicode="&#xe101;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM704 900h-208q-20 0 -32 -14.5t-8 -34.5l58 -302q4 -20 21.5 -34.5 t37.5 -14.5h54q20 0 37.5 14.5t21.5 34.5l58 302q4 20 -8 34.5t-32 14.5zM675 400h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5z" />
-<glyph unicode="&#xe102;" d="M260 1200q9 0 19 -2t15 -4l5 -2q22 -10 44 -23l196 -118q21 -13 36 -24q29 -21 37 -12q11 13 49 35l196 118q22 13 45 23q17 7 38 7q23 0 47 -16.5t37 -33.5l13 -16q14 -21 18 -45l25 -123l8 -44q1 -9 8.5 -14.5t17.5 -5.5h61q10 0 17.5 -7.5t7.5 -17.5v-50 q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 -7.5t-7.5 -17.5v-175h-400v300h-200v-300h-400v175q0 10 -7.5 17.5t-17.5 7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5h61q11 0 18 3t7 8q0 4 9 52l25 128q5 25 19 45q2 3 5 7t13.5 15t21.5 19.5t26.5 15.5 t29.5 7zM915 1079l-166 -162q-7 -7 -5 -12t12 -5h219q10 0 15 7t2 17l-51 149q-3 10 -11 12t-15 -6zM463 917l-177 157q-8 7 -16 5t-11 -12l-51 -143q-3 -10 2 -17t15 -7h231q11 0 12.5 5t-5.5 12zM500 0h-375q-10 0 -17.5 7.5t-7.5 17.5v375h400v-400zM1100 400v-375 q0 -10 -7.5 -17.5t-17.5 -7.5h-375v400h400z" />
-<glyph unicode="&#xe103;" d="M1165 1190q8 3 21 -6.5t13 -17.5q-2 -178 -24.5 -323.5t-55.5 -245.5t-87 -174.5t-102.5 -118.5t-118 -68.5t-118.5 -33t-120 -4.5t-105 9.5t-90 16.5q-61 12 -78 11q-4 1 -12.5 0t-34 -14.5t-52.5 -40.5l-153 -153q-26 -24 -37 -14.5t-11 43.5q0 64 42 102q8 8 50.5 45 t66.5 58q19 17 35 47t13 61q-9 55 -10 102.5t7 111t37 130t78 129.5q39 51 80 88t89.5 63.5t94.5 45t113.5 36t129 31t157.5 37t182 47.5zM1116 1098q-8 9 -22.5 -3t-45.5 -50q-38 -47 -119 -103.5t-142 -89.5l-62 -33q-56 -30 -102 -57t-104 -68t-102.5 -80.5t-85.5 -91 t-64 -104.5q-24 -56 -31 -86t2 -32t31.5 17.5t55.5 59.5q25 30 94 75.5t125.5 77.5t147.5 81q70 37 118.5 69t102 79.5t99 111t86.5 148.5q22 50 24 60t-6 19z" />
-<glyph unicode="&#xe104;" d="M653 1231q-39 -67 -54.5 -131t-10.5 -114.5t24.5 -96.5t47.5 -80t63.5 -62.5t68.5 -46.5t65 -30q-4 7 -17.5 35t-18.5 39.5t-17 39.5t-17 43t-13 42t-9.5 44.5t-2 42t4 43t13.5 39t23 38.5q96 -42 165 -107.5t105 -138t52 -156t13 -159t-19 -149.5q-13 -55 -44 -106.5 t-68 -87t-78.5 -64.5t-72.5 -45t-53 -22q-72 -22 -127 -11q-31 6 -13 19q6 3 17 7q13 5 32.5 21t41 44t38.5 63.5t21.5 81.5t-6.5 94.5t-50 107t-104 115.5q10 -104 -0.5 -189t-37 -140.5t-65 -93t-84 -52t-93.5 -11t-95 24.5q-80 36 -131.5 114t-53.5 171q-2 23 0 49.5 t4.5 52.5t13.5 56t27.5 60t46 64.5t69.5 68.5q-8 -53 -5 -102.5t17.5 -90t34 -68.5t44.5 -39t49 -2q31 13 38.5 36t-4.5 55t-29 64.5t-36 75t-26 75.5q-15 85 2 161.5t53.5 128.5t85.5 92.5t93.5 61t81.5 25.5z" />
-<glyph unicode="&#xe105;" d="M600 1094q82 0 160.5 -22.5t140 -59t116.5 -82.5t94.5 -95t68 -95t42.5 -82.5t14 -57.5t-14 -57.5t-43 -82.5t-68.5 -95t-94.5 -95t-116.5 -82.5t-140 -59t-159.5 -22.5t-159.5 22.5t-140 59t-116.5 82.5t-94.5 95t-68.5 95t-43 82.5t-14 57.5t14 57.5t42.5 82.5t68 95 t94.5 95t116.5 82.5t140 59t160.5 22.5zM888 829q-15 15 -18 12t5 -22q25 -57 25 -119q0 -124 -88 -212t-212 -88t-212 88t-88 212q0 59 23 114q8 19 4.5 22t-17.5 -12q-70 -69 -160 -184q-13 -16 -15 -40.5t9 -42.5q22 -36 47 -71t70 -82t92.5 -81t113 -58.5t133.5 -24.5 t133.5 24t113 58.5t92.5 81.5t70 81.5t47 70.5q11 18 9 42.5t-14 41.5q-90 117 -163 189zM448 727l-35 -36q-15 -15 -19.5 -38.5t4.5 -41.5q37 -68 93 -116q16 -13 38.5 -11t36.5 17l35 34q14 15 12.5 33.5t-16.5 33.5q-44 44 -89 117q-11 18 -28 20t-32 -12z" />
-<glyph unicode="&#xe106;" d="M592 0h-148l31 120q-91 20 -175.5 68.5t-143.5 106.5t-103.5 119t-66.5 110t-22 76q0 21 14 57.5t42.5 82.5t68 95t94.5 95t116.5 82.5t140 59t160.5 22.5q61 0 126 -15l32 121h148zM944 770l47 181q108 -85 176.5 -192t68.5 -159q0 -26 -19.5 -71t-59.5 -102t-93 -112 t-129 -104.5t-158 -75.5l46 173q77 49 136 117t97 131q11 18 9 42.5t-14 41.5q-54 70 -107 130zM310 824q-70 -69 -160 -184q-13 -16 -15 -40.5t9 -42.5q18 -30 39 -60t57 -70.5t74 -73t90 -61t105 -41.5l41 154q-107 18 -178.5 101.5t-71.5 193.5q0 59 23 114q8 19 4.5 22 t-17.5 -12zM448 727l-35 -36q-15 -15 -19.5 -38.5t4.5 -41.5q37 -68 93 -116q16 -13 38.5 -11t36.5 17l12 11l22 86l-3 4q-44 44 -89 117q-11 18 -28 20t-32 -12z" />
-<glyph unicode="&#xe107;" d="M-90 100l642 1066q20 31 48 28.5t48 -35.5l642 -1056q21 -32 7.5 -67.5t-50.5 -35.5h-1294q-37 0 -50.5 34t7.5 66zM155 200h345v75q0 10 7.5 17.5t17.5 7.5h150q10 0 17.5 -7.5t7.5 -17.5v-75h345l-445 723zM496 700h208q20 0 32 -14.5t8 -34.5l-58 -252 q-4 -20 -21.5 -34.5t-37.5 -14.5h-54q-20 0 -37.5 14.5t-21.5 34.5l-58 252q-4 20 8 34.5t32 14.5z" />
-<glyph unicode="&#xe108;" d="M650 1200q62 0 106 -44t44 -106v-339l363 -325q15 -14 26 -38.5t11 -44.5v-41q0 -20 -12 -26.5t-29 5.5l-359 249v-263q100 -93 100 -113v-64q0 -21 -13 -29t-32 1l-205 128l-205 -128q-19 -9 -32 -1t-13 29v64q0 20 100 113v263l-359 -249q-17 -12 -29 -5.5t-12 26.5v41 q0 20 11 44.5t26 38.5l363 325v339q0 62 44 106t106 44z" />
-<glyph unicode="&#xe109;" d="M850 1200h100q21 0 35.5 -14.5t14.5 -35.5v-50h50q21 0 35.5 -14.5t14.5 -35.5v-150h-1100v150q0 21 14.5 35.5t35.5 14.5h50v50q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-50h500v50q0 21 14.5 35.5t35.5 14.5zM1100 800v-750q0 -21 -14.5 -35.5 t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v750h1100zM100 600v-100h100v100h-100zM300 600v-100h100v100h-100zM500 600v-100h100v100h-100zM700 600v-100h100v100h-100zM900 600v-100h100v100h-100zM100 400v-100h100v100h-100zM300 400v-100h100v100h-100zM500 400 v-100h100v100h-100zM700 400v-100h100v100h-100zM900 400v-100h100v100h-100zM100 200v-100h100v100h-100zM300 200v-100h100v100h-100zM500 200v-100h100v100h-100zM700 200v-100h100v100h-100zM900 200v-100h100v100h-100z" />
-<glyph unicode="&#xe110;" d="M1135 1165l249 -230q15 -14 15 -35t-15 -35l-249 -230q-14 -14 -24.5 -10t-10.5 25v150h-159l-600 -600h-291q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h209l600 600h241v150q0 21 10.5 25t24.5 -10zM522 819l-141 -141l-122 122h-209q-21 0 -35.5 14.5 t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h291zM1135 565l249 -230q15 -14 15 -35t-15 -35l-249 -230q-14 -14 -24.5 -10t-10.5 25v150h-241l-181 181l141 141l122 -122h159v150q0 21 10.5 25t24.5 -10z" />
-<glyph unicode="&#xe111;" d="M100 1100h1000q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-596l-304 -300v300h-100q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5z" />
-<glyph unicode="&#xe112;" d="M150 1200h200q21 0 35.5 -14.5t14.5 -35.5v-250h-300v250q0 21 14.5 35.5t35.5 14.5zM850 1200h200q21 0 35.5 -14.5t14.5 -35.5v-250h-300v250q0 21 14.5 35.5t35.5 14.5zM1100 800v-300q0 -41 -3 -77.5t-15 -89.5t-32 -96t-58 -89t-89 -77t-129 -51t-174 -20t-174 20 t-129 51t-89 77t-58 89t-32 96t-15 89.5t-3 77.5v300h300v-250v-27v-42.5t1.5 -41t5 -38t10 -35t16.5 -30t25.5 -24.5t35 -19t46.5 -12t60 -4t60 4.5t46.5 12.5t35 19.5t25 25.5t17 30.5t10 35t5 38t2 40.5t-0.5 42v25v250h300z" />
-<glyph unicode="&#xe113;" d="M1100 411l-198 -199l-353 353l-353 -353l-197 199l551 551z" />
-<glyph unicode="&#xe114;" d="M1101 789l-550 -551l-551 551l198 199l353 -353l353 353z" />
-<glyph unicode="&#xe115;" d="M404 1000h746q21 0 35.5 -14.5t14.5 -35.5v-551h150q21 0 25 -10.5t-10 -24.5l-230 -249q-14 -15 -35 -15t-35 15l-230 249q-14 14 -10 24.5t25 10.5h150v401h-381zM135 984l230 -249q14 -14 10 -24.5t-25 -10.5h-150v-400h385l215 -200h-750q-21 0 -35.5 14.5 t-14.5 35.5v550h-150q-21 0 -25 10.5t10 24.5l230 249q14 15 35 15t35 -15z" />
-<glyph unicode="&#xe116;" d="M56 1200h94q17 0 31 -11t18 -27l38 -162h896q24 0 39 -18.5t10 -42.5l-100 -475q-5 -21 -27 -42.5t-55 -21.5h-633l48 -200h535q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-50v-50q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v50h-300v-50 q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v50h-31q-18 0 -32.5 10t-20.5 19l-5 10l-201 961h-54q-20 0 -35 14.5t-15 35.5t15 35.5t35 14.5z" />
-<glyph unicode="&#xe117;" d="M1200 1000v-100h-1200v100h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500zM0 800h1200v-800h-1200v800z" />
-<glyph unicode="&#xe118;" d="M200 800l-200 -400v600h200q0 41 29.5 70.5t70.5 29.5h300q42 0 71 -29.5t29 -70.5h500v-200h-1000zM1500 700l-300 -700h-1200l300 700h1200z" />
-<glyph unicode="&#xe119;" d="M635 1184l230 -249q14 -14 10 -24.5t-25 -10.5h-150v-601h150q21 0 25 -10.5t-10 -24.5l-230 -249q-14 -15 -35 -15t-35 15l-230 249q-14 14 -10 24.5t25 10.5h150v601h-150q-21 0 -25 10.5t10 24.5l230 249q14 15 35 15t35 -15z" />
-<glyph unicode="&#xe120;" d="M936 864l249 -229q14 -15 14 -35.5t-14 -35.5l-249 -229q-15 -15 -25.5 -10.5t-10.5 24.5v151h-600v-151q0 -20 -10.5 -24.5t-25.5 10.5l-249 229q-14 15 -14 35.5t14 35.5l249 229q15 15 25.5 10.5t10.5 -25.5v-149h600v149q0 21 10.5 25.5t25.5 -10.5z" />
-<glyph unicode="&#xe121;" d="M1169 400l-172 732q-5 23 -23 45.5t-38 22.5h-672q-20 0 -38 -20t-23 -41l-172 -739h1138zM1100 300h-1000q-41 0 -70.5 -29.5t-29.5 -70.5v-100q0 -41 29.5 -70.5t70.5 -29.5h1000q41 0 70.5 29.5t29.5 70.5v100q0 41 -29.5 70.5t-70.5 29.5zM800 100v100h100v-100h-100 zM1000 100v100h100v-100h-100z" />
-<glyph unicode="&#xe122;" d="M1150 1100q21 0 35.5 -14.5t14.5 -35.5v-850q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v850q0 21 14.5 35.5t35.5 14.5zM1000 200l-675 200h-38l47 -276q3 -16 -5.5 -20t-29.5 -4h-7h-84q-20 0 -34.5 14t-18.5 35q-55 337 -55 351v250v6q0 16 1 23.5t6.5 14 t17.5 6.5h200l675 250v-850zM0 750v-250q-4 0 -11 0.5t-24 6t-30 15t-24 30t-11 48.5v50q0 26 10.5 46t25 30t29 16t25.5 7z" />
-<glyph unicode="&#xe123;" d="M553 1200h94q20 0 29 -10.5t3 -29.5l-18 -37q83 -19 144 -82.5t76 -140.5l63 -327l118 -173h17q19 0 33 -14.5t14 -35t-13 -40.5t-31 -27q-8 -4 -23 -9.5t-65 -19.5t-103 -25t-132.5 -20t-158.5 -9q-57 0 -115 5t-104 12t-88.5 15.5t-73.5 17.5t-54.5 16t-35.5 12l-11 4 q-18 8 -31 28t-13 40.5t14 35t33 14.5h17l118 173l63 327q15 77 76 140t144 83l-18 32q-6 19 3.5 32t28.5 13zM498 110q50 -6 102 -6q53 0 102 6q-12 -49 -39.5 -79.5t-62.5 -30.5t-63 30.5t-39 79.5z" />
-<glyph unicode="&#xe124;" d="M800 946l224 78l-78 -224l234 -45l-180 -155l180 -155l-234 -45l78 -224l-224 78l-45 -234l-155 180l-155 -180l-45 234l-224 -78l78 224l-234 45l180 155l-180 155l234 45l-78 224l224 -78l45 234l155 -180l155 180z" />
-<glyph unicode="&#xe125;" d="M650 1200h50q40 0 70 -40.5t30 -84.5v-150l-28 -125h328q40 0 70 -40.5t30 -84.5v-100q0 -45 -29 -74l-238 -344q-16 -24 -38 -40.5t-45 -16.5h-250q-7 0 -42 25t-66 50l-31 25h-61q-45 0 -72.5 18t-27.5 57v400q0 36 20 63l145 196l96 198q13 28 37.5 48t51.5 20z M650 1100l-100 -212l-150 -213v-375h100l136 -100h214l250 375v125h-450l50 225v175h-50zM50 800h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v500q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe126;" d="M600 1100h250q23 0 45 -16.5t38 -40.5l238 -344q29 -29 29 -74v-100q0 -44 -30 -84.5t-70 -40.5h-328q28 -118 28 -125v-150q0 -44 -30 -84.5t-70 -40.5h-50q-27 0 -51.5 20t-37.5 48l-96 198l-145 196q-20 27 -20 63v400q0 39 27.5 57t72.5 18h61q124 100 139 100z M50 1000h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v500q0 21 14.5 35.5t35.5 14.5zM636 1000l-136 -100h-100v-375l150 -213l100 -212h50v175l-50 225h450v125l-250 375h-214z" />
-<glyph unicode="&#xe127;" d="M356 873l363 230q31 16 53 -6l110 -112q13 -13 13.5 -32t-11.5 -34l-84 -121h302q84 0 138 -38t54 -110t-55 -111t-139 -39h-106l-131 -339q-6 -21 -19.5 -41t-28.5 -20h-342q-7 0 -90 81t-83 94v525q0 17 14 35.5t28 28.5zM400 792v-503l100 -89h293l131 339 q6 21 19.5 41t28.5 20h203q21 0 30.5 25t0.5 50t-31 25h-456h-7h-6h-5.5t-6 0.5t-5 1.5t-5 2t-4 2.5t-4 4t-2.5 4.5q-12 25 5 47l146 183l-86 83zM50 800h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v500 q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe128;" d="M475 1103l366 -230q2 -1 6 -3.5t14 -10.5t18 -16.5t14.5 -20t6.5 -22.5v-525q0 -13 -86 -94t-93 -81h-342q-15 0 -28.5 20t-19.5 41l-131 339h-106q-85 0 -139.5 39t-54.5 111t54 110t138 38h302l-85 121q-11 15 -10.5 34t13.5 32l110 112q22 22 53 6zM370 945l146 -183 q17 -22 5 -47q-2 -2 -3.5 -4.5t-4 -4t-4 -2.5t-5 -2t-5 -1.5t-6 -0.5h-6h-6.5h-6h-475v-100h221q15 0 29 -20t20 -41l130 -339h294l106 89v503l-342 236zM1050 800h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5 v500q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe129;" d="M550 1294q72 0 111 -55t39 -139v-106l339 -131q21 -6 41 -19.5t20 -28.5v-342q0 -7 -81 -90t-94 -83h-525q-17 0 -35.5 14t-28.5 28l-9 14l-230 363q-16 31 6 53l112 110q13 13 32 13.5t34 -11.5l121 -84v302q0 84 38 138t110 54zM600 972v203q0 21 -25 30.5t-50 0.5 t-25 -31v-456v-7v-6v-5.5t-0.5 -6t-1.5 -5t-2 -5t-2.5 -4t-4 -4t-4.5 -2.5q-25 -12 -47 5l-183 146l-83 -86l236 -339h503l89 100v293l-339 131q-21 6 -41 19.5t-20 28.5zM450 200h500q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-500 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe130;" d="M350 1100h500q21 0 35.5 14.5t14.5 35.5v100q0 21 -14.5 35.5t-35.5 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -21 14.5 -35.5t35.5 -14.5zM600 306v-106q0 -84 -39 -139t-111 -55t-110 54t-38 138v302l-121 -84q-15 -12 -34 -11.5t-32 13.5l-112 110 q-22 22 -6 53l230 363q1 2 3.5 6t10.5 13.5t16.5 17t20 13.5t22.5 6h525q13 0 94 -83t81 -90v-342q0 -15 -20 -28.5t-41 -19.5zM308 900l-236 -339l83 -86l183 146q22 17 47 5q2 -1 4.5 -2.5t4 -4t2.5 -4t2 -5t1.5 -5t0.5 -6v-5.5v-6v-7v-456q0 -22 25 -31t50 0.5t25 30.5 v203q0 15 20 28.5t41 19.5l339 131v293l-89 100h-503z" />
-<glyph unicode="&#xe131;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM914 632l-275 223q-16 13 -27.5 8t-11.5 -26v-137h-275 q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h275v-137q0 -21 11.5 -26t27.5 8l275 223q16 13 16 32t-16 32z" />
-<glyph unicode="&#xe132;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM561 855l-275 -223q-16 -13 -16 -32t16 -32l275 -223q16 -13 27.5 -8 t11.5 26v137h275q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5h-275v137q0 21 -11.5 26t-27.5 -8z" />
-<glyph unicode="&#xe133;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM855 639l-223 275q-13 16 -32 16t-32 -16l-223 -275q-13 -16 -8 -27.5 t26 -11.5h137v-275q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v275h137q21 0 26 11.5t-8 27.5z" />
-<glyph unicode="&#xe134;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM675 900h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-275h-137q-21 0 -26 -11.5 t8 -27.5l223 -275q13 -16 32 -16t32 16l223 275q13 16 8 27.5t-26 11.5h-137v275q0 10 -7.5 17.5t-17.5 7.5z" />
-<glyph unicode="&#xe135;" d="M600 1176q116 0 222.5 -46t184 -123.5t123.5 -184t46 -222.5t-46 -222.5t-123.5 -184t-184 -123.5t-222.5 -46t-222.5 46t-184 123.5t-123.5 184t-46 222.5t46 222.5t123.5 184t184 123.5t222.5 46zM627 1101q-15 -12 -36.5 -20.5t-35.5 -12t-43 -8t-39 -6.5 q-15 -3 -45.5 0t-45.5 -2q-20 -7 -51.5 -26.5t-34.5 -34.5q-3 -11 6.5 -22.5t8.5 -18.5q-3 -34 -27.5 -91t-29.5 -79q-9 -34 5 -93t8 -87q0 -9 17 -44.5t16 -59.5q12 0 23 -5t23.5 -15t19.5 -14q16 -8 33 -15t40.5 -15t34.5 -12q21 -9 52.5 -32t60 -38t57.5 -11 q7 -15 -3 -34t-22.5 -40t-9.5 -38q13 -21 23 -34.5t27.5 -27.5t36.5 -18q0 -7 -3.5 -16t-3.5 -14t5 -17q104 -2 221 112q30 29 46.5 47t34.5 49t21 63q-13 8 -37 8.5t-36 7.5q-15 7 -49.5 15t-51.5 19q-18 0 -41 -0.5t-43 -1.5t-42 -6.5t-38 -16.5q-51 -35 -66 -12 q-4 1 -3.5 25.5t0.5 25.5q-6 13 -26.5 17.5t-24.5 6.5q1 15 -0.5 30.5t-7 28t-18.5 11.5t-31 -21q-23 -25 -42 4q-19 28 -8 58q6 16 22 22q6 -1 26 -1.5t33.5 -4t19.5 -13.5q7 -12 18 -24t21.5 -20.5t20 -15t15.5 -10.5l5 -3q2 12 7.5 30.5t8 34.5t-0.5 32q-3 18 3.5 29 t18 22.5t15.5 24.5q6 14 10.5 35t8 31t15.5 22.5t34 22.5q-6 18 10 36q8 0 24 -1.5t24.5 -1.5t20 4.5t20.5 15.5q-10 23 -31 42.5t-37.5 29.5t-49 27t-43.5 23q0 1 2 8t3 11.5t1.5 10.5t-1 9.5t-4.5 4.5q31 -13 58.5 -14.5t38.5 2.5l12 5q5 28 -9.5 46t-36.5 24t-50 15 t-41 20q-18 -4 -37 0zM613 994q0 -17 8 -42t17 -45t9 -23q-8 1 -39.5 5.5t-52.5 10t-37 16.5q3 11 16 29.5t16 25.5q10 -10 19 -10t14 6t13.5 14.5t16.5 12.5z" />
-<glyph unicode="&#xe136;" d="M756 1157q164 92 306 -9l-259 -138l145 -232l251 126q6 -89 -34 -156.5t-117 -110.5q-60 -34 -127 -39.5t-126 16.5l-596 -596q-15 -16 -36.5 -16t-36.5 16l-111 110q-15 15 -15 36.5t15 37.5l600 599q-34 101 5.5 201.5t135.5 154.5z" />
-<glyph unicode="&#xe137;" horiz-adv-x="1220" d="M100 1196h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 1096h-200v-100h200v100zM100 796h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 696h-500v-100h500v100zM100 396h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 296h-300v-100h300v100z " />
-<glyph unicode="&#xe138;" d="M150 1200h900q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM700 500v-300l-200 -200v500l-350 500h900z" />
-<glyph unicode="&#xe139;" d="M500 1200h200q41 0 70.5 -29.5t29.5 -70.5v-100h300q41 0 70.5 -29.5t29.5 -70.5v-400h-500v100h-200v-100h-500v400q0 41 29.5 70.5t70.5 29.5h300v100q0 41 29.5 70.5t70.5 29.5zM500 1100v-100h200v100h-200zM1200 400v-200q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v200h1200z" />
-<glyph unicode="&#xe140;" d="M50 1200h300q21 0 25 -10.5t-10 -24.5l-94 -94l199 -199q7 -8 7 -18t-7 -18l-106 -106q-8 -7 -18 -7t-18 7l-199 199l-94 -94q-14 -14 -24.5 -10t-10.5 25v300q0 21 14.5 35.5t35.5 14.5zM850 1200h300q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -10.5 -25t-24.5 10l-94 94 l-199 -199q-8 -7 -18 -7t-18 7l-106 106q-7 8 -7 18t7 18l199 199l-94 94q-14 14 -10 24.5t25 10.5zM364 470l106 -106q7 -8 7 -18t-7 -18l-199 -199l94 -94q14 -14 10 -24.5t-25 -10.5h-300q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 10.5 25t24.5 -10l94 -94l199 199 q8 7 18 7t18 -7zM1071 271l94 94q14 14 24.5 10t10.5 -25v-300q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -25 10.5t10 24.5l94 94l-199 199q-7 8 -7 18t7 18l106 106q8 7 18 7t18 -7z" />
-<glyph unicode="&#xe141;" d="M596 1192q121 0 231.5 -47.5t190 -127t127 -190t47.5 -231.5t-47.5 -231.5t-127 -190.5t-190 -127t-231.5 -47t-231.5 47t-190.5 127t-127 190.5t-47 231.5t47 231.5t127 190t190.5 127t231.5 47.5zM596 1010q-112 0 -207.5 -55.5t-151 -151t-55.5 -207.5t55.5 -207.5 t151 -151t207.5 -55.5t207.5 55.5t151 151t55.5 207.5t-55.5 207.5t-151 151t-207.5 55.5zM454.5 905q22.5 0 38.5 -16t16 -38.5t-16 -39t-38.5 -16.5t-38.5 16.5t-16 39t16 38.5t38.5 16zM754.5 905q22.5 0 38.5 -16t16 -38.5t-16 -39t-38 -16.5q-14 0 -29 10l-55 -145 q17 -23 17 -51q0 -36 -25.5 -61.5t-61.5 -25.5t-61.5 25.5t-25.5 61.5q0 32 20.5 56.5t51.5 29.5l122 126l1 1q-9 14 -9 28q0 23 16 39t38.5 16zM345.5 709q22.5 0 38.5 -16t16 -38.5t-16 -38.5t-38.5 -16t-38.5 16t-16 38.5t16 38.5t38.5 16zM854.5 709q22.5 0 38.5 -16 t16 -38.5t-16 -38.5t-38.5 -16t-38.5 16t-16 38.5t16 38.5t38.5 16z" />
-<glyph unicode="&#xe142;" d="M546 173l469 470q91 91 99 192q7 98 -52 175.5t-154 94.5q-22 4 -47 4q-34 0 -66.5 -10t-56.5 -23t-55.5 -38t-48 -41.5t-48.5 -47.5q-376 -375 -391 -390q-30 -27 -45 -41.5t-37.5 -41t-32 -46.5t-16 -47.5t-1.5 -56.5q9 -62 53.5 -95t99.5 -33q74 0 125 51l548 548 q36 36 20 75q-7 16 -21.5 26t-32.5 10q-26 0 -50 -23q-13 -12 -39 -38l-341 -338q-15 -15 -35.5 -15.5t-34.5 13.5t-14 34.5t14 34.5q327 333 361 367q35 35 67.5 51.5t78.5 16.5q14 0 29 -1q44 -8 74.5 -35.5t43.5 -68.5q14 -47 2 -96.5t-47 -84.5q-12 -11 -32 -32 t-79.5 -81t-114.5 -115t-124.5 -123.5t-123 -119.5t-96.5 -89t-57 -45q-56 -27 -120 -27q-70 0 -129 32t-93 89q-48 78 -35 173t81 163l511 511q71 72 111 96q91 55 198 55q80 0 152 -33q78 -36 129.5 -103t66.5 -154q17 -93 -11 -183.5t-94 -156.5l-482 -476 q-15 -15 -36 -16t-37 14t-17.5 34t14.5 35z" />
-<glyph unicode="&#xe143;" d="M649 949q48 68 109.5 104t121.5 38.5t118.5 -20t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-150 152.5t-126.5 127.5t-93.5 124.5t-33.5 117.5q0 64 28 123t73 100.5t104 64t119 20 t120.5 -38.5t104.5 -104zM896 972q-33 0 -64.5 -19t-56.5 -46t-47.5 -53.5t-43.5 -45.5t-37.5 -19t-36 19t-40 45.5t-43 53.5t-54 46t-65.5 19q-67 0 -122.5 -55.5t-55.5 -132.5q0 -23 13.5 -51t46 -65t57.5 -63t76 -75l22 -22q15 -14 44 -44t50.5 -51t46 -44t41 -35t23 -12 t23.5 12t42.5 36t46 44t52.5 52t44 43q4 4 12 13q43 41 63.5 62t52 55t46 55t26 46t11.5 44q0 79 -53 133.5t-120 54.5z" />
-<glyph unicode="&#xe144;" d="M776.5 1214q93.5 0 159.5 -66l141 -141q66 -66 66 -160q0 -42 -28 -95.5t-62 -87.5l-29 -29q-31 53 -77 99l-18 18l95 95l-247 248l-389 -389l212 -212l-105 -106l-19 18l-141 141q-66 66 -66 159t66 159l283 283q65 66 158.5 66zM600 706l105 105q10 -8 19 -17l141 -141 q66 -66 66 -159t-66 -159l-283 -283q-66 -66 -159 -66t-159 66l-141 141q-66 66 -66 159.5t66 159.5l55 55q29 -55 75 -102l18 -17l-95 -95l247 -248l389 389z" />
-<glyph unicode="&#xe145;" d="M603 1200q85 0 162 -15t127 -38t79 -48t29 -46v-953q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-41 0 -70.5 29.5t-29.5 70.5v953q0 21 30 46.5t81 48t129 37.5t163 15zM300 1000v-700h600v700h-600zM600 254q-43 0 -73.5 -30.5t-30.5 -73.5t30.5 -73.5t73.5 -30.5t73.5 30.5 t30.5 73.5t-30.5 73.5t-73.5 30.5z" />
-<glyph unicode="&#xe146;" d="M902 1185l283 -282q15 -15 15 -36t-14.5 -35.5t-35.5 -14.5t-35 15l-36 35l-279 -267v-300l-212 210l-308 -307l-280 -203l203 280l307 308l-210 212h300l267 279l-35 36q-15 14 -15 35t14.5 35.5t35.5 14.5t35 -15z" />
-<glyph unicode="&#xe148;" d="M700 1248v-78q38 -5 72.5 -14.5t75.5 -31.5t71 -53.5t52 -84t24 -118.5h-159q-4 36 -10.5 59t-21 45t-40 35.5t-64.5 20.5v-307l64 -13q34 -7 64 -16.5t70 -32t67.5 -52.5t47.5 -80t20 -112q0 -139 -89 -224t-244 -97v-77h-100v79q-150 16 -237 103q-40 40 -52.5 93.5 t-15.5 139.5h139q5 -77 48.5 -126t117.5 -65v335l-27 8q-46 14 -79 26.5t-72 36t-63 52t-40 72.5t-16 98q0 70 25 126t67.5 92t94.5 57t110 27v77h100zM600 754v274q-29 -4 -50 -11t-42 -21.5t-31.5 -41.5t-10.5 -65q0 -29 7 -50.5t16.5 -34t28.5 -22.5t31.5 -14t37.5 -10 q9 -3 13 -4zM700 547v-310q22 2 42.5 6.5t45 15.5t41.5 27t29 42t12 59.5t-12.5 59.5t-38 44.5t-53 31t-66.5 24.5z" />
-<glyph unicode="&#xe149;" d="M561 1197q84 0 160.5 -40t123.5 -109.5t47 -147.5h-153q0 40 -19.5 71.5t-49.5 48.5t-59.5 26t-55.5 9q-37 0 -79 -14.5t-62 -35.5q-41 -44 -41 -101q0 -26 13.5 -63t26.5 -61t37 -66q6 -9 9 -14h241v-100h-197q8 -50 -2.5 -115t-31.5 -95q-45 -62 -99 -112 q34 10 83 17.5t71 7.5q32 1 102 -16t104 -17q83 0 136 30l50 -147q-31 -19 -58 -30.5t-55 -15.5t-42 -4.5t-46 -0.5q-23 0 -76 17t-111 32.5t-96 11.5q-39 -3 -82 -16t-67 -25l-23 -11l-55 145q4 3 16 11t15.5 10.5t13 9t15.5 12t14.5 14t17.5 18.5q48 55 54 126.5 t-30 142.5h-221v100h166q-23 47 -44 104q-7 20 -12 41.5t-6 55.5t6 66.5t29.5 70.5t58.5 71q97 88 263 88z" />
-<glyph unicode="&#xe150;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM935 1184l230 -249q14 -14 10 -24.5t-25 -10.5h-150v-900h-200v900h-150q-21 0 -25 10.5t10 24.5l230 249q14 15 35 15t35 -15z" />
-<glyph unicode="&#xe151;" d="M1000 700h-100v100h-100v-100h-100v500h300v-500zM400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM801 1100v-200h100v200h-100zM1000 350l-200 -250h200v-100h-300v150l200 250h-200v100h300v-150z " />
-<glyph unicode="&#xe152;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1000 1050l-200 -250h200v-100h-300v150l200 250h-200v100h300v-150zM1000 0h-100v100h-100v-100h-100v500h300v-500zM801 400v-200h100v200h-100z " />
-<glyph unicode="&#xe153;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1000 700h-100v400h-100v100h200v-500zM1100 0h-100v100h-200v400h300v-500zM901 400v-200h100v200h-100z" />
-<glyph unicode="&#xe154;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1100 700h-100v100h-200v400h300v-500zM901 1100v-200h100v200h-100zM1000 0h-100v400h-100v100h200v-500z" />
-<glyph unicode="&#xe155;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM900 1000h-200v200h200v-200zM1000 700h-300v200h300v-200zM1100 400h-400v200h400v-200zM1200 100h-500v200h500v-200z" />
-<glyph unicode="&#xe156;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1200 1000h-500v200h500v-200zM1100 700h-400v200h400v-200zM1000 400h-300v200h300v-200zM900 100h-200v200h200v-200z" />
-<glyph unicode="&#xe157;" d="M350 1100h400q162 0 256 -93.5t94 -256.5v-400q0 -165 -93.5 -257.5t-256.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5z" />
-<glyph unicode="&#xe158;" d="M350 1100h400q165 0 257.5 -92.5t92.5 -257.5v-400q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-163 0 -256.5 92.5t-93.5 257.5v400q0 163 94 256.5t256 93.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5zM440 770l253 -190q17 -12 17 -30t-17 -30l-253 -190q-16 -12 -28 -6.5t-12 26.5v400q0 21 12 26.5t28 -6.5z" />
-<glyph unicode="&#xe159;" d="M350 1100h400q163 0 256.5 -94t93.5 -256v-400q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 163 92.5 256.5t257.5 93.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5zM350 700h400q21 0 26.5 -12t-6.5 -28l-190 -253q-12 -17 -30 -17t-30 17l-190 253q-12 16 -6.5 28t26.5 12z" />
-<glyph unicode="&#xe160;" d="M350 1100h400q165 0 257.5 -92.5t92.5 -257.5v-400q0 -163 -92.5 -256.5t-257.5 -93.5h-400q-163 0 -256.5 94t-93.5 256v400q0 165 92.5 257.5t257.5 92.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5zM580 693l190 -253q12 -16 6.5 -28t-26.5 -12h-400q-21 0 -26.5 12t6.5 28l190 253q12 17 30 17t30 -17z" />
-<glyph unicode="&#xe161;" d="M550 1100h400q165 0 257.5 -92.5t92.5 -257.5v-400q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h450q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-450q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM338 867l324 -284q16 -14 16 -33t-16 -33l-324 -284q-16 -14 -27 -9t-11 26v150h-250q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h250v150q0 21 11 26t27 -9z" />
-<glyph unicode="&#xe162;" d="M793 1182l9 -9q8 -10 5 -27q-3 -11 -79 -225.5t-78 -221.5l300 1q24 0 32.5 -17.5t-5.5 -35.5q-1 0 -133.5 -155t-267 -312.5t-138.5 -162.5q-12 -15 -26 -15h-9l-9 8q-9 11 -4 32q2 9 42 123.5t79 224.5l39 110h-302q-23 0 -31 19q-10 21 6 41q75 86 209.5 237.5 t228 257t98.5 111.5q9 16 25 16h9z" />
-<glyph unicode="&#xe163;" d="M350 1100h400q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-450q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h450q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400 q0 165 92.5 257.5t257.5 92.5zM938 867l324 -284q16 -14 16 -33t-16 -33l-324 -284q-16 -14 -27 -9t-11 26v150h-250q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h250v150q0 21 11 26t27 -9z" />
-<glyph unicode="&#xe164;" d="M750 1200h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -10.5 -25t-24.5 10l-109 109l-312 -312q-15 -15 -35.5 -15t-35.5 15l-141 141q-15 15 -15 35.5t15 35.5l312 312l-109 109q-14 14 -10 24.5t25 10.5zM456 900h-156q-41 0 -70.5 -29.5t-29.5 -70.5v-500 q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v148l200 200v-298q0 -165 -93.5 -257.5t-256.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5h300z" />
-<glyph unicode="&#xe165;" d="M600 1186q119 0 227.5 -46.5t187 -125t125 -187t46.5 -227.5t-46.5 -227.5t-125 -187t-187 -125t-227.5 -46.5t-227.5 46.5t-187 125t-125 187t-46.5 227.5t46.5 227.5t125 187t187 125t227.5 46.5zM600 1022q-115 0 -212 -56.5t-153.5 -153.5t-56.5 -212t56.5 -212 t153.5 -153.5t212 -56.5t212 56.5t153.5 153.5t56.5 212t-56.5 212t-153.5 153.5t-212 56.5zM600 794q80 0 137 -57t57 -137t-57 -137t-137 -57t-137 57t-57 137t57 137t137 57z" />
-<glyph unicode="&#xe166;" d="M450 1200h200q21 0 35.5 -14.5t14.5 -35.5v-350h245q20 0 25 -11t-9 -26l-383 -426q-14 -15 -33.5 -15t-32.5 15l-379 426q-13 15 -8.5 26t25.5 11h250v350q0 21 14.5 35.5t35.5 14.5zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5z M900 200v-50h100v50h-100z" />
-<glyph unicode="&#xe167;" d="M583 1182l378 -435q14 -15 9 -31t-26 -16h-244v-250q0 -20 -17 -35t-39 -15h-200q-20 0 -32 14.5t-12 35.5v250h-250q-20 0 -25.5 16.5t8.5 31.5l383 431q14 16 33.5 17t33.5 -14zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5z M900 200v-50h100v50h-100z" />
-<glyph unicode="&#xe168;" d="M396 723l369 369q7 7 17.5 7t17.5 -7l139 -139q7 -8 7 -18.5t-7 -17.5l-525 -525q-7 -8 -17.5 -8t-17.5 8l-292 291q-7 8 -7 18t7 18l139 139q8 7 18.5 7t17.5 -7zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5zM900 200v-50h100v50 h-100z" />
-<glyph unicode="&#xe169;" d="M135 1023l142 142q14 14 35 14t35 -14l77 -77l-212 -212l-77 76q-14 15 -14 36t14 35zM655 855l210 210q14 14 24.5 10t10.5 -25l-2 -599q-1 -20 -15.5 -35t-35.5 -15l-597 -1q-21 0 -25 10.5t10 24.5l208 208l-154 155l212 212zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5 v-250h-1100v250q0 21 14.5 35.5t35.5 14.5zM900 200v-50h100v50h-100z" />
-<glyph unicode="&#xe170;" d="M350 1200l599 -2q20 -1 35 -15.5t15 -35.5l1 -597q0 -21 -10.5 -25t-24.5 10l-208 208l-155 -154l-212 212l155 154l-210 210q-14 14 -10 24.5t25 10.5zM524 512l-76 -77q-15 -14 -36 -14t-35 14l-142 142q-14 14 -14 35t14 35l77 77zM50 300h1000q21 0 35.5 -14.5 t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5zM900 200v-50h100v50h-100z" />
-<glyph unicode="&#xe171;" d="M1200 103l-483 276l-314 -399v423h-399l1196 796v-1096zM483 424v-230l683 953z" />
-<glyph unicode="&#xe172;" d="M1100 1000v-850q0 -21 -14.5 -35.5t-35.5 -14.5h-150v400h-700v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200z" />
-<glyph unicode="&#xe173;" d="M1100 1000l-2 -149l-299 -299l-95 95q-9 9 -21.5 9t-21.5 -9l-149 -147h-312v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM1132 638l106 -106q7 -7 7 -17.5t-7 -17.5l-420 -421q-8 -7 -18 -7 t-18 7l-202 203q-8 7 -8 17.5t8 17.5l106 106q7 8 17.5 8t17.5 -8l79 -79l297 297q7 7 17.5 7t17.5 -7z" />
-<glyph unicode="&#xe174;" d="M1100 1000v-269l-103 -103l-134 134q-15 15 -33.5 16.5t-34.5 -12.5l-266 -266h-329v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM1202 572l70 -70q15 -15 15 -35.5t-15 -35.5l-131 -131 l131 -131q15 -15 15 -35.5t-15 -35.5l-70 -70q-15 -15 -35.5 -15t-35.5 15l-131 131l-131 -131q-15 -15 -35.5 -15t-35.5 15l-70 70q-15 15 -15 35.5t15 35.5l131 131l-131 131q-15 15 -15 35.5t15 35.5l70 70q15 15 35.5 15t35.5 -15l131 -131l131 131q15 15 35.5 15 t35.5 -15z" />
-<glyph unicode="&#xe175;" d="M1100 1000v-300h-350q-21 0 -35.5 -14.5t-14.5 -35.5v-150h-500v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM850 600h100q21 0 35.5 -14.5t14.5 -35.5v-250h150q21 0 25 -10.5t-10 -24.5 l-230 -230q-14 -14 -35 -14t-35 14l-230 230q-14 14 -10 24.5t25 10.5h150v250q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe176;" d="M1100 1000v-400l-165 165q-14 15 -35 15t-35 -15l-263 -265h-402v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM935 565l230 -229q14 -15 10 -25.5t-25 -10.5h-150v-250q0 -20 -14.5 -35 t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35v250h-150q-21 0 -25 10.5t10 25.5l230 229q14 15 35 15t35 -15z" />
-<glyph unicode="&#xe177;" d="M50 1100h1100q21 0 35.5 -14.5t14.5 -35.5v-150h-1200v150q0 21 14.5 35.5t35.5 14.5zM1200 800v-550q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v550h1200zM100 500v-200h400v200h-400z" />
-<glyph unicode="&#xe178;" d="M935 1165l248 -230q14 -14 14 -35t-14 -35l-248 -230q-14 -14 -24.5 -10t-10.5 25v150h-400v200h400v150q0 21 10.5 25t24.5 -10zM200 800h-50q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h50v-200zM400 800h-100v200h100v-200zM18 435l247 230 q14 14 24.5 10t10.5 -25v-150h400v-200h-400v-150q0 -21 -10.5 -25t-24.5 10l-247 230q-15 14 -15 35t15 35zM900 300h-100v200h100v-200zM1000 500h51q20 0 34.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-34.5 -14.5h-51v200z" />
-<glyph unicode="&#xe179;" d="M862 1073l276 116q25 18 43.5 8t18.5 -41v-1106q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v397q-4 1 -11 5t-24 17.5t-30 29t-24 42t-11 56.5v359q0 31 18.5 65t43.5 52zM550 1200q22 0 34.5 -12.5t14.5 -24.5l1 -13v-450q0 -28 -10.5 -59.5 t-25 -56t-29 -45t-25.5 -31.5l-10 -11v-447q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v447q-4 4 -11 11.5t-24 30.5t-30 46t-24 55t-11 60v450q0 2 0.5 5.5t4 12t8.5 15t14.5 12t22.5 5.5q20 0 32.5 -12.5t14.5 -24.5l3 -13v-350h100v350v5.5t2.5 12 t7 15t15 12t25.5 5.5q23 0 35.5 -12.5t13.5 -24.5l1 -13v-350h100v350q0 2 0.5 5.5t3 12t7 15t15 12t24.5 5.5z" />
-<glyph unicode="&#xe180;" d="M1200 1100v-56q-4 0 -11 -0.5t-24 -3t-30 -7.5t-24 -15t-11 -24v-888q0 -22 25 -34.5t50 -13.5l25 -2v-56h-400v56q75 0 87.5 6.5t12.5 43.5v394h-500v-394q0 -37 12.5 -43.5t87.5 -6.5v-56h-400v56q4 0 11 0.5t24 3t30 7.5t24 15t11 24v888q0 22 -25 34.5t-50 13.5 l-25 2v56h400v-56q-75 0 -87.5 -6.5t-12.5 -43.5v-394h500v394q0 37 -12.5 43.5t-87.5 6.5v56h400z" />
-<glyph unicode="&#xe181;" d="M675 1000h375q21 0 35.5 -14.5t14.5 -35.5v-150h-105l-295 -98v98l-200 200h-400l100 100h375zM100 900h300q41 0 70.5 -29.5t29.5 -70.5v-500q0 -41 -29.5 -70.5t-70.5 -29.5h-300q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5zM100 800v-200h300v200 h-300zM1100 535l-400 -133v163l400 133v-163zM100 500v-200h300v200h-300zM1100 398v-248q0 -21 -14.5 -35.5t-35.5 -14.5h-375l-100 -100h-375l-100 100h400l200 200h105z" />
-<glyph unicode="&#xe182;" d="M17 1007l162 162q17 17 40 14t37 -22l139 -194q14 -20 11 -44.5t-20 -41.5l-119 -118q102 -142 228 -268t267 -227l119 118q17 17 42.5 19t44.5 -12l192 -136q19 -14 22.5 -37.5t-13.5 -40.5l-163 -162q-3 -1 -9.5 -1t-29.5 2t-47.5 6t-62.5 14.5t-77.5 26.5t-90 42.5 t-101.5 60t-111 83t-119 108.5q-74 74 -133.5 150.5t-94.5 138.5t-60 119.5t-34.5 100t-15 74.5t-4.5 48z" />
-<glyph unicode="&#xe183;" d="M600 1100q92 0 175 -10.5t141.5 -27t108.5 -36.5t81.5 -40t53.5 -37t31 -27l9 -10v-200q0 -21 -14.5 -33t-34.5 -9l-202 34q-20 3 -34.5 20t-14.5 38v146q-141 24 -300 24t-300 -24v-146q0 -21 -14.5 -38t-34.5 -20l-202 -34q-20 -3 -34.5 9t-14.5 33v200q3 4 9.5 10.5 t31 26t54 37.5t80.5 39.5t109 37.5t141 26.5t175 10.5zM600 795q56 0 97 -9.5t60 -23.5t30 -28t12 -24l1 -10v-50l365 -303q14 -15 24.5 -40t10.5 -45v-212q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v212q0 20 10.5 45t24.5 40l365 303v50 q0 4 1 10.5t12 23t30 29t60 22.5t97 10z" />
-<glyph unicode="&#xe184;" d="M1100 700l-200 -200h-600l-200 200v500h200v-200h200v200h200v-200h200v200h200v-500zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-12l137 -100h-950l137 100h-12q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5 t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe185;" d="M700 1100h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-1000h300v1000q0 41 -29.5 70.5t-70.5 29.5zM1100 800h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-700h300v700q0 41 -29.5 70.5t-70.5 29.5zM400 0h-300v400q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-400z " />
-<glyph unicode="&#xe186;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 700h-200v-100h200v-300h-300v100h200v100h-200v300h300v-100zM900 700v-300l-100 -100h-200v500h200z M700 700v-300h100v300h-100z" />
-<glyph unicode="&#xe187;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 300h-100v200h-100v-200h-100v500h100v-200h100v200h100v-500zM900 700v-300l-100 -100h-200v500h200z M700 700v-300h100v300h-100z" />
-<glyph unicode="&#xe188;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 700h-200v-300h200v-100h-300v500h300v-100zM900 700h-200v-300h200v-100h-300v500h300v-100z" />
-<glyph unicode="&#xe189;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 400l-300 150l300 150v-300zM900 550l-300 -150v300z" />
-<glyph unicode="&#xe190;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM900 300h-700v500h700v-500zM800 700h-130q-38 0 -66.5 -43t-28.5 -108t27 -107t68 -42h130v300zM300 700v-300 h130q41 0 68 42t27 107t-28.5 108t-66.5 43h-130z" />
-<glyph unicode="&#xe191;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 700h-200v-100h200v-300h-300v100h200v100h-200v300h300v-100zM900 300h-100v400h-100v100h200v-500z M700 300h-100v100h100v-100z" />
-<glyph unicode="&#xe192;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM300 700h200v-400h-300v500h100v-100zM900 300h-100v400h-100v100h200v-500zM300 600v-200h100v200h-100z M700 300h-100v100h100v-100z" />
-<glyph unicode="&#xe193;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 500l-199 -200h-100v50l199 200v150h-200v100h300v-300zM900 300h-100v400h-100v100h200v-500zM701 300h-100 v100h100v-100z" />
-<glyph unicode="&#xe194;" d="M600 1191q120 0 229.5 -47t188.5 -126t126 -188.5t47 -229.5t-47 -229.5t-126 -188.5t-188.5 -126t-229.5 -47t-229.5 47t-188.5 126t-126 188.5t-47 229.5t47 229.5t126 188.5t188.5 126t229.5 47zM600 1021q-114 0 -211 -56.5t-153.5 -153.5t-56.5 -211t56.5 -211 t153.5 -153.5t211 -56.5t211 56.5t153.5 153.5t56.5 211t-56.5 211t-153.5 153.5t-211 56.5zM800 700h-300v-200h300v-100h-300l-100 100v200l100 100h300v-100z" />
-<glyph unicode="&#xe195;" d="M600 1191q120 0 229.5 -47t188.5 -126t126 -188.5t47 -229.5t-47 -229.5t-126 -188.5t-188.5 -126t-229.5 -47t-229.5 47t-188.5 126t-126 188.5t-47 229.5t47 229.5t126 188.5t188.5 126t229.5 47zM600 1021q-114 0 -211 -56.5t-153.5 -153.5t-56.5 -211t56.5 -211 t153.5 -153.5t211 -56.5t211 56.5t153.5 153.5t56.5 211t-56.5 211t-153.5 153.5t-211 56.5zM800 700v-100l-50 -50l100 -100v-50h-100l-100 100h-150v-100h-100v400h300zM500 700v-100h200v100h-200z" />
-<glyph unicode="&#xe197;" d="M503 1089q110 0 200.5 -59.5t134.5 -156.5q44 14 90 14q120 0 205 -86.5t85 -207t-85 -207t-205 -86.5h-128v250q0 21 -14.5 35.5t-35.5 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-250h-222q-80 0 -136 57.5t-56 136.5q0 69 43 122.5t108 67.5q-2 19 -2 37q0 100 49 185 t134 134t185 49zM525 500h150q10 0 17.5 -7.5t7.5 -17.5v-275h137q21 0 26 -11.5t-8 -27.5l-223 -244q-13 -16 -32 -16t-32 16l-223 244q-13 16 -8 27.5t26 11.5h137v275q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe198;" d="M502 1089q110 0 201 -59.5t135 -156.5q43 15 89 15q121 0 206 -86.5t86 -206.5q0 -99 -60 -181t-150 -110l-378 360q-13 16 -31.5 16t-31.5 -16l-381 -365h-9q-79 0 -135.5 57.5t-56.5 136.5q0 69 43 122.5t108 67.5q-2 19 -2 38q0 100 49 184.5t133.5 134t184.5 49.5z M632 467l223 -228q13 -16 8 -27.5t-26 -11.5h-137v-275q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v275h-137q-21 0 -26 11.5t8 27.5q199 204 223 228q19 19 31.5 19t32.5 -19z" />
-<glyph unicode="&#xe199;" d="M700 100v100h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170l-270 -300h400v-100h-50q-21 0 -35.5 -14.5t-14.5 -35.5v-50h400v50q0 21 -14.5 35.5t-35.5 14.5h-50z" />
-<glyph unicode="&#xe200;" d="M600 1179q94 0 167.5 -56.5t99.5 -145.5q89 -6 150.5 -71.5t61.5 -155.5q0 -61 -29.5 -112.5t-79.5 -82.5q9 -29 9 -55q0 -74 -52.5 -126.5t-126.5 -52.5q-55 0 -100 30v-251q21 0 35.5 -14.5t14.5 -35.5v-50h-300v50q0 21 14.5 35.5t35.5 14.5v251q-45 -30 -100 -30 q-74 0 -126.5 52.5t-52.5 126.5q0 18 4 38q-47 21 -75.5 65t-28.5 97q0 74 52.5 126.5t126.5 52.5q5 0 23 -2q0 2 -1 10t-1 13q0 116 81.5 197.5t197.5 81.5z" />
-<glyph unicode="&#xe201;" d="M1010 1010q111 -111 150.5 -260.5t0 -299t-150.5 -260.5q-83 -83 -191.5 -126.5t-218.5 -43.5t-218.5 43.5t-191.5 126.5q-111 111 -150.5 260.5t0 299t150.5 260.5q83 83 191.5 126.5t218.5 43.5t218.5 -43.5t191.5 -126.5zM476 1065q-4 0 -8 -1q-121 -34 -209.5 -122.5 t-122.5 -209.5q-4 -12 2.5 -23t18.5 -14l36 -9q3 -1 7 -1q23 0 29 22q27 96 98 166q70 71 166 98q11 3 17.5 13.5t3.5 22.5l-9 35q-3 13 -14 19q-7 4 -15 4zM512 920q-4 0 -9 -2q-80 -24 -138.5 -82.5t-82.5 -138.5q-4 -13 2 -24t19 -14l34 -9q4 -1 8 -1q22 0 28 21 q18 58 58.5 98.5t97.5 58.5q12 3 18 13.5t3 21.5l-9 35q-3 12 -14 19q-7 4 -15 4zM719.5 719.5q-49.5 49.5 -119.5 49.5t-119.5 -49.5t-49.5 -119.5t49.5 -119.5t119.5 -49.5t119.5 49.5t49.5 119.5t-49.5 119.5zM855 551q-22 0 -28 -21q-18 -58 -58.5 -98.5t-98.5 -57.5 q-11 -4 -17 -14.5t-3 -21.5l9 -35q3 -12 14 -19q7 -4 15 -4q4 0 9 2q80 24 138.5 82.5t82.5 138.5q4 13 -2.5 24t-18.5 14l-34 9q-4 1 -8 1zM1000 515q-23 0 -29 -22q-27 -96 -98 -166q-70 -71 -166 -98q-11 -3 -17.5 -13.5t-3.5 -22.5l9 -35q3 -13 14 -19q7 -4 15 -4 q4 0 8 1q121 34 209.5 122.5t122.5 209.5q4 12 -2.5 23t-18.5 14l-36 9q-3 1 -7 1z" />
-<glyph unicode="&#xe202;" d="M700 800h300v-380h-180v200h-340v-200h-380v755q0 10 7.5 17.5t17.5 7.5h575v-400zM1000 900h-200v200zM700 300h162l-212 -212l-212 212h162v200h100v-200zM520 0h-395q-10 0 -17.5 7.5t-7.5 17.5v395zM1000 220v-195q0 -10 -7.5 -17.5t-17.5 -7.5h-195z" />
-<glyph unicode="&#xe203;" d="M700 800h300v-520l-350 350l-550 -550v1095q0 10 7.5 17.5t17.5 7.5h575v-400zM1000 900h-200v200zM862 200h-162v-200h-100v200h-162l212 212zM480 0h-355q-10 0 -17.5 7.5t-7.5 17.5v55h380v-80zM1000 80v-55q0 -10 -7.5 -17.5t-17.5 -7.5h-155v80h180z" />
-<glyph unicode="&#xe204;" d="M1162 800h-162v-200h100l100 -100h-300v300h-162l212 212zM200 800h200q27 0 40 -2t29.5 -10.5t23.5 -30t7 -57.5h300v-100h-600l-200 -350v450h100q0 36 7 57.5t23.5 30t29.5 10.5t40 2zM800 400h240l-240 -400h-800l300 500h500v-100z" />
-<glyph unicode="&#xe205;" d="M650 1100h100q21 0 35.5 -14.5t14.5 -35.5v-50h50q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h50v50q0 21 14.5 35.5t35.5 14.5zM1000 850v150q41 0 70.5 -29.5t29.5 -70.5v-800 q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-1 0 -20 4l246 246l-326 326v324q0 41 29.5 70.5t70.5 29.5v-150q0 -62 44 -106t106 -44h300q62 0 106 44t44 106zM412 250l-212 -212v162h-200v100h200v162z" />
-<glyph unicode="&#xe206;" d="M450 1100h100q21 0 35.5 -14.5t14.5 -35.5v-50h50q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h50v50q0 21 14.5 35.5t35.5 14.5zM800 850v150q41 0 70.5 -29.5t29.5 -70.5v-500 h-200v-300h200q0 -36 -7 -57.5t-23.5 -30t-29.5 -10.5t-40 -2h-600q-41 0 -70.5 29.5t-29.5 70.5v800q0 41 29.5 70.5t70.5 29.5v-150q0 -62 44 -106t106 -44h300q62 0 106 44t44 106zM1212 250l-212 -212v162h-200v100h200v162z" />
-<glyph unicode="&#xe209;" d="M658 1197l637 -1104q23 -38 7 -65.5t-60 -27.5h-1276q-44 0 -60 27.5t7 65.5l637 1104q22 39 54 39t54 -39zM704 800h-208q-20 0 -32 -14.5t-8 -34.5l58 -302q4 -20 21.5 -34.5t37.5 -14.5h54q20 0 37.5 14.5t21.5 34.5l58 302q4 20 -8 34.5t-32 14.5zM500 300v-100h200 v100h-200z" />
-<glyph unicode="&#xe210;" d="M425 1100h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM425 800h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5 t17.5 7.5zM825 800h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM25 500h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150 q0 10 7.5 17.5t17.5 7.5zM425 500h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM825 500h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5 v150q0 10 7.5 17.5t17.5 7.5zM25 200h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM425 200h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5 t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM825 200h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe211;" d="M700 1200h100v-200h-100v-100h350q62 0 86.5 -39.5t-3.5 -94.5l-66 -132q-41 -83 -81 -134h-772q-40 51 -81 134l-66 132q-28 55 -3.5 94.5t86.5 39.5h350v100h-100v200h100v100h200v-100zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-12l137 -100 h-950l138 100h-13q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe212;" d="M600 1300q40 0 68.5 -29.5t28.5 -70.5h-194q0 41 28.5 70.5t68.5 29.5zM443 1100h314q18 -37 18 -75q0 -8 -3 -25h328q41 0 44.5 -16.5t-30.5 -38.5l-175 -145h-678l-178 145q-34 22 -29 38.5t46 16.5h328q-3 17 -3 25q0 38 18 75zM250 700h700q21 0 35.5 -14.5 t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-150v-200l275 -200h-950l275 200v200h-150q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe213;" d="M600 1181q75 0 128 -53t53 -128t-53 -128t-128 -53t-128 53t-53 128t53 128t128 53zM602 798h46q34 0 55.5 -28.5t21.5 -86.5q0 -76 39 -183h-324q39 107 39 183q0 58 21.5 86.5t56.5 28.5h45zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-13 l138 -100h-950l137 100h-12q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe214;" d="M600 1300q47 0 92.5 -53.5t71 -123t25.5 -123.5q0 -78 -55.5 -133.5t-133.5 -55.5t-133.5 55.5t-55.5 133.5q0 62 34 143l144 -143l111 111l-163 163q34 26 63 26zM602 798h46q34 0 55.5 -28.5t21.5 -86.5q0 -76 39 -183h-324q39 107 39 183q0 58 21.5 86.5t56.5 28.5h45 zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-13l138 -100h-950l137 100h-12q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe215;" d="M600 1200l300 -161v-139h-300q0 -57 18.5 -108t50 -91.5t63 -72t70 -67.5t57.5 -61h-530q-60 83 -90.5 177.5t-30.5 178.5t33 164.5t87.5 139.5t126 96.5t145.5 41.5v-98zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-13l138 -100h-950l137 100 h-12q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe216;" d="M600 1300q41 0 70.5 -29.5t29.5 -70.5v-78q46 -26 73 -72t27 -100v-50h-400v50q0 54 27 100t73 72v78q0 41 29.5 70.5t70.5 29.5zM400 800h400q54 0 100 -27t72 -73h-172v-100h200v-100h-200v-100h200v-100h-200v-100h200q0 -83 -58.5 -141.5t-141.5 -58.5h-400 q-83 0 -141.5 58.5t-58.5 141.5v400q0 83 58.5 141.5t141.5 58.5z" />
-<glyph unicode="&#xe218;" d="M150 1100h900q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5v500q0 21 14.5 35.5t35.5 14.5zM125 400h950q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-283l224 -224q13 -13 13 -31.5t-13 -32 t-31.5 -13.5t-31.5 13l-88 88h-524l-87 -88q-13 -13 -32 -13t-32 13.5t-13 32t13 31.5l224 224h-289q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM541 300l-100 -100h324l-100 100h-124z" />
-<glyph unicode="&#xe219;" d="M200 1100h800q83 0 141.5 -58.5t58.5 -141.5v-200h-100q0 41 -29.5 70.5t-70.5 29.5h-250q-41 0 -70.5 -29.5t-29.5 -70.5h-100q0 41 -29.5 70.5t-70.5 29.5h-250q-41 0 -70.5 -29.5t-29.5 -70.5h-100v200q0 83 58.5 141.5t141.5 58.5zM100 600h1000q41 0 70.5 -29.5 t29.5 -70.5v-300h-1200v300q0 41 29.5 70.5t70.5 29.5zM300 100v-50q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v50h200zM1100 100v-50q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v50h200z" />
-<glyph unicode="&#xe221;" d="M480 1165l682 -683q31 -31 31 -75.5t-31 -75.5l-131 -131h-481l-517 518q-32 31 -32 75.5t32 75.5l295 296q31 31 75.5 31t76.5 -31zM108 794l342 -342l303 304l-341 341zM250 100h800q21 0 35.5 -14.5t14.5 -35.5v-50h-900v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe223;" d="M1057 647l-189 506q-8 19 -27.5 33t-40.5 14h-400q-21 0 -40.5 -14t-27.5 -33l-189 -506q-8 -19 1.5 -33t30.5 -14h625v-150q0 -21 14.5 -35.5t35.5 -14.5t35.5 14.5t14.5 35.5v150h125q21 0 30.5 14t1.5 33zM897 0h-595v50q0 21 14.5 35.5t35.5 14.5h50v50 q0 21 14.5 35.5t35.5 14.5h48v300h200v-300h47q21 0 35.5 -14.5t14.5 -35.5v-50h50q21 0 35.5 -14.5t14.5 -35.5v-50z" />
-<glyph unicode="&#xe224;" d="M900 800h300v-575q0 -10 -7.5 -17.5t-17.5 -7.5h-375v591l-300 300v84q0 10 7.5 17.5t17.5 7.5h375v-400zM1200 900h-200v200zM400 600h300v-575q0 -10 -7.5 -17.5t-17.5 -7.5h-650q-10 0 -17.5 7.5t-7.5 17.5v950q0 10 7.5 17.5t17.5 7.5h375v-400zM700 700h-200v200z " />
-<glyph unicode="&#xe225;" d="M484 1095h195q75 0 146 -32.5t124 -86t89.5 -122.5t48.5 -142q18 -14 35 -20q31 -10 64.5 6.5t43.5 48.5q10 34 -15 71q-19 27 -9 43q5 8 12.5 11t19 -1t23.5 -16q41 -44 39 -105q-3 -63 -46 -106.5t-104 -43.5h-62q-7 -55 -35 -117t-56 -100l-39 -234q-3 -20 -20 -34.5 t-38 -14.5h-100q-21 0 -33 14.5t-9 34.5l12 70q-49 -14 -91 -14h-195q-24 0 -65 8l-11 -64q-3 -20 -20 -34.5t-38 -14.5h-100q-21 0 -33 14.5t-9 34.5l26 157q-84 74 -128 175l-159 53q-19 7 -33 26t-14 40v50q0 21 14.5 35.5t35.5 14.5h124q11 87 56 166l-111 95 q-16 14 -12.5 23.5t24.5 9.5h203q116 101 250 101zM675 1000h-250q-10 0 -17.5 -7.5t-7.5 -17.5v-50q0 -10 7.5 -17.5t17.5 -7.5h250q10 0 17.5 7.5t7.5 17.5v50q0 10 -7.5 17.5t-17.5 7.5z" />
-<glyph unicode="&#xe226;" d="M641 900l423 247q19 8 42 2.5t37 -21.5l32 -38q14 -15 12.5 -36t-17.5 -34l-139 -120h-390zM50 1100h106q67 0 103 -17t66 -71l102 -212h823q21 0 35.5 -14.5t14.5 -35.5v-50q0 -21 -14 -40t-33 -26l-737 -132q-23 -4 -40 6t-26 25q-42 67 -100 67h-300q-62 0 -106 44 t-44 106v200q0 62 44 106t106 44zM173 928h-80q-19 0 -28 -14t-9 -35v-56q0 -51 42 -51h134q16 0 21.5 8t5.5 24q0 11 -16 45t-27 51q-18 28 -43 28zM550 727q-32 0 -54.5 -22.5t-22.5 -54.5t22.5 -54.5t54.5 -22.5t54.5 22.5t22.5 54.5t-22.5 54.5t-54.5 22.5zM130 389 l152 130q18 19 34 24t31 -3.5t24.5 -17.5t25.5 -28q28 -35 50.5 -51t48.5 -13l63 5l48 -179q13 -61 -3.5 -97.5t-67.5 -79.5l-80 -69q-47 -40 -109 -35.5t-103 51.5l-130 151q-40 47 -35.5 109.5t51.5 102.5zM380 377l-102 -88q-31 -27 2 -65l37 -43q13 -15 27.5 -19.5 t31.5 6.5l61 53q19 16 14 49q-2 20 -12 56t-17 45q-11 12 -19 14t-23 -8z" />
-<glyph unicode="&#xe227;" d="M625 1200h150q10 0 17.5 -7.5t7.5 -17.5v-109q79 -33 131 -87.5t53 -128.5q1 -46 -15 -84.5t-39 -61t-46 -38t-39 -21.5l-17 -6q6 0 15 -1.5t35 -9t50 -17.5t53 -30t50 -45t35.5 -64t14.5 -84q0 -59 -11.5 -105.5t-28.5 -76.5t-44 -51t-49.5 -31.5t-54.5 -16t-49.5 -6.5 t-43.5 -1v-75q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v75h-100v-75q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v75h-175q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h75v600h-75q-10 0 -17.5 7.5t-7.5 17.5v150 q0 10 7.5 17.5t17.5 7.5h175v75q0 10 7.5 17.5t17.5 7.5h150q10 0 17.5 -7.5t7.5 -17.5v-75h100v75q0 10 7.5 17.5t17.5 7.5zM400 900v-200h263q28 0 48.5 10.5t30 25t15 29t5.5 25.5l1 10q0 4 -0.5 11t-6 24t-15 30t-30 24t-48.5 11h-263zM400 500v-200h363q28 0 48.5 10.5 t30 25t15 29t5.5 25.5l1 10q0 4 -0.5 11t-6 24t-15 30t-30 24t-48.5 11h-363z" />
-<glyph unicode="&#xe230;" d="M212 1198h780q86 0 147 -61t61 -147v-416q0 -51 -18 -142.5t-36 -157.5l-18 -66q-29 -87 -93.5 -146.5t-146.5 -59.5h-572q-82 0 -147 59t-93 147q-8 28 -20 73t-32 143.5t-20 149.5v416q0 86 61 147t147 61zM600 1045q-70 0 -132.5 -11.5t-105.5 -30.5t-78.5 -41.5 t-57 -45t-36 -41t-20.5 -30.5l-6 -12l156 -243h560l156 243q-2 5 -6 12.5t-20 29.5t-36.5 42t-57 44.5t-79 42t-105 29.5t-132.5 12zM762 703h-157l195 261z" />
-<glyph unicode="&#xe231;" d="M475 1300h150q103 0 189 -86t86 -189v-500q0 -41 -42 -83t-83 -42h-450q-41 0 -83 42t-42 83v500q0 103 86 189t189 86zM700 300v-225q0 -21 -27 -48t-48 -27h-150q-21 0 -48 27t-27 48v225h300z" />
-<glyph unicode="&#xe232;" d="M475 1300h96q0 -150 89.5 -239.5t239.5 -89.5v-446q0 -41 -42 -83t-83 -42h-450q-41 0 -83 42t-42 83v500q0 103 86 189t189 86zM700 300v-225q0 -21 -27 -48t-48 -27h-150q-21 0 -48 27t-27 48v225h300z" />
-<glyph unicode="&#xe233;" d="M1294 767l-638 -283l-378 170l-78 -60v-224l100 -150v-199l-150 148l-150 -149v200l100 150v250q0 4 -0.5 10.5t0 9.5t1 8t3 8t6.5 6l47 40l-147 65l642 283zM1000 380l-350 -166l-350 166v147l350 -165l350 165v-147z" />
-<glyph unicode="&#xe234;" d="M250 800q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44zM650 800q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44zM1050 800q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44z" />
-<glyph unicode="&#xe235;" d="M550 1100q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44zM550 700q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44zM550 300q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44z" />
-<glyph unicode="&#xe236;" d="M125 1100h950q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-950q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM125 700h950q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-950q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5 t17.5 7.5zM125 300h950q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-950q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe237;" d="M350 1200h500q162 0 256 -93.5t94 -256.5v-500q0 -165 -93.5 -257.5t-256.5 -92.5h-500q-165 0 -257.5 92.5t-92.5 257.5v500q0 165 92.5 257.5t257.5 92.5zM900 1000h-600q-41 0 -70.5 -29.5t-29.5 -70.5v-600q0 -41 29.5 -70.5t70.5 -29.5h600q41 0 70.5 29.5 t29.5 70.5v600q0 41 -29.5 70.5t-70.5 29.5zM350 900h500q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -14.5 -35.5t-35.5 -14.5h-500q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 14.5 35.5t35.5 14.5zM400 800v-200h400v200h-400z" />
-<glyph unicode="&#xe238;" d="M150 1100h1000q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-50v-200h50q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-50v-200h50q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-50v-200h50q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5 t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5h50v200h-50q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5h50v200h-50q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5h50v200h-50q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe239;" d="M650 1187q87 -67 118.5 -156t0 -178t-118.5 -155q-87 66 -118.5 155t0 178t118.5 156zM300 800q124 0 212 -88t88 -212q-124 0 -212 88t-88 212zM1000 800q0 -124 -88 -212t-212 -88q0 124 88 212t212 88zM300 500q124 0 212 -88t88 -212q-124 0 -212 88t-88 212z M1000 500q0 -124 -88 -212t-212 -88q0 124 88 212t212 88zM700 199v-144q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v142q40 -4 43 -4q17 0 57 6z" />
-<glyph unicode="&#xe240;" d="M745 878l69 19q25 6 45 -12l298 -295q11 -11 15 -26.5t-2 -30.5q-5 -14 -18 -23.5t-28 -9.5h-8q1 0 1 -13q0 -29 -2 -56t-8.5 -62t-20 -63t-33 -53t-51 -39t-72.5 -14h-146q-184 0 -184 288q0 24 10 47q-20 4 -62 4t-63 -4q11 -24 11 -47q0 -288 -184 -288h-142 q-48 0 -84.5 21t-56 51t-32 71.5t-16 75t-3.5 68.5q0 13 2 13h-7q-15 0 -27.5 9.5t-18.5 23.5q-6 15 -2 30.5t15 25.5l298 296q20 18 46 11l76 -19q20 -5 30.5 -22.5t5.5 -37.5t-22.5 -31t-37.5 -5l-51 12l-182 -193h891l-182 193l-44 -12q-20 -5 -37.5 6t-22.5 31t6 37.5 t31 22.5z" />
-<glyph unicode="&#xe241;" d="M1200 900h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-200v-850q0 -22 25 -34.5t50 -13.5l25 -2v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v850h-200q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h1000v-300zM500 450h-25q0 15 -4 24.5t-9 14.5t-17 7.5t-20 3t-25 0.5h-100v-425q0 -11 12.5 -17.5t25.5 -7.5h12v-50h-200v50q50 0 50 25v425h-100q-17 0 -25 -0.5t-20 -3t-17 -7.5t-9 -14.5t-4 -24.5h-25v150h500v-150z" />
-<glyph unicode="&#xe242;" d="M1000 300v50q-25 0 -55 32q-14 14 -25 31t-16 27l-4 11l-289 747h-69l-300 -754q-18 -35 -39 -56q-9 -9 -24.5 -18.5t-26.5 -14.5l-11 -5v-50h273v50q-49 0 -78.5 21.5t-11.5 67.5l69 176h293l61 -166q13 -34 -3.5 -66.5t-55.5 -32.5v-50h312zM412 691l134 342l121 -342 h-255zM1100 150v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5z" />
-<glyph unicode="&#xe243;" d="M50 1200h1100q21 0 35.5 -14.5t14.5 -35.5v-1100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v1100q0 21 14.5 35.5t35.5 14.5zM611 1118h-70q-13 0 -18 -12l-299 -753q-17 -32 -35 -51q-18 -18 -56 -34q-12 -5 -12 -18v-50q0 -8 5.5 -14t14.5 -6 h273q8 0 14 6t6 14v50q0 8 -6 14t-14 6q-55 0 -71 23q-10 14 0 39l63 163h266l57 -153q11 -31 -6 -55q-12 -17 -36 -17q-8 0 -14 -6t-6 -14v-50q0 -8 6 -14t14 -6h313q8 0 14 6t6 14v50q0 7 -5.5 13t-13.5 7q-17 0 -42 25q-25 27 -40 63h-1l-288 748q-5 12 -19 12zM639 611 h-197l103 264z" />
-<glyph unicode="&#xe244;" d="M1200 1100h-1200v100h1200v-100zM50 1000h400q21 0 35.5 -14.5t14.5 -35.5v-900q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v900q0 21 14.5 35.5t35.5 14.5zM650 1000h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM700 900v-300h300v300h-300z" />
-<glyph unicode="&#xe245;" d="M50 1200h400q21 0 35.5 -14.5t14.5 -35.5v-900q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v900q0 21 14.5 35.5t35.5 14.5zM650 700h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400 q0 21 14.5 35.5t35.5 14.5zM700 600v-300h300v300h-300zM1200 0h-1200v100h1200v-100z" />
-<glyph unicode="&#xe246;" d="M50 1000h400q21 0 35.5 -14.5t14.5 -35.5v-350h100v150q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-150h100v-100h-100v-150q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v150h-100v-350q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5v800q0 21 14.5 35.5t35.5 14.5zM700 700v-300h300v300h-300z" />
-<glyph unicode="&#xe247;" d="M100 0h-100v1200h100v-1200zM250 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM300 1000v-300h300v300h-300zM250 500h900q21 0 35.5 -14.5t14.5 -35.5v-400 q0 -21 -14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe248;" d="M600 1100h150q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-150v-100h450q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5h350v100h-150q-21 0 -35.5 14.5 t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5h150v100h100v-100zM400 1000v-300h300v300h-300z" />
-<glyph unicode="&#xe249;" d="M1200 0h-100v1200h100v-1200zM550 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM600 1000v-300h300v300h-300zM50 500h900q21 0 35.5 -14.5t14.5 -35.5v-400 q0 -21 -14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe250;" d="M865 565l-494 -494q-23 -23 -41 -23q-14 0 -22 13.5t-8 38.5v1000q0 25 8 38.5t22 13.5q18 0 41 -23l494 -494q14 -14 14 -35t-14 -35z" />
-<glyph unicode="&#xe251;" d="M335 635l494 494q29 29 50 20.5t21 -49.5v-1000q0 -41 -21 -49.5t-50 20.5l-494 494q-14 14 -14 35t14 35z" />
-<glyph unicode="&#xe252;" d="M100 900h1000q41 0 49.5 -21t-20.5 -50l-494 -494q-14 -14 -35 -14t-35 14l-494 494q-29 29 -20.5 50t49.5 21z" />
-<glyph unicode="&#xe253;" d="M635 865l494 -494q29 -29 20.5 -50t-49.5 -21h-1000q-41 0 -49.5 21t20.5 50l494 494q14 14 35 14t35 -14z" />
-<glyph unicode="&#xe254;" d="M700 741v-182l-692 -323v221l413 193l-413 193v221zM1200 0h-800v200h800v-200z" />
-<glyph unicode="&#xe255;" d="M1200 900h-200v-100h200v-100h-300v300h200v100h-200v100h300v-300zM0 700h50q0 21 4 37t9.5 26.5t18 17.5t22 11t28.5 5.5t31 2t37 0.5h100v-550q0 -22 -25 -34.5t-50 -13.5l-25 -2v-100h400v100q-4 0 -11 0.5t-24 3t-30 7t-24 15t-11 24.5v550h100q25 0 37 -0.5t31 -2 t28.5 -5.5t22 -11t18 -17.5t9.5 -26.5t4 -37h50v300h-800v-300z" />
-<glyph unicode="&#xe256;" d="M800 700h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-100v-550q0 -22 25 -34.5t50 -14.5l25 -1v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v550h-100q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h800v-300zM1100 200h-200v-100h200v-100h-300v300h200v100h-200v100h300v-300z" />
-<glyph unicode="&#xe257;" d="M701 1098h160q16 0 21 -11t-7 -23l-464 -464l464 -464q12 -12 7 -23t-21 -11h-160q-13 0 -23 9l-471 471q-7 8 -7 18t7 18l471 471q10 9 23 9z" />
-<glyph unicode="&#xe258;" d="M339 1098h160q13 0 23 -9l471 -471q7 -8 7 -18t-7 -18l-471 -471q-10 -9 -23 -9h-160q-16 0 -21 11t7 23l464 464l-464 464q-12 12 -7 23t21 11z" />
-<glyph unicode="&#xe259;" d="M1087 882q11 -5 11 -21v-160q0 -13 -9 -23l-471 -471q-8 -7 -18 -7t-18 7l-471 471q-9 10 -9 23v160q0 16 11 21t23 -7l464 -464l464 464q12 12 23 7z" />
-<glyph unicode="&#xe260;" d="M618 993l471 -471q9 -10 9 -23v-160q0 -16 -11 -21t-23 7l-464 464l-464 -464q-12 -12 -23 -7t-11 21v160q0 13 9 23l471 471q8 7 18 7t18 -7z" />
-<glyph unicode="&#xf8ff;" d="M1000 1200q0 -124 -88 -212t-212 -88q0 124 88 212t212 88zM450 1000h100q21 0 40 -14t26 -33l79 -194q5 1 16 3q34 6 54 9.5t60 7t65.5 1t61 -10t56.5 -23t42.5 -42t29 -64t5 -92t-19.5 -121.5q-1 -7 -3 -19.5t-11 -50t-20.5 -73t-32.5 -81.5t-46.5 -83t-64 -70 t-82.5 -50q-13 -5 -42 -5t-65.5 2.5t-47.5 2.5q-14 0 -49.5 -3.5t-63 -3.5t-43.5 7q-57 25 -104.5 78.5t-75 111.5t-46.5 112t-26 90l-7 35q-15 63 -18 115t4.5 88.5t26 64t39.5 43.5t52 25.5t58.5 13t62.5 2t59.5 -4.5t55.5 -8l-147 192q-12 18 -5.5 30t27.5 12z" />
-<glyph unicode="&#x1f511;" d="M250 1200h600q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-150v-500l-255 -178q-19 -9 -32 -1t-13 29v650h-150q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM400 1100v-100h300v100h-300z" />
-<glyph unicode="&#x1f6aa;" d="M250 1200h750q39 0 69.5 -40.5t30.5 -84.5v-933l-700 -117v950l600 125h-700v-1000h-100v1025q0 23 15.5 49t34.5 26zM500 525v-100l100 20v100z" />
-</font>
-</defs></svg>
\ No newline at end of file
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.ttf b/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.ttf
deleted file mode 100644
index 1413fc6..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.ttf
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff b/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff
deleted file mode 100644
index 9e61285..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff2 b/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff2
deleted file mode 100644
index 64539b5..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff2
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/js/bootstrap.min.js b/hadoop-hdds/docs/themes/ozonedoc/static/js/bootstrap.min.js
deleted file mode 100644
index eb0a8b4..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/static/js/bootstrap.min.js
+++ /dev/null
@@ -1,6 +0,0 @@
-/*!
- * Bootstrap v3.4.1 (https://getbootstrap.com/)
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under the MIT license
- */
-if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery");!function(t){"use strict";var e=jQuery.fn.jquery.split(" ")[0].split(".");if(e[0]<2&&e[1]<9||1==e[0]&&9==e[1]&&e[2]<1||3<e[0])throw new Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher, but lower than version 4")}(),function(n){"use strict";n.fn.emulateTransitionEnd=function(t){var e=!1,i=this;n(this).one("bsTransitionEnd",function(){e=!0});return setTimeout(function(){e||n(i).trigger(n.support.transition.end)},t),this},n(function(){n.support.transition=function o(){var t=document.createElement("bootstrap"),e={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var i in e)if(t.style[i]!==undefined)return{end:e[i]};return!1}(),n.support.transition&&(n.event.special.bsTransitionEnd={bindType:n.support.transition.end,delegateType:n.support.transition.end,handle:function(t){if(n(t.target).is(this))return t.handleObj.handler.apply(this,arguments)}})})}(jQuery),function(s){"use strict";var e='[data-dismiss="alert"]',a=function(t){s(t).on("click",e,this.close)};a.VERSION="3.4.1",a.TRANSITION_DURATION=150,a.prototype.close=function(t){var e=s(this),i=e.attr("data-target");i||(i=(i=e.attr("href"))&&i.replace(/.*(?=#[^\s]*$)/,"")),i="#"===i?[]:i;var o=s(document).find(i);function n(){o.detach().trigger("closed.bs.alert").remove()}t&&t.preventDefault(),o.length||(o=e.closest(".alert")),o.trigger(t=s.Event("close.bs.alert")),t.isDefaultPrevented()||(o.removeClass("in"),s.support.transition&&o.hasClass("fade")?o.one("bsTransitionEnd",n).emulateTransitionEnd(a.TRANSITION_DURATION):n())};var t=s.fn.alert;s.fn.alert=function o(i){return this.each(function(){var t=s(this),e=t.data("bs.alert");e||t.data("bs.alert",e=new a(this)),"string"==typeof i&&e[i].call(t)})},s.fn.alert.Constructor=a,s.fn.alert.noConflict=function(){return s.fn.alert=t,this},s(document).on("click.bs.alert.data-api",e,a.prototype.close)}(jQuery),function(s){"use strict";var n=function(t,e){this.$element=s(t),this.options=s.extend({},n.DEFAULTS,e),this.isLoading=!1};function i(o){return this.each(function(){var t=s(this),e=t.data("bs.button"),i="object"==typeof o&&o;e||t.data("bs.button",e=new n(this,i)),"toggle"==o?e.toggle():o&&e.setState(o)})}n.VERSION="3.4.1",n.DEFAULTS={loadingText:"loading..."},n.prototype.setState=function(t){var e="disabled",i=this.$element,o=i.is("input")?"val":"html",n=i.data();t+="Text",null==n.resetText&&i.data("resetText",i[o]()),setTimeout(s.proxy(function(){i[o](null==n[t]?this.options[t]:n[t]),"loadingText"==t?(this.isLoading=!0,i.addClass(e).attr(e,e).prop(e,!0)):this.isLoading&&(this.isLoading=!1,i.removeClass(e).removeAttr(e).prop(e,!1))},this),0)},n.prototype.toggle=function(){var t=!0,e=this.$element.closest('[data-toggle="buttons"]');if(e.length){var i=this.$element.find("input");"radio"==i.prop("type")?(i.prop("checked")&&(t=!1),e.find(".active").removeClass("active"),this.$element.addClass("active")):"checkbox"==i.prop("type")&&(i.prop("checked")!==this.$element.hasClass("active")&&(t=!1),this.$element.toggleClass("active")),i.prop("checked",this.$element.hasClass("active")),t&&i.trigger("change")}else this.$element.attr("aria-pressed",!this.$element.hasClass("active")),this.$element.toggleClass("active")};var t=s.fn.button;s.fn.button=i,s.fn.button.Constructor=n,s.fn.button.noConflict=function(){return 
s.fn.button=t,this},s(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(t){var e=s(t.target).closest(".btn");i.call(e,"toggle"),s(t.target).is('input[type="radio"], input[type="checkbox"]')||(t.preventDefault(),e.is("input,button")?e.trigger("focus"):e.find("input:visible,button:visible").first().trigger("focus"))}).on("focus.bs.button.data-api blur.bs.button.data-api",'[data-toggle^="button"]',function(t){s(t.target).closest(".btn").toggleClass("focus",/^focus(in)?$/.test(t.type))})}(jQuery),function(p){"use strict";var c=function(t,e){this.$element=p(t),this.$indicators=this.$element.find(".carousel-indicators"),this.options=e,this.paused=null,this.sliding=null,this.interval=null,this.$active=null,this.$items=null,this.options.keyboard&&this.$element.on("keydown.bs.carousel",p.proxy(this.keydown,this)),"hover"==this.options.pause&&!("ontouchstart"in document.documentElement)&&this.$element.on("mouseenter.bs.carousel",p.proxy(this.pause,this)).on("mouseleave.bs.carousel",p.proxy(this.cycle,this))};function r(n){return this.each(function(){var t=p(this),e=t.data("bs.carousel"),i=p.extend({},c.DEFAULTS,t.data(),"object"==typeof n&&n),o="string"==typeof n?n:i.slide;e||t.data("bs.carousel",e=new c(this,i)),"number"==typeof n?e.to(n):o?e[o]():i.interval&&e.pause().cycle()})}c.VERSION="3.4.1",c.TRANSITION_DURATION=600,c.DEFAULTS={interval:5e3,pause:"hover",wrap:!0,keyboard:!0},c.prototype.keydown=function(t){if(!/input|textarea/i.test(t.target.tagName)){switch(t.which){case 37:this.prev();break;case 39:this.next();break;default:return}t.preventDefault()}},c.prototype.cycle=function(t){return t||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(p.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(t){return this.$items=t.parent().children(".item"),this.$items.index(t||this.$active)},c.prototype.getItemForDirection=function(t,e){var i=this.getItemIndex(e);if(("prev"==t&&0===i||"next"==t&&i==this.$items.length-1)&&!this.options.wrap)return e;var o=(i+("prev"==t?-1:1))%this.$items.length;return this.$items.eq(o)},c.prototype.to=function(t){var e=this,i=this.getItemIndex(this.$active=this.$element.find(".item.active"));if(!(t>this.$items.length-1||t<0))return this.sliding?this.$element.one("slid.bs.carousel",function(){e.to(t)}):i==t?this.pause().cycle():this.slide(i<t?"next":"prev",this.$items.eq(t))},c.prototype.pause=function(t){return t||(this.paused=!0),this.$element.find(".next, .prev").length&&p.support.transition&&(this.$element.trigger(p.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){if(!this.sliding)return this.slide("next")},c.prototype.prev=function(){if(!this.sliding)return this.slide("prev")},c.prototype.slide=function(t,e){var i=this.$element.find(".item.active"),o=e||this.getItemForDirection(t,i),n=this.interval,s="next"==t?"left":"right",a=this;if(o.hasClass("active"))return this.sliding=!1;var r=o[0],l=p.Event("slide.bs.carousel",{relatedTarget:r,direction:s});if(this.$element.trigger(l),!l.isDefaultPrevented()){if(this.sliding=!0,n&&this.pause(),this.$indicators.length){this.$indicators.find(".active").removeClass("active");var h=p(this.$indicators.children()[this.getItemIndex(o)]);h&&h.addClass("active")}var d=p.Event("slid.bs.carousel",{relatedTarget:r,direction:s});return p.support.transition&&this.$element.hasClass("slide")?(o.addClass(t),"object"==typeof 
o&&o.length&&o[0].offsetWidth,i.addClass(s),o.addClass(s),i.one("bsTransitionEnd",function(){o.removeClass([t,s].join(" ")).addClass("active"),i.removeClass(["active",s].join(" ")),a.sliding=!1,setTimeout(function(){a.$element.trigger(d)},0)}).emulateTransitionEnd(c.TRANSITION_DURATION)):(i.removeClass("active"),o.addClass("active"),this.sliding=!1,this.$element.trigger(d)),n&&this.cycle(),this}};var t=p.fn.carousel;p.fn.carousel=r,p.fn.carousel.Constructor=c,p.fn.carousel.noConflict=function(){return p.fn.carousel=t,this};var e=function(t){var e=p(this),i=e.attr("href");i&&(i=i.replace(/.*(?=#[^\s]+$)/,""));var o=e.attr("data-target")||i,n=p(document).find(o);if(n.hasClass("carousel")){var s=p.extend({},n.data(),e.data()),a=e.attr("data-slide-to");a&&(s.interval=!1),r.call(n,s),a&&n.data("bs.carousel").to(a),t.preventDefault()}};p(document).on("click.bs.carousel.data-api","[data-slide]",e).on("click.bs.carousel.data-api","[data-slide-to]",e),p(window).on("load",function(){p('[data-ride="carousel"]').each(function(){var t=p(this);r.call(t,t.data())})})}(jQuery),function(a){"use strict";var r=function(t,e){this.$element=a(t),this.options=a.extend({},r.DEFAULTS,e),this.$trigger=a('[data-toggle="collapse"][href="#'+t.id+'"],[data-toggle="collapse"][data-target="#'+t.id+'"]'),this.transitioning=null,this.options.parent?this.$parent=this.getParent():this.addAriaAndCollapsedClass(this.$element,this.$trigger),this.options.toggle&&this.toggle()};function n(t){var e,i=t.attr("data-target")||(e=t.attr("href"))&&e.replace(/.*(?=#[^\s]+$)/,"");return a(document).find(i)}function l(o){return this.each(function(){var t=a(this),e=t.data("bs.collapse"),i=a.extend({},r.DEFAULTS,t.data(),"object"==typeof o&&o);!e&&i.toggle&&/show|hide/.test(o)&&(i.toggle=!1),e||t.data("bs.collapse",e=new r(this,i)),"string"==typeof o&&e[o]()})}r.VERSION="3.4.1",r.TRANSITION_DURATION=350,r.DEFAULTS={toggle:!0},r.prototype.dimension=function(){return this.$element.hasClass("width")?"width":"height"},r.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var t,e=this.$parent&&this.$parent.children(".panel").children(".in, .collapsing");if(!(e&&e.length&&(t=e.data("bs.collapse"))&&t.transitioning)){var i=a.Event("show.bs.collapse");if(this.$element.trigger(i),!i.isDefaultPrevented()){e&&e.length&&(l.call(e,"hide"),t||e.data("bs.collapse",null));var o=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[o](0).attr("aria-expanded",!0),this.$trigger.removeClass("collapsed").attr("aria-expanded",!0),this.transitioning=1;var n=function(){this.$element.removeClass("collapsing").addClass("collapse in")[o](""),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return n.call(this);var s=a.camelCase(["scroll",o].join("-"));this.$element.one("bsTransitionEnd",a.proxy(n,this)).emulateTransitionEnd(r.TRANSITION_DURATION)[o](this.$element[0][s])}}}},r.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var t=a.Event("hide.bs.collapse");if(this.$element.trigger(t),!t.isDefaultPrevented()){var e=this.dimension();this.$element[e](this.$element[e]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse in").attr("aria-expanded",!1),this.$trigger.addClass("collapsed").attr("aria-expanded",!1),this.transitioning=1;var i=function(){this.transitioning=0,this.$element.removeClass("collapsing").addClass("collapse").trigger("hidden.bs.collapse")};if(!a.support.transition)return 
i.call(this);this.$element[e](0).one("bsTransitionEnd",a.proxy(i,this)).emulateTransitionEnd(r.TRANSITION_DURATION)}}},r.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()},r.prototype.getParent=function(){return a(document).find(this.options.parent).find('[data-toggle="collapse"][data-parent="'+this.options.parent+'"]').each(a.proxy(function(t,e){var i=a(e);this.addAriaAndCollapsedClass(n(i),i)},this)).end()},r.prototype.addAriaAndCollapsedClass=function(t,e){var i=t.hasClass("in");t.attr("aria-expanded",i),e.toggleClass("collapsed",!i).attr("aria-expanded",i)};var t=a.fn.collapse;a.fn.collapse=l,a.fn.collapse.Constructor=r,a.fn.collapse.noConflict=function(){return a.fn.collapse=t,this},a(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',function(t){var e=a(this);e.attr("data-target")||t.preventDefault();var i=n(e),o=i.data("bs.collapse")?"toggle":e.data();l.call(i,o)})}(jQuery),function(a){"use strict";var r='[data-toggle="dropdown"]',o=function(t){a(t).on("click.bs.dropdown",this.toggle)};function l(t){var e=t.attr("data-target");e||(e=(e=t.attr("href"))&&/#[A-Za-z]/.test(e)&&e.replace(/.*(?=#[^\s]*$)/,""));var i="#"!==e?a(document).find(e):null;return i&&i.length?i:t.parent()}function s(o){o&&3===o.which||(a(".dropdown-backdrop").remove(),a(r).each(function(){var t=a(this),e=l(t),i={relatedTarget:this};e.hasClass("open")&&(o&&"click"==o.type&&/input|textarea/i.test(o.target.tagName)&&a.contains(e[0],o.target)||(e.trigger(o=a.Event("hide.bs.dropdown",i)),o.isDefaultPrevented()||(t.attr("aria-expanded","false"),e.removeClass("open").trigger(a.Event("hidden.bs.dropdown",i)))))}))}o.VERSION="3.4.1",o.prototype.toggle=function(t){var e=a(this);if(!e.is(".disabled, :disabled")){var i=l(e),o=i.hasClass("open");if(s(),!o){"ontouchstart"in document.documentElement&&!i.closest(".navbar-nav").length&&a(document.createElement("div")).addClass("dropdown-backdrop").insertAfter(a(this)).on("click",s);var n={relatedTarget:this};if(i.trigger(t=a.Event("show.bs.dropdown",n)),t.isDefaultPrevented())return;e.trigger("focus").attr("aria-expanded","true"),i.toggleClass("open").trigger(a.Event("shown.bs.dropdown",n))}return!1}},o.prototype.keydown=function(t){if(/(38|40|27|32)/.test(t.which)&&!/input|textarea/i.test(t.target.tagName)){var e=a(this);if(t.preventDefault(),t.stopPropagation(),!e.is(".disabled, :disabled")){var i=l(e),o=i.hasClass("open");if(!o&&27!=t.which||o&&27==t.which)return 27==t.which&&i.find(r).trigger("focus"),e.trigger("click");var n=i.find(".dropdown-menu li:not(.disabled):visible a");if(n.length){var s=n.index(t.target);38==t.which&&0<s&&s--,40==t.which&&s<n.length-1&&s++,~s||(s=0),n.eq(s).trigger("focus")}}}};var t=a.fn.dropdown;a.fn.dropdown=function e(i){return this.each(function(){var t=a(this),e=t.data("bs.dropdown");e||t.data("bs.dropdown",e=new o(this)),"string"==typeof i&&e[i].call(t)})},a.fn.dropdown.Constructor=o,a.fn.dropdown.noConflict=function(){return a.fn.dropdown=t,this},a(document).on("click.bs.dropdown.data-api",s).on("click.bs.dropdown.data-api",".dropdown form",function(t){t.stopPropagation()}).on("click.bs.dropdown.data-api",r,o.prototype.toggle).on("keydown.bs.dropdown.data-api",r,o.prototype.keydown).on("keydown.bs.dropdown.data-api",".dropdown-menu",o.prototype.keydown)}(jQuery),function(a){"use strict";var 
s=function(t,e){this.options=e,this.$body=a(document.body),this.$element=a(t),this.$dialog=this.$element.find(".modal-dialog"),this.$backdrop=null,this.isShown=null,this.originalBodyPad=null,this.scrollbarWidth=0,this.ignoreBackdropClick=!1,this.fixedContent=".navbar-fixed-top, .navbar-fixed-bottom",this.options.remote&&this.$element.find(".modal-content").load(this.options.remote,a.proxy(function(){this.$element.trigger("loaded.bs.modal")},this))};function r(o,n){return this.each(function(){var t=a(this),e=t.data("bs.modal"),i=a.extend({},s.DEFAULTS,t.data(),"object"==typeof o&&o);e||t.data("bs.modal",e=new s(this,i)),"string"==typeof o?e[o](n):i.show&&e.show(n)})}s.VERSION="3.4.1",s.TRANSITION_DURATION=300,s.BACKDROP_TRANSITION_DURATION=150,s.DEFAULTS={backdrop:!0,keyboard:!0,show:!0},s.prototype.toggle=function(t){return this.isShown?this.hide():this.show(t)},s.prototype.show=function(i){var o=this,t=a.Event("show.bs.modal",{relatedTarget:i});this.$element.trigger(t),this.isShown||t.isDefaultPrevented()||(this.isShown=!0,this.checkScrollbar(),this.setScrollbar(),this.$body.addClass("modal-open"),this.escape(),this.resize(),this.$element.on("click.dismiss.bs.modal",'[data-dismiss="modal"]',a.proxy(this.hide,this)),this.$dialog.on("mousedown.dismiss.bs.modal",function(){o.$element.one("mouseup.dismiss.bs.modal",function(t){a(t.target).is(o.$element)&&(o.ignoreBackdropClick=!0)})}),this.backdrop(function(){var t=a.support.transition&&o.$element.hasClass("fade");o.$element.parent().length||o.$element.appendTo(o.$body),o.$element.show().scrollTop(0),o.adjustDialog(),t&&o.$element[0].offsetWidth,o.$element.addClass("in"),o.enforceFocus();var e=a.Event("shown.bs.modal",{relatedTarget:i});t?o.$dialog.one("bsTransitionEnd",function(){o.$element.trigger("focus").trigger(e)}).emulateTransitionEnd(s.TRANSITION_DURATION):o.$element.trigger("focus").trigger(e)}))},s.prototype.hide=function(t){t&&t.preventDefault(),t=a.Event("hide.bs.modal"),this.$element.trigger(t),this.isShown&&!t.isDefaultPrevented()&&(this.isShown=!1,this.escape(),this.resize(),a(document).off("focusin.bs.modal"),this.$element.removeClass("in").off("click.dismiss.bs.modal").off("mouseup.dismiss.bs.modal"),this.$dialog.off("mousedown.dismiss.bs.modal"),a.support.transition&&this.$element.hasClass("fade")?this.$element.one("bsTransitionEnd",a.proxy(this.hideModal,this)).emulateTransitionEnd(s.TRANSITION_DURATION):this.hideModal())},s.prototype.enforceFocus=function(){a(document).off("focusin.bs.modal").on("focusin.bs.modal",a.proxy(function(t){document===t.target||this.$element[0]===t.target||this.$element.has(t.target).length||this.$element.trigger("focus")},this))},s.prototype.escape=function(){this.isShown&&this.options.keyboard?this.$element.on("keydown.dismiss.bs.modal",a.proxy(function(t){27==t.which&&this.hide()},this)):this.isShown||this.$element.off("keydown.dismiss.bs.modal")},s.prototype.resize=function(){this.isShown?a(window).on("resize.bs.modal",a.proxy(this.handleUpdate,this)):a(window).off("resize.bs.modal")},s.prototype.hideModal=function(){var t=this;this.$element.hide(),this.backdrop(function(){t.$body.removeClass("modal-open"),t.resetAdjustments(),t.resetScrollbar(),t.$element.trigger("hidden.bs.modal")})},s.prototype.removeBackdrop=function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},s.prototype.backdrop=function(t){var e=this,i=this.$element.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var 
o=a.support.transition&&i;if(this.$backdrop=a(document.createElement("div")).addClass("modal-backdrop "+i).appendTo(this.$body),this.$element.on("click.dismiss.bs.modal",a.proxy(function(t){this.ignoreBackdropClick?this.ignoreBackdropClick=!1:t.target===t.currentTarget&&("static"==this.options.backdrop?this.$element[0].focus():this.hide())},this)),o&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in"),!t)return;o?this.$backdrop.one("bsTransitionEnd",t).emulateTransitionEnd(s.BACKDROP_TRANSITION_DURATION):t()}else if(!this.isShown&&this.$backdrop){this.$backdrop.removeClass("in");var n=function(){e.removeBackdrop(),t&&t()};a.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one("bsTransitionEnd",n).emulateTransitionEnd(s.BACKDROP_TRANSITION_DURATION):n()}else t&&t()},s.prototype.handleUpdate=function(){this.adjustDialog()},s.prototype.adjustDialog=function(){var t=this.$element[0].scrollHeight>document.documentElement.clientHeight;this.$element.css({paddingLeft:!this.bodyIsOverflowing&&t?this.scrollbarWidth:"",paddingRight:this.bodyIsOverflowing&&!t?this.scrollbarWidth:""})},s.prototype.resetAdjustments=function(){this.$element.css({paddingLeft:"",paddingRight:""})},s.prototype.checkScrollbar=function(){var t=window.innerWidth;if(!t){var e=document.documentElement.getBoundingClientRect();t=e.right-Math.abs(e.left)}this.bodyIsOverflowing=document.body.clientWidth<t,this.scrollbarWidth=this.measureScrollbar()},s.prototype.setScrollbar=function(){var t=parseInt(this.$body.css("padding-right")||0,10);this.originalBodyPad=document.body.style.paddingRight||"";var n=this.scrollbarWidth;this.bodyIsOverflowing&&(this.$body.css("padding-right",t+n),a(this.fixedContent).each(function(t,e){var i=e.style.paddingRight,o=a(e).css("padding-right");a(e).data("padding-right",i).css("padding-right",parseFloat(o)+n+"px")}))},s.prototype.resetScrollbar=function(){this.$body.css("padding-right",this.originalBodyPad),a(this.fixedContent).each(function(t,e){var i=a(e).data("padding-right");a(e).removeData("padding-right"),e.style.paddingRight=i||""})},s.prototype.measureScrollbar=function(){var t=document.createElement("div");t.className="modal-scrollbar-measure",this.$body.append(t);var e=t.offsetWidth-t.clientWidth;return this.$body[0].removeChild(t),e};var t=a.fn.modal;a.fn.modal=r,a.fn.modal.Constructor=s,a.fn.modal.noConflict=function(){return a.fn.modal=t,this},a(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',function(t){var e=a(this),i=e.attr("href"),o=e.attr("data-target")||i&&i.replace(/.*(?=#[^\s]+$)/,""),n=a(document).find(o),s=n.data("bs.modal")?"toggle":a.extend({remote:!/#/.test(i)&&i},n.data(),e.data());e.is("a")&&t.preventDefault(),n.one("show.bs.modal",function(t){t.isDefaultPrevented()||n.one("hidden.bs.modal",function(){e.is(":visible")&&e.trigger("focus")})}),r.call(n,s,this)})}(jQuery),function(g){"use strict";var o=["sanitize","whiteList","sanitizeFn"],a=["background","cite","href","itemtype","longdesc","poster","src","xlink:href"],t={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},r=/^(?:(?:https?|mailto|ftp|tel|file):|[^&:/?#]*(?:[/?#]|$))/gi,l=/^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[a-z0-9+/]+=*$/i;function u(t,e){var 
i=t.nodeName.toLowerCase();if(-1!==g.inArray(i,e))return-1===g.inArray(i,a)||Boolean(t.nodeValue.match(r)||t.nodeValue.match(l));for(var o=g(e).filter(function(t,e){return e instanceof RegExp}),n=0,s=o.length;n<s;n++)if(i.match(o[n]))return!0;return!1}function n(t,e,i){if(0===t.length)return t;if(i&&"function"==typeof i)return i(t);if(!document.implementation||!document.implementation.createHTMLDocument)return t;var o=document.implementation.createHTMLDocument("sanitization");o.body.innerHTML=t;for(var n=g.map(e,function(t,e){return e}),s=g(o.body).find("*"),a=0,r=s.length;a<r;a++){var l=s[a],h=l.nodeName.toLowerCase();if(-1!==g.inArray(h,n))for(var d=g.map(l.attributes,function(t){return t}),p=[].concat(e["*"]||[],e[h]||[]),c=0,f=d.length;c<f;c++)u(d[c],p)||l.removeAttribute(d[c].nodeName);else l.parentNode.removeChild(l)}return o.body.innerHTML}var m=function(t,e){this.type=null,this.options=null,this.enabled=null,this.timeout=null,this.hoverState=null,this.$element=null,this.inState=null,this.init("tooltip",t,e)};m.VERSION="3.4.1",m.TRANSITION_DURATION=150,m.DEFAULTS={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,container:!1,viewport:{selector:"body",padding:0},sanitize:!0,sanitizeFn:null,whiteList:t},m.prototype.init=function(t,e,i){if(this.enabled=!0,this.type=t,this.$element=g(e),this.options=this.getOptions(i),this.$viewport=this.options.viewport&&g(document).find(g.isFunction(this.options.viewport)?this.options.viewport.call(this,this.$element):this.options.viewport.selector||this.options.viewport),this.inState={click:!1,hover:!1,focus:!1},this.$element[0]instanceof document.constructor&&!this.options.selector)throw new Error("`selector` option must be specified when initializing "+this.type+" on the window.document object!");for(var o=this.options.trigger.split(" "),n=o.length;n--;){var s=o[n];if("click"==s)this.$element.on("click."+this.type,this.options.selector,g.proxy(this.toggle,this));else if("manual"!=s){var a="hover"==s?"mouseenter":"focusin",r="hover"==s?"mouseleave":"focusout";this.$element.on(a+"."+this.type,this.options.selector,g.proxy(this.enter,this)),this.$element.on(r+"."+this.type,this.options.selector,g.proxy(this.leave,this))}}this.options.selector?this._options=g.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},m.prototype.getDefaults=function(){return m.DEFAULTS},m.prototype.getOptions=function(t){var e=this.$element.data();for(var i in e)e.hasOwnProperty(i)&&-1!==g.inArray(i,o)&&delete e[i];return(t=g.extend({},this.getDefaults(),e,t)).delay&&"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),t.sanitize&&(t.template=n(t.template,t.whiteList,t.sanitizeFn)),t},m.prototype.getDelegateOptions=function(){var i={},o=this.getDefaults();return this._options&&g.each(this._options,function(t,e){o[t]!=e&&(i[t]=e)}),i},m.prototype.enter=function(t){var e=t instanceof this.constructor?t:g(t.currentTarget).data("bs."+this.type);if(e||(e=new this.constructor(t.currentTarget,this.getDelegateOptions()),g(t.currentTarget).data("bs."+this.type,e)),t instanceof g.Event&&(e.inState["focusin"==t.type?"focus":"hover"]=!0),e.tip().hasClass("in")||"in"==e.hoverState)e.hoverState="in";else{if(clearTimeout(e.timeout),e.hoverState="in",!e.options.delay||!e.options.delay.show)return 
e.show();e.timeout=setTimeout(function(){"in"==e.hoverState&&e.show()},e.options.delay.show)}},m.prototype.isInStateTrue=function(){for(var t in this.inState)if(this.inState[t])return!0;return!1},m.prototype.leave=function(t){var e=t instanceof this.constructor?t:g(t.currentTarget).data("bs."+this.type);if(e||(e=new this.constructor(t.currentTarget,this.getDelegateOptions()),g(t.currentTarget).data("bs."+this.type,e)),t instanceof g.Event&&(e.inState["focusout"==t.type?"focus":"hover"]=!1),!e.isInStateTrue()){if(clearTimeout(e.timeout),e.hoverState="out",!e.options.delay||!e.options.delay.hide)return e.hide();e.timeout=setTimeout(function(){"out"==e.hoverState&&e.hide()},e.options.delay.hide)}},m.prototype.show=function(){var t=g.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){this.$element.trigger(t);var e=g.contains(this.$element[0].ownerDocument.documentElement,this.$element[0]);if(t.isDefaultPrevented()||!e)return;var i=this,o=this.tip(),n=this.getUID(this.type);this.setContent(),o.attr("id",n),this.$element.attr("aria-describedby",n),this.options.animation&&o.addClass("fade");var s="function"==typeof this.options.placement?this.options.placement.call(this,o[0],this.$element[0]):this.options.placement,a=/\s?auto?\s?/i,r=a.test(s);r&&(s=s.replace(a,"")||"top"),o.detach().css({top:0,left:0,display:"block"}).addClass(s).data("bs."+this.type,this),this.options.container?o.appendTo(g(document).find(this.options.container)):o.insertAfter(this.$element),this.$element.trigger("inserted.bs."+this.type);var l=this.getPosition(),h=o[0].offsetWidth,d=o[0].offsetHeight;if(r){var p=s,c=this.getPosition(this.$viewport);s="bottom"==s&&l.bottom+d>c.bottom?"top":"top"==s&&l.top-d<c.top?"bottom":"right"==s&&l.right+h>c.width?"left":"left"==s&&l.left-h<c.left?"right":s,o.removeClass(p).addClass(s)}var f=this.getCalculatedOffset(s,l,h,d);this.applyPlacement(f,s);var u=function(){var t=i.hoverState;i.$element.trigger("shown.bs."+i.type),i.hoverState=null,"out"==t&&i.leave(i)};g.support.transition&&this.$tip.hasClass("fade")?o.one("bsTransitionEnd",u).emulateTransitionEnd(m.TRANSITION_DURATION):u()}},m.prototype.applyPlacement=function(t,e){var i=this.tip(),o=i[0].offsetWidth,n=i[0].offsetHeight,s=parseInt(i.css("margin-top"),10),a=parseInt(i.css("margin-left"),10);isNaN(s)&&(s=0),isNaN(a)&&(a=0),t.top+=s,t.left+=a,g.offset.setOffset(i[0],g.extend({using:function(t){i.css({top:Math.round(t.top),left:Math.round(t.left)})}},t),0),i.addClass("in");var r=i[0].offsetWidth,l=i[0].offsetHeight;"top"==e&&l!=n&&(t.top=t.top+n-l);var h=this.getViewportAdjustedDelta(e,t,r,l);h.left?t.left+=h.left:t.top+=h.top;var d=/top|bottom/.test(e),p=d?2*h.left-o+r:2*h.top-n+l,c=d?"offsetWidth":"offsetHeight";i.offset(t),this.replaceArrow(p,i[0][c],d)},m.prototype.replaceArrow=function(t,e,i){this.arrow().css(i?"left":"top",50*(1-t/e)+"%").css(i?"top":"left","")},m.prototype.setContent=function(){var t=this.tip(),e=this.getTitle();this.options.html?(this.options.sanitize&&(e=n(e,this.options.whiteList,this.options.sanitizeFn)),t.find(".tooltip-inner").html(e)):t.find(".tooltip-inner").text(e),t.removeClass("fade in top bottom left right")},m.prototype.hide=function(t){var e=this,i=g(this.$tip),o=g.Event("hide.bs."+this.type);function n(){"in"!=e.hoverState&&i.detach(),e.$element&&e.$element.removeAttr("aria-describedby").trigger("hidden.bs."+e.type),t&&t()}if(this.$element.trigger(o),!o.isDefaultPrevented())return 
i.removeClass("in"),g.support.transition&&i.hasClass("fade")?i.one("bsTransitionEnd",n).emulateTransitionEnd(m.TRANSITION_DURATION):n(),this.hoverState=null,this},m.prototype.fixTitle=function(){var t=this.$element;(t.attr("title")||"string"!=typeof t.attr("data-original-title"))&&t.attr("data-original-title",t.attr("title")||"").attr("title","")},m.prototype.hasContent=function(){return this.getTitle()},m.prototype.getPosition=function(t){var e=(t=t||this.$element)[0],i="BODY"==e.tagName,o=e.getBoundingClientRect();null==o.width&&(o=g.extend({},o,{width:o.right-o.left,height:o.bottom-o.top}));var n=window.SVGElement&&e instanceof window.SVGElement,s=i?{top:0,left:0}:n?null:t.offset(),a={scroll:i?document.documentElement.scrollTop||document.body.scrollTop:t.scrollTop()},r=i?{width:g(window).width(),height:g(window).height()}:null;return g.extend({},o,a,r,s)},m.prototype.getCalculatedOffset=function(t,e,i,o){return"bottom"==t?{top:e.top+e.height,left:e.left+e.width/2-i/2}:"top"==t?{top:e.top-o,left:e.left+e.width/2-i/2}:"left"==t?{top:e.top+e.height/2-o/2,left:e.left-i}:{top:e.top+e.height/2-o/2,left:e.left+e.width}},m.prototype.getViewportAdjustedDelta=function(t,e,i,o){var n={top:0,left:0};if(!this.$viewport)return n;var s=this.options.viewport&&this.options.viewport.padding||0,a=this.getPosition(this.$viewport);if(/right|left/.test(t)){var r=e.top-s-a.scroll,l=e.top+s-a.scroll+o;r<a.top?n.top=a.top-r:l>a.top+a.height&&(n.top=a.top+a.height-l)}else{var h=e.left-s,d=e.left+s+i;h<a.left?n.left=a.left-h:d>a.right&&(n.left=a.left+a.width-d)}return n},m.prototype.getTitle=function(){var t=this.$element,e=this.options;return t.attr("data-original-title")||("function"==typeof e.title?e.title.call(t[0]):e.title)},m.prototype.getUID=function(t){for(;t+=~~(1e6*Math.random()),document.getElementById(t););return t},m.prototype.tip=function(){if(!this.$tip&&(this.$tip=g(this.options.template),1!=this.$tip.length))throw new Error(this.type+" `template` option must consist of exactly 1 top-level element!");return this.$tip},m.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},m.prototype.enable=function(){this.enabled=!0},m.prototype.disable=function(){this.enabled=!1},m.prototype.toggleEnabled=function(){this.enabled=!this.enabled},m.prototype.toggle=function(t){var e=this;t&&((e=g(t.currentTarget).data("bs."+this.type))||(e=new this.constructor(t.currentTarget,this.getDelegateOptions()),g(t.currentTarget).data("bs."+this.type,e))),t?(e.inState.click=!e.inState.click,e.isInStateTrue()?e.enter(e):e.leave(e)):e.tip().hasClass("in")?e.leave(e):e.enter(e)},m.prototype.destroy=function(){var t=this;clearTimeout(this.timeout),this.hide(function(){t.$element.off("."+t.type).removeData("bs."+t.type),t.$tip&&t.$tip.detach(),t.$tip=null,t.$arrow=null,t.$viewport=null,t.$element=null})},m.prototype.sanitizeHtml=function(t){return n(t,this.options.whiteList,this.options.sanitizeFn)};var e=g.fn.tooltip;g.fn.tooltip=function i(o){return this.each(function(){var t=g(this),e=t.data("bs.tooltip"),i="object"==typeof o&&o;!e&&/destroy|hide/.test(o)||(e||t.data("bs.tooltip",e=new m(this,i)),"string"==typeof o&&e[o]())})},g.fn.tooltip.Constructor=m,g.fn.tooltip.noConflict=function(){return g.fn.tooltip=e,this}}(jQuery),function(n){"use strict";var s=function(t,e){this.init("popover",t,e)};if(!n.fn.tooltip)throw new Error("Popover requires 
tooltip.js");s.VERSION="3.4.1",s.DEFAULTS=n.extend({},n.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),((s.prototype=n.extend({},n.fn.tooltip.Constructor.prototype)).constructor=s).prototype.getDefaults=function(){return s.DEFAULTS},s.prototype.setContent=function(){var t=this.tip(),e=this.getTitle(),i=this.getContent();if(this.options.html){var o=typeof i;this.options.sanitize&&(e=this.sanitizeHtml(e),"string"===o&&(i=this.sanitizeHtml(i))),t.find(".popover-title").html(e),t.find(".popover-content").children().detach().end()["string"===o?"html":"append"](i)}else t.find(".popover-title").text(e),t.find(".popover-content").children().detach().end().text(i);t.removeClass("fade top bottom left right in"),t.find(".popover-title").html()||t.find(".popover-title").hide()},s.prototype.hasContent=function(){return this.getTitle()||this.getContent()},s.prototype.getContent=function(){var t=this.$element,e=this.options;return t.attr("data-content")||("function"==typeof e.content?e.content.call(t[0]):e.content)},s.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")};var t=n.fn.popover;n.fn.popover=function e(o){return this.each(function(){var t=n(this),e=t.data("bs.popover"),i="object"==typeof o&&o;!e&&/destroy|hide/.test(o)||(e||t.data("bs.popover",e=new s(this,i)),"string"==typeof o&&e[o]())})},n.fn.popover.Constructor=s,n.fn.popover.noConflict=function(){return n.fn.popover=t,this}}(jQuery),function(s){"use strict";function n(t,e){this.$body=s(document.body),this.$scrollElement=s(t).is(document.body)?s(window):s(t),this.options=s.extend({},n.DEFAULTS,e),this.selector=(this.options.target||"")+" .nav li > a",this.offsets=[],this.targets=[],this.activeTarget=null,this.scrollHeight=0,this.$scrollElement.on("scroll.bs.scrollspy",s.proxy(this.process,this)),this.refresh(),this.process()}function e(o){return this.each(function(){var t=s(this),e=t.data("bs.scrollspy"),i="object"==typeof o&&o;e||t.data("bs.scrollspy",e=new n(this,i)),"string"==typeof o&&e[o]()})}n.VERSION="3.4.1",n.DEFAULTS={offset:10},n.prototype.getScrollHeight=function(){return this.$scrollElement[0].scrollHeight||Math.max(this.$body[0].scrollHeight,document.documentElement.scrollHeight)},n.prototype.refresh=function(){var t=this,o="offset",n=0;this.offsets=[],this.targets=[],this.scrollHeight=this.getScrollHeight(),s.isWindow(this.$scrollElement[0])||(o="position",n=this.$scrollElement.scrollTop()),this.$body.find(this.selector).map(function(){var t=s(this),e=t.data("target")||t.attr("href"),i=/^#./.test(e)&&s(e);return i&&i.length&&i.is(":visible")&&[[i[o]().top+n,e]]||null}).sort(function(t,e){return t[0]-e[0]}).each(function(){t.offsets.push(this[0]),t.targets.push(this[1])})},n.prototype.process=function(){var t,e=this.$scrollElement.scrollTop()+this.options.offset,i=this.getScrollHeight(),o=this.options.offset+i-this.$scrollElement.height(),n=this.offsets,s=this.targets,a=this.activeTarget;if(this.scrollHeight!=i&&this.refresh(),o<=e)return a!=(t=s[s.length-1])&&this.activate(t);if(a&&e<n[0])return this.activeTarget=null,this.clear();for(t=n.length;t--;)a!=s[t]&&e>=n[t]&&(n[t+1]===undefined||e<n[t+1])&&this.activate(s[t])},n.prototype.activate=function(t){this.activeTarget=t,this.clear();var 
e=this.selector+'[data-target="'+t+'"],'+this.selector+'[href="'+t+'"]',i=s(e).parents("li").addClass("active");i.parent(".dropdown-menu").length&&(i=i.closest("li.dropdown").addClass("active")),i.trigger("activate.bs.scrollspy")},n.prototype.clear=function(){s(this.selector).parentsUntil(this.options.target,".active").removeClass("active")};var t=s.fn.scrollspy;s.fn.scrollspy=e,s.fn.scrollspy.Constructor=n,s.fn.scrollspy.noConflict=function(){return s.fn.scrollspy=t,this},s(window).on("load.bs.scrollspy.data-api",function(){s('[data-spy="scroll"]').each(function(){var t=s(this);e.call(t,t.data())})})}(jQuery),function(r){"use strict";var a=function(t){this.element=r(t)};function e(i){return this.each(function(){var t=r(this),e=t.data("bs.tab");e||t.data("bs.tab",e=new a(this)),"string"==typeof i&&e[i]()})}a.VERSION="3.4.1",a.TRANSITION_DURATION=150,a.prototype.show=function(){var t=this.element,e=t.closest("ul:not(.dropdown-menu)"),i=t.data("target");if(i||(i=(i=t.attr("href"))&&i.replace(/.*(?=#[^\s]*$)/,"")),!t.parent("li").hasClass("active")){var o=e.find(".active:last a"),n=r.Event("hide.bs.tab",{relatedTarget:t[0]}),s=r.Event("show.bs.tab",{relatedTarget:o[0]});if(o.trigger(n),t.trigger(s),!s.isDefaultPrevented()&&!n.isDefaultPrevented()){var a=r(document).find(i);this.activate(t.closest("li"),e),this.activate(a,a.parent(),function(){o.trigger({type:"hidden.bs.tab",relatedTarget:t[0]}),t.trigger({type:"shown.bs.tab",relatedTarget:o[0]})})}}},a.prototype.activate=function(t,e,i){var o=e.find("> .active"),n=i&&r.support.transition&&(o.length&&o.hasClass("fade")||!!e.find("> .fade").length);function s(){o.removeClass("active").find("> .dropdown-menu > .active").removeClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!1),t.addClass("active").find('[data-toggle="tab"]').attr("aria-expanded",!0),n?(t[0].offsetWidth,t.addClass("in")):t.removeClass("fade"),t.parent(".dropdown-menu").length&&t.closest("li.dropdown").addClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!0),i&&i()}o.length&&n?o.one("bsTransitionEnd",s).emulateTransitionEnd(a.TRANSITION_DURATION):s(),o.removeClass("in")};var t=r.fn.tab;r.fn.tab=e,r.fn.tab.Constructor=a,r.fn.tab.noConflict=function(){return r.fn.tab=t,this};var i=function(t){t.preventDefault(),e.call(r(this),"show")};r(document).on("click.bs.tab.data-api",'[data-toggle="tab"]',i).on("click.bs.tab.data-api",'[data-toggle="pill"]',i)}(jQuery),function(l){"use strict";var h=function(t,e){this.options=l.extend({},h.DEFAULTS,e);var i=this.options.target===h.DEFAULTS.target?l(this.options.target):l(document).find(this.options.target);this.$target=i.on("scroll.bs.affix.data-api",l.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",l.proxy(this.checkPositionWithEventLoop,this)),this.$element=l(t),this.affixed=null,this.unpin=null,this.pinnedOffset=null,this.checkPosition()};function i(o){return this.each(function(){var t=l(this),e=t.data("bs.affix"),i="object"==typeof o&&o;e||t.data("bs.affix",e=new h(this,i)),"string"==typeof o&&e[o]()})}h.VERSION="3.4.1",h.RESET="affix affix-top affix-bottom",h.DEFAULTS={offset:0,target:window},h.prototype.getState=function(t,e,i,o){var n=this.$target.scrollTop(),s=this.$element.offset(),a=this.$target.height();if(null!=i&&"top"==this.affixed)return n<i&&"top";if("bottom"==this.affixed)return null!=i?!(n+this.unpin<=s.top)&&"bottom":!(n+a<=t-o)&&"bottom";var r=null==this.affixed,l=r?n:s.top;return 
null!=i&&n<=i?"top":null!=o&&t-o<=l+(r?a:e)&&"bottom"},h.prototype.getPinnedOffset=function(){if(this.pinnedOffset)return this.pinnedOffset;this.$element.removeClass(h.RESET).addClass("affix");var t=this.$target.scrollTop(),e=this.$element.offset();return this.pinnedOffset=e.top-t},h.prototype.checkPositionWithEventLoop=function(){setTimeout(l.proxy(this.checkPosition,this),1)},h.prototype.checkPosition=function(){if(this.$element.is(":visible")){var t=this.$element.height(),e=this.options.offset,i=e.top,o=e.bottom,n=Math.max(l(document).height(),l(document.body).height());"object"!=typeof e&&(o=i=e),"function"==typeof i&&(i=e.top(this.$element)),"function"==typeof o&&(o=e.bottom(this.$element));var s=this.getState(n,t,i,o);if(this.affixed!=s){null!=this.unpin&&this.$element.css("top","");var a="affix"+(s?"-"+s:""),r=l.Event(a+".bs.affix");if(this.$element.trigger(r),r.isDefaultPrevented())return;this.affixed=s,this.unpin="bottom"==s?this.getPinnedOffset():null,this.$element.removeClass(h.RESET).addClass(a).trigger(a.replace("affix","affixed")+".bs.affix")}"bottom"==s&&this.$element.offset({top:n-t-o})}};var t=l.fn.affix;l.fn.affix=i,l.fn.affix.Constructor=h,l.fn.affix.noConflict=function(){return l.fn.affix=t,this},l(window).on("load",function(){l('[data-spy="affix"]').each(function(){var t=l(this),e=t.data();e.offset=e.offset||{},null!=e.offsetBottom&&(e.offset.bottom=e.offsetBottom),null!=e.offsetTop&&(e.offset.top=e.offsetTop),i.call(t,e)})})}(jQuery);
\ No newline at end of file
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js b/hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js
deleted file mode 100644
index a1c07fd..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js
+++ /dev/null
@@ -1,2 +0,0 @@
-/*! jQuery v3.4.1 | (c) JS Foundation and other contributors | jquery.org/license */
-!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],E=C.document,r=Object.getPrototypeOf,s=t.slice,g=t.concat,u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType},x=function(e){return null!=e&&e===e.window},c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.4.1",k=function(e,t){return new k.fn.init(e,t)},p=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;function d(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0<t&&t-1 in e)}k.fn=k.prototype={jquery:f,constructor:k,length:0,toArray:function(){return s.call(this)},get:function(e){return null==e?s.call(this):e<0?this[e+this.length]:this[e]},pushStack:function(e){var t=k.merge(this.constructor(),e);return t.prevObject=this,t},each:function(e){return k.each(this,e)},map:function(n){return this.pushStack(k.map(this,function(e,t){return n.call(e,t,e)}))},slice:function(){return this.pushStack(s.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(e<0?t:0);return this.pushStack(0<=n&&n<t?[this[n]]:[])},end:function(){return this.prevObject||this.constructor()},push:u,sort:t.sort,splice:t.splice},k.extend=k.fn.extend=function(){var e,t,n,r,i,o,a=arguments[0]||{},s=1,u=arguments.length,l=!1;for("boolean"==typeof a&&(l=a,a=arguments[s]||{},s++),"object"==typeof a||m(a)||(a={}),s===u&&(a=this,s--);s<u;s++)if(null!=(e=arguments[s]))for(t in e)r=e[t],"__proto__"!==t&&a!==r&&(l&&r&&(k.isPlainObject(r)||(i=Array.isArray(r)))?(n=a[t],o=i&&!Array.isArray(n)?[]:i||k.isPlainObject(n)?n:{},i=!1,a[t]=k.extend(l,o,r)):void 0!==r&&(a[t]=r));return a},k.extend({expando:"jQuery"+(f+Math.random()).replace(/\D/g,""),isReady:!0,error:function(e){throw new Error(e)},noop:function(){},isPlainObject:function(e){var t,n;return!(!e||"[object Object]"!==o.call(e))&&(!(t=r(e))||"function"==typeof(n=v.call(t,"constructor")&&t.constructor)&&a.call(n)===l)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},globalEval:function(e,t){b(e,{nonce:t&&t.nonce})},each:function(e,t){var n,r=0;if(d(e)){for(n=e.length;r<n;r++)if(!1===t.call(e[r],r,e[r]))break}else for(r in e)if(!1===t.call(e[r],r,e[r]))break;return e},trim:function(e){return null==e?"":(e+"").replace(p,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(d(Object(e))?k.merge(n,"string"==typeof e?[e]:e):u.call(n,e)),n},inArray:function(e,t,n){return null==t?-1:i.call(t,e,n)},merge:function(e,t){for(var n=+t.length,r=0,i=e.length;r<n;r++)e[i++]=t[r];return e.length=i,e},grep:function(e,t,n){for(var r=[],i=0,o=e.length,a=!n;i<o;i++)!t(e[i],i)!==a&&r.push(e[i]);return r},map:function(e,t,n){var r,i,o=0,a=[];if(d(e))for(r=e.length;o<r;o++)null!=(i=t(e[o],o,n))&&a.push(i);else for(o in e)null!=(i=t(e[o],o,n))&&a.push(i);return g.apply([],a)},guid:1,support:y}),"function"==typeof 
Symbol&&(k.fn[Symbol.iterator]=t[Symbol.iterator]),k.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(e,t){n["[object "+t+"]"]=t.toLowerCase()});var h=function(n){var e,d,b,o,i,h,f,g,w,u,l,T,C,a,E,v,s,c,y,k="sizzle"+1*new Date,m=n.document,S=0,r=0,p=ue(),x=ue(),N=ue(),A=ue(),D=function(e,t){return e===t&&(l=!0),0},j={}.hasOwnProperty,t=[],q=t.pop,L=t.push,H=t.push,O=t.slice,P=function(e,t){for(var n=0,r=e.length;n<r;n++)if(e[n]===t)return n;return-1},R="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",I="(?:\\\\.|[\\w-]|[^\0-\\xa0])+",W="\\["+M+"*("+I+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+I+"))|)"+M+"*\\]",$=":("+I+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+W+")*)|.*)\\)|)",F=new RegExp(M+"+","g"),B=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),_=new RegExp("^"+M+"*,"+M+"*"),z=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp($),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+$),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ne=function(e,t,n){var r="0x"+t-65536;return r!=r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(m.childNodes),m.childNodes),t[m.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&((e?e.ownerDocument||e:m)!==C&&T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!A[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&U.test(t)){(s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=k),o=(l=h(t)).length;while(o--)l[o]="#"+s+" "+xe(l[o]);c=l.join(","),f=ee.test(t)&&ye(e.parentNode)||e}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){A(t,!0)}finally{s===k&&e.removeAttribute("id")}}}return g(t.replace(B,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function 
le(e){return e[k]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:m;return r!==C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),m!==C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=k,!C.getElementsByName||!C.getElementsByName(k).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){a.appendChild(e).innerHTML="<a id='"+k+"'></a><select id='"+k+"-\r\\' msallowcapture=''><option selected=''></option></select>",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+k+"-]").length||v.push("~="),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+k+"+*").length||v.push(".#.+[+~]")}),ce(function(e){e.innerHTML="<a href='' disabled='disabled'></a><select 
disabled='disabled'><option/></select>";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",$)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e===C||e.ownerDocument===m&&y(m,e)?-1:t===C||t.ownerDocument===m&&y(m,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===C?-1:t===C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]===m?-1:s[r]===m?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if((e.ownerDocument||e)!==C&&T(e),d.matchesSelector&&E&&!A[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){A(t,!0)}return 0<se(t,C,null,[e]).length},se.contains=function(e,t){return(e.ownerDocument||e)!==C&&T(e),y(e,t)},se.attr=function(e,t){(e.ownerDocument||e)!==C&&T(e);var n=b.attrHandle[t.toLowerCase()],r=n&&j.call(b.attrHandle,t.toLowerCase())?n(e,t,!E):void 0;return void 0!==r?r:d.attributes||!E?e.getAttribute(t):(r=e.getAttributeNode(t))&&r.specified?r.value:null},se.escape=function(e){return(e+"").replace(re,ie)},se.error=function(e){throw new Error("Syntax error, unrecognized expression: "+e)},se.uniqueSort=function(e){var t,n=[],r=0,i=0;if(l=!d.detectDuplicates,u=!d.sortStable&&e.slice(0),e.sort(D),l){while(t=e[i++])t===e[i]&&(r=n.push(i));while(r--)e.splice(n[r],1)}return u=null,e},o=se.getText=function(e){var t,n="",r=0,i=e.nodeType;if(i){if(1===i||9===i||11===i){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=o(e)}else if(3===i||4===i)return e.nodeValue}else while(t=e[r++])n+=o(t);return n},(b=se.selectors={cacheLength:50,createPseudo:le,match:G,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return 
e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=p[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&p(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1<t.indexOf(i):"$="===r?i&&t.slice(-i.length)===i:"~="===r?-1<(" "+t.replace(F," ")+" ").indexOf(i):"|="===r&&(t===i||t.slice(0,i.length+1)===i+"-"))}},CHILD:function(h,e,t,g,v){var y="nth"!==h.slice(0,3),m="last"!==h.slice(-4),x="of-type"===e;return 1===g&&0===v?function(e){return!!e.parentNode}:function(e,t,n){var r,i,o,a,s,u,l=y!==m?"nextSibling":"previousSibling",c=e.parentNode,f=x&&e.nodeName.toLowerCase(),p=!n&&!x,d=!1;if(c){if(y){while(l){a=e;while(a=a[l])if(x?a.nodeName.toLowerCase()===f:1===a.nodeType)return!1;u=l="only"===h&&!u&&"nextSibling"}return!0}if(u=[m?c.firstChild:c.lastChild],m&&p){d=(s=(r=(i=(o=(a=c)[k]||(a[k]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]||[])[0]===S&&r[1])&&r[2],a=s&&c.childNodes[s];while(a=++s&&a&&a[l]||(d=s=0)||u.pop())if(1===a.nodeType&&++d&&a===e){i[h]=[S,s,d];break}}else if(p&&(d=s=(r=(i=(o=(a=e)[k]||(a[k]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]||[])[0]===S&&r[1]),!1===d)while(a=++s&&a&&a[l]||(d=s=0)||u.pop())if((x?a.nodeName.toLowerCase()===f:1===a.nodeType)&&++d&&(p&&((i=(o=a[k]||(a[k]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]=[S,d]),a===e))break;return(d-=v)===g||d%g==0&&0<=d/g}}},PSEUDO:function(e,o){var t,a=b.pseudos[e]||b.setFilters[e.toLowerCase()]||se.error("unsupported pseudo: "+e);return a[k]?a(o):1<a.length?(t=[e,e,"",o],b.setFilters.hasOwnProperty(e.toLowerCase())?le(function(e,t){var n,r=a(e,o),i=r.length;while(i--)e[n=P(e,r[i])]=!(t[n]=r[i])}):function(e){return a(e,0,t)}):a}},pseudos:{not:le(function(e){var r=[],i=[],s=f(e.replace(B,"$1"));return s[k]?le(function(e,t,n,r){var i,o=s(e,null,r,[]),a=e.length;while(a--)(i=o[a])&&(e[a]=!(t[a]=i))}):function(e,t,n){return r[0]=e,s(r,null,n,i),r[0]=null,!i.pop()}}),has:le(function(t){return function(e){return 0<se(t,e).length}}),contains:le(function(t){return t=t.replace(te,ne),function(e){return-1<(e.textContent||o(e)).indexOf(t)}}),lang:le(function(n){return V.test(n||"")||se.error("unsupported lang: "+n),n=n.replace(te,ne).toLowerCase(),function(e){var t;do{if(t=E?e.lang:e.getAttribute("xml:lang")||e.getAttribute("lang"))return(t=t.toLowerCase())===n||0===t.indexOf(n+"-")}while((e=e.parentNode)&&1===e.nodeType);return!1}}),target:function(e){var t=n.location&&n.location.hash;return t&&t.slice(1)===e.id},root:function(e){return e===a},focus:function(e){return e===C.activeElement&&(!C.hasFocus||C.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:ge(!1),disabled:ge(!0),checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return 
e.parentNode&&e.parentNode.selectedIndex,!0===e.selected},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeType<6)return!1;return!0},parent:function(e){return!b.pseudos.empty(e)},header:function(e){return J.test(e.nodeName)},input:function(e){return Q.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||"text"===t.toLowerCase())},first:ve(function(){return[0]}),last:ve(function(e,t){return[t-1]}),eq:ve(function(e,t,n){return[n<0?n+t:n]}),even:ve(function(e,t){for(var n=0;n<t;n+=2)e.push(n);return e}),odd:ve(function(e,t){for(var n=1;n<t;n+=2)e.push(n);return e}),lt:ve(function(e,t,n){for(var r=n<0?n+t:t<n?t:n;0<=--r;)e.push(r);return e}),gt:ve(function(e,t,n){for(var r=n<0?n+t:n;++r<t;)e.push(r);return e})}}).pseudos.nth=b.pseudos.eq,{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})b.pseudos[e]=de(e);for(e in{submit:!0,reset:!0})b.pseudos[e]=he(e);function me(){}function xe(e){for(var t=0,n=e.length,r="";t<n;t++)r+=e[t].value;return r}function be(s,e,t){var u=e.dir,l=e.next,c=l||u,f=t&&"parentNode"===c,p=r++;return e.first?function(e,t,n){while(e=e[u])if(1===e.nodeType||f)return s(e,t,n);return!1}:function(e,t,n){var r,i,o,a=[S,p];if(n){while(e=e[u])if((1===e.nodeType||f)&&s(e,t,n))return!0}else while(e=e[u])if(1===e.nodeType||f)if(i=(o=e[k]||(e[k]={}))[e.uniqueID]||(o[e.uniqueID]={}),l&&l===e.nodeName.toLowerCase())e=e[u]||e;else{if((r=i[c])&&r[0]===S&&r[1]===p)return a[2]=r[2];if((i[c]=a)[2]=s(e,t,n))return!0}return!1}}function we(i){return 1<i.length?function(e,t,n){var r=i.length;while(r--)if(!i[r](e,t,n))return!1;return!0}:i[0]}function Te(e,t,n,r,i){for(var o,a=[],s=0,u=e.length,l=null!=t;s<u;s++)(o=e[s])&&(n&&!n(o,r,i)||(a.push(o),l&&t.push(s)));return a}function Ce(d,h,g,v,y,e){return v&&!v[k]&&(v=Ce(v)),y&&!y[k]&&(y=Ce(y,e)),le(function(e,t,n,r){var i,o,a,s=[],u=[],l=t.length,c=e||function(e,t,n){for(var r=0,i=t.length;r<i;r++)se(e,t[r],n);return n}(h||"*",n.nodeType?[n]:n,[]),f=!d||!e&&h?c:Te(c,s,d,n,r),p=g?y||(e?d:l||v)?[]:t:f;if(g&&g(f,p,n,r),v){i=Te(p,u),v(i,[],n,r),o=i.length;while(o--)(a=i[o])&&(p[u[o]]=!(f[u[o]]=a))}if(e){if(y||d){if(y){i=[],o=p.length;while(o--)(a=p[o])&&i.push(f[o]=a);y(null,p=[],i,r)}o=p.length;while(o--)(a=p[o])&&-1<(i=y?P(e,a):s[o])&&(e[i]=!(t[i]=a))}}else p=Te(p===t?p.splice(l,p.length):p),y?y(null,t,p,r):H.apply(t,p)})}function Ee(e){for(var i,t,n,r=e.length,o=b.relative[e[0].type],a=o||b.relative[" "],s=o?1:0,u=be(function(e){return e===i},a,!0),l=be(function(e){return-1<P(i,e)},a,!0),c=[function(e,t,n){var r=!o&&(n||t!==w)||((i=t).nodeType?u(e,t,n):l(e,t,n));return i=null,r}];s<r;s++)if(t=b.relative[e[s].type])c=[be(we(c),t)];else{if((t=b.filter[e[s].type].apply(null,e[s].matches))[k]){for(n=++s;n<r;n++)if(b.relative[e[n].type])break;return Ce(1<s&&we(c),1<s&&xe(e.slice(0,s-1).concat({value:" "===e[s-2].type?"*":""})).replace(B,"$1"),t,s<n&&Ee(e.slice(s,n)),n<r&&Ee(e=e.slice(n)),n<r&&xe(e))}c.push(t)}return we(c)}return me.prototype=b.filters=b.pseudos,b.setFilters=new me,h=se.tokenize=function(e,t){var n,r,i,o,a,s,u,l=x[e+" "];if(l)return t?0:l.slice(0);a=e,s=[],u=b.preFilter;while(a){for(o in n&&!(r=_.exec(a))||(r&&(a=a.slice(r[0].length)||a),s.push(i=[])),n=!1,(r=z.exec(a))&&(n=r.shift(),i.push({value:n,type:r[0].replace(B," 
")}),a=a.slice(n.length)),b.filter)!(r=G[o].exec(a))||u[o]&&!(r=u[o](r))||(n=r.shift(),i.push({value:n,type:o,matches:r}),a=a.slice(n.length));if(!n)break}return t?a.length:a?se.error(e):x(e,s).slice(0)},f=se.compile=function(e,t){var n,v,y,m,x,r,i=[],o=[],a=N[e+" "];if(!a){t||(t=h(e)),n=t.length;while(n--)(a=Ee(t[n]))[k]?i.push(a):o.push(a);(a=N(e,(v=o,m=0<(y=i).length,x=0<v.length,r=function(e,t,n,r,i){var o,a,s,u=0,l="0",c=e&&[],f=[],p=w,d=e||x&&b.find.TAG("*",i),h=S+=null==p?1:Math.random()||.1,g=d.length;for(i&&(w=t===C||t||i);l!==g&&null!=(o=d[l]);l++){if(x&&o){a=0,t||o.ownerDocument===C||(T(o),n=!E);while(s=v[a++])if(s(o,t||C,n)){r.push(o);break}i&&(S=h)}m&&((o=!s&&o)&&u--,e&&c.push(o))}if(u+=l,m&&l!==u){a=0;while(s=y[a++])s(c,f,t,n);if(e){if(0<u)while(l--)c[l]||f[l]||(f[l]=q.call(r));f=Te(f)}H.apply(r,f),i&&!e&&0<f.length&&1<u+y.length&&se.uniqueSort(r)}return i&&(S=h,w=p),c},m?le(r):r))).selector=e}return a},g=se.select=function(e,t,n,r){var i,o,a,s,u,l="function"==typeof e&&e,c=!r&&h(e=l.selector||e);if(n=n||[],1===c.length){if(2<(o=c[0]=c[0].slice(0)).length&&"ID"===(a=o[0]).type&&9===t.nodeType&&E&&b.relative[o[1].type]){if(!(t=(b.find.ID(a.matches[0].replace(te,ne),t)||[])[0]))return n;l&&(t=t.parentNode),e=e.slice(o.shift().value.length)}i=G.needsContext.test(e)?0:o.length;while(i--){if(a=o[i],b.relative[s=a.type])break;if((u=b.find[s])&&(r=u(a.matches[0].replace(te,ne),ee.test(o[0].type)&&ye(t.parentNode)||t))){if(o.splice(i,1),!(e=r.length&&xe(o)))return H.apply(n,r),n;break}}}return(l||f(e,c))(r,t,!E,n,!t||ee.test(e)&&ye(t.parentNode)||t),n},d.sortStable=k.split("").sort(D).join("")===k,d.detectDuplicates=!!l,T(),d.sortDetached=ce(function(e){return 1&e.compareDocumentPosition(C.createElement("fieldset"))}),ce(function(e){return e.innerHTML="<a href='#'></a>","#"===e.firstChild.getAttribute("href")})||fe("type|href|height|width",function(e,t,n){if(!n)return e.getAttribute(t,"type"===t.toLowerCase()?1:2)}),d.attributes&&ce(function(e){return e.innerHTML="<input/>",e.firstChild.setAttribute("value",""),""===e.firstChild.getAttribute("value")})||fe("value",function(e,t,n){if(!n&&"input"===e.nodeName.toLowerCase())return e.defaultValue}),ce(function(e){return null==e.getAttribute("disabled")})||fe(R,function(e,t,n){var r;if(!n)return!0===e[t]?t.toLowerCase():(r=e.getAttributeNode(t))&&r.specified?r.value:null}),se}(C);k.find=h,k.expr=h.selectors,k.expr[":"]=k.expr.pseudos,k.uniqueSort=k.unique=h.uniqueSort,k.text=h.getText,k.isXMLDoc=h.isXML,k.contains=h.contains,k.escapeSelector=h.escape;var T=function(e,t,n){var r=[],i=void 0!==n;while((e=e[t])&&9!==e.nodeType)if(1===e.nodeType){if(i&&k(e).is(n))break;r.push(e)}return r},S=function(e,t){for(var n=[];e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n},N=k.expr.match.needsContext;function A(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()}var D=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?k.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?k.grep(e,function(e){return e===n!==r}):"string"!=typeof n?k.grep(e,function(e){return-1<i.call(n,e)!==r}):k.filter(n,e,r)}k.filter=function(e,t,n){var r=t[0];return n&&(e=":not("+e+")"),1===t.length&&1===r.nodeType?k.find.matchesSelector(r,e)?[r]:[]:k.find.matches(e,k.grep(t,function(e){return 1===e.nodeType}))},k.fn.extend({find:function(e){var t,n,r=this.length,i=this;if("string"!=typeof e)return 
[remainder of the deleted minified jQuery bundle: the traversal, Callbacks/Deferred, data/queue, event, manipulation, CSS/effects, ajax, and offset/dimension modules, ending with the AMD define("jquery") registration and jQuery.noConflict]
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/js/ozonedoc.js b/hadoop-hdds/docs/themes/ozonedoc/static/js/ozonedoc.js
deleted file mode 100644
index 3f96f00..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/static/js/ozonedoc.js
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-$(
-  function(){
-    $("table").addClass("table table-condensed table-bordered table-striped");
-  }
-);
diff --git a/hadoop-hdds/docs/themes/ozonedoc/theme.toml b/hadoop-hdds/docs/themes/ozonedoc/theme.toml
deleted file mode 100644
index 9f427fe..0000000
--- a/hadoop-hdds/docs/themes/ozonedoc/theme.toml
+++ /dev/null
@@ -1,2 +0,0 @@
-
-name = "Ozonedoc"
diff --git a/hadoop-hdds/framework/README.md b/hadoop-hdds/framework/README.md
deleted file mode 100644
index 0eda3f5..0000000
--- a/hadoop-hdds/framework/README.md
+++ /dev/null
@@ -1,24 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-# Server framework for HDDS/Ozone
-
-This project contains generic utilities and resources for all the HDDS/Ozone
-server-side components.
-
-The project is shared between the server/service projects but not with the
-client packages.
\ No newline at end of file
diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
deleted file mode 100644
index 1792689..0000000
--- a/hadoop-hdds/framework/pom.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdds</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-hdds-server-framework</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Server Framework</description>
-  <name>Apache Hadoop HDDS Server Framework</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/BaseHttpServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/BaseHttpServer.java
deleted file mode 100644
index 990d89d..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/BaseHttpServer.java
+++ /dev/null
@@ -1,258 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.conf.HddsConfServlet;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.net.NetUtils;
-
-import org.eclipse.jetty.webapp.WebAppContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.http.HttpServlet;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.Optional;
-
-import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
-import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
-
-/**
- * Base class for HTTP server of the Ozone related components.
- */
-public abstract class BaseHttpServer {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BaseHttpServer.class);
-  protected static final String PROMETHEUS_SINK = "PROMETHEUS_SINK";
-
-  private HttpServer2 httpServer;
-  private final Configuration conf;
-
-  private InetSocketAddress httpAddress;
-  private InetSocketAddress httpsAddress;
-
-  private HttpConfig.Policy policy;
-
-  private String name;
-  private PrometheusMetricsSink prometheusMetricsSink;
-
-  private boolean prometheusSupport;
-
-  private boolean profilerSupport;
-
-  public BaseHttpServer(Configuration conf, String name) throws IOException {
-    this.name = name;
-    this.conf = conf;
-    policy = DFSUtil.getHttpPolicy(conf);
-    if (isEnabled()) {
-      this.httpAddress = getHttpBindAddress();
-      this.httpsAddress = getHttpsBindAddress();
-      HttpServer2.Builder builder = null;
-
-      // Avoid registering o.a.h.http.PrometheusServlet in HttpServer2.
-      // TODO: Replace "hadoop.prometheus.endpoint.enabled" with
-      // CommonConfigurationKeysPublic.HADOOP_PROMETHEUS_ENABLED when possible.
-      conf.setBoolean("hadoop.prometheus.endpoint.enabled", false);
-
-      builder = DFSUtil.httpServerTemplateForNNAndJN(conf, this.httpAddress,
-          this.httpsAddress, name, getSpnegoPrincipal(), getKeytabFile());
-
-      final boolean xFrameEnabled = conf.getBoolean(
-          DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED,
-          DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED_DEFAULT);
-
-      final String xFrameOptionValue = conf.getTrimmed(
-          DFSConfigKeys.DFS_XFRAME_OPTION_VALUE,
-          DFSConfigKeys.DFS_XFRAME_OPTION_VALUE_DEFAULT);
-
-      builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue);
-
-      httpServer = builder.build();
-      httpServer.addServlet("conf", "/conf", HddsConfServlet.class);
-
-      httpServer.addServlet("logstream", "/logstream", LogStreamServlet.class);
-      prometheusSupport =
-          conf.getBoolean(HddsConfigKeys.HDDS_PROMETHEUS_ENABLED, true);
-
-      profilerSupport =
-          conf.getBoolean(HddsConfigKeys.HDDS_PROFILER_ENABLED, false);
-
-      if (prometheusSupport) {
-        prometheusMetricsSink = new PrometheusMetricsSink();
-        httpServer.getWebAppContext().getServletContext()
-            .setAttribute(PROMETHEUS_SINK, prometheusMetricsSink);
-        httpServer.addServlet("prometheus", "/prom", PrometheusServlet.class);
-      }
-
-      if (profilerSupport) {
-        LOG.warn(
-            "/prof java profiling servlet is activated. Not safe for "
-                + "production!");
-        httpServer.addServlet("profile", "/prof", ProfileServlet.class);
-      }
-    }
-
-  }
-
-  /**
-   * Add a servlet to BaseHttpServer.
-   *
-   * @param servletName The name of the servlet
-   * @param pathSpec    The path spec for the servlet
-   * @param clazz       The servlet class
-   */
-  protected void addServlet(String servletName, String pathSpec,
-      Class<? extends HttpServlet> clazz) {
-    httpServer.addServlet(servletName, pathSpec, clazz);
-  }
-
-  /**
-   * Returns the WebAppContext associated with this HttpServer.
-   *
-   * @return WebAppContext
-   */
-  protected WebAppContext getWebAppContext() {
-    return httpServer.getWebAppContext();
-  }
-
-  protected InetSocketAddress getBindAddress(String bindHostKey,
-      String addressKey, String bindHostDefault, int bindPortdefault) {
-    final Optional<String> bindHost =
-        getHostNameFromConfigKeys(conf, bindHostKey);
-
-    final Optional<Integer> addressPort =
-        getPortNumberFromConfigKeys(conf, addressKey);
-
-    final Optional<String> addressHost =
-        getHostNameFromConfigKeys(conf, addressKey);
-
-    String hostName = bindHost.orElse(addressHost.orElse(bindHostDefault));
-
-    return NetUtils.createSocketAddr(
-        hostName + ":" + addressPort.orElse(bindPortdefault));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the HTTPS web interface.
-   *
-   * @return Target InetSocketAddress for the Ozone HTTPS endpoint.
-   */
-  public InetSocketAddress getHttpsBindAddress() {
-    return getBindAddress(getHttpsBindHostKey(), getHttpsAddressKey(),
-        getBindHostDefault(), getHttpsBindPortDefault());
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the HTTP web interface.
-   *
-   * @return Target InetSocketAddress for the Ozone HTTP endpoint.
-   */
-  public InetSocketAddress getHttpBindAddress() {
-    return getBindAddress(getHttpBindHostKey(), getHttpAddressKey(),
-        getBindHostDefault(), getHttpBindPortDefault());
-
-  }
-
-  public void start() throws IOException {
-    if (httpServer != null && isEnabled()) {
-      httpServer.start();
-      if (prometheusSupport) {
-        DefaultMetricsSystem.instance()
-            .register("prometheus", "Hadoop metrics prometheus exporter",
-                prometheusMetricsSink);
-      }
-      updateConnectorAddress();
-    }
-
-  }
-
-  private boolean isEnabled() {
-    return conf.getBoolean(getEnabledKey(), true);
-  }
-
-  public void stop() throws Exception {
-    if (httpServer != null) {
-      httpServer.stop();
-    }
-  }
-
-  /**
-   * Update the configured listen address based on the real port.
-   * <p>
-   * (e.g. replace :0 with the real port)
-   */
-  public void updateConnectorAddress() {
-    int connIdx = 0;
-    if (policy.isHttpEnabled()) {
-      httpAddress = httpServer.getConnectorAddress(connIdx++);
-      String realAddress = NetUtils.getHostPortString(httpAddress);
-      conf.set(getHttpAddressKey(), realAddress);
-      LOG.info(
-          String.format("HTTP server of %s is listening at http://%s",
-              name.toUpperCase(), realAddress));
-    }
-
-    if (policy.isHttpsEnabled()) {
-      httpsAddress = httpServer.getConnectorAddress(connIdx);
-      String realAddress = NetUtils.getHostPortString(httpsAddress);
-      conf.set(getHttpsAddressKey(), realAddress);
-      LOG.info(
-          String.format("HTTP server of %s is listening at https://%s",
-              name.toUpperCase(), realAddress));
-    }
-  }
-
-  public InetSocketAddress getHttpAddress() {
-    return httpAddress;
-  }
-
-  public InetSocketAddress getHttpsAddress() {
-    return httpsAddress;
-  }
-
-  protected abstract String getHttpAddressKey();
-
-  protected abstract String getHttpsAddressKey();
-
-  protected abstract String getHttpBindHostKey();
-
-  protected abstract String getHttpsBindHostKey();
-
-  protected abstract String getBindHostDefault();
-
-  protected abstract int getHttpBindPortDefault();
-
-  protected abstract int getHttpsBindPortDefault();
-
-  protected abstract String getKeytabFile();
-
-  protected abstract String getSpnegoPrincipal();
-
-  protected abstract String getEnabledKey();
-
-}
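
For reference, the abstract hooks above are what each component server fills in. A minimal sketch of a subclass follows; every configuration key and port here is hypothetical, invented for this sketch (the real keys live in the respective component's ConfigKeys classes):

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.server.BaseHttpServer;

// Illustrative only: all keys and ports below are made up for this sketch.
public class ExampleHttpServer extends BaseHttpServer {

  public ExampleHttpServer(Configuration conf) throws IOException {
    super(conf, "example");
  }

  @Override protected String getHttpAddressKey() { return "example.http-address"; }
  @Override protected String getHttpsAddressKey() { return "example.https-address"; }
  @Override protected String getHttpBindHostKey() { return "example.http-bind-host"; }
  @Override protected String getHttpsBindHostKey() { return "example.https-bind-host"; }
  @Override protected String getBindHostDefault() { return "0.0.0.0"; }
  @Override protected int getHttpBindPortDefault() { return 9876; }
  @Override protected int getHttpsBindPortDefault() { return 9877; }
  @Override protected String getKeytabFile() { return "example.http.keytab.file"; }
  @Override protected String getSpnegoPrincipal() { return "example.http.spnego.principal"; }
  @Override protected String getEnabledKey() { return "example.http.enabled"; }
}
```

The base class then resolves the bind address by preferring the explicit bind-host key, falling back to the host part of the address key, and finally to the default, as implemented in getBindAddress above.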
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/LogStreamServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/LogStreamServlet.java
deleted file mode 100644
index 1869c8b..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/LogStreamServlet.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.server;
-
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
-import org.apache.log4j.WriterAppender;
-
-/**
- * Servlet to stream the current logs to the response.
- */
-public class LogStreamServlet extends HttpServlet {
-
-  private static final String PATTERN = "%d [%p|%c|%C{1}] %m%n";
-
-  @Override
-  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
-      throws ServletException, IOException {
-
-    WriterAppender appender =
-        new WriterAppender(new PatternLayout(PATTERN), resp.getWriter());
-    appender.setThreshold(Level.TRACE);
-
-    try {
-      Logger.getRootLogger().addAppender(appender);
-      try {
-        Thread.sleep(Integer.MAX_VALUE);
-      } catch (InterruptedException e) {
-        //interrupted
-      }
-    } finally {
-      Logger.getRootLogger().removeAppender(appender);
-    }
-  }
-
-}
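
The servlet above never completes the request on its own: it attaches a WriterAppender to the root logger and blocks, so log events stream to the client until the connection is dropped (for example via `curl http://host:port/logstream`). A minimal client sketch, with a hypothetical host and port:

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;

// Tails the live log stream exposed by LogStreamServlet until the
// server side closes the connection; host/port are illustrative.
public class LogTail {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:9876/logstream");
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(url.openStream()))) {
      String line;
      while ((line = reader.readLine()) != null) {
        System.out.println(line); // each line is formatted per PATTERN
      }
    }
  }
}
```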
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneProtocolMessageDispatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneProtocolMessageDispatcher.java
deleted file mode 100644
index d67a759..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneProtocolMessageDispatcher.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.server;
-
-import org.apache.hadoop.hdds.function.FunctionWithServiceException;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
-
-import com.google.protobuf.ProtocolMessageEnum;
-import com.google.protobuf.ServiceException;
-import io.opentracing.Scope;
-import org.slf4j.Logger;
-
-/**
- * Dispatches messages after tracing and message logging for insight.
- * <p>
- * This is a generic utility to dispatch messages in server-side translators.
- * <p>
- * It logs the message type/content at DEBUG/TRACE level for insight and creates
- * a new span based on the tracing information.
- */
-public class OzoneProtocolMessageDispatcher<REQUEST, RESPONSE> {
-
-  private String serviceName;
-
-  private final ProtocolMessageMetrics protocolMessageMetrics;
-
-  private Logger logger;
-
-  public OzoneProtocolMessageDispatcher(String serviceName,
-      ProtocolMessageMetrics protocolMessageMetrics, Logger logger) {
-    this.serviceName = serviceName;
-    this.protocolMessageMetrics = protocolMessageMetrics;
-    this.logger = logger;
-  }
-
-  public RESPONSE processRequest(
-      REQUEST request,
-      FunctionWithServiceException<REQUEST, RESPONSE> methodCall,
-      ProtocolMessageEnum type,
-      String traceId) throws ServiceException {
-    Scope scope = TracingUtil
-        .importAndCreateScope(type.toString(), traceId);
-    try {
-      if (logger.isTraceEnabled()) {
-        logger.trace(
-            "{} {} request is received: <json>{}</json>",
-            serviceName,
-            type.toString(),
-            request.toString().replaceAll("\n", "\\\\n"));
-      } else if (logger.isDebugEnabled()) {
-        logger.debug("{} {} request is received",
-            serviceName, type.toString());
-      }
-      protocolMessageMetrics.increment(type);
-
-      RESPONSE response = methodCall.apply(request);
-
-      if (logger.isTraceEnabled()) {
-        logger.trace(
-            "{} {} request is processed. Response: "
-                + "<json>{}</json>",
-            serviceName,
-            type.toString(),
-            response.toString().replaceAll("\n", "\\\\n"));
-      }
-      return response;
-
-    } finally {
-      scope.close();
-    }
-  }
-}
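
A hedged sketch of how a server-side translator would typically invoke the dispatcher; SubmitRequest, SubmitResponse, Type, and handleSubmit stand in for generated protobuf classes and a handler method, so this is a fragment rather than a compilable class:

```java
// Fragment, not compilable stand-alone: the request/response/enum types are
// placeholders for generated protobuf classes in a real translator.
public SubmitResponse submit(SubmitRequest request) throws ServiceException {
  return dispatcher.processRequest(
      request,
      this::handleSubmit,     // FunctionWithServiceException<REQUEST, RESPONSE>
      Type.Submit,            // ProtocolMessageEnum naming the RPC for metrics
      request.getTraceID());  // trace id imported into a new tracing span
}
```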
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
deleted file mode 100644
index 7cea582..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
+++ /dev/null
@@ -1,507 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server;
-
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.management.ManagementFactory;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.regex.Pattern;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
-import org.apache.commons.io.IOUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Servlet that runs async-profiler as web-endpoint.
- * <p>
- * Source: https://github.com/apache/hive/blob/master/common/src/java/org
- * /apache/hive/http/ProfileServlet.java
- * <p>
- * Following options from async-profiler can be specified as query parameters.
- * //  -e event          profiling event: cpu|alloc|lock|cache-misses etc.
- * //  -d duration       run profiling for <duration> seconds (integer)
- * //  -i interval       sampling interval in nanoseconds (long)
- * //  -j jstackdepth    maximum Java stack depth (integer)
- * //  -b bufsize        frame buffer size (long)
- * //  -t                profile different threads separately
- * //  -s                simple class names instead of FQN
- * //  -o fmt[,fmt...]   output format:
- * summary|traces|flat|collapsed|svg|tree|jfr
- * //  --width px        SVG width pixels (integer)
- * //  --height px       SVG frame height pixels (integer)
- * //  --minwidth px     skip frames smaller than px (double)
- * //  --reverse         generate stack-reversed FlameGraph / Call tree
- * Example:
- * - To collect 30 second CPU profile of current process (returns FlameGraph
- * svg)
- * curl "http://localhost:10002/prof"
- * - To collect 1 minute CPU profile of current process and output in tree
- * format (html)
- * curl "http://localhost:10002/prof?output=tree&duration=60"
- * - To collect 30 second heap allocation profile of current process (returns
- * FlameGraph svg)
- * curl "http://localhost:10002/prof?event=alloc"
- * - To collect lock contention profile of current process (returns
- * FlameGraph svg)
- * curl "http://localhost:10002/prof?event=lock"
- * Following event types are supported (default is 'cpu') (NOTE: not all
- * OSes support all events)
- * // Perf events:
- * //    cpu
- * //    page-faults
- * //    context-switches
- * //    cycles
- * //    instructions
- * //    cache-references
- * //    cache-misses
- * //    branches
- * //    branch-misses
- * //    bus-cycles
- * //    L1-dcache-load-misses
- * //    LLC-load-misses
- * //    dTLB-load-misses
- * //    mem:breakpoint
- * //    trace:tracepoint
- * // Java events:
- * //    alloc
- * //    lock
- */
-public class ProfileServlet extends HttpServlet {
-  private static final long serialVersionUID = 1L;
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ProfileServlet.class);
-  private static final String ACCESS_CONTROL_ALLOW_METHODS =
-      "Access-Control-Allow-Methods";
-  private static final String ALLOWED_METHODS = "GET";
-  private static final String ACCESS_CONTROL_ALLOW_ORIGIN =
-      "Access-Control-Allow-Origin";
-  private static final String CONTENT_TYPE_TEXT = "text/plain; charset=utf-8";
-  private static final String ASYNC_PROFILER_HOME_ENV = "ASYNC_PROFILER_HOME";
-  private static final String ASYNC_PROFILER_HOME_SYSTEM_PROPERTY =
-      "async.profiler.home";
-  private static final String PROFILER_SCRIPT = "/profiler.sh";
-  private static final int DEFAULT_DURATION_SECONDS = 10;
-  private static final AtomicInteger ID_GEN = new AtomicInteger(0);
-  static final Path OUTPUT_DIR =
-      Paths.get(System.getProperty("java.io.tmpdir"), "prof-output");
-  public static final String FILE_PREFIX = "async-prof-pid-";
-
-  public static final Pattern FILE_NAME_PATTERN =
-      Pattern.compile(FILE_PREFIX + "[0-9]+-[0-9A-Za-z\\-_]+-[0-9]+\\.[a-z]+");
-
-  private Lock profilerLock = new ReentrantLock();
-  private final Integer pid;
-  private String asyncProfilerHome;
-  private transient Process process;
-
-  public ProfileServlet() {
-    this.asyncProfilerHome = getAsyncProfilerHome();
-    this.pid = getPid();
-    LOG.info("Servlet process PID: {} asyncProfilerHome: {}", pid,
-        asyncProfilerHome);
-    try {
-      Files.createDirectories(OUTPUT_DIR);
-    } catch (IOException e) {
-      LOG.error(
-          "Can't create the output directory for java profiler: " + OUTPUT_DIR,
-          e);
-    }
-  }
-
-  private Integer getPid() {
-    // JVM_PID is exported by bin/ozone
-    String pidStr = System.getenv("JVM_PID");
-
-    // if it is not set correctly, fall back to the RuntimeMXBean name,
-    // which is implementation specific
-    if (pidStr == null || pidStr.trim().isEmpty()) {
-      String name = ManagementFactory.getRuntimeMXBean().getName();
-      if (name != null) {
-        int idx = name.indexOf("@");
-        if (idx != -1) {
-          pidStr = name.substring(0, name.indexOf("@"));
-        }
-      }
-    }
-    try {
-      if (pidStr != null) {
-        return Integer.valueOf(pidStr);
-      }
-    } catch (NumberFormatException nfe) {
-      // ignore
-    }
-    return null;
-  }
-
-  public Process runCmdAsync(List<String> cmd) {
-    try {
-      LOG.info("Running command async: " + cmd);
-      return new ProcessBuilder(cmd).inheritIO().start();
-    } catch (IOException ex) {
-      throw new IllegalStateException(ex);
-    }
-  }
-
-  @VisibleForTesting
-  protected static String generateFileName(Integer pid, Output output,
-      Event event) {
-    return FILE_PREFIX + pid + "-" +
-        event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet()
-        + "." +
-        output.name().toLowerCase();
-  }
-
-  @VisibleForTesting
-  protected static String validateFileName(String filename) {
-    if (!FILE_NAME_PATTERN.matcher(filename).matches()) {
-      throw new IllegalArgumentException(
-          "Invalid file name parameter " + filename + " doesn't match pattern "
-              + FILE_NAME_PATTERN);
-
-    }
-    return filename;
-  }
-
-  @Override
-  protected void doGet(final HttpServletRequest req,
-      final HttpServletResponse resp) throws IOException {
-    // make sure async profiler home is set
-    if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) {
-      resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
-      setResponseHeader(resp);
-      resp.getWriter().write("ASYNC_PROFILER_HOME env is not set.");
-      return;
-    }
-
-    //download the finished file
-    if (req.getParameter("file") != null) {
-      doGetDownload(req.getParameter("file"), req, resp);
-      return;
-    }
-    // if pid is explicitly specified, use it, else default to the current process
-    Integer processId = getInteger(req, "pid", pid);
-
-    // if pid is not specified in query param and if current process pid
-    // cannot be determined
-    if (processId == null) {
-      resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
-      setResponseHeader(resp);
-      resp.getWriter().write(
-          "'pid' query parameter unspecified or unable to determine PID of "
-              + "current process.");
-      return;
-    }
-
-    final int duration =
-        getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
-    final Output output = getOutput(req);
-    final Event event = getEvent(req);
-    final Long interval = getLong(req, "interval");
-    final Integer jstackDepth = getInteger(req, "jstackdepth", null);
-    final Long bufsize = getLong(req, "bufsize");
-    final boolean thread = req.getParameterMap().containsKey("thread");
-    final boolean simple = req.getParameterMap().containsKey("simple");
-    final Integer width = getInteger(req, "width", null);
-    final Integer height = getInteger(req, "height", null);
-    final Double minwidth = getMinWidth(req);
-    final boolean reverse = req.getParameterMap().containsKey("reverse");
-
-    if (process == null || !process.isAlive()) {
-      try {
-        int lockTimeoutSecs = 3;
-        if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) {
-          try {
-            //Should be in sync with FILE_NAME_PATTERN
-            File outputFile =
-                OUTPUT_DIR.resolve(
-                    ProfileServlet.generateFileName(processId, output, event))
-                    .toFile();
-            List<String> cmd = new ArrayList<>();
-            cmd.add(asyncProfilerHome + PROFILER_SCRIPT);
-            cmd.add("-e");
-            cmd.add(event.getInternalName());
-            cmd.add("-d");
-            cmd.add("" + duration);
-            cmd.add("-o");
-            cmd.add(output.name().toLowerCase());
-            cmd.add("-f");
-            cmd.add(outputFile.getAbsolutePath());
-            if (interval != null) {
-              cmd.add("-i");
-              cmd.add(interval.toString());
-            }
-            if (jstackDepth != null) {
-              cmd.add("-j");
-              cmd.add(jstackDepth.toString());
-            }
-            if (bufsize != null) {
-              cmd.add("-b");
-              cmd.add(bufsize.toString());
-            }
-            if (thread) {
-              cmd.add("-t");
-            }
-            if (simple) {
-              cmd.add("-s");
-            }
-            if (width != null) {
-              cmd.add("--width");
-              cmd.add(width.toString());
-            }
-            if (height != null) {
-              cmd.add("--height");
-              cmd.add(height.toString());
-            }
-            if (minwidth != null) {
-              cmd.add("--minwidth");
-              cmd.add(minwidth.toString());
-            }
-            if (reverse) {
-              cmd.add("--reverse");
-            }
-            cmd.add(processId.toString());
-            process = runCmdAsync(cmd);
-
-            // set response and set refresh header to output location
-            setResponseHeader(resp);
-            resp.setStatus(HttpServletResponse.SC_ACCEPTED);
-            String relativeUrl = "/prof?file=" + outputFile.getName();
-            resp.getWriter().write(
-                "Started [" + event.getInternalName()
-                    + "] profiling. This page will automatically redirect to "
-                    +
-                    relativeUrl + " after " + duration
-                    + " seconds.\n\ncommand:\n" + Joiner.on(" ").join(cmd));
-            resp.getWriter().write(
-                "\n\n\nPlease make sure that you enabled the profiling on "
-                    + "kernel level:\n"
-                    + "echo 1 > /proc/sys/kernel/perf_event_paranoid\n"
-                    + "echo 0 > /proc/sys/kernel/kptr_restrict\n\n"
-                    + "See https://github"
-                    + ".com/jvm-profiling-tools/async-profiler#basic-usage"
-                    + " for more details.");
-            // to avoid auto-refresh by ProfileOutputServlet, refreshDelay
-            // can be specified via url param
-            int refreshDelay = getInteger(req, "refreshDelay", 0);
-
-            // instead of sending redirect, set auto-refresh so that browsers
-            // will refresh with redirected url
-            resp.setHeader("Refresh",
-                (duration + refreshDelay) + ";" + relativeUrl);
-            resp.getWriter().flush();
-          } finally {
-            profilerLock.unlock();
-          }
-        } else {
-          setResponseHeader(resp);
-          resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
-          resp.getWriter().write(
-              "Unable to acquire lock. Another instance of profiler might be "
-                  + "running.");
-          LOG.warn(
-              "Unable to acquire lock in {} seconds. Another instance of "
-                  + "profiler might be running.",
-              lockTimeoutSecs);
-        }
-      } catch (InterruptedException e) {
-        LOG.warn("Interrupted while acquiring profile lock.", e);
-        resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
-      }
-    } else {
-      setResponseHeader(resp);
-      resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
-      resp.getWriter()
-          .write("Another instance of profiler is already running.");
-    }
-  }
-
-  protected void doGetDownload(String fileName, final HttpServletRequest req,
-      final HttpServletResponse resp)
-      throws IOException {
-
-    String safeFileName = validateFileName(fileName);
-    File requestedFile =
-        ProfileServlet.OUTPUT_DIR
-            .resolve(safeFileName)
-            .toAbsolutePath().toFile();
-    // async-profiler version 1.4 writes 'Started [cpu] profiling' to the
-    // output file while the profiler is running, and that text gets
-    // replaced by the final output. If the final output is not ready yet,
-    // the file size will be <100 bytes (in all modes).
-    if (requestedFile.length() < 100) {
-      LOG.info("{} is incomplete. Sending auto-refresh header..",
-          requestedFile);
-      resp.setHeader("Refresh",
-          "2," + req.getRequestURI() + "?file=" + safeFileName);
-      resp.getWriter().write(
-          "This page will auto-refresh every 2 second until output file is "
-              + "ready..");
-    } else {
-      if (safeFileName.endsWith(".svg")) {
-        resp.setContentType("image/svg+xml");
-      } else if (safeFileName.endsWith(".tree")) {
-        resp.setContentType("text/html");
-      }
-      try (InputStream input = new FileInputStream(requestedFile)) {
-        IOUtils.copy(input, resp.getOutputStream());
-      }
-    }
-  }
-
-  private Integer getInteger(final HttpServletRequest req,
-      final String param,
-      final Integer defaultValue) {
-    final String value = req.getParameter(param);
-    if (value != null) {
-      try {
-        return Integer.valueOf(value);
-      } catch (NumberFormatException e) {
-        return defaultValue;
-      }
-    }
-    return defaultValue;
-  }
-
-  private Long getLong(final HttpServletRequest req, final String param) {
-    final String value = req.getParameter(param);
-    if (value != null) {
-      try {
-        return Long.valueOf(value);
-      } catch (NumberFormatException e) {
-        return null;
-      }
-    }
-    return null;
-  }
-
-  private Double getMinWidth(final HttpServletRequest req) {
-    final String value = req.getParameter("minwidth");
-    if (value != null) {
-      try {
-        return Double.valueOf(value);
-      } catch (NumberFormatException e) {
-        return null;
-      }
-    }
-    return null;
-  }
-
-  private Event getEvent(final HttpServletRequest req) {
-    final String eventArg = req.getParameter("event");
-    if (eventArg != null) {
-      Event event = Event.fromInternalName(eventArg);
-      return event == null ? Event.CPU : event;
-    }
-    return Event.CPU;
-  }
-
-  private Output getOutput(final HttpServletRequest req) {
-    final String outputArg = req.getParameter("output");
-    if (req.getParameter("output") != null) {
-      try {
-        return Output.valueOf(outputArg.trim().toUpperCase());
-      } catch (IllegalArgumentException e) {
-        return Output.SVG;
-      }
-    }
-    return Output.SVG;
-  }
-
-  private void setResponseHeader(final HttpServletResponse response) {
-    response.setHeader(ACCESS_CONTROL_ALLOW_METHODS, ALLOWED_METHODS);
-    response.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
-    response.setContentType(CONTENT_TYPE_TEXT);
-  }
-
-  static String getAsyncProfilerHome() {
-    String asyncProfilerHome = System.getenv(ASYNC_PROFILER_HOME_ENV);
-    // if the env variable is not set, see if the system property
-    // -Dasync.profiler.home=/path/to/async/profiler/home is set
-    if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) {
-      asyncProfilerHome =
-          System.getProperty(ASYNC_PROFILER_HOME_SYSTEM_PROPERTY);
-    }
-
-    return asyncProfilerHome;
-  }
-
-  enum Event {
-    CPU("cpu"),
-    ALLOC("alloc"),
-    LOCK("lock"),
-    PAGE_FAULTS("page-faults"),
-    CONTEXT_SWITCHES("context-switches"),
-    CYCLES("cycles"),
-    INSTRUCTIONS("instructions"),
-    CACHE_REFERENCES("cache-references"),
-    CACHE_MISSES("cache-misses"),
-    BRANCHES("branches"),
-    BRANCH_MISSES("branch-misses"),
-    BUS_CYCLES("bus-cycles"),
-    L1_DCACHE_LOAD_MISSES("L1-dcache-load-misses"),
-    LLC_LOAD_MISSES("LLC-load-misses"),
-    DTLB_LOAD_MISSES("dTLB-load-misses"),
-    MEM_BREAKPOINT("mem-breakpoint"),
-    TRACE_TRACEPOINT("trace-tracepoint");
-
-    private String internalName;
-
-    Event(final String internalName) {
-      this.internalName = internalName;
-    }
-
-    public String getInternalName() {
-      return internalName;
-    }
-
-    public static Event fromInternalName(final String name) {
-      for (Event event : values()) {
-        if (event.getInternalName().equalsIgnoreCase(name)) {
-          return event;
-        }
-      }
-
-      return null;
-    }
-  }
-
-  enum Output {
-    SUMMARY,
-    TRACES,
-    FLAT,
-    COLLAPSED,
-    SVG,
-    TREE,
-    JFR
-  }
-
-}
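
A quick worked example of the file-name round trip (written as if from a test in the same package, since the helpers and enums are package-visible; the PID is illustrative):

```java
// With ID_GEN freshly initialized, the first generated name for PID 1234,
// SVG output and the CPU event is "async-prof-pid-1234-cpu-1.svg",
// which matches FILE_NAME_PATTERN and therefore passes validation.
String name = ProfileServlet.generateFileName(
    1234, ProfileServlet.Output.SVG, ProfileServlet.Event.CPU);
String safe = ProfileServlet.validateFileName(name); // returns name unchanged
```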
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java
deleted file mode 100644
index f37d323..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server;
-
-import static org.apache.hadoop.hdds.utils.RocksDBStoreMBean.ROCKSDB_CONTEXT_PREFIX;
-
-import java.io.IOException;
-import java.io.Writer;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.regex.Pattern;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.metrics2.AbstractMetric;
-import org.apache.hadoop.metrics2.MetricType;
-import org.apache.hadoop.metrics2.MetricsRecord;
-import org.apache.hadoop.metrics2.MetricsSink;
-import org.apache.hadoop.metrics2.MetricsTag;
-
-import org.apache.commons.configuration2.SubsetConfiguration;
-
-/**
- * Metrics sink for the Prometheus exporter.
- * <p>
- * Stores the metric data in memory and returns it on request.
- */
-public class PrometheusMetricsSink implements MetricsSink {
-
-  /**
-   * Cached output lines for each metric.
-   */
-  private final Map<String, String> metricLines = new ConcurrentHashMap<>();
-
-  private static final Pattern SPLIT_PATTERN =
-      Pattern.compile("(?<!(^|[A-Z_]))(?=[A-Z])|(?<!^)(?=[A-Z][a-z])");
-
-  private static final Pattern REPLACE_PATTERN =
-      Pattern.compile("[^a-zA-Z0-9]+");
-
-  public PrometheusMetricsSink() {
-  }
-
-  @Override
-  public void putMetrics(MetricsRecord metricsRecord) {
-    for (AbstractMetric metrics : metricsRecord.metrics()) {
-      if (metrics.type() == MetricType.COUNTER
-          || metrics.type() == MetricType.GAUGE) {
-
-        String key = prometheusName(
-            metricsRecord.name(), metrics.name());
-
-        StringBuilder builder = new StringBuilder();
-        builder.append("# TYPE ")
-            .append(key)
-            .append(" ")
-            .append(metrics.type().toString().toLowerCase())
-            .append("\n");
-
-        StringBuilder prometheusMetricKey = new StringBuilder();
-        prometheusMetricKey.append(key)
-            .append("{");
-        String sep = "";
-
-        //add tags
-        for (MetricsTag tag : metricsRecord.tags()) {
-          String tagName = tag.name().toLowerCase();
-
-          //ignore specific tag which includes sub-hierarchy
-          if (!tagName.equals("numopenconnectionsperuser")) {
-            prometheusMetricKey.append(sep)
-                .append(tagName)
-                .append("=\"")
-                .append(tag.value())
-                .append("\"");
-            sep = ",";
-          }
-        }
-        prometheusMetricKey.append("}");
-
-        String prometheusMetricKeyAsString = prometheusMetricKey.toString();
-        builder.append(prometheusMetricKeyAsString);
-        builder.append(" ");
-        builder.append(metrics.value());
-        builder.append("\n");
-        metricLines.put(prometheusMetricKeyAsString, builder.toString());
-
-      }
-    }
-  }
-
-  /**
-   * Converts CamelCase-based names to lower-case, underscore-separated
-   * names, following Prometheus naming conventions.
-   */
-  public String prometheusName(String recordName,
-      String metricName) {
-
-    //RocksDB metric names already have underscores as delimiters.
-    if (StringUtils.isNotEmpty(recordName) &&
-        recordName.startsWith(ROCKSDB_CONTEXT_PREFIX)) {
-      return recordName.toLowerCase() + "_" + metricName.toLowerCase();
-    }
-
-    String baseName = StringUtils.capitalize(recordName)
-        + StringUtils.capitalize(metricName);
-    return normalizeName(baseName);
-  }
-
-  public static String normalizeName(String baseName) {
-    String[] parts = SPLIT_PATTERN.split(baseName);
-    String result = String.join("_", parts).toLowerCase();
-    return REPLACE_PATTERN.matcher(result).replaceAll("_");
-  }
-
-  @Override
-  public void flush() {
-
-  }
-
-  @Override
-  public void init(SubsetConfiguration subsetConfiguration) {
-
-  }
-
-  public void writeMetrics(Writer writer) throws IOException {
-    for (String line : metricLines.values()) {
-      writer.write(line);
-    }
-  }
-}
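
Worked examples of the normalization above; the values follow directly from SPLIT_PATTERN and REPLACE_PATTERN, while the record and metric names themselves are illustrative:

```java
PrometheusMetricsSink sink = new PrometheusMetricsSink();

// "JvmMetrics" + "GcCount" -> "JvmMetricsGcCount", split at the
// camel-case boundaries, joined with '_' and lower-cased:
String a = sink.prometheusName("JvmMetrics", "GcCount");
// a == "jvm_metrics_gc_count"

// normalizeName can also be called directly:
String b = PrometheusMetricsSink.normalizeName("BytesWritten");
// b == "bytes_written"
```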
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusServlet.java
deleted file mode 100644
index 726baa3..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusServlet.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server;
-
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-
-/**
- * Servlet to publish hadoop metrics in prometheus format.
- */
-public class PrometheusServlet extends HttpServlet {
-
-  public PrometheusMetricsSink getPrometheusSink() {
-    return
-        (PrometheusMetricsSink) getServletContext().getAttribute(
-            BaseHttpServer.PROMETHEUS_SINK);
-  }
-
-  @Override
-  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
-      throws ServletException, IOException {
-    DefaultMetricsSystem.instance().publishMetricsNow();
-    getPrometheusSink().writeMetrics(resp.getWriter());
-    resp.getWriter().flush();
-  }
-}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
deleted file mode 100644
index 33a1ca9..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
+++ /dev/null
@@ -1,227 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.http.client.methods.HttpRequestBase;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.util.Collection;
-
-/**
- * Generic utilities for all HDDS/Ozone servers.
- */
-public final class ServerUtils {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      ServerUtils.class);
-
-  private ServerUtils() {
-  }
-
-  /**
-   * Checks that a given value is within a range.
-   *
-   * For example, sanitizeUserArgs(key, 17, baseKey, 3, 5, 10)
-   * ensures that 17 is greater than or equal to 3 * 5 and less than or
-   * equal to 3 * 10.
-   *
-   * @param key           - config key of the value
-   * @param valueTocheck  - value to check
-   * @param baseKey       - config key of the baseValue
-   * @param baseValue     - the base value that is being used.
-   * @param minFactor     - range min; e.g. a factor of 2 ensures that
-   *                        valueTocheck is at least twice the baseValue.
-   * @param maxFactor     - range max
-   * @return long
-   */
-  public static long sanitizeUserArgs(String key, long valueTocheck,
-      String baseKey, long baseValue, long minFactor, long maxFactor) {
-    long minLimit = baseValue * minFactor;
-    long maxLimit = baseValue * maxFactor;
-    if (valueTocheck < minLimit) {
-      LOG.warn(
-          "{} value = {} is smaller than min = {} based on"
-          + " the key value of {}, reset to the min value {}.",
-          key, valueTocheck, minLimit, baseKey, minLimit);
-      valueTocheck = minLimit;
-    } else if (valueTocheck > maxLimit) {
-      LOG.warn(
-          "{} value = {} is larger than max = {} based on"
-          + " the key value of {}, reset to the max value {}.",
-          key, valueTocheck, maxLimit, baseKey, maxLimit);
-      valueTocheck = maxLimit;
-    }
-
-    return valueTocheck;
-  }
-
-
-  /**
-   * After starting an RPC server, updates configuration with the actual
-   * listening address of that server. The listening address may be different
-   * from the configured address if, for example, the configured address uses
-   * port 0 to request use of an ephemeral port.
-   *
-   * @param conf configuration to update
-   * @param rpcAddressKey configuration key for RPC server address
-   * @param addr configured address
-   * @param rpcServer started RPC server.
-   */
-  public static InetSocketAddress updateRPCListenAddress(
-      OzoneConfiguration conf, String rpcAddressKey,
-      InetSocketAddress addr, RPC.Server rpcServer) {
-    return updateListenAddress(conf, rpcAddressKey, addr,
-        rpcServer.getListenerAddress());
-  }
-
-
-  /**
-   * After starting a server, updates configuration with the actual
-   * listening address of that server. The listening address may be different
-   * from the configured address if, for example, the configured address uses
-   * port 0 to request use of an ephemeral port.
-   *
-   * @param conf       configuration to update
-   * @param addressKey configuration key for RPC server address
-   * @param addr       configured address
-   * @param listenAddr the real listening address.
-   */
-  public static InetSocketAddress updateListenAddress(OzoneConfiguration conf,
-      String addressKey, InetSocketAddress addr, InetSocketAddress listenAddr) {
-    InetSocketAddress updatedAddr = new InetSocketAddress(addr.getHostString(),
-        listenAddr.getPort());
-    conf.set(addressKey,
-        addr.getHostString() + ":" + listenAddr.getPort());
-    return updatedAddr;
-  }
-
-
-  /**
-   * Releases an HTTP connection if the request is not null.
-   * @param request the HTTP request whose connection should be released
-   */
-  public static void releaseConnection(HttpRequestBase request) {
-    if (request != null) {
-      request.releaseConnection();
-    }
-  }
-
-  /**
-   * Get the location where SCM should store its metadata directories.
-   * Fall back to OZONE_METADATA_DIRS if not defined.
-   *
-   * @param conf configuration to read the directory keys from
-   * @return the metadata directory that SCM should use
-   */
-  public static File getScmDbDir(Configuration conf) {
-    File metadataDir = getDirectoryFromConfig(conf,
-        ScmConfigKeys.OZONE_SCM_DB_DIRS, "SCM");
-    if (metadataDir != null) {
-      return metadataDir;
-    }
-
-    LOG.warn("{} is not configured. We recommend adding this setting. " +
-        "Falling back to {} instead.",
-        ScmConfigKeys.OZONE_SCM_DB_DIRS, HddsConfigKeys.OZONE_METADATA_DIRS);
-    return getOzoneMetaDirPath(conf);
-  }
-
-  /**
-   * Utility method to get value of a given key that corresponds to a DB
-   * directory.
-   * @param conf configuration bag
-   * @param key Key to test
-   * @param componentName Which component's key is this
-   * @return File created from the value of the key in conf.
-   */
-  public static File getDirectoryFromConfig(Configuration conf,
-                                            String key,
-                                            String componentName) {
-    final Collection<String> metadirs = conf.getTrimmedStringCollection(key);
-
-    if (metadirs.size() > 1) {
-      throw new IllegalArgumentException(
-          "Bad config setting " + key +
-              ". " + componentName +
-              " does not support multiple metadata dirs currently");
-    }
-
-    if (metadirs.size() == 1) {
-      final File dbDirPath = new File(metadirs.iterator().next());
-      if (!dbDirPath.exists() && !dbDirPath.mkdirs()) {
-        throw new IllegalArgumentException("Unable to create directory " +
-            dbDirPath + " specified in configuration setting " +
-            key);
-      }
-      return dbDirPath;
-    }
-
-    return null;
-  }
-
-  /**
-   * Checks and creates Ozone Metadir Path if it does not exist.
-   *
-   * @param conf - Configuration
-   * @return File MetaDir
-   * @throws IllegalArgumentException if the configuration setting is not set
-   */
-  public static File getOzoneMetaDirPath(Configuration conf) {
-    File dirPath = getDirectoryFromConfig(conf,
-        HddsConfigKeys.OZONE_METADATA_DIRS, "Ozone");
-    if (dirPath == null) {
-      throw new IllegalArgumentException(
-          HddsConfigKeys.OZONE_METADATA_DIRS + " must be defined.");
-    }
-    return dirPath;
-  }
-
-  public static void setOzoneMetaDirPath(OzoneConfiguration conf,
-      String path) {
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, path);
-  }
-
-  /**
-   * Returns the service-specific metadata directory.
-   * <p>
-   * If the directory is missing, the method tries to create it.
-   *
-   * @param conf The ozone configuration object
-   * @param key The configuration key which specify the directory.
-   * @return The path of the directory.
-   */
-  public static File getDBPath(Configuration conf, String key) {
-    final File dbDirPath =
-        getDirectoryFromConfig(conf, key, "OM");
-    if (dbDirPath != null) {
-      return dbDirPath;
-    }
-
-    LOG.warn("{} is not configured. We recommend adding this setting. "
-            + "Falling back to {} instead.", key,
-        HddsConfigKeys.OZONE_METADATA_DIRS);
-    return ServerUtils.getOzoneMetaDirPath(conf);
-  }
-}
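
A worked example of the clamping performed by sanitizeUserArgs (the keys are illustrative): with baseValue 3, minFactor 5, and maxFactor 10, the allowed range is [15, 30].

```java
// 17 lies inside [3*5, 3*10] = [15, 30], so it is returned unchanged:
long ok = ServerUtils.sanitizeUserArgs("x.value.key", 17, "x.base.key", 3, 5, 10);
// ok == 17

// 50 exceeds the max limit of 30, so it is reset to 30 (with a warning):
long clamped = ServerUtils.sanitizeUserArgs("x.value.key", 50, "x.base.key", 3, 5, 10);
// clamped == 30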
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java
deleted file mode 100644
index bcd75f3..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.server;
-
-/**
- * Common runtime information for any service component.
- *
- * Note: it is intentional not to use MXBean or MBean as a suffix of the name.
- *
- * Most of the services extend the ServiceRuntimeInfoImpl class and also
- * implement a specific MXBean interface which extends this interface.
- *
- * Inheritance from multiple paths could confuse the JMX system, and
- * some JMX properties could disappear.
- *
- * The solution is to always extend this interface and use the JMX naming
- * convention in the new interface.
- */
-public interface ServiceRuntimeInfo {
-
-  /**
-   * Gets the version of Hadoop.
-   *
-   * @return the version
-   */
-  String getVersion();
-
-  /**
-   * Gets the version of the software running on the service.
-   *
-   * @return a string representing the version
-   */
-  String getSoftwareVersion();
-
-  /**
-   * Get the compilation information which contains date, user and branch.
-   *
-   * @return the compilation information, as a JSON string.
-   */
-  String getCompileInfo();
-
-  /**
-   * Gets the service start time in milliseconds.
-   *
-   * @return the start time in msec
-   */
-  long getStartedTimeInMillis();
-
-}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java
deleted file mode 100644
index 2dffc6f..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.server;
-
-import org.apache.hadoop.hdds.utils.VersionInfo;
-
-/**
- * Helper base class to report the standard version and runtime information.
- *
- */
-public class ServiceRuntimeInfoImpl implements ServiceRuntimeInfo {
-
-  private long startedTimeInMillis;
-  private final VersionInfo versionInfo;
-
-  protected ServiceRuntimeInfoImpl(VersionInfo versionInfo) {
-    this.versionInfo = versionInfo;
-  }
-
-  @Override
-  public String getVersion() {
-    return versionInfo.getVersion() + ", r" + versionInfo.getRevision();
-  }
-
-  @Override
-  public String getSoftwareVersion() {
-    return versionInfo.getVersion();
-  }
-
-  @Override
-  public String getCompileInfo() {
-    return versionInfo.getDate() + " by " + versionInfo.getUser() + " from "
-        + versionInfo.getBranch();
-  }
-
-  @Override
-  public long getStartedTimeInMillis() {
-    return startedTimeInMillis;
-  }
-
-  public void setStartTime() {
-    startedTimeInMillis = System.currentTimeMillis();
-  }
-
-}
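The pattern described by the two files above can be sketched as follows; SCMMXBean and its attribute are assumed names, shown only to illustrate the single-inheritance-path convention from the ServiceRuntimeInfo javadoc:

  // The service-specific MXBean extends ServiceRuntimeInfo directly...
  public interface SCMMXBean extends ServiceRuntimeInfo {
    int getNodeCount(); // illustrative service-specific attribute
  }

  // ...and the server extends the helper base class exactly once.
  public class StorageContainerManager extends ServiceRuntimeInfoImpl
      implements SCMMXBean {

    protected StorageContainerManager(VersionInfo versionInfo) {
      super(versionInfo);
    }

    public void start() {
      setStartTime(); // later exposed via getStartedTimeInMillis()
    }

    @Override
    public int getNodeCount() {
      return 0; // placeholder
    }
  }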
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java
deleted file mode 100644
index 810c8b3..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-/**
- * Identifier of an async event.
- *
- * @param <PAYLOAD> The message payload type of this event.
- */
-public interface Event<PAYLOAD> {
-
-  /**
-   * The type of the event payload. Payload contains all the required data
-   * to process the event.
-   *
-   */
-  Class<PAYLOAD> getPayloadType();
-
-  /**
-   * The human-readable name of the event.
-   *
-   * Used for display in thread names
-   * and monitoring.
-   *
-   */
-  String getName();
-}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java
deleted file mode 100644
index 4257839..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-/**
- * Executors define the way an EventHandler should be called.
- * <p>
- * Executors are used only by the EventQueue and they do the thread separation
- * between the caller and the EventHandler.
- * <p>
- * Executors should guarantee that only one thread is executing one
- * EventHandler at the same time.
- *
- * @param <PAYLOAD> the payload type of the event.
- */
-public interface EventExecutor<PAYLOAD> extends AutoCloseable {
-
-  /**
-   * Process an event payload.
-   *
-   * @param handler      the handler to process the payload
-   * @param eventPayload to be processed.
-   * @param publisher    to send response/other message forward to the chain.
-   */
-  void onMessage(EventHandler<PAYLOAD> handler,
-      PAYLOAD eventPayload,
-      EventPublisher
-          publisher);
-
-  /**
-   * Return the number of the failed events.
-   */
-  long failedEvents();
-
-
-  /**
-   * Return the number of the successfully processed events.
-   */
-  long successfulEvents();
-
-  /**
-   * Return the total number of events queued so far (processed or not).
-   */
-  long queuedEvents();
-
-  /**
-   * The human-readable name for the event executor.
-   * <p>
-   * Used in monitoring and logging.
-   *
-   */
-  String getName();
-}
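A custom EventExecutor only has to uphold the single-thread guarantee described above; a minimal, hypothetical inline executor for unit tests could look like this (it runs handlers on the caller's thread, so the guarantee holds trivially):

  public class InlineExecutor<P> implements EventExecutor<P> {

    private long done;
    private long failed;

    @Override
    public void onMessage(EventHandler<P> handler, P payload,
        EventPublisher publisher) {
      try {
        handler.onMessage(payload, publisher);
        done++;
      } catch (Exception ex) {
        failed++;
      }
    }

    @Override
    public long failedEvents() {
      return failed;
    }

    @Override
    public long successfulEvents() {
      return done;
    }

    @Override
    public long queuedEvents() {
      // everything queued so far has already been processed inline
      return done + failed;
    }

    @Override
    public String getName() {
      return "Inline";
    }

    @Override
    public void close() {
      // nothing to shut down
    }
  }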
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java
deleted file mode 100644
index f40fc9e..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-/**
- * Processor that reacts to an event.
- *
- * EventExecutors should guarantee that the implementations are called only
- * from one thread.
- *
- * @param <PAYLOAD>
- */
-@FunctionalInterface
-public interface EventHandler<PAYLOAD> {
-
-  void onMessage(PAYLOAD payload, EventPublisher publisher);
-
-}
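Because EventHandler is a @FunctionalInterface, handlers are usually written as lambdas; a tiny illustrative sketch:

  EventHandler<String> logHandler =
      (payload, publisher) -> System.out.println("received: " + payload);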
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java
deleted file mode 100644
index a47fb57..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-/**
- * Client interface to send a new event.
- */
-public interface EventPublisher {
-
-  <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void
-      fireEvent(EVENT_TYPE event, PAYLOAD payload);
-
-}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
deleted file mode 100644
index cd09da6..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ /dev/null
@@ -1,262 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-
-import com.google.common.base.Preconditions;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-/**
- * Simple async event processing utility.
- * <p>
- * Event queue handles a collection of event handlers and routes the incoming
- * events to one (or more) event handlers.
- */
-public class EventQueue implements EventPublisher, AutoCloseable {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(EventQueue.class);
-
-  private static final String EXECUTOR_NAME_SEPARATOR = "For";
-
-  private final Map<Event, Map<EventExecutor, List<EventHandler>>> executors =
-      new HashMap<>();
-
-  private final AtomicLong queuedCount = new AtomicLong(0);
-
-  private final AtomicLong eventCount = new AtomicLong(0);
-
-  private boolean isRunning = true;
-
-  private static final Gson TRACING_SERIALIZER = new GsonBuilder().create();
-
-  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
-      EVENT_TYPE event, EventHandler<PAYLOAD> handler) {
-    this.addHandler(event, handler, generateHandlerName(handler));
-  }
-
-  /**
-   * Add new handler to the event queue.
-   * <p>
-   * By default a dedicated single-threaded executor will be created to
-   * deliver the events to the registered event handler.
-   *
-   * @param event        Triggering event.
-   * @param handler      Handler of the event (will be called from a separate
-   *                     thread)
-   * @param handlerName  The name of handler (should be unique together with
-   *                     the event name)
-   * @param <PAYLOAD>    The type of the event payload.
-   * @param <EVENT_TYPE> The type of the event identifier.
-   */
-  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
-      EVENT_TYPE event, EventHandler<PAYLOAD> handler, String handlerName) {
-    validateEvent(event);
-    Preconditions.checkNotNull(handler, "Handler should not be null.");
-    String executorName =
-        StringUtils.camelize(event.getName()) + EXECUTOR_NAME_SEPARATOR
-            + handlerName;
-    this.addHandler(event, new SingleThreadExecutor<>(executorName), handler);
-  }
-
-  private <EVENT_TYPE extends Event<?>> void validateEvent(EVENT_TYPE event) {
-    Preconditions
-        .checkArgument(!event.getName().contains(EXECUTOR_NAME_SEPARATOR),
-            "Event name should not contain " + EXECUTOR_NAME_SEPARATOR
-                + " string.");
-
-  }
-
-  private <PAYLOAD> String generateHandlerName(EventHandler<PAYLOAD> handler) {
-    if (!"".equals(handler.getClass().getSimpleName())) {
-      return handler.getClass().getSimpleName();
-    } else {
-      return handler.getClass().getName();
-    }
-  }
-
-  /**
-   * Add event handler with custom executor.
-   *
-   * @param event        Triggering event.
-   * @param executor     The executor implementation to deliver events from
-   *                     separate threads. Please keep in mind that
-   *                     registering metrics is the responsibility of the
-   *                     caller.
-   * @param handler      Handler of the event (will be called from a separate
-   *                     thread)
-   * @param <PAYLOAD>    The type of the event payload.
-   * @param <EVENT_TYPE> The type of the event identifier.
-   */
-  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
-      EVENT_TYPE event, EventExecutor<PAYLOAD> executor,
-      EventHandler<PAYLOAD> handler) {
-    if (!isRunning) {
-      LOG.warn("Not adding handler for {}, EventQueue is not running", event);
-      return;
-    }
-    validateEvent(event);
-    executors.putIfAbsent(event, new HashMap<>());
-    executors.get(event).putIfAbsent(executor, new ArrayList<>());
-
-    executors.get(event).get(executor).add(handler);
-  }
-
-  /**
-   * Route an event with payload to the right listener(s).
-   *
-   * @param event   The event identifier
-   * @param payload The payload of the event.
-   * If there is no EventHandler registered for the event, a warning is
-   * logged and the payload is dropped.
-   */
-  @Override
-  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
-      EVENT_TYPE event, PAYLOAD payload) {
-
-    if (!isRunning) {
-      LOG.warn("Processing of {} is skipped, EventQueue is not running", event);
-      return;
-    }
-
-    Map<EventExecutor, List<EventHandler>> eventExecutorListMap =
-        this.executors.get(event);
-
-    eventCount.incrementAndGet();
-    if (eventExecutorListMap != null) {
-
-      for (Map.Entry<EventExecutor, List<EventHandler>> executorAndHandlers :
-          eventExecutorListMap.entrySet()) {
-
-        for (EventHandler handler : executorAndHandlers.getValue()) {
-          queuedCount.incrementAndGet();
-          if (LOG.isTraceEnabled()) {
-            LOG.trace(
-                "Delivering event {} to executor/handler {}: <json>{}</json>",
-                event.getName(),
-                executorAndHandlers.getKey().getName(),
-                TRACING_SERIALIZER.toJson(payload).replaceAll("\n", "\\\\n"));
-          } else if (LOG.isDebugEnabled()) {
-            LOG.debug("Delivering event {} to executor/handler {}: {}",
-                event.getName(),
-                executorAndHandlers.getKey().getName(),
-                payload.getClass().getSimpleName());
-          }
-          executorAndHandlers.getKey()
-              .onMessage(handler, payload, this);
-
-        }
-      }
-
-    } else {
-      LOG.warn("No event handler registered for event " + event);
-    }
-
-  }
-
-  /**
-   * This is just for unit testing; don't use it in production code.
-   * <p>
-   * It waits for all messages to be processed. If one event handler invokes
-   * another one, the latter also has to finish.
-   * <p>
-   * Long counter overflow is not handled; therefore it's safe only for unit
-   * testing.
-   * <p>
-   * This method is only eventually consistent. In some cases it could return
-   * even if there are new messages in some of the handlers. But in the simple
-   * case (one message) it will return only after the message is processed and
-   * all the dependent messages (messages which are sent by the current
-   * handlers) are processed.
-   *
-   * @param timeout Timeout in milliseconds to wait for the processing.
-   */
-  @VisibleForTesting
-  public void processAll(long timeout) {
-    long currentTime = Time.now();
-    while (true) {
-
-      if (!isRunning) {
-        LOG.warn("Processing of event skipped. EventQueue is not running");
-        return;
-      }
-
-
-      Stream<EventExecutor> allExecutor = this.executors.values().stream()
-          .flatMap(handlerMap -> handlerMap.keySet().stream());
-
-      boolean allIdle =
-          allExecutor.allMatch(executor -> executor.queuedEvents() == executor
-              .successfulEvents() + executor.failedEvents());
-
-      if (allIdle) {
-        return;
-      }
-
-      try {
-        Thread.sleep(100);
-      } catch (InterruptedException e) {
-        LOG.warn("Interrupted exception while sleeping.", e);
-        // We ignore this exception for the time being. Review: should we
-        // propagate it back to the caller?
-      }
-
-      if (Time.now() > currentTime + timeout) {
-        throw new AssertionError(
-            "Messages are not processed in the given timeframe. Queued: "
-                + queuedCount.get() + " Fired: " + eventCount.get());
-      }
-    }
-  }
-
-  @Override
-  public void close() {
-
-    isRunning = false;
-
-    Set<EventExecutor> allExecutors = this.executors.values().stream()
-        .flatMap(handlerMap -> handlerMap.keySet().stream())
-        .collect(Collectors.toSet());
-
-    allExecutors.forEach(executor -> {
-      try {
-        executor.close();
-      } catch (Exception ex) {
-        LOG.error("Can't close the executor " + executor.getName(), ex);
-      }
-    });
-  }
-
-}
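End to end, the queue is used roughly as follows (event name and payload are illustrative); note that processAll is the test-only helper documented above:

  TypedEvent<String> greeting = new TypedEvent<>(String.class, "Greeting");

  try (EventQueue queue = new EventQueue()) {
    // a dedicated single-threaded executor is created behind the scenes
    queue.addHandler(greeting,
        (payload, publisher) -> System.out.println("handled: " + payload));

    queue.fireEvent(greeting, "hello");
    queue.processAll(1000); // wait up to one second (milliseconds)
  }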
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
deleted file mode 100644
index 301c71e..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.function.Predicate;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.ozone.lease.Lease;
-import org.apache.hadoop.ozone.lease.LeaseAlreadyExistException;
-import org.apache.hadoop.ozone.lease.LeaseExpiredException;
-import org.apache.hadoop.ozone.lease.LeaseManager;
-import org.apache.hadoop.ozone.lease.LeaseNotFoundException;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.commons.collections.map.HashedMap;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Event watcher that (re)sends a message after a timeout.
- * <p>
- * The event watcher will send the tracked payload/event after a timeout period
- * unless a confirmation of the original event (a completion event) arrives.
- *
- * @param <TIMEOUT_PAYLOAD>    The type of the events which are tracked.
- * @param <COMPLETION_PAYLOAD> The type of event which could cancel the
- *                             tracking.
- */
-@SuppressWarnings("CheckStyle")
-public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
-    IdentifiableEventPayload,
-    COMPLETION_PAYLOAD extends IdentifiableEventPayload> {
-
-  private static final Logger LOG = LoggerFactory.getLogger(EventWatcher.class);
-
-  private final Event<TIMEOUT_PAYLOAD> startEvent;
-
-  private final Event<COMPLETION_PAYLOAD> completionEvent;
-
-  private final LeaseManager<Long> leaseManager;
-
-  private final EventWatcherMetrics metrics;
-
-  private final String name;
-
-  private final Map<Long, TIMEOUT_PAYLOAD> trackedEventsByID =
-      new ConcurrentHashMap<>();
-
-  private final Set<TIMEOUT_PAYLOAD> trackedEvents = new HashSet<>();
-
-  private final Map<Long, Long> startTrackingTimes = new HashedMap();
-
-  public EventWatcher(String name, Event<TIMEOUT_PAYLOAD> startEvent,
-      Event<COMPLETION_PAYLOAD> completionEvent,
-      LeaseManager<Long> leaseManager) {
-    this.startEvent = startEvent;
-    this.completionEvent = completionEvent;
-    this.leaseManager = leaseManager;
-    this.metrics = new EventWatcherMetrics();
-    Preconditions.checkNotNull(name);
-    if (name.equals("")) {
-      name = getClass().getSimpleName();
-    }
-    if (name.equals("")) {
-      //for anonymous inner classes
-      name = getClass().getName();
-    }
-    this.name = name;
-  }
-
-  public EventWatcher(Event<TIMEOUT_PAYLOAD> startEvent,
-      Event<COMPLETION_PAYLOAD> completionEvent,
-      LeaseManager<Long> leaseManager) {
-    this("", startEvent, completionEvent, leaseManager);
-  }
-
-  public void start(EventQueue queue) {
-
-    queue.addHandler(startEvent, this::handleStartEvent);
-
-    queue.addHandler(completionEvent, (completionPayload, publisher) -> {
-      try {
-        handleCompletion(completionPayload, publisher);
-      } catch (LeaseNotFoundException e) {
-        //It's already done. Too late, we already retried it.
-        //Not a real problem.
-        LOG.warn("Completion event without active lease. Id={}",
-            completionPayload.getId());
-      }
-    });
-
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.register(name, "EventWatcher metrics", metrics);
-  }
-
-  private synchronized void handleStartEvent(TIMEOUT_PAYLOAD payload,
-      EventPublisher publisher) {
-    metrics.incrementTrackedEvents();
-    long identifier = payload.getId();
-    startTrackingTimes.put(identifier, System.currentTimeMillis());
-
-    trackedEventsByID.put(identifier, payload);
-    trackedEvents.add(payload);
-    try {
-      Lease<Long> lease = leaseManager.acquire(identifier);
-      try {
-        lease.registerCallBack(() -> {
-          handleTimeout(publisher, identifier);
-          return null;
-        });
-
-      } catch (LeaseExpiredException e) {
-        handleTimeout(publisher, identifier);
-      }
-    } catch (LeaseAlreadyExistException e) {
-      //No problem at all. But timer is not reset.
-    }
-  }
-
-  protected synchronized void handleCompletion(COMPLETION_PAYLOAD
-      completionPayload, EventPublisher publisher) throws
-      LeaseNotFoundException {
-    long id = completionPayload.getId();
-    leaseManager.release(id);
-    TIMEOUT_PAYLOAD payload = trackedEventsByID.remove(id);
-    if (trackedEvents.remove(payload)) {
-      metrics.incrementCompletedEvents();
-      long originalTime = startTrackingTimes.remove(id);
-      metrics.updateFinishingTime(System.currentTimeMillis() - originalTime);
-      onFinished(publisher, payload);
-    }
-  }
-
-  private synchronized void handleTimeout(EventPublisher publisher,
-      long identifier) {
-    metrics.incrementTimedOutEvents();
-    TIMEOUT_PAYLOAD payload = trackedEventsByID.remove(identifier);
-    trackedEvents.remove(payload);
-    startTrackingTimes.remove(payload.getId());
-    onTimeout(publisher, payload);
-  }
-
-
-  /**
-   * Check if a specific payload is in-progress.
-   */
-  public synchronized boolean contains(TIMEOUT_PAYLOAD payload) {
-    return trackedEvents.contains(payload);
-  }
-
-  public synchronized boolean remove(TIMEOUT_PAYLOAD payload) {
-    try {
-      leaseManager.release(payload.getId());
-    } catch (LeaseNotFoundException e) {
-      LOG.warn("Completion event without active lease. Id={}",
-          payload.getId());
-    }
-    trackedEventsByID.remove(payload.getId());
-    return trackedEvents.remove(payload);
-
-  }
-
-  protected abstract void onTimeout(
-      EventPublisher publisher, TIMEOUT_PAYLOAD payload);
-
-  protected abstract void onFinished(
-      EventPublisher publisher, TIMEOUT_PAYLOAD payload);
-
-  public List<TIMEOUT_PAYLOAD> getTimeoutEvents(
-      Predicate<? super TIMEOUT_PAYLOAD> predicate) {
-    return trackedEventsByID.values().stream().filter(predicate)
-        .collect(Collectors.toList());
-  }
-
-  @VisibleForTesting
-  protected EventWatcherMetrics getMetrics() {
-    return metrics;
-  }
-
-  /**
-   * Returns a tracked event to which the specified id is
-   * mapped, or {@code null} if there is no mapping for the id.
-   */
-  public TIMEOUT_PAYLOAD getTrackedEventbyId(long id) {
-    return trackedEventsByID.get(id);
-  }
-
-  public Map<Long, TIMEOUT_PAYLOAD> getTrackedEventsByID() {
-    return trackedEventsByID;
-  }
-
-  public Set<TIMEOUT_PAYLOAD> getTrackedEvents() {
-    return trackedEvents;
-  }
-}
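A concrete watcher supplies the two callbacks; a hypothetical retry watcher (payload class and names assumed, lease and event wiring elided) could look like this:

  class RetryPayload implements IdentifiableEventPayload {
    private final long id;

    RetryPayload(long id) {
      this.id = id;
    }

    @Override
    public long getId() {
      return id;
    }
  }

  class RetryWatcher extends EventWatcher<RetryPayload, RetryPayload> {

    private final Event<RetryPayload> retryEvent;

    RetryWatcher(Event<RetryPayload> start, Event<RetryPayload> completion,
        LeaseManager<Long> leaseManager) {
      super("RetryWatcher", start, completion, leaseManager);
      this.retryEvent = start;
    }

    @Override
    protected void onTimeout(EventPublisher publisher, RetryPayload payload) {
      // no completion event arrived within the lease period: resend
      publisher.fireEvent(retryEvent, payload);
    }

    @Override
    protected void onFinished(EventPublisher publisher, RetryPayload payload) {
      // completion arrived in time; nothing left to do
    }
  }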
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java
deleted file mode 100644
index 1db81a9..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * Metrics for any event watcher.
- */
-public class EventWatcherMetrics {
-
-  @Metric()
-  private MutableCounterLong trackedEvents;
-
-  @Metric()
-  private MutableCounterLong timedOutEvents;
-
-  @Metric()
-  private MutableCounterLong completedEvents;
-
-  @Metric()
-  private MutableRate completionTime;
-
-  public void incrementTrackedEvents() {
-    trackedEvents.incr();
-  }
-
-  public void incrementTimedOutEvents() {
-    timedOutEvents.incr();
-  }
-
-  public void incrementCompletedEvents() {
-    completedEvents.incr();
-  }
-
-  @VisibleForTesting
-  public void updateFinishingTime(long duration) {
-    completionTime.add(duration);
-  }
-
-  @VisibleForTesting
-  public MutableCounterLong getTrackedEvents() {
-    return trackedEvents;
-  }
-
-  @VisibleForTesting
-  public MutableCounterLong getTimedOutEvents() {
-    return timedOutEvents;
-  }
-
-  @VisibleForTesting
-  public MutableCounterLong getCompletedEvents() {
-    return completedEvents;
-  }
-
-  @VisibleForTesting
-  public MutableRate getCompletionTime() {
-    return completionTime;
-  }
-}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java
deleted file mode 100644
index 3faa8e7..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-/**
- * Payload of an event which carries a unique identifier.
- *
- */
-public interface IdentifiableEventPayload {
-
-  long getId();
-
-}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
deleted file mode 100644
index 3253f2d..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-
-/**
- * Simple EventExecutor that calls the event handlers one by one.
- *
- * @param <T>
- */
-@Metrics(context = "EventQueue")
-public class SingleThreadExecutor<T> implements EventExecutor<T> {
-
-  public static final String THREAD_NAME_PREFIX = "EventQueue";
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SingleThreadExecutor.class);
-
-  private final String name;
-
-  private final ThreadPoolExecutor executor;
-
-  @Metric
-  private MutableCounterLong queued;
-
-  @Metric
-  private MutableCounterLong done;
-
-  @Metric
-  private MutableCounterLong failed;
-
-  /**
-   * Create SingleThreadExecutor.
-   *
-   * @param name Unique name used in monitoring and metrics.
-   */
-  public SingleThreadExecutor(String name) {
-    this.name = name;
-    DefaultMetricsSystem.instance()
-        .register("EventQueue" + name, "Event Executor metrics ", this);
-
-    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>();
-    executor =
-        new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, workQueue,
-            runnable -> {
-              Thread thread = new Thread(runnable);
-              thread.setName(THREAD_NAME_PREFIX + "-" + name);
-              return thread;
-            });
-
-  }
-
-  @Override
-  public void onMessage(EventHandler<T> handler, T message, EventPublisher
-      publisher) {
-    queued.incr();
-    executor.execute(() -> {
-      try {
-        handler.onMessage(message, publisher);
-        done.incr();
-      } catch (Exception ex) {
-        LOG.error("Error on execution message {}", message, ex);
-        failed.incr();
-      }
-    });
-  }
-
-  @Override
-  public long failedEvents() {
-    return failed.value();
-  }
-
-  @Override
-  public long successfulEvents() {
-    return done.value();
-  }
-
-  @Override
-  public long queuedEvents() {
-    return queued.value();
-  }
-
-  @Override
-  public void close() {
-    executor.shutdown();
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-}
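When the auto-created executor is not appropriate, a handler can be registered with an explicit SingleThreadExecutor via the three-argument addHandler; a short sketch with illustrative names (the caller is responsible for any additional metrics in the custom-executor case):

  TypedEvent<Long> tick = new TypedEvent<>(Long.class, "Tick");
  EventQueue queue = new EventQueue();

  // the name should stay unique; this one follows the EventForHandler style
  SingleThreadExecutor<Long> executor =
      new SingleThreadExecutor<>("TickForLogger");

  queue.addHandler(tick, executor,
      (payload, publisher) -> System.out.println("tick " + payload));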
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
deleted file mode 100644
index 27bba3a..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-/**
- * Basic Event implementation for defining custom events.
- *
- * @param <T>
- */
-public class TypedEvent<T> implements Event<T> {
-
-  private final Class<T> payloadType;
-
-  private final String name;
-
-  public TypedEvent(Class<T> payloadType, String name) {
-    this.payloadType = payloadType;
-    this.name = name;
-  }
-
-  public TypedEvent(Class<T> payloadType) {
-    this.payloadType = payloadType;
-    this.name = payloadType.getSimpleName();
-  }
-
-  @Override
-  public Class<T> getPayloadType() {
-    return payloadType;
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public String toString() {
-    return "TypedEvent{" +
-        "payloadType=" + payloadType.getSimpleName() +
-        ", name='" + name + '\'' +
-        '}';
-  }
-}
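Events are typically declared once as shared constants keyed by payload type and name; a sketch with assumed payload classes:

  public final class Events {

    public static final TypedEvent<DatanodeDetails> NODE_REGISTERED =
        new TypedEvent<>(DatanodeDetails.class, "Node_Registered");

    // the name defaults to the payload's simple class name
    public static final TypedEvent<ContainerReport> CONTAINER_REPORT =
        new TypedEvent<>(ContainerReport.class);

    private Events() {
    }
  }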
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java
deleted file mode 100644
index 89999ee..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.server.events;
-
-/**
- * Simple event queue implementation for hdds/ozone components.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/package-info.java
deleted file mode 100644
index 35ad5e7..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.server;
-
-/**
- * Common server-side utilities for all the hdds/ozone server components.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/datanode/dn.js b/hadoop-hdds/framework/src/main/resources/webapps/datanode/dn.js
deleted file mode 100644
index 3b67167..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/datanode/dn.js
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-(function () {
-  "use strict";
-
-  var data = {ozone: {enabled: false}};
-
-  dust.loadSource(dust.compile($('#tmpl-dn').html(), 'dn'));
-
-  function loadDatanodeInfo() {
-    $.get('/jmx?qry=Hadoop:service=DataNode,name=DataNodeInfo', function(resp) {
-      data.dn = workaround(resp.beans[0]);
-      data.dn.HostName = resp.beans[0]['DatanodeHostname'];
-      render();
-    }).fail(show_err_msg);
-  }
-
-  function loadOzoneScmInfo() {
-    $.get('/jmx?qry=Hadoop:service=OzoneDataNode,name=SCMConnectionManager', function (resp) {
-      if (resp.beans.length > 0) {
-        data.ozone.SCMServers = resp.beans[0].SCMServers;
-        data.ozone.enabled = true;
-        render();
-      }
-    }).fail(show_err_msg);
-  }
-
-  function loadOzoneStorageInfo() {
-    $.get('/jmx?qry=Hadoop:service=OzoneDataNode,name=ContainerLocationManager', function (resp) {
-      if (resp.beans.length > 0) {
-        data.ozone.LocationReport = resp.beans[0].LocationReport;
-        data.ozone.enabled = true;
-        render();
-      }
-    }).fail(show_err_msg);
-  }
-
-  function workaround(dn) {
-    function node_map_to_array(nodes) {
-      var res = [];
-      for (var n in nodes) {
-        var p = nodes[n];
-        p.name = n;
-        res.push(p);
-      }
-      return res;
-    }
-
-    dn.VolumeInfo = node_map_to_array(JSON.parse(dn.VolumeInfo));
-    dn.BPServiceActorInfo = JSON.parse(dn.BPServiceActorInfo);
-
-    return dn;
-  }
-
-  function render() {
-    var base = dust.makeBase({
-      'helper_relative_time' : function (chunk, ctx, bodies, params) {
-        var value = dust.helpers.tap(params.value, chunk, ctx);
-        return chunk.write(moment().subtract(Number(value), 'seconds').fromNow(true));
-      }
-    });
-    dust.render('dn', base.push(data), function(err, out) {
-      $('#tab-overview').html(out);
-      $('#tab-overview').addClass('active');
-    });
-  }
-
-  function show_err_msg() {
-    $('#alert-panel-body').html("Failed to load datanode information");
-    $('#alert-panel').show();
-  }
-
-  loadDatanodeInfo();
-  loadOzoneScmInfo();
-  loadOzoneStorageInfo();
-
-})();
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.6.4.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.6.4.min.js
deleted file mode 100644
index c4bf158..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.6.4.min.js
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- AngularJS v1.6.4
- (c) 2010-2017 Google, Inc. http://angularjs.org
- License: MIT
-*/
-(function(x){'use strict';function L(a,b){b=b||Error;return function(){var d=arguments[0],c;c="["+(a?a+":":"")+d+"] http://errors.angularjs.org/1.6.4/"+(a?a+"/":"")+d;for(d=1;d<arguments.length;d++){c=c+(1==d?"?":"&")+"p"+(d-1)+"=";var e=encodeURIComponent,f;f=arguments[d];f="function"==typeof f?f.toString().replace(/ \{[\s\S]*$/,""):"undefined"==typeof f?"undefined":"string"!=typeof f?JSON.stringify(f):f;c+=e(f)}return new b(c)}}function me(a){if(C(a))u(a.objectMaxDepth)&&(Ic.objectMaxDepth=Sb(a.objectMaxDepth)?
-a.objectMaxDepth:NaN);else return Ic}function Sb(a){return ba(a)&&0<a}function qa(a){if(null==a||Wa(a))return!1;if(H(a)||F(a)||B&&a instanceof B)return!0;var b="length"in Object(a)&&a.length;return ba(b)&&(0<=b&&(b-1 in a||a instanceof Array)||"function"===typeof a.item)}function q(a,b,d){var c,e;if(a)if(D(a))for(c in a)"prototype"!==c&&"length"!==c&&"name"!==c&&a.hasOwnProperty(c)&&b.call(d,a[c],c,a);else if(H(a)||qa(a)){var f="object"!==typeof a;c=0;for(e=a.length;c<e;c++)(f||c in a)&&b.call(d,
-a[c],c,a)}else if(a.forEach&&a.forEach!==q)a.forEach(b,d,a);else if(Jc(a))for(c in a)b.call(d,a[c],c,a);else if("function"===typeof a.hasOwnProperty)for(c in a)a.hasOwnProperty(c)&&b.call(d,a[c],c,a);else for(c in a)ua.call(a,c)&&b.call(d,a[c],c,a);return a}function Kc(a,b,d){for(var c=Object.keys(a).sort(),e=0;e<c.length;e++)b.call(d,a[c[e]],c[e]);return c}function Lc(a){return function(b,d){a(d,b)}}function ne(){return++qb}function Tb(a,b,d){for(var c=a.$$hashKey,e=0,f=b.length;e<f;++e){var g=b[e];
-if(C(g)||D(g))for(var h=Object.keys(g),k=0,l=h.length;k<l;k++){var m=h[k],n=g[m];d&&C(n)?ga(n)?a[m]=new Date(n.valueOf()):Xa(n)?a[m]=new RegExp(n):n.nodeName?a[m]=n.cloneNode(!0):Ub(n)?a[m]=n.clone():(C(a[m])||(a[m]=H(n)?[]:{}),Tb(a[m],[n],!0)):a[m]=n}}c?a.$$hashKey=c:delete a.$$hashKey;return a}function S(a){return Tb(a,va.call(arguments,1),!1)}function oe(a){return Tb(a,va.call(arguments,1),!0)}function Z(a){return parseInt(a,10)}function Vb(a,b){return S(Object.create(a),b)}function z(){}function Ya(a){return a}
-function la(a){return function(){return a}}function Wb(a){return D(a.toString)&&a.toString!==ma}function w(a){return"undefined"===typeof a}function u(a){return"undefined"!==typeof a}function C(a){return null!==a&&"object"===typeof a}function Jc(a){return null!==a&&"object"===typeof a&&!Mc(a)}function F(a){return"string"===typeof a}function ba(a){return"number"===typeof a}function ga(a){return"[object Date]"===ma.call(a)}function D(a){return"function"===typeof a}function Xa(a){return"[object RegExp]"===
-ma.call(a)}function Wa(a){return a&&a.window===a}function Za(a){return a&&a.$evalAsync&&a.$watch}function Ha(a){return"boolean"===typeof a}function pe(a){return a&&ba(a.length)&&qe.test(ma.call(a))}function Ub(a){return!(!a||!(a.nodeName||a.prop&&a.attr&&a.find))}function re(a){var b={};a=a.split(",");var d;for(d=0;d<a.length;d++)b[a[d]]=!0;return b}function wa(a){return Q(a.nodeName||a[0]&&a[0].nodeName)}function $a(a,b){var d=a.indexOf(b);0<=d&&a.splice(d,1);return d}function ra(a,b,d){function c(a,
-b,c){c--;if(0>c)return"...";var d=b.$$hashKey,f;if(H(a)){f=0;for(var g=a.length;f<g;f++)b.push(e(a[f],c))}else if(Jc(a))for(f in a)b[f]=e(a[f],c);else if(a&&"function"===typeof a.hasOwnProperty)for(f in a)a.hasOwnProperty(f)&&(b[f]=e(a[f],c));else for(f in a)ua.call(a,f)&&(b[f]=e(a[f],c));d?b.$$hashKey=d:delete b.$$hashKey;return b}function e(a,b){if(!C(a))return a;var d=g.indexOf(a);if(-1!==d)return h[d];if(Wa(a)||Za(a))throw Fa("cpws");var d=!1,e=f(a);void 0===e&&(e=H(a)?[]:Object.create(Mc(a)),
-d=!0);g.push(a);h.push(e);return d?c(a,e,b):e}function f(a){switch(ma.call(a)){case "[object Int8Array]":case "[object Int16Array]":case "[object Int32Array]":case "[object Float32Array]":case "[object Float64Array]":case "[object Uint8Array]":case "[object Uint8ClampedArray]":case "[object Uint16Array]":case "[object Uint32Array]":return new a.constructor(e(a.buffer),a.byteOffset,a.length);case "[object ArrayBuffer]":if(!a.slice){var b=new ArrayBuffer(a.byteLength);(new Uint8Array(b)).set(new Uint8Array(a));
-return b}return a.slice(0);case "[object Boolean]":case "[object Number]":case "[object String]":case "[object Date]":return new a.constructor(a.valueOf());case "[object RegExp]":return b=new RegExp(a.source,a.toString().match(/[^/]*$/)[0]),b.lastIndex=a.lastIndex,b;case "[object Blob]":return new a.constructor([a],{type:a.type})}if(D(a.cloneNode))return a.cloneNode(!0)}var g=[],h=[];d=Sb(d)?d:NaN;if(b){if(pe(b)||"[object ArrayBuffer]"===ma.call(b))throw Fa("cpta");if(a===b)throw Fa("cpi");H(b)?b.length=
-0:q(b,function(a,c){"$$hashKey"!==c&&delete b[c]});g.push(a);h.push(b);return c(a,b,d)}return e(a,d)}function Xb(a,b){return a===b||a!==a&&b!==b}function sa(a,b){if(a===b)return!0;if(null===a||null===b)return!1;if(a!==a&&b!==b)return!0;var d=typeof a,c;if(d===typeof b&&"object"===d)if(H(a)){if(!H(b))return!1;if((d=a.length)===b.length){for(c=0;c<d;c++)if(!sa(a[c],b[c]))return!1;return!0}}else{if(ga(a))return ga(b)?Xb(a.getTime(),b.getTime()):!1;if(Xa(a))return Xa(b)?a.toString()===b.toString():!1;
-if(Za(a)||Za(b)||Wa(a)||Wa(b)||H(b)||ga(b)||Xa(b))return!1;d=V();for(c in a)if("$"!==c.charAt(0)&&!D(a[c])){if(!sa(a[c],b[c]))return!1;d[c]=!0}for(c in b)if(!(c in d)&&"$"!==c.charAt(0)&&u(b[c])&&!D(b[c]))return!1;return!0}return!1}function ab(a,b,d){return a.concat(va.call(b,d))}function bb(a,b){var d=2<arguments.length?va.call(arguments,2):[];return!D(b)||b instanceof RegExp?b:d.length?function(){return arguments.length?b.apply(a,ab(d,arguments,0)):b.apply(a,d)}:function(){return arguments.length?
-b.apply(a,arguments):b.call(a)}}function Nc(a,b){var d=b;"string"===typeof a&&"$"===a.charAt(0)&&"$"===a.charAt(1)?d=void 0:Wa(b)?d="$WINDOW":b&&x.document===b?d="$DOCUMENT":Za(b)&&(d="$SCOPE");return d}function cb(a,b){if(!w(a))return ba(b)||(b=b?2:null),JSON.stringify(a,Nc,b)}function Oc(a){return F(a)?JSON.parse(a):a}function Pc(a,b){a=a.replace(se,"");var d=Date.parse("Jan 01, 1970 00:00:00 "+a)/6E4;return da(d)?b:d}function Yb(a,b,d){d=d?-1:1;var c=a.getTimezoneOffset();b=Pc(b,c);d*=b-c;a=new Date(a.getTime());
-a.setMinutes(a.getMinutes()+d);return a}function xa(a){a=B(a).clone();try{a.empty()}catch(b){}var d=B("<div>").append(a).html();try{return a[0].nodeType===Ia?Q(d):d.match(/^(<[^>]+>)/)[1].replace(/^<([\w-]+)/,function(a,b){return"<"+Q(b)})}catch(c){return Q(d)}}function Qc(a){try{return decodeURIComponent(a)}catch(b){}}function Rc(a){var b={};q((a||"").split("&"),function(a){var c,e,f;a&&(e=a=a.replace(/\+/g,"%20"),c=a.indexOf("="),-1!==c&&(e=a.substring(0,c),f=a.substring(c+1)),e=Qc(e),u(e)&&(f=
-u(f)?Qc(f):!0,ua.call(b,e)?H(b[e])?b[e].push(f):b[e]=[b[e],f]:b[e]=f))});return b}function Zb(a){var b=[];q(a,function(a,c){H(a)?q(a,function(a){b.push($(c,!0)+(!0===a?"":"="+$(a,!0)))}):b.push($(c,!0)+(!0===a?"":"="+$(a,!0)))});return b.length?b.join("&"):""}function db(a){return $(a,!0).replace(/%26/gi,"&").replace(/%3D/gi,"=").replace(/%2B/gi,"+")}function $(a,b){return encodeURIComponent(a).replace(/%40/gi,"@").replace(/%3A/gi,":").replace(/%24/g,"$").replace(/%2C/gi,",").replace(/%3B/gi,";").replace(/%20/g,
-b?"%20":"+")}function te(a,b){var d,c,e=Ja.length;for(c=0;c<e;++c)if(d=Ja[c]+b,F(d=a.getAttribute(d)))return d;return null}function ue(a,b){var d,c,e={};q(Ja,function(b){b+="app";!d&&a.hasAttribute&&a.hasAttribute(b)&&(d=a,c=a.getAttribute(b))});q(Ja,function(b){b+="app";var e;!d&&(e=a.querySelector("["+b.replace(":","\\:")+"]"))&&(d=e,c=e.getAttribute(b))});d&&(ve?(e.strictDi=null!==te(d,"strict-di"),b(d,c?[c]:[],e)):x.console.error("Angular: disabling automatic bootstrap. <script> protocol indicates an extension, document.location.href does not match."))}
-function Sc(a,b,d){C(d)||(d={});d=S({strictDi:!1},d);var c=function(){a=B(a);if(a.injector()){var c=a[0]===x.document?"document":xa(a);throw Fa("btstrpd",c.replace(/</,"&lt;").replace(/>/,"&gt;"));}b=b||[];b.unshift(["$provide",function(b){b.value("$rootElement",a)}]);d.debugInfoEnabled&&b.push(["$compileProvider",function(a){a.debugInfoEnabled(!0)}]);b.unshift("ng");c=eb(b,d.strictDi);c.invoke(["$rootScope","$rootElement","$compile","$injector",function(a,b,c,d){a.$apply(function(){b.data("$injector",
-d);c(b)(a)})}]);return c},e=/^NG_ENABLE_DEBUG_INFO!/,f=/^NG_DEFER_BOOTSTRAP!/;x&&e.test(x.name)&&(d.debugInfoEnabled=!0,x.name=x.name.replace(e,""));if(x&&!f.test(x.name))return c();x.name=x.name.replace(f,"");ea.resumeBootstrap=function(a){q(a,function(a){b.push(a)});return c()};D(ea.resumeDeferredBootstrap)&&ea.resumeDeferredBootstrap()}function we(){x.name="NG_ENABLE_DEBUG_INFO!"+x.name;x.location.reload()}function xe(a){a=ea.element(a).injector();if(!a)throw Fa("test");return a.get("$$testability")}
-function Tc(a,b){b=b||"_";return a.replace(ye,function(a,c){return(c?b:"")+a.toLowerCase()})}function ze(){var a;if(!Uc){var b=rb();(na=w(b)?x.jQuery:b?x[b]:void 0)&&na.fn.on?(B=na,S(na.fn,{scope:Na.scope,isolateScope:Na.isolateScope,controller:Na.controller,injector:Na.injector,inheritedData:Na.inheritedData}),a=na.cleanData,na.cleanData=function(b){for(var c,e=0,f;null!=(f=b[e]);e++)(c=na._data(f,"events"))&&c.$destroy&&na(f).triggerHandler("$destroy");a(b)}):B=W;ea.element=B;Uc=!0}}function fb(a,
-b,d){if(!a)throw Fa("areq",b||"?",d||"required");return a}function sb(a,b,d){d&&H(a)&&(a=a[a.length-1]);fb(D(a),b,"not a function, got "+(a&&"object"===typeof a?a.constructor.name||"Object":typeof a));return a}function Ka(a,b){if("hasOwnProperty"===a)throw Fa("badname",b);}function Vc(a,b,d){if(!b)return a;b=b.split(".");for(var c,e=a,f=b.length,g=0;g<f;g++)c=b[g],a&&(a=(e=a)[c]);return!d&&D(a)?bb(e,a):a}function tb(a){for(var b=a[0],d=a[a.length-1],c,e=1;b!==d&&(b=b.nextSibling);e++)if(c||a[e]!==
-b)c||(c=B(va.call(a,0,e))),c.push(b);return c||a}function V(){return Object.create(null)}function $b(a){if(null==a)return"";switch(typeof a){case "string":break;case "number":a=""+a;break;default:a=!Wb(a)||H(a)||ga(a)?cb(a):a.toString()}return a}function Ae(a){function b(a,b,c){return a[b]||(a[b]=c())}var d=L("$injector"),c=L("ng");a=b(a,"angular",Object);a.$$minErr=a.$$minErr||L;return b(a,"module",function(){var a={};return function(f,g,h){var k={};if("hasOwnProperty"===f)throw c("badname","module");
-g&&a.hasOwnProperty(f)&&(a[f]=null);return b(a,f,function(){function a(b,c,d,f){f||(f=e);return function(){f[d||"push"]([b,c,arguments]);return v}}function b(a,c,d){d||(d=e);return function(b,e){e&&D(e)&&(e.$$moduleName=f);d.push([a,c,arguments]);return v}}if(!g)throw d("nomod",f);var e=[],p=[],r=[],J=a("$injector","invoke","push",p),v={_invokeQueue:e,_configBlocks:p,_runBlocks:r,info:function(a){if(u(a)){if(!C(a))throw c("aobj","value");k=a;return this}return k},requires:g,name:f,provider:b("$provide",
-"provider"),factory:b("$provide","factory"),service:b("$provide","service"),value:a("$provide","value"),constant:a("$provide","constant","unshift"),decorator:b("$provide","decorator",p),animation:b("$animateProvider","register"),filter:b("$filterProvider","register"),controller:b("$controllerProvider","register"),directive:b("$compileProvider","directive"),component:b("$compileProvider","component"),config:J,run:function(a){r.push(a);return this}};h&&J(h);return v})}})}function pa(a,b){if(H(a)){b=
-b||[];for(var d=0,c=a.length;d<c;d++)b[d]=a[d]}else if(C(a))for(d in b=b||{},a)if("$"!==d.charAt(0)||"$"!==d.charAt(1))b[d]=a[d];return b||a}function Be(a,b){var d=[];Sb(b)&&(a=ra(a,null,b));return JSON.stringify(a,function(a,b){b=Nc(a,b);if(C(b)){if(0<=d.indexOf(b))return"...";d.push(b)}return b})}function Ce(a){S(a,{errorHandlingConfig:me,bootstrap:Sc,copy:ra,extend:S,merge:oe,equals:sa,element:B,forEach:q,injector:eb,noop:z,bind:bb,toJson:cb,fromJson:Oc,identity:Ya,isUndefined:w,isDefined:u,isString:F,
-isFunction:D,isObject:C,isNumber:ba,isElement:Ub,isArray:H,version:De,isDate:ga,lowercase:Q,uppercase:ub,callbacks:{$$counter:0},getTestability:xe,reloadWithDebugInfo:we,$$minErr:L,$$csp:Ga,$$encodeUriSegment:db,$$encodeUriQuery:$,$$stringify:$b});ac=Ae(x);ac("ng",["ngLocale"],["$provide",function(a){a.provider({$$sanitizeUri:Ee});a.provider("$compile",Wc).directive({a:Fe,input:Xc,textarea:Xc,form:Ge,script:He,select:Ie,option:Je,ngBind:Ke,ngBindHtml:Le,ngBindTemplate:Me,ngClass:Ne,ngClassEven:Oe,
-ngClassOdd:Pe,ngCloak:Qe,ngController:Re,ngForm:Se,ngHide:Te,ngIf:Ue,ngInclude:Ve,ngInit:We,ngNonBindable:Xe,ngPluralize:Ye,ngRepeat:Ze,ngShow:$e,ngStyle:af,ngSwitch:bf,ngSwitchWhen:cf,ngSwitchDefault:df,ngOptions:ef,ngTransclude:ff,ngModel:gf,ngList:hf,ngChange:jf,pattern:Yc,ngPattern:Yc,required:Zc,ngRequired:Zc,minlength:$c,ngMinlength:$c,maxlength:ad,ngMaxlength:ad,ngValue:kf,ngModelOptions:lf}).directive({ngInclude:mf}).directive(vb).directive(bd);a.provider({$anchorScroll:nf,$animate:of,$animateCss:pf,
-$$animateJs:qf,$$animateQueue:rf,$$AnimateRunner:sf,$$animateAsyncRun:tf,$browser:uf,$cacheFactory:vf,$controller:wf,$document:xf,$$isDocumentHidden:yf,$exceptionHandler:zf,$filter:cd,$$forceReflow:Af,$interpolate:Bf,$interval:Cf,$http:Df,$httpParamSerializer:Ef,$httpParamSerializerJQLike:Ff,$httpBackend:Gf,$xhrFactory:Hf,$jsonpCallbacks:If,$location:Jf,$log:Kf,$parse:Lf,$rootScope:Mf,$q:Nf,$$q:Of,$sce:Pf,$sceDelegate:Qf,$sniffer:Rf,$templateCache:Sf,$templateRequest:Tf,$$testability:Uf,$timeout:Vf,
-$window:Wf,$$rAF:Xf,$$jqLite:Yf,$$Map:Zf,$$cookieReader:$f})}]).info({angularVersion:"1.6.4"})}function gb(a,b){return b.toUpperCase()}function wb(a){return a.replace(ag,gb)}function bc(a){a=a.nodeType;return 1===a||!a||9===a}function dd(a,b){var d,c,e=b.createDocumentFragment(),f=[];if(cc.test(a)){d=e.appendChild(b.createElement("div"));c=(bg.exec(a)||["",""])[1].toLowerCase();c=ha[c]||ha._default;d.innerHTML=c[1]+a.replace(cg,"<$1></$2>")+c[2];for(c=c[0];c--;)d=d.lastChild;f=ab(f,d.childNodes);
-d=e.firstChild;d.textContent=""}else f.push(b.createTextNode(a));e.textContent="";e.innerHTML="";q(f,function(a){e.appendChild(a)});return e}function W(a){if(a instanceof W)return a;var b;F(a)&&(a=T(a),b=!0);if(!(this instanceof W)){if(b&&"<"!==a.charAt(0))throw dc("nosel");return new W(a)}if(b){b=x.document;var d;a=(d=dg.exec(a))?[b.createElement(d[1])]:(d=dd(a,b))?d.childNodes:[];ec(this,a)}else D(a)?ed(a):ec(this,a)}function fc(a){return a.cloneNode(!0)}function xb(a,b){!b&&bc(a)&&B.cleanData([a]);
-a.querySelectorAll&&B.cleanData(a.querySelectorAll("*"))}function fd(a,b,d,c){if(u(c))throw dc("offargs");var e=(c=yb(a))&&c.events,f=c&&c.handle;if(f)if(b){var g=function(b){var c=e[b];u(d)&&$a(c||[],d);u(d)&&c&&0<c.length||(a.removeEventListener(b,f),delete e[b])};q(b.split(" "),function(a){g(a);zb[a]&&g(zb[a])})}else for(b in e)"$destroy"!==b&&a.removeEventListener(b,f),delete e[b]}function gc(a,b){var d=a.ng339,c=d&&hb[d];c&&(b?delete c.data[b]:(c.handle&&(c.events.$destroy&&c.handle({},"$destroy"),
-fd(a)),delete hb[d],a.ng339=void 0))}function yb(a,b){var d=a.ng339,d=d&&hb[d];b&&!d&&(a.ng339=d=++eg,d=hb[d]={events:{},data:{},handle:void 0});return d}function hc(a,b,d){if(bc(a)){var c,e=u(d),f=!e&&b&&!C(b),g=!b;a=(a=yb(a,!f))&&a.data;if(e)a[wb(b)]=d;else{if(g)return a;if(f)return a&&a[wb(b)];for(c in b)a[wb(c)]=b[c]}}}function Ab(a,b){return a.getAttribute?-1<(" "+(a.getAttribute("class")||"")+" ").replace(/[\n\t]/g," ").indexOf(" "+b+" "):!1}function Bb(a,b){b&&a.setAttribute&&q(b.split(" "),
-function(b){a.setAttribute("class",T((" "+(a.getAttribute("class")||"")+" ").replace(/[\n\t]/g," ").replace(" "+T(b)+" "," ")))})}function Cb(a,b){if(b&&a.setAttribute){var d=(" "+(a.getAttribute("class")||"")+" ").replace(/[\n\t]/g," ");q(b.split(" "),function(a){a=T(a);-1===d.indexOf(" "+a+" ")&&(d+=a+" ")});a.setAttribute("class",T(d))}}function ec(a,b){if(b)if(b.nodeType)a[a.length++]=b;else{var d=b.length;if("number"===typeof d&&b.window!==b){if(d)for(var c=0;c<d;c++)a[a.length++]=b[c]}else a[a.length++]=
-b}}function gd(a,b){return Db(a,"$"+(b||"ngController")+"Controller")}function Db(a,b,d){9===a.nodeType&&(a=a.documentElement);for(b=H(b)?b:[b];a;){for(var c=0,e=b.length;c<e;c++)if(u(d=B.data(a,b[c])))return d;a=a.parentNode||11===a.nodeType&&a.host}}function hd(a){for(xb(a,!0);a.firstChild;)a.removeChild(a.firstChild)}function Eb(a,b){b||xb(a);var d=a.parentNode;d&&d.removeChild(a)}function fg(a,b){b=b||x;if("complete"===b.document.readyState)b.setTimeout(a);else B(b).on("load",a)}function ed(a){function b(){x.document.removeEventListener("DOMContentLoaded",
-b);x.removeEventListener("load",b);a()}"complete"===x.document.readyState?x.setTimeout(a):(x.document.addEventListener("DOMContentLoaded",b),x.addEventListener("load",b))}function id(a,b){var d=Fb[b.toLowerCase()];return d&&jd[wa(a)]&&d}function gg(a,b){var d=function(c,d){c.isDefaultPrevented=function(){return c.defaultPrevented};var f=b[d||c.type],g=f?f.length:0;if(g){if(w(c.immediatePropagationStopped)){var h=c.stopImmediatePropagation;c.stopImmediatePropagation=function(){c.immediatePropagationStopped=
-!0;c.stopPropagation&&c.stopPropagation();h&&h.call(c)}}c.isImmediatePropagationStopped=function(){return!0===c.immediatePropagationStopped};var k=f.specialHandlerWrapper||hg;1<g&&(f=pa(f));for(var l=0;l<g;l++)c.isImmediatePropagationStopped()||k(a,c,f[l])}};d.elem=a;return d}function hg(a,b,d){d.call(a,b)}function ig(a,b,d){var c=b.relatedTarget;c&&(c===a||jg.call(a,c))||d.call(a,b)}function Yf(){this.$get=function(){return S(W,{hasClass:function(a,b){a.attr&&(a=a[0]);return Ab(a,b)},addClass:function(a,
-b){a.attr&&(a=a[0]);return Cb(a,b)},removeClass:function(a,b){a.attr&&(a=a[0]);return Bb(a,b)}})}}function Pa(a,b){var d=a&&a.$$hashKey;if(d)return"function"===typeof d&&(d=a.$$hashKey()),d;d=typeof a;return d="function"===d||"object"===d&&null!==a?a.$$hashKey=d+":"+(b||ne)():d+":"+a}function kd(){this._keys=[];this._values=[];this._lastKey=NaN;this._lastIndex=-1}function ld(a){a=Function.prototype.toString.call(a).replace(kg,"");return a.match(lg)||a.match(mg)}function ng(a){return(a=ld(a))?"function("+
-(a[1]||"").replace(/[\s\r\n]+/," ")+")":"fn"}function eb(a,b){function d(a){return function(b,c){if(C(b))q(b,Lc(a));else return a(b,c)}}function c(a,b){Ka(a,"service");if(D(b)||H(b))b=p.instantiate(b);if(!b.$get)throw ya("pget",a);return n[a+"Provider"]=b}function e(a,b){return function(){var c=v.invoke(b,this);if(w(c))throw ya("undef",a);return c}}function f(a,b,d){return c(a,{$get:!1!==d?e(a,b):b})}function g(a){fb(w(a)||H(a),"modulesToLoad","not an array");var b=[],c;q(a,function(a){function d(a){var b,
-c;b=0;for(c=a.length;b<c;b++){var e=a[b],f=p.get(e[0]);f[e[1]].apply(f,e[2])}}if(!m.get(a)){m.set(a,!0);try{F(a)?(c=ac(a),v.modules[a]=c,b=b.concat(g(c.requires)).concat(c._runBlocks),d(c._invokeQueue),d(c._configBlocks)):D(a)?b.push(p.invoke(a)):H(a)?b.push(p.invoke(a)):sb(a,"module")}catch(e){throw H(a)&&(a=a[a.length-1]),e.message&&e.stack&&-1===e.stack.indexOf(e.message)&&(e=e.message+"\n"+e.stack),ya("modulerr",a,e.stack||e.message||e);}}});return b}function h(a,c){function d(b,e){if(a.hasOwnProperty(b)){if(a[b]===
-k)throw ya("cdep",b+" <- "+l.join(" <- "));return a[b]}try{return l.unshift(b),a[b]=k,a[b]=c(b,e),a[b]}catch(f){throw a[b]===k&&delete a[b],f;}finally{l.shift()}}function e(a,c,f){var g=[];a=eb.$$annotate(a,b,f);for(var k=0,h=a.length;k<h;k++){var l=a[k];if("string"!==typeof l)throw ya("itkn",l);g.push(c&&c.hasOwnProperty(l)?c[l]:d(l,f))}return g}return{invoke:function(a,b,c,d){"string"===typeof c&&(d=c,c=null);c=e(a,c,d);H(a)&&(a=a[a.length-1]);d=a;if(za||"function"!==typeof d)d=!1;else{var f=d.$$ngIsClass;
-Ha(f)||(f=d.$$ngIsClass=/^(?:class\b|constructor\()/.test(Function.prototype.toString.call(d)));d=f}return d?(c.unshift(null),new (Function.prototype.bind.apply(a,c))):a.apply(b,c)},instantiate:function(a,b,c){var d=H(a)?a[a.length-1]:a;a=e(a,b,c);a.unshift(null);return new (Function.prototype.bind.apply(d,a))},get:d,annotate:eb.$$annotate,has:function(b){return n.hasOwnProperty(b+"Provider")||a.hasOwnProperty(b)}}}b=!0===b;var k={},l=[],m=new Gb,n={$provide:{provider:d(c),factory:d(f),service:d(function(a,
-b){return f(a,["$injector",function(a){return a.instantiate(b)}])}),value:d(function(a,b){return f(a,la(b),!1)}),constant:d(function(a,b){Ka(a,"constant");n[a]=b;r[a]=b}),decorator:function(a,b){var c=p.get(a+"Provider"),d=c.$get;c.$get=function(){var a=v.invoke(d,c);return v.invoke(b,null,{$delegate:a})}}}},p=n.$injector=h(n,function(a,b){ea.isString(b)&&l.push(b);throw ya("unpr",l.join(" <- "));}),r={},J=h(r,function(a,b){var c=p.get(a+"Provider",b);return v.invoke(c.$get,c,void 0,a)}),v=J;n.$injectorProvider=
-{$get:la(J)};v.modules=p.modules=V();var t=g(a),v=J.get("$injector");v.strictDi=b;q(t,function(a){a&&v.invoke(a)});return v}function nf(){var a=!0;this.disableAutoScrolling=function(){a=!1};this.$get=["$window","$location","$rootScope",function(b,d,c){function e(a){var b=null;Array.prototype.some.call(a,function(a){if("a"===wa(a))return b=a,!0});return b}function f(a){if(a){a.scrollIntoView();var c;c=g.yOffset;D(c)?c=c():Ub(c)?(c=c[0],c="fixed"!==b.getComputedStyle(c).position?0:c.getBoundingClientRect().bottom):
-ba(c)||(c=0);c&&(a=a.getBoundingClientRect().top,b.scrollBy(0,a-c))}else b.scrollTo(0,0)}function g(a){a=F(a)?a:ba(a)?a.toString():d.hash();var b;a?(b=h.getElementById(a))?f(b):(b=e(h.getElementsByName(a)))?f(b):"top"===a&&f(null):f(null)}var h=b.document;a&&c.$watch(function(){return d.hash()},function(a,b){a===b&&""===a||fg(function(){c.$evalAsync(g)})});return g}]}function ib(a,b){if(!a&&!b)return"";if(!a)return b;if(!b)return a;H(a)&&(a=a.join(" "));H(b)&&(b=b.join(" "));return a+" "+b}function og(a){F(a)&&
-(a=a.split(" "));var b=V();q(a,function(a){a.length&&(b[a]=!0)});return b}function ia(a){return C(a)?a:{}}function pg(a,b,d,c){function e(a){try{a.apply(null,va.call(arguments,1))}finally{if(J--,0===J)for(;v.length;)try{v.pop()()}catch(b){d.error(b)}}}function f(){Oa=null;h()}function g(){t=I();t=w(t)?null:t;sa(t,G)&&(t=G);M=G=t}function h(){var a=M;g();if(N!==k.url()||a!==t)N=k.url(),M=t,q(K,function(a){a(k.url(),t)})}var k=this,l=a.location,m=a.history,n=a.setTimeout,p=a.clearTimeout,r={};k.isMock=
-!1;var J=0,v=[];k.$$completeOutstandingRequest=e;k.$$incOutstandingRequestCount=function(){J++};k.notifyWhenNoOutstandingRequests=function(a){0===J?a():v.push(a)};var t,M,N=l.href,A=b.find("base"),Oa=null,I=c.history?function(){try{return m.state}catch(a){}}:z;g();k.url=function(b,d,e){w(e)&&(e=null);l!==a.location&&(l=a.location);m!==a.history&&(m=a.history);if(b){var f=M===e;if(N===b&&(!c.history||f))return k;var h=N&&Aa(N)===Aa(b);N=b;M=e;!c.history||h&&f?(h||(Oa=b),d?l.replace(b):h?(d=l,e=b.indexOf("#"),
-e=-1===e?"":b.substr(e),d.hash=e):l.href=b,l.href!==b&&(Oa=b)):(m[d?"replaceState":"pushState"](e,"",b),g());Oa&&(Oa=b);return k}return Oa||l.href.replace(/%27/g,"'")};k.state=function(){return t};var K=[],E=!1,G=null;k.onUrlChange=function(b){if(!E){if(c.history)B(a).on("popstate",f);B(a).on("hashchange",f);E=!0}K.push(b);return b};k.$$applicationDestroyed=function(){B(a).off("hashchange popstate",f)};k.$$checkUrlChange=h;k.baseHref=function(){var a=A.attr("href");return a?a.replace(/^(https?:)?\/\/[^/]*/,
-""):""};k.defer=function(a,b){var c;J++;c=n(function(){delete r[c];e(a)},b||0);r[c]=!0;return c};k.defer.cancel=function(a){return r[a]?(delete r[a],p(a),e(z),!0):!1}}function uf(){this.$get=["$window","$log","$sniffer","$document",function(a,b,d,c){return new pg(a,c,b,d)}]}function vf(){this.$get=function(){function a(a,c){function e(a){a!==n&&(p?p===a&&(p=a.n):p=a,f(a.n,a.p),f(a,n),n=a,n.n=null)}function f(a,b){a!==b&&(a&&(a.p=b),b&&(b.n=a))}if(a in b)throw L("$cacheFactory")("iid",a);var g=0,h=
-S({},c,{id:a}),k=V(),l=c&&c.capacity||Number.MAX_VALUE,m=V(),n=null,p=null;return b[a]={put:function(a,b){if(!w(b)){if(l<Number.MAX_VALUE){var c=m[a]||(m[a]={key:a});e(c)}a in k||g++;k[a]=b;g>l&&this.remove(p.key);return b}},get:function(a){if(l<Number.MAX_VALUE){var b=m[a];if(!b)return;e(b)}return k[a]},remove:function(a){if(l<Number.MAX_VALUE){var b=m[a];if(!b)return;b===n&&(n=b.p);b===p&&(p=b.n);f(b.n,b.p);delete m[a]}a in k&&(delete k[a],g--)},removeAll:function(){k=V();g=0;m=V();n=p=null},destroy:function(){m=
-h=k=null;delete b[a]},info:function(){return S({},h,{size:g})}}}var b={};a.info=function(){var a={};q(b,function(b,e){a[e]=b.info()});return a};a.get=function(a){return b[a]};return a}}function Sf(){this.$get=["$cacheFactory",function(a){return a("templates")}]}function Wc(a,b){function d(a,b,c){var d=/^\s*([@&<]|=(\*?))(\??)\s*([\w$]*)\s*$/,e=V();q(a,function(a,f){if(a in n)e[f]=n[a];else{var g=a.match(d);if(!g)throw fa("iscp",b,f,a,c?"controller bindings definition":"isolate scope definition");
-e[f]={mode:g[1][0],collection:"*"===g[2],optional:"?"===g[3],attrName:g[4]||f};g[4]&&(n[a]=e[f])}});return e}function c(a){var b=a.charAt(0);if(!b||b!==Q(b))throw fa("baddir",a);if(a!==a.trim())throw fa("baddir",a);}function e(a){var b=a.require||a.controller&&a.name;!H(b)&&C(b)&&q(b,function(a,c){var d=a.match(l);a.substring(d[0].length)||(b[c]=d[0]+c)});return b}var f={},g=/^\s*directive:\s*([\w-]+)\s+(.*)$/,h=/(([\w-]+)(?::([^;]+))?;?)/,k=re("ngSrc,ngSrcset,src,srcset"),l=/^(?:(\^\^?)?(\?)?(\^\^?)?)?/,
-m=/^(on[a-z]+|formaction)$/,n=V();this.directive=function N(b,d){fb(b,"name");Ka(b,"directive");F(b)?(c(b),fb(d,"directiveFactory"),f.hasOwnProperty(b)||(f[b]=[],a.factory(b+"Directive",["$injector","$exceptionHandler",function(a,c){var d=[];q(f[b],function(f,g){try{var h=a.invoke(f);D(h)?h={compile:la(h)}:!h.compile&&h.link&&(h.compile=la(h.link));h.priority=h.priority||0;h.index=g;h.name=h.name||b;h.require=e(h);var k=h,l=h.restrict;if(l&&(!F(l)||!/[EACM]/.test(l)))throw fa("badrestrict",l,b);k.restrict=
-l||"EA";h.$$moduleName=f.$$moduleName;d.push(h)}catch(m){c(m)}});return d}])),f[b].push(d)):q(b,Lc(N));return this};this.component=function(a,b){function c(a){function e(b){return D(b)||H(b)?function(c,d){return a.invoke(b,this,{$element:c,$attrs:d})}:b}var f=b.template||b.templateUrl?b.template:"",g={controller:d,controllerAs:qg(b.controller)||b.controllerAs||"$ctrl",template:e(f),templateUrl:e(b.templateUrl),transclude:b.transclude,scope:{},bindToController:b.bindings||{},restrict:"E",require:b.require};
-q(b,function(a,b){"$"===b.charAt(0)&&(g[b]=a)});return g}var d=b.controller||function(){};q(b,function(a,b){"$"===b.charAt(0)&&(c[b]=a,D(d)&&(d[b]=a))});c.$inject=["$injector"];return this.directive(a,c)};this.aHrefSanitizationWhitelist=function(a){return u(a)?(b.aHrefSanitizationWhitelist(a),this):b.aHrefSanitizationWhitelist()};this.imgSrcSanitizationWhitelist=function(a){return u(a)?(b.imgSrcSanitizationWhitelist(a),this):b.imgSrcSanitizationWhitelist()};var p=!0;this.debugInfoEnabled=function(a){return u(a)?
-(p=a,this):p};var r=!1;this.preAssignBindingsEnabled=function(a){return u(a)?(r=a,this):r};var J=10;this.onChangesTtl=function(a){return arguments.length?(J=a,this):J};var v=!0;this.commentDirectivesEnabled=function(a){return arguments.length?(v=a,this):v};var t=!0;this.cssClassDirectivesEnabled=function(a){return arguments.length?(t=a,this):t};this.$get=["$injector","$interpolate","$exceptionHandler","$templateRequest","$parse","$controller","$rootScope","$sce","$animate","$$sanitizeUri",function(a,
-b,c,e,n,E,G,y,O,X){function P(){try{if(!--ya)throw ia=void 0,fa("infchng",J);G.$apply(function(){for(var a=[],b=0,c=ia.length;b<c;++b)try{ia[b]()}catch(d){a.push(d)}ia=void 0;if(a.length)throw a;})}finally{ya++}}function s(a,b){if(b){var c=Object.keys(b),d,e,f;d=0;for(e=c.length;d<e;d++)f=c[d],this[f]=b[f]}else this.$attr={};this.$$element=a}function R(a,b,c){ta.innerHTML="<span "+b+">";b=ta.firstChild.attributes;var d=b[0];b.removeNamedItem(d.name);d.value=c;a.attributes.setNamedItem(d)}function La(a,
-b){try{a.addClass(b)}catch(c){}}function ca(a,b,c,d,e){a instanceof B||(a=B(a));var f=Ma(a,b,a,c,d,e);ca.$$addScopeClass(a);var g=null;return function(b,c,d){if(!a)throw fa("multilink");fb(b,"scope");e&&e.needsNewScope&&(b=b.$parent.$new());d=d||{};var h=d.parentBoundTranscludeFn,k=d.transcludeControllers;d=d.futureParentElement;h&&h.$$boundTransclude&&(h=h.$$boundTransclude);g||(g=(d=d&&d[0])?"foreignobject"!==wa(d)&&ma.call(d).match(/SVG/)?"svg":"html":"html");d="html"!==g?B(ha(g,B("<div>").append(a).html())):
-c?Na.clone.call(a):a;if(k)for(var l in k)d.data("$"+l+"Controller",k[l].instance);ca.$$addScopeInfo(d,b);c&&c(d,b);f&&f(b,d,d,h);c||(a=f=null);return d}}function Ma(a,b,c,d,e,f){function g(a,c,d,e){var f,k,l,m,n,p,r;if(K)for(r=Array(c.length),m=0;m<h.length;m+=3)f=h[m],r[f]=c[f];else r=c;m=0;for(n=h.length;m<n;)k=r[h[m++]],c=h[m++],f=h[m++],c?(c.scope?(l=a.$new(),ca.$$addScopeInfo(B(k),l)):l=a,p=c.transcludeOnThisElement?ja(a,c.transclude,e):!c.templateOnThisElement&&e?e:!e&&b?ja(a,b):null,c(f,l,
-k,d,p)):f&&f(a,k.childNodes,void 0,e)}for(var h=[],k=H(a)||a instanceof B,l,m,n,p,K,r=0;r<a.length;r++){l=new s;11===za&&L(a,r,k);m=jc(a[r],[],l,0===r?d:void 0,e);(f=m.length?W(m,a[r],l,b,c,null,[],[],f):null)&&f.scope&&ca.$$addScopeClass(l.$$element);l=f&&f.terminal||!(n=a[r].childNodes)||!n.length?null:Ma(n,f?(f.transcludeOnThisElement||!f.templateOnThisElement)&&f.transclude:b);if(f||l)h.push(r,f,l),p=!0,K=K||f;f=null}return p?g:null}function L(a,b,c){var d=a[b],e=d.parentNode,f;if(d.nodeType===
-Ia)for(;;){f=e?d.nextSibling:a[b+1];if(!f||f.nodeType!==Ia)break;d.nodeValue+=f.nodeValue;f.parentNode&&f.parentNode.removeChild(f);c&&f===a[b+1]&&a.splice(b+1,1)}}function ja(a,b,c){function d(e,f,g,h,k){e||(e=a.$new(!1,k),e.$$transcluded=!0);return b(e,f,{parentBoundTranscludeFn:c,transcludeControllers:g,futureParentElement:h})}var e=d.$$slots=V(),f;for(f in b.$$slots)e[f]=b.$$slots[f]?ja(a,b.$$slots[f],c):null;return d}function jc(a,b,c,d,e){var f=c.$attr,g;switch(a.nodeType){case 1:g=wa(a);Y(b,
-Ba(g),"E",d,e);for(var k,l,m,n,p=a.attributes,K=0,r=p&&p.length;K<r;K++){var G=!1,E=!1;k=p[K];l=k.name;m=k.value;k=Ba(l);(n=Ja.test(k))&&(l=l.replace(md,"").substr(8).replace(/_(.)/g,function(a,b){return b.toUpperCase()}));(k=k.match(Ka))&&Z(k[1])&&(G=l,E=l.substr(0,l.length-5)+"end",l=l.substr(0,l.length-6));k=Ba(l.toLowerCase());f[k]=l;if(n||!c.hasOwnProperty(k))c[k]=m,id(a,k)&&(c[k]=!0);pa(a,b,m,k,n);Y(b,k,"A",d,e,G,E)}"input"===g&&"hidden"===a.getAttribute("type")&&a.setAttribute("autocomplete",
-"off");if(!Ga)break;f=a.className;C(f)&&(f=f.animVal);if(F(f)&&""!==f)for(;a=h.exec(f);)k=Ba(a[2]),Y(b,k,"C",d,e)&&(c[k]=T(a[3])),f=f.substr(a.index+a[0].length);break;case Ia:la(b,a.nodeValue);break;case 8:if(!Fa)break;jb(a,b,c,d,e)}b.sort(ea);return b}function jb(a,b,c,d,e){try{var f=g.exec(a.nodeValue);if(f){var h=Ba(f[1]);Y(b,h,"M",d,e)&&(c[h]=T(f[2]))}}catch(k){}}function nd(a,b,c){var d=[],e=0;if(b&&a.hasAttribute&&a.hasAttribute(b)){do{if(!a)throw fa("uterdir",b,c);1===a.nodeType&&(a.hasAttribute(b)&&
-e++,a.hasAttribute(c)&&e--);d.push(a);a=a.nextSibling}while(0<e)}else d.push(a);return B(d)}function od(a,b,c){return function(d,e,f,g,h){e=nd(e[0],b,c);return a(d,e,f,g,h)}}function kc(a,b,c,d,e,f){var g;return a?ca(b,c,d,e,f):function(){g||(g=ca(b,c,d,e,f),b=c=f=null);return g.apply(this,arguments)}}function W(a,b,d,e,f,g,h,k,l){function m(a,b,c,d){if(a){c&&(a=od(a,c,d));a.require=y.require;a.directiveName=P;if(E===y||y.$$isolateScope)a=qa(a,{isolateScope:!0});h.push(a)}if(b){c&&(b=od(b,c,d));b.require=
-y.require;b.directiveName=P;if(E===y||y.$$isolateScope)b=qa(b,{isolateScope:!0});k.push(b)}}function n(a,e,f,g,l){function m(a,b,c,d){var e;Za(a)||(d=c,c=b,b=a,a=void 0);X&&(e=O);c||(c=X?P.parent():P);if(d){var f=l.$$slots[d];if(f)return f(a,b,e,c,R);if(w(f))throw fa("noslot",d,xa(P));}else return l(a,b,e,c,R)}var p,y,t,v,J,O,N,P;b===f?(g=d,P=d.$$element):(P=B(f),g=new s(P,d));J=e;E?v=e.$new(!0):K&&(J=e.$parent);l&&(N=m,N.$$boundTransclude=l,N.isSlotFilled=function(a){return!!l.$$slots[a]});G&&(O=
-ba(P,g,N,G,v,e,E));E&&(ca.$$addScopeInfo(P,v,!0,!(I&&(I===E||I===E.$$originalDirective))),ca.$$addScopeClass(P,!0),v.$$isolateBindings=E.$$isolateBindings,y=na(e,g,v,v.$$isolateBindings,E),y.removeWatches&&v.$on("$destroy",y.removeWatches));for(p in O){y=G[p];t=O[p];var Hb=y.$$bindings.bindToController;if(r){t.bindingInfo=Hb?na(J,g,t.instance,Hb,y):{};var A=t();A!==t.instance&&(t.instance=A,P.data("$"+y.name+"Controller",A),t.bindingInfo.removeWatches&&t.bindingInfo.removeWatches(),t.bindingInfo=
-na(J,g,t.instance,Hb,y))}else t.instance=t(),P.data("$"+y.name+"Controller",t.instance),t.bindingInfo=na(J,g,t.instance,Hb,y)}q(G,function(a,b){var c=a.require;a.bindToController&&!H(c)&&C(c)&&S(O[b].instance,U(b,c,P,O))});q(O,function(a){var b=a.instance;if(D(b.$onChanges))try{b.$onChanges(a.bindingInfo.initialChanges)}catch(d){c(d)}if(D(b.$onInit))try{b.$onInit()}catch(e){c(e)}D(b.$doCheck)&&(J.$watch(function(){b.$doCheck()}),b.$doCheck());D(b.$onDestroy)&&J.$on("$destroy",function(){b.$onDestroy()})});
-p=0;for(y=h.length;p<y;p++)t=h[p],ra(t,t.isolateScope?v:e,P,g,t.require&&U(t.directiveName,t.require,P,O),N);var R=e;E&&(E.template||null===E.templateUrl)&&(R=v);a&&a(R,f.childNodes,void 0,l);for(p=k.length-1;0<=p;p--)t=k[p],ra(t,t.isolateScope?v:e,P,g,t.require&&U(t.directiveName,t.require,P,O),N);q(O,function(a){a=a.instance;D(a.$postLink)&&a.$postLink()})}l=l||{};for(var p=-Number.MAX_VALUE,K=l.newScopeDirective,G=l.controllerDirectives,E=l.newIsolateScopeDirective,I=l.templateDirective,t=l.nonTlbTranscludeDirective,
-J=!1,O=!1,X=l.hasElementTranscludeDirective,v=d.$$element=B(b),y,P,N,A=e,R,u=!1,La=!1,x,z=0,F=a.length;z<F;z++){y=a[z];var Ma=y.$$start,L=y.$$end;Ma&&(v=nd(b,Ma,L));N=void 0;if(p>y.priority)break;if(x=y.scope)y.templateUrl||(C(x)?($("new/isolated scope",E||K,y,v),E=y):$("new/isolated scope",E,y,v)),K=K||y;P=y.name;if(!u&&(y.replace&&(y.templateUrl||y.template)||y.transclude&&!y.$$tlb)){for(x=z+1;u=a[x++];)if(u.transclude&&!u.$$tlb||u.replace&&(u.templateUrl||u.template)){La=!0;break}u=!0}!y.templateUrl&&
-y.controller&&(G=G||V(),$("'"+P+"' controller",G[P],y,v),G[P]=y);if(x=y.transclude)if(J=!0,y.$$tlb||($("transclusion",t,y,v),t=y),"element"===x)X=!0,p=y.priority,N=v,v=d.$$element=B(ca.$$createComment(P,d[P])),b=v[0],ka(f,va.call(N,0),b),N[0].$$parentNode=N[0].parentNode,A=kc(La,N,e,p,g&&g.name,{nonTlbTranscludeDirective:t});else{var ja=V();if(C(x)){N=[];var Q=V(),jb=V();q(x,function(a,b){var c="?"===a.charAt(0);a=c?a.substring(1):a;Q[a]=b;ja[b]=null;jb[b]=c});q(v.contents(),function(a){var b=Q[Ba(wa(a))];
-b?(jb[b]=!0,ja[b]=ja[b]||[],ja[b].push(a)):N.push(a)});q(jb,function(a,b){if(!a)throw fa("reqslot",b);});for(var ic in ja)ja[ic]&&(ja[ic]=kc(La,ja[ic],e))}else N=B(fc(b)).contents();v.empty();A=kc(La,N,e,void 0,void 0,{needsNewScope:y.$$isolateScope||y.$$newScope});A.$$slots=ja}if(y.template)if(O=!0,$("template",I,y,v),I=y,x=D(y.template)?y.template(v,d):y.template,x=Ea(x),y.replace){g=y;N=cc.test(x)?pd(ha(y.templateNamespace,T(x))):[];b=N[0];if(1!==N.length||1!==b.nodeType)throw fa("tplrt",P,"");
-ka(f,v,b);F={$attr:{}};x=jc(b,[],F);var Y=a.splice(z+1,a.length-(z+1));(E||K)&&aa(x,E,K);a=a.concat(x).concat(Y);da(d,F);F=a.length}else v.html(x);if(y.templateUrl)O=!0,$("template",I,y,v),I=y,y.replace&&(g=y),n=ga(a.splice(z,a.length-z),v,d,f,J&&A,h,k,{controllerDirectives:G,newScopeDirective:K!==y&&K,newIsolateScopeDirective:E,templateDirective:I,nonTlbTranscludeDirective:t}),F=a.length;else if(y.compile)try{R=y.compile(v,d,A);var Z=y.$$originalDirective||y;D(R)?m(null,bb(Z,R),Ma,L):R&&m(bb(Z,R.pre),
-bb(Z,R.post),Ma,L)}catch(ea){c(ea,xa(v))}y.terminal&&(n.terminal=!0,p=Math.max(p,y.priority))}n.scope=K&&!0===K.scope;n.transcludeOnThisElement=J;n.templateOnThisElement=O;n.transclude=A;l.hasElementTranscludeDirective=X;return n}function U(a,b,c,d){var e;if(F(b)){var f=b.match(l);b=b.substring(f[0].length);var g=f[1]||f[3],f="?"===f[2];"^^"===g?c=c.parent():e=(e=d&&d[b])&&e.instance;if(!e){var h="$"+b+"Controller";e=g?c.inheritedData(h):c.data(h)}if(!e&&!f)throw fa("ctreq",b,a);}else if(H(b))for(e=
-[],g=0,f=b.length;g<f;g++)e[g]=U(a,b[g],c,d);else C(b)&&(e={},q(b,function(b,f){e[f]=U(a,b,c,d)}));return e||null}function ba(a,b,c,d,e,f,g){var h=V(),k;for(k in d){var l=d[k],m={$scope:l===g||l.$$isolateScope?e:f,$element:a,$attrs:b,$transclude:c},n=l.controller;"@"===n&&(n=b[l.name]);m=E(n,m,!0,l.controllerAs);h[l.name]=m;a.data("$"+l.name+"Controller",m.instance)}return h}function aa(a,b,c){for(var d=0,e=a.length;d<e;d++)a[d]=Vb(a[d],{$$isolateScope:b,$$newScope:c})}function Y(b,c,e,g,h,k,l){if(c===
-h)return null;var m=null;if(f.hasOwnProperty(c)){h=a.get(c+"Directive");for(var n=0,p=h.length;n<p;n++)if(c=h[n],(w(g)||g>c.priority)&&-1!==c.restrict.indexOf(e)){k&&(c=Vb(c,{$$start:k,$$end:l}));if(!c.$$bindings){var K=m=c,r=c.name,t={isolateScope:null,bindToController:null};C(K.scope)&&(!0===K.bindToController?(t.bindToController=d(K.scope,r,!0),t.isolateScope={}):t.isolateScope=d(K.scope,r,!1));C(K.bindToController)&&(t.bindToController=d(K.bindToController,r,!0));if(t.bindToController&&!K.controller)throw fa("noctrl",
-r);m=m.$$bindings=t;C(m.isolateScope)&&(c.$$isolateBindings=m.isolateScope)}b.push(c);m=c}}return m}function Z(b){if(f.hasOwnProperty(b))for(var c=a.get(b+"Directive"),d=0,e=c.length;d<e;d++)if(b=c[d],b.multiElement)return!0;return!1}function da(a,b){var c=b.$attr,d=a.$attr;q(a,function(d,e){"$"!==e.charAt(0)&&(b[e]&&b[e]!==d&&(d=d.length?d+(("style"===e?";":" ")+b[e]):b[e]),a.$set(e,d,!0,c[e]))});q(b,function(b,e){a.hasOwnProperty(e)||"$"===e.charAt(0)||(a[e]=b,"class"!==e&&"style"!==e&&(d[e]=c[e]))})}
-function ga(a,b,d,f,g,h,k,l){var m=[],n,p,K=b[0],r=a.shift(),t=Vb(r,{templateUrl:null,transclude:null,replace:null,$$originalDirective:r}),y=D(r.templateUrl)?r.templateUrl(b,d):r.templateUrl,E=r.templateNamespace;b.empty();e(y).then(function(c){var e,G;c=Ea(c);if(r.replace){c=cc.test(c)?pd(ha(E,T(c))):[];e=c[0];if(1!==c.length||1!==e.nodeType)throw fa("tplrt",r.name,y);c={$attr:{}};ka(f,b,e);var I=jc(e,[],c);C(r.scope)&&aa(I,!0);a=I.concat(a);da(d,c)}else e=K,b.html(c);a.unshift(t);n=W(a,e,d,g,b,
-r,h,k,l);q(f,function(a,c){a===e&&(f[c]=b[0])});for(p=Ma(b[0].childNodes,g);m.length;){c=m.shift();G=m.shift();var v=m.shift(),J=m.shift(),I=b[0];if(!c.$$destroyed){if(G!==K){var O=G.className;l.hasElementTranscludeDirective&&r.replace||(I=fc(e));ka(v,B(G),I);La(B(I),O)}G=n.transcludeOnThisElement?ja(c,n.transclude,J):J;n(p,c,I,f,G)}}m=null}).catch(function(a){a instanceof Error&&c(a)});return function(a,b,c,d,e){a=e;b.$$destroyed||(m?m.push(b,c,d,a):(n.transcludeOnThisElement&&(a=ja(b,n.transclude,
-e)),n(p,b,c,d,a)))}}function ea(a,b){var c=b.priority-a.priority;return 0!==c?c:a.name!==b.name?a.name<b.name?-1:1:a.index-b.index}function $(a,b,c,d){function e(a){return a?" (module: "+a+")":""}if(b)throw fa("multidir",b.name,e(b.$$moduleName),c.name,e(c.$$moduleName),a,xa(d));}function la(a,c){var d=b(c,!0);d&&a.push({priority:0,compile:function(a){a=a.parent();var b=!!a.length;b&&ca.$$addBindingClass(a);return function(a,c){var e=c.parent();b||ca.$$addBindingClass(e);ca.$$addBindingInfo(e,d.expressions);
-a.$watch(d,function(a){c[0].nodeValue=a})}}})}function ha(a,b){a=Q(a||"html");switch(a){case "svg":case "math":var c=x.document.createElement("div");c.innerHTML="<"+a+">"+b+"</"+a+">";return c.childNodes[0].childNodes;default:return b}}function oa(a,b){if("srcdoc"===b)return y.HTML;var c=wa(a);if("src"===b||"ngSrc"===b){if(-1===["img","video","audio","source","track"].indexOf(c))return y.RESOURCE_URL}else if("xlinkHref"===b||"form"===c&&"action"===b||"link"===c&&"href"===b)return y.RESOURCE_URL}function pa(a,
-c,d,e,f){var g=oa(a,e),h=k[e]||f,l=b(d,!f,g,h);if(l){if("multiple"===e&&"select"===wa(a))throw fa("selmulti",xa(a));if(m.test(e))throw fa("nodomevents");c.push({priority:100,compile:function(){return{pre:function(a,c,f){c=f.$$observers||(f.$$observers=V());var k=f[e];k!==d&&(l=k&&b(k,!0,g,h),d=k);l&&(f[e]=l(a),(c[e]||(c[e]=[])).$$inter=!0,(f.$$observers&&f.$$observers[e].$$scope||a).$watch(l,function(a,b){"class"===e&&a!==b?f.$updateClass(a,b):f.$set(e,a)}))}}}})}}function ka(a,b,c){var d=b[0],e=
-b.length,f=d.parentNode,g,h;if(a)for(g=0,h=a.length;g<h;g++)if(a[g]===d){a[g++]=c;h=g+e-1;for(var k=a.length;g<k;g++,h++)h<k?a[g]=a[h]:delete a[g];a.length-=e-1;a.context===d&&(a.context=c);break}f&&f.replaceChild(c,d);a=x.document.createDocumentFragment();for(g=0;g<e;g++)a.appendChild(b[g]);B.hasData(d)&&(B.data(c,B.data(d)),B(d).off("$destroy"));B.cleanData(a.querySelectorAll("*"));for(g=1;g<e;g++)delete b[g];b[0]=c;b.length=1}function qa(a,b){return S(function(){return a.apply(null,arguments)},
-a,b)}function ra(a,b,d,e,f,g){try{a(b,d,e,f,g)}catch(h){c(h,xa(d))}}function na(a,c,d,e,f){function g(b,c,e){D(d.$onChanges)&&!Xb(c,e)&&(ia||(a.$$postDigest(P),ia=[]),m||(m={},ia.push(h)),m[b]&&(e=m[b].previousValue),m[b]=new Ib(e,c))}function h(){d.$onChanges(m);m=void 0}var k=[],l={},m;q(e,function(e,h){var m=e.attrName,p=e.optional,r,t,y,G;switch(e.mode){case "@":p||ua.call(c,m)||(d[h]=c[m]=void 0);p=c.$observe(m,function(a){if(F(a)||Ha(a))g(h,a,d[h]),d[h]=a});c.$$observers[m].$$scope=a;r=c[m];
-F(r)?d[h]=b(r)(a):Ha(r)&&(d[h]=r);l[h]=new Ib(lc,d[h]);k.push(p);break;case "=":if(!ua.call(c,m)){if(p)break;c[m]=void 0}if(p&&!c[m])break;t=n(c[m]);G=t.literal?sa:Xb;y=t.assign||function(){r=d[h]=t(a);throw fa("nonassign",c[m],m,f.name);};r=d[h]=t(a);p=function(b){G(b,d[h])||(G(b,r)?y(a,b=d[h]):d[h]=b);return r=b};p.$stateful=!0;p=e.collection?a.$watchCollection(c[m],p):a.$watch(n(c[m],p),null,t.literal);k.push(p);break;case "<":if(!ua.call(c,m)){if(p)break;c[m]=void 0}if(p&&!c[m])break;t=n(c[m]);
-var E=t.literal,I=d[h]=t(a);l[h]=new Ib(lc,d[h]);p=a.$watch(t,function(a,b){if(b===a){if(b===I||E&&sa(b,I))return;b=I}g(h,a,b);d[h]=a},E);k.push(p);break;case "&":t=c.hasOwnProperty(m)?n(c[m]):z;if(t===z&&p)break;d[h]=function(b){return t(a,b)}}});return{initialChanges:l,removeWatches:k.length&&function(){for(var a=0,b=k.length;a<b;++a)k[a]()}}}var Ca=/^\w/,ta=x.document.createElement("div"),Fa=v,Ga=t,ya=J,ia;s.prototype={$normalize:Ba,$addClass:function(a){a&&0<a.length&&O.addClass(this.$$element,
-a)},$removeClass:function(a){a&&0<a.length&&O.removeClass(this.$$element,a)},$updateClass:function(a,b){var c=qd(a,b);c&&c.length&&O.addClass(this.$$element,c);(c=qd(b,a))&&c.length&&O.removeClass(this.$$element,c)},$set:function(a,b,d,e){var f=id(this.$$element[0],a),g=rd[a],h=a;f?(this.$$element.prop(a,b),e=f):g&&(this[g]=b,h=g);this[a]=b;e?this.$attr[a]=e:(e=this.$attr[a])||(this.$attr[a]=e=Tc(a,"-"));f=wa(this.$$element);if("a"===f&&("href"===a||"xlinkHref"===a)||"img"===f&&"src"===a)this[a]=
-b=X(b,"src"===a);else if("img"===f&&"srcset"===a&&u(b)){for(var f="",g=T(b),k=/(\s+\d+x\s*,|\s+\d+w\s*,|\s+,|,\s+)/,k=/\s/.test(g)?k:/(,)/,g=g.split(k),k=Math.floor(g.length/2),l=0;l<k;l++)var m=2*l,f=f+X(T(g[m]),!0),f=f+(" "+T(g[m+1]));g=T(g[2*l]).split(/\s/);f+=X(T(g[0]),!0);2===g.length&&(f+=" "+T(g[1]));this[a]=b=f}!1!==d&&(null===b||w(b)?this.$$element.removeAttr(e):Ca.test(e)?this.$$element.attr(e,b):R(this.$$element[0],e,b));(a=this.$$observers)&&q(a[h],function(a){try{a(b)}catch(d){c(d)}})},
-$observe:function(a,b){var c=this,d=c.$$observers||(c.$$observers=V()),e=d[a]||(d[a]=[]);e.push(b);G.$evalAsync(function(){e.$$inter||!c.hasOwnProperty(a)||w(c[a])||b(c[a])});return function(){$a(e,b)}}};var Aa=b.startSymbol(),Da=b.endSymbol(),Ea="{{"===Aa&&"}}"===Da?Ya:function(a){return a.replace(/\{\{/g,Aa).replace(/}}/g,Da)},Ja=/^ngAttr[A-Z]/,Ka=/^(.+)Start$/;ca.$$addBindingInfo=p?function(a,b){var c=a.data("$binding")||[];H(b)?c=c.concat(b):c.push(b);a.data("$binding",c)}:z;ca.$$addBindingClass=
-p?function(a){La(a,"ng-binding")}:z;ca.$$addScopeInfo=p?function(a,b,c,d){a.data(c?d?"$isolateScopeNoTemplate":"$isolateScope":"$scope",b)}:z;ca.$$addScopeClass=p?function(a,b){La(a,b?"ng-isolate-scope":"ng-scope")}:z;ca.$$createComment=function(a,b){var c="";p&&(c=" "+(a||"")+": ",b&&(c+=b+" "));return x.document.createComment(c)};return ca}]}function Ib(a,b){this.previousValue=a;this.currentValue=b}function Ba(a){return a.replace(md,"").replace(rg,gb)}function qd(a,b){var d="",c=a.split(/\s+/),
-e=b.split(/\s+/),f=0;a:for(;f<c.length;f++){for(var g=c[f],h=0;h<e.length;h++)if(g===e[h])continue a;d+=(0<d.length?" ":"")+g}return d}function pd(a){a=B(a);var b=a.length;if(1>=b)return a;for(;b--;){var d=a[b];(8===d.nodeType||d.nodeType===Ia&&""===d.nodeValue.trim())&&sg.call(a,b,1)}return a}function qg(a,b){if(b&&F(b))return b;if(F(a)){var d=sd.exec(a);if(d)return d[3]}}function wf(){var a={},b=!1;this.has=function(b){return a.hasOwnProperty(b)};this.register=function(b,c){Ka(b,"controller");C(b)?
-S(a,b):a[b]=c};this.allowGlobals=function(){b=!0};this.$get=["$injector","$window",function(d,c){function e(a,b,c,d){if(!a||!C(a.$scope))throw L("$controller")("noscp",d,b);a.$scope[b]=c}return function(f,g,h,k){var l,m,n;h=!0===h;k&&F(k)&&(n=k);if(F(f)){k=f.match(sd);if(!k)throw td("ctrlfmt",f);m=k[1];n=n||k[3];f=a.hasOwnProperty(m)?a[m]:Vc(g.$scope,m,!0)||(b?Vc(c,m,!0):void 0);if(!f)throw td("ctrlreg",m);sb(f,m,!0)}if(h)return h=(H(f)?f[f.length-1]:f).prototype,l=Object.create(h||null),n&&e(g,n,
-l,m||f.name),S(function(){var a=d.invoke(f,l,g,m);a!==l&&(C(a)||D(a))&&(l=a,n&&e(g,n,l,m||f.name));return l},{instance:l,identifier:n});l=d.instantiate(f,g,m);n&&e(g,n,l,m||f.name);return l}}]}function xf(){this.$get=["$window",function(a){return B(a.document)}]}function yf(){this.$get=["$document","$rootScope",function(a,b){function d(){e=c.hidden}var c=a[0],e=c&&c.hidden;a.on("visibilitychange",d);b.$on("$destroy",function(){a.off("visibilitychange",d)});return function(){return e}}]}function zf(){this.$get=
-["$log",function(a){return function(b,d){a.error.apply(a,arguments)}}]}function mc(a){return C(a)?ga(a)?a.toISOString():cb(a):a}function Ef(){this.$get=function(){return function(a){if(!a)return"";var b=[];Kc(a,function(a,c){null===a||w(a)||(H(a)?q(a,function(a){b.push($(c)+"="+$(mc(a)))}):b.push($(c)+"="+$(mc(a))))});return b.join("&")}}}function Ff(){this.$get=function(){return function(a){function b(a,e,f){null===a||w(a)||(H(a)?q(a,function(a,c){b(a,e+"["+(C(a)?c:"")+"]")}):C(a)&&!ga(a)?Kc(a,function(a,
-c){b(a,e+(f?"":"[")+c+(f?"":"]"))}):d.push($(e)+"="+$(mc(a))))}if(!a)return"";var d=[];b(a,"",!0);return d.join("&")}}}function nc(a,b){if(F(a)){var d=a.replace(tg,"").trim();if(d){var c=b("Content-Type");(c=c&&0===c.indexOf(ud))||(c=(c=d.match(ug))&&vg[c[0]].test(d));if(c)try{a=Oc(d)}catch(e){throw oc("baddata",a,e);}}}return a}function vd(a){var b=V(),d;F(a)?q(a.split("\n"),function(a){d=a.indexOf(":");var e=Q(T(a.substr(0,d)));a=T(a.substr(d+1));e&&(b[e]=b[e]?b[e]+", "+a:a)}):C(a)&&q(a,function(a,
-d){var f=Q(d),g=T(a);f&&(b[f]=b[f]?b[f]+", "+g:g)});return b}function wd(a){var b;return function(d){b||(b=vd(a));return d?(d=b[Q(d)],void 0===d&&(d=null),d):b}}function xd(a,b,d,c){if(D(c))return c(a,b,d);q(c,function(c){a=c(a,b,d)});return a}function Df(){var a=this.defaults={transformResponse:[nc],transformRequest:[function(a){return C(a)&&"[object File]"!==ma.call(a)&&"[object Blob]"!==ma.call(a)&&"[object FormData]"!==ma.call(a)?cb(a):a}],headers:{common:{Accept:"application/json, text/plain, */*"},
-post:pa(pc),put:pa(pc),patch:pa(pc)},xsrfCookieName:"XSRF-TOKEN",xsrfHeaderName:"X-XSRF-TOKEN",paramSerializer:"$httpParamSerializer",jsonpCallbackParam:"callback"},b=!1;this.useApplyAsync=function(a){return u(a)?(b=!!a,this):b};var d=this.interceptors=[];this.$get=["$browser","$httpBackend","$$cookieReader","$cacheFactory","$rootScope","$q","$injector","$sce",function(c,e,f,g,h,k,l,m){function n(b){function d(a,b){for(var c=0,e=b.length;c<e;){var f=b[c++],g=b[c++];a=a.then(f,g)}b.length=0;return a}
-function e(a,b){var c,d={};q(a,function(a,e){D(a)?(c=a(b),null!=c&&(d[e]=c)):d[e]=a});return d}function f(a){var b=S({},a);b.data=xd(a.data,a.headers,a.status,g.transformResponse);a=a.status;return 200<=a&&300>a?b:k.reject(b)}if(!C(b))throw L("$http")("badreq",b);if(!F(m.valueOf(b.url)))throw L("$http")("badreq",b.url);var g=S({method:"get",transformRequest:a.transformRequest,transformResponse:a.transformResponse,paramSerializer:a.paramSerializer,jsonpCallbackParam:a.jsonpCallbackParam},b);g.headers=
-function(b){var c=a.headers,d=S({},b.headers),f,g,h,c=S({},c.common,c[Q(b.method)]);a:for(f in c){g=Q(f);for(h in d)if(Q(h)===g)continue a;d[f]=c[f]}return e(d,pa(b))}(b);g.method=ub(g.method);g.paramSerializer=F(g.paramSerializer)?l.get(g.paramSerializer):g.paramSerializer;c.$$incOutstandingRequestCount();var h=[],n=[];b=k.resolve(g);q(t,function(a){(a.request||a.requestError)&&h.unshift(a.request,a.requestError);(a.response||a.responseError)&&n.push(a.response,a.responseError)});b=d(b,h);b=b.then(function(b){var c=
-b.headers,d=xd(b.data,wd(c),void 0,b.transformRequest);w(d)&&q(c,function(a,b){"content-type"===Q(b)&&delete c[b]});w(b.withCredentials)&&!w(a.withCredentials)&&(b.withCredentials=a.withCredentials);return p(b,d).then(f,f)});b=d(b,n);return b=b.finally(function(){c.$$completeOutstandingRequest(z)})}function p(c,d){function g(a){if(a){var c={};q(a,function(a,d){c[d]=function(c){function d(){a(c)}b?h.$applyAsync(d):h.$$phase?d():h.$apply(d)}});return c}}function l(a,c,d,e){function f(){p(c,a,d,e)}O&&
-(200<=a&&300>a?O.put(R,[a,c,vd(d),e]):O.remove(R));b?h.$applyAsync(f):(f(),h.$$phase||h.$apply())}function p(a,b,d,e){b=-1<=b?b:0;(200<=b&&300>b?G.resolve:G.reject)({data:a,status:b,headers:wd(d),config:c,statusText:e})}function K(a){p(a.data,a.status,pa(a.headers()),a.statusText)}function t(){var a=n.pendingRequests.indexOf(c);-1!==a&&n.pendingRequests.splice(a,1)}var G=k.defer(),y=G.promise,O,X,P=c.headers,s="jsonp"===Q(c.method),R=c.url;s?R=m.getTrustedResourceUrl(R):F(R)||(R=m.valueOf(R));R=r(R,
-c.paramSerializer(c.params));s&&(R=J(R,c.jsonpCallbackParam));n.pendingRequests.push(c);y.then(t,t);!c.cache&&!a.cache||!1===c.cache||"GET"!==c.method&&"JSONP"!==c.method||(O=C(c.cache)?c.cache:C(a.cache)?a.cache:v);O&&(X=O.get(R),u(X)?X&&D(X.then)?X.then(K,K):H(X)?p(X[1],X[0],pa(X[2]),X[3]):p(X,200,{},"OK"):O.put(R,y));w(X)&&((X=yd(c.url)?f()[c.xsrfCookieName||a.xsrfCookieName]:void 0)&&(P[c.xsrfHeaderName||a.xsrfHeaderName]=X),e(c.method,R,d,l,P,c.timeout,c.withCredentials,c.responseType,g(c.eventHandlers),
-g(c.uploadEventHandlers)));return y}function r(a,b){0<b.length&&(a+=(-1===a.indexOf("?")?"?":"&")+b);return a}function J(a,b){if(/[&?][^=]+=JSON_CALLBACK/.test(a))throw oc("badjsonp",a);if((new RegExp("[&?]"+b+"=")).test(a))throw oc("badjsonp",b,a);return a+=(-1===a.indexOf("?")?"?":"&")+b+"=JSON_CALLBACK"}var v=g("$http");a.paramSerializer=F(a.paramSerializer)?l.get(a.paramSerializer):a.paramSerializer;var t=[];q(d,function(a){t.unshift(F(a)?l.get(a):l.invoke(a))});n.pendingRequests=[];(function(a){q(arguments,
-function(a){n[a]=function(b,c){return n(S({},c||{},{method:a,url:b}))}})})("get","delete","head","jsonp");(function(a){q(arguments,function(a){n[a]=function(b,c,d){return n(S({},d||{},{method:a,url:b,data:c}))}})})("post","put","patch");n.defaults=a;return n}]}function Hf(){this.$get=function(){return function(){return new x.XMLHttpRequest}}}function Gf(){this.$get=["$browser","$jsonpCallbacks","$document","$xhrFactory",function(a,b,d,c){return wg(a,c,a.defer,b,d[0])}]}function wg(a,b,d,c,e){function f(a,
-b,d){a=a.replace("JSON_CALLBACK",b);var f=e.createElement("script"),m=null;f.type="text/javascript";f.src=a;f.async=!0;m=function(a){f.removeEventListener("load",m);f.removeEventListener("error",m);e.body.removeChild(f);f=null;var g=-1,r="unknown";a&&("load"!==a.type||c.wasCalled(b)||(a={type:"error"}),r=a.type,g="error"===a.type?404:200);d&&d(g,r)};f.addEventListener("load",m);f.addEventListener("error",m);e.body.appendChild(f);return m}return function(e,h,k,l,m,n,p,r,J,v){function t(){N&&N();A&&
-A.abort()}h=h||a.url();if("jsonp"===Q(e))var M=c.createCallback(h),N=f(h,M,function(a,b){var e=200===a&&c.getResponse(M);u(I)&&d.cancel(I);N=A=null;l(a,e,"",b);c.removeCallback(M)});else{var A=b(e,h);A.open(e,h,!0);q(m,function(a,b){u(a)&&A.setRequestHeader(b,a)});A.onload=function(){var a=A.statusText||"",b="response"in A?A.response:A.responseText,c=1223===A.status?204:A.status;0===c&&(c=b?200:"file"===Ca(h).protocol?404:0);var e=A.getAllResponseHeaders();u(I)&&d.cancel(I);N=A=null;l(c,b,e,a)};e=
-function(){u(I)&&d.cancel(I);N=A=null;l(-1,null,null,"")};A.onerror=e;A.onabort=e;A.ontimeout=e;q(J,function(a,b){A.addEventListener(b,a)});q(v,function(a,b){A.upload.addEventListener(b,a)});p&&(A.withCredentials=!0);if(r)try{A.responseType=r}catch(s){if("json"!==r)throw s;}A.send(w(k)?null:k)}if(0<n)var I=d(t,n);else n&&D(n.then)&&n.then(t)}}function Bf(){var a="{{",b="}}";this.startSymbol=function(b){return b?(a=b,this):a};this.endSymbol=function(a){return a?(b=a,this):b};this.$get=["$parse","$exceptionHandler",
-"$sce",function(d,c,e){function f(a){return"\\\\\\"+a}function g(c){return c.replace(n,a).replace(p,b)}function h(a,b,c,d){var e=a.$watch(function(a){e();return d(a)},b,c);return e}function k(f,k,n,p){function M(a){try{var b=a;a=n?e.getTrusted(n,b):e.valueOf(b);return p&&!u(a)?a:$b(a)}catch(d){c(Da.interr(f,d))}}if(!f.length||-1===f.indexOf(a)){var q;k||(k=g(f),q=la(k),q.exp=f,q.expressions=[],q.$$watchDelegate=h);return q}p=!!p;var A,s,I=0,K=[],E=[];q=f.length;for(var G=[],y=[];I<q;)if(-1!==(A=f.indexOf(a,
-I))&&-1!==(s=f.indexOf(b,A+l)))I!==A&&G.push(g(f.substring(I,A))),I=f.substring(A+l,s),K.push(I),E.push(d(I,M)),I=s+m,y.push(G.length),G.push("");else{I!==q&&G.push(g(f.substring(I)));break}n&&1<G.length&&Da.throwNoconcat(f);if(!k||K.length){var O=function(a){for(var b=0,c=K.length;b<c;b++){if(p&&w(a[b]))return;G[y[b]]=a[b]}return G.join("")};return S(function(a){var b=0,d=K.length,e=Array(d);try{for(;b<d;b++)e[b]=E[b](a);return O(e)}catch(g){c(Da.interr(f,g))}},{exp:f,expressions:K,$$watchDelegate:function(a,
-b){var c;return a.$watchGroup(E,function(d,e){var f=O(d);D(b)&&b.call(this,f,d!==e?c:f,a);c=f})}})}}var l=a.length,m=b.length,n=new RegExp(a.replace(/./g,f),"g"),p=new RegExp(b.replace(/./g,f),"g");k.startSymbol=function(){return a};k.endSymbol=function(){return b};return k}]}function Cf(){this.$get=["$rootScope","$window","$q","$$q","$browser",function(a,b,d,c,e){function f(f,k,l,m){function n(){p?f.apply(null,r):f(t)}var p=4<arguments.length,r=p?va.call(arguments,4):[],J=b.setInterval,v=b.clearInterval,
-t=0,M=u(m)&&!m,q=(M?c:d).defer(),A=q.promise;l=u(l)?l:0;A.$$intervalId=J(function(){M?e.defer(n):a.$evalAsync(n);q.notify(t++);0<l&&t>=l&&(q.resolve(t),v(A.$$intervalId),delete g[A.$$intervalId]);M||a.$apply()},k);g[A.$$intervalId]=q;return A}var g={};f.cancel=function(a){return a&&a.$$intervalId in g?(g[a.$$intervalId].promise.catch(z),g[a.$$intervalId].reject("canceled"),b.clearInterval(a.$$intervalId),delete g[a.$$intervalId],!0):!1};return f}]}function qc(a){a=a.split("/");for(var b=a.length;b--;)a[b]=
-db(a[b]);return a.join("/")}function zd(a,b){var d=Ca(a);b.$$protocol=d.protocol;b.$$host=d.hostname;b.$$port=Z(d.port)||xg[d.protocol]||null}function Ad(a,b){if(yg.test(a))throw kb("badpath",a);var d="/"!==a.charAt(0);d&&(a="/"+a);var c=Ca(a);b.$$path=decodeURIComponent(d&&"/"===c.pathname.charAt(0)?c.pathname.substring(1):c.pathname);b.$$search=Rc(c.search);b.$$hash=decodeURIComponent(c.hash);b.$$path&&"/"!==b.$$path.charAt(0)&&(b.$$path="/"+b.$$path)}function rc(a,b){return a.slice(0,b.length)===
-b}function ka(a,b){if(rc(b,a))return b.substr(a.length)}function Aa(a){var b=a.indexOf("#");return-1===b?a:a.substr(0,b)}function lb(a){return a.replace(/(#.+)|#$/,"$1")}function sc(a,b,d){this.$$html5=!0;d=d||"";zd(a,this);this.$$parse=function(a){var d=ka(b,a);if(!F(d))throw kb("ipthprfx",a,b);Ad(d,this);this.$$path||(this.$$path="/");this.$$compose()};this.$$compose=function(){var a=Zb(this.$$search),d=this.$$hash?"#"+db(this.$$hash):"";this.$$url=qc(this.$$path)+(a?"?"+a:"")+d;this.$$absUrl=b+
-this.$$url.substr(1);this.$$urlUpdatedByLocation=!0};this.$$parseLinkUrl=function(c,e){if(e&&"#"===e[0])return this.hash(e.slice(1)),!0;var f,g;u(f=ka(a,c))?(g=f,g=d&&u(f=ka(d,f))?b+(ka("/",f)||f):a+g):u(f=ka(b,c))?g=b+f:b===c+"/"&&(g=b);g&&this.$$parse(g);return!!g}}function tc(a,b,d){zd(a,this);this.$$parse=function(c){var e=ka(a,c)||ka(b,c),f;w(e)||"#"!==e.charAt(0)?this.$$html5?f=e:(f="",w(e)&&(a=c,this.replace())):(f=ka(d,e),w(f)&&(f=e));Ad(f,this);c=this.$$path;var e=a,g=/^\/[A-Z]:(\/.*)/;rc(f,
-e)&&(f=f.replace(e,""));g.exec(f)||(c=(f=g.exec(c))?f[1]:c);this.$$path=c;this.$$compose()};this.$$compose=function(){var b=Zb(this.$$search),e=this.$$hash?"#"+db(this.$$hash):"";this.$$url=qc(this.$$path)+(b?"?"+b:"")+e;this.$$absUrl=a+(this.$$url?d+this.$$url:"");this.$$urlUpdatedByLocation=!0};this.$$parseLinkUrl=function(b,d){return Aa(a)===Aa(b)?(this.$$parse(b),!0):!1}}function Bd(a,b,d){this.$$html5=!0;tc.apply(this,arguments);this.$$parseLinkUrl=function(c,e){if(e&&"#"===e[0])return this.hash(e.slice(1)),
-!0;var f,g;a===Aa(c)?f=c:(g=ka(b,c))?f=a+d+g:b===c+"/"&&(f=b);f&&this.$$parse(f);return!!f};this.$$compose=function(){var b=Zb(this.$$search),e=this.$$hash?"#"+db(this.$$hash):"";this.$$url=qc(this.$$path)+(b?"?"+b:"")+e;this.$$absUrl=a+d+this.$$url;this.$$urlUpdatedByLocation=!0}}function Jb(a){return function(){return this[a]}}function Cd(a,b){return function(d){if(w(d))return this[a];this[a]=b(d);this.$$compose();return this}}function Jf(){var a="!",b={enabled:!1,requireBase:!0,rewriteLinks:!0};
-this.hashPrefix=function(b){return u(b)?(a=b,this):a};this.html5Mode=function(a){if(Ha(a))return b.enabled=a,this;if(C(a)){Ha(a.enabled)&&(b.enabled=a.enabled);Ha(a.requireBase)&&(b.requireBase=a.requireBase);if(Ha(a.rewriteLinks)||F(a.rewriteLinks))b.rewriteLinks=a.rewriteLinks;return this}return b};this.$get=["$rootScope","$browser","$sniffer","$rootElement","$window",function(d,c,e,f,g){function h(a,b,d){var e=l.url(),f=l.$$state;try{c.url(a,b,d),l.$$state=c.state()}catch(g){throw l.url(e),l.$$state=
-f,g;}}function k(a,b){d.$broadcast("$locationChangeSuccess",l.absUrl(),a,l.$$state,b)}var l,m;m=c.baseHref();var n=c.url(),p;if(b.enabled){if(!m&&b.requireBase)throw kb("nobase");p=n.substring(0,n.indexOf("/",n.indexOf("//")+2))+(m||"/");m=e.history?sc:Bd}else p=Aa(n),m=tc;var r=p.substr(0,Aa(p).lastIndexOf("/")+1);l=new m(p,r,"#"+a);l.$$parseLinkUrl(n,n);l.$$state=c.state();var J=/^\s*(javascript|mailto):/i;f.on("click",function(a){var e=b.rewriteLinks;if(e&&!a.ctrlKey&&!a.metaKey&&!a.shiftKey&&
-2!==a.which&&2!==a.button){for(var h=B(a.target);"a"!==wa(h[0]);)if(h[0]===f[0]||!(h=h.parent())[0])return;if(!F(e)||!w(h.attr(e))){var e=h.prop("href"),k=h.attr("href")||h.attr("xlink:href");C(e)&&"[object SVGAnimatedString]"===e.toString()&&(e=Ca(e.animVal).href);J.test(e)||!e||h.attr("target")||a.isDefaultPrevented()||!l.$$parseLinkUrl(e,k)||(a.preventDefault(),l.absUrl()!==c.url()&&(d.$apply(),g.angular["ff-684208-preventDefault"]=!0))}}});lb(l.absUrl())!==lb(n)&&c.url(l.absUrl(),!0);var v=!0;
-c.onUrlChange(function(a,b){rc(a,r)?(d.$evalAsync(function(){var c=l.absUrl(),e=l.$$state,f;a=lb(a);l.$$parse(a);l.$$state=b;f=d.$broadcast("$locationChangeStart",a,c,b,e).defaultPrevented;l.absUrl()===a&&(f?(l.$$parse(c),l.$$state=e,h(c,!1,e)):(v=!1,k(c,e)))}),d.$$phase||d.$digest()):g.location.href=a});d.$watch(function(){if(v||l.$$urlUpdatedByLocation){l.$$urlUpdatedByLocation=!1;var a=lb(c.url()),b=lb(l.absUrl()),f=c.state(),g=l.$$replace,m=a!==b||l.$$html5&&e.history&&f!==l.$$state;if(v||m)v=
-!1,d.$evalAsync(function(){var b=l.absUrl(),c=d.$broadcast("$locationChangeStart",b,a,l.$$state,f).defaultPrevented;l.absUrl()===b&&(c?(l.$$parse(a),l.$$state=f):(m&&h(b,g,f===l.$$state?null:l.$$state),k(a,f)))})}l.$$replace=!1});return l}]}function Kf(){var a=!0,b=this;this.debugEnabled=function(b){return u(b)?(a=b,this):a};this.$get=["$window",function(d){function c(a){a instanceof Error&&(a.stack&&f?a=a.message&&-1===a.stack.indexOf(a.message)?"Error: "+a.message+"\n"+a.stack:a.stack:a.sourceURL&&
-(a=a.message+"\n"+a.sourceURL+":"+a.line));return a}function e(a){var b=d.console||{},e=b[a]||b.log||z;a=!1;try{a=!!e.apply}catch(f){}return a?function(){var a=[];q(arguments,function(b){a.push(c(b))});return e.apply(b,a)}:function(a,b){e(a,null==b?"":b)}}var f=za||/\bEdge\//.test(d.navigator&&d.navigator.userAgent);return{log:e("log"),info:e("info"),warn:e("warn"),error:e("error"),debug:function(){var c=e("debug");return function(){a&&c.apply(b,arguments)}}()}}]}function zg(a){return a+""}function Ag(a,
-b){return"undefined"!==typeof a?a:b}function Dd(a,b){return"undefined"===typeof a?b:"undefined"===typeof b?a:a+b}function U(a,b){var d,c,e;switch(a.type){case s.Program:d=!0;q(a.body,function(a){U(a.expression,b);d=d&&a.expression.constant});a.constant=d;break;case s.Literal:a.constant=!0;a.toWatch=[];break;case s.UnaryExpression:U(a.argument,b);a.constant=a.argument.constant;a.toWatch=a.argument.toWatch;break;case s.BinaryExpression:U(a.left,b);U(a.right,b);a.constant=a.left.constant&&a.right.constant;
-a.toWatch=a.left.toWatch.concat(a.right.toWatch);break;case s.LogicalExpression:U(a.left,b);U(a.right,b);a.constant=a.left.constant&&a.right.constant;a.toWatch=a.constant?[]:[a];break;case s.ConditionalExpression:U(a.test,b);U(a.alternate,b);U(a.consequent,b);a.constant=a.test.constant&&a.alternate.constant&&a.consequent.constant;a.toWatch=a.constant?[]:[a];break;case s.Identifier:a.constant=!1;a.toWatch=[a];break;case s.MemberExpression:U(a.object,b);a.computed&&U(a.property,b);a.constant=a.object.constant&&
-(!a.computed||a.property.constant);a.toWatch=[a];break;case s.CallExpression:d=e=a.filter?!b(a.callee.name).$stateful:!1;c=[];q(a.arguments,function(a){U(a,b);d=d&&a.constant;a.constant||c.push.apply(c,a.toWatch)});a.constant=d;a.toWatch=e?c:[a];break;case s.AssignmentExpression:U(a.left,b);U(a.right,b);a.constant=a.left.constant&&a.right.constant;a.toWatch=[a];break;case s.ArrayExpression:d=!0;c=[];q(a.elements,function(a){U(a,b);d=d&&a.constant;a.constant||c.push.apply(c,a.toWatch)});a.constant=
-d;a.toWatch=c;break;case s.ObjectExpression:d=!0;c=[];q(a.properties,function(a){U(a.value,b);d=d&&a.value.constant&&!a.computed;a.value.constant||c.push.apply(c,a.value.toWatch);a.computed&&(U(a.key,b),a.key.constant||c.push.apply(c,a.key.toWatch))});a.constant=d;a.toWatch=c;break;case s.ThisExpression:a.constant=!1;a.toWatch=[];break;case s.LocalsExpression:a.constant=!1,a.toWatch=[]}}function Ed(a){if(1===a.length){a=a[0].expression;var b=a.toWatch;return 1!==b.length?b:b[0]!==a?b:void 0}}function Fd(a){return a.type===
-s.Identifier||a.type===s.MemberExpression}function Gd(a){if(1===a.body.length&&Fd(a.body[0].expression))return{type:s.AssignmentExpression,left:a.body[0].expression,right:{type:s.NGValueParameter},operator:"="}}function Hd(a){this.$filter=a}function Id(a){this.$filter=a}function uc(a,b,d){this.ast=new s(a,d);this.astCompiler=d.csp?new Id(b):new Hd(b)}function vc(a){return D(a.valueOf)?a.valueOf():Bg.call(a)}function Lf(){var a=V(),b={"true":!0,"false":!1,"null":null,undefined:void 0},d,c;this.addLiteral=
-function(a,c){b[a]=c};this.setIdentifierFns=function(a,b){d=a;c=b;return this};this.$get=["$filter",function(e){function f(a,b,c){return null==a||null==b?a===b:"object"!==typeof a||(a=vc(a),"object"!==typeof a||c)?a===b||a!==a&&b!==b:!1}function g(a,b,c,d,e){var g=d.inputs,h;if(1===g.length){var k=f,g=g[0];return a.$watch(function(a){var b=g(a);f(b,k,d.literal)||(h=d(a,void 0,void 0,[b]),k=b&&vc(b));return h},b,c,e)}for(var l=[],m=[],n=0,E=g.length;n<E;n++)l[n]=f,m[n]=null;return a.$watch(function(a){for(var b=
-!1,c=0,e=g.length;c<e;c++){var k=g[c](a);if(b||(b=!f(k,l[c],d.literal)))m[c]=k,l[c]=k&&vc(k)}b&&(h=d(a,void 0,void 0,m));return h},b,c,e)}function h(a,b,c,d,e){function f(a){return d(a)}function h(a,c,d){n=a;D(b)&&b(a,c,d);l(a)&&d.$$postDigest(function(){l(n)&&m()})}var l=d.literal?k:u,m,n;return m=d.inputs?g(a,h,c,d,e):a.$watch(f,h,c)}function k(a){var b=!0;q(a,function(a){u(a)||(b=!1)});return b}function l(a,b,c,d){var e=a.$watch(function(a){e();return d(a)},b,c);return e}function m(a,b){function c(d,
-e,g,h){g=f&&h?h[0]:a(d,e,g,h);return b(g,d,e)}function d(c,e,g,k){g=f&&k?k[0]:a(c,e,g,k);c=b(g,c,e);return h(g)?c:g}if(!b)return a;var e=a.$$watchDelegate,f=!1,h=a.literal?k:u,l=a.oneTime?d:c;l.literal=a.literal;l.oneTime=a.oneTime;f=!a.inputs;e&&e!==g?(l.$$watchDelegate=e,l.inputs=a.inputs):b.$stateful||(l.$$watchDelegate=g,l.inputs=a.inputs?a.inputs:[a]);return l}var n={csp:Ga().noUnsafeEval,literals:ra(b),isIdentifierStart:D(d)&&d,isIdentifierContinue:D(c)&&c};return function(b,c){var d,f,k;switch(typeof b){case "string":return k=
-b=b.trim(),d=a[k],d||(":"===b.charAt(0)&&":"===b.charAt(1)&&(f=!0,b=b.substring(2)),d=new wc(n),d=(new uc(d,e,n)).parse(b),d.constant?d.$$watchDelegate=l:f?(d.oneTime=!0,d.$$watchDelegate=h):d.inputs&&(d.$$watchDelegate=g),a[k]=d),m(d,c);case "function":return m(b,c);default:return m(z,c)}}}]}function Nf(){var a=!0;this.$get=["$rootScope","$exceptionHandler",function(b,d){return Jd(function(a){b.$evalAsync(a)},d,a)}];this.errorOnUnhandledRejections=function(b){return u(b)?(a=b,this):a}}function Of(){var a=
-!0;this.$get=["$browser","$exceptionHandler",function(b,d){return Jd(function(a){b.defer(a)},d,a)}];this.errorOnUnhandledRejections=function(b){return u(b)?(a=b,this):a}}function Jd(a,b,d){function c(){return new e}function e(){var a=this.promise=new f;this.resolve=function(b){k(a,b)};this.reject=function(b){m(a,b)};this.notify=function(b){p(a,b)}}function f(){this.$$state={status:0}}function g(){for(;!s&&A.length;){var a=A.shift();if(!a.pur){a.pur=!0;var c=a.value,c="Possibly unhandled rejection: "+
-("function"===typeof c?c.toString().replace(/ \{[\s\S]*$/,""):w(c)?"undefined":"string"!==typeof c?Be(c,void 0):c);a.value instanceof Error?b(a.value,c):b(c)}}}function h(b){!d||b.pending||2!==b.status||b.pur||(0===s&&0===A.length&&a(g),A.push(b));!b.processScheduled&&b.pending&&(b.processScheduled=!0,++s,a(function(){var c,e,f;f=b.pending;b.processScheduled=!1;b.pending=void 0;try{for(var h=0,l=f.length;h<l;++h){b.pur=!0;e=f[h][0];c=f[h][b.status];try{D(c)?k(e,c(b.value)):1===b.status?k(e,b.value):
-m(e,b.value)}catch(n){m(e,n)}}}finally{--s,d&&0===s&&a(g)}}))}function k(a,b){a.$$state.status||(b===a?n(a,M("qcycle",b)):l(a,b))}function l(a,b){function c(b){g||(g=!0,l(a,b))}function d(b){g||(g=!0,n(a,b))}function e(b){p(a,b)}var f,g=!1;try{if(C(b)||D(b))f=b.then;D(f)?(a.$$state.status=-1,f.call(b,c,d,e)):(a.$$state.value=b,a.$$state.status=1,h(a.$$state))}catch(k){d(k)}}function m(a,b){a.$$state.status||n(a,b)}function n(a,b){a.$$state.value=b;a.$$state.status=2;h(a.$$state)}function p(c,d){var e=
-c.$$state.pending;0>=c.$$state.status&&e&&e.length&&a(function(){for(var a,c,f=0,g=e.length;f<g;f++){c=e[f][0];a=e[f][3];try{p(c,D(a)?a(d):d)}catch(h){b(h)}}})}function r(a){var b=new f;m(b,a);return b}function J(a,b,c){var d=null;try{D(c)&&(d=c())}catch(e){return r(e)}return d&&D(d.then)?d.then(function(){return b(a)},r):b(a)}function v(a,b,c,d){var e=new f;k(e,a);return e.then(b,c,d)}function t(a){if(!D(a))throw M("norslvr",a);var b=new f;a(function(a){k(b,a)},function(a){m(b,a)});return b}var M=
-L("$q",TypeError),s=0,A=[];S(f.prototype,{then:function(a,b,c){if(w(a)&&w(b)&&w(c))return this;var d=new f;this.$$state.pending=this.$$state.pending||[];this.$$state.pending.push([d,a,b,c]);0<this.$$state.status&&h(this.$$state);return d},"catch":function(a){return this.then(null,a)},"finally":function(a,b){return this.then(function(b){return J(b,u,a)},function(b){return J(b,r,a)},b)}});var u=v;t.prototype=f.prototype;t.defer=c;t.reject=r;t.when=v;t.resolve=u;t.all=function(a){var b=new f,c=0,d=H(a)?
-[]:{};q(a,function(a,e){c++;v(a).then(function(a){d[e]=a;--c||k(b,d)},function(a){m(b,a)})});0===c&&k(b,d);return b};t.race=function(a){var b=c();q(a,function(a){v(a).then(b.resolve,b.reject)});return b.promise};return t}function Xf(){this.$get=["$window","$timeout",function(a,b){var d=a.requestAnimationFrame||a.webkitRequestAnimationFrame,c=a.cancelAnimationFrame||a.webkitCancelAnimationFrame||a.webkitCancelRequestAnimationFrame,e=!!d,f=e?function(a){var b=d(a);return function(){c(b)}}:function(a){var c=
-b(a,16.66,!1);return function(){b.cancel(c)}};f.supported=e;return f}]}function Mf(){function a(a){function b(){this.$$watchers=this.$$nextSibling=this.$$childHead=this.$$childTail=null;this.$$listeners={};this.$$listenerCount={};this.$$watchersCount=0;this.$id=++qb;this.$$ChildScope=null}b.prototype=a;return b}var b=10,d=L("$rootScope"),c=null,e=null;this.digestTtl=function(a){arguments.length&&(b=a);return b};this.$get=["$exceptionHandler","$parse","$browser",function(f,g,h){function k(a){a.currentScope.$$destroyed=
-!0}function l(a){9===za&&(a.$$childHead&&l(a.$$childHead),a.$$nextSibling&&l(a.$$nextSibling));a.$parent=a.$$nextSibling=a.$$prevSibling=a.$$childHead=a.$$childTail=a.$root=a.$$watchers=null}function m(){this.$id=++qb;this.$$phase=this.$parent=this.$$watchers=this.$$nextSibling=this.$$prevSibling=this.$$childHead=this.$$childTail=null;this.$root=this;this.$$destroyed=!1;this.$$listeners={};this.$$listenerCount={};this.$$watchersCount=0;this.$$isolateBindings=null}function n(a){if(M.$$phase)throw d("inprog",
-M.$$phase);M.$$phase=a}function p(a,b){do a.$$watchersCount+=b;while(a=a.$parent)}function r(a,b,c){do a.$$listenerCount[c]-=b,0===a.$$listenerCount[c]&&delete a.$$listenerCount[c];while(a=a.$parent)}function J(){}function v(){for(;u.length;)try{u.shift()()}catch(a){f(a)}e=null}function t(){null===e&&(e=h.defer(function(){M.$apply(v)}))}m.prototype={constructor:m,$new:function(b,c){var d;c=c||this;b?(d=new m,d.$root=this.$root):(this.$$ChildScope||(this.$$ChildScope=a(this)),d=new this.$$ChildScope);
-d.$parent=c;d.$$prevSibling=c.$$childTail;c.$$childHead?(c.$$childTail.$$nextSibling=d,c.$$childTail=d):c.$$childHead=c.$$childTail=d;(b||c!==this)&&d.$on("$destroy",k);return d},$watch:function(a,b,d,e){var f=g(a);if(f.$$watchDelegate)return f.$$watchDelegate(this,b,d,f,a);var h=this,k=h.$$watchers,l={fn:b,last:J,get:f,exp:e||a,eq:!!d};c=null;D(b)||(l.fn=z);k||(k=h.$$watchers=[],k.$$digestWatchIndex=-1);k.unshift(l);k.$$digestWatchIndex++;p(this,1);return function(){var a=$a(k,l);0<=a&&(p(h,-1),
-a<k.$$digestWatchIndex&&k.$$digestWatchIndex--);c=null}},$watchGroup:function(a,b){function c(){h=!1;k?(k=!1,b(e,e,g)):b(e,d,g)}var d=Array(a.length),e=Array(a.length),f=[],g=this,h=!1,k=!0;if(!a.length){var l=!0;g.$evalAsync(function(){l&&b(e,e,g)});return function(){l=!1}}if(1===a.length)return this.$watch(a[0],function(a,c,f){e[0]=a;d[0]=c;b(e,a===c?e:d,f)});q(a,function(a,b){var k=g.$watch(a,function(a,f){e[b]=a;d[b]=f;h||(h=!0,g.$evalAsync(c))});f.push(k)});return function(){for(;f.length;)f.shift()()}},
-$watchCollection:function(a,b){function c(a){e=a;var b,d,g,h;if(!w(e)){if(C(e))if(qa(e))for(f!==n&&(f=n,t=f.length=0,l++),a=e.length,t!==a&&(l++,f.length=t=a),b=0;b<a;b++)h=f[b],g=e[b],d=h!==h&&g!==g,d||h===g||(l++,f[b]=g);else{f!==p&&(f=p={},t=0,l++);a=0;for(b in e)ua.call(e,b)&&(a++,g=e[b],h=f[b],b in f?(d=h!==h&&g!==g,d||h===g||(l++,f[b]=g)):(t++,f[b]=g,l++));if(t>a)for(b in l++,f)ua.call(e,b)||(t--,delete f[b])}else f!==e&&(f=e,l++);return l}}c.$stateful=!0;var d=this,e,f,h,k=1<b.length,l=0,m=
-g(a,c),n=[],p={},r=!0,t=0;return this.$watch(m,function(){r?(r=!1,b(e,e,d)):b(e,h,d);if(k)if(C(e))if(qa(e)){h=Array(e.length);for(var a=0;a<e.length;a++)h[a]=e[a]}else for(a in h={},e)ua.call(e,a)&&(h[a]=e[a]);else h=e})},$digest:function(){var a,g,k,l,m,p,r,t=b,q,u=[],w,x;n("$digest");h.$$checkUrlChange();this===M&&null!==e&&(h.defer.cancel(e),v());c=null;do{r=!1;q=this;for(p=0;p<s.length;p++){try{x=s[p],l=x.fn,l(x.scope,x.locals)}catch(z){f(z)}c=null}s.length=0;a:do{if(p=q.$$watchers)for(p.$$digestWatchIndex=
-p.length;p.$$digestWatchIndex--;)try{if(a=p[p.$$digestWatchIndex])if(m=a.get,(g=m(q))!==(k=a.last)&&!(a.eq?sa(g,k):da(g)&&da(k)))r=!0,c=a,a.last=a.eq?ra(g,null):g,l=a.fn,l(g,k===J?g:k,q),5>t&&(w=4-t,u[w]||(u[w]=[]),u[w].push({msg:D(a.exp)?"fn: "+(a.exp.name||a.exp.toString()):a.exp,newVal:g,oldVal:k}));else if(a===c){r=!1;break a}}catch(B){f(B)}if(!(p=q.$$watchersCount&&q.$$childHead||q!==this&&q.$$nextSibling))for(;q!==this&&!(p=q.$$nextSibling);)q=q.$parent}while(q=p);if((r||s.length)&&!t--)throw M.$$phase=
-null,d("infdig",b,u);}while(r||s.length);for(M.$$phase=null;I<A.length;)try{A[I++]()}catch(F){f(F)}A.length=I=0;h.$$checkUrlChange()},$destroy:function(){if(!this.$$destroyed){var a=this.$parent;this.$broadcast("$destroy");this.$$destroyed=!0;this===M&&h.$$applicationDestroyed();p(this,-this.$$watchersCount);for(var b in this.$$listenerCount)r(this,this.$$listenerCount[b],b);a&&a.$$childHead===this&&(a.$$childHead=this.$$nextSibling);a&&a.$$childTail===this&&(a.$$childTail=this.$$prevSibling);this.$$prevSibling&&
-(this.$$prevSibling.$$nextSibling=this.$$nextSibling);this.$$nextSibling&&(this.$$nextSibling.$$prevSibling=this.$$prevSibling);this.$destroy=this.$digest=this.$apply=this.$evalAsync=this.$applyAsync=z;this.$on=this.$watch=this.$watchGroup=function(){return z};this.$$listeners={};this.$$nextSibling=null;l(this)}},$eval:function(a,b){return g(a)(this,b)},$evalAsync:function(a,b){M.$$phase||s.length||h.defer(function(){s.length&&M.$digest()});s.push({scope:this,fn:g(a),locals:b})},$$postDigest:function(a){A.push(a)},
-$apply:function(a){try{n("$apply");try{return this.$eval(a)}finally{M.$$phase=null}}catch(b){f(b)}finally{try{M.$digest()}catch(c){throw f(c),c;}}},$applyAsync:function(a){function b(){c.$eval(a)}var c=this;a&&u.push(b);a=g(a);t()},$on:function(a,b){var c=this.$$listeners[a];c||(this.$$listeners[a]=c=[]);c.push(b);var d=this;do d.$$listenerCount[a]||(d.$$listenerCount[a]=0),d.$$listenerCount[a]++;while(d=d.$parent);var e=this;return function(){var d=c.indexOf(b);-1!==d&&(c[d]=null,r(e,1,a))}},$emit:function(a,
-b){var c=[],d,e=this,g=!1,h={name:a,targetScope:e,stopPropagation:function(){g=!0},preventDefault:function(){h.defaultPrevented=!0},defaultPrevented:!1},k=ab([h],arguments,1),l,m;do{d=e.$$listeners[a]||c;h.currentScope=e;l=0;for(m=d.length;l<m;l++)if(d[l])try{d[l].apply(null,k)}catch(n){f(n)}else d.splice(l,1),l--,m--;if(g)return h.currentScope=null,h;e=e.$parent}while(e);h.currentScope=null;return h},$broadcast:function(a,b){var c=this,d=this,e={name:a,targetScope:this,preventDefault:function(){e.defaultPrevented=
-!0},defaultPrevented:!1};if(!this.$$listenerCount[a])return e;for(var g=ab([e],arguments,1),h,k;c=d;){e.currentScope=c;d=c.$$listeners[a]||[];h=0;for(k=d.length;h<k;h++)if(d[h])try{d[h].apply(null,g)}catch(l){f(l)}else d.splice(h,1),h--,k--;if(!(d=c.$$listenerCount[a]&&c.$$childHead||c!==this&&c.$$nextSibling))for(;c!==this&&!(d=c.$$nextSibling);)c=c.$parent}e.currentScope=null;return e}};var M=new m,s=M.$$asyncQueue=[],A=M.$$postDigestQueue=[],u=M.$$applyAsyncQueue=[],I=0;return M}]}function Ee(){var a=
-/^\s*(https?|ftp|mailto|tel|file):/,b=/^\s*((https?|ftp|file|blob):|data:image\/)/;this.aHrefSanitizationWhitelist=function(b){return u(b)?(a=b,this):a};this.imgSrcSanitizationWhitelist=function(a){return u(a)?(b=a,this):b};this.$get=function(){return function(d,c){var e=c?b:a,f;f=Ca(d).href;return""===f||f.match(e)?d:"unsafe:"+f}}}function Cg(a){if("self"===a)return a;if(F(a)){if(-1<a.indexOf("***"))throw ta("iwcard",a);a=Kd(a).replace(/\\\*\\\*/g,".*").replace(/\\\*/g,"[^:/.?&;]*");return new RegExp("^"+
-a+"$")}if(Xa(a))return new RegExp("^"+a.source+"$");throw ta("imatcher");}function Ld(a){var b=[];u(a)&&q(a,function(a){b.push(Cg(a))});return b}function Qf(){this.SCE_CONTEXTS=oa;var a=["self"],b=[];this.resourceUrlWhitelist=function(b){arguments.length&&(a=Ld(b));return a};this.resourceUrlBlacklist=function(a){arguments.length&&(b=Ld(a));return b};this.$get=["$injector",function(d){function c(a,b){return"self"===a?yd(b):!!a.exec(b.href)}function e(a){var b=function(a){this.$$unwrapTrustedValue=
-function(){return a}};a&&(b.prototype=new a);b.prototype.valueOf=function(){return this.$$unwrapTrustedValue()};b.prototype.toString=function(){return this.$$unwrapTrustedValue().toString()};return b}var f=function(a){throw ta("unsafe");};d.has("$sanitize")&&(f=d.get("$sanitize"));var g=e(),h={};h[oa.HTML]=e(g);h[oa.CSS]=e(g);h[oa.URL]=e(g);h[oa.JS]=e(g);h[oa.RESOURCE_URL]=e(h[oa.URL]);return{trustAs:function(a,b){var c=h.hasOwnProperty(a)?h[a]:null;if(!c)throw ta("icontext",a,b);if(null===b||w(b)||
-""===b)return b;if("string"!==typeof b)throw ta("itype",a);return new c(b)},getTrusted:function(d,e){if(null===e||w(e)||""===e)return e;var g=h.hasOwnProperty(d)?h[d]:null;if(g&&e instanceof g)return e.$$unwrapTrustedValue();if(d===oa.RESOURCE_URL){var g=Ca(e.toString()),n,p,r=!1;n=0;for(p=a.length;n<p;n++)if(c(a[n],g)){r=!0;break}if(r)for(n=0,p=b.length;n<p;n++)if(c(b[n],g)){r=!1;break}if(r)return e;throw ta("insecurl",e.toString());}if(d===oa.HTML)return f(e);throw ta("unsafe");},valueOf:function(a){return a instanceof
-g?a.$$unwrapTrustedValue():a}}}]}function Pf(){var a=!0;this.enabled=function(b){arguments.length&&(a=!!b);return a};this.$get=["$parse","$sceDelegate",function(b,d){if(a&&8>za)throw ta("iequirks");var c=pa(oa);c.isEnabled=function(){return a};c.trustAs=d.trustAs;c.getTrusted=d.getTrusted;c.valueOf=d.valueOf;a||(c.trustAs=c.getTrusted=function(a,b){return b},c.valueOf=Ya);c.parseAs=function(a,d){var e=b(d);return e.literal&&e.constant?e:b(d,function(b){return c.getTrusted(a,b)})};var e=c.parseAs,
-f=c.getTrusted,g=c.trustAs;q(oa,function(a,b){var d=Q(b);c[("parse_as_"+d).replace(xc,gb)]=function(b){return e(a,b)};c[("get_trusted_"+d).replace(xc,gb)]=function(b){return f(a,b)};c[("trust_as_"+d).replace(xc,gb)]=function(b){return g(a,b)}});return c}]}function Rf(){this.$get=["$window","$document",function(a,b){var d={},c=!((!a.nw||!a.nw.process)&&a.chrome&&(a.chrome.app&&a.chrome.app.runtime||!a.chrome.app&&a.chrome.runtime&&a.chrome.runtime.id))&&a.history&&a.history.pushState,e=Z((/android (\d+)/.exec(Q((a.navigator||
-{}).userAgent))||[])[1]),f=/Boxee/i.test((a.navigator||{}).userAgent),g=b[0]||{},h=g.body&&g.body.style,k=!1,l=!1;h&&(k=!!("transition"in h||"webkitTransition"in h),l=!!("animation"in h||"webkitAnimation"in h));return{history:!(!c||4>e||f),hasEvent:function(a){if("input"===a&&za)return!1;if(w(d[a])){var b=g.createElement("div");d[a]="on"+a in b}return d[a]},csp:Ga(),transitions:k,animations:l,android:e}}]}function Tf(){var a;this.httpOptions=function(b){return b?(a=b,this):a};this.$get=["$exceptionHandler",
-"$templateCache","$http","$q","$sce",function(b,d,c,e,f){function g(h,k){g.totalPendingRequests++;if(!F(h)||w(d.get(h)))h=f.getTrustedResourceUrl(h);var l=c.defaults&&c.defaults.transformResponse;H(l)?l=l.filter(function(a){return a!==nc}):l===nc&&(l=null);return c.get(h,S({cache:d,transformResponse:l},a)).finally(function(){g.totalPendingRequests--}).then(function(a){d.put(h,a.data);return a.data},function(a){k||(a=Dg("tpload",h,a.status,a.statusText),b(a));return e.reject(a)})}g.totalPendingRequests=
-0;return g}]}function Uf(){this.$get=["$rootScope","$browser","$location",function(a,b,d){return{findBindings:function(a,b,d){a=a.getElementsByClassName("ng-binding");var g=[];q(a,function(a){var c=ea.element(a).data("$binding");c&&q(c,function(c){d?(new RegExp("(^|\\s)"+Kd(b)+"(\\s|\\||$)")).test(c)&&g.push(a):-1!==c.indexOf(b)&&g.push(a)})});return g},findModels:function(a,b,d){for(var g=["ng-","data-ng-","ng\\:"],h=0;h<g.length;++h){var k=a.querySelectorAll("["+g[h]+"model"+(d?"=":"*=")+'"'+b+
-'"]');if(k.length)return k}},getLocation:function(){return d.url()},setLocation:function(b){b!==d.url()&&(d.url(b),a.$digest())},whenStable:function(a){b.notifyWhenNoOutstandingRequests(a)}}}]}function Vf(){this.$get=["$rootScope","$browser","$q","$$q","$exceptionHandler",function(a,b,d,c,e){function f(f,k,l){D(f)||(l=k,k=f,f=z);var m=va.call(arguments,3),n=u(l)&&!l,p=(n?c:d).defer(),r=p.promise,q;q=b.defer(function(){try{p.resolve(f.apply(null,m))}catch(b){p.reject(b),e(b)}finally{delete g[r.$$timeoutId]}n||
-a.$apply()},k);r.$$timeoutId=q;g[q]=p;return r}var g={};f.cancel=function(a){return a&&a.$$timeoutId in g?(g[a.$$timeoutId].promise.catch(z),g[a.$$timeoutId].reject("canceled"),delete g[a.$$timeoutId],b.defer.cancel(a.$$timeoutId)):!1};return f}]}function Ca(a){za&&(aa.setAttribute("href",a),a=aa.href);aa.setAttribute("href",a);return{href:aa.href,protocol:aa.protocol?aa.protocol.replace(/:$/,""):"",host:aa.host,search:aa.search?aa.search.replace(/^\?/,""):"",hash:aa.hash?aa.hash.replace(/^#/,""):
-"",hostname:aa.hostname,port:aa.port,pathname:"/"===aa.pathname.charAt(0)?aa.pathname:"/"+aa.pathname}}function yd(a){a=F(a)?Ca(a):a;return a.protocol===Md.protocol&&a.host===Md.host}function Wf(){this.$get=la(x)}function Nd(a){function b(a){try{return decodeURIComponent(a)}catch(b){return a}}var d=a[0]||{},c={},e="";return function(){var a,g,h,k,l;try{a=d.cookie||""}catch(m){a=""}if(a!==e)for(e=a,a=e.split("; "),c={},h=0;h<a.length;h++)g=a[h],k=g.indexOf("="),0<k&&(l=b(g.substring(0,k)),w(c[l])&&
-(c[l]=b(g.substring(k+1))));return c}}function $f(){this.$get=Nd}function cd(a){function b(d,c){if(C(d)){var e={};q(d,function(a,c){e[c]=b(c,a)});return e}return a.factory(d+"Filter",c)}this.register=b;this.$get=["$injector",function(a){return function(b){return a.get(b+"Filter")}}];b("currency",Od);b("date",Pd);b("filter",Eg);b("json",Fg);b("limitTo",Gg);b("lowercase",Hg);b("number",Qd);b("orderBy",Rd);b("uppercase",Ig)}function Eg(){return function(a,b,d,c){if(!qa(a)){if(null==a)return a;throw L("filter")("notarray",
-a);}c=c||"$";var e;switch(yc(b)){case "function":break;case "boolean":case "null":case "number":case "string":e=!0;case "object":b=Jg(b,d,c,e);break;default:return a}return Array.prototype.filter.call(a,b)}}function Jg(a,b,d,c){var e=C(a)&&d in a;!0===b?b=sa:D(b)||(b=function(a,b){if(w(a))return!1;if(null===a||null===b)return a===b;if(C(b)||C(a)&&!Wb(a))return!1;a=Q(""+a);b=Q(""+b);return-1!==a.indexOf(b)});return function(f){return e&&!C(f)?Ea(f,a[d],b,d,!1):Ea(f,a,b,d,c)}}function Ea(a,b,d,c,e,
-f){var g=yc(a),h=yc(b);if("string"===h&&"!"===b.charAt(0))return!Ea(a,b.substring(1),d,c,e);if(H(a))return a.some(function(a){return Ea(a,b,d,c,e)});switch(g){case "object":var k;if(e){for(k in a)if(k.charAt&&"$"!==k.charAt(0)&&Ea(a[k],b,d,c,!0))return!0;return f?!1:Ea(a,b,d,c,!1)}if("object"===h){for(k in b)if(f=b[k],!D(f)&&!w(f)&&(g=k===c,!Ea(g?a:a[k],f,d,c,g,g)))return!1;return!0}return d(a,b);case "function":return!1;default:return d(a,b)}}function yc(a){return null===a?"null":typeof a}function Od(a){var b=
-a.NUMBER_FORMATS;return function(a,c,e){w(c)&&(c=b.CURRENCY_SYM);w(e)&&(e=b.PATTERNS[1].maxFrac);return null==a?a:Sd(a,b.PATTERNS[1],b.GROUP_SEP,b.DECIMAL_SEP,e).replace(/\u00A4/g,c)}}function Qd(a){var b=a.NUMBER_FORMATS;return function(a,c){return null==a?a:Sd(a,b.PATTERNS[0],b.GROUP_SEP,b.DECIMAL_SEP,c)}}function Kg(a){var b=0,d,c,e,f,g;-1<(c=a.indexOf(Td))&&(a=a.replace(Td,""));0<(e=a.search(/e/i))?(0>c&&(c=e),c+=+a.slice(e+1),a=a.substring(0,e)):0>c&&(c=a.length);for(e=0;a.charAt(e)===zc;e++);
-if(e===(g=a.length))d=[0],c=1;else{for(g--;a.charAt(g)===zc;)g--;c-=e;d=[];for(f=0;e<=g;e++,f++)d[f]=+a.charAt(e)}c>Ud&&(d=d.splice(0,Ud-1),b=c-1,c=1);return{d:d,e:b,i:c}}function Lg(a,b,d,c){var e=a.d,f=e.length-a.i;b=w(b)?Math.min(Math.max(d,f),c):+b;d=b+a.i;c=e[d];if(0<d){e.splice(Math.max(a.i,d));for(var g=d;g<e.length;g++)e[g]=0}else for(f=Math.max(0,f),a.i=1,e.length=Math.max(1,d=b+1),e[0]=0,g=1;g<d;g++)e[g]=0;if(5<=c)if(0>d-1){for(c=0;c>d;c--)e.unshift(0),a.i++;e.unshift(1);a.i++}else e[d-
-1]++;for(;f<Math.max(0,b);f++)e.push(0);if(b=e.reduceRight(function(a,b,c,d){b+=a;d[c]=b%10;return Math.floor(b/10)},0))e.unshift(b),a.i++}function Sd(a,b,d,c,e){if(!F(a)&&!ba(a)||isNaN(a))return"";var f=!isFinite(a),g=!1,h=Math.abs(a)+"",k="";if(f)k="\u221e";else{g=Kg(h);Lg(g,e,b.minFrac,b.maxFrac);k=g.d;h=g.i;e=g.e;f=[];for(g=k.reduce(function(a,b){return a&&!b},!0);0>h;)k.unshift(0),h++;0<h?f=k.splice(h,k.length):(f=k,k=[0]);h=[];for(k.length>=b.lgSize&&h.unshift(k.splice(-b.lgSize,k.length).join(""));k.length>
-b.gSize;)h.unshift(k.splice(-b.gSize,k.length).join(""));k.length&&h.unshift(k.join(""));k=h.join(d);f.length&&(k+=c+f.join(""));e&&(k+="e+"+e)}return 0>a&&!g?b.negPre+k+b.negSuf:b.posPre+k+b.posSuf}function Kb(a,b,d,c){var e="";if(0>a||c&&0>=a)c?a=-a+1:(a=-a,e="-");for(a=""+a;a.length<b;)a=zc+a;d&&(a=a.substr(a.length-b));return e+a}function Y(a,b,d,c,e){d=d||0;return function(f){f=f["get"+a]();if(0<d||f>-d)f+=d;0===f&&-12===d&&(f=12);return Kb(f,b,c,e)}}function mb(a,b,d){return function(c,e){var f=
-c["get"+a](),g=ub((d?"STANDALONE":"")+(b?"SHORT":"")+a);return e[g][f]}}function Vd(a){var b=(new Date(a,0,1)).getDay();return new Date(a,0,(4>=b?5:12)-b)}function Wd(a){return function(b){var d=Vd(b.getFullYear());b=+new Date(b.getFullYear(),b.getMonth(),b.getDate()+(4-b.getDay()))-+d;b=1+Math.round(b/6048E5);return Kb(b,a)}}function Ac(a,b){return 0>=a.getFullYear()?b.ERAS[0]:b.ERAS[1]}function Pd(a){function b(a){var b;if(b=a.match(d)){a=new Date(0);var f=0,g=0,h=b[8]?a.setUTCFullYear:a.setFullYear,
-k=b[8]?a.setUTCHours:a.setHours;b[9]&&(f=Z(b[9]+b[10]),g=Z(b[9]+b[11]));h.call(a,Z(b[1]),Z(b[2])-1,Z(b[3]));f=Z(b[4]||0)-f;g=Z(b[5]||0)-g;h=Z(b[6]||0);b=Math.round(1E3*parseFloat("0."+(b[7]||0)));k.call(a,f,g,h,b)}return a}var d=/^(\d{4})-?(\d\d)-?(\d\d)(?:T(\d\d)(?::?(\d\d)(?::?(\d\d)(?:\.(\d+))?)?)?(Z|([+-])(\d\d):?(\d\d))?)?$/;return function(c,d,f){var g="",h=[],k,l;d=d||"mediumDate";d=a.DATETIME_FORMATS[d]||d;F(c)&&(c=Mg.test(c)?Z(c):b(c));ba(c)&&(c=new Date(c));if(!ga(c)||!isFinite(c.getTime()))return c;
-for(;d;)(l=Ng.exec(d))?(h=ab(h,l,1),d=h.pop()):(h.push(d),d=null);var m=c.getTimezoneOffset();f&&(m=Pc(f,m),c=Yb(c,f,!0));q(h,function(b){k=Og[b];g+=k?k(c,a.DATETIME_FORMATS,m):"''"===b?"'":b.replace(/(^'|'$)/g,"").replace(/''/g,"'")});return g}}function Fg(){return function(a,b){w(b)&&(b=2);return cb(a,b)}}function Gg(){return function(a,b,d){b=Infinity===Math.abs(Number(b))?Number(b):Z(b);if(da(b))return a;ba(a)&&(a=a.toString());if(!qa(a))return a;d=!d||isNaN(d)?0:Z(d);d=0>d?Math.max(0,a.length+
-d):d;return 0<=b?Bc(a,d,d+b):0===d?Bc(a,b,a.length):Bc(a,Math.max(0,d+b),d)}}function Bc(a,b,d){return F(a)?a.slice(b,d):va.call(a,b,d)}function Rd(a){function b(b){return b.map(function(b){var c=1,d=Ya;if(D(b))d=b;else if(F(b)){if("+"===b.charAt(0)||"-"===b.charAt(0))c="-"===b.charAt(0)?-1:1,b=b.substring(1);if(""!==b&&(d=a(b),d.constant))var e=d(),d=function(a){return a[e]}}return{get:d,descending:c}})}function d(a){switch(typeof a){case "number":case "boolean":case "string":return!0;default:return!1}}
-function c(a,b){var c=0,d=a.type,k=b.type;if(d===k){var k=a.value,l=b.value;"string"===d?(k=k.toLowerCase(),l=l.toLowerCase()):"object"===d&&(C(k)&&(k=a.index),C(l)&&(l=b.index));k!==l&&(c=k<l?-1:1)}else c=d<k?-1:1;return c}return function(a,f,g,h){if(null==a)return a;if(!qa(a))throw L("orderBy")("notarray",a);H(f)||(f=[f]);0===f.length&&(f=["+"]);var k=b(f),l=g?-1:1,m=D(h)?h:c;a=Array.prototype.map.call(a,function(a,b){return{value:a,tieBreaker:{value:b,type:"number",index:b},predicateValues:k.map(function(c){var e=
-c.get(a);c=typeof e;if(null===e)c="string",e="null";else if("object"===c)a:{if(D(e.valueOf)&&(e=e.valueOf(),d(e)))break a;Wb(e)&&(e=e.toString(),d(e))}return{value:e,type:c,index:b}})}});a.sort(function(a,b){for(var c=0,d=k.length;c<d;c++){var e=m(a.predicateValues[c],b.predicateValues[c]);if(e)return e*k[c].descending*l}return m(a.tieBreaker,b.tieBreaker)*l});return a=a.map(function(a){return a.value})}}function Qa(a){D(a)&&(a={link:a});a.restrict=a.restrict||"AC";return la(a)}function Lb(a,b,d,
-c,e){this.$$controls=[];this.$error={};this.$$success={};this.$pending=void 0;this.$name=e(b.name||b.ngForm||"")(d);this.$dirty=!1;this.$valid=this.$pristine=!0;this.$submitted=this.$invalid=!1;this.$$parentForm=Mb;this.$$element=a;this.$$animate=c;Xd(this)}function Xd(a){a.$$classCache={};a.$$classCache[Yd]=!(a.$$classCache[nb]=a.$$element.hasClass(nb))}function Zd(a){function b(a,b,c){c&&!a.$$classCache[b]?(a.$$animate.addClass(a.$$element,b),a.$$classCache[b]=!0):!c&&a.$$classCache[b]&&(a.$$animate.removeClass(a.$$element,
-b),a.$$classCache[b]=!1)}function d(a,c,d){c=c?"-"+Tc(c,"-"):"";b(a,nb+c,!0===d);b(a,Yd+c,!1===d)}var c=a.set,e=a.unset;a.clazz.prototype.$setValidity=function(a,g,h){w(g)?(this.$pending||(this.$pending={}),c(this.$pending,a,h)):(this.$pending&&e(this.$pending,a,h),$d(this.$pending)&&(this.$pending=void 0));Ha(g)?g?(e(this.$error,a,h),c(this.$$success,a,h)):(c(this.$error,a,h),e(this.$$success,a,h)):(e(this.$error,a,h),e(this.$$success,a,h));this.$pending?(b(this,"ng-pending",!0),this.$valid=this.$invalid=
-void 0,d(this,"",null)):(b(this,"ng-pending",!1),this.$valid=$d(this.$error),this.$invalid=!this.$valid,d(this,"",this.$valid));g=this.$pending&&this.$pending[a]?void 0:this.$error[a]?!1:this.$$success[a]?!0:null;d(this,a,g);this.$$parentForm.$setValidity(a,g,this)}}function $d(a){if(a)for(var b in a)if(a.hasOwnProperty(b))return!1;return!0}function Cc(a){a.$formatters.push(function(b){return a.$isEmpty(b)?b:b.toString()})}function Ra(a,b,d,c,e,f){var g=Q(b[0].type);if(!e.android){var h=!1;b.on("compositionstart",
-function(){h=!0});b.on("compositionend",function(){h=!1;l()})}var k,l=function(a){k&&(f.defer.cancel(k),k=null);if(!h){var e=b.val();a=a&&a.type;"password"===g||d.ngTrim&&"false"===d.ngTrim||(e=T(e));(c.$viewValue!==e||""===e&&c.$$hasNativeValidators)&&c.$setViewValue(e,a)}};if(e.hasEvent("input"))b.on("input",l);else{var m=function(a,b,c){k||(k=f.defer(function(){k=null;b&&b.value===c||l(a)}))};b.on("keydown",function(a){var b=a.keyCode;91===b||15<b&&19>b||37<=b&&40>=b||m(a,this,this.value)});if(e.hasEvent("paste"))b.on("paste cut",
-m)}b.on("change",l);if(ae[g]&&c.$$hasNativeValidators&&g===d.type)b.on("keydown wheel mousedown",function(a){if(!k){var b=this.validity,c=b.badInput,d=b.typeMismatch;k=f.defer(function(){k=null;b.badInput===c&&b.typeMismatch===d||l(a)})}});c.$render=function(){var a=c.$isEmpty(c.$viewValue)?"":c.$viewValue;b.val()!==a&&b.val(a)}}function Nb(a,b){return function(d,c){var e,f;if(ga(d))return d;if(F(d)){'"'===d.charAt(0)&&'"'===d.charAt(d.length-1)&&(d=d.substring(1,d.length-1));if(Pg.test(d))return new Date(d);
-a.lastIndex=0;if(e=a.exec(d))return e.shift(),f=c?{yyyy:c.getFullYear(),MM:c.getMonth()+1,dd:c.getDate(),HH:c.getHours(),mm:c.getMinutes(),ss:c.getSeconds(),sss:c.getMilliseconds()/1E3}:{yyyy:1970,MM:1,dd:1,HH:0,mm:0,ss:0,sss:0},q(e,function(a,c){c<b.length&&(f[b[c]]=+a)}),new Date(f.yyyy,f.MM-1,f.dd,f.HH,f.mm,f.ss||0,1E3*f.sss||0)}return NaN}}function ob(a,b,d,c){return function(e,f,g,h,k,l,m){function n(a){return a&&!(a.getTime&&a.getTime()!==a.getTime())}function p(a){return u(a)&&!ga(a)?d(a)||
-void 0:a}Dc(e,f,g,h);Ra(e,f,g,h,k,l);var r=h&&h.$options.getOption("timezone"),q;h.$$parserName=a;h.$parsers.push(function(a){if(h.$isEmpty(a))return null;if(b.test(a))return a=d(a,q),r&&(a=Yb(a,r)),a});h.$formatters.push(function(a){if(a&&!ga(a))throw pb("datefmt",a);if(n(a))return(q=a)&&r&&(q=Yb(q,r,!0)),m("date")(a,c,r);q=null;return""});if(u(g.min)||g.ngMin){var v;h.$validators.min=function(a){return!n(a)||w(v)||d(a)>=v};g.$observe("min",function(a){v=p(a);h.$validate()})}if(u(g.max)||g.ngMax){var t;
-h.$validators.max=function(a){return!n(a)||w(t)||d(a)<=t};g.$observe("max",function(a){t=p(a);h.$validate()})}}}function Dc(a,b,d,c){(c.$$hasNativeValidators=C(b[0].validity))&&c.$parsers.push(function(a){var c=b.prop("validity")||{};return c.badInput||c.typeMismatch?void 0:a})}function be(a){a.$$parserName="number";a.$parsers.push(function(b){if(a.$isEmpty(b))return null;if(Qg.test(b))return parseFloat(b)});a.$formatters.push(function(b){if(!a.$isEmpty(b)){if(!ba(b))throw pb("numfmt",b);b=b.toString()}return b})}
-function Sa(a){u(a)&&!ba(a)&&(a=parseFloat(a));return da(a)?void 0:a}function Ec(a){var b=a.toString(),d=b.indexOf(".");return-1===d?-1<a&&1>a&&(a=/e-(\d+)$/.exec(b))?Number(a[1]):0:b.length-d-1}function ce(a,b,d){a=Number(a);var c=(a|0)!==a,e=(b|0)!==b,f=(d|0)!==d;if(c||e||f){var g=c?Ec(a):0,h=e?Ec(b):0,k=f?Ec(d):0,g=Math.max(g,h,k),g=Math.pow(10,g);a*=g;b*=g;d*=g;c&&(a=Math.round(a));e&&(b=Math.round(b));f&&(d=Math.round(d))}return 0===(a-b)%d}function de(a,b,d,c,e){if(u(c)){a=a(c);if(!a.constant)throw pb("constexpr",
-d,c);return a(b)}return e}function Fc(a,b){function d(a,b){if(!a||!a.length)return[];if(!b||!b.length)return a;var c=[],d=0;a:for(;d<a.length;d++){for(var e=a[d],m=0;m<b.length;m++)if(e===b[m])continue a;c.push(e)}return c}function c(a){var b=a;H(a)?b=a.map(c).join(" "):C(a)&&(b=Object.keys(a).filter(function(b){return a[b]}).join(" "));return b}a="ngClass"+a;var e;return["$parse",function(f){return{restrict:"AC",link:function(g,h,k){function l(a,b){var c=[];q(a,function(a){if(0<b||n[a])n[a]=(n[a]||
-0)+b,n[a]===+(0<b)&&c.push(a)});return c.join(" ")}function m(a){if(a===b){var c=r,c=l(c&&c.split(" "),1);k.$addClass(c)}else c=r,c=l(c&&c.split(" "),-1),k.$removeClass(c);p=a}var n=h.data("$classCounts"),p=!0,r;n||(n=V(),h.data("$classCounts",n));"ngClass"!==a&&(e||(e=f("$index",function(a){return a&1})),g.$watch(e,m));g.$watch(f(k[a],c),function(a){F(a)||(a=c(a));if(p===b){var e=a,f=r&&r.split(" "),g=e&&e.split(" "),e=d(f,g),f=d(g,f),e=l(e,-1),f=l(f,1);k.$addClass(f);k.$removeClass(e)}r=a})}}}]}
-function Ob(a,b,d,c,e,f,g,h,k){this.$modelValue=this.$viewValue=Number.NaN;this.$$rawModelValue=void 0;this.$validators={};this.$asyncValidators={};this.$parsers=[];this.$formatters=[];this.$viewChangeListeners=[];this.$untouched=!0;this.$touched=!1;this.$pristine=!0;this.$dirty=!1;this.$valid=!0;this.$invalid=!1;this.$error={};this.$$success={};this.$pending=void 0;this.$name=k(d.name||"",!1)(a);this.$$parentForm=Mb;this.$options=Pb;this.$$parsedNgModel=e(d.ngModel);this.$$parsedNgModelAssign=this.$$parsedNgModel.assign;
-this.$$ngModelGet=this.$$parsedNgModel;this.$$ngModelSet=this.$$parsedNgModelAssign;this.$$pendingDebounce=null;this.$$parserValid=void 0;this.$$currentValidationRunId=0;Object.defineProperty(this,"$$scope",{value:a});this.$$attr=d;this.$$element=c;this.$$animate=f;this.$$timeout=g;this.$$parse=e;this.$$q=h;this.$$exceptionHandler=b;Xd(this);Rg(this)}function Rg(a){a.$$scope.$watch(function(b){b=a.$$ngModelGet(b);if(b!==a.$modelValue&&(a.$modelValue===a.$modelValue||b===b)){a.$modelValue=a.$$rawModelValue=
-b;a.$$parserValid=void 0;for(var d=a.$formatters,c=d.length,e=b;c--;)e=d[c](e);a.$viewValue!==e&&(a.$$updateEmptyClasses(e),a.$viewValue=a.$$lastCommittedViewValue=e,a.$render(),a.$$runValidators(a.$modelValue,a.$viewValue,z))}return b})}function Gc(a){this.$$options=a}function ee(a,b){q(b,function(b,c){u(a[c])||(a[c]=b)})}function Ta(a,b){a.prop("selected",b);a.attr("selected",b)}var Sg=/^\/(.+)\/([a-z]*)$/,ua=Object.prototype.hasOwnProperty,Ic={objectMaxDepth:5},Q=function(a){return F(a)?a.toLowerCase():
-a},ub=function(a){return F(a)?a.toUpperCase():a},za,B,na,va=[].slice,sg=[].splice,Tg=[].push,ma=Object.prototype.toString,Mc=Object.getPrototypeOf,Fa=L("ng"),ea=x.angular||(x.angular={}),ac,qb=0;za=x.document.documentMode;var da=Number.isNaN||function(a){return a!==a};z.$inject=[];Ya.$inject=[];var H=Array.isArray,qe=/^\[object (?:Uint8|Uint8Clamped|Uint16|Uint32|Int8|Int16|Int32|Float32|Float64)Array]$/,T=function(a){return F(a)?a.trim():a},Kd=function(a){return a.replace(/([-()[\]{}+?*.$^|,:#<!\\])/g,
-"\\$1").replace(/\x08/g,"\\x08")},Ga=function(){if(!u(Ga.rules)){var a=x.document.querySelector("[ng-csp]")||x.document.querySelector("[data-ng-csp]");if(a){var b=a.getAttribute("ng-csp")||a.getAttribute("data-ng-csp");Ga.rules={noUnsafeEval:!b||-1!==b.indexOf("no-unsafe-eval"),noInlineStyle:!b||-1!==b.indexOf("no-inline-style")}}else{a=Ga;try{new Function(""),b=!1}catch(d){b=!0}a.rules={noUnsafeEval:b,noInlineStyle:!1}}}return Ga.rules},rb=function(){if(u(rb.name_))return rb.name_;var a,b,d=Ja.length,
-c,e;for(b=0;b<d;++b)if(c=Ja[b],a=x.document.querySelector("["+c.replace(":","\\:")+"jq]")){e=a.getAttribute(c+"jq");break}return rb.name_=e},se=/:/g,Ja=["ng-","data-ng-","ng:","x-ng-"],ve=function(a){var b=a.currentScript;if(!b)return!0;if(!(b instanceof x.HTMLScriptElement||b instanceof x.SVGScriptElement))return!1;b=b.attributes;return[b.getNamedItem("src"),b.getNamedItem("href"),b.getNamedItem("xlink:href")].every(function(b){if(!b)return!0;if(!b.value)return!1;var c=a.createElement("a");c.href=
-b.value;if(a.location.origin===c.origin)return!0;switch(c.protocol){case "http:":case "https:":case "ftp:":case "blob:":case "file:":case "data:":return!0;default:return!1}})}(x.document),ye=/[A-Z]/g,Uc=!1,Ia=3,De={full:"1.6.4",major:1,minor:6,dot:4,codeName:"phenomenal-footnote"};W.expando="ng339";var hb=W.cache={},eg=1;W._data=function(a){return this.cache[a[this.expando]]||{}};var ag=/-([a-z])/g,Ug=/^-ms-/,zb={mouseleave:"mouseout",mouseenter:"mouseover"},dc=L("jqLite"),dg=/^<([\w-]+)\s*\/?>(?:<\/\1>|)$/,
-cc=/<|&#?\w+;/,bg=/<([\w:-]+)/,cg=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:-]+)[^>]*)\/>/gi,ha={option:[1,'<select multiple="multiple">',"</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};ha.optgroup=ha.option;ha.tbody=ha.tfoot=ha.colgroup=ha.caption=ha.thead;ha.th=ha.td;var jg=x.Node.prototype.contains||function(a){return!!(this.compareDocumentPosition(a)&
-16)},Na=W.prototype={ready:ed,toString:function(){var a=[];q(this,function(b){a.push(""+b)});return"["+a.join(", ")+"]"},eq:function(a){return 0<=a?B(this[a]):B(this[this.length+a])},length:0,push:Tg,sort:[].sort,splice:[].splice},Fb={};q("multiple selected checked disabled readOnly required open".split(" "),function(a){Fb[Q(a)]=a});var jd={};q("input select option textarea button form details".split(" "),function(a){jd[a]=!0});var rd={ngMinlength:"minlength",ngMaxlength:"maxlength",ngMin:"min",ngMax:"max",
-ngPattern:"pattern",ngStep:"step"};q({data:hc,removeData:gc,hasData:function(a){for(var b in hb[a.ng339])return!0;return!1},cleanData:function(a){for(var b=0,d=a.length;b<d;b++)gc(a[b])}},function(a,b){W[b]=a});q({data:hc,inheritedData:Db,scope:function(a){return B.data(a,"$scope")||Db(a.parentNode||a,["$isolateScope","$scope"])},isolateScope:function(a){return B.data(a,"$isolateScope")||B.data(a,"$isolateScopeNoTemplate")},controller:gd,injector:function(a){return Db(a,"$injector")},removeAttr:function(a,
-b){a.removeAttribute(b)},hasClass:Ab,css:function(a,b,d){b=wb(b.replace(Ug,"ms-"));if(u(d))a.style[b]=d;else return a.style[b]},attr:function(a,b,d){var c=a.nodeType;if(c!==Ia&&2!==c&&8!==c&&a.getAttribute){var c=Q(b),e=Fb[c];if(u(d))null===d||!1===d&&e?a.removeAttribute(b):a.setAttribute(b,e?c:d);else return a=a.getAttribute(b),e&&null!==a&&(a=c),null===a?void 0:a}},prop:function(a,b,d){if(u(d))a[b]=d;else return a[b]},text:function(){function a(a,d){if(w(d)){var c=a.nodeType;return 1===c||c===Ia?
-a.textContent:""}a.textContent=d}a.$dv="";return a}(),val:function(a,b){if(w(b)){if(a.multiple&&"select"===wa(a)){var d=[];q(a.options,function(a){a.selected&&d.push(a.value||a.text)});return d}return a.value}a.value=b},html:function(a,b){if(w(b))return a.innerHTML;xb(a,!0);a.innerHTML=b},empty:hd},function(a,b){W.prototype[b]=function(b,c){var e,f,g=this.length;if(a!==hd&&w(2===a.length&&a!==Ab&&a!==gd?b:c)){if(C(b)){for(e=0;e<g;e++)if(a===hc)a(this[e],b);else for(f in b)a(this[e],f,b[f]);return this}e=
-a.$dv;g=w(e)?Math.min(g,1):g;for(f=0;f<g;f++){var h=a(this[f],b,c);e=e?e+h:h}return e}for(e=0;e<g;e++)a(this[e],b,c);return this}});q({removeData:gc,on:function(a,b,d,c){if(u(c))throw dc("onargs");if(bc(a)){c=yb(a,!0);var e=c.events,f=c.handle;f||(f=c.handle=gg(a,e));c=0<=b.indexOf(" ")?b.split(" "):[b];for(var g=c.length,h=function(b,c,g){var h=e[b];h||(h=e[b]=[],h.specialHandlerWrapper=c,"$destroy"===b||g||a.addEventListener(b,f));h.push(d)};g--;)b=c[g],zb[b]?(h(zb[b],ig),h(b,void 0,!0)):h(b)}},
-off:fd,one:function(a,b,d){a=B(a);a.on(b,function e(){a.off(b,d);a.off(b,e)});a.on(b,d)},replaceWith:function(a,b){var d,c=a.parentNode;xb(a);q(new W(b),function(b){d?c.insertBefore(b,d.nextSibling):c.replaceChild(b,a);d=b})},children:function(a){var b=[];q(a.childNodes,function(a){1===a.nodeType&&b.push(a)});return b},contents:function(a){return a.contentDocument||a.childNodes||[]},append:function(a,b){var d=a.nodeType;if(1===d||11===d){b=new W(b);for(var d=0,c=b.length;d<c;d++)a.appendChild(b[d])}},
-prepend:function(a,b){if(1===a.nodeType){var d=a.firstChild;q(new W(b),function(b){a.insertBefore(b,d)})}},wrap:function(a,b){var d=B(b).eq(0).clone()[0],c=a.parentNode;c&&c.replaceChild(d,a);d.appendChild(a)},remove:Eb,detach:function(a){Eb(a,!0)},after:function(a,b){var d=a,c=a.parentNode;if(c){b=new W(b);for(var e=0,f=b.length;e<f;e++){var g=b[e];c.insertBefore(g,d.nextSibling);d=g}}},addClass:Cb,removeClass:Bb,toggleClass:function(a,b,d){b&&q(b.split(" "),function(b){var e=d;w(e)&&(e=!Ab(a,b));
-(e?Cb:Bb)(a,b)})},parent:function(a){return(a=a.parentNode)&&11!==a.nodeType?a:null},next:function(a){return a.nextElementSibling},find:function(a,b){return a.getElementsByTagName?a.getElementsByTagName(b):[]},clone:fc,triggerHandler:function(a,b,d){var c,e,f=b.type||b,g=yb(a);if(g=(g=g&&g.events)&&g[f])c={preventDefault:function(){this.defaultPrevented=!0},isDefaultPrevented:function(){return!0===this.defaultPrevented},stopImmediatePropagation:function(){this.immediatePropagationStopped=!0},isImmediatePropagationStopped:function(){return!0===
-this.immediatePropagationStopped},stopPropagation:z,type:f,target:a},b.type&&(c=S(c,b)),b=pa(g),e=d?[c].concat(d):[c],q(b,function(b){c.isImmediatePropagationStopped()||b.apply(a,e)})}},function(a,b){W.prototype[b]=function(b,c,e){for(var f,g=0,h=this.length;g<h;g++)w(f)?(f=a(this[g],b,c,e),u(f)&&(f=B(f))):ec(f,a(this[g],b,c,e));return u(f)?f:this}});W.prototype.bind=W.prototype.on;W.prototype.unbind=W.prototype.off;var Vg=Object.create(null);kd.prototype={_idx:function(a){if(a===this._lastKey)return this._lastIndex;
-this._lastKey=a;return this._lastIndex=this._keys.indexOf(a)},_transformKey:function(a){return da(a)?Vg:a},get:function(a){a=this._transformKey(a);a=this._idx(a);if(-1!==a)return this._values[a]},set:function(a,b){a=this._transformKey(a);var d=this._idx(a);-1===d&&(d=this._lastIndex=this._keys.length);this._keys[d]=a;this._values[d]=b},delete:function(a){a=this._transformKey(a);a=this._idx(a);if(-1===a)return!1;this._keys.splice(a,1);this._values.splice(a,1);this._lastKey=NaN;this._lastIndex=-1;return!0}};
-var Gb=kd,Zf=[function(){this.$get=[function(){return Gb}]}],lg=/^([^(]+?)=>/,mg=/^[^(]*\(\s*([^)]*)\)/m,Wg=/,/,Xg=/^\s*(_?)(\S+?)\1\s*$/,kg=/((\/\/.*$)|(\/\*[\s\S]*?\*\/))/mg,ya=L("$injector");eb.$$annotate=function(a,b,d){var c;if("function"===typeof a){if(!(c=a.$inject)){c=[];if(a.length){if(b)throw F(d)&&d||(d=a.name||ng(a)),ya("strictdi",d);b=ld(a);q(b[1].split(Wg),function(a){a.replace(Xg,function(a,b,d){c.push(d)})})}a.$inject=c}}else H(a)?(b=a.length-1,sb(a[b],"fn"),c=a.slice(0,b)):sb(a,"fn",
-!0);return c};var fe=L("$animate"),qf=function(){this.$get=z},rf=function(){var a=new Gb,b=[];this.$get=["$$AnimateRunner","$rootScope",function(d,c){function e(a,b,c){var d=!1;b&&(b=F(b)?b.split(" "):H(b)?b:[],q(b,function(b){b&&(d=!0,a[b]=c)}));return d}function f(){q(b,function(b){var c=a.get(b);if(c){var d=og(b.attr("class")),e="",f="";q(c,function(a,b){a!==!!d[b]&&(a?e+=(e.length?" ":"")+b:f+=(f.length?" ":"")+b)});q(b,function(a){e&&Cb(a,e);f&&Bb(a,f)});a.delete(b)}});b.length=0}return{enabled:z,
-on:z,off:z,pin:z,push:function(g,h,k,l){l&&l();k=k||{};k.from&&g.css(k.from);k.to&&g.css(k.to);if(k.addClass||k.removeClass)if(h=k.addClass,l=k.removeClass,k=a.get(g)||{},h=e(k,h,!0),l=e(k,l,!1),h||l)a.set(g,k),b.push(g),1===b.length&&c.$$postDigest(f);g=new d;g.complete();return g}}}]},of=["$provide",function(a){var b=this,d=null;this.$$registeredAnimations=Object.create(null);this.register=function(c,d){if(c&&"."!==c.charAt(0))throw fe("notcsel",c);var f=c+"-animation";b.$$registeredAnimations[c.substr(1)]=
-f;a.factory(f,d)};this.classNameFilter=function(a){if(1===arguments.length&&(d=a instanceof RegExp?a:null)&&/[(\s|\/)]ng-animate[(\s|\/)]/.test(d.toString()))throw d=null,fe("nongcls","ng-animate");return d};this.$get=["$$animateQueue",function(a){function b(a,c,d){if(d){var e;a:{for(e=0;e<d.length;e++){var l=d[e];if(1===l.nodeType){e=l;break a}}e=void 0}!e||e.parentNode||e.previousElementSibling||(d=null)}d?d.after(a):c.prepend(a)}return{on:a.on,off:a.off,pin:a.pin,enabled:a.enabled,cancel:function(a){a.end&&
-a.end()},enter:function(d,g,h,k){g=g&&B(g);h=h&&B(h);g=g||h.parent();b(d,g,h);return a.push(d,"enter",ia(k))},move:function(d,g,h,k){g=g&&B(g);h=h&&B(h);g=g||h.parent();b(d,g,h);return a.push(d,"move",ia(k))},leave:function(b,d){return a.push(b,"leave",ia(d),function(){b.remove()})},addClass:function(b,d,e){e=ia(e);e.addClass=ib(e.addclass,d);return a.push(b,"addClass",e)},removeClass:function(b,d,e){e=ia(e);e.removeClass=ib(e.removeClass,d);return a.push(b,"removeClass",e)},setClass:function(b,d,
-e,k){k=ia(k);k.addClass=ib(k.addClass,d);k.removeClass=ib(k.removeClass,e);return a.push(b,"setClass",k)},animate:function(b,d,e,k,l){l=ia(l);l.from=l.from?S(l.from,d):d;l.to=l.to?S(l.to,e):e;l.tempClasses=ib(l.tempClasses,k||"ng-inline-animate");return a.push(b,"animate",l)}}}]}],tf=function(){this.$get=["$$rAF",function(a){function b(b){d.push(b);1<d.length||a(function(){for(var a=0;a<d.length;a++)d[a]();d=[]})}var d=[];return function(){var a=!1;b(function(){a=!0});return function(d){a?d():b(d)}}}]},
-sf=function(){this.$get=["$q","$sniffer","$$animateAsyncRun","$$isDocumentHidden","$timeout",function(a,b,d,c,e){function f(a){this.setHost(a);var b=d();this._doneCallbacks=[];this._tick=function(a){c()?e(a,0,!1):b(a)};this._state=0}f.chain=function(a,b){function c(){if(d===a.length)b(!0);else a[d](function(a){!1===a?b(!1):(d++,c())})}var d=0;c()};f.all=function(a,b){function c(f){e=e&&f;++d===a.length&&b(e)}var d=0,e=!0;q(a,function(a){a.done(c)})};f.prototype={setHost:function(a){this.host=a||{}},
-done:function(a){2===this._state?a():this._doneCallbacks.push(a)},progress:z,getPromise:function(){if(!this.promise){var b=this;this.promise=a(function(a,c){b.done(function(b){!1===b?c():a()})})}return this.promise},then:function(a,b){return this.getPromise().then(a,b)},"catch":function(a){return this.getPromise()["catch"](a)},"finally":function(a){return this.getPromise()["finally"](a)},pause:function(){this.host.pause&&this.host.pause()},resume:function(){this.host.resume&&this.host.resume()},end:function(){this.host.end&&
-this.host.end();this._resolve(!0)},cancel:function(){this.host.cancel&&this.host.cancel();this._resolve(!1)},complete:function(a){var b=this;0===b._state&&(b._state=1,b._tick(function(){b._resolve(a)}))},_resolve:function(a){2!==this._state&&(q(this._doneCallbacks,function(b){b(a)}),this._doneCallbacks.length=0,this._state=2)}};return f}]},pf=function(){this.$get=["$$rAF","$q","$$AnimateRunner",function(a,b,d){return function(b,e){function f(){a(function(){g.addClass&&(b.addClass(g.addClass),g.addClass=
-null);g.removeClass&&(b.removeClass(g.removeClass),g.removeClass=null);g.to&&(b.css(g.to),g.to=null);h||k.complete();h=!0});return k}var g=e||{};g.$$prepared||(g=ra(g));g.cleanupStyles&&(g.from=g.to=null);g.from&&(b.css(g.from),g.from=null);var h,k=new d;return{start:f,end:f}}}]},fa=L("$compile"),lc=new function(){};Wc.$inject=["$provide","$$sanitizeUriProvider"];Ib.prototype.isFirstChange=function(){return this.previousValue===lc};var md=/^((?:x|data)[:\-_])/i,rg=/[:\-_]+(.)/g,td=L("$controller"),
-sd=/^(\S+)(\s+as\s+([\w$]+))?$/,Af=function(){this.$get=["$document",function(a){return function(b){b?!b.nodeType&&b instanceof B&&(b=b[0]):b=a[0].body;return b.offsetWidth+1}}]},ud="application/json",pc={"Content-Type":ud+";charset=utf-8"},ug=/^\[|^\{(?!\{)/,vg={"[":/]$/,"{":/}$/},tg=/^\)]\}',?\n/,oc=L("$http"),Da=ea.$interpolateMinErr=L("$interpolate");Da.throwNoconcat=function(a){throw Da("noconcat",a);};Da.interr=function(a,b){return Da("interr",a,b.toString())};var If=function(){this.$get=function(){function a(a){var b=
-function(a){b.data=a;b.called=!0};b.id=a;return b}var b=ea.callbacks,d={};return{createCallback:function(c){c="_"+(b.$$counter++).toString(36);var e="angular.callbacks."+c,f=a(c);d[e]=b[c]=f;return e},wasCalled:function(a){return d[a].called},getResponse:function(a){return d[a].data},removeCallback:function(a){delete b[d[a].id];delete d[a]}}}},Yg=/^([^?#]*)(\?([^#]*))?(#(.*))?$/,xg={http:80,https:443,ftp:21},kb=L("$location"),yg=/^\s*[\\/]{2,}/,Zg={$$absUrl:"",$$html5:!1,$$replace:!1,absUrl:Jb("$$absUrl"),
-url:function(a){if(w(a))return this.$$url;var b=Yg.exec(a);(b[1]||""===a)&&this.path(decodeURIComponent(b[1]));(b[2]||b[1]||""===a)&&this.search(b[3]||"");this.hash(b[5]||"");return this},protocol:Jb("$$protocol"),host:Jb("$$host"),port:Jb("$$port"),path:Cd("$$path",function(a){a=null!==a?a.toString():"";return"/"===a.charAt(0)?a:"/"+a}),search:function(a,b){switch(arguments.length){case 0:return this.$$search;case 1:if(F(a)||ba(a))a=a.toString(),this.$$search=Rc(a);else if(C(a))a=ra(a,{}),q(a,function(b,
-c){null==b&&delete a[c]}),this.$$search=a;else throw kb("isrcharg");break;default:w(b)||null===b?delete this.$$search[a]:this.$$search[a]=b}this.$$compose();return this},hash:Cd("$$hash",function(a){return null!==a?a.toString():""}),replace:function(){this.$$replace=!0;return this}};q([Bd,tc,sc],function(a){a.prototype=Object.create(Zg);a.prototype.state=function(b){if(!arguments.length)return this.$$state;if(a!==sc||!this.$$html5)throw kb("nostate");this.$$state=w(b)?null:b;this.$$urlUpdatedByLocation=
-!0;return this}});var Ua=L("$parse"),Bg={}.constructor.prototype.valueOf,Qb=V();q("+ - * / % === !== == != < > <= >= && || ! = |".split(" "),function(a){Qb[a]=!0});var $g={n:"\n",f:"\f",r:"\r",t:"\t",v:"\v","'":"'",'"':'"'},wc=function(a){this.options=a};wc.prototype={constructor:wc,lex:function(a){this.text=a;this.index=0;for(this.tokens=[];this.index<this.text.length;)if(a=this.text.charAt(this.index),'"'===a||"'"===a)this.readString(a);else if(this.isNumber(a)||"."===a&&this.isNumber(this.peek()))this.readNumber();
-else if(this.isIdentifierStart(this.peekMultichar()))this.readIdent();else if(this.is(a,"(){}[].,;:?"))this.tokens.push({index:this.index,text:a}),this.index++;else if(this.isWhitespace(a))this.index++;else{var b=a+this.peek(),d=b+this.peek(2),c=Qb[b],e=Qb[d];Qb[a]||c||e?(a=e?d:c?b:a,this.tokens.push({index:this.index,text:a,operator:!0}),this.index+=a.length):this.throwError("Unexpected next character ",this.index,this.index+1)}return this.tokens},is:function(a,b){return-1!==b.indexOf(a)},peek:function(a){a=
-a||1;return this.index+a<this.text.length?this.text.charAt(this.index+a):!1},isNumber:function(a){return"0"<=a&&"9">=a&&"string"===typeof a},isWhitespace:function(a){return" "===a||"\r"===a||"\t"===a||"\n"===a||"\v"===a||"\u00a0"===a},isIdentifierStart:function(a){return this.options.isIdentifierStart?this.options.isIdentifierStart(a,this.codePointAt(a)):this.isValidIdentifierStart(a)},isValidIdentifierStart:function(a){return"a"<=a&&"z">=a||"A"<=a&&"Z">=a||"_"===a||"$"===a},isIdentifierContinue:function(a){return this.options.isIdentifierContinue?
-this.options.isIdentifierContinue(a,this.codePointAt(a)):this.isValidIdentifierContinue(a)},isValidIdentifierContinue:function(a,b){return this.isValidIdentifierStart(a,b)||this.isNumber(a)},codePointAt:function(a){return 1===a.length?a.charCodeAt(0):(a.charCodeAt(0)<<10)+a.charCodeAt(1)-56613888},peekMultichar:function(){var a=this.text.charAt(this.index),b=this.peek();if(!b)return a;var d=a.charCodeAt(0),c=b.charCodeAt(0);return 55296<=d&&56319>=d&&56320<=c&&57343>=c?a+b:a},isExpOperator:function(a){return"-"===
-a||"+"===a||this.isNumber(a)},throwError:function(a,b,d){d=d||this.index;b=u(b)?"s "+b+"-"+this.index+" ["+this.text.substring(b,d)+"]":" "+d;throw Ua("lexerr",a,b,this.text);},readNumber:function(){for(var a="",b=this.index;this.index<this.text.length;){var d=Q(this.text.charAt(this.index));if("."===d||this.isNumber(d))a+=d;else{var c=this.peek();if("e"===d&&this.isExpOperator(c))a+=d;else if(this.isExpOperator(d)&&c&&this.isNumber(c)&&"e"===a.charAt(a.length-1))a+=d;else if(!this.isExpOperator(d)||
-c&&this.isNumber(c)||"e"!==a.charAt(a.length-1))break;else this.throwError("Invalid exponent")}this.index++}this.tokens.push({index:b,text:a,constant:!0,value:Number(a)})},readIdent:function(){var a=this.index;for(this.index+=this.peekMultichar().length;this.index<this.text.length;){var b=this.peekMultichar();if(!this.isIdentifierContinue(b))break;this.index+=b.length}this.tokens.push({index:a,text:this.text.slice(a,this.index),identifier:!0})},readString:function(a){var b=this.index;this.index++;
-for(var d="",c=a,e=!1;this.index<this.text.length;){var f=this.text.charAt(this.index),c=c+f;if(e)"u"===f?(e=this.text.substring(this.index+1,this.index+5),e.match(/[\da-f]{4}/i)||this.throwError("Invalid unicode escape [\\u"+e+"]"),this.index+=4,d+=String.fromCharCode(parseInt(e,16))):d+=$g[f]||f,e=!1;else if("\\"===f)e=!0;else{if(f===a){this.index++;this.tokens.push({index:b,text:c,constant:!0,value:d});return}d+=f}this.index++}this.throwError("Unterminated quote",b)}};var s=function(a,b){this.lexer=
-a;this.options=b};s.Program="Program";s.ExpressionStatement="ExpressionStatement";s.AssignmentExpression="AssignmentExpression";s.ConditionalExpression="ConditionalExpression";s.LogicalExpression="LogicalExpression";s.BinaryExpression="BinaryExpression";s.UnaryExpression="UnaryExpression";s.CallExpression="CallExpression";s.MemberExpression="MemberExpression";s.Identifier="Identifier";s.Literal="Literal";s.ArrayExpression="ArrayExpression";s.Property="Property";s.ObjectExpression="ObjectExpression";
-s.ThisExpression="ThisExpression";s.LocalsExpression="LocalsExpression";s.NGValueParameter="NGValueParameter";s.prototype={ast:function(a){this.text=a;this.tokens=this.lexer.lex(a);a=this.program();0!==this.tokens.length&&this.throwError("is an unexpected token",this.tokens[0]);return a},program:function(){for(var a=[];;)if(0<this.tokens.length&&!this.peek("}",")",";","]")&&a.push(this.expressionStatement()),!this.expect(";"))return{type:s.Program,body:a}},expressionStatement:function(){return{type:s.ExpressionStatement,
-expression:this.filterChain()}},filterChain:function(){for(var a=this.expression();this.expect("|");)a=this.filter(a);return a},expression:function(){return this.assignment()},assignment:function(){var a=this.ternary();if(this.expect("=")){if(!Fd(a))throw Ua("lval");a={type:s.AssignmentExpression,left:a,right:this.assignment(),operator:"="}}return a},ternary:function(){var a=this.logicalOR(),b,d;return this.expect("?")&&(b=this.expression(),this.consume(":"))?(d=this.expression(),{type:s.ConditionalExpression,
-test:a,alternate:b,consequent:d}):a},logicalOR:function(){for(var a=this.logicalAND();this.expect("||");)a={type:s.LogicalExpression,operator:"||",left:a,right:this.logicalAND()};return a},logicalAND:function(){for(var a=this.equality();this.expect("&&");)a={type:s.LogicalExpression,operator:"&&",left:a,right:this.equality()};return a},equality:function(){for(var a=this.relational(),b;b=this.expect("==","!=","===","!==");)a={type:s.BinaryExpression,operator:b.text,left:a,right:this.relational()};
-return a},relational:function(){for(var a=this.additive(),b;b=this.expect("<",">","<=",">=");)a={type:s.BinaryExpression,operator:b.text,left:a,right:this.additive()};return a},additive:function(){for(var a=this.multiplicative(),b;b=this.expect("+","-");)a={type:s.BinaryExpression,operator:b.text,left:a,right:this.multiplicative()};return a},multiplicative:function(){for(var a=this.unary(),b;b=this.expect("*","/","%");)a={type:s.BinaryExpression,operator:b.text,left:a,right:this.unary()};return a},
-unary:function(){var a;return(a=this.expect("+","-","!"))?{type:s.UnaryExpression,operator:a.text,prefix:!0,argument:this.unary()}:this.primary()},primary:function(){var a;this.expect("(")?(a=this.filterChain(),this.consume(")")):this.expect("[")?a=this.arrayDeclaration():this.expect("{")?a=this.object():this.selfReferential.hasOwnProperty(this.peek().text)?a=ra(this.selfReferential[this.consume().text]):this.options.literals.hasOwnProperty(this.peek().text)?a={type:s.Literal,value:this.options.literals[this.consume().text]}:
-this.peek().identifier?a=this.identifier():this.peek().constant?a=this.constant():this.throwError("not a primary expression",this.peek());for(var b;b=this.expect("(","[",".");)"("===b.text?(a={type:s.CallExpression,callee:a,arguments:this.parseArguments()},this.consume(")")):"["===b.text?(a={type:s.MemberExpression,object:a,property:this.expression(),computed:!0},this.consume("]")):"."===b.text?a={type:s.MemberExpression,object:a,property:this.identifier(),computed:!1}:this.throwError("IMPOSSIBLE");
-return a},filter:function(a){a=[a];for(var b={type:s.CallExpression,callee:this.identifier(),arguments:a,filter:!0};this.expect(":");)a.push(this.expression());return b},parseArguments:function(){var a=[];if(")"!==this.peekToken().text){do a.push(this.filterChain());while(this.expect(","))}return a},identifier:function(){var a=this.consume();a.identifier||this.throwError("is not a valid identifier",a);return{type:s.Identifier,name:a.text}},constant:function(){return{type:s.Literal,value:this.consume().value}},
-arrayDeclaration:function(){var a=[];if("]"!==this.peekToken().text){do{if(this.peek("]"))break;a.push(this.expression())}while(this.expect(","))}this.consume("]");return{type:s.ArrayExpression,elements:a}},object:function(){var a=[],b;if("}"!==this.peekToken().text){do{if(this.peek("}"))break;b={type:s.Property,kind:"init"};this.peek().constant?(b.key=this.constant(),b.computed=!1,this.consume(":"),b.value=this.expression()):this.peek().identifier?(b.key=this.identifier(),b.computed=!1,this.peek(":")?
-(this.consume(":"),b.value=this.expression()):b.value=b.key):this.peek("[")?(this.consume("["),b.key=this.expression(),this.consume("]"),b.computed=!0,this.consume(":"),b.value=this.expression()):this.throwError("invalid key",this.peek());a.push(b)}while(this.expect(","))}this.consume("}");return{type:s.ObjectExpression,properties:a}},throwError:function(a,b){throw Ua("syntax",b.text,a,b.index+1,this.text,this.text.substring(b.index));},consume:function(a){if(0===this.tokens.length)throw Ua("ueoe",
-this.text);var b=this.expect(a);b||this.throwError("is unexpected, expecting ["+a+"]",this.peek());return b},peekToken:function(){if(0===this.tokens.length)throw Ua("ueoe",this.text);return this.tokens[0]},peek:function(a,b,d,c){return this.peekAhead(0,a,b,d,c)},peekAhead:function(a,b,d,c,e){if(this.tokens.length>a){a=this.tokens[a];var f=a.text;if(f===b||f===d||f===c||f===e||!(b||d||c||e))return a}return!1},expect:function(a,b,d,c){return(a=this.peek(a,b,d,c))?(this.tokens.shift(),a):!1},selfReferential:{"this":{type:s.ThisExpression},
-$locals:{type:s.LocalsExpression}}};Hd.prototype={compile:function(a){var b=this;this.state={nextId:0,filters:{},fn:{vars:[],body:[],own:{}},assign:{vars:[],body:[],own:{}},inputs:[]};U(a,b.$filter);var d="",c;this.stage="assign";if(c=Gd(a))this.state.computing="assign",d=this.nextId(),this.recurse(c,d),this.return_(d),d="fn.assign="+this.generateFunction("assign","s,v,l");c=Ed(a.body);b.stage="inputs";q(c,function(a,c){var d="fn"+c;b.state[d]={vars:[],body:[],own:{}};b.state.computing=d;var h=b.nextId();
-b.recurse(a,h);b.return_(h);b.state.inputs.push(d);a.watchId=c});this.state.computing="fn";this.stage="main";this.recurse(a);a='"'+this.USE+" "+this.STRICT+'";\n'+this.filterPrefix()+"var fn="+this.generateFunction("fn","s,l,a,i")+d+this.watchFns()+"return fn;";a=(new Function("$filter","getStringValue","ifDefined","plus",a))(this.$filter,zg,Ag,Dd);this.state=this.stage=void 0;return a},USE:"use",STRICT:"strict",watchFns:function(){var a=[],b=this.state.inputs,d=this;q(b,function(b){a.push("var "+
-b+"="+d.generateFunction(b,"s"))});b.length&&a.push("fn.inputs=["+b.join(",")+"];");return a.join("")},generateFunction:function(a,b){return"function("+b+"){"+this.varsPrefix(a)+this.body(a)+"};"},filterPrefix:function(){var a=[],b=this;q(this.state.filters,function(d,c){a.push(d+"=$filter("+b.escape(c)+")")});return a.length?"var "+a.join(",")+";":""},varsPrefix:function(a){return this.state[a].vars.length?"var "+this.state[a].vars.join(",")+";":""},body:function(a){return this.state[a].body.join("")},
-recurse:function(a,b,d,c,e,f){var g,h,k=this,l,m,n;c=c||z;if(!f&&u(a.watchId))b=b||this.nextId(),this.if_("i",this.lazyAssign(b,this.computedMember("i",a.watchId)),this.lazyRecurse(a,b,d,c,e,!0));else switch(a.type){case s.Program:q(a.body,function(b,c){k.recurse(b.expression,void 0,void 0,function(a){h=a});c!==a.body.length-1?k.current().body.push(h,";"):k.return_(h)});break;case s.Literal:m=this.escape(a.value);this.assign(b,m);c(b||m);break;case s.UnaryExpression:this.recurse(a.argument,void 0,
-void 0,function(a){h=a});m=a.operator+"("+this.ifDefined(h,0)+")";this.assign(b,m);c(m);break;case s.BinaryExpression:this.recurse(a.left,void 0,void 0,function(a){g=a});this.recurse(a.right,void 0,void 0,function(a){h=a});m="+"===a.operator?this.plus(g,h):"-"===a.operator?this.ifDefined(g,0)+a.operator+this.ifDefined(h,0):"("+g+")"+a.operator+"("+h+")";this.assign(b,m);c(m);break;case s.LogicalExpression:b=b||this.nextId();k.recurse(a.left,b);k.if_("&&"===a.operator?b:k.not(b),k.lazyRecurse(a.right,
-b));c(b);break;case s.ConditionalExpression:b=b||this.nextId();k.recurse(a.test,b);k.if_(b,k.lazyRecurse(a.alternate,b),k.lazyRecurse(a.consequent,b));c(b);break;case s.Identifier:b=b||this.nextId();d&&(d.context="inputs"===k.stage?"s":this.assign(this.nextId(),this.getHasOwnProperty("l",a.name)+"?l:s"),d.computed=!1,d.name=a.name);k.if_("inputs"===k.stage||k.not(k.getHasOwnProperty("l",a.name)),function(){k.if_("inputs"===k.stage||"s",function(){e&&1!==e&&k.if_(k.isNull(k.nonComputedMember("s",a.name)),
-k.lazyAssign(k.nonComputedMember("s",a.name),"{}"));k.assign(b,k.nonComputedMember("s",a.name))})},b&&k.lazyAssign(b,k.nonComputedMember("l",a.name)));c(b);break;case s.MemberExpression:g=d&&(d.context=this.nextId())||this.nextId();b=b||this.nextId();k.recurse(a.object,g,void 0,function(){k.if_(k.notNull(g),function(){a.computed?(h=k.nextId(),k.recurse(a.property,h),k.getStringValue(h),e&&1!==e&&k.if_(k.not(k.computedMember(g,h)),k.lazyAssign(k.computedMember(g,h),"{}")),m=k.computedMember(g,h),k.assign(b,
-m),d&&(d.computed=!0,d.name=h)):(e&&1!==e&&k.if_(k.isNull(k.nonComputedMember(g,a.property.name)),k.lazyAssign(k.nonComputedMember(g,a.property.name),"{}")),m=k.nonComputedMember(g,a.property.name),k.assign(b,m),d&&(d.computed=!1,d.name=a.property.name))},function(){k.assign(b,"undefined")});c(b)},!!e);break;case s.CallExpression:b=b||this.nextId();a.filter?(h=k.filter(a.callee.name),l=[],q(a.arguments,function(a){var b=k.nextId();k.recurse(a,b);l.push(b)}),m=h+"("+l.join(",")+")",k.assign(b,m),c(b)):
-(h=k.nextId(),g={},l=[],k.recurse(a.callee,h,g,function(){k.if_(k.notNull(h),function(){q(a.arguments,function(b){k.recurse(b,a.constant?void 0:k.nextId(),void 0,function(a){l.push(a)})});m=g.name?k.member(g.context,g.name,g.computed)+"("+l.join(",")+")":h+"("+l.join(",")+")";k.assign(b,m)},function(){k.assign(b,"undefined")});c(b)}));break;case s.AssignmentExpression:h=this.nextId();g={};this.recurse(a.left,void 0,g,function(){k.if_(k.notNull(g.context),function(){k.recurse(a.right,h);m=k.member(g.context,
-g.name,g.computed)+a.operator+h;k.assign(b,m);c(b||m)})},1);break;case s.ArrayExpression:l=[];q(a.elements,function(b){k.recurse(b,a.constant?void 0:k.nextId(),void 0,function(a){l.push(a)})});m="["+l.join(",")+"]";this.assign(b,m);c(b||m);break;case s.ObjectExpression:l=[];n=!1;q(a.properties,function(a){a.computed&&(n=!0)});n?(b=b||this.nextId(),this.assign(b,"{}"),q(a.properties,function(a){a.computed?(g=k.nextId(),k.recurse(a.key,g)):g=a.key.type===s.Identifier?a.key.name:""+a.key.value;h=k.nextId();
-k.recurse(a.value,h);k.assign(k.member(b,g,a.computed),h)})):(q(a.properties,function(b){k.recurse(b.value,a.constant?void 0:k.nextId(),void 0,function(a){l.push(k.escape(b.key.type===s.Identifier?b.key.name:""+b.key.value)+":"+a)})}),m="{"+l.join(",")+"}",this.assign(b,m));c(b||m);break;case s.ThisExpression:this.assign(b,"s");c(b||"s");break;case s.LocalsExpression:this.assign(b,"l");c(b||"l");break;case s.NGValueParameter:this.assign(b,"v"),c(b||"v")}},getHasOwnProperty:function(a,b){var d=a+"."+
-b,c=this.current().own;c.hasOwnProperty(d)||(c[d]=this.nextId(!1,a+"&&("+this.escape(b)+" in "+a+")"));return c[d]},assign:function(a,b){if(a)return this.current().body.push(a,"=",b,";"),a},filter:function(a){this.state.filters.hasOwnProperty(a)||(this.state.filters[a]=this.nextId(!0));return this.state.filters[a]},ifDefined:function(a,b){return"ifDefined("+a+","+this.escape(b)+")"},plus:function(a,b){return"plus("+a+","+b+")"},return_:function(a){this.current().body.push("return ",a,";")},if_:function(a,
-b,d){if(!0===a)b();else{var c=this.current().body;c.push("if(",a,"){");b();c.push("}");d&&(c.push("else{"),d(),c.push("}"))}},not:function(a){return"!("+a+")"},isNull:function(a){return a+"==null"},notNull:function(a){return a+"!=null"},nonComputedMember:function(a,b){var d=/[^$_a-zA-Z0-9]/g;return/^[$_a-zA-Z][$_a-zA-Z0-9]*$/.test(b)?a+"."+b:a+'["'+b.replace(d,this.stringEscapeFn)+'"]'},computedMember:function(a,b){return a+"["+b+"]"},member:function(a,b,d){return d?this.computedMember(a,b):this.nonComputedMember(a,
-b)},getStringValue:function(a){this.assign(a,"getStringValue("+a+")")},lazyRecurse:function(a,b,d,c,e,f){var g=this;return function(){g.recurse(a,b,d,c,e,f)}},lazyAssign:function(a,b){var d=this;return function(){d.assign(a,b)}},stringEscapeRegex:/[^ a-zA-Z0-9]/g,stringEscapeFn:function(a){return"\\u"+("0000"+a.charCodeAt(0).toString(16)).slice(-4)},escape:function(a){if(F(a))return"'"+a.replace(this.stringEscapeRegex,this.stringEscapeFn)+"'";if(ba(a))return a.toString();if(!0===a)return"true";if(!1===
-a)return"false";if(null===a)return"null";if("undefined"===typeof a)return"undefined";throw Ua("esc");},nextId:function(a,b){var d="v"+this.state.nextId++;a||this.current().vars.push(d+(b?"="+b:""));return d},current:function(){return this.state[this.state.computing]}};Id.prototype={compile:function(a){var b=this;U(a,b.$filter);var d,c;if(d=Gd(a))c=this.recurse(d);d=Ed(a.body);var e;d&&(e=[],q(d,function(a,c){var d=b.recurse(a);a.input=d;e.push(d);a.watchId=c}));var f=[];q(a.body,function(a){f.push(b.recurse(a.expression))});
-a=0===a.body.length?z:1===a.body.length?f[0]:function(a,b){var c;q(f,function(d){c=d(a,b)});return c};c&&(a.assign=function(a,b,d){return c(a,d,b)});e&&(a.inputs=e);return a},recurse:function(a,b,d){var c,e,f=this,g;if(a.input)return this.inputs(a.input,a.watchId);switch(a.type){case s.Literal:return this.value(a.value,b);case s.UnaryExpression:return e=this.recurse(a.argument),this["unary"+a.operator](e,b);case s.BinaryExpression:return c=this.recurse(a.left),e=this.recurse(a.right),this["binary"+
-a.operator](c,e,b);case s.LogicalExpression:return c=this.recurse(a.left),e=this.recurse(a.right),this["binary"+a.operator](c,e,b);case s.ConditionalExpression:return this["ternary?:"](this.recurse(a.test),this.recurse(a.alternate),this.recurse(a.consequent),b);case s.Identifier:return f.identifier(a.name,b,d);case s.MemberExpression:return c=this.recurse(a.object,!1,!!d),a.computed||(e=a.property.name),a.computed&&(e=this.recurse(a.property)),a.computed?this.computedMember(c,e,b,d):this.nonComputedMember(c,
-e,b,d);case s.CallExpression:return g=[],q(a.arguments,function(a){g.push(f.recurse(a))}),a.filter&&(e=this.$filter(a.callee.name)),a.filter||(e=this.recurse(a.callee,!0)),a.filter?function(a,c,d,f){for(var n=[],p=0;p<g.length;++p)n.push(g[p](a,c,d,f));a=e.apply(void 0,n,f);return b?{context:void 0,name:void 0,value:a}:a}:function(a,c,d,f){var n=e(a,c,d,f),p;if(null!=n.value){p=[];for(var r=0;r<g.length;++r)p.push(g[r](a,c,d,f));p=n.value.apply(n.context,p)}return b?{value:p}:p};case s.AssignmentExpression:return c=
-this.recurse(a.left,!0,1),e=this.recurse(a.right),function(a,d,f,g){var n=c(a,d,f,g);a=e(a,d,f,g);n.context[n.name]=a;return b?{value:a}:a};case s.ArrayExpression:return g=[],q(a.elements,function(a){g.push(f.recurse(a))}),function(a,c,d,e){for(var f=[],p=0;p<g.length;++p)f.push(g[p](a,c,d,e));return b?{value:f}:f};case s.ObjectExpression:return g=[],q(a.properties,function(a){a.computed?g.push({key:f.recurse(a.key),computed:!0,value:f.recurse(a.value)}):g.push({key:a.key.type===s.Identifier?a.key.name:
-""+a.key.value,computed:!1,value:f.recurse(a.value)})}),function(a,c,d,e){for(var f={},p=0;p<g.length;++p)g[p].computed?f[g[p].key(a,c,d,e)]=g[p].value(a,c,d,e):f[g[p].key]=g[p].value(a,c,d,e);return b?{value:f}:f};case s.ThisExpression:return function(a){return b?{value:a}:a};case s.LocalsExpression:return function(a,c){return b?{value:c}:c};case s.NGValueParameter:return function(a,c,d){return b?{value:d}:d}}},"unary+":function(a,b){return function(d,c,e,f){d=a(d,c,e,f);d=u(d)?+d:0;return b?{value:d}:
-d}},"unary-":function(a,b){return function(d,c,e,f){d=a(d,c,e,f);d=u(d)?-d:-0;return b?{value:d}:d}},"unary!":function(a,b){return function(d,c,e,f){d=!a(d,c,e,f);return b?{value:d}:d}},"binary+":function(a,b,d){return function(c,e,f,g){var h=a(c,e,f,g);c=b(c,e,f,g);h=Dd(h,c);return d?{value:h}:h}},"binary-":function(a,b,d){return function(c,e,f,g){var h=a(c,e,f,g);c=b(c,e,f,g);h=(u(h)?h:0)-(u(c)?c:0);return d?{value:h}:h}},"binary*":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)*b(c,e,f,g);
-return d?{value:c}:c}},"binary/":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)/b(c,e,f,g);return d?{value:c}:c}},"binary%":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)%b(c,e,f,g);return d?{value:c}:c}},"binary===":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)===b(c,e,f,g);return d?{value:c}:c}},"binary!==":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)!==b(c,e,f,g);return d?{value:c}:c}},"binary==":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)==b(c,e,f,g);return d?
-{value:c}:c}},"binary!=":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)!=b(c,e,f,g);return d?{value:c}:c}},"binary<":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)<b(c,e,f,g);return d?{value:c}:c}},"binary>":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)>b(c,e,f,g);return d?{value:c}:c}},"binary<=":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)<=b(c,e,f,g);return d?{value:c}:c}},"binary>=":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)>=b(c,e,f,g);return d?{value:c}:
-c}},"binary&&":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)&&b(c,e,f,g);return d?{value:c}:c}},"binary||":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)||b(c,e,f,g);return d?{value:c}:c}},"ternary?:":function(a,b,d,c){return function(e,f,g,h){e=a(e,f,g,h)?b(e,f,g,h):d(e,f,g,h);return c?{value:e}:e}},value:function(a,b){return function(){return b?{context:void 0,name:void 0,value:a}:a}},identifier:function(a,b,d){return function(c,e,f,g){c=e&&a in e?e:c;d&&1!==d&&c&&null==c[a]&&(c[a]=
-{});e=c?c[a]:void 0;return b?{context:c,name:a,value:e}:e}},computedMember:function(a,b,d,c){return function(e,f,g,h){var k=a(e,f,g,h),l,m;null!=k&&(l=b(e,f,g,h),l+="",c&&1!==c&&k&&!k[l]&&(k[l]={}),m=k[l]);return d?{context:k,name:l,value:m}:m}},nonComputedMember:function(a,b,d,c){return function(e,f,g,h){e=a(e,f,g,h);c&&1!==c&&e&&null==e[b]&&(e[b]={});f=null!=e?e[b]:void 0;return d?{context:e,name:b,value:f}:f}},inputs:function(a,b){return function(d,c,e,f){return f?f[b]:a(d,c,e)}}};uc.prototype=
-{constructor:uc,parse:function(a){a=this.ast.ast(a);var b=this.astCompiler.compile(a);b.literal=0===a.body.length||1===a.body.length&&(a.body[0].expression.type===s.Literal||a.body[0].expression.type===s.ArrayExpression||a.body[0].expression.type===s.ObjectExpression);b.constant=a.constant;return b}};var ta=L("$sce"),oa={HTML:"html",CSS:"css",URL:"url",RESOURCE_URL:"resourceUrl",JS:"js"},xc=/_([a-z])/g,Dg=L("$compile"),aa=x.document.createElement("a"),Md=Ca(x.location.href);Nd.$inject=["$document"];
-cd.$inject=["$provide"];var Ud=22,Td=".",zc="0";Od.$inject=["$locale"];Qd.$inject=["$locale"];var Og={yyyy:Y("FullYear",4,0,!1,!0),yy:Y("FullYear",2,0,!0,!0),y:Y("FullYear",1,0,!1,!0),MMMM:mb("Month"),MMM:mb("Month",!0),MM:Y("Month",2,1),M:Y("Month",1,1),LLLL:mb("Month",!1,!0),dd:Y("Date",2),d:Y("Date",1),HH:Y("Hours",2),H:Y("Hours",1),hh:Y("Hours",2,-12),h:Y("Hours",1,-12),mm:Y("Minutes",2),m:Y("Minutes",1),ss:Y("Seconds",2),s:Y("Seconds",1),sss:Y("Milliseconds",3),EEEE:mb("Day"),EEE:mb("Day",!0),
-a:function(a,b){return 12>a.getHours()?b.AMPMS[0]:b.AMPMS[1]},Z:function(a,b,d){a=-1*d;return a=(0<=a?"+":"")+(Kb(Math[0<a?"floor":"ceil"](a/60),2)+Kb(Math.abs(a%60),2))},ww:Wd(2),w:Wd(1),G:Ac,GG:Ac,GGG:Ac,GGGG:function(a,b){return 0>=a.getFullYear()?b.ERANAMES[0]:b.ERANAMES[1]}},Ng=/((?:[^yMLdHhmsaZEwG']+)|(?:'(?:[^']|'')*')|(?:E+|y+|M+|L+|d+|H+|h+|m+|s+|a|Z|G+|w+))([\s\S]*)/,Mg=/^-?\d+$/;Pd.$inject=["$locale"];var Hg=la(Q),Ig=la(ub);Rd.$inject=["$parse"];var Fe=la({restrict:"E",compile:function(a,
-b){if(!b.href&&!b.xlinkHref)return function(a,b){if("a"===b[0].nodeName.toLowerCase()){var e="[object SVGAnimatedString]"===ma.call(b.prop("href"))?"xlink:href":"href";b.on("click",function(a){b.attr(e)||a.preventDefault()})}}}}),vb={};q(Fb,function(a,b){function d(a,d,e){a.$watch(e[c],function(a){e.$set(b,!!a)})}if("multiple"!==a){var c=Ba("ng-"+b),e=d;"checked"===a&&(e=function(a,b,e){e.ngModel!==e[c]&&d(a,b,e)});vb[c]=function(){return{restrict:"A",priority:100,link:e}}}});q(rd,function(a,b){vb[b]=
-function(){return{priority:100,link:function(a,c,e){if("ngPattern"===b&&"/"===e.ngPattern.charAt(0)&&(c=e.ngPattern.match(Sg))){e.$set("ngPattern",new RegExp(c[1],c[2]));return}a.$watch(e[b],function(a){e.$set(b,a)})}}}});q(["src","srcset","href"],function(a){var b=Ba("ng-"+a);vb[b]=function(){return{priority:99,link:function(d,c,e){var f=a,g=a;"href"===a&&"[object SVGAnimatedString]"===ma.call(c.prop("href"))&&(g="xlinkHref",e.$attr[g]="xlink:href",f=null);e.$observe(b,function(b){b?(e.$set(g,b),
-za&&f&&c.prop(f,e[g])):"href"===a&&e.$set(g,null)})}}}});var Mb={$addControl:z,$$renameControl:function(a,b){a.$name=b},$removeControl:z,$setValidity:z,$setDirty:z,$setPristine:z,$setSubmitted:z};Lb.$inject=["$element","$attrs","$scope","$animate","$interpolate"];Lb.prototype={$rollbackViewValue:function(){q(this.$$controls,function(a){a.$rollbackViewValue()})},$commitViewValue:function(){q(this.$$controls,function(a){a.$commitViewValue()})},$addControl:function(a){Ka(a.$name,"input");this.$$controls.push(a);
-a.$name&&(this[a.$name]=a);a.$$parentForm=this},$$renameControl:function(a,b){var d=a.$name;this[d]===a&&delete this[d];this[b]=a;a.$name=b},$removeControl:function(a){a.$name&&this[a.$name]===a&&delete this[a.$name];q(this.$pending,function(b,d){this.$setValidity(d,null,a)},this);q(this.$error,function(b,d){this.$setValidity(d,null,a)},this);q(this.$$success,function(b,d){this.$setValidity(d,null,a)},this);$a(this.$$controls,a);a.$$parentForm=Mb},$setDirty:function(){this.$$animate.removeClass(this.$$element,
-Va);this.$$animate.addClass(this.$$element,Rb);this.$dirty=!0;this.$pristine=!1;this.$$parentForm.$setDirty()},$setPristine:function(){this.$$animate.setClass(this.$$element,Va,Rb+" ng-submitted");this.$dirty=!1;this.$pristine=!0;this.$submitted=!1;q(this.$$controls,function(a){a.$setPristine()})},$setUntouched:function(){q(this.$$controls,function(a){a.$setUntouched()})},$setSubmitted:function(){this.$$animate.addClass(this.$$element,"ng-submitted");this.$submitted=!0;this.$$parentForm.$setSubmitted()}};
-Zd({clazz:Lb,set:function(a,b,d){var c=a[b];c?-1===c.indexOf(d)&&c.push(d):a[b]=[d]},unset:function(a,b,d){var c=a[b];c&&($a(c,d),0===c.length&&delete a[b])}});var ge=function(a){return["$timeout","$parse",function(b,d){function c(a){return""===a?d('this[""]').assign:d(a).assign||z}return{name:"form",restrict:a?"EAC":"E",require:["form","^^?form"],controller:Lb,compile:function(d,f){d.addClass(Va).addClass(nb);var g=f.name?"name":a&&f.ngForm?"ngForm":!1;return{pre:function(a,d,e,f){var n=f[0];if(!("action"in
-e)){var p=function(b){a.$apply(function(){n.$commitViewValue();n.$setSubmitted()});b.preventDefault()};d[0].addEventListener("submit",p);d.on("$destroy",function(){b(function(){d[0].removeEventListener("submit",p)},0,!1)})}(f[1]||n.$$parentForm).$addControl(n);var r=g?c(n.$name):z;g&&(r(a,n),e.$observe(g,function(b){n.$name!==b&&(r(a,void 0),n.$$parentForm.$$renameControl(n,b),r=c(n.$name),r(a,n))}));d.on("$destroy",function(){n.$$parentForm.$removeControl(n);r(a,void 0);S(n,Mb)})}}}}}]},Ge=ge(),
-Se=ge(!0),Pg=/^\d{4,}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d\.\d+(?:[+-][0-2]\d:[0-5]\d|Z)$/,ah=/^[a-z][a-z\d.+-]*:\/*(?:[^:@]+(?::[^@]+)?@)?(?:[^\s:/?#]+|\[[a-f\d:]+])(?::\d+)?(?:\/[^?#]*)?(?:\?[^#]*)?(?:#.*)?$/i,bh=/^(?=.{1,254}$)(?=.{1,64}@)[-!#$%&'*+/0-9=?A-Z^_`a-z{|}~]+(\.[-!#$%&'*+/0-9=?A-Z^_`a-z{|}~]+)*@[A-Za-z0-9]([A-Za-z0-9-]{0,61}[A-Za-z0-9])?(\.[A-Za-z0-9]([A-Za-z0-9-]{0,61}[A-Za-z0-9])?)*$/,Qg=/^\s*(-|\+)?(\d+|(\d*(\.\d*)))([eE][+-]?\d+)?\s*$/,he=/^(\d{4,})-(\d{2})-(\d{2})$/,ie=/^(\d{4,})-(\d\d)-(\d\d)T(\d\d):(\d\d)(?::(\d\d)(\.\d{1,3})?)?$/,
-Hc=/^(\d{4,})-W(\d\d)$/,je=/^(\d{4,})-(\d\d)$/,ke=/^(\d\d):(\d\d)(?::(\d\d)(\.\d{1,3})?)?$/,ae=V();q(["date","datetime-local","month","time","week"],function(a){ae[a]=!0});var le={text:function(a,b,d,c,e,f){Ra(a,b,d,c,e,f);Cc(c)},date:ob("date",he,Nb(he,["yyyy","MM","dd"]),"yyyy-MM-dd"),"datetime-local":ob("datetimelocal",ie,Nb(ie,"yyyy MM dd HH mm ss sss".split(" ")),"yyyy-MM-ddTHH:mm:ss.sss"),time:ob("time",ke,Nb(ke,["HH","mm","ss","sss"]),"HH:mm:ss.sss"),week:ob("week",Hc,function(a,b){if(ga(a))return a;
-if(F(a)){Hc.lastIndex=0;var d=Hc.exec(a);if(d){var c=+d[1],e=+d[2],f=d=0,g=0,h=0,k=Vd(c),e=7*(e-1);b&&(d=b.getHours(),f=b.getMinutes(),g=b.getSeconds(),h=b.getMilliseconds());return new Date(c,0,k.getDate()+e,d,f,g,h)}}return NaN},"yyyy-Www"),month:ob("month",je,Nb(je,["yyyy","MM"]),"yyyy-MM"),number:function(a,b,d,c,e,f){Dc(a,b,d,c);be(c);Ra(a,b,d,c,e,f);var g,h;if(u(d.min)||d.ngMin)c.$validators.min=function(a){return c.$isEmpty(a)||w(g)||a>=g},d.$observe("min",function(a){g=Sa(a);c.$validate()});
-if(u(d.max)||d.ngMax)c.$validators.max=function(a){return c.$isEmpty(a)||w(h)||a<=h},d.$observe("max",function(a){h=Sa(a);c.$validate()});if(u(d.step)||d.ngStep){var k;c.$validators.step=function(a,b){return c.$isEmpty(b)||w(k)||ce(b,g||0,k)};d.$observe("step",function(a){k=Sa(a);c.$validate()})}},url:function(a,b,d,c,e,f){Ra(a,b,d,c,e,f);Cc(c);c.$$parserName="url";c.$validators.url=function(a,b){var d=a||b;return c.$isEmpty(d)||ah.test(d)}},email:function(a,b,d,c,e,f){Ra(a,b,d,c,e,f);Cc(c);c.$$parserName=
-"email";c.$validators.email=function(a,b){var d=a||b;return c.$isEmpty(d)||bh.test(d)}},radio:function(a,b,d,c){var e=!d.ngTrim||"false"!==T(d.ngTrim);w(d.name)&&b.attr("name",++qb);b.on("click",function(a){var g;b[0].checked&&(g=d.value,e&&(g=T(g)),c.$setViewValue(g,a&&a.type))});c.$render=function(){var a=d.value;e&&(a=T(a));b[0].checked=a===c.$viewValue};d.$observe("value",c.$render)},range:function(a,b,d,c,e,f){function g(a,c){b.attr(a,d[a]);d.$observe(a,c)}function h(a){n=Sa(a);da(c.$modelValue)||
-(m?(a=b.val(),n>a&&(a=n,b.val(a)),c.$setViewValue(a)):c.$validate())}function k(a){p=Sa(a);da(c.$modelValue)||(m?(a=b.val(),p<a&&(b.val(p),a=p<n?n:p),c.$setViewValue(a)):c.$validate())}function l(a){r=Sa(a);da(c.$modelValue)||(m&&c.$viewValue!==b.val()?c.$setViewValue(b.val()):c.$validate())}Dc(a,b,d,c);be(c);Ra(a,b,d,c,e,f);var m=c.$$hasNativeValidators&&"range"===b[0].type,n=m?0:void 0,p=m?100:void 0,r=m?1:void 0,q=b[0].validity;a=u(d.min);e=u(d.max);f=u(d.step);var s=c.$render;c.$render=m&&u(q.rangeUnderflow)&&
-u(q.rangeOverflow)?function(){s();c.$setViewValue(b.val())}:s;a&&(c.$validators.min=m?function(){return!0}:function(a,b){return c.$isEmpty(b)||w(n)||b>=n},g("min",h));e&&(c.$validators.max=m?function(){return!0}:function(a,b){return c.$isEmpty(b)||w(p)||b<=p},g("max",k));f&&(c.$validators.step=m?function(){return!q.stepMismatch}:function(a,b){return c.$isEmpty(b)||w(r)||ce(b,n||0,r)},g("step",l))},checkbox:function(a,b,d,c,e,f,g,h){var k=de(h,a,"ngTrueValue",d.ngTrueValue,!0),l=de(h,a,"ngFalseValue",
-d.ngFalseValue,!1);b.on("click",function(a){c.$setViewValue(b[0].checked,a&&a.type)});c.$render=function(){b[0].checked=c.$viewValue};c.$isEmpty=function(a){return!1===a};c.$formatters.push(function(a){return sa(a,k)});c.$parsers.push(function(a){return a?k:l})},hidden:z,button:z,submit:z,reset:z,file:z},Xc=["$browser","$sniffer","$filter","$parse",function(a,b,d,c){return{restrict:"E",require:["?ngModel"],link:{pre:function(e,f,g,h){h[0]&&(le[Q(g.type)]||le.text)(e,f,g,h[0],b,a,d,c)}}}}],ch=/^(true|false|\d+)$/,
-kf=function(){function a(a,d,c){var e=u(c)?c:9===za?"":null;a.prop("value",e);d.$set("value",c)}return{restrict:"A",priority:100,compile:function(b,d){return ch.test(d.ngValue)?function(b,d,f){b=b.$eval(f.ngValue);a(d,f,b)}:function(b,d,f){b.$watch(f.ngValue,function(b){a(d,f,b)})}}}},Ke=["$compile",function(a){return{restrict:"AC",compile:function(b){a.$$addBindingClass(b);return function(b,c,e){a.$$addBindingInfo(c,e.ngBind);c=c[0];b.$watch(e.ngBind,function(a){c.textContent=$b(a)})}}}}],Me=["$interpolate",
-"$compile",function(a,b){return{compile:function(d){b.$$addBindingClass(d);return function(c,d,f){c=a(d.attr(f.$attr.ngBindTemplate));b.$$addBindingInfo(d,c.expressions);d=d[0];f.$observe("ngBindTemplate",function(a){d.textContent=w(a)?"":a})}}}}],Le=["$sce","$parse","$compile",function(a,b,d){return{restrict:"A",compile:function(c,e){var f=b(e.ngBindHtml),g=b(e.ngBindHtml,function(b){return a.valueOf(b)});d.$$addBindingClass(c);return function(b,c,e){d.$$addBindingInfo(c,e.ngBindHtml);b.$watch(g,
-function(){var d=f(b);c.html(a.getTrustedHtml(d)||"")})}}}}],jf=la({restrict:"A",require:"ngModel",link:function(a,b,d,c){c.$viewChangeListeners.push(function(){a.$eval(d.ngChange)})}}),Ne=Fc("",!0),Pe=Fc("Odd",0),Oe=Fc("Even",1),Qe=Qa({compile:function(a,b){b.$set("ngCloak",void 0);a.removeClass("ng-cloak")}}),Re=[function(){return{restrict:"A",scope:!0,controller:"@",priority:500}}],bd={},dh={blur:!0,focus:!0};q("click dblclick mousedown mouseup mouseover mouseout mousemove mouseenter mouseleave keydown keyup keypress submit focus blur copy cut paste".split(" "),
-function(a){var b=Ba("ng-"+a);bd[b]=["$parse","$rootScope",function(d,c){return{restrict:"A",compile:function(e,f){var g=d(f[b]);return function(b,d){d.on(a,function(d){var e=function(){g(b,{$event:d})};dh[a]&&c.$$phase?b.$evalAsync(e):b.$apply(e)})}}}}]});var Ue=["$animate","$compile",function(a,b){return{multiElement:!0,transclude:"element",priority:600,terminal:!0,restrict:"A",$$tlb:!0,link:function(d,c,e,f,g){var h,k,l;d.$watch(e.ngIf,function(d){d?k||g(function(d,f){k=f;d[d.length++]=b.$$createComment("end ngIf",
-e.ngIf);h={clone:d};a.enter(d,c.parent(),c)}):(l&&(l.remove(),l=null),k&&(k.$destroy(),k=null),h&&(l=tb(h.clone),a.leave(l).done(function(a){!1!==a&&(l=null)}),h=null))})}}}],Ve=["$templateRequest","$anchorScroll","$animate",function(a,b,d){return{restrict:"ECA",priority:400,terminal:!0,transclude:"element",controller:ea.noop,compile:function(c,e){var f=e.ngInclude||e.src,g=e.onload||"",h=e.autoscroll;return function(c,e,m,n,p){var r=0,q,s,t,w=function(){s&&(s.remove(),s=null);q&&(q.$destroy(),q=
-null);t&&(d.leave(t).done(function(a){!1!==a&&(s=null)}),s=t,t=null)};c.$watch(f,function(f){var m=function(a){!1===a||!u(h)||h&&!c.$eval(h)||b()},s=++r;f?(a(f,!0).then(function(a){if(!c.$$destroyed&&s===r){var b=c.$new();n.template=a;a=p(b,function(a){w();d.enter(a,null,e).done(m)});q=b;t=a;q.$emit("$includeContentLoaded",f);c.$eval(g)}},function(){c.$$destroyed||s!==r||(w(),c.$emit("$includeContentError",f))}),c.$emit("$includeContentRequested",f)):(w(),n.template=null)})}}}}],mf=["$compile",function(a){return{restrict:"ECA",
-priority:-400,require:"ngInclude",link:function(b,d,c,e){ma.call(d[0]).match(/SVG/)?(d.empty(),a(dd(e.template,x.document).childNodes)(b,function(a){d.append(a)},{futureParentElement:d})):(d.html(e.template),a(d.contents())(b))}}}],We=Qa({priority:450,compile:function(){return{pre:function(a,b,d){a.$eval(d.ngInit)}}}}),hf=function(){return{restrict:"A",priority:100,require:"ngModel",link:function(a,b,d,c){var e=d.ngList||", ",f="false"!==d.ngTrim,g=f?T(e):e;c.$parsers.push(function(a){if(!w(a)){var b=
-[];a&&q(a.split(g),function(a){a&&b.push(f?T(a):a)});return b}});c.$formatters.push(function(a){if(H(a))return a.join(e)});c.$isEmpty=function(a){return!a||!a.length}}}},nb="ng-valid",Yd="ng-invalid",Va="ng-pristine",Rb="ng-dirty",pb=L("ngModel");Ob.$inject="$scope $exceptionHandler $attrs $element $parse $animate $timeout $q $interpolate".split(" ");Ob.prototype={$$initGetterSetters:function(){if(this.$options.getOption("getterSetter")){var a=this.$$parse(this.$$attr.ngModel+"()"),b=this.$$parse(this.$$attr.ngModel+
-"($$$p)");this.$$ngModelGet=function(b){var c=this.$$parsedNgModel(b);D(c)&&(c=a(b));return c};this.$$ngModelSet=function(a,c){D(this.$$parsedNgModel(a))?b(a,{$$$p:c}):this.$$parsedNgModelAssign(a,c)}}else if(!this.$$parsedNgModel.assign)throw pb("nonassign",this.$$attr.ngModel,xa(this.$$element));},$render:z,$isEmpty:function(a){return w(a)||""===a||null===a||a!==a},$$updateEmptyClasses:function(a){this.$isEmpty(a)?(this.$$animate.removeClass(this.$$element,"ng-not-empty"),this.$$animate.addClass(this.$$element,
-"ng-empty")):(this.$$animate.removeClass(this.$$element,"ng-empty"),this.$$animate.addClass(this.$$element,"ng-not-empty"))},$setPristine:function(){this.$dirty=!1;this.$pristine=!0;this.$$animate.removeClass(this.$$element,Rb);this.$$animate.addClass(this.$$element,Va)},$setDirty:function(){this.$dirty=!0;this.$pristine=!1;this.$$animate.removeClass(this.$$element,Va);this.$$animate.addClass(this.$$element,Rb);this.$$parentForm.$setDirty()},$setUntouched:function(){this.$touched=!1;this.$untouched=
-!0;this.$$animate.setClass(this.$$element,"ng-untouched","ng-touched")},$setTouched:function(){this.$touched=!0;this.$untouched=!1;this.$$animate.setClass(this.$$element,"ng-touched","ng-untouched")},$rollbackViewValue:function(){this.$$timeout.cancel(this.$$pendingDebounce);this.$viewValue=this.$$lastCommittedViewValue;this.$render()},$validate:function(){if(!da(this.$modelValue)){var a=this.$$lastCommittedViewValue,b=this.$$rawModelValue,d=this.$valid,c=this.$modelValue,e=this.$options.getOption("allowInvalid"),
-f=this;this.$$runValidators(b,a,function(a){e||d===a||(f.$modelValue=a?b:void 0,f.$modelValue!==c&&f.$$writeModelToScope())})}},$$runValidators:function(a,b,d){function c(){var c=!0;q(k.$validators,function(d,e){var g=Boolean(d(a,b));c=c&&g;f(e,g)});return c?!0:(q(k.$asyncValidators,function(a,b){f(b,null)}),!1)}function e(){var c=[],d=!0;q(k.$asyncValidators,function(e,g){var k=e(a,b);if(!k||!D(k.then))throw pb("nopromise",k);f(g,void 0);c.push(k.then(function(){f(g,!0)},function(){d=!1;f(g,!1)}))});
-c.length?k.$$q.all(c).then(function(){g(d)},z):g(!0)}function f(a,b){h===k.$$currentValidationRunId&&k.$setValidity(a,b)}function g(a){h===k.$$currentValidationRunId&&d(a)}this.$$currentValidationRunId++;var h=this.$$currentValidationRunId,k=this;(function(){var a=k.$$parserName||"parse";if(w(k.$$parserValid))f(a,null);else return k.$$parserValid||(q(k.$validators,function(a,b){f(b,null)}),q(k.$asyncValidators,function(a,b){f(b,null)})),f(a,k.$$parserValid),k.$$parserValid;return!0})()?c()?e():g(!1):
-g(!1)},$commitViewValue:function(){var a=this.$viewValue;this.$$timeout.cancel(this.$$pendingDebounce);if(this.$$lastCommittedViewValue!==a||""===a&&this.$$hasNativeValidators)this.$$updateEmptyClasses(a),this.$$lastCommittedViewValue=a,this.$pristine&&this.$setDirty(),this.$$parseAndValidate()},$$parseAndValidate:function(){var a=this.$$lastCommittedViewValue,b=this;if(this.$$parserValid=w(a)?void 0:!0)for(var d=0;d<this.$parsers.length;d++)if(a=this.$parsers[d](a),w(a)){this.$$parserValid=!1;break}da(this.$modelValue)&&
-(this.$modelValue=this.$$ngModelGet(this.$$scope));var c=this.$modelValue,e=this.$options.getOption("allowInvalid");this.$$rawModelValue=a;e&&(this.$modelValue=a,b.$modelValue!==c&&b.$$writeModelToScope());this.$$runValidators(a,this.$$lastCommittedViewValue,function(d){e||(b.$modelValue=d?a:void 0,b.$modelValue!==c&&b.$$writeModelToScope())})},$$writeModelToScope:function(){this.$$ngModelSet(this.$$scope,this.$modelValue);q(this.$viewChangeListeners,function(a){try{a()}catch(b){this.$$exceptionHandler(b)}},
-this)},$setViewValue:function(a,b){this.$viewValue=a;this.$options.getOption("updateOnDefault")&&this.$$debounceViewValueCommit(b)},$$debounceViewValueCommit:function(a){var b=this.$options.getOption("debounce");ba(b[a])?b=b[a]:ba(b["default"])&&(b=b["default"]);this.$$timeout.cancel(this.$$pendingDebounce);var d=this;0<b?this.$$pendingDebounce=this.$$timeout(function(){d.$commitViewValue()},b):this.$$scope.$root.$$phase?this.$commitViewValue():this.$$scope.$apply(function(){d.$commitViewValue()})},
-$overrideModelOptions:function(a){this.$options=this.$options.createChild(a)}};Zd({clazz:Ob,set:function(a,b){a[b]=!0},unset:function(a,b){delete a[b]}});var gf=["$rootScope",function(a){return{restrict:"A",require:["ngModel","^?form","^?ngModelOptions"],controller:Ob,priority:1,compile:function(b){b.addClass(Va).addClass("ng-untouched").addClass(nb);return{pre:function(a,b,e,f){var g=f[0];b=f[1]||g.$$parentForm;if(f=f[2])g.$options=f.$options;g.$$initGetterSetters();b.$addControl(g);e.$observe("name",
-function(a){g.$name!==a&&g.$$parentForm.$$renameControl(g,a)});a.$on("$destroy",function(){g.$$parentForm.$removeControl(g)})},post:function(b,c,e,f){function g(){h.$setTouched()}var h=f[0];if(h.$options.getOption("updateOn"))c.on(h.$options.getOption("updateOn"),function(a){h.$$debounceViewValueCommit(a&&a.type)});c.on("blur",function(){h.$touched||(a.$$phase?b.$evalAsync(g):b.$apply(g))})}}}}}],Pb,eh=/(\s+|^)default(\s+|$)/;Gc.prototype={getOption:function(a){return this.$$options[a]},createChild:function(a){var b=
-!1;a=S({},a);q(a,function(d,c){"$inherit"===d?"*"===c?b=!0:(a[c]=this.$$options[c],"updateOn"===c&&(a.updateOnDefault=this.$$options.updateOnDefault)):"updateOn"===c&&(a.updateOnDefault=!1,a[c]=T(d.replace(eh,function(){a.updateOnDefault=!0;return" "})))},this);b&&(delete a["*"],ee(a,this.$$options));ee(a,Pb.$$options);return new Gc(a)}};Pb=new Gc({updateOn:"",updateOnDefault:!0,debounce:0,getterSetter:!1,allowInvalid:!1,timezone:null});var lf=function(){function a(a,d){this.$$attrs=a;this.$$scope=
-d}a.$inject=["$attrs","$scope"];a.prototype={$onInit:function(){var a=this.parentCtrl?this.parentCtrl.$options:Pb,d=this.$$scope.$eval(this.$$attrs.ngModelOptions);this.$options=a.createChild(d)}};return{restrict:"A",priority:10,require:{parentCtrl:"?^^ngModelOptions"},bindToController:!0,controller:a}},Xe=Qa({terminal:!0,priority:1E3}),fh=L("ngOptions"),gh=/^\s*([\s\S]+?)(?:\s+as\s+([\s\S]+?))?(?:\s+group\s+by\s+([\s\S]+?))?(?:\s+disable\s+when\s+([\s\S]+?))?\s+for\s+(?:([$\w][$\w]*)|(?:\(\s*([$\w][$\w]*)\s*,\s*([$\w][$\w]*)\s*\)))\s+in\s+([\s\S]+?)(?:\s+track\s+by\s+([\s\S]+?))?$/,
-ef=["$compile","$document","$parse",function(a,b,d){function c(a,b,c){function e(a,b,c,d,f){this.selectValue=a;this.viewValue=b;this.label=c;this.group=d;this.disabled=f}function f(a){var b;if(!q&&qa(a))b=a;else{b=[];for(var c in a)a.hasOwnProperty(c)&&"$"!==c.charAt(0)&&b.push(c)}return b}var n=a.match(gh);if(!n)throw fh("iexp",a,xa(b));var p=n[5]||n[7],q=n[6];a=/ as /.test(n[0])&&n[1];var s=n[9];b=d(n[2]?n[1]:p);var v=a&&d(a)||b,t=s&&d(s),u=s?function(a,b){return t(c,b)}:function(a){return Pa(a)},
-w=function(a,b){return u(a,G(a,b))},A=d(n[2]||n[1]),x=d(n[3]||""),I=d(n[4]||""),K=d(n[8]),E={},G=q?function(a,b){E[q]=b;E[p]=a;return E}:function(a){E[p]=a;return E};return{trackBy:s,getTrackByValue:w,getWatchables:d(K,function(a){var b=[];a=a||[];for(var d=f(a),e=d.length,g=0;g<e;g++){var h=a===d?g:d[g],l=a[h],h=G(l,h),l=u(l,h);b.push(l);if(n[2]||n[1])l=A(c,h),b.push(l);n[4]&&(h=I(c,h),b.push(h))}return b}),getOptions:function(){for(var a=[],b={},d=K(c)||[],g=f(d),h=g.length,n=0;n<h;n++){var p=d===
-g?n:g[n],q=G(d[p],p),r=v(c,q),p=u(r,q),t=A(c,q),E=x(c,q),q=I(c,q),r=new e(p,r,t,E,q);a.push(r);b[p]=r}return{items:a,selectValueMap:b,getOptionFromViewValue:function(a){return b[w(a)]},getViewValueFromOption:function(a){return s?ra(a.viewValue):a.viewValue}}}}}var e=x.document.createElement("option"),f=x.document.createElement("optgroup");return{restrict:"A",terminal:!0,require:["select","ngModel"],link:{pre:function(a,b,c,d){d[0].registerOption=z},post:function(d,h,k,l){function m(a){var b=(a=A.getOptionFromViewValue(a))&&
-a.element;b&&!b.selected&&(b.selected=!0);return a}function n(a,b){a.element=b;b.disabled=a.disabled;a.label!==b.label&&(b.label=a.label,b.textContent=a.label);b.value=a.selectValue}function p(){var a=A&&r.readValue();if(A)for(var b=A.items.length-1;0<=b;b--){var c=A.items[b];u(c.group)?Eb(c.element.parentNode):Eb(c.element)}A=z.getOptions();var d={};x&&h.prepend(r.emptyOption);A.items.forEach(function(a){var b;if(u(a.group)){b=d[a.group];b||(b=f.cloneNode(!1),I.appendChild(b),b.label=null===a.group?
-"null":a.group,d[a.group]=b);var c=e.cloneNode(!1)}else b=I,c=e.cloneNode(!1);b.appendChild(c);n(a,c)});h[0].appendChild(I);s.$render();s.$isEmpty(a)||(b=r.readValue(),(z.trackBy||v?sa(a,b):a===b)||(s.$setViewValue(b),s.$render()))}var r=l[0],s=l[1],v=k.multiple;l=0;for(var t=h.children(),w=t.length;l<w;l++)if(""===t[l].value){r.hasEmptyOption=!0;r.emptyOption=t.eq(l);break}var x=!!r.emptyOption;B(e.cloneNode(!1)).val("?");var A,z=c(k.ngOptions,h,d),I=b[0].createDocumentFragment();r.generateUnknownOptionValue=
-function(a){return"?"};v?(r.writeValue=function(a){var b=a&&a.map(m)||[];A.items.forEach(function(a){a.element.selected&&-1===Array.prototype.indexOf.call(b,a)&&(a.element.selected=!1)})},r.readValue=function(){var a=h.val()||[],b=[];q(a,function(a){(a=A.selectValueMap[a])&&!a.disabled&&b.push(A.getViewValueFromOption(a))});return b},z.trackBy&&d.$watchCollection(function(){if(H(s.$viewValue))return s.$viewValue.map(function(a){return z.getTrackByValue(a)})},function(){s.$render()})):(r.writeValue=
-function(a){var b=A.selectValueMap[h.val()],c=A.getOptionFromViewValue(a);b&&b.element.removeAttribute("selected");c?(h[0].value!==c.selectValue&&(r.removeUnknownOption(),r.unselectEmptyOption(),h[0].value=c.selectValue,c.element.selected=!0),c.element.setAttribute("selected","selected")):x?r.selectEmptyOption():r.unknownOption.parent().length?r.updateUnknownOption(a):r.renderUnknownOption(a)},r.readValue=function(){var a=A.selectValueMap[h.val()];return a&&!a.disabled?(r.unselectEmptyOption(),r.removeUnknownOption(),
-A.getViewValueFromOption(a)):null},z.trackBy&&d.$watch(function(){return z.getTrackByValue(s.$viewValue)},function(){s.$render()}));x&&(r.emptyOption.remove(),a(r.emptyOption)(d),8===r.emptyOption[0].nodeType?(r.hasEmptyOption=!1,r.registerOption=function(a,b){""===b.val()&&(r.hasEmptyOption=!0,r.emptyOption=b,r.emptyOption.removeClass("ng-scope"),s.$render(),b.on("$destroy",function(){r.hasEmptyOption=!1;r.emptyOption=void 0}))}):r.emptyOption.removeClass("ng-scope"));h.empty();p();d.$watchCollection(z.getWatchables,
-p)}}}}],Ye=["$locale","$interpolate","$log",function(a,b,d){var c=/{}/g,e=/^when(Minus)?(.+)$/;return{link:function(f,g,h){function k(a){g.text(a||"")}var l=h.count,m=h.$attr.when&&g.attr(h.$attr.when),n=h.offset||0,p=f.$eval(m)||{},r={},s=b.startSymbol(),v=b.endSymbol(),t=s+l+"-"+n+v,u=ea.noop,x;q(h,function(a,b){var c=e.exec(b);c&&(c=(c[1]?"-":"")+Q(c[2]),p[c]=g.attr(h.$attr[b]))});q(p,function(a,d){r[d]=b(a.replace(c,t))});f.$watch(l,function(b){var c=parseFloat(b),e=da(c);e||c in p||(c=a.pluralCat(c-
-n));c===x||e&&da(x)||(u(),e=r[c],w(e)?(null!=b&&d.debug("ngPluralize: no rule defined for '"+c+"' in "+m),u=z,k()):u=f.$watch(e,k),x=c)})}}}],Ze=["$parse","$animate","$compile",function(a,b,d){var c=L("ngRepeat"),e=function(a,b,c,d,e,m,n){a[c]=d;e&&(a[e]=m);a.$index=b;a.$first=0===b;a.$last=b===n-1;a.$middle=!(a.$first||a.$last);a.$odd=!(a.$even=0===(b&1))};return{restrict:"A",multiElement:!0,transclude:"element",priority:1E3,terminal:!0,$$tlb:!0,compile:function(f,g){var h=g.ngRepeat,k=d.$$createComment("end ngRepeat",
-h),l=h.match(/^\s*([\s\S]+?)\s+in\s+([\s\S]+?)(?:\s+as\s+([\s\S]+?))?(?:\s+track\s+by\s+([\s\S]+?))?\s*$/);if(!l)throw c("iexp",h);var m=l[1],n=l[2],p=l[3],r=l[4],l=m.match(/^(?:(\s*[$\w]+)|\(\s*([$\w]+)\s*,\s*([$\w]+)\s*\))$/);if(!l)throw c("iidexp",m);var s=l[3]||l[1],v=l[2];if(p&&(!/^[$a-zA-Z_][$a-zA-Z0-9_]*$/.test(p)||/^(null|undefined|this|\$index|\$first|\$middle|\$last|\$even|\$odd|\$parent|\$root|\$id)$/.test(p)))throw c("badident",p);var t,u,w,x,z={$id:Pa};r?t=a(r):(w=function(a,b){return Pa(b)},
-x=function(a){return a});return function(a,d,f,g,l){t&&(u=function(b,c,d){v&&(z[v]=b);z[s]=c;z.$index=d;return t(a,z)});var m=V();a.$watchCollection(n,function(f){var g,n,r=d[0],t,z=V(),B,D,F,C,G,E,H;p&&(a[p]=f);if(qa(f))G=f,n=u||w;else for(H in n=u||x,G=[],f)ua.call(f,H)&&"$"!==H.charAt(0)&&G.push(H);B=G.length;H=Array(B);for(g=0;g<B;g++)if(D=f===G?g:G[g],F=f[D],C=n(D,F,g),m[C])E=m[C],delete m[C],z[C]=E,H[g]=E;else{if(z[C])throw q(H,function(a){a&&a.scope&&(m[a.id]=a)}),c("dupes",h,C,F);H[g]={id:C,
-scope:void 0,clone:void 0};z[C]=!0}for(t in m){E=m[t];C=tb(E.clone);b.leave(C);if(C[0].parentNode)for(g=0,n=C.length;g<n;g++)C[g].$$NG_REMOVED=!0;E.scope.$destroy()}for(g=0;g<B;g++)if(D=f===G?g:G[g],F=f[D],E=H[g],E.scope){t=r;do t=t.nextSibling;while(t&&t.$$NG_REMOVED);E.clone[0]!==t&&b.move(tb(E.clone),null,r);r=E.clone[E.clone.length-1];e(E.scope,g,s,F,v,D,B)}else l(function(a,c){E.scope=c;var d=k.cloneNode(!1);a[a.length++]=d;b.enter(a,null,r);r=d;E.clone=a;z[E.id]=E;e(E.scope,g,s,F,v,D,B)});m=
-z})}}}}],$e=["$animate",function(a){return{restrict:"A",multiElement:!0,link:function(b,d,c){b.$watch(c.ngShow,function(b){a[b?"removeClass":"addClass"](d,"ng-hide",{tempClasses:"ng-hide-animate"})})}}}],Te=["$animate",function(a){return{restrict:"A",multiElement:!0,link:function(b,d,c){b.$watch(c.ngHide,function(b){a[b?"addClass":"removeClass"](d,"ng-hide",{tempClasses:"ng-hide-animate"})})}}}],af=Qa(function(a,b,d){a.$watch(d.ngStyle,function(a,d){d&&a!==d&&q(d,function(a,c){b.css(c,"")});a&&b.css(a)},
-!0)}),bf=["$animate","$compile",function(a,b){return{require:"ngSwitch",controller:["$scope",function(){this.cases={}}],link:function(d,c,e,f){var g=[],h=[],k=[],l=[],m=function(a,b){return function(c){!1!==c&&a.splice(b,1)}};d.$watch(e.ngSwitch||e.on,function(c){for(var d,e;k.length;)a.cancel(k.pop());d=0;for(e=l.length;d<e;++d){var s=tb(h[d].clone);l[d].$destroy();(k[d]=a.leave(s)).done(m(k,d))}h.length=0;l.length=0;(g=f.cases["!"+c]||f.cases["?"])&&q(g,function(c){c.transclude(function(d,e){l.push(e);
-var f=c.element;d[d.length++]=b.$$createComment("end ngSwitchWhen");h.push({clone:d});a.enter(d,f.parent(),f)})})})}}}],cf=Qa({transclude:"element",priority:1200,require:"^ngSwitch",multiElement:!0,link:function(a,b,d,c,e){a=d.ngSwitchWhen.split(d.ngSwitchWhenSeparator).sort().filter(function(a,b,c){return c[b-1]!==a});q(a,function(a){c.cases["!"+a]=c.cases["!"+a]||[];c.cases["!"+a].push({transclude:e,element:b})})}}),df=Qa({transclude:"element",priority:1200,require:"^ngSwitch",multiElement:!0,link:function(a,
-b,d,c,e){c.cases["?"]=c.cases["?"]||[];c.cases["?"].push({transclude:e,element:b})}}),hh=L("ngTransclude"),ff=["$compile",function(a){return{restrict:"EAC",terminal:!0,compile:function(b){var d=a(b.contents());b.empty();return function(a,b,f,g,h){function k(){d(a,function(a){b.append(a)})}if(!h)throw hh("orphan",xa(b));f.ngTransclude===f.$attr.ngTransclude&&(f.ngTransclude="");f=f.ngTransclude||f.ngTranscludeSlot;h(function(a,c){var d;if(d=a.length)a:{d=0;for(var f=a.length;d<f;d++){var g=a[d];if(g.nodeType!==
-Ia||g.nodeValue.trim()){d=!0;break a}}d=void 0}d?b.append(a):(k(),c.$destroy())},null,f);f&&!h.isSlotFilled(f)&&k()}}}}],He=["$templateCache",function(a){return{restrict:"E",terminal:!0,compile:function(b,d){"text/ng-template"===d.type&&a.put(d.id,b[0].text)}}}],ih={$setViewValue:z,$render:z},jh=["$element","$scope",function(a,b){function d(){g||(g=!0,b.$$postDigest(function(){g=!1;e.ngModelCtrl.$render()}))}function c(a){h||(h=!0,b.$$postDigest(function(){b.$$destroyed||(h=!1,e.ngModelCtrl.$setViewValue(e.readValue()),
-a&&e.ngModelCtrl.$render())}))}var e=this,f=new Gb;e.selectValueMap={};e.ngModelCtrl=ih;e.multiple=!1;e.unknownOption=B(x.document.createElement("option"));e.hasEmptyOption=!1;e.emptyOption=void 0;e.renderUnknownOption=function(b){b=e.generateUnknownOptionValue(b);e.unknownOption.val(b);a.prepend(e.unknownOption);Ta(e.unknownOption,!0);a.val(b)};e.updateUnknownOption=function(b){b=e.generateUnknownOptionValue(b);e.unknownOption.val(b);Ta(e.unknownOption,!0);a.val(b)};e.generateUnknownOptionValue=
-function(a){return"? "+Pa(a)+" ?"};e.removeUnknownOption=function(){e.unknownOption.parent()&&e.unknownOption.remove()};e.selectEmptyOption=function(){e.emptyOption&&(a.val(""),Ta(e.emptyOption,!0))};e.unselectEmptyOption=function(){e.hasEmptyOption&&e.emptyOption.removeAttr("selected")};b.$on("$destroy",function(){e.renderUnknownOption=z});e.readValue=function(){var b=a.val(),b=b in e.selectValueMap?e.selectValueMap[b]:b;return e.hasOption(b)?b:null};e.writeValue=function(b){var c=a[0].options[a[0].selectedIndex];
-c&&Ta(B(c),!1);e.hasOption(b)?(e.removeUnknownOption(),c=Pa(b),a.val(c in e.selectValueMap?c:b),Ta(B(a[0].options[a[0].selectedIndex]),!0)):null==b&&e.emptyOption?(e.removeUnknownOption(),e.selectEmptyOption()):e.unknownOption.parent().length?e.updateUnknownOption(b):e.renderUnknownOption(b)};e.addOption=function(a,b){if(8!==b[0].nodeType){Ka(a,'"option value"');""===a&&(e.hasEmptyOption=!0,e.emptyOption=b);var c=f.get(a)||0;f.set(a,c+1);d()}};e.removeOption=function(a){var b=f.get(a);b&&(1===b?(f.delete(a),
-""===a&&(e.hasEmptyOption=!1,e.emptyOption=void 0)):f.set(a,b-1))};e.hasOption=function(a){return!!f.get(a)};var g=!1,h=!1;e.registerOption=function(a,b,f,g,h){if(f.$attr.ngValue){var q,s=NaN;f.$observe("value",function(a){var d,f=b.prop("selected");u(s)&&(e.removeOption(q),delete e.selectValueMap[s],d=!0);s=Pa(a);q=a;e.selectValueMap[s]=a;e.addOption(a,b);b.attr("value",s);d&&f&&c()})}else g?f.$observe("value",function(a){e.readValue();var d,f=b.prop("selected");u(q)&&(e.removeOption(q),d=!0);q=
-a;e.addOption(a,b);d&&f&&c()}):h?a.$watch(h,function(a,d){f.$set("value",a);var g=b.prop("selected");d!==a&&e.removeOption(d);e.addOption(a,b);d&&g&&c()}):e.addOption(f.value,b);f.$observe("disabled",function(a){if("true"===a||a&&b.prop("selected"))e.multiple?c(!0):(e.ngModelCtrl.$setViewValue(null),e.ngModelCtrl.$render())});b.on("$destroy",function(){var a=e.readValue(),b=f.value;e.removeOption(b);d();(e.multiple&&a&&-1!==a.indexOf(b)||a===b)&&c(!0)})}}],Ie=function(){return{restrict:"E",require:["select",
-"?ngModel"],controller:jh,priority:1,link:{pre:function(a,b,d,c){var e=c[0],f=c[1];if(f){if(e.ngModelCtrl=f,b.on("change",function(){e.removeUnknownOption();a.$apply(function(){f.$setViewValue(e.readValue())})}),d.multiple){e.multiple=!0;e.readValue=function(){var a=[];q(b.find("option"),function(b){b.selected&&!b.disabled&&(b=b.value,a.push(b in e.selectValueMap?e.selectValueMap[b]:b))});return a};e.writeValue=function(a){q(b.find("option"),function(b){var c=!!a&&(-1!==Array.prototype.indexOf.call(a,
-b.value)||-1!==Array.prototype.indexOf.call(a,e.selectValueMap[b.value]));c!==b.selected&&Ta(B(b),c)})};var g,h=NaN;a.$watch(function(){h!==f.$viewValue||sa(g,f.$viewValue)||(g=pa(f.$viewValue),f.$render());h=f.$viewValue});f.$isEmpty=function(a){return!a||0===a.length}}}else e.registerOption=z},post:function(a,b,d,c){var e=c[1];if(e){var f=c[0];e.$render=function(){f.writeValue(e.$viewValue)}}}}}},Je=["$interpolate",function(a){return{restrict:"E",priority:100,compile:function(b,d){var c,e;u(d.ngValue)||
-(u(d.value)?c=a(d.value,!0):(e=a(b.text(),!0))||d.$set("value",b.text()));return function(a,b,d){var k=b.parent();(k=k.data("$selectController")||k.parent().data("$selectController"))&&k.registerOption(a,b,d,c,e)}}}}],Zc=function(){return{restrict:"A",require:"?ngModel",link:function(a,b,d,c){c&&(d.required=!0,c.$validators.required=function(a,b){return!d.required||!c.$isEmpty(b)},d.$observe("required",function(){c.$validate()}))}}},Yc=function(){return{restrict:"A",require:"?ngModel",link:function(a,
-b,d,c){if(c){var e,f=d.ngPattern||d.pattern;d.$observe("pattern",function(a){F(a)&&0<a.length&&(a=new RegExp("^"+a+"$"));if(a&&!a.test)throw L("ngPattern")("noregexp",f,a,xa(b));e=a||void 0;c.$validate()});c.$validators.pattern=function(a,b){return c.$isEmpty(b)||w(e)||e.test(b)}}}}},ad=function(){return{restrict:"A",require:"?ngModel",link:function(a,b,d,c){if(c){var e=-1;d.$observe("maxlength",function(a){a=Z(a);e=da(a)?-1:a;c.$validate()});c.$validators.maxlength=function(a,b){return 0>e||c.$isEmpty(b)||
-b.length<=e}}}}},$c=function(){return{restrict:"A",require:"?ngModel",link:function(a,b,d,c){if(c){var e=0;d.$observe("minlength",function(a){e=Z(a)||0;c.$validate()});c.$validators.minlength=function(a,b){return c.$isEmpty(b)||b.length>=e}}}}};x.angular.bootstrap?x.console&&console.log("WARNING: Tried to load angular more than once."):(ze(),Ce(ea),ea.module("ngLocale",[],["$provide",function(a){function b(a){a+="";var b=a.indexOf(".");return-1==b?0:a.length-b-1}a.value("$locale",{DATETIME_FORMATS:{AMPMS:["AM",
-"PM"],DAY:"Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "),ERANAMES:["Before Christ","Anno Domini"],ERAS:["BC","AD"],FIRSTDAYOFWEEK:6,MONTH:"January February March April May June July August September October November December".split(" "),SHORTDAY:"Sun Mon Tue Wed Thu Fri Sat".split(" "),SHORTMONTH:"Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec".split(" "),STANDALONEMONTH:"January February March April May June July August September October November December".split(" "),WEEKENDRANGE:[5,
-6],fullDate:"EEEE, MMMM d, y",longDate:"MMMM d, y",medium:"MMM d, y h:mm:ss a",mediumDate:"MMM d, y",mediumTime:"h:mm:ss a","short":"M/d/yy h:mm a",shortDate:"M/d/yy",shortTime:"h:mm a"},NUMBER_FORMATS:{CURRENCY_SYM:"$",DECIMAL_SEP:".",GROUP_SEP:",",PATTERNS:[{gSize:3,lgSize:3,maxFrac:3,minFrac:0,minInt:1,negPre:"-",negSuf:"",posPre:"",posSuf:""},{gSize:3,lgSize:3,maxFrac:2,minFrac:2,minInt:1,negPre:"-\u00a4",negSuf:"",posPre:"\u00a4",posSuf:""}]},id:"en-us",localeID:"en_US",pluralCat:function(a,
-c){var e=a|0,f=c;void 0===f&&(f=Math.min(b(a),3));Math.pow(10,f);return 1==e&&0==f?"one":"other"}})}]),B(function(){ue(x.document,Sc)}))})(window);!window.angular.$$csp().noInlineStyle&&window.angular.element(document.head).prepend('<style type="text/css">@charset "UTF-8";[ng\\:cloak],[ng-cloak],[data-ng-cloak],[x-ng-cloak],.ng-cloak,.x-ng-cloak,.ng-hide:not(.ng-hide-animate){display:none !important;}ng\\:form{display:block;}.ng-animate-shim{visibility:hidden;}.ng-anchor{position:absolute;}</style>');
-//# sourceMappingURL=angular.min.js.map
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js
deleted file mode 100644
index 4aced57..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js
+++ /dev/null
@@ -1 +0,0 @@
-!function(window){"use strict";var nv=window.nv;"undefined"!=typeof exports&&(nv=require("nvd3")),angular.module("nvd3",[]).directive("nvd3",["nvd3Utils",function(nvd3Utils){return{restrict:"AE",scope:{data:"=",options:"=",api:"=?",events:"=?",config:"=?",onReady:"&?"},link:function(scope,element,attrs){function configure(chart,options,chartType){chart&&options&&angular.forEach(chart,function(value,key){"_"===key[0]||("dispatch"===key?(void 0!==options[key]&&null!==options[key]||scope._config.extended&&(options[key]={}),configureEvents(value,options[key])):"tooltip"===key?(void 0!==options[key]&&null!==options[key]||scope._config.extended&&(options[key]={}),configure(chart[key],options[key],chartType)):"contentGenerator"===key?options[key]&&chart[key](options[key]):-1===["axis","clearHighlights","defined","highlightPoint","nvPointerEventsClass","options","rangeBand","rangeBands","scatter","open","close","node"].indexOf(key)&&(void 0===options[key]||null===options[key]?scope._config.extended&&(options[key]=value()):chart[key](options[key])))})}function configureEvents(dispatch,options){dispatch&&options&&angular.forEach(dispatch,function(value,key){void 0===options[key]||null===options[key]?scope._config.extended&&(options[key]=value.on):dispatch.on(key+"._",options[key])})}function configureWrapper(name){var _=nvd3Utils.deepExtend(defaultWrapper(name),scope.options[name]||{});scope._config.extended&&(scope.options[name]=_);var wrapElement=angular.element("<div></div>").html(_.html||"").addClass(name).addClass(_.className).removeAttr("style").css(_.css);_.html||wrapElement.text(_.text),_.enable&&("title"===name?element.prepend(wrapElement):"subtitle"===name?angular.element(element[0].querySelector(".title")).after(wrapElement):"caption"===name&&element.append(wrapElement))}function configureStyles(){var _=nvd3Utils.deepExtend(defaultStyles(),scope.options.styles||{});scope._config.extended&&(scope.options.styles=_),angular.forEach(_.classes,function(value,key){value?element.addClass(key):element.removeClass(key)}),element.removeAttr("style").css(_.css)}function defaultWrapper(_){switch(_){case"title":return{enable:!1,text:"Write Your Title",className:"h4",css:{width:scope.options.chart.width+"px",textAlign:"center"}};case"subtitle":return{enable:!1,text:"Write Your Subtitle",css:{width:scope.options.chart.width+"px",textAlign:"center"}};case"caption":return{enable:!1,text:"Figure 1. 
Write Your Caption text.",css:{width:scope.options.chart.width+"px",textAlign:"center"}}}}function defaultStyles(){return{classes:{"with-3d-shadow":!0,"with-transitions":!0,gallery:!1},css:{}}}function dataWatchFn(newData,oldData){newData!==oldData&&(scope._config.disabled||(scope._config.refreshDataOnly?scope.api.update():scope.api.refresh()))}var defaultConfig={extended:!1,visible:!0,disabled:!1,refreshDataOnly:!0,deepWatchOptions:!0,deepWatchData:!0,deepWatchDataDepth:2,debounce:10,debounceImmediate:!0};scope.isReady=!1,scope._config=angular.extend(defaultConfig,scope.config),scope.api={refresh:function(){scope.api.updateWithOptions(),scope.isReady=!0},refreshWithTimeout:function(t){setTimeout(function(){scope.api.refresh()},t)},update:function(){scope.chart&&scope.svg?"sunburstChart"===scope.options.chart.type?scope.svg.datum(angular.copy(scope.data)).call(scope.chart):scope.svg.datum(scope.data).call(scope.chart):scope.api.refresh()},updateWithTimeout:function(t){setTimeout(function(){scope.api.update()},t)},updateWithOptions:function(options){if(arguments.length){if(scope.options=options,scope._config.deepWatchOptions&&!scope._config.disabled)return}else options=scope.options;scope.api.clearElement(),angular.isDefined(options)!==!1&&scope._config.visible&&(scope.chart=nv.models[options.chart.type](),scope.chart.id=Math.random().toString(36).substr(2,15),angular.forEach(scope.chart,function(value,key){"_"===key[0]||["clearHighlights","highlightPoint","id","options","resizeHandler","state","open","close","tooltipContent"].indexOf(key)>=0||("dispatch"===key?(void 0!==options.chart[key]&&null!==options.chart[key]||scope._config.extended&&(options.chart[key]={}),configureEvents(scope.chart[key],options.chart[key])):["bars","bars1","bars2","boxplot","bullet","controls","discretebar","distX","distY","focus","interactiveLayer","legend","lines","lines1","lines2","multibar","pie","scatter","scatters1","scatters2","sparkline","stack1","stack2","sunburst","tooltip","x2Axis","xAxis","y1Axis","y2Axis","y3Axis","y4Axis","yAxis","yAxis1","yAxis2"].indexOf(key)>=0||"stacked"===key&&"stackedAreaChart"===options.chart.type?(void 0!==options.chart[key]&&null!==options.chart[key]||scope._config.extended&&(options.chart[key]={}),configure(scope.chart[key],options.chart[key],options.chart.type)):"focusHeight"===key&&"lineChart"===options.chart.type||"focusHeight"===key&&"lineWithFocusChart"===options.chart.type||("xTickFormat"!==key&&"yTickFormat"!==key||"lineWithFocusChart"!==options.chart.type)&&("tooltips"===key&&"boxPlotChart"===options.chart.type||("tooltipXContent"!==key&&"tooltipYContent"!==key||"scatterChart"!==options.chart.type)&&("x"!==key&&"y"!==key||"forceDirectedGraph"!==options.chart.type)&&(void 0===options.chart[key]||null===options.chart[key]?scope._config.extended&&("barColor"===key?options.chart[key]=value()():options.chart[key]=value()):scope.chart[key](options.chart[key]))))}),scope.api.updateWithData(),(options.title||scope._config.extended)&&configureWrapper("title"),(options.subtitle||scope._config.extended)&&configureWrapper("subtitle"),(options.caption||scope._config.extended)&&configureWrapper("caption"),(options.styles||scope._config.extended)&&configureStyles(),nv.addGraph(function(){return scope.chart?(scope.chart.resizeHandler&&scope.chart.resizeHandler.clear(),scope.chart.resizeHandler=nv.utils.windowResize(function(){scope.chart&&scope.chart.update&&scope.chart.update()}),void 
0!==options.chart.zoom&&["scatterChart","lineChart","candlestickBarChart","cumulativeLineChart","historicalBarChart","ohlcBarChart","stackedAreaChart"].indexOf(options.chart.type)>-1&&nvd3Utils.zoom(scope,options),scope.chart):void 0},options.chart.callback))},updateWithData:function(data){if(arguments.length){if(scope.data=data,scope._config.deepWatchData&&!scope._config.disabled)return}else data="sunburstChart"===scope.options.chart.type?angular.copy(scope.data):scope.data;if(data){d3.select(element[0]).select("svg").remove();var h,w;scope.svg=d3.select(element[0]).insert("svg",".caption"),(h=scope.options.chart.height)&&(isNaN(+h)||(h+="px"),scope.svg.attr("height",h).style({height:h})),(w=scope.options.chart.width)?(isNaN(+w)||(w+="px"),scope.svg.attr("width",w).style({width:w})):scope.svg.attr("width","100%").style({width:"100%"}),scope.svg.datum(data).call(scope.chart),scope.chart&&scope.chart.zoomRender&&scope.chart.zoomRender()}},clearElement:function(){if(element.find(".title").remove(),element.find(".subtitle").remove(),element.find(".caption").remove(),element.empty(),scope.chart&&scope.chart.tooltip&&scope.chart.tooltip.id&&d3.select("#"+scope.chart.tooltip.id()).remove(),nv.graphs&&scope.chart)for(var i=nv.graphs.length-1;i>=0;i--)nv.graphs[i]&&nv.graphs[i].id===scope.chart.id&&nv.graphs.splice(i,1);nv.tooltip&&nv.tooltip.cleanup&&nv.tooltip.cleanup(),scope.chart&&scope.chart.resizeHandler&&scope.chart.resizeHandler.clear(),scope.chart=null},getScope:function(){return scope},getElement:function(){return element}},scope._config.deepWatchOptions&&scope.$watch("options",nvd3Utils.debounce(function(newOptions){scope._config.disabled||scope.api.refresh()},scope._config.debounce,scope._config.debounceImmediate),!0),scope._config.deepWatchData&&(1===scope._config.deepWatchDataDepth?scope.$watchCollection("data",dataWatchFn):scope.$watch("data",dataWatchFn,2===scope._config.deepWatchDataDepth)),scope.$watch("config",function(newConfig,oldConfig){newConfig!==oldConfig&&(scope._config=angular.extend(defaultConfig,newConfig),scope.api.refresh())},!0),scope._config.deepWatchOptions||scope._config.deepWatchData||scope.api.refresh(),angular.forEach(scope.events,function(eventHandler,event){scope.$on(event,function(e,args){return eventHandler(e,scope,args)})}),element.on("$destroy",function(){scope.api.clearElement()}),scope.$watch("isReady",function(isReady){isReady&&scope.onReady&&"function"==typeof scope.onReady()&&scope.onReady()(scope,element)})}}}]).factory("nvd3Utils",function(){return{debounce:function(func,wait,immediate){var timeout;return function(){var context=this,args=arguments,later=function(){timeout=null,immediate||func.apply(context,args)},callNow=immediate&&!timeout;clearTimeout(timeout),timeout=setTimeout(later,wait),callNow&&func.apply(context,args)}},deepExtend:function(dst){var me=this;return angular.forEach(arguments,function(obj){obj!==dst&&angular.forEach(obj,function(value,key){dst[key]&&dst[key].constructor&&dst[key].constructor===Object?me.deepExtend(dst[key],value):dst[key]=value})}),dst},zoom:function(scope,options){var zoom=options.chart.zoom,enabled="undefined"==typeof zoom.enabled||null===zoom.enabled?!0:zoom.enabled;if(enabled){var 
fixDomain,d3zoom,zoomed,unzoomed,zoomend,xScale=scope.chart.xAxis.scale(),yScale=scope.chart.yAxis.scale(),xDomain=scope.chart.xDomain||xScale.domain,yDomain=scope.chart.yDomain||yScale.domain,x_boundary=xScale.domain().slice(),y_boundary=yScale.domain().slice(),scale=zoom.scale||1,translate=zoom.translate||[0,0],scaleExtent=zoom.scaleExtent||[1,10],useFixedDomain=zoom.useFixedDomain||!1,useNiceScale=zoom.useNiceScale||!1,horizontalOff=zoom.horizontalOff||!1,verticalOff=zoom.verticalOff||!1,unzoomEventType=zoom.unzoomEventType||"dblclick.zoom";useNiceScale&&(xScale.nice(),yScale.nice()),fixDomain=function(domain,boundary){return domain[0]=Math.min(Math.max(domain[0],boundary[0]),boundary[1]-boundary[1]/scaleExtent[1]),domain[1]=Math.max(boundary[0]+boundary[1]/scaleExtent[1],Math.min(domain[1],boundary[1])),domain},zoomed=function(){if(void 0!==zoom.zoomed){var domains=zoom.zoomed(xScale.domain(),yScale.domain());horizontalOff||xDomain([domains.x1,domains.x2]),verticalOff||yDomain([domains.y1,domains.y2])}else horizontalOff||xDomain(useFixedDomain?fixDomain(xScale.domain(),x_boundary):xScale.domain()),verticalOff||yDomain(useFixedDomain?fixDomain(yScale.domain(),y_boundary):yScale.domain());scope.chart&&scope.chart.update()},unzoomed=function(){if(void 0!==zoom.unzoomed){var domains=zoom.unzoomed(xScale.domain(),yScale.domain());horizontalOff||xDomain([domains.x1,domains.x2]),verticalOff||yDomain([domains.y1,domains.y2])}else horizontalOff||xDomain(x_boundary),verticalOff||yDomain(y_boundary);d3zoom.scale(scale).translate(translate),scope.chart&&scope.chart.update()},zoomend=function(){void 0!==zoom.zoomend&&zoom.zoomend()},d3zoom=d3.behavior.zoom().x(xScale).y(yScale).scaleExtent(scaleExtent).on("zoom",zoomed).on("zoomend",zoomend),scope.svg&&(scope.svg.call(d3zoom),d3zoom.scale(scale).translate(translate).event(scope.svg),"none"!==unzoomEventType&&scope.svg.on(unzoomEventType,unzoomed)),scope.chart&&(scope.chart.zoomRender=function(){d3zoom.scale(scale).translate(translate),xScale=scope.chart.xAxis.scale(),yScale=scope.chart.yAxis.scale(),xDomain=scope.chart.xDomain||xScale.domain,yDomain=scope.chart.yDomain||yScale.domain,x_boundary=xScale.domain().slice(),y_boundary=yScale.domain().slice(),d3zoom.x(xScale).y(yScale),scope.svg.call(d3zoom),"none"!==unzoomEventType&&scope.svg.on(unzoomEventType,unzoomed)})}}}})}(window);
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/angular-route-1.6.4.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/angular-route-1.6.4.min.js
deleted file mode 100644
index 3f985d1..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/angular-route-1.6.4.min.js
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- AngularJS v1.6.4
- (c) 2010-2017 Google, Inc. http://angularjs.org
- License: MIT
-*/
-(function(J,d){'use strict';function A(d){k&&d.get("$route")}function B(t,u,g){return{restrict:"ECA",terminal:!0,priority:400,transclude:"element",link:function(a,f,b,c,m){function v(){l&&(g.cancel(l),l=null);n&&(n.$destroy(),n=null);p&&(l=g.leave(p),l.done(function(a){!1!==a&&(l=null)}),p=null)}function E(){var b=t.current&&t.current.locals;if(d.isDefined(b&&b.$template)){var b=a.$new(),c=t.current;p=m(b,function(b){g.enter(b,null,p||f).done(function(b){!1===b||!d.isDefined(w)||w&&!a.$eval(w)||u()});
-v()});n=c.scope=b;n.$emit("$viewContentLoaded");n.$eval(k)}else v()}var n,p,l,w=b.autoscroll,k=b.onload||"";a.$on("$routeChangeSuccess",E);E()}}}function C(d,k,g){return{restrict:"ECA",priority:-400,link:function(a,f){var b=g.current,c=b.locals;f.html(c.$template);var m=d(f.contents());if(b.controller){c.$scope=a;var v=k(b.controller,c);b.controllerAs&&(a[b.controllerAs]=v);f.data("$ngControllerController",v);f.children().data("$ngControllerController",v)}a[b.resolveAs||"$resolve"]=c;m(a)}}}var x,
-y,F,G,z=d.module("ngRoute",[]).info({angularVersion:"1.6.4"}).provider("$route",function(){function t(a,f){return d.extend(Object.create(a),f)}function u(a,d){var b=d.caseInsensitiveMatch,c={originalPath:a,regexp:a},g=c.keys=[];a=a.replace(/([().])/g,"\\$1").replace(/(\/)?:(\w+)(\*\?|[?*])?/g,function(a,b,d,c){a="?"===c||"*?"===c?"?":null;c="*"===c||"*?"===c?"*":null;g.push({name:d,optional:!!a});b=b||"";return""+(a?"":b)+"(?:"+(a?b:"")+(c&&"(.+?)"||"([^/]+)")+(a||"")+")"+(a||"")}).replace(/([/$*])/g,
-"\\$1");c.regexp=new RegExp("^"+a+"$",b?"i":"");return c}x=d.isArray;y=d.isObject;F=d.isDefined;G=d.noop;var g={};this.when=function(a,f){var b;b=void 0;if(x(f)){b=b||[];for(var c=0,m=f.length;c<m;c++)b[c]=f[c]}else if(y(f))for(c in b=b||{},f)if("$"!==c.charAt(0)||"$"!==c.charAt(1))b[c]=f[c];b=b||f;d.isUndefined(b.reloadOnSearch)&&(b.reloadOnSearch=!0);d.isUndefined(b.caseInsensitiveMatch)&&(b.caseInsensitiveMatch=this.caseInsensitiveMatch);g[a]=d.extend(b,a&&u(a,b));a&&(c="/"===a[a.length-1]?a.substr(0,
-a.length-1):a+"/",g[c]=d.extend({redirectTo:a},u(c,b)));return this};this.caseInsensitiveMatch=!1;this.otherwise=function(a){"string"===typeof a&&(a={redirectTo:a});this.when(null,a);return this};k=!0;this.eagerInstantiationEnabled=function(a){return F(a)?(k=a,this):k};this.$get=["$rootScope","$location","$routeParams","$q","$injector","$templateRequest","$sce","$browser",function(a,f,b,c,m,k,u,n){function p(e){var h=q.current;(y=(s=C())&&h&&s.$$route===h.$$route&&d.equals(s.pathParams,h.pathParams)&&
-!s.reloadOnSearch&&!D)||!h&&!s||a.$broadcast("$routeChangeStart",s,h).defaultPrevented&&e&&e.preventDefault()}function l(){var e=q.current,h=s;if(y)e.params=h.params,d.copy(e.params,b),a.$broadcast("$routeUpdate",e);else if(h||e){D=!1;q.current=h;var H=c.resolve(h);n.$$incOutstandingRequestCount();H.then(w).then(z).then(function(c){return c&&H.then(A).then(function(c){h===q.current&&(h&&(h.locals=c,d.copy(h.params,b)),a.$broadcast("$routeChangeSuccess",h,e))})}).catch(function(b){h===q.current&&a.$broadcast("$routeChangeError",
-h,e,b)}).finally(function(){n.$$completeOutstandingRequest(G)})}}function w(e){var a={route:e,hasRedirection:!1};if(e)if(e.redirectTo)if(d.isString(e.redirectTo))a.path=x(e.redirectTo,e.params),a.search=e.params,a.hasRedirection=!0;else{var b=f.path(),g=f.search();e=e.redirectTo(e.pathParams,b,g);d.isDefined(e)&&(a.url=e,a.hasRedirection=!0)}else if(e.resolveRedirectTo)return c.resolve(m.invoke(e.resolveRedirectTo)).then(function(e){d.isDefined(e)&&(a.url=e,a.hasRedirection=!0);return a});return a}
-function z(a){var b=!0;if(a.route!==q.current)b=!1;else if(a.hasRedirection){var d=f.url(),c=a.url;c?f.url(c).replace():c=f.path(a.path).search(a.search).replace().url();c!==d&&(b=!1)}return b}function A(a){if(a){var b=d.extend({},a.resolve);d.forEach(b,function(a,e){b[e]=d.isString(a)?m.get(a):m.invoke(a,null,null,e)});a=B(a);d.isDefined(a)&&(b.$template=a);return c.all(b)}}function B(a){var b,c;d.isDefined(b=a.template)?d.isFunction(b)&&(b=b(a.params)):d.isDefined(c=a.templateUrl)&&(d.isFunction(c)&&
-(c=c(a.params)),d.isDefined(c)&&(a.loadedTemplateUrl=u.valueOf(c),b=k(c)));return b}function C(){var a,b;d.forEach(g,function(c,g){var r;if(r=!b){var k=f.path();r=c.keys;var m={};if(c.regexp)if(k=c.regexp.exec(k)){for(var l=1,n=k.length;l<n;++l){var p=r[l-1],q=k[l];p&&q&&(m[p.name]=q)}r=m}else r=null;else r=null;r=a=r}r&&(b=t(c,{params:d.extend({},f.search(),a),pathParams:a}),b.$$route=c)});return b||g[null]&&t(g[null],{params:{},pathParams:{}})}function x(a,b){var c=[];d.forEach((a||"").split(":"),
-function(a,d){if(0===d)c.push(a);else{var e=a.match(/(\w+)(?:[?*])?(.*)/),f=e[1];c.push(b[f]);c.push(e[2]||"");delete b[f]}});return c.join("")}var D=!1,s,y,q={routes:g,reload:function(){D=!0;var b={defaultPrevented:!1,preventDefault:function(){this.defaultPrevented=!0;D=!1}};a.$evalAsync(function(){p(b);b.defaultPrevented||l()})},updateParams:function(a){if(this.current&&this.current.$$route)a=d.extend({},this.current.params,a),f.path(x(this.current.$$route.originalPath,a)),f.search(a);else throw I("norout");
-}};a.$on("$locationChangeStart",p);a.$on("$locationChangeSuccess",l);return q}]}).run(A),I=d.$$minErr("ngRoute"),k;A.$inject=["$injector"];z.provider("$routeParams",function(){this.$get=function(){return{}}});z.directive("ngView",B);z.directive("ngView",C);B.$inject=["$route","$anchorScroll","$animate"];C.$inject=["$compile","$controller","$route"]})(window,window.angular);
-//# sourceMappingURL=angular-route.min.js.map
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-editable.css b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-editable.css
deleted file mode 100644
index 57eac3b..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-editable.css
+++ /dev/null
@@ -1,655 +0,0 @@
-/*! X-editable - v1.5.0
-* In-place editing with Twitter Bootstrap, jQuery UI or pure jQuery
-* http://github.com/vitalets/x-editable
-* Copyright (c) 2013 Vitaliy Potapov; Licensed MIT */
-.editableform {
-    margin-bottom: 0; /* overwrites bootstrap margin */
-}
-
-.editableform .control-group {
-    margin-bottom: 0; /* overwrites bootstrap margin */
-    white-space: nowrap; /* prevent wrapping buttons on new line */
-    line-height: 20px; /* overwriting bootstrap line-height. See #133 */
-}
-
-.editable-buttons {
-   display: inline-block; /* should be inline to take effect of parent's white-space: nowrap */
-   vertical-align: top;
-   margin-left: 7px;
-   /* inline-block emulation for IE7*/
-   zoom: 1;
-   *display: inline;
-}
-
-.editable-buttons.editable-buttons-bottom {
-   display: block;
-   margin-top: 7px;
-   margin-left: 0;
-}
-
-.editable-input {
-    vertical-align: top;
-    display: inline-block; /* should be inline to take effect of parent's white-space: nowrap */
-    width: auto; /* bootstrap-responsive has width: 100% that breakes layout */
-    white-space: normal; /* reset white-space decalred in parent*/
-   /* display-inline emulation for IE7*/
-   zoom: 1;
-   *display: inline;
-}
-
-.editable-buttons .editable-cancel {
-   margin-left: 7px;
-}
-
-/*for jquery-ui buttons need set height to look more pretty*/
-.editable-buttons button.ui-button-icon-only {
-   height: 24px;
-   width: 30px;
-}
-
-.editableform-loading {
-    background: url('../img/loading.gif') center center no-repeat;
-    height: 25px;
-    width: auto;
-    min-width: 25px;
-}
-
-.editable-inline .editableform-loading {
-    background-position: left 5px;
-}
-
- .editable-error-block {
-    max-width: 300px;
-    margin: 5px 0 0 0;
-    width: auto;
-    white-space: normal;
-}
-
-/*add padding for jquery ui*/
-.editable-error-block.ui-state-error {
-    padding: 3px;
-}
-
-.editable-error {
-   color: red;
-}
-
-/* ---- For specific types ---- */
-
-.editableform .editable-date {
-    padding: 0;
-    margin: 0;
-    float: left;
-}
-
-/* move datepicker icon to center of add-on button. See https://github.com/vitalets/x-editable/issues/183 */
-.editable-inline .add-on .icon-th {
-   margin-top: 3px;
-   margin-left: 1px;
-}
-
-
-/* checklist vertical alignment */
-.editable-checklist label input[type="checkbox"],
-.editable-checklist label span {
-    vertical-align: middle;
-    margin: 0;
-}
-
-.editable-checklist label {
-    white-space: nowrap;
-}
-
-/* set exact width of textarea to fit buttons toolbar */
-.editable-wysihtml5 {
-    width: 566px;
-    height: 250px;
-}
-
-/* clear button shown as link in date inputs */
-.editable-clear {
-   clear: both;
-   font-size: 0.9em;
-   text-decoration: none;
-   text-align: right;
-}
-
-/* IOS-style clear button for text inputs */
-.editable-clear-x {
-   background: url('../img/clear.png') center center no-repeat;
-   display: block;
-   width: 13px;
-   height: 13px;
-   position: absolute;
-   opacity: 0.6;
-   z-index: 100;
-
-   top: 50%;
-   right: 6px;
-   margin-top: -6px;
-
-}
-
-.editable-clear-x:hover {
-   opacity: 1;
-}
-
-.editable-pre-wrapped {
-   white-space: pre-wrap;
-}
-.editable-container.editable-popup {
-    max-width: none !important; /* without this rule poshytip/tooltip does not stretch */
-}
-
-.editable-container.popover {
-    width: auto; /* without this rule popover does not stretch */
-}
-
-.editable-container.editable-inline {
-    display: inline-block;
-    vertical-align: middle;
-    width: auto;
-    /* inline-block emulation for IE7*/
-    zoom: 1;
-    *display: inline;
-}
-
-.editable-container.ui-widget {
-   font-size: inherit;  /* jqueryui widget font 1.1em too big, overwrite it */
-   z-index: 9990; /* should be less than select2 dropdown z-index to close dropdown first when click */
-}
-.editable-click,
-a.editable-click,
-a.editable-click:hover {
-    text-decoration: none;
-    border-bottom: dashed 1px #0088cc;
-}
-
-.editable-click.editable-disabled,
-a.editable-click.editable-disabled,
-a.editable-click.editable-disabled:hover {
-   color: #585858;
-   cursor: default;
-   border-bottom: none;
-}
-
-.editable-empty, .editable-empty:hover, .editable-empty:focus{
-  font-style: italic;
-  color: #DD1144;
-  /* border-bottom: none; */
-  text-decoration: none;
-}
-
-.editable-unsaved {
-  font-weight: bold;
-}
-
-.editable-unsaved:after {
-/*    content: '*'*/
-}
-
-.editable-bg-transition {
-  -webkit-transition: background-color 1400ms ease-out;
-  -moz-transition: background-color 1400ms ease-out;
-  -o-transition: background-color 1400ms ease-out;
-  -ms-transition: background-color 1400ms ease-out;
-  transition: background-color 1400ms ease-out;
-}
-
-/*see https://github.com/vitalets/x-editable/issues/139 */
-.form-horizontal .editable
-{
-    padding-top: 5px;
-    display:inline-block;
-}
-
-
-/*!
- * Datepicker for Bootstrap
- *
- * Copyright 2012 Stefan Petre
- * Improvements by Andrew Rowls
- * Licensed under the Apache License v2.0
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- */
-.datepicker {
-  padding: 4px;
-  -webkit-border-radius: 4px;
-  -moz-border-radius: 4px;
-  border-radius: 4px;
-  direction: ltr;
-  /*.dow {
-		border-top: 1px solid #ddd !important;
-	}*/
-
-}
-.datepicker-inline {
-  width: 220px;
-}
-.datepicker.datepicker-rtl {
-  direction: rtl;
-}
-.datepicker.datepicker-rtl table tr td span {
-  float: right;
-}
-.datepicker-dropdown {
-  top: 0;
-  left: 0;
-}
-.datepicker-dropdown:before {
-  content: '';
-  display: inline-block;
-  border-left: 7px solid transparent;
-  border-right: 7px solid transparent;
-  border-bottom: 7px solid #ccc;
-  border-bottom-color: rgba(0, 0, 0, 0.2);
-  position: absolute;
-  top: -7px;
-  left: 6px;
-}
-.datepicker-dropdown:after {
-  content: '';
-  display: inline-block;
-  border-left: 6px solid transparent;
-  border-right: 6px solid transparent;
-  border-bottom: 6px solid #ffffff;
-  position: absolute;
-  top: -6px;
-  left: 7px;
-}
-.datepicker > div {
-  display: none;
-}
-.datepicker.days div.datepicker-days {
-  display: block;
-}
-.datepicker.months div.datepicker-months {
-  display: block;
-}
-.datepicker.years div.datepicker-years {
-  display: block;
-}
-.datepicker table {
-  margin: 0;
-}
-.datepicker td,
-.datepicker th {
-  text-align: center;
-  width: 20px;
-  height: 20px;
-  -webkit-border-radius: 4px;
-  -moz-border-radius: 4px;
-  border-radius: 4px;
-  border: none;
-}
-.table-striped .datepicker table tr td,
-.table-striped .datepicker table tr th {
-  background-color: transparent;
-}
-.datepicker table tr td.day:hover {
-  background: #eeeeee;
-  cursor: pointer;
-}
-.datepicker table tr td.old,
-.datepicker table tr td.new {
-  color: #999999;
-}
-.datepicker table tr td.disabled,
-.datepicker table tr td.disabled:hover {
-  background: none;
-  color: #999999;
-  cursor: default;
-}
-.datepicker table tr td.today,
-.datepicker table tr td.today:hover,
-.datepicker table tr td.today.disabled,
-.datepicker table tr td.today.disabled:hover {
-  background-color: #fde19a;
-  background-image: -moz-linear-gradient(top, #fdd49a, #fdf59a);
-  background-image: -ms-linear-gradient(top, #fdd49a, #fdf59a);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#fdd49a), to(#fdf59a));
-  background-image: -webkit-linear-gradient(top, #fdd49a, #fdf59a);
-  background-image: -o-linear-gradient(top, #fdd49a, #fdf59a);
-  background-image: linear-gradient(top, #fdd49a, #fdf59a);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fdd49a', endColorstr='#fdf59a', GradientType=0);
-  border-color: #fdf59a #fdf59a #fbed50;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
-  color: #000;
-}
-.datepicker table tr td.today:hover,
-.datepicker table tr td.today:hover:hover,
-.datepicker table tr td.today.disabled:hover,
-.datepicker table tr td.today.disabled:hover:hover,
-.datepicker table tr td.today:active,
-.datepicker table tr td.today:hover:active,
-.datepicker table tr td.today.disabled:active,
-.datepicker table tr td.today.disabled:hover:active,
-.datepicker table tr td.today.active,
-.datepicker table tr td.today:hover.active,
-.datepicker table tr td.today.disabled.active,
-.datepicker table tr td.today.disabled:hover.active,
-.datepicker table tr td.today.disabled,
-.datepicker table tr td.today:hover.disabled,
-.datepicker table tr td.today.disabled.disabled,
-.datepicker table tr td.today.disabled:hover.disabled,
-.datepicker table tr td.today[disabled],
-.datepicker table tr td.today:hover[disabled],
-.datepicker table tr td.today.disabled[disabled],
-.datepicker table tr td.today.disabled:hover[disabled] {
-  background-color: #fdf59a;
-}
-.datepicker table tr td.today:active,
-.datepicker table tr td.today:hover:active,
-.datepicker table tr td.today.disabled:active,
-.datepicker table tr td.today.disabled:hover:active,
-.datepicker table tr td.today.active,
-.datepicker table tr td.today:hover.active,
-.datepicker table tr td.today.disabled.active,
-.datepicker table tr td.today.disabled:hover.active {
-  background-color: #fbf069 \9;
-}
-.datepicker table tr td.today:hover:hover {
-  color: #000;
-}
-.datepicker table tr td.today.active:hover {
-  color: #fff;
-}
-.datepicker table tr td.range,
-.datepicker table tr td.range:hover,
-.datepicker table tr td.range.disabled,
-.datepicker table tr td.range.disabled:hover {
-  background: #eeeeee;
-  -webkit-border-radius: 0;
-  -moz-border-radius: 0;
-  border-radius: 0;
-}
-.datepicker table tr td.range.today,
-.datepicker table tr td.range.today:hover,
-.datepicker table tr td.range.today.disabled,
-.datepicker table tr td.range.today.disabled:hover {
-  background-color: #f3d17a;
-  background-image: -moz-linear-gradient(top, #f3c17a, #f3e97a);
-  background-image: -ms-linear-gradient(top, #f3c17a, #f3e97a);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#f3c17a), to(#f3e97a));
-  background-image: -webkit-linear-gradient(top, #f3c17a, #f3e97a);
-  background-image: -o-linear-gradient(top, #f3c17a, #f3e97a);
-  background-image: linear-gradient(top, #f3c17a, #f3e97a);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#f3c17a', endColorstr='#f3e97a', GradientType=0);
-  border-color: #f3e97a #f3e97a #edde34;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
-  -webkit-border-radius: 0;
-  -moz-border-radius: 0;
-  border-radius: 0;
-}
-.datepicker table tr td.range.today:hover,
-.datepicker table tr td.range.today:hover:hover,
-.datepicker table tr td.range.today.disabled:hover,
-.datepicker table tr td.range.today.disabled:hover:hover,
-.datepicker table tr td.range.today:active,
-.datepicker table tr td.range.today:hover:active,
-.datepicker table tr td.range.today.disabled:active,
-.datepicker table tr td.range.today.disabled:hover:active,
-.datepicker table tr td.range.today.active,
-.datepicker table tr td.range.today:hover.active,
-.datepicker table tr td.range.today.disabled.active,
-.datepicker table tr td.range.today.disabled:hover.active,
-.datepicker table tr td.range.today.disabled,
-.datepicker table tr td.range.today:hover.disabled,
-.datepicker table tr td.range.today.disabled.disabled,
-.datepicker table tr td.range.today.disabled:hover.disabled,
-.datepicker table tr td.range.today[disabled],
-.datepicker table tr td.range.today:hover[disabled],
-.datepicker table tr td.range.today.disabled[disabled],
-.datepicker table tr td.range.today.disabled:hover[disabled] {
-  background-color: #f3e97a;
-}
-.datepicker table tr td.range.today:active,
-.datepicker table tr td.range.today:hover:active,
-.datepicker table tr td.range.today.disabled:active,
-.datepicker table tr td.range.today.disabled:hover:active,
-.datepicker table tr td.range.today.active,
-.datepicker table tr td.range.today:hover.active,
-.datepicker table tr td.range.today.disabled.active,
-.datepicker table tr td.range.today.disabled:hover.active {
-  background-color: #efe24b \9;
-}
-.datepicker table tr td.selected,
-.datepicker table tr td.selected:hover,
-.datepicker table tr td.selected.disabled,
-.datepicker table tr td.selected.disabled:hover {
-  background-color: #9e9e9e;
-  background-image: -moz-linear-gradient(top, #b3b3b3, #808080);
-  background-image: -ms-linear-gradient(top, #b3b3b3, #808080);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#b3b3b3), to(#808080));
-  background-image: -webkit-linear-gradient(top, #b3b3b3, #808080);
-  background-image: -o-linear-gradient(top, #b3b3b3, #808080);
-  background-image: linear-gradient(top, #b3b3b3, #808080);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#b3b3b3', endColorstr='#808080', GradientType=0);
-  border-color: #808080 #808080 #595959;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
-  color: #fff;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-}
-.datepicker table tr td.selected:hover,
-.datepicker table tr td.selected:hover:hover,
-.datepicker table tr td.selected.disabled:hover,
-.datepicker table tr td.selected.disabled:hover:hover,
-.datepicker table tr td.selected:active,
-.datepicker table tr td.selected:hover:active,
-.datepicker table tr td.selected.disabled:active,
-.datepicker table tr td.selected.disabled:hover:active,
-.datepicker table tr td.selected.active,
-.datepicker table tr td.selected:hover.active,
-.datepicker table tr td.selected.disabled.active,
-.datepicker table tr td.selected.disabled:hover.active,
-.datepicker table tr td.selected.disabled,
-.datepicker table tr td.selected:hover.disabled,
-.datepicker table tr td.selected.disabled.disabled,
-.datepicker table tr td.selected.disabled:hover.disabled,
-.datepicker table tr td.selected[disabled],
-.datepicker table tr td.selected:hover[disabled],
-.datepicker table tr td.selected.disabled[disabled],
-.datepicker table tr td.selected.disabled:hover[disabled] {
-  background-color: #808080;
-}
-.datepicker table tr td.selected:active,
-.datepicker table tr td.selected:hover:active,
-.datepicker table tr td.selected.disabled:active,
-.datepicker table tr td.selected.disabled:hover:active,
-.datepicker table tr td.selected.active,
-.datepicker table tr td.selected:hover.active,
-.datepicker table tr td.selected.disabled.active,
-.datepicker table tr td.selected.disabled:hover.active {
-  background-color: #666666 \9;
-}
-.datepicker table tr td.active,
-.datepicker table tr td.active:hover,
-.datepicker table tr td.active.disabled,
-.datepicker table tr td.active.disabled:hover {
-  background-color: #006dcc;
-  background-image: -moz-linear-gradient(top, #0088cc, #0044cc);
-  background-image: -ms-linear-gradient(top, #0088cc, #0044cc);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#0088cc), to(#0044cc));
-  background-image: -webkit-linear-gradient(top, #0088cc, #0044cc);
-  background-image: -o-linear-gradient(top, #0088cc, #0044cc);
-  background-image: linear-gradient(top, #0088cc, #0044cc);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#0088cc', endColorstr='#0044cc', GradientType=0);
-  border-color: #0044cc #0044cc #002a80;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
-  color: #fff;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-}
-.datepicker table tr td.active:hover,
-.datepicker table tr td.active:hover:hover,
-.datepicker table tr td.active.disabled:hover,
-.datepicker table tr td.active.disabled:hover:hover,
-.datepicker table tr td.active:active,
-.datepicker table tr td.active:hover:active,
-.datepicker table tr td.active.disabled:active,
-.datepicker table tr td.active.disabled:hover:active,
-.datepicker table tr td.active.active,
-.datepicker table tr td.active:hover.active,
-.datepicker table tr td.active.disabled.active,
-.datepicker table tr td.active.disabled:hover.active,
-.datepicker table tr td.active.disabled,
-.datepicker table tr td.active:hover.disabled,
-.datepicker table tr td.active.disabled.disabled,
-.datepicker table tr td.active.disabled:hover.disabled,
-.datepicker table tr td.active[disabled],
-.datepicker table tr td.active:hover[disabled],
-.datepicker table tr td.active.disabled[disabled],
-.datepicker table tr td.active.disabled:hover[disabled] {
-  background-color: #0044cc;
-}
-.datepicker table tr td.active:active,
-.datepicker table tr td.active:hover:active,
-.datepicker table tr td.active.disabled:active,
-.datepicker table tr td.active.disabled:hover:active,
-.datepicker table tr td.active.active,
-.datepicker table tr td.active:hover.active,
-.datepicker table tr td.active.disabled.active,
-.datepicker table tr td.active.disabled:hover.active {
-  background-color: #003399 \9;
-}
-.datepicker table tr td span {
-  display: block;
-  width: 23%;
-  height: 54px;
-  line-height: 54px;
-  float: left;
-  margin: 1%;
-  cursor: pointer;
-  -webkit-border-radius: 4px;
-  -moz-border-radius: 4px;
-  border-radius: 4px;
-}
-.datepicker table tr td span:hover {
-  background: #eeeeee;
-}
-.datepicker table tr td span.disabled,
-.datepicker table tr td span.disabled:hover {
-  background: none;
-  color: #999999;
-  cursor: default;
-}
-.datepicker table tr td span.active,
-.datepicker table tr td span.active:hover,
-.datepicker table tr td span.active.disabled,
-.datepicker table tr td span.active.disabled:hover {
-  background-color: #006dcc;
-  background-image: -moz-linear-gradient(top, #0088cc, #0044cc);
-  background-image: -ms-linear-gradient(top, #0088cc, #0044cc);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#0088cc), to(#0044cc));
-  background-image: -webkit-linear-gradient(top, #0088cc, #0044cc);
-  background-image: -o-linear-gradient(top, #0088cc, #0044cc);
-  background-image: linear-gradient(top, #0088cc, #0044cc);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#0088cc', endColorstr='#0044cc', GradientType=0);
-  border-color: #0044cc #0044cc #002a80;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
-  color: #fff;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-}
-.datepicker table tr td span.active:hover,
-.datepicker table tr td span.active:hover:hover,
-.datepicker table tr td span.active.disabled:hover,
-.datepicker table tr td span.active.disabled:hover:hover,
-.datepicker table tr td span.active:active,
-.datepicker table tr td span.active:hover:active,
-.datepicker table tr td span.active.disabled:active,
-.datepicker table tr td span.active.disabled:hover:active,
-.datepicker table tr td span.active.active,
-.datepicker table tr td span.active:hover.active,
-.datepicker table tr td span.active.disabled.active,
-.datepicker table tr td span.active.disabled:hover.active,
-.datepicker table tr td span.active.disabled,
-.datepicker table tr td span.active:hover.disabled,
-.datepicker table tr td span.active.disabled.disabled,
-.datepicker table tr td span.active.disabled:hover.disabled,
-.datepicker table tr td span.active[disabled],
-.datepicker table tr td span.active:hover[disabled],
-.datepicker table tr td span.active.disabled[disabled],
-.datepicker table tr td span.active.disabled:hover[disabled] {
-  background-color: #0044cc;
-}
-.datepicker table tr td span.active:active,
-.datepicker table tr td span.active:hover:active,
-.datepicker table tr td span.active.disabled:active,
-.datepicker table tr td span.active.disabled:hover:active,
-.datepicker table tr td span.active.active,
-.datepicker table tr td span.active:hover.active,
-.datepicker table tr td span.active.disabled.active,
-.datepicker table tr td span.active.disabled:hover.active {
-  background-color: #003399 \9;
-}
-.datepicker table tr td span.old,
-.datepicker table tr td span.new {
-  color: #999999;
-}
-.datepicker th.datepicker-switch {
-  width: 145px;
-}
-.datepicker thead tr:first-child th,
-.datepicker tfoot tr th {
-  cursor: pointer;
-}
-.datepicker thead tr:first-child th:hover,
-.datepicker tfoot tr th:hover {
-  background: #eeeeee;
-}
-.datepicker .cw {
-  font-size: 10px;
-  width: 12px;
-  padding: 0 2px 0 5px;
-  vertical-align: middle;
-}
-.datepicker thead tr:first-child th.cw {
-  cursor: default;
-  background-color: transparent;
-}
-.input-append.date .add-on i,
-.input-prepend.date .add-on i {
-  display: block;
-  cursor: pointer;
-  width: 16px;
-  height: 16px;
-}
-.input-daterange input {
-  text-align: center;
-}
-.input-daterange input:first-child {
-  -webkit-border-radius: 3px 0 0 3px;
-  -moz-border-radius: 3px 0 0 3px;
-  border-radius: 3px 0 0 3px;
-}
-.input-daterange input:last-child {
-  -webkit-border-radius: 0 3px 3px 0;
-  -moz-border-radius: 0 3px 3px 0;
-  border-radius: 0 3px 3px 0;
-}
-.input-daterange .add-on {
-  display: inline-block;
-  width: auto;
-  min-width: 16px;
-  height: 18px;
-  padding: 4px 5px;
-  font-weight: normal;
-  line-height: 18px;
-  text-align: center;
-  text-shadow: 0 1px 0 #ffffff;
-  vertical-align: middle;
-  background-color: #eeeeee;
-  border: 1px solid #ccc;
-  margin-left: -5px;
-  margin-right: -5px;
-}
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css
deleted file mode 100644
index ea33f76..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css
+++ /dev/null
@@ -1,587 +0,0 @@
-/*!
- * Bootstrap v3.4.1 (https://getbootstrap.com/)
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- */
-.btn-default,
-.btn-primary,
-.btn-success,
-.btn-info,
-.btn-warning,
-.btn-danger {
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);
-  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);
-  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);
-}
-.btn-default:active,
-.btn-primary:active,
-.btn-success:active,
-.btn-info:active,
-.btn-warning:active,
-.btn-danger:active,
-.btn-default.active,
-.btn-primary.active,
-.btn-success.active,
-.btn-info.active,
-.btn-warning.active,
-.btn-danger.active {
-  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
-  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
-}
-.btn-default.disabled,
-.btn-primary.disabled,
-.btn-success.disabled,
-.btn-info.disabled,
-.btn-warning.disabled,
-.btn-danger.disabled,
-.btn-default[disabled],
-.btn-primary[disabled],
-.btn-success[disabled],
-.btn-info[disabled],
-.btn-warning[disabled],
-.btn-danger[disabled],
-fieldset[disabled] .btn-default,
-fieldset[disabled] .btn-primary,
-fieldset[disabled] .btn-success,
-fieldset[disabled] .btn-info,
-fieldset[disabled] .btn-warning,
-fieldset[disabled] .btn-danger {
-  -webkit-box-shadow: none;
-  box-shadow: none;
-}
-.btn-default .badge,
-.btn-primary .badge,
-.btn-success .badge,
-.btn-info .badge,
-.btn-warning .badge,
-.btn-danger .badge {
-  text-shadow: none;
-}
-.btn:active,
-.btn.active {
-  background-image: none;
-}
-.btn-default {
-  background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);
-  background-image: -o-linear-gradient(top, #fff 0%, #e0e0e0 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#fff), to(#e0e0e0));
-  background-image: linear-gradient(to bottom, #fff 0%, #e0e0e0 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
-  background-repeat: repeat-x;
-  border-color: #dbdbdb;
-  text-shadow: 0 1px 0 #fff;
-  border-color: #ccc;
-}
-.btn-default:hover,
-.btn-default:focus {
-  background-color: #e0e0e0;
-  background-position: 0 -15px;
-}
-.btn-default:active,
-.btn-default.active {
-  background-color: #e0e0e0;
-  border-color: #dbdbdb;
-}
-.btn-default.disabled,
-.btn-default[disabled],
-fieldset[disabled] .btn-default,
-.btn-default.disabled:hover,
-.btn-default[disabled]:hover,
-fieldset[disabled] .btn-default:hover,
-.btn-default.disabled:focus,
-.btn-default[disabled]:focus,
-fieldset[disabled] .btn-default:focus,
-.btn-default.disabled.focus,
-.btn-default[disabled].focus,
-fieldset[disabled] .btn-default.focus,
-.btn-default.disabled:active,
-.btn-default[disabled]:active,
-fieldset[disabled] .btn-default:active,
-.btn-default.disabled.active,
-.btn-default[disabled].active,
-fieldset[disabled] .btn-default.active {
-  background-color: #e0e0e0;
-  background-image: none;
-}
-.btn-primary {
-  background-image: -webkit-linear-gradient(top, #337ab7 0%, #265a88 100%);
-  background-image: -o-linear-gradient(top, #337ab7 0%, #265a88 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#265a88));
-  background-image: linear-gradient(to bottom, #337ab7 0%, #265a88 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
-  background-repeat: repeat-x;
-  border-color: #245580;
-}
-.btn-primary:hover,
-.btn-primary:focus {
-  background-color: #265a88;
-  background-position: 0 -15px;
-}
-.btn-primary:active,
-.btn-primary.active {
-  background-color: #265a88;
-  border-color: #245580;
-}
-.btn-primary.disabled,
-.btn-primary[disabled],
-fieldset[disabled] .btn-primary,
-.btn-primary.disabled:hover,
-.btn-primary[disabled]:hover,
-fieldset[disabled] .btn-primary:hover,
-.btn-primary.disabled:focus,
-.btn-primary[disabled]:focus,
-fieldset[disabled] .btn-primary:focus,
-.btn-primary.disabled.focus,
-.btn-primary[disabled].focus,
-fieldset[disabled] .btn-primary.focus,
-.btn-primary.disabled:active,
-.btn-primary[disabled]:active,
-fieldset[disabled] .btn-primary:active,
-.btn-primary.disabled.active,
-.btn-primary[disabled].active,
-fieldset[disabled] .btn-primary.active {
-  background-color: #265a88;
-  background-image: none;
-}
-.btn-success {
-  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);
-  background-image: -o-linear-gradient(top, #5cb85c 0%, #419641 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#419641));
-  background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
-  background-repeat: repeat-x;
-  border-color: #3e8f3e;
-}
-.btn-success:hover,
-.btn-success:focus {
-  background-color: #419641;
-  background-position: 0 -15px;
-}
-.btn-success:active,
-.btn-success.active {
-  background-color: #419641;
-  border-color: #3e8f3e;
-}
-.btn-success.disabled,
-.btn-success[disabled],
-fieldset[disabled] .btn-success,
-.btn-success.disabled:hover,
-.btn-success[disabled]:hover,
-fieldset[disabled] .btn-success:hover,
-.btn-success.disabled:focus,
-.btn-success[disabled]:focus,
-fieldset[disabled] .btn-success:focus,
-.btn-success.disabled.focus,
-.btn-success[disabled].focus,
-fieldset[disabled] .btn-success.focus,
-.btn-success.disabled:active,
-.btn-success[disabled]:active,
-fieldset[disabled] .btn-success:active,
-.btn-success.disabled.active,
-.btn-success[disabled].active,
-fieldset[disabled] .btn-success.active {
-  background-color: #419641;
-  background-image: none;
-}
-.btn-info {
-  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);
-  background-image: -o-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#2aabd2));
-  background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
-  background-repeat: repeat-x;
-  border-color: #28a4c9;
-}
-.btn-info:hover,
-.btn-info:focus {
-  background-color: #2aabd2;
-  background-position: 0 -15px;
-}
-.btn-info:active,
-.btn-info.active {
-  background-color: #2aabd2;
-  border-color: #28a4c9;
-}
-.btn-info.disabled,
-.btn-info[disabled],
-fieldset[disabled] .btn-info,
-.btn-info.disabled:hover,
-.btn-info[disabled]:hover,
-fieldset[disabled] .btn-info:hover,
-.btn-info.disabled:focus,
-.btn-info[disabled]:focus,
-fieldset[disabled] .btn-info:focus,
-.btn-info.disabled.focus,
-.btn-info[disabled].focus,
-fieldset[disabled] .btn-info.focus,
-.btn-info.disabled:active,
-.btn-info[disabled]:active,
-fieldset[disabled] .btn-info:active,
-.btn-info.disabled.active,
-.btn-info[disabled].active,
-fieldset[disabled] .btn-info.active {
-  background-color: #2aabd2;
-  background-image: none;
-}
-.btn-warning {
-  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);
-  background-image: -o-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#eb9316));
-  background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
-  background-repeat: repeat-x;
-  border-color: #e38d13;
-}
-.btn-warning:hover,
-.btn-warning:focus {
-  background-color: #eb9316;
-  background-position: 0 -15px;
-}
-.btn-warning:active,
-.btn-warning.active {
-  background-color: #eb9316;
-  border-color: #e38d13;
-}
-.btn-warning.disabled,
-.btn-warning[disabled],
-fieldset[disabled] .btn-warning,
-.btn-warning.disabled:hover,
-.btn-warning[disabled]:hover,
-fieldset[disabled] .btn-warning:hover,
-.btn-warning.disabled:focus,
-.btn-warning[disabled]:focus,
-fieldset[disabled] .btn-warning:focus,
-.btn-warning.disabled.focus,
-.btn-warning[disabled].focus,
-fieldset[disabled] .btn-warning.focus,
-.btn-warning.disabled:active,
-.btn-warning[disabled]:active,
-fieldset[disabled] .btn-warning:active,
-.btn-warning.disabled.active,
-.btn-warning[disabled].active,
-fieldset[disabled] .btn-warning.active {
-  background-color: #eb9316;
-  background-image: none;
-}
-.btn-danger {
-  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);
-  background-image: -o-linear-gradient(top, #d9534f 0%, #c12e2a 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9534f), to(#c12e2a));
-  background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
-  background-repeat: repeat-x;
-  border-color: #b92c28;
-}
-.btn-danger:hover,
-.btn-danger:focus {
-  background-color: #c12e2a;
-  background-position: 0 -15px;
-}
-.btn-danger:active,
-.btn-danger.active {
-  background-color: #c12e2a;
-  border-color: #b92c28;
-}
-.btn-danger.disabled,
-.btn-danger[disabled],
-fieldset[disabled] .btn-danger,
-.btn-danger.disabled:hover,
-.btn-danger[disabled]:hover,
-fieldset[disabled] .btn-danger:hover,
-.btn-danger.disabled:focus,
-.btn-danger[disabled]:focus,
-fieldset[disabled] .btn-danger:focus,
-.btn-danger.disabled.focus,
-.btn-danger[disabled].focus,
-fieldset[disabled] .btn-danger.focus,
-.btn-danger.disabled:active,
-.btn-danger[disabled]:active,
-fieldset[disabled] .btn-danger:active,
-.btn-danger.disabled.active,
-.btn-danger[disabled].active,
-fieldset[disabled] .btn-danger.active {
-  background-color: #c12e2a;
-  background-image: none;
-}
-.thumbnail,
-.img-thumbnail {
-  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
-  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
-}
-.dropdown-menu > li > a:hover,
-.dropdown-menu > li > a:focus {
-  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
-  background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8));
-  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
-  background-repeat: repeat-x;
-  background-color: #e8e8e8;
-}
-.dropdown-menu > .active > a,
-.dropdown-menu > .active > a:hover,
-.dropdown-menu > .active > a:focus {
-  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);
-  background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));
-  background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);
-  background-repeat: repeat-x;
-  background-color: #2e6da4;
-}
-.navbar-default {
-  background-image: -webkit-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);
-  background-image: -o-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#f8f8f8));
-  background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
-  border-radius: 4px;
-  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);
-  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);
-}
-.navbar-default .navbar-nav > .open > a,
-.navbar-default .navbar-nav > .active > a {
-  background-image: -webkit-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);
-  background-image: -o-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#dbdbdb), to(#e2e2e2));
-  background-image: linear-gradient(to bottom, #dbdbdb 0%, #e2e2e2 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);
-  background-repeat: repeat-x;
-  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);
-  box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);
-}
-.navbar-brand,
-.navbar-nav > li > a {
-  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25);
-}
-.navbar-inverse {
-  background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);
-  background-image: -o-linear-gradient(top, #3c3c3c 0%, #222 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#3c3c3c), to(#222));
-  background-image: linear-gradient(to bottom, #3c3c3c 0%, #222 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
-  border-radius: 4px;
-}
-.navbar-inverse .navbar-nav > .open > a,
-.navbar-inverse .navbar-nav > .active > a {
-  background-image: -webkit-linear-gradient(top, #080808 0%, #0f0f0f 100%);
-  background-image: -o-linear-gradient(top, #080808 0%, #0f0f0f 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#080808), to(#0f0f0f));
-  background-image: linear-gradient(to bottom, #080808 0%, #0f0f0f 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);
-  background-repeat: repeat-x;
-  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);
-  box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);
-}
-.navbar-inverse .navbar-brand,
-.navbar-inverse .navbar-nav > li > a {
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-}
-.navbar-static-top,
-.navbar-fixed-top,
-.navbar-fixed-bottom {
-  border-radius: 0;
-}
-@media (max-width: 767px) {
-  .navbar .navbar-nav .open .dropdown-menu > .active > a,
-  .navbar .navbar-nav .open .dropdown-menu > .active > a:hover,
-  .navbar .navbar-nav .open .dropdown-menu > .active > a:focus {
-    color: #fff;
-    background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);
-    background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);
-    background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));
-    background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);
-    filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);
-    background-repeat: repeat-x;
-  }
-}
-.alert {
-  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);
-  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);
-  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);
-}
-.alert-success {
-  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);
-  background-image: -o-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#c8e5bc));
-  background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);
-  background-repeat: repeat-x;
-  border-color: #b2dba1;
-}
-.alert-info {
-  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);
-  background-image: -o-linear-gradient(top, #d9edf7 0%, #b9def0 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#b9def0));
-  background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);
-  background-repeat: repeat-x;
-  border-color: #9acfea;
-}
-.alert-warning {
-  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);
-  background-image: -o-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#fcf8e3), to(#f8efc0));
-  background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);
-  background-repeat: repeat-x;
-  border-color: #f5e79e;
-}
-.alert-danger {
-  background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);
-  background-image: -o-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#e7c3c3));
-  background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);
-  background-repeat: repeat-x;
-  border-color: #dca7a7;
-}
-.progress {
-  background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);
-  background-image: -o-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#ebebeb), to(#f5f5f5));
-  background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);
-  background-repeat: repeat-x;
-}
-.progress-bar {
-  background-image: -webkit-linear-gradient(top, #337ab7 0%, #286090 100%);
-  background-image: -o-linear-gradient(top, #337ab7 0%, #286090 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#286090));
-  background-image: linear-gradient(to bottom, #337ab7 0%, #286090 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);
-  background-repeat: repeat-x;
-}
-.progress-bar-success {
-  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);
-  background-image: -o-linear-gradient(top, #5cb85c 0%, #449d44 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#449d44));
-  background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);
-  background-repeat: repeat-x;
-}
-.progress-bar-info {
-  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);
-  background-image: -o-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#31b0d5));
-  background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);
-  background-repeat: repeat-x;
-}
-.progress-bar-warning {
-  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);
-  background-image: -o-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#ec971f));
-  background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);
-  background-repeat: repeat-x;
-}
-.progress-bar-danger {
-  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);
-  background-image: -o-linear-gradient(top, #d9534f 0%, #c9302c 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9534f), to(#c9302c));
-  background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);
-  background-repeat: repeat-x;
-}
-.progress-bar-striped {
-  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-}
-.list-group {
-  border-radius: 4px;
-  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
-  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
-}
-.list-group-item.active,
-.list-group-item.active:hover,
-.list-group-item.active:focus {
-  text-shadow: 0 -1px 0 #286090;
-  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2b669a 100%);
-  background-image: -o-linear-gradient(top, #337ab7 0%, #2b669a 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2b669a));
-  background-image: linear-gradient(to bottom, #337ab7 0%, #2b669a 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);
-  background-repeat: repeat-x;
-  border-color: #2b669a;
-}
-.list-group-item.active .badge,
-.list-group-item.active:hover .badge,
-.list-group-item.active:focus .badge {
-  text-shadow: none;
-}
-.panel {
-  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
-  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
-}
-.panel-default > .panel-heading {
-  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
-  background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8));
-  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
-  background-repeat: repeat-x;
-}
-.panel-primary > .panel-heading {
-  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);
-  background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));
-  background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);
-  background-repeat: repeat-x;
-}
-.panel-success > .panel-heading {
-  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);
-  background-image: -o-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#d0e9c6));
-  background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);
-  background-repeat: repeat-x;
-}
-.panel-info > .panel-heading {
-  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);
-  background-image: -o-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#c4e3f3));
-  background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);
-  background-repeat: repeat-x;
-}
-.panel-warning > .panel-heading {
-  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);
-  background-image: -o-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#fcf8e3), to(#faf2cc));
-  background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);
-  background-repeat: repeat-x;
-}
-.panel-danger > .panel-heading {
-  background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);
-  background-image: -o-linear-gradient(top, #f2dede 0%, #ebcccc 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#ebcccc));
-  background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);
-  background-repeat: repeat-x;
-}
-.well {
-  background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);
-  background-image: -o-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);
-  background-image: -webkit-gradient(linear, left top, left bottom, from(#e8e8e8), to(#f5f5f5));
-  background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);
-  background-repeat: repeat-x;
-  border-color: #dcdcdc;
-  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);
-  box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);
-}
-/*# sourceMappingURL=bootstrap-theme.css.map */
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css.map b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css.map
deleted file mode 100644
index 949d097..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"sources":["bootstrap-theme.css","less/theme.less","less/mixins/vendor-prefixes.less","less/mixins/gradients.less","less/mixins/reset-filter.less"],"names":[],"mappings":"AAAA;;;;GAIG;ACiBH;;;;;;EAME,yCAAA;EC2CA,4FAAA;EACQ,oFAAA;CFzDT;ACkBC;;;;;;;;;;;;ECsCA,yDAAA;EACQ,iDAAA;CF1CT;ACQC;;;;;;;;;;;;;;;;;;ECiCA,yBAAA;EACQ,iBAAA;CFrBT;AC7BD;;;;;;EAuBI,kBAAA;CDcH;AC2BC;;EAEE,uBAAA;CDzBH;AC8BD;EEvEI,sEAAA;EACA,iEAAA;EACA,2FAAA;EAAA,oEAAA;EACA,uHAAA;EClBF,oEAAA;EH8CA,4BAAA;EACA,sBAAA;EAyCA,0BAAA;EACA,mBAAA;CDtBD;AClBC;;EAEE,0BAAA;EACA,6BAAA;CDoBH;ACjBC;;EAEE,0BAAA;EACA,sBAAA;CDmBH;ACbG;;;;;;;;;;;;;;;;;;EAME,0BAAA;EACA,uBAAA;CD2BL;ACPD;EE5EI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EClBF,oEAAA;EH8CA,4BAAA;EACA,sBAAA;CD4DD;AC1DC;;EAEE,0BAAA;EACA,6BAAA;CD4DH;ACzDC;;EAEE,0BAAA;EACA,sBAAA;CD2DH;ACrDG;;;;;;;;;;;;;;;;;;EAME,0BAAA;EACA,uBAAA;CDmEL;AC9CD;EE7EI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EClBF,oEAAA;EH8CA,4BAAA;EACA,sBAAA;CDoGD;AClGC;;EAEE,0BAAA;EACA,6BAAA;CDoGH;ACjGC;;EAEE,0BAAA;EACA,sBAAA;CDmGH;AC7FG;;;;;;;;;;;;;;;;;;EAME,0BAAA;EACA,uBAAA;CD2GL;ACrFD;EE9EI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EClBF,oEAAA;EH8CA,4BAAA;EACA,sBAAA;CD4ID;AC1IC;;EAEE,0BAAA;EACA,6BAAA;CD4IH;ACzIC;;EAEE,0BAAA;EACA,sBAAA;CD2IH;ACrIG;;;;;;;;;;;;;;;;;;EAME,0BAAA;EACA,uBAAA;CDmJL;AC5HD;EE/EI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EClBF,oEAAA;EH8CA,4BAAA;EACA,sBAAA;CDoLD;AClLC;;EAEE,0BAAA;EACA,6BAAA;CDoLH;ACjLC;;EAEE,0BAAA;EACA,sBAAA;CDmLH;AC7KG;;;;;;;;;;;;;;;;;;EAME,0BAAA;EACA,uBAAA;CD2LL;ACnKD;EEhFI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EClBF,oEAAA;EH8CA,4BAAA;EACA,sBAAA;CD4ND;AC1NC;;EAEE,0BAAA;EACA,6BAAA;CD4NH;ACzNC;;EAEE,0BAAA;EACA,sBAAA;CD2NH;ACrNG;;;;;;;;;;;;;;;;;;EAME,0BAAA;EACA,uBAAA;CDmOL;ACpMD;;ECtCE,mDAAA;EACQ,2CAAA;CF8OT;AC/LD;;EEjGI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFgGF,0BAAA;CDqMD;ACnMD;;;EEtGI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFsGF,0BAAA;CDyMD;AChMD;EEnHI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;ECnBF,oEAAA;EHqIA,mBAAA;ECrEA,4FAAA;EACQ,oFAAA;CF4QT;AC3MD;;EEnHI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;ED6CF,yDAAA;EACQ,iDAAA;CFsRT;ACxMD;;EAEE,+CAAA;CD0MD;ACtMD;EEtII,sEAAA;EACA,iEAAA;EACA,2FAAA;EAAA,oEAAA;EACA,uHAAA;EACA,4BAAA;ECnBF,oEAAA;EHwJA,mBAAA;CD4MD;AC/MD;;EEtII,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;ED6CF,wDAAA;EACQ,gDAAA;CF6ST;ACzND;;EAYI,0CAAA;CDiNH;AC5MD;;;EAGE,iBAAA;CD8MD;AC1MD;EAEI;;;IAGE,YAAA;IEnKF,yEAAA;IACA,oEAAA;IACA,8FAAA;IAAA,uEAAA;IACA,uHAAA;IACA,4BAAA;GH+WD;CACF;ACrMD;EACE,8CAAA;EC/HA,2FAAA;EACQ,mFAAA;CFuUT;AC7LD;EE5LI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFoLF,sBAAA;CDyMD;ACpMD;EE7LI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFoLF,sBAAA;CDiND;AC3MD;EE9LI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFoLF,sBAAA;CDyND;AClND;EE/LI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFoLF,sBAAA;CDiOD;AClND;EEvMI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CH4ZH;AC/MD;EEjNI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHmaH;ACrND;EElNI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CH0aH;AC3ND;EEnNI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHibH;ACjOD;EEpNI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHwbH;ACvOD;EErNI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CH+bH;AC1OD;EExLI,8MAAA;EACA,yMAAA;EACA,sMAAA;CHqaH;ACtOD;EACE,mBAAA;EClLA,mDAAA;EAC
Q,2CAAA;CF2ZT;ACvOD;;;EAGE,8BAAA;EEzOE,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFuOF,sBAAA;CD6OD;AClPD;;;EAQI,kBAAA;CD+OH;ACrOD;ECvME,kDAAA;EACQ,0CAAA;CF+aT;AC/ND;EElQI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHoeH;ACrOD;EEnQI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CH2eH;AC3OD;EEpQI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHkfH;ACjPD;EErQI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHyfH;ACvPD;EEtQI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHggBH;AC7PD;EEvQI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHugBH;AC7PD;EE9QI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EF4QF,sBAAA;EC/NA,0FAAA;EACQ,kFAAA;CFmeT","file":"bootstrap-theme.css","sourcesContent":["/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.btn-default:active,\n.btn-primary:active,\n.btn-success:active,\n.btn-info:active,\n.btn-warning:active,\n.btn-danger:active,\n.btn-default.active,\n.btn-primary.active,\n.btn-success.active,\n.btn-info.active,\n.btn-warning.active,\n.btn-danger.active {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-default.disabled,\n.btn-primary.disabled,\n.btn-success.disabled,\n.btn-info.disabled,\n.btn-warning.disabled,\n.btn-danger.disabled,\n.btn-default[disabled],\n.btn-primary[disabled],\n.btn-success[disabled],\n.btn-info[disabled],\n.btn-warning[disabled],\n.btn-danger[disabled],\nfieldset[disabled] .btn-default,\nfieldset[disabled] .btn-primary,\nfieldset[disabled] .btn-success,\nfieldset[disabled] .btn-info,\nfieldset[disabled] .btn-warning,\nfieldset[disabled] .btn-danger {\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\n.btn-default .badge,\n.btn-primary .badge,\n.btn-success .badge,\n.btn-info .badge,\n.btn-warning .badge,\n.btn-danger .badge {\n  text-shadow: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n}\n.btn-default {\n  background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n  background-image: -o-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n  background-image: linear-gradient(to bottom, #fff 0%, #e0e0e0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #dbdbdb;\n  text-shadow: 0 1px 0 #fff;\n  border-color: #ccc;\n}\n.btn-default:hover,\n.btn-default:focus {\n  background-color: #e0e0e0;\n  background-position: 0 -15px;\n}\n.btn-default:active,\n.btn-default.active {\n  background-color: #e0e0e0;\n  border-color: #dbdbdb;\n}\n.btn-default.disabled,\n.btn-default[disabled],\nfieldset[disabled] .btn-default,\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] 
.btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus,\n.btn-default.disabled:active,\n.btn-default[disabled]:active,\nfieldset[disabled] .btn-default:active,\n.btn-default.disabled.active,\n.btn-default[disabled].active,\nfieldset[disabled] .btn-default.active {\n  background-color: #e0e0e0;\n  background-image: none;\n}\n.btn-primary {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #265a88 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #265a88 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #265a88 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #245580;\n}\n.btn-primary:hover,\n.btn-primary:focus {\n  background-color: #265a88;\n  background-position: 0 -15px;\n}\n.btn-primary:active,\n.btn-primary.active {\n  background-color: #265a88;\n  border-color: #245580;\n}\n.btn-primary.disabled,\n.btn-primary[disabled],\nfieldset[disabled] .btn-primary,\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus,\n.btn-primary.disabled:active,\n.btn-primary[disabled]:active,\nfieldset[disabled] .btn-primary:active,\n.btn-primary.disabled.active,\n.btn-primary[disabled].active,\nfieldset[disabled] .btn-primary.active {\n  background-color: #265a88;\n  background-image: none;\n}\n.btn-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);\n  background-image: -o-linear-gradient(top, #5cb85c 0%, #419641 100%);\n  background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #3e8f3e;\n}\n.btn-success:hover,\n.btn-success:focus {\n  background-color: #419641;\n  background-position: 0 -15px;\n}\n.btn-success:active,\n.btn-success.active {\n  background-color: #419641;\n  border-color: #3e8f3e;\n}\n.btn-success.disabled,\n.btn-success[disabled],\nfieldset[disabled] .btn-success,\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus,\n.btn-success.disabled:active,\n.btn-success[disabled]:active,\nfieldset[disabled] .btn-success:active,\n.btn-success.disabled.active,\n.btn-success[disabled].active,\nfieldset[disabled] .btn-success.active {\n  background-color: #419641;\n  background-image: none;\n}\n.btn-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n  background-image: -o-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n  background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  
background-repeat: repeat-x;\n  border-color: #28a4c9;\n}\n.btn-info:hover,\n.btn-info:focus {\n  background-color: #2aabd2;\n  background-position: 0 -15px;\n}\n.btn-info:active,\n.btn-info.active {\n  background-color: #2aabd2;\n  border-color: #28a4c9;\n}\n.btn-info.disabled,\n.btn-info[disabled],\nfieldset[disabled] .btn-info,\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus,\n.btn-info.disabled:active,\n.btn-info[disabled]:active,\nfieldset[disabled] .btn-info:active,\n.btn-info.disabled.active,\n.btn-info[disabled].active,\nfieldset[disabled] .btn-info.active {\n  background-color: #2aabd2;\n  background-image: none;\n}\n.btn-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n  background-image: -o-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n  background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #e38d13;\n}\n.btn-warning:hover,\n.btn-warning:focus {\n  background-color: #eb9316;\n  background-position: 0 -15px;\n}\n.btn-warning:active,\n.btn-warning.active {\n  background-color: #eb9316;\n  border-color: #e38d13;\n}\n.btn-warning.disabled,\n.btn-warning[disabled],\nfieldset[disabled] .btn-warning,\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus,\n.btn-warning.disabled:active,\n.btn-warning[disabled]:active,\nfieldset[disabled] .btn-warning:active,\n.btn-warning.disabled.active,\n.btn-warning[disabled].active,\nfieldset[disabled] .btn-warning.active {\n  background-color: #eb9316;\n  background-image: none;\n}\n.btn-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n  background-image: -o-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n  background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #b92c28;\n}\n.btn-danger:hover,\n.btn-danger:focus {\n  background-color: #c12e2a;\n  background-position: 0 -15px;\n}\n.btn-danger:active,\n.btn-danger.active {\n  background-color: #c12e2a;\n  border-color: #b92c28;\n}\n.btn-danger.disabled,\n.btn-danger[disabled],\nfieldset[disabled] .btn-danger,\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus,\n.btn-danger.disabled:active,\n.btn-danger[disabled]:active,\nfieldset[disabled] .btn-danger:active,\n.btn-danger.disabled.active,\n.btn-danger[disabled].active,\nfieldset[disabled] .btn-danger.active {\n  background-color: #c12e2a;\n  
background-image: none;\n}\n.thumbnail,\n.img-thumbnail {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n  background-color: #e8e8e8;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n  background-repeat: repeat-x;\n  background-color: #2e6da4;\n}\n.navbar-default {\n  background-image: -webkit-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n  background-image: -o-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n  background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n  background-image: -o-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n  background-image: linear-gradient(to bottom, #dbdbdb 0%, #e2e2e2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);\n  background-repeat: repeat-x;\n  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n}\n.navbar-brand,\n.navbar-nav > li > a {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25);\n}\n.navbar-inverse {\n  background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);\n  background-image: -o-linear-gradient(top, #3c3c3c 0%, #222 100%);\n  background-image: linear-gradient(to bottom, #3c3c3c 0%, #222 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  border-radius: 4px;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n  background-image: -o-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n  background-image: linear-gradient(to bottom, #080808 0%, #0f0f0f 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);\n  background-repeat: repeat-x;\n  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n  box-shadow: inset 0 3px 
9px rgba(0, 0, 0, 0.25);\n}\n.navbar-inverse .navbar-brand,\n.navbar-inverse .navbar-nav > li > a {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  border-radius: 0;\n}\n@media (max-width: 767px) {\n  .navbar .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n    background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n    background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n    filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n    background-repeat: repeat-x;\n  }\n}\n.alert {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.alert-success {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n  background-image: -o-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n  background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #b2dba1;\n}\n.alert-info {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n  background-image: -o-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n  background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #9acfea;\n}\n.alert-warning {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n  background-image: -o-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n  background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #f5e79e;\n}\n.alert-danger {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n  background-image: -o-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n  background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dca7a7;\n}\n.progress {\n  background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n  background-image: -o-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n  background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #286090 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #286090 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #286090 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', 
endColorstr='#ff286090', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n  background-image: -o-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n  background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n  background-image: -o-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n  background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n  background-image: -o-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n  background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n  background-image: -o-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n  background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-striped {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.list-group {\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  text-shadow: 0 -1px 0 #286090;\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2b669a 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #2b669a;\n}\n.list-group-item.active .badge,\n.list-group-item.active:hover .badge,\n.list-group-item.active:focus .badge {\n  text-shadow: none;\n}\n.panel {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.panel-default > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: 
progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-primary > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-success > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n  background-image: -o-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n  background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-info > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n  background-image: -o-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n  background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-warning > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n  background-image: -o-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n  background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-danger > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n  background-image: -o-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n  background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.well {\n  background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n  background-image: -o-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n  background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dcdcdc;\n  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n  box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n}\n/*# sourceMappingURL=bootstrap-theme.css.map */","// stylelint-disable selector-no-qualifying-type, selector-max-compound-selectors\n\n/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n\n//\n// Load core variables and mixins\n// --------------------------------------------------\n\n@import \"variables.less\";\n@import \"mixins.less\";\n\n\n//\n// Buttons\n// --------------------------------------------------\n\n// Common styles\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, .2);\n  @shadow: inset 0 1px 0 rgba(255, 
255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);\n  .box-shadow(@shadow);\n\n  // Reset the shadow\n  &:active,\n  &.active {\n    .box-shadow(inset 0 3px 5px rgba(0, 0, 0, .125));\n  }\n\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    .box-shadow(none);\n  }\n\n  .badge {\n    text-shadow: none;\n  }\n}\n\n// Mixin for generating new styles\n.btn-styles(@btn-color: #555) {\n  #gradient > .vertical(@start-color: @btn-color; @end-color: darken(@btn-color, 12%));\n  .reset-filter(); // Disable gradients for IE9 because filter bleeds through rounded corners; see https://github.com/twbs/bootstrap/issues/10620\n  background-repeat: repeat-x;\n  border-color: darken(@btn-color, 14%);\n\n  &:hover,\n  &:focus  {\n    background-color: darken(@btn-color, 12%);\n    background-position: 0 -15px;\n  }\n\n  &:active,\n  &.active {\n    background-color: darken(@btn-color, 12%);\n    border-color: darken(@btn-color, 14%);\n  }\n\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    &,\n    &:hover,\n    &:focus,\n    &.focus,\n    &:active,\n    &.active {\n      background-color: darken(@btn-color, 12%);\n      background-image: none;\n    }\n  }\n}\n\n// Common styles\n.btn {\n  // Remove the gradient for the pressed/active state\n  &:active,\n  &.active {\n    background-image: none;\n  }\n}\n\n// Apply the mixin to the buttons\n.btn-default {\n  .btn-styles(@btn-default-bg);\n  text-shadow: 0 1px 0 #fff;\n  border-color: #ccc;\n}\n.btn-primary { .btn-styles(@btn-primary-bg); }\n.btn-success { .btn-styles(@btn-success-bg); }\n.btn-info    { .btn-styles(@btn-info-bg); }\n.btn-warning { .btn-styles(@btn-warning-bg); }\n.btn-danger  { .btn-styles(@btn-danger-bg); }\n\n\n//\n// Images\n// --------------------------------------------------\n\n.thumbnail,\n.img-thumbnail {\n  .box-shadow(0 1px 2px rgba(0, 0, 0, .075));\n}\n\n\n//\n// Dropdowns\n// --------------------------------------------------\n\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  #gradient > .vertical(@start-color: @dropdown-link-hover-bg; @end-color: darken(@dropdown-link-hover-bg, 5%));\n  background-color: darken(@dropdown-link-hover-bg, 5%);\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n  background-color: darken(@dropdown-link-active-bg, 5%);\n}\n\n\n//\n// Navbar\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n  #gradient > .vertical(@start-color: lighten(@navbar-default-bg, 10%); @end-color: @navbar-default-bg);\n  .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered\n  border-radius: @navbar-border-radius;\n  @shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);\n  .box-shadow(@shadow);\n\n  .navbar-nav > .open > a,\n  .navbar-nav > .active > a {\n    #gradient > .vertical(@start-color: darken(@navbar-default-link-active-bg, 5%); @end-color: darken(@navbar-default-link-active-bg, 2%));\n    .box-shadow(inset 0 3px 9px rgba(0, 0, 0, .075));\n  }\n}\n.navbar-brand,\n.navbar-nav > li > a {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, .25);\n}\n\n// Inverted navbar\n.navbar-inverse {\n  #gradient > .vertical(@start-color: lighten(@navbar-inverse-bg, 10%); @end-color: @navbar-inverse-bg);\n  .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered; see 
https://github.com/twbs/bootstrap/issues/10257\n  border-radius: @navbar-border-radius;\n  .navbar-nav > .open > a,\n  .navbar-nav > .active > a {\n    #gradient > .vertical(@start-color: @navbar-inverse-link-active-bg; @end-color: lighten(@navbar-inverse-link-active-bg, 2.5%));\n    .box-shadow(inset 0 3px 9px rgba(0, 0, 0, .25));\n  }\n\n  .navbar-brand,\n  .navbar-nav > li > a {\n    text-shadow: 0 -1px 0 rgba(0, 0, 0, .25);\n  }\n}\n\n// Undo rounded corners in static and fixed navbars\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  border-radius: 0;\n}\n\n// Fix active state of dropdown items in collapsed mode\n@media (max-width: @grid-float-breakpoint-max) {\n  .navbar .navbar-nav .open .dropdown-menu > .active > a {\n    &,\n    &:hover,\n    &:focus {\n      color: #fff;\n      #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n    }\n  }\n}\n\n\n//\n// Alerts\n// --------------------------------------------------\n\n// Common styles\n.alert {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, .2);\n  @shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);\n  .box-shadow(@shadow);\n}\n\n// Mixin for generating new styles\n.alert-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 7.5%));\n  border-color: darken(@color, 15%);\n}\n\n// Apply the mixin to the alerts\n.alert-success    { .alert-styles(@alert-success-bg); }\n.alert-info       { .alert-styles(@alert-info-bg); }\n.alert-warning    { .alert-styles(@alert-warning-bg); }\n.alert-danger     { .alert-styles(@alert-danger-bg); }\n\n\n//\n// Progress bars\n// --------------------------------------------------\n\n// Give the progress background some depth\n.progress {\n  #gradient > .vertical(@start-color: darken(@progress-bg, 4%); @end-color: @progress-bg)\n}\n\n// Mixin for generating new styles\n.progress-bar-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 10%));\n}\n\n// Apply the mixin to the progress bars\n.progress-bar            { .progress-bar-styles(@progress-bar-bg); }\n.progress-bar-success    { .progress-bar-styles(@progress-bar-success-bg); }\n.progress-bar-info       { .progress-bar-styles(@progress-bar-info-bg); }\n.progress-bar-warning    { .progress-bar-styles(@progress-bar-warning-bg); }\n.progress-bar-danger     { .progress-bar-styles(@progress-bar-danger-bg); }\n\n// Reset the striped class because our mixins don't do multiple gradients and\n// the above custom styles override the new `.progress-bar-striped` in v3.2.0.\n.progress-bar-striped {\n  #gradient > .striped();\n}\n\n\n//\n// List groups\n// --------------------------------------------------\n\n.list-group {\n  border-radius: @border-radius-base;\n  .box-shadow(0 1px 2px rgba(0, 0, 0, .075));\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  text-shadow: 0 -1px 0 darken(@list-group-active-bg, 10%);\n  #gradient > .vertical(@start-color: @list-group-active-bg; @end-color: darken(@list-group-active-bg, 7.5%));\n  border-color: darken(@list-group-active-border, 7.5%);\n\n  .badge {\n    text-shadow: none;\n  }\n}\n\n\n//\n// Panels\n// --------------------------------------------------\n\n// Common styles\n.panel {\n  .box-shadow(0 1px 2px rgba(0, 0, 0, .05));\n}\n\n// Mixin for generating new styles\n.panel-heading-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 5%));\n}\n\n// Apply the 
mixin to the panel headings only\n.panel-default > .panel-heading   { .panel-heading-styles(@panel-default-heading-bg); }\n.panel-primary > .panel-heading   { .panel-heading-styles(@panel-primary-heading-bg); }\n.panel-success > .panel-heading   { .panel-heading-styles(@panel-success-heading-bg); }\n.panel-info > .panel-heading      { .panel-heading-styles(@panel-info-heading-bg); }\n.panel-warning > .panel-heading   { .panel-heading-styles(@panel-warning-heading-bg); }\n.panel-danger > .panel-heading    { .panel-heading-styles(@panel-danger-heading-bg); }\n\n\n//\n// Wells\n// --------------------------------------------------\n\n.well {\n  #gradient > .vertical(@start-color: darken(@well-bg, 5%); @end-color: @well-bg);\n  border-color: darken(@well-bg, 10%);\n  @shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);\n  .box-shadow(@shadow);\n}\n","// stylelint-disable indentation, property-no-vendor-prefix, selector-no-vendor-prefix\n\n// Vendor Prefixes\n//\n// All vendor mixins are deprecated as of v3.2.0 due to the introduction of\n// Autoprefixer in our Gruntfile. They have been removed in v4.\n\n// - Animations\n// - Backface visibility\n// - Box shadow\n// - Box sizing\n// - Content columns\n// - Hyphens\n// - Placeholder text\n// - Transformations\n// - Transitions\n// - User Select\n\n\n// Animations\n.animation(@animation) {\n  -webkit-animation: @animation;\n       -o-animation: @animation;\n          animation: @animation;\n}\n.animation-name(@name) {\n  -webkit-animation-name: @name;\n          animation-name: @name;\n}\n.animation-duration(@duration) {\n  -webkit-animation-duration: @duration;\n          animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n  -webkit-animation-timing-function: @timing-function;\n          animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n  -webkit-animation-delay: @delay;\n          animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n  -webkit-animation-iteration-count: @iteration-count;\n          animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n  -webkit-animation-direction: @direction;\n          animation-direction: @direction;\n}\n.animation-fill-mode(@fill-mode) {\n  -webkit-animation-fill-mode: @fill-mode;\n          animation-fill-mode: @fill-mode;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n\n.backface-visibility(@visibility) {\n  -webkit-backface-visibility: @visibility;\n     -moz-backface-visibility: @visibility;\n          backface-visibility: @visibility;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n// supported browsers that have box shadow capabilities now support it.\n\n.box-shadow(@shadow) {\n  -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n          box-shadow: @shadow;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n  -webkit-box-sizing: @boxmodel;\n     -moz-box-sizing: @boxmodel;\n          box-sizing: @boxmodel;\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n  -webkit-column-count: @column-count;\n     -moz-column-count: @column-count;\n          column-count: @column-count;\n  -webkit-column-gap: @column-gap;\n     -moz-column-gap: @column-gap;\n          column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n  
-webkit-hyphens: @mode;\n     -moz-hyphens: @mode;\n      -ms-hyphens: @mode; // IE10+\n       -o-hyphens: @mode;\n          hyphens: @mode;\n  word-wrap: break-word;\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n  // Firefox\n  &::-moz-placeholder {\n    color: @color;\n    opacity: 1; // Override Firefox's unusual default opacity; see https://github.com/twbs/bootstrap/pull/11526\n  }\n  &:-ms-input-placeholder { color: @color; } // Internet Explorer 10+\n  &::-webkit-input-placeholder  { color: @color; } // Safari and Chrome\n}\n\n// Transformations\n.scale(@ratio) {\n  -webkit-transform: scale(@ratio);\n      -ms-transform: scale(@ratio); // IE9 only\n       -o-transform: scale(@ratio);\n          transform: scale(@ratio);\n}\n.scale(@ratioX; @ratioY) {\n  -webkit-transform: scale(@ratioX, @ratioY);\n      -ms-transform: scale(@ratioX, @ratioY); // IE9 only\n       -o-transform: scale(@ratioX, @ratioY);\n          transform: scale(@ratioX, @ratioY);\n}\n.scaleX(@ratio) {\n  -webkit-transform: scaleX(@ratio);\n      -ms-transform: scaleX(@ratio); // IE9 only\n       -o-transform: scaleX(@ratio);\n          transform: scaleX(@ratio);\n}\n.scaleY(@ratio) {\n  -webkit-transform: scaleY(@ratio);\n      -ms-transform: scaleY(@ratio); // IE9 only\n       -o-transform: scaleY(@ratio);\n          transform: scaleY(@ratio);\n}\n.skew(@x; @y) {\n  -webkit-transform: skewX(@x) skewY(@y);\n      -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n       -o-transform: skewX(@x) skewY(@y);\n          transform: skewX(@x) skewY(@y);\n}\n.translate(@x; @y) {\n  -webkit-transform: translate(@x, @y);\n      -ms-transform: translate(@x, @y); // IE9 only\n       -o-transform: translate(@x, @y);\n          transform: translate(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n  -webkit-transform: translate3d(@x, @y, @z);\n          transform: translate3d(@x, @y, @z);\n}\n.rotate(@degrees) {\n  -webkit-transform: rotate(@degrees);\n      -ms-transform: rotate(@degrees); // IE9 only\n       -o-transform: rotate(@degrees);\n          transform: rotate(@degrees);\n}\n.rotateX(@degrees) {\n  -webkit-transform: rotateX(@degrees);\n      -ms-transform: rotateX(@degrees); // IE9 only\n       -o-transform: rotateX(@degrees);\n          transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n  -webkit-transform: rotateY(@degrees);\n      -ms-transform: rotateY(@degrees); // IE9 only\n       -o-transform: rotateY(@degrees);\n          transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n  -webkit-perspective: @perspective;\n     -moz-perspective: @perspective;\n          perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n  -webkit-perspective-origin: @perspective;\n     -moz-perspective-origin: @perspective;\n          perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n  -webkit-transform-origin: @origin;\n     -moz-transform-origin: @origin;\n      -ms-transform-origin: @origin; // IE9 only\n          transform-origin: @origin;\n}\n\n\n// Transitions\n\n.transition(@transition) {\n  -webkit-transition: @transition;\n       -o-transition: @transition;\n          transition: @transition;\n}\n.transition-property(@transition-property) {\n  -webkit-transition-property: @transition-property;\n          transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n  -webkit-transition-delay: @transition-delay;\n          transition-delay: 
@transition-delay;\n}\n.transition-duration(@transition-duration) {\n  -webkit-transition-duration: @transition-duration;\n          transition-duration: @transition-duration;\n}\n.transition-timing-function(@timing-function) {\n  -webkit-transition-timing-function: @timing-function;\n          transition-timing-function: @timing-function;\n}\n.transition-transform(@transition) {\n  -webkit-transition: -webkit-transform @transition;\n     -moz-transition: -moz-transform @transition;\n       -o-transition: -o-transform @transition;\n          transition: transform @transition;\n}\n\n\n// User select\n// For selecting text on the page\n\n.user-select(@select) {\n  -webkit-user-select: @select;\n     -moz-user-select: @select;\n      -ms-user-select: @select; // IE10+\n          user-select: @select;\n}\n","// stylelint-disable value-no-vendor-prefix, selector-max-id\n\n#gradient {\n\n  // Horizontal gradient, from left to right\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Opera 12\n    background-image: linear-gradient(to right, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down\n    background-repeat: repeat-x;\n  }\n\n  // Vertical gradient, from top to bottom\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Opera 12\n    background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down\n    background-repeat: repeat-x;\n  }\n\n  .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n    background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(@deg, @start-color, @end-color); // Opera 12\n    background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n  }\n  .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    
background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n    background-repeat: no-repeat;\n  }\n  .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n    background-repeat: no-repeat;\n  }\n  .radial(@inner-color: #555; @outer-color: #333) {\n    background-image: -webkit-radial-gradient(circle, @inner-color, @outer-color);\n    background-image: radial-gradient(circle, @inner-color, @outer-color);\n    background-repeat: no-repeat;\n  }\n  .striped(@color: rgba(255, 255, 255, .15); @angle: 45deg) {\n    background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: -o-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n  }\n}\n","// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n\n.reset-filter() {\n  filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n"]}
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css
deleted file mode 100644
index 2a69f48..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css
+++ /dev/null
@@ -1,6 +0,0 @@
-/*!
- * Bootstrap v3.4.1 (https://getbootstrap.com/)
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- */.btn-danger,.btn-default,.btn-info,.btn-primary,.btn-success,.btn-warning{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-danger.active,.btn-danger:active,.btn-default.active,.btn-default:active,.btn-info.active,.btn-info:active,.btn-primary.active,.btn-primary:active,.btn-success.active,.btn-success:active,.btn-warning.active,.btn-warning:active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-danger.disabled,.btn-danger[disabled],.btn-default.disabled,.btn-default[disabled],.btn-info.disabled,.btn-info[disabled],.btn-primary.disabled,.btn-primary[disabled],.btn-success.disabled,.btn-success[disabled],.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-danger,fieldset[disabled] .btn-default,fieldset[disabled] .btn-info,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-success,fieldset[disabled] .btn-warning{-webkit-box-shadow:none;box-shadow:none}.btn-danger .badge,.btn-default .badge,.btn-info .badge,.btn-primary .badge,.btn-success .badge,.btn-warning .badge{text-shadow:none}.btn.active,.btn:active{background-image:none}.btn-default{background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-o-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#e0e0e0));background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;text-shadow:0 1px 0 #fff;border-color:#ccc}.btn-default:focus,.btn-default:hover{background-color:#e0e0e0;background-position:0 -15px}.btn-default.active,.btn-default:active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#e0e0e0;background-image:none}.btn-primary{background-image:-webkit-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-o-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#265a88));background-image:linear-gradient(to bottom,#337ab7 0,#265a88 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#245580}.btn-primary:focus,.btn-primary:hover{background-color:#265a88;background-position:0 
-15px}.btn-primary.active,.btn-primary:active{background-color:#265a88;border-color:#245580}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#265a88;background-image:none}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#419641));background-image:linear-gradient(to bottom,#5cb85c 0,#419641 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:focus,.btn-success:hover{background-color:#419641;background-position:0 -15px}.btn-success.active,.btn-success:active{background-color:#419641;border-color:#3e8f3e}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#419641;background-image:none}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#2aabd2));background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:focus,.btn-info:hover{background-color:#2aabd2;background-position:0 -15px}.btn-info.active,.btn-info:active{background-color:#2aabd2;border-color:#28a4c9}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#2aabd2;background-image:none}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#eb9316));background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 
100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:focus,.btn-warning:hover{background-color:#eb9316;background-position:0 -15px}.btn-warning.active,.btn-warning:active{background-color:#eb9316;border-color:#e38d13}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#eb9316;background-image:none}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c12e2a));background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:focus,.btn-danger:hover{background-color:#c12e2a;background-position:0 -15px}.btn-danger.active,.btn-danger:active{background-color:#c12e2a;border-color:#b92c28}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#c12e2a;background-image:none}.img-thumbnail,.thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x;background-color:#e8e8e8}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x;background-color:#2e6da4}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 
100%);background-image:-o-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#f8f8f8));background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-o-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dbdbdb),to(#e2e2e2));background-image:linear-gradient(to bottom,#dbdbdb 0,#e2e2e2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-o-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#3c3c3c),to(#222));background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);border-radius:4px}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-o-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#080808),to(#0f0f0f));background-image:linear-gradient(to bottom,#080808 0,#0f0f0f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-fixed-bottom,.navbar-fixed-top,.navbar-static-top{border-radius:0}@media (max-width:767px){.navbar .navbar-nav .open .dropdown-menu>.active>a,.navbar .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#c8e5bc 
100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#c8e5bc));background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);background-repeat:repeat-x;border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#b9def0));background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);background-repeat:repeat-x;border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#f8efc0));background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);background-repeat:repeat-x;border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-o-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#e7c3c3));background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);background-repeat:repeat-x;border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#ebebeb),to(#f5f5f5));background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x}.progress-bar{background-image:-webkit-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-o-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#286090));background-image:linear-gradient(to bottom,#337ab7 0,#286090 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);background-repeat:repeat-x}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#449d44));background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);background-repeat:repeat-x}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#31b0d5));background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', 
GradientType=0);background-repeat:repeat-x}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#ec971f));background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);background-repeat:repeat-x}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c9302c));background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);background-repeat:repeat-x}.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{text-shadow:0 -1px 0 #286090;background-image:-webkit-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2b669a));background-image:linear-gradient(to bottom,#337ab7 0,#2b669a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);background-repeat:repeat-x;border-color:#2b669a}.list-group-item.active .badge,.list-group-item.active:focus .badge,.list-group-item.active:hover .badge{text-shadow:none}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-webkit-gradient(linear,left 
top,left bottom,from(#dff0d8),to(#d0e9c6));background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);background-repeat:repeat-x}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#c4e3f3));background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);background-repeat:repeat-x}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#faf2cc));background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);background-repeat:repeat-x}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-o-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#ebcccc));background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);background-repeat:repeat-x}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#e8e8e8),to(#f5f5f5));background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x;border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)}
-/*# sourceMappingURL=bootstrap-theme.min.css.map */
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css.map b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css.map
deleted file mode 100644
index 5d75106..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"sources":["bootstrap-theme.css","dist/css/bootstrap-theme.css","less/theme.less","less/mixins/vendor-prefixes.less","less/mixins/gradients.less","less/mixins/reset-filter.less"],"names":[],"mappings":"AAAA;;;;ACUA,YCWA,aDbA,UAFA,aACA,aAEA,aCkBE,YAAA,EAAA,KAAA,EAAA,eC2CA,mBAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,iBF7CV,mBANA,mBACA,oBCWE,oBDRF,iBANA,iBAIA,oBANA,oBAOA,oBANA,oBAQA,oBANA,oBEmDE,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBFpCV,qBAMA,sBCJE,sBDDF,uBAHA,mBAMA,oBARA,sBAMA,uBALA,sBAMA,uBAJA,sBAMA,uBAOA,+BALA,gCAGA,6BAFA,gCACA,gCAEA,gCEwBE,mBAAA,KACQ,WAAA,KFfV,mBCnCA,oBDiCA,iBAFA,oBACA,oBAEA,oBCXI,YAAA,KDgBJ,YCyBE,YAEE,iBAAA,KAKJ,aEvEI,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QAyCA,YAAA,EAAA,IAAA,EAAA,KACA,aAAA,KDnBF,mBCrBE,mBAEE,iBAAA,QACA,oBAAA,EAAA,MDuBJ,oBCpBE,oBAEE,iBAAA,QACA,aAAA,QAMA,sBD8BJ,6BANA,4BAGA,6BANA,4BAHA,4BAFA,uBAeA,8BANA,6BAGA,8BANA,6BAHA,6BAFA,gCAeA,uCANA,sCAGA,uCANA,sCAHA,sCCdM,iBAAA,QACA,iBAAA,KAoBN,aE5EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QDgEF,mBC9DE,mBAEE,iBAAA,QACA,oBAAA,EAAA,MDgEJ,oBC7DE,oBAEE,iBAAA,QACA,aAAA,QAMA,sBDuEJ,6BANA,4BAGA,6BANA,4BAHA,4BAFA,uBAeA,8BANA,6BAGA,8BANA,6BAHA,6BAFA,gCAeA,uCANA,sCAGA,uCANA,sCAHA,sCCvDM,iBAAA,QACA,iBAAA,KAqBN,aE7EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QDyGF,mBCvGE,mBAEE,iBAAA,QACA,oBAAA,EAAA,MDyGJ,oBCtGE,oBAEE,iBAAA,QACA,aAAA,QAMA,sBDgHJ,6BANA,4BAGA,6BANA,4BAHA,4BAFA,uBAeA,8BANA,6BAGA,8BANA,6BAHA,6BAFA,gCAeA,uCANA,sCAGA,uCANA,sCAHA,sCChGM,iBAAA,QACA,iBAAA,KAsBN,UE9EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QDkJF,gBChJE,gBAEE,iBAAA,QACA,oBAAA,EAAA,MDkJJ,iBC/IE,iBAEE,iBAAA,QACA,aAAA,QAMA,mBDyJJ,0BANA,yBAGA,0BANA,yBAHA,yBAFA,oBAeA,2BANA,0BAGA,2BANA,0BAHA,0BAFA,6BAeA,oCANA,mCAGA,oCANA,mCAHA,mCCzIM,iBAAA,QACA,iBAAA,KAuBN,aE/EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QD2LF,mBCzLE,mBAEE,iBAAA,QACA,oBAAA,EAAA,MD2LJ,oBCxLE,oBAEE,iBAAA,QACA,aAAA,QAMA,sBDkMJ,6BANA,4BAGA,6BANA,4BAHA,4BAFA,uBAeA,8BANA,6BAGA,8BANA,6BAHA,6BAFA,gCAeA,uCANA,sCAGA,uCANA,sCAHA,sCClLM,iBAAA,QACA,iBAAA,KAwBN,YEhFI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QDoOF,kBClOE,kBAEE,iBAAA,QACA,oBAAA,EAAA,MDoOJ,mBCjOE,mBAEE,iBAAA,QACA,aAAA,QAMA,qBD2OJ,4BANA,2BAGA,4BANA,2BAHA,2BAFA,sBAeA,6BANA,4BAGA,6BANA,4BAHA,4BAFA,+BAeA,sCANA,qCAGA,sCANA,qCAHA,qCC3NM,iBAAA,QACA,iBAAA,KD2ON,eC5MA,WCtCE,mBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,EAAA,IAAA,IAAA,iBFsPV,0BCvMA,0BEjGI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFgGF,iBAAA,QAEF,yBD6MA,+BADA,+BGlTI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFsGF,iBAAA,QASF,gBEnHI,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GACA,kBAAA,SCnBF,OAAA,0DHqIA,cAAA,ICrEA,mBAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,iBFuRV,sCCtNA,oCEnHI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SD6CF,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBD8EV,cDoNA,iBClNE,YAAA,EAAA,IAAA,EAAA,sBAIF,gBEtII,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GACA,kBAAA,SCnBF,OAAA,0DHwJA,cAAA,IDyNF,sCC5NA,oCEtII,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SD6CF,mBAAA,MAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,gBDoFV,8BDuOA,iCC3NI
,YAAA,EAAA,KAAA,EAAA,gBDgOJ,qBADA,kBC1NA,mBAGE,cAAA,EAIF,yBAEI,mDDwNF,yDADA,yDCpNI,MAAA,KEnKF,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,UF2KJ,OACE,YAAA,EAAA,IAAA,EAAA,qBC/HA,mBAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,gBD0IV,eE5LI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoLF,aAAA,QAKF,YE7LI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoLF,aAAA,QAMF,eE9LI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoLF,aAAA,QAOF,cE/LI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoLF,aAAA,QAeF,UEvMI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF6MJ,cEjNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8MJ,sBElNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF+MJ,mBEnNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFgNJ,sBEpNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFiNJ,qBErNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFqNJ,sBExLI,iBAAA,yKACA,iBAAA,oKACA,iBAAA,iKF+LJ,YACE,cAAA,IClLA,mBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,EAAA,IAAA,IAAA,iBDoLV,wBDiQA,8BADA,8BC7PE,YAAA,EAAA,KAAA,EAAA,QEzOE,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFuOF,aAAA,QALF,+BD6QA,qCADA,qCCpQI,YAAA,KAUJ,OCvME,mBAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,EAAA,IAAA,IAAA,gBDgNV,8BElQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF+PJ,8BEnQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFgQJ,8BEpQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFiQJ,2BErQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFkQJ,8BEtQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFmQJ,6BEvQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF0QJ,ME9QI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF4QF,aAAA,QC/NA,mBAAA,MAAA,EAAA,IAAA,IAAA,eAAA,CAAA,EAAA,IAAA,EAAA,qBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,eAAA,CAAA,EAAA,IAAA,EAAA","sourcesContent":["/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.btn-default:active,\n.btn-primary:active,\n.btn-success:active,\n.btn-info:active,\n.btn-warning:active,\n.btn-danger:active,\n.btn-default.active,\n.btn-primary.active,\n.btn-success.active,\n.btn-info.active,\n.btn-warning.active,\n.btn-danger.active {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-default.disabled,\n.btn-primary.disabled,\n.btn-success.disabled,\n.btn-info.disabled,\n.btn-warning.disabled,\n.btn-danger.disabled,\n.btn-default[disabled],\n.btn-primary[disabled],\n.btn-success[disabled],\n.btn-info[disabled],\n.btn-warning[disabled],\n.btn-danger[disabled],\nfieldset[disabled] .btn-default,\nfieldset[disabled] .btn-primary,\nfieldset[disabled] .btn-success,\nfieldset[disabled] .btn-info,\nfieldset[disabled] .btn-warning,\nfieldset[disabled] .btn-danger {\n  -webkit-box-shadow: none;\n  box-shadow: 
none;\n}\n.btn-default .badge,\n.btn-primary .badge,\n.btn-success .badge,\n.btn-info .badge,\n.btn-warning .badge,\n.btn-danger .badge {\n  text-shadow: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n}\n.btn-default {\n  background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n  background-image: -o-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n  background-image: linear-gradient(to bottom, #fff 0%, #e0e0e0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #dbdbdb;\n  text-shadow: 0 1px 0 #fff;\n  border-color: #ccc;\n}\n.btn-default:hover,\n.btn-default:focus {\n  background-color: #e0e0e0;\n  background-position: 0 -15px;\n}\n.btn-default:active,\n.btn-default.active {\n  background-color: #e0e0e0;\n  border-color: #dbdbdb;\n}\n.btn-default.disabled,\n.btn-default[disabled],\nfieldset[disabled] .btn-default,\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus,\n.btn-default.disabled:active,\n.btn-default[disabled]:active,\nfieldset[disabled] .btn-default:active,\n.btn-default.disabled.active,\n.btn-default[disabled].active,\nfieldset[disabled] .btn-default.active {\n  background-color: #e0e0e0;\n  background-image: none;\n}\n.btn-primary {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #265a88 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #265a88 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #265a88 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #245580;\n}\n.btn-primary:hover,\n.btn-primary:focus {\n  background-color: #265a88;\n  background-position: 0 -15px;\n}\n.btn-primary:active,\n.btn-primary.active {\n  background-color: #265a88;\n  border-color: #245580;\n}\n.btn-primary.disabled,\n.btn-primary[disabled],\nfieldset[disabled] .btn-primary,\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus,\n.btn-primary.disabled:active,\n.btn-primary[disabled]:active,\nfieldset[disabled] .btn-primary:active,\n.btn-primary.disabled.active,\n.btn-primary[disabled].active,\nfieldset[disabled] .btn-primary.active {\n  background-color: #265a88;\n  background-image: none;\n}\n.btn-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);\n  background-image: -o-linear-gradient(top, #5cb85c 0%, #419641 100%);\n  background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #3e8f3e;\n}\n.btn-success:hover,\n.btn-success:focus {\n  background-color: 
#419641;\n  background-position: 0 -15px;\n}\n.btn-success:active,\n.btn-success.active {\n  background-color: #419641;\n  border-color: #3e8f3e;\n}\n.btn-success.disabled,\n.btn-success[disabled],\nfieldset[disabled] .btn-success,\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus,\n.btn-success.disabled:active,\n.btn-success[disabled]:active,\nfieldset[disabled] .btn-success:active,\n.btn-success.disabled.active,\n.btn-success[disabled].active,\nfieldset[disabled] .btn-success.active {\n  background-color: #419641;\n  background-image: none;\n}\n.btn-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n  background-image: -o-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n  background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #28a4c9;\n}\n.btn-info:hover,\n.btn-info:focus {\n  background-color: #2aabd2;\n  background-position: 0 -15px;\n}\n.btn-info:active,\n.btn-info.active {\n  background-color: #2aabd2;\n  border-color: #28a4c9;\n}\n.btn-info.disabled,\n.btn-info[disabled],\nfieldset[disabled] .btn-info,\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus,\n.btn-info.disabled:active,\n.btn-info[disabled]:active,\nfieldset[disabled] .btn-info:active,\n.btn-info.disabled.active,\n.btn-info[disabled].active,\nfieldset[disabled] .btn-info.active {\n  background-color: #2aabd2;\n  background-image: none;\n}\n.btn-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n  background-image: -o-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n  background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #e38d13;\n}\n.btn-warning:hover,\n.btn-warning:focus {\n  background-color: #eb9316;\n  background-position: 0 -15px;\n}\n.btn-warning:active,\n.btn-warning.active {\n  background-color: #eb9316;\n  border-color: #e38d13;\n}\n.btn-warning.disabled,\n.btn-warning[disabled],\nfieldset[disabled] .btn-warning,\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus,\n.btn-warning.disabled:active,\n.btn-warning[disabled]:active,\nfieldset[disabled] .btn-warning:active,\n.btn-warning.disabled.active,\n.btn-warning[disabled].active,\nfieldset[disabled] .btn-warning.active {\n  background-color: #eb9316;\n  background-image: none;\n}\n.btn-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 
100%);\n  background-image: -o-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n  background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #b92c28;\n}\n.btn-danger:hover,\n.btn-danger:focus {\n  background-color: #c12e2a;\n  background-position: 0 -15px;\n}\n.btn-danger:active,\n.btn-danger.active {\n  background-color: #c12e2a;\n  border-color: #b92c28;\n}\n.btn-danger.disabled,\n.btn-danger[disabled],\nfieldset[disabled] .btn-danger,\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus,\n.btn-danger.disabled:active,\n.btn-danger[disabled]:active,\nfieldset[disabled] .btn-danger:active,\n.btn-danger.disabled.active,\n.btn-danger[disabled].active,\nfieldset[disabled] .btn-danger.active {\n  background-color: #c12e2a;\n  background-image: none;\n}\n.thumbnail,\n.img-thumbnail {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n  background-color: #e8e8e8;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n  background-repeat: repeat-x;\n  background-color: #2e6da4;\n}\n.navbar-default {\n  background-image: -webkit-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n  background-image: -o-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n  background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n  background-image: -o-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n  background-image: linear-gradient(to bottom, #dbdbdb 0%, #e2e2e2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);\n  background-repeat: repeat-x;\n  
-webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n}\n.navbar-brand,\n.navbar-nav > li > a {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25);\n}\n.navbar-inverse {\n  background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);\n  background-image: -o-linear-gradient(top, #3c3c3c 0%, #222 100%);\n  background-image: linear-gradient(to bottom, #3c3c3c 0%, #222 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  border-radius: 4px;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n  background-image: -o-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n  background-image: linear-gradient(to bottom, #080808 0%, #0f0f0f 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);\n  background-repeat: repeat-x;\n  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n  box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n}\n.navbar-inverse .navbar-brand,\n.navbar-inverse .navbar-nav > li > a {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  border-radius: 0;\n}\n@media (max-width: 767px) {\n  .navbar .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n    background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n    background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n    filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n    background-repeat: repeat-x;\n  }\n}\n.alert {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.alert-success {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n  background-image: -o-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n  background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #b2dba1;\n}\n.alert-info {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n  background-image: -o-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n  background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #9acfea;\n}\n.alert-warning {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n  background-image: -o-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n  background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', 
endColorstr='#fff8efc0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #f5e79e;\n}\n.alert-danger {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n  background-image: -o-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n  background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dca7a7;\n}\n.progress {\n  background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n  background-image: -o-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n  background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #286090 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #286090 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #286090 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n  background-image: -o-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n  background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n  background-image: -o-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n  background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n  background-image: -o-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n  background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n  background-image: -o-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n  background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-striped {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 
75%, transparent 75%, transparent);\n}\n.list-group {\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  text-shadow: 0 -1px 0 #286090;\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2b669a 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #2b669a;\n}\n.list-group-item.active .badge,\n.list-group-item.active:hover .badge,\n.list-group-item.active:focus .badge {\n  text-shadow: none;\n}\n.panel {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.panel-default > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-primary > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-success > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n  background-image: -o-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n  background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-info > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n  background-image: -o-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n  background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-warning > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n  background-image: -o-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n  background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-danger > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n  background-image: -o-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n  background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.well {\n  background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n  
background-image: -o-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n  background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dcdcdc;\n  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n  box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n}\n/*# sourceMappingURL=bootstrap-theme.css.map */","/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.btn-default:active,\n.btn-primary:active,\n.btn-success:active,\n.btn-info:active,\n.btn-warning:active,\n.btn-danger:active,\n.btn-default.active,\n.btn-primary.active,\n.btn-success.active,\n.btn-info.active,\n.btn-warning.active,\n.btn-danger.active {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-default.disabled,\n.btn-primary.disabled,\n.btn-success.disabled,\n.btn-info.disabled,\n.btn-warning.disabled,\n.btn-danger.disabled,\n.btn-default[disabled],\n.btn-primary[disabled],\n.btn-success[disabled],\n.btn-info[disabled],\n.btn-warning[disabled],\n.btn-danger[disabled],\nfieldset[disabled] .btn-default,\nfieldset[disabled] .btn-primary,\nfieldset[disabled] .btn-success,\nfieldset[disabled] .btn-info,\nfieldset[disabled] .btn-warning,\nfieldset[disabled] .btn-danger {\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\n.btn-default .badge,\n.btn-primary .badge,\n.btn-success .badge,\n.btn-info .badge,\n.btn-warning .badge,\n.btn-danger .badge {\n  text-shadow: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n}\n.btn-default {\n  background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n  background-image: -o-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#fff), to(#e0e0e0));\n  background-image: linear-gradient(to bottom, #fff 0%, #e0e0e0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #dbdbdb;\n  text-shadow: 0 1px 0 #fff;\n  border-color: #ccc;\n}\n.btn-default:hover,\n.btn-default:focus {\n  background-color: #e0e0e0;\n  background-position: 0 -15px;\n}\n.btn-default:active,\n.btn-default.active {\n  background-color: #e0e0e0;\n  border-color: #dbdbdb;\n}\n.btn-default.disabled,\n.btn-default[disabled],\nfieldset[disabled] .btn-default,\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus,\n.btn-default.disabled:active,\n.btn-default[disabled]:active,\nfieldset[disabled] 
.btn-default:active,\n.btn-default.disabled.active,\n.btn-default[disabled].active,\nfieldset[disabled] .btn-default.active {\n  background-color: #e0e0e0;\n  background-image: none;\n}\n.btn-primary {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #265a88 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #265a88 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#265a88));\n  background-image: linear-gradient(to bottom, #337ab7 0%, #265a88 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #245580;\n}\n.btn-primary:hover,\n.btn-primary:focus {\n  background-color: #265a88;\n  background-position: 0 -15px;\n}\n.btn-primary:active,\n.btn-primary.active {\n  background-color: #265a88;\n  border-color: #245580;\n}\n.btn-primary.disabled,\n.btn-primary[disabled],\nfieldset[disabled] .btn-primary,\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus,\n.btn-primary.disabled:active,\n.btn-primary[disabled]:active,\nfieldset[disabled] .btn-primary:active,\n.btn-primary.disabled.active,\n.btn-primary[disabled].active,\nfieldset[disabled] .btn-primary.active {\n  background-color: #265a88;\n  background-image: none;\n}\n.btn-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);\n  background-image: -o-linear-gradient(top, #5cb85c 0%, #419641 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#419641));\n  background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #3e8f3e;\n}\n.btn-success:hover,\n.btn-success:focus {\n  background-color: #419641;\n  background-position: 0 -15px;\n}\n.btn-success:active,\n.btn-success.active {\n  background-color: #419641;\n  border-color: #3e8f3e;\n}\n.btn-success.disabled,\n.btn-success[disabled],\nfieldset[disabled] .btn-success,\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus,\n.btn-success.disabled:active,\n.btn-success[disabled]:active,\nfieldset[disabled] .btn-success:active,\n.btn-success.disabled.active,\n.btn-success[disabled].active,\nfieldset[disabled] .btn-success.active {\n  background-color: #419641;\n  background-image: none;\n}\n.btn-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n  background-image: -o-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#2aabd2));\n  background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', 
GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #28a4c9;\n}\n.btn-info:hover,\n.btn-info:focus {\n  background-color: #2aabd2;\n  background-position: 0 -15px;\n}\n.btn-info:active,\n.btn-info.active {\n  background-color: #2aabd2;\n  border-color: #28a4c9;\n}\n.btn-info.disabled,\n.btn-info[disabled],\nfieldset[disabled] .btn-info,\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus,\n.btn-info.disabled:active,\n.btn-info[disabled]:active,\nfieldset[disabled] .btn-info:active,\n.btn-info.disabled.active,\n.btn-info[disabled].active,\nfieldset[disabled] .btn-info.active {\n  background-color: #2aabd2;\n  background-image: none;\n}\n.btn-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n  background-image: -o-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#eb9316));\n  background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #e38d13;\n}\n.btn-warning:hover,\n.btn-warning:focus {\n  background-color: #eb9316;\n  background-position: 0 -15px;\n}\n.btn-warning:active,\n.btn-warning.active {\n  background-color: #eb9316;\n  border-color: #e38d13;\n}\n.btn-warning.disabled,\n.btn-warning[disabled],\nfieldset[disabled] .btn-warning,\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus,\n.btn-warning.disabled:active,\n.btn-warning[disabled]:active,\nfieldset[disabled] .btn-warning:active,\n.btn-warning.disabled.active,\n.btn-warning[disabled].active,\nfieldset[disabled] .btn-warning.active {\n  background-color: #eb9316;\n  background-image: none;\n}\n.btn-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n  background-image: -o-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9534f), to(#c12e2a));\n  background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #b92c28;\n}\n.btn-danger:hover,\n.btn-danger:focus {\n  background-color: #c12e2a;\n  background-position: 0 -15px;\n}\n.btn-danger:active,\n.btn-danger.active {\n  background-color: #c12e2a;\n  border-color: #b92c28;\n}\n.btn-danger.disabled,\n.btn-danger[disabled],\nfieldset[disabled] .btn-danger,\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] 
.btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus,\n.btn-danger.disabled:active,\n.btn-danger[disabled]:active,\nfieldset[disabled] .btn-danger:active,\n.btn-danger.disabled.active,\n.btn-danger[disabled].active,\nfieldset[disabled] .btn-danger.active {\n  background-color: #c12e2a;\n  background-image: none;\n}\n.thumbnail,\n.img-thumbnail {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8));\n  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n  background-color: #e8e8e8;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n  background-repeat: repeat-x;\n  background-color: #2e6da4;\n}\n.navbar-default {\n  background-image: -webkit-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n  background-image: -o-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#f8f8f8));\n  background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n  background-image: -o-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#dbdbdb), to(#e2e2e2));\n  background-image: linear-gradient(to bottom, #dbdbdb 0%, #e2e2e2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);\n  background-repeat: repeat-x;\n  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n}\n.navbar-brand,\n.navbar-nav > li > a {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25);\n}\n.navbar-inverse {\n  background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);\n  background-image: -o-linear-gradient(top, #3c3c3c 0%, #222 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#3c3c3c), to(#222));\n  background-image: 
linear-gradient(to bottom, #3c3c3c 0%, #222 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  border-radius: 4px;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n  background-image: -o-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#080808), to(#0f0f0f));\n  background-image: linear-gradient(to bottom, #080808 0%, #0f0f0f 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);\n  background-repeat: repeat-x;\n  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n  box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n}\n.navbar-inverse .navbar-brand,\n.navbar-inverse .navbar-nav > li > a {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  border-radius: 0;\n}\n@media (max-width: 767px) {\n  .navbar .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n    background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n    background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));\n    background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n    filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n    background-repeat: repeat-x;\n  }\n}\n.alert {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.alert-success {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n  background-image: -o-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#c8e5bc));\n  background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #b2dba1;\n}\n.alert-info {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n  background-image: -o-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#b9def0));\n  background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #9acfea;\n}\n.alert-warning {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n  background-image: -o-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#fcf8e3), to(#f8efc0));\n  background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 
100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #f5e79e;\n}\n.alert-danger {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n  background-image: -o-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#e7c3c3));\n  background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dca7a7;\n}\n.progress {\n  background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n  background-image: -o-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#ebebeb), to(#f5f5f5));\n  background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #286090 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #286090 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#286090));\n  background-image: linear-gradient(to bottom, #337ab7 0%, #286090 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n  background-image: -o-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#449d44));\n  background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n  background-image: -o-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#31b0d5));\n  background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n  background-image: -o-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#ec971f));\n  background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n  background-image: -o-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9534f), to(#c9302c));\n  background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 
100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-striped {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.list-group {\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  text-shadow: 0 -1px 0 #286090;\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2b669a));\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2b669a 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #2b669a;\n}\n.list-group-item.active .badge,\n.list-group-item.active:hover .badge,\n.list-group-item.active:focus .badge {\n  text-shadow: none;\n}\n.panel {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.panel-default > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8));\n  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-primary > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));\n  background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-success > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n  background-image: -o-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#d0e9c6));\n  background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-info > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n  background-image: -o-linear-gradient(top, #d9edf7 0%, #c4e3f3 
100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#c4e3f3));\n  background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-warning > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n  background-image: -o-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#fcf8e3), to(#faf2cc));\n  background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-danger > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n  background-image: -o-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#ebcccc));\n  background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.well {\n  background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n  background-image: -o-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#e8e8e8), to(#f5f5f5));\n  background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dcdcdc;\n  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n  box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n}\n/*# sourceMappingURL=bootstrap-theme.css.map */","// stylelint-disable selector-no-qualifying-type, selector-max-compound-selectors\n\n/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n\n//\n// Load core variables and mixins\n// --------------------------------------------------\n\n@import \"variables.less\";\n@import \"mixins.less\";\n\n\n//\n// Buttons\n// --------------------------------------------------\n\n// Common styles\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, .2);\n  @shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);\n  .box-shadow(@shadow);\n\n  // Reset the shadow\n  &:active,\n  &.active {\n    .box-shadow(inset 0 3px 5px rgba(0, 0, 0, .125));\n  }\n\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    .box-shadow(none);\n  }\n\n  .badge {\n    text-shadow: none;\n  }\n}\n\n// Mixin for generating new styles\n.btn-styles(@btn-color: #555) {\n  #gradient > .vertical(@start-color: @btn-color; @end-color: darken(@btn-color, 12%));\n  .reset-filter(); // Disable gradients for IE9 because filter bleeds through rounded corners; see https://github.com/twbs/bootstrap/issues/10620\n  background-repeat: repeat-x;\n  border-color: darken(@btn-color, 14%);\n\n  &:hover,\n  &:focus  {\n    background-color: 
darken(@btn-color, 12%);\n    background-position: 0 -15px;\n  }\n\n  &:active,\n  &.active {\n    background-color: darken(@btn-color, 12%);\n    border-color: darken(@btn-color, 14%);\n  }\n\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    &,\n    &:hover,\n    &:focus,\n    &.focus,\n    &:active,\n    &.active {\n      background-color: darken(@btn-color, 12%);\n      background-image: none;\n    }\n  }\n}\n\n// Common styles\n.btn {\n  // Remove the gradient for the pressed/active state\n  &:active,\n  &.active {\n    background-image: none;\n  }\n}\n\n// Apply the mixin to the buttons\n.btn-default {\n  .btn-styles(@btn-default-bg);\n  text-shadow: 0 1px 0 #fff;\n  border-color: #ccc;\n}\n.btn-primary { .btn-styles(@btn-primary-bg); }\n.btn-success { .btn-styles(@btn-success-bg); }\n.btn-info    { .btn-styles(@btn-info-bg); }\n.btn-warning { .btn-styles(@btn-warning-bg); }\n.btn-danger  { .btn-styles(@btn-danger-bg); }\n\n\n//\n// Images\n// --------------------------------------------------\n\n.thumbnail,\n.img-thumbnail {\n  .box-shadow(0 1px 2px rgba(0, 0, 0, .075));\n}\n\n\n//\n// Dropdowns\n// --------------------------------------------------\n\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  #gradient > .vertical(@start-color: @dropdown-link-hover-bg; @end-color: darken(@dropdown-link-hover-bg, 5%));\n  background-color: darken(@dropdown-link-hover-bg, 5%);\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n  background-color: darken(@dropdown-link-active-bg, 5%);\n}\n\n\n//\n// Navbar\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n  #gradient > .vertical(@start-color: lighten(@navbar-default-bg, 10%); @end-color: @navbar-default-bg);\n  .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered\n  border-radius: @navbar-border-radius;\n  @shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);\n  .box-shadow(@shadow);\n\n  .navbar-nav > .open > a,\n  .navbar-nav > .active > a {\n    #gradient > .vertical(@start-color: darken(@navbar-default-link-active-bg, 5%); @end-color: darken(@navbar-default-link-active-bg, 2%));\n    .box-shadow(inset 0 3px 9px rgba(0, 0, 0, .075));\n  }\n}\n.navbar-brand,\n.navbar-nav > li > a {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, .25);\n}\n\n// Inverted navbar\n.navbar-inverse {\n  #gradient > .vertical(@start-color: lighten(@navbar-inverse-bg, 10%); @end-color: @navbar-inverse-bg);\n  .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered; see https://github.com/twbs/bootstrap/issues/10257\n  border-radius: @navbar-border-radius;\n  .navbar-nav > .open > a,\n  .navbar-nav > .active > a {\n    #gradient > .vertical(@start-color: @navbar-inverse-link-active-bg; @end-color: lighten(@navbar-inverse-link-active-bg, 2.5%));\n    .box-shadow(inset 0 3px 9px rgba(0, 0, 0, .25));\n  }\n\n  .navbar-brand,\n  .navbar-nav > li > a {\n    text-shadow: 0 -1px 0 rgba(0, 0, 0, .25);\n  }\n}\n\n// Undo rounded corners in static and fixed navbars\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  border-radius: 0;\n}\n\n// Fix active state of dropdown items in collapsed mode\n@media (max-width: @grid-float-breakpoint-max) {\n  .navbar .navbar-nav .open .dropdown-menu > .active > a {\n    &,\n    
&:hover,\n    &:focus {\n      color: #fff;\n      #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n    }\n  }\n}\n\n\n//\n// Alerts\n// --------------------------------------------------\n\n// Common styles\n.alert {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, .2);\n  @shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);\n  .box-shadow(@shadow);\n}\n\n// Mixin for generating new styles\n.alert-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 7.5%));\n  border-color: darken(@color, 15%);\n}\n\n// Apply the mixin to the alerts\n.alert-success    { .alert-styles(@alert-success-bg); }\n.alert-info       { .alert-styles(@alert-info-bg); }\n.alert-warning    { .alert-styles(@alert-warning-bg); }\n.alert-danger     { .alert-styles(@alert-danger-bg); }\n\n\n//\n// Progress bars\n// --------------------------------------------------\n\n// Give the progress background some depth\n.progress {\n  #gradient > .vertical(@start-color: darken(@progress-bg, 4%); @end-color: @progress-bg)\n}\n\n// Mixin for generating new styles\n.progress-bar-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 10%));\n}\n\n// Apply the mixin to the progress bars\n.progress-bar            { .progress-bar-styles(@progress-bar-bg); }\n.progress-bar-success    { .progress-bar-styles(@progress-bar-success-bg); }\n.progress-bar-info       { .progress-bar-styles(@progress-bar-info-bg); }\n.progress-bar-warning    { .progress-bar-styles(@progress-bar-warning-bg); }\n.progress-bar-danger     { .progress-bar-styles(@progress-bar-danger-bg); }\n\n// Reset the striped class because our mixins don't do multiple gradients and\n// the above custom styles override the new `.progress-bar-striped` in v3.2.0.\n.progress-bar-striped {\n  #gradient > .striped();\n}\n\n\n//\n// List groups\n// --------------------------------------------------\n\n.list-group {\n  border-radius: @border-radius-base;\n  .box-shadow(0 1px 2px rgba(0, 0, 0, .075));\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  text-shadow: 0 -1px 0 darken(@list-group-active-bg, 10%);\n  #gradient > .vertical(@start-color: @list-group-active-bg; @end-color: darken(@list-group-active-bg, 7.5%));\n  border-color: darken(@list-group-active-border, 7.5%);\n\n  .badge {\n    text-shadow: none;\n  }\n}\n\n\n//\n// Panels\n// --------------------------------------------------\n\n// Common styles\n.panel {\n  .box-shadow(0 1px 2px rgba(0, 0, 0, .05));\n}\n\n// Mixin for generating new styles\n.panel-heading-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 5%));\n}\n\n// Apply the mixin to the panel headings only\n.panel-default > .panel-heading   { .panel-heading-styles(@panel-default-heading-bg); }\n.panel-primary > .panel-heading   { .panel-heading-styles(@panel-primary-heading-bg); }\n.panel-success > .panel-heading   { .panel-heading-styles(@panel-success-heading-bg); }\n.panel-info > .panel-heading      { .panel-heading-styles(@panel-info-heading-bg); }\n.panel-warning > .panel-heading   { .panel-heading-styles(@panel-warning-heading-bg); }\n.panel-danger > .panel-heading    { .panel-heading-styles(@panel-danger-heading-bg); }\n\n\n//\n// Wells\n// --------------------------------------------------\n\n.well {\n  #gradient > .vertical(@start-color: darken(@well-bg, 5%); @end-color: @well-bg);\n  border-color: darken(@well-bg, 
10%);\n  @shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);\n  .box-shadow(@shadow);\n}\n","// stylelint-disable indentation, property-no-vendor-prefix, selector-no-vendor-prefix\n\n// Vendor Prefixes\n//\n// All vendor mixins are deprecated as of v3.2.0 due to the introduction of\n// Autoprefixer in our Gruntfile. They have been removed in v4.\n\n// - Animations\n// - Backface visibility\n// - Box shadow\n// - Box sizing\n// - Content columns\n// - Hyphens\n// - Placeholder text\n// - Transformations\n// - Transitions\n// - User Select\n\n\n// Animations\n.animation(@animation) {\n  -webkit-animation: @animation;\n       -o-animation: @animation;\n          animation: @animation;\n}\n.animation-name(@name) {\n  -webkit-animation-name: @name;\n          animation-name: @name;\n}\n.animation-duration(@duration) {\n  -webkit-animation-duration: @duration;\n          animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n  -webkit-animation-timing-function: @timing-function;\n          animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n  -webkit-animation-delay: @delay;\n          animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n  -webkit-animation-iteration-count: @iteration-count;\n          animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n  -webkit-animation-direction: @direction;\n          animation-direction: @direction;\n}\n.animation-fill-mode(@fill-mode) {\n  -webkit-animation-fill-mode: @fill-mode;\n          animation-fill-mode: @fill-mode;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n\n.backface-visibility(@visibility) {\n  -webkit-backface-visibility: @visibility;\n     -moz-backface-visibility: @visibility;\n          backface-visibility: @visibility;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n// supported browsers that have box shadow capabilities now support it.\n\n.box-shadow(@shadow) {\n  -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n          box-shadow: @shadow;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n  -webkit-box-sizing: @boxmodel;\n     -moz-box-sizing: @boxmodel;\n          box-sizing: @boxmodel;\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n  -webkit-column-count: @column-count;\n     -moz-column-count: @column-count;\n          column-count: @column-count;\n  -webkit-column-gap: @column-gap;\n     -moz-column-gap: @column-gap;\n          column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n  -webkit-hyphens: @mode;\n     -moz-hyphens: @mode;\n      -ms-hyphens: @mode; // IE10+\n       -o-hyphens: @mode;\n          hyphens: @mode;\n  word-wrap: break-word;\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n  // Firefox\n  &::-moz-placeholder {\n    color: @color;\n    opacity: 1; // Override Firefox's unusual default opacity; see https://github.com/twbs/bootstrap/pull/11526\n  }\n  &:-ms-input-placeholder { color: @color; } // Internet Explorer 10+\n  &::-webkit-input-placeholder  { color: @color; } // Safari and Chrome\n}\n\n// Transformations\n.scale(@ratio) {\n  -webkit-transform: scale(@ratio);\n      -ms-transform: scale(@ratio); // IE9 only\n       -o-transform: scale(@ratio);\n          transform: 
scale(@ratio);\n}\n.scale(@ratioX; @ratioY) {\n  -webkit-transform: scale(@ratioX, @ratioY);\n      -ms-transform: scale(@ratioX, @ratioY); // IE9 only\n       -o-transform: scale(@ratioX, @ratioY);\n          transform: scale(@ratioX, @ratioY);\n}\n.scaleX(@ratio) {\n  -webkit-transform: scaleX(@ratio);\n      -ms-transform: scaleX(@ratio); // IE9 only\n       -o-transform: scaleX(@ratio);\n          transform: scaleX(@ratio);\n}\n.scaleY(@ratio) {\n  -webkit-transform: scaleY(@ratio);\n      -ms-transform: scaleY(@ratio); // IE9 only\n       -o-transform: scaleY(@ratio);\n          transform: scaleY(@ratio);\n}\n.skew(@x; @y) {\n  -webkit-transform: skewX(@x) skewY(@y);\n      -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n       -o-transform: skewX(@x) skewY(@y);\n          transform: skewX(@x) skewY(@y);\n}\n.translate(@x; @y) {\n  -webkit-transform: translate(@x, @y);\n      -ms-transform: translate(@x, @y); // IE9 only\n       -o-transform: translate(@x, @y);\n          transform: translate(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n  -webkit-transform: translate3d(@x, @y, @z);\n          transform: translate3d(@x, @y, @z);\n}\n.rotate(@degrees) {\n  -webkit-transform: rotate(@degrees);\n      -ms-transform: rotate(@degrees); // IE9 only\n       -o-transform: rotate(@degrees);\n          transform: rotate(@degrees);\n}\n.rotateX(@degrees) {\n  -webkit-transform: rotateX(@degrees);\n      -ms-transform: rotateX(@degrees); // IE9 only\n       -o-transform: rotateX(@degrees);\n          transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n  -webkit-transform: rotateY(@degrees);\n      -ms-transform: rotateY(@degrees); // IE9 only\n       -o-transform: rotateY(@degrees);\n          transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n  -webkit-perspective: @perspective;\n     -moz-perspective: @perspective;\n          perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n  -webkit-perspective-origin: @perspective;\n     -moz-perspective-origin: @perspective;\n          perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n  -webkit-transform-origin: @origin;\n     -moz-transform-origin: @origin;\n      -ms-transform-origin: @origin; // IE9 only\n          transform-origin: @origin;\n}\n\n\n// Transitions\n\n.transition(@transition) {\n  -webkit-transition: @transition;\n       -o-transition: @transition;\n          transition: @transition;\n}\n.transition-property(@transition-property) {\n  -webkit-transition-property: @transition-property;\n          transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n  -webkit-transition-delay: @transition-delay;\n          transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n  -webkit-transition-duration: @transition-duration;\n          transition-duration: @transition-duration;\n}\n.transition-timing-function(@timing-function) {\n  -webkit-transition-timing-function: @timing-function;\n          transition-timing-function: @timing-function;\n}\n.transition-transform(@transition) {\n  -webkit-transition: -webkit-transform @transition;\n     -moz-transition: -moz-transform @transition;\n       -o-transition: -o-transform @transition;\n          transition: transform @transition;\n}\n\n\n// User select\n// For selecting text on the page\n\n.user-select(@select) {\n  -webkit-user-select: @select;\n     -moz-user-select: @select;\n      -ms-user-select: @select; // IE10+\n          user-select: 
@select;\n}\n","// stylelint-disable value-no-vendor-prefix, selector-max-id\n\n#gradient {\n\n  // Horizontal gradient, from left to right\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Opera 12\n    background-image: linear-gradient(to right, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down\n    background-repeat: repeat-x;\n  }\n\n  // Vertical gradient, from top to bottom\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Opera 12\n    background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down\n    background-repeat: repeat-x;\n  }\n\n  .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n    background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(@deg, @start-color, @end-color); // Opera 12\n    background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n  }\n  .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n    background-repeat: no-repeat;\n  }\n  .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    filter: 
e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n    background-repeat: no-repeat;\n  }\n  .radial(@inner-color: #555; @outer-color: #333) {\n    background-image: -webkit-radial-gradient(circle, @inner-color, @outer-color);\n    background-image: radial-gradient(circle, @inner-color, @outer-color);\n    background-repeat: no-repeat;\n  }\n  .striped(@color: rgba(255, 255, 255, .15); @angle: 45deg) {\n    background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: -o-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n  }\n}\n","// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n\n.reset-filter() {\n  filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n"]}
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css
deleted file mode 100644
index fcab415..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css
+++ /dev/null
@@ -1,6834 +0,0 @@
-/*!
- * Bootstrap v3.4.1 (https://getbootstrap.com/)
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- */
-/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */
-html {
-  font-family: sans-serif;
-  -ms-text-size-adjust: 100%;
-  -webkit-text-size-adjust: 100%;
-}
-body {
-  margin: 0;
-}
-article,
-aside,
-details,
-figcaption,
-figure,
-footer,
-header,
-hgroup,
-main,
-menu,
-nav,
-section,
-summary {
-  display: block;
-}
-audio,
-canvas,
-progress,
-video {
-  display: inline-block;
-  vertical-align: baseline;
-}
-audio:not([controls]) {
-  display: none;
-  height: 0;
-}
-[hidden],
-template {
-  display: none;
-}
-a {
-  background-color: transparent;
-}
-a:active,
-a:hover {
-  outline: 0;
-}
-abbr[title] {
-  border-bottom: none;
-  text-decoration: underline;
-  -webkit-text-decoration: underline dotted;
-  -moz-text-decoration: underline dotted;
-  text-decoration: underline dotted;
-}
-b,
-strong {
-  font-weight: bold;
-}
-dfn {
-  font-style: italic;
-}
-h1 {
-  font-size: 2em;
-  margin: 0.67em 0;
-}
-mark {
-  background: #ff0;
-  color: #000;
-}
-small {
-  font-size: 80%;
-}
-sub,
-sup {
-  font-size: 75%;
-  line-height: 0;
-  position: relative;
-  vertical-align: baseline;
-}
-sup {
-  top: -0.5em;
-}
-sub {
-  bottom: -0.25em;
-}
-img {
-  border: 0;
-}
-svg:not(:root) {
-  overflow: hidden;
-}
-figure {
-  margin: 1em 40px;
-}
-hr {
-  -webkit-box-sizing: content-box;
-  -moz-box-sizing: content-box;
-  box-sizing: content-box;
-  height: 0;
-}
-pre {
-  overflow: auto;
-}
-code,
-kbd,
-pre,
-samp {
-  font-family: monospace, monospace;
-  font-size: 1em;
-}
-button,
-input,
-optgroup,
-select,
-textarea {
-  color: inherit;
-  font: inherit;
-  margin: 0;
-}
-button {
-  overflow: visible;
-}
-button,
-select {
-  text-transform: none;
-}
-button,
-html input[type="button"],
-input[type="reset"],
-input[type="submit"] {
-  -webkit-appearance: button;
-  cursor: pointer;
-}
-button[disabled],
-html input[disabled] {
-  cursor: default;
-}
-button::-moz-focus-inner,
-input::-moz-focus-inner {
-  border: 0;
-  padding: 0;
-}
-input {
-  line-height: normal;
-}
-input[type="checkbox"],
-input[type="radio"] {
-  -webkit-box-sizing: border-box;
-  -moz-box-sizing: border-box;
-  box-sizing: border-box;
-  padding: 0;
-}
-input[type="number"]::-webkit-inner-spin-button,
-input[type="number"]::-webkit-outer-spin-button {
-  height: auto;
-}
-input[type="search"] {
-  -webkit-appearance: textfield;
-  -webkit-box-sizing: content-box;
-  -moz-box-sizing: content-box;
-  box-sizing: content-box;
-}
-input[type="search"]::-webkit-search-cancel-button,
-input[type="search"]::-webkit-search-decoration {
-  -webkit-appearance: none;
-}
-fieldset {
-  border: 1px solid #c0c0c0;
-  margin: 0 2px;
-  padding: 0.35em 0.625em 0.75em;
-}
-legend {
-  border: 0;
-  padding: 0;
-}
-textarea {
-  overflow: auto;
-}
-optgroup {
-  font-weight: bold;
-}
-table {
-  border-collapse: collapse;
-  border-spacing: 0;
-}
-td,
-th {
-  padding: 0;
-}
-/*! Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */
-@media print {
-  *,
-  *:before,
-  *:after {
-    color: #000 !important;
-    text-shadow: none !important;
-    background: transparent !important;
-    -webkit-box-shadow: none !important;
-    box-shadow: none !important;
-  }
-  a,
-  a:visited {
-    text-decoration: underline;
-  }
-  a[href]:after {
-    content: " (" attr(href) ")";
-  }
-  abbr[title]:after {
-    content: " (" attr(title) ")";
-  }
-  a[href^="#"]:after,
-  a[href^="javascript:"]:after {
-    content: "";
-  }
-  pre,
-  blockquote {
-    border: 1px solid #999;
-    page-break-inside: avoid;
-  }
-  thead {
-    display: table-header-group;
-  }
-  tr,
-  img {
-    page-break-inside: avoid;
-  }
-  img {
-    max-width: 100% !important;
-  }
-  p,
-  h2,
-  h3 {
-    orphans: 3;
-    widows: 3;
-  }
-  h2,
-  h3 {
-    page-break-after: avoid;
-  }
-  .navbar {
-    display: none;
-  }
-  .btn > .caret,
-  .dropup > .btn > .caret {
-    border-top-color: #000 !important;
-  }
-  .label {
-    border: 1px solid #000;
-  }
-  .table {
-    border-collapse: collapse !important;
-  }
-  .table td,
-  .table th {
-    background-color: #fff !important;
-  }
-  .table-bordered th,
-  .table-bordered td {
-    border: 1px solid #ddd !important;
-  }
-}
-@font-face {
-  font-family: "Glyphicons Halflings";
-  src: url("../fonts/glyphicons-halflings-regular.eot");
-  src: url("../fonts/glyphicons-halflings-regular.eot?#iefix") format("embedded-opentype"), url("../fonts/glyphicons-halflings-regular.woff2") format("woff2"), url("../fonts/glyphicons-halflings-regular.woff") format("woff"), url("../fonts/glyphicons-halflings-regular.ttf") format("truetype"), url("../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular") format("svg");
-}
-.glyphicon {
-  position: relative;
-  top: 1px;
-  display: inline-block;
-  font-family: "Glyphicons Halflings";
-  font-style: normal;
-  font-weight: 400;
-  line-height: 1;
-  -webkit-font-smoothing: antialiased;
-  -moz-osx-font-smoothing: grayscale;
-}
-.glyphicon-asterisk:before {
-  content: "\002a";
-}
-.glyphicon-plus:before {
-  content: "\002b";
-}
-.glyphicon-euro:before,
-.glyphicon-eur:before {
-  content: "\20ac";
-}
-.glyphicon-minus:before {
-  content: "\2212";
-}
-.glyphicon-cloud:before {
-  content: "\2601";
-}
-.glyphicon-envelope:before {
-  content: "\2709";
-}
-.glyphicon-pencil:before {
-  content: "\270f";
-}
-.glyphicon-glass:before {
-  content: "\e001";
-}
-.glyphicon-music:before {
-  content: "\e002";
-}
-.glyphicon-search:before {
-  content: "\e003";
-}
-.glyphicon-heart:before {
-  content: "\e005";
-}
-.glyphicon-star:before {
-  content: "\e006";
-}
-.glyphicon-star-empty:before {
-  content: "\e007";
-}
-.glyphicon-user:before {
-  content: "\e008";
-}
-.glyphicon-film:before {
-  content: "\e009";
-}
-.glyphicon-th-large:before {
-  content: "\e010";
-}
-.glyphicon-th:before {
-  content: "\e011";
-}
-.glyphicon-th-list:before {
-  content: "\e012";
-}
-.glyphicon-ok:before {
-  content: "\e013";
-}
-.glyphicon-remove:before {
-  content: "\e014";
-}
-.glyphicon-zoom-in:before {
-  content: "\e015";
-}
-.glyphicon-zoom-out:before {
-  content: "\e016";
-}
-.glyphicon-off:before {
-  content: "\e017";
-}
-.glyphicon-signal:before {
-  content: "\e018";
-}
-.glyphicon-cog:before {
-  content: "\e019";
-}
-.glyphicon-trash:before {
-  content: "\e020";
-}
-.glyphicon-home:before {
-  content: "\e021";
-}
-.glyphicon-file:before {
-  content: "\e022";
-}
-.glyphicon-time:before {
-  content: "\e023";
-}
-.glyphicon-road:before {
-  content: "\e024";
-}
-.glyphicon-download-alt:before {
-  content: "\e025";
-}
-.glyphicon-download:before {
-  content: "\e026";
-}
-.glyphicon-upload:before {
-  content: "\e027";
-}
-.glyphicon-inbox:before {
-  content: "\e028";
-}
-.glyphicon-play-circle:before {
-  content: "\e029";
-}
-.glyphicon-repeat:before {
-  content: "\e030";
-}
-.glyphicon-refresh:before {
-  content: "\e031";
-}
-.glyphicon-list-alt:before {
-  content: "\e032";
-}
-.glyphicon-lock:before {
-  content: "\e033";
-}
-.glyphicon-flag:before {
-  content: "\e034";
-}
-.glyphicon-headphones:before {
-  content: "\e035";
-}
-.glyphicon-volume-off:before {
-  content: "\e036";
-}
-.glyphicon-volume-down:before {
-  content: "\e037";
-}
-.glyphicon-volume-up:before {
-  content: "\e038";
-}
-.glyphicon-qrcode:before {
-  content: "\e039";
-}
-.glyphicon-barcode:before {
-  content: "\e040";
-}
-.glyphicon-tag:before {
-  content: "\e041";
-}
-.glyphicon-tags:before {
-  content: "\e042";
-}
-.glyphicon-book:before {
-  content: "\e043";
-}
-.glyphicon-bookmark:before {
-  content: "\e044";
-}
-.glyphicon-print:before {
-  content: "\e045";
-}
-.glyphicon-camera:before {
-  content: "\e046";
-}
-.glyphicon-font:before {
-  content: "\e047";
-}
-.glyphicon-bold:before {
-  content: "\e048";
-}
-.glyphicon-italic:before {
-  content: "\e049";
-}
-.glyphicon-text-height:before {
-  content: "\e050";
-}
-.glyphicon-text-width:before {
-  content: "\e051";
-}
-.glyphicon-align-left:before {
-  content: "\e052";
-}
-.glyphicon-align-center:before {
-  content: "\e053";
-}
-.glyphicon-align-right:before {
-  content: "\e054";
-}
-.glyphicon-align-justify:before {
-  content: "\e055";
-}
-.glyphicon-list:before {
-  content: "\e056";
-}
-.glyphicon-indent-left:before {
-  content: "\e057";
-}
-.glyphicon-indent-right:before {
-  content: "\e058";
-}
-.glyphicon-facetime-video:before {
-  content: "\e059";
-}
-.glyphicon-picture:before {
-  content: "\e060";
-}
-.glyphicon-map-marker:before {
-  content: "\e062";
-}
-.glyphicon-adjust:before {
-  content: "\e063";
-}
-.glyphicon-tint:before {
-  content: "\e064";
-}
-.glyphicon-edit:before {
-  content: "\e065";
-}
-.glyphicon-share:before {
-  content: "\e066";
-}
-.glyphicon-check:before {
-  content: "\e067";
-}
-.glyphicon-move:before {
-  content: "\e068";
-}
-.glyphicon-step-backward:before {
-  content: "\e069";
-}
-.glyphicon-fast-backward:before {
-  content: "\e070";
-}
-.glyphicon-backward:before {
-  content: "\e071";
-}
-.glyphicon-play:before {
-  content: "\e072";
-}
-.glyphicon-pause:before {
-  content: "\e073";
-}
-.glyphicon-stop:before {
-  content: "\e074";
-}
-.glyphicon-forward:before {
-  content: "\e075";
-}
-.glyphicon-fast-forward:before {
-  content: "\e076";
-}
-.glyphicon-step-forward:before {
-  content: "\e077";
-}
-.glyphicon-eject:before {
-  content: "\e078";
-}
-.glyphicon-chevron-left:before {
-  content: "\e079";
-}
-.glyphicon-chevron-right:before {
-  content: "\e080";
-}
-.glyphicon-plus-sign:before {
-  content: "\e081";
-}
-.glyphicon-minus-sign:before {
-  content: "\e082";
-}
-.glyphicon-remove-sign:before {
-  content: "\e083";
-}
-.glyphicon-ok-sign:before {
-  content: "\e084";
-}
-.glyphicon-question-sign:before {
-  content: "\e085";
-}
-.glyphicon-info-sign:before {
-  content: "\e086";
-}
-.glyphicon-screenshot:before {
-  content: "\e087";
-}
-.glyphicon-remove-circle:before {
-  content: "\e088";
-}
-.glyphicon-ok-circle:before {
-  content: "\e089";
-}
-.glyphicon-ban-circle:before {
-  content: "\e090";
-}
-.glyphicon-arrow-left:before {
-  content: "\e091";
-}
-.glyphicon-arrow-right:before {
-  content: "\e092";
-}
-.glyphicon-arrow-up:before {
-  content: "\e093";
-}
-.glyphicon-arrow-down:before {
-  content: "\e094";
-}
-.glyphicon-share-alt:before {
-  content: "\e095";
-}
-.glyphicon-resize-full:before {
-  content: "\e096";
-}
-.glyphicon-resize-small:before {
-  content: "\e097";
-}
-.glyphicon-exclamation-sign:before {
-  content: "\e101";
-}
-.glyphicon-gift:before {
-  content: "\e102";
-}
-.glyphicon-leaf:before {
-  content: "\e103";
-}
-.glyphicon-fire:before {
-  content: "\e104";
-}
-.glyphicon-eye-open:before {
-  content: "\e105";
-}
-.glyphicon-eye-close:before {
-  content: "\e106";
-}
-.glyphicon-warning-sign:before {
-  content: "\e107";
-}
-.glyphicon-plane:before {
-  content: "\e108";
-}
-.glyphicon-calendar:before {
-  content: "\e109";
-}
-.glyphicon-random:before {
-  content: "\e110";
-}
-.glyphicon-comment:before {
-  content: "\e111";
-}
-.glyphicon-magnet:before {
-  content: "\e112";
-}
-.glyphicon-chevron-up:before {
-  content: "\e113";
-}
-.glyphicon-chevron-down:before {
-  content: "\e114";
-}
-.glyphicon-retweet:before {
-  content: "\e115";
-}
-.glyphicon-shopping-cart:before {
-  content: "\e116";
-}
-.glyphicon-folder-close:before {
-  content: "\e117";
-}
-.glyphicon-folder-open:before {
-  content: "\e118";
-}
-.glyphicon-resize-vertical:before {
-  content: "\e119";
-}
-.glyphicon-resize-horizontal:before {
-  content: "\e120";
-}
-.glyphicon-hdd:before {
-  content: "\e121";
-}
-.glyphicon-bullhorn:before {
-  content: "\e122";
-}
-.glyphicon-bell:before {
-  content: "\e123";
-}
-.glyphicon-certificate:before {
-  content: "\e124";
-}
-.glyphicon-thumbs-up:before {
-  content: "\e125";
-}
-.glyphicon-thumbs-down:before {
-  content: "\e126";
-}
-.glyphicon-hand-right:before {
-  content: "\e127";
-}
-.glyphicon-hand-left:before {
-  content: "\e128";
-}
-.glyphicon-hand-up:before {
-  content: "\e129";
-}
-.glyphicon-hand-down:before {
-  content: "\e130";
-}
-.glyphicon-circle-arrow-right:before {
-  content: "\e131";
-}
-.glyphicon-circle-arrow-left:before {
-  content: "\e132";
-}
-.glyphicon-circle-arrow-up:before {
-  content: "\e133";
-}
-.glyphicon-circle-arrow-down:before {
-  content: "\e134";
-}
-.glyphicon-globe:before {
-  content: "\e135";
-}
-.glyphicon-wrench:before {
-  content: "\e136";
-}
-.glyphicon-tasks:before {
-  content: "\e137";
-}
-.glyphicon-filter:before {
-  content: "\e138";
-}
-.glyphicon-briefcase:before {
-  content: "\e139";
-}
-.glyphicon-fullscreen:before {
-  content: "\e140";
-}
-.glyphicon-dashboard:before {
-  content: "\e141";
-}
-.glyphicon-paperclip:before {
-  content: "\e142";
-}
-.glyphicon-heart-empty:before {
-  content: "\e143";
-}
-.glyphicon-link:before {
-  content: "\e144";
-}
-.glyphicon-phone:before {
-  content: "\e145";
-}
-.glyphicon-pushpin:before {
-  content: "\e146";
-}
-.glyphicon-usd:before {
-  content: "\e148";
-}
-.glyphicon-gbp:before {
-  content: "\e149";
-}
-.glyphicon-sort:before {
-  content: "\e150";
-}
-.glyphicon-sort-by-alphabet:before {
-  content: "\e151";
-}
-.glyphicon-sort-by-alphabet-alt:before {
-  content: "\e152";
-}
-.glyphicon-sort-by-order:before {
-  content: "\e153";
-}
-.glyphicon-sort-by-order-alt:before {
-  content: "\e154";
-}
-.glyphicon-sort-by-attributes:before {
-  content: "\e155";
-}
-.glyphicon-sort-by-attributes-alt:before {
-  content: "\e156";
-}
-.glyphicon-unchecked:before {
-  content: "\e157";
-}
-.glyphicon-expand:before {
-  content: "\e158";
-}
-.glyphicon-collapse-down:before {
-  content: "\e159";
-}
-.glyphicon-collapse-up:before {
-  content: "\e160";
-}
-.glyphicon-log-in:before {
-  content: "\e161";
-}
-.glyphicon-flash:before {
-  content: "\e162";
-}
-.glyphicon-log-out:before {
-  content: "\e163";
-}
-.glyphicon-new-window:before {
-  content: "\e164";
-}
-.glyphicon-record:before {
-  content: "\e165";
-}
-.glyphicon-save:before {
-  content: "\e166";
-}
-.glyphicon-open:before {
-  content: "\e167";
-}
-.glyphicon-saved:before {
-  content: "\e168";
-}
-.glyphicon-import:before {
-  content: "\e169";
-}
-.glyphicon-export:before {
-  content: "\e170";
-}
-.glyphicon-send:before {
-  content: "\e171";
-}
-.glyphicon-floppy-disk:before {
-  content: "\e172";
-}
-.glyphicon-floppy-saved:before {
-  content: "\e173";
-}
-.glyphicon-floppy-remove:before {
-  content: "\e174";
-}
-.glyphicon-floppy-save:before {
-  content: "\e175";
-}
-.glyphicon-floppy-open:before {
-  content: "\e176";
-}
-.glyphicon-credit-card:before {
-  content: "\e177";
-}
-.glyphicon-transfer:before {
-  content: "\e178";
-}
-.glyphicon-cutlery:before {
-  content: "\e179";
-}
-.glyphicon-header:before {
-  content: "\e180";
-}
-.glyphicon-compressed:before {
-  content: "\e181";
-}
-.glyphicon-earphone:before {
-  content: "\e182";
-}
-.glyphicon-phone-alt:before {
-  content: "\e183";
-}
-.glyphicon-tower:before {
-  content: "\e184";
-}
-.glyphicon-stats:before {
-  content: "\e185";
-}
-.glyphicon-sd-video:before {
-  content: "\e186";
-}
-.glyphicon-hd-video:before {
-  content: "\e187";
-}
-.glyphicon-subtitles:before {
-  content: "\e188";
-}
-.glyphicon-sound-stereo:before {
-  content: "\e189";
-}
-.glyphicon-sound-dolby:before {
-  content: "\e190";
-}
-.glyphicon-sound-5-1:before {
-  content: "\e191";
-}
-.glyphicon-sound-6-1:before {
-  content: "\e192";
-}
-.glyphicon-sound-7-1:before {
-  content: "\e193";
-}
-.glyphicon-copyright-mark:before {
-  content: "\e194";
-}
-.glyphicon-registration-mark:before {
-  content: "\e195";
-}
-.glyphicon-cloud-download:before {
-  content: "\e197";
-}
-.glyphicon-cloud-upload:before {
-  content: "\e198";
-}
-.glyphicon-tree-conifer:before {
-  content: "\e199";
-}
-.glyphicon-tree-deciduous:before {
-  content: "\e200";
-}
-.glyphicon-cd:before {
-  content: "\e201";
-}
-.glyphicon-save-file:before {
-  content: "\e202";
-}
-.glyphicon-open-file:before {
-  content: "\e203";
-}
-.glyphicon-level-up:before {
-  content: "\e204";
-}
-.glyphicon-copy:before {
-  content: "\e205";
-}
-.glyphicon-paste:before {
-  content: "\e206";
-}
-.glyphicon-alert:before {
-  content: "\e209";
-}
-.glyphicon-equalizer:before {
-  content: "\e210";
-}
-.glyphicon-king:before {
-  content: "\e211";
-}
-.glyphicon-queen:before {
-  content: "\e212";
-}
-.glyphicon-pawn:before {
-  content: "\e213";
-}
-.glyphicon-bishop:before {
-  content: "\e214";
-}
-.glyphicon-knight:before {
-  content: "\e215";
-}
-.glyphicon-baby-formula:before {
-  content: "\e216";
-}
-.glyphicon-tent:before {
-  content: "\26fa";
-}
-.glyphicon-blackboard:before {
-  content: "\e218";
-}
-.glyphicon-bed:before {
-  content: "\e219";
-}
-.glyphicon-apple:before {
-  content: "\f8ff";
-}
-.glyphicon-erase:before {
-  content: "\e221";
-}
-.glyphicon-hourglass:before {
-  content: "\231b";
-}
-.glyphicon-lamp:before {
-  content: "\e223";
-}
-.glyphicon-duplicate:before {
-  content: "\e224";
-}
-.glyphicon-piggy-bank:before {
-  content: "\e225";
-}
-.glyphicon-scissors:before {
-  content: "\e226";
-}
-.glyphicon-bitcoin:before {
-  content: "\e227";
-}
-.glyphicon-btc:before {
-  content: "\e227";
-}
-.glyphicon-xbt:before {
-  content: "\e227";
-}
-.glyphicon-yen:before {
-  content: "\00a5";
-}
-.glyphicon-jpy:before {
-  content: "\00a5";
-}
-.glyphicon-ruble:before {
-  content: "\20bd";
-}
-.glyphicon-rub:before {
-  content: "\20bd";
-}
-.glyphicon-scale:before {
-  content: "\e230";
-}
-.glyphicon-ice-lolly:before {
-  content: "\e231";
-}
-.glyphicon-ice-lolly-tasted:before {
-  content: "\e232";
-}
-.glyphicon-education:before {
-  content: "\e233";
-}
-.glyphicon-option-horizontal:before {
-  content: "\e234";
-}
-.glyphicon-option-vertical:before {
-  content: "\e235";
-}
-.glyphicon-menu-hamburger:before {
-  content: "\e236";
-}
-.glyphicon-modal-window:before {
-  content: "\e237";
-}
-.glyphicon-oil:before {
-  content: "\e238";
-}
-.glyphicon-grain:before {
-  content: "\e239";
-}
-.glyphicon-sunglasses:before {
-  content: "\e240";
-}
-.glyphicon-text-size:before {
-  content: "\e241";
-}
-.glyphicon-text-color:before {
-  content: "\e242";
-}
-.glyphicon-text-background:before {
-  content: "\e243";
-}
-.glyphicon-object-align-top:before {
-  content: "\e244";
-}
-.glyphicon-object-align-bottom:before {
-  content: "\e245";
-}
-.glyphicon-object-align-horizontal:before {
-  content: "\e246";
-}
-.glyphicon-object-align-left:before {
-  content: "\e247";
-}
-.glyphicon-object-align-vertical:before {
-  content: "\e248";
-}
-.glyphicon-object-align-right:before {
-  content: "\e249";
-}
-.glyphicon-triangle-right:before {
-  content: "\e250";
-}
-.glyphicon-triangle-left:before {
-  content: "\e251";
-}
-.glyphicon-triangle-bottom:before {
-  content: "\e252";
-}
-.glyphicon-triangle-top:before {
-  content: "\e253";
-}
-.glyphicon-console:before {
-  content: "\e254";
-}
-.glyphicon-superscript:before {
-  content: "\e255";
-}
-.glyphicon-subscript:before {
-  content: "\e256";
-}
-.glyphicon-menu-left:before {
-  content: "\e257";
-}
-.glyphicon-menu-right:before {
-  content: "\e258";
-}
-.glyphicon-menu-down:before {
-  content: "\e259";
-}
-.glyphicon-menu-up:before {
-  content: "\e260";
-}
-* {
-  -webkit-box-sizing: border-box;
-  -moz-box-sizing: border-box;
-  box-sizing: border-box;
-}
-*:before,
-*:after {
-  -webkit-box-sizing: border-box;
-  -moz-box-sizing: border-box;
-  box-sizing: border-box;
-}
-html {
-  font-size: 10px;
-  -webkit-tap-highlight-color: rgba(0, 0, 0, 0);
-}
-body {
-  font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
-  font-size: 14px;
-  line-height: 1.42857143;
-  color: #333333;
-  background-color: #fff;
-}
-input,
-button,
-select,
-textarea {
-  font-family: inherit;
-  font-size: inherit;
-  line-height: inherit;
-}
-a {
-  color: #337ab7;
-  text-decoration: none;
-}
-a:hover,
-a:focus {
-  color: #23527c;
-  text-decoration: underline;
-}
-a:focus {
-  outline: 5px auto -webkit-focus-ring-color;
-  outline-offset: -2px;
-}
-figure {
-  margin: 0;
-}
-img {
-  vertical-align: middle;
-}
-.img-responsive,
-.thumbnail > img,
-.thumbnail a > img,
-.carousel-inner > .item > img,
-.carousel-inner > .item > a > img {
-  display: block;
-  max-width: 100%;
-  height: auto;
-}
-.img-rounded {
-  border-radius: 6px;
-}
-.img-thumbnail {
-  padding: 4px;
-  line-height: 1.42857143;
-  background-color: #fff;
-  border: 1px solid #ddd;
-  border-radius: 4px;
-  -webkit-transition: all 0.2s ease-in-out;
-  -o-transition: all 0.2s ease-in-out;
-  transition: all 0.2s ease-in-out;
-  display: inline-block;
-  max-width: 100%;
-  height: auto;
-}
-.img-circle {
-  border-radius: 50%;
-}
-hr {
-  margin-top: 20px;
-  margin-bottom: 20px;
-  border: 0;
-  border-top: 1px solid #eeeeee;
-}
-.sr-only {
-  position: absolute;
-  width: 1px;
-  height: 1px;
-  padding: 0;
-  margin: -1px;
-  overflow: hidden;
-  clip: rect(0, 0, 0, 0);
-  border: 0;
-}
-.sr-only-focusable:active,
-.sr-only-focusable:focus {
-  position: static;
-  width: auto;
-  height: auto;
-  margin: 0;
-  overflow: visible;
-  clip: auto;
-}
-[role="button"] {
-  cursor: pointer;
-}
-h1,
-h2,
-h3,
-h4,
-h5,
-h6,
-.h1,
-.h2,
-.h3,
-.h4,
-.h5,
-.h6 {
-  font-family: inherit;
-  font-weight: 500;
-  line-height: 1.1;
-  color: inherit;
-}
-h1 small,
-h2 small,
-h3 small,
-h4 small,
-h5 small,
-h6 small,
-.h1 small,
-.h2 small,
-.h3 small,
-.h4 small,
-.h5 small,
-.h6 small,
-h1 .small,
-h2 .small,
-h3 .small,
-h4 .small,
-h5 .small,
-h6 .small,
-.h1 .small,
-.h2 .small,
-.h3 .small,
-.h4 .small,
-.h5 .small,
-.h6 .small {
-  font-weight: 400;
-  line-height: 1;
-  color: #777777;
-}
-h1,
-.h1,
-h2,
-.h2,
-h3,
-.h3 {
-  margin-top: 20px;
-  margin-bottom: 10px;
-}
-h1 small,
-.h1 small,
-h2 small,
-.h2 small,
-h3 small,
-.h3 small,
-h1 .small,
-.h1 .small,
-h2 .small,
-.h2 .small,
-h3 .small,
-.h3 .small {
-  font-size: 65%;
-}
-h4,
-.h4,
-h5,
-.h5,
-h6,
-.h6 {
-  margin-top: 10px;
-  margin-bottom: 10px;
-}
-h4 small,
-.h4 small,
-h5 small,
-.h5 small,
-h6 small,
-.h6 small,
-h4 .small,
-.h4 .small,
-h5 .small,
-.h5 .small,
-h6 .small,
-.h6 .small {
-  font-size: 75%;
-}
-h1,
-.h1 {
-  font-size: 36px;
-}
-h2,
-.h2 {
-  font-size: 30px;
-}
-h3,
-.h3 {
-  font-size: 24px;
-}
-h4,
-.h4 {
-  font-size: 18px;
-}
-h5,
-.h5 {
-  font-size: 14px;
-}
-h6,
-.h6 {
-  font-size: 12px;
-}
-p {
-  margin: 0 0 10px;
-}
-.lead {
-  margin-bottom: 20px;
-  font-size: 16px;
-  font-weight: 300;
-  line-height: 1.4;
-}
-@media (min-width: 768px) {
-  .lead {
-    font-size: 21px;
-  }
-}
-small,
-.small {
-  font-size: 85%;
-}
-mark,
-.mark {
-  padding: 0.2em;
-  background-color: #fcf8e3;
-}
-.text-left {
-  text-align: left;
-}
-.text-right {
-  text-align: right;
-}
-.text-center {
-  text-align: center;
-}
-.text-justify {
-  text-align: justify;
-}
-.text-nowrap {
-  white-space: nowrap;
-}
-.text-lowercase {
-  text-transform: lowercase;
-}
-.text-uppercase {
-  text-transform: uppercase;
-}
-.text-capitalize {
-  text-transform: capitalize;
-}
-.text-muted {
-  color: #777777;
-}
-.text-primary {
-  color: #337ab7;
-}
-a.text-primary:hover,
-a.text-primary:focus {
-  color: #286090;
-}
-.text-success {
-  color: #3c763d;
-}
-a.text-success:hover,
-a.text-success:focus {
-  color: #2b542c;
-}
-.text-info {
-  color: #31708f;
-}
-a.text-info:hover,
-a.text-info:focus {
-  color: #245269;
-}
-.text-warning {
-  color: #8a6d3b;
-}
-a.text-warning:hover,
-a.text-warning:focus {
-  color: #66512c;
-}
-.text-danger {
-  color: #a94442;
-}
-a.text-danger:hover,
-a.text-danger:focus {
-  color: #843534;
-}
-.bg-primary {
-  color: #fff;
-  background-color: #337ab7;
-}
-a.bg-primary:hover,
-a.bg-primary:focus {
-  background-color: #286090;
-}
-.bg-success {
-  background-color: #dff0d8;
-}
-a.bg-success:hover,
-a.bg-success:focus {
-  background-color: #c1e2b3;
-}
-.bg-info {
-  background-color: #d9edf7;
-}
-a.bg-info:hover,
-a.bg-info:focus {
-  background-color: #afd9ee;
-}
-.bg-warning {
-  background-color: #fcf8e3;
-}
-a.bg-warning:hover,
-a.bg-warning:focus {
-  background-color: #f7ecb5;
-}
-.bg-danger {
-  background-color: #f2dede;
-}
-a.bg-danger:hover,
-a.bg-danger:focus {
-  background-color: #e4b9b9;
-}
-.page-header {
-  padding-bottom: 9px;
-  margin: 40px 0 20px;
-  border-bottom: 1px solid #eeeeee;
-}
-ul,
-ol {
-  margin-top: 0;
-  margin-bottom: 10px;
-}
-ul ul,
-ol ul,
-ul ol,
-ol ol {
-  margin-bottom: 0;
-}
-.list-unstyled {
-  padding-left: 0;
-  list-style: none;
-}
-.list-inline {
-  padding-left: 0;
-  list-style: none;
-  margin-left: -5px;
-}
-.list-inline > li {
-  display: inline-block;
-  padding-right: 5px;
-  padding-left: 5px;
-}
-dl {
-  margin-top: 0;
-  margin-bottom: 20px;
-}
-dt,
-dd {
-  line-height: 1.42857143;
-}
-dt {
-  font-weight: 700;
-}
-dd {
-  margin-left: 0;
-}
-@media (min-width: 768px) {
-  .dl-horizontal dt {
-    float: left;
-    width: 160px;
-    clear: left;
-    text-align: right;
-    overflow: hidden;
-    text-overflow: ellipsis;
-    white-space: nowrap;
-  }
-  .dl-horizontal dd {
-    margin-left: 180px;
-  }
-}
-abbr[title],
-abbr[data-original-title] {
-  cursor: help;
-}
-.initialism {
-  font-size: 90%;
-  text-transform: uppercase;
-}
-blockquote {
-  padding: 10px 20px;
-  margin: 0 0 20px;
-  font-size: 17.5px;
-  border-left: 5px solid #eeeeee;
-}
-blockquote p:last-child,
-blockquote ul:last-child,
-blockquote ol:last-child {
-  margin-bottom: 0;
-}
-blockquote footer,
-blockquote small,
-blockquote .small {
-  display: block;
-  font-size: 80%;
-  line-height: 1.42857143;
-  color: #777777;
-}
-blockquote footer:before,
-blockquote small:before,
-blockquote .small:before {
-  content: "\2014 \00A0";
-}
-.blockquote-reverse,
-blockquote.pull-right {
-  padding-right: 15px;
-  padding-left: 0;
-  text-align: right;
-  border-right: 5px solid #eeeeee;
-  border-left: 0;
-}
-.blockquote-reverse footer:before,
-blockquote.pull-right footer:before,
-.blockquote-reverse small:before,
-blockquote.pull-right small:before,
-.blockquote-reverse .small:before,
-blockquote.pull-right .small:before {
-  content: "";
-}
-.blockquote-reverse footer:after,
-blockquote.pull-right footer:after,
-.blockquote-reverse small:after,
-blockquote.pull-right small:after,
-.blockquote-reverse .small:after,
-blockquote.pull-right .small:after {
-  content: "\00A0 \2014";
-}
-address {
-  margin-bottom: 20px;
-  font-style: normal;
-  line-height: 1.42857143;
-}
-code,
-kbd,
-pre,
-samp {
-  font-family: Menlo, Monaco, Consolas, "Courier New", monospace;
-}
-code {
-  padding: 2px 4px;
-  font-size: 90%;
-  color: #c7254e;
-  background-color: #f9f2f4;
-  border-radius: 4px;
-}
-kbd {
-  padding: 2px 4px;
-  font-size: 90%;
-  color: #fff;
-  background-color: #333;
-  border-radius: 3px;
-  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25);
-  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25);
-}
-kbd kbd {
-  padding: 0;
-  font-size: 100%;
-  font-weight: 700;
-  -webkit-box-shadow: none;
-  box-shadow: none;
-}
-pre {
-  display: block;
-  padding: 9.5px;
-  margin: 0 0 10px;
-  font-size: 13px;
-  line-height: 1.42857143;
-  color: #333333;
-  word-break: break-all;
-  word-wrap: break-word;
-  background-color: #f5f5f5;
-  border: 1px solid #ccc;
-  border-radius: 4px;
-}
-pre code {
-  padding: 0;
-  font-size: inherit;
-  color: inherit;
-  white-space: pre-wrap;
-  background-color: transparent;
-  border-radius: 0;
-}
-.pre-scrollable {
-  max-height: 340px;
-  overflow-y: scroll;
-}
-.container {
-  padding-right: 15px;
-  padding-left: 15px;
-  margin-right: auto;
-  margin-left: auto;
-}
-@media (min-width: 768px) {
-  .container {
-    width: 750px;
-  }
-}
-@media (min-width: 992px) {
-  .container {
-    width: 970px;
-  }
-}
-@media (min-width: 1200px) {
-  .container {
-    width: 1170px;
-  }
-}
-.container-fluid {
-  padding-right: 15px;
-  padding-left: 15px;
-  margin-right: auto;
-  margin-left: auto;
-}
-.row {
-  margin-right: -15px;
-  margin-left: -15px;
-}
-.row-no-gutters {
-  margin-right: 0;
-  margin-left: 0;
-}
-.row-no-gutters [class*="col-"] {
-  padding-right: 0;
-  padding-left: 0;
-}
-.col-xs-1,
-.col-sm-1,
-.col-md-1,
-.col-lg-1,
-.col-xs-2,
-.col-sm-2,
-.col-md-2,
-.col-lg-2,
-.col-xs-3,
-.col-sm-3,
-.col-md-3,
-.col-lg-3,
-.col-xs-4,
-.col-sm-4,
-.col-md-4,
-.col-lg-4,
-.col-xs-5,
-.col-sm-5,
-.col-md-5,
-.col-lg-5,
-.col-xs-6,
-.col-sm-6,
-.col-md-6,
-.col-lg-6,
-.col-xs-7,
-.col-sm-7,
-.col-md-7,
-.col-lg-7,
-.col-xs-8,
-.col-sm-8,
-.col-md-8,
-.col-lg-8,
-.col-xs-9,
-.col-sm-9,
-.col-md-9,
-.col-lg-9,
-.col-xs-10,
-.col-sm-10,
-.col-md-10,
-.col-lg-10,
-.col-xs-11,
-.col-sm-11,
-.col-md-11,
-.col-lg-11,
-.col-xs-12,
-.col-sm-12,
-.col-md-12,
-.col-lg-12 {
-  position: relative;
-  min-height: 1px;
-  padding-right: 15px;
-  padding-left: 15px;
-}
-.col-xs-1,
-.col-xs-2,
-.col-xs-3,
-.col-xs-4,
-.col-xs-5,
-.col-xs-6,
-.col-xs-7,
-.col-xs-8,
-.col-xs-9,
-.col-xs-10,
-.col-xs-11,
-.col-xs-12 {
-  float: left;
-}
-.col-xs-12 {
-  width: 100%;
-}
-.col-xs-11 {
-  width: 91.66666667%;
-}
-.col-xs-10 {
-  width: 83.33333333%;
-}
-.col-xs-9 {
-  width: 75%;
-}
-.col-xs-8 {
-  width: 66.66666667%;
-}
-.col-xs-7 {
-  width: 58.33333333%;
-}
-.col-xs-6 {
-  width: 50%;
-}
-.col-xs-5 {
-  width: 41.66666667%;
-}
-.col-xs-4 {
-  width: 33.33333333%;
-}
-.col-xs-3 {
-  width: 25%;
-}
-.col-xs-2 {
-  width: 16.66666667%;
-}
-.col-xs-1 {
-  width: 8.33333333%;
-}
-.col-xs-pull-12 {
-  right: 100%;
-}
-.col-xs-pull-11 {
-  right: 91.66666667%;
-}
-.col-xs-pull-10 {
-  right: 83.33333333%;
-}
-.col-xs-pull-9 {
-  right: 75%;
-}
-.col-xs-pull-8 {
-  right: 66.66666667%;
-}
-.col-xs-pull-7 {
-  right: 58.33333333%;
-}
-.col-xs-pull-6 {
-  right: 50%;
-}
-.col-xs-pull-5 {
-  right: 41.66666667%;
-}
-.col-xs-pull-4 {
-  right: 33.33333333%;
-}
-.col-xs-pull-3 {
-  right: 25%;
-}
-.col-xs-pull-2 {
-  right: 16.66666667%;
-}
-.col-xs-pull-1 {
-  right: 8.33333333%;
-}
-.col-xs-pull-0 {
-  right: auto;
-}
-.col-xs-push-12 {
-  left: 100%;
-}
-.col-xs-push-11 {
-  left: 91.66666667%;
-}
-.col-xs-push-10 {
-  left: 83.33333333%;
-}
-.col-xs-push-9 {
-  left: 75%;
-}
-.col-xs-push-8 {
-  left: 66.66666667%;
-}
-.col-xs-push-7 {
-  left: 58.33333333%;
-}
-.col-xs-push-6 {
-  left: 50%;
-}
-.col-xs-push-5 {
-  left: 41.66666667%;
-}
-.col-xs-push-4 {
-  left: 33.33333333%;
-}
-.col-xs-push-3 {
-  left: 25%;
-}
-.col-xs-push-2 {
-  left: 16.66666667%;
-}
-.col-xs-push-1 {
-  left: 8.33333333%;
-}
-.col-xs-push-0 {
-  left: auto;
-}
-.col-xs-offset-12 {
-  margin-left: 100%;
-}
-.col-xs-offset-11 {
-  margin-left: 91.66666667%;
-}
-.col-xs-offset-10 {
-  margin-left: 83.33333333%;
-}
-.col-xs-offset-9 {
-  margin-left: 75%;
-}
-.col-xs-offset-8 {
-  margin-left: 66.66666667%;
-}
-.col-xs-offset-7 {
-  margin-left: 58.33333333%;
-}
-.col-xs-offset-6 {
-  margin-left: 50%;
-}
-.col-xs-offset-5 {
-  margin-left: 41.66666667%;
-}
-.col-xs-offset-4 {
-  margin-left: 33.33333333%;
-}
-.col-xs-offset-3 {
-  margin-left: 25%;
-}
-.col-xs-offset-2 {
-  margin-left: 16.66666667%;
-}
-.col-xs-offset-1 {
-  margin-left: 8.33333333%;
-}
-.col-xs-offset-0 {
-  margin-left: 0%;
-}
-@media (min-width: 768px) {
-  .col-sm-1,
-  .col-sm-2,
-  .col-sm-3,
-  .col-sm-4,
-  .col-sm-5,
-  .col-sm-6,
-  .col-sm-7,
-  .col-sm-8,
-  .col-sm-9,
-  .col-sm-10,
-  .col-sm-11,
-  .col-sm-12 {
-    float: left;
-  }
-  .col-sm-12 {
-    width: 100%;
-  }
-  .col-sm-11 {
-    width: 91.66666667%;
-  }
-  .col-sm-10 {
-    width: 83.33333333%;
-  }
-  .col-sm-9 {
-    width: 75%;
-  }
-  .col-sm-8 {
-    width: 66.66666667%;
-  }
-  .col-sm-7 {
-    width: 58.33333333%;
-  }
-  .col-sm-6 {
-    width: 50%;
-  }
-  .col-sm-5 {
-    width: 41.66666667%;
-  }
-  .col-sm-4 {
-    width: 33.33333333%;
-  }
-  .col-sm-3 {
-    width: 25%;
-  }
-  .col-sm-2 {
-    width: 16.66666667%;
-  }
-  .col-sm-1 {
-    width: 8.33333333%;
-  }
-  .col-sm-pull-12 {
-    right: 100%;
-  }
-  .col-sm-pull-11 {
-    right: 91.66666667%;
-  }
-  .col-sm-pull-10 {
-    right: 83.33333333%;
-  }
-  .col-sm-pull-9 {
-    right: 75%;
-  }
-  .col-sm-pull-8 {
-    right: 66.66666667%;
-  }
-  .col-sm-pull-7 {
-    right: 58.33333333%;
-  }
-  .col-sm-pull-6 {
-    right: 50%;
-  }
-  .col-sm-pull-5 {
-    right: 41.66666667%;
-  }
-  .col-sm-pull-4 {
-    right: 33.33333333%;
-  }
-  .col-sm-pull-3 {
-    right: 25%;
-  }
-  .col-sm-pull-2 {
-    right: 16.66666667%;
-  }
-  .col-sm-pull-1 {
-    right: 8.33333333%;
-  }
-  .col-sm-pull-0 {
-    right: auto;
-  }
-  .col-sm-push-12 {
-    left: 100%;
-  }
-  .col-sm-push-11 {
-    left: 91.66666667%;
-  }
-  .col-sm-push-10 {
-    left: 83.33333333%;
-  }
-  .col-sm-push-9 {
-    left: 75%;
-  }
-  .col-sm-push-8 {
-    left: 66.66666667%;
-  }
-  .col-sm-push-7 {
-    left: 58.33333333%;
-  }
-  .col-sm-push-6 {
-    left: 50%;
-  }
-  .col-sm-push-5 {
-    left: 41.66666667%;
-  }
-  .col-sm-push-4 {
-    left: 33.33333333%;
-  }
-  .col-sm-push-3 {
-    left: 25%;
-  }
-  .col-sm-push-2 {
-    left: 16.66666667%;
-  }
-  .col-sm-push-1 {
-    left: 8.33333333%;
-  }
-  .col-sm-push-0 {
-    left: auto;
-  }
-  .col-sm-offset-12 {
-    margin-left: 100%;
-  }
-  .col-sm-offset-11 {
-    margin-left: 91.66666667%;
-  }
-  .col-sm-offset-10 {
-    margin-left: 83.33333333%;
-  }
-  .col-sm-offset-9 {
-    margin-left: 75%;
-  }
-  .col-sm-offset-8 {
-    margin-left: 66.66666667%;
-  }
-  .col-sm-offset-7 {
-    margin-left: 58.33333333%;
-  }
-  .col-sm-offset-6 {
-    margin-left: 50%;
-  }
-  .col-sm-offset-5 {
-    margin-left: 41.66666667%;
-  }
-  .col-sm-offset-4 {
-    margin-left: 33.33333333%;
-  }
-  .col-sm-offset-3 {
-    margin-left: 25%;
-  }
-  .col-sm-offset-2 {
-    margin-left: 16.66666667%;
-  }
-  .col-sm-offset-1 {
-    margin-left: 8.33333333%;
-  }
-  .col-sm-offset-0 {
-    margin-left: 0%;
-  }
-}
-@media (min-width: 992px) {
-  .col-md-1,
-  .col-md-2,
-  .col-md-3,
-  .col-md-4,
-  .col-md-5,
-  .col-md-6,
-  .col-md-7,
-  .col-md-8,
-  .col-md-9,
-  .col-md-10,
-  .col-md-11,
-  .col-md-12 {
-    float: left;
-  }
-  .col-md-12 {
-    width: 100%;
-  }
-  .col-md-11 {
-    width: 91.66666667%;
-  }
-  .col-md-10 {
-    width: 83.33333333%;
-  }
-  .col-md-9 {
-    width: 75%;
-  }
-  .col-md-8 {
-    width: 66.66666667%;
-  }
-  .col-md-7 {
-    width: 58.33333333%;
-  }
-  .col-md-6 {
-    width: 50%;
-  }
-  .col-md-5 {
-    width: 41.66666667%;
-  }
-  .col-md-4 {
-    width: 33.33333333%;
-  }
-  .col-md-3 {
-    width: 25%;
-  }
-  .col-md-2 {
-    width: 16.66666667%;
-  }
-  .col-md-1 {
-    width: 8.33333333%;
-  }
-  .col-md-pull-12 {
-    right: 100%;
-  }
-  .col-md-pull-11 {
-    right: 91.66666667%;
-  }
-  .col-md-pull-10 {
-    right: 83.33333333%;
-  }
-  .col-md-pull-9 {
-    right: 75%;
-  }
-  .col-md-pull-8 {
-    right: 66.66666667%;
-  }
-  .col-md-pull-7 {
-    right: 58.33333333%;
-  }
-  .col-md-pull-6 {
-    right: 50%;
-  }
-  .col-md-pull-5 {
-    right: 41.66666667%;
-  }
-  .col-md-pull-4 {
-    right: 33.33333333%;
-  }
-  .col-md-pull-3 {
-    right: 25%;
-  }
-  .col-md-pull-2 {
-    right: 16.66666667%;
-  }
-  .col-md-pull-1 {
-    right: 8.33333333%;
-  }
-  .col-md-pull-0 {
-    right: auto;
-  }
-  .col-md-push-12 {
-    left: 100%;
-  }
-  .col-md-push-11 {
-    left: 91.66666667%;
-  }
-  .col-md-push-10 {
-    left: 83.33333333%;
-  }
-  .col-md-push-9 {
-    left: 75%;
-  }
-  .col-md-push-8 {
-    left: 66.66666667%;
-  }
-  .col-md-push-7 {
-    left: 58.33333333%;
-  }
-  .col-md-push-6 {
-    left: 50%;
-  }
-  .col-md-push-5 {
-    left: 41.66666667%;
-  }
-  .col-md-push-4 {
-    left: 33.33333333%;
-  }
-  .col-md-push-3 {
-    left: 25%;
-  }
-  .col-md-push-2 {
-    left: 16.66666667%;
-  }
-  .col-md-push-1 {
-    left: 8.33333333%;
-  }
-  .col-md-push-0 {
-    left: auto;
-  }
-  .col-md-offset-12 {
-    margin-left: 100%;
-  }
-  .col-md-offset-11 {
-    margin-left: 91.66666667%;
-  }
-  .col-md-offset-10 {
-    margin-left: 83.33333333%;
-  }
-  .col-md-offset-9 {
-    margin-left: 75%;
-  }
-  .col-md-offset-8 {
-    margin-left: 66.66666667%;
-  }
-  .col-md-offset-7 {
-    margin-left: 58.33333333%;
-  }
-  .col-md-offset-6 {
-    margin-left: 50%;
-  }
-  .col-md-offset-5 {
-    margin-left: 41.66666667%;
-  }
-  .col-md-offset-4 {
-    margin-left: 33.33333333%;
-  }
-  .col-md-offset-3 {
-    margin-left: 25%;
-  }
-  .col-md-offset-2 {
-    margin-left: 16.66666667%;
-  }
-  .col-md-offset-1 {
-    margin-left: 8.33333333%;
-  }
-  .col-md-offset-0 {
-    margin-left: 0%;
-  }
-}
-@media (min-width: 1200px) {
-  .col-lg-1,
-  .col-lg-2,
-  .col-lg-3,
-  .col-lg-4,
-  .col-lg-5,
-  .col-lg-6,
-  .col-lg-7,
-  .col-lg-8,
-  .col-lg-9,
-  .col-lg-10,
-  .col-lg-11,
-  .col-lg-12 {
-    float: left;
-  }
-  .col-lg-12 {
-    width: 100%;
-  }
-  .col-lg-11 {
-    width: 91.66666667%;
-  }
-  .col-lg-10 {
-    width: 83.33333333%;
-  }
-  .col-lg-9 {
-    width: 75%;
-  }
-  .col-lg-8 {
-    width: 66.66666667%;
-  }
-  .col-lg-7 {
-    width: 58.33333333%;
-  }
-  .col-lg-6 {
-    width: 50%;
-  }
-  .col-lg-5 {
-    width: 41.66666667%;
-  }
-  .col-lg-4 {
-    width: 33.33333333%;
-  }
-  .col-lg-3 {
-    width: 25%;
-  }
-  .col-lg-2 {
-    width: 16.66666667%;
-  }
-  .col-lg-1 {
-    width: 8.33333333%;
-  }
-  .col-lg-pull-12 {
-    right: 100%;
-  }
-  .col-lg-pull-11 {
-    right: 91.66666667%;
-  }
-  .col-lg-pull-10 {
-    right: 83.33333333%;
-  }
-  .col-lg-pull-9 {
-    right: 75%;
-  }
-  .col-lg-pull-8 {
-    right: 66.66666667%;
-  }
-  .col-lg-pull-7 {
-    right: 58.33333333%;
-  }
-  .col-lg-pull-6 {
-    right: 50%;
-  }
-  .col-lg-pull-5 {
-    right: 41.66666667%;
-  }
-  .col-lg-pull-4 {
-    right: 33.33333333%;
-  }
-  .col-lg-pull-3 {
-    right: 25%;
-  }
-  .col-lg-pull-2 {
-    right: 16.66666667%;
-  }
-  .col-lg-pull-1 {
-    right: 8.33333333%;
-  }
-  .col-lg-pull-0 {
-    right: auto;
-  }
-  .col-lg-push-12 {
-    left: 100%;
-  }
-  .col-lg-push-11 {
-    left: 91.66666667%;
-  }
-  .col-lg-push-10 {
-    left: 83.33333333%;
-  }
-  .col-lg-push-9 {
-    left: 75%;
-  }
-  .col-lg-push-8 {
-    left: 66.66666667%;
-  }
-  .col-lg-push-7 {
-    left: 58.33333333%;
-  }
-  .col-lg-push-6 {
-    left: 50%;
-  }
-  .col-lg-push-5 {
-    left: 41.66666667%;
-  }
-  .col-lg-push-4 {
-    left: 33.33333333%;
-  }
-  .col-lg-push-3 {
-    left: 25%;
-  }
-  .col-lg-push-2 {
-    left: 16.66666667%;
-  }
-  .col-lg-push-1 {
-    left: 8.33333333%;
-  }
-  .col-lg-push-0 {
-    left: auto;
-  }
-  .col-lg-offset-12 {
-    margin-left: 100%;
-  }
-  .col-lg-offset-11 {
-    margin-left: 91.66666667%;
-  }
-  .col-lg-offset-10 {
-    margin-left: 83.33333333%;
-  }
-  .col-lg-offset-9 {
-    margin-left: 75%;
-  }
-  .col-lg-offset-8 {
-    margin-left: 66.66666667%;
-  }
-  .col-lg-offset-7 {
-    margin-left: 58.33333333%;
-  }
-  .col-lg-offset-6 {
-    margin-left: 50%;
-  }
-  .col-lg-offset-5 {
-    margin-left: 41.66666667%;
-  }
-  .col-lg-offset-4 {
-    margin-left: 33.33333333%;
-  }
-  .col-lg-offset-3 {
-    margin-left: 25%;
-  }
-  .col-lg-offset-2 {
-    margin-left: 16.66666667%;
-  }
-  .col-lg-offset-1 {
-    margin-left: 8.33333333%;
-  }
-  .col-lg-offset-0 {
-    margin-left: 0%;
-  }
-}
-table {
-  background-color: transparent;
-}
-table col[class*="col-"] {
-  position: static;
-  display: table-column;
-  float: none;
-}
-table td[class*="col-"],
-table th[class*="col-"] {
-  position: static;
-  display: table-cell;
-  float: none;
-}
-caption {
-  padding-top: 8px;
-  padding-bottom: 8px;
-  color: #777777;
-  text-align: left;
-}
-th {
-  text-align: left;
-}
-.table {
-  width: 100%;
-  max-width: 100%;
-  margin-bottom: 20px;
-}
-.table > thead > tr > th,
-.table > tbody > tr > th,
-.table > tfoot > tr > th,
-.table > thead > tr > td,
-.table > tbody > tr > td,
-.table > tfoot > tr > td {
-  padding: 8px;
-  line-height: 1.42857143;
-  vertical-align: top;
-  border-top: 1px solid #ddd;
-}
-.table > thead > tr > th {
-  vertical-align: bottom;
-  border-bottom: 2px solid #ddd;
-}
-.table > caption + thead > tr:first-child > th,
-.table > colgroup + thead > tr:first-child > th,
-.table > thead:first-child > tr:first-child > th,
-.table > caption + thead > tr:first-child > td,
-.table > colgroup + thead > tr:first-child > td,
-.table > thead:first-child > tr:first-child > td {
-  border-top: 0;
-}
-.table > tbody + tbody {
-  border-top: 2px solid #ddd;
-}
-.table .table {
-  background-color: #fff;
-}
-.table-condensed > thead > tr > th,
-.table-condensed > tbody > tr > th,
-.table-condensed > tfoot > tr > th,
-.table-condensed > thead > tr > td,
-.table-condensed > tbody > tr > td,
-.table-condensed > tfoot > tr > td {
-  padding: 5px;
-}
-.table-bordered {
-  border: 1px solid #ddd;
-}
-.table-bordered > thead > tr > th,
-.table-bordered > tbody > tr > th,
-.table-bordered > tfoot > tr > th,
-.table-bordered > thead > tr > td,
-.table-bordered > tbody > tr > td,
-.table-bordered > tfoot > tr > td {
-  border: 1px solid #ddd;
-}
-.table-bordered > thead > tr > th,
-.table-bordered > thead > tr > td {
-  border-bottom-width: 2px;
-}
-.table-striped > tbody > tr:nth-of-type(odd) {
-  background-color: #f9f9f9;
-}
-.table-hover > tbody > tr:hover {
-  background-color: #f5f5f5;
-}
-.table > thead > tr > td.active,
-.table > tbody > tr > td.active,
-.table > tfoot > tr > td.active,
-.table > thead > tr > th.active,
-.table > tbody > tr > th.active,
-.table > tfoot > tr > th.active,
-.table > thead > tr.active > td,
-.table > tbody > tr.active > td,
-.table > tfoot > tr.active > td,
-.table > thead > tr.active > th,
-.table > tbody > tr.active > th,
-.table > tfoot > tr.active > th {
-  background-color: #f5f5f5;
-}
-.table-hover > tbody > tr > td.active:hover,
-.table-hover > tbody > tr > th.active:hover,
-.table-hover > tbody > tr.active:hover > td,
-.table-hover > tbody > tr:hover > .active,
-.table-hover > tbody > tr.active:hover > th {
-  background-color: #e8e8e8;
-}
-.table > thead > tr > td.success,
-.table > tbody > tr > td.success,
-.table > tfoot > tr > td.success,
-.table > thead > tr > th.success,
-.table > tbody > tr > th.success,
-.table > tfoot > tr > th.success,
-.table > thead > tr.success > td,
-.table > tbody > tr.success > td,
-.table > tfoot > tr.success > td,
-.table > thead > tr.success > th,
-.table > tbody > tr.success > th,
-.table > tfoot > tr.success > th {
-  background-color: #dff0d8;
-}
-.table-hover > tbody > tr > td.success:hover,
-.table-hover > tbody > tr > th.success:hover,
-.table-hover > tbody > tr.success:hover > td,
-.table-hover > tbody > tr:hover > .success,
-.table-hover > tbody > tr.success:hover > th {
-  background-color: #d0e9c6;
-}
-.table > thead > tr > td.info,
-.table > tbody > tr > td.info,
-.table > tfoot > tr > td.info,
-.table > thead > tr > th.info,
-.table > tbody > tr > th.info,
-.table > tfoot > tr > th.info,
-.table > thead > tr.info > td,
-.table > tbody > tr.info > td,
-.table > tfoot > tr.info > td,
-.table > thead > tr.info > th,
-.table > tbody > tr.info > th,
-.table > tfoot > tr.info > th {
-  background-color: #d9edf7;
-}
-.table-hover > tbody > tr > td.info:hover,
-.table-hover > tbody > tr > th.info:hover,
-.table-hover > tbody > tr.info:hover > td,
-.table-hover > tbody > tr:hover > .info,
-.table-hover > tbody > tr.info:hover > th {
-  background-color: #c4e3f3;
-}
-.table > thead > tr > td.warning,
-.table > tbody > tr > td.warning,
-.table > tfoot > tr > td.warning,
-.table > thead > tr > th.warning,
-.table > tbody > tr > th.warning,
-.table > tfoot > tr > th.warning,
-.table > thead > tr.warning > td,
-.table > tbody > tr.warning > td,
-.table > tfoot > tr.warning > td,
-.table > thead > tr.warning > th,
-.table > tbody > tr.warning > th,
-.table > tfoot > tr.warning > th {
-  background-color: #fcf8e3;
-}
-.table-hover > tbody > tr > td.warning:hover,
-.table-hover > tbody > tr > th.warning:hover,
-.table-hover > tbody > tr.warning:hover > td,
-.table-hover > tbody > tr:hover > .warning,
-.table-hover > tbody > tr.warning:hover > th {
-  background-color: #faf2cc;
-}
-.table > thead > tr > td.danger,
-.table > tbody > tr > td.danger,
-.table > tfoot > tr > td.danger,
-.table > thead > tr > th.danger,
-.table > tbody > tr > th.danger,
-.table > tfoot > tr > th.danger,
-.table > thead > tr.danger > td,
-.table > tbody > tr.danger > td,
-.table > tfoot > tr.danger > td,
-.table > thead > tr.danger > th,
-.table > tbody > tr.danger > th,
-.table > tfoot > tr.danger > th {
-  background-color: #f2dede;
-}
-.table-hover > tbody > tr > td.danger:hover,
-.table-hover > tbody > tr > th.danger:hover,
-.table-hover > tbody > tr.danger:hover > td,
-.table-hover > tbody > tr:hover > .danger,
-.table-hover > tbody > tr.danger:hover > th {
-  background-color: #ebcccc;
-}
-.table-responsive {
-  min-height: 0.01%;
-  overflow-x: auto;
-}
-@media screen and (max-width: 767px) {
-  .table-responsive {
-    width: 100%;
-    margin-bottom: 15px;
-    overflow-y: hidden;
-    -ms-overflow-style: -ms-autohiding-scrollbar;
-    border: 1px solid #ddd;
-  }
-  .table-responsive > .table {
-    margin-bottom: 0;
-  }
-  .table-responsive > .table > thead > tr > th,
-  .table-responsive > .table > tbody > tr > th,
-  .table-responsive > .table > tfoot > tr > th,
-  .table-responsive > .table > thead > tr > td,
-  .table-responsive > .table > tbody > tr > td,
-  .table-responsive > .table > tfoot > tr > td {
-    white-space: nowrap;
-  }
-  .table-responsive > .table-bordered {
-    border: 0;
-  }
-  .table-responsive > .table-bordered > thead > tr > th:first-child,
-  .table-responsive > .table-bordered > tbody > tr > th:first-child,
-  .table-responsive > .table-bordered > tfoot > tr > th:first-child,
-  .table-responsive > .table-bordered > thead > tr > td:first-child,
-  .table-responsive > .table-bordered > tbody > tr > td:first-child,
-  .table-responsive > .table-bordered > tfoot > tr > td:first-child {
-    border-left: 0;
-  }
-  .table-responsive > .table-bordered > thead > tr > th:last-child,
-  .table-responsive > .table-bordered > tbody > tr > th:last-child,
-  .table-responsive > .table-bordered > tfoot > tr > th:last-child,
-  .table-responsive > .table-bordered > thead > tr > td:last-child,
-  .table-responsive > .table-bordered > tbody > tr > td:last-child,
-  .table-responsive > .table-bordered > tfoot > tr > td:last-child {
-    border-right: 0;
-  }
-  .table-responsive > .table-bordered > tbody > tr:last-child > th,
-  .table-responsive > .table-bordered > tfoot > tr:last-child > th,
-  .table-responsive > .table-bordered > tbody > tr:last-child > td,
-  .table-responsive > .table-bordered > tfoot > tr:last-child > td {
-    border-bottom: 0;
-  }
-}
-fieldset {
-  min-width: 0;
-  padding: 0;
-  margin: 0;
-  border: 0;
-}
-legend {
-  display: block;
-  width: 100%;
-  padding: 0;
-  margin-bottom: 20px;
-  font-size: 21px;
-  line-height: inherit;
-  color: #333333;
-  border: 0;
-  border-bottom: 1px solid #e5e5e5;
-}
-label {
-  display: inline-block;
-  max-width: 100%;
-  margin-bottom: 5px;
-  font-weight: 700;
-}
-input[type="search"] {
-  -webkit-box-sizing: border-box;
-  -moz-box-sizing: border-box;
-  box-sizing: border-box;
-  -webkit-appearance: none;
-  -moz-appearance: none;
-  appearance: none;
-}
-input[type="radio"],
-input[type="checkbox"] {
-  margin: 4px 0 0;
-  margin-top: 1px \9;
-  line-height: normal;
-}
-input[type="radio"][disabled],
-input[type="checkbox"][disabled],
-input[type="radio"].disabled,
-input[type="checkbox"].disabled,
-fieldset[disabled] input[type="radio"],
-fieldset[disabled] input[type="checkbox"] {
-  cursor: not-allowed;
-}
-input[type="file"] {
-  display: block;
-}
-input[type="range"] {
-  display: block;
-  width: 100%;
-}
-select[multiple],
-select[size] {
-  height: auto;
-}
-input[type="file"]:focus,
-input[type="radio"]:focus,
-input[type="checkbox"]:focus {
-  outline: 5px auto -webkit-focus-ring-color;
-  outline-offset: -2px;
-}
-output {
-  display: block;
-  padding-top: 7px;
-  font-size: 14px;
-  line-height: 1.42857143;
-  color: #555555;
-}
-.form-control {
-  display: block;
-  width: 100%;
-  height: 34px;
-  padding: 6px 12px;
-  font-size: 14px;
-  line-height: 1.42857143;
-  color: #555555;
-  background-color: #fff;
-  background-image: none;
-  border: 1px solid #ccc;
-  border-radius: 4px;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-  -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;
-  -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;
-  -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;
-  transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;
-  transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;
-  transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;
-}
-.form-control:focus {
-  border-color: #66afe9;
-  outline: 0;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);
-  box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);
-}
-.form-control::-moz-placeholder {
-  color: #999;
-  opacity: 1;
-}
-.form-control:-ms-input-placeholder {
-  color: #999;
-}
-.form-control::-webkit-input-placeholder {
-  color: #999;
-}
-.form-control::-ms-expand {
-  background-color: transparent;
-  border: 0;
-}
-.form-control[disabled],
-.form-control[readonly],
-fieldset[disabled] .form-control {
-  background-color: #eeeeee;
-  opacity: 1;
-}
-.form-control[disabled],
-fieldset[disabled] .form-control {
-  cursor: not-allowed;
-}
-textarea.form-control {
-  height: auto;
-}
-@media screen and (-webkit-min-device-pixel-ratio: 0) {
-  input[type="date"].form-control,
-  input[type="time"].form-control,
-  input[type="datetime-local"].form-control,
-  input[type="month"].form-control {
-    line-height: 34px;
-  }
-  input[type="date"].input-sm,
-  input[type="time"].input-sm,
-  input[type="datetime-local"].input-sm,
-  input[type="month"].input-sm,
-  .input-group-sm input[type="date"],
-  .input-group-sm input[type="time"],
-  .input-group-sm input[type="datetime-local"],
-  .input-group-sm input[type="month"] {
-    line-height: 30px;
-  }
-  input[type="date"].input-lg,
-  input[type="time"].input-lg,
-  input[type="datetime-local"].input-lg,
-  input[type="month"].input-lg,
-  .input-group-lg input[type="date"],
-  .input-group-lg input[type="time"],
-  .input-group-lg input[type="datetime-local"],
-  .input-group-lg input[type="month"] {
-    line-height: 46px;
-  }
-}
-.form-group {
-  margin-bottom: 15px;
-}
-.radio,
-.checkbox {
-  position: relative;
-  display: block;
-  margin-top: 10px;
-  margin-bottom: 10px;
-}
-.radio.disabled label,
-.checkbox.disabled label,
-fieldset[disabled] .radio label,
-fieldset[disabled] .checkbox label {
-  cursor: not-allowed;
-}
-.radio label,
-.checkbox label {
-  min-height: 20px;
-  padding-left: 20px;
-  margin-bottom: 0;
-  font-weight: 400;
-  cursor: pointer;
-}
-.radio input[type="radio"],
-.radio-inline input[type="radio"],
-.checkbox input[type="checkbox"],
-.checkbox-inline input[type="checkbox"] {
-  position: absolute;
-  margin-top: 4px \9;
-  margin-left: -20px;
-}
-.radio + .radio,
-.checkbox + .checkbox {
-  margin-top: -5px;
-}
-.radio-inline,
-.checkbox-inline {
-  position: relative;
-  display: inline-block;
-  padding-left: 20px;
-  margin-bottom: 0;
-  font-weight: 400;
-  vertical-align: middle;
-  cursor: pointer;
-}
-.radio-inline.disabled,
-.checkbox-inline.disabled,
-fieldset[disabled] .radio-inline,
-fieldset[disabled] .checkbox-inline {
-  cursor: not-allowed;
-}
-.radio-inline + .radio-inline,
-.checkbox-inline + .checkbox-inline {
-  margin-top: 0;
-  margin-left: 10px;
-}
-.form-control-static {
-  min-height: 34px;
-  padding-top: 7px;
-  padding-bottom: 7px;
-  margin-bottom: 0;
-}
-.form-control-static.input-lg,
-.form-control-static.input-sm {
-  padding-right: 0;
-  padding-left: 0;
-}
-.input-sm {
-  height: 30px;
-  padding: 5px 10px;
-  font-size: 12px;
-  line-height: 1.5;
-  border-radius: 3px;
-}
-select.input-sm {
-  height: 30px;
-  line-height: 30px;
-}
-textarea.input-sm,
-select[multiple].input-sm {
-  height: auto;
-}
-.form-group-sm .form-control {
-  height: 30px;
-  padding: 5px 10px;
-  font-size: 12px;
-  line-height: 1.5;
-  border-radius: 3px;
-}
-.form-group-sm select.form-control {
-  height: 30px;
-  line-height: 30px;
-}
-.form-group-sm textarea.form-control,
-.form-group-sm select[multiple].form-control {
-  height: auto;
-}
-.form-group-sm .form-control-static {
-  height: 30px;
-  min-height: 32px;
-  padding: 6px 10px;
-  font-size: 12px;
-  line-height: 1.5;
-}
-.input-lg {
-  height: 46px;
-  padding: 10px 16px;
-  font-size: 18px;
-  line-height: 1.3333333;
-  border-radius: 6px;
-}
-select.input-lg {
-  height: 46px;
-  line-height: 46px;
-}
-textarea.input-lg,
-select[multiple].input-lg {
-  height: auto;
-}
-.form-group-lg .form-control {
-  height: 46px;
-  padding: 10px 16px;
-  font-size: 18px;
-  line-height: 1.3333333;
-  border-radius: 6px;
-}
-.form-group-lg select.form-control {
-  height: 46px;
-  line-height: 46px;
-}
-.form-group-lg textarea.form-control,
-.form-group-lg select[multiple].form-control {
-  height: auto;
-}
-.form-group-lg .form-control-static {
-  height: 46px;
-  min-height: 38px;
-  padding: 11px 16px;
-  font-size: 18px;
-  line-height: 1.3333333;
-}
-.has-feedback {
-  position: relative;
-}
-.has-feedback .form-control {
-  padding-right: 42.5px;
-}
-.form-control-feedback {
-  position: absolute;
-  top: 0;
-  right: 0;
-  z-index: 2;
-  display: block;
-  width: 34px;
-  height: 34px;
-  line-height: 34px;
-  text-align: center;
-  pointer-events: none;
-}
-.input-lg + .form-control-feedback,
-.input-group-lg + .form-control-feedback,
-.form-group-lg .form-control + .form-control-feedback {
-  width: 46px;
-  height: 46px;
-  line-height: 46px;
-}
-.input-sm + .form-control-feedback,
-.input-group-sm + .form-control-feedback,
-.form-group-sm .form-control + .form-control-feedback {
-  width: 30px;
-  height: 30px;
-  line-height: 30px;
-}
-.has-success .help-block,
-.has-success .control-label,
-.has-success .radio,
-.has-success .checkbox,
-.has-success .radio-inline,
-.has-success .checkbox-inline,
-.has-success.radio label,
-.has-success.checkbox label,
-.has-success.radio-inline label,
-.has-success.checkbox-inline label {
-  color: #3c763d;
-}
-.has-success .form-control {
-  border-color: #3c763d;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-}
-.has-success .form-control:focus {
-  border-color: #2b542c;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;
-  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;
-}
-.has-success .input-group-addon {
-  color: #3c763d;
-  background-color: #dff0d8;
-  border-color: #3c763d;
-}
-.has-success .form-control-feedback {
-  color: #3c763d;
-}
-.has-warning .help-block,
-.has-warning .control-label,
-.has-warning .radio,
-.has-warning .checkbox,
-.has-warning .radio-inline,
-.has-warning .checkbox-inline,
-.has-warning.radio label,
-.has-warning.checkbox label,
-.has-warning.radio-inline label,
-.has-warning.checkbox-inline label {
-  color: #8a6d3b;
-}
-.has-warning .form-control {
-  border-color: #8a6d3b;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-}
-.has-warning .form-control:focus {
-  border-color: #66512c;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;
-  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;
-}
-.has-warning .input-group-addon {
-  color: #8a6d3b;
-  background-color: #fcf8e3;
-  border-color: #8a6d3b;
-}
-.has-warning .form-control-feedback {
-  color: #8a6d3b;
-}
-.has-error .help-block,
-.has-error .control-label,
-.has-error .radio,
-.has-error .checkbox,
-.has-error .radio-inline,
-.has-error .checkbox-inline,
-.has-error.radio label,
-.has-error.checkbox label,
-.has-error.radio-inline label,
-.has-error.checkbox-inline label {
-  color: #a94442;
-}
-.has-error .form-control {
-  border-color: #a94442;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-}
-.has-error .form-control:focus {
-  border-color: #843534;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;
-  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;
-}
-.has-error .input-group-addon {
-  color: #a94442;
-  background-color: #f2dede;
-  border-color: #a94442;
-}
-.has-error .form-control-feedback {
-  color: #a94442;
-}
-.has-feedback label ~ .form-control-feedback {
-  top: 25px;
-}
-.has-feedback label.sr-only ~ .form-control-feedback {
-  top: 0;
-}
-.help-block {
-  display: block;
-  margin-top: 5px;
-  margin-bottom: 10px;
-  color: #737373;
-}
-@media (min-width: 768px) {
-  .form-inline .form-group {
-    display: inline-block;
-    margin-bottom: 0;
-    vertical-align: middle;
-  }
-  .form-inline .form-control {
-    display: inline-block;
-    width: auto;
-    vertical-align: middle;
-  }
-  .form-inline .form-control-static {
-    display: inline-block;
-  }
-  .form-inline .input-group {
-    display: inline-table;
-    vertical-align: middle;
-  }
-  .form-inline .input-group .input-group-addon,
-  .form-inline .input-group .input-group-btn,
-  .form-inline .input-group .form-control {
-    width: auto;
-  }
-  .form-inline .input-group > .form-control {
-    width: 100%;
-  }
-  .form-inline .control-label {
-    margin-bottom: 0;
-    vertical-align: middle;
-  }
-  .form-inline .radio,
-  .form-inline .checkbox {
-    display: inline-block;
-    margin-top: 0;
-    margin-bottom: 0;
-    vertical-align: middle;
-  }
-  .form-inline .radio label,
-  .form-inline .checkbox label {
-    padding-left: 0;
-  }
-  .form-inline .radio input[type="radio"],
-  .form-inline .checkbox input[type="checkbox"] {
-    position: relative;
-    margin-left: 0;
-  }
-  .form-inline .has-feedback .form-control-feedback {
-    top: 0;
-  }
-}
-.form-horizontal .radio,
-.form-horizontal .checkbox,
-.form-horizontal .radio-inline,
-.form-horizontal .checkbox-inline {
-  padding-top: 7px;
-  margin-top: 0;
-  margin-bottom: 0;
-}
-.form-horizontal .radio,
-.form-horizontal .checkbox {
-  min-height: 27px;
-}
-.form-horizontal .form-group {
-  margin-right: -15px;
-  margin-left: -15px;
-}
-@media (min-width: 768px) {
-  .form-horizontal .control-label {
-    padding-top: 7px;
-    margin-bottom: 0;
-    text-align: right;
-  }
-}
-.form-horizontal .has-feedback .form-control-feedback {
-  right: 15px;
-}
-@media (min-width: 768px) {
-  .form-horizontal .form-group-lg .control-label {
-    padding-top: 11px;
-    font-size: 18px;
-  }
-}
-@media (min-width: 768px) {
-  .form-horizontal .form-group-sm .control-label {
-    padding-top: 6px;
-    font-size: 12px;
-  }
-}
-.btn {
-  display: inline-block;
-  margin-bottom: 0;
-  font-weight: normal;
-  text-align: center;
-  white-space: nowrap;
-  vertical-align: middle;
-  -ms-touch-action: manipulation;
-  touch-action: manipulation;
-  cursor: pointer;
-  background-image: none;
-  border: 1px solid transparent;
-  padding: 6px 12px;
-  font-size: 14px;
-  line-height: 1.42857143;
-  border-radius: 4px;
-  -webkit-user-select: none;
-  -moz-user-select: none;
-  -ms-user-select: none;
-  user-select: none;
-}
-.btn:focus,
-.btn:active:focus,
-.btn.active:focus,
-.btn.focus,
-.btn:active.focus,
-.btn.active.focus {
-  outline: 5px auto -webkit-focus-ring-color;
-  outline-offset: -2px;
-}
-.btn:hover,
-.btn:focus,
-.btn.focus {
-  color: #333;
-  text-decoration: none;
-}
-.btn:active,
-.btn.active {
-  background-image: none;
-  outline: 0;
-  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
-  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
-}
-.btn.disabled,
-.btn[disabled],
-fieldset[disabled] .btn {
-  cursor: not-allowed;
-  filter: alpha(opacity=65);
-  opacity: 0.65;
-  -webkit-box-shadow: none;
-  box-shadow: none;
-}
-a.btn.disabled,
-fieldset[disabled] a.btn {
-  pointer-events: none;
-}
-.btn-default {
-  color: #333;
-  background-color: #fff;
-  border-color: #ccc;
-}
-.btn-default:focus,
-.btn-default.focus {
-  color: #333;
-  background-color: #e6e6e6;
-  border-color: #8c8c8c;
-}
-.btn-default:hover {
-  color: #333;
-  background-color: #e6e6e6;
-  border-color: #adadad;
-}
-.btn-default:active,
-.btn-default.active,
-.open > .dropdown-toggle.btn-default {
-  color: #333;
-  background-color: #e6e6e6;
-  background-image: none;
-  border-color: #adadad;
-}
-.btn-default:active:hover,
-.btn-default.active:hover,
-.open > .dropdown-toggle.btn-default:hover,
-.btn-default:active:focus,
-.btn-default.active:focus,
-.open > .dropdown-toggle.btn-default:focus,
-.btn-default:active.focus,
-.btn-default.active.focus,
-.open > .dropdown-toggle.btn-default.focus {
-  color: #333;
-  background-color: #d4d4d4;
-  border-color: #8c8c8c;
-}
-.btn-default.disabled:hover,
-.btn-default[disabled]:hover,
-fieldset[disabled] .btn-default:hover,
-.btn-default.disabled:focus,
-.btn-default[disabled]:focus,
-fieldset[disabled] .btn-default:focus,
-.btn-default.disabled.focus,
-.btn-default[disabled].focus,
-fieldset[disabled] .btn-default.focus {
-  background-color: #fff;
-  border-color: #ccc;
-}
-.btn-default .badge {
-  color: #fff;
-  background-color: #333;
-}
-.btn-primary {
-  color: #fff;
-  background-color: #337ab7;
-  border-color: #2e6da4;
-}
-.btn-primary:focus,
-.btn-primary.focus {
-  color: #fff;
-  background-color: #286090;
-  border-color: #122b40;
-}
-.btn-primary:hover {
-  color: #fff;
-  background-color: #286090;
-  border-color: #204d74;
-}
-.btn-primary:active,
-.btn-primary.active,
-.open > .dropdown-toggle.btn-primary {
-  color: #fff;
-  background-color: #286090;
-  background-image: none;
-  border-color: #204d74;
-}
-.btn-primary:active:hover,
-.btn-primary.active:hover,
-.open > .dropdown-toggle.btn-primary:hover,
-.btn-primary:active:focus,
-.btn-primary.active:focus,
-.open > .dropdown-toggle.btn-primary:focus,
-.btn-primary:active.focus,
-.btn-primary.active.focus,
-.open > .dropdown-toggle.btn-primary.focus {
-  color: #fff;
-  background-color: #204d74;
-  border-color: #122b40;
-}
-.btn-primary.disabled:hover,
-.btn-primary[disabled]:hover,
-fieldset[disabled] .btn-primary:hover,
-.btn-primary.disabled:focus,
-.btn-primary[disabled]:focus,
-fieldset[disabled] .btn-primary:focus,
-.btn-primary.disabled.focus,
-.btn-primary[disabled].focus,
-fieldset[disabled] .btn-primary.focus {
-  background-color: #337ab7;
-  border-color: #2e6da4;
-}
-.btn-primary .badge {
-  color: #337ab7;
-  background-color: #fff;
-}
-.btn-success {
-  color: #fff;
-  background-color: #5cb85c;
-  border-color: #4cae4c;
-}
-.btn-success:focus,
-.btn-success.focus {
-  color: #fff;
-  background-color: #449d44;
-  border-color: #255625;
-}
-.btn-success:hover {
-  color: #fff;
-  background-color: #449d44;
-  border-color: #398439;
-}
-.btn-success:active,
-.btn-success.active,
-.open > .dropdown-toggle.btn-success {
-  color: #fff;
-  background-color: #449d44;
-  background-image: none;
-  border-color: #398439;
-}
-.btn-success:active:hover,
-.btn-success.active:hover,
-.open > .dropdown-toggle.btn-success:hover,
-.btn-success:active:focus,
-.btn-success.active:focus,
-.open > .dropdown-toggle.btn-success:focus,
-.btn-success:active.focus,
-.btn-success.active.focus,
-.open > .dropdown-toggle.btn-success.focus {
-  color: #fff;
-  background-color: #398439;
-  border-color: #255625;
-}
-.btn-success.disabled:hover,
-.btn-success[disabled]:hover,
-fieldset[disabled] .btn-success:hover,
-.btn-success.disabled:focus,
-.btn-success[disabled]:focus,
-fieldset[disabled] .btn-success:focus,
-.btn-success.disabled.focus,
-.btn-success[disabled].focus,
-fieldset[disabled] .btn-success.focus {
-  background-color: #5cb85c;
-  border-color: #4cae4c;
-}
-.btn-success .badge {
-  color: #5cb85c;
-  background-color: #fff;
-}
-.btn-info {
-  color: #fff;
-  background-color: #5bc0de;
-  border-color: #46b8da;
-}
-.btn-info:focus,
-.btn-info.focus {
-  color: #fff;
-  background-color: #31b0d5;
-  border-color: #1b6d85;
-}
-.btn-info:hover {
-  color: #fff;
-  background-color: #31b0d5;
-  border-color: #269abc;
-}
-.btn-info:active,
-.btn-info.active,
-.open > .dropdown-toggle.btn-info {
-  color: #fff;
-  background-color: #31b0d5;
-  background-image: none;
-  border-color: #269abc;
-}
-.btn-info:active:hover,
-.btn-info.active:hover,
-.open > .dropdown-toggle.btn-info:hover,
-.btn-info:active:focus,
-.btn-info.active:focus,
-.open > .dropdown-toggle.btn-info:focus,
-.btn-info:active.focus,
-.btn-info.active.focus,
-.open > .dropdown-toggle.btn-info.focus {
-  color: #fff;
-  background-color: #269abc;
-  border-color: #1b6d85;
-}
-.btn-info.disabled:hover,
-.btn-info[disabled]:hover,
-fieldset[disabled] .btn-info:hover,
-.btn-info.disabled:focus,
-.btn-info[disabled]:focus,
-fieldset[disabled] .btn-info:focus,
-.btn-info.disabled.focus,
-.btn-info[disabled].focus,
-fieldset[disabled] .btn-info.focus {
-  background-color: #5bc0de;
-  border-color: #46b8da;
-}
-.btn-info .badge {
-  color: #5bc0de;
-  background-color: #fff;
-}
-.btn-warning {
-  color: #fff;
-  background-color: #f0ad4e;
-  border-color: #eea236;
-}
-.btn-warning:focus,
-.btn-warning.focus {
-  color: #fff;
-  background-color: #ec971f;
-  border-color: #985f0d;
-}
-.btn-warning:hover {
-  color: #fff;
-  background-color: #ec971f;
-  border-color: #d58512;
-}
-.btn-warning:active,
-.btn-warning.active,
-.open > .dropdown-toggle.btn-warning {
-  color: #fff;
-  background-color: #ec971f;
-  background-image: none;
-  border-color: #d58512;
-}
-.btn-warning:active:hover,
-.btn-warning.active:hover,
-.open > .dropdown-toggle.btn-warning:hover,
-.btn-warning:active:focus,
-.btn-warning.active:focus,
-.open > .dropdown-toggle.btn-warning:focus,
-.btn-warning:active.focus,
-.btn-warning.active.focus,
-.open > .dropdown-toggle.btn-warning.focus {
-  color: #fff;
-  background-color: #d58512;
-  border-color: #985f0d;
-}
-.btn-warning.disabled:hover,
-.btn-warning[disabled]:hover,
-fieldset[disabled] .btn-warning:hover,
-.btn-warning.disabled:focus,
-.btn-warning[disabled]:focus,
-fieldset[disabled] .btn-warning:focus,
-.btn-warning.disabled.focus,
-.btn-warning[disabled].focus,
-fieldset[disabled] .btn-warning.focus {
-  background-color: #f0ad4e;
-  border-color: #eea236;
-}
-.btn-warning .badge {
-  color: #f0ad4e;
-  background-color: #fff;
-}
-.btn-danger {
-  color: #fff;
-  background-color: #d9534f;
-  border-color: #d43f3a;
-}
-.btn-danger:focus,
-.btn-danger.focus {
-  color: #fff;
-  background-color: #c9302c;
-  border-color: #761c19;
-}
-.btn-danger:hover {
-  color: #fff;
-  background-color: #c9302c;
-  border-color: #ac2925;
-}
-.btn-danger:active,
-.btn-danger.active,
-.open > .dropdown-toggle.btn-danger {
-  color: #fff;
-  background-color: #c9302c;
-  background-image: none;
-  border-color: #ac2925;
-}
-.btn-danger:active:hover,
-.btn-danger.active:hover,
-.open > .dropdown-toggle.btn-danger:hover,
-.btn-danger:active:focus,
-.btn-danger.active:focus,
-.open > .dropdown-toggle.btn-danger:focus,
-.btn-danger:active.focus,
-.btn-danger.active.focus,
-.open > .dropdown-toggle.btn-danger.focus {
-  color: #fff;
-  background-color: #ac2925;
-  border-color: #761c19;
-}
-.btn-danger.disabled:hover,
-.btn-danger[disabled]:hover,
-fieldset[disabled] .btn-danger:hover,
-.btn-danger.disabled:focus,
-.btn-danger[disabled]:focus,
-fieldset[disabled] .btn-danger:focus,
-.btn-danger.disabled.focus,
-.btn-danger[disabled].focus,
-fieldset[disabled] .btn-danger.focus {
-  background-color: #d9534f;
-  border-color: #d43f3a;
-}
-.btn-danger .badge {
-  color: #d9534f;
-  background-color: #fff;
-}
-.btn-link {
-  font-weight: 400;
-  color: #337ab7;
-  border-radius: 0;
-}
-.btn-link,
-.btn-link:active,
-.btn-link.active,
-.btn-link[disabled],
-fieldset[disabled] .btn-link {
-  background-color: transparent;
-  -webkit-box-shadow: none;
-  box-shadow: none;
-}
-.btn-link,
-.btn-link:hover,
-.btn-link:focus,
-.btn-link:active {
-  border-color: transparent;
-}
-.btn-link:hover,
-.btn-link:focus {
-  color: #23527c;
-  text-decoration: underline;
-  background-color: transparent;
-}
-.btn-link[disabled]:hover,
-fieldset[disabled] .btn-link:hover,
-.btn-link[disabled]:focus,
-fieldset[disabled] .btn-link:focus {
-  color: #777777;
-  text-decoration: none;
-}
-.btn-lg,
-.btn-group-lg > .btn {
-  padding: 10px 16px;
-  font-size: 18px;
-  line-height: 1.3333333;
-  border-radius: 6px;
-}
-.btn-sm,
-.btn-group-sm > .btn {
-  padding: 5px 10px;
-  font-size: 12px;
-  line-height: 1.5;
-  border-radius: 3px;
-}
-.btn-xs,
-.btn-group-xs > .btn {
-  padding: 1px 5px;
-  font-size: 12px;
-  line-height: 1.5;
-  border-radius: 3px;
-}
-.btn-block {
-  display: block;
-  width: 100%;
-}
-.btn-block + .btn-block {
-  margin-top: 5px;
-}
-input[type="submit"].btn-block,
-input[type="reset"].btn-block,
-input[type="button"].btn-block {
-  width: 100%;
-}
-.fade {
-  opacity: 0;
-  -webkit-transition: opacity 0.15s linear;
-  -o-transition: opacity 0.15s linear;
-  transition: opacity 0.15s linear;
-}
-.fade.in {
-  opacity: 1;
-}
-.collapse {
-  display: none;
-}
-.collapse.in {
-  display: block;
-}
-tr.collapse.in {
-  display: table-row;
-}
-tbody.collapse.in {
-  display: table-row-group;
-}
-.collapsing {
-  position: relative;
-  height: 0;
-  overflow: hidden;
-  -webkit-transition-property: height, visibility;
-  -o-transition-property: height, visibility;
-  transition-property: height, visibility;
-  -webkit-transition-duration: 0.35s;
-  -o-transition-duration: 0.35s;
-  transition-duration: 0.35s;
-  -webkit-transition-timing-function: ease;
-  -o-transition-timing-function: ease;
-  transition-timing-function: ease;
-}
-.caret {
-  display: inline-block;
-  width: 0;
-  height: 0;
-  margin-left: 2px;
-  vertical-align: middle;
-  border-top: 4px dashed;
-  border-top: 4px solid \9;
-  border-right: 4px solid transparent;
-  border-left: 4px solid transparent;
-}
-.dropup,
-.dropdown {
-  position: relative;
-}
-.dropdown-toggle:focus {
-  outline: 0;
-}
-.dropdown-menu {
-  position: absolute;
-  top: 100%;
-  left: 0;
-  z-index: 1000;
-  display: none;
-  float: left;
-  min-width: 160px;
-  padding: 5px 0;
-  margin: 2px 0 0;
-  font-size: 14px;
-  text-align: left;
-  list-style: none;
-  background-color: #fff;
-  background-clip: padding-box;
-  border: 1px solid #ccc;
-  border: 1px solid rgba(0, 0, 0, 0.15);
-  border-radius: 4px;
-  -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);
-  box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);
-}
-.dropdown-menu.pull-right {
-  right: 0;
-  left: auto;
-}
-.dropdown-menu .divider {
-  height: 1px;
-  margin: 9px 0;
-  overflow: hidden;
-  background-color: #e5e5e5;
-}
-.dropdown-menu > li > a {
-  display: block;
-  padding: 3px 20px;
-  clear: both;
-  font-weight: 400;
-  line-height: 1.42857143;
-  color: #333333;
-  white-space: nowrap;
-}
-.dropdown-menu > li > a:hover,
-.dropdown-menu > li > a:focus {
-  color: #262626;
-  text-decoration: none;
-  background-color: #f5f5f5;
-}
-.dropdown-menu > .active > a,
-.dropdown-menu > .active > a:hover,
-.dropdown-menu > .active > a:focus {
-  color: #fff;
-  text-decoration: none;
-  background-color: #337ab7;
-  outline: 0;
-}
-.dropdown-menu > .disabled > a,
-.dropdown-menu > .disabled > a:hover,
-.dropdown-menu > .disabled > a:focus {
-  color: #777777;
-}
-.dropdown-menu > .disabled > a:hover,
-.dropdown-menu > .disabled > a:focus {
-  text-decoration: none;
-  cursor: not-allowed;
-  background-color: transparent;
-  background-image: none;
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
-}
-.open > .dropdown-menu {
-  display: block;
-}
-.open > a {
-  outline: 0;
-}
-.dropdown-menu-right {
-  right: 0;
-  left: auto;
-}
-.dropdown-menu-left {
-  right: auto;
-  left: 0;
-}
-.dropdown-header {
-  display: block;
-  padding: 3px 20px;
-  font-size: 12px;
-  line-height: 1.42857143;
-  color: #777777;
-  white-space: nowrap;
-}
-.dropdown-backdrop {
-  position: fixed;
-  top: 0;
-  right: 0;
-  bottom: 0;
-  left: 0;
-  z-index: 990;
-}
-.pull-right > .dropdown-menu {
-  right: 0;
-  left: auto;
-}
-.dropup .caret,
-.navbar-fixed-bottom .dropdown .caret {
-  content: "";
-  border-top: 0;
-  border-bottom: 4px dashed;
-  border-bottom: 4px solid \9;
-}
-.dropup .dropdown-menu,
-.navbar-fixed-bottom .dropdown .dropdown-menu {
-  top: auto;
-  bottom: 100%;
-  margin-bottom: 2px;
-}
-@media (min-width: 768px) {
-  .navbar-right .dropdown-menu {
-    right: 0;
-    left: auto;
-  }
-  .navbar-right .dropdown-menu-left {
-    right: auto;
-    left: 0;
-  }
-}
-.btn-group,
-.btn-group-vertical {
-  position: relative;
-  display: inline-block;
-  vertical-align: middle;
-}
-.btn-group > .btn,
-.btn-group-vertical > .btn {
-  position: relative;
-  float: left;
-}
-.btn-group > .btn:hover,
-.btn-group-vertical > .btn:hover,
-.btn-group > .btn:focus,
-.btn-group-vertical > .btn:focus,
-.btn-group > .btn:active,
-.btn-group-vertical > .btn:active,
-.btn-group > .btn.active,
-.btn-group-vertical > .btn.active {
-  z-index: 2;
-}
-.btn-group .btn + .btn,
-.btn-group .btn + .btn-group,
-.btn-group .btn-group + .btn,
-.btn-group .btn-group + .btn-group {
-  margin-left: -1px;
-}
-.btn-toolbar {
-  margin-left: -5px;
-}
-.btn-toolbar .btn,
-.btn-toolbar .btn-group,
-.btn-toolbar .input-group {
-  float: left;
-}
-.btn-toolbar > .btn,
-.btn-toolbar > .btn-group,
-.btn-toolbar > .input-group {
-  margin-left: 5px;
-}
-.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {
-  border-radius: 0;
-}
-.btn-group > .btn:first-child {
-  margin-left: 0;
-}
-.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {
-  border-top-right-radius: 0;
-  border-bottom-right-radius: 0;
-}
-.btn-group > .btn:last-child:not(:first-child),
-.btn-group > .dropdown-toggle:not(:first-child) {
-  border-top-left-radius: 0;
-  border-bottom-left-radius: 0;
-}
-.btn-group > .btn-group {
-  float: left;
-}
-.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {
-  border-radius: 0;
-}
-.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,
-.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {
-  border-top-right-radius: 0;
-  border-bottom-right-radius: 0;
-}
-.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {
-  border-top-left-radius: 0;
-  border-bottom-left-radius: 0;
-}
-.btn-group .dropdown-toggle:active,
-.btn-group.open .dropdown-toggle {
-  outline: 0;
-}
-.btn-group > .btn + .dropdown-toggle {
-  padding-right: 8px;
-  padding-left: 8px;
-}
-.btn-group > .btn-lg + .dropdown-toggle {
-  padding-right: 12px;
-  padding-left: 12px;
-}
-.btn-group.open .dropdown-toggle {
-  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
-  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
-}
-.btn-group.open .dropdown-toggle.btn-link {
-  -webkit-box-shadow: none;
-  box-shadow: none;
-}
-.btn .caret {
-  margin-left: 0;
-}
-.btn-lg .caret {
-  border-width: 5px 5px 0;
-  border-bottom-width: 0;
-}
-.dropup .btn-lg .caret {
-  border-width: 0 5px 5px;
-}
-.btn-group-vertical > .btn,
-.btn-group-vertical > .btn-group,
-.btn-group-vertical > .btn-group > .btn {
-  display: block;
-  float: none;
-  width: 100%;
-  max-width: 100%;
-}
-.btn-group-vertical > .btn-group > .btn {
-  float: none;
-}
-.btn-group-vertical > .btn + .btn,
-.btn-group-vertical > .btn + .btn-group,
-.btn-group-vertical > .btn-group + .btn,
-.btn-group-vertical > .btn-group + .btn-group {
-  margin-top: -1px;
-  margin-left: 0;
-}
-.btn-group-vertical > .btn:not(:first-child):not(:last-child) {
-  border-radius: 0;
-}
-.btn-group-vertical > .btn:first-child:not(:last-child) {
-  border-top-left-radius: 4px;
-  border-top-right-radius: 4px;
-  border-bottom-right-radius: 0;
-  border-bottom-left-radius: 0;
-}
-.btn-group-vertical > .btn:last-child:not(:first-child) {
-  border-top-left-radius: 0;
-  border-top-right-radius: 0;
-  border-bottom-right-radius: 4px;
-  border-bottom-left-radius: 4px;
-}
-.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {
-  border-radius: 0;
-}
-.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,
-.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {
-  border-bottom-right-radius: 0;
-  border-bottom-left-radius: 0;
-}
-.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {
-  border-top-left-radius: 0;
-  border-top-right-radius: 0;
-}
-.btn-group-justified {
-  display: table;
-  width: 100%;
-  table-layout: fixed;
-  border-collapse: separate;
-}
-.btn-group-justified > .btn,
-.btn-group-justified > .btn-group {
-  display: table-cell;
-  float: none;
-  width: 1%;
-}
-.btn-group-justified > .btn-group .btn {
-  width: 100%;
-}
-.btn-group-justified > .btn-group .dropdown-menu {
-  left: auto;
-}
-[data-toggle="buttons"] > .btn input[type="radio"],
-[data-toggle="buttons"] > .btn-group > .btn input[type="radio"],
-[data-toggle="buttons"] > .btn input[type="checkbox"],
-[data-toggle="buttons"] > .btn-group > .btn input[type="checkbox"] {
-  position: absolute;
-  clip: rect(0, 0, 0, 0);
-  pointer-events: none;
-}
-.input-group {
-  position: relative;
-  display: table;
-  border-collapse: separate;
-}
-.input-group[class*="col-"] {
-  float: none;
-  padding-right: 0;
-  padding-left: 0;
-}
-.input-group .form-control {
-  position: relative;
-  z-index: 2;
-  float: left;
-  width: 100%;
-  margin-bottom: 0;
-}
-.input-group .form-control:focus {
-  z-index: 3;
-}
-.input-group-lg > .form-control,
-.input-group-lg > .input-group-addon,
-.input-group-lg > .input-group-btn > .btn {
-  height: 46px;
-  padding: 10px 16px;
-  font-size: 18px;
-  line-height: 1.3333333;
-  border-radius: 6px;
-}
-select.input-group-lg > .form-control,
-select.input-group-lg > .input-group-addon,
-select.input-group-lg > .input-group-btn > .btn {
-  height: 46px;
-  line-height: 46px;
-}
-textarea.input-group-lg > .form-control,
-textarea.input-group-lg > .input-group-addon,
-textarea.input-group-lg > .input-group-btn > .btn,
-select[multiple].input-group-lg > .form-control,
-select[multiple].input-group-lg > .input-group-addon,
-select[multiple].input-group-lg > .input-group-btn > .btn {
-  height: auto;
-}
-.input-group-sm > .form-control,
-.input-group-sm > .input-group-addon,
-.input-group-sm > .input-group-btn > .btn {
-  height: 30px;
-  padding: 5px 10px;
-  font-size: 12px;
-  line-height: 1.5;
-  border-radius: 3px;
-}
-select.input-group-sm > .form-control,
-select.input-group-sm > .input-group-addon,
-select.input-group-sm > .input-group-btn > .btn {
-  height: 30px;
-  line-height: 30px;
-}
-textarea.input-group-sm > .form-control,
-textarea.input-group-sm > .input-group-addon,
-textarea.input-group-sm > .input-group-btn > .btn,
-select[multiple].input-group-sm > .form-control,
-select[multiple].input-group-sm > .input-group-addon,
-select[multiple].input-group-sm > .input-group-btn > .btn {
-  height: auto;
-}
-.input-group-addon,
-.input-group-btn,
-.input-group .form-control {
-  display: table-cell;
-}
-.input-group-addon:not(:first-child):not(:last-child),
-.input-group-btn:not(:first-child):not(:last-child),
-.input-group .form-control:not(:first-child):not(:last-child) {
-  border-radius: 0;
-}
-.input-group-addon,
-.input-group-btn {
-  width: 1%;
-  white-space: nowrap;
-  vertical-align: middle;
-}
-.input-group-addon {
-  padding: 6px 12px;
-  font-size: 14px;
-  font-weight: 400;
-  line-height: 1;
-  color: #555555;
-  text-align: center;
-  background-color: #eeeeee;
-  border: 1px solid #ccc;
-  border-radius: 4px;
-}
-.input-group-addon.input-sm {
-  padding: 5px 10px;
-  font-size: 12px;
-  border-radius: 3px;
-}
-.input-group-addon.input-lg {
-  padding: 10px 16px;
-  font-size: 18px;
-  border-radius: 6px;
-}
-.input-group-addon input[type="radio"],
-.input-group-addon input[type="checkbox"] {
-  margin-top: 0;
-}
-.input-group .form-control:first-child,
-.input-group-addon:first-child,
-.input-group-btn:first-child > .btn,
-.input-group-btn:first-child > .btn-group > .btn,
-.input-group-btn:first-child > .dropdown-toggle,
-.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),
-.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {
-  border-top-right-radius: 0;
-  border-bottom-right-radius: 0;
-}
-.input-group-addon:first-child {
-  border-right: 0;
-}
-.input-group .form-control:last-child,
-.input-group-addon:last-child,
-.input-group-btn:last-child > .btn,
-.input-group-btn:last-child > .btn-group > .btn,
-.input-group-btn:last-child > .dropdown-toggle,
-.input-group-btn:first-child > .btn:not(:first-child),
-.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {
-  border-top-left-radius: 0;
-  border-bottom-left-radius: 0;
-}
-.input-group-addon:last-child {
-  border-left: 0;
-}
-.input-group-btn {
-  position: relative;
-  font-size: 0;
-  white-space: nowrap;
-}
-.input-group-btn > .btn {
-  position: relative;
-}
-.input-group-btn > .btn + .btn {
-  margin-left: -1px;
-}
-.input-group-btn > .btn:hover,
-.input-group-btn > .btn:focus,
-.input-group-btn > .btn:active {
-  z-index: 2;
-}
-.input-group-btn:first-child > .btn,
-.input-group-btn:first-child > .btn-group {
-  margin-right: -1px;
-}
-.input-group-btn:last-child > .btn,
-.input-group-btn:last-child > .btn-group {
-  z-index: 2;
-  margin-left: -1px;
-}
-.nav {
-  padding-left: 0;
-  margin-bottom: 0;
-  list-style: none;
-}
-.nav > li {
-  position: relative;
-  display: block;
-}
-.nav > li > a {
-  position: relative;
-  display: block;
-  padding: 10px 15px;
-}
-.nav > li > a:hover,
-.nav > li > a:focus {
-  text-decoration: none;
-  background-color: #eeeeee;
-}
-.nav > li.disabled > a {
-  color: #777777;
-}
-.nav > li.disabled > a:hover,
-.nav > li.disabled > a:focus {
-  color: #777777;
-  text-decoration: none;
-  cursor: not-allowed;
-  background-color: transparent;
-}
-.nav .open > a,
-.nav .open > a:hover,
-.nav .open > a:focus {
-  background-color: #eeeeee;
-  border-color: #337ab7;
-}
-.nav .nav-divider {
-  height: 1px;
-  margin: 9px 0;
-  overflow: hidden;
-  background-color: #e5e5e5;
-}
-.nav > li > a > img {
-  max-width: none;
-}
-.nav-tabs {
-  border-bottom: 1px solid #ddd;
-}
-.nav-tabs > li {
-  float: left;
-  margin-bottom: -1px;
-}
-.nav-tabs > li > a {
-  margin-right: 2px;
-  line-height: 1.42857143;
-  border: 1px solid transparent;
-  border-radius: 4px 4px 0 0;
-}
-.nav-tabs > li > a:hover {
-  border-color: #eeeeee #eeeeee #ddd;
-}
-.nav-tabs > li.active > a,
-.nav-tabs > li.active > a:hover,
-.nav-tabs > li.active > a:focus {
-  color: #555555;
-  cursor: default;
-  background-color: #fff;
-  border: 1px solid #ddd;
-  border-bottom-color: transparent;
-}
-.nav-tabs.nav-justified {
-  width: 100%;
-  border-bottom: 0;
-}
-.nav-tabs.nav-justified > li {
-  float: none;
-}
-.nav-tabs.nav-justified > li > a {
-  margin-bottom: 5px;
-  text-align: center;
-}
-.nav-tabs.nav-justified > .dropdown .dropdown-menu {
-  top: auto;
-  left: auto;
-}
-@media (min-width: 768px) {
-  .nav-tabs.nav-justified > li {
-    display: table-cell;
-    width: 1%;
-  }
-  .nav-tabs.nav-justified > li > a {
-    margin-bottom: 0;
-  }
-}
-.nav-tabs.nav-justified > li > a {
-  margin-right: 0;
-  border-radius: 4px;
-}
-.nav-tabs.nav-justified > .active > a,
-.nav-tabs.nav-justified > .active > a:hover,
-.nav-tabs.nav-justified > .active > a:focus {
-  border: 1px solid #ddd;
-}
-@media (min-width: 768px) {
-  .nav-tabs.nav-justified > li > a {
-    border-bottom: 1px solid #ddd;
-    border-radius: 4px 4px 0 0;
-  }
-  .nav-tabs.nav-justified > .active > a,
-  .nav-tabs.nav-justified > .active > a:hover,
-  .nav-tabs.nav-justified > .active > a:focus {
-    border-bottom-color: #fff;
-  }
-}
-.nav-pills > li {
-  float: left;
-}
-.nav-pills > li > a {
-  border-radius: 4px;
-}
-.nav-pills > li + li {
-  margin-left: 2px;
-}
-.nav-pills > li.active > a,
-.nav-pills > li.active > a:hover,
-.nav-pills > li.active > a:focus {
-  color: #fff;
-  background-color: #337ab7;
-}
-.nav-stacked > li {
-  float: none;
-}
-.nav-stacked > li + li {
-  margin-top: 2px;
-  margin-left: 0;
-}
-.nav-justified {
-  width: 100%;
-}
-.nav-justified > li {
-  float: none;
-}
-.nav-justified > li > a {
-  margin-bottom: 5px;
-  text-align: center;
-}
-.nav-justified > .dropdown .dropdown-menu {
-  top: auto;
-  left: auto;
-}
-@media (min-width: 768px) {
-  .nav-justified > li {
-    display: table-cell;
-    width: 1%;
-  }
-  .nav-justified > li > a {
-    margin-bottom: 0;
-  }
-}
-.nav-tabs-justified {
-  border-bottom: 0;
-}
-.nav-tabs-justified > li > a {
-  margin-right: 0;
-  border-radius: 4px;
-}
-.nav-tabs-justified > .active > a,
-.nav-tabs-justified > .active > a:hover,
-.nav-tabs-justified > .active > a:focus {
-  border: 1px solid #ddd;
-}
-@media (min-width: 768px) {
-  .nav-tabs-justified > li > a {
-    border-bottom: 1px solid #ddd;
-    border-radius: 4px 4px 0 0;
-  }
-  .nav-tabs-justified > .active > a,
-  .nav-tabs-justified > .active > a:hover,
-  .nav-tabs-justified > .active > a:focus {
-    border-bottom-color: #fff;
-  }
-}
-.tab-content > .tab-pane {
-  display: none;
-}
-.tab-content > .active {
-  display: block;
-}
-.nav-tabs .dropdown-menu {
-  margin-top: -1px;
-  border-top-left-radius: 0;
-  border-top-right-radius: 0;
-}
-.navbar {
-  position: relative;
-  min-height: 50px;
-  margin-bottom: 20px;
-  border: 1px solid transparent;
-}
-@media (min-width: 768px) {
-  .navbar {
-    border-radius: 4px;
-  }
-}
-@media (min-width: 768px) {
-  .navbar-header {
-    float: left;
-  }
-}
-.navbar-collapse {
-  padding-right: 15px;
-  padding-left: 15px;
-  overflow-x: visible;
-  border-top: 1px solid transparent;
-  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1);
-  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1);
-  -webkit-overflow-scrolling: touch;
-}
-.navbar-collapse.in {
-  overflow-y: auto;
-}
-@media (min-width: 768px) {
-  .navbar-collapse {
-    width: auto;
-    border-top: 0;
-    -webkit-box-shadow: none;
-    box-shadow: none;
-  }
-  .navbar-collapse.collapse {
-    display: block !important;
-    height: auto !important;
-    padding-bottom: 0;
-    overflow: visible !important;
-  }
-  .navbar-collapse.in {
-    overflow-y: visible;
-  }
-  .navbar-fixed-top .navbar-collapse,
-  .navbar-static-top .navbar-collapse,
-  .navbar-fixed-bottom .navbar-collapse {
-    padding-right: 0;
-    padding-left: 0;
-  }
-}
-.navbar-fixed-top,
-.navbar-fixed-bottom {
-  position: fixed;
-  right: 0;
-  left: 0;
-  z-index: 1030;
-}
-.navbar-fixed-top .navbar-collapse,
-.navbar-fixed-bottom .navbar-collapse {
-  max-height: 340px;
-}
-@media (max-device-width: 480px) and (orientation: landscape) {
-  .navbar-fixed-top .navbar-collapse,
-  .navbar-fixed-bottom .navbar-collapse {
-    max-height: 200px;
-  }
-}
-@media (min-width: 768px) {
-  .navbar-fixed-top,
-  .navbar-fixed-bottom {
-    border-radius: 0;
-  }
-}
-.navbar-fixed-top {
-  top: 0;
-  border-width: 0 0 1px;
-}
-.navbar-fixed-bottom {
-  bottom: 0;
-  margin-bottom: 0;
-  border-width: 1px 0 0;
-}
-.container > .navbar-header,
-.container-fluid > .navbar-header,
-.container > .navbar-collapse,
-.container-fluid > .navbar-collapse {
-  margin-right: -15px;
-  margin-left: -15px;
-}
-@media (min-width: 768px) {
-  .container > .navbar-header,
-  .container-fluid > .navbar-header,
-  .container > .navbar-collapse,
-  .container-fluid > .navbar-collapse {
-    margin-right: 0;
-    margin-left: 0;
-  }
-}
-.navbar-static-top {
-  z-index: 1000;
-  border-width: 0 0 1px;
-}
-@media (min-width: 768px) {
-  .navbar-static-top {
-    border-radius: 0;
-  }
-}
-.navbar-brand {
-  float: left;
-  height: 50px;
-  padding: 15px 15px;
-  font-size: 18px;
-  line-height: 20px;
-}
-.navbar-brand:hover,
-.navbar-brand:focus {
-  text-decoration: none;
-}
-.navbar-brand > img {
-  display: block;
-}
-@media (min-width: 768px) {
-  .navbar > .container .navbar-brand,
-  .navbar > .container-fluid .navbar-brand {
-    margin-left: -15px;
-  }
-}
-.navbar-toggle {
-  position: relative;
-  float: right;
-  padding: 9px 10px;
-  margin-right: 15px;
-  margin-top: 8px;
-  margin-bottom: 8px;
-  background-color: transparent;
-  background-image: none;
-  border: 1px solid transparent;
-  border-radius: 4px;
-}
-.navbar-toggle:focus {
-  outline: 0;
-}
-.navbar-toggle .icon-bar {
-  display: block;
-  width: 22px;
-  height: 2px;
-  border-radius: 1px;
-}
-.navbar-toggle .icon-bar + .icon-bar {
-  margin-top: 4px;
-}
-@media (min-width: 768px) {
-  .navbar-toggle {
-    display: none;
-  }
-}
-.navbar-nav {
-  margin: 7.5px -15px;
-}
-.navbar-nav > li > a {
-  padding-top: 10px;
-  padding-bottom: 10px;
-  line-height: 20px;
-}
-@media (max-width: 767px) {
-  .navbar-nav .open .dropdown-menu {
-    position: static;
-    float: none;
-    width: auto;
-    margin-top: 0;
-    background-color: transparent;
-    border: 0;
-    -webkit-box-shadow: none;
-    box-shadow: none;
-  }
-  .navbar-nav .open .dropdown-menu > li > a,
-  .navbar-nav .open .dropdown-menu .dropdown-header {
-    padding: 5px 15px 5px 25px;
-  }
-  .navbar-nav .open .dropdown-menu > li > a {
-    line-height: 20px;
-  }
-  .navbar-nav .open .dropdown-menu > li > a:hover,
-  .navbar-nav .open .dropdown-menu > li > a:focus {
-    background-image: none;
-  }
-}
-@media (min-width: 768px) {
-  .navbar-nav {
-    float: left;
-    margin: 0;
-  }
-  .navbar-nav > li {
-    float: left;
-  }
-  .navbar-nav > li > a {
-    padding-top: 15px;
-    padding-bottom: 15px;
-  }
-}
-.navbar-form {
-  padding: 10px 15px;
-  margin-right: -15px;
-  margin-left: -15px;
-  border-top: 1px solid transparent;
-  border-bottom: 1px solid transparent;
-  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);
-  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);
-  margin-top: 8px;
-  margin-bottom: 8px;
-}
-@media (min-width: 768px) {
-  .navbar-form .form-group {
-    display: inline-block;
-    margin-bottom: 0;
-    vertical-align: middle;
-  }
-  .navbar-form .form-control {
-    display: inline-block;
-    width: auto;
-    vertical-align: middle;
-  }
-  .navbar-form .form-control-static {
-    display: inline-block;
-  }
-  .navbar-form .input-group {
-    display: inline-table;
-    vertical-align: middle;
-  }
-  .navbar-form .input-group .input-group-addon,
-  .navbar-form .input-group .input-group-btn,
-  .navbar-form .input-group .form-control {
-    width: auto;
-  }
-  .navbar-form .input-group > .form-control {
-    width: 100%;
-  }
-  .navbar-form .control-label {
-    margin-bottom: 0;
-    vertical-align: middle;
-  }
-  .navbar-form .radio,
-  .navbar-form .checkbox {
-    display: inline-block;
-    margin-top: 0;
-    margin-bottom: 0;
-    vertical-align: middle;
-  }
-  .navbar-form .radio label,
-  .navbar-form .checkbox label {
-    padding-left: 0;
-  }
-  .navbar-form .radio input[type="radio"],
-  .navbar-form .checkbox input[type="checkbox"] {
-    position: relative;
-    margin-left: 0;
-  }
-  .navbar-form .has-feedback .form-control-feedback {
-    top: 0;
-  }
-}
-@media (max-width: 767px) {
-  .navbar-form .form-group {
-    margin-bottom: 5px;
-  }
-  .navbar-form .form-group:last-child {
-    margin-bottom: 0;
-  }
-}
-@media (min-width: 768px) {
-  .navbar-form {
-    width: auto;
-    padding-top: 0;
-    padding-bottom: 0;
-    margin-right: 0;
-    margin-left: 0;
-    border: 0;
-    -webkit-box-shadow: none;
-    box-shadow: none;
-  }
-}
-.navbar-nav > li > .dropdown-menu {
-  margin-top: 0;
-  border-top-left-radius: 0;
-  border-top-right-radius: 0;
-}
-.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {
-  margin-bottom: 0;
-  border-top-left-radius: 4px;
-  border-top-right-radius: 4px;
-  border-bottom-right-radius: 0;
-  border-bottom-left-radius: 0;
-}
-.navbar-btn {
-  margin-top: 8px;
-  margin-bottom: 8px;
-}
-.navbar-btn.btn-sm {
-  margin-top: 10px;
-  margin-bottom: 10px;
-}
-.navbar-btn.btn-xs {
-  margin-top: 14px;
-  margin-bottom: 14px;
-}
-.navbar-text {
-  margin-top: 15px;
-  margin-bottom: 15px;
-}
-@media (min-width: 768px) {
-  .navbar-text {
-    float: left;
-    margin-right: 15px;
-    margin-left: 15px;
-  }
-}
-@media (min-width: 768px) {
-  .navbar-left {
-    float: left !important;
-  }
-  .navbar-right {
-    float: right !important;
-    margin-right: -15px;
-  }
-  .navbar-right ~ .navbar-right {
-    margin-right: 0;
-  }
-}
-.navbar-default {
-  background-color: #f8f8f8;
-  border-color: #e7e7e7;
-}
-.navbar-default .navbar-brand {
-  color: #777;
-}
-.navbar-default .navbar-brand:hover,
-.navbar-default .navbar-brand:focus {
-  color: #5e5e5e;
-  background-color: transparent;
-}
-.navbar-default .navbar-text {
-  color: #777;
-}
-.navbar-default .navbar-nav > li > a {
-  color: #777;
-}
-.navbar-default .navbar-nav > li > a:hover,
-.navbar-default .navbar-nav > li > a:focus {
-  color: #333;
-  background-color: transparent;
-}
-.navbar-default .navbar-nav > .active > a,
-.navbar-default .navbar-nav > .active > a:hover,
-.navbar-default .navbar-nav > .active > a:focus {
-  color: #555;
-  background-color: #e7e7e7;
-}
-.navbar-default .navbar-nav > .disabled > a,
-.navbar-default .navbar-nav > .disabled > a:hover,
-.navbar-default .navbar-nav > .disabled > a:focus {
-  color: #ccc;
-  background-color: transparent;
-}
-.navbar-default .navbar-nav > .open > a,
-.navbar-default .navbar-nav > .open > a:hover,
-.navbar-default .navbar-nav > .open > a:focus {
-  color: #555;
-  background-color: #e7e7e7;
-}
-@media (max-width: 767px) {
-  .navbar-default .navbar-nav .open .dropdown-menu > li > a {
-    color: #777;
-  }
-  .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,
-  .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {
-    color: #333;
-    background-color: transparent;
-  }
-  .navbar-default .navbar-nav .open .dropdown-menu > .active > a,
-  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,
-  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {
-    color: #555;
-    background-color: #e7e7e7;
-  }
-  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,
-  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,
-  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {
-    color: #ccc;
-    background-color: transparent;
-  }
-}
-.navbar-default .navbar-toggle {
-  border-color: #ddd;
-}
-.navbar-default .navbar-toggle:hover,
-.navbar-default .navbar-toggle:focus {
-  background-color: #ddd;
-}
-.navbar-default .navbar-toggle .icon-bar {
-  background-color: #888;
-}
-.navbar-default .navbar-collapse,
-.navbar-default .navbar-form {
-  border-color: #e7e7e7;
-}
-.navbar-default .navbar-link {
-  color: #777;
-}
-.navbar-default .navbar-link:hover {
-  color: #333;
-}
-.navbar-default .btn-link {
-  color: #777;
-}
-.navbar-default .btn-link:hover,
-.navbar-default .btn-link:focus {
-  color: #333;
-}
-.navbar-default .btn-link[disabled]:hover,
-fieldset[disabled] .navbar-default .btn-link:hover,
-.navbar-default .btn-link[disabled]:focus,
-fieldset[disabled] .navbar-default .btn-link:focus {
-  color: #ccc;
-}
-.navbar-inverse {
-  background-color: #222;
-  border-color: #080808;
-}
-.navbar-inverse .navbar-brand {
-  color: #9d9d9d;
-}
-.navbar-inverse .navbar-brand:hover,
-.navbar-inverse .navbar-brand:focus {
-  color: #fff;
-  background-color: transparent;
-}
-.navbar-inverse .navbar-text {
-  color: #9d9d9d;
-}
-.navbar-inverse .navbar-nav > li > a {
-  color: #9d9d9d;
-}
-.navbar-inverse .navbar-nav > li > a:hover,
-.navbar-inverse .navbar-nav > li > a:focus {
-  color: #fff;
-  background-color: transparent;
-}
-.navbar-inverse .navbar-nav > .active > a,
-.navbar-inverse .navbar-nav > .active > a:hover,
-.navbar-inverse .navbar-nav > .active > a:focus {
-  color: #fff;
-  background-color: #080808;
-}
-.navbar-inverse .navbar-nav > .disabled > a,
-.navbar-inverse .navbar-nav > .disabled > a:hover,
-.navbar-inverse .navbar-nav > .disabled > a:focus {
-  color: #444;
-  background-color: transparent;
-}
-.navbar-inverse .navbar-nav > .open > a,
-.navbar-inverse .navbar-nav > .open > a:hover,
-.navbar-inverse .navbar-nav > .open > a:focus {
-  color: #fff;
-  background-color: #080808;
-}
-@media (max-width: 767px) {
-  .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {
-    border-color: #080808;
-  }
-  .navbar-inverse .navbar-nav .open .dropdown-menu .divider {
-    background-color: #080808;
-  }
-  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {
-    color: #9d9d9d;
-  }
-  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,
-  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {
-    color: #fff;
-    background-color: transparent;
-  }
-  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,
-  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,
-  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {
-    color: #fff;
-    background-color: #080808;
-  }
-  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,
-  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,
-  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {
-    color: #444;
-    background-color: transparent;
-  }
-}
-.navbar-inverse .navbar-toggle {
-  border-color: #333;
-}
-.navbar-inverse .navbar-toggle:hover,
-.navbar-inverse .navbar-toggle:focus {
-  background-color: #333;
-}
-.navbar-inverse .navbar-toggle .icon-bar {
-  background-color: #fff;
-}
-.navbar-inverse .navbar-collapse,
-.navbar-inverse .navbar-form {
-  border-color: #101010;
-}
-.navbar-inverse .navbar-link {
-  color: #9d9d9d;
-}
-.navbar-inverse .navbar-link:hover {
-  color: #fff;
-}
-.navbar-inverse .btn-link {
-  color: #9d9d9d;
-}
-.navbar-inverse .btn-link:hover,
-.navbar-inverse .btn-link:focus {
-  color: #fff;
-}
-.navbar-inverse .btn-link[disabled]:hover,
-fieldset[disabled] .navbar-inverse .btn-link:hover,
-.navbar-inverse .btn-link[disabled]:focus,
-fieldset[disabled] .navbar-inverse .btn-link:focus {
-  color: #444;
-}
-.breadcrumb {
-  padding: 8px 15px;
-  margin-bottom: 20px;
-  list-style: none;
-  background-color: #f5f5f5;
-  border-radius: 4px;
-}
-.breadcrumb > li {
-  display: inline-block;
-}
-.breadcrumb > li + li:before {
-  padding: 0 5px;
-  color: #ccc;
-  content: "/\00a0";
-}
-.breadcrumb > .active {
-  color: #777777;
-}
-.pagination {
-  display: inline-block;
-  padding-left: 0;
-  margin: 20px 0;
-  border-radius: 4px;
-}
-.pagination > li {
-  display: inline;
-}
-.pagination > li > a,
-.pagination > li > span {
-  position: relative;
-  float: left;
-  padding: 6px 12px;
-  margin-left: -1px;
-  line-height: 1.42857143;
-  color: #337ab7;
-  text-decoration: none;
-  background-color: #fff;
-  border: 1px solid #ddd;
-}
-.pagination > li > a:hover,
-.pagination > li > span:hover,
-.pagination > li > a:focus,
-.pagination > li > span:focus {
-  z-index: 2;
-  color: #23527c;
-  background-color: #eeeeee;
-  border-color: #ddd;
-}
-.pagination > li:first-child > a,
-.pagination > li:first-child > span {
-  margin-left: 0;
-  border-top-left-radius: 4px;
-  border-bottom-left-radius: 4px;
-}
-.pagination > li:last-child > a,
-.pagination > li:last-child > span {
-  border-top-right-radius: 4px;
-  border-bottom-right-radius: 4px;
-}
-.pagination > .active > a,
-.pagination > .active > span,
-.pagination > .active > a:hover,
-.pagination > .active > span:hover,
-.pagination > .active > a:focus,
-.pagination > .active > span:focus {
-  z-index: 3;
-  color: #fff;
-  cursor: default;
-  background-color: #337ab7;
-  border-color: #337ab7;
-}
-.pagination > .disabled > span,
-.pagination > .disabled > span:hover,
-.pagination > .disabled > span:focus,
-.pagination > .disabled > a,
-.pagination > .disabled > a:hover,
-.pagination > .disabled > a:focus {
-  color: #777777;
-  cursor: not-allowed;
-  background-color: #fff;
-  border-color: #ddd;
-}
-.pagination-lg > li > a,
-.pagination-lg > li > span {
-  padding: 10px 16px;
-  font-size: 18px;
-  line-height: 1.3333333;
-}
-.pagination-lg > li:first-child > a,
-.pagination-lg > li:first-child > span {
-  border-top-left-radius: 6px;
-  border-bottom-left-radius: 6px;
-}
-.pagination-lg > li:last-child > a,
-.pagination-lg > li:last-child > span {
-  border-top-right-radius: 6px;
-  border-bottom-right-radius: 6px;
-}
-.pagination-sm > li > a,
-.pagination-sm > li > span {
-  padding: 5px 10px;
-  font-size: 12px;
-  line-height: 1.5;
-}
-.pagination-sm > li:first-child > a,
-.pagination-sm > li:first-child > span {
-  border-top-left-radius: 3px;
-  border-bottom-left-radius: 3px;
-}
-.pagination-sm > li:last-child > a,
-.pagination-sm > li:last-child > span {
-  border-top-right-radius: 3px;
-  border-bottom-right-radius: 3px;
-}
-.pager {
-  padding-left: 0;
-  margin: 20px 0;
-  text-align: center;
-  list-style: none;
-}
-.pager li {
-  display: inline;
-}
-.pager li > a,
-.pager li > span {
-  display: inline-block;
-  padding: 5px 14px;
-  background-color: #fff;
-  border: 1px solid #ddd;
-  border-radius: 15px;
-}
-.pager li > a:hover,
-.pager li > a:focus {
-  text-decoration: none;
-  background-color: #eeeeee;
-}
-.pager .next > a,
-.pager .next > span {
-  float: right;
-}
-.pager .previous > a,
-.pager .previous > span {
-  float: left;
-}
-.pager .disabled > a,
-.pager .disabled > a:hover,
-.pager .disabled > a:focus,
-.pager .disabled > span {
-  color: #777777;
-  cursor: not-allowed;
-  background-color: #fff;
-}
-.label {
-  display: inline;
-  padding: 0.2em 0.6em 0.3em;
-  font-size: 75%;
-  font-weight: 700;
-  line-height: 1;
-  color: #fff;
-  text-align: center;
-  white-space: nowrap;
-  vertical-align: baseline;
-  border-radius: 0.25em;
-}
-a.label:hover,
-a.label:focus {
-  color: #fff;
-  text-decoration: none;
-  cursor: pointer;
-}
-.label:empty {
-  display: none;
-}
-.btn .label {
-  position: relative;
-  top: -1px;
-}
-.label-default {
-  background-color: #777777;
-}
-.label-default[href]:hover,
-.label-default[href]:focus {
-  background-color: #5e5e5e;
-}
-.label-primary {
-  background-color: #337ab7;
-}
-.label-primary[href]:hover,
-.label-primary[href]:focus {
-  background-color: #286090;
-}
-.label-success {
-  background-color: #5cb85c;
-}
-.label-success[href]:hover,
-.label-success[href]:focus {
-  background-color: #449d44;
-}
-.label-info {
-  background-color: #5bc0de;
-}
-.label-info[href]:hover,
-.label-info[href]:focus {
-  background-color: #31b0d5;
-}
-.label-warning {
-  background-color: #f0ad4e;
-}
-.label-warning[href]:hover,
-.label-warning[href]:focus {
-  background-color: #ec971f;
-}
-.label-danger {
-  background-color: #d9534f;
-}
-.label-danger[href]:hover,
-.label-danger[href]:focus {
-  background-color: #c9302c;
-}
-.badge {
-  display: inline-block;
-  min-width: 10px;
-  padding: 3px 7px;
-  font-size: 12px;
-  font-weight: bold;
-  line-height: 1;
-  color: #fff;
-  text-align: center;
-  white-space: nowrap;
-  vertical-align: middle;
-  background-color: #777777;
-  border-radius: 10px;
-}
-.badge:empty {
-  display: none;
-}
-.btn .badge {
-  position: relative;
-  top: -1px;
-}
-.btn-xs .badge,
-.btn-group-xs > .btn .badge {
-  top: 0;
-  padding: 1px 5px;
-}
-a.badge:hover,
-a.badge:focus {
-  color: #fff;
-  text-decoration: none;
-  cursor: pointer;
-}
-.list-group-item.active > .badge,
-.nav-pills > .active > a > .badge {
-  color: #337ab7;
-  background-color: #fff;
-}
-.list-group-item > .badge {
-  float: right;
-}
-.list-group-item > .badge + .badge {
-  margin-right: 5px;
-}
-.nav-pills > li > a > .badge {
-  margin-left: 3px;
-}
-.jumbotron {
-  padding-top: 30px;
-  padding-bottom: 30px;
-  margin-bottom: 30px;
-  color: inherit;
-  background-color: #eeeeee;
-}
-.jumbotron h1,
-.jumbotron .h1 {
-  color: inherit;
-}
-.jumbotron p {
-  margin-bottom: 15px;
-  font-size: 21px;
-  font-weight: 200;
-}
-.jumbotron > hr {
-  border-top-color: #d5d5d5;
-}
-.container .jumbotron,
-.container-fluid .jumbotron {
-  padding-right: 15px;
-  padding-left: 15px;
-  border-radius: 6px;
-}
-.jumbotron .container {
-  max-width: 100%;
-}
-@media screen and (min-width: 768px) {
-  .jumbotron {
-    padding-top: 48px;
-    padding-bottom: 48px;
-  }
-  .container .jumbotron,
-  .container-fluid .jumbotron {
-    padding-right: 60px;
-    padding-left: 60px;
-  }
-  .jumbotron h1,
-  .jumbotron .h1 {
-    font-size: 63px;
-  }
-}
-.thumbnail {
-  display: block;
-  padding: 4px;
-  margin-bottom: 20px;
-  line-height: 1.42857143;
-  background-color: #fff;
-  border: 1px solid #ddd;
-  border-radius: 4px;
-  -webkit-transition: border 0.2s ease-in-out;
-  -o-transition: border 0.2s ease-in-out;
-  transition: border 0.2s ease-in-out;
-}
-.thumbnail > img,
-.thumbnail a > img {
-  margin-right: auto;
-  margin-left: auto;
-}
-a.thumbnail:hover,
-a.thumbnail:focus,
-a.thumbnail.active {
-  border-color: #337ab7;
-}
-.thumbnail .caption {
-  padding: 9px;
-  color: #333333;
-}
-.alert {
-  padding: 15px;
-  margin-bottom: 20px;
-  border: 1px solid transparent;
-  border-radius: 4px;
-}
-.alert h4 {
-  margin-top: 0;
-  color: inherit;
-}
-.alert .alert-link {
-  font-weight: bold;
-}
-.alert > p,
-.alert > ul {
-  margin-bottom: 0;
-}
-.alert > p + p {
-  margin-top: 5px;
-}
-.alert-dismissable,
-.alert-dismissible {
-  padding-right: 35px;
-}
-.alert-dismissable .close,
-.alert-dismissible .close {
-  position: relative;
-  top: -2px;
-  right: -21px;
-  color: inherit;
-}
-.alert-success {
-  color: #3c763d;
-  background-color: #dff0d8;
-  border-color: #d6e9c6;
-}
-.alert-success hr {
-  border-top-color: #c9e2b3;
-}
-.alert-success .alert-link {
-  color: #2b542c;
-}
-.alert-info {
-  color: #31708f;
-  background-color: #d9edf7;
-  border-color: #bce8f1;
-}
-.alert-info hr {
-  border-top-color: #a6e1ec;
-}
-.alert-info .alert-link {
-  color: #245269;
-}
-.alert-warning {
-  color: #8a6d3b;
-  background-color: #fcf8e3;
-  border-color: #faebcc;
-}
-.alert-warning hr {
-  border-top-color: #f7e1b5;
-}
-.alert-warning .alert-link {
-  color: #66512c;
-}
-.alert-danger {
-  color: #a94442;
-  background-color: #f2dede;
-  border-color: #ebccd1;
-}
-.alert-danger hr {
-  border-top-color: #e4b9c0;
-}
-.alert-danger .alert-link {
-  color: #843534;
-}
-@-webkit-keyframes progress-bar-stripes {
-  from {
-    background-position: 40px 0;
-  }
-  to {
-    background-position: 0 0;
-  }
-}
-@-o-keyframes progress-bar-stripes {
-  from {
-    background-position: 40px 0;
-  }
-  to {
-    background-position: 0 0;
-  }
-}
-@keyframes progress-bar-stripes {
-  from {
-    background-position: 40px 0;
-  }
-  to {
-    background-position: 0 0;
-  }
-}
-.progress {
-  height: 20px;
-  margin-bottom: 20px;
-  overflow: hidden;
-  background-color: #f5f5f5;
-  border-radius: 4px;
-  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);
-  box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);
-}
-.progress-bar {
-  float: left;
-  width: 0%;
-  height: 100%;
-  font-size: 12px;
-  line-height: 20px;
-  color: #fff;
-  text-align: center;
-  background-color: #337ab7;
-  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);
-  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);
-  -webkit-transition: width 0.6s ease;
-  -o-transition: width 0.6s ease;
-  transition: width 0.6s ease;
-}
-.progress-striped .progress-bar,
-.progress-bar-striped {
-  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  -webkit-background-size: 40px 40px;
-  background-size: 40px 40px;
-}
-.progress.active .progress-bar,
-.progress-bar.active {
-  -webkit-animation: progress-bar-stripes 2s linear infinite;
-  -o-animation: progress-bar-stripes 2s linear infinite;
-  animation: progress-bar-stripes 2s linear infinite;
-}
-.progress-bar-success {
-  background-color: #5cb85c;
-}
-.progress-striped .progress-bar-success {
-  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-}
-.progress-bar-info {
-  background-color: #5bc0de;
-}
-.progress-striped .progress-bar-info {
-  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-}
-.progress-bar-warning {
-  background-color: #f0ad4e;
-}
-.progress-striped .progress-bar-warning {
-  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-}
-.progress-bar-danger {
-  background-color: #d9534f;
-}
-.progress-striped .progress-bar-danger {
-  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-}
-.media {
-  margin-top: 15px;
-}
-.media:first-child {
-  margin-top: 0;
-}
-.media,
-.media-body {
-  overflow: hidden;
-  zoom: 1;
-}
-.media-body {
-  width: 10000px;
-}
-.media-object {
-  display: block;
-}
-.media-object.img-thumbnail {
-  max-width: none;
-}
-.media-right,
-.media > .pull-right {
-  padding-left: 10px;
-}
-.media-left,
-.media > .pull-left {
-  padding-right: 10px;
-}
-.media-left,
-.media-right,
-.media-body {
-  display: table-cell;
-  vertical-align: top;
-}
-.media-middle {
-  vertical-align: middle;
-}
-.media-bottom {
-  vertical-align: bottom;
-}
-.media-heading {
-  margin-top: 0;
-  margin-bottom: 5px;
-}
-.media-list {
-  padding-left: 0;
-  list-style: none;
-}
-.list-group {
-  padding-left: 0;
-  margin-bottom: 20px;
-}
-.list-group-item {
-  position: relative;
-  display: block;
-  padding: 10px 15px;
-  margin-bottom: -1px;
-  background-color: #fff;
-  border: 1px solid #ddd;
-}
-.list-group-item:first-child {
-  border-top-left-radius: 4px;
-  border-top-right-radius: 4px;
-}
-.list-group-item:last-child {
-  margin-bottom: 0;
-  border-bottom-right-radius: 4px;
-  border-bottom-left-radius: 4px;
-}
-.list-group-item.disabled,
-.list-group-item.disabled:hover,
-.list-group-item.disabled:focus {
-  color: #777777;
-  cursor: not-allowed;
-  background-color: #eeeeee;
-}
-.list-group-item.disabled .list-group-item-heading,
-.list-group-item.disabled:hover .list-group-item-heading,
-.list-group-item.disabled:focus .list-group-item-heading {
-  color: inherit;
-}
-.list-group-item.disabled .list-group-item-text,
-.list-group-item.disabled:hover .list-group-item-text,
-.list-group-item.disabled:focus .list-group-item-text {
-  color: #777777;
-}
-.list-group-item.active,
-.list-group-item.active:hover,
-.list-group-item.active:focus {
-  z-index: 2;
-  color: #fff;
-  background-color: #337ab7;
-  border-color: #337ab7;
-}
-.list-group-item.active .list-group-item-heading,
-.list-group-item.active:hover .list-group-item-heading,
-.list-group-item.active:focus .list-group-item-heading,
-.list-group-item.active .list-group-item-heading > small,
-.list-group-item.active:hover .list-group-item-heading > small,
-.list-group-item.active:focus .list-group-item-heading > small,
-.list-group-item.active .list-group-item-heading > .small,
-.list-group-item.active:hover .list-group-item-heading > .small,
-.list-group-item.active:focus .list-group-item-heading > .small {
-  color: inherit;
-}
-.list-group-item.active .list-group-item-text,
-.list-group-item.active:hover .list-group-item-text,
-.list-group-item.active:focus .list-group-item-text {
-  color: #c7ddef;
-}
-a.list-group-item,
-button.list-group-item {
-  color: #555;
-}
-a.list-group-item .list-group-item-heading,
-button.list-group-item .list-group-item-heading {
-  color: #333;
-}
-a.list-group-item:hover,
-button.list-group-item:hover,
-a.list-group-item:focus,
-button.list-group-item:focus {
-  color: #555;
-  text-decoration: none;
-  background-color: #f5f5f5;
-}
-button.list-group-item {
-  width: 100%;
-  text-align: left;
-}
-.list-group-item-success {
-  color: #3c763d;
-  background-color: #dff0d8;
-}
-a.list-group-item-success,
-button.list-group-item-success {
-  color: #3c763d;
-}
-a.list-group-item-success .list-group-item-heading,
-button.list-group-item-success .list-group-item-heading {
-  color: inherit;
-}
-a.list-group-item-success:hover,
-button.list-group-item-success:hover,
-a.list-group-item-success:focus,
-button.list-group-item-success:focus {
-  color: #3c763d;
-  background-color: #d0e9c6;
-}
-a.list-group-item-success.active,
-button.list-group-item-success.active,
-a.list-group-item-success.active:hover,
-button.list-group-item-success.active:hover,
-a.list-group-item-success.active:focus,
-button.list-group-item-success.active:focus {
-  color: #fff;
-  background-color: #3c763d;
-  border-color: #3c763d;
-}
-.list-group-item-info {
-  color: #31708f;
-  background-color: #d9edf7;
-}
-a.list-group-item-info,
-button.list-group-item-info {
-  color: #31708f;
-}
-a.list-group-item-info .list-group-item-heading,
-button.list-group-item-info .list-group-item-heading {
-  color: inherit;
-}
-a.list-group-item-info:hover,
-button.list-group-item-info:hover,
-a.list-group-item-info:focus,
-button.list-group-item-info:focus {
-  color: #31708f;
-  background-color: #c4e3f3;
-}
-a.list-group-item-info.active,
-button.list-group-item-info.active,
-a.list-group-item-info.active:hover,
-button.list-group-item-info.active:hover,
-a.list-group-item-info.active:focus,
-button.list-group-item-info.active:focus {
-  color: #fff;
-  background-color: #31708f;
-  border-color: #31708f;
-}
-.list-group-item-warning {
-  color: #8a6d3b;
-  background-color: #fcf8e3;
-}
-a.list-group-item-warning,
-button.list-group-item-warning {
-  color: #8a6d3b;
-}
-a.list-group-item-warning .list-group-item-heading,
-button.list-group-item-warning .list-group-item-heading {
-  color: inherit;
-}
-a.list-group-item-warning:hover,
-button.list-group-item-warning:hover,
-a.list-group-item-warning:focus,
-button.list-group-item-warning:focus {
-  color: #8a6d3b;
-  background-color: #faf2cc;
-}
-a.list-group-item-warning.active,
-button.list-group-item-warning.active,
-a.list-group-item-warning.active:hover,
-button.list-group-item-warning.active:hover,
-a.list-group-item-warning.active:focus,
-button.list-group-item-warning.active:focus {
-  color: #fff;
-  background-color: #8a6d3b;
-  border-color: #8a6d3b;
-}
-.list-group-item-danger {
-  color: #a94442;
-  background-color: #f2dede;
-}
-a.list-group-item-danger,
-button.list-group-item-danger {
-  color: #a94442;
-}
-a.list-group-item-danger .list-group-item-heading,
-button.list-group-item-danger .list-group-item-heading {
-  color: inherit;
-}
-a.list-group-item-danger:hover,
-button.list-group-item-danger:hover,
-a.list-group-item-danger:focus,
-button.list-group-item-danger:focus {
-  color: #a94442;
-  background-color: #ebcccc;
-}
-a.list-group-item-danger.active,
-button.list-group-item-danger.active,
-a.list-group-item-danger.active:hover,
-button.list-group-item-danger.active:hover,
-a.list-group-item-danger.active:focus,
-button.list-group-item-danger.active:focus {
-  color: #fff;
-  background-color: #a94442;
-  border-color: #a94442;
-}
-.list-group-item-heading {
-  margin-top: 0;
-  margin-bottom: 5px;
-}
-.list-group-item-text {
-  margin-bottom: 0;
-  line-height: 1.3;
-}
-.panel {
-  margin-bottom: 20px;
-  background-color: #fff;
-  border: 1px solid transparent;
-  border-radius: 4px;
-  -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);
-  box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);
-}
-.panel-body {
-  padding: 15px;
-}
-.panel-heading {
-  padding: 10px 15px;
-  border-bottom: 1px solid transparent;
-  border-top-left-radius: 3px;
-  border-top-right-radius: 3px;
-}
-.panel-heading > .dropdown .dropdown-toggle {
-  color: inherit;
-}
-.panel-title {
-  margin-top: 0;
-  margin-bottom: 0;
-  font-size: 16px;
-  color: inherit;
-}
-.panel-title > a,
-.panel-title > small,
-.panel-title > .small,
-.panel-title > small > a,
-.panel-title > .small > a {
-  color: inherit;
-}
-.panel-footer {
-  padding: 10px 15px;
-  background-color: #f5f5f5;
-  border-top: 1px solid #ddd;
-  border-bottom-right-radius: 3px;
-  border-bottom-left-radius: 3px;
-}
-.panel > .list-group,
-.panel > .panel-collapse > .list-group {
-  margin-bottom: 0;
-}
-.panel > .list-group .list-group-item,
-.panel > .panel-collapse > .list-group .list-group-item {
-  border-width: 1px 0;
-  border-radius: 0;
-}
-.panel > .list-group:first-child .list-group-item:first-child,
-.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {
-  border-top: 0;
-  border-top-left-radius: 3px;
-  border-top-right-radius: 3px;
-}
-.panel > .list-group:last-child .list-group-item:last-child,
-.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {
-  border-bottom: 0;
-  border-bottom-right-radius: 3px;
-  border-bottom-left-radius: 3px;
-}
-.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {
-  border-top-left-radius: 0;
-  border-top-right-radius: 0;
-}
-.panel-heading + .list-group .list-group-item:first-child {
-  border-top-width: 0;
-}
-.list-group + .panel-footer {
-  border-top-width: 0;
-}
-.panel > .table,
-.panel > .table-responsive > .table,
-.panel > .panel-collapse > .table {
-  margin-bottom: 0;
-}
-.panel > .table caption,
-.panel > .table-responsive > .table caption,
-.panel > .panel-collapse > .table caption {
-  padding-right: 15px;
-  padding-left: 15px;
-}
-.panel > .table:first-child,
-.panel > .table-responsive:first-child > .table:first-child {
-  border-top-left-radius: 3px;
-  border-top-right-radius: 3px;
-}
-.panel > .table:first-child > thead:first-child > tr:first-child,
-.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,
-.panel > .table:first-child > tbody:first-child > tr:first-child,
-.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {
-  border-top-left-radius: 3px;
-  border-top-right-radius: 3px;
-}
-.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,
-.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,
-.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,
-.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,
-.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,
-.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,
-.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,
-.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {
-  border-top-left-radius: 3px;
-}
-.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,
-.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,
-.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,
-.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,
-.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,
-.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,
-.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,
-.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {
-  border-top-right-radius: 3px;
-}
-.panel > .table:last-child,
-.panel > .table-responsive:last-child > .table:last-child {
-  border-bottom-right-radius: 3px;
-  border-bottom-left-radius: 3px;
-}
-.panel > .table:last-child > tbody:last-child > tr:last-child,
-.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,
-.panel > .table:last-child > tfoot:last-child > tr:last-child,
-.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {
-  border-bottom-right-radius: 3px;
-  border-bottom-left-radius: 3px;
-}
-.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,
-.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,
-.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,
-.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,
-.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,
-.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,
-.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,
-.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {
-  border-bottom-left-radius: 3px;
-}
-.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,
-.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,
-.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,
-.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,
-.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,
-.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,
-.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,
-.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {
-  border-bottom-right-radius: 3px;
-}
-.panel > .panel-body + .table,
-.panel > .panel-body + .table-responsive,
-.panel > .table + .panel-body,
-.panel > .table-responsive + .panel-body {
-  border-top: 1px solid #ddd;
-}
-.panel > .table > tbody:first-child > tr:first-child th,
-.panel > .table > tbody:first-child > tr:first-child td {
-  border-top: 0;
-}
-.panel > .table-bordered,
-.panel > .table-responsive > .table-bordered {
-  border: 0;
-}
-.panel > .table-bordered > thead > tr > th:first-child,
-.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,
-.panel > .table-bordered > tbody > tr > th:first-child,
-.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,
-.panel > .table-bordered > tfoot > tr > th:first-child,
-.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,
-.panel > .table-bordered > thead > tr > td:first-child,
-.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,
-.panel > .table-bordered > tbody > tr > td:first-child,
-.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,
-.panel > .table-bordered > tfoot > tr > td:first-child,
-.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {
-  border-left: 0;
-}
-.panel > .table-bordered > thead > tr > th:last-child,
-.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,
-.panel > .table-bordered > tbody > tr > th:last-child,
-.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,
-.panel > .table-bordered > tfoot > tr > th:last-child,
-.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,
-.panel > .table-bordered > thead > tr > td:last-child,
-.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,
-.panel > .table-bordered > tbody > tr > td:last-child,
-.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,
-.panel > .table-bordered > tfoot > tr > td:last-child,
-.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {
-  border-right: 0;
-}
-.panel > .table-bordered > thead > tr:first-child > td,
-.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,
-.panel > .table-bordered > tbody > tr:first-child > td,
-.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,
-.panel > .table-bordered > thead > tr:first-child > th,
-.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,
-.panel > .table-bordered > tbody > tr:first-child > th,
-.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {
-  border-bottom: 0;
-}
-.panel > .table-bordered > tbody > tr:last-child > td,
-.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,
-.panel > .table-bordered > tfoot > tr:last-child > td,
-.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,
-.panel > .table-bordered > tbody > tr:last-child > th,
-.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,
-.panel > .table-bordered > tfoot > tr:last-child > th,
-.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {
-  border-bottom: 0;
-}
-.panel > .table-responsive {
-  margin-bottom: 0;
-  border: 0;
-}
-.panel-group {
-  margin-bottom: 20px;
-}
-.panel-group .panel {
-  margin-bottom: 0;
-  border-radius: 4px;
-}
-.panel-group .panel + .panel {
-  margin-top: 5px;
-}
-.panel-group .panel-heading {
-  border-bottom: 0;
-}
-.panel-group .panel-heading + .panel-collapse > .panel-body,
-.panel-group .panel-heading + .panel-collapse > .list-group {
-  border-top: 1px solid #ddd;
-}
-.panel-group .panel-footer {
-  border-top: 0;
-}
-.panel-group .panel-footer + .panel-collapse .panel-body {
-  border-bottom: 1px solid #ddd;
-}
-.panel-default {
-  border-color: #ddd;
-}
-.panel-default > .panel-heading {
-  color: #333333;
-  background-color: #f5f5f5;
-  border-color: #ddd;
-}
-.panel-default > .panel-heading + .panel-collapse > .panel-body {
-  border-top-color: #ddd;
-}
-.panel-default > .panel-heading .badge {
-  color: #f5f5f5;
-  background-color: #333333;
-}
-.panel-default > .panel-footer + .panel-collapse > .panel-body {
-  border-bottom-color: #ddd;
-}
-.panel-primary {
-  border-color: #337ab7;
-}
-.panel-primary > .panel-heading {
-  color: #fff;
-  background-color: #337ab7;
-  border-color: #337ab7;
-}
-.panel-primary > .panel-heading + .panel-collapse > .panel-body {
-  border-top-color: #337ab7;
-}
-.panel-primary > .panel-heading .badge {
-  color: #337ab7;
-  background-color: #fff;
-}
-.panel-primary > .panel-footer + .panel-collapse > .panel-body {
-  border-bottom-color: #337ab7;
-}
-.panel-success {
-  border-color: #d6e9c6;
-}
-.panel-success > .panel-heading {
-  color: #3c763d;
-  background-color: #dff0d8;
-  border-color: #d6e9c6;
-}
-.panel-success > .panel-heading + .panel-collapse > .panel-body {
-  border-top-color: #d6e9c6;
-}
-.panel-success > .panel-heading .badge {
-  color: #dff0d8;
-  background-color: #3c763d;
-}
-.panel-success > .panel-footer + .panel-collapse > .panel-body {
-  border-bottom-color: #d6e9c6;
-}
-.panel-info {
-  border-color: #bce8f1;
-}
-.panel-info > .panel-heading {
-  color: #31708f;
-  background-color: #d9edf7;
-  border-color: #bce8f1;
-}
-.panel-info > .panel-heading + .panel-collapse > .panel-body {
-  border-top-color: #bce8f1;
-}
-.panel-info > .panel-heading .badge {
-  color: #d9edf7;
-  background-color: #31708f;
-}
-.panel-info > .panel-footer + .panel-collapse > .panel-body {
-  border-bottom-color: #bce8f1;
-}
-.panel-warning {
-  border-color: #faebcc;
-}
-.panel-warning > .panel-heading {
-  color: #8a6d3b;
-  background-color: #fcf8e3;
-  border-color: #faebcc;
-}
-.panel-warning > .panel-heading + .panel-collapse > .panel-body {
-  border-top-color: #faebcc;
-}
-.panel-warning > .panel-heading .badge {
-  color: #fcf8e3;
-  background-color: #8a6d3b;
-}
-.panel-warning > .panel-footer + .panel-collapse > .panel-body {
-  border-bottom-color: #faebcc;
-}
-.panel-danger {
-  border-color: #ebccd1;
-}
-.panel-danger > .panel-heading {
-  color: #a94442;
-  background-color: #f2dede;
-  border-color: #ebccd1;
-}
-.panel-danger > .panel-heading + .panel-collapse > .panel-body {
-  border-top-color: #ebccd1;
-}
-.panel-danger > .panel-heading .badge {
-  color: #f2dede;
-  background-color: #a94442;
-}
-.panel-danger > .panel-footer + .panel-collapse > .panel-body {
-  border-bottom-color: #ebccd1;
-}
-.embed-responsive {
-  position: relative;
-  display: block;
-  height: 0;
-  padding: 0;
-  overflow: hidden;
-}
-.embed-responsive .embed-responsive-item,
-.embed-responsive iframe,
-.embed-responsive embed,
-.embed-responsive object,
-.embed-responsive video {
-  position: absolute;
-  top: 0;
-  bottom: 0;
-  left: 0;
-  width: 100%;
-  height: 100%;
-  border: 0;
-}
-.embed-responsive-16by9 {
-  padding-bottom: 56.25%;
-}
-.embed-responsive-4by3 {
-  padding-bottom: 75%;
-}
-.well {
-  min-height: 20px;
-  padding: 19px;
-  margin-bottom: 20px;
-  background-color: #f5f5f5;
-  border: 1px solid #e3e3e3;
-  border-radius: 4px;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);
-  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);
-}
-.well blockquote {
-  border-color: #ddd;
-  border-color: rgba(0, 0, 0, 0.15);
-}
-.well-lg {
-  padding: 24px;
-  border-radius: 6px;
-}
-.well-sm {
-  padding: 9px;
-  border-radius: 3px;
-}
-.close {
-  float: right;
-  font-size: 21px;
-  font-weight: bold;
-  line-height: 1;
-  color: #000;
-  text-shadow: 0 1px 0 #fff;
-  filter: alpha(opacity=20);
-  opacity: 0.2;
-}
-.close:hover,
-.close:focus {
-  color: #000;
-  text-decoration: none;
-  cursor: pointer;
-  filter: alpha(opacity=50);
-  opacity: 0.5;
-}
-button.close {
-  padding: 0;
-  cursor: pointer;
-  background: transparent;
-  border: 0;
-  -webkit-appearance: none;
-  -moz-appearance: none;
-  appearance: none;
-}
-.modal-open {
-  overflow: hidden;
-}
-.modal {
-  position: fixed;
-  top: 0;
-  right: 0;
-  bottom: 0;
-  left: 0;
-  z-index: 1050;
-  display: none;
-  overflow: hidden;
-  -webkit-overflow-scrolling: touch;
-  outline: 0;
-}
-.modal.fade .modal-dialog {
-  -webkit-transform: translate(0, -25%);
-  -ms-transform: translate(0, -25%);
-  -o-transform: translate(0, -25%);
-  transform: translate(0, -25%);
-  -webkit-transition: -webkit-transform 0.3s ease-out;
-  -o-transition: -o-transform 0.3s ease-out;
-  transition: -webkit-transform 0.3s ease-out;
-  transition: transform 0.3s ease-out;
-  transition: transform 0.3s ease-out, -webkit-transform 0.3s ease-out, -o-transform 0.3s ease-out;
-}
-.modal.in .modal-dialog {
-  -webkit-transform: translate(0, 0);
-  -ms-transform: translate(0, 0);
-  -o-transform: translate(0, 0);
-  transform: translate(0, 0);
-}
-.modal-open .modal {
-  overflow-x: hidden;
-  overflow-y: auto;
-}
-.modal-dialog {
-  position: relative;
-  width: auto;
-  margin: 10px;
-}
-.modal-content {
-  position: relative;
-  background-color: #fff;
-  background-clip: padding-box;
-  border: 1px solid #999;
-  border: 1px solid rgba(0, 0, 0, 0.2);
-  border-radius: 6px;
-  -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);
-  box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);
-  outline: 0;
-}
-.modal-backdrop {
-  position: fixed;
-  top: 0;
-  right: 0;
-  bottom: 0;
-  left: 0;
-  z-index: 1040;
-  background-color: #000;
-}
-.modal-backdrop.fade {
-  filter: alpha(opacity=0);
-  opacity: 0;
-}
-.modal-backdrop.in {
-  filter: alpha(opacity=50);
-  opacity: 0.5;
-}
-.modal-header {
-  padding: 15px;
-  border-bottom: 1px solid #e5e5e5;
-}
-.modal-header .close {
-  margin-top: -2px;
-}
-.modal-title {
-  margin: 0;
-  line-height: 1.42857143;
-}
-.modal-body {
-  position: relative;
-  padding: 15px;
-}
-.modal-footer {
-  padding: 15px;
-  text-align: right;
-  border-top: 1px solid #e5e5e5;
-}
-.modal-footer .btn + .btn {
-  margin-bottom: 0;
-  margin-left: 5px;
-}
-.modal-footer .btn-group .btn + .btn {
-  margin-left: -1px;
-}
-.modal-footer .btn-block + .btn-block {
-  margin-left: 0;
-}
-.modal-scrollbar-measure {
-  position: absolute;
-  top: -9999px;
-  width: 50px;
-  height: 50px;
-  overflow: scroll;
-}
-@media (min-width: 768px) {
-  .modal-dialog {
-    width: 600px;
-    margin: 30px auto;
-  }
-  .modal-content {
-    -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);
-    box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);
-  }
-  .modal-sm {
-    width: 300px;
-  }
-}
-@media (min-width: 992px) {
-  .modal-lg {
-    width: 900px;
-  }
-}
-.tooltip {
-  position: absolute;
-  z-index: 1070;
-  display: block;
-  font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
-  font-style: normal;
-  font-weight: 400;
-  line-height: 1.42857143;
-  line-break: auto;
-  text-align: left;
-  text-align: start;
-  text-decoration: none;
-  text-shadow: none;
-  text-transform: none;
-  letter-spacing: normal;
-  word-break: normal;
-  word-spacing: normal;
-  word-wrap: normal;
-  white-space: normal;
-  font-size: 12px;
-  filter: alpha(opacity=0);
-  opacity: 0;
-}
-.tooltip.in {
-  filter: alpha(opacity=90);
-  opacity: 0.9;
-}
-.tooltip.top {
-  padding: 5px 0;
-  margin-top: -3px;
-}
-.tooltip.right {
-  padding: 0 5px;
-  margin-left: 3px;
-}
-.tooltip.bottom {
-  padding: 5px 0;
-  margin-top: 3px;
-}
-.tooltip.left {
-  padding: 0 5px;
-  margin-left: -3px;
-}
-.tooltip.top .tooltip-arrow {
-  bottom: 0;
-  left: 50%;
-  margin-left: -5px;
-  border-width: 5px 5px 0;
-  border-top-color: #000;
-}
-.tooltip.top-left .tooltip-arrow {
-  right: 5px;
-  bottom: 0;
-  margin-bottom: -5px;
-  border-width: 5px 5px 0;
-  border-top-color: #000;
-}
-.tooltip.top-right .tooltip-arrow {
-  bottom: 0;
-  left: 5px;
-  margin-bottom: -5px;
-  border-width: 5px 5px 0;
-  border-top-color: #000;
-}
-.tooltip.right .tooltip-arrow {
-  top: 50%;
-  left: 0;
-  margin-top: -5px;
-  border-width: 5px 5px 5px 0;
-  border-right-color: #000;
-}
-.tooltip.left .tooltip-arrow {
-  top: 50%;
-  right: 0;
-  margin-top: -5px;
-  border-width: 5px 0 5px 5px;
-  border-left-color: #000;
-}
-.tooltip.bottom .tooltip-arrow {
-  top: 0;
-  left: 50%;
-  margin-left: -5px;
-  border-width: 0 5px 5px;
-  border-bottom-color: #000;
-}
-.tooltip.bottom-left .tooltip-arrow {
-  top: 0;
-  right: 5px;
-  margin-top: -5px;
-  border-width: 0 5px 5px;
-  border-bottom-color: #000;
-}
-.tooltip.bottom-right .tooltip-arrow {
-  top: 0;
-  left: 5px;
-  margin-top: -5px;
-  border-width: 0 5px 5px;
-  border-bottom-color: #000;
-}
-.tooltip-inner {
-  max-width: 200px;
-  padding: 3px 8px;
-  color: #fff;
-  text-align: center;
-  background-color: #000;
-  border-radius: 4px;
-}
-.tooltip-arrow {
-  position: absolute;
-  width: 0;
-  height: 0;
-  border-color: transparent;
-  border-style: solid;
-}
-.popover {
-  position: absolute;
-  top: 0;
-  left: 0;
-  z-index: 1060;
-  display: none;
-  max-width: 276px;
-  padding: 1px;
-  font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
-  font-style: normal;
-  font-weight: 400;
-  line-height: 1.42857143;
-  line-break: auto;
-  text-align: left;
-  text-align: start;
-  text-decoration: none;
-  text-shadow: none;
-  text-transform: none;
-  letter-spacing: normal;
-  word-break: normal;
-  word-spacing: normal;
-  word-wrap: normal;
-  white-space: normal;
-  font-size: 14px;
-  background-color: #fff;
-  background-clip: padding-box;
-  border: 1px solid #ccc;
-  border: 1px solid rgba(0, 0, 0, 0.2);
-  border-radius: 6px;
-  -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
-  box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
-}
-.popover.top {
-  margin-top: -10px;
-}
-.popover.right {
-  margin-left: 10px;
-}
-.popover.bottom {
-  margin-top: 10px;
-}
-.popover.left {
-  margin-left: -10px;
-}
-.popover > .arrow {
-  border-width: 11px;
-}
-.popover > .arrow,
-.popover > .arrow:after {
-  position: absolute;
-  display: block;
-  width: 0;
-  height: 0;
-  border-color: transparent;
-  border-style: solid;
-}
-.popover > .arrow:after {
-  content: "";
-  border-width: 10px;
-}
-.popover.top > .arrow {
-  bottom: -11px;
-  left: 50%;
-  margin-left: -11px;
-  border-top-color: #999999;
-  border-top-color: rgba(0, 0, 0, 0.25);
-  border-bottom-width: 0;
-}
-.popover.top > .arrow:after {
-  bottom: 1px;
-  margin-left: -10px;
-  content: " ";
-  border-top-color: #fff;
-  border-bottom-width: 0;
-}
-.popover.right > .arrow {
-  top: 50%;
-  left: -11px;
-  margin-top: -11px;
-  border-right-color: #999999;
-  border-right-color: rgba(0, 0, 0, 0.25);
-  border-left-width: 0;
-}
-.popover.right > .arrow:after {
-  bottom: -10px;
-  left: 1px;
-  content: " ";
-  border-right-color: #fff;
-  border-left-width: 0;
-}
-.popover.bottom > .arrow {
-  top: -11px;
-  left: 50%;
-  margin-left: -11px;
-  border-top-width: 0;
-  border-bottom-color: #999999;
-  border-bottom-color: rgba(0, 0, 0, 0.25);
-}
-.popover.bottom > .arrow:after {
-  top: 1px;
-  margin-left: -10px;
-  content: " ";
-  border-top-width: 0;
-  border-bottom-color: #fff;
-}
-.popover.left > .arrow {
-  top: 50%;
-  right: -11px;
-  margin-top: -11px;
-  border-right-width: 0;
-  border-left-color: #999999;
-  border-left-color: rgba(0, 0, 0, 0.25);
-}
-.popover.left > .arrow:after {
-  right: 1px;
-  bottom: -10px;
-  content: " ";
-  border-right-width: 0;
-  border-left-color: #fff;
-}
-.popover-title {
-  padding: 8px 14px;
-  margin: 0;
-  font-size: 14px;
-  background-color: #f7f7f7;
-  border-bottom: 1px solid #ebebeb;
-  border-radius: 5px 5px 0 0;
-}
-.popover-content {
-  padding: 9px 14px;
-}
-.carousel {
-  position: relative;
-}
-.carousel-inner {
-  position: relative;
-  width: 100%;
-  overflow: hidden;
-}
-.carousel-inner > .item {
-  position: relative;
-  display: none;
-  -webkit-transition: 0.6s ease-in-out left;
-  -o-transition: 0.6s ease-in-out left;
-  transition: 0.6s ease-in-out left;
-}
-.carousel-inner > .item > img,
-.carousel-inner > .item > a > img {
-  line-height: 1;
-}
-@media all and (transform-3d), (-webkit-transform-3d) {
-  .carousel-inner > .item {
-    -webkit-transition: -webkit-transform 0.6s ease-in-out;
-    -o-transition: -o-transform 0.6s ease-in-out;
-    transition: -webkit-transform 0.6s ease-in-out;
-    transition: transform 0.6s ease-in-out;
-    transition: transform 0.6s ease-in-out, -webkit-transform 0.6s ease-in-out, -o-transform 0.6s ease-in-out;
-    -webkit-backface-visibility: hidden;
-    backface-visibility: hidden;
-    -webkit-perspective: 1000px;
-    perspective: 1000px;
-  }
-  .carousel-inner > .item.next,
-  .carousel-inner > .item.active.right {
-    -webkit-transform: translate3d(100%, 0, 0);
-    transform: translate3d(100%, 0, 0);
-    left: 0;
-  }
-  .carousel-inner > .item.prev,
-  .carousel-inner > .item.active.left {
-    -webkit-transform: translate3d(-100%, 0, 0);
-    transform: translate3d(-100%, 0, 0);
-    left: 0;
-  }
-  .carousel-inner > .item.next.left,
-  .carousel-inner > .item.prev.right,
-  .carousel-inner > .item.active {
-    -webkit-transform: translate3d(0, 0, 0);
-    transform: translate3d(0, 0, 0);
-    left: 0;
-  }
-}
-.carousel-inner > .active,
-.carousel-inner > .next,
-.carousel-inner > .prev {
-  display: block;
-}
-.carousel-inner > .active {
-  left: 0;
-}
-.carousel-inner > .next,
-.carousel-inner > .prev {
-  position: absolute;
-  top: 0;
-  width: 100%;
-}
-.carousel-inner > .next {
-  left: 100%;
-}
-.carousel-inner > .prev {
-  left: -100%;
-}
-.carousel-inner > .next.left,
-.carousel-inner > .prev.right {
-  left: 0;
-}
-.carousel-inner > .active.left {
-  left: -100%;
-}
-.carousel-inner > .active.right {
-  left: 100%;
-}
-.carousel-control {
-  position: absolute;
-  top: 0;
-  bottom: 0;
-  left: 0;
-  width: 15%;
-  font-size: 20px;
-  color: #fff;
-  text-align: center;
-  text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);
-  background-color: rgba(0, 0, 0, 0);
-  filter: alpha(opacity=50);
-  opacity: 0.5;
-}
-.carousel-control.left {
-  background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);
-  background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);
-  background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, 0.5)), to(rgba(0, 0, 0, 0.0001)));
-  background-image: linear-gradient(to right, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);
-  background-repeat: repeat-x;
-}
-.carousel-control.right {
-  right: 0;
-  left: auto;
-  background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);
-  background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);
-  background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, 0.0001)), to(rgba(0, 0, 0, 0.5)));
-  background-image: linear-gradient(to right, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);
-  background-repeat: repeat-x;
-}
-.carousel-control:hover,
-.carousel-control:focus {
-  color: #fff;
-  text-decoration: none;
-  outline: 0;
-  filter: alpha(opacity=90);
-  opacity: 0.9;
-}
-.carousel-control .icon-prev,
-.carousel-control .icon-next,
-.carousel-control .glyphicon-chevron-left,
-.carousel-control .glyphicon-chevron-right {
-  position: absolute;
-  top: 50%;
-  z-index: 5;
-  display: inline-block;
-  margin-top: -10px;
-}
-.carousel-control .icon-prev,
-.carousel-control .glyphicon-chevron-left {
-  left: 50%;
-  margin-left: -10px;
-}
-.carousel-control .icon-next,
-.carousel-control .glyphicon-chevron-right {
-  right: 50%;
-  margin-right: -10px;
-}
-.carousel-control .icon-prev,
-.carousel-control .icon-next {
-  width: 20px;
-  height: 20px;
-  font-family: serif;
-  line-height: 1;
-}
-.carousel-control .icon-prev:before {
-  content: "\2039";
-}
-.carousel-control .icon-next:before {
-  content: "\203a";
-}
-.carousel-indicators {
-  position: absolute;
-  bottom: 10px;
-  left: 50%;
-  z-index: 15;
-  width: 60%;
-  padding-left: 0;
-  margin-left: -30%;
-  text-align: center;
-  list-style: none;
-}
-.carousel-indicators li {
-  display: inline-block;
-  width: 10px;
-  height: 10px;
-  margin: 1px;
-  text-indent: -999px;
-  cursor: pointer;
-  background-color: #000 \9;
-  background-color: rgba(0, 0, 0, 0);
-  border: 1px solid #fff;
-  border-radius: 10px;
-}
-.carousel-indicators .active {
-  width: 12px;
-  height: 12px;
-  margin: 0;
-  background-color: #fff;
-}
-.carousel-caption {
-  position: absolute;
-  right: 15%;
-  bottom: 20px;
-  left: 15%;
-  z-index: 10;
-  padding-top: 20px;
-  padding-bottom: 20px;
-  color: #fff;
-  text-align: center;
-  text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);
-}
-.carousel-caption .btn {
-  text-shadow: none;
-}
-@media screen and (min-width: 768px) {
-  .carousel-control .glyphicon-chevron-left,
-  .carousel-control .glyphicon-chevron-right,
-  .carousel-control .icon-prev,
-  .carousel-control .icon-next {
-    width: 30px;
-    height: 30px;
-    margin-top: -10px;
-    font-size: 30px;
-  }
-  .carousel-control .glyphicon-chevron-left,
-  .carousel-control .icon-prev {
-    margin-left: -10px;
-  }
-  .carousel-control .glyphicon-chevron-right,
-  .carousel-control .icon-next {
-    margin-right: -10px;
-  }
-  .carousel-caption {
-    right: 20%;
-    left: 20%;
-    padding-bottom: 30px;
-  }
-  .carousel-indicators {
-    bottom: 20px;
-  }
-}
-.clearfix:before,
-.clearfix:after,
-.dl-horizontal dd:before,
-.dl-horizontal dd:after,
-.container:before,
-.container:after,
-.container-fluid:before,
-.container-fluid:after,
-.row:before,
-.row:after,
-.form-horizontal .form-group:before,
-.form-horizontal .form-group:after,
-.btn-toolbar:before,
-.btn-toolbar:after,
-.btn-group-vertical > .btn-group:before,
-.btn-group-vertical > .btn-group:after,
-.nav:before,
-.nav:after,
-.navbar:before,
-.navbar:after,
-.navbar-header:before,
-.navbar-header:after,
-.navbar-collapse:before,
-.navbar-collapse:after,
-.pager:before,
-.pager:after,
-.panel-body:before,
-.panel-body:after,
-.modal-header:before,
-.modal-header:after,
-.modal-footer:before,
-.modal-footer:after {
-  display: table;
-  content: " ";
-}
-.clearfix:after,
-.dl-horizontal dd:after,
-.container:after,
-.container-fluid:after,
-.row:after,
-.form-horizontal .form-group:after,
-.btn-toolbar:after,
-.btn-group-vertical > .btn-group:after,
-.nav:after,
-.navbar:after,
-.navbar-header:after,
-.navbar-collapse:after,
-.pager:after,
-.panel-body:after,
-.modal-header:after,
-.modal-footer:after {
-  clear: both;
-}
-.center-block {
-  display: block;
-  margin-right: auto;
-  margin-left: auto;
-}
-.pull-right {
-  float: right !important;
-}
-.pull-left {
-  float: left !important;
-}
-.hide {
-  display: none !important;
-}
-.show {
-  display: block !important;
-}
-.invisible {
-  visibility: hidden;
-}
-.text-hide {
-  font: 0/0 a;
-  color: transparent;
-  text-shadow: none;
-  background-color: transparent;
-  border: 0;
-}
-.hidden {
-  display: none !important;
-}
-.affix {
-  position: fixed;
-}
-@-ms-viewport {
-  width: device-width;
-}
-.visible-xs,
-.visible-sm,
-.visible-md,
-.visible-lg {
-  display: none !important;
-}
-.visible-xs-block,
-.visible-xs-inline,
-.visible-xs-inline-block,
-.visible-sm-block,
-.visible-sm-inline,
-.visible-sm-inline-block,
-.visible-md-block,
-.visible-md-inline,
-.visible-md-inline-block,
-.visible-lg-block,
-.visible-lg-inline,
-.visible-lg-inline-block {
-  display: none !important;
-}
-@media (max-width: 767px) {
-  .visible-xs {
-    display: block !important;
-  }
-  table.visible-xs {
-    display: table !important;
-  }
-  tr.visible-xs {
-    display: table-row !important;
-  }
-  th.visible-xs,
-  td.visible-xs {
-    display: table-cell !important;
-  }
-}
-@media (max-width: 767px) {
-  .visible-xs-block {
-    display: block !important;
-  }
-}
-@media (max-width: 767px) {
-  .visible-xs-inline {
-    display: inline !important;
-  }
-}
-@media (max-width: 767px) {
-  .visible-xs-inline-block {
-    display: inline-block !important;
-  }
-}
-@media (min-width: 768px) and (max-width: 991px) {
-  .visible-sm {
-    display: block !important;
-  }
-  table.visible-sm {
-    display: table !important;
-  }
-  tr.visible-sm {
-    display: table-row !important;
-  }
-  th.visible-sm,
-  td.visible-sm {
-    display: table-cell !important;
-  }
-}
-@media (min-width: 768px) and (max-width: 991px) {
-  .visible-sm-block {
-    display: block !important;
-  }
-}
-@media (min-width: 768px) and (max-width: 991px) {
-  .visible-sm-inline {
-    display: inline !important;
-  }
-}
-@media (min-width: 768px) and (max-width: 991px) {
-  .visible-sm-inline-block {
-    display: inline-block !important;
-  }
-}
-@media (min-width: 992px) and (max-width: 1199px) {
-  .visible-md {
-    display: block !important;
-  }
-  table.visible-md {
-    display: table !important;
-  }
-  tr.visible-md {
-    display: table-row !important;
-  }
-  th.visible-md,
-  td.visible-md {
-    display: table-cell !important;
-  }
-}
-@media (min-width: 992px) and (max-width: 1199px) {
-  .visible-md-block {
-    display: block !important;
-  }
-}
-@media (min-width: 992px) and (max-width: 1199px) {
-  .visible-md-inline {
-    display: inline !important;
-  }
-}
-@media (min-width: 992px) and (max-width: 1199px) {
-  .visible-md-inline-block {
-    display: inline-block !important;
-  }
-}
-@media (min-width: 1200px) {
-  .visible-lg {
-    display: block !important;
-  }
-  table.visible-lg {
-    display: table !important;
-  }
-  tr.visible-lg {
-    display: table-row !important;
-  }
-  th.visible-lg,
-  td.visible-lg {
-    display: table-cell !important;
-  }
-}
-@media (min-width: 1200px) {
-  .visible-lg-block {
-    display: block !important;
-  }
-}
-@media (min-width: 1200px) {
-  .visible-lg-inline {
-    display: inline !important;
-  }
-}
-@media (min-width: 1200px) {
-  .visible-lg-inline-block {
-    display: inline-block !important;
-  }
-}
-@media (max-width: 767px) {
-  .hidden-xs {
-    display: none !important;
-  }
-}
-@media (min-width: 768px) and (max-width: 991px) {
-  .hidden-sm {
-    display: none !important;
-  }
-}
-@media (min-width: 992px) and (max-width: 1199px) {
-  .hidden-md {
-    display: none !important;
-  }
-}
-@media (min-width: 1200px) {
-  .hidden-lg {
-    display: none !important;
-  }
-}
-.visible-print {
-  display: none !important;
-}
-@media print {
-  .visible-print {
-    display: block !important;
-  }
-  table.visible-print {
-    display: table !important;
-  }
-  tr.visible-print {
-    display: table-row !important;
-  }
-  th.visible-print,
-  td.visible-print {
-    display: table-cell !important;
-  }
-}
-.visible-print-block {
-  display: none !important;
-}
-@media print {
-  .visible-print-block {
-    display: block !important;
-  }
-}
-.visible-print-inline {
-  display: none !important;
-}
-@media print {
-  .visible-print-inline {
-    display: inline !important;
-  }
-}
-.visible-print-inline-block {
-  display: none !important;
-}
-@media print {
-  .visible-print-inline-block {
-    display: inline-block !important;
-  }
-}
-@media print {
-  .hidden-print {
-    display: none !important;
-  }
-}
-/*# sourceMappingURL=bootstrap.css.map */
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css.map b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css.map
deleted file mode 100644
index caac3e6..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"sources":["bootstrap.css","less/normalize.less","less/print.less","less/glyphicons.less","less/scaffolding.less","less/mixins/vendor-prefixes.less","less/mixins/tab-focus.less","less/mixins/image.less","less/type.less","less/mixins/text-emphasis.less","less/mixins/background-variant.less","less/mixins/text-overflow.less","less/code.less","less/grid.less","less/mixins/grid.less","less/mixins/grid-framework.less","less/tables.less","less/mixins/table-row.less","less/forms.less","less/mixins/forms.less","less/buttons.less","less/mixins/buttons.less","less/mixins/opacity.less","less/component-animations.less","less/dropdowns.less","less/mixins/nav-divider.less","less/mixins/reset-filter.less","less/button-groups.less","less/mixins/border-radius.less","less/input-groups.less","less/navs.less","less/navbar.less","less/mixins/nav-vertical-align.less","less/utilities.less","less/breadcrumbs.less","less/pagination.less","less/mixins/pagination.less","less/pager.less","less/labels.less","less/mixins/labels.less","less/badges.less","less/jumbotron.less","less/thumbnails.less","less/alerts.less","less/mixins/alerts.less","less/progress-bars.less","less/mixins/gradients.less","less/mixins/progress-bar.less","less/media.less","less/list-group.less","less/mixins/list-group.less","less/panels.less","less/mixins/panels.less","less/responsive-embed.less","less/wells.less","less/close.less","less/modals.less","less/tooltip.less","less/mixins/reset-text.less","less/popovers.less","less/carousel.less","less/mixins/clearfix.less","less/mixins/center-block.less","less/mixins/hide-text.less","less/responsive-utilities.less","less/mixins/responsive-visibility.less"],"names":[],"mappings":"AAAA;;;;GAIG;AACH,4EAA4E;ACK5E;EACE,wBAAA;EACA,2BAAA;EACA,+BAAA;CDHD;ACUD;EACE,UAAA;CDRD;ACqBD;;;;;;;;;;;;;EAaE,eAAA;CDnBD;AC2BD;;;;EAIE,sBAAA;EACA,yBAAA;CDzBD;ACiCD;EACE,cAAA;EACA,UAAA;CD/BD;ACuCD;;EAEE,cAAA;CDrCD;AC+CD;EACE,8BAAA;CD7CD;ACqDD;;EAEE,WAAA;CDnDD;AC8DD;EACE,oBAAA;EACA,2BAAA;EACA,0CAAA;EAAA,uCAAA;EAAA,kCAAA;CD5DD;ACmED;;EAEE,kBAAA;CDjED;ACwED;EACE,mBAAA;CDtED;AC8ED;EACE,eAAA;EACA,iBAAA;CD5ED;ACmFD;EACE,iBAAA;EACA,YAAA;CDjFD;ACwFD;EACE,eAAA;CDtFD;AC6FD;;EAEE,eAAA;EACA,eAAA;EACA,mBAAA;EACA,yBAAA;CD3FD;AC8FD;EACE,YAAA;CD5FD;AC+FD;EACE,gBAAA;CD7FD;ACuGD;EACE,UAAA;CDrGD;AC4GD;EACE,iBAAA;CD1GD;ACoHD;EACE,iBAAA;CDlHD;ACyHD;EACE,gCAAA;EAAA,6BAAA;EAAA,wBAAA;EACA,UAAA;CDvHD;AC8HD;EACE,eAAA;CD5HD;ACmID;;;;EAIE,kCAAA;EACA,eAAA;CDjID;ACmJD;;;;;EAKE,eAAA;EACA,cAAA;EACA,UAAA;CDjJD;ACwJD;EACE,kBAAA;CDtJD;ACgKD;;EAEE,qBAAA;CD9JD;ACyKD;;;;EAIE,2BAAA;EACA,gBAAA;CDvKD;AC8KD;;EAEE,gBAAA;CD5KD;ACmLD;;EAEE,UAAA;EACA,WAAA;CDjLD;ACyLD;EACE,oBAAA;CDvLD;ACkMD;;EAEE,+BAAA;EAAA,4BAAA;EAAA,uBAAA;EACA,WAAA;CDhMD;ACyMD;;EAEE,aAAA;CDvMD;AC+MD;EACE,8BAAA;EACA,gCAAA;EAAA,6BAAA;EAAA,wBAAA;CD7MD;ACsND;;EAEE,yBAAA;CDpND;AC2ND;EACE,0BAAA;EACA,cAAA;EACA,+BAAA;CDzND;ACiOD;EACE,UAAA;EACA,WAAA;CD/ND;ACsOD;EACE,eAAA;CDpOD;AC4OD;EACE,kBAAA;CD1OD;ACoPD;EACE,0BAAA;EACA,kBAAA;CDlPD;ACqPD;;EAEE,WAAA;CDnPD;AACD,qFAAqF;AEhLrF;EACE;;;IAGE,uBAAA;IACA,6BAAA;IACA,mCAAA;IACA,oCAAA;IAAA,4BAAA;GFkLD;EE/KD;;IAEE,2BAAA;GFiLD;EE9KD;IACE,6BAAA;GFgLD;EE7KD;IACE,8BAAA;GF+KD;EE1KD;;IAEE,YAAA;GF4KD;EEzKD;;IAEE,uBAAA;IACA,yBAAA;GF2KD;EExKD;IACE,4BAAA;GF0KD;EEvKD;;IAEE,yBAAA;GFyKD;EEtKD;IACE,2BAAA;GFwKD;EErKD;;;IAGE,WAAA;IACA,UAAA;GFuKD;EEpKD;;IAEE,wBAAA;GFsKD;EEhKD;IACE,cAAA;GFkKD;EEhKD;;IAGI,kCAAA;GFiKH;EE9JD;IACE,uBAAA;GFgKD;EE7JD;IACE,qCAAA;GF+JD;EEhKD;;IAKI,kCAAA;GF+JH;EE5JD;;IAGI,kCAAA;GF6JH;CACF;AGnPD;EACE,oCAAA;EACA,sDAAA;EACA,gYAAA;CHqPD;AG7OD;EACE,mBAAA;EACA,S
AAA;EACA,sBAAA;EACA,oCAAA;EACA,mBAAA;EACA,iBAAA;EACA,eAAA;EACA,oCAAA;EACA,mCAAA;CH+OD;AG3OmC;EAAW,iBAAA;CH8O9C;AG7OmC;EAAW,iBAAA;CHgP9C;AG9OmC;;EAAW,iBAAA;CHkP9C;AGjPmC;EAAW,iBAAA;CHoP9C;AGnPmC;EAAW,iBAAA;CHsP9C;AGrPmC;EAAW,iBAAA;CHwP9C;AGvPmC;EAAW,iBAAA;CH0P9C;AGzPmC;EAAW,iBAAA;CH4P9C;AG3PmC;EAAW,iBAAA;CH8P9C;AG7PmC;EAAW,iBAAA;CHgQ9C;AG/PmC;EAAW,iBAAA;CHkQ9C;AGjQmC;EAAW,iBAAA;CHoQ9C;AGnQmC;EAAW,iBAAA;CHsQ9C;AGrQmC;EAAW,iBAAA;CHwQ9C;AGvQmC;EAAW,iBAAA;CH0Q9C;AGzQmC;EAAW,iBAAA;CH4Q9C;AG3QmC;EAAW,iBAAA;CH8Q9C;AG7QmC;EAAW,iBAAA;CHgR9C;AG/QmC;EAAW,iBAAA;CHkR9C;AGjRmC;EAAW,iBAAA;CHoR9C;AGnRmC;EAAW,iBAAA;CHsR9C;AGrRmC;EAAW,iBAAA;CHwR9C;AGvRmC;EAAW,iBAAA;CH0R9C;AGzRmC;EAAW,iBAAA;CH4R9C;AG3RmC;EAAW,iBAAA;CH8R9C;AG7RmC;EAAW,iBAAA;CHgS9C;AG/RmC;EAAW,iBAAA;CHkS9C;AGjSmC;EAAW,iBAAA;CHoS9C;AGnSmC;EAAW,iBAAA;CHsS9C;AGrSmC;EAAW,iBAAA;CHwS9C;AGvSmC;EAAW,iBAAA;CH0S9C;AGzSmC;EAAW,iBAAA;CH4S9C;AG3SmC;EAAW,iBAAA;CH8S9C;AG7SmC;EAAW,iBAAA;CHgT9C;AG/SmC;EAAW,iBAAA;CHkT9C;AGjTmC;EAAW,iBAAA;CHoT9C;AGnTmC;EAAW,iBAAA;CHsT9C;AGrTmC;EAAW,iBAAA;CHwT9C;AGvTmC;EAAW,iBAAA;CH0T9C;AGzTmC;EAAW,iBAAA;CH4T9C;AG3TmC;EAAW,iBAAA;CH8T9C;AG7TmC;EAAW,iBAAA;CHgU9C;AG/TmC;EAAW,iBAAA;CHkU9C;AGjUmC;EAAW,iBAAA;CHoU9C;AGnUmC;EAAW,iBAAA;CHsU9C;AGrUmC;EAAW,iBAAA;CHwU9C;AGvUmC;EAAW,iBAAA;CH0U9C;AGzUmC;EAAW,iBAAA;CH4U9C;AG3UmC;EAAW,iBAAA;CH8U9C;AG7UmC;EAAW,iBAAA;CHgV9C;AG/UmC;EAAW,iBAAA;CHkV9C;AGjVmC;EAAW,iBAAA;CHoV9C;AGnVmC;EAAW,iBAAA;CHsV9C;AGrVmC;EAAW,iBAAA;CHwV9C;AGvVmC;EAAW,iBAAA;CH0V9C;AGzVmC;EAAW,iBAAA;CH4V9C;AG3VmC;EAAW,iBAAA;CH8V9C;AG7VmC;EAAW,iBAAA;CHgW9C;AG/VmC;EAAW,iBAAA;CHkW9C;AGjWmC;EAAW,iBAAA;CHoW9C;AGnWmC;EAAW,iBAAA;CHsW9C;AGrWmC;EAAW,iBAAA;CHwW9C;AGvWmC;EAAW,iBAAA;CH0W9C;AGzWmC;EAAW,iBAAA;CH4W9C;AG3WmC;EAAW,iBAAA;CH8W9C;AG7WmC;EAAW,iBAAA;CHgX9C;AG/WmC;EAAW,iBAAA;CHkX9C;AGjXmC;EAAW,iBAAA;CHoX9C;AGnXmC;EAAW,iBAAA;CHsX9C;AGrXmC;EAAW,iBAAA;CHwX9C;AGvXmC;EAAW,iBAAA;CH0X9C;AGzXmC;EAAW,iBAAA;CH4X9C;AG3XmC;EAAW,iBAAA;CH8X9C;AG7XmC;EAAW,iBAAA;CHgY9C;AG/XmC;EAAW,iBAAA;CHkY9C;AGjYmC;EAAW,iBAAA;CHoY9C;AGnYmC;EAAW,iBAAA;CHsY9C;AGrYmC;EAAW,iBAAA;CHwY9C;AGvYmC;EAAW,iBAAA;CH0Y9C;AGzYmC;EAAW,iBAAA;CH4Y9C;AG3YmC;EAAW,iBAAA;CH8Y9C;AG7YmC;EAAW,iBAAA;CHgZ9C;AG/YmC;EAAW,iBAAA;CHkZ9C;AGjZmC;EAAW,iBAAA;CHoZ9C;AGnZmC;EAAW,iBAAA;CHsZ9C;AGrZmC;EAAW,iBAAA;CHwZ9C;AGvZmC;EAAW,iBAAA;CH0Z9C;AGzZmC;EAAW,iBAAA;CH4Z9C;AG3ZmC;EAAW,iBAAA;CH8Z9C;AG7ZmC;EAAW,iBAAA;CHga9C;AG/ZmC;EAAW,iBAAA;CHka9C;AGjamC;EAAW,iBAAA;CHoa9C;AGnamC;EAAW,iBAAA;CHsa9C;AGramC;EAAW,iBAAA;CHwa9C;AGvamC;EAAW,iBAAA;CH0a9C;AGzamC;EAAW,iBAAA;CH4a9C;AG3amC;EAAW,iBAAA;CH8a9C;AG7amC;EAAW,iBAAA;CHgb9C;AG/amC;EAAW,iBAAA;CHkb9C;AGjbmC;EAAW,iBAAA;CHob9C;AGnbmC;EAAW,iBAAA;CHsb9C;AGrbmC;EAAW,iBAAA;CHwb9C;AGvbmC;EAAW,iBAAA;CH0b9C;AGzbmC;EAAW,iBAAA;CH4b9C;AG3bmC;EAAW,iBAAA;CH8b9C;AG7bmC;EAAW,iBAAA;CHgc9C;AG/bmC;EAAW,iBAAA;CHkc9C;AGjcmC;EAAW,iBAAA;CHoc9C;AGncmC;EAAW,iBAAA;CHsc9C;AGrcmC;EAAW,iBAAA;CHwc9C;AGvcmC;EAAW,iBAAA;CH0c9C;AGzcmC;EAAW,iBAAA;CH4c9C;AG3cmC;EAAW,iBAAA;CH8c9C;AG7cmC;EAAW,iBAAA;CHgd9C;AG/cmC;EAAW,iBAAA;CHkd9C;AGjdmC;EAAW,iBAAA;CHod9C;AGndmC;EAAW,iBAAA;CHsd9C;AGrdmC;EAAW,iBAAA;CHwd9C;AGvdmC;EAAW,iBAAA;CH0d9C;AGzdmC;EAAW,iBAAA;CH4d9C;AG3dmC;EAAW,iBAAA;CH8d9C;AG7dmC;EAAW,iBAAA;CHge9C;AG/dmC;EAAW,iBAAA;CHke9C;AGjemC;EAAW,iBAAA;CHoe9C;AGnemC;EAAW,iBAAA;CHse9C;AGremC;EAAW,iBAAA;CHwe9C;AGvemC;EAAW,iBAAA;CH0e9C;AGzemC;EAAW,iBAAA;CH4e9C;AG3emC;EAAW,iBAAA;CH8e9C;AG7emC;EAAW,iBAAA;CHgf9C;AG/emC;EAAW,iBAAA;CHkf9C;AGjfmC;EAAW,iBAAA;CHof9C;AGnfmC;EAAW,iBAAA;CHsf9C;AGrfmC;EAAW,iBAAA;CHwf9C;AGvfmC;EAAW,iBAAA;CH0f9C;AGzfmC;EAAW,iBAAA;CH4f9C;AG3fmC;EAAW,iBAAA;CH8f9C;AG7fmC;EAAW,iBAAA;CHggB9C;AG/fmC;EAAW,iBAAA
;CHkgB9C;AGjgBmC;EAAW,iBAAA;CHogB9C;AGngBmC;EAAW,iBAAA;CHsgB9C;AGrgBmC;EAAW,iBAAA;CHwgB9C;AGvgBmC;EAAW,iBAAA;CH0gB9C;AGzgBmC;EAAW,iBAAA;CH4gB9C;AG3gBmC;EAAW,iBAAA;CH8gB9C;AG7gBmC;EAAW,iBAAA;CHghB9C;AG/gBmC;EAAW,iBAAA;CHkhB9C;AGjhBmC;EAAW,iBAAA;CHohB9C;AGnhBmC;EAAW,iBAAA;CHshB9C;AGrhBmC;EAAW,iBAAA;CHwhB9C;AGvhBmC;EAAW,iBAAA;CH0hB9C;AGzhBmC;EAAW,iBAAA;CH4hB9C;AG3hBmC;EAAW,iBAAA;CH8hB9C;AG7hBmC;EAAW,iBAAA;CHgiB9C;AG/hBmC;EAAW,iBAAA;CHkiB9C;AGjiBmC;EAAW,iBAAA;CHoiB9C;AGniBmC;EAAW,iBAAA;CHsiB9C;AGriBmC;EAAW,iBAAA;CHwiB9C;AGviBmC;EAAW,iBAAA;CH0iB9C;AGziBmC;EAAW,iBAAA;CH4iB9C;AG3iBmC;EAAW,iBAAA;CH8iB9C;AG7iBmC;EAAW,iBAAA;CHgjB9C;AG/iBmC;EAAW,iBAAA;CHkjB9C;AGjjBmC;EAAW,iBAAA;CHojB9C;AGnjBmC;EAAW,iBAAA;CHsjB9C;AGrjBmC;EAAW,iBAAA;CHwjB9C;AGvjBmC;EAAW,iBAAA;CH0jB9C;AGzjBmC;EAAW,iBAAA;CH4jB9C;AG3jBmC;EAAW,iBAAA;CH8jB9C;AG7jBmC;EAAW,iBAAA;CHgkB9C;AG/jBmC;EAAW,iBAAA;CHkkB9C;AGjkBmC;EAAW,iBAAA;CHokB9C;AGnkBmC;EAAW,iBAAA;CHskB9C;AGrkBmC;EAAW,iBAAA;CHwkB9C;AGvkBmC;EAAW,iBAAA;CH0kB9C;AGzkBmC;EAAW,iBAAA;CH4kB9C;AG3kBmC;EAAW,iBAAA;CH8kB9C;AG7kBmC;EAAW,iBAAA;CHglB9C;AG/kBmC;EAAW,iBAAA;CHklB9C;AGjlBmC;EAAW,iBAAA;CHolB9C;AGnlBmC;EAAW,iBAAA;CHslB9C;AGrlBmC;EAAW,iBAAA;CHwlB9C;AGvlBmC;EAAW,iBAAA;CH0lB9C;AGzlBmC;EAAW,iBAAA;CH4lB9C;AG3lBmC;EAAW,iBAAA;CH8lB9C;AG7lBmC;EAAW,iBAAA;CHgmB9C;AG/lBmC;EAAW,iBAAA;CHkmB9C;AGjmBmC;EAAW,iBAAA;CHomB9C;AGnmBmC;EAAW,iBAAA;CHsmB9C;AGrmBmC;EAAW,iBAAA;CHwmB9C;AGvmBmC;EAAW,iBAAA;CH0mB9C;AGzmBmC;EAAW,iBAAA;CH4mB9C;AG3mBmC;EAAW,iBAAA;CH8mB9C;AG7mBmC;EAAW,iBAAA;CHgnB9C;AG/mBmC;EAAW,iBAAA;CHknB9C;AGjnBmC;EAAW,iBAAA;CHonB9C;AGnnBmC;EAAW,iBAAA;CHsnB9C;AGrnBmC;EAAW,iBAAA;CHwnB9C;AGvnBmC;EAAW,iBAAA;CH0nB9C;AGznBmC;EAAW,iBAAA;CH4nB9C;AG3nBmC;EAAW,iBAAA;CH8nB9C;AG7nBmC;EAAW,iBAAA;CHgoB9C;AG/nBmC;EAAW,iBAAA;CHkoB9C;AGjoBmC;EAAW,iBAAA;CHooB9C;AGnoBmC;EAAW,iBAAA;CHsoB9C;AGroBmC;EAAW,iBAAA;CHwoB9C;AG/nBmC;EAAW,iBAAA;CHkoB9C;AGjoBmC;EAAW,iBAAA;CHooB9C;AGnoBmC;EAAW,iBAAA;CHsoB9C;AGroBmC;EAAW,iBAAA;CHwoB9C;AGvoBmC;EAAW,iBAAA;CH0oB9C;AGzoBmC;EAAW,iBAAA;CH4oB9C;AG3oBmC;EAAW,iBAAA;CH8oB9C;AG7oBmC;EAAW,iBAAA;CHgpB9C;AG/oBmC;EAAW,iBAAA;CHkpB9C;AGjpBmC;EAAW,iBAAA;CHopB9C;AGnpBmC;EAAW,iBAAA;CHspB9C;AGrpBmC;EAAW,iBAAA;CHwpB9C;AGvpBmC;EAAW,iBAAA;CH0pB9C;AGzpBmC;EAAW,iBAAA;CH4pB9C;AG3pBmC;EAAW,iBAAA;CH8pB9C;AG7pBmC;EAAW,iBAAA;CHgqB9C;AG/pBmC;EAAW,iBAAA;CHkqB9C;AGjqBmC;EAAW,iBAAA;CHoqB9C;AGnqBmC;EAAW,iBAAA;CHsqB9C;AGrqBmC;EAAW,iBAAA;CHwqB9C;AGvqBmC;EAAW,iBAAA;CH0qB9C;AGzqBmC;EAAW,iBAAA;CH4qB9C;AG3qBmC;EAAW,iBAAA;CH8qB9C;AG7qBmC;EAAW,iBAAA;CHgrB9C;AG/qBmC;EAAW,iBAAA;CHkrB9C;AGjrBmC;EAAW,iBAAA;CHorB9C;AGnrBmC;EAAW,iBAAA;CHsrB9C;AGrrBmC;EAAW,iBAAA;CHwrB9C;AGvrBmC;EAAW,iBAAA;CH0rB9C;AGzrBmC;EAAW,iBAAA;CH4rB9C;AG3rBmC;EAAW,iBAAA;CH8rB9C;AG7rBmC;EAAW,iBAAA;CHgsB9C;AG/rBmC;EAAW,iBAAA;CHksB9C;AGjsBmC;EAAW,iBAAA;CHosB9C;AGnsBmC;EAAW,iBAAA;CHssB9C;AGrsBmC;EAAW,iBAAA;CHwsB9C;AGvsBmC;EAAW,iBAAA;CH0sB9C;AGzsBmC;EAAW,iBAAA;CH4sB9C;AG3sBmC;EAAW,iBAAA;CH8sB9C;AG7sBmC;EAAW,iBAAA;CHgtB9C;AG/sBmC;EAAW,iBAAA;CHktB9C;AGjtBmC;EAAW,iBAAA;CHotB9C;AGntBmC;EAAW,iBAAA;CHstB9C;AGrtBmC;EAAW,iBAAA;CHwtB9C;AGvtBmC;EAAW,iBAAA;CH0tB9C;AGztBmC;EAAW,iBAAA;CH4tB9C;AG3tBmC;EAAW,iBAAA;CH8tB9C;AG7tBmC;EAAW,iBAAA;CHguB9C;AG/tBmC;EAAW,iBAAA;CHkuB9C;AGjuBmC;EAAW,iBAAA;CHouB9C;AGnuBmC;EAAW,iBAAA;CHsuB9C;AGruBmC;EAAW,iBAAA;CHwuB9C;AGvuBmC;EAAW,iBAAA;CH0uB9C;AGzuBmC;EAAW,iBAAA;CH4uB9C;AG3uBmC;EAAW,iBAAA;CH8uB9C;AG7uBmC;EAAW,iBAAA;CHgvB9C;AIxhCD;ECkEE,+BAAA;EACG,4BAAA;EACK,uBAAA;CLy9BT;AI1hCD;;EC+DE,+BAAA;EACG,4BAAA;EACK,uBAAA;CL+9BT;AIxhCD;EACE,gBAAA;EACA,8CAAA;CJ0hCD;AIvhCD;EACE,4DAAA;EACA,gBAAA;EACA,wBAAA;EACA,eAAA;EACA,uBAAA;CJyhCD;AIrhCD;;;;EAIE,qBAAA;EAC
A,mBAAA;EACA,qBAAA;CJuhCD;AIjhCD;EACE,eAAA;EACA,sBAAA;CJmhCD;AIjhCC;;EAEE,eAAA;EACA,2BAAA;CJmhCH;AIhhCC;EEnDA,2CAAA;EACA,qBAAA;CNskCD;AIzgCD;EACE,UAAA;CJ2gCD;AIrgCD;EACE,uBAAA;CJugCD;AIngCD;;;;;EG1EE,eAAA;EACA,gBAAA;EACA,aAAA;CPolCD;AIvgCD;EACE,mBAAA;CJygCD;AIngCD;EACE,aAAA;EACA,wBAAA;EACA,uBAAA;EACA,uBAAA;EACA,mBAAA;EC+FA,yCAAA;EACK,oCAAA;EACG,iCAAA;EE5LR,sBAAA;EACA,gBAAA;EACA,aAAA;CPomCD;AIngCD;EACE,mBAAA;CJqgCD;AI//BD;EACE,iBAAA;EACA,oBAAA;EACA,UAAA;EACA,8BAAA;CJigCD;AIz/BD;EACE,mBAAA;EACA,WAAA;EACA,YAAA;EACA,WAAA;EACA,aAAA;EACA,iBAAA;EACA,uBAAA;EACA,UAAA;CJ2/BD;AIn/BC;;EAEE,iBAAA;EACA,YAAA;EACA,aAAA;EACA,UAAA;EACA,kBAAA;EACA,WAAA;CJq/BH;AI1+BD;EACE,gBAAA;CJ4+BD;AQjoCD;;;;;;;;;;;;EAEE,qBAAA;EACA,iBAAA;EACA,iBAAA;EACA,eAAA;CR6oCD;AQlpCD;;;;;;;;;;;;;;;;;;;;;;;;EASI,iBAAA;EACA,eAAA;EACA,eAAA;CRmqCH;AQ/pCD;;;;;;EAGE,iBAAA;EACA,oBAAA;CRoqCD;AQxqCD;;;;;;;;;;;;EAQI,eAAA;CR8qCH;AQ3qCD;;;;;;EAGE,iBAAA;EACA,oBAAA;CRgrCD;AQprCD;;;;;;;;;;;;EAQI,eAAA;CR0rCH;AQtrCD;;EAAU,gBAAA;CR0rCT;AQzrCD;;EAAU,gBAAA;CR6rCT;AQ5rCD;;EAAU,gBAAA;CRgsCT;AQ/rCD;;EAAU,gBAAA;CRmsCT;AQlsCD;;EAAU,gBAAA;CRssCT;AQrsCD;;EAAU,gBAAA;CRysCT;AQnsCD;EACE,iBAAA;CRqsCD;AQlsCD;EACE,oBAAA;EACA,gBAAA;EACA,iBAAA;EACA,iBAAA;CRosCD;AQlsCC;EAAA;IACE,gBAAA;GRqsCD;CACF;AQ7rCD;;EAEE,eAAA;CR+rCD;AQ5rCD;;EAEE,eAAA;EACA,0BAAA;CR8rCD;AQ1rCD;EAAuB,iBAAA;CR6rCtB;AQ5rCD;EAAuB,kBAAA;CR+rCtB;AQ9rCD;EAAuB,mBAAA;CRisCtB;AQhsCD;EAAuB,oBAAA;CRmsCtB;AQlsCD;EAAuB,oBAAA;CRqsCtB;AQlsCD;EAAuB,0BAAA;CRqsCtB;AQpsCD;EAAuB,0BAAA;CRusCtB;AQtsCD;EAAuB,2BAAA;CRysCtB;AQtsCD;EACE,eAAA;CRwsCD;AQtsCD;ECvGE,eAAA;CTgzCD;AS/yCC;;EAEE,eAAA;CTizCH;AQ1sCD;EC1GE,eAAA;CTuzCD;AStzCC;;EAEE,eAAA;CTwzCH;AQ9sCD;EC7GE,eAAA;CT8zCD;AS7zCC;;EAEE,eAAA;CT+zCH;AQltCD;EChHE,eAAA;CTq0CD;ASp0CC;;EAEE,eAAA;CTs0CH;AQttCD;ECnHE,eAAA;CT40CD;AS30CC;;EAEE,eAAA;CT60CH;AQttCD;EAGE,YAAA;EE7HA,0BAAA;CVo1CD;AUn1CC;;EAEE,0BAAA;CVq1CH;AQxtCD;EEhIE,0BAAA;CV21CD;AU11CC;;EAEE,0BAAA;CV41CH;AQ5tCD;EEnIE,0BAAA;CVk2CD;AUj2CC;;EAEE,0BAAA;CVm2CH;AQhuCD;EEtIE,0BAAA;CVy2CD;AUx2CC;;EAEE,0BAAA;CV02CH;AQpuCD;EEzIE,0BAAA;CVg3CD;AU/2CC;;EAEE,0BAAA;CVi3CH;AQnuCD;EACE,oBAAA;EACA,oBAAA;EACA,iCAAA;CRquCD;AQ7tCD;;EAEE,cAAA;EACA,oBAAA;CR+tCD;AQluCD;;;;EAMI,iBAAA;CRkuCH;AQ3tCD;EACE,gBAAA;EACA,iBAAA;CR6tCD;AQztCD;EALE,gBAAA;EACA,iBAAA;EAMA,kBAAA;CR4tCD;AQ9tCD;EAKI,sBAAA;EACA,mBAAA;EACA,kBAAA;CR4tCH;AQvtCD;EACE,cAAA;EACA,oBAAA;CRytCD;AQvtCD;;EAEE,wBAAA;CRytCD;AQvtCD;EACE,iBAAA;CRytCD;AQvtCD;EACE,eAAA;CRytCD;AQ5sCC;EAAA;IAEI,YAAA;IACA,aAAA;IACA,YAAA;IACA,kBAAA;IGxNJ,iBAAA;IACA,wBAAA;IACA,oBAAA;GXu6CC;EQttCD;IASI,mBAAA;GRgtCH;CACF;AQtsCD;;EAEE,aAAA;CRwsCD;AQrsCD;EACE,eAAA;EA9IqB,0BAAA;CRs1CtB;AQnsCD;EACE,mBAAA;EACA,iBAAA;EACA,kBAAA;EACA,+BAAA;CRqsCD;AQhsCG;;;EACE,iBAAA;CRosCL;AQ9sCD;;;EAmBI,eAAA;EACA,eAAA;EACA,wBAAA;EACA,eAAA;CRgsCH;AQ9rCG;;;EACE,uBAAA;CRksCL;AQ1rCD;;EAEE,oBAAA;EACA,gBAAA;EACA,kBAAA;EACA,gCAAA;EACA,eAAA;CR4rCD;AQtrCG;;;;;;EAAW,YAAA;CR8rCd;AQ7rCG;;;;;;EACE,uBAAA;CRosCL;AQ9rCD;EACE,oBAAA;EACA,mBAAA;EACA,wBAAA;CRgsCD;AYx+CD;;;;EAIE,+DAAA;CZ0+CD;AYt+CD;EACE,iBAAA;EACA,eAAA;EACA,eAAA;EACA,0BAAA;EACA,mBAAA;CZw+CD;AYp+CD;EACE,iBAAA;EACA,eAAA;EACA,YAAA;EACA,uBAAA;EACA,mBAAA;EACA,uDAAA;EAAA,+CAAA;CZs+CD;AY5+CD;EASI,WAAA;EACA,gBAAA;EACA,iBAAA;EACA,yBAAA;EAAA,iBAAA;CZs+CH;AYj+CD;EACE,eAAA;EACA,eAAA;EACA,iBAAA;EACA,gBAAA;EACA,wBAAA;EACA,eAAA;EACA,sBAAA;EACA,sBAAA;EACA,0BAAA;EACA,uBAAA;EACA,mBAAA;CZm+CD;AY9+CD;EAeI,WAAA;EACA,mBAAA;EACA,eAAA;EACA,sBAAA;EACA,8BAAA;EACA,iBAAA;CZk+CH;AY79CD;EACE,kBAAA;EACA,mBAAA;CZ+9CD;AazhDD;ECHE,oBAAA;EACA,mBAAA;EACA,mBAAA;EACA,kBAAA;Cd+hDD;Aa5hDC;EAAA;IACE,aAAA;Gb+hDD;CACF;Aa9hDC;EAAA;IACE,aAAA;Gbi
iDD;CACF;AahiDC;EAAA;IACE,cAAA;GbmiDD;CACF;Aa1hDD;ECvBE,oBAAA;EACA,mBAAA;EACA,mBAAA;EACA,kBAAA;CdojDD;AavhDD;ECvBE,oBAAA;EACA,mBAAA;CdijDD;AavhDD;EACE,gBAAA;EACA,eAAA;CbyhDD;Aa3hDD;EAKI,iBAAA;EACA,gBAAA;CbyhDH;AczkDA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ECiBK,mBAAA;EAEA,gBAAA;EAEA,oBAAA;EACA,mBAAA;CfwmDL;Ac9nDA;;;;;;;;;;;;ECuCK,YAAA;CfqmDL;Ac5oDA;EC+CG,YAAA;CfgmDH;Ac/oDA;EC+CG,oBAAA;CfmmDH;AclpDA;EC+CG,oBAAA;CfsmDH;AcrpDA;EC+CG,WAAA;CfymDH;AcxpDA;EC+CG,oBAAA;Cf4mDH;Ac3pDA;EC+CG,oBAAA;Cf+mDH;Ac9pDA;EC+CG,WAAA;CfknDH;AcjqDA;EC+CG,oBAAA;CfqnDH;AcpqDA;EC+CG,oBAAA;CfwnDH;AcvqDA;EC+CG,WAAA;Cf2nDH;Ac1qDA;EC+CG,oBAAA;Cf8nDH;Ac7qDA;EC+CG,mBAAA;CfioDH;AchrDA;EC8DG,YAAA;CfqnDH;AcnrDA;EC8DG,oBAAA;CfwnDH;ActrDA;EC8DG,oBAAA;Cf2nDH;AczrDA;EC8DG,WAAA;Cf8nDH;Ac5rDA;EC8DG,oBAAA;CfioDH;Ac/rDA;EC8DG,oBAAA;CfooDH;AclsDA;EC8DG,WAAA;CfuoDH;AcrsDA;EC8DG,oBAAA;Cf0oDH;AcxsDA;EC8DG,oBAAA;Cf6oDH;Ac3sDA;EC8DG,WAAA;CfgpDH;Ac9sDA;EC8DG,oBAAA;CfmpDH;AcjtDA;EC8DG,mBAAA;CfspDH;AcptDA;ECmEG,YAAA;CfopDH;AcvtDA;ECoDG,WAAA;CfsqDH;Ac1tDA;ECoDG,mBAAA;CfyqDH;Ac7tDA;ECoDG,mBAAA;Cf4qDH;AchuDA;ECoDG,UAAA;Cf+qDH;AcnuDA;ECoDG,mBAAA;CfkrDH;ActuDA;ECoDG,mBAAA;CfqrDH;AczuDA;ECoDG,UAAA;CfwrDH;Ac5uDA;ECoDG,mBAAA;Cf2rDH;Ac/uDA;ECoDG,mBAAA;Cf8rDH;AclvDA;ECoDG,UAAA;CfisDH;AcrvDA;ECoDG,mBAAA;CfosDH;AcxvDA;ECoDG,kBAAA;CfusDH;Ac3vDA;ECyDG,WAAA;CfqsDH;Ac9vDA;ECwEG,kBAAA;CfyrDH;AcjwDA;ECwEG,0BAAA;Cf4rDH;AcpwDA;ECwEG,0BAAA;Cf+rDH;AcvwDA;ECwEG,iBAAA;CfksDH;Ac1wDA;ECwEG,0BAAA;CfqsDH;Ac7wDA;ECwEG,0BAAA;CfwsDH;AchxDA;ECwEG,iBAAA;Cf2sDH;AcnxDA;ECwEG,0BAAA;Cf8sDH;ActxDA;ECwEG,0BAAA;CfitDH;AczxDA;ECwEG,iBAAA;CfotDH;Ac5xDA;ECwEG,0BAAA;CfutDH;Ac/xDA;ECwEG,yBAAA;Cf0tDH;AclyDA;ECwEG,gBAAA;Cf6tDH;Aa5tDD;ECzEC;;;;;;;;;;;;ICuCK,YAAA;Gf6wDH;EcpzDF;IC+CG,YAAA;GfwwDD;EcvzDF;IC+CG,oBAAA;Gf2wDD;Ec1zDF;IC+CG,oBAAA;Gf8wDD;Ec7zDF;IC+CG,WAAA;GfixDD;Ech0DF;IC+CG,oBAAA;GfoxDD;Ecn0DF;IC+CG,oBAAA;GfuxDD;Ect0DF;IC+CG,WAAA;Gf0xDD;Ecz0DF;IC+CG,oBAAA;Gf6xDD;Ec50DF;IC+CG,oBAAA;GfgyDD;Ec/0DF;IC+CG,WAAA;GfmyDD;Ecl1DF;IC+CG,oBAAA;GfsyDD;Ecr1DF;IC+CG,mBAAA;GfyyDD;Ecx1DF;IC8DG,YAAA;Gf6xDD;Ec31DF;IC8DG,oBAAA;GfgyDD;Ec91DF;IC8DG,oBAAA;GfmyDD;Ecj2DF;IC8DG,WAAA;GfsyDD;Ecp2DF;IC8DG,oBAAA;GfyyDD;Ecv2DF;IC8DG,oBAAA;Gf4yDD;Ec12DF;IC8DG,WAAA;Gf+yDD;Ec72DF;IC8DG,oBAAA;GfkzDD;Ech3DF;IC8DG,oBAAA;GfqzDD;Ecn3DF;IC8DG,WAAA;GfwzDD;Ect3DF;IC8DG,oBAAA;Gf2zDD;Ecz3DF;IC8DG,mBAAA;Gf8zDD;Ec53DF;ICmEG,YAAA;Gf4zDD;Ec/3DF;ICoDG,WAAA;Gf80DD;Ecl4DF;ICoDG,mBAAA;Gfi1DD;Ecr4DF;ICoDG,mBAAA;Gfo1DD;Ecx4DF;ICoDG,UAAA;Gfu1DD;Ec34DF;ICoDG,mBAAA;Gf01DD;Ec94DF;ICoDG,mBAAA;Gf61DD;Ecj5DF;ICoDG,UAAA;Gfg2DD;Ecp5DF;ICoDG,mBAAA;Gfm2DD;Ecv5DF;ICoDG,mBAAA;Gfs2DD;Ec15DF;ICoDG,UAAA;Gfy2DD;Ec75DF;ICoDG,mBAAA;Gf42DD;Ech6DF;ICoDG,kBAAA;Gf+2DD;Ecn6DF;ICyDG,WAAA;Gf62DD;Ect6DF;ICwEG,kBAAA;Gfi2DD;Ecz6DF;ICwEG,0BAAA;Gfo2DD;Ec56DF;ICwEG,0BAAA;Gfu2DD;Ec/6DF;ICwEG,iBAAA;Gf02DD;Ecl7DF;ICwEG,0BAAA;Gf62DD;Ecr7DF;ICwEG,0BAAA;Gfg3DD;Ecx7DF;ICwEG,iBAAA;Gfm3DD;Ec37DF;ICwEG,0BAAA;Gfs3DD;Ec97DF;ICwEG,0BAAA;Gfy3DD;Ecj8DF;ICwEG,iBAAA;Gf43DD;Ecp8DF;ICwEG,0BAAA;Gf+3DD;Ecv8DF;ICwEG,yBAAA;Gfk4DD;Ec18DF;ICwEG,gBAAA;Gfq4DD;CACF;Aa53DD;EClFC;;;;;;;;;;;;ICuCK,YAAA;Gfs7DH;Ec79DF;IC+CG,YAAA;Gfi7DD;Ech+DF;IC+CG,oBAAA;Gfo7DD;Ecn+DF;IC+CG,oBAAA;Gfu7DD;Ect+DF;IC+CG,WAAA;Gf07DD;Ecz+DF;IC+CG,oBAAA;Gf67DD;Ec5+DF;IC+CG,oBAAA;Gfg8DD;Ec/+DF;IC+CG,WAAA;Gfm8DD;Ecl/DF;IC+CG,oBAAA;Gfs8DD;Ecr/DF;IC+CG,oBAAA;Gfy8DD;Ecx/DF;IC+CG,WAAA;Gf48DD;Ec3/DF;IC+CG,oBAAA;Gf+8DD;Ec9/DF;IC+CG,mBAAA;Gfk9DD;EcjgEF;IC8DG,YAAA;Gfs8DD;EcpgEF;IC8DG,oBAAA;Gfy8DD;EcvgEF;IC8DG,oBAAA;Gf48DD;Ec1gEF;IC8DG,WAAA;Gf+8DD;Ec7gEF;IC8DG,oBAAA;Gfk9DD;EchhEF;IC8DG,oBAAA;Gfq9DD;EcnhEF;IC8DG,WAAA;Gfw9DD;Ect
hEF;IC8DG,oBAAA;Gf29DD;EczhEF;IC8DG,oBAAA;Gf89DD;Ec5hEF;IC8DG,WAAA;Gfi+DD;Ec/hEF;IC8DG,oBAAA;Gfo+DD;EcliEF;IC8DG,mBAAA;Gfu+DD;EcriEF;ICmEG,YAAA;Gfq+DD;EcxiEF;ICoDG,WAAA;Gfu/DD;Ec3iEF;ICoDG,mBAAA;Gf0/DD;Ec9iEF;ICoDG,mBAAA;Gf6/DD;EcjjEF;ICoDG,UAAA;GfggED;EcpjEF;ICoDG,mBAAA;GfmgED;EcvjEF;ICoDG,mBAAA;GfsgED;Ec1jEF;ICoDG,UAAA;GfygED;Ec7jEF;ICoDG,mBAAA;Gf4gED;EchkEF;ICoDG,mBAAA;Gf+gED;EcnkEF;ICoDG,UAAA;GfkhED;EctkEF;ICoDG,mBAAA;GfqhED;EczkEF;ICoDG,kBAAA;GfwhED;Ec5kEF;ICyDG,WAAA;GfshED;Ec/kEF;ICwEG,kBAAA;Gf0gED;EcllEF;ICwEG,0BAAA;Gf6gED;EcrlEF;ICwEG,0BAAA;GfghED;EcxlEF;ICwEG,iBAAA;GfmhED;Ec3lEF;ICwEG,0BAAA;GfshED;Ec9lEF;ICwEG,0BAAA;GfyhED;EcjmEF;ICwEG,iBAAA;Gf4hED;EcpmEF;ICwEG,0BAAA;Gf+hED;EcvmEF;ICwEG,0BAAA;GfkiED;Ec1mEF;ICwEG,iBAAA;GfqiED;Ec7mEF;ICwEG,0BAAA;GfwiED;EchnEF;ICwEG,yBAAA;Gf2iED;EcnnEF;ICwEG,gBAAA;Gf8iED;CACF;Aa5hED;EC3FC;;;;;;;;;;;;ICuCK,YAAA;Gf+lEH;EctoEF;IC+CG,YAAA;Gf0lED;EczoEF;IC+CG,oBAAA;Gf6lED;Ec5oEF;IC+CG,oBAAA;GfgmED;Ec/oEF;IC+CG,WAAA;GfmmED;EclpEF;IC+CG,oBAAA;GfsmED;EcrpEF;IC+CG,oBAAA;GfymED;EcxpEF;IC+CG,WAAA;Gf4mED;Ec3pEF;IC+CG,oBAAA;Gf+mED;Ec9pEF;IC+CG,oBAAA;GfknED;EcjqEF;IC+CG,WAAA;GfqnED;EcpqEF;IC+CG,oBAAA;GfwnED;EcvqEF;IC+CG,mBAAA;Gf2nED;Ec1qEF;IC8DG,YAAA;Gf+mED;Ec7qEF;IC8DG,oBAAA;GfknED;EchrEF;IC8DG,oBAAA;GfqnED;EcnrEF;IC8DG,WAAA;GfwnED;EctrEF;IC8DG,oBAAA;Gf2nED;EczrEF;IC8DG,oBAAA;Gf8nED;Ec5rEF;IC8DG,WAAA;GfioED;Ec/rEF;IC8DG,oBAAA;GfooED;EclsEF;IC8DG,oBAAA;GfuoED;EcrsEF;IC8DG,WAAA;Gf0oED;EcxsEF;IC8DG,oBAAA;Gf6oED;Ec3sEF;IC8DG,mBAAA;GfgpED;Ec9sEF;ICmEG,YAAA;Gf8oED;EcjtEF;ICoDG,WAAA;GfgqED;EcptEF;ICoDG,mBAAA;GfmqED;EcvtEF;ICoDG,mBAAA;GfsqED;Ec1tEF;ICoDG,UAAA;GfyqED;Ec7tEF;ICoDG,mBAAA;Gf4qED;EchuEF;ICoDG,mBAAA;Gf+qED;EcnuEF;ICoDG,UAAA;GfkrED;EctuEF;ICoDG,mBAAA;GfqrED;EczuEF;ICoDG,mBAAA;GfwrED;Ec5uEF;ICoDG,UAAA;Gf2rED;Ec/uEF;ICoDG,mBAAA;Gf8rED;EclvEF;ICoDG,kBAAA;GfisED;EcrvEF;ICyDG,WAAA;Gf+rED;EcxvEF;ICwEG,kBAAA;GfmrED;Ec3vEF;ICwEG,0BAAA;GfsrED;Ec9vEF;ICwEG,0BAAA;GfyrED;EcjwEF;ICwEG,iBAAA;Gf4rED;EcpwEF;ICwEG,0BAAA;Gf+rED;EcvwEF;ICwEG,0BAAA;GfksED;Ec1wEF;ICwEG,iBAAA;GfqsED;Ec7wEF;ICwEG,0BAAA;GfwsED;EchxEF;ICwEG,0BAAA;Gf2sED;EcnxEF;ICwEG,iBAAA;Gf8sED;EctxEF;ICwEG,0BAAA;GfitED;EczxEF;ICwEG,yBAAA;GfotED;Ec5xEF;ICwEG,gBAAA;GfutED;CACF;AgBzxED;EACE,8BAAA;ChB2xED;AgB5xED;EAQI,iBAAA;EACA,sBAAA;EACA,YAAA;ChBuxEH;AgBlxEG;;EACE,iBAAA;EACA,oBAAA;EACA,YAAA;ChBqxEL;AgBhxED;EACE,iBAAA;EACA,oBAAA;EACA,eAAA;EACA,iBAAA;ChBkxED;AgB/wED;EACE,iBAAA;ChBixED;AgB3wED;EACE,YAAA;EACA,gBAAA;EACA,oBAAA;ChB6wED;AgBhxED;;;;;;EAWQ,aAAA;EACA,wBAAA;EACA,oBAAA;EACA,2BAAA;ChB6wEP;AgB3xED;EAoBI,uBAAA;EACA,8BAAA;ChB0wEH;AgB/xED;;;;;;EA8BQ,cAAA;ChBywEP;AgBvyED;EAoCI,2BAAA;ChBswEH;AgB1yED;EAyCI,uBAAA;ChBowEH;AgB7vED;;;;;;EAOQ,aAAA;ChB8vEP;AgBnvED;EACE,uBAAA;ChBqvED;AgBtvED;;;;;;EAQQ,uBAAA;ChBsvEP;AgB9vED;;EAeM,yBAAA;ChBmvEL;AgBzuED;EAEI,0BAAA;ChB0uEH;AgBjuED;EAEI,0BAAA;ChBkuEH;AiBj3EC;;;;;;;;;;;;EAOI,0BAAA;CjBw3EL;AiBl3EC;;;;;EAMI,0BAAA;CjBm3EL;AiBt4EC;;;;;;;;;;;;EAOI,0BAAA;CjB64EL;AiBv4EC;;;;;EAMI,0BAAA;CjBw4EL;AiB35EC;;;;;;;;;;;;EAOI,0BAAA;CjBk6EL;AiB55EC;;;;;EAMI,0BAAA;CjB65EL;AiBh7EC;;;;;;;;;;;;EAOI,0BAAA;CjBu7EL;AiBj7EC;;;;;EAMI,0BAAA;CjBk7EL;AiBr8EC;;;;;;;;;;;;EAOI,0BAAA;CjB48EL;AiBt8EC;;;;;EAMI,0BAAA;CjBu8EL;AgBnzED;EACE,kBAAA;EACA,iBAAA;ChBqzED;AgBnzEC;EAAA;IACE,YAAA;IACA,oBAAA;IACA,mBAAA;IACA,6CAAA;IACA,uBAAA;GhBszED;EgB3zED;IASI,iBAAA;GhBqzEH;EgB9zED;;;;;;IAkBU,oBAAA;GhBozET;EgBt0ED;IA0BI,UAAA;GhB+yEH;EgBz0ED;;;;;;IAmCU,eAAA;GhB8yET;EgBj1ED;;;;;;IAuCU,gBAAA;GhBkzET;EgBz1ED;;;;IAoDU,iBAAA;GhB2yET;CACF;AkBrgFD;EAIE,aAAA;EACA,WAAA;EACA,UAAA;EACA,UAAA;ClBogFD;AkBjgFD;EACE,eAAA;EACA,YAAA;EACA,WAAA;EACA,oBA
AA;EACA,gBAAA;EACA,qBAAA;EACA,eAAA;EACA,UAAA;EACA,iCAAA;ClBmgFD;AkBhgFD;EACE,sBAAA;EACA,gBAAA;EACA,mBAAA;EACA,iBAAA;ClBkgFD;AkBx/ED;Eb6BE,+BAAA;EACG,4BAAA;EACK,uBAAA;EarBR,yBAAA;EACA,sBAAA;EAAA,iBAAA;ClBo/ED;AkBh/ED;;EAEE,gBAAA;EACA,mBAAA;EACA,oBAAA;ClBk/ED;AkB5+EC;;;;;;EAGE,oBAAA;ClBi/EH;AkB7+ED;EACE,eAAA;ClB++ED;AkB3+ED;EACE,eAAA;EACA,YAAA;ClB6+ED;AkBz+ED;;EAEE,aAAA;ClB2+ED;AkBv+ED;;;EZ1FE,2CAAA;EACA,qBAAA;CNskFD;AkBt+ED;EACE,eAAA;EACA,iBAAA;EACA,gBAAA;EACA,wBAAA;EACA,eAAA;ClBw+ED;AkB98ED;EACE,eAAA;EACA,YAAA;EACA,aAAA;EACA,kBAAA;EACA,gBAAA;EACA,wBAAA;EACA,eAAA;EACA,uBAAA;EACA,uBAAA;EACA,uBAAA;EACA,mBAAA;Eb3EA,yDAAA;EACQ,iDAAA;EAyHR,+EAAA;EACK,0EAAA;EACG,uFAAA;EAAA,+EAAA;EAAA,uEAAA;EAAA,4GAAA;CLo6ET;AmB9iFC;EACE,sBAAA;EACA,WAAA;EdYF,0FAAA;EACQ,kFAAA;CLqiFT;AKpgFC;EACE,YAAA;EACA,WAAA;CLsgFH;AKpgFC;EAA0B,YAAA;CLugF3B;AKtgFC;EAAgC,YAAA;CLygFjC;AkB19EC;EACE,8BAAA;EACA,UAAA;ClB49EH;AkBp9EC;;;EAGE,0BAAA;EACA,WAAA;ClBs9EH;AkBn9EC;;EAEE,oBAAA;ClBq9EH;AkBj9EC;EACE,aAAA;ClBm9EH;AkBr8ED;EAKI;;;;IACE,kBAAA;GlBs8EH;EkBn8EC;;;;;;;;IAEE,kBAAA;GlB28EH;EkBx8EC;;;;;;;;IAEE,kBAAA;GlBg9EH;CACF;AkBt8ED;EACE,oBAAA;ClBw8ED;AkBh8ED;;EAEE,mBAAA;EACA,eAAA;EACA,iBAAA;EACA,oBAAA;ClBk8ED;AkB/7EC;;;;EAGI,oBAAA;ClBk8EL;AkB78ED;;EAgBI,iBAAA;EACA,mBAAA;EACA,iBAAA;EACA,iBAAA;EACA,gBAAA;ClBi8EH;AkB97ED;;;;EAIE,mBAAA;EACA,mBAAA;EACA,mBAAA;ClBg8ED;AkB77ED;;EAEE,iBAAA;ClB+7ED;AkB37ED;;EAEE,mBAAA;EACA,sBAAA;EACA,mBAAA;EACA,iBAAA;EACA,iBAAA;EACA,uBAAA;EACA,gBAAA;ClB67ED;AkB17EC;;;;EAEE,oBAAA;ClB87EH;AkB37ED;;EAEE,cAAA;EACA,kBAAA;ClB67ED;AkBp7ED;EACE,iBAAA;EAEA,iBAAA;EACA,oBAAA;EAEA,iBAAA;ClBo7ED;AkBl7EC;;EAEE,iBAAA;EACA,gBAAA;ClBo7EH;AkBv6ED;EC3PE,aAAA;EACA,kBAAA;EACA,gBAAA;EACA,iBAAA;EACA,mBAAA;CnBqqFD;AmBnqFC;EACE,aAAA;EACA,kBAAA;CnBqqFH;AmBlqFC;;EAEE,aAAA;CnBoqFH;AkBn7ED;EAEI,aAAA;EACA,kBAAA;EACA,gBAAA;EACA,iBAAA;EACA,mBAAA;ClBo7EH;AkB17ED;EASI,aAAA;EACA,kBAAA;ClBo7EH;AkB97ED;;EAcI,aAAA;ClBo7EH;AkBl8ED;EAiBI,aAAA;EACA,iBAAA;EACA,kBAAA;EACA,gBAAA;EACA,iBAAA;ClBo7EH;AkBh7ED;ECvRE,aAAA;EACA,mBAAA;EACA,gBAAA;EACA,uBAAA;EACA,mBAAA;CnB0sFD;AmBxsFC;EACE,aAAA;EACA,kBAAA;CnB0sFH;AmBvsFC;;EAEE,aAAA;CnBysFH;AkB57ED;EAEI,aAAA;EACA,mBAAA;EACA,gBAAA;EACA,uBAAA;EACA,mBAAA;ClB67EH;AkBn8ED;EASI,aAAA;EACA,kBAAA;ClB67EH;AkBv8ED;;EAcI,aAAA;ClB67EH;AkB38ED;EAiBI,aAAA;EACA,iBAAA;EACA,mBAAA;EACA,gBAAA;EACA,uBAAA;ClB67EH;AkBp7ED;EAEE,mBAAA;ClBq7ED;AkBv7ED;EAMI,sBAAA;ClBo7EH;AkBh7ED;EACE,mBAAA;EACA,OAAA;EACA,SAAA;EACA,WAAA;EACA,eAAA;EACA,YAAA;EACA,aAAA;EACA,kBAAA;EACA,mBAAA;EACA,qBAAA;ClBk7ED;AkBh7ED;;;EAGE,YAAA;EACA,aAAA;EACA,kBAAA;ClBk7ED;AkBh7ED;;;EAGE,YAAA;EACA,aAAA;EACA,kBAAA;ClBk7ED;AkB96ED;;;;;;;;;;EClZI,eAAA;CnB40FH;AkB17ED;EC9YI,sBAAA;EdiDF,yDAAA;EACQ,iDAAA;CL2xFT;AmB30FG;EACE,sBAAA;Ed8CJ,0EAAA;EACQ,kEAAA;CLgyFT;AkBp8ED;ECpYI,eAAA;EACA,0BAAA;EACA,sBAAA;CnB20FH;AkBz8ED;EC9XI,eAAA;CnB00FH;AkBz8ED;;;;;;;;;;ECrZI,eAAA;CnB02FH;AkBr9ED;ECjZI,sBAAA;EdiDF,yDAAA;EACQ,iDAAA;CLyzFT;AmBz2FG;EACE,sBAAA;Ed8CJ,0EAAA;EACQ,kEAAA;CL8zFT;AkB/9ED;ECvYI,eAAA;EACA,0BAAA;EACA,sBAAA;CnBy2FH;AkBp+ED;ECjYI,eAAA;CnBw2FH;AkBp+ED;;;;;;;;;;ECxZI,eAAA;CnBw4FH;AkBh/ED;ECpZI,sBAAA;EdiDF,yDAAA;EACQ,iDAAA;CLu1FT;AmBv4FG;EACE,sBAAA;Ed8CJ,0EAAA;EACQ,kEAAA;CL41FT;AkB1/ED;EC1YI,eAAA;EACA,0BAAA;EACA,sBAAA;CnBu4FH;AkB//ED;ECpYI,eAAA;CnBs4FH;AkB3/EC;EACE,UAAA;ClB6/EH;AkB3/EC;EACE,OAAA;ClB6/EH;AkBn/ED;EACE,eAAA;EACA,gBAAA;EACA,oBAAA;EACA,eAAA;ClBq/ED;AkBn+EC;EAAA;IAGI,sBAAA;IACA,iBAAA;IACA,uBAAA;GlBo+EH;EkBz+ED;IAUI,sBAAA;IACA,YAAA;IACA,uBAAA;GlBk+EH;EkB9+ED;IAiBI,sBAAA;GlBg+EH;EkBj/ED;IAqBI,sBAAA;IACA,uBAAA;GlB+9EH;EkBr/ED;;;IA2BM,YAAA;GlB+9EL;EkB1/ED;IAiCI,YAAA;GlB49EH;EkB7/ED;IAqC
I,iBAAA;IACA,uBAAA;GlB29EH;EkBjgFD;;IA6CI,sBAAA;IACA,cAAA;IACA,iBAAA;IACA,uBAAA;GlBw9EH;EkBxgFD;;IAmDM,gBAAA;GlBy9EL;EkB5gFD;;IAwDI,mBAAA;IACA,eAAA;GlBw9EH;EkBjhFD;IA8DI,OAAA;GlBs9EH;CACF;AkB58ED;;;;EASI,iBAAA;EACA,cAAA;EACA,iBAAA;ClBy8EH;AkBp9ED;;EAiBI,iBAAA;ClBu8EH;AkBx9ED;EJ9gBE,oBAAA;EACA,mBAAA;Cdy+FD;AkBj8EC;EAAA;IAEI,iBAAA;IACA,iBAAA;IACA,kBAAA;GlBm8EH;CACF;AkBn+ED;EAwCI,YAAA;ClB87EH;AkBt7EG;EAAA;IAEI,kBAAA;IACA,gBAAA;GlBw7EL;CACF;AkBp7EG;EAAA;IAEI,iBAAA;IACA,gBAAA;GlBs7EL;CACF;AoBrgGD;EACE,sBAAA;EACA,iBAAA;EACA,oBAAA;EACA,mBAAA;EACA,oBAAA;EACA,uBAAA;EACA,+BAAA;EAAA,2BAAA;EACA,gBAAA;EACA,uBAAA;EACA,8BAAA;ECoCA,kBAAA;EACA,gBAAA;EACA,wBAAA;EACA,mBAAA;EhBqKA,0BAAA;EACG,uBAAA;EACC,sBAAA;EACI,kBAAA;CLg0FT;AoBxgGG;;;;;;EdrBF,2CAAA;EACA,qBAAA;CNqiGD;AoB3gGC;;;EAGE,YAAA;EACA,sBAAA;CpB6gGH;AoB1gGC;;EAEE,uBAAA;EACA,WAAA;Ef2BF,yDAAA;EACQ,iDAAA;CLk/FT;AoB1gGC;;;EAGE,oBAAA;EE9CF,0BAAA;EACA,cAAA;EjBiEA,yBAAA;EACQ,iBAAA;CL2/FT;AoB1gGG;;EAEE,qBAAA;CpB4gGL;AoBngGD;EC7DE,YAAA;EACA,uBAAA;EACA,mBAAA;CrBmkGD;AqBjkGC;;EAEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBmkGH;AqBjkGC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBmkGH;AqBjkGC;;;EAGE,YAAA;EACA,0BAAA;EACA,uBAAA;EACA,sBAAA;CrBmkGH;AqBjkGG;;;;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBykGL;AqBnkGG;;;;;;;;;EAGE,uBAAA;EACA,mBAAA;CrB2kGL;AoBpjGD;EClBI,YAAA;EACA,uBAAA;CrBykGH;AoBrjGD;EChEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBwnGD;AqBtnGC;;EAEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBwnGH;AqBtnGC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBwnGH;AqBtnGC;;;EAGE,YAAA;EACA,0BAAA;EACA,uBAAA;EACA,sBAAA;CrBwnGH;AqBtnGG;;;;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB8nGL;AqBxnGG;;;;;;;;;EAGE,0BAAA;EACA,sBAAA;CrBgoGL;AoBtmGD;ECrBI,eAAA;EACA,uBAAA;CrB8nGH;AoBtmGD;ECpEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB6qGD;AqB3qGC;;EAEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB6qGH;AqB3qGC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB6qGH;AqB3qGC;;;EAGE,YAAA;EACA,0BAAA;EACA,uBAAA;EACA,sBAAA;CrB6qGH;AqB3qGG;;;;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBmrGL;AqB7qGG;;;;;;;;;EAGE,0BAAA;EACA,sBAAA;CrBqrGL;AoBvpGD;ECzBI,eAAA;EACA,uBAAA;CrBmrGH;AoBvpGD;ECxEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBkuGD;AqBhuGC;;EAEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBkuGH;AqBhuGC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBkuGH;AqBhuGC;;;EAGE,YAAA;EACA,0BAAA;EACA,uBAAA;EACA,sBAAA;CrBkuGH;AqBhuGG;;;;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBwuGL;AqBluGG;;;;;;;;;EAGE,0BAAA;EACA,sBAAA;CrB0uGL;AoBxsGD;EC7BI,eAAA;EACA,uBAAA;CrBwuGH;AoBxsGD;EC5EE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBuxGD;AqBrxGC;;EAEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBuxGH;AqBrxGC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBuxGH;AqBrxGC;;;EAGE,YAAA;EACA,0BAAA;EACA,uBAAA;EACA,sBAAA;CrBuxGH;AqBrxGG;;;;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB6xGL;AqBvxGG;;;;;;;;;EAGE,0BAAA;EACA,sBAAA;CrB+xGL;AoBzvGD;ECjCI,eAAA;EACA,uBAAA;CrB6xGH;AoBzvGD;EChFE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB40GD;AqB10GC;;EAEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB40GH;AqB10GC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB40GH;AqB10GC;;;EAGE,YAAA;EACA,0BAAA;EACA,uBAAA;EACA,sBAAA;CrB40GH;AqB10GG;;;;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBk1GL;AqB50GG;;;;;;;;;EAGE,0BAAA;EACA,sBAAA;CrBo1GL;AoB1yGD;ECrCI,eAAA;EACA,uBAAA;CrBk1GH;AoBryGD;EACE,iBAAA;EACA,eAAA;EACA,iBAAA;CpBuyGD;AoBryGC;;;;;EAKE,8BAAA;EfnCF,yBAAA;EACQ,iBAAA;CL20GT;AoBtyGC;;;;EAIE,0BAAA;CpBwyGH;AoBtyGC;;EAEE,eAAA;EACA,2BAAA;EACA,8BAAA;CpBwyGH;AoBpyGG;;;;EAEE,eAAA;EACA,sBAAA;CpBwyGL;AoB/xGD;;EC9EE,mBAAA;EACA,gBAAA;EACA,uBAAA;EACA,mBAAA;CrBi3GD;AoBlyGD;;EClFE,kBAAA;EACA,gBAAA;EACA,iBAAA;EACA,mBAAA;CrBw3GD;AoBryGD;;ECtFE,iBAAA;EACA,gBAAA;EACA,iBAAA;EACA,mBAAA;CrB+3GD;AoBpyGD;EACE,eAAA;EACA,YAAA;CpBsyGD;AoBlyGD;EACE,gBAAA;CpBoyGD;AoB7xGC;;;EACE,YAAA;CpBiyGH;AuB37GD;EACE,WAA
A;ElBoLA,yCAAA;EACK,oCAAA;EACG,iCAAA;CL0wGT;AuB77GC;EACE,WAAA;CvB+7GH;AuB37GD;EACE,cAAA;CvB67GD;AuB37GC;EAAY,eAAA;CvB87Gb;AuB77GC;EAAY,mBAAA;CvBg8Gb;AuB/7GC;EAAY,yBAAA;CvBk8Gb;AuB/7GD;EACE,mBAAA;EACA,UAAA;EACA,iBAAA;ElBsKA,gDAAA;EACQ,2CAAA;EAAA,wCAAA;EAOR,mCAAA;EACQ,8BAAA;EAAA,2BAAA;EAGR,yCAAA;EACQ,oCAAA;EAAA,iCAAA;CLoxGT;AwBh+GD;EACE,sBAAA;EACA,SAAA;EACA,UAAA;EACA,iBAAA;EACA,uBAAA;EACA,uBAAA;EACA,yBAAA;EACA,oCAAA;EACA,mCAAA;CxBk+GD;AwB99GD;;EAEE,mBAAA;CxBg+GD;AwB59GD;EACE,WAAA;CxB89GD;AwB19GD;EACE,mBAAA;EACA,UAAA;EACA,QAAA;EACA,cAAA;EACA,cAAA;EACA,YAAA;EACA,iBAAA;EACA,eAAA;EACA,gBAAA;EACA,gBAAA;EACA,iBAAA;EACA,iBAAA;EACA,uBAAA;EACA,6BAAA;EACA,uBAAA;EACA,sCAAA;EACA,mBAAA;EnBuBA,oDAAA;EACQ,4CAAA;CLs8GT;AwBx9GC;EACE,SAAA;EACA,WAAA;CxB09GH;AwBn/GD;ECzBE,YAAA;EACA,cAAA;EACA,iBAAA;EACA,0BAAA;CzB+gHD;AwBz/GD;EAmCI,eAAA;EACA,kBAAA;EACA,YAAA;EACA,iBAAA;EACA,wBAAA;EACA,eAAA;EACA,oBAAA;CxBy9GH;AwBv9GG;;EAEE,eAAA;EACA,sBAAA;EACA,0BAAA;CxBy9GL;AwBl9GC;;;EAGE,YAAA;EACA,sBAAA;EACA,0BAAA;EACA,WAAA;CxBo9GH;AwB38GC;;;EAGE,eAAA;CxB68GH;AwBz8GC;;EAEE,sBAAA;EACA,oBAAA;EACA,8BAAA;EACA,uBAAA;EEzGF,oEAAA;C1BqjHD;AwBt8GD;EAGI,eAAA;CxBs8GH;AwBz8GD;EAQI,WAAA;CxBo8GH;AwB57GD;EACE,SAAA;EACA,WAAA;CxB87GD;AwBt7GD;EACE,YAAA;EACA,QAAA;CxBw7GD;AwBp7GD;EACE,eAAA;EACA,kBAAA;EACA,gBAAA;EACA,wBAAA;EACA,eAAA;EACA,oBAAA;CxBs7GD;AwBl7GD;EACE,gBAAA;EACA,OAAA;EACA,SAAA;EACA,UAAA;EACA,QAAA;EACA,aAAA;CxBo7GD;AwBh7GD;EACE,SAAA;EACA,WAAA;CxBk7GD;AwB16GD;;EAII,YAAA;EACA,cAAA;EACA,0BAAA;EACA,4BAAA;CxB06GH;AwBj7GD;;EAWI,UAAA;EACA,aAAA;EACA,mBAAA;CxB06GH;AwBj6GD;EACE;IApEA,SAAA;IACA,WAAA;GxBw+GC;EwBr6GD;IA1DA,YAAA;IACA,QAAA;GxBk+GC;CACF;A2B7mHD;;EAEE,mBAAA;EACA,sBAAA;EACA,uBAAA;C3B+mHD;A2BnnHD;;EAMI,mBAAA;EACA,YAAA;C3BinHH;A2B/mHG;;;;;;;;EAIE,WAAA;C3BqnHL;A2B/mHD;;;;EAKI,kBAAA;C3BgnHH;A2B3mHD;EACE,kBAAA;C3B6mHD;A2B9mHD;;;EAOI,YAAA;C3B4mHH;A2BnnHD;;;EAYI,iBAAA;C3B4mHH;A2BxmHD;EACE,iBAAA;C3B0mHD;A2BtmHD;EACE,eAAA;C3BwmHD;A2BvmHC;ECpDA,2BAAA;EACA,8BAAA;C5B8pHD;A2BtmHD;;ECjDE,0BAAA;EACA,6BAAA;C5B2pHD;A2BrmHD;EACE,YAAA;C3BumHD;A2BrmHD;EACE,iBAAA;C3BumHD;A2BrmHD;;ECrEE,2BAAA;EACA,8BAAA;C5B8qHD;A2BpmHD;ECnEE,0BAAA;EACA,6BAAA;C5B0qHD;A2BnmHD;;EAEE,WAAA;C3BqmHD;A2BplHD;EACE,mBAAA;EACA,kBAAA;C3BslHD;A2BplHD;EACE,oBAAA;EACA,mBAAA;C3BslHD;A2BjlHD;EtB/CE,yDAAA;EACQ,iDAAA;CLmoHT;A2BjlHC;EtBnDA,yBAAA;EACQ,iBAAA;CLuoHT;A2B9kHD;EACE,eAAA;C3BglHD;A2B7kHD;EACE,wBAAA;EACA,uBAAA;C3B+kHD;A2B5kHD;EACE,wBAAA;C3B8kHD;A2BvkHD;;;EAII,eAAA;EACA,YAAA;EACA,YAAA;EACA,gBAAA;C3BwkHH;A2B/kHD;EAcM,YAAA;C3BokHL;A2BllHD;;;;EAsBI,iBAAA;EACA,eAAA;C3BkkHH;A2B7jHC;EACE,iBAAA;C3B+jHH;A2B7jHC;EC7KA,4BAAA;EACA,6BAAA;EAOA,8BAAA;EACA,6BAAA;C5BuuHD;A2B/jHC;ECjLA,0BAAA;EACA,2BAAA;EAOA,gCAAA;EACA,+BAAA;C5B6uHD;A2BhkHD;EACE,iBAAA;C3BkkHD;A2BhkHD;;ECjLE,8BAAA;EACA,6BAAA;C5BqvHD;A2B/jHD;EC/LE,0BAAA;EACA,2BAAA;C5BiwHD;A2B3jHD;EACE,eAAA;EACA,YAAA;EACA,oBAAA;EACA,0BAAA;C3B6jHD;A2BjkHD;;EAOI,oBAAA;EACA,YAAA;EACA,UAAA;C3B8jHH;A2BvkHD;EAYI,YAAA;C3B8jHH;A2B1kHD;EAgBI,WAAA;C3B6jHH;A2B5iHD;;;;EAKM,mBAAA;EACA,uBAAA;EACA,qBAAA;C3B6iHL;A6BvxHD;EACE,mBAAA;EACA,eAAA;EACA,0BAAA;C7ByxHD;A6BtxHC;EACE,YAAA;EACA,iBAAA;EACA,gBAAA;C7BwxHH;A6BjyHD;EAeI,mBAAA;EACA,WAAA;EAKA,YAAA;EAEA,YAAA;EACA,iBAAA;C7BgxHH;A6B9wHG;EACE,WAAA;C7BgxHL;A6BtwHD;;;EVwBE,aAAA;EACA,mBAAA;EACA,gBAAA;EACA,uBAAA;EACA,mBAAA;CnBmvHD;AmBjvHC;;;EACE,aAAA;EACA,kBAAA;CnBqvHH;AmBlvHC;;;;;;EAEE,aAAA;CnBwvHH;A6BxxHD;;;EVmBE,aAAA;EACA,kBAAA;EACA,gBAAA;EACA,iBAAA;EACA,mBAAA;CnB0wHD;AmBxwHC;;;EACE,aAAA;EACA,kBAAA;CnB4wHH;AmBzwHC;;;;;;EAEE,aAAA;CnB+wHH;A6BtyHD;;;EAGE,oBAAA;C7BwyHD;A6BtyHC;;;EACE,iBAAA;C7B0yHH;A6BtyHD;;EAEE,UAAA;EACA,oBAAA;EACA,uBAAA;C7
BwyHD;A6BnyHD;EACE,kBAAA;EACA,gBAAA;EACA,iBAAA;EACA,eAAA;EACA,eAAA;EACA,mBAAA;EACA,0BAAA;EACA,uBAAA;EACA,mBAAA;C7BqyHD;A6BlyHC;EACE,kBAAA;EACA,gBAAA;EACA,mBAAA;C7BoyHH;A6BlyHC;EACE,mBAAA;EACA,gBAAA;EACA,mBAAA;C7BoyHH;A6BxzHD;;EA0BI,cAAA;C7BkyHH;A6B7xHD;;;;;;;EDtGE,2BAAA;EACA,8BAAA;C5B44HD;A6B9xHD;EACE,gBAAA;C7BgyHD;A6B9xHD;;;;;;;ED1GE,0BAAA;EACA,6BAAA;C5Bi5HD;A6B/xHD;EACE,eAAA;C7BiyHD;A6B5xHD;EACE,mBAAA;EAGA,aAAA;EACA,oBAAA;C7B4xHD;A6BjyHD;EAUI,mBAAA;C7B0xHH;A6BpyHD;EAYM,kBAAA;C7B2xHL;A6BxxHG;;;EAGE,WAAA;C7B0xHL;A6BrxHC;;EAGI,mBAAA;C7BsxHL;A6BnxHC;;EAGI,WAAA;EACA,kBAAA;C7BoxHL;A8Bn7HD;EACE,gBAAA;EACA,iBAAA;EACA,iBAAA;C9Bq7HD;A8Bx7HD;EAOI,mBAAA;EACA,eAAA;C9Bo7HH;A8B57HD;EAWM,mBAAA;EACA,eAAA;EACA,mBAAA;C9Bo7HL;A8Bn7HK;;EAEE,sBAAA;EACA,0BAAA;C9Bq7HP;A8Bh7HG;EACE,eAAA;C9Bk7HL;A8Bh7HK;;EAEE,eAAA;EACA,sBAAA;EACA,oBAAA;EACA,8BAAA;C9Bk7HP;A8B36HG;;;EAGE,0BAAA;EACA,sBAAA;C9B66HL;A8Bt9HD;ELLE,YAAA;EACA,cAAA;EACA,iBAAA;EACA,0BAAA;CzB89HD;A8B59HD;EA0DI,gBAAA;C9Bq6HH;A8B55HD;EACE,8BAAA;C9B85HD;A8B/5HD;EAGI,YAAA;EAEA,oBAAA;C9B85HH;A8Bn6HD;EASM,kBAAA;EACA,wBAAA;EACA,8BAAA;EACA,2BAAA;C9B65HL;A8B55HK;EACE,mCAAA;C9B85HP;A8Bx5HK;;;EAGE,eAAA;EACA,gBAAA;EACA,uBAAA;EACA,uBAAA;EACA,iCAAA;C9B05HP;A8Br5HC;EAqDA,YAAA;EA8BA,iBAAA;C9Bs0HD;A8Bz5HC;EAwDE,YAAA;C9Bo2HH;A8B55HC;EA0DI,mBAAA;EACA,mBAAA;C9Bq2HL;A8Bh6HC;EAgEE,UAAA;EACA,WAAA;C9Bm2HH;A8Bh2HC;EAAA;IAEI,oBAAA;IACA,UAAA;G9Bk2HH;E8Br2HD;IAKM,iBAAA;G9Bm2HL;CACF;A8B76HC;EAuFE,gBAAA;EACA,mBAAA;C9By1HH;A8Bj7HC;;;EA8FE,uBAAA;C9Bw1HH;A8Br1HC;EAAA;IAEI,8BAAA;IACA,2BAAA;G9Bu1HH;E8B11HD;;;IAQI,0BAAA;G9Bu1HH;CACF;A8Bx7HD;EAEI,YAAA;C9By7HH;A8B37HD;EAMM,mBAAA;C9Bw7HL;A8B97HD;EASM,iBAAA;C9Bw7HL;A8Bn7HK;;;EAGE,YAAA;EACA,0BAAA;C9Bq7HP;A8B76HD;EAEI,YAAA;C9B86HH;A8Bh7HD;EAIM,gBAAA;EACA,eAAA;C9B+6HL;A8Bn6HD;EACE,YAAA;C9Bq6HD;A8Bt6HD;EAII,YAAA;C9Bq6HH;A8Bz6HD;EAMM,mBAAA;EACA,mBAAA;C9Bs6HL;A8B76HD;EAYI,UAAA;EACA,WAAA;C9Bo6HH;A8Bj6HC;EAAA;IAEI,oBAAA;IACA,UAAA;G9Bm6HH;E8Bt6HD;IAKM,iBAAA;G9Bo6HL;CACF;A8B55HD;EACE,iBAAA;C9B85HD;A8B/5HD;EAKI,gBAAA;EACA,mBAAA;C9B65HH;A8Bn6HD;;;EAYI,uBAAA;C9B45HH;A8Bz5HC;EAAA;IAEI,8BAAA;IACA,2BAAA;G9B25HH;E8B95HD;;;IAQI,0BAAA;G9B25HH;CACF;A8Bl5HD;EAEI,cAAA;C9Bm5HH;A8Br5HD;EAKI,eAAA;C9Bm5HH;A8B14HD;EAEE,iBAAA;EF7OA,0BAAA;EACA,2BAAA;C5BynID;A+BjnID;EACE,mBAAA;EACA,iBAAA;EACA,oBAAA;EACA,8BAAA;C/BmnID;A+B9mIC;EAAA;IACE,mBAAA;G/BinID;CACF;A+BrmIC;EAAA;IACE,YAAA;G/BwmID;CACF;A+B1lID;EACE,oBAAA;EACA,mBAAA;EACA,oBAAA;EACA,kCAAA;EACA,2DAAA;EAAA,mDAAA;EAEA,kCAAA;C/B2lID;A+BzlIC;EACE,iBAAA;C/B2lIH;A+BxlIC;EAAA;IACE,YAAA;IACA,cAAA;IACA,yBAAA;IAAA,iBAAA;G/B2lID;E+BzlIC;IACE,0BAAA;IACA,wBAAA;IACA,kBAAA;IACA,6BAAA;G/B2lIH;E+BxlIC;IACE,oBAAA;G/B0lIH;E+BrlIC;;;IAGE,iBAAA;IACA,gBAAA;G/BulIH;CACF;A+BnlID;;EAWE,gBAAA;EACA,SAAA;EACA,QAAA;EACA,cAAA;C/B4kID;A+B1lID;;EAGI,kBAAA;C/B2lIH;A+BzlIG;EAAA;;IACE,kBAAA;G/B6lIH;CACF;A+BnlIC;EAAA;;IACE,iBAAA;G/BulID;CACF;A+BplID;EACE,OAAA;EACA,sBAAA;C/BslID;A+BplID;EACE,UAAA;EACA,iBAAA;EACA,sBAAA;C/BslID;A+B9kID;;;;EAII,oBAAA;EACA,mBAAA;C/BglIH;A+B9kIG;EAAA;;;;IACE,gBAAA;IACA,eAAA;G/BolIH;CACF;A+BxkID;EACE,cAAA;EACA,sBAAA;C/B0kID;A+BxkIC;EAAA;IACE,iBAAA;G/B2kID;CACF;A+BrkID;EACE,YAAA;EACA,aAAA;EACA,mBAAA;EACA,gBAAA;EACA,kBAAA;C/BukID;A+BrkIC;;EAEE,sBAAA;C/BukIH;A+BhlID;EAaI,eAAA;C/BskIH;A+BnkIC;EACE;;IAEE,mBAAA;G/BqkIH;CACF;A+B3jID;EACE,mBAAA;EACA,aAAA;EACA,kBAAA;EACA,mBAAA;EC9LA,gBAAA;EACA,mBAAA;ED+LA,8BAAA;EACA,uBAAA;EACA,8BAAA;EACA,mBAAA;C/B8jID;A+B1jIC;EACE,WAAA;C/B4jIH;A+B1kID;EAmBI,eAAA;EACA,YAAA;EACA,YAAA;EACA,mBAAA;C/B0jIH;A+BhlID;EAyBI,gBAAA;C/B0jIH;A+BvjIC;EAAA;IACE,cAAA;G/B0jID;CACF;A+BjjID;EACE,oBAAA;C/BmjID;A+BpjID;EAII,kBAAA;EACA
,qBAAA;EACA,kBAAA;C/BmjIH;A+BhjIC;EAAA;IAGI,iBAAA;IACA,YAAA;IACA,YAAA;IACA,cAAA;IACA,8BAAA;IACA,UAAA;IACA,yBAAA;IAAA,iBAAA;G/BijIH;E+B1jID;;IAYM,2BAAA;G/BkjIL;E+B9jID;IAeM,kBAAA;G/BkjIL;E+BjjIK;;IAEE,uBAAA;G/BmjIP;CACF;A+B7iIC;EAAA;IACE,YAAA;IACA,UAAA;G/BgjID;E+BljID;IAKI,YAAA;G/BgjIH;E+BrjID;IAOM,kBAAA;IACA,qBAAA;G/BijIL;CACF;A+BtiID;EACE,mBAAA;EACA,oBAAA;EACA,mBAAA;EACA,kCAAA;EACA,qCAAA;E1B5NA,6FAAA;EACQ,qFAAA;E2BjER,gBAAA;EACA,mBAAA;ChCu0ID;AkB13HC;EAAA;IAGI,sBAAA;IACA,iBAAA;IACA,uBAAA;GlB23HH;EkBh4HD;IAUI,sBAAA;IACA,YAAA;IACA,uBAAA;GlBy3HH;EkBr4HD;IAiBI,sBAAA;GlBu3HH;EkBx4HD;IAqBI,sBAAA;IACA,uBAAA;GlBs3HH;EkB54HD;;;IA2BM,YAAA;GlBs3HL;EkBj5HD;IAiCI,YAAA;GlBm3HH;EkBp5HD;IAqCI,iBAAA;IACA,uBAAA;GlBk3HH;EkBx5HD;;IA6CI,sBAAA;IACA,cAAA;IACA,iBAAA;IACA,uBAAA;GlB+2HH;EkB/5HD;;IAmDM,gBAAA;GlBg3HL;EkBn6HD;;IAwDI,mBAAA;IACA,eAAA;GlB+2HH;EkBx6HD;IA8DI,OAAA;GlB62HH;CACF;A+BtlIG;EAAA;IACE,mBAAA;G/BylIH;E+BvlIG;IACE,iBAAA;G/BylIL;CACF;A+BjlIC;EAAA;IACE,YAAA;IACA,eAAA;IACA,kBAAA;IACA,gBAAA;IACA,eAAA;IACA,UAAA;I1BvPF,yBAAA;IACQ,iBAAA;GL40IP;CACF;A+B9kID;EACE,cAAA;EHpUA,0BAAA;EACA,2BAAA;C5Bq5ID;A+B9kID;EACE,iBAAA;EHzUA,4BAAA;EACA,6BAAA;EAOA,8BAAA;EACA,6BAAA;C5Bo5ID;A+B1kID;EChVE,gBAAA;EACA,mBAAA;ChC65ID;A+B3kIC;ECnVA,iBAAA;EACA,oBAAA;ChCi6ID;A+B5kIC;ECtVA,iBAAA;EACA,oBAAA;ChCq6ID;A+BtkID;EChWE,iBAAA;EACA,oBAAA;ChCy6ID;A+BvkIC;EAAA;IACE,YAAA;IACA,mBAAA;IACA,kBAAA;G/B0kID;CACF;A+B9jID;EACE;IEtWA,uBAAA;GjCu6IC;E+BhkID;IE1WA,wBAAA;IF4WE,oBAAA;G/BkkID;E+BpkID;IAKI,gBAAA;G/BkkIH;CACF;A+BzjID;EACE,0BAAA;EACA,sBAAA;C/B2jID;A+B7jID;EAKI,YAAA;C/B2jIH;A+B1jIG;;EAEE,eAAA;EACA,8BAAA;C/B4jIL;A+BrkID;EAcI,YAAA;C/B0jIH;A+BxkID;EAmBM,YAAA;C/BwjIL;A+BtjIK;;EAEE,YAAA;EACA,8BAAA;C/BwjIP;A+BpjIK;;;EAGE,YAAA;EACA,0BAAA;C/BsjIP;A+BljIK;;;EAGE,YAAA;EACA,8BAAA;C/BojIP;A+B7iIK;;;EAGE,YAAA;EACA,0BAAA;C/B+iIP;A+B3iIG;EAAA;IAIM,YAAA;G/B2iIP;E+B1iIO;;IAEE,YAAA;IACA,8BAAA;G/B4iIT;E+BxiIO;;;IAGE,YAAA;IACA,0BAAA;G/B0iIT;E+BtiIO;;;IAGE,YAAA;IACA,8BAAA;G/BwiIT;CACF;A+BxnID;EAuFI,mBAAA;C/BoiIH;A+BniIG;;EAEE,uBAAA;C/BqiIL;A+B/nID;EA6FM,uBAAA;C/BqiIL;A+BloID;;EAmGI,sBAAA;C/BmiIH;A+BtoID;EA4GI,YAAA;C/B6hIH;A+B5hIG;EACE,YAAA;C/B8hIL;A+B5oID;EAmHI,YAAA;C/B4hIH;A+B3hIG;;EAEE,YAAA;C/B6hIL;A+BzhIK;;;;EAEE,YAAA;C/B6hIP;A+BrhID;EACE,uBAAA;EACA,sBAAA;C/BuhID;A+BzhID;EAKI,eAAA;C/BuhIH;A+BthIG;;EAEE,YAAA;EACA,8BAAA;C/BwhIL;A+BjiID;EAcI,eAAA;C/BshIH;A+BpiID;EAmBM,eAAA;C/BohIL;A+BlhIK;;EAEE,YAAA;EACA,8BAAA;C/BohIP;A+BhhIK;;;EAGE,YAAA;EACA,0BAAA;C/BkhIP;A+B9gIK;;;EAGE,YAAA;EACA,8BAAA;C/BghIP;A+B1gIK;;;EAGE,YAAA;EACA,0BAAA;C/B4gIP;A+BxgIG;EAAA;IAIM,sBAAA;G/BwgIP;E+B5gIC;IAOM,0BAAA;G/BwgIP;E+B/gIC;IAUM,eAAA;G/BwgIP;E+BvgIO;;IAEE,YAAA;IACA,8BAAA;G/BygIT;E+BrgIO;;;IAGE,YAAA;IACA,0BAAA;G/BugIT;E+BngIO;;;IAGE,YAAA;IACA,8BAAA;G/BqgIT;CACF;A+B1lID;EA6FI,mBAAA;C/BggIH;A+B//HG;;EAEE,uBAAA;C/BigIL;A+BjmID;EAmGM,uBAAA;C/BigIL;A+BpmID;;EAyGI,sBAAA;C/B+/HH;A+BxmID;EA6GI,eAAA;C/B8/HH;A+B7/HG;EACE,YAAA;C/B+/HL;A+B9mID;EAoHI,eAAA;C/B6/HH;A+B5/HG;;EAEE,YAAA;C/B8/HL;A+B1/HK;;;;EAEE,YAAA;C/B8/HP;AkCpoJD;EACE,kBAAA;EACA,oBAAA;EACA,iBAAA;EACA,0BAAA;EACA,mBAAA;ClCsoJD;AkC3oJD;EAQI,sBAAA;ClCsoJH;AkC9oJD;EAWM,eAAA;EACA,YAAA;EACA,kBAAA;ClCsoJL;AkCnpJD;EAkBI,eAAA;ClCooJH;AmCxpJD;EACE,sBAAA;EACA,gBAAA;EACA,eAAA;EACA,mBAAA;CnC0pJD;AmC9pJD;EAOI,gBAAA;CnC0pJH;AmCjqJD;;EAUM,mBAAA;EACA,YAAA;EACA,kBAAA;EACA,kBAAA;EACA,wBAAA;EACA,eAAA;EACA,sBAAA;EACA,uBAAA;EACA,uBAAA;CnC2pJL;AmCzpJK;;;;EAEE,WAAA;EACA,eAAA;EACA,0BAAA;EACA,mBAAA;CnC6pJP;AmC1pJG;;EAGI,eAAA;EPnBN,4BAAA;EACA,+BAAA;C5B+qJD;AmCzpJG;;EP/BF,6BAAA;EACA,gCAAA;C5B4rJD;AmCppJG;;;;;;EAGE,WAAA;EACA,YAAA;EACA,gBAAA;EACA,0BAAA;EACA,sBAAA;Cn
CypJL;AmC7sJD;;;;;;EA+DM,eAAA;EACA,oBAAA;EACA,uBAAA;EACA,mBAAA;CnCspJL;AmC7oJD;;ECxEM,mBAAA;EACA,gBAAA;EACA,uBAAA;CpCytJL;AoCvtJG;;ERKF,4BAAA;EACA,+BAAA;C5BstJD;AoCttJG;;ERTF,6BAAA;EACA,gCAAA;C5BmuJD;AmCxpJD;;EC7EM,kBAAA;EACA,gBAAA;EACA,iBAAA;CpCyuJL;AoCvuJG;;ERKF,4BAAA;EACA,+BAAA;C5BsuJD;AoCtuJG;;ERTF,6BAAA;EACA,gCAAA;C5BmvJD;AqCtvJD;EACE,gBAAA;EACA,eAAA;EACA,mBAAA;EACA,iBAAA;CrCwvJD;AqC5vJD;EAOI,gBAAA;CrCwvJH;AqC/vJD;;EAUM,sBAAA;EACA,kBAAA;EACA,uBAAA;EACA,uBAAA;EACA,oBAAA;CrCyvJL;AqCvwJD;;EAmBM,sBAAA;EACA,0BAAA;CrCwvJL;AqC5wJD;;EA2BM,aAAA;CrCqvJL;AqChxJD;;EAkCM,YAAA;CrCkvJL;AqCpxJD;;;;EA2CM,eAAA;EACA,oBAAA;EACA,uBAAA;CrC+uJL;AsC7xJD;EACE,gBAAA;EACA,2BAAA;EACA,eAAA;EACA,iBAAA;EACA,eAAA;EACA,YAAA;EACA,mBAAA;EACA,oBAAA;EACA,yBAAA;EACA,sBAAA;CtC+xJD;AsC3xJG;;EAEE,YAAA;EACA,sBAAA;EACA,gBAAA;CtC6xJL;AsCxxJC;EACE,cAAA;CtC0xJH;AsCtxJC;EACE,mBAAA;EACA,UAAA;CtCwxJH;AsCjxJD;ECtCE,0BAAA;CvC0zJD;AuCvzJG;;EAEE,0BAAA;CvCyzJL;AsCpxJD;EC1CE,0BAAA;CvCi0JD;AuC9zJG;;EAEE,0BAAA;CvCg0JL;AsCvxJD;EC9CE,0BAAA;CvCw0JD;AuCr0JG;;EAEE,0BAAA;CvCu0JL;AsC1xJD;EClDE,0BAAA;CvC+0JD;AuC50JG;;EAEE,0BAAA;CvC80JL;AsC7xJD;ECtDE,0BAAA;CvCs1JD;AuCn1JG;;EAEE,0BAAA;CvCq1JL;AsChyJD;EC1DE,0BAAA;CvC61JD;AuC11JG;;EAEE,0BAAA;CvC41JL;AwC91JD;EACE,sBAAA;EACA,gBAAA;EACA,iBAAA;EACA,gBAAA;EACA,kBAAA;EACA,eAAA;EACA,YAAA;EACA,mBAAA;EACA,oBAAA;EACA,uBAAA;EACA,0BAAA;EACA,oBAAA;CxCg2JD;AwC71JC;EACE,cAAA;CxC+1JH;AwC31JC;EACE,mBAAA;EACA,UAAA;CxC61JH;AwC11JC;;EAEE,OAAA;EACA,iBAAA;CxC41JH;AwCv1JG;;EAEE,YAAA;EACA,sBAAA;EACA,gBAAA;CxCy1JL;AwCp1JC;;EAEE,eAAA;EACA,uBAAA;CxCs1JH;AwCn1JC;EACE,aAAA;CxCq1JH;AwCl1JC;EACE,kBAAA;CxCo1JH;AwCj1JC;EACE,iBAAA;CxCm1JH;AyC74JD;EACE,kBAAA;EACA,qBAAA;EACA,oBAAA;EACA,eAAA;EACA,0BAAA;CzC+4JD;AyCp5JD;;EASI,eAAA;CzC+4JH;AyCx5JD;EAaI,oBAAA;EACA,gBAAA;EACA,iBAAA;CzC84JH;AyC75JD;EAmBI,0BAAA;CzC64JH;AyC14JC;;EAEE,oBAAA;EACA,mBAAA;EACA,mBAAA;CzC44JH;AyCt6JD;EA8BI,gBAAA;CzC24JH;AyCx4JC;EAAA;IACE,kBAAA;IACA,qBAAA;GzC24JD;EyCz4JC;;IAEE,oBAAA;IACA,mBAAA;GzC24JH;EyCl5JD;;IAYI,gBAAA;GzC04JH;CACF;A0Cr7JD;EACE,eAAA;EACA,aAAA;EACA,oBAAA;EACA,wBAAA;EACA,uBAAA;EACA,uBAAA;EACA,mBAAA;ErCiLA,4CAAA;EACK,uCAAA;EACG,oCAAA;CLuwJT;A0Cj8JD;;EAaI,mBAAA;EACA,kBAAA;C1Cw7JH;A0Cp7JC;;;EAGE,sBAAA;C1Cs7JH;A0C38JD;EA0BI,aAAA;EACA,eAAA;C1Co7JH;A2C/8JD;EACE,cAAA;EACA,oBAAA;EACA,8BAAA;EACA,mBAAA;C3Ci9JD;A2Cr9JD;EAQI,cAAA;EACA,eAAA;C3Cg9JH;A2Cz9JD;EAcI,kBAAA;C3C88JH;A2C59JD;;EAoBI,iBAAA;C3C48JH;A2Ch+JD;EAwBI,gBAAA;C3C28JH;A2Cl8JD;;EAEE,oBAAA;C3Co8JD;A2Ct8JD;;EAMI,mBAAA;EACA,UAAA;EACA,aAAA;EACA,eAAA;C3Co8JH;A2C57JD;ECvDE,eAAA;EACA,0BAAA;EACA,sBAAA;C5Cs/JD;A2Cj8JD;EClDI,0BAAA;C5Cs/JH;A2Cp8JD;EC9CI,eAAA;C5Cq/JH;A2Cn8JD;EC3DE,eAAA;EACA,0BAAA;EACA,sBAAA;C5CigKD;A2Cx8JD;ECtDI,0BAAA;C5CigKH;A2C38JD;EClDI,eAAA;C5CggKH;A2C18JD;EC/DE,eAAA;EACA,0BAAA;EACA,sBAAA;C5C4gKD;A2C/8JD;EC1DI,0BAAA;C5C4gKH;A2Cl9JD;ECtDI,eAAA;C5C2gKH;A2Cj9JD;ECnEE,eAAA;EACA,0BAAA;EACA,sBAAA;C5CuhKD;A2Ct9JD;EC9DI,0BAAA;C5CuhKH;A2Cz9JD;EC1DI,eAAA;C5CshKH;A6CvhKD;EACE;IAAQ,4BAAA;G7C0hKP;E6CzhKD;IAAQ,yBAAA;G7C4hKP;CACF;A6CzhKD;EACE;IAAQ,4BAAA;G7C4hKP;E6C3hKD;IAAQ,yBAAA;G7C8hKP;CACF;A6CjiKD;EACE;IAAQ,4BAAA;G7C4hKP;E6C3hKD;IAAQ,yBAAA;G7C8hKP;CACF;A6CvhKD;EACE,aAAA;EACA,oBAAA;EACA,iBAAA;EACA,0BAAA;EACA,mBAAA;ExCsCA,uDAAA;EACQ,+CAAA;CLo/JT;A6CthKD;EACE,YAAA;EACA,UAAA;EACA,aAAA;EACA,gBAAA;EACA,kBAAA;EACA,YAAA;EACA,mBAAA;EACA,0BAAA;ExCyBA,uDAAA;EACQ,+CAAA;EAyHR,oCAAA;EACK,+BAAA;EACG,4BAAA;CLw4JT;A6CnhKD;;ECDI,8MAAA;EACA,yMAAA;EACA,sMAAA;EDEF,mCAAA;EAAA,2BAAA;C7CuhKD;A6ChhKD;;ExC5CE,2DAAA;EACK,sDAAA;EACG,mDAAA;CLgkKT;A6C7gKD;EEvEE,0BAAA;C/CulKD;A+CplKC;EDgDE,8MAAA;EACA,yMAAA;EACA,sMAAA;C9CuiKH;A6CjhKD;EE3EE,
0BAAA;C/C+lKD;A+C5lKC;EDgDE,8MAAA;EACA,yMAAA;EACA,sMAAA;C9C+iKH;A6CrhKD;EE/EE,0BAAA;C/CumKD;A+CpmKC;EDgDE,8MAAA;EACA,yMAAA;EACA,sMAAA;C9CujKH;A6CzhKD;EEnFE,0BAAA;C/C+mKD;A+C5mKC;EDgDE,8MAAA;EACA,yMAAA;EACA,sMAAA;C9C+jKH;AgDvnKD;EAEE,iBAAA;ChDwnKD;AgDtnKC;EACE,cAAA;ChDwnKH;AgDpnKD;;EAEE,iBAAA;EACA,QAAA;ChDsnKD;AgDnnKD;EACE,eAAA;ChDqnKD;AgDlnKD;EACE,eAAA;ChDonKD;AgDjnKC;EACE,gBAAA;ChDmnKH;AgD/mKD;;EAEE,mBAAA;ChDinKD;AgD9mKD;;EAEE,oBAAA;ChDgnKD;AgD7mKD;;;EAGE,oBAAA;EACA,oBAAA;ChD+mKD;AgD5mKD;EACE,uBAAA;ChD8mKD;AgD3mKD;EACE,uBAAA;ChD6mKD;AgDzmKD;EACE,cAAA;EACA,mBAAA;ChD2mKD;AgDrmKD;EACE,gBAAA;EACA,iBAAA;ChDumKD;AiD5pKD;EAEE,gBAAA;EACA,oBAAA;CjD6pKD;AiDrpKD;EACE,mBAAA;EACA,eAAA;EACA,mBAAA;EAEA,oBAAA;EACA,uBAAA;EACA,uBAAA;CjDspKD;AiDnpKC;ErB7BA,4BAAA;EACA,6BAAA;C5BmrKD;AiDppKC;EACE,iBAAA;ErBzBF,gCAAA;EACA,+BAAA;C5BgrKD;AiDnpKC;;;EAGE,eAAA;EACA,oBAAA;EACA,0BAAA;CjDqpKH;AiD1pKC;;;EASI,eAAA;CjDspKL;AiD/pKC;;;EAYI,eAAA;CjDwpKL;AiDnpKC;;;EAGE,WAAA;EACA,YAAA;EACA,0BAAA;EACA,sBAAA;CjDqpKH;AiD3pKC;;;;;;;;;EAYI,eAAA;CjD0pKL;AiDtqKC;;;EAeI,eAAA;CjD4pKL;AiDjpKD;;EAEE,YAAA;CjDmpKD;AiDrpKD;;EAKI,YAAA;CjDopKH;AiDhpKC;;;;EAEE,YAAA;EACA,sBAAA;EACA,0BAAA;CjDopKH;AiDhpKD;EACE,YAAA;EACA,iBAAA;CjDkpKD;AczvKA;EoCIG,eAAA;EACA,0BAAA;ClDwvKH;AkDtvKG;;EAEE,eAAA;ClDwvKL;AkD1vKG;;EAKI,eAAA;ClDyvKP;AkDtvKK;;;;EAEE,eAAA;EACA,0BAAA;ClD0vKP;AkDxvKK;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;ClD6vKP;ActxKA;EoCIG,eAAA;EACA,0BAAA;ClDqxKH;AkDnxKG;;EAEE,eAAA;ClDqxKL;AkDvxKG;;EAKI,eAAA;ClDsxKP;AkDnxKK;;;;EAEE,eAAA;EACA,0BAAA;ClDuxKP;AkDrxKK;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;ClD0xKP;AcnzKA;EoCIG,eAAA;EACA,0BAAA;ClDkzKH;AkDhzKG;;EAEE,eAAA;ClDkzKL;AkDpzKG;;EAKI,eAAA;ClDmzKP;AkDhzKK;;;;EAEE,eAAA;EACA,0BAAA;ClDozKP;AkDlzKK;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;ClDuzKP;Ach1KA;EoCIG,eAAA;EACA,0BAAA;ClD+0KH;AkD70KG;;EAEE,eAAA;ClD+0KL;AkDj1KG;;EAKI,eAAA;ClDg1KP;AkD70KK;;;;EAEE,eAAA;EACA,0BAAA;ClDi1KP;AkD/0KK;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;ClDo1KP;AiDnvKD;EACE,cAAA;EACA,mBAAA;CjDqvKD;AiDnvKD;EACE,iBAAA;EACA,iBAAA;CjDqvKD;AmD72KD;EACE,oBAAA;EACA,uBAAA;EACA,8BAAA;EACA,mBAAA;E9C0DA,kDAAA;EACQ,0CAAA;CLszKT;AmD52KD;EACE,cAAA;CnD82KD;AmDz2KD;EACE,mBAAA;EACA,qCAAA;EvBtBA,4BAAA;EACA,6BAAA;C5Bk4KD;AmD/2KD;EAMI,eAAA;CnD42KH;AmDv2KD;EACE,cAAA;EACA,iBAAA;EACA,gBAAA;EACA,eAAA;CnDy2KD;AmD72KD;;;;;EAWI,eAAA;CnDy2KH;AmDp2KD;EACE,mBAAA;EACA,0BAAA;EACA,2BAAA;EvB1CA,gCAAA;EACA,+BAAA;C5Bi5KD;AmD91KD;;EAGI,iBAAA;CnD+1KH;AmDl2KD;;EAMM,oBAAA;EACA,iBAAA;CnDg2KL;AmD51KG;;EAEI,cAAA;EvBzEN,4BAAA;EACA,6BAAA;C5Bw6KD;AmD11KG;;EAEI,iBAAA;EvBzEN,gCAAA;EACA,+BAAA;C5Bs6KD;AmDn3KD;EvB5DE,0BAAA;EACA,2BAAA;C5Bk7KD;AmDt1KD;EAEI,oBAAA;CnDu1KH;AmDp1KD;EACE,oBAAA;CnDs1KD;AmD90KD;;;EAII,iBAAA;CnD+0KH;AmDn1KD;;;EAOM,oBAAA;EACA,mBAAA;CnDi1KL;AmDz1KD;;EvB3GE,4BAAA;EACA,6BAAA;C5Bw8KD;AmD91KD;;;;EAmBQ,4BAAA;EACA,6BAAA;CnDi1KP;AmDr2KD;;;;;;;;EAwBU,4BAAA;CnDu1KT;AmD/2KD;;;;;;;;EA4BU,6BAAA;CnD61KT;AmDz3KD;;EvBnGE,gCAAA;EACA,+BAAA;C5Bg+KD;AmD93KD;;;;EAyCQ,gCAAA;EACA,+BAAA;CnD21KP;AmDr4KD;;;;;;;;EA8CU,+BAAA;CnDi2KT;AmD/4KD;;;;;;;;EAkDU,gCAAA;CnDu2KT;AmDz5KD;;;;EA2DI,2BAAA;CnDo2KH;AmD/5KD;;EA+DI,cAAA;CnDo2KH;AmDn6KD;;EAmEI,UAAA;CnDo2KH;AmDv6KD;;;;;;;;;;;;EA0EU,eAAA;CnD22KT;AmDr7KD;;;;;;;;;;;;EA8EU,gBAAA;CnDq3KT;AmDn8KD;;;;;;;;EAuFU,iBAAA;CnDs3KT;AmD78KD;;;;;;;;EAgGU,iBAAA;CnDu3KT;AmDv9KD;EAsGI,iBAAA;EACA,UAAA;CnDo3KH;AmD12KD;EACE,oBAAA;CnD42KD;AmD72KD;EAKI,iBAAA;EACA,mBAAA;CnD22KH;AmDj3KD;EASM,gBAAA;CnD22KL;AmDp3KD;EAcI,iBAAA;CnDy2KH;AmDv3KD;;EAkBM,2BAAA;CnDy2KL;AmD33KD;EAuBI,cAAA;CnDu2KH;AmD93KD;EAyBM,8BAAA;CnDw2KL;AmDj2KD;EC5PE,mBAAA;CpDgmLD;AoD9lLC;EACE,eAAA;EACA,0BAAA;EACA,mBAAA;CpDgmLH;A
oDnmLC;EAMI,uBAAA;CpDgmLL;AoDtmLC;EASI,eAAA;EACA,0BAAA;CpDgmLL;AoD7lLC;EAEI,0BAAA;CpD8lLL;AmDh3KD;EC/PE,sBAAA;CpDknLD;AoDhnLC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CpDknLH;AoDrnLC;EAMI,0BAAA;CpDknLL;AoDxnLC;EASI,eAAA;EACA,uBAAA;CpDknLL;AoD/mLC;EAEI,6BAAA;CpDgnLL;AmD/3KD;EClQE,sBAAA;CpDooLD;AoDloLC;EACE,eAAA;EACA,0BAAA;EACA,sBAAA;CpDooLH;AoDvoLC;EAMI,0BAAA;CpDooLL;AoD1oLC;EASI,eAAA;EACA,0BAAA;CpDooLL;AoDjoLC;EAEI,6BAAA;CpDkoLL;AmD94KD;ECrQE,sBAAA;CpDspLD;AoDppLC;EACE,eAAA;EACA,0BAAA;EACA,sBAAA;CpDspLH;AoDzpLC;EAMI,0BAAA;CpDspLL;AoD5pLC;EASI,eAAA;EACA,0BAAA;CpDspLL;AoDnpLC;EAEI,6BAAA;CpDopLL;AmD75KD;ECxQE,sBAAA;CpDwqLD;AoDtqLC;EACE,eAAA;EACA,0BAAA;EACA,sBAAA;CpDwqLH;AoD3qLC;EAMI,0BAAA;CpDwqLL;AoD9qLC;EASI,eAAA;EACA,0BAAA;CpDwqLL;AoDrqLC;EAEI,6BAAA;CpDsqLL;AmD56KD;EC3QE,sBAAA;CpD0rLD;AoDxrLC;EACE,eAAA;EACA,0BAAA;EACA,sBAAA;CpD0rLH;AoD7rLC;EAMI,0BAAA;CpD0rLL;AoDhsLC;EASI,eAAA;EACA,0BAAA;CpD0rLL;AoDvrLC;EAEI,6BAAA;CpDwrLL;AqDxsLD;EACE,mBAAA;EACA,eAAA;EACA,UAAA;EACA,WAAA;EACA,iBAAA;CrD0sLD;AqD/sLD;;;;;EAYI,mBAAA;EACA,OAAA;EACA,UAAA;EACA,QAAA;EACA,YAAA;EACA,aAAA;EACA,UAAA;CrD0sLH;AqDrsLD;EACE,uBAAA;CrDusLD;AqDnsLD;EACE,oBAAA;CrDqsLD;AsDhuLD;EACE,iBAAA;EACA,cAAA;EACA,oBAAA;EACA,0BAAA;EACA,0BAAA;EACA,mBAAA;EjD0DA,wDAAA;EACQ,gDAAA;CLyqLT;AsD1uLD;EASI,mBAAA;EACA,kCAAA;CtDouLH;AsD/tLD;EACE,cAAA;EACA,mBAAA;CtDiuLD;AsD/tLD;EACE,aAAA;EACA,mBAAA;CtDiuLD;AuDrvLD;EACE,aAAA;EACA,gBAAA;EACA,kBAAA;EACA,eAAA;EACA,YAAA;EACA,0BAAA;EjCTA,0BAAA;EACA,aAAA;CtBiwLD;AuDtvLC;;EAEE,YAAA;EACA,sBAAA;EACA,gBAAA;EjChBF,0BAAA;EACA,aAAA;CtBywLD;AuDlvLC;EACE,WAAA;EACA,gBAAA;EACA,wBAAA;EACA,UAAA;EACA,yBAAA;EACA,sBAAA;EAAA,iBAAA;CvDovLH;AwD5wLD;EACE,iBAAA;CxD8wLD;AwD1wLD;EACE,gBAAA;EACA,OAAA;EACA,SAAA;EACA,UAAA;EACA,QAAA;EACA,cAAA;EACA,cAAA;EACA,iBAAA;EACA,kCAAA;EAIA,WAAA;CxDywLD;AwDtwLC;EnDiHA,sCAAA;EACI,kCAAA;EACC,iCAAA;EACG,8BAAA;EAkER,oDAAA;EAEK,0CAAA;EACG,4CAAA;EAAA,oCAAA;EAAA,iGAAA;CLulLT;AwD5wLC;EnD6GA,mCAAA;EACI,+BAAA;EACC,8BAAA;EACG,2BAAA;CLkqLT;AwDhxLD;EACE,mBAAA;EACA,iBAAA;CxDkxLD;AwD9wLD;EACE,mBAAA;EACA,YAAA;EACA,aAAA;CxDgxLD;AwD5wLD;EACE,mBAAA;EACA,uBAAA;EACA,6BAAA;EACA,uBAAA;EACA,qCAAA;EACA,mBAAA;EnDcA,iDAAA;EACQ,yCAAA;EmDZR,WAAA;CxD8wLD;AwD1wLD;EACE,gBAAA;EACA,OAAA;EACA,SAAA;EACA,UAAA;EACA,QAAA;EACA,cAAA;EACA,uBAAA;CxD4wLD;AwD1wLC;ElCpEA,yBAAA;EACA,WAAA;CtBi1LD;AwD7wLC;ElCrEA,0BAAA;EACA,aAAA;CtBq1LD;AwD5wLD;EACE,cAAA;EACA,iCAAA;CxD8wLD;AwD1wLD;EACE,iBAAA;CxD4wLD;AwDxwLD;EACE,UAAA;EACA,wBAAA;CxD0wLD;AwDrwLD;EACE,mBAAA;EACA,cAAA;CxDuwLD;AwDnwLD;EACE,cAAA;EACA,kBAAA;EACA,8BAAA;CxDqwLD;AwDxwLD;EAQI,iBAAA;EACA,iBAAA;CxDmwLH;AwD5wLD;EAaI,kBAAA;CxDkwLH;AwD/wLD;EAiBI,eAAA;CxDiwLH;AwD5vLD;EACE,mBAAA;EACA,aAAA;EACA,YAAA;EACA,aAAA;EACA,iBAAA;CxD8vLD;AwD1vLD;EAEE;IACE,aAAA;IACA,kBAAA;GxD2vLD;EwDzvLD;InDrEA,kDAAA;IACQ,0CAAA;GLi0LP;EwDxvLD;IAAY,aAAA;GxD2vLX;CACF;AwDzvLD;EACE;IAAY,aAAA;GxD4vLX;CACF;AyD34LD;EACE,mBAAA;EACA,cAAA;EACA,eAAA;ECRA,4DAAA;EAEA,mBAAA;EACA,iBAAA;EACA,wBAAA;EACA,iBAAA;EACA,iBAAA;EACA,kBAAA;EACA,sBAAA;EACA,kBAAA;EACA,qBAAA;EACA,uBAAA;EACA,mBAAA;EACA,qBAAA;EACA,kBAAA;EACA,oBAAA;EDHA,gBAAA;EnCTA,yBAAA;EACA,WAAA;CtBm6LD;AyDv5LC;EnCbA,0BAAA;EACA,aAAA;CtBu6LD;AyD15LC;EACE,eAAA;EACA,iBAAA;CzD45LH;AyD15LC;EACE,eAAA;EACA,iBAAA;CzD45LH;AyD15LC;EACE,eAAA;EACA,gBAAA;CzD45LH;AyD15LC;EACE,eAAA;EACA,kBAAA;CzD45LH;AyDx5LC;EACE,UAAA;EACA,UAAA;EACA,kBAAA;EACA,wBAAA;EACA,uBAAA;CzD05LH;AyDx5LC;EACE,WAAA;EACA,UAAA;EACA,oBAAA;EACA,wBAAA;EACA,uBAAA;CzD05LH;AyDx5LC;EACE,UAAA;EACA,UAAA;EACA,oBAAA;EACA,wBAAA;EACA,uBAAA;CzD05LH;AyDx5LC;EACE,SAAA;EACA,QAAA;EACA,iBAAA;EACA,4BAAA;EACA,yBAAA;CzD05LH;AyDx5LC;EACE,SAAA;EACA,SAAA;EACA,iBAAA;EACA,4
BAAA;EACA,wBAAA;CzD05LH;AyDx5LC;EACE,OAAA;EACA,UAAA;EACA,kBAAA;EACA,wBAAA;EACA,0BAAA;CzD05LH;AyDx5LC;EACE,OAAA;EACA,WAAA;EACA,iBAAA;EACA,wBAAA;EACA,0BAAA;CzD05LH;AyDx5LC;EACE,OAAA;EACA,UAAA;EACA,iBAAA;EACA,wBAAA;EACA,0BAAA;CzD05LH;AyDr5LD;EACE,iBAAA;EACA,iBAAA;EACA,YAAA;EACA,mBAAA;EACA,uBAAA;EACA,mBAAA;CzDu5LD;AyDn5LD;EACE,mBAAA;EACA,SAAA;EACA,UAAA;EACA,0BAAA;EACA,oBAAA;CzDq5LD;A2D9/LD;EACE,mBAAA;EACA,OAAA;EACA,QAAA;EACA,cAAA;EACA,cAAA;EACA,iBAAA;EACA,aAAA;EDXA,4DAAA;EAEA,mBAAA;EACA,iBAAA;EACA,wBAAA;EACA,iBAAA;EACA,iBAAA;EACA,kBAAA;EACA,sBAAA;EACA,kBAAA;EACA,qBAAA;EACA,uBAAA;EACA,mBAAA;EACA,qBAAA;EACA,kBAAA;EACA,oBAAA;ECAA,gBAAA;EACA,uBAAA;EACA,6BAAA;EACA,uBAAA;EACA,qCAAA;EACA,mBAAA;EtDiDA,kDAAA;EACQ,0CAAA;CL49LT;A2D1gMC;EAAQ,kBAAA;C3D6gMT;A2D5gMC;EAAU,kBAAA;C3D+gMX;A2D9gMC;EAAW,iBAAA;C3DihMZ;A2DhhMC;EAAS,mBAAA;C3DmhMV;A2D1iMD;EA4BI,mBAAA;C3DihMH;A2D/gMG;;EAEE,mBAAA;EACA,eAAA;EACA,SAAA;EACA,UAAA;EACA,0BAAA;EACA,oBAAA;C3DihML;A2D9gMG;EACE,YAAA;EACA,mBAAA;C3DghML;A2D5gMC;EACE,cAAA;EACA,UAAA;EACA,mBAAA;EACA,0BAAA;EACA,sCAAA;EACA,uBAAA;C3D8gMH;A2D7gMG;EACE,YAAA;EACA,mBAAA;EACA,aAAA;EACA,uBAAA;EACA,uBAAA;C3D+gML;A2D5gMC;EACE,SAAA;EACA,YAAA;EACA,kBAAA;EACA,4BAAA;EACA,wCAAA;EACA,qBAAA;C3D8gMH;A2D7gMG;EACE,cAAA;EACA,UAAA;EACA,aAAA;EACA,yBAAA;EACA,qBAAA;C3D+gML;A2D5gMC;EACE,WAAA;EACA,UAAA;EACA,mBAAA;EACA,oBAAA;EACA,6BAAA;EACA,yCAAA;C3D8gMH;A2D7gMG;EACE,SAAA;EACA,mBAAA;EACA,aAAA;EACA,oBAAA;EACA,0BAAA;C3D+gML;A2D3gMC;EACE,SAAA;EACA,aAAA;EACA,kBAAA;EACA,sBAAA;EACA,2BAAA;EACA,uCAAA;C3D6gMH;A2D5gMG;EACE,WAAA;EACA,cAAA;EACA,aAAA;EACA,sBAAA;EACA,wBAAA;C3D8gML;A2DzgMD;EACE,kBAAA;EACA,UAAA;EACA,gBAAA;EACA,0BAAA;EACA,iCAAA;EACA,2BAAA;C3D2gMD;A2DxgMD;EACE,kBAAA;C3D0gMD;A4D9nMD;EACE,mBAAA;C5DgoMD;A4D7nMD;EACE,mBAAA;EACA,YAAA;EACA,iBAAA;C5D+nMD;A4DloMD;EAMI,mBAAA;EACA,cAAA;EvD6KF,0CAAA;EACK,qCAAA;EACG,kCAAA;CLm9LT;A4DzoMD;;EAcM,eAAA;C5D+nML;A4D3nMG;EAAA;IvDuLF,uDAAA;IAEK,6CAAA;IACG,+CAAA;IAAA,uCAAA;IAAA,0GAAA;IA7JR,oCAAA;IAEQ,4BAAA;IA+GR,4BAAA;IAEQ,oBAAA;GLw/LP;E4DnoMG;;IvDmHJ,2CAAA;IACQ,mCAAA;IuDjHF,QAAA;G5DsoML;E4DpoMG;;IvD8GJ,4CAAA;IACQ,oCAAA;IuD5GF,QAAA;G5DuoML;E4DroMG;;;IvDyGJ,wCAAA;IACQ,gCAAA;IuDtGF,QAAA;G5DwoML;CACF;A4D9qMD;;;EA6CI,eAAA;C5DsoMH;A4DnrMD;EAiDI,QAAA;C5DqoMH;A4DtrMD;;EAsDI,mBAAA;EACA,OAAA;EACA,YAAA;C5DooMH;A4D5rMD;EA4DI,WAAA;C5DmoMH;A4D/rMD;EA+DI,YAAA;C5DmoMH;A4DlsMD;;EAmEI,QAAA;C5DmoMH;A4DtsMD;EAuEI,YAAA;C5DkoMH;A4DzsMD;EA0EI,WAAA;C5DkoMH;A4D1nMD;EACE,mBAAA;EACA,OAAA;EACA,UAAA;EACA,QAAA;EACA,WAAA;EACA,gBAAA;EACA,YAAA;EACA,mBAAA;EACA,0CAAA;EACA,mCAAA;EtCpGA,0BAAA;EACA,aAAA;CtBiuMD;A4DxnMC;EdrGE,mGAAA;EACA,8FAAA;EACA,qHAAA;EAAA,+FAAA;EACA,uHAAA;EACA,4BAAA;C9CguMH;A4D5nMC;EACE,SAAA;EACA,WAAA;Ed1GA,mGAAA;EACA,8FAAA;EACA,qHAAA;EAAA,+FAAA;EACA,uHAAA;EACA,4BAAA;C9CyuMH;A4D9nMC;;EAEE,YAAA;EACA,sBAAA;EACA,WAAA;EtCxHF,0BAAA;EACA,aAAA;CtByvMD;A4DhqMD;;;;EAuCI,mBAAA;EACA,SAAA;EACA,WAAA;EACA,sBAAA;EACA,kBAAA;C5D+nMH;A4D1qMD;;EA+CI,UAAA;EACA,mBAAA;C5D+nMH;A4D/qMD;;EAoDI,WAAA;EACA,oBAAA;C5D+nMH;A4DprMD;;EAyDI,YAAA;EACA,aAAA;EACA,mBAAA;EACA,eAAA;C5D+nMH;A4D3nMG;EACE,iBAAA;C5D6nML;A4DznMG;EACE,iBAAA;C5D2nML;A4DjnMD;EACE,mBAAA;EACA,aAAA;EACA,UAAA;EACA,YAAA;EACA,WAAA;EACA,gBAAA;EACA,kBAAA;EACA,mBAAA;EACA,iBAAA;C5DmnMD;A4D5nMD;EAYI,sBAAA;EACA,YAAA;EACA,aAAA;EACA,YAAA;EACA,oBAAA;EACA,gBAAA;EAUA,0BAAA;EACA,mCAAA;EAEA,uBAAA;EACA,oBAAA;C5DymMH;A4DxoMD;EAmCI,YAAA;EACA,aAAA;EACA,UAAA;EACA,uBAAA;C5DwmMH;A4DjmMD;EACE,mBAAA;EACA,WAAA;EACA,aAAA;EACA,UAAA;EACA,YAAA;EACA,kBAAA;EACA,qBAAA;EACA,YAAA;EACA,mBAAA;EACA,0CAAA;C5DmmMD;A4DjmMC;EACE,kBAAA;C5DmmMH;A4D7lMD;EAGE;;;;IAKI,YAAA;IACA,aAAA;IACA,kBAAA;IACA,gBAAA;G5D4lMH;E4Dp
mMD;;IAYI,mBAAA;G5D4lMH;E4DxmMD;;IAgBI,oBAAA;G5D4lMH;E4DvlMD;IACE,WAAA;IACA,UAAA;IACA,qBAAA;G5DylMD;E4DrlMD;IACE,aAAA;G5DulMD;CACF;A6Dz1MC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;EAEE,eAAA;EACA,aAAA;C7Dy3MH;A6Dv3MC;;;;;;;;;;;;;;;;EACE,YAAA;C7Dw4MH;AiC94MD;E6BVE,eAAA;EACA,mBAAA;EACA,kBAAA;C9D25MD;AiCh5MD;EACE,wBAAA;CjCk5MD;AiCh5MD;EACE,uBAAA;CjCk5MD;AiC14MD;EACE,yBAAA;CjC44MD;AiC14MD;EACE,0BAAA;CjC44MD;AiC14MD;EACE,mBAAA;CjC44MD;AiC14MD;E8BzBE,YAAA;EACA,mBAAA;EACA,kBAAA;EACA,8BAAA;EACA,UAAA;C/Ds6MD;AiCx4MD;EACE,yBAAA;CjC04MD;AiCn4MD;EACE,gBAAA;CjCq4MD;AgEt6MD;EACE,oBAAA;ChEw6MD;AgEl6MD;;;;EClBE,yBAAA;CjE07MD;AgEj6MD;;;;;;;;;;;;EAYE,yBAAA;ChEm6MD;AgE/5MC;EAAA;ICjDA,0BAAA;GjEo9MC;EiEn9MD;IAAU,0BAAA;GjEs9MT;EiEr9MD;IAAU,8BAAA;GjEw9MT;EiEv9MD;;IACU,+BAAA;GjE09MT;CACF;AgEz6MC;EAAA;IACE,0BAAA;GhE46MD;CACF;AgEz6MC;EAAA;IACE,2BAAA;GhE46MD;CACF;AgEz6MC;EAAA;IACE,iCAAA;GhE46MD;CACF;AgEx6MC;EAAA;ICtEA,0BAAA;GjEk/MC;EiEj/MD;IAAU,0BAAA;GjEo/MT;EiEn/MD;IAAU,8BAAA;GjEs/MT;EiEr/MD;;IACU,+BAAA;GjEw/MT;CACF;AgEl7MC;EAAA;IACE,0BAAA;GhEq7MD;CACF;AgEl7MC;EAAA;IACE,2BAAA;GhEq7MD;CACF;AgEl7MC;EAAA;IACE,iCAAA;GhEq7MD;CACF;AgEj7MC;EAAA;IC3FA,0BAAA;GjEghNC;EiE/gND;IAAU,0BAAA;GjEkhNT;EiEjhND;IAAU,8BAAA;GjEohNT;EiEnhND;;IACU,+BAAA;GjEshNT;CACF;AgE37MC;EAAA;IACE,0BAAA;GhE87MD;CACF;AgE37MC;EAAA;IACE,2BAAA;GhE87MD;CACF;AgE37MC;EAAA;IACE,iCAAA;GhE87MD;CACF;AgE17MC;EAAA;IChHA,0BAAA;GjE8iNC;EiE7iND;IAAU,0BAAA;GjEgjNT;EiE/iND;IAAU,8BAAA;GjEkjNT;EiEjjND;;IACU,+BAAA;GjEojNT;CACF;AgEp8MC;EAAA;IACE,0BAAA;GhEu8MD;CACF;AgEp8MC;EAAA;IACE,2BAAA;GhEu8MD;CACF;AgEp8MC;EAAA;IACE,iCAAA;GhEu8MD;CACF;AgEn8MC;EAAA;IC7HA,yBAAA;GjEokNC;CACF;AgEn8MC;EAAA;IClIA,yBAAA;GjEykNC;CACF;AgEn8MC;EAAA;ICvIA,yBAAA;GjE8kNC;CACF;AgEn8MC;EAAA;IC5IA,yBAAA;GjEmlNC;CACF;AgE77MD;ECvJE,yBAAA;CjEulND;AgE77MC;EAAA;IClKA,0BAAA;GjEmmNC;EiElmND;IAAU,0BAAA;GjEqmNT;EiEpmND;IAAU,8BAAA;GjEumNT;EiEtmND;;IACU,+BAAA;GjEymNT;CACF;AgEx8MD;EACE,yBAAA;ChE08MD;AgEx8MC;EAAA;IACE,0BAAA;GhE28MD;CACF;AgEz8MD;EACE,yBAAA;ChE28MD;AgEz8MC;EAAA;IACE,2BAAA;GhE48MD;CACF;AgE18MD;EACE,yBAAA;ChE48MD;AgE18MC;EAAA;IACE,iCAAA;GhE68MD;CACF;AgEz8MC;EAAA;ICrLA,yBAAA;GjEkoNC;CACF","file":"bootstrap.css","sourcesContent":["/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n/*! 
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */\nhtml {\n  font-family: sans-serif;\n  -ms-text-size-adjust: 100%;\n  -webkit-text-size-adjust: 100%;\n}\nbody {\n  margin: 0;\n}\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n  display: block;\n}\naudio,\ncanvas,\nprogress,\nvideo {\n  display: inline-block;\n  vertical-align: baseline;\n}\naudio:not([controls]) {\n  display: none;\n  height: 0;\n}\n[hidden],\ntemplate {\n  display: none;\n}\na {\n  background-color: transparent;\n}\na:active,\na:hover {\n  outline: 0;\n}\nabbr[title] {\n  border-bottom: none;\n  text-decoration: underline;\n  text-decoration: underline dotted;\n}\nb,\nstrong {\n  font-weight: bold;\n}\ndfn {\n  font-style: italic;\n}\nh1 {\n  font-size: 2em;\n  margin: 0.67em 0;\n}\nmark {\n  background: #ff0;\n  color: #000;\n}\nsmall {\n  font-size: 80%;\n}\nsub,\nsup {\n  font-size: 75%;\n  line-height: 0;\n  position: relative;\n  vertical-align: baseline;\n}\nsup {\n  top: -0.5em;\n}\nsub {\n  bottom: -0.25em;\n}\nimg {\n  border: 0;\n}\nsvg:not(:root) {\n  overflow: hidden;\n}\nfigure {\n  margin: 1em 40px;\n}\nhr {\n  box-sizing: content-box;\n  height: 0;\n}\npre {\n  overflow: auto;\n}\ncode,\nkbd,\npre,\nsamp {\n  font-family: monospace, monospace;\n  font-size: 1em;\n}\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n  color: inherit;\n  font: inherit;\n  margin: 0;\n}\nbutton {\n  overflow: visible;\n}\nbutton,\nselect {\n  text-transform: none;\n}\nbutton,\nhtml input[type=\"button\"],\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n  -webkit-appearance: button;\n  cursor: pointer;\n}\nbutton[disabled],\nhtml input[disabled] {\n  cursor: default;\n}\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n  border: 0;\n  padding: 0;\n}\ninput {\n  line-height: normal;\n}\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n  box-sizing: border-box;\n  padding: 0;\n}\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n  height: auto;\n}\ninput[type=\"search\"] {\n  -webkit-appearance: textfield;\n  box-sizing: content-box;\n}\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n  -webkit-appearance: none;\n}\nfieldset {\n  border: 1px solid #c0c0c0;\n  margin: 0 2px;\n  padding: 0.35em 0.625em 0.75em;\n}\nlegend {\n  border: 0;\n  padding: 0;\n}\ntextarea {\n  overflow: auto;\n}\noptgroup {\n  font-weight: bold;\n}\ntable {\n  border-collapse: collapse;\n  border-spacing: 0;\n}\ntd,\nth {\n  padding: 0;\n}\n/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n@media print {\n  *,\n  *:before,\n  *:after {\n    color: #000 !important;\n    text-shadow: none !important;\n    background: transparent !important;\n    box-shadow: none !important;\n  }\n  a,\n  a:visited {\n    text-decoration: underline;\n  }\n  a[href]:after {\n    content: \" (\" attr(href) \")\";\n  }\n  abbr[title]:after {\n    content: \" (\" attr(title) \")\";\n  }\n  a[href^=\"#\"]:after,\n  a[href^=\"javascript:\"]:after {\n    content: \"\";\n  }\n  pre,\n  blockquote {\n    border: 1px solid #999;\n    page-break-inside: avoid;\n  }\n  thead {\n    display: table-header-group;\n  }\n  tr,\n  img {\n    page-break-inside: avoid;\n  }\n  img {\n    max-width: 100% !important;\n  }\n  p,\n  h2,\n  h3 {\n    orphans: 3;\n    widows: 3;\n  }\n  h2,\n  h3 {\n    page-break-after: avoid;\n  }\n  .navbar {\n    display: none;\n  }\n  .btn > .caret,\n  .dropup > .btn > .caret {\n    border-top-color: #000 !important;\n  }\n  .label {\n    border: 1px solid #000;\n  }\n  .table {\n    border-collapse: collapse !important;\n  }\n  .table td,\n  .table th {\n    background-color: #fff !important;\n  }\n  .table-bordered th,\n  .table-bordered td {\n    border: 1px solid #ddd !important;\n  }\n}\n@font-face {\n  font-family: \"Glyphicons Halflings\";\n  src: url(\"../fonts/glyphicons-halflings-regular.eot\");\n  src: url(\"../fonts/glyphicons-halflings-regular.eot?#iefix\") format(\"embedded-opentype\"), url(\"../fonts/glyphicons-halflings-regular.woff2\") format(\"woff2\"), url(\"../fonts/glyphicons-halflings-regular.woff\") format(\"woff\"), url(\"../fonts/glyphicons-halflings-regular.ttf\") format(\"truetype\"), url(\"../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular\") format(\"svg\");\n}\n.glyphicon {\n  position: relative;\n  top: 1px;\n  display: inline-block;\n  font-family: \"Glyphicons Halflings\";\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1;\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n.glyphicon-asterisk:before {\n  content: \"\\002a\";\n}\n.glyphicon-plus:before {\n  content: \"\\002b\";\n}\n.glyphicon-euro:before,\n.glyphicon-eur:before {\n  content: \"\\20ac\";\n}\n.glyphicon-minus:before {\n  content: \"\\2212\";\n}\n.glyphicon-cloud:before {\n  content: \"\\2601\";\n}\n.glyphicon-envelope:before {\n  content: \"\\2709\";\n}\n.glyphicon-pencil:before {\n  content: \"\\270f\";\n}\n.glyphicon-glass:before {\n  content: \"\\e001\";\n}\n.glyphicon-music:before {\n  content: \"\\e002\";\n}\n.glyphicon-search:before {\n  content: \"\\e003\";\n}\n.glyphicon-heart:before {\n  content: \"\\e005\";\n}\n.glyphicon-star:before {\n  content: \"\\e006\";\n}\n.glyphicon-star-empty:before {\n  content: \"\\e007\";\n}\n.glyphicon-user:before {\n  content: \"\\e008\";\n}\n.glyphicon-film:before {\n  content: \"\\e009\";\n}\n.glyphicon-th-large:before {\n  content: \"\\e010\";\n}\n.glyphicon-th:before {\n  content: \"\\e011\";\n}\n.glyphicon-th-list:before {\n  content: \"\\e012\";\n}\n.glyphicon-ok:before {\n  content: \"\\e013\";\n}\n.glyphicon-remove:before {\n  content: \"\\e014\";\n}\n.glyphicon-zoom-in:before {\n  content: \"\\e015\";\n}\n.glyphicon-zoom-out:before {\n  content: \"\\e016\";\n}\n.glyphicon-off:before {\n  content: \"\\e017\";\n}\n.glyphicon-signal:before {\n  content: \"\\e018\";\n}\n.glyphicon-cog:before {\n  content: \"\\e019\";\n}\n.glyphicon-trash:before {\n  content: \"\\e020\";\n}\n.glyphicon-home:before 
{\n  content: \"\\e021\";\n}\n.glyphicon-file:before {\n  content: \"\\e022\";\n}\n.glyphicon-time:before {\n  content: \"\\e023\";\n}\n.glyphicon-road:before {\n  content: \"\\e024\";\n}\n.glyphicon-download-alt:before {\n  content: \"\\e025\";\n}\n.glyphicon-download:before {\n  content: \"\\e026\";\n}\n.glyphicon-upload:before {\n  content: \"\\e027\";\n}\n.glyphicon-inbox:before {\n  content: \"\\e028\";\n}\n.glyphicon-play-circle:before {\n  content: \"\\e029\";\n}\n.glyphicon-repeat:before {\n  content: \"\\e030\";\n}\n.glyphicon-refresh:before {\n  content: \"\\e031\";\n}\n.glyphicon-list-alt:before {\n  content: \"\\e032\";\n}\n.glyphicon-lock:before {\n  content: \"\\e033\";\n}\n.glyphicon-flag:before {\n  content: \"\\e034\";\n}\n.glyphicon-headphones:before {\n  content: \"\\e035\";\n}\n.glyphicon-volume-off:before {\n  content: \"\\e036\";\n}\n.glyphicon-volume-down:before {\n  content: \"\\e037\";\n}\n.glyphicon-volume-up:before {\n  content: \"\\e038\";\n}\n.glyphicon-qrcode:before {\n  content: \"\\e039\";\n}\n.glyphicon-barcode:before {\n  content: \"\\e040\";\n}\n.glyphicon-tag:before {\n  content: \"\\e041\";\n}\n.glyphicon-tags:before {\n  content: \"\\e042\";\n}\n.glyphicon-book:before {\n  content: \"\\e043\";\n}\n.glyphicon-bookmark:before {\n  content: \"\\e044\";\n}\n.glyphicon-print:before {\n  content: \"\\e045\";\n}\n.glyphicon-camera:before {\n  content: \"\\e046\";\n}\n.glyphicon-font:before {\n  content: \"\\e047\";\n}\n.glyphicon-bold:before {\n  content: \"\\e048\";\n}\n.glyphicon-italic:before {\n  content: \"\\e049\";\n}\n.glyphicon-text-height:before {\n  content: \"\\e050\";\n}\n.glyphicon-text-width:before {\n  content: \"\\e051\";\n}\n.glyphicon-align-left:before {\n  content: \"\\e052\";\n}\n.glyphicon-align-center:before {\n  content: \"\\e053\";\n}\n.glyphicon-align-right:before {\n  content: \"\\e054\";\n}\n.glyphicon-align-justify:before {\n  content: \"\\e055\";\n}\n.glyphicon-list:before {\n  content: \"\\e056\";\n}\n.glyphicon-indent-left:before {\n  content: \"\\e057\";\n}\n.glyphicon-indent-right:before {\n  content: \"\\e058\";\n}\n.glyphicon-facetime-video:before {\n  content: \"\\e059\";\n}\n.glyphicon-picture:before {\n  content: \"\\e060\";\n}\n.glyphicon-map-marker:before {\n  content: \"\\e062\";\n}\n.glyphicon-adjust:before {\n  content: \"\\e063\";\n}\n.glyphicon-tint:before {\n  content: \"\\e064\";\n}\n.glyphicon-edit:before {\n  content: \"\\e065\";\n}\n.glyphicon-share:before {\n  content: \"\\e066\";\n}\n.glyphicon-check:before {\n  content: \"\\e067\";\n}\n.glyphicon-move:before {\n  content: \"\\e068\";\n}\n.glyphicon-step-backward:before {\n  content: \"\\e069\";\n}\n.glyphicon-fast-backward:before {\n  content: \"\\e070\";\n}\n.glyphicon-backward:before {\n  content: \"\\e071\";\n}\n.glyphicon-play:before {\n  content: \"\\e072\";\n}\n.glyphicon-pause:before {\n  content: \"\\e073\";\n}\n.glyphicon-stop:before {\n  content: \"\\e074\";\n}\n.glyphicon-forward:before {\n  content: \"\\e075\";\n}\n.glyphicon-fast-forward:before {\n  content: \"\\e076\";\n}\n.glyphicon-step-forward:before {\n  content: \"\\e077\";\n}\n.glyphicon-eject:before {\n  content: \"\\e078\";\n}\n.glyphicon-chevron-left:before {\n  content: \"\\e079\";\n}\n.glyphicon-chevron-right:before {\n  content: \"\\e080\";\n}\n.glyphicon-plus-sign:before {\n  content: \"\\e081\";\n}\n.glyphicon-minus-sign:before {\n  content: \"\\e082\";\n}\n.glyphicon-remove-sign:before {\n  content: \"\\e083\";\n}\n.glyphicon-ok-sign:before {\n  content: 
\"\\e084\";\n}\n.glyphicon-question-sign:before {\n  content: \"\\e085\";\n}\n.glyphicon-info-sign:before {\n  content: \"\\e086\";\n}\n.glyphicon-screenshot:before {\n  content: \"\\e087\";\n}\n.glyphicon-remove-circle:before {\n  content: \"\\e088\";\n}\n.glyphicon-ok-circle:before {\n  content: \"\\e089\";\n}\n.glyphicon-ban-circle:before {\n  content: \"\\e090\";\n}\n.glyphicon-arrow-left:before {\n  content: \"\\e091\";\n}\n.glyphicon-arrow-right:before {\n  content: \"\\e092\";\n}\n.glyphicon-arrow-up:before {\n  content: \"\\e093\";\n}\n.glyphicon-arrow-down:before {\n  content: \"\\e094\";\n}\n.glyphicon-share-alt:before {\n  content: \"\\e095\";\n}\n.glyphicon-resize-full:before {\n  content: \"\\e096\";\n}\n.glyphicon-resize-small:before {\n  content: \"\\e097\";\n}\n.glyphicon-exclamation-sign:before {\n  content: \"\\e101\";\n}\n.glyphicon-gift:before {\n  content: \"\\e102\";\n}\n.glyphicon-leaf:before {\n  content: \"\\e103\";\n}\n.glyphicon-fire:before {\n  content: \"\\e104\";\n}\n.glyphicon-eye-open:before {\n  content: \"\\e105\";\n}\n.glyphicon-eye-close:before {\n  content: \"\\e106\";\n}\n.glyphicon-warning-sign:before {\n  content: \"\\e107\";\n}\n.glyphicon-plane:before {\n  content: \"\\e108\";\n}\n.glyphicon-calendar:before {\n  content: \"\\e109\";\n}\n.glyphicon-random:before {\n  content: \"\\e110\";\n}\n.glyphicon-comment:before {\n  content: \"\\e111\";\n}\n.glyphicon-magnet:before {\n  content: \"\\e112\";\n}\n.glyphicon-chevron-up:before {\n  content: \"\\e113\";\n}\n.glyphicon-chevron-down:before {\n  content: \"\\e114\";\n}\n.glyphicon-retweet:before {\n  content: \"\\e115\";\n}\n.glyphicon-shopping-cart:before {\n  content: \"\\e116\";\n}\n.glyphicon-folder-close:before {\n  content: \"\\e117\";\n}\n.glyphicon-folder-open:before {\n  content: \"\\e118\";\n}\n.glyphicon-resize-vertical:before {\n  content: \"\\e119\";\n}\n.glyphicon-resize-horizontal:before {\n  content: \"\\e120\";\n}\n.glyphicon-hdd:before {\n  content: \"\\e121\";\n}\n.glyphicon-bullhorn:before {\n  content: \"\\e122\";\n}\n.glyphicon-bell:before {\n  content: \"\\e123\";\n}\n.glyphicon-certificate:before {\n  content: \"\\e124\";\n}\n.glyphicon-thumbs-up:before {\n  content: \"\\e125\";\n}\n.glyphicon-thumbs-down:before {\n  content: \"\\e126\";\n}\n.glyphicon-hand-right:before {\n  content: \"\\e127\";\n}\n.glyphicon-hand-left:before {\n  content: \"\\e128\";\n}\n.glyphicon-hand-up:before {\n  content: \"\\e129\";\n}\n.glyphicon-hand-down:before {\n  content: \"\\e130\";\n}\n.glyphicon-circle-arrow-right:before {\n  content: \"\\e131\";\n}\n.glyphicon-circle-arrow-left:before {\n  content: \"\\e132\";\n}\n.glyphicon-circle-arrow-up:before {\n  content: \"\\e133\";\n}\n.glyphicon-circle-arrow-down:before {\n  content: \"\\e134\";\n}\n.glyphicon-globe:before {\n  content: \"\\e135\";\n}\n.glyphicon-wrench:before {\n  content: \"\\e136\";\n}\n.glyphicon-tasks:before {\n  content: \"\\e137\";\n}\n.glyphicon-filter:before {\n  content: \"\\e138\";\n}\n.glyphicon-briefcase:before {\n  content: \"\\e139\";\n}\n.glyphicon-fullscreen:before {\n  content: \"\\e140\";\n}\n.glyphicon-dashboard:before {\n  content: \"\\e141\";\n}\n.glyphicon-paperclip:before {\n  content: \"\\e142\";\n}\n.glyphicon-heart-empty:before {\n  content: \"\\e143\";\n}\n.glyphicon-link:before {\n  content: \"\\e144\";\n}\n.glyphicon-phone:before {\n  content: \"\\e145\";\n}\n.glyphicon-pushpin:before {\n  content: \"\\e146\";\n}\n.glyphicon-usd:before {\n  content: \"\\e148\";\n}\n.glyphicon-gbp:before {\n  content: 
\"\\e149\";\n}\n.glyphicon-sort:before {\n  content: \"\\e150\";\n}\n.glyphicon-sort-by-alphabet:before {\n  content: \"\\e151\";\n}\n.glyphicon-sort-by-alphabet-alt:before {\n  content: \"\\e152\";\n}\n.glyphicon-sort-by-order:before {\n  content: \"\\e153\";\n}\n.glyphicon-sort-by-order-alt:before {\n  content: \"\\e154\";\n}\n.glyphicon-sort-by-attributes:before {\n  content: \"\\e155\";\n}\n.glyphicon-sort-by-attributes-alt:before {\n  content: \"\\e156\";\n}\n.glyphicon-unchecked:before {\n  content: \"\\e157\";\n}\n.glyphicon-expand:before {\n  content: \"\\e158\";\n}\n.glyphicon-collapse-down:before {\n  content: \"\\e159\";\n}\n.glyphicon-collapse-up:before {\n  content: \"\\e160\";\n}\n.glyphicon-log-in:before {\n  content: \"\\e161\";\n}\n.glyphicon-flash:before {\n  content: \"\\e162\";\n}\n.glyphicon-log-out:before {\n  content: \"\\e163\";\n}\n.glyphicon-new-window:before {\n  content: \"\\e164\";\n}\n.glyphicon-record:before {\n  content: \"\\e165\";\n}\n.glyphicon-save:before {\n  content: \"\\e166\";\n}\n.glyphicon-open:before {\n  content: \"\\e167\";\n}\n.glyphicon-saved:before {\n  content: \"\\e168\";\n}\n.glyphicon-import:before {\n  content: \"\\e169\";\n}\n.glyphicon-export:before {\n  content: \"\\e170\";\n}\n.glyphicon-send:before {\n  content: \"\\e171\";\n}\n.glyphicon-floppy-disk:before {\n  content: \"\\e172\";\n}\n.glyphicon-floppy-saved:before {\n  content: \"\\e173\";\n}\n.glyphicon-floppy-remove:before {\n  content: \"\\e174\";\n}\n.glyphicon-floppy-save:before {\n  content: \"\\e175\";\n}\n.glyphicon-floppy-open:before {\n  content: \"\\e176\";\n}\n.glyphicon-credit-card:before {\n  content: \"\\e177\";\n}\n.glyphicon-transfer:before {\n  content: \"\\e178\";\n}\n.glyphicon-cutlery:before {\n  content: \"\\e179\";\n}\n.glyphicon-header:before {\n  content: \"\\e180\";\n}\n.glyphicon-compressed:before {\n  content: \"\\e181\";\n}\n.glyphicon-earphone:before {\n  content: \"\\e182\";\n}\n.glyphicon-phone-alt:before {\n  content: \"\\e183\";\n}\n.glyphicon-tower:before {\n  content: \"\\e184\";\n}\n.glyphicon-stats:before {\n  content: \"\\e185\";\n}\n.glyphicon-sd-video:before {\n  content: \"\\e186\";\n}\n.glyphicon-hd-video:before {\n  content: \"\\e187\";\n}\n.glyphicon-subtitles:before {\n  content: \"\\e188\";\n}\n.glyphicon-sound-stereo:before {\n  content: \"\\e189\";\n}\n.glyphicon-sound-dolby:before {\n  content: \"\\e190\";\n}\n.glyphicon-sound-5-1:before {\n  content: \"\\e191\";\n}\n.glyphicon-sound-6-1:before {\n  content: \"\\e192\";\n}\n.glyphicon-sound-7-1:before {\n  content: \"\\e193\";\n}\n.glyphicon-copyright-mark:before {\n  content: \"\\e194\";\n}\n.glyphicon-registration-mark:before {\n  content: \"\\e195\";\n}\n.glyphicon-cloud-download:before {\n  content: \"\\e197\";\n}\n.glyphicon-cloud-upload:before {\n  content: \"\\e198\";\n}\n.glyphicon-tree-conifer:before {\n  content: \"\\e199\";\n}\n.glyphicon-tree-deciduous:before {\n  content: \"\\e200\";\n}\n.glyphicon-cd:before {\n  content: \"\\e201\";\n}\n.glyphicon-save-file:before {\n  content: \"\\e202\";\n}\n.glyphicon-open-file:before {\n  content: \"\\e203\";\n}\n.glyphicon-level-up:before {\n  content: \"\\e204\";\n}\n.glyphicon-copy:before {\n  content: \"\\e205\";\n}\n.glyphicon-paste:before {\n  content: \"\\e206\";\n}\n.glyphicon-alert:before {\n  content: \"\\e209\";\n}\n.glyphicon-equalizer:before {\n  content: \"\\e210\";\n}\n.glyphicon-king:before {\n  content: \"\\e211\";\n}\n.glyphicon-queen:before {\n  content: \"\\e212\";\n}\n.glyphicon-pawn:before {\n  content: 
\"\\e213\";\n}\n.glyphicon-bishop:before {\n  content: \"\\e214\";\n}\n.glyphicon-knight:before {\n  content: \"\\e215\";\n}\n.glyphicon-baby-formula:before {\n  content: \"\\e216\";\n}\n.glyphicon-tent:before {\n  content: \"\\26fa\";\n}\n.glyphicon-blackboard:before {\n  content: \"\\e218\";\n}\n.glyphicon-bed:before {\n  content: \"\\e219\";\n}\n.glyphicon-apple:before {\n  content: \"\\f8ff\";\n}\n.glyphicon-erase:before {\n  content: \"\\e221\";\n}\n.glyphicon-hourglass:before {\n  content: \"\\231b\";\n}\n.glyphicon-lamp:before {\n  content: \"\\e223\";\n}\n.glyphicon-duplicate:before {\n  content: \"\\e224\";\n}\n.glyphicon-piggy-bank:before {\n  content: \"\\e225\";\n}\n.glyphicon-scissors:before {\n  content: \"\\e226\";\n}\n.glyphicon-bitcoin:before {\n  content: \"\\e227\";\n}\n.glyphicon-btc:before {\n  content: \"\\e227\";\n}\n.glyphicon-xbt:before {\n  content: \"\\e227\";\n}\n.glyphicon-yen:before {\n  content: \"\\00a5\";\n}\n.glyphicon-jpy:before {\n  content: \"\\00a5\";\n}\n.glyphicon-ruble:before {\n  content: \"\\20bd\";\n}\n.glyphicon-rub:before {\n  content: \"\\20bd\";\n}\n.glyphicon-scale:before {\n  content: \"\\e230\";\n}\n.glyphicon-ice-lolly:before {\n  content: \"\\e231\";\n}\n.glyphicon-ice-lolly-tasted:before {\n  content: \"\\e232\";\n}\n.glyphicon-education:before {\n  content: \"\\e233\";\n}\n.glyphicon-option-horizontal:before {\n  content: \"\\e234\";\n}\n.glyphicon-option-vertical:before {\n  content: \"\\e235\";\n}\n.glyphicon-menu-hamburger:before {\n  content: \"\\e236\";\n}\n.glyphicon-modal-window:before {\n  content: \"\\e237\";\n}\n.glyphicon-oil:before {\n  content: \"\\e238\";\n}\n.glyphicon-grain:before {\n  content: \"\\e239\";\n}\n.glyphicon-sunglasses:before {\n  content: \"\\e240\";\n}\n.glyphicon-text-size:before {\n  content: \"\\e241\";\n}\n.glyphicon-text-color:before {\n  content: \"\\e242\";\n}\n.glyphicon-text-background:before {\n  content: \"\\e243\";\n}\n.glyphicon-object-align-top:before {\n  content: \"\\e244\";\n}\n.glyphicon-object-align-bottom:before {\n  content: \"\\e245\";\n}\n.glyphicon-object-align-horizontal:before {\n  content: \"\\e246\";\n}\n.glyphicon-object-align-left:before {\n  content: \"\\e247\";\n}\n.glyphicon-object-align-vertical:before {\n  content: \"\\e248\";\n}\n.glyphicon-object-align-right:before {\n  content: \"\\e249\";\n}\n.glyphicon-triangle-right:before {\n  content: \"\\e250\";\n}\n.glyphicon-triangle-left:before {\n  content: \"\\e251\";\n}\n.glyphicon-triangle-bottom:before {\n  content: \"\\e252\";\n}\n.glyphicon-triangle-top:before {\n  content: \"\\e253\";\n}\n.glyphicon-console:before {\n  content: \"\\e254\";\n}\n.glyphicon-superscript:before {\n  content: \"\\e255\";\n}\n.glyphicon-subscript:before {\n  content: \"\\e256\";\n}\n.glyphicon-menu-left:before {\n  content: \"\\e257\";\n}\n.glyphicon-menu-right:before {\n  content: \"\\e258\";\n}\n.glyphicon-menu-down:before {\n  content: \"\\e259\";\n}\n.glyphicon-menu-up:before {\n  content: \"\\e260\";\n}\n* {\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n}\n*:before,\n*:after {\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n}\nhtml {\n  font-size: 10px;\n  -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\nbody {\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #333333;\n  background-color: #fff;\n}\ninput,\nbutton,\nselect,\ntextarea {\n  font-family: inherit;\n  
font-size: inherit;\n  line-height: inherit;\n}\na {\n  color: #337ab7;\n  text-decoration: none;\n}\na:hover,\na:focus {\n  color: #23527c;\n  text-decoration: underline;\n}\na:focus {\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\nfigure {\n  margin: 0;\n}\nimg {\n  vertical-align: middle;\n}\n.img-responsive,\n.thumbnail > img,\n.thumbnail a > img,\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n  display: block;\n  max-width: 100%;\n  height: auto;\n}\n.img-rounded {\n  border-radius: 6px;\n}\n.img-thumbnail {\n  padding: 4px;\n  line-height: 1.42857143;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: all 0.2s ease-in-out;\n  -o-transition: all 0.2s ease-in-out;\n  transition: all 0.2s ease-in-out;\n  display: inline-block;\n  max-width: 100%;\n  height: auto;\n}\n.img-circle {\n  border-radius: 50%;\n}\nhr {\n  margin-top: 20px;\n  margin-bottom: 20px;\n  border: 0;\n  border-top: 1px solid #eeeeee;\n}\n.sr-only {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  padding: 0;\n  margin: -1px;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n}\n.sr-only-focusable:active,\n.sr-only-focusable:focus {\n  position: static;\n  width: auto;\n  height: auto;\n  margin: 0;\n  overflow: visible;\n  clip: auto;\n}\n[role=\"button\"] {\n  cursor: pointer;\n}\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\n.h1,\n.h2,\n.h3,\n.h4,\n.h5,\n.h6 {\n  font-family: inherit;\n  font-weight: 500;\n  line-height: 1.1;\n  color: inherit;\n}\nh1 small,\nh2 small,\nh3 small,\nh4 small,\nh5 small,\nh6 small,\n.h1 small,\n.h2 small,\n.h3 small,\n.h4 small,\n.h5 small,\n.h6 small,\nh1 .small,\nh2 .small,\nh3 .small,\nh4 .small,\nh5 .small,\nh6 .small,\n.h1 .small,\n.h2 .small,\n.h3 .small,\n.h4 .small,\n.h5 .small,\n.h6 .small {\n  font-weight: 400;\n  line-height: 1;\n  color: #777777;\n}\nh1,\n.h1,\nh2,\n.h2,\nh3,\n.h3 {\n  margin-top: 20px;\n  margin-bottom: 10px;\n}\nh1 small,\n.h1 small,\nh2 small,\n.h2 small,\nh3 small,\n.h3 small,\nh1 .small,\n.h1 .small,\nh2 .small,\n.h2 .small,\nh3 .small,\n.h3 .small {\n  font-size: 65%;\n}\nh4,\n.h4,\nh5,\n.h5,\nh6,\n.h6 {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\nh4 small,\n.h4 small,\nh5 small,\n.h5 small,\nh6 small,\n.h6 small,\nh4 .small,\n.h4 .small,\nh5 .small,\n.h5 .small,\nh6 .small,\n.h6 .small {\n  font-size: 75%;\n}\nh1,\n.h1 {\n  font-size: 36px;\n}\nh2,\n.h2 {\n  font-size: 30px;\n}\nh3,\n.h3 {\n  font-size: 24px;\n}\nh4,\n.h4 {\n  font-size: 18px;\n}\nh5,\n.h5 {\n  font-size: 14px;\n}\nh6,\n.h6 {\n  font-size: 12px;\n}\np {\n  margin: 0 0 10px;\n}\n.lead {\n  margin-bottom: 20px;\n  font-size: 16px;\n  font-weight: 300;\n  line-height: 1.4;\n}\n@media (min-width: 768px) {\n  .lead {\n    font-size: 21px;\n  }\n}\nsmall,\n.small {\n  font-size: 85%;\n}\nmark,\n.mark {\n  padding: 0.2em;\n  background-color: #fcf8e3;\n}\n.text-left {\n  text-align: left;\n}\n.text-right {\n  text-align: right;\n}\n.text-center {\n  text-align: center;\n}\n.text-justify {\n  text-align: justify;\n}\n.text-nowrap {\n  white-space: nowrap;\n}\n.text-lowercase {\n  text-transform: lowercase;\n}\n.text-uppercase {\n  text-transform: uppercase;\n}\n.text-capitalize {\n  text-transform: capitalize;\n}\n.text-muted {\n  color: #777777;\n}\n.text-primary {\n  color: #337ab7;\n}\na.text-primary:hover,\na.text-primary:focus {\n  color: #286090;\n}\n.text-success {\n  color: #3c763d;\n}\na.text-success:hover,\na.text-success:focus {\n  color: #2b542c;\n}\n.text-info {\n  
color: #31708f;\n}\na.text-info:hover,\na.text-info:focus {\n  color: #245269;\n}\n.text-warning {\n  color: #8a6d3b;\n}\na.text-warning:hover,\na.text-warning:focus {\n  color: #66512c;\n}\n.text-danger {\n  color: #a94442;\n}\na.text-danger:hover,\na.text-danger:focus {\n  color: #843534;\n}\n.bg-primary {\n  color: #fff;\n  background-color: #337ab7;\n}\na.bg-primary:hover,\na.bg-primary:focus {\n  background-color: #286090;\n}\n.bg-success {\n  background-color: #dff0d8;\n}\na.bg-success:hover,\na.bg-success:focus {\n  background-color: #c1e2b3;\n}\n.bg-info {\n  background-color: #d9edf7;\n}\na.bg-info:hover,\na.bg-info:focus {\n  background-color: #afd9ee;\n}\n.bg-warning {\n  background-color: #fcf8e3;\n}\na.bg-warning:hover,\na.bg-warning:focus {\n  background-color: #f7ecb5;\n}\n.bg-danger {\n  background-color: #f2dede;\n}\na.bg-danger:hover,\na.bg-danger:focus {\n  background-color: #e4b9b9;\n}\n.page-header {\n  padding-bottom: 9px;\n  margin: 40px 0 20px;\n  border-bottom: 1px solid #eeeeee;\n}\nul,\nol {\n  margin-top: 0;\n  margin-bottom: 10px;\n}\nul ul,\nol ul,\nul ol,\nol ol {\n  margin-bottom: 0;\n}\n.list-unstyled {\n  padding-left: 0;\n  list-style: none;\n}\n.list-inline {\n  padding-left: 0;\n  list-style: none;\n  margin-left: -5px;\n}\n.list-inline > li {\n  display: inline-block;\n  padding-right: 5px;\n  padding-left: 5px;\n}\ndl {\n  margin-top: 0;\n  margin-bottom: 20px;\n}\ndt,\ndd {\n  line-height: 1.42857143;\n}\ndt {\n  font-weight: 700;\n}\ndd {\n  margin-left: 0;\n}\n@media (min-width: 768px) {\n  .dl-horizontal dt {\n    float: left;\n    width: 160px;\n    clear: left;\n    text-align: right;\n    overflow: hidden;\n    text-overflow: ellipsis;\n    white-space: nowrap;\n  }\n  .dl-horizontal dd {\n    margin-left: 180px;\n  }\n}\nabbr[title],\nabbr[data-original-title] {\n  cursor: help;\n}\n.initialism {\n  font-size: 90%;\n  text-transform: uppercase;\n}\nblockquote {\n  padding: 10px 20px;\n  margin: 0 0 20px;\n  font-size: 17.5px;\n  border-left: 5px solid #eeeeee;\n}\nblockquote p:last-child,\nblockquote ul:last-child,\nblockquote ol:last-child {\n  margin-bottom: 0;\n}\nblockquote footer,\nblockquote small,\nblockquote .small {\n  display: block;\n  font-size: 80%;\n  line-height: 1.42857143;\n  color: #777777;\n}\nblockquote footer:before,\nblockquote small:before,\nblockquote .small:before {\n  content: \"\\2014 \\00A0\";\n}\n.blockquote-reverse,\nblockquote.pull-right {\n  padding-right: 15px;\n  padding-left: 0;\n  text-align: right;\n  border-right: 5px solid #eeeeee;\n  border-left: 0;\n}\n.blockquote-reverse footer:before,\nblockquote.pull-right footer:before,\n.blockquote-reverse small:before,\nblockquote.pull-right small:before,\n.blockquote-reverse .small:before,\nblockquote.pull-right .small:before {\n  content: \"\";\n}\n.blockquote-reverse footer:after,\nblockquote.pull-right footer:after,\n.blockquote-reverse small:after,\nblockquote.pull-right small:after,\n.blockquote-reverse .small:after,\nblockquote.pull-right .small:after {\n  content: \"\\00A0 \\2014\";\n}\naddress {\n  margin-bottom: 20px;\n  font-style: normal;\n  line-height: 1.42857143;\n}\ncode,\nkbd,\npre,\nsamp {\n  font-family: Menlo, Monaco, Consolas, \"Courier New\", monospace;\n}\ncode {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: #c7254e;\n  background-color: #f9f2f4;\n  border-radius: 4px;\n}\nkbd {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: #fff;\n  background-color: #333;\n  border-radius: 3px;\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 
0.25);\n}\nkbd kbd {\n  padding: 0;\n  font-size: 100%;\n  font-weight: 700;\n  box-shadow: none;\n}\npre {\n  display: block;\n  padding: 9.5px;\n  margin: 0 0 10px;\n  font-size: 13px;\n  line-height: 1.42857143;\n  color: #333333;\n  word-break: break-all;\n  word-wrap: break-word;\n  background-color: #f5f5f5;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\npre code {\n  padding: 0;\n  font-size: inherit;\n  color: inherit;\n  white-space: pre-wrap;\n  background-color: transparent;\n  border-radius: 0;\n}\n.pre-scrollable {\n  max-height: 340px;\n  overflow-y: scroll;\n}\n.container {\n  padding-right: 15px;\n  padding-left: 15px;\n  margin-right: auto;\n  margin-left: auto;\n}\n@media (min-width: 768px) {\n  .container {\n    width: 750px;\n  }\n}\n@media (min-width: 992px) {\n  .container {\n    width: 970px;\n  }\n}\n@media (min-width: 1200px) {\n  .container {\n    width: 1170px;\n  }\n}\n.container-fluid {\n  padding-right: 15px;\n  padding-left: 15px;\n  margin-right: auto;\n  margin-left: auto;\n}\n.row {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n.row-no-gutters {\n  margin-right: 0;\n  margin-left: 0;\n}\n.row-no-gutters [class*=\"col-\"] {\n  padding-right: 0;\n  padding-left: 0;\n}\n.col-xs-1,\n.col-sm-1,\n.col-md-1,\n.col-lg-1,\n.col-xs-2,\n.col-sm-2,\n.col-md-2,\n.col-lg-2,\n.col-xs-3,\n.col-sm-3,\n.col-md-3,\n.col-lg-3,\n.col-xs-4,\n.col-sm-4,\n.col-md-4,\n.col-lg-4,\n.col-xs-5,\n.col-sm-5,\n.col-md-5,\n.col-lg-5,\n.col-xs-6,\n.col-sm-6,\n.col-md-6,\n.col-lg-6,\n.col-xs-7,\n.col-sm-7,\n.col-md-7,\n.col-lg-7,\n.col-xs-8,\n.col-sm-8,\n.col-md-8,\n.col-lg-8,\n.col-xs-9,\n.col-sm-9,\n.col-md-9,\n.col-lg-9,\n.col-xs-10,\n.col-sm-10,\n.col-md-10,\n.col-lg-10,\n.col-xs-11,\n.col-sm-11,\n.col-md-11,\n.col-lg-11,\n.col-xs-12,\n.col-sm-12,\n.col-md-12,\n.col-lg-12 {\n  position: relative;\n  min-height: 1px;\n  padding-right: 15px;\n  padding-left: 15px;\n}\n.col-xs-1,\n.col-xs-2,\n.col-xs-3,\n.col-xs-4,\n.col-xs-5,\n.col-xs-6,\n.col-xs-7,\n.col-xs-8,\n.col-xs-9,\n.col-xs-10,\n.col-xs-11,\n.col-xs-12 {\n  float: left;\n}\n.col-xs-12 {\n  width: 100%;\n}\n.col-xs-11 {\n  width: 91.66666667%;\n}\n.col-xs-10 {\n  width: 83.33333333%;\n}\n.col-xs-9 {\n  width: 75%;\n}\n.col-xs-8 {\n  width: 66.66666667%;\n}\n.col-xs-7 {\n  width: 58.33333333%;\n}\n.col-xs-6 {\n  width: 50%;\n}\n.col-xs-5 {\n  width: 41.66666667%;\n}\n.col-xs-4 {\n  width: 33.33333333%;\n}\n.col-xs-3 {\n  width: 25%;\n}\n.col-xs-2 {\n  width: 16.66666667%;\n}\n.col-xs-1 {\n  width: 8.33333333%;\n}\n.col-xs-pull-12 {\n  right: 100%;\n}\n.col-xs-pull-11 {\n  right: 91.66666667%;\n}\n.col-xs-pull-10 {\n  right: 83.33333333%;\n}\n.col-xs-pull-9 {\n  right: 75%;\n}\n.col-xs-pull-8 {\n  right: 66.66666667%;\n}\n.col-xs-pull-7 {\n  right: 58.33333333%;\n}\n.col-xs-pull-6 {\n  right: 50%;\n}\n.col-xs-pull-5 {\n  right: 41.66666667%;\n}\n.col-xs-pull-4 {\n  right: 33.33333333%;\n}\n.col-xs-pull-3 {\n  right: 25%;\n}\n.col-xs-pull-2 {\n  right: 16.66666667%;\n}\n.col-xs-pull-1 {\n  right: 8.33333333%;\n}\n.col-xs-pull-0 {\n  right: auto;\n}\n.col-xs-push-12 {\n  left: 100%;\n}\n.col-xs-push-11 {\n  left: 91.66666667%;\n}\n.col-xs-push-10 {\n  left: 83.33333333%;\n}\n.col-xs-push-9 {\n  left: 75%;\n}\n.col-xs-push-8 {\n  left: 66.66666667%;\n}\n.col-xs-push-7 {\n  left: 58.33333333%;\n}\n.col-xs-push-6 {\n  left: 50%;\n}\n.col-xs-push-5 {\n  left: 41.66666667%;\n}\n.col-xs-push-4 {\n  left: 33.33333333%;\n}\n.col-xs-push-3 {\n  left: 25%;\n}\n.col-xs-push-2 {\n  left: 16.66666667%;\n}\n.col-xs-push-1 {\n  left: 
8.33333333%;\n}\n.col-xs-push-0 {\n  left: auto;\n}\n.col-xs-offset-12 {\n  margin-left: 100%;\n}\n.col-xs-offset-11 {\n  margin-left: 91.66666667%;\n}\n.col-xs-offset-10 {\n  margin-left: 83.33333333%;\n}\n.col-xs-offset-9 {\n  margin-left: 75%;\n}\n.col-xs-offset-8 {\n  margin-left: 66.66666667%;\n}\n.col-xs-offset-7 {\n  margin-left: 58.33333333%;\n}\n.col-xs-offset-6 {\n  margin-left: 50%;\n}\n.col-xs-offset-5 {\n  margin-left: 41.66666667%;\n}\n.col-xs-offset-4 {\n  margin-left: 33.33333333%;\n}\n.col-xs-offset-3 {\n  margin-left: 25%;\n}\n.col-xs-offset-2 {\n  margin-left: 16.66666667%;\n}\n.col-xs-offset-1 {\n  margin-left: 8.33333333%;\n}\n.col-xs-offset-0 {\n  margin-left: 0%;\n}\n@media (min-width: 768px) {\n  .col-sm-1,\n  .col-sm-2,\n  .col-sm-3,\n  .col-sm-4,\n  .col-sm-5,\n  .col-sm-6,\n  .col-sm-7,\n  .col-sm-8,\n  .col-sm-9,\n  .col-sm-10,\n  .col-sm-11,\n  .col-sm-12 {\n    float: left;\n  }\n  .col-sm-12 {\n    width: 100%;\n  }\n  .col-sm-11 {\n    width: 91.66666667%;\n  }\n  .col-sm-10 {\n    width: 83.33333333%;\n  }\n  .col-sm-9 {\n    width: 75%;\n  }\n  .col-sm-8 {\n    width: 66.66666667%;\n  }\n  .col-sm-7 {\n    width: 58.33333333%;\n  }\n  .col-sm-6 {\n    width: 50%;\n  }\n  .col-sm-5 {\n    width: 41.66666667%;\n  }\n  .col-sm-4 {\n    width: 33.33333333%;\n  }\n  .col-sm-3 {\n    width: 25%;\n  }\n  .col-sm-2 {\n    width: 16.66666667%;\n  }\n  .col-sm-1 {\n    width: 8.33333333%;\n  }\n  .col-sm-pull-12 {\n    right: 100%;\n  }\n  .col-sm-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-sm-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-sm-pull-9 {\n    right: 75%;\n  }\n  .col-sm-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-sm-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-sm-pull-6 {\n    right: 50%;\n  }\n  .col-sm-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-sm-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-sm-pull-3 {\n    right: 25%;\n  }\n  .col-sm-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-sm-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-sm-pull-0 {\n    right: auto;\n  }\n  .col-sm-push-12 {\n    left: 100%;\n  }\n  .col-sm-push-11 {\n    left: 91.66666667%;\n  }\n  .col-sm-push-10 {\n    left: 83.33333333%;\n  }\n  .col-sm-push-9 {\n    left: 75%;\n  }\n  .col-sm-push-8 {\n    left: 66.66666667%;\n  }\n  .col-sm-push-7 {\n    left: 58.33333333%;\n  }\n  .col-sm-push-6 {\n    left: 50%;\n  }\n  .col-sm-push-5 {\n    left: 41.66666667%;\n  }\n  .col-sm-push-4 {\n    left: 33.33333333%;\n  }\n  .col-sm-push-3 {\n    left: 25%;\n  }\n  .col-sm-push-2 {\n    left: 16.66666667%;\n  }\n  .col-sm-push-1 {\n    left: 8.33333333%;\n  }\n  .col-sm-push-0 {\n    left: auto;\n  }\n  .col-sm-offset-12 {\n    margin-left: 100%;\n  }\n  .col-sm-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-sm-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-sm-offset-9 {\n    margin-left: 75%;\n  }\n  .col-sm-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-sm-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-sm-offset-6 {\n    margin-left: 50%;\n  }\n  .col-sm-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-sm-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-sm-offset-3 {\n    margin-left: 25%;\n  }\n  .col-sm-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-sm-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-sm-offset-0 {\n    margin-left: 0%;\n  }\n}\n@media (min-width: 992px) {\n  .col-md-1,\n  .col-md-2,\n  .col-md-3,\n  .col-md-4,\n  .col-md-5,\n  .col-md-6,\n  .col-md-7,\n  .col-md-8,\n  
.col-md-9,\n  .col-md-10,\n  .col-md-11,\n  .col-md-12 {\n    float: left;\n  }\n  .col-md-12 {\n    width: 100%;\n  }\n  .col-md-11 {\n    width: 91.66666667%;\n  }\n  .col-md-10 {\n    width: 83.33333333%;\n  }\n  .col-md-9 {\n    width: 75%;\n  }\n  .col-md-8 {\n    width: 66.66666667%;\n  }\n  .col-md-7 {\n    width: 58.33333333%;\n  }\n  .col-md-6 {\n    width: 50%;\n  }\n  .col-md-5 {\n    width: 41.66666667%;\n  }\n  .col-md-4 {\n    width: 33.33333333%;\n  }\n  .col-md-3 {\n    width: 25%;\n  }\n  .col-md-2 {\n    width: 16.66666667%;\n  }\n  .col-md-1 {\n    width: 8.33333333%;\n  }\n  .col-md-pull-12 {\n    right: 100%;\n  }\n  .col-md-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-md-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-md-pull-9 {\n    right: 75%;\n  }\n  .col-md-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-md-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-md-pull-6 {\n    right: 50%;\n  }\n  .col-md-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-md-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-md-pull-3 {\n    right: 25%;\n  }\n  .col-md-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-md-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-md-pull-0 {\n    right: auto;\n  }\n  .col-md-push-12 {\n    left: 100%;\n  }\n  .col-md-push-11 {\n    left: 91.66666667%;\n  }\n  .col-md-push-10 {\n    left: 83.33333333%;\n  }\n  .col-md-push-9 {\n    left: 75%;\n  }\n  .col-md-push-8 {\n    left: 66.66666667%;\n  }\n  .col-md-push-7 {\n    left: 58.33333333%;\n  }\n  .col-md-push-6 {\n    left: 50%;\n  }\n  .col-md-push-5 {\n    left: 41.66666667%;\n  }\n  .col-md-push-4 {\n    left: 33.33333333%;\n  }\n  .col-md-push-3 {\n    left: 25%;\n  }\n  .col-md-push-2 {\n    left: 16.66666667%;\n  }\n  .col-md-push-1 {\n    left: 8.33333333%;\n  }\n  .col-md-push-0 {\n    left: auto;\n  }\n  .col-md-offset-12 {\n    margin-left: 100%;\n  }\n  .col-md-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-md-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-md-offset-9 {\n    margin-left: 75%;\n  }\n  .col-md-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-md-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-md-offset-6 {\n    margin-left: 50%;\n  }\n  .col-md-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-md-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-md-offset-3 {\n    margin-left: 25%;\n  }\n  .col-md-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-md-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-md-offset-0 {\n    margin-left: 0%;\n  }\n}\n@media (min-width: 1200px) {\n  .col-lg-1,\n  .col-lg-2,\n  .col-lg-3,\n  .col-lg-4,\n  .col-lg-5,\n  .col-lg-6,\n  .col-lg-7,\n  .col-lg-8,\n  .col-lg-9,\n  .col-lg-10,\n  .col-lg-11,\n  .col-lg-12 {\n    float: left;\n  }\n  .col-lg-12 {\n    width: 100%;\n  }\n  .col-lg-11 {\n    width: 91.66666667%;\n  }\n  .col-lg-10 {\n    width: 83.33333333%;\n  }\n  .col-lg-9 {\n    width: 75%;\n  }\n  .col-lg-8 {\n    width: 66.66666667%;\n  }\n  .col-lg-7 {\n    width: 58.33333333%;\n  }\n  .col-lg-6 {\n    width: 50%;\n  }\n  .col-lg-5 {\n    width: 41.66666667%;\n  }\n  .col-lg-4 {\n    width: 33.33333333%;\n  }\n  .col-lg-3 {\n    width: 25%;\n  }\n  .col-lg-2 {\n    width: 16.66666667%;\n  }\n  .col-lg-1 {\n    width: 8.33333333%;\n  }\n  .col-lg-pull-12 {\n    right: 100%;\n  }\n  .col-lg-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-lg-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-lg-pull-9 {\n    right: 75%;\n  }\n  .col-lg-pull-8 {\n    right: 66.66666667%;\n  }\n  
.col-lg-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-lg-pull-6 {\n    right: 50%;\n  }\n  .col-lg-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-lg-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-lg-pull-3 {\n    right: 25%;\n  }\n  .col-lg-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-lg-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-lg-pull-0 {\n    right: auto;\n  }\n  .col-lg-push-12 {\n    left: 100%;\n  }\n  .col-lg-push-11 {\n    left: 91.66666667%;\n  }\n  .col-lg-push-10 {\n    left: 83.33333333%;\n  }\n  .col-lg-push-9 {\n    left: 75%;\n  }\n  .col-lg-push-8 {\n    left: 66.66666667%;\n  }\n  .col-lg-push-7 {\n    left: 58.33333333%;\n  }\n  .col-lg-push-6 {\n    left: 50%;\n  }\n  .col-lg-push-5 {\n    left: 41.66666667%;\n  }\n  .col-lg-push-4 {\n    left: 33.33333333%;\n  }\n  .col-lg-push-3 {\n    left: 25%;\n  }\n  .col-lg-push-2 {\n    left: 16.66666667%;\n  }\n  .col-lg-push-1 {\n    left: 8.33333333%;\n  }\n  .col-lg-push-0 {\n    left: auto;\n  }\n  .col-lg-offset-12 {\n    margin-left: 100%;\n  }\n  .col-lg-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-lg-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-lg-offset-9 {\n    margin-left: 75%;\n  }\n  .col-lg-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-lg-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-lg-offset-6 {\n    margin-left: 50%;\n  }\n  .col-lg-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-lg-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-lg-offset-3 {\n    margin-left: 25%;\n  }\n  .col-lg-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-lg-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-lg-offset-0 {\n    margin-left: 0%;\n  }\n}\ntable {\n  background-color: transparent;\n}\ntable col[class*=\"col-\"] {\n  position: static;\n  display: table-column;\n  float: none;\n}\ntable td[class*=\"col-\"],\ntable th[class*=\"col-\"] {\n  position: static;\n  display: table-cell;\n  float: none;\n}\ncaption {\n  padding-top: 8px;\n  padding-bottom: 8px;\n  color: #777777;\n  text-align: left;\n}\nth {\n  text-align: left;\n}\n.table {\n  width: 100%;\n  max-width: 100%;\n  margin-bottom: 20px;\n}\n.table > thead > tr > th,\n.table > tbody > tr > th,\n.table > tfoot > tr > th,\n.table > thead > tr > td,\n.table > tbody > tr > td,\n.table > tfoot > tr > td {\n  padding: 8px;\n  line-height: 1.42857143;\n  vertical-align: top;\n  border-top: 1px solid #ddd;\n}\n.table > thead > tr > th {\n  vertical-align: bottom;\n  border-bottom: 2px solid #ddd;\n}\n.table > caption + thead > tr:first-child > th,\n.table > colgroup + thead > tr:first-child > th,\n.table > thead:first-child > tr:first-child > th,\n.table > caption + thead > tr:first-child > td,\n.table > colgroup + thead > tr:first-child > td,\n.table > thead:first-child > tr:first-child > td {\n  border-top: 0;\n}\n.table > tbody + tbody {\n  border-top: 2px solid #ddd;\n}\n.table .table {\n  background-color: #fff;\n}\n.table-condensed > thead > tr > th,\n.table-condensed > tbody > tr > th,\n.table-condensed > tfoot > tr > th,\n.table-condensed > thead > tr > td,\n.table-condensed > tbody > tr > td,\n.table-condensed > tfoot > tr > td {\n  padding: 5px;\n}\n.table-bordered {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > tbody > tr > th,\n.table-bordered > tfoot > tr > th,\n.table-bordered > thead > tr > td,\n.table-bordered > tbody > tr > td,\n.table-bordered > tfoot > tr > td {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > 
th,\n.table-bordered > thead > tr > td {\n  border-bottom-width: 2px;\n}\n.table-striped > tbody > tr:nth-of-type(odd) {\n  background-color: #f9f9f9;\n}\n.table-hover > tbody > tr:hover {\n  background-color: #f5f5f5;\n}\n.table > thead > tr > td.active,\n.table > tbody > tr > td.active,\n.table > tfoot > tr > td.active,\n.table > thead > tr > th.active,\n.table > tbody > tr > th.active,\n.table > tfoot > tr > th.active,\n.table > thead > tr.active > td,\n.table > tbody > tr.active > td,\n.table > tfoot > tr.active > td,\n.table > thead > tr.active > th,\n.table > tbody > tr.active > th,\n.table > tfoot > tr.active > th {\n  background-color: #f5f5f5;\n}\n.table-hover > tbody > tr > td.active:hover,\n.table-hover > tbody > tr > th.active:hover,\n.table-hover > tbody > tr.active:hover > td,\n.table-hover > tbody > tr:hover > .active,\n.table-hover > tbody > tr.active:hover > th {\n  background-color: #e8e8e8;\n}\n.table > thead > tr > td.success,\n.table > tbody > tr > td.success,\n.table > tfoot > tr > td.success,\n.table > thead > tr > th.success,\n.table > tbody > tr > th.success,\n.table > tfoot > tr > th.success,\n.table > thead > tr.success > td,\n.table > tbody > tr.success > td,\n.table > tfoot > tr.success > td,\n.table > thead > tr.success > th,\n.table > tbody > tr.success > th,\n.table > tfoot > tr.success > th {\n  background-color: #dff0d8;\n}\n.table-hover > tbody > tr > td.success:hover,\n.table-hover > tbody > tr > th.success:hover,\n.table-hover > tbody > tr.success:hover > td,\n.table-hover > tbody > tr:hover > .success,\n.table-hover > tbody > tr.success:hover > th {\n  background-color: #d0e9c6;\n}\n.table > thead > tr > td.info,\n.table > tbody > tr > td.info,\n.table > tfoot > tr > td.info,\n.table > thead > tr > th.info,\n.table > tbody > tr > th.info,\n.table > tfoot > tr > th.info,\n.table > thead > tr.info > td,\n.table > tbody > tr.info > td,\n.table > tfoot > tr.info > td,\n.table > thead > tr.info > th,\n.table > tbody > tr.info > th,\n.table > tfoot > tr.info > th {\n  background-color: #d9edf7;\n}\n.table-hover > tbody > tr > td.info:hover,\n.table-hover > tbody > tr > th.info:hover,\n.table-hover > tbody > tr.info:hover > td,\n.table-hover > tbody > tr:hover > .info,\n.table-hover > tbody > tr.info:hover > th {\n  background-color: #c4e3f3;\n}\n.table > thead > tr > td.warning,\n.table > tbody > tr > td.warning,\n.table > tfoot > tr > td.warning,\n.table > thead > tr > th.warning,\n.table > tbody > tr > th.warning,\n.table > tfoot > tr > th.warning,\n.table > thead > tr.warning > td,\n.table > tbody > tr.warning > td,\n.table > tfoot > tr.warning > td,\n.table > thead > tr.warning > th,\n.table > tbody > tr.warning > th,\n.table > tfoot > tr.warning > th {\n  background-color: #fcf8e3;\n}\n.table-hover > tbody > tr > td.warning:hover,\n.table-hover > tbody > tr > th.warning:hover,\n.table-hover > tbody > tr.warning:hover > td,\n.table-hover > tbody > tr:hover > .warning,\n.table-hover > tbody > tr.warning:hover > th {\n  background-color: #faf2cc;\n}\n.table > thead > tr > td.danger,\n.table > tbody > tr > td.danger,\n.table > tfoot > tr > td.danger,\n.table > thead > tr > th.danger,\n.table > tbody > tr > th.danger,\n.table > tfoot > tr > th.danger,\n.table > thead > tr.danger > td,\n.table > tbody > tr.danger > td,\n.table > tfoot > tr.danger > td,\n.table > thead > tr.danger > th,\n.table > tbody > tr.danger > th,\n.table > tfoot > tr.danger > th {\n  background-color: #f2dede;\n}\n.table-hover > tbody > tr > td.danger:hover,\n.table-hover > tbody > tr > 
th.danger:hover,\n.table-hover > tbody > tr.danger:hover > td,\n.table-hover > tbody > tr:hover > .danger,\n.table-hover > tbody > tr.danger:hover > th {\n  background-color: #ebcccc;\n}\n.table-responsive {\n  min-height: 0.01%;\n  overflow-x: auto;\n}\n@media screen and (max-width: 767px) {\n  .table-responsive {\n    width: 100%;\n    margin-bottom: 15px;\n    overflow-y: hidden;\n    -ms-overflow-style: -ms-autohiding-scrollbar;\n    border: 1px solid #ddd;\n  }\n  .table-responsive > .table {\n    margin-bottom: 0;\n  }\n  .table-responsive > .table > thead > tr > th,\n  .table-responsive > .table > tbody > tr > th,\n  .table-responsive > .table > tfoot > tr > th,\n  .table-responsive > .table > thead > tr > td,\n  .table-responsive > .table > tbody > tr > td,\n  .table-responsive > .table > tfoot > tr > td {\n    white-space: nowrap;\n  }\n  .table-responsive > .table-bordered {\n    border: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:first-child,\n  .table-responsive > .table-bordered > tbody > tr > th:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n  .table-responsive > .table-bordered > thead > tr > td:first-child,\n  .table-responsive > .table-bordered > tbody > tr > td:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n    border-left: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:last-child,\n  .table-responsive > .table-bordered > tbody > tr > th:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n  .table-responsive > .table-bordered > thead > tr > td:last-child,\n  .table-responsive > .table-bordered > tbody > tr > td:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n    border-right: 0;\n  }\n  .table-responsive > .table-bordered > tbody > tr:last-child > th,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > th,\n  .table-responsive > .table-bordered > tbody > tr:last-child > td,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n    border-bottom: 0;\n  }\n}\nfieldset {\n  min-width: 0;\n  padding: 0;\n  margin: 0;\n  border: 0;\n}\nlegend {\n  display: block;\n  width: 100%;\n  padding: 0;\n  margin-bottom: 20px;\n  font-size: 21px;\n  line-height: inherit;\n  color: #333333;\n  border: 0;\n  border-bottom: 1px solid #e5e5e5;\n}\nlabel {\n  display: inline-block;\n  max-width: 100%;\n  margin-bottom: 5px;\n  font-weight: 700;\n}\ninput[type=\"search\"] {\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n  -webkit-appearance: none;\n  appearance: none;\n}\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n  margin: 4px 0 0;\n  margin-top: 1px \\9;\n  line-height: normal;\n}\ninput[type=\"radio\"][disabled],\ninput[type=\"checkbox\"][disabled],\ninput[type=\"radio\"].disabled,\ninput[type=\"checkbox\"].disabled,\nfieldset[disabled] input[type=\"radio\"],\nfieldset[disabled] input[type=\"checkbox\"] {\n  cursor: not-allowed;\n}\ninput[type=\"file\"] {\n  display: block;\n}\ninput[type=\"range\"] {\n  display: block;\n  width: 100%;\n}\nselect[multiple],\nselect[size] {\n  height: auto;\n}\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\noutput {\n  display: block;\n  padding-top: 7px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555555;\n}\n.form-control {\n  display: block;\n  width: 100%;\n  
height: 34px;\n  padding: 6px 12px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555555;\n  background-color: #fff;\n  background-image: none;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n  -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n  transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n}\n.form-control:focus {\n  border-color: #66afe9;\n  outline: 0;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);\n}\n.form-control::-moz-placeholder {\n  color: #999;\n  opacity: 1;\n}\n.form-control:-ms-input-placeholder {\n  color: #999;\n}\n.form-control::-webkit-input-placeholder {\n  color: #999;\n}\n.form-control::-ms-expand {\n  background-color: transparent;\n  border: 0;\n}\n.form-control[disabled],\n.form-control[readonly],\nfieldset[disabled] .form-control {\n  background-color: #eeeeee;\n  opacity: 1;\n}\n.form-control[disabled],\nfieldset[disabled] .form-control {\n  cursor: not-allowed;\n}\ntextarea.form-control {\n  height: auto;\n}\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n  input[type=\"date\"].form-control,\n  input[type=\"time\"].form-control,\n  input[type=\"datetime-local\"].form-control,\n  input[type=\"month\"].form-control {\n    line-height: 34px;\n  }\n  input[type=\"date\"].input-sm,\n  input[type=\"time\"].input-sm,\n  input[type=\"datetime-local\"].input-sm,\n  input[type=\"month\"].input-sm,\n  .input-group-sm input[type=\"date\"],\n  .input-group-sm input[type=\"time\"],\n  .input-group-sm input[type=\"datetime-local\"],\n  .input-group-sm input[type=\"month\"] {\n    line-height: 30px;\n  }\n  input[type=\"date\"].input-lg,\n  input[type=\"time\"].input-lg,\n  input[type=\"datetime-local\"].input-lg,\n  input[type=\"month\"].input-lg,\n  .input-group-lg input[type=\"date\"],\n  .input-group-lg input[type=\"time\"],\n  .input-group-lg input[type=\"datetime-local\"],\n  .input-group-lg input[type=\"month\"] {\n    line-height: 46px;\n  }\n}\n.form-group {\n  margin-bottom: 15px;\n}\n.radio,\n.checkbox {\n  position: relative;\n  display: block;\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.radio.disabled label,\n.checkbox.disabled label,\nfieldset[disabled] .radio label,\nfieldset[disabled] .checkbox label {\n  cursor: not-allowed;\n}\n.radio label,\n.checkbox label {\n  min-height: 20px;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: 400;\n  cursor: pointer;\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n  position: absolute;\n  margin-top: 4px \\9;\n  margin-left: -20px;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n  margin-top: -5px;\n}\n.radio-inline,\n.checkbox-inline {\n  position: relative;\n  display: inline-block;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: 400;\n  vertical-align: middle;\n  cursor: pointer;\n}\n.radio-inline.disabled,\n.checkbox-inline.disabled,\nfieldset[disabled] .radio-inline,\nfieldset[disabled] .checkbox-inline {\n  cursor: not-allowed;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n  margin-top: 0;\n  margin-left: 10px;\n}\n.form-control-static {\n  min-height: 
34px;\n  padding-top: 7px;\n  padding-bottom: 7px;\n  margin-bottom: 0;\n}\n.form-control-static.input-lg,\n.form-control-static.input-sm {\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-sm {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-sm {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-sm,\nselect[multiple].input-sm {\n  height: auto;\n}\n.form-group-sm .form-control {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.form-group-sm select.form-control {\n  height: 30px;\n  line-height: 30px;\n}\n.form-group-sm textarea.form-control,\n.form-group-sm select[multiple].form-control {\n  height: auto;\n}\n.form-group-sm .form-control-static {\n  height: 30px;\n  min-height: 32px;\n  padding: 6px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n}\n.input-lg {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-lg {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-lg,\nselect[multiple].input-lg {\n  height: auto;\n}\n.form-group-lg .form-control {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\n.form-group-lg select.form-control {\n  height: 46px;\n  line-height: 46px;\n}\n.form-group-lg textarea.form-control,\n.form-group-lg select[multiple].form-control {\n  height: auto;\n}\n.form-group-lg .form-control-static {\n  height: 46px;\n  min-height: 38px;\n  padding: 11px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n}\n.has-feedback {\n  position: relative;\n}\n.has-feedback .form-control {\n  padding-right: 42.5px;\n}\n.form-control-feedback {\n  position: absolute;\n  top: 0;\n  right: 0;\n  z-index: 2;\n  display: block;\n  width: 34px;\n  height: 34px;\n  line-height: 34px;\n  text-align: center;\n  pointer-events: none;\n}\n.input-lg + .form-control-feedback,\n.input-group-lg + .form-control-feedback,\n.form-group-lg .form-control + .form-control-feedback {\n  width: 46px;\n  height: 46px;\n  line-height: 46px;\n}\n.input-sm + .form-control-feedback,\n.input-group-sm + .form-control-feedback,\n.form-group-sm .form-control + .form-control-feedback {\n  width: 30px;\n  height: 30px;\n  line-height: 30px;\n}\n.has-success .help-block,\n.has-success .control-label,\n.has-success .radio,\n.has-success .checkbox,\n.has-success .radio-inline,\n.has-success .checkbox-inline,\n.has-success.radio label,\n.has-success.checkbox label,\n.has-success.radio-inline label,\n.has-success.checkbox-inline label {\n  color: #3c763d;\n}\n.has-success .form-control {\n  border-color: #3c763d;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-success .form-control:focus {\n  border-color: #2b542c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n}\n.has-success .input-group-addon {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #3c763d;\n}\n.has-success .form-control-feedback {\n  color: #3c763d;\n}\n.has-warning .help-block,\n.has-warning .control-label,\n.has-warning .radio,\n.has-warning .checkbox,\n.has-warning .radio-inline,\n.has-warning .checkbox-inline,\n.has-warning.radio label,\n.has-warning.checkbox label,\n.has-warning.radio-inline label,\n.has-warning.checkbox-inline label {\n  color: #8a6d3b;\n}\n.has-warning 
.form-control {\n  border-color: #8a6d3b;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-warning .form-control:focus {\n  border-color: #66512c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n}\n.has-warning .input-group-addon {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #8a6d3b;\n}\n.has-warning .form-control-feedback {\n  color: #8a6d3b;\n}\n.has-error .help-block,\n.has-error .control-label,\n.has-error .radio,\n.has-error .checkbox,\n.has-error .radio-inline,\n.has-error .checkbox-inline,\n.has-error.radio label,\n.has-error.checkbox label,\n.has-error.radio-inline label,\n.has-error.checkbox-inline label {\n  color: #a94442;\n}\n.has-error .form-control {\n  border-color: #a94442;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-error .form-control:focus {\n  border-color: #843534;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n}\n.has-error .input-group-addon {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #a94442;\n}\n.has-error .form-control-feedback {\n  color: #a94442;\n}\n.has-feedback label ~ .form-control-feedback {\n  top: 25px;\n}\n.has-feedback label.sr-only ~ .form-control-feedback {\n  top: 0;\n}\n.help-block {\n  display: block;\n  margin-top: 5px;\n  margin-bottom: 10px;\n  color: #737373;\n}\n@media (min-width: 768px) {\n  .form-inline .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .form-inline .form-control-static {\n    display: inline-block;\n  }\n  .form-inline .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .form-inline .input-group .input-group-addon,\n  .form-inline .input-group .input-group-btn,\n  .form-inline .input-group .form-control {\n    width: auto;\n  }\n  .form-inline .input-group > .form-control {\n    width: 100%;\n  }\n  .form-inline .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio,\n  .form-inline .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio label,\n  .form-inline .checkbox label {\n    padding-left: 0;\n  }\n  .form-inline .radio input[type=\"radio\"],\n  .form-inline .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .form-inline .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox,\n.form-horizontal .radio-inline,\n.form-horizontal .checkbox-inline {\n  padding-top: 7px;\n  margin-top: 0;\n  margin-bottom: 0;\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox {\n  min-height: 27px;\n}\n.form-horizontal .form-group {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .control-label {\n    padding-top: 7px;\n    margin-bottom: 0;\n    text-align: right;\n  }\n}\n.form-horizontal .has-feedback .form-control-feedback {\n  right: 15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-lg .control-label {\n    padding-top: 11px;\n    font-size: 18px;\n  
}\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-sm .control-label {\n    padding-top: 6px;\n    font-size: 12px;\n  }\n}\n.btn {\n  display: inline-block;\n  margin-bottom: 0;\n  font-weight: normal;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  touch-action: manipulation;\n  cursor: pointer;\n  background-image: none;\n  border: 1px solid transparent;\n  padding: 6px 12px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  border-radius: 4px;\n  -webkit-user-select: none;\n  -moz-user-select: none;\n  -ms-user-select: none;\n  user-select: none;\n}\n.btn:focus,\n.btn:active:focus,\n.btn.active:focus,\n.btn.focus,\n.btn:active.focus,\n.btn.active.focus {\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n.btn:hover,\n.btn:focus,\n.btn.focus {\n  color: #333;\n  text-decoration: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n  outline: 0;\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn.disabled,\n.btn[disabled],\nfieldset[disabled] .btn {\n  cursor: not-allowed;\n  filter: alpha(opacity=65);\n  opacity: 0.65;\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\na.btn.disabled,\nfieldset[disabled] a.btn {\n  pointer-events: none;\n}\n.btn-default {\n  color: #333;\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default:focus,\n.btn-default.focus {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #8c8c8c;\n}\n.btn-default:hover {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #adadad;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n  color: #333;\n  background-color: #e6e6e6;\n  background-image: none;\n  border-color: #adadad;\n}\n.btn-default:active:hover,\n.btn-default.active:hover,\n.open > .dropdown-toggle.btn-default:hover,\n.btn-default:active:focus,\n.btn-default.active:focus,\n.open > .dropdown-toggle.btn-default:focus,\n.btn-default:active.focus,\n.btn-default.active.focus,\n.open > .dropdown-toggle.btn-default.focus {\n  color: #333;\n  background-color: #d4d4d4;\n  border-color: #8c8c8c;\n}\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus {\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default .badge {\n  color: #fff;\n  background-color: #333;\n}\n.btn-primary {\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary:focus,\n.btn-primary.focus {\n  color: #fff;\n  background-color: #286090;\n  border-color: #122b40;\n}\n.btn-primary:hover {\n  color: #fff;\n  background-color: #286090;\n  border-color: #204d74;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n  color: #fff;\n  background-color: #286090;\n  background-image: none;\n  border-color: #204d74;\n}\n.btn-primary:active:hover,\n.btn-primary.active:hover,\n.open > .dropdown-toggle.btn-primary:hover,\n.btn-primary:active:focus,\n.btn-primary.active:focus,\n.open > .dropdown-toggle.btn-primary:focus,\n.btn-primary:active.focus,\n.btn-primary.active.focus,\n.open > .dropdown-toggle.btn-primary.focus {\n  color: #fff;\n  background-color: #204d74;\n  border-color: 
#122b40;\n}\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus {\n  background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.btn-success {\n  color: #fff;\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success:focus,\n.btn-success.focus {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #255625;\n}\n.btn-success:hover {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #398439;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n  color: #fff;\n  background-color: #449d44;\n  background-image: none;\n  border-color: #398439;\n}\n.btn-success:active:hover,\n.btn-success.active:hover,\n.open > .dropdown-toggle.btn-success:hover,\n.btn-success:active:focus,\n.btn-success.active:focus,\n.open > .dropdown-toggle.btn-success:focus,\n.btn-success:active.focus,\n.btn-success.active.focus,\n.open > .dropdown-toggle.btn-success.focus {\n  color: #fff;\n  background-color: #398439;\n  border-color: #255625;\n}\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus {\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success .badge {\n  color: #5cb85c;\n  background-color: #fff;\n}\n.btn-info {\n  color: #fff;\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info:focus,\n.btn-info.focus {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #1b6d85;\n}\n.btn-info:hover {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #269abc;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n  color: #fff;\n  background-color: #31b0d5;\n  background-image: none;\n  border-color: #269abc;\n}\n.btn-info:active:hover,\n.btn-info.active:hover,\n.open > .dropdown-toggle.btn-info:hover,\n.btn-info:active:focus,\n.btn-info.active:focus,\n.open > .dropdown-toggle.btn-info:focus,\n.btn-info:active.focus,\n.btn-info.active.focus,\n.open > .dropdown-toggle.btn-info.focus {\n  color: #fff;\n  background-color: #269abc;\n  border-color: #1b6d85;\n}\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus {\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info .badge {\n  color: #5bc0de;\n  background-color: #fff;\n}\n.btn-warning {\n  color: #fff;\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning:focus,\n.btn-warning.focus {\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #985f0d;\n}\n.btn-warning:hover {\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #d58512;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n  color: #fff;\n  background-color: #ec971f;\n  background-image: none;\n  border-color: #d58512;\n}\n.btn-warning:active:hover,\n.btn-warning.active:hover,\n.open > 
.dropdown-toggle.btn-warning:hover,\n.btn-warning:active:focus,\n.btn-warning.active:focus,\n.open > .dropdown-toggle.btn-warning:focus,\n.btn-warning:active.focus,\n.btn-warning.active.focus,\n.open > .dropdown-toggle.btn-warning.focus {\n  color: #fff;\n  background-color: #d58512;\n  border-color: #985f0d;\n}\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus {\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning .badge {\n  color: #f0ad4e;\n  background-color: #fff;\n}\n.btn-danger {\n  color: #fff;\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger:focus,\n.btn-danger.focus {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #761c19;\n}\n.btn-danger:hover {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #ac2925;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n  color: #fff;\n  background-color: #c9302c;\n  background-image: none;\n  border-color: #ac2925;\n}\n.btn-danger:active:hover,\n.btn-danger.active:hover,\n.open > .dropdown-toggle.btn-danger:hover,\n.btn-danger:active:focus,\n.btn-danger.active:focus,\n.open > .dropdown-toggle.btn-danger:focus,\n.btn-danger:active.focus,\n.btn-danger.active.focus,\n.open > .dropdown-toggle.btn-danger.focus {\n  color: #fff;\n  background-color: #ac2925;\n  border-color: #761c19;\n}\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus {\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger .badge {\n  color: #d9534f;\n  background-color: #fff;\n}\n.btn-link {\n  font-weight: 400;\n  color: #337ab7;\n  border-radius: 0;\n}\n.btn-link,\n.btn-link:active,\n.btn-link.active,\n.btn-link[disabled],\nfieldset[disabled] .btn-link {\n  background-color: transparent;\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\n.btn-link,\n.btn-link:hover,\n.btn-link:focus,\n.btn-link:active {\n  border-color: transparent;\n}\n.btn-link:hover,\n.btn-link:focus {\n  color: #23527c;\n  text-decoration: underline;\n  background-color: transparent;\n}\n.btn-link[disabled]:hover,\nfieldset[disabled] .btn-link:hover,\n.btn-link[disabled]:focus,\nfieldset[disabled] .btn-link:focus {\n  color: #777777;\n  text-decoration: none;\n}\n.btn-lg,\n.btn-group-lg > .btn {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\n.btn-sm,\n.btn-group-sm > .btn {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-xs,\n.btn-group-xs > .btn {\n  padding: 1px 5px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-block {\n  display: block;\n  width: 100%;\n}\n.btn-block + .btn-block {\n  margin-top: 5px;\n}\ninput[type=\"submit\"].btn-block,\ninput[type=\"reset\"].btn-block,\ninput[type=\"button\"].btn-block {\n  width: 100%;\n}\n.fade {\n  opacity: 0;\n  -webkit-transition: opacity 0.15s linear;\n  -o-transition: opacity 0.15s linear;\n  transition: opacity 0.15s linear;\n}\n.fade.in {\n  opacity: 1;\n}\n.collapse {\n  display: none;\n}\n.collapse.in {\n  display: 
block;\n}\ntr.collapse.in {\n  display: table-row;\n}\ntbody.collapse.in {\n  display: table-row-group;\n}\n.collapsing {\n  position: relative;\n  height: 0;\n  overflow: hidden;\n  -webkit-transition-property: height, visibility;\n  transition-property: height, visibility;\n  -webkit-transition-duration: 0.35s;\n  transition-duration: 0.35s;\n  -webkit-transition-timing-function: ease;\n  transition-timing-function: ease;\n}\n.caret {\n  display: inline-block;\n  width: 0;\n  height: 0;\n  margin-left: 2px;\n  vertical-align: middle;\n  border-top: 4px dashed;\n  border-top: 4px solid \\9;\n  border-right: 4px solid transparent;\n  border-left: 4px solid transparent;\n}\n.dropup,\n.dropdown {\n  position: relative;\n}\n.dropdown-toggle:focus {\n  outline: 0;\n}\n.dropdown-menu {\n  position: absolute;\n  top: 100%;\n  left: 0;\n  z-index: 1000;\n  display: none;\n  float: left;\n  min-width: 160px;\n  padding: 5px 0;\n  margin: 2px 0 0;\n  font-size: 14px;\n  text-align: left;\n  list-style: none;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, 0.15);\n  border-radius: 4px;\n  -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n  box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n}\n.dropdown-menu.pull-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu .divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.dropdown-menu > li > a {\n  display: block;\n  padding: 3px 20px;\n  clear: both;\n  font-weight: 400;\n  line-height: 1.42857143;\n  color: #333333;\n  white-space: nowrap;\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  color: #262626;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  color: #fff;\n  text-decoration: none;\n  background-color: #337ab7;\n  outline: 0;\n}\n.dropdown-menu > .disabled > a,\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  color: #777777;\n}\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n  background-image: none;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n}\n.open > .dropdown-menu {\n  display: block;\n}\n.open > a {\n  outline: 0;\n}\n.dropdown-menu-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu-left {\n  right: auto;\n  left: 0;\n}\n.dropdown-header {\n  display: block;\n  padding: 3px 20px;\n  font-size: 12px;\n  line-height: 1.42857143;\n  color: #777777;\n  white-space: nowrap;\n}\n.dropdown-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 990;\n}\n.pull-right > .dropdown-menu {\n  right: 0;\n  left: auto;\n}\n.dropup .caret,\n.navbar-fixed-bottom .dropdown .caret {\n  content: \"\";\n  border-top: 0;\n  border-bottom: 4px dashed;\n  border-bottom: 4px solid \\9;\n}\n.dropup .dropdown-menu,\n.navbar-fixed-bottom .dropdown .dropdown-menu {\n  top: auto;\n  bottom: 100%;\n  margin-bottom: 2px;\n}\n@media (min-width: 768px) {\n  .navbar-right .dropdown-menu {\n    right: 0;\n    left: auto;\n  }\n  .navbar-right .dropdown-menu-left {\n    right: auto;\n    left: 0;\n  }\n}\n.btn-group,\n.btn-group-vertical {\n  position: relative;\n  display: inline-block;\n  vertical-align: middle;\n}\n.btn-group > .btn,\n.btn-group-vertical > .btn {\n  position: relative;\n  float: 
left;\n}\n.btn-group > .btn:hover,\n.btn-group-vertical > .btn:hover,\n.btn-group > .btn:focus,\n.btn-group-vertical > .btn:focus,\n.btn-group > .btn:active,\n.btn-group-vertical > .btn:active,\n.btn-group > .btn.active,\n.btn-group-vertical > .btn.active {\n  z-index: 2;\n}\n.btn-group .btn + .btn,\n.btn-group .btn + .btn-group,\n.btn-group .btn-group + .btn,\n.btn-group .btn-group + .btn-group {\n  margin-left: -1px;\n}\n.btn-toolbar {\n  margin-left: -5px;\n}\n.btn-toolbar .btn,\n.btn-toolbar .btn-group,\n.btn-toolbar .input-group {\n  float: left;\n}\n.btn-toolbar > .btn,\n.btn-toolbar > .btn-group,\n.btn-toolbar > .input-group {\n  margin-left: 5px;\n}\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n  border-radius: 0;\n}\n.btn-group > .btn:first-child {\n  margin-left: 0;\n}\n.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group > .btn-group {\n  float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n  outline: 0;\n}\n.btn-group > .btn + .dropdown-toggle {\n  padding-right: 8px;\n  padding-left: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n  padding-right: 12px;\n  padding-left: 12px;\n}\n.btn-group.open .dropdown-toggle {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-group.open .dropdown-toggle.btn-link {\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\n.btn .caret {\n  margin-left: 0;\n}\n.btn-lg .caret {\n  border-width: 5px 5px 0;\n  border-bottom-width: 0;\n}\n.dropup .btn-lg .caret {\n  border-width: 0 5px 5px;\n}\n.btn-group-vertical > .btn,\n.btn-group-vertical > .btn-group,\n.btn-group-vertical > .btn-group > .btn {\n  display: block;\n  float: none;\n  width: 100%;\n  max-width: 100%;\n}\n.btn-group-vertical > .btn-group > .btn {\n  float: none;\n}\n.btn-group-vertical > .btn + .btn,\n.btn-group-vertical > .btn + .btn-group,\n.btn-group-vertical > .btn-group + .btn,\n.btn-group-vertical > .btn-group + .btn-group {\n  margin-top: -1px;\n  margin-left: 0;\n}\n.btn-group-vertical > .btn:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn:first-child:not(:last-child) {\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn:last-child:not(:first-child) {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-bottom-right-radius: 0;\n  
border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.btn-group-justified {\n  display: table;\n  width: 100%;\n  table-layout: fixed;\n  border-collapse: separate;\n}\n.btn-group-justified > .btn,\n.btn-group-justified > .btn-group {\n  display: table-cell;\n  float: none;\n  width: 1%;\n}\n.btn-group-justified > .btn-group .btn {\n  width: 100%;\n}\n.btn-group-justified > .btn-group .dropdown-menu {\n  left: auto;\n}\n[data-toggle=\"buttons\"] > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn input[type=\"checkbox\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"checkbox\"] {\n  position: absolute;\n  clip: rect(0, 0, 0, 0);\n  pointer-events: none;\n}\n.input-group {\n  position: relative;\n  display: table;\n  border-collapse: separate;\n}\n.input-group[class*=\"col-\"] {\n  float: none;\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-group .form-control {\n  position: relative;\n  z-index: 2;\n  float: left;\n  width: 100%;\n  margin-bottom: 0;\n}\n.input-group .form-control:focus {\n  z-index: 3;\n}\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-group-lg > .form-control,\nselect.input-group-lg > .input-group-addon,\nselect.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-group-lg > .form-control,\ntextarea.input-group-lg > .input-group-addon,\ntextarea.input-group-lg > .input-group-btn > .btn,\nselect[multiple].input-group-lg > .form-control,\nselect[multiple].input-group-lg > .input-group-addon,\nselect[multiple].input-group-lg > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-group-sm > .form-control,\nselect.input-group-sm > .input-group-addon,\nselect.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-group-sm > .form-control,\ntextarea.input-group-sm > .input-group-addon,\ntextarea.input-group-sm > .input-group-btn > .btn,\nselect[multiple].input-group-sm > .form-control,\nselect[multiple].input-group-sm > .input-group-addon,\nselect[multiple].input-group-sm > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n  display: table-cell;\n}\n.input-group-addon:not(:first-child):not(:last-child),\n.input-group-btn:not(:first-child):not(:last-child),\n.input-group .form-control:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.input-group-addon,\n.input-group-btn {\n  width: 1%;\n  white-space: nowrap;\n  vertical-align: middle;\n}\n.input-group-addon {\n  padding: 6px 12px;\n  font-size: 14px;\n  font-weight: 400;\n  line-height: 1;\n  color: #555555;\n  text-align: center;\n  background-color: #eeeeee;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\n.input-group-addon.input-sm {\n  padding: 5px 10px;\n  font-size: 12px;\n  border-radius: 3px;\n}\n.input-group-addon.input-lg {\n  padding: 10px 16px;\n  font-size: 18px;\n  border-radius: 
6px;\n}\n.input-group-addon input[type=\"radio\"],\n.input-group-addon input[type=\"checkbox\"] {\n  margin-top: 0;\n}\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.input-group-addon:first-child {\n  border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.input-group-addon:last-child {\n  border-left: 0;\n}\n.input-group-btn {\n  position: relative;\n  font-size: 0;\n  white-space: nowrap;\n}\n.input-group-btn > .btn {\n  position: relative;\n}\n.input-group-btn > .btn + .btn {\n  margin-left: -1px;\n}\n.input-group-btn > .btn:hover,\n.input-group-btn > .btn:focus,\n.input-group-btn > .btn:active {\n  z-index: 2;\n}\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group {\n  margin-right: -1px;\n}\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group {\n  z-index: 2;\n  margin-left: -1px;\n}\n.nav {\n  padding-left: 0;\n  margin-bottom: 0;\n  list-style: none;\n}\n.nav > li {\n  position: relative;\n  display: block;\n}\n.nav > li > a {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n}\n.nav > li > a:hover,\n.nav > li > a:focus {\n  text-decoration: none;\n  background-color: #eeeeee;\n}\n.nav > li.disabled > a {\n  color: #777777;\n}\n.nav > li.disabled > a:hover,\n.nav > li.disabled > a:focus {\n  color: #777777;\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n}\n.nav .open > a,\n.nav .open > a:hover,\n.nav .open > a:focus {\n  background-color: #eeeeee;\n  border-color: #337ab7;\n}\n.nav .nav-divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.nav > li > a > img {\n  max-width: none;\n}\n.nav-tabs {\n  border-bottom: 1px solid #ddd;\n}\n.nav-tabs > li {\n  float: left;\n  margin-bottom: -1px;\n}\n.nav-tabs > li > a {\n  margin-right: 2px;\n  line-height: 1.42857143;\n  border: 1px solid transparent;\n  border-radius: 4px 4px 0 0;\n}\n.nav-tabs > li > a:hover {\n  border-color: #eeeeee #eeeeee #ddd;\n}\n.nav-tabs > li.active > a,\n.nav-tabs > li.active > a:hover,\n.nav-tabs > li.active > a:focus {\n  color: #555555;\n  cursor: default;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-bottom-color: transparent;\n}\n.nav-tabs.nav-justified {\n  width: 100%;\n  border-bottom: 0;\n}\n.nav-tabs.nav-justified > li {\n  float: none;\n}\n.nav-tabs.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-tabs.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-tabs.nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs.nav-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs.nav-justified > .active > 
a,\n.nav-tabs.nav-justified > .active > a:hover,\n.nav-tabs.nav-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs.nav-justified > .active > a,\n  .nav-tabs.nav-justified > .active > a:hover,\n  .nav-tabs.nav-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.nav-pills > li {\n  float: left;\n}\n.nav-pills > li > a {\n  border-radius: 4px;\n}\n.nav-pills > li + li {\n  margin-left: 2px;\n}\n.nav-pills > li.active > a,\n.nav-pills > li.active > a:hover,\n.nav-pills > li.active > a:focus {\n  color: #fff;\n  background-color: #337ab7;\n}\n.nav-stacked > li {\n  float: none;\n}\n.nav-stacked > li + li {\n  margin-top: 2px;\n  margin-left: 0;\n}\n.nav-justified {\n  width: 100%;\n}\n.nav-justified > li {\n  float: none;\n}\n.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs-justified {\n  border-bottom: 0;\n}\n.nav-tabs-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs-justified > .active > a,\n.nav-tabs-justified > .active > a:hover,\n.nav-tabs-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs-justified > .active > a,\n  .nav-tabs-justified > .active > a:hover,\n  .nav-tabs-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.tab-content > .tab-pane {\n  display: none;\n}\n.tab-content > .active {\n  display: block;\n}\n.nav-tabs .dropdown-menu {\n  margin-top: -1px;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar {\n  position: relative;\n  min-height: 50px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n}\n@media (min-width: 768px) {\n  .navbar {\n    border-radius: 4px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-header {\n    float: left;\n  }\n}\n.navbar-collapse {\n  padding-right: 15px;\n  padding-left: 15px;\n  overflow-x: visible;\n  border-top: 1px solid transparent;\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1);\n  -webkit-overflow-scrolling: touch;\n}\n.navbar-collapse.in {\n  overflow-y: auto;\n}\n@media (min-width: 768px) {\n  .navbar-collapse {\n    width: auto;\n    border-top: 0;\n    box-shadow: none;\n  }\n  .navbar-collapse.collapse {\n    display: block !important;\n    height: auto !important;\n    padding-bottom: 0;\n    overflow: visible !important;\n  }\n  .navbar-collapse.in {\n    overflow-y: visible;\n  }\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-static-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  position: fixed;\n  right: 0;\n  left: 0;\n  z-index: 1030;\n}\n.navbar-fixed-top .navbar-collapse,\n.navbar-fixed-bottom .navbar-collapse {\n  max-height: 340px;\n}\n@media (max-device-width: 480px) and (orientation: landscape) {\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    max-height: 200px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-fixed-top,\n  .navbar-fixed-bottom {\n    border-radius: 
0;\n  }\n}\n.navbar-fixed-top {\n  top: 0;\n  border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n  bottom: 0;\n  margin-bottom: 0;\n  border-width: 1px 0 0;\n}\n.container > .navbar-header,\n.container-fluid > .navbar-header,\n.container > .navbar-collapse,\n.container-fluid > .navbar-collapse {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .container > .navbar-header,\n  .container-fluid > .navbar-header,\n  .container > .navbar-collapse,\n  .container-fluid > .navbar-collapse {\n    margin-right: 0;\n    margin-left: 0;\n  }\n}\n.navbar-static-top {\n  z-index: 1000;\n  border-width: 0 0 1px;\n}\n@media (min-width: 768px) {\n  .navbar-static-top {\n    border-radius: 0;\n  }\n}\n.navbar-brand {\n  float: left;\n  height: 50px;\n  padding: 15px 15px;\n  font-size: 18px;\n  line-height: 20px;\n}\n.navbar-brand:hover,\n.navbar-brand:focus {\n  text-decoration: none;\n}\n.navbar-brand > img {\n  display: block;\n}\n@media (min-width: 768px) {\n  .navbar > .container .navbar-brand,\n  .navbar > .container-fluid .navbar-brand {\n    margin-left: -15px;\n  }\n}\n.navbar-toggle {\n  position: relative;\n  float: right;\n  padding: 9px 10px;\n  margin-right: 15px;\n  margin-top: 8px;\n  margin-bottom: 8px;\n  background-color: transparent;\n  background-image: none;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.navbar-toggle:focus {\n  outline: 0;\n}\n.navbar-toggle .icon-bar {\n  display: block;\n  width: 22px;\n  height: 2px;\n  border-radius: 1px;\n}\n.navbar-toggle .icon-bar + .icon-bar {\n  margin-top: 4px;\n}\n@media (min-width: 768px) {\n  .navbar-toggle {\n    display: none;\n  }\n}\n.navbar-nav {\n  margin: 7.5px -15px;\n}\n.navbar-nav > li > a {\n  padding-top: 10px;\n  padding-bottom: 10px;\n  line-height: 20px;\n}\n@media (max-width: 767px) {\n  .navbar-nav .open .dropdown-menu {\n    position: static;\n    float: none;\n    width: auto;\n    margin-top: 0;\n    background-color: transparent;\n    border: 0;\n    box-shadow: none;\n  }\n  .navbar-nav .open .dropdown-menu > li > a,\n  .navbar-nav .open .dropdown-menu .dropdown-header {\n    padding: 5px 15px 5px 25px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a {\n    line-height: 20px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-nav .open .dropdown-menu > li > a:focus {\n    background-image: none;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-nav {\n    float: left;\n    margin: 0;\n  }\n  .navbar-nav > li {\n    float: left;\n  }\n  .navbar-nav > li > a {\n    padding-top: 15px;\n    padding-bottom: 15px;\n  }\n}\n.navbar-form {\n  padding: 10px 15px;\n  margin-right: -15px;\n  margin-left: -15px;\n  border-top: 1px solid transparent;\n  border-bottom: 1px solid transparent;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n  margin-top: 8px;\n  margin-bottom: 8px;\n}\n@media (min-width: 768px) {\n  .navbar-form .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control-static {\n    display: inline-block;\n  }\n  .navbar-form .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .navbar-form .input-group .input-group-addon,\n  .navbar-form .input-group .input-group-btn,\n  .navbar-form .input-group 
.form-control {\n    width: auto;\n  }\n  .navbar-form .input-group > .form-control {\n    width: 100%;\n  }\n  .navbar-form .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio,\n  .navbar-form .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio label,\n  .navbar-form .checkbox label {\n    padding-left: 0;\n  }\n  .navbar-form .radio input[type=\"radio\"],\n  .navbar-form .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .navbar-form .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n@media (max-width: 767px) {\n  .navbar-form .form-group {\n    margin-bottom: 5px;\n  }\n  .navbar-form .form-group:last-child {\n    margin-bottom: 0;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-form {\n    width: auto;\n    padding-top: 0;\n    padding-bottom: 0;\n    margin-right: 0;\n    margin-left: 0;\n    border: 0;\n    -webkit-box-shadow: none;\n    box-shadow: none;\n  }\n}\n.navbar-nav > li > .dropdown-menu {\n  margin-top: 0;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n  margin-bottom: 0;\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.navbar-btn {\n  margin-top: 8px;\n  margin-bottom: 8px;\n}\n.navbar-btn.btn-sm {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.navbar-btn.btn-xs {\n  margin-top: 14px;\n  margin-bottom: 14px;\n}\n.navbar-text {\n  margin-top: 15px;\n  margin-bottom: 15px;\n}\n@media (min-width: 768px) {\n  .navbar-text {\n    float: left;\n    margin-right: 15px;\n    margin-left: 15px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-left {\n    float: left !important;\n  }\n  .navbar-right {\n    float: right !important;\n    margin-right: -15px;\n  }\n  .navbar-right ~ .navbar-right {\n    margin-right: 0;\n  }\n}\n.navbar-default {\n  background-color: #f8f8f8;\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-brand {\n  color: #777;\n}\n.navbar-default .navbar-brand:hover,\n.navbar-default .navbar-brand:focus {\n  color: #5e5e5e;\n  background-color: transparent;\n}\n.navbar-default .navbar-text {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a:hover,\n.navbar-default .navbar-nav > li > a:focus {\n  color: #333;\n  background-color: transparent;\n}\n.navbar-default .navbar-nav > .active > a,\n.navbar-default .navbar-nav > .active > a:hover,\n.navbar-default .navbar-nav > .active > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .disabled > a,\n.navbar-default .navbar-nav > .disabled > a:hover,\n.navbar-default .navbar-nav > .disabled > a:focus {\n  color: #ccc;\n  background-color: transparent;\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .open > a:hover,\n.navbar-default .navbar-nav > .open > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n@media (max-width: 767px) {\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a {\n    color: #777;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #333;\n    background-color: transparent;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active 
> a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #555;\n    background-color: #e7e7e7;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #ccc;\n    background-color: transparent;\n  }\n}\n.navbar-default .navbar-toggle {\n  border-color: #ddd;\n}\n.navbar-default .navbar-toggle:hover,\n.navbar-default .navbar-toggle:focus {\n  background-color: #ddd;\n}\n.navbar-default .navbar-toggle .icon-bar {\n  background-color: #888;\n}\n.navbar-default .navbar-collapse,\n.navbar-default .navbar-form {\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-link {\n  color: #777;\n}\n.navbar-default .navbar-link:hover {\n  color: #333;\n}\n.navbar-default .btn-link {\n  color: #777;\n}\n.navbar-default .btn-link:hover,\n.navbar-default .btn-link:focus {\n  color: #333;\n}\n.navbar-default .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-default .btn-link:hover,\n.navbar-default .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-default .btn-link:focus {\n  color: #ccc;\n}\n.navbar-inverse {\n  background-color: #222;\n  border-color: #080808;\n}\n.navbar-inverse .navbar-brand {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-brand:hover,\n.navbar-inverse .navbar-brand:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-text {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a:hover,\n.navbar-inverse .navbar-nav > li > a:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .active > a,\n.navbar-inverse .navbar-nav > .active > a:hover,\n.navbar-inverse .navbar-nav > .active > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n.navbar-inverse .navbar-nav > .disabled > a,\n.navbar-inverse .navbar-nav > .disabled > a:hover,\n.navbar-inverse .navbar-nav > .disabled > a:focus {\n  color: #444;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .open > a:hover,\n.navbar-inverse .navbar-nav > .open > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n@media (max-width: 767px) {\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {\n    border-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu .divider {\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {\n    color: #9d9d9d;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #fff;\n    background-color: transparent;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #444;\n    background-color: transparent;\n  }\n}\n.navbar-inverse .navbar-toggle {\n  border-color: #333;\n}\n.navbar-inverse .navbar-toggle:hover,\n.navbar-inverse .navbar-toggle:focus {\n  
background-color: #333;\n}\n.navbar-inverse .navbar-toggle .icon-bar {\n  background-color: #fff;\n}\n.navbar-inverse .navbar-collapse,\n.navbar-inverse .navbar-form {\n  border-color: #101010;\n}\n.navbar-inverse .navbar-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-link:hover {\n  color: #fff;\n}\n.navbar-inverse .btn-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link:focus {\n  color: #fff;\n}\n.navbar-inverse .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-inverse .btn-link:focus {\n  color: #444;\n}\n.breadcrumb {\n  padding: 8px 15px;\n  margin-bottom: 20px;\n  list-style: none;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n}\n.breadcrumb > li {\n  display: inline-block;\n}\n.breadcrumb > li + li:before {\n  padding: 0 5px;\n  color: #ccc;\n  content: \"/\\00a0\";\n}\n.breadcrumb > .active {\n  color: #777777;\n}\n.pagination {\n  display: inline-block;\n  padding-left: 0;\n  margin: 20px 0;\n  border-radius: 4px;\n}\n.pagination > li {\n  display: inline;\n}\n.pagination > li > a,\n.pagination > li > span {\n  position: relative;\n  float: left;\n  padding: 6px 12px;\n  margin-left: -1px;\n  line-height: 1.42857143;\n  color: #337ab7;\n  text-decoration: none;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.pagination > li > a:hover,\n.pagination > li > span:hover,\n.pagination > li > a:focus,\n.pagination > li > span:focus {\n  z-index: 2;\n  color: #23527c;\n  background-color: #eeeeee;\n  border-color: #ddd;\n}\n.pagination > li:first-child > a,\n.pagination > li:first-child > span {\n  margin-left: 0;\n  border-top-left-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.pagination > li:last-child > a,\n.pagination > li:last-child > span {\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 4px;\n}\n.pagination > .active > a,\n.pagination > .active > span,\n.pagination > .active > a:hover,\n.pagination > .active > span:hover,\n.pagination > .active > a:focus,\n.pagination > .active > span:focus {\n  z-index: 3;\n  color: #fff;\n  cursor: default;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.pagination > .disabled > span,\n.pagination > .disabled > span:hover,\n.pagination > .disabled > span:focus,\n.pagination > .disabled > a,\n.pagination > .disabled > a:hover,\n.pagination > .disabled > a:focus {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #fff;\n  border-color: #ddd;\n}\n.pagination-lg > li > a,\n.pagination-lg > li > span {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n}\n.pagination-lg > li:first-child > a,\n.pagination-lg > li:first-child > span {\n  border-top-left-radius: 6px;\n  border-bottom-left-radius: 6px;\n}\n.pagination-lg > li:last-child > a,\n.pagination-lg > li:last-child > span {\n  border-top-right-radius: 6px;\n  border-bottom-right-radius: 6px;\n}\n.pagination-sm > li > a,\n.pagination-sm > li > span {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n}\n.pagination-sm > li:first-child > a,\n.pagination-sm > li:first-child > span {\n  border-top-left-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.pagination-sm > li:last-child > a,\n.pagination-sm > li:last-child > span {\n  border-top-right-radius: 3px;\n  border-bottom-right-radius: 3px;\n}\n.pager {\n  padding-left: 0;\n  margin: 20px 0;\n  text-align: center;\n  list-style: none;\n}\n.pager li {\n  display: inline;\n}\n.pager li > a,\n.pager li 
> span {\n  display: inline-block;\n  padding: 5px 14px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 15px;\n}\n.pager li > a:hover,\n.pager li > a:focus {\n  text-decoration: none;\n  background-color: #eeeeee;\n}\n.pager .next > a,\n.pager .next > span {\n  float: right;\n}\n.pager .previous > a,\n.pager .previous > span {\n  float: left;\n}\n.pager .disabled > a,\n.pager .disabled > a:hover,\n.pager .disabled > a:focus,\n.pager .disabled > span {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #fff;\n}\n.label {\n  display: inline;\n  padding: 0.2em 0.6em 0.3em;\n  font-size: 75%;\n  font-weight: 700;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  border-radius: 0.25em;\n}\na.label:hover,\na.label:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.label:empty {\n  display: none;\n}\n.btn .label {\n  position: relative;\n  top: -1px;\n}\n.label-default {\n  background-color: #777777;\n}\n.label-default[href]:hover,\n.label-default[href]:focus {\n  background-color: #5e5e5e;\n}\n.label-primary {\n  background-color: #337ab7;\n}\n.label-primary[href]:hover,\n.label-primary[href]:focus {\n  background-color: #286090;\n}\n.label-success {\n  background-color: #5cb85c;\n}\n.label-success[href]:hover,\n.label-success[href]:focus {\n  background-color: #449d44;\n}\n.label-info {\n  background-color: #5bc0de;\n}\n.label-info[href]:hover,\n.label-info[href]:focus {\n  background-color: #31b0d5;\n}\n.label-warning {\n  background-color: #f0ad4e;\n}\n.label-warning[href]:hover,\n.label-warning[href]:focus {\n  background-color: #ec971f;\n}\n.label-danger {\n  background-color: #d9534f;\n}\n.label-danger[href]:hover,\n.label-danger[href]:focus {\n  background-color: #c9302c;\n}\n.badge {\n  display: inline-block;\n  min-width: 10px;\n  padding: 3px 7px;\n  font-size: 12px;\n  font-weight: bold;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  background-color: #777777;\n  border-radius: 10px;\n}\n.badge:empty {\n  display: none;\n}\n.btn .badge {\n  position: relative;\n  top: -1px;\n}\n.btn-xs .badge,\n.btn-group-xs > .btn .badge {\n  top: 0;\n  padding: 1px 5px;\n}\na.badge:hover,\na.badge:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.list-group-item > .badge {\n  float: right;\n}\n.list-group-item > .badge + .badge {\n  margin-right: 5px;\n}\n.nav-pills > li > a > .badge {\n  margin-left: 3px;\n}\n.jumbotron {\n  padding-top: 30px;\n  padding-bottom: 30px;\n  margin-bottom: 30px;\n  color: inherit;\n  background-color: #eeeeee;\n}\n.jumbotron h1,\n.jumbotron .h1 {\n  color: inherit;\n}\n.jumbotron p {\n  margin-bottom: 15px;\n  font-size: 21px;\n  font-weight: 200;\n}\n.jumbotron > hr {\n  border-top-color: #d5d5d5;\n}\n.container .jumbotron,\n.container-fluid .jumbotron {\n  padding-right: 15px;\n  padding-left: 15px;\n  border-radius: 6px;\n}\n.jumbotron .container {\n  max-width: 100%;\n}\n@media screen and (min-width: 768px) {\n  .jumbotron {\n    padding-top: 48px;\n    padding-bottom: 48px;\n  }\n  .container .jumbotron,\n  .container-fluid .jumbotron {\n    padding-right: 60px;\n    padding-left: 60px;\n  }\n  .jumbotron h1,\n  .jumbotron .h1 {\n    font-size: 63px;\n  }\n}\n.thumbnail {\n  display: block;\n  padding: 4px;\n  margin-bottom: 20px;\n  
line-height: 1.42857143;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: border 0.2s ease-in-out;\n  -o-transition: border 0.2s ease-in-out;\n  transition: border 0.2s ease-in-out;\n}\n.thumbnail > img,\n.thumbnail a > img {\n  margin-right: auto;\n  margin-left: auto;\n}\na.thumbnail:hover,\na.thumbnail:focus,\na.thumbnail.active {\n  border-color: #337ab7;\n}\n.thumbnail .caption {\n  padding: 9px;\n  color: #333333;\n}\n.alert {\n  padding: 15px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.alert h4 {\n  margin-top: 0;\n  color: inherit;\n}\n.alert .alert-link {\n  font-weight: bold;\n}\n.alert > p,\n.alert > ul {\n  margin-bottom: 0;\n}\n.alert > p + p {\n  margin-top: 5px;\n}\n.alert-dismissable,\n.alert-dismissible {\n  padding-right: 35px;\n}\n.alert-dismissable .close,\n.alert-dismissible .close {\n  position: relative;\n  top: -2px;\n  right: -21px;\n  color: inherit;\n}\n.alert-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.alert-success hr {\n  border-top-color: #c9e2b3;\n}\n.alert-success .alert-link {\n  color: #2b542c;\n}\n.alert-info {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.alert-info hr {\n  border-top-color: #a6e1ec;\n}\n.alert-info .alert-link {\n  color: #245269;\n}\n.alert-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.alert-warning hr {\n  border-top-color: #f7e1b5;\n}\n.alert-warning .alert-link {\n  color: #66512c;\n}\n.alert-danger {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.alert-danger hr {\n  border-top-color: #e4b9c0;\n}\n.alert-danger .alert-link {\n  color: #843534;\n}\n@-webkit-keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n@keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n.progress {\n  height: 20px;\n  margin-bottom: 20px;\n  overflow: hidden;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n  box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n}\n.progress-bar {\n  float: left;\n  width: 0%;\n  height: 100%;\n  font-size: 12px;\n  line-height: 20px;\n  color: #fff;\n  text-align: center;\n  background-color: #337ab7;\n  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  -webkit-transition: width 0.6s ease;\n  -o-transition: width 0.6s ease;\n  transition: width 0.6s ease;\n}\n.progress-striped .progress-bar,\n.progress-bar-striped {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-size: 40px 40px;\n}\n.progress.active .progress-bar,\n.progress-bar.active {\n  -webkit-animation: progress-bar-stripes 2s linear infinite;\n  -o-animation: 
progress-bar-stripes 2s linear infinite;\n  animation: progress-bar-stripes 2s linear infinite;\n}\n.progress-bar-success {\n  background-color: #5cb85c;\n}\n.progress-striped .progress-bar-success {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-info {\n  background-color: #5bc0de;\n}\n.progress-striped .progress-bar-info {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-warning {\n  background-color: #f0ad4e;\n}\n.progress-striped .progress-bar-warning {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-danger {\n  background-color: #d9534f;\n}\n.progress-striped .progress-bar-danger {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.media {\n  margin-top: 15px;\n}\n.media:first-child {\n  margin-top: 0;\n}\n.media,\n.media-body {\n  overflow: hidden;\n  zoom: 1;\n}\n.media-body {\n  width: 10000px;\n}\n.media-object {\n  display: block;\n}\n.media-object.img-thumbnail {\n  max-width: none;\n}\n.media-right,\n.media > .pull-right {\n  padding-left: 10px;\n}\n.media-left,\n.media > .pull-left {\n  padding-right: 10px;\n}\n.media-left,\n.media-right,\n.media-body {\n  display: table-cell;\n  vertical-align: top;\n}\n.media-middle {\n  vertical-align: middle;\n}\n.media-bottom {\n  vertical-align: 
bottom;\n}\n.media-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.media-list {\n  padding-left: 0;\n  list-style: none;\n}\n.list-group {\n  padding-left: 0;\n  margin-bottom: 20px;\n}\n.list-group-item {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n  margin-bottom: -1px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.list-group-item:first-child {\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n}\n.list-group-item:last-child {\n  margin-bottom: 0;\n  border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.list-group-item.disabled,\n.list-group-item.disabled:hover,\n.list-group-item.disabled:focus {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #eeeeee;\n}\n.list-group-item.disabled .list-group-item-heading,\n.list-group-item.disabled:hover .list-group-item-heading,\n.list-group-item.disabled:focus .list-group-item-heading {\n  color: inherit;\n}\n.list-group-item.disabled .list-group-item-text,\n.list-group-item.disabled:hover .list-group-item-text,\n.list-group-item.disabled:focus .list-group-item-text {\n  color: #777777;\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  z-index: 2;\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.list-group-item.active .list-group-item-heading,\n.list-group-item.active:hover .list-group-item-heading,\n.list-group-item.active:focus .list-group-item-heading,\n.list-group-item.active .list-group-item-heading > small,\n.list-group-item.active:hover .list-group-item-heading > small,\n.list-group-item.active:focus .list-group-item-heading > small,\n.list-group-item.active .list-group-item-heading > .small,\n.list-group-item.active:hover .list-group-item-heading > .small,\n.list-group-item.active:focus .list-group-item-heading > .small {\n  color: inherit;\n}\n.list-group-item.active .list-group-item-text,\n.list-group-item.active:hover .list-group-item-text,\n.list-group-item.active:focus .list-group-item-text {\n  color: #c7ddef;\n}\na.list-group-item,\nbutton.list-group-item {\n  color: #555;\n}\na.list-group-item .list-group-item-heading,\nbutton.list-group-item .list-group-item-heading {\n  color: #333;\n}\na.list-group-item:hover,\nbutton.list-group-item:hover,\na.list-group-item:focus,\nbutton.list-group-item:focus {\n  color: #555;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\nbutton.list-group-item {\n  width: 100%;\n  text-align: left;\n}\n.list-group-item-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n}\na.list-group-item-success,\nbutton.list-group-item-success {\n  color: #3c763d;\n}\na.list-group-item-success .list-group-item-heading,\nbutton.list-group-item-success .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-success:hover,\nbutton.list-group-item-success:hover,\na.list-group-item-success:focus,\nbutton.list-group-item-success:focus {\n  color: #3c763d;\n  background-color: #d0e9c6;\n}\na.list-group-item-success.active,\nbutton.list-group-item-success.active,\na.list-group-item-success.active:hover,\nbutton.list-group-item-success.active:hover,\na.list-group-item-success.active:focus,\nbutton.list-group-item-success.active:focus {\n  color: #fff;\n  background-color: #3c763d;\n  border-color: #3c763d;\n}\n.list-group-item-info {\n  color: #31708f;\n  background-color: #d9edf7;\n}\na.list-group-item-info,\nbutton.list-group-item-info {\n  color: #31708f;\n}\na.list-group-item-info 
.list-group-item-heading,\nbutton.list-group-item-info .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-info:hover,\nbutton.list-group-item-info:hover,\na.list-group-item-info:focus,\nbutton.list-group-item-info:focus {\n  color: #31708f;\n  background-color: #c4e3f3;\n}\na.list-group-item-info.active,\nbutton.list-group-item-info.active,\na.list-group-item-info.active:hover,\nbutton.list-group-item-info.active:hover,\na.list-group-item-info.active:focus,\nbutton.list-group-item-info.active:focus {\n  color: #fff;\n  background-color: #31708f;\n  border-color: #31708f;\n}\n.list-group-item-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n}\na.list-group-item-warning,\nbutton.list-group-item-warning {\n  color: #8a6d3b;\n}\na.list-group-item-warning .list-group-item-heading,\nbutton.list-group-item-warning .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-warning:hover,\nbutton.list-group-item-warning:hover,\na.list-group-item-warning:focus,\nbutton.list-group-item-warning:focus {\n  color: #8a6d3b;\n  background-color: #faf2cc;\n}\na.list-group-item-warning.active,\nbutton.list-group-item-warning.active,\na.list-group-item-warning.active:hover,\nbutton.list-group-item-warning.active:hover,\na.list-group-item-warning.active:focus,\nbutton.list-group-item-warning.active:focus {\n  color: #fff;\n  background-color: #8a6d3b;\n  border-color: #8a6d3b;\n}\n.list-group-item-danger {\n  color: #a94442;\n  background-color: #f2dede;\n}\na.list-group-item-danger,\nbutton.list-group-item-danger {\n  color: #a94442;\n}\na.list-group-item-danger .list-group-item-heading,\nbutton.list-group-item-danger .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-danger:hover,\nbutton.list-group-item-danger:hover,\na.list-group-item-danger:focus,\nbutton.list-group-item-danger:focus {\n  color: #a94442;\n  background-color: #ebcccc;\n}\na.list-group-item-danger.active,\nbutton.list-group-item-danger.active,\na.list-group-item-danger.active:hover,\nbutton.list-group-item-danger.active:hover,\na.list-group-item-danger.active:focus,\nbutton.list-group-item-danger.active:focus {\n  color: #fff;\n  background-color: #a94442;\n  border-color: #a94442;\n}\n.list-group-item-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.list-group-item-text {\n  margin-bottom: 0;\n  line-height: 1.3;\n}\n.panel {\n  margin-bottom: 20px;\n  background-color: #fff;\n  border: 1px solid transparent;\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n  box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.panel-body {\n  padding: 15px;\n}\n.panel-heading {\n  padding: 10px 15px;\n  border-bottom: 1px solid transparent;\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel-heading > .dropdown .dropdown-toggle {\n  color: inherit;\n}\n.panel-title {\n  margin-top: 0;\n  margin-bottom: 0;\n  font-size: 16px;\n  color: inherit;\n}\n.panel-title > a,\n.panel-title > small,\n.panel-title > .small,\n.panel-title > small > a,\n.panel-title > .small > a {\n  color: inherit;\n}\n.panel-footer {\n  padding: 10px 15px;\n  background-color: #f5f5f5;\n  border-top: 1px solid #ddd;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .list-group,\n.panel > .panel-collapse > .list-group {\n  margin-bottom: 0;\n}\n.panel > .list-group .list-group-item,\n.panel > .panel-collapse > .list-group .list-group-item {\n  border-width: 1px 0;\n  border-radius: 0;\n}\n.panel > .list-group:first-child 
.list-group-item:first-child,\n.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {\n  border-top: 0;\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .list-group:last-child .list-group-item:last-child,\n.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {\n  border-bottom: 0;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.panel-heading + .list-group .list-group-item:first-child {\n  border-top-width: 0;\n}\n.list-group + .panel-footer {\n  border-top-width: 0;\n}\n.panel > .table,\n.panel > .table-responsive > .table,\n.panel > .panel-collapse > .table {\n  margin-bottom: 0;\n}\n.panel > .table caption,\n.panel > .table-responsive > .table caption,\n.panel > .panel-collapse > .table caption {\n  padding-right: 15px;\n  padding-left: 15px;\n}\n.panel > .table:first-child,\n.panel > .table-responsive:first-child > .table:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {\n  border-top-left-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {\n  border-top-right-radius: 3px;\n}\n.panel > .table:last-child,\n.panel > .table-responsive:last-child > .table:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,\n.panel > 
.table:last-child > tfoot:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {\n  border-bottom-right-radius: 3px;\n}\n.panel > .panel-body + .table,\n.panel > .panel-body + .table-responsive,\n.panel > .table + .panel-body,\n.panel > .table-responsive + .panel-body {\n  border-top: 1px solid #ddd;\n}\n.panel > .table > tbody:first-child > tr:first-child th,\n.panel > .table > tbody:first-child > tr:first-child td {\n  border-top: 0;\n}\n.panel > .table-bordered,\n.panel > .table-responsive > .table-bordered {\n  border: 0;\n}\n.panel > .table-bordered > thead > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,\n.panel > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-bordered > thead > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,\n.panel > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-bordered > tfoot > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n  border-left: 0;\n}\n.panel > .table-bordered > thead > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,\n.panel > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-bordered > thead > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > thead 
> tr > td:last-child,\n.panel > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-bordered > tfoot > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n  border-right: 0;\n}\n.panel > .table-bordered > thead > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,\n.panel > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-bordered > thead > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,\n.panel > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {\n  border-bottom: 0;\n}\n.panel > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-bordered > tfoot > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {\n  border-bottom: 0;\n}\n.panel > .table-responsive {\n  margin-bottom: 0;\n  border: 0;\n}\n.panel-group {\n  margin-bottom: 20px;\n}\n.panel-group .panel {\n  margin-bottom: 0;\n  border-radius: 4px;\n}\n.panel-group .panel + .panel {\n  margin-top: 5px;\n}\n.panel-group .panel-heading {\n  border-bottom: 0;\n}\n.panel-group .panel-heading + .panel-collapse > .panel-body,\n.panel-group .panel-heading + .panel-collapse > .list-group {\n  border-top: 1px solid #ddd;\n}\n.panel-group .panel-footer {\n  border-top: 0;\n}\n.panel-group .panel-footer + .panel-collapse .panel-body {\n  border-bottom: 1px solid #ddd;\n}\n.panel-default {\n  border-color: #ddd;\n}\n.panel-default > .panel-heading {\n  color: #333333;\n  background-color: #f5f5f5;\n  border-color: #ddd;\n}\n.panel-default > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ddd;\n}\n.panel-default > .panel-heading .badge {\n  color: #f5f5f5;\n  background-color: #333333;\n}\n.panel-default > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ddd;\n}\n.panel-primary {\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading {\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #337ab7;\n}\n.panel-primary > .panel-heading .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.panel-primary > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #337ab7;\n}\n.panel-success {\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #d6e9c6;\n}\n.panel-success > .panel-heading .badge {\n  color: #dff0d8;\n  background-color: #3c763d;\n}\n.panel-success > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #d6e9c6;\n}\n.panel-info {\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.panel-info > 
.panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #bce8f1;\n}\n.panel-info > .panel-heading .badge {\n  color: #d9edf7;\n  background-color: #31708f;\n}\n.panel-info > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #bce8f1;\n}\n.panel-warning {\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #faebcc;\n}\n.panel-warning > .panel-heading .badge {\n  color: #fcf8e3;\n  background-color: #8a6d3b;\n}\n.panel-warning > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #faebcc;\n}\n.panel-danger {\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ebccd1;\n}\n.panel-danger > .panel-heading .badge {\n  color: #f2dede;\n  background-color: #a94442;\n}\n.panel-danger > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ebccd1;\n}\n.embed-responsive {\n  position: relative;\n  display: block;\n  height: 0;\n  padding: 0;\n  overflow: hidden;\n}\n.embed-responsive .embed-responsive-item,\n.embed-responsive iframe,\n.embed-responsive embed,\n.embed-responsive object,\n.embed-responsive video {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 100%;\n  height: 100%;\n  border: 0;\n}\n.embed-responsive-16by9 {\n  padding-bottom: 56.25%;\n}\n.embed-responsive-4by3 {\n  padding-bottom: 75%;\n}\n.well {\n  min-height: 20px;\n  padding: 19px;\n  margin-bottom: 20px;\n  background-color: #f5f5f5;\n  border: 1px solid #e3e3e3;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.well blockquote {\n  border-color: #ddd;\n  border-color: rgba(0, 0, 0, 0.15);\n}\n.well-lg {\n  padding: 24px;\n  border-radius: 6px;\n}\n.well-sm {\n  padding: 9px;\n  border-radius: 3px;\n}\n.close {\n  float: right;\n  font-size: 21px;\n  font-weight: bold;\n  line-height: 1;\n  color: #000;\n  text-shadow: 0 1px 0 #fff;\n  filter: alpha(opacity=20);\n  opacity: 0.2;\n}\n.close:hover,\n.close:focus {\n  color: #000;\n  text-decoration: none;\n  cursor: pointer;\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\nbutton.close {\n  padding: 0;\n  cursor: pointer;\n  background: transparent;\n  border: 0;\n  -webkit-appearance: none;\n  appearance: none;\n}\n.modal-open {\n  overflow: hidden;\n}\n.modal {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 1050;\n  display: none;\n  overflow: hidden;\n  -webkit-overflow-scrolling: touch;\n  outline: 0;\n}\n.modal.fade .modal-dialog {\n  -webkit-transform: translate(0, -25%);\n  -ms-transform: translate(0, -25%);\n  -o-transform: translate(0, -25%);\n  transform: translate(0, -25%);\n  -webkit-transition: -webkit-transform 0.3s ease-out;\n  -moz-transition: -moz-transform 0.3s ease-out;\n  -o-transition: -o-transform 0.3s ease-out;\n  transition: transform 0.3s ease-out;\n}\n.modal.in .modal-dialog {\n  -webkit-transform: translate(0, 0);\n  -ms-transform: translate(0, 0);\n  -o-transform: translate(0, 0);\n  transform: translate(0, 0);\n}\n.modal-open .modal {\n  overflow-x: hidden;\n  overflow-y: auto;\n}\n.modal-dialog {\n  position: relative;\n  width: auto;\n  margin: 10px;\n}\n.modal-content {\n  position: 
relative;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #999;\n  border: 1px solid rgba(0, 0, 0, 0.2);\n  border-radius: 6px;\n  -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n  box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n  outline: 0;\n}\n.modal-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 1040;\n  background-color: #000;\n}\n.modal-backdrop.fade {\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.modal-backdrop.in {\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\n.modal-header {\n  padding: 15px;\n  border-bottom: 1px solid #e5e5e5;\n}\n.modal-header .close {\n  margin-top: -2px;\n}\n.modal-title {\n  margin: 0;\n  line-height: 1.42857143;\n}\n.modal-body {\n  position: relative;\n  padding: 15px;\n}\n.modal-footer {\n  padding: 15px;\n  text-align: right;\n  border-top: 1px solid #e5e5e5;\n}\n.modal-footer .btn + .btn {\n  margin-bottom: 0;\n  margin-left: 5px;\n}\n.modal-footer .btn-group .btn + .btn {\n  margin-left: -1px;\n}\n.modal-footer .btn-block + .btn-block {\n  margin-left: 0;\n}\n.modal-scrollbar-measure {\n  position: absolute;\n  top: -9999px;\n  width: 50px;\n  height: 50px;\n  overflow: scroll;\n}\n@media (min-width: 768px) {\n  .modal-dialog {\n    width: 600px;\n    margin: 30px auto;\n  }\n  .modal-content {\n    -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n    box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n  }\n  .modal-sm {\n    width: 300px;\n  }\n}\n@media (min-width: 992px) {\n  .modal-lg {\n    width: 900px;\n  }\n}\n.tooltip {\n  position: absolute;\n  z-index: 1070;\n  display: block;\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1.42857143;\n  line-break: auto;\n  text-align: left;\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n  font-size: 12px;\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.tooltip.in {\n  filter: alpha(opacity=90);\n  opacity: 0.9;\n}\n.tooltip.top {\n  padding: 5px 0;\n  margin-top: -3px;\n}\n.tooltip.right {\n  padding: 0 5px;\n  margin-left: 3px;\n}\n.tooltip.bottom {\n  padding: 5px 0;\n  margin-top: 3px;\n}\n.tooltip.left {\n  padding: 0 5px;\n  margin-left: -3px;\n}\n.tooltip.top .tooltip-arrow {\n  bottom: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-left .tooltip-arrow {\n  right: 5px;\n  bottom: 0;\n  margin-bottom: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-right .tooltip-arrow {\n  bottom: 0;\n  left: 5px;\n  margin-bottom: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.right .tooltip-arrow {\n  top: 50%;\n  left: 0;\n  margin-top: -5px;\n  border-width: 5px 5px 5px 0;\n  border-right-color: #000;\n}\n.tooltip.left .tooltip-arrow {\n  top: 50%;\n  right: 0;\n  margin-top: -5px;\n  border-width: 5px 0 5px 5px;\n  border-left-color: #000;\n}\n.tooltip.bottom .tooltip-arrow {\n  top: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-left .tooltip-arrow {\n  top: 0;\n  right: 5px;\n  margin-top: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-right .tooltip-arrow {\n  top: 0;\n  left: 5px;\n  margin-top: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: 
#000;\n}\n.tooltip-inner {\n  max-width: 200px;\n  padding: 3px 8px;\n  color: #fff;\n  text-align: center;\n  background-color: #000;\n  border-radius: 4px;\n}\n.tooltip-arrow {\n  position: absolute;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.popover {\n  position: absolute;\n  top: 0;\n  left: 0;\n  z-index: 1060;\n  display: none;\n  max-width: 276px;\n  padding: 1px;\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1.42857143;\n  line-break: auto;\n  text-align: left;\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n  font-size: 14px;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, 0.2);\n  border-radius: 6px;\n  -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);\n  box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);\n}\n.popover.top {\n  margin-top: -10px;\n}\n.popover.right {\n  margin-left: 10px;\n}\n.popover.bottom {\n  margin-top: 10px;\n}\n.popover.left {\n  margin-left: -10px;\n}\n.popover > .arrow {\n  border-width: 11px;\n}\n.popover > .arrow,\n.popover > .arrow:after {\n  position: absolute;\n  display: block;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.popover > .arrow:after {\n  content: \"\";\n  border-width: 10px;\n}\n.popover.top > .arrow {\n  bottom: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-color: #999999;\n  border-top-color: rgba(0, 0, 0, 0.25);\n  border-bottom-width: 0;\n}\n.popover.top > .arrow:after {\n  bottom: 1px;\n  margin-left: -10px;\n  content: \" \";\n  border-top-color: #fff;\n  border-bottom-width: 0;\n}\n.popover.right > .arrow {\n  top: 50%;\n  left: -11px;\n  margin-top: -11px;\n  border-right-color: #999999;\n  border-right-color: rgba(0, 0, 0, 0.25);\n  border-left-width: 0;\n}\n.popover.right > .arrow:after {\n  bottom: -10px;\n  left: 1px;\n  content: \" \";\n  border-right-color: #fff;\n  border-left-width: 0;\n}\n.popover.bottom > .arrow {\n  top: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-width: 0;\n  border-bottom-color: #999999;\n  border-bottom-color: rgba(0, 0, 0, 0.25);\n}\n.popover.bottom > .arrow:after {\n  top: 1px;\n  margin-left: -10px;\n  content: \" \";\n  border-top-width: 0;\n  border-bottom-color: #fff;\n}\n.popover.left > .arrow {\n  top: 50%;\n  right: -11px;\n  margin-top: -11px;\n  border-right-width: 0;\n  border-left-color: #999999;\n  border-left-color: rgba(0, 0, 0, 0.25);\n}\n.popover.left > .arrow:after {\n  right: 1px;\n  bottom: -10px;\n  content: \" \";\n  border-right-width: 0;\n  border-left-color: #fff;\n}\n.popover-title {\n  padding: 8px 14px;\n  margin: 0;\n  font-size: 14px;\n  background-color: #f7f7f7;\n  border-bottom: 1px solid #ebebeb;\n  border-radius: 5px 5px 0 0;\n}\n.popover-content {\n  padding: 9px 14px;\n}\n.carousel {\n  position: relative;\n}\n.carousel-inner {\n  position: relative;\n  width: 100%;\n  overflow: hidden;\n}\n.carousel-inner > .item {\n  position: relative;\n  display: none;\n  -webkit-transition: 0.6s ease-in-out left;\n  -o-transition: 0.6s ease-in-out left;\n  transition: 0.6s ease-in-out left;\n}\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n  line-height: 1;\n}\n@media all and (transform-3d), (-webkit-transform-3d) {\n  
.carousel-inner > .item {\n    -webkit-transition: -webkit-transform 0.6s ease-in-out;\n    -moz-transition: -moz-transform 0.6s ease-in-out;\n    -o-transition: -o-transform 0.6s ease-in-out;\n    transition: transform 0.6s ease-in-out;\n    -webkit-backface-visibility: hidden;\n    -moz-backface-visibility: hidden;\n    backface-visibility: hidden;\n    -webkit-perspective: 1000px;\n    -moz-perspective: 1000px;\n    perspective: 1000px;\n  }\n  .carousel-inner > .item.next,\n  .carousel-inner > .item.active.right {\n    -webkit-transform: translate3d(100%, 0, 0);\n    transform: translate3d(100%, 0, 0);\n    left: 0;\n  }\n  .carousel-inner > .item.prev,\n  .carousel-inner > .item.active.left {\n    -webkit-transform: translate3d(-100%, 0, 0);\n    transform: translate3d(-100%, 0, 0);\n    left: 0;\n  }\n  .carousel-inner > .item.next.left,\n  .carousel-inner > .item.prev.right,\n  .carousel-inner > .item.active {\n    -webkit-transform: translate3d(0, 0, 0);\n    transform: translate3d(0, 0, 0);\n    left: 0;\n  }\n}\n.carousel-inner > .active,\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  display: block;\n}\n.carousel-inner > .active {\n  left: 0;\n}\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  position: absolute;\n  top: 0;\n  width: 100%;\n}\n.carousel-inner > .next {\n  left: 100%;\n}\n.carousel-inner > .prev {\n  left: -100%;\n}\n.carousel-inner > .next.left,\n.carousel-inner > .prev.right {\n  left: 0;\n}\n.carousel-inner > .active.left {\n  left: -100%;\n}\n.carousel-inner > .active.right {\n  left: 100%;\n}\n.carousel-control {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 15%;\n  font-size: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);\n  background-color: rgba(0, 0, 0, 0);\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\n.carousel-control.left {\n  background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n  background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n  background-image: linear-gradient(to right, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control.right {\n  right: 0;\n  left: auto;\n  background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n  background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n  background-image: linear-gradient(to right, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control:hover,\n.carousel-control:focus {\n  color: #fff;\n  text-decoration: none;\n  outline: 0;\n  filter: alpha(opacity=90);\n  opacity: 0.9;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-left,\n.carousel-control .glyphicon-chevron-right {\n  position: absolute;\n  top: 50%;\n  z-index: 5;\n  display: inline-block;\n  margin-top: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .glyphicon-chevron-left {\n  left: 50%;\n  margin-left: -10px;\n}\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-right {\n  right: 50%;\n  margin-right: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control 
.icon-next {\n  width: 20px;\n  height: 20px;\n  font-family: serif;\n  line-height: 1;\n}\n.carousel-control .icon-prev:before {\n  content: \"\\2039\";\n}\n.carousel-control .icon-next:before {\n  content: \"\\203a\";\n}\n.carousel-indicators {\n  position: absolute;\n  bottom: 10px;\n  left: 50%;\n  z-index: 15;\n  width: 60%;\n  padding-left: 0;\n  margin-left: -30%;\n  text-align: center;\n  list-style: none;\n}\n.carousel-indicators li {\n  display: inline-block;\n  width: 10px;\n  height: 10px;\n  margin: 1px;\n  text-indent: -999px;\n  cursor: pointer;\n  background-color: #000 \\9;\n  background-color: rgba(0, 0, 0, 0);\n  border: 1px solid #fff;\n  border-radius: 10px;\n}\n.carousel-indicators .active {\n  width: 12px;\n  height: 12px;\n  margin: 0;\n  background-color: #fff;\n}\n.carousel-caption {\n  position: absolute;\n  right: 15%;\n  bottom: 20px;\n  left: 15%;\n  z-index: 10;\n  padding-top: 20px;\n  padding-bottom: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);\n}\n.carousel-caption .btn {\n  text-shadow: none;\n}\n@media screen and (min-width: 768px) {\n  .carousel-control .glyphicon-chevron-left,\n  .carousel-control .glyphicon-chevron-right,\n  .carousel-control .icon-prev,\n  .carousel-control .icon-next {\n    width: 30px;\n    height: 30px;\n    margin-top: -10px;\n    font-size: 30px;\n  }\n  .carousel-control .glyphicon-chevron-left,\n  .carousel-control .icon-prev {\n    margin-left: -10px;\n  }\n  .carousel-control .glyphicon-chevron-right,\n  .carousel-control .icon-next {\n    margin-right: -10px;\n  }\n  .carousel-caption {\n    right: 20%;\n    left: 20%;\n    padding-bottom: 30px;\n  }\n  .carousel-indicators {\n    bottom: 20px;\n  }\n}\n.clearfix:before,\n.clearfix:after,\n.dl-horizontal dd:before,\n.dl-horizontal dd:after,\n.container:before,\n.container:after,\n.container-fluid:before,\n.container-fluid:after,\n.row:before,\n.row:after,\n.form-horizontal .form-group:before,\n.form-horizontal .form-group:after,\n.btn-toolbar:before,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:before,\n.btn-group-vertical > .btn-group:after,\n.nav:before,\n.nav:after,\n.navbar:before,\n.navbar:after,\n.navbar-header:before,\n.navbar-header:after,\n.navbar-collapse:before,\n.navbar-collapse:after,\n.pager:before,\n.pager:after,\n.panel-body:before,\n.panel-body:after,\n.modal-header:before,\n.modal-header:after,\n.modal-footer:before,\n.modal-footer:after {\n  display: table;\n  content: \" \";\n}\n.clearfix:after,\n.dl-horizontal dd:after,\n.container:after,\n.container-fluid:after,\n.row:after,\n.form-horizontal .form-group:after,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:after,\n.nav:after,\n.navbar:after,\n.navbar-header:after,\n.navbar-collapse:after,\n.pager:after,\n.panel-body:after,\n.modal-header:after,\n.modal-footer:after {\n  clear: both;\n}\n.center-block {\n  display: block;\n  margin-right: auto;\n  margin-left: auto;\n}\n.pull-right {\n  float: right !important;\n}\n.pull-left {\n  float: left !important;\n}\n.hide {\n  display: none !important;\n}\n.show {\n  display: block !important;\n}\n.invisible {\n  visibility: hidden;\n}\n.text-hide {\n  font: 0/0 a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n.hidden {\n  display: none !important;\n}\n.affix {\n  position: fixed;\n}\n@-ms-viewport {\n  width: device-width;\n}\n.visible-xs,\n.visible-sm,\n.visible-md,\n.visible-lg {\n  display: none 
!important;\n}\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n  display: none !important;\n}\n@media (max-width: 767px) {\n  .visible-xs {\n    display: block !important;\n  }\n  table.visible-xs {\n    display: table !important;\n  }\n  tr.visible-xs {\n    display: table-row !important;\n  }\n  th.visible-xs,\n  td.visible-xs {\n    display: table-cell !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-block {\n    display: block !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-inline {\n    display: inline !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm {\n    display: block !important;\n  }\n  table.visible-sm {\n    display: table !important;\n  }\n  tr.visible-sm {\n    display: table-row !important;\n  }\n  th.visible-sm,\n  td.visible-sm {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-block {\n    display: block !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md {\n    display: block !important;\n  }\n  table.visible-md {\n    display: table !important;\n  }\n  tr.visible-md {\n    display: table-row !important;\n  }\n  th.visible-md,\n  td.visible-md {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-block {\n    display: block !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg {\n    display: block !important;\n  }\n  table.visible-lg {\n    display: table !important;\n  }\n  tr.visible-lg {\n    display: table-row !important;\n  }\n  th.visible-lg,\n  td.visible-lg {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-block {\n    display: block !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (max-width: 767px) {\n  .hidden-xs {\n    display: none !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .hidden-sm {\n    display: none !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .hidden-md {\n    display: none !important;\n  }\n}\n@media (min-width: 1200px) {\n  .hidden-lg {\n    display: none !important;\n  }\n}\n.visible-print {\n  display: none !important;\n}\n@media print {\n  .visible-print {\n    display: block !important;\n  }\n  table.visible-print {\n    display: table !important;\n  }\n  tr.visible-print {\n    display: table-row !important;\n  }\n  th.visible-print,\n  td.visible-print {\n    display: table-cell !important;\n  
}\n}\n.visible-print-block {\n  display: none !important;\n}\n@media print {\n  .visible-print-block {\n    display: block !important;\n  }\n}\n.visible-print-inline {\n  display: none !important;\n}\n@media print {\n  .visible-print-inline {\n    display: inline !important;\n  }\n}\n.visible-print-inline-block {\n  display: none !important;\n}\n@media print {\n  .visible-print-inline-block {\n    display: inline-block !important;\n  }\n}\n@media print {\n  .hidden-print {\n    display: none !important;\n  }\n}\n/*# sourceMappingURL=bootstrap.css.map */","// stylelint-disable\n\n/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */\n\n//\n// 1. Set default font family to sans-serif.\n// 2. Prevent iOS and IE text size adjust after device orientation change,\n//    without disabling user zoom.\n//\n\nhtml {\n  font-family: sans-serif; // 1\n  -ms-text-size-adjust: 100%; // 2\n  -webkit-text-size-adjust: 100%; // 2\n}\n\n//\n// Remove default margin.\n//\n\nbody {\n  margin: 0;\n}\n\n// HTML5 display definitions\n// ==========================================================================\n\n//\n// Correct `block` display not defined for any HTML5 element in IE 8/9.\n// Correct `block` display not defined for `details` or `summary` in IE 10/11\n// and Firefox.\n// Correct `block` display not defined for `main` in IE 11.\n//\n\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n  display: block;\n}\n\n//\n// 1. Correct `inline-block` display not defined in IE 8/9.\n// 2. Normalize vertical alignment of `progress` in Chrome, Firefox, and Opera.\n//\n\naudio,\ncanvas,\nprogress,\nvideo {\n  display: inline-block; // 1\n  vertical-align: baseline; // 2\n}\n\n//\n// Prevent modern browsers from displaying `audio` without controls.\n// Remove excess height in iOS 5 devices.\n//\n\naudio:not([controls]) {\n  display: none;\n  height: 0;\n}\n\n//\n// Address `[hidden]` styling not present in IE 8/9/10.\n// Hide the `template` element in IE 8/9/10/11, Safari, and Firefox < 22.\n//\n\n[hidden],\ntemplate {\n  display: none;\n}\n\n// Links\n// ==========================================================================\n\n//\n// Remove the gray background color from active links in IE 10.\n//\n\na {\n  background-color: transparent;\n}\n\n//\n// Improve readability of focused elements when they are also in an\n// active/hover state.\n//\n\na:active,\na:hover {\n  outline: 0;\n}\n\n// Text-level semantics\n// ==========================================================================\n\n//\n// 1. Remove the bottom border in Chrome 57- and Firefox 39-.\n// 2. 
Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.\n//\n\nabbr[title] {\n  border-bottom: none; // 1\n  text-decoration: underline; // 2\n  text-decoration: underline dotted; // 2\n}\n\n//\n// Address style set to `bolder` in Firefox 4+, Safari, and Chrome.\n//\n\nb,\nstrong {\n  font-weight: bold;\n}\n\n//\n// Address styling not present in Safari and Chrome.\n//\n\ndfn {\n  font-style: italic;\n}\n\n//\n// Address variable `h1` font-size and margin within `section` and `article`\n// contexts in Firefox 4+, Safari, and Chrome.\n//\n\nh1 {\n  font-size: 2em;\n  margin: 0.67em 0;\n}\n\n//\n// Address styling not present in IE 8/9.\n//\n\nmark {\n  background: #ff0;\n  color: #000;\n}\n\n//\n// Address inconsistent and variable font size in all browsers.\n//\n\nsmall {\n  font-size: 80%;\n}\n\n//\n// Prevent `sub` and `sup` affecting `line-height` in all browsers.\n//\n\nsub,\nsup {\n  font-size: 75%;\n  line-height: 0;\n  position: relative;\n  vertical-align: baseline;\n}\n\nsup {\n  top: -0.5em;\n}\n\nsub {\n  bottom: -0.25em;\n}\n\n// Embedded content\n// ==========================================================================\n\n//\n// Remove border when inside `a` element in IE 8/9/10.\n//\n\nimg {\n  border: 0;\n}\n\n//\n// Correct overflow not hidden in IE 9/10/11.\n//\n\nsvg:not(:root) {\n  overflow: hidden;\n}\n\n// Grouping content\n// ==========================================================================\n\n//\n// Address margin not present in IE 8/9 and Safari.\n//\n\nfigure {\n  margin: 1em 40px;\n}\n\n//\n// Address differences between Firefox and other browsers.\n//\n\nhr {\n  box-sizing: content-box;\n  height: 0;\n}\n\n//\n// Contain overflow in all browsers.\n//\n\npre {\n  overflow: auto;\n}\n\n//\n// Address odd `em`-unit font size rendering in all browsers.\n//\n\ncode,\nkbd,\npre,\nsamp {\n  font-family: monospace, monospace;\n  font-size: 1em;\n}\n\n// Forms\n// ==========================================================================\n\n//\n// Known limitation: by default, Chrome and Safari on OS X allow very limited\n// styling of `select`, unless a `border` property is set.\n//\n\n//\n// 1. Correct color not being inherited.\n//    Known issue: affects color of disabled elements.\n// 2. Correct font properties not being inherited.\n// 3. Address margins set differently in Firefox 4+, Safari, and Chrome.\n//\n\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n  color: inherit; // 1\n  font: inherit; // 2\n  margin: 0; // 3\n}\n\n//\n// Address `overflow` set to `hidden` in IE 8/9/10/11.\n//\n\nbutton {\n  overflow: visible;\n}\n\n//\n// Address inconsistent `text-transform` inheritance for `button` and `select`.\n// All other form control elements do not inherit `text-transform` values.\n// Correct `button` style inheritance in Firefox, IE 8/9/10/11, and Opera.\n// Correct `select` style inheritance in Firefox.\n//\n\nbutton,\nselect {\n  text-transform: none;\n}\n\n//\n// 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio`\n//    and `video` controls.\n// 2. Correct inability to style clickable `input` types in iOS.\n// 3. 
Improve usability and consistency of cursor style between image-type\n//    `input` and others.\n//\n\nbutton,\nhtml input[type=\"button\"], // 1\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n  -webkit-appearance: button; // 2\n  cursor: pointer; // 3\n}\n\n//\n// Re-set default cursor for disabled elements.\n//\n\nbutton[disabled],\nhtml input[disabled] {\n  cursor: default;\n}\n\n//\n// Remove inner padding and border in Firefox 4+.\n//\n\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n  border: 0;\n  padding: 0;\n}\n\n//\n// Address Firefox 4+ setting `line-height` on `input` using `!important` in\n// the UA stylesheet.\n//\n\ninput {\n  line-height: normal;\n}\n\n//\n// It's recommended that you don't attempt to style these elements.\n// Firefox's implementation doesn't respect box-sizing, padding, or width.\n//\n// 1. Address box sizing set to `content-box` in IE 8/9/10.\n// 2. Remove excess padding in IE 8/9/10.\n//\n\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n  box-sizing: border-box; // 1\n  padding: 0; // 2\n}\n\n//\n// Fix the cursor style for Chrome's increment/decrement buttons. For certain\n// `font-size` values of the `input`, it causes the cursor style of the\n// decrement button to change from `default` to `text`.\n//\n\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n  height: auto;\n}\n\n//\n// 1. Address `appearance` set to `searchfield` in Safari and Chrome.\n// 2. Address `box-sizing` set to `border-box` in Safari and Chrome.\n//\n\ninput[type=\"search\"] {\n  -webkit-appearance: textfield; // 1\n  box-sizing: content-box; //2\n}\n\n//\n// Remove inner padding and search cancel button in Safari and Chrome on OS X.\n// Safari (but not Chrome) clips the cancel button when the search input has\n// padding (and `textfield` appearance).\n//\n\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n  -webkit-appearance: none;\n}\n\n//\n// Define consistent border, margin, and padding.\n//\n\nfieldset {\n  border: 1px solid #c0c0c0;\n  margin: 0 2px;\n  padding: 0.35em 0.625em 0.75em;\n}\n\n//\n// 1. Correct `color` not being inherited in IE 8/9/10/11.\n// 2. Remove padding so people aren't caught out if they zero out fieldsets.\n//\n\nlegend {\n  border: 0; // 1\n  padding: 0; // 2\n}\n\n//\n// Remove default vertical scrollbar in IE 8/9/10/11.\n//\n\ntextarea {\n  overflow: auto;\n}\n\n//\n// Don't inherit the `font-weight` (applied by a rule above).\n// NOTE: the default cannot safely be changed in Chrome and Safari on OS X.\n//\n\noptgroup {\n  font-weight: bold;\n}\n\n// Tables\n// ==========================================================================\n\n//\n// Remove most spacing between table cells.\n//\n\ntable {\n  border-collapse: collapse;\n  border-spacing: 0;\n}\n\ntd,\nth {\n  padding: 0;\n}\n","// stylelint-disable declaration-no-important, selector-no-qualifying-type\n\n/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n\n// ==========================================================================\n// Print styles.\n// Inlined to avoid the additional HTTP request: h5bp.com/r\n// ==========================================================================\n\n@media print {\n  *,\n  *:before,\n  *:after {\n    color: #000 !important; // Black prints faster: h5bp.com/s\n    text-shadow: none !important;\n    background: transparent !important;\n    box-shadow: none !important;\n  }\n\n  a,\n  a:visited {\n    text-decoration: underline;\n  }\n\n  a[href]:after {\n    content: \" (\" attr(href) \")\";\n  }\n\n  abbr[title]:after {\n    content: \" (\" attr(title) \")\";\n  }\n\n  // Don't show links that are fragment identifiers,\n  // or use the `javascript:` pseudo protocol\n  a[href^=\"#\"]:after,\n  a[href^=\"javascript:\"]:after {\n    content: \"\";\n  }\n\n  pre,\n  blockquote {\n    border: 1px solid #999;\n    page-break-inside: avoid;\n  }\n\n  thead {\n    display: table-header-group; // h5bp.com/t\n  }\n\n  tr,\n  img {\n    page-break-inside: avoid;\n  }\n\n  img {\n    max-width: 100% !important;\n  }\n\n  p,\n  h2,\n  h3 {\n    orphans: 3;\n    widows: 3;\n  }\n\n  h2,\n  h3 {\n    page-break-after: avoid;\n  }\n\n  // Bootstrap specific changes start\n\n  // Bootstrap components\n  .navbar {\n    display: none;\n  }\n  .btn,\n  .dropup > .btn {\n    > .caret {\n      border-top-color: #000 !important;\n    }\n  }\n  .label {\n    border: 1px solid #000;\n  }\n\n  .table {\n    border-collapse: collapse !important;\n\n    td,\n    th {\n      background-color: #fff !important;\n    }\n  }\n  .table-bordered {\n    th,\n    td {\n      border: 1px solid #ddd !important;\n    }\n  }\n}\n","// stylelint-disable value-list-comma-newline-after, value-list-comma-space-after, indentation, declaration-colon-newline-after, font-family-no-missing-generic-family-keyword\n\n//\n// Glyphicons for Bootstrap\n//\n// Since icons are fonts, they can be placed anywhere text is placed and are\n// thus automatically sized to match the surrounding child. 
To use, create an\n// inline element with the appropriate classes, like so:\n//\n// <a href=\"#\"><span class=\"glyphicon glyphicon-star\"></span> Star</a>\n\n// Import the fonts\n@font-face {\n  font-family: \"Glyphicons Halflings\";\n  src: url(\"@{icon-font-path}@{icon-font-name}.eot\");\n  src: url(\"@{icon-font-path}@{icon-font-name}.eot?#iefix\") format(\"embedded-opentype\"),\n       url(\"@{icon-font-path}@{icon-font-name}.woff2\") format(\"woff2\"),\n       url(\"@{icon-font-path}@{icon-font-name}.woff\") format(\"woff\"),\n       url(\"@{icon-font-path}@{icon-font-name}.ttf\") format(\"truetype\"),\n       url(\"@{icon-font-path}@{icon-font-name}.svg#@{icon-font-svg-id}\") format(\"svg\");\n}\n\n// Catchall baseclass\n.glyphicon {\n  position: relative;\n  top: 1px;\n  display: inline-block;\n  font-family: \"Glyphicons Halflings\";\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1;\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n\n// Individual icons\n.glyphicon-asterisk               { &:before { content: \"\\002a\"; } }\n.glyphicon-plus                   { &:before { content: \"\\002b\"; } }\n.glyphicon-euro,\n.glyphicon-eur                    { &:before { content: \"\\20ac\"; } }\n.glyphicon-minus                  { &:before { content: \"\\2212\"; } }\n.glyphicon-cloud                  { &:before { content: \"\\2601\"; } }\n.glyphicon-envelope               { &:before { content: \"\\2709\"; } }\n.glyphicon-pencil                 { &:before { content: \"\\270f\"; } }\n.glyphicon-glass                  { &:before { content: \"\\e001\"; } }\n.glyphicon-music                  { &:before { content: \"\\e002\"; } }\n.glyphicon-search                 { &:before { content: \"\\e003\"; } }\n.glyphicon-heart                  { &:before { content: \"\\e005\"; } }\n.glyphicon-star                   { &:before { content: \"\\e006\"; } }\n.glyphicon-star-empty             { &:before { content: \"\\e007\"; } }\n.glyphicon-user                   { &:before { content: \"\\e008\"; } }\n.glyphicon-film                   { &:before { content: \"\\e009\"; } }\n.glyphicon-th-large               { &:before { content: \"\\e010\"; } }\n.glyphicon-th                     { &:before { content: \"\\e011\"; } }\n.glyphicon-th-list                { &:before { content: \"\\e012\"; } }\n.glyphicon-ok                     { &:before { content: \"\\e013\"; } }\n.glyphicon-remove                 { &:before { content: \"\\e014\"; } }\n.glyphicon-zoom-in                { &:before { content: \"\\e015\"; } }\n.glyphicon-zoom-out               { &:before { content: \"\\e016\"; } }\n.glyphicon-off                    { &:before { content: \"\\e017\"; } }\n.glyphicon-signal                 { &:before { content: \"\\e018\"; } }\n.glyphicon-cog                    { &:before { content: \"\\e019\"; } }\n.glyphicon-trash                  { &:before { content: \"\\e020\"; } }\n.glyphicon-home                   { &:before { content: \"\\e021\"; } }\n.glyphicon-file                   { &:before { content: \"\\e022\"; } }\n.glyphicon-time                   { &:before { content: \"\\e023\"; } }\n.glyphicon-road                   { &:before { content: \"\\e024\"; } }\n.glyphicon-download-alt           { &:before { content: \"\\e025\"; } }\n.glyphicon-download               { &:before { content: \"\\e026\"; } }\n.glyphicon-upload                 { &:before { content: \"\\e027\"; } }\n.glyphicon-inbox                  { &:before { content: \"\\e028\"; } }\n.glyphicon-play-circle         
   { &:before { content: \"\\e029\"; } }\n.glyphicon-repeat                 { &:before { content: \"\\e030\"; } }\n.glyphicon-refresh                { &:before { content: \"\\e031\"; } }\n.glyphicon-list-alt               { &:before { content: \"\\e032\"; } }\n.glyphicon-lock                   { &:before { content: \"\\e033\"; } }\n.glyphicon-flag                   { &:before { content: \"\\e034\"; } }\n.glyphicon-headphones             { &:before { content: \"\\e035\"; } }\n.glyphicon-volume-off             { &:before { content: \"\\e036\"; } }\n.glyphicon-volume-down            { &:before { content: \"\\e037\"; } }\n.glyphicon-volume-up              { &:before { content: \"\\e038\"; } }\n.glyphicon-qrcode                 { &:before { content: \"\\e039\"; } }\n.glyphicon-barcode                { &:before { content: \"\\e040\"; } }\n.glyphicon-tag                    { &:before { content: \"\\e041\"; } }\n.glyphicon-tags                   { &:before { content: \"\\e042\"; } }\n.glyphicon-book                   { &:before { content: \"\\e043\"; } }\n.glyphicon-bookmark               { &:before { content: \"\\e044\"; } }\n.glyphicon-print                  { &:before { content: \"\\e045\"; } }\n.glyphicon-camera                 { &:before { content: \"\\e046\"; } }\n.glyphicon-font                   { &:before { content: \"\\e047\"; } }\n.glyphicon-bold                   { &:before { content: \"\\e048\"; } }\n.glyphicon-italic                 { &:before { content: \"\\e049\"; } }\n.glyphicon-text-height            { &:before { content: \"\\e050\"; } }\n.glyphicon-text-width             { &:before { content: \"\\e051\"; } }\n.glyphicon-align-left             { &:before { content: \"\\e052\"; } }\n.glyphicon-align-center           { &:before { content: \"\\e053\"; } }\n.glyphicon-align-right            { &:before { content: \"\\e054\"; } }\n.glyphicon-align-justify          { &:before { content: \"\\e055\"; } }\n.glyphicon-list                   { &:before { content: \"\\e056\"; } }\n.glyphicon-indent-left            { &:before { content: \"\\e057\"; } }\n.glyphicon-indent-right           { &:before { content: \"\\e058\"; } }\n.glyphicon-facetime-video         { &:before { content: \"\\e059\"; } }\n.glyphicon-picture                { &:before { content: \"\\e060\"; } }\n.glyphicon-map-marker             { &:before { content: \"\\e062\"; } }\n.glyphicon-adjust                 { &:before { content: \"\\e063\"; } }\n.glyphicon-tint                   { &:before { content: \"\\e064\"; } }\n.glyphicon-edit                   { &:before { content: \"\\e065\"; } }\n.glyphicon-share                  { &:before { content: \"\\e066\"; } }\n.glyphicon-check                  { &:before { content: \"\\e067\"; } }\n.glyphicon-move                   { &:before { content: \"\\e068\"; } }\n.glyphicon-step-backward          { &:before { content: \"\\e069\"; } }\n.glyphicon-fast-backward          { &:before { content: \"\\e070\"; } }\n.glyphicon-backward               { &:before { content: \"\\e071\"; } }\n.glyphicon-play                   { &:before { content: \"\\e072\"; } }\n.glyphicon-pause                  { &:before { content: \"\\e073\"; } }\n.glyphicon-stop                   { &:before { content: \"\\e074\"; } }\n.glyphicon-forward                { &:before { content: \"\\e075\"; } }\n.glyphicon-fast-forward           { &:before { content: \"\\e076\"; } }\n.glyphicon-step-forward           { &:before { content: \"\\e077\"; } }\n.glyphicon-eject                  { &:before { content: \"\\e078\"; } 
}\n.glyphicon-chevron-left           { &:before { content: \"\\e079\"; } }\n.glyphicon-chevron-right          { &:before { content: \"\\e080\"; } }\n.glyphicon-plus-sign              { &:before { content: \"\\e081\"; } }\n.glyphicon-minus-sign             { &:before { content: \"\\e082\"; } }\n.glyphicon-remove-sign            { &:before { content: \"\\e083\"; } }\n.glyphicon-ok-sign                { &:before { content: \"\\e084\"; } }\n.glyphicon-question-sign          { &:before { content: \"\\e085\"; } }\n.glyphicon-info-sign              { &:before { content: \"\\e086\"; } }\n.glyphicon-screenshot             { &:before { content: \"\\e087\"; } }\n.glyphicon-remove-circle          { &:before { content: \"\\e088\"; } }\n.glyphicon-ok-circle              { &:before { content: \"\\e089\"; } }\n.glyphicon-ban-circle             { &:before { content: \"\\e090\"; } }\n.glyphicon-arrow-left             { &:before { content: \"\\e091\"; } }\n.glyphicon-arrow-right            { &:before { content: \"\\e092\"; } }\n.glyphicon-arrow-up               { &:before { content: \"\\e093\"; } }\n.glyphicon-arrow-down             { &:before { content: \"\\e094\"; } }\n.glyphicon-share-alt              { &:before { content: \"\\e095\"; } }\n.glyphicon-resize-full            { &:before { content: \"\\e096\"; } }\n.glyphicon-resize-small           { &:before { content: \"\\e097\"; } }\n.glyphicon-exclamation-sign       { &:before { content: \"\\e101\"; } }\n.glyphicon-gift                   { &:before { content: \"\\e102\"; } }\n.glyphicon-leaf                   { &:before { content: \"\\e103\"; } }\n.glyphicon-fire                   { &:before { content: \"\\e104\"; } }\n.glyphicon-eye-open               { &:before { content: \"\\e105\"; } }\n.glyphicon-eye-close              { &:before { content: \"\\e106\"; } }\n.glyphicon-warning-sign           { &:before { content: \"\\e107\"; } }\n.glyphicon-plane                  { &:before { content: \"\\e108\"; } }\n.glyphicon-calendar               { &:before { content: \"\\e109\"; } }\n.glyphicon-random                 { &:before { content: \"\\e110\"; } }\n.glyphicon-comment                { &:before { content: \"\\e111\"; } }\n.glyphicon-magnet                 { &:before { content: \"\\e112\"; } }\n.glyphicon-chevron-up             { &:before { content: \"\\e113\"; } }\n.glyphicon-chevron-down           { &:before { content: \"\\e114\"; } }\n.glyphicon-retweet                { &:before { content: \"\\e115\"; } }\n.glyphicon-shopping-cart          { &:before { content: \"\\e116\"; } }\n.glyphicon-folder-close           { &:before { content: \"\\e117\"; } }\n.glyphicon-folder-open            { &:before { content: \"\\e118\"; } }\n.glyphicon-resize-vertical        { &:before { content: \"\\e119\"; } }\n.glyphicon-resize-horizontal      { &:before { content: \"\\e120\"; } }\n.glyphicon-hdd                    { &:before { content: \"\\e121\"; } }\n.glyphicon-bullhorn               { &:before { content: \"\\e122\"; } }\n.glyphicon-bell                   { &:before { content: \"\\e123\"; } }\n.glyphicon-certificate            { &:before { content: \"\\e124\"; } }\n.glyphicon-thumbs-up              { &:before { content: \"\\e125\"; } }\n.glyphicon-thumbs-down            { &:before { content: \"\\e126\"; } }\n.glyphicon-hand-right             { &:before { content: \"\\e127\"; } }\n.glyphicon-hand-left              { &:before { content: \"\\e128\"; } }\n.glyphicon-hand-up                { &:before { content: \"\\e129\"; } }\n.glyphicon-hand-down              { &:before { 
content: \"\\e130\"; } }\n.glyphicon-circle-arrow-right     { &:before { content: \"\\e131\"; } }\n.glyphicon-circle-arrow-left      { &:before { content: \"\\e132\"; } }\n.glyphicon-circle-arrow-up        { &:before { content: \"\\e133\"; } }\n.glyphicon-circle-arrow-down      { &:before { content: \"\\e134\"; } }\n.glyphicon-globe                  { &:before { content: \"\\e135\"; } }\n.glyphicon-wrench                 { &:before { content: \"\\e136\"; } }\n.glyphicon-tasks                  { &:before { content: \"\\e137\"; } }\n.glyphicon-filter                 { &:before { content: \"\\e138\"; } }\n.glyphicon-briefcase              { &:before { content: \"\\e139\"; } }\n.glyphicon-fullscreen             { &:before { content: \"\\e140\"; } }\n.glyphicon-dashboard              { &:before { content: \"\\e141\"; } }\n.glyphicon-paperclip              { &:before { content: \"\\e142\"; } }\n.glyphicon-heart-empty            { &:before { content: \"\\e143\"; } }\n.glyphicon-link                   { &:before { content: \"\\e144\"; } }\n.glyphicon-phone                  { &:before { content: \"\\e145\"; } }\n.glyphicon-pushpin                { &:before { content: \"\\e146\"; } }\n.glyphicon-usd                    { &:before { content: \"\\e148\"; } }\n.glyphicon-gbp                    { &:before { content: \"\\e149\"; } }\n.glyphicon-sort                   { &:before { content: \"\\e150\"; } }\n.glyphicon-sort-by-alphabet       { &:before { content: \"\\e151\"; } }\n.glyphicon-sort-by-alphabet-alt   { &:before { content: \"\\e152\"; } }\n.glyphicon-sort-by-order          { &:before { content: \"\\e153\"; } }\n.glyphicon-sort-by-order-alt      { &:before { content: \"\\e154\"; } }\n.glyphicon-sort-by-attributes     { &:before { content: \"\\e155\"; } }\n.glyphicon-sort-by-attributes-alt { &:before { content: \"\\e156\"; } }\n.glyphicon-unchecked              { &:before { content: \"\\e157\"; } }\n.glyphicon-expand                 { &:before { content: \"\\e158\"; } }\n.glyphicon-collapse-down          { &:before { content: \"\\e159\"; } }\n.glyphicon-collapse-up            { &:before { content: \"\\e160\"; } }\n.glyphicon-log-in                 { &:before { content: \"\\e161\"; } }\n.glyphicon-flash                  { &:before { content: \"\\e162\"; } }\n.glyphicon-log-out                { &:before { content: \"\\e163\"; } }\n.glyphicon-new-window             { &:before { content: \"\\e164\"; } }\n.glyphicon-record                 { &:before { content: \"\\e165\"; } }\n.glyphicon-save                   { &:before { content: \"\\e166\"; } }\n.glyphicon-open                   { &:before { content: \"\\e167\"; } }\n.glyphicon-saved                  { &:before { content: \"\\e168\"; } }\n.glyphicon-import                 { &:before { content: \"\\e169\"; } }\n.glyphicon-export                 { &:before { content: \"\\e170\"; } }\n.glyphicon-send                   { &:before { content: \"\\e171\"; } }\n.glyphicon-floppy-disk            { &:before { content: \"\\e172\"; } }\n.glyphicon-floppy-saved           { &:before { content: \"\\e173\"; } }\n.glyphicon-floppy-remove          { &:before { content: \"\\e174\"; } }\n.glyphicon-floppy-save            { &:before { content: \"\\e175\"; } }\n.glyphicon-floppy-open            { &:before { content: \"\\e176\"; } }\n.glyphicon-credit-card            { &:before { content: \"\\e177\"; } }\n.glyphicon-transfer               { &:before { content: \"\\e178\"; } }\n.glyphicon-cutlery                { &:before { content: \"\\e179\"; } }\n.glyphicon-header        
         { &:before { content: \"\\e180\"; } }\n.glyphicon-compressed             { &:before { content: \"\\e181\"; } }\n.glyphicon-earphone               { &:before { content: \"\\e182\"; } }\n.glyphicon-phone-alt              { &:before { content: \"\\e183\"; } }\n.glyphicon-tower                  { &:before { content: \"\\e184\"; } }\n.glyphicon-stats                  { &:before { content: \"\\e185\"; } }\n.glyphicon-sd-video               { &:before { content: \"\\e186\"; } }\n.glyphicon-hd-video               { &:before { content: \"\\e187\"; } }\n.glyphicon-subtitles              { &:before { content: \"\\e188\"; } }\n.glyphicon-sound-stereo           { &:before { content: \"\\e189\"; } }\n.glyphicon-sound-dolby            { &:before { content: \"\\e190\"; } }\n.glyphicon-sound-5-1              { &:before { content: \"\\e191\"; } }\n.glyphicon-sound-6-1              { &:before { content: \"\\e192\"; } }\n.glyphicon-sound-7-1              { &:before { content: \"\\e193\"; } }\n.glyphicon-copyright-mark         { &:before { content: \"\\e194\"; } }\n.glyphicon-registration-mark      { &:before { content: \"\\e195\"; } }\n.glyphicon-cloud-download         { &:before { content: \"\\e197\"; } }\n.glyphicon-cloud-upload           { &:before { content: \"\\e198\"; } }\n.glyphicon-tree-conifer           { &:before { content: \"\\e199\"; } }\n.glyphicon-tree-deciduous         { &:before { content: \"\\e200\"; } }\n.glyphicon-cd                     { &:before { content: \"\\e201\"; } }\n.glyphicon-save-file              { &:before { content: \"\\e202\"; } }\n.glyphicon-open-file              { &:before { content: \"\\e203\"; } }\n.glyphicon-level-up               { &:before { content: \"\\e204\"; } }\n.glyphicon-copy                   { &:before { content: \"\\e205\"; } }\n.glyphicon-paste                  { &:before { content: \"\\e206\"; } }\n// The following 2 Glyphicons are omitted for the time being because\n// they currently use Unicode codepoints that are outside the\n// Basic Multilingual Plane (BMP). 
Older buggy versions of WebKit can't handle\n// non-BMP codepoints in CSS string escapes, and thus can't display these two icons.\n// Notably, the bug affects some older versions of the Android Browser.\n// More info: https://github.com/twbs/bootstrap/issues/10106\n// .glyphicon-door                   { &:before { content: \"\\1f6aa\"; } }\n// .glyphicon-key                    { &:before { content: \"\\1f511\"; } }\n.glyphicon-alert                  { &:before { content: \"\\e209\"; } }\n.glyphicon-equalizer              { &:before { content: \"\\e210\"; } }\n.glyphicon-king                   { &:before { content: \"\\e211\"; } }\n.glyphicon-queen                  { &:before { content: \"\\e212\"; } }\n.glyphicon-pawn                   { &:before { content: \"\\e213\"; } }\n.glyphicon-bishop                 { &:before { content: \"\\e214\"; } }\n.glyphicon-knight                 { &:before { content: \"\\e215\"; } }\n.glyphicon-baby-formula           { &:before { content: \"\\e216\"; } }\n.glyphicon-tent                   { &:before { content: \"\\26fa\"; } }\n.glyphicon-blackboard             { &:before { content: \"\\e218\"; } }\n.glyphicon-bed                    { &:before { content: \"\\e219\"; } }\n.glyphicon-apple                  { &:before { content: \"\\f8ff\"; } }\n.glyphicon-erase                  { &:before { content: \"\\e221\"; } }\n.glyphicon-hourglass              { &:before { content: \"\\231b\"; } }\n.glyphicon-lamp                   { &:before { content: \"\\e223\"; } }\n.glyphicon-duplicate              { &:before { content: \"\\e224\"; } }\n.glyphicon-piggy-bank             { &:before { content: \"\\e225\"; } }\n.glyphicon-scissors               { &:before { content: \"\\e226\"; } }\n.glyphicon-bitcoin                { &:before { content: \"\\e227\"; } }\n.glyphicon-btc                    { &:before { content: \"\\e227\"; } }\n.glyphicon-xbt                    { &:before { content: \"\\e227\"; } }\n.glyphicon-yen                    { &:before { content: \"\\00a5\"; } }\n.glyphicon-jpy                    { &:before { content: \"\\00a5\"; } }\n.glyphicon-ruble                  { &:before { content: \"\\20bd\"; } }\n.glyphicon-rub                    { &:before { content: \"\\20bd\"; } }\n.glyphicon-scale                  { &:before { content: \"\\e230\"; } }\n.glyphicon-ice-lolly              { &:before { content: \"\\e231\"; } }\n.glyphicon-ice-lolly-tasted       { &:before { content: \"\\e232\"; } }\n.glyphicon-education              { &:before { content: \"\\e233\"; } }\n.glyphicon-option-horizontal      { &:before { content: \"\\e234\"; } }\n.glyphicon-option-vertical        { &:before { content: \"\\e235\"; } }\n.glyphicon-menu-hamburger         { &:before { content: \"\\e236\"; } }\n.glyphicon-modal-window           { &:before { content: \"\\e237\"; } }\n.glyphicon-oil                    { &:before { content: \"\\e238\"; } }\n.glyphicon-grain                  { &:before { content: \"\\e239\"; } }\n.glyphicon-sunglasses             { &:before { content: \"\\e240\"; } }\n.glyphicon-text-size              { &:before { content: \"\\e241\"; } }\n.glyphicon-text-color             { &:before { content: \"\\e242\"; } }\n.glyphicon-text-background        { &:before { content: \"\\e243\"; } }\n.glyphicon-object-align-top       { &:before { content: \"\\e244\"; } }\n.glyphicon-object-align-bottom    { &:before { content: \"\\e245\"; } }\n.glyphicon-object-align-horizontal{ &:before { content: \"\\e246\"; } }\n.glyphicon-object-align-left      { &:before { content: \"\\e247\"; 
} }\n.glyphicon-object-align-vertical  { &:before { content: \"\\e248\"; } }\n.glyphicon-object-align-right     { &:before { content: \"\\e249\"; } }\n.glyphicon-triangle-right         { &:before { content: \"\\e250\"; } }\n.glyphicon-triangle-left          { &:before { content: \"\\e251\"; } }\n.glyphicon-triangle-bottom        { &:before { content: \"\\e252\"; } }\n.glyphicon-triangle-top           { &:before { content: \"\\e253\"; } }\n.glyphicon-console                { &:before { content: \"\\e254\"; } }\n.glyphicon-superscript            { &:before { content: \"\\e255\"; } }\n.glyphicon-subscript              { &:before { content: \"\\e256\"; } }\n.glyphicon-menu-left              { &:before { content: \"\\e257\"; } }\n.glyphicon-menu-right             { &:before { content: \"\\e258\"; } }\n.glyphicon-menu-down              { &:before { content: \"\\e259\"; } }\n.glyphicon-menu-up                { &:before { content: \"\\e260\"; } }\n","//\n// Scaffolding\n// --------------------------------------------------\n\n\n// Reset the box-sizing\n//\n// Heads up! This reset may cause conflicts with some third-party widgets.\n// For recommendations on resolving such conflicts, see\n// https://getbootstrap.com/docs/3.4/getting-started/#third-box-sizing\n* {\n  .box-sizing(border-box);\n}\n*:before,\n*:after {\n  .box-sizing(border-box);\n}\n\n\n// Body reset\n\nhtml {\n  font-size: 10px;\n  -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\n\nbody {\n  font-family: @font-family-base;\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @text-color;\n  background-color: @body-bg;\n}\n\n// Reset fonts for relevant elements\ninput,\nbutton,\nselect,\ntextarea {\n  font-family: inherit;\n  font-size: inherit;\n  line-height: inherit;\n}\n\n\n// Links\n\na {\n  color: @link-color;\n  text-decoration: none;\n\n  &:hover,\n  &:focus {\n    color: @link-hover-color;\n    text-decoration: @link-hover-decoration;\n  }\n\n  &:focus {\n    .tab-focus();\n  }\n}\n\n\n// Figures\n//\n// We reset this here because previously Normalize had no `figure` margins. This\n// ensures we don't break anyone's use of the element.\n\nfigure {\n  margin: 0;\n}\n\n\n// Images\n\nimg {\n  vertical-align: middle;\n}\n\n// Responsive images (ensure images don't scale beyond their parents)\n.img-responsive {\n  .img-responsive();\n}\n\n// Rounded corners\n.img-rounded {\n  border-radius: @border-radius-large;\n}\n\n// Image thumbnails\n//\n// Heads up! 
This is mixin-ed into thumbnails.less for `.thumbnail`.\n.img-thumbnail {\n  padding: @thumbnail-padding;\n  line-height: @line-height-base;\n  background-color: @thumbnail-bg;\n  border: 1px solid @thumbnail-border;\n  border-radius: @thumbnail-border-radius;\n  .transition(all .2s ease-in-out);\n\n  // Keep them at most 100% wide\n  .img-responsive(inline-block);\n}\n\n// Perfect circle\n.img-circle {\n  border-radius: 50%; // set radius in percents\n}\n\n\n// Horizontal rules\n\nhr {\n  margin-top: @line-height-computed;\n  margin-bottom: @line-height-computed;\n  border: 0;\n  border-top: 1px solid @hr-border;\n}\n\n\n// Only display content to screen readers\n//\n// See: https://a11yproject.com/posts/how-to-hide-content\n\n.sr-only {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  padding: 0;\n  margin: -1px;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n}\n\n// Use in conjunction with .sr-only to only display content when it's focused.\n// Useful for \"Skip to main content\" links; see https://www.w3.org/TR/2013/NOTE-WCAG20-TECHS-20130905/G1\n// Credit: HTML5 Boilerplate\n\n.sr-only-focusable {\n  &:active,\n  &:focus {\n    position: static;\n    width: auto;\n    height: auto;\n    margin: 0;\n    overflow: visible;\n    clip: auto;\n  }\n}\n\n\n// iOS \"clickable elements\" fix for role=\"button\"\n//\n// Fixes \"clickability\" issue (and more generally, the firing of events such as focus as well)\n// for traditionally non-focusable elements with role=\"button\"\n// see https://developer.mozilla.org/en-US/docs/Web/Events/click#Safari_Mobile\n\n[role=\"button\"] {\n  cursor: pointer;\n}\n","// stylelint-disable indentation, property-no-vendor-prefix, selector-no-vendor-prefix\n\n// Vendor Prefixes\n//\n// All vendor mixins are deprecated as of v3.2.0 due to the introduction of\n// Autoprefixer in our Gruntfile. 
They have been removed in v4.\n\n// - Animations\n// - Backface visibility\n// - Box shadow\n// - Box sizing\n// - Content columns\n// - Hyphens\n// - Placeholder text\n// - Transformations\n// - Transitions\n// - User Select\n\n\n// Animations\n.animation(@animation) {\n  -webkit-animation: @animation;\n       -o-animation: @animation;\n          animation: @animation;\n}\n.animation-name(@name) {\n  -webkit-animation-name: @name;\n          animation-name: @name;\n}\n.animation-duration(@duration) {\n  -webkit-animation-duration: @duration;\n          animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n  -webkit-animation-timing-function: @timing-function;\n          animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n  -webkit-animation-delay: @delay;\n          animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n  -webkit-animation-iteration-count: @iteration-count;\n          animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n  -webkit-animation-direction: @direction;\n          animation-direction: @direction;\n}\n.animation-fill-mode(@fill-mode) {\n  -webkit-animation-fill-mode: @fill-mode;\n          animation-fill-mode: @fill-mode;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n\n.backface-visibility(@visibility) {\n  -webkit-backface-visibility: @visibility;\n     -moz-backface-visibility: @visibility;\n          backface-visibility: @visibility;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n// supported browsers that have box shadow capabilities now support it.\n\n.box-shadow(@shadow) {\n  -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n          box-shadow: @shadow;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n  -webkit-box-sizing: @boxmodel;\n     -moz-box-sizing: @boxmodel;\n          box-sizing: @boxmodel;\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n  -webkit-column-count: @column-count;\n     -moz-column-count: @column-count;\n          column-count: @column-count;\n  -webkit-column-gap: @column-gap;\n     -moz-column-gap: @column-gap;\n          column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n  -webkit-hyphens: @mode;\n     -moz-hyphens: @mode;\n      -ms-hyphens: @mode; // IE10+\n       -o-hyphens: @mode;\n          hyphens: @mode;\n  word-wrap: break-word;\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n  // Firefox\n  &::-moz-placeholder {\n    color: @color;\n    opacity: 1; // Override Firefox's unusual default opacity; see https://github.com/twbs/bootstrap/pull/11526\n  }\n  &:-ms-input-placeholder { color: @color; } // Internet Explorer 10+\n  &::-webkit-input-placeholder  { color: @color; } // Safari and Chrome\n}\n\n// Transformations\n.scale(@ratio) {\n  -webkit-transform: scale(@ratio);\n      -ms-transform: scale(@ratio); // IE9 only\n       -o-transform: scale(@ratio);\n          transform: scale(@ratio);\n}\n.scale(@ratioX; @ratioY) {\n  -webkit-transform: scale(@ratioX, @ratioY);\n      -ms-transform: scale(@ratioX, @ratioY); // IE9 only\n       -o-transform: scale(@ratioX, @ratioY);\n          transform: scale(@ratioX, @ratioY);\n}\n.scaleX(@ratio) {\n  -webkit-transform: scaleX(@ratio);\n      -ms-transform: scaleX(@ratio); // IE9 only\n       
-o-transform: scaleX(@ratio);\n          transform: scaleX(@ratio);\n}\n.scaleY(@ratio) {\n  -webkit-transform: scaleY(@ratio);\n      -ms-transform: scaleY(@ratio); // IE9 only\n       -o-transform: scaleY(@ratio);\n          transform: scaleY(@ratio);\n}\n.skew(@x; @y) {\n  -webkit-transform: skewX(@x) skewY(@y);\n      -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n       -o-transform: skewX(@x) skewY(@y);\n          transform: skewX(@x) skewY(@y);\n}\n.translate(@x; @y) {\n  -webkit-transform: translate(@x, @y);\n      -ms-transform: translate(@x, @y); // IE9 only\n       -o-transform: translate(@x, @y);\n          transform: translate(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n  -webkit-transform: translate3d(@x, @y, @z);\n          transform: translate3d(@x, @y, @z);\n}\n.rotate(@degrees) {\n  -webkit-transform: rotate(@degrees);\n      -ms-transform: rotate(@degrees); // IE9 only\n       -o-transform: rotate(@degrees);\n          transform: rotate(@degrees);\n}\n.rotateX(@degrees) {\n  -webkit-transform: rotateX(@degrees);\n      -ms-transform: rotateX(@degrees); // IE9 only\n       -o-transform: rotateX(@degrees);\n          transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n  -webkit-transform: rotateY(@degrees);\n      -ms-transform: rotateY(@degrees); // IE9 only\n       -o-transform: rotateY(@degrees);\n          transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n  -webkit-perspective: @perspective;\n     -moz-perspective: @perspective;\n          perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n  -webkit-perspective-origin: @perspective;\n     -moz-perspective-origin: @perspective;\n          perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n  -webkit-transform-origin: @origin;\n     -moz-transform-origin: @origin;\n      -ms-transform-origin: @origin; // IE9 only\n          transform-origin: @origin;\n}\n\n\n// Transitions\n\n.transition(@transition) {\n  -webkit-transition: @transition;\n       -o-transition: @transition;\n          transition: @transition;\n}\n.transition-property(@transition-property) {\n  -webkit-transition-property: @transition-property;\n          transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n  -webkit-transition-delay: @transition-delay;\n          transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n  -webkit-transition-duration: @transition-duration;\n          transition-duration: @transition-duration;\n}\n.transition-timing-function(@timing-function) {\n  -webkit-transition-timing-function: @timing-function;\n          transition-timing-function: @timing-function;\n}\n.transition-transform(@transition) {\n  -webkit-transition: -webkit-transform @transition;\n     -moz-transition: -moz-transform @transition;\n       -o-transition: -o-transform @transition;\n          transition: transform @transition;\n}\n\n\n// User select\n// For selecting text on the page\n\n.user-select(@select) {\n  -webkit-user-select: @select;\n     -moz-user-select: @select;\n      -ms-user-select: @select; // IE10+\n          user-select: @select;\n}\n","// WebKit-style focus\n\n.tab-focus() {\n  // WebKit-specific. 
Other browsers will keep their default outline style.\n  // (Initially tried to also force default via `outline: initial`,\n  // but that seems to erroneously remove the outline in Firefox altogether.)\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n","// stylelint-disable media-feature-name-no-vendor-prefix, media-feature-parentheses-space-inside, media-feature-name-no-unknown, indentation, at-rule-name-space-after\n\n// Responsive image\n//\n// Keep images from scaling beyond the width of their parents.\n.img-responsive(@display: block) {\n  display: @display;\n  max-width: 100%; // Part 1: Set a maximum relative to the parent\n  height: auto; // Part 2: Scale the height according to the width, otherwise you get stretching\n}\n\n\n// Retina image\n//\n// Short retina mixin for setting background-image and -size. Note that the\n// spelling of `min--moz-device-pixel-ratio` is intentional.\n.img-retina(@file-1x; @file-2x; @width-1x; @height-1x) {\n  background-image: url(\"@{file-1x}\");\n\n  @media\n  only screen and (-webkit-min-device-pixel-ratio: 2),\n  only screen and ( min--moz-device-pixel-ratio: 2),\n  only screen and ( -o-min-device-pixel-ratio: 2/1),\n  only screen and ( min-device-pixel-ratio: 2),\n  only screen and ( min-resolution: 192dpi),\n  only screen and ( min-resolution: 2dppx) {\n    background-image: url(\"@{file-2x}\");\n    background-size: @width-1x @height-1x;\n  }\n}\n","// stylelint-disable selector-list-comma-newline-after, selector-no-qualifying-type\n\n//\n// Typography\n// --------------------------------------------------\n\n\n// Headings\n// -------------------------\n\nh1, h2, h3, h4, h5, h6,\n.h1, .h2, .h3, .h4, .h5, .h6 {\n  font-family: @headings-font-family;\n  font-weight: @headings-font-weight;\n  line-height: @headings-line-height;\n  color: @headings-color;\n\n  small,\n  .small {\n    font-weight: 400;\n    line-height: 1;\n    color: @headings-small-color;\n  }\n}\n\nh1, .h1,\nh2, .h2,\nh3, .h3 {\n  margin-top: @line-height-computed;\n  margin-bottom: (@line-height-computed / 2);\n\n  small,\n  .small {\n    font-size: 65%;\n  }\n}\nh4, .h4,\nh5, .h5,\nh6, .h6 {\n  margin-top: (@line-height-computed / 2);\n  margin-bottom: (@line-height-computed / 2);\n\n  small,\n  .small {\n    font-size: 75%;\n  }\n}\n\nh1, .h1 { font-size: @font-size-h1; }\nh2, .h2 { font-size: @font-size-h2; }\nh3, .h3 { font-size: @font-size-h3; }\nh4, .h4 { font-size: @font-size-h4; }\nh5, .h5 { font-size: @font-size-h5; }\nh6, .h6 { font-size: @font-size-h6; }\n\n\n// Body text\n// -------------------------\n\np {\n  margin: 0 0 (@line-height-computed / 2);\n}\n\n.lead {\n  margin-bottom: @line-height-computed;\n  font-size: floor((@font-size-base * 1.15));\n  font-weight: 300;\n  line-height: 1.4;\n\n  @media (min-width: @screen-sm-min) {\n    font-size: (@font-size-base * 1.5);\n  }\n}\n\n\n// Emphasis & misc\n// -------------------------\n\n// Ex: (12px small font / 14px base font) * 100% = about 85%\nsmall,\n.small {\n  font-size: floor((100% * @font-size-small / @font-size-base));\n}\n\nmark,\n.mark {\n  padding: .2em;\n  background-color: @state-warning-bg;\n}\n\n// Alignment\n.text-left           { text-align: left; }\n.text-right          { text-align: right; }\n.text-center         { text-align: center; }\n.text-justify        { text-align: justify; }\n.text-nowrap         { white-space: nowrap; }\n\n// Transformation\n.text-lowercase      { text-transform: lowercase; }\n.text-uppercase      { text-transform: uppercase; 
}\n.text-capitalize     { text-transform: capitalize; }\n\n// Contextual colors\n.text-muted {\n  color: @text-muted;\n}\n.text-primary {\n  .text-emphasis-variant(@brand-primary);\n}\n.text-success {\n  .text-emphasis-variant(@state-success-text);\n}\n.text-info {\n  .text-emphasis-variant(@state-info-text);\n}\n.text-warning {\n  .text-emphasis-variant(@state-warning-text);\n}\n.text-danger {\n  .text-emphasis-variant(@state-danger-text);\n}\n\n// Contextual backgrounds\n// For now we'll leave these alongside the text classes until v4 when we can\n// safely shift things around (per SemVer rules).\n.bg-primary {\n  // Given the contrast here, this is the only class to have its color inverted\n  // automatically.\n  color: #fff;\n  .bg-variant(@brand-primary);\n}\n.bg-success {\n  .bg-variant(@state-success-bg);\n}\n.bg-info {\n  .bg-variant(@state-info-bg);\n}\n.bg-warning {\n  .bg-variant(@state-warning-bg);\n}\n.bg-danger {\n  .bg-variant(@state-danger-bg);\n}\n\n\n// Page header\n// -------------------------\n\n.page-header {\n  padding-bottom: ((@line-height-computed / 2) - 1);\n  margin: (@line-height-computed * 2) 0 @line-height-computed;\n  border-bottom: 1px solid @page-header-border-color;\n}\n\n\n// Lists\n// -------------------------\n\n// Unordered and Ordered lists\nul,\nol {\n  margin-top: 0;\n  margin-bottom: (@line-height-computed / 2);\n  ul,\n  ol {\n    margin-bottom: 0;\n  }\n}\n\n// List options\n\n// Unstyled keeps list items block level, just removes default browser padding and list-style\n.list-unstyled {\n  padding-left: 0;\n  list-style: none;\n}\n\n// Inline turns list items into inline-block\n.list-inline {\n  .list-unstyled();\n  margin-left: -5px;\n\n  > li {\n    display: inline-block;\n    padding-right: 5px;\n    padding-left: 5px;\n  }\n}\n\n// Description Lists\ndl {\n  margin-top: 0; // Remove browser default\n  margin-bottom: @line-height-computed;\n}\ndt,\ndd {\n  line-height: @line-height-base;\n}\ndt {\n  font-weight: 700;\n}\ndd {\n  margin-left: 0; // Undo browser default\n}\n\n// Horizontal description lists\n//\n// Defaults to being stacked without any of the below styles applied, until the\n// grid breakpoint is reached (default of ~768px).\n\n.dl-horizontal {\n  dd {\n    &:extend(.clearfix all); // Clear the floated `dt` if an empty `dd` is present\n  }\n\n  @media (min-width: @dl-horizontal-breakpoint) {\n    dt {\n      float: left;\n      width: (@dl-horizontal-offset - 20);\n      clear: left;\n      text-align: right;\n      .text-overflow();\n    }\n    dd {\n      margin-left: @dl-horizontal-offset;\n    }\n  }\n}\n\n\n// Misc\n// -------------------------\n\n// Abbreviations and acronyms\n// Add data-* attribute to help out our tooltip plugin, per https://github.com/twbs/bootstrap/issues/5257\nabbr[title],\nabbr[data-original-title] {\n  cursor: help;\n}\n\n.initialism {\n  font-size: 90%;\n  .text-uppercase();\n}\n\n// Blockquotes\nblockquote {\n  padding: (@line-height-computed / 2) @line-height-computed;\n  margin: 0 0 @line-height-computed;\n  font-size: @blockquote-font-size;\n  border-left: 5px solid @blockquote-border-color;\n\n  p,\n  ul,\n  ol {\n    &:last-child {\n      margin-bottom: 0;\n    }\n  }\n\n  // Note: Deprecated small and .small as of v3.1.0\n  // Context: https://github.com/twbs/bootstrap/issues/11660\n  footer,\n  small,\n  .small {\n    display: block;\n    font-size: 80%; // back to default font-size\n    line-height: @line-height-base;\n    color: @blockquote-small-color;\n\n    &:before {\n      content: 
\"\\2014 \\00A0\"; // em dash, nbsp\n    }\n  }\n}\n\n// Opposite alignment of blockquote\n//\n// Heads up: `blockquote.pull-right` has been deprecated as of v3.1.0.\n.blockquote-reverse,\nblockquote.pull-right {\n  padding-right: 15px;\n  padding-left: 0;\n  text-align: right;\n  border-right: 5px solid @blockquote-border-color;\n  border-left: 0;\n\n  // Account for citation\n  footer,\n  small,\n  .small {\n    &:before { content: \"\"; }\n    &:after {\n      content: \"\\00A0 \\2014\"; // nbsp, em dash\n    }\n  }\n}\n\n// Addresses\naddress {\n  margin-bottom: @line-height-computed;\n  font-style: normal;\n  line-height: @line-height-base;\n}\n","// Typography\n\n.text-emphasis-variant(@color) {\n  color: @color;\n  a&:hover,\n  a&:focus {\n    color: darken(@color, 10%);\n  }\n}\n","// Contextual backgrounds\n\n.bg-variant(@color) {\n  background-color: @color;\n  a&:hover,\n  a&:focus {\n    background-color: darken(@color, 10%);\n  }\n}\n","// Text overflow\n// Requires inline-block or block for proper styling\n\n.text-overflow() {\n  overflow: hidden;\n  text-overflow: ellipsis;\n  white-space: nowrap;\n}\n","//\n// Code (inline and block)\n// --------------------------------------------------\n\n\n// Inline and block code styles\ncode,\nkbd,\npre,\nsamp {\n  font-family: @font-family-monospace;\n}\n\n// Inline code\ncode {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: @code-color;\n  background-color: @code-bg;\n  border-radius: @border-radius-base;\n}\n\n// User input typically entered via keyboard\nkbd {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: @kbd-color;\n  background-color: @kbd-bg;\n  border-radius: @border-radius-small;\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);\n\n  kbd {\n    padding: 0;\n    font-size: 100%;\n    font-weight: 700;\n    box-shadow: none;\n  }\n}\n\n// Blocks of code\npre {\n  display: block;\n  padding: ((@line-height-computed - 1) / 2);\n  margin: 0 0 (@line-height-computed / 2);\n  font-size: (@font-size-base - 1); // 14px to 13px\n  line-height: @line-height-base;\n  color: @pre-color;\n  word-break: break-all;\n  word-wrap: break-word;\n  background-color: @pre-bg;\n  border: 1px solid @pre-border-color;\n  border-radius: @border-radius-base;\n\n  // Account for some code outputs that place code tags in pre tags\n  code {\n    padding: 0;\n    font-size: inherit;\n    color: inherit;\n    white-space: pre-wrap;\n    background-color: transparent;\n    border-radius: 0;\n  }\n}\n\n// Enable scrollable blocks of code\n.pre-scrollable {\n  max-height: @pre-scrollable-max-height;\n  overflow-y: scroll;\n}\n","//\n// Grid system\n// --------------------------------------------------\n\n\n// Container widths\n//\n// Set the container width, and override it for fixed navbars in media queries.\n\n.container {\n  .container-fixed();\n\n  @media (min-width: @screen-sm-min) {\n    width: @container-sm;\n  }\n  @media (min-width: @screen-md-min) {\n    width: @container-md;\n  }\n  @media (min-width: @screen-lg-min) {\n    width: @container-lg;\n  }\n}\n\n\n// Fluid container\n//\n// Utilizes the mixin meant for fixed width containers, but without any defined\n// width for fluid, full width layouts.\n\n.container-fluid {\n  .container-fixed();\n}\n\n\n// Row\n//\n// Rows contain and clear the floats of your columns.\n\n.row {\n  .make-row();\n}\n\n.row-no-gutters {\n  margin-right: 0;\n  margin-left: 0;\n\n  [class*=\"col-\"] {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n\n\n// Columns\n//\n// Common styles for small and large grid 
columns\n\n.make-grid-columns();\n\n\n// Extra small grid\n//\n// Columns, offsets, pushes, and pulls for extra small devices like\n// smartphones.\n\n.make-grid(xs);\n\n\n// Small grid\n//\n// Columns, offsets, pushes, and pulls for the small device range, from phones\n// to tablets.\n\n@media (min-width: @screen-sm-min) {\n  .make-grid(sm);\n}\n\n\n// Medium grid\n//\n// Columns, offsets, pushes, and pulls for the desktop device range.\n\n@media (min-width: @screen-md-min) {\n  .make-grid(md);\n}\n\n\n// Large grid\n//\n// Columns, offsets, pushes, and pulls for the large desktop device range.\n\n@media (min-width: @screen-lg-min) {\n  .make-grid(lg);\n}\n","// Grid system\n//\n// Generate semantic grid columns with these mixins.\n\n// Centered container element\n.container-fixed(@gutter: @grid-gutter-width) {\n  padding-right: ceil((@gutter / 2));\n  padding-left: floor((@gutter / 2));\n  margin-right: auto;\n  margin-left: auto;\n  &:extend(.clearfix all);\n}\n\n// Creates a wrapper for a series of columns\n.make-row(@gutter: @grid-gutter-width) {\n  margin-right: floor((@gutter / -2));\n  margin-left: ceil((@gutter / -2));\n  &:extend(.clearfix all);\n}\n\n// Generate the extra small columns\n.make-xs-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  float: left;\n  width: percentage((@columns / @grid-columns));\n  min-height: 1px;\n  padding-right: (@gutter / 2);\n  padding-left: (@gutter / 2);\n}\n.make-xs-column-offset(@columns) {\n  margin-left: percentage((@columns / @grid-columns));\n}\n.make-xs-column-push(@columns) {\n  left: percentage((@columns / @grid-columns));\n}\n.make-xs-column-pull(@columns) {\n  right: percentage((@columns / @grid-columns));\n}\n\n// Generate the small columns\n.make-sm-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-right: (@gutter / 2);\n  padding-left: (@gutter / 2);\n\n  @media (min-width: @screen-sm-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-offset(@columns) {\n  @media (min-width: @screen-sm-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-push(@columns) {\n  @media (min-width: @screen-sm-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-pull(@columns) {\n  @media (min-width: @screen-sm-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n// Generate the medium columns\n.make-md-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-right: (@gutter / 2);\n  padding-left: (@gutter / 2);\n\n  @media (min-width: @screen-md-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-offset(@columns) {\n  @media (min-width: @screen-md-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-push(@columns) {\n  @media (min-width: @screen-md-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-pull(@columns) {\n  @media (min-width: @screen-md-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n// Generate the large columns\n.make-lg-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-right: (@gutter / 2);\n  padding-left: (@gutter / 2);\n\n  @media (min-width: @screen-lg-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-offset(@columns) {\n  @media (min-width: 
@screen-lg-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-push(@columns) {\n  @media (min-width: @screen-lg-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-pull(@columns) {\n  @media (min-width: @screen-lg-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n","// Framework grid generation\n//\n// Used only by Bootstrap to generate the correct number of grid classes given\n// any value of `@grid-columns`.\n\n.make-grid-columns() {\n  // Common styles for all sizes of grid columns, widths 1-12\n  .col(@index) { // initial\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general; \"=<\" isn't a typo\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      position: relative;\n      // Prevent columns from collapsing when empty\n      min-height: 1px;\n      // Inner gutter via padding\n      padding-right: floor((@grid-gutter-width / 2));\n      padding-left: ceil((@grid-gutter-width / 2));\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.float-grid-columns(@class) {\n  .col(@index) { // initial\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      float: left;\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.calc-grid-column(@index, @class, @type) when (@type = width) and (@index > 0) {\n  .col-@{class}-@{index} {\n    width: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = push) and (@index > 0) {\n  .col-@{class}-push-@{index} {\n    left: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = push) and (@index = 0) {\n  .col-@{class}-push-0 {\n    left: auto;\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = pull) and (@index > 0) {\n  .col-@{class}-pull-@{index} {\n    right: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = pull) and (@index = 0) {\n  .col-@{class}-pull-0 {\n    right: auto;\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = offset) {\n  .col-@{class}-offset-@{index} {\n    margin-left: percentage((@index / @grid-columns));\n  }\n}\n\n// Basic looping in LESS\n.loop-grid-columns(@index, @class, @type) when (@index >= 0) {\n  .calc-grid-column(@index, @class, @type);\n  // next iteration\n  .loop-grid-columns((@index - 1), @class, @type);\n}\n\n// Create grid for specific class\n.make-grid(@class) {\n  .float-grid-columns(@class);\n  .loop-grid-columns(@grid-columns, @class, width);\n  .loop-grid-columns(@grid-columns, @class, pull);\n  .loop-grid-columns(@grid-columns, @class, push);\n  .loop-grid-columns(@grid-columns, @class, offset);\n}\n","// stylelint-disable selector-max-type, selector-max-compound-selectors, selector-no-qualifying-type\n\n//\n// Tables\n// --------------------------------------------------\n\n\ntable {\n  background-color: @table-bg;\n\n  // Table cell sizing\n  //\n  // Reset default table behavior\n\n  
col[class*=\"col-\"] {\n    position: static; // Prevent border hiding in Firefox and IE9-11 (see https://github.com/twbs/bootstrap/issues/11623)\n    display: table-column;\n    float: none;\n  }\n\n  td,\n  th {\n    &[class*=\"col-\"] {\n      position: static; // Prevent border hiding in Firefox and IE9-11 (see https://github.com/twbs/bootstrap/issues/11623)\n      display: table-cell;\n      float: none;\n    }\n  }\n}\n\ncaption {\n  padding-top: @table-cell-padding;\n  padding-bottom: @table-cell-padding;\n  color: @text-muted;\n  text-align: left;\n}\n\nth {\n  text-align: left;\n}\n\n\n// Baseline styles\n\n.table {\n  width: 100%;\n  max-width: 100%;\n  margin-bottom: @line-height-computed;\n  // Cells\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        padding: @table-cell-padding;\n        line-height: @line-height-base;\n        vertical-align: top;\n        border-top: 1px solid @table-border-color;\n      }\n    }\n  }\n  // Bottom align for column headings\n  > thead > tr > th {\n    vertical-align: bottom;\n    border-bottom: 2px solid @table-border-color;\n  }\n  // Remove top border from thead by default\n  > caption + thead,\n  > colgroup + thead,\n  > thead:first-child {\n    > tr:first-child {\n      > th,\n      > td {\n        border-top: 0;\n      }\n    }\n  }\n  // Account for multiple tbody instances\n  > tbody + tbody {\n    border-top: 2px solid @table-border-color;\n  }\n\n  // Nesting\n  .table {\n    background-color: @body-bg;\n  }\n}\n\n\n// Condensed table w/ half padding\n\n.table-condensed {\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        padding: @table-condensed-cell-padding;\n      }\n    }\n  }\n}\n\n\n// Bordered version\n//\n// Add borders all around the table and between all the columns.\n\n.table-bordered {\n  border: 1px solid @table-border-color;\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        border: 1px solid @table-border-color;\n      }\n    }\n  }\n  > thead > tr {\n    > th,\n    > td {\n      border-bottom-width: 2px;\n    }\n  }\n}\n\n\n// Zebra-striping\n//\n// Default zebra-stripe styles (alternating gray and transparent backgrounds)\n\n.table-striped {\n  > tbody > tr:nth-of-type(odd) {\n    background-color: @table-bg-accent;\n  }\n}\n\n\n// Hover effect\n//\n// Placed here since it has to come after the potential zebra striping\n\n.table-hover {\n  > tbody > tr:hover {\n    background-color: @table-bg-hover;\n  }\n}\n\n\n// Table backgrounds\n//\n// Exact selectors below required to override `.table-striped` and prevent\n// inheritance to nested tables.\n\n// Generate the contextual variants\n.table-row-variant(active; @table-bg-active);\n.table-row-variant(success; @state-success-bg);\n.table-row-variant(info; @state-info-bg);\n.table-row-variant(warning; @state-warning-bg);\n.table-row-variant(danger; @state-danger-bg);\n\n\n// Responsive tables\n//\n// Wrap your tables in `.table-responsive` and we'll make them mobile friendly\n// by enabling horizontal scrolling. Only applies <768px. 
Everything above that\n// will display normally.\n\n.table-responsive {\n  min-height: .01%; // Workaround for IE9 bug (see https://github.com/twbs/bootstrap/issues/14837)\n  overflow-x: auto;\n\n  @media screen and (max-width: @screen-xs-max) {\n    width: 100%;\n    margin-bottom: (@line-height-computed * .75);\n    overflow-y: hidden;\n    -ms-overflow-style: -ms-autohiding-scrollbar;\n    border: 1px solid @table-border-color;\n\n    // Tighten up spacing\n    > .table {\n      margin-bottom: 0;\n\n      // Ensure the content doesn't wrap\n      > thead,\n      > tbody,\n      > tfoot {\n        > tr {\n          > th,\n          > td {\n            white-space: nowrap;\n          }\n        }\n      }\n    }\n\n    // Special overrides for the bordered tables\n    > .table-bordered {\n      border: 0;\n\n      // Nuke the appropriate borders so that the parent can handle them\n      > thead,\n      > tbody,\n      > tfoot {\n        > tr {\n          > th:first-child,\n          > td:first-child {\n            border-left: 0;\n          }\n          > th:last-child,\n          > td:last-child {\n            border-right: 0;\n          }\n        }\n      }\n\n      // Only nuke the last row's bottom-border in `tbody` and `tfoot` since\n      // chances are there will be only one `tr` in a `thead` and that would\n      // remove the border altogether.\n      > tbody,\n      > tfoot {\n        > tr:last-child {\n          > th,\n          > td {\n            border-bottom: 0;\n          }\n        }\n      }\n\n    }\n  }\n}\n","// Tables\n\n.table-row-variant(@state; @background) {\n  // Exact selectors below required to override `.table-striped` and prevent\n  // inheritance to nested tables.\n  .table > thead > tr,\n  .table > tbody > tr,\n  .table > tfoot > tr {\n    > td.@{state},\n    > th.@{state},\n    &.@{state} > td,\n    &.@{state} > th {\n      background-color: @background;\n    }\n  }\n\n  // Hover states for `.table-hover`\n  // Note: this is not available for cells or rows within `thead` or `tfoot`.\n  .table-hover > tbody > tr {\n    > td.@{state}:hover,\n    > th.@{state}:hover,\n    &.@{state}:hover > td,\n    &:hover > .@{state},\n    &.@{state}:hover > th {\n      background-color: darken(@background, 5%);\n    }\n  }\n}\n","// stylelint-disable selector-no-qualifying-type, property-no-vendor-prefix, media-feature-name-no-vendor-prefix\n\n//\n// Forms\n// --------------------------------------------------\n\n\n// Normalize non-controls\n//\n// Restyle and baseline non-control form elements.\n\nfieldset {\n  // Chrome and Firefox set a `min-width: min-content;` on fieldsets,\n  // so we reset that to ensure it behaves more like a standard block element.\n  // See https://github.com/twbs/bootstrap/issues/12359.\n  min-width: 0;\n  padding: 0;\n  margin: 0;\n  border: 0;\n}\n\nlegend {\n  display: block;\n  width: 100%;\n  padding: 0;\n  margin-bottom: @line-height-computed;\n  font-size: (@font-size-base * 1.5);\n  line-height: inherit;\n  color: @legend-color;\n  border: 0;\n  border-bottom: 1px solid @legend-border-color;\n}\n\nlabel {\n  display: inline-block;\n  max-width: 100%; // Force IE8 to wrap long content (see https://github.com/twbs/bootstrap/issues/13141)\n  margin-bottom: 5px;\n  font-weight: 700;\n}\n\n\n// Normalize form controls\n//\n// While most of our form styles require extra classes, some basic normalization\n// is required to ensure optimum display with or without those classes to better\n// address browser 
inconsistencies.\n\ninput[type=\"search\"] {\n  // Override content-box in Normalize (* isn't specific enough)\n  .box-sizing(border-box);\n\n  // Search inputs in iOS\n  //\n  // This overrides the extra rounded corners on search inputs in iOS so that our\n  // `.form-control` class can properly style them. Note that this cannot simply\n  // be added to `.form-control` as it's not specific enough. For details, see\n  // https://github.com/twbs/bootstrap/issues/11586.\n  -webkit-appearance: none;\n  appearance: none;\n}\n\n// Position radios and checkboxes better\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n  margin: 4px 0 0;\n  margin-top: 1px \\9; // IE8-9\n  line-height: normal;\n\n  // Apply same disabled cursor tweak as for inputs\n  // Some special care is needed because <label>s don't inherit their parent's `cursor`.\n  //\n  // Note: Neither radios nor checkboxes can be readonly.\n  &[disabled],\n  &.disabled,\n  fieldset[disabled] & {\n    cursor: @cursor-disabled;\n  }\n}\n\ninput[type=\"file\"] {\n  display: block;\n}\n\n// Make range inputs behave like textual form controls\ninput[type=\"range\"] {\n  display: block;\n  width: 100%;\n}\n\n// Make multiple select elements height not fixed\nselect[multiple],\nselect[size] {\n  height: auto;\n}\n\n// Focus for file, radio, and checkbox\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n  .tab-focus();\n}\n\n// Adjust output element\noutput {\n  display: block;\n  padding-top: (@padding-base-vertical + 1);\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @input-color;\n}\n\n\n// Common form controls\n//\n// Shared size and type resets for form controls. Apply `.form-control` to any\n// of the following form controls:\n//\n// select\n// textarea\n// input[type=\"text\"]\n// input[type=\"password\"]\n// input[type=\"datetime\"]\n// input[type=\"datetime-local\"]\n// input[type=\"date\"]\n// input[type=\"month\"]\n// input[type=\"time\"]\n// input[type=\"week\"]\n// input[type=\"number\"]\n// input[type=\"email\"]\n// input[type=\"url\"]\n// input[type=\"search\"]\n// input[type=\"tel\"]\n// input[type=\"color\"]\n\n.form-control {\n  display: block;\n  width: 100%;\n  height: @input-height-base; // Make inputs at least the height of their button counterpart (base line-height + padding + border)\n  padding: @padding-base-vertical @padding-base-horizontal;\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @input-color;\n  background-color: @input-bg;\n  background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid @input-border;\n  border-radius: @input-border-radius; // Note: This has no effect on <select>s in some browsers, due to the limited stylability of <select>s in CSS.\n  .box-shadow(inset 0 1px 1px rgba(0, 0, 0, .075));\n  .transition(~\"border-color ease-in-out .15s, box-shadow ease-in-out .15s\");\n\n  // Customize the `:focus` state to imitate native WebKit styles.\n  .form-control-focus();\n\n  // Placeholder\n  .placeholder();\n\n  // Unstyle the caret on `<select>`s in IE10+.\n  &::-ms-expand {\n    background-color: transparent;\n    border: 0;\n  }\n\n  // Disabled and read-only inputs\n  //\n  // HTML5 says that controls under a fieldset > legend:first-child won't be\n  // disabled if the fieldset is disabled. 
Due to implementation difficulty, we\n  // don't honor that edge case; we style them as disabled anyway.\n  &[disabled],\n  &[readonly],\n  fieldset[disabled] & {\n    background-color: @input-bg-disabled;\n    opacity: 1; // iOS fix for unreadable disabled content; see https://github.com/twbs/bootstrap/issues/11655\n  }\n\n  &[disabled],\n  fieldset[disabled] & {\n    cursor: @cursor-disabled;\n  }\n\n  // Reset height for `textarea`s\n  textarea& {\n    height: auto;\n  }\n}\n\n\n// Special styles for iOS temporal inputs\n//\n// In Mobile Safari, setting `display: block` on temporal inputs causes the\n// text within the input to become vertically misaligned. As a workaround, we\n// set a pixel line-height that matches the given height of the input, but only\n// for Safari. See https://bugs.webkit.org/show_bug.cgi?id=139848\n//\n// Note that as of 9.3, iOS doesn't support `week`.\n\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n  input[type=\"date\"],\n  input[type=\"time\"],\n  input[type=\"datetime-local\"],\n  input[type=\"month\"] {\n    &.form-control {\n      line-height: @input-height-base;\n    }\n\n    &.input-sm,\n    .input-group-sm & {\n      line-height: @input-height-small;\n    }\n\n    &.input-lg,\n    .input-group-lg & {\n      line-height: @input-height-large;\n    }\n  }\n}\n\n\n// Form groups\n//\n// Designed to help with the organization and spacing of vertical forms. For\n// horizontal forms, use the predefined grid classes.\n\n.form-group {\n  margin-bottom: @form-group-margin-bottom;\n}\n\n\n// Checkboxes and radios\n//\n// Indent the labels to position radios/checkboxes as hanging controls.\n\n.radio,\n.checkbox {\n  position: relative;\n  display: block;\n  margin-top: 10px;\n  margin-bottom: 10px;\n\n  // These are used on elements with <label> descendants\n  &.disabled,\n  fieldset[disabled] & {\n    label {\n      cursor: @cursor-disabled;\n    }\n  }\n\n  label {\n    min-height: @line-height-computed; // Ensure the input doesn't jump when there is no text\n    padding-left: 20px;\n    margin-bottom: 0;\n    font-weight: 400;\n    cursor: pointer;\n  }\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n  position: absolute;\n  margin-top: 4px \\9;\n  margin-left: -20px;\n}\n\n.radio + .radio,\n.checkbox + .checkbox {\n  margin-top: -5px; // Move up sibling radios or checkboxes for tighter spacing\n}\n\n// Radios and checkboxes on same line\n.radio-inline,\n.checkbox-inline {\n  position: relative;\n  display: inline-block;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: 400;\n  vertical-align: middle;\n  cursor: pointer;\n\n  // These are used directly on <label>s\n  &.disabled,\n  fieldset[disabled] & {\n    cursor: @cursor-disabled;\n  }\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n  margin-top: 0;\n  margin-left: 10px; // space out consecutive inline controls\n}\n\n\n// Static form control text\n//\n// Apply class to a `p` element to make any string of text align with labels in\n// a horizontal form layout.\n\n.form-control-static {\n  min-height: (@line-height-computed + @font-size-base);\n  // Size it appropriately next to real form controls\n  padding-top: (@padding-base-vertical + 1);\n  padding-bottom: (@padding-base-vertical + 1);\n  // Remove default margin from `p`\n  margin-bottom: 0;\n\n  &.input-lg,\n  &.input-sm {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n\n\n// Form control 
sizing\n//\n// Build on `.form-control` with modifier classes to decrease or increase the\n// height and font-size of form controls.\n//\n// The `.form-group-* form-control` variations are sadly duplicated to avoid the\n// issue documented in https://github.com/twbs/bootstrap/issues/15074.\n\n.input-sm {\n  .input-size(@input-height-small; @padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @input-border-radius-small);\n}\n.form-group-sm {\n  .form-control {\n    height: @input-height-small;\n    padding: @padding-small-vertical @padding-small-horizontal;\n    font-size: @font-size-small;\n    line-height: @line-height-small;\n    border-radius: @input-border-radius-small;\n  }\n  select.form-control {\n    height: @input-height-small;\n    line-height: @input-height-small;\n  }\n  textarea.form-control,\n  select[multiple].form-control {\n    height: auto;\n  }\n  .form-control-static {\n    height: @input-height-small;\n    min-height: (@line-height-computed + @font-size-small);\n    padding: (@padding-small-vertical + 1) @padding-small-horizontal;\n    font-size: @font-size-small;\n    line-height: @line-height-small;\n  }\n}\n\n.input-lg {\n  .input-size(@input-height-large; @padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @input-border-radius-large);\n}\n.form-group-lg {\n  .form-control {\n    height: @input-height-large;\n    padding: @padding-large-vertical @padding-large-horizontal;\n    font-size: @font-size-large;\n    line-height: @line-height-large;\n    border-radius: @input-border-radius-large;\n  }\n  select.form-control {\n    height: @input-height-large;\n    line-height: @input-height-large;\n  }\n  textarea.form-control,\n  select[multiple].form-control {\n    height: auto;\n  }\n  .form-control-static {\n    height: @input-height-large;\n    min-height: (@line-height-computed + @font-size-large);\n    padding: (@padding-large-vertical + 1) @padding-large-horizontal;\n    font-size: @font-size-large;\n    line-height: @line-height-large;\n  }\n}\n\n\n// Form control feedback states\n//\n// Apply contextual and semantic states to individual form controls.\n\n.has-feedback {\n  // Enable absolute positioning\n  position: relative;\n\n  // Ensure icons don't overlap text\n  .form-control {\n    padding-right: (@input-height-base * 1.25);\n  }\n}\n// Feedback icon (requires .glyphicon classes)\n.form-control-feedback {\n  position: absolute;\n  top: 0;\n  right: 0;\n  z-index: 2; // Ensure icon is above input groups\n  display: block;\n  width: @input-height-base;\n  height: @input-height-base;\n  line-height: @input-height-base;\n  text-align: center;\n  pointer-events: none;\n}\n.input-lg + .form-control-feedback,\n.input-group-lg + .form-control-feedback,\n.form-group-lg .form-control + .form-control-feedback {\n  width: @input-height-large;\n  height: @input-height-large;\n  line-height: @input-height-large;\n}\n.input-sm + .form-control-feedback,\n.input-group-sm + .form-control-feedback,\n.form-group-sm .form-control + .form-control-feedback {\n  width: @input-height-small;\n  height: @input-height-small;\n  line-height: @input-height-small;\n}\n\n// Feedback states\n.has-success {\n  .form-control-validation(@state-success-text; @state-success-text; @state-success-bg);\n}\n.has-warning {\n  .form-control-validation(@state-warning-text; @state-warning-text; @state-warning-bg);\n}\n.has-error {\n  .form-control-validation(@state-danger-text; @state-danger-text; @state-danger-bg);\n}\n\n// 
Reposition feedback icon if input has visible label above\n.has-feedback label {\n\n  & ~ .form-control-feedback {\n    top: (@line-height-computed + 5); // Height of the `label` and its margin\n  }\n  &.sr-only ~ .form-control-feedback {\n    top: 0;\n  }\n}\n\n\n// Help text\n//\n// Apply to any element you wish to create light text for placement immediately\n// below a form control. Use for general help, formatting, or instructional text.\n\n.help-block {\n  display: block; // account for any element using help-block\n  margin-top: 5px;\n  margin-bottom: 10px;\n  color: lighten(@text-color, 25%); // lighten the text some for contrast\n}\n\n\n// Inline forms\n//\n// Make forms appear inline(-block) by adding the `.form-inline` class. Inline\n// forms begin stacked on extra small (mobile) devices and then go inline when\n// viewports reach <768px.\n//\n// Requires wrapping inputs and labels with `.form-group` for proper display of\n// default HTML form controls and our custom form controls (e.g., input groups).\n//\n// Heads up! This is mixin-ed into `.navbar-form` in navbars.less.\n\n.form-inline {\n\n  // Kick in the inline\n  @media (min-width: @screen-sm-min) {\n    // Inline-block all the things for \"inline\"\n    .form-group {\n      display: inline-block;\n      margin-bottom: 0;\n      vertical-align: middle;\n    }\n\n    // In navbar-form, allow folks to *not* use `.form-group`\n    .form-control {\n      display: inline-block;\n      width: auto; // Prevent labels from stacking above inputs in `.form-group`\n      vertical-align: middle;\n    }\n\n    // Make static controls behave like regular ones\n    .form-control-static {\n      display: inline-block;\n    }\n\n    .input-group {\n      display: inline-table;\n      vertical-align: middle;\n\n      .input-group-addon,\n      .input-group-btn,\n      .form-control {\n        width: auto;\n      }\n    }\n\n    // Input groups need that 100% width though\n    .input-group > .form-control {\n      width: 100%;\n    }\n\n    .control-label {\n      margin-bottom: 0;\n      vertical-align: middle;\n    }\n\n    // Remove default margin on radios/checkboxes that were used for stacking, and\n    // then undo the floating of radios and checkboxes to match.\n    .radio,\n    .checkbox {\n      display: inline-block;\n      margin-top: 0;\n      margin-bottom: 0;\n      vertical-align: middle;\n\n      label {\n        padding-left: 0;\n      }\n    }\n    .radio input[type=\"radio\"],\n    .checkbox input[type=\"checkbox\"] {\n      position: relative;\n      margin-left: 0;\n    }\n\n    // Re-override the feedback icon.\n    .has-feedback .form-control-feedback {\n      top: 0;\n    }\n  }\n}\n\n\n// Horizontal forms\n//\n// Horizontal forms are built on grid classes and allow you to create forms with\n// labels on the left and inputs on the right.\n\n.form-horizontal {\n\n  // Consistent vertical alignment of radios and checkboxes\n  //\n  // Labels also get some reset styles, but that is scoped to a media query below.\n  .radio,\n  .checkbox,\n  .radio-inline,\n  .checkbox-inline {\n    padding-top: (@padding-base-vertical + 1); // Default padding plus a border\n    margin-top: 0;\n    margin-bottom: 0;\n  }\n  // Account for padding we're adding to ensure the alignment and of help text\n  // and other content below items\n  .radio,\n  .checkbox {\n    min-height: (@line-height-computed + (@padding-base-vertical + 1));\n  }\n\n  // Make form groups behave like rows\n  .form-group {\n    .make-row();\n  }\n\n  // Reset spacing 
and right align labels, but scope to media queries so that\n  // labels on narrow viewports stack the same as a default form example.\n  @media (min-width: @screen-sm-min) {\n    .control-label {\n      padding-top: (@padding-base-vertical + 1); // Default padding plus a border\n      margin-bottom: 0;\n      text-align: right;\n    }\n  }\n\n  // Validation states\n  //\n  // Reposition the icon because it's now within a grid column and columns have\n  // `position: relative;` on them. Also accounts for the grid gutter padding.\n  .has-feedback .form-control-feedback {\n    right: floor((@grid-gutter-width / 2));\n  }\n\n  // Form group sizes\n  //\n  // Quick utility class for applying `.input-lg` and `.input-sm` styles to the\n  // inputs and labels within a `.form-group`.\n  .form-group-lg {\n    @media (min-width: @screen-sm-min) {\n      .control-label {\n        padding-top: (@padding-large-vertical + 1);\n        font-size: @font-size-large;\n      }\n    }\n  }\n  .form-group-sm {\n    @media (min-width: @screen-sm-min) {\n      .control-label {\n        padding-top: (@padding-small-vertical + 1);\n        font-size: @font-size-small;\n      }\n    }\n  }\n}\n","// Form validation states\n//\n// Used in forms.less to generate the form validation CSS for warnings, errors,\n// and successes.\n\n.form-control-validation(@text-color: #555; @border-color: #ccc; @background-color: #f5f5f5) {\n  // Color the label and help text\n  .help-block,\n  .control-label,\n  .radio,\n  .checkbox,\n  .radio-inline,\n  .checkbox-inline,\n  &.radio label,\n  &.checkbox label,\n  &.radio-inline label,\n  &.checkbox-inline label  {\n    color: @text-color;\n  }\n  // Set the border and box shadow on specific inputs to match\n  .form-control {\n    border-color: @border-color;\n    .box-shadow(inset 0 1px 1px rgba(0, 0, 0, .075)); // Redeclare so transitions work\n    &:focus {\n      border-color: darken(@border-color, 10%);\n      @shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px lighten(@border-color, 20%);\n      .box-shadow(@shadow);\n    }\n  }\n  // Set validation states also for addons\n  .input-group-addon {\n    color: @text-color;\n    background-color: @background-color;\n    border-color: @border-color;\n  }\n  // Optional feedback icon\n  .form-control-feedback {\n    color: @text-color;\n  }\n}\n\n\n// Form control focus state\n//\n// Generate a customized focus state and for any input with the specified color,\n// which defaults to the `@input-border-focus` variable.\n//\n// We highly encourage you to not customize the default value, but instead use\n// this to tweak colors on an as-needed basis. This aesthetic change is based on\n// WebKit's default styles, but applicable to a wider range of browsers. Its\n// usability and accessibility should be taken into account with any change.\n//\n// Example usage: change the default blue border and shadow to white for better\n// contrast against a dark gray background.\n.form-control-focus(@color: @input-border-focus) {\n  @color-rgba: rgba(red(@color), green(@color), blue(@color), .6);\n  &:focus {\n    border-color: @color;\n    outline: 0;\n    .box-shadow(~\"inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px @{color-rgba}\");\n  }\n}\n\n// Form control sizing\n//\n// Relative text size, padding, and border-radii changes for form controls. For\n// horizontal sizing, wrap controls in the predefined grid classes. 
`<select>`\n// element gets special love because it's special, and that's a fact!\n.input-size(@input-height; @padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  height: @input-height;\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n\n  select& {\n    height: @input-height;\n    line-height: @input-height;\n  }\n\n  textarea&,\n  select[multiple]& {\n    height: auto;\n  }\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// Buttons\n// --------------------------------------------------\n\n\n// Base styles\n// --------------------------------------------------\n\n.btn {\n  display: inline-block;\n  margin-bottom: 0; // For input.btn\n  font-weight: @btn-font-weight;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  touch-action: manipulation;\n  cursor: pointer;\n  background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid transparent;\n  .button-size(@padding-base-vertical; @padding-base-horizontal; @font-size-base; @line-height-base; @btn-border-radius-base);\n  .user-select(none);\n\n  &,\n  &:active,\n  &.active {\n    &:focus,\n    &.focus {\n      .tab-focus();\n    }\n  }\n\n  &:hover,\n  &:focus,\n  &.focus {\n    color: @btn-default-color;\n    text-decoration: none;\n  }\n\n  &:active,\n  &.active {\n    background-image: none;\n    outline: 0;\n    .box-shadow(inset 0 3px 5px rgba(0, 0, 0, .125));\n  }\n\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    cursor: @cursor-disabled;\n    .opacity(.65);\n    .box-shadow(none);\n  }\n\n  a& {\n    &.disabled,\n    fieldset[disabled] & {\n      pointer-events: none; // Future-proof disabling of clicks on `<a>` elements\n    }\n  }\n}\n\n\n// Alternate buttons\n// --------------------------------------------------\n\n.btn-default {\n  .button-variant(@btn-default-color; @btn-default-bg; @btn-default-border);\n}\n.btn-primary {\n  .button-variant(@btn-primary-color; @btn-primary-bg; @btn-primary-border);\n}\n// Success appears as green\n.btn-success {\n  .button-variant(@btn-success-color; @btn-success-bg; @btn-success-border);\n}\n// Info appears as blue-green\n.btn-info {\n  .button-variant(@btn-info-color; @btn-info-bg; @btn-info-border);\n}\n// Warning appears as orange\n.btn-warning {\n  .button-variant(@btn-warning-color; @btn-warning-bg; @btn-warning-border);\n}\n// Danger and error appear as red\n.btn-danger {\n  .button-variant(@btn-danger-color; @btn-danger-bg; @btn-danger-border);\n}\n\n\n// Link buttons\n// -------------------------\n\n// Make a button look and behave like a link\n.btn-link {\n  font-weight: 400;\n  color: @link-color;\n  border-radius: 0;\n\n  &,\n  &:active,\n  &.active,\n  &[disabled],\n  fieldset[disabled] & {\n    background-color: transparent;\n    .box-shadow(none);\n  }\n  &,\n  &:hover,\n  &:focus,\n  &:active {\n    border-color: transparent;\n  }\n  &:hover,\n  &:focus {\n    color: @link-hover-color;\n    text-decoration: @link-hover-decoration;\n    background-color: transparent;\n  }\n  &[disabled],\n  fieldset[disabled] & {\n    &:hover,\n    &:focus {\n      color: @btn-link-disabled-color;\n      text-decoration: none;\n    }\n  }\n}\n\n\n// Button Sizes\n// --------------------------------------------------\n\n.btn-lg {\n  // line-height: ensure even-numbered height of button next to large input\n  
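// Usage sketch: .input-size() (defined above) is consumed exactly like the
// .button-size() call below; e.g. the stock .input-lg boils down to
//
//   .input-lg {
//     .input-size(@input-height-large; @padding-large-vertical;
//       @padding-large-horizontal; @font-size-large; @line-height-large;
//       @input-border-radius-large);
//   }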
.button-size(@padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @btn-border-radius-large);\n}\n.btn-sm {\n  // line-height: ensure proper height of button next to small input\n  .button-size(@padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @btn-border-radius-small);\n}\n.btn-xs {\n  .button-size(@padding-xs-vertical; @padding-xs-horizontal; @font-size-small; @line-height-small; @btn-border-radius-small);\n}\n\n\n// Block button\n// --------------------------------------------------\n\n.btn-block {\n  display: block;\n  width: 100%;\n}\n\n// Vertically space out multiple block buttons\n.btn-block + .btn-block {\n  margin-top: 5px;\n}\n\n// Specificity overrides\ninput[type=\"submit\"],\ninput[type=\"reset\"],\ninput[type=\"button\"] {\n  &.btn-block {\n    width: 100%;\n  }\n}\n","// Button variants\n//\n// Easily pump out default styles, as well as :hover, :focus, :active,\n// and disabled options for all buttons\n\n.button-variant(@color; @background; @border) {\n  color: @color;\n  background-color: @background;\n  border-color: @border;\n\n  &:focus,\n  &.focus {\n    color: @color;\n    background-color: darken(@background, 10%);\n    border-color: darken(@border, 25%);\n  }\n  &:hover {\n    color: @color;\n    background-color: darken(@background, 10%);\n    border-color: darken(@border, 12%);\n  }\n  &:active,\n  &.active,\n  .open > .dropdown-toggle& {\n    color: @color;\n    background-color: darken(@background, 10%);\n    background-image: none;\n    border-color: darken(@border, 12%);\n\n    &:hover,\n    &:focus,\n    &.focus {\n      color: @color;\n      background-color: darken(@background, 17%);\n      border-color: darken(@border, 25%);\n    }\n  }\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    &:hover,\n    &:focus,\n    &.focus {\n      background-color: @background;\n      border-color: @border;\n    }\n  }\n\n  .badge {\n    color: @background;\n    background-color: @color;\n  }\n}\n\n// Button sizes\n.button-size(@padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n}\n","// Opacity\n\n.opacity(@opacity) {\n  @opacity-ie: (@opacity * 100);  // IE8 filter\n  filter: ~\"alpha(opacity=@{opacity-ie})\";\n  opacity: @opacity;\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// Component animations\n// --------------------------------------------------\n\n// Heads up!\n//\n// We don't use the `.opacity()` mixin here since it causes a bug with text\n// fields in IE7-8. 
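// Usage sketch (hypothetical variant and colors): a custom button built
// from the .button-variant() mixin above, passing text, background, and
// border colors in that order:
//
//   .btn-purple {
//     .button-variant(#fff; #5b3a8e; #4a2f75);
//   }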
Source: https://github.com/twbs/bootstrap/pull/3552.\n\n.fade {\n  opacity: 0;\n  .transition(opacity .15s linear);\n\n  &.in {\n    opacity: 1;\n  }\n}\n\n.collapse {\n  display: none;\n\n  &.in      { display: block; }\n  tr&.in    { display: table-row; }\n  tbody&.in { display: table-row-group; }\n}\n\n.collapsing {\n  position: relative;\n  height: 0;\n  overflow: hidden;\n  .transition-property(~\"height, visibility\");\n  .transition-duration(.35s);\n  .transition-timing-function(ease);\n}\n","//\n// Dropdown menus\n// --------------------------------------------------\n\n\n// Dropdown arrow/caret\n.caret {\n  display: inline-block;\n  width: 0;\n  height: 0;\n  margin-left: 2px;\n  vertical-align: middle;\n  border-top: @caret-width-base dashed;\n  border-top: @caret-width-base solid ~\"\\9\"; // IE8\n  border-right: @caret-width-base solid transparent;\n  border-left: @caret-width-base solid transparent;\n}\n\n// The dropdown wrapper (div)\n.dropup,\n.dropdown {\n  position: relative;\n}\n\n// Prevent the focus on the dropdown toggle when closing dropdowns\n.dropdown-toggle:focus {\n  outline: 0;\n}\n\n// The dropdown menu (ul)\n.dropdown-menu {\n  position: absolute;\n  top: 100%;\n  left: 0;\n  z-index: @zindex-dropdown;\n  display: none; // none by default, but block on \"open\" of the menu\n  float: left;\n  min-width: 160px;\n  padding: 5px 0;\n  margin: 2px 0 0; // override default ul\n  font-size: @font-size-base;\n  text-align: left; // Ensures proper alignment if parent has it changed (e.g., modal footer)\n  list-style: none;\n  background-color: @dropdown-bg;\n  background-clip: padding-box;\n  border: 1px solid @dropdown-fallback-border; // IE8 fallback\n  border: 1px solid @dropdown-border;\n  border-radius: @border-radius-base;\n  .box-shadow(0 6px 12px rgba(0, 0, 0, .175));\n\n  // Aligns the dropdown menu to right\n  //\n  // Deprecated as of 3.1.0 in favor of `.dropdown-menu-[dir]`\n  &.pull-right {\n    right: 0;\n    left: auto;\n  }\n\n  // Dividers (basically an hr) within the dropdown\n  .divider {\n    .nav-divider(@dropdown-divider-bg);\n  }\n\n  // Links within the dropdown menu\n  > li > a {\n    display: block;\n    padding: 3px 20px;\n    clear: both;\n    font-weight: 400;\n    line-height: @line-height-base;\n    color: @dropdown-link-color;\n    white-space: nowrap; // prevent links from randomly breaking onto new lines\n\n    &:hover,\n    &:focus {\n      color: @dropdown-link-hover-color;\n      text-decoration: none;\n      background-color: @dropdown-link-hover-bg;\n    }\n  }\n}\n\n// Active state\n.dropdown-menu > .active > a {\n  &,\n  &:hover,\n  &:focus {\n    color: @dropdown-link-active-color;\n    text-decoration: none;\n    background-color: @dropdown-link-active-bg;\n    outline: 0;\n  }\n}\n\n// Disabled state\n//\n// Gray out text and ensure the hover/focus state remains gray\n\n.dropdown-menu > .disabled > a {\n  &,\n  &:hover,\n  &:focus {\n    color: @dropdown-link-disabled-color;\n  }\n\n  // Nuke hover/focus effects\n  &:hover,\n  &:focus {\n    text-decoration: none;\n    cursor: @cursor-disabled;\n    background-color: transparent;\n    background-image: none; // Remove CSS gradient\n    .reset-filter();\n  }\n}\n\n// Open state for the dropdown\n.open {\n  // Show the menu\n  > .dropdown-menu {\n    display: block;\n  }\n\n  // Remove the outline when :focus is triggered\n  > a {\n    outline: 0;\n  }\n}\n\n// Menu positioning\n//\n// Add extra class to `.dropdown-menu` to flip the alignment of the dropdown\n// menu with the 
parent.\n.dropdown-menu-right {\n  right: 0;\n  left: auto; // Reset the default from `.dropdown-menu`\n}\n// With v3, we enabled auto-flipping if you have a dropdown within a right\n// aligned nav component. To enable the undoing of that, we provide an override\n// to restore the default dropdown menu alignment.\n//\n// This is only for left-aligning a dropdown menu within a `.navbar-right` or\n// `.pull-right` nav component.\n.dropdown-menu-left {\n  right: auto;\n  left: 0;\n}\n\n// Dropdown section headers\n.dropdown-header {\n  display: block;\n  padding: 3px 20px;\n  font-size: @font-size-small;\n  line-height: @line-height-base;\n  color: @dropdown-header-color;\n  white-space: nowrap; // as with > li > a\n}\n\n// Backdrop to catch body clicks on mobile, etc.\n.dropdown-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: (@zindex-dropdown - 10);\n}\n\n// Right aligned dropdowns\n.pull-right > .dropdown-menu {\n  right: 0;\n  left: auto;\n}\n\n// Allow for dropdowns to go bottom up (aka, dropup-menu)\n//\n// Just add .dropup after the standard .dropdown class and you're set, bro.\n// TODO: abstract this so that the navbar fixed styles are not placed here?\n\n.dropup,\n.navbar-fixed-bottom .dropdown {\n  // Reverse the caret\n  .caret {\n    content: \"\";\n    border-top: 0;\n    border-bottom: @caret-width-base dashed;\n    border-bottom: @caret-width-base solid ~\"\\9\"; // IE8\n  }\n  // Different positioning for bottom up menu\n  .dropdown-menu {\n    top: auto;\n    bottom: 100%;\n    margin-bottom: 2px;\n  }\n}\n\n\n// Component alignment\n//\n// Reiterate per navbar.less and the modified component alignment there.\n\n@media (min-width: @grid-float-breakpoint) {\n  .navbar-right {\n    .dropdown-menu {\n      .dropdown-menu-right();\n    }\n    // Necessary for overrides of the default right aligned menu.\n    // Will remove come v4 in all likelihood.\n    .dropdown-menu-left {\n      .dropdown-menu-left();\n    }\n  }\n}\n","// Horizontal dividers\n//\n// Dividers (basically an hr) within dropdowns and nav lists\n\n.nav-divider(@color: #e5e5e5) {\n  height: 1px;\n  margin: ((@line-height-computed / 2) - 1) 0;\n  overflow: hidden;\n  background-color: @color;\n}\n","// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n\n.reset-filter() {\n  filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n","// stylelint-disable selector-no-qualifying-type */\n\n//\n// Button groups\n// --------------------------------------------------\n\n// Make the div behave like a button\n.btn-group,\n.btn-group-vertical {\n  position: relative;\n  display: inline-block;\n  vertical-align: middle; // match .btn alignment given font-size hack above\n  > .btn {\n    position: relative;\n    float: left;\n    // Bring the \"active\" button to the front\n    &:hover,\n    &:focus,\n    &:active,\n    &.active {\n      z-index: 2;\n    }\n  }\n}\n\n// Prevent double borders when buttons are next to each other\n.btn-group {\n  .btn + .btn,\n  .btn + .btn-group,\n  .btn-group + .btn,\n  .btn-group + .btn-group {\n    margin-left: -1px;\n  }\n}\n\n// Optional: Group multiple button groups together for a toolbar\n.btn-toolbar {\n  margin-left: -5px; // Offset the first child's margin\n  &:extend(.clearfix all);\n\n  .btn,\n  .btn-group,\n  .input-group {\n    float: left;\n  }\n  > .btn,\n  > .btn-group,\n  > .input-group {\n    margin-left: 
5px;\n  }\n}\n\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n  border-radius: 0;\n}\n\n// Set corners individual because sometimes a single button can be in a .btn-group and we need :first-child and :last-child to both match\n.btn-group > .btn:first-child {\n  margin-left: 0;\n  &:not(:last-child):not(.dropdown-toggle) {\n    .border-right-radius(0);\n  }\n}\n// Need .dropdown-toggle since :last-child doesn't apply, given that a .dropdown-menu is used immediately after it\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n  .border-left-radius(0);\n}\n\n// Custom edits for including btn-groups within btn-groups (useful for including dropdown buttons within a btn-group)\n.btn-group > .btn-group {\n  float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) {\n  > .btn:last-child,\n  > .dropdown-toggle {\n    .border-right-radius(0);\n  }\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  .border-left-radius(0);\n}\n\n// On active and open, don't show outline\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n  outline: 0;\n}\n\n\n// Sizing\n//\n// Remix the default button sizing classes into new ones for easier manipulation.\n\n.btn-group-xs > .btn { &:extend(.btn-xs); }\n.btn-group-sm > .btn { &:extend(.btn-sm); }\n.btn-group-lg > .btn { &:extend(.btn-lg); }\n\n\n// Split button dropdowns\n// ----------------------\n\n// Give the line between buttons some depth\n.btn-group > .btn + .dropdown-toggle {\n  padding-right: 8px;\n  padding-left: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n  padding-right: 12px;\n  padding-left: 12px;\n}\n\n// The clickable button for toggling the menu\n// Remove the gradient and set the same inset shadow as the :active state\n.btn-group.open .dropdown-toggle {\n  .box-shadow(inset 0 3px 5px rgba(0, 0, 0, .125));\n\n  // Show no shadow for `.btn-link` since it has no other button styles.\n  &.btn-link {\n    .box-shadow(none);\n  }\n}\n\n\n// Reposition the caret\n.btn .caret {\n  margin-left: 0;\n}\n// Carets in other button sizes\n.btn-lg .caret {\n  border-width: @caret-width-large @caret-width-large 0;\n  border-bottom-width: 0;\n}\n// Upside down carets for .dropup\n.dropup .btn-lg .caret {\n  border-width: 0 @caret-width-large @caret-width-large;\n}\n\n\n// Vertical button groups\n// ----------------------\n\n.btn-group-vertical {\n  > .btn,\n  > .btn-group,\n  > .btn-group > .btn {\n    display: block;\n    float: none;\n    width: 100%;\n    max-width: 100%;\n  }\n\n  // Clear floats so dropdown menus can be properly placed\n  > .btn-group {\n    &:extend(.clearfix all);\n    > .btn {\n      float: none;\n    }\n  }\n\n  > .btn + .btn,\n  > .btn + .btn-group,\n  > .btn-group + .btn,\n  > .btn-group + .btn-group {\n    margin-top: -1px;\n    margin-left: 0;\n  }\n}\n\n.btn-group-vertical > .btn {\n  &:not(:first-child):not(:last-child) {\n    border-radius: 0;\n  }\n  &:first-child:not(:last-child) {\n    .border-top-radius(@btn-border-radius-base);\n    .border-bottom-radius(0);\n  }\n  &:last-child:not(:first-child) {\n    .border-top-radius(0);\n    .border-bottom-radius(@btn-border-radius-base);\n  }\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) {\n  > .btn:last-child,\n  > 
.dropdown-toggle {\n    .border-bottom-radius(0);\n  }\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  .border-top-radius(0);\n}\n\n\n// Justified button groups\n// ----------------------\n\n.btn-group-justified {\n  display: table;\n  width: 100%;\n  table-layout: fixed;\n  border-collapse: separate;\n  > .btn,\n  > .btn-group {\n    display: table-cell;\n    float: none;\n    width: 1%;\n  }\n  > .btn-group .btn {\n    width: 100%;\n  }\n\n  > .btn-group .dropdown-menu {\n    left: auto;\n  }\n}\n\n\n// Checkbox and radio options\n//\n// In order to support the browser's form validation feedback, powered by the\n// `required` attribute, we have to \"hide\" the inputs via `clip`. We cannot use\n// `display: none;` or `visibility: hidden;` as that also hides the popover.\n// Simply visually hiding the inputs via `opacity` would leave them clickable in\n// certain cases which is prevented by using `clip` and `pointer-events`.\n// This way, we ensure a DOM element is visible to position the popover from.\n//\n// See https://github.com/twbs/bootstrap/pull/12794 and\n// https://github.com/twbs/bootstrap/pull/14559 for more information.\n\n[data-toggle=\"buttons\"] {\n  > .btn,\n  > .btn-group > .btn {\n    input[type=\"radio\"],\n    input[type=\"checkbox\"] {\n      position: absolute;\n      clip: rect(0, 0, 0, 0);\n      pointer-events: none;\n    }\n  }\n}\n","// Single side border-radius\n\n.border-top-radius(@radius) {\n  border-top-left-radius: @radius;\n  border-top-right-radius: @radius;\n}\n.border-right-radius(@radius) {\n  border-top-right-radius: @radius;\n  border-bottom-right-radius: @radius;\n}\n.border-bottom-radius(@radius) {\n  border-bottom-right-radius: @radius;\n  border-bottom-left-radius: @radius;\n}\n.border-left-radius(@radius) {\n  border-top-left-radius: @radius;\n  border-bottom-left-radius: @radius;\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// Input groups\n// --------------------------------------------------\n\n// Base styles\n// -------------------------\n.input-group {\n  position: relative; // For dropdowns\n  display: table;\n  border-collapse: separate; // prevent input groups from inheriting border styles from table cells when placed within a table\n\n  // Undo padding and float of grid classes\n  &[class*=\"col-\"] {\n    float: none;\n    padding-right: 0;\n    padding-left: 0;\n  }\n\n  .form-control {\n    // Ensure that the input is always above the *appended* addon button for\n    // proper border colors.\n    position: relative;\n    z-index: 2;\n\n    // IE9 fubars the placeholder attribute in text inputs and the arrows on\n    // select elements in input groups. To fix it, we float the input. 
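// Usage sketch (hypothetical selector): the single-side radius mixins
// defined above compose grouped-corner rules like the ones throughout this
// file, e.g. squaring only the top edge of an element attached below
// another:
//
//   .attached-below {
//     .border-top-radius(0);
//     .border-bottom-radius(@border-radius-base);
//   }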
Details:\n    // https://github.com/twbs/bootstrap/issues/11561#issuecomment-28936855\n    float: left;\n\n    width: 100%;\n    margin-bottom: 0;\n\n    &:focus {\n      z-index: 3;\n    }\n  }\n}\n\n// Sizing options\n//\n// Remix the default form control sizing classes into new ones for easier\n// manipulation.\n\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n  .input-lg();\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n  .input-sm();\n}\n\n\n// Display as table-cell\n// -------------------------\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n  display: table-cell;\n\n  &:not(:first-child):not(:last-child) {\n    border-radius: 0;\n  }\n}\n// Addon and addon wrapper for buttons\n.input-group-addon,\n.input-group-btn {\n  width: 1%;\n  white-space: nowrap;\n  vertical-align: middle; // Match the inputs\n}\n\n// Text input groups\n// -------------------------\n.input-group-addon {\n  padding: @padding-base-vertical @padding-base-horizontal;\n  font-size: @font-size-base;\n  font-weight: 400;\n  line-height: 1;\n  color: @input-color;\n  text-align: center;\n  background-color: @input-group-addon-bg;\n  border: 1px solid @input-group-addon-border-color;\n  border-radius: @input-border-radius;\n\n  // Sizing\n  &.input-sm {\n    padding: @padding-small-vertical @padding-small-horizontal;\n    font-size: @font-size-small;\n    border-radius: @input-border-radius-small;\n  }\n  &.input-lg {\n    padding: @padding-large-vertical @padding-large-horizontal;\n    font-size: @font-size-large;\n    border-radius: @input-border-radius-large;\n  }\n\n  // Nuke default margins from checkboxes and radios to vertically center within.\n  input[type=\"radio\"],\n  input[type=\"checkbox\"] {\n    margin-top: 0;\n  }\n}\n\n// Reset rounded corners\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n  .border-right-radius(0);\n}\n.input-group-addon:first-child {\n  border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n  .border-left-radius(0);\n}\n.input-group-addon:last-child {\n  border-left: 0;\n}\n\n// Button input groups\n// -------------------------\n.input-group-btn {\n  position: relative;\n  // Jankily prevent input button groups from wrapping with `white-space` and\n  // `font-size` in combination with `inline-block` on buttons.\n  font-size: 0;\n  white-space: nowrap;\n\n  // Negative margin for spacing, position for bringing hovered/focused/actived\n  // element above the siblings.\n  > .btn {\n    position: relative;\n    + .btn {\n      margin-left: -1px;\n    }\n    // Bring the \"active\" button to the front\n    &:hover,\n    &:focus,\n    &:active {\n      z-index: 2;\n    }\n  }\n\n  // Negative margin to only have a 1px border between the two\n  &:first-child {\n    > .btn,\n    > .btn-group {\n      margin-right: -1px;\n    
}\n  }\n  &:last-child {\n    > .btn,\n    > .btn-group {\n      z-index: 2;\n      margin-left: -1px;\n    }\n  }\n}\n","// stylelint-disable selector-no-qualifying-type, selector-max-type\n\n//\n// Navs\n// --------------------------------------------------\n\n\n// Base class\n// --------------------------------------------------\n\n.nav {\n  padding-left: 0; // Override default ul/ol\n  margin-bottom: 0;\n  list-style: none;\n  &:extend(.clearfix all);\n\n  > li {\n    position: relative;\n    display: block;\n\n    > a {\n      position: relative;\n      display: block;\n      padding: @nav-link-padding;\n      &:hover,\n      &:focus {\n        text-decoration: none;\n        background-color: @nav-link-hover-bg;\n      }\n    }\n\n    // Disabled state sets text to gray and nukes hover/tab effects\n    &.disabled > a {\n      color: @nav-disabled-link-color;\n\n      &:hover,\n      &:focus {\n        color: @nav-disabled-link-hover-color;\n        text-decoration: none;\n        cursor: @cursor-disabled;\n        background-color: transparent;\n      }\n    }\n  }\n\n  // Open dropdowns\n  .open > a {\n    &,\n    &:hover,\n    &:focus {\n      background-color: @nav-link-hover-bg;\n      border-color: @link-color;\n    }\n  }\n\n  // Nav dividers (deprecated with v3.0.1)\n  //\n  // This should have been removed in v3 with the dropping of `.nav-list`, but\n  // we missed it. We don't currently support this anywhere, but in the interest\n  // of maintaining backward compatibility in case you use it, it's deprecated.\n  .nav-divider {\n    .nav-divider();\n  }\n\n  // Prevent IE8 from misplacing imgs\n  //\n  // See https://github.com/h5bp/html5-boilerplate/issues/984#issuecomment-3985989\n  > li > a > img {\n    max-width: none;\n  }\n}\n\n\n// Tabs\n// -------------------------\n\n// Give the tabs something to sit on\n.nav-tabs {\n  border-bottom: 1px solid @nav-tabs-border-color;\n  > li {\n    float: left;\n    // Make the list-items overlay the bottom border\n    margin-bottom: -1px;\n\n    // Actual tabs (as links)\n    > a {\n      margin-right: 2px;\n      line-height: @line-height-base;\n      border: 1px solid transparent;\n      border-radius: @border-radius-base @border-radius-base 0 0;\n      &:hover {\n        border-color: @nav-tabs-link-hover-border-color @nav-tabs-link-hover-border-color @nav-tabs-border-color;\n      }\n    }\n\n    // Active state, and its :hover to override normal :hover\n    &.active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @nav-tabs-active-link-hover-color;\n        cursor: default;\n        background-color: @nav-tabs-active-link-hover-bg;\n        border: 1px solid @nav-tabs-active-link-hover-border-color;\n        border-bottom-color: transparent;\n      }\n    }\n  }\n  // pulling this in mainly for less shorthand\n  &.nav-justified {\n    .nav-justified();\n    .nav-tabs-justified();\n  }\n}\n\n\n// Pills\n// -------------------------\n.nav-pills {\n  > li {\n    float: left;\n\n    // Links rendered as pills\n    > a {\n      border-radius: @nav-pills-border-radius;\n    }\n    + li {\n      margin-left: 2px;\n    }\n\n    // Active state\n    &.active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @nav-pills-active-link-hover-color;\n        background-color: @nav-pills-active-link-hover-bg;\n      }\n    }\n  }\n}\n\n\n// Stacked pills\n.nav-stacked {\n  > li {\n    float: none;\n    + li {\n      margin-top: 2px;\n      margin-left: 0; // no need for this gap between nav items\n    }\n  
}\n}\n\n\n// Nav variations\n// --------------------------------------------------\n\n// Justified nav links\n// -------------------------\n\n.nav-justified {\n  width: 100%;\n\n  > li {\n    float: none;\n    > a {\n      margin-bottom: 5px;\n      text-align: center;\n    }\n  }\n\n  > .dropdown .dropdown-menu {\n    top: auto;\n    left: auto;\n  }\n\n  @media (min-width: @screen-sm-min) {\n    > li {\n      display: table-cell;\n      width: 1%;\n      > a {\n        margin-bottom: 0;\n      }\n    }\n  }\n}\n\n// Move borders to anchors instead of bottom of list\n//\n// Mixin for adding on top the shared `.nav-justified` styles for our tabs\n.nav-tabs-justified {\n  border-bottom: 0;\n\n  > li > a {\n    // Override margin from .nav-tabs\n    margin-right: 0;\n    border-radius: @border-radius-base;\n  }\n\n  > .active > a,\n  > .active > a:hover,\n  > .active > a:focus {\n    border: 1px solid @nav-tabs-justified-link-border-color;\n  }\n\n  @media (min-width: @screen-sm-min) {\n    > li > a {\n      border-bottom: 1px solid @nav-tabs-justified-link-border-color;\n      border-radius: @border-radius-base @border-radius-base 0 0;\n    }\n    > .active > a,\n    > .active > a:hover,\n    > .active > a:focus {\n      border-bottom-color: @nav-tabs-justified-active-link-border-color;\n    }\n  }\n}\n\n\n// Tabbable tabs\n// -------------------------\n\n// Hide tabbable panes to start, show them when `.active`\n.tab-content {\n  > .tab-pane {\n    display: none;\n  }\n  > .active {\n    display: block;\n  }\n}\n\n\n// Dropdowns\n// -------------------------\n\n// Specific dropdowns\n.nav-tabs .dropdown-menu {\n  // make dropdown border overlap tab border\n  margin-top: -1px;\n  // Remove the top rounded corners here since there is a hard edge above the menu\n  .border-top-radius(0);\n}\n","// stylelint-disable selector-max-type, selector-max-compound-selectors, selector-max-combinators, selector-max-class, declaration-no-important, selector-no-qualifying-type\n\n//\n// Navbars\n// --------------------------------------------------\n\n\n// Wrapper and base class\n//\n// Provide a static navbar from which we expand to create full-width, fixed, and\n// other navbar variations.\n\n.navbar {\n  position: relative;\n  min-height: @navbar-height; // Ensure a navbar always shows (e.g., without a .navbar-brand in collapsed mode)\n  margin-bottom: @navbar-margin-bottom;\n  border: 1px solid transparent;\n\n  // Prevent floats from breaking the navbar\n  &:extend(.clearfix all);\n\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: @navbar-border-radius;\n  }\n}\n\n\n// Navbar heading\n//\n// Groups `.navbar-brand` and `.navbar-toggle` into a single component for easy\n// styling of responsive aspects.\n\n.navbar-header {\n  &:extend(.clearfix all);\n\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n  }\n}\n\n\n// Navbar collapse (body)\n//\n// Group your navbar content into this for easy collapsing and expanding across\n// various device sizes. 
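// Usage sketch (hypothetical overrides): the collapse point and navbar
// geometry all key off variables, so a taller navbar that stays collapsed
// until the medium breakpoint should just be a matter of overriding, e.g.
//
//   @navbar-height: 70px;
//   @grid-float-breakpoint: @screen-md-min;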
By default, this content is collapsed when <768px, but\n// will expand past that for a horizontal display.\n//\n// To start (on mobile devices) the navbar links, forms, and buttons are stacked\n// vertically and include a `max-height` to overflow in case you have too much\n// content for the user's viewport.\n\n.navbar-collapse {\n  padding-right: @navbar-padding-horizontal;\n  padding-left: @navbar-padding-horizontal;\n  overflow-x: visible;\n  border-top: 1px solid transparent;\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);\n  &:extend(.clearfix all);\n  -webkit-overflow-scrolling: touch;\n\n  &.in {\n    overflow-y: auto;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    width: auto;\n    border-top: 0;\n    box-shadow: none;\n\n    &.collapse {\n      display: block !important;\n      height: auto !important;\n      padding-bottom: 0; // Override default setting\n      overflow: visible !important;\n    }\n\n    &.in {\n      overflow-y: visible;\n    }\n\n    // Undo the collapse side padding for navbars with containers to ensure\n    // alignment of right-aligned contents.\n    .navbar-fixed-top &,\n    .navbar-static-top &,\n    .navbar-fixed-bottom & {\n      padding-right: 0;\n      padding-left: 0;\n    }\n  }\n}\n\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  .navbar-collapse {\n    max-height: @navbar-collapse-max-height;\n\n    @media (max-device-width: @screen-xs-min) and (orientation: landscape) {\n      max-height: 200px;\n    }\n  }\n\n  // Fix the top/bottom navbars when screen real estate supports it\n  position: fixed;\n  right: 0;\n  left: 0;\n  z-index: @zindex-navbar-fixed;\n\n  // Undo the rounded corners\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: 0;\n  }\n}\n\n.navbar-fixed-top {\n  top: 0;\n  border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n  bottom: 0;\n  margin-bottom: 0; // override .navbar defaults\n  border-width: 1px 0 0;\n}\n\n\n// Both navbar header and collapse\n//\n// When a container is present, change the behavior of the header and collapse.\n\n.container,\n.container-fluid {\n  > .navbar-header,\n  > .navbar-collapse {\n    margin-right: -@navbar-padding-horizontal;\n    margin-left: -@navbar-padding-horizontal;\n\n    @media (min-width: @grid-float-breakpoint) {\n      margin-right: 0;\n      margin-left: 0;\n    }\n  }\n}\n\n\n//\n// Navbar alignment options\n//\n// Display the navbar across the entirety of the page or fixed it to the top or\n// bottom of the page.\n\n// Static top (unfixed, but 100% wide) navbar\n.navbar-static-top {\n  z-index: @zindex-navbar;\n  border-width: 0 0 1px;\n\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: 0;\n  }\n}\n\n\n// Brand/project name\n\n.navbar-brand {\n  float: left;\n  height: @navbar-height;\n  padding: @navbar-padding-vertical @navbar-padding-horizontal;\n  font-size: @font-size-large;\n  line-height: @line-height-computed;\n\n  &:hover,\n  &:focus {\n    text-decoration: none;\n  }\n\n  > img {\n    display: block;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    .navbar > .container &,\n    .navbar > .container-fluid & {\n      margin-left: -@navbar-padding-horizontal;\n    }\n  }\n}\n\n\n// Navbar toggle\n//\n// Custom button for toggling the `.navbar-collapse`, powered by the collapse\n// JavaScript plugin.\n\n.navbar-toggle {\n  position: relative;\n  float: right;\n  padding: 9px 10px;\n  margin-right: @navbar-padding-horizontal;\n  .navbar-vertical-align(34px);\n  background-color: transparent;\n  background-image: none; // 
Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid transparent;\n  border-radius: @border-radius-base;\n\n  // We remove the `outline` here, but later compensate by attaching `:hover`\n  // styles to `:focus`.\n  &:focus {\n    outline: 0;\n  }\n\n  // Bars\n  .icon-bar {\n    display: block;\n    width: 22px;\n    height: 2px;\n    border-radius: 1px;\n  }\n  .icon-bar + .icon-bar {\n    margin-top: 4px;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    display: none;\n  }\n}\n\n\n// Navbar nav links\n//\n// Builds on top of the `.nav` components with its own modifier class to make\n// the nav the full height of the horizontal nav (above 768px).\n\n.navbar-nav {\n  margin: (@navbar-padding-vertical / 2) -@navbar-padding-horizontal;\n\n  > li > a {\n    padding-top: 10px;\n    padding-bottom: 10px;\n    line-height: @line-height-computed;\n  }\n\n  @media (max-width: @grid-float-breakpoint-max) {\n    // Dropdowns get custom display when collapsed\n    .open .dropdown-menu {\n      position: static;\n      float: none;\n      width: auto;\n      margin-top: 0;\n      background-color: transparent;\n      border: 0;\n      box-shadow: none;\n      > li > a,\n      .dropdown-header {\n        padding: 5px 15px 5px 25px;\n      }\n      > li > a {\n        line-height: @line-height-computed;\n        &:hover,\n        &:focus {\n          background-image: none;\n        }\n      }\n    }\n  }\n\n  // Uncollapse the nav\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n    margin: 0;\n\n    > li {\n      float: left;\n      > a {\n        padding-top: @navbar-padding-vertical;\n        padding-bottom: @navbar-padding-vertical;\n      }\n    }\n  }\n}\n\n\n// Navbar form\n//\n// Extension of the `.form-inline` with some extra flavor for optimum display in\n// our navbars.\n\n.navbar-form {\n  padding: 10px @navbar-padding-horizontal;\n  margin-right: -@navbar-padding-horizontal;\n  margin-left: -@navbar-padding-horizontal;\n  border-top: 1px solid transparent;\n  border-bottom: 1px solid transparent;\n  @shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);\n  .box-shadow(@shadow);\n\n  // Mixin behavior for optimum display\n  .form-inline();\n\n  .form-group {\n    @media (max-width: @grid-float-breakpoint-max) {\n      margin-bottom: 5px;\n\n      &:last-child {\n        margin-bottom: 0;\n      }\n    }\n  }\n\n  // Vertically center in expanded, horizontal navbar\n  .navbar-vertical-align(@input-height-base);\n\n  // Undo 100% width for pull classes\n  @media (min-width: @grid-float-breakpoint) {\n    width: auto;\n    padding-top: 0;\n    padding-bottom: 0;\n    margin-right: 0;\n    margin-left: 0;\n    border: 0;\n    .box-shadow(none);\n  }\n}\n\n\n// Dropdown menus\n\n// Menu position and menu carets\n.navbar-nav > li > .dropdown-menu {\n  margin-top: 0;\n  .border-top-radius(0);\n}\n// Menu position and menu caret support for dropups via extra dropup class\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n  margin-bottom: 0;\n  .border-top-radius(@navbar-border-radius);\n  .border-bottom-radius(0);\n}\n\n\n// Buttons in navbars\n//\n// Vertically center a button within a navbar (when *not* in a form).\n\n.navbar-btn {\n  .navbar-vertical-align(@input-height-base);\n\n  &.btn-sm {\n    .navbar-vertical-align(@input-height-small);\n  }\n  &.btn-xs {\n    .navbar-vertical-align(22);\n  }\n}\n\n\n// Text in navbars\n//\n// Add a class to make any element 
properly align itself vertically within the navbars.\n\n.navbar-text {\n  .navbar-vertical-align(@line-height-computed);\n\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n    margin-right: @navbar-padding-horizontal;\n    margin-left: @navbar-padding-horizontal;\n  }\n}\n\n\n// Component alignment\n//\n// Repurpose the pull utilities as their own navbar utilities to avoid specificity\n// issues with parents and chaining. Only do this when the navbar is uncollapsed\n// though so that navbar contents properly stack and align in mobile.\n//\n// Declared after the navbar components to ensure more specificity on the margins.\n\n@media (min-width: @grid-float-breakpoint) {\n  .navbar-left  { .pull-left(); }\n  .navbar-right {\n    .pull-right();\n    margin-right: -@navbar-padding-horizontal;\n\n    ~ .navbar-right {\n      margin-right: 0;\n    }\n  }\n}\n\n\n// Alternate navbars\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n  background-color: @navbar-default-bg;\n  border-color: @navbar-default-border;\n\n  .navbar-brand {\n    color: @navbar-default-brand-color;\n    &:hover,\n    &:focus {\n      color: @navbar-default-brand-hover-color;\n      background-color: @navbar-default-brand-hover-bg;\n    }\n  }\n\n  .navbar-text {\n    color: @navbar-default-color;\n  }\n\n  .navbar-nav {\n    > li > a {\n      color: @navbar-default-link-color;\n\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-hover-color;\n        background-color: @navbar-default-link-hover-bg;\n      }\n    }\n    > .active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-active-color;\n        background-color: @navbar-default-link-active-bg;\n      }\n    }\n    > .disabled > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-disabled-color;\n        background-color: @navbar-default-link-disabled-bg;\n      }\n    }\n\n    // Dropdown menu items\n    // Remove background color from open dropdown\n    > .open > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-active-color;\n        background-color: @navbar-default-link-active-bg;\n      }\n    }\n\n    @media (max-width: @grid-float-breakpoint-max) {\n      // Dropdowns get custom display when collapsed\n      .open .dropdown-menu {\n        > li > a {\n          color: @navbar-default-link-color;\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-hover-color;\n            background-color: @navbar-default-link-hover-bg;\n          }\n        }\n        > .active > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-active-color;\n            background-color: @navbar-default-link-active-bg;\n          }\n        }\n        > .disabled > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-disabled-color;\n            background-color: @navbar-default-link-disabled-bg;\n          }\n        }\n      }\n    }\n  }\n\n  .navbar-toggle {\n    border-color: @navbar-default-toggle-border-color;\n    &:hover,\n    &:focus {\n      background-color: @navbar-default-toggle-hover-bg;\n    }\n    .icon-bar {\n      background-color: @navbar-default-toggle-icon-bar-bg;\n    }\n  }\n\n  .navbar-collapse,\n  .navbar-form {\n    border-color: @navbar-default-border;\n  }\n\n\n  // Links in navbars\n  //\n  // Add a class to ensure links outside the navbar nav are colored 
correctly.\n\n  .navbar-link {\n    color: @navbar-default-link-color;\n    &:hover {\n      color: @navbar-default-link-hover-color;\n    }\n  }\n\n  .btn-link {\n    color: @navbar-default-link-color;\n    &:hover,\n    &:focus {\n      color: @navbar-default-link-hover-color;\n    }\n    &[disabled],\n    fieldset[disabled] & {\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-disabled-color;\n      }\n    }\n  }\n}\n\n// Inverse navbar\n\n.navbar-inverse {\n  background-color: @navbar-inverse-bg;\n  border-color: @navbar-inverse-border;\n\n  .navbar-brand {\n    color: @navbar-inverse-brand-color;\n    &:hover,\n    &:focus {\n      color: @navbar-inverse-brand-hover-color;\n      background-color: @navbar-inverse-brand-hover-bg;\n    }\n  }\n\n  .navbar-text {\n    color: @navbar-inverse-color;\n  }\n\n  .navbar-nav {\n    > li > a {\n      color: @navbar-inverse-link-color;\n\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-hover-color;\n        background-color: @navbar-inverse-link-hover-bg;\n      }\n    }\n    > .active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-active-color;\n        background-color: @navbar-inverse-link-active-bg;\n      }\n    }\n    > .disabled > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-disabled-color;\n        background-color: @navbar-inverse-link-disabled-bg;\n      }\n    }\n\n    // Dropdowns\n    > .open > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-active-color;\n        background-color: @navbar-inverse-link-active-bg;\n      }\n    }\n\n    @media (max-width: @grid-float-breakpoint-max) {\n      // Dropdowns get custom display\n      .open .dropdown-menu {\n        > .dropdown-header {\n          border-color: @navbar-inverse-border;\n        }\n        .divider {\n          background-color: @navbar-inverse-border;\n        }\n        > li > a {\n          color: @navbar-inverse-link-color;\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-hover-color;\n            background-color: @navbar-inverse-link-hover-bg;\n          }\n        }\n        > .active > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-active-color;\n            background-color: @navbar-inverse-link-active-bg;\n          }\n        }\n        > .disabled > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-disabled-color;\n            background-color: @navbar-inverse-link-disabled-bg;\n          }\n        }\n      }\n    }\n  }\n\n  // Darken the responsive nav toggle\n  .navbar-toggle {\n    border-color: @navbar-inverse-toggle-border-color;\n    &:hover,\n    &:focus {\n      background-color: @navbar-inverse-toggle-hover-bg;\n    }\n    .icon-bar {\n      background-color: @navbar-inverse-toggle-icon-bar-bg;\n    }\n  }\n\n  .navbar-collapse,\n  .navbar-form {\n    border-color: darken(@navbar-inverse-bg, 7%);\n  }\n\n  .navbar-link {\n    color: @navbar-inverse-link-color;\n    &:hover {\n      color: @navbar-inverse-link-hover-color;\n    }\n  }\n\n  .btn-link {\n    color: @navbar-inverse-link-color;\n    &:hover,\n    &:focus {\n      color: @navbar-inverse-link-hover-color;\n    }\n    &[disabled],\n    fieldset[disabled] & {\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-disabled-color;\n      }\n    }\n  }\n}\n","// Navbar vertical align\n//\n// 
Vertically center elements in the navbar.\n// Example: an element has a height of 30px, so write out `.navbar-vertical-align(30px);` to calculate the appropriate top margin.\n\n.navbar-vertical-align(@element-height) {\n  margin-top: ((@navbar-height - @element-height) / 2);\n  margin-bottom: ((@navbar-height - @element-height) / 2);\n}\n","// stylelint-disable declaration-no-important\n\n//\n// Utility classes\n// --------------------------------------------------\n\n\n// Floats\n// -------------------------\n\n.clearfix {\n  .clearfix();\n}\n.center-block {\n  .center-block();\n}\n.pull-right {\n  float: right !important;\n}\n.pull-left {\n  float: left !important;\n}\n\n\n// Toggling content\n// -------------------------\n\n// Note: Deprecated .hide in favor of .hidden or .sr-only (as appropriate) in v3.0.1\n.hide {\n  display: none !important;\n}\n.show {\n  display: block !important;\n}\n.invisible {\n  visibility: hidden;\n}\n.text-hide {\n  .text-hide();\n}\n\n\n// Hide from screenreaders and browsers\n//\n// Credit: HTML5 Boilerplate\n\n.hidden {\n  display: none !important;\n}\n\n\n// For Affix plugin\n// -------------------------\n\n.affix {\n  position: fixed;\n}\n","//\n// Breadcrumbs\n// --------------------------------------------------\n\n\n.breadcrumb {\n  padding: @breadcrumb-padding-vertical @breadcrumb-padding-horizontal;\n  margin-bottom: @line-height-computed;\n  list-style: none;\n  background-color: @breadcrumb-bg;\n  border-radius: @border-radius-base;\n\n  > li {\n    display: inline-block;\n\n    + li:before {\n      padding: 0 5px;\n      color: @breadcrumb-color;\n      content: \"@{breadcrumb-separator}\\00a0\"; // Unicode space added since inline-block means non-collapsing white-space\n    }\n  }\n\n  > .active {\n    color: @breadcrumb-active-color;\n  }\n}\n","//\n// Pagination (multiple pages)\n// --------------------------------------------------\n.pagination {\n  display: inline-block;\n  padding-left: 0;\n  margin: @line-height-computed 0;\n  border-radius: @border-radius-base;\n\n  > li {\n    display: inline; // Remove list-style and block-level defaults\n    > a,\n    > span {\n      position: relative;\n      float: left; // Collapse white-space\n      padding: @padding-base-vertical @padding-base-horizontal;\n      margin-left: -1px;\n      line-height: @line-height-base;\n      color: @pagination-color;\n      text-decoration: none;\n      background-color: @pagination-bg;\n      border: 1px solid @pagination-border;\n\n      &:hover,\n      &:focus {\n        z-index: 2;\n        color: @pagination-hover-color;\n        background-color: @pagination-hover-bg;\n        border-color: @pagination-hover-border;\n      }\n    }\n    &:first-child {\n      > a,\n      > span {\n        margin-left: 0;\n        .border-left-radius(@border-radius-base);\n      }\n    }\n    &:last-child {\n      > a,\n      > span {\n        .border-right-radius(@border-radius-base);\n      }\n    }\n  }\n\n  > .active > a,\n  > .active > span {\n    &,\n    &:hover,\n    &:focus {\n      z-index: 3;\n      color: @pagination-active-color;\n      cursor: default;\n      background-color: @pagination-active-bg;\n      border-color: @pagination-active-border;\n    }\n  }\n\n  > .disabled {\n    > span,\n    > span:hover,\n    > span:focus,\n    > a,\n    > a:hover,\n    > a:focus {\n      color: @pagination-disabled-color;\n      cursor: @cursor-disabled;\n      background-color: @pagination-disabled-bg;\n      border-color: @pagination-disabled-border;\n    }\n  }\n}\n\n// 
Sizing\n// --------------------------------------------------\n\n// Large\n.pagination-lg {\n  .pagination-size(@padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @border-radius-large);\n}\n\n// Small\n.pagination-sm {\n  .pagination-size(@padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @border-radius-small);\n}\n","// Pagination\n\n.pagination-size(@padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  > li {\n    > a,\n    > span {\n      padding: @padding-vertical @padding-horizontal;\n      font-size: @font-size;\n      line-height: @line-height;\n    }\n    &:first-child {\n      > a,\n      > span {\n        .border-left-radius(@border-radius);\n      }\n    }\n    &:last-child {\n      > a,\n      > span {\n        .border-right-radius(@border-radius);\n      }\n    }\n  }\n}\n","//\n// Pager pagination\n// --------------------------------------------------\n\n\n.pager {\n  padding-left: 0;\n  margin: @line-height-computed 0;\n  text-align: center;\n  list-style: none;\n  &:extend(.clearfix all);\n  li {\n    display: inline;\n    > a,\n    > span {\n      display: inline-block;\n      padding: 5px 14px;\n      background-color: @pager-bg;\n      border: 1px solid @pager-border;\n      border-radius: @pager-border-radius;\n    }\n\n    > a:hover,\n    > a:focus {\n      text-decoration: none;\n      background-color: @pager-hover-bg;\n    }\n  }\n\n  .next {\n    > a,\n    > span {\n      float: right;\n    }\n  }\n\n  .previous {\n    > a,\n    > span {\n      float: left;\n    }\n  }\n\n  .disabled {\n    > a,\n    > a:hover,\n    > a:focus,\n    > span {\n      color: @pager-disabled-color;\n      cursor: @cursor-disabled;\n      background-color: @pager-bg;\n    }\n  }\n}\n","//\n// Labels\n// --------------------------------------------------\n\n.label {\n  display: inline;\n  padding: .2em .6em .3em;\n  font-size: 75%;\n  font-weight: 700;\n  line-height: 1;\n  color: @label-color;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  border-radius: .25em;\n\n  // Add hover effects, but only for links\n  a& {\n    &:hover,\n    &:focus {\n      color: @label-link-hover-color;\n      text-decoration: none;\n      cursor: pointer;\n    }\n  }\n\n  // Empty labels collapse automatically (not available in IE8)\n  &:empty {\n    display: none;\n  }\n\n  // Quick fix for labels in buttons\n  .btn & {\n    position: relative;\n    top: -1px;\n  }\n}\n\n// Colors\n// Contextual variations (linked labels get darker on :hover)\n\n.label-default {\n  .label-variant(@label-default-bg);\n}\n\n.label-primary {\n  .label-variant(@label-primary-bg);\n}\n\n.label-success {\n  .label-variant(@label-success-bg);\n}\n\n.label-info {\n  .label-variant(@label-info-bg);\n}\n\n.label-warning {\n  .label-variant(@label-warning-bg);\n}\n\n.label-danger {\n  .label-variant(@label-danger-bg);\n}\n","// Labels\n\n.label-variant(@color) {\n  background-color: @color;\n\n  &[href] {\n    &:hover,\n    &:focus {\n      background-color: darken(@color, 10%);\n    }\n  }\n}\n","//\n// Badges\n// --------------------------------------------------\n\n\n// Base class\n.badge {\n  display: inline-block;\n  min-width: 10px;\n  padding: 3px 7px;\n  font-size: @font-size-small;\n  font-weight: @badge-font-weight;\n  line-height: @badge-line-height;\n  color: @badge-color;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  background-color: @badge-bg;\n  
border-radius: @badge-border-radius;\n\n  // Empty badges collapse automatically (not available in IE8)\n  &:empty {\n    display: none;\n  }\n\n  // Quick fix for badges in buttons\n  .btn & {\n    position: relative;\n    top: -1px;\n  }\n\n  .btn-xs &,\n  .btn-group-xs > .btn & {\n    top: 0;\n    padding: 1px 5px;\n  }\n\n  // Hover state, but only for links\n  a& {\n    &:hover,\n    &:focus {\n      color: @badge-link-hover-color;\n      text-decoration: none;\n      cursor: pointer;\n    }\n  }\n\n  // Account for badges in navs\n  .list-group-item.active > &,\n  .nav-pills > .active > a > & {\n    color: @badge-active-color;\n    background-color: @badge-active-bg;\n  }\n\n  .list-group-item > & {\n    float: right;\n  }\n\n  .list-group-item > & + & {\n    margin-right: 5px;\n  }\n\n  .nav-pills > li > a > & {\n    margin-left: 3px;\n  }\n}\n","//\n// Jumbotron\n// --------------------------------------------------\n\n\n.jumbotron {\n  padding-top: @jumbotron-padding;\n  padding-bottom: @jumbotron-padding;\n  margin-bottom: @jumbotron-padding;\n  color: @jumbotron-color;\n  background-color: @jumbotron-bg;\n\n  h1,\n  .h1 {\n    color: @jumbotron-heading-color;\n  }\n\n  p {\n    margin-bottom: (@jumbotron-padding / 2);\n    font-size: @jumbotron-font-size;\n    font-weight: 200;\n  }\n\n  > hr {\n    border-top-color: darken(@jumbotron-bg, 10%);\n  }\n\n  .container &,\n  .container-fluid & {\n    padding-right: (@grid-gutter-width / 2);\n    padding-left: (@grid-gutter-width / 2);\n    border-radius: @border-radius-large; // Only round corners at higher resolutions if contained in a container\n  }\n\n  .container {\n    max-width: 100%;\n  }\n\n  @media screen and (min-width: @screen-sm-min) {\n    padding-top: (@jumbotron-padding * 1.6);\n    padding-bottom: (@jumbotron-padding * 1.6);\n\n    .container &,\n    .container-fluid & {\n      padding-right: (@jumbotron-padding * 2);\n      padding-left: (@jumbotron-padding * 2);\n    }\n\n    h1,\n    .h1 {\n      font-size: @jumbotron-heading-font-size;\n    }\n  }\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// Thumbnails\n// --------------------------------------------------\n\n\n// Mixin and adjust the regular image class\n.thumbnail {\n  display: block;\n  padding: @thumbnail-padding;\n  margin-bottom: @line-height-computed;\n  line-height: @line-height-base;\n  background-color: @thumbnail-bg;\n  border: 1px solid @thumbnail-border;\n  border-radius: @thumbnail-border-radius;\n  .transition(border .2s ease-in-out);\n\n  > img,\n  a > img {\n    &:extend(.img-responsive);\n    margin-right: auto;\n    margin-left: auto;\n  }\n\n  // Add a hover state for linked versions only\n  a&:hover,\n  a&:focus,\n  a&.active {\n    border-color: @link-color;\n  }\n\n  // Image captions\n  .caption {\n    padding: @thumbnail-caption-padding;\n    color: @thumbnail-caption-color;\n  }\n}\n","//\n// Alerts\n// --------------------------------------------------\n\n\n// Base styles\n// -------------------------\n\n.alert {\n  padding: @alert-padding;\n  margin-bottom: @line-height-computed;\n  border: 1px solid transparent;\n  border-radius: @alert-border-radius;\n\n  // Headings for larger alerts\n  h4 {\n    margin-top: 0;\n    color: inherit; // Specified for the h4 to prevent conflicts of changing @headings-color\n  }\n\n  // Provide class for links that match alerts\n  .alert-link {\n    font-weight: @alert-link-font-weight;\n  }\n\n  // Improve alignment and spacing of inner content\n  > p,\n  > ul {\n    margin-bottom: 
0;\n  }\n\n  > p + p {\n    margin-top: 5px;\n  }\n}\n\n// Dismissible alerts\n//\n// Expand the right padding and account for the close button's positioning.\n\n// The misspelled .alert-dismissable was deprecated in 3.2.0.\n.alert-dismissable,\n.alert-dismissible {\n  padding-right: (@alert-padding + 20);\n\n  // Adjust close link position\n  .close {\n    position: relative;\n    top: -2px;\n    right: -21px;\n    color: inherit;\n  }\n}\n\n// Alternate styles\n//\n// Generate contextual modifier classes for colorizing the alert.\n\n.alert-success {\n  .alert-variant(@alert-success-bg; @alert-success-border; @alert-success-text);\n}\n\n.alert-info {\n  .alert-variant(@alert-info-bg; @alert-info-border; @alert-info-text);\n}\n\n.alert-warning {\n  .alert-variant(@alert-warning-bg; @alert-warning-border; @alert-warning-text);\n}\n\n.alert-danger {\n  .alert-variant(@alert-danger-bg; @alert-danger-border; @alert-danger-text);\n}\n","// Alerts\n\n.alert-variant(@background; @border; @text-color) {\n  color: @text-color;\n  background-color: @background;\n  border-color: @border;\n\n  hr {\n    border-top-color: darken(@border, 5%);\n  }\n\n  .alert-link {\n    color: darken(@text-color, 10%);\n  }\n}\n","// stylelint-disable at-rule-no-vendor-prefix\n\n//\n// Progress bars\n// --------------------------------------------------\n\n\n// Bar animations\n// -------------------------\n\n// WebKit\n@-webkit-keyframes progress-bar-stripes {\n  from  { background-position: 40px 0; }\n  to    { background-position: 0 0; }\n}\n\n// Spec and IE10+\n@keyframes progress-bar-stripes {\n  from  { background-position: 40px 0; }\n  to    { background-position: 0 0; }\n}\n\n\n// Bar itself\n// -------------------------\n\n// Outer container\n.progress {\n  height: @line-height-computed;\n  margin-bottom: @line-height-computed;\n  overflow: hidden;\n  background-color: @progress-bg;\n  border-radius: @progress-border-radius;\n  .box-shadow(inset 0 1px 2px rgba(0, 0, 0, .1));\n}\n\n// Bar of progress\n.progress-bar {\n  float: left;\n  width: 0%;\n  height: 100%;\n  font-size: @font-size-small;\n  line-height: @line-height-computed;\n  color: @progress-bar-color;\n  text-align: center;\n  background-color: @progress-bar-bg;\n  .box-shadow(inset 0 -1px 0 rgba(0, 0, 0, .15));\n  .transition(width .6s ease);\n}\n\n// Striped bars\n//\n// `.progress-striped .progress-bar` is deprecated as of v3.2.0 in favor of the\n// `.progress-bar-striped` class, which you just add to an existing\n// `.progress-bar`.\n.progress-striped .progress-bar,\n.progress-bar-striped {\n  #gradient > .striped();\n  background-size: 40px 40px;\n}\n\n// Call animation for the active one\n//\n// `.progress.active .progress-bar` is deprecated as of v3.2.0 in favor of the\n// `.progress-bar.active` approach.\n.progress.active .progress-bar,\n.progress-bar.active {\n  .animation(progress-bar-stripes 2s linear infinite);\n}\n\n\n// Variations\n// -------------------------\n\n.progress-bar-success {\n  .progress-bar-variant(@progress-bar-success-bg);\n}\n\n.progress-bar-info {\n  .progress-bar-variant(@progress-bar-info-bg);\n}\n\n.progress-bar-warning {\n  .progress-bar-variant(@progress-bar-warning-bg);\n}\n\n.progress-bar-danger {\n  .progress-bar-variant(@progress-bar-danger-bg);\n}\n","// stylelint-disable value-no-vendor-prefix, selector-max-id\n\n#gradient {\n\n  // Horizontal gradient, from left to right\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not 
available in IE9 and below.\n  .horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Opera 12\n    background-image: linear-gradient(to right, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down\n    background-repeat: repeat-x;\n  }\n\n  // Vertical gradient, from top to bottom\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Opera 12\n    background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down\n    background-repeat: repeat-x;\n  }\n\n  .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n    background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(@deg, @start-color, @end-color); // Opera 12\n    background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n  }\n  .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n    background-repeat: no-repeat;\n  }\n  .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n    background-repeat: no-repeat;\n  }\n  .radial(@inner-color: #555; @outer-color: #333) {\n    background-image: 
-webkit-radial-gradient(circle, @inner-color, @outer-color);\n    background-image: radial-gradient(circle, @inner-color, @outer-color);\n    background-repeat: no-repeat;\n  }\n  .striped(@color: rgba(255, 255, 255, .15); @angle: 45deg) {\n    background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: -o-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n  }\n}\n","// Progress bars\n\n.progress-bar-variant(@color) {\n  background-color: @color;\n\n  // Deprecated parent class requirement as of v3.2.0\n  .progress-striped & {\n    #gradient > .striped();\n  }\n}\n",".media {\n  // Proper spacing between instances of .media\n  margin-top: 15px;\n\n  &:first-child {\n    margin-top: 0;\n  }\n}\n\n.media,\n.media-body {\n  overflow: hidden;\n  zoom: 1;\n}\n\n.media-body {\n  width: 10000px;\n}\n\n.media-object {\n  display: block;\n\n  // Fix collapse in webkit from max-width: 100% and display: table-cell.\n  &.img-thumbnail {\n    max-width: none;\n  }\n}\n\n.media-right,\n.media > .pull-right {\n  padding-left: 10px;\n}\n\n.media-left,\n.media > .pull-left {\n  padding-right: 10px;\n}\n\n.media-left,\n.media-right,\n.media-body {\n  display: table-cell;\n  vertical-align: top;\n}\n\n.media-middle {\n  vertical-align: middle;\n}\n\n.media-bottom {\n  vertical-align: bottom;\n}\n\n// Reset margins on headings for tighter default spacing\n.media-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n\n// Media list variation\n//\n// Undo default ul/ol styles\n.media-list {\n  padding-left: 0;\n  list-style: none;\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// List groups\n// --------------------------------------------------\n\n\n// Base class\n//\n// Easily usable on <ul>, <ol>, or <div>.\n\n.list-group {\n  // No need to set list-style: none; since .list-group-item is block level\n  padding-left: 0; // reset padding because ul and ol\n  margin-bottom: 20px;\n}\n\n\n// Individual list items\n//\n// Use on `li`s or `div`s within the `.list-group` parent.\n\n.list-group-item {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n  // Place the border on the list items and negative margin up for better styling\n  margin-bottom: -1px;\n  background-color: @list-group-bg;\n  border: 1px solid @list-group-border;\n\n  // Round the first and last items\n  &:first-child {\n    .border-top-radius(@list-group-border-radius);\n  }\n  &:last-child {\n    margin-bottom: 0;\n    .border-bottom-radius(@list-group-border-radius);\n  }\n\n  // Disabled state\n  &.disabled,\n  &.disabled:hover,\n  &.disabled:focus {\n    color: @list-group-disabled-color;\n    cursor: @cursor-disabled;\n    background-color: @list-group-disabled-bg;\n\n    // Force color to inherit for custom content\n    .list-group-item-heading {\n      color: inherit;\n    }\n    .list-group-item-text {\n      color: @list-group-disabled-text-color;\n    }\n  }\n\n  // Active class on item itself, not parent\n  &.active,\n  &.active:hover,\n  &.active:focus {\n    z-index: 2; // Place active items above their siblings for proper border styling\n    color: @list-group-active-color;\n    background-color: @list-group-active-bg;\n    border-color: @list-group-active-border;\n\n    
// Force color to inherit for custom content\n    .list-group-item-heading,\n    .list-group-item-heading > small,\n    .list-group-item-heading > .small {\n      color: inherit;\n    }\n    .list-group-item-text {\n      color: @list-group-active-text-color;\n    }\n  }\n}\n\n\n// Interactive list items\n//\n// Use anchor or button elements instead of `li`s or `div`s to create interactive items.\n// Includes an extra `.active` modifier class for showing selected items.\n\na.list-group-item,\nbutton.list-group-item {\n  color: @list-group-link-color;\n\n  .list-group-item-heading {\n    color: @list-group-link-heading-color;\n  }\n\n  // Hover state\n  &:hover,\n  &:focus {\n    color: @list-group-link-hover-color;\n    text-decoration: none;\n    background-color: @list-group-hover-bg;\n  }\n}\n\nbutton.list-group-item {\n  width: 100%;\n  text-align: left;\n}\n\n\n// Contextual variants\n//\n// Add modifier classes to change text and background color on individual items.\n// Organizationally, this must come after the `:hover` states.\n\n.list-group-item-variant(success; @state-success-bg; @state-success-text);\n.list-group-item-variant(info; @state-info-bg; @state-info-text);\n.list-group-item-variant(warning; @state-warning-bg; @state-warning-text);\n.list-group-item-variant(danger; @state-danger-bg; @state-danger-text);\n\n\n// Custom content options\n//\n// Extra classes for creating well-formatted content within `.list-group-item`s.\n\n.list-group-item-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.list-group-item-text {\n  margin-bottom: 0;\n  line-height: 1.3;\n}\n","// List Groups\n\n.list-group-item-variant(@state; @background; @color) {\n  .list-group-item-@{state} {\n    color: @color;\n    background-color: @background;\n\n    a&,\n    button& {\n      color: @color;\n\n      .list-group-item-heading {\n        color: inherit;\n      }\n\n      &:hover,\n      &:focus {\n        color: @color;\n        background-color: darken(@background, 5%);\n      }\n      &.active,\n      &.active:hover,\n      &.active:focus {\n        color: #fff;\n        background-color: @color;\n        border-color: @color;\n      }\n    }\n  }\n}\n","// stylelint-disable selector-max-type, selector-max-compound-selectors, selector-max-combinators, no-duplicate-selectors\n\n//\n// Panels\n// --------------------------------------------------\n\n\n// Base class\n.panel {\n  margin-bottom: @line-height-computed;\n  background-color: @panel-bg;\n  border: 1px solid transparent;\n  border-radius: @panel-border-radius;\n  .box-shadow(0 1px 1px rgba(0, 0, 0, .05));\n}\n\n// Panel contents\n.panel-body {\n  padding: @panel-body-padding;\n  &:extend(.clearfix all);\n}\n\n// Optional heading\n.panel-heading {\n  padding: @panel-heading-padding;\n  border-bottom: 1px solid transparent;\n  .border-top-radius((@panel-border-radius - 1));\n\n  > .dropdown .dropdown-toggle {\n    color: inherit;\n  }\n}\n\n// Within heading, strip any `h*` tag of its default margins for spacing.\n.panel-title {\n  margin-top: 0;\n  margin-bottom: 0;\n  font-size: ceil((@font-size-base * 1.125));\n  color: inherit;\n\n  > a,\n  > small,\n  > .small,\n  > small > a,\n  > .small > a {\n    color: inherit;\n  }\n}\n\n// Optional footer (stays gray in every modifier class)\n.panel-footer {\n  padding: @panel-footer-padding;\n  background-color: @panel-footer-bg;\n  border-top: 1px solid @panel-inner-border;\n  .border-bottom-radius((@panel-border-radius - 1));\n}\n\n\n// List groups in panels\n//\n// By default, space out 
list group content from panel headings to account for\n// any kind of custom content between the two.\n\n.panel {\n  > .list-group,\n  > .panel-collapse > .list-group {\n    margin-bottom: 0;\n\n    .list-group-item {\n      border-width: 1px 0;\n      border-radius: 0;\n    }\n\n    // Add border top radius for first one\n    &:first-child {\n      .list-group-item:first-child {\n        border-top: 0;\n        .border-top-radius((@panel-border-radius - 1));\n      }\n    }\n\n    // Add border bottom radius for last one\n    &:last-child {\n      .list-group-item:last-child {\n        border-bottom: 0;\n        .border-bottom-radius((@panel-border-radius - 1));\n      }\n    }\n  }\n  > .panel-heading + .panel-collapse > .list-group {\n    .list-group-item:first-child {\n      .border-top-radius(0);\n    }\n  }\n}\n// Collapse space between when there's no additional content.\n.panel-heading + .list-group {\n  .list-group-item:first-child {\n    border-top-width: 0;\n  }\n}\n.list-group + .panel-footer {\n  border-top-width: 0;\n}\n\n// Tables in panels\n//\n// Place a non-bordered `.table` within a panel (not within a `.panel-body`) and\n// watch it go full width.\n\n.panel {\n  > .table,\n  > .table-responsive > .table,\n  > .panel-collapse > .table {\n    margin-bottom: 0;\n\n    caption {\n      padding-right: @panel-body-padding;\n      padding-left: @panel-body-padding;\n    }\n  }\n  // Add border top radius for first one\n  > .table:first-child,\n  > .table-responsive:first-child > .table:first-child {\n    .border-top-radius((@panel-border-radius - 1));\n\n    > thead:first-child,\n    > tbody:first-child {\n      > tr:first-child {\n        border-top-left-radius: (@panel-border-radius - 1);\n        border-top-right-radius: (@panel-border-radius - 1);\n\n        td:first-child,\n        th:first-child {\n          border-top-left-radius: (@panel-border-radius - 1);\n        }\n        td:last-child,\n        th:last-child {\n          border-top-right-radius: (@panel-border-radius - 1);\n        }\n      }\n    }\n  }\n  // Add border bottom radius for last one\n  > .table:last-child,\n  > .table-responsive:last-child > .table:last-child {\n    .border-bottom-radius((@panel-border-radius - 1));\n\n    > tbody:last-child,\n    > tfoot:last-child {\n      > tr:last-child {\n        border-bottom-right-radius: (@panel-border-radius - 1);\n        border-bottom-left-radius: (@panel-border-radius - 1);\n\n        td:first-child,\n        th:first-child {\n          border-bottom-left-radius: (@panel-border-radius - 1);\n        }\n        td:last-child,\n        th:last-child {\n          border-bottom-right-radius: (@panel-border-radius - 1);\n        }\n      }\n    }\n  }\n  > .panel-body + .table,\n  > .panel-body + .table-responsive,\n  > .table + .panel-body,\n  > .table-responsive + .panel-body {\n    border-top: 1px solid @table-border-color;\n  }\n  > .table > tbody:first-child > tr:first-child th,\n  > .table > tbody:first-child > tr:first-child td {\n    border-top: 0;\n  }\n  > .table-bordered,\n  > .table-responsive > .table-bordered {\n    border: 0;\n    > thead,\n    > tbody,\n    > tfoot {\n      > tr {\n        > th:first-child,\n        > td:first-child {\n          border-left: 0;\n        }\n        > th:last-child,\n        > td:last-child {\n          border-right: 0;\n        }\n      }\n    }\n    > thead,\n    > tbody {\n      > tr:first-child {\n        > td,\n        > th {\n          border-bottom: 0;\n        }\n      }\n    }\n    > tbody,\n    > 
tfoot {\n      > tr:last-child {\n        > td,\n        > th {\n          border-bottom: 0;\n        }\n      }\n    }\n  }\n  > .table-responsive {\n    margin-bottom: 0;\n    border: 0;\n  }\n}\n\n\n// Collapsible panels (aka, accordion)\n//\n// Wrap a series of panels in `.panel-group` to turn them into an accordion with\n// the help of our collapse JavaScript plugin.\n\n.panel-group {\n  margin-bottom: @line-height-computed;\n\n  // Tighten up margin so it's only between panels\n  .panel {\n    margin-bottom: 0;\n    border-radius: @panel-border-radius;\n\n    + .panel {\n      margin-top: 5px;\n    }\n  }\n\n  .panel-heading {\n    border-bottom: 0;\n\n    + .panel-collapse > .panel-body,\n    + .panel-collapse > .list-group {\n      border-top: 1px solid @panel-inner-border;\n    }\n  }\n\n  .panel-footer {\n    border-top: 0;\n    + .panel-collapse .panel-body {\n      border-bottom: 1px solid @panel-inner-border;\n    }\n  }\n}\n\n\n// Contextual variations\n.panel-default {\n  .panel-variant(@panel-default-border; @panel-default-text; @panel-default-heading-bg; @panel-default-border);\n}\n.panel-primary {\n  .panel-variant(@panel-primary-border; @panel-primary-text; @panel-primary-heading-bg; @panel-primary-border);\n}\n.panel-success {\n  .panel-variant(@panel-success-border; @panel-success-text; @panel-success-heading-bg; @panel-success-border);\n}\n.panel-info {\n  .panel-variant(@panel-info-border; @panel-info-text; @panel-info-heading-bg; @panel-info-border);\n}\n.panel-warning {\n  .panel-variant(@panel-warning-border; @panel-warning-text; @panel-warning-heading-bg; @panel-warning-border);\n}\n.panel-danger {\n  .panel-variant(@panel-danger-border; @panel-danger-text; @panel-danger-heading-bg; @panel-danger-border);\n}\n","// Panels\n\n.panel-variant(@border; @heading-text-color; @heading-bg-color; @heading-border) {\n  border-color: @border;\n\n  & > .panel-heading {\n    color: @heading-text-color;\n    background-color: @heading-bg-color;\n    border-color: @heading-border;\n\n    + .panel-collapse > .panel-body {\n      border-top-color: @border;\n    }\n    .badge {\n      color: @heading-bg-color;\n      background-color: @heading-text-color;\n    }\n  }\n  & > .panel-footer {\n    + .panel-collapse > .panel-body {\n      border-bottom-color: @border;\n    }\n  }\n}\n","// Embeds responsive\n//\n// Credit: Nicolas Gallagher and SUIT CSS.\n\n.embed-responsive {\n  position: relative;\n  display: block;\n  height: 0;\n  padding: 0;\n  overflow: hidden;\n\n  .embed-responsive-item,\n  iframe,\n  embed,\n  object,\n  video {\n    position: absolute;\n    top: 0;\n    bottom: 0;\n    left: 0;\n    width: 100%;\n    height: 100%;\n    border: 0;\n  }\n}\n\n// Modifier class for 16:9 aspect ratio\n.embed-responsive-16by9 {\n  padding-bottom: 56.25%;\n}\n\n// Modifier class for 4:3 aspect ratio\n.embed-responsive-4by3 {\n  padding-bottom: 75%;\n}\n","//\n// Wells\n// --------------------------------------------------\n\n\n// Base class\n.well {\n  min-height: 20px;\n  padding: 19px;\n  margin-bottom: 20px;\n  background-color: @well-bg;\n  border: 1px solid @well-border;\n  border-radius: @border-radius-base;\n  .box-shadow(inset 0 1px 1px rgba(0, 0, 0, .05));\n  blockquote {\n    border-color: #ddd;\n    border-color: rgba(0, 0, 0, .15);\n  }\n}\n\n// Sizes\n.well-lg {\n  padding: 24px;\n  border-radius: @border-radius-large;\n}\n.well-sm {\n  padding: 9px;\n  border-radius: @border-radius-small;\n}\n","// stylelint-disable property-no-vendor-prefix\n\n//\n// Close icons\n// 
--------------------------------------------------\n\n\n.close {\n  float: right;\n  font-size: (@font-size-base * 1.5);\n  font-weight: @close-font-weight;\n  line-height: 1;\n  color: @close-color;\n  text-shadow: @close-text-shadow;\n  .opacity(.2);\n\n  &:hover,\n  &:focus {\n    color: @close-color;\n    text-decoration: none;\n    cursor: pointer;\n    .opacity(.5);\n  }\n\n  // Additional properties for button version\n  // iOS requires the button element instead of an anchor tag.\n  // If you want the anchor version, it requires `href=\"#\"`.\n  // See https://developer.mozilla.org/en-US/docs/Web/Events/click#Safari_Mobile\n  button& {\n    padding: 0;\n    cursor: pointer;\n    background: transparent;\n    border: 0;\n    -webkit-appearance: none;\n    appearance: none;\n  }\n}\n","//\n// Modals\n// --------------------------------------------------\n\n// .modal-open      - body class for killing the scroll\n// .modal           - container to scroll within\n// .modal-dialog    - positioning shell for the actual modal\n// .modal-content   - actual modal w/ bg and corners and shit\n\n// Kill the scroll on the body\n.modal-open {\n  overflow: hidden;\n}\n\n// Container that the modal scrolls within\n.modal {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: @zindex-modal;\n  display: none;\n  overflow: hidden;\n  -webkit-overflow-scrolling: touch;\n\n  // Prevent Chrome on Windows from adding a focus outline. For details, see\n  // https://github.com/twbs/bootstrap/pull/10951.\n  outline: 0;\n\n  // When fading in the modal, animate it to slide down\n  &.fade .modal-dialog {\n    .translate(0, -25%);\n    .transition-transform(~\"0.3s ease-out\");\n  }\n  &.in .modal-dialog { .translate(0, 0); }\n}\n.modal-open .modal {\n  overflow-x: hidden;\n  overflow-y: auto;\n}\n\n// Shell div to position the modal with bottom padding\n.modal-dialog {\n  position: relative;\n  width: auto;\n  margin: 10px;\n}\n\n// Actual modal\n.modal-content {\n  position: relative;\n  background-color: @modal-content-bg;\n  background-clip: padding-box;\n  border: 1px solid @modal-content-fallback-border-color; //old browsers fallback (ie8 etc)\n  border: 1px solid @modal-content-border-color;\n  border-radius: @border-radius-large;\n  .box-shadow(0 3px 9px rgba(0, 0, 0, .5));\n  // Remove focus outline from opened modal\n  outline: 0;\n}\n\n// Modal background\n.modal-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: @zindex-modal-background;\n  background-color: @modal-backdrop-bg;\n  // Fade for backdrop\n  &.fade { .opacity(0); }\n  &.in { .opacity(@modal-backdrop-opacity); }\n}\n\n// Modal header\n// Top section of the modal w/ title and dismiss\n.modal-header {\n  padding: @modal-title-padding;\n  border-bottom: 1px solid @modal-header-border-color;\n  &:extend(.clearfix all);\n}\n// Close icon\n.modal-header .close {\n  margin-top: -2px;\n}\n\n// Title text within header\n.modal-title {\n  margin: 0;\n  line-height: @modal-title-line-height;\n}\n\n// Modal body\n// Where all modal content resides (sibling of .modal-header and .modal-footer)\n.modal-body {\n  position: relative;\n  padding: @modal-inner-padding;\n}\n\n// Footer (for actions)\n.modal-footer {\n  padding: @modal-inner-padding;\n  text-align: right; // right align buttons\n  border-top: 1px solid @modal-footer-border-color;\n  &:extend(.clearfix all); // clear it in case folks use .pull-* classes on buttons\n\n  // Properly space out buttons\n  .btn + .btn {\n    
margin-bottom: 0; // account for input[type=\"submit\"] which gets the bottom margin like all other inputs\n    margin-left: 5px;\n  }\n  // but override that for button groups\n  .btn-group .btn + .btn {\n    margin-left: -1px;\n  }\n  // and override it for block buttons as well\n  .btn-block + .btn-block {\n    margin-left: 0;\n  }\n}\n\n// Measure scrollbar width for padding body during modal show/hide\n.modal-scrollbar-measure {\n  position: absolute;\n  top: -9999px;\n  width: 50px;\n  height: 50px;\n  overflow: scroll;\n}\n\n// Scale up the modal\n@media (min-width: @screen-sm-min) {\n  // Automatically set modal's width for larger viewports\n  .modal-dialog {\n    width: @modal-md;\n    margin: 30px auto;\n  }\n  .modal-content {\n    .box-shadow(0 5px 15px rgba(0, 0, 0, .5));\n  }\n\n  // Modal sizes\n  .modal-sm { width: @modal-sm; }\n}\n\n@media (min-width: @screen-md-min) {\n  .modal-lg { width: @modal-lg; }\n}\n","//\n// Tooltips\n// --------------------------------------------------\n\n\n// Base class\n.tooltip {\n  position: absolute;\n  z-index: @zindex-tooltip;\n  display: block;\n  // Our parent element can be arbitrary since tooltips are by default inserted as a sibling of their target element.\n  // So reset our font and text properties to avoid inheriting weird values.\n  .reset-text();\n  font-size: @font-size-small;\n\n  .opacity(0);\n\n  &.in { .opacity(@tooltip-opacity); }\n  &.top {\n    padding: @tooltip-arrow-width 0;\n    margin-top: -3px;\n  }\n  &.right {\n    padding: 0 @tooltip-arrow-width;\n    margin-left: 3px;\n  }\n  &.bottom {\n    padding: @tooltip-arrow-width 0;\n    margin-top: 3px;\n  }\n  &.left {\n    padding: 0 @tooltip-arrow-width;\n    margin-left: -3px;\n  }\n\n  // Note: Deprecated .top-left, .top-right, .bottom-left, and .bottom-right as of v3.3.1\n  &.top .tooltip-arrow {\n    bottom: 0;\n    left: 50%;\n    margin-left: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.top-left .tooltip-arrow {\n    right: @tooltip-arrow-width;\n    bottom: 0;\n    margin-bottom: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.top-right .tooltip-arrow {\n    bottom: 0;\n    left: @tooltip-arrow-width;\n    margin-bottom: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.right .tooltip-arrow {\n    top: 50%;\n    left: 0;\n    margin-top: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-right-color: @tooltip-arrow-color;\n  }\n  &.left .tooltip-arrow {\n    top: 50%;\n    right: 0;\n    margin-top: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-left-color: @tooltip-arrow-color;\n  }\n  &.bottom .tooltip-arrow {\n    top: 0;\n    left: 50%;\n    margin-left: -@tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n  &.bottom-left .tooltip-arrow {\n    top: 0;\n    right: @tooltip-arrow-width;\n    margin-top: -@tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n  &.bottom-right .tooltip-arrow {\n    top: 0;\n    left: @tooltip-arrow-width;\n    margin-top: 
-@tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n}\n\n// Wrapper for the tooltip content\n.tooltip-inner {\n  max-width: @tooltip-max-width;\n  padding: 3px 8px;\n  color: @tooltip-color;\n  text-align: center;\n  background-color: @tooltip-bg;\n  border-radius: @border-radius-base;\n}\n\n// Arrows\n.tooltip-arrow {\n  position: absolute;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n",".reset-text() {\n  font-family: @font-family-base;\n  // We deliberately do NOT reset font-size.\n  font-style: normal;\n  font-weight: 400;\n  line-height: @line-height-base;\n  line-break: auto;\n  text-align: left; // Fallback for where `start` is not supported\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n}\n","//\n// Popovers\n// --------------------------------------------------\n\n\n.popover {\n  position: absolute;\n  top: 0;\n  left: 0;\n  z-index: @zindex-popover;\n  display: none;\n  max-width: @popover-max-width;\n  padding: 1px;\n  // Our parent element can be arbitrary since popovers are by default inserted as a sibling of their target element.\n  // So reset our font and text properties to avoid inheriting weird values.\n  .reset-text();\n  font-size: @font-size-base;\n  background-color: @popover-bg;\n  background-clip: padding-box;\n  border: 1px solid @popover-fallback-border-color;\n  border: 1px solid @popover-border-color;\n  border-radius: @border-radius-large;\n  .box-shadow(0 5px 10px rgba(0, 0, 0, .2));\n\n  // Offset the popover to account for the popover arrow\n  &.top { margin-top: -@popover-arrow-width; }\n  &.right { margin-left: @popover-arrow-width; }\n  &.bottom { margin-top: @popover-arrow-width; }\n  &.left { margin-left: -@popover-arrow-width; }\n\n  // Arrows\n  // .arrow is outer, .arrow:after is inner\n  > .arrow {\n    border-width: @popover-arrow-outer-width;\n\n    &,\n    &:after {\n      position: absolute;\n      display: block;\n      width: 0;\n      height: 0;\n      border-color: transparent;\n      border-style: solid;\n    }\n\n    &:after {\n      content: \"\";\n      border-width: @popover-arrow-width;\n    }\n  }\n\n  &.top > .arrow {\n    bottom: -@popover-arrow-outer-width;\n    left: 50%;\n    margin-left: -@popover-arrow-outer-width;\n    border-top-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-top-color: @popover-arrow-outer-color;\n    border-bottom-width: 0;\n    &:after {\n      bottom: 1px;\n      margin-left: -@popover-arrow-width;\n      content: \" \";\n      border-top-color: @popover-arrow-color;\n      border-bottom-width: 0;\n    }\n  }\n  &.right > .arrow {\n    top: 50%;\n    left: -@popover-arrow-outer-width;\n    margin-top: -@popover-arrow-outer-width;\n    border-right-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-right-color: @popover-arrow-outer-color;\n    border-left-width: 0;\n    &:after {\n      bottom: -@popover-arrow-width;\n      left: 1px;\n      content: \" \";\n      border-right-color: @popover-arrow-color;\n      border-left-width: 0;\n    }\n  }\n  &.bottom > .arrow {\n    top: -@popover-arrow-outer-width;\n    left: 50%;\n    margin-left: -@popover-arrow-outer-width;\n    border-top-width: 0;\n    border-bottom-color: @popover-arrow-outer-fallback-color; // IE8 
fallback\n    border-bottom-color: @popover-arrow-outer-color;\n    &:after {\n      top: 1px;\n      margin-left: -@popover-arrow-width;\n      content: \" \";\n      border-top-width: 0;\n      border-bottom-color: @popover-arrow-color;\n    }\n  }\n\n  &.left > .arrow {\n    top: 50%;\n    right: -@popover-arrow-outer-width;\n    margin-top: -@popover-arrow-outer-width;\n    border-right-width: 0;\n    border-left-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-left-color: @popover-arrow-outer-color;\n    &:after {\n      right: 1px;\n      bottom: -@popover-arrow-width;\n      content: \" \";\n      border-right-width: 0;\n      border-left-color: @popover-arrow-color;\n    }\n  }\n}\n\n.popover-title {\n  padding: 8px 14px;\n  margin: 0; // reset heading margin\n  font-size: @font-size-base;\n  background-color: @popover-title-bg;\n  border-bottom: 1px solid darken(@popover-title-bg, 5%);\n  border-radius: (@border-radius-large - 1) (@border-radius-large - 1) 0 0;\n}\n\n.popover-content {\n  padding: 9px 14px;\n}\n","// stylelint-disable media-feature-name-no-unknown\n\n//\n// Carousel\n// --------------------------------------------------\n\n\n// Wrapper for the slide container and indicators\n.carousel {\n  position: relative;\n}\n\n.carousel-inner {\n  position: relative;\n  width: 100%;\n  overflow: hidden;\n\n  > .item {\n    position: relative;\n    display: none;\n    .transition(.6s ease-in-out left);\n\n    // Account for jankitude on images\n    > img,\n    > a > img {\n      &:extend(.img-responsive);\n      line-height: 1;\n    }\n\n    // WebKit CSS3 transforms for supported devices\n    @media all and (transform-3d), (-webkit-transform-3d) {\n      .transition-transform(~\"0.6s ease-in-out\");\n      .backface-visibility(~\"hidden\");\n      .perspective(1000px);\n\n      &.next,\n      &.active.right {\n        .translate3d(100%, 0, 0);\n        left: 0;\n      }\n      &.prev,\n      &.active.left {\n        .translate3d(-100%, 0, 0);\n        left: 0;\n      }\n      &.next.left,\n      &.prev.right,\n      &.active {\n        .translate3d(0, 0, 0);\n        left: 0;\n      }\n    }\n  }\n\n  > .active,\n  > .next,\n  > .prev {\n    display: block;\n  }\n\n  > .active {\n    left: 0;\n  }\n\n  > .next,\n  > .prev {\n    position: absolute;\n    top: 0;\n    width: 100%;\n  }\n\n  > .next {\n    left: 100%;\n  }\n  > .prev {\n    left: -100%;\n  }\n  > .next.left,\n  > .prev.right {\n    left: 0;\n  }\n\n  > .active.left {\n    left: -100%;\n  }\n  > .active.right {\n    left: 100%;\n  }\n\n}\n\n// Left/right controls for nav\n// ---------------------------\n\n.carousel-control {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: @carousel-control-width;\n  font-size: @carousel-control-font-size;\n  color: @carousel-control-color;\n  text-align: center;\n  text-shadow: @carousel-text-shadow;\n  background-color: rgba(0, 0, 0, 0); // Fix IE9 click-thru bug\n  .opacity(@carousel-control-opacity);\n  // We can't have this transition here because WebKit cancels the carousel\n  // animation if you trip this while in the middle of another animation.\n\n  // Set gradients for backgrounds\n  &.left {\n    #gradient > .horizontal(@start-color: rgba(0, 0, 0, .5); @end-color: rgba(0, 0, 0, .0001));\n  }\n  &.right {\n    right: 0;\n    left: auto;\n    #gradient > .horizontal(@start-color: rgba(0, 0, 0, .0001); @end-color: rgba(0, 0, 0, .5));\n  }\n\n  // Hover/focus state\n  &:hover,\n  &:focus {\n    color: @carousel-control-color;\n   
 text-decoration: none;\n    outline: 0;\n    .opacity(.9);\n  }\n\n  // Toggles\n  .icon-prev,\n  .icon-next,\n  .glyphicon-chevron-left,\n  .glyphicon-chevron-right {\n    position: absolute;\n    top: 50%;\n    z-index: 5;\n    display: inline-block;\n    margin-top: -10px;\n  }\n  .icon-prev,\n  .glyphicon-chevron-left {\n    left: 50%;\n    margin-left: -10px;\n  }\n  .icon-next,\n  .glyphicon-chevron-right {\n    right: 50%;\n    margin-right: -10px;\n  }\n  .icon-prev,\n  .icon-next {\n    width: 20px;\n    height: 20px;\n    font-family: serif;\n    line-height: 1;\n  }\n\n  .icon-prev {\n    &:before {\n      content: \"\\2039\";// SINGLE LEFT-POINTING ANGLE QUOTATION MARK (U+2039)\n    }\n  }\n  .icon-next {\n    &:before {\n      content: \"\\203a\";// SINGLE RIGHT-POINTING ANGLE QUOTATION MARK (U+203A)\n    }\n  }\n}\n\n// Optional indicator pips\n//\n// Add an unordered list with the following class and add a list item for each\n// slide your carousel holds.\n\n.carousel-indicators {\n  position: absolute;\n  bottom: 10px;\n  left: 50%;\n  z-index: 15;\n  width: 60%;\n  padding-left: 0;\n  margin-left: -30%;\n  text-align: center;\n  list-style: none;\n\n  li {\n    display: inline-block;\n    width: 10px;\n    height: 10px;\n    margin: 1px;\n    text-indent: -999px;\n    cursor: pointer;\n    // IE8-9 hack for event handling\n    //\n    // Internet Explorer 8-9 does not support clicks on elements without a set\n    // `background-color`. We cannot use `filter` since that's not viewed as a\n    // background color by the browser. Thus, a hack is needed.\n    // See https://developer.mozilla.org/en-US/docs/Web/Events/click#Internet_Explorer\n    //\n    // For IE8, we set solid black as it doesn't support `rgba()`. For IE9, we\n    // set alpha transparency for the best results possible.\n    background-color: #000 \\9; // IE8\n    background-color: rgba(0, 0, 0, 0); // IE9\n\n    border: 1px solid @carousel-indicator-border-color;\n    border-radius: 10px;\n  }\n\n  .active {\n    width: 12px;\n    height: 12px;\n    margin: 0;\n    background-color: @carousel-indicator-active-bg;\n  }\n}\n\n// Optional captions\n// -----------------------------\n// Hidden by default for smaller viewports\n.carousel-caption {\n  position: absolute;\n  right: 15%;\n  bottom: 20px;\n  left: 15%;\n  z-index: 10;\n  padding-top: 20px;\n  padding-bottom: 20px;\n  color: @carousel-caption-color;\n  text-align: center;\n  text-shadow: @carousel-text-shadow;\n\n  & .btn {\n    text-shadow: none; // No shadow for button elements in carousel-caption\n  }\n}\n\n\n// Scale up controls for tablets and up\n@media screen and (min-width: @screen-sm-min) {\n\n  // Scale up the controls a smidge\n  .carousel-control {\n    .glyphicon-chevron-left,\n    .glyphicon-chevron-right,\n    .icon-prev,\n    .icon-next {\n      width: (@carousel-control-font-size * 1.5);\n      height: (@carousel-control-font-size * 1.5);\n      margin-top: (@carousel-control-font-size / -2);\n      font-size: (@carousel-control-font-size * 1.5);\n    }\n    .glyphicon-chevron-left,\n    .icon-prev {\n      margin-left: (@carousel-control-font-size / -2);\n    }\n    .glyphicon-chevron-right,\n    .icon-next {\n      margin-right: (@carousel-control-font-size / -2);\n    }\n  }\n\n  // Show and left align the captions\n  .carousel-caption {\n    right: 20%;\n    left: 20%;\n    padding-bottom: 30px;\n  }\n\n  // Move up the indicators\n  .carousel-indicators {\n    bottom: 20px;\n  }\n}\n","// Clearfix\n//\n// For modern browsers\n// 
1. The space content is one way to avoid an Opera bug when the\n//    contenteditable attribute is included anywhere else in the document.\n//    Otherwise it causes space to appear at the top and bottom of elements\n//    that are clearfixed.\n// 2. The use of `table` rather than `block` is only necessary if using\n//    `:before` to contain the top-margins of child elements.\n//\n// Source: http://nicolasgallagher.com/micro-clearfix-hack/\n\n.clearfix() {\n  &:before,\n  &:after {\n    display: table; // 2\n    content: \" \"; // 1\n  }\n  &:after {\n    clear: both;\n  }\n}\n","// Center-align a block level element\n\n.center-block() {\n  display: block;\n  margin-right: auto;\n  margin-left: auto;\n}\n","// stylelint-disable font-family-name-quotes, font-family-no-missing-generic-family-keyword\n\n// CSS image replacement\n//\n// Heads up! v3 launched with only `.hide-text()`, but per our pattern for\n// mixins being reused as classes with the same name, this doesn't hold up. As\n// of v3.0.1 we have added `.text-hide()` and deprecated `.hide-text()`.\n//\n// Source: https://github.com/h5bp/html5-boilerplate/commit/aa0396eae757\n\n// Deprecated as of v3.0.1 (has been removed in v4)\n.hide-text() {\n  font: ~\"0/0\" a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n\n// New mixin to use as of v3.0.1\n.text-hide() {\n  .hide-text();\n}\n","// stylelint-disable declaration-no-important, at-rule-no-vendor-prefix\n\n//\n// Responsive: Utility classes\n// --------------------------------------------------\n\n\n// IE10 in Windows (Phone) 8\n//\n// Support for responsive views via media queries is kind of borked in IE10, for\n// Surface/desktop in split view and for Windows Phone 8. This particular fix\n// must be accompanied by a snippet of JavaScript to sniff the user agent and\n// apply some conditional CSS to *only* the Surface/desktop Windows 8. 
Look at\n// our Getting Started page for more information on this bug.\n//\n// For more information, see the following:\n//\n// Issue: https://github.com/twbs/bootstrap/issues/10497\n// Docs: https://getbootstrap.com/docs/3.4/getting-started/#support-ie10-width\n// Source: https://timkadlec.com/2013/01/windows-phone-8-and-device-width/\n// Source: https://timkadlec.com/2012/10/ie10-snap-mode-and-responsive-design/\n\n@-ms-viewport {\n  width: device-width;\n}\n\n\n// Visibility utilities\n// Note: Deprecated .visible-xs, .visible-sm, .visible-md, and .visible-lg as of v3.2.0\n.visible-xs,\n.visible-sm,\n.visible-md,\n.visible-lg {\n  .responsive-invisibility();\n}\n\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n  display: none !important;\n}\n\n.visible-xs {\n  @media (max-width: @screen-xs-max) {\n    .responsive-visibility();\n  }\n}\n.visible-xs-block {\n  @media (max-width: @screen-xs-max) {\n    display: block !important;\n  }\n}\n.visible-xs-inline {\n  @media (max-width: @screen-xs-max) {\n    display: inline !important;\n  }\n}\n.visible-xs-inline-block {\n  @media (max-width: @screen-xs-max) {\n    display: inline-block !important;\n  }\n}\n\n.visible-sm {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    .responsive-visibility();\n  }\n}\n.visible-sm-block {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    display: block !important;\n  }\n}\n.visible-sm-inline {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    display: inline !important;\n  }\n}\n.visible-sm-inline-block {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    display: inline-block !important;\n  }\n}\n\n.visible-md {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    .responsive-visibility();\n  }\n}\n.visible-md-block {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    display: block !important;\n  }\n}\n.visible-md-inline {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    display: inline !important;\n  }\n}\n.visible-md-inline-block {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    display: inline-block !important;\n  }\n}\n\n.visible-lg {\n  @media (min-width: @screen-lg-min) {\n    .responsive-visibility();\n  }\n}\n.visible-lg-block {\n  @media (min-width: @screen-lg-min) {\n    display: block !important;\n  }\n}\n.visible-lg-inline {\n  @media (min-width: @screen-lg-min) {\n    display: inline !important;\n  }\n}\n.visible-lg-inline-block {\n  @media (min-width: @screen-lg-min) {\n    display: inline-block !important;\n  }\n}\n\n.hidden-xs {\n  @media (max-width: @screen-xs-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-sm {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-md {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-lg {\n  @media (min-width: @screen-lg-min) {\n    .responsive-invisibility();\n  }\n}\n\n\n// Print utilities\n//\n// Media queries are placed on the inside to be mixin-friendly.\n\n// Note: Deprecated .visible-print as of v3.2.0\n.visible-print {\n  .responsive-invisibility();\n\n  @media print 
{\n    .responsive-visibility();\n  }\n}\n.visible-print-block {\n  display: none !important;\n\n  @media print {\n    display: block !important;\n  }\n}\n.visible-print-inline {\n  display: none !important;\n\n  @media print {\n    display: inline !important;\n  }\n}\n.visible-print-inline-block {\n  display: none !important;\n\n  @media print {\n    display: inline-block !important;\n  }\n}\n\n.hidden-print {\n  @media print {\n    .responsive-invisibility();\n  }\n}\n","// stylelint-disable declaration-no-important\n\n.responsive-visibility() {\n  display: block !important;\n  table&  { display: table !important; }\n  tr&     { display: table-row !important; }\n  th&,\n  td&     { display: table-cell !important; }\n}\n\n.responsive-invisibility() {\n  display: none !important;\n}\n"]}
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.min.css b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.min.css
deleted file mode 100644
index 5b96335..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.min.css
+++ /dev/null
@@ -1,6 +0,0 @@
-/*!
- * Bootstrap v3.4.1 (https://getbootstrap.com/)
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- *//*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:none;text-decoration:underline;-webkit-text-decoration:underline dotted;-moz-text-decoration:underline dotted;text-decoration:underline dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{font-size:2em;margin:.67em 0}mark{background:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;height:0}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{color:inherit;font:inherit;margin:0}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-appearance:textfield;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}legend{border:0;padding:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-collapse:collapse;border-spacing:0}td,th{padding:0}/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */@media print{*,:after,:before{color:#000!important;text-shadow:none!important;background:0 0!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #ddd!important}}@font-face{font-family:"Glyphicons Halflings";src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format("embedded-opentype"),url(../fonts/glyphicons-halflings-regular.woff2) format("woff2"),url(../fonts/glyphicons-halflings-regular.woff) format("woff"),url(../fonts/glyphicons-halflings-regular.ttf) format("truetype"),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format("svg")}.glyphicon{position:relative;top:1px;display:inline-block;font-family:"Glyphicons Halflings";font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\002a"}.glyphicon-plus:before{content:"\002b"}.glyphicon-eur:before,.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon
-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:be
fore{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-cd:before{content:"\e201"}.glyphicon-save-file:before{content:"\e202"}.glyphicon-open-file:before{content:"\e203"}.glyphicon-level-up:before{content:"\e204"}.glyphicon-copy:before{content:"\e205"}.glyphicon-paste:before{content:"\e206"}.glyphicon-alert:before{content:"\e209"}.glyphicon-equalizer:before{content:"\e210"}.glyphicon-king:before{content:"\e211
"}.glyphicon-queen:before{content:"\e212"}.glyphicon-pawn:before{content:"\e213"}.glyphicon-bishop:before{content:"\e214"}.glyphicon-knight:before{content:"\e215"}.glyphicon-baby-formula:before{content:"\e216"}.glyphicon-tent:before{content:"\26fa"}.glyphicon-blackboard:before{content:"\e218"}.glyphicon-bed:before{content:"\e219"}.glyphicon-apple:before{content:"\f8ff"}.glyphicon-erase:before{content:"\e221"}.glyphicon-hourglass:before{content:"\231b"}.glyphicon-lamp:before{content:"\e223"}.glyphicon-duplicate:before{content:"\e224"}.glyphicon-piggy-bank:before{content:"\e225"}.glyphicon-scissors:before{content:"\e226"}.glyphicon-bitcoin:before{content:"\e227"}.glyphicon-btc:before{content:"\e227"}.glyphicon-xbt:before{content:"\e227"}.glyphicon-yen:before{content:"\00a5"}.glyphicon-jpy:before{content:"\00a5"}.glyphicon-ruble:before{content:"\20bd"}.glyphicon-rub:before{content:"\20bd"}.glyphicon-scale:before{content:"\e230"}.glyphicon-ice-lolly:before{content:"\e231"}.glyphicon-ice-lolly-tasted:before{content:"\e232"}.glyphicon-education:before{content:"\e233"}.glyphicon-option-horizontal:before{content:"\e234"}.glyphicon-option-vertical:before{content:"\e235"}.glyphicon-menu-hamburger:before{content:"\e236"}.glyphicon-modal-window:before{content:"\e237"}.glyphicon-oil:before{content:"\e238"}.glyphicon-grain:before{content:"\e239"}.glyphicon-sunglasses:before{content:"\e240"}.glyphicon-text-size:before{content:"\e241"}.glyphicon-text-color:before{content:"\e242"}.glyphicon-text-background:before{content:"\e243"}.glyphicon-object-align-top:before{content:"\e244"}.glyphicon-object-align-bottom:before{content:"\e245"}.glyphicon-object-align-horizontal:before{content:"\e246"}.glyphicon-object-align-left:before{content:"\e247"}.glyphicon-object-align-vertical:before{content:"\e248"}.glyphicon-object-align-right:before{content:"\e249"}.glyphicon-triangle-right:before{content:"\e250"}.glyphicon-triangle-left:before{content:"\e251"}.glyphicon-triangle-bottom:before{content:"\e252"}.glyphicon-triangle-top:before{content:"\e253"}.glyphicon-console:before{content:"\e254"}.glyphicon-superscript:before{content:"\e255"}.glyphicon-subscript:before{content:"\e256"}.glyphicon-menu-left:before{content:"\e257"}.glyphicon-menu-right:before{content:"\e258"}.glyphicon-menu-down:before{content:"\e259"}.glyphicon-menu-up:before{content:"\e260"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:after,:before{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#337ab7;text-decoration:none}a:focus,a:hover{color:#23527c;text-decoration:underline}a:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.carousel-inner>.item>a>img,.carousel-inner>.item>img,.img-responsive,.thumbnail a>img,.thumbnail>img{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out;display:inline-block;max-width:100%;height:auto}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px 
solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role=button]{cursor:pointer}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-weight:400;line-height:1;color:#777}.h1,.h2,.h3,h1,h2,h3{margin-top:20px;margin-bottom:10px}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small{font-size:65%}.h4,.h5,.h6,h4,h5,h6{margin-top:10px;margin-bottom:10px}.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-size:75%}.h1,h1{font-size:36px}.h2,h2{font-size:30px}.h3,h3{font-size:24px}.h4,h4{font-size:18px}.h5,h5{font-size:14px}.h6,h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media (min-width:768px){.lead{font-size:21px}}.small,small{font-size:85%}.mark,mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#337ab7}a.text-primary:focus,a.text-primary:hover{color:#286090}.text-success{color:#3c763d}a.text-success:focus,a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:focus,a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:focus,a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:focus,a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#337ab7}a.bg-primary:focus,a.bg-primary:hover{background-color:#286090}.bg-success{background-color:#dff0d8}a.bg-success:focus,a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:focus,a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:focus,a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:focus,a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ol,ul{margin-top:0;margin-bottom:10px}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none;margin-left:-5px}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dd,dt{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;clear:left;text-align:right;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[data-original-title],abbr[title]{cursor:help}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote ol:last-child,blockquote p:last-child,blockquote ul:last-child{margin-bottom:0}blockquote .small,blockquote footer,blockquote 
small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote .small:before,blockquote footer:before,blockquote small:before{content:"\2014 \00A0"}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse .small:before,.blockquote-reverse footer:before,.blockquote-reverse small:before,blockquote.pull-right .small:before,blockquote.pull-right footer:before,blockquote.pull-right small:before{content:""}.blockquote-reverse .small:after,.blockquote-reverse footer:after,.blockquote-reverse small:after,blockquote.pull-right .small:after,blockquote.pull-right footer:after,blockquote.pull-right small:after{content:"\00A0 \2014"}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;font-weight:700;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media (min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.row-no-gutters{margin-right:0;margin-left:0}.row-no-gutters 
[class*=col-]{padding-right:0;padding-left:0}.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media 
(min-width:768px){.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media (min-width:992px){.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin-left:0}}@media 
(min-width:1200px){.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}table{background-color:transparent}table col[class*=col-]{position:static;display:table-column;float:none}table td[class*=col-],table th[class*=col-]{position:static;display:table-cell;float:none}caption{padding-top:8px;padding-bottom:8px;color:#777;text-align:left}th{text-align:left}.table{width:100%;max-width:100%;margin-bottom:20px}.table>tbody>tr>td,.table>tbody>tr>th,.table>tfoot>tr>td,.table>tfoot>tr>th,.table>thead>tr>td,.table>thead>tr>th{padding:8px;line-height:1.42857143;vertical-align:top;border-top:1px solid #ddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table>caption+thead>tr:first-child>td,.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>td,.table>thead:first-child>tr:first-child>th{border-top:0}.table>tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed>tbody>tr>td,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>td,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>thead>tr>th{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>tbody>tr>td,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>td,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border:1px solid 
#ddd}.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border-bottom-width:2px}.table-striped>tbody>tr:nth-of-type(odd){background-color:#f9f9f9}.table-hover>tbody>tr:hover{background-color:#f5f5f5}.table>tbody>tr.active>td,.table>tbody>tr.active>th,.table>tbody>tr>td.active,.table>tbody>tr>th.active,.table>tfoot>tr.active>td,.table>tfoot>tr.active>th,.table>tfoot>tr>td.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>thead>tr.active>th,.table>thead>tr>td.active,.table>thead>tr>th.active{background-color:#f5f5f5}.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr.active:hover>th,.table-hover>tbody>tr:hover>.active,.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover{background-color:#e8e8e8}.table>tbody>tr.success>td,.table>tbody>tr.success>th,.table>tbody>tr>td.success,.table>tbody>tr>th.success,.table>tfoot>tr.success>td,.table>tfoot>tr.success>th,.table>tfoot>tr>td.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>thead>tr.success>th,.table>thead>tr>td.success,.table>thead>tr>th.success{background-color:#dff0d8}.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr.success:hover>th,.table-hover>tbody>tr:hover>.success,.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover{background-color:#d0e9c6}.table>tbody>tr.info>td,.table>tbody>tr.info>th,.table>tbody>tr>td.info,.table>tbody>tr>th.info,.table>tfoot>tr.info>td,.table>tfoot>tr.info>th,.table>tfoot>tr>td.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>thead>tr.info>th,.table>thead>tr>td.info,.table>thead>tr>th.info{background-color:#d9edf7}.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr.info:hover>th,.table-hover>tbody>tr:hover>.info,.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover{background-color:#c4e3f3}.table>tbody>tr.warning>td,.table>tbody>tr.warning>th,.table>tbody>tr>td.warning,.table>tbody>tr>th.warning,.table>tfoot>tr.warning>td,.table>tfoot>tr.warning>th,.table>tfoot>tr>td.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>thead>tr.warning>th,.table>thead>tr>td.warning,.table>thead>tr>th.warning{background-color:#fcf8e3}.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr.warning:hover>th,.table-hover>tbody>tr:hover>.warning,.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover{background-color:#faf2cc}.table>tbody>tr.danger>td,.table>tbody>tr.danger>th,.table>tbody>tr>td.danger,.table>tbody>tr>th.danger,.table>tfoot>tr.danger>td,.table>tfoot>tr.danger>th,.table>tfoot>tr>td.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>thead>tr.danger>th,.table>thead>tr>td.danger,.table>thead>tr>th.danger{background-color:#f2dede}.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr.danger:hover>th,.table-hover>tbody>tr:hover>.danger,.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover{background-color:#ebcccc}.table-responsive{min-height:.01%;overflow-x:auto}@media screen and (max-width:767px){.table-responsive{width:100%;margin-bottom:15px;overflow-y:hidden;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid 
#ddd}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>td,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>thead>tr>th{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;max-width:100%;margin-bottom:5px;font-weight:700}input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;-webkit-appearance:none;-moz-appearance:none;appearance:none}input[type=checkbox],input[type=radio]{margin:4px 0 0;margin-top:1px\9;line-height:normal}fieldset[disabled] input[type=checkbox],fieldset[disabled] input[type=radio],input[type=checkbox].disabled,input[type=checkbox][disabled],input[type=radio].disabled,input[type=radio][disabled]{cursor:not-allowed}input[type=file]{display:block}input[type=range]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:7px;font-size:14px;line-height:1.42857143;color:#555}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.42857143;color:#555;background-color:#fff;background-image:none;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;-o-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;-webkit-transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s,-webkit-box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6);box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px 
rgba(102,175,233,.6)}.form-control::-moz-placeholder{color:#999;opacity:1}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control::-ms-expand{background-color:transparent;border:0}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{background-color:#eee;opacity:1}.form-control[disabled],fieldset[disabled] .form-control{cursor:not-allowed}textarea.form-control{height:auto}@media screen and (-webkit-min-device-pixel-ratio:0){input[type=date].form-control,input[type=datetime-local].form-control,input[type=month].form-control,input[type=time].form-control{line-height:34px}.input-group-sm input[type=date],.input-group-sm input[type=datetime-local],.input-group-sm input[type=month],.input-group-sm input[type=time],input[type=date].input-sm,input[type=datetime-local].input-sm,input[type=month].input-sm,input[type=time].input-sm{line-height:30px}.input-group-lg input[type=date],.input-group-lg input[type=datetime-local],.input-group-lg input[type=month],.input-group-lg input[type=time],input[type=date].input-lg,input[type=datetime-local].input-lg,input[type=month].input-lg,input[type=time].input-lg{line-height:46px}}.form-group{margin-bottom:15px}.checkbox,.radio{position:relative;display:block;margin-top:10px;margin-bottom:10px}.checkbox.disabled label,.radio.disabled label,fieldset[disabled] .checkbox label,fieldset[disabled] .radio label{cursor:not-allowed}.checkbox label,.radio label{min-height:20px;padding-left:20px;margin-bottom:0;font-weight:400;cursor:pointer}.checkbox input[type=checkbox],.checkbox-inline input[type=checkbox],.radio input[type=radio],.radio-inline input[type=radio]{position:absolute;margin-top:4px\9;margin-left:-20px}.checkbox+.checkbox,.radio+.radio{margin-top:-5px}.checkbox-inline,.radio-inline{position:relative;display:inline-block;padding-left:20px;margin-bottom:0;font-weight:400;vertical-align:middle;cursor:pointer}.checkbox-inline.disabled,.radio-inline.disabled,fieldset[disabled] .checkbox-inline,fieldset[disabled] .radio-inline{cursor:not-allowed}.checkbox-inline+.checkbox-inline,.radio-inline+.radio-inline{margin-top:0;margin-left:10px}.form-control-static{min-height:34px;padding-top:7px;padding-bottom:7px;margin-bottom:0}.form-control-static.input-lg,.form-control-static.input-sm{padding-right:0;padding-left:0}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}select[multiple].input-sm,textarea.input-sm{height:auto}.form-group-sm .form-control{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.form-group-sm select.form-control{height:30px;line-height:30px}.form-group-sm select[multiple].form-control,.form-group-sm textarea.form-control{height:auto}.form-group-sm .form-control-static{height:30px;min-height:32px;padding:6px 10px;font-size:12px;line-height:1.5}.input-lg{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-lg{height:46px;line-height:46px}select[multiple].input-lg,textarea.input-lg{height:auto}.form-group-lg .form-control{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.form-group-lg select.form-control{height:46px;line-height:46px}.form-group-lg select[multiple].form-control,.form-group-lg textarea.form-control{height:auto}.form-group-lg .form-control-static{height:46px;min-height:38px;padding:11px 
16px;font-size:18px;line-height:1.3333333}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.form-control-feedback{position:absolute;top:0;right:0;z-index:2;display:block;width:34px;height:34px;line-height:34px;text-align:center;pointer-events:none}.form-group-lg .form-control+.form-control-feedback,.input-group-lg+.form-control-feedback,.input-lg+.form-control-feedback{width:46px;height:46px;line-height:46px}.form-group-sm .form-control+.form-control-feedback,.input-group-sm+.form-control-feedback,.input-sm+.form-control-feedback{width:30px;height:30px;line-height:30px}.has-success .checkbox,.has-success .checkbox-inline,.has-success .control-label,.has-success .help-block,.has-success .radio,.has-success .radio-inline,.has-success.checkbox label,.has-success.checkbox-inline label,.has-success.radio label,.has-success.radio-inline label{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;background-color:#dff0d8;border-color:#3c763d}.has-success .form-control-feedback{color:#3c763d}.has-warning .checkbox,.has-warning .checkbox-inline,.has-warning .control-label,.has-warning .help-block,.has-warning .radio,.has-warning .radio-inline,.has-warning.checkbox label,.has-warning.checkbox-inline label,.has-warning.radio label,.has-warning.radio-inline label{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;background-color:#fcf8e3;border-color:#8a6d3b}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .checkbox,.has-error .checkbox-inline,.has-error .control-label,.has-error .help-block,.has-error .radio,.has-error .radio-inline,.has-error.checkbox label,.has-error.checkbox-inline label,.has-error.radio label,.has-error.radio-inline label{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;background-color:#f2dede;border-color:#a94442}.has-error .form-control-feedback{color:#a94442}.has-feedback label~.form-control-feedback{top:25px}.has-feedback label.sr-only~.form-control-feedback{top:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-static{display:inline-block}.form-inline .input-group{display:inline-table;vertical-align:middle}.form-inline .input-group .form-control,.form-inline .input-group .input-group-addon,.form-inline .input-group .input-group-btn{width:auto}.form-inline .input-group>.form-control{width:100%}.form-inline 
.control-label{margin-bottom:0;vertical-align:middle}.form-inline .checkbox,.form-inline .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.form-inline .checkbox label,.form-inline .radio label{padding-left:0}.form-inline .checkbox input[type=checkbox],.form-inline .radio input[type=radio]{position:relative;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .checkbox,.form-horizontal .checkbox-inline,.form-horizontal .radio,.form-horizontal .radio-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .checkbox,.form-horizontal .radio{min-height:27px}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.form-horizontal .control-label{padding-top:7px;margin-bottom:0;text-align:right}}.form-horizontal .has-feedback .form-control-feedback{right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:11px;font-size:18px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px;font-size:12px}}.btn{display:inline-block;margin-bottom:0;font-weight:400;text-align:center;white-space:nowrap;vertical-align:middle;-ms-touch-action:manipulation;touch-action:manipulation;cursor:pointer;background-image:none;border:1px solid transparent;padding:6px 12px;font-size:14px;line-height:1.42857143;border-radius:4px;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.btn.active.focus,.btn.active:focus,.btn.focus,.btn:active.focus,.btn:active:focus,.btn:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.focus,.btn:focus,.btn:hover{color:#333;text-decoration:none}.btn.active,.btn:active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{cursor:not-allowed;filter:alpha(opacity=65);opacity:.65;-webkit-box-shadow:none;box-shadow:none}a.btn.disabled,fieldset[disabled] a.btn{pointer-events:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default.focus,.btn-default:focus{color:#333;background-color:#e6e6e6;border-color:#8c8c8c}.btn-default:hover{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{color:#333;background-color:#e6e6e6;background-image:none;border-color:#adadad}.btn-default.active.focus,.btn-default.active:focus,.btn-default.active:hover,.btn-default:active.focus,.btn-default:active:focus,.btn-default:active:hover,.open>.dropdown-toggle.btn-default.focus,.open>.dropdown-toggle.btn-default:focus,.open>.dropdown-toggle.btn-default:hover{color:#333;background-color:#d4d4d4;border-color:#8c8c8c}.btn-default.disabled.focus,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled].focus,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#fff;border-color:#ccc}.btn-default 
.badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#337ab7;border-color:#2e6da4}.btn-primary.focus,.btn-primary:focus{color:#fff;background-color:#286090;border-color:#122b40}.btn-primary:hover{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{color:#fff;background-color:#286090;background-image:none;border-color:#204d74}.btn-primary.active.focus,.btn-primary.active:focus,.btn-primary.active:hover,.btn-primary:active.focus,.btn-primary:active:focus,.btn-primary:active:hover,.open>.dropdown-toggle.btn-primary.focus,.open>.dropdown-toggle.btn-primary:focus,.open>.dropdown-toggle.btn-primary:hover{color:#fff;background-color:#204d74;border-color:#122b40}.btn-primary.disabled.focus,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled].focus,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#337ab7;border-color:#2e6da4}.btn-primary .badge{color:#337ab7;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success.focus,.btn-success:focus{color:#fff;background-color:#449d44;border-color:#255625}.btn-success:hover{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{color:#fff;background-color:#449d44;background-image:none;border-color:#398439}.btn-success.active.focus,.btn-success.active:focus,.btn-success.active:hover,.btn-success:active.focus,.btn-success:active:focus,.btn-success:active:hover,.open>.dropdown-toggle.btn-success.focus,.open>.dropdown-toggle.btn-success:focus,.open>.dropdown-toggle.btn-success:hover{color:#fff;background-color:#398439;border-color:#255625}.btn-success.disabled.focus,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled].focus,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#5cb85c;border-color:#4cae4c}.btn-success .badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info.focus,.btn-info:focus{color:#fff;background-color:#31b0d5;border-color:#1b6d85}.btn-info:hover{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{color:#fff;background-color:#31b0d5;background-image:none;border-color:#269abc}.btn-info.active.focus,.btn-info.active:focus,.btn-info.active:hover,.btn-info:active.focus,.btn-info:active:focus,.btn-info:active:hover,.open>.dropdown-toggle.btn-info.focus,.open>.dropdown-toggle.btn-info:focus,.open>.dropdown-toggle.btn-info:hover{color:#fff;background-color:#269abc;border-color:#1b6d85}.btn-info.disabled.focus,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled].focus,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#5bc0de;border-color:#46b8da}.btn-info 
.badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning.focus,.btn-warning:focus{color:#fff;background-color:#ec971f;border-color:#985f0d}.btn-warning:hover{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{color:#fff;background-color:#ec971f;background-image:none;border-color:#d58512}.btn-warning.active.focus,.btn-warning.active:focus,.btn-warning.active:hover,.btn-warning:active.focus,.btn-warning:active:focus,.btn-warning:active:hover,.open>.dropdown-toggle.btn-warning.focus,.open>.dropdown-toggle.btn-warning:focus,.open>.dropdown-toggle.btn-warning:hover{color:#fff;background-color:#d58512;border-color:#985f0d}.btn-warning.disabled.focus,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled].focus,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#f0ad4e;border-color:#eea236}.btn-warning .badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger.focus,.btn-danger:focus{color:#fff;background-color:#c9302c;border-color:#761c19}.btn-danger:hover{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{color:#fff;background-color:#c9302c;background-image:none;border-color:#ac2925}.btn-danger.active.focus,.btn-danger.active:focus,.btn-danger.active:hover,.btn-danger:active.focus,.btn-danger:active:focus,.btn-danger:active:hover,.open>.dropdown-toggle.btn-danger.focus,.open>.dropdown-toggle.btn-danger:focus,.open>.dropdown-toggle.btn-danger:hover{color:#fff;background-color:#ac2925;border-color:#761c19}.btn-danger.disabled.focus,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled].focus,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{font-weight:400;color:#337ab7;border-radius:0}.btn-link,.btn-link.active,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:active,.btn-link:focus,.btn-link:hover{border-color:transparent}.btn-link:focus,.btn-link:hover{color:#23527c;text-decoration:underline;background-color:transparent}.btn-link[disabled]:focus,.btn-link[disabled]:hover,fieldset[disabled] .btn-link:focus,fieldset[disabled] .btn-link:hover{color:#777;text-decoration:none}.btn-group-lg>.btn,.btn-lg{padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.btn-group-sm>.btn,.btn-sm{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-xs>.btn,.btn-xs{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s 
linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition-property:height,visibility;-o-transition-property:height,visibility;transition-property:height,visibility;-webkit-transition-duration:.35s;-o-transition-duration:.35s;transition-duration:.35s;-webkit-transition-timing-function:ease;-o-transition-timing-function:ease;transition-timing-function:ease}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid\9;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175)}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#333;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{color:#262626;text-decoration:none;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#fff;text-decoration:none;background-color:#337ab7;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#777}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{content:"";border-top:0;border-bottom:4px dashed;border-bottom:4px solid\9}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}.navbar-right .dropdown-menu-left{right:auto;left:0}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;float:left}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:2}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn,.btn-toolbar .btn-group,.btn-toolbar 
.input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-bottom-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-left-radius:0;border-top-right-radius:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-top-right-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{display:table-cell;float:none;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle=buttons]>.btn input[type=checkbox],[data-toggle=buttons]>.btn input[type=radio],[data-toggle=buttons]>.btn-group>.btn input[type=checkbox],[data-toggle=buttons]>.btn-group>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-right:0;padding-left:0}.input-group .form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group 
.form-control:focus{z-index:3}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn,textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn,textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group .form-control,.input-group-addon,.input-group-btn{display:table-cell}.input-group .form-control:not(:first-child):not(:last-child),.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=checkbox],.input-group-addon input[type=radio]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn-group:not(:last-child)>.btn,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:first-child>.btn-group:not(:first-child)>.btn,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:active,.input-group-btn>.btn:focus,.input-group-btn>.btn:hover{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{z-index:2;margin-left:-1px}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 
15px}.nav>li>a:focus,.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#777}.nav>li.disabled>a:focus,.nav>li.disabled>a:hover{color:#777;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:focus,.nav .open>a:hover{background-color:#eee;border-color:#337ab7}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:focus,.nav-tabs>li.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:focus,.nav-pills>li.active>a:hover{color:#fff;background-color:#337ab7}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{padding-right:15px;padding-left:15px;overflow-x:visible;border-top:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1);-webkit-overflow-scrolling:touch}.navbar-collapse.in{overflow-y:auto}@media 
(min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse{padding-right:0;padding-left:0}}.navbar-fixed-bottom,.navbar-fixed-top{position:fixed;right:0;left:0;z-index:1030}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:340px}@media (max-device-width:480px) and (orientation:landscape){.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:200px}}@media (min-width:768px){.navbar-fixed-bottom,.navbar-fixed-top{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-brand{float:left;height:50px;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-brand>img{display:block}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-right:15px;margin-top:8px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu .dropdown-header,.navbar-nav .open .dropdown-menu>li>a{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:focus,.navbar-nav .open .dropdown-menu>li>a:hover{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}.navbar-form{padding:10px 15px;margin-right:-15px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);margin-top:8px;margin-bottom:8px}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .form-control-static{display:inline-block}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .form-control,.navbar-form .input-group .input-group-addon,.navbar-form .input-group 
.input-group-btn{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .checkbox,.navbar-form .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .checkbox label,.navbar-form .radio label{padding-left:0}.navbar-form .checkbox input[type=checkbox],.navbar-form .radio input[type=radio]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}.navbar-form .form-group:last-child{margin-bottom:0}}@media (min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-left-radius:0;border-top-right-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{margin-bottom:0;border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-right:15px;margin-left:15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important;margin-right:-15px}.navbar-right~.navbar-right{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:focus,.navbar-default .navbar-brand:hover{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:focus,.navbar-default .navbar-nav>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:focus,.navbar-default .navbar-nav>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:focus,.navbar-default .navbar-nav>.disabled>a:hover{color:#ccc;background-color:transparent}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:focus,.navbar-default .navbar-nav>.open>a:hover{color:#555;background-color:#e7e7e7}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#ccc;background-color:transparent}}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:focus,.navbar-default .navbar-toggle:hover{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-default .btn-link{color:#777}.navbar-default 
.btn-link:focus,.navbar-default .btn-link:hover{color:#333}.navbar-default .btn-link[disabled]:focus,.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:focus,fieldset[disabled] .navbar-default .btn-link:hover{color:#ccc}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#9d9d9d}.navbar-inverse .navbar-brand:focus,.navbar-inverse .navbar-brand:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a:focus,.navbar-inverse .navbar-nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:focus,.navbar-inverse .navbar-nav>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:focus,.navbar-inverse .navbar-nav>.disabled>a:hover{color:#444;background-color:transparent}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:focus,.navbar-inverse .navbar-nav>.open>a:hover{color:#fff;background-color:#080808}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#444;background-color:transparent}}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:focus,.navbar-inverse .navbar-toggle:hover{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-link{color:#9d9d9d}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .btn-link{color:#9d9d9d}.navbar-inverse .btn-link:focus,.navbar-inverse .btn-link:hover{color:#fff}.navbar-inverse .btn-link[disabled]:focus,.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:focus,fieldset[disabled] .navbar-inverse .btn-link:hover{color:#444}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#777}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.42857143;color:#337ab7;text-decoration:none;background-color:#fff;border:1px solid 
#ddd}.pagination>li>a:focus,.pagination>li>a:hover,.pagination>li>span:focus,.pagination>li>span:hover{z-index:2;color:#23527c;background-color:#eee;border-color:#ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>.active>a,.pagination>.active>a:focus,.pagination>.active>a:hover,.pagination>.active>span,.pagination>.active>span:focus,.pagination>.active>span:hover{z-index:3;color:#fff;cursor:default;background-color:#337ab7;border-color:#337ab7}.pagination>.disabled>a,.pagination>.disabled>a:focus,.pagination>.disabled>a:hover,.pagination>.disabled>span,.pagination>.disabled>span:focus,.pagination>.disabled>span:hover{color:#777;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px;line-height:1.3333333}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-top-left-radius:6px;border-bottom-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px;line-height:1.5}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-top-left-radius:3px;border-bottom-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:focus,.pager li>a:hover{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:focus,.pager .disabled>a:hover,.pager .disabled>span{color:#777;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:focus,a.label:hover{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#777}.label-default[href]:focus,.label-default[href]:hover{background-color:#5e5e5e}.label-primary{background-color:#337ab7}.label-primary[href]:focus,.label-primary[href]:hover{background-color:#286090}.label-success{background-color:#5cb85c}.label-success[href]:focus,.label-success[href]:hover{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:focus,.label-info[href]:hover{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:focus,.label-warning[href]:hover{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:focus,.label-danger[href]:hover{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 7px;font-size:12px;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:middle;background-color:#777;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-group-xs>.btn .badge,.btn-xs .badge{top:0;padding:1px 
5px}a.badge:focus,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#337ab7;background-color:#fff}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding-top:30px;padding-bottom:30px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron .h1,.jumbotron h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.jumbotron>hr{border-top-color:#d5d5d5}.container .jumbotron,.container-fluid .jumbotron{padding-right:15px;padding-left:15px;border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron,.container-fluid .jumbotron{padding-right:60px;padding-left:60px}.jumbotron .h1,.jumbotron h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:border .2s ease-in-out;-o-transition:border .2s ease-in-out;transition:border .2s ease-in-out}.thumbnail a>img,.thumbnail>img{margin-right:auto;margin-left:auto}a.thumbnail.active,a.thumbnail:focus,a.thumbnail:hover{border-color:#337ab7}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0%;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#337ab7;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress-bar-striped,.progress-striped .progress-bar{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;background-size:40px 40px}.progress-bar.active,.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent)}.media{margin-top:15px}.media:first-child{margin-top:0}.media,.media-body{overflow:hidden;zoom:1}.media-body{width:10000px}.media-object{display:block}.media-object.img-thumbnail{max-width:none}.media-right,.media>.pull-right{padding-left:10px}.media-left,.media>.pull-left{padding-right:10px}.media-body,.media-left,.media-right{display:table-cell;vertical-align:top}.media-middle{vertical-align:middle}.media-bottom{vertical-align:bottom}.media-heading{margin-top:0;margin-bottom:5px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-left-radius:4px;border-top-right-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.list-group-item.disabled,.list-group-item.disabled:focus,.list-group-item.disabled:hover{color:#777;cursor:not-allowed;background-color:#eee}.list-group-item.disabled .list-group-item-heading,.list-group-item.disabled:focus .list-group-item-heading,.list-group-item.disabled:hover .list-group-item-heading{color:inherit}.list-group-item.disabled .list-group-item-text,.list-group-item.disabled:focus .list-group-item-text,.list-group-item.disabled:hover .list-group-item-text{color:#777}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{z-index:2;color:#fff;background-color:#337ab7;border-color:#337ab7}.list-group-item.active .list-group-item-heading,.list-group-item.active .list-group-item-heading>.small,.list-group-item.active .list-group-item-heading>small,.list-group-item.active:focus .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading>.small,.list-group-item.active:focus .list-group-item-heading>small,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading>.small,.list-group-item.active:hover .list-group-item-heading>small{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:focus .list-group-item-text,.list-group-item.active:hover .list-group-item-text{color:#c7ddef}a.list-group-item,button.list-group-item{color:#555}a.list-group-item .list-group-item-heading,button.list-group-item .list-group-item-heading{color:#333}a.list-group-item:focus,a.list-group-item:hover,button.list-group-item:focus,button.list-group-item:hover{color:#555;text-decoration:none;background-color:#f5f5f5}button.list-group-item{width:100%;text-align:left}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success,button.list-group-item-success{color:#3c763d}a.list-group-item-success .list-group-item-heading,button.list-group-item-success .list-group-item-heading{color:inherit}a.list-group-item-success:focus,a.list-group-item-success:hover,button.list-group-item-success:focus,button.list-group-item-success:hover{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:focus,a.list-group-item-success.active:hover,button.list-group-item-success.active,button.list-group-item-success.active:focus,button.list-group-item-success.active:hover{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info,button.list-group-item-info{color:#31708f}a.list-group-item-info .list-group-item-heading,button.list-group-item-info 
.list-group-item-heading{color:inherit}a.list-group-item-info:focus,a.list-group-item-info:hover,button.list-group-item-info:focus,button.list-group-item-info:hover{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:focus,a.list-group-item-info.active:hover,button.list-group-item-info.active,button.list-group-item-info.active:focus,button.list-group-item-info.active:hover{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning,button.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading,button.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:focus,a.list-group-item-warning:hover,button.list-group-item-warning:focus,button.list-group-item-warning:hover{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:focus,a.list-group-item-warning.active:hover,button.list-group-item-warning.active,button.list-group-item-warning.active:focus,button.list-group-item-warning.active:hover{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger,button.list-group-item-danger{color:#a94442}a.list-group-item-danger .list-group-item-heading,button.list-group-item-danger .list-group-item-heading{color:inherit}a.list-group-item-danger:focus,a.list-group-item-danger:hover,button.list-group-item-danger:focus,button.list-group-item-danger:hover{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:focus,a.list-group-item-danger.active:hover,button.list-group-item-danger.active,button.list-group-item-danger.active:focus,button.list-group-item-danger.active:hover{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-left-radius:3px;border-top-right-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>.small,.panel-title>.small>a,.panel-title>a,.panel-title>small,.panel-title>small>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid #ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.list-group,.panel>.panel-collapse>.list-group{margin-bottom:0}.panel>.list-group .list-group-item,.panel>.panel-collapse>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group:first-child .list-group-item:first-child,.panel>.panel-collapse>.list-group:first-child .list-group-item:first-child{border-top:0;border-top-left-radius:3px;border-top-right-radius:3px}.panel>.list-group:last-child .list-group-item:last-child,.panel>.panel-collapse>.list-group:last-child .list-group-item:last-child{border-bottom:0;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.panel-heading+.panel-collapse>.list-group .list-group-item:first-child{border-top-left-radius:0;border-top-right-radius:0}.panel-heading+.list-group 
.list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.panel-collapse>.table,.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.panel-collapse>.table caption,.panel>.table caption,.panel>.table-responsive>.table caption{padding-right:15px;padding-left:15px}.panel>.table-responsive:first-child>.table:first-child,.panel>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child,.panel>.table:first-child>thead:first-child>tr:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table-responsive:last-child>.table:last-child,.panel>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child 
th:first-child{border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive,.panel>.table+.panel-body,.panel>.table-responsive+.panel-body{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child td,.panel>.table>tbody:first-child>tr:first-child th{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group 
.panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.list-group,.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#337ab7}.panel-primary>.panel-heading{color:#fff;background-color:#337ab7;border-color:#337ab7}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#337ab7}.panel-primary>.panel-heading .badge{color:#337ab7;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#337ab7}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading .badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;filter:alpha(opacity=20);opacity:.2}.close:focus,.close:hover{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{padding:0;cursor:pointer;background:0 
0;border:0;-webkit-appearance:none;-moz-appearance:none;appearance:none}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);-o-transform:translate(0,-25%);transform:translate(0,-25%);-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:-webkit-transform .3s ease-out;transition:transform .3s ease-out;transition:transform .3s ease-out,-webkit-transform .3s ease-out,-o-transform .3s ease-out}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);-o-transform:translate(0,0);transform:translate(0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5);outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-style:normal;font-weight:400;line-height:1.42857143;line-break:auto;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;font-size:12px;filter:alpha(opacity=0);opacity:0}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px;bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;left:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right 
.tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-style:normal;font-weight:400;line-height:1.42857143;line-break:auto;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;font-size:14px;background-color:#fff;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2)}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover>.arrow{border-width:11px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" ";border-right-width:0;border-left-color:#fff}.popover-title{padding:8px 14px;margin:0;font-size:14px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>a>img,.carousel-inner>.item>img{line-height:1}@media all and (transform-3d),(-webkit-transform-3d){.carousel-inner>.item{-webkit-transition:-webkit-transform .6s ease-in-out;-o-transition:-o-transform .6s ease-in-out;transition:-webkit-transform .6s ease-in-out;transition:transform .6s ease-in-out;transition:transform .6s ease-in-out,-webkit-transform .6s ease-in-out,-o-transform .6s 
ease-in-out;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-inner>.item.active.right,.carousel-inner>.item.next{-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0);left:0}.carousel-inner>.item.active.left,.carousel-inner>.item.prev{-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0);left:0}.carousel-inner>.item.active,.carousel-inner>.item.next.left,.carousel-inner>.item.prev.right{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0);left:0}}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);background-color:rgba(0,0,0,0);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:focus,.carousel-control:hover{color:#fff;text-decoration:none;outline:0;filter:alpha(opacity=90);opacity:.9}.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{position:absolute;top:50%;z-index:5;display:inline-block;margin-top:-10px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{left:50%;margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{right:50%;margin-right:-10px}.carousel-control .icon-next,.carousel-control .icon-prev{width:20px;height:20px;font-family:serif;line-height:1}.carousel-control .icon-prev:before{content:"\2039"}.carousel-control .icon-next:before{content:"\203a"}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000\9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators 
.active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{width:30px;height:30px;margin-top:-10px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-10px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.btn-group-vertical>.btn-group:after,.btn-group-vertical>.btn-group:before,.btn-toolbar:after,.btn-toolbar:before,.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.dl-horizontal dd:after,.dl-horizontal dd:before,.form-horizontal .form-group:after,.form-horizontal .form-group:before,.modal-footer:after,.modal-footer:before,.modal-header:after,.modal-header:before,.nav:after,.nav:before,.navbar-collapse:after,.navbar-collapse:before,.navbar-header:after,.navbar-header:before,.navbar:after,.navbar:before,.pager:after,.pager:before,.panel-body:after,.panel-body:before,.row:after,.row:before{display:table;content:" "}.btn-group-vertical>.btn-group:after,.btn-toolbar:after,.clearfix:after,.container-fluid:after,.container:after,.dl-horizontal dd:after,.form-horizontal .form-group:after,.modal-footer:after,.modal-header:after,.nav:after,.navbar-collapse:after,.navbar-header:after,.navbar:after,.pager:after,.panel-body:after,.row:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-lg,.visible-md,.visible-sm,.visible-xs{display:none!important}.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and 
(max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}}
-/*# sourceMappingURL=bootstrap.min.css.map */
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.min.css.map b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.min.css.map
deleted file mode 100644
index 0ae3de5..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.min.css.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"sources":["bootstrap.css","less/normalize.less","dist/css/bootstrap.css","less/print.less","less/glyphicons.less","less/scaffolding.less","less/mixins/vendor-prefixes.less","less/mixins/tab-focus.less","less/mixins/image.less","less/type.less","less/mixins/text-emphasis.less","less/mixins/background-variant.less","less/mixins/text-overflow.less","less/code.less","less/grid.less","less/mixins/grid.less","less/mixins/grid-framework.less","less/tables.less","less/mixins/table-row.less","less/forms.less","less/mixins/forms.less","less/buttons.less","less/mixins/buttons.less","less/mixins/opacity.less","less/component-animations.less","less/dropdowns.less","less/mixins/nav-divider.less","less/mixins/reset-filter.less","less/button-groups.less","less/mixins/border-radius.less","less/input-groups.less","less/navs.less","less/navbar.less","less/mixins/nav-vertical-align.less","less/utilities.less","less/breadcrumbs.less","less/pagination.less","less/mixins/pagination.less","less/pager.less","less/labels.less","less/mixins/labels.less","less/badges.less","less/jumbotron.less","less/thumbnails.less","less/alerts.less","less/mixins/alerts.less","less/progress-bars.less","less/mixins/gradients.less","less/mixins/progress-bar.less","less/media.less","less/list-group.less","less/mixins/list-group.less","less/panels.less","less/mixins/panels.less","less/responsive-embed.less","less/wells.less","less/close.less","less/modals.less","less/tooltip.less","less/mixins/reset-text.less","less/popovers.less","less/carousel.less","less/mixins/clearfix.less","less/mixins/center-block.less","less/mixins/hide-text.less","less/responsive-utilities.less","less/mixins/responsive-visibility.less"],"names":[],"mappings":"AAAA;;;;AAKA,4ECKA,KACE,YAAA,WACA,qBAAA,KACA,yBAAA,KAOF,KACE,OAAA,EAaF,QCnBA,MACA,QACA,WACA,OACA,OACA,OACA,OACA,KACA,KACA,IACA,QACA,QDqBE,QAAA,MAQF,MCzBA,OACA,SACA,MD2BE,QAAA,aACA,eAAA,SAQF,sBACE,QAAA,KACA,OAAA,EAQF,SCrCA,SDuCE,QAAA,KAUF,EACE,iBAAA,YAQF,SCnDA,QDqDE,QAAA,EAWF,YACE,cAAA,KACA,gBAAA,UACA,wBAAA,UAAA,OAAA,qBAAA,UAAA,OAAA,gBAAA,UAAA,OAOF,EC/DA,ODiEE,YAAA,IAOF,IACE,WAAA,OAQF,GACE,UAAA,IACA,OAAA,MAAA,EAOF,KACE,WAAA,KACA,MAAA,KAOF,MACE,UAAA,IAOF,ICzFA,ID2FE,UAAA,IACA,YAAA,EACA,SAAA,SACA,eAAA,SAGF,IACE,IAAA,MAGF,IACE,OAAA,OAUF,IACE,OAAA,EAOF,eACE,SAAA,OAUF,OACE,OAAA,IAAA,KAOF,GACE,mBAAA,YAAA,gBAAA,YAAA,WAAA,YACA,OAAA,EAOF,IACE,SAAA,KAOF,KC7HA,IACA,IACA,KD+HE,YAAA,SAAA,CAAA,UACA,UAAA,IAkBF,OC7IA,MACA,SACA,OACA,SD+IE,MAAA,QACA,KAAA,QACA,OAAA,EAOF,OACE,SAAA,QAUF,OC1JA,OD4JE,eAAA,KAWF,OCnKA,wBACA,kBACA,mBDqKE,mBAAA,OACA,OAAA,QAOF,iBCxKA,qBD0KE,OAAA,QAOF,yBC7KA,wBD+KE,OAAA,EACA,QAAA,EAQF,MACE,YAAA,OAWF,qBC5LA,kBD8LE,mBAAA,WAAA,gBAAA,WAAA,WAAA,WACA,QAAA,EASF,8CCjMA,8CDmME,OAAA,KAQF,mBACE,mBAAA,UACA,mBAAA,YAAA,gBAAA,YAAA,WAAA,YASF,iDC5MA,8CD8ME,mBAAA,KAOF,SACE,OAAA,IAAA,MAAA,OACA,OAAA,EAAA,IACA,QAAA,MAAA,OAAA,MAQF,OACE,OAAA,EACA,QAAA,EAOF,SACE,SAAA,KAQF,SACE,YAAA,IAUF,MACE,gBAAA,SACA,eAAA,EAGF,GC3OA,GD6OE,QAAA,EDlPF,qFGhLA,aACE,ED2LA,OADA,QCvLE,MAAA,eACA,YAAA,eACA,WAAA,cACA,mBAAA,eAAA,WAAA,eAGF,ED0LA,UCxLE,gBAAA,UAGF,cACE,QAAA,KAAA,WAAA,IAGF,kBACE,QAAA,KAAA,YAAA,IAKF,mBDqLA,6BCnLE,QAAA,GDuLF,WCpLA,IAEE,OAAA,IAAA,MAAA,KACA,kBAAA,MAGF,MACE,QAAA,mBDqLF,IClLA,GAEE,kBAAA,MAGF,IACE,UAAA,eDmLF,GACA,GCjLA,EAGE,QAAA,EACA,OAAA,EAGF,GD+KA,GC7KE,iBAAA,MAMF,QACE,QAAA,KAEF,YD2KA,oBCxKI,iBAAA,eAGJ,OACE,OAAA,IAAA,MAAA,KAGF,OACE,gBAAA,mBADF,UD2KA,UCtKI,iBAAA,eD0KJ,mBCvKA,mBAGI,OAAA,IAAA,MAAA,gBCrFN,WACE,YAAA,uBACA,IAAA,+CACA,IAAA,sDAAA,2BAAA,CAAA,iDAAA,eAAA,CAAA,gDAAA,cAAA,CAAA,+CAAA,kBAAA,CAAA,2EAAA,cAQF,WAC
E,SAAA,SACA,IAAA,IACA,QAAA,aACA,YAAA,uBACA,WAAA,OACA,YAAA,IACA,YAAA,EACA,uBAAA,YACA,wBAAA,UAIkC,2BAAW,QAAA,QACX,uBAAW,QAAA,QF2P/C,sBEzPoC,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,2BAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,6BAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,2BAAW,QAAA,QACX,qBAAW,QAAA,QACX,0BAAW,QAAA,QACX,qBAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,2BAAW,QAAA,QACX,sBAAW,QAAA,QACX,yBAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,+BAAW,QAAA,QACX,2BAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,8BAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,6BAAW,QAAA,QACX,6BAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,sBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,2BAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,yBAAW,QAAA,QACX,8BAAW,QAAA,QACX,6BAAW,QAAA,QACX,6BAAW,QAAA,QACX,+BAAW,QAAA,QACX,8BAAW,QAAA,QACX,gCAAW,QAAA,QACX,uBAAW,QAAA,QACX,8BAAW,QAAA,QACX,+BAAW,QAAA,QACX,iCAAW,QAAA,QACX,0BAAW,QAAA,QACX,6BAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,gCAAW,QAAA,QACX,gCAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,0BAAW,QAAA,QACX,+BAAW,QAAA,QACX,+BAAW,QAAA,QACX,wBAAW,QAAA,QACX,+BAAW,QAAA,QACX,gCAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,8BAAW,QAAA,QACX,0BAAW,QAAA,QACX,gCAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,gCAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,6BAAW,QAAA,QACX,8BAAW,QAAA,QACX,2BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,8BAAW,QAAA,QACX,+BAAW,QAAA,QACX,mCAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,2BAAW,QAAA,QACX,4BAAW,QAAA,QACX,+BAAW,QAAA,QACX,wBAAW,QAAA,QACX,2BAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,yBAAW,QAAA,QACX,6BAAW,QAAA,QACX,+BAAW,QAAA,QACX,0BAAW,QAAA,QACX,gCAAW,QAAA,QACX,+BAAW,QAAA,QACX,8BAAW,QAAA,QACX,kCAAW,QAAA,QACX,oCAAW,QAAA,QACX,sBAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,8BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,0BAAW,QAAA,QACX,4BAAW,QAAA,QACX,qCAAW,QAAA,QACX,oCAAW,QAAA,QACX,kCAAW,QAAA,QACX,oCAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,8BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,0BAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,uBAAW,QAAA,QACX,mCAAW,QAAA,QACX,uCAAW,QAAA,QACX,gCAAW,QAAA,QACX,oCAAW,QAAA,QACX,qCAAW,QAAA,QACX,yCAAW,QAAA,QACX,4BAAW,QAAA,QACX,yBAAW,QAAA,QACX,gCAAW,QAAA,QACX,8BAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,0BAAW,QAAA,QACX,6BAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,8BAAW,QAAA,QACX,+BAAW,QAAA,QACX,gCAAW,QAAA,QACX,8BAAW,QAAA,QACX,8BAAW,QAAA,QACX,8BAAW,QAAA,QACX,2BAAW,QAAA,QACX,0BAAW,QAAA,QACX,yBAAW,QAAA,QACX,6BAAW,QAAA,QACX,2BAAW,QAAA,QACX,4BAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,2BAAW,QAAA,QACX,2BAAW,QAAA,QACX,4BAAW,QAAA,QACX,+BAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,iCAAW,QAAA,QACX,oCAAW,QAAA,QACX,iCAAW,QAAA,QACX,+BAAW,QAAA,QACX,+BAAW,QAAA,QACX,iCAAW,QAAA,QACX,qBAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QASX,wBAAW,QAAA,QACX,4BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,yBAAW,QAAA,QACX,yBAAW,QAAA,QACX,+BAAW,QAAA,QACX,uBAAW,QAAA,QACX,6BAAW,QAA
A,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,4BAAW,QAAA,QACX,uBAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,2BAAW,QAAA,QACX,0BAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,4BAAW,QAAA,QACX,mCAAW,QAAA,QACX,4BAAW,QAAA,QACX,oCAAW,QAAA,QACX,kCAAW,QAAA,QACX,iCAAW,QAAA,QACX,+BAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,kCAAW,QAAA,QACX,mCAAW,QAAA,QACX,sCAAW,QAAA,QACX,0CAAW,QAAA,QACX,oCAAW,QAAA,QACX,wCAAW,QAAA,QACX,qCAAW,QAAA,QACX,iCAAW,QAAA,QACX,gCAAW,QAAA,QACX,kCAAW,QAAA,QACX,+BAAW,QAAA,QACX,0BAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,0BAAW,QAAA,QCxS/C,ECkEE,mBAAA,WACG,gBAAA,WACK,WAAA,WJo+BV,OGriCA,QC+DE,mBAAA,WACG,gBAAA,WACK,WAAA,WDzDV,KACE,UAAA,KACA,4BAAA,cAGF,KACE,YAAA,gBAAA,CAAA,SAAA,CAAA,KAAA,CAAA,WACA,UAAA,KACA,YAAA,WACA,MAAA,KACA,iBAAA,KHoiCF,OGhiCA,MHiiCA,OACA,SG9hCE,YAAA,QACA,UAAA,QACA,YAAA,QAMF,EACE,MAAA,QACA,gBAAA,KH8hCF,QG5hCE,QAEE,MAAA,QACA,gBAAA,UAGF,QEnDA,QAAA,IAAA,KAAA,yBACA,eAAA,KF6DF,OACE,OAAA,EAMF,IACE,eAAA,OHqhCF,4BADA,0BGhhCA,gBH+gCA,iBADA,eMxlCE,QAAA,MACA,UAAA,KACA,OAAA,KH6EF,aACE,cAAA,IAMF,eACE,QAAA,IACA,YAAA,WACA,iBAAA,KACA,OAAA,IAAA,MAAA,KACA,cAAA,IC+FA,mBAAA,IAAA,IAAA,YACK,cAAA,IAAA,IAAA,YACG,WAAA,IAAA,IAAA,YE5LR,QAAA,aACA,UAAA,KACA,OAAA,KHiGF,YACE,cAAA,IAMF,GACE,WAAA,KACA,cAAA,KACA,OAAA,EACA,WAAA,IAAA,MAAA,KAQF,SACE,SAAA,SACA,MAAA,IACA,OAAA,IACA,QAAA,EACA,OAAA,KACA,SAAA,OACA,KAAA,cACA,OAAA,EAQA,0BH8/BF,yBG5/BI,SAAA,OACA,MAAA,KACA,OAAA,KACA,OAAA,EACA,SAAA,QACA,KAAA,KAWJ,cACE,OAAA,QH4/BF,IACA,IACA,IACA,IACA,IACA,IOtpCA,GP4oCA,GACA,GACA,GACA,GACA,GO9oCE,YAAA,QACA,YAAA,IACA,YAAA,IACA,MAAA,QPyqCF,WAZA,UAaA,WAZA,UAaA,WAZA,UAaA,WAZA,UAaA,WAZA,UAaA,WAZA,UACA,UOxqCA,SPyqCA,UAZA,SAaA,UAZA,SAaA,UAZA,SAaA,UAZA,SAaA,UAZA,SOxpCI,YAAA,IACA,YAAA,EACA,MAAA,KP8qCJ,IAEA,IAEA,IO9qCA,GP2qCA,GAEA,GO1qCE,WAAA,KACA,cAAA,KPqrCF,WANA,UAQA,WANA,UAQA,WANA,UACA,UOxrCA,SP0rCA,UANA,SAQA,UANA,SO9qCI,UAAA,IPyrCJ,IAEA,IAEA,IO1rCA,GPurCA,GAEA,GOtrCE,WAAA,KACA,cAAA,KPisCF,WANA,UAQA,WANA,UAQA,WANA,UACA,UOpsCA,SPssCA,UANA,SAQA,UANA,SO1rCI,UAAA,IPqsCJ,IOjsCA,GAAU,UAAA,KPqsCV,IOpsCA,GAAU,UAAA,KPwsCV,IOvsCA,GAAU,UAAA,KP2sCV,IO1sCA,GAAU,UAAA,KP8sCV,IO7sCA,GAAU,UAAA,KPitCV,IOhtCA,GAAU,UAAA,KAMV,EACE,OAAA,EAAA,EAAA,KAGF,MACE,cAAA,KACA,UAAA,KACA,YAAA,IACA,YAAA,IAEA,yBAAA,MACE,UAAA,MPitCJ,OOxsCA,MAEE,UAAA,IP0sCF,MOvsCA,KAEE,QAAA,KACA,iBAAA,QAIF,WAAuB,WAAA,KACvB,YAAuB,WAAA,MACvB,aAAuB,WAAA,OACvB,cAAuB,WAAA,QACvB,aAAuB,YAAA,OAGvB,gBAAuB,eAAA,UACvB,gBAAuB,eAAA,UACvB,iBAAuB,eAAA,WAGvB,YACE,MAAA,KAEF,cCvGE,MAAA,QR2zCF,qBQ1zCE,qBAEE,MAAA,QDuGJ,cC1GE,MAAA,QRk0CF,qBQj0CE,qBAEE,MAAA,QD0GJ,WC7GE,MAAA,QRy0CF,kBQx0CE,kBAEE,MAAA,QD6GJ,cChHE,MAAA,QRg1CF,qBQ/0CE,qBAEE,MAAA,QDgHJ,aCnHE,MAAA,QRu1CF,oBQt1CE,oBAEE,MAAA,QDuHJ,YAGE,MAAA,KE7HA,iBAAA,QT+1CF,mBS91CE,mBAEE,iBAAA,QF6HJ,YEhIE,iBAAA,QTs2CF,mBSr2CE,mBAEE,iBAAA,QFgIJ,SEnIE,iBAAA,QT62CF,gBS52CE,gBAEE,iBAAA,QFmIJ,YEtIE,iBAAA,QTo3CF,mBSn3CE,mBAEE,iBAAA,QFsIJ,WEzIE,iBAAA,QT23CF,kBS13CE,kBAEE,iBAAA,QF8IJ,aACE,eAAA,IACA,OAAA,KAAA,EAAA,KACA,cAAA,IAAA,MAAA,KPgvCF,GOxuCA,GAEE,WAAA,EACA,cAAA,KP4uCF,MAFA,MACA,MO9uCA,MAMI,cAAA,EAOJ,eACE,aAAA,EACA,WAAA,KAIF,aALE,aAAA,EACA,WAAA,KAMA,YAAA,KAFF,gBAKI,QAAA,aACA,cAAA,IACA,aAAA,IAKJ,GACE,WAAA,EACA,cAAA,KPouCF,GOluCA,GAEE,YAAA,WAEF,GACE,YAAA,IAEF,GACE,YAAA,EAaA,yBAAA,kBAEI,MAAA,KACA,MAAA,MACA,MAAA,KACA,WAAA,MGxNJ,SAAA,OACA,cAAA,SACA,YAAA,OHiNA,kBASI,YAAA,OP4tCN,0BOjtCA,YAEE,OAAA,KAGF,YACE,UAAA,IA9IqB,eAAA,UAmJvB,WACE,QAAA,KAAA,KACA,OAAA,EAAA,EAAA,KACA,UAAA,OA
CA,YAAA,IAAA,MAAA,KPitCF,yBO5sCI,wBP2sCJ,yBO1sCM,cAAA,EPgtCN,kBO1tCA,kBPytCA,iBOtsCI,QAAA,MACA,UAAA,IACA,YAAA,WACA,MAAA,KP4sCJ,yBO1sCI,yBPysCJ,wBOxsCM,QAAA,cAQN,oBPqsCA,sBOnsCE,cAAA,KACA,aAAA,EACA,WAAA,MACA,aAAA,IAAA,MAAA,KACA,YAAA,EP0sCF,kCOpsCI,kCPksCJ,iCAGA,oCAJA,oCAEA,mCOnsCe,QAAA,GP4sCf,iCO3sCI,iCPysCJ,gCAGA,mCAJA,mCAEA,kCOzsCM,QAAA,cAMN,QACE,cAAA,KACA,WAAA,OACA,YAAA,WIxSF,KXm/CA,IACA,IACA,KWj/CE,YAAA,KAAA,CAAA,MAAA,CAAA,QAAA,CAAA,aAAA,CAAA,UAIF,KACE,QAAA,IAAA,IACA,UAAA,IACA,MAAA,QACA,iBAAA,QACA,cAAA,IAIF,IACE,QAAA,IAAA,IACA,UAAA,IACA,MAAA,KACA,iBAAA,KACA,cAAA,IACA,mBAAA,MAAA,EAAA,KAAA,EAAA,gBAAA,WAAA,MAAA,EAAA,KAAA,EAAA,gBANF,QASI,QAAA,EACA,UAAA,KACA,YAAA,IACA,mBAAA,KAAA,WAAA,KAKJ,IACE,QAAA,MACA,QAAA,MACA,OAAA,EAAA,EAAA,KACA,UAAA,KACA,YAAA,WACA,MAAA,KACA,WAAA,UACA,UAAA,WACA,iBAAA,QACA,OAAA,IAAA,MAAA,KACA,cAAA,IAXF,SAeI,QAAA,EACA,UAAA,QACA,MAAA,QACA,YAAA,SACA,iBAAA,YACA,cAAA,EAKJ,gBACE,WAAA,MACA,WAAA,OC1DF,WCHE,cAAA,KACA,aAAA,KACA,aAAA,KACA,YAAA,KDGA,yBAAA,WACE,MAAA,OAEF,yBAAA,WACE,MAAA,OAEF,0BAAA,WACE,MAAA,QAUJ,iBCvBE,cAAA,KACA,aAAA,KACA,aAAA,KACA,YAAA,KD6BF,KCvBE,aAAA,MACA,YAAA,MD0BF,gBACE,aAAA,EACA,YAAA,EAFF,8BAKI,cAAA,EACA,aAAA,EZwiDJ,UAoCA,WAIA,WAIA,WAxCA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UAjCA,UAoCA,WAIA,WAIA,WAxCA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UAjCA,UAoCA,WAIA,WAIA,WAxCA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UatnDC,UbynDD,WAIA,WAIA,WAxCA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UAIA,UcpmDM,SAAA,SAEA,WAAA,IAEA,cAAA,KACA,aAAA,KDtBL,UbmpDD,WACA,WACA,WAVA,UACA,UACA,UACA,UACA,UACA,UACA,UACA,Uc3mDM,MAAA,KDvCL,WC+CG,MAAA,KD/CH,WC+CG,MAAA,aD/CH,WC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,YD/CH,gBC8DG,MAAA,KD9DH,gBC8DG,MAAA,aD9DH,gBC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,YD9DH,eCmEG,MAAA,KDnEH,gBCoDG,KAAA,KDpDH,gBCoDG,KAAA,aDpDH,gBCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,YDpDH,eCyDG,KAAA,KDzDH,kBCwEG,YAAA,KDxEH,kBCwEG,YAAA,aDxEH,kBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,YDxEH,iBCwEG,YAAA,EFCJ,yBCzEC,Ub2zDC,WACA,WACA,WAVA,UACA,UACA,UACA,UACA,UACA,UACA,UACA,UcnxDI,MAAA,KDvCL,WC+CG,MAAA,KD/CH,WC+CG,MAAA,aD/CH,WC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,YD/CH,gBC8DG,MAAA,KD9DH,gBC8DG,MAAA,aD9DH,gBC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,YD9DH,eCmEG,MAAA,KDnEH,gBCoDG,KAAA,KDpDH,gBCoDG,KAAA,aDpDH,gBCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,YDpDH,eCyDG,KAAA,KDzDH,kBCwEG,YAAA,KDxEH,kBCwEG,YAAA,aDxEH,kBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,YDxEH,iBCwEG,YAAA,GFUJ,yBClFC,Ubo+DC,WACA,WACA,WAVA,UACA,UACA,UACA,UACA,UACA,UACA,UACA,Uc57DI,MAAA,KDvCL,WC+CG,MAAA,KD/CH,WC+CG,MAAA,aD/CH,WC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+
CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,YD/CH,gBC8DG,MAAA,KD9DH,gBC8DG,MAAA,aD9DH,gBC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,YD9DH,eCmEG,MAAA,KDnEH,gBCoDG,KAAA,KDpDH,gBCoDG,KAAA,aDpDH,gBCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,YDpDH,eCyDG,KAAA,KDzDH,kBCwEG,YAAA,KDxEH,kBCwEG,YAAA,aDxEH,kBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,YDxEH,iBCwEG,YAAA,GFmBJ,0BC3FC,Ub6oEC,WACA,WACA,WAVA,UACA,UACA,UACA,UACA,UACA,UACA,UACA,UcrmEI,MAAA,KDvCL,WC+CG,MAAA,KD/CH,WC+CG,MAAA,aD/CH,WC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,ID/CH,UC+CG,MAAA,aD/CH,UC+CG,MAAA,YD/CH,gBC8DG,MAAA,KD9DH,gBC8DG,MAAA,aD9DH,gBC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,ID9DH,eC8DG,MAAA,aD9DH,eC8DG,MAAA,YD9DH,eCmEG,MAAA,KDnEH,gBCoDG,KAAA,KDpDH,gBCoDG,KAAA,aDpDH,gBCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,IDpDH,eCoDG,KAAA,aDpDH,eCoDG,KAAA,YDpDH,eCyDG,KAAA,KDzDH,kBCwEG,YAAA,KDxEH,kBCwEG,YAAA,aDxEH,kBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,IDxEH,iBCwEG,YAAA,aDxEH,iBCwEG,YAAA,YDxEH,iBCwEG,YAAA,GCjEJ,MACE,iBAAA,YADF,uBAQI,SAAA,OACA,QAAA,aACA,MAAA,KAKA,sBf+xEJ,sBe9xEM,SAAA,OACA,QAAA,WACA,MAAA,KAKN,QACE,YAAA,IACA,eAAA,IACA,MAAA,KACA,WAAA,KAGF,GACE,WAAA,KAMF,OACE,MAAA,KACA,UAAA,KACA,cAAA,Kf6xEF,mBAHA,mBAIA,mBAHA,mBACA,mBe/xEA,mBAWQ,QAAA,IACA,YAAA,WACA,eAAA,IACA,WAAA,IAAA,MAAA,KAdR,mBAoBI,eAAA,OACA,cAAA,IAAA,MAAA,KfyxEJ,uCe9yEA,uCf+yEA,wCAHA,wCAIA,2CAHA,2Ce/wEQ,WAAA,EA9BR,mBAoCI,WAAA,IAAA,MAAA,KApCJ,cAyCI,iBAAA,KfoxEJ,6BAHA,6BAIA,6BAHA,6BACA,6Be5wEA,6BAOQ,QAAA,IAWR,gBACE,OAAA,IAAA,MAAA,KfqwEF,4BAHA,4BAIA,4BAHA,4BACA,4BerwEA,4BAQQ,OAAA,IAAA,MAAA,KfmwER,4Be3wEA,4BAeM,oBAAA,IAUN,yCAEI,iBAAA,QASJ,4BAEI,iBAAA,QfqvEJ,0BAGA,0BATA,0BAGA,0BAIA,0BAGA,0BATA,0BAGA,0BACA,0BAGA,0BgBt4EE,0BhBg4EF,0BgBz3EM,iBAAA,QhBs4EN,sCAEA,sCADA,oCgBj4EE,sChB+3EF,sCgBz3EM,iBAAA,QhBs4EN,2BAGA,2BATA,2BAGA,2BAIA,2BAGA,2BATA,2BAGA,2BACA,2BAGA,2BgB35EE,2BhBq5EF,2BgB94EM,iBAAA,QhB25EN,uCAEA,uCADA,qCgBt5EE,uChBo5EF,uCgB94EM,iBAAA,QhB25EN,wBAGA,wBATA,wBAGA,wBAIA,wBAGA,wBATA,wBAGA,wBACA,wBAGA,wBgBh7EE,wBhB06EF,wBgBn6EM,iBAAA,QhBg7EN,oCAEA,oCADA,kCgB36EE,oChBy6EF,oCgBn6EM,iBAAA,QhBg7EN,2BAGA,2BATA,2BAGA,2BAIA,2BAGA,2BATA,2BAGA,2BACA,2BAGA,2BgBr8EE,2BhB+7EF,2BgBx7EM,iBAAA,QhBq8EN,uCAEA,uCADA,qCgBh8EE,uChB87EF,uCgBx7EM,iBAAA,QhBq8EN,0BAGA,0BATA,0BAGA,0BAIA,0BAGA,0BATA,0BAGA,0BACA,0BAGA,0BgB19EE,0BhBo9EF,0BgB78EM,iBAAA,QhB09EN,sCAEA,sCADA,oCgBr9EE,sChBm9EF,sCgB78EM,iBAAA,QDoJN,kBACE,WAAA,KACA,WAAA,KAEA,oCAAA,kBACE,MAAA,KACA,cAAA,KACA,WAAA,OACA,mBAAA,yBACA,OAAA,IAAA,MAAA,KALF,yBASI,cAAA,Efq0EJ,qCAHA,qCAIA,qCAHA,qCACA,qCe70EA,qCAkBU,YAAA,OAlBV,kCA0BI,OAAA,Ef+zEJ,0DAHA,0DAIA,0DAHA,0DACA,0Dex1EA,0DAmCU,YAAA,Ef8zEV,yDAHA,yDAIA,yDAHA,yDACA,yDeh2EA,yDAuCU,aAAA,Efg0EV,yDev2EA,yDfw2EA,yDAFA,yDelzEU,cAAA,GEzNZ,SAIE,UAAA,EACA,QAAA,EACA,OAAA,EACA,OAAA,EAGF,OACE,QAAA,MACA,MAAA,KACA,QAAA,EACA,cAAA,KACA,UAAA,KACA,YA
AA,QACA,MAAA,KACA,OAAA,EACA,cAAA,IAAA,MAAA,QAGF,MACE,QAAA,aACA,UAAA,KACA,cAAA,IACA,YAAA,IAUF,mBb6BE,mBAAA,WACG,gBAAA,WACK,WAAA,WarBR,mBAAA,KACA,gBAAA,KAAA,WAAA,KjBkgFF,qBiB9/EA,kBAEE,OAAA,IAAA,EAAA,EACA,WAAA,MACA,YAAA,OjBogFF,wCADA,qCADA,8BAFA,+BACA,2BiB3/EE,4BAGE,OAAA,YAIJ,iBACE,QAAA,MAIF,kBACE,QAAA,MACA,MAAA,KAIF,iBjBu/EA,aiBr/EE,OAAA,KjB0/EF,2BiBt/EA,uBjBq/EA,wBK/kFE,QAAA,IAAA,KAAA,yBACA,eAAA,KYgGF,OACE,QAAA,MACA,YAAA,IACA,UAAA,KACA,YAAA,WACA,MAAA,KA0BF,cACE,QAAA,MACA,MAAA,KACA,OAAA,KACA,QAAA,IAAA,KACA,UAAA,KACA,YAAA,WACA,MAAA,KACA,iBAAA,KACA,iBAAA,KACA,OAAA,IAAA,MAAA,KACA,cAAA,Ib3EA,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBAyHR,mBAAA,aAAA,YAAA,IAAA,CAAA,WAAA,YAAA,KACK,cAAA,aAAA,YAAA,IAAA,CAAA,WAAA,YAAA,KACG,mBAAA,aAAA,YAAA,IAAA,CAAA,mBAAA,YAAA,KAAA,WAAA,aAAA,YAAA,IAAA,CAAA,mBAAA,YAAA,KAAA,WAAA,aAAA,YAAA,IAAA,CAAA,WAAA,YAAA,KAAA,WAAA,aAAA,YAAA,IAAA,CAAA,WAAA,YAAA,IAAA,CAAA,mBAAA,YAAA,Kc1IR,oBACE,aAAA,QACA,QAAA,EdYF,mBAAA,MAAA,EAAA,IAAA,IAAA,gBAAA,CAAA,EAAA,EAAA,IAAA,qBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,gBAAA,CAAA,EAAA,EAAA,IAAA,qBAiCR,gCACE,MAAA,KACA,QAAA,EAEF,oCAA0B,MAAA,KAC1B,yCAAgC,MAAA,Ka+ChC,0BACE,iBAAA,YACA,OAAA,EAQF,wBjBq+EF,wBACA,iCiBn+EI,iBAAA,KACA,QAAA,EAGF,wBjBo+EF,iCiBl+EI,OAAA,YAIF,sBACE,OAAA,KAcJ,qDAKI,8BjBm9EF,wCACA,+BAFA,8BiBj9EI,YAAA,KjB09EJ,iCAEA,2CACA,kCAFA,iCiBx9EE,0BjBq9EF,oCACA,2BAFA,0BiBl9EI,YAAA,KjB+9EJ,iCAEA,2CACA,kCAFA,iCiB79EE,0BjB09EF,oCACA,2BAFA,0BiBv9EI,YAAA,MAWN,YACE,cAAA,KjBy9EF,UiBj9EA,OAEE,SAAA,SACA,QAAA,MACA,WAAA,KACA,cAAA,KjBm9EF,yBiBh9EE,sBjBk9EF,mCADA,gCiB98EM,OAAA,YjBm9EN,gBiB99EA,aAgBI,WAAA,KACA,aAAA,KACA,cAAA,EACA,YAAA,IACA,OAAA,QjBm9EJ,+BACA,sCiBj9EA,yBjB+8EA,gCiB38EE,SAAA,SACA,WAAA,MACA,YAAA,MjBi9EF,oBiB98EA,cAEE,WAAA,KjBg9EF,iBiB58EA,cAEE,SAAA,SACA,QAAA,aACA,aAAA,KACA,cAAA,EACA,YAAA,IACA,eAAA,OACA,OAAA,QjB88EF,0BiB38EE,uBjB68EF,oCADA,iCiB18EI,OAAA,YjB+8EJ,kCiB58EA,4BAEE,WAAA,EACA,YAAA,KASF,qBACE,WAAA,KAEA,YAAA,IACA,eAAA,IAEA,cAAA,EAEA,8BjBm8EF,8BiBj8EI,cAAA,EACA,aAAA,EAaJ,UC3PE,OAAA,KACA,QAAA,IAAA,KACA,UAAA,KACA,YAAA,IACA,cAAA,IAEA,gBACE,OAAA,KACA,YAAA,KlBsrFJ,0BkBnrFE,kBAEE,OAAA,KDiPJ,6BAEI,OAAA,KACA,QAAA,IAAA,KACA,UAAA,KACA,YAAA,IACA,cAAA,IANJ,mCASI,OAAA,KACA,YAAA,KjBq8EJ,6CiB/8EA,qCAcI,OAAA,KAdJ,oCAiBI,OAAA,KACA,WAAA,KACA,QAAA,IAAA,KACA,UAAA,KACA,YAAA,IAIJ,UCvRE,OAAA,KACA,QAAA,KAAA,KACA,UAAA,KACA,YAAA,UACA,cAAA,IAEA,gBACE,OAAA,KACA,YAAA,KlB2tFJ,0BkBxtFE,kBAEE,OAAA,KD6QJ,6BAEI,OAAA,KACA,QAAA,KAAA,KACA,UAAA,KACA,YAAA,UACA,cAAA,IANJ,mCASI,OAAA,KACA,YAAA,KjB88EJ,6CiBx9EA,qCAcI,OAAA,KAdJ,oCAiBI,OAAA,KACA,WAAA,KACA,QAAA,KAAA,KACA,UAAA,KACA,YAAA,UASJ,cAEE,SAAA,SAFF,4BAMI,cAAA,OAIJ,uBACE,SAAA,SACA,IAAA,EACA,MAAA,EACA,QAAA,EACA,QAAA,MACA,MAAA,KACA,OAAA,KACA,YAAA,KACA,WAAA,OACA,eAAA,KjBo8EF,oDADA,uCiBj8EA,iCAGE,MAAA,KACA,OAAA,KACA,YAAA,KjBo8EF,oDADA,uCiBj8EA,iCAGE,MAAA,KACA,OAAA,KACA,YAAA,KjBq8EF,uBAEA,8BAJA,4BiB/7EA,yBjBg8EA,oBAEA,2BAGA,4BAEA,mCAHA,yBAEA,gCkBx1FI,MAAA,QDkZJ,2BC9YI,aAAA,QdiDF,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBchDN,iCACE,aAAA,Qd8CJ,mBAAA,MAAA,EAAA,IAAA,IAAA,gBAAA,CAAA,EAAA,EAAA,IAAA,QACQ,WAAA,MAAA,EAAA,IAAA,IAAA,gBAAA,CAAA,EAAA,EAAA,IAAA,Qa4VV,gCCpYI,MAAA,QACA,iBAAA,QACA,aAAA,QDkYJ,oCC9XI,MAAA,QlB61FJ,uBAEA,8BAJA,4BiB19EA,yBjB29EA,oBAEA,2BAGA,4BAEA,mCAHA,yBAEA,gCkBt3FI,MAAA,QDqZJ,2BCjZI,aAAA,QdiDF,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBchDN,iCACE,aAAA,Qd8CJ,mBAAA,MAAA,EAAA,IAAA,IAAA,gBAAA,CAAA,EAAA,EAAA,IAAA,QACQ,WAAA,MAAA,EAAA,IAAA,IAAA,gBAAA,CAAA,EAAA,EAAA,IAAA,Qa+VV,gCCvYI,MAAA,QACA,iBAAA,QACA,aAAA,QDqYJ,oCCjYI,MAAA,QlB23FJ,qBAEA,4BAJA,0BiBr/EA,uBjBs/EA,kBAEA,yBAGA,0BAEA,iCAHA
,uBAEA,8BkBp5FI,MAAA,QDwZJ,yBCpZI,aAAA,QdiDF,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBchDN,+BACE,aAAA,Qd8CJ,mBAAA,MAAA,EAAA,IAAA,IAAA,gBAAA,CAAA,EAAA,EAAA,IAAA,QACQ,WAAA,MAAA,EAAA,IAAA,IAAA,gBAAA,CAAA,EAAA,EAAA,IAAA,QakWV,8BC1YI,MAAA,QACA,iBAAA,QACA,aAAA,QDwYJ,kCCpYI,MAAA,QD2YF,2CACE,IAAA,KAEF,mDACE,IAAA,EAUJ,YACE,QAAA,MACA,WAAA,IACA,cAAA,KACA,MAAA,QAkBA,yBAAA,yBAGI,QAAA,aACA,cAAA,EACA,eAAA,OALJ,2BAUI,QAAA,aACA,MAAA,KACA,eAAA,OAZJ,kCAiBI,QAAA,aAjBJ,0BAqBI,QAAA,aACA,eAAA,OjBi/EJ,wCiBvgFA,6CjBsgFA,2CiB3+EM,MAAA,KA3BN,wCAiCI,MAAA,KAjCJ,4BAqCI,cAAA,EACA,eAAA,OjB4+EJ,uBiBlhFA,oBA6CI,QAAA,aACA,WAAA,EACA,cAAA,EACA,eAAA,OjBy+EJ,6BiBzhFA,0BAmDM,aAAA,EjB0+EN,4CiB7hFA,sCAwDI,SAAA,SACA,YAAA,EAzDJ,kDA8DI,IAAA,GjBw+EN,2BAEA,kCiB/9EA,wBjB89EA,+BiBr9EI,YAAA,IACA,WAAA,EACA,cAAA,EjB09EJ,2BiBr+EA,wBAiBI,WAAA,KAjBJ,6BJ9gBE,aAAA,MACA,YAAA,MIwiBA,yBAAA,gCAEI,YAAA,IACA,cAAA,EACA,WAAA,OA/BN,sDAwCI,MAAA,KAQA,yBAAA,+CAEI,YAAA,KACA,UAAA,MAKJ,yBAAA,+CAEI,YAAA,IACA,UAAA,ME9kBR,KACE,QAAA,aACA,cAAA,EACA,YAAA,IACA,WAAA,OACA,YAAA,OACA,eAAA,OACA,iBAAA,aAAA,aAAA,aACA,OAAA,QACA,iBAAA,KACA,OAAA,IAAA,MAAA,YCoCA,QAAA,IAAA,KACA,UAAA,KACA,YAAA,WACA,cAAA,IhBqKA,oBAAA,KACG,iBAAA,KACC,gBAAA,KACI,YAAA,KJs1FV,kBAHA,kBACA,WACA,kBAHA,kBmB1hGI,WdrBF,QAAA,IAAA,KAAA,yBACA,eAAA,KLwjGF,WADA,WmB7hGE,WAGE,MAAA,KACA,gBAAA,KnB+hGJ,YmB5hGE,YAEE,iBAAA,KACA,QAAA,Ef2BF,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBexBR,cnB4hGF,eACA,wBmB1hGI,OAAA,YE9CF,OAAA,kBACA,QAAA,IjBiEA,mBAAA,KACQ,WAAA,KefN,enB4hGJ,yBmB1hGM,eAAA,KASN,aC7DE,MAAA,KACA,iBAAA,KACA,aAAA,KpBqlGF,mBoBnlGE,mBAEE,MAAA,KACA,iBAAA,QACA,aAAA,QAEF,mBACE,MAAA,KACA,iBAAA,QACA,aAAA,QpBqlGJ,oBoBnlGE,oBpBolGF,mCoBjlGI,MAAA,KACA,iBAAA,QACA,iBAAA,KACA,aAAA,QpB2lGJ,0BAHA,0BAHA,0BAKA,0BAHA,0BoBrlGI,0BpB0lGJ,yCAHA,yCAHA,yCoBjlGM,MAAA,KACA,iBAAA,QACA,aAAA,QpBgmGN,4BAHA,4BoBvlGI,4BpB2lGJ,6BAHA,6BAHA,6BAOA,sCAHA,sCAHA,sCoBnlGM,iBAAA,KACA,aAAA,KDuBN,oBClBI,MAAA,KACA,iBAAA,KDoBJ,aChEE,MAAA,KACA,iBAAA,QACA,aAAA,QpB0oGF,mBoBxoGE,mBAEE,MAAA,KACA,iBAAA,QACA,aAAA,QAEF,mBACE,MAAA,KACA,iBAAA,QACA,aAAA,QpB0oGJ,oBoBxoGE,oBpByoGF,mCoBtoGI,MAAA,KACA,iBAAA,QACA,iBAAA,KACA,aAAA,QpBgpGJ,0BAHA,0BAHA,0BAKA,0BAHA,0BoB1oGI,0BpB+oGJ,yCAHA,yCAHA,yCoBtoGM,MAAA,KACA,iBAAA,QACA,aAAA,QpBqpGN,4BAHA,4BoB5oGI,4BpBgpGJ,6BAHA,6BAHA,6BAOA,sCAHA,sCAHA,sCoBxoGM,iBAAA,QACA,aAAA,QD0BN,oBCrBI,MAAA,QACA,iBAAA,KDwBJ,aCpEE,MAAA,KACA,iBAAA,QACA,aAAA,QpB+rGF,mBoB7rGE,mBAEE,MAAA,KACA,iBAAA,QACA,aAAA,QAEF,mBACE,MAAA,KACA,iBAAA,QACA,aAAA,QpB+rGJ,oBoB7rGE,oBpB8rGF,mCoB3rGI,MAAA,KACA,iBAAA,QACA,iBAAA,KACA,aAAA,QpBqsGJ,0BAHA,0BAHA,0BAKA,0BAHA,0BoB/rGI,0BpBosGJ,yCAHA,yCAHA,yCoB3rGM,MAAA,KACA,iBAAA,QACA,aAAA,QpB0sGN,4BAHA,4BoBjsGI,4BpBqsGJ,6BAHA,6BAHA,6BAOA,sCAHA,sCAHA,sCoB7rGM,iBAAA,QACA,aAAA,QD8BN,oBCzBI,MAAA,QACA,iBAAA,KD4BJ,UCxEE,MAAA,KACA,iBAAA,QACA,aAAA,QpBovGF,gBoBlvGE,gBAEE,MAAA,KACA,iBAAA,QACA,aAAA,QAEF,gBACE,MAAA,KACA,iBAAA,QACA,aAAA,QpBovGJ,iBoBlvGE,iBpBmvGF,gCoBhvGI,MAAA,KACA,iBAAA,QACA,iBAAA,KACA,aAAA,QpB0vGJ,uBAHA,uBAHA,uBAKA,uBAHA,uBoBpvGI,uBpByvGJ,sCAHA,sCAHA,sCoBhvGM,MAAA,KACA,iBAAA,QACA,aAAA,QpB+vGN,yBAHA,yBoBtvGI,yBpB0vGJ,0BAHA,0BAHA,0BAOA,mCAHA,mCAHA,mCoBlvGM,iBAAA,QACA,aAAA,QDkCN,iBC7BI,MAAA,QACA,iBAAA,KDgCJ,aC5EE,MAAA,KACA,iBAAA,QACA,aAAA,QpByyGF,mBoBvyGE,mBAEE,MAAA,KACA,iBAAA,QACA,aAAA,QAEF,mBACE,MAAA,KACA,iBAAA,QACA,aAAA,QpByyGJ,oBoBvyGE,oBpBwyGF,mCoBryGI,MAAA,KACA,iBAAA,QACA,iBAAA,KACA,aAAA,QpB+yGJ,0BAHA,0BAHA,0BAKA,0BAHA,0BoBzyGI,0BpB8yGJ,yCAHA,yCAHA,yCoBryGM,MAAA,KACA,iBAAA,QACA,aAAA,QpBozGN,4BAHA,4BoB3yGI,4BpB+yGJ,6BAHA,6BAHA,6BAOA,sCAHA,sCAHA,sCoBvyGM,iBAAA,QACA,aAAA,QDsCN,oBCjCI,MAAA,QACA,iBAAA
,KDoCJ,YChFE,MAAA,KACA,iBAAA,QACA,aAAA,QpB81GF,kBoB51GE,kBAEE,MAAA,KACA,iBAAA,QACA,aAAA,QAEF,kBACE,MAAA,KACA,iBAAA,QACA,aAAA,QpB81GJ,mBoB51GE,mBpB61GF,kCoB11GI,MAAA,KACA,iBAAA,QACA,iBAAA,KACA,aAAA,QpBo2GJ,yBAHA,yBAHA,yBAKA,yBAHA,yBoB91GI,yBpBm2GJ,wCAHA,wCAHA,wCoB11GM,MAAA,KACA,iBAAA,QACA,aAAA,QpBy2GN,2BAHA,2BoBh2GI,2BpBo2GJ,4BAHA,4BAHA,4BAOA,qCAHA,qCAHA,qCoB51GM,iBAAA,QACA,aAAA,QD0CN,mBCrCI,MAAA,QACA,iBAAA,KD6CJ,UACE,YAAA,IACA,MAAA,QACA,cAAA,EAEA,UnBwzGF,iBADA,iBAEA,oBACA,6BmBrzGI,iBAAA,YfnCF,mBAAA,KACQ,WAAA,KeqCR,UnB0zGF,iBADA,gBADA,gBmBpzGI,aAAA,YnB0zGJ,gBmBxzGE,gBAEE,MAAA,QACA,gBAAA,UACA,iBAAA,YnB2zGJ,0BmBvzGI,0BnBwzGJ,mCAFA,mCmBpzGM,MAAA,KACA,gBAAA,KnB0zGN,mBmBjzGA,QC9EE,QAAA,KAAA,KACA,UAAA,KACA,YAAA,UACA,cAAA,IpBm4GF,mBmBpzGA,QClFE,QAAA,IAAA,KACA,UAAA,KACA,YAAA,IACA,cAAA,IpB04GF,mBmBvzGA,QCtFE,QAAA,IAAA,IACA,UAAA,KACA,YAAA,IACA,cAAA,ID2FF,WACE,QAAA,MACA,MAAA,KAIF,sBACE,WAAA,InBuzGF,6BADA,4BmB/yGE,6BACE,MAAA,KG1JJ,MACE,QAAA,ElBoLA,mBAAA,QAAA,KAAA,OACK,cAAA,QAAA,KAAA,OACG,WAAA,QAAA,KAAA,OkBnLR,SACE,QAAA,EAIJ,UACE,QAAA,KAEA,aAAY,QAAA,MACZ,eAAY,QAAA,UACZ,kBAAY,QAAA,gBAGd,YACE,SAAA,SACA,OAAA,EACA,SAAA,OlBsKA,4BAAA,MAAA,CAAA,WACQ,uBAAA,MAAA,CAAA,WAAA,oBAAA,MAAA,CAAA,WAOR,4BAAA,KACQ,uBAAA,KAAA,oBAAA,KAGR,mCAAA,KACQ,8BAAA,KAAA,2BAAA,KmB5MV,OACE,QAAA,aACA,MAAA,EACA,OAAA,EACA,YAAA,IACA,eAAA,OACA,WAAA,IAAA,OACA,WAAA,IAAA,QACA,aAAA,IAAA,MAAA,YACA,YAAA,IAAA,MAAA,YvBu/GF,UuBn/GA,QAEE,SAAA,SAIF,uBACE,QAAA,EAIF,eACE,SAAA,SACA,IAAA,KACA,KAAA,EACA,QAAA,KACA,QAAA,KACA,MAAA,KACA,UAAA,MACA,QAAA,IAAA,EACA,OAAA,IAAA,EAAA,EACA,UAAA,KACA,WAAA,KACA,WAAA,KACA,iBAAA,KACA,gBAAA,YACA,OAAA,IAAA,MAAA,KACA,OAAA,IAAA,MAAA,gBACA,cAAA,InBuBA,mBAAA,EAAA,IAAA,KAAA,iBACQ,WAAA,EAAA,IAAA,KAAA,iBmBlBR,0BACE,MAAA,EACA,KAAA,KAzBJ,wBCzBE,OAAA,IACA,OAAA,IAAA,EACA,SAAA,OACA,iBAAA,QDsBF,oBAmCI,QAAA,MACA,QAAA,IAAA,KACA,MAAA,KACA,YAAA,IACA,YAAA,WACA,MAAA,KACA,YAAA,OvB8+GJ,0BuB5+GI,0BAEE,MAAA,QACA,gBAAA,KACA,iBAAA,QAOJ,yBvBw+GF,+BADA,+BuBp+GI,MAAA,KACA,gBAAA,KACA,iBAAA,QACA,QAAA,EASF,2BvBi+GF,iCADA,iCuB79GI,MAAA,KvBk+GJ,iCuB99GE,iCAEE,gBAAA,KACA,OAAA,YACA,iBAAA,YACA,iBAAA,KEzGF,OAAA,0DF+GF,qBAGI,QAAA,MAHJ,QAQI,QAAA,EAQJ,qBACE,MAAA,EACA,KAAA,KAQF,oBACE,MAAA,KACA,KAAA,EAIF,iBACE,QAAA,MACA,QAAA,IAAA,KACA,UAAA,KACA,YAAA,WACA,MAAA,KACA,YAAA,OAIF,mBACE,SAAA,MACA,IAAA,EACA,MAAA,EACA,OAAA,EACA,KAAA,EACA,QAAA,IAIF,2BACE,MAAA,EACA,KAAA,KAQF,evB+7GA,sCuB37GI,QAAA,GACA,WAAA,EACA,cAAA,IAAA,OACA,cAAA,IAAA,QAPJ,uBvBs8GA,8CuB37GI,IAAA,KACA,OAAA,KACA,cAAA,IASJ,yBACE,6BApEA,MAAA,EACA,KAAA,KAmEA,kCA1DA,MAAA,KACA,KAAA,GG1IF,W1BkoHA,oB0BhoHE,SAAA,SACA,QAAA,aACA,eAAA,O1BooHF,yB0BxoHA,gBAMI,SAAA,SACA,MAAA,K1B4oHJ,gCAFA,gCAFA,+BAFA,+BAKA,uBAFA,uBAFA,sB0BroHI,sBAIE,QAAA,EAMN,qB1BooHA,2BACA,2BACA,iC0BjoHI,YAAA,KAKJ,aACE,YAAA,KADF,kB1BmoHA,wBACA,0B0B7nHI,MAAA,KAPJ,kB1BwoHA,wBACA,0B0B7nHI,YAAA,IAIJ,yEACE,cAAA,EAIF,4BACE,YAAA,EACA,mECpDA,wBAAA,EACA,2BAAA,EDwDF,6C1B2nHA,8C2B5qHE,uBAAA,EACA,0BAAA,EDsDF,sBACE,MAAA,KAEF,8DACE,cAAA,EAEF,mE1B0nHA,oE2B/rHE,wBAAA,EACA,2BAAA,ED0EF,oECnEE,uBAAA,EACA,0BAAA,EDuEF,mC1BwnHA,iC0BtnHE,QAAA,EAiBF,iCACE,cAAA,IACA,aAAA,IAEF,oCACE,cAAA,KACA,aAAA,KAKF,iCtB/CE,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBsBkDR,0CtBnDA,mBAAA,KACQ,WAAA,KsByDV,YACE,YAAA,EAGF,eACE,aAAA,IAAA,IAAA,EACA,oBAAA,EAGF,uBACE,aAAA,EAAA,IAAA,IAOF,yB1B4lHA,+BACA,oC0BzlHI,QAAA,MACA,MAAA,KACA,MAAA,KACA,UAAA,KAPJ,oCAcM,MAAA,KAdN,8B1BumHA,oCACA,oCACA,0C0BnlHI,WAAA,KACA,YAAA,EAKF,4DACE,cAAA,EAEF,sDC7KA,uBAAA,IACA,wBAAA,IAOA,2BAAA,EACA,0BAAA,EDwKA,sDCjLA,uBAAA,EACA,wBAAA,EAOA,2BAAA,IACA,0BAAA,ID6KF,uEACE,cAAA,EAEF,4E1BqlHA,6E2BtwHE,2BAAA,EACA,0BAAA,EDsLF,6EC/
LE,uBAAA,EACA,wBAAA,EDsMF,qBACE,QAAA,MACA,MAAA,KACA,aAAA,MACA,gBAAA,SAJF,0B1BslHA,gC0B/kHI,QAAA,WACA,MAAA,KACA,MAAA,GATJ,qCAYI,MAAA,KAZJ,+CAgBI,KAAA,K1BmlHJ,gD0BlkHA,6C1BmkHA,2DAFA,wD0B5jHM,SAAA,SACA,KAAA,cACA,eAAA,KE1ON,aACE,SAAA,SACA,QAAA,MACA,gBAAA,SAGA,0BACE,MAAA,KACA,cAAA,EACA,aAAA,EATJ,2BAeI,SAAA,SACA,QAAA,EAKA,MAAA,KAEA,MAAA,KACA,cAAA,EAEA,iCACE,QAAA,EAUN,8B5B2xHA,mCACA,sCkBpwHE,OAAA,KACA,QAAA,KAAA,KACA,UAAA,KACA,YAAA,UACA,cAAA,IAEA,oClBswHF,yCACA,4CkBtwHI,OAAA,KACA,YAAA,KlB4wHJ,8CACA,mDACA,sDkB3wHE,sClBuwHF,2CACA,8CkBtwHI,OAAA,KUhCJ,8B5B6yHA,mCACA,sCkB3xHE,OAAA,KACA,QAAA,IAAA,KACA,UAAA,KACA,YAAA,IACA,cAAA,IAEA,oClB6xHF,yCACA,4CkB7xHI,OAAA,KACA,YAAA,KlBmyHJ,8CACA,mDACA,sDkBlyHE,sClB8xHF,2CACA,8CkB7xHI,OAAA,KlBqyHJ,2B4B5zHA,mB5B2zHA,iB4BxzHE,QAAA,W5B8zHF,8D4B5zHE,sD5B2zHF,oD4B1zHI,cAAA,EAIJ,mB5B2zHA,iB4BzzHE,MAAA,GACA,YAAA,OACA,eAAA,OAKF,mBACE,QAAA,IAAA,KACA,UAAA,KACA,YAAA,IACA,YAAA,EACA,MAAA,KACA,WAAA,OACA,iBAAA,KACA,OAAA,IAAA,MAAA,KACA,cAAA,IAGA,4BACE,QAAA,IAAA,KACA,UAAA,KACA,cAAA,IAEF,4BACE,QAAA,KAAA,KACA,UAAA,KACA,cAAA,I5ByzHJ,wC4B70HA,qCA0BI,WAAA,EAKJ,uC5BkzHA,+BACA,kCACA,6CACA,8CAEA,6DADA,wE2B55HE,wBAAA,EACA,2BAAA,EC8GF,+BACE,aAAA,EAEF,sC5BmzHA,8BAKA,+DADA,oDAHA,iCACA,4CACA,6C2Bh6HE,uBAAA,EACA,0BAAA,ECkHF,8BACE,YAAA,EAKF,iBACE,SAAA,SAGA,UAAA,EACA,YAAA,OALF,sBAUI,SAAA,SAVJ,2BAYM,YAAA,K5BizHN,6BADA,4B4B7yHI,4BAGE,QAAA,EAKJ,kC5B0yHF,wC4BvyHM,aAAA,KAGJ,iC5BwyHF,uC4BryHM,QAAA,EACA,YAAA,KC/JN,KACE,aAAA,EACA,cAAA,EACA,WAAA,KAHF,QAOI,SAAA,SACA,QAAA,MARJ,UAWM,SAAA,SACA,QAAA,MACA,QAAA,KAAA,K7By8HN,gB6Bx8HM,gBAEE,gBAAA,KACA,iBAAA,KAKJ,mBACE,MAAA,K7Bu8HN,yB6Br8HM,yBAEE,MAAA,KACA,gBAAA,KACA,OAAA,YACA,iBAAA,YAOJ,a7Bi8HJ,mBADA,mB6B77HM,iBAAA,KACA,aAAA,QAzCN,kBLLE,OAAA,IACA,OAAA,IAAA,EACA,SAAA,OACA,iBAAA,QKEF,cA0DI,UAAA,KASJ,UACE,cAAA,IAAA,MAAA,KADF,aAGI,MAAA,KAEA,cAAA,KALJ,eASM,aAAA,IACA,YAAA,WACA,OAAA,IAAA,MAAA,YACA,cAAA,IAAA,IAAA,EAAA,EACA,qBACE,aAAA,KAAA,KAAA,KAMF,sB7B86HN,4BADA,4B6B16HQ,MAAA,KACA,OAAA,QACA,iBAAA,KACA,OAAA,IAAA,MAAA,KACA,oBAAA,YAKN,wBAqDA,MAAA,KA8BA,cAAA,EAnFA,2BAwDE,MAAA,KAxDF,6BA0DI,cAAA,IACA,WAAA,OA3DJ,iDAgEE,IAAA,KACA,KAAA,KAGF,yBAAA,2BAEI,QAAA,WACA,MAAA,GAHJ,6BAKM,cAAA,GAzEN,6BAuFE,aAAA,EACA,cAAA,IAxFF,kC7Bu8HF,wCADA,wC6Bx2HI,OAAA,IAAA,MAAA,KAGF,yBAAA,6BAEI,cAAA,IAAA,MAAA,KACA,cAAA,IAAA,IAAA,EAAA,EAHJ,kC7Bg3HA,wCADA,wC6Bv2HI,oBAAA,MAhGN,cAEI,MAAA,KAFJ,gBAMM,cAAA,IANN,iBASM,YAAA,IAKA,uB7By8HN,6BADA,6B6Br8HQ,MAAA,KACA,iBAAA,QAQR,gBAEI,MAAA,KAFJ,mBAIM,WAAA,IACA,YAAA,EAYN,eACE,MAAA,KADF,kBAII,MAAA,KAJJ,oBAMM,cAAA,IACA,WAAA,OAPN,wCAYI,IAAA,KACA,KAAA,KAGF,yBAAA,kBAEI,QAAA,WACA,MAAA,GAHJ,oBAKM,cAAA,GASR,oBACE,cAAA,EADF,yBAKI,aAAA,EACA,cAAA,IANJ,8B7By7HA,oCADA,oC6B56HI,OAAA,IAAA,MAAA,KAGF,yBAAA,yBAEI,cAAA,IAAA,MAAA,KACA,cAAA,IAAA,IAAA,EAAA,EAHJ,8B7Bo7HA,oCADA,oC6B36HI,oBAAA,MAUN,uBAEI,QAAA,KAFJ,qBAKI,QAAA,MASJ,yBAEE,WAAA,KF7OA,uBAAA,EACA,wBAAA,EGQF,QACE,SAAA,SACA,WAAA,KACA,cAAA,KACA,OAAA,IAAA,MAAA,YAKA,yBAAA,QACE,cAAA,KAaF,yBAAA,eACE,MAAA,MAeJ,iBACE,cAAA,KACA,aAAA,KACA,WAAA,QACA,WAAA,IAAA,MAAA,YACA,mBAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,WAAA,MAAA,EAAA,IAAA,EAAA,qBAEA,2BAAA,MAEA,oBACE,WAAA,KAGF,yBAAA,iBACE,MAAA,KACA,WAAA,EACA,mBAAA,KAAA,WAAA,KAEA,0BACE,QAAA,gBACA,OAAA,eACA,eAAA,EACA,SAAA,kBAGF,oBACE,WAAA,Q9BknIJ,sC8B7mIE,mC9B4mIF,oC8BzmII,cAAA,EACA,aAAA,G9B+mIN,qB8B1mIA,kBAWE,SAAA,MACA,MAAA,EACA,KAAA,EACA,QAAA,K9BmmIF,sC8BjnIA,mCAGI,WAAA,MAEA,4D9BinIF,sC8BjnIE,mCACE,WAAA,OAWJ,yB9B2mIA,qB8B3mIA,kBACE,cAAA,GAIJ,kBACE,IAAA,EACA,aAAA,EAAA,EAAA,IAEF,qBACE,OAAA,EACA,cAAA,EACA,aAAA,IAAA,EAAA,E9B+mIF,kCAFA,gCACA,4B8BtmIA,0BAII,aAAA,MACA,YAAA,MAEA,yB9BwmIF,kCAFA,gCACA,4B8BvmIE,0BACE,aAAA,EACA,
YAAA,GAaN,mBACE,QAAA,KACA,aAAA,EAAA,EAAA,IAEA,yBAAA,mBACE,cAAA,GAOJ,cACE,MAAA,KACA,OAAA,KACA,QAAA,KAAA,KACA,UAAA,KACA,YAAA,K9B8lIF,oB8B5lIE,oBAEE,gBAAA,KATJ,kBAaI,QAAA,MAGF,yBACE,iC9B0lIF,uC8BxlII,YAAA,OAWN,eACE,SAAA,SACA,MAAA,MACA,QAAA,IAAA,KACA,aAAA,KC9LA,WAAA,IACA,cAAA,ID+LA,iBAAA,YACA,iBAAA,KACA,OAAA,IAAA,MAAA,YACA,cAAA,IAIA,qBACE,QAAA,EAdJ,yBAmBI,QAAA,MACA,MAAA,KACA,OAAA,IACA,cAAA,IAtBJ,mCAyBI,WAAA,IAGF,yBAAA,eACE,QAAA,MAUJ,YACE,OAAA,MAAA,MADF,iBAII,YAAA,KACA,eAAA,KACA,YAAA,KAGF,yBAAA,iCAGI,SAAA,OACA,MAAA,KACA,MAAA,KACA,WAAA,EACA,iBAAA,YACA,OAAA,EACA,mBAAA,KAAA,WAAA,K9BykIJ,kD8BllIA,sCAYM,QAAA,IAAA,KAAA,IAAA,KAZN,sCAeM,YAAA,K9B0kIN,4C8BzkIM,4CAEE,iBAAA,MAOR,yBAAA,YACE,MAAA,KACA,OAAA,EAFF,eAKI,MAAA,KALJ,iBAOM,YAAA,KACA,eAAA,MAYR,aACE,QAAA,KAAA,KACA,aAAA,MACA,YAAA,MACA,WAAA,IAAA,MAAA,YACA,cAAA,IAAA,MAAA,Y1B5NA,mBAAA,MAAA,EAAA,IAAA,EAAA,oBAAA,CAAA,EAAA,IAAA,EAAA,qBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,oBAAA,CAAA,EAAA,IAAA,EAAA,qB2BjER,WAAA,IACA,cAAA,Id6cA,yBAAA,yBAGI,QAAA,aACA,cAAA,EACA,eAAA,OALJ,2BAUI,QAAA,aACA,MAAA,KACA,eAAA,OAZJ,kCAiBI,QAAA,aAjBJ,0BAqBI,QAAA,aACA,eAAA,OjB+4HJ,wCiBr6HA,6CjBo6HA,2CiBz4HM,MAAA,KA3BN,wCAiCI,MAAA,KAjCJ,4BAqCI,cAAA,EACA,eAAA,OjB04HJ,uBiBh7HA,oBA6CI,QAAA,aACA,WAAA,EACA,cAAA,EACA,eAAA,OjBu4HJ,6BiBv7HA,0BAmDM,aAAA,EjBw4HN,4CiB37HA,sCAwDI,SAAA,SACA,YAAA,EAzDJ,kDA8DI,IAAA,GaxOF,yBAAA,yBACE,cAAA,IAEA,oCACE,cAAA,GASN,yBAAA,aACE,MAAA,KACA,YAAA,EACA,eAAA,EACA,aAAA,EACA,YAAA,EACA,OAAA,E1BvPF,mBAAA,KACQ,WAAA,M0B+PV,8BACE,WAAA,EHpUA,uBAAA,EACA,wBAAA,EGuUF,mDACE,cAAA,EHzUA,uBAAA,IACA,wBAAA,IAOA,2BAAA,EACA,0BAAA,EG0UF,YChVE,WAAA,IACA,cAAA,IDkVA,mBCnVA,WAAA,KACA,cAAA,KDqVA,mBCtVA,WAAA,KACA,cAAA,KD+VF,aChWE,WAAA,KACA,cAAA,KDkWA,yBAAA,aACE,MAAA,KACA,aAAA,KACA,YAAA,MAaJ,yBACE,aEtWA,MAAA,eFuWA,cE1WA,MAAA,gBF4WE,aAAA,MAFF,4BAKI,aAAA,GAUN,gBACE,iBAAA,QACA,aAAA,QAFF,8BAKI,MAAA,K9BmlIJ,oC8BllII,oCAEE,MAAA,QACA,iBAAA,YATN,6BAcI,MAAA,KAdJ,iCAmBM,MAAA,K9BglIN,uC8B9kIM,uCAEE,MAAA,KACA,iBAAA,YAIF,sC9B6kIN,4CADA,4C8BzkIQ,MAAA,KACA,iBAAA,QAIF,wC9B2kIN,8CADA,8C8BvkIQ,MAAA,KACA,iBAAA,YAOF,oC9BskIN,0CADA,0C8BlkIQ,MAAA,KACA,iBAAA,QAIJ,yBAAA,sDAIM,MAAA,K9BmkIR,4D8BlkIQ,4DAEE,MAAA,KACA,iBAAA,YAIF,2D9BikIR,iEADA,iE8B7jIU,MAAA,KACA,iBAAA,QAIF,6D9B+jIR,mEADA,mE8B3jIU,MAAA,KACA,iBAAA,aA/EZ,+BAuFI,aAAA,K9B4jIJ,qC8B3jII,qCAEE,iBAAA,KA1FN,yCA6FM,iBAAA,KA7FN,iC9B0pIA,6B8BvjII,aAAA,QAnGJ,6BA4GI,MAAA,KACA,mCACE,MAAA,KA9GN,0BAmHI,MAAA,K9BojIJ,gC8BnjII,gCAEE,MAAA,K9BsjIN,0C8BljIM,0C9BmjIN,mDAFA,mD8B/iIQ,MAAA,KAQR,gBACE,iBAAA,KACA,aAAA,QAFF,8BAKI,MAAA,Q9B+iIJ,oC8B9iII,oCAEE,MAAA,KACA,iBAAA,YATN,6BAcI,MAAA,QAdJ,iCAmBM,MAAA,Q9B4iIN,uC8B1iIM,uCAEE,MAAA,KACA,iBAAA,YAIF,sC9ByiIN,4CADA,4C8BriIQ,MAAA,KACA,iBAAA,QAIF,wC9BuiIN,8CADA,8C8BniIQ,MAAA,KACA,iBAAA,YAMF,oC9BmiIN,0CADA,0C8B/hIQ,MAAA,KACA,iBAAA,QAIJ,yBAAA,kEAIM,aAAA,QAJN,0DAOM,iBAAA,QAPN,sDAUM,MAAA,Q9BgiIR,4D8B/hIQ,4DAEE,MAAA,KACA,iBAAA,YAIF,2D9B8hIR,iEADA,iE8B1hIU,MAAA,KACA,iBAAA,QAIF,6D9B4hIR,mEADA,mE8BxhIU,MAAA,KACA,iBAAA,aApFZ,+BA6FI,aAAA,K9BwhIJ,qC8BvhII,qCAEE,iBAAA,KAhGN,yCAmGM,iBAAA,KAnGN,iC9B4nIA,6B8BnhII,aAAA,QAzGJ,6BA6GI,MAAA,QACA,mCACE,MAAA,KA/GN,0BAoHI,MAAA,Q9BqhIJ,gC8BphII,gCAEE,MAAA,K9BuhIN,0C8BnhIM,0C9BohIN,mDAFA,mD8BhhIQ,MAAA,KGtoBR,YACE,QAAA,IAAA,KACA,cAAA,KACA,WAAA,KACA,iBAAA,QACA,cAAA,IALF,eAQI,QAAA,aARJ,yBAWM,QAAA,EAAA,IACA,MAAA,KACA,QAAA,SAbN,oBAkBI,MAAA,KCpBJ,YACE,QAAA,aACA,aAAA,EACA,OAAA,KAAA,EACA,cAAA,IAJF,eAOI,QAAA,OAPJ,iBlCyrJA,oBkC/qJM,SAAA,SACA,MAAA,KACA,QAAA,IAAA,KACA,YAAA,KACA,YAAA,WACA,MAAA,QACA,gBAAA,KACA,iBAAA,KACA,OAAA,IAAA,MAAA,KlCorJN,uBkClrJM,uBlCmrJN,0BAFA,0BkC/qJQ,QAAA,EACA,MAAA,QACA,iBAAA,KACA,aAAA,KAGJ,6BlCkrJJ,gCkC/qJQ,YAAA,E
PnBN,uBAAA,IACA,0BAAA,IOsBE,4BlCirJJ,+B2BhtJE,wBAAA,IACA,2BAAA,IOwCE,sBlC+qJJ,4BAFA,4BADA,yBAIA,+BAFA,+BkC3qJM,QAAA,EACA,MAAA,KACA,OAAA,QACA,iBAAA,QACA,aAAA,QlCmrJN,wBAEA,8BADA,8BkCxuJA,2BlCsuJA,iCADA,iCkCtqJM,MAAA,KACA,OAAA,YACA,iBAAA,KACA,aAAA,KASN,oBlCqqJA,uBmC7uJM,QAAA,KAAA,KACA,UAAA,KACA,YAAA,UAEF,gCnC+uJJ,mC2B1uJE,uBAAA,IACA,0BAAA,IQAE,+BnC8uJJ,kC2BvvJE,wBAAA,IACA,2BAAA,IO2EF,oBlCgrJA,uBmC7vJM,QAAA,IAAA,KACA,UAAA,KACA,YAAA,IAEF,gCnC+vJJ,mC2B1vJE,uBAAA,IACA,0BAAA,IQAE,+BnC8vJJ,kC2BvwJE,wBAAA,IACA,2BAAA,ISHF,OACE,aAAA,EACA,OAAA,KAAA,EACA,WAAA,OACA,WAAA,KAJF,UAOI,QAAA,OAPJ,YpCuxJA,eoC7wJM,QAAA,aACA,QAAA,IAAA,KACA,iBAAA,KACA,OAAA,IAAA,MAAA,KACA,cAAA,KpCixJN,kBoC/xJA,kBAmBM,gBAAA,KACA,iBAAA,KApBN,epCoyJA,kBoCzwJM,MAAA,MA3BN,mBpCwyJA,sBoCtwJM,MAAA,KAlCN,mBpC6yJA,yBADA,yBAEA,sBoCnwJM,MAAA,KACA,OAAA,YACA,iBAAA,KC9CN,OACE,QAAA,OACA,QAAA,KAAA,KAAA,KACA,UAAA,IACA,YAAA,IACA,YAAA,EACA,MAAA,KACA,WAAA,OACA,YAAA,OACA,eAAA,SACA,cAAA,MrCuzJF,cqCnzJI,cAEE,MAAA,KACA,gBAAA,KACA,OAAA,QAKJ,aACE,QAAA,KAIF,YACE,SAAA,SACA,IAAA,KAOJ,eCtCE,iBAAA,KtCk1JF,2BsC/0JI,2BAEE,iBAAA,QDqCN,eC1CE,iBAAA,QtCy1JF,2BsCt1JI,2BAEE,iBAAA,QDyCN,eC9CE,iBAAA,QtCg2JF,2BsC71JI,2BAEE,iBAAA,QD6CN,YClDE,iBAAA,QtCu2JF,wBsCp2JI,wBAEE,iBAAA,QDiDN,eCtDE,iBAAA,QtC82JF,2BsC32JI,2BAEE,iBAAA,QDqDN,cC1DE,iBAAA,QtCq3JF,0BsCl3JI,0BAEE,iBAAA,QCFN,OACE,QAAA,aACA,UAAA,KACA,QAAA,IAAA,IACA,UAAA,KACA,YAAA,IACA,YAAA,EACA,MAAA,KACA,WAAA,OACA,YAAA,OACA,eAAA,OACA,iBAAA,KACA,cAAA,KAGA,aACE,QAAA,KAIF,YACE,SAAA,SACA,IAAA,KvCq3JJ,0BuCl3JE,eAEE,IAAA,EACA,QAAA,IAAA,IvCo3JJ,cuC/2JI,cAEE,MAAA,KACA,gBAAA,KACA,OAAA,QAKJ,+BvC42JF,4BuC12JI,MAAA,QACA,iBAAA,KAGF,wBACE,MAAA,MAGF,+BACE,aAAA,IAGF,uBACE,YAAA,IC1DJ,WACE,YAAA,KACA,eAAA,KACA,cAAA,KACA,MAAA,QACA,iBAAA,KxCu6JF,ewC56JA,cASI,MAAA,QATJ,aAaI,cAAA,KACA,UAAA,KACA,YAAA,IAfJ,cAmBI,iBAAA,QAGF,sBxCk6JF,4BwCh6JI,cAAA,KACA,aAAA,KACA,cAAA,IA1BJ,sBA8BI,UAAA,KAGF,oCAAA,WACE,YAAA,KACA,eAAA,KAEA,sBxCi6JF,4BwC/5JI,cAAA,KACA,aAAA,KxCm6JJ,ewC16JA,cAYI,UAAA,MC1CN,WACE,QAAA,MACA,QAAA,IACA,cAAA,KACA,YAAA,WACA,iBAAA,KACA,OAAA,IAAA,MAAA,KACA,cAAA,IrCiLA,mBAAA,OAAA,IAAA,YACK,cAAA,OAAA,IAAA,YACG,WAAA,OAAA,IAAA,YJ+xJV,iByCz9JA,eAaI,aAAA,KACA,YAAA,KzCi9JJ,mBADA,kByC58JE,kBAGE,aAAA,QArBJ,oBA0BI,QAAA,IACA,MAAA,KC3BJ,OACE,QAAA,KACA,cAAA,KACA,OAAA,IAAA,MAAA,YACA,cAAA,IAJF,UAQI,WAAA,EACA,MAAA,QATJ,mBAcI,YAAA,IAdJ,S1Co/JA,U0Ch+JI,cAAA,EApBJ,WAwBI,WAAA,IASJ,mB1C09JA,mB0Cx9JE,cAAA,KAFF,0B1C89JA,0B0Cx9JI,SAAA,SACA,IAAA,KACA,MAAA,MACA,MAAA,QAQJ,eCvDE,MAAA,QACA,iBAAA,QACA,aAAA,QDqDF,kBClDI,iBAAA,QDkDJ,2BC9CI,MAAA,QDkDJ,YC3DE,MAAA,QACA,iBAAA,QACA,aAAA,QDyDF,eCtDI,iBAAA,QDsDJ,wBClDI,MAAA,QDsDJ,eC/DE,MAAA,QACA,iBAAA,QACA,aAAA,QD6DF,kBC1DI,iBAAA,QD0DJ,2BCtDI,MAAA,QD0DJ,cCnEE,MAAA,QACA,iBAAA,QACA,aAAA,QDiEF,iBC9DI,iBAAA,QD8DJ,0BC1DI,MAAA,QCDJ,wCACE,KAAQ,oBAAA,KAAA,EACR,GAAQ,oBAAA,EAAA,GAIV,mCACE,KAAQ,oBAAA,KAAA,EACR,GAAQ,oBAAA,EAAA,GAFV,gCACE,KAAQ,oBAAA,KAAA,EACR,GAAQ,oBAAA,EAAA,GAQV,UACE,OAAA,KACA,cAAA,KACA,SAAA,OACA,iBAAA,QACA,cAAA,IxCsCA,mBAAA,MAAA,EAAA,IAAA,IAAA,eACQ,WAAA,MAAA,EAAA,IAAA,IAAA,ewClCV,cACE,MAAA,KACA,MAAA,GACA,OAAA,KACA,UAAA,KACA,YAAA,KACA,MAAA,KACA,WAAA,OACA,iBAAA,QxCyBA,mBAAA,MAAA,EAAA,KAAA,EAAA,gBACQ,WAAA,MAAA,EAAA,KAAA,EAAA,gBAyHR,mBAAA,MAAA,IAAA,KACK,cAAA,MAAA,IAAA,KACG,WAAA,MAAA,IAAA,KJw6JV,sB4CnjKA,gCCDI,iBAAA,yKACA,iBAAA,oKACA,iBAAA,iKDEF,wBAAA,KAAA,KAAA,gBAAA,KAAA,K5CwjKF,qB4CjjKA,+BxC5CE,kBAAA,qBAAA,GAAA,OAAA,SACK,aAAA,qBAAA,GAAA,OAAA,SACG,UAAA,qBAAA,GAAA,OAAA,SwCmDV,sBEvEE,iBAAA,QAGA,wCDgDE,iBAAA,yKACA,iBAAA,oKACA,iBAAA,iKDsBJ,mBE3EE,iBAAA,QAGA,qCDgDE,iBAAA,yKACA,iBAAA,oKACA,iBAAA,iKD0BJ,sBE/EE,iBAAA,QAGA,wCDgDE,iBAAA,yKACA,iBAAA,
oKACA,iBAAA,iKD8BJ,qBEnFE,iBAAA,QAGA,uCDgDE,iBAAA,yKACA,iBAAA,oKACA,iBAAA,iKExDJ,OAEE,WAAA,KAEA,mBACE,WAAA,EAIJ,O/CqpKA,Y+CnpKE,SAAA,OACA,KAAA,EAGF,YACE,MAAA,QAGF,cACE,QAAA,MAGA,4BACE,UAAA,KAIJ,a/CgpKA,mB+C9oKE,aAAA,KAGF,Y/C+oKA,kB+C7oKE,cAAA,K/CkpKF,Y+C/oKA,Y/C8oKA,a+C3oKE,QAAA,WACA,eAAA,IAGF,cACE,eAAA,OAGF,cACE,eAAA,OAIF,eACE,WAAA,EACA,cAAA,IAMF,YACE,aAAA,EACA,WAAA,KCrDF,YAEE,aAAA,EACA,cAAA,KAQF,iBACE,SAAA,SACA,QAAA,MACA,QAAA,KAAA,KAEA,cAAA,KACA,iBAAA,KACA,OAAA,IAAA,MAAA,KAGA,6BrB7BA,uBAAA,IACA,wBAAA,IqB+BA,4BACE,cAAA,ErBzBF,2BAAA,IACA,0BAAA,IqB6BA,0BhDqrKF,gCADA,gCgDjrKI,MAAA,KACA,OAAA,YACA,iBAAA,KALF,mDhD4rKF,yDADA,yDgDlrKM,MAAA,QATJ,gDhDisKF,sDADA,sDgDprKM,MAAA,KAKJ,wBhDqrKF,8BADA,8BgDjrKI,QAAA,EACA,MAAA,KACA,iBAAA,QACA,aAAA,QANF,iDhDisKF,wDAHA,uDADA,uDAMA,8DAHA,6DAJA,uDAMA,8DAHA,6DgDnrKM,MAAA,QAZJ,8ChDwsKF,oDADA,oDgDxrKM,MAAA,QAWN,kBhDkrKA,uBgDhrKE,MAAA,KAFF,2ChDsrKA,gDgDjrKI,MAAA,KhDsrKJ,wBgDlrKE,wBhDmrKF,6BAFA,6BgD/qKI,MAAA,KACA,gBAAA,KACA,iBAAA,QAIJ,uBACE,MAAA,KACA,WAAA,KnCvGD,yBoCIG,MAAA,QACA,iBAAA,QAEA,0BjDuxKJ,+BiDrxKM,MAAA,QAFF,mDjD2xKJ,wDiDtxKQ,MAAA,QjD2xKR,gCiDxxKM,gCjDyxKN,qCAFA,qCiDrxKQ,MAAA,QACA,iBAAA,QAEF,iCjD4xKN,uCAFA,uCADA,sCAIA,4CAFA,4CiDxxKQ,MAAA,KACA,iBAAA,QACA,aAAA,QpCzBP,sBoCIG,MAAA,QACA,iBAAA,QAEA,uBjDozKJ,4BiDlzKM,MAAA,QAFF,gDjDwzKJ,qDiDnzKQ,MAAA,QjDwzKR,6BiDrzKM,6BjDszKN,kCAFA,kCiDlzKQ,MAAA,QACA,iBAAA,QAEF,8BjDyzKN,oCAFA,oCADA,mCAIA,yCAFA,yCiDrzKQ,MAAA,KACA,iBAAA,QACA,aAAA,QpCzBP,yBoCIG,MAAA,QACA,iBAAA,QAEA,0BjDi1KJ,+BiD/0KM,MAAA,QAFF,mDjDq1KJ,wDiDh1KQ,MAAA,QjDq1KR,gCiDl1KM,gCjDm1KN,qCAFA,qCiD/0KQ,MAAA,QACA,iBAAA,QAEF,iCjDs1KN,uCAFA,uCADA,sCAIA,4CAFA,4CiDl1KQ,MAAA,KACA,iBAAA,QACA,aAAA,QpCzBP,wBoCIG,MAAA,QACA,iBAAA,QAEA,yBjD82KJ,8BiD52KM,MAAA,QAFF,kDjDk3KJ,uDiD72KQ,MAAA,QjDk3KR,+BiD/2KM,+BjDg3KN,oCAFA,oCiD52KQ,MAAA,QACA,iBAAA,QAEF,gCjDm3KN,sCAFA,sCADA,qCAIA,2CAFA,2CiD/2KQ,MAAA,KACA,iBAAA,QACA,aAAA,QDiGR,yBACE,WAAA,EACA,cAAA,IAEF,sBACE,cAAA,EACA,YAAA,IExHF,OACE,cAAA,KACA,iBAAA,KACA,OAAA,IAAA,MAAA,YACA,cAAA,I9C0DA,mBAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,EAAA,IAAA,IAAA,gB8CtDV,YACE,QAAA,KAKF,eACE,QAAA,KAAA,KACA,cAAA,IAAA,MAAA,YvBtBA,uBAAA,IACA,wBAAA,IuBmBF,0CAMI,MAAA,QAKJ,aACE,WAAA,EACA,cAAA,EACA,UAAA,KACA,MAAA,QlD24KF,oBAEA,sBkDj5KA,elD84KA,mBAEA,qBkDr4KI,MAAA,QAKJ,cACE,QAAA,KAAA,KACA,iBAAA,QACA,WAAA,IAAA,MAAA,KvB1CA,2BAAA,IACA,0BAAA,IuBmDF,mBlD+3KA,mCkD53KI,cAAA,EAHJ,oClDm4KA,oDkD73KM,aAAA,IAAA,EACA,cAAA,EAIF,4DlD63KJ,4EkD33KQ,WAAA,EvBzEN,uBAAA,IACA,wBAAA,IuB8EE,0DlD23KJ,0EkDz3KQ,cAAA,EvBzEN,2BAAA,IACA,0BAAA,IuBmDF,+EvB5DE,uBAAA,EACA,wBAAA,EuB4FF,wDAEI,iBAAA,EAGJ,0BACE,iBAAA,ElDw3KF,8BkDh3KA,clD+2KA,gCkD32KI,cAAA,ElDi3KJ,sCkDr3KA,sBlDo3KA,wCkD72KM,cAAA,KACA,aAAA,KlDk3KN,wDkD13KA,0BvB3GE,uBAAA,IACA,wBAAA,I3B2+KF,yFAFA,yFACA,2DkDh4KA,2DAmBQ,uBAAA,IACA,wBAAA,IlDo3KR,wGAIA,wGANA,wGAIA,wGAHA,0EAIA,0EkD34KA,0ElDy4KA,0EkDj3KU,uBAAA,IlD03KV,uGAIA,uGANA,uGAIA,uGAHA,yEAIA,yEkDr5KA,yElDm5KA,yEkDv3KU,wBAAA,IlD83KV,sDkD15KA,yBvBnGE,2BAAA,IACA,0BAAA,I3BigLF,qFAEA,qFkDj6KA,wDlDg6KA,wDkDv3KQ,2BAAA,IACA,0BAAA,IlD43KR,oGAIA,oGAFA,oGAIA,oGkD56KA,uElDy6KA,uEAFA,uEAIA,uEkD73KU,0BAAA,IlDk4KV,mGAIA,mGAFA,mGAIA,mGkDt7KA,sElDm7KA,sEAFA,sEAIA,sEkDn4KU,2BAAA,IAlDV,0BlD07KA,qCACA,0BACA,qCkDj4KI,WAAA,IAAA,MAAA,KlDq4KJ,kDkDh8KA,kDA+DI,WAAA,EA/DJ,uBlDo8KA,yCkDj4KI,OAAA,ElD44KJ,+CANA,+CAQA,+CANA,+CAEA,+CkD78KA,+ClDg9KA,iEANA,iEAQA,iEANA,iEAEA,iEANA,iEkD93KU,YAAA,ElDm5KV,8CANA,8CAQA,8CANA,8CAEA,8CkD39KA,8ClD89KA,gEANA,gEAQA,gEANA,gEAEA,gEANA,gEkDx4KU,aAAA,ElDu5KV,+CAIA,+CkDz+KA,+ClDu+KA,+CADA,iEAIA,iEANA,iEAIA,iEkDj5KU,cAAA,EAvFV,8ClDi/KA,8CAFA,8CAIA,8CALA,gEAIA,gEAFA,gEAIA,gEkDp5KU,cAAA,EAhGV,yBAsGI,cA
AA,EACA,OAAA,EAUJ,aACE,cAAA,KADF,oBAKI,cAAA,EACA,cAAA,IANJ,2BASM,WAAA,IATN,4BAcI,cAAA,ElD04KJ,wDkDx5KA,wDAkBM,WAAA,IAAA,MAAA,KAlBN,2BAuBI,WAAA,EAvBJ,uDAyBM,cAAA,IAAA,MAAA,KAON,eC5PE,aAAA,KAEA,8BACE,MAAA,KACA,iBAAA,QACA,aAAA,KAHF,0DAMI,iBAAA,KANJ,qCASI,MAAA,QACA,iBAAA,KAGJ,yDAEI,oBAAA,KD8ON,eC/PE,aAAA,QAEA,8BACE,MAAA,KACA,iBAAA,QACA,aAAA,QAHF,0DAMI,iBAAA,QANJ,qCASI,MAAA,QACA,iBAAA,KAGJ,yDAEI,oBAAA,QDiPN,eClQE,aAAA,QAEA,8BACE,MAAA,QACA,iBAAA,QACA,aAAA,QAHF,0DAMI,iBAAA,QANJ,qCASI,MAAA,QACA,iBAAA,QAGJ,yDAEI,oBAAA,QDoPN,YCrQE,aAAA,QAEA,2BACE,MAAA,QACA,iBAAA,QACA,aAAA,QAHF,uDAMI,iBAAA,QANJ,kCASI,MAAA,QACA,iBAAA,QAGJ,sDAEI,oBAAA,QDuPN,eCxQE,aAAA,QAEA,8BACE,MAAA,QACA,iBAAA,QACA,aAAA,QAHF,0DAMI,iBAAA,QANJ,qCASI,MAAA,QACA,iBAAA,QAGJ,yDAEI,oBAAA,QD0PN,cC3QE,aAAA,QAEA,6BACE,MAAA,QACA,iBAAA,QACA,aAAA,QAHF,yDAMI,iBAAA,QANJ,oCASI,MAAA,QACA,iBAAA,QAGJ,wDAEI,oBAAA,QChBN,kBACE,SAAA,SACA,QAAA,MACA,OAAA,EACA,QAAA,EACA,SAAA,OALF,yCpDivLA,wBADA,yBAEA,yBACA,wBoDvuLI,SAAA,SACA,IAAA,EACA,OAAA,EACA,KAAA,EACA,MAAA,KACA,OAAA,KACA,OAAA,EAKJ,wBACE,eAAA,OAIF,uBACE,eAAA,IC3BF,MACE,WAAA,KACA,QAAA,KACA,cAAA,KACA,iBAAA,QACA,OAAA,IAAA,MAAA,QACA,cAAA,IjD0DA,mBAAA,MAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,gBiDjEV,iBASI,aAAA,KACA,aAAA,gBAKJ,SACE,QAAA,KACA,cAAA,IAEF,SACE,QAAA,IACA,cAAA,ICpBF,OACE,MAAA,MACA,UAAA,KACA,YAAA,IACA,YAAA,EACA,MAAA,KACA,YAAA,EAAA,IAAA,EAAA,KjCTA,OAAA,kBACA,QAAA,GrBkyLF,asDvxLE,aAEE,MAAA,KACA,gBAAA,KACA,OAAA,QjChBF,OAAA,kBACA,QAAA,GiCuBA,aACE,QAAA,EACA,OAAA,QACA,WAAA,IACA,OAAA,EACA,mBAAA,KACA,gBAAA,KAAA,WAAA,KCxBJ,YACE,SAAA,OAIF,OACE,SAAA,MACA,IAAA,EACA,MAAA,EACA,OAAA,EACA,KAAA,EACA,QAAA,KACA,QAAA,KACA,SAAA,OACA,2BAAA,MAIA,QAAA,EAGA,0BnDiHA,kBAAA,kBACI,cAAA,kBACC,aAAA,kBACG,UAAA,kBAkER,mBAAA,kBAAA,IAAA,SAEK,cAAA,aAAA,IAAA,SACG,WAAA,kBAAA,IAAA,SAAA,WAAA,UAAA,IAAA,SAAA,WAAA,UAAA,IAAA,QAAA,CAAA,kBAAA,IAAA,QAAA,CAAA,aAAA,IAAA,SmDrLR,wBnD6GA,kBAAA,eACI,cAAA,eACC,aAAA,eACG,UAAA,emD9GV,mBACE,WAAA,OACA,WAAA,KAIF,cACE,SAAA,SACA,MAAA,KACA,OAAA,KAIF,eACE,SAAA,SACA,iBAAA,KACA,gBAAA,YACA,OAAA,IAAA,MAAA,KACA,OAAA,IAAA,MAAA,eACA,cAAA,InDcA,mBAAA,EAAA,IAAA,IAAA,eACQ,WAAA,EAAA,IAAA,IAAA,emDZR,QAAA,EAIF,gBACE,SAAA,MACA,IAAA,EACA,MAAA,EACA,OAAA,EACA,KAAA,EACA,QAAA,KACA,iBAAA,KAEA,qBlCpEA,OAAA,iBACA,QAAA,EkCoEA,mBlCrEA,OAAA,kBACA,QAAA,GkCyEF,cACE,QAAA,KACA,cAAA,IAAA,MAAA,QAIF,qBACE,WAAA,KAIF,aACE,OAAA,EACA,YAAA,WAKF,YACE,SAAA,SACA,QAAA,KAIF,cACE,QAAA,KACA,WAAA,MACA,WAAA,IAAA,MAAA,QAHF,wBAQI,cAAA,EACA,YAAA,IATJ,mCAaI,YAAA,KAbJ,oCAiBI,YAAA,EAKJ,yBACE,SAAA,SACA,IAAA,QACA,MAAA,KACA,OAAA,KACA,SAAA,OAIF,yBAEE,cACE,MAAA,MACA,OAAA,KAAA,KAEF,enDrEA,mBAAA,EAAA,IAAA,KAAA,eACQ,WAAA,EAAA,IAAA,KAAA,emDyER,UAAY,MAAA,OAGd,yBACE,UAAY,MAAA,OC9Id,SACE,SAAA,SACA,QAAA,KACA,QAAA,MCRA,YAAA,gBAAA,CAAA,SAAA,CAAA,KAAA,CAAA,WAEA,WAAA,OACA,YAAA,IACA,YAAA,WACA,WAAA,KACA,WAAA,KACA,WAAA,MACA,gBAAA,KACA,YAAA,KACA,eAAA,KACA,eAAA,OACA,WAAA,OACA,aAAA,OACA,UAAA,OACA,YAAA,ODHA,UAAA,KnCTA,OAAA,iBACA,QAAA,EmCYA,YnCbA,OAAA,kBACA,QAAA,GmCaA,aACE,QAAA,IAAA,EACA,WAAA,KAEF,eACE,QAAA,EAAA,IACA,YAAA,IAEF,gBACE,QAAA,IAAA,EACA,WAAA,IAEF,cACE,QAAA,EAAA,IACA,YAAA,KAIF,4BACE,OAAA,EACA,KAAA,IACA,YAAA,KACA,aAAA,IAAA,IAAA,EACA,iBAAA,KAEF,iCACE,MAAA,IACA,OAAA,EACA,cAAA,KACA,aAAA,IAAA,IAAA,EACA,iBAAA,KAEF,kCACE,OAAA,EACA,KAAA,IACA,cAAA,KACA,aAAA,IAAA,IAAA,EACA,iBAAA,KAEF,8BACE,IAAA,IACA,KAAA,EACA,WAAA,KACA,aAAA,IAAA,IAAA,IAAA,EACA,mBAAA,KAEF,6BACE,IAAA,IACA,MAAA,EACA,WAAA,KACA,aAAA,IAAA,EAAA,IAAA,IACA,kBAAA,KAEF,+BACE,IAAA,EACA,KAAA,IACA,YAAA,KACA,aAAA,EAAA,IAAA,IACA,oBAAA,KAEF,oCACE,IAAA,EACA,MAAA,IACA,WAAA,KACA,aAAA,EAAA,IAAA,IACA,oBAAA,KAEF,qCACE,IAAA,EACA,KAAA,IACA,W
AAA,KACA,aAAA,EAAA,IAAA,IACA,oBAAA,KAKJ,eACE,UAAA,MACA,QAAA,IAAA,IACA,MAAA,KACA,WAAA,OACA,iBAAA,KACA,cAAA,IAIF,eACE,SAAA,SACA,MAAA,EACA,OAAA,EACA,aAAA,YACA,aAAA,MEzGF,SACE,SAAA,SACA,IAAA,EACA,KAAA,EACA,QAAA,KACA,QAAA,KACA,UAAA,MACA,QAAA,IDXA,YAAA,gBAAA,CAAA,SAAA,CAAA,KAAA,CAAA,WAEA,WAAA,OACA,YAAA,IACA,YAAA,WACA,WAAA,KACA,WAAA,KACA,WAAA,MACA,gBAAA,KACA,YAAA,KACA,eAAA,KACA,eAAA,OACA,WAAA,OACA,aAAA,OACA,UAAA,OACA,YAAA,OCAA,UAAA,KACA,iBAAA,KACA,gBAAA,YACA,OAAA,IAAA,MAAA,KACA,OAAA,IAAA,MAAA,eACA,cAAA,ItDiDA,mBAAA,EAAA,IAAA,KAAA,eACQ,WAAA,EAAA,IAAA,KAAA,esD9CR,aAAQ,WAAA,MACR,eAAU,YAAA,KACV,gBAAW,WAAA,KACX,cAAS,YAAA,MAvBX,gBA4BI,aAAA,KAEA,gB1DkjMJ,sB0DhjMM,SAAA,SACA,QAAA,MACA,MAAA,EACA,OAAA,EACA,aAAA,YACA,aAAA,MAGF,sBACE,QAAA,GACA,aAAA,KAIJ,oBACE,OAAA,MACA,KAAA,IACA,YAAA,MACA,iBAAA,KACA,iBAAA,gBACA,oBAAA,EACA,0BACE,OAAA,IACA,YAAA,MACA,QAAA,IACA,iBAAA,KACA,oBAAA,EAGJ,sBACE,IAAA,IACA,KAAA,MACA,WAAA,MACA,mBAAA,KACA,mBAAA,gBACA,kBAAA,EACA,4BACE,OAAA,MACA,KAAA,IACA,QAAA,IACA,mBAAA,KACA,kBAAA,EAGJ,uBACE,IAAA,MACA,KAAA,IACA,YAAA,MACA,iBAAA,EACA,oBAAA,KACA,oBAAA,gBACA,6BACE,IAAA,IACA,YAAA,MACA,QAAA,IACA,iBAAA,EACA,oBAAA,KAIJ,qBACE,IAAA,IACA,MAAA,MACA,WAAA,MACA,mBAAA,EACA,kBAAA,KACA,kBAAA,gBACA,2BACE,MAAA,IACA,OAAA,MACA,QAAA,IACA,mBAAA,EACA,kBAAA,KAKN,eACE,QAAA,IAAA,KACA,OAAA,EACA,UAAA,KACA,iBAAA,QACA,cAAA,IAAA,MAAA,QACA,cAAA,IAAA,IAAA,EAAA,EAGF,iBACE,QAAA,IAAA,KCpHF,UACE,SAAA,SAGF,gBACE,SAAA,SACA,MAAA,KACA,SAAA,OAHF,sBAMI,SAAA,SACA,QAAA,KvD6KF,mBAAA,IAAA,YAAA,KACK,cAAA,IAAA,YAAA,KACG,WAAA,IAAA,YAAA,KJs/LV,4B2D5qMA,0BAcM,YAAA,EAIF,8BAAA,uBAAA,sBvDuLF,mBAAA,kBAAA,IAAA,YAEK,cAAA,aAAA,IAAA,YACG,WAAA,kBAAA,IAAA,YAAA,WAAA,UAAA,IAAA,YAAA,WAAA,UAAA,IAAA,WAAA,CAAA,kBAAA,IAAA,WAAA,CAAA,aAAA,IAAA,YA7JR,4BAAA,OAEQ,oBAAA,OA+GR,oBAAA,OAEQ,YAAA,OJ0hMR,mC2DrqMI,2BvDmHJ,kBAAA,sBACQ,UAAA,sBuDjHF,KAAA,E3DwqMN,kC2DtqMI,2BvD8GJ,kBAAA,uBACQ,UAAA,uBuD5GF,KAAA,E3D0qMN,6B2DxqMI,gC3DuqMJ,iCI9jMA,kBAAA,mBACQ,UAAA,mBuDtGF,KAAA,GArCR,wB3DgtMA,sBACA,sB2DpqMI,QAAA,MA7CJ,wBAiDI,KAAA,EAjDJ,sB3DwtMA,sB2DlqMI,SAAA,SACA,IAAA,EACA,MAAA,KAxDJ,sBA4DI,KAAA,KA5DJ,sBA+DI,KAAA,MA/DJ,2B3DouMA,4B2DjqMI,KAAA,EAnEJ,6BAuEI,KAAA,MAvEJ,8BA0EI,KAAA,KAQJ,kBACE,SAAA,SACA,IAAA,EACA,OAAA,EACA,KAAA,EACA,MAAA,IACA,UAAA,KACA,MAAA,KACA,WAAA,OACA,YAAA,EAAA,IAAA,IAAA,eACA,iBAAA,ctCpGA,OAAA,kBACA,QAAA,GsCyGA,uBdrGE,iBAAA,sEACA,iBAAA,iEACA,iBAAA,uFAAA,iBAAA,kEACA,OAAA,+GACA,kBAAA,ScoGF,wBACE,MAAA,EACA,KAAA,Kd1GA,iBAAA,sEACA,iBAAA,iEACA,iBAAA,uFAAA,iBAAA,kEACA,OAAA,+GACA,kBAAA,S7C6wMJ,wB2DlqME,wBAEE,MAAA,KACA,gBAAA,KACA,QAAA,EtCxHF,OAAA,kBACA,QAAA,GrB8xMF,0CACA,2CAFA,6B2DpsMA,6BAuCI,SAAA,SACA,IAAA,IACA,QAAA,EACA,QAAA,aACA,WAAA,M3DmqMJ,0C2D9sMA,6BA+CI,KAAA,IACA,YAAA,M3DmqMJ,2C2DntMA,6BAoDI,MAAA,IACA,aAAA,M3DmqMJ,6B2DxtMA,6BAyDI,MAAA,KACA,OAAA,KACA,YAAA,MACA,YAAA,EAIA,oCACE,QAAA,QAIF,oCACE,QAAA,QAUN,qBACE,SAAA,SACA,OAAA,KACA,KAAA,IACA,QAAA,GACA,MAAA,IACA,aAAA,EACA,YAAA,KACA,WAAA,OACA,WAAA,KATF,wBAYI,QAAA,aACA,MAAA,KACA,OAAA,KACA,OAAA,IACA,YAAA,OACA,OAAA,QAUA,iBAAA,OACA,iBAAA,cAEA,OAAA,IAAA,MAAA,KACA,cAAA,KA/BJ,6BAmCI,MAAA,KACA,OAAA,KACA,OAAA,EACA,iBAAA,KAOJ,kBACE,SAAA,SACA,MAAA,IACA,OAAA,KACA,KAAA,IACA,QAAA,GACA,YAAA,KACA,eAAA,KACA,MAAA,KACA,WAAA,OACA,YAAA,EAAA,IAAA,IAAA,eAEA,uBACE,YAAA,KAMJ,oCAGE,0C3D+nMA,2CAEA,6BADA,6B2D3nMI,MAAA,KACA,OAAA,KACA,WAAA,MACA,UAAA,KARJ,0C3DwoMA,6B2D5nMI,YAAA,MAZJ,2C3D4oMA,6B2D5nMI,aAAA,MAKJ,kBACE,MAAA,IACA,KAAA,IACA,eAAA,KAIF,qBACE,OAAA,M3D0oMJ,qCADA,sCADA,mBADA,oBAXA,gB4D73ME,iB5Dm4MF,uBADA,wBADA,iBADA,kBADA,wBADA,yBASA,mCADA,oCAqBA,oBADA,qBADA,oBADA,qBAXA,WADA,YAOA,uBADA,wBADA,qBADA,sBADA,cADA,eAOA,aADA,cAGA,kBADA,mBAjBA,WADA,Y4Dl4MI,QAA
A,MACA,QAAA,I5Dm6MJ,qCADA,mB4Dh6ME,gB5D65MF,uBADA,iBADA,wBAIA,mCAUA,oBADA,oBANA,WAGA,uBADA,qBADA,cAGA,aACA,kBATA,W4D75MI,MAAA,K5BNJ,c6BVE,QAAA,MACA,aAAA,KACA,YAAA,K7BWF,YACE,MAAA,gBAEF,WACE,MAAA,eAQF,MACE,QAAA,eAEF,MACE,QAAA,gBAEF,WACE,WAAA,OAEF,W8BzBE,KAAA,CAAA,CAAA,EAAA,EACA,MAAA,YACA,YAAA,KACA,iBAAA,YACA,OAAA,E9B8BF,QACE,QAAA,eAOF,OACE,SAAA,M+BjCF,cACE,MAAA,a/D88MF,YADA,YADA,Y+Dt8MA,YClBE,QAAA,ehEs+MF,kBACA,mBACA,yBALA,kBACA,mBACA,yBALA,kBACA,mBACA,yB+Dz8MA,kB/Dq8MA,mBACA,yB+D17ME,QAAA,eAIA,yBAAA,YCjDA,QAAA,gBACA,iBAAU,QAAA,gBACV,cAAU,QAAA,oBhE4/MV,cgE3/MA,cACU,QAAA,sBDkDV,yBAAA,kBACE,QAAA,iBAIF,yBAAA,mBACE,QAAA,kBAIF,yBAAA,yBACE,QAAA,wBAKF,+CAAA,YCtEA,QAAA,gBACA,iBAAU,QAAA,gBACV,cAAU,QAAA,oBhE0hNV,cgEzhNA,cACU,QAAA,sBDuEV,+CAAA,kBACE,QAAA,iBAIF,+CAAA,mBACE,QAAA,kBAIF,+CAAA,yBACE,QAAA,wBAKF,gDAAA,YC3FA,QAAA,gBACA,iBAAU,QAAA,gBACV,cAAU,QAAA,oBhEwjNV,cgEvjNA,cACU,QAAA,sBD4FV,gDAAA,kBACE,QAAA,iBAIF,gDAAA,mBACE,QAAA,kBAIF,gDAAA,yBACE,QAAA,wBAKF,0BAAA,YChHA,QAAA,gBACA,iBAAU,QAAA,gBACV,cAAU,QAAA,oBhEslNV,cgErlNA,cACU,QAAA,sBDiHV,0BAAA,kBACE,QAAA,iBAIF,0BAAA,mBACE,QAAA,kBAIF,0BAAA,yBACE,QAAA,wBAKF,yBAAA,WC7HA,QAAA,gBDkIA,+CAAA,WClIA,QAAA,gBDuIA,gDAAA,WCvIA,QAAA,gBD4IA,0BAAA,WC5IA,QAAA,gBDuJF,eCvJE,QAAA,eD0JA,aAAA,eClKA,QAAA,gBACA,oBAAU,QAAA,gBACV,iBAAU,QAAA,oBhE2oNV,iBgE1oNA,iBACU,QAAA,sBDkKZ,qBACE,QAAA,eAEA,aAAA,qBACE,QAAA,iBAGJ,sBACE,QAAA,eAEA,aAAA,sBACE,QAAA,kBAGJ,4BACE,QAAA,eAEA,aAAA,4BACE,QAAA,wBAKF,aAAA,cCrLA,QAAA","sourcesContent":["/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */\nhtml {\n  font-family: sans-serif;\n  -ms-text-size-adjust: 100%;\n  -webkit-text-size-adjust: 100%;\n}\nbody {\n  margin: 0;\n}\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n  display: block;\n}\naudio,\ncanvas,\nprogress,\nvideo {\n  display: inline-block;\n  vertical-align: baseline;\n}\naudio:not([controls]) {\n  display: none;\n  height: 0;\n}\n[hidden],\ntemplate {\n  display: none;\n}\na {\n  background-color: transparent;\n}\na:active,\na:hover {\n  outline: 0;\n}\nabbr[title] {\n  border-bottom: none;\n  text-decoration: underline;\n  text-decoration: underline dotted;\n}\nb,\nstrong {\n  font-weight: bold;\n}\ndfn {\n  font-style: italic;\n}\nh1 {\n  font-size: 2em;\n  margin: 0.67em 0;\n}\nmark {\n  background: #ff0;\n  color: #000;\n}\nsmall {\n  font-size: 80%;\n}\nsub,\nsup {\n  font-size: 75%;\n  line-height: 0;\n  position: relative;\n  vertical-align: baseline;\n}\nsup {\n  top: -0.5em;\n}\nsub {\n  bottom: -0.25em;\n}\nimg {\n  border: 0;\n}\nsvg:not(:root) {\n  overflow: hidden;\n}\nfigure {\n  margin: 1em 40px;\n}\nhr {\n  box-sizing: content-box;\n  height: 0;\n}\npre {\n  overflow: auto;\n}\ncode,\nkbd,\npre,\nsamp {\n  font-family: monospace, monospace;\n  font-size: 1em;\n}\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n  color: inherit;\n  font: inherit;\n  margin: 0;\n}\nbutton {\n  overflow: visible;\n}\nbutton,\nselect {\n  text-transform: none;\n}\nbutton,\nhtml input[type=\"button\"],\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n  -webkit-appearance: button;\n  cursor: pointer;\n}\nbutton[disabled],\nhtml input[disabled] {\n  cursor: default;\n}\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n  border: 0;\n  padding: 0;\n}\ninput {\n  line-height: 
normal;\n}\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n  box-sizing: border-box;\n  padding: 0;\n}\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n  height: auto;\n}\ninput[type=\"search\"] {\n  -webkit-appearance: textfield;\n  box-sizing: content-box;\n}\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n  -webkit-appearance: none;\n}\nfieldset {\n  border: 1px solid #c0c0c0;\n  margin: 0 2px;\n  padding: 0.35em 0.625em 0.75em;\n}\nlegend {\n  border: 0;\n  padding: 0;\n}\ntextarea {\n  overflow: auto;\n}\noptgroup {\n  font-weight: bold;\n}\ntable {\n  border-collapse: collapse;\n  border-spacing: 0;\n}\ntd,\nth {\n  padding: 0;\n}\n/*! Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n@media print {\n  *,\n  *:before,\n  *:after {\n    color: #000 !important;\n    text-shadow: none !important;\n    background: transparent !important;\n    box-shadow: none !important;\n  }\n  a,\n  a:visited {\n    text-decoration: underline;\n  }\n  a[href]:after {\n    content: \" (\" attr(href) \")\";\n  }\n  abbr[title]:after {\n    content: \" (\" attr(title) \")\";\n  }\n  a[href^=\"#\"]:after,\n  a[href^=\"javascript:\"]:after {\n    content: \"\";\n  }\n  pre,\n  blockquote {\n    border: 1px solid #999;\n    page-break-inside: avoid;\n  }\n  thead {\n    display: table-header-group;\n  }\n  tr,\n  img {\n    page-break-inside: avoid;\n  }\n  img {\n    max-width: 100% !important;\n  }\n  p,\n  h2,\n  h3 {\n    orphans: 3;\n    widows: 3;\n  }\n  h2,\n  h3 {\n    page-break-after: avoid;\n  }\n  .navbar {\n    display: none;\n  }\n  .btn > .caret,\n  .dropup > .btn > .caret {\n    border-top-color: #000 !important;\n  }\n  .label {\n    border: 1px solid #000;\n  }\n  .table {\n    border-collapse: collapse !important;\n  }\n  .table td,\n  .table th {\n    background-color: #fff !important;\n  }\n  .table-bordered th,\n  .table-bordered td {\n    border: 1px solid #ddd !important;\n  }\n}\n@font-face {\n  font-family: \"Glyphicons Halflings\";\n  src: url(\"../fonts/glyphicons-halflings-regular.eot\");\n  src: url(\"../fonts/glyphicons-halflings-regular.eot?#iefix\") format(\"embedded-opentype\"), url(\"../fonts/glyphicons-halflings-regular.woff2\") format(\"woff2\"), url(\"../fonts/glyphicons-halflings-regular.woff\") format(\"woff\"), url(\"../fonts/glyphicons-halflings-regular.ttf\") format(\"truetype\"), url(\"../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular\") format(\"svg\");\n}\n.glyphicon {\n  position: relative;\n  top: 1px;\n  display: inline-block;\n  font-family: \"Glyphicons Halflings\";\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1;\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n.glyphicon-asterisk:before {\n  content: \"\\002a\";\n}\n.glyphicon-plus:before {\n  content: \"\\002b\";\n}\n.glyphicon-euro:before,\n.glyphicon-eur:before {\n  content: \"\\20ac\";\n}\n.glyphicon-minus:before {\n  content: \"\\2212\";\n}\n.glyphicon-cloud:before {\n  content: \"\\2601\";\n}\n.glyphicon-envelope:before {\n  content: \"\\2709\";\n}\n.glyphicon-pencil:before {\n  content: \"\\270f\";\n}\n.glyphicon-glass:before {\n  content: \"\\e001\";\n}\n.glyphicon-music:before {\n  content: \"\\e002\";\n}\n.glyphicon-search:before {\n  content: \"\\e003\";\n}\n.glyphicon-heart:before {\n  content: \"\\e005\";\n}\n.glyphicon-star:before {\n  content: 
\"\\e006\";\n}\n.glyphicon-star-empty:before {\n  content: \"\\e007\";\n}\n.glyphicon-user:before {\n  content: \"\\e008\";\n}\n.glyphicon-film:before {\n  content: \"\\e009\";\n}\n.glyphicon-th-large:before {\n  content: \"\\e010\";\n}\n.glyphicon-th:before {\n  content: \"\\e011\";\n}\n.glyphicon-th-list:before {\n  content: \"\\e012\";\n}\n.glyphicon-ok:before {\n  content: \"\\e013\";\n}\n.glyphicon-remove:before {\n  content: \"\\e014\";\n}\n.glyphicon-zoom-in:before {\n  content: \"\\e015\";\n}\n.glyphicon-zoom-out:before {\n  content: \"\\e016\";\n}\n.glyphicon-off:before {\n  content: \"\\e017\";\n}\n.glyphicon-signal:before {\n  content: \"\\e018\";\n}\n.glyphicon-cog:before {\n  content: \"\\e019\";\n}\n.glyphicon-trash:before {\n  content: \"\\e020\";\n}\n.glyphicon-home:before {\n  content: \"\\e021\";\n}\n.glyphicon-file:before {\n  content: \"\\e022\";\n}\n.glyphicon-time:before {\n  content: \"\\e023\";\n}\n.glyphicon-road:before {\n  content: \"\\e024\";\n}\n.glyphicon-download-alt:before {\n  content: \"\\e025\";\n}\n.glyphicon-download:before {\n  content: \"\\e026\";\n}\n.glyphicon-upload:before {\n  content: \"\\e027\";\n}\n.glyphicon-inbox:before {\n  content: \"\\e028\";\n}\n.glyphicon-play-circle:before {\n  content: \"\\e029\";\n}\n.glyphicon-repeat:before {\n  content: \"\\e030\";\n}\n.glyphicon-refresh:before {\n  content: \"\\e031\";\n}\n.glyphicon-list-alt:before {\n  content: \"\\e032\";\n}\n.glyphicon-lock:before {\n  content: \"\\e033\";\n}\n.glyphicon-flag:before {\n  content: \"\\e034\";\n}\n.glyphicon-headphones:before {\n  content: \"\\e035\";\n}\n.glyphicon-volume-off:before {\n  content: \"\\e036\";\n}\n.glyphicon-volume-down:before {\n  content: \"\\e037\";\n}\n.glyphicon-volume-up:before {\n  content: \"\\e038\";\n}\n.glyphicon-qrcode:before {\n  content: \"\\e039\";\n}\n.glyphicon-barcode:before {\n  content: \"\\e040\";\n}\n.glyphicon-tag:before {\n  content: \"\\e041\";\n}\n.glyphicon-tags:before {\n  content: \"\\e042\";\n}\n.glyphicon-book:before {\n  content: \"\\e043\";\n}\n.glyphicon-bookmark:before {\n  content: \"\\e044\";\n}\n.glyphicon-print:before {\n  content: \"\\e045\";\n}\n.glyphicon-camera:before {\n  content: \"\\e046\";\n}\n.glyphicon-font:before {\n  content: \"\\e047\";\n}\n.glyphicon-bold:before {\n  content: \"\\e048\";\n}\n.glyphicon-italic:before {\n  content: \"\\e049\";\n}\n.glyphicon-text-height:before {\n  content: \"\\e050\";\n}\n.glyphicon-text-width:before {\n  content: \"\\e051\";\n}\n.glyphicon-align-left:before {\n  content: \"\\e052\";\n}\n.glyphicon-align-center:before {\n  content: \"\\e053\";\n}\n.glyphicon-align-right:before {\n  content: \"\\e054\";\n}\n.glyphicon-align-justify:before {\n  content: \"\\e055\";\n}\n.glyphicon-list:before {\n  content: \"\\e056\";\n}\n.glyphicon-indent-left:before {\n  content: \"\\e057\";\n}\n.glyphicon-indent-right:before {\n  content: \"\\e058\";\n}\n.glyphicon-facetime-video:before {\n  content: \"\\e059\";\n}\n.glyphicon-picture:before {\n  content: \"\\e060\";\n}\n.glyphicon-map-marker:before {\n  content: \"\\e062\";\n}\n.glyphicon-adjust:before {\n  content: \"\\e063\";\n}\n.glyphicon-tint:before {\n  content: \"\\e064\";\n}\n.glyphicon-edit:before {\n  content: \"\\e065\";\n}\n.glyphicon-share:before {\n  content: \"\\e066\";\n}\n.glyphicon-check:before {\n  content: \"\\e067\";\n}\n.glyphicon-move:before {\n  content: \"\\e068\";\n}\n.glyphicon-step-backward:before {\n  content: \"\\e069\";\n}\n.glyphicon-fast-backward:before {\n  content: 
\"\\e070\";\n}\n.glyphicon-backward:before {\n  content: \"\\e071\";\n}\n.glyphicon-play:before {\n  content: \"\\e072\";\n}\n.glyphicon-pause:before {\n  content: \"\\e073\";\n}\n.glyphicon-stop:before {\n  content: \"\\e074\";\n}\n.glyphicon-forward:before {\n  content: \"\\e075\";\n}\n.glyphicon-fast-forward:before {\n  content: \"\\e076\";\n}\n.glyphicon-step-forward:before {\n  content: \"\\e077\";\n}\n.glyphicon-eject:before {\n  content: \"\\e078\";\n}\n.glyphicon-chevron-left:before {\n  content: \"\\e079\";\n}\n.glyphicon-chevron-right:before {\n  content: \"\\e080\";\n}\n.glyphicon-plus-sign:before {\n  content: \"\\e081\";\n}\n.glyphicon-minus-sign:before {\n  content: \"\\e082\";\n}\n.glyphicon-remove-sign:before {\n  content: \"\\e083\";\n}\n.glyphicon-ok-sign:before {\n  content: \"\\e084\";\n}\n.glyphicon-question-sign:before {\n  content: \"\\e085\";\n}\n.glyphicon-info-sign:before {\n  content: \"\\e086\";\n}\n.glyphicon-screenshot:before {\n  content: \"\\e087\";\n}\n.glyphicon-remove-circle:before {\n  content: \"\\e088\";\n}\n.glyphicon-ok-circle:before {\n  content: \"\\e089\";\n}\n.glyphicon-ban-circle:before {\n  content: \"\\e090\";\n}\n.glyphicon-arrow-left:before {\n  content: \"\\e091\";\n}\n.glyphicon-arrow-right:before {\n  content: \"\\e092\";\n}\n.glyphicon-arrow-up:before {\n  content: \"\\e093\";\n}\n.glyphicon-arrow-down:before {\n  content: \"\\e094\";\n}\n.glyphicon-share-alt:before {\n  content: \"\\e095\";\n}\n.glyphicon-resize-full:before {\n  content: \"\\e096\";\n}\n.glyphicon-resize-small:before {\n  content: \"\\e097\";\n}\n.glyphicon-exclamation-sign:before {\n  content: \"\\e101\";\n}\n.glyphicon-gift:before {\n  content: \"\\e102\";\n}\n.glyphicon-leaf:before {\n  content: \"\\e103\";\n}\n.glyphicon-fire:before {\n  content: \"\\e104\";\n}\n.glyphicon-eye-open:before {\n  content: \"\\e105\";\n}\n.glyphicon-eye-close:before {\n  content: \"\\e106\";\n}\n.glyphicon-warning-sign:before {\n  content: \"\\e107\";\n}\n.glyphicon-plane:before {\n  content: \"\\e108\";\n}\n.glyphicon-calendar:before {\n  content: \"\\e109\";\n}\n.glyphicon-random:before {\n  content: \"\\e110\";\n}\n.glyphicon-comment:before {\n  content: \"\\e111\";\n}\n.glyphicon-magnet:before {\n  content: \"\\e112\";\n}\n.glyphicon-chevron-up:before {\n  content: \"\\e113\";\n}\n.glyphicon-chevron-down:before {\n  content: \"\\e114\";\n}\n.glyphicon-retweet:before {\n  content: \"\\e115\";\n}\n.glyphicon-shopping-cart:before {\n  content: \"\\e116\";\n}\n.glyphicon-folder-close:before {\n  content: \"\\e117\";\n}\n.glyphicon-folder-open:before {\n  content: \"\\e118\";\n}\n.glyphicon-resize-vertical:before {\n  content: \"\\e119\";\n}\n.glyphicon-resize-horizontal:before {\n  content: \"\\e120\";\n}\n.glyphicon-hdd:before {\n  content: \"\\e121\";\n}\n.glyphicon-bullhorn:before {\n  content: \"\\e122\";\n}\n.glyphicon-bell:before {\n  content: \"\\e123\";\n}\n.glyphicon-certificate:before {\n  content: \"\\e124\";\n}\n.glyphicon-thumbs-up:before {\n  content: \"\\e125\";\n}\n.glyphicon-thumbs-down:before {\n  content: \"\\e126\";\n}\n.glyphicon-hand-right:before {\n  content: \"\\e127\";\n}\n.glyphicon-hand-left:before {\n  content: \"\\e128\";\n}\n.glyphicon-hand-up:before {\n  content: \"\\e129\";\n}\n.glyphicon-hand-down:before {\n  content: \"\\e130\";\n}\n.glyphicon-circle-arrow-right:before {\n  content: \"\\e131\";\n}\n.glyphicon-circle-arrow-left:before {\n  content: \"\\e132\";\n}\n.glyphicon-circle-arrow-up:before {\n  content: 
\"\\e133\";\n}\n.glyphicon-circle-arrow-down:before {\n  content: \"\\e134\";\n}\n.glyphicon-globe:before {\n  content: \"\\e135\";\n}\n.glyphicon-wrench:before {\n  content: \"\\e136\";\n}\n.glyphicon-tasks:before {\n  content: \"\\e137\";\n}\n.glyphicon-filter:before {\n  content: \"\\e138\";\n}\n.glyphicon-briefcase:before {\n  content: \"\\e139\";\n}\n.glyphicon-fullscreen:before {\n  content: \"\\e140\";\n}\n.glyphicon-dashboard:before {\n  content: \"\\e141\";\n}\n.glyphicon-paperclip:before {\n  content: \"\\e142\";\n}\n.glyphicon-heart-empty:before {\n  content: \"\\e143\";\n}\n.glyphicon-link:before {\n  content: \"\\e144\";\n}\n.glyphicon-phone:before {\n  content: \"\\e145\";\n}\n.glyphicon-pushpin:before {\n  content: \"\\e146\";\n}\n.glyphicon-usd:before {\n  content: \"\\e148\";\n}\n.glyphicon-gbp:before {\n  content: \"\\e149\";\n}\n.glyphicon-sort:before {\n  content: \"\\e150\";\n}\n.glyphicon-sort-by-alphabet:before {\n  content: \"\\e151\";\n}\n.glyphicon-sort-by-alphabet-alt:before {\n  content: \"\\e152\";\n}\n.glyphicon-sort-by-order:before {\n  content: \"\\e153\";\n}\n.glyphicon-sort-by-order-alt:before {\n  content: \"\\e154\";\n}\n.glyphicon-sort-by-attributes:before {\n  content: \"\\e155\";\n}\n.glyphicon-sort-by-attributes-alt:before {\n  content: \"\\e156\";\n}\n.glyphicon-unchecked:before {\n  content: \"\\e157\";\n}\n.glyphicon-expand:before {\n  content: \"\\e158\";\n}\n.glyphicon-collapse-down:before {\n  content: \"\\e159\";\n}\n.glyphicon-collapse-up:before {\n  content: \"\\e160\";\n}\n.glyphicon-log-in:before {\n  content: \"\\e161\";\n}\n.glyphicon-flash:before {\n  content: \"\\e162\";\n}\n.glyphicon-log-out:before {\n  content: \"\\e163\";\n}\n.glyphicon-new-window:before {\n  content: \"\\e164\";\n}\n.glyphicon-record:before {\n  content: \"\\e165\";\n}\n.glyphicon-save:before {\n  content: \"\\e166\";\n}\n.glyphicon-open:before {\n  content: \"\\e167\";\n}\n.glyphicon-saved:before {\n  content: \"\\e168\";\n}\n.glyphicon-import:before {\n  content: \"\\e169\";\n}\n.glyphicon-export:before {\n  content: \"\\e170\";\n}\n.glyphicon-send:before {\n  content: \"\\e171\";\n}\n.glyphicon-floppy-disk:before {\n  content: \"\\e172\";\n}\n.glyphicon-floppy-saved:before {\n  content: \"\\e173\";\n}\n.glyphicon-floppy-remove:before {\n  content: \"\\e174\";\n}\n.glyphicon-floppy-save:before {\n  content: \"\\e175\";\n}\n.glyphicon-floppy-open:before {\n  content: \"\\e176\";\n}\n.glyphicon-credit-card:before {\n  content: \"\\e177\";\n}\n.glyphicon-transfer:before {\n  content: \"\\e178\";\n}\n.glyphicon-cutlery:before {\n  content: \"\\e179\";\n}\n.glyphicon-header:before {\n  content: \"\\e180\";\n}\n.glyphicon-compressed:before {\n  content: \"\\e181\";\n}\n.glyphicon-earphone:before {\n  content: \"\\e182\";\n}\n.glyphicon-phone-alt:before {\n  content: \"\\e183\";\n}\n.glyphicon-tower:before {\n  content: \"\\e184\";\n}\n.glyphicon-stats:before {\n  content: \"\\e185\";\n}\n.glyphicon-sd-video:before {\n  content: \"\\e186\";\n}\n.glyphicon-hd-video:before {\n  content: \"\\e187\";\n}\n.glyphicon-subtitles:before {\n  content: \"\\e188\";\n}\n.glyphicon-sound-stereo:before {\n  content: \"\\e189\";\n}\n.glyphicon-sound-dolby:before {\n  content: \"\\e190\";\n}\n.glyphicon-sound-5-1:before {\n  content: \"\\e191\";\n}\n.glyphicon-sound-6-1:before {\n  content: \"\\e192\";\n}\n.glyphicon-sound-7-1:before {\n  content: \"\\e193\";\n}\n.glyphicon-copyright-mark:before {\n  content: \"\\e194\";\n}\n.glyphicon-registration-mark:before {\n  content: 
\"\\e195\";\n}\n.glyphicon-cloud-download:before {\n  content: \"\\e197\";\n}\n.glyphicon-cloud-upload:before {\n  content: \"\\e198\";\n}\n.glyphicon-tree-conifer:before {\n  content: \"\\e199\";\n}\n.glyphicon-tree-deciduous:before {\n  content: \"\\e200\";\n}\n.glyphicon-cd:before {\n  content: \"\\e201\";\n}\n.glyphicon-save-file:before {\n  content: \"\\e202\";\n}\n.glyphicon-open-file:before {\n  content: \"\\e203\";\n}\n.glyphicon-level-up:before {\n  content: \"\\e204\";\n}\n.glyphicon-copy:before {\n  content: \"\\e205\";\n}\n.glyphicon-paste:before {\n  content: \"\\e206\";\n}\n.glyphicon-alert:before {\n  content: \"\\e209\";\n}\n.glyphicon-equalizer:before {\n  content: \"\\e210\";\n}\n.glyphicon-king:before {\n  content: \"\\e211\";\n}\n.glyphicon-queen:before {\n  content: \"\\e212\";\n}\n.glyphicon-pawn:before {\n  content: \"\\e213\";\n}\n.glyphicon-bishop:before {\n  content: \"\\e214\";\n}\n.glyphicon-knight:before {\n  content: \"\\e215\";\n}\n.glyphicon-baby-formula:before {\n  content: \"\\e216\";\n}\n.glyphicon-tent:before {\n  content: \"\\26fa\";\n}\n.glyphicon-blackboard:before {\n  content: \"\\e218\";\n}\n.glyphicon-bed:before {\n  content: \"\\e219\";\n}\n.glyphicon-apple:before {\n  content: \"\\f8ff\";\n}\n.glyphicon-erase:before {\n  content: \"\\e221\";\n}\n.glyphicon-hourglass:before {\n  content: \"\\231b\";\n}\n.glyphicon-lamp:before {\n  content: \"\\e223\";\n}\n.glyphicon-duplicate:before {\n  content: \"\\e224\";\n}\n.glyphicon-piggy-bank:before {\n  content: \"\\e225\";\n}\n.glyphicon-scissors:before {\n  content: \"\\e226\";\n}\n.glyphicon-bitcoin:before {\n  content: \"\\e227\";\n}\n.glyphicon-btc:before {\n  content: \"\\e227\";\n}\n.glyphicon-xbt:before {\n  content: \"\\e227\";\n}\n.glyphicon-yen:before {\n  content: \"\\00a5\";\n}\n.glyphicon-jpy:before {\n  content: \"\\00a5\";\n}\n.glyphicon-ruble:before {\n  content: \"\\20bd\";\n}\n.glyphicon-rub:before {\n  content: \"\\20bd\";\n}\n.glyphicon-scale:before {\n  content: \"\\e230\";\n}\n.glyphicon-ice-lolly:before {\n  content: \"\\e231\";\n}\n.glyphicon-ice-lolly-tasted:before {\n  content: \"\\e232\";\n}\n.glyphicon-education:before {\n  content: \"\\e233\";\n}\n.glyphicon-option-horizontal:before {\n  content: \"\\e234\";\n}\n.glyphicon-option-vertical:before {\n  content: \"\\e235\";\n}\n.glyphicon-menu-hamburger:before {\n  content: \"\\e236\";\n}\n.glyphicon-modal-window:before {\n  content: \"\\e237\";\n}\n.glyphicon-oil:before {\n  content: \"\\e238\";\n}\n.glyphicon-grain:before {\n  content: \"\\e239\";\n}\n.glyphicon-sunglasses:before {\n  content: \"\\e240\";\n}\n.glyphicon-text-size:before {\n  content: \"\\e241\";\n}\n.glyphicon-text-color:before {\n  content: \"\\e242\";\n}\n.glyphicon-text-background:before {\n  content: \"\\e243\";\n}\n.glyphicon-object-align-top:before {\n  content: \"\\e244\";\n}\n.glyphicon-object-align-bottom:before {\n  content: \"\\e245\";\n}\n.glyphicon-object-align-horizontal:before {\n  content: \"\\e246\";\n}\n.glyphicon-object-align-left:before {\n  content: \"\\e247\";\n}\n.glyphicon-object-align-vertical:before {\n  content: \"\\e248\";\n}\n.glyphicon-object-align-right:before {\n  content: \"\\e249\";\n}\n.glyphicon-triangle-right:before {\n  content: \"\\e250\";\n}\n.glyphicon-triangle-left:before {\n  content: \"\\e251\";\n}\n.glyphicon-triangle-bottom:before {\n  content: \"\\e252\";\n}\n.glyphicon-triangle-top:before {\n  content: \"\\e253\";\n}\n.glyphicon-console:before {\n  content: \"\\e254\";\n}\n.glyphicon-superscript:before {\n  
content: \"\\e255\";\n}\n.glyphicon-subscript:before {\n  content: \"\\e256\";\n}\n.glyphicon-menu-left:before {\n  content: \"\\e257\";\n}\n.glyphicon-menu-right:before {\n  content: \"\\e258\";\n}\n.glyphicon-menu-down:before {\n  content: \"\\e259\";\n}\n.glyphicon-menu-up:before {\n  content: \"\\e260\";\n}\n* {\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n}\n*:before,\n*:after {\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n}\nhtml {\n  font-size: 10px;\n  -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\nbody {\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #333333;\n  background-color: #fff;\n}\ninput,\nbutton,\nselect,\ntextarea {\n  font-family: inherit;\n  font-size: inherit;\n  line-height: inherit;\n}\na {\n  color: #337ab7;\n  text-decoration: none;\n}\na:hover,\na:focus {\n  color: #23527c;\n  text-decoration: underline;\n}\na:focus {\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\nfigure {\n  margin: 0;\n}\nimg {\n  vertical-align: middle;\n}\n.img-responsive,\n.thumbnail > img,\n.thumbnail a > img,\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n  display: block;\n  max-width: 100%;\n  height: auto;\n}\n.img-rounded {\n  border-radius: 6px;\n}\n.img-thumbnail {\n  padding: 4px;\n  line-height: 1.42857143;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: all 0.2s ease-in-out;\n  -o-transition: all 0.2s ease-in-out;\n  transition: all 0.2s ease-in-out;\n  display: inline-block;\n  max-width: 100%;\n  height: auto;\n}\n.img-circle {\n  border-radius: 50%;\n}\nhr {\n  margin-top: 20px;\n  margin-bottom: 20px;\n  border: 0;\n  border-top: 1px solid #eeeeee;\n}\n.sr-only {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  padding: 0;\n  margin: -1px;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n}\n.sr-only-focusable:active,\n.sr-only-focusable:focus {\n  position: static;\n  width: auto;\n  height: auto;\n  margin: 0;\n  overflow: visible;\n  clip: auto;\n}\n[role=\"button\"] {\n  cursor: pointer;\n}\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\n.h1,\n.h2,\n.h3,\n.h4,\n.h5,\n.h6 {\n  font-family: inherit;\n  font-weight: 500;\n  line-height: 1.1;\n  color: inherit;\n}\nh1 small,\nh2 small,\nh3 small,\nh4 small,\nh5 small,\nh6 small,\n.h1 small,\n.h2 small,\n.h3 small,\n.h4 small,\n.h5 small,\n.h6 small,\nh1 .small,\nh2 .small,\nh3 .small,\nh4 .small,\nh5 .small,\nh6 .small,\n.h1 .small,\n.h2 .small,\n.h3 .small,\n.h4 .small,\n.h5 .small,\n.h6 .small {\n  font-weight: 400;\n  line-height: 1;\n  color: #777777;\n}\nh1,\n.h1,\nh2,\n.h2,\nh3,\n.h3 {\n  margin-top: 20px;\n  margin-bottom: 10px;\n}\nh1 small,\n.h1 small,\nh2 small,\n.h2 small,\nh3 small,\n.h3 small,\nh1 .small,\n.h1 .small,\nh2 .small,\n.h2 .small,\nh3 .small,\n.h3 .small {\n  font-size: 65%;\n}\nh4,\n.h4,\nh5,\n.h5,\nh6,\n.h6 {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\nh4 small,\n.h4 small,\nh5 small,\n.h5 small,\nh6 small,\n.h6 small,\nh4 .small,\n.h4 .small,\nh5 .small,\n.h5 .small,\nh6 .small,\n.h6 .small {\n  font-size: 75%;\n}\nh1,\n.h1 {\n  font-size: 36px;\n}\nh2,\n.h2 {\n  font-size: 30px;\n}\nh3,\n.h3 {\n  font-size: 24px;\n}\nh4,\n.h4 {\n  font-size: 18px;\n}\nh5,\n.h5 {\n  font-size: 14px;\n}\nh6,\n.h6 {\n  font-size: 12px;\n}\np {\n  margin: 0 0 10px;\n}\n.lead {\n  margin-bottom: 20px;\n  font-size: 16px;\n  
font-weight: 300;\n  line-height: 1.4;\n}\n@media (min-width: 768px) {\n  .lead {\n    font-size: 21px;\n  }\n}\nsmall,\n.small {\n  font-size: 85%;\n}\nmark,\n.mark {\n  padding: 0.2em;\n  background-color: #fcf8e3;\n}\n.text-left {\n  text-align: left;\n}\n.text-right {\n  text-align: right;\n}\n.text-center {\n  text-align: center;\n}\n.text-justify {\n  text-align: justify;\n}\n.text-nowrap {\n  white-space: nowrap;\n}\n.text-lowercase {\n  text-transform: lowercase;\n}\n.text-uppercase {\n  text-transform: uppercase;\n}\n.text-capitalize {\n  text-transform: capitalize;\n}\n.text-muted {\n  color: #777777;\n}\n.text-primary {\n  color: #337ab7;\n}\na.text-primary:hover,\na.text-primary:focus {\n  color: #286090;\n}\n.text-success {\n  color: #3c763d;\n}\na.text-success:hover,\na.text-success:focus {\n  color: #2b542c;\n}\n.text-info {\n  color: #31708f;\n}\na.text-info:hover,\na.text-info:focus {\n  color: #245269;\n}\n.text-warning {\n  color: #8a6d3b;\n}\na.text-warning:hover,\na.text-warning:focus {\n  color: #66512c;\n}\n.text-danger {\n  color: #a94442;\n}\na.text-danger:hover,\na.text-danger:focus {\n  color: #843534;\n}\n.bg-primary {\n  color: #fff;\n  background-color: #337ab7;\n}\na.bg-primary:hover,\na.bg-primary:focus {\n  background-color: #286090;\n}\n.bg-success {\n  background-color: #dff0d8;\n}\na.bg-success:hover,\na.bg-success:focus {\n  background-color: #c1e2b3;\n}\n.bg-info {\n  background-color: #d9edf7;\n}\na.bg-info:hover,\na.bg-info:focus {\n  background-color: #afd9ee;\n}\n.bg-warning {\n  background-color: #fcf8e3;\n}\na.bg-warning:hover,\na.bg-warning:focus {\n  background-color: #f7ecb5;\n}\n.bg-danger {\n  background-color: #f2dede;\n}\na.bg-danger:hover,\na.bg-danger:focus {\n  background-color: #e4b9b9;\n}\n.page-header {\n  padding-bottom: 9px;\n  margin: 40px 0 20px;\n  border-bottom: 1px solid #eeeeee;\n}\nul,\nol {\n  margin-top: 0;\n  margin-bottom: 10px;\n}\nul ul,\nol ul,\nul ol,\nol ol {\n  margin-bottom: 0;\n}\n.list-unstyled {\n  padding-left: 0;\n  list-style: none;\n}\n.list-inline {\n  padding-left: 0;\n  list-style: none;\n  margin-left: -5px;\n}\n.list-inline > li {\n  display: inline-block;\n  padding-right: 5px;\n  padding-left: 5px;\n}\ndl {\n  margin-top: 0;\n  margin-bottom: 20px;\n}\ndt,\ndd {\n  line-height: 1.42857143;\n}\ndt {\n  font-weight: 700;\n}\ndd {\n  margin-left: 0;\n}\n@media (min-width: 768px) {\n  .dl-horizontal dt {\n    float: left;\n    width: 160px;\n    clear: left;\n    text-align: right;\n    overflow: hidden;\n    text-overflow: ellipsis;\n    white-space: nowrap;\n  }\n  .dl-horizontal dd {\n    margin-left: 180px;\n  }\n}\nabbr[title],\nabbr[data-original-title] {\n  cursor: help;\n}\n.initialism {\n  font-size: 90%;\n  text-transform: uppercase;\n}\nblockquote {\n  padding: 10px 20px;\n  margin: 0 0 20px;\n  font-size: 17.5px;\n  border-left: 5px solid #eeeeee;\n}\nblockquote p:last-child,\nblockquote ul:last-child,\nblockquote ol:last-child {\n  margin-bottom: 0;\n}\nblockquote footer,\nblockquote small,\nblockquote .small {\n  display: block;\n  font-size: 80%;\n  line-height: 1.42857143;\n  color: #777777;\n}\nblockquote footer:before,\nblockquote small:before,\nblockquote .small:before {\n  content: \"\\2014 \\00A0\";\n}\n.blockquote-reverse,\nblockquote.pull-right {\n  padding-right: 15px;\n  padding-left: 0;\n  text-align: right;\n  border-right: 5px solid #eeeeee;\n  border-left: 0;\n}\n.blockquote-reverse footer:before,\nblockquote.pull-right footer:before,\n.blockquote-reverse 
small:before,\nblockquote.pull-right small:before,\n.blockquote-reverse .small:before,\nblockquote.pull-right .small:before {\n  content: \"\";\n}\n.blockquote-reverse footer:after,\nblockquote.pull-right footer:after,\n.blockquote-reverse small:after,\nblockquote.pull-right small:after,\n.blockquote-reverse .small:after,\nblockquote.pull-right .small:after {\n  content: \"\\00A0 \\2014\";\n}\naddress {\n  margin-bottom: 20px;\n  font-style: normal;\n  line-height: 1.42857143;\n}\ncode,\nkbd,\npre,\nsamp {\n  font-family: Menlo, Monaco, Consolas, \"Courier New\", monospace;\n}\ncode {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: #c7254e;\n  background-color: #f9f2f4;\n  border-radius: 4px;\n}\nkbd {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: #fff;\n  background-color: #333;\n  border-radius: 3px;\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\nkbd kbd {\n  padding: 0;\n  font-size: 100%;\n  font-weight: 700;\n  box-shadow: none;\n}\npre {\n  display: block;\n  padding: 9.5px;\n  margin: 0 0 10px;\n  font-size: 13px;\n  line-height: 1.42857143;\n  color: #333333;\n  word-break: break-all;\n  word-wrap: break-word;\n  background-color: #f5f5f5;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\npre code {\n  padding: 0;\n  font-size: inherit;\n  color: inherit;\n  white-space: pre-wrap;\n  background-color: transparent;\n  border-radius: 0;\n}\n.pre-scrollable {\n  max-height: 340px;\n  overflow-y: scroll;\n}\n.container {\n  padding-right: 15px;\n  padding-left: 15px;\n  margin-right: auto;\n  margin-left: auto;\n}\n@media (min-width: 768px) {\n  .container {\n    width: 750px;\n  }\n}\n@media (min-width: 992px) {\n  .container {\n    width: 970px;\n  }\n}\n@media (min-width: 1200px) {\n  .container {\n    width: 1170px;\n  }\n}\n.container-fluid {\n  padding-right: 15px;\n  padding-left: 15px;\n  margin-right: auto;\n  margin-left: auto;\n}\n.row {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n.row-no-gutters {\n  margin-right: 0;\n  margin-left: 0;\n}\n.row-no-gutters [class*=\"col-\"] {\n  padding-right: 0;\n  padding-left: 0;\n}\n.col-xs-1,\n.col-sm-1,\n.col-md-1,\n.col-lg-1,\n.col-xs-2,\n.col-sm-2,\n.col-md-2,\n.col-lg-2,\n.col-xs-3,\n.col-sm-3,\n.col-md-3,\n.col-lg-3,\n.col-xs-4,\n.col-sm-4,\n.col-md-4,\n.col-lg-4,\n.col-xs-5,\n.col-sm-5,\n.col-md-5,\n.col-lg-5,\n.col-xs-6,\n.col-sm-6,\n.col-md-6,\n.col-lg-6,\n.col-xs-7,\n.col-sm-7,\n.col-md-7,\n.col-lg-7,\n.col-xs-8,\n.col-sm-8,\n.col-md-8,\n.col-lg-8,\n.col-xs-9,\n.col-sm-9,\n.col-md-9,\n.col-lg-9,\n.col-xs-10,\n.col-sm-10,\n.col-md-10,\n.col-lg-10,\n.col-xs-11,\n.col-sm-11,\n.col-md-11,\n.col-lg-11,\n.col-xs-12,\n.col-sm-12,\n.col-md-12,\n.col-lg-12 {\n  position: relative;\n  min-height: 1px;\n  padding-right: 15px;\n  padding-left: 15px;\n}\n.col-xs-1,\n.col-xs-2,\n.col-xs-3,\n.col-xs-4,\n.col-xs-5,\n.col-xs-6,\n.col-xs-7,\n.col-xs-8,\n.col-xs-9,\n.col-xs-10,\n.col-xs-11,\n.col-xs-12 {\n  float: left;\n}\n.col-xs-12 {\n  width: 100%;\n}\n.col-xs-11 {\n  width: 91.66666667%;\n}\n.col-xs-10 {\n  width: 83.33333333%;\n}\n.col-xs-9 {\n  width: 75%;\n}\n.col-xs-8 {\n  width: 66.66666667%;\n}\n.col-xs-7 {\n  width: 58.33333333%;\n}\n.col-xs-6 {\n  width: 50%;\n}\n.col-xs-5 {\n  width: 41.66666667%;\n}\n.col-xs-4 {\n  width: 33.33333333%;\n}\n.col-xs-3 {\n  width: 25%;\n}\n.col-xs-2 {\n  width: 16.66666667%;\n}\n.col-xs-1 {\n  width: 8.33333333%;\n}\n.col-xs-pull-12 {\n  right: 100%;\n}\n.col-xs-pull-11 {\n  right: 91.66666667%;\n}\n.col-xs-pull-10 {\n  right: 83.33333333%;\n}\n.col-xs-pull-9 {\n  right: 
75%;\n}\n.col-xs-pull-8 {\n  right: 66.66666667%;\n}\n.col-xs-pull-7 {\n  right: 58.33333333%;\n}\n.col-xs-pull-6 {\n  right: 50%;\n}\n.col-xs-pull-5 {\n  right: 41.66666667%;\n}\n.col-xs-pull-4 {\n  right: 33.33333333%;\n}\n.col-xs-pull-3 {\n  right: 25%;\n}\n.col-xs-pull-2 {\n  right: 16.66666667%;\n}\n.col-xs-pull-1 {\n  right: 8.33333333%;\n}\n.col-xs-pull-0 {\n  right: auto;\n}\n.col-xs-push-12 {\n  left: 100%;\n}\n.col-xs-push-11 {\n  left: 91.66666667%;\n}\n.col-xs-push-10 {\n  left: 83.33333333%;\n}\n.col-xs-push-9 {\n  left: 75%;\n}\n.col-xs-push-8 {\n  left: 66.66666667%;\n}\n.col-xs-push-7 {\n  left: 58.33333333%;\n}\n.col-xs-push-6 {\n  left: 50%;\n}\n.col-xs-push-5 {\n  left: 41.66666667%;\n}\n.col-xs-push-4 {\n  left: 33.33333333%;\n}\n.col-xs-push-3 {\n  left: 25%;\n}\n.col-xs-push-2 {\n  left: 16.66666667%;\n}\n.col-xs-push-1 {\n  left: 8.33333333%;\n}\n.col-xs-push-0 {\n  left: auto;\n}\n.col-xs-offset-12 {\n  margin-left: 100%;\n}\n.col-xs-offset-11 {\n  margin-left: 91.66666667%;\n}\n.col-xs-offset-10 {\n  margin-left: 83.33333333%;\n}\n.col-xs-offset-9 {\n  margin-left: 75%;\n}\n.col-xs-offset-8 {\n  margin-left: 66.66666667%;\n}\n.col-xs-offset-7 {\n  margin-left: 58.33333333%;\n}\n.col-xs-offset-6 {\n  margin-left: 50%;\n}\n.col-xs-offset-5 {\n  margin-left: 41.66666667%;\n}\n.col-xs-offset-4 {\n  margin-left: 33.33333333%;\n}\n.col-xs-offset-3 {\n  margin-left: 25%;\n}\n.col-xs-offset-2 {\n  margin-left: 16.66666667%;\n}\n.col-xs-offset-1 {\n  margin-left: 8.33333333%;\n}\n.col-xs-offset-0 {\n  margin-left: 0%;\n}\n@media (min-width: 768px) {\n  .col-sm-1,\n  .col-sm-2,\n  .col-sm-3,\n  .col-sm-4,\n  .col-sm-5,\n  .col-sm-6,\n  .col-sm-7,\n  .col-sm-8,\n  .col-sm-9,\n  .col-sm-10,\n  .col-sm-11,\n  .col-sm-12 {\n    float: left;\n  }\n  .col-sm-12 {\n    width: 100%;\n  }\n  .col-sm-11 {\n    width: 91.66666667%;\n  }\n  .col-sm-10 {\n    width: 83.33333333%;\n  }\n  .col-sm-9 {\n    width: 75%;\n  }\n  .col-sm-8 {\n    width: 66.66666667%;\n  }\n  .col-sm-7 {\n    width: 58.33333333%;\n  }\n  .col-sm-6 {\n    width: 50%;\n  }\n  .col-sm-5 {\n    width: 41.66666667%;\n  }\n  .col-sm-4 {\n    width: 33.33333333%;\n  }\n  .col-sm-3 {\n    width: 25%;\n  }\n  .col-sm-2 {\n    width: 16.66666667%;\n  }\n  .col-sm-1 {\n    width: 8.33333333%;\n  }\n  .col-sm-pull-12 {\n    right: 100%;\n  }\n  .col-sm-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-sm-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-sm-pull-9 {\n    right: 75%;\n  }\n  .col-sm-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-sm-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-sm-pull-6 {\n    right: 50%;\n  }\n  .col-sm-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-sm-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-sm-pull-3 {\n    right: 25%;\n  }\n  .col-sm-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-sm-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-sm-pull-0 {\n    right: auto;\n  }\n  .col-sm-push-12 {\n    left: 100%;\n  }\n  .col-sm-push-11 {\n    left: 91.66666667%;\n  }\n  .col-sm-push-10 {\n    left: 83.33333333%;\n  }\n  .col-sm-push-9 {\n    left: 75%;\n  }\n  .col-sm-push-8 {\n    left: 66.66666667%;\n  }\n  .col-sm-push-7 {\n    left: 58.33333333%;\n  }\n  .col-sm-push-6 {\n    left: 50%;\n  }\n  .col-sm-push-5 {\n    left: 41.66666667%;\n  }\n  .col-sm-push-4 {\n    left: 33.33333333%;\n  }\n  .col-sm-push-3 {\n    left: 25%;\n  }\n  .col-sm-push-2 {\n    left: 16.66666667%;\n  }\n  .col-sm-push-1 {\n    left: 8.33333333%;\n  }\n  .col-sm-push-0 {\n    left: auto;\n  }\n  
.col-sm-offset-12 {\n    margin-left: 100%;\n  }\n  .col-sm-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-sm-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-sm-offset-9 {\n    margin-left: 75%;\n  }\n  .col-sm-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-sm-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-sm-offset-6 {\n    margin-left: 50%;\n  }\n  .col-sm-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-sm-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-sm-offset-3 {\n    margin-left: 25%;\n  }\n  .col-sm-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-sm-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-sm-offset-0 {\n    margin-left: 0%;\n  }\n}\n@media (min-width: 992px) {\n  .col-md-1,\n  .col-md-2,\n  .col-md-3,\n  .col-md-4,\n  .col-md-5,\n  .col-md-6,\n  .col-md-7,\n  .col-md-8,\n  .col-md-9,\n  .col-md-10,\n  .col-md-11,\n  .col-md-12 {\n    float: left;\n  }\n  .col-md-12 {\n    width: 100%;\n  }\n  .col-md-11 {\n    width: 91.66666667%;\n  }\n  .col-md-10 {\n    width: 83.33333333%;\n  }\n  .col-md-9 {\n    width: 75%;\n  }\n  .col-md-8 {\n    width: 66.66666667%;\n  }\n  .col-md-7 {\n    width: 58.33333333%;\n  }\n  .col-md-6 {\n    width: 50%;\n  }\n  .col-md-5 {\n    width: 41.66666667%;\n  }\n  .col-md-4 {\n    width: 33.33333333%;\n  }\n  .col-md-3 {\n    width: 25%;\n  }\n  .col-md-2 {\n    width: 16.66666667%;\n  }\n  .col-md-1 {\n    width: 8.33333333%;\n  }\n  .col-md-pull-12 {\n    right: 100%;\n  }\n  .col-md-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-md-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-md-pull-9 {\n    right: 75%;\n  }\n  .col-md-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-md-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-md-pull-6 {\n    right: 50%;\n  }\n  .col-md-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-md-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-md-pull-3 {\n    right: 25%;\n  }\n  .col-md-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-md-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-md-pull-0 {\n    right: auto;\n  }\n  .col-md-push-12 {\n    left: 100%;\n  }\n  .col-md-push-11 {\n    left: 91.66666667%;\n  }\n  .col-md-push-10 {\n    left: 83.33333333%;\n  }\n  .col-md-push-9 {\n    left: 75%;\n  }\n  .col-md-push-8 {\n    left: 66.66666667%;\n  }\n  .col-md-push-7 {\n    left: 58.33333333%;\n  }\n  .col-md-push-6 {\n    left: 50%;\n  }\n  .col-md-push-5 {\n    left: 41.66666667%;\n  }\n  .col-md-push-4 {\n    left: 33.33333333%;\n  }\n  .col-md-push-3 {\n    left: 25%;\n  }\n  .col-md-push-2 {\n    left: 16.66666667%;\n  }\n  .col-md-push-1 {\n    left: 8.33333333%;\n  }\n  .col-md-push-0 {\n    left: auto;\n  }\n  .col-md-offset-12 {\n    margin-left: 100%;\n  }\n  .col-md-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-md-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-md-offset-9 {\n    margin-left: 75%;\n  }\n  .col-md-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-md-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-md-offset-6 {\n    margin-left: 50%;\n  }\n  .col-md-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-md-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-md-offset-3 {\n    margin-left: 25%;\n  }\n  .col-md-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-md-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-md-offset-0 {\n    margin-left: 0%;\n  }\n}\n@media (min-width: 1200px) {\n  .col-lg-1,\n  .col-lg-2,\n  .col-lg-3,\n  .col-lg-4,\n  .col-lg-5,\n  .col-lg-6,\n  .col-lg-7,\n 
 .col-lg-8,\n  .col-lg-9,\n  .col-lg-10,\n  .col-lg-11,\n  .col-lg-12 {\n    float: left;\n  }\n  .col-lg-12 {\n    width: 100%;\n  }\n  .col-lg-11 {\n    width: 91.66666667%;\n  }\n  .col-lg-10 {\n    width: 83.33333333%;\n  }\n  .col-lg-9 {\n    width: 75%;\n  }\n  .col-lg-8 {\n    width: 66.66666667%;\n  }\n  .col-lg-7 {\n    width: 58.33333333%;\n  }\n  .col-lg-6 {\n    width: 50%;\n  }\n  .col-lg-5 {\n    width: 41.66666667%;\n  }\n  .col-lg-4 {\n    width: 33.33333333%;\n  }\n  .col-lg-3 {\n    width: 25%;\n  }\n  .col-lg-2 {\n    width: 16.66666667%;\n  }\n  .col-lg-1 {\n    width: 8.33333333%;\n  }\n  .col-lg-pull-12 {\n    right: 100%;\n  }\n  .col-lg-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-lg-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-lg-pull-9 {\n    right: 75%;\n  }\n  .col-lg-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-lg-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-lg-pull-6 {\n    right: 50%;\n  }\n  .col-lg-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-lg-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-lg-pull-3 {\n    right: 25%;\n  }\n  .col-lg-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-lg-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-lg-pull-0 {\n    right: auto;\n  }\n  .col-lg-push-12 {\n    left: 100%;\n  }\n  .col-lg-push-11 {\n    left: 91.66666667%;\n  }\n  .col-lg-push-10 {\n    left: 83.33333333%;\n  }\n  .col-lg-push-9 {\n    left: 75%;\n  }\n  .col-lg-push-8 {\n    left: 66.66666667%;\n  }\n  .col-lg-push-7 {\n    left: 58.33333333%;\n  }\n  .col-lg-push-6 {\n    left: 50%;\n  }\n  .col-lg-push-5 {\n    left: 41.66666667%;\n  }\n  .col-lg-push-4 {\n    left: 33.33333333%;\n  }\n  .col-lg-push-3 {\n    left: 25%;\n  }\n  .col-lg-push-2 {\n    left: 16.66666667%;\n  }\n  .col-lg-push-1 {\n    left: 8.33333333%;\n  }\n  .col-lg-push-0 {\n    left: auto;\n  }\n  .col-lg-offset-12 {\n    margin-left: 100%;\n  }\n  .col-lg-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-lg-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-lg-offset-9 {\n    margin-left: 75%;\n  }\n  .col-lg-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-lg-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-lg-offset-6 {\n    margin-left: 50%;\n  }\n  .col-lg-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-lg-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-lg-offset-3 {\n    margin-left: 25%;\n  }\n  .col-lg-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-lg-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-lg-offset-0 {\n    margin-left: 0%;\n  }\n}\ntable {\n  background-color: transparent;\n}\ntable col[class*=\"col-\"] {\n  position: static;\n  display: table-column;\n  float: none;\n}\ntable td[class*=\"col-\"],\ntable th[class*=\"col-\"] {\n  position: static;\n  display: table-cell;\n  float: none;\n}\ncaption {\n  padding-top: 8px;\n  padding-bottom: 8px;\n  color: #777777;\n  text-align: left;\n}\nth {\n  text-align: left;\n}\n.table {\n  width: 100%;\n  max-width: 100%;\n  margin-bottom: 20px;\n}\n.table > thead > tr > th,\n.table > tbody > tr > th,\n.table > tfoot > tr > th,\n.table > thead > tr > td,\n.table > tbody > tr > td,\n.table > tfoot > tr > td {\n  padding: 8px;\n  line-height: 1.42857143;\n  vertical-align: top;\n  border-top: 1px solid #ddd;\n}\n.table > thead > tr > th {\n  vertical-align: bottom;\n  border-bottom: 2px solid #ddd;\n}\n.table > caption + thead > tr:first-child > th,\n.table > colgroup + thead > tr:first-child > th,\n.table > thead:first-child > tr:first-child > th,\n.table > 
caption + thead > tr:first-child > td,\n.table > colgroup + thead > tr:first-child > td,\n.table > thead:first-child > tr:first-child > td {\n  border-top: 0;\n}\n.table > tbody + tbody {\n  border-top: 2px solid #ddd;\n}\n.table .table {\n  background-color: #fff;\n}\n.table-condensed > thead > tr > th,\n.table-condensed > tbody > tr > th,\n.table-condensed > tfoot > tr > th,\n.table-condensed > thead > tr > td,\n.table-condensed > tbody > tr > td,\n.table-condensed > tfoot > tr > td {\n  padding: 5px;\n}\n.table-bordered {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > tbody > tr > th,\n.table-bordered > tfoot > tr > th,\n.table-bordered > thead > tr > td,\n.table-bordered > tbody > tr > td,\n.table-bordered > tfoot > tr > td {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > thead > tr > td {\n  border-bottom-width: 2px;\n}\n.table-striped > tbody > tr:nth-of-type(odd) {\n  background-color: #f9f9f9;\n}\n.table-hover > tbody > tr:hover {\n  background-color: #f5f5f5;\n}\n.table > thead > tr > td.active,\n.table > tbody > tr > td.active,\n.table > tfoot > tr > td.active,\n.table > thead > tr > th.active,\n.table > tbody > tr > th.active,\n.table > tfoot > tr > th.active,\n.table > thead > tr.active > td,\n.table > tbody > tr.active > td,\n.table > tfoot > tr.active > td,\n.table > thead > tr.active > th,\n.table > tbody > tr.active > th,\n.table > tfoot > tr.active > th {\n  background-color: #f5f5f5;\n}\n.table-hover > tbody > tr > td.active:hover,\n.table-hover > tbody > tr > th.active:hover,\n.table-hover > tbody > tr.active:hover > td,\n.table-hover > tbody > tr:hover > .active,\n.table-hover > tbody > tr.active:hover > th {\n  background-color: #e8e8e8;\n}\n.table > thead > tr > td.success,\n.table > tbody > tr > td.success,\n.table > tfoot > tr > td.success,\n.table > thead > tr > th.success,\n.table > tbody > tr > th.success,\n.table > tfoot > tr > th.success,\n.table > thead > tr.success > td,\n.table > tbody > tr.success > td,\n.table > tfoot > tr.success > td,\n.table > thead > tr.success > th,\n.table > tbody > tr.success > th,\n.table > tfoot > tr.success > th {\n  background-color: #dff0d8;\n}\n.table-hover > tbody > tr > td.success:hover,\n.table-hover > tbody > tr > th.success:hover,\n.table-hover > tbody > tr.success:hover > td,\n.table-hover > tbody > tr:hover > .success,\n.table-hover > tbody > tr.success:hover > th {\n  background-color: #d0e9c6;\n}\n.table > thead > tr > td.info,\n.table > tbody > tr > td.info,\n.table > tfoot > tr > td.info,\n.table > thead > tr > th.info,\n.table > tbody > tr > th.info,\n.table > tfoot > tr > th.info,\n.table > thead > tr.info > td,\n.table > tbody > tr.info > td,\n.table > tfoot > tr.info > td,\n.table > thead > tr.info > th,\n.table > tbody > tr.info > th,\n.table > tfoot > tr.info > th {\n  background-color: #d9edf7;\n}\n.table-hover > tbody > tr > td.info:hover,\n.table-hover > tbody > tr > th.info:hover,\n.table-hover > tbody > tr.info:hover > td,\n.table-hover > tbody > tr:hover > .info,\n.table-hover > tbody > tr.info:hover > th {\n  background-color: #c4e3f3;\n}\n.table > thead > tr > td.warning,\n.table > tbody > tr > td.warning,\n.table > tfoot > tr > td.warning,\n.table > thead > tr > th.warning,\n.table > tbody > tr > th.warning,\n.table > tfoot > tr > th.warning,\n.table > thead > tr.warning > td,\n.table > tbody > tr.warning > td,\n.table > tfoot > tr.warning > td,\n.table > thead > tr.warning > th,\n.table > tbody > tr.warning > th,\n.table > 
tfoot > tr.warning > th {\n  background-color: #fcf8e3;\n}\n.table-hover > tbody > tr > td.warning:hover,\n.table-hover > tbody > tr > th.warning:hover,\n.table-hover > tbody > tr.warning:hover > td,\n.table-hover > tbody > tr:hover > .warning,\n.table-hover > tbody > tr.warning:hover > th {\n  background-color: #faf2cc;\n}\n.table > thead > tr > td.danger,\n.table > tbody > tr > td.danger,\n.table > tfoot > tr > td.danger,\n.table > thead > tr > th.danger,\n.table > tbody > tr > th.danger,\n.table > tfoot > tr > th.danger,\n.table > thead > tr.danger > td,\n.table > tbody > tr.danger > td,\n.table > tfoot > tr.danger > td,\n.table > thead > tr.danger > th,\n.table > tbody > tr.danger > th,\n.table > tfoot > tr.danger > th {\n  background-color: #f2dede;\n}\n.table-hover > tbody > tr > td.danger:hover,\n.table-hover > tbody > tr > th.danger:hover,\n.table-hover > tbody > tr.danger:hover > td,\n.table-hover > tbody > tr:hover > .danger,\n.table-hover > tbody > tr.danger:hover > th {\n  background-color: #ebcccc;\n}\n.table-responsive {\n  min-height: 0.01%;\n  overflow-x: auto;\n}\n@media screen and (max-width: 767px) {\n  .table-responsive {\n    width: 100%;\n    margin-bottom: 15px;\n    overflow-y: hidden;\n    -ms-overflow-style: -ms-autohiding-scrollbar;\n    border: 1px solid #ddd;\n  }\n  .table-responsive > .table {\n    margin-bottom: 0;\n  }\n  .table-responsive > .table > thead > tr > th,\n  .table-responsive > .table > tbody > tr > th,\n  .table-responsive > .table > tfoot > tr > th,\n  .table-responsive > .table > thead > tr > td,\n  .table-responsive > .table > tbody > tr > td,\n  .table-responsive > .table > tfoot > tr > td {\n    white-space: nowrap;\n  }\n  .table-responsive > .table-bordered {\n    border: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:first-child,\n  .table-responsive > .table-bordered > tbody > tr > th:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n  .table-responsive > .table-bordered > thead > tr > td:first-child,\n  .table-responsive > .table-bordered > tbody > tr > td:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n    border-left: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:last-child,\n  .table-responsive > .table-bordered > tbody > tr > th:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n  .table-responsive > .table-bordered > thead > tr > td:last-child,\n  .table-responsive > .table-bordered > tbody > tr > td:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n    border-right: 0;\n  }\n  .table-responsive > .table-bordered > tbody > tr:last-child > th,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > th,\n  .table-responsive > .table-bordered > tbody > tr:last-child > td,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n    border-bottom: 0;\n  }\n}\nfieldset {\n  min-width: 0;\n  padding: 0;\n  margin: 0;\n  border: 0;\n}\nlegend {\n  display: block;\n  width: 100%;\n  padding: 0;\n  margin-bottom: 20px;\n  font-size: 21px;\n  line-height: inherit;\n  color: #333333;\n  border: 0;\n  border-bottom: 1px solid #e5e5e5;\n}\nlabel {\n  display: inline-block;\n  max-width: 100%;\n  margin-bottom: 5px;\n  font-weight: 700;\n}\ninput[type=\"search\"] {\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n  -webkit-appearance: none;\n  appearance: 
none;\n}\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n  margin: 4px 0 0;\n  margin-top: 1px \\9;\n  line-height: normal;\n}\ninput[type=\"radio\"][disabled],\ninput[type=\"checkbox\"][disabled],\ninput[type=\"radio\"].disabled,\ninput[type=\"checkbox\"].disabled,\nfieldset[disabled] input[type=\"radio\"],\nfieldset[disabled] input[type=\"checkbox\"] {\n  cursor: not-allowed;\n}\ninput[type=\"file\"] {\n  display: block;\n}\ninput[type=\"range\"] {\n  display: block;\n  width: 100%;\n}\nselect[multiple],\nselect[size] {\n  height: auto;\n}\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\noutput {\n  display: block;\n  padding-top: 7px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555555;\n}\n.form-control {\n  display: block;\n  width: 100%;\n  height: 34px;\n  padding: 6px 12px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555555;\n  background-color: #fff;\n  background-image: none;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n  -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n  transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n}\n.form-control:focus {\n  border-color: #66afe9;\n  outline: 0;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);\n}\n.form-control::-moz-placeholder {\n  color: #999;\n  opacity: 1;\n}\n.form-control:-ms-input-placeholder {\n  color: #999;\n}\n.form-control::-webkit-input-placeholder {\n  color: #999;\n}\n.form-control::-ms-expand {\n  background-color: transparent;\n  border: 0;\n}\n.form-control[disabled],\n.form-control[readonly],\nfieldset[disabled] .form-control {\n  background-color: #eeeeee;\n  opacity: 1;\n}\n.form-control[disabled],\nfieldset[disabled] .form-control {\n  cursor: not-allowed;\n}\ntextarea.form-control {\n  height: auto;\n}\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n  input[type=\"date\"].form-control,\n  input[type=\"time\"].form-control,\n  input[type=\"datetime-local\"].form-control,\n  input[type=\"month\"].form-control {\n    line-height: 34px;\n  }\n  input[type=\"date\"].input-sm,\n  input[type=\"time\"].input-sm,\n  input[type=\"datetime-local\"].input-sm,\n  input[type=\"month\"].input-sm,\n  .input-group-sm input[type=\"date\"],\n  .input-group-sm input[type=\"time\"],\n  .input-group-sm input[type=\"datetime-local\"],\n  .input-group-sm input[type=\"month\"] {\n    line-height: 30px;\n  }\n  input[type=\"date\"].input-lg,\n  input[type=\"time\"].input-lg,\n  input[type=\"datetime-local\"].input-lg,\n  input[type=\"month\"].input-lg,\n  .input-group-lg input[type=\"date\"],\n  .input-group-lg input[type=\"time\"],\n  .input-group-lg input[type=\"datetime-local\"],\n  .input-group-lg input[type=\"month\"] {\n    line-height: 46px;\n  }\n}\n.form-group {\n  margin-bottom: 15px;\n}\n.radio,\n.checkbox {\n  position: relative;\n  display: block;\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.radio.disabled label,\n.checkbox.disabled label,\nfieldset[disabled] .radio label,\nfieldset[disabled] .checkbox label {\n  cursor: not-allowed;\n}\n.radio label,\n.checkbox label {\n  min-height: 
20px;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: 400;\n  cursor: pointer;\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n  position: absolute;\n  margin-top: 4px \\9;\n  margin-left: -20px;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n  margin-top: -5px;\n}\n.radio-inline,\n.checkbox-inline {\n  position: relative;\n  display: inline-block;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: 400;\n  vertical-align: middle;\n  cursor: pointer;\n}\n.radio-inline.disabled,\n.checkbox-inline.disabled,\nfieldset[disabled] .radio-inline,\nfieldset[disabled] .checkbox-inline {\n  cursor: not-allowed;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n  margin-top: 0;\n  margin-left: 10px;\n}\n.form-control-static {\n  min-height: 34px;\n  padding-top: 7px;\n  padding-bottom: 7px;\n  margin-bottom: 0;\n}\n.form-control-static.input-lg,\n.form-control-static.input-sm {\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-sm {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-sm {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-sm,\nselect[multiple].input-sm {\n  height: auto;\n}\n.form-group-sm .form-control {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.form-group-sm select.form-control {\n  height: 30px;\n  line-height: 30px;\n}\n.form-group-sm textarea.form-control,\n.form-group-sm select[multiple].form-control {\n  height: auto;\n}\n.form-group-sm .form-control-static {\n  height: 30px;\n  min-height: 32px;\n  padding: 6px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n}\n.input-lg {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-lg {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-lg,\nselect[multiple].input-lg {\n  height: auto;\n}\n.form-group-lg .form-control {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\n.form-group-lg select.form-control {\n  height: 46px;\n  line-height: 46px;\n}\n.form-group-lg textarea.form-control,\n.form-group-lg select[multiple].form-control {\n  height: auto;\n}\n.form-group-lg .form-control-static {\n  height: 46px;\n  min-height: 38px;\n  padding: 11px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n}\n.has-feedback {\n  position: relative;\n}\n.has-feedback .form-control {\n  padding-right: 42.5px;\n}\n.form-control-feedback {\n  position: absolute;\n  top: 0;\n  right: 0;\n  z-index: 2;\n  display: block;\n  width: 34px;\n  height: 34px;\n  line-height: 34px;\n  text-align: center;\n  pointer-events: none;\n}\n.input-lg + .form-control-feedback,\n.input-group-lg + .form-control-feedback,\n.form-group-lg .form-control + .form-control-feedback {\n  width: 46px;\n  height: 46px;\n  line-height: 46px;\n}\n.input-sm + .form-control-feedback,\n.input-group-sm + .form-control-feedback,\n.form-group-sm .form-control + .form-control-feedback {\n  width: 30px;\n  height: 30px;\n  line-height: 30px;\n}\n.has-success .help-block,\n.has-success .control-label,\n.has-success .radio,\n.has-success .checkbox,\n.has-success .radio-inline,\n.has-success .checkbox-inline,\n.has-success.radio label,\n.has-success.checkbox label,\n.has-success.radio-inline label,\n.has-success.checkbox-inline label {\n  color: 
#3c763d;\n}\n.has-success .form-control {\n  border-color: #3c763d;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-success .form-control:focus {\n  border-color: #2b542c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n}\n.has-success .input-group-addon {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #3c763d;\n}\n.has-success .form-control-feedback {\n  color: #3c763d;\n}\n.has-warning .help-block,\n.has-warning .control-label,\n.has-warning .radio,\n.has-warning .checkbox,\n.has-warning .radio-inline,\n.has-warning .checkbox-inline,\n.has-warning.radio label,\n.has-warning.checkbox label,\n.has-warning.radio-inline label,\n.has-warning.checkbox-inline label {\n  color: #8a6d3b;\n}\n.has-warning .form-control {\n  border-color: #8a6d3b;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-warning .form-control:focus {\n  border-color: #66512c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n}\n.has-warning .input-group-addon {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #8a6d3b;\n}\n.has-warning .form-control-feedback {\n  color: #8a6d3b;\n}\n.has-error .help-block,\n.has-error .control-label,\n.has-error .radio,\n.has-error .checkbox,\n.has-error .radio-inline,\n.has-error .checkbox-inline,\n.has-error.radio label,\n.has-error.checkbox label,\n.has-error.radio-inline label,\n.has-error.checkbox-inline label {\n  color: #a94442;\n}\n.has-error .form-control {\n  border-color: #a94442;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-error .form-control:focus {\n  border-color: #843534;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n}\n.has-error .input-group-addon {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #a94442;\n}\n.has-error .form-control-feedback {\n  color: #a94442;\n}\n.has-feedback label ~ .form-control-feedback {\n  top: 25px;\n}\n.has-feedback label.sr-only ~ .form-control-feedback {\n  top: 0;\n}\n.help-block {\n  display: block;\n  margin-top: 5px;\n  margin-bottom: 10px;\n  color: #737373;\n}\n@media (min-width: 768px) {\n  .form-inline .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .form-inline .form-control-static {\n    display: inline-block;\n  }\n  .form-inline .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .form-inline .input-group .input-group-addon,\n  .form-inline .input-group .input-group-btn,\n  .form-inline .input-group .form-control {\n    width: auto;\n  }\n  .form-inline .input-group > .form-control {\n    width: 100%;\n  }\n  .form-inline .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio,\n  .form-inline .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio label,\n  .form-inline .checkbox label {\n    padding-left: 0;\n  }\n  .form-inline .radio 
input[type=\"radio\"],\n  .form-inline .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .form-inline .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox,\n.form-horizontal .radio-inline,\n.form-horizontal .checkbox-inline {\n  padding-top: 7px;\n  margin-top: 0;\n  margin-bottom: 0;\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox {\n  min-height: 27px;\n}\n.form-horizontal .form-group {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .control-label {\n    padding-top: 7px;\n    margin-bottom: 0;\n    text-align: right;\n  }\n}\n.form-horizontal .has-feedback .form-control-feedback {\n  right: 15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-lg .control-label {\n    padding-top: 11px;\n    font-size: 18px;\n  }\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-sm .control-label {\n    padding-top: 6px;\n    font-size: 12px;\n  }\n}\n.btn {\n  display: inline-block;\n  margin-bottom: 0;\n  font-weight: normal;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  touch-action: manipulation;\n  cursor: pointer;\n  background-image: none;\n  border: 1px solid transparent;\n  padding: 6px 12px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  border-radius: 4px;\n  -webkit-user-select: none;\n  -moz-user-select: none;\n  -ms-user-select: none;\n  user-select: none;\n}\n.btn:focus,\n.btn:active:focus,\n.btn.active:focus,\n.btn.focus,\n.btn:active.focus,\n.btn.active.focus {\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n.btn:hover,\n.btn:focus,\n.btn.focus {\n  color: #333;\n  text-decoration: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n  outline: 0;\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn.disabled,\n.btn[disabled],\nfieldset[disabled] .btn {\n  cursor: not-allowed;\n  filter: alpha(opacity=65);\n  opacity: 0.65;\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\na.btn.disabled,\nfieldset[disabled] a.btn {\n  pointer-events: none;\n}\n.btn-default {\n  color: #333;\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default:focus,\n.btn-default.focus {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #8c8c8c;\n}\n.btn-default:hover {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #adadad;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n  color: #333;\n  background-color: #e6e6e6;\n  background-image: none;\n  border-color: #adadad;\n}\n.btn-default:active:hover,\n.btn-default.active:hover,\n.open > .dropdown-toggle.btn-default:hover,\n.btn-default:active:focus,\n.btn-default.active:focus,\n.open > .dropdown-toggle.btn-default:focus,\n.btn-default:active.focus,\n.btn-default.active.focus,\n.open > .dropdown-toggle.btn-default.focus {\n  color: #333;\n  background-color: #d4d4d4;\n  border-color: #8c8c8c;\n}\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus {\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default .badge {\n  color: #fff;\n  background-color: #333;\n}\n.btn-primary {\n  color: #fff;\n  
background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary:focus,\n.btn-primary.focus {\n  color: #fff;\n  background-color: #286090;\n  border-color: #122b40;\n}\n.btn-primary:hover {\n  color: #fff;\n  background-color: #286090;\n  border-color: #204d74;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n  color: #fff;\n  background-color: #286090;\n  background-image: none;\n  border-color: #204d74;\n}\n.btn-primary:active:hover,\n.btn-primary.active:hover,\n.open > .dropdown-toggle.btn-primary:hover,\n.btn-primary:active:focus,\n.btn-primary.active:focus,\n.open > .dropdown-toggle.btn-primary:focus,\n.btn-primary:active.focus,\n.btn-primary.active.focus,\n.open > .dropdown-toggle.btn-primary.focus {\n  color: #fff;\n  background-color: #204d74;\n  border-color: #122b40;\n}\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus {\n  background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.btn-success {\n  color: #fff;\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success:focus,\n.btn-success.focus {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #255625;\n}\n.btn-success:hover {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #398439;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n  color: #fff;\n  background-color: #449d44;\n  background-image: none;\n  border-color: #398439;\n}\n.btn-success:active:hover,\n.btn-success.active:hover,\n.open > .dropdown-toggle.btn-success:hover,\n.btn-success:active:focus,\n.btn-success.active:focus,\n.open > .dropdown-toggle.btn-success:focus,\n.btn-success:active.focus,\n.btn-success.active.focus,\n.open > .dropdown-toggle.btn-success.focus {\n  color: #fff;\n  background-color: #398439;\n  border-color: #255625;\n}\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus {\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success .badge {\n  color: #5cb85c;\n  background-color: #fff;\n}\n.btn-info {\n  color: #fff;\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info:focus,\n.btn-info.focus {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #1b6d85;\n}\n.btn-info:hover {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #269abc;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n  color: #fff;\n  background-color: #31b0d5;\n  background-image: none;\n  border-color: #269abc;\n}\n.btn-info:active:hover,\n.btn-info.active:hover,\n.open > .dropdown-toggle.btn-info:hover,\n.btn-info:active:focus,\n.btn-info.active:focus,\n.open > .dropdown-toggle.btn-info:focus,\n.btn-info:active.focus,\n.btn-info.active.focus,\n.open > .dropdown-toggle.btn-info.focus {\n  color: #fff;\n  background-color: #269abc;\n  border-color: #1b6d85;\n}\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] 
.btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus {\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info .badge {\n  color: #5bc0de;\n  background-color: #fff;\n}\n.btn-warning {\n  color: #fff;\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning:focus,\n.btn-warning.focus {\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #985f0d;\n}\n.btn-warning:hover {\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #d58512;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n  color: #fff;\n  background-color: #ec971f;\n  background-image: none;\n  border-color: #d58512;\n}\n.btn-warning:active:hover,\n.btn-warning.active:hover,\n.open > .dropdown-toggle.btn-warning:hover,\n.btn-warning:active:focus,\n.btn-warning.active:focus,\n.open > .dropdown-toggle.btn-warning:focus,\n.btn-warning:active.focus,\n.btn-warning.active.focus,\n.open > .dropdown-toggle.btn-warning.focus {\n  color: #fff;\n  background-color: #d58512;\n  border-color: #985f0d;\n}\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus {\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning .badge {\n  color: #f0ad4e;\n  background-color: #fff;\n}\n.btn-danger {\n  color: #fff;\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger:focus,\n.btn-danger.focus {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #761c19;\n}\n.btn-danger:hover {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #ac2925;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n  color: #fff;\n  background-color: #c9302c;\n  background-image: none;\n  border-color: #ac2925;\n}\n.btn-danger:active:hover,\n.btn-danger.active:hover,\n.open > .dropdown-toggle.btn-danger:hover,\n.btn-danger:active:focus,\n.btn-danger.active:focus,\n.open > .dropdown-toggle.btn-danger:focus,\n.btn-danger:active.focus,\n.btn-danger.active.focus,\n.open > .dropdown-toggle.btn-danger.focus {\n  color: #fff;\n  background-color: #ac2925;\n  border-color: #761c19;\n}\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus {\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger .badge {\n  color: #d9534f;\n  background-color: #fff;\n}\n.btn-link {\n  font-weight: 400;\n  color: #337ab7;\n  border-radius: 0;\n}\n.btn-link,\n.btn-link:active,\n.btn-link.active,\n.btn-link[disabled],\nfieldset[disabled] .btn-link {\n  background-color: transparent;\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\n.btn-link,\n.btn-link:hover,\n.btn-link:focus,\n.btn-link:active {\n  border-color: transparent;\n}\n.btn-link:hover,\n.btn-link:focus {\n  color: #23527c;\n  text-decoration: underline;\n  background-color: transparent;\n}\n.btn-link[disabled]:hover,\nfieldset[disabled] .btn-link:hover,\n.btn-link[disabled]:focus,\nfieldset[disabled] .btn-link:focus {\n  color: 
#777777;\n  text-decoration: none;\n}\n.btn-lg,\n.btn-group-lg > .btn {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\n.btn-sm,\n.btn-group-sm > .btn {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-xs,\n.btn-group-xs > .btn {\n  padding: 1px 5px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-block {\n  display: block;\n  width: 100%;\n}\n.btn-block + .btn-block {\n  margin-top: 5px;\n}\ninput[type=\"submit\"].btn-block,\ninput[type=\"reset\"].btn-block,\ninput[type=\"button\"].btn-block {\n  width: 100%;\n}\n.fade {\n  opacity: 0;\n  -webkit-transition: opacity 0.15s linear;\n  -o-transition: opacity 0.15s linear;\n  transition: opacity 0.15s linear;\n}\n.fade.in {\n  opacity: 1;\n}\n.collapse {\n  display: none;\n}\n.collapse.in {\n  display: block;\n}\ntr.collapse.in {\n  display: table-row;\n}\ntbody.collapse.in {\n  display: table-row-group;\n}\n.collapsing {\n  position: relative;\n  height: 0;\n  overflow: hidden;\n  -webkit-transition-property: height, visibility;\n  transition-property: height, visibility;\n  -webkit-transition-duration: 0.35s;\n  transition-duration: 0.35s;\n  -webkit-transition-timing-function: ease;\n  transition-timing-function: ease;\n}\n.caret {\n  display: inline-block;\n  width: 0;\n  height: 0;\n  margin-left: 2px;\n  vertical-align: middle;\n  border-top: 4px dashed;\n  border-top: 4px solid \\9;\n  border-right: 4px solid transparent;\n  border-left: 4px solid transparent;\n}\n.dropup,\n.dropdown {\n  position: relative;\n}\n.dropdown-toggle:focus {\n  outline: 0;\n}\n.dropdown-menu {\n  position: absolute;\n  top: 100%;\n  left: 0;\n  z-index: 1000;\n  display: none;\n  float: left;\n  min-width: 160px;\n  padding: 5px 0;\n  margin: 2px 0 0;\n  font-size: 14px;\n  text-align: left;\n  list-style: none;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, 0.15);\n  border-radius: 4px;\n  -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n  box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n}\n.dropdown-menu.pull-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu .divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.dropdown-menu > li > a {\n  display: block;\n  padding: 3px 20px;\n  clear: both;\n  font-weight: 400;\n  line-height: 1.42857143;\n  color: #333333;\n  white-space: nowrap;\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  color: #262626;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  color: #fff;\n  text-decoration: none;\n  background-color: #337ab7;\n  outline: 0;\n}\n.dropdown-menu > .disabled > a,\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  color: #777777;\n}\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n  background-image: none;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n}\n.open > .dropdown-menu {\n  display: block;\n}\n.open > a {\n  outline: 0;\n}\n.dropdown-menu-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu-left {\n  right: auto;\n  left: 0;\n}\n.dropdown-header {\n  display: block;\n  padding: 3px 20px;\n  font-size: 12px;\n  line-height: 1.42857143;\n  color: 
#777777;\n  white-space: nowrap;\n}\n.dropdown-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 990;\n}\n.pull-right > .dropdown-menu {\n  right: 0;\n  left: auto;\n}\n.dropup .caret,\n.navbar-fixed-bottom .dropdown .caret {\n  content: \"\";\n  border-top: 0;\n  border-bottom: 4px dashed;\n  border-bottom: 4px solid \\9;\n}\n.dropup .dropdown-menu,\n.navbar-fixed-bottom .dropdown .dropdown-menu {\n  top: auto;\n  bottom: 100%;\n  margin-bottom: 2px;\n}\n@media (min-width: 768px) {\n  .navbar-right .dropdown-menu {\n    right: 0;\n    left: auto;\n  }\n  .navbar-right .dropdown-menu-left {\n    right: auto;\n    left: 0;\n  }\n}\n.btn-group,\n.btn-group-vertical {\n  position: relative;\n  display: inline-block;\n  vertical-align: middle;\n}\n.btn-group > .btn,\n.btn-group-vertical > .btn {\n  position: relative;\n  float: left;\n}\n.btn-group > .btn:hover,\n.btn-group-vertical > .btn:hover,\n.btn-group > .btn:focus,\n.btn-group-vertical > .btn:focus,\n.btn-group > .btn:active,\n.btn-group-vertical > .btn:active,\n.btn-group > .btn.active,\n.btn-group-vertical > .btn.active {\n  z-index: 2;\n}\n.btn-group .btn + .btn,\n.btn-group .btn + .btn-group,\n.btn-group .btn-group + .btn,\n.btn-group .btn-group + .btn-group {\n  margin-left: -1px;\n}\n.btn-toolbar {\n  margin-left: -5px;\n}\n.btn-toolbar .btn,\n.btn-toolbar .btn-group,\n.btn-toolbar .input-group {\n  float: left;\n}\n.btn-toolbar > .btn,\n.btn-toolbar > .btn-group,\n.btn-toolbar > .input-group {\n  margin-left: 5px;\n}\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n  border-radius: 0;\n}\n.btn-group > .btn:first-child {\n  margin-left: 0;\n}\n.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group > .btn-group {\n  float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n  outline: 0;\n}\n.btn-group > .btn + .dropdown-toggle {\n  padding-right: 8px;\n  padding-left: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n  padding-right: 12px;\n  padding-left: 12px;\n}\n.btn-group.open .dropdown-toggle {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-group.open .dropdown-toggle.btn-link {\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\n.btn .caret {\n  margin-left: 0;\n}\n.btn-lg .caret {\n  border-width: 5px 5px 0;\n  border-bottom-width: 0;\n}\n.dropup .btn-lg .caret {\n  border-width: 0 5px 5px;\n}\n.btn-group-vertical > .btn,\n.btn-group-vertical > .btn-group,\n.btn-group-vertical > .btn-group > .btn {\n  display: block;\n  float: none;\n  width: 100%;\n  max-width: 100%;\n}\n.btn-group-vertical > .btn-group > .btn {\n  float: none;\n}\n.btn-group-vertical > .btn + .btn,\n.btn-group-vertical > .btn + .btn-group,\n.btn-group-vertical > .btn-group + 
.btn,\n.btn-group-vertical > .btn-group + .btn-group {\n  margin-top: -1px;\n  margin-left: 0;\n}\n.btn-group-vertical > .btn:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn:first-child:not(:last-child) {\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn:last-child:not(:first-child) {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.btn-group-justified {\n  display: table;\n  width: 100%;\n  table-layout: fixed;\n  border-collapse: separate;\n}\n.btn-group-justified > .btn,\n.btn-group-justified > .btn-group {\n  display: table-cell;\n  float: none;\n  width: 1%;\n}\n.btn-group-justified > .btn-group .btn {\n  width: 100%;\n}\n.btn-group-justified > .btn-group .dropdown-menu {\n  left: auto;\n}\n[data-toggle=\"buttons\"] > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn input[type=\"checkbox\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"checkbox\"] {\n  position: absolute;\n  clip: rect(0, 0, 0, 0);\n  pointer-events: none;\n}\n.input-group {\n  position: relative;\n  display: table;\n  border-collapse: separate;\n}\n.input-group[class*=\"col-\"] {\n  float: none;\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-group .form-control {\n  position: relative;\n  z-index: 2;\n  float: left;\n  width: 100%;\n  margin-bottom: 0;\n}\n.input-group .form-control:focus {\n  z-index: 3;\n}\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-group-lg > .form-control,\nselect.input-group-lg > .input-group-addon,\nselect.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-group-lg > .form-control,\ntextarea.input-group-lg > .input-group-addon,\ntextarea.input-group-lg > .input-group-btn > .btn,\nselect[multiple].input-group-lg > .form-control,\nselect[multiple].input-group-lg > .input-group-addon,\nselect[multiple].input-group-lg > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-group-sm > .form-control,\nselect.input-group-sm > .input-group-addon,\nselect.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-group-sm > .form-control,\ntextarea.input-group-sm > .input-group-addon,\ntextarea.input-group-sm > .input-group-btn > .btn,\nselect[multiple].input-group-sm > .form-control,\nselect[multiple].input-group-sm > .input-group-addon,\nselect[multiple].input-group-sm > 
.input-group-btn > .btn {\n  height: auto;\n}\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n  display: table-cell;\n}\n.input-group-addon:not(:first-child):not(:last-child),\n.input-group-btn:not(:first-child):not(:last-child),\n.input-group .form-control:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.input-group-addon,\n.input-group-btn {\n  width: 1%;\n  white-space: nowrap;\n  vertical-align: middle;\n}\n.input-group-addon {\n  padding: 6px 12px;\n  font-size: 14px;\n  font-weight: 400;\n  line-height: 1;\n  color: #555555;\n  text-align: center;\n  background-color: #eeeeee;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\n.input-group-addon.input-sm {\n  padding: 5px 10px;\n  font-size: 12px;\n  border-radius: 3px;\n}\n.input-group-addon.input-lg {\n  padding: 10px 16px;\n  font-size: 18px;\n  border-radius: 6px;\n}\n.input-group-addon input[type=\"radio\"],\n.input-group-addon input[type=\"checkbox\"] {\n  margin-top: 0;\n}\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.input-group-addon:first-child {\n  border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.input-group-addon:last-child {\n  border-left: 0;\n}\n.input-group-btn {\n  position: relative;\n  font-size: 0;\n  white-space: nowrap;\n}\n.input-group-btn > .btn {\n  position: relative;\n}\n.input-group-btn > .btn + .btn {\n  margin-left: -1px;\n}\n.input-group-btn > .btn:hover,\n.input-group-btn > .btn:focus,\n.input-group-btn > .btn:active {\n  z-index: 2;\n}\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group {\n  margin-right: -1px;\n}\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group {\n  z-index: 2;\n  margin-left: -1px;\n}\n.nav {\n  padding-left: 0;\n  margin-bottom: 0;\n  list-style: none;\n}\n.nav > li {\n  position: relative;\n  display: block;\n}\n.nav > li > a {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n}\n.nav > li > a:hover,\n.nav > li > a:focus {\n  text-decoration: none;\n  background-color: #eeeeee;\n}\n.nav > li.disabled > a {\n  color: #777777;\n}\n.nav > li.disabled > a:hover,\n.nav > li.disabled > a:focus {\n  color: #777777;\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n}\n.nav .open > a,\n.nav .open > a:hover,\n.nav .open > a:focus {\n  background-color: #eeeeee;\n  border-color: #337ab7;\n}\n.nav .nav-divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.nav > li > a > img {\n  max-width: none;\n}\n.nav-tabs {\n  border-bottom: 1px solid #ddd;\n}\n.nav-tabs > li {\n  float: left;\n  margin-bottom: -1px;\n}\n.nav-tabs > li > a {\n  margin-right: 2px;\n  line-height: 1.42857143;\n  border: 1px solid transparent;\n  border-radius: 4px 4px 0 0;\n}\n.nav-tabs > li 
> a:hover {\n  border-color: #eeeeee #eeeeee #ddd;\n}\n.nav-tabs > li.active > a,\n.nav-tabs > li.active > a:hover,\n.nav-tabs > li.active > a:focus {\n  color: #555555;\n  cursor: default;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-bottom-color: transparent;\n}\n.nav-tabs.nav-justified {\n  width: 100%;\n  border-bottom: 0;\n}\n.nav-tabs.nav-justified > li {\n  float: none;\n}\n.nav-tabs.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-tabs.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-tabs.nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs.nav-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs.nav-justified > .active > a,\n.nav-tabs.nav-justified > .active > a:hover,\n.nav-tabs.nav-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs.nav-justified > .active > a,\n  .nav-tabs.nav-justified > .active > a:hover,\n  .nav-tabs.nav-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.nav-pills > li {\n  float: left;\n}\n.nav-pills > li > a {\n  border-radius: 4px;\n}\n.nav-pills > li + li {\n  margin-left: 2px;\n}\n.nav-pills > li.active > a,\n.nav-pills > li.active > a:hover,\n.nav-pills > li.active > a:focus {\n  color: #fff;\n  background-color: #337ab7;\n}\n.nav-stacked > li {\n  float: none;\n}\n.nav-stacked > li + li {\n  margin-top: 2px;\n  margin-left: 0;\n}\n.nav-justified {\n  width: 100%;\n}\n.nav-justified > li {\n  float: none;\n}\n.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs-justified {\n  border-bottom: 0;\n}\n.nav-tabs-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs-justified > .active > a,\n.nav-tabs-justified > .active > a:hover,\n.nav-tabs-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs-justified > .active > a,\n  .nav-tabs-justified > .active > a:hover,\n  .nav-tabs-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.tab-content > .tab-pane {\n  display: none;\n}\n.tab-content > .active {\n  display: block;\n}\n.nav-tabs .dropdown-menu {\n  margin-top: -1px;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar {\n  position: relative;\n  min-height: 50px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n}\n@media (min-width: 768px) {\n  .navbar {\n    border-radius: 4px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-header {\n    float: left;\n  }\n}\n.navbar-collapse {\n  padding-right: 15px;\n  padding-left: 15px;\n  overflow-x: visible;\n  border-top: 1px solid transparent;\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1);\n  -webkit-overflow-scrolling: touch;\n}\n.navbar-collapse.in {\n  overflow-y: auto;\n}\n@media (min-width: 768px) {\n  .navbar-collapse {\n    width: auto;\n    border-top: 0;\n    box-shadow: none;\n  }\n  
.navbar-collapse.collapse {\n    display: block !important;\n    height: auto !important;\n    padding-bottom: 0;\n    overflow: visible !important;\n  }\n  .navbar-collapse.in {\n    overflow-y: visible;\n  }\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-static-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  position: fixed;\n  right: 0;\n  left: 0;\n  z-index: 1030;\n}\n.navbar-fixed-top .navbar-collapse,\n.navbar-fixed-bottom .navbar-collapse {\n  max-height: 340px;\n}\n@media (max-device-width: 480px) and (orientation: landscape) {\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    max-height: 200px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-fixed-top,\n  .navbar-fixed-bottom {\n    border-radius: 0;\n  }\n}\n.navbar-fixed-top {\n  top: 0;\n  border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n  bottom: 0;\n  margin-bottom: 0;\n  border-width: 1px 0 0;\n}\n.container > .navbar-header,\n.container-fluid > .navbar-header,\n.container > .navbar-collapse,\n.container-fluid > .navbar-collapse {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .container > .navbar-header,\n  .container-fluid > .navbar-header,\n  .container > .navbar-collapse,\n  .container-fluid > .navbar-collapse {\n    margin-right: 0;\n    margin-left: 0;\n  }\n}\n.navbar-static-top {\n  z-index: 1000;\n  border-width: 0 0 1px;\n}\n@media (min-width: 768px) {\n  .navbar-static-top {\n    border-radius: 0;\n  }\n}\n.navbar-brand {\n  float: left;\n  height: 50px;\n  padding: 15px 15px;\n  font-size: 18px;\n  line-height: 20px;\n}\n.navbar-brand:hover,\n.navbar-brand:focus {\n  text-decoration: none;\n}\n.navbar-brand > img {\n  display: block;\n}\n@media (min-width: 768px) {\n  .navbar > .container .navbar-brand,\n  .navbar > .container-fluid .navbar-brand {\n    margin-left: -15px;\n  }\n}\n.navbar-toggle {\n  position: relative;\n  float: right;\n  padding: 9px 10px;\n  margin-right: 15px;\n  margin-top: 8px;\n  margin-bottom: 8px;\n  background-color: transparent;\n  background-image: none;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.navbar-toggle:focus {\n  outline: 0;\n}\n.navbar-toggle .icon-bar {\n  display: block;\n  width: 22px;\n  height: 2px;\n  border-radius: 1px;\n}\n.navbar-toggle .icon-bar + .icon-bar {\n  margin-top: 4px;\n}\n@media (min-width: 768px) {\n  .navbar-toggle {\n    display: none;\n  }\n}\n.navbar-nav {\n  margin: 7.5px -15px;\n}\n.navbar-nav > li > a {\n  padding-top: 10px;\n  padding-bottom: 10px;\n  line-height: 20px;\n}\n@media (max-width: 767px) {\n  .navbar-nav .open .dropdown-menu {\n    position: static;\n    float: none;\n    width: auto;\n    margin-top: 0;\n    background-color: transparent;\n    border: 0;\n    box-shadow: none;\n  }\n  .navbar-nav .open .dropdown-menu > li > a,\n  .navbar-nav .open .dropdown-menu .dropdown-header {\n    padding: 5px 15px 5px 25px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a {\n    line-height: 20px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-nav .open .dropdown-menu > li > a:focus {\n    background-image: none;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-nav {\n    float: left;\n    margin: 0;\n  }\n  .navbar-nav > li {\n    float: left;\n  }\n  .navbar-nav > li > a {\n    padding-top: 15px;\n    padding-bottom: 15px;\n  }\n}\n.navbar-form {\n  padding: 10px 15px;\n  margin-right: -15px;\n  margin-left: 
-15px;\n  border-top: 1px solid transparent;\n  border-bottom: 1px solid transparent;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n  margin-top: 8px;\n  margin-bottom: 8px;\n}\n@media (min-width: 768px) {\n  .navbar-form .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control-static {\n    display: inline-block;\n  }\n  .navbar-form .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .navbar-form .input-group .input-group-addon,\n  .navbar-form .input-group .input-group-btn,\n  .navbar-form .input-group .form-control {\n    width: auto;\n  }\n  .navbar-form .input-group > .form-control {\n    width: 100%;\n  }\n  .navbar-form .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio,\n  .navbar-form .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio label,\n  .navbar-form .checkbox label {\n    padding-left: 0;\n  }\n  .navbar-form .radio input[type=\"radio\"],\n  .navbar-form .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .navbar-form .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n@media (max-width: 767px) {\n  .navbar-form .form-group {\n    margin-bottom: 5px;\n  }\n  .navbar-form .form-group:last-child {\n    margin-bottom: 0;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-form {\n    width: auto;\n    padding-top: 0;\n    padding-bottom: 0;\n    margin-right: 0;\n    margin-left: 0;\n    border: 0;\n    -webkit-box-shadow: none;\n    box-shadow: none;\n  }\n}\n.navbar-nav > li > .dropdown-menu {\n  margin-top: 0;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n  margin-bottom: 0;\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.navbar-btn {\n  margin-top: 8px;\n  margin-bottom: 8px;\n}\n.navbar-btn.btn-sm {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.navbar-btn.btn-xs {\n  margin-top: 14px;\n  margin-bottom: 14px;\n}\n.navbar-text {\n  margin-top: 15px;\n  margin-bottom: 15px;\n}\n@media (min-width: 768px) {\n  .navbar-text {\n    float: left;\n    margin-right: 15px;\n    margin-left: 15px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-left {\n    float: left !important;\n  }\n  .navbar-right {\n    float: right !important;\n    margin-right: -15px;\n  }\n  .navbar-right ~ .navbar-right {\n    margin-right: 0;\n  }\n}\n.navbar-default {\n  background-color: #f8f8f8;\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-brand {\n  color: #777;\n}\n.navbar-default .navbar-brand:hover,\n.navbar-default .navbar-brand:focus {\n  color: #5e5e5e;\n  background-color: transparent;\n}\n.navbar-default .navbar-text {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a:hover,\n.navbar-default .navbar-nav > li > a:focus {\n  color: #333;\n  background-color: transparent;\n}\n.navbar-default .navbar-nav > .active > a,\n.navbar-default .navbar-nav > .active > a:hover,\n.navbar-default .navbar-nav > .active > a:focus {\n  color: #555;\n 
 background-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .disabled > a,\n.navbar-default .navbar-nav > .disabled > a:hover,\n.navbar-default .navbar-nav > .disabled > a:focus {\n  color: #ccc;\n  background-color: transparent;\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .open > a:hover,\n.navbar-default .navbar-nav > .open > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n@media (max-width: 767px) {\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a {\n    color: #777;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #333;\n    background-color: transparent;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #555;\n    background-color: #e7e7e7;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #ccc;\n    background-color: transparent;\n  }\n}\n.navbar-default .navbar-toggle {\n  border-color: #ddd;\n}\n.navbar-default .navbar-toggle:hover,\n.navbar-default .navbar-toggle:focus {\n  background-color: #ddd;\n}\n.navbar-default .navbar-toggle .icon-bar {\n  background-color: #888;\n}\n.navbar-default .navbar-collapse,\n.navbar-default .navbar-form {\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-link {\n  color: #777;\n}\n.navbar-default .navbar-link:hover {\n  color: #333;\n}\n.navbar-default .btn-link {\n  color: #777;\n}\n.navbar-default .btn-link:hover,\n.navbar-default .btn-link:focus {\n  color: #333;\n}\n.navbar-default .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-default .btn-link:hover,\n.navbar-default .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-default .btn-link:focus {\n  color: #ccc;\n}\n.navbar-inverse {\n  background-color: #222;\n  border-color: #080808;\n}\n.navbar-inverse .navbar-brand {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-brand:hover,\n.navbar-inverse .navbar-brand:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-text {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a:hover,\n.navbar-inverse .navbar-nav > li > a:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .active > a,\n.navbar-inverse .navbar-nav > .active > a:hover,\n.navbar-inverse .navbar-nav > .active > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n.navbar-inverse .navbar-nav > .disabled > a,\n.navbar-inverse .navbar-nav > .disabled > a:hover,\n.navbar-inverse .navbar-nav > .disabled > a:focus {\n  color: #444;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .open > a:hover,\n.navbar-inverse .navbar-nav > .open > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n@media (max-width: 767px) {\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {\n    border-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu .divider {\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {\n    color: #9d9d9d;\n  }\n  .navbar-inverse .navbar-nav .open 
.dropdown-menu > li > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #fff;\n    background-color: transparent;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #444;\n    background-color: transparent;\n  }\n}\n.navbar-inverse .navbar-toggle {\n  border-color: #333;\n}\n.navbar-inverse .navbar-toggle:hover,\n.navbar-inverse .navbar-toggle:focus {\n  background-color: #333;\n}\n.navbar-inverse .navbar-toggle .icon-bar {\n  background-color: #fff;\n}\n.navbar-inverse .navbar-collapse,\n.navbar-inverse .navbar-form {\n  border-color: #101010;\n}\n.navbar-inverse .navbar-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-link:hover {\n  color: #fff;\n}\n.navbar-inverse .btn-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link:focus {\n  color: #fff;\n}\n.navbar-inverse .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-inverse .btn-link:focus {\n  color: #444;\n}\n.breadcrumb {\n  padding: 8px 15px;\n  margin-bottom: 20px;\n  list-style: none;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n}\n.breadcrumb > li {\n  display: inline-block;\n}\n.breadcrumb > li + li:before {\n  padding: 0 5px;\n  color: #ccc;\n  content: \"/\\00a0\";\n}\n.breadcrumb > .active {\n  color: #777777;\n}\n.pagination {\n  display: inline-block;\n  padding-left: 0;\n  margin: 20px 0;\n  border-radius: 4px;\n}\n.pagination > li {\n  display: inline;\n}\n.pagination > li > a,\n.pagination > li > span {\n  position: relative;\n  float: left;\n  padding: 6px 12px;\n  margin-left: -1px;\n  line-height: 1.42857143;\n  color: #337ab7;\n  text-decoration: none;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.pagination > li > a:hover,\n.pagination > li > span:hover,\n.pagination > li > a:focus,\n.pagination > li > span:focus {\n  z-index: 2;\n  color: #23527c;\n  background-color: #eeeeee;\n  border-color: #ddd;\n}\n.pagination > li:first-child > a,\n.pagination > li:first-child > span {\n  margin-left: 0;\n  border-top-left-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.pagination > li:last-child > a,\n.pagination > li:last-child > span {\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 4px;\n}\n.pagination > .active > a,\n.pagination > .active > span,\n.pagination > .active > a:hover,\n.pagination > .active > span:hover,\n.pagination > .active > a:focus,\n.pagination > .active > span:focus {\n  z-index: 3;\n  color: #fff;\n  cursor: default;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.pagination > .disabled > span,\n.pagination > .disabled > span:hover,\n.pagination > .disabled > span:focus,\n.pagination > .disabled > a,\n.pagination > .disabled > a:hover,\n.pagination > .disabled > a:focus {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #fff;\n  border-color: #ddd;\n}\n.pagination-lg > li > a,\n.pagination-lg > li > span {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n}\n.pagination-lg > 
li:first-child > a,\n.pagination-lg > li:first-child > span {\n  border-top-left-radius: 6px;\n  border-bottom-left-radius: 6px;\n}\n.pagination-lg > li:last-child > a,\n.pagination-lg > li:last-child > span {\n  border-top-right-radius: 6px;\n  border-bottom-right-radius: 6px;\n}\n.pagination-sm > li > a,\n.pagination-sm > li > span {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n}\n.pagination-sm > li:first-child > a,\n.pagination-sm > li:first-child > span {\n  border-top-left-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.pagination-sm > li:last-child > a,\n.pagination-sm > li:last-child > span {\n  border-top-right-radius: 3px;\n  border-bottom-right-radius: 3px;\n}\n.pager {\n  padding-left: 0;\n  margin: 20px 0;\n  text-align: center;\n  list-style: none;\n}\n.pager li {\n  display: inline;\n}\n.pager li > a,\n.pager li > span {\n  display: inline-block;\n  padding: 5px 14px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 15px;\n}\n.pager li > a:hover,\n.pager li > a:focus {\n  text-decoration: none;\n  background-color: #eeeeee;\n}\n.pager .next > a,\n.pager .next > span {\n  float: right;\n}\n.pager .previous > a,\n.pager .previous > span {\n  float: left;\n}\n.pager .disabled > a,\n.pager .disabled > a:hover,\n.pager .disabled > a:focus,\n.pager .disabled > span {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #fff;\n}\n.label {\n  display: inline;\n  padding: 0.2em 0.6em 0.3em;\n  font-size: 75%;\n  font-weight: 700;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  border-radius: 0.25em;\n}\na.label:hover,\na.label:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.label:empty {\n  display: none;\n}\n.btn .label {\n  position: relative;\n  top: -1px;\n}\n.label-default {\n  background-color: #777777;\n}\n.label-default[href]:hover,\n.label-default[href]:focus {\n  background-color: #5e5e5e;\n}\n.label-primary {\n  background-color: #337ab7;\n}\n.label-primary[href]:hover,\n.label-primary[href]:focus {\n  background-color: #286090;\n}\n.label-success {\n  background-color: #5cb85c;\n}\n.label-success[href]:hover,\n.label-success[href]:focus {\n  background-color: #449d44;\n}\n.label-info {\n  background-color: #5bc0de;\n}\n.label-info[href]:hover,\n.label-info[href]:focus {\n  background-color: #31b0d5;\n}\n.label-warning {\n  background-color: #f0ad4e;\n}\n.label-warning[href]:hover,\n.label-warning[href]:focus {\n  background-color: #ec971f;\n}\n.label-danger {\n  background-color: #d9534f;\n}\n.label-danger[href]:hover,\n.label-danger[href]:focus {\n  background-color: #c9302c;\n}\n.badge {\n  display: inline-block;\n  min-width: 10px;\n  padding: 3px 7px;\n  font-size: 12px;\n  font-weight: bold;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  background-color: #777777;\n  border-radius: 10px;\n}\n.badge:empty {\n  display: none;\n}\n.btn .badge {\n  position: relative;\n  top: -1px;\n}\n.btn-xs .badge,\n.btn-group-xs > .btn .badge {\n  top: 0;\n  padding: 1px 5px;\n}\na.badge:hover,\na.badge:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.list-group-item > .badge {\n  float: right;\n}\n.list-group-item > .badge + .badge {\n  margin-right: 5px;\n}\n.nav-pills > li > a > .badge {\n  margin-left: 3px;\n}\n.jumbotron 
{\n  padding-top: 30px;\n  padding-bottom: 30px;\n  margin-bottom: 30px;\n  color: inherit;\n  background-color: #eeeeee;\n}\n.jumbotron h1,\n.jumbotron .h1 {\n  color: inherit;\n}\n.jumbotron p {\n  margin-bottom: 15px;\n  font-size: 21px;\n  font-weight: 200;\n}\n.jumbotron > hr {\n  border-top-color: #d5d5d5;\n}\n.container .jumbotron,\n.container-fluid .jumbotron {\n  padding-right: 15px;\n  padding-left: 15px;\n  border-radius: 6px;\n}\n.jumbotron .container {\n  max-width: 100%;\n}\n@media screen and (min-width: 768px) {\n  .jumbotron {\n    padding-top: 48px;\n    padding-bottom: 48px;\n  }\n  .container .jumbotron,\n  .container-fluid .jumbotron {\n    padding-right: 60px;\n    padding-left: 60px;\n  }\n  .jumbotron h1,\n  .jumbotron .h1 {\n    font-size: 63px;\n  }\n}\n.thumbnail {\n  display: block;\n  padding: 4px;\n  margin-bottom: 20px;\n  line-height: 1.42857143;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: border 0.2s ease-in-out;\n  -o-transition: border 0.2s ease-in-out;\n  transition: border 0.2s ease-in-out;\n}\n.thumbnail > img,\n.thumbnail a > img {\n  margin-right: auto;\n  margin-left: auto;\n}\na.thumbnail:hover,\na.thumbnail:focus,\na.thumbnail.active {\n  border-color: #337ab7;\n}\n.thumbnail .caption {\n  padding: 9px;\n  color: #333333;\n}\n.alert {\n  padding: 15px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.alert h4 {\n  margin-top: 0;\n  color: inherit;\n}\n.alert .alert-link {\n  font-weight: bold;\n}\n.alert > p,\n.alert > ul {\n  margin-bottom: 0;\n}\n.alert > p + p {\n  margin-top: 5px;\n}\n.alert-dismissable,\n.alert-dismissible {\n  padding-right: 35px;\n}\n.alert-dismissable .close,\n.alert-dismissible .close {\n  position: relative;\n  top: -2px;\n  right: -21px;\n  color: inherit;\n}\n.alert-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.alert-success hr {\n  border-top-color: #c9e2b3;\n}\n.alert-success .alert-link {\n  color: #2b542c;\n}\n.alert-info {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.alert-info hr {\n  border-top-color: #a6e1ec;\n}\n.alert-info .alert-link {\n  color: #245269;\n}\n.alert-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.alert-warning hr {\n  border-top-color: #f7e1b5;\n}\n.alert-warning .alert-link {\n  color: #66512c;\n}\n.alert-danger {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.alert-danger hr {\n  border-top-color: #e4b9c0;\n}\n.alert-danger .alert-link {\n  color: #843534;\n}\n@-webkit-keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n@keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n.progress {\n  height: 20px;\n  margin-bottom: 20px;\n  overflow: hidden;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n  box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n}\n.progress-bar {\n  float: left;\n  width: 0%;\n  height: 100%;\n  font-size: 12px;\n  line-height: 20px;\n  color: #fff;\n  text-align: center;\n  background-color: #337ab7;\n  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  -webkit-transition: width 0.6s ease;\n  -o-transition: width 0.6s ease;\n  transition: width 0.6s 
ease;\n}\n.progress-striped .progress-bar,\n.progress-bar-striped {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-size: 40px 40px;\n}\n.progress.active .progress-bar,\n.progress-bar.active {\n  -webkit-animation: progress-bar-stripes 2s linear infinite;\n  -o-animation: progress-bar-stripes 2s linear infinite;\n  animation: progress-bar-stripes 2s linear infinite;\n}\n.progress-bar-success {\n  background-color: #5cb85c;\n}\n.progress-striped .progress-bar-success {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-info {\n  background-color: #5bc0de;\n}\n.progress-striped .progress-bar-info {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-warning {\n  background-color: #f0ad4e;\n}\n.progress-striped .progress-bar-warning {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-danger {\n  background-color: #d9534f;\n}\n.progress-striped .progress-bar-danger {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 
0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.media {\n  margin-top: 15px;\n}\n.media:first-child {\n  margin-top: 0;\n}\n.media,\n.media-body {\n  overflow: hidden;\n  zoom: 1;\n}\n.media-body {\n  width: 10000px;\n}\n.media-object {\n  display: block;\n}\n.media-object.img-thumbnail {\n  max-width: none;\n}\n.media-right,\n.media > .pull-right {\n  padding-left: 10px;\n}\n.media-left,\n.media > .pull-left {\n  padding-right: 10px;\n}\n.media-left,\n.media-right,\n.media-body {\n  display: table-cell;\n  vertical-align: top;\n}\n.media-middle {\n  vertical-align: middle;\n}\n.media-bottom {\n  vertical-align: bottom;\n}\n.media-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.media-list {\n  padding-left: 0;\n  list-style: none;\n}\n.list-group {\n  padding-left: 0;\n  margin-bottom: 20px;\n}\n.list-group-item {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n  margin-bottom: -1px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.list-group-item:first-child {\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n}\n.list-group-item:last-child {\n  margin-bottom: 0;\n  border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.list-group-item.disabled,\n.list-group-item.disabled:hover,\n.list-group-item.disabled:focus {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #eeeeee;\n}\n.list-group-item.disabled .list-group-item-heading,\n.list-group-item.disabled:hover .list-group-item-heading,\n.list-group-item.disabled:focus .list-group-item-heading {\n  color: inherit;\n}\n.list-group-item.disabled .list-group-item-text,\n.list-group-item.disabled:hover .list-group-item-text,\n.list-group-item.disabled:focus .list-group-item-text {\n  color: #777777;\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  z-index: 2;\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.list-group-item.active .list-group-item-heading,\n.list-group-item.active:hover .list-group-item-heading,\n.list-group-item.active:focus .list-group-item-heading,\n.list-group-item.active .list-group-item-heading > small,\n.list-group-item.active:hover .list-group-item-heading > small,\n.list-group-item.active:focus .list-group-item-heading > small,\n.list-group-item.active .list-group-item-heading > .small,\n.list-group-item.active:hover .list-group-item-heading > .small,\n.list-group-item.active:focus .list-group-item-heading > .small {\n  color: inherit;\n}\n.list-group-item.active .list-group-item-text,\n.list-group-item.active:hover .list-group-item-text,\n.list-group-item.active:focus .list-group-item-text {\n  color: #c7ddef;\n}\na.list-group-item,\nbutton.list-group-item {\n  color: #555;\n}\na.list-group-item .list-group-item-heading,\nbutton.list-group-item .list-group-item-heading {\n  color: #333;\n}\na.list-group-item:hover,\nbutton.list-group-item:hover,\na.list-group-item:focus,\nbutton.list-group-item:focus {\n  color: #555;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\nbutton.list-group-item {\n  width: 100%;\n  text-align: left;\n}\n.list-group-item-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n}\na.list-group-item-success,\nbutton.list-group-item-success {\n  color: #3c763d;\n}\na.list-group-item-success 
.list-group-item-heading,\nbutton.list-group-item-success .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-success:hover,\nbutton.list-group-item-success:hover,\na.list-group-item-success:focus,\nbutton.list-group-item-success:focus {\n  color: #3c763d;\n  background-color: #d0e9c6;\n}\na.list-group-item-success.active,\nbutton.list-group-item-success.active,\na.list-group-item-success.active:hover,\nbutton.list-group-item-success.active:hover,\na.list-group-item-success.active:focus,\nbutton.list-group-item-success.active:focus {\n  color: #fff;\n  background-color: #3c763d;\n  border-color: #3c763d;\n}\n.list-group-item-info {\n  color: #31708f;\n  background-color: #d9edf7;\n}\na.list-group-item-info,\nbutton.list-group-item-info {\n  color: #31708f;\n}\na.list-group-item-info .list-group-item-heading,\nbutton.list-group-item-info .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-info:hover,\nbutton.list-group-item-info:hover,\na.list-group-item-info:focus,\nbutton.list-group-item-info:focus {\n  color: #31708f;\n  background-color: #c4e3f3;\n}\na.list-group-item-info.active,\nbutton.list-group-item-info.active,\na.list-group-item-info.active:hover,\nbutton.list-group-item-info.active:hover,\na.list-group-item-info.active:focus,\nbutton.list-group-item-info.active:focus {\n  color: #fff;\n  background-color: #31708f;\n  border-color: #31708f;\n}\n.list-group-item-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n}\na.list-group-item-warning,\nbutton.list-group-item-warning {\n  color: #8a6d3b;\n}\na.list-group-item-warning .list-group-item-heading,\nbutton.list-group-item-warning .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-warning:hover,\nbutton.list-group-item-warning:hover,\na.list-group-item-warning:focus,\nbutton.list-group-item-warning:focus {\n  color: #8a6d3b;\n  background-color: #faf2cc;\n}\na.list-group-item-warning.active,\nbutton.list-group-item-warning.active,\na.list-group-item-warning.active:hover,\nbutton.list-group-item-warning.active:hover,\na.list-group-item-warning.active:focus,\nbutton.list-group-item-warning.active:focus {\n  color: #fff;\n  background-color: #8a6d3b;\n  border-color: #8a6d3b;\n}\n.list-group-item-danger {\n  color: #a94442;\n  background-color: #f2dede;\n}\na.list-group-item-danger,\nbutton.list-group-item-danger {\n  color: #a94442;\n}\na.list-group-item-danger .list-group-item-heading,\nbutton.list-group-item-danger .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-danger:hover,\nbutton.list-group-item-danger:hover,\na.list-group-item-danger:focus,\nbutton.list-group-item-danger:focus {\n  color: #a94442;\n  background-color: #ebcccc;\n}\na.list-group-item-danger.active,\nbutton.list-group-item-danger.active,\na.list-group-item-danger.active:hover,\nbutton.list-group-item-danger.active:hover,\na.list-group-item-danger.active:focus,\nbutton.list-group-item-danger.active:focus {\n  color: #fff;\n  background-color: #a94442;\n  border-color: #a94442;\n}\n.list-group-item-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.list-group-item-text {\n  margin-bottom: 0;\n  line-height: 1.3;\n}\n.panel {\n  margin-bottom: 20px;\n  background-color: #fff;\n  border: 1px solid transparent;\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n  box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.panel-body {\n  padding: 15px;\n}\n.panel-heading {\n  padding: 10px 15px;\n  border-bottom: 1px solid transparent;\n  border-top-left-radius: 3px;\n  
border-top-right-radius: 3px;\n}\n.panel-heading > .dropdown .dropdown-toggle {\n  color: inherit;\n}\n.panel-title {\n  margin-top: 0;\n  margin-bottom: 0;\n  font-size: 16px;\n  color: inherit;\n}\n.panel-title > a,\n.panel-title > small,\n.panel-title > .small,\n.panel-title > small > a,\n.panel-title > .small > a {\n  color: inherit;\n}\n.panel-footer {\n  padding: 10px 15px;\n  background-color: #f5f5f5;\n  border-top: 1px solid #ddd;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .list-group,\n.panel > .panel-collapse > .list-group {\n  margin-bottom: 0;\n}\n.panel > .list-group .list-group-item,\n.panel > .panel-collapse > .list-group .list-group-item {\n  border-width: 1px 0;\n  border-radius: 0;\n}\n.panel > .list-group:first-child .list-group-item:first-child,\n.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {\n  border-top: 0;\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .list-group:last-child .list-group-item:last-child,\n.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {\n  border-bottom: 0;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.panel-heading + .list-group .list-group-item:first-child {\n  border-top-width: 0;\n}\n.list-group + .panel-footer {\n  border-top-width: 0;\n}\n.panel > .table,\n.panel > .table-responsive > .table,\n.panel > .panel-collapse > .table {\n  margin-bottom: 0;\n}\n.panel > .table caption,\n.panel > .table-responsive > .table caption,\n.panel > .panel-collapse > .table caption {\n  padding-right: 15px;\n  padding-left: 15px;\n}\n.panel > .table:first-child,\n.panel > .table-responsive:first-child > .table:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {\n  border-top-left-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > 
tr:first-child td:last-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {\n  border-top-right-radius: 3px;\n}\n.panel > .table:last-child,\n.panel > .table-responsive:last-child > .table:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {\n  border-bottom-right-radius: 3px;\n}\n.panel > .panel-body + .table,\n.panel > .panel-body + .table-responsive,\n.panel > .table + .panel-body,\n.panel > .table-responsive + .panel-body {\n  border-top: 1px solid #ddd;\n}\n.panel > .table > tbody:first-child > tr:first-child th,\n.panel > .table > tbody:first-child > tr:first-child td {\n  border-top: 0;\n}\n.panel > .table-bordered,\n.panel > .table-responsive > .table-bordered {\n  border: 0;\n}\n.panel > .table-bordered > thead > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,\n.panel > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-bordered > thead > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,\n.panel 
> .table-bordered > tbody > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-bordered > tfoot > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n  border-left: 0;\n}\n.panel > .table-bordered > thead > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,\n.panel > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-bordered > thead > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,\n.panel > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-bordered > tfoot > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n  border-right: 0;\n}\n.panel > .table-bordered > thead > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,\n.panel > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-bordered > thead > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,\n.panel > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {\n  border-bottom: 0;\n}\n.panel > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-bordered > tfoot > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {\n  border-bottom: 0;\n}\n.panel > .table-responsive {\n  margin-bottom: 0;\n  border: 0;\n}\n.panel-group {\n  margin-bottom: 20px;\n}\n.panel-group .panel {\n  margin-bottom: 0;\n  border-radius: 4px;\n}\n.panel-group .panel + .panel {\n  margin-top: 5px;\n}\n.panel-group .panel-heading {\n  border-bottom: 0;\n}\n.panel-group .panel-heading + .panel-collapse > .panel-body,\n.panel-group .panel-heading + .panel-collapse > .list-group {\n  border-top: 1px solid #ddd;\n}\n.panel-group .panel-footer {\n  border-top: 0;\n}\n.panel-group .panel-footer + .panel-collapse .panel-body {\n  border-bottom: 1px solid #ddd;\n}\n.panel-default {\n  border-color: #ddd;\n}\n.panel-default > .panel-heading {\n  color: #333333;\n  background-color: #f5f5f5;\n  border-color: #ddd;\n}\n.panel-default > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ddd;\n}\n.panel-default > .panel-heading .badge {\n  color: #f5f5f5;\n  background-color: #333333;\n}\n.panel-default > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ddd;\n}\n.panel-primary {\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading {\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #337ab7;\n}\n.panel-primary > .panel-heading .badge 
{\n  color: #337ab7;\n  background-color: #fff;\n}\n.panel-primary > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #337ab7;\n}\n.panel-success {\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #d6e9c6;\n}\n.panel-success > .panel-heading .badge {\n  color: #dff0d8;\n  background-color: #3c763d;\n}\n.panel-success > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #d6e9c6;\n}\n.panel-info {\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #bce8f1;\n}\n.panel-info > .panel-heading .badge {\n  color: #d9edf7;\n  background-color: #31708f;\n}\n.panel-info > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #bce8f1;\n}\n.panel-warning {\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #faebcc;\n}\n.panel-warning > .panel-heading .badge {\n  color: #fcf8e3;\n  background-color: #8a6d3b;\n}\n.panel-warning > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #faebcc;\n}\n.panel-danger {\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ebccd1;\n}\n.panel-danger > .panel-heading .badge {\n  color: #f2dede;\n  background-color: #a94442;\n}\n.panel-danger > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ebccd1;\n}\n.embed-responsive {\n  position: relative;\n  display: block;\n  height: 0;\n  padding: 0;\n  overflow: hidden;\n}\n.embed-responsive .embed-responsive-item,\n.embed-responsive iframe,\n.embed-responsive embed,\n.embed-responsive object,\n.embed-responsive video {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 100%;\n  height: 100%;\n  border: 0;\n}\n.embed-responsive-16by9 {\n  padding-bottom: 56.25%;\n}\n.embed-responsive-4by3 {\n  padding-bottom: 75%;\n}\n.well {\n  min-height: 20px;\n  padding: 19px;\n  margin-bottom: 20px;\n  background-color: #f5f5f5;\n  border: 1px solid #e3e3e3;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.well blockquote {\n  border-color: #ddd;\n  border-color: rgba(0, 0, 0, 0.15);\n}\n.well-lg {\n  padding: 24px;\n  border-radius: 6px;\n}\n.well-sm {\n  padding: 9px;\n  border-radius: 3px;\n}\n.close {\n  float: right;\n  font-size: 21px;\n  font-weight: bold;\n  line-height: 1;\n  color: #000;\n  text-shadow: 0 1px 0 #fff;\n  filter: alpha(opacity=20);\n  opacity: 0.2;\n}\n.close:hover,\n.close:focus {\n  color: #000;\n  text-decoration: none;\n  cursor: pointer;\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\nbutton.close {\n  padding: 0;\n  cursor: pointer;\n  background: transparent;\n  border: 0;\n  -webkit-appearance: none;\n  appearance: none;\n}\n.modal-open {\n  overflow: hidden;\n}\n.modal {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 1050;\n  display: none;\n  overflow: 
hidden;\n  -webkit-overflow-scrolling: touch;\n  outline: 0;\n}\n.modal.fade .modal-dialog {\n  -webkit-transform: translate(0, -25%);\n  -ms-transform: translate(0, -25%);\n  -o-transform: translate(0, -25%);\n  transform: translate(0, -25%);\n  -webkit-transition: -webkit-transform 0.3s ease-out;\n  -moz-transition: -moz-transform 0.3s ease-out;\n  -o-transition: -o-transform 0.3s ease-out;\n  transition: transform 0.3s ease-out;\n}\n.modal.in .modal-dialog {\n  -webkit-transform: translate(0, 0);\n  -ms-transform: translate(0, 0);\n  -o-transform: translate(0, 0);\n  transform: translate(0, 0);\n}\n.modal-open .modal {\n  overflow-x: hidden;\n  overflow-y: auto;\n}\n.modal-dialog {\n  position: relative;\n  width: auto;\n  margin: 10px;\n}\n.modal-content {\n  position: relative;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #999;\n  border: 1px solid rgba(0, 0, 0, 0.2);\n  border-radius: 6px;\n  -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n  box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n  outline: 0;\n}\n.modal-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 1040;\n  background-color: #000;\n}\n.modal-backdrop.fade {\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.modal-backdrop.in {\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\n.modal-header {\n  padding: 15px;\n  border-bottom: 1px solid #e5e5e5;\n}\n.modal-header .close {\n  margin-top: -2px;\n}\n.modal-title {\n  margin: 0;\n  line-height: 1.42857143;\n}\n.modal-body {\n  position: relative;\n  padding: 15px;\n}\n.modal-footer {\n  padding: 15px;\n  text-align: right;\n  border-top: 1px solid #e5e5e5;\n}\n.modal-footer .btn + .btn {\n  margin-bottom: 0;\n  margin-left: 5px;\n}\n.modal-footer .btn-group .btn + .btn {\n  margin-left: -1px;\n}\n.modal-footer .btn-block + .btn-block {\n  margin-left: 0;\n}\n.modal-scrollbar-measure {\n  position: absolute;\n  top: -9999px;\n  width: 50px;\n  height: 50px;\n  overflow: scroll;\n}\n@media (min-width: 768px) {\n  .modal-dialog {\n    width: 600px;\n    margin: 30px auto;\n  }\n  .modal-content {\n    -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n    box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n  }\n  .modal-sm {\n    width: 300px;\n  }\n}\n@media (min-width: 992px) {\n  .modal-lg {\n    width: 900px;\n  }\n}\n.tooltip {\n  position: absolute;\n  z-index: 1070;\n  display: block;\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1.42857143;\n  line-break: auto;\n  text-align: left;\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n  font-size: 12px;\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.tooltip.in {\n  filter: alpha(opacity=90);\n  opacity: 0.9;\n}\n.tooltip.top {\n  padding: 5px 0;\n  margin-top: -3px;\n}\n.tooltip.right {\n  padding: 0 5px;\n  margin-left: 3px;\n}\n.tooltip.bottom {\n  padding: 5px 0;\n  margin-top: 3px;\n}\n.tooltip.left {\n  padding: 0 5px;\n  margin-left: -3px;\n}\n.tooltip.top .tooltip-arrow {\n  bottom: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-left .tooltip-arrow {\n  right: 5px;\n  bottom: 0;\n  margin-bottom: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-right .tooltip-arrow {\n  bottom: 0;\n  left: 5px;\n  margin-bottom: 
[... vendored Bootstrap v3.4.1 and normalize.css v3.0.3 stylesheet content (bootstrap.css.map sourcesContent) from the deleted hadoop-hdds/hadoop-ozone static web UI assets omitted ...]
tr.danger > th,\n.table > tfoot > tr.danger > th {\n  background-color: #f2dede;\n}\n.table-hover > tbody > tr > td.danger:hover,\n.table-hover > tbody > tr > th.danger:hover,\n.table-hover > tbody > tr.danger:hover > td,\n.table-hover > tbody > tr:hover > .danger,\n.table-hover > tbody > tr.danger:hover > th {\n  background-color: #ebcccc;\n}\n.table-responsive {\n  min-height: 0.01%;\n  overflow-x: auto;\n}\n@media screen and (max-width: 767px) {\n  .table-responsive {\n    width: 100%;\n    margin-bottom: 15px;\n    overflow-y: hidden;\n    -ms-overflow-style: -ms-autohiding-scrollbar;\n    border: 1px solid #ddd;\n  }\n  .table-responsive > .table {\n    margin-bottom: 0;\n  }\n  .table-responsive > .table > thead > tr > th,\n  .table-responsive > .table > tbody > tr > th,\n  .table-responsive > .table > tfoot > tr > th,\n  .table-responsive > .table > thead > tr > td,\n  .table-responsive > .table > tbody > tr > td,\n  .table-responsive > .table > tfoot > tr > td {\n    white-space: nowrap;\n  }\n  .table-responsive > .table-bordered {\n    border: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:first-child,\n  .table-responsive > .table-bordered > tbody > tr > th:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n  .table-responsive > .table-bordered > thead > tr > td:first-child,\n  .table-responsive > .table-bordered > tbody > tr > td:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n    border-left: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:last-child,\n  .table-responsive > .table-bordered > tbody > tr > th:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n  .table-responsive > .table-bordered > thead > tr > td:last-child,\n  .table-responsive > .table-bordered > tbody > tr > td:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n    border-right: 0;\n  }\n  .table-responsive > .table-bordered > tbody > tr:last-child > th,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > th,\n  .table-responsive > .table-bordered > tbody > tr:last-child > td,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n    border-bottom: 0;\n  }\n}\nfieldset {\n  min-width: 0;\n  padding: 0;\n  margin: 0;\n  border: 0;\n}\nlegend {\n  display: block;\n  width: 100%;\n  padding: 0;\n  margin-bottom: 20px;\n  font-size: 21px;\n  line-height: inherit;\n  color: #333333;\n  border: 0;\n  border-bottom: 1px solid #e5e5e5;\n}\nlabel {\n  display: inline-block;\n  max-width: 100%;\n  margin-bottom: 5px;\n  font-weight: 700;\n}\ninput[type=\"search\"] {\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n  -webkit-appearance: none;\n  -moz-appearance: none;\n  appearance: none;\n}\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n  margin: 4px 0 0;\n  margin-top: 1px \\9;\n  line-height: normal;\n}\ninput[type=\"radio\"][disabled],\ninput[type=\"checkbox\"][disabled],\ninput[type=\"radio\"].disabled,\ninput[type=\"checkbox\"].disabled,\nfieldset[disabled] input[type=\"radio\"],\nfieldset[disabled] input[type=\"checkbox\"] {\n  cursor: not-allowed;\n}\ninput[type=\"file\"] {\n  display: block;\n}\ninput[type=\"range\"] {\n  display: block;\n  width: 100%;\n}\nselect[multiple],\nselect[size] {\n  height: auto;\n}\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n  outline: 5px auto -webkit-focus-ring-color;\n  
outline-offset: -2px;\n}\noutput {\n  display: block;\n  padding-top: 7px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555555;\n}\n.form-control {\n  display: block;\n  width: 100%;\n  height: 34px;\n  padding: 6px 12px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555555;\n  background-color: #fff;\n  background-image: none;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n  -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n  -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;\n  transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;\n  transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n  transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;\n}\n.form-control:focus {\n  border-color: #66afe9;\n  outline: 0;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);\n}\n.form-control::-moz-placeholder {\n  color: #999;\n  opacity: 1;\n}\n.form-control:-ms-input-placeholder {\n  color: #999;\n}\n.form-control::-webkit-input-placeholder {\n  color: #999;\n}\n.form-control::-ms-expand {\n  background-color: transparent;\n  border: 0;\n}\n.form-control[disabled],\n.form-control[readonly],\nfieldset[disabled] .form-control {\n  background-color: #eeeeee;\n  opacity: 1;\n}\n.form-control[disabled],\nfieldset[disabled] .form-control {\n  cursor: not-allowed;\n}\ntextarea.form-control {\n  height: auto;\n}\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n  input[type=\"date\"].form-control,\n  input[type=\"time\"].form-control,\n  input[type=\"datetime-local\"].form-control,\n  input[type=\"month\"].form-control {\n    line-height: 34px;\n  }\n  input[type=\"date\"].input-sm,\n  input[type=\"time\"].input-sm,\n  input[type=\"datetime-local\"].input-sm,\n  input[type=\"month\"].input-sm,\n  .input-group-sm input[type=\"date\"],\n  .input-group-sm input[type=\"time\"],\n  .input-group-sm input[type=\"datetime-local\"],\n  .input-group-sm input[type=\"month\"] {\n    line-height: 30px;\n  }\n  input[type=\"date\"].input-lg,\n  input[type=\"time\"].input-lg,\n  input[type=\"datetime-local\"].input-lg,\n  input[type=\"month\"].input-lg,\n  .input-group-lg input[type=\"date\"],\n  .input-group-lg input[type=\"time\"],\n  .input-group-lg input[type=\"datetime-local\"],\n  .input-group-lg input[type=\"month\"] {\n    line-height: 46px;\n  }\n}\n.form-group {\n  margin-bottom: 15px;\n}\n.radio,\n.checkbox {\n  position: relative;\n  display: block;\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.radio.disabled label,\n.checkbox.disabled label,\nfieldset[disabled] .radio label,\nfieldset[disabled] .checkbox label {\n  cursor: not-allowed;\n}\n.radio label,\n.checkbox label {\n  min-height: 20px;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: 400;\n  cursor: pointer;\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n  position: absolute;\n  margin-top: 4px \\9;\n  margin-left: -20px;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n  margin-top: 
-5px;\n}\n.radio-inline,\n.checkbox-inline {\n  position: relative;\n  display: inline-block;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: 400;\n  vertical-align: middle;\n  cursor: pointer;\n}\n.radio-inline.disabled,\n.checkbox-inline.disabled,\nfieldset[disabled] .radio-inline,\nfieldset[disabled] .checkbox-inline {\n  cursor: not-allowed;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n  margin-top: 0;\n  margin-left: 10px;\n}\n.form-control-static {\n  min-height: 34px;\n  padding-top: 7px;\n  padding-bottom: 7px;\n  margin-bottom: 0;\n}\n.form-control-static.input-lg,\n.form-control-static.input-sm {\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-sm {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-sm {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-sm,\nselect[multiple].input-sm {\n  height: auto;\n}\n.form-group-sm .form-control {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.form-group-sm select.form-control {\n  height: 30px;\n  line-height: 30px;\n}\n.form-group-sm textarea.form-control,\n.form-group-sm select[multiple].form-control {\n  height: auto;\n}\n.form-group-sm .form-control-static {\n  height: 30px;\n  min-height: 32px;\n  padding: 6px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n}\n.input-lg {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-lg {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-lg,\nselect[multiple].input-lg {\n  height: auto;\n}\n.form-group-lg .form-control {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\n.form-group-lg select.form-control {\n  height: 46px;\n  line-height: 46px;\n}\n.form-group-lg textarea.form-control,\n.form-group-lg select[multiple].form-control {\n  height: auto;\n}\n.form-group-lg .form-control-static {\n  height: 46px;\n  min-height: 38px;\n  padding: 11px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n}\n.has-feedback {\n  position: relative;\n}\n.has-feedback .form-control {\n  padding-right: 42.5px;\n}\n.form-control-feedback {\n  position: absolute;\n  top: 0;\n  right: 0;\n  z-index: 2;\n  display: block;\n  width: 34px;\n  height: 34px;\n  line-height: 34px;\n  text-align: center;\n  pointer-events: none;\n}\n.input-lg + .form-control-feedback,\n.input-group-lg + .form-control-feedback,\n.form-group-lg .form-control + .form-control-feedback {\n  width: 46px;\n  height: 46px;\n  line-height: 46px;\n}\n.input-sm + .form-control-feedback,\n.input-group-sm + .form-control-feedback,\n.form-group-sm .form-control + .form-control-feedback {\n  width: 30px;\n  height: 30px;\n  line-height: 30px;\n}\n.has-success .help-block,\n.has-success .control-label,\n.has-success .radio,\n.has-success .checkbox,\n.has-success .radio-inline,\n.has-success .checkbox-inline,\n.has-success.radio label,\n.has-success.checkbox label,\n.has-success.radio-inline label,\n.has-success.checkbox-inline label {\n  color: #3c763d;\n}\n.has-success .form-control {\n  border-color: #3c763d;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-success .form-control:focus {\n  border-color: #2b542c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px 
#67b168;\n}\n.has-success .input-group-addon {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #3c763d;\n}\n.has-success .form-control-feedback {\n  color: #3c763d;\n}\n.has-warning .help-block,\n.has-warning .control-label,\n.has-warning .radio,\n.has-warning .checkbox,\n.has-warning .radio-inline,\n.has-warning .checkbox-inline,\n.has-warning.radio label,\n.has-warning.checkbox label,\n.has-warning.radio-inline label,\n.has-warning.checkbox-inline label {\n  color: #8a6d3b;\n}\n.has-warning .form-control {\n  border-color: #8a6d3b;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-warning .form-control:focus {\n  border-color: #66512c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n}\n.has-warning .input-group-addon {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #8a6d3b;\n}\n.has-warning .form-control-feedback {\n  color: #8a6d3b;\n}\n.has-error .help-block,\n.has-error .control-label,\n.has-error .radio,\n.has-error .checkbox,\n.has-error .radio-inline,\n.has-error .checkbox-inline,\n.has-error.radio label,\n.has-error.checkbox label,\n.has-error.radio-inline label,\n.has-error.checkbox-inline label {\n  color: #a94442;\n}\n.has-error .form-control {\n  border-color: #a94442;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-error .form-control:focus {\n  border-color: #843534;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n}\n.has-error .input-group-addon {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #a94442;\n}\n.has-error .form-control-feedback {\n  color: #a94442;\n}\n.has-feedback label ~ .form-control-feedback {\n  top: 25px;\n}\n.has-feedback label.sr-only ~ .form-control-feedback {\n  top: 0;\n}\n.help-block {\n  display: block;\n  margin-top: 5px;\n  margin-bottom: 10px;\n  color: #737373;\n}\n@media (min-width: 768px) {\n  .form-inline .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .form-inline .form-control-static {\n    display: inline-block;\n  }\n  .form-inline .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .form-inline .input-group .input-group-addon,\n  .form-inline .input-group .input-group-btn,\n  .form-inline .input-group .form-control {\n    width: auto;\n  }\n  .form-inline .input-group > .form-control {\n    width: 100%;\n  }\n  .form-inline .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio,\n  .form-inline .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio label,\n  .form-inline .checkbox label {\n    padding-left: 0;\n  }\n  .form-inline .radio input[type=\"radio\"],\n  .form-inline .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .form-inline .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox,\n.form-horizontal .radio-inline,\n.form-horizontal .checkbox-inline {\n  padding-top: 7px;\n  margin-top: 0;\n  margin-bottom: 
0;\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox {\n  min-height: 27px;\n}\n.form-horizontal .form-group {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .control-label {\n    padding-top: 7px;\n    margin-bottom: 0;\n    text-align: right;\n  }\n}\n.form-horizontal .has-feedback .form-control-feedback {\n  right: 15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-lg .control-label {\n    padding-top: 11px;\n    font-size: 18px;\n  }\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-sm .control-label {\n    padding-top: 6px;\n    font-size: 12px;\n  }\n}\n.btn {\n  display: inline-block;\n  margin-bottom: 0;\n  font-weight: normal;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  -ms-touch-action: manipulation;\n  touch-action: manipulation;\n  cursor: pointer;\n  background-image: none;\n  border: 1px solid transparent;\n  padding: 6px 12px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  border-radius: 4px;\n  -webkit-user-select: none;\n  -moz-user-select: none;\n  -ms-user-select: none;\n  user-select: none;\n}\n.btn:focus,\n.btn:active:focus,\n.btn.active:focus,\n.btn.focus,\n.btn:active.focus,\n.btn.active.focus {\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n.btn:hover,\n.btn:focus,\n.btn.focus {\n  color: #333;\n  text-decoration: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n  outline: 0;\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn.disabled,\n.btn[disabled],\nfieldset[disabled] .btn {\n  cursor: not-allowed;\n  filter: alpha(opacity=65);\n  opacity: 0.65;\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\na.btn.disabled,\nfieldset[disabled] a.btn {\n  pointer-events: none;\n}\n.btn-default {\n  color: #333;\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default:focus,\n.btn-default.focus {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #8c8c8c;\n}\n.btn-default:hover {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #adadad;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n  color: #333;\n  background-color: #e6e6e6;\n  background-image: none;\n  border-color: #adadad;\n}\n.btn-default:active:hover,\n.btn-default.active:hover,\n.open > .dropdown-toggle.btn-default:hover,\n.btn-default:active:focus,\n.btn-default.active:focus,\n.open > .dropdown-toggle.btn-default:focus,\n.btn-default:active.focus,\n.btn-default.active.focus,\n.open > .dropdown-toggle.btn-default.focus {\n  color: #333;\n  background-color: #d4d4d4;\n  border-color: #8c8c8c;\n}\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus {\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default .badge {\n  color: #fff;\n  background-color: #333;\n}\n.btn-primary {\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary:focus,\n.btn-primary.focus {\n  color: #fff;\n  background-color: #286090;\n  border-color: #122b40;\n}\n.btn-primary:hover {\n  color: #fff;\n  background-color: #286090;\n  border-color: #204d74;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n  
color: #fff;\n  background-color: #286090;\n  background-image: none;\n  border-color: #204d74;\n}\n.btn-primary:active:hover,\n.btn-primary.active:hover,\n.open > .dropdown-toggle.btn-primary:hover,\n.btn-primary:active:focus,\n.btn-primary.active:focus,\n.open > .dropdown-toggle.btn-primary:focus,\n.btn-primary:active.focus,\n.btn-primary.active.focus,\n.open > .dropdown-toggle.btn-primary.focus {\n  color: #fff;\n  background-color: #204d74;\n  border-color: #122b40;\n}\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus {\n  background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.btn-success {\n  color: #fff;\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success:focus,\n.btn-success.focus {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #255625;\n}\n.btn-success:hover {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #398439;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n  color: #fff;\n  background-color: #449d44;\n  background-image: none;\n  border-color: #398439;\n}\n.btn-success:active:hover,\n.btn-success.active:hover,\n.open > .dropdown-toggle.btn-success:hover,\n.btn-success:active:focus,\n.btn-success.active:focus,\n.open > .dropdown-toggle.btn-success:focus,\n.btn-success:active.focus,\n.btn-success.active.focus,\n.open > .dropdown-toggle.btn-success.focus {\n  color: #fff;\n  background-color: #398439;\n  border-color: #255625;\n}\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus {\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success .badge {\n  color: #5cb85c;\n  background-color: #fff;\n}\n.btn-info {\n  color: #fff;\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info:focus,\n.btn-info.focus {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #1b6d85;\n}\n.btn-info:hover {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #269abc;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n  color: #fff;\n  background-color: #31b0d5;\n  background-image: none;\n  border-color: #269abc;\n}\n.btn-info:active:hover,\n.btn-info.active:hover,\n.open > .dropdown-toggle.btn-info:hover,\n.btn-info:active:focus,\n.btn-info.active:focus,\n.open > .dropdown-toggle.btn-info:focus,\n.btn-info:active.focus,\n.btn-info.active.focus,\n.open > .dropdown-toggle.btn-info.focus {\n  color: #fff;\n  background-color: #269abc;\n  border-color: #1b6d85;\n}\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus {\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info .badge {\n  color: #5bc0de;\n  background-color: #fff;\n}\n.btn-warning {\n  color: #fff;\n  background-color: #f0ad4e;\n  border-color: 
#eea236;\n}\n.btn-warning:focus,\n.btn-warning.focus {\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #985f0d;\n}\n.btn-warning:hover {\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #d58512;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n  color: #fff;\n  background-color: #ec971f;\n  background-image: none;\n  border-color: #d58512;\n}\n.btn-warning:active:hover,\n.btn-warning.active:hover,\n.open > .dropdown-toggle.btn-warning:hover,\n.btn-warning:active:focus,\n.btn-warning.active:focus,\n.open > .dropdown-toggle.btn-warning:focus,\n.btn-warning:active.focus,\n.btn-warning.active.focus,\n.open > .dropdown-toggle.btn-warning.focus {\n  color: #fff;\n  background-color: #d58512;\n  border-color: #985f0d;\n}\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus {\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning .badge {\n  color: #f0ad4e;\n  background-color: #fff;\n}\n.btn-danger {\n  color: #fff;\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger:focus,\n.btn-danger.focus {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #761c19;\n}\n.btn-danger:hover {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #ac2925;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n  color: #fff;\n  background-color: #c9302c;\n  background-image: none;\n  border-color: #ac2925;\n}\n.btn-danger:active:hover,\n.btn-danger.active:hover,\n.open > .dropdown-toggle.btn-danger:hover,\n.btn-danger:active:focus,\n.btn-danger.active:focus,\n.open > .dropdown-toggle.btn-danger:focus,\n.btn-danger:active.focus,\n.btn-danger.active.focus,\n.open > .dropdown-toggle.btn-danger.focus {\n  color: #fff;\n  background-color: #ac2925;\n  border-color: #761c19;\n}\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus {\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger .badge {\n  color: #d9534f;\n  background-color: #fff;\n}\n.btn-link {\n  font-weight: 400;\n  color: #337ab7;\n  border-radius: 0;\n}\n.btn-link,\n.btn-link:active,\n.btn-link.active,\n.btn-link[disabled],\nfieldset[disabled] .btn-link {\n  background-color: transparent;\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\n.btn-link,\n.btn-link:hover,\n.btn-link:focus,\n.btn-link:active {\n  border-color: transparent;\n}\n.btn-link:hover,\n.btn-link:focus {\n  color: #23527c;\n  text-decoration: underline;\n  background-color: transparent;\n}\n.btn-link[disabled]:hover,\nfieldset[disabled] .btn-link:hover,\n.btn-link[disabled]:focus,\nfieldset[disabled] .btn-link:focus {\n  color: #777777;\n  text-decoration: none;\n}\n.btn-lg,\n.btn-group-lg > .btn {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\n.btn-sm,\n.btn-group-sm > .btn {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-xs,\n.btn-group-xs > .btn {\n  padding: 1px 5px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 
3px;\n}\n.btn-block {\n  display: block;\n  width: 100%;\n}\n.btn-block + .btn-block {\n  margin-top: 5px;\n}\ninput[type=\"submit\"].btn-block,\ninput[type=\"reset\"].btn-block,\ninput[type=\"button\"].btn-block {\n  width: 100%;\n}\n.fade {\n  opacity: 0;\n  -webkit-transition: opacity 0.15s linear;\n  -o-transition: opacity 0.15s linear;\n  transition: opacity 0.15s linear;\n}\n.fade.in {\n  opacity: 1;\n}\n.collapse {\n  display: none;\n}\n.collapse.in {\n  display: block;\n}\ntr.collapse.in {\n  display: table-row;\n}\ntbody.collapse.in {\n  display: table-row-group;\n}\n.collapsing {\n  position: relative;\n  height: 0;\n  overflow: hidden;\n  -webkit-transition-property: height, visibility;\n  -o-transition-property: height, visibility;\n  transition-property: height, visibility;\n  -webkit-transition-duration: 0.35s;\n  -o-transition-duration: 0.35s;\n  transition-duration: 0.35s;\n  -webkit-transition-timing-function: ease;\n  -o-transition-timing-function: ease;\n  transition-timing-function: ease;\n}\n.caret {\n  display: inline-block;\n  width: 0;\n  height: 0;\n  margin-left: 2px;\n  vertical-align: middle;\n  border-top: 4px dashed;\n  border-top: 4px solid \\9;\n  border-right: 4px solid transparent;\n  border-left: 4px solid transparent;\n}\n.dropup,\n.dropdown {\n  position: relative;\n}\n.dropdown-toggle:focus {\n  outline: 0;\n}\n.dropdown-menu {\n  position: absolute;\n  top: 100%;\n  left: 0;\n  z-index: 1000;\n  display: none;\n  float: left;\n  min-width: 160px;\n  padding: 5px 0;\n  margin: 2px 0 0;\n  font-size: 14px;\n  text-align: left;\n  list-style: none;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, 0.15);\n  border-radius: 4px;\n  -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n  box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n}\n.dropdown-menu.pull-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu .divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.dropdown-menu > li > a {\n  display: block;\n  padding: 3px 20px;\n  clear: both;\n  font-weight: 400;\n  line-height: 1.42857143;\n  color: #333333;\n  white-space: nowrap;\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  color: #262626;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  color: #fff;\n  text-decoration: none;\n  background-color: #337ab7;\n  outline: 0;\n}\n.dropdown-menu > .disabled > a,\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  color: #777777;\n}\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n  background-image: none;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n}\n.open > .dropdown-menu {\n  display: block;\n}\n.open > a {\n  outline: 0;\n}\n.dropdown-menu-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu-left {\n  right: auto;\n  left: 0;\n}\n.dropdown-header {\n  display: block;\n  padding: 3px 20px;\n  font-size: 12px;\n  line-height: 1.42857143;\n  color: #777777;\n  white-space: nowrap;\n}\n.dropdown-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 990;\n}\n.pull-right > .dropdown-menu {\n  right: 0;\n  left: auto;\n}\n.dropup .caret,\n.navbar-fixed-bottom .dropdown .caret {\n  content: 
\"\";\n  border-top: 0;\n  border-bottom: 4px dashed;\n  border-bottom: 4px solid \\9;\n}\n.dropup .dropdown-menu,\n.navbar-fixed-bottom .dropdown .dropdown-menu {\n  top: auto;\n  bottom: 100%;\n  margin-bottom: 2px;\n}\n@media (min-width: 768px) {\n  .navbar-right .dropdown-menu {\n    right: 0;\n    left: auto;\n  }\n  .navbar-right .dropdown-menu-left {\n    right: auto;\n    left: 0;\n  }\n}\n.btn-group,\n.btn-group-vertical {\n  position: relative;\n  display: inline-block;\n  vertical-align: middle;\n}\n.btn-group > .btn,\n.btn-group-vertical > .btn {\n  position: relative;\n  float: left;\n}\n.btn-group > .btn:hover,\n.btn-group-vertical > .btn:hover,\n.btn-group > .btn:focus,\n.btn-group-vertical > .btn:focus,\n.btn-group > .btn:active,\n.btn-group-vertical > .btn:active,\n.btn-group > .btn.active,\n.btn-group-vertical > .btn.active {\n  z-index: 2;\n}\n.btn-group .btn + .btn,\n.btn-group .btn + .btn-group,\n.btn-group .btn-group + .btn,\n.btn-group .btn-group + .btn-group {\n  margin-left: -1px;\n}\n.btn-toolbar {\n  margin-left: -5px;\n}\n.btn-toolbar .btn,\n.btn-toolbar .btn-group,\n.btn-toolbar .input-group {\n  float: left;\n}\n.btn-toolbar > .btn,\n.btn-toolbar > .btn-group,\n.btn-toolbar > .input-group {\n  margin-left: 5px;\n}\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n  border-radius: 0;\n}\n.btn-group > .btn:first-child {\n  margin-left: 0;\n}\n.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group > .btn-group {\n  float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n  outline: 0;\n}\n.btn-group > .btn + .dropdown-toggle {\n  padding-right: 8px;\n  padding-left: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n  padding-right: 12px;\n  padding-left: 12px;\n}\n.btn-group.open .dropdown-toggle {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-group.open .dropdown-toggle.btn-link {\n  -webkit-box-shadow: none;\n  box-shadow: none;\n}\n.btn .caret {\n  margin-left: 0;\n}\n.btn-lg .caret {\n  border-width: 5px 5px 0;\n  border-bottom-width: 0;\n}\n.dropup .btn-lg .caret {\n  border-width: 0 5px 5px;\n}\n.btn-group-vertical > .btn,\n.btn-group-vertical > .btn-group,\n.btn-group-vertical > .btn-group > .btn {\n  display: block;\n  float: none;\n  width: 100%;\n  max-width: 100%;\n}\n.btn-group-vertical > .btn-group > .btn {\n  float: none;\n}\n.btn-group-vertical > .btn + .btn,\n.btn-group-vertical > .btn + .btn-group,\n.btn-group-vertical > .btn-group + .btn,\n.btn-group-vertical > .btn-group + .btn-group {\n  margin-top: -1px;\n  margin-left: 0;\n}\n.btn-group-vertical > .btn:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn:first-child:not(:last-child) {\n  border-top-left-radius: 4px;\n  
border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn:last-child:not(:first-child) {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.btn-group-justified {\n  display: table;\n  width: 100%;\n  table-layout: fixed;\n  border-collapse: separate;\n}\n.btn-group-justified > .btn,\n.btn-group-justified > .btn-group {\n  display: table-cell;\n  float: none;\n  width: 1%;\n}\n.btn-group-justified > .btn-group .btn {\n  width: 100%;\n}\n.btn-group-justified > .btn-group .dropdown-menu {\n  left: auto;\n}\n[data-toggle=\"buttons\"] > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn input[type=\"checkbox\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"checkbox\"] {\n  position: absolute;\n  clip: rect(0, 0, 0, 0);\n  pointer-events: none;\n}\n.input-group {\n  position: relative;\n  display: table;\n  border-collapse: separate;\n}\n.input-group[class*=\"col-\"] {\n  float: none;\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-group .form-control {\n  position: relative;\n  z-index: 2;\n  float: left;\n  width: 100%;\n  margin-bottom: 0;\n}\n.input-group .form-control:focus {\n  z-index: 3;\n}\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-group-lg > .form-control,\nselect.input-group-lg > .input-group-addon,\nselect.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-group-lg > .form-control,\ntextarea.input-group-lg > .input-group-addon,\ntextarea.input-group-lg > .input-group-btn > .btn,\nselect[multiple].input-group-lg > .form-control,\nselect[multiple].input-group-lg > .input-group-addon,\nselect[multiple].input-group-lg > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-group-sm > .form-control,\nselect.input-group-sm > .input-group-addon,\nselect.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-group-sm > .form-control,\ntextarea.input-group-sm > .input-group-addon,\ntextarea.input-group-sm > .input-group-btn > .btn,\nselect[multiple].input-group-sm > .form-control,\nselect[multiple].input-group-sm > .input-group-addon,\nselect[multiple].input-group-sm > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n  display: table-cell;\n}\n.input-group-addon:not(:first-child):not(:last-child),\n.input-group-btn:not(:first-child):not(:last-child),\n.input-group 
.form-control:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.input-group-addon,\n.input-group-btn {\n  width: 1%;\n  white-space: nowrap;\n  vertical-align: middle;\n}\n.input-group-addon {\n  padding: 6px 12px;\n  font-size: 14px;\n  font-weight: 400;\n  line-height: 1;\n  color: #555555;\n  text-align: center;\n  background-color: #eeeeee;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\n.input-group-addon.input-sm {\n  padding: 5px 10px;\n  font-size: 12px;\n  border-radius: 3px;\n}\n.input-group-addon.input-lg {\n  padding: 10px 16px;\n  font-size: 18px;\n  border-radius: 6px;\n}\n.input-group-addon input[type=\"radio\"],\n.input-group-addon input[type=\"checkbox\"] {\n  margin-top: 0;\n}\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.input-group-addon:first-child {\n  border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.input-group-addon:last-child {\n  border-left: 0;\n}\n.input-group-btn {\n  position: relative;\n  font-size: 0;\n  white-space: nowrap;\n}\n.input-group-btn > .btn {\n  position: relative;\n}\n.input-group-btn > .btn + .btn {\n  margin-left: -1px;\n}\n.input-group-btn > .btn:hover,\n.input-group-btn > .btn:focus,\n.input-group-btn > .btn:active {\n  z-index: 2;\n}\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group {\n  margin-right: -1px;\n}\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group {\n  z-index: 2;\n  margin-left: -1px;\n}\n.nav {\n  padding-left: 0;\n  margin-bottom: 0;\n  list-style: none;\n}\n.nav > li {\n  position: relative;\n  display: block;\n}\n.nav > li > a {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n}\n.nav > li > a:hover,\n.nav > li > a:focus {\n  text-decoration: none;\n  background-color: #eeeeee;\n}\n.nav > li.disabled > a {\n  color: #777777;\n}\n.nav > li.disabled > a:hover,\n.nav > li.disabled > a:focus {\n  color: #777777;\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n}\n.nav .open > a,\n.nav .open > a:hover,\n.nav .open > a:focus {\n  background-color: #eeeeee;\n  border-color: #337ab7;\n}\n.nav .nav-divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.nav > li > a > img {\n  max-width: none;\n}\n.nav-tabs {\n  border-bottom: 1px solid #ddd;\n}\n.nav-tabs > li {\n  float: left;\n  margin-bottom: -1px;\n}\n.nav-tabs > li > a {\n  margin-right: 2px;\n  line-height: 1.42857143;\n  border: 1px solid transparent;\n  border-radius: 4px 4px 0 0;\n}\n.nav-tabs > li > a:hover {\n  border-color: #eeeeee #eeeeee #ddd;\n}\n.nav-tabs > li.active > a,\n.nav-tabs > li.active > a:hover,\n.nav-tabs > li.active > a:focus {\n  color: #555555;\n  cursor: default;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-bottom-color: 
transparent;\n}\n.nav-tabs.nav-justified {\n  width: 100%;\n  border-bottom: 0;\n}\n.nav-tabs.nav-justified > li {\n  float: none;\n}\n.nav-tabs.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-tabs.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-tabs.nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs.nav-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs.nav-justified > .active > a,\n.nav-tabs.nav-justified > .active > a:hover,\n.nav-tabs.nav-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs.nav-justified > .active > a,\n  .nav-tabs.nav-justified > .active > a:hover,\n  .nav-tabs.nav-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.nav-pills > li {\n  float: left;\n}\n.nav-pills > li > a {\n  border-radius: 4px;\n}\n.nav-pills > li + li {\n  margin-left: 2px;\n}\n.nav-pills > li.active > a,\n.nav-pills > li.active > a:hover,\n.nav-pills > li.active > a:focus {\n  color: #fff;\n  background-color: #337ab7;\n}\n.nav-stacked > li {\n  float: none;\n}\n.nav-stacked > li + li {\n  margin-top: 2px;\n  margin-left: 0;\n}\n.nav-justified {\n  width: 100%;\n}\n.nav-justified > li {\n  float: none;\n}\n.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs-justified {\n  border-bottom: 0;\n}\n.nav-tabs-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs-justified > .active > a,\n.nav-tabs-justified > .active > a:hover,\n.nav-tabs-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs-justified > .active > a,\n  .nav-tabs-justified > .active > a:hover,\n  .nav-tabs-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.tab-content > .tab-pane {\n  display: none;\n}\n.tab-content > .active {\n  display: block;\n}\n.nav-tabs .dropdown-menu {\n  margin-top: -1px;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar {\n  position: relative;\n  min-height: 50px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n}\n@media (min-width: 768px) {\n  .navbar {\n    border-radius: 4px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-header {\n    float: left;\n  }\n}\n.navbar-collapse {\n  padding-right: 15px;\n  padding-left: 15px;\n  overflow-x: visible;\n  border-top: 1px solid transparent;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1);\n  -webkit-overflow-scrolling: touch;\n}\n.navbar-collapse.in {\n  overflow-y: auto;\n}\n@media (min-width: 768px) {\n  .navbar-collapse {\n    width: auto;\n    border-top: 0;\n    -webkit-box-shadow: none;\n    box-shadow: none;\n  }\n  .navbar-collapse.collapse {\n    display: block !important;\n    height: auto !important;\n    padding-bottom: 0;\n    overflow: visible !important;\n  }\n  .navbar-collapse.in 
{\n    overflow-y: visible;\n  }\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-static-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  position: fixed;\n  right: 0;\n  left: 0;\n  z-index: 1030;\n}\n.navbar-fixed-top .navbar-collapse,\n.navbar-fixed-bottom .navbar-collapse {\n  max-height: 340px;\n}\n@media (max-device-width: 480px) and (orientation: landscape) {\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    max-height: 200px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-fixed-top,\n  .navbar-fixed-bottom {\n    border-radius: 0;\n  }\n}\n.navbar-fixed-top {\n  top: 0;\n  border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n  bottom: 0;\n  margin-bottom: 0;\n  border-width: 1px 0 0;\n}\n.container > .navbar-header,\n.container-fluid > .navbar-header,\n.container > .navbar-collapse,\n.container-fluid > .navbar-collapse {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .container > .navbar-header,\n  .container-fluid > .navbar-header,\n  .container > .navbar-collapse,\n  .container-fluid > .navbar-collapse {\n    margin-right: 0;\n    margin-left: 0;\n  }\n}\n.navbar-static-top {\n  z-index: 1000;\n  border-width: 0 0 1px;\n}\n@media (min-width: 768px) {\n  .navbar-static-top {\n    border-radius: 0;\n  }\n}\n.navbar-brand {\n  float: left;\n  height: 50px;\n  padding: 15px 15px;\n  font-size: 18px;\n  line-height: 20px;\n}\n.navbar-brand:hover,\n.navbar-brand:focus {\n  text-decoration: none;\n}\n.navbar-brand > img {\n  display: block;\n}\n@media (min-width: 768px) {\n  .navbar > .container .navbar-brand,\n  .navbar > .container-fluid .navbar-brand {\n    margin-left: -15px;\n  }\n}\n.navbar-toggle {\n  position: relative;\n  float: right;\n  padding: 9px 10px;\n  margin-right: 15px;\n  margin-top: 8px;\n  margin-bottom: 8px;\n  background-color: transparent;\n  background-image: none;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.navbar-toggle:focus {\n  outline: 0;\n}\n.navbar-toggle .icon-bar {\n  display: block;\n  width: 22px;\n  height: 2px;\n  border-radius: 1px;\n}\n.navbar-toggle .icon-bar + .icon-bar {\n  margin-top: 4px;\n}\n@media (min-width: 768px) {\n  .navbar-toggle {\n    display: none;\n  }\n}\n.navbar-nav {\n  margin: 7.5px -15px;\n}\n.navbar-nav > li > a {\n  padding-top: 10px;\n  padding-bottom: 10px;\n  line-height: 20px;\n}\n@media (max-width: 767px) {\n  .navbar-nav .open .dropdown-menu {\n    position: static;\n    float: none;\n    width: auto;\n    margin-top: 0;\n    background-color: transparent;\n    border: 0;\n    -webkit-box-shadow: none;\n    box-shadow: none;\n  }\n  .navbar-nav .open .dropdown-menu > li > a,\n  .navbar-nav .open .dropdown-menu .dropdown-header {\n    padding: 5px 15px 5px 25px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a {\n    line-height: 20px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-nav .open .dropdown-menu > li > a:focus {\n    background-image: none;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-nav {\n    float: left;\n    margin: 0;\n  }\n  .navbar-nav > li {\n    float: left;\n  }\n  .navbar-nav > li > a {\n    padding-top: 15px;\n    padding-bottom: 15px;\n  }\n}\n.navbar-form {\n  padding: 10px 15px;\n  margin-right: -15px;\n  margin-left: -15px;\n  border-top: 1px solid transparent;\n  border-bottom: 1px solid transparent;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 
0 1px 0 rgba(255, 255, 255, 0.1);\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n  margin-top: 8px;\n  margin-bottom: 8px;\n}\n@media (min-width: 768px) {\n  .navbar-form .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control-static {\n    display: inline-block;\n  }\n  .navbar-form .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .navbar-form .input-group .input-group-addon,\n  .navbar-form .input-group .input-group-btn,\n  .navbar-form .input-group .form-control {\n    width: auto;\n  }\n  .navbar-form .input-group > .form-control {\n    width: 100%;\n  }\n  .navbar-form .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio,\n  .navbar-form .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio label,\n  .navbar-form .checkbox label {\n    padding-left: 0;\n  }\n  .navbar-form .radio input[type=\"radio\"],\n  .navbar-form .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .navbar-form .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n@media (max-width: 767px) {\n  .navbar-form .form-group {\n    margin-bottom: 5px;\n  }\n  .navbar-form .form-group:last-child {\n    margin-bottom: 0;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-form {\n    width: auto;\n    padding-top: 0;\n    padding-bottom: 0;\n    margin-right: 0;\n    margin-left: 0;\n    border: 0;\n    -webkit-box-shadow: none;\n    box-shadow: none;\n  }\n}\n.navbar-nav > li > .dropdown-menu {\n  margin-top: 0;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n  margin-bottom: 0;\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.navbar-btn {\n  margin-top: 8px;\n  margin-bottom: 8px;\n}\n.navbar-btn.btn-sm {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.navbar-btn.btn-xs {\n  margin-top: 14px;\n  margin-bottom: 14px;\n}\n.navbar-text {\n  margin-top: 15px;\n  margin-bottom: 15px;\n}\n@media (min-width: 768px) {\n  .navbar-text {\n    float: left;\n    margin-right: 15px;\n    margin-left: 15px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-left {\n    float: left !important;\n  }\n  .navbar-right {\n    float: right !important;\n    margin-right: -15px;\n  }\n  .navbar-right ~ .navbar-right {\n    margin-right: 0;\n  }\n}\n.navbar-default {\n  background-color: #f8f8f8;\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-brand {\n  color: #777;\n}\n.navbar-default .navbar-brand:hover,\n.navbar-default .navbar-brand:focus {\n  color: #5e5e5e;\n  background-color: transparent;\n}\n.navbar-default .navbar-text {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a:hover,\n.navbar-default .navbar-nav > li > a:focus {\n  color: #333;\n  background-color: transparent;\n}\n.navbar-default .navbar-nav > .active > a,\n.navbar-default .navbar-nav > .active > a:hover,\n.navbar-default .navbar-nav > .active > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .disabled > a,\n.navbar-default .navbar-nav > .disabled > a:hover,\n.navbar-default 
.navbar-nav > .disabled > a:focus {\n  color: #ccc;\n  background-color: transparent;\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .open > a:hover,\n.navbar-default .navbar-nav > .open > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n@media (max-width: 767px) {\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a {\n    color: #777;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #333;\n    background-color: transparent;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #555;\n    background-color: #e7e7e7;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #ccc;\n    background-color: transparent;\n  }\n}\n.navbar-default .navbar-toggle {\n  border-color: #ddd;\n}\n.navbar-default .navbar-toggle:hover,\n.navbar-default .navbar-toggle:focus {\n  background-color: #ddd;\n}\n.navbar-default .navbar-toggle .icon-bar {\n  background-color: #888;\n}\n.navbar-default .navbar-collapse,\n.navbar-default .navbar-form {\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-link {\n  color: #777;\n}\n.navbar-default .navbar-link:hover {\n  color: #333;\n}\n.navbar-default .btn-link {\n  color: #777;\n}\n.navbar-default .btn-link:hover,\n.navbar-default .btn-link:focus {\n  color: #333;\n}\n.navbar-default .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-default .btn-link:hover,\n.navbar-default .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-default .btn-link:focus {\n  color: #ccc;\n}\n.navbar-inverse {\n  background-color: #222;\n  border-color: #080808;\n}\n.navbar-inverse .navbar-brand {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-brand:hover,\n.navbar-inverse .navbar-brand:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-text {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a:hover,\n.navbar-inverse .navbar-nav > li > a:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .active > a,\n.navbar-inverse .navbar-nav > .active > a:hover,\n.navbar-inverse .navbar-nav > .active > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n.navbar-inverse .navbar-nav > .disabled > a,\n.navbar-inverse .navbar-nav > .disabled > a:hover,\n.navbar-inverse .navbar-nav > .disabled > a:focus {\n  color: #444;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .open > a:hover,\n.navbar-inverse .navbar-nav > .open > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n@media (max-width: 767px) {\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {\n    border-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu .divider {\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {\n    color: #9d9d9d;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #fff;\n    background-color: 
transparent;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #444;\n    background-color: transparent;\n  }\n}\n.navbar-inverse .navbar-toggle {\n  border-color: #333;\n}\n.navbar-inverse .navbar-toggle:hover,\n.navbar-inverse .navbar-toggle:focus {\n  background-color: #333;\n}\n.navbar-inverse .navbar-toggle .icon-bar {\n  background-color: #fff;\n}\n.navbar-inverse .navbar-collapse,\n.navbar-inverse .navbar-form {\n  border-color: #101010;\n}\n.navbar-inverse .navbar-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-link:hover {\n  color: #fff;\n}\n.navbar-inverse .btn-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link:focus {\n  color: #fff;\n}\n.navbar-inverse .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-inverse .btn-link:focus {\n  color: #444;\n}\n.breadcrumb {\n  padding: 8px 15px;\n  margin-bottom: 20px;\n  list-style: none;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n}\n.breadcrumb > li {\n  display: inline-block;\n}\n.breadcrumb > li + li:before {\n  padding: 0 5px;\n  color: #ccc;\n  content: \"/\\00a0\";\n}\n.breadcrumb > .active {\n  color: #777777;\n}\n.pagination {\n  display: inline-block;\n  padding-left: 0;\n  margin: 20px 0;\n  border-radius: 4px;\n}\n.pagination > li {\n  display: inline;\n}\n.pagination > li > a,\n.pagination > li > span {\n  position: relative;\n  float: left;\n  padding: 6px 12px;\n  margin-left: -1px;\n  line-height: 1.42857143;\n  color: #337ab7;\n  text-decoration: none;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.pagination > li > a:hover,\n.pagination > li > span:hover,\n.pagination > li > a:focus,\n.pagination > li > span:focus {\n  z-index: 2;\n  color: #23527c;\n  background-color: #eeeeee;\n  border-color: #ddd;\n}\n.pagination > li:first-child > a,\n.pagination > li:first-child > span {\n  margin-left: 0;\n  border-top-left-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.pagination > li:last-child > a,\n.pagination > li:last-child > span {\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 4px;\n}\n.pagination > .active > a,\n.pagination > .active > span,\n.pagination > .active > a:hover,\n.pagination > .active > span:hover,\n.pagination > .active > a:focus,\n.pagination > .active > span:focus {\n  z-index: 3;\n  color: #fff;\n  cursor: default;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.pagination > .disabled > span,\n.pagination > .disabled > span:hover,\n.pagination > .disabled > span:focus,\n.pagination > .disabled > a,\n.pagination > .disabled > a:hover,\n.pagination > .disabled > a:focus {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #fff;\n  border-color: #ddd;\n}\n.pagination-lg > li > a,\n.pagination-lg > li > span {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n}\n.pagination-lg > li:first-child > a,\n.pagination-lg > li:first-child > span {\n  border-top-left-radius: 6px;\n  border-bottom-left-radius: 6px;\n}\n.pagination-lg > 
li:last-child > a,\n.pagination-lg > li:last-child > span {\n  border-top-right-radius: 6px;\n  border-bottom-right-radius: 6px;\n}\n.pagination-sm > li > a,\n.pagination-sm > li > span {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n}\n.pagination-sm > li:first-child > a,\n.pagination-sm > li:first-child > span {\n  border-top-left-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.pagination-sm > li:last-child > a,\n.pagination-sm > li:last-child > span {\n  border-top-right-radius: 3px;\n  border-bottom-right-radius: 3px;\n}\n.pager {\n  padding-left: 0;\n  margin: 20px 0;\n  text-align: center;\n  list-style: none;\n}\n.pager li {\n  display: inline;\n}\n.pager li > a,\n.pager li > span {\n  display: inline-block;\n  padding: 5px 14px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 15px;\n}\n.pager li > a:hover,\n.pager li > a:focus {\n  text-decoration: none;\n  background-color: #eeeeee;\n}\n.pager .next > a,\n.pager .next > span {\n  float: right;\n}\n.pager .previous > a,\n.pager .previous > span {\n  float: left;\n}\n.pager .disabled > a,\n.pager .disabled > a:hover,\n.pager .disabled > a:focus,\n.pager .disabled > span {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #fff;\n}\n.label {\n  display: inline;\n  padding: 0.2em 0.6em 0.3em;\n  font-size: 75%;\n  font-weight: 700;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  border-radius: 0.25em;\n}\na.label:hover,\na.label:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.label:empty {\n  display: none;\n}\n.btn .label {\n  position: relative;\n  top: -1px;\n}\n.label-default {\n  background-color: #777777;\n}\n.label-default[href]:hover,\n.label-default[href]:focus {\n  background-color: #5e5e5e;\n}\n.label-primary {\n  background-color: #337ab7;\n}\n.label-primary[href]:hover,\n.label-primary[href]:focus {\n  background-color: #286090;\n}\n.label-success {\n  background-color: #5cb85c;\n}\n.label-success[href]:hover,\n.label-success[href]:focus {\n  background-color: #449d44;\n}\n.label-info {\n  background-color: #5bc0de;\n}\n.label-info[href]:hover,\n.label-info[href]:focus {\n  background-color: #31b0d5;\n}\n.label-warning {\n  background-color: #f0ad4e;\n}\n.label-warning[href]:hover,\n.label-warning[href]:focus {\n  background-color: #ec971f;\n}\n.label-danger {\n  background-color: #d9534f;\n}\n.label-danger[href]:hover,\n.label-danger[href]:focus {\n  background-color: #c9302c;\n}\n.badge {\n  display: inline-block;\n  min-width: 10px;\n  padding: 3px 7px;\n  font-size: 12px;\n  font-weight: bold;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  background-color: #777777;\n  border-radius: 10px;\n}\n.badge:empty {\n  display: none;\n}\n.btn .badge {\n  position: relative;\n  top: -1px;\n}\n.btn-xs .badge,\n.btn-group-xs > .btn .badge {\n  top: 0;\n  padding: 1px 5px;\n}\na.badge:hover,\na.badge:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.list-group-item > .badge {\n  float: right;\n}\n.list-group-item > .badge + .badge {\n  margin-right: 5px;\n}\n.nav-pills > li > a > .badge {\n  margin-left: 3px;\n}\n.jumbotron {\n  padding-top: 30px;\n  padding-bottom: 30px;\n  margin-bottom: 30px;\n  color: inherit;\n  background-color: #eeeeee;\n}\n.jumbotron 
h1,\n.jumbotron .h1 {\n  color: inherit;\n}\n.jumbotron p {\n  margin-bottom: 15px;\n  font-size: 21px;\n  font-weight: 200;\n}\n.jumbotron > hr {\n  border-top-color: #d5d5d5;\n}\n.container .jumbotron,\n.container-fluid .jumbotron {\n  padding-right: 15px;\n  padding-left: 15px;\n  border-radius: 6px;\n}\n.jumbotron .container {\n  max-width: 100%;\n}\n@media screen and (min-width: 768px) {\n  .jumbotron {\n    padding-top: 48px;\n    padding-bottom: 48px;\n  }\n  .container .jumbotron,\n  .container-fluid .jumbotron {\n    padding-right: 60px;\n    padding-left: 60px;\n  }\n  .jumbotron h1,\n  .jumbotron .h1 {\n    font-size: 63px;\n  }\n}\n.thumbnail {\n  display: block;\n  padding: 4px;\n  margin-bottom: 20px;\n  line-height: 1.42857143;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: border 0.2s ease-in-out;\n  -o-transition: border 0.2s ease-in-out;\n  transition: border 0.2s ease-in-out;\n}\n.thumbnail > img,\n.thumbnail a > img {\n  margin-right: auto;\n  margin-left: auto;\n}\na.thumbnail:hover,\na.thumbnail:focus,\na.thumbnail.active {\n  border-color: #337ab7;\n}\n.thumbnail .caption {\n  padding: 9px;\n  color: #333333;\n}\n.alert {\n  padding: 15px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.alert h4 {\n  margin-top: 0;\n  color: inherit;\n}\n.alert .alert-link {\n  font-weight: bold;\n}\n.alert > p,\n.alert > ul {\n  margin-bottom: 0;\n}\n.alert > p + p {\n  margin-top: 5px;\n}\n.alert-dismissable,\n.alert-dismissible {\n  padding-right: 35px;\n}\n.alert-dismissable .close,\n.alert-dismissible .close {\n  position: relative;\n  top: -2px;\n  right: -21px;\n  color: inherit;\n}\n.alert-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.alert-success hr {\n  border-top-color: #c9e2b3;\n}\n.alert-success .alert-link {\n  color: #2b542c;\n}\n.alert-info {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.alert-info hr {\n  border-top-color: #a6e1ec;\n}\n.alert-info .alert-link {\n  color: #245269;\n}\n.alert-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.alert-warning hr {\n  border-top-color: #f7e1b5;\n}\n.alert-warning .alert-link {\n  color: #66512c;\n}\n.alert-danger {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.alert-danger hr {\n  border-top-color: #e4b9c0;\n}\n.alert-danger .alert-link {\n  color: #843534;\n}\n@-webkit-keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n@-o-keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n@keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n.progress {\n  height: 20px;\n  margin-bottom: 20px;\n  overflow: hidden;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n  box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n}\n.progress-bar {\n  float: left;\n  width: 0%;\n  height: 100%;\n  font-size: 12px;\n  line-height: 20px;\n  color: #fff;\n  text-align: center;\n  background-color: #337ab7;\n  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  -webkit-transition: width 0.6s ease;\n  -o-transition: width 0.6s ease;\n  transition: width 0.6s 
ease;\n}\n.progress-striped .progress-bar,\n.progress-bar-striped {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  -webkit-background-size: 40px 40px;\n  background-size: 40px 40px;\n}\n.progress.active .progress-bar,\n.progress-bar.active {\n  -webkit-animation: progress-bar-stripes 2s linear infinite;\n  -o-animation: progress-bar-stripes 2s linear infinite;\n  animation: progress-bar-stripes 2s linear infinite;\n}\n.progress-bar-success {\n  background-color: #5cb85c;\n}\n.progress-striped .progress-bar-success {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-info {\n  background-color: #5bc0de;\n}\n.progress-striped .progress-bar-info {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-warning {\n  background-color: #f0ad4e;\n}\n.progress-striped .progress-bar-warning {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-danger {\n  background-color: #d9534f;\n}\n.progress-striped .progress-bar-danger {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, 
transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.media {\n  margin-top: 15px;\n}\n.media:first-child {\n  margin-top: 0;\n}\n.media,\n.media-body {\n  overflow: hidden;\n  zoom: 1;\n}\n.media-body {\n  width: 10000px;\n}\n.media-object {\n  display: block;\n}\n.media-object.img-thumbnail {\n  max-width: none;\n}\n.media-right,\n.media > .pull-right {\n  padding-left: 10px;\n}\n.media-left,\n.media > .pull-left {\n  padding-right: 10px;\n}\n.media-left,\n.media-right,\n.media-body {\n  display: table-cell;\n  vertical-align: top;\n}\n.media-middle {\n  vertical-align: middle;\n}\n.media-bottom {\n  vertical-align: bottom;\n}\n.media-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.media-list {\n  padding-left: 0;\n  list-style: none;\n}\n.list-group {\n  padding-left: 0;\n  margin-bottom: 20px;\n}\n.list-group-item {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n  margin-bottom: -1px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.list-group-item:first-child {\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n}\n.list-group-item:last-child {\n  margin-bottom: 0;\n  border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.list-group-item.disabled,\n.list-group-item.disabled:hover,\n.list-group-item.disabled:focus {\n  color: #777777;\n  cursor: not-allowed;\n  background-color: #eeeeee;\n}\n.list-group-item.disabled .list-group-item-heading,\n.list-group-item.disabled:hover .list-group-item-heading,\n.list-group-item.disabled:focus .list-group-item-heading {\n  color: inherit;\n}\n.list-group-item.disabled .list-group-item-text,\n.list-group-item.disabled:hover .list-group-item-text,\n.list-group-item.disabled:focus .list-group-item-text {\n  color: #777777;\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  z-index: 2;\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.list-group-item.active .list-group-item-heading,\n.list-group-item.active:hover .list-group-item-heading,\n.list-group-item.active:focus .list-group-item-heading,\n.list-group-item.active .list-group-item-heading > small,\n.list-group-item.active:hover .list-group-item-heading > small,\n.list-group-item.active:focus .list-group-item-heading > small,\n.list-group-item.active .list-group-item-heading > .small,\n.list-group-item.active:hover .list-group-item-heading > .small,\n.list-group-item.active:focus .list-group-item-heading > .small {\n  color: inherit;\n}\n.list-group-item.active .list-group-item-text,\n.list-group-item.active:hover .list-group-item-text,\n.list-group-item.active:focus .list-group-item-text {\n  color: #c7ddef;\n}\na.list-group-item,\nbutton.list-group-item {\n  color: #555;\n}\na.list-group-item .list-group-item-heading,\nbutton.list-group-item .list-group-item-heading {\n  color: #333;\n}\na.list-group-item:hover,\nbutton.list-group-item:hover,\na.list-group-item:focus,\nbutton.list-group-item:focus {\n  color: #555;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\nbutton.list-group-item {\n  width: 100%;\n  text-align: left;\n}\n.list-group-item-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n}\na.list-group-item-success,\nbutton.list-group-item-success {\n  color: 
#3c763d;\n}\na.list-group-item-success .list-group-item-heading,\nbutton.list-group-item-success .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-success:hover,\nbutton.list-group-item-success:hover,\na.list-group-item-success:focus,\nbutton.list-group-item-success:focus {\n  color: #3c763d;\n  background-color: #d0e9c6;\n}\na.list-group-item-success.active,\nbutton.list-group-item-success.active,\na.list-group-item-success.active:hover,\nbutton.list-group-item-success.active:hover,\na.list-group-item-success.active:focus,\nbutton.list-group-item-success.active:focus {\n  color: #fff;\n  background-color: #3c763d;\n  border-color: #3c763d;\n}\n.list-group-item-info {\n  color: #31708f;\n  background-color: #d9edf7;\n}\na.list-group-item-info,\nbutton.list-group-item-info {\n  color: #31708f;\n}\na.list-group-item-info .list-group-item-heading,\nbutton.list-group-item-info .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-info:hover,\nbutton.list-group-item-info:hover,\na.list-group-item-info:focus,\nbutton.list-group-item-info:focus {\n  color: #31708f;\n  background-color: #c4e3f3;\n}\na.list-group-item-info.active,\nbutton.list-group-item-info.active,\na.list-group-item-info.active:hover,\nbutton.list-group-item-info.active:hover,\na.list-group-item-info.active:focus,\nbutton.list-group-item-info.active:focus {\n  color: #fff;\n  background-color: #31708f;\n  border-color: #31708f;\n}\n.list-group-item-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n}\na.list-group-item-warning,\nbutton.list-group-item-warning {\n  color: #8a6d3b;\n}\na.list-group-item-warning .list-group-item-heading,\nbutton.list-group-item-warning .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-warning:hover,\nbutton.list-group-item-warning:hover,\na.list-group-item-warning:focus,\nbutton.list-group-item-warning:focus {\n  color: #8a6d3b;\n  background-color: #faf2cc;\n}\na.list-group-item-warning.active,\nbutton.list-group-item-warning.active,\na.list-group-item-warning.active:hover,\nbutton.list-group-item-warning.active:hover,\na.list-group-item-warning.active:focus,\nbutton.list-group-item-warning.active:focus {\n  color: #fff;\n  background-color: #8a6d3b;\n  border-color: #8a6d3b;\n}\n.list-group-item-danger {\n  color: #a94442;\n  background-color: #f2dede;\n}\na.list-group-item-danger,\nbutton.list-group-item-danger {\n  color: #a94442;\n}\na.list-group-item-danger .list-group-item-heading,\nbutton.list-group-item-danger .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-danger:hover,\nbutton.list-group-item-danger:hover,\na.list-group-item-danger:focus,\nbutton.list-group-item-danger:focus {\n  color: #a94442;\n  background-color: #ebcccc;\n}\na.list-group-item-danger.active,\nbutton.list-group-item-danger.active,\na.list-group-item-danger.active:hover,\nbutton.list-group-item-danger.active:hover,\na.list-group-item-danger.active:focus,\nbutton.list-group-item-danger.active:focus {\n  color: #fff;\n  background-color: #a94442;\n  border-color: #a94442;\n}\n.list-group-item-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.list-group-item-text {\n  margin-bottom: 0;\n  line-height: 1.3;\n}\n.panel {\n  margin-bottom: 20px;\n  background-color: #fff;\n  border: 1px solid transparent;\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n  box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.panel-body {\n  padding: 15px;\n}\n.panel-heading {\n  padding: 10px 15px;\n  border-bottom: 1px solid 
transparent;\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel-heading > .dropdown .dropdown-toggle {\n  color: inherit;\n}\n.panel-title {\n  margin-top: 0;\n  margin-bottom: 0;\n  font-size: 16px;\n  color: inherit;\n}\n.panel-title > a,\n.panel-title > small,\n.panel-title > .small,\n.panel-title > small > a,\n.panel-title > .small > a {\n  color: inherit;\n}\n.panel-footer {\n  padding: 10px 15px;\n  background-color: #f5f5f5;\n  border-top: 1px solid #ddd;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .list-group,\n.panel > .panel-collapse > .list-group {\n  margin-bottom: 0;\n}\n.panel > .list-group .list-group-item,\n.panel > .panel-collapse > .list-group .list-group-item {\n  border-width: 1px 0;\n  border-radius: 0;\n}\n.panel > .list-group:first-child .list-group-item:first-child,\n.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {\n  border-top: 0;\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .list-group:last-child .list-group-item:last-child,\n.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {\n  border-bottom: 0;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.panel-heading + .list-group .list-group-item:first-child {\n  border-top-width: 0;\n}\n.list-group + .panel-footer {\n  border-top-width: 0;\n}\n.panel > .table,\n.panel > .table-responsive > .table,\n.panel > .panel-collapse > .table {\n  margin-bottom: 0;\n}\n.panel > .table caption,\n.panel > .table-responsive > .table caption,\n.panel > .panel-collapse > .table caption {\n  padding-right: 15px;\n  padding-left: 15px;\n}\n.panel > .table:first-child,\n.panel > .table-responsive:first-child > .table:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {\n  border-top-left-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > 
.table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {\n  border-top-right-radius: 3px;\n}\n.panel > .table:last-child,\n.panel > .table-responsive:last-child > .table:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {\n  border-bottom-right-radius: 3px;\n}\n.panel > .panel-body + .table,\n.panel > .panel-body + .table-responsive,\n.panel > .table + .panel-body,\n.panel > .table-responsive + .panel-body {\n  border-top: 1px solid #ddd;\n}\n.panel > .table > tbody:first-child > tr:first-child th,\n.panel > .table > tbody:first-child > tr:first-child td {\n  border-top: 0;\n}\n.panel > .table-bordered,\n.panel > .table-responsive > .table-bordered {\n  border: 0;\n}\n.panel > .table-bordered > thead > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,\n.panel > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-bordered > thead > tr > td:first-child,\n.panel > .table-responsive > 
.table-bordered > thead > tr > td:first-child,\n.panel > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-bordered > tfoot > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n  border-left: 0;\n}\n.panel > .table-bordered > thead > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,\n.panel > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-bordered > thead > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,\n.panel > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-bordered > tfoot > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n  border-right: 0;\n}\n.panel > .table-bordered > thead > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,\n.panel > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-bordered > thead > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,\n.panel > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {\n  border-bottom: 0;\n}\n.panel > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-bordered > tfoot > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {\n  border-bottom: 0;\n}\n.panel > .table-responsive {\n  margin-bottom: 0;\n  border: 0;\n}\n.panel-group {\n  margin-bottom: 20px;\n}\n.panel-group .panel {\n  margin-bottom: 0;\n  border-radius: 4px;\n}\n.panel-group .panel + .panel {\n  margin-top: 5px;\n}\n.panel-group .panel-heading {\n  border-bottom: 0;\n}\n.panel-group .panel-heading + .panel-collapse > .panel-body,\n.panel-group .panel-heading + .panel-collapse > .list-group {\n  border-top: 1px solid #ddd;\n}\n.panel-group .panel-footer {\n  border-top: 0;\n}\n.panel-group .panel-footer + .panel-collapse .panel-body {\n  border-bottom: 1px solid #ddd;\n}\n.panel-default {\n  border-color: #ddd;\n}\n.panel-default > .panel-heading {\n  color: #333333;\n  background-color: #f5f5f5;\n  border-color: #ddd;\n}\n.panel-default > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ddd;\n}\n.panel-default > .panel-heading .badge {\n  color: #f5f5f5;\n  background-color: #333333;\n}\n.panel-default > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ddd;\n}\n.panel-primary {\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading {\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: 
#337ab7;\n}\n.panel-primary > .panel-heading .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.panel-primary > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #337ab7;\n}\n.panel-success {\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #d6e9c6;\n}\n.panel-success > .panel-heading .badge {\n  color: #dff0d8;\n  background-color: #3c763d;\n}\n.panel-success > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #d6e9c6;\n}\n.panel-info {\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #bce8f1;\n}\n.panel-info > .panel-heading .badge {\n  color: #d9edf7;\n  background-color: #31708f;\n}\n.panel-info > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #bce8f1;\n}\n.panel-warning {\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #faebcc;\n}\n.panel-warning > .panel-heading .badge {\n  color: #fcf8e3;\n  background-color: #8a6d3b;\n}\n.panel-warning > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #faebcc;\n}\n.panel-danger {\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ebccd1;\n}\n.panel-danger > .panel-heading .badge {\n  color: #f2dede;\n  background-color: #a94442;\n}\n.panel-danger > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ebccd1;\n}\n.embed-responsive {\n  position: relative;\n  display: block;\n  height: 0;\n  padding: 0;\n  overflow: hidden;\n}\n.embed-responsive .embed-responsive-item,\n.embed-responsive iframe,\n.embed-responsive embed,\n.embed-responsive object,\n.embed-responsive video {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 100%;\n  height: 100%;\n  border: 0;\n}\n.embed-responsive-16by9 {\n  padding-bottom: 56.25%;\n}\n.embed-responsive-4by3 {\n  padding-bottom: 75%;\n}\n.well {\n  min-height: 20px;\n  padding: 19px;\n  margin-bottom: 20px;\n  background-color: #f5f5f5;\n  border: 1px solid #e3e3e3;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n  box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.well blockquote {\n  border-color: #ddd;\n  border-color: rgba(0, 0, 0, 0.15);\n}\n.well-lg {\n  padding: 24px;\n  border-radius: 6px;\n}\n.well-sm {\n  padding: 9px;\n  border-radius: 3px;\n}\n.close {\n  float: right;\n  font-size: 21px;\n  font-weight: bold;\n  line-height: 1;\n  color: #000;\n  text-shadow: 0 1px 0 #fff;\n  filter: alpha(opacity=20);\n  opacity: 0.2;\n}\n.close:hover,\n.close:focus {\n  color: #000;\n  text-decoration: none;\n  cursor: pointer;\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\nbutton.close {\n  padding: 0;\n  cursor: pointer;\n  background: transparent;\n  border: 0;\n  -webkit-appearance: none;\n  -moz-appearance: none;\n  appearance: none;\n}\n.modal-open {\n  overflow: hidden;\n}\n.modal {\n  position: fixed;\n  top: 0;\n  right: 0;\n  
bottom: 0;\n  left: 0;\n  z-index: 1050;\n  display: none;\n  overflow: hidden;\n  -webkit-overflow-scrolling: touch;\n  outline: 0;\n}\n.modal.fade .modal-dialog {\n  -webkit-transform: translate(0, -25%);\n  -ms-transform: translate(0, -25%);\n  -o-transform: translate(0, -25%);\n  transform: translate(0, -25%);\n  -webkit-transition: -webkit-transform 0.3s ease-out;\n  -o-transition: -o-transform 0.3s ease-out;\n  transition: -webkit-transform 0.3s ease-out;\n  transition: transform 0.3s ease-out;\n  transition: transform 0.3s ease-out, -webkit-transform 0.3s ease-out, -o-transform 0.3s ease-out;\n}\n.modal.in .modal-dialog {\n  -webkit-transform: translate(0, 0);\n  -ms-transform: translate(0, 0);\n  -o-transform: translate(0, 0);\n  transform: translate(0, 0);\n}\n.modal-open .modal {\n  overflow-x: hidden;\n  overflow-y: auto;\n}\n.modal-dialog {\n  position: relative;\n  width: auto;\n  margin: 10px;\n}\n.modal-content {\n  position: relative;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #999;\n  border: 1px solid rgba(0, 0, 0, 0.2);\n  border-radius: 6px;\n  -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n  box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n  outline: 0;\n}\n.modal-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 1040;\n  background-color: #000;\n}\n.modal-backdrop.fade {\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.modal-backdrop.in {\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\n.modal-header {\n  padding: 15px;\n  border-bottom: 1px solid #e5e5e5;\n}\n.modal-header .close {\n  margin-top: -2px;\n}\n.modal-title {\n  margin: 0;\n  line-height: 1.42857143;\n}\n.modal-body {\n  position: relative;\n  padding: 15px;\n}\n.modal-footer {\n  padding: 15px;\n  text-align: right;\n  border-top: 1px solid #e5e5e5;\n}\n.modal-footer .btn + .btn {\n  margin-bottom: 0;\n  margin-left: 5px;\n}\n.modal-footer .btn-group .btn + .btn {\n  margin-left: -1px;\n}\n.modal-footer .btn-block + .btn-block {\n  margin-left: 0;\n}\n.modal-scrollbar-measure {\n  position: absolute;\n  top: -9999px;\n  width: 50px;\n  height: 50px;\n  overflow: scroll;\n}\n@media (min-width: 768px) {\n  .modal-dialog {\n    width: 600px;\n    margin: 30px auto;\n  }\n  .modal-content {\n    -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n    box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n  }\n  .modal-sm {\n    width: 300px;\n  }\n}\n@media (min-width: 992px) {\n  .modal-lg {\n    width: 900px;\n  }\n}\n.tooltip {\n  position: absolute;\n  z-index: 1070;\n  display: block;\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1.42857143;\n  line-break: auto;\n  text-align: left;\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n  font-size: 12px;\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.tooltip.in {\n  filter: alpha(opacity=90);\n  opacity: 0.9;\n}\n.tooltip.top {\n  padding: 5px 0;\n  margin-top: -3px;\n}\n.tooltip.right {\n  padding: 0 5px;\n  margin-left: 3px;\n}\n.tooltip.bottom {\n  padding: 5px 0;\n  margin-top: 3px;\n}\n.tooltip.left {\n  padding: 0 5px;\n  margin-left: -3px;\n}\n.tooltip.top .tooltip-arrow {\n  bottom: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-left .tooltip-arrow {\n  right: 5px;\n  
bottom: 0;\n  margin-bottom: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-right .tooltip-arrow {\n  bottom: 0;\n  left: 5px;\n  margin-bottom: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.right .tooltip-arrow {\n  top: 50%;\n  left: 0;\n  margin-top: -5px;\n  border-width: 5px 5px 5px 0;\n  border-right-color: #000;\n}\n.tooltip.left .tooltip-arrow {\n  top: 50%;\n  right: 0;\n  margin-top: -5px;\n  border-width: 5px 0 5px 5px;\n  border-left-color: #000;\n}\n.tooltip.bottom .tooltip-arrow {\n  top: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-left .tooltip-arrow {\n  top: 0;\n  right: 5px;\n  margin-top: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-right .tooltip-arrow {\n  top: 0;\n  left: 5px;\n  margin-top: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip-inner {\n  max-width: 200px;\n  padding: 3px 8px;\n  color: #fff;\n  text-align: center;\n  background-color: #000;\n  border-radius: 4px;\n}\n.tooltip-arrow {\n  position: absolute;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.popover {\n  position: absolute;\n  top: 0;\n  left: 0;\n  z-index: 1060;\n  display: none;\n  max-width: 276px;\n  padding: 1px;\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1.42857143;\n  line-break: auto;\n  text-align: left;\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n  font-size: 14px;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, 0.2);\n  border-radius: 6px;\n  -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);\n  box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);\n}\n.popover.top {\n  margin-top: -10px;\n}\n.popover.right {\n  margin-left: 10px;\n}\n.popover.bottom {\n  margin-top: 10px;\n}\n.popover.left {\n  margin-left: -10px;\n}\n.popover > .arrow {\n  border-width: 11px;\n}\n.popover > .arrow,\n.popover > .arrow:after {\n  position: absolute;\n  display: block;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.popover > .arrow:after {\n  content: \"\";\n  border-width: 10px;\n}\n.popover.top > .arrow {\n  bottom: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-color: #999999;\n  border-top-color: rgba(0, 0, 0, 0.25);\n  border-bottom-width: 0;\n}\n.popover.top > .arrow:after {\n  bottom: 1px;\n  margin-left: -10px;\n  content: \" \";\n  border-top-color: #fff;\n  border-bottom-width: 0;\n}\n.popover.right > .arrow {\n  top: 50%;\n  left: -11px;\n  margin-top: -11px;\n  border-right-color: #999999;\n  border-right-color: rgba(0, 0, 0, 0.25);\n  border-left-width: 0;\n}\n.popover.right > .arrow:after {\n  bottom: -10px;\n  left: 1px;\n  content: \" \";\n  border-right-color: #fff;\n  border-left-width: 0;\n}\n.popover.bottom > .arrow {\n  top: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-width: 0;\n  border-bottom-color: #999999;\n  border-bottom-color: rgba(0, 0, 0, 0.25);\n}\n.popover.bottom > .arrow:after {\n  top: 1px;\n  margin-left: -10px;\n  content: \" \";\n  border-top-width: 0;\n  border-bottom-color: #fff;\n}\n.popover.left > .arrow {\n  top: 50%;\n  right: -11px;\n  margin-top: 
-11px;\n  border-right-width: 0;\n  border-left-color: #999999;\n  border-left-color: rgba(0, 0, 0, 0.25);\n}\n.popover.left > .arrow:after {\n  right: 1px;\n  bottom: -10px;\n  content: \" \";\n  border-right-width: 0;\n  border-left-color: #fff;\n}\n.popover-title {\n  padding: 8px 14px;\n  margin: 0;\n  font-size: 14px;\n  background-color: #f7f7f7;\n  border-bottom: 1px solid #ebebeb;\n  border-radius: 5px 5px 0 0;\n}\n.popover-content {\n  padding: 9px 14px;\n}\n.carousel {\n  position: relative;\n}\n.carousel-inner {\n  position: relative;\n  width: 100%;\n  overflow: hidden;\n}\n.carousel-inner > .item {\n  position: relative;\n  display: none;\n  -webkit-transition: 0.6s ease-in-out left;\n  -o-transition: 0.6s ease-in-out left;\n  transition: 0.6s ease-in-out left;\n}\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n  line-height: 1;\n}\n@media all and (transform-3d), (-webkit-transform-3d) {\n  .carousel-inner > .item {\n    -webkit-transition: -webkit-transform 0.6s ease-in-out;\n    -o-transition: -o-transform 0.6s ease-in-out;\n    transition: -webkit-transform 0.6s ease-in-out;\n    transition: transform 0.6s ease-in-out;\n    transition: transform 0.6s ease-in-out, -webkit-transform 0.6s ease-in-out, -o-transform 0.6s ease-in-out;\n    -webkit-backface-visibility: hidden;\n    backface-visibility: hidden;\n    -webkit-perspective: 1000px;\n    perspective: 1000px;\n  }\n  .carousel-inner > .item.next,\n  .carousel-inner > .item.active.right {\n    -webkit-transform: translate3d(100%, 0, 0);\n    transform: translate3d(100%, 0, 0);\n    left: 0;\n  }\n  .carousel-inner > .item.prev,\n  .carousel-inner > .item.active.left {\n    -webkit-transform: translate3d(-100%, 0, 0);\n    transform: translate3d(-100%, 0, 0);\n    left: 0;\n  }\n  .carousel-inner > .item.next.left,\n  .carousel-inner > .item.prev.right,\n  .carousel-inner > .item.active {\n    -webkit-transform: translate3d(0, 0, 0);\n    transform: translate3d(0, 0, 0);\n    left: 0;\n  }\n}\n.carousel-inner > .active,\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  display: block;\n}\n.carousel-inner > .active {\n  left: 0;\n}\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  position: absolute;\n  top: 0;\n  width: 100%;\n}\n.carousel-inner > .next {\n  left: 100%;\n}\n.carousel-inner > .prev {\n  left: -100%;\n}\n.carousel-inner > .next.left,\n.carousel-inner > .prev.right {\n  left: 0;\n}\n.carousel-inner > .active.left {\n  left: -100%;\n}\n.carousel-inner > .active.right {\n  left: 100%;\n}\n.carousel-control {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 15%;\n  font-size: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);\n  background-color: rgba(0, 0, 0, 0);\n  filter: alpha(opacity=50);\n  opacity: 0.5;\n}\n.carousel-control.left {\n  background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n  background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n  background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, 0.5)), to(rgba(0, 0, 0, 0.0001)));\n  background-image: linear-gradient(to right, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control.right {\n  right: 0;\n  left: auto;\n  background-image: -webkit-linear-gradient(left, rgba(0, 
0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n  background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n  background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, 0.0001)), to(rgba(0, 0, 0, 0.5)));\n  background-image: linear-gradient(to right, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control:hover,\n.carousel-control:focus {\n  color: #fff;\n  text-decoration: none;\n  outline: 0;\n  filter: alpha(opacity=90);\n  opacity: 0.9;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-left,\n.carousel-control .glyphicon-chevron-right {\n  position: absolute;\n  top: 50%;\n  z-index: 5;\n  display: inline-block;\n  margin-top: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .glyphicon-chevron-left {\n  left: 50%;\n  margin-left: -10px;\n}\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-right {\n  right: 50%;\n  margin-right: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next {\n  width: 20px;\n  height: 20px;\n  font-family: serif;\n  line-height: 1;\n}\n.carousel-control .icon-prev:before {\n  content: \"\\2039\";\n}\n.carousel-control .icon-next:before {\n  content: \"\\203a\";\n}\n.carousel-indicators {\n  position: absolute;\n  bottom: 10px;\n  left: 50%;\n  z-index: 15;\n  width: 60%;\n  padding-left: 0;\n  margin-left: -30%;\n  text-align: center;\n  list-style: none;\n}\n.carousel-indicators li {\n  display: inline-block;\n  width: 10px;\n  height: 10px;\n  margin: 1px;\n  text-indent: -999px;\n  cursor: pointer;\n  background-color: #000 \\9;\n  background-color: rgba(0, 0, 0, 0);\n  border: 1px solid #fff;\n  border-radius: 10px;\n}\n.carousel-indicators .active {\n  width: 12px;\n  height: 12px;\n  margin: 0;\n  background-color: #fff;\n}\n.carousel-caption {\n  position: absolute;\n  right: 15%;\n  bottom: 20px;\n  left: 15%;\n  z-index: 10;\n  padding-top: 20px;\n  padding-bottom: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);\n}\n.carousel-caption .btn {\n  text-shadow: none;\n}\n@media screen and (min-width: 768px) {\n  .carousel-control .glyphicon-chevron-left,\n  .carousel-control .glyphicon-chevron-right,\n  .carousel-control .icon-prev,\n  .carousel-control .icon-next {\n    width: 30px;\n    height: 30px;\n    margin-top: -10px;\n    font-size: 30px;\n  }\n  .carousel-control .glyphicon-chevron-left,\n  .carousel-control .icon-prev {\n    margin-left: -10px;\n  }\n  .carousel-control .glyphicon-chevron-right,\n  .carousel-control .icon-next {\n    margin-right: -10px;\n  }\n  .carousel-caption {\n    right: 20%;\n    left: 20%;\n    padding-bottom: 30px;\n  }\n  .carousel-indicators {\n    bottom: 20px;\n  }\n}\n.clearfix:before,\n.clearfix:after,\n.dl-horizontal dd:before,\n.dl-horizontal dd:after,\n.container:before,\n.container:after,\n.container-fluid:before,\n.container-fluid:after,\n.row:before,\n.row:after,\n.form-horizontal .form-group:before,\n.form-horizontal .form-group:after,\n.btn-toolbar:before,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:before,\n.btn-group-vertical > 
.btn-group:after,\n.nav:before,\n.nav:after,\n.navbar:before,\n.navbar:after,\n.navbar-header:before,\n.navbar-header:after,\n.navbar-collapse:before,\n.navbar-collapse:after,\n.pager:before,\n.pager:after,\n.panel-body:before,\n.panel-body:after,\n.modal-header:before,\n.modal-header:after,\n.modal-footer:before,\n.modal-footer:after {\n  display: table;\n  content: \" \";\n}\n.clearfix:after,\n.dl-horizontal dd:after,\n.container:after,\n.container-fluid:after,\n.row:after,\n.form-horizontal .form-group:after,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:after,\n.nav:after,\n.navbar:after,\n.navbar-header:after,\n.navbar-collapse:after,\n.pager:after,\n.panel-body:after,\n.modal-header:after,\n.modal-footer:after {\n  clear: both;\n}\n.center-block {\n  display: block;\n  margin-right: auto;\n  margin-left: auto;\n}\n.pull-right {\n  float: right !important;\n}\n.pull-left {\n  float: left !important;\n}\n.hide {\n  display: none !important;\n}\n.show {\n  display: block !important;\n}\n.invisible {\n  visibility: hidden;\n}\n.text-hide {\n  font: 0/0 a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n.hidden {\n  display: none !important;\n}\n.affix {\n  position: fixed;\n}\n@-ms-viewport {\n  width: device-width;\n}\n.visible-xs,\n.visible-sm,\n.visible-md,\n.visible-lg {\n  display: none !important;\n}\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n  display: none !important;\n}\n@media (max-width: 767px) {\n  .visible-xs {\n    display: block !important;\n  }\n  table.visible-xs {\n    display: table !important;\n  }\n  tr.visible-xs {\n    display: table-row !important;\n  }\n  th.visible-xs,\n  td.visible-xs {\n    display: table-cell !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-block {\n    display: block !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-inline {\n    display: inline !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm {\n    display: block !important;\n  }\n  table.visible-sm {\n    display: table !important;\n  }\n  tr.visible-sm {\n    display: table-row !important;\n  }\n  th.visible-sm,\n  td.visible-sm {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-block {\n    display: block !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md {\n    display: block !important;\n  }\n  table.visible-md {\n    display: table !important;\n  }\n  tr.visible-md {\n    display: table-row !important;\n  }\n  th.visible-md,\n  td.visible-md {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-block {\n    display: block !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  
.visible-md-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg {\n    display: block !important;\n  }\n  table.visible-lg {\n    display: table !important;\n  }\n  tr.visible-lg {\n    display: table-row !important;\n  }\n  th.visible-lg,\n  td.visible-lg {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-block {\n    display: block !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (max-width: 767px) {\n  .hidden-xs {\n    display: none !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .hidden-sm {\n    display: none !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .hidden-md {\n    display: none !important;\n  }\n}\n@media (min-width: 1200px) {\n  .hidden-lg {\n    display: none !important;\n  }\n}\n.visible-print {\n  display: none !important;\n}\n@media print {\n  .visible-print {\n    display: block !important;\n  }\n  table.visible-print {\n    display: table !important;\n  }\n  tr.visible-print {\n    display: table-row !important;\n  }\n  th.visible-print,\n  td.visible-print {\n    display: table-cell !important;\n  }\n}\n.visible-print-block {\n  display: none !important;\n}\n@media print {\n  .visible-print-block {\n    display: block !important;\n  }\n}\n.visible-print-inline {\n  display: none !important;\n}\n@media print {\n  .visible-print-inline {\n    display: inline !important;\n  }\n}\n.visible-print-inline-block {\n  display: none !important;\n}\n@media print {\n  .visible-print-inline-block {\n    display: inline-block !important;\n  }\n}\n@media print {\n  .hidden-print {\n    display: none !important;\n  }\n}\n/*# sourceMappingURL=bootstrap.css.map */","// stylelint-disable declaration-no-important, selector-no-qualifying-type\n\n/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n\n// ==========================================================================\n// Print styles.\n// Inlined to avoid the additional HTTP request: h5bp.com/r\n// ==========================================================================\n\n@media print {\n  *,\n  *:before,\n  *:after {\n    color: #000 !important; // Black prints faster: h5bp.com/s\n    text-shadow: none !important;\n    background: transparent !important;\n    box-shadow: none !important;\n  }\n\n  a,\n  a:visited {\n    text-decoration: underline;\n  }\n\n  a[href]:after {\n    content: \" (\" attr(href) \")\";\n  }\n\n  abbr[title]:after {\n    content: \" (\" attr(title) \")\";\n  }\n\n  // Don't show links that are fragment identifiers,\n  // or use the `javascript:` pseudo protocol\n  a[href^=\"#\"]:after,\n  a[href^=\"javascript:\"]:after {\n    content: \"\";\n  }\n\n  pre,\n  blockquote {\n    border: 1px solid #999;\n    page-break-inside: avoid;\n  }\n\n  thead {\n    display: table-header-group; // h5bp.com/t\n  }\n\n  tr,\n  img {\n    page-break-inside: avoid;\n  }\n\n  img {\n    max-width: 100% !important;\n  }\n\n  p,\n  h2,\n  h3 {\n    orphans: 3;\n    widows: 3;\n  }\n\n  h2,\n  h3 {\n    page-break-after: avoid;\n  }\n\n  // Bootstrap specific changes start\n\n  // Bootstrap components\n  .navbar {\n    display: none;\n  }\n  .btn,\n  .dropup > .btn {\n    > .caret {\n      border-top-color: #000 !important;\n    }\n  }\n  .label {\n    border: 1px solid #000;\n  }\n\n  .table {\n    border-collapse: collapse !important;\n\n    td,\n    th {\n      background-color: #fff !important;\n    }\n  }\n  .table-bordered {\n    th,\n    td {\n      border: 1px solid #ddd !important;\n    }\n  }\n}\n","// stylelint-disable value-list-comma-newline-after, value-list-comma-space-after, indentation, declaration-colon-newline-after, font-family-no-missing-generic-family-keyword\n\n//\n// Glyphicons for Bootstrap\n//\n// Since icons are fonts, they can be placed anywhere text is placed and are\n// thus automatically sized to match the surrounding child. 
To use, create an\n// inline element with the appropriate classes, like so:\n//\n// <a href=\"#\"><span class=\"glyphicon glyphicon-star\"></span> Star</a>\n\n// Import the fonts\n@font-face {\n  font-family: \"Glyphicons Halflings\";\n  src: url(\"@{icon-font-path}@{icon-font-name}.eot\");\n  src: url(\"@{icon-font-path}@{icon-font-name}.eot?#iefix\") format(\"embedded-opentype\"),\n       url(\"@{icon-font-path}@{icon-font-name}.woff2\") format(\"woff2\"),\n       url(\"@{icon-font-path}@{icon-font-name}.woff\") format(\"woff\"),\n       url(\"@{icon-font-path}@{icon-font-name}.ttf\") format(\"truetype\"),\n       url(\"@{icon-font-path}@{icon-font-name}.svg#@{icon-font-svg-id}\") format(\"svg\");\n}\n\n// Catchall baseclass\n.glyphicon {\n  position: relative;\n  top: 1px;\n  display: inline-block;\n  font-family: \"Glyphicons Halflings\";\n  font-style: normal;\n  font-weight: 400;\n  line-height: 1;\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n\n// Individual icons\n.glyphicon-asterisk               { &:before { content: \"\\002a\"; } }\n.glyphicon-plus                   { &:before { content: \"\\002b\"; } }\n.glyphicon-euro,\n.glyphicon-eur                    { &:before { content: \"\\20ac\"; } }\n.glyphicon-minus                  { &:before { content: \"\\2212\"; } }\n.glyphicon-cloud                  { &:before { content: \"\\2601\"; } }\n.glyphicon-envelope               { &:before { content: \"\\2709\"; } }\n.glyphicon-pencil                 { &:before { content: \"\\270f\"; } }\n.glyphicon-glass                  { &:before { content: \"\\e001\"; } }\n.glyphicon-music                  { &:before { content: \"\\e002\"; } }\n.glyphicon-search                 { &:before { content: \"\\e003\"; } }\n.glyphicon-heart                  { &:before { content: \"\\e005\"; } }\n.glyphicon-star                   { &:before { content: \"\\e006\"; } }\n.glyphicon-star-empty             { &:before { content: \"\\e007\"; } }\n.glyphicon-user                   { &:before { content: \"\\e008\"; } }\n.glyphicon-film                   { &:before { content: \"\\e009\"; } }\n.glyphicon-th-large               { &:before { content: \"\\e010\"; } }\n.glyphicon-th                     { &:before { content: \"\\e011\"; } }\n.glyphicon-th-list                { &:before { content: \"\\e012\"; } }\n.glyphicon-ok                     { &:before { content: \"\\e013\"; } }\n.glyphicon-remove                 { &:before { content: \"\\e014\"; } }\n.glyphicon-zoom-in                { &:before { content: \"\\e015\"; } }\n.glyphicon-zoom-out               { &:before { content: \"\\e016\"; } }\n.glyphicon-off                    { &:before { content: \"\\e017\"; } }\n.glyphicon-signal                 { &:before { content: \"\\e018\"; } }\n.glyphicon-cog                    { &:before { content: \"\\e019\"; } }\n.glyphicon-trash                  { &:before { content: \"\\e020\"; } }\n.glyphicon-home                   { &:before { content: \"\\e021\"; } }\n.glyphicon-file                   { &:before { content: \"\\e022\"; } }\n.glyphicon-time                   { &:before { content: \"\\e023\"; } }\n.glyphicon-road                   { &:before { content: \"\\e024\"; } }\n.glyphicon-download-alt           { &:before { content: \"\\e025\"; } }\n.glyphicon-download               { &:before { content: \"\\e026\"; } }\n.glyphicon-upload                 { &:before { content: \"\\e027\"; } }\n.glyphicon-inbox                  { &:before { content: \"\\e028\"; } }\n.glyphicon-play-circle         
   { &:before { content: \"\\e029\"; } }\n.glyphicon-repeat                 { &:before { content: \"\\e030\"; } }\n.glyphicon-refresh                { &:before { content: \"\\e031\"; } }\n.glyphicon-list-alt               { &:before { content: \"\\e032\"; } }\n.glyphicon-lock                   { &:before { content: \"\\e033\"; } }\n.glyphicon-flag                   { &:before { content: \"\\e034\"; } }\n.glyphicon-headphones             { &:before { content: \"\\e035\"; } }\n.glyphicon-volume-off             { &:before { content: \"\\e036\"; } }\n.glyphicon-volume-down            { &:before { content: \"\\e037\"; } }\n.glyphicon-volume-up              { &:before { content: \"\\e038\"; } }\n.glyphicon-qrcode                 { &:before { content: \"\\e039\"; } }\n.glyphicon-barcode                { &:before { content: \"\\e040\"; } }\n.glyphicon-tag                    { &:before { content: \"\\e041\"; } }\n.glyphicon-tags                   { &:before { content: \"\\e042\"; } }\n.glyphicon-book                   { &:before { content: \"\\e043\"; } }\n.glyphicon-bookmark               { &:before { content: \"\\e044\"; } }\n.glyphicon-print                  { &:before { content: \"\\e045\"; } }\n.glyphicon-camera                 { &:before { content: \"\\e046\"; } }\n.glyphicon-font                   { &:before { content: \"\\e047\"; } }\n.glyphicon-bold                   { &:before { content: \"\\e048\"; } }\n.glyphicon-italic                 { &:before { content: \"\\e049\"; } }\n.glyphicon-text-height            { &:before { content: \"\\e050\"; } }\n.glyphicon-text-width             { &:before { content: \"\\e051\"; } }\n.glyphicon-align-left             { &:before { content: \"\\e052\"; } }\n.glyphicon-align-center           { &:before { content: \"\\e053\"; } }\n.glyphicon-align-right            { &:before { content: \"\\e054\"; } }\n.glyphicon-align-justify          { &:before { content: \"\\e055\"; } }\n.glyphicon-list                   { &:before { content: \"\\e056\"; } }\n.glyphicon-indent-left            { &:before { content: \"\\e057\"; } }\n.glyphicon-indent-right           { &:before { content: \"\\e058\"; } }\n.glyphicon-facetime-video         { &:before { content: \"\\e059\"; } }\n.glyphicon-picture                { &:before { content: \"\\e060\"; } }\n.glyphicon-map-marker             { &:before { content: \"\\e062\"; } }\n.glyphicon-adjust                 { &:before { content: \"\\e063\"; } }\n.glyphicon-tint                   { &:before { content: \"\\e064\"; } }\n.glyphicon-edit                   { &:before { content: \"\\e065\"; } }\n.glyphicon-share                  { &:before { content: \"\\e066\"; } }\n.glyphicon-check                  { &:before { content: \"\\e067\"; } }\n.glyphicon-move                   { &:before { content: \"\\e068\"; } }\n.glyphicon-step-backward          { &:before { content: \"\\e069\"; } }\n.glyphicon-fast-backward          { &:before { content: \"\\e070\"; } }\n.glyphicon-backward               { &:before { content: \"\\e071\"; } }\n.glyphicon-play                   { &:before { content: \"\\e072\"; } }\n.glyphicon-pause                  { &:before { content: \"\\e073\"; } }\n.glyphicon-stop                   { &:before { content: \"\\e074\"; } }\n.glyphicon-forward                { &:before { content: \"\\e075\"; } }\n.glyphicon-fast-forward           { &:before { content: \"\\e076\"; } }\n.glyphicon-step-forward           { &:before { content: \"\\e077\"; } }\n.glyphicon-eject                  { &:before { content: \"\\e078\"; } 
}\n.glyphicon-chevron-left           { &:before { content: \"\\e079\"; } }\n.glyphicon-chevron-right          { &:before { content: \"\\e080\"; } }\n.glyphicon-plus-sign              { &:before { content: \"\\e081\"; } }\n.glyphicon-minus-sign             { &:before { content: \"\\e082\"; } }\n.glyphicon-remove-sign            { &:before { content: \"\\e083\"; } }\n.glyphicon-ok-sign                { &:before { content: \"\\e084\"; } }\n.glyphicon-question-sign          { &:before { content: \"\\e085\"; } }\n.glyphicon-info-sign              { &:before { content: \"\\e086\"; } }\n.glyphicon-screenshot             { &:before { content: \"\\e087\"; } }\n.glyphicon-remove-circle          { &:before { content: \"\\e088\"; } }\n.glyphicon-ok-circle              { &:before { content: \"\\e089\"; } }\n.glyphicon-ban-circle             { &:before { content: \"\\e090\"; } }\n.glyphicon-arrow-left             { &:before { content: \"\\e091\"; } }\n.glyphicon-arrow-right            { &:before { content: \"\\e092\"; } }\n.glyphicon-arrow-up               { &:before { content: \"\\e093\"; } }\n.glyphicon-arrow-down             { &:before { content: \"\\e094\"; } }\n.glyphicon-share-alt              { &:before { content: \"\\e095\"; } }\n.glyphicon-resize-full            { &:before { content: \"\\e096\"; } }\n.glyphicon-resize-small           { &:before { content: \"\\e097\"; } }\n.glyphicon-exclamation-sign       { &:before { content: \"\\e101\"; } }\n.glyphicon-gift                   { &:before { content: \"\\e102\"; } }\n.glyphicon-leaf                   { &:before { content: \"\\e103\"; } }\n.glyphicon-fire                   { &:before { content: \"\\e104\"; } }\n.glyphicon-eye-open               { &:before { content: \"\\e105\"; } }\n.glyphicon-eye-close              { &:before { content: \"\\e106\"; } }\n.glyphicon-warning-sign           { &:before { content: \"\\e107\"; } }\n.glyphicon-plane                  { &:before { content: \"\\e108\"; } }\n.glyphicon-calendar               { &:before { content: \"\\e109\"; } }\n.glyphicon-random                 { &:before { content: \"\\e110\"; } }\n.glyphicon-comment                { &:before { content: \"\\e111\"; } }\n.glyphicon-magnet                 { &:before { content: \"\\e112\"; } }\n.glyphicon-chevron-up             { &:before { content: \"\\e113\"; } }\n.glyphicon-chevron-down           { &:before { content: \"\\e114\"; } }\n.glyphicon-retweet                { &:before { content: \"\\e115\"; } }\n.glyphicon-shopping-cart          { &:before { content: \"\\e116\"; } }\n.glyphicon-folder-close           { &:before { content: \"\\e117\"; } }\n.glyphicon-folder-open            { &:before { content: \"\\e118\"; } }\n.glyphicon-resize-vertical        { &:before { content: \"\\e119\"; } }\n.glyphicon-resize-horizontal      { &:before { content: \"\\e120\"; } }\n.glyphicon-hdd                    { &:before { content: \"\\e121\"; } }\n.glyphicon-bullhorn               { &:before { content: \"\\e122\"; } }\n.glyphicon-bell                   { &:before { content: \"\\e123\"; } }\n.glyphicon-certificate            { &:before { content: \"\\e124\"; } }\n.glyphicon-thumbs-up              { &:before { content: \"\\e125\"; } }\n.glyphicon-thumbs-down            { &:before { content: \"\\e126\"; } }\n.glyphicon-hand-right             { &:before { content: \"\\e127\"; } }\n.glyphicon-hand-left              { &:before { content: \"\\e128\"; } }\n.glyphicon-hand-up                { &:before { content: \"\\e129\"; } }\n.glyphicon-hand-down              { &:before { 
content: \"\\e130\"; } }\n.glyphicon-circle-arrow-right     { &:before { content: \"\\e131\"; } }\n.glyphicon-circle-arrow-left      { &:before { content: \"\\e132\"; } }\n.glyphicon-circle-arrow-up        { &:before { content: \"\\e133\"; } }\n.glyphicon-circle-arrow-down      { &:before { content: \"\\e134\"; } }\n.glyphicon-globe                  { &:before { content: \"\\e135\"; } }\n.glyphicon-wrench                 { &:before { content: \"\\e136\"; } }\n.glyphicon-tasks                  { &:before { content: \"\\e137\"; } }\n.glyphicon-filter                 { &:before { content: \"\\e138\"; } }\n.glyphicon-briefcase              { &:before { content: \"\\e139\"; } }\n.glyphicon-fullscreen             { &:before { content: \"\\e140\"; } }\n.glyphicon-dashboard              { &:before { content: \"\\e141\"; } }\n.glyphicon-paperclip              { &:before { content: \"\\e142\"; } }\n.glyphicon-heart-empty            { &:before { content: \"\\e143\"; } }\n.glyphicon-link                   { &:before { content: \"\\e144\"; } }\n.glyphicon-phone                  { &:before { content: \"\\e145\"; } }\n.glyphicon-pushpin                { &:before { content: \"\\e146\"; } }\n.glyphicon-usd                    { &:before { content: \"\\e148\"; } }\n.glyphicon-gbp                    { &:before { content: \"\\e149\"; } }\n.glyphicon-sort                   { &:before { content: \"\\e150\"; } }\n.glyphicon-sort-by-alphabet       { &:before { content: \"\\e151\"; } }\n.glyphicon-sort-by-alphabet-alt   { &:before { content: \"\\e152\"; } }\n.glyphicon-sort-by-order          { &:before { content: \"\\e153\"; } }\n.glyphicon-sort-by-order-alt      { &:before { content: \"\\e154\"; } }\n.glyphicon-sort-by-attributes     { &:before { content: \"\\e155\"; } }\n.glyphicon-sort-by-attributes-alt { &:before { content: \"\\e156\"; } }\n.glyphicon-unchecked              { &:before { content: \"\\e157\"; } }\n.glyphicon-expand                 { &:before { content: \"\\e158\"; } }\n.glyphicon-collapse-down          { &:before { content: \"\\e159\"; } }\n.glyphicon-collapse-up            { &:before { content: \"\\e160\"; } }\n.glyphicon-log-in                 { &:before { content: \"\\e161\"; } }\n.glyphicon-flash                  { &:before { content: \"\\e162\"; } }\n.glyphicon-log-out                { &:before { content: \"\\e163\"; } }\n.glyphicon-new-window             { &:before { content: \"\\e164\"; } }\n.glyphicon-record                 { &:before { content: \"\\e165\"; } }\n.glyphicon-save                   { &:before { content: \"\\e166\"; } }\n.glyphicon-open                   { &:before { content: \"\\e167\"; } }\n.glyphicon-saved                  { &:before { content: \"\\e168\"; } }\n.glyphicon-import                 { &:before { content: \"\\e169\"; } }\n.glyphicon-export                 { &:before { content: \"\\e170\"; } }\n.glyphicon-send                   { &:before { content: \"\\e171\"; } }\n.glyphicon-floppy-disk            { &:before { content: \"\\e172\"; } }\n.glyphicon-floppy-saved           { &:before { content: \"\\e173\"; } }\n.glyphicon-floppy-remove          { &:before { content: \"\\e174\"; } }\n.glyphicon-floppy-save            { &:before { content: \"\\e175\"; } }\n.glyphicon-floppy-open            { &:before { content: \"\\e176\"; } }\n.glyphicon-credit-card            { &:before { content: \"\\e177\"; } }\n.glyphicon-transfer               { &:before { content: \"\\e178\"; } }\n.glyphicon-cutlery                { &:before { content: \"\\e179\"; } }\n.glyphicon-header        
         { &:before { content: \"\\e180\"; } }\n.glyphicon-compressed             { &:before { content: \"\\e181\"; } }\n.glyphicon-earphone               { &:before { content: \"\\e182\"; } }\n.glyphicon-phone-alt              { &:before { content: \"\\e183\"; } }\n.glyphicon-tower                  { &:before { content: \"\\e184\"; } }\n.glyphicon-stats                  { &:before { content: \"\\e185\"; } }\n.glyphicon-sd-video               { &:before { content: \"\\e186\"; } }\n.glyphicon-hd-video               { &:before { content: \"\\e187\"; } }\n.glyphicon-subtitles              { &:before { content: \"\\e188\"; } }\n.glyphicon-sound-stereo           { &:before { content: \"\\e189\"; } }\n.glyphicon-sound-dolby            { &:before { content: \"\\e190\"; } }\n.glyphicon-sound-5-1              { &:before { content: \"\\e191\"; } }\n.glyphicon-sound-6-1              { &:before { content: \"\\e192\"; } }\n.glyphicon-sound-7-1              { &:before { content: \"\\e193\"; } }\n.glyphicon-copyright-mark         { &:before { content: \"\\e194\"; } }\n.glyphicon-registration-mark      { &:before { content: \"\\e195\"; } }\n.glyphicon-cloud-download         { &:before { content: \"\\e197\"; } }\n.glyphicon-cloud-upload           { &:before { content: \"\\e198\"; } }\n.glyphicon-tree-conifer           { &:before { content: \"\\e199\"; } }\n.glyphicon-tree-deciduous         { &:before { content: \"\\e200\"; } }\n.glyphicon-cd                     { &:before { content: \"\\e201\"; } }\n.glyphicon-save-file              { &:before { content: \"\\e202\"; } }\n.glyphicon-open-file              { &:before { content: \"\\e203\"; } }\n.glyphicon-level-up               { &:before { content: \"\\e204\"; } }\n.glyphicon-copy                   { &:before { content: \"\\e205\"; } }\n.glyphicon-paste                  { &:before { content: \"\\e206\"; } }\n// The following 2 Glyphicons are omitted for the time being because\n// they currently use Unicode codepoints that are outside the\n// Basic Multilingual Plane (BMP). 
Older buggy versions of WebKit can't handle\n// non-BMP codepoints in CSS string escapes, and thus can't display these two icons.\n// Notably, the bug affects some older versions of the Android Browser.\n// More info: https://github.com/twbs/bootstrap/issues/10106\n// .glyphicon-door                   { &:before { content: \"\\1f6aa\"; } }\n// .glyphicon-key                    { &:before { content: \"\\1f511\"; } }\n.glyphicon-alert                  { &:before { content: \"\\e209\"; } }\n.glyphicon-equalizer              { &:before { content: \"\\e210\"; } }\n.glyphicon-king                   { &:before { content: \"\\e211\"; } }\n.glyphicon-queen                  { &:before { content: \"\\e212\"; } }\n.glyphicon-pawn                   { &:before { content: \"\\e213\"; } }\n.glyphicon-bishop                 { &:before { content: \"\\e214\"; } }\n.glyphicon-knight                 { &:before { content: \"\\e215\"; } }\n.glyphicon-baby-formula           { &:before { content: \"\\e216\"; } }\n.glyphicon-tent                   { &:before { content: \"\\26fa\"; } }\n.glyphicon-blackboard             { &:before { content: \"\\e218\"; } }\n.glyphicon-bed                    { &:before { content: \"\\e219\"; } }\n.glyphicon-apple                  { &:before { content: \"\\f8ff\"; } }\n.glyphicon-erase                  { &:before { content: \"\\e221\"; } }\n.glyphicon-hourglass              { &:before { content: \"\\231b\"; } }\n.glyphicon-lamp                   { &:before { content: \"\\e223\"; } }\n.glyphicon-duplicate              { &:before { content: \"\\e224\"; } }\n.glyphicon-piggy-bank             { &:before { content: \"\\e225\"; } }\n.glyphicon-scissors               { &:before { content: \"\\e226\"; } }\n.glyphicon-bitcoin                { &:before { content: \"\\e227\"; } }\n.glyphicon-btc                    { &:before { content: \"\\e227\"; } }\n.glyphicon-xbt                    { &:before { content: \"\\e227\"; } }\n.glyphicon-yen                    { &:before { content: \"\\00a5\"; } }\n.glyphicon-jpy                    { &:before { content: \"\\00a5\"; } }\n.glyphicon-ruble                  { &:before { content: \"\\20bd\"; } }\n.glyphicon-rub                    { &:before { content: \"\\20bd\"; } }\n.glyphicon-scale                  { &:before { content: \"\\e230\"; } }\n.glyphicon-ice-lolly              { &:before { content: \"\\e231\"; } }\n.glyphicon-ice-lolly-tasted       { &:before { content: \"\\e232\"; } }\n.glyphicon-education              { &:before { content: \"\\e233\"; } }\n.glyphicon-option-horizontal      { &:before { content: \"\\e234\"; } }\n.glyphicon-option-vertical        { &:before { content: \"\\e235\"; } }\n.glyphicon-menu-hamburger         { &:before { content: \"\\e236\"; } }\n.glyphicon-modal-window           { &:before { content: \"\\e237\"; } }\n.glyphicon-oil                    { &:before { content: \"\\e238\"; } }\n.glyphicon-grain                  { &:before { content: \"\\e239\"; } }\n.glyphicon-sunglasses             { &:before { content: \"\\e240\"; } }\n.glyphicon-text-size              { &:before { content: \"\\e241\"; } }\n.glyphicon-text-color             { &:before { content: \"\\e242\"; } }\n.glyphicon-text-background        { &:before { content: \"\\e243\"; } }\n.glyphicon-object-align-top       { &:before { content: \"\\e244\"; } }\n.glyphicon-object-align-bottom    { &:before { content: \"\\e245\"; } }\n.glyphicon-object-align-horizontal{ &:before { content: \"\\e246\"; } }\n.glyphicon-object-align-left      { &:before { content: \"\\e247\"; 
} }\n.glyphicon-object-align-vertical  { &:before { content: \"\\e248\"; } }\n.glyphicon-object-align-right     { &:before { content: \"\\e249\"; } }\n.glyphicon-triangle-right         { &:before { content: \"\\e250\"; } }\n.glyphicon-triangle-left          { &:before { content: \"\\e251\"; } }\n.glyphicon-triangle-bottom        { &:before { content: \"\\e252\"; } }\n.glyphicon-triangle-top           { &:before { content: \"\\e253\"; } }\n.glyphicon-console                { &:before { content: \"\\e254\"; } }\n.glyphicon-superscript            { &:before { content: \"\\e255\"; } }\n.glyphicon-subscript              { &:before { content: \"\\e256\"; } }\n.glyphicon-menu-left              { &:before { content: \"\\e257\"; } }\n.glyphicon-menu-right             { &:before { content: \"\\e258\"; } }\n.glyphicon-menu-down              { &:before { content: \"\\e259\"; } }\n.glyphicon-menu-up                { &:before { content: \"\\e260\"; } }\n","//\n// Scaffolding\n// --------------------------------------------------\n\n\n// Reset the box-sizing\n//\n// Heads up! This reset may cause conflicts with some third-party widgets.\n// For recommendations on resolving such conflicts, see\n// https://getbootstrap.com/docs/3.4/getting-started/#third-box-sizing\n* {\n  .box-sizing(border-box);\n}\n*:before,\n*:after {\n  .box-sizing(border-box);\n}\n\n\n// Body reset\n\nhtml {\n  font-size: 10px;\n  -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\n\nbody {\n  font-family: @font-family-base;\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @text-color;\n  background-color: @body-bg;\n}\n\n// Reset fonts for relevant elements\ninput,\nbutton,\nselect,\ntextarea {\n  font-family: inherit;\n  font-size: inherit;\n  line-height: inherit;\n}\n\n\n// Links\n\na {\n  color: @link-color;\n  text-decoration: none;\n\n  &:hover,\n  &:focus {\n    color: @link-hover-color;\n    text-decoration: @link-hover-decoration;\n  }\n\n  &:focus {\n    .tab-focus();\n  }\n}\n\n\n// Figures\n//\n// We reset this here because previously Normalize had no `figure` margins. This\n// ensures we don't break anyone's use of the element.\n\nfigure {\n  margin: 0;\n}\n\n\n// Images\n\nimg {\n  vertical-align: middle;\n}\n\n// Responsive images (ensure images don't scale beyond their parents)\n.img-responsive {\n  .img-responsive();\n}\n\n// Rounded corners\n.img-rounded {\n  border-radius: @border-radius-large;\n}\n\n// Image thumbnails\n//\n// Heads up! 
This is mixin-ed into thumbnails.less for `.thumbnail`.\n.img-thumbnail {\n  padding: @thumbnail-padding;\n  line-height: @line-height-base;\n  background-color: @thumbnail-bg;\n  border: 1px solid @thumbnail-border;\n  border-radius: @thumbnail-border-radius;\n  .transition(all .2s ease-in-out);\n\n  // Keep them at most 100% wide\n  .img-responsive(inline-block);\n}\n\n// Perfect circle\n.img-circle {\n  border-radius: 50%; // set radius in percents\n}\n\n\n// Horizontal rules\n\nhr {\n  margin-top: @line-height-computed;\n  margin-bottom: @line-height-computed;\n  border: 0;\n  border-top: 1px solid @hr-border;\n}\n\n\n// Only display content to screen readers\n//\n// See: https://a11yproject.com/posts/how-to-hide-content\n\n.sr-only {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  padding: 0;\n  margin: -1px;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n}\n\n// Use in conjunction with .sr-only to only display content when it's focused.\n// Useful for \"Skip to main content\" links; see https://www.w3.org/TR/2013/NOTE-WCAG20-TECHS-20130905/G1\n// Credit: HTML5 Boilerplate\n\n.sr-only-focusable {\n  &:active,\n  &:focus {\n    position: static;\n    width: auto;\n    height: auto;\n    margin: 0;\n    overflow: visible;\n    clip: auto;\n  }\n}\n\n\n// iOS \"clickable elements\" fix for role=\"button\"\n//\n// Fixes \"clickability\" issue (and more generally, the firing of events such as focus as well)\n// for traditionally non-focusable elements with role=\"button\"\n// see https://developer.mozilla.org/en-US/docs/Web/Events/click#Safari_Mobile\n\n[role=\"button\"] {\n  cursor: pointer;\n}\n","// stylelint-disable indentation, property-no-vendor-prefix, selector-no-vendor-prefix\n\n// Vendor Prefixes\n//\n// All vendor mixins are deprecated as of v3.2.0 due to the introduction of\n// Autoprefixer in our Gruntfile. 
They have been removed in v4.\n\n// - Animations\n// - Backface visibility\n// - Box shadow\n// - Box sizing\n// - Content columns\n// - Hyphens\n// - Placeholder text\n// - Transformations\n// - Transitions\n// - User Select\n\n\n// Animations\n.animation(@animation) {\n  -webkit-animation: @animation;\n       -o-animation: @animation;\n          animation: @animation;\n}\n.animation-name(@name) {\n  -webkit-animation-name: @name;\n          animation-name: @name;\n}\n.animation-duration(@duration) {\n  -webkit-animation-duration: @duration;\n          animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n  -webkit-animation-timing-function: @timing-function;\n          animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n  -webkit-animation-delay: @delay;\n          animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n  -webkit-animation-iteration-count: @iteration-count;\n          animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n  -webkit-animation-direction: @direction;\n          animation-direction: @direction;\n}\n.animation-fill-mode(@fill-mode) {\n  -webkit-animation-fill-mode: @fill-mode;\n          animation-fill-mode: @fill-mode;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n\n.backface-visibility(@visibility) {\n  -webkit-backface-visibility: @visibility;\n     -moz-backface-visibility: @visibility;\n          backface-visibility: @visibility;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n// supported browsers that have box shadow capabilities now support it.\n\n.box-shadow(@shadow) {\n  -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n          box-shadow: @shadow;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n  -webkit-box-sizing: @boxmodel;\n     -moz-box-sizing: @boxmodel;\n          box-sizing: @boxmodel;\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n  -webkit-column-count: @column-count;\n     -moz-column-count: @column-count;\n          column-count: @column-count;\n  -webkit-column-gap: @column-gap;\n     -moz-column-gap: @column-gap;\n          column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n  -webkit-hyphens: @mode;\n     -moz-hyphens: @mode;\n      -ms-hyphens: @mode; // IE10+\n       -o-hyphens: @mode;\n          hyphens: @mode;\n  word-wrap: break-word;\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n  // Firefox\n  &::-moz-placeholder {\n    color: @color;\n    opacity: 1; // Override Firefox's unusual default opacity; see https://github.com/twbs/bootstrap/pull/11526\n  }\n  &:-ms-input-placeholder { color: @color; } // Internet Explorer 10+\n  &::-webkit-input-placeholder  { color: @color; } // Safari and Chrome\n}\n\n// Transformations\n.scale(@ratio) {\n  -webkit-transform: scale(@ratio);\n      -ms-transform: scale(@ratio); // IE9 only\n       -o-transform: scale(@ratio);\n          transform: scale(@ratio);\n}\n.scale(@ratioX; @ratioY) {\n  -webkit-transform: scale(@ratioX, @ratioY);\n      -ms-transform: scale(@ratioX, @ratioY); // IE9 only\n       -o-transform: scale(@ratioX, @ratioY);\n          transform: scale(@ratioX, @ratioY);\n}\n.scaleX(@ratio) {\n  -webkit-transform: scaleX(@ratio);\n      -ms-transform: scaleX(@ratio); // IE9 only\n       
-o-transform: scaleX(@ratio);\n          transform: scaleX(@ratio);\n}\n.scaleY(@ratio) {\n  -webkit-transform: scaleY(@ratio);\n      -ms-transform: scaleY(@ratio); // IE9 only\n       -o-transform: scaleY(@ratio);\n          transform: scaleY(@ratio);\n}\n.skew(@x; @y) {\n  -webkit-transform: skewX(@x) skewY(@y);\n      -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n       -o-transform: skewX(@x) skewY(@y);\n          transform: skewX(@x) skewY(@y);\n}\n.translate(@x; @y) {\n  -webkit-transform: translate(@x, @y);\n      -ms-transform: translate(@x, @y); // IE9 only\n       -o-transform: translate(@x, @y);\n          transform: translate(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n  -webkit-transform: translate3d(@x, @y, @z);\n          transform: translate3d(@x, @y, @z);\n}\n.rotate(@degrees) {\n  -webkit-transform: rotate(@degrees);\n      -ms-transform: rotate(@degrees); // IE9 only\n       -o-transform: rotate(@degrees);\n          transform: rotate(@degrees);\n}\n.rotateX(@degrees) {\n  -webkit-transform: rotateX(@degrees);\n      -ms-transform: rotateX(@degrees); // IE9 only\n       -o-transform: rotateX(@degrees);\n          transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n  -webkit-transform: rotateY(@degrees);\n      -ms-transform: rotateY(@degrees); // IE9 only\n       -o-transform: rotateY(@degrees);\n          transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n  -webkit-perspective: @perspective;\n     -moz-perspective: @perspective;\n          perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n  -webkit-perspective-origin: @perspective;\n     -moz-perspective-origin: @perspective;\n          perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n  -webkit-transform-origin: @origin;\n     -moz-transform-origin: @origin;\n      -ms-transform-origin: @origin; // IE9 only\n          transform-origin: @origin;\n}\n\n\n// Transitions\n\n.transition(@transition) {\n  -webkit-transition: @transition;\n       -o-transition: @transition;\n          transition: @transition;\n}\n.transition-property(@transition-property) {\n  -webkit-transition-property: @transition-property;\n          transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n  -webkit-transition-delay: @transition-delay;\n          transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n  -webkit-transition-duration: @transition-duration;\n          transition-duration: @transition-duration;\n}\n.transition-timing-function(@timing-function) {\n  -webkit-transition-timing-function: @timing-function;\n          transition-timing-function: @timing-function;\n}\n.transition-transform(@transition) {\n  -webkit-transition: -webkit-transform @transition;\n     -moz-transition: -moz-transform @transition;\n       -o-transition: -o-transform @transition;\n          transition: transform @transition;\n}\n\n\n// User select\n// For selecting text on the page\n\n.user-select(@select) {\n  -webkit-user-select: @select;\n     -moz-user-select: @select;\n      -ms-user-select: @select; // IE10+\n          user-select: @select;\n}\n","// WebKit-style focus\n\n.tab-focus() {\n  // WebKit-specific. 
Other browsers will keep their default outline style.\n  // (Initially tried to also force default via `outline: initial`,\n  // but that seems to erroneously remove the outline in Firefox altogether.)\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n","// stylelint-disable media-feature-name-no-vendor-prefix, media-feature-parentheses-space-inside, media-feature-name-no-unknown, indentation, at-rule-name-space-after\n\n// Responsive image\n//\n// Keep images from scaling beyond the width of their parents.\n.img-responsive(@display: block) {\n  display: @display;\n  max-width: 100%; // Part 1: Set a maximum relative to the parent\n  height: auto; // Part 2: Scale the height according to the width, otherwise you get stretching\n}\n\n\n// Retina image\n//\n// Short retina mixin for setting background-image and -size. Note that the\n// spelling of `min--moz-device-pixel-ratio` is intentional.\n.img-retina(@file-1x; @file-2x; @width-1x; @height-1x) {\n  background-image: url(\"@{file-1x}\");\n\n  @media\n  only screen and (-webkit-min-device-pixel-ratio: 2),\n  only screen and ( min--moz-device-pixel-ratio: 2),\n  only screen and ( -o-min-device-pixel-ratio: 2/1),\n  only screen and ( min-device-pixel-ratio: 2),\n  only screen and ( min-resolution: 192dpi),\n  only screen and ( min-resolution: 2dppx) {\n    background-image: url(\"@{file-2x}\");\n    background-size: @width-1x @height-1x;\n  }\n}\n","// stylelint-disable selector-list-comma-newline-after, selector-no-qualifying-type\n\n//\n// Typography\n// --------------------------------------------------\n\n\n// Headings\n// -------------------------\n\nh1, h2, h3, h4, h5, h6,\n.h1, .h2, .h3, .h4, .h5, .h6 {\n  font-family: @headings-font-family;\n  font-weight: @headings-font-weight;\n  line-height: @headings-line-height;\n  color: @headings-color;\n\n  small,\n  .small {\n    font-weight: 400;\n    line-height: 1;\n    color: @headings-small-color;\n  }\n}\n\nh1, .h1,\nh2, .h2,\nh3, .h3 {\n  margin-top: @line-height-computed;\n  margin-bottom: (@line-height-computed / 2);\n\n  small,\n  .small {\n    font-size: 65%;\n  }\n}\nh4, .h4,\nh5, .h5,\nh6, .h6 {\n  margin-top: (@line-height-computed / 2);\n  margin-bottom: (@line-height-computed / 2);\n\n  small,\n  .small {\n    font-size: 75%;\n  }\n}\n\nh1, .h1 { font-size: @font-size-h1; }\nh2, .h2 { font-size: @font-size-h2; }\nh3, .h3 { font-size: @font-size-h3; }\nh4, .h4 { font-size: @font-size-h4; }\nh5, .h5 { font-size: @font-size-h5; }\nh6, .h6 { font-size: @font-size-h6; }\n\n\n// Body text\n// -------------------------\n\np {\n  margin: 0 0 (@line-height-computed / 2);\n}\n\n.lead {\n  margin-bottom: @line-height-computed;\n  font-size: floor((@font-size-base * 1.15));\n  font-weight: 300;\n  line-height: 1.4;\n\n  @media (min-width: @screen-sm-min) {\n    font-size: (@font-size-base * 1.5);\n  }\n}\n\n\n// Emphasis & misc\n// -------------------------\n\n// Ex: (12px small font / 14px base font) * 100% = about 85%\nsmall,\n.small {\n  font-size: floor((100% * @font-size-small / @font-size-base));\n}\n\nmark,\n.mark {\n  padding: .2em;\n  background-color: @state-warning-bg;\n}\n\n// Alignment\n.text-left           { text-align: left; }\n.text-right          { text-align: right; }\n.text-center         { text-align: center; }\n.text-justify        { text-align: justify; }\n.text-nowrap         { white-space: nowrap; }\n\n// Transformation\n.text-lowercase      { text-transform: lowercase; }\n.text-uppercase      { text-transform: uppercase; 
}\n.text-capitalize     { text-transform: capitalize; }\n\n// Contextual colors\n.text-muted {\n  color: @text-muted;\n}\n.text-primary {\n  .text-emphasis-variant(@brand-primary);\n}\n.text-success {\n  .text-emphasis-variant(@state-success-text);\n}\n.text-info {\n  .text-emphasis-variant(@state-info-text);\n}\n.text-warning {\n  .text-emphasis-variant(@state-warning-text);\n}\n.text-danger {\n  .text-emphasis-variant(@state-danger-text);\n}\n\n// Contextual backgrounds\n// For now we'll leave these alongside the text classes until v4 when we can\n// safely shift things around (per SemVer rules).\n.bg-primary {\n  // Given the contrast here, this is the only class to have its color inverted\n  // automatically.\n  color: #fff;\n  .bg-variant(@brand-primary);\n}\n.bg-success {\n  .bg-variant(@state-success-bg);\n}\n.bg-info {\n  .bg-variant(@state-info-bg);\n}\n.bg-warning {\n  .bg-variant(@state-warning-bg);\n}\n.bg-danger {\n  .bg-variant(@state-danger-bg);\n}\n\n\n// Page header\n// -------------------------\n\n.page-header {\n  padding-bottom: ((@line-height-computed / 2) - 1);\n  margin: (@line-height-computed * 2) 0 @line-height-computed;\n  border-bottom: 1px solid @page-header-border-color;\n}\n\n\n// Lists\n// -------------------------\n\n// Unordered and Ordered lists\nul,\nol {\n  margin-top: 0;\n  margin-bottom: (@line-height-computed / 2);\n  ul,\n  ol {\n    margin-bottom: 0;\n  }\n}\n\n// List options\n\n// Unstyled keeps list items block level, just removes default browser padding and list-style\n.list-unstyled {\n  padding-left: 0;\n  list-style: none;\n}\n\n// Inline turns list items into inline-block\n.list-inline {\n  .list-unstyled();\n  margin-left: -5px;\n\n  > li {\n    display: inline-block;\n    padding-right: 5px;\n    padding-left: 5px;\n  }\n}\n\n// Description Lists\ndl {\n  margin-top: 0; // Remove browser default\n  margin-bottom: @line-height-computed;\n}\ndt,\ndd {\n  line-height: @line-height-base;\n}\ndt {\n  font-weight: 700;\n}\ndd {\n  margin-left: 0; // Undo browser default\n}\n\n// Horizontal description lists\n//\n// Defaults to being stacked without any of the below styles applied, until the\n// grid breakpoint is reached (default of ~768px).\n\n.dl-horizontal {\n  dd {\n    &:extend(.clearfix all); // Clear the floated `dt` if an empty `dd` is present\n  }\n\n  @media (min-width: @dl-horizontal-breakpoint) {\n    dt {\n      float: left;\n      width: (@dl-horizontal-offset - 20);\n      clear: left;\n      text-align: right;\n      .text-overflow();\n    }\n    dd {\n      margin-left: @dl-horizontal-offset;\n    }\n  }\n}\n\n\n// Misc\n// -------------------------\n\n// Abbreviations and acronyms\n// Add data-* attribute to help out our tooltip plugin, per https://github.com/twbs/bootstrap/issues/5257\nabbr[title],\nabbr[data-original-title] {\n  cursor: help;\n}\n\n.initialism {\n  font-size: 90%;\n  .text-uppercase();\n}\n\n// Blockquotes\nblockquote {\n  padding: (@line-height-computed / 2) @line-height-computed;\n  margin: 0 0 @line-height-computed;\n  font-size: @blockquote-font-size;\n  border-left: 5px solid @blockquote-border-color;\n\n  p,\n  ul,\n  ol {\n    &:last-child {\n      margin-bottom: 0;\n    }\n  }\n\n  // Note: Deprecated small and .small as of v3.1.0\n  // Context: https://github.com/twbs/bootstrap/issues/11660\n  footer,\n  small,\n  .small {\n    display: block;\n    font-size: 80%; // back to default font-size\n    line-height: @line-height-base;\n    color: @blockquote-small-color;\n\n    &:before {\n      content: 
\"\\2014 \\00A0\"; // em dash, nbsp\n    }\n  }\n}\n\n// Opposite alignment of blockquote\n//\n// Heads up: `blockquote.pull-right` has been deprecated as of v3.1.0.\n.blockquote-reverse,\nblockquote.pull-right {\n  padding-right: 15px;\n  padding-left: 0;\n  text-align: right;\n  border-right: 5px solid @blockquote-border-color;\n  border-left: 0;\n\n  // Account for citation\n  footer,\n  small,\n  .small {\n    &:before { content: \"\"; }\n    &:after {\n      content: \"\\00A0 \\2014\"; // nbsp, em dash\n    }\n  }\n}\n\n// Addresses\naddress {\n  margin-bottom: @line-height-computed;\n  font-style: normal;\n  line-height: @line-height-base;\n}\n","// Typography\n\n.text-emphasis-variant(@color) {\n  color: @color;\n  a&:hover,\n  a&:focus {\n    color: darken(@color, 10%);\n  }\n}\n","// Contextual backgrounds\n\n.bg-variant(@color) {\n  background-color: @color;\n  a&:hover,\n  a&:focus {\n    background-color: darken(@color, 10%);\n  }\n}\n","// Text overflow\n// Requires inline-block or block for proper styling\n\n.text-overflow() {\n  overflow: hidden;\n  text-overflow: ellipsis;\n  white-space: nowrap;\n}\n","//\n// Code (inline and block)\n// --------------------------------------------------\n\n\n// Inline and block code styles\ncode,\nkbd,\npre,\nsamp {\n  font-family: @font-family-monospace;\n}\n\n// Inline code\ncode {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: @code-color;\n  background-color: @code-bg;\n  border-radius: @border-radius-base;\n}\n\n// User input typically entered via keyboard\nkbd {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: @kbd-color;\n  background-color: @kbd-bg;\n  border-radius: @border-radius-small;\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);\n\n  kbd {\n    padding: 0;\n    font-size: 100%;\n    font-weight: 700;\n    box-shadow: none;\n  }\n}\n\n// Blocks of code\npre {\n  display: block;\n  padding: ((@line-height-computed - 1) / 2);\n  margin: 0 0 (@line-height-computed / 2);\n  font-size: (@font-size-base - 1); // 14px to 13px\n  line-height: @line-height-base;\n  color: @pre-color;\n  word-break: break-all;\n  word-wrap: break-word;\n  background-color: @pre-bg;\n  border: 1px solid @pre-border-color;\n  border-radius: @border-radius-base;\n\n  // Account for some code outputs that place code tags in pre tags\n  code {\n    padding: 0;\n    font-size: inherit;\n    color: inherit;\n    white-space: pre-wrap;\n    background-color: transparent;\n    border-radius: 0;\n  }\n}\n\n// Enable scrollable blocks of code\n.pre-scrollable {\n  max-height: @pre-scrollable-max-height;\n  overflow-y: scroll;\n}\n","//\n// Grid system\n// --------------------------------------------------\n\n\n// Container widths\n//\n// Set the container width, and override it for fixed navbars in media queries.\n\n.container {\n  .container-fixed();\n\n  @media (min-width: @screen-sm-min) {\n    width: @container-sm;\n  }\n  @media (min-width: @screen-md-min) {\n    width: @container-md;\n  }\n  @media (min-width: @screen-lg-min) {\n    width: @container-lg;\n  }\n}\n\n\n// Fluid container\n//\n// Utilizes the mixin meant for fixed width containers, but without any defined\n// width for fluid, full width layouts.\n\n.container-fluid {\n  .container-fixed();\n}\n\n\n// Row\n//\n// Rows contain and clear the floats of your columns.\n\n.row {\n  .make-row();\n}\n\n.row-no-gutters {\n  margin-right: 0;\n  margin-left: 0;\n\n  [class*=\"col-\"] {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n\n\n// Columns\n//\n// Common styles for small and large grid 
columns\n\n.make-grid-columns();\n\n\n// Extra small grid\n//\n// Columns, offsets, pushes, and pulls for extra small devices like\n// smartphones.\n\n.make-grid(xs);\n\n\n// Small grid\n//\n// Columns, offsets, pushes, and pulls for the small device range, from phones\n// to tablets.\n\n@media (min-width: @screen-sm-min) {\n  .make-grid(sm);\n}\n\n\n// Medium grid\n//\n// Columns, offsets, pushes, and pulls for the desktop device range.\n\n@media (min-width: @screen-md-min) {\n  .make-grid(md);\n}\n\n\n// Large grid\n//\n// Columns, offsets, pushes, and pulls for the large desktop device range.\n\n@media (min-width: @screen-lg-min) {\n  .make-grid(lg);\n}\n","// Grid system\n//\n// Generate semantic grid columns with these mixins.\n\n// Centered container element\n.container-fixed(@gutter: @grid-gutter-width) {\n  padding-right: ceil((@gutter / 2));\n  padding-left: floor((@gutter / 2));\n  margin-right: auto;\n  margin-left: auto;\n  &:extend(.clearfix all);\n}\n\n// Creates a wrapper for a series of columns\n.make-row(@gutter: @grid-gutter-width) {\n  margin-right: floor((@gutter / -2));\n  margin-left: ceil((@gutter / -2));\n  &:extend(.clearfix all);\n}\n\n// Generate the extra small columns\n.make-xs-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  float: left;\n  width: percentage((@columns / @grid-columns));\n  min-height: 1px;\n  padding-right: (@gutter / 2);\n  padding-left: (@gutter / 2);\n}\n.make-xs-column-offset(@columns) {\n  margin-left: percentage((@columns / @grid-columns));\n}\n.make-xs-column-push(@columns) {\n  left: percentage((@columns / @grid-columns));\n}\n.make-xs-column-pull(@columns) {\n  right: percentage((@columns / @grid-columns));\n}\n\n// Generate the small columns\n.make-sm-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-right: (@gutter / 2);\n  padding-left: (@gutter / 2);\n\n  @media (min-width: @screen-sm-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-offset(@columns) {\n  @media (min-width: @screen-sm-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-push(@columns) {\n  @media (min-width: @screen-sm-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-pull(@columns) {\n  @media (min-width: @screen-sm-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n// Generate the medium columns\n.make-md-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-right: (@gutter / 2);\n  padding-left: (@gutter / 2);\n\n  @media (min-width: @screen-md-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-offset(@columns) {\n  @media (min-width: @screen-md-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-push(@columns) {\n  @media (min-width: @screen-md-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-pull(@columns) {\n  @media (min-width: @screen-md-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n// Generate the large columns\n.make-lg-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-right: (@gutter / 2);\n  padding-left: (@gutter / 2);\n\n  @media (min-width: @screen-lg-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-offset(@columns) {\n  @media (min-width: 
@screen-lg-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-push(@columns) {\n  @media (min-width: @screen-lg-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-pull(@columns) {\n  @media (min-width: @screen-lg-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n","// Framework grid generation\n//\n// Used only by Bootstrap to generate the correct number of grid classes given\n// any value of `@grid-columns`.\n\n.make-grid-columns() {\n  // Common styles for all sizes of grid columns, widths 1-12\n  .col(@index) { // initial\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general; \"=<\" isn't a typo\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      position: relative;\n      // Prevent columns from collapsing when empty\n      min-height: 1px;\n      // Inner gutter via padding\n      padding-right: floor((@grid-gutter-width / 2));\n      padding-left: ceil((@grid-gutter-width / 2));\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.float-grid-columns(@class) {\n  .col(@index) { // initial\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      float: left;\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.calc-grid-column(@index, @class, @type) when (@type = width) and (@index > 0) {\n  .col-@{class}-@{index} {\n    width: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = push) and (@index > 0) {\n  .col-@{class}-push-@{index} {\n    left: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = push) and (@index = 0) {\n  .col-@{class}-push-0 {\n    left: auto;\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = pull) and (@index > 0) {\n  .col-@{class}-pull-@{index} {\n    right: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = pull) and (@index = 0) {\n  .col-@{class}-pull-0 {\n    right: auto;\n  }\n}\n.calc-grid-column(@index, @class, @type) when (@type = offset) {\n  .col-@{class}-offset-@{index} {\n    margin-left: percentage((@index / @grid-columns));\n  }\n}\n\n// Basic looping in LESS\n.loop-grid-columns(@index, @class, @type) when (@index >= 0) {\n  .calc-grid-column(@index, @class, @type);\n  // next iteration\n  .loop-grid-columns((@index - 1), @class, @type);\n}\n\n// Create grid for specific class\n.make-grid(@class) {\n  .float-grid-columns(@class);\n  .loop-grid-columns(@grid-columns, @class, width);\n  .loop-grid-columns(@grid-columns, @class, pull);\n  .loop-grid-columns(@grid-columns, @class, push);\n  .loop-grid-columns(@grid-columns, @class, offset);\n}\n","// stylelint-disable selector-max-type, selector-max-compound-selectors, selector-no-qualifying-type\n\n//\n// Tables\n// --------------------------------------------------\n\n\ntable {\n  background-color: @table-bg;\n\n  // Table cell sizing\n  //\n  // Reset default table behavior\n\n  
col[class*=\"col-\"] {\n    position: static; // Prevent border hiding in Firefox and IE9-11 (see https://github.com/twbs/bootstrap/issues/11623)\n    display: table-column;\n    float: none;\n  }\n\n  td,\n  th {\n    &[class*=\"col-\"] {\n      position: static; // Prevent border hiding in Firefox and IE9-11 (see https://github.com/twbs/bootstrap/issues/11623)\n      display: table-cell;\n      float: none;\n    }\n  }\n}\n\ncaption {\n  padding-top: @table-cell-padding;\n  padding-bottom: @table-cell-padding;\n  color: @text-muted;\n  text-align: left;\n}\n\nth {\n  text-align: left;\n}\n\n\n// Baseline styles\n\n.table {\n  width: 100%;\n  max-width: 100%;\n  margin-bottom: @line-height-computed;\n  // Cells\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        padding: @table-cell-padding;\n        line-height: @line-height-base;\n        vertical-align: top;\n        border-top: 1px solid @table-border-color;\n      }\n    }\n  }\n  // Bottom align for column headings\n  > thead > tr > th {\n    vertical-align: bottom;\n    border-bottom: 2px solid @table-border-color;\n  }\n  // Remove top border from thead by default\n  > caption + thead,\n  > colgroup + thead,\n  > thead:first-child {\n    > tr:first-child {\n      > th,\n      > td {\n        border-top: 0;\n      }\n    }\n  }\n  // Account for multiple tbody instances\n  > tbody + tbody {\n    border-top: 2px solid @table-border-color;\n  }\n\n  // Nesting\n  .table {\n    background-color: @body-bg;\n  }\n}\n\n\n// Condensed table w/ half padding\n\n.table-condensed {\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        padding: @table-condensed-cell-padding;\n      }\n    }\n  }\n}\n\n\n// Bordered version\n//\n// Add borders all around the table and between all the columns.\n\n.table-bordered {\n  border: 1px solid @table-border-color;\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        border: 1px solid @table-border-color;\n      }\n    }\n  }\n  > thead > tr {\n    > th,\n    > td {\n      border-bottom-width: 2px;\n    }\n  }\n}\n\n\n// Zebra-striping\n//\n// Default zebra-stripe styles (alternating gray and transparent backgrounds)\n\n.table-striped {\n  > tbody > tr:nth-of-type(odd) {\n    background-color: @table-bg-accent;\n  }\n}\n\n\n// Hover effect\n//\n// Placed here since it has to come after the potential zebra striping\n\n.table-hover {\n  > tbody > tr:hover {\n    background-color: @table-bg-hover;\n  }\n}\n\n\n// Table backgrounds\n//\n// Exact selectors below required to override `.table-striped` and prevent\n// inheritance to nested tables.\n\n// Generate the contextual variants\n.table-row-variant(active; @table-bg-active);\n.table-row-variant(success; @state-success-bg);\n.table-row-variant(info; @state-info-bg);\n.table-row-variant(warning; @state-warning-bg);\n.table-row-variant(danger; @state-danger-bg);\n\n\n// Responsive tables\n//\n// Wrap your tables in `.table-responsive` and we'll make them mobile friendly\n// by enabling horizontal scrolling. Only applies <768px. 
Everything above that\n// will display normally.\n\n.table-responsive {\n  min-height: .01%; // Workaround for IE9 bug (see https://github.com/twbs/bootstrap/issues/14837)\n  overflow-x: auto;\n\n  @media screen and (max-width: @screen-xs-max) {\n    width: 100%;\n    margin-bottom: (@line-height-computed * .75);\n    overflow-y: hidden;\n    -ms-overflow-style: -ms-autohiding-scrollbar;\n    border: 1px solid @table-border-color;\n\n    // Tighten up spacing\n    > .table {\n      margin-bottom: 0;\n\n      // Ensure the content doesn't wrap\n      > thead,\n      > tbody,\n      > tfoot {\n        > tr {\n          > th,\n          > td {\n            white-space: nowrap;\n          }\n        }\n      }\n    }\n\n    // Special overrides for the bordered tables\n    > .table-bordered {\n      border: 0;\n\n      // Nuke the appropriate borders so that the parent can handle them\n      > thead,\n      > tbody,\n      > tfoot {\n        > tr {\n          > th:first-child,\n          > td:first-child {\n            border-left: 0;\n          }\n          > th:last-child,\n          > td:last-child {\n            border-right: 0;\n          }\n        }\n      }\n\n      // Only nuke the last row's bottom-border in `tbody` and `tfoot` since\n      // chances are there will be only one `tr` in a `thead` and that would\n      // remove the border altogether.\n      > tbody,\n      > tfoot {\n        > tr:last-child {\n          > th,\n          > td {\n            border-bottom: 0;\n          }\n        }\n      }\n\n    }\n  }\n}\n","// Tables\n\n.table-row-variant(@state; @background) {\n  // Exact selectors below required to override `.table-striped` and prevent\n  // inheritance to nested tables.\n  .table > thead > tr,\n  .table > tbody > tr,\n  .table > tfoot > tr {\n    > td.@{state},\n    > th.@{state},\n    &.@{state} > td,\n    &.@{state} > th {\n      background-color: @background;\n    }\n  }\n\n  // Hover states for `.table-hover`\n  // Note: this is not available for cells or rows within `thead` or `tfoot`.\n  .table-hover > tbody > tr {\n    > td.@{state}:hover,\n    > th.@{state}:hover,\n    &.@{state}:hover > td,\n    &:hover > .@{state},\n    &.@{state}:hover > th {\n      background-color: darken(@background, 5%);\n    }\n  }\n}\n","// stylelint-disable selector-no-qualifying-type, property-no-vendor-prefix, media-feature-name-no-vendor-prefix\n\n//\n// Forms\n// --------------------------------------------------\n\n\n// Normalize non-controls\n//\n// Restyle and baseline non-control form elements.\n\nfieldset {\n  // Chrome and Firefox set a `min-width: min-content;` on fieldsets,\n  // so we reset that to ensure it behaves more like a standard block element.\n  // See https://github.com/twbs/bootstrap/issues/12359.\n  min-width: 0;\n  padding: 0;\n  margin: 0;\n  border: 0;\n}\n\nlegend {\n  display: block;\n  width: 100%;\n  padding: 0;\n  margin-bottom: @line-height-computed;\n  font-size: (@font-size-base * 1.5);\n  line-height: inherit;\n  color: @legend-color;\n  border: 0;\n  border-bottom: 1px solid @legend-border-color;\n}\n\nlabel {\n  display: inline-block;\n  max-width: 100%; // Force IE8 to wrap long content (see https://github.com/twbs/bootstrap/issues/13141)\n  margin-bottom: 5px;\n  font-weight: 700;\n}\n\n\n// Normalize form controls\n//\n// While most of our form styles require extra classes, some basic normalization\n// is required to ensure optimum display with or without those classes to better\n// address browser 
inconsistencies.\n\ninput[type=\"search\"] {\n  // Override content-box in Normalize (* isn't specific enough)\n  .box-sizing(border-box);\n\n  // Search inputs in iOS\n  //\n  // This overrides the extra rounded corners on search inputs in iOS so that our\n  // `.form-control` class can properly style them. Note that this cannot simply\n  // be added to `.form-control` as it's not specific enough. For details, see\n  // https://github.com/twbs/bootstrap/issues/11586.\n  -webkit-appearance: none;\n  appearance: none;\n}\n\n// Position radios and checkboxes better\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n  margin: 4px 0 0;\n  margin-top: 1px \\9; // IE8-9\n  line-height: normal;\n\n  // Apply same disabled cursor tweak as for inputs\n  // Some special care is needed because <label>s don't inherit their parent's `cursor`.\n  //\n  // Note: Neither radios nor checkboxes can be readonly.\n  &[disabled],\n  &.disabled,\n  fieldset[disabled] & {\n    cursor: @cursor-disabled;\n  }\n}\n\ninput[type=\"file\"] {\n  display: block;\n}\n\n// Make range inputs behave like textual form controls\ninput[type=\"range\"] {\n  display: block;\n  width: 100%;\n}\n\n// Make multiple select elements height not fixed\nselect[multiple],\nselect[size] {\n  height: auto;\n}\n\n// Focus for file, radio, and checkbox\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n  .tab-focus();\n}\n\n// Adjust output element\noutput {\n  display: block;\n  padding-top: (@padding-base-vertical + 1);\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @input-color;\n}\n\n\n// Common form controls\n//\n// Shared size and type resets for form controls. Apply `.form-control` to any\n// of the following form controls:\n//\n// select\n// textarea\n// input[type=\"text\"]\n// input[type=\"password\"]\n// input[type=\"datetime\"]\n// input[type=\"datetime-local\"]\n// input[type=\"date\"]\n// input[type=\"month\"]\n// input[type=\"time\"]\n// input[type=\"week\"]\n// input[type=\"number\"]\n// input[type=\"email\"]\n// input[type=\"url\"]\n// input[type=\"search\"]\n// input[type=\"tel\"]\n// input[type=\"color\"]\n\n.form-control {\n  display: block;\n  width: 100%;\n  height: @input-height-base; // Make inputs at least the height of their button counterpart (base line-height + padding + border)\n  padding: @padding-base-vertical @padding-base-horizontal;\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @input-color;\n  background-color: @input-bg;\n  background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid @input-border;\n  border-radius: @input-border-radius; // Note: This has no effect on <select>s in some browsers, due to the limited stylability of <select>s in CSS.\n  .box-shadow(inset 0 1px 1px rgba(0, 0, 0, .075));\n  .transition(~\"border-color ease-in-out .15s, box-shadow ease-in-out .15s\");\n\n  // Customize the `:focus` state to imitate native WebKit styles.\n  .form-control-focus();\n\n  // Placeholder\n  .placeholder();\n\n  // Unstyle the caret on `<select>`s in IE10+.\n  &::-ms-expand {\n    background-color: transparent;\n    border: 0;\n  }\n\n  // Disabled and read-only inputs\n  //\n  // HTML5 says that controls under a fieldset > legend:first-child won't be\n  // disabled if the fieldset is disabled. 
Due to implementation difficulty, we\n  // don't honor that edge case; we style them as disabled anyway.\n  &[disabled],\n  &[readonly],\n  fieldset[disabled] & {\n    background-color: @input-bg-disabled;\n    opacity: 1; // iOS fix for unreadable disabled content; see https://github.com/twbs/bootstrap/issues/11655\n  }\n\n  &[disabled],\n  fieldset[disabled] & {\n    cursor: @cursor-disabled;\n  }\n\n  // Reset height for `textarea`s\n  textarea& {\n    height: auto;\n  }\n}\n\n\n// Special styles for iOS temporal inputs\n//\n// In Mobile Safari, setting `display: block` on temporal inputs causes the\n// text within the input to become vertically misaligned. As a workaround, we\n// set a pixel line-height that matches the given height of the input, but only\n// for Safari. See https://bugs.webkit.org/show_bug.cgi?id=139848\n//\n// Note that as of 9.3, iOS doesn't support `week`.\n\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n  input[type=\"date\"],\n  input[type=\"time\"],\n  input[type=\"datetime-local\"],\n  input[type=\"month\"] {\n    &.form-control {\n      line-height: @input-height-base;\n    }\n\n    &.input-sm,\n    .input-group-sm & {\n      line-height: @input-height-small;\n    }\n\n    &.input-lg,\n    .input-group-lg & {\n      line-height: @input-height-large;\n    }\n  }\n}\n\n\n// Form groups\n//\n// Designed to help with the organization and spacing of vertical forms. For\n// horizontal forms, use the predefined grid classes.\n\n.form-group {\n  margin-bottom: @form-group-margin-bottom;\n}\n\n\n// Checkboxes and radios\n//\n// Indent the labels to position radios/checkboxes as hanging controls.\n\n.radio,\n.checkbox {\n  position: relative;\n  display: block;\n  margin-top: 10px;\n  margin-bottom: 10px;\n\n  // These are used on elements with <label> descendants\n  &.disabled,\n  fieldset[disabled] & {\n    label {\n      cursor: @cursor-disabled;\n    }\n  }\n\n  label {\n    min-height: @line-height-computed; // Ensure the input doesn't jump when there is no text\n    padding-left: 20px;\n    margin-bottom: 0;\n    font-weight: 400;\n    cursor: pointer;\n  }\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n  position: absolute;\n  margin-top: 4px \\9;\n  margin-left: -20px;\n}\n\n.radio + .radio,\n.checkbox + .checkbox {\n  margin-top: -5px; // Move up sibling radios or checkboxes for tighter spacing\n}\n\n// Radios and checkboxes on same line\n.radio-inline,\n.checkbox-inline {\n  position: relative;\n  display: inline-block;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: 400;\n  vertical-align: middle;\n  cursor: pointer;\n\n  // These are used directly on <label>s\n  &.disabled,\n  fieldset[disabled] & {\n    cursor: @cursor-disabled;\n  }\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n  margin-top: 0;\n  margin-left: 10px; // space out consecutive inline controls\n}\n\n\n// Static form control text\n//\n// Apply class to a `p` element to make any string of text align with labels in\n// a horizontal form layout.\n\n.form-control-static {\n  min-height: (@line-height-computed + @font-size-base);\n  // Size it appropriately next to real form controls\n  padding-top: (@padding-base-vertical + 1);\n  padding-bottom: (@padding-base-vertical + 1);\n  // Remove default margin from `p`\n  margin-bottom: 0;\n\n  &.input-lg,\n  &.input-sm {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n\n\n// Form control 
sizing\n//\n// Build on `.form-control` with modifier classes to decrease or increase the\n// height and font-size of form controls.\n//\n// The `.form-group-* form-control` variations are sadly duplicated to avoid the\n// issue documented in https://github.com/twbs/bootstrap/issues/15074.\n\n.input-sm {\n  .input-size(@input-height-small; @padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @input-border-radius-small);\n}\n.form-group-sm {\n  .form-control {\n    height: @input-height-small;\n    padding: @padding-small-vertical @padding-small-horizontal;\n    font-size: @font-size-small;\n    line-height: @line-height-small;\n    border-radius: @input-border-radius-small;\n  }\n  select.form-control {\n    height: @input-height-small;\n    line-height: @input-height-small;\n  }\n  textarea.form-control,\n  select[multiple].form-control {\n    height: auto;\n  }\n  .form-control-static {\n    height: @input-height-small;\n    min-height: (@line-height-computed + @font-size-small);\n    padding: (@padding-small-vertical + 1) @padding-small-horizontal;\n    font-size: @font-size-small;\n    line-height: @line-height-small;\n  }\n}\n\n.input-lg {\n  .input-size(@input-height-large; @padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @input-border-radius-large);\n}\n.form-group-lg {\n  .form-control {\n    height: @input-height-large;\n    padding: @padding-large-vertical @padding-large-horizontal;\n    font-size: @font-size-large;\n    line-height: @line-height-large;\n    border-radius: @input-border-radius-large;\n  }\n  select.form-control {\n    height: @input-height-large;\n    line-height: @input-height-large;\n  }\n  textarea.form-control,\n  select[multiple].form-control {\n    height: auto;\n  }\n  .form-control-static {\n    height: @input-height-large;\n    min-height: (@line-height-computed + @font-size-large);\n    padding: (@padding-large-vertical + 1) @padding-large-horizontal;\n    font-size: @font-size-large;\n    line-height: @line-height-large;\n  }\n}\n\n\n// Form control feedback states\n//\n// Apply contextual and semantic states to individual form controls.\n\n.has-feedback {\n  // Enable absolute positioning\n  position: relative;\n\n  // Ensure icons don't overlap text\n  .form-control {\n    padding-right: (@input-height-base * 1.25);\n  }\n}\n// Feedback icon (requires .glyphicon classes)\n.form-control-feedback {\n  position: absolute;\n  top: 0;\n  right: 0;\n  z-index: 2; // Ensure icon is above input groups\n  display: block;\n  width: @input-height-base;\n  height: @input-height-base;\n  line-height: @input-height-base;\n  text-align: center;\n  pointer-events: none;\n}\n.input-lg + .form-control-feedback,\n.input-group-lg + .form-control-feedback,\n.form-group-lg .form-control + .form-control-feedback {\n  width: @input-height-large;\n  height: @input-height-large;\n  line-height: @input-height-large;\n}\n.input-sm + .form-control-feedback,\n.input-group-sm + .form-control-feedback,\n.form-group-sm .form-control + .form-control-feedback {\n  width: @input-height-small;\n  height: @input-height-small;\n  line-height: @input-height-small;\n}\n\n// Feedback states\n.has-success {\n  .form-control-validation(@state-success-text; @state-success-text; @state-success-bg);\n}\n.has-warning {\n  .form-control-validation(@state-warning-text; @state-warning-text; @state-warning-bg);\n}\n.has-error {\n  .form-control-validation(@state-danger-text; @state-danger-text; @state-danger-bg);\n}\n\n// 
Reposition feedback icon if input has visible label above\n.has-feedback label {\n\n  & ~ .form-control-feedback {\n    top: (@line-height-computed + 5); // Height of the `label` and its margin\n  }\n  &.sr-only ~ .form-control-feedback {\n    top: 0;\n  }\n}\n\n\n// Help text\n//\n// Apply to any element you wish to create light text for placement immediately\n// below a form control. Use for general help, formatting, or instructional text.\n\n.help-block {\n  display: block; // account for any element using help-block\n  margin-top: 5px;\n  margin-bottom: 10px;\n  color: lighten(@text-color, 25%); // lighten the text some for contrast\n}\n\n\n// Inline forms\n//\n// Make forms appear inline(-block) by adding the `.form-inline` class. Inline\n// forms begin stacked on extra small (mobile) devices and then go inline when\n// viewports reach <768px.\n//\n// Requires wrapping inputs and labels with `.form-group` for proper display of\n// default HTML form controls and our custom form controls (e.g., input groups).\n//\n// Heads up! This is mixin-ed into `.navbar-form` in navbars.less.\n\n.form-inline {\n\n  // Kick in the inline\n  @media (min-width: @screen-sm-min) {\n    // Inline-block all the things for \"inline\"\n    .form-group {\n      display: inline-block;\n      margin-bottom: 0;\n      vertical-align: middle;\n    }\n\n    // In navbar-form, allow folks to *not* use `.form-group`\n    .form-control {\n      display: inline-block;\n      width: auto; // Prevent labels from stacking above inputs in `.form-group`\n      vertical-align: middle;\n    }\n\n    // Make static controls behave like regular ones\n    .form-control-static {\n      display: inline-block;\n    }\n\n    .input-group {\n      display: inline-table;\n      vertical-align: middle;\n\n      .input-group-addon,\n      .input-group-btn,\n      .form-control {\n        width: auto;\n      }\n    }\n\n    // Input groups need that 100% width though\n    .input-group > .form-control {\n      width: 100%;\n    }\n\n    .control-label {\n      margin-bottom: 0;\n      vertical-align: middle;\n    }\n\n    // Remove default margin on radios/checkboxes that were used for stacking, and\n    // then undo the floating of radios and checkboxes to match.\n    .radio,\n    .checkbox {\n      display: inline-block;\n      margin-top: 0;\n      margin-bottom: 0;\n      vertical-align: middle;\n\n      label {\n        padding-left: 0;\n      }\n    }\n    .radio input[type=\"radio\"],\n    .checkbox input[type=\"checkbox\"] {\n      position: relative;\n      margin-left: 0;\n    }\n\n    // Re-override the feedback icon.\n    .has-feedback .form-control-feedback {\n      top: 0;\n    }\n  }\n}\n\n\n// Horizontal forms\n//\n// Horizontal forms are built on grid classes and allow you to create forms with\n// labels on the left and inputs on the right.\n\n.form-horizontal {\n\n  // Consistent vertical alignment of radios and checkboxes\n  //\n  // Labels also get some reset styles, but that is scoped to a media query below.\n  .radio,\n  .checkbox,\n  .radio-inline,\n  .checkbox-inline {\n    padding-top: (@padding-base-vertical + 1); // Default padding plus a border\n    margin-top: 0;\n    margin-bottom: 0;\n  }\n  // Account for padding we're adding to ensure the alignment and of help text\n  // and other content below items\n  .radio,\n  .checkbox {\n    min-height: (@line-height-computed + (@padding-base-vertical + 1));\n  }\n\n  // Make form groups behave like rows\n  .form-group {\n    .make-row();\n  }\n\n  // Reset spacing 
and right align labels, but scope to media queries so that\n  // labels on narrow viewports stack the same as a default form example.\n  @media (min-width: @screen-sm-min) {\n    .control-label {\n      padding-top: (@padding-base-vertical + 1); // Default padding plus a border\n      margin-bottom: 0;\n      text-align: right;\n    }\n  }\n\n  // Validation states\n  //\n  // Reposition the icon because it's now within a grid column and columns have\n  // `position: relative;` on them. Also accounts for the grid gutter padding.\n  .has-feedback .form-control-feedback {\n    right: floor((@grid-gutter-width / 2));\n  }\n\n  // Form group sizes\n  //\n  // Quick utility class for applying `.input-lg` and `.input-sm` styles to the\n  // inputs and labels within a `.form-group`.\n  .form-group-lg {\n    @media (min-width: @screen-sm-min) {\n      .control-label {\n        padding-top: (@padding-large-vertical + 1);\n        font-size: @font-size-large;\n      }\n    }\n  }\n  .form-group-sm {\n    @media (min-width: @screen-sm-min) {\n      .control-label {\n        padding-top: (@padding-small-vertical + 1);\n        font-size: @font-size-small;\n      }\n    }\n  }\n}\n","// Form validation states\n//\n// Used in forms.less to generate the form validation CSS for warnings, errors,\n// and successes.\n\n.form-control-validation(@text-color: #555; @border-color: #ccc; @background-color: #f5f5f5) {\n  // Color the label and help text\n  .help-block,\n  .control-label,\n  .radio,\n  .checkbox,\n  .radio-inline,\n  .checkbox-inline,\n  &.radio label,\n  &.checkbox label,\n  &.radio-inline label,\n  &.checkbox-inline label  {\n    color: @text-color;\n  }\n  // Set the border and box shadow on specific inputs to match\n  .form-control {\n    border-color: @border-color;\n    .box-shadow(inset 0 1px 1px rgba(0, 0, 0, .075)); // Redeclare so transitions work\n    &:focus {\n      border-color: darken(@border-color, 10%);\n      @shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px lighten(@border-color, 20%);\n      .box-shadow(@shadow);\n    }\n  }\n  // Set validation states also for addons\n  .input-group-addon {\n    color: @text-color;\n    background-color: @background-color;\n    border-color: @border-color;\n  }\n  // Optional feedback icon\n  .form-control-feedback {\n    color: @text-color;\n  }\n}\n\n\n// Form control focus state\n//\n// Generate a customized focus state and for any input with the specified color,\n// which defaults to the `@input-border-focus` variable.\n//\n// We highly encourage you to not customize the default value, but instead use\n// this to tweak colors on an as-needed basis. This aesthetic change is based on\n// WebKit's default styles, but applicable to a wider range of browsers. Its\n// usability and accessibility should be taken into account with any change.\n//\n// Example usage: change the default blue border and shadow to white for better\n// contrast against a dark gray background.\n.form-control-focus(@color: @input-border-focus) {\n  @color-rgba: rgba(red(@color), green(@color), blue(@color), .6);\n  &:focus {\n    border-color: @color;\n    outline: 0;\n    .box-shadow(~\"inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px @{color-rgba}\");\n  }\n}\n\n// Form control sizing\n//\n// Relative text size, padding, and border-radii changes for form controls. For\n// horizontal sizing, wrap controls in the predefined grid classes. 
`<select>`\n// element gets special love because it's special, and that's a fact!\n.input-size(@input-height; @padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  height: @input-height;\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n\n  select& {\n    height: @input-height;\n    line-height: @input-height;\n  }\n\n  textarea&,\n  select[multiple]& {\n    height: auto;\n  }\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// Buttons\n// --------------------------------------------------\n\n\n// Base styles\n// --------------------------------------------------\n\n.btn {\n  display: inline-block;\n  margin-bottom: 0; // For input.btn\n  font-weight: @btn-font-weight;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  touch-action: manipulation;\n  cursor: pointer;\n  background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid transparent;\n  .button-size(@padding-base-vertical; @padding-base-horizontal; @font-size-base; @line-height-base; @btn-border-radius-base);\n  .user-select(none);\n\n  &,\n  &:active,\n  &.active {\n    &:focus,\n    &.focus {\n      .tab-focus();\n    }\n  }\n\n  &:hover,\n  &:focus,\n  &.focus {\n    color: @btn-default-color;\n    text-decoration: none;\n  }\n\n  &:active,\n  &.active {\n    background-image: none;\n    outline: 0;\n    .box-shadow(inset 0 3px 5px rgba(0, 0, 0, .125));\n  }\n\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    cursor: @cursor-disabled;\n    .opacity(.65);\n    .box-shadow(none);\n  }\n\n  a& {\n    &.disabled,\n    fieldset[disabled] & {\n      pointer-events: none; // Future-proof disabling of clicks on `<a>` elements\n    }\n  }\n}\n\n\n// Alternate buttons\n// --------------------------------------------------\n\n.btn-default {\n  .button-variant(@btn-default-color; @btn-default-bg; @btn-default-border);\n}\n.btn-primary {\n  .button-variant(@btn-primary-color; @btn-primary-bg; @btn-primary-border);\n}\n// Success appears as green\n.btn-success {\n  .button-variant(@btn-success-color; @btn-success-bg; @btn-success-border);\n}\n// Info appears as blue-green\n.btn-info {\n  .button-variant(@btn-info-color; @btn-info-bg; @btn-info-border);\n}\n// Warning appears as orange\n.btn-warning {\n  .button-variant(@btn-warning-color; @btn-warning-bg; @btn-warning-border);\n}\n// Danger and error appear as red\n.btn-danger {\n  .button-variant(@btn-danger-color; @btn-danger-bg; @btn-danger-border);\n}\n\n\n// Link buttons\n// -------------------------\n\n// Make a button look and behave like a link\n.btn-link {\n  font-weight: 400;\n  color: @link-color;\n  border-radius: 0;\n\n  &,\n  &:active,\n  &.active,\n  &[disabled],\n  fieldset[disabled] & {\n    background-color: transparent;\n    .box-shadow(none);\n  }\n  &,\n  &:hover,\n  &:focus,\n  &:active {\n    border-color: transparent;\n  }\n  &:hover,\n  &:focus {\n    color: @link-hover-color;\n    text-decoration: @link-hover-decoration;\n    background-color: transparent;\n  }\n  &[disabled],\n  fieldset[disabled] & {\n    &:hover,\n    &:focus {\n      color: @btn-link-disabled-color;\n      text-decoration: none;\n    }\n  }\n}\n\n\n// Button Sizes\n// --------------------------------------------------\n\n.btn-lg {\n  // line-height: ensure even-numbered height of button next to large input\n  
.button-size(@padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @btn-border-radius-large);\n}\n.btn-sm {\n  // line-height: ensure proper height of button next to small input\n  .button-size(@padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @btn-border-radius-small);\n}\n.btn-xs {\n  .button-size(@padding-xs-vertical; @padding-xs-horizontal; @font-size-small; @line-height-small; @btn-border-radius-small);\n}\n\n\n// Block button\n// --------------------------------------------------\n\n.btn-block {\n  display: block;\n  width: 100%;\n}\n\n// Vertically space out multiple block buttons\n.btn-block + .btn-block {\n  margin-top: 5px;\n}\n\n// Specificity overrides\ninput[type=\"submit\"],\ninput[type=\"reset\"],\ninput[type=\"button\"] {\n  &.btn-block {\n    width: 100%;\n  }\n}\n","// Button variants\n//\n// Easily pump out default styles, as well as :hover, :focus, :active,\n// and disabled options for all buttons\n\n.button-variant(@color; @background; @border) {\n  color: @color;\n  background-color: @background;\n  border-color: @border;\n\n  &:focus,\n  &.focus {\n    color: @color;\n    background-color: darken(@background, 10%);\n    border-color: darken(@border, 25%);\n  }\n  &:hover {\n    color: @color;\n    background-color: darken(@background, 10%);\n    border-color: darken(@border, 12%);\n  }\n  &:active,\n  &.active,\n  .open > .dropdown-toggle& {\n    color: @color;\n    background-color: darken(@background, 10%);\n    background-image: none;\n    border-color: darken(@border, 12%);\n\n    &:hover,\n    &:focus,\n    &.focus {\n      color: @color;\n      background-color: darken(@background, 17%);\n      border-color: darken(@border, 25%);\n    }\n  }\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    &:hover,\n    &:focus,\n    &.focus {\n      background-color: @background;\n      border-color: @border;\n    }\n  }\n\n  .badge {\n    color: @background;\n    background-color: @color;\n  }\n}\n\n// Button sizes\n.button-size(@padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n}\n","// Opacity\n\n.opacity(@opacity) {\n  @opacity-ie: (@opacity * 100);  // IE8 filter\n  filter: ~\"alpha(opacity=@{opacity-ie})\";\n  opacity: @opacity;\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// Component animations\n// --------------------------------------------------\n\n// Heads up!\n//\n// We don't use the `.opacity()` mixin here since it causes a bug with text\n// fields in IE7-8. 
Source: https://github.com/twbs/bootstrap/pull/3552.\n\n.fade {\n  opacity: 0;\n  .transition(opacity .15s linear);\n\n  &.in {\n    opacity: 1;\n  }\n}\n\n.collapse {\n  display: none;\n\n  &.in      { display: block; }\n  tr&.in    { display: table-row; }\n  tbody&.in { display: table-row-group; }\n}\n\n.collapsing {\n  position: relative;\n  height: 0;\n  overflow: hidden;\n  .transition-property(~\"height, visibility\");\n  .transition-duration(.35s);\n  .transition-timing-function(ease);\n}\n","//\n// Dropdown menus\n// --------------------------------------------------\n\n\n// Dropdown arrow/caret\n.caret {\n  display: inline-block;\n  width: 0;\n  height: 0;\n  margin-left: 2px;\n  vertical-align: middle;\n  border-top: @caret-width-base dashed;\n  border-top: @caret-width-base solid ~\"\\9\"; // IE8\n  border-right: @caret-width-base solid transparent;\n  border-left: @caret-width-base solid transparent;\n}\n\n// The dropdown wrapper (div)\n.dropup,\n.dropdown {\n  position: relative;\n}\n\n// Prevent the focus on the dropdown toggle when closing dropdowns\n.dropdown-toggle:focus {\n  outline: 0;\n}\n\n// The dropdown menu (ul)\n.dropdown-menu {\n  position: absolute;\n  top: 100%;\n  left: 0;\n  z-index: @zindex-dropdown;\n  display: none; // none by default, but block on \"open\" of the menu\n  float: left;\n  min-width: 160px;\n  padding: 5px 0;\n  margin: 2px 0 0; // override default ul\n  font-size: @font-size-base;\n  text-align: left; // Ensures proper alignment if parent has it changed (e.g., modal footer)\n  list-style: none;\n  background-color: @dropdown-bg;\n  background-clip: padding-box;\n  border: 1px solid @dropdown-fallback-border; // IE8 fallback\n  border: 1px solid @dropdown-border;\n  border-radius: @border-radius-base;\n  .box-shadow(0 6px 12px rgba(0, 0, 0, .175));\n\n  // Aligns the dropdown menu to right\n  //\n  // Deprecated as of 3.1.0 in favor of `.dropdown-menu-[dir]`\n  &.pull-right {\n    right: 0;\n    left: auto;\n  }\n\n  // Dividers (basically an hr) within the dropdown\n  .divider {\n    .nav-divider(@dropdown-divider-bg);\n  }\n\n  // Links within the dropdown menu\n  > li > a {\n    display: block;\n    padding: 3px 20px;\n    clear: both;\n    font-weight: 400;\n    line-height: @line-height-base;\n    color: @dropdown-link-color;\n    white-space: nowrap; // prevent links from randomly breaking onto new lines\n\n    &:hover,\n    &:focus {\n      color: @dropdown-link-hover-color;\n      text-decoration: none;\n      background-color: @dropdown-link-hover-bg;\n    }\n  }\n}\n\n// Active state\n.dropdown-menu > .active > a {\n  &,\n  &:hover,\n  &:focus {\n    color: @dropdown-link-active-color;\n    text-decoration: none;\n    background-color: @dropdown-link-active-bg;\n    outline: 0;\n  }\n}\n\n// Disabled state\n//\n// Gray out text and ensure the hover/focus state remains gray\n\n.dropdown-menu > .disabled > a {\n  &,\n  &:hover,\n  &:focus {\n    color: @dropdown-link-disabled-color;\n  }\n\n  // Nuke hover/focus effects\n  &:hover,\n  &:focus {\n    text-decoration: none;\n    cursor: @cursor-disabled;\n    background-color: transparent;\n    background-image: none; // Remove CSS gradient\n    .reset-filter();\n  }\n}\n\n// Open state for the dropdown\n.open {\n  // Show the menu\n  > .dropdown-menu {\n    display: block;\n  }\n\n  // Remove the outline when :focus is triggered\n  > a {\n    outline: 0;\n  }\n}\n\n// Menu positioning\n//\n// Add extra class to `.dropdown-menu` to flip the alignment of the dropdown\n// menu with the 
parent.\n.dropdown-menu-right {\n  right: 0;\n  left: auto; // Reset the default from `.dropdown-menu`\n}\n// With v3, we enabled auto-flipping if you have a dropdown within a right\n// aligned nav component. To enable the undoing of that, we provide an override\n// to restore the default dropdown menu alignment.\n//\n// This is only for left-aligning a dropdown menu within a `.navbar-right` or\n// `.pull-right` nav component.\n.dropdown-menu-left {\n  right: auto;\n  left: 0;\n}\n\n// Dropdown section headers\n.dropdown-header {\n  display: block;\n  padding: 3px 20px;\n  font-size: @font-size-small;\n  line-height: @line-height-base;\n  color: @dropdown-header-color;\n  white-space: nowrap; // as with > li > a\n}\n\n// Backdrop to catch body clicks on mobile, etc.\n.dropdown-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: (@zindex-dropdown - 10);\n}\n\n// Right aligned dropdowns\n.pull-right > .dropdown-menu {\n  right: 0;\n  left: auto;\n}\n\n// Allow for dropdowns to go bottom up (aka, dropup-menu)\n//\n// Just add .dropup after the standard .dropdown class and you're set, bro.\n// TODO: abstract this so that the navbar fixed styles are not placed here?\n\n.dropup,\n.navbar-fixed-bottom .dropdown {\n  // Reverse the caret\n  .caret {\n    content: \"\";\n    border-top: 0;\n    border-bottom: @caret-width-base dashed;\n    border-bottom: @caret-width-base solid ~\"\\9\"; // IE8\n  }\n  // Different positioning for bottom up menu\n  .dropdown-menu {\n    top: auto;\n    bottom: 100%;\n    margin-bottom: 2px;\n  }\n}\n\n\n// Component alignment\n//\n// Reiterate per navbar.less and the modified component alignment there.\n\n@media (min-width: @grid-float-breakpoint) {\n  .navbar-right {\n    .dropdown-menu {\n      .dropdown-menu-right();\n    }\n    // Necessary for overrides of the default right aligned menu.\n    // Will remove come v4 in all likelihood.\n    .dropdown-menu-left {\n      .dropdown-menu-left();\n    }\n  }\n}\n","// Horizontal dividers\n//\n// Dividers (basically an hr) within dropdowns and nav lists\n\n.nav-divider(@color: #e5e5e5) {\n  height: 1px;\n  margin: ((@line-height-computed / 2) - 1) 0;\n  overflow: hidden;\n  background-color: @color;\n}\n","// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n\n.reset-filter() {\n  filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n","// stylelint-disable selector-no-qualifying-type */\n\n//\n// Button groups\n// --------------------------------------------------\n\n// Make the div behave like a button\n.btn-group,\n.btn-group-vertical {\n  position: relative;\n  display: inline-block;\n  vertical-align: middle; // match .btn alignment given font-size hack above\n  > .btn {\n    position: relative;\n    float: left;\n    // Bring the \"active\" button to the front\n    &:hover,\n    &:focus,\n    &:active,\n    &.active {\n      z-index: 2;\n    }\n  }\n}\n\n// Prevent double borders when buttons are next to each other\n.btn-group {\n  .btn + .btn,\n  .btn + .btn-group,\n  .btn-group + .btn,\n  .btn-group + .btn-group {\n    margin-left: -1px;\n  }\n}\n\n// Optional: Group multiple button groups together for a toolbar\n.btn-toolbar {\n  margin-left: -5px; // Offset the first child's margin\n  &:extend(.clearfix all);\n\n  .btn,\n  .btn-group,\n  .input-group {\n    float: left;\n  }\n  > .btn,\n  > .btn-group,\n  > .input-group {\n    margin-left: 
5px;\n  }\n}\n\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n  border-radius: 0;\n}\n\n// Set corners individual because sometimes a single button can be in a .btn-group and we need :first-child and :last-child to both match\n.btn-group > .btn:first-child {\n  margin-left: 0;\n  &:not(:last-child):not(.dropdown-toggle) {\n    .border-right-radius(0);\n  }\n}\n// Need .dropdown-toggle since :last-child doesn't apply, given that a .dropdown-menu is used immediately after it\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n  .border-left-radius(0);\n}\n\n// Custom edits for including btn-groups within btn-groups (useful for including dropdown buttons within a btn-group)\n.btn-group > .btn-group {\n  float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) {\n  > .btn:last-child,\n  > .dropdown-toggle {\n    .border-right-radius(0);\n  }\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  .border-left-radius(0);\n}\n\n// On active and open, don't show outline\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n  outline: 0;\n}\n\n\n// Sizing\n//\n// Remix the default button sizing classes into new ones for easier manipulation.\n\n.btn-group-xs > .btn { &:extend(.btn-xs); }\n.btn-group-sm > .btn { &:extend(.btn-sm); }\n.btn-group-lg > .btn { &:extend(.btn-lg); }\n\n\n// Split button dropdowns\n// ----------------------\n\n// Give the line between buttons some depth\n.btn-group > .btn + .dropdown-toggle {\n  padding-right: 8px;\n  padding-left: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n  padding-right: 12px;\n  padding-left: 12px;\n}\n\n// The clickable button for toggling the menu\n// Remove the gradient and set the same inset shadow as the :active state\n.btn-group.open .dropdown-toggle {\n  .box-shadow(inset 0 3px 5px rgba(0, 0, 0, .125));\n\n  // Show no shadow for `.btn-link` since it has no other button styles.\n  &.btn-link {\n    .box-shadow(none);\n  }\n}\n\n\n// Reposition the caret\n.btn .caret {\n  margin-left: 0;\n}\n// Carets in other button sizes\n.btn-lg .caret {\n  border-width: @caret-width-large @caret-width-large 0;\n  border-bottom-width: 0;\n}\n// Upside down carets for .dropup\n.dropup .btn-lg .caret {\n  border-width: 0 @caret-width-large @caret-width-large;\n}\n\n\n// Vertical button groups\n// ----------------------\n\n.btn-group-vertical {\n  > .btn,\n  > .btn-group,\n  > .btn-group > .btn {\n    display: block;\n    float: none;\n    width: 100%;\n    max-width: 100%;\n  }\n\n  // Clear floats so dropdown menus can be properly placed\n  > .btn-group {\n    &:extend(.clearfix all);\n    > .btn {\n      float: none;\n    }\n  }\n\n  > .btn + .btn,\n  > .btn + .btn-group,\n  > .btn-group + .btn,\n  > .btn-group + .btn-group {\n    margin-top: -1px;\n    margin-left: 0;\n  }\n}\n\n.btn-group-vertical > .btn {\n  &:not(:first-child):not(:last-child) {\n    border-radius: 0;\n  }\n  &:first-child:not(:last-child) {\n    .border-top-radius(@btn-border-radius-base);\n    .border-bottom-radius(0);\n  }\n  &:last-child:not(:first-child) {\n    .border-top-radius(0);\n    .border-bottom-radius(@btn-border-radius-base);\n  }\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) {\n  > .btn:last-child,\n  > 
.dropdown-toggle {\n    .border-bottom-radius(0);\n  }\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  .border-top-radius(0);\n}\n\n\n// Justified button groups\n// ----------------------\n\n.btn-group-justified {\n  display: table;\n  width: 100%;\n  table-layout: fixed;\n  border-collapse: separate;\n  > .btn,\n  > .btn-group {\n    display: table-cell;\n    float: none;\n    width: 1%;\n  }\n  > .btn-group .btn {\n    width: 100%;\n  }\n\n  > .btn-group .dropdown-menu {\n    left: auto;\n  }\n}\n\n\n// Checkbox and radio options\n//\n// In order to support the browser's form validation feedback, powered by the\n// `required` attribute, we have to \"hide\" the inputs via `clip`. We cannot use\n// `display: none;` or `visibility: hidden;` as that also hides the popover.\n// Simply visually hiding the inputs via `opacity` would leave them clickable in\n// certain cases which is prevented by using `clip` and `pointer-events`.\n// This way, we ensure a DOM element is visible to position the popover from.\n//\n// See https://github.com/twbs/bootstrap/pull/12794 and\n// https://github.com/twbs/bootstrap/pull/14559 for more information.\n\n[data-toggle=\"buttons\"] {\n  > .btn,\n  > .btn-group > .btn {\n    input[type=\"radio\"],\n    input[type=\"checkbox\"] {\n      position: absolute;\n      clip: rect(0, 0, 0, 0);\n      pointer-events: none;\n    }\n  }\n}\n","// Single side border-radius\n\n.border-top-radius(@radius) {\n  border-top-left-radius: @radius;\n  border-top-right-radius: @radius;\n}\n.border-right-radius(@radius) {\n  border-top-right-radius: @radius;\n  border-bottom-right-radius: @radius;\n}\n.border-bottom-radius(@radius) {\n  border-bottom-right-radius: @radius;\n  border-bottom-left-radius: @radius;\n}\n.border-left-radius(@radius) {\n  border-top-left-radius: @radius;\n  border-bottom-left-radius: @radius;\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// Input groups\n// --------------------------------------------------\n\n// Base styles\n// -------------------------\n.input-group {\n  position: relative; // For dropdowns\n  display: table;\n  border-collapse: separate; // prevent input groups from inheriting border styles from table cells when placed within a table\n\n  // Undo padding and float of grid classes\n  &[class*=\"col-\"] {\n    float: none;\n    padding-right: 0;\n    padding-left: 0;\n  }\n\n  .form-control {\n    // Ensure that the input is always above the *appended* addon button for\n    // proper border colors.\n    position: relative;\n    z-index: 2;\n\n    // IE9 fubars the placeholder attribute in text inputs and the arrows on\n    // select elements in input groups. To fix it, we float the input. 
Details:\n    // https://github.com/twbs/bootstrap/issues/11561#issuecomment-28936855\n    float: left;\n\n    width: 100%;\n    margin-bottom: 0;\n\n    &:focus {\n      z-index: 3;\n    }\n  }\n}\n\n// Sizing options\n//\n// Remix the default form control sizing classes into new ones for easier\n// manipulation.\n\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n  .input-lg();\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n  .input-sm();\n}\n\n\n// Display as table-cell\n// -------------------------\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n  display: table-cell;\n\n  &:not(:first-child):not(:last-child) {\n    border-radius: 0;\n  }\n}\n// Addon and addon wrapper for buttons\n.input-group-addon,\n.input-group-btn {\n  width: 1%;\n  white-space: nowrap;\n  vertical-align: middle; // Match the inputs\n}\n\n// Text input groups\n// -------------------------\n.input-group-addon {\n  padding: @padding-base-vertical @padding-base-horizontal;\n  font-size: @font-size-base;\n  font-weight: 400;\n  line-height: 1;\n  color: @input-color;\n  text-align: center;\n  background-color: @input-group-addon-bg;\n  border: 1px solid @input-group-addon-border-color;\n  border-radius: @input-border-radius;\n\n  // Sizing\n  &.input-sm {\n    padding: @padding-small-vertical @padding-small-horizontal;\n    font-size: @font-size-small;\n    border-radius: @input-border-radius-small;\n  }\n  &.input-lg {\n    padding: @padding-large-vertical @padding-large-horizontal;\n    font-size: @font-size-large;\n    border-radius: @input-border-radius-large;\n  }\n\n  // Nuke default margins from checkboxes and radios to vertically center within.\n  input[type=\"radio\"],\n  input[type=\"checkbox\"] {\n    margin-top: 0;\n  }\n}\n\n// Reset rounded corners\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n  .border-right-radius(0);\n}\n.input-group-addon:first-child {\n  border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n  .border-left-radius(0);\n}\n.input-group-addon:last-child {\n  border-left: 0;\n}\n\n// Button input groups\n// -------------------------\n.input-group-btn {\n  position: relative;\n  // Jankily prevent input button groups from wrapping with `white-space` and\n  // `font-size` in combination with `inline-block` on buttons.\n  font-size: 0;\n  white-space: nowrap;\n\n  // Negative margin for spacing, position for bringing hovered/focused/actived\n  // element above the siblings.\n  > .btn {\n    position: relative;\n    + .btn {\n      margin-left: -1px;\n    }\n    // Bring the \"active\" button to the front\n    &:hover,\n    &:focus,\n    &:active {\n      z-index: 2;\n    }\n  }\n\n  // Negative margin to only have a 1px border between the two\n  &:first-child {\n    > .btn,\n    > .btn-group {\n      margin-right: -1px;\n    
}\n  }\n  &:last-child {\n    > .btn,\n    > .btn-group {\n      z-index: 2;\n      margin-left: -1px;\n    }\n  }\n}\n","// stylelint-disable selector-no-qualifying-type, selector-max-type\n\n//\n// Navs\n// --------------------------------------------------\n\n\n// Base class\n// --------------------------------------------------\n\n.nav {\n  padding-left: 0; // Override default ul/ol\n  margin-bottom: 0;\n  list-style: none;\n  &:extend(.clearfix all);\n\n  > li {\n    position: relative;\n    display: block;\n\n    > a {\n      position: relative;\n      display: block;\n      padding: @nav-link-padding;\n      &:hover,\n      &:focus {\n        text-decoration: none;\n        background-color: @nav-link-hover-bg;\n      }\n    }\n\n    // Disabled state sets text to gray and nukes hover/tab effects\n    &.disabled > a {\n      color: @nav-disabled-link-color;\n\n      &:hover,\n      &:focus {\n        color: @nav-disabled-link-hover-color;\n        text-decoration: none;\n        cursor: @cursor-disabled;\n        background-color: transparent;\n      }\n    }\n  }\n\n  // Open dropdowns\n  .open > a {\n    &,\n    &:hover,\n    &:focus {\n      background-color: @nav-link-hover-bg;\n      border-color: @link-color;\n    }\n  }\n\n  // Nav dividers (deprecated with v3.0.1)\n  //\n  // This should have been removed in v3 with the dropping of `.nav-list`, but\n  // we missed it. We don't currently support this anywhere, but in the interest\n  // of maintaining backward compatibility in case you use it, it's deprecated.\n  .nav-divider {\n    .nav-divider();\n  }\n\n  // Prevent IE8 from misplacing imgs\n  //\n  // See https://github.com/h5bp/html5-boilerplate/issues/984#issuecomment-3985989\n  > li > a > img {\n    max-width: none;\n  }\n}\n\n\n// Tabs\n// -------------------------\n\n// Give the tabs something to sit on\n.nav-tabs {\n  border-bottom: 1px solid @nav-tabs-border-color;\n  > li {\n    float: left;\n    // Make the list-items overlay the bottom border\n    margin-bottom: -1px;\n\n    // Actual tabs (as links)\n    > a {\n      margin-right: 2px;\n      line-height: @line-height-base;\n      border: 1px solid transparent;\n      border-radius: @border-radius-base @border-radius-base 0 0;\n      &:hover {\n        border-color: @nav-tabs-link-hover-border-color @nav-tabs-link-hover-border-color @nav-tabs-border-color;\n      }\n    }\n\n    // Active state, and its :hover to override normal :hover\n    &.active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @nav-tabs-active-link-hover-color;\n        cursor: default;\n        background-color: @nav-tabs-active-link-hover-bg;\n        border: 1px solid @nav-tabs-active-link-hover-border-color;\n        border-bottom-color: transparent;\n      }\n    }\n  }\n  // pulling this in mainly for less shorthand\n  &.nav-justified {\n    .nav-justified();\n    .nav-tabs-justified();\n  }\n}\n\n\n// Pills\n// -------------------------\n.nav-pills {\n  > li {\n    float: left;\n\n    // Links rendered as pills\n    > a {\n      border-radius: @nav-pills-border-radius;\n    }\n    + li {\n      margin-left: 2px;\n    }\n\n    // Active state\n    &.active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @nav-pills-active-link-hover-color;\n        background-color: @nav-pills-active-link-hover-bg;\n      }\n    }\n  }\n}\n\n\n// Stacked pills\n.nav-stacked {\n  > li {\n    float: none;\n    + li {\n      margin-top: 2px;\n      margin-left: 0; // no need for this gap between nav items\n    }\n  
}\n}\n\n\n// Nav variations\n// --------------------------------------------------\n\n// Justified nav links\n// -------------------------\n\n.nav-justified {\n  width: 100%;\n\n  > li {\n    float: none;\n    > a {\n      margin-bottom: 5px;\n      text-align: center;\n    }\n  }\n\n  > .dropdown .dropdown-menu {\n    top: auto;\n    left: auto;\n  }\n\n  @media (min-width: @screen-sm-min) {\n    > li {\n      display: table-cell;\n      width: 1%;\n      > a {\n        margin-bottom: 0;\n      }\n    }\n  }\n}\n\n// Move borders to anchors instead of bottom of list\n//\n// Mixin for adding on top the shared `.nav-justified` styles for our tabs\n.nav-tabs-justified {\n  border-bottom: 0;\n\n  > li > a {\n    // Override margin from .nav-tabs\n    margin-right: 0;\n    border-radius: @border-radius-base;\n  }\n\n  > .active > a,\n  > .active > a:hover,\n  > .active > a:focus {\n    border: 1px solid @nav-tabs-justified-link-border-color;\n  }\n\n  @media (min-width: @screen-sm-min) {\n    > li > a {\n      border-bottom: 1px solid @nav-tabs-justified-link-border-color;\n      border-radius: @border-radius-base @border-radius-base 0 0;\n    }\n    > .active > a,\n    > .active > a:hover,\n    > .active > a:focus {\n      border-bottom-color: @nav-tabs-justified-active-link-border-color;\n    }\n  }\n}\n\n\n// Tabbable tabs\n// -------------------------\n\n// Hide tabbable panes to start, show them when `.active`\n.tab-content {\n  > .tab-pane {\n    display: none;\n  }\n  > .active {\n    display: block;\n  }\n}\n\n\n// Dropdowns\n// -------------------------\n\n// Specific dropdowns\n.nav-tabs .dropdown-menu {\n  // make dropdown border overlap tab border\n  margin-top: -1px;\n  // Remove the top rounded corners here since there is a hard edge above the menu\n  .border-top-radius(0);\n}\n","// stylelint-disable selector-max-type, selector-max-compound-selectors, selector-max-combinators, selector-max-class, declaration-no-important, selector-no-qualifying-type\n\n//\n// Navbars\n// --------------------------------------------------\n\n\n// Wrapper and base class\n//\n// Provide a static navbar from which we expand to create full-width, fixed, and\n// other navbar variations.\n\n.navbar {\n  position: relative;\n  min-height: @navbar-height; // Ensure a navbar always shows (e.g., without a .navbar-brand in collapsed mode)\n  margin-bottom: @navbar-margin-bottom;\n  border: 1px solid transparent;\n\n  // Prevent floats from breaking the navbar\n  &:extend(.clearfix all);\n\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: @navbar-border-radius;\n  }\n}\n\n\n// Navbar heading\n//\n// Groups `.navbar-brand` and `.navbar-toggle` into a single component for easy\n// styling of responsive aspects.\n\n.navbar-header {\n  &:extend(.clearfix all);\n\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n  }\n}\n\n\n// Navbar collapse (body)\n//\n// Group your navbar content into this for easy collapsing and expanding across\n// various device sizes. 
By default, this content is collapsed when <768px, but\n// will expand past that for a horizontal display.\n//\n// To start (on mobile devices) the navbar links, forms, and buttons are stacked\n// vertically and include a `max-height` to overflow in case you have too much\n// content for the user's viewport.\n\n.navbar-collapse {\n  padding-right: @navbar-padding-horizontal;\n  padding-left: @navbar-padding-horizontal;\n  overflow-x: visible;\n  border-top: 1px solid transparent;\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);\n  &:extend(.clearfix all);\n  -webkit-overflow-scrolling: touch;\n\n  &.in {\n    overflow-y: auto;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    width: auto;\n    border-top: 0;\n    box-shadow: none;\n\n    &.collapse {\n      display: block !important;\n      height: auto !important;\n      padding-bottom: 0; // Override default setting\n      overflow: visible !important;\n    }\n\n    &.in {\n      overflow-y: visible;\n    }\n\n    // Undo the collapse side padding for navbars with containers to ensure\n    // alignment of right-aligned contents.\n    .navbar-fixed-top &,\n    .navbar-static-top &,\n    .navbar-fixed-bottom & {\n      padding-right: 0;\n      padding-left: 0;\n    }\n  }\n}\n\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  .navbar-collapse {\n    max-height: @navbar-collapse-max-height;\n\n    @media (max-device-width: @screen-xs-min) and (orientation: landscape) {\n      max-height: 200px;\n    }\n  }\n\n  // Fix the top/bottom navbars when screen real estate supports it\n  position: fixed;\n  right: 0;\n  left: 0;\n  z-index: @zindex-navbar-fixed;\n\n  // Undo the rounded corners\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: 0;\n  }\n}\n\n.navbar-fixed-top {\n  top: 0;\n  border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n  bottom: 0;\n  margin-bottom: 0; // override .navbar defaults\n  border-width: 1px 0 0;\n}\n\n\n// Both navbar header and collapse\n//\n// When a container is present, change the behavior of the header and collapse.\n\n.container,\n.container-fluid {\n  > .navbar-header,\n  > .navbar-collapse {\n    margin-right: -@navbar-padding-horizontal;\n    margin-left: -@navbar-padding-horizontal;\n\n    @media (min-width: @grid-float-breakpoint) {\n      margin-right: 0;\n      margin-left: 0;\n    }\n  }\n}\n\n\n//\n// Navbar alignment options\n//\n// Display the navbar across the entirety of the page or fixed it to the top or\n// bottom of the page.\n\n// Static top (unfixed, but 100% wide) navbar\n.navbar-static-top {\n  z-index: @zindex-navbar;\n  border-width: 0 0 1px;\n\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: 0;\n  }\n}\n\n\n// Brand/project name\n\n.navbar-brand {\n  float: left;\n  height: @navbar-height;\n  padding: @navbar-padding-vertical @navbar-padding-horizontal;\n  font-size: @font-size-large;\n  line-height: @line-height-computed;\n\n  &:hover,\n  &:focus {\n    text-decoration: none;\n  }\n\n  > img {\n    display: block;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    .navbar > .container &,\n    .navbar > .container-fluid & {\n      margin-left: -@navbar-padding-horizontal;\n    }\n  }\n}\n\n\n// Navbar toggle\n//\n// Custom button for toggling the `.navbar-collapse`, powered by the collapse\n// JavaScript plugin.\n\n.navbar-toggle {\n  position: relative;\n  float: right;\n  padding: 9px 10px;\n  margin-right: @navbar-padding-horizontal;\n  .navbar-vertical-align(34px);\n  background-color: transparent;\n  background-image: none; // 
Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid transparent;\n  border-radius: @border-radius-base;\n\n  // We remove the `outline` here, but later compensate by attaching `:hover`\n  // styles to `:focus`.\n  &:focus {\n    outline: 0;\n  }\n\n  // Bars\n  .icon-bar {\n    display: block;\n    width: 22px;\n    height: 2px;\n    border-radius: 1px;\n  }\n  .icon-bar + .icon-bar {\n    margin-top: 4px;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    display: none;\n  }\n}\n\n\n// Navbar nav links\n//\n// Builds on top of the `.nav` components with its own modifier class to make\n// the nav the full height of the horizontal nav (above 768px).\n\n.navbar-nav {\n  margin: (@navbar-padding-vertical / 2) -@navbar-padding-horizontal;\n\n  > li > a {\n    padding-top: 10px;\n    padding-bottom: 10px;\n    line-height: @line-height-computed;\n  }\n\n  @media (max-width: @grid-float-breakpoint-max) {\n    // Dropdowns get custom display when collapsed\n    .open .dropdown-menu {\n      position: static;\n      float: none;\n      width: auto;\n      margin-top: 0;\n      background-color: transparent;\n      border: 0;\n      box-shadow: none;\n      > li > a,\n      .dropdown-header {\n        padding: 5px 15px 5px 25px;\n      }\n      > li > a {\n        line-height: @line-height-computed;\n        &:hover,\n        &:focus {\n          background-image: none;\n        }\n      }\n    }\n  }\n\n  // Uncollapse the nav\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n    margin: 0;\n\n    > li {\n      float: left;\n      > a {\n        padding-top: @navbar-padding-vertical;\n        padding-bottom: @navbar-padding-vertical;\n      }\n    }\n  }\n}\n\n\n// Navbar form\n//\n// Extension of the `.form-inline` with some extra flavor for optimum display in\n// our navbars.\n\n.navbar-form {\n  padding: 10px @navbar-padding-horizontal;\n  margin-right: -@navbar-padding-horizontal;\n  margin-left: -@navbar-padding-horizontal;\n  border-top: 1px solid transparent;\n  border-bottom: 1px solid transparent;\n  @shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);\n  .box-shadow(@shadow);\n\n  // Mixin behavior for optimum display\n  .form-inline();\n\n  .form-group {\n    @media (max-width: @grid-float-breakpoint-max) {\n      margin-bottom: 5px;\n\n      &:last-child {\n        margin-bottom: 0;\n      }\n    }\n  }\n\n  // Vertically center in expanded, horizontal navbar\n  .navbar-vertical-align(@input-height-base);\n\n  // Undo 100% width for pull classes\n  @media (min-width: @grid-float-breakpoint) {\n    width: auto;\n    padding-top: 0;\n    padding-bottom: 0;\n    margin-right: 0;\n    margin-left: 0;\n    border: 0;\n    .box-shadow(none);\n  }\n}\n\n\n// Dropdown menus\n\n// Menu position and menu carets\n.navbar-nav > li > .dropdown-menu {\n  margin-top: 0;\n  .border-top-radius(0);\n}\n// Menu position and menu caret support for dropups via extra dropup class\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n  margin-bottom: 0;\n  .border-top-radius(@navbar-border-radius);\n  .border-bottom-radius(0);\n}\n\n\n// Buttons in navbars\n//\n// Vertically center a button within a navbar (when *not* in a form).\n\n.navbar-btn {\n  .navbar-vertical-align(@input-height-base);\n\n  &.btn-sm {\n    .navbar-vertical-align(@input-height-small);\n  }\n  &.btn-xs {\n    .navbar-vertical-align(22);\n  }\n}\n\n\n// Text in navbars\n//\n// Add a class to make any element 
properly align itself vertically within the navbars.\n\n.navbar-text {\n  .navbar-vertical-align(@line-height-computed);\n\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n    margin-right: @navbar-padding-horizontal;\n    margin-left: @navbar-padding-horizontal;\n  }\n}\n\n\n// Component alignment\n//\n// Repurpose the pull utilities as their own navbar utilities to avoid specificity\n// issues with parents and chaining. Only do this when the navbar is uncollapsed\n// though so that navbar contents properly stack and align in mobile.\n//\n// Declared after the navbar components to ensure more specificity on the margins.\n\n@media (min-width: @grid-float-breakpoint) {\n  .navbar-left  { .pull-left(); }\n  .navbar-right {\n    .pull-right();\n    margin-right: -@navbar-padding-horizontal;\n\n    ~ .navbar-right {\n      margin-right: 0;\n    }\n  }\n}\n\n\n// Alternate navbars\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n  background-color: @navbar-default-bg;\n  border-color: @navbar-default-border;\n\n  .navbar-brand {\n    color: @navbar-default-brand-color;\n    &:hover,\n    &:focus {\n      color: @navbar-default-brand-hover-color;\n      background-color: @navbar-default-brand-hover-bg;\n    }\n  }\n\n  .navbar-text {\n    color: @navbar-default-color;\n  }\n\n  .navbar-nav {\n    > li > a {\n      color: @navbar-default-link-color;\n\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-hover-color;\n        background-color: @navbar-default-link-hover-bg;\n      }\n    }\n    > .active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-active-color;\n        background-color: @navbar-default-link-active-bg;\n      }\n    }\n    > .disabled > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-disabled-color;\n        background-color: @navbar-default-link-disabled-bg;\n      }\n    }\n\n    // Dropdown menu items\n    // Remove background color from open dropdown\n    > .open > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-active-color;\n        background-color: @navbar-default-link-active-bg;\n      }\n    }\n\n    @media (max-width: @grid-float-breakpoint-max) {\n      // Dropdowns get custom display when collapsed\n      .open .dropdown-menu {\n        > li > a {\n          color: @navbar-default-link-color;\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-hover-color;\n            background-color: @navbar-default-link-hover-bg;\n          }\n        }\n        > .active > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-active-color;\n            background-color: @navbar-default-link-active-bg;\n          }\n        }\n        > .disabled > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-disabled-color;\n            background-color: @navbar-default-link-disabled-bg;\n          }\n        }\n      }\n    }\n  }\n\n  .navbar-toggle {\n    border-color: @navbar-default-toggle-border-color;\n    &:hover,\n    &:focus {\n      background-color: @navbar-default-toggle-hover-bg;\n    }\n    .icon-bar {\n      background-color: @navbar-default-toggle-icon-bar-bg;\n    }\n  }\n\n  .navbar-collapse,\n  .navbar-form {\n    border-color: @navbar-default-border;\n  }\n\n\n  // Links in navbars\n  //\n  // Add a class to ensure links outside the navbar nav are colored 
correctly.\n\n  .navbar-link {\n    color: @navbar-default-link-color;\n    &:hover {\n      color: @navbar-default-link-hover-color;\n    }\n  }\n\n  .btn-link {\n    color: @navbar-default-link-color;\n    &:hover,\n    &:focus {\n      color: @navbar-default-link-hover-color;\n    }\n    &[disabled],\n    fieldset[disabled] & {\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-disabled-color;\n      }\n    }\n  }\n}\n\n// Inverse navbar\n\n.navbar-inverse {\n  background-color: @navbar-inverse-bg;\n  border-color: @navbar-inverse-border;\n\n  .navbar-brand {\n    color: @navbar-inverse-brand-color;\n    &:hover,\n    &:focus {\n      color: @navbar-inverse-brand-hover-color;\n      background-color: @navbar-inverse-brand-hover-bg;\n    }\n  }\n\n  .navbar-text {\n    color: @navbar-inverse-color;\n  }\n\n  .navbar-nav {\n    > li > a {\n      color: @navbar-inverse-link-color;\n\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-hover-color;\n        background-color: @navbar-inverse-link-hover-bg;\n      }\n    }\n    > .active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-active-color;\n        background-color: @navbar-inverse-link-active-bg;\n      }\n    }\n    > .disabled > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-disabled-color;\n        background-color: @navbar-inverse-link-disabled-bg;\n      }\n    }\n\n    // Dropdowns\n    > .open > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-active-color;\n        background-color: @navbar-inverse-link-active-bg;\n      }\n    }\n\n    @media (max-width: @grid-float-breakpoint-max) {\n      // Dropdowns get custom display\n      .open .dropdown-menu {\n        > .dropdown-header {\n          border-color: @navbar-inverse-border;\n        }\n        .divider {\n          background-color: @navbar-inverse-border;\n        }\n        > li > a {\n          color: @navbar-inverse-link-color;\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-hover-color;\n            background-color: @navbar-inverse-link-hover-bg;\n          }\n        }\n        > .active > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-active-color;\n            background-color: @navbar-inverse-link-active-bg;\n          }\n        }\n        > .disabled > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-disabled-color;\n            background-color: @navbar-inverse-link-disabled-bg;\n          }\n        }\n      }\n    }\n  }\n\n  // Darken the responsive nav toggle\n  .navbar-toggle {\n    border-color: @navbar-inverse-toggle-border-color;\n    &:hover,\n    &:focus {\n      background-color: @navbar-inverse-toggle-hover-bg;\n    }\n    .icon-bar {\n      background-color: @navbar-inverse-toggle-icon-bar-bg;\n    }\n  }\n\n  .navbar-collapse,\n  .navbar-form {\n    border-color: darken(@navbar-inverse-bg, 7%);\n  }\n\n  .navbar-link {\n    color: @navbar-inverse-link-color;\n    &:hover {\n      color: @navbar-inverse-link-hover-color;\n    }\n  }\n\n  .btn-link {\n    color: @navbar-inverse-link-color;\n    &:hover,\n    &:focus {\n      color: @navbar-inverse-link-hover-color;\n    }\n    &[disabled],\n    fieldset[disabled] & {\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-disabled-color;\n      }\n    }\n  }\n}\n","// Navbar vertical align\n//\n// 
Vertically center elements in the navbar.\n// Example: an element has a height of 30px, so write out `.navbar-vertical-align(30px);` to calculate the appropriate top margin.\n\n.navbar-vertical-align(@element-height) {\n  margin-top: ((@navbar-height - @element-height) / 2);\n  margin-bottom: ((@navbar-height - @element-height) / 2);\n}\n","// stylelint-disable declaration-no-important\n\n//\n// Utility classes\n// --------------------------------------------------\n\n\n// Floats\n// -------------------------\n\n.clearfix {\n  .clearfix();\n}\n.center-block {\n  .center-block();\n}\n.pull-right {\n  float: right !important;\n}\n.pull-left {\n  float: left !important;\n}\n\n\n// Toggling content\n// -------------------------\n\n// Note: Deprecated .hide in favor of .hidden or .sr-only (as appropriate) in v3.0.1\n.hide {\n  display: none !important;\n}\n.show {\n  display: block !important;\n}\n.invisible {\n  visibility: hidden;\n}\n.text-hide {\n  .text-hide();\n}\n\n\n// Hide from screenreaders and browsers\n//\n// Credit: HTML5 Boilerplate\n\n.hidden {\n  display: none !important;\n}\n\n\n// For Affix plugin\n// -------------------------\n\n.affix {\n  position: fixed;\n}\n","//\n// Breadcrumbs\n// --------------------------------------------------\n\n\n.breadcrumb {\n  padding: @breadcrumb-padding-vertical @breadcrumb-padding-horizontal;\n  margin-bottom: @line-height-computed;\n  list-style: none;\n  background-color: @breadcrumb-bg;\n  border-radius: @border-radius-base;\n\n  > li {\n    display: inline-block;\n\n    + li:before {\n      padding: 0 5px;\n      color: @breadcrumb-color;\n      content: \"@{breadcrumb-separator}\\00a0\"; // Unicode space added since inline-block means non-collapsing white-space\n    }\n  }\n\n  > .active {\n    color: @breadcrumb-active-color;\n  }\n}\n","//\n// Pagination (multiple pages)\n// --------------------------------------------------\n.pagination {\n  display: inline-block;\n  padding-left: 0;\n  margin: @line-height-computed 0;\n  border-radius: @border-radius-base;\n\n  > li {\n    display: inline; // Remove list-style and block-level defaults\n    > a,\n    > span {\n      position: relative;\n      float: left; // Collapse white-space\n      padding: @padding-base-vertical @padding-base-horizontal;\n      margin-left: -1px;\n      line-height: @line-height-base;\n      color: @pagination-color;\n      text-decoration: none;\n      background-color: @pagination-bg;\n      border: 1px solid @pagination-border;\n\n      &:hover,\n      &:focus {\n        z-index: 2;\n        color: @pagination-hover-color;\n        background-color: @pagination-hover-bg;\n        border-color: @pagination-hover-border;\n      }\n    }\n    &:first-child {\n      > a,\n      > span {\n        margin-left: 0;\n        .border-left-radius(@border-radius-base);\n      }\n    }\n    &:last-child {\n      > a,\n      > span {\n        .border-right-radius(@border-radius-base);\n      }\n    }\n  }\n\n  > .active > a,\n  > .active > span {\n    &,\n    &:hover,\n    &:focus {\n      z-index: 3;\n      color: @pagination-active-color;\n      cursor: default;\n      background-color: @pagination-active-bg;\n      border-color: @pagination-active-border;\n    }\n  }\n\n  > .disabled {\n    > span,\n    > span:hover,\n    > span:focus,\n    > a,\n    > a:hover,\n    > a:focus {\n      color: @pagination-disabled-color;\n      cursor: @cursor-disabled;\n      background-color: @pagination-disabled-bg;\n      border-color: @pagination-disabled-border;\n    }\n  }\n}\n\n// 
Sizing\n// --------------------------------------------------\n\n// Large\n.pagination-lg {\n  .pagination-size(@padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @border-radius-large);\n}\n\n// Small\n.pagination-sm {\n  .pagination-size(@padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @border-radius-small);\n}\n","// Pagination\n\n.pagination-size(@padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  > li {\n    > a,\n    > span {\n      padding: @padding-vertical @padding-horizontal;\n      font-size: @font-size;\n      line-height: @line-height;\n    }\n    &:first-child {\n      > a,\n      > span {\n        .border-left-radius(@border-radius);\n      }\n    }\n    &:last-child {\n      > a,\n      > span {\n        .border-right-radius(@border-radius);\n      }\n    }\n  }\n}\n","//\n// Pager pagination\n// --------------------------------------------------\n\n\n.pager {\n  padding-left: 0;\n  margin: @line-height-computed 0;\n  text-align: center;\n  list-style: none;\n  &:extend(.clearfix all);\n  li {\n    display: inline;\n    > a,\n    > span {\n      display: inline-block;\n      padding: 5px 14px;\n      background-color: @pager-bg;\n      border: 1px solid @pager-border;\n      border-radius: @pager-border-radius;\n    }\n\n    > a:hover,\n    > a:focus {\n      text-decoration: none;\n      background-color: @pager-hover-bg;\n    }\n  }\n\n  .next {\n    > a,\n    > span {\n      float: right;\n    }\n  }\n\n  .previous {\n    > a,\n    > span {\n      float: left;\n    }\n  }\n\n  .disabled {\n    > a,\n    > a:hover,\n    > a:focus,\n    > span {\n      color: @pager-disabled-color;\n      cursor: @cursor-disabled;\n      background-color: @pager-bg;\n    }\n  }\n}\n","//\n// Labels\n// --------------------------------------------------\n\n.label {\n  display: inline;\n  padding: .2em .6em .3em;\n  font-size: 75%;\n  font-weight: 700;\n  line-height: 1;\n  color: @label-color;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  border-radius: .25em;\n\n  // Add hover effects, but only for links\n  a& {\n    &:hover,\n    &:focus {\n      color: @label-link-hover-color;\n      text-decoration: none;\n      cursor: pointer;\n    }\n  }\n\n  // Empty labels collapse automatically (not available in IE8)\n  &:empty {\n    display: none;\n  }\n\n  // Quick fix for labels in buttons\n  .btn & {\n    position: relative;\n    top: -1px;\n  }\n}\n\n// Colors\n// Contextual variations (linked labels get darker on :hover)\n\n.label-default {\n  .label-variant(@label-default-bg);\n}\n\n.label-primary {\n  .label-variant(@label-primary-bg);\n}\n\n.label-success {\n  .label-variant(@label-success-bg);\n}\n\n.label-info {\n  .label-variant(@label-info-bg);\n}\n\n.label-warning {\n  .label-variant(@label-warning-bg);\n}\n\n.label-danger {\n  .label-variant(@label-danger-bg);\n}\n","// Labels\n\n.label-variant(@color) {\n  background-color: @color;\n\n  &[href] {\n    &:hover,\n    &:focus {\n      background-color: darken(@color, 10%);\n    }\n  }\n}\n","//\n// Badges\n// --------------------------------------------------\n\n\n// Base class\n.badge {\n  display: inline-block;\n  min-width: 10px;\n  padding: 3px 7px;\n  font-size: @font-size-small;\n  font-weight: @badge-font-weight;\n  line-height: @badge-line-height;\n  color: @badge-color;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  background-color: @badge-bg;\n  
border-radius: @badge-border-radius;\n\n  // Empty badges collapse automatically (not available in IE8)\n  &:empty {\n    display: none;\n  }\n\n  // Quick fix for badges in buttons\n  .btn & {\n    position: relative;\n    top: -1px;\n  }\n\n  .btn-xs &,\n  .btn-group-xs > .btn & {\n    top: 0;\n    padding: 1px 5px;\n  }\n\n  // Hover state, but only for links\n  a& {\n    &:hover,\n    &:focus {\n      color: @badge-link-hover-color;\n      text-decoration: none;\n      cursor: pointer;\n    }\n  }\n\n  // Account for badges in navs\n  .list-group-item.active > &,\n  .nav-pills > .active > a > & {\n    color: @badge-active-color;\n    background-color: @badge-active-bg;\n  }\n\n  .list-group-item > & {\n    float: right;\n  }\n\n  .list-group-item > & + & {\n    margin-right: 5px;\n  }\n\n  .nav-pills > li > a > & {\n    margin-left: 3px;\n  }\n}\n","//\n// Jumbotron\n// --------------------------------------------------\n\n\n.jumbotron {\n  padding-top: @jumbotron-padding;\n  padding-bottom: @jumbotron-padding;\n  margin-bottom: @jumbotron-padding;\n  color: @jumbotron-color;\n  background-color: @jumbotron-bg;\n\n  h1,\n  .h1 {\n    color: @jumbotron-heading-color;\n  }\n\n  p {\n    margin-bottom: (@jumbotron-padding / 2);\n    font-size: @jumbotron-font-size;\n    font-weight: 200;\n  }\n\n  > hr {\n    border-top-color: darken(@jumbotron-bg, 10%);\n  }\n\n  .container &,\n  .container-fluid & {\n    padding-right: (@grid-gutter-width / 2);\n    padding-left: (@grid-gutter-width / 2);\n    border-radius: @border-radius-large; // Only round corners at higher resolutions if contained in a container\n  }\n\n  .container {\n    max-width: 100%;\n  }\n\n  @media screen and (min-width: @screen-sm-min) {\n    padding-top: (@jumbotron-padding * 1.6);\n    padding-bottom: (@jumbotron-padding * 1.6);\n\n    .container &,\n    .container-fluid & {\n      padding-right: (@jumbotron-padding * 2);\n      padding-left: (@jumbotron-padding * 2);\n    }\n\n    h1,\n    .h1 {\n      font-size: @jumbotron-heading-font-size;\n    }\n  }\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// Thumbnails\n// --------------------------------------------------\n\n\n// Mixin and adjust the regular image class\n.thumbnail {\n  display: block;\n  padding: @thumbnail-padding;\n  margin-bottom: @line-height-computed;\n  line-height: @line-height-base;\n  background-color: @thumbnail-bg;\n  border: 1px solid @thumbnail-border;\n  border-radius: @thumbnail-border-radius;\n  .transition(border .2s ease-in-out);\n\n  > img,\n  a > img {\n    &:extend(.img-responsive);\n    margin-right: auto;\n    margin-left: auto;\n  }\n\n  // Add a hover state for linked versions only\n  a&:hover,\n  a&:focus,\n  a&.active {\n    border-color: @link-color;\n  }\n\n  // Image captions\n  .caption {\n    padding: @thumbnail-caption-padding;\n    color: @thumbnail-caption-color;\n  }\n}\n","//\n// Alerts\n// --------------------------------------------------\n\n\n// Base styles\n// -------------------------\n\n.alert {\n  padding: @alert-padding;\n  margin-bottom: @line-height-computed;\n  border: 1px solid transparent;\n  border-radius: @alert-border-radius;\n\n  // Headings for larger alerts\n  h4 {\n    margin-top: 0;\n    color: inherit; // Specified for the h4 to prevent conflicts of changing @headings-color\n  }\n\n  // Provide class for links that match alerts\n  .alert-link {\n    font-weight: @alert-link-font-weight;\n  }\n\n  // Improve alignment and spacing of inner content\n  > p,\n  > ul {\n    margin-bottom: 
0;\n  }\n\n  > p + p {\n    margin-top: 5px;\n  }\n}\n\n// Dismissible alerts\n//\n// Expand the right padding and account for the close button's positioning.\n\n// The misspelled .alert-dismissable was deprecated in 3.2.0.\n.alert-dismissable,\n.alert-dismissible {\n  padding-right: (@alert-padding + 20);\n\n  // Adjust close link position\n  .close {\n    position: relative;\n    top: -2px;\n    right: -21px;\n    color: inherit;\n  }\n}\n\n// Alternate styles\n//\n// Generate contextual modifier classes for colorizing the alert.\n\n.alert-success {\n  .alert-variant(@alert-success-bg; @alert-success-border; @alert-success-text);\n}\n\n.alert-info {\n  .alert-variant(@alert-info-bg; @alert-info-border; @alert-info-text);\n}\n\n.alert-warning {\n  .alert-variant(@alert-warning-bg; @alert-warning-border; @alert-warning-text);\n}\n\n.alert-danger {\n  .alert-variant(@alert-danger-bg; @alert-danger-border; @alert-danger-text);\n}\n","// Alerts\n\n.alert-variant(@background; @border; @text-color) {\n  color: @text-color;\n  background-color: @background;\n  border-color: @border;\n\n  hr {\n    border-top-color: darken(@border, 5%);\n  }\n\n  .alert-link {\n    color: darken(@text-color, 10%);\n  }\n}\n","// stylelint-disable at-rule-no-vendor-prefix\n\n//\n// Progress bars\n// --------------------------------------------------\n\n\n// Bar animations\n// -------------------------\n\n// WebKit\n@-webkit-keyframes progress-bar-stripes {\n  from  { background-position: 40px 0; }\n  to    { background-position: 0 0; }\n}\n\n// Spec and IE10+\n@keyframes progress-bar-stripes {\n  from  { background-position: 40px 0; }\n  to    { background-position: 0 0; }\n}\n\n\n// Bar itself\n// -------------------------\n\n// Outer container\n.progress {\n  height: @line-height-computed;\n  margin-bottom: @line-height-computed;\n  overflow: hidden;\n  background-color: @progress-bg;\n  border-radius: @progress-border-radius;\n  .box-shadow(inset 0 1px 2px rgba(0, 0, 0, .1));\n}\n\n// Bar of progress\n.progress-bar {\n  float: left;\n  width: 0%;\n  height: 100%;\n  font-size: @font-size-small;\n  line-height: @line-height-computed;\n  color: @progress-bar-color;\n  text-align: center;\n  background-color: @progress-bar-bg;\n  .box-shadow(inset 0 -1px 0 rgba(0, 0, 0, .15));\n  .transition(width .6s ease);\n}\n\n// Striped bars\n//\n// `.progress-striped .progress-bar` is deprecated as of v3.2.0 in favor of the\n// `.progress-bar-striped` class, which you just add to an existing\n// `.progress-bar`.\n.progress-striped .progress-bar,\n.progress-bar-striped {\n  #gradient > .striped();\n  background-size: 40px 40px;\n}\n\n// Call animation for the active one\n//\n// `.progress.active .progress-bar` is deprecated as of v3.2.0 in favor of the\n// `.progress-bar.active` approach.\n.progress.active .progress-bar,\n.progress-bar.active {\n  .animation(progress-bar-stripes 2s linear infinite);\n}\n\n\n// Variations\n// -------------------------\n\n.progress-bar-success {\n  .progress-bar-variant(@progress-bar-success-bg);\n}\n\n.progress-bar-info {\n  .progress-bar-variant(@progress-bar-info-bg);\n}\n\n.progress-bar-warning {\n  .progress-bar-variant(@progress-bar-warning-bg);\n}\n\n.progress-bar-danger {\n  .progress-bar-variant(@progress-bar-danger-bg);\n}\n","// stylelint-disable value-no-vendor-prefix, selector-max-id\n\n#gradient {\n\n  // Horizontal gradient, from left to right\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not 
available in IE9 and below.\n  .horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Opera 12\n    background-image: linear-gradient(to right, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down\n    background-repeat: repeat-x;\n  }\n\n  // Vertical gradient, from top to bottom\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Opera 12\n    background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down\n    background-repeat: repeat-x;\n  }\n\n  .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n    background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(@deg, @start-color, @end-color); // Opera 12\n    background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n  }\n  .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n    background-repeat: no-repeat;\n  }\n  .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n    background-repeat: no-repeat;\n  }\n  .radial(@inner-color: #555; @outer-color: #333) {\n    background-image: 
-webkit-radial-gradient(circle, @inner-color, @outer-color);\n    background-image: radial-gradient(circle, @inner-color, @outer-color);\n    background-repeat: no-repeat;\n  }\n  .striped(@color: rgba(255, 255, 255, .15); @angle: 45deg) {\n    background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: -o-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n  }\n}\n","// Progress bars\n\n.progress-bar-variant(@color) {\n  background-color: @color;\n\n  // Deprecated parent class requirement as of v3.2.0\n  .progress-striped & {\n    #gradient > .striped();\n  }\n}\n",".media {\n  // Proper spacing between instances of .media\n  margin-top: 15px;\n\n  &:first-child {\n    margin-top: 0;\n  }\n}\n\n.media,\n.media-body {\n  overflow: hidden;\n  zoom: 1;\n}\n\n.media-body {\n  width: 10000px;\n}\n\n.media-object {\n  display: block;\n\n  // Fix collapse in webkit from max-width: 100% and display: table-cell.\n  &.img-thumbnail {\n    max-width: none;\n  }\n}\n\n.media-right,\n.media > .pull-right {\n  padding-left: 10px;\n}\n\n.media-left,\n.media > .pull-left {\n  padding-right: 10px;\n}\n\n.media-left,\n.media-right,\n.media-body {\n  display: table-cell;\n  vertical-align: top;\n}\n\n.media-middle {\n  vertical-align: middle;\n}\n\n.media-bottom {\n  vertical-align: bottom;\n}\n\n// Reset margins on headings for tighter default spacing\n.media-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n\n// Media list variation\n//\n// Undo default ul/ol styles\n.media-list {\n  padding-left: 0;\n  list-style: none;\n}\n","// stylelint-disable selector-no-qualifying-type\n\n//\n// List groups\n// --------------------------------------------------\n\n\n// Base class\n//\n// Easily usable on <ul>, <ol>, or <div>.\n\n.list-group {\n  // No need to set list-style: none; since .list-group-item is block level\n  padding-left: 0; // reset padding because ul and ol\n  margin-bottom: 20px;\n}\n\n\n// Individual list items\n//\n// Use on `li`s or `div`s within the `.list-group` parent.\n\n.list-group-item {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n  // Place the border on the list items and negative margin up for better styling\n  margin-bottom: -1px;\n  background-color: @list-group-bg;\n  border: 1px solid @list-group-border;\n\n  // Round the first and last items\n  &:first-child {\n    .border-top-radius(@list-group-border-radius);\n  }\n  &:last-child {\n    margin-bottom: 0;\n    .border-bottom-radius(@list-group-border-radius);\n  }\n\n  // Disabled state\n  &.disabled,\n  &.disabled:hover,\n  &.disabled:focus {\n    color: @list-group-disabled-color;\n    cursor: @cursor-disabled;\n    background-color: @list-group-disabled-bg;\n\n    // Force color to inherit for custom content\n    .list-group-item-heading {\n      color: inherit;\n    }\n    .list-group-item-text {\n      color: @list-group-disabled-text-color;\n    }\n  }\n\n  // Active class on item itself, not parent\n  &.active,\n  &.active:hover,\n  &.active:focus {\n    z-index: 2; // Place active items above their siblings for proper border styling\n    color: @list-group-active-color;\n    background-color: @list-group-active-bg;\n    border-color: @list-group-active-border;\n\n    
// Force color to inherit for custom content\n    .list-group-item-heading,\n    .list-group-item-heading > small,\n    .list-group-item-heading > .small {\n      color: inherit;\n    }\n    .list-group-item-text {\n      color: @list-group-active-text-color;\n    }\n  }\n}\n\n\n// Interactive list items\n//\n// Use anchor or button elements instead of `li`s or `div`s to create interactive items.\n// Includes an extra `.active` modifier class for showing selected items.\n\na.list-group-item,\nbutton.list-group-item {\n  color: @list-group-link-color;\n\n  .list-group-item-heading {\n    color: @list-group-link-heading-color;\n  }\n\n  // Hover state\n  &:hover,\n  &:focus {\n    color: @list-group-link-hover-color;\n    text-decoration: none;\n    background-color: @list-group-hover-bg;\n  }\n}\n\nbutton.list-group-item {\n  width: 100%;\n  text-align: left;\n}\n\n\n// Contextual variants\n//\n// Add modifier classes to change text and background color on individual items.\n// Organizationally, this must come after the `:hover` states.\n\n.list-group-item-variant(success; @state-success-bg; @state-success-text);\n.list-group-item-variant(info; @state-info-bg; @state-info-text);\n.list-group-item-variant(warning; @state-warning-bg; @state-warning-text);\n.list-group-item-variant(danger; @state-danger-bg; @state-danger-text);\n\n\n// Custom content options\n//\n// Extra classes for creating well-formatted content within `.list-group-item`s.\n\n.list-group-item-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.list-group-item-text {\n  margin-bottom: 0;\n  line-height: 1.3;\n}\n","// List Groups\n\n.list-group-item-variant(@state; @background; @color) {\n  .list-group-item-@{state} {\n    color: @color;\n    background-color: @background;\n\n    a&,\n    button& {\n      color: @color;\n\n      .list-group-item-heading {\n        color: inherit;\n      }\n\n      &:hover,\n      &:focus {\n        color: @color;\n        background-color: darken(@background, 5%);\n      }\n      &.active,\n      &.active:hover,\n      &.active:focus {\n        color: #fff;\n        background-color: @color;\n        border-color: @color;\n      }\n    }\n  }\n}\n","// stylelint-disable selector-max-type, selector-max-compound-selectors, selector-max-combinators, no-duplicate-selectors\n\n//\n// Panels\n// --------------------------------------------------\n\n\n// Base class\n.panel {\n  margin-bottom: @line-height-computed;\n  background-color: @panel-bg;\n  border: 1px solid transparent;\n  border-radius: @panel-border-radius;\n  .box-shadow(0 1px 1px rgba(0, 0, 0, .05));\n}\n\n// Panel contents\n.panel-body {\n  padding: @panel-body-padding;\n  &:extend(.clearfix all);\n}\n\n// Optional heading\n.panel-heading {\n  padding: @panel-heading-padding;\n  border-bottom: 1px solid transparent;\n  .border-top-radius((@panel-border-radius - 1));\n\n  > .dropdown .dropdown-toggle {\n    color: inherit;\n  }\n}\n\n// Within heading, strip any `h*` tag of its default margins for spacing.\n.panel-title {\n  margin-top: 0;\n  margin-bottom: 0;\n  font-size: ceil((@font-size-base * 1.125));\n  color: inherit;\n\n  > a,\n  > small,\n  > .small,\n  > small > a,\n  > .small > a {\n    color: inherit;\n  }\n}\n\n// Optional footer (stays gray in every modifier class)\n.panel-footer {\n  padding: @panel-footer-padding;\n  background-color: @panel-footer-bg;\n  border-top: 1px solid @panel-inner-border;\n  .border-bottom-radius((@panel-border-radius - 1));\n}\n\n\n// List groups in panels\n//\n// By default, space out 
list group content from panel headings to account for\n// any kind of custom content between the two.\n\n.panel {\n  > .list-group,\n  > .panel-collapse > .list-group {\n    margin-bottom: 0;\n\n    .list-group-item {\n      border-width: 1px 0;\n      border-radius: 0;\n    }\n\n    // Add border top radius for first one\n    &:first-child {\n      .list-group-item:first-child {\n        border-top: 0;\n        .border-top-radius((@panel-border-radius - 1));\n      }\n    }\n\n    // Add border bottom radius for last one\n    &:last-child {\n      .list-group-item:last-child {\n        border-bottom: 0;\n        .border-bottom-radius((@panel-border-radius - 1));\n      }\n    }\n  }\n  > .panel-heading + .panel-collapse > .list-group {\n    .list-group-item:first-child {\n      .border-top-radius(0);\n    }\n  }\n}\n// Collapse space between when there's no additional content.\n.panel-heading + .list-group {\n  .list-group-item:first-child {\n    border-top-width: 0;\n  }\n}\n.list-group + .panel-footer {\n  border-top-width: 0;\n}\n\n// Tables in panels\n//\n// Place a non-bordered `.table` within a panel (not within a `.panel-body`) and\n// watch it go full width.\n\n.panel {\n  > .table,\n  > .table-responsive > .table,\n  > .panel-collapse > .table {\n    margin-bottom: 0;\n\n    caption {\n      padding-right: @panel-body-padding;\n      padding-left: @panel-body-padding;\n    }\n  }\n  // Add border top radius for first one\n  > .table:first-child,\n  > .table-responsive:first-child > .table:first-child {\n    .border-top-radius((@panel-border-radius - 1));\n\n    > thead:first-child,\n    > tbody:first-child {\n      > tr:first-child {\n        border-top-left-radius: (@panel-border-radius - 1);\n        border-top-right-radius: (@panel-border-radius - 1);\n\n        td:first-child,\n        th:first-child {\n          border-top-left-radius: (@panel-border-radius - 1);\n        }\n        td:last-child,\n        th:last-child {\n          border-top-right-radius: (@panel-border-radius - 1);\n        }\n      }\n    }\n  }\n  // Add border bottom radius for last one\n  > .table:last-child,\n  > .table-responsive:last-child > .table:last-child {\n    .border-bottom-radius((@panel-border-radius - 1));\n\n    > tbody:last-child,\n    > tfoot:last-child {\n      > tr:last-child {\n        border-bottom-right-radius: (@panel-border-radius - 1);\n        border-bottom-left-radius: (@panel-border-radius - 1);\n\n        td:first-child,\n        th:first-child {\n          border-bottom-left-radius: (@panel-border-radius - 1);\n        }\n        td:last-child,\n        th:last-child {\n          border-bottom-right-radius: (@panel-border-radius - 1);\n        }\n      }\n    }\n  }\n  > .panel-body + .table,\n  > .panel-body + .table-responsive,\n  > .table + .panel-body,\n  > .table-responsive + .panel-body {\n    border-top: 1px solid @table-border-color;\n  }\n  > .table > tbody:first-child > tr:first-child th,\n  > .table > tbody:first-child > tr:first-child td {\n    border-top: 0;\n  }\n  > .table-bordered,\n  > .table-responsive > .table-bordered {\n    border: 0;\n    > thead,\n    > tbody,\n    > tfoot {\n      > tr {\n        > th:first-child,\n        > td:first-child {\n          border-left: 0;\n        }\n        > th:last-child,\n        > td:last-child {\n          border-right: 0;\n        }\n      }\n    }\n    > thead,\n    > tbody {\n      > tr:first-child {\n        > td,\n        > th {\n          border-bottom: 0;\n        }\n      }\n    }\n    > tbody,\n    > 
tfoot {\n      > tr:last-child {\n        > td,\n        > th {\n          border-bottom: 0;\n        }\n      }\n    }\n  }\n  > .table-responsive {\n    margin-bottom: 0;\n    border: 0;\n  }\n}\n\n\n// Collapsible panels (aka, accordion)\n//\n// Wrap a series of panels in `.panel-group` to turn them into an accordion with\n// the help of our collapse JavaScript plugin.\n\n.panel-group {\n  margin-bottom: @line-height-computed;\n\n  // Tighten up margin so it's only between panels\n  .panel {\n    margin-bottom: 0;\n    border-radius: @panel-border-radius;\n\n    + .panel {\n      margin-top: 5px;\n    }\n  }\n\n  .panel-heading {\n    border-bottom: 0;\n\n    + .panel-collapse > .panel-body,\n    + .panel-collapse > .list-group {\n      border-top: 1px solid @panel-inner-border;\n    }\n  }\n\n  .panel-footer {\n    border-top: 0;\n    + .panel-collapse .panel-body {\n      border-bottom: 1px solid @panel-inner-border;\n    }\n  }\n}\n\n\n// Contextual variations\n.panel-default {\n  .panel-variant(@panel-default-border; @panel-default-text; @panel-default-heading-bg; @panel-default-border);\n}\n.panel-primary {\n  .panel-variant(@panel-primary-border; @panel-primary-text; @panel-primary-heading-bg; @panel-primary-border);\n}\n.panel-success {\n  .panel-variant(@panel-success-border; @panel-success-text; @panel-success-heading-bg; @panel-success-border);\n}\n.panel-info {\n  .panel-variant(@panel-info-border; @panel-info-text; @panel-info-heading-bg; @panel-info-border);\n}\n.panel-warning {\n  .panel-variant(@panel-warning-border; @panel-warning-text; @panel-warning-heading-bg; @panel-warning-border);\n}\n.panel-danger {\n  .panel-variant(@panel-danger-border; @panel-danger-text; @panel-danger-heading-bg; @panel-danger-border);\n}\n","// Panels\n\n.panel-variant(@border; @heading-text-color; @heading-bg-color; @heading-border) {\n  border-color: @border;\n\n  & > .panel-heading {\n    color: @heading-text-color;\n    background-color: @heading-bg-color;\n    border-color: @heading-border;\n\n    + .panel-collapse > .panel-body {\n      border-top-color: @border;\n    }\n    .badge {\n      color: @heading-bg-color;\n      background-color: @heading-text-color;\n    }\n  }\n  & > .panel-footer {\n    + .panel-collapse > .panel-body {\n      border-bottom-color: @border;\n    }\n  }\n}\n","// Embeds responsive\n//\n// Credit: Nicolas Gallagher and SUIT CSS.\n\n.embed-responsive {\n  position: relative;\n  display: block;\n  height: 0;\n  padding: 0;\n  overflow: hidden;\n\n  .embed-responsive-item,\n  iframe,\n  embed,\n  object,\n  video {\n    position: absolute;\n    top: 0;\n    bottom: 0;\n    left: 0;\n    width: 100%;\n    height: 100%;\n    border: 0;\n  }\n}\n\n// Modifier class for 16:9 aspect ratio\n.embed-responsive-16by9 {\n  padding-bottom: 56.25%;\n}\n\n// Modifier class for 4:3 aspect ratio\n.embed-responsive-4by3 {\n  padding-bottom: 75%;\n}\n","//\n// Wells\n// --------------------------------------------------\n\n\n// Base class\n.well {\n  min-height: 20px;\n  padding: 19px;\n  margin-bottom: 20px;\n  background-color: @well-bg;\n  border: 1px solid @well-border;\n  border-radius: @border-radius-base;\n  .box-shadow(inset 0 1px 1px rgba(0, 0, 0, .05));\n  blockquote {\n    border-color: #ddd;\n    border-color: rgba(0, 0, 0, .15);\n  }\n}\n\n// Sizes\n.well-lg {\n  padding: 24px;\n  border-radius: @border-radius-large;\n}\n.well-sm {\n  padding: 9px;\n  border-radius: @border-radius-small;\n}\n","// stylelint-disable property-no-vendor-prefix\n\n//\n// Close icons\n// 
--------------------------------------------------\n\n\n.close {\n  float: right;\n  font-size: (@font-size-base * 1.5);\n  font-weight: @close-font-weight;\n  line-height: 1;\n  color: @close-color;\n  text-shadow: @close-text-shadow;\n  .opacity(.2);\n\n  &:hover,\n  &:focus {\n    color: @close-color;\n    text-decoration: none;\n    cursor: pointer;\n    .opacity(.5);\n  }\n\n  // Additional properties for button version\n  // iOS requires the button element instead of an anchor tag.\n  // If you want the anchor version, it requires `href=\"#\"`.\n  // See https://developer.mozilla.org/en-US/docs/Web/Events/click#Safari_Mobile\n  button& {\n    padding: 0;\n    cursor: pointer;\n    background: transparent;\n    border: 0;\n    -webkit-appearance: none;\n    appearance: none;\n  }\n}\n","//\n// Modals\n// --------------------------------------------------\n\n// .modal-open      - body class for killing the scroll\n// .modal           - container to scroll within\n// .modal-dialog    - positioning shell for the actual modal\n// .modal-content   - actual modal w/ bg and corners and shit\n\n// Kill the scroll on the body\n.modal-open {\n  overflow: hidden;\n}\n\n// Container that the modal scrolls within\n.modal {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: @zindex-modal;\n  display: none;\n  overflow: hidden;\n  -webkit-overflow-scrolling: touch;\n\n  // Prevent Chrome on Windows from adding a focus outline. For details, see\n  // https://github.com/twbs/bootstrap/pull/10951.\n  outline: 0;\n\n  // When fading in the modal, animate it to slide down\n  &.fade .modal-dialog {\n    .translate(0, -25%);\n    .transition-transform(~\"0.3s ease-out\");\n  }\n  &.in .modal-dialog { .translate(0, 0); }\n}\n.modal-open .modal {\n  overflow-x: hidden;\n  overflow-y: auto;\n}\n\n// Shell div to position the modal with bottom padding\n.modal-dialog {\n  position: relative;\n  width: auto;\n  margin: 10px;\n}\n\n// Actual modal\n.modal-content {\n  position: relative;\n  background-color: @modal-content-bg;\n  background-clip: padding-box;\n  border: 1px solid @modal-content-fallback-border-color; //old browsers fallback (ie8 etc)\n  border: 1px solid @modal-content-border-color;\n  border-radius: @border-radius-large;\n  .box-shadow(0 3px 9px rgba(0, 0, 0, .5));\n  // Remove focus outline from opened modal\n  outline: 0;\n}\n\n// Modal background\n.modal-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: @zindex-modal-background;\n  background-color: @modal-backdrop-bg;\n  // Fade for backdrop\n  &.fade { .opacity(0); }\n  &.in { .opacity(@modal-backdrop-opacity); }\n}\n\n// Modal header\n// Top section of the modal w/ title and dismiss\n.modal-header {\n  padding: @modal-title-padding;\n  border-bottom: 1px solid @modal-header-border-color;\n  &:extend(.clearfix all);\n}\n// Close icon\n.modal-header .close {\n  margin-top: -2px;\n}\n\n// Title text within header\n.modal-title {\n  margin: 0;\n  line-height: @modal-title-line-height;\n}\n\n// Modal body\n// Where all modal content resides (sibling of .modal-header and .modal-footer)\n.modal-body {\n  position: relative;\n  padding: @modal-inner-padding;\n}\n\n// Footer (for actions)\n.modal-footer {\n  padding: @modal-inner-padding;\n  text-align: right; // right align buttons\n  border-top: 1px solid @modal-footer-border-color;\n  &:extend(.clearfix all); // clear it in case folks use .pull-* classes on buttons\n\n  // Properly space out buttons\n  .btn + .btn {\n    
margin-bottom: 0; // account for input[type=\"submit\"] which gets the bottom margin like all other inputs\n    margin-left: 5px;\n  }\n  // but override that for button groups\n  .btn-group .btn + .btn {\n    margin-left: -1px;\n  }\n  // and override it for block buttons as well\n  .btn-block + .btn-block {\n    margin-left: 0;\n  }\n}\n\n// Measure scrollbar width for padding body during modal show/hide\n.modal-scrollbar-measure {\n  position: absolute;\n  top: -9999px;\n  width: 50px;\n  height: 50px;\n  overflow: scroll;\n}\n\n// Scale up the modal\n@media (min-width: @screen-sm-min) {\n  // Automatically set modal's width for larger viewports\n  .modal-dialog {\n    width: @modal-md;\n    margin: 30px auto;\n  }\n  .modal-content {\n    .box-shadow(0 5px 15px rgba(0, 0, 0, .5));\n  }\n\n  // Modal sizes\n  .modal-sm { width: @modal-sm; }\n}\n\n@media (min-width: @screen-md-min) {\n  .modal-lg { width: @modal-lg; }\n}\n","//\n// Tooltips\n// --------------------------------------------------\n\n\n// Base class\n.tooltip {\n  position: absolute;\n  z-index: @zindex-tooltip;\n  display: block;\n  // Our parent element can be arbitrary since tooltips are by default inserted as a sibling of their target element.\n  // So reset our font and text properties to avoid inheriting weird values.\n  .reset-text();\n  font-size: @font-size-small;\n\n  .opacity(0);\n\n  &.in { .opacity(@tooltip-opacity); }\n  &.top {\n    padding: @tooltip-arrow-width 0;\n    margin-top: -3px;\n  }\n  &.right {\n    padding: 0 @tooltip-arrow-width;\n    margin-left: 3px;\n  }\n  &.bottom {\n    padding: @tooltip-arrow-width 0;\n    margin-top: 3px;\n  }\n  &.left {\n    padding: 0 @tooltip-arrow-width;\n    margin-left: -3px;\n  }\n\n  // Note: Deprecated .top-left, .top-right, .bottom-left, and .bottom-right as of v3.3.1\n  &.top .tooltip-arrow {\n    bottom: 0;\n    left: 50%;\n    margin-left: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.top-left .tooltip-arrow {\n    right: @tooltip-arrow-width;\n    bottom: 0;\n    margin-bottom: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.top-right .tooltip-arrow {\n    bottom: 0;\n    left: @tooltip-arrow-width;\n    margin-bottom: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.right .tooltip-arrow {\n    top: 50%;\n    left: 0;\n    margin-top: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-right-color: @tooltip-arrow-color;\n  }\n  &.left .tooltip-arrow {\n    top: 50%;\n    right: 0;\n    margin-top: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-left-color: @tooltip-arrow-color;\n  }\n  &.bottom .tooltip-arrow {\n    top: 0;\n    left: 50%;\n    margin-left: -@tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n  &.bottom-left .tooltip-arrow {\n    top: 0;\n    right: @tooltip-arrow-width;\n    margin-top: -@tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n  &.bottom-right .tooltip-arrow {\n    top: 0;\n    left: @tooltip-arrow-width;\n    margin-top: 
-@tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n}\n\n// Wrapper for the tooltip content\n.tooltip-inner {\n  max-width: @tooltip-max-width;\n  padding: 3px 8px;\n  color: @tooltip-color;\n  text-align: center;\n  background-color: @tooltip-bg;\n  border-radius: @border-radius-base;\n}\n\n// Arrows\n.tooltip-arrow {\n  position: absolute;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n",".reset-text() {\n  font-family: @font-family-base;\n  // We deliberately do NOT reset font-size.\n  font-style: normal;\n  font-weight: 400;\n  line-height: @line-height-base;\n  line-break: auto;\n  text-align: left; // Fallback for where `start` is not supported\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n}\n","//\n// Popovers\n// --------------------------------------------------\n\n\n.popover {\n  position: absolute;\n  top: 0;\n  left: 0;\n  z-index: @zindex-popover;\n  display: none;\n  max-width: @popover-max-width;\n  padding: 1px;\n  // Our parent element can be arbitrary since popovers are by default inserted as a sibling of their target element.\n  // So reset our font and text properties to avoid inheriting weird values.\n  .reset-text();\n  font-size: @font-size-base;\n  background-color: @popover-bg;\n  background-clip: padding-box;\n  border: 1px solid @popover-fallback-border-color;\n  border: 1px solid @popover-border-color;\n  border-radius: @border-radius-large;\n  .box-shadow(0 5px 10px rgba(0, 0, 0, .2));\n\n  // Offset the popover to account for the popover arrow\n  &.top { margin-top: -@popover-arrow-width; }\n  &.right { margin-left: @popover-arrow-width; }\n  &.bottom { margin-top: @popover-arrow-width; }\n  &.left { margin-left: -@popover-arrow-width; }\n\n  // Arrows\n  // .arrow is outer, .arrow:after is inner\n  > .arrow {\n    border-width: @popover-arrow-outer-width;\n\n    &,\n    &:after {\n      position: absolute;\n      display: block;\n      width: 0;\n      height: 0;\n      border-color: transparent;\n      border-style: solid;\n    }\n\n    &:after {\n      content: \"\";\n      border-width: @popover-arrow-width;\n    }\n  }\n\n  &.top > .arrow {\n    bottom: -@popover-arrow-outer-width;\n    left: 50%;\n    margin-left: -@popover-arrow-outer-width;\n    border-top-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-top-color: @popover-arrow-outer-color;\n    border-bottom-width: 0;\n    &:after {\n      bottom: 1px;\n      margin-left: -@popover-arrow-width;\n      content: \" \";\n      border-top-color: @popover-arrow-color;\n      border-bottom-width: 0;\n    }\n  }\n  &.right > .arrow {\n    top: 50%;\n    left: -@popover-arrow-outer-width;\n    margin-top: -@popover-arrow-outer-width;\n    border-right-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-right-color: @popover-arrow-outer-color;\n    border-left-width: 0;\n    &:after {\n      bottom: -@popover-arrow-width;\n      left: 1px;\n      content: \" \";\n      border-right-color: @popover-arrow-color;\n      border-left-width: 0;\n    }\n  }\n  &.bottom > .arrow {\n    top: -@popover-arrow-outer-width;\n    left: 50%;\n    margin-left: -@popover-arrow-outer-width;\n    border-top-width: 0;\n    border-bottom-color: @popover-arrow-outer-fallback-color; // IE8 
fallback\n    border-bottom-color: @popover-arrow-outer-color;\n    &:after {\n      top: 1px;\n      margin-left: -@popover-arrow-width;\n      content: \" \";\n      border-top-width: 0;\n      border-bottom-color: @popover-arrow-color;\n    }\n  }\n\n  &.left > .arrow {\n    top: 50%;\n    right: -@popover-arrow-outer-width;\n    margin-top: -@popover-arrow-outer-width;\n    border-right-width: 0;\n    border-left-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-left-color: @popover-arrow-outer-color;\n    &:after {\n      right: 1px;\n      bottom: -@popover-arrow-width;\n      content: \" \";\n      border-right-width: 0;\n      border-left-color: @popover-arrow-color;\n    }\n  }\n}\n\n.popover-title {\n  padding: 8px 14px;\n  margin: 0; // reset heading margin\n  font-size: @font-size-base;\n  background-color: @popover-title-bg;\n  border-bottom: 1px solid darken(@popover-title-bg, 5%);\n  border-radius: (@border-radius-large - 1) (@border-radius-large - 1) 0 0;\n}\n\n.popover-content {\n  padding: 9px 14px;\n}\n","// stylelint-disable media-feature-name-no-unknown\n\n//\n// Carousel\n// --------------------------------------------------\n\n\n// Wrapper for the slide container and indicators\n.carousel {\n  position: relative;\n}\n\n.carousel-inner {\n  position: relative;\n  width: 100%;\n  overflow: hidden;\n\n  > .item {\n    position: relative;\n    display: none;\n    .transition(.6s ease-in-out left);\n\n    // Account for jankitude on images\n    > img,\n    > a > img {\n      &:extend(.img-responsive);\n      line-height: 1;\n    }\n\n    // WebKit CSS3 transforms for supported devices\n    @media all and (transform-3d), (-webkit-transform-3d) {\n      .transition-transform(~\"0.6s ease-in-out\");\n      .backface-visibility(~\"hidden\");\n      .perspective(1000px);\n\n      &.next,\n      &.active.right {\n        .translate3d(100%, 0, 0);\n        left: 0;\n      }\n      &.prev,\n      &.active.left {\n        .translate3d(-100%, 0, 0);\n        left: 0;\n      }\n      &.next.left,\n      &.prev.right,\n      &.active {\n        .translate3d(0, 0, 0);\n        left: 0;\n      }\n    }\n  }\n\n  > .active,\n  > .next,\n  > .prev {\n    display: block;\n  }\n\n  > .active {\n    left: 0;\n  }\n\n  > .next,\n  > .prev {\n    position: absolute;\n    top: 0;\n    width: 100%;\n  }\n\n  > .next {\n    left: 100%;\n  }\n  > .prev {\n    left: -100%;\n  }\n  > .next.left,\n  > .prev.right {\n    left: 0;\n  }\n\n  > .active.left {\n    left: -100%;\n  }\n  > .active.right {\n    left: 100%;\n  }\n\n}\n\n// Left/right controls for nav\n// ---------------------------\n\n.carousel-control {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: @carousel-control-width;\n  font-size: @carousel-control-font-size;\n  color: @carousel-control-color;\n  text-align: center;\n  text-shadow: @carousel-text-shadow;\n  background-color: rgba(0, 0, 0, 0); // Fix IE9 click-thru bug\n  .opacity(@carousel-control-opacity);\n  // We can't have this transition here because WebKit cancels the carousel\n  // animation if you trip this while in the middle of another animation.\n\n  // Set gradients for backgrounds\n  &.left {\n    #gradient > .horizontal(@start-color: rgba(0, 0, 0, .5); @end-color: rgba(0, 0, 0, .0001));\n  }\n  &.right {\n    right: 0;\n    left: auto;\n    #gradient > .horizontal(@start-color: rgba(0, 0, 0, .0001); @end-color: rgba(0, 0, 0, .5));\n  }\n\n  // Hover/focus state\n  &:hover,\n  &:focus {\n    color: @carousel-control-color;\n   
 text-decoration: none;\n    outline: 0;\n    .opacity(.9);\n  }\n\n  // Toggles\n  .icon-prev,\n  .icon-next,\n  .glyphicon-chevron-left,\n  .glyphicon-chevron-right {\n    position: absolute;\n    top: 50%;\n    z-index: 5;\n    display: inline-block;\n    margin-top: -10px;\n  }\n  .icon-prev,\n  .glyphicon-chevron-left {\n    left: 50%;\n    margin-left: -10px;\n  }\n  .icon-next,\n  .glyphicon-chevron-right {\n    right: 50%;\n    margin-right: -10px;\n  }\n  .icon-prev,\n  .icon-next {\n    width: 20px;\n    height: 20px;\n    font-family: serif;\n    line-height: 1;\n  }\n\n  .icon-prev {\n    &:before {\n      content: \"\\2039\";// SINGLE LEFT-POINTING ANGLE QUOTATION MARK (U+2039)\n    }\n  }\n  .icon-next {\n    &:before {\n      content: \"\\203a\";// SINGLE RIGHT-POINTING ANGLE QUOTATION MARK (U+203A)\n    }\n  }\n}\n\n// Optional indicator pips\n//\n// Add an unordered list with the following class and add a list item for each\n// slide your carousel holds.\n\n.carousel-indicators {\n  position: absolute;\n  bottom: 10px;\n  left: 50%;\n  z-index: 15;\n  width: 60%;\n  padding-left: 0;\n  margin-left: -30%;\n  text-align: center;\n  list-style: none;\n\n  li {\n    display: inline-block;\n    width: 10px;\n    height: 10px;\n    margin: 1px;\n    text-indent: -999px;\n    cursor: pointer;\n    // IE8-9 hack for event handling\n    //\n    // Internet Explorer 8-9 does not support clicks on elements without a set\n    // `background-color`. We cannot use `filter` since that's not viewed as a\n    // background color by the browser. Thus, a hack is needed.\n    // See https://developer.mozilla.org/en-US/docs/Web/Events/click#Internet_Explorer\n    //\n    // For IE8, we set solid black as it doesn't support `rgba()`. For IE9, we\n    // set alpha transparency for the best results possible.\n    background-color: #000 \\9; // IE8\n    background-color: rgba(0, 0, 0, 0); // IE9\n\n    border: 1px solid @carousel-indicator-border-color;\n    border-radius: 10px;\n  }\n\n  .active {\n    width: 12px;\n    height: 12px;\n    margin: 0;\n    background-color: @carousel-indicator-active-bg;\n  }\n}\n\n// Optional captions\n// -----------------------------\n// Hidden by default for smaller viewports\n.carousel-caption {\n  position: absolute;\n  right: 15%;\n  bottom: 20px;\n  left: 15%;\n  z-index: 10;\n  padding-top: 20px;\n  padding-bottom: 20px;\n  color: @carousel-caption-color;\n  text-align: center;\n  text-shadow: @carousel-text-shadow;\n\n  & .btn {\n    text-shadow: none; // No shadow for button elements in carousel-caption\n  }\n}\n\n\n// Scale up controls for tablets and up\n@media screen and (min-width: @screen-sm-min) {\n\n  // Scale up the controls a smidge\n  .carousel-control {\n    .glyphicon-chevron-left,\n    .glyphicon-chevron-right,\n    .icon-prev,\n    .icon-next {\n      width: (@carousel-control-font-size * 1.5);\n      height: (@carousel-control-font-size * 1.5);\n      margin-top: (@carousel-control-font-size / -2);\n      font-size: (@carousel-control-font-size * 1.5);\n    }\n    .glyphicon-chevron-left,\n    .icon-prev {\n      margin-left: (@carousel-control-font-size / -2);\n    }\n    .glyphicon-chevron-right,\n    .icon-next {\n      margin-right: (@carousel-control-font-size / -2);\n    }\n  }\n\n  // Show and left align the captions\n  .carousel-caption {\n    right: 20%;\n    left: 20%;\n    padding-bottom: 30px;\n  }\n\n  // Move up the indicators\n  .carousel-indicators {\n    bottom: 20px;\n  }\n}\n","// Clearfix\n//\n// For modern browsers\n// 
1. The space content is one way to avoid an Opera bug when the\n//    contenteditable attribute is included anywhere else in the document.\n//    Otherwise it causes space to appear at the top and bottom of elements\n//    that are clearfixed.\n// 2. The use of `table` rather than `block` is only necessary if using\n//    `:before` to contain the top-margins of child elements.\n//\n// Source: http://nicolasgallagher.com/micro-clearfix-hack/\n\n.clearfix() {\n  &:before,\n  &:after {\n    display: table; // 2\n    content: \" \"; // 1\n  }\n  &:after {\n    clear: both;\n  }\n}\n","// Center-align a block level element\n\n.center-block() {\n  display: block;\n  margin-right: auto;\n  margin-left: auto;\n}\n","// stylelint-disable font-family-name-quotes, font-family-no-missing-generic-family-keyword\n\n// CSS image replacement\n//\n// Heads up! v3 launched with only `.hide-text()`, but per our pattern for\n// mixins being reused as classes with the same name, this doesn't hold up. As\n// of v3.0.1 we have added `.text-hide()` and deprecated `.hide-text()`.\n//\n// Source: https://github.com/h5bp/html5-boilerplate/commit/aa0396eae757\n\n// Deprecated as of v3.0.1 (has been removed in v4)\n.hide-text() {\n  font: ~\"0/0\" a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n\n// New mixin to use as of v3.0.1\n.text-hide() {\n  .hide-text();\n}\n","// stylelint-disable declaration-no-important, at-rule-no-vendor-prefix\n\n//\n// Responsive: Utility classes\n// --------------------------------------------------\n\n\n// IE10 in Windows (Phone) 8\n//\n// Support for responsive views via media queries is kind of borked in IE10, for\n// Surface/desktop in split view and for Windows Phone 8. This particular fix\n// must be accompanied by a snippet of JavaScript to sniff the user agent and\n// apply some conditional CSS to *only* the Surface/desktop Windows 8. 
Look at\n// our Getting Started page for more information on this bug.\n//\n// For more information, see the following:\n//\n// Issue: https://github.com/twbs/bootstrap/issues/10497\n// Docs: https://getbootstrap.com/docs/3.4/getting-started/#support-ie10-width\n// Source: https://timkadlec.com/2013/01/windows-phone-8-and-device-width/\n// Source: https://timkadlec.com/2012/10/ie10-snap-mode-and-responsive-design/\n\n@-ms-viewport {\n  width: device-width;\n}\n\n\n// Visibility utilities\n// Note: Deprecated .visible-xs, .visible-sm, .visible-md, and .visible-lg as of v3.2.0\n.visible-xs,\n.visible-sm,\n.visible-md,\n.visible-lg {\n  .responsive-invisibility();\n}\n\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n  display: none !important;\n}\n\n.visible-xs {\n  @media (max-width: @screen-xs-max) {\n    .responsive-visibility();\n  }\n}\n.visible-xs-block {\n  @media (max-width: @screen-xs-max) {\n    display: block !important;\n  }\n}\n.visible-xs-inline {\n  @media (max-width: @screen-xs-max) {\n    display: inline !important;\n  }\n}\n.visible-xs-inline-block {\n  @media (max-width: @screen-xs-max) {\n    display: inline-block !important;\n  }\n}\n\n.visible-sm {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    .responsive-visibility();\n  }\n}\n.visible-sm-block {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    display: block !important;\n  }\n}\n.visible-sm-inline {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    display: inline !important;\n  }\n}\n.visible-sm-inline-block {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    display: inline-block !important;\n  }\n}\n\n.visible-md {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    .responsive-visibility();\n  }\n}\n.visible-md-block {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    display: block !important;\n  }\n}\n.visible-md-inline {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    display: inline !important;\n  }\n}\n.visible-md-inline-block {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    display: inline-block !important;\n  }\n}\n\n.visible-lg {\n  @media (min-width: @screen-lg-min) {\n    .responsive-visibility();\n  }\n}\n.visible-lg-block {\n  @media (min-width: @screen-lg-min) {\n    display: block !important;\n  }\n}\n.visible-lg-inline {\n  @media (min-width: @screen-lg-min) {\n    display: inline !important;\n  }\n}\n.visible-lg-inline-block {\n  @media (min-width: @screen-lg-min) {\n    display: inline-block !important;\n  }\n}\n\n.hidden-xs {\n  @media (max-width: @screen-xs-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-sm {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-md {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-lg {\n  @media (min-width: @screen-lg-min) {\n    .responsive-invisibility();\n  }\n}\n\n\n// Print utilities\n//\n// Media queries are placed on the inside to be mixin-friendly.\n\n// Note: Deprecated .visible-print as of v3.2.0\n.visible-print {\n  .responsive-invisibility();\n\n  @media print 
{\n    .responsive-visibility();\n  }\n}\n.visible-print-block {\n  display: none !important;\n\n  @media print {\n    display: block !important;\n  }\n}\n.visible-print-inline {\n  display: none !important;\n\n  @media print {\n    display: inline !important;\n  }\n}\n.visible-print-inline-block {\n  display: none !important;\n\n  @media print {\n    display: inline-block !important;\n  }\n}\n\n.hidden-print {\n  @media print {\n    .responsive-invisibility();\n  }\n}\n","// stylelint-disable declaration-no-important\n\n.responsive-visibility() {\n  display: block !important;\n  table&  { display: table !important; }\n  tr&     { display: table-row !important; }\n  th&,\n  td&     { display: table-cell !important; }\n}\n\n.responsive-invisibility() {\n  display: none !important;\n}\n"]}
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.eot b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.eot
deleted file mode 100644
index b93a495..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.eot
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.svg b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.svg
deleted file mode 100644
index f155876..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.svg
+++ /dev/null
@@ -1,288 +0,0 @@
-<?xml version="1.0" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
-<svg xmlns="http://www.w3.org/2000/svg">
-<metadata></metadata>
-<defs>
-<font id="glyphicons_halflingsregular" horiz-adv-x="1200" >
-<font-face units-per-em="1200" ascent="960" descent="-240" />
-<missing-glyph horiz-adv-x="500" />
-<glyph horiz-adv-x="0" />
-<glyph horiz-adv-x="400" />
-<glyph unicode=" " />
-<glyph unicode="*" d="M600 1100q15 0 34 -1.5t30 -3.5l11 -1q10 -2 17.5 -10.5t7.5 -18.5v-224l158 158q7 7 18 8t19 -6l106 -106q7 -8 6 -19t-8 -18l-158 -158h224q10 0 18.5 -7.5t10.5 -17.5q6 -41 6 -75q0 -15 -1.5 -34t-3.5 -30l-1 -11q-2 -10 -10.5 -17.5t-18.5 -7.5h-224l158 -158 q7 -7 8 -18t-6 -19l-106 -106q-8 -7 -19 -6t-18 8l-158 158v-224q0 -10 -7.5 -18.5t-17.5 -10.5q-41 -6 -75 -6q-15 0 -34 1.5t-30 3.5l-11 1q-10 2 -17.5 10.5t-7.5 18.5v224l-158 -158q-7 -7 -18 -8t-19 6l-106 106q-7 8 -6 19t8 18l158 158h-224q-10 0 -18.5 7.5 t-10.5 17.5q-6 41 -6 75q0 15 1.5 34t3.5 30l1 11q2 10 10.5 17.5t18.5 7.5h224l-158 158q-7 7 -8 18t6 19l106 106q8 7 19 6t18 -8l158 -158v224q0 10 7.5 18.5t17.5 10.5q41 6 75 6z" />
-<glyph unicode="+" d="M450 1100h200q21 0 35.5 -14.5t14.5 -35.5v-350h350q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-350v-350q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v350h-350q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5 h350v350q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xa0;" />
-<glyph unicode="&#xa5;" d="M825 1100h250q10 0 12.5 -5t-5.5 -13l-364 -364q-6 -6 -11 -18h268q10 0 13 -6t-3 -14l-120 -160q-6 -8 -18 -14t-22 -6h-125v-100h275q10 0 13 -6t-3 -14l-120 -160q-6 -8 -18 -14t-22 -6h-125v-174q0 -11 -7.5 -18.5t-18.5 -7.5h-148q-11 0 -18.5 7.5t-7.5 18.5v174 h-275q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h125v100h-275q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h118q-5 12 -11 18l-364 364q-8 8 -5.5 13t12.5 5h250q25 0 43 -18l164 -164q8 -8 18 -8t18 8l164 164q18 18 43 18z" />
-<glyph unicode="&#x2000;" horiz-adv-x="650" />
-<glyph unicode="&#x2001;" horiz-adv-x="1300" />
-<glyph unicode="&#x2002;" horiz-adv-x="650" />
-<glyph unicode="&#x2003;" horiz-adv-x="1300" />
-<glyph unicode="&#x2004;" horiz-adv-x="433" />
-<glyph unicode="&#x2005;" horiz-adv-x="325" />
-<glyph unicode="&#x2006;" horiz-adv-x="216" />
-<glyph unicode="&#x2007;" horiz-adv-x="216" />
-<glyph unicode="&#x2008;" horiz-adv-x="162" />
-<glyph unicode="&#x2009;" horiz-adv-x="260" />
-<glyph unicode="&#x200a;" horiz-adv-x="72" />
-<glyph unicode="&#x202f;" horiz-adv-x="260" />
-<glyph unicode="&#x205f;" horiz-adv-x="325" />
-<glyph unicode="&#x20ac;" d="M744 1198q242 0 354 -189q60 -104 66 -209h-181q0 45 -17.5 82.5t-43.5 61.5t-58 40.5t-60.5 24t-51.5 7.5q-19 0 -40.5 -5.5t-49.5 -20.5t-53 -38t-49 -62.5t-39 -89.5h379l-100 -100h-300q-6 -50 -6 -100h406l-100 -100h-300q9 -74 33 -132t52.5 -91t61.5 -54.5t59 -29 t47 -7.5q22 0 50.5 7.5t60.5 24.5t58 41t43.5 61t17.5 80h174q-30 -171 -128 -278q-107 -117 -274 -117q-206 0 -324 158q-36 48 -69 133t-45 204h-217l100 100h112q1 47 6 100h-218l100 100h134q20 87 51 153.5t62 103.5q117 141 297 141z" />
-<glyph unicode="&#x20bd;" d="M428 1200h350q67 0 120 -13t86 -31t57 -49.5t35 -56.5t17 -64.5t6.5 -60.5t0.5 -57v-16.5v-16.5q0 -36 -0.5 -57t-6.5 -61t-17 -65t-35 -57t-57 -50.5t-86 -31.5t-120 -13h-178l-2 -100h288q10 0 13 -6t-3 -14l-120 -160q-6 -8 -18 -14t-22 -6h-138v-175q0 -11 -5.5 -18 t-15.5 -7h-149q-10 0 -17.5 7.5t-7.5 17.5v175h-267q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h117v100h-267q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h117v475q0 10 7.5 17.5t17.5 7.5zM600 1000v-300h203q64 0 86.5 33t22.5 119q0 84 -22.5 116t-86.5 32h-203z" />
-<glyph unicode="&#x2212;" d="M250 700h800q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#x231b;" d="M1000 1200v-150q0 -21 -14.5 -35.5t-35.5 -14.5h-50v-100q0 -91 -49.5 -165.5t-130.5 -109.5q81 -35 130.5 -109.5t49.5 -165.5v-150h50q21 0 35.5 -14.5t14.5 -35.5v-150h-800v150q0 21 14.5 35.5t35.5 14.5h50v150q0 91 49.5 165.5t130.5 109.5q-81 35 -130.5 109.5 t-49.5 165.5v100h-50q-21 0 -35.5 14.5t-14.5 35.5v150h800zM400 1000v-100q0 -60 32.5 -109.5t87.5 -73.5q28 -12 44 -37t16 -55t-16 -55t-44 -37q-55 -24 -87.5 -73.5t-32.5 -109.5v-150h400v150q0 60 -32.5 109.5t-87.5 73.5q-28 12 -44 37t-16 55t16 55t44 37 q55 24 87.5 73.5t32.5 109.5v100h-400z" />
-<glyph unicode="&#x25fc;" horiz-adv-x="500" d="M0 0z" />
-<glyph unicode="&#x2601;" d="M503 1089q110 0 200.5 -59.5t134.5 -156.5q44 14 90 14q120 0 205 -86.5t85 -206.5q0 -121 -85 -207.5t-205 -86.5h-750q-79 0 -135.5 57t-56.5 137q0 69 42.5 122.5t108.5 67.5q-2 12 -2 37q0 153 108 260.5t260 107.5z" />
-<glyph unicode="&#x26fa;" d="M774 1193.5q16 -9.5 20.5 -27t-5.5 -33.5l-136 -187l467 -746h30q20 0 35 -18.5t15 -39.5v-42h-1200v42q0 21 15 39.5t35 18.5h30l468 746l-135 183q-10 16 -5.5 34t20.5 28t34 5.5t28 -20.5l111 -148l112 150q9 16 27 20.5t34 -5zM600 200h377l-182 112l-195 534v-646z " />
-<glyph unicode="&#x2709;" d="M25 1100h1150q10 0 12.5 -5t-5.5 -13l-564 -567q-8 -8 -18 -8t-18 8l-564 567q-8 8 -5.5 13t12.5 5zM18 882l264 -264q8 -8 8 -18t-8 -18l-264 -264q-8 -8 -13 -5.5t-5 12.5v550q0 10 5 12.5t13 -5.5zM918 618l264 264q8 8 13 5.5t5 -12.5v-550q0 -10 -5 -12.5t-13 5.5 l-264 264q-8 8 -8 18t8 18zM818 482l364 -364q8 -8 5.5 -13t-12.5 -5h-1150q-10 0 -12.5 5t5.5 13l364 364q8 8 18 8t18 -8l164 -164q8 -8 18 -8t18 8l164 164q8 8 18 8t18 -8z" />
-<glyph unicode="&#x270f;" d="M1011 1210q19 0 33 -13l153 -153q13 -14 13 -33t-13 -33l-99 -92l-214 214l95 96q13 14 32 14zM1013 800l-615 -614l-214 214l614 614zM317 96l-333 -112l110 335z" />
-<glyph unicode="&#xe001;" d="M700 650v-550h250q21 0 35.5 -14.5t14.5 -35.5v-50h-800v50q0 21 14.5 35.5t35.5 14.5h250v550l-500 550h1200z" />
-<glyph unicode="&#xe002;" d="M368 1017l645 163q39 15 63 0t24 -49v-831q0 -55 -41.5 -95.5t-111.5 -63.5q-79 -25 -147 -4.5t-86 75t25.5 111.5t122.5 82q72 24 138 8v521l-600 -155v-606q0 -42 -44 -90t-109 -69q-79 -26 -147 -5.5t-86 75.5t25.5 111.5t122.5 82.5q72 24 138 7v639q0 38 14.5 59 t53.5 34z" />
-<glyph unicode="&#xe003;" d="M500 1191q100 0 191 -39t156.5 -104.5t104.5 -156.5t39 -191l-1 -2l1 -5q0 -141 -78 -262l275 -274q23 -26 22.5 -44.5t-22.5 -42.5l-59 -58q-26 -20 -46.5 -20t-39.5 20l-275 274q-119 -77 -261 -77l-5 1l-2 -1q-100 0 -191 39t-156.5 104.5t-104.5 156.5t-39 191 t39 191t104.5 156.5t156.5 104.5t191 39zM500 1022q-88 0 -162 -43t-117 -117t-43 -162t43 -162t117 -117t162 -43t162 43t117 117t43 162t-43 162t-117 117t-162 43z" />
-<glyph unicode="&#xe005;" d="M649 949q48 68 109.5 104t121.5 38.5t118.5 -20t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-150 152.5t-126.5 127.5t-93.5 124.5t-33.5 117.5q0 64 28 123t73 100.5t104 64t119 20 t120.5 -38.5t104.5 -104z" />
-<glyph unicode="&#xe006;" d="M407 800l131 353q7 19 17.5 19t17.5 -19l129 -353h421q21 0 24 -8.5t-14 -20.5l-342 -249l130 -401q7 -20 -0.5 -25.5t-24.5 6.5l-343 246l-342 -247q-17 -12 -24.5 -6.5t-0.5 25.5l130 400l-347 251q-17 12 -14 20.5t23 8.5h429z" />
-<glyph unicode="&#xe007;" d="M407 800l131 353q7 19 17.5 19t17.5 -19l129 -353h421q21 0 24 -8.5t-14 -20.5l-342 -249l130 -401q7 -20 -0.5 -25.5t-24.5 6.5l-343 246l-342 -247q-17 -12 -24.5 -6.5t-0.5 25.5l130 400l-347 251q-17 12 -14 20.5t23 8.5h429zM477 700h-240l197 -142l-74 -226 l193 139l195 -140l-74 229l192 140h-234l-78 211z" />
-<glyph unicode="&#xe008;" d="M600 1200q124 0 212 -88t88 -212v-250q0 -46 -31 -98t-69 -52v-75q0 -10 6 -21.5t15 -17.5l358 -230q9 -5 15 -16.5t6 -21.5v-93q0 -10 -7.5 -17.5t-17.5 -7.5h-1150q-10 0 -17.5 7.5t-7.5 17.5v93q0 10 6 21.5t15 16.5l358 230q9 6 15 17.5t6 21.5v75q-38 0 -69 52 t-31 98v250q0 124 88 212t212 88z" />
-<glyph unicode="&#xe009;" d="M25 1100h1150q10 0 17.5 -7.5t7.5 -17.5v-1050q0 -10 -7.5 -17.5t-17.5 -7.5h-1150q-10 0 -17.5 7.5t-7.5 17.5v1050q0 10 7.5 17.5t17.5 7.5zM100 1000v-100h100v100h-100zM875 1000h-550q-10 0 -17.5 -7.5t-7.5 -17.5v-350q0 -10 7.5 -17.5t17.5 -7.5h550 q10 0 17.5 7.5t7.5 17.5v350q0 10 -7.5 17.5t-17.5 7.5zM1000 1000v-100h100v100h-100zM100 800v-100h100v100h-100zM1000 800v-100h100v100h-100zM100 600v-100h100v100h-100zM1000 600v-100h100v100h-100zM875 500h-550q-10 0 -17.5 -7.5t-7.5 -17.5v-350q0 -10 7.5 -17.5 t17.5 -7.5h550q10 0 17.5 7.5t7.5 17.5v350q0 10 -7.5 17.5t-17.5 7.5zM100 400v-100h100v100h-100zM1000 400v-100h100v100h-100zM100 200v-100h100v100h-100zM1000 200v-100h100v100h-100z" />
-<glyph unicode="&#xe010;" d="M50 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM650 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400 q0 21 14.5 35.5t35.5 14.5zM50 500h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM650 500h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe011;" d="M50 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200 q0 21 14.5 35.5t35.5 14.5zM850 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM50 700h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200 q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 700h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM850 700h200q21 0 35.5 -14.5t14.5 -35.5v-200 q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM50 300h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 300h200 q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM850 300h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5 t35.5 14.5z" />
-<glyph unicode="&#xe012;" d="M50 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 1100h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v200 q0 21 14.5 35.5t35.5 14.5zM50 700h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 700h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700 q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM50 300h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 300h700q21 0 35.5 -14.5t14.5 -35.5v-200 q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe013;" d="M465 477l571 571q8 8 18 8t17 -8l177 -177q8 -7 8 -17t-8 -18l-783 -784q-7 -8 -17.5 -8t-17.5 8l-384 384q-8 8 -8 18t8 17l177 177q7 8 17 8t18 -8l171 -171q7 -7 18 -7t18 7z" />
-<glyph unicode="&#xe014;" d="M904 1083l178 -179q8 -8 8 -18.5t-8 -17.5l-267 -268l267 -268q8 -7 8 -17.5t-8 -18.5l-178 -178q-8 -8 -18.5 -8t-17.5 8l-268 267l-268 -267q-7 -8 -17.5 -8t-18.5 8l-178 178q-8 8 -8 18.5t8 17.5l267 268l-267 268q-8 7 -8 17.5t8 18.5l178 178q8 8 18.5 8t17.5 -8 l268 -267l268 268q7 7 17.5 7t18.5 -7z" />
-<glyph unicode="&#xe015;" d="M507 1177q98 0 187.5 -38.5t154.5 -103.5t103.5 -154.5t38.5 -187.5q0 -141 -78 -262l300 -299q8 -8 8 -18.5t-8 -18.5l-109 -108q-7 -8 -17.5 -8t-18.5 8l-300 299q-119 -77 -261 -77q-98 0 -188 38.5t-154.5 103t-103 154.5t-38.5 188t38.5 187.5t103 154.5 t154.5 103.5t188 38.5zM506.5 1023q-89.5 0 -165.5 -44t-120 -120.5t-44 -166t44 -165.5t120 -120t165.5 -44t166 44t120.5 120t44 165.5t-44 166t-120.5 120.5t-166 44zM425 900h150q10 0 17.5 -7.5t7.5 -17.5v-75h75q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5 t-17.5 -7.5h-75v-75q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v75h-75q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h75v75q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe016;" d="M507 1177q98 0 187.5 -38.5t154.5 -103.5t103.5 -154.5t38.5 -187.5q0 -141 -78 -262l300 -299q8 -8 8 -18.5t-8 -18.5l-109 -108q-7 -8 -17.5 -8t-18.5 8l-300 299q-119 -77 -261 -77q-98 0 -188 38.5t-154.5 103t-103 154.5t-38.5 188t38.5 187.5t103 154.5 t154.5 103.5t188 38.5zM506.5 1023q-89.5 0 -165.5 -44t-120 -120.5t-44 -166t44 -165.5t120 -120t165.5 -44t166 44t120.5 120t44 165.5t-44 166t-120.5 120.5t-166 44zM325 800h350q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-350q-10 0 -17.5 7.5 t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe017;" d="M550 1200h100q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM800 975v166q167 -62 272 -209.5t105 -331.5q0 -117 -45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5 t-184.5 123t-123 184.5t-45.5 224q0 184 105 331.5t272 209.5v-166q-103 -55 -165 -155t-62 -220q0 -116 57 -214.5t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5q0 120 -62 220t-165 155z" />
-<glyph unicode="&#xe018;" d="M1025 1200h150q10 0 17.5 -7.5t7.5 -17.5v-1150q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v1150q0 10 7.5 17.5t17.5 7.5zM725 800h150q10 0 17.5 -7.5t7.5 -17.5v-750q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v750 q0 10 7.5 17.5t17.5 7.5zM425 500h150q10 0 17.5 -7.5t7.5 -17.5v-450q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v450q0 10 7.5 17.5t17.5 7.5zM125 300h150q10 0 17.5 -7.5t7.5 -17.5v-250q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5 v250q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe019;" d="M600 1174q33 0 74 -5l38 -152l5 -1q49 -14 94 -39l5 -2l134 80q61 -48 104 -105l-80 -134l3 -5q25 -44 39 -93l1 -6l152 -38q5 -43 5 -73q0 -34 -5 -74l-152 -38l-1 -6q-15 -49 -39 -93l-3 -5l80 -134q-48 -61 -104 -105l-134 81l-5 -3q-44 -25 -94 -39l-5 -2l-38 -151 q-43 -5 -74 -5q-33 0 -74 5l-38 151l-5 2q-49 14 -94 39l-5 3l-134 -81q-60 48 -104 105l80 134l-3 5q-25 45 -38 93l-2 6l-151 38q-6 42 -6 74q0 33 6 73l151 38l2 6q13 48 38 93l3 5l-80 134q47 61 105 105l133 -80l5 2q45 25 94 39l5 1l38 152q43 5 74 5zM600 815 q-89 0 -152 -63t-63 -151.5t63 -151.5t152 -63t152 63t63 151.5t-63 151.5t-152 63z" />
-<glyph unicode="&#xe020;" d="M500 1300h300q41 0 70.5 -29.5t29.5 -70.5v-100h275q10 0 17.5 -7.5t7.5 -17.5v-75h-1100v75q0 10 7.5 17.5t17.5 7.5h275v100q0 41 29.5 70.5t70.5 29.5zM500 1200v-100h300v100h-300zM1100 900v-800q0 -41 -29.5 -70.5t-70.5 -29.5h-700q-41 0 -70.5 29.5t-29.5 70.5 v800h900zM300 800v-700h100v700h-100zM500 800v-700h100v700h-100zM700 800v-700h100v700h-100zM900 800v-700h100v700h-100z" />
-<glyph unicode="&#xe021;" d="M18 618l620 608q8 7 18.5 7t17.5 -7l608 -608q8 -8 5.5 -13t-12.5 -5h-175v-575q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v375h-300v-375q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v575h-175q-10 0 -12.5 5t5.5 13z" />
-<glyph unicode="&#xe022;" d="M600 1200v-400q0 -41 29.5 -70.5t70.5 -29.5h300v-650q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v1100q0 21 14.5 35.5t35.5 14.5h450zM1000 800h-250q-21 0 -35.5 14.5t-14.5 35.5v250z" />
-<glyph unicode="&#xe023;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM525 900h50q10 0 17.5 -7.5t7.5 -17.5v-275h175q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe024;" d="M1300 0h-538l-41 400h-242l-41 -400h-538l431 1200h209l-21 -300h162l-20 300h208zM515 800l-27 -300h224l-27 300h-170z" />
-<glyph unicode="&#xe025;" d="M550 1200h200q21 0 35.5 -14.5t14.5 -35.5v-450h191q20 0 25.5 -11.5t-7.5 -27.5l-327 -400q-13 -16 -32 -16t-32 16l-327 400q-13 16 -7.5 27.5t25.5 11.5h191v450q0 21 14.5 35.5t35.5 14.5zM1125 400h50q10 0 17.5 -7.5t7.5 -17.5v-350q0 -10 -7.5 -17.5t-17.5 -7.5 h-1050q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h50q10 0 17.5 -7.5t7.5 -17.5v-175h900v175q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe026;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM525 900h150q10 0 17.5 -7.5t7.5 -17.5v-275h137q21 0 26 -11.5t-8 -27.5l-223 -275q-13 -16 -32 -16t-32 16l-223 275q-13 16 -8 27.5t26 11.5h137v275q0 10 7.5 17.5t17.5 7.5z " />
-<glyph unicode="&#xe027;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM632 914l223 -275q13 -16 8 -27.5t-26 -11.5h-137v-275q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v275h-137q-21 0 -26 11.5t8 27.5l223 275q13 16 32 16 t32 -16z" />
-<glyph unicode="&#xe028;" d="M225 1200h750q10 0 19.5 -7t12.5 -17l186 -652q7 -24 7 -49v-425q0 -12 -4 -27t-9 -17q-12 -6 -37 -6h-1100q-12 0 -27 4t-17 8q-6 13 -6 38l1 425q0 25 7 49l185 652q3 10 12.5 17t19.5 7zM878 1000h-556q-10 0 -19 -7t-11 -18l-87 -450q-2 -11 4 -18t16 -7h150 q10 0 19.5 -7t11.5 -17l38 -152q2 -10 11.5 -17t19.5 -7h250q10 0 19.5 7t11.5 17l38 152q2 10 11.5 17t19.5 7h150q10 0 16 7t4 18l-87 450q-2 11 -11 18t-19 7z" />
-<glyph unicode="&#xe029;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM540 820l253 -190q17 -12 17 -30t-17 -30l-253 -190q-16 -12 -28 -6.5t-12 26.5v400q0 21 12 26.5t28 -6.5z" />
-<glyph unicode="&#xe030;" d="M947 1060l135 135q7 7 12.5 5t5.5 -13v-362q0 -10 -7.5 -17.5t-17.5 -7.5h-362q-11 0 -13 5.5t5 12.5l133 133q-109 76 -238 76q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5h150q0 -117 -45.5 -224 t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5q192 0 347 -117z" />
-<glyph unicode="&#xe031;" d="M947 1060l135 135q7 7 12.5 5t5.5 -13v-361q0 -11 -7.5 -18.5t-18.5 -7.5h-361q-11 0 -13 5.5t5 12.5l134 134q-110 75 -239 75q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5h-150q0 117 45.5 224t123 184.5t184.5 123t224 45.5q192 0 347 -117zM1027 600h150 q0 -117 -45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5q-192 0 -348 118l-134 -134q-7 -8 -12.5 -5.5t-5.5 12.5v360q0 11 7.5 18.5t18.5 7.5h360q10 0 12.5 -5.5t-5.5 -12.5l-133 -133q110 -76 240 -76q116 0 214.5 57t155.5 155.5t57 214.5z" />
-<glyph unicode="&#xe032;" d="M125 1200h1050q10 0 17.5 -7.5t7.5 -17.5v-1150q0 -10 -7.5 -17.5t-17.5 -7.5h-1050q-10 0 -17.5 7.5t-7.5 17.5v1150q0 10 7.5 17.5t17.5 7.5zM1075 1000h-850q-10 0 -17.5 -7.5t-7.5 -17.5v-850q0 -10 7.5 -17.5t17.5 -7.5h850q10 0 17.5 7.5t7.5 17.5v850 q0 10 -7.5 17.5t-17.5 7.5zM325 900h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 900h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5zM325 700h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 700h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5zM325 500h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 500h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5zM325 300h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 300h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe033;" d="M900 800v200q0 83 -58.5 141.5t-141.5 58.5h-300q-82 0 -141 -59t-59 -141v-200h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-600q0 -41 29.5 -70.5t70.5 -29.5h900q41 0 70.5 29.5t29.5 70.5v600q0 41 -29.5 70.5t-70.5 29.5h-100zM400 800v150q0 21 15 35.5t35 14.5h200 q20 0 35 -14.5t15 -35.5v-150h-300z" />
-<glyph unicode="&#xe034;" d="M125 1100h50q10 0 17.5 -7.5t7.5 -17.5v-1075h-100v1075q0 10 7.5 17.5t17.5 7.5zM1075 1052q4 0 9 -2q16 -6 16 -23v-421q0 -6 -3 -12q-33 -59 -66.5 -99t-65.5 -58t-56.5 -24.5t-52.5 -6.5q-26 0 -57.5 6.5t-52.5 13.5t-60 21q-41 15 -63 22.5t-57.5 15t-65.5 7.5 q-85 0 -160 -57q-7 -5 -15 -5q-6 0 -11 3q-14 7 -14 22v438q22 55 82 98.5t119 46.5q23 2 43 0.5t43 -7t32.5 -8.5t38 -13t32.5 -11q41 -14 63.5 -21t57 -14t63.5 -7q103 0 183 87q7 8 18 8z" />
-<glyph unicode="&#xe035;" d="M600 1175q116 0 227 -49.5t192.5 -131t131 -192.5t49.5 -227v-300q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v300q0 127 -70.5 231.5t-184.5 161.5t-245 57t-245 -57t-184.5 -161.5t-70.5 -231.5v-300q0 -10 -7.5 -17.5t-17.5 -7.5h-50 q-10 0 -17.5 7.5t-7.5 17.5v300q0 116 49.5 227t131 192.5t192.5 131t227 49.5zM220 500h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14v460q0 8 6 14t14 6zM820 500h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14v460 q0 8 6 14t14 6z" />
-<glyph unicode="&#xe036;" d="M321 814l258 172q9 6 15 2.5t6 -13.5v-750q0 -10 -6 -13.5t-15 2.5l-258 172q-21 14 -46 14h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h250q25 0 46 14zM900 668l120 120q7 7 17 7t17 -7l34 -34q7 -7 7 -17t-7 -17l-120 -120l120 -120q7 -7 7 -17 t-7 -17l-34 -34q-7 -7 -17 -7t-17 7l-120 119l-120 -119q-7 -7 -17 -7t-17 7l-34 34q-7 7 -7 17t7 17l119 120l-119 120q-7 7 -7 17t7 17l34 34q7 8 17 8t17 -8z" />
-<glyph unicode="&#xe037;" d="M321 814l258 172q9 6 15 2.5t6 -13.5v-750q0 -10 -6 -13.5t-15 2.5l-258 172q-21 14 -46 14h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h250q25 0 46 14zM766 900h4q10 -1 16 -10q96 -129 96 -290q0 -154 -90 -281q-6 -9 -17 -10l-3 -1q-9 0 -16 6 l-29 23q-7 7 -8.5 16.5t4.5 17.5q72 103 72 229q0 132 -78 238q-6 8 -4.5 18t9.5 17l29 22q7 5 15 5z" />
-<glyph unicode="&#xe038;" d="M967 1004h3q11 -1 17 -10q135 -179 135 -396q0 -105 -34 -206.5t-98 -185.5q-7 -9 -17 -10h-3q-9 0 -16 6l-42 34q-8 6 -9 16t5 18q111 150 111 328q0 90 -29.5 176t-84.5 157q-6 9 -5 19t10 16l42 33q7 5 15 5zM321 814l258 172q9 6 15 2.5t6 -13.5v-750q0 -10 -6 -13.5 t-15 2.5l-258 172q-21 14 -46 14h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h250q25 0 46 14zM766 900h4q10 -1 16 -10q96 -129 96 -290q0 -154 -90 -281q-6 -9 -17 -10l-3 -1q-9 0 -16 6l-29 23q-7 7 -8.5 16.5t4.5 17.5q72 103 72 229q0 132 -78 238 q-6 8 -4.5 18.5t9.5 16.5l29 22q7 5 15 5z" />
-<glyph unicode="&#xe039;" d="M500 900h100v-100h-100v-100h-400v-100h-100v600h500v-300zM1200 700h-200v-100h200v-200h-300v300h-200v300h-100v200h600v-500zM100 1100v-300h300v300h-300zM800 1100v-300h300v300h-300zM300 900h-100v100h100v-100zM1000 900h-100v100h100v-100zM300 500h200v-500 h-500v500h200v100h100v-100zM800 300h200v-100h-100v-100h-200v100h-100v100h100v200h-200v100h300v-300zM100 400v-300h300v300h-300zM300 200h-100v100h100v-100zM1200 200h-100v100h100v-100zM700 0h-100v100h100v-100zM1200 0h-300v100h300v-100z" />
-<glyph unicode="&#xe040;" d="M100 200h-100v1000h100v-1000zM300 200h-100v1000h100v-1000zM700 200h-200v1000h200v-1000zM900 200h-100v1000h100v-1000zM1200 200h-200v1000h200v-1000zM400 0h-300v100h300v-100zM600 0h-100v91h100v-91zM800 0h-100v91h100v-91zM1100 0h-200v91h200v-91z" />
-<glyph unicode="&#xe041;" d="M500 1200l682 -682q8 -8 8 -18t-8 -18l-464 -464q-8 -8 -18 -8t-18 8l-682 682l1 475q0 10 7.5 17.5t17.5 7.5h474zM319.5 1024.5q-29.5 29.5 -71 29.5t-71 -29.5t-29.5 -71.5t29.5 -71.5t71 -29.5t71 29.5t29.5 71.5t-29.5 71.5z" />
-<glyph unicode="&#xe042;" d="M500 1200l682 -682q8 -8 8 -18t-8 -18l-464 -464q-8 -8 -18 -8t-18 8l-682 682l1 475q0 10 7.5 17.5t17.5 7.5h474zM800 1200l682 -682q8 -8 8 -18t-8 -18l-464 -464q-8 -8 -18 -8t-18 8l-56 56l424 426l-700 700h150zM319.5 1024.5q-29.5 29.5 -71 29.5t-71 -29.5 t-29.5 -71.5t29.5 -71.5t71 -29.5t71 29.5t29.5 71.5t-29.5 71.5z" />
-<glyph unicode="&#xe043;" d="M300 1200h825q75 0 75 -75v-900q0 -25 -18 -43l-64 -64q-8 -8 -13 -5.5t-5 12.5v950q0 10 -7.5 17.5t-17.5 7.5h-700q-25 0 -43 -18l-64 -64q-8 -8 -5.5 -13t12.5 -5h700q10 0 17.5 -7.5t7.5 -17.5v-950q0 -10 -7.5 -17.5t-17.5 -7.5h-850q-10 0 -17.5 7.5t-7.5 17.5v975 q0 25 18 43l139 139q18 18 43 18z" />
-<glyph unicode="&#xe044;" d="M250 1200h800q21 0 35.5 -14.5t14.5 -35.5v-1150l-450 444l-450 -445v1151q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe045;" d="M822 1200h-444q-11 0 -19 -7.5t-9 -17.5l-78 -301q-7 -24 7 -45l57 -108q6 -9 17.5 -15t21.5 -6h450q10 0 21.5 6t17.5 15l62 108q14 21 7 45l-83 301q-1 10 -9 17.5t-19 7.5zM1175 800h-150q-10 0 -21 -6.5t-15 -15.5l-78 -156q-4 -9 -15 -15.5t-21 -6.5h-550 q-10 0 -21 6.5t-15 15.5l-78 156q-4 9 -15 15.5t-21 6.5h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-650q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h750q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5 t7.5 17.5v650q0 10 -7.5 17.5t-17.5 7.5zM850 200h-500q-10 0 -19.5 -7t-11.5 -17l-38 -152q-2 -10 3.5 -17t15.5 -7h600q10 0 15.5 7t3.5 17l-38 152q-2 10 -11.5 17t-19.5 7z" />
-<glyph unicode="&#xe046;" d="M500 1100h200q56 0 102.5 -20.5t72.5 -50t44 -59t25 -50.5l6 -20h150q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5h150q2 8 6.5 21.5t24 48t45 61t72 48t102.5 21.5zM900 800v-100 h100v100h-100zM600 730q-95 0 -162.5 -67.5t-67.5 -162.5t67.5 -162.5t162.5 -67.5t162.5 67.5t67.5 162.5t-67.5 162.5t-162.5 67.5zM600 603q43 0 73 -30t30 -73t-30 -73t-73 -30t-73 30t-30 73t30 73t73 30z" />
-<glyph unicode="&#xe047;" d="M681 1199l385 -998q20 -50 60 -92q18 -19 36.5 -29.5t27.5 -11.5l10 -2v-66h-417v66q53 0 75 43.5t5 88.5l-82 222h-391q-58 -145 -92 -234q-11 -34 -6.5 -57t25.5 -37t46 -20t55 -6v-66h-365v66q56 24 84 52q12 12 25 30.5t20 31.5l7 13l399 1006h93zM416 521h340 l-162 457z" />
-<glyph unicode="&#xe048;" d="M753 641q5 -1 14.5 -4.5t36 -15.5t50.5 -26.5t53.5 -40t50.5 -54.5t35.5 -70t14.5 -87q0 -67 -27.5 -125.5t-71.5 -97.5t-98.5 -66.5t-108.5 -40.5t-102 -13h-500v89q41 7 70.5 32.5t29.5 65.5v827q0 24 -0.5 34t-3.5 24t-8.5 19.5t-17 13.5t-28 12.5t-42.5 11.5v71 l471 -1q57 0 115.5 -20.5t108 -57t80.5 -94t31 -124.5q0 -51 -15.5 -96.5t-38 -74.5t-45 -50.5t-38.5 -30.5zM400 700h139q78 0 130.5 48.5t52.5 122.5q0 41 -8.5 70.5t-29.5 55.5t-62.5 39.5t-103.5 13.5h-118v-350zM400 200h216q80 0 121 50.5t41 130.5q0 90 -62.5 154.5 t-156.5 64.5h-159v-400z" />
-<glyph unicode="&#xe049;" d="M877 1200l2 -57q-83 -19 -116 -45.5t-40 -66.5l-132 -839q-9 -49 13 -69t96 -26v-97h-500v97q186 16 200 98l173 832q3 17 3 30t-1.5 22.5t-9 17.5t-13.5 12.5t-21.5 10t-26 8.5t-33.5 10q-13 3 -19 5v57h425z" />
-<glyph unicode="&#xe050;" d="M1300 900h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-200v-850q0 -22 25 -34.5t50 -13.5l25 -2v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v850h-200q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h1000v-300zM175 1000h-75v-800h75l-125 -167l-125 167h75v800h-75l125 167z" />
-<glyph unicode="&#xe051;" d="M1100 900h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-200v-650q0 -22 25 -34.5t50 -13.5l25 -2v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v650h-200q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h1000v-300zM1167 50l-167 -125v75h-800v-75l-167 125l167 125v-75h800v75z" />
-<glyph unicode="&#xe052;" d="M50 1100h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 800h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM50 500h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe053;" d="M250 1100h700q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 800h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM250 500h700q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe054;" d="M500 950v100q0 21 14.5 35.5t35.5 14.5h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5zM100 650v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000 q-21 0 -35.5 14.5t-14.5 35.5zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5zM0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100 q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5z" />
-<glyph unicode="&#xe055;" d="M50 1100h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 800h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM50 500h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe056;" d="M50 1100h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 1100h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM50 800h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 800h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 500h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 500h800q21 0 35.5 -14.5t14.5 -35.5v-100 q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 200h800 q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe057;" d="M400 0h-100v1100h100v-1100zM550 1100h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM550 800h500q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-500 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM267 550l-167 -125v75h-200v100h200v75zM550 500h300q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM550 200h600 q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe058;" d="M50 1100h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM900 0h-100v1100h100v-1100zM50 800h500q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-500 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM1100 600h200v-100h-200v-75l-167 125l167 125v-75zM50 500h300q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h600 q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe059;" d="M75 1000h750q31 0 53 -22t22 -53v-650q0 -31 -22 -53t-53 -22h-750q-31 0 -53 22t-22 53v650q0 31 22 53t53 22zM1200 300l-300 300l300 300v-600z" />
-<glyph unicode="&#xe060;" d="M44 1100h1112q18 0 31 -13t13 -31v-1012q0 -18 -13 -31t-31 -13h-1112q-18 0 -31 13t-13 31v1012q0 18 13 31t31 13zM100 1000v-737l247 182l298 -131l-74 156l293 318l236 -288v500h-1000zM342 884q56 0 95 -39t39 -94.5t-39 -95t-95 -39.5t-95 39.5t-39 95t39 94.5 t95 39z" />
-<glyph unicode="&#xe062;" d="M648 1169q117 0 216 -60t156.5 -161t57.5 -218q0 -115 -70 -258q-69 -109 -158 -225.5t-143 -179.5l-54 -62q-9 8 -25.5 24.5t-63.5 67.5t-91 103t-98.5 128t-95.5 148q-60 132 -60 249q0 88 34 169.5t91.5 142t137 96.5t166.5 36zM652.5 974q-91.5 0 -156.5 -65 t-65 -157t65 -156.5t156.5 -64.5t156.5 64.5t65 156.5t-65 157t-156.5 65z" />
-<glyph unicode="&#xe063;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 173v854q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57z" />
-<glyph unicode="&#xe064;" d="M554 1295q21 -72 57.5 -143.5t76 -130t83 -118t82.5 -117t70 -116t49.5 -126t18.5 -136.5q0 -71 -25.5 -135t-68.5 -111t-99 -82t-118.5 -54t-125.5 -23q-84 5 -161.5 34t-139.5 78.5t-99 125t-37 164.5q0 69 18 136.5t49.5 126.5t69.5 116.5t81.5 117.5t83.5 119 t76.5 131t58.5 143zM344 710q-23 -33 -43.5 -70.5t-40.5 -102.5t-17 -123q1 -37 14.5 -69.5t30 -52t41 -37t38.5 -24.5t33 -15q21 -7 32 -1t13 22l6 34q2 10 -2.5 22t-13.5 19q-5 4 -14 12t-29.5 40.5t-32.5 73.5q-26 89 6 271q2 11 -6 11q-8 1 -15 -10z" />
-<glyph unicode="&#xe065;" d="M1000 1013l108 115q2 1 5 2t13 2t20.5 -1t25 -9.5t28.5 -21.5q22 -22 27 -43t0 -32l-6 -10l-108 -115zM350 1100h400q50 0 105 -13l-187 -187h-368q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v182l200 200v-332 q0 -165 -93.5 -257.5t-256.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5zM1009 803l-362 -362l-161 -50l55 170l355 355z" />
-<glyph unicode="&#xe066;" d="M350 1100h361q-164 -146 -216 -200h-195q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5l200 153v-103q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5z M824 1073l339 -301q8 -7 8 -17.5t-8 -17.5l-340 -306q-7 -6 -12.5 -4t-6.5 11v203q-26 1 -54.5 0t-78.5 -7.5t-92 -17.5t-86 -35t-70 -57q10 59 33 108t51.5 81.5t65 58.5t68.5 40.5t67 24.5t56 13.5t40 4.5v210q1 10 6.5 12.5t13.5 -4.5z" />
-<glyph unicode="&#xe067;" d="M350 1100h350q60 0 127 -23l-178 -177h-349q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v69l200 200v-219q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5z M643 639l395 395q7 7 17.5 7t17.5 -7l101 -101q7 -7 7 -17.5t-7 -17.5l-531 -532q-7 -7 -17.5 -7t-17.5 7l-248 248q-7 7 -7 17.5t7 17.5l101 101q7 7 17.5 7t17.5 -7l111 -111q8 -7 18 -7t18 7z" />
-<glyph unicode="&#xe068;" d="M318 918l264 264q8 8 18 8t18 -8l260 -264q7 -8 4.5 -13t-12.5 -5h-170v-200h200v173q0 10 5 12t13 -5l264 -260q8 -7 8 -17.5t-8 -17.5l-264 -265q-8 -7 -13 -5t-5 12v173h-200v-200h170q10 0 12.5 -5t-4.5 -13l-260 -264q-8 -8 -18 -8t-18 8l-264 264q-8 8 -5.5 13 t12.5 5h175v200h-200v-173q0 -10 -5 -12t-13 5l-264 265q-8 7 -8 17.5t8 17.5l264 260q8 7 13 5t5 -12v-173h200v200h-175q-10 0 -12.5 5t5.5 13z" />
-<glyph unicode="&#xe069;" d="M250 1100h100q21 0 35.5 -14.5t14.5 -35.5v-438l464 453q15 14 25.5 10t10.5 -25v-1000q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v1000q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe070;" d="M50 1100h100q21 0 35.5 -14.5t14.5 -35.5v-438l464 453q15 14 25.5 10t10.5 -25v-438l464 453q15 14 25.5 10t10.5 -25v-1000q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5 t-14.5 35.5v1000q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe071;" d="M1200 1050v-1000q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -10.5 -25t-25.5 10l-492 480q-15 14 -15 35t15 35l492 480q15 14 25.5 10t10.5 -25v-438l464 453q15 14 25.5 10t10.5 -25z" />
-<glyph unicode="&#xe072;" d="M243 1074l814 -498q18 -11 18 -26t-18 -26l-814 -498q-18 -11 -30.5 -4t-12.5 28v1000q0 21 12.5 28t30.5 -4z" />
-<glyph unicode="&#xe073;" d="M250 1000h200q21 0 35.5 -14.5t14.5 -35.5v-800q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v800q0 21 14.5 35.5t35.5 14.5zM650 1000h200q21 0 35.5 -14.5t14.5 -35.5v-800q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v800 q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe074;" d="M1100 950v-800q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v800q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5z" />
-<glyph unicode="&#xe075;" d="M500 612v438q0 21 10.5 25t25.5 -10l492 -480q15 -14 15 -35t-15 -35l-492 -480q-15 -14 -25.5 -10t-10.5 25v438l-464 -453q-15 -14 -25.5 -10t-10.5 25v1000q0 21 10.5 25t25.5 -10z" />
-<glyph unicode="&#xe076;" d="M1048 1102l100 1q20 0 35 -14.5t15 -35.5l5 -1000q0 -21 -14.5 -35.5t-35.5 -14.5l-100 -1q-21 0 -35.5 14.5t-14.5 35.5l-2 437l-463 -454q-14 -15 -24.5 -10.5t-10.5 25.5l-2 437l-462 -455q-15 -14 -25.5 -9.5t-10.5 24.5l-5 1000q0 21 10.5 25.5t25.5 -10.5l466 -450 l-2 438q0 20 10.5 24.5t25.5 -9.5l466 -451l-2 438q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe077;" d="M850 1100h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438l-464 -453q-15 -14 -25.5 -10t-10.5 25v1000q0 21 10.5 25t25.5 -10l464 -453v438q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe078;" d="M686 1081l501 -540q15 -15 10.5 -26t-26.5 -11h-1042q-22 0 -26.5 11t10.5 26l501 540q15 15 36 15t36 -15zM150 400h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe079;" d="M885 900l-352 -353l352 -353l-197 -198l-552 552l552 550z" />
-<glyph unicode="&#xe080;" d="M1064 547l-551 -551l-198 198l353 353l-353 353l198 198z" />
-<glyph unicode="&#xe081;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM650 900h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-150h-150 q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -21 14.5 -35.5t35.5 -14.5h150v-150q0 -21 14.5 -35.5t35.5 -14.5h100q21 0 35.5 14.5t14.5 35.5v150h150q21 0 35.5 14.5t14.5 35.5v100q0 21 -14.5 35.5t-35.5 14.5h-150v150q0 21 -14.5 35.5t-35.5 14.5z" />
-<glyph unicode="&#xe082;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM850 700h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -21 14.5 -35.5 t35.5 -14.5h500q21 0 35.5 14.5t14.5 35.5v100q0 21 -14.5 35.5t-35.5 14.5z" />
-<glyph unicode="&#xe083;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM741.5 913q-12.5 0 -21.5 -9l-120 -120l-120 120q-9 9 -21.5 9 t-21.5 -9l-141 -141q-9 -9 -9 -21.5t9 -21.5l120 -120l-120 -120q-9 -9 -9 -21.5t9 -21.5l141 -141q9 -9 21.5 -9t21.5 9l120 120l120 -120q9 -9 21.5 -9t21.5 9l141 141q9 9 9 21.5t-9 21.5l-120 120l120 120q9 9 9 21.5t-9 21.5l-141 141q-9 9 -21.5 9z" />
-<glyph unicode="&#xe084;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM546 623l-84 85q-7 7 -17.5 7t-18.5 -7l-139 -139q-7 -8 -7 -18t7 -18 l242 -241q7 -8 17.5 -8t17.5 8l375 375q7 7 7 17.5t-7 18.5l-139 139q-7 7 -17.5 7t-17.5 -7z" />
-<glyph unicode="&#xe085;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM588 941q-29 0 -59 -5.5t-63 -20.5t-58 -38.5t-41.5 -63t-16.5 -89.5 q0 -25 20 -25h131q30 -5 35 11q6 20 20.5 28t45.5 8q20 0 31.5 -10.5t11.5 -28.5q0 -23 -7 -34t-26 -18q-1 0 -13.5 -4t-19.5 -7.5t-20 -10.5t-22 -17t-18.5 -24t-15.5 -35t-8 -46q-1 -8 5.5 -16.5t20.5 -8.5h173q7 0 22 8t35 28t37.5 48t29.5 74t12 100q0 47 -17 83 t-42.5 57t-59.5 34.5t-64 18t-59 4.5zM675 400h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5z" />
-<glyph unicode="&#xe086;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM675 1000h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5 t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5zM675 700h-250q-10 0 -17.5 -7.5t-7.5 -17.5v-50q0 -10 7.5 -17.5t17.5 -7.5h75v-200h-75q-10 0 -17.5 -7.5t-7.5 -17.5v-50q0 -10 7.5 -17.5t17.5 -7.5h350q10 0 17.5 7.5t7.5 17.5v50q0 10 -7.5 17.5 t-17.5 7.5h-75v275q0 10 -7.5 17.5t-17.5 7.5z" />
-<glyph unicode="&#xe087;" d="M525 1200h150q10 0 17.5 -7.5t7.5 -17.5v-194q103 -27 178.5 -102.5t102.5 -178.5h194q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-194q-27 -103 -102.5 -178.5t-178.5 -102.5v-194q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v194 q-103 27 -178.5 102.5t-102.5 178.5h-194q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h194q27 103 102.5 178.5t178.5 102.5v194q0 10 7.5 17.5t17.5 7.5zM700 893v-168q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v168q-68 -23 -119 -74 t-74 -119h168q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-168q23 -68 74 -119t119 -74v168q0 10 7.5 17.5t17.5 7.5h150q10 0 17.5 -7.5t7.5 -17.5v-168q68 23 119 74t74 119h-168q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h168 q-23 68 -74 119t-119 74z" />
-<glyph unicode="&#xe088;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM759 823l64 -64q7 -7 7 -17.5t-7 -17.5l-124 -124l124 -124q7 -7 7 -17.5t-7 -17.5l-64 -64q-7 -7 -17.5 -7t-17.5 7l-124 124l-124 -124q-7 -7 -17.5 -7t-17.5 7l-64 64 q-7 7 -7 17.5t7 17.5l124 124l-124 124q-7 7 -7 17.5t7 17.5l64 64q7 7 17.5 7t17.5 -7l124 -124l124 124q7 7 17.5 7t17.5 -7z" />
-<glyph unicode="&#xe089;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM782 788l106 -106q7 -7 7 -17.5t-7 -17.5l-320 -321q-8 -7 -18 -7t-18 7l-202 203q-8 7 -8 17.5t8 17.5l106 106q7 8 17.5 8t17.5 -8l79 -79l197 197q7 7 17.5 7t17.5 -7z" />
-<glyph unicode="&#xe090;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5q0 -120 65 -225 l587 587q-105 65 -225 65zM965 819l-584 -584q104 -62 219 -62q116 0 214.5 57t155.5 155.5t57 214.5q0 115 -62 219z" />
-<glyph unicode="&#xe091;" d="M39 582l522 427q16 13 27.5 8t11.5 -26v-291h550q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-550v-291q0 -21 -11.5 -26t-27.5 8l-522 427q-16 13 -16 32t16 32z" />
-<glyph unicode="&#xe092;" d="M639 1009l522 -427q16 -13 16 -32t-16 -32l-522 -427q-16 -13 -27.5 -8t-11.5 26v291h-550q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h550v291q0 21 11.5 26t27.5 -8z" />
-<glyph unicode="&#xe093;" d="M682 1161l427 -522q13 -16 8 -27.5t-26 -11.5h-291v-550q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v550h-291q-21 0 -26 11.5t8 27.5l427 522q13 16 32 16t32 -16z" />
-<glyph unicode="&#xe094;" d="M550 1200h200q21 0 35.5 -14.5t14.5 -35.5v-550h291q21 0 26 -11.5t-8 -27.5l-427 -522q-13 -16 -32 -16t-32 16l-427 522q-13 16 -8 27.5t26 11.5h291v550q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe095;" d="M639 1109l522 -427q16 -13 16 -32t-16 -32l-522 -427q-16 -13 -27.5 -8t-11.5 26v291q-94 -2 -182 -20t-170.5 -52t-147 -92.5t-100.5 -135.5q5 105 27 193.5t67.5 167t113 135t167 91.5t225.5 42v262q0 21 11.5 26t27.5 -8z" />
-<glyph unicode="&#xe096;" d="M850 1200h300q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -10.5 -25t-24.5 10l-94 94l-249 -249q-8 -7 -18 -7t-18 7l-106 106q-7 8 -7 18t7 18l249 249l-94 94q-14 14 -10 24.5t25 10.5zM350 0h-300q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 10.5 25t24.5 -10l94 -94l249 249 q8 7 18 7t18 -7l106 -106q7 -8 7 -18t-7 -18l-249 -249l94 -94q14 -14 10 -24.5t-25 -10.5z" />
-<glyph unicode="&#xe097;" d="M1014 1120l106 -106q7 -8 7 -18t-7 -18l-249 -249l94 -94q14 -14 10 -24.5t-25 -10.5h-300q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 10.5 25t24.5 -10l94 -94l249 249q8 7 18 7t18 -7zM250 600h300q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -10.5 -25t-24.5 10l-94 94 l-249 -249q-8 -7 -18 -7t-18 7l-106 106q-7 8 -7 18t7 18l249 249l-94 94q-14 14 -10 24.5t25 10.5z" />
-<glyph unicode="&#xe101;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM704 900h-208q-20 0 -32 -14.5t-8 -34.5l58 -302q4 -20 21.5 -34.5 t37.5 -14.5h54q20 0 37.5 14.5t21.5 34.5l58 302q4 20 -8 34.5t-32 14.5zM675 400h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5z" />
-<glyph unicode="&#xe102;" d="M260 1200q9 0 19 -2t15 -4l5 -2q22 -10 44 -23l196 -118q21 -13 36 -24q29 -21 37 -12q11 13 49 35l196 118q22 13 45 23q17 7 38 7q23 0 47 -16.5t37 -33.5l13 -16q14 -21 18 -45l25 -123l8 -44q1 -9 8.5 -14.5t17.5 -5.5h61q10 0 17.5 -7.5t7.5 -17.5v-50 q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 -7.5t-7.5 -17.5v-175h-400v300h-200v-300h-400v175q0 10 -7.5 17.5t-17.5 7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5h61q11 0 18 3t7 8q0 4 9 52l25 128q5 25 19 45q2 3 5 7t13.5 15t21.5 19.5t26.5 15.5 t29.5 7zM915 1079l-166 -162q-7 -7 -5 -12t12 -5h219q10 0 15 7t2 17l-51 149q-3 10 -11 12t-15 -6zM463 917l-177 157q-8 7 -16 5t-11 -12l-51 -143q-3 -10 2 -17t15 -7h231q11 0 12.5 5t-5.5 12zM500 0h-375q-10 0 -17.5 7.5t-7.5 17.5v375h400v-400zM1100 400v-375 q0 -10 -7.5 -17.5t-17.5 -7.5h-375v400h400z" />
-<glyph unicode="&#xe103;" d="M1165 1190q8 3 21 -6.5t13 -17.5q-2 -178 -24.5 -323.5t-55.5 -245.5t-87 -174.5t-102.5 -118.5t-118 -68.5t-118.5 -33t-120 -4.5t-105 9.5t-90 16.5q-61 12 -78 11q-4 1 -12.5 0t-34 -14.5t-52.5 -40.5l-153 -153q-26 -24 -37 -14.5t-11 43.5q0 64 42 102q8 8 50.5 45 t66.5 58q19 17 35 47t13 61q-9 55 -10 102.5t7 111t37 130t78 129.5q39 51 80 88t89.5 63.5t94.5 45t113.5 36t129 31t157.5 37t182 47.5zM1116 1098q-8 9 -22.5 -3t-45.5 -50q-38 -47 -119 -103.5t-142 -89.5l-62 -33q-56 -30 -102 -57t-104 -68t-102.5 -80.5t-85.5 -91 t-64 -104.5q-24 -56 -31 -86t2 -32t31.5 17.5t55.5 59.5q25 30 94 75.5t125.5 77.5t147.5 81q70 37 118.5 69t102 79.5t99 111t86.5 148.5q22 50 24 60t-6 19z" />
-<glyph unicode="&#xe104;" d="M653 1231q-39 -67 -54.5 -131t-10.5 -114.5t24.5 -96.5t47.5 -80t63.5 -62.5t68.5 -46.5t65 -30q-4 7 -17.5 35t-18.5 39.5t-17 39.5t-17 43t-13 42t-9.5 44.5t-2 42t4 43t13.5 39t23 38.5q96 -42 165 -107.5t105 -138t52 -156t13 -159t-19 -149.5q-13 -55 -44 -106.5 t-68 -87t-78.5 -64.5t-72.5 -45t-53 -22q-72 -22 -127 -11q-31 6 -13 19q6 3 17 7q13 5 32.5 21t41 44t38.5 63.5t21.5 81.5t-6.5 94.5t-50 107t-104 115.5q10 -104 -0.5 -189t-37 -140.5t-65 -93t-84 -52t-93.5 -11t-95 24.5q-80 36 -131.5 114t-53.5 171q-2 23 0 49.5 t4.5 52.5t13.5 56t27.5 60t46 64.5t69.5 68.5q-8 -53 -5 -102.5t17.5 -90t34 -68.5t44.5 -39t49 -2q31 13 38.5 36t-4.5 55t-29 64.5t-36 75t-26 75.5q-15 85 2 161.5t53.5 128.5t85.5 92.5t93.5 61t81.5 25.5z" />
-<glyph unicode="&#xe105;" d="M600 1094q82 0 160.5 -22.5t140 -59t116.5 -82.5t94.5 -95t68 -95t42.5 -82.5t14 -57.5t-14 -57.5t-43 -82.5t-68.5 -95t-94.5 -95t-116.5 -82.5t-140 -59t-159.5 -22.5t-159.5 22.5t-140 59t-116.5 82.5t-94.5 95t-68.5 95t-43 82.5t-14 57.5t14 57.5t42.5 82.5t68 95 t94.5 95t116.5 82.5t140 59t160.5 22.5zM888 829q-15 15 -18 12t5 -22q25 -57 25 -119q0 -124 -88 -212t-212 -88t-212 88t-88 212q0 59 23 114q8 19 4.5 22t-17.5 -12q-70 -69 -160 -184q-13 -16 -15 -40.5t9 -42.5q22 -36 47 -71t70 -82t92.5 -81t113 -58.5t133.5 -24.5 t133.5 24t113 58.5t92.5 81.5t70 81.5t47 70.5q11 18 9 42.5t-14 41.5q-90 117 -163 189zM448 727l-35 -36q-15 -15 -19.5 -38.5t4.5 -41.5q37 -68 93 -116q16 -13 38.5 -11t36.5 17l35 34q14 15 12.5 33.5t-16.5 33.5q-44 44 -89 117q-11 18 -28 20t-32 -12z" />
-<glyph unicode="&#xe106;" d="M592 0h-148l31 120q-91 20 -175.5 68.5t-143.5 106.5t-103.5 119t-66.5 110t-22 76q0 21 14 57.5t42.5 82.5t68 95t94.5 95t116.5 82.5t140 59t160.5 22.5q61 0 126 -15l32 121h148zM944 770l47 181q108 -85 176.5 -192t68.5 -159q0 -26 -19.5 -71t-59.5 -102t-93 -112 t-129 -104.5t-158 -75.5l46 173q77 49 136 117t97 131q11 18 9 42.5t-14 41.5q-54 70 -107 130zM310 824q-70 -69 -160 -184q-13 -16 -15 -40.5t9 -42.5q18 -30 39 -60t57 -70.5t74 -73t90 -61t105 -41.5l41 154q-107 18 -178.5 101.5t-71.5 193.5q0 59 23 114q8 19 4.5 22 t-17.5 -12zM448 727l-35 -36q-15 -15 -19.5 -38.5t4.5 -41.5q37 -68 93 -116q16 -13 38.5 -11t36.5 17l12 11l22 86l-3 4q-44 44 -89 117q-11 18 -28 20t-32 -12z" />
-<glyph unicode="&#xe107;" d="M-90 100l642 1066q20 31 48 28.5t48 -35.5l642 -1056q21 -32 7.5 -67.5t-50.5 -35.5h-1294q-37 0 -50.5 34t7.5 66zM155 200h345v75q0 10 7.5 17.5t17.5 7.5h150q10 0 17.5 -7.5t7.5 -17.5v-75h345l-445 723zM496 700h208q20 0 32 -14.5t8 -34.5l-58 -252 q-4 -20 -21.5 -34.5t-37.5 -14.5h-54q-20 0 -37.5 14.5t-21.5 34.5l-58 252q-4 20 8 34.5t32 14.5z" />
-<glyph unicode="&#xe108;" d="M650 1200q62 0 106 -44t44 -106v-339l363 -325q15 -14 26 -38.5t11 -44.5v-41q0 -20 -12 -26.5t-29 5.5l-359 249v-263q100 -93 100 -113v-64q0 -21 -13 -29t-32 1l-205 128l-205 -128q-19 -9 -32 -1t-13 29v64q0 20 100 113v263l-359 -249q-17 -12 -29 -5.5t-12 26.5v41 q0 20 11 44.5t26 38.5l363 325v339q0 62 44 106t106 44z" />
-<glyph unicode="&#xe109;" d="M850 1200h100q21 0 35.5 -14.5t14.5 -35.5v-50h50q21 0 35.5 -14.5t14.5 -35.5v-150h-1100v150q0 21 14.5 35.5t35.5 14.5h50v50q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-50h500v50q0 21 14.5 35.5t35.5 14.5zM1100 800v-750q0 -21 -14.5 -35.5 t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v750h1100zM100 600v-100h100v100h-100zM300 600v-100h100v100h-100zM500 600v-100h100v100h-100zM700 600v-100h100v100h-100zM900 600v-100h100v100h-100zM100 400v-100h100v100h-100zM300 400v-100h100v100h-100zM500 400 v-100h100v100h-100zM700 400v-100h100v100h-100zM900 400v-100h100v100h-100zM100 200v-100h100v100h-100zM300 200v-100h100v100h-100zM500 200v-100h100v100h-100zM700 200v-100h100v100h-100zM900 200v-100h100v100h-100z" />
-<glyph unicode="&#xe110;" d="M1135 1165l249 -230q15 -14 15 -35t-15 -35l-249 -230q-14 -14 -24.5 -10t-10.5 25v150h-159l-600 -600h-291q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h209l600 600h241v150q0 21 10.5 25t24.5 -10zM522 819l-141 -141l-122 122h-209q-21 0 -35.5 14.5 t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h291zM1135 565l249 -230q15 -14 15 -35t-15 -35l-249 -230q-14 -14 -24.5 -10t-10.5 25v150h-241l-181 181l141 141l122 -122h159v150q0 21 10.5 25t24.5 -10z" />
-<glyph unicode="&#xe111;" d="M100 1100h1000q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-596l-304 -300v300h-100q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5z" />
-<glyph unicode="&#xe112;" d="M150 1200h200q21 0 35.5 -14.5t14.5 -35.5v-250h-300v250q0 21 14.5 35.5t35.5 14.5zM850 1200h200q21 0 35.5 -14.5t14.5 -35.5v-250h-300v250q0 21 14.5 35.5t35.5 14.5zM1100 800v-300q0 -41 -3 -77.5t-15 -89.5t-32 -96t-58 -89t-89 -77t-129 -51t-174 -20t-174 20 t-129 51t-89 77t-58 89t-32 96t-15 89.5t-3 77.5v300h300v-250v-27v-42.5t1.5 -41t5 -38t10 -35t16.5 -30t25.5 -24.5t35 -19t46.5 -12t60 -4t60 4.5t46.5 12.5t35 19.5t25 25.5t17 30.5t10 35t5 38t2 40.5t-0.5 42v25v250h300z" />
-<glyph unicode="&#xe113;" d="M1100 411l-198 -199l-353 353l-353 -353l-197 199l551 551z" />
-<glyph unicode="&#xe114;" d="M1101 789l-550 -551l-551 551l198 199l353 -353l353 353z" />
-<glyph unicode="&#xe115;" d="M404 1000h746q21 0 35.5 -14.5t14.5 -35.5v-551h150q21 0 25 -10.5t-10 -24.5l-230 -249q-14 -15 -35 -15t-35 15l-230 249q-14 14 -10 24.5t25 10.5h150v401h-381zM135 984l230 -249q14 -14 10 -24.5t-25 -10.5h-150v-400h385l215 -200h-750q-21 0 -35.5 14.5 t-14.5 35.5v550h-150q-21 0 -25 10.5t10 24.5l230 249q14 15 35 15t35 -15z" />
-<glyph unicode="&#xe116;" d="M56 1200h94q17 0 31 -11t18 -27l38 -162h896q24 0 39 -18.5t10 -42.5l-100 -475q-5 -21 -27 -42.5t-55 -21.5h-633l48 -200h535q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-50v-50q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v50h-300v-50 q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v50h-31q-18 0 -32.5 10t-20.5 19l-5 10l-201 961h-54q-20 0 -35 14.5t-15 35.5t15 35.5t35 14.5z" />
-<glyph unicode="&#xe117;" d="M1200 1000v-100h-1200v100h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500zM0 800h1200v-800h-1200v800z" />
-<glyph unicode="&#xe118;" d="M200 800l-200 -400v600h200q0 41 29.5 70.5t70.5 29.5h300q42 0 71 -29.5t29 -70.5h500v-200h-1000zM1500 700l-300 -700h-1200l300 700h1200z" />
-<glyph unicode="&#xe119;" d="M635 1184l230 -249q14 -14 10 -24.5t-25 -10.5h-150v-601h150q21 0 25 -10.5t-10 -24.5l-230 -249q-14 -15 -35 -15t-35 15l-230 249q-14 14 -10 24.5t25 10.5h150v601h-150q-21 0 -25 10.5t10 24.5l230 249q14 15 35 15t35 -15z" />
-<glyph unicode="&#xe120;" d="M936 864l249 -229q14 -15 14 -35.5t-14 -35.5l-249 -229q-15 -15 -25.5 -10.5t-10.5 24.5v151h-600v-151q0 -20 -10.5 -24.5t-25.5 10.5l-249 229q-14 15 -14 35.5t14 35.5l249 229q15 15 25.5 10.5t10.5 -25.5v-149h600v149q0 21 10.5 25.5t25.5 -10.5z" />
-<glyph unicode="&#xe121;" d="M1169 400l-172 732q-5 23 -23 45.5t-38 22.5h-672q-20 0 -38 -20t-23 -41l-172 -739h1138zM1100 300h-1000q-41 0 -70.5 -29.5t-29.5 -70.5v-100q0 -41 29.5 -70.5t70.5 -29.5h1000q41 0 70.5 29.5t29.5 70.5v100q0 41 -29.5 70.5t-70.5 29.5zM800 100v100h100v-100h-100 zM1000 100v100h100v-100h-100z" />
-<glyph unicode="&#xe122;" d="M1150 1100q21 0 35.5 -14.5t14.5 -35.5v-850q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v850q0 21 14.5 35.5t35.5 14.5zM1000 200l-675 200h-38l47 -276q3 -16 -5.5 -20t-29.5 -4h-7h-84q-20 0 -34.5 14t-18.5 35q-55 337 -55 351v250v6q0 16 1 23.5t6.5 14 t17.5 6.5h200l675 250v-850zM0 750v-250q-4 0 -11 0.5t-24 6t-30 15t-24 30t-11 48.5v50q0 26 10.5 46t25 30t29 16t25.5 7z" />
-<glyph unicode="&#xe123;" d="M553 1200h94q20 0 29 -10.5t3 -29.5l-18 -37q83 -19 144 -82.5t76 -140.5l63 -327l118 -173h17q19 0 33 -14.5t14 -35t-13 -40.5t-31 -27q-8 -4 -23 -9.5t-65 -19.5t-103 -25t-132.5 -20t-158.5 -9q-57 0 -115 5t-104 12t-88.5 15.5t-73.5 17.5t-54.5 16t-35.5 12l-11 4 q-18 8 -31 28t-13 40.5t14 35t33 14.5h17l118 173l63 327q15 77 76 140t144 83l-18 32q-6 19 3.5 32t28.5 13zM498 110q50 -6 102 -6q53 0 102 6q-12 -49 -39.5 -79.5t-62.5 -30.5t-63 30.5t-39 79.5z" />
-<glyph unicode="&#xe124;" d="M800 946l224 78l-78 -224l234 -45l-180 -155l180 -155l-234 -45l78 -224l-224 78l-45 -234l-155 180l-155 -180l-45 234l-224 -78l78 224l-234 45l180 155l-180 155l234 45l-78 224l224 -78l45 234l155 -180l155 180z" />
-<glyph unicode="&#xe125;" d="M650 1200h50q40 0 70 -40.5t30 -84.5v-150l-28 -125h328q40 0 70 -40.5t30 -84.5v-100q0 -45 -29 -74l-238 -344q-16 -24 -38 -40.5t-45 -16.5h-250q-7 0 -42 25t-66 50l-31 25h-61q-45 0 -72.5 18t-27.5 57v400q0 36 20 63l145 196l96 198q13 28 37.5 48t51.5 20z M650 1100l-100 -212l-150 -213v-375h100l136 -100h214l250 375v125h-450l50 225v175h-50zM50 800h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v500q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe126;" d="M600 1100h250q23 0 45 -16.5t38 -40.5l238 -344q29 -29 29 -74v-100q0 -44 -30 -84.5t-70 -40.5h-328q28 -118 28 -125v-150q0 -44 -30 -84.5t-70 -40.5h-50q-27 0 -51.5 20t-37.5 48l-96 198l-145 196q-20 27 -20 63v400q0 39 27.5 57t72.5 18h61q124 100 139 100z M50 1000h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v500q0 21 14.5 35.5t35.5 14.5zM636 1000l-136 -100h-100v-375l150 -213l100 -212h50v175l-50 225h450v125l-250 375h-214z" />
-<glyph unicode="&#xe127;" d="M356 873l363 230q31 16 53 -6l110 -112q13 -13 13.5 -32t-11.5 -34l-84 -121h302q84 0 138 -38t54 -110t-55 -111t-139 -39h-106l-131 -339q-6 -21 -19.5 -41t-28.5 -20h-342q-7 0 -90 81t-83 94v525q0 17 14 35.5t28 28.5zM400 792v-503l100 -89h293l131 339 q6 21 19.5 41t28.5 20h203q21 0 30.5 25t0.5 50t-31 25h-456h-7h-6h-5.5t-6 0.5t-5 1.5t-5 2t-4 2.5t-4 4t-2.5 4.5q-12 25 5 47l146 183l-86 83zM50 800h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v500 q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe128;" d="M475 1103l366 -230q2 -1 6 -3.5t14 -10.5t18 -16.5t14.5 -20t6.5 -22.5v-525q0 -13 -86 -94t-93 -81h-342q-15 0 -28.5 20t-19.5 41l-131 339h-106q-85 0 -139.5 39t-54.5 111t54 110t138 38h302l-85 121q-11 15 -10.5 34t13.5 32l110 112q22 22 53 6zM370 945l146 -183 q17 -22 5 -47q-2 -2 -3.5 -4.5t-4 -4t-4 -2.5t-5 -2t-5 -1.5t-6 -0.5h-6h-6.5h-6h-475v-100h221q15 0 29 -20t20 -41l130 -339h294l106 89v503l-342 236zM1050 800h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5 v500q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe129;" d="M550 1294q72 0 111 -55t39 -139v-106l339 -131q21 -6 41 -19.5t20 -28.5v-342q0 -7 -81 -90t-94 -83h-525q-17 0 -35.5 14t-28.5 28l-9 14l-230 363q-16 31 6 53l112 110q13 13 32 13.5t34 -11.5l121 -84v302q0 84 38 138t110 54zM600 972v203q0 21 -25 30.5t-50 0.5 t-25 -31v-456v-7v-6v-5.5t-0.5 -6t-1.5 -5t-2 -5t-2.5 -4t-4 -4t-4.5 -2.5q-25 -12 -47 5l-183 146l-83 -86l236 -339h503l89 100v293l-339 131q-21 6 -41 19.5t-20 28.5zM450 200h500q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-500 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe130;" d="M350 1100h500q21 0 35.5 14.5t14.5 35.5v100q0 21 -14.5 35.5t-35.5 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -21 14.5 -35.5t35.5 -14.5zM600 306v-106q0 -84 -39 -139t-111 -55t-110 54t-38 138v302l-121 -84q-15 -12 -34 -11.5t-32 13.5l-112 110 q-22 22 -6 53l230 363q1 2 3.5 6t10.5 13.5t16.5 17t20 13.5t22.5 6h525q13 0 94 -83t81 -90v-342q0 -15 -20 -28.5t-41 -19.5zM308 900l-236 -339l83 -86l183 146q22 17 47 5q2 -1 4.5 -2.5t4 -4t2.5 -4t2 -5t1.5 -5t0.5 -6v-5.5v-6v-7v-456q0 -22 25 -31t50 0.5t25 30.5 v203q0 15 20 28.5t41 19.5l339 131v293l-89 100h-503z" />
-<glyph unicode="&#xe131;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM914 632l-275 223q-16 13 -27.5 8t-11.5 -26v-137h-275 q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h275v-137q0 -21 11.5 -26t27.5 8l275 223q16 13 16 32t-16 32z" />
-<glyph unicode="&#xe132;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM561 855l-275 -223q-16 -13 -16 -32t16 -32l275 -223q16 -13 27.5 -8 t11.5 26v137h275q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5h-275v137q0 21 -11.5 26t-27.5 -8z" />
-<glyph unicode="&#xe133;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM855 639l-223 275q-13 16 -32 16t-32 -16l-223 -275q-13 -16 -8 -27.5 t26 -11.5h137v-275q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v275h137q21 0 26 11.5t-8 27.5z" />
-<glyph unicode="&#xe134;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM675 900h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-275h-137q-21 0 -26 -11.5 t8 -27.5l223 -275q13 -16 32 -16t32 16l223 275q13 16 8 27.5t-26 11.5h-137v275q0 10 -7.5 17.5t-17.5 7.5z" />
-<glyph unicode="&#xe135;" d="M600 1176q116 0 222.5 -46t184 -123.5t123.5 -184t46 -222.5t-46 -222.5t-123.5 -184t-184 -123.5t-222.5 -46t-222.5 46t-184 123.5t-123.5 184t-46 222.5t46 222.5t123.5 184t184 123.5t222.5 46zM627 1101q-15 -12 -36.5 -20.5t-35.5 -12t-43 -8t-39 -6.5 q-15 -3 -45.5 0t-45.5 -2q-20 -7 -51.5 -26.5t-34.5 -34.5q-3 -11 6.5 -22.5t8.5 -18.5q-3 -34 -27.5 -91t-29.5 -79q-9 -34 5 -93t8 -87q0 -9 17 -44.5t16 -59.5q12 0 23 -5t23.5 -15t19.5 -14q16 -8 33 -15t40.5 -15t34.5 -12q21 -9 52.5 -32t60 -38t57.5 -11 q7 -15 -3 -34t-22.5 -40t-9.5 -38q13 -21 23 -34.5t27.5 -27.5t36.5 -18q0 -7 -3.5 -16t-3.5 -14t5 -17q104 -2 221 112q30 29 46.5 47t34.5 49t21 63q-13 8 -37 8.5t-36 7.5q-15 7 -49.5 15t-51.5 19q-18 0 -41 -0.5t-43 -1.5t-42 -6.5t-38 -16.5q-51 -35 -66 -12 q-4 1 -3.5 25.5t0.5 25.5q-6 13 -26.5 17.5t-24.5 6.5q1 15 -0.5 30.5t-7 28t-18.5 11.5t-31 -21q-23 -25 -42 4q-19 28 -8 58q6 16 22 22q6 -1 26 -1.5t33.5 -4t19.5 -13.5q7 -12 18 -24t21.5 -20.5t20 -15t15.5 -10.5l5 -3q2 12 7.5 30.5t8 34.5t-0.5 32q-3 18 3.5 29 t18 22.5t15.5 24.5q6 14 10.5 35t8 31t15.5 22.5t34 22.5q-6 18 10 36q8 0 24 -1.5t24.5 -1.5t20 4.5t20.5 15.5q-10 23 -31 42.5t-37.5 29.5t-49 27t-43.5 23q0 1 2 8t3 11.5t1.5 10.5t-1 9.5t-4.5 4.5q31 -13 58.5 -14.5t38.5 2.5l12 5q5 28 -9.5 46t-36.5 24t-50 15 t-41 20q-18 -4 -37 0zM613 994q0 -17 8 -42t17 -45t9 -23q-8 1 -39.5 5.5t-52.5 10t-37 16.5q3 11 16 29.5t16 25.5q10 -10 19 -10t14 6t13.5 14.5t16.5 12.5z" />
-<glyph unicode="&#xe136;" d="M756 1157q164 92 306 -9l-259 -138l145 -232l251 126q6 -89 -34 -156.5t-117 -110.5q-60 -34 -127 -39.5t-126 16.5l-596 -596q-15 -16 -36.5 -16t-36.5 16l-111 110q-15 15 -15 36.5t15 37.5l600 599q-34 101 5.5 201.5t135.5 154.5z" />
-<glyph unicode="&#xe137;" horiz-adv-x="1220" d="M100 1196h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 1096h-200v-100h200v100zM100 796h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 696h-500v-100h500v100zM100 396h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 296h-300v-100h300v100z " />
-<glyph unicode="&#xe138;" d="M150 1200h900q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM700 500v-300l-200 -200v500l-350 500h900z" />
-<glyph unicode="&#xe139;" d="M500 1200h200q41 0 70.5 -29.5t29.5 -70.5v-100h300q41 0 70.5 -29.5t29.5 -70.5v-400h-500v100h-200v-100h-500v400q0 41 29.5 70.5t70.5 29.5h300v100q0 41 29.5 70.5t70.5 29.5zM500 1100v-100h200v100h-200zM1200 400v-200q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v200h1200z" />
-<glyph unicode="&#xe140;" d="M50 1200h300q21 0 25 -10.5t-10 -24.5l-94 -94l199 -199q7 -8 7 -18t-7 -18l-106 -106q-8 -7 -18 -7t-18 7l-199 199l-94 -94q-14 -14 -24.5 -10t-10.5 25v300q0 21 14.5 35.5t35.5 14.5zM850 1200h300q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -10.5 -25t-24.5 10l-94 94 l-199 -199q-8 -7 -18 -7t-18 7l-106 106q-7 8 -7 18t7 18l199 199l-94 94q-14 14 -10 24.5t25 10.5zM364 470l106 -106q7 -8 7 -18t-7 -18l-199 -199l94 -94q14 -14 10 -24.5t-25 -10.5h-300q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 10.5 25t24.5 -10l94 -94l199 199 q8 7 18 7t18 -7zM1071 271l94 94q14 14 24.5 10t10.5 -25v-300q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -25 10.5t10 24.5l94 94l-199 199q-7 8 -7 18t7 18l106 106q8 7 18 7t18 -7z" />
-<glyph unicode="&#xe141;" d="M596 1192q121 0 231.5 -47.5t190 -127t127 -190t47.5 -231.5t-47.5 -231.5t-127 -190.5t-190 -127t-231.5 -47t-231.5 47t-190.5 127t-127 190.5t-47 231.5t47 231.5t127 190t190.5 127t231.5 47.5zM596 1010q-112 0 -207.5 -55.5t-151 -151t-55.5 -207.5t55.5 -207.5 t151 -151t207.5 -55.5t207.5 55.5t151 151t55.5 207.5t-55.5 207.5t-151 151t-207.5 55.5zM454.5 905q22.5 0 38.5 -16t16 -38.5t-16 -39t-38.5 -16.5t-38.5 16.5t-16 39t16 38.5t38.5 16zM754.5 905q22.5 0 38.5 -16t16 -38.5t-16 -39t-38 -16.5q-14 0 -29 10l-55 -145 q17 -23 17 -51q0 -36 -25.5 -61.5t-61.5 -25.5t-61.5 25.5t-25.5 61.5q0 32 20.5 56.5t51.5 29.5l122 126l1 1q-9 14 -9 28q0 23 16 39t38.5 16zM345.5 709q22.5 0 38.5 -16t16 -38.5t-16 -38.5t-38.5 -16t-38.5 16t-16 38.5t16 38.5t38.5 16zM854.5 709q22.5 0 38.5 -16 t16 -38.5t-16 -38.5t-38.5 -16t-38.5 16t-16 38.5t16 38.5t38.5 16z" />
-<glyph unicode="&#xe142;" d="M546 173l469 470q91 91 99 192q7 98 -52 175.5t-154 94.5q-22 4 -47 4q-34 0 -66.5 -10t-56.5 -23t-55.5 -38t-48 -41.5t-48.5 -47.5q-376 -375 -391 -390q-30 -27 -45 -41.5t-37.5 -41t-32 -46.5t-16 -47.5t-1.5 -56.5q9 -62 53.5 -95t99.5 -33q74 0 125 51l548 548 q36 36 20 75q-7 16 -21.5 26t-32.5 10q-26 0 -50 -23q-13 -12 -39 -38l-341 -338q-15 -15 -35.5 -15.5t-34.5 13.5t-14 34.5t14 34.5q327 333 361 367q35 35 67.5 51.5t78.5 16.5q14 0 29 -1q44 -8 74.5 -35.5t43.5 -68.5q14 -47 2 -96.5t-47 -84.5q-12 -11 -32 -32 t-79.5 -81t-114.5 -115t-124.5 -123.5t-123 -119.5t-96.5 -89t-57 -45q-56 -27 -120 -27q-70 0 -129 32t-93 89q-48 78 -35 173t81 163l511 511q71 72 111 96q91 55 198 55q80 0 152 -33q78 -36 129.5 -103t66.5 -154q17 -93 -11 -183.5t-94 -156.5l-482 -476 q-15 -15 -36 -16t-37 14t-17.5 34t14.5 35z" />
-<glyph unicode="&#xe143;" d="M649 949q48 68 109.5 104t121.5 38.5t118.5 -20t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-150 152.5t-126.5 127.5t-93.5 124.5t-33.5 117.5q0 64 28 123t73 100.5t104 64t119 20 t120.5 -38.5t104.5 -104zM896 972q-33 0 -64.5 -19t-56.5 -46t-47.5 -53.5t-43.5 -45.5t-37.5 -19t-36 19t-40 45.5t-43 53.5t-54 46t-65.5 19q-67 0 -122.5 -55.5t-55.5 -132.5q0 -23 13.5 -51t46 -65t57.5 -63t76 -75l22 -22q15 -14 44 -44t50.5 -51t46 -44t41 -35t23 -12 t23.5 12t42.5 36t46 44t52.5 52t44 43q4 4 12 13q43 41 63.5 62t52 55t46 55t26 46t11.5 44q0 79 -53 133.5t-120 54.5z" />
-<glyph unicode="&#xe144;" d="M776.5 1214q93.5 0 159.5 -66l141 -141q66 -66 66 -160q0 -42 -28 -95.5t-62 -87.5l-29 -29q-31 53 -77 99l-18 18l95 95l-247 248l-389 -389l212 -212l-105 -106l-19 18l-141 141q-66 66 -66 159t66 159l283 283q65 66 158.5 66zM600 706l105 105q10 -8 19 -17l141 -141 q66 -66 66 -159t-66 -159l-283 -283q-66 -66 -159 -66t-159 66l-141 141q-66 66 -66 159.5t66 159.5l55 55q29 -55 75 -102l18 -17l-95 -95l247 -248l389 389z" />
-<glyph unicode="&#xe145;" d="M603 1200q85 0 162 -15t127 -38t79 -48t29 -46v-953q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-41 0 -70.5 29.5t-29.5 70.5v953q0 21 30 46.5t81 48t129 37.5t163 15zM300 1000v-700h600v700h-600zM600 254q-43 0 -73.5 -30.5t-30.5 -73.5t30.5 -73.5t73.5 -30.5t73.5 30.5 t30.5 73.5t-30.5 73.5t-73.5 30.5z" />
-<glyph unicode="&#xe146;" d="M902 1185l283 -282q15 -15 15 -36t-14.5 -35.5t-35.5 -14.5t-35 15l-36 35l-279 -267v-300l-212 210l-308 -307l-280 -203l203 280l307 308l-210 212h300l267 279l-35 36q-15 14 -15 35t14.5 35.5t35.5 14.5t35 -15z" />
-<glyph unicode="&#xe148;" d="M700 1248v-78q38 -5 72.5 -14.5t75.5 -31.5t71 -53.5t52 -84t24 -118.5h-159q-4 36 -10.5 59t-21 45t-40 35.5t-64.5 20.5v-307l64 -13q34 -7 64 -16.5t70 -32t67.5 -52.5t47.5 -80t20 -112q0 -139 -89 -224t-244 -97v-77h-100v79q-150 16 -237 103q-40 40 -52.5 93.5 t-15.5 139.5h139q5 -77 48.5 -126t117.5 -65v335l-27 8q-46 14 -79 26.5t-72 36t-63 52t-40 72.5t-16 98q0 70 25 126t67.5 92t94.5 57t110 27v77h100zM600 754v274q-29 -4 -50 -11t-42 -21.5t-31.5 -41.5t-10.5 -65q0 -29 7 -50.5t16.5 -34t28.5 -22.5t31.5 -14t37.5 -10 q9 -3 13 -4zM700 547v-310q22 2 42.5 6.5t45 15.5t41.5 27t29 42t12 59.5t-12.5 59.5t-38 44.5t-53 31t-66.5 24.5z" />
-<glyph unicode="&#xe149;" d="M561 1197q84 0 160.5 -40t123.5 -109.5t47 -147.5h-153q0 40 -19.5 71.5t-49.5 48.5t-59.5 26t-55.5 9q-37 0 -79 -14.5t-62 -35.5q-41 -44 -41 -101q0 -26 13.5 -63t26.5 -61t37 -66q6 -9 9 -14h241v-100h-197q8 -50 -2.5 -115t-31.5 -95q-45 -62 -99 -112 q34 10 83 17.5t71 7.5q32 1 102 -16t104 -17q83 0 136 30l50 -147q-31 -19 -58 -30.5t-55 -15.5t-42 -4.5t-46 -0.5q-23 0 -76 17t-111 32.5t-96 11.5q-39 -3 -82 -16t-67 -25l-23 -11l-55 145q4 3 16 11t15.5 10.5t13 9t15.5 12t14.5 14t17.5 18.5q48 55 54 126.5 t-30 142.5h-221v100h166q-23 47 -44 104q-7 20 -12 41.5t-6 55.5t6 66.5t29.5 70.5t58.5 71q97 88 263 88z" />
-<glyph unicode="&#xe150;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM935 1184l230 -249q14 -14 10 -24.5t-25 -10.5h-150v-900h-200v900h-150q-21 0 -25 10.5t10 24.5l230 249q14 15 35 15t35 -15z" />
-<glyph unicode="&#xe151;" d="M1000 700h-100v100h-100v-100h-100v500h300v-500zM400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM801 1100v-200h100v200h-100zM1000 350l-200 -250h200v-100h-300v150l200 250h-200v100h300v-150z " />
-<glyph unicode="&#xe152;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1000 1050l-200 -250h200v-100h-300v150l200 250h-200v100h300v-150zM1000 0h-100v100h-100v-100h-100v500h300v-500zM801 400v-200h100v200h-100z " />
-<glyph unicode="&#xe153;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1000 700h-100v400h-100v100h200v-500zM1100 0h-100v100h-200v400h300v-500zM901 400v-200h100v200h-100z" />
-<glyph unicode="&#xe154;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1100 700h-100v100h-200v400h300v-500zM901 1100v-200h100v200h-100zM1000 0h-100v400h-100v100h200v-500z" />
-<glyph unicode="&#xe155;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM900 1000h-200v200h200v-200zM1000 700h-300v200h300v-200zM1100 400h-400v200h400v-200zM1200 100h-500v200h500v-200z" />
-<glyph unicode="&#xe156;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1200 1000h-500v200h500v-200zM1100 700h-400v200h400v-200zM1000 400h-300v200h300v-200zM900 100h-200v200h200v-200z" />
-<glyph unicode="&#xe157;" d="M350 1100h400q162 0 256 -93.5t94 -256.5v-400q0 -165 -93.5 -257.5t-256.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5z" />
-<glyph unicode="&#xe158;" d="M350 1100h400q165 0 257.5 -92.5t92.5 -257.5v-400q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-163 0 -256.5 92.5t-93.5 257.5v400q0 163 94 256.5t256 93.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5zM440 770l253 -190q17 -12 17 -30t-17 -30l-253 -190q-16 -12 -28 -6.5t-12 26.5v400q0 21 12 26.5t28 -6.5z" />
-<glyph unicode="&#xe159;" d="M350 1100h400q163 0 256.5 -94t93.5 -256v-400q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 163 92.5 256.5t257.5 93.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5zM350 700h400q21 0 26.5 -12t-6.5 -28l-190 -253q-12 -17 -30 -17t-30 17l-190 253q-12 16 -6.5 28t26.5 12z" />
-<glyph unicode="&#xe160;" d="M350 1100h400q165 0 257.5 -92.5t92.5 -257.5v-400q0 -163 -92.5 -256.5t-257.5 -93.5h-400q-163 0 -256.5 94t-93.5 256v400q0 165 92.5 257.5t257.5 92.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5zM580 693l190 -253q12 -16 6.5 -28t-26.5 -12h-400q-21 0 -26.5 12t6.5 28l190 253q12 17 30 17t30 -17z" />
-<glyph unicode="&#xe161;" d="M550 1100h400q165 0 257.5 -92.5t92.5 -257.5v-400q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h450q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-450q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM338 867l324 -284q16 -14 16 -33t-16 -33l-324 -284q-16 -14 -27 -9t-11 26v150h-250q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h250v150q0 21 11 26t27 -9z" />
-<glyph unicode="&#xe162;" d="M793 1182l9 -9q8 -10 5 -27q-3 -11 -79 -225.5t-78 -221.5l300 1q24 0 32.5 -17.5t-5.5 -35.5q-1 0 -133.5 -155t-267 -312.5t-138.5 -162.5q-12 -15 -26 -15h-9l-9 8q-9 11 -4 32q2 9 42 123.5t79 224.5l39 110h-302q-23 0 -31 19q-10 21 6 41q75 86 209.5 237.5 t228 257t98.5 111.5q9 16 25 16h9z" />
-<glyph unicode="&#xe163;" d="M350 1100h400q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-450q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h450q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400 q0 165 92.5 257.5t257.5 92.5zM938 867l324 -284q16 -14 16 -33t-16 -33l-324 -284q-16 -14 -27 -9t-11 26v150h-250q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h250v150q0 21 11 26t27 -9z" />
-<glyph unicode="&#xe164;" d="M750 1200h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -10.5 -25t-24.5 10l-109 109l-312 -312q-15 -15 -35.5 -15t-35.5 15l-141 141q-15 15 -15 35.5t15 35.5l312 312l-109 109q-14 14 -10 24.5t25 10.5zM456 900h-156q-41 0 -70.5 -29.5t-29.5 -70.5v-500 q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v148l200 200v-298q0 -165 -93.5 -257.5t-256.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5h300z" />
-<glyph unicode="&#xe165;" d="M600 1186q119 0 227.5 -46.5t187 -125t125 -187t46.5 -227.5t-46.5 -227.5t-125 -187t-187 -125t-227.5 -46.5t-227.5 46.5t-187 125t-125 187t-46.5 227.5t46.5 227.5t125 187t187 125t227.5 46.5zM600 1022q-115 0 -212 -56.5t-153.5 -153.5t-56.5 -212t56.5 -212 t153.5 -153.5t212 -56.5t212 56.5t153.5 153.5t56.5 212t-56.5 212t-153.5 153.5t-212 56.5zM600 794q80 0 137 -57t57 -137t-57 -137t-137 -57t-137 57t-57 137t57 137t137 57z" />
-<glyph unicode="&#xe166;" d="M450 1200h200q21 0 35.5 -14.5t14.5 -35.5v-350h245q20 0 25 -11t-9 -26l-383 -426q-14 -15 -33.5 -15t-32.5 15l-379 426q-13 15 -8.5 26t25.5 11h250v350q0 21 14.5 35.5t35.5 14.5zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5z M900 200v-50h100v50h-100z" />
-<glyph unicode="&#xe167;" d="M583 1182l378 -435q14 -15 9 -31t-26 -16h-244v-250q0 -20 -17 -35t-39 -15h-200q-20 0 -32 14.5t-12 35.5v250h-250q-20 0 -25.5 16.5t8.5 31.5l383 431q14 16 33.5 17t33.5 -14zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5z M900 200v-50h100v50h-100z" />
-<glyph unicode="&#xe168;" d="M396 723l369 369q7 7 17.5 7t17.5 -7l139 -139q7 -8 7 -18.5t-7 -17.5l-525 -525q-7 -8 -17.5 -8t-17.5 8l-292 291q-7 8 -7 18t7 18l139 139q8 7 18.5 7t17.5 -7zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5zM900 200v-50h100v50 h-100z" />
-<glyph unicode="&#xe169;" d="M135 1023l142 142q14 14 35 14t35 -14l77 -77l-212 -212l-77 76q-14 15 -14 36t14 35zM655 855l210 210q14 14 24.5 10t10.5 -25l-2 -599q-1 -20 -15.5 -35t-35.5 -15l-597 -1q-21 0 -25 10.5t10 24.5l208 208l-154 155l212 212zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5 v-250h-1100v250q0 21 14.5 35.5t35.5 14.5zM900 200v-50h100v50h-100z" />
-<glyph unicode="&#xe170;" d="M350 1200l599 -2q20 -1 35 -15.5t15 -35.5l1 -597q0 -21 -10.5 -25t-24.5 10l-208 208l-155 -154l-212 212l155 154l-210 210q-14 14 -10 24.5t25 10.5zM524 512l-76 -77q-15 -14 -36 -14t-35 14l-142 142q-14 14 -14 35t14 35l77 77zM50 300h1000q21 0 35.5 -14.5 t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5zM900 200v-50h100v50h-100z" />
-<glyph unicode="&#xe171;" d="M1200 103l-483 276l-314 -399v423h-399l1196 796v-1096zM483 424v-230l683 953z" />
-<glyph unicode="&#xe172;" d="M1100 1000v-850q0 -21 -14.5 -35.5t-35.5 -14.5h-150v400h-700v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200z" />
-<glyph unicode="&#xe173;" d="M1100 1000l-2 -149l-299 -299l-95 95q-9 9 -21.5 9t-21.5 -9l-149 -147h-312v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM1132 638l106 -106q7 -7 7 -17.5t-7 -17.5l-420 -421q-8 -7 -18 -7 t-18 7l-202 203q-8 7 -8 17.5t8 17.5l106 106q7 8 17.5 8t17.5 -8l79 -79l297 297q7 7 17.5 7t17.5 -7z" />
-<glyph unicode="&#xe174;" d="M1100 1000v-269l-103 -103l-134 134q-15 15 -33.5 16.5t-34.5 -12.5l-266 -266h-329v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM1202 572l70 -70q15 -15 15 -35.5t-15 -35.5l-131 -131 l131 -131q15 -15 15 -35.5t-15 -35.5l-70 -70q-15 -15 -35.5 -15t-35.5 15l-131 131l-131 -131q-15 -15 -35.5 -15t-35.5 15l-70 70q-15 15 -15 35.5t15 35.5l131 131l-131 131q-15 15 -15 35.5t15 35.5l70 70q15 15 35.5 15t35.5 -15l131 -131l131 131q15 15 35.5 15 t35.5 -15z" />
-<glyph unicode="&#xe175;" d="M1100 1000v-300h-350q-21 0 -35.5 -14.5t-14.5 -35.5v-150h-500v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM850 600h100q21 0 35.5 -14.5t14.5 -35.5v-250h150q21 0 25 -10.5t-10 -24.5 l-230 -230q-14 -14 -35 -14t-35 14l-230 230q-14 14 -10 24.5t25 10.5h150v250q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe176;" d="M1100 1000v-400l-165 165q-14 15 -35 15t-35 -15l-263 -265h-402v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM935 565l230 -229q14 -15 10 -25.5t-25 -10.5h-150v-250q0 -20 -14.5 -35 t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35v250h-150q-21 0 -25 10.5t10 25.5l230 229q14 15 35 15t35 -15z" />
-<glyph unicode="&#xe177;" d="M50 1100h1100q21 0 35.5 -14.5t14.5 -35.5v-150h-1200v150q0 21 14.5 35.5t35.5 14.5zM1200 800v-550q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v550h1200zM100 500v-200h400v200h-400z" />
-<glyph unicode="&#xe178;" d="M935 1165l248 -230q14 -14 14 -35t-14 -35l-248 -230q-14 -14 -24.5 -10t-10.5 25v150h-400v200h400v150q0 21 10.5 25t24.5 -10zM200 800h-50q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h50v-200zM400 800h-100v200h100v-200zM18 435l247 230 q14 14 24.5 10t10.5 -25v-150h400v-200h-400v-150q0 -21 -10.5 -25t-24.5 10l-247 230q-15 14 -15 35t15 35zM900 300h-100v200h100v-200zM1000 500h51q20 0 34.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-34.5 -14.5h-51v200z" />
-<glyph unicode="&#xe179;" d="M862 1073l276 116q25 18 43.5 8t18.5 -41v-1106q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v397q-4 1 -11 5t-24 17.5t-30 29t-24 42t-11 56.5v359q0 31 18.5 65t43.5 52zM550 1200q22 0 34.5 -12.5t14.5 -24.5l1 -13v-450q0 -28 -10.5 -59.5 t-25 -56t-29 -45t-25.5 -31.5l-10 -11v-447q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v447q-4 4 -11 11.5t-24 30.5t-30 46t-24 55t-11 60v450q0 2 0.5 5.5t4 12t8.5 15t14.5 12t22.5 5.5q20 0 32.5 -12.5t14.5 -24.5l3 -13v-350h100v350v5.5t2.5 12 t7 15t15 12t25.5 5.5q23 0 35.5 -12.5t13.5 -24.5l1 -13v-350h100v350q0 2 0.5 5.5t3 12t7 15t15 12t24.5 5.5z" />
-<glyph unicode="&#xe180;" d="M1200 1100v-56q-4 0 -11 -0.5t-24 -3t-30 -7.5t-24 -15t-11 -24v-888q0 -22 25 -34.5t50 -13.5l25 -2v-56h-400v56q75 0 87.5 6.5t12.5 43.5v394h-500v-394q0 -37 12.5 -43.5t87.5 -6.5v-56h-400v56q4 0 11 0.5t24 3t30 7.5t24 15t11 24v888q0 22 -25 34.5t-50 13.5 l-25 2v56h400v-56q-75 0 -87.5 -6.5t-12.5 -43.5v-394h500v394q0 37 -12.5 43.5t-87.5 6.5v56h400z" />
-<glyph unicode="&#xe181;" d="M675 1000h375q21 0 35.5 -14.5t14.5 -35.5v-150h-105l-295 -98v98l-200 200h-400l100 100h375zM100 900h300q41 0 70.5 -29.5t29.5 -70.5v-500q0 -41 -29.5 -70.5t-70.5 -29.5h-300q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5zM100 800v-200h300v200 h-300zM1100 535l-400 -133v163l400 133v-163zM100 500v-200h300v200h-300zM1100 398v-248q0 -21 -14.5 -35.5t-35.5 -14.5h-375l-100 -100h-375l-100 100h400l200 200h105z" />
-<glyph unicode="&#xe182;" d="M17 1007l162 162q17 17 40 14t37 -22l139 -194q14 -20 11 -44.5t-20 -41.5l-119 -118q102 -142 228 -268t267 -227l119 118q17 17 42.5 19t44.5 -12l192 -136q19 -14 22.5 -37.5t-13.5 -40.5l-163 -162q-3 -1 -9.5 -1t-29.5 2t-47.5 6t-62.5 14.5t-77.5 26.5t-90 42.5 t-101.5 60t-111 83t-119 108.5q-74 74 -133.5 150.5t-94.5 138.5t-60 119.5t-34.5 100t-15 74.5t-4.5 48z" />
-<glyph unicode="&#xe183;" d="M600 1100q92 0 175 -10.5t141.5 -27t108.5 -36.5t81.5 -40t53.5 -37t31 -27l9 -10v-200q0 -21 -14.5 -33t-34.5 -9l-202 34q-20 3 -34.5 20t-14.5 38v146q-141 24 -300 24t-300 -24v-146q0 -21 -14.5 -38t-34.5 -20l-202 -34q-20 -3 -34.5 9t-14.5 33v200q3 4 9.5 10.5 t31 26t54 37.5t80.5 39.5t109 37.5t141 26.5t175 10.5zM600 795q56 0 97 -9.5t60 -23.5t30 -28t12 -24l1 -10v-50l365 -303q14 -15 24.5 -40t10.5 -45v-212q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v212q0 20 10.5 45t24.5 40l365 303v50 q0 4 1 10.5t12 23t30 29t60 22.5t97 10z" />
-<glyph unicode="&#xe184;" d="M1100 700l-200 -200h-600l-200 200v500h200v-200h200v200h200v-200h200v200h200v-500zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-12l137 -100h-950l137 100h-12q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5 t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe185;" d="M700 1100h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-1000h300v1000q0 41 -29.5 70.5t-70.5 29.5zM1100 800h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-700h300v700q0 41 -29.5 70.5t-70.5 29.5zM400 0h-300v400q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-400z " />
-<glyph unicode="&#xe186;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 700h-200v-100h200v-300h-300v100h200v100h-200v300h300v-100zM900 700v-300l-100 -100h-200v500h200z M700 700v-300h100v300h-100z" />
-<glyph unicode="&#xe187;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 300h-100v200h-100v-200h-100v500h100v-200h100v200h100v-500zM900 700v-300l-100 -100h-200v500h200z M700 700v-300h100v300h-100z" />
-<glyph unicode="&#xe188;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 700h-200v-300h200v-100h-300v500h300v-100zM900 700h-200v-300h200v-100h-300v500h300v-100z" />
-<glyph unicode="&#xe189;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 400l-300 150l300 150v-300zM900 550l-300 -150v300z" />
-<glyph unicode="&#xe190;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM900 300h-700v500h700v-500zM800 700h-130q-38 0 -66.5 -43t-28.5 -108t27 -107t68 -42h130v300zM300 700v-300 h130q41 0 68 42t27 107t-28.5 108t-66.5 43h-130z" />
-<glyph unicode="&#xe191;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 700h-200v-100h200v-300h-300v100h200v100h-200v300h300v-100zM900 300h-100v400h-100v100h200v-500z M700 300h-100v100h100v-100z" />
-<glyph unicode="&#xe192;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM300 700h200v-400h-300v500h100v-100zM900 300h-100v400h-100v100h200v-500zM300 600v-200h100v200h-100z M700 300h-100v100h100v-100z" />
-<glyph unicode="&#xe193;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 500l-199 -200h-100v50l199 200v150h-200v100h300v-300zM900 300h-100v400h-100v100h200v-500zM701 300h-100 v100h100v-100z" />
-<glyph unicode="&#xe194;" d="M600 1191q120 0 229.5 -47t188.5 -126t126 -188.5t47 -229.5t-47 -229.5t-126 -188.5t-188.5 -126t-229.5 -47t-229.5 47t-188.5 126t-126 188.5t-47 229.5t47 229.5t126 188.5t188.5 126t229.5 47zM600 1021q-114 0 -211 -56.5t-153.5 -153.5t-56.5 -211t56.5 -211 t153.5 -153.5t211 -56.5t211 56.5t153.5 153.5t56.5 211t-56.5 211t-153.5 153.5t-211 56.5zM800 700h-300v-200h300v-100h-300l-100 100v200l100 100h300v-100z" />
-<glyph unicode="&#xe195;" d="M600 1191q120 0 229.5 -47t188.5 -126t126 -188.5t47 -229.5t-47 -229.5t-126 -188.5t-188.5 -126t-229.5 -47t-229.5 47t-188.5 126t-126 188.5t-47 229.5t47 229.5t126 188.5t188.5 126t229.5 47zM600 1021q-114 0 -211 -56.5t-153.5 -153.5t-56.5 -211t56.5 -211 t153.5 -153.5t211 -56.5t211 56.5t153.5 153.5t56.5 211t-56.5 211t-153.5 153.5t-211 56.5zM800 700v-100l-50 -50l100 -100v-50h-100l-100 100h-150v-100h-100v400h300zM500 700v-100h200v100h-200z" />
-<glyph unicode="&#xe197;" d="M503 1089q110 0 200.5 -59.5t134.5 -156.5q44 14 90 14q120 0 205 -86.5t85 -207t-85 -207t-205 -86.5h-128v250q0 21 -14.5 35.5t-35.5 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-250h-222q-80 0 -136 57.5t-56 136.5q0 69 43 122.5t108 67.5q-2 19 -2 37q0 100 49 185 t134 134t185 49zM525 500h150q10 0 17.5 -7.5t7.5 -17.5v-275h137q21 0 26 -11.5t-8 -27.5l-223 -244q-13 -16 -32 -16t-32 16l-223 244q-13 16 -8 27.5t26 11.5h137v275q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe198;" d="M502 1089q110 0 201 -59.5t135 -156.5q43 15 89 15q121 0 206 -86.5t86 -206.5q0 -99 -60 -181t-150 -110l-378 360q-13 16 -31.5 16t-31.5 -16l-381 -365h-9q-79 0 -135.5 57.5t-56.5 136.5q0 69 43 122.5t108 67.5q-2 19 -2 38q0 100 49 184.5t133.5 134t184.5 49.5z M632 467l223 -228q13 -16 8 -27.5t-26 -11.5h-137v-275q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v275h-137q-21 0 -26 11.5t8 27.5q199 204 223 228q19 19 31.5 19t32.5 -19z" />
-<glyph unicode="&#xe199;" d="M700 100v100h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170l-270 -300h400v-100h-50q-21 0 -35.5 -14.5t-14.5 -35.5v-50h400v50q0 21 -14.5 35.5t-35.5 14.5h-50z" />
-<glyph unicode="&#xe200;" d="M600 1179q94 0 167.5 -56.5t99.5 -145.5q89 -6 150.5 -71.5t61.5 -155.5q0 -61 -29.5 -112.5t-79.5 -82.5q9 -29 9 -55q0 -74 -52.5 -126.5t-126.5 -52.5q-55 0 -100 30v-251q21 0 35.5 -14.5t14.5 -35.5v-50h-300v50q0 21 14.5 35.5t35.5 14.5v251q-45 -30 -100 -30 q-74 0 -126.5 52.5t-52.5 126.5q0 18 4 38q-47 21 -75.5 65t-28.5 97q0 74 52.5 126.5t126.5 52.5q5 0 23 -2q0 2 -1 10t-1 13q0 116 81.5 197.5t197.5 81.5z" />
-<glyph unicode="&#xe201;" d="M1010 1010q111 -111 150.5 -260.5t0 -299t-150.5 -260.5q-83 -83 -191.5 -126.5t-218.5 -43.5t-218.5 43.5t-191.5 126.5q-111 111 -150.5 260.5t0 299t150.5 260.5q83 83 191.5 126.5t218.5 43.5t218.5 -43.5t191.5 -126.5zM476 1065q-4 0 -8 -1q-121 -34 -209.5 -122.5 t-122.5 -209.5q-4 -12 2.5 -23t18.5 -14l36 -9q3 -1 7 -1q23 0 29 22q27 96 98 166q70 71 166 98q11 3 17.5 13.5t3.5 22.5l-9 35q-3 13 -14 19q-7 4 -15 4zM512 920q-4 0 -9 -2q-80 -24 -138.5 -82.5t-82.5 -138.5q-4 -13 2 -24t19 -14l34 -9q4 -1 8 -1q22 0 28 21 q18 58 58.5 98.5t97.5 58.5q12 3 18 13.5t3 21.5l-9 35q-3 12 -14 19q-7 4 -15 4zM719.5 719.5q-49.5 49.5 -119.5 49.5t-119.5 -49.5t-49.5 -119.5t49.5 -119.5t119.5 -49.5t119.5 49.5t49.5 119.5t-49.5 119.5zM855 551q-22 0 -28 -21q-18 -58 -58.5 -98.5t-98.5 -57.5 q-11 -4 -17 -14.5t-3 -21.5l9 -35q3 -12 14 -19q7 -4 15 -4q4 0 9 2q80 24 138.5 82.5t82.5 138.5q4 13 -2.5 24t-18.5 14l-34 9q-4 1 -8 1zM1000 515q-23 0 -29 -22q-27 -96 -98 -166q-70 -71 -166 -98q-11 -3 -17.5 -13.5t-3.5 -22.5l9 -35q3 -13 14 -19q7 -4 15 -4 q4 0 8 1q121 34 209.5 122.5t122.5 209.5q4 12 -2.5 23t-18.5 14l-36 9q-3 1 -7 1z" />
-<glyph unicode="&#xe202;" d="M700 800h300v-380h-180v200h-340v-200h-380v755q0 10 7.5 17.5t17.5 7.5h575v-400zM1000 900h-200v200zM700 300h162l-212 -212l-212 212h162v200h100v-200zM520 0h-395q-10 0 -17.5 7.5t-7.5 17.5v395zM1000 220v-195q0 -10 -7.5 -17.5t-17.5 -7.5h-195z" />
-<glyph unicode="&#xe203;" d="M700 800h300v-520l-350 350l-550 -550v1095q0 10 7.5 17.5t17.5 7.5h575v-400zM1000 900h-200v200zM862 200h-162v-200h-100v200h-162l212 212zM480 0h-355q-10 0 -17.5 7.5t-7.5 17.5v55h380v-80zM1000 80v-55q0 -10 -7.5 -17.5t-17.5 -7.5h-155v80h180z" />
-<glyph unicode="&#xe204;" d="M1162 800h-162v-200h100l100 -100h-300v300h-162l212 212zM200 800h200q27 0 40 -2t29.5 -10.5t23.5 -30t7 -57.5h300v-100h-600l-200 -350v450h100q0 36 7 57.5t23.5 30t29.5 10.5t40 2zM800 400h240l-240 -400h-800l300 500h500v-100z" />
-<glyph unicode="&#xe205;" d="M650 1100h100q21 0 35.5 -14.5t14.5 -35.5v-50h50q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h50v50q0 21 14.5 35.5t35.5 14.5zM1000 850v150q41 0 70.5 -29.5t29.5 -70.5v-800 q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-1 0 -20 4l246 246l-326 326v324q0 41 29.5 70.5t70.5 29.5v-150q0 -62 44 -106t106 -44h300q62 0 106 44t44 106zM412 250l-212 -212v162h-200v100h200v162z" />
-<glyph unicode="&#xe206;" d="M450 1100h100q21 0 35.5 -14.5t14.5 -35.5v-50h50q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h50v50q0 21 14.5 35.5t35.5 14.5zM800 850v150q41 0 70.5 -29.5t29.5 -70.5v-500 h-200v-300h200q0 -36 -7 -57.5t-23.5 -30t-29.5 -10.5t-40 -2h-600q-41 0 -70.5 29.5t-29.5 70.5v800q0 41 29.5 70.5t70.5 29.5v-150q0 -62 44 -106t106 -44h300q62 0 106 44t44 106zM1212 250l-212 -212v162h-200v100h200v162z" />
-<glyph unicode="&#xe209;" d="M658 1197l637 -1104q23 -38 7 -65.5t-60 -27.5h-1276q-44 0 -60 27.5t7 65.5l637 1104q22 39 54 39t54 -39zM704 800h-208q-20 0 -32 -14.5t-8 -34.5l58 -302q4 -20 21.5 -34.5t37.5 -14.5h54q20 0 37.5 14.5t21.5 34.5l58 302q4 20 -8 34.5t-32 14.5zM500 300v-100h200 v100h-200z" />
-<glyph unicode="&#xe210;" d="M425 1100h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM425 800h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5 t17.5 7.5zM825 800h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM25 500h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150 q0 10 7.5 17.5t17.5 7.5zM425 500h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM825 500h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5 v150q0 10 7.5 17.5t17.5 7.5zM25 200h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM425 200h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5 t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM825 200h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe211;" d="M700 1200h100v-200h-100v-100h350q62 0 86.5 -39.5t-3.5 -94.5l-66 -132q-41 -83 -81 -134h-772q-40 51 -81 134l-66 132q-28 55 -3.5 94.5t86.5 39.5h350v100h-100v200h100v100h200v-100zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-12l137 -100 h-950l138 100h-13q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe212;" d="M600 1300q40 0 68.5 -29.5t28.5 -70.5h-194q0 41 28.5 70.5t68.5 29.5zM443 1100h314q18 -37 18 -75q0 -8 -3 -25h328q41 0 44.5 -16.5t-30.5 -38.5l-175 -145h-678l-178 145q-34 22 -29 38.5t46 16.5h328q-3 17 -3 25q0 38 18 75zM250 700h700q21 0 35.5 -14.5 t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-150v-200l275 -200h-950l275 200v200h-150q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe213;" d="M600 1181q75 0 128 -53t53 -128t-53 -128t-128 -53t-128 53t-53 128t53 128t128 53zM602 798h46q34 0 55.5 -28.5t21.5 -86.5q0 -76 39 -183h-324q39 107 39 183q0 58 21.5 86.5t56.5 28.5h45zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-13 l138 -100h-950l137 100h-12q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe214;" d="M600 1300q47 0 92.5 -53.5t71 -123t25.5 -123.5q0 -78 -55.5 -133.5t-133.5 -55.5t-133.5 55.5t-55.5 133.5q0 62 34 143l144 -143l111 111l-163 163q34 26 63 26zM602 798h46q34 0 55.5 -28.5t21.5 -86.5q0 -76 39 -183h-324q39 107 39 183q0 58 21.5 86.5t56.5 28.5h45 zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-13l138 -100h-950l137 100h-12q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe215;" d="M600 1200l300 -161v-139h-300q0 -57 18.5 -108t50 -91.5t63 -72t70 -67.5t57.5 -61h-530q-60 83 -90.5 177.5t-30.5 178.5t33 164.5t87.5 139.5t126 96.5t145.5 41.5v-98zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-13l138 -100h-950l137 100 h-12q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe216;" d="M600 1300q41 0 70.5 -29.5t29.5 -70.5v-78q46 -26 73 -72t27 -100v-50h-400v50q0 54 27 100t73 72v78q0 41 29.5 70.5t70.5 29.5zM400 800h400q54 0 100 -27t72 -73h-172v-100h200v-100h-200v-100h200v-100h-200v-100h200q0 -83 -58.5 -141.5t-141.5 -58.5h-400 q-83 0 -141.5 58.5t-58.5 141.5v400q0 83 58.5 141.5t141.5 58.5z" />
-<glyph unicode="&#xe218;" d="M150 1100h900q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5v500q0 21 14.5 35.5t35.5 14.5zM125 400h950q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-283l224 -224q13 -13 13 -31.5t-13 -32 t-31.5 -13.5t-31.5 13l-88 88h-524l-87 -88q-13 -13 -32 -13t-32 13.5t-13 32t13 31.5l224 224h-289q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM541 300l-100 -100h324l-100 100h-124z" />
-<glyph unicode="&#xe219;" d="M200 1100h800q83 0 141.5 -58.5t58.5 -141.5v-200h-100q0 41 -29.5 70.5t-70.5 29.5h-250q-41 0 -70.5 -29.5t-29.5 -70.5h-100q0 41 -29.5 70.5t-70.5 29.5h-250q-41 0 -70.5 -29.5t-29.5 -70.5h-100v200q0 83 58.5 141.5t141.5 58.5zM100 600h1000q41 0 70.5 -29.5 t29.5 -70.5v-300h-1200v300q0 41 29.5 70.5t70.5 29.5zM300 100v-50q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v50h200zM1100 100v-50q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v50h200z" />
-<glyph unicode="&#xe221;" d="M480 1165l682 -683q31 -31 31 -75.5t-31 -75.5l-131 -131h-481l-517 518q-32 31 -32 75.5t32 75.5l295 296q31 31 75.5 31t76.5 -31zM108 794l342 -342l303 304l-341 341zM250 100h800q21 0 35.5 -14.5t14.5 -35.5v-50h-900v50q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe223;" d="M1057 647l-189 506q-8 19 -27.5 33t-40.5 14h-400q-21 0 -40.5 -14t-27.5 -33l-189 -506q-8 -19 1.5 -33t30.5 -14h625v-150q0 -21 14.5 -35.5t35.5 -14.5t35.5 14.5t14.5 35.5v150h125q21 0 30.5 14t1.5 33zM897 0h-595v50q0 21 14.5 35.5t35.5 14.5h50v50 q0 21 14.5 35.5t35.5 14.5h48v300h200v-300h47q21 0 35.5 -14.5t14.5 -35.5v-50h50q21 0 35.5 -14.5t14.5 -35.5v-50z" />
-<glyph unicode="&#xe224;" d="M900 800h300v-575q0 -10 -7.5 -17.5t-17.5 -7.5h-375v591l-300 300v84q0 10 7.5 17.5t17.5 7.5h375v-400zM1200 900h-200v200zM400 600h300v-575q0 -10 -7.5 -17.5t-17.5 -7.5h-650q-10 0 -17.5 7.5t-7.5 17.5v950q0 10 7.5 17.5t17.5 7.5h375v-400zM700 700h-200v200z " />
-<glyph unicode="&#xe225;" d="M484 1095h195q75 0 146 -32.5t124 -86t89.5 -122.5t48.5 -142q18 -14 35 -20q31 -10 64.5 6.5t43.5 48.5q10 34 -15 71q-19 27 -9 43q5 8 12.5 11t19 -1t23.5 -16q41 -44 39 -105q-3 -63 -46 -106.5t-104 -43.5h-62q-7 -55 -35 -117t-56 -100l-39 -234q-3 -20 -20 -34.5 t-38 -14.5h-100q-21 0 -33 14.5t-9 34.5l12 70q-49 -14 -91 -14h-195q-24 0 -65 8l-11 -64q-3 -20 -20 -34.5t-38 -14.5h-100q-21 0 -33 14.5t-9 34.5l26 157q-84 74 -128 175l-159 53q-19 7 -33 26t-14 40v50q0 21 14.5 35.5t35.5 14.5h124q11 87 56 166l-111 95 q-16 14 -12.5 23.5t24.5 9.5h203q116 101 250 101zM675 1000h-250q-10 0 -17.5 -7.5t-7.5 -17.5v-50q0 -10 7.5 -17.5t17.5 -7.5h250q10 0 17.5 7.5t7.5 17.5v50q0 10 -7.5 17.5t-17.5 7.5z" />
-<glyph unicode="&#xe226;" d="M641 900l423 247q19 8 42 2.5t37 -21.5l32 -38q14 -15 12.5 -36t-17.5 -34l-139 -120h-390zM50 1100h106q67 0 103 -17t66 -71l102 -212h823q21 0 35.5 -14.5t14.5 -35.5v-50q0 -21 -14 -40t-33 -26l-737 -132q-23 -4 -40 6t-26 25q-42 67 -100 67h-300q-62 0 -106 44 t-44 106v200q0 62 44 106t106 44zM173 928h-80q-19 0 -28 -14t-9 -35v-56q0 -51 42 -51h134q16 0 21.5 8t5.5 24q0 11 -16 45t-27 51q-18 28 -43 28zM550 727q-32 0 -54.5 -22.5t-22.5 -54.5t22.5 -54.5t54.5 -22.5t54.5 22.5t22.5 54.5t-22.5 54.5t-54.5 22.5zM130 389 l152 130q18 19 34 24t31 -3.5t24.5 -17.5t25.5 -28q28 -35 50.5 -51t48.5 -13l63 5l48 -179q13 -61 -3.5 -97.5t-67.5 -79.5l-80 -69q-47 -40 -109 -35.5t-103 51.5l-130 151q-40 47 -35.5 109.5t51.5 102.5zM380 377l-102 -88q-31 -27 2 -65l37 -43q13 -15 27.5 -19.5 t31.5 6.5l61 53q19 16 14 49q-2 20 -12 56t-17 45q-11 12 -19 14t-23 -8z" />
-<glyph unicode="&#xe227;" d="M625 1200h150q10 0 17.5 -7.5t7.5 -17.5v-109q79 -33 131 -87.5t53 -128.5q1 -46 -15 -84.5t-39 -61t-46 -38t-39 -21.5l-17 -6q6 0 15 -1.5t35 -9t50 -17.5t53 -30t50 -45t35.5 -64t14.5 -84q0 -59 -11.5 -105.5t-28.5 -76.5t-44 -51t-49.5 -31.5t-54.5 -16t-49.5 -6.5 t-43.5 -1v-75q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v75h-100v-75q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v75h-175q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h75v600h-75q-10 0 -17.5 7.5t-7.5 17.5v150 q0 10 7.5 17.5t17.5 7.5h175v75q0 10 7.5 17.5t17.5 7.5h150q10 0 17.5 -7.5t7.5 -17.5v-75h100v75q0 10 7.5 17.5t17.5 7.5zM400 900v-200h263q28 0 48.5 10.5t30 25t15 29t5.5 25.5l1 10q0 4 -0.5 11t-6 24t-15 30t-30 24t-48.5 11h-263zM400 500v-200h363q28 0 48.5 10.5 t30 25t15 29t5.5 25.5l1 10q0 4 -0.5 11t-6 24t-15 30t-30 24t-48.5 11h-363z" />
-<glyph unicode="&#xe230;" d="M212 1198h780q86 0 147 -61t61 -147v-416q0 -51 -18 -142.5t-36 -157.5l-18 -66q-29 -87 -93.5 -146.5t-146.5 -59.5h-572q-82 0 -147 59t-93 147q-8 28 -20 73t-32 143.5t-20 149.5v416q0 86 61 147t147 61zM600 1045q-70 0 -132.5 -11.5t-105.5 -30.5t-78.5 -41.5 t-57 -45t-36 -41t-20.5 -30.5l-6 -12l156 -243h560l156 243q-2 5 -6 12.5t-20 29.5t-36.5 42t-57 44.5t-79 42t-105 29.5t-132.5 12zM762 703h-157l195 261z" />
-<glyph unicode="&#xe231;" d="M475 1300h150q103 0 189 -86t86 -189v-500q0 -41 -42 -83t-83 -42h-450q-41 0 -83 42t-42 83v500q0 103 86 189t189 86zM700 300v-225q0 -21 -27 -48t-48 -27h-150q-21 0 -48 27t-27 48v225h300z" />
-<glyph unicode="&#xe232;" d="M475 1300h96q0 -150 89.5 -239.5t239.5 -89.5v-446q0 -41 -42 -83t-83 -42h-450q-41 0 -83 42t-42 83v500q0 103 86 189t189 86zM700 300v-225q0 -21 -27 -48t-48 -27h-150q-21 0 -48 27t-27 48v225h300z" />
-<glyph unicode="&#xe233;" d="M1294 767l-638 -283l-378 170l-78 -60v-224l100 -150v-199l-150 148l-150 -149v200l100 150v250q0 4 -0.5 10.5t0 9.5t1 8t3 8t6.5 6l47 40l-147 65l642 283zM1000 380l-350 -166l-350 166v147l350 -165l350 165v-147z" />
-<glyph unicode="&#xe234;" d="M250 800q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44zM650 800q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44zM1050 800q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44z" />
-<glyph unicode="&#xe235;" d="M550 1100q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44zM550 700q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44zM550 300q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44z" />
-<glyph unicode="&#xe236;" d="M125 1100h950q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-950q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM125 700h950q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-950q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5 t17.5 7.5zM125 300h950q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-950q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5z" />
-<glyph unicode="&#xe237;" d="M350 1200h500q162 0 256 -93.5t94 -256.5v-500q0 -165 -93.5 -257.5t-256.5 -92.5h-500q-165 0 -257.5 92.5t-92.5 257.5v500q0 165 92.5 257.5t257.5 92.5zM900 1000h-600q-41 0 -70.5 -29.5t-29.5 -70.5v-600q0 -41 29.5 -70.5t70.5 -29.5h600q41 0 70.5 29.5 t29.5 70.5v600q0 41 -29.5 70.5t-70.5 29.5zM350 900h500q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -14.5 -35.5t-35.5 -14.5h-500q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 14.5 35.5t35.5 14.5zM400 800v-200h400v200h-400z" />
-<glyph unicode="&#xe238;" d="M150 1100h1000q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-50v-200h50q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-50v-200h50q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-50v-200h50q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5 t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5h50v200h-50q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5h50v200h-50q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5h50v200h-50q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe239;" d="M650 1187q87 -67 118.5 -156t0 -178t-118.5 -155q-87 66 -118.5 155t0 178t118.5 156zM300 800q124 0 212 -88t88 -212q-124 0 -212 88t-88 212zM1000 800q0 -124 -88 -212t-212 -88q0 124 88 212t212 88zM300 500q124 0 212 -88t88 -212q-124 0 -212 88t-88 212z M1000 500q0 -124 -88 -212t-212 -88q0 124 88 212t212 88zM700 199v-144q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v142q40 -4 43 -4q17 0 57 6z" />
-<glyph unicode="&#xe240;" d="M745 878l69 19q25 6 45 -12l298 -295q11 -11 15 -26.5t-2 -30.5q-5 -14 -18 -23.5t-28 -9.5h-8q1 0 1 -13q0 -29 -2 -56t-8.5 -62t-20 -63t-33 -53t-51 -39t-72.5 -14h-146q-184 0 -184 288q0 24 10 47q-20 4 -62 4t-63 -4q11 -24 11 -47q0 -288 -184 -288h-142 q-48 0 -84.5 21t-56 51t-32 71.5t-16 75t-3.5 68.5q0 13 2 13h-7q-15 0 -27.5 9.5t-18.5 23.5q-6 15 -2 30.5t15 25.5l298 296q20 18 46 11l76 -19q20 -5 30.5 -22.5t5.5 -37.5t-22.5 -31t-37.5 -5l-51 12l-182 -193h891l-182 193l-44 -12q-20 -5 -37.5 6t-22.5 31t6 37.5 t31 22.5z" />
-<glyph unicode="&#xe241;" d="M1200 900h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-200v-850q0 -22 25 -34.5t50 -13.5l25 -2v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v850h-200q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h1000v-300zM500 450h-25q0 15 -4 24.5t-9 14.5t-17 7.5t-20 3t-25 0.5h-100v-425q0 -11 12.5 -17.5t25.5 -7.5h12v-50h-200v50q50 0 50 25v425h-100q-17 0 -25 -0.5t-20 -3t-17 -7.5t-9 -14.5t-4 -24.5h-25v150h500v-150z" />
-<glyph unicode="&#xe242;" d="M1000 300v50q-25 0 -55 32q-14 14 -25 31t-16 27l-4 11l-289 747h-69l-300 -754q-18 -35 -39 -56q-9 -9 -24.5 -18.5t-26.5 -14.5l-11 -5v-50h273v50q-49 0 -78.5 21.5t-11.5 67.5l69 176h293l61 -166q13 -34 -3.5 -66.5t-55.5 -32.5v-50h312zM412 691l134 342l121 -342 h-255zM1100 150v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5z" />
-<glyph unicode="&#xe243;" d="M50 1200h1100q21 0 35.5 -14.5t14.5 -35.5v-1100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v1100q0 21 14.5 35.5t35.5 14.5zM611 1118h-70q-13 0 -18 -12l-299 -753q-17 -32 -35 -51q-18 -18 -56 -34q-12 -5 -12 -18v-50q0 -8 5.5 -14t14.5 -6 h273q8 0 14 6t6 14v50q0 8 -6 14t-14 6q-55 0 -71 23q-10 14 0 39l63 163h266l57 -153q11 -31 -6 -55q-12 -17 -36 -17q-8 0 -14 -6t-6 -14v-50q0 -8 6 -14t14 -6h313q8 0 14 6t6 14v50q0 7 -5.5 13t-13.5 7q-17 0 -42 25q-25 27 -40 63h-1l-288 748q-5 12 -19 12zM639 611 h-197l103 264z" />
-<glyph unicode="&#xe244;" d="M1200 1100h-1200v100h1200v-100zM50 1000h400q21 0 35.5 -14.5t14.5 -35.5v-900q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v900q0 21 14.5 35.5t35.5 14.5zM650 1000h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM700 900v-300h300v300h-300z" />
-<glyph unicode="&#xe245;" d="M50 1200h400q21 0 35.5 -14.5t14.5 -35.5v-900q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v900q0 21 14.5 35.5t35.5 14.5zM650 700h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400 q0 21 14.5 35.5t35.5 14.5zM700 600v-300h300v300h-300zM1200 0h-1200v100h1200v-100z" />
-<glyph unicode="&#xe246;" d="M50 1000h400q21 0 35.5 -14.5t14.5 -35.5v-350h100v150q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-150h100v-100h-100v-150q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v150h-100v-350q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5v800q0 21 14.5 35.5t35.5 14.5zM700 700v-300h300v300h-300z" />
-<glyph unicode="&#xe247;" d="M100 0h-100v1200h100v-1200zM250 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM300 1000v-300h300v300h-300zM250 500h900q21 0 35.5 -14.5t14.5 -35.5v-400 q0 -21 -14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe248;" d="M600 1100h150q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-150v-100h450q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5h350v100h-150q-21 0 -35.5 14.5 t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5h150v100h100v-100zM400 1000v-300h300v300h-300z" />
-<glyph unicode="&#xe249;" d="M1200 0h-100v1200h100v-1200zM550 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM600 1000v-300h300v300h-300zM50 500h900q21 0 35.5 -14.5t14.5 -35.5v-400 q0 -21 -14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5z" />
-<glyph unicode="&#xe250;" d="M865 565l-494 -494q-23 -23 -41 -23q-14 0 -22 13.5t-8 38.5v1000q0 25 8 38.5t22 13.5q18 0 41 -23l494 -494q14 -14 14 -35t-14 -35z" />
-<glyph unicode="&#xe251;" d="M335 635l494 494q29 29 50 20.5t21 -49.5v-1000q0 -41 -21 -49.5t-50 20.5l-494 494q-14 14 -14 35t14 35z" />
-<glyph unicode="&#xe252;" d="M100 900h1000q41 0 49.5 -21t-20.5 -50l-494 -494q-14 -14 -35 -14t-35 14l-494 494q-29 29 -20.5 50t49.5 21z" />
-<glyph unicode="&#xe253;" d="M635 865l494 -494q29 -29 20.5 -50t-49.5 -21h-1000q-41 0 -49.5 21t20.5 50l494 494q14 14 35 14t35 -14z" />
-<glyph unicode="&#xe254;" d="M700 741v-182l-692 -323v221l413 193l-413 193v221zM1200 0h-800v200h800v-200z" />
-<glyph unicode="&#xe255;" d="M1200 900h-200v-100h200v-100h-300v300h200v100h-200v100h300v-300zM0 700h50q0 21 4 37t9.5 26.5t18 17.5t22 11t28.5 5.5t31 2t37 0.5h100v-550q0 -22 -25 -34.5t-50 -13.5l-25 -2v-100h400v100q-4 0 -11 0.5t-24 3t-30 7t-24 15t-11 24.5v550h100q25 0 37 -0.5t31 -2 t28.5 -5.5t22 -11t18 -17.5t9.5 -26.5t4 -37h50v300h-800v-300z" />
-<glyph unicode="&#xe256;" d="M800 700h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-100v-550q0 -22 25 -34.5t50 -14.5l25 -1v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v550h-100q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h800v-300zM1100 200h-200v-100h200v-100h-300v300h200v100h-200v100h300v-300z" />
-<glyph unicode="&#xe257;" d="M701 1098h160q16 0 21 -11t-7 -23l-464 -464l464 -464q12 -12 7 -23t-21 -11h-160q-13 0 -23 9l-471 471q-7 8 -7 18t7 18l471 471q10 9 23 9z" />
-<glyph unicode="&#xe258;" d="M339 1098h160q13 0 23 -9l471 -471q7 -8 7 -18t-7 -18l-471 -471q-10 -9 -23 -9h-160q-16 0 -21 11t7 23l464 464l-464 464q-12 12 -7 23t21 11z" />
-<glyph unicode="&#xe259;" d="M1087 882q11 -5 11 -21v-160q0 -13 -9 -23l-471 -471q-8 -7 -18 -7t-18 7l-471 471q-9 10 -9 23v160q0 16 11 21t23 -7l464 -464l464 464q12 12 23 7z" />
-<glyph unicode="&#xe260;" d="M618 993l471 -471q9 -10 9 -23v-160q0 -16 -11 -21t-23 7l-464 464l-464 -464q-12 -12 -23 -7t-11 21v160q0 13 9 23l471 471q8 7 18 7t18 -7z" />
-<glyph unicode="&#xf8ff;" d="M1000 1200q0 -124 -88 -212t-212 -88q0 124 88 212t212 88zM450 1000h100q21 0 40 -14t26 -33l79 -194q5 1 16 3q34 6 54 9.5t60 7t65.5 1t61 -10t56.5 -23t42.5 -42t29 -64t5 -92t-19.5 -121.5q-1 -7 -3 -19.5t-11 -50t-20.5 -73t-32.5 -81.5t-46.5 -83t-64 -70 t-82.5 -50q-13 -5 -42 -5t-65.5 2.5t-47.5 2.5q-14 0 -49.5 -3.5t-63 -3.5t-43.5 7q-57 25 -104.5 78.5t-75 111.5t-46.5 112t-26 90l-7 35q-15 63 -18 115t4.5 88.5t26 64t39.5 43.5t52 25.5t58.5 13t62.5 2t59.5 -4.5t55.5 -8l-147 192q-12 18 -5.5 30t27.5 12z" />
-<glyph unicode="&#x1f511;" d="M250 1200h600q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-150v-500l-255 -178q-19 -9 -32 -1t-13 29v650h-150q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM400 1100v-100h300v100h-300z" />
-<glyph unicode="&#x1f6aa;" d="M250 1200h750q39 0 69.5 -40.5t30.5 -84.5v-933l-700 -117v950l600 125h-700v-1000h-100v1025q0 23 15.5 49t34.5 26zM500 525v-100l100 20v100z" />
-</font>
-</defs></svg>
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.ttf b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.ttf
deleted file mode 100644
index 1413fc6..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.ttf
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.woff b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.woff
deleted file mode 100644
index 9e61285..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.woff
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.woff2 b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.woff2
deleted file mode 100644
index 64539b5..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.woff2
+++ /dev/null
Binary files differ
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap-editable.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap-editable.min.js
deleted file mode 100644
index 539d6c1..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap-editable.min.js
+++ /dev/null
@@ -1,7 +0,0 @@
-/*! X-editable - v1.5.0
-* In-place editing with Twitter Bootstrap, jQuery UI or pure jQuery
-* http://github.com/vitalets/x-editable
-* Copyright (c) 2013 Vitaliy Potapov; Licensed MIT */
-!function(a){"use strict";var b=function(b,c){this.options=a.extend({},a.fn.editableform.defaults,c),this.$div=a(b),this.options.scope||(this.options.scope=this)};b.prototype={constructor:b,initInput:function(){this.input=this.options.input,this.value=this.input.str2value(this.options.value),this.input.prerender()},initTemplate:function(){this.$form=a(a.fn.editableform.template)},initButtons:function(){var b=this.$form.find(".editable-buttons");b.append(a.fn.editableform.buttons),"bottom"===this.options.showbuttons&&b.addClass("editable-buttons-bottom")},render:function(){this.$loading=a(a.fn.editableform.loading),this.$div.empty().append(this.$loading),this.initTemplate(),this.options.showbuttons?this.initButtons():this.$form.find(".editable-buttons").remove(),this.showLoading(),this.isSaving=!1,this.$div.triggerHandler("rendering"),this.initInput(),this.$form.find("div.editable-input").append(this.input.$tpl),this.$div.append(this.$form),a.when(this.input.render()).then(a.proxy(function(){if(this.options.showbuttons||this.input.autosubmit(),this.$form.find(".editable-cancel").click(a.proxy(this.cancel,this)),this.input.error)this.error(this.input.error),this.$form.find(".editable-submit").attr("disabled",!0),this.input.$input.attr("disabled",!0),this.$form.submit(function(a){a.preventDefault()});else{this.error(!1),this.input.$input.removeAttr("disabled"),this.$form.find(".editable-submit").removeAttr("disabled");var b=null===this.value||void 0===this.value||""===this.value?this.options.defaultValue:this.value;this.input.value2input(b),this.$form.submit(a.proxy(this.submit,this))}this.$div.triggerHandler("rendered"),this.showForm(),this.input.postrender&&this.input.postrender()},this))},cancel:function(){this.$div.triggerHandler("cancel")},showLoading:function(){var a,b;this.$form?(a=this.$form.outerWidth(),b=this.$form.outerHeight(),a&&this.$loading.width(a),b&&this.$loading.height(b),this.$form.hide()):(a=this.$loading.parent().width(),a&&this.$loading.width(a)),this.$loading.show()},showForm:function(a){this.$loading.hide(),this.$form.show(),a!==!1&&this.input.activate(),this.$div.triggerHandler("show")},error:function(b){var c,d=this.$form.find(".control-group"),e=this.$form.find(".editable-error-block");if(b===!1)d.removeClass(a.fn.editableform.errorGroupClass),e.removeClass(a.fn.editableform.errorBlockClass).empty().hide();else{if(b){c=b.split("\n");for(var f=0;f<c.length;f++)c[f]=a("<div>").text(c[f]).html();b=c.join("<br>")}d.addClass(a.fn.editableform.errorGroupClass),e.addClass(a.fn.editableform.errorBlockClass).html(b).show()}},submit:function(b){b.stopPropagation(),b.preventDefault();var c,d=this.input.input2value();if(c=this.validate(d))return this.error(c),this.showForm(),void 0;if(!this.options.savenochange&&this.input.value2str(d)==this.input.value2str(this.value))return this.$div.triggerHandler("nochange"),void 0;var e=this.input.value2submit(d);this.isSaving=!0,a.when(this.save(e)).done(a.proxy(function(a){this.isSaving=!1;var b="function"==typeof this.options.success?this.options.success.call(this.options.scope,a,d):null;return b===!1?(this.error(!1),this.showForm(!1),void 0):"string"==typeof b?(this.error(b),this.showForm(),void 0):(b&&"object"==typeof b&&b.hasOwnProperty("newValue")&&(d=b.newValue),this.error(!1),this.value=d,this.$div.triggerHandler("save",{newValue:d,submitValue:e,response:a}),void 0)},this)).fail(a.proxy(function(a){this.isSaving=!1;var b;b="function"==typeof this.options.error?this.options.error.call(this.options.scope,a,d):"string"==typeof 
a?a:a.responseText||a.statusText||"Unknown error!",this.error(b),this.showForm()},this))},save:function(b){this.options.pk=a.fn.editableutils.tryParseJson(this.options.pk,!0);var c,d="function"==typeof this.options.pk?this.options.pk.call(this.options.scope):this.options.pk,e=!!("function"==typeof this.options.url||this.options.url&&("always"===this.options.send||"auto"===this.options.send&&null!==d&&void 0!==d));return e?(this.showLoading(),c={name:this.options.name||"",value:b,pk:d},"function"==typeof this.options.params?c=this.options.params.call(this.options.scope,c):(this.options.params=a.fn.editableutils.tryParseJson(this.options.params,!0),a.extend(c,this.options.params)),"function"==typeof this.options.url?this.options.url.call(this.options.scope,c):a.ajax(a.extend({url:this.options.url,data:c,type:"POST"},this.options.ajaxOptions))):void 0},validate:function(a){return void 0===a&&(a=this.value),"function"==typeof this.options.validate?this.options.validate.call(this.options.scope,a):void 0},option:function(a,b){a in this.options&&(this.options[a]=b),"value"===a&&this.setValue(b)},setValue:function(a,b){this.value=b?this.input.str2value(a):a,this.$form&&this.$form.is(":visible")&&this.input.value2input(this.value)}},a.fn.editableform=function(c){var d=arguments;return this.each(function(){var e=a(this),f=e.data("editableform"),g="object"==typeof c&&c;f||e.data("editableform",f=new b(this,g)),"string"==typeof c&&f[c].apply(f,Array.prototype.slice.call(d,1))})},a.fn.editableform.Constructor=b,a.fn.editableform.defaults={type:"text",url:null,params:null,name:null,pk:null,value:null,defaultValue:null,send:"auto",validate:null,success:null,error:null,ajaxOptions:null,showbuttons:!0,scope:null,savenochange:!1},a.fn.editableform.template='<form class="form-inline editableform"><div class="control-group"><div><div class="editable-input"></div><div class="editable-buttons"></div></div><div class="editable-error-block"></div></div></form>',a.fn.editableform.loading='<div class="editableform-loading"></div>',a.fn.editableform.buttons='<button type="submit" class="editable-submit">ok</button><button type="button" class="editable-cancel">cancel</button>',a.fn.editableform.errorGroupClass=null,a.fn.editableform.errorBlockClass="editable-error",a.fn.editableform.engine="jquery"}(window.jQuery),function(a){"use strict";a.fn.editableutils={inherit:function(a,b){var c=function(){};c.prototype=b.prototype,a.prototype=new c,a.prototype.constructor=a,a.superclass=b.prototype},setCursorPosition:function(a,b){if(a.setSelectionRange)a.setSelectionRange(b,b);else if(a.createTextRange){var c=a.createTextRange();c.collapse(!0),c.moveEnd("character",b),c.moveStart("character",b),c.select()}},tryParseJson:function(a,b){if("string"==typeof a&&a.length&&a.match(/^[\{\[].*[\}\]]$/))if(b)try{a=new Function("return "+a)()}catch(c){}finally{return a}else a=new Function("return "+a)();return a},sliceObj:function(b,c,d){var e,f,g={};if(!a.isArray(c)||!c.length)return g;for(var h=0;h<c.length;h++)e=c[h],b.hasOwnProperty(e)&&(g[e]=b[e]),d!==!0&&(f=e.toLowerCase(),b.hasOwnProperty(f)&&(g[e]=b[f]));return g},getConfigData:function(b){var c={};return a.each(b.data(),function(a,b){("object"!=typeof b||b&&"object"==typeof b&&(b.constructor===Object||b.constructor===Array))&&(c[a]=b)}),c},objectKeys:function(a){if(Object.keys)return Object.keys(a);if(a!==Object(a))throw new TypeError("Object.keys called on a non-object");var b,c=[];for(b in a)Object.prototype.hasOwnProperty.call(a,b)&&c.push(b);return 
c},escape:function(b){return a("<div>").text(b).html()},itemsByValue:function(b,c,d){if(!c||null===b)return[];if("function"!=typeof d){var e=d||"value";d=function(a){return a[e]}}var f=a.isArray(b),g=[],h=this;return a.each(c,function(c,e){if(e.children)g=g.concat(h.itemsByValue(b,e.children,d));else if(f)a.grep(b,function(a){return a==(e&&"object"==typeof e?d(e):e)}).length&&g.push(e);else{var i=e&&"object"==typeof e?d(e):e;b==i&&g.push(e)}}),g},createInput:function(b){var c,d,e,f=b.type;return"date"===f&&("inline"===b.mode?a.fn.editabletypes.datefield?f="datefield":a.fn.editabletypes.dateuifield&&(f="dateuifield"):a.fn.editabletypes.date?f="date":a.fn.editabletypes.dateui&&(f="dateui"),"date"!==f||a.fn.editabletypes.date||(f="combodate")),"datetime"===f&&"inline"===b.mode&&(f="datetimefield"),"wysihtml5"!==f||a.fn.editabletypes[f]||(f="textarea"),"function"==typeof a.fn.editabletypes[f]?(c=a.fn.editabletypes[f],d=this.sliceObj(b,this.objectKeys(c.defaults)),e=new c(d)):(a.error("Unknown type: "+f),!1)},supportsTransitions:function(){var a=document.body||document.documentElement,b=a.style,c="transition",d=["Moz","Webkit","Khtml","O","ms"];if("string"==typeof b[c])return!0;c=c.charAt(0).toUpperCase()+c.substr(1);for(var e=0;e<d.length;e++)if("string"==typeof b[d[e]+c])return!0;return!1}}}(window.jQuery),function(a){"use strict";var b=function(a,b){this.init(a,b)},c=function(a,b){this.init(a,b)};b.prototype={containerName:null,containerDataName:null,innerCss:null,containerClass:"editable-container editable-popup",defaults:{},init:function(c,d){this.$element=a(c),this.options=a.extend({},a.fn.editableContainer.defaults,d),this.splitOptions(),this.formOptions.scope=this.$element[0],this.initContainer(),this.delayedHide=!1,this.$element.on("destroyed",a.proxy(function(){this.destroy()},this)),a(document).data("editable-handlers-attached")||(a(document).on("keyup.editable",function(b){27===b.which&&a(".editable-open").editableContainer("hide")}),a(document).on("click.editable",function(c){var d,e=a(c.target),f=[".editable-container",".ui-datepicker-header",".datepicker",".modal-backdrop",".bootstrap-wysihtml5-insert-image-modal",".bootstrap-wysihtml5-insert-link-modal"];if(a.contains(document.documentElement,c.target)&&!e.is(document)){for(d=0;d<f.length;d++)if(e.is(f[d])||e.parents(f[d]).length)return;b.prototype.closeOthers(c.target)}}),a(document).data("editable-handlers-attached",!0))},splitOptions:function(){if(this.containerOptions={},this.formOptions={},!a.fn[this.containerName])throw new Error(this.containerName+" not found. 
Have you included corresponding js file?");for(var b in this.options)b in this.defaults?this.containerOptions[b]=this.options[b]:this.formOptions[b]=this.options[b]},tip:function(){return this.container()?this.container().$tip:null},container:function(){var a;return this.containerDataName&&(a=this.$element.data(this.containerDataName))?a:a=this.$element.data(this.containerName)},call:function(){this.$element[this.containerName].apply(this.$element,arguments)},initContainer:function(){this.call(this.containerOptions)},renderForm:function(){this.$form.editableform(this.formOptions).on({save:a.proxy(this.save,this),nochange:a.proxy(function(){this.hide("nochange")},this),cancel:a.proxy(function(){this.hide("cancel")},this),show:a.proxy(function(){this.delayedHide?(this.hide(this.delayedHide.reason),this.delayedHide=!1):this.setPosition()},this),rendering:a.proxy(this.setPosition,this),resize:a.proxy(this.setPosition,this),rendered:a.proxy(function(){this.$element.triggerHandler("shown",a(this.options.scope).data("editable"))},this)}).editableform("render")},show:function(b){this.$element.addClass("editable-open"),b!==!1&&this.closeOthers(this.$element[0]),this.innerShow(),this.tip().addClass(this.containerClass),this.$form,this.$form=a("<div>"),this.tip().is(this.innerCss)?this.tip().append(this.$form):this.tip().find(this.innerCss).append(this.$form),this.renderForm()},hide:function(a){if(this.tip()&&this.tip().is(":visible")&&this.$element.hasClass("editable-open")){if(this.$form.data("editableform").isSaving)return this.delayedHide={reason:a},void 0;this.delayedHide=!1,this.$element.removeClass("editable-open"),this.innerHide(),this.$element.triggerHandler("hidden",a||"manual")}},innerShow:function(){},innerHide:function(){},toggle:function(a){this.container()&&this.tip()&&this.tip().is(":visible")?this.hide():this.show(a)},setPosition:function(){},save:function(a,b){this.$element.triggerHandler("save",b),this.hide("save")},option:function(a,b){this.options[a]=b,a in this.containerOptions?(this.containerOptions[a]=b,this.setContainerOption(a,b)):(this.formOptions[a]=b,this.$form&&this.$form.editableform("option",a,b))},setContainerOption:function(a,b){this.call("option",a,b)},destroy:function(){this.hide(),this.innerDestroy(),this.$element.off("destroyed"),this.$element.removeData("editableContainer")},innerDestroy:function(){},closeOthers:function(b){a(".editable-open").each(function(c,d){if(d!==b&&!a(d).find(b).length){var e=a(d),f=e.data("editableContainer");f&&("cancel"===f.options.onblur?e.data("editableContainer").hide("onblur"):"submit"===f.options.onblur&&e.data("editableContainer").tip().find("form").submit())}})},activate:function(){this.tip&&this.tip().is(":visible")&&this.$form&&this.$form.data("editableform").input.activate()}},a.fn.editableContainer=function(d){var e=arguments;return this.each(function(){var f=a(this),g="editableContainer",h=f.data(g),i="object"==typeof d&&d,j="inline"===i.mode?c:b;h||f.data(g,h=new j(this,i)),"string"==typeof d&&h[d].apply(h,Array.prototype.slice.call(e,1))})},a.fn.editableContainer.Popup=b,a.fn.editableContainer.Inline=c,a.fn.editableContainer.defaults={value:null,placement:"top",autohide:!0,onblur:"cancel",anim:!1,mode:"popup"},jQuery.event.special.destroyed={remove:function(a){a.handler&&a.handler()}}}(window.jQuery),function(a){"use strict";a.extend(a.fn.editableContainer.Inline.prototype,a.fn.editableContainer.Popup.prototype,{containerName:"editableform",innerCss:".editable-inline",containerClass:"editable-container 
editable-inline",initContainer:function(){this.$tip=a("<span></span>"),this.options.anim||(this.options.anim=0)},splitOptions:function(){this.containerOptions={},this.formOptions=this.options},tip:function(){return this.$tip},innerShow:function(){this.$element.hide(),this.tip().insertAfter(this.$element).show()},innerHide:function(){this.$tip.hide(this.options.anim,a.proxy(function(){this.$element.show(),this.innerDestroy()},this))},innerDestroy:function(){this.tip()&&this.tip().empty().remove()}})}(window.jQuery),function(a){"use strict";var b=function(b,c){this.$element=a(b),this.options=a.extend({},a.fn.editable.defaults,c,a.fn.editableutils.getConfigData(this.$element)),this.options.selector?this.initLive():this.init(),this.options.highlight&&!a.fn.editableutils.supportsTransitions()&&(this.options.highlight=!1)};b.prototype={constructor:b,init:function(){var b,c=!1;if(this.options.name=this.options.name||this.$element.attr("id"),this.options.scope=this.$element[0],this.input=a.fn.editableutils.createInput(this.options),this.input){switch(void 0===this.options.value||null===this.options.value?(this.value=this.input.html2value(a.trim(this.$element.html())),c=!0):(this.options.value=a.fn.editableutils.tryParseJson(this.options.value,!0),this.value="string"==typeof this.options.value?this.input.str2value(this.options.value):this.options.value),this.$element.addClass("editable"),"textarea"===this.input.type&&this.$element.addClass("editable-pre-wrapped"),"manual"!==this.options.toggle?(this.$element.addClass("editable-click"),this.$element.on(this.options.toggle+".editable",a.proxy(function(a){if(this.options.disabled||a.preventDefault(),"mouseenter"===this.options.toggle)this.show();else{var b="click"!==this.options.toggle;this.toggle(b)}},this))):this.$element.attr("tabindex",-1),"function"==typeof this.options.display&&(this.options.autotext="always"),this.options.autotext){case"always":b=!0;break;case"auto":b=!a.trim(this.$element.text()).length&&null!==this.value&&void 0!==this.value&&!c;break;default:b=!1}a.when(b?this.render():!0).then(a.proxy(function(){this.options.disabled?this.disable():this.enable(),this.$element.triggerHandler("init",this)},this))}},initLive:function(){var b=this.options.selector;this.options.selector=!1,this.options.autotext="never",this.$element.on(this.options.toggle+".editable",b,a.proxy(function(b){var c=a(b.target);c.data("editable")||(c.hasClass(this.options.emptyclass)&&c.empty(),c.editable(this.options).trigger(b))},this))},render:function(a){return this.options.display!==!1?this.input.value2htmlFinal?this.input.value2html(this.value,this.$element[0],this.options.display,a):"function"==typeof this.options.display?this.options.display.call(this.$element[0],this.value,a):this.input.value2html(this.value,this.$element[0]):void 0},enable:function(){this.options.disabled=!1,this.$element.removeClass("editable-disabled"),this.handleEmpty(this.isEmpty),"manual"!==this.options.toggle&&"-1"===this.$element.attr("tabindex")&&this.$element.removeAttr("tabindex")},disable:function(){this.options.disabled=!0,this.hide(),this.$element.addClass("editable-disabled"),this.handleEmpty(this.isEmpty),this.$element.attr("tabindex",-1)},toggleDisabled:function(){this.options.disabled?this.enable():this.disable()},option:function(b,c){return b&&"object"==typeof b?(a.each(b,a.proxy(function(b,c){this.option(a.trim(b),c)},this)),void 
0):(this.options[b]=c,"disabled"===b?c?this.disable():this.enable():("value"===b&&this.setValue(c),this.container&&this.container.option(b,c),this.input.option&&this.input.option(b,c),void 0))},handleEmpty:function(b){this.options.display!==!1&&(this.isEmpty=void 0!==b?b:"function"==typeof this.input.isEmpty?this.input.isEmpty(this.$element):""===a.trim(this.$element.html()),this.options.disabled?this.isEmpty&&(this.$element.empty(),this.options.emptyclass&&this.$element.removeClass(this.options.emptyclass)):this.isEmpty?(this.$element.html(this.options.emptytext),this.options.emptyclass&&this.$element.addClass(this.options.emptyclass)):this.options.emptyclass&&this.$element.removeClass(this.options.emptyclass))},show:function(b){if(!this.options.disabled){if(this.container){if(this.container.tip().is(":visible"))return}else{var c=a.extend({},this.options,{value:this.value,input:this.input});this.$element.editableContainer(c),this.$element.on("save.internal",a.proxy(this.save,this)),this.container=this.$element.data("editableContainer")}this.container.show(b)}},hide:function(){this.container&&this.container.hide()},toggle:function(a){this.container&&this.container.tip().is(":visible")?this.hide():this.show(a)},save:function(a,b){if(this.options.unsavedclass){var c=!1;c=c||"function"==typeof this.options.url,c=c||this.options.display===!1,c=c||void 0!==b.response,c=c||this.options.savenochange&&this.input.value2str(this.value)!==this.input.value2str(b.newValue),c?this.$element.removeClass(this.options.unsavedclass):this.$element.addClass(this.options.unsavedclass)}if(this.options.highlight){var d=this.$element,e=d.css("background-color");d.css("background-color",this.options.highlight),setTimeout(function(){"transparent"===e&&(e=""),d.css("background-color",e),d.addClass("editable-bg-transition"),setTimeout(function(){d.removeClass("editable-bg-transition")},1700)},10)}this.setValue(b.newValue,!1,b.response)},validate:function(){return"function"==typeof this.options.validate?this.options.validate.call(this,this.value):void 0},setValue:function(b,c,d){this.value=c?this.input.str2value(b):b,this.container&&this.container.option("value",this.value),a.when(this.render(d)).then(a.proxy(function(){this.handleEmpty()},this))},activate:function(){this.container&&this.container.activate()},destroy:function(){this.disable(),this.container&&this.container.destroy(),this.input.destroy(),"manual"!==this.options.toggle&&(this.$element.removeClass("editable-click"),this.$element.off(this.options.toggle+".editable")),this.$element.off("save.internal"),this.$element.removeClass("editable editable-open editable-disabled"),this.$element.removeData("editable")}},a.fn.editable=function(c){var d={},e=arguments,f="editable";switch(c){case"validate":return this.each(function(){var b,c=a(this),e=c.data(f);e&&(b=e.validate())&&(d[e.options.name]=b)}),d;case"getValue":return 2===arguments.length&&arguments[1]===!0?d=this.eq(0).data(f).value:this.each(function(){var b=a(this),c=b.data(f);c&&void 0!==c.value&&null!==c.value&&(d[c.options.name]=c.input.value2submit(c.value))}),d;case"submit":var g,h=arguments[1]||{},i=this,j=this.editable("validate");return a.isEmptyObject(j)?(g=this.editable("getValue"),h.data&&a.extend(g,h.data),a.ajax(a.extend({url:h.url,data:g,type:"POST"},h.ajaxOptions)).success(function(a){"function"==typeof h.success&&h.success.call(i,a,h)}).error(function(){"function"==typeof h.error&&h.error.apply(i,arguments)})):"function"==typeof h.error&&h.error.call(i,j),this}return this.each(function(){var 
d=a(this),g=d.data(f),h="object"==typeof c&&c;return h&&h.selector?(g=new b(this,h),void 0):(g||d.data(f,g=new b(this,h)),"string"==typeof c&&g[c].apply(g,Array.prototype.slice.call(e,1)),void 0)})},a.fn.editable.defaults={type:"text",disabled:!1,toggle:"click",emptytext:"Empty",autotext:"auto",value:null,display:null,emptyclass:"editable-empty",unsavedclass:"editable-unsaved",selector:null,highlight:"#FFFF80"}}(window.jQuery),function(a){"use strict";a.fn.editabletypes={};var b=function(){};b.prototype={init:function(b,c,d){this.type=b,this.options=a.extend({},d,c)},prerender:function(){this.$tpl=a(this.options.tpl),this.$input=this.$tpl,this.$clear=null,this.error=null},render:function(){},value2html:function(b,c){a(c)[this.options.escape?"text":"html"](a.trim(b))},html2value:function(b){return a("<div>").html(b).text()},value2str:function(a){return a},str2value:function(a){return a},value2submit:function(a){return a},value2input:function(a){this.$input.val(a)},input2value:function(){return this.$input.val()},activate:function(){this.$input.is(":visible")&&this.$input.focus()},clear:function(){this.$input.val(null)},escape:function(b){return a("<div>").text(b).html()},autosubmit:function(){},destroy:function(){},setClass:function(){this.options.inputclass&&this.$input.addClass(this.options.inputclass)},setAttr:function(a){void 0!==this.options[a]&&null!==this.options[a]&&this.$input.attr(a,this.options[a])},option:function(a,b){this.options[a]=b}},b.defaults={tpl:"",inputclass:null,escape:!0,scope:null,showbuttons:!0},a.extend(a.fn.editabletypes,{abstractinput:b})}(window.jQuery),function(a){"use strict";var b=function(){};a.fn.editableutils.inherit(b,a.fn.editabletypes.abstractinput),a.extend(b.prototype,{render:function(){var b=a.Deferred();return this.error=null,this.onSourceReady(function(){this.renderList(),b.resolve()},function(){this.error=this.options.sourceError,b.resolve()}),b.promise()},html2value:function(){return null},value2html:function(b,c,d,e){var f=a.Deferred(),g=function(){"function"==typeof d?d.call(c,b,this.sourceData,e):this.value2htmlFinal(b,c),f.resolve()};return null===b?g.call(this):this.onSourceReady(g,function(){f.resolve()}),f.promise()},onSourceReady:function(b,c){var d;if(a.isFunction(this.options.source)?(d=this.options.source.call(this.options.scope),this.sourceData=null):d=this.options.source,this.options.sourceCache&&a.isArray(this.sourceData))return b.call(this),void 0;try{d=a.fn.editableutils.tryParseJson(d,!1)}catch(e){return c.call(this),void 0}if("string"==typeof d){if(this.options.sourceCache){var f,g=d;if(a(document).data(g)||a(document).data(g,{}),f=a(document).data(g),f.loading===!1&&f.sourceData)return this.sourceData=f.sourceData,this.doPrepend(),b.call(this),void 0;if(f.loading===!0)return f.callbacks.push(a.proxy(function(){this.sourceData=f.sourceData,this.doPrepend(),b.call(this)},this)),f.err_callbacks.push(a.proxy(c,this)),void 0;f.loading=!0,f.callbacks=[],f.err_callbacks=[]}var h=a.extend({url:d,type:"get",cache:!1,dataType:"json",success:a.proxy(function(d){f&&(f.loading=!1),this.sourceData=this.makeArray(d),a.isArray(this.sourceData)?(f&&(f.sourceData=this.sourceData,a.each(f.callbacks,function(){this.call()})),this.doPrepend(),b.call(this)):(c.call(this),f&&a.each(f.err_callbacks,function(){this.call()}))},this),error:a.proxy(function(){c.call(this),f&&(f.loading=!1,a.each(f.err_callbacks,function(){this.call()}))},this)},this.options.sourceOptions);a.ajax(h)}else 
this.sourceData=this.makeArray(d),a.isArray(this.sourceData)?(this.doPrepend(),b.call(this)):c.call(this)},doPrepend:function(){null!==this.options.prepend&&void 0!==this.options.prepend&&(a.isArray(this.prependData)||(a.isFunction(this.options.prepend)&&(this.options.prepend=this.options.prepend.call(this.options.scope)),this.options.prepend=a.fn.editableutils.tryParseJson(this.options.prepend,!0),"string"==typeof this.options.prepend&&(this.options.prepend={"":this.options.prepend}),this.prependData=this.makeArray(this.options.prepend)),a.isArray(this.prependData)&&a.isArray(this.sourceData)&&(this.sourceData=this.prependData.concat(this.sourceData)))},renderList:function(){},value2htmlFinal:function(){},makeArray:function(b){var c,d,e,f,g=[];if(!b||"string"==typeof b)return null;if(a.isArray(b)){f=function(a,b){return d={value:a,text:b},c++>=2?!1:void 0};for(var h=0;h<b.length;h++)e=b[h],"object"==typeof e?(c=0,a.each(e,f),1===c?g.push(d):c>1&&(e.children&&(e.children=this.makeArray(e.children)),g.push(e))):g.push({value:e,text:e})}else a.each(b,function(a,b){g.push({value:a,text:b})});return g},option:function(a,b){this.options[a]=b,"source"===a&&(this.sourceData=null),"prepend"===a&&(this.prependData=null)}}),b.defaults=a.extend({},a.fn.editabletypes.abstractinput.defaults,{source:null,prepend:!1,sourceError:"Error when loading list",sourceCache:!0,sourceOptions:null}),a.fn.editabletypes.list=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("text",a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.abstractinput),a.extend(b.prototype,{render:function(){this.renderClear(),this.setClass(),this.setAttr("placeholder")},activate:function(){this.$input.is(":visible")&&(this.$input.focus(),a.fn.editableutils.setCursorPosition(this.$input.get(0),this.$input.val().length),this.toggleClear&&this.toggleClear())},renderClear:function(){this.options.clear&&(this.$clear=a('<span class="editable-clear-x"></span>'),this.$input.after(this.$clear).css("padding-right",24).keyup(a.proxy(function(b){if(!~a.inArray(b.keyCode,[40,38,9,13,27])){clearTimeout(this.t);var c=this;this.t=setTimeout(function(){c.toggleClear(b)},100)}},this)).parent().css("position","relative"),this.$clear.click(a.proxy(this.clear,this)))},postrender:function(){},toggleClear:function(){if(this.$clear){var a=this.$input.val().length,b=this.$clear.is(":visible");a&&!b&&this.$clear.show(),!a&&b&&this.$clear.hide()}},clear:function(){this.$clear.hide(),this.$input.val("").focus()}}),b.defaults=a.extend({},a.fn.editabletypes.abstractinput.defaults,{tpl:'<input type="text">',placeholder:null,clear:!0}),a.fn.editabletypes.text=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("textarea",a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.abstractinput),a.extend(b.prototype,{render:function(){this.setClass(),this.setAttr("placeholder"),this.setAttr("rows"),this.$input.keydown(function(b){b.ctrlKey&&13===b.which&&a(this).closest("form").submit()})},activate:function(){a.fn.editabletypes.text.prototype.activate.call(this)}}),b.defaults=a.extend({},a.fn.editabletypes.abstractinput.defaults,{tpl:"<textarea></textarea>",inputclass:"input-large",placeholder:null,rows:7}),a.fn.editabletypes.textarea=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("select",a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.list),a.extend(b.prototype,{renderList:function(){this.$input.empty();var b=function(c,d){var e;if(a.isArray(d))for(var 
f=0;f<d.length;f++)e={},d[f].children?(e.label=d[f].text,c.append(b(a("<optgroup>",e),d[f].children))):(e.value=d[f].value,d[f].disabled&&(e.disabled=!0),c.append(a("<option>",e).text(d[f].text)));return c};b(this.$input,this.sourceData),this.setClass(),this.$input.on("keydown.editable",function(b){13===b.which&&a(this).closest("form").submit()})},value2htmlFinal:function(b,c){var d="",e=a.fn.editableutils.itemsByValue(b,this.sourceData);e.length&&(d=e[0].text),a.fn.editabletypes.abstractinput.prototype.value2html.call(this,d,c)},autosubmit:function(){this.$input.off("keydown.editable").on("change.editable",function(){a(this).closest("form").submit()})}}),b.defaults=a.extend({},a.fn.editabletypes.list.defaults,{tpl:"<select></select>"}),a.fn.editabletypes.select=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("checklist",a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.list),a.extend(b.prototype,{renderList:function(){var b;if(this.$tpl.empty(),a.isArray(this.sourceData)){for(var c=0;c<this.sourceData.length;c++)b=a("<label>").append(a("<input>",{type:"checkbox",value:this.sourceData[c].value})).append(a("<span>").text(" "+this.sourceData[c].text)),a("<div>").append(b).appendTo(this.$tpl);this.$input=this.$tpl.find('input[type="checkbox"]'),this.setClass()}},value2str:function(b){return a.isArray(b)?b.sort().join(a.trim(this.options.separator)):""},str2value:function(b){var c,d=null;return"string"==typeof b&&b.length?(c=new RegExp("\\s*"+a.trim(this.options.separator)+"\\s*"),d=b.split(c)):d=a.isArray(b)?b:[b],d},value2input:function(b){this.$input.prop("checked",!1),a.isArray(b)&&b.length&&this.$input.each(function(c,d){var e=a(d);a.each(b,function(a,b){e.val()==b&&e.prop("checked",!0)})})},input2value:function(){var b=[];return this.$input.filter(":checked").each(function(c,d){b.push(a(d).val())}),b},value2htmlFinal:function(b,c){var d=[],e=a.fn.editableutils.itemsByValue(b,this.sourceData),f=this.options.escape;e.length?(a.each(e,function(b,c){var e=f?a.fn.editableutils.escape(c.text):c.text;d.push(e)}),a(c).html(d.join("<br>"))):a(c).empty()},activate:function(){this.$input.first().focus()},autosubmit:function(){this.$input.on("keydown",function(b){13===b.which&&a(this).closest("form").submit()})}}),b.defaults=a.extend({},a.fn.editabletypes.list.defaults,{tpl:'<div class="editable-checklist"></div>',inputclass:null,separator:","}),a.fn.editabletypes.checklist=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("password",a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.text),a.extend(b.prototype,{value2html:function(b,c){b?a(c).text("[hidden]"):a(c).empty()},html2value:function(){return null}}),b.defaults=a.extend({},a.fn.editabletypes.text.defaults,{tpl:'<input type="password">'}),a.fn.editabletypes.password=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("email",a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.text),b.defaults=a.extend({},a.fn.editabletypes.text.defaults,{tpl:'<input type="email">'}),a.fn.editabletypes.email=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("url",a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.text),b.defaults=a.extend({},a.fn.editabletypes.text.defaults,{tpl:'<input type="url">'}),a.fn.editabletypes.url=b}(window.jQuery),function(a){"use strict";var 
b=function(a){this.init("tel",a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.text),b.defaults=a.extend({},a.fn.editabletypes.text.defaults,{tpl:'<input type="tel">'}),a.fn.editabletypes.tel=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("number",a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.text),a.extend(b.prototype,{render:function(){b.superclass.render.call(this),this.setAttr("min"),this.setAttr("max"),this.setAttr("step")},postrender:function(){this.$clear&&this.$clear.css({right:24})}}),b.defaults=a.extend({},a.fn.editabletypes.text.defaults,{tpl:'<input type="number">',inputclass:"input-mini",min:null,max:null,step:null}),a.fn.editabletypes.number=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("range",a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.number),a.extend(b.prototype,{render:function(){this.$input=this.$tpl.filter("input"),this.setClass(),this.setAttr("min"),this.setAttr("max"),this.setAttr("step"),this.$input.on("input",function(){a(this).siblings("output").text(a(this).val())})},activate:function(){this.$input.focus()}}),b.defaults=a.extend({},a.fn.editabletypes.number.defaults,{tpl:'<input type="range"><output style="width: 30px; display: inline-block"></output>',inputclass:"input-medium"}),a.fn.editabletypes.range=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("time",a,b.defaults)
-};a.fn.editableutils.inherit(b,a.fn.editabletypes.abstractinput),a.extend(b.prototype,{render:function(){this.setClass()}}),b.defaults=a.extend({},a.fn.editabletypes.abstractinput.defaults,{tpl:'<input type="time">'}),a.fn.editabletypes.time=b}(window.jQuery),function(a){"use strict";var b=function(c){if(this.init("select2",c,b.defaults),c.select2=c.select2||{},this.sourceData=null,c.placeholder&&(c.select2.placeholder=c.placeholder),!c.select2.tags&&c.source){var d=c.source;a.isFunction(c.source)&&(d=c.source.call(c.scope)),"string"==typeof d?(c.select2.ajax=c.select2.ajax||{},c.select2.ajax.data||(c.select2.ajax.data=function(a){return{query:a}}),c.select2.ajax.results||(c.select2.ajax.results=function(a){return{results:a}}),c.select2.ajax.url=d):(this.sourceData=this.convertSource(d),c.select2.data=this.sourceData)}if(this.options.select2=a.extend({},b.defaults.select2,c.select2),this.isMultiple=this.options.select2.tags||this.options.select2.multiple,this.isRemote="ajax"in this.options.select2,this.idFunc=this.options.select2.id,"function"!=typeof this.idFunc){var e=this.idFunc||"id";this.idFunc=function(a){return a[e]}}this.formatSelection=this.options.select2.formatSelection,"function"!=typeof this.formatSelection&&(this.formatSelection=function(a){return a.text})};a.fn.editableutils.inherit(b,a.fn.editabletypes.abstractinput),a.extend(b.prototype,{render:function(){this.setClass(),this.isRemote&&this.$input.on("select2-loaded",a.proxy(function(a){this.sourceData=a.items.results},this)),this.isMultiple&&this.$input.on("change",function(){a(this).closest("form").parent().triggerHandler("resize")})},value2html:function(c,d){var e,f="",g=this;this.options.select2.tags?e=c:this.sourceData&&(e=a.fn.editableutils.itemsByValue(c,this.sourceData,this.idFunc)),a.isArray(e)?(f=[],a.each(e,function(a,b){f.push(b&&"object"==typeof b?g.formatSelection(b):b)})):e&&(f=g.formatSelection(e)),f=a.isArray(f)?f.join(this.options.viewseparator):f,b.superclass.value2html.call(this,f,d)},html2value:function(a){return this.options.select2.tags?this.str2value(a,this.options.viewseparator):null},value2input:function(b){if(this.$input.data("select2")?this.$input.val(b).trigger("change",!0):(this.$input.val(b),this.$input.select2(this.options.select2)),this.isRemote&&!this.isMultiple&&!this.options.select2.initSelection){var c=this.options.select2.id,d=this.options.select2.formatSelection;if(!c&&!d){var e={id:b,text:a(this.options.scope).text()};this.$input.select2("data",e)}}},input2value:function(){return this.$input.select2("val")},str2value:function(b,c){if("string"!=typeof b||!this.isMultiple)return b;c=c||this.options.select2.separator||a.fn.select2.defaults.separator;var d,e,f;if(null===b||b.length<1)return null;for(d=b.split(c),e=0,f=d.length;f>e;e+=1)d[e]=a.trim(d[e]);return d},autosubmit:function(){this.$input.on("change",function(b,c){c||a(this).closest("form").submit()})},convertSource:function(b){if(a.isArray(b)&&b.length&&void 0!==b[0].value)for(var c=0;c<b.length;c++)void 0!==b[c].value&&(b[c].id=b[c].value,delete b[c].value);return b},destroy:function(){this.$input.data("select2")&&this.$input.select2("destroy")}}),b.defaults=a.extend({},a.fn.editabletypes.abstractinput.defaults,{tpl:'<input type="hidden">',select2:null,placeholder:null,source:null,viewseparator:", "}),a.fn.editabletypes.select2=b}(window.jQuery),function(a){var b=function(b,c){return this.$element=a(b),this.$element.is("input")?(this.options=a.extend({},a.fn.combodate.defaults,c,this.$element.data()),this.init(),void 
0):(a.error("Combodate should be applied to INPUT element"),void 0)};b.prototype={constructor:b,init:function(){this.map={day:["D","date"],month:["M","month"],year:["Y","year"],hour:["[Hh]","hours"],minute:["m","minutes"],second:["s","seconds"],ampm:["[Aa]",""]},this.$widget=a('<span class="combodate"></span>').html(this.getTemplate()),this.initCombos(),this.$widget.on("change","select",a.proxy(function(){this.$element.val(this.getValue())},this)),this.$widget.find("select").css("width","auto"),this.$element.hide().after(this.$widget),this.setValue(this.$element.val()||this.options.value)},getTemplate:function(){var b=this.options.template;return a.each(this.map,function(a,c){c=c[0];var d=new RegExp(c+"+"),e=c.length>1?c.substring(1,2):c;b=b.replace(d,"{"+e+"}")}),b=b.replace(/ /g,"&nbsp;"),a.each(this.map,function(a,c){c=c[0];var d=c.length>1?c.substring(1,2):c;b=b.replace("{"+d+"}",'<select class="'+a+'"></select>')}),b},initCombos:function(){var b=this;a.each(this.map,function(a){var c,d,e=b.$widget.find("."+a);e.length&&(b["$"+a]=e,c="fill"+a.charAt(0).toUpperCase()+a.slice(1),d=b[c](),b["$"+a].html(b.renderItems(d)))})},initItems:function(a){var b,c=[];if("name"===this.options.firstItem){b=moment.relativeTime||moment.langData()._relativeTime;var d="function"==typeof b[a]?b[a](1,!0,a,!1):b[a];d=d.split(" ").reverse()[0],c.push(["",d])}else"empty"===this.options.firstItem&&c.push(["",""]);return c},renderItems:function(a){for(var b=[],c=0;c<a.length;c++)b.push('<option value="'+a[c][0]+'">'+a[c][1]+"</option>");return b.join("\n")},fillDay:function(){var a,b,c=this.initItems("d"),d=-1!==this.options.template.indexOf("DD");for(b=1;31>=b;b++)a=d?this.leadZero(b):b,c.push([b,a]);return c},fillMonth:function(){var a,b,c=this.initItems("M"),d=-1!==this.options.template.indexOf("MMMM"),e=-1!==this.options.template.indexOf("MMM"),f=-1!==this.options.template.indexOf("MM");for(b=0;11>=b;b++)a=d?moment().date(1).month(b).format("MMMM"):e?moment().date(1).month(b).format("MMM"):f?this.leadZero(b+1):b+1,c.push([b,a]);return c},fillYear:function(){var a,b,c=[],d=-1!==this.options.template.indexOf("YYYY");for(b=this.options.maxYear;b>=this.options.minYear;b--)a=d?b:(b+"").substring(2),c[this.options.yearDescending?"push":"unshift"]([b,a]);return c=this.initItems("y").concat(c)},fillHour:function(){var a,b,c=this.initItems("h"),d=-1!==this.options.template.indexOf("h"),e=(-1!==this.options.template.indexOf("H"),-1!==this.options.template.toLowerCase().indexOf("hh")),f=d?1:0,g=d?12:23;for(b=f;g>=b;b++)a=e?this.leadZero(b):b,c.push([b,a]);return c},fillMinute:function(){var a,b,c=this.initItems("m"),d=-1!==this.options.template.indexOf("mm");for(b=0;59>=b;b+=this.options.minuteStep)a=d?this.leadZero(b):b,c.push([b,a]);return c},fillSecond:function(){var a,b,c=this.initItems("s"),d=-1!==this.options.template.indexOf("ss");for(b=0;59>=b;b+=this.options.secondStep)a=d?this.leadZero(b):b,c.push([b,a]);return c},fillAmpm:function(){var a=-1!==this.options.template.indexOf("a"),b=(-1!==this.options.template.indexOf("A"),[["am",a?"am":"AM"],["pm",a?"pm":"PM"]]);return b},getValue:function(b){var c,d={},e=this,f=!1;return a.each(this.map,function(a){if("ampm"!==a){var b="day"===a?1:0;return d[a]=e["$"+a]?parseInt(e["$"+a].val(),10):b,isNaN(d[a])?(f=!0,!1):void 0}}),f?"":(this.$ampm&&(d.hour=12===d.hour?"am"===this.$ampm.val()?0:12:"am"===this.$ampm.val()?d.hour:d.hour+12),c=moment([d.year,d.month,d.day,d.hour,d.minute,d.second]),this.highlight(c),b=void 
0===b?this.options.format:b,null===b?c.isValid()?c:null:c.isValid()?c.format(b):"")},setValue:function(b){function c(b,c){var d={};return b.children("option").each(function(b,e){var f,g=a(e).attr("value");""!==g&&(f=Math.abs(g-c),("undefined"==typeof d.distance||f<d.distance)&&(d={value:g,distance:f}))}),d.value}if(b){var d="string"==typeof b?moment(b,this.options.format):moment(b),e=this,f={};d.isValid()&&(a.each(this.map,function(a,b){"ampm"!==a&&(f[a]=d[b[1]]())}),this.$ampm&&(f.hour>=12?(f.ampm="pm",f.hour>12&&(f.hour-=12)):(f.ampm="am",0===f.hour&&(f.hour=12))),a.each(f,function(a,b){e["$"+a]&&("minute"===a&&e.options.minuteStep>1&&e.options.roundTime&&(b=c(e["$"+a],b)),"second"===a&&e.options.secondStep>1&&e.options.roundTime&&(b=c(e["$"+a],b)),e["$"+a].val(b))}),this.$element.val(d.format(this.options.format)))}},highlight:function(a){a.isValid()?this.options.errorClass?this.$widget.removeClass(this.options.errorClass):this.$widget.find("select").css("border-color",this.borderColor):this.options.errorClass?this.$widget.addClass(this.options.errorClass):(this.borderColor||(this.borderColor=this.$widget.find("select").css("border-color")),this.$widget.find("select").css("border-color","red"))},leadZero:function(a){return 9>=a?"0"+a:a},destroy:function(){this.$widget.remove(),this.$element.removeData("combodate").show()}},a.fn.combodate=function(c){var d,e=Array.apply(null,arguments);return e.shift(),"getValue"===c&&this.length&&(d=this.eq(0).data("combodate"))?d.getValue.apply(d,e):this.each(function(){var d=a(this),f=d.data("combodate"),g="object"==typeof c&&c;f||d.data("combodate",f=new b(this,g)),"string"==typeof c&&"function"==typeof f[c]&&f[c].apply(f,e)})},a.fn.combodate.defaults={format:"DD-MM-YYYY HH:mm",template:"D / MMM / YYYY   H : mm",value:null,minYear:1970,maxYear:2015,yearDescending:!0,minuteStep:5,secondStep:1,firstItem:"empty",errorClass:null,roundTime:!0}}(window.jQuery),function(a){"use strict";var b=function(c){this.init("combodate",c,b.defaults),this.options.viewformat||(this.options.viewformat=this.options.format),c.combodate=a.fn.editableutils.tryParseJson(c.combodate,!0),this.options.combodate=a.extend({},b.defaults.combodate,c.combodate,{format:this.options.format,template:this.options.template})};a.fn.editableutils.inherit(b,a.fn.editabletypes.abstractinput),a.extend(b.prototype,{render:function(){this.$input.combodate(this.options.combodate),"bs3"===a.fn.editableform.engine&&this.$input.siblings().find("select").addClass("form-control"),this.options.inputclass&&this.$input.siblings().find("select").addClass(this.options.inputclass)},value2html:function(a,c){var d=a?a.format(this.options.viewformat):"";b.superclass.value2html.call(this,d,c)},html2value:function(a){return a?moment(a,this.options.viewformat):null},value2str:function(a){return a?a.format(this.options.format):""},str2value:function(a){return a?moment(a,this.options.format):null},value2submit:function(a){return this.value2str(a)},value2input:function(a){this.$input.combodate("setValue",a)},input2value:function(){return this.$input.combodate("getValue",null)},activate:function(){this.$input.siblings(".combodate").find("select").eq(0).focus()},autosubmit:function(){}}),b.defaults=a.extend({},a.fn.editabletypes.abstractinput.defaults,{tpl:'<input type="text">',inputclass:null,format:"YYYY-MM-DD",viewformat:null,template:"D / MMM / YYYY",combodate:null}),a.fn.editabletypes.combodate=b}(window.jQuery),function(a){"use strict";var 
b=a.fn.editableform.Constructor.prototype.initInput;a.extend(a.fn.editableform.Constructor.prototype,{initTemplate:function(){this.$form=a(a.fn.editableform.template),this.$form.find(".control-group").addClass("form-group"),this.$form.find(".editable-error-block").addClass("help-block")},initInput:function(){b.apply(this);var c=null===this.input.options.inputclass||this.input.options.inputclass===!1,d="input-sm",e="text,select,textarea,password,email,url,tel,number,range,time,typeaheadjs".split(",");~a.inArray(this.input.type,e)&&(this.input.$input.addClass("form-control"),c&&(this.input.options.inputclass=d,this.input.$input.addClass(d)));for(var f=this.$form.find(".editable-buttons"),g=c?[d]:this.input.options.inputclass.split(" "),h=0;h<g.length;h++)"input-lg"===g[h].toLowerCase()&&f.find("button").removeClass("btn-sm").addClass("btn-lg")}}),a.fn.editableform.buttons='<button type="submit" class="btn btn-primary btn-sm editable-submit"><i class="glyphicon glyphicon-ok"></i></button><button type="button" class="btn btn-default btn-sm editable-cancel"><i class="glyphicon glyphicon-remove"></i></button>',a.fn.editableform.errorGroupClass="has-error",a.fn.editableform.errorBlockClass=null,a.fn.editableform.engine="bs3"}(window.jQuery),function(a){"use strict";a.extend(a.fn.editableContainer.Popup.prototype,{containerName:"popover",containerDataName:"bs.popover",innerCss:".popover-content",defaults:a.fn.popover.Constructor.DEFAULTS,initContainer:function(){a.extend(this.containerOptions,{trigger:"manual",selector:!1,content:" ",template:this.defaults.template});var b;this.$element.data("template")&&(b=this.$element.data("template"),this.$element.removeData("template")),this.call(this.containerOptions),b&&this.$element.data("template",b)},innerShow:function(){this.call("show")},innerHide:function(){this.call("hide")},innerDestroy:function(){this.call("destroy")},setContainerOption:function(a,b){this.container().options[a]=b},setPosition:function(){!function(){var a=this.tip(),b="function"==typeof this.options.placement?this.options.placement.call(this,a[0],this.$element[0]):this.options.placement,c=this.getPosition(),d=a[0].offsetWidth,e=a[0].offsetHeight,f=this.getCalculatedOffset(b,c,d,e);this.applyPlacement(f,b)}.call(this.container())}})}(window.jQuery),function(a){function b(){return new Date(Date.UTC.apply(Date,arguments))}function c(b,c){var d,e=a(b).data(),f={},g=new RegExp("^"+c.toLowerCase()+"([A-Z])"),c=new RegExp("^"+c.toLowerCase());for(var h in e)c.test(h)&&(d=h.replace(g,function(a,b){return b.toLowerCase()}),f[d]=e[h]);return f}function d(b){var c={};if(k[b]||(b=b.split("-")[0],k[b])){var d=k[b];return a.each(j,function(a,b){b in d&&(c[b]=d[b])}),c}}var e=function(b,c){this._process_options(c),this.element=a(b),this.isInline=!1,this.isInput=this.element.is("input"),this.component=this.element.is(".date")?this.element.find(".add-on, .btn"):!1,this.hasInput=this.component&&this.element.find("input").length,this.component&&0===this.component.length&&(this.component=!1),this.picker=a(l.template),this._buildEvents(),this._attachEvents(),this.isInline?this.picker.addClass("datepicker-inline").appendTo(this.element):this.picker.addClass("datepicker-dropdown dropdown-menu"),this.o.rtl&&(this.picker.addClass("datepicker-rtl"),this.picker.find(".prev i, .next i").toggleClass("icon-arrow-left icon-arrow-right")),this.viewMode=this.o.startView,this.o.calendarWeeks&&this.picker.find("tfoot th.today").attr("colspan",function(a,b){return 
parseInt(b)+1}),this._allow_update=!1,this.setStartDate(this.o.startDate),this.setEndDate(this.o.endDate),this.setDaysOfWeekDisabled(this.o.daysOfWeekDisabled),this.fillDow(),this.fillMonths(),this._allow_update=!0,this.update(),this.showMode(),this.isInline&&this.show()};e.prototype={constructor:e,_process_options:function(b){this._o=a.extend({},this._o,b);var c=this.o=a.extend({},this._o),d=c.language;switch(k[d]||(d=d.split("-")[0],k[d]||(d=i.language)),c.language=d,c.startView){case 2:case"decade":c.startView=2;break;case 1:case"year":c.startView=1;break;default:c.startView=0}switch(c.minViewMode){case 1:case"months":c.minViewMode=1;break;case 2:case"years":c.minViewMode=2;break;default:c.minViewMode=0}c.startView=Math.max(c.startView,c.minViewMode),c.weekStart%=7,c.weekEnd=(c.weekStart+6)%7;var e=l.parseFormat(c.format);c.startDate!==-1/0&&(c.startDate=l.parseDate(c.startDate,e,c.language)),1/0!==c.endDate&&(c.endDate=l.parseDate(c.endDate,e,c.language)),c.daysOfWeekDisabled=c.daysOfWeekDisabled||[],a.isArray(c.daysOfWeekDisabled)||(c.daysOfWeekDisabled=c.daysOfWeekDisabled.split(/[,\s]*/)),c.daysOfWeekDisabled=a.map(c.daysOfWeekDisabled,function(a){return parseInt(a,10)})},_events:[],_secondaryEvents:[],_applyEvents:function(a){for(var b,c,d=0;d<a.length;d++)b=a[d][0],c=a[d][1],b.on(c)},_unapplyEvents:function(a){for(var b,c,d=0;d<a.length;d++)b=a[d][0],c=a[d][1],b.off(c)},_buildEvents:function(){this.isInput?this._events=[[this.element,{focus:a.proxy(this.show,this),keyup:a.proxy(this.update,this),keydown:a.proxy(this.keydown,this)}]]:this.component&&this.hasInput?this._events=[[this.element.find("input"),{focus:a.proxy(this.show,this),keyup:a.proxy(this.update,this),keydown:a.proxy(this.keydown,this)}],[this.component,{click:a.proxy(this.show,this)}]]:this.element.is("div")?this.isInline=!0:this._events=[[this.element,{click:a.proxy(this.show,this)}]],this._secondaryEvents=[[this.picker,{click:a.proxy(this.click,this)}],[a(window),{resize:a.proxy(this.place,this)}],[a(document),{mousedown:a.proxy(function(a){this.element.is(a.target)||this.element.find(a.target).size()||this.picker.is(a.target)||this.picker.find(a.target).size()||this.hide()},this)}]]},_attachEvents:function(){this._detachEvents(),this._applyEvents(this._events)},_detachEvents:function(){this._unapplyEvents(this._events)},_attachSecondaryEvents:function(){this._detachSecondaryEvents(),this._applyEvents(this._secondaryEvents)},_detachSecondaryEvents:function(){this._unapplyEvents(this._secondaryEvents)},_trigger:function(b,c){var d=c||this.date,e=new Date(d.getTime()+6e4*d.getTimezoneOffset());this.element.trigger({type:b,date:e,format:a.proxy(function(a){var b=a||this.o.format;return l.formatDate(d,b,this.o.language)},this)})},show:function(a){this.isInline||this.picker.appendTo("body"),this.picker.show(),this.height=this.component?this.component.outerHeight():this.element.outerHeight(),this.place(),this._attachSecondaryEvents(),a&&a.preventDefault(),this._trigger("show")},hide:function(){this.isInline||this.picker.is(":visible")&&(this.picker.hide().detach(),this._detachSecondaryEvents(),this.viewMode=this.o.startView,this.showMode(),this.o.forceParse&&(this.isInput&&this.element.val()||this.hasInput&&this.element.find("input").val())&&this.setValue(),this._trigger("hide"))},remove:function(){this.hide(),this._detachEvents(),this._detachSecondaryEvents(),this.picker.remove(),delete this.element.data().datepicker,this.isInput||delete this.element.data().date},getDate:function(){var a=this.getUTCDate();return new 
Date(a.getTime()+6e4*a.getTimezoneOffset())},getUTCDate:function(){return this.date},setDate:function(a){this.setUTCDate(new Date(a.getTime()-6e4*a.getTimezoneOffset()))},setUTCDate:function(a){this.date=a,this.setValue()},setValue:function(){var a=this.getFormattedDate();this.isInput?this.element.val(a):this.component&&this.element.find("input").val(a)},getFormattedDate:function(a){return void 0===a&&(a=this.o.format),l.formatDate(this.date,a,this.o.language)},setStartDate:function(a){this._process_options({startDate:a}),this.update(),this.updateNavArrows()},setEndDate:function(a){this._process_options({endDate:a}),this.update(),this.updateNavArrows()},setDaysOfWeekDisabled:function(a){this._process_options({daysOfWeekDisabled:a}),this.update(),this.updateNavArrows()},place:function(){if(!this.isInline){var b=parseInt(this.element.parents().filter(function(){return"auto"!=a(this).css("z-index")}).first().css("z-index"))+10,c=this.component?this.component.parent().offset():this.element.offset(),d=this.component?this.component.outerHeight(!0):this.element.outerHeight(!0);this.picker.css({top:c.top+d,left:c.left,zIndex:b})}},_allow_update:!0,update:function(){if(this._allow_update){var a,b=!1;arguments&&arguments.length&&("string"==typeof arguments[0]||arguments[0]instanceof Date)?(a=arguments[0],b=!0):(a=this.isInput?this.element.val():this.element.data("date")||this.element.find("input").val(),delete this.element.data().date),this.date=l.parseDate(a,this.o.format,this.o.language),b&&this.setValue(),this.viewDate=this.date<this.o.startDate?new Date(this.o.startDate):this.date>this.o.endDate?new Date(this.o.endDate):new Date(this.date),this.fill()}},fillDow:function(){var a=this.o.weekStart,b="<tr>";if(this.o.calendarWeeks){var c='<th class="cw">&nbsp;</th>';b+=c,this.picker.find(".datepicker-days thead tr:first-child").prepend(c)}for(;a<this.o.weekStart+7;)b+='<th class="dow">'+k[this.o.language].daysMin[a++%7]+"</th>";b+="</tr>",this.picker.find(".datepicker-days thead").append(b)},fillMonths:function(){for(var a="",b=0;12>b;)a+='<span class="month">'+k[this.o.language].monthsShort[b++]+"</span>";this.picker.find(".datepicker-months td").html(a)},setRange:function(b){b&&b.length?this.range=a.map(b,function(a){return a.valueOf()}):delete this.range,this.fill()},getClassNames:function(b){var c=[],d=this.viewDate.getUTCFullYear(),e=this.viewDate.getUTCMonth(),f=this.date.valueOf(),g=new Date;return b.getUTCFullYear()<d||b.getUTCFullYear()==d&&b.getUTCMonth()<e?c.push("old"):(b.getUTCFullYear()>d||b.getUTCFullYear()==d&&b.getUTCMonth()>e)&&c.push("new"),this.o.todayHighlight&&b.getUTCFullYear()==g.getFullYear()&&b.getUTCMonth()==g.getMonth()&&b.getUTCDate()==g.getDate()&&c.push("today"),f&&b.valueOf()==f&&c.push("active"),(b.valueOf()<this.o.startDate||b.valueOf()>this.o.endDate||-1!==a.inArray(b.getUTCDay(),this.o.daysOfWeekDisabled))&&c.push("disabled"),this.range&&(b>this.range[0]&&b<this.range[this.range.length-1]&&c.push("range"),-1!=a.inArray(b.valueOf(),this.range)&&c.push("selected")),c},fill:function(){var c,d=new Date(this.viewDate),e=d.getUTCFullYear(),f=d.getUTCMonth(),g=this.o.startDate!==-1/0?this.o.startDate.getUTCFullYear():-1/0,h=this.o.startDate!==-1/0?this.o.startDate.getUTCMonth():-1/0,i=1/0!==this.o.endDate?this.o.endDate.getUTCFullYear():1/0,j=1/0!==this.o.endDate?this.o.endDate.getUTCMonth():1/0;this.date&&this.date.valueOf(),this.picker.find(".datepicker-days thead th.datepicker-switch").text(k[this.o.language].months[f]+" "+e),this.picker.find("tfoot 
th.today").text(k[this.o.language].today).toggle(this.o.todayBtn!==!1),this.picker.find("tfoot th.clear").text(k[this.o.language].clear).toggle(this.o.clearBtn!==!1),this.updateNavArrows(),this.fillMonths();var m=b(e,f-1,28,0,0,0,0),n=l.getDaysInMonth(m.getUTCFullYear(),m.getUTCMonth());m.setUTCDate(n),m.setUTCDate(n-(m.getUTCDay()-this.o.weekStart+7)%7);var o=new Date(m);o.setUTCDate(o.getUTCDate()+42),o=o.valueOf();for(var p,q=[];m.valueOf()<o;){if(m.getUTCDay()==this.o.weekStart&&(q.push("<tr>"),this.o.calendarWeeks)){var r=new Date(+m+864e5*((this.o.weekStart-m.getUTCDay()-7)%7)),s=new Date(+r+864e5*((11-r.getUTCDay())%7)),t=new Date(+(t=b(s.getUTCFullYear(),0,1))+864e5*((11-t.getUTCDay())%7)),u=(s-t)/864e5/7+1;q.push('<td class="cw">'+u+"</td>")}p=this.getClassNames(m),p.push("day");var v=this.o.beforeShowDay(m);void 0===v?v={}:"boolean"==typeof v?v={enabled:v}:"string"==typeof v&&(v={classes:v}),v.enabled===!1&&p.push("disabled"),v.classes&&(p=p.concat(v.classes.split(/\s+/))),v.tooltip&&(c=v.tooltip),p=a.unique(p),q.push('<td class="'+p.join(" ")+'"'+(c?' title="'+c+'"':"")+">"+m.getUTCDate()+"</td>"),m.getUTCDay()==this.o.weekEnd&&q.push("</tr>"),m.setUTCDate(m.getUTCDate()+1)}this.picker.find(".datepicker-days tbody").empty().append(q.join(""));var w=this.date&&this.date.getUTCFullYear(),x=this.picker.find(".datepicker-months").find("th:eq(1)").text(e).end().find("span").removeClass("active");w&&w==e&&x.eq(this.date.getUTCMonth()).addClass("active"),(g>e||e>i)&&x.addClass("disabled"),e==g&&x.slice(0,h).addClass("disabled"),e==i&&x.slice(j+1).addClass("disabled"),q="",e=10*parseInt(e/10,10);var y=this.picker.find(".datepicker-years").find("th:eq(1)").text(e+"-"+(e+9)).end().find("td");e-=1;for(var z=-1;11>z;z++)q+='<span class="year'+(-1==z?" old":10==z?" new":"")+(w==e?" active":"")+(g>e||e>i?" 
disabled":"")+'">'+e+"</span>",e+=1;y.html(q)},updateNavArrows:function(){if(this._allow_update){var a=new Date(this.viewDate),b=a.getUTCFullYear(),c=a.getUTCMonth();switch(this.viewMode){case 0:this.o.startDate!==-1/0&&b<=this.o.startDate.getUTCFullYear()&&c<=this.o.startDate.getUTCMonth()?this.picker.find(".prev").css({visibility:"hidden"}):this.picker.find(".prev").css({visibility:"visible"}),1/0!==this.o.endDate&&b>=this.o.endDate.getUTCFullYear()&&c>=this.o.endDate.getUTCMonth()?this.picker.find(".next").css({visibility:"hidden"}):this.picker.find(".next").css({visibility:"visible"});break;case 1:case 2:this.o.startDate!==-1/0&&b<=this.o.startDate.getUTCFullYear()?this.picker.find(".prev").css({visibility:"hidden"}):this.picker.find(".prev").css({visibility:"visible"}),1/0!==this.o.endDate&&b>=this.o.endDate.getUTCFullYear()?this.picker.find(".next").css({visibility:"hidden"}):this.picker.find(".next").css({visibility:"visible"})}}},click:function(c){c.preventDefault();var d=a(c.target).closest("span, td, th");if(1==d.length)switch(d[0].nodeName.toLowerCase()){case"th":switch(d[0].className){case"datepicker-switch":this.showMode(1);break;case"prev":case"next":var e=l.modes[this.viewMode].navStep*("prev"==d[0].className?-1:1);switch(this.viewMode){case 0:this.viewDate=this.moveMonth(this.viewDate,e);break;case 1:case 2:this.viewDate=this.moveYear(this.viewDate,e)}this.fill();break;case"today":var f=new Date;f=b(f.getFullYear(),f.getMonth(),f.getDate(),0,0,0),this.showMode(-2);var g="linked"==this.o.todayBtn?null:"view";this._setDate(f,g);break;case"clear":var h;this.isInput?h=this.element:this.component&&(h=this.element.find("input")),h&&h.val("").change(),this._trigger("changeDate"),this.update(),this.o.autoclose&&this.hide()}break;case"span":if(!d.is(".disabled")){if(this.viewDate.setUTCDate(1),d.is(".month")){var i=1,j=d.parent().find("span").index(d),k=this.viewDate.getUTCFullYear();this.viewDate.setUTCMonth(j),this._trigger("changeMonth",this.viewDate),1===this.o.minViewMode&&this._setDate(b(k,j,i,0,0,0,0))}else{var k=parseInt(d.text(),10)||0,i=1,j=0;this.viewDate.setUTCFullYear(k),this._trigger("changeYear",this.viewDate),2===this.o.minViewMode&&this._setDate(b(k,j,i,0,0,0,0))}this.showMode(-1),this.fill()}break;case"td":if(d.is(".day")&&!d.is(".disabled")){var i=parseInt(d.text(),10)||1,k=this.viewDate.getUTCFullYear(),j=this.viewDate.getUTCMonth();d.is(".old")?0===j?(j=11,k-=1):j-=1:d.is(".new")&&(11==j?(j=0,k+=1):j+=1),this._setDate(b(k,j,i,0,0,0,0))}}},_setDate:function(a,b){b&&"date"!=b||(this.date=new Date(a)),b&&"view"!=b||(this.viewDate=new Date(a)),this.fill(),this.setValue(),this._trigger("changeDate");var c;this.isInput?c=this.element:this.component&&(c=this.element.find("input")),c&&(c.change(),!this.o.autoclose||b&&"date"!=b||this.hide())},moveMonth:function(a,b){if(!b)return a;var c,d,e=new Date(a.valueOf()),f=e.getUTCDate(),g=e.getUTCMonth(),h=Math.abs(b);if(b=b>0?1:-1,1==h)d=-1==b?function(){return e.getUTCMonth()==g}:function(){return e.getUTCMonth()!=c},c=g+b,e.setUTCMonth(c),(0>c||c>11)&&(c=(c+12)%12);else{for(var i=0;h>i;i++)e=this.moveMonth(e,b);c=e.getUTCMonth(),e.setUTCDate(f),d=function(){return c!=e.getUTCMonth()}}for(;d();)e.setUTCDate(--f),e.setUTCMonth(c);return e},moveYear:function(a,b){return this.moveMonth(a,12*b)},dateWithinRange:function(a){return a>=this.o.startDate&&a<=this.o.endDate},keydown:function(a){if(this.picker.is(":not(:visible)"))return 27==a.keyCode&&this.show(),void 0;var b,c,d,e=!1;switch(a.keyCode){case 
27:this.hide(),a.preventDefault();break;case 37:case 39:if(!this.o.keyboardNavigation)break;b=37==a.keyCode?-1:1,a.ctrlKey?(c=this.moveYear(this.date,b),d=this.moveYear(this.viewDate,b)):a.shiftKey?(c=this.moveMonth(this.date,b),d=this.moveMonth(this.viewDate,b)):(c=new Date(this.date),c.setUTCDate(this.date.getUTCDate()+b),d=new Date(this.viewDate),d.setUTCDate(this.viewDate.getUTCDate()+b)),this.dateWithinRange(c)&&(this.date=c,this.viewDate=d,this.setValue(),this.update(),a.preventDefault(),e=!0);break;case 38:case 40:if(!this.o.keyboardNavigation)break;b=38==a.keyCode?-1:1,a.ctrlKey?(c=this.moveYear(this.date,b),d=this.moveYear(this.viewDate,b)):a.shiftKey?(c=this.moveMonth(this.date,b),d=this.moveMonth(this.viewDate,b)):(c=new Date(this.date),c.setUTCDate(this.date.getUTCDate()+7*b),d=new Date(this.viewDate),d.setUTCDate(this.viewDate.getUTCDate()+7*b)),this.dateWithinRange(c)&&(this.date=c,this.viewDate=d,this.setValue(),this.update(),a.preventDefault(),e=!0);break;case 13:this.hide(),a.preventDefault();break;case 9:this.hide()}if(e){this._trigger("changeDate");var f;this.isInput?f=this.element:this.component&&(f=this.element.find("input")),f&&f.change()}},showMode:function(a){a&&(this.viewMode=Math.max(this.o.minViewMode,Math.min(2,this.viewMode+a))),this.picker.find(">div").hide().filter(".datepicker-"+l.modes[this.viewMode].clsName).css("display","block"),this.updateNavArrows()}};var f=function(b,c){this.element=a(b),this.inputs=a.map(c.inputs,function(a){return a.jquery?a[0]:a}),delete c.inputs,a(this.inputs).datepicker(c).bind("changeDate",a.proxy(this.dateUpdated,this)),this.pickers=a.map(this.inputs,function(b){return a(b).data("datepicker")}),this.updateDates()};f.prototype={updateDates:function(){this.dates=a.map(this.pickers,function(a){return a.date}),this.updateRanges()},updateRanges:function(){var b=a.map(this.dates,function(a){return a.valueOf()});a.each(this.pickers,function(a,c){c.setRange(b)})},dateUpdated:function(b){var c=a(b.target).data("datepicker"),d=c.getUTCDate(),e=a.inArray(b.target,this.inputs),f=this.inputs.length;if(-1!=e){if(d<this.dates[e])for(;e>=0&&d<this.dates[e];)this.pickers[e--].setUTCDate(d);else if(d>this.dates[e])for(;f>e&&d>this.dates[e];)this.pickers[e++].setUTCDate(d);this.updateDates()}},remove:function(){a.map(this.pickers,function(a){a.remove()}),delete this.element.data().datepicker}};var g=a.fn.datepicker,h=a.fn.datepicker=function(b){var g=Array.apply(null,arguments);g.shift();var h;return this.each(function(){var j=a(this),k=j.data("datepicker"),l="object"==typeof b&&b;if(!k){var m=c(this,"date"),n=a.extend({},i,m,l),o=d(n.language),p=a.extend({},i,o,m,l);if(j.is(".input-daterange")||p.inputs){var q={inputs:p.inputs||j.find("input").toArray()};j.data("datepicker",k=new f(this,a.extend(p,q)))}else j.data("datepicker",k=new e(this,p))}return"string"==typeof b&&"function"==typeof k[b]&&(h=k[b].apply(k,g),void 0!==h)?!1:void 0}),void 0!==h?h:this},i=a.fn.datepicker.defaults={autoclose:!1,beforeShowDay:a.noop,calendarWeeks:!1,clearBtn:!1,daysOfWeekDisabled:[],endDate:1/0,forceParse:!0,format:"mm/dd/yyyy",keyboardNavigation:!0,language:"en",minViewMode:0,rtl:!1,startDate:-1/0,startView:0,todayBtn:!1,todayHighlight:!1,weekStart:0},j=a.fn.datepicker.locale_opts=["format","rtl","weekStart"];a.fn.datepicker.Constructor=e;var 
k=a.fn.datepicker.dates={en:{days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"],daysShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat","Sun"],daysMin:["Su","Mo","Tu","We","Th","Fr","Sa","Su"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],monthsShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],today:"Today",clear:"Clear"}},l={modes:[{clsName:"days",navFnc:"Month",navStep:1},{clsName:"months",navFnc:"FullYear",navStep:1},{clsName:"years",navFnc:"FullYear",navStep:10}],isLeapYear:function(a){return 0===a%4&&0!==a%100||0===a%400},getDaysInMonth:function(a,b){return[31,l.isLeapYear(a)?29:28,31,30,31,30,31,31,30,31,30,31][b]},validParts:/dd?|DD?|mm?|MM?|yy(?:yy)?/g,nonpunctuation:/[^ -\/:-@\[\u3400-\u9fff-`{-~\t\n\r]+/g,parseFormat:function(a){var b=a.replace(this.validParts,"\0").split("\0"),c=a.match(this.validParts);if(!b||!b.length||!c||0===c.length)throw new Error("Invalid date format.");return{separators:b,parts:c}},parseDate:function(c,d,f){if(c instanceof Date)return c;if("string"==typeof d&&(d=l.parseFormat(d)),/^[\-+]\d+[dmwy]([\s,]+[\-+]\d+[dmwy])*$/.test(c)){var g,h,i=/([\-+]\d+)([dmwy])/,j=c.match(/([\-+]\d+)([dmwy])/g);c=new Date;for(var m=0;m<j.length;m++)switch(g=i.exec(j[m]),h=parseInt(g[1]),g[2]){case"d":c.setUTCDate(c.getUTCDate()+h);break;case"m":c=e.prototype.moveMonth.call(e.prototype,c,h);break;case"w":c.setUTCDate(c.getUTCDate()+7*h);break;case"y":c=e.prototype.moveYear.call(e.prototype,c,h)}return b(c.getUTCFullYear(),c.getUTCMonth(),c.getUTCDate(),0,0,0)}var n,o,g,j=c&&c.match(this.nonpunctuation)||[],c=new Date,p={},q=["yyyy","yy","M","MM","m","mm","d","dd"],r={yyyy:function(a,b){return a.setUTCFullYear(b)},yy:function(a,b){return a.setUTCFullYear(2e3+b)},m:function(a,b){for(b-=1;0>b;)b+=12;for(b%=12,a.setUTCMonth(b);a.getUTCMonth()!=b;)a.setUTCDate(a.getUTCDate()-1);return a},d:function(a,b){return a.setUTCDate(b)}};r.M=r.MM=r.mm=r.m,r.dd=r.d,c=b(c.getFullYear(),c.getMonth(),c.getDate(),0,0,0);var s=d.parts.slice();if(j.length!=s.length&&(s=a(s).filter(function(b,c){return-1!==a.inArray(c,q)}).toArray()),j.length==s.length){for(var m=0,t=s.length;t>m;m++){if(n=parseInt(j[m],10),g=s[m],isNaN(n))switch(g){case"MM":o=a(k[f].months).filter(function(){var a=this.slice(0,j[m].length),b=j[m].slice(0,a.length);
-return a==b}),n=a.inArray(o[0],k[f].months)+1;break;case"M":o=a(k[f].monthsShort).filter(function(){var a=this.slice(0,j[m].length),b=j[m].slice(0,a.length);return a==b}),n=a.inArray(o[0],k[f].monthsShort)+1}p[g]=n}for(var u,m=0;m<q.length;m++)u=q[m],u in p&&!isNaN(p[u])&&r[u](c,p[u])}return c},formatDate:function(b,c,d){"string"==typeof c&&(c=l.parseFormat(c));var e={d:b.getUTCDate(),D:k[d].daysShort[b.getUTCDay()],DD:k[d].days[b.getUTCDay()],m:b.getUTCMonth()+1,M:k[d].monthsShort[b.getUTCMonth()],MM:k[d].months[b.getUTCMonth()],yy:b.getUTCFullYear().toString().substring(2),yyyy:b.getUTCFullYear()};e.dd=(e.d<10?"0":"")+e.d,e.mm=(e.m<10?"0":"")+e.m;for(var b=[],f=a.extend([],c.separators),g=0,h=c.parts.length;h>=g;g++)f.length&&b.push(f.shift()),b.push(e[c.parts[g]]);return b.join("")},headTemplate:'<thead><tr><th class="prev"><i class="icon-arrow-left"/></th><th colspan="5" class="datepicker-switch"></th><th class="next"><i class="icon-arrow-right"/></th></tr></thead>',contTemplate:'<tbody><tr><td colspan="7"></td></tr></tbody>',footTemplate:'<tfoot><tr><th colspan="7" class="today"></th></tr><tr><th colspan="7" class="clear"></th></tr></tfoot>'};l.template='<div class="datepicker"><div class="datepicker-days"><table class=" table-condensed">'+l.headTemplate+"<tbody></tbody>"+l.footTemplate+"</table>"+"</div>"+'<div class="datepicker-months">'+'<table class="table-condensed">'+l.headTemplate+l.contTemplate+l.footTemplate+"</table>"+"</div>"+'<div class="datepicker-years">'+'<table class="table-condensed">'+l.headTemplate+l.contTemplate+l.footTemplate+"</table>"+"</div>"+"</div>",a.fn.datepicker.DPGlobal=l,a.fn.datepicker.noConflict=function(){return a.fn.datepicker=g,this},a(document).on("focus.datepicker.data-api click.datepicker.data-api",'[data-provide="datepicker"]',function(b){var c=a(this);c.data("datepicker")||(b.preventDefault(),h.call(c,"show"))}),a(function(){h.call(a('[data-provide="datepicker-inline"]'))})}(window.jQuery),function(a){"use strict";a.fn.bdatepicker=a.fn.datepicker.noConflict(),a.fn.datepicker||(a.fn.datepicker=a.fn.bdatepicker);var b=function(a){this.init("date",a,b.defaults),this.initPicker(a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.abstractinput),a.extend(b.prototype,{initPicker:function(b,c){this.options.viewformat||(this.options.viewformat=this.options.format),b.datepicker=a.fn.editableutils.tryParseJson(b.datepicker,!0),this.options.datepicker=a.extend({},c.datepicker,b.datepicker,{format:this.options.viewformat}),this.options.datepicker.language=this.options.datepicker.language||"en",this.dpg=a.fn.bdatepicker.DPGlobal,this.parsedFormat=this.dpg.parseFormat(this.options.format),this.parsedViewFormat=this.dpg.parseFormat(this.options.viewformat)},render:function(){this.$input.bdatepicker(this.options.datepicker),this.options.clear&&(this.$clear=a('<a href="#"></a>').html(this.options.clear).click(a.proxy(function(a){a.preventDefault(),a.stopPropagation(),this.clear()},this)),this.$tpl.parent().append(a('<div class="editable-clear">').append(this.$clear)))},value2html:function(a,c){var d=a?this.dpg.formatDate(a,this.parsedViewFormat,this.options.datepicker.language):"";b.superclass.value2html.call(this,d,c)},html2value:function(a){return this.parseDate(a,this.parsedViewFormat)},value2str:function(a){return a?this.dpg.formatDate(a,this.parsedFormat,this.options.datepicker.language):""},str2value:function(a){return this.parseDate(a,this.parsedFormat)},value2submit:function(a){return 
this.value2str(a)},value2input:function(a){this.$input.bdatepicker("update",a)},input2value:function(){return this.$input.data("datepicker").date},activate:function(){},clear:function(){this.$input.data("datepicker").date=null,this.$input.find(".active").removeClass("active"),this.options.showbuttons||this.$input.closest("form").submit()},autosubmit:function(){this.$input.on("mouseup",".day",function(b){if(!a(b.currentTarget).is(".old")&&!a(b.currentTarget).is(".new")){var c=a(this).closest("form");setTimeout(function(){c.submit()},200)}})},parseDate:function(a,b){var c,d=null;return a&&(d=this.dpg.parseDate(a,b,this.options.datepicker.language),"string"==typeof a&&(c=this.dpg.formatDate(d,b,this.options.datepicker.language),a!==c&&(d=null))),d}}),b.defaults=a.extend({},a.fn.editabletypes.abstractinput.defaults,{tpl:'<div class="editable-date well"></div>',inputclass:null,format:"yyyy-mm-dd",viewformat:null,datepicker:{weekStart:0,startView:0,minViewMode:0,autoclose:!1},clear:"&times; clear"}),a.fn.editabletypes.date=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("datefield",a,b.defaults),this.initPicker(a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.date),a.extend(b.prototype,{render:function(){this.$input=this.$tpl.find("input"),this.setClass(),this.setAttr("placeholder"),this.$tpl.bdatepicker(this.options.datepicker),this.$input.off("focus keydown"),this.$input.keyup(a.proxy(function(){this.$tpl.removeData("date"),this.$tpl.bdatepicker("update")},this))},value2input:function(a){this.$input.val(a?this.dpg.formatDate(a,this.parsedViewFormat,this.options.datepicker.language):""),this.$tpl.bdatepicker("update")},input2value:function(){return this.html2value(this.$input.val())},activate:function(){a.fn.editabletypes.text.prototype.activate.call(this)},autosubmit:function(){}}),b.defaults=a.extend({},a.fn.editabletypes.date.defaults,{tpl:'<div class="input-append date"><input type="text"/><span class="add-on"><i class="icon-th"></i></span></div>',inputclass:"input-small",datepicker:{weekStart:0,startView:0,minViewMode:0,autoclose:!0}}),a.fn.editabletypes.datefield=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("datetime",a,b.defaults),this.initPicker(a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.abstractinput),a.extend(b.prototype,{initPicker:function(b,c){this.options.viewformat||(this.options.viewformat=this.options.format),b.datetimepicker=a.fn.editableutils.tryParseJson(b.datetimepicker,!0),this.options.datetimepicker=a.extend({},c.datetimepicker,b.datetimepicker,{format:this.options.viewformat}),this.options.datetimepicker.language=this.options.datetimepicker.language||"en",this.dpg=a.fn.datetimepicker.DPGlobal,this.parsedFormat=this.dpg.parseFormat(this.options.format,this.options.formatType),this.parsedViewFormat=this.dpg.parseFormat(this.options.viewformat,this.options.formatType)},render:function(){this.$input.datetimepicker(this.options.datetimepicker),this.$input.on("changeMode",function(){var b=a(this).closest("form").parent();setTimeout(function(){b.triggerHandler("resize")},0)}),this.options.clear&&(this.$clear=a('<a href="#"></a>').html(this.options.clear).click(a.proxy(function(a){a.preventDefault(),a.stopPropagation(),this.clear()},this)),this.$tpl.parent().append(a('<div class="editable-clear">').append(this.$clear)))},value2html:function(a,c){var d=a?this.dpg.formatDate(this.toUTC(a),this.parsedViewFormat,this.options.datetimepicker.language,this.options.formatType):"";return 
c?(b.superclass.value2html.call(this,d,c),void 0):d},html2value:function(a){var b=this.parseDate(a,this.parsedViewFormat);return b?this.fromUTC(b):null},value2str:function(a){return a?this.dpg.formatDate(this.toUTC(a),this.parsedFormat,this.options.datetimepicker.language,this.options.formatType):""},str2value:function(a){var b=this.parseDate(a,this.parsedFormat);return b?this.fromUTC(b):null},value2submit:function(a){return this.value2str(a)},value2input:function(a){a&&this.$input.data("datetimepicker").setDate(a)},input2value:function(){var a=this.$input.data("datetimepicker");return a.date?a.getDate():null},activate:function(){},clear:function(){this.$input.data("datetimepicker").date=null,this.$input.find(".active").removeClass("active"),this.options.showbuttons||this.$input.closest("form").submit()},autosubmit:function(){this.$input.on("mouseup",".minute",function(){var b=a(this).closest("form");setTimeout(function(){b.submit()},200)})},toUTC:function(a){return a?new Date(a.valueOf()-6e4*a.getTimezoneOffset()):a},fromUTC:function(a){return a?new Date(a.valueOf()+6e4*a.getTimezoneOffset()):a},parseDate:function(a,b){var c,d=null;return a&&(d=this.dpg.parseDate(a,b,this.options.datetimepicker.language,this.options.formatType),"string"==typeof a&&(c=this.dpg.formatDate(d,b,this.options.datetimepicker.language,this.options.formatType),a!==c&&(d=null))),d}}),b.defaults=a.extend({},a.fn.editabletypes.abstractinput.defaults,{tpl:'<div class="editable-date well"></div>',inputclass:null,format:"yyyy-mm-dd hh:ii",formatType:"standard",viewformat:null,datetimepicker:{todayHighlight:!1,autoclose:!1},clear:"&times; clear"}),a.fn.editabletypes.datetime=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("datetimefield",a,b.defaults),this.initPicker(a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.datetime),a.extend(b.prototype,{render:function(){this.$input=this.$tpl.find("input"),this.setClass(),this.setAttr("placeholder"),this.$tpl.datetimepicker(this.options.datetimepicker),this.$input.off("focus keydown"),this.$input.keyup(a.proxy(function(){this.$tpl.removeData("date"),this.$tpl.datetimepicker("update")},this))},value2input:function(a){this.$input.val(this.value2html(a)),this.$tpl.datetimepicker("update")},input2value:function(){return this.html2value(this.$input.val())},activate:function(){a.fn.editabletypes.text.prototype.activate.call(this)},autosubmit:function(){}}),b.defaults=a.extend({},a.fn.editabletypes.datetime.defaults,{tpl:'<div class="input-append date"><input type="text"/><span class="add-on"><i class="icon-th"></i></span></div>',inputclass:"input-medium",datetimepicker:{todayHighlight:!1,autoclose:!0}}),a.fn.editabletypes.datetimefield=b}(window.jQuery);
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap.js b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap.js
deleted file mode 100644
index 170bd60..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap.js
+++ /dev/null
@@ -1,2580 +0,0 @@
-/*!
- * Bootstrap v3.4.1 (https://getbootstrap.com/)
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under the MIT license
- */
-
-if (typeof jQuery === 'undefined') {
-  throw new Error('Bootstrap\'s JavaScript requires jQuery')
-}
-
-+function ($) {
-  'use strict';
-  var version = $.fn.jquery.split(' ')[0].split('.')
-  if ((version[0] < 2 && version[1] < 9) || (version[0] == 1 && version[1] == 9 && version[2] < 1) || (version[0] > 3)) {
-    throw new Error('Bootstrap\'s JavaScript requires jQuery version 1.9.1 or higher, but lower than version 4')
-  }
-}(jQuery);
-
-/* ========================================================================
- * Bootstrap: transition.js v3.4.1
- * https://getbootstrap.com/docs/3.4/javascript/#transitions
- * ========================================================================
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- * ======================================================================== */
-
-
-+function ($) {
-  'use strict';
-
-  // CSS TRANSITION SUPPORT (Shoutout: https://modernizr.com/)
-  // ============================================================
-
-  function transitionEnd() {
-    var el = document.createElement('bootstrap')
-
-    var transEndEventNames = {
-      WebkitTransition : 'webkitTransitionEnd',
-      MozTransition    : 'transitionend',
-      OTransition      : 'oTransitionEnd otransitionend',
-      transition       : 'transitionend'
-    }
-
-    for (var name in transEndEventNames) {
-      if (el.style[name] !== undefined) {
-        return { end: transEndEventNames[name] }
-      }
-    }
-
-    return false // explicit for ie8 (  ._.)
-  }
-
-  // https://blog.alexmaccaw.com/css-transitions
-  $.fn.emulateTransitionEnd = function (duration) {
-    var called = false
-    var $el = this
-    $(this).one('bsTransitionEnd', function () { called = true })
-    var callback = function () { if (!called) $($el).trigger($.support.transition.end) }
-    setTimeout(callback, duration)
-    return this
-  }
-
-  $(function () {
-    $.support.transition = transitionEnd()
-
-    if (!$.support.transition) return
-
-    $.event.special.bsTransitionEnd = {
-      bindType: $.support.transition.end,
-      delegateType: $.support.transition.end,
-      handle: function (e) {
-        if ($(e.target).is(this)) return e.handleObj.handler.apply(this, arguments)
-      }
-    }
-  })
-
-}(jQuery);
-
-/* ========================================================================
- * Bootstrap: alert.js v3.4.1
- * https://getbootstrap.com/docs/3.4/javascript/#alerts
- * ========================================================================
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- * ======================================================================== */
-
-
-+function ($) {
-  'use strict';
-
-  // ALERT CLASS DEFINITION
-  // ======================
-
-  var dismiss = '[data-dismiss="alert"]'
-  var Alert   = function (el) {
-    $(el).on('click', dismiss, this.close)
-  }
-
-  Alert.VERSION = '3.4.1'
-
-  Alert.TRANSITION_DURATION = 150
-
-  Alert.prototype.close = function (e) {
-    var $this    = $(this)
-    var selector = $this.attr('data-target')
-
-    if (!selector) {
-      selector = $this.attr('href')
-      selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
-    }
-
-    selector    = selector === '#' ? [] : selector
-    var $parent = $(document).find(selector)
-
-    if (e) e.preventDefault()
-
-    if (!$parent.length) {
-      $parent = $this.closest('.alert')
-    }
-
-    $parent.trigger(e = $.Event('close.bs.alert'))
-
-    if (e.isDefaultPrevented()) return
-
-    $parent.removeClass('in')
-
-    function removeElement() {
-      // detach from parent, fire event then clean up data
-      $parent.detach().trigger('closed.bs.alert').remove()
-    }
-
-    $.support.transition && $parent.hasClass('fade') ?
-      $parent
-        .one('bsTransitionEnd', removeElement)
-        .emulateTransitionEnd(Alert.TRANSITION_DURATION) :
-      removeElement()
-  }
-
-
-  // ALERT PLUGIN DEFINITION
-  // =======================
-
-  function Plugin(option) {
-    return this.each(function () {
-      var $this = $(this)
-      var data  = $this.data('bs.alert')
-
-      if (!data) $this.data('bs.alert', (data = new Alert(this)))
-      if (typeof option == 'string') data[option].call($this)
-    })
-  }
-
-  var old = $.fn.alert
-
-  $.fn.alert             = Plugin
-  $.fn.alert.Constructor = Alert
-
-
-  // ALERT NO CONFLICT
-  // =================
-
-  $.fn.alert.noConflict = function () {
-    $.fn.alert = old
-    return this
-  }
-
-
-  // ALERT DATA-API
-  // ==============
-
-  $(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close)
-
-}(jQuery);
-
-/* ========================================================================
- * Bootstrap: button.js v3.4.1
- * https://getbootstrap.com/docs/3.4/javascript/#buttons
- * ========================================================================
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- * ======================================================================== */
-
-
-+function ($) {
-  'use strict';
-
-  // BUTTON PUBLIC CLASS DEFINITION
-  // ==============================
-
-  var Button = function (element, options) {
-    this.$element  = $(element)
-    this.options   = $.extend({}, Button.DEFAULTS, options)
-    this.isLoading = false
-  }
-
-  Button.VERSION  = '3.4.1'
-
-  Button.DEFAULTS = {
-    loadingText: 'loading...'
-  }
-
-  Button.prototype.setState = function (state) {
-    var d    = 'disabled'
-    var $el  = this.$element
-    var val  = $el.is('input') ? 'val' : 'html'
-    var data = $el.data()
-
-    state += 'Text'
-
-    if (data.resetText == null) $el.data('resetText', $el[val]())
-
-    // push to event loop to allow forms to submit
-    setTimeout($.proxy(function () {
-      $el[val](data[state] == null ? this.options[state] : data[state])
-
-      if (state == 'loadingText') {
-        this.isLoading = true
-        $el.addClass(d).attr(d, d).prop(d, true)
-      } else if (this.isLoading) {
-        this.isLoading = false
-        $el.removeClass(d).removeAttr(d).prop(d, false)
-      }
-    }, this), 0)
-  }
-
-  Button.prototype.toggle = function () {
-    var changed = true
-    var $parent = this.$element.closest('[data-toggle="buttons"]')
-
-    if ($parent.length) {
-      var $input = this.$element.find('input')
-      if ($input.prop('type') == 'radio') {
-        if ($input.prop('checked')) changed = false
-        $parent.find('.active').removeClass('active')
-        this.$element.addClass('active')
-      } else if ($input.prop('type') == 'checkbox') {
-        if (($input.prop('checked')) !== this.$element.hasClass('active')) changed = false
-        this.$element.toggleClass('active')
-      }
-      $input.prop('checked', this.$element.hasClass('active'))
-      if (changed) $input.trigger('change')
-    } else {
-      this.$element.attr('aria-pressed', !this.$element.hasClass('active'))
-      this.$element.toggleClass('active')
-    }
-  }
-
-
-  // BUTTON PLUGIN DEFINITION
-  // ========================
-
-  function Plugin(option) {
-    return this.each(function () {
-      var $this   = $(this)
-      var data    = $this.data('bs.button')
-      var options = typeof option == 'object' && option
-
-      if (!data) $this.data('bs.button', (data = new Button(this, options)))
-
-      if (option == 'toggle') data.toggle()
-      else if (option) data.setState(option)
-    })
-  }
-
-  var old = $.fn.button
-
-  $.fn.button             = Plugin
-  $.fn.button.Constructor = Button
-
-
-  // BUTTON NO CONFLICT
-  // ==================
-
-  $.fn.button.noConflict = function () {
-    $.fn.button = old
-    return this
-  }
-
-
-  // BUTTON DATA-API
-  // ===============
-
-  $(document)
-    .on('click.bs.button.data-api', '[data-toggle^="button"]', function (e) {
-      var $btn = $(e.target).closest('.btn')
-      Plugin.call($btn, 'toggle')
-      if (!($(e.target).is('input[type="radio"], input[type="checkbox"]'))) {
-        // Prevent double click on radios, and the double selections (so cancellation) on checkboxes
-        e.preventDefault()
-        // The target component still receive the focus
-        if ($btn.is('input,button')) $btn.trigger('focus')
-        else $btn.find('input:visible,button:visible').first().trigger('focus')
-      }
-    })
-    .on('focus.bs.button.data-api blur.bs.button.data-api', '[data-toggle^="button"]', function (e) {
-      $(e.target).closest('.btn').toggleClass('focus', /^focus(in)?$/.test(e.type))
-    })
-
-}(jQuery);
-
-/* ========================================================================
- * Bootstrap: carousel.js v3.4.1
- * https://getbootstrap.com/docs/3.4/javascript/#carousel
- * ========================================================================
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- * ======================================================================== */
-
-
-+function ($) {
-  'use strict';
-
-  // CAROUSEL CLASS DEFINITION
-  // =========================
-
-  var Carousel = function (element, options) {
-    this.$element    = $(element)
-    this.$indicators = this.$element.find('.carousel-indicators')
-    this.options     = options
-    this.paused      = null
-    this.sliding     = null
-    this.interval    = null
-    this.$active     = null
-    this.$items      = null
-
-    this.options.keyboard && this.$element.on('keydown.bs.carousel', $.proxy(this.keydown, this))
-
-    this.options.pause == 'hover' && !('ontouchstart' in document.documentElement) && this.$element
-      .on('mouseenter.bs.carousel', $.proxy(this.pause, this))
-      .on('mouseleave.bs.carousel', $.proxy(this.cycle, this))
-  }
-
-  Carousel.VERSION  = '3.4.1'
-
-  Carousel.TRANSITION_DURATION = 600
-
-  Carousel.DEFAULTS = {
-    interval: 5000,
-    pause: 'hover',
-    wrap: true,
-    keyboard: true
-  }
-
-  Carousel.prototype.keydown = function (e) {
-    if (/input|textarea/i.test(e.target.tagName)) return
-    switch (e.which) {
-      case 37: this.prev(); break
-      case 39: this.next(); break
-      default: return
-    }
-
-    e.preventDefault()
-  }
-
-  Carousel.prototype.cycle = function (e) {
-    e || (this.paused = false)
-
-    this.interval && clearInterval(this.interval)
-
-    this.options.interval
-      && !this.paused
-      && (this.interval = setInterval($.proxy(this.next, this), this.options.interval))
-
-    return this
-  }
-
-  Carousel.prototype.getItemIndex = function (item) {
-    this.$items = item.parent().children('.item')
-    return this.$items.index(item || this.$active)
-  }
-
-  Carousel.prototype.getItemForDirection = function (direction, active) {
-    var activeIndex = this.getItemIndex(active)
-    var willWrap = (direction == 'prev' && activeIndex === 0)
-                || (direction == 'next' && activeIndex == (this.$items.length - 1))
-    if (willWrap && !this.options.wrap) return active
-    var delta = direction == 'prev' ? -1 : 1
-    var itemIndex = (activeIndex + delta) % this.$items.length
-    return this.$items.eq(itemIndex)
-  }
-
-  Carousel.prototype.to = function (pos) {
-    var that        = this
-    var activeIndex = this.getItemIndex(this.$active = this.$element.find('.item.active'))
-
-    if (pos > (this.$items.length - 1) || pos < 0) return
-
-    if (this.sliding)       return this.$element.one('slid.bs.carousel', function () { that.to(pos) }) // yes, "slid"
-    if (activeIndex == pos) return this.pause().cycle()
-
-    return this.slide(pos > activeIndex ? 'next' : 'prev', this.$items.eq(pos))
-  }
-
-  Carousel.prototype.pause = function (e) {
-    e || (this.paused = true)
-
-    if (this.$element.find('.next, .prev').length && $.support.transition) {
-      this.$element.trigger($.support.transition.end)
-      this.cycle(true)
-    }
-
-    this.interval = clearInterval(this.interval)
-
-    return this
-  }
-
-  Carousel.prototype.next = function () {
-    if (this.sliding) return
-    return this.slide('next')
-  }
-
-  Carousel.prototype.prev = function () {
-    if (this.sliding) return
-    return this.slide('prev')
-  }
-
-  Carousel.prototype.slide = function (type, next) {
-    var $active   = this.$element.find('.item.active')
-    var $next     = next || this.getItemForDirection(type, $active)
-    var isCycling = this.interval
-    var direction = type == 'next' ? 'left' : 'right'
-    var that      = this
-
-    if ($next.hasClass('active')) return (this.sliding = false)
-
-    var relatedTarget = $next[0]
-    var slideEvent = $.Event('slide.bs.carousel', {
-      relatedTarget: relatedTarget,
-      direction: direction
-    })
-    this.$element.trigger(slideEvent)
-    if (slideEvent.isDefaultPrevented()) return
-
-    this.sliding = true
-
-    isCycling && this.pause()
-
-    if (this.$indicators.length) {
-      this.$indicators.find('.active').removeClass('active')
-      var $nextIndicator = $(this.$indicators.children()[this.getItemIndex($next)])
-      $nextIndicator && $nextIndicator.addClass('active')
-    }
-
-    var slidEvent = $.Event('slid.bs.carousel', { relatedTarget: relatedTarget, direction: direction }) // yes, "slid"
-    if ($.support.transition && this.$element.hasClass('slide')) {
-      $next.addClass(type)
-      if (typeof $next === 'object' && $next.length) {
-        $next[0].offsetWidth // force reflow
-      }
-      $active.addClass(direction)
-      $next.addClass(direction)
-      $active
-        .one('bsTransitionEnd', function () {
-          $next.removeClass([type, direction].join(' ')).addClass('active')
-          $active.removeClass(['active', direction].join(' '))
-          that.sliding = false
-          setTimeout(function () {
-            that.$element.trigger(slidEvent)
-          }, 0)
-        })
-        .emulateTransitionEnd(Carousel.TRANSITION_DURATION)
-    } else {
-      $active.removeClass('active')
-      $next.addClass('active')
-      this.sliding = false
-      this.$element.trigger(slidEvent)
-    }
-
-    isCycling && this.cycle()
-
-    return this
-  }
-
-
-  // CAROUSEL PLUGIN DEFINITION
-  // ==========================
-
-  function Plugin(option) {
-    return this.each(function () {
-      var $this   = $(this)
-      var data    = $this.data('bs.carousel')
-      var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option)
-      var action  = typeof option == 'string' ? option : options.slide
-
-      if (!data) $this.data('bs.carousel', (data = new Carousel(this, options)))
-      if (typeof option == 'number') data.to(option)
-      else if (action) data[action]()
-      else if (options.interval) data.pause().cycle()
-    })
-  }
-
-  var old = $.fn.carousel
-
-  $.fn.carousel             = Plugin
-  $.fn.carousel.Constructor = Carousel
-
-
-  // CAROUSEL NO CONFLICT
-  // ====================
-
-  $.fn.carousel.noConflict = function () {
-    $.fn.carousel = old
-    return this
-  }
-
-
-  // CAROUSEL DATA-API
-  // =================
-
-  var clickHandler = function (e) {
-    var $this   = $(this)
-    var href    = $this.attr('href')
-    if (href) {
-      href = href.replace(/.*(?=#[^\s]+$)/, '') // strip for ie7
-    }
-
-    var target  = $this.attr('data-target') || href
-    var $target = $(document).find(target)
-
-    if (!$target.hasClass('carousel')) return
-
-    var options = $.extend({}, $target.data(), $this.data())
-    var slideIndex = $this.attr('data-slide-to')
-    if (slideIndex) options.interval = false
-
-    Plugin.call($target, options)
-
-    if (slideIndex) {
-      $target.data('bs.carousel').to(slideIndex)
-    }
-
-    e.preventDefault()
-  }
-
-  $(document)
-    .on('click.bs.carousel.data-api', '[data-slide]', clickHandler)
-    .on('click.bs.carousel.data-api', '[data-slide-to]', clickHandler)
-
-  $(window).on('load', function () {
-    $('[data-ride="carousel"]').each(function () {
-      var $carousel = $(this)
-      Plugin.call($carousel, $carousel.data())
-    })
-  })
-
-}(jQuery);
-
-/* ========================================================================
- * Bootstrap: collapse.js v3.4.1
- * https://getbootstrap.com/docs/3.4/javascript/#collapse
- * ========================================================================
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- * ======================================================================== */
-
-/* jshint latedef: false */
-
-+function ($) {
-  'use strict';
-
-  // COLLAPSE PUBLIC CLASS DEFINITION
-  // ================================
-
-  var Collapse = function (element, options) {
-    this.$element      = $(element)
-    this.options       = $.extend({}, Collapse.DEFAULTS, options)
-    this.$trigger      = $('[data-toggle="collapse"][href="#' + element.id + '"],' +
-                           '[data-toggle="collapse"][data-target="#' + element.id + '"]')
-    this.transitioning = null
-
-    if (this.options.parent) {
-      this.$parent = this.getParent()
-    } else {
-      this.addAriaAndCollapsedClass(this.$element, this.$trigger)
-    }
-
-    if (this.options.toggle) this.toggle()
-  }
-
-  Collapse.VERSION  = '3.4.1'
-
-  Collapse.TRANSITION_DURATION = 350
-
-  Collapse.DEFAULTS = {
-    toggle: true
-  }
-
-  Collapse.prototype.dimension = function () {
-    var hasWidth = this.$element.hasClass('width')
-    return hasWidth ? 'width' : 'height'
-  }
-
-  Collapse.prototype.show = function () {
-    if (this.transitioning || this.$element.hasClass('in')) return
-
-    var activesData
-    var actives = this.$parent && this.$parent.children('.panel').children('.in, .collapsing')
-
-    if (actives && actives.length) {
-      activesData = actives.data('bs.collapse')
-      if (activesData && activesData.transitioning) return
-    }
-
-    var startEvent = $.Event('show.bs.collapse')
-    this.$element.trigger(startEvent)
-    if (startEvent.isDefaultPrevented()) return
-
-    if (actives && actives.length) {
-      Plugin.call(actives, 'hide')
-      activesData || actives.data('bs.collapse', null)
-    }
-
-    var dimension = this.dimension()
-
-    this.$element
-      .removeClass('collapse')
-      .addClass('collapsing')[dimension](0)
-      .attr('aria-expanded', true)
-
-    this.$trigger
-      .removeClass('collapsed')
-      .attr('aria-expanded', true)
-
-    this.transitioning = 1
-
-    var complete = function () {
-      this.$element
-        .removeClass('collapsing')
-        .addClass('collapse in')[dimension]('')
-      this.transitioning = 0
-      this.$element
-        .trigger('shown.bs.collapse')
-    }
-
-    if (!$.support.transition) return complete.call(this)
-
-    var scrollSize = $.camelCase(['scroll', dimension].join('-'))
-
-    this.$element
-      .one('bsTransitionEnd', $.proxy(complete, this))
-      .emulateTransitionEnd(Collapse.TRANSITION_DURATION)[dimension](this.$element[0][scrollSize])
-  }
-
-  Collapse.prototype.hide = function () {
-    if (this.transitioning || !this.$element.hasClass('in')) return
-
-    var startEvent = $.Event('hide.bs.collapse')
-    this.$element.trigger(startEvent)
-    if (startEvent.isDefaultPrevented()) return
-
-    var dimension = this.dimension()
-
-    this.$element[dimension](this.$element[dimension]())[0].offsetHeight
-
-    this.$element
-      .addClass('collapsing')
-      .removeClass('collapse in')
-      .attr('aria-expanded', false)
-
-    this.$trigger
-      .addClass('collapsed')
-      .attr('aria-expanded', false)
-
-    this.transitioning = 1
-
-    var complete = function () {
-      this.transitioning = 0
-      this.$element
-        .removeClass('collapsing')
-        .addClass('collapse')
-        .trigger('hidden.bs.collapse')
-    }
-
-    if (!$.support.transition) return complete.call(this)
-
-    this.$element
-      [dimension](0)
-      .one('bsTransitionEnd', $.proxy(complete, this))
-      .emulateTransitionEnd(Collapse.TRANSITION_DURATION)
-  }
-
-  Collapse.prototype.toggle = function () {
-    this[this.$element.hasClass('in') ? 'hide' : 'show']()
-  }
-
-  Collapse.prototype.getParent = function () {
-    return $(document).find(this.options.parent)
-      .find('[data-toggle="collapse"][data-parent="' + this.options.parent + '"]')
-      .each($.proxy(function (i, element) {
-        var $element = $(element)
-        this.addAriaAndCollapsedClass(getTargetFromTrigger($element), $element)
-      }, this))
-      .end()
-  }
-
-  Collapse.prototype.addAriaAndCollapsedClass = function ($element, $trigger) {
-    var isOpen = $element.hasClass('in')
-
-    $element.attr('aria-expanded', isOpen)
-    $trigger
-      .toggleClass('collapsed', !isOpen)
-      .attr('aria-expanded', isOpen)
-  }
-
-  function getTargetFromTrigger($trigger) {
-    var href
-    var target = $trigger.attr('data-target')
-      || (href = $trigger.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') // strip for ie7
-
-    return $(document).find(target)
-  }
-
-
-  // COLLAPSE PLUGIN DEFINITION
-  // ==========================
-
-  function Plugin(option) {
-    return this.each(function () {
-      var $this   = $(this)
-      var data    = $this.data('bs.collapse')
-      var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option)
-
-      if (!data && options.toggle && /show|hide/.test(option)) options.toggle = false
-      if (!data) $this.data('bs.collapse', (data = new Collapse(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  var old = $.fn.collapse
-
-  $.fn.collapse             = Plugin
-  $.fn.collapse.Constructor = Collapse
-
-
-  // COLLAPSE NO CONFLICT
-  // ====================
-
-  $.fn.collapse.noConflict = function () {
-    $.fn.collapse = old
-    return this
-  }
-
-
-  // COLLAPSE DATA-API
-  // =================
-
-  $(document).on('click.bs.collapse.data-api', '[data-toggle="collapse"]', function (e) {
-    var $this   = $(this)
-
-    if (!$this.attr('data-target')) e.preventDefault()
-
-    var $target = getTargetFromTrigger($this)
-    var data    = $target.data('bs.collapse')
-    var option  = data ? 'toggle' : $this.data()
-
-    Plugin.call($target, option)
-  })
-
-}(jQuery);
-
-/* ========================================================================
- * Bootstrap: dropdown.js v3.4.1
- * https://getbootstrap.com/docs/3.4/javascript/#dropdowns
- * ========================================================================
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- * ======================================================================== */
-
-
-+function ($) {
-  'use strict';
-
-  // DROPDOWN CLASS DEFINITION
-  // =========================
-
-  var backdrop = '.dropdown-backdrop'
-  var toggle   = '[data-toggle="dropdown"]'
-  var Dropdown = function (element) {
-    $(element).on('click.bs.dropdown', this.toggle)
-  }
-
-  Dropdown.VERSION = '3.4.1'
-
-  function getParent($this) {
-    var selector = $this.attr('data-target')
-
-    if (!selector) {
-      selector = $this.attr('href')
-      selector = selector && /#[A-Za-z]/.test(selector) && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
-    }
-
-    var $parent = selector !== '#' ? $(document).find(selector) : null
-
-    return $parent && $parent.length ? $parent : $this.parent()
-  }
-
-  function clearMenus(e) {
-    if (e && e.which === 3) return
-    $(backdrop).remove()
-    $(toggle).each(function () {
-      var $this         = $(this)
-      var $parent       = getParent($this)
-      var relatedTarget = { relatedTarget: this }
-
-      if (!$parent.hasClass('open')) return
-
-      if (e && e.type == 'click' && /input|textarea/i.test(e.target.tagName) && $.contains($parent[0], e.target)) return
-
-      $parent.trigger(e = $.Event('hide.bs.dropdown', relatedTarget))
-
-      if (e.isDefaultPrevented()) return
-
-      $this.attr('aria-expanded', 'false')
-      $parent.removeClass('open').trigger($.Event('hidden.bs.dropdown', relatedTarget))
-    })
-  }
-
-  Dropdown.prototype.toggle = function (e) {
-    var $this = $(this)
-
-    if ($this.is('.disabled, :disabled')) return
-
-    var $parent  = getParent($this)
-    var isActive = $parent.hasClass('open')
-
-    clearMenus()
-
-    if (!isActive) {
-      if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) {
-        // if mobile we use a backdrop because click events don't delegate
-        $(document.createElement('div'))
-          .addClass('dropdown-backdrop')
-          .insertAfter($(this))
-          .on('click', clearMenus)
-      }
-
-      var relatedTarget = { relatedTarget: this }
-      $parent.trigger(e = $.Event('show.bs.dropdown', relatedTarget))
-
-      if (e.isDefaultPrevented()) return
-
-      $this
-        .trigger('focus')
-        .attr('aria-expanded', 'true')
-
-      $parent
-        .toggleClass('open')
-        .trigger($.Event('shown.bs.dropdown', relatedTarget))
-    }
-
-    return false
-  }
-
-  Dropdown.prototype.keydown = function (e) {
-    if (!/(38|40|27|32)/.test(e.which) || /input|textarea/i.test(e.target.tagName)) return
-
-    var $this = $(this)
-
-    e.preventDefault()
-    e.stopPropagation()
-
-    if ($this.is('.disabled, :disabled')) return
-
-    var $parent  = getParent($this)
-    var isActive = $parent.hasClass('open')
-
-    if (!isActive && e.which != 27 || isActive && e.which == 27) {
-      if (e.which == 27) $parent.find(toggle).trigger('focus')
-      return $this.trigger('click')
-    }
-
-    var desc = ' li:not(.disabled):visible a'
-    var $items = $parent.find('.dropdown-menu' + desc)
-
-    if (!$items.length) return
-
-    var index = $items.index(e.target)
-
-    if (e.which == 38 && index > 0)                 index--         // up
-    if (e.which == 40 && index < $items.length - 1) index++         // down
-    if (!~index)                                    index = 0
-
-    $items.eq(index).trigger('focus')
-  }
-
-
-  // DROPDOWN PLUGIN DEFINITION
-  // ==========================
-
-  function Plugin(option) {
-    return this.each(function () {
-      var $this = $(this)
-      var data  = $this.data('bs.dropdown')
-
-      if (!data) $this.data('bs.dropdown', (data = new Dropdown(this)))
-      if (typeof option == 'string') data[option].call($this)
-    })
-  }
-
-  var old = $.fn.dropdown
-
-  $.fn.dropdown             = Plugin
-  $.fn.dropdown.Constructor = Dropdown
-
-
-  // DROPDOWN NO CONFLICT
-  // ====================
-
-  $.fn.dropdown.noConflict = function () {
-    $.fn.dropdown = old
-    return this
-  }
-
-
-  // APPLY TO STANDARD DROPDOWN ELEMENTS
-  // ===================================
-
-  $(document)
-    .on('click.bs.dropdown.data-api', clearMenus)
-    .on('click.bs.dropdown.data-api', '.dropdown form', function (e) { e.stopPropagation() })
-    .on('click.bs.dropdown.data-api', toggle, Dropdown.prototype.toggle)
-    .on('keydown.bs.dropdown.data-api', toggle, Dropdown.prototype.keydown)
-    .on('keydown.bs.dropdown.data-api', '.dropdown-menu', Dropdown.prototype.keydown)
-
-}(jQuery);
-
-/* ========================================================================
- * Bootstrap: modal.js v3.4.1
- * https://getbootstrap.com/docs/3.4/javascript/#modals
- * ========================================================================
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- * ======================================================================== */
-
-
-+function ($) {
-  'use strict';
-
-  // MODAL CLASS DEFINITION
-  // ======================
-
-  var Modal = function (element, options) {
-    this.options = options
-    this.$body = $(document.body)
-    this.$element = $(element)
-    this.$dialog = this.$element.find('.modal-dialog')
-    this.$backdrop = null
-    this.isShown = null
-    this.originalBodyPad = null
-    this.scrollbarWidth = 0
-    this.ignoreBackdropClick = false
-    this.fixedContent = '.navbar-fixed-top, .navbar-fixed-bottom'
-
-    if (this.options.remote) {
-      this.$element
-        .find('.modal-content')
-        .load(this.options.remote, $.proxy(function () {
-          this.$element.trigger('loaded.bs.modal')
-        }, this))
-    }
-  }
-
-  Modal.VERSION = '3.4.1'
-
-  Modal.TRANSITION_DURATION = 300
-  Modal.BACKDROP_TRANSITION_DURATION = 150
-
-  Modal.DEFAULTS = {
-    backdrop: true,
-    keyboard: true,
-    show: true
-  }
-
-  Modal.prototype.toggle = function (_relatedTarget) {
-    return this.isShown ? this.hide() : this.show(_relatedTarget)
-  }
-
-  Modal.prototype.show = function (_relatedTarget) {
-    var that = this
-    var e = $.Event('show.bs.modal', { relatedTarget: _relatedTarget })
-
-    this.$element.trigger(e)
-
-    if (this.isShown || e.isDefaultPrevented()) return
-
-    this.isShown = true
-
-    this.checkScrollbar()
-    this.setScrollbar()
-    this.$body.addClass('modal-open')
-
-    this.escape()
-    this.resize()
-
-    this.$element.on('click.dismiss.bs.modal', '[data-dismiss="modal"]', $.proxy(this.hide, this))
-
-    this.$dialog.on('mousedown.dismiss.bs.modal', function () {
-      that.$element.one('mouseup.dismiss.bs.modal', function (e) {
-        if ($(e.target).is(that.$element)) that.ignoreBackdropClick = true
-      })
-    })
-
-    this.backdrop(function () {
-      var transition = $.support.transition && that.$element.hasClass('fade')
-
-      if (!that.$element.parent().length) {
-        that.$element.appendTo(that.$body) // don't move modals dom position
-      }
-
-      that.$element
-        .show()
-        .scrollTop(0)
-
-      that.adjustDialog()
-
-      if (transition) {
-        that.$element[0].offsetWidth // force reflow
-      }
-
-      that.$element.addClass('in')
-
-      that.enforceFocus()
-
-      var e = $.Event('shown.bs.modal', { relatedTarget: _relatedTarget })
-
-      transition ?
-        that.$dialog // wait for modal to slide in
-          .one('bsTransitionEnd', function () {
-            that.$element.trigger('focus').trigger(e)
-          })
-          .emulateTransitionEnd(Modal.TRANSITION_DURATION) :
-        that.$element.trigger('focus').trigger(e)
-    })
-  }
-
-  Modal.prototype.hide = function (e) {
-    if (e) e.preventDefault()
-
-    e = $.Event('hide.bs.modal')
-
-    this.$element.trigger(e)
-
-    if (!this.isShown || e.isDefaultPrevented()) return
-
-    this.isShown = false
-
-    this.escape()
-    this.resize()
-
-    $(document).off('focusin.bs.modal')
-
-    this.$element
-      .removeClass('in')
-      .off('click.dismiss.bs.modal')
-      .off('mouseup.dismiss.bs.modal')
-
-    this.$dialog.off('mousedown.dismiss.bs.modal')
-
-    $.support.transition && this.$element.hasClass('fade') ?
-      this.$element
-        .one('bsTransitionEnd', $.proxy(this.hideModal, this))
-        .emulateTransitionEnd(Modal.TRANSITION_DURATION) :
-      this.hideModal()
-  }
-
-  Modal.prototype.enforceFocus = function () {
-    $(document)
-      .off('focusin.bs.modal') // guard against infinite focus loop
-      .on('focusin.bs.modal', $.proxy(function (e) {
-        if (document !== e.target &&
-          this.$element[0] !== e.target &&
-          !this.$element.has(e.target).length) {
-          this.$element.trigger('focus')
-        }
-      }, this))
-  }
-
-  Modal.prototype.escape = function () {
-    if (this.isShown && this.options.keyboard) {
-      this.$element.on('keydown.dismiss.bs.modal', $.proxy(function (e) {
-        e.which == 27 && this.hide()
-      }, this))
-    } else if (!this.isShown) {
-      this.$element.off('keydown.dismiss.bs.modal')
-    }
-  }
-
-  Modal.prototype.resize = function () {
-    if (this.isShown) {
-      $(window).on('resize.bs.modal', $.proxy(this.handleUpdate, this))
-    } else {
-      $(window).off('resize.bs.modal')
-    }
-  }
-
-  Modal.prototype.hideModal = function () {
-    var that = this
-    this.$element.hide()
-    this.backdrop(function () {
-      that.$body.removeClass('modal-open')
-      that.resetAdjustments()
-      that.resetScrollbar()
-      that.$element.trigger('hidden.bs.modal')
-    })
-  }
-
-  Modal.prototype.removeBackdrop = function () {
-    this.$backdrop && this.$backdrop.remove()
-    this.$backdrop = null
-  }
-
-  Modal.prototype.backdrop = function (callback) {
-    var that = this
-    var animate = this.$element.hasClass('fade') ? 'fade' : ''
-
-    if (this.isShown && this.options.backdrop) {
-      var doAnimate = $.support.transition && animate
-
-      this.$backdrop = $(document.createElement('div'))
-        .addClass('modal-backdrop ' + animate)
-        .appendTo(this.$body)
-
-      this.$element.on('click.dismiss.bs.modal', $.proxy(function (e) {
-        if (this.ignoreBackdropClick) {
-          this.ignoreBackdropClick = false
-          return
-        }
-        if (e.target !== e.currentTarget) return
-        this.options.backdrop == 'static'
-          ? this.$element[0].focus()
-          : this.hide()
-      }, this))
-
-      if (doAnimate) this.$backdrop[0].offsetWidth // force reflow
-
-      this.$backdrop.addClass('in')
-
-      if (!callback) return
-
-      doAnimate ?
-        this.$backdrop
-          .one('bsTransitionEnd', callback)
-          .emulateTransitionEnd(Modal.BACKDROP_TRANSITION_DURATION) :
-        callback()
-
-    } else if (!this.isShown && this.$backdrop) {
-      this.$backdrop.removeClass('in')
-
-      var callbackRemove = function () {
-        that.removeBackdrop()
-        callback && callback()
-      }
-      $.support.transition && this.$element.hasClass('fade') ?
-        this.$backdrop
-          .one('bsTransitionEnd', callbackRemove)
-          .emulateTransitionEnd(Modal.BACKDROP_TRANSITION_DURATION) :
-        callbackRemove()
-
-    } else if (callback) {
-      callback()
-    }
-  }
-
-  // these following methods are used to handle overflowing modals
-
-  Modal.prototype.handleUpdate = function () {
-    this.adjustDialog()
-  }
-
-  Modal.prototype.adjustDialog = function () {
-    var modalIsOverflowing = this.$element[0].scrollHeight > document.documentElement.clientHeight
-
-    this.$element.css({
-      paddingLeft: !this.bodyIsOverflowing && modalIsOverflowing ? this.scrollbarWidth : '',
-      paddingRight: this.bodyIsOverflowing && !modalIsOverflowing ? this.scrollbarWidth : ''
-    })
-  }
-
-  Modal.prototype.resetAdjustments = function () {
-    this.$element.css({
-      paddingLeft: '',
-      paddingRight: ''
-    })
-  }
-
-  Modal.prototype.checkScrollbar = function () {
-    var fullWindowWidth = window.innerWidth
-    if (!fullWindowWidth) { // workaround for missing window.innerWidth in IE8
-      var documentElementRect = document.documentElement.getBoundingClientRect()
-      fullWindowWidth = documentElementRect.right - Math.abs(documentElementRect.left)
-    }
-    this.bodyIsOverflowing = document.body.clientWidth < fullWindowWidth
-    this.scrollbarWidth = this.measureScrollbar()
-  }
-
-  Modal.prototype.setScrollbar = function () {
-    var bodyPad = parseInt((this.$body.css('padding-right') || 0), 10)
-    this.originalBodyPad = document.body.style.paddingRight || ''
-    var scrollbarWidth = this.scrollbarWidth
-    if (this.bodyIsOverflowing) {
-      this.$body.css('padding-right', bodyPad + scrollbarWidth)
-      $(this.fixedContent).each(function (index, element) {
-        var actualPadding = element.style.paddingRight
-        var calculatedPadding = $(element).css('padding-right')
-        $(element)
-          .data('padding-right', actualPadding)
-          .css('padding-right', parseFloat(calculatedPadding) + scrollbarWidth + 'px')
-      })
-    }
-  }
-
-  Modal.prototype.resetScrollbar = function () {
-    this.$body.css('padding-right', this.originalBodyPad)
-    $(this.fixedContent).each(function (index, element) {
-      var padding = $(element).data('padding-right')
-      $(element).removeData('padding-right')
-      element.style.paddingRight = padding ? padding : ''
-    })
-  }
-
-  Modal.prototype.measureScrollbar = function () { // thx walsh
-    var scrollDiv = document.createElement('div')
-    scrollDiv.className = 'modal-scrollbar-measure'
-    this.$body.append(scrollDiv)
-    var scrollbarWidth = scrollDiv.offsetWidth - scrollDiv.clientWidth
-    this.$body[0].removeChild(scrollDiv)
-    return scrollbarWidth
-  }
-
-
-  // MODAL PLUGIN DEFINITION
-  // =======================
-
-  function Plugin(option, _relatedTarget) {
-    return this.each(function () {
-      var $this = $(this)
-      var data = $this.data('bs.modal')
-      var options = $.extend({}, Modal.DEFAULTS, $this.data(), typeof option == 'object' && option)
-
-      if (!data) $this.data('bs.modal', (data = new Modal(this, options)))
-      if (typeof option == 'string') data[option](_relatedTarget)
-      else if (options.show) data.show(_relatedTarget)
-    })
-  }
-
-  var old = $.fn.modal
-
-  $.fn.modal = Plugin
-  $.fn.modal.Constructor = Modal
-
-
-  // MODAL NO CONFLICT
-  // =================
-
-  $.fn.modal.noConflict = function () {
-    $.fn.modal = old
-    return this
-  }
-
-
-  // MODAL DATA-API
-  // ==============
-
-  $(document).on('click.bs.modal.data-api', '[data-toggle="modal"]', function (e) {
-    var $this = $(this)
-    var href = $this.attr('href')
-    var target = $this.attr('data-target') ||
-      (href && href.replace(/.*(?=#[^\s]+$)/, '')) // strip for ie7
-
-    var $target = $(document).find(target)
-    var option = $target.data('bs.modal') ? 'toggle' : $.extend({ remote: !/#/.test(href) && href }, $target.data(), $this.data())
-
-    if ($this.is('a')) e.preventDefault()
-
-    $target.one('show.bs.modal', function (showEvent) {
-      if (showEvent.isDefaultPrevented()) return // only register focus restorer if modal will actually get shown
-      $target.one('hidden.bs.modal', function () {
-        $this.is(':visible') && $this.trigger('focus')
-      })
-    })
-    Plugin.call($target, option, this)
-  })
-
-}(jQuery);
-
-/* ========================================================================
- * Bootstrap: tooltip.js v3.4.1
- * https://getbootstrap.com/docs/3.4/javascript/#tooltip
- * Inspired by the original jQuery.tipsy by Jason Frame
- * ========================================================================
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- * ======================================================================== */
-
-+function ($) {
-  'use strict';
-
-  var DISALLOWED_ATTRIBUTES = ['sanitize', 'whiteList', 'sanitizeFn']
-
-  var uriAttrs = [
-    'background',
-    'cite',
-    'href',
-    'itemtype',
-    'longdesc',
-    'poster',
-    'src',
-    'xlink:href'
-  ]
-
-  var ARIA_ATTRIBUTE_PATTERN = /^aria-[\w-]*$/i
-
-  var DefaultWhitelist = {
-    // Global attributes allowed on any supplied element below.
-    '*': ['class', 'dir', 'id', 'lang', 'role', ARIA_ATTRIBUTE_PATTERN],
-    a: ['target', 'href', 'title', 'rel'],
-    area: [],
-    b: [],
-    br: [],
-    col: [],
-    code: [],
-    div: [],
-    em: [],
-    hr: [],
-    h1: [],
-    h2: [],
-    h3: [],
-    h4: [],
-    h5: [],
-    h6: [],
-    i: [],
-    img: ['src', 'alt', 'title', 'width', 'height'],
-    li: [],
-    ol: [],
-    p: [],
-    pre: [],
-    s: [],
-    small: [],
-    span: [],
-    sub: [],
-    sup: [],
-    strong: [],
-    u: [],
-    ul: []
-  }
-
-  /**
-   * A pattern that recognizes a commonly useful subset of URLs that are safe.
-   *
-   * Shoutout to Angular 7 https://github.com/angular/angular/blob/7.2.4/packages/core/src/sanitization/url_sanitizer.ts
-   */
-  var SAFE_URL_PATTERN = /^(?:(?:https?|mailto|ftp|tel|file):|[^&:/?#]*(?:[/?#]|$))/gi
-
-  /**
-   * A pattern that matches safe data URLs. Only matches image, video and audio types.
-   *
-   * Shoutout to Angular 7 https://github.com/angular/angular/blob/7.2.4/packages/core/src/sanitization/url_sanitizer.ts
-   */
-  var DATA_URL_PATTERN = /^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[a-z0-9+/]+=*$/i
-
-  function allowedAttribute(attr, allowedAttributeList) {
-    var attrName = attr.nodeName.toLowerCase()
-
-    if ($.inArray(attrName, allowedAttributeList) !== -1) {
-      if ($.inArray(attrName, uriAttrs) !== -1) {
-        return Boolean(attr.nodeValue.match(SAFE_URL_PATTERN) || attr.nodeValue.match(DATA_URL_PATTERN))
-      }
-
-      return true
-    }
-
-    var regExp = $(allowedAttributeList).filter(function (index, value) {
-      return value instanceof RegExp
-    })
-
-    // Check if a regular expression validates the attribute.
-    for (var i = 0, l = regExp.length; i < l; i++) {
-      if (attrName.match(regExp[i])) {
-        return true
-      }
-    }
-
-    return false
-  }
-
-  function sanitizeHtml(unsafeHtml, whiteList, sanitizeFn) {
-    if (unsafeHtml.length === 0) {
-      return unsafeHtml
-    }
-
-    if (sanitizeFn && typeof sanitizeFn === 'function') {
-      return sanitizeFn(unsafeHtml)
-    }
-
-    // IE 8 and below don't support createHTMLDocument
-    if (!document.implementation || !document.implementation.createHTMLDocument) {
-      return unsafeHtml
-    }
-
-    var createdDocument = document.implementation.createHTMLDocument('sanitization')
-    createdDocument.body.innerHTML = unsafeHtml
-
-    var whitelistKeys = $.map(whiteList, function (el, i) { return i })
-    var elements = $(createdDocument.body).find('*')
-
-    for (var i = 0, len = elements.length; i < len; i++) {
-      var el = elements[i]
-      var elName = el.nodeName.toLowerCase()
-
-      if ($.inArray(elName, whitelistKeys) === -1) {
-        el.parentNode.removeChild(el)
-
-        continue
-      }
-
-      var attributeList = $.map(el.attributes, function (el) { return el })
-      var whitelistedAttributes = [].concat(whiteList['*'] || [], whiteList[elName] || [])
-
-      for (var j = 0, len2 = attributeList.length; j < len2; j++) {
-        if (!allowedAttribute(attributeList[j], whitelistedAttributes)) {
-          el.removeAttribute(attributeList[j].nodeName)
-        }
-      }
-    }
-
-    return createdDocument.body.innerHTML
-  }
-
-  // TOOLTIP PUBLIC CLASS DEFINITION
-  // ===============================
-
-  var Tooltip = function (element, options) {
-    this.type       = null
-    this.options    = null
-    this.enabled    = null
-    this.timeout    = null
-    this.hoverState = null
-    this.$element   = null
-    this.inState    = null
-
-    this.init('tooltip', element, options)
-  }
-
-  Tooltip.VERSION  = '3.4.1'
-
-  Tooltip.TRANSITION_DURATION = 150
-
-  Tooltip.DEFAULTS = {
-    animation: true,
-    placement: 'top',
-    selector: false,
-    template: '<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',
-    trigger: 'hover focus',
-    title: '',
-    delay: 0,
-    html: false,
-    container: false,
-    viewport: {
-      selector: 'body',
-      padding: 0
-    },
-    sanitize : true,
-    sanitizeFn : null,
-    whiteList : DefaultWhitelist
-  }
-
-  Tooltip.prototype.init = function (type, element, options) {
-    this.enabled   = true
-    this.type      = type
-    this.$element  = $(element)
-    this.options   = this.getOptions(options)
-    this.$viewport = this.options.viewport && $(document).find($.isFunction(this.options.viewport) ? this.options.viewport.call(this, this.$element) : (this.options.viewport.selector || this.options.viewport))
-    this.inState   = { click: false, hover: false, focus: false }
-
-    if (this.$element[0] instanceof document.constructor && !this.options.selector) {
-      throw new Error('`selector` option must be specified when initializing ' + this.type + ' on the window.document object!')
-    }
-
-    var triggers = this.options.trigger.split(' ')
-
-    for (var i = triggers.length; i--;) {
-      var trigger = triggers[i]
-
-      if (trigger == 'click') {
-        this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this))
-      } else if (trigger != 'manual') {
-        var eventIn  = trigger == 'hover' ? 'mouseenter' : 'focusin'
-        var eventOut = trigger == 'hover' ? 'mouseleave' : 'focusout'
-
-        this.$element.on(eventIn  + '.' + this.type, this.options.selector, $.proxy(this.enter, this))
-        this.$element.on(eventOut + '.' + this.type, this.options.selector, $.proxy(this.leave, this))
-      }
-    }
-
-    this.options.selector ?
-      (this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) :
-      this.fixTitle()
-  }
-
-  Tooltip.prototype.getDefaults = function () {
-    return Tooltip.DEFAULTS
-  }
-
-  Tooltip.prototype.getOptions = function (options) {
-    var dataAttributes = this.$element.data()
-
-    for (var dataAttr in dataAttributes) {
-      if (dataAttributes.hasOwnProperty(dataAttr) && $.inArray(dataAttr, DISALLOWED_ATTRIBUTES) !== -1) {
-        delete dataAttributes[dataAttr]
-      }
-    }
-
-    options = $.extend({}, this.getDefaults(), dataAttributes, options)
-
-    if (options.delay && typeof options.delay == 'number') {
-      options.delay = {
-        show: options.delay,
-        hide: options.delay
-      }
-    }
-
-    if (options.sanitize) {
-      options.template = sanitizeHtml(options.template, options.whiteList, options.sanitizeFn)
-    }
-
-    return options
-  }
-
-  Tooltip.prototype.getDelegateOptions = function () {
-    var options  = {}
-    var defaults = this.getDefaults()
-
-    this._options && $.each(this._options, function (key, value) {
-      if (defaults[key] != value) options[key] = value
-    })
-
-    return options
-  }
-
-  Tooltip.prototype.enter = function (obj) {
-    var self = obj instanceof this.constructor ?
-      obj : $(obj.currentTarget).data('bs.' + this.type)
-
-    if (!self) {
-      self = new this.constructor(obj.currentTarget, this.getDelegateOptions())
-      $(obj.currentTarget).data('bs.' + this.type, self)
-    }
-
-    if (obj instanceof $.Event) {
-      self.inState[obj.type == 'focusin' ? 'focus' : 'hover'] = true
-    }
-
-    if (self.tip().hasClass('in') || self.hoverState == 'in') {
-      self.hoverState = 'in'
-      return
-    }
-
-    clearTimeout(self.timeout)
-
-    self.hoverState = 'in'
-
-    if (!self.options.delay || !self.options.delay.show) return self.show()
-
-    self.timeout = setTimeout(function () {
-      if (self.hoverState == 'in') self.show()
-    }, self.options.delay.show)
-  }
-
-  Tooltip.prototype.isInStateTrue = function () {
-    for (var key in this.inState) {
-      if (this.inState[key]) return true
-    }
-
-    return false
-  }
-
-  Tooltip.prototype.leave = function (obj) {
-    var self = obj instanceof this.constructor ?
-      obj : $(obj.currentTarget).data('bs.' + this.type)
-
-    if (!self) {
-      self = new this.constructor(obj.currentTarget, this.getDelegateOptions())
-      $(obj.currentTarget).data('bs.' + this.type, self)
-    }
-
-    if (obj instanceof $.Event) {
-      self.inState[obj.type == 'focusout' ? 'focus' : 'hover'] = false
-    }
-
-    if (self.isInStateTrue()) return
-
-    clearTimeout(self.timeout)
-
-    self.hoverState = 'out'
-
-    if (!self.options.delay || !self.options.delay.hide) return self.hide()
-
-    self.timeout = setTimeout(function () {
-      if (self.hoverState == 'out') self.hide()
-    }, self.options.delay.hide)
-  }
-
-  Tooltip.prototype.show = function () {
-    var e = $.Event('show.bs.' + this.type)
-
-    if (this.hasContent() && this.enabled) {
-      this.$element.trigger(e)
-
-      var inDom = $.contains(this.$element[0].ownerDocument.documentElement, this.$element[0])
-      if (e.isDefaultPrevented() || !inDom) return
-      var that = this
-
-      var $tip = this.tip()
-
-      var tipId = this.getUID(this.type)
-
-      this.setContent()
-      $tip.attr('id', tipId)
-      this.$element.attr('aria-describedby', tipId)
-
-      if (this.options.animation) $tip.addClass('fade')
-
-      var placement = typeof this.options.placement == 'function' ?
-        this.options.placement.call(this, $tip[0], this.$element[0]) :
-        this.options.placement
-
-      var autoToken = /\s?auto?\s?/i
-      var autoPlace = autoToken.test(placement)
-      if (autoPlace) placement = placement.replace(autoToken, '') || 'top'
-
-      $tip
-        .detach()
-        .css({ top: 0, left: 0, display: 'block' })
-        .addClass(placement)
-        .data('bs.' + this.type, this)
-
-      this.options.container ? $tip.appendTo($(document).find(this.options.container)) : $tip.insertAfter(this.$element)
-      this.$element.trigger('inserted.bs.' + this.type)
-
-      var pos          = this.getPosition()
-      var actualWidth  = $tip[0].offsetWidth
-      var actualHeight = $tip[0].offsetHeight
-
-      if (autoPlace) {
-        var orgPlacement = placement
-        var viewportDim = this.getPosition(this.$viewport)
-
-        placement = placement == 'bottom' && pos.bottom + actualHeight > viewportDim.bottom ? 'top'    :
-                    placement == 'top'    && pos.top    - actualHeight < viewportDim.top    ? 'bottom' :
-                    placement == 'right'  && pos.right  + actualWidth  > viewportDim.width  ? 'left'   :
-                    placement == 'left'   && pos.left   - actualWidth  < viewportDim.left   ? 'right'  :
-                    placement
-
-        $tip
-          .removeClass(orgPlacement)
-          .addClass(placement)
-      }
-
-      var calculatedOffset = this.getCalculatedOffset(placement, pos, actualWidth, actualHeight)
-
-      this.applyPlacement(calculatedOffset, placement)
-
-      var complete = function () {
-        var prevHoverState = that.hoverState
-        that.$element.trigger('shown.bs.' + that.type)
-        that.hoverState = null
-
-        if (prevHoverState == 'out') that.leave(that)
-      }
-
-      $.support.transition && this.$tip.hasClass('fade') ?
-        $tip
-          .one('bsTransitionEnd', complete)
-          .emulateTransitionEnd(Tooltip.TRANSITION_DURATION) :
-        complete()
-    }
-  }
-
-  Tooltip.prototype.applyPlacement = function (offset, placement) {
-    var $tip   = this.tip()
-    var width  = $tip[0].offsetWidth
-    var height = $tip[0].offsetHeight
-
-    // manually read margins because getBoundingClientRect includes difference
-    var marginTop = parseInt($tip.css('margin-top'), 10)
-    var marginLeft = parseInt($tip.css('margin-left'), 10)
-
-    // we must check for NaN for ie 8/9
-    if (isNaN(marginTop))  marginTop  = 0
-    if (isNaN(marginLeft)) marginLeft = 0
-
-    offset.top  += marginTop
-    offset.left += marginLeft
-
-    // $.fn.offset doesn't round pixel values
-    // so we use setOffset directly with our own function B-0
-    $.offset.setOffset($tip[0], $.extend({
-      using: function (props) {
-        $tip.css({
-          top: Math.round(props.top),
-          left: Math.round(props.left)
-        })
-      }
-    }, offset), 0)
-
-    $tip.addClass('in')
-
-    // check to see if placing tip in new offset caused the tip to resize itself
-    var actualWidth  = $tip[0].offsetWidth
-    var actualHeight = $tip[0].offsetHeight
-
-    if (placement == 'top' && actualHeight != height) {
-      offset.top = offset.top + height - actualHeight
-    }
-
-    var delta = this.getViewportAdjustedDelta(placement, offset, actualWidth, actualHeight)
-
-    if (delta.left) offset.left += delta.left
-    else offset.top += delta.top
-
-    var isVertical          = /top|bottom/.test(placement)
-    var arrowDelta          = isVertical ? delta.left * 2 - width + actualWidth : delta.top * 2 - height + actualHeight
-    var arrowOffsetPosition = isVertical ? 'offsetWidth' : 'offsetHeight'
-
-    $tip.offset(offset)
-    this.replaceArrow(arrowDelta, $tip[0][arrowOffsetPosition], isVertical)
-  }
-
-  Tooltip.prototype.replaceArrow = function (delta, dimension, isVertical) {
-    this.arrow()
-      .css(isVertical ? 'left' : 'top', 50 * (1 - delta / dimension) + '%')
-      .css(isVertical ? 'top' : 'left', '')
-  }
-
-  Tooltip.prototype.setContent = function () {
-    var $tip  = this.tip()
-    var title = this.getTitle()
-
-    if (this.options.html) {
-      if (this.options.sanitize) {
-        title = sanitizeHtml(title, this.options.whiteList, this.options.sanitizeFn)
-      }
-
-      $tip.find('.tooltip-inner').html(title)
-    } else {
-      $tip.find('.tooltip-inner').text(title)
-    }
-
-    $tip.removeClass('fade in top bottom left right')
-  }
-
-  Tooltip.prototype.hide = function (callback) {
-    var that = this
-    var $tip = $(this.$tip)
-    var e    = $.Event('hide.bs.' + this.type)
-
-    function complete() {
-      if (that.hoverState != 'in') $tip.detach()
-      if (that.$element) { // TODO: Check whether guarding this code with this `if` is really necessary.
-        that.$element
-          .removeAttr('aria-describedby')
-          .trigger('hidden.bs.' + that.type)
-      }
-      callback && callback()
-    }
-
-    this.$element.trigger(e)
-
-    if (e.isDefaultPrevented()) return
-
-    $tip.removeClass('in')
-
-    $.support.transition && $tip.hasClass('fade') ?
-      $tip
-        .one('bsTransitionEnd', complete)
-        .emulateTransitionEnd(Tooltip.TRANSITION_DURATION) :
-      complete()
-
-    this.hoverState = null
-
-    return this
-  }
-
-  Tooltip.prototype.fixTitle = function () {
-    var $e = this.$element
-    if ($e.attr('title') || typeof $e.attr('data-original-title') != 'string') {
-      $e.attr('data-original-title', $e.attr('title') || '').attr('title', '')
-    }
-  }
-
-  Tooltip.prototype.hasContent = function () {
-    return this.getTitle()
-  }
-
-  Tooltip.prototype.getPosition = function ($element) {
-    $element   = $element || this.$element
-
-    var el     = $element[0]
-    var isBody = el.tagName == 'BODY'
-
-    var elRect    = el.getBoundingClientRect()
-    if (elRect.width == null) {
-      // width and height are missing in IE8, so compute them manually; see https://github.com/twbs/bootstrap/issues/14093
-      elRect = $.extend({}, elRect, { width: elRect.right - elRect.left, height: elRect.bottom - elRect.top })
-    }
-    var isSvg = window.SVGElement && el instanceof window.SVGElement
-    // Avoid using $.offset() on SVGs since it gives incorrect results in jQuery 3.
-    // See https://github.com/twbs/bootstrap/issues/20280
-    var elOffset  = isBody ? { top: 0, left: 0 } : (isSvg ? null : $element.offset())
-    var scroll    = { scroll: isBody ? document.documentElement.scrollTop || document.body.scrollTop : $element.scrollTop() }
-    var outerDims = isBody ? { width: $(window).width(), height: $(window).height() } : null
-
-    return $.extend({}, elRect, scroll, outerDims, elOffset)
-  }
-
-  Tooltip.prototype.getCalculatedOffset = function (placement, pos, actualWidth, actualHeight) {
-    return placement == 'bottom' ? { top: pos.top + pos.height,   left: pos.left + pos.width / 2 - actualWidth / 2 } :
-           placement == 'top'    ? { top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2 } :
-           placement == 'left'   ? { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth } :
-        /* placement == 'right' */ { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width }
-
-  }
-
-  Tooltip.prototype.getViewportAdjustedDelta = function (placement, pos, actualWidth, actualHeight) {
-    var delta = { top: 0, left: 0 }
-    if (!this.$viewport) return delta
-
-    var viewportPadding = this.options.viewport && this.options.viewport.padding || 0
-    var viewportDimensions = this.getPosition(this.$viewport)
-
-    if (/right|left/.test(placement)) {
-      var topEdgeOffset    = pos.top - viewportPadding - viewportDimensions.scroll
-      var bottomEdgeOffset = pos.top + viewportPadding - viewportDimensions.scroll + actualHeight
-      if (topEdgeOffset < viewportDimensions.top) { // top overflow
-        delta.top = viewportDimensions.top - topEdgeOffset
-      } else if (bottomEdgeOffset > viewportDimensions.top + viewportDimensions.height) { // bottom overflow
-        delta.top = viewportDimensions.top + viewportDimensions.height - bottomEdgeOffset
-      }
-    } else {
-      var leftEdgeOffset  = pos.left - viewportPadding
-      var rightEdgeOffset = pos.left + viewportPadding + actualWidth
-      if (leftEdgeOffset < viewportDimensions.left) { // left overflow
-        delta.left = viewportDimensions.left - leftEdgeOffset
-      } else if (rightEdgeOffset > viewportDimensions.right) { // right overflow
-        delta.left = viewportDimensions.left + viewportDimensions.width - rightEdgeOffset
-      }
-    }
-
-    return delta
-  }
-
-  Tooltip.prototype.getTitle = function () {
-    var title
-    var $e = this.$element
-    var o  = this.options
-
-    title = $e.attr('data-original-title')
-      || (typeof o.title == 'function' ? o.title.call($e[0]) :  o.title)
-
-    return title
-  }
-
-  Tooltip.prototype.getUID = function (prefix) {
-    do prefix += ~~(Math.random() * 1000000)
-    while (document.getElementById(prefix))
-    return prefix
-  }
-
-  Tooltip.prototype.tip = function () {
-    if (!this.$tip) {
-      this.$tip = $(this.options.template)
-      if (this.$tip.length != 1) {
-        throw new Error(this.type + ' `template` option must consist of exactly 1 top-level element!')
-      }
-    }
-    return this.$tip
-  }
-
-  Tooltip.prototype.arrow = function () {
-    return (this.$arrow = this.$arrow || this.tip().find('.tooltip-arrow'))
-  }
-
-  Tooltip.prototype.enable = function () {
-    this.enabled = true
-  }
-
-  Tooltip.prototype.disable = function () {
-    this.enabled = false
-  }
-
-  Tooltip.prototype.toggleEnabled = function () {
-    this.enabled = !this.enabled
-  }
-
-  Tooltip.prototype.toggle = function (e) {
-    var self = this
-    if (e) {
-      self = $(e.currentTarget).data('bs.' + this.type)
-      if (!self) {
-        self = new this.constructor(e.currentTarget, this.getDelegateOptions())
-        $(e.currentTarget).data('bs.' + this.type, self)
-      }
-    }
-
-    if (e) {
-      self.inState.click = !self.inState.click
-      if (self.isInStateTrue()) self.enter(self)
-      else self.leave(self)
-    } else {
-      self.tip().hasClass('in') ? self.leave(self) : self.enter(self)
-    }
-  }
-
-  Tooltip.prototype.destroy = function () {
-    var that = this
-    clearTimeout(this.timeout)
-    this.hide(function () {
-      that.$element.off('.' + that.type).removeData('bs.' + that.type)
-      if (that.$tip) {
-        that.$tip.detach()
-      }
-      that.$tip = null
-      that.$arrow = null
-      that.$viewport = null
-      that.$element = null
-    })
-  }
-
-  Tooltip.prototype.sanitizeHtml = function (unsafeHtml) {
-    return sanitizeHtml(unsafeHtml, this.options.whiteList, this.options.sanitizeFn)
-  }
-
-  // TOOLTIP PLUGIN DEFINITION
-  // =========================
-
-  function Plugin(option) {
-    return this.each(function () {
-      var $this   = $(this)
-      var data    = $this.data('bs.tooltip')
-      var options = typeof option == 'object' && option
-
-      if (!data && /destroy|hide/.test(option)) return
-      if (!data) $this.data('bs.tooltip', (data = new Tooltip(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  var old = $.fn.tooltip
-
-  $.fn.tooltip             = Plugin
-  $.fn.tooltip.Constructor = Tooltip
-
-
-  // TOOLTIP NO CONFLICT
-  // ===================
-
-  $.fn.tooltip.noConflict = function () {
-    $.fn.tooltip = old
-    return this
-  }
-
-}(jQuery);
-
-/* ========================================================================
- * Bootstrap: popover.js v3.4.1
- * https://getbootstrap.com/docs/3.4/javascript/#popovers
- * ========================================================================
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- * ======================================================================== */
-
-
-+function ($) {
-  'use strict';
-
-  // POPOVER PUBLIC CLASS DEFINITION
-  // ===============================
-
-  var Popover = function (element, options) {
-    this.init('popover', element, options)
-  }
-
-  if (!$.fn.tooltip) throw new Error('Popover requires tooltip.js')
-
-  Popover.VERSION  = '3.4.1'
-
-  Popover.DEFAULTS = $.extend({}, $.fn.tooltip.Constructor.DEFAULTS, {
-    placement: 'right',
-    trigger: 'click',
-    content: '',
-    template: '<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'
-  })
-
-
-  // NOTE: POPOVER EXTENDS tooltip.js
-  // ================================
-
-  Popover.prototype = $.extend({}, $.fn.tooltip.Constructor.prototype)
-
-  Popover.prototype.constructor = Popover
-
-  Popover.prototype.getDefaults = function () {
-    return Popover.DEFAULTS
-  }
-
-  Popover.prototype.setContent = function () {
-    var $tip    = this.tip()
-    var title   = this.getTitle()
-    var content = this.getContent()
-
-    if (this.options.html) {
-      var typeContent = typeof content
-
-      if (this.options.sanitize) {
-        title = this.sanitizeHtml(title)
-
-        if (typeContent === 'string') {
-          content = this.sanitizeHtml(content)
-        }
-      }
-
-      $tip.find('.popover-title').html(title)
-      $tip.find('.popover-content').children().detach().end()[
-        typeContent === 'string' ? 'html' : 'append'
-      ](content)
-    } else {
-      $tip.find('.popover-title').text(title)
-      $tip.find('.popover-content').children().detach().end().text(content)
-    }
-
-    $tip.removeClass('fade top bottom left right in')
-
-    // IE8 doesn't accept hiding via the `:empty` pseudo selector, we have to do
-    // this manually by checking the contents.
-    if (!$tip.find('.popover-title').html()) $tip.find('.popover-title').hide()
-  }
-
-  Popover.prototype.hasContent = function () {
-    return this.getTitle() || this.getContent()
-  }
-
-  Popover.prototype.getContent = function () {
-    var $e = this.$element
-    var o  = this.options
-
-    return $e.attr('data-content')
-      || (typeof o.content == 'function' ?
-        o.content.call($e[0]) :
-        o.content)
-  }
-
-  Popover.prototype.arrow = function () {
-    return (this.$arrow = this.$arrow || this.tip().find('.arrow'))
-  }
-
-
-  // POPOVER PLUGIN DEFINITION
-  // =========================
-
-  function Plugin(option) {
-    return this.each(function () {
-      var $this   = $(this)
-      var data    = $this.data('bs.popover')
-      var options = typeof option == 'object' && option
-
-      if (!data && /destroy|hide/.test(option)) return
-      if (!data) $this.data('bs.popover', (data = new Popover(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  var old = $.fn.popover
-
-  $.fn.popover             = Plugin
-  $.fn.popover.Constructor = Popover
-
-
-  // POPOVER NO CONFLICT
-  // ===================
-
-  $.fn.popover.noConflict = function () {
-    $.fn.popover = old
-    return this
-  }
-
-}(jQuery);
-
-/* ========================================================================
- * Bootstrap: scrollspy.js v3.4.1
- * https://getbootstrap.com/docs/3.4/javascript/#scrollspy
- * ========================================================================
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- * ======================================================================== */
-
-
-+function ($) {
-  'use strict';
-
-  // SCROLLSPY CLASS DEFINITION
-  // ==========================
-
-  function ScrollSpy(element, options) {
-    this.$body          = $(document.body)
-    this.$scrollElement = $(element).is(document.body) ? $(window) : $(element)
-    this.options        = $.extend({}, ScrollSpy.DEFAULTS, options)
-    this.selector       = (this.options.target || '') + ' .nav li > a'
-    this.offsets        = []
-    this.targets        = []
-    this.activeTarget   = null
-    this.scrollHeight   = 0
-
-    this.$scrollElement.on('scroll.bs.scrollspy', $.proxy(this.process, this))
-    this.refresh()
-    this.process()
-  }
-
-  ScrollSpy.VERSION  = '3.4.1'
-
-  ScrollSpy.DEFAULTS = {
-    offset: 10
-  }
-
-  ScrollSpy.prototype.getScrollHeight = function () {
-    return this.$scrollElement[0].scrollHeight || Math.max(this.$body[0].scrollHeight, document.documentElement.scrollHeight)
-  }
-
-  ScrollSpy.prototype.refresh = function () {
-    var that          = this
-    var offsetMethod  = 'offset'
-    var offsetBase    = 0
-
-    this.offsets      = []
-    this.targets      = []
-    this.scrollHeight = this.getScrollHeight()
-
-    if (!$.isWindow(this.$scrollElement[0])) {
-      offsetMethod = 'position'
-      offsetBase   = this.$scrollElement.scrollTop()
-    }
-
-    this.$body
-      .find(this.selector)
-      .map(function () {
-        var $el   = $(this)
-        var href  = $el.data('target') || $el.attr('href')
-        var $href = /^#./.test(href) && $(href)
-
-        return ($href
-          && $href.length
-          && $href.is(':visible')
-          && [[$href[offsetMethod]().top + offsetBase, href]]) || null
-      })
-      .sort(function (a, b) { return a[0] - b[0] })
-      .each(function () {
-        that.offsets.push(this[0])
-        that.targets.push(this[1])
-      })
-  }
-
-  ScrollSpy.prototype.process = function () {
-    var scrollTop    = this.$scrollElement.scrollTop() + this.options.offset
-    var scrollHeight = this.getScrollHeight()
-    var maxScroll    = this.options.offset + scrollHeight - this.$scrollElement.height()
-    var offsets      = this.offsets
-    var targets      = this.targets
-    var activeTarget = this.activeTarget
-    var i
-
-    if (this.scrollHeight != scrollHeight) {
-      this.refresh()
-    }
-
-    if (scrollTop >= maxScroll) {
-      return activeTarget != (i = targets[targets.length - 1]) && this.activate(i)
-    }
-
-    if (activeTarget && scrollTop < offsets[0]) {
-      this.activeTarget = null
-      return this.clear()
-    }
-
-    for (i = offsets.length; i--;) {
-      activeTarget != targets[i]
-        && scrollTop >= offsets[i]
-        && (offsets[i + 1] === undefined || scrollTop < offsets[i + 1])
-        && this.activate(targets[i])
-    }
-  }
-
-  ScrollSpy.prototype.activate = function (target) {
-    this.activeTarget = target
-
-    this.clear()
-
-    var selector = this.selector +
-      '[data-target="' + target + '"],' +
-      this.selector + '[href="' + target + '"]'
-
-    var active = $(selector)
-      .parents('li')
-      .addClass('active')
-
-    if (active.parent('.dropdown-menu').length) {
-      active = active
-        .closest('li.dropdown')
-        .addClass('active')
-    }
-
-    active.trigger('activate.bs.scrollspy')
-  }
-
-  ScrollSpy.prototype.clear = function () {
-    $(this.selector)
-      .parentsUntil(this.options.target, '.active')
-      .removeClass('active')
-  }
-
-
-  // SCROLLSPY PLUGIN DEFINITION
-  // ===========================
-
-  function Plugin(option) {
-    return this.each(function () {
-      var $this   = $(this)
-      var data    = $this.data('bs.scrollspy')
-      var options = typeof option == 'object' && option
-
-      if (!data) $this.data('bs.scrollspy', (data = new ScrollSpy(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  var old = $.fn.scrollspy
-
-  $.fn.scrollspy             = Plugin
-  $.fn.scrollspy.Constructor = ScrollSpy
-
-
-  // SCROLLSPY NO CONFLICT
-  // =====================
-
-  $.fn.scrollspy.noConflict = function () {
-    $.fn.scrollspy = old
-    return this
-  }
-
-
-  // SCROLLSPY DATA-API
-  // ==================
-
-  $(window).on('load.bs.scrollspy.data-api', function () {
-    $('[data-spy="scroll"]').each(function () {
-      var $spy = $(this)
-      Plugin.call($spy, $spy.data())
-    })
-  })
-
-}(jQuery);
-
-/* ========================================================================
- * Bootstrap: tab.js v3.4.1
- * https://getbootstrap.com/docs/3.4/javascript/#tabs
- * ========================================================================
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- * ======================================================================== */
-
-
-+function ($) {
-  'use strict';
-
-  // TAB CLASS DEFINITION
-  // ====================
-
-  var Tab = function (element) {
-    // jscs:disable requireDollarBeforejQueryAssignment
-    this.element = $(element)
-    // jscs:enable requireDollarBeforejQueryAssignment
-  }
-
-  Tab.VERSION = '3.4.1'
-
-  Tab.TRANSITION_DURATION = 150
-
-  Tab.prototype.show = function () {
-    var $this    = this.element
-    var $ul      = $this.closest('ul:not(.dropdown-menu)')
-    var selector = $this.data('target')
-
-    if (!selector) {
-      selector = $this.attr('href')
-      selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
-    }
-
-    if ($this.parent('li').hasClass('active')) return
-
-    var $previous = $ul.find('.active:last a')
-    var hideEvent = $.Event('hide.bs.tab', {
-      relatedTarget: $this[0]
-    })
-    var showEvent = $.Event('show.bs.tab', {
-      relatedTarget: $previous[0]
-    })
-
-    $previous.trigger(hideEvent)
-    $this.trigger(showEvent)
-
-    if (showEvent.isDefaultPrevented() || hideEvent.isDefaultPrevented()) return
-
-    var $target = $(document).find(selector)
-
-    this.activate($this.closest('li'), $ul)
-    this.activate($target, $target.parent(), function () {
-      $previous.trigger({
-        type: 'hidden.bs.tab',
-        relatedTarget: $this[0]
-      })
-      $this.trigger({
-        type: 'shown.bs.tab',
-        relatedTarget: $previous[0]
-      })
-    })
-  }
-
-  Tab.prototype.activate = function (element, container, callback) {
-    var $active    = container.find('> .active')
-    var transition = callback
-      && $.support.transition
-      && ($active.length && $active.hasClass('fade') || !!container.find('> .fade').length)
-
-    function next() {
-      $active
-        .removeClass('active')
-        .find('> .dropdown-menu > .active')
-        .removeClass('active')
-        .end()
-        .find('[data-toggle="tab"]')
-        .attr('aria-expanded', false)
-
-      element
-        .addClass('active')
-        .find('[data-toggle="tab"]')
-        .attr('aria-expanded', true)
-
-      if (transition) {
-        element[0].offsetWidth // reflow for transition
-        element.addClass('in')
-      } else {
-        element.removeClass('fade')
-      }
-
-      if (element.parent('.dropdown-menu').length) {
-        element
-          .closest('li.dropdown')
-          .addClass('active')
-          .end()
-          .find('[data-toggle="tab"]')
-          .attr('aria-expanded', true)
-      }
-
-      callback && callback()
-    }
-
-    $active.length && transition ?
-      $active
-        .one('bsTransitionEnd', next)
-        .emulateTransitionEnd(Tab.TRANSITION_DURATION) :
-      next()
-
-    $active.removeClass('in')
-  }
-
-
-  // TAB PLUGIN DEFINITION
-  // =====================
-
-  function Plugin(option) {
-    return this.each(function () {
-      var $this = $(this)
-      var data  = $this.data('bs.tab')
-
-      if (!data) $this.data('bs.tab', (data = new Tab(this)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  var old = $.fn.tab
-
-  $.fn.tab             = Plugin
-  $.fn.tab.Constructor = Tab
-
-
-  // TAB NO CONFLICT
-  // ===============
-
-  $.fn.tab.noConflict = function () {
-    $.fn.tab = old
-    return this
-  }
-
-
-  // TAB DATA-API
-  // ============
-
-  var clickHandler = function (e) {
-    e.preventDefault()
-    Plugin.call($(this), 'show')
-  }
-
-  $(document)
-    .on('click.bs.tab.data-api', '[data-toggle="tab"]', clickHandler)
-    .on('click.bs.tab.data-api', '[data-toggle="pill"]', clickHandler)
-
-}(jQuery);
-
-/* ========================================================================
- * Bootstrap: affix.js v3.4.1
- * https://getbootstrap.com/docs/3.4/javascript/#affix
- * ========================================================================
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- * ======================================================================== */
-
-
-+function ($) {
-  'use strict';
-
-  // AFFIX CLASS DEFINITION
-  // ======================
-
-  var Affix = function (element, options) {
-    this.options = $.extend({}, Affix.DEFAULTS, options)
-
-    var target = this.options.target === Affix.DEFAULTS.target ? $(this.options.target) : $(document).find(this.options.target)
-
-    this.$target = target
-      .on('scroll.bs.affix.data-api', $.proxy(this.checkPosition, this))
-      .on('click.bs.affix.data-api',  $.proxy(this.checkPositionWithEventLoop, this))
-
-    this.$element     = $(element)
-    this.affixed      = null
-    this.unpin        = null
-    this.pinnedOffset = null
-
-    this.checkPosition()
-  }
-
-  Affix.VERSION  = '3.4.1'
-
-  Affix.RESET    = 'affix affix-top affix-bottom'
-
-  Affix.DEFAULTS = {
-    offset: 0,
-    target: window
-  }
-
-  Affix.prototype.getState = function (scrollHeight, height, offsetTop, offsetBottom) {
-    var scrollTop    = this.$target.scrollTop()
-    var position     = this.$element.offset()
-    var targetHeight = this.$target.height()
-
-    if (offsetTop != null && this.affixed == 'top') return scrollTop < offsetTop ? 'top' : false
-
-    if (this.affixed == 'bottom') {
-      if (offsetTop != null) return (scrollTop + this.unpin <= position.top) ? false : 'bottom'
-      return (scrollTop + targetHeight <= scrollHeight - offsetBottom) ? false : 'bottom'
-    }
-
-    var initializing   = this.affixed == null
-    var colliderTop    = initializing ? scrollTop : position.top
-    var colliderHeight = initializing ? targetHeight : height
-
-    if (offsetTop != null && scrollTop <= offsetTop) return 'top'
-    if (offsetBottom != null && (colliderTop + colliderHeight >= scrollHeight - offsetBottom)) return 'bottom'
-
-    return false
-  }
-
-  Affix.prototype.getPinnedOffset = function () {
-    if (this.pinnedOffset) return this.pinnedOffset
-    this.$element.removeClass(Affix.RESET).addClass('affix')
-    var scrollTop = this.$target.scrollTop()
-    var position  = this.$element.offset()
-    return (this.pinnedOffset = position.top - scrollTop)
-  }
-
-  Affix.prototype.checkPositionWithEventLoop = function () {
-    setTimeout($.proxy(this.checkPosition, this), 1)
-  }
-
-  Affix.prototype.checkPosition = function () {
-    if (!this.$element.is(':visible')) return
-
-    var height       = this.$element.height()
-    var offset       = this.options.offset
-    var offsetTop    = offset.top
-    var offsetBottom = offset.bottom
-    var scrollHeight = Math.max($(document).height(), $(document.body).height())
-
-    if (typeof offset != 'object')         offsetBottom = offsetTop = offset
-    if (typeof offsetTop == 'function')    offsetTop    = offset.top(this.$element)
-    if (typeof offsetBottom == 'function') offsetBottom = offset.bottom(this.$element)
-
-    var affix = this.getState(scrollHeight, height, offsetTop, offsetBottom)
-
-    if (this.affixed != affix) {
-      if (this.unpin != null) this.$element.css('top', '')
-
-      var affixType = 'affix' + (affix ? '-' + affix : '')
-      var e         = $.Event(affixType + '.bs.affix')
-
-      this.$element.trigger(e)
-
-      if (e.isDefaultPrevented()) return
-
-      this.affixed = affix
-      this.unpin = affix == 'bottom' ? this.getPinnedOffset() : null
-
-      this.$element
-        .removeClass(Affix.RESET)
-        .addClass(affixType)
-        .trigger(affixType.replace('affix', 'affixed') + '.bs.affix')
-    }
-
-    if (affix == 'bottom') {
-      this.$element.offset({
-        top: scrollHeight - height - offsetBottom
-      })
-    }
-  }
-
-
-  // AFFIX PLUGIN DEFINITION
-  // =======================
-
-  function Plugin(option) {
-    return this.each(function () {
-      var $this   = $(this)
-      var data    = $this.data('bs.affix')
-      var options = typeof option == 'object' && option
-
-      if (!data) $this.data('bs.affix', (data = new Affix(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  var old = $.fn.affix
-
-  $.fn.affix             = Plugin
-  $.fn.affix.Constructor = Affix
-
-
-  // AFFIX NO CONFLICT
-  // =================
-
-  $.fn.affix.noConflict = function () {
-    $.fn.affix = old
-    return this
-  }
-
-
-  // AFFIX DATA-API
-  // ==============
-
-  $(window).on('load', function () {
-    $('[data-spy="affix"]').each(function () {
-      var $spy = $(this)
-      var data = $spy.data()
-
-      data.offset = data.offset || {}
-
-      if (data.offsetBottom != null) data.offset.bottom = data.offsetBottom
-      if (data.offsetTop    != null) data.offset.top    = data.offsetTop
-
-      Plugin.call($spy, data)
-    })
-  })
-
-}(jQuery);
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap.min.js
deleted file mode 100644
index eb0a8b4..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap.min.js
+++ /dev/null
@@ -1,6 +0,0 @@
-/*!
- * Bootstrap v3.4.1 (https://getbootstrap.com/)
- * Copyright 2011-2019 Twitter, Inc.
- * Licensed under the MIT license
- */
-if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery");!function(t){"use strict";var e=jQuery.fn.jquery.split(" ")[0].split(".");if(e[0]<2&&e[1]<9||1==e[0]&&9==e[1]&&e[2]<1||3<e[0])throw new Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher, but lower than version 4")}(),function(n){"use strict";n.fn.emulateTransitionEnd=function(t){var e=!1,i=this;n(this).one("bsTransitionEnd",function(){e=!0});return setTimeout(function(){e||n(i).trigger(n.support.transition.end)},t),this},n(function(){n.support.transition=function o(){var t=document.createElement("bootstrap"),e={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var i in e)if(t.style[i]!==undefined)return{end:e[i]};return!1}(),n.support.transition&&(n.event.special.bsTransitionEnd={bindType:n.support.transition.end,delegateType:n.support.transition.end,handle:function(t){if(n(t.target).is(this))return t.handleObj.handler.apply(this,arguments)}})})}(jQuery),function(s){"use strict";var e='[data-dismiss="alert"]',a=function(t){s(t).on("click",e,this.close)};a.VERSION="3.4.1",a.TRANSITION_DURATION=150,a.prototype.close=function(t){var e=s(this),i=e.attr("data-target");i||(i=(i=e.attr("href"))&&i.replace(/.*(?=#[^\s]*$)/,"")),i="#"===i?[]:i;var o=s(document).find(i);function n(){o.detach().trigger("closed.bs.alert").remove()}t&&t.preventDefault(),o.length||(o=e.closest(".alert")),o.trigger(t=s.Event("close.bs.alert")),t.isDefaultPrevented()||(o.removeClass("in"),s.support.transition&&o.hasClass("fade")?o.one("bsTransitionEnd",n).emulateTransitionEnd(a.TRANSITION_DURATION):n())};var t=s.fn.alert;s.fn.alert=function o(i){return this.each(function(){var t=s(this),e=t.data("bs.alert");e||t.data("bs.alert",e=new a(this)),"string"==typeof i&&e[i].call(t)})},s.fn.alert.Constructor=a,s.fn.alert.noConflict=function(){return s.fn.alert=t,this},s(document).on("click.bs.alert.data-api",e,a.prototype.close)}(jQuery),function(s){"use strict";var n=function(t,e){this.$element=s(t),this.options=s.extend({},n.DEFAULTS,e),this.isLoading=!1};function i(o){return this.each(function(){var t=s(this),e=t.data("bs.button"),i="object"==typeof o&&o;e||t.data("bs.button",e=new n(this,i)),"toggle"==o?e.toggle():o&&e.setState(o)})}n.VERSION="3.4.1",n.DEFAULTS={loadingText:"loading..."},n.prototype.setState=function(t){var e="disabled",i=this.$element,o=i.is("input")?"val":"html",n=i.data();t+="Text",null==n.resetText&&i.data("resetText",i[o]()),setTimeout(s.proxy(function(){i[o](null==n[t]?this.options[t]:n[t]),"loadingText"==t?(this.isLoading=!0,i.addClass(e).attr(e,e).prop(e,!0)):this.isLoading&&(this.isLoading=!1,i.removeClass(e).removeAttr(e).prop(e,!1))},this),0)},n.prototype.toggle=function(){var t=!0,e=this.$element.closest('[data-toggle="buttons"]');if(e.length){var i=this.$element.find("input");"radio"==i.prop("type")?(i.prop("checked")&&(t=!1),e.find(".active").removeClass("active"),this.$element.addClass("active")):"checkbox"==i.prop("type")&&(i.prop("checked")!==this.$element.hasClass("active")&&(t=!1),this.$element.toggleClass("active")),i.prop("checked",this.$element.hasClass("active")),t&&i.trigger("change")}else this.$element.attr("aria-pressed",!this.$element.hasClass("active")),this.$element.toggleClass("active")};var t=s.fn.button;s.fn.button=i,s.fn.button.Constructor=n,s.fn.button.noConflict=function(){return 
s.fn.button=t,this},s(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(t){var e=s(t.target).closest(".btn");i.call(e,"toggle"),s(t.target).is('input[type="radio"], input[type="checkbox"]')||(t.preventDefault(),e.is("input,button")?e.trigger("focus"):e.find("input:visible,button:visible").first().trigger("focus"))}).on("focus.bs.button.data-api blur.bs.button.data-api",'[data-toggle^="button"]',function(t){s(t.target).closest(".btn").toggleClass("focus",/^focus(in)?$/.test(t.type))})}(jQuery),function(p){"use strict";var c=function(t,e){this.$element=p(t),this.$indicators=this.$element.find(".carousel-indicators"),this.options=e,this.paused=null,this.sliding=null,this.interval=null,this.$active=null,this.$items=null,this.options.keyboard&&this.$element.on("keydown.bs.carousel",p.proxy(this.keydown,this)),"hover"==this.options.pause&&!("ontouchstart"in document.documentElement)&&this.$element.on("mouseenter.bs.carousel",p.proxy(this.pause,this)).on("mouseleave.bs.carousel",p.proxy(this.cycle,this))};function r(n){return this.each(function(){var t=p(this),e=t.data("bs.carousel"),i=p.extend({},c.DEFAULTS,t.data(),"object"==typeof n&&n),o="string"==typeof n?n:i.slide;e||t.data("bs.carousel",e=new c(this,i)),"number"==typeof n?e.to(n):o?e[o]():i.interval&&e.pause().cycle()})}c.VERSION="3.4.1",c.TRANSITION_DURATION=600,c.DEFAULTS={interval:5e3,pause:"hover",wrap:!0,keyboard:!0},c.prototype.keydown=function(t){if(!/input|textarea/i.test(t.target.tagName)){switch(t.which){case 37:this.prev();break;case 39:this.next();break;default:return}t.preventDefault()}},c.prototype.cycle=function(t){return t||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(p.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(t){return this.$items=t.parent().children(".item"),this.$items.index(t||this.$active)},c.prototype.getItemForDirection=function(t,e){var i=this.getItemIndex(e);if(("prev"==t&&0===i||"next"==t&&i==this.$items.length-1)&&!this.options.wrap)return e;var o=(i+("prev"==t?-1:1))%this.$items.length;return this.$items.eq(o)},c.prototype.to=function(t){var e=this,i=this.getItemIndex(this.$active=this.$element.find(".item.active"));if(!(t>this.$items.length-1||t<0))return this.sliding?this.$element.one("slid.bs.carousel",function(){e.to(t)}):i==t?this.pause().cycle():this.slide(i<t?"next":"prev",this.$items.eq(t))},c.prototype.pause=function(t){return t||(this.paused=!0),this.$element.find(".next, .prev").length&&p.support.transition&&(this.$element.trigger(p.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){if(!this.sliding)return this.slide("next")},c.prototype.prev=function(){if(!this.sliding)return this.slide("prev")},c.prototype.slide=function(t,e){var i=this.$element.find(".item.active"),o=e||this.getItemForDirection(t,i),n=this.interval,s="next"==t?"left":"right",a=this;if(o.hasClass("active"))return this.sliding=!1;var r=o[0],l=p.Event("slide.bs.carousel",{relatedTarget:r,direction:s});if(this.$element.trigger(l),!l.isDefaultPrevented()){if(this.sliding=!0,n&&this.pause(),this.$indicators.length){this.$indicators.find(".active").removeClass("active");var h=p(this.$indicators.children()[this.getItemIndex(o)]);h&&h.addClass("active")}var d=p.Event("slid.bs.carousel",{relatedTarget:r,direction:s});return p.support.transition&&this.$element.hasClass("slide")?(o.addClass(t),"object"==typeof 
o&&o.length&&o[0].offsetWidth,i.addClass(s),o.addClass(s),i.one("bsTransitionEnd",function(){o.removeClass([t,s].join(" ")).addClass("active"),i.removeClass(["active",s].join(" ")),a.sliding=!1,setTimeout(function(){a.$element.trigger(d)},0)}).emulateTransitionEnd(c.TRANSITION_DURATION)):(i.removeClass("active"),o.addClass("active"),this.sliding=!1,this.$element.trigger(d)),n&&this.cycle(),this}};var t=p.fn.carousel;p.fn.carousel=r,p.fn.carousel.Constructor=c,p.fn.carousel.noConflict=function(){return p.fn.carousel=t,this};var e=function(t){var e=p(this),i=e.attr("href");i&&(i=i.replace(/.*(?=#[^\s]+$)/,""));var o=e.attr("data-target")||i,n=p(document).find(o);if(n.hasClass("carousel")){var s=p.extend({},n.data(),e.data()),a=e.attr("data-slide-to");a&&(s.interval=!1),r.call(n,s),a&&n.data("bs.carousel").to(a),t.preventDefault()}};p(document).on("click.bs.carousel.data-api","[data-slide]",e).on("click.bs.carousel.data-api","[data-slide-to]",e),p(window).on("load",function(){p('[data-ride="carousel"]').each(function(){var t=p(this);r.call(t,t.data())})})}(jQuery),function(a){"use strict";var r=function(t,e){this.$element=a(t),this.options=a.extend({},r.DEFAULTS,e),this.$trigger=a('[data-toggle="collapse"][href="#'+t.id+'"],[data-toggle="collapse"][data-target="#'+t.id+'"]'),this.transitioning=null,this.options.parent?this.$parent=this.getParent():this.addAriaAndCollapsedClass(this.$element,this.$trigger),this.options.toggle&&this.toggle()};function n(t){var e,i=t.attr("data-target")||(e=t.attr("href"))&&e.replace(/.*(?=#[^\s]+$)/,"");return a(document).find(i)}function l(o){return this.each(function(){var t=a(this),e=t.data("bs.collapse"),i=a.extend({},r.DEFAULTS,t.data(),"object"==typeof o&&o);!e&&i.toggle&&/show|hide/.test(o)&&(i.toggle=!1),e||t.data("bs.collapse",e=new r(this,i)),"string"==typeof o&&e[o]()})}r.VERSION="3.4.1",r.TRANSITION_DURATION=350,r.DEFAULTS={toggle:!0},r.prototype.dimension=function(){return this.$element.hasClass("width")?"width":"height"},r.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var t,e=this.$parent&&this.$parent.children(".panel").children(".in, .collapsing");if(!(e&&e.length&&(t=e.data("bs.collapse"))&&t.transitioning)){var i=a.Event("show.bs.collapse");if(this.$element.trigger(i),!i.isDefaultPrevented()){e&&e.length&&(l.call(e,"hide"),t||e.data("bs.collapse",null));var o=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[o](0).attr("aria-expanded",!0),this.$trigger.removeClass("collapsed").attr("aria-expanded",!0),this.transitioning=1;var n=function(){this.$element.removeClass("collapsing").addClass("collapse in")[o](""),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return n.call(this);var s=a.camelCase(["scroll",o].join("-"));this.$element.one("bsTransitionEnd",a.proxy(n,this)).emulateTransitionEnd(r.TRANSITION_DURATION)[o](this.$element[0][s])}}}},r.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var t=a.Event("hide.bs.collapse");if(this.$element.trigger(t),!t.isDefaultPrevented()){var e=this.dimension();this.$element[e](this.$element[e]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse in").attr("aria-expanded",!1),this.$trigger.addClass("collapsed").attr("aria-expanded",!1),this.transitioning=1;var i=function(){this.transitioning=0,this.$element.removeClass("collapsing").addClass("collapse").trigger("hidden.bs.collapse")};if(!a.support.transition)return 
i.call(this);this.$element[e](0).one("bsTransitionEnd",a.proxy(i,this)).emulateTransitionEnd(r.TRANSITION_DURATION)}}},r.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()},r.prototype.getParent=function(){return a(document).find(this.options.parent).find('[data-toggle="collapse"][data-parent="'+this.options.parent+'"]').each(a.proxy(function(t,e){var i=a(e);this.addAriaAndCollapsedClass(n(i),i)},this)).end()},r.prototype.addAriaAndCollapsedClass=function(t,e){var i=t.hasClass("in");t.attr("aria-expanded",i),e.toggleClass("collapsed",!i).attr("aria-expanded",i)};var t=a.fn.collapse;a.fn.collapse=l,a.fn.collapse.Constructor=r,a.fn.collapse.noConflict=function(){return a.fn.collapse=t,this},a(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',function(t){var e=a(this);e.attr("data-target")||t.preventDefault();var i=n(e),o=i.data("bs.collapse")?"toggle":e.data();l.call(i,o)})}(jQuery),function(a){"use strict";var r='[data-toggle="dropdown"]',o=function(t){a(t).on("click.bs.dropdown",this.toggle)};function l(t){var e=t.attr("data-target");e||(e=(e=t.attr("href"))&&/#[A-Za-z]/.test(e)&&e.replace(/.*(?=#[^\s]*$)/,""));var i="#"!==e?a(document).find(e):null;return i&&i.length?i:t.parent()}function s(o){o&&3===o.which||(a(".dropdown-backdrop").remove(),a(r).each(function(){var t=a(this),e=l(t),i={relatedTarget:this};e.hasClass("open")&&(o&&"click"==o.type&&/input|textarea/i.test(o.target.tagName)&&a.contains(e[0],o.target)||(e.trigger(o=a.Event("hide.bs.dropdown",i)),o.isDefaultPrevented()||(t.attr("aria-expanded","false"),e.removeClass("open").trigger(a.Event("hidden.bs.dropdown",i)))))}))}o.VERSION="3.4.1",o.prototype.toggle=function(t){var e=a(this);if(!e.is(".disabled, :disabled")){var i=l(e),o=i.hasClass("open");if(s(),!o){"ontouchstart"in document.documentElement&&!i.closest(".navbar-nav").length&&a(document.createElement("div")).addClass("dropdown-backdrop").insertAfter(a(this)).on("click",s);var n={relatedTarget:this};if(i.trigger(t=a.Event("show.bs.dropdown",n)),t.isDefaultPrevented())return;e.trigger("focus").attr("aria-expanded","true"),i.toggleClass("open").trigger(a.Event("shown.bs.dropdown",n))}return!1}},o.prototype.keydown=function(t){if(/(38|40|27|32)/.test(t.which)&&!/input|textarea/i.test(t.target.tagName)){var e=a(this);if(t.preventDefault(),t.stopPropagation(),!e.is(".disabled, :disabled")){var i=l(e),o=i.hasClass("open");if(!o&&27!=t.which||o&&27==t.which)return 27==t.which&&i.find(r).trigger("focus"),e.trigger("click");var n=i.find(".dropdown-menu li:not(.disabled):visible a");if(n.length){var s=n.index(t.target);38==t.which&&0<s&&s--,40==t.which&&s<n.length-1&&s++,~s||(s=0),n.eq(s).trigger("focus")}}}};var t=a.fn.dropdown;a.fn.dropdown=function e(i){return this.each(function(){var t=a(this),e=t.data("bs.dropdown");e||t.data("bs.dropdown",e=new o(this)),"string"==typeof i&&e[i].call(t)})},a.fn.dropdown.Constructor=o,a.fn.dropdown.noConflict=function(){return a.fn.dropdown=t,this},a(document).on("click.bs.dropdown.data-api",s).on("click.bs.dropdown.data-api",".dropdown form",function(t){t.stopPropagation()}).on("click.bs.dropdown.data-api",r,o.prototype.toggle).on("keydown.bs.dropdown.data-api",r,o.prototype.keydown).on("keydown.bs.dropdown.data-api",".dropdown-menu",o.prototype.keydown)}(jQuery),function(a){"use strict";var 
s=function(t,e){this.options=e,this.$body=a(document.body),this.$element=a(t),this.$dialog=this.$element.find(".modal-dialog"),this.$backdrop=null,this.isShown=null,this.originalBodyPad=null,this.scrollbarWidth=0,this.ignoreBackdropClick=!1,this.fixedContent=".navbar-fixed-top, .navbar-fixed-bottom",this.options.remote&&this.$element.find(".modal-content").load(this.options.remote,a.proxy(function(){this.$element.trigger("loaded.bs.modal")},this))};function r(o,n){return this.each(function(){var t=a(this),e=t.data("bs.modal"),i=a.extend({},s.DEFAULTS,t.data(),"object"==typeof o&&o);e||t.data("bs.modal",e=new s(this,i)),"string"==typeof o?e[o](n):i.show&&e.show(n)})}s.VERSION="3.4.1",s.TRANSITION_DURATION=300,s.BACKDROP_TRANSITION_DURATION=150,s.DEFAULTS={backdrop:!0,keyboard:!0,show:!0},s.prototype.toggle=function(t){return this.isShown?this.hide():this.show(t)},s.prototype.show=function(i){var o=this,t=a.Event("show.bs.modal",{relatedTarget:i});this.$element.trigger(t),this.isShown||t.isDefaultPrevented()||(this.isShown=!0,this.checkScrollbar(),this.setScrollbar(),this.$body.addClass("modal-open"),this.escape(),this.resize(),this.$element.on("click.dismiss.bs.modal",'[data-dismiss="modal"]',a.proxy(this.hide,this)),this.$dialog.on("mousedown.dismiss.bs.modal",function(){o.$element.one("mouseup.dismiss.bs.modal",function(t){a(t.target).is(o.$element)&&(o.ignoreBackdropClick=!0)})}),this.backdrop(function(){var t=a.support.transition&&o.$element.hasClass("fade");o.$element.parent().length||o.$element.appendTo(o.$body),o.$element.show().scrollTop(0),o.adjustDialog(),t&&o.$element[0].offsetWidth,o.$element.addClass("in"),o.enforceFocus();var e=a.Event("shown.bs.modal",{relatedTarget:i});t?o.$dialog.one("bsTransitionEnd",function(){o.$element.trigger("focus").trigger(e)}).emulateTransitionEnd(s.TRANSITION_DURATION):o.$element.trigger("focus").trigger(e)}))},s.prototype.hide=function(t){t&&t.preventDefault(),t=a.Event("hide.bs.modal"),this.$element.trigger(t),this.isShown&&!t.isDefaultPrevented()&&(this.isShown=!1,this.escape(),this.resize(),a(document).off("focusin.bs.modal"),this.$element.removeClass("in").off("click.dismiss.bs.modal").off("mouseup.dismiss.bs.modal"),this.$dialog.off("mousedown.dismiss.bs.modal"),a.support.transition&&this.$element.hasClass("fade")?this.$element.one("bsTransitionEnd",a.proxy(this.hideModal,this)).emulateTransitionEnd(s.TRANSITION_DURATION):this.hideModal())},s.prototype.enforceFocus=function(){a(document).off("focusin.bs.modal").on("focusin.bs.modal",a.proxy(function(t){document===t.target||this.$element[0]===t.target||this.$element.has(t.target).length||this.$element.trigger("focus")},this))},s.prototype.escape=function(){this.isShown&&this.options.keyboard?this.$element.on("keydown.dismiss.bs.modal",a.proxy(function(t){27==t.which&&this.hide()},this)):this.isShown||this.$element.off("keydown.dismiss.bs.modal")},s.prototype.resize=function(){this.isShown?a(window).on("resize.bs.modal",a.proxy(this.handleUpdate,this)):a(window).off("resize.bs.modal")},s.prototype.hideModal=function(){var t=this;this.$element.hide(),this.backdrop(function(){t.$body.removeClass("modal-open"),t.resetAdjustments(),t.resetScrollbar(),t.$element.trigger("hidden.bs.modal")})},s.prototype.removeBackdrop=function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},s.prototype.backdrop=function(t){var e=this,i=this.$element.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var 
o=a.support.transition&&i;if(this.$backdrop=a(document.createElement("div")).addClass("modal-backdrop "+i).appendTo(this.$body),this.$element.on("click.dismiss.bs.modal",a.proxy(function(t){this.ignoreBackdropClick?this.ignoreBackdropClick=!1:t.target===t.currentTarget&&("static"==this.options.backdrop?this.$element[0].focus():this.hide())},this)),o&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in"),!t)return;o?this.$backdrop.one("bsTransitionEnd",t).emulateTransitionEnd(s.BACKDROP_TRANSITION_DURATION):t()}else if(!this.isShown&&this.$backdrop){this.$backdrop.removeClass("in");var n=function(){e.removeBackdrop(),t&&t()};a.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one("bsTransitionEnd",n).emulateTransitionEnd(s.BACKDROP_TRANSITION_DURATION):n()}else t&&t()},s.prototype.handleUpdate=function(){this.adjustDialog()},s.prototype.adjustDialog=function(){var t=this.$element[0].scrollHeight>document.documentElement.clientHeight;this.$element.css({paddingLeft:!this.bodyIsOverflowing&&t?this.scrollbarWidth:"",paddingRight:this.bodyIsOverflowing&&!t?this.scrollbarWidth:""})},s.prototype.resetAdjustments=function(){this.$element.css({paddingLeft:"",paddingRight:""})},s.prototype.checkScrollbar=function(){var t=window.innerWidth;if(!t){var e=document.documentElement.getBoundingClientRect();t=e.right-Math.abs(e.left)}this.bodyIsOverflowing=document.body.clientWidth<t,this.scrollbarWidth=this.measureScrollbar()},s.prototype.setScrollbar=function(){var t=parseInt(this.$body.css("padding-right")||0,10);this.originalBodyPad=document.body.style.paddingRight||"";var n=this.scrollbarWidth;this.bodyIsOverflowing&&(this.$body.css("padding-right",t+n),a(this.fixedContent).each(function(t,e){var i=e.style.paddingRight,o=a(e).css("padding-right");a(e).data("padding-right",i).css("padding-right",parseFloat(o)+n+"px")}))},s.prototype.resetScrollbar=function(){this.$body.css("padding-right",this.originalBodyPad),a(this.fixedContent).each(function(t,e){var i=a(e).data("padding-right");a(e).removeData("padding-right"),e.style.paddingRight=i||""})},s.prototype.measureScrollbar=function(){var t=document.createElement("div");t.className="modal-scrollbar-measure",this.$body.append(t);var e=t.offsetWidth-t.clientWidth;return this.$body[0].removeChild(t),e};var t=a.fn.modal;a.fn.modal=r,a.fn.modal.Constructor=s,a.fn.modal.noConflict=function(){return a.fn.modal=t,this},a(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',function(t){var e=a(this),i=e.attr("href"),o=e.attr("data-target")||i&&i.replace(/.*(?=#[^\s]+$)/,""),n=a(document).find(o),s=n.data("bs.modal")?"toggle":a.extend({remote:!/#/.test(i)&&i},n.data(),e.data());e.is("a")&&t.preventDefault(),n.one("show.bs.modal",function(t){t.isDefaultPrevented()||n.one("hidden.bs.modal",function(){e.is(":visible")&&e.trigger("focus")})}),r.call(n,s,this)})}(jQuery),function(g){"use strict";var o=["sanitize","whiteList","sanitizeFn"],a=["background","cite","href","itemtype","longdesc","poster","src","xlink:href"],t={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},r=/^(?:(?:https?|mailto|ftp|tel|file):|[^&:/?#]*(?:[/?#]|$))/gi,l=/^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[a-z0-9+/]+=*$/i;function u(t,e){var 
i=t.nodeName.toLowerCase();if(-1!==g.inArray(i,e))return-1===g.inArray(i,a)||Boolean(t.nodeValue.match(r)||t.nodeValue.match(l));for(var o=g(e).filter(function(t,e){return e instanceof RegExp}),n=0,s=o.length;n<s;n++)if(i.match(o[n]))return!0;return!1}function n(t,e,i){if(0===t.length)return t;if(i&&"function"==typeof i)return i(t);if(!document.implementation||!document.implementation.createHTMLDocument)return t;var o=document.implementation.createHTMLDocument("sanitization");o.body.innerHTML=t;for(var n=g.map(e,function(t,e){return e}),s=g(o.body).find("*"),a=0,r=s.length;a<r;a++){var l=s[a],h=l.nodeName.toLowerCase();if(-1!==g.inArray(h,n))for(var d=g.map(l.attributes,function(t){return t}),p=[].concat(e["*"]||[],e[h]||[]),c=0,f=d.length;c<f;c++)u(d[c],p)||l.removeAttribute(d[c].nodeName);else l.parentNode.removeChild(l)}return o.body.innerHTML}var m=function(t,e){this.type=null,this.options=null,this.enabled=null,this.timeout=null,this.hoverState=null,this.$element=null,this.inState=null,this.init("tooltip",t,e)};m.VERSION="3.4.1",m.TRANSITION_DURATION=150,m.DEFAULTS={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,container:!1,viewport:{selector:"body",padding:0},sanitize:!0,sanitizeFn:null,whiteList:t},m.prototype.init=function(t,e,i){if(this.enabled=!0,this.type=t,this.$element=g(e),this.options=this.getOptions(i),this.$viewport=this.options.viewport&&g(document).find(g.isFunction(this.options.viewport)?this.options.viewport.call(this,this.$element):this.options.viewport.selector||this.options.viewport),this.inState={click:!1,hover:!1,focus:!1},this.$element[0]instanceof document.constructor&&!this.options.selector)throw new Error("`selector` option must be specified when initializing "+this.type+" on the window.document object!");for(var o=this.options.trigger.split(" "),n=o.length;n--;){var s=o[n];if("click"==s)this.$element.on("click."+this.type,this.options.selector,g.proxy(this.toggle,this));else if("manual"!=s){var a="hover"==s?"mouseenter":"focusin",r="hover"==s?"mouseleave":"focusout";this.$element.on(a+"."+this.type,this.options.selector,g.proxy(this.enter,this)),this.$element.on(r+"."+this.type,this.options.selector,g.proxy(this.leave,this))}}this.options.selector?this._options=g.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},m.prototype.getDefaults=function(){return m.DEFAULTS},m.prototype.getOptions=function(t){var e=this.$element.data();for(var i in e)e.hasOwnProperty(i)&&-1!==g.inArray(i,o)&&delete e[i];return(t=g.extend({},this.getDefaults(),e,t)).delay&&"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),t.sanitize&&(t.template=n(t.template,t.whiteList,t.sanitizeFn)),t},m.prototype.getDelegateOptions=function(){var i={},o=this.getDefaults();return this._options&&g.each(this._options,function(t,e){o[t]!=e&&(i[t]=e)}),i},m.prototype.enter=function(t){var e=t instanceof this.constructor?t:g(t.currentTarget).data("bs."+this.type);if(e||(e=new this.constructor(t.currentTarget,this.getDelegateOptions()),g(t.currentTarget).data("bs."+this.type,e)),t instanceof g.Event&&(e.inState["focusin"==t.type?"focus":"hover"]=!0),e.tip().hasClass("in")||"in"==e.hoverState)e.hoverState="in";else{if(clearTimeout(e.timeout),e.hoverState="in",!e.options.delay||!e.options.delay.show)return 
e.show();e.timeout=setTimeout(function(){"in"==e.hoverState&&e.show()},e.options.delay.show)}},m.prototype.isInStateTrue=function(){for(var t in this.inState)if(this.inState[t])return!0;return!1},m.prototype.leave=function(t){var e=t instanceof this.constructor?t:g(t.currentTarget).data("bs."+this.type);if(e||(e=new this.constructor(t.currentTarget,this.getDelegateOptions()),g(t.currentTarget).data("bs."+this.type,e)),t instanceof g.Event&&(e.inState["focusout"==t.type?"focus":"hover"]=!1),!e.isInStateTrue()){if(clearTimeout(e.timeout),e.hoverState="out",!e.options.delay||!e.options.delay.hide)return e.hide();e.timeout=setTimeout(function(){"out"==e.hoverState&&e.hide()},e.options.delay.hide)}},m.prototype.show=function(){var t=g.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){this.$element.trigger(t);var e=g.contains(this.$element[0].ownerDocument.documentElement,this.$element[0]);if(t.isDefaultPrevented()||!e)return;var i=this,o=this.tip(),n=this.getUID(this.type);this.setContent(),o.attr("id",n),this.$element.attr("aria-describedby",n),this.options.animation&&o.addClass("fade");var s="function"==typeof this.options.placement?this.options.placement.call(this,o[0],this.$element[0]):this.options.placement,a=/\s?auto?\s?/i,r=a.test(s);r&&(s=s.replace(a,"")||"top"),o.detach().css({top:0,left:0,display:"block"}).addClass(s).data("bs."+this.type,this),this.options.container?o.appendTo(g(document).find(this.options.container)):o.insertAfter(this.$element),this.$element.trigger("inserted.bs."+this.type);var l=this.getPosition(),h=o[0].offsetWidth,d=o[0].offsetHeight;if(r){var p=s,c=this.getPosition(this.$viewport);s="bottom"==s&&l.bottom+d>c.bottom?"top":"top"==s&&l.top-d<c.top?"bottom":"right"==s&&l.right+h>c.width?"left":"left"==s&&l.left-h<c.left?"right":s,o.removeClass(p).addClass(s)}var f=this.getCalculatedOffset(s,l,h,d);this.applyPlacement(f,s);var u=function(){var t=i.hoverState;i.$element.trigger("shown.bs."+i.type),i.hoverState=null,"out"==t&&i.leave(i)};g.support.transition&&this.$tip.hasClass("fade")?o.one("bsTransitionEnd",u).emulateTransitionEnd(m.TRANSITION_DURATION):u()}},m.prototype.applyPlacement=function(t,e){var i=this.tip(),o=i[0].offsetWidth,n=i[0].offsetHeight,s=parseInt(i.css("margin-top"),10),a=parseInt(i.css("margin-left"),10);isNaN(s)&&(s=0),isNaN(a)&&(a=0),t.top+=s,t.left+=a,g.offset.setOffset(i[0],g.extend({using:function(t){i.css({top:Math.round(t.top),left:Math.round(t.left)})}},t),0),i.addClass("in");var r=i[0].offsetWidth,l=i[0].offsetHeight;"top"==e&&l!=n&&(t.top=t.top+n-l);var h=this.getViewportAdjustedDelta(e,t,r,l);h.left?t.left+=h.left:t.top+=h.top;var d=/top|bottom/.test(e),p=d?2*h.left-o+r:2*h.top-n+l,c=d?"offsetWidth":"offsetHeight";i.offset(t),this.replaceArrow(p,i[0][c],d)},m.prototype.replaceArrow=function(t,e,i){this.arrow().css(i?"left":"top",50*(1-t/e)+"%").css(i?"top":"left","")},m.prototype.setContent=function(){var t=this.tip(),e=this.getTitle();this.options.html?(this.options.sanitize&&(e=n(e,this.options.whiteList,this.options.sanitizeFn)),t.find(".tooltip-inner").html(e)):t.find(".tooltip-inner").text(e),t.removeClass("fade in top bottom left right")},m.prototype.hide=function(t){var e=this,i=g(this.$tip),o=g.Event("hide.bs."+this.type);function n(){"in"!=e.hoverState&&i.detach(),e.$element&&e.$element.removeAttr("aria-describedby").trigger("hidden.bs."+e.type),t&&t()}if(this.$element.trigger(o),!o.isDefaultPrevented())return 
i.removeClass("in"),g.support.transition&&i.hasClass("fade")?i.one("bsTransitionEnd",n).emulateTransitionEnd(m.TRANSITION_DURATION):n(),this.hoverState=null,this},m.prototype.fixTitle=function(){var t=this.$element;(t.attr("title")||"string"!=typeof t.attr("data-original-title"))&&t.attr("data-original-title",t.attr("title")||"").attr("title","")},m.prototype.hasContent=function(){return this.getTitle()},m.prototype.getPosition=function(t){var e=(t=t||this.$element)[0],i="BODY"==e.tagName,o=e.getBoundingClientRect();null==o.width&&(o=g.extend({},o,{width:o.right-o.left,height:o.bottom-o.top}));var n=window.SVGElement&&e instanceof window.SVGElement,s=i?{top:0,left:0}:n?null:t.offset(),a={scroll:i?document.documentElement.scrollTop||document.body.scrollTop:t.scrollTop()},r=i?{width:g(window).width(),height:g(window).height()}:null;return g.extend({},o,a,r,s)},m.prototype.getCalculatedOffset=function(t,e,i,o){return"bottom"==t?{top:e.top+e.height,left:e.left+e.width/2-i/2}:"top"==t?{top:e.top-o,left:e.left+e.width/2-i/2}:"left"==t?{top:e.top+e.height/2-o/2,left:e.left-i}:{top:e.top+e.height/2-o/2,left:e.left+e.width}},m.prototype.getViewportAdjustedDelta=function(t,e,i,o){var n={top:0,left:0};if(!this.$viewport)return n;var s=this.options.viewport&&this.options.viewport.padding||0,a=this.getPosition(this.$viewport);if(/right|left/.test(t)){var r=e.top-s-a.scroll,l=e.top+s-a.scroll+o;r<a.top?n.top=a.top-r:l>a.top+a.height&&(n.top=a.top+a.height-l)}else{var h=e.left-s,d=e.left+s+i;h<a.left?n.left=a.left-h:d>a.right&&(n.left=a.left+a.width-d)}return n},m.prototype.getTitle=function(){var t=this.$element,e=this.options;return t.attr("data-original-title")||("function"==typeof e.title?e.title.call(t[0]):e.title)},m.prototype.getUID=function(t){for(;t+=~~(1e6*Math.random()),document.getElementById(t););return t},m.prototype.tip=function(){if(!this.$tip&&(this.$tip=g(this.options.template),1!=this.$tip.length))throw new Error(this.type+" `template` option must consist of exactly 1 top-level element!");return this.$tip},m.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},m.prototype.enable=function(){this.enabled=!0},m.prototype.disable=function(){this.enabled=!1},m.prototype.toggleEnabled=function(){this.enabled=!this.enabled},m.prototype.toggle=function(t){var e=this;t&&((e=g(t.currentTarget).data("bs."+this.type))||(e=new this.constructor(t.currentTarget,this.getDelegateOptions()),g(t.currentTarget).data("bs."+this.type,e))),t?(e.inState.click=!e.inState.click,e.isInStateTrue()?e.enter(e):e.leave(e)):e.tip().hasClass("in")?e.leave(e):e.enter(e)},m.prototype.destroy=function(){var t=this;clearTimeout(this.timeout),this.hide(function(){t.$element.off("."+t.type).removeData("bs."+t.type),t.$tip&&t.$tip.detach(),t.$tip=null,t.$arrow=null,t.$viewport=null,t.$element=null})},m.prototype.sanitizeHtml=function(t){return n(t,this.options.whiteList,this.options.sanitizeFn)};var e=g.fn.tooltip;g.fn.tooltip=function i(o){return this.each(function(){var t=g(this),e=t.data("bs.tooltip"),i="object"==typeof o&&o;!e&&/destroy|hide/.test(o)||(e||t.data("bs.tooltip",e=new m(this,i)),"string"==typeof o&&e[o]())})},g.fn.tooltip.Constructor=m,g.fn.tooltip.noConflict=function(){return g.fn.tooltip=e,this}}(jQuery),function(n){"use strict";var s=function(t,e){this.init("popover",t,e)};if(!n.fn.tooltip)throw new Error("Popover requires 
tooltip.js");s.VERSION="3.4.1",s.DEFAULTS=n.extend({},n.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),((s.prototype=n.extend({},n.fn.tooltip.Constructor.prototype)).constructor=s).prototype.getDefaults=function(){return s.DEFAULTS},s.prototype.setContent=function(){var t=this.tip(),e=this.getTitle(),i=this.getContent();if(this.options.html){var o=typeof i;this.options.sanitize&&(e=this.sanitizeHtml(e),"string"===o&&(i=this.sanitizeHtml(i))),t.find(".popover-title").html(e),t.find(".popover-content").children().detach().end()["string"===o?"html":"append"](i)}else t.find(".popover-title").text(e),t.find(".popover-content").children().detach().end().text(i);t.removeClass("fade top bottom left right in"),t.find(".popover-title").html()||t.find(".popover-title").hide()},s.prototype.hasContent=function(){return this.getTitle()||this.getContent()},s.prototype.getContent=function(){var t=this.$element,e=this.options;return t.attr("data-content")||("function"==typeof e.content?e.content.call(t[0]):e.content)},s.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")};var t=n.fn.popover;n.fn.popover=function e(o){return this.each(function(){var t=n(this),e=t.data("bs.popover"),i="object"==typeof o&&o;!e&&/destroy|hide/.test(o)||(e||t.data("bs.popover",e=new s(this,i)),"string"==typeof o&&e[o]())})},n.fn.popover.Constructor=s,n.fn.popover.noConflict=function(){return n.fn.popover=t,this}}(jQuery),function(s){"use strict";function n(t,e){this.$body=s(document.body),this.$scrollElement=s(t).is(document.body)?s(window):s(t),this.options=s.extend({},n.DEFAULTS,e),this.selector=(this.options.target||"")+" .nav li > a",this.offsets=[],this.targets=[],this.activeTarget=null,this.scrollHeight=0,this.$scrollElement.on("scroll.bs.scrollspy",s.proxy(this.process,this)),this.refresh(),this.process()}function e(o){return this.each(function(){var t=s(this),e=t.data("bs.scrollspy"),i="object"==typeof o&&o;e||t.data("bs.scrollspy",e=new n(this,i)),"string"==typeof o&&e[o]()})}n.VERSION="3.4.1",n.DEFAULTS={offset:10},n.prototype.getScrollHeight=function(){return this.$scrollElement[0].scrollHeight||Math.max(this.$body[0].scrollHeight,document.documentElement.scrollHeight)},n.prototype.refresh=function(){var t=this,o="offset",n=0;this.offsets=[],this.targets=[],this.scrollHeight=this.getScrollHeight(),s.isWindow(this.$scrollElement[0])||(o="position",n=this.$scrollElement.scrollTop()),this.$body.find(this.selector).map(function(){var t=s(this),e=t.data("target")||t.attr("href"),i=/^#./.test(e)&&s(e);return i&&i.length&&i.is(":visible")&&[[i[o]().top+n,e]]||null}).sort(function(t,e){return t[0]-e[0]}).each(function(){t.offsets.push(this[0]),t.targets.push(this[1])})},n.prototype.process=function(){var t,e=this.$scrollElement.scrollTop()+this.options.offset,i=this.getScrollHeight(),o=this.options.offset+i-this.$scrollElement.height(),n=this.offsets,s=this.targets,a=this.activeTarget;if(this.scrollHeight!=i&&this.refresh(),o<=e)return a!=(t=s[s.length-1])&&this.activate(t);if(a&&e<n[0])return this.activeTarget=null,this.clear();for(t=n.length;t--;)a!=s[t]&&e>=n[t]&&(n[t+1]===undefined||e<n[t+1])&&this.activate(s[t])},n.prototype.activate=function(t){this.activeTarget=t,this.clear();var 
e=this.selector+'[data-target="'+t+'"],'+this.selector+'[href="'+t+'"]',i=s(e).parents("li").addClass("active");i.parent(".dropdown-menu").length&&(i=i.closest("li.dropdown").addClass("active")),i.trigger("activate.bs.scrollspy")},n.prototype.clear=function(){s(this.selector).parentsUntil(this.options.target,".active").removeClass("active")};var t=s.fn.scrollspy;s.fn.scrollspy=e,s.fn.scrollspy.Constructor=n,s.fn.scrollspy.noConflict=function(){return s.fn.scrollspy=t,this},s(window).on("load.bs.scrollspy.data-api",function(){s('[data-spy="scroll"]').each(function(){var t=s(this);e.call(t,t.data())})})}(jQuery),function(r){"use strict";var a=function(t){this.element=r(t)};function e(i){return this.each(function(){var t=r(this),e=t.data("bs.tab");e||t.data("bs.tab",e=new a(this)),"string"==typeof i&&e[i]()})}a.VERSION="3.4.1",a.TRANSITION_DURATION=150,a.prototype.show=function(){var t=this.element,e=t.closest("ul:not(.dropdown-menu)"),i=t.data("target");if(i||(i=(i=t.attr("href"))&&i.replace(/.*(?=#[^\s]*$)/,"")),!t.parent("li").hasClass("active")){var o=e.find(".active:last a"),n=r.Event("hide.bs.tab",{relatedTarget:t[0]}),s=r.Event("show.bs.tab",{relatedTarget:o[0]});if(o.trigger(n),t.trigger(s),!s.isDefaultPrevented()&&!n.isDefaultPrevented()){var a=r(document).find(i);this.activate(t.closest("li"),e),this.activate(a,a.parent(),function(){o.trigger({type:"hidden.bs.tab",relatedTarget:t[0]}),t.trigger({type:"shown.bs.tab",relatedTarget:o[0]})})}}},a.prototype.activate=function(t,e,i){var o=e.find("> .active"),n=i&&r.support.transition&&(o.length&&o.hasClass("fade")||!!e.find("> .fade").length);function s(){o.removeClass("active").find("> .dropdown-menu > .active").removeClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!1),t.addClass("active").find('[data-toggle="tab"]').attr("aria-expanded",!0),n?(t[0].offsetWidth,t.addClass("in")):t.removeClass("fade"),t.parent(".dropdown-menu").length&&t.closest("li.dropdown").addClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!0),i&&i()}o.length&&n?o.one("bsTransitionEnd",s).emulateTransitionEnd(a.TRANSITION_DURATION):s(),o.removeClass("in")};var t=r.fn.tab;r.fn.tab=e,r.fn.tab.Constructor=a,r.fn.tab.noConflict=function(){return r.fn.tab=t,this};var i=function(t){t.preventDefault(),e.call(r(this),"show")};r(document).on("click.bs.tab.data-api",'[data-toggle="tab"]',i).on("click.bs.tab.data-api",'[data-toggle="pill"]',i)}(jQuery),function(l){"use strict";var h=function(t,e){this.options=l.extend({},h.DEFAULTS,e);var i=this.options.target===h.DEFAULTS.target?l(this.options.target):l(document).find(this.options.target);this.$target=i.on("scroll.bs.affix.data-api",l.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",l.proxy(this.checkPositionWithEventLoop,this)),this.$element=l(t),this.affixed=null,this.unpin=null,this.pinnedOffset=null,this.checkPosition()};function i(o){return this.each(function(){var t=l(this),e=t.data("bs.affix"),i="object"==typeof o&&o;e||t.data("bs.affix",e=new h(this,i)),"string"==typeof o&&e[o]()})}h.VERSION="3.4.1",h.RESET="affix affix-top affix-bottom",h.DEFAULTS={offset:0,target:window},h.prototype.getState=function(t,e,i,o){var n=this.$target.scrollTop(),s=this.$element.offset(),a=this.$target.height();if(null!=i&&"top"==this.affixed)return n<i&&"top";if("bottom"==this.affixed)return null!=i?!(n+this.unpin<=s.top)&&"bottom":!(n+a<=t-o)&&"bottom";var r=null==this.affixed,l=r?n:s.top;return 
null!=i&&n<=i?"top":null!=o&&t-o<=l+(r?a:e)&&"bottom"},h.prototype.getPinnedOffset=function(){if(this.pinnedOffset)return this.pinnedOffset;this.$element.removeClass(h.RESET).addClass("affix");var t=this.$target.scrollTop(),e=this.$element.offset();return this.pinnedOffset=e.top-t},h.prototype.checkPositionWithEventLoop=function(){setTimeout(l.proxy(this.checkPosition,this),1)},h.prototype.checkPosition=function(){if(this.$element.is(":visible")){var t=this.$element.height(),e=this.options.offset,i=e.top,o=e.bottom,n=Math.max(l(document).height(),l(document.body).height());"object"!=typeof e&&(o=i=e),"function"==typeof i&&(i=e.top(this.$element)),"function"==typeof o&&(o=e.bottom(this.$element));var s=this.getState(n,t,i,o);if(this.affixed!=s){null!=this.unpin&&this.$element.css("top","");var a="affix"+(s?"-"+s:""),r=l.Event(a+".bs.affix");if(this.$element.trigger(r),r.isDefaultPrevented())return;this.affixed=s,this.unpin="bottom"==s?this.getPinnedOffset():null,this.$element.removeClass(h.RESET).addClass(a).trigger(a.replace("affix","affixed")+".bs.affix")}"bottom"==s&&this.$element.offset({top:n-t-o})}};var t=l.fn.affix;l.fn.affix=i,l.fn.affix.Constructor=h,l.fn.affix.noConflict=function(){return l.fn.affix=t,this},l(window).on("load",function(){l('[data-spy="affix"]').each(function(){var t=l(this),e=t.data();e.offset=e.offset||{},null!=e.offsetBottom&&(e.offset.bottom=e.offsetBottom),null!=e.offsetTop&&(e.offset.top=e.offsetTop),i.call(t,e)})})}(jQuery);
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/d3-3.5.17.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/d3-3.5.17.min.js
deleted file mode 100644
index 1664873..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/d3-3.5.17.min.js
+++ /dev/null
@@ -1,5 +0,0 @@
-!function(){function n(n){return n&&(n.ownerDocument||n.document||n).documentElement}function t(n){return n&&(n.ownerDocument&&n.ownerDocument.defaultView||n.document&&n||n.defaultView)}function e(n,t){return t>n?-1:n>t?1:n>=t?0:NaN}function r(n){return null===n?NaN:+n}function i(n){return!isNaN(n)}function u(n){return{left:function(t,e,r,i){for(arguments.length<3&&(r=0),arguments.length<4&&(i=t.length);i>r;){var u=r+i>>>1;n(t[u],e)<0?r=u+1:i=u}return r},right:function(t,e,r,i){for(arguments.length<3&&(r=0),arguments.length<4&&(i=t.length);i>r;){var u=r+i>>>1;n(t[u],e)>0?i=u:r=u+1}return r}}}function o(n){return n.length}function a(n){for(var t=1;n*t%1;)t*=10;return t}function l(n,t){for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}function c(){this._=Object.create(null)}function f(n){return(n+="")===bo||n[0]===_o?_o+n:n}function s(n){return(n+="")[0]===_o?n.slice(1):n}function h(n){return f(n)in this._}function p(n){return(n=f(n))in this._&&delete this._[n]}function g(){var n=[];for(var t in this._)n.push(s(t));return n}function v(){var n=0;for(var t in this._)++n;return n}function d(){for(var n in this._)return!1;return!0}function y(){this._=Object.create(null)}function m(n){return n}function M(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function x(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.slice(1);for(var e=0,r=wo.length;r>e;++e){var i=wo[e]+t;if(i in n)return i}}function b(){}function _(){}function w(n){function t(){for(var t,r=e,i=-1,u=r.length;++i<u;)(t=r[i].on)&&t.apply(this,arguments);return n}var e=[],r=new c;return t.on=function(t,i){var u,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,u=e.indexOf(o)).concat(e.slice(u+1)),r.remove(t)),i&&e.push(r.set(t,{on:i})),n)},t}function S(){ao.event.preventDefault()}function k(){for(var n,t=ao.event;n=t.sourceEvent;)t=n;return t}function N(n){for(var t=new _,e=0,r=arguments.length;++e<r;)t[arguments[e]]=w(t);return t.of=function(e,r){return function(i){try{var u=i.sourceEvent=ao.event;i.target=n,ao.event=i,t[i.type].apply(e,r)}finally{ao.event=u}}},t}function E(n){return ko(n,Co),n}function A(n){return"function"==typeof n?n:function(){return No(n,this)}}function C(n){return"function"==typeof n?n:function(){return Eo(n,this)}}function z(n,t){function e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function i(){this.setAttribute(n,t)}function u(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=ao.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?u:i}function L(n){return n.trim().replace(/\s+/g," ")}function q(n){return new RegExp("(?:^|\\s+)"+ao.requote(n)+"(?:\\s+|$)","g")}function T(n){return(n+"").trim().split(/^|\s+/)}function R(n,t){function e(){for(var e=-1;++e<i;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<i;)n[e](this,r)}n=T(n).map(D);var i=n.length;return"function"==typeof t?r:e}function D(n){var t=q(n);return function(e,r){if(i=e.classList)return r?i.add(n):i.remove(n);var i=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(i)||e.setAttribute("class",L(i+" "+n))):e.setAttribute("class",L(i.replace(t," ")))}}function P(n,t,e){function r(){this.style.removeProperty(n)}function i(){this.style.setProperty(n,t,e)}function u(){var 
r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?u:i}function U(n,t){function e(){delete this[n]}function r(){this[n]=t}function i(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?i:r}function j(n){function t(){var t=this.ownerDocument,e=this.namespaceURI;return e===zo&&t.documentElement.namespaceURI===zo?t.createElement(n):t.createElementNS(e,n)}function e(){return this.ownerDocument.createElementNS(n.space,n.local)}return"function"==typeof n?n:(n=ao.ns.qualify(n)).local?e:t}function F(){var n=this.parentNode;n&&n.removeChild(this)}function H(n){return{__data__:n}}function O(n){return function(){return Ao(this,n)}}function I(n){return arguments.length||(n=e),function(t,e){return t&&e?n(t.__data__,e.__data__):!t-!e}}function Y(n,t){for(var e=0,r=n.length;r>e;e++)for(var i,u=n[e],o=0,a=u.length;a>o;o++)(i=u[o])&&t(i,o,e);return n}function Z(n){return ko(n,qo),n}function V(n){var t,e;return function(r,i,u){var o,a=n[u].update,l=a.length;for(u!=e&&(e=u,t=0),i>=t&&(t=i+1);!(o=a[t])&&++t<l;);return o}}function X(n,t,e){function r(){var t=this[o];t&&(this.removeEventListener(n,t,t.$),delete this[o])}function i(){var i=l(t,co(arguments));r.call(this),this.addEventListener(n,this[o]=i,i.$=e),i._=t}function u(){var t,e=new RegExp("^__on([^.]+)"+ao.requote(n)+"$");for(var r in this)if(t=r.match(e)){var i=this[r];this.removeEventListener(t[1],i,i.$),delete this[r]}}var o="__on"+n,a=n.indexOf("."),l=$;a>0&&(n=n.slice(0,a));var c=To.get(n);return c&&(n=c,l=B),a?t?i:r:t?b:u}function $(n,t){return function(e){var r=ao.event;ao.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{ao.event=r}}}function B(n,t){var e=$(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function W(e){var r=".dragsuppress-"+ ++Do,i="click"+r,u=ao.select(t(e)).on("touchmove"+r,S).on("dragstart"+r,S).on("selectstart"+r,S);if(null==Ro&&(Ro="onselectstart"in e?!1:x(e.style,"userSelect")),Ro){var o=n(e).style,a=o[Ro];o[Ro]="none"}return function(n){if(u.on(r,null),Ro&&(o[Ro]=a),n){var t=function(){u.on(i,null)};u.on(i,function(){S(),t()},!0),setTimeout(t,0)}}}function J(n,e){e.changedTouches&&(e=e.changedTouches[0]);var r=n.ownerSVGElement||n;if(r.createSVGPoint){var i=r.createSVGPoint();if(0>Po){var u=t(n);if(u.scrollX||u.scrollY){r=ao.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var o=r[0][0].getScreenCTM();Po=!(o.f||o.e),r.remove()}}return Po?(i.x=e.pageX,i.y=e.pageY):(i.x=e.clientX,i.y=e.clientY),i=i.matrixTransform(n.getScreenCTM().inverse()),[i.x,i.y]}var a=n.getBoundingClientRect();return[e.clientX-a.left-n.clientLeft,e.clientY-a.top-n.clientTop]}function G(){return ao.event.changedTouches[0].identifier}function K(n){return n>0?1:0>n?-1:0}function Q(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function nn(n){return n>1?0:-1>n?Fo:Math.acos(n)}function tn(n){return n>1?Io:-1>n?-Io:Math.asin(n)}function en(n){return((n=Math.exp(n))-1/n)/2}function rn(n){return((n=Math.exp(n))+1/n)/2}function un(n){return((n=Math.exp(2*n))-1)/(n+1)}function on(n){return(n=Math.sin(n/2))*n}function an(){}function ln(n,t,e){return this instanceof ln?(this.h=+n,this.s=+t,void(this.l=+e)):arguments.length<2?n instanceof ln?new ln(n.h,n.s,n.l):_n(""+n,wn,ln):new ln(n,t,e)}function cn(n,t,e){function r(n){return 
n>360?n-=360:0>n&&(n+=360),60>n?u+(o-u)*n/60:180>n?o:240>n?u+(o-u)*(240-n)/60:u}function i(n){return Math.round(255*r(n))}var u,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,u=2*e-o,new mn(i(n+120),i(n),i(n-120))}function fn(n,t,e){return this instanceof fn?(this.h=+n,this.c=+t,void(this.l=+e)):arguments.length<2?n instanceof fn?new fn(n.h,n.c,n.l):n instanceof hn?gn(n.l,n.a,n.b):gn((n=Sn((n=ao.rgb(n)).r,n.g,n.b)).l,n.a,n.b):new fn(n,t,e)}function sn(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),new hn(e,Math.cos(n*=Yo)*t,Math.sin(n)*t)}function hn(n,t,e){return this instanceof hn?(this.l=+n,this.a=+t,void(this.b=+e)):arguments.length<2?n instanceof hn?new hn(n.l,n.a,n.b):n instanceof fn?sn(n.h,n.c,n.l):Sn((n=mn(n)).r,n.g,n.b):new hn(n,t,e)}function pn(n,t,e){var r=(n+16)/116,i=r+t/500,u=r-e/200;return i=vn(i)*na,r=vn(r)*ta,u=vn(u)*ea,new mn(yn(3.2404542*i-1.5371385*r-.4985314*u),yn(-.969266*i+1.8760108*r+.041556*u),yn(.0556434*i-.2040259*r+1.0572252*u))}function gn(n,t,e){return n>0?new fn(Math.atan2(e,t)*Zo,Math.sqrt(t*t+e*e),n):new fn(NaN,NaN,n)}function vn(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function dn(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function yn(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function mn(n,t,e){return this instanceof mn?(this.r=~~n,this.g=~~t,void(this.b=~~e)):arguments.length<2?n instanceof mn?new mn(n.r,n.g,n.b):_n(""+n,mn,cn):new mn(n,t,e)}function Mn(n){return new mn(n>>16,n>>8&255,255&n)}function xn(n){return Mn(n)+""}function bn(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function _n(n,t,e){var r,i,u,o=0,a=0,l=0;if(r=/([a-z]+)\((.*)\)/.exec(n=n.toLowerCase()))switch(i=r[2].split(","),r[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return t(Nn(i[0]),Nn(i[1]),Nn(i[2]))}return(u=ua.get(n))?t(u.r,u.g,u.b):(null==n||"#"!==n.charAt(0)||isNaN(u=parseInt(n.slice(1),16))||(4===n.length?(o=(3840&u)>>4,o=o>>4|o,a=240&u,a=a>>4|a,l=15&u,l=l<<4|l):7===n.length&&(o=(16711680&u)>>16,a=(65280&u)>>8,l=255&u)),t(o,a,l))}function wn(n,t,e){var r,i,u=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-u,l=(o+u)/2;return a?(i=.5>l?a/(o+u):a/(2-o-u),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=NaN,i=l>0&&1>l?0:r),new ln(r,i,l)}function Sn(n,t,e){n=kn(n),t=kn(t),e=kn(e);var r=dn((.4124564*n+.3575761*t+.1804375*e)/na),i=dn((.2126729*n+.7151522*t+.072175*e)/ta),u=dn((.0193339*n+.119192*t+.9503041*e)/ea);return hn(116*i-16,500*(r-i),200*(i-u))}function kn(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Nn(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function En(n){return"function"==typeof n?n:function(){return n}}function An(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),Cn(t,e,n,r)}}function Cn(n,t,e,r){function i(){var n,t=l.status;if(!t&&Ln(l)||t>=200&&300>t||304===t){try{n=e.call(u,l)}catch(r){return void o.error.call(u,r)}o.load.call(u,n)}else o.error.call(u,l)}var u={},o=ao.dispatch("beforesend","progress","load","error"),a={},l=new XMLHttpRequest,c=null;return!this.XDomainRequest||"withCredentials"in l||!/^(http(s)?:)?\/\//.test(n)||(l=new XDomainRequest),"onload"in l?l.onload=l.onerror=i:l.onreadystatechange=function(){l.readyState>3&&i()},l.onprogress=function(n){var t=ao.event;ao.event=n;try{o.progress.call(u,l)}finally{ao.event=t}},u.header=function(n,t){return 
n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",u)},u.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",u):t},u.responseType=function(n){return arguments.length?(c=n,u):c},u.response=function(n){return e=n,u},["get","post"].forEach(function(n){u[n]=function(){return u.send.apply(u,[n].concat(co(arguments)))}}),u.send=function(e,r,i){if(2===arguments.length&&"function"==typeof r&&(i=r,r=null),l.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),l.setRequestHeader)for(var f in a)l.setRequestHeader(f,a[f]);return null!=t&&l.overrideMimeType&&l.overrideMimeType(t),null!=c&&(l.responseType=c),null!=i&&u.on("error",i).on("load",function(n){i(null,n)}),o.beforesend.call(u,l),l.send(null==r?null:r),u},u.abort=function(){return l.abort(),u},ao.rebind(u,o,"on"),null==r?u:u.get(zn(r))}function zn(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Ln(n){var t=n.responseType;return t&&"text"!==t?n.response:n.responseText}function qn(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var i=e+t,u={c:n,t:i,n:null};return aa?aa.n=u:oa=u,aa=u,la||(ca=clearTimeout(ca),la=1,fa(Tn)),u}function Tn(){var n=Rn(),t=Dn()-n;t>24?(isFinite(t)&&(clearTimeout(ca),ca=setTimeout(Tn,t)),la=0):(la=1,fa(Tn))}function Rn(){for(var n=Date.now(),t=oa;t;)n>=t.t&&t.c(n-t.t)&&(t.c=null),t=t.n;return n}function Dn(){for(var n,t=oa,e=1/0;t;)t.c?(t.t<e&&(e=t.t),t=(n=t).n):t=n?n.n=t.n:oa=t.n;return aa=n,e}function Pn(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Un(n,t){var e=Math.pow(10,3*xo(8-t));return{scale:t>8?function(n){return n/e}:function(n){return n*e},symbol:n}}function jn(n){var t=n.decimal,e=n.thousands,r=n.grouping,i=n.currency,u=r&&e?function(n,t){for(var i=n.length,u=[],o=0,a=r[0],l=0;i>0&&a>0&&(l+a+1>t&&(a=Math.max(1,t-l)),u.push(n.substring(i-=a,i+a)),!((l+=a+1)>t));)a=r[o=(o+1)%r.length];return u.reverse().join(e)}:m;return function(n){var e=ha.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"-",l=e[4]||"",c=e[5],f=+e[6],s=e[7],h=e[8],p=e[9],g=1,v="",d="",y=!1,m=!0;switch(h&&(h=+h.substring(1)),(c||"0"===r&&"="===o)&&(c=r="0",o="="),p){case"n":s=!0,p="g";break;case"%":g=100,d="%",p="f";break;case"p":g=100,d="%",p="r";break;case"b":case"o":case"x":case"X":"#"===l&&(v="0"+p.toLowerCase());case"c":m=!1;case"d":y=!0,h=0;break;case"s":g=-1,p="r"}"$"===l&&(v=i[0],d=i[1]),"r"!=p||h||(p="g"),null!=h&&("g"==p?h=Math.max(1,Math.min(21,h)):"e"!=p&&"f"!=p||(h=Math.max(0,Math.min(20,h)))),p=pa.get(p)||Fn;var M=c&&s;return function(n){var e=d;if(y&&n%1)return"";var i=0>n||0===n&&0>1/n?(n=-n,"-"):"-"===a?"":a;if(0>g){var l=ao.formatPrefix(n,h);n=l.scale(n),e=l.symbol+d}else n*=g;n=p(n,h);var x,b,_=n.lastIndexOf(".");if(0>_){var w=m?n.lastIndexOf("e"):-1;0>w?(x=n,b=""):(x=n.substring(0,w),b=n.substring(w))}else x=n.substring(0,_),b=t+n.substring(_+1);!c&&s&&(x=u(x,1/0));var S=v.length+x.length+b.length+(M?0:i.length),k=f>S?new Array(S=f-S+1).join(r):"";return M&&(x=u(k+x,k.length?f-b.length:1/0)),i+=v,n=x+b,("<"===o?i+n+k:">"===o?k+i+n:"^"===o?k.substring(0,S>>=1)+i+n+k.substring(S):i+(M?n:k+n))+e}}}function Fn(n){return n+""}function Hn(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function On(n,t,e){function r(t){var e=n(t),r=u(e,1);return r-t>t-e?e:r}function i(e){return t(e=n(new va(e-1)),1),e}function u(n,e){return t(n=new va(+n),e),n}function o(n,r,u){var o=i(n),a=[];if(u>1)for(;r>o;)e(o)%u||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{va=Hn;var 
r=new Hn;return r._=n,o(r,t,e)}finally{va=Date}}n.floor=n,n.round=r,n.ceil=i,n.offset=u,n.range=o;var l=n.utc=In(n);return l.floor=l,l.round=In(r),l.ceil=In(i),l.offset=In(u),l.range=a,n}function In(n){return function(t,e){try{va=Hn;var r=new Hn;return r._=t,n(r,e)._}finally{va=Date}}}function Yn(n){function t(n){function t(t){for(var e,i,u,o=[],a=-1,l=0;++a<r;)37===n.charCodeAt(a)&&(o.push(n.slice(l,a)),null!=(i=ya[e=n.charAt(++a)])&&(e=n.charAt(++a)),(u=A[e])&&(e=u(t,null==i?"e"===e?" ":"0":i)),o.push(e),l=a+1);return o.push(n.slice(l,a)),o.join("")}var r=n.length;return t.parse=function(t){var r={y:1900,m:0,d:1,H:0,M:0,S:0,L:0,Z:null},i=e(r,n,t,0);if(i!=t.length)return null;"p"in r&&(r.H=r.H%12+12*r.p);var u=null!=r.Z&&va!==Hn,o=new(u?Hn:va);return"j"in r?o.setFullYear(r.y,0,r.j):"W"in r||"U"in r?("w"in r||(r.w="W"in r?1:0),o.setFullYear(r.y,0,1),o.setFullYear(r.y,0,"W"in r?(r.w+6)%7+7*r.W-(o.getDay()+5)%7:r.w+7*r.U-(o.getDay()+6)%7)):o.setFullYear(r.y,r.m,r.d),o.setHours(r.H+(r.Z/100|0),r.M+r.Z%100,r.S,r.L),u?o._:o},t.toString=function(){return n},t}function e(n,t,e,r){for(var i,u,o,a=0,l=t.length,c=e.length;l>a;){if(r>=c)return-1;if(i=t.charCodeAt(a++),37===i){if(o=t.charAt(a++),u=C[o in ya?t.charAt(a++):o],!u||(r=u(n,e,r))<0)return-1}else if(i!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){_.lastIndex=0;var r=_.exec(t.slice(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){x.lastIndex=0;var r=x.exec(t.slice(e));return r?(n.w=b.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){N.lastIndex=0;var r=N.exec(t.slice(e));return r?(n.m=E.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.slice(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,A.c.toString(),t,r)}function l(n,t,r){return e(n,A.x.toString(),t,r)}function c(n,t,r){return e(n,A.X.toString(),t,r)}function f(n,t,e){var r=M.get(t.slice(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var s=n.dateTime,h=n.date,p=n.time,g=n.periods,v=n.days,d=n.shortDays,y=n.months,m=n.shortMonths;t.utc=function(n){function e(n){try{va=Hn;var t=new va;return t._=n,r(t)}finally{va=Date}}var r=t(n);return e.parse=function(n){try{va=Hn;var t=r.parse(n);return t&&t._}finally{va=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ct;var M=ao.map(),x=Vn(v),b=Xn(v),_=Vn(d),w=Xn(d),S=Vn(y),k=Xn(y),N=Vn(m),E=Xn(m);g.forEach(function(n,t){M.set(n.toLowerCase(),t)});var A={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return m[n.getMonth()]},B:function(n){return y[n.getMonth()]},c:t(s),d:function(n,t){return Zn(n.getDate(),t,2)},e:function(n,t){return Zn(n.getDate(),t,2)},H:function(n,t){return Zn(n.getHours(),t,2)},I:function(n,t){return Zn(n.getHours()%12||12,t,2)},j:function(n,t){return Zn(1+ga.dayOfYear(n),t,3)},L:function(n,t){return Zn(n.getMilliseconds(),t,3)},m:function(n,t){return Zn(n.getMonth()+1,t,2)},M:function(n,t){return Zn(n.getMinutes(),t,2)},p:function(n){return g[+(n.getHours()>=12)]},S:function(n,t){return Zn(n.getSeconds(),t,2)},U:function(n,t){return Zn(ga.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Zn(ga.mondayOfYear(n),t,2)},x:t(h),X:t(p),y:function(n,t){return Zn(n.getFullYear()%100,t,2)},Y:function(n,t){return Zn(n.getFullYear()%1e4,t,4)},Z:at,"%":function(){return"%"}},C={a:r,A:i,b:u,B:o,c:a,d:tt,e:tt,H:rt,I:rt,j:et,L:ot,m:nt,M:it,p:f,S:ut,U:Bn,w:$n,W:Wn,x:l,X:c,y:Gn,Y:Jn,Z:Kn,"%":lt};return t}function Zn(n,t,e){var 
r=0>n?"-":"",i=(r?-n:n)+"",u=i.length;return r+(e>u?new Array(e-u+1).join(t)+i:i)}function Vn(n){return new RegExp("^(?:"+n.map(ao.requote).join("|")+")","i")}function Xn(n){for(var t=new c,e=-1,r=n.length;++e<r;)t.set(n[e].toLowerCase(),e);return t}function $n(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function Bn(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e));return r?(n.U=+r[0],e+r[0].length):-1}function Wn(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e));return r?(n.W=+r[0],e+r[0].length):-1}function Jn(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Gn(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+2));return r?(n.y=Qn(+r[0]),e+r[0].length):-1}function Kn(n,t,e){return/^[+-]\d{4}$/.test(t=t.slice(e,e+5))?(n.Z=-t,e+5):-1}function Qn(n){return n+(n>68?1900:2e3)}function nt(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function tt(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function et(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function rt(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function it(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function ut(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function ot(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function at(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=xo(t)/60|0,i=xo(t)%60;return e+Zn(r,"0",2)+Zn(i,"0",2)}function lt(n,t,e){Ma.lastIndex=0;var r=Ma.exec(t.slice(e,e+1));return r?e+r[0].length:-1}function ct(n){for(var t=n.length,e=-1;++e<t;)n[e][0]=this(n[e][0]);return function(t){for(var e=0,r=n[e];!r[1](t);)r=n[++e];return r[0](t)}}function ft(){}function st(n,t,e){var r=e.s=n+t,i=r-n,u=r-i;e.t=n-u+(t-i)}function ht(n,t){n&&wa.hasOwnProperty(n.type)&&wa[n.type](n,t)}function pt(n,t,e){var r,i=-1,u=n.length-e;for(t.lineStart();++i<u;)r=n[i],t.point(r[0],r[1],r[2]);t.lineEnd()}function gt(n,t){var e=-1,r=n.length;for(t.polygonStart();++e<r;)pt(n[e],t,1);t.polygonEnd()}function vt(){function n(n,t){n*=Yo,t=t*Yo/2+Fo/4;var e=n-r,o=e>=0?1:-1,a=o*e,l=Math.cos(t),c=Math.sin(t),f=u*c,s=i*l+f*Math.cos(a),h=f*o*Math.sin(a);ka.add(Math.atan2(h,s)),r=n,i=l,u=c}var t,e,r,i,u;Na.point=function(o,a){Na.point=n,r=(t=o)*Yo,i=Math.cos(a=(e=a)*Yo/2+Fo/4),u=Math.sin(a)},Na.lineEnd=function(){n(t,e)}}function dt(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function yt(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function mt(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function Mt(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function xt(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function bt(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function _t(n){return[Math.atan2(n[1],n[0]),tn(n[2])]}function wt(n,t){return xo(n[0]-t[0])<Uo&&xo(n[1]-t[1])<Uo}function St(n,t){n*=Yo;var e=Math.cos(t*=Yo);kt(e*Math.cos(n),e*Math.sin(n),Math.sin(t))}function kt(n,t,e){++Ea,Ca+=(n-Ca)/Ea,za+=(t-za)/Ea,La+=(e-La)/Ea}function Nt(){function n(n,i){n*=Yo;var u=Math.cos(i*=Yo),o=u*Math.cos(n),a=u*Math.sin(n),l=Math.sin(i),c=Math.atan2(Math.sqrt((c=e*l-r*a)*c+(c=r*o-t*l)*c+(c=t*a-e*o)*c),t*o+e*a+r*l);Aa+=c,qa+=c*(t+(t=o)),Ta+=c*(e+(e=a)),Ra+=c*(r+(r=l)),kt(t,e,r)}var 
t,e,r;ja.point=function(i,u){i*=Yo;var o=Math.cos(u*=Yo);t=o*Math.cos(i),e=o*Math.sin(i),r=Math.sin(u),ja.point=n,kt(t,e,r)}}function Et(){ja.point=St}function At(){function n(n,t){n*=Yo;var e=Math.cos(t*=Yo),o=e*Math.cos(n),a=e*Math.sin(n),l=Math.sin(t),c=i*l-u*a,f=u*o-r*l,s=r*a-i*o,h=Math.sqrt(c*c+f*f+s*s),p=r*o+i*a+u*l,g=h&&-nn(p)/h,v=Math.atan2(h,p);Da+=g*c,Pa+=g*f,Ua+=g*s,Aa+=v,qa+=v*(r+(r=o)),Ta+=v*(i+(i=a)),Ra+=v*(u+(u=l)),kt(r,i,u)}var t,e,r,i,u;ja.point=function(o,a){t=o,e=a,ja.point=n,o*=Yo;var l=Math.cos(a*=Yo);r=l*Math.cos(o),i=l*Math.sin(o),u=Math.sin(a),kt(r,i,u)},ja.lineEnd=function(){n(t,e),ja.lineEnd=Et,ja.point=St}}function Ct(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function zt(){return!0}function Lt(n,t,e,r,i){var u=[],o=[];if(n.forEach(function(n){if(!((t=n.length-1)<=0)){var t,e=n[0],r=n[t];if(wt(e,r)){i.lineStart();for(var a=0;t>a;++a)i.point((e=n[a])[0],e[1]);return void i.lineEnd()}var l=new Tt(e,n,null,!0),c=new Tt(e,null,l,!1);l.o=c,u.push(l),o.push(c),l=new Tt(r,n,null,!1),c=new Tt(r,null,l,!0),l.o=c,u.push(l),o.push(c)}}),o.sort(t),qt(u),qt(o),u.length){for(var a=0,l=e,c=o.length;c>a;++a)o[a].e=l=!l;for(var f,s,h=u[0];;){for(var p=h,g=!0;p.v;)if((p=p.n)===h)return;f=p.z,i.lineStart();do{if(p.v=p.o.v=!0,p.e){if(g)for(var a=0,c=f.length;c>a;++a)i.point((s=f[a])[0],s[1]);else r(p.x,p.n.x,1,i);p=p.n}else{if(g){f=p.p.z;for(var a=f.length-1;a>=0;--a)i.point((s=f[a])[0],s[1])}else r(p.x,p.p.x,-1,i);p=p.p}p=p.o,f=p.z,g=!g}while(!p.v);i.lineEnd()}}}function qt(n){if(t=n.length){for(var t,e,r=0,i=n[0];++r<t;)i.n=e=n[r],e.p=i,i=e;i.n=e=n[0],e.p=i}}function Tt(n,t,e,r){this.x=n,this.z=t,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Rt(n,t,e,r){return function(i,u){function o(t,e){var r=i(t,e);n(t=r[0],e=r[1])&&u.point(t,e)}function a(n,t){var e=i(n,t);d.point(e[0],e[1])}function l(){m.point=a,d.lineStart()}function c(){m.point=o,d.lineEnd()}function f(n,t){v.push([n,t]);var e=i(n,t);x.point(e[0],e[1])}function s(){x.lineStart(),v=[]}function h(){f(v[0][0],v[0][1]),x.lineEnd();var n,t=x.clean(),e=M.buffer(),r=e.length;if(v.pop(),g.push(v),v=null,r)if(1&t){n=e[0];var i,r=n.length-1,o=-1;if(r>0){for(b||(u.polygonStart(),b=!0),u.lineStart();++o<r;)u.point((i=n[o])[0],i[1]);u.lineEnd()}}else r>1&&2&t&&e.push(e.pop().concat(e.shift())),p.push(e.filter(Dt))}var p,g,v,d=t(u),y=i.invert(r[0],r[1]),m={point:o,lineStart:l,lineEnd:c,polygonStart:function(){m.point=f,m.lineStart=s,m.lineEnd=h,p=[],g=[]},polygonEnd:function(){m.point=o,m.lineStart=l,m.lineEnd=c,p=ao.merge(p);var n=Ot(y,g);p.length?(b||(u.polygonStart(),b=!0),Lt(p,Ut,n,e,u)):n&&(b||(u.polygonStart(),b=!0),u.lineStart(),e(null,null,1,u),u.lineEnd()),b&&(u.polygonEnd(),b=!1),p=g=null},sphere:function(){u.polygonStart(),u.lineStart(),e(null,null,1,u),u.lineEnd(),u.polygonEnd()}},M=Pt(),x=t(M),b=!1;return m}}function Dt(n){return n.length>1}function Pt(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:b,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ut(n,t){return((n=n.x)[0]<0?n[1]-Io-Uo:Io-n[1])-((t=t.x)[0]<0?t[1]-Io-Uo:Io-t[1])}function jt(n){var t,e=NaN,r=NaN,i=NaN;return{lineStart:function(){n.lineStart(),t=1},point:function(u,o){var 
a=u>0?Fo:-Fo,l=xo(u-e);xo(l-Fo)<Uo?(n.point(e,r=(r+o)/2>0?Io:-Io),n.point(i,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(u,r),t=0):i!==a&&l>=Fo&&(xo(e-i)<Uo&&(e-=i*Uo),xo(u-a)<Uo&&(u-=a*Uo),r=Ft(e,r,u,o),n.point(i,r),n.lineEnd(),n.lineStart(),n.point(a,r),t=0),n.point(e=u,r=o),i=a},lineEnd:function(){n.lineEnd(),e=r=NaN},clean:function(){return 2-t}}}function Ft(n,t,e,r){var i,u,o=Math.sin(n-e);return xo(o)>Uo?Math.atan((Math.sin(t)*(u=Math.cos(r))*Math.sin(e)-Math.sin(r)*(i=Math.cos(t))*Math.sin(n))/(i*u*o)):(t+r)/2}function Ht(n,t,e,r){var i;if(null==n)i=e*Io,r.point(-Fo,i),r.point(0,i),r.point(Fo,i),r.point(Fo,0),r.point(Fo,-i),r.point(0,-i),r.point(-Fo,-i),r.point(-Fo,0),r.point(-Fo,i);else if(xo(n[0]-t[0])>Uo){var u=n[0]<t[0]?Fo:-Fo;i=e*u/2,r.point(-u,i),r.point(0,i),r.point(u,i)}else r.point(t[0],t[1])}function Ot(n,t){var e=n[0],r=n[1],i=[Math.sin(e),-Math.cos(e),0],u=0,o=0;ka.reset();for(var a=0,l=t.length;l>a;++a){var c=t[a],f=c.length;if(f)for(var s=c[0],h=s[0],p=s[1]/2+Fo/4,g=Math.sin(p),v=Math.cos(p),d=1;;){d===f&&(d=0),n=c[d];var y=n[0],m=n[1]/2+Fo/4,M=Math.sin(m),x=Math.cos(m),b=y-h,_=b>=0?1:-1,w=_*b,S=w>Fo,k=g*M;if(ka.add(Math.atan2(k*_*Math.sin(w),v*x+k*Math.cos(w))),u+=S?b+_*Ho:b,S^h>=e^y>=e){var N=mt(dt(s),dt(n));bt(N);var E=mt(i,N);bt(E);var A=(S^b>=0?-1:1)*tn(E[2]);(r>A||r===A&&(N[0]||N[1]))&&(o+=S^b>=0?1:-1)}if(!d++)break;h=y,g=M,v=x,s=n}}return(-Uo>u||Uo>u&&-Uo>ka)^1&o}function It(n){function t(n,t){return Math.cos(n)*Math.cos(t)>u}function e(n){var e,u,l,c,f;return{lineStart:function(){c=l=!1,f=1},point:function(s,h){var p,g=[s,h],v=t(s,h),d=o?v?0:i(s,h):v?i(s+(0>s?Fo:-Fo),h):0;if(!e&&(c=l=v)&&n.lineStart(),v!==l&&(p=r(e,g),(wt(e,p)||wt(g,p))&&(g[0]+=Uo,g[1]+=Uo,v=t(g[0],g[1]))),v!==l)f=0,v?(n.lineStart(),p=r(g,e),n.point(p[0],p[1])):(p=r(e,g),n.point(p[0],p[1]),n.lineEnd()),e=p;else if(a&&e&&o^v){var y;d&u||!(y=r(g,e,!0))||(f=0,o?(n.lineStart(),n.point(y[0][0],y[0][1]),n.point(y[1][0],y[1][1]),n.lineEnd()):(n.point(y[1][0],y[1][1]),n.lineEnd(),n.lineStart(),n.point(y[0][0],y[0][1])))}!v||e&&wt(e,g)||n.point(g[0],g[1]),e=g,l=v,u=d},lineEnd:function(){l&&n.lineEnd(),e=null},clean:function(){return f|(c&&l)<<1}}}function r(n,t,e){var r=dt(n),i=dt(t),o=[1,0,0],a=mt(r,i),l=yt(a,a),c=a[0],f=l-c*c;if(!f)return!e&&n;var s=u*l/f,h=-u*c/f,p=mt(o,a),g=xt(o,s),v=xt(a,h);Mt(g,v);var d=p,y=yt(g,d),m=yt(d,d),M=y*y-m*(yt(g,g)-1);if(!(0>M)){var x=Math.sqrt(M),b=xt(d,(-y-x)/m);if(Mt(b,g),b=_t(b),!e)return b;var _,w=n[0],S=t[0],k=n[1],N=t[1];w>S&&(_=w,w=S,S=_);var E=S-w,A=xo(E-Fo)<Uo,C=A||Uo>E;if(!A&&k>N&&(_=k,k=N,N=_),C?A?k+N>0^b[1]<(xo(b[0]-w)<Uo?k:N):k<=b[1]&&b[1]<=N:E>Fo^(w<=b[0]&&b[0]<=S)){var z=xt(d,(-y+x)/m);return Mt(z,g),[b,_t(z)]}}}function i(t,e){var r=o?n:Fo-n,i=0;return-r>t?i|=1:t>r&&(i|=2),-r>e?i|=4:e>r&&(i|=8),i}var u=Math.cos(n),o=u>0,a=xo(u)>Uo,l=ve(n,6*Yo);return Rt(t,e,l,o?[0,-n]:[-Fo,n-Fo])}function Yt(n,t,e,r){return function(i){var u,o=i.a,a=i.b,l=o.x,c=o.y,f=a.x,s=a.y,h=0,p=1,g=f-l,v=s-c;if(u=n-l,g||!(u>0)){if(u/=g,0>g){if(h>u)return;p>u&&(p=u)}else if(g>0){if(u>p)return;u>h&&(h=u)}if(u=e-l,g||!(0>u)){if(u/=g,0>g){if(u>p)return;u>h&&(h=u)}else if(g>0){if(h>u)return;p>u&&(p=u)}if(u=t-c,v||!(u>0)){if(u/=v,0>v){if(h>u)return;p>u&&(p=u)}else if(v>0){if(u>p)return;u>h&&(h=u)}if(u=r-c,v||!(0>u)){if(u/=v,0>v){if(u>p)return;u>h&&(h=u)}else if(v>0){if(h>u)return;p>u&&(p=u)}return h>0&&(i.a={x:l+h*g,y:c+h*v}),1>p&&(i.b={x:l+p*g,y:c+p*v}),i}}}}}}function Zt(n,t,e,r){function i(r,i){return 
xo(r[0]-n)<Uo?i>0?0:3:xo(r[0]-e)<Uo?i>0?2:1:xo(r[1]-t)<Uo?i>0?1:0:i>0?3:2}function u(n,t){return o(n.x,t.x)}function o(n,t){var e=i(n,1),r=i(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function l(n){for(var t=0,e=d.length,r=n[1],i=0;e>i;++i)for(var u,o=1,a=d[i],l=a.length,c=a[0];l>o;++o)u=a[o],c[1]<=r?u[1]>r&&Q(c,u,n)>0&&++t:u[1]<=r&&Q(c,u,n)<0&&--t,c=u;return 0!==t}function c(u,a,l,c){var f=0,s=0;if(null==u||(f=i(u,l))!==(s=i(a,l))||o(u,a)<0^l>0){do c.point(0===f||3===f?n:e,f>1?r:t);while((f=(f+l+4)%4)!==s)}else c.point(a[0],a[1])}function f(i,u){return i>=n&&e>=i&&u>=t&&r>=u}function s(n,t){f(n,t)&&a.point(n,t)}function h(){C.point=g,d&&d.push(y=[]),S=!0,w=!1,b=_=NaN}function p(){v&&(g(m,M),x&&w&&E.rejoin(),v.push(E.buffer())),C.point=s,w&&a.lineEnd()}function g(n,t){n=Math.max(-Ha,Math.min(Ha,n)),t=Math.max(-Ha,Math.min(Ha,t));var e=f(n,t);if(d&&y.push([n,t]),S)m=n,M=t,x=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:b,y:_},b:{x:n,y:t}};A(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}b=n,_=t,w=e}var v,d,y,m,M,x,b,_,w,S,k,N=a,E=Pt(),A=Yt(n,t,e,r),C={point:s,lineStart:h,lineEnd:p,polygonStart:function(){a=E,v=[],d=[],k=!0},polygonEnd:function(){a=N,v=ao.merge(v);var t=l([n,r]),e=k&&t,i=v.length;(e||i)&&(a.polygonStart(),e&&(a.lineStart(),c(null,null,1,a),a.lineEnd()),i&&Lt(v,u,t,c,a),a.polygonEnd()),v=d=y=null}};return C}}function Vt(n){var t=0,e=Fo/3,r=ae(n),i=r(t,e);return i.parallels=function(n){return arguments.length?r(t=n[0]*Fo/180,e=n[1]*Fo/180):[t/Fo*180,e/Fo*180]},i}function Xt(n,t){function e(n,t){var e=Math.sqrt(u-2*i*Math.sin(t))/i;return[e*Math.sin(n*=i),o-e*Math.cos(n)]}var r=Math.sin(n),i=(r+Math.sin(t))/2,u=1+r*(2*i-r),o=Math.sqrt(u)/i;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/i,tn((u-(n*n+e*e)*i*i)/(2*i))]},e}function $t(){function n(n,t){Ia+=i*n-r*t,r=n,i=t}var t,e,r,i;$a.point=function(u,o){$a.point=n,t=r=u,e=i=o},$a.lineEnd=function(){n(t,e)}}function Bt(n,t){Ya>n&&(Ya=n),n>Va&&(Va=n),Za>t&&(Za=t),t>Xa&&(Xa=t)}function Wt(){function n(n,t){o.push("M",n,",",t,u)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function i(){o.push("Z")}var u=Jt(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return u=Jt(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Jt(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Gt(n,t){Ca+=n,za+=t,++La}function Kt(){function n(n,r){var i=n-t,u=r-e,o=Math.sqrt(i*i+u*u);qa+=o*(t+n)/2,Ta+=o*(e+r)/2,Ra+=o,Gt(t=n,e=r)}var t,e;Wa.point=function(r,i){Wa.point=n,Gt(t=r,e=i)}}function Qt(){Wa.point=Gt}function ne(){function n(n,t){var e=n-r,u=t-i,o=Math.sqrt(e*e+u*u);qa+=o*(r+n)/2,Ta+=o*(i+t)/2,Ra+=o,o=i*n-r*t,Da+=o*(r+n),Pa+=o*(i+t),Ua+=3*o,Gt(r=n,i=t)}var t,e,r,i;Wa.point=function(u,o){Wa.point=n,Gt(t=r=u,e=i=o)},Wa.lineEnd=function(){n(t,e)}}function te(n){function t(t,e){n.moveTo(t+o,e),n.arc(t,e,o,0,Ho)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function i(){a.point=t}function u(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:i,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=i,a.point=t},pointRadius:function(n){return 
o=n,a},result:b};return a}function ee(n){function t(n){return(a?r:e)(n)}function e(t){return ue(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){M=NaN,S.point=u,t.lineStart()}function u(e,r){var u=dt([e,r]),o=n(e,r);i(M,x,m,b,_,w,M=o[0],x=o[1],m=e,b=u[0],_=u[1],w=u[2],a,t),t.point(M,x)}function o(){S.point=e,t.lineEnd()}function l(){
-r(),S.point=c,S.lineEnd=f}function c(n,t){u(s=n,h=t),p=M,g=x,v=b,d=_,y=w,S.point=u}function f(){i(M,x,m,b,_,w,p,g,s,v,d,y,a,t),S.lineEnd=o,o()}var s,h,p,g,v,d,y,m,M,x,b,_,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=l},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function i(t,e,r,a,l,c,f,s,h,p,g,v,d,y){var m=f-t,M=s-e,x=m*m+M*M;if(x>4*u&&d--){var b=a+p,_=l+g,w=c+v,S=Math.sqrt(b*b+_*_+w*w),k=Math.asin(w/=S),N=xo(xo(w)-1)<Uo||xo(r-h)<Uo?(r+h)/2:Math.atan2(_,b),E=n(N,k),A=E[0],C=E[1],z=A-t,L=C-e,q=M*z-m*L;(q*q/x>u||xo((m*z+M*L)/x-.5)>.3||o>a*p+l*g+c*v)&&(i(t,e,r,a,l,c,A,C,N,b/=S,_/=S,w,d,y),y.point(A,C),i(A,C,N,b,_,w,f,s,h,p,g,v,d,y))}}var u=.5,o=Math.cos(30*Yo),a=16;return t.precision=function(n){return arguments.length?(a=(u=n*n)>0&&16,t):Math.sqrt(u)},t}function re(n){var t=ee(function(t,e){return n([t*Zo,e*Zo])});return function(n){return le(t(n))}}function ie(n){this.stream=n}function ue(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function oe(n){return ae(function(){return n})()}function ae(n){function t(n){return n=a(n[0]*Yo,n[1]*Yo),[n[0]*h+l,c-n[1]*h]}function e(n){return n=a.invert((n[0]-l)/h,(c-n[1])/h),n&&[n[0]*Zo,n[1]*Zo]}function r(){a=Ct(o=se(y,M,x),u);var n=u(v,d);return l=p-n[0]*h,c=g+n[1]*h,i()}function i(){return f&&(f.valid=!1,f=null),t}var u,o,a,l,c,f,s=ee(function(n,t){return n=u(n,t),[n[0]*h+l,c-n[1]*h]}),h=150,p=480,g=250,v=0,d=0,y=0,M=0,x=0,b=Fa,_=m,w=null,S=null;return t.stream=function(n){return f&&(f.valid=!1),f=le(b(o,s(_(n)))),f.valid=!0,f},t.clipAngle=function(n){return arguments.length?(b=null==n?(w=n,Fa):It((w=+n)*Yo),i()):w},t.clipExtent=function(n){return arguments.length?(S=n,_=n?Zt(n[0][0],n[0][1],n[1][0],n[1][1]):m,i()):S},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(p=+n[0],g=+n[1],r()):[p,g]},t.center=function(n){return arguments.length?(v=n[0]%360*Yo,d=n[1]%360*Yo,r()):[v*Zo,d*Zo]},t.rotate=function(n){return arguments.length?(y=n[0]%360*Yo,M=n[1]%360*Yo,x=n.length>2?n[2]%360*Yo:0,r()):[y*Zo,M*Zo,x*Zo]},ao.rebind(t,s,"precision"),function(){return u=n.apply(this,arguments),t.invert=u.invert&&e,r()}}function le(n){return ue(n,function(t,e){n.point(t*Yo,e*Yo)})}function ce(n,t){return[n,t]}function fe(n,t){return[n>Fo?n-Ho:-Fo>n?n+Ho:n,t]}function se(n,t,e){return n?t||e?Ct(pe(n),ge(t,e)):pe(n):t||e?ge(t,e):fe}function he(n){return function(t,e){return t+=n,[t>Fo?t-Ho:-Fo>t?t+Ho:t,e]}}function pe(n){var t=he(n);return t.invert=he(-n),t}function ge(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,l=Math.sin(n)*e,c=Math.sin(t),f=c*r+a*i;return[Math.atan2(l*u-f*o,a*r-c*i),tn(f*u+l*o)]}var r=Math.cos(n),i=Math.sin(n),u=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,l=Math.sin(n)*e,c=Math.sin(t),f=c*u-l*o;return[Math.atan2(l*u+c*o,a*r+f*i),tn(f*r-a*i)]},e}function ve(n,t){var e=Math.cos(n),r=Math.sin(n);return function(i,u,o,a){var l=o*t;null!=i?(i=de(e,i),u=de(e,u),(o>0?u>i:i>u)&&(i+=o*Ho)):(i=n+o*Ho,u=n-.5*l);for(var c,f=i;o>0?f>u:u>f;f-=l)a.point((c=_t([e,-r*Math.cos(f),-r*Math.sin(f)]))[0],c[1])}}function de(n,t){var e=dt(t);e[0]-=n,bt(e);var r=nn(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Uo)%(2*Math.PI)}function ye(n,t,e){var r=ao.range(n,t-Uo,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function me(n,t,e){var 
r=ao.range(n,t-Uo,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function Me(n){return n.source}function xe(n){return n.target}function be(n,t,e,r){var i=Math.cos(t),u=Math.sin(t),o=Math.cos(r),a=Math.sin(r),l=i*Math.cos(n),c=i*Math.sin(n),f=o*Math.cos(e),s=o*Math.sin(e),h=2*Math.asin(Math.sqrt(on(r-t)+i*o*on(e-n))),p=1/Math.sin(h),g=h?function(n){var t=Math.sin(n*=h)*p,e=Math.sin(h-n)*p,r=e*l+t*f,i=e*c+t*s,o=e*u+t*a;return[Math.atan2(i,r)*Zo,Math.atan2(o,Math.sqrt(r*r+i*i))*Zo]}:function(){return[n*Zo,t*Zo]};return g.distance=h,g}function _e(){function n(n,i){var u=Math.sin(i*=Yo),o=Math.cos(i),a=xo((n*=Yo)-t),l=Math.cos(a);Ja+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*u-e*o*l)*a),e*u+r*o*l),t=n,e=u,r=o}var t,e,r;Ga.point=function(i,u){t=i*Yo,e=Math.sin(u*=Yo),r=Math.cos(u),Ga.point=n},Ga.lineEnd=function(){Ga.point=Ga.lineEnd=b}}function we(n,t){function e(t,e){var r=Math.cos(t),i=Math.cos(e),u=n(r*i);return[u*i*Math.sin(t),u*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),i=t(r),u=Math.sin(i),o=Math.cos(i);return[Math.atan2(n*u,r*o),Math.asin(r&&e*u/r)]},e}function Se(n,t){function e(n,t){o>0?-Io+Uo>t&&(t=-Io+Uo):t>Io-Uo&&(t=Io-Uo);var e=o/Math.pow(i(t),u);return[e*Math.sin(u*n),o-e*Math.cos(u*n)]}var r=Math.cos(n),i=function(n){return Math.tan(Fo/4+n/2)},u=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(i(t)/i(n)),o=r*Math.pow(i(n),u)/u;return u?(e.invert=function(n,t){var e=o-t,r=K(u)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/u,2*Math.atan(Math.pow(o/r,1/u))-Io]},e):Ne}function ke(n,t){function e(n,t){var e=u-t;return[e*Math.sin(i*n),u-e*Math.cos(i*n)]}var r=Math.cos(n),i=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),u=r/i+n;return xo(i)<Uo?ce:(e.invert=function(n,t){var e=u-t;return[Math.atan2(n,e)/i,u-K(i)*Math.sqrt(n*n+e*e)]},e)}function Ne(n,t){return[n,Math.log(Math.tan(Fo/4+t/2))]}function Ee(n){var t,e=oe(n),r=e.scale,i=e.translate,u=e.clipExtent;return e.scale=function(){var n=r.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.translate=function(){var n=i.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.clipExtent=function(n){var o=u.apply(e,arguments);if(o===e){if(t=null==n){var a=Fo*r(),l=i();u([[l[0]-a,l[1]-a],[l[0]+a,l[1]+a]])}}else t&&(o=null);return o},e.clipExtent(null)}function Ae(n,t){return[Math.log(Math.tan(Fo/4+t/2)),-n]}function Ce(n){return n[0]}function ze(n){return n[1]}function Le(n){for(var t=n.length,e=[0,1],r=2,i=2;t>i;i++){for(;r>1&&Q(n[e[r-2]],n[e[r-1]],n[i])<=0;)--r;e[r++]=i}return e.slice(0,r)}function qe(n,t){return n[0]-t[0]||n[1]-t[1]}function Te(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Re(n,t,e,r){var i=n[0],u=e[0],o=t[0]-i,a=r[0]-u,l=n[1],c=e[1],f=t[1]-l,s=r[1]-c,h=(a*(l-c)-s*(i-u))/(s*o-a*f);return[i+h*o,l+h*f]}function De(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function Pe(){rr(this),this.edge=this.site=this.circle=null}function Ue(n){var t=cl.pop()||new Pe;return t.site=n,t}function je(n){Be(n),ol.remove(n),cl.push(n),rr(n)}function Fe(n){var t=n.circle,e=t.x,r=t.cy,i={x:e,y:r},u=n.P,o=n.N,a=[n];je(n);for(var l=u;l.circle&&xo(e-l.circle.x)<Uo&&xo(r-l.circle.cy)<Uo;)u=l.P,a.unshift(l),je(l),l=u;a.unshift(l),Be(l);for(var c=o;c.circle&&xo(e-c.circle.x)<Uo&&xo(r-c.circle.cy)<Uo;)o=c.N,a.push(c),je(c),c=o;a.push(c),Be(c);var f,s=a.length;for(f=1;s>f;++f)c=a[f],l=a[f-1],nr(c.edge,l.site,c.site,i);l=a[0],c=a[s-1],c.edge=Ke(l.site,c.site,null,i),$e(l),$e(c)}function He(n){for(var 
t,e,r,i,u=n.x,o=n.y,a=ol._;a;)if(r=Oe(a,o)-u,r>Uo)a=a.L;else{if(i=u-Ie(a,o),!(i>Uo)){r>-Uo?(t=a.P,e=a):i>-Uo?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var l=Ue(n);if(ol.insert(t,l),t||e){if(t===e)return Be(t),e=Ue(t.site),ol.insert(l,e),l.edge=e.edge=Ke(t.site,l.site),$e(t),void $e(e);if(!e)return void(l.edge=Ke(t.site,l.site));Be(t),Be(e);var c=t.site,f=c.x,s=c.y,h=n.x-f,p=n.y-s,g=e.site,v=g.x-f,d=g.y-s,y=2*(h*d-p*v),m=h*h+p*p,M=v*v+d*d,x={x:(d*m-p*M)/y+f,y:(h*M-v*m)/y+s};nr(e.edge,c,g,x),l.edge=Ke(c,n,null,x),e.edge=Ke(n,g,null,x),$e(t),$e(e)}}function Oe(n,t){var e=n.site,r=e.x,i=e.y,u=i-t;if(!u)return r;var o=n.P;if(!o)return-(1/0);e=o.site;var a=e.x,l=e.y,c=l-t;if(!c)return a;var f=a-r,s=1/u-1/c,h=f/c;return s?(-h+Math.sqrt(h*h-2*s*(f*f/(-2*c)-l+c/2+i-u/2)))/s+r:(r+a)/2}function Ie(n,t){var e=n.N;if(e)return Oe(e,t);var r=n.site;return r.y===t?r.x:1/0}function Ye(n){this.site=n,this.edges=[]}function Ze(n){for(var t,e,r,i,u,o,a,l,c,f,s=n[0][0],h=n[1][0],p=n[0][1],g=n[1][1],v=ul,d=v.length;d--;)if(u=v[d],u&&u.prepare())for(a=u.edges,l=a.length,o=0;l>o;)f=a[o].end(),r=f.x,i=f.y,c=a[++o%l].start(),t=c.x,e=c.y,(xo(r-t)>Uo||xo(i-e)>Uo)&&(a.splice(o,0,new tr(Qe(u.site,f,xo(r-s)<Uo&&g-i>Uo?{x:s,y:xo(t-s)<Uo?e:g}:xo(i-g)<Uo&&h-r>Uo?{x:xo(e-g)<Uo?t:h,y:g}:xo(r-h)<Uo&&i-p>Uo?{x:h,y:xo(t-h)<Uo?e:p}:xo(i-p)<Uo&&r-s>Uo?{x:xo(e-p)<Uo?t:s,y:p}:null),u.site,null)),++l)}function Ve(n,t){return t.angle-n.angle}function Xe(){rr(this),this.x=this.y=this.arc=this.site=this.cy=null}function $e(n){var t=n.P,e=n.N;if(t&&e){var r=t.site,i=n.site,u=e.site;if(r!==u){var o=i.x,a=i.y,l=r.x-o,c=r.y-a,f=u.x-o,s=u.y-a,h=2*(l*s-c*f);if(!(h>=-jo)){var p=l*l+c*c,g=f*f+s*s,v=(s*p-c*g)/h,d=(l*g-f*p)/h,s=d+a,y=fl.pop()||new Xe;y.arc=n,y.site=i,y.x=v+o,y.y=s+Math.sqrt(v*v+d*d),y.cy=s,n.circle=y;for(var m=null,M=ll._;M;)if(y.y<M.y||y.y===M.y&&y.x<=M.x){if(!M.L){m=M.P;break}M=M.L}else{if(!M.R){m=M;break}M=M.R}ll.insert(m,y),m||(al=y)}}}}function Be(n){var t=n.circle;t&&(t.P||(al=t.N),ll.remove(t),fl.push(t),rr(t),n.circle=null)}function We(n){for(var t,e=il,r=Yt(n[0][0],n[0][1],n[1][0],n[1][1]),i=e.length;i--;)t=e[i],(!Je(t,n)||!r(t)||xo(t.a.x-t.b.x)<Uo&&xo(t.a.y-t.b.y)<Uo)&&(t.a=t.b=null,e.splice(i,1))}function Je(n,t){var e=n.b;if(e)return!0;var r,i,u=n.a,o=t[0][0],a=t[1][0],l=t[0][1],c=t[1][1],f=n.l,s=n.r,h=f.x,p=f.y,g=s.x,v=s.y,d=(h+g)/2,y=(p+v)/2;if(v===p){if(o>d||d>=a)return;if(h>g){if(u){if(u.y>=c)return}else u={x:d,y:l};e={x:d,y:c}}else{if(u){if(u.y<l)return}else u={x:d,y:c};e={x:d,y:l}}}else if(r=(h-g)/(v-p),i=y-r*d,-1>r||r>1)if(h>g){if(u){if(u.y>=c)return}else u={x:(l-i)/r,y:l};e={x:(c-i)/r,y:c}}else{if(u){if(u.y<l)return}else u={x:(c-i)/r,y:c};e={x:(l-i)/r,y:l}}else if(v>p){if(u){if(u.x>=a)return}else u={x:o,y:r*o+i};e={x:a,y:r*a+i}}else{if(u){if(u.x<o)return}else u={x:a,y:r*a+i};e={x:o,y:r*o+i}}return n.a=u,n.b=e,!0}function Ge(n,t){this.l=n,this.r=t,this.a=this.b=null}function Ke(n,t,e,r){var i=new Ge(n,t);return il.push(i),e&&nr(i,n,t,e),r&&nr(i,t,n,r),ul[n.i].edges.push(new tr(i,n,t)),ul[t.i].edges.push(new tr(i,t,n)),i}function Qe(n,t,e){var r=new Ge(n,null);return r.a=t,r.b=e,il.push(r),r}function nr(n,t,e,r){n.a||n.b?n.l===e?n.b=r:n.a=r:(n.a=r,n.l=t,n.r=e)}function tr(n,t,e){var r=n.a,i=n.b;this.edge=n,this.site=t,this.angle=e?Math.atan2(e.y-t.y,e.x-t.x):n.l===t?Math.atan2(i.x-r.x,r.y-i.y):Math.atan2(r.x-i.x,i.y-r.y)}function er(){this._=null}function rr(n){n.U=n.C=n.L=n.R=n.P=n.N=null}function ir(n,t){var e=t,r=t.R,i=e.U;i?i.L===e?i.L=r:i.R=r:n._=r,r.U=i,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function 
ur(n,t){var e=t,r=t.L,i=e.U;i?i.L===e?i.L=r:i.R=r:n._=r,r.U=i,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function or(n){for(;n.L;)n=n.L;return n}function ar(n,t){var e,r,i,u=n.sort(lr).pop();for(il=[],ul=new Array(n.length),ol=new er,ll=new er;;)if(i=al,u&&(!i||u.y<i.y||u.y===i.y&&u.x<i.x))u.x===e&&u.y===r||(ul[u.i]=new Ye(u),He(u),e=u.x,r=u.y),u=n.pop();else{if(!i)break;Fe(i.arc)}t&&(We(t),Ze(t));var o={cells:ul,edges:il};return ol=ll=il=ul=null,o}function lr(n,t){return t.y-n.y||t.x-n.x}function cr(n,t,e){return(n.x-e.x)*(t.y-n.y)-(n.x-t.x)*(e.y-n.y)}function fr(n){return n.x}function sr(n){return n.y}function hr(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function pr(n,t,e,r,i,u){if(!n(t,e,r,i,u)){var o=.5*(e+i),a=.5*(r+u),l=t.nodes;l[0]&&pr(n,l[0],e,r,o,a),l[1]&&pr(n,l[1],o,r,i,a),l[2]&&pr(n,l[2],e,a,o,u),l[3]&&pr(n,l[3],o,a,i,u)}}function gr(n,t,e,r,i,u,o){var a,l=1/0;return function c(n,f,s,h,p){if(!(f>u||s>o||r>h||i>p)){if(g=n.point){var g,v=t-n.x,d=e-n.y,y=v*v+d*d;if(l>y){var m=Math.sqrt(l=y);r=t-m,i=e-m,u=t+m,o=e+m,a=g}}for(var M=n.nodes,x=.5*(f+h),b=.5*(s+p),_=t>=x,w=e>=b,S=w<<1|_,k=S+4;k>S;++S)if(n=M[3&S])switch(3&S){case 0:c(n,f,s,x,b);break;case 1:c(n,x,s,h,b);break;case 2:c(n,f,b,x,p);break;case 3:c(n,x,b,h,p)}}}(n,r,i,u,o),a}function vr(n,t){n=ao.rgb(n),t=ao.rgb(t);var e=n.r,r=n.g,i=n.b,u=t.r-e,o=t.g-r,a=t.b-i;return function(n){return"#"+bn(Math.round(e+u*n))+bn(Math.round(r+o*n))+bn(Math.round(i+a*n))}}function dr(n,t){var e,r={},i={};for(e in n)e in t?r[e]=Mr(n[e],t[e]):i[e]=n[e];for(e in t)e in n||(i[e]=t[e]);return function(n){for(e in r)i[e]=r[e](n);return i}}function yr(n,t){return n=+n,t=+t,function(e){return n*(1-e)+t*e}}function mr(n,t){var e,r,i,u=hl.lastIndex=pl.lastIndex=0,o=-1,a=[],l=[];for(n+="",t+="";(e=hl.exec(n))&&(r=pl.exec(t));)(i=r.index)>u&&(i=t.slice(u,i),a[o]?a[o]+=i:a[++o]=i),(e=e[0])===(r=r[0])?a[o]?a[o]+=r:a[++o]=r:(a[++o]=null,l.push({i:o,x:yr(e,r)})),u=pl.lastIndex;return u<t.length&&(i=t.slice(u),a[o]?a[o]+=i:a[++o]=i),a.length<2?l[0]?(t=l[0].x,function(n){return t(n)+""}):function(){return t}:(t=l.length,function(n){for(var e,r=0;t>r;++r)a[(e=l[r]).i]=e.x(n);return a.join("")})}function Mr(n,t){for(var e,r=ao.interpolators.length;--r>=0&&!(e=ao.interpolators[r](n,t)););return e}function xr(n,t){var e,r=[],i=[],u=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(Mr(n[e],t[e]));for(;u>e;++e)i[e]=n[e];for(;o>e;++e)i[e]=t[e];return function(n){for(e=0;a>e;++e)i[e]=r[e](n);return i}}function br(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function _r(n){return function(t){return 1-n(1-t)}}function wr(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function Sr(n){return n*n}function kr(n){return n*n*n}function Nr(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function Er(n){return function(t){return Math.pow(t,n)}}function Ar(n){return 1-Math.cos(n*Io)}function Cr(n){return Math.pow(2,10*(n-1))}function zr(n){return 1-Math.sqrt(1-n*n)}function Lr(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/Ho*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*Ho/t)}}function qr(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function Tr(n){return 1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Rr(n,t){n=ao.hcl(n),t=ao.hcl(t);var e=n.h,r=n.c,i=n.l,u=t.h-e,o=t.c-r,a=t.l-i;return 
isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(u)?(u=0,e=isNaN(e)?t.h:e):u>180?u-=360:-180>u&&(u+=360),function(n){return sn(e+u*n,r+o*n,i+a*n)+""}}function Dr(n,t){n=ao.hsl(n),t=ao.hsl(t);var e=n.h,r=n.s,i=n.l,u=t.h-e,o=t.s-r,a=t.l-i;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(u)?(u=0,e=isNaN(e)?t.h:e):u>180?u-=360:-180>u&&(u+=360),function(n){return cn(e+u*n,r+o*n,i+a*n)+""}}function Pr(n,t){n=ao.lab(n),t=ao.lab(t);var e=n.l,r=n.a,i=n.b,u=t.l-e,o=t.a-r,a=t.b-i;return function(n){return pn(e+u*n,r+o*n,i+a*n)+""}}function Ur(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function jr(n){var t=[n.a,n.b],e=[n.c,n.d],r=Hr(t),i=Fr(t,e),u=Hr(Or(e,t,-i))||0;t[0]*e[1]<e[0]*t[1]&&(t[0]*=-1,t[1]*=-1,r*=-1,i*=-1),this.rotate=(r?Math.atan2(t[1],t[0]):Math.atan2(-e[0],e[1]))*Zo,this.translate=[n.e,n.f],this.scale=[r,u],this.skew=u?Math.atan2(i,u)*Zo:0}function Fr(n,t){return n[0]*t[0]+n[1]*t[1]}function Hr(n){var t=Math.sqrt(Fr(n,n));return t&&(n[0]/=t,n[1]/=t),t}function Or(n,t,e){return n[0]+=e*t[0],n[1]+=e*t[1],n}function Ir(n){return n.length?n.pop()+",":""}function Yr(n,t,e,r){if(n[0]!==t[0]||n[1]!==t[1]){var i=e.push("translate(",null,",",null,")");r.push({i:i-4,x:yr(n[0],t[0])},{i:i-2,x:yr(n[1],t[1])})}else(t[0]||t[1])&&e.push("translate("+t+")")}function Zr(n,t,e,r){n!==t?(n-t>180?t+=360:t-n>180&&(n+=360),r.push({i:e.push(Ir(e)+"rotate(",null,")")-2,x:yr(n,t)})):t&&e.push(Ir(e)+"rotate("+t+")")}function Vr(n,t,e,r){n!==t?r.push({i:e.push(Ir(e)+"skewX(",null,")")-2,x:yr(n,t)}):t&&e.push(Ir(e)+"skewX("+t+")")}function Xr(n,t,e,r){if(n[0]!==t[0]||n[1]!==t[1]){var i=e.push(Ir(e)+"scale(",null,",",null,")");r.push({i:i-4,x:yr(n[0],t[0])},{i:i-2,x:yr(n[1],t[1])})}else 1===t[0]&&1===t[1]||e.push(Ir(e)+"scale("+t+")")}function $r(n,t){var e=[],r=[];return n=ao.transform(n),t=ao.transform(t),Yr(n.translate,t.translate,e,r),Zr(n.rotate,t.rotate,e,r),Vr(n.skew,t.skew,e,r),Xr(n.scale,t.scale,e,r),n=t=null,function(n){for(var t,i=-1,u=r.length;++i<u;)e[(t=r[i]).i]=t.x(n);return e.join("")}}function Br(n,t){return t=(t-=n=+n)||1/t,function(e){return(e-n)/t}}function Wr(n,t){return t=(t-=n=+n)||1/t,function(e){return Math.max(0,Math.min(1,(e-n)/t))}}function Jr(n){for(var t=n.source,e=n.target,r=Kr(t,e),i=[t];t!==r;)t=t.parent,i.push(t);for(var u=i.length;e!==r;)i.splice(u,0,e),e=e.parent;return i}function Gr(n){for(var t=[],e=n.parent;null!=e;)t.push(n),n=e,e=e.parent;return t.push(n),t}function Kr(n,t){if(n===t)return n;for(var e=Gr(n),r=Gr(t),i=e.pop(),u=r.pop(),o=null;i===u;)o=i,i=e.pop(),u=r.pop();return o}function Qr(n){n.fixed|=2}function ni(n){n.fixed&=-7}function ti(n){n.fixed|=4,n.px=n.x,n.py=n.y}function ei(n){n.fixed&=-5}function ri(n,t,e){var r=0,i=0;if(n.charge=0,!n.leaf)for(var u,o=n.nodes,a=o.length,l=-1;++l<a;)u=o[l],null!=u&&(ri(u,t,e),n.charge+=u.charge,r+=u.charge*u.cx,i+=u.charge*u.cy);if(n.point){n.leaf||(n.point.x+=Math.random()-.5,n.point.y+=Math.random()-.5);var c=t*e[n.point.index];n.charge+=n.pointCharge=c,r+=c*n.point.x,i+=c*n.point.y}n.cx=r/n.charge,n.cy=i/n.charge}function ii(n,t){return ao.rebind(n,t,"sort","children","value"),n.nodes=n,n.links=fi,n}function ui(n,t){for(var e=[n];null!=(n=e.pop());)if(t(n),(i=n.children)&&(r=i.length))for(var r,i;--r>=0;)e.push(i[r])}function oi(n,t){for(var e=[n],r=[];null!=(n=e.pop());)if(r.push(n),(u=n.children)&&(i=u.length))for(var i,u,o=-1;++o<i;)e.push(u[o]);for(;null!=(n=r.pop());)t(n)}function ai(n){return n.children}function li(n){return n.value}function ci(n,t){return t.value-n.value}function fi(n){return 
ao.merge(n.map(function(n){return(n.children||[]).map(function(t){return{source:n,target:t}})}))}function si(n){return n.x}function hi(n){return n.y}function pi(n,t,e){n.y0=t,n.y=e}function gi(n){return ao.range(n.length)}function vi(n){for(var t=-1,e=n[0].length,r=[];++t<e;)r[t]=0;return r}function di(n){for(var t,e=1,r=0,i=n[0][1],u=n.length;u>e;++e)(t=n[e][1])>i&&(r=e,i=t);return r}function yi(n){return n.reduce(mi,0)}function mi(n,t){return n+t[1]}function Mi(n,t){return xi(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function xi(n,t){for(var e=-1,r=+n[0],i=(n[1]-r)/t,u=[];++e<=t;)u[e]=i*e+r;return u}function bi(n){return[ao.min(n),ao.max(n)]}function _i(n,t){return n.value-t.value}function wi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Si(n,t){n._pack_next=t,t._pack_prev=n}function ki(n,t){var e=t.x-n.x,r=t.y-n.y,i=n.r+t.r;return.999*i*i>e*e+r*r}function Ni(n){function t(n){f=Math.min(n.x-n.r,f),s=Math.max(n.x+n.r,s),h=Math.min(n.y-n.r,h),p=Math.max(n.y+n.r,p)}if((e=n.children)&&(c=e.length)){var e,r,i,u,o,a,l,c,f=1/0,s=-(1/0),h=1/0,p=-(1/0);if(e.forEach(Ei),r=e[0],r.x=-r.r,r.y=0,t(r),c>1&&(i=e[1],i.x=i.r,i.y=0,t(i),c>2))for(u=e[2],zi(r,i,u),t(u),wi(r,u),r._pack_prev=u,wi(u,i),i=r._pack_next,o=3;c>o;o++){zi(r,i,u=e[o]);var g=0,v=1,d=1;for(a=i._pack_next;a!==i;a=a._pack_next,v++)if(ki(a,u)){g=1;break}if(1==g)for(l=r._pack_prev;l!==a._pack_prev&&!ki(l,u);l=l._pack_prev,d++);g?(d>v||v==d&&i.r<r.r?Si(r,i=a):Si(r=l,i),o--):(wi(r,u),i=u,t(u))}var y=(f+s)/2,m=(h+p)/2,M=0;for(o=0;c>o;o++)u=e[o],u.x-=y,u.y-=m,M=Math.max(M,u.r+Math.sqrt(u.x*u.x+u.y*u.y));n.r=M,e.forEach(Ai)}}function Ei(n){n._pack_next=n._pack_prev=n}function Ai(n){delete n._pack_next,delete n._pack_prev}function Ci(n,t,e,r){var i=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,i)for(var u=-1,o=i.length;++u<o;)Ci(i[u],t,e,r)}function zi(n,t,e){var r=n.r+e.r,i=t.x-n.x,u=t.y-n.y;if(r&&(i||u)){var o=t.r+e.r,a=i*i+u*u;o*=o,r*=r;var l=.5+(r-o)/(2*a),c=Math.sqrt(Math.max(0,2*o*(r+a)-(r-=a)*r-o*o))/(2*a);e.x=n.x+l*i+c*u,e.y=n.y+l*u-c*i}else e.x=n.x+r,e.y=n.y}function Li(n,t){return n.parent==t.parent?1:2}function qi(n){var t=n.children;return t.length?t[0]:n.t}function Ti(n){var t,e=n.children;return(t=e.length)?e[t-1]:n.t}function Ri(n,t,e){var r=e/(t.i-n.i);t.c-=r,t.s+=e,n.c+=r,t.z+=e,t.m+=e}function Di(n){for(var t,e=0,r=0,i=n.children,u=i.length;--u>=0;)t=i[u],t.z+=e,t.m+=e,e+=t.s+(r+=t.c)}function Pi(n,t,e){return n.a.parent===t.parent?n.a:e}function Ui(n){return 1+ao.max(n,function(n){return n.y})}function ji(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Fi(n){var t=n.children;return t&&t.length?Fi(t[0]):n}function Hi(n){var t,e=n.children;return e&&(t=e.length)?Hi(e[t-1]):n}function Oi(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function Ii(n,t){var e=n.x+t[3],r=n.y+t[0],i=n.dx-t[1]-t[3],u=n.dy-t[0]-t[2];return 0>i&&(e+=i/2,i=0),0>u&&(r+=u/2,u=0),{x:e,y:r,dx:i,dy:u}}function Yi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Zi(n){return n.rangeExtent?n.rangeExtent():Yi(n.range())}function Vi(n,t,e,r){var i=e(n[0],n[1]),u=r(t[0],t[1]);return function(n){return u(i(n))}}function Xi(n,t){var e,r=0,i=n.length-1,u=n[r],o=n[i];return u>o&&(e=r,r=i,i=e,e=u,u=o,o=e),n[r]=t.floor(u),n[i]=t.ceil(o),n}function $i(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:Sl}function Bi(n,t,e,r){var 
i=[],u=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]<n[0]&&(n=n.slice().reverse(),t=t.slice().reverse());++o<=a;)i.push(e(n[o-1],n[o])),u.push(r(t[o-1],t[o]));return function(t){var e=ao.bisect(n,t,1,a)-1;return u[e](i[e](t))}}function Wi(n,t,e,r){function i(){var i=Math.min(n.length,t.length)>2?Bi:Vi,l=r?Wr:Br;return o=i(n,t,l,e),a=i(t,n,l,Mr),u}function u(n){return o(n)}var o,a;return u.invert=function(n){return a(n)},u.domain=function(t){return arguments.length?(n=t.map(Number),i()):n},u.range=function(n){return arguments.length?(t=n,i()):t},u.rangeRound=function(n){return u.range(n).interpolate(Ur)},u.clamp=function(n){return arguments.length?(r=n,i()):r},u.interpolate=function(n){return arguments.length?(e=n,i()):e},u.ticks=function(t){return Qi(n,t)},u.tickFormat=function(t,e){return nu(n,t,e)},u.nice=function(t){return Gi(n,t),i()},u.copy=function(){return Wi(n,t,e,r)},i()}function Ji(n,t){return ao.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Gi(n,t){return Xi(n,$i(Ki(n,t)[2])),Xi(n,$i(Ki(n,t)[2])),n}function Ki(n,t){null==t&&(t=10);var e=Yi(n),r=e[1]-e[0],i=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),u=t/r*i;return.15>=u?i*=10:.35>=u?i*=5:.75>=u&&(i*=2),e[0]=Math.ceil(e[0]/i)*i,e[1]=Math.floor(e[1]/i)*i+.5*i,e[2]=i,e}function Qi(n,t){return ao.range.apply(ao,Ki(n,t))}function nu(n,t,e){var r=Ki(n,t);if(e){var i=ha.exec(e);if(i.shift(),"s"===i[8]){var u=ao.formatPrefix(Math.max(xo(r[0]),xo(r[1])));return i[7]||(i[7]="."+tu(u.scale(r[2]))),i[8]="f",e=ao.format(i.join("")),function(n){return e(u.scale(n))+u.symbol}}i[7]||(i[7]="."+eu(i[8],r)),e=i.join("")}else e=",."+tu(r[2])+"f";return ao.format(e)}function tu(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function eu(n,t){var e=tu(t[2]);return n in kl?Math.abs(e-tu(Math.max(xo(t[0]),xo(t[1]))))+ +("e"!==n):e-2*("%"===n)}function ru(n,t,e,r){function i(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function u(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(i(t))}return o.invert=function(t){return u(n.invert(t))},o.domain=function(t){return arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(i)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(i)),o):t},o.nice=function(){var t=Xi(r.map(i),e?Math:El);return n.domain(t),r=t.map(u),o},o.ticks=function(){var n=Yi(r),o=[],a=n[0],l=n[1],c=Math.floor(i(a)),f=Math.ceil(i(l)),s=t%1?2:t;if(isFinite(f-c)){if(e){for(;f>c;c++)for(var h=1;s>h;h++)o.push(u(c)*h);o.push(u(c))}else for(o.push(u(c));c++<f;)for(var h=s-1;h>0;h--)o.push(u(c)*h);for(c=0;o[c]<a;c++);for(f=o.length;o[f-1]>l;f--);o=o.slice(c,f)}return o},o.tickFormat=function(n,e){if(!arguments.length)return Nl;arguments.length<2?e=Nl:"function"!=typeof e&&(e=ao.format(e));var r=Math.max(1,t*n/o.ticks().length);return function(n){var o=n/u(Math.round(i(n)));return t-.5>o*t&&(o*=t),r>=o?e(n):""}},o.copy=function(){return ru(n.copy(),t,e,r)},Ji(o,n)}function iu(n,t,e){function r(t){return n(i(t))}var i=uu(t),u=uu(1/t);return r.invert=function(t){return u(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(i)),r):e},r.ticks=function(n){return Qi(e,n)},r.tickFormat=function(n,t){return nu(e,n,t)},r.nice=function(n){return r.domain(Gi(e,n))},r.exponent=function(o){return arguments.length?(i=uu(t=o),u=uu(1/t),n.domain(e.map(i)),r):t},r.copy=function(){return iu(n.copy(),t,e)},Ji(r,n)}function uu(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function ou(n,t){function e(e){return 
u[((i.get(e)||("range"===t.t?i.set(e,n.push(e)):NaN))-1)%u.length]}function r(t,e){return ao.range(n.length).map(function(n){return t+e*n})}var i,u,o;return e.domain=function(r){if(!arguments.length)return n;n=[],i=new c;for(var u,o=-1,a=r.length;++o<a;)i.has(u=r[o])||i.set(u,n.push(u));return e[t.t].apply(e,t.a)},e.range=function(n){return arguments.length?(u=n,o=0,t={t:"range",a:arguments},e):u},e.rangePoints=function(i,a){arguments.length<2&&(a=0);var l=i[0],c=i[1],f=n.length<2?(l=(l+c)/2,0):(c-l)/(n.length-1+a);return u=r(l+f*a/2,f),o=0,t={t:"rangePoints",a:arguments},e},e.rangeRoundPoints=function(i,a){arguments.length<2&&(a=0);var l=i[0],c=i[1],f=n.length<2?(l=c=Math.round((l+c)/2),0):(c-l)/(n.length-1+a)|0;return u=r(l+Math.round(f*a/2+(c-l-(n.length-1+a)*f)/2),f),o=0,t={t:"rangeRoundPoints",a:arguments},e},e.rangeBands=function(i,a,l){arguments.length<2&&(a=0),arguments.length<3&&(l=a);var c=i[1]<i[0],f=i[c-0],s=i[1-c],h=(s-f)/(n.length-a+2*l);return u=r(f+h*l,h),c&&u.reverse(),o=h*(1-a),t={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(i,a,l){arguments.length<2&&(a=0),arguments.length<3&&(l=a);var c=i[1]<i[0],f=i[c-0],s=i[1-c],h=Math.floor((s-f)/(n.length-a+2*l));return u=r(f+Math.round((s-f-(n.length-a)*h)/2),h),c&&u.reverse(),o=Math.round(h*(1-a)),t={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return o},e.rangeExtent=function(){return Yi(t.a[0])},e.copy=function(){return ou(n,t)},e.domain(n)}function au(n,t){function u(){var e=0,r=t.length;for(a=[];++e<r;)a[e-1]=ao.quantile(n,e/r);return o}function o(n){return isNaN(n=+n)?void 0:t[ao.bisect(a,n)]}var a;return o.domain=function(t){return arguments.length?(n=t.map(r).filter(i).sort(e),u()):n},o.range=function(n){return arguments.length?(t=n,u()):t},o.quantiles=function(){return a},o.invertExtent=function(e){return e=t.indexOf(e),0>e?[NaN,NaN]:[e>0?a[e-1]:n[0],e<a.length?a[e]:n[n.length-1]]},o.copy=function(){return au(n,t)},u()}function lu(n,t,e){function r(t){return e[Math.max(0,Math.min(o,Math.floor(u*(t-n))))]}function i(){return u=e.length/(t-n),o=e.length-1,r}var u,o;return r.domain=function(e){return arguments.length?(n=+e[0],t=+e[e.length-1],i()):[n,t]},r.range=function(n){return arguments.length?(e=n,i()):e},r.invertExtent=function(t){return t=e.indexOf(t),t=0>t?NaN:t/u+n,[t,t+1/u]},r.copy=function(){return lu(n,t,e)},i()}function cu(n,t){function e(e){return e>=e?t[ao.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return cu(n,t)},e}function fu(n){function t(n){return+n}return t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Qi(n,t)},t.tickFormat=function(t,e){return nu(n,t,e)},t.copy=function(){return fu(n)},t}function su(){return 0}function hu(n){return n.innerRadius}function pu(n){return n.outerRadius}function gu(n){return n.startAngle}function vu(n){return n.endAngle}function du(n){return n&&n.padAngle}function yu(n,t,e,r){return(n-e)*t-(t-r)*n>0?0:1}function mu(n,t,e,r,i){var u=n[0]-t[0],o=n[1]-t[1],a=(i?r:-r)/Math.sqrt(u*u+o*o),l=a*o,c=-a*u,f=n[0]+l,s=n[1]+c,h=t[0]+l,p=t[1]+c,g=(f+h)/2,v=(s+p)/2,d=h-f,y=p-s,m=d*d+y*y,M=e-r,x=f*p-h*s,b=(0>y?-1:1)*Math.sqrt(Math.max(0,M*M*m-x*x)),_=(x*y-d*b)/m,w=(-x*d-y*b)/m,S=(x*y+d*b)/m,k=(-x*d+y*b)/m,N=_-g,E=w-v,A=S-g,C=k-v;return N*N+E*E>A*A+C*C&&(_=S,w=k),[[_-l,w-c],[_*e/M,w*e/M]]}function Mu(n){function t(t){function 
o(){c.push("M",u(n(f),a))}for(var l,c=[],f=[],s=-1,h=t.length,p=En(e),g=En(r);++s<h;)i.call(this,l=t[s],s)?f.push([+p.call(this,l,s),+g.call(this,l,s)]):f.length&&(o(),f=[]);return f.length&&o(),c.length?c.join(""):null}var e=Ce,r=ze,i=zt,u=xu,o=u.key,a=.7;return t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.defined=function(n){return arguments.length?(i=n,t):i},t.interpolate=function(n){return arguments.length?(o="function"==typeof n?u=n:(u=Tl.get(n)||xu).key,t):o},t.tension=function(n){return arguments.length?(a=n,t):a},t}function xu(n){return n.length>1?n.join("L"):n+"Z"}function bu(n){return n.join("L")+"Z"}function _u(n){for(var t=0,e=n.length,r=n[0],i=[r[0],",",r[1]];++t<e;)i.push("H",(r[0]+(r=n[t])[0])/2,"V",r[1]);return e>1&&i.push("H",r[0]),i.join("")}function wu(n){for(var t=0,e=n.length,r=n[0],i=[r[0],",",r[1]];++t<e;)i.push("V",(r=n[t])[1],"H",r[0]);return i.join("")}function Su(n){for(var t=0,e=n.length,r=n[0],i=[r[0],",",r[1]];++t<e;)i.push("H",(r=n[t])[0],"V",r[1]);return i.join("")}function ku(n,t){return n.length<4?xu(n):n[1]+Au(n.slice(1,-1),Cu(n,t))}function Nu(n,t){return n.length<3?bu(n):n[0]+Au((n.push(n[0]),n),Cu([n[n.length-2]].concat(n,[n[1]]),t))}function Eu(n,t){return n.length<3?xu(n):n[0]+Au(n,Cu(n,t))}function Au(n,t){if(t.length<1||n.length!=t.length&&n.length!=t.length+2)return xu(n);var e=n.length!=t.length,r="",i=n[0],u=n[1],o=t[0],a=o,l=1;if(e&&(r+="Q"+(u[0]-2*o[0]/3)+","+(u[1]-2*o[1]/3)+","+u[0]+","+u[1],i=n[1],l=2),t.length>1){a=t[1],u=n[l],l++,r+="C"+(i[0]+o[0])+","+(i[1]+o[1])+","+(u[0]-a[0])+","+(u[1]-a[1])+","+u[0]+","+u[1];for(var c=2;c<t.length;c++,l++)u=n[l],a=t[c],r+="S"+(u[0]-a[0])+","+(u[1]-a[1])+","+u[0]+","+u[1]}if(e){var f=n[l];r+="Q"+(u[0]+2*a[0]/3)+","+(u[1]+2*a[1]/3)+","+f[0]+","+f[1]}return r}function Cu(n,t){for(var e,r=[],i=(1-t)/2,u=n[0],o=n[1],a=1,l=n.length;++a<l;)e=u,u=o,o=n[a],r.push([i*(o[0]-e[0]),i*(o[1]-e[1])]);return r}function zu(n){if(n.length<3)return xu(n);var t=1,e=n.length,r=n[0],i=r[0],u=r[1],o=[i,i,i,(r=n[1])[0]],a=[u,u,u,r[1]],l=[i,",",u,"L",Ru(Pl,o),",",Ru(Pl,a)];for(n.push(n[e-1]);++t<=e;)r=n[t],o.shift(),o.push(r[0]),a.shift(),a.push(r[1]),Du(l,o,a);return n.pop(),l.push("L",r),l.join("")}function Lu(n){if(n.length<4)return xu(n);for(var t,e=[],r=-1,i=n.length,u=[0],o=[0];++r<3;)t=n[r],u.push(t[0]),o.push(t[1]);for(e.push(Ru(Pl,u)+","+Ru(Pl,o)),--r;++r<i;)t=n[r],u.shift(),u.push(t[0]),o.shift(),o.push(t[1]),Du(e,u,o);return e.join("")}function qu(n){for(var t,e,r=-1,i=n.length,u=i+4,o=[],a=[];++r<4;)e=n[r%i],o.push(e[0]),a.push(e[1]);for(t=[Ru(Pl,o),",",Ru(Pl,a)],--r;++r<u;)e=n[r%i],o.shift(),o.push(e[0]),a.shift(),a.push(e[1]),Du(t,o,a);return t.join("")}function Tu(n,t){var e=n.length-1;if(e)for(var r,i,u=n[0][0],o=n[0][1],a=n[e][0]-u,l=n[e][1]-o,c=-1;++c<=e;)r=n[c],i=c/e,r[0]=t*r[0]+(1-t)*(u+i*a),r[1]=t*r[1]+(1-t)*(o+i*l);return zu(n)}function Ru(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]+n[3]*t[3]}function Du(n,t,e){n.push("C",Ru(Rl,t),",",Ru(Rl,e),",",Ru(Dl,t),",",Ru(Dl,e),",",Ru(Pl,t),",",Ru(Pl,e))}function Pu(n,t){return(t[1]-n[1])/(t[0]-n[0])}function Uu(n){for(var t=0,e=n.length-1,r=[],i=n[0],u=n[1],o=r[0]=Pu(i,u);++t<e;)r[t]=(o+(o=Pu(i=u,u=n[t+1])))/2;return r[t]=o,r}function ju(n){for(var 
t,e,r,i,u=[],o=Uu(n),a=-1,l=n.length-1;++a<l;)t=Pu(n[a],n[a+1]),xo(t)<Uo?o[a]=o[a+1]=0:(e=o[a]/t,r=o[a+1]/t,i=e*e+r*r,i>9&&(i=3*t/Math.sqrt(i),o[a]=i*e,o[a+1]=i*r));for(a=-1;++a<=l;)i=(n[Math.min(l,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),u.push([i||0,o[a]*i||0]);return u}function Fu(n){return n.length<3?xu(n):n[0]+Au(n,ju(n))}function Hu(n){for(var t,e,r,i=-1,u=n.length;++i<u;)t=n[i],e=t[0],r=t[1]-Io,t[0]=e*Math.cos(r),t[1]=e*Math.sin(r);return n}function Ou(n){function t(t){function l(){v.push("M",a(n(y),s),f,c(n(d.reverse()),s),"Z")}for(var h,p,g,v=[],d=[],y=[],m=-1,M=t.length,x=En(e),b=En(i),_=e===r?function(){
-return p}:En(r),w=i===u?function(){return g}:En(u);++m<M;)o.call(this,h=t[m],m)?(d.push([p=+x.call(this,h,m),g=+b.call(this,h,m)]),y.push([+_.call(this,h,m),+w.call(this,h,m)])):d.length&&(l(),d=[],y=[]);return d.length&&l(),v.length?v.join(""):null}var e=Ce,r=Ce,i=0,u=ze,o=zt,a=xu,l=a.key,c=a,f="L",s=.7;return t.x=function(n){return arguments.length?(e=r=n,t):r},t.x0=function(n){return arguments.length?(e=n,t):e},t.x1=function(n){return arguments.length?(r=n,t):r},t.y=function(n){return arguments.length?(i=u=n,t):u},t.y0=function(n){return arguments.length?(i=n,t):i},t.y1=function(n){return arguments.length?(u=n,t):u},t.defined=function(n){return arguments.length?(o=n,t):o},t.interpolate=function(n){return arguments.length?(l="function"==typeof n?a=n:(a=Tl.get(n)||xu).key,c=a.reverse||a,f=a.closed?"M":"L",t):l},t.tension=function(n){return arguments.length?(s=n,t):s},t}function Iu(n){return n.radius}function Yu(n){return[n.x,n.y]}function Zu(n){return function(){var t=n.apply(this,arguments),e=t[0],r=t[1]-Io;return[e*Math.cos(r),e*Math.sin(r)]}}function Vu(){return 64}function Xu(){return"circle"}function $u(n){var t=Math.sqrt(n/Fo);return"M0,"+t+"A"+t+","+t+" 0 1,1 0,"+-t+"A"+t+","+t+" 0 1,1 0,"+t+"Z"}function Bu(n){return function(){var t,e,r;(t=this[n])&&(r=t[e=t.active])&&(r.timer.c=null,r.timer.t=NaN,--t.count?delete t[e]:delete this[n],t.active+=.5,r.event&&r.event.interrupt.call(this,this.__data__,r.index))}}function Wu(n,t,e){return ko(n,Yl),n.namespace=t,n.id=e,n}function Ju(n,t,e,r){var i=n.id,u=n.namespace;return Y(n,"function"==typeof e?function(n,o,a){n[u][i].tween.set(t,r(e.call(n,n.__data__,o,a)))}:(e=r(e),function(n){n[u][i].tween.set(t,e)}))}function Gu(n){return null==n&&(n=""),function(){this.textContent=n}}function Ku(n){return null==n?"__transition__":"__transition_"+n+"__"}function Qu(n,t,e,r,i){function u(n){var t=v.delay;return f.t=t+l,n>=t?o(n-t):void(f.c=o)}function o(e){var i=g.active,u=g[i];u&&(u.timer.c=null,u.timer.t=NaN,--g.count,delete g[i],u.event&&u.event.interrupt.call(n,n.__data__,u.index));for(var o in g)if(r>+o){var c=g[o];c.timer.c=null,c.timer.t=NaN,--g.count,delete g[o]}f.c=a,qn(function(){return f.c&&a(e||1)&&(f.c=null,f.t=NaN),1},0,l),g.active=r,v.event&&v.event.start.call(n,n.__data__,t),p=[],v.tween.forEach(function(e,r){(r=r.call(n,n.__data__,t))&&p.push(r)}),h=v.ease,s=v.duration}function a(i){for(var u=i/s,o=h(u),a=p.length;a>0;)p[--a].call(n,o);return u>=1?(v.event&&v.event.end.call(n,n.__data__,t),--g.count?delete g[r]:delete n[e],1):void 0}var l,f,s,h,p,g=n[e]||(n[e]={active:0,count:0}),v=g[r];v||(l=i.time,f=qn(u,0,l),v=g[r]={tween:new c,time:l,timer:f,delay:i.delay,duration:i.duration,ease:i.ease,index:t},i=null,++g.count)}function no(n,t,e){n.attr("transform",function(n){var r=t(n);return"translate("+(isFinite(r)?r:e(n))+",0)"})}function to(n,t,e){n.attr("transform",function(n){var r=t(n);return"translate(0,"+(isFinite(r)?r:e(n))+")"})}function eo(n){return n.toISOString()}function ro(n,t,e){function r(t){return n(t)}function i(n,e){var r=n[1]-n[0],i=r/e,u=ao.bisect(Kl,i);return u==Kl.length?[t.year,Ki(n.map(function(n){return n/31536e6}),e)[2]]:u?t[i/Kl[u-1]<Kl[u]/i?u-1:u]:[tc,Ki(n,e)[2]]}return r.invert=function(t){return io(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain(t),r):n.domain().map(io)},r.nice=function(n,t){function e(e){return!isNaN(e)&&!n.range(e,io(+e+1),t).length}var u=r.domain(),o=Yi(u),a=null==n?i(o,10):"number"==typeof n&&i(o,n);return 
a&&(n=a[0],t=a[1]),r.domain(Xi(u,t>1?{floor:function(t){for(;e(t=n.floor(t));)t=io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=Yi(r.domain()),u=null==n?i(e,10):"number"==typeof n?i(e,n):!n.range&&[{range:n},t];return u&&(n=u[0],t=u[1]),n.range(e[0],io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return ro(n.copy(),t,e)},Ji(r,n)}function io(n){return new Date(n)}function uo(n){return JSON.parse(n.responseText)}function oo(n){var t=fo.createRange();return t.selectNode(fo.body),t.createContextualFragment(n.responseText)}var ao={version:"3.5.17"},lo=[].slice,co=function(n){return lo.call(n)},fo=this.document;if(fo)try{co(fo.documentElement.childNodes)[0].nodeType}catch(so){co=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}if(Date.now||(Date.now=function(){return+new Date}),fo)try{fo.createElement("DIV").style.setProperty("opacity",0,"")}catch(ho){var po=this.Element.prototype,go=po.setAttribute,vo=po.setAttributeNS,yo=this.CSSStyleDeclaration.prototype,mo=yo.setProperty;po.setAttribute=function(n,t){go.call(this,n,t+"")},po.setAttributeNS=function(n,t,e){vo.call(this,n,t,e+"")},yo.setProperty=function(n,t,e){mo.call(this,n,t+"",e)}}ao.ascending=e,ao.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:NaN},ao.min=function(n,t){var e,r,i=-1,u=n.length;if(1===arguments.length){for(;++i<u;)if(null!=(r=n[i])&&r>=r){e=r;break}for(;++i<u;)null!=(r=n[i])&&e>r&&(e=r)}else{for(;++i<u;)if(null!=(r=t.call(n,n[i],i))&&r>=r){e=r;break}for(;++i<u;)null!=(r=t.call(n,n[i],i))&&e>r&&(e=r)}return e},ao.max=function(n,t){var e,r,i=-1,u=n.length;if(1===arguments.length){for(;++i<u;)if(null!=(r=n[i])&&r>=r){e=r;break}for(;++i<u;)null!=(r=n[i])&&r>e&&(e=r)}else{for(;++i<u;)if(null!=(r=t.call(n,n[i],i))&&r>=r){e=r;break}for(;++i<u;)null!=(r=t.call(n,n[i],i))&&r>e&&(e=r)}return e},ao.extent=function(n,t){var e,r,i,u=-1,o=n.length;if(1===arguments.length){for(;++u<o;)if(null!=(r=n[u])&&r>=r){e=i=r;break}for(;++u<o;)null!=(r=n[u])&&(e>r&&(e=r),r>i&&(i=r))}else{for(;++u<o;)if(null!=(r=t.call(n,n[u],u))&&r>=r){e=i=r;break}for(;++u<o;)null!=(r=t.call(n,n[u],u))&&(e>r&&(e=r),r>i&&(i=r))}return[e,i]},ao.sum=function(n,t){var e,r=0,u=n.length,o=-1;if(1===arguments.length)for(;++o<u;)i(e=+n[o])&&(r+=e);else for(;++o<u;)i(e=+t.call(n,n[o],o))&&(r+=e);return r},ao.mean=function(n,t){var e,u=0,o=n.length,a=-1,l=o;if(1===arguments.length)for(;++a<o;)i(e=r(n[a]))?u+=e:--l;else for(;++a<o;)i(e=r(t.call(n,n[a],a)))?u+=e:--l;return l?u/l:void 0},ao.quantile=function(n,t){var e=(n.length-1)*t+1,r=Math.floor(e),i=+n[r-1],u=e-r;return u?i+u*(n[r]-i):i},ao.median=function(n,t){var u,o=[],a=n.length,l=-1;if(1===arguments.length)for(;++l<a;)i(u=r(n[l]))&&o.push(u);else for(;++l<a;)i(u=r(t.call(n,n[l],l)))&&o.push(u);return o.length?ao.quantile(o.sort(e),.5):void 0},ao.variance=function(n,t){var e,u,o=n.length,a=0,l=0,c=-1,f=0;if(1===arguments.length)for(;++c<o;)i(e=r(n[c]))&&(u=e-a,a+=u/++f,l+=u*(e-a));else for(;++c<o;)i(e=r(t.call(n,n[c],c)))&&(u=e-a,a+=u/++f,l+=u*(e-a));return f>1?l/(f-1):void 0},ao.deviation=function(){var n=ao.variance.apply(this,arguments);return n?Math.sqrt(n):n};var Mo=u(e);ao.bisectLeft=Mo.left,ao.bisect=ao.bisectRight=Mo.right,ao.bisector=function(n){return u(1===n.length?function(t,r){return e(n(t),r)}:n)},ao.shuffle=function(n,t,e){(u=arguments.length)<3&&(e=n.length,2>u&&(t=0));for(var r,i,u=e-t;u;)i=Math.random()*u--|0,r=n[u+t],n[u+t]=n[i+t],n[i+t]=r;return n},ao.permute=function(n,t){for(var 
e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},ao.pairs=function(n){for(var t,e=0,r=n.length-1,i=n[0],u=new Array(0>r?0:r);r>e;)u[e]=[t=i,i=n[++e]];return u},ao.transpose=function(n){if(!(i=n.length))return[];for(var t=-1,e=ao.min(n,o),r=new Array(e);++t<e;)for(var i,u=-1,a=r[t]=new Array(i);++u<i;)a[u]=n[u][t];return r},ao.zip=function(){return ao.transpose(arguments)},ao.keys=function(n){var t=[];for(var e in n)t.push(e);return t},ao.values=function(n){var t=[];for(var e in n)t.push(n[e]);return t},ao.entries=function(n){var t=[];for(var e in n)t.push({key:e,value:n[e]});return t},ao.merge=function(n){for(var t,e,r,i=n.length,u=-1,o=0;++u<i;)o+=n[u].length;for(e=new Array(o);--i>=0;)for(r=n[i],t=r.length;--t>=0;)e[--o]=r[t];return e};var xo=Math.abs;ao.range=function(n,t,e){if(arguments.length<3&&(e=1,arguments.length<2&&(t=n,n=0)),(t-n)/e===1/0)throw new Error("infinite range");var r,i=[],u=a(xo(e)),o=-1;if(n*=u,t*=u,e*=u,0>e)for(;(r=n+e*++o)>t;)i.push(r/u);else for(;(r=n+e*++o)<t;)i.push(r/u);return i},ao.map=function(n,t){var e=new c;if(n instanceof c)n.forEach(function(n,t){e.set(n,t)});else if(Array.isArray(n)){var r,i=-1,u=n.length;if(1===arguments.length)for(;++i<u;)e.set(i,n[i]);else for(;++i<u;)e.set(t.call(n,r=n[i],i),r)}else for(var o in n)e.set(o,n[o]);return e};var bo="__proto__",_o="\x00";l(c,{has:h,get:function(n){return this._[f(n)]},set:function(n,t){return this._[f(n)]=t},remove:p,keys:g,values:function(){var n=[];for(var t in this._)n.push(this._[t]);return n},entries:function(){var n=[];for(var t in this._)n.push({key:s(t),value:this._[t]});return n},size:v,empty:d,forEach:function(n){for(var t in this._)n.call(this,s(t),this._[t])}}),ao.nest=function(){function n(t,o,a){if(a>=u.length)return r?r.call(i,o):e?o.sort(e):o;for(var l,f,s,h,p=-1,g=o.length,v=u[a++],d=new c;++p<g;)(h=d.get(l=v(f=o[p])))?h.push(f):d.set(l,[f]);return t?(f=t(),s=function(e,r){f.set(e,n(t,r,a))}):(f={},s=function(e,r){f[e]=n(t,r,a)}),d.forEach(s),f}function t(n,e){if(e>=u.length)return n;var r=[],i=o[e++];return n.forEach(function(n,i){r.push({key:n,values:t(i,e)})}),i?r.sort(function(n,t){return i(n.key,t.key)}):r}var e,r,i={},u=[],o=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(ao.map,e,0),0)},i.key=function(n){return u.push(n),i},i.sortKeys=function(n){return o[u.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},ao.set=function(n){var t=new y;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},l(y,{has:h,add:function(n){return this._[f(n+="")]=!0,n},remove:p,values:g,size:v,empty:d,forEach:function(n){for(var t in this._)n.call(this,s(t))}}),ao.behavior={},ao.rebind=function(n,t){for(var e,r=1,i=arguments.length;++r<i;)n[e=arguments[r]]=M(n,t,t[e]);return n};var wo=["webkit","ms","moz","Moz","o","O"];ao.dispatch=function(){for(var n=new _,t=-1,e=arguments.length;++t<e;)n[arguments[t]]=w(n);return n},_.prototype.on=function(n,t){var e=n.indexOf("."),r="";if(e>=0&&(r=n.slice(e+1),n=n.slice(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},ao.event=null,ao.requote=function(n){return n.replace(So,"\\$&")};var So=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,ko={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},No=function(n,t){return t.querySelector(n)},Eo=function(n,t){return t.querySelectorAll(n)},Ao=function(n,t){var 
e=n.matches||n[x(n,"matchesSelector")];return(Ao=function(n,t){return e.call(n,t)})(n,t)};"function"==typeof Sizzle&&(No=function(n,t){return Sizzle(n,t)[0]||null},Eo=Sizzle,Ao=Sizzle.matchesSelector),ao.selection=function(){return ao.select(fo.documentElement)};var Co=ao.selection.prototype=[];Co.select=function(n){var t,e,r,i,u=[];n=A(n);for(var o=-1,a=this.length;++o<a;){u.push(t=[]),t.parentNode=(r=this[o]).parentNode;for(var l=-1,c=r.length;++l<c;)(i=r[l])?(t.push(e=n.call(i,i.__data__,l,o)),e&&"__data__"in i&&(e.__data__=i.__data__)):t.push(null)}return E(u)},Co.selectAll=function(n){var t,e,r=[];n=C(n);for(var i=-1,u=this.length;++i<u;)for(var o=this[i],a=-1,l=o.length;++a<l;)(e=o[a])&&(r.push(t=co(n.call(e,e.__data__,a,i))),t.parentNode=e);return E(r)};var zo="http://www.w3.org/1999/xhtml",Lo={svg:"http://www.w3.org/2000/svg",xhtml:zo,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};ao.ns={prefix:Lo,qualify:function(n){var t=n.indexOf(":"),e=n;return t>=0&&"xmlns"!==(e=n.slice(0,t))&&(n=n.slice(t+1)),Lo.hasOwnProperty(e)?{space:Lo[e],local:n}:n}},Co.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=ao.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(z(t,n[t]));return this}return this.each(z(n,t))},Co.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=T(n)).length,i=-1;if(t=e.classList){for(;++i<r;)if(!t.contains(n[i]))return!1}else for(t=e.getAttribute("class");++i<r;)if(!q(n[i]).test(t))return!1;return!0}for(t in n)this.each(R(t,n[t]));return this}return this.each(R(n,t))},Co.style=function(n,e,r){var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(e="");for(r in n)this.each(P(r,n[r],e));return this}if(2>i){var u=this.node();return t(u).getComputedStyle(u,null).getPropertyValue(n)}r=""}return this.each(P(n,e,r))},Co.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(U(t,n[t]));return this}return this.each(U(n,t))},Co.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},Co.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},Co.append=function(n){return n=j(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},Co.insert=function(n,t){return n=j(n),t=A(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},Co.remove=function(){return this.each(F)},Co.data=function(n,t){function e(n,e){var r,i,u,o=n.length,s=e.length,h=Math.min(o,s),p=new Array(s),g=new Array(s),v=new Array(o);if(t){var d,y=new c,m=new Array(o);for(r=-1;++r<o;)(i=n[r])&&(y.has(d=t.call(i,i.__data__,r))?v[r]=i:y.set(d,i),m[r]=d);for(r=-1;++r<s;)(i=y.get(d=t.call(e,u=e[r],r)))?i!==!0&&(p[r]=i,i.__data__=u):g[r]=H(u),y.set(d,!0);for(r=-1;++r<o;)r in m&&y.get(m[r])!==!0&&(v[r]=n[r])}else{for(r=-1;++r<h;)i=n[r],u=e[r],i?(i.__data__=u,p[r]=i):g[r]=H(u);for(;s>r;++r)g[r]=H(e[r]);for(;o>r;++r)v[r]=n[r]}g.update=p,g.parentNode=p.parentNode=v.parentNode=n.parentNode,a.push(g),l.push(p),f.push(v)}var 
r,i,u=-1,o=this.length;if(!arguments.length){for(n=new Array(o=(r=this[0]).length);++u<o;)(i=r[u])&&(n[u]=i.__data__);return n}var a=Z([]),l=E([]),f=E([]);if("function"==typeof n)for(;++u<o;)e(r=this[u],n.call(r,r.parentNode.__data__,u));else for(;++u<o;)e(r=this[u],n);return l.enter=function(){return a},l.exit=function(){return f},l},Co.datum=function(n){return arguments.length?this.property("__data__",n):this.property("__data__")},Co.filter=function(n){var t,e,r,i=[];"function"!=typeof n&&(n=O(n));for(var u=0,o=this.length;o>u;u++){i.push(t=[]),t.parentNode=(e=this[u]).parentNode;for(var a=0,l=e.length;l>a;a++)(r=e[a])&&n.call(r,r.__data__,a,u)&&t.push(r)}return E(i)},Co.order=function(){for(var n=-1,t=this.length;++n<t;)for(var e,r=this[n],i=r.length-1,u=r[i];--i>=0;)(e=r[i])&&(u&&u!==e.nextSibling&&u.parentNode.insertBefore(e,u),u=e);return this},Co.sort=function(n){n=I.apply(this,arguments);for(var t=-1,e=this.length;++t<e;)this[t].sort(n);return this.order()},Co.each=function(n){return Y(this,function(t,e,r){n.call(t,t.__data__,e,r)})},Co.call=function(n){var t=co(arguments);return n.apply(t[0]=this,t),this},Co.empty=function(){return!this.node()},Co.node=function(){for(var n=0,t=this.length;t>n;n++)for(var e=this[n],r=0,i=e.length;i>r;r++){var u=e[r];if(u)return u}return null},Co.size=function(){var n=0;return Y(this,function(){++n}),n};var qo=[];ao.selection.enter=Z,ao.selection.enter.prototype=qo,qo.append=Co.append,qo.empty=Co.empty,qo.node=Co.node,qo.call=Co.call,qo.size=Co.size,qo.select=function(n){for(var t,e,r,i,u,o=[],a=-1,l=this.length;++a<l;){r=(i=this[a]).update,o.push(t=[]),t.parentNode=i.parentNode;for(var c=-1,f=i.length;++c<f;)(u=i[c])?(t.push(r[c]=e=n.call(i.parentNode,u.__data__,c,a)),e.__data__=u.__data__):t.push(null)}return E(o)},qo.insert=function(n,t){return arguments.length<2&&(t=V(this)),Co.insert.call(this,n,t)},ao.select=function(t){var e;return"string"==typeof t?(e=[No(t,fo)],e.parentNode=fo.documentElement):(e=[t],e.parentNode=n(t)),E([e])},ao.selectAll=function(n){var t;return"string"==typeof n?(t=co(Eo(n,fo)),t.parentNode=fo.documentElement):(t=co(n),t.parentNode=null),E([t])},Co.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(X(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(X(n,t,e))};var To=ao.map({mouseenter:"mouseover",mouseleave:"mouseout"});fo&&To.forEach(function(n){"on"+n in fo&&To.remove(n)});var Ro,Do=0;ao.mouse=function(n){return J(n,k())};var Po=this.navigator&&/WebKit/.test(this.navigator.userAgent)?-1:0;ao.touch=function(n,t,e){if(arguments.length<3&&(e=t,t=k().changedTouches),t)for(var r,i=0,u=t.length;u>i;++i)if((r=t[i]).identifier===e)return J(n,r)},ao.behavior.drag=function(){function n(){this.on("mousedown.drag",u).on("touchstart.drag",o)}function e(n,t,e,u,o){return function(){function a(){var n,e,r=t(h,v);r&&(n=r[0]-M[0],e=r[1]-M[1],g|=n|e,M=r,p({type:"drag",x:r[0]+c[0],y:r[1]+c[1],dx:n,dy:e}))}function l(){t(h,v)&&(y.on(u+d,null).on(o+d,null),m(g),p({type:"dragend"}))}var c,f=this,s=ao.event.target.correspondingElement||ao.event.target,h=f.parentNode,p=r.of(f,arguments),g=0,v=n(),d=".drag"+(null==v?"":"-"+v),y=ao.select(e(s)).on(u+d,a).on(o+d,l),m=W(s),M=t(h,v);i?(c=i.apply(f,arguments),c=[c.x-M[0],c.y-M[1]]):c=[0,0],p({type:"dragstart"})}}var r=N(n,"drag","dragstart","dragend"),i=null,u=e(b,ao.mouse,t,"mousemove","mouseup"),o=e(G,ao.touch,m,"touchmove","touchend");return n.origin=function(t){return 
arguments.length?(i=t,n):i},ao.rebind(n,r,"on")},ao.touches=function(n,t){return arguments.length<2&&(t=k().touches),t?co(t).map(function(t){var e=J(n,t);return e.identifier=t.identifier,e}):[]};var Uo=1e-6,jo=Uo*Uo,Fo=Math.PI,Ho=2*Fo,Oo=Ho-Uo,Io=Fo/2,Yo=Fo/180,Zo=180/Fo,Vo=Math.SQRT2,Xo=2,$o=4;ao.interpolateZoom=function(n,t){var e,r,i=n[0],u=n[1],o=n[2],a=t[0],l=t[1],c=t[2],f=a-i,s=l-u,h=f*f+s*s;if(jo>h)r=Math.log(c/o)/Vo,e=function(n){return[i+n*f,u+n*s,o*Math.exp(Vo*n*r)]};else{var p=Math.sqrt(h),g=(c*c-o*o+$o*h)/(2*o*Xo*p),v=(c*c-o*o-$o*h)/(2*c*Xo*p),d=Math.log(Math.sqrt(g*g+1)-g),y=Math.log(Math.sqrt(v*v+1)-v);r=(y-d)/Vo,e=function(n){var t=n*r,e=rn(d),a=o/(Xo*p)*(e*un(Vo*t+d)-en(d));return[i+a*f,u+a*s,o*e/rn(Vo*t+d)]}}return e.duration=1e3*r,e},ao.behavior.zoom=function(){function n(n){n.on(L,s).on(Wo+".zoom",p).on("dblclick.zoom",g).on(R,h)}function e(n){return[(n[0]-k.x)/k.k,(n[1]-k.y)/k.k]}function r(n){return[n[0]*k.k+k.x,n[1]*k.k+k.y]}function i(n){k.k=Math.max(A[0],Math.min(A[1],n))}function u(n,t){t=r(t),k.x+=n[0]-t[0],k.y+=n[1]-t[1]}function o(t,e,r,o){t.__chart__={x:k.x,y:k.y,k:k.k},i(Math.pow(2,o)),u(d=e,r),t=ao.select(t),C>0&&(t=t.transition().duration(C)),t.call(n.event)}function a(){b&&b.domain(x.range().map(function(n){return(n-k.x)/k.k}).map(x.invert)),w&&w.domain(_.range().map(function(n){return(n-k.y)/k.k}).map(_.invert))}function l(n){z++||n({type:"zoomstart"})}function c(n){a(),n({type:"zoom",scale:k.k,translate:[k.x,k.y]})}function f(n){--z||(n({type:"zoomend"}),d=null)}function s(){function n(){a=1,u(ao.mouse(i),h),c(o)}function r(){s.on(q,null).on(T,null),p(a),f(o)}var i=this,o=D.of(i,arguments),a=0,s=ao.select(t(i)).on(q,n).on(T,r),h=e(ao.mouse(i)),p=W(i);Il.call(i),l(o)}function h(){function n(){var n=ao.touches(g);return p=k.k,n.forEach(function(n){n.identifier in d&&(d[n.identifier]=e(n))}),n}function t(){var t=ao.event.target;ao.select(t).on(x,r).on(b,a),_.push(t);for(var e=ao.event.changedTouches,i=0,u=e.length;u>i;++i)d[e[i].identifier]=null;var l=n(),c=Date.now();if(1===l.length){if(500>c-M){var f=l[0];o(g,f,d[f.identifier],Math.floor(Math.log(k.k)/Math.LN2)+1),S()}M=c}else if(l.length>1){var f=l[0],s=l[1],h=f[0]-s[0],p=f[1]-s[1];y=h*h+p*p}}function r(){var n,t,e,r,o=ao.touches(g);Il.call(g);for(var a=0,l=o.length;l>a;++a,r=null)if(e=o[a],r=d[e.identifier]){if(t)break;n=e,t=r}if(r){var f=(f=e[0]-n[0])*f+(f=e[1]-n[1])*f,s=y&&Math.sqrt(f/y);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+r[0])/2,(t[1]+r[1])/2],i(s*p)}M=null,u(n,t),c(v)}function a(){if(ao.event.touches.length){for(var t=ao.event.changedTouches,e=0,r=t.length;r>e;++e)delete d[t[e].identifier];for(var i in d)return void n()}ao.selectAll(_).on(m,null),w.on(L,s).on(R,h),N(),f(v)}var p,g=this,v=D.of(g,arguments),d={},y=0,m=".zoom-"+ao.event.changedTouches[0].identifier,x="touchmove"+m,b="touchend"+m,_=[],w=ao.select(g),N=W(g);t(),l(v),w.on(L,null).on(R,t)}function p(){var n=D.of(this,arguments);m?clearTimeout(m):(Il.call(this),v=e(d=y||ao.mouse(this)),l(n)),m=setTimeout(function(){m=null,f(n)},50),S(),i(Math.pow(2,.002*Bo())*k.k),u(d,v),c(n)}function g(){var n=ao.mouse(this),t=Math.log(k.k)/Math.LN2;o(this,n,e(n),ao.event.shiftKey?Math.ceil(t)-1:Math.floor(t)+1)}var v,d,y,m,M,x,b,_,w,k={x:0,y:0,k:1},E=[960,500],A=Jo,C=250,z=0,L="mousedown.zoom",q="mousemove.zoom",T="mouseup.zoom",R="touchstart.zoom",D=N(n,"zoomstart","zoom","zoomend");return Wo||(Wo="onwheel"in fo?(Bo=function(){return-ao.event.deltaY*(ao.event.deltaMode?120:1)},"wheel"):"onmousewheel"in fo?(Bo=function(){return 
ao.event.wheelDelta},"mousewheel"):(Bo=function(){return-ao.event.detail},"MozMousePixelScroll")),n.event=function(n){n.each(function(){var n=D.of(this,arguments),t=k;Hl?ao.select(this).transition().each("start.zoom",function(){k=this.__chart__||{x:0,y:0,k:1},l(n)}).tween("zoom:zoom",function(){var e=E[0],r=E[1],i=d?d[0]:e/2,u=d?d[1]:r/2,o=ao.interpolateZoom([(i-k.x)/k.k,(u-k.y)/k.k,e/k.k],[(i-t.x)/t.k,(u-t.y)/t.k,e/t.k]);return function(t){var r=o(t),a=e/r[2];this.__chart__=k={x:i-r[0]*a,y:u-r[1]*a,k:a},c(n)}}).each("interrupt.zoom",function(){f(n)}).each("end.zoom",function(){f(n)}):(this.__chart__=k,l(n),c(n),f(n))})},n.translate=function(t){return arguments.length?(k={x:+t[0],y:+t[1],k:k.k},a(),n):[k.x,k.y]},n.scale=function(t){return arguments.length?(k={x:k.x,y:k.y,k:null},i(+t),a(),n):k.k},n.scaleExtent=function(t){return arguments.length?(A=null==t?Jo:[+t[0],+t[1]],n):A},n.center=function(t){return arguments.length?(y=t&&[+t[0],+t[1]],n):y},n.size=function(t){return arguments.length?(E=t&&[+t[0],+t[1]],n):E},n.duration=function(t){return arguments.length?(C=+t,n):C},n.x=function(t){return arguments.length?(b=t,x=t.copy(),k={x:0,y:0,k:1},n):b},n.y=function(t){return arguments.length?(w=t,_=t.copy(),k={x:0,y:0,k:1},n):w},ao.rebind(n,D,"on")};var Bo,Wo,Jo=[0,1/0];ao.color=an,an.prototype.toString=function(){return this.rgb()+""},ao.hsl=ln;var Go=ln.prototype=new an;Go.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),new ln(this.h,this.s,this.l/n)},Go.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),new ln(this.h,this.s,n*this.l)},Go.rgb=function(){return cn(this.h,this.s,this.l)},ao.hcl=fn;var Ko=fn.prototype=new an;Ko.brighter=function(n){return new fn(this.h,this.c,Math.min(100,this.l+Qo*(arguments.length?n:1)))},Ko.darker=function(n){return new fn(this.h,this.c,Math.max(0,this.l-Qo*(arguments.length?n:1)))},Ko.rgb=function(){return sn(this.h,this.c,this.l).rgb()},ao.lab=hn;var Qo=18,na=.95047,ta=1,ea=1.08883,ra=hn.prototype=new an;ra.brighter=function(n){return new hn(Math.min(100,this.l+Qo*(arguments.length?n:1)),this.a,this.b)},ra.darker=function(n){return new hn(Math.max(0,this.l-Qo*(arguments.length?n:1)),this.a,this.b)},ra.rgb=function(){return pn(this.l,this.a,this.b)},ao.rgb=mn;var ia=mn.prototype=new an;ia.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,i=30;return t||e||r?(t&&i>t&&(t=i),e&&i>e&&(e=i),r&&i>r&&(r=i),new mn(Math.min(255,t/n),Math.min(255,e/n),Math.min(255,r/n))):new mn(i,i,i)},ia.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),new mn(n*this.r,n*this.g,n*this.b)},ia.hsl=function(){return wn(this.r,this.g,this.b)},ia.toString=function(){return"#"+bn(this.r)+bn(this.g)+bn(this.b)};var 
ua=ao.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,rebeccapurple:6697881,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});ua.forEach(function(n,t){ua.set(n,Mn(t))}),ao.functor=En,ao.xhr=An(m),ao.dsv=function(n,t){function e(n,e,u){arguments.length<3&&(u=e,e=null);var o=Cn(n,t,null==e?r:i(e),u);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:i(n)):e},o}function r(n){return e.parse(n.responseText)}function i(n){return function(t){return e.parse(t.responseText,n)}}function u(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),l=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var i=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(i(n),e)}:i})},e.parseRows=function(n,t){function e(){if(f>=c)return o;if(i)return i=!1,u;var 
t=f;if(34===n.charCodeAt(t)){for(var e=t;e++<c;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}f=e+2;var r=n.charCodeAt(e+1);return 13===r?(i=!0,10===n.charCodeAt(e+2)&&++f):10===r&&(i=!0),n.slice(t+1,e).replace(/""/g,'"')}for(;c>f;){var r=n.charCodeAt(f++),a=1;if(10===r)i=!0;else if(13===r)i=!0,10===n.charCodeAt(f)&&(++f,++a);else if(r!==l)continue;return n.slice(t,f-a)}return n.slice(t)}for(var r,i,u={},o={},a=[],c=n.length,f=0,s=0;(r=e())!==o;){for(var h=[];r!==u&&r!==o;)h.push(r),r=e();t&&null==(h=t(h,s++))||a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new y,i=[];return t.forEach(function(n){for(var t in n)r.has(t)||i.push(r.add(t))}),[i.map(o).join(n)].concat(t.map(function(t){return i.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(u).join("\n")},e},ao.csv=ao.dsv(",","text/csv"),ao.tsv=ao.dsv("	","text/tab-separated-values");var oa,aa,la,ca,fa=this[x(this,"requestAnimationFrame")]||function(n){setTimeout(n,17)};ao.timer=function(){qn.apply(this,arguments)},ao.timer.flush=function(){Rn(),Dn()},ao.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var sa=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Un);ao.formatPrefix=function(n,t){var e=0;return(n=+n)&&(0>n&&(n*=-1),t&&(n=ao.round(n,Pn(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((e-1)/3)))),sa[8+e/3]};var ha=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,pa=ao.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=ao.round(n,Pn(n,t))).toFixed(Math.max(0,Math.min(20,Pn(n*(1+1e-15),t))))}}),ga=ao.time={},va=Date;Hn.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){da.setUTCDate.apply(this._,arguments)},setDay:function(){da.setUTCDay.apply(this._,arguments)},setFullYear:function(){da.setUTCFullYear.apply(this._,arguments)},setHours:function(){da.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){da.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){da.setUTCMinutes.apply(this._,arguments)},setMonth:function(){da.setUTCMonth.apply(this._,arguments)},setSeconds:function(){da.setUTCSeconds.apply(this._,arguments)},setTime:function(){da.setTime.apply(this._,arguments)}};var da=Date.prototype;ga.year=On(function(n){return n=ga.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),ga.years=ga.year.range,ga.years.utc=ga.year.utc.range,ga.day=On(function(n){var t=new va(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return 
n.getDate()-1}),ga.days=ga.day.range,ga.days.utc=ga.day.utc.range,ga.dayOfYear=function(n){var t=ga.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=ga[n]=On(function(n){return(n=ga.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=ga.year(n).getDay();return Math.floor((ga.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});ga[n+"s"]=e.range,ga[n+"s"].utc=e.utc.range,ga[n+"OfYear"]=function(n){var e=ga.year(n).getDay();return Math.floor((ga.dayOfYear(n)+(e+t)%7)/7)}}),ga.week=ga.sunday,ga.weeks=ga.sunday.range,ga.weeks.utc=ga.sunday.utc.range,ga.weekOfYear=ga.sundayOfYear;var ya={"-":"",_:" ",0:"0"},ma=/^\s*\d+/,Ma=/^%/;ao.locale=function(n){return{numberFormat:jn(n),timeFormat:Yn(n)}};var xa=ao.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],
-shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});ao.format=xa.numberFormat,ao.geo={},ft.prototype={s:0,t:0,add:function(n){st(n,this.t,ba),st(ba.s,this.s,this),this.s?this.t+=ba.t:this.s=ba.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var ba=new ft;ao.geo.stream=function(n,t){n&&_a.hasOwnProperty(n.type)?_a[n.type](n,t):ht(n,t)};var _a={Feature:function(n,t){ht(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,i=e.length;++r<i;)ht(e[r].geometry,t)}},wa={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,i=e.length;++r<i;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){pt(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,i=e.length;++r<i;)pt(e[r],t,0)},Polygon:function(n,t){gt(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,i=e.length;++r<i;)gt(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,i=e.length;++r<i;)ht(e[r],t)}};ao.geo.area=function(n){return Sa=0,ao.geo.stream(n,Na),Sa};var Sa,ka=new ft,Na={sphere:function(){Sa+=4*Fo},point:b,lineStart:b,lineEnd:b,polygonStart:function(){ka.reset(),Na.lineStart=vt},polygonEnd:function(){var n=2*ka;Sa+=0>n?4*Fo+n:n,Na.lineStart=Na.lineEnd=Na.point=b}};ao.geo.bounds=function(){function n(n,t){M.push(x=[f=n,h=n]),s>t&&(s=t),t>p&&(p=t)}function t(t,e){var r=dt([t*Yo,e*Yo]);if(y){var i=mt(y,r),u=[i[1],-i[0],0],o=mt(u,i);bt(o),o=_t(o);var l=t-g,c=l>0?1:-1,v=o[0]*Zo*c,d=xo(l)>180;if(d^(v>c*g&&c*t>v)){var m=o[1]*Zo;m>p&&(p=m)}else if(v=(v+360)%360-180,d^(v>c*g&&c*t>v)){var m=-o[1]*Zo;s>m&&(s=m)}else s>e&&(s=e),e>p&&(p=e);d?g>t?a(f,t)>a(f,h)&&(h=t):a(t,h)>a(f,h)&&(f=t):h>=f?(f>t&&(f=t),t>h&&(h=t)):t>g?a(f,t)>a(f,h)&&(h=t):a(t,h)>a(f,h)&&(f=t)}else n(t,e);y=r,g=t}function e(){b.point=t}function r(){x[0]=f,x[1]=h,b.point=n,y=null}function i(n,e){if(y){var r=n-g;m+=xo(r)>180?r+(r>0?360:-360):r}else v=n,d=e;Na.point(n,e),t(n,e)}function u(){Na.lineStart()}function o(){i(v,d),Na.lineEnd(),xo(m)>Uo&&(f=-(h=180)),x[0]=f,x[1]=h,y=null}function a(n,t){return(t-=n)<0?t+360:t}function l(n,t){return n[0]-t[0]}function c(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var f,s,h,p,g,v,d,y,m,M,x,b={point:n,lineStart:e,lineEnd:r,polygonStart:function(){b.point=i,b.lineStart=u,b.lineEnd=o,m=0,Na.polygonStart()},polygonEnd:function(){Na.polygonEnd(),b.point=n,b.lineStart=e,b.lineEnd=r,0>ka?(f=-(h=180),s=-(p=90)):m>Uo?p=90:-Uo>m&&(s=-90),x[0]=f,x[1]=h}};return function(n){p=h=-(f=s=1/0),M=[],ao.geo.stream(n,b);var t=M.length;if(t){M.sort(l);for(var e,r=1,i=M[0],u=[i];t>r;++r)e=M[r],c(e[0],i)||c(e[1],i)?(a(i[0],e[1])>a(i[0],i[1])&&(i[1]=e[1]),a(e[0],i[1])>a(i[0],i[1])&&(i[0]=e[0])):u.push(i=e);for(var o,e,g=-(1/0),t=u.length-1,r=0,i=u[t];t>=r;i=e,++r)e=u[r],(o=a(i[1],e[0]))>g&&(g=o,f=e[0],h=i[1])}return M=x=null,f===1/0||s===1/0?[[NaN,NaN],[NaN,NaN]]:[[f,s],[h,p]]}}(),ao.geo.centroid=function(n){Ea=Aa=Ca=za=La=qa=Ta=Ra=Da=Pa=Ua=0,ao.geo.stream(n,ja);var t=Da,e=Pa,r=Ua,i=t*t+e*e+r*r;return jo>i&&(t=qa,e=Ta,r=Ra,Uo>Aa&&(t=Ca,e=za,r=La),i=t*t+e*e+r*r,jo>i)?[NaN,NaN]:[Math.atan2(e,t)*Zo,tn(r/Math.sqrt(i))*Zo]};var 
Ea,Aa,Ca,za,La,qa,Ta,Ra,Da,Pa,Ua,ja={sphere:b,point:St,lineStart:Nt,lineEnd:Et,polygonStart:function(){ja.lineStart=At},polygonEnd:function(){ja.lineStart=Nt}},Fa=Rt(zt,jt,Ht,[-Fo,-Fo/2]),Ha=1e9;ao.geo.clipExtent=function(){var n,t,e,r,i,u,o={stream:function(n){return i&&(i.valid=!1),i=u(n),i.valid=!0,i},extent:function(a){return arguments.length?(u=Zt(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),i&&(i.valid=!1,i=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(ao.geo.conicEqualArea=function(){return Vt(Xt)}).raw=Xt,ao.geo.albers=function(){return ao.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},ao.geo.albersUsa=function(){function n(n){var u=n[0],o=n[1];return t=null,e(u,o),t||(r(u,o),t)||i(u,o),t}var t,e,r,i,u=ao.geo.albers(),o=ao.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=ao.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),l={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=u.scale(),e=u.translate(),r=(n[0]-e[0])/t,i=(n[1]-e[1])/t;return(i>=.12&&.234>i&&r>=-.425&&-.214>r?o:i>=.166&&.234>i&&r>=-.214&&-.115>r?a:u).invert(n)},n.stream=function(n){var t=u.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,i){t.point(n,i),e.point(n,i),r.point(n,i)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(u.precision(t),o.precision(t),a.precision(t),n):u.precision()},n.scale=function(t){return arguments.length?(u.scale(t),o.scale(.35*t),a.scale(t),n.translate(u.translate())):u.scale()},n.translate=function(t){if(!arguments.length)return u.translate();var c=u.scale(),f=+t[0],s=+t[1];return e=u.translate(t).clipExtent([[f-.455*c,s-.238*c],[f+.455*c,s+.238*c]]).stream(l).point,r=o.translate([f-.307*c,s+.201*c]).clipExtent([[f-.425*c+Uo,s+.12*c+Uo],[f-.214*c-Uo,s+.234*c-Uo]]).stream(l).point,i=a.translate([f-.205*c,s+.212*c]).clipExtent([[f-.214*c+Uo,s+.166*c+Uo],[f-.115*c-Uo,s+.234*c-Uo]]).stream(l).point,n},n.scale(1070)};var Oa,Ia,Ya,Za,Va,Xa,$a={point:b,lineStart:b,lineEnd:b,polygonStart:function(){Ia=0,$a.lineStart=$t},polygonEnd:function(){$a.lineStart=$a.lineEnd=$a.point=b,Oa+=xo(Ia/2)}},Ba={point:Bt,lineStart:b,lineEnd:b,polygonStart:b,polygonEnd:b},Wa={point:Gt,lineStart:Kt,lineEnd:Qt,polygonStart:function(){Wa.lineStart=ne},polygonEnd:function(){Wa.point=Gt,Wa.lineStart=Kt,Wa.lineEnd=Qt}};ao.geo.path=function(){function n(n){return n&&("function"==typeof a&&u.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=i(u)),ao.geo.stream(n,o)),u.result()}function t(){return o=null,n}var e,r,i,u,o,a=4.5;return n.area=function(n){return Oa=0,ao.geo.stream(n,i($a)),Oa},n.centroid=function(n){return Ca=za=La=qa=Ta=Ra=Da=Pa=Ua=0,ao.geo.stream(n,i(Wa)),Ua?[Da/Ua,Pa/Ua]:Ra?[qa/Ra,Ta/Ra]:La?[Ca/La,za/La]:[NaN,NaN]},n.bounds=function(n){return Va=Xa=-(Ya=Za=1/0),ao.geo.stream(n,i(Ba)),[[Ya,Za],[Va,Xa]]},n.projection=function(n){return arguments.length?(i=(e=n)?n.stream||re(n):m,t()):e},n.context=function(n){return arguments.length?(u=null==(r=n)?new Wt:new te(n),"function"!=typeof a&&u.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof 
t?t:(u.pointRadius(+t),+t),n):a},n.projection(ao.geo.albersUsa()).context(null)},ao.geo.transform=function(n){return{stream:function(t){var e=new ie(t);for(var r in n)e[r]=n[r];return e}}},ie.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},ao.geo.projection=oe,ao.geo.projectionMutator=ae,(ao.geo.equirectangular=function(){return oe(ce)}).raw=ce.invert=ce,ao.geo.rotation=function(n){function t(t){return t=n(t[0]*Yo,t[1]*Yo),t[0]*=Zo,t[1]*=Zo,t}return n=se(n[0]%360*Yo,n[1]*Yo,n.length>2?n[2]*Yo:0),t.invert=function(t){return t=n.invert(t[0]*Yo,t[1]*Yo),t[0]*=Zo,t[1]*=Zo,t},t},fe.invert=ce,ao.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=se(-n[0]*Yo,-n[1]*Yo,0).invert,i=[];return e(null,null,1,{point:function(n,e){i.push(n=t(n,e)),n[0]*=Zo,n[1]*=Zo}}),{type:"Polygon",coordinates:[i]}}var t,e,r=[0,0],i=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=ve((t=+r)*Yo,i*Yo),n):t},n.precision=function(r){return arguments.length?(e=ve(t*Yo,(i=+r)*Yo),n):i},n.angle(90)},ao.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Yo,i=n[1]*Yo,u=t[1]*Yo,o=Math.sin(r),a=Math.cos(r),l=Math.sin(i),c=Math.cos(i),f=Math.sin(u),s=Math.cos(u);return Math.atan2(Math.sqrt((e=s*o)*e+(e=c*f-l*s*a)*e),l*f+c*s*a)},ao.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return ao.range(Math.ceil(u/d)*d,i,d).map(h).concat(ao.range(Math.ceil(c/y)*y,l,y).map(p)).concat(ao.range(Math.ceil(r/g)*g,e,g).filter(function(n){return xo(n%d)>Uo}).map(f)).concat(ao.range(Math.ceil(a/v)*v,o,v).filter(function(n){return xo(n%y)>Uo}).map(s))}var e,r,i,u,o,a,l,c,f,s,h,p,g=10,v=g,d=90,y=360,m=2.5;return n.lines=function(){return t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(u).concat(p(l).slice(1),h(i).reverse().slice(1),p(c).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(u=+t[0][0],i=+t[1][0],c=+t[0][1],l=+t[1][1],u>i&&(t=u,u=i,i=t),c>l&&(t=c,c=l,l=t),n.precision(m)):[[u,c],[i,l]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(m)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],y=+t[1],n):[d,y]},n.minorStep=function(t){return arguments.length?(g=+t[0],v=+t[1],n):[g,v]},n.precision=function(t){return arguments.length?(m=+t,f=ye(a,o,90),s=me(r,e,m),h=ye(c,l,90),p=me(u,i,m),n):m},n.majorExtent([[-180,-90+Uo],[180,90-Uo]]).minorExtent([[-180,-80-Uo],[180,80+Uo]])},ao.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||i.apply(this,arguments)]}}var t,e,r=Me,i=xe;return n.distance=function(){return ao.geo.distance(t||r.apply(this,arguments),e||i.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(i=t,e="function"==typeof t?null:t,n):i},n.precision=function(){return 
arguments.length?n:0},n},ao.geo.interpolate=function(n,t){return be(n[0]*Yo,n[1]*Yo,t[0]*Yo,t[1]*Yo)},ao.geo.length=function(n){return Ja=0,ao.geo.stream(n,Ga),Ja};var Ja,Ga={sphere:b,point:b,lineStart:_e,lineEnd:b,polygonStart:b,polygonEnd:b},Ka=we(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(ao.geo.azimuthalEqualArea=function(){return oe(Ka)}).raw=Ka;var Qa=we(function(n){var t=Math.acos(n);return t&&t/Math.sin(t)},m);(ao.geo.azimuthalEquidistant=function(){return oe(Qa)}).raw=Qa,(ao.geo.conicConformal=function(){return Vt(Se)}).raw=Se,(ao.geo.conicEquidistant=function(){return Vt(ke)}).raw=ke;var nl=we(function(n){return 1/n},Math.atan);(ao.geo.gnomonic=function(){return oe(nl)}).raw=nl,Ne.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Io]},(ao.geo.mercator=function(){return Ee(Ne)}).raw=Ne;var tl=we(function(){return 1},Math.asin);(ao.geo.orthographic=function(){return oe(tl)}).raw=tl;var el=we(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(ao.geo.stereographic=function(){return oe(el)}).raw=el,Ae.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Io]},(ao.geo.transverseMercator=function(){var n=Ee(Ae),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[n[1],-n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},e([0,0,90])}).raw=Ae,ao.geom={},ao.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,i=En(e),u=En(r),o=n.length,a=[],l=[];for(t=0;o>t;t++)a.push([+i.call(this,n[t],t),+u.call(this,n[t],t),t]);for(a.sort(qe),t=0;o>t;t++)l.push([a[t][0],-a[t][1]]);var c=Le(a),f=Le(l),s=f[0]===c[0],h=f[f.length-1]===c[c.length-1],p=[];for(t=c.length-1;t>=0;--t)p.push(n[a[c[t]][2]]);for(t=+s;t<f.length-h;++t)p.push(n[a[f[t]][2]]);return p}var e=Ce,r=ze;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},ao.geom.polygon=function(n){return ko(n,rl),n};var rl=ao.geom.polygon.prototype=[];rl.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],i=0;++t<e;)n=r,r=this[t],i+=n[1]*r[0]-n[0]*r[1];return.5*i},rl.centroid=function(n){var t,e,r=-1,i=this.length,u=0,o=0,a=this[i-1];for(arguments.length||(n=-1/(6*this.area()));++r<i;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],u+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[u*n,o*n]},rl.clip=function(n){for(var t,e,r,i,u,o,a=De(n),l=-1,c=this.length-De(this),f=this[c-1];++l<c;){for(t=n.slice(),n.length=0,i=this[l],u=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],Te(o,f,i)?(Te(u,f,i)||n.push(Re(u,o,f,i)),n.push(o)):Te(u,f,i)&&n.push(Re(u,o,f,i)),u=o;a&&n.push(n[0]),f=i}return n};var il,ul,ol,al,ll,cl=[],fl=[];Ye.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(Ve),t.length},tr.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},er.prototype={insert:function(n,t){var e,r,i;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else 
this._?(n=or(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(i=r.R,i&&i.C?(e.C=i.C=!1,r.C=!0,n=r):(n===e.R&&(ir(this,e),n=e,e=n.U),e.C=!1,r.C=!0,ur(this,r))):(i=r.L,i&&i.C?(e.C=i.C=!1,r.C=!0,n=r):(n===e.L&&(ur(this,e),n=e,e=n.U),e.C=!1,r.C=!0,ir(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,i=n.U,u=n.L,o=n.R;if(e=u?o?or(o):u:o,i?i.L===n?i.L=e:i.R=e:this._=e,u&&o?(r=e.C,e.C=n.C,e.L=u,u.U=e,e!==o?(i=e.U,e.U=n.U,n=e.R,i.L=n,e.R=o,o.U=e):(e.U=i,i=e,n=e.R)):(r=n.C,n=e),n&&(n.U=i),!r){if(n&&n.C)return void(n.C=!1);do{if(n===this._)break;if(n===i.L){if(t=i.R,t.C&&(t.C=!1,i.C=!0,ir(this,i),t=i.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,ur(this,t),t=i.R),t.C=i.C,i.C=t.R.C=!1,ir(this,i),n=this._;break}}else if(t=i.L,t.C&&(t.C=!1,i.C=!0,ur(this,i),t=i.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,ir(this,t),t=i.L),t.C=i.C,i.C=t.L.C=!1,ur(this,i),n=this._;break}t.C=!0,n=i,i=i.U}while(!n.C);n&&(n.C=!1)}}},ao.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],i=a[0][1],u=a[1][0],o=a[1][1];return ar(e(n),a).cells.forEach(function(e,a){var l=e.edges,c=e.site,f=t[a]=l.length?l.map(function(n){var t=n.start();return[t.x,t.y]}):c.x>=r&&c.x<=u&&c.y>=i&&c.y<=o?[[r,o],[u,o],[u,i],[r,i]]:[];f.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(u(n,t)/Uo)*Uo,y:Math.round(o(n,t)/Uo)*Uo,i:t}})}var r=Ce,i=ze,u=r,o=i,a=sl;return n?t(n):(t.links=function(n){return ar(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return ar(e(n)).cells.forEach(function(e,r){for(var i,u,o=e.site,a=e.edges.sort(Ve),l=-1,c=a.length,f=a[c-1].edge,s=f.l===o?f.r:f.l;++l<c;)i=f,u=s,f=a[l].edge,s=f.l===o?f.r:f.l,r<u.i&&r<s.i&&cr(o,u,s)<0&&t.push([n[r],n[u.i],n[s.i]])}),t},t.x=function(n){return arguments.length?(u=En(r=n),t):r},t.y=function(n){return arguments.length?(o=En(i=n),t):i},t.clipExtent=function(n){return arguments.length?(a=null==n?sl:n,t):a===sl?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===sl?null:a&&a[1]},t)};var sl=[[-1e6,-1e6],[1e6,1e6]];ao.geom.delaunay=function(n){return ao.geom.voronoi().triangles(n)},ao.geom.quadtree=function(n,t,e,r,i){function u(n){function u(n,t,e,r,i,u,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var l=n.x,f=n.y;if(null!=l)if(xo(l-e)+xo(f-r)<.01)c(n,t,e,r,i,u,o,a);else{var s=n.point;n.x=n.y=n.point=null,c(n,s,l,f,i,u,o,a),c(n,t,e,r,i,u,o,a)}else n.x=e,n.y=r,n.point=t}else c(n,t,e,r,i,u,o,a)}function c(n,t,e,r,i,o,a,l){var c=.5*(i+a),f=.5*(o+l),s=e>=c,h=r>=f,p=h<<1|s;n.leaf=!1,n=n.nodes[p]||(n.nodes[p]=hr()),s?i=c:a=c,h?o=f:l=f,u(n,t,e,r,i,o,a,l)}var f,s,h,p,g,v,d,y,m,M=En(a),x=En(l);if(null!=t)v=t,d=e,y=r,m=i;else if(y=m=-(v=d=1/0),s=[],h=[],g=n.length,o)for(p=0;g>p;++p)f=n[p],f.x<v&&(v=f.x),f.y<d&&(d=f.y),f.x>y&&(y=f.x),f.y>m&&(m=f.y),s.push(f.x),h.push(f.y);else for(p=0;g>p;++p){var b=+M(f=n[p],p),_=+x(f,p);v>b&&(v=b),d>_&&(d=_),b>y&&(y=b),_>m&&(m=_),s.push(b),h.push(_)}var w=y-v,S=m-d;w>S?m=d+w:y=v+S;var k=hr();if(k.add=function(n){u(k,n,+M(n,++p),+x(n,p),v,d,y,m)},k.visit=function(n){pr(n,k,v,d,y,m)},k.find=function(n){return gr(k,n[0],n[1],v,d,y,m)},p=-1,null==t){for(;++p<g;)u(k,n[p],s[p],h[p],v,d,y,m);--p}else n.forEach(k.add);return s=h=n=f=null,k}var o,a=Ce,l=ze;return(o=arguments.length)?(a=fr,l=sr,3===o&&(i=e,r=t,e=t=0),u(n)):(u.x=function(n){return 
arguments.length?(a=n,u):a},u.y=function(n){return arguments.length?(l=n,u):l},u.extent=function(n){return arguments.length?(null==n?t=e=r=i=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],i=+n[1][1]),u):null==t?null:[[t,e],[r,i]]},u.size=function(n){return arguments.length?(null==n?t=e=r=i=null:(t=e=0,r=+n[0],i=+n[1]),u):null==t?null:[r-t,i-e]},u)},ao.interpolateRgb=vr,ao.interpolateObject=dr,ao.interpolateNumber=yr,ao.interpolateString=mr;var hl=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g,pl=new RegExp(hl.source,"g");ao.interpolate=Mr,ao.interpolators=[function(n,t){var e=typeof t;return("string"===e?ua.has(t.toLowerCase())||/^(#|rgb\(|hsl\()/i.test(t)?vr:mr:t instanceof an?vr:Array.isArray(t)?xr:"object"===e&&isNaN(t)?dr:yr)(n,t)}],ao.interpolateArray=xr;var gl=function(){return m},vl=ao.map({linear:gl,poly:Er,quad:function(){return Sr},cubic:function(){return kr},sin:function(){return Ar},exp:function(){return Cr},circle:function(){return zr},elastic:Lr,back:qr,bounce:function(){return Tr}}),dl=ao.map({"in":m,out:_r,"in-out":wr,"out-in":function(n){return wr(_r(n))}});ao.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.slice(0,t):n,r=t>=0?n.slice(t+1):"in";return e=vl.get(e)||gl,r=dl.get(r)||m,br(r(e.apply(null,lo.call(arguments,1))))},ao.interpolateHcl=Rr,ao.interpolateHsl=Dr,ao.interpolateLab=Pr,ao.interpolateRound=Ur,ao.transform=function(n){var t=fo.createElementNS(ao.ns.prefix.svg,"g");return(ao.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new jr(e?e.matrix:yl)})(n)},jr.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var yl={a:1,b:0,c:0,d:1,e:0,f:0};ao.interpolateTransform=$r,ao.layout={},ao.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e<r;)t.push(Jr(n[e]));return t}},ao.layout.chord=function(){function n(){var n,c,s,h,p,g={},v=[],d=ao.range(u),y=[];for(e=[],r=[],n=0,h=-1;++h<u;){for(c=0,p=-1;++p<u;)c+=i[h][p];v.push(c),y.push(ao.range(u)),n+=c}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&y.forEach(function(n,t){n.sort(function(n,e){return a(i[t][n],i[t][e])})}),n=(Ho-f*u)/n,c=0,h=-1;++h<u;){for(s=c,p=-1;++p<u;){var m=d[h],M=y[m][p],x=i[m][M],b=c,_=c+=x*n;g[m+"-"+M]={index:m,subindex:M,startAngle:b,endAngle:_,value:x}}r[m]={index:m,startAngle:s,endAngle:c,value:v[m]},c+=f}for(h=-1;++h<u;)for(p=h-1;++p<u;){var w=g[h+"-"+p],S=g[p+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}l&&t()}function t(){e.sort(function(n,t){return l((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,i,u,o,a,l,c={},f=0;return c.matrix=function(n){return arguments.length?(u=(i=n)&&i.length,e=r=null,c):i},c.padding=function(n){return arguments.length?(f=n,e=r=null,c):f},c.sortGroups=function(n){return arguments.length?(o=n,e=r=null,c):o},c.sortSubgroups=function(n){return arguments.length?(a=n,e=null,c):a},c.sortChords=function(n){return arguments.length?(l=n,e&&t(),c):l},c.chords=function(){return e||n(),e},c.groups=function(){return r||n(),r},c},ao.layout.force=function(){function n(n){return function(t,e,r,i){if(t.point!==n){var u=t.cx-n.x,o=t.cy-n.y,a=i-e,l=u*u+o*o;if(l>a*a/y){if(v>l){var c=t.charge/l;n.px-=u*c,n.py-=o*c}return!0}if(t.point&&l&&v>l){var c=t.pointCharge/l;n.px-=u*c,n.py-=o*c}}return!t.charge}}function t(n){n.px=ao.event.x,n.py=ao.event.y,l.resume()}var 
e,r,i,u,o,a,l={},c=ao.dispatch("start","tick","end"),f=[1,1],s=.9,h=ml,p=Ml,g=-30,v=xl,d=.1,y=.64,M=[],x=[];return l.tick=function(){if((i*=.99)<.005)return e=null,c.end({type:"end",alpha:i=0}),!0;var t,r,l,h,p,v,y,m,b,_=M.length,w=x.length;for(r=0;w>r;++r)l=x[r],h=l.source,p=l.target,m=p.x-h.x,b=p.y-h.y,(v=m*m+b*b)&&(v=i*o[r]*((v=Math.sqrt(v))-u[r])/v,m*=v,b*=v,p.x-=m*(y=h.weight+p.weight?h.weight/(h.weight+p.weight):.5),p.y-=b*y,h.x+=m*(y=1-y),h.y+=b*y);if((y=i*d)&&(m=f[0]/2,b=f[1]/2,r=-1,y))for(;++r<_;)l=M[r],l.x+=(m-l.x)*y,l.y+=(b-l.y)*y;if(g)for(ri(t=ao.geom.quadtree(M),i,a),r=-1;++r<_;)(l=M[r]).fixed||t.visit(n(l));for(r=-1;++r<_;)l=M[r],l.fixed?(l.x=l.px,l.y=l.py):(l.x-=(l.px-(l.px=l.x))*s,l.y-=(l.py-(l.py=l.y))*s);c.tick({type:"tick",alpha:i})},l.nodes=function(n){return arguments.length?(M=n,l):M},l.links=function(n){return arguments.length?(x=n,l):x},l.size=function(n){return arguments.length?(f=n,l):f},l.linkDistance=function(n){return arguments.length?(h="function"==typeof n?n:+n,l):h},l.distance=l.linkDistance,l.linkStrength=function(n){return arguments.length?(p="function"==typeof n?n:+n,l):p},l.friction=function(n){return arguments.length?(s=+n,l):s},l.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,l):g},l.chargeDistance=function(n){return arguments.length?(v=n*n,l):Math.sqrt(v)},l.gravity=function(n){return arguments.length?(d=+n,l):d},l.theta=function(n){return arguments.length?(y=n*n,l):Math.sqrt(y)},l.alpha=function(n){return arguments.length?(n=+n,i?n>0?i=n:(e.c=null,e.t=NaN,e=null,c.end({type:"end",alpha:i=0})):n>0&&(c.start({type:"start",alpha:i=n}),e=qn(l.tick)),l):i},l.start=function(){function n(n,r){if(!e){for(e=new Array(i),l=0;i>l;++l)e[l]=[];for(l=0;c>l;++l){var u=x[l];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var o,a=e[t],l=-1,f=a.length;++l<f;)if(!isNaN(o=a[l][n]))return o;return Math.random()*r}var t,e,r,i=M.length,c=x.length,s=f[0],v=f[1];for(t=0;i>t;++t)(r=M[t]).index=t,r.weight=0;for(t=0;c>t;++t)r=x[t],"number"==typeof r.source&&(r.source=M[r.source]),"number"==typeof r.target&&(r.target=M[r.target]),++r.source.weight,++r.target.weight;for(t=0;i>t;++t)r=M[t],isNaN(r.x)&&(r.x=n("x",s)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof h)for(t=0;c>t;++t)u[t]=+h.call(this,x[t],t);else for(t=0;c>t;++t)u[t]=h;if(o=[],"function"==typeof p)for(t=0;c>t;++t)o[t]=+p.call(this,x[t],t);else for(t=0;c>t;++t)o[t]=p;if(a=[],"function"==typeof g)for(t=0;i>t;++t)a[t]=+g.call(this,M[t],t);else for(t=0;i>t;++t)a[t]=g;return l.resume()},l.resume=function(){return l.alpha(.1)},l.stop=function(){return l.alpha(0)},l.drag=function(){return r||(r=ao.behavior.drag().origin(m).on("dragstart.force",Qr).on("drag.force",t).on("dragend.force",ni)),arguments.length?void this.on("mouseover.force",ti).on("mouseout.force",ei).call(r):r},ao.rebind(l,c,"on")};var ml=20,Ml=1,xl=1/0;ao.layout.hierarchy=function(){function n(i){var u,o=[i],a=[];for(i.depth=0;null!=(u=o.pop());)if(a.push(u),(c=e.call(n,u,u.depth))&&(l=c.length)){for(var l,c,f;--l>=0;)o.push(f=c[l]),f.parent=u,f.depth=u.depth+1;r&&(u.value=0),u.children=c}else r&&(u.value=+r.call(n,u,u.depth)||0),delete u.children;return oi(i,function(n){var e,i;t&&(e=n.children)&&e.sort(t),r&&(i=n.parent)&&(i.value+=n.value)}),a}var t=ci,e=ai,r=li;return n.sort=function(e){return arguments.length?(t=e,n):t},n.children=function(t){return arguments.length?(e=t,n):e},n.value=function(t){return 
arguments.length?(r=t,n):r},n.revalue=function(t){return r&&(ui(t,function(n){n.children&&(n.value=0)}),oi(t,function(t){var e;t.children||(t.value=+r.call(n,t,t.depth)||0),(e=t.parent)&&(e.value+=t.value)})),t},n},ao.layout.partition=function(){function n(t,e,r,i){var u=t.children;if(t.x=e,t.y=t.depth*i,t.dx=r,t.dy=i,u&&(o=u.length)){var o,a,l,c=-1;for(r=t.value?r/t.value:0;++c<o;)n(a=u[c],e,l=a.value*r,i),e+=l}}function t(n){var e=n.children,r=0;if(e&&(i=e.length))for(var i,u=-1;++u<i;)r=Math.max(r,t(e[u]));return 1+r}function e(e,u){var o=r.call(this,e,u);return n(o[0],0,i[0],i[1]/t(o[0])),o}var r=ao.layout.hierarchy(),i=[1,1];return e.size=function(n){return arguments.length?(i=n,e):i},ii(e,r)},ao.layout.pie=function(){function n(o){var a,l=o.length,c=o.map(function(e,r){return+t.call(n,e,r)}),f=+("function"==typeof r?r.apply(this,arguments):r),s=("function"==typeof i?i.apply(this,arguments):i)-f,h=Math.min(Math.abs(s)/l,+("function"==typeof u?u.apply(this,arguments):u)),p=h*(0>s?-1:1),g=ao.sum(c),v=g?(s-l*p)/g:0,d=ao.range(l),y=[];return null!=e&&d.sort(e===bl?function(n,t){return c[t]-c[n]}:function(n,t){return e(o[n],o[t])}),d.forEach(function(n){y[n]={data:o[n],value:a=c[n],startAngle:f,endAngle:f+=a*v+p,padAngle:h}}),y}var t=Number,e=bl,r=0,i=Ho,u=0;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(i=t,n):i},n.padAngle=function(t){return arguments.length?(u=t,n):u},n};var bl={};ao.layout.stack=function(){function n(a,l){if(!(h=a.length))return a;var c=a.map(function(e,r){return t.call(n,e,r)}),f=c.map(function(t){return t.map(function(t,e){return[u.call(n,t,e),o.call(n,t,e)]})}),s=e.call(n,f,l);c=ao.permute(c,s),f=ao.permute(f,s);var h,p,g,v,d=r.call(n,f,l),y=c[0].length;for(g=0;y>g;++g)for(i.call(n,c[0][g],v=d[g],f[0][g][1]),p=1;h>p;++p)i.call(n,c[p][g],v+=f[p-1][g][1],f[p][g][1]);return a}var t=m,e=gi,r=vi,i=pi,u=si,o=hi;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:_l.get(t)||gi,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:wl.get(t)||vi,n):r},n.x=function(t){return arguments.length?(u=t,n):u},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(i=t,n):i},n};var _l=ao.map({"inside-out":function(n){var t,e,r=n.length,i=n.map(di),u=n.map(yi),o=ao.range(r).sort(function(n,t){return i[n]-i[t]}),a=0,l=0,c=[],f=[];for(t=0;r>t;++t)e=o[t],l>a?(a+=u[e],c.push(e)):(l+=u[e],f.push(e));return f.reverse().concat(c)},reverse:function(n){return ao.range(n.length).reverse()},"default":gi}),wl=ao.map({silhouette:function(n){var t,e,r,i=n.length,u=n[0].length,o=[],a=0,l=[];for(e=0;u>e;++e){for(t=0,r=0;i>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;u>e;++e)l[e]=(a-o[e])/2;return l},wiggle:function(n){var t,e,r,i,u,o,a,l,c,f=n.length,s=n[0],h=s.length,p=[];for(p[0]=l=c=0,e=1;h>e;++e){for(t=0,i=0;f>t;++t)i+=n[t][e][1];for(t=0,u=0,a=s[e][0]-s[e-1][0];f>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;u+=o*n[t][e][1]}p[e]=l-=i?u/i*a:0,c>l&&(c=l)}for(e=0;h>e;++e)p[e]-=c;return p},expand:function(n){var t,e,r,i=n.length,u=n[0].length,o=1/i,a=[];for(e=0;u>e;++e){for(t=0,r=0;i>t;t++)r+=n[t][e][1];if(r)for(t=0;i>t;t++)n[t][e][1]/=r;else for(t=0;i>t;t++)n[t][e][1]=o}for(e=0;u>e;++e)a[e]=0;return 
a},zero:vi});ao.layout.histogram=function(){function n(n,u){for(var o,a,l=[],c=n.map(e,this),f=r.call(this,c,u),s=i.call(this,f,c,u),u=-1,h=c.length,p=s.length-1,g=t?1:1/h;++u<p;)o=l[u]=[],o.dx=s[u+1]-(o.x=s[u]),o.y=0;if(p>0)for(u=-1;++u<h;)a=c[u],a>=f[0]&&a<=f[1]&&(o=l[ao.bisect(s,a,1,p)-1],o.y+=g,o.push(n[u]));return l}var t=!0,e=Number,r=bi,i=Mi;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=En(t),n):r},n.bins=function(t){return arguments.length?(i="number"==typeof t?function(n){return xi(n,t)}:En(t),n):i},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},ao.layout.pack=function(){function n(n,u){var o=e.call(this,n,u),a=o[0],l=i[0],c=i[1],f=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,oi(a,function(n){n.r=+f(n.value)}),oi(a,Ni),r){var s=r*(t?1:Math.max(2*a.r/l,2*a.r/c))/2;oi(a,function(n){n.r+=s}),oi(a,Ni),oi(a,function(n){n.r-=s})}return Ci(a,l/2,c/2,t?1:1/Math.max(2*a.r/l,2*a.r/c)),o}var t,e=ao.layout.hierarchy().sort(_i),r=0,i=[1,1];return n.size=function(t){return arguments.length?(i=t,n):i},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},ii(n,e)},ao.layout.tree=function(){function n(n,i){var f=o.call(this,n,i),s=f[0],h=t(s);if(oi(h,e),h.parent.m=-h.z,ui(h,r),c)ui(s,u);else{var p=s,g=s,v=s;ui(s,function(n){n.x<p.x&&(p=n),n.x>g.x&&(g=n),n.depth>v.depth&&(v=n)});var d=a(p,g)/2-p.x,y=l[0]/(g.x+a(g,p)/2+d),m=l[1]/(v.depth||1);ui(s,function(n){n.x=(n.x+d)*y,n.y=n.depth*m})}return f}function t(n){for(var t,e={A:null,children:[n]},r=[e];null!=(t=r.pop());)for(var i,u=t.children,o=0,a=u.length;a>o;++o)r.push((u[o]=i={_:u[o],parent:t,children:(i=u[o].children)&&i.slice()||[],A:null,a:null,z:0,m:0,c:0,s:0,t:null,i:o}).a=i);return e.children[0]}function e(n){var t=n.children,e=n.parent.children,r=n.i?e[n.i-1]:null;if(t.length){Di(n);var u=(t[0].z+t[t.length-1].z)/2;r?(n.z=r.z+a(n._,r._),n.m=n.z-u):n.z=u}else r&&(n.z=r.z+a(n._,r._));n.parent.A=i(n,r,n.parent.A||e[0])}function r(n){n._.x=n.z+n.parent.m,n.m+=n.parent.m}function i(n,t,e){if(t){for(var r,i=n,u=n,o=t,l=i.parent.children[0],c=i.m,f=u.m,s=o.m,h=l.m;o=Ti(o),i=qi(i),o&&i;)l=qi(l),u=Ti(u),u.a=n,r=o.z+s-i.z-c+a(o._,i._),r>0&&(Ri(Pi(o,n,e),n,r),c+=r,f+=r),s+=o.m,c+=i.m,h+=l.m,f+=u.m;o&&!Ti(u)&&(u.t=o,u.m+=s-f),i&&!qi(l)&&(l.t=i,l.m+=c-h,e=n)}return e}function u(n){n.x*=l[0],n.y=n.depth*l[1]}var o=ao.layout.hierarchy().sort(null).value(null),a=Li,l=[1,1],c=null;return n.separation=function(t){return arguments.length?(a=t,n):a},n.size=function(t){return arguments.length?(c=null==(l=t)?u:null,n):c?null:l},n.nodeSize=function(t){return arguments.length?(c=null==(l=t)?null:u,n):c?l:null},ii(n,o)},ao.layout.cluster=function(){function n(n,u){var o,a=t.call(this,n,u),l=a[0],c=0;oi(l,function(n){var t=n.children;t&&t.length?(n.x=ji(t),n.y=Ui(t)):(n.x=o?c+=e(n,o):0,n.y=0,o=n)});var f=Fi(l),s=Hi(l),h=f.x-e(f,s)/2,p=s.x+e(s,f)/2;return oi(l,i?function(n){n.x=(n.x-l.x)*r[0],n.y=(l.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(p-h)*r[0],n.y=(1-(l.y?n.y/l.y:1))*r[1]}),a}var t=ao.layout.hierarchy().sort(null).value(null),e=Li,r=[1,1],i=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(i=null==(r=t),n):i?null:r},n.nodeSize=function(t){return arguments.length?(i=null!=(r=t),n):i?r:null},ii(n,t)},ao.layout.treemap=function(){function n(n,t){for(var 
e,r,i=-1,u=n.length;++i<u;)r=(e=n[i]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var u=e.children;if(u&&u.length){var o,a,l,c=s(e),f=[],h=u.slice(),g=1/0,v="slice"===p?c.dx:"dice"===p?c.dy:"slice-dice"===p?1&e.depth?c.dy:c.dx:Math.min(c.dx,c.dy);for(n(h,c.dx*c.dy/e.value),f.area=0;(l=h.length)>0;)f.push(o=h[l-1]),f.area+=o.area,"squarify"!==p||(a=r(f,v))<=g?(h.pop(),g=a):(f.area-=f.pop().area,i(f,v,c,!1),v=Math.min(c.dx,c.dy),f.length=f.area=0,g=1/0);f.length&&(i(f,v,c,!0),f.length=f.area=0),u.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var u,o=s(t),a=r.slice(),l=[];for(n(a,o.dx*o.dy/t.value),l.area=0;u=a.pop();)l.push(u),l.area+=u.area,null!=u.z&&(i(l,u.z?o.dx:o.dy,o,!a.length),l.length=l.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,i=0,u=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(u>e&&(u=e),e>i&&(i=e));return r*=r,t*=t,r?Math.max(t*i*g/r,r/(t*u*g)):1/0}function i(n,t,e,r){var i,u=-1,o=n.length,a=e.x,c=e.y,f=t?l(n.area/t):0;
-if(t==e.dx){for((r||f>e.dy)&&(f=e.dy);++u<o;)i=n[u],i.x=a,i.y=c,i.dy=f,a+=i.dx=Math.min(e.x+e.dx-a,f?l(i.area/f):0);i.z=!0,i.dx+=e.x+e.dx-a,e.y+=f,e.dy-=f}else{for((r||f>e.dx)&&(f=e.dx);++u<o;)i=n[u],i.x=a,i.y=c,i.dx=f,c+=i.dy=Math.min(e.y+e.dy-c,f?l(i.area/f):0);i.z=!1,i.dy+=e.y+e.dy-c,e.x+=f,e.dx-=f}}function u(r){var i=o||a(r),u=i[0];return u.x=u.y=0,u.value?(u.dx=c[0],u.dy=c[1]):u.dx=u.dy=0,o&&a.revalue(u),n([u],u.dx*u.dy/u.value),(o?e:t)(u),h&&(o=i),i}var o,a=ao.layout.hierarchy(),l=Math.round,c=[1,1],f=null,s=Oi,h=!1,p="squarify",g=.5*(1+Math.sqrt(5));return u.size=function(n){return arguments.length?(c=n,u):c},u.padding=function(n){function t(t){var e=n.call(u,t,t.depth);return null==e?Oi(t):Ii(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return Ii(t,n)}if(!arguments.length)return f;var r;return s=null==(f=n)?Oi:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,u},u.round=function(n){return arguments.length?(l=n?Math.round:Number,u):l!=Number},u.sticky=function(n){return arguments.length?(h=n,o=null,u):h},u.ratio=function(n){return arguments.length?(g=n,u):g},u.mode=function(n){return arguments.length?(p=n+"",u):p},ii(u,a)},ao.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,i;do e=2*Math.random()-1,r=2*Math.random()-1,i=e*e+r*r;while(!i||i>1);return n+t*e*Math.sqrt(-2*Math.log(i)/i)}},logNormal:function(){var n=ao.random.normal.apply(ao,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=ao.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},ao.scale={};var Sl={floor:m,ceil:m};ao.scale.linear=function(){return Wi([0,1],[0,1],Mr,!1)};var kl={s:1,g:1,p:1,r:1,e:1};ao.scale.log=function(){return ru(ao.scale.linear().domain([0,1]),10,!0,[1,10])};var Nl=ao.format(".0e"),El={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};ao.scale.pow=function(){return iu(ao.scale.linear(),1,[0,1])},ao.scale.sqrt=function(){return ao.scale.pow().exponent(.5)},ao.scale.ordinal=function(){return ou([],{t:"range",a:[[]]})},ao.scale.category10=function(){return ao.scale.ordinal().range(Al)},ao.scale.category20=function(){return ao.scale.ordinal().range(Cl)},ao.scale.category20b=function(){return ao.scale.ordinal().range(zl)},ao.scale.category20c=function(){return ao.scale.ordinal().range(Ll)};var Al=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(xn),Cl=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(xn),zl=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(xn),Ll=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(xn);ao.scale.quantile=function(){return au([],[])},ao.scale.quantize=function(){return lu(0,1,[0,1])},ao.scale.threshold=function(){return cu([.5],[0,1])},ao.scale.identity=function(){return fu([0,1])},ao.svg={},ao.svg.arc=function(){function n(){var n=Math.max(0,+e.apply(this,arguments)),c=Math.max(0,+r.apply(this,arguments)),f=o.apply(this,arguments)-Io,s=a.apply(this,arguments)-Io,h=Math.abs(s-f),p=f>s?0:1;if(n>c&&(g=c,c=n,n=g),h>=Oo)return 
t(c,p)+(n?t(n,1-p):"")+"Z";var g,v,d,y,m,M,x,b,_,w,S,k,N=0,E=0,A=[];if((y=(+l.apply(this,arguments)||0)/2)&&(d=u===ql?Math.sqrt(n*n+c*c):+u.apply(this,arguments),p||(E*=-1),c&&(E=tn(d/c*Math.sin(y))),n&&(N=tn(d/n*Math.sin(y)))),c){m=c*Math.cos(f+E),M=c*Math.sin(f+E),x=c*Math.cos(s-E),b=c*Math.sin(s-E);var C=Math.abs(s-f-2*E)<=Fo?0:1;if(E&&yu(m,M,x,b)===p^C){var z=(f+s)/2;m=c*Math.cos(z),M=c*Math.sin(z),x=b=null}}else m=M=0;if(n){_=n*Math.cos(s-N),w=n*Math.sin(s-N),S=n*Math.cos(f+N),k=n*Math.sin(f+N);var L=Math.abs(f-s+2*N)<=Fo?0:1;if(N&&yu(_,w,S,k)===1-p^L){var q=(f+s)/2;_=n*Math.cos(q),w=n*Math.sin(q),S=k=null}}else _=w=0;if(h>Uo&&(g=Math.min(Math.abs(c-n)/2,+i.apply(this,arguments)))>.001){v=c>n^p?0:1;var T=g,R=g;if(Fo>h){var D=null==S?[_,w]:null==x?[m,M]:Re([m,M],[S,k],[x,b],[_,w]),P=m-D[0],U=M-D[1],j=x-D[0],F=b-D[1],H=1/Math.sin(Math.acos((P*j+U*F)/(Math.sqrt(P*P+U*U)*Math.sqrt(j*j+F*F)))/2),O=Math.sqrt(D[0]*D[0]+D[1]*D[1]);R=Math.min(g,(n-O)/(H-1)),T=Math.min(g,(c-O)/(H+1))}if(null!=x){var I=mu(null==S?[_,w]:[S,k],[m,M],c,T,p),Y=mu([x,b],[_,w],c,T,p);g===T?A.push("M",I[0],"A",T,",",T," 0 0,",v," ",I[1],"A",c,",",c," 0 ",1-p^yu(I[1][0],I[1][1],Y[1][0],Y[1][1]),",",p," ",Y[1],"A",T,",",T," 0 0,",v," ",Y[0]):A.push("M",I[0],"A",T,",",T," 0 1,",v," ",Y[0])}else A.push("M",m,",",M);if(null!=S){var Z=mu([m,M],[S,k],n,-R,p),V=mu([_,w],null==x?[m,M]:[x,b],n,-R,p);g===R?A.push("L",V[0],"A",R,",",R," 0 0,",v," ",V[1],"A",n,",",n," 0 ",p^yu(V[1][0],V[1][1],Z[1][0],Z[1][1]),",",1-p," ",Z[1],"A",R,",",R," 0 0,",v," ",Z[0]):A.push("L",V[0],"A",R,",",R," 0 0,",v," ",Z[0])}else A.push("L",_,",",w)}else A.push("M",m,",",M),null!=x&&A.push("A",c,",",c," 0 ",C,",",p," ",x,",",b),A.push("L",_,",",w),null!=S&&A.push("A",n,",",n," 0 ",L,",",1-p," ",S,",",k);return A.push("Z"),A.join("")}function t(n,t){return"M0,"+n+"A"+n+","+n+" 0 1,"+t+" 0,"+-n+"A"+n+","+n+" 0 1,"+t+" 0,"+n}var e=hu,r=pu,i=su,u=ql,o=gu,a=vu,l=du;return n.innerRadius=function(t){return arguments.length?(e=En(t),n):e},n.outerRadius=function(t){return arguments.length?(r=En(t),n):r},n.cornerRadius=function(t){return arguments.length?(i=En(t),n):i},n.padRadius=function(t){return arguments.length?(u=t==ql?ql:En(t),n):u},n.startAngle=function(t){return arguments.length?(o=En(t),n):o},n.endAngle=function(t){return arguments.length?(a=En(t),n):a},n.padAngle=function(t){return arguments.length?(l=En(t),n):l},n.centroid=function(){var n=(+e.apply(this,arguments)+ +r.apply(this,arguments))/2,t=(+o.apply(this,arguments)+ +a.apply(this,arguments))/2-Io;return[Math.cos(t)*n,Math.sin(t)*n]},n};var ql="auto";ao.svg.line=function(){return Mu(m)};var Tl=ao.map({linear:xu,"linear-closed":bu,step:_u,"step-before":wu,"step-after":Su,basis:zu,"basis-open":Lu,"basis-closed":qu,bundle:Tu,cardinal:Eu,"cardinal-open":ku,"cardinal-closed":Nu,monotone:Fu});Tl.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var Rl=[0,2/3,1/3,0],Dl=[0,1/3,2/3,0],Pl=[0,1/6,2/3,1/6];ao.svg.line.radial=function(){var n=Mu(Hu);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},wu.reverse=Su,Su.reverse=wu,ao.svg.area=function(){return Ou(m)},ao.svg.area.radial=function(){var n=Ou(Hu);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},ao.svg.chord=function(){function n(n,a){var 
l=t(this,u,n,a),c=t(this,o,n,a);return"M"+l.p0+r(l.r,l.p1,l.a1-l.a0)+(e(l,c)?i(l.r,l.p1,l.r,l.p0):i(l.r,l.p1,c.r,c.p0)+r(c.r,c.p1,c.a1-c.a0)+i(c.r,c.p1,l.r,l.p0))+"Z"}function t(n,t,e,r){var i=t.call(n,e,r),u=a.call(n,i,r),o=l.call(n,i,r)-Io,f=c.call(n,i,r)-Io;return{r:u,a0:o,a1:f,p0:[u*Math.cos(o),u*Math.sin(o)],p1:[u*Math.cos(f),u*Math.sin(f)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Fo)+",1 "+t}function i(n,t,e,r){return"Q 0,0 "+r}var u=Me,o=xe,a=Iu,l=gu,c=vu;return n.radius=function(t){return arguments.length?(a=En(t),n):a},n.source=function(t){return arguments.length?(u=En(t),n):u},n.target=function(t){return arguments.length?(o=En(t),n):o},n.startAngle=function(t){return arguments.length?(l=En(t),n):l},n.endAngle=function(t){return arguments.length?(c=En(t),n):c},n},ao.svg.diagonal=function(){function n(n,i){var u=t.call(this,n,i),o=e.call(this,n,i),a=(u.y+o.y)/2,l=[u,{x:u.x,y:a},{x:o.x,y:a},o];return l=l.map(r),"M"+l[0]+"C"+l[1]+" "+l[2]+" "+l[3]}var t=Me,e=xe,r=Yu;return n.source=function(e){return arguments.length?(t=En(e),n):t},n.target=function(t){return arguments.length?(e=En(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},ao.svg.diagonal.radial=function(){var n=ao.svg.diagonal(),t=Yu,e=n.projection;return n.projection=function(n){return arguments.length?e(Zu(t=n)):t},n},ao.svg.symbol=function(){function n(n,r){return(Ul.get(t.call(this,n,r))||$u)(e.call(this,n,r))}var t=Xu,e=Vu;return n.type=function(e){return arguments.length?(t=En(e),n):t},n.size=function(t){return arguments.length?(e=En(t),n):e},n};var Ul=ao.map({circle:$u,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Fl)),e=t*Fl;return"M0,"+-t+"L"+e+",0 0,"+t+" "+-e+",0Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/jl),e=t*jl/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/jl),e=t*jl/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});ao.svg.symbolTypes=Ul.keys();var jl=Math.sqrt(3),Fl=Math.tan(30*Yo);Co.transition=function(n){for(var t,e,r=Hl||++Zl,i=Ku(n),u=[],o=Ol||{time:Date.now(),ease:Nr,delay:0,duration:250},a=-1,l=this.length;++a<l;){u.push(t=[]);for(var c=this[a],f=-1,s=c.length;++f<s;)(e=c[f])&&Qu(e,f,i,r,o),t.push(e)}return Wu(u,i,r)},Co.interrupt=function(n){return this.each(null==n?Il:Bu(Ku(n)))};var Hl,Ol,Il=Bu(Ku()),Yl=[],Zl=0;Yl.call=Co.call,Yl.empty=Co.empty,Yl.node=Co.node,Yl.size=Co.size,ao.transition=function(n,t){return n&&n.transition?Hl?n.transition(t):n:ao.selection().transition(n)},ao.transition.prototype=Yl,Yl.select=function(n){var t,e,r,i=this.id,u=this.namespace,o=[];n=A(n);for(var a=-1,l=this.length;++a<l;){o.push(t=[]);for(var c=this[a],f=-1,s=c.length;++f<s;)(r=c[f])&&(e=n.call(r,r.__data__,f,a))?("__data__"in r&&(e.__data__=r.__data__),Qu(e,f,u,i,r[u][i]),t.push(e)):t.push(null)}return Wu(o,u,i)},Yl.selectAll=function(n){var t,e,r,i,u,o=this.id,a=this.namespace,l=[];n=C(n);for(var c=-1,f=this.length;++c<f;)for(var s=this[c],h=-1,p=s.length;++h<p;)if(r=s[h]){u=r[a][o],e=n.call(r,r.__data__,h,c),l.push(t=[]);for(var g=-1,v=e.length;++g<v;)(i=e[g])&&Qu(i,g,a,o,u),t.push(i)}return Wu(l,a,o)},Yl.filter=function(n){var t,e,r,i=[];"function"!=typeof n&&(n=O(n));for(var u=0,o=this.length;o>u;u++){i.push(t=[]);for(var 
e=this[u],a=0,l=e.length;l>a;a++)(r=e[a])&&n.call(r,r.__data__,a,u)&&t.push(r)}return Wu(i,this.namespace,this.id)},Yl.tween=function(n,t){var e=this.id,r=this.namespace;return arguments.length<2?this.node()[r][e].tween.get(n):Y(this,null==t?function(t){t[r][e].tween.remove(n)}:function(i){i[r][e].tween.set(n,t)})},Yl.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function i(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function u(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?$r:Mr,a=ao.ns.qualify(n);return Ju(this,"attr."+n,t,a.local?u:i)},Yl.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(i));return r&&function(n){this.setAttribute(i,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(i.space,i.local));return r&&function(n){this.setAttributeNS(i.space,i.local,r(n))}}var i=ao.ns.qualify(n);return this.tween("attr."+n,i.local?r:e)},Yl.style=function(n,e,r){function i(){this.style.removeProperty(n)}function u(e){return null==e?i:(e+="",function(){var i,u=t(this).getComputedStyle(this,null).getPropertyValue(n);return u!==e&&(i=Mr(u,e),function(t){this.style.setProperty(n,i(t),r)})})}var o=arguments.length;if(3>o){if("string"!=typeof n){2>o&&(e="");for(r in n)this.style(r,n[r],e);return this}r=""}return Ju(this,"style."+n,e,u)},Yl.styleTween=function(n,e,r){function i(i,u){var o=e.call(this,i,u,t(this).getComputedStyle(this,null).getPropertyValue(n));return o&&function(t){this.style.setProperty(n,o(t),r)}}return arguments.length<3&&(r=""),this.tween("style."+n,i)},Yl.text=function(n){return Ju(this,"text",n,Gu)},Yl.remove=function(){var n=this.namespace;return this.each("end.transition",function(){var t;this[n].count<2&&(t=this.parentNode)&&t.removeChild(this)})},Yl.ease=function(n){var t=this.id,e=this.namespace;return arguments.length<1?this.node()[e][t].ease:("function"!=typeof n&&(n=ao.ease.apply(ao,arguments)),Y(this,function(r){r[e][t].ease=n}))},Yl.delay=function(n){var t=this.id,e=this.namespace;return arguments.length<1?this.node()[e][t].delay:Y(this,"function"==typeof n?function(r,i,u){r[e][t].delay=+n.call(r,r.__data__,i,u)}:(n=+n,function(r){r[e][t].delay=n}))},Yl.duration=function(n){var t=this.id,e=this.namespace;return arguments.length<1?this.node()[e][t].duration:Y(this,"function"==typeof n?function(r,i,u){r[e][t].duration=Math.max(1,n.call(r,r.__data__,i,u))}:(n=Math.max(1,n),function(r){r[e][t].duration=n}))},Yl.each=function(n,t){var e=this.id,r=this.namespace;if(arguments.length<2){var i=Ol,u=Hl;try{Hl=e,Y(this,function(t,i,u){Ol=t[r][e],n.call(t,t.__data__,i,u)})}finally{Ol=i,Hl=u}}else Y(this,function(i){var u=i[r][e];(u.event||(u.event=ao.dispatch("start","end","interrupt"))).on(n,t)});return this},Yl.transition=function(){for(var n,t,e,r,i=this.id,u=++Zl,o=this.namespace,a=[],l=0,c=this.length;c>l;l++){a.push(n=[]);for(var t=this[l],f=0,s=t.length;s>f;f++)(e=t[f])&&(r=e[o][i],Qu(e,f,o,u,{time:r.time,ease:r.ease,delay:r.delay+r.duration,duration:r.duration})),n.push(e)}return Wu(a,o,u)},ao.svg.axis=function(){function n(n){n.each(function(){var 
n,c=ao.select(this),f=this.__chart__||e,s=this.__chart__=e.copy(),h=null==l?s.ticks?s.ticks.apply(s,a):s.domain():l,p=null==t?s.tickFormat?s.tickFormat.apply(s,a):m:t,g=c.selectAll(".tick").data(h,s),v=g.enter().insert("g",".domain").attr("class","tick").style("opacity",Uo),d=ao.transition(g.exit()).style("opacity",Uo).remove(),y=ao.transition(g.order()).style("opacity",1),M=Math.max(i,0)+o,x=Zi(s),b=c.selectAll(".domain").data([0]),_=(b.enter().append("path").attr("class","domain"),ao.transition(b));v.append("line"),v.append("text");var w,S,k,N,E=v.select("line"),A=y.select("line"),C=g.select("text").text(p),z=v.select("text"),L=y.select("text"),q="top"===r||"left"===r?-1:1;if("bottom"===r||"top"===r?(n=no,w="x",k="y",S="x2",N="y2",C.attr("dy",0>q?"0em":".71em").style("text-anchor","middle"),_.attr("d","M"+x[0]+","+q*u+"V0H"+x[1]+"V"+q*u)):(n=to,w="y",k="x",S="y2",N="x2",C.attr("dy",".32em").style("text-anchor",0>q?"end":"start"),_.attr("d","M"+q*u+","+x[0]+"H0V"+x[1]+"H"+q*u)),E.attr(N,q*i),z.attr(k,q*M),A.attr(S,0).attr(N,q*i),L.attr(w,0).attr(k,q*M),s.rangeBand){var T=s,R=T.rangeBand()/2;f=s=function(n){return T(n)+R}}else f.rangeBand?f=s:d.call(n,s,f);v.call(n,f,s),y.call(n,s,s)})}var t,e=ao.scale.linear(),r=Vl,i=6,u=6,o=3,a=[10],l=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in Xl?t+"":Vl,n):r},n.ticks=function(){return arguments.length?(a=co(arguments),n):a},n.tickValues=function(t){return arguments.length?(l=t,n):l},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(i=+t,u=+arguments[e-1],n):i},n.innerTickSize=function(t){return arguments.length?(i=+t,n):i},n.outerTickSize=function(t){return arguments.length?(u=+t,n):u},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Vl="bottom",Xl={top:1,right:1,bottom:1,left:1};ao.svg.brush=function(){function n(t){t.each(function(){var t=ao.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=t.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),t.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=t.selectAll(".resize").data(v,m);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return $l[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,s=ao.transition(t),h=ao.transition(o);c&&(l=Zi(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),r(s)),f&&(l=Zi(f),h.attr("y",l[0]).attr("height",l[1]-l[0]),i(s)),e(s)})}function e(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+s[+/e$/.test(n)]+","+h[+/^s/.test(n)]+")"})}function r(n){n.select(".extent").attr("x",s[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",s[1]-s[0])}function i(n){n.select(".extent").attr("y",h[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",h[1]-h[0])}function u(){function u(){32==ao.event.keyCode&&(C||(M=null,L[0]-=s[1],L[1]-=h[1],C=2),S())}function v(){32==ao.event.keyCode&&2==C&&(L[0]+=s[1],L[1]+=h[1],C=0,S())}function d(){var 
n=ao.mouse(b),t=!1;x&&(n[0]+=x[0],n[1]+=x[1]),C||(ao.event.altKey?(M||(M=[(s[0]+s[1])/2,(h[0]+h[1])/2]),L[0]=s[+(n[0]<M[0])],L[1]=h[+(n[1]<M[1])]):M=null),E&&y(n,c,0)&&(r(k),t=!0),A&&y(n,f,1)&&(i(k),t=!0),t&&(e(k),w({type:"brush",mode:C?"move":"resize"}))}function y(n,t,e){var r,i,u=Zi(t),l=u[0],c=u[1],f=L[e],v=e?h:s,d=v[1]-v[0];return C&&(l-=f,c-=d+f),r=(e?g:p)?Math.max(l,Math.min(c,n[e])):n[e],C?i=(r+=f)+d:(M&&(f=Math.max(l,Math.min(c,2*M[e]-r))),r>f?(i=r,r=f):i=f),v[0]!=r||v[1]!=i?(e?a=null:o=null,v[0]=r,v[1]=i,!0):void 0}function m(){d(),k.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),ao.select("body").style("cursor",null),q.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),z(),w({type:"brushend"})}var M,x,b=this,_=ao.select(ao.event.target),w=l.of(b,arguments),k=ao.select(b),N=_.datum(),E=!/^(n|s)$/.test(N)&&c,A=!/^(e|w)$/.test(N)&&f,C=_.classed("extent"),z=W(b),L=ao.mouse(b),q=ao.select(t(b)).on("keydown.brush",u).on("keyup.brush",v);if(ao.event.changedTouches?q.on("touchmove.brush",d).on("touchend.brush",m):q.on("mousemove.brush",d).on("mouseup.brush",m),k.interrupt().selectAll("*").interrupt(),C)L[0]=s[0]-L[0],L[1]=h[0]-L[1];else if(N){var T=+/w$/.test(N),R=+/^n/.test(N);x=[s[1-T]-L[0],h[1-R]-L[1]],L[0]=s[T],L[1]=h[R]}else ao.event.altKey&&(M=L.slice());k.style("pointer-events","none").selectAll(".resize").style("display",null),ao.select("body").style("cursor",_.style("cursor")),w({type:"brushstart"}),d()}var o,a,l=N(n,"brushstart","brush","brushend"),c=null,f=null,s=[0,0],h=[0,0],p=!0,g=!0,v=Bl[0];return n.event=function(n){n.each(function(){var n=l.of(this,arguments),t={x:s,y:h,i:o,j:a},e=this.__chart__||t;this.__chart__=t,Hl?ao.select(this).transition().each("start.brush",function(){o=e.i,a=e.j,s=e.x,h=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=xr(s,t.x),r=xr(h,t.y);return o=a=null,function(i){s=t.x=e(i),h=t.y=r(i),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){o=t.i,a=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,v=Bl[!c<<1|!f],n):c},n.y=function(t){return arguments.length?(f=t,v=Bl[!c<<1|!f],n):f},n.clamp=function(t){return arguments.length?(c&&f?(p=!!t[0],g=!!t[1]):c?p=!!t:f&&(g=!!t),n):c&&f?[p,g]:c?p:f?g:null},n.extent=function(t){var e,r,i,u,l;return arguments.length?(c&&(e=t[0],r=t[1],f&&(e=e[0],r=r[0]),o=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(l=e,e=r,r=l),e==s[0]&&r==s[1]||(s=[e,r])),f&&(i=t[0],u=t[1],c&&(i=i[1],u=u[1]),a=[i,u],f.invert&&(i=f(i),u=f(u)),i>u&&(l=i,i=u,u=l),i==h[0]&&u==h[1]||(h=[i,u])),n):(c&&(o?(e=o[0],r=o[1]):(e=s[0],r=s[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(l=e,e=r,r=l))),f&&(a?(i=a[0],u=a[1]):(i=h[0],u=h[1],f.invert&&(i=f.invert(i),u=f.invert(u)),i>u&&(l=i,i=u,u=l))),c&&f?[[e,i],[r,u]]:c?[e,r]:f&&[i,u])},n.clear=function(){return n.empty()||(s=[0,0],h=[0,0],o=a=null),n},n.empty=function(){return!!c&&s[0]==s[1]||!!f&&h[0]==h[1]},ao.rebind(n,l,"on")};var $l={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Bl=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Wl=ga.format=xa.timeFormat,Jl=Wl.utc,Gl=Jl("%Y-%m-%dT%H:%M:%S.%LZ");Wl.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?eo:Gl,eo.parse=function(n){var t=new 
Date(n);return isNaN(t)?null:t},eo.toString=Gl.toString,ga.second=On(function(n){return new va(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),ga.seconds=ga.second.range,ga.seconds.utc=ga.second.utc.range,ga.minute=On(function(n){return new va(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),ga.minutes=ga.minute.range,ga.minutes.utc=ga.minute.utc.range,ga.hour=On(function(n){var t=n.getTimezoneOffset()/60;return new va(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),ga.hours=ga.hour.range,ga.hours.utc=ga.hour.utc.range,ga.month=On(function(n){return n=ga.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),ga.months=ga.month.range,ga.months.utc=ga.month.utc.range;var Kl=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Ql=[[ga.second,1],[ga.second,5],[ga.second,15],[ga.second,30],[ga.minute,1],[ga.minute,5],[ga.minute,15],[ga.minute,30],[ga.hour,1],[ga.hour,3],[ga.hour,6],[ga.hour,12],[ga.day,1],[ga.day,2],[ga.week,1],[ga.month,1],[ga.month,3],[ga.year,1]],nc=Wl.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",zt]]),tc={range:function(n,t,e){return ao.range(Math.ceil(n/e)*e,+t,e).map(io)},floor:m,ceil:m};Ql.year=ga.year,ga.scale=function(){return ro(ao.scale.linear(),Ql,nc)};var ec=Ql.map(function(n){return[n[0].utc,n[1]]}),rc=Jl.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",zt]]);ec.year=ga.year.utc,ga.scale.utc=function(){return ro(ao.scale.linear(),ec,rc)},ao.text=An(function(n){return n.responseText}),ao.json=function(n,t){return Cn(n,"application/json",uo,t)},ao.html=function(n,t){return Cn(n,"text/html",oo,t)},ao.xml=An(function(n){return n.responseXML}),"function"==typeof define&&define.amd?(this.d3=ao,define(ao)):"object"==typeof module&&module.exports?module.exports=ao:this.d3=ao}();
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/hadoop.css b/hadoop-hdds/framework/src/main/resources/webapps/static/hadoop.css
deleted file mode 100644
index 270881d..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/hadoop.css
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-body {
-  background-color : #ffffff;
-  font-family : sans-serif;
-}
-
-.small {
-  font-size : smaller;
-}
-
-div#dfsnodetable tr#row1, div.dfstable td.col1 {
-	font-weight : bolder;
-}
-
-div.dfstable th {
-    text-align:left;
-	vertical-align : top;
-}
-
-div.dfstable td#col3 {
-	text-align : right;
-}
-
-div#dfsnodetable caption {
-	text-align : left;
-}
-
-div#dfsnodetable a#title {
-	font-size : larger;
-	font-weight : bolder;
-}
-
-div#dfsnodetable td, th {
-        padding-bottom : 4px;
-        padding-top : 4px;
-}
-
-div#dfsnodetable A:link, A:visited {
-	text-decoration : none;
-}
-
-div#dfsnodetable th.header, th.headerASC, th.headerDSC {
-        padding-bottom : 8px;
-        padding-top : 8px;
-}
-div#dfsnodetable th.header:hover, th.headerASC:hover, th.headerDSC:hover,
-                 td.name:hover {
-        text-decoration : underline;
-	cursor : pointer;
-}
-
-div#dfsnodetable td.blocks, td.size, td.pcused, td.adminstate, td.lastcontact {
-	text-align : right;
-}
-
-div#dfsnodetable .rowNormal .header {
-	background-color : #ffffff;
-}
-div#dfsnodetable .rowAlt, .headerASC, .headerDSC {
-	background-color : lightyellow;
-}
-
-.warning {
-        font-weight : bolder;
-        color : red;
-}
-
-div.dfstable table {
-	white-space : pre;
-}
-
-table.storage, table.nodes {
-    border-collapse: collapse;
-}
-
-table.storage td {
-	padding:10px;
-	border:1px solid black;
-}
-
-table.nodes td {
-	padding:0px;
-	border:1px solid black;
-}
-
-div#dfsnodetable td, div#dfsnodetable th, div.dfstable td {
-	padding-left : 10px;
-	padding-right : 10px;
-	border:1px solid black;
-}
-
-td.perc_filled {
-  background-color:#AAAAFF;
-}
-
-td.perc_nonfilled {
-  background-color:#FFFFFF;
-}
-
-line.taskgraphline {
-  stroke-width:1;stroke-linecap:round;
-}
-
-#quicklinks {
-	margin: 0;
-	padding: 2px 4px;
-	position: fixed;
-	top: 0;
-	right: 0;
-	text-align: right;
-	background-color: #eee;
-	font-weight: bold;
-}
-
-#quicklinks ul {
-	margin: 0;
-	padding: 0;
-	list-style-type: none;
-	font-weight: normal;
-}
-
-#quicklinks ul {
-	display: none;
-}
-
-#quicklinks a {
-	font-size: smaller;
-	text-decoration: none;
-}
-
-#quicklinks ul a {
-	text-decoration: underline;
-}
-
-span.failed {
-    color:red;
-}
-
-div.security {
-    width:100%;
-}
-
-#startupprogress table, #startupprogress th, #startupprogress td {
-  border-collapse: collapse;
-  border-left: 1px solid black;
-  border-right: 1px solid black;
-  padding: 5px;
-  text-align: left;
-}
-
-#startupprogress table {
-  border: 1px solid black;
-}
-
-.phase {
-  border-top: 1px solid black;
-  font-weight: bold;
-}
-
-.current {
-  font-style: italic;
-}
-
-.later {
-  color: gray;
-}
-
-.step .startupdesc {
-  text-indent: 20px;
-}
-
-#startupprogress span {
-  font-weight: bold;
-}
-
-.panel-success > .panel-heading {
-  color: #fff !important;
-  background-color: #5FA33E !important;
-}
-
-header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
-  border-radius: 0px;
-  background-color: #5fa33e;
-  color: #fff;
-}
-
-#ui-tabs > li > a {
-  color: #dcf0d3;
-}
-
-#ui-tabs .active a {
-  color: #fff;
-  background-color: #446633;
-}
-
-#alert-panel {
-  margin-top:20px;
-  display: none;
-}
-
-.dfshealth-node-capacity-bar {
-    margin-bottom:0;
-    width: 60%;
-}
-
-.dfshealth-node-icon:before {
-    font-size: 10pt;
-    padding-right: 1pt;
-    font-family: 'Glyphicons Halflings';
-    font-style: normal;
-    font-weight: normal;
-    line-height: 1;
-    -webkit-font-smoothing: antialiased;
-    -moz-osx-font-smoothing: grayscale;
-}
-
-.dfshealth-node-alive:before {
-    color: #5fa341;
-    content: "\e013";
-}
-
-.dfshealth-node-decommissioning:before {
-    color: #5fa341;
-    content: "\e090";
-}
-
-.dfshealth-node-decommissioned:before {
-    color: #eea236;
-    content: "\e090";
-}
-
-.dfshealth-node-in-maintenance:before {
-    color: #eea236;
-    content: "\e136";
-}
-
-.dfshealth-node-entering-maintenance:before {
-    color: #5fa341;
-    content: "\e136";
-}
-
-.dfshealth-node-down:before {
-    color: #c7254e;
-    content: "\e101";
-}
-
-.dfshealth-node-down-decommissioned:before {
-    color: #c7254e;
-    content: "\e017";
-}
-
-.dfshealth-node-down-maintenance:before {
-    color: #c7254e;
-    content: "\e136";
-}
-
-.dfshealth-node-legend {
-    list-style-type: none;
-    text-align: right;
-}
-
-.dfshealth-node-legend li {
-    display: inline;
-    padding: 10pt;
-    padding-left: 10pt;
-}
-
-.dfshealth-node-legend li:before {
-    padding-right: 5pt;
-}
-
-.explorer-entry .explorer-browse-links { cursor: pointer; }
-.explorer-entry .glyphicon-trash { cursor: pointer; }
-
-.popover {
-  max-width: 400px;
-}
-
-.explorer-popover-perm-body table  {
-  width: 30rem;
-}
-
-.explorer-popover-perm-body table > tbody > tr > td {
-  text-align: center;
-}
-
-.explorer-popover-perm-body label {
-    display:inline;
-    margin-bottom: 0;
-    font-weight: normal;
-    cursor: pointer;
-}
-
-.cut-paste {
-  width: 75px;
-  min-width: 75px;
-  float: right;
-  left: 75px;
-}
-
-.datanodestatus{
-    width:75px;
-    height:30px;
-    color: #555;
-    display: inline-block;
-}
-
-.bar rect {
-    fill: #5FA33F;
-}
-
-.bar text {
-    fill: #fff;
-    font: 10px sans-serif;
-}
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/jquery-3.4.1.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/jquery-3.4.1.min.js
deleted file mode 100644
index a1c07fd..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/jquery-3.4.1.min.js
+++ /dev/null
@@ -1,2 +0,0 @@
-/*! jQuery v3.4.1 | (c) JS Foundation and other contributors | jquery.org/license */
-!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],E=C.document,r=Object.getPrototypeOf,s=t.slice,g=t.concat,u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType},x=function(e){return null!=e&&e===e.window},c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.4.1",k=function(e,t){return new k.fn.init(e,t)},p=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;function d(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0<t&&t-1 in e)}k.fn=k.prototype={jquery:f,constructor:k,length:0,toArray:function(){return s.call(this)},get:function(e){return null==e?s.call(this):e<0?this[e+this.length]:this[e]},pushStack:function(e){var t=k.merge(this.constructor(),e);return t.prevObject=this,t},each:function(e){return k.each(this,e)},map:function(n){return this.pushStack(k.map(this,function(e,t){return n.call(e,t,e)}))},slice:function(){return this.pushStack(s.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(e<0?t:0);return this.pushStack(0<=n&&n<t?[this[n]]:[])},end:function(){return this.prevObject||this.constructor()},push:u,sort:t.sort,splice:t.splice},k.extend=k.fn.extend=function(){var e,t,n,r,i,o,a=arguments[0]||{},s=1,u=arguments.length,l=!1;for("boolean"==typeof a&&(l=a,a=arguments[s]||{},s++),"object"==typeof a||m(a)||(a={}),s===u&&(a=this,s--);s<u;s++)if(null!=(e=arguments[s]))for(t in e)r=e[t],"__proto__"!==t&&a!==r&&(l&&r&&(k.isPlainObject(r)||(i=Array.isArray(r)))?(n=a[t],o=i&&!Array.isArray(n)?[]:i||k.isPlainObject(n)?n:{},i=!1,a[t]=k.extend(l,o,r)):void 0!==r&&(a[t]=r));return a},k.extend({expando:"jQuery"+(f+Math.random()).replace(/\D/g,""),isReady:!0,error:function(e){throw new Error(e)},noop:function(){},isPlainObject:function(e){var t,n;return!(!e||"[object Object]"!==o.call(e))&&(!(t=r(e))||"function"==typeof(n=v.call(t,"constructor")&&t.constructor)&&a.call(n)===l)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},globalEval:function(e,t){b(e,{nonce:t&&t.nonce})},each:function(e,t){var n,r=0;if(d(e)){for(n=e.length;r<n;r++)if(!1===t.call(e[r],r,e[r]))break}else for(r in e)if(!1===t.call(e[r],r,e[r]))break;return e},trim:function(e){return null==e?"":(e+"").replace(p,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(d(Object(e))?k.merge(n,"string"==typeof e?[e]:e):u.call(n,e)),n},inArray:function(e,t,n){return null==t?-1:i.call(t,e,n)},merge:function(e,t){for(var n=+t.length,r=0,i=e.length;r<n;r++)e[i++]=t[r];return e.length=i,e},grep:function(e,t,n){for(var r=[],i=0,o=e.length,a=!n;i<o;i++)!t(e[i],i)!==a&&r.push(e[i]);return r},map:function(e,t,n){var r,i,o=0,a=[];if(d(e))for(r=e.length;o<r;o++)null!=(i=t(e[o],o,n))&&a.push(i);else for(o in e)null!=(i=t(e[o],o,n))&&a.push(i);return g.apply([],a)},guid:1,support:y}),"function"==typeof 
Symbol&&(k.fn[Symbol.iterator]=t[Symbol.iterator]),k.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(e,t){n["[object "+t+"]"]=t.toLowerCase()});var h=function(n){var e,d,b,o,i,h,f,g,w,u,l,T,C,a,E,v,s,c,y,k="sizzle"+1*new Date,m=n.document,S=0,r=0,p=ue(),x=ue(),N=ue(),A=ue(),D=function(e,t){return e===t&&(l=!0),0},j={}.hasOwnProperty,t=[],q=t.pop,L=t.push,H=t.push,O=t.slice,P=function(e,t){for(var n=0,r=e.length;n<r;n++)if(e[n]===t)return n;return-1},R="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",I="(?:\\\\.|[\\w-]|[^\0-\\xa0])+",W="\\["+M+"*("+I+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+I+"))|)"+M+"*\\]",$=":("+I+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+W+")*)|.*)\\)|)",F=new RegExp(M+"+","g"),B=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),_=new RegExp("^"+M+"*,"+M+"*"),z=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp($),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+$),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ne=function(e,t,n){var r="0x"+t-65536;return r!=r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(m.childNodes),m.childNodes),t[m.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&((e?e.ownerDocument||e:m)!==C&&T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!A[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&U.test(t)){(s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=k),o=(l=h(t)).length;while(o--)l[o]="#"+s+" "+xe(l[o]);c=l.join(","),f=ee.test(t)&&ye(e.parentNode)||e}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){A(t,!0)}finally{s===k&&e.removeAttribute("id")}}}return g(t.replace(B,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function 
le(e){return e[k]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:m;return r!==C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),m!==C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=k,!C.getElementsByName||!C.getElementsByName(k).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){a.appendChild(e).innerHTML="<a id='"+k+"'></a><select id='"+k+"-\r\\' msallowcapture=''><option selected=''></option></select>",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+k+"-]").length||v.push("~="),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+k+"+*").length||v.push(".#.+[+~]")}),ce(function(e){e.innerHTML="<a href='' disabled='disabled'></a><select 
disabled='disabled'><option/></select>";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",$)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e===C||e.ownerDocument===m&&y(m,e)?-1:t===C||t.ownerDocument===m&&y(m,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===C?-1:t===C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]===m?-1:s[r]===m?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if((e.ownerDocument||e)!==C&&T(e),d.matchesSelector&&E&&!A[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){A(t,!0)}return 0<se(t,C,null,[e]).length},se.contains=function(e,t){return(e.ownerDocument||e)!==C&&T(e),y(e,t)},se.attr=function(e,t){(e.ownerDocument||e)!==C&&T(e);var n=b.attrHandle[t.toLowerCase()],r=n&&j.call(b.attrHandle,t.toLowerCase())?n(e,t,!E):void 0;return void 0!==r?r:d.attributes||!E?e.getAttribute(t):(r=e.getAttributeNode(t))&&r.specified?r.value:null},se.escape=function(e){return(e+"").replace(re,ie)},se.error=function(e){throw new Error("Syntax error, unrecognized expression: "+e)},se.uniqueSort=function(e){var t,n=[],r=0,i=0;if(l=!d.detectDuplicates,u=!d.sortStable&&e.slice(0),e.sort(D),l){while(t=e[i++])t===e[i]&&(r=n.push(i));while(r--)e.splice(n[r],1)}return u=null,e},o=se.getText=function(e){var t,n="",r=0,i=e.nodeType;if(i){if(1===i||9===i||11===i){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=o(e)}else if(3===i||4===i)return e.nodeValue}else while(t=e[r++])n+=o(t);return n},(b=se.selectors={cacheLength:50,createPseudo:le,match:G,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return 
e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=p[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&p(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1<t.indexOf(i):"$="===r?i&&t.slice(-i.length)===i:"~="===r?-1<(" "+t.replace(F," ")+" ").indexOf(i):"|="===r&&(t===i||t.slice(0,i.length+1)===i+"-"))}},CHILD:function(h,e,t,g,v){var y="nth"!==h.slice(0,3),m="last"!==h.slice(-4),x="of-type"===e;return 1===g&&0===v?function(e){return!!e.parentNode}:function(e,t,n){var r,i,o,a,s,u,l=y!==m?"nextSibling":"previousSibling",c=e.parentNode,f=x&&e.nodeName.toLowerCase(),p=!n&&!x,d=!1;if(c){if(y){while(l){a=e;while(a=a[l])if(x?a.nodeName.toLowerCase()===f:1===a.nodeType)return!1;u=l="only"===h&&!u&&"nextSibling"}return!0}if(u=[m?c.firstChild:c.lastChild],m&&p){d=(s=(r=(i=(o=(a=c)[k]||(a[k]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]||[])[0]===S&&r[1])&&r[2],a=s&&c.childNodes[s];while(a=++s&&a&&a[l]||(d=s=0)||u.pop())if(1===a.nodeType&&++d&&a===e){i[h]=[S,s,d];break}}else if(p&&(d=s=(r=(i=(o=(a=e)[k]||(a[k]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]||[])[0]===S&&r[1]),!1===d)while(a=++s&&a&&a[l]||(d=s=0)||u.pop())if((x?a.nodeName.toLowerCase()===f:1===a.nodeType)&&++d&&(p&&((i=(o=a[k]||(a[k]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]=[S,d]),a===e))break;return(d-=v)===g||d%g==0&&0<=d/g}}},PSEUDO:function(e,o){var t,a=b.pseudos[e]||b.setFilters[e.toLowerCase()]||se.error("unsupported pseudo: "+e);return a[k]?a(o):1<a.length?(t=[e,e,"",o],b.setFilters.hasOwnProperty(e.toLowerCase())?le(function(e,t){var n,r=a(e,o),i=r.length;while(i--)e[n=P(e,r[i])]=!(t[n]=r[i])}):function(e){return a(e,0,t)}):a}},pseudos:{not:le(function(e){var r=[],i=[],s=f(e.replace(B,"$1"));return s[k]?le(function(e,t,n,r){var i,o=s(e,null,r,[]),a=e.length;while(a--)(i=o[a])&&(e[a]=!(t[a]=i))}):function(e,t,n){return r[0]=e,s(r,null,n,i),r[0]=null,!i.pop()}}),has:le(function(t){return function(e){return 0<se(t,e).length}}),contains:le(function(t){return t=t.replace(te,ne),function(e){return-1<(e.textContent||o(e)).indexOf(t)}}),lang:le(function(n){return V.test(n||"")||se.error("unsupported lang: "+n),n=n.replace(te,ne).toLowerCase(),function(e){var t;do{if(t=E?e.lang:e.getAttribute("xml:lang")||e.getAttribute("lang"))return(t=t.toLowerCase())===n||0===t.indexOf(n+"-")}while((e=e.parentNode)&&1===e.nodeType);return!1}}),target:function(e){var t=n.location&&n.location.hash;return t&&t.slice(1)===e.id},root:function(e){return e===a},focus:function(e){return e===C.activeElement&&(!C.hasFocus||C.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:ge(!1),disabled:ge(!0),checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return 
e.parentNode&&e.parentNode.selectedIndex,!0===e.selected},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeType<6)return!1;return!0},parent:function(e){return!b.pseudos.empty(e)},header:function(e){return J.test(e.nodeName)},input:function(e){return Q.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||"text"===t.toLowerCase())},first:ve(function(){return[0]}),last:ve(function(e,t){return[t-1]}),eq:ve(function(e,t,n){return[n<0?n+t:n]}),even:ve(function(e,t){for(var n=0;n<t;n+=2)e.push(n);return e}),odd:ve(function(e,t){for(var n=1;n<t;n+=2)e.push(n);return e}),lt:ve(function(e,t,n){for(var r=n<0?n+t:t<n?t:n;0<=--r;)e.push(r);return e}),gt:ve(function(e,t,n){for(var r=n<0?n+t:n;++r<t;)e.push(r);return e})}}).pseudos.nth=b.pseudos.eq,{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})b.pseudos[e]=de(e);for(e in{submit:!0,reset:!0})b.pseudos[e]=he(e);function me(){}function xe(e){for(var t=0,n=e.length,r="";t<n;t++)r+=e[t].value;return r}function be(s,e,t){var u=e.dir,l=e.next,c=l||u,f=t&&"parentNode"===c,p=r++;return e.first?function(e,t,n){while(e=e[u])if(1===e.nodeType||f)return s(e,t,n);return!1}:function(e,t,n){var r,i,o,a=[S,p];if(n){while(e=e[u])if((1===e.nodeType||f)&&s(e,t,n))return!0}else while(e=e[u])if(1===e.nodeType||f)if(i=(o=e[k]||(e[k]={}))[e.uniqueID]||(o[e.uniqueID]={}),l&&l===e.nodeName.toLowerCase())e=e[u]||e;else{if((r=i[c])&&r[0]===S&&r[1]===p)return a[2]=r[2];if((i[c]=a)[2]=s(e,t,n))return!0}return!1}}function we(i){return 1<i.length?function(e,t,n){var r=i.length;while(r--)if(!i[r](e,t,n))return!1;return!0}:i[0]}function Te(e,t,n,r,i){for(var o,a=[],s=0,u=e.length,l=null!=t;s<u;s++)(o=e[s])&&(n&&!n(o,r,i)||(a.push(o),l&&t.push(s)));return a}function Ce(d,h,g,v,y,e){return v&&!v[k]&&(v=Ce(v)),y&&!y[k]&&(y=Ce(y,e)),le(function(e,t,n,r){var i,o,a,s=[],u=[],l=t.length,c=e||function(e,t,n){for(var r=0,i=t.length;r<i;r++)se(e,t[r],n);return n}(h||"*",n.nodeType?[n]:n,[]),f=!d||!e&&h?c:Te(c,s,d,n,r),p=g?y||(e?d:l||v)?[]:t:f;if(g&&g(f,p,n,r),v){i=Te(p,u),v(i,[],n,r),o=i.length;while(o--)(a=i[o])&&(p[u[o]]=!(f[u[o]]=a))}if(e){if(y||d){if(y){i=[],o=p.length;while(o--)(a=p[o])&&i.push(f[o]=a);y(null,p=[],i,r)}o=p.length;while(o--)(a=p[o])&&-1<(i=y?P(e,a):s[o])&&(e[i]=!(t[i]=a))}}else p=Te(p===t?p.splice(l,p.length):p),y?y(null,t,p,r):H.apply(t,p)})}function Ee(e){for(var i,t,n,r=e.length,o=b.relative[e[0].type],a=o||b.relative[" "],s=o?1:0,u=be(function(e){return e===i},a,!0),l=be(function(e){return-1<P(i,e)},a,!0),c=[function(e,t,n){var r=!o&&(n||t!==w)||((i=t).nodeType?u(e,t,n):l(e,t,n));return i=null,r}];s<r;s++)if(t=b.relative[e[s].type])c=[be(we(c),t)];else{if((t=b.filter[e[s].type].apply(null,e[s].matches))[k]){for(n=++s;n<r;n++)if(b.relative[e[n].type])break;return Ce(1<s&&we(c),1<s&&xe(e.slice(0,s-1).concat({value:" "===e[s-2].type?"*":""})).replace(B,"$1"),t,s<n&&Ee(e.slice(s,n)),n<r&&Ee(e=e.slice(n)),n<r&&xe(e))}c.push(t)}return we(c)}return me.prototype=b.filters=b.pseudos,b.setFilters=new me,h=se.tokenize=function(e,t){var n,r,i,o,a,s,u,l=x[e+" "];if(l)return t?0:l.slice(0);a=e,s=[],u=b.preFilter;while(a){for(o in n&&!(r=_.exec(a))||(r&&(a=a.slice(r[0].length)||a),s.push(i=[])),n=!1,(r=z.exec(a))&&(n=r.shift(),i.push({value:n,type:r[0].replace(B," 
")}),a=a.slice(n.length)),b.filter)!(r=G[o].exec(a))||u[o]&&!(r=u[o](r))||(n=r.shift(),i.push({value:n,type:o,matches:r}),a=a.slice(n.length));if(!n)break}return t?a.length:a?se.error(e):x(e,s).slice(0)},f=se.compile=function(e,t){var n,v,y,m,x,r,i=[],o=[],a=N[e+" "];if(!a){t||(t=h(e)),n=t.length;while(n--)(a=Ee(t[n]))[k]?i.push(a):o.push(a);(a=N(e,(v=o,m=0<(y=i).length,x=0<v.length,r=function(e,t,n,r,i){var o,a,s,u=0,l="0",c=e&&[],f=[],p=w,d=e||x&&b.find.TAG("*",i),h=S+=null==p?1:Math.random()||.1,g=d.length;for(i&&(w=t===C||t||i);l!==g&&null!=(o=d[l]);l++){if(x&&o){a=0,t||o.ownerDocument===C||(T(o),n=!E);while(s=v[a++])if(s(o,t||C,n)){r.push(o);break}i&&(S=h)}m&&((o=!s&&o)&&u--,e&&c.push(o))}if(u+=l,m&&l!==u){a=0;while(s=y[a++])s(c,f,t,n);if(e){if(0<u)while(l--)c[l]||f[l]||(f[l]=q.call(r));f=Te(f)}H.apply(r,f),i&&!e&&0<f.length&&1<u+y.length&&se.uniqueSort(r)}return i&&(S=h,w=p),c},m?le(r):r))).selector=e}return a},g=se.select=function(e,t,n,r){var i,o,a,s,u,l="function"==typeof e&&e,c=!r&&h(e=l.selector||e);if(n=n||[],1===c.length){if(2<(o=c[0]=c[0].slice(0)).length&&"ID"===(a=o[0]).type&&9===t.nodeType&&E&&b.relative[o[1].type]){if(!(t=(b.find.ID(a.matches[0].replace(te,ne),t)||[])[0]))return n;l&&(t=t.parentNode),e=e.slice(o.shift().value.length)}i=G.needsContext.test(e)?0:o.length;while(i--){if(a=o[i],b.relative[s=a.type])break;if((u=b.find[s])&&(r=u(a.matches[0].replace(te,ne),ee.test(o[0].type)&&ye(t.parentNode)||t))){if(o.splice(i,1),!(e=r.length&&xe(o)))return H.apply(n,r),n;break}}}return(l||f(e,c))(r,t,!E,n,!t||ee.test(e)&&ye(t.parentNode)||t),n},d.sortStable=k.split("").sort(D).join("")===k,d.detectDuplicates=!!l,T(),d.sortDetached=ce(function(e){return 1&e.compareDocumentPosition(C.createElement("fieldset"))}),ce(function(e){return e.innerHTML="<a href='#'></a>","#"===e.firstChild.getAttribute("href")})||fe("type|href|height|width",function(e,t,n){if(!n)return e.getAttribute(t,"type"===t.toLowerCase()?1:2)}),d.attributes&&ce(function(e){return e.innerHTML="<input/>",e.firstChild.setAttribute("value",""),""===e.firstChild.getAttribute("value")})||fe("value",function(e,t,n){if(!n&&"input"===e.nodeName.toLowerCase())return e.defaultValue}),ce(function(e){return null==e.getAttribute("disabled")})||fe(R,function(e,t,n){var r;if(!n)return!0===e[t]?t.toLowerCase():(r=e.getAttributeNode(t))&&r.specified?r.value:null}),se}(C);k.find=h,k.expr=h.selectors,k.expr[":"]=k.expr.pseudos,k.uniqueSort=k.unique=h.uniqueSort,k.text=h.getText,k.isXMLDoc=h.isXML,k.contains=h.contains,k.escapeSelector=h.escape;var T=function(e,t,n){var r=[],i=void 0!==n;while((e=e[t])&&9!==e.nodeType)if(1===e.nodeType){if(i&&k(e).is(n))break;r.push(e)}return r},S=function(e,t){for(var n=[];e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n},N=k.expr.match.needsContext;function A(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()}var D=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?k.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?k.grep(e,function(e){return e===n!==r}):"string"!=typeof n?k.grep(e,function(e){return-1<i.call(n,e)!==r}):k.filter(n,e,r)}k.filter=function(e,t,n){var r=t[0];return n&&(e=":not("+e+")"),1===t.length&&1===r.nodeType?k.find.matchesSelector(r,e)?[r]:[]:k.find.matches(e,k.grep(t,function(e){return 1===e.nodeType}))},k.fn.extend({find:function(e){var t,n,r=this.length,i=this;if("string"!=typeof e)return 
this.pushStack(k(e).filter(function(){for(t=0;t<r;t++)if(k.contains(i[t],this))return!0}));for(n=this.pushStack([]),t=0;t<r;t++)k.find(e,i[t],n);return 1<r?k.uniqueSort(n):n},filter:function(e){return this.pushStack(j(this,e||[],!1))},not:function(e){return this.pushStack(j(this,e||[],!0))},is:function(e){return!!j(this,"string"==typeof e&&N.test(e)?k(e):e||[],!1).length}});var q,L=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/;(k.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||q,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:L.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof k?t[0]:t,k.merge(this,k.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),D.test(r[1])&&k.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(k):k.makeArray(e,this)}).prototype=k.fn,q=k(E);var H=/^(?:parents|prev(?:Until|All))/,O={children:!0,contents:!0,next:!0,prev:!0};function P(e,t){while((e=e[t])&&1!==e.nodeType);return e}k.fn.extend({has:function(e){var t=k(e,this),n=t.length;return this.filter(function(){for(var e=0;e<n;e++)if(k.contains(this,t[e]))return!0})},closest:function(e,t){var n,r=0,i=this.length,o=[],a="string"!=typeof e&&k(e);if(!N.test(e))for(;r<i;r++)for(n=this[r];n&&n!==t;n=n.parentNode)if(n.nodeType<11&&(a?-1<a.index(n):1===n.nodeType&&k.find.matchesSelector(n,e))){o.push(n);break}return this.pushStack(1<o.length?k.uniqueSort(o):o)},index:function(e){return e?"string"==typeof e?i.call(k(e),this[0]):i.call(this,e.jquery?e[0]:e):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){return this.pushStack(k.uniqueSort(k.merge(this.get(),k(e,t))))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}}),k.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return T(e,"parentNode")},parentsUntil:function(e,t,n){return T(e,"parentNode",n)},next:function(e){return P(e,"nextSibling")},prev:function(e){return P(e,"previousSibling")},nextAll:function(e){return T(e,"nextSibling")},prevAll:function(e){return T(e,"previousSibling")},nextUntil:function(e,t,n){return T(e,"nextSibling",n)},prevUntil:function(e,t,n){return T(e,"previousSibling",n)},siblings:function(e){return S((e.parentNode||{}).firstChild,e)},children:function(e){return S(e.firstChild)},contents:function(e){return"undefined"!=typeof e.contentDocument?e.contentDocument:(A(e,"template")&&(e=e.content||e),k.merge([],e.childNodes))}},function(r,i){k.fn[r]=function(e,t){var n=k.map(this,i,e);return"Until"!==r.slice(-5)&&(t=e),t&&"string"==typeof t&&(n=k.filter(t,n)),1<this.length&&(O[r]||k.uniqueSort(n),H.test(r)&&n.reverse()),this.pushStack(n)}});var R=/[^\x20\t\r\n\f]+/g;function M(e){return e}function I(e){throw e}function W(e,t,n,r){var i;try{e&&m(i=e.promise)?i.call(e).done(t).fail(n):e&&m(i=e.then)?i.call(e,t,n):t.apply(void 0,[e].slice(r))}catch(e){n.apply(void 0,[e])}}k.Callbacks=function(r){var e,n;r="string"==typeof r?(e=r,n={},k.each(e.match(R)||[],function(e,t){n[t]=!0}),n):k.extend({},r);var i,t,o,a,s=[],u=[],l=-1,c=function(){for(a=a||r.once,o=i=!0;u.length;l=-1){t=u.shift();while(++l<s.length)!1===s[l].apply(t[0],t[1])&&r.stopOnFalse&&(l=s.length,t=!1)}r.memory||(t=!1),i=!1,a&&(s=t?[]:"")},f={add:function(){return 
s&&(t&&!i&&(l=s.length-1,u.push(t)),function n(e){k.each(e,function(e,t){m(t)?r.unique&&f.has(t)||s.push(t):t&&t.length&&"string"!==w(t)&&n(t)})}(arguments),t&&!i&&c()),this},remove:function(){return k.each(arguments,function(e,t){var n;while(-1<(n=k.inArray(t,s,n)))s.splice(n,1),n<=l&&l--}),this},has:function(e){return e?-1<k.inArray(e,s):0<s.length},empty:function(){return s&&(s=[]),this},disable:function(){return a=u=[],s=t="",this},disabled:function(){return!s},lock:function(){return a=u=[],t||i||(s=t=""),this},locked:function(){return!!a},fireWith:function(e,t){return a||(t=[e,(t=t||[]).slice?t.slice():t],u.push(t),i||c()),this},fire:function(){return f.fireWith(this,arguments),this},fired:function(){return!!o}};return f},k.extend({Deferred:function(e){var o=[["notify","progress",k.Callbacks("memory"),k.Callbacks("memory"),2],["resolve","done",k.Callbacks("once memory"),k.Callbacks("once memory"),0,"resolved"],["reject","fail",k.Callbacks("once memory"),k.Callbacks("once memory"),1,"rejected"]],i="pending",a={state:function(){return i},always:function(){return s.done(arguments).fail(arguments),this},"catch":function(e){return a.then(null,e)},pipe:function(){var i=arguments;return k.Deferred(function(r){k.each(o,function(e,t){var n=m(i[t[4]])&&i[t[4]];s[t[1]](function(){var e=n&&n.apply(this,arguments);e&&m(e.promise)?e.promise().progress(r.notify).done(r.resolve).fail(r.reject):r[t[0]+"With"](this,n?[e]:arguments)})}),i=null}).promise()},then:function(t,n,r){var u=0;function l(i,o,a,s){return function(){var n=this,r=arguments,e=function(){var e,t;if(!(i<u)){if((e=a.apply(n,r))===o.promise())throw new TypeError("Thenable self-resolution");t=e&&("object"==typeof e||"function"==typeof e)&&e.then,m(t)?s?t.call(e,l(u,o,M,s),l(u,o,I,s)):(u++,t.call(e,l(u,o,M,s),l(u,o,I,s),l(u,o,M,o.notifyWith))):(a!==M&&(n=void 0,r=[e]),(s||o.resolveWith)(n,r))}},t=s?e:function(){try{e()}catch(e){k.Deferred.exceptionHook&&k.Deferred.exceptionHook(e,t.stackTrace),u<=i+1&&(a!==I&&(n=void 0,r=[e]),o.rejectWith(n,r))}};i?t():(k.Deferred.getStackHook&&(t.stackTrace=k.Deferred.getStackHook()),C.setTimeout(t))}}return k.Deferred(function(e){o[0][3].add(l(0,e,m(r)?r:M,e.notifyWith)),o[1][3].add(l(0,e,m(t)?t:M)),o[2][3].add(l(0,e,m(n)?n:I))}).promise()},promise:function(e){return null!=e?k.extend(e,a):a}},s={};return k.each(o,function(e,t){var n=t[2],r=t[5];a[t[1]]=n.add,r&&n.add(function(){i=r},o[3-e][2].disable,o[3-e][3].disable,o[0][2].lock,o[0][3].lock),n.add(t[3].fire),s[t[0]]=function(){return s[t[0]+"With"](this===s?void 0:this,arguments),this},s[t[0]+"With"]=n.fireWith}),a.promise(s),e&&e.call(s,s),s},when:function(e){var n=arguments.length,t=n,r=Array(t),i=s.call(arguments),o=k.Deferred(),a=function(t){return function(e){r[t]=this,i[t]=1<arguments.length?s.call(arguments):e,--n||o.resolveWith(r,i)}};if(n<=1&&(W(e,o.done(a(t)).resolve,o.reject,!n),"pending"===o.state()||m(i[t]&&i[t].then)))return o.then();while(t--)W(i[t],a(t),o.reject);return o.promise()}});var $=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;k.Deferred.exceptionHook=function(e,t){C.console&&C.console.warn&&e&&$.test(e.name)&&C.console.warn("jQuery.Deferred exception: "+e.message,e.stack,t)},k.readyException=function(e){C.setTimeout(function(){throw e})};var F=k.Deferred();function B(){E.removeEventListener("DOMContentLoaded",B),C.removeEventListener("load",B),k.ready()}k.fn.ready=function(e){return 
F.then(e)["catch"](function(e){k.readyException(e)}),this},k.extend({isReady:!1,readyWait:1,ready:function(e){(!0===e?--k.readyWait:k.isReady)||(k.isReady=!0)!==e&&0<--k.readyWait||F.resolveWith(E,[k])}}),k.ready.then=F.then,"complete"===E.readyState||"loading"!==E.readyState&&!E.documentElement.doScroll?C.setTimeout(k.ready):(E.addEventListener("DOMContentLoaded",B),C.addEventListener("load",B));var _=function(e,t,n,r,i,o,a){var s=0,u=e.length,l=null==n;if("object"===w(n))for(s in i=!0,n)_(e,t,s,n[s],!0,o,a);else if(void 0!==r&&(i=!0,m(r)||(a=!0),l&&(a?(t.call(e,r),t=null):(l=t,t=function(e,t,n){return l.call(k(e),n)})),t))for(;s<u;s++)t(e[s],n,a?r:r.call(e[s],s,t(e[s],n)));return i?e:l?t.call(e):u?t(e[0],n):o},z=/^-ms-/,U=/-([a-z])/g;function X(e,t){return t.toUpperCase()}function V(e){return e.replace(z,"ms-").replace(U,X)}var G=function(e){return 1===e.nodeType||9===e.nodeType||!+e.nodeType};function Y(){this.expando=k.expando+Y.uid++}Y.uid=1,Y.prototype={cache:function(e){var t=e[this.expando];return t||(t={},G(e)&&(e.nodeType?e[this.expando]=t:Object.defineProperty(e,this.expando,{value:t,configurable:!0}))),t},set:function(e,t,n){var r,i=this.cache(e);if("string"==typeof t)i[V(t)]=n;else for(r in t)i[V(r)]=t[r];return i},get:function(e,t){return void 0===t?this.cache(e):e[this.expando]&&e[this.expando][V(t)]},access:function(e,t,n){return void 0===t||t&&"string"==typeof t&&void 0===n?this.get(e,t):(this.set(e,t,n),void 0!==n?n:t)},remove:function(e,t){var n,r=e[this.expando];if(void 0!==r){if(void 0!==t){n=(t=Array.isArray(t)?t.map(V):(t=V(t))in r?[t]:t.match(R)||[]).length;while(n--)delete r[t[n]]}(void 0===t||k.isEmptyObject(r))&&(e.nodeType?e[this.expando]=void 0:delete e[this.expando])}},hasData:function(e){var t=e[this.expando];return void 0!==t&&!k.isEmptyObject(t)}};var Q=new Y,J=new Y,K=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,Z=/[A-Z]/g;function ee(e,t,n){var r,i;if(void 0===n&&1===e.nodeType)if(r="data-"+t.replace(Z,"-$&").toLowerCase(),"string"==typeof(n=e.getAttribute(r))){try{n="true"===(i=n)||"false"!==i&&("null"===i?null:i===+i+""?+i:K.test(i)?JSON.parse(i):i)}catch(e){}J.set(e,t,n)}else n=void 0;return n}k.extend({hasData:function(e){return J.hasData(e)||Q.hasData(e)},data:function(e,t,n){return J.access(e,t,n)},removeData:function(e,t){J.remove(e,t)},_data:function(e,t,n){return Q.access(e,t,n)},_removeData:function(e,t){Q.remove(e,t)}}),k.fn.extend({data:function(n,e){var t,r,i,o=this[0],a=o&&o.attributes;if(void 0===n){if(this.length&&(i=J.get(o),1===o.nodeType&&!Q.get(o,"hasDataAttrs"))){t=a.length;while(t--)a[t]&&0===(r=a[t].name).indexOf("data-")&&(r=V(r.slice(5)),ee(o,r,i[r]));Q.set(o,"hasDataAttrs",!0)}return i}return"object"==typeof n?this.each(function(){J.set(this,n)}):_(this,function(e){var t;if(o&&void 0===e)return void 0!==(t=J.get(o,n))?t:void 0!==(t=ee(o,n))?t:void 0;this.each(function(){J.set(this,n,e)})},null,e,1<arguments.length,null,!0)},removeData:function(e){return this.each(function(){J.remove(this,e)})}}),k.extend({queue:function(e,t,n){var r;if(e)return t=(t||"fx")+"queue",r=Q.get(e,t),n&&(!r||Array.isArray(n)?r=Q.access(e,t,k.makeArray(n)):r.push(n)),r||[]},dequeue:function(e,t){t=t||"fx";var n=k.queue(e,t),r=n.length,i=n.shift(),o=k._queueHooks(e,t);"inprogress"===i&&(i=n.shift(),r--),i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,function(){k.dequeue(e,t)},o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return Q.get(e,n)||Q.access(e,n,{empty:k.Callbacks("once 
memory").add(function(){Q.remove(e,[t+"queue",n])})})}}),k.fn.extend({queue:function(t,n){var e=2;return"string"!=typeof t&&(n=t,t="fx",e--),arguments.length<e?k.queue(this[0],t):void 0===n?this:this.each(function(){var e=k.queue(this,t,n);k._queueHooks(this,t),"fx"===t&&"inprogress"!==e[0]&&k.dequeue(this,t)})},dequeue:function(e){return this.each(function(){k.dequeue(this,e)})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,t){var n,r=1,i=k.Deferred(),o=this,a=this.length,s=function(){--r||i.resolveWith(o,[o])};"string"!=typeof e&&(t=e,e=void 0),e=e||"fx";while(a--)(n=Q.get(o[a],e+"queueHooks"))&&n.empty&&(r++,n.empty.add(s));return s(),i.promise(t)}});var te=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,ne=new RegExp("^(?:([+-])=|)("+te+")([a-z%]*)$","i"),re=["Top","Right","Bottom","Left"],ie=E.documentElement,oe=function(e){return k.contains(e.ownerDocument,e)},ae={composed:!0};ie.getRootNode&&(oe=function(e){return k.contains(e.ownerDocument,e)||e.getRootNode(ae)===e.ownerDocument});var se=function(e,t){return"none"===(e=t||e).style.display||""===e.style.display&&oe(e)&&"none"===k.css(e,"display")},ue=function(e,t,n,r){var i,o,a={};for(o in t)a[o]=e.style[o],e.style[o]=t[o];for(o in i=n.apply(e,r||[]),t)e.style[o]=a[o];return i};function le(e,t,n,r){var i,o,a=20,s=r?function(){return r.cur()}:function(){return k.css(e,t,"")},u=s(),l=n&&n[3]||(k.cssNumber[t]?"":"px"),c=e.nodeType&&(k.cssNumber[t]||"px"!==l&&+u)&&ne.exec(k.css(e,t));if(c&&c[3]!==l){u/=2,l=l||c[3],c=+u||1;while(a--)k.style(e,t,c+l),(1-o)*(1-(o=s()/u||.5))<=0&&(a=0),c/=o;c*=2,k.style(e,t,c+l),n=n||[]}return n&&(c=+c||+u||0,i=n[1]?c+(n[1]+1)*n[2]:+n[2],r&&(r.unit=l,r.start=c,r.end=i)),i}var ce={};function fe(e,t){for(var n,r,i,o,a,s,u,l=[],c=0,f=e.length;c<f;c++)(r=e[c]).style&&(n=r.style.display,t?("none"===n&&(l[c]=Q.get(r,"display")||null,l[c]||(r.style.display="")),""===r.style.display&&se(r)&&(l[c]=(u=a=o=void 0,a=(i=r).ownerDocument,s=i.nodeName,(u=ce[s])||(o=a.body.appendChild(a.createElement(s)),u=k.css(o,"display"),o.parentNode.removeChild(o),"none"===u&&(u="block"),ce[s]=u)))):"none"!==n&&(l[c]="none",Q.set(r,"display",n)));for(c=0;c<f;c++)null!=l[c]&&(e[c].style.display=l[c]);return e}k.fn.extend({show:function(){return fe(this,!0)},hide:function(){return fe(this)},toggle:function(e){return"boolean"==typeof e?e?this.show():this.hide():this.each(function(){se(this)?k(this).show():k(this).hide()})}});var pe=/^(?:checkbox|radio)$/i,de=/<([a-z][^\/\0>\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i,ge={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?k.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n<r;n++)Q.set(e[n],"globalEval",!t||Q.get(t[n],"globalEval"))}ge.optgroup=ge.option,ge.tbody=ge.tfoot=ge.colgroup=ge.caption=ge.thead,ge.th=ge.td;var me,xe,be=/<|&#?\w+;/;function we(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d<h;d++)if((o=e[d])||0===o)if("object"===w(o))k.merge(p,o.nodeType?[o]:o);else 
if(be.test(o)){a=a||f.appendChild(t.createElement("div")),s=(de.exec(o)||["",""])[1].toLowerCase(),u=ge[s]||ge._default,a.innerHTML=u[1]+k.htmlPrefilter(o)+u[2],c=u[0];while(c--)a=a.lastChild;k.merge(p,a.childNodes),(a=f.firstChild).textContent=""}else p.push(t.createTextNode(o));f.textContent="",d=0;while(o=p[d++])if(r&&-1<k.inArray(o,r))i&&i.push(o);else if(l=oe(o),a=ve(f.appendChild(o),"script"),l&&ye(a),n){c=0;while(o=a[c++])he.test(o.type||"")&&n.push(o)}return f}me=E.createDocumentFragment().appendChild(E.createElement("div")),(xe=E.createElement("input")).setAttribute("type","radio"),xe.setAttribute("checked","checked"),xe.setAttribute("name","t"),me.appendChild(xe),y.checkClone=me.cloneNode(!0).cloneNode(!0).lastChild.checked,me.innerHTML="<textarea>x</textarea>",y.noCloneChecked=!!me.cloneNode(!0).lastChild.defaultValue;var Te=/^key/,Ce=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ee=/^([^.]*)(?:\.(.+)|)/;function ke(){return!0}function Se(){return!1}function Ne(e,t){return e===function(){try{return E.activeElement}catch(e){}}()==("focus"===t)}function Ae(e,t,n,r,i,o){var a,s;if("object"==typeof t){for(s in"string"!=typeof n&&(r=r||n,n=void 0),t)Ae(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=Se;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return k().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=k.guid++)),e.each(function(){k.event.add(this,t,i,r,n)})}function De(e,i,o){o?(Q.set(e,i,!1),k.event.add(e,i,{namespace:!1,handler:function(e){var t,n,r=Q.get(this,i);if(1&e.isTrigger&&this[i]){if(r.length)(k.event.special[i]||{}).delegateType&&e.stopPropagation();else if(r=s.call(arguments),Q.set(this,i,r),t=o(this,i),this[i](),r!==(n=Q.get(this,i))||t?Q.set(this,i,!1):n={},r!==n)return e.stopImmediatePropagation(),e.preventDefault(),n.value}else r.length&&(Q.set(this,i,{value:k.event.trigger(k.extend(r[0],k.Event.prototype),r.slice(1),this)}),e.stopImmediatePropagation())}})):void 0===Q.get(e,i)&&k.event.add(e,i,ke)}k.event={global:{},add:function(t,e,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.get(t);if(v){n.handler&&(n=(o=n).handler,i=o.selector),i&&k.find.matchesSelector(ie,i),n.guid||(n.guid=k.guid++),(u=v.events)||(u=v.events={}),(a=v.handle)||(a=v.handle=function(e){return"undefined"!=typeof k&&k.event.triggered!==e.type?k.event.dispatch.apply(t,arguments):void 0}),l=(e=(e||"").match(R)||[""]).length;while(l--)d=g=(s=Ee.exec(e[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=k.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=k.event.special[d]||{},c=k.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&k.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(t,r,h,a)||t.addEventListener&&t.addEventListener(d,a)),f.add&&(f.add.call(t,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),k.event.global[d]=!0)}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.hasData(e)&&Q.get(e);if(v&&(u=v.events)){l=(t=(t||"").match(R)||[""]).length;while(l--)if(d=g=(s=Ee.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d){f=k.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new 
RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,v.handle)||k.removeEvent(e,d,v.handle),delete u[d])}else for(d in u)k.event.remove(e,d+t[l],n,r,!0);k.isEmptyObject(u)&&Q.remove(e,"handle events")}},dispatch:function(e){var t,n,r,i,o,a,s=k.event.fix(e),u=new Array(arguments.length),l=(Q.get(this,"events")||{})[s.type]||[],c=k.event.special[s.type]||{};for(u[0]=s,t=1;t<arguments.length;t++)u[t]=arguments[t];if(s.delegateTarget=this,!c.preDispatch||!1!==c.preDispatch.call(this,s)){a=k.event.handlers.call(this,s,l),t=0;while((i=a[t++])&&!s.isPropagationStopped()){s.currentTarget=i.elem,n=0;while((o=i.handlers[n++])&&!s.isImmediatePropagationStopped())s.rnamespace&&!1!==o.namespace&&!s.rnamespace.test(o.namespace)||(s.handleObj=o,s.data=o.data,void 0!==(r=((k.event.special[o.origType]||{}).handle||o.handler).apply(i.elem,u))&&!1===(s.result=r)&&(s.preventDefault(),s.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,s),s.result}},handlers:function(e,t){var n,r,i,o,a,s=[],u=t.delegateCount,l=e.target;if(u&&l.nodeType&&!("click"===e.type&&1<=e.button))for(;l!==this;l=l.parentNode||this)if(1===l.nodeType&&("click"!==e.type||!0!==l.disabled)){for(o=[],a={},n=0;n<u;n++)void 0===a[i=(r=t[n]).selector+" "]&&(a[i]=r.needsContext?-1<k(i,this).index(l):k.find(i,this,null,[l]).length),a[i]&&o.push(r);o.length&&s.push({elem:l,handlers:o})}return l=this,u<t.length&&s.push({elem:l,handlers:t.slice(u)}),s},addProp:function(t,e){Object.defineProperty(k.Event.prototype,t,{enumerable:!0,configurable:!0,get:m(e)?function(){if(this.originalEvent)return e(this.originalEvent)}:function(){if(this.originalEvent)return this.originalEvent[t]},set:function(e){Object.defineProperty(this,t,{enumerable:!0,configurable:!0,writable:!0,value:e})}})},fix:function(e){return e[k.expando]?e:new k.Event(e)},special:{load:{noBubble:!0},click:{setup:function(e){var t=this||e;return pe.test(t.type)&&t.click&&A(t,"input")&&De(t,"click",ke),!1},trigger:function(e){var t=this||e;return pe.test(t.type)&&t.click&&A(t,"input")&&De(t,"click"),!0},_default:function(e){var t=e.target;return pe.test(t.type)&&t.click&&A(t,"input")&&Q.get(t,"click")||A(t,"a")}},beforeunload:{postDispatch:function(e){void 0!==e.result&&e.originalEvent&&(e.originalEvent.returnValue=e.result)}}}},k.removeEvent=function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n)},k.Event=function(e,t){if(!(this instanceof k.Event))return new k.Event(e,t);e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||void 0===e.defaultPrevented&&!1===e.returnValue?ke:Se,this.target=e.target&&3===e.target.nodeType?e.target.parentNode:e.target,this.currentTarget=e.currentTarget,this.relatedTarget=e.relatedTarget):this.type=e,t&&k.extend(this,t),this.timeStamp=e&&e.timeStamp||Date.now(),this[k.expando]=!0},k.Event.prototype={constructor:k.Event,isDefaultPrevented:Se,isPropagationStopped:Se,isImmediatePropagationStopped:Se,isSimulated:!1,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=ke,e&&!this.isSimulated&&e.preventDefault()},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=ke,e&&!this.isSimulated&&e.stopPropagation()},stopImmediatePropagation:function(){var 
e=this.originalEvent;this.isImmediatePropagationStopped=ke,e&&!this.isSimulated&&e.stopImmediatePropagation(),this.stopPropagation()}},k.each({altKey:!0,bubbles:!0,cancelable:!0,changedTouches:!0,ctrlKey:!0,detail:!0,eventPhase:!0,metaKey:!0,pageX:!0,pageY:!0,shiftKey:!0,view:!0,"char":!0,code:!0,charCode:!0,key:!0,keyCode:!0,button:!0,buttons:!0,clientX:!0,clientY:!0,offsetX:!0,offsetY:!0,pointerId:!0,pointerType:!0,screenX:!0,screenY:!0,targetTouches:!0,toElement:!0,touches:!0,which:function(e){var t=e.button;return null==e.which&&Te.test(e.type)?null!=e.charCode?e.charCode:e.keyCode:!e.which&&void 0!==t&&Ce.test(e.type)?1&t?1:2&t?3:4&t?2:0:e.which}},k.event.addProp),k.each({focus:"focusin",blur:"focusout"},function(e,t){k.event.special[e]={setup:function(){return De(this,e,Ne),!1},trigger:function(){return De(this,e),!0},delegateType:t}}),k.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(e,i){k.event.special[e]={delegateType:i,bindType:i,handle:function(e){var t,n=e.relatedTarget,r=e.handleObj;return n&&(n===this||k.contains(this,n))||(e.type=r.origType,t=r.handler.apply(this,arguments),e.type=i),t}}}),k.fn.extend({on:function(e,t,n,r){return Ae(this,e,t,n,r)},one:function(e,t,n,r){return Ae(this,e,t,n,r,1)},off:function(e,t,n){var r,i;if(e&&e.preventDefault&&e.handleObj)return r=e.handleObj,k(e.delegateTarget).off(r.namespace?r.origType+"."+r.namespace:r.origType,r.selector,r.handler),this;if("object"==typeof e){for(i in e)this.off(i,t,e[i]);return this}return!1!==t&&"function"!=typeof t||(n=t,t=void 0),!1===n&&(n=Se),this.each(function(){k.event.remove(this,e,n,t)})}});var je=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,qe=/<script|<style|<link/i,Le=/checked\s*(?:[^=]|=\s*.checked.)/i,He=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;function Oe(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&k(e).children("tbody")[0]||e}function Pe(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Re(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Me(e,t){var n,r,i,o,a,s,u,l;if(1===t.nodeType){if(Q.hasData(e)&&(o=Q.access(e),a=Q.set(t,o),l=o.events))for(i in delete a.handle,a.events={},l)for(n=0,r=l[i].length;n<r;n++)k.event.add(t,i,l[i][n]);J.hasData(e)&&(s=J.access(e),u=k.extend({},s),J.set(t,u))}}function Ie(n,r,i,o){r=g.apply([],r);var e,t,a,s,u,l,c=0,f=n.length,p=f-1,d=r[0],h=m(d);if(h||1<f&&"string"==typeof d&&!y.checkClone&&Le.test(d))return n.each(function(e){var t=n.eq(e);h&&(r[0]=d.call(this,e,t.html())),Ie(t,r,i,o)});if(f&&(t=(e=we(r,n[0].ownerDocument,!1,n,o)).firstChild,1===e.childNodes.length&&(e=t),t||o)){for(s=(a=k.map(ve(e,"script"),Pe)).length;c<f;c++)u=e,c!==p&&(u=k.clone(u,!0,!0),s&&k.merge(a,ve(u,"script"))),i.call(n[c],u,c);if(s)for(l=a[a.length-1].ownerDocument,k.map(a,Re),c=0;c<s;c++)u=a[c],he.test(u.type||"")&&!Q.access(u,"globalEval")&&k.contains(l,u)&&(u.src&&"module"!==(u.type||"").toLowerCase()?k._evalUrl&&!u.noModule&&k._evalUrl(u.src,{nonce:u.nonce||u.getAttribute("nonce")}):b(u.textContent.replace(He,""),u,l))}return n}function We(e,t,n){for(var r,i=t?k.filter(t,e):e,o=0;null!=(r=i[o]);o++)n||1!==r.nodeType||k.cleanData(ve(r)),r.parentNode&&(n&&oe(r)&&ye(ve(r,"script")),r.parentNode.removeChild(r));return e}k.extend({htmlPrefilter:function(e){return e.replace(je,"<$1></$2>")},clone:function(e,t,n){var 
r,i,o,a,s,u,l,c=e.cloneNode(!0),f=oe(e);if(!(y.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||k.isXMLDoc(e)))for(a=ve(c),r=0,i=(o=ve(e)).length;r<i;r++)s=o[r],u=a[r],void 0,"input"===(l=u.nodeName.toLowerCase())&&pe.test(s.type)?u.checked=s.checked:"input"!==l&&"textarea"!==l||(u.defaultValue=s.defaultValue);if(t)if(n)for(o=o||ve(e),a=a||ve(c),r=0,i=o.length;r<i;r++)Me(o[r],a[r]);else Me(e,c);return 0<(a=ve(c,"script")).length&&ye(a,!f&&ve(e,"script")),c},cleanData:function(e){for(var t,n,r,i=k.event.special,o=0;void 0!==(n=e[o]);o++)if(G(n)){if(t=n[Q.expando]){if(t.events)for(r in t.events)i[r]?k.event.remove(n,r):k.removeEvent(n,r,t.handle);n[Q.expando]=void 0}n[J.expando]&&(n[J.expando]=void 0)}}}),k.fn.extend({detach:function(e){return We(this,e,!0)},remove:function(e){return We(this,e)},text:function(e){return _(this,function(e){return void 0===e?k.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=e)})},null,e,arguments.length)},append:function(){return Ie(this,arguments,function(e){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||Oe(this,e).appendChild(e)})},prepend:function(){return Ie(this,arguments,function(e){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var t=Oe(this,e);t.insertBefore(e,t.firstChild)}})},before:function(){return Ie(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return Ie(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},empty:function(){for(var e,t=0;null!=(e=this[t]);t++)1===e.nodeType&&(k.cleanData(ve(e,!1)),e.textContent="");return this},clone:function(e,t){return e=null!=e&&e,t=null==t?e:t,this.map(function(){return k.clone(this,e,t)})},html:function(e){return _(this,function(e){var t=this[0]||{},n=0,r=this.length;if(void 0===e&&1===t.nodeType)return t.innerHTML;if("string"==typeof e&&!qe.test(e)&&!ge[(de.exec(e)||["",""])[1].toLowerCase()]){e=k.htmlPrefilter(e);try{for(;n<r;n++)1===(t=this[n]||{}).nodeType&&(k.cleanData(ve(t,!1)),t.innerHTML=e);t=0}catch(e){}}t&&this.empty().append(e)},null,e,arguments.length)},replaceWith:function(){var n=[];return Ie(this,arguments,function(e){var t=this.parentNode;k.inArray(this,n)<0&&(k.cleanData(ve(this)),t&&t.replaceChild(e,this))},n)}}),k.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,a){k.fn[e]=function(e){for(var t,n=[],r=k(e),i=r.length-1,o=0;o<=i;o++)t=o===i?this:this.clone(!0),k(r[o])[a](t),u.apply(n,t.get());return this.pushStack(n)}});var $e=new RegExp("^("+te+")(?!px)[a-z%]+$","i"),Fe=function(e){var t=e.ownerDocument.defaultView;return t&&t.opener||(t=C),t.getComputedStyle(e)},Be=new RegExp(re.join("|"),"i");function _e(e,t,n){var r,i,o,a,s=e.style;return(n=n||Fe(e))&&(""!==(a=n.getPropertyValue(t)||n[t])||oe(e)||(a=k.style(e,t)),!y.pixelBoxStyles()&&$e.test(a)&&Be.test(t)&&(r=s.width,i=s.minWidth,o=s.maxWidth,s.minWidth=s.maxWidth=s.width=a,a=n.width,s.width=r,s.minWidth=i,s.maxWidth=o)),void 0!==a?a+"":a}function ze(e,t){return{get:function(){if(!e())return(this.get=t).apply(this,arguments);delete this.get}}}!function(){function e(){if(u){s.style.cssText="position:absolute;left:-11111px;width:60px;margin-top:1px;padding:0;border:0",u.style.cssText="position:relative;display:block;box-sizing:border-box;overflow:scroll;margin:auto;border:1px;padding:1px;width:60%;top:1%",ie.appendChild(s).appendChild(u);var 
e=C.getComputedStyle(u);n="1%"!==e.top,a=12===t(e.marginLeft),u.style.right="60%",o=36===t(e.right),r=36===t(e.width),u.style.position="absolute",i=12===t(u.offsetWidth/3),ie.removeChild(s),u=null}}function t(e){return Math.round(parseFloat(e))}var n,r,i,o,a,s=E.createElement("div"),u=E.createElement("div");u.style&&(u.style.backgroundClip="content-box",u.cloneNode(!0).style.backgroundClip="",y.clearCloneStyle="content-box"===u.style.backgroundClip,k.extend(y,{boxSizingReliable:function(){return e(),r},pixelBoxStyles:function(){return e(),o},pixelPosition:function(){return e(),n},reliableMarginLeft:function(){return e(),a},scrollboxSize:function(){return e(),i}}))}();var Ue=["Webkit","Moz","ms"],Xe=E.createElement("div").style,Ve={};function Ge(e){var t=k.cssProps[e]||Ve[e];return t||(e in Xe?e:Ve[e]=function(e){var t=e[0].toUpperCase()+e.slice(1),n=Ue.length;while(n--)if((e=Ue[n]+t)in Xe)return e}(e)||e)}var Ye=/^(none|table(?!-c[ea]).+)/,Qe=/^--/,Je={position:"absolute",visibility:"hidden",display:"block"},Ke={letterSpacing:"0",fontWeight:"400"};function Ze(e,t,n){var r=ne.exec(t);return r?Math.max(0,r[2]-(n||0))+(r[3]||"px"):t}function et(e,t,n,r,i,o){var a="width"===t?1:0,s=0,u=0;if(n===(r?"border":"content"))return 0;for(;a<4;a+=2)"margin"===n&&(u+=k.css(e,n+re[a],!0,i)),r?("content"===n&&(u-=k.css(e,"padding"+re[a],!0,i)),"margin"!==n&&(u-=k.css(e,"border"+re[a]+"Width",!0,i))):(u+=k.css(e,"padding"+re[a],!0,i),"padding"!==n?u+=k.css(e,"border"+re[a]+"Width",!0,i):s+=k.css(e,"border"+re[a]+"Width",!0,i));return!r&&0<=o&&(u+=Math.max(0,Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-o-u-s-.5))||0),u}function tt(e,t,n){var r=Fe(e),i=(!y.boxSizingReliable()||n)&&"border-box"===k.css(e,"boxSizing",!1,r),o=i,a=_e(e,t,r),s="offset"+t[0].toUpperCase()+t.slice(1);if($e.test(a)){if(!n)return a;a="auto"}return(!y.boxSizingReliable()&&i||"auto"===a||!parseFloat(a)&&"inline"===k.css(e,"display",!1,r))&&e.getClientRects().length&&(i="border-box"===k.css(e,"boxSizing",!1,r),(o=s in e)&&(a=e[s])),(a=parseFloat(a)||0)+et(e,t,n||(i?"border":"content"),o,r,a)+"px"}function nt(e,t,n,r,i){return new nt.prototype.init(e,t,n,r,i)}k.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=_e(e,"opacity");return""===n?"1":n}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,gridArea:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnStart:!0,gridRow:!0,gridRowEnd:!0,gridRowStart:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{},style:function(e,t,n,r){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var i,o,a,s=V(t),u=Qe.test(t),l=e.style;if(u||(t=Ge(s)),a=k.cssHooks[t]||k.cssHooks[s],void 0===n)return a&&"get"in a&&void 0!==(i=a.get(e,!1,r))?i:l[t];"string"===(o=typeof n)&&(i=ne.exec(n))&&i[1]&&(n=le(e,t,i),o="number"),null!=n&&n==n&&("number"!==o||u||(n+=i&&i[3]||(k.cssNumber[s]?"":"px")),y.clearCloneStyle||""!==n||0!==t.indexOf("background")||(l[t]="inherit"),a&&"set"in a&&void 0===(n=a.set(e,n,r))||(u?l.setProperty(t,n):l[t]=n))}},css:function(e,t,n,r){var i,o,a,s=V(t);return Qe.test(t)||(t=Ge(s)),(a=k.cssHooks[t]||k.cssHooks[s])&&"get"in a&&(i=a.get(e,!0,n)),void 0===i&&(i=_e(e,t,r)),"normal"===i&&t in Ke&&(i=Ke[t]),""===n||n?(o=parseFloat(i),!0===n||isFinite(o)?o||0:i):i}}),k.each(["height","width"],function(e,u){k.cssHooks[u]={get:function(e,t,n){if(t)return!Ye.test(k.css(e,"display"))||e.getClientRects().length&&e.getBoundingClientRect().width?tt(e,u,n):ue(e,Je,function(){return 
tt(e,u,n)})},set:function(e,t,n){var r,i=Fe(e),o=!y.scrollboxSize()&&"absolute"===i.position,a=(o||n)&&"border-box"===k.css(e,"boxSizing",!1,i),s=n?et(e,u,n,a,i):0;return a&&o&&(s-=Math.ceil(e["offset"+u[0].toUpperCase()+u.slice(1)]-parseFloat(i[u])-et(e,u,"border",!1,i)-.5)),s&&(r=ne.exec(t))&&"px"!==(r[3]||"px")&&(e.style[u]=t,t=k.css(e,u)),Ze(0,t,s)}}}),k.cssHooks.marginLeft=ze(y.reliableMarginLeft,function(e,t){if(t)return(parseFloat(_e(e,"marginLeft"))||e.getBoundingClientRect().left-ue(e,{marginLeft:0},function(){return e.getBoundingClientRect().left}))+"px"}),k.each({margin:"",padding:"",border:"Width"},function(i,o){k.cssHooks[i+o]={expand:function(e){for(var t=0,n={},r="string"==typeof e?e.split(" "):[e];t<4;t++)n[i+re[t]+o]=r[t]||r[t-2]||r[0];return n}},"margin"!==i&&(k.cssHooks[i+o].set=Ze)}),k.fn.extend({css:function(e,t){return _(this,function(e,t,n){var r,i,o={},a=0;if(Array.isArray(t)){for(r=Fe(e),i=t.length;a<i;a++)o[t[a]]=k.css(e,t[a],!1,r);return o}return void 0!==n?k.style(e,t,n):k.css(e,t)},e,t,1<arguments.length)}}),((k.Tween=nt).prototype={constructor:nt,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||k.easing._default,this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(k.cssNumber[n]?"":"px")},cur:function(){var e=nt.propHooks[this.prop];return e&&e.get?e.get(this):nt.propHooks._default.get(this)},run:function(e){var t,n=nt.propHooks[this.prop];return this.options.duration?this.pos=t=k.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):this.pos=t=e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):nt.propHooks._default.set(this),this}}).init.prototype=nt.prototype,(nt.propHooks={_default:{get:function(e){var t;return 1!==e.elem.nodeType||null!=e.elem[e.prop]&&null==e.elem.style[e.prop]?e.elem[e.prop]:(t=k.css(e.elem,e.prop,""))&&"auto"!==t?t:0},set:function(e){k.fx.step[e.prop]?k.fx.step[e.prop](e):1!==e.elem.nodeType||!k.cssHooks[e.prop]&&null==e.elem.style[Ge(e.prop)]?e.elem[e.prop]=e.now:k.style(e.elem,e.prop,e.now+e.unit)}}}).scrollTop=nt.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},k.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},_default:"swing"},k.fx=nt.prototype.init,k.fx.step={};var rt,it,ot,at,st=/^(?:toggle|show|hide)$/,ut=/queueHooks$/;function lt(){it&&(!1===E.hidden&&C.requestAnimationFrame?C.requestAnimationFrame(lt):C.setTimeout(lt,k.fx.interval),k.fx.tick())}function ct(){return C.setTimeout(function(){rt=void 0}),rt=Date.now()}function ft(e,t){var n,r=0,i={height:e};for(t=t?1:0;r<4;r+=2-t)i["margin"+(n=re[r])]=i["padding"+n]=e;return t&&(i.opacity=i.width=e),i}function pt(e,t,n){for(var r,i=(dt.tweeners[t]||[]).concat(dt.tweeners["*"]),o=0,a=i.length;o<a;o++)if(r=i[o].call(n,t,e))return r}function dt(o,e,t){var n,a,r=0,i=dt.prefilters.length,s=k.Deferred().always(function(){delete u.elem}),u=function(){if(a)return!1;for(var e=rt||ct(),t=Math.max(0,l.startTime+l.duration-e),n=1-(t/l.duration||0),r=0,i=l.tweens.length;r<i;r++)l.tweens[r].run(n);return s.notifyWith(o,[l,n,t]),n<1&&i?t:(i||s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l]),!1)},l=s.promise({elem:o,props:k.extend({},e),opts:k.extend(!0,{specialEasing:{},easing:k.easing._default},t),originalProperties:e,originalOptions:t,startTime:rt||ct(),duration:t.duration,tweens:[],createTween:function(e,t){var 
n=k.Tween(o,l.opts,e,t,l.opts.specialEasing[e]||l.opts.easing);return l.tweens.push(n),n},stop:function(e){var t=0,n=e?l.tweens.length:0;if(a)return this;for(a=!0;t<n;t++)l.tweens[t].run(1);return e?(s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l,e])):s.rejectWith(o,[l,e]),this}}),c=l.props;for(!function(e,t){var n,r,i,o,a;for(n in e)if(i=t[r=V(n)],o=e[n],Array.isArray(o)&&(i=o[1],o=e[n]=o[0]),n!==r&&(e[r]=o,delete e[n]),(a=k.cssHooks[r])&&"expand"in a)for(n in o=a.expand(o),delete e[r],o)n in e||(e[n]=o[n],t[n]=i);else t[r]=i}(c,l.opts.specialEasing);r<i;r++)if(n=dt.prefilters[r].call(l,o,c,l.opts))return m(n.stop)&&(k._queueHooks(l.elem,l.opts.queue).stop=n.stop.bind(n)),n;return k.map(c,pt,l),m(l.opts.start)&&l.opts.start.call(o,l),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always),k.fx.timer(k.extend(u,{elem:o,anim:l,queue:l.opts.queue})),l}k.Animation=k.extend(dt,{tweeners:{"*":[function(e,t){var n=this.createTween(e,t);return le(n.elem,e,ne.exec(t),n),n}]},tweener:function(e,t){m(e)?(t=e,e=["*"]):e=e.match(R);for(var n,r=0,i=e.length;r<i;r++)n=e[r],dt.tweeners[n]=dt.tweeners[n]||[],dt.tweeners[n].unshift(t)},prefilters:[function(e,t,n){var r,i,o,a,s,u,l,c,f="width"in t||"height"in t,p=this,d={},h=e.style,g=e.nodeType&&se(e),v=Q.get(e,"fxshow");for(r in n.queue||(null==(a=k._queueHooks(e,"fx")).unqueued&&(a.unqueued=0,s=a.empty.fire,a.empty.fire=function(){a.unqueued||s()}),a.unqueued++,p.always(function(){p.always(function(){a.unqueued--,k.queue(e,"fx").length||a.empty.fire()})})),t)if(i=t[r],st.test(i)){if(delete t[r],o=o||"toggle"===i,i===(g?"hide":"show")){if("show"!==i||!v||void 0===v[r])continue;g=!0}d[r]=v&&v[r]||k.style(e,r)}if((u=!k.isEmptyObject(t))||!k.isEmptyObject(d))for(r in f&&1===e.nodeType&&(n.overflow=[h.overflow,h.overflowX,h.overflowY],null==(l=v&&v.display)&&(l=Q.get(e,"display")),"none"===(c=k.css(e,"display"))&&(l?c=l:(fe([e],!0),l=e.style.display||l,c=k.css(e,"display"),fe([e]))),("inline"===c||"inline-block"===c&&null!=l)&&"none"===k.css(e,"float")&&(u||(p.done(function(){h.display=l}),null==l&&(c=h.display,l="none"===c?"":c)),h.display="inline-block")),n.overflow&&(h.overflow="hidden",p.always(function(){h.overflow=n.overflow[0],h.overflowX=n.overflow[1],h.overflowY=n.overflow[2]})),u=!1,d)u||(v?"hidden"in v&&(g=v.hidden):v=Q.access(e,"fxshow",{display:l}),o&&(v.hidden=!g),g&&fe([e],!0),p.done(function(){for(r in g||fe([e]),Q.remove(e,"fxshow"),d)k.style(e,r,d[r])})),u=pt(g?v[r]:0,r,p),r in v||(v[r]=u.start,g&&(u.end=u.start,u.start=0))}],prefilter:function(e,t){t?dt.prefilters.unshift(e):dt.prefilters.push(e)}}),k.speed=function(e,t,n){var r=e&&"object"==typeof e?k.extend({},e):{complete:n||!n&&t||m(e)&&e,duration:e,easing:n&&t||t&&!m(t)&&t};return k.fx.off?r.duration=0:"number"!=typeof r.duration&&(r.duration in k.fx.speeds?r.duration=k.fx.speeds[r.duration]:r.duration=k.fx.speeds._default),null!=r.queue&&!0!==r.queue||(r.queue="fx"),r.old=r.complete,r.complete=function(){m(r.old)&&r.old.call(this),r.queue&&k.dequeue(this,r.queue)},r},k.fn.extend({fadeTo:function(e,t,n,r){return this.filter(se).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(t,e,n,r){var i=k.isEmptyObject(t),o=k.speed(e,n,r),a=function(){var e=dt(this,k.extend({},t),o);(i||Q.get(this,"finish"))&&e.stop(!0)};return a.finish=a,i||!1===o.queue?this.each(a):this.queue(o.queue,a)},stop:function(i,e,o){var a=function(e){var t=e.stop;delete e.stop,t(o)};return"string"!=typeof i&&(o=e,e=i,i=void 
0),e&&!1!==i&&this.queue(i||"fx",[]),this.each(function(){var e=!0,t=null!=i&&i+"queueHooks",n=k.timers,r=Q.get(this);if(t)r[t]&&r[t].stop&&a(r[t]);else for(t in r)r[t]&&r[t].stop&&ut.test(t)&&a(r[t]);for(t=n.length;t--;)n[t].elem!==this||null!=i&&n[t].queue!==i||(n[t].anim.stop(o),e=!1,n.splice(t,1));!e&&o||k.dequeue(this,i)})},finish:function(a){return!1!==a&&(a=a||"fx"),this.each(function(){var e,t=Q.get(this),n=t[a+"queue"],r=t[a+"queueHooks"],i=k.timers,o=n?n.length:0;for(t.finish=!0,k.queue(this,a,[]),r&&r.stop&&r.stop.call(this,!0),e=i.length;e--;)i[e].elem===this&&i[e].queue===a&&(i[e].anim.stop(!0),i.splice(e,1));for(e=0;e<o;e++)n[e]&&n[e].finish&&n[e].finish.call(this);delete t.finish})}}),k.each(["toggle","show","hide"],function(e,r){var i=k.fn[r];k.fn[r]=function(e,t,n){return null==e||"boolean"==typeof e?i.apply(this,arguments):this.animate(ft(r,!0),e,t,n)}}),k.each({slideDown:ft("show"),slideUp:ft("hide"),slideToggle:ft("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,r){k.fn[e]=function(e,t,n){return this.animate(r,e,t,n)}}),k.timers=[],k.fx.tick=function(){var e,t=0,n=k.timers;for(rt=Date.now();t<n.length;t++)(e=n[t])()||n[t]!==e||n.splice(t--,1);n.length||k.fx.stop(),rt=void 0},k.fx.timer=function(e){k.timers.push(e),k.fx.start()},k.fx.interval=13,k.fx.start=function(){it||(it=!0,lt())},k.fx.stop=function(){it=null},k.fx.speeds={slow:600,fast:200,_default:400},k.fn.delay=function(r,e){return r=k.fx&&k.fx.speeds[r]||r,e=e||"fx",this.queue(e,function(e,t){var n=C.setTimeout(e,r);t.stop=function(){C.clearTimeout(n)}})},ot=E.createElement("input"),at=E.createElement("select").appendChild(E.createElement("option")),ot.type="checkbox",y.checkOn=""!==ot.value,y.optSelected=at.selected,(ot=E.createElement("input")).value="t",ot.type="radio",y.radioValue="t"===ot.value;var ht,gt=k.expr.attrHandle;k.fn.extend({attr:function(e,t){return _(this,k.attr,e,t,1<arguments.length)},removeAttr:function(e){return this.each(function(){k.removeAttr(this,e)})}}),k.extend({attr:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return"undefined"==typeof e.getAttribute?k.prop(e,t,n):(1===o&&k.isXMLDoc(e)||(i=k.attrHooks[t.toLowerCase()]||(k.expr.match.bool.test(t)?ht:void 0)),void 0!==n?null===n?void k.removeAttr(e,t):i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:(e.setAttribute(t,n+""),n):i&&"get"in i&&null!==(r=i.get(e,t))?r:null==(r=k.find.attr(e,t))?void 0:r)},attrHooks:{type:{set:function(e,t){if(!y.radioValue&&"radio"===t&&A(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},removeAttr:function(e,t){var n,r=0,i=t&&t.match(R);if(i&&1===e.nodeType)while(n=i[r++])e.removeAttribute(n)}}),ht={set:function(e,t,n){return!1===t?k.removeAttr(e,n):e.setAttribute(n,n),n}},k.each(k.expr.match.bool.source.match(/\w+/g),function(e,t){var a=gt[t]||k.find.attr;gt[t]=function(e,t,n){var r,i,o=t.toLowerCase();return n||(i=gt[o],gt[o]=r,r=null!=a(e,t,n)?o:null,gt[o]=i),r}});var vt=/^(?:input|select|textarea|button)$/i,yt=/^(?:a|area)$/i;function mt(e){return(e.match(R)||[]).join(" ")}function xt(e){return e.getAttribute&&e.getAttribute("class")||""}function bt(e){return Array.isArray(e)?e:"string"==typeof e&&e.match(R)||[]}k.fn.extend({prop:function(e,t){return _(this,k.prop,e,t,1<arguments.length)},removeProp:function(e){return this.each(function(){delete this[k.propFix[e]||e]})}}),k.extend({prop:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return 1===o&&k.isXMLDoc(e)||(t=k.propFix[t]||t,i=k.propHooks[t]),void 
0!==n?i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:e[t]=n:i&&"get"in i&&null!==(r=i.get(e,t))?r:e[t]},propHooks:{tabIndex:{get:function(e){var t=k.find.attr(e,"tabindex");return t?parseInt(t,10):vt.test(e.nodeName)||yt.test(e.nodeName)&&e.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),y.optSelected||(k.propHooks.selected={get:function(e){var t=e.parentNode;return t&&t.parentNode&&t.parentNode.selectedIndex,null},set:function(e){var t=e.parentNode;t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex)}}),k.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){k.propFix[this.toLowerCase()]=this}),k.fn.extend({addClass:function(t){var e,n,r,i,o,a,s,u=0;if(m(t))return this.each(function(e){k(this).addClass(t.call(this,e,xt(this)))});if((e=bt(t)).length)while(n=this[u++])if(i=xt(n),r=1===n.nodeType&&" "+mt(i)+" "){a=0;while(o=e[a++])r.indexOf(" "+o+" ")<0&&(r+=o+" ");i!==(s=mt(r))&&n.setAttribute("class",s)}return this},removeClass:function(t){var e,n,r,i,o,a,s,u=0;if(m(t))return this.each(function(e){k(this).removeClass(t.call(this,e,xt(this)))});if(!arguments.length)return this.attr("class","");if((e=bt(t)).length)while(n=this[u++])if(i=xt(n),r=1===n.nodeType&&" "+mt(i)+" "){a=0;while(o=e[a++])while(-1<r.indexOf(" "+o+" "))r=r.replace(" "+o+" "," ");i!==(s=mt(r))&&n.setAttribute("class",s)}return this},toggleClass:function(i,t){var o=typeof i,a="string"===o||Array.isArray(i);return"boolean"==typeof t&&a?t?this.addClass(i):this.removeClass(i):m(i)?this.each(function(e){k(this).toggleClass(i.call(this,e,xt(this),t),t)}):this.each(function(){var e,t,n,r;if(a){t=0,n=k(this),r=bt(i);while(e=r[t++])n.hasClass(e)?n.removeClass(e):n.addClass(e)}else void 0!==i&&"boolean"!==o||((e=xt(this))&&Q.set(this,"__className__",e),this.setAttribute&&this.setAttribute("class",e||!1===i?"":Q.get(this,"__className__")||""))})},hasClass:function(e){var t,n,r=0;t=" "+e+" ";while(n=this[r++])if(1===n.nodeType&&-1<(" "+mt(xt(n))+" ").indexOf(t))return!0;return!1}});var wt=/\r/g;k.fn.extend({val:function(n){var r,e,i,t=this[0];return arguments.length?(i=m(n),this.each(function(e){var t;1===this.nodeType&&(null==(t=i?n.call(this,e,k(this).val()):n)?t="":"number"==typeof t?t+="":Array.isArray(t)&&(t=k.map(t,function(e){return null==e?"":e+""})),(r=k.valHooks[this.type]||k.valHooks[this.nodeName.toLowerCase()])&&"set"in r&&void 0!==r.set(this,t,"value")||(this.value=t))})):t?(r=k.valHooks[t.type]||k.valHooks[t.nodeName.toLowerCase()])&&"get"in r&&void 0!==(e=r.get(t,"value"))?e:"string"==typeof(e=t.value)?e.replace(wt,""):null==e?"":e:void 0}}),k.extend({valHooks:{option:{get:function(e){var t=k.find.attr(e,"value");return null!=t?t:mt(k.text(e))}},select:{get:function(e){var t,n,r,i=e.options,o=e.selectedIndex,a="select-one"===e.type,s=a?null:[],u=a?o+1:i.length;for(r=o<0?u:a?o:0;r<u;r++)if(((n=i[r]).selected||r===o)&&!n.disabled&&(!n.parentNode.disabled||!A(n.parentNode,"optgroup"))){if(t=k(n).val(),a)return t;s.push(t)}return s},set:function(e,t){var n,r,i=e.options,o=k.makeArray(t),a=i.length;while(a--)((r=i[a]).selected=-1<k.inArray(k.valHooks.option.get(r),o))&&(n=!0);return n||(e.selectedIndex=-1),o}}}}),k.each(["radio","checkbox"],function(){k.valHooks[this]={set:function(e,t){if(Array.isArray(t))return e.checked=-1<k.inArray(k(e).val(),t)}},y.checkOn||(k.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})}),y.focusin="onfocusin"in C;var 
Tt=/^(?:focusinfocus|focusoutblur)$/,Ct=function(e){e.stopPropagation()};k.extend(k.event,{trigger:function(e,t,n,r){var i,o,a,s,u,l,c,f,p=[n||E],d=v.call(e,"type")?e.type:e,h=v.call(e,"namespace")?e.namespace.split("."):[];if(o=f=a=n=n||E,3!==n.nodeType&&8!==n.nodeType&&!Tt.test(d+k.event.triggered)&&(-1<d.indexOf(".")&&(d=(h=d.split(".")).shift(),h.sort()),u=d.indexOf(":")<0&&"on"+d,(e=e[k.expando]?e:new k.Event(d,"object"==typeof e&&e)).isTrigger=r?2:3,e.namespace=h.join("."),e.rnamespace=e.namespace?new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,e.result=void 0,e.target||(e.target=n),t=null==t?[e]:k.makeArray(t,[e]),c=k.event.special[d]||{},r||!c.trigger||!1!==c.trigger.apply(n,t))){if(!r&&!c.noBubble&&!x(n)){for(s=c.delegateType||d,Tt.test(s+d)||(o=o.parentNode);o;o=o.parentNode)p.push(o),a=o;a===(n.ownerDocument||E)&&p.push(a.defaultView||a.parentWindow||C)}i=0;while((o=p[i++])&&!e.isPropagationStopped())f=o,e.type=1<i?s:c.bindType||d,(l=(Q.get(o,"events")||{})[e.type]&&Q.get(o,"handle"))&&l.apply(o,t),(l=u&&o[u])&&l.apply&&G(o)&&(e.result=l.apply(o,t),!1===e.result&&e.preventDefault());return e.type=d,r||e.isDefaultPrevented()||c._default&&!1!==c._default.apply(p.pop(),t)||!G(n)||u&&m(n[d])&&!x(n)&&((a=n[u])&&(n[u]=null),k.event.triggered=d,e.isPropagationStopped()&&f.addEventListener(d,Ct),n[d](),e.isPropagationStopped()&&f.removeEventListener(d,Ct),k.event.triggered=void 0,a&&(n[u]=a)),e.result}},simulate:function(e,t,n){var r=k.extend(new k.Event,n,{type:e,isSimulated:!0});k.event.trigger(r,null,t)}}),k.fn.extend({trigger:function(e,t){return this.each(function(){k.event.trigger(e,t,this)})},triggerHandler:function(e,t){var n=this[0];if(n)return k.event.trigger(e,t,n,!0)}}),y.focusin||k.each({focus:"focusin",blur:"focusout"},function(n,r){var i=function(e){k.event.simulate(r,e.target,k.event.fix(e))};k.event.special[r]={setup:function(){var e=this.ownerDocument||this,t=Q.access(e,r);t||e.addEventListener(n,i,!0),Q.access(e,r,(t||0)+1)},teardown:function(){var e=this.ownerDocument||this,t=Q.access(e,r)-1;t?Q.access(e,r,t):(e.removeEventListener(n,i,!0),Q.remove(e,r))}}});var Et=C.location,kt=Date.now(),St=/\?/;k.parseXML=function(e){var t;if(!e||"string"!=typeof e)return null;try{t=(new C.DOMParser).parseFromString(e,"text/xml")}catch(e){t=void 0}return t&&!t.getElementsByTagName("parsererror").length||k.error("Invalid XML: "+e),t};var Nt=/\[\]$/,At=/\r?\n/g,Dt=/^(?:submit|button|image|reset|file)$/i,jt=/^(?:input|select|textarea|keygen)/i;function qt(n,e,r,i){var t;if(Array.isArray(e))k.each(e,function(e,t){r||Nt.test(n)?i(n,t):qt(n+"["+("object"==typeof t&&null!=t?e:"")+"]",t,r,i)});else if(r||"object"!==w(e))i(n,e);else for(t in e)qt(n+"["+t+"]",e[t],r,i)}k.param=function(e,t){var n,r=[],i=function(e,t){var n=m(t)?t():t;r[r.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?"":n)};if(null==e)return"";if(Array.isArray(e)||e.jquery&&!k.isPlainObject(e))k.each(e,function(){i(this.name,this.value)});else for(n in e)qt(n,e[n],t,i);return r.join("&")},k.fn.extend({serialize:function(){return k.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=k.prop(this,"elements");return e?k.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!k(this).is(":disabled")&&jt.test(this.nodeName)&&!Dt.test(e)&&(this.checked||!pe.test(e))}).map(function(e,t){var n=k(this).val();return 
null==n?null:Array.isArray(n)?k.map(n,function(e){return{name:t.name,value:e.replace(At,"\r\n")}}):{name:t.name,value:n.replace(At,"\r\n")}}).get()}});var Lt=/%20/g,Ht=/#.*$/,Ot=/([?&])_=[^&]*/,Pt=/^(.*?):[ \t]*([^\r\n]*)$/gm,Rt=/^(?:GET|HEAD)$/,Mt=/^\/\//,It={},Wt={},$t="*/".concat("*"),Ft=E.createElement("a");function Bt(o){return function(e,t){"string"!=typeof e&&(t=e,e="*");var n,r=0,i=e.toLowerCase().match(R)||[];if(m(t))while(n=i[r++])"+"===n[0]?(n=n.slice(1)||"*",(o[n]=o[n]||[]).unshift(t)):(o[n]=o[n]||[]).push(t)}}function _t(t,i,o,a){var s={},u=t===Wt;function l(e){var r;return s[e]=!0,k.each(t[e]||[],function(e,t){var n=t(i,o,a);return"string"!=typeof n||u||s[n]?u?!(r=n):void 0:(i.dataTypes.unshift(n),l(n),!1)}),r}return l(i.dataTypes[0])||!s["*"]&&l("*")}function zt(e,t){var n,r,i=k.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((i[n]?e:r||(r={}))[n]=t[n]);return r&&k.extend(!0,e,r),e}Ft.href=Et.href,k.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Et.href,type:"GET",isLocal:/^(?:about|app|app-storage|.+-extension|file|res|widget):$/.test(Et.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":$t,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":k.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?zt(zt(e,k.ajaxSettings),t):zt(k.ajaxSettings,e)},ajaxPrefilter:Bt(It),ajaxTransport:Bt(Wt),ajax:function(e,t){"object"==typeof e&&(t=e,e=void 0),t=t||{};var c,f,p,n,d,r,h,g,i,o,v=k.ajaxSetup({},t),y=v.context||v,m=v.context&&(y.nodeType||y.jquery)?k(y):k.event,x=k.Deferred(),b=k.Callbacks("once memory"),w=v.statusCode||{},a={},s={},u="canceled",T={readyState:0,getResponseHeader:function(e){var t;if(h){if(!n){n={};while(t=Pt.exec(p))n[t[1].toLowerCase()+" "]=(n[t[1].toLowerCase()+" "]||[]).concat(t[2])}t=n[e.toLowerCase()+" "]}return null==t?null:t.join(", ")},getAllResponseHeaders:function(){return h?p:null},setRequestHeader:function(e,t){return null==h&&(e=s[e.toLowerCase()]=s[e.toLowerCase()]||e,a[e]=t),this},overrideMimeType:function(e){return null==h&&(v.mimeType=e),this},statusCode:function(e){var t;if(e)if(h)T.always(e[T.status]);else for(t in e)w[t]=[w[t],e[t]];return this},abort:function(e){var t=e||u;return c&&c.abort(t),l(0,t),this}};if(x.promise(T),v.url=((e||v.url||Et.href)+"").replace(Mt,Et.protocol+"//"),v.type=t.method||t.type||v.method||v.type,v.dataTypes=(v.dataType||"*").toLowerCase().match(R)||[""],null==v.crossDomain){r=E.createElement("a");try{r.href=v.url,r.href=r.href,v.crossDomain=Ft.protocol+"//"+Ft.host!=r.protocol+"//"+r.host}catch(e){v.crossDomain=!0}}if(v.data&&v.processData&&"string"!=typeof v.data&&(v.data=k.param(v.data,v.traditional)),_t(It,v,t,T),h)return T;for(i in(g=k.event&&v.global)&&0==k.active++&&k.event.trigger("ajaxStart"),v.type=v.type.toUpperCase(),v.hasContent=!Rt.test(v.type),f=v.url.replace(Ht,""),v.hasContent?v.data&&v.processData&&0===(v.contentType||"").indexOf("application/x-www-form-urlencoded")&&(v.data=v.data.replace(Lt,"+")):(o=v.url.slice(f.length),v.data&&(v.processData||"string"==typeof v.data)&&(f+=(St.test(f)?"&":"?")+v.data,delete 
v.data),!1===v.cache&&(f=f.replace(Ot,"$1"),o=(St.test(f)?"&":"?")+"_="+kt+++o),v.url=f+o),v.ifModified&&(k.lastModified[f]&&T.setRequestHeader("If-Modified-Since",k.lastModified[f]),k.etag[f]&&T.setRequestHeader("If-None-Match",k.etag[f])),(v.data&&v.hasContent&&!1!==v.contentType||t.contentType)&&T.setRequestHeader("Content-Type",v.contentType),T.setRequestHeader("Accept",v.dataTypes[0]&&v.accepts[v.dataTypes[0]]?v.accepts[v.dataTypes[0]]+("*"!==v.dataTypes[0]?", "+$t+"; q=0.01":""):v.accepts["*"]),v.headers)T.setRequestHeader(i,v.headers[i]);if(v.beforeSend&&(!1===v.beforeSend.call(y,T,v)||h))return T.abort();if(u="abort",b.add(v.complete),T.done(v.success),T.fail(v.error),c=_t(Wt,v,t,T)){if(T.readyState=1,g&&m.trigger("ajaxSend",[T,v]),h)return T;v.async&&0<v.timeout&&(d=C.setTimeout(function(){T.abort("timeout")},v.timeout));try{h=!1,c.send(a,l)}catch(e){if(h)throw e;l(-1,e)}}else l(-1,"No Transport");function l(e,t,n,r){var i,o,a,s,u,l=t;h||(h=!0,d&&C.clearTimeout(d),c=void 0,p=r||"",T.readyState=0<e?4:0,i=200<=e&&e<300||304===e,n&&(s=function(e,t,n){var r,i,o,a,s=e.contents,u=e.dataTypes;while("*"===u[0])u.shift(),void 0===r&&(r=e.mimeType||t.getResponseHeader("Content-Type"));if(r)for(i in s)if(s[i]&&s[i].test(r)){u.unshift(i);break}if(u[0]in n)o=u[0];else{for(i in n){if(!u[0]||e.converters[i+" "+u[0]]){o=i;break}a||(a=i)}o=o||a}if(o)return o!==u[0]&&u.unshift(o),n[o]}(v,T,n)),s=function(e,t,n,r){var i,o,a,s,u,l={},c=e.dataTypes.slice();if(c[1])for(a in e.converters)l[a.toLowerCase()]=e.converters[a];o=c.shift();while(o)if(e.responseFields[o]&&(n[e.responseFields[o]]=t),!u&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u=o,o=c.shift())if("*"===o)o=u;else if("*"!==u&&u!==o){if(!(a=l[u+" "+o]||l["* "+o]))for(i in l)if((s=i.split(" "))[1]===o&&(a=l[u+" "+s[0]]||l["* "+s[0]])){!0===a?a=l[i]:!0!==l[i]&&(o=s[0],c.unshift(s[1]));break}if(!0!==a)if(a&&e["throws"])t=a(t);else try{t=a(t)}catch(e){return{state:"parsererror",error:a?e:"No conversion from "+u+" to "+o}}}return{state:"success",data:t}}(v,s,T,i),i?(v.ifModified&&((u=T.getResponseHeader("Last-Modified"))&&(k.lastModified[f]=u),(u=T.getResponseHeader("etag"))&&(k.etag[f]=u)),204===e||"HEAD"===v.type?l="nocontent":304===e?l="notmodified":(l=s.state,o=s.data,i=!(a=s.error))):(a=l,!e&&l||(l="error",e<0&&(e=0))),T.status=e,T.statusText=(t||l)+"",i?x.resolveWith(y,[o,l,T]):x.rejectWith(y,[T,l,a]),T.statusCode(w),w=void 0,g&&m.trigger(i?"ajaxSuccess":"ajaxError",[T,v,i?o:a]),b.fireWith(y,[T,l]),g&&(m.trigger("ajaxComplete",[T,v]),--k.active||k.event.trigger("ajaxStop")))}return T},getJSON:function(e,t,n){return k.get(e,t,n,"json")},getScript:function(e,t){return k.get(e,void 0,t,"script")}}),k.each(["get","post"],function(e,i){k[i]=function(e,t,n,r){return m(t)&&(r=r||n,n=t,t=void 0),k.ajax(k.extend({url:e,type:i,dataType:r,data:t,success:n},k.isPlainObject(e)&&e))}}),k._evalUrl=function(e,t){return k.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,converters:{"text script":function(){}},dataFilter:function(e){k.globalEval(e,t)}})},k.fn.extend({wrapAll:function(e){var t;return this[0]&&(m(e)&&(e=e.call(this[0])),t=k(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstElementChild)e=e.firstElementChild;return e}).append(this)),this},wrapInner:function(n){return m(n)?this.each(function(e){k(this).wrapInner(n.call(this,e))}):this.each(function(){var e=k(this),t=e.contents();t.length?t.wrapAll(n):e.append(n)})},wrap:function(t){var n=m(t);return 
this.each(function(e){k(this).wrapAll(n?t.call(this,e):t)})},unwrap:function(e){return this.parent(e).not("body").each(function(){k(this).replaceWith(this.childNodes)}),this}}),k.expr.pseudos.hidden=function(e){return!k.expr.pseudos.visible(e)},k.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},k.ajaxSettings.xhr=function(){try{return new C.XMLHttpRequest}catch(e){}};var Ut={0:200,1223:204},Xt=k.ajaxSettings.xhr();y.cors=!!Xt&&"withCredentials"in Xt,y.ajax=Xt=!!Xt,k.ajaxTransport(function(i){var o,a;if(y.cors||Xt&&!i.crossDomain)return{send:function(e,t){var n,r=i.xhr();if(r.open(i.type,i.url,i.async,i.username,i.password),i.xhrFields)for(n in i.xhrFields)r[n]=i.xhrFields[n];for(n in i.mimeType&&r.overrideMimeType&&r.overrideMimeType(i.mimeType),i.crossDomain||e["X-Requested-With"]||(e["X-Requested-With"]="XMLHttpRequest"),e)r.setRequestHeader(n,e[n]);o=function(e){return function(){o&&(o=a=r.onload=r.onerror=r.onabort=r.ontimeout=r.onreadystatechange=null,"abort"===e?r.abort():"error"===e?"number"!=typeof r.status?t(0,"error"):t(r.status,r.statusText):t(Ut[r.status]||r.status,r.statusText,"text"!==(r.responseType||"text")||"string"!=typeof r.responseText?{binary:r.response}:{text:r.responseText},r.getAllResponseHeaders()))}},r.onload=o(),a=r.onerror=r.ontimeout=o("error"),void 0!==r.onabort?r.onabort=a:r.onreadystatechange=function(){4===r.readyState&&C.setTimeout(function(){o&&a()})},o=o("abort");try{r.send(i.hasContent&&i.data||null)}catch(e){if(o)throw e}},abort:function(){o&&o()}}}),k.ajaxPrefilter(function(e){e.crossDomain&&(e.contents.script=!1)}),k.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return k.globalEval(e),e}}}),k.ajaxPrefilter("script",function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")}),k.ajaxTransport("script",function(n){var r,i;if(n.crossDomain||n.scriptAttrs)return{send:function(e,t){r=k("<script>").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var Vt,Gt=[],Yt=/(=)\?(?=&|$)|\?\?/;k.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Gt.pop()||k.expando+"_"+kt++;return this[e]=!0,e}}),k.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Yt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Yt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Yt,"$1"+r):!1!==e.jsonp&&(e.url+=(St.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||k.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?k(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Gt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((Vt=E.implementation.createHTMLDocument("").body).innerHTML="<form></form><form></form>",2===Vt.childNodes.length),k.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof 
t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=D.exec(e))?[t.createElement(i[1])]:(i=we([e],t,o),o&&o.length&&k(o).remove(),k.merge([],i.childNodes)));var r,i,o},k.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1<s&&(r=mt(e.slice(s)),e=e.slice(0,s)),m(t)?(n=t,t=void 0):t&&"object"==typeof t&&(i="POST"),0<a.length&&k.ajax({url:e,type:i||"GET",dataType:"html",data:t}).done(function(e){o=arguments,a.html(r?k("<div>").append(k.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},k.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){k.fn[t]=function(e){return this.on(t,e)}}),k.expr.pseudos.animated=function(t){return k.grep(k.timers,function(e){return t===e.elem}).length},k.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=k.css(e,"position"),c=k(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=k.css(e,"top"),u=k.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,k.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},k.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){k.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===k.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===k.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=k(e).offset()).top+=k.css(e,"borderTopWidth",!0),i.left+=k.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-k.css(r,"marginTop",!0),left:t.left-i.left-k.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===k.css(e,"position"))e=e.offsetParent;return e||ie})}}),k.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;k.fn[t]=function(e){return _(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),k.each(["top","left"],function(e,n){k.cssHooks[n]=ze(y.pixelPosition,function(e,t){if(t)return t=_e(e,n),$e.test(t)?k(e).position()[n]+"px":t})}),k.each({Height:"height",Width:"width"},function(a,s){k.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){k.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return _(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?k.css(e,t,i):k.style(e,t,n,i)},s,n?e:void 0,n)}})}),k.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup 
contextmenu".split(" "),function(e,n){k.fn[n]=function(e,t){return 0<arguments.length?this.on(n,null,e,t):this.trigger(n)}}),k.fn.extend({hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),k.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)}}),k.proxy=function(e,t){var n,r,i;if("string"==typeof t&&(n=e[t],t=e,e=n),m(e))return r=s.call(arguments,2),(i=function(){return e.apply(t||this,r.concat(s.call(arguments)))}).guid=e.guid=e.guid||k.guid++,i},k.holdReady=function(e){e?k.readyWait++:k.ready(!0)},k.isArray=Array.isArray,k.parseJSON=JSON.parse,k.nodeName=A,k.isFunction=m,k.isWindow=x,k.camelCase=V,k.type=w,k.now=Date.now,k.isNumeric=function(e){var t=k.type(e);return("number"===t||"string"===t)&&!isNaN(e-parseFloat(e))},"function"==typeof define&&define.amd&&define("jquery",[],function(){return k});var Qt=C.jQuery,Jt=C.$;return k.noConflict=function(e){return C.$===k&&(C.$=Jt),e&&C.jQuery===k&&(C.jQuery=Qt),k},e||(C.jQuery=C.$=k),k});
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css b/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css
deleted file mode 100644
index b8a5c0f..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css
+++ /dev/null
@@ -1,2 +0,0 @@
-.nvd3 .nv-axis line,.nvd3 .nv-axis path{fill:none;shape-rendering:crispEdges}.nv-brush .extent,.nvd3 .background path,.nvd3 .nv-axis line,.nvd3 .nv-axis path{shape-rendering:crispEdges}.nv-distx,.nv-disty,.nv-noninteractive,.nvd3 .nv-axis,.nvd3.nv-pie .nv-label,.nvd3.nv-sparklineplus g.nv-hoverValue{pointer-events:none}.nvd3 .nv-axis{opacity:1}.nvd3 .nv-axis.nv-disabled,.nvd3 .nv-controlsWrap .nv-legend .nv-check-box .nv-check{opacity:0}.nvd3 .nv-axis path{stroke:#000;stroke-opacity:.75}.nvd3 .nv-axis path.domain{stroke-opacity:.75}.nvd3 .nv-axis.nv-x path.domain{stroke-opacity:0}.nvd3 .nv-axis line{stroke:#e5e5e5}.nvd3 .nv-axis .zero line, .nvd3 .nv-axis line.zero{stroke-opacity:.75}.nvd3 .nv-axis .nv-axisMaxMin text{font-weight:700}.nvd3 .x .nv-axis .nv-axisMaxMin text,.nvd3 .x2 .nv-axis .nv-axisMaxMin text,.nvd3 .x3 .nv-axis .nv-axisMaxMin text{text-anchor:middle}.nvd3 .nv-bars rect{fill-opacity:.75;transition:fill-opacity 250ms linear}.nvd3 .nv-bars rect.hover{fill-opacity:1}.nvd3 .nv-bars .hover rect{fill:#add8e6}.nvd3 .nv-bars text{fill:transparent}.nvd3 .nv-bars .hover text{fill:rgba(0,0,0,1)}.nvd3 .nv-discretebar .nv-groups rect,.nvd3 .nv-multibar .nv-groups rect,.nvd3 .nv-multibarHorizontal .nv-groups rect{stroke-opacity:0;transition:fill-opacity 250ms linear}.with-transitions .nv-candlestickBar .nv-ticks .nv-tick,.with-transitions .nvd3 .nv-groups .nv-point{transition:stroke-width 250ms linear,stroke-opacity 250ms linear}.nvd3 .nv-candlestickBar .nv-ticks rect:hover,.nvd3 .nv-discretebar .nv-groups rect:hover,.nvd3 .nv-multibar .nv-groups rect:hover,.nvd3 .nv-multibarHorizontal .nv-groups rect:hover{fill-opacity:1}.nvd3 .nv-discretebar .nv-groups text,.nvd3 .nv-multibarHorizontal .nv-groups text{font-weight:700;fill:rgba(0,0,0,1);stroke:transparent}.nvd3 .nv-boxplot circle{fill-opacity:.5}.nvd3 .nv-boxplot circle:hover,.nvd3 .nv-boxplot rect:hover{fill-opacity:1}.nvd3 line.nv-boxplot-median{stroke:#000}.nv-boxplot-tick:hover{stroke-width:2.5px}.nvd3.nv-bullet{font:10px sans-serif}.nvd3.nv-bullet .nv-measure{fill-opacity:.8}.nvd3.nv-bullet .nv-measure:hover{fill-opacity:1}.nvd3.nv-bullet .nv-marker{stroke:#000;stroke-width:2px}.nvd3.nv-bullet .nv-markerTriangle{stroke:#000;fill:#fff;stroke-width:1.5px}.nvd3.nv-bullet .nv-markerLine{stroke:#000;stroke-width:1.5px}.nvd3.nv-bullet .nv-tick line{stroke:#666;stroke-width:.5px}.nvd3.nv-bullet .nv-range.nv-s0{fill:#eee}.nvd3.nv-bullet .nv-range.nv-s1{fill:#ddd}.nvd3.nv-bullet .nv-range.nv-s2{fill:#ccc}.nvd3.nv-bullet .nv-title{font-size:14px;font-weight:700}.nvd3.nv-bullet .nv-subtitle{fill:#999}.nvd3.nv-bullet .nv-range{fill:#bababa;fill-opacity:.4}.nvd3.nv-bullet .nv-range:hover{fill-opacity:.7}.nvd3.nv-candlestickBar .nv-ticks .nv-tick{stroke-width:1px}.nvd3.nv-candlestickBar .nv-ticks .nv-tick.hover{stroke-width:2px}.nvd3.nv-candlestickBar .nv-ticks .nv-tick.positive rect{stroke:#2ca02c;fill:#2ca02c}.nvd3.nv-candlestickBar .nv-ticks .nv-tick.negative rect{stroke:#d62728;fill:#d62728}.nvd3.nv-candlestickBar .nv-ticks line{stroke:#333}.nv-force-node{stroke:#fff;stroke-width:1.5px}.nv-force-link{stroke:#999;stroke-opacity:.6}.nv-force-node text{stroke-width:0}.nvd3 .nv-check-box .nv-box{fill-opacity:0;stroke-width:2}.nvd3 .nv-check-box .nv-check{fill-opacity:0;stroke-width:4}.nvd3 .nv-series.nv-disabled .nv-check-box .nv-check{fill-opacity:0;stroke-opacity:0}.nvd3.nv-linePlusBar .nv-bar rect{fill-opacity:.75}.nvd3.nv-linePlusBar .nv-bar rect:hover{fill-opacity:1}.nvd3 .nv-groups path.nv-line{fill:none}.nvd3 .nv-groups 
path.nv-area{stroke:none}.nvd3.nv-line .nvd3.nv-scatter .nv-groups .nv-point{fill-opacity:0;stroke-opacity:0}.nvd3.nv-scatter.nv-single-point .nv-groups .nv-point{fill-opacity:.5!important;stroke-opacity:.5!important}.nvd3 .nv-groups .nv-point.hover,.nvd3.nv-scatter .nv-groups .nv-point.hover{stroke-width:7px;fill-opacity:.95!important;stroke-opacity:.95!important}.nvd3 .nv-point-paths path{stroke:#aaa;stroke-opacity:0;fill:#eee;fill-opacity:0}.nvd3 .nv-indexLine{cursor:ew-resize}svg.nvd3-svg{-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;display:block;width:100%;height:100%}.nvtooltip.with-3d-shadow,.with-3d-shadow .nvtooltip{box-shadow:0 5px 10px rgba(0,0,0,.2);border-radius:5px}.nvd3 text{font:400 12px Arial,sans-serif}.nvd3 .title{font:700 14px Arial,sans-serif}.nvd3 .nv-background{fill:#fff;fill-opacity:0}.nvd3.nv-noData{font-size:18px;font-weight:700}.nv-brush .extent{fill-opacity:.125}.nv-brush .resize path{fill:#eee;stroke:#666}.nvd3 .nv-legend .nv-series{cursor:pointer}.nvd3 .nv-legend .nv-disabled circle{fill-opacity:0}.nvd3 .nv-brush .extent{fill-opacity:0!important}.nvd3 .nv-brushBackground rect{stroke:#000;stroke-width:.4;fill:#fff;fill-opacity:.7}@media print{.nvd3 text{stroke-width:0;fill-opacity:1}}.nvd3.nv-ohlcBar .nv-ticks .nv-tick{stroke-width:1px}.nvd3.nv-ohlcBar .nv-ticks .nv-tick.hover{stroke-width:2px}.nvd3.nv-ohlcBar .nv-ticks .nv-tick.positive{stroke:#2ca02c}.nvd3.nv-ohlcBar .nv-ticks .nv-tick.negative{stroke:#d62728}.nvd3 .background path{fill:none;stroke:#EEE;stroke-opacity:.4}.nvd3 .foreground path{fill:none;stroke-opacity:.7}.nvd3 .nv-parallelCoordinates-brush .extent{fill:#fff;fill-opacity:.6;stroke:gray;shape-rendering:crispEdges}.nvd3 .nv-parallelCoordinates .hover{fill-opacity:1;stroke-width:3px}.nvd3 .missingValuesline line{fill:none;stroke:#000;stroke-width:1;stroke-opacity:1;stroke-dasharray:5,5}.nvd3.nv-pie .nv-pie-title{font-size:24px;fill:rgba(19,196,249,.59)}.nvd3.nv-pie .nv-slice text{stroke:#000;stroke-width:0}.nvd3.nv-pie path{transition:fill-opacity 250ms linear,stroke-width 250ms linear,stroke-opacity 250ms linear;stroke:#fff;stroke-width:1px;stroke-opacity:1;fill-opacity:.7}.nvd3.nv-pie .hover path{fill-opacity:1}.nvd3.nv-pie .nv-label rect{fill-opacity:0;stroke-opacity:0}.nvd3 .nv-groups .nv-point.hover{stroke-width:20px;stroke-opacity:.5}.nvd3 .nv-scatter .nv-point.hover{fill-opacity:1}.nvd3.nv-sparkline path{fill:none}.nvd3.nv-sparklineplus .nv-hoverValue line{stroke:#333;stroke-width:1.5px}.nvd3.nv-sparklineplus,.nvd3.nv-sparklineplus g{pointer-events:all}.nvd3 .nv-interactiveGuideLine,.nvtooltip{pointer-events:none}.nvd3 .nv-hoverArea{fill-opacity:0;stroke-opacity:0}.nvd3.nv-sparklineplus .nv-xValue,.nvd3.nv-sparklineplus .nv-yValue{stroke-width:0;font-size:.9em;font-weight:400}.nvd3.nv-sparklineplus .nv-yValue{stroke:#f66}.nvd3.nv-sparklineplus .nv-maxValue{stroke:#2ca02c;fill:#2ca02c}.nvd3.nv-sparklineplus .nv-minValue{stroke:#d62728;fill:#d62728}.nvd3.nv-sparklineplus .nv-currentValue{font-weight:700;font-size:1.1em}.nvtooltip h3,.nvtooltip table td.key{font-weight:400}.nvd3.nv-stackedarea path.nv-area{fill-opacity:.7;stroke-opacity:0;transition:fill-opacity 250ms linear,stroke-opacity 250ms linear}.nvd3.nv-stackedarea path.nv-area.hover{fill-opacity:.9}.nvd3.nv-stackedarea .nv-groups 
.nv-point{stroke-opacity:0;fill-opacity:0}.nvtooltip{position:absolute;color:rgba(0,0,0,1);padding:1px;z-index:10000;display:block;font-family:Arial,sans-serif;font-size:13px;text-align:left;white-space:nowrap;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background:rgba(255,255,255,.8);border:1px solid rgba(0,0,0,.5);border-radius:4px}.nvtooltip h3,.nvtooltip p{margin:0;text-align:center}.nvtooltip.with-transitions,.with-transitions .nvtooltip{transition:opacity 50ms linear;transition-delay:200ms}.nvtooltip.x-nvtooltip,.nvtooltip.y-nvtooltip{padding:8px}.nvtooltip h3{padding:4px 14px;line-height:18px;background-color:rgba(247,247,247,.75);color:rgba(0,0,0,1);border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.nvtooltip p{padding:5px 14px}.nvtooltip span{display:inline-block;margin:2px 0}.nvtooltip table{margin:6px;border-spacing:0}.nvtooltip table td{padding:2px 9px 2px 0;vertical-align:middle}.nvtooltip table td.key.total{font-weight:700}.nvtooltip table td.value{text-align:right;font-weight:700}.nvtooltip table td.percent{color:#a9a9a9}.nvtooltip table tr.highlight td{padding:1px 9px 1px 0;border-bottom-style:solid;border-bottom-width:1px;border-top-style:solid;border-top-width:1px}.nvtooltip table td.legend-color-guide div{vertical-align:middle;width:12px;height:12px;border:1px solid #999}.nvtooltip .footer{padding:3px;text-align:center}.nvtooltip-pending-removal{pointer-events:none;display:none}.nvd3 line.nv-guideline{stroke:#ccc}
-/*# sourceMappingURL=nv.d3.min.css.map */
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css.map b/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css.map
deleted file mode 100644
index 63380e6..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"sources":["build/nv.d3.css"],"names":[],"mappings":"AAqBA,oBAfA,oBAgBI,KAAM,KAiWN,gBAAiB,WA/ErB,kBA+DA,uBAlVA,oBAfA,oBAiXI,gBAAiB,WAqErB,UAAW,UAJX,mBAvbA,eAoaA,uBAgCA,uCACI,eAAgB,KArcpB,eAEI,QAAS,EAuCb,2BAsJA,0DACI,QAAS,EA3Lb,oBAEI,OAAQ,KACR,eAAgB,IAIpB,2BACI,eAAgB,IAGpB,gCACI,eAAgB,EAGpB,oBAEI,OAAQ,QAIZ,0BACI,0BACA,eAAgB,IAGpB,mCACI,YAAa,IAGjB,sCACA,uCACA,uCACI,YAAa,OAOjB,oBACI,aAAc,IAEd,WAAY,aAAa,MAAM,OAGnC,0BACI,aAAc,EAGlB,2BACI,KAAM,QAGV,oBACI,KAAM,YAGV,2BACI,KAAM,cAKV,sCAFA,mCACA,6CAEI,eAAgB,EAEhB,WAAY,aAAa,MAAM,OA8EnC,wDAwEA,6CACI,WAAY,aAAa,MAAM,OAAQ,eAAe,MAAM,OAlJhE,8CACA,4CAHA,yCACA,mDAGI,aAAc,EAGlB,sCACA,6CACI,YAAa,IACb,KAAM,cACN,OAAQ,YAIZ,yBACE,aAAc,GAGhB,+BAIA,6BAHE,aAAc,EAOhB,6BACE,OAAQ,KAGV,uBACE,aAAc,MAGhB,gBAAkB,KAAM,KAAK,WAC7B,4BAA8B,aAAc,GAC5C,kCAAoC,aAAc,EAClD,2BAA6B,OAAQ,KAAM,aAAc,IACzD,mCAAqC,OAAQ,KAAM,KAAM,KAAM,aAAc,MAC7E,+BAAiC,OAAQ,KAAM,aAAc,MAC7D,8BAAgC,OAAQ,KAAM,aAAc,KAC5D,gCAAkC,KAAM,KACxC,gCAAkC,KAAM,KACxC,gCAAkC,KAAM,KACxC,0BAA4B,UAAW,KAAM,YAAa,IAC1D,6BAA+B,KAAM,KAErC,0BACI,KAAM,QACN,aAAc,GAGlB,gCACI,aAAc,GAGlB,2CACI,aAAc,IAGlB,iDACI,aAAc,IAGlB,yDACI,OAAQ,QACR,KAAM,QAGV,yDACI,OAAQ,QACR,KAAM,QAOV,uCACI,OAAQ,KAGZ,eACI,OAAQ,KACR,aAAc,MAGlB,eACI,OAAQ,KACR,eAAgB,GAGpB,oBACI,aAAc,EAOlB,4BACI,aAAa,EACb,aAAa,EAGjB,8BACI,aAAa,EACb,aAAa,EAGjB,qDACI,aAAa,EACb,eAAe,EAQnB,kCACI,aAAc,IAGlB,wCACI,aAAc,EAElB,8BACI,KAAM,KAGV,8BACI,OAAQ,KAGZ,oDACI,aAAc,EACd,eAAgB,EAGpB,sDACI,aAAc,aACd,eAAgB,aASpB,iCADA,4CAEI,aAAc,IACd,aAAc,cACd,eAAgB,cAIpB,2BACI,OAAQ,KACR,eAAgB,EAChB,KAAM,KACN,aAAc,EAIlB,oBACI,OAAQ,UAUZ,aACI,oBAAqB,KAClB,iBAAkB,KACjB,gBAAiB,KACb,YAAa,KACrB,QAAS,MACT,MAAM,KACN,OAAO,KAMX,0BAA2B,2BACvB,WAAY,EAAE,IAAI,KAAK,eACvB,cAAe,IAInB,WACI,KAAM,IAAO,KAAK,MAAO,WAG7B,aACI,KAAM,IAAK,KAAK,MAAO,WAG3B,qBACI,KAAM,KACN,aAAc,EAGlB,gBACI,UAAW,KACX,YAAa,IAQjB,kBACI,aAAc,KAIlB,uBACI,KAAM,KACN,OAAQ,KAQZ,4BACI,OAAQ,QAGZ,qCACI,aAAc,EAIlB,wBACI,aAAc,YAGlB,+BACI,OAAQ,KACR,aAAc,GACd,KAAM,KACN,aAAc,GAOlB,aACI,WACI,aAAc,EACd,aAAc,GAItB,oCACI,aAAc,IAGlB,0CACI,aAAc,IAGlB,6CACI,OAAQ,QAGZ,6CACI,OAAQ,QAIZ,uBACI,KAAM,KACN,OAAQ,KACR,eAAgB,GAIpB,uBACI,KAAM,KACN,eAAgB,GAGpB,4CACI,KAAM,KACN,aAAc,GACd,OAAQ,KACR,gBAAiB,WAGrB,qCACI,aAAc,EACjB,aAAc,IAIf,8BACE,KAAM,KACN,OAAQ,KACR,aAAc,EACd,eAAgB,EAChB,iBAAkB,EAAG,EAQvB,2BACI,UAAW,KACX,KAAM,qBAGV,4BACI,OAAQ,KACR,aAAc,EAGlB,kBAbI,WAAY,aAAa,MAAM,OAAQ,aAAa,MAAM,OAAQ,eAAe,MAAM,OAcvF,OAAQ,KACR,aAAc,IACd,eAAgB,EAIhB,aAAc,GAGlB,yBACI,aAAc,EAOlB,4BACI,aAAc,EACd,eAAgB,EAIpB,iCACI,aAAc,KACd,eAAgB,GAGpB,kCACI,aAAc,EAYlB,wBACI,KAAM,KAOV,2CACI,OAAQ,KACR,aAAc,MAGlB,uBACA,yBACI,eAAgB,IAsLpB,+BApIA,WAqII,eAAe,KApLnB,oBACI,aAAc,EACd,eAAgB,EAGpB,kCACA,kCACI,aAAc,EACd,UAAW,KACX,YAAa,IAGjB,kCACI,OAAQ,KAGZ,oCACI,OAAQ,QACR,KAAM,QAGV,oCACI,OAAQ,QACR,KAAM,QAGV,wCACI,YAAa,IACb,UAAW,MAgEf,cAoCA,wBACI,YAAa,IAlGjB,kCACI,aAAc,GACd,eAAgB,EAChB,WAAY,aAAa,MAAM,OAAQ,eAAe,MAAM,OAGhE,wCACI,aAAc,GAIlB,0CACI,eAAgB,EAChB,aAAc,EAGlB,WACI,SAAU,SAEV,MAAO,cACP,QAAS,IAET,QAAS,MACT,QAAS,MAET,YAAa,MAAO,WACpB,UAAW,KACX,WAAY,KAGZ,YAAa,OAEb,oBAAqB,KAElB,iBAAkB,KAEjB,gBAAiB,KAEb,YAAa,KAIrB,WAAY,qBACZ,OAAQ,IAAI,MAAM,eAClB,cAAe,IAiBnB,cAcA,aACI,OAAQ,EAER,WAAY,OA5BhB,4BAA6B,6BACzB,WAAY,QAAQ,KAAK,OAEzB,iBAAkB,MAGtB,uBACA,uBACI,QAAS,IAGb,cAEI,QAAS,IAAI,KACb,YAAa,KAEb,iBAAkB,sBAClB,MAAO,cAGP,cAAe,IAAI,MAAM,QAEzB,cAAe,IAAI,IAAI,EAAE,EAG7B,aAEI,QAAS,IAAI,KAIjB,gBACI,QAAS,aACT,OAAQ,IAAI,EAGhB,iBACI,OAAQ,IACR,eAAe,EAInB,oBACI,QAAS,IAAI,IAAI,IAAI,EACrB,eAAgB,OAOpB,8BACI,YAAa,IAGjB,0BACI,WAAY,MACZ,YAAa,IAGjB,4BACI,MAAO,QAGX,iCACI,QAAS,IAAI,IAAI,IAAI,EACrB,oBAAqB,MACrB,oBAAqB,IACrB,iBAAkB,MAClB,iBAAkB,IAGtB,2CA
GI,eAAgB,OAIhB,MAAO,KACP,OAAQ,KACR,OAAQ,IAAI,MAAM,KAGtB,mBACI,QAAS,IACT,WAAY,OAGhB,2BACI,eAAgB,KAChB,QAAS,KAWb,wBACI,OAAQ"}
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js
deleted file mode 100644
index 9cfd702..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js
+++ /dev/null
@@ -1,11 +0,0 @@
-/* nvd3 version 1.8.5 (https://github.com/novus/nvd3) 2016-12-01 */
-
-!function(){var a={};a.dev=!1,a.tooltip=a.tooltip||{},a.utils=a.utils||{},a.models=a.models||{},a.charts={},a.logs={},a.dom={},"undefined"!=typeof module&&"undefined"!=typeof exports&&"undefined"==typeof d3&&(d3=require("d3")),a.dispatch=d3.dispatch("render_start","render_end"),Function.prototype.bind||(Function.prototype.bind=function(a){if("function"!=typeof this)throw new TypeError("Function.prototype.bind - what is trying to be bound is not callable");var b=Array.prototype.slice.call(arguments,1),c=this,d=function(){},e=function(){return c.apply(this instanceof d&&a?this:a,b.concat(Array.prototype.slice.call(arguments)))};return d.prototype=this.prototype,e.prototype=new d,e}),a.dev&&(a.dispatch.on("render_start",function(b){a.logs.startTime=+new Date}),a.dispatch.on("render_end",function(b){a.logs.endTime=+new Date,a.logs.totalTime=a.logs.endTime-a.logs.startTime,a.log("total",a.logs.totalTime)})),a.log=function(){if(a.dev&&window.console&&console.log&&console.log.apply)console.log.apply(console,arguments);else if(a.dev&&window.console&&"function"==typeof console.log&&Function.prototype.bind){var b=Function.prototype.bind.call(console.log,console);b.apply(console,arguments)}return arguments[arguments.length-1]},a.deprecated=function(a,b){console&&console.warn&&console.warn("nvd3 warning: `"+a+"` has been deprecated. ",b||"")},a.render=function(b){b=b||1,a.render.active=!0,a.dispatch.render_start();var c=function(){for(var d,e,f=0;b>f&&(e=a.render.queue[f]);f++)d=e.generate(),typeof e.callback==typeof Function&&e.callback(d);a.render.queue.splice(0,f),a.render.queue.length?setTimeout(c):(a.dispatch.render_end(),a.render.active=!1)};setTimeout(c)},a.render.active=!1,a.render.queue=[],a.addGraph=function(b){typeof arguments[0]==typeof Function&&(b={generate:arguments[0],callback:arguments[1]}),a.render.queue.push(b),a.render.active||a.render()},"undefined"!=typeof module&&"undefined"!=typeof exports&&(module.exports=a),"undefined"!=typeof window&&(window.nv=a),a.dom.write=function(a){return void 0!==window.fastdom?fastdom.mutate(a):a()},a.dom.read=function(a){return void 0!==window.fastdom?fastdom.measure(a):a()},a.interactiveGuideline=function(){"use strict";function b(l){l.each(function(l){function m(){var a=d3.mouse(this),d=a[0],e=a[1],h=!0,i=!1;if(k&&(d=d3.event.offsetX,e=d3.event.offsetY,"svg"!==d3.event.target.tagName&&(h=!1),d3.event.target.className.baseVal.match("nv-legend")&&(i=!0)),h&&(d-=c.left,e-=c.top),"mouseout"===d3.event.type||0>d||0>e||d>o||e>p||d3.event.relatedTarget&&void 0===d3.event.relatedTarget.ownerSVGElement||i){if(k&&d3.event.relatedTarget&&void 0===d3.event.relatedTarget.ownerSVGElement&&(void 0===d3.event.relatedTarget.className||d3.event.relatedTarget.className.match(j.nvPointerEventsClass)))return;return g.elementMouseout({mouseX:d,mouseY:e}),b.renderGuideLine(null),void j.hidden(!0)}j.hidden(!1);var l="function"==typeof f.rangeBands,m=void 0;if(l){var n=d3.bisect(f.range(),d)-1;if(!(f.range()[n]+f.rangeBand()>=d))return g.elementMouseout({mouseX:d,mouseY:e}),b.renderGuideLine(null),void j.hidden(!0);m=f.domain()[d3.bisect(f.range(),d)-1]}else m=f.invert(d);g.elementMousemove({mouseX:d,mouseY:e,pointXValue:m}),"dblclick"===d3.event.type&&g.elementDblclick({mouseX:d,mouseY:e,pointXValue:m}),"click"===d3.event.type&&g.elementClick({mouseX:d,mouseY:e,pointXValue:m}),"mousedown"===d3.event.type&&g.elementMouseDown({mouseX:d,mouseY:e,pointXValue:m}),"mouseup"===d3.event.type&&g.elementMouseUp({mouseX:d,mouseY:e,pointXValue:m})}var 
n=d3.select(this),o=d||960,p=e||400,q=n.selectAll("g.nv-wrap.nv-interactiveLineLayer").data([l]),r=q.enter().append("g").attr("class"," nv-wrap nv-interactiveLineLayer");r.append("g").attr("class","nv-interactiveGuideLine"),i&&(i.on("touchmove",m).on("mousemove",m,!0).on("mouseout",m,!0).on("mousedown",m,!0).on("mouseup",m,!0).on("dblclick",m).on("click",m),b.guideLine=null,b.renderGuideLine=function(c){h&&(b.guideLine&&b.guideLine.attr("x1")===c||a.dom.write(function(){var b=q.select(".nv-interactiveGuideLine").selectAll("line").data(null!=c?[a.utils.NaNtoZero(c)]:[],String);b.enter().append("line").attr("class","nv-guideline").attr("x1",function(a){return a}).attr("x2",function(a){return a}).attr("y1",p).attr("y2",0),b.exit().remove()}))})})}var c={left:0,top:0},d=null,e=null,f=d3.scale.linear(),g=d3.dispatch("elementMousemove","elementMouseout","elementClick","elementDblclick","elementMouseDown","elementMouseUp"),h=!0,i=null,j=a.models.tooltip(),k=window.ActiveXObject;return j.duration(0).hideDelay(0).hidden(!1),b.dispatch=g,b.tooltip=j,b.margin=function(a){return arguments.length?(c.top="undefined"!=typeof a.top?a.top:c.top,c.left="undefined"!=typeof a.left?a.left:c.left,b):c},b.width=function(a){return arguments.length?(d=a,b):d},b.height=function(a){return arguments.length?(e=a,b):e},b.xScale=function(a){return arguments.length?(f=a,b):f},b.showGuideLine=function(a){return arguments.length?(h=a,b):h},b.svgContainer=function(a){return arguments.length?(i=a,b):i},b},a.interactiveBisect=function(a,b,c){"use strict";if(!(a instanceof Array))return null;var d;d="function"!=typeof c?function(a){return a.x}:c;var e=function(a,b){return d(a)-b},f=d3.bisector(e).left,g=d3.max([0,f(a,b)-1]),h=d(a[g]);if("undefined"==typeof h&&(h=g),h===b)return g;var i=d3.min([g+1,a.length-1]),j=d(a[i]);return"undefined"==typeof j&&(j=i),Math.abs(j-b)>=Math.abs(h-b)?g:i},a.nearestValueIndex=function(a,b,c){"use strict";var d=1/0,e=null;return a.forEach(function(a,f){var g=Math.abs(b-a);null!=a&&d>=g&&c>g&&(d=g,e=f)}),e},a.models.tooltip=function(){"use strict";function b(){if(!l||!l.node()){var a=[1];l=d3.select(document.body).select("#"+d).data(a),l.enter().append("div").attr("class","nvtooltip "+(i?i:"xy-tooltip")).attr("id",d).style("top",0).style("left",0).style("opacity",0).style("position","fixed").selectAll("div, table, td, tr").classed(q,!0).classed(q,!0),l.exit().remove()}}function c(){return n&&w(e)?(a.dom.write(function(){b();var a=u(e);a&&(l.node().innerHTML=a),y()}),c):void 0}var d="nvtooltip-"+Math.floor(1e5*Math.random()),e=null,f="w",g=25,h=0,i=null,j=!0,k=200,l=null,m={left:null,top:null},n=!0,o=100,p=!0,q="nv-pointer-events-none",r=function(a,b){return a},s=function(a){return a},t=function(a,b){return a},u=function(a){if(null===a)return"";var b=d3.select(document.createElement("table"));if(p){var c=b.selectAll("thead").data([a]).enter().append("thead");c.append("tr").append("td").attr("colspan",3).append("strong").classed("x-value",!0).html(s(a.value))}var d=b.selectAll("tbody").data([a]).enter().append("tbody"),e=d.selectAll("tr").data(function(a){return a.series}).enter().append("tr").classed("highlight",function(a){return a.highlight});e.append("td").classed("legend-color-guide",!0).append("div").style("background-color",function(a){return a.color}),e.append("td").classed("key",!0).classed("total",function(a){return!!a.total}).html(function(a,b){return t(a.key,b)}),e.append("td").classed("value",!0).html(function(a,b){return r(a.value,b)}),e.filter(function(a,b){return void 
0!==a.percent}).append("td").classed("percent",!0).html(function(a,b){return"("+d3.format("%")(a.percent)+")"}),e.selectAll("td").each(function(a){if(a.highlight){var b=d3.scale.linear().domain([0,1]).range(["#fff",a.color]),c=.6;d3.select(this).style("border-bottom-color",b(c)).style("border-top-color",b(c))}});var f=b.node().outerHTML;return void 0!==a.footer&&(f+="<div class='footer'>"+a.footer+"</div>"),f},v=function(){var a={left:null!==d3.event?d3.event.clientX:0,top:null!==d3.event?d3.event.clientY:0};if("none"!=getComputedStyle(document.body).transform){var b=document.body.getBoundingClientRect();a.left-=b.left,a.top-=b.top}return a},w=function(b){if(b&&b.series){if(a.utils.isArray(b.series))return!0;if(a.utils.isObject(b.series))return b.series=[b.series],!0}return!1},x=function(a){var b,c,d,e=l.node().offsetHeight,h=l.node().offsetWidth,i=document.documentElement.clientWidth,j=document.documentElement.clientHeight;switch(f){case"e":b=-h-g,c=-(e/2),a.left+b<0&&(b=g),(d=a.top+c)<0&&(c-=d),(d=a.top+c+e)>j&&(c-=d-j);break;case"w":b=g,c=-(e/2),a.left+b+h>i&&(b=-h-g),(d=a.top+c)<0&&(c-=d),(d=a.top+c+e)>j&&(c-=d-j);break;case"n":b=-(h/2)-5,c=g,a.top+c+e>j&&(c=-e-g),(d=a.left+b)<0&&(b-=d),(d=a.left+b+h)>i&&(b-=d-i);break;case"s":b=-(h/2),c=-e-g,a.top+c<0&&(c=g),(d=a.left+b)<0&&(b-=d),(d=a.left+b+h)>i&&(b-=d-i);break;case"center":b=-(h/2),c=-(e/2);break;default:b=0,c=0}return{left:b,top:c}},y=function(){a.dom.read(function(){var a=v(),b=x(a),c=a.left+b.left,d=a.top+b.top;if(j)l.interrupt().transition().delay(k).duration(0).style("opacity",0);else{var e="translate("+m.left+"px, "+m.top+"px)",f="translate("+Math.round(c)+"px, "+Math.round(d)+"px)",g=d3.interpolateString(e,f),h=l.style("opacity")<.1;l.interrupt().transition().duration(h?0:o).styleTween("transform",function(a){return g},"important").styleTween("-webkit-transform",function(a){return g}).style("-ms-transform",f).style("opacity",1)}m.left=c,m.top=d})};return c.nvPointerEventsClass=q,c.options=a.utils.optionsFunc.bind(c),c._options=Object.create({},{duration:{get:function(){return o},set:function(a){o=a}},gravity:{get:function(){return f},set:function(a){f=a}},distance:{get:function(){return g},set:function(a){g=a}},snapDistance:{get:function(){return h},set:function(a){h=a}},classes:{get:function(){return i},set:function(a){i=a}},enabled:{get:function(){return n},set:function(a){n=a}},hideDelay:{get:function(){return k},set:function(a){k=a}},contentGenerator:{get:function(){return u},set:function(a){u=a}},valueFormatter:{get:function(){return r},set:function(a){r=a}},headerFormatter:{get:function(){return s},set:function(a){s=a}},keyFormatter:{get:function(){return t},set:function(a){t=a}},headerEnabled:{get:function(){return p},set:function(a){p=a}},position:{get:function(){return v},set:function(a){v=a}},chartContainer:{get:function(){return document.body},set:function(b){a.deprecated("chartContainer","feature removed after 1.8.3")}},fixedTop:{get:function(){return null},set:function(b){a.deprecated("fixedTop","feature removed after 1.8.1")}},offset:{get:function(){return{left:0,top:0}},set:function(b){a.deprecated("offset","use chart.tooltip.distance() instead")}},hidden:{get:function(){return j},set:function(a){j!=a&&(j=!!a,c())}},data:{get:function(){return e},set:function(a){a.point&&(a.value=a.point.x,a.series=a.series||{},a.series.value=a.point.y,a.series.color=a.point.color||a.series.color),e=a}},node:{get:function(){return l.node()},set:function(a){}},id:{get:function(){return 
d},set:function(a){}}}),a.utils.initOptions(c),c},a.utils.windowSize=function(){var a={width:640,height:480};return window.innerWidth&&window.innerHeight?(a.width=window.innerWidth,a.height=window.innerHeight,a):"CSS1Compat"==document.compatMode&&document.documentElement&&document.documentElement.offsetWidth?(a.width=document.documentElement.offsetWidth,a.height=document.documentElement.offsetHeight,a):document.body&&document.body.offsetWidth?(a.width=document.body.offsetWidth,a.height=document.body.offsetHeight,a):a},a.utils.isArray=Array.isArray,a.utils.isObject=function(a){return null!==a&&"object"==typeof a},a.utils.isFunction=function(a){return"function"==typeof a},a.utils.isDate=function(a){return"[object Date]"===toString.call(a)},a.utils.isNumber=function(a){return!isNaN(a)&&"number"==typeof a},a.utils.windowResize=function(b){return window.addEventListener?window.addEventListener("resize",b):a.log("ERROR: Failed to bind to window.resize with: ",b),{callback:b,clear:function(){window.removeEventListener("resize",b)}}},a.utils.getColor=function(b){if(void 0===b)return a.utils.defaultColor();if(a.utils.isArray(b)){var c=d3.scale.ordinal().range(b);return function(a,b){var d=void 0===b?a:b;return a.color||c(d)}}return b},a.utils.defaultColor=function(){return a.utils.getColor(d3.scale.category20().range())},a.utils.customTheme=function(b,c,d){c=c||function(a){return a.key},d=d||d3.scale.category20().range();var e=d.length;return function(f,g){var h=c(f);return a.utils.isFunction(b[h])?b[h]():void 0!==b[h]?b[h]:(e||(e=d.length),e-=1,d[e])}},a.utils.pjax=function(b,c){var d=function(d){d3.html(d,function(d){var e=d3.select(c).node();e.parentNode.replaceChild(d3.select(d).select(c).node(),e),a.utils.pjax(b,c)})};d3.selectAll(b).on("click",function(){history.pushState(this.href,this.textContent,this.href),d(this.href),d3.event.preventDefault()}),d3.select(window).on("popstate",function(){d3.event.state&&d(d3.event.state)})},a.utils.calcApproxTextWidth=function(b){if(a.utils.isFunction(b.style)&&a.utils.isFunction(b.text)){var c=parseInt(b.style("font-size").replace("px",""),10),d=b.text().length;return a.utils.NaNtoZero(d*c*.5)}return 0},a.utils.NaNtoZero=function(b){return!a.utils.isNumber(b)||isNaN(b)||null===b||b===1/0||b===-(1/0)?0:b},d3.selection.prototype.watchTransition=function(a){var b=[this].concat([].slice.call(arguments,1));return a.transition.apply(a,b)},a.utils.renderWatch=function(b,c){if(!(this instanceof a.utils.renderWatch))return new a.utils.renderWatch(b,c);var d=void 0!==c?c:250,e=[],f=this;this.models=function(a){return a=[].slice.call(arguments,0),a.forEach(function(a){a.__rendered=!1,function(a){a.dispatch.on("renderEnd",function(b){a.__rendered=!0,f.renderEnd("model")})}(a),e.indexOf(a)<0&&e.push(a)}),this},this.reset=function(a){void 0!==a&&(d=a),e=[]},this.transition=function(a,b,c){if(b=arguments.length>1?[].slice.call(arguments,1):[],c=b.length>1?b.pop():void 0!==d?d:250,a.__rendered=!1,e.indexOf(a)<0&&e.push(a),0===c)return a.__rendered=!0,a.delay=function(){return this},a.duration=function(){return this},a;0===a.length?a.__rendered=!0:a.every(function(a){return!a.length})?a.__rendered=!0:a.__rendered=!1;var g=0;return a.transition().duration(c).each(function(){++g}).each("end",function(c,d){0===--g&&(a.__rendered=!0,f.renderEnd.apply(this,b))})},this.renderEnd=function(){e.every(function(a){return a.__rendered})&&(e.forEach(function(a){a.__rendered=!1}),b.renderEnd.apply(this,arguments))}},a.utils.deepExtend=function(b){var 
c=arguments.length>1?[].slice.call(arguments,1):[];c.forEach(function(c){for(var d in c){var e=a.utils.isArray(b[d]),f=a.utils.isObject(b[d]),g=a.utils.isObject(c[d]);f&&!e&&g?a.utils.deepExtend(b[d],c[d]):b[d]=c[d]}})},a.utils.state=function(){if(!(this instanceof a.utils.state))return new a.utils.state;var b={},c=function(){},d=function(){return{}},e=null,f=null;this.dispatch=d3.dispatch("change","set"),this.dispatch.on("set",function(a){c(a,!0)}),this.getter=function(a){return d=a,this},this.setter=function(a,b){return b||(b=function(){}),c=function(c,d){a(c),d&&b()},this},this.init=function(b){e=e||{},a.utils.deepExtend(e,b)};var g=function(){var a=d();if(JSON.stringify(a)===JSON.stringify(b))return!1;for(var c in a)void 0===b[c]&&(b[c]={}),b[c]=a[c],f=!0;return!0};this.update=function(){e&&(c(e,!1),e=null),g.call(this)&&this.dispatch.change(b)}},a.utils.optionsFunc=function(b){return b&&d3.map(b).forEach(function(b,c){a.utils.isFunction(this[b])&&this[b](c)}.bind(this)),this},a.utils.calcTicksX=function(b,c){var d=1,e=0;for(e;e<c.length;e+=1){var f=c[e]&&c[e].values?c[e].values.length:0;d=f>d?f:d}return a.log("Requested number of ticks: ",b),a.log("Calculated max values to be: ",d),b=b>d?b=d-1:b,b=1>b?1:b,b=Math.floor(b),a.log("Calculating tick count as: ",b),b},a.utils.calcTicksY=function(b,c){return a.utils.calcTicksX(b,c)},a.utils.initOption=function(a,b){a._calls&&a._calls[b]?a[b]=a._calls[b]:(a[b]=function(c){return arguments.length?(a._overrides[b]=!0,a._options[b]=c,a):a._options[b]},a["_"+b]=function(c){return arguments.length?(a._overrides[b]||(a._options[b]=c),a):a._options[b]})},a.utils.initOptions=function(b){b._overrides=b._overrides||{};var c=Object.getOwnPropertyNames(b._options||{}),d=Object.getOwnPropertyNames(b._calls||{});c=c.concat(d);for(var e in c)a.utils.initOption(b,c[e])},a.utils.inheritOptionsD3=function(a,b,c){a._d3options=c.concat(a._d3options||[]),c.unshift(b),c.unshift(a),d3.rebind.apply(this,c)},a.utils.arrayUnique=function(a){return a.sort().filter(function(b,c){return!c||b!=a[c-1]})},a.utils.symbolMap=d3.map(),a.utils.symbol=function(){function b(b,e){var f=c.call(this,b,e),g=d.call(this,b,e);return-1!==d3.svg.symbolTypes.indexOf(f)?d3.svg.symbol().type(f).size(g)():a.utils.symbolMap.get(f)(g)}var c,d=64;return b.type=function(a){return arguments.length?(c=d3.functor(a),b):c},b.size=function(a){return arguments.length?(d=d3.functor(a),b):d},b},a.utils.inheritOptions=function(b,c){var d=Object.getOwnPropertyNames(c._options||{}),e=Object.getOwnPropertyNames(c._calls||{}),f=c._inherited||[],g=c._d3options||[],h=d.concat(e).concat(f).concat(g);h.unshift(c),h.unshift(b),d3.rebind.apply(this,h),b._inherited=a.utils.arrayUnique(d.concat(e).concat(f).concat(d).concat(b._inherited||[])),b._d3options=a.utils.arrayUnique(g.concat(b._d3options||[]))},a.utils.initSVG=function(a){a.classed({"nvd3-svg":!0})},a.utils.sanitizeHeight=function(a,b){return a||parseInt(b.style("height"),10)||400},a.utils.sanitizeWidth=function(a,b){return a||parseInt(b.style("width"),10)||960},a.utils.availableHeight=function(b,c,d){return Math.max(0,a.utils.sanitizeHeight(b,c)-d.top-d.bottom)},a.utils.availableWidth=function(b,c,d){return Math.max(0,a.utils.sanitizeWidth(b,c)-d.left-d.right)},a.utils.noData=function(b,c){var d=b.options(),e=d.margin(),f=d.noData(),g=null==f?["No Data Available."]:[f],h=a.utils.availableHeight(null,c,e),i=a.utils.availableWidth(null,c,e),j=e.left+i/2,k=e.top+h/2;c.selectAll("g").remove();var 
l=c.selectAll(".nv-noData").data(g);l.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),l.attr("x",j).attr("y",k).text(function(a){return a})},a.utils.wrapTicks=function(a,b){a.each(function(){for(var a,c=d3.select(this),d=c.text().split(/\s+/).reverse(),e=[],f=0,g=1.1,h=c.attr("y"),i=parseFloat(c.attr("dy")),j=c.text(null).append("tspan").attr("x",0).attr("y",h).attr("dy",i+"em");a=d.pop();)e.push(a),j.text(e.join(" ")),j.node().getComputedTextLength()>b&&(e.pop(),j.text(e.join(" ")),e=[a],j=c.append("tspan").attr("x",0).attr("y",h).attr("dy",++f*g+i+"em").text(a))})},a.utils.arrayEquals=function(b,c){if(b===c)return!0;if(!b||!c)return!1;if(b.length!=c.length)return!1;for(var d=0,e=b.length;e>d;d++)if(b[d]instanceof Array&&c[d]instanceof Array){if(!a.arrayEquals(b[d],c[d]))return!1}else if(b[d]!=c[d])return!1;return!0},a.models.axis=function(){"use strict";function b(g){return t.reset(),g.each(function(b){var g=d3.select(this);a.utils.initSVG(g);var q=g.selectAll("g.nv-wrap.nv-axis").data([b]),r=q.enter().append("g").attr("class","nvd3 nv-wrap nv-axis"),u=(r.append("g"),q.select("g"));null!==n?c.ticks(n):("top"==c.orient()||"bottom"==c.orient())&&c.ticks(Math.abs(d.range()[1]-d.range()[0])/100),u.watchTransition(t,"axis").call(c),s=s||c.scale();var v=c.tickFormat();null==v&&(v=s.tickFormat());var w=u.selectAll("text.nv-axislabel").data([h||null]);w.exit().remove(),void 0!==p&&u.selectAll("g").select("text").style("font-size",p);var x,y,z;switch(c.orient()){case"top":w.enter().append("text").attr("class","nv-axislabel"),z=0,1===d.range().length?z=m?2*d.range()[0]+d.rangeBand():0:2===d.range().length?z=m?d.range()[0]+d.range()[1]+d.rangeBand():d.range()[1]:d.range().length>2&&(z=d.range()[d.range().length-1]+(d.range()[1]-d.range()[0])),w.attr("text-anchor","middle").attr("y",0).attr("x",z/2),i&&(y=q.selectAll("g.nv-axisMaxMin").data(d.domain()),y.enter().append("g").attr("class",function(a,b){return["nv-axisMaxMin","nv-axisMaxMin-x",0==b?"nv-axisMin-x":"nv-axisMax-x"].join(" ")}).append("text"),y.exit().remove(),y.attr("transform",function(b,c){return"translate("+a.utils.NaNtoZero(d(b))+",0)"}).select("text").attr("dy","-0.5em").attr("y",-c.tickPadding()).attr("text-anchor","middle").text(function(a,b){var c=v(a);return(""+c).match("NaN")?"":c}),y.watchTransition(t,"min-max top").attr("transform",function(b,c){return"translate("+a.utils.NaNtoZero(d.range()[c])+",0)"}));break;case"bottom":x=o+36;var A=30,B=0,C=u.selectAll("g").select("text"),D="";if(j%360){C.attr("transform",""),C.each(function(a,b){var c=this.getBoundingClientRect(),d=c.width;B=c.height,d>A&&(A=d)}),D="rotate("+j+" 0,"+(B/2+c.tickPadding())+")";var E=Math.abs(Math.sin(j*Math.PI/180));x=(E?E*A:A)+30,C.attr("transform",D).style("text-anchor",j%360>0?"start":"end")}else l?C.attr("transform",function(a,b){return"translate(0,"+(b%2==0?"0":"12")+")"}):C.attr("transform","translate(0,0)");w.enter().append("text").attr("class","nv-axislabel"),z=0,1===d.range().length?z=m?2*d.range()[0]+d.rangeBand():0:2===d.range().length?z=m?d.range()[0]+d.range()[1]+d.rangeBand():d.range()[1]:d.range().length>2&&(z=d.range()[d.range().length-1]+(d.range()[1]-d.range()[0])),w.attr("text-anchor","middle").attr("y",x).attr("x",z/2),i&&(y=q.selectAll("g.nv-axisMaxMin").data([d.domain()[0],d.domain()[d.domain().length-1]]),y.enter().append("g").attr("class",function(a,b){return["nv-axisMaxMin","nv-axisMaxMin-x",0==b?"nv-axisMin-x":"nv-axisMax-x"].join(" 
")}).append("text"),y.exit().remove(),y.attr("transform",function(b,c){return"translate("+a.utils.NaNtoZero(d(b)+(m?d.rangeBand()/2:0))+",0)"}).select("text").attr("dy",".71em").attr("y",c.tickPadding()).attr("transform",D).style("text-anchor",j?j%360>0?"start":"end":"middle").text(function(a,b){var c=v(a);return(""+c).match("NaN")?"":c}),y.watchTransition(t,"min-max bottom").attr("transform",function(b,c){return"translate("+a.utils.NaNtoZero(d(b)+(m?d.rangeBand()/2:0))+",0)"}));break;case"right":w.enter().append("text").attr("class","nv-axislabel"),w.style("text-anchor",k?"middle":"begin").attr("transform",k?"rotate(90)":"").attr("y",k?-Math.max(e.right,f)+12-(o||0):-10).attr("x",k?d3.max(d.range())/2:c.tickPadding()),i&&(y=q.selectAll("g.nv-axisMaxMin").data(d.domain()),y.enter().append("g").attr("class",function(a,b){return["nv-axisMaxMin","nv-axisMaxMin-y",0==b?"nv-axisMin-y":"nv-axisMax-y"].join(" ")}).append("text").style("opacity",0),y.exit().remove(),y.attr("transform",function(b,c){return"translate(0,"+a.utils.NaNtoZero(d(b))+")"}).select("text").attr("dy",".32em").attr("y",0).attr("x",c.tickPadding()).style("text-anchor","start").text(function(a,b){var c=v(a);return(""+c).match("NaN")?"":c}),y.watchTransition(t,"min-max right").attr("transform",function(b,c){return"translate(0,"+a.utils.NaNtoZero(d.range()[c])+")"}).select("text").style("opacity",1));break;case"left":w.enter().append("text").attr("class","nv-axislabel"),w.style("text-anchor",k?"middle":"end").attr("transform",k?"rotate(-90)":"").attr("y",k?-Math.max(e.left,f)+25-(o||0):-10).attr("x",k?-d3.max(d.range())/2:-c.tickPadding()),i&&(y=q.selectAll("g.nv-axisMaxMin").data(d.domain()),y.enter().append("g").attr("class",function(a,b){return["nv-axisMaxMin","nv-axisMaxMin-y",0==b?"nv-axisMin-y":"nv-axisMax-y"].join(" ")}).append("text").style("opacity",0),y.exit().remove(),y.attr("transform",function(b,c){return"translate(0,"+a.utils.NaNtoZero(s(b))+")"}).select("text").attr("dy",".32em").attr("y",0).attr("x",-c.tickPadding()).attr("text-anchor","end").text(function(a,b){var c=v(a);return(""+c).match("NaN")?"":c}),y.watchTransition(t,"min-max right").attr("transform",function(b,c){return"translate(0,"+a.utils.NaNtoZero(d.range()[c])+")"}).select("text").style("opacity",1))}if(w.text(function(a){return a}),!i||"left"!==c.orient()&&"right"!==c.orient()||(u.selectAll("g").each(function(a,b){d3.select(this).select("text").attr("opacity",1),(d(a)<d.range()[1]+10||d(a)>d.range()[0]-10)&&((a>1e-10||-1e-10>a)&&d3.select(this).attr("opacity",0),d3.select(this).select("text").attr("opacity",0))}),d.domain()[0]==d.domain()[1]&&0==d.domain()[0]&&q.selectAll("g.nv-axisMaxMin").style("opacity",function(a,b){return b?0:1})),i&&("top"===c.orient()||"bottom"===c.orient())){var F=[];q.selectAll("g.nv-axisMaxMin").each(function(a,b){try{b?F.push(d(a)-this.getBoundingClientRect().width-4):F.push(d(a)+this.getBoundingClientRect().width+4)}catch(c){b?F.push(d(a)-4):F.push(d(a)+4)}}),u.selectAll("g").each(function(a,b){(d(a)<F[0]||d(a)>F[1])&&(a>1e-10||-1e-10>a?d3.select(this).remove():d3.select(this).select("text").remove())})}u.selectAll(".tick").filter(function(a){return!parseFloat(Math.round(1e5*a)/1e6)&&void 0!==a}).classed("zero",!0),s=d.copy()}),t.renderEnd("axis immediate"),b}var c=d3.svg.axis(),d=d3.scale.linear(),e={top:0,right:0,bottom:0,left:0},f=75,g=60,h=null,i=!0,j=0,k=!0,l=!1,m=!1,n=null,o=0,p=void 0,q=250,r=d3.dispatch("renderEnd");c.scale(d).orient("bottom").tickFormat(function(a){return a});var 
s,t=a.utils.renderWatch(r,q);return b.axis=c,b.dispatch=r,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{axisLabelDistance:{get:function(){return o},set:function(a){o=a}},staggerLabels:{get:function(){return l},set:function(a){l=a}},rotateLabels:{get:function(){return j},set:function(a){j=a}},rotateYLabel:{get:function(){return k},set:function(a){k=a}},showMaxMin:{get:function(){return i},set:function(a){i=a}},axisLabel:{get:function(){return h},set:function(a){h=a}},height:{get:function(){return g},set:function(a){g=a}},ticks:{get:function(){return n},set:function(a){n=a}},width:{get:function(){return f},set:function(a){f=a}},fontSize:{get:function(){return p},set:function(a){p=a}},margin:{get:function(){return e},set:function(a){e.top=void 0!==a.top?a.top:e.top,e.right=void 0!==a.right?a.right:e.right,e.bottom=void 0!==a.bottom?a.bottom:e.bottom,e.left=void 0!==a.left?a.left:e.left}},duration:{get:function(){return q},set:function(a){q=a,t.reset(q)}},scale:{get:function(){return d},set:function(e){d=e,c.scale(d),m="function"==typeof d.rangeBands,a.utils.inheritOptionsD3(b,d,["domain","range","rangeBand","rangeBands"])}}}),a.utils.initOptions(b),a.utils.inheritOptionsD3(b,c,["orient","tickValues","tickSubdivide","tickSize","tickPadding","tickFormat"]),a.utils.inheritOptionsD3(b,d,["domain","range","rangeBand","rangeBands"]),b},a.models.boxPlot=function(){"use strict";function b(l){return E.reset(),l.each(function(b){var l=j-i.left-i.right,F=k-i.top-i.bottom;A=d3.select(this),a.utils.initSVG(A),m.domain(c||b.map(function(a,b){return o(a,b)})).rangeBands(d||[0,l],.1);var G=[];if(!e){var H,I,J=[];b.forEach(function(a,b){var c=p(a),d=r(a),e=s(a),f=t(a),g=v(a);g&&g.forEach(function(a,b){J.push(w(a,b,void 0))}),e&&J.push(e),c&&J.push(c),d&&J.push(d),f&&J.push(f)}),H=d3.min(J),I=d3.max(J),G=[H,I]}n.domain(e||G),n.range(f||[F,0]),g=g||m,h=h||n.copy().range([n(0),n(0)]);var K=A.selectAll("g.nv-wrap").data([b]);K.enter().append("g").attr("class","nvd3 nv-wrap");K.attr("transform","translate("+i.left+","+i.top+")");var L=K.selectAll(".nv-boxplot").data(function(a){return a}),M=L.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6);L.attr("class","nv-boxplot").attr("transform",function(a,b,c){return"translate("+(m(o(a,b))+.05*m.rangeBand())+", 0)"}).classed("hover",function(a){return a.hover}),L.watchTransition(E,"nv-boxplot: boxplots").style("stroke-opacity",1).style("fill-opacity",.75).delay(function(a,c){return c*C/b.length}).attr("transform",function(a,b){return"translate("+(m(o(a,b))+.05*m.rangeBand())+", 0)"}),L.exit().remove(),M.each(function(a,b){var c=d3.select(this);[s,t].forEach(function(d){if(void 0!==d(a)&&null!==d(a)){var e=d===s?"low":"high";c.append("line").style("stroke",u(a)||z(a,b)).attr("class","nv-boxplot-whisker nv-boxplot-"+e),c.append("line").style("stroke",u(a)||z(a,b)).attr("class","nv-boxplot-tick nv-boxplot-"+e)}})});var N=function(){return null===D?.9*m.rangeBand():Math.min(75,.9*m.rangeBand())},O=function(){return.45*m.rangeBand()-N()/2},P=function(){return.45*m.rangeBand()+N()/2};[s,t].forEach(function(a){var b=a===s?"low":"high",c=a===s?p:r;L.select("line.nv-boxplot-whisker.nv-boxplot-"+b).watchTransition(E,"nv-boxplot: boxplots").attr("x1",.45*m.rangeBand()).attr("y1",function(b,c){return n(a(b))}).attr("x2",.45*m.rangeBand()).attr("y2",function(a,b){return n(c(a))}),L.select("line.nv-boxplot-tick.nv-boxplot-"+b).watchTransition(E,"nv-boxplot: boxplots").attr("x1",O).attr("y1",function(b,c){return 
n(a(b))}).attr("x2",P).attr("y2",function(b,c){return n(a(b))})}),[s,t].forEach(function(a){var b=a===s?"low":"high";M.selectAll(".nv-boxplot-"+b).on("mouseover",function(b,c,d){d3.select(this).classed("hover",!0),B.elementMouseover({series:{key:a(b),color:u(b)||z(b,d)},e:d3.event})}).on("mouseout",function(b,c,d){d3.select(this).classed("hover",!1),B.elementMouseout({series:{key:a(b),color:u(b)||z(b,d)},e:d3.event})}).on("mousemove",function(a,b){B.elementMousemove({e:d3.event})})}),M.append("rect").attr("class","nv-boxplot-box").on("mouseover",function(a,b){d3.select(this).classed("hover",!0),B.elementMouseover({key:o(a),value:o(a),series:[{key:"Q3",value:r(a),color:u(a)||z(a,b)},{key:"Q2",value:q(a),color:u(a)||z(a,b)},{key:"Q1",value:p(a),color:u(a)||z(a,b)}],data:a,index:b,e:d3.event})}).on("mouseout",function(a,b){d3.select(this).classed("hover",!1),B.elementMouseout({key:o(a),value:o(a),series:[{key:"Q3",value:r(a),color:u(a)||z(a,b)},{key:"Q2",value:q(a),color:u(a)||z(a,b)},{key:"Q1",value:p(a),color:u(a)||z(a,b)}],data:a,index:b,e:d3.event})}).on("mousemove",function(a,b){B.elementMousemove({e:d3.event})}),L.select("rect.nv-boxplot-box").watchTransition(E,"nv-boxplot: boxes").attr("y",function(a,b){return n(r(a))}).attr("width",N).attr("x",O).attr("height",function(a,b){return Math.abs(n(r(a))-n(p(a)))||1}).style("fill",function(a,b){return u(a)||z(a,b)}).style("stroke",function(a,b){return u(a)||z(a,b)}),M.append("line").attr("class","nv-boxplot-median"),L.select("line.nv-boxplot-median").watchTransition(E,"nv-boxplot: boxplots line").attr("x1",O).attr("y1",function(a,b){return n(q(a))}).attr("x2",P).attr("y2",function(a,b){return n(q(a))});var Q=L.selectAll(".nv-boxplot-outlier").data(function(a){return v(a)||[]});Q.enter().append("circle").style("fill",function(a,b,c){return y(a,b,c)||z(a,c)}).style("stroke",function(a,b,c){return y(a,b,c)||z(a,c)}).style("z-index",9e3).on("mouseover",function(a,b,c){d3.select(this).classed("hover",!0),B.elementMouseover({series:{key:x(a,b,c),color:y(a,b,c)||z(a,c)},e:d3.event})}).on("mouseout",function(a,b,c){d3.select(this).classed("hover",!1),B.elementMouseout({series:{key:x(a,b,c),color:y(a,b,c)||z(a,c)},e:d3.event})}).on("mousemove",function(a,b){B.elementMousemove({e:d3.event})}),Q.attr("class","nv-boxplot-outlier"),Q.watchTransition(E,"nv-boxplot: nv-boxplot-outlier").attr("cx",.45*m.rangeBand()).attr("cy",function(a,b,c){return n(w(a,b,c))}).attr("r","3"),Q.exit().remove(),g=m.copy(),h=n.copy()}),E.renderEnd("nv-boxplot immediate"),b}var c,d,e,f,g,h,i={top:0,right:0,bottom:0,left:0},j=960,k=500,l=Math.floor(1e4*Math.random()),m=d3.scale.ordinal(),n=d3.scale.linear(),o=function(a){return a.label},p=function(a){return a.values.Q1},q=function(a){return a.values.Q2},r=function(a){return a.values.Q3},s=function(a){return a.values.whisker_low},t=function(a){return a.values.whisker_high},u=function(a){return a.color},v=function(a){return a.values.outliers},w=function(a,b,c){return a},x=function(a,b,c){return a},y=function(a,b,c){return void 0},z=a.utils.defaultColor(),A=null,B=d3.dispatch("elementMouseover","elementMouseout","elementMousemove","renderEnd"),C=250,D=null,E=a.utils.renderWatch(B,C);return b.dispatch=B,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return j},set:function(a){j=a}},height:{get:function(){return k},set:function(a){k=a}},maxBoxWidth:{get:function(){return D},set:function(a){D=a}},x:{get:function(){return o},set:function(a){o=a}},q1:{get:function(){return 
p},set:function(a){p=a}},q2:{get:function(){return q},set:function(a){q=a}},q3:{get:function(){return r},set:function(a){r=a}},wl:{get:function(){return s},set:function(a){s=a}},wh:{get:function(){return t},set:function(a){t=a}},itemColor:{get:function(){return u},set:function(a){u=a}},outliers:{get:function(){return v},set:function(a){
-v=a}},outlierValue:{get:function(){return w},set:function(a){w=a}},outlierLabel:{get:function(){return x},set:function(a){x=a}},outlierColor:{get:function(){return y},set:function(a){y=a}},xScale:{get:function(){return m},set:function(a){m=a}},yScale:{get:function(){return n},set:function(a){n=a}},xDomain:{get:function(){return c},set:function(a){c=a}},yDomain:{get:function(){return e},set:function(a){e=a}},xRange:{get:function(){return d},set:function(a){d=a}},yRange:{get:function(){return f},set:function(a){f=a}},id:{get:function(){return l},set:function(a){l=a}},y:{get:function(){return console.warn("BoxPlot 'y' chart option is deprecated. Please use model overrides instead."),{}},set:function(a){console.warn("BoxPlot 'y' chart option is deprecated. Please use model overrides instead.")}},margin:{get:function(){return i},set:function(a){i.top=void 0!==a.top?a.top:i.top,i.right=void 0!==a.right?a.right:i.right,i.bottom=void 0!==a.bottom?a.bottom:i.bottom,i.left=void 0!==a.left?a.left:i.left}},color:{get:function(){return z},set:function(b){z=a.utils.getColor(b)}},duration:{get:function(){return C},set:function(a){C=a,E.reset(C)}}}),a.utils.initOptions(b),b},a.models.boxPlotChart=function(){"use strict";function b(k){return t.reset(),t.models(e),l&&t.models(f),m&&t.models(g),k.each(function(k){var p=d3.select(this);a.utils.initSVG(p);var t=(i||parseInt(p.style("width"))||960)-h.left-h.right,u=(j||parseInt(p.style("height"))||400)-h.top-h.bottom;if(b.update=function(){r.beforeUpdate(),p.transition().duration(s).call(b)},b.container=this,!k||!k.length){var v=p.selectAll(".nv-noData").data([q]);return v.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),v.attr("x",h.left+t/2).attr("y",h.top+u/2).text(function(a){return a}),b}p.selectAll(".nv-noData").remove(),c=e.xScale(),d=e.yScale().clamp(!0);var w=p.selectAll("g.nv-wrap.nv-boxPlotWithAxes").data([k]),x=w.enter().append("g").attr("class","nvd3 nv-wrap nv-boxPlotWithAxes").append("g"),y=x.append("defs"),z=w.select("g");x.append("g").attr("class","nv-x nv-axis"),x.append("g").attr("class","nv-y nv-axis").append("g").attr("class","nv-zeroLine").append("line"),x.append("g").attr("class","nv-barsWrap"),z.attr("transform","translate("+h.left+","+h.top+")"),n&&z.select(".nv-y.nv-axis").attr("transform","translate("+t+",0)"),e.width(t).height(u);var A=z.select(".nv-barsWrap").datum(k.filter(function(a){return!a.disabled}));if(A.transition().call(e),y.append("clipPath").attr("id","nv-x-label-clip-"+e.id()).append("rect"),z.select("#nv-x-label-clip-"+e.id()+" rect").attr("width",c.rangeBand()*(o?2:1)).attr("height",16).attr("x",-c.rangeBand()/(o?1:2)),l){f.scale(c).ticks(a.utils.calcTicksX(t/100,k)).tickSize(-u,0),z.select(".nv-x.nv-axis").attr("transform","translate(0,"+d.range()[0]+")"),z.select(".nv-x.nv-axis").call(f);var B=z.select(".nv-x.nv-axis").selectAll("g");o&&B.selectAll("text").attr("transform",function(a,b,c){return"translate(0,"+(c%2===0?"5":"17")+")"})}m&&(g.scale(d).ticks(Math.floor(u/36)).tickSize(-t,0),z.select(".nv-y.nv-axis").call(g)),z.select(".nv-zeroLine line").attr("x1",0).attr("x2",t).attr("y1",d(0)).attr("y2",d(0))}),t.renderEnd("nv-boxplot chart immediate"),b}var c,d,e=a.models.boxPlot(),f=a.models.axis(),g=a.models.axis(),h={top:15,right:10,bottom:50,left:60},i=null,j=null,k=a.utils.getColor(),l=!0,m=!0,n=!1,o=!1,p=a.models.tooltip(),q="No Data Available.",r=d3.dispatch("beforeUpdate","renderEnd"),s=250;f.orient("bottom").showMaxMin(!1).tickFormat(function(a){return 
a}),g.orient(n?"right":"left").tickFormat(d3.format(",.1f")),p.duration(0);var t=a.utils.renderWatch(r,s);return e.dispatch.on("elementMouseover.tooltip",function(a){p.data(a).hidden(!1)}),e.dispatch.on("elementMouseout.tooltip",function(a){p.data(a).hidden(!0)}),e.dispatch.on("elementMousemove.tooltip",function(a){p()}),b.dispatch=r,b.boxplot=e,b.xAxis=f,b.yAxis=g,b.tooltip=p,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return i},set:function(a){i=a}},height:{get:function(){return j},set:function(a){j=a}},staggerLabels:{get:function(){return o},set:function(a){o=a}},showXAxis:{get:function(){return l},set:function(a){l=a}},showYAxis:{get:function(){return m},set:function(a){m=a}},tooltipContent:{get:function(){return p},set:function(a){p=a}},noData:{get:function(){return q},set:function(a){q=a}},margin:{get:function(){return h},set:function(a){h.top=void 0!==a.top?a.top:h.top,h.right=void 0!==a.right?a.right:h.right,h.bottom=void 0!==a.bottom?a.bottom:h.bottom,h.left=void 0!==a.left?a.left:h.left}},duration:{get:function(){return s},set:function(a){s=a,t.reset(s),e.duration(s),f.duration(s),g.duration(s)}},color:{get:function(){return k},set:function(b){k=a.utils.getColor(b),e.color(k)}},rightAlignYAxis:{get:function(){return n},set:function(a){n=a,g.orient(a?"right":"left")}}}),a.utils.inheritOptions(b,e),a.utils.initOptions(b),b},a.models.bullet=function(){"use strict";function b(a,b){var c=a.slice();a.sort(function(a,d){var e=c.indexOf(a),f=c.indexOf(d);return d3.descending(b[e],b[f])})}function c(e){return e.each(function(c,e){var s=p-d.left-d.right,y=q-d.top-d.bottom;r=d3.select(this),a.utils.initSVG(r);var z=g.call(this,c,e).slice(),A=h.call(this,c,e).slice(),B=i.call(this,c,e).slice(),C=j.call(this,c,e).slice(),D=k.call(this,c,e).slice(),E=l.call(this,c,e).slice(),F=m.call(this,c,e).slice(),G=n.call(this,c,e).slice();b(D,z),b(E,A),b(F,B),b(G,C),z.sort(d3.descending),A.sort(d3.descending),B.sort(d3.descending),C.sort(d3.descending);var H=d3.scale.linear().domain(d3.extent(d3.merge([o,z]))).range(f?[s,0]:[0,s]);this.__chart__||d3.scale.linear().domain([0,1/0]).range(H.range());this.__chart__=H;for(var I=(d3.min(z),d3.max(z),z[1],r.selectAll("g.nv-wrap.nv-bullet").data([c])),J=I.enter().append("g").attr("class","nvd3 nv-wrap nv-bullet"),K=J.append("g"),L=I.select("g"),e=0,M=z.length;M>e;e++){var N="nv-range nv-range"+e;2>=e&&(N=N+" nv-range"+w[e]),K.append("rect").attr("class",N)}K.append("rect").attr("class","nv-measure"),I.attr("transform","translate("+d.left+","+d.top+")");for(var O=function(a){return Math.abs(H(a)-H(0))},P=function(a){return H(0>a?a:0)},e=0,M=z.length;M>e;e++){var Q=z[e];L.select("rect.nv-range"+e).datum(Q).attr("height",y).transition().duration(x).attr("width",O(Q)).attr("x",P(Q))}L.select("rect.nv-measure").style("fill",t).attr("height",y/3).attr("y",y/3).on("mouseover",function(){u.elementMouseover({value:C[0],label:G[0]||"Current",color:d3.select(this).style("fill")})}).on("mousemove",function(){u.elementMousemove({value:C[0],label:G[0]||"Current",color:d3.select(this).style("fill")})}).on("mouseout",function(){u.elementMouseout({value:C[0],label:G[0]||"Current",color:d3.select(this).style("fill")})}).transition().duration(x).attr("width",0>C?H(0)-H(C[0]):H(C[0])-H(0)).attr("x",P(C));var R=y/6,S=A.map(function(a,b){return{value:a,label:E[b]}});K.selectAll("path.nv-markerTriangle").data(S).enter().append("path").attr("class","nv-markerTriangle").attr("d","M0,"+R+"L"+R+","+-R+" 
"+-R+","+-R+"Z").on("mouseover",function(a){u.elementMouseover({value:a.value,label:a.label||"Previous",color:d3.select(this).style("fill"),pos:[H(a.value),y/2]})}).on("mousemove",function(a){u.elementMousemove({value:a.value,label:a.label||"Previous",color:d3.select(this).style("fill")})}).on("mouseout",function(a,b){u.elementMouseout({value:a.value,label:a.label||"Previous",color:d3.select(this).style("fill")})}),L.selectAll("path.nv-markerTriangle").data(S).transition().duration(x).attr("transform",function(a){return"translate("+H(a.value)+","+y/2+")"});var T=B.map(function(a,b){return{value:a,label:F[b]}});K.selectAll("line.nv-markerLine").data(T).enter().append("line").attr("cursor","").attr("class","nv-markerLine").attr("x1",function(a){return H(a.value)}).attr("y1","2").attr("x2",function(a){return H(a.value)}).attr("y2",y-2).on("mouseover",function(a){u.elementMouseover({value:a.value,label:a.label||"Previous",color:d3.select(this).style("fill"),pos:[H(a.value),y/2]})}).on("mousemove",function(a){u.elementMousemove({value:a.value,label:a.label||"Previous",color:d3.select(this).style("fill")})}).on("mouseout",function(a,b){u.elementMouseout({value:a.value,label:a.label||"Previous",color:d3.select(this).style("fill")})}),L.selectAll("line.nv-markerLine").data(T).transition().duration(x).attr("x1",function(a){return H(a.value)}).attr("x2",function(a){return H(a.value)}),I.selectAll(".nv-range").on("mouseover",function(a,b){var c=D[b]||v[b];u.elementMouseover({value:a,label:c,color:d3.select(this).style("fill")})}).on("mousemove",function(){u.elementMousemove({value:C[0],label:G[0]||"Previous",color:d3.select(this).style("fill")})}).on("mouseout",function(a,b){var c=D[b]||v[b];u.elementMouseout({value:a,label:c,color:d3.select(this).style("fill")})})}),c}var d={top:0,right:0,bottom:0,left:0},e="left",f=!1,g=function(a){return a.ranges},h=function(a){return a.markers?a.markers:[]},i=function(a){return a.markerLines?a.markerLines:[0]},j=function(a){return a.measures},k=function(a){return a.rangeLabels?a.rangeLabels:[]},l=function(a){return a.markerLabels?a.markerLabels:[]},m=function(a){return a.markerLineLabels?a.markerLineLabels:[]},n=function(a){return a.measureLabels?a.measureLabels:[]},o=[0],p=380,q=30,r=null,s=null,t=a.utils.getColor(["#1f77b4"]),u=d3.dispatch("elementMouseover","elementMouseout","elementMousemove"),v=["Maximum","Mean","Minimum"],w=["Max","Avg","Min"],x=1e3;return c.dispatch=u,c.options=a.utils.optionsFunc.bind(c),c._options=Object.create({},{ranges:{get:function(){return g},set:function(a){g=a}},markers:{get:function(){return h},set:function(a){h=a}},measures:{get:function(){return j},set:function(a){j=a}},forceX:{get:function(){return o},set:function(a){o=a}},width:{get:function(){return p},set:function(a){p=a}},height:{get:function(){return q},set:function(a){q=a}},tickFormat:{get:function(){return s},set:function(a){s=a}},duration:{get:function(){return x},set:function(a){x=a}},margin:{get:function(){return d},set:function(a){d.top=void 0!==a.top?a.top:d.top,d.right=void 0!==a.right?a.right:d.right,d.bottom=void 0!==a.bottom?a.bottom:d.bottom,d.left=void 0!==a.left?a.left:d.left}},orient:{get:function(){return e},set:function(a){e=a,f="right"==e||"bottom"==e}},color:{get:function(){return t},set:function(b){t=a.utils.getColor(b)}}}),a.utils.initOptions(c),c},a.models.bulletChart=function(){"use strict";function b(d){return d.each(function(e,o){var p=d3.select(this);a.utils.initSVG(p);var 
q=a.utils.availableWidth(k,p,g),r=l-g.top-g.bottom;if(b.update=function(){b(d)},b.container=this,!e||!h.call(this,e,o))return a.utils.noData(b,p),b;p.selectAll(".nv-noData").remove();var s=h.call(this,e,o).slice().sort(d3.descending),t=i.call(this,e,o).slice().sort(d3.descending),u=j.call(this,e,o).slice().sort(d3.descending),v=p.selectAll("g.nv-wrap.nv-bulletChart").data([e]),w=v.enter().append("g").attr("class","nvd3 nv-wrap nv-bulletChart"),x=w.append("g"),y=v.select("g");x.append("g").attr("class","nv-bulletWrap"),x.append("g").attr("class","nv-titles"),v.attr("transform","translate("+g.left+","+g.top+")");var z=d3.scale.linear().domain([0,Math.max(s[0],t[0]||0,u[0])]).range(f?[q,0]:[0,q]),A=this.__chart__||d3.scale.linear().domain([0,1/0]).range(z.range());this.__chart__=z;var B=x.select(".nv-titles").append("g").attr("text-anchor","end").attr("transform","translate(-6,"+(l-g.top-g.bottom)/2+")");B.append("text").attr("class","nv-title").text(function(a){return a.title}),B.append("text").attr("class","nv-subtitle").attr("dy","1em").text(function(a){return a.subtitle}),c.width(q).height(r);var C=y.select(".nv-bulletWrap");d3.transition(C).call(c);var D=m||z.tickFormat(q/100),E=y.selectAll("g.nv-tick").data(z.ticks(n?n:q/50),function(a){return this.textContent||D(a)}),F=E.enter().append("g").attr("class","nv-tick").attr("transform",function(a){return"translate("+A(a)+",0)"}).style("opacity",1e-6);F.append("line").attr("y1",r).attr("y2",7*r/6),F.append("text").attr("text-anchor","middle").attr("dy","1em").attr("y",7*r/6).text(D);var G=d3.transition(E).transition().duration(c.duration()).attr("transform",function(a){return"translate("+z(a)+",0)"}).style("opacity",1);G.select("line").attr("y1",r).attr("y2",7*r/6),G.select("text").attr("y",7*r/6),d3.transition(E.exit()).transition().duration(c.duration()).attr("transform",function(a){return"translate("+z(a)+",0)"}).style("opacity",1e-6).remove()}),d3.timer.flush(),b}var c=a.models.bullet(),d=a.models.tooltip(),e="left",f=!1,g={top:5,right:40,bottom:20,left:120},h=function(a){return a.ranges},i=function(a){return a.markers?a.markers:[]},j=function(a){return a.measures},k=null,l=55,m=null,n=null,o=null,p=d3.dispatch();return d.duration(0).headerEnabled(!1),c.dispatch.on("elementMouseover.tooltip",function(a){a.series={key:a.label,value:a.value,color:a.color},d.data(a).hidden(!1)}),c.dispatch.on("elementMouseout.tooltip",function(a){d.hidden(!0)}),c.dispatch.on("elementMousemove.tooltip",function(a){d()}),b.bullet=c,b.dispatch=p,b.tooltip=d,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{ranges:{get:function(){return h},set:function(a){h=a}},markers:{get:function(){return i},set:function(a){i=a}},measures:{get:function(){return j},set:function(a){j=a}},width:{get:function(){return k},set:function(a){k=a}},height:{get:function(){return l},set:function(a){l=a}},tickFormat:{get:function(){return m},set:function(a){m=a}},ticks:{get:function(){return n},set:function(a){n=a}},noData:{get:function(){return o},set:function(a){o=a}},margin:{get:function(){return g},set:function(a){g.top=void 0!==a.top?a.top:g.top,g.right=void 0!==a.right?a.right:g.right,g.bottom=void 0!==a.bottom?a.bottom:g.bottom,g.left=void 0!==a.left?a.left:g.left}},orient:{get:function(){return e},set:function(a){e=a,f="right"==e||"bottom"==e}}}),a.utils.inheritOptions(b,c),a.utils.initOptions(b),b},a.models.candlestickBar=function(){"use strict";function b(x){return x.each(function(b){c=d3.select(this);var 
x=a.utils.availableWidth(i,c,h),y=a.utils.availableHeight(j,c,h);a.utils.initSVG(c);var A=x/b[0].values.length*.45;l.domain(d||d3.extent(b[0].values.map(n).concat(t))),v?l.range(f||[.5*x/b[0].values.length,x*(b[0].values.length-.5)/b[0].values.length]):l.range(f||[5+A/2,x-A/2-5]),m.domain(e||[d3.min(b[0].values.map(s).concat(u)),d3.max(b[0].values.map(r).concat(u))]).range(g||[y,0]),l.domain()[0]===l.domain()[1]&&(l.domain()[0]?l.domain([l.domain()[0]-.01*l.domain()[0],l.domain()[1]+.01*l.domain()[1]]):l.domain([-1,1])),m.domain()[0]===m.domain()[1]&&(m.domain()[0]?m.domain([m.domain()[0]+.01*m.domain()[0],m.domain()[1]-.01*m.domain()[1]]):m.domain([-1,1]));var B=d3.select(this).selectAll("g.nv-wrap.nv-candlestickBar").data([b[0].values]),C=B.enter().append("g").attr("class","nvd3 nv-wrap nv-candlestickBar"),D=C.append("defs"),E=C.append("g"),F=B.select("g");E.append("g").attr("class","nv-ticks"),B.attr("transform","translate("+h.left+","+h.top+")"),c.on("click",function(a,b){z.chartClick({data:a,index:b,pos:d3.event,id:k})}),D.append("clipPath").attr("id","nv-chart-clip-path-"+k).append("rect"),B.select("#nv-chart-clip-path-"+k+" rect").attr("width",x).attr("height",y),F.attr("clip-path",w?"url(#nv-chart-clip-path-"+k+")":"");var G=B.select(".nv-ticks").selectAll(".nv-tick").data(function(a){return a});G.exit().remove();var H=G.enter().append("g");G.attr("class",function(a,b,c){return(p(a,b)>q(a,b)?"nv-tick negative":"nv-tick positive")+" nv-tick-"+c+"-"+b});H.append("line").attr("class","nv-candlestick-lines").attr("transform",function(a,b){return"translate("+l(n(a,b))+",0)"}).attr("x1",0).attr("y1",function(a,b){return m(r(a,b))}).attr("x2",0).attr("y2",function(a,b){return m(s(a,b))}),H.append("rect").attr("class","nv-candlestick-rects nv-bars").attr("transform",function(a,b){return"translate("+(l(n(a,b))-A/2)+","+(m(o(a,b))-(p(a,b)>q(a,b)?m(q(a,b))-m(p(a,b)):0))+")"}).attr("x",0).attr("y",0).attr("width",A).attr("height",function(a,b){var c=p(a,b),d=q(a,b);return c>d?m(d)-m(c):m(c)-m(d)});G.select(".nv-candlestick-lines").transition().attr("transform",function(a,b){return"translate("+l(n(a,b))+",0)"}).attr("x1",0).attr("y1",function(a,b){return m(r(a,b))}).attr("x2",0).attr("y2",function(a,b){return m(s(a,b))}),G.select(".nv-candlestick-rects").transition().attr("transform",function(a,b){return"translate("+(l(n(a,b))-A/2)+","+(m(o(a,b))-(p(a,b)>q(a,b)?m(q(a,b))-m(p(a,b)):0))+")"}).attr("x",0).attr("y",0).attr("width",A).attr("height",function(a,b){var c=p(a,b),d=q(a,b);return c>d?m(d)-m(c):m(c)-m(d)})}),b}var c,d,e,f,g,h={top:0,right:0,bottom:0,left:0},i=null,j=null,k=Math.floor(1e4*Math.random()),l=d3.scale.linear(),m=d3.scale.linear(),n=function(a){return a.x},o=function(a){return a.y},p=function(a){return a.open},q=function(a){return a.close},r=function(a){return a.high},s=function(a){return a.low},t=[],u=[],v=!1,w=!0,x=a.utils.defaultColor(),y=!1,z=d3.dispatch("stateChange","changeState","renderEnd","chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout","elementMousemove");return b.highlightPoint=function(a,d){b.clearHighlights(),c.select(".nv-candlestickBar .nv-tick-0-"+a).classed("hover",d)},b.clearHighlights=function(){c.select(".nv-candlestickBar .nv-tick.hover").classed("hover",!1)},b.dispatch=z,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return i},set:function(a){i=a}},height:{get:function(){return j},set:function(a){j=a}},xScale:{get:function(){return 
l},set:function(a){l=a}},yScale:{get:function(){return m},set:function(a){m=a}},xDomain:{get:function(){return d},set:function(a){d=a}},yDomain:{get:function(){return e},set:function(a){e=a}},xRange:{get:function(){return f},set:function(a){f=a}},yRange:{get:function(){return g},set:function(a){g=a}},forceX:{get:function(){return t},set:function(a){t=a}},forceY:{get:function(){return u},set:function(a){u=a}},padData:{get:function(){return v},set:function(a){v=a}},clipEdge:{get:function(){return w},set:function(a){w=a}},id:{get:function(){return k},set:function(a){k=a}},interactive:{get:function(){return y},set:function(a){y=a}},x:{get:function(){return n},set:function(a){n=a}},y:{get:function(){return o},set:function(a){o=a}},open:{get:function(){return p()},set:function(a){p=a}},close:{get:function(){return q()},set:function(a){q=a}},high:{get:function(){return r},set:function(a){r=a}},low:{get:function(){return s},set:function(a){s=a}},margin:{get:function(){return h},set:function(a){h.top=void 0!=a.top?a.top:h.top,h.right=void 0!=a.right?a.right:h.right,h.bottom=void 0!=a.bottom?a.bottom:h.bottom,h.left=void 0!=a.left?a.left:h.left}},color:{get:function(){return x},set:function(b){x=a.utils.getColor(b)}}}),a.utils.initOptions(b),b},a.models.cumulativeLineChart=function(){"use strict";function b(l){return I.reset(),I.models(f),s&&I.models(g),t&&I.models(h),l.each(function(l){function B(a,c){d3.select(b.container).style("cursor","ew-resize")}function F(a,b){H.x=d3.event.x,H.i=Math.round(G.invert(H.x)),L()}function I(a,c){d3.select(b.container).style("cursor","auto"),z.index=H.i,D.stateChange(z)}function L(){ba.data([H]);var a=b.duration();b.duration(0),b.update(),b.duration(a)}var M=d3.select(this);a.utils.initSVG(M),M.classed("nv-chart-"+y,!0);var N=a.utils.availableWidth(p,M,m),O=a.utils.availableHeight(q,M,m);if(b.update=function(){0===E?M.call(b):M.transition().duration(E).call(b)},b.container=this,z.setter(K(l),b.update).getter(J(l)).update(),z.disabled=l.map(function(a){return!!a.disabled}),!A){var P;A={};for(P in z)z[P]instanceof Array?A[P]=z[P].slice(0):A[P]=z[P]}var Q=d3.behavior.drag().on("dragstart",B).on("drag",F).on("dragend",I);if(!(l&&l.length&&l.filter(function(a){return a.values.length}).length))return a.utils.noData(b,M),b;if(M.selectAll(".nv-noData").remove(),d=f.xScale(),e=f.yScale(),x)f.yDomain(null);else{var R=l.filter(function(a){return!a.disabled}).map(function(a,b){var c=d3.extent(a.values,f.y());return c[0]<-.95&&(c[0]=-.95),[(c[0]-c[1])/(1+c[1]),(c[1]-c[0])/(1+c[0])]}),S=[d3.min(R,function(a){return a[0]}),d3.max(R,function(a){return a[1]})];f.yDomain(S)}G.domain([0,l[0].values.length-1]).range([0,N]).clamp(!0);var l=c(H.i,l),T=w?"none":"all",U=M.selectAll("g.nv-wrap.nv-cumulativeLine").data([l]),V=U.enter().append("g").attr("class","nvd3 nv-wrap nv-cumulativeLine").append("g"),W=U.select("g");if(V.append("g").attr("class","nv-interactive"),V.append("g").attr("class","nv-x nv-axis").style("pointer-events","none"),V.append("g").attr("class","nv-y 
nv-axis"),V.append("g").attr("class","nv-background"),V.append("g").attr("class","nv-linesWrap").style("pointer-events",T),V.append("g").attr("class","nv-avgLinesWrap").style("pointer-events","none"),V.append("g").attr("class","nv-legendWrap"),V.append("g").attr("class","nv-controlsWrap"),r?(i.width(N),W.select(".nv-legendWrap").datum(l).call(i),n||i.height()===m.top||(m.top=i.height(),O=a.utils.availableHeight(q,M,m)),W.select(".nv-legendWrap").attr("transform","translate(0,"+-m.top+")")):W.select(".nv-legendWrap").selectAll("*").remove(),v){var X=[{key:"Re-scale y-axis",disabled:!x}];j.width(140).color(["#444","#444","#444"]).rightAlign(!1).margin({top:5,right:0,bottom:5,left:20}),W.select(".nv-controlsWrap").datum(X).attr("transform","translate(0,"+-m.top+")").call(j)}else W.select(".nv-controlsWrap").selectAll("*").remove();U.attr("transform","translate("+m.left+","+m.top+")"),u&&W.select(".nv-y.nv-axis").attr("transform","translate("+N+",0)");var Y=l.filter(function(a){return a.tempDisabled});U.select(".tempDisabled").remove(),Y.length&&U.append("text").attr("class","tempDisabled").attr("x",N/2).attr("y","-.71em").style("text-anchor","end").text(Y.map(function(a){return a.key}).join(", ")+" values cannot be calculated for this time period."),w&&(k.width(N).height(O).margin({left:m.left,top:m.top}).svgContainer(M).xScale(d),U.select(".nv-interactive").call(k)),V.select(".nv-background").append("rect"),W.select(".nv-background rect").attr("width",N).attr("height",O),f.y(function(a){return a.display.y}).width(N).height(O).color(l.map(function(a,b){return a.color||o(a,b)}).filter(function(a,b){return!l[b].disabled&&!l[b].tempDisabled}));var Z=W.select(".nv-linesWrap").datum(l.filter(function(a){return!a.disabled&&!a.tempDisabled}));Z.call(f),l.forEach(function(a,b){a.seriesIndex=b});var $=l.filter(function(a){return!a.disabled&&!!C(a)}),_=W.select(".nv-avgLinesWrap").selectAll("line").data($,function(a){return a.key}),aa=function(a){var b=e(C(a));return 0>b?0:b>O?O:b};_.enter().append("line").style("stroke-width",2).style("stroke-dasharray","10,10").style("stroke",function(a,b){return f.color()(a,a.seriesIndex)}).attr("x1",0).attr("x2",N).attr("y1",aa).attr("y2",aa),_.style("stroke-opacity",function(a){var b=e(C(a));return 0>b||b>O?0:1}).attr("x1",0).attr("x2",N).attr("y1",aa).attr("y2",aa),_.exit().remove();var ba=Z.selectAll(".nv-indexLine").data([H]);ba.enter().append("rect").attr("class","nv-indexLine").attr("width",3).attr("x",-2).attr("fill","red").attr("fill-opacity",.5).style("pointer-events","all").call(Q),ba.attr("transform",function(a){return"translate("+G(a.i)+",0)"}).attr("height",O),s&&(g.scale(d)._ticks(a.utils.calcTicksX(N/70,l)).tickSize(-O,0),W.select(".nv-x.nv-axis").attr("transform","translate(0,"+e.range()[0]+")"),W.select(".nv-x.nv-axis").call(g)),t&&(h.scale(e)._ticks(a.utils.calcTicksY(O/36,l)).tickSize(-N,0),W.select(".nv-y.nv-axis").call(h)),W.select(".nv-background rect").on("click",function(){H.x=d3.mouse(this)[0],H.i=Math.round(G.invert(H.x)),z.index=H.i,D.stateChange(z),L()}),f.dispatch.on("elementClick",function(a){H.i=a.pointIndex,H.x=G(H.i),z.index=H.i,D.stateChange(z),L()}),j.dispatch.on("legendClick",function(a,c){a.disabled=!a.disabled,x=!a.disabled,z.rescaleY=x,D.stateChange(z),b.update()}),i.dispatch.on("stateChange",function(a){for(var c in a)z[c]=a[c];D.stateChange(z),b.update()}),k.dispatch.on("elementMousemove",function(c){f.clearHighlights();var d,e,i,j=[];if(l.filter(function(a,b){return 
a.seriesIndex=b,!a.disabled}).forEach(function(g,h){e=a.interactiveBisect(g.values,c.pointXValue,b.x()),f.highlightPoint(h,e,!0);var k=g.values[e];"undefined"!=typeof k&&("undefined"==typeof d&&(d=k),"undefined"==typeof i&&(i=b.xScale()(b.x()(k,e))),j.push({key:g.key,value:b.y()(k,e),color:o(g,g.seriesIndex)}))}),j.length>2){var m=b.yScale().invert(c.mouseY),n=Math.abs(b.yScale().domain()[0]-b.yScale().domain()[1]),p=.03*n,q=a.nearestValueIndex(j.map(function(a){return a.value}),m,p);null!==q&&(j[q].highlight=!0)}var r=g.tickFormat()(b.x()(d,e),e);k.tooltip.valueFormatter(function(a,b){return h.tickFormat()(a)}).data({value:r,series:j})(),k.renderGuideLine(i)}),k.dispatch.on("elementMouseout",function(a){f.clearHighlights()}),D.on("changeState",function(a){"undefined"!=typeof a.disabled&&(l.forEach(function(b,c){b.disabled=a.disabled[c]}),z.disabled=a.disabled),"undefined"!=typeof a.index&&(H.i=a.index,H.x=G(H.i),z.index=a.index,ba.data([H])),"undefined"!=typeof a.rescaleY&&(x=a.rescaleY),b.update()})}),I.renderEnd("cumulativeLineChart immediate"),b}function c(a,b){return L||(L=f.y()),b.map(function(b,c){if(!b.values)return b;var d=b.values[a];if(null==d)return b;var e=L(d,a);return-.95>e&&!F?(b.tempDisabled=!0,b):(b.tempDisabled=!1,b.values=b.values.map(function(a,b){return a.display={y:(L(a,b)-e)/(1+e)},a}),b)})}var d,e,f=a.models.line(),g=a.models.axis(),h=a.models.axis(),i=a.models.legend(),j=a.models.legend(),k=a.interactiveGuideline(),l=a.models.tooltip(),m={top:30,right:30,bottom:50,left:60},n=null,o=a.utils.defaultColor(),p=null,q=null,r=!0,s=!0,t=!0,u=!1,v=!0,w=!1,x=!0,y=f.id(),z=a.utils.state(),A=null,B=null,C=function(a){return a.average},D=d3.dispatch("stateChange","changeState","renderEnd"),E=250,F=!1;z.index=0,z.rescaleY=x,g.orient("bottom").tickPadding(7),h.orient(u?"right":"left"),l.valueFormatter(function(a,b){return h.tickFormat()(a,b)}).headerFormatter(function(a,b){return g.tickFormat()(a,b)}),j.updateState(!1);var G=d3.scale.linear(),H={i:0,x:0},I=a.utils.renderWatch(D,E),J=function(a){return function(){return{active:a.map(function(a){return!a.disabled}),index:H.i,rescaleY:x}}},K=function(a){return function(b){void 0!==b.index&&(H.i=b.index),void 0!==b.rescaleY&&(x=b.rescaleY),void 0!==b.active&&a.forEach(function(a,c){a.disabled=!b.active[c]})}};f.dispatch.on("elementMouseover.tooltip",function(a){var c={x:b.x()(a.point),y:b.y()(a.point),color:a.point.color};a.point=c,l.data(a).hidden(!1)}),f.dispatch.on("elementMouseout.tooltip",function(a){l.hidden(!0)});var L=null;return b.dispatch=D,b.lines=f,b.legend=i,b.controls=j,b.xAxis=g,b.yAxis=h,b.interactiveLayer=k,b.state=z,b.tooltip=l,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return p},set:function(a){p=a}},height:{get:function(){return q},set:function(a){q=a}},rescaleY:{get:function(){return x},set:function(a){x=a}},showControls:{get:function(){return v},set:function(a){v=a}},showLegend:{get:function(){return r},set:function(a){r=a}},average:{get:function(){return C},set:function(a){C=a}},defaultState:{get:function(){return A},set:function(a){A=a}},noData:{get:function(){return B},set:function(a){B=a}},showXAxis:{get:function(){return s},set:function(a){s=a}},showYAxis:{get:function(){return t},set:function(a){t=a}},noErrorCheck:{get:function(){return F},set:function(a){F=a}},margin:{get:function(){return m},set:function(a){void 0!==a.top&&(m.top=a.top,n=a.top),m.right=void 0!==a.right?a.right:m.right,m.bottom=void 0!==a.bottom?a.bottom:m.bottom,m.left=void 
0!==a.left?a.left:m.left}},color:{get:function(){return o},set:function(b){o=a.utils.getColor(b),i.color(o)}},useInteractiveGuideline:{get:function(){return w},set:function(a){w=a,a===!0&&(b.interactive(!1),b.useVoronoi(!1))}},rightAlignYAxis:{get:function(){return u},set:function(a){u=a,h.orient(a?"right":"left")}},duration:{get:function(){return E},set:function(a){E=a,f.duration(E),g.duration(E),h.duration(E),I.reset(E)}}}),a.utils.inheritOptions(b,f),a.utils.initOptions(b),b},a.models.discreteBar=function(){"use strict";function b(m){return y.reset(),m.each(function(b){var m=k-j.left-j.right,x=l-j.top-j.bottom;c=d3.select(this),a.utils.initSVG(c),b.forEach(function(a,b){a.values.forEach(function(a){a.series=b})});var z=d&&e?[]:b.map(function(a){return a.values.map(function(a,b){return{x:p(a,b),y:q(a,b),y0:a.y0}})});n.domain(d||d3.merge(z).map(function(a){return a.x})).rangeBands(f||[0,m],.1),o.domain(e||d3.extent(d3.merge(z).map(function(a){return a.y}).concat(r))),t?o.range(g||[x-(o.domain()[0]<0?12:0),o.domain()[1]>0?12:0]):o.range(g||[x,0]),h=h||n,i=i||o.copy().range([o(0),o(0)]);var A=c.selectAll("g.nv-wrap.nv-discretebar").data([b]),B=A.enter().append("g").attr("class","nvd3 nv-wrap nv-discretebar"),C=B.append("g");A.select("g");C.append("g").attr("class","nv-groups"),A.attr("transform","translate("+j.left+","+j.top+")");var D=A.select(".nv-groups").selectAll(".nv-group").data(function(a){return a},function(a){return a.key});D.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6),D.exit().watchTransition(y,"discreteBar: exit groups").style("stroke-opacity",1e-6).style("fill-opacity",1e-6).remove(),D.attr("class",function(a,b){return"nv-group nv-series-"+b}).classed("hover",function(a){return a.hover}),D.watchTransition(y,"discreteBar: groups").style("stroke-opacity",1).style("fill-opacity",.75);var E=D.selectAll("g.nv-bar").data(function(a){return a.values});E.exit().remove();var F=E.enter().append("g").attr("transform",function(a,b,c){return"translate("+(n(p(a,b))+.05*n.rangeBand())+", "+o(0)+")"}).on("mouseover",function(a,b){d3.select(this).classed("hover",!0),v.elementMouseover({data:a,index:b,color:d3.select(this).style("fill")})}).on("mouseout",function(a,b){d3.select(this).classed("hover",!1),v.elementMouseout({data:a,index:b,color:d3.select(this).style("fill")})}).on("mousemove",function(a,b){v.elementMousemove({data:a,index:b,color:d3.select(this).style("fill")})}).on("click",function(a,b){var c=this;v.elementClick({data:a,index:b,color:d3.select(this).style("fill"),event:d3.event,element:c}),d3.event.stopPropagation()}).on("dblclick",function(a,b){v.elementDblClick({data:a,index:b,color:d3.select(this).style("fill")}),d3.event.stopPropagation()});F.append("rect").attr("height",0).attr("width",.9*n.rangeBand()/b.length),t?(F.append("text").attr("text-anchor","middle"),E.select("text").text(function(a,b){return u(q(a,b))}).watchTransition(y,"discreteBar: bars text").attr("x",.9*n.rangeBand()/2).attr("y",function(a,b){return q(a,b)<0?o(q(a,b))-o(0)+12:-4})):E.selectAll("text").remove(),E.attr("class",function(a,b){return q(a,b)<0?"nv-bar negative":"nv-bar positive"}).style("fill",function(a,b){return a.color||s(a,b)}).style("stroke",function(a,b){return a.color||s(a,b)}).select("rect").attr("class",w).watchTransition(y,"discreteBar: bars rect").attr("width",.9*n.rangeBand()/b.length),E.watchTransition(y,"discreteBar: bars").attr("transform",function(a,b){var 
c=n(p(a,b))+.05*n.rangeBand(),d=q(a,b)<0?o(0):o(0)-o(q(a,b))<1?o(0)-1:o(q(a,b));return"translate("+c+", "+d+")"}).select("rect").attr("height",function(a,b){return Math.max(Math.abs(o(q(a,b))-o(0)),1)}),h=n.copy(),i=o.copy()}),y.renderEnd("discreteBar immediate"),b}var c,d,e,f,g,h,i,j={top:0,right:0,bottom:0,left:0},k=960,l=500,m=Math.floor(1e4*Math.random()),n=d3.scale.ordinal(),o=d3.scale.linear(),p=function(a){return a.x},q=function(a){return a.y},r=[0],s=a.utils.defaultColor(),t=!1,u=d3.format(",.2f"),v=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout","elementMousemove","renderEnd"),w="discreteBar",x=250,y=a.utils.renderWatch(v,x);return b.dispatch=v,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return k},set:function(a){k=a}},height:{get:function(){return l},set:function(a){l=a}},forceY:{get:function(){return r},set:function(a){r=a;
-}},showValues:{get:function(){return t},set:function(a){t=a}},x:{get:function(){return p},set:function(a){p=a}},y:{get:function(){return q},set:function(a){q=a}},xScale:{get:function(){return n},set:function(a){n=a}},yScale:{get:function(){return o},set:function(a){o=a}},xDomain:{get:function(){return d},set:function(a){d=a}},yDomain:{get:function(){return e},set:function(a){e=a}},xRange:{get:function(){return f},set:function(a){f=a}},yRange:{get:function(){return g},set:function(a){g=a}},valueFormat:{get:function(){return u},set:function(a){u=a}},id:{get:function(){return m},set:function(a){m=a}},rectClass:{get:function(){return w},set:function(a){w=a}},margin:{get:function(){return j},set:function(a){j.top=void 0!==a.top?a.top:j.top,j.right=void 0!==a.right?a.right:j.right,j.bottom=void 0!==a.bottom?a.bottom:j.bottom,j.left=void 0!==a.left?a.left:j.left}},color:{get:function(){return s},set:function(b){s=a.utils.getColor(b)}},duration:{get:function(){return x},set:function(a){x=a,y.reset(x)}}}),a.utils.initOptions(b),b},a.models.discreteBarChart=function(){"use strict";function b(i){return y.reset(),y.models(e),p&&y.models(f),q&&y.models(g),i.each(function(i){var n=d3.select(this);a.utils.initSVG(n);var v=a.utils.availableWidth(l,n,j),y=a.utils.availableHeight(m,n,j);if(b.update=function(){w.beforeUpdate(),n.transition().duration(x).call(b)},b.container=this,!(i&&i.length&&i.filter(function(a){return a.values.length}).length))return a.utils.noData(b,n),b;n.selectAll(".nv-noData").remove(),c=e.xScale(),d=e.yScale().clamp(!0);var z=n.selectAll("g.nv-wrap.nv-discreteBarWithAxes").data([i]),A=z.enter().append("g").attr("class","nvd3 nv-wrap nv-discreteBarWithAxes").append("g"),B=A.append("defs"),C=z.select("g");A.append("g").attr("class","nv-x nv-axis"),A.append("g").attr("class","nv-y nv-axis").append("g").attr("class","nv-zeroLine").append("line"),A.append("g").attr("class","nv-barsWrap"),A.append("g").attr("class","nv-legendWrap"),C.attr("transform","translate("+j.left+","+j.top+")"),o?(h.width(v),C.select(".nv-legendWrap").datum(i).call(h),k||h.height()===j.top||(j.top=h.height(),y=a.utils.availableHeight(m,n,j)),z.select(".nv-legendWrap").attr("transform","translate(0,"+-j.top+")")):C.select(".nv-legendWrap").selectAll("*").remove(),r&&C.select(".nv-y.nv-axis").attr("transform","translate("+v+",0)"),e.width(v).height(y);var D=C.select(".nv-barsWrap").datum(i.filter(function(a){return!a.disabled}));if(D.transition().call(e),B.append("clipPath").attr("id","nv-x-label-clip-"+e.id()).append("rect"),C.select("#nv-x-label-clip-"+e.id()+" rect").attr("width",c.rangeBand()*(s?2:1)).attr("height",16).attr("x",-c.rangeBand()/(s?1:2)),p){f.scale(c)._ticks(a.utils.calcTicksX(v/100,i)).tickSize(-y,0),C.select(".nv-x.nv-axis").attr("transform","translate(0,"+(d.range()[0]+(e.showValues()&&d.domain()[0]<0?16:0))+")"),C.select(".nv-x.nv-axis").call(f);var E=C.select(".nv-x.nv-axis").selectAll("g");s&&E.selectAll("text").attr("transform",function(a,b,c){return"translate(0,"+(c%2==0?"5":"17")+")"}),u&&E.selectAll(".tick text").attr("transform","rotate("+u+" 0,0)").style("text-anchor",u>0?"start":"end"),t&&C.selectAll(".tick text").call(a.utils.wrapTicks,b.xAxis.rangeBand())}q&&(g.scale(d)._ticks(a.utils.calcTicksY(y/36,i)).tickSize(-v,0),C.select(".nv-y.nv-axis").call(g)),C.select(".nv-zeroLine line").attr("x1",0).attr("x2",r?-v:v).attr("y1",d(0)).attr("y2",d(0))}),y.renderEnd("discreteBar chart immediate"),b}var 
c,d,e=a.models.discreteBar(),f=a.models.axis(),g=a.models.axis(),h=a.models.legend(),i=a.models.tooltip(),j={top:15,right:10,bottom:50,left:60},k=null,l=null,m=null,n=a.utils.getColor(),o=!1,p=!0,q=!0,r=!1,s=!1,t=!1,u=0,v=null,w=d3.dispatch("beforeUpdate","renderEnd"),x=250;f.orient("bottom").showMaxMin(!1).tickFormat(function(a){return a}),g.orient(r?"right":"left").tickFormat(d3.format(",.1f")),i.duration(0).headerEnabled(!1).valueFormatter(function(a,b){return g.tickFormat()(a,b)}).keyFormatter(function(a,b){return f.tickFormat()(a,b)});var y=a.utils.renderWatch(w,x);return e.dispatch.on("elementMouseover.tooltip",function(a){a.series={key:b.x()(a.data),value:b.y()(a.data),color:a.color},i.data(a).hidden(!1)}),e.dispatch.on("elementMouseout.tooltip",function(a){i.hidden(!0)}),e.dispatch.on("elementMousemove.tooltip",function(a){i()}),b.dispatch=w,b.discretebar=e,b.legend=h,b.xAxis=f,b.yAxis=g,b.tooltip=i,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return l},set:function(a){l=a}},height:{get:function(){return m},set:function(a){m=a}},showLegend:{get:function(){return o},set:function(a){o=a}},staggerLabels:{get:function(){return s},set:function(a){s=a}},rotateLabels:{get:function(){return u},set:function(a){u=a}},wrapLabels:{get:function(){return t},set:function(a){t=!!a}},showXAxis:{get:function(){return p},set:function(a){p=a}},showYAxis:{get:function(){return q},set:function(a){q=a}},noData:{get:function(){return v},set:function(a){v=a}},margin:{get:function(){return j},set:function(a){void 0!==a.top&&(j.top=a.top,k=a.top),j.right=void 0!==a.right?a.right:j.right,j.bottom=void 0!==a.bottom?a.bottom:j.bottom,j.left=void 0!==a.left?a.left:j.left}},duration:{get:function(){return x},set:function(a){x=a,y.reset(x),e.duration(x),f.duration(x),g.duration(x)}},color:{get:function(){return n},set:function(b){n=a.utils.getColor(b),e.color(n),h.color(n)}},rightAlignYAxis:{get:function(){return r},set:function(a){r=a,g.orient(a?"right":"left")}}}),a.utils.inheritOptions(b,e),a.utils.initOptions(b),b},a.models.distribution=function(){"use strict";function b(k){return m.reset(),k.each(function(b){var k=(e-("x"===g?d.left+d.right:d.top+d.bottom),"x"==g?"y":"x"),l=d3.select(this);a.utils.initSVG(l),c=c||j;var n=l.selectAll("g.nv-distribution").data([b]),o=n.enter().append("g").attr("class","nvd3 nv-distribution"),p=(o.append("g"),n.select("g"));n.attr("transform","translate("+d.left+","+d.top+")");var q=p.selectAll("g.nv-dist").data(function(a){return a},function(a){return a.key});q.enter().append("g"),q.attr("class",function(a,b){return"nv-dist nv-series-"+b}).style("stroke",function(a,b){return i(a,b)});var r=q.selectAll("line.nv-dist"+g).data(function(a){return a.values});r.enter().append("line").attr(g+"1",function(a,b){return c(h(a,b))}).attr(g+"2",function(a,b){return c(h(a,b))}),m.transition(q.exit().selectAll("line.nv-dist"+g),"dist exit").attr(g+"1",function(a,b){return j(h(a,b))}).attr(g+"2",function(a,b){return j(h(a,b))}).style("stroke-opacity",0).remove(),r.attr("class",function(a,b){return"nv-dist"+g+" nv-dist"+g+"-"+b}).attr(k+"1",0).attr(k+"2",f),m.transition(r,"dist").attr(g+"1",function(a,b){return j(h(a,b))}).attr(g+"2",function(a,b){return j(h(a,b))}),c=j.copy()}),m.renderEnd("distribution immediate"),b}var c,d={top:0,right:0,bottom:0,left:0},e=400,f=8,g="x",h=function(a){return a[g]},i=a.utils.defaultColor(),j=d3.scale.linear(),k=250,l=d3.dispatch("renderEnd"),m=a.utils.renderWatch(l,k);return 
b.options=a.utils.optionsFunc.bind(b),b.dispatch=l,b.margin=function(a){return arguments.length?(d.top="undefined"!=typeof a.top?a.top:d.top,d.right="undefined"!=typeof a.right?a.right:d.right,d.bottom="undefined"!=typeof a.bottom?a.bottom:d.bottom,d.left="undefined"!=typeof a.left?a.left:d.left,b):d},b.width=function(a){return arguments.length?(e=a,b):e},b.axis=function(a){return arguments.length?(g=a,b):g},b.size=function(a){return arguments.length?(f=a,b):f},b.getData=function(a){return arguments.length?(h=d3.functor(a),b):h},b.scale=function(a){return arguments.length?(j=a,b):j},b.color=function(c){return arguments.length?(i=a.utils.getColor(c),b):i},b.duration=function(a){return arguments.length?(k=a,m.reset(k),b):k},b},a.models.focus=function(b){"use strict";function c(u){return t.reset(),t.models(b),m&&t.models(f),n&&t.models(g),u.each(function(t){function u(a){var b=+("e"==a),c=b?1:-1,d=z/3;return"M"+.5*c+","+d+"A6,6 0 0 "+b+" "+6.5*c+","+(d+6)+"V"+(2*d-6)+"A6,6 0 0 "+b+" "+.5*c+","+2*d+"ZM"+2.5*c+","+(d+8)+"V"+(2*d-8)+"M"+4.5*c+","+(d+8)+"V"+(2*d-8)}function v(){h.empty()||h.extent(p),E.data([h.empty()?d.domain():p]).each(function(a,b){var c=d(a[0])-d.range()[0],e=y-d(a[1]);d3.select(this).select(".left").attr("width",0>c?0:c),d3.select(this).select(".right").attr("x",d(a[1])).attr("width",0>e?0:e)})}function w(a){p=h.empty()?null:h.extent();var b=h.empty()?d.domain():h.extent();r.brush({extent:b,brush:h}),v(),a&&r.onBrush(b)}var x=d3.select(this);a.utils.initSVG(x);var y=a.utils.availableWidth(k,x,i),z=l-i.top-i.bottom;c.update=function(){0===q?x.call(c):x.transition().duration(q).call(c)},c.container=this,d=b.xScale(),e=b.yScale();var A=x.selectAll("g.nv-focus").data([t]),B=A.enter().append("g").attr("class","nvd3 nv-focus").append("g"),C=A.select("g");A.attr("transform","translate("+i.left+","+i.top+")"),B.append("g").attr("class","nv-background").append("rect"),B.append("g").attr("class","nv-x nv-axis"),B.append("g").attr("class","nv-y nv-axis"),B.append("g").attr("class","nv-contentWrap"),B.append("g").attr("class","nv-brushBackground"),B.append("g").attr("class","nv-x nv-brush"),o&&C.select(".nv-y.nv-axis").attr("transform","translate("+y+",0)"),C.select(".nv-background rect").attr("width",y).attr("height",z),b.width(y).height(z).color(t.map(function(a,b){return a.color||j(a,b)}).filter(function(a,b){return!t[b].disabled}));var D=C.select(".nv-contentWrap").datum(t.filter(function(a){return!a.disabled}));d3.transition(D).call(b),h.x(d).on("brush",function(){w(s)}),h.on("brushend",function(){s||r.onBrush(h.empty()?d.domain():h.extent())}),p&&h.extent(p);var E=C.select(".nv-brushBackground").selectAll("g").data([p||h.extent()]),F=E.enter().append("g");F.append("rect").attr("class","left").attr("x",0).attr("y",0).attr("height",z),F.append("rect").attr("class","right").attr("x",0).attr("y",0).attr("height",z);var G=C.select(".nv-x.nv-brush").call(h);G.selectAll("rect").attr("height",z),G.selectAll(".resize").append("path").attr("d",u),w(!0),C.select(".nv-background rect").attr("width",y).attr("height",z),m&&(f.scale(d)._ticks(a.utils.calcTicksX(y/100,t)).tickSize(-z,0),C.select(".nv-x.nv-axis").attr("transform","translate(0,"+e.range()[0]+")"),d3.transition(C.select(".nv-x.nv-axis")).call(f)),n&&(g.scale(e)._ticks(a.utils.calcTicksY(z/36,t)).tickSize(-y,0),d3.transition(C.select(".nv-y.nv-axis")).call(g)),C.select(".nv-x.nv-axis").attr("transform","translate(0,"+e.range()[0]+")")}),t.renderEnd("focus immediate"),c}var 
d,e,b=b||a.models.line(),f=a.models.axis(),g=a.models.axis(),h=d3.svg.brush(),i={top:10,right:0,bottom:30,left:0},j=a.utils.defaultColor(),k=null,l=70,m=!0,n=!1,o=!1,p=null,q=250,r=d3.dispatch("brush","onBrush","renderEnd"),s=!0;b.interactive(!1),b.pointActive(function(a){return!1});var t=a.utils.renderWatch(r,q);return c.dispatch=r,c.content=b,c.brush=h,c.xAxis=f,c.yAxis=g,c.options=a.utils.optionsFunc.bind(c),c._options=Object.create({},{width:{get:function(){return k},set:function(a){k=a}},height:{get:function(){return l},set:function(a){l=a}},showXAxis:{get:function(){return m},set:function(a){m=a}},showYAxis:{get:function(){return n},set:function(a){n=a}},brushExtent:{get:function(){return p},set:function(a){p=a}},syncBrushing:{get:function(){return s},set:function(a){s=a}},margin:{get:function(){return i},set:function(a){i.top=void 0!==a.top?a.top:i.top,i.right=void 0!==a.right?a.right:i.right,i.bottom=void 0!==a.bottom?a.bottom:i.bottom,i.left=void 0!==a.left?a.left:i.left}},duration:{get:function(){return q},set:function(a){q=a,t.reset(q),b.duration(q),f.duration(q),g.duration(q)}},color:{get:function(){return j},set:function(c){j=a.utils.getColor(c),b.color(j)}},interpolate:{get:function(){return b.interpolate()},set:function(a){b.interpolate(a)}},xTickFormat:{get:function(){return f.tickFormat()},set:function(a){f.tickFormat(a)}},yTickFormat:{get:function(){return g.tickFormat()},set:function(a){g.tickFormat(a)}},x:{get:function(){return b.x()},set:function(a){b.x(a)}},y:{get:function(){return b.y()},set:function(a){b.y(a)}},rightAlignYAxis:{get:function(){return o},set:function(a){o=a,g.orient(o?"right":"left")}}}),a.utils.inheritOptions(c,b),a.utils.initOptions(c),c},a.models.forceDirectedGraph=function(){"use strict";function b(g){return u.reset(),g.each(function(g){f=d3.select(this),a.utils.initSVG(f);var j=a.utils.availableWidth(d,f,c),u=a.utils.availableHeight(e,f,c);if(f.attr("width",j).attr("height",u),!(g&&g.links&&g.nodes))return a.utils.noData(b,f),b;f.selectAll(".nv-noData").remove(),f.selectAll("*").remove();var v=new Set;g.nodes.forEach(function(a){var b=Object.keys(a);b.forEach(function(a){v.add(a)})});var w=d3.layout.force().nodes(g.nodes).links(g.links).size([j,u]).linkStrength(k).friction(l).linkDistance(m).charge(n).gravity(o).theta(p).alpha(q).start(),x=f.selectAll(".link").data(g.links).enter().append("line").attr("class","nv-force-link").style("stroke-width",function(a){return Math.sqrt(a.value)}),y=f.selectAll(".node").data(g.nodes).enter().append("g").attr("class","nv-force-node").call(w.drag);y.append("circle").attr("r",r).style("fill",function(a){return h(a)}).on("mouseover",function(a){f.select(".nv-series-"+a.seriesIndex+" .nv-distx-"+a.pointIndex).attr("y1",a.py),f.select(".nv-series-"+a.seriesIndex+" .nv-disty-"+a.pointIndex).attr("x2",a.px);var b=h(a);a.series=[],v.forEach(function(c){a.series.push({color:b,key:c,value:a[c]})}),i.data(a).hidden(!1)}).on("mouseout",function(a){i.hidden(!0)}),i.headerFormatter(function(a){return"Node"}),t(x),s(y),w.on("tick",function(){x.attr("x1",function(a){return a.source.x}).attr("y1",function(a){return a.source.y}).attr("x2",function(a){return a.target.x}).attr("y2",function(a){return a.target.y}),y.attr("transform",function(a){return"translate("+a.x+", "+a.y+")"})})}),b}var c={top:2,right:0,bottom:2,left:0},d=400,e=32,f=null,g=d3.dispatch("renderEnd"),h=a.utils.getColor(["#000"]),i=a.models.tooltip(),j=null,k=.1,l=.9,m=30,n=-120,o=.1,p=.8,q=.1,r=5,s=function(a){},t=function(a){},u=a.utils.renderWatch(g);return 
b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return d},set:function(a){d=a}},height:{get:function(){return e},set:function(a){e=a}},linkStrength:{get:function(){return k},set:function(a){k=a}},friction:{get:function(){return l},set:function(a){l=a}},linkDist:{get:function(){return m},set:function(a){m=a}},charge:{get:function(){return n},set:function(a){n=a}},gravity:{get:function(){return o},set:function(a){o=a}},theta:{get:function(){return p},set:function(a){p=a}},alpha:{get:function(){return q},set:function(a){q=a}},radius:{get:function(){return r},set:function(a){r=a}},x:{get:function(){return getX},set:function(a){getX=d3.functor(a)}},y:{get:function(){return getY},set:function(a){getY=d3.functor(a)}},margin:{get:function(){return c},set:function(a){c.top=void 0!==a.top?a.top:c.top,c.right=void 0!==a.right?a.right:c.right,c.bottom=void 0!==a.bottom?a.bottom:c.bottom,c.left=void 0!==a.left?a.left:c.left}},color:{get:function(){return h},set:function(b){h=a.utils.getColor(b)}},noData:{get:function(){return j},set:function(a){j=a}},nodeExtras:{get:function(){return s},set:function(a){s=a}},linkExtras:{get:function(){return t},set:function(a){t=a}}}),b.dispatch=g,b.tooltip=i,a.utils.initOptions(b),b},a.models.furiousLegend=function(){"use strict";function b(r){function s(a,b){return"furious"!=q?"#000":o?a.disengaged?h(a,b):"#fff":o?void 0:a.disabled?h(a,b):"#fff"}function t(a,b){return o&&"furious"==q?a.disengaged?"#fff":h(a,b):a.disabled?"#fff":h(a,b)}return r.each(function(b){var r=d-c.left-c.right,u=d3.select(this);a.utils.initSVG(u);var v=u.selectAll("g.nv-legend").data([b]),w=(v.enter().append("g").attr("class","nvd3 nv-legend").append("g"),v.select("g"));v.attr("transform","translate("+c.left+","+c.top+")");var x,y=w.selectAll(".nv-series").data(function(a){return"furious"!=q?a:a.filter(function(a){return o?!0:!a.disengaged})}),z=y.enter().append("g").attr("class","nv-series");if("classic"==q)z.append("circle").style("stroke-width",2).attr("class","nv-legend-symbol").attr("r",5),x=y.select("circle");else if("furious"==q){z.append("rect").style("stroke-width",2).attr("class","nv-legend-symbol").attr("rx",3).attr("ry",3),x=y.select("rect"),z.append("g").attr("class","nv-check-box").property("innerHTML",'<path d="M0.5,5 L22.5,5 L22.5,26.5 L0.5,26.5 L0.5,5 Z" class="nv-box"></path><path d="M5.5,12.8618467 L11.9185089,19.2803556 L31,0.198864511" class="nv-check"></path>').attr("transform","translate(-10,-8)scale(0.5)");var A=y.select(".nv-check-box");A.each(function(a,b){d3.select(this).selectAll("path").attr("stroke",s(a,b))})}z.append("text").attr("text-anchor","start").attr("class","nv-legend-text").attr("dy",".32em").attr("dx","8");var B=y.select("text.nv-legend-text");y.on("mouseover",function(a,b){p.legendMouseover(a,b)}).on("mouseout",function(a,b){p.legendMouseout(a,b)}).on("click",function(a,b){p.legendClick(a,b);var c=y.data();if(m){if("classic"==q)n?(c.forEach(function(a){a.disabled=!0}),a.disabled=!1):(a.disabled=!a.disabled,c.every(function(a){return a.disabled})&&c.forEach(function(a){a.disabled=!1}));else if("furious"==q)if(o)a.disengaged=!a.disengaged,a.userDisabled=void 0==a.userDisabled?!!a.disabled:a.userDisabled,a.disabled=a.disengaged||a.userDisabled;else if(!o){a.disabled=!a.disabled,a.userDisabled=a.disabled;var d=c.filter(function(a){return!a.disengaged});d.every(function(a){return 
a.userDisabled})&&c.forEach(function(a){a.disabled=a.userDisabled=!1})}p.stateChange({disabled:c.map(function(a){return!!a.disabled}),disengaged:c.map(function(a){return!!a.disengaged})})}}).on("dblclick",function(a,b){if(("furious"!=q||!o)&&(p.legendDblclick(a,b),m)){var c=y.data();c.forEach(function(a){a.disabled=!0,"furious"==q&&(a.userDisabled=a.disabled)}),a.disabled=!1,"furious"==q&&(a.userDisabled=a.disabled),p.stateChange({disabled:c.map(function(a){return!!a.disabled})})}}),y.classed("nv-disabled",function(a){return a.userDisabled}),y.exit().remove(),B.attr("fill",s).text(function(a){return g(f(a))});var C;switch(q){case"furious":C=23;break;case"classic":C=20}if(j){var D=[];y.each(function(b,c){var d;if(g(f(b))&&g(f(b)).length>i){var e=g(f(b)).substring(0,i);d=d3.select(this).select("text").text(e+"..."),d3.select(this).append("svg:title").text(g(f(b)))}else d=d3.select(this).select("text");var h;try{if(h=d.node().getComputedTextLength(),0>=h)throw Error()}catch(j){h=a.utils.calcApproxTextWidth(d)}D.push(h+k)});for(var E=0,F=0,G=[];r>F&&E<D.length;)G[E]=D[E],F+=D[E++];for(0===E&&(E=1);F>r&&E>1;){G=[],E--;for(var H=0;H<D.length;H++)D[H]>(G[H%E]||0)&&(G[H%E]=D[H]);F=G.reduce(function(a,b,c,d){return a+b})}for(var I=[],J=0,K=0;E>J;J++)I[J]=K,K+=G[J];y.attr("transform",function(a,b){return"translate("+I[b%E]+","+(5+Math.floor(b/E)*C)+")"}),l?w.attr("transform","translate("+(d-c.right-F)+","+c.top+")"):w.attr("transform","translate(0,"+c.top+")"),e=c.top+c.bottom+Math.ceil(D.length/E)*C}else{var L,M=5,N=5,O=0;y.attr("transform",function(a,b){var e=d3.select(this).select("text").node().getComputedTextLength()+k;return L=N,d<c.left+c.right+L+e&&(N=L=5,M+=C),N+=e,N>O&&(O=N),"translate("+L+","+M+")"}),w.attr("transform","translate("+(d-c.right-O)+","+c.top+")"),e=c.top+c.bottom+M+15}"furious"==q&&x.attr("width",function(a,b){return B[0][b].getComputedTextLength()+27}).attr("height",18).attr("y",-9).attr("x",-15),x.style("fill",t).style("stroke",function(a,b){return a.color||h(a,b)})}),b}var c={top:5,right:0,bottom:5,left:0},d=400,e=20,f=function(a){return a.key},g=function(a){return a},h=a.utils.getColor(),i=20,j=!0,k=28,l=!0,m=!0,n=!1,o=!1,p=d3.dispatch("legendClick","legendDblclick","legendMouseover","legendMouseout","stateChange"),q="classic";return b.dispatch=p,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return d},set:function(a){d=a}},height:{get:function(){return e},set:function(a){e=a}},key:{get:function(){return f},set:function(a){f=a}},keyFormatter:{get:function(){return g},set:function(a){g=a}},align:{get:function(){return j},set:function(a){j=a}},rightAlign:{get:function(){return l},set:function(a){l=a}},maxKeyLength:{get:function(){return i},set:function(a){i=a}},padding:{get:function(){return k},set:function(a){k=a}},updateState:{get:function(){return m},set:function(a){m=a}},radioButtonMode:{get:function(){return n},set:function(a){n=a}},expanded:{get:function(){return o},set:function(a){o=a}},vers:{get:function(){return q},set:function(a){q=a}},margin:{get:function(){return c},set:function(a){c.top=void 0!==a.top?a.top:c.top,c.right=void 0!==a.right?a.right:c.right,c.bottom=void 0!==a.bottom?a.bottom:c.bottom,c.left=void 0!==a.left?a.left:c.left}},color:{get:function(){return h},set:function(b){h=a.utils.getColor(b)}}}),a.utils.initOptions(b),b},a.models.historicalBar=function(){"use strict";function b(x){return x.each(function(b){w.reset(),k=d3.select(this);var 
x=a.utils.availableWidth(h,k,g),y=a.utils.availableHeight(i,k,g);a.utils.initSVG(k),l.domain(c||d3.extent(b[0].values.map(n).concat(p))),r?l.range(e||[.5*x/b[0].values.length,x*(b[0].values.length-.5)/b[0].values.length]):l.range(e||[0,x]),m.domain(d||d3.extent(b[0].values.map(o).concat(q))).range(f||[y,0]),l.domain()[0]===l.domain()[1]&&(l.domain()[0]?l.domain([l.domain()[0]-.01*l.domain()[0],l.domain()[1]+.01*l.domain()[1]]):l.domain([-1,1])),m.domain()[0]===m.domain()[1]&&(m.domain()[0]?m.domain([m.domain()[0]+.01*m.domain()[0],m.domain()[1]-.01*m.domain()[1]]):m.domain([-1,1]));var z=k.selectAll("g.nv-wrap.nv-historicalBar-"+j).data([b[0].values]),A=z.enter().append("g").attr("class","nvd3 nv-wrap nv-historicalBar-"+j),B=A.append("defs"),C=A.append("g"),D=z.select("g");C.append("g").attr("class","nv-bars"),z.attr("transform","translate("+g.left+","+g.top+")"),k.on("click",function(a,b){u.chartClick({data:a,index:b,pos:d3.event,id:j})}),B.append("clipPath").attr("id","nv-chart-clip-path-"+j).append("rect"),z.select("#nv-chart-clip-path-"+j+" rect").attr("width",x).attr("height",y),D.attr("clip-path",s?"url(#nv-chart-clip-path-"+j+")":"");var E=z.select(".nv-bars").selectAll(".nv-bar").data(function(a){return a},function(a,b){return n(a,b)});E.exit().remove(),E.enter().append("rect").attr("x",0).attr("y",function(b,c){return a.utils.NaNtoZero(m(Math.max(0,o(b,c))))}).attr("height",function(b,c){return a.utils.NaNtoZero(Math.abs(m(o(b,c))-m(0)))}).attr("transform",function(a,c){return"translate("+(l(n(a,c))-x/b[0].values.length*.45)+",0)"}).on("mouseover",function(a,b){v&&(d3.select(this).classed("hover",!0),u.elementMouseover({data:a,index:b,color:d3.select(this).style("fill")}))}).on("mouseout",function(a,b){v&&(d3.select(this).classed("hover",!1),u.elementMouseout({data:a,index:b,color:d3.select(this).style("fill")}))}).on("mousemove",function(a,b){v&&u.elementMousemove({data:a,index:b,color:d3.select(this).style("fill")})}).on("click",function(a,b){if(v){var c=this;u.elementClick({data:a,index:b,color:d3.select(this).style("fill"),event:d3.event,element:c}),d3.event.stopPropagation()}}).on("dblclick",function(a,b){v&&(u.elementDblClick({data:a,index:b,color:d3.select(this).style("fill")}),d3.event.stopPropagation())}),E.attr("fill",function(a,b){return t(a,b)}).attr("class",function(a,b,c){return(o(a,b)<0?"nv-bar negative":"nv-bar positive")+" nv-bar-"+c+"-"+b}).watchTransition(w,"bars").attr("transform",function(a,c){return"translate("+(l(n(a,c))-x/b[0].values.length*.45)+",0)"}).attr("width",x/b[0].values.length*.9),E.watchTransition(w,"bars").attr("y",function(b,c){var d=o(b,c)<0?m(0):m(0)-m(o(b,c))<1?m(0)-1:m(o(b,c));return a.utils.NaNtoZero(d)}).attr("height",function(b,c){return a.utils.NaNtoZero(Math.max(Math.abs(m(o(b,c))-m(0)),1))})}),w.renderEnd("historicalBar immediate"),b}var c,d,e,f,g={top:0,right:0,bottom:0,left:0},h=null,i=null,j=Math.floor(1e4*Math.random()),k=null,l=d3.scale.linear(),m=d3.scale.linear(),n=function(a){return a.x},o=function(a){return a.y},p=[],q=[0],r=!1,s=!0,t=a.utils.defaultColor(),u=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout","elementMousemove","renderEnd"),v=!0,w=a.utils.renderWatch(u,0);return b.highlightPoint=function(a,b){k.select(".nv-bars .nv-bar-0-"+a).classed("hover",b)},b.clearHighlights=function(){k.select(".nv-bars .nv-bar.hover").classed("hover",!1)},b.dispatch=u,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return 
h},set:function(a){h=a}},height:{get:function(){return i},set:function(a){i=a}},forceX:{get:function(){return p},set:function(a){p=a}},forceY:{get:function(){return q},set:function(a){q=a}},padData:{get:function(){return r},set:function(a){r=a}},x:{get:function(){return n},set:function(a){n=a}},y:{get:function(){return o},set:function(a){o=a}},xScale:{get:function(){return l},set:function(a){l=a}},yScale:{get:function(){return m},set:function(a){m=a}},xDomain:{get:function(){return c},set:function(a){c=a}},yDomain:{get:function(){return d},set:function(a){d=a}},xRange:{get:function(){return e},set:function(a){e=a}},yRange:{get:function(){return f},set:function(a){f=a}},clipEdge:{get:function(){return s},set:function(a){s=a}},id:{get:function(){return j},set:function(a){j=a}},interactive:{get:function(){return v},set:function(a){v=a}},margin:{get:function(){return g},set:function(a){g.top=void 0!==a.top?a.top:g.top,g.right=void 0!==a.right?a.right:g.right,g.bottom=void 0!==a.bottom?a.bottom:g.bottom,g.left=void 0!==a.left?a.left:g.left}},color:{get:function(){return t},set:function(b){t=a.utils.getColor(b)}}}),a.utils.initOptions(b),b},a.models.historicalBarChart=function(b){"use strict";function c(b){return b.each(function(k){A.reset(),A.models(f),r&&A.models(g),s&&A.models(h);var x=d3.select(this);a.utils.initSVG(x);var B=a.utils.availableWidth(o,x,l),C=a.utils.availableHeight(p,x,l);if(c.update=function(){x.transition().duration(z).call(c)},c.container=this,v.disabled=k.map(function(a){return!!a.disabled}),!w){var D;w={};for(D in v)v[D]instanceof Array?w[D]=v[D].slice(0):w[D]=v[D]}if(!(k&&k.length&&k.filter(function(a){return a.values.length}).length))return a.utils.noData(c,x),c;x.selectAll(".nv-noData").remove(),d=f.xScale(),e=f.yScale();var E=x.selectAll("g.nv-wrap.nv-historicalBarChart").data([k]),F=E.enter().append("g").attr("class","nvd3 nv-wrap nv-historicalBarChart").append("g"),G=E.select("g");F.append("g").attr("class","nv-x nv-axis"),F.append("g").attr("class","nv-y nv-axis"),F.append("g").attr("class","nv-barsWrap"),F.append("g").attr("class","nv-legendWrap"),F.append("g").attr("class","nv-interactive"),q?(i.width(B),G.select(".nv-legendWrap").datum(k).call(i),m||i.height()===l.top||(l.top=i.height(),C=a.utils.availableHeight(p,x,l)),E.select(".nv-legendWrap").attr("transform","translate(0,"+-l.top+")")):G.select(".nv-legendWrap").selectAll("*").remove(),E.attr("transform","translate("+l.left+","+l.top+")"),t&&G.select(".nv-y.nv-axis").attr("transform","translate("+B+",0)"),u&&(j.width(B).height(C).margin({left:l.left,top:l.top}).svgContainer(x).xScale(d),E.select(".nv-interactive").call(j)),f.width(B).height(C).color(k.map(function(a,b){return a.color||n(a,b)}).filter(function(a,b){return!k[b].disabled}));var H=G.select(".nv-barsWrap").datum(k.filter(function(a){return!a.disabled}));H.transition().call(f),r&&(g.scale(d)._ticks(a.utils.calcTicksX(B/100,k)).tickSize(-C,0),G.select(".nv-x.nv-axis").attr("transform","translate(0,"+e.range()[0]+")"),G.select(".nv-x.nv-axis").transition().call(g)),s&&(h.scale(e)._ticks(a.utils.calcTicksY(C/36,k)).tickSize(-B,0),G.select(".nv-y.nv-axis").transition().call(h)),j.dispatch.on("elementMousemove",function(b){f.clearHighlights();var d,e,i,l=[];k.filter(function(a,b){return a.seriesIndex=b,!a.disabled}).forEach(function(g,h){e=a.interactiveBisect(g.values,b.pointXValue,c.x()),f.highlightPoint(e,!0);var j=g.values[e];void 0!==j&&(void 0===d&&(d=j),void 
0===i&&(i=c.xScale()(c.x()(j,e))),l.push({key:g.key,value:c.y()(j,e),color:n(g,g.seriesIndex),data:g.values[e]}))});var m=g.tickFormat()(c.x()(d,e));j.tooltip.valueFormatter(function(a,b){return h.tickFormat()(a)}).data({value:m,index:e,series:l})(),j.renderGuideLine(i)}),j.dispatch.on("elementMouseout",function(a){y.tooltipHide(),f.clearHighlights()}),i.dispatch.on("legendClick",function(a,d){a.disabled=!a.disabled,k.filter(function(a){return!a.disabled}).length||k.map(function(a){return a.disabled=!1,E.selectAll(".nv-series").classed("disabled",!1),a}),v.disabled=k.map(function(a){return!!a.disabled}),y.stateChange(v),b.transition().call(c)}),i.dispatch.on("legendDblclick",function(a){k.forEach(function(a){a.disabled=!0}),a.disabled=!1,v.disabled=k.map(function(a){return!!a.disabled}),y.stateChange(v),c.update()}),y.on("changeState",function(a){"undefined"!=typeof a.disabled&&(k.forEach(function(b,c){b.disabled=a.disabled[c]}),v.disabled=a.disabled),c.update()})}),A.renderEnd("historicalBarChart immediate"),c}var d,e,f=b||a.models.historicalBar(),g=a.models.axis(),h=a.models.axis(),i=a.models.legend(),j=a.interactiveGuideline(),k=a.models.tooltip(),l={top:30,right:90,bottom:50,left:90},m=null,n=a.utils.defaultColor(),o=null,p=null,q=!1,r=!0,s=!0,t=!1,u=!1,v={},w=null,x=null,y=d3.dispatch("tooltipHide","stateChange","changeState","renderEnd"),z=250;g.orient("bottom").tickPadding(7),h.orient(t?"right":"left"),k.duration(0).headerEnabled(!1).valueFormatter(function(a,b){return h.tickFormat()(a,b)}).headerFormatter(function(a,b){return g.tickFormat()(a,b)});var A=a.utils.renderWatch(y,0);return f.dispatch.on("elementMouseover.tooltip",function(a){a.series={key:c.x()(a.data),value:c.y()(a.data),color:a.color},k.data(a).hidden(!1)}),f.dispatch.on("elementMouseout.tooltip",function(a){k.hidden(!0)}),f.dispatch.on("elementMousemove.tooltip",function(a){k()}),c.dispatch=y,c.bars=f,c.legend=i,c.xAxis=g,c.yAxis=h,c.interactiveLayer=j,c.tooltip=k,c.options=a.utils.optionsFunc.bind(c),c._options=Object.create({},{width:{get:function(){return o},set:function(a){o=a}},height:{get:function(){return p},set:function(a){p=a}},showLegend:{get:function(){return q},set:function(a){q=a}},showXAxis:{get:function(){return r},set:function(a){r=a}},showYAxis:{get:function(){return s},set:function(a){s=a}},defaultState:{get:function(){return w},set:function(a){w=a}},noData:{get:function(){return x},set:function(a){x=a}},margin:{get:function(){return l},set:function(a){void 0!==a.top&&(l.top=a.top,m=a.top),l.right=void 0!==a.right?a.right:l.right,l.bottom=void 0!==a.bottom?a.bottom:l.bottom,l.left=void 0!==a.left?a.left:l.left}},color:{get:function(){return n},set:function(b){n=a.utils.getColor(b),i.color(n),f.color(n)}},duration:{get:function(){return z},set:function(a){z=a,A.reset(z),h.duration(z),g.duration(z)}},rightAlignYAxis:{get:function(){return t},set:function(a){t=a,h.orient(a?"right":"left")}},useInteractiveGuideline:{get:function(){return u},set:function(a){u=a,a===!0&&c.interactive(!1)}}}),a.utils.inheritOptions(c,f),a.utils.initOptions(c),c},a.models.ohlcBarChart=function(){var b=a.models.historicalBarChart(a.models.ohlcBar());return b.useInteractiveGuideline(!0),b.interactiveLayer.tooltip.contentGenerator(function(a){var c=a.series[0].data,d=c.open<c.close?"2ca02c":"d62728";return'<h3 style="color: 
#'+d+'">'+a.value+"</h3><table><tr><td>open:</td><td>"+b.yAxis.tickFormat()(c.open)+"</td></tr><tr><td>close:</td><td>"+b.yAxis.tickFormat()(c.close)+"</td></tr><tr><td>high</td><td>"+b.yAxis.tickFormat()(c.high)+"</td></tr><tr><td>low:</td><td>"+b.yAxis.tickFormat()(c.low)+"</td></tr></table>"}),b},a.models.candlestickBarChart=function(){var b=a.models.historicalBarChart(a.models.candlestickBar());return b.useInteractiveGuideline(!0),b.interactiveLayer.tooltip.contentGenerator(function(a){var c=a.series[0].data,d=c.open<c.close?"2ca02c":"d62728";return'<h3 style="color: #'+d+'">'+a.value+"</h3><table><tr><td>open:</td><td>"+b.yAxis.tickFormat()(c.open)+"</td></tr><tr><td>close:</td><td>"+b.yAxis.tickFormat()(c.close)+"</td></tr><tr><td>high</td><td>"+b.yAxis.tickFormat()(c.high)+"</td></tr><tr><td>low:</td><td>"+b.yAxis.tickFormat()(c.low)+"</td></tr></table>";
-}),b},a.models.legend=function(){"use strict";function b(r){function s(a,b){return"furious"!=q?"#000":o?a.disengaged?"#000":"#fff":o?void 0:(a.color||(a.color=h(a,b)),a.disabled?a.color:"#fff")}function t(a,b){return o&&"furious"==q&&a.disengaged?"#eee":a.color||h(a,b)}function u(a,b){return o&&"furious"==q?1:a.disabled?0:1}return r.each(function(b){var h=d-c.left-c.right,r=d3.select(this);a.utils.initSVG(r);var v=r.selectAll("g.nv-legend").data([b]),w=v.enter().append("g").attr("class","nvd3 nv-legend").append("g"),x=v.select("g");l?v.attr("transform","translate("+-c.right+","+c.top+")"):v.attr("transform","translate("+c.left+","+c.top+")");var y,z,A=x.selectAll(".nv-series").data(function(a){return"furious"!=q?a:a.filter(function(a){return o?!0:!a.disengaged})}),B=A.enter().append("g").attr("class","nv-series");switch(q){case"furious":z=23;break;case"classic":z=20}if("classic"==q)B.append("circle").style("stroke-width",2).attr("class","nv-legend-symbol").attr("r",5),y=A.select(".nv-legend-symbol");else if("furious"==q){B.append("rect").style("stroke-width",2).attr("class","nv-legend-symbol").attr("rx",3).attr("ry",3),y=A.select(".nv-legend-symbol"),B.append("g").attr("class","nv-check-box").property("innerHTML",'<path d="M0.5,5 L22.5,5 L22.5,26.5 L0.5,26.5 L0.5,5 Z" class="nv-box"></path><path d="M5.5,12.8618467 L11.9185089,19.2803556 L31,0.198864511" class="nv-check"></path>').attr("transform","translate(-10,-8)scale(0.5)");var C=A.select(".nv-check-box");C.each(function(a,b){d3.select(this).selectAll("path").attr("stroke",s(a,b))})}B.append("text").attr("text-anchor","start").attr("class","nv-legend-text").attr("dy",".32em").attr("dx","8");var D=A.select("text.nv-legend-text");A.on("mouseover",function(a,b){p.legendMouseover(a,b)}).on("mouseout",function(a,b){p.legendMouseout(a,b)}).on("click",function(a,b){p.legendClick(a,b);var c=A.data();if(m){if("classic"==q)n?(c.forEach(function(a){a.disabled=!0}),a.disabled=!1):(a.disabled=!a.disabled,c.every(function(a){return a.disabled})&&c.forEach(function(a){a.disabled=!1}));else if("furious"==q)if(o)a.disengaged=!a.disengaged,a.userDisabled=void 0==a.userDisabled?!!a.disabled:a.userDisabled,a.disabled=a.disengaged||a.userDisabled;else if(!o){a.disabled=!a.disabled,a.userDisabled=a.disabled;var d=c.filter(function(a){return!a.disengaged});d.every(function(a){return a.userDisabled})&&c.forEach(function(a){a.disabled=a.userDisabled=!1})}p.stateChange({disabled:c.map(function(a){return!!a.disabled}),disengaged:c.map(function(a){return!!a.disengaged})})}}).on("dblclick",function(a,b){if(("furious"!=q||!o)&&(p.legendDblclick(a,b),m)){var c=A.data();c.forEach(function(a){a.disabled=!0,"furious"==q&&(a.userDisabled=a.disabled)}),a.disabled=!1,"furious"==q&&(a.userDisabled=a.disabled),p.stateChange({disabled:c.map(function(a){return!!a.disabled})})}}),A.classed("nv-disabled",function(a){return a.userDisabled}),A.exit().remove(),D.attr("fill",s).text(function(a){return g(f(a))});var E=0;if(j){var F=[];A.each(function(b,c){var d;if(g(f(b))&&g(f(b)).length>i){var e=g(f(b)).substring(0,i);d=d3.select(this).select("text").text(e+"..."),d3.select(this).append("svg:title").text(g(f(b)))}else d=d3.select(this).select("text");var h;try{if(h=d.node().getComputedTextLength(),0>=h)throw Error()}catch(j){h=a.utils.calcApproxTextWidth(d)}F.push(h+k)});var G=0,H=[];for(E=0;h>E&&G<F.length;)H[G]=F[G],E+=F[G++];for(0===G&&(G=1);E>h&&G>1;){H=[],G--;for(var I=0;I<F.length;I++)F[I]>(H[I%G]||0)&&(H[I%G]=F[I]);E=H.reduce(function(a,b,c,d){return a+b})}for(var 
J=[],K=0,L=0;G>K;K++)J[K]=L,L+=H[K];A.attr("transform",function(a,b){return"translate("+J[b%G]+","+(5+Math.floor(b/G)*z)+")"}),l?x.attr("transform","translate("+(d-c.right-E)+","+c.top+")"):x.attr("transform","translate(0,"+c.top+")"),e=c.top+c.bottom+Math.ceil(F.length/G)*z}else{var M,N=5,O=5,P=0;A.attr("transform",function(a,b){var e=d3.select(this).select("text").node().getComputedTextLength()+k;return M=O,d<c.left+c.right+M+e&&(O=M=5,N+=z),O+=e,O>P&&(P=O),M+P>E&&(E=M+P),"translate("+M+","+N+")"}),x.attr("transform","translate("+(d-c.right-P)+","+c.top+")"),e=c.top+c.bottom+N+15}if("furious"==q){y.attr("width",function(a,b){return D[0][b].getComputedTextLength()+27}).attr("height",18).attr("y",-9).attr("x",-15),w.insert("rect",":first-child").attr("class","nv-legend-bg").attr("fill","#eee").attr("opacity",0);var Q=x.select(".nv-legend-bg");Q.transition().duration(300).attr("x",-z).attr("width",E+z-12).attr("height",e+10).attr("y",-c.top-10).attr("opacity",o?1:0)}y.style("fill",t).style("fill-opacity",u).style("stroke",t)}),b}var c={top:5,right:0,bottom:5,left:0},d=400,e=20,f=function(a){return a.key},g=function(a){return a},h=a.utils.getColor(),i=20,j=!0,k=32,l=!0,m=!0,n=!1,o=!1,p=d3.dispatch("legendClick","legendDblclick","legendMouseover","legendMouseout","stateChange"),q="classic";return b.dispatch=p,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return d},set:function(a){d=a}},height:{get:function(){return e},set:function(a){e=a}},key:{get:function(){return f},set:function(a){f=a}},keyFormatter:{get:function(){return g},set:function(a){g=a}},align:{get:function(){return j},set:function(a){j=a}},maxKeyLength:{get:function(){return i},set:function(a){i=a}},rightAlign:{get:function(){return l},set:function(a){l=a}},padding:{get:function(){return k},set:function(a){k=a}},updateState:{get:function(){return m},set:function(a){m=a}},radioButtonMode:{get:function(){return n},set:function(a){n=a}},expanded:{get:function(){return o},set:function(a){o=a}},vers:{get:function(){return q},set:function(a){q=a}},margin:{get:function(){return c},set:function(a){c.top=void 0!==a.top?a.top:c.top,c.right=void 0!==a.right?a.right:c.right,c.bottom=void 0!==a.bottom?a.bottom:c.bottom,c.left=void 0!==a.left?a.left:c.left}},color:{get:function(){return h},set:function(b){h=a.utils.getColor(b)}}}),a.utils.initOptions(b),b},a.models.line=function(){"use strict";function b(r){return v.reset(),v.models(e),r.each(function(b){i=d3.select(this);var r=a.utils.availableWidth(g,i,f),s=a.utils.availableHeight(h,i,f);a.utils.initSVG(i),c=e.xScale(),d=e.yScale(),t=t||c,u=u||d;var w=i.selectAll("g.nv-wrap.nv-line").data([b]),x=w.enter().append("g").attr("class","nvd3 nv-wrap nv-line"),y=x.append("defs"),z=x.append("g"),A=w.select("g");z.append("g").attr("class","nv-groups"),z.append("g").attr("class","nv-scatterWrap"),w.attr("transform","translate("+f.left+","+f.top+")"),e.width(r).height(s);var B=w.select(".nv-scatterWrap");B.call(e),y.append("clipPath").attr("id","nv-edge-clip-"+e.id()).append("rect"),w.select("#nv-edge-clip-"+e.id()+" rect").attr("width",r).attr("height",s>0?s:0),A.attr("clip-path",p?"url(#nv-edge-clip-"+e.id()+")":""),B.attr("clip-path",p?"url(#nv-edge-clip-"+e.id()+")":"");var C=w.select(".nv-groups").selectAll(".nv-group").data(function(a){return a},function(a){return a.key});C.enter().append("g").style("stroke-opacity",1e-6).style("stroke-width",function(a){return 
a.strokeWidth||j}).style("fill-opacity",1e-6),C.exit().remove(),C.attr("class",function(a,b){return(a.classed||"")+" nv-group nv-series-"+b}).classed("hover",function(a){return a.hover}).style("fill",function(a,b){return k(a,b)}).style("stroke",function(a,b){return k(a,b)}),C.watchTransition(v,"line: groups").style("stroke-opacity",1).style("fill-opacity",function(a){return a.fillOpacity||.5});var D=C.selectAll("path.nv-area").data(function(a){return o(a)?[a]:[]});D.enter().append("path").attr("class","nv-area").attr("d",function(b){return d3.svg.area().interpolate(q).defined(n).x(function(b,c){return a.utils.NaNtoZero(t(l(b,c)))}).y0(function(b,c){return a.utils.NaNtoZero(u(m(b,c)))}).y1(function(a,b){return u(d.domain()[0]<=0?d.domain()[1]>=0?0:d.domain()[1]:d.domain()[0])}).apply(this,[b.values])}),C.exit().selectAll("path.nv-area").remove(),D.watchTransition(v,"line: areaPaths").attr("d",function(b){return d3.svg.area().interpolate(q).defined(n).x(function(b,d){return a.utils.NaNtoZero(c(l(b,d)))}).y0(function(b,c){return a.utils.NaNtoZero(d(m(b,c)))}).y1(function(a,b){return d(d.domain()[0]<=0?d.domain()[1]>=0?0:d.domain()[1]:d.domain()[0])}).apply(this,[b.values])});var E=C.selectAll("path.nv-line").data(function(a){return[a.values]});E.enter().append("path").attr("class","nv-line").attr("d",d3.svg.line().interpolate(q).defined(n).x(function(b,c){return a.utils.NaNtoZero(t(l(b,c)))}).y(function(b,c){return a.utils.NaNtoZero(u(m(b,c)))})),E.watchTransition(v,"line: linePaths").attr("d",d3.svg.line().interpolate(q).defined(n).x(function(b,d){return a.utils.NaNtoZero(c(l(b,d)))}).y(function(b,c){return a.utils.NaNtoZero(d(m(b,c)))})),t=c.copy(),u=d.copy()}),v.renderEnd("line immediate"),b}var c,d,e=a.models.scatter(),f={top:0,right:0,bottom:0,left:0},g=960,h=500,i=null,j=1.5,k=a.utils.defaultColor(),l=function(a){return a.x},m=function(a){return a.y},n=function(a,b){return!isNaN(m(a,b))&&null!==m(a,b)},o=function(a){return a.area},p=!1,q="linear",r=250,s=d3.dispatch("elementClick","elementMouseover","elementMouseout","renderEnd");e.pointSize(16).pointDomain([16,256]);var t,u,v=a.utils.renderWatch(s,r);return b.dispatch=s,b.scatter=e,e.dispatch.on("elementClick",function(){s.elementClick.apply(this,arguments)}),e.dispatch.on("elementMouseover",function(){s.elementMouseover.apply(this,arguments)}),e.dispatch.on("elementMouseout",function(){s.elementMouseout.apply(this,arguments)}),b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return g},set:function(a){g=a}},height:{get:function(){return h},set:function(a){h=a}},defined:{get:function(){return n},set:function(a){n=a}},interpolate:{get:function(){return q},set:function(a){q=a}},clipEdge:{get:function(){return p},set:function(a){p=a}},margin:{get:function(){return f},set:function(a){f.top=void 0!==a.top?a.top:f.top,f.right=void 0!==a.right?a.right:f.right,f.bottom=void 0!==a.bottom?a.bottom:f.bottom,f.left=void 0!==a.left?a.left:f.left}},duration:{get:function(){return r},set:function(a){r=a,v.reset(r),e.duration(r)}},isArea:{get:function(){return o},set:function(a){o=d3.functor(a)}},x:{get:function(){return l},set:function(a){l=a,e.x(a)}},y:{get:function(){return m},set:function(a){m=a,e.y(a)}},color:{get:function(){return k},set:function(b){k=a.utils.getColor(b),e.color(k)}}}),a.utils.inheritOptions(b,e),a.utils.initOptions(b),b},a.models.lineChart=function(){"use strict";function b(j){return C.reset(),C.models(e),s&&C.models(f),t&&C.models(g),j.each(function(j){function z(){s&&M.select(".nv-focus 
.nv-x.nv-axis").transition().duration(B).call(f)}function C(){t&&M.select(".nv-focus .nv-y.nv-axis").transition().duration(B).call(g)}function F(a){var b=M.select(".nv-focus .nv-linesWrap").datum(j.filter(function(a){return!a.disabled}).map(function(b,c){return{key:b.key,area:b.area,classed:b.classed,values:b.values.filter(function(b,c){return e.x()(b,c)>=a[0]&&e.x()(b,c)<=a[1]}),disableTooltip:b.disableTooltip}}));b.transition().duration(B).call(e),z(),C()}var G=d3.select(this);a.utils.initSVG(G);var H=a.utils.availableWidth(o,G,l),I=a.utils.availableHeight(p,G,l)-(w?k.height():0);if(b.update=function(){0===B?G.call(b):G.transition().duration(B).call(b)},b.container=this,x.setter(E(j),b.update).getter(D(j)).update(),x.disabled=j.map(function(a){return!!a.disabled}),!y){var J;y={};for(J in x)x[J]instanceof Array?y[J]=x[J].slice(0):y[J]=x[J]}if(!(j&&j.length&&j.filter(function(a){return a.values.length}).length))return a.utils.noData(b,G),b;G.selectAll(".nv-noData").remove(),k.dispatch.on("onBrush",function(a){F(a)}),c=e.xScale(),d=e.yScale();var K=G.selectAll("g.nv-wrap.nv-lineChart").data([j]),L=K.enter().append("g").attr("class","nvd3 nv-wrap nv-lineChart").append("g"),M=K.select("g");L.append("g").attr("class","nv-legendWrap");var N=L.append("g").attr("class","nv-focus");N.append("g").attr("class","nv-background").append("rect"),N.append("g").attr("class","nv-x nv-axis"),N.append("g").attr("class","nv-y nv-axis"),N.append("g").attr("class","nv-linesWrap"),N.append("g").attr("class","nv-interactive");L.append("g").attr("class","nv-focusWrap");q?(h.width(H),M.select(".nv-legendWrap").datum(j).call(h),"bottom"===r?K.select(".nv-legendWrap").attr("transform","translate(0,"+I+")"):"top"===r&&(m||h.height()===l.top||(l.top=h.height(),I=a.utils.availableHeight(p,G,l)-(w?k.height():0)),K.select(".nv-legendWrap").attr("transform","translate(0,"+-l.top+")"))):M.select(".nv-legendWrap").selectAll("*").remove(),K.attr("transform","translate("+l.left+","+l.top+")"),u&&M.select(".nv-y.nv-axis").attr("transform","translate("+H+",0)"),v&&(i.width(H).height(I).margin({left:l.left,top:l.top}).svgContainer(G).xScale(c),K.select(".nv-interactive").call(i)),M.select(".nv-focus .nv-background rect").attr("width",H).attr("height",I),e.width(H).height(I).color(j.map(function(a,b){return a.color||n(a,b)}).filter(function(a,b){return!j[b].disabled}));var O=M.select(".nv-linesWrap").datum(j.filter(function(a){return!a.disabled}));if(s&&f.scale(c)._ticks(a.utils.calcTicksX(H/100,j)).tickSize(-I,0),t&&g.scale(d)._ticks(a.utils.calcTicksY(I/36,j)).tickSize(-H,0),M.select(".nv-focus .nv-x.nv-axis").attr("transform","translate(0,"+I+")"),w){k.width(H),M.select(".nv-focusWrap").attr("transform","translate(0,"+(I+l.bottom+k.margin().top)+")").datum(j.filter(function(a){return!a.disabled})).call(k);var P=k.brush.empty()?k.xDomain():k.brush.extent();null!==P&&F(P)}else O.call(e),z(),C();h.dispatch.on("stateChange",function(a){for(var c in a)x[c]=a[c];A.stateChange(x),b.update()}),i.dispatch.on("elementMousemove",function(d){e.clearHighlights();var f,h,l,m=[];if(j.filter(function(a,b){return a.seriesIndex=b,!a.disabled&&!a.disableTooltip}).forEach(function(g,i){var j=w?k.brush.empty()?k.xScale().domain():k.brush.extent():c.domain(),o=g.values.filter(function(a,b){return j[0]<=j[1]?e.x()(a,b)>=j[0]&&e.x()(a,b)<=j[1]:e.x()(a,b)>=j[1]&&e.x()(a,b)<=j[0]});h=a.interactiveBisect(o,d.pointXValue,e.x());var p=o[h],q=b.y()(p,h);null!==q&&e.highlightPoint(i,h,!0),void 0!==p&&(void 0===f&&(f=p),void 
0===l&&(l=b.xScale()(b.x()(p,h))),m.push({key:g.key,value:q,color:n(g,g.seriesIndex),data:p}))}),m.length>2){var o=b.yScale().invert(d.mouseY),p=Math.abs(b.yScale().domain()[0]-b.yScale().domain()[1]),q=.03*p,r=a.nearestValueIndex(m.map(function(a){return a.value}),o,q);null!==r&&(m[r].highlight=!0)}var s=function(a,b){return null==a?"N/A":g.tickFormat()(a)};i.tooltip.valueFormatter(i.tooltip.valueFormatter()||s).data({value:b.x()(f,h),index:h,series:m})(),i.renderGuideLine(l)}),i.dispatch.on("elementClick",function(c){var d,f=[];j.filter(function(a,b){return a.seriesIndex=b,!a.disabled}).forEach(function(e){var g=a.interactiveBisect(e.values,c.pointXValue,b.x()),h=e.values[g];if("undefined"!=typeof h){"undefined"==typeof d&&(d=b.xScale()(b.x()(h,g)));var i=b.yScale()(b.y()(h,g));f.push({point:h,pointIndex:g,pos:[d,i],seriesIndex:e.seriesIndex,series:e})}}),e.dispatch.elementClick(f)}),i.dispatch.on("elementMouseout",function(a){e.clearHighlights()}),A.on("changeState",function(a){"undefined"!=typeof a.disabled&&j.length===a.disabled.length&&(j.forEach(function(b,c){b.disabled=a.disabled[c]}),x.disabled=a.disabled),b.update()})}),C.renderEnd("lineChart immediate"),b}var c,d,e=a.models.line(),f=a.models.axis(),g=a.models.axis(),h=a.models.legend(),i=a.interactiveGuideline(),j=a.models.tooltip(),k=a.models.focus(a.models.line()),l={top:30,right:20,bottom:50,left:60},m=null,n=a.utils.defaultColor(),o=null,p=null,q=!0,r="top",s=!0,t=!0,u=!1,v=!1,w=!1,x=a.utils.state(),y=null,z=null,A=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState","renderEnd"),B=250;f.orient("bottom").tickPadding(7),g.orient(u?"right":"left"),e.clipEdge(!0).duration(0),j.valueFormatter(function(a,b){return g.tickFormat()(a,b)}).headerFormatter(function(a,b){return f.tickFormat()(a,b)}),i.tooltip.valueFormatter(function(a,b){return g.tickFormat()(a,b)}).headerFormatter(function(a,b){return f.tickFormat()(a,b)});var C=a.utils.renderWatch(A,B),D=function(a){return function(){return{active:a.map(function(a){return!a.disabled})}}},E=function(a){return function(b){void 0!==b.active&&a.forEach(function(a,c){a.disabled=!b.active[c]})}};return e.dispatch.on("elementMouseover.tooltip",function(a){a.series.disableTooltip||j.data(a).hidden(!1)}),e.dispatch.on("elementMouseout.tooltip",function(a){j.hidden(!0)}),b.dispatch=A,b.lines=e,b.legend=h,b.focus=k,b.xAxis=f,b.x2Axis=k.xAxis,b.yAxis=g,b.y2Axis=k.yAxis,b.interactiveLayer=i,b.tooltip=j,b.state=x,b.dispatch=A,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return o},set:function(a){o=a}},height:{get:function(){return p},set:function(a){p=a}},showLegend:{get:function(){return q},set:function(a){q=a}},legendPosition:{get:function(){return r},set:function(a){r=a}},showXAxis:{get:function(){return s},set:function(a){s=a}},showYAxis:{get:function(){return t},set:function(a){t=a}},defaultState:{get:function(){return y},set:function(a){y=a}},noData:{get:function(){return z},set:function(a){z=a}},focusEnable:{get:function(){return w},set:function(a){w=a}},focusHeight:{get:function(){return k.height()},set:function(a){k.height(a)}},focusShowAxisX:{get:function(){return k.showXAxis()},set:function(a){k.showXAxis(a)}},focusShowAxisY:{get:function(){return k.showYAxis()},set:function(a){k.showYAxis(a)}},brushExtent:{get:function(){return k.brushExtent()},set:function(a){k.brushExtent(a)}},focusMargin:{get:function(){return k.margin},set:function(a){void 0!==a.top&&(l.top=a.top,m=a.top),k.margin.right=void 
0!==a.right?a.right:k.margin.right,k.margin.bottom=void 0!==a.bottom?a.bottom:k.margin.bottom,k.margin.left=void 0!==a.left?a.left:k.margin.left}},margin:{get:function(){return l},set:function(a){l.top=void 0!==a.top?a.top:l.top,l.right=void 0!==a.right?a.right:l.right,l.bottom=void 0!==a.bottom?a.bottom:l.bottom,l.left=void 0!==a.left?a.left:l.left}},duration:{get:function(){return B},set:function(a){B=a,C.reset(B),e.duration(B),k.duration(B),f.duration(B),g.duration(B)}},color:{get:function(){return n},set:function(b){n=a.utils.getColor(b),h.color(n),e.color(n),k.color(n)}},interpolate:{get:function(){return e.interpolate()},set:function(a){e.interpolate(a),k.interpolate(a)}},xTickFormat:{get:function(){return f.tickFormat()},set:function(a){f.tickFormat(a),k.xTickFormat(a)}},yTickFormat:{get:function(){return g.tickFormat()},set:function(a){g.tickFormat(a),k.yTickFormat(a)}},x:{get:function(){return e.x()},set:function(a){e.x(a),k.x(a)}},y:{get:function(){return e.y()},set:function(a){e.y(a),k.y(a)}},rightAlignYAxis:{get:function(){return u},set:function(a){u=a,g.orient(u?"right":"left")}},useInteractiveGuideline:{get:function(){return v},set:function(a){v=a,v&&(e.interactive(!1),e.useVoronoi(!1))}}}),a.utils.inheritOptions(b,e),a.utils.initOptions(b),b},a.models.lineWithFocusChart=function(){return a.models.lineChart().margin({bottom:30}).focusEnable(!0)},a.models.linePlusBarChart=function(){"use strict";function b(v){return v.each(function(v){function K(a){var b=+("e"==a),c=b?1:-1,d=$/3;return"M"+.5*c+","+d+"A6,6 0 0 "+b+" "+6.5*c+","+(d+6)+"V"+(2*d-6)+"A6,6 0 0 "+b+" "+.5*c+","+2*d+"ZM"+2.5*c+","+(d+8)+"V"+(2*d-8)+"M"+4.5*c+","+(d+8)+"V"+(2*d-8)}function S(){u.empty()||u.extent(J),na.data([u.empty()?e.domain():J]).each(function(a,b){var c=e(a[0])-e.range()[0],d=e.range()[1]-e(a[1]);d3.select(this).select(".left").attr("width",0>c?0:c),d3.select(this).select(".right").attr("x",e(a[1])).attr("width",0>d?0:d)})}function T(){J=u.empty()?null:u.extent(),c=u.empty()?e.domain():u.extent(),L.brush({extent:c,brush:u}),S(),l.width(Y).height(Z).color(v.map(function(a,b){return a.color||D(a,b)}).filter(function(a,b){return!v[b].disabled&&v[b].bar})),j.width(Y).height(Z).color(v.map(function(a,b){return a.color||D(a,b)}).filter(function(a,b){return!v[b].disabled&&!v[b].bar}));var b=ga.select(".nv-focus .nv-barsWrap").datum(aa.length?aa.map(function(a,b){return{key:a.key,values:a.values.filter(function(a,b){return l.x()(a,b)>=c[0]&&l.x()(a,b)<=c[1]})}}):[{values:[]}]),h=ga.select(".nv-focus .nv-linesWrap").datum(W(ba)?[{values:[]}]:ba.filter(function(a){return!a.disabled}).map(function(a,b){return{area:a.area,fillOpacity:a.fillOpacity,strokeWidth:a.strokeWidth,key:a.key,values:a.values.filter(function(a,b){return j.x()(a,b)>=c[0]&&j.x()(a,b)<=c[1]})}}));d=aa.length&&!R?l.xScale():j.xScale(),n.scale(d)._ticks(a.utils.calcTicksX(Y/100,v)).tickSize(-Z,0),n.domain([Math.ceil(c[0]),Math.floor(c[1])]),ga.select(".nv-x.nv-axis").transition().duration(M).call(n),b.transition().duration(M).call(l),h.transition().duration(M).call(j),ga.select(".nv-focus .nv-x.nv-axis").attr("transform","translate(0,"+f.range()[0]+")"),p.scale(f)._ticks(a.utils.calcTicksY(Z/36,v)).tickSize(-Y,0),q.scale(g)._ticks(a.utils.calcTicksY(Z/36,v)),R?q.tickSize(ba.length?0:-Y,0):q.tickSize(aa.length?0:-Y,0);var i=aa.length?1:0,k=ba.length&&!W(ba)?1:0,m=R?k:i,o=R?i:k;ga.select(".nv-focus .nv-y1.nv-axis").style("opacity",m),ga.select(".nv-focus 
.nv-y2.nv-axis").style("opacity",o).attr("transform","translate("+d.range()[1]+",0)"),ga.select(".nv-focus .nv-y1.nv-axis").transition().duration(M).call(p),ga.select(".nv-focus .nv-y2.nv-axis").transition().duration(M).call(q)}var X=d3.select(this);a.utils.initSVG(X);var Y=a.utils.availableWidth(z,X,w),Z=a.utils.availableHeight(A,X,w)-(F?I:0),$=I-y.top-y.bottom;if(b.update=function(){X.transition().duration(M).call(b)},b.container=this,N.setter(V(v),b.update).getter(U(v)).update(),N.disabled=v.map(function(a){return!!a.disabled}),!O){var _;O={};for(_ in N)N[_]instanceof Array?O[_]=N[_].slice(0):O[_]=N[_]}if(!(v&&v.length&&v.filter(function(a){return a.values.length}).length))return a.utils.noData(b,X),b;X.selectAll(".nv-noData").remove();var aa=v.filter(function(a){return!a.disabled&&a.bar}),ba=v.filter(function(a){return!a.bar});d=aa.length&&!R?l.xScale():j.xScale(),e=o.scale(),f=R?j.yScale():l.yScale(),g=R?l.yScale():j.yScale(),h=R?k.yScale():m.yScale(),i=R?m.yScale():k.yScale();var ca=v.filter(function(a){return!a.disabled&&(R?!a.bar:a.bar)}).map(function(a){return a.values.map(function(a,b){return{x:B(a,b),y:C(a,b)}})}),da=v.filter(function(a){return!a.disabled&&(R?a.bar:!a.bar)}).map(function(a){return a.values.map(function(a,b){return{x:B(a,b),y:C(a,b)}})});d.range([0,Y]),e.domain(d3.extent(d3.merge(ca.concat(da)),function(a){return a.x})).range([0,Y]);var ea=X.selectAll("g.nv-wrap.nv-linePlusBar").data([v]),fa=ea.enter().append("g").attr("class","nvd3 nv-wrap nv-linePlusBar").append("g"),ga=ea.select("g");fa.append("g").attr("class","nv-legendWrap");var ha=fa.append("g").attr("class","nv-focus");ha.append("g").attr("class","nv-x nv-axis"),ha.append("g").attr("class","nv-y1 nv-axis"),ha.append("g").attr("class","nv-y2 nv-axis"),ha.append("g").attr("class","nv-barsWrap"),ha.append("g").attr("class","nv-linesWrap");var ia=fa.append("g").attr("class","nv-context");if(ia.append("g").attr("class","nv-x nv-axis"),ia.append("g").attr("class","nv-y1 nv-axis"),ia.append("g").attr("class","nv-y2 nv-axis"),ia.append("g").attr("class","nv-barsWrap"),ia.append("g").attr("class","nv-linesWrap"),ia.append("g").attr("class","nv-brushBackground"),ia.append("g").attr("class","nv-x nv-brush"),E){var ja=t.align()?Y/2:Y,ka=t.align()?ja:0;t.width(ja),ga.select(".nv-legendWrap").datum(v.map(function(a){return a.originalKey=void 0===a.originalKey?a.key:a.originalKey,R?a.key=a.originalKey+(a.bar?Q:P):a.key=a.originalKey+(a.bar?P:Q),a})).call(t),x||t.height()===w.top||(w.top=t.height(),Z=a.utils.availableHeight(A,X,w)-I),ga.select(".nv-legendWrap").attr("transform","translate("+ka+","+-w.top+")")}else ga.select(".nv-legendWrap").selectAll("*").remove();ea.attr("transform","translate("+w.left+","+w.top+")"),ga.select(".nv-context").style("display",F?"initial":"none"),m.width(Y).height($).color(v.map(function(a,b){return a.color||D(a,b)}).filter(function(a,b){return!v[b].disabled&&v[b].bar})),k.width(Y).height($).color(v.map(function(a,b){return a.color||D(a,b)}).filter(function(a,b){return!v[b].disabled&&!v[b].bar}));var la=ga.select(".nv-context .nv-barsWrap").datum(aa.length?aa:[{values:[]}]),ma=ga.select(".nv-context .nv-linesWrap").datum(W(ba)?[{values:[]}]:ba.filter(function(a){return!a.disabled}));ga.select(".nv-context").attr("transform","translate(0,"+(Z+w.bottom+y.top)+")"),la.transition().call(m),ma.transition().call(k),H&&(o._ticks(a.utils.calcTicksX(Y/100,v)).tickSize(-$,0),ga.select(".nv-context .nv-x.nv-axis").attr("transform","translate(0,"+h.range()[0]+")"),ga.select(".nv-context 
.nv-x.nv-axis").transition().call(o)),G&&(r.scale(h)._ticks($/36).tickSize(-Y,0),s.scale(i)._ticks($/36).tickSize(aa.length?0:-Y,0),ga.select(".nv-context .nv-y3.nv-axis").style("opacity",aa.length?1:0).attr("transform","translate(0,"+e.range()[0]+")"),ga.select(".nv-context .nv-y2.nv-axis").style("opacity",ba.length?1:0).attr("transform","translate("+e.range()[1]+",0)"),ga.select(".nv-context .nv-y1.nv-axis").transition().call(r),ga.select(".nv-context .nv-y2.nv-axis").transition().call(s)),u.x(e).on("brush",T),J&&u.extent(J);var na=ga.select(".nv-brushBackground").selectAll("g").data([J||u.extent()]),oa=na.enter().append("g");oa.append("rect").attr("class","left").attr("x",0).attr("y",0).attr("height",$),oa.append("rect").attr("class","right").attr("x",0).attr("y",0).attr("height",$);var pa=ga.select(".nv-x.nv-brush").call(u);pa.selectAll("rect").attr("height",$),pa.selectAll(".resize").append("path").attr("d",K),t.dispatch.on("stateChange",function(a){for(var c in a)N[c]=a[c];L.stateChange(N),b.update()}),L.on("changeState",function(a){"undefined"!=typeof a.disabled&&(v.forEach(function(b,c){b.disabled=a.disabled[c]}),N.disabled=a.disabled),b.update()}),T()}),b}var c,d,e,f,g,h,i,j=a.models.line(),k=a.models.line(),l=a.models.historicalBar(),m=a.models.historicalBar(),n=a.models.axis(),o=a.models.axis(),p=a.models.axis(),q=a.models.axis(),r=a.models.axis(),s=a.models.axis(),t=a.models.legend(),u=d3.svg.brush(),v=a.models.tooltip(),w={top:30,right:30,bottom:30,left:60},x=null,y={top:0,right:30,bottom:20,left:60},z=null,A=null,B=function(a){return a.x},C=function(a){return a.y},D=a.utils.defaultColor(),E=!0,F=!0,G=!1,H=!0,I=50,J=null,K=null,L=d3.dispatch("brush","stateChange","changeState"),M=0,N=a.utils.state(),O=null,P=" (left axis)",Q=" (right axis)",R=!1;j.clipEdge(!0),k.interactive(!1),k.pointActive(function(a){return!1}),n.orient("bottom").tickPadding(5),p.orient("left"),q.orient("right"),o.orient("bottom").tickPadding(5),r.orient("left"),s.orient("right"),v.headerEnabled(!0).headerFormatter(function(a,b){return n.tickFormat()(a,b)});var S=function(){return R?{main:q,focus:s}:{main:p,focus:r}},T=function(){return R?{main:p,focus:r}:{main:q,focus:s}},U=function(a){return function(){return{active:a.map(function(a){return!a.disabled})}}},V=function(a){return function(b){void 0!==b.active&&a.forEach(function(a,c){a.disabled=!b.active[c]})}},W=function(a){return a.every(function(a){return a.disabled})};return j.dispatch.on("elementMouseover.tooltip",function(a){v.duration(100).valueFormatter(function(a,b){return T().main.tickFormat()(a,b)}).data(a).hidden(!1)}),j.dispatch.on("elementMouseout.tooltip",function(a){v.hidden(!0)}),l.dispatch.on("elementMouseover.tooltip",function(a){a.value=b.x()(a.data),a.series={value:b.y()(a.data),color:a.color},v.duration(0).valueFormatter(function(a,b){return S().main.tickFormat()(a,b)}).data(a).hidden(!1)}),l.dispatch.on("elementMouseout.tooltip",function(a){v.hidden(!0)}),l.dispatch.on("elementMousemove.tooltip",function(a){v()}),b.dispatch=L,b.legend=t,b.lines=j,b.lines2=k,b.bars=l,b.bars2=m,b.xAxis=n,b.x2Axis=o,b.y1Axis=p,b.y2Axis=q,b.y3Axis=r,b.y4Axis=s,b.tooltip=v,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return z},set:function(a){z=a}},height:{get:function(){return A},set:function(a){A=a}},showLegend:{get:function(){return E},set:function(a){E=a}},brushExtent:{get:function(){return J},set:function(a){J=a}},noData:{get:function(){return K},set:function(a){K=a}},focusEnable:{get:function(){return 
F},set:function(a){F=a}},focusHeight:{get:function(){return I},set:function(a){I=a}},focusShowAxisX:{get:function(){return H},set:function(a){H=a}},focusShowAxisY:{get:function(){return G},set:function(a){G=a}},legendLeftAxisHint:{get:function(){return P},set:function(a){P=a}},legendRightAxisHint:{get:function(){return Q},set:function(a){Q=a}},margin:{get:function(){return w},set:function(a){void 0!==a.top&&(w.top=a.top,x=a.top),w.right=void 0!==a.right?a.right:w.right,w.bottom=void 0!==a.bottom?a.bottom:w.bottom,w.left=void 0!==a.left?a.left:w.left}},focusMargin:{get:function(){return y},set:function(a){y.top=void 0!==a.top?a.top:y.top,y.right=void 0!==a.right?a.right:y.right,y.bottom=void 0!==a.bottom?a.bottom:y.bottom,y.left=void 0!==a.left?a.left:y.left}},duration:{get:function(){return M},set:function(a){M=a}},color:{get:function(){return D},set:function(b){D=a.utils.getColor(b),t.color(D)}},x:{get:function(){return B},set:function(a){B=a,j.x(a),k.x(a),l.x(a),m.x(a)}},y:{get:function(){return C},set:function(a){C=a,j.y(a),k.y(a),l.y(a),m.y(a)}},switchYAxisOrder:{get:function(){return R},set:function(a){if(R!==a){var b=p;p=q,q=b;var c=r;r=s,s=c}R=a,p.orient("left"),q.orient("right"),r.orient("left"),s.orient("right")}}}),a.utils.inheritOptions(b,j),a.utils.initOptions(b),b},a.models.multiBar=function(){"use strict";function b(F){return D.reset(),F.each(function(b){var F=k-j.left-j.right,G=l-j.top-j.bottom;p=d3.select(this),a.utils.initSVG(p);var H=0;if(x&&b.length&&(x=[{values:b[0].values.map(function(a){return{x:a.x,y:0,series:a.series,size:.01}})}]),u){var I=d3.layout.stack().offset(v).values(function(a){return a.values}).y(r)(!b.length&&x?x:b);I.forEach(function(a,c){a.nonStackable?(b[c].nonStackableSeries=H++,I[c]=b[c]):c>0&&I[c-1].nonStackable&&I[c].values.map(function(a,b){a.y0-=I[c-1].values[b].y,a.y1=a.y0+a.y})}),b=I}b.forEach(function(a,b){a.values.forEach(function(c){c.series=b,c.key=a.key})}),u&&b.length>0&&b[0].values.map(function(a,c){var d=0,e=0;b.map(function(a,f){if(!b[f].nonStackable){var g=a.values[c];g.size=Math.abs(g.y),g.y<0?(g.y1=e,e-=g.size):(g.y1=g.size+d,d+=g.size)}})});var J=d&&e?[]:b.map(function(a,b){return a.values.map(function(a,c){return{x:q(a,c),y:r(a,c),y0:a.y0,y1:a.y1,idx:b}})});m.domain(d||d3.merge(J).map(function(a){return a.x})).rangeBands(f||[0,F],A),n.domain(e||d3.extent(d3.merge(J).map(function(a){var c=a.y;return u&&!b[a.idx].nonStackable&&(c=a.y>0?a.y1:a.y1+a.y),c}).concat(s))).range(g||[G,0]),m.domain()[0]===m.domain()[1]&&(m.domain()[0]?m.domain([m.domain()[0]-.01*m.domain()[0],m.domain()[1]+.01*m.domain()[1]]):m.domain([-1,1])),n.domain()[0]===n.domain()[1]&&(n.domain()[0]?n.domain([n.domain()[0]+.01*n.domain()[0],n.domain()[1]-.01*n.domain()[1]]):n.domain([-1,1])),h=h||m,i=i||n;var K=p.selectAll("g.nv-wrap.nv-multibar").data([b]),L=K.enter().append("g").attr("class","nvd3 nv-wrap nv-multibar"),M=L.append("defs"),N=L.append("g"),O=K.select("g");N.append("g").attr("class","nv-groups"),K.attr("transform","translate("+j.left+","+j.top+")"),M.append("clipPath").attr("id","nv-edge-clip-"+o).append("rect"),K.select("#nv-edge-clip-"+o+" rect").attr("width",F).attr("height",G),O.attr("clip-path",t?"url(#nv-edge-clip-"+o+")":"");var P=K.select(".nv-groups").selectAll(".nv-group").data(function(a){return a},function(a,b){return b});P.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6);var Q=D.transition(P.exit().selectAll("rect.nv-bar"),"multibarExit",Math.min(100,z)).attr("y",function(a,c,d){var e=i(0)||0;return 
u&&b[a.series]&&!b[a.series].nonStackable&&(e=i(a.y0)),e}).attr("height",0).remove();Q.delay&&Q.delay(function(a,b){var c=b*(z/(E+1))-b;return c}),P.attr("class",function(a,b){return"nv-group nv-series-"+b}).classed("hover",function(a){return a.hover}).style("fill",function(a,b){return w(a,b)}).style("stroke",function(a,b){
-return w(a,b)}),P.style("stroke-opacity",1).style("fill-opacity",B);var R=P.selectAll("rect.nv-bar").data(function(a){return x&&!b.length?x.values:a.values});R.exit().remove();R.enter().append("rect").attr("class",function(a,b){return r(a,b)<0?"nv-bar negative":"nv-bar positive"}).attr("x",function(a,c,d){return u&&!b[d].nonStackable?0:d*m.rangeBand()/b.length}).attr("y",function(a,c,d){return i(u&&!b[d].nonStackable?a.y0:0)||0}).attr("height",0).attr("width",function(a,c,d){return m.rangeBand()/(u&&!b[d].nonStackable?1:b.length)}).attr("transform",function(a,b){return"translate("+m(q(a,b))+",0)"});R.style("fill",function(a,b,c){return w(a,c,b)}).style("stroke",function(a,b,c){return w(a,c,b)}).on("mouseover",function(a,b){d3.select(this).classed("hover",!0),C.elementMouseover({data:a,index:b,color:d3.select(this).style("fill")})}).on("mouseout",function(a,b){d3.select(this).classed("hover",!1),C.elementMouseout({data:a,index:b,color:d3.select(this).style("fill")})}).on("mousemove",function(a,b){C.elementMousemove({data:a,index:b,color:d3.select(this).style("fill")})}).on("click",function(a,b){var c=this;C.elementClick({data:a,index:b,color:d3.select(this).style("fill"),event:d3.event,element:c}),d3.event.stopPropagation()}).on("dblclick",function(a,b){C.elementDblClick({data:a,index:b,color:d3.select(this).style("fill")}),d3.event.stopPropagation()}),R.attr("class",function(a,b){return r(a,b)<0?"nv-bar negative":"nv-bar positive"}).attr("transform",function(a,b){return"translate("+m(q(a,b))+",0)"}),y&&(c||(c=b.map(function(){return!0})),R.style("fill",function(a,b,d){return d3.rgb(y(a,b)).darker(c.map(function(a,b){return b}).filter(function(a,b){return!c[b]})[d]).toString()}).style("stroke",function(a,b,d){return d3.rgb(y(a,b)).darker(c.map(function(a,b){return b}).filter(function(a,b){return!c[b]})[d]).toString()}));var S=R.watchTransition(D,"multibar",Math.min(250,z)).delay(function(a,c){return c*z/b[0].values.length});u?S.attr("y",function(a,c,d){var e=0;return e=b[d].nonStackable?r(a,c)<0?n(0):n(0)-n(r(a,c))<-1?n(0)-1:n(r(a,c))||0:n(a.y1)}).attr("height",function(a,c,d){return b[d].nonStackable?Math.max(Math.abs(n(r(a,c))-n(0)),0)||0:Math.max(Math.abs(n(a.y+a.y0)-n(a.y0)),0)}).attr("x",function(a,c,d){var e=0;return b[d].nonStackable&&(e=a.series*m.rangeBand()/b.length,b.length!==H&&(e=b[d].nonStackableSeries*m.rangeBand()/(2*H))),e}).attr("width",function(a,c,d){if(b[d].nonStackable){var e=m.rangeBand()/H;return b.length!==H&&(e=m.rangeBand()/(2*H)),e}return m.rangeBand()}):S.attr("x",function(a,c){return a.series*m.rangeBand()/b.length}).attr("width",m.rangeBand()/b.length).attr("y",function(a,b){return r(a,b)<0?n(0):n(0)-n(r(a,b))<1?n(0)-1:n(r(a,b))||0}).attr("height",function(a,b){return Math.max(Math.abs(n(r(a,b))-n(0)),1)||0}),h=m.copy(),i=n.copy(),b[0]&&b[0].values&&(E=b[0].values.length)}),D.renderEnd("multibar immediate"),b}var c,d,e,f,g,h,i,j={top:0,right:0,bottom:0,left:0},k=960,l=500,m=d3.scale.ordinal(),n=d3.scale.linear(),o=Math.floor(1e4*Math.random()),p=null,q=function(a){return a.x},r=function(a){return a.y},s=[0],t=!0,u=!1,v="zero",w=a.utils.defaultColor(),x=!1,y=null,z=500,A=.1,B=.75,C=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout","elementMousemove","renderEnd"),D=a.utils.renderWatch(C,z),E=0;return b.dispatch=C,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return k},set:function(a){k=a}},height:{get:function(){return l},set:function(a){l=a}},x:{get:function(){return 
q},set:function(a){q=a}},y:{get:function(){return r},set:function(a){r=a}},xScale:{get:function(){return m},set:function(a){m=a}},yScale:{get:function(){return n},set:function(a){n=a}},xDomain:{get:function(){return d},set:function(a){d=a}},yDomain:{get:function(){return e},set:function(a){e=a}},xRange:{get:function(){return f},set:function(a){f=a}},yRange:{get:function(){return g},set:function(a){g=a}},forceY:{get:function(){return s},set:function(a){s=a}},stacked:{get:function(){return u},set:function(a){u=a}},stackOffset:{get:function(){return v},set:function(a){v=a}},clipEdge:{get:function(){return t},set:function(a){t=a}},disabled:{get:function(){return c},set:function(a){c=a}},id:{get:function(){return o},set:function(a){o=a}},hideable:{get:function(){return x},set:function(a){x=a}},groupSpacing:{get:function(){return A},set:function(a){A=a}},fillOpacity:{get:function(){return B},set:function(a){B=a}},margin:{get:function(){return j},set:function(a){j.top=void 0!==a.top?a.top:j.top,j.right=void 0!==a.right?a.right:j.right,j.bottom=void 0!==a.bottom?a.bottom:j.bottom,j.left=void 0!==a.left?a.left:j.left}},duration:{get:function(){return z},set:function(a){z=a,D.reset(z)}},color:{get:function(){return w},set:function(b){w=a.utils.getColor(b)}},barColor:{get:function(){return y},set:function(b){y=b?a.utils.getColor(b):null}}}),a.utils.initOptions(b),b},a.models.multiBarChart=function(){"use strict";function b(C){return H.reset(),H.models(e),t&&H.models(f),u&&H.models(g),C.each(function(C){var H=d3.select(this);a.utils.initSVG(H);var L=a.utils.availableWidth(n,H,l),M=a.utils.availableHeight(o,H,l);if(b.update=function(){0===F?H.call(b):H.transition().duration(F).call(b)},b.container=this,A.setter(K(C),b.update).getter(J(C)).update(),A.disabled=C.map(function(a){return!!a.disabled}),!B){var N;B={};for(N in A)A[N]instanceof Array?B[N]=A[N].slice(0):B[N]=A[N]}if(!(C&&C.length&&C.filter(function(a){return a.values.length}).length))return a.utils.noData(b,H),b;H.selectAll(".nv-noData").remove(),c=e.xScale(),d=e.yScale();var O=H.selectAll("g.nv-wrap.nv-multiBarWithLegend").data([C]),P=O.enter().append("g").attr("class","nvd3 nv-wrap nv-multiBarWithLegend").append("g"),Q=O.select("g");if(P.append("g").attr("class","nv-x nv-axis"),P.append("g").attr("class","nv-y nv-axis"),P.append("g").attr("class","nv-barsWrap"),P.append("g").attr("class","nv-legendWrap"),P.append("g").attr("class","nv-controlsWrap"),P.append("g").attr("class","nv-interactive"),s?(i.width(L-E()),Q.select(".nv-legendWrap").datum(C).call(i),m||i.height()===l.top||(l.top=i.height(),M=a.utils.availableHeight(o,H,l)),Q.select(".nv-legendWrap").attr("transform","translate("+E()+","+-l.top+")")):Q.select(".nv-legendWrap").selectAll("*").remove(),q){var R=[{key:r.grouped||"Grouped",disabled:e.stacked()},{key:r.stacked||"Stacked",disabled:!e.stacked()}];j.width(E()).color(["#444","#444","#444"]),Q.select(".nv-controlsWrap").datum(R).attr("transform","translate(0,"+-l.top+")").call(j)}else Q.select(".nv-controlsWrap").selectAll("*").remove();O.attr("transform","translate("+l.left+","+l.top+")"),v&&Q.select(".nv-y.nv-axis").attr("transform","translate("+L+",0)"),e.disabled(C.map(function(a){return a.disabled})).width(L).height(M).color(C.map(function(a,b){return a.color||p(a,b)}).filter(function(a,b){return!C[b].disabled}));var 
S=Q.select(".nv-barsWrap").datum(C.filter(function(a){return!a.disabled}));if(S.call(e),t){f.scale(c)._ticks(a.utils.calcTicksX(L/100,C)).tickSize(-M,0),Q.select(".nv-x.nv-axis").attr("transform","translate(0,"+d.range()[0]+")"),Q.select(".nv-x.nv-axis").call(f);var T=Q.select(".nv-x.nv-axis > g").selectAll("g");if(T.selectAll("line, text").style("opacity",1),x){var U=function(a,b){return"translate("+a+","+b+")"},V=5,W=17;T.selectAll("text").attr("transform",function(a,b,c){return U(0,c%2==0?V:W)});var X=d3.selectAll(".nv-x.nv-axis .nv-wrap g g text")[0].length;Q.selectAll(".nv-x.nv-axis .nv-axisMaxMin text").attr("transform",function(a,b){return U(0,0===b||X%2!==0?W:V)})}y&&Q.selectAll(".tick text").call(a.utils.wrapTicks,b.xAxis.rangeBand()),w&&T.filter(function(a,b){return b%Math.ceil(C[0].values.length/(L/100))!==0}).selectAll("text, line").style("opacity",0),z&&T.selectAll(".tick text").attr("transform","rotate("+z+" 0,0)").style("text-anchor",z>0?"start":"end"),Q.select(".nv-x.nv-axis").selectAll("g.nv-axisMaxMin text").style("opacity",1)}u&&(g.scale(d)._ticks(a.utils.calcTicksY(M/36,C)).tickSize(-L,0),Q.select(".nv-y.nv-axis").call(g)),G&&(h.width(L).height(M).margin({left:l.left,top:l.top}).svgContainer(H).xScale(c),O.select(".nv-interactive").call(h)),i.dispatch.on("stateChange",function(a){for(var c in a)A[c]=a[c];D.stateChange(A),b.update()}),j.dispatch.on("legendClick",function(a,c){if(a.disabled){switch(R=R.map(function(a){return a.disabled=!0,a}),a.disabled=!1,a.key){case"Grouped":case r.grouped:e.stacked(!1);break;case"Stacked":case r.stacked:e.stacked(!0)}A.stacked=e.stacked(),D.stateChange(A),b.update()}}),D.on("changeState",function(a){"undefined"!=typeof a.disabled&&(C.forEach(function(b,c){b.disabled=a.disabled[c]}),A.disabled=a.disabled),"undefined"!=typeof a.stacked&&(e.stacked(a.stacked),A.stacked=a.stacked,I=a.stacked),b.update()}),G?(h.dispatch.on("elementMousemove",function(a){if(void 0!=a.pointXValue){var d,e,f,g,i=[];C.filter(function(a,b){return a.seriesIndex=b,!a.disabled}).forEach(function(h,j){e=c.domain().indexOf(a.pointXValue);var k=h.values[e];void 0!==k&&(g=k.x,void 0===d&&(d=k),void 0===f&&(f=a.mouseX),i.push({key:h.key,value:b.y()(k,e),color:p(h,h.seriesIndex),data:h.values[e]}))}),h.tooltip.data({value:g,index:e,series:i})(),h.renderGuideLine(f)}}),h.dispatch.on("elementMouseout",function(a){h.tooltip.hidden(!0)})):(e.dispatch.on("elementMouseover.tooltip",function(a){a.value=b.x()(a.data),a.series={key:a.data.key,value:b.y()(a.data),color:a.color},k.data(a).hidden(!1)}),e.dispatch.on("elementMouseout.tooltip",function(a){k.hidden(!0)}),e.dispatch.on("elementMousemove.tooltip",function(a){k()}))}),H.renderEnd("multibarchart immediate"),b}var c,d,e=a.models.multiBar(),f=a.models.axis(),g=a.models.axis(),h=a.interactiveGuideline(),i=a.models.legend(),j=a.models.legend(),k=a.models.tooltip(),l={top:30,right:20,bottom:50,left:60},m=null,n=null,o=null,p=a.utils.defaultColor(),q=!0,r={},s=!0,t=!0,u=!0,v=!1,w=!0,x=!1,y=!1,z=0,A=a.utils.state(),B=null,C=null,D=d3.dispatch("stateChange","changeState","renderEnd"),E=function(){return q?180:0},F=250,G=!1;A.stacked=!1,e.stacked(!1),f.orient("bottom").tickPadding(7).showMaxMin(!1).tickFormat(function(a){return a}),g.orient(v?"right":"left").tickFormat(d3.format(",.1f")),k.duration(0).valueFormatter(function(a,b){return g.tickFormat()(a,b)}).headerFormatter(function(a,b){return f.tickFormat()(a,b)}),h.tooltip.valueFormatter(function(a,b){return 
null==a?"N/A":g.tickFormat()(a,b)}).headerFormatter(function(a,b){return f.tickFormat()(a,b)}),h.tooltip.valueFormatter(function(a,b){return null==a?"N/A":g.tickFormat()(a,b)}).headerFormatter(function(a,b){return f.tickFormat()(a,b)}),h.tooltip.duration(0).valueFormatter(function(a,b){return g.tickFormat()(a,b)}).headerFormatter(function(a,b){return f.tickFormat()(a,b)}),j.updateState(!1);var H=a.utils.renderWatch(D),I=!1,J=function(a){return function(){return{active:a.map(function(a){return!a.disabled}),stacked:I}}},K=function(a){return function(b){void 0!==b.stacked&&(I=b.stacked),void 0!==b.active&&a.forEach(function(a,c){a.disabled=!b.active[c]})}};return b.dispatch=D,b.multibar=e,b.legend=i,b.controls=j,b.xAxis=f,b.yAxis=g,b.state=A,b.tooltip=k,b.interactiveLayer=h,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return n},set:function(a){n=a}},height:{get:function(){return o},set:function(a){o=a}},showLegend:{get:function(){return s},set:function(a){s=a}},showControls:{get:function(){return q},set:function(a){q=a}},controlLabels:{get:function(){return r},set:function(a){r=a}},showXAxis:{get:function(){return t},set:function(a){t=a}},showYAxis:{get:function(){return u},set:function(a){u=a}},defaultState:{get:function(){return B},set:function(a){B=a}},noData:{get:function(){return C},set:function(a){C=a}},reduceXTicks:{get:function(){return w},set:function(a){w=a}},rotateLabels:{get:function(){return z},set:function(a){z=a}},staggerLabels:{get:function(){return x},set:function(a){x=a}},wrapLabels:{get:function(){return y},set:function(a){y=!!a}},margin:{get:function(){return l},set:function(a){void 0!==a.top&&(l.top=a.top,m=a.top),l.right=void 0!==a.right?a.right:l.right,l.bottom=void 0!==a.bottom?a.bottom:l.bottom,l.left=void 0!==a.left?a.left:l.left}},duration:{get:function(){return F},set:function(a){F=a,e.duration(F),f.duration(F),g.duration(F),H.reset(F)}},color:{get:function(){return p},set:function(b){p=a.utils.getColor(b),i.color(p)}},rightAlignYAxis:{get:function(){return v},set:function(a){v=a,g.orient(v?"right":"left")}},useInteractiveGuideline:{get:function(){return G},set:function(a){G=a}},barColor:{get:function(){return e.barColor},set:function(a){e.barColor(a),i.color(function(a,b){return d3.rgb("#ccc").darker(1.5*b).toString()})}}}),a.utils.inheritOptions(b,e),a.utils.initOptions(b),b},a.models.multiBarHorizontal=function(){"use strict";function b(m){return F.reset(),m.each(function(b){var m=k-j.left-j.right,D=l-j.top-j.bottom;n=d3.select(this),a.utils.initSVG(n),w&&(b=d3.layout.stack().offset("zero").values(function(a){return a.values}).y(r)(b)),b.forEach(function(a,b){a.values.forEach(function(c){c.series=b,c.key=a.key})}),w&&b[0].values.map(function(a,c){var d=0,e=0;b.map(function(a){var b=a.values[c];b.size=Math.abs(b.y),b.y<0?(b.y1=e-b.size,e-=b.size):(b.y1=d,d+=b.size)})});var G=d&&e?[]:b.map(function(a){return a.values.map(function(a,b){return{x:q(a,b),y:r(a,b),y0:a.y0,y1:a.y1}})});o.domain(d||d3.merge(G).map(function(a){return a.x})).rangeBands(f||[0,D],A),p.domain(e||d3.extent(d3.merge(G).map(function(a){return w?a.y>0?a.y1+a.y:a.y1:a.y}).concat(t))),x&&!w?p.range(g||[p.domain()[0]<0?z:0,m-(p.domain()[1]>0?z:0)]):p.range(g||[0,m]),h=h||o,i=i||d3.scale.linear().domain(p.domain()).range([p(0),p(0)]);var H=d3.select(this).selectAll("g.nv-wrap.nv-multibarHorizontal").data([b]),I=H.enter().append("g").attr("class","nvd3 nv-wrap 
nv-multibarHorizontal"),J=(I.append("defs"),I.append("g"));H.select("g");J.append("g").attr("class","nv-groups"),H.attr("transform","translate("+j.left+","+j.top+")");var K=H.select(".nv-groups").selectAll(".nv-group").data(function(a){return a},function(a,b){return b});K.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6),K.exit().watchTransition(F,"multibarhorizontal: exit groups").style("stroke-opacity",1e-6).style("fill-opacity",1e-6).remove(),K.attr("class",function(a,b){return"nv-group nv-series-"+b}).classed("hover",function(a){return a.hover}).style("fill",function(a,b){return u(a,b)}).style("stroke",function(a,b){return u(a,b)}),K.watchTransition(F,"multibarhorizontal: groups").style("stroke-opacity",1).style("fill-opacity",B);var L=K.selectAll("g.nv-bar").data(function(a){return a.values});L.exit().remove();var M=L.enter().append("g").attr("transform",function(a,c,d){return"translate("+i(w?a.y0:0)+","+(w?0:d*o.rangeBand()/b.length+o(q(a,c)))+")"});M.append("rect").attr("width",0).attr("height",o.rangeBand()/(w?1:b.length)),L.on("mouseover",function(a,b){d3.select(this).classed("hover",!0),E.elementMouseover({data:a,index:b,color:d3.select(this).style("fill")})}).on("mouseout",function(a,b){d3.select(this).classed("hover",!1),E.elementMouseout({data:a,index:b,color:d3.select(this).style("fill")})}).on("mouseout",function(a,b){E.elementMouseout({data:a,index:b,color:d3.select(this).style("fill")})}).on("mousemove",function(a,b){E.elementMousemove({data:a,index:b,color:d3.select(this).style("fill")})}).on("click",function(a,b){var c=this;E.elementClick({data:a,index:b,color:d3.select(this).style("fill"),event:d3.event,element:c}),d3.event.stopPropagation()}).on("dblclick",function(a,b){E.elementDblClick({data:a,index:b,color:d3.select(this).style("fill")}),d3.event.stopPropagation()}),s(b[0],0)&&(M.append("polyline"),L.select("polyline").attr("fill","none").attr("points",function(a,c){var d=s(a,c),e=.8*o.rangeBand()/(2*(w?1:b.length));d=d.length?d:[-Math.abs(d),Math.abs(d)],d=d.map(function(a){return p(a)-p(0)});var f=[[d[0],-e],[d[0],e],[d[0],0],[d[1],0],[d[1],-e],[d[1],e]];return f.map(function(a){return a.join(",")}).join(" ")}).attr("transform",function(a,c){var d=o.rangeBand()/(2*(w?1:b.length));return"translate("+(r(a,c)<0?0:p(r(a,c))-p(0))+", "+d+")"})),M.append("text"),x&&!w?(L.select("text").attr("text-anchor",function(a,b){return r(a,b)<0?"end":"start"}).attr("y",o.rangeBand()/(2*b.length)).attr("dy",".32em").text(function(a,b){var c=C(r(a,b)),d=s(a,b);return void 0===d?c:d.length?c+"+"+C(Math.abs(d[1]))+"-"+C(Math.abs(d[0])):c+"±"+C(Math.abs(d))}),L.watchTransition(F,"multibarhorizontal: bars").select("text").attr("x",function(a,b){return r(a,b)<0?-4:p(r(a,b))-p(0)+4})):L.selectAll("text").text(""),y&&!w?(M.append("text").classed("nv-bar-label",!0),L.select("text.nv-bar-label").attr("text-anchor",function(a,b){return r(a,b)<0?"start":"end"}).attr("y",o.rangeBand()/(2*b.length)).attr("dy",".32em").text(function(a,b){return q(a,b)}),L.watchTransition(F,"multibarhorizontal: bars").select("text.nv-bar-label").attr("x",function(a,b){return r(a,b)<0?p(0)-p(r(a,b))+4:-4})):L.selectAll("text.nv-bar-label").text(""),L.attr("class",function(a,b){return r(a,b)<0?"nv-bar negative":"nv-bar positive"}),v&&(c||(c=b.map(function(){return!0})),L.style("fill",function(a,b,d){return d3.rgb(v(a,b)).darker(c.map(function(a,b){return b}).filter(function(a,b){return!c[b]})[d]).toString()}).style("stroke",function(a,b,d){return 
d3.rgb(v(a,b)).darker(c.map(function(a,b){return b}).filter(function(a,b){return!c[b]})[d]).toString()})),w?L.watchTransition(F,"multibarhorizontal: bars").attr("transform",function(a,b){return"translate("+p(a.y1)+","+o(q(a,b))+")"}).select("rect").attr("width",function(a,b){return Math.abs(p(r(a,b)+a.y0)-p(a.y0))||0}).attr("height",o.rangeBand()):L.watchTransition(F,"multibarhorizontal: bars").attr("transform",function(a,c){return"translate("+p(r(a,c)<0?r(a,c):0)+","+(a.series*o.rangeBand()/b.length+o(q(a,c)))+")"}).select("rect").attr("height",o.rangeBand()/b.length).attr("width",function(a,b){return Math.max(Math.abs(p(r(a,b))-p(0)),1)||0}),h=o.copy(),i=p.copy()}),F.renderEnd("multibarHorizontal immediate"),b}var c,d,e,f,g,h,i,j={top:0,right:0,bottom:0,left:0},k=960,l=500,m=Math.floor(1e4*Math.random()),n=null,o=d3.scale.ordinal(),p=d3.scale.linear(),q=function(a){return a.x},r=function(a){return a.y},s=function(a){return a.yErr},t=[0],u=a.utils.defaultColor(),v=null,w=!1,x=!1,y=!1,z=60,A=.1,B=.75,C=d3.format(",.2f"),D=250,E=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout","elementMousemove","renderEnd"),F=a.utils.renderWatch(E,D);return b.dispatch=E,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return k},set:function(a){k=a}},height:{get:function(){return l},set:function(a){l=a}},x:{get:function(){return q},set:function(a){q=a}},y:{get:function(){return r},set:function(a){r=a}},yErr:{get:function(){return s},set:function(a){s=a}},xScale:{get:function(){return o},set:function(a){o=a}},yScale:{get:function(){return p},set:function(a){p=a}},xDomain:{get:function(){return d},set:function(a){d=a}},yDomain:{get:function(){return e},set:function(a){e=a}},xRange:{get:function(){return f},set:function(a){f=a}},yRange:{get:function(){return g},set:function(a){g=a}},forceY:{get:function(){return t},set:function(a){t=a}},stacked:{get:function(){return w},set:function(a){w=a}},showValues:{get:function(){return x},set:function(a){x=a}},disabled:{get:function(){return c},set:function(a){c=a}},id:{get:function(){return m},set:function(a){m=a}},valueFormat:{get:function(){return C},set:function(a){C=a}},valuePadding:{get:function(){return z},set:function(a){z=a}},groupSpacing:{get:function(){return A},set:function(a){A=a}},fillOpacity:{get:function(){return B},set:function(a){B=a}},margin:{get:function(){return j},set:function(a){j.top=void 0!==a.top?a.top:j.top,j.right=void 0!==a.right?a.right:j.right,j.bottom=void 0!==a.bottom?a.bottom:j.bottom,j.left=void 0!==a.left?a.left:j.left}},duration:{get:function(){return D},set:function(a){D=a,F.reset(D)}},color:{get:function(){return u},set:function(b){u=a.utils.getColor(b)}},barColor:{get:function(){return v},set:function(b){v=b?a.utils.getColor(b):null}}}),a.utils.initOptions(b),b},a.models.multiBarHorizontalChart=function(){"use strict";function b(j){return D.reset(),D.models(e),s&&D.models(f),t&&D.models(g),j.each(function(j){var x=d3.select(this);a.utils.initSVG(x);var D=a.utils.availableWidth(m,x,k),E=a.utils.availableHeight(n,x,k);if(b.update=function(){x.transition().duration(A).call(b)},b.container=this,u=e.stacked(),v.setter(C(j),b.update).getter(B(j)).update(),v.disabled=j.map(function(a){return!!a.disabled}),!w){var F;w={};for(F in v)v[F]instanceof Array?w[F]=v[F].slice(0):w[F]=v[F]}if(!(j&&j.length&&j.filter(function(a){return a.values.length}).length))return a.utils.noData(b,x),b;x.selectAll(".nv-noData").remove(),c=e.xScale(),d=e.yScale().clamp(!0);var 
G=x.selectAll("g.nv-wrap.nv-multiBarHorizontalChart").data([j]),H=G.enter().append("g").attr("class","nvd3 nv-wrap nv-multiBarHorizontalChart").append("g"),I=G.select("g");if(H.append("g").attr("class","nv-x nv-axis"),H.append("g").attr("class","nv-y nv-axis").append("g").attr("class","nv-zeroLine").append("line"),H.append("g").attr("class","nv-barsWrap"),H.append("g").attr("class","nv-legendWrap"),H.append("g").attr("class","nv-controlsWrap"),r?(h.width(D-z()),I.select(".nv-legendWrap").datum(j).call(h),l||h.height()===k.top||(k.top=h.height(),E=a.utils.availableHeight(n,x,k)),I.select(".nv-legendWrap").attr("transform","translate("+z()+","+-k.top+")")):I.select(".nv-legendWrap").selectAll("*").remove(),p){var J=[{key:q.grouped||"Grouped",disabled:e.stacked()},{key:q.stacked||"Stacked",disabled:!e.stacked()}];i.width(z()).color(["#444","#444","#444"]),I.select(".nv-controlsWrap").datum(J).attr("transform","translate(0,"+-k.top+")").call(i)}else I.select(".nv-controlsWrap").selectAll("*").remove();G.attr("transform","translate("+k.left+","+k.top+")"),e.disabled(j.map(function(a){return a.disabled})).width(D).height(E).color(j.map(function(a,b){return a.color||o(a,b)}).filter(function(a,b){return!j[b].disabled}));var K=I.select(".nv-barsWrap").datum(j.filter(function(a){return!a.disabled}));if(K.transition().call(e),s){f.scale(c)._ticks(a.utils.calcTicksY(E/24,j)).tickSize(-D,0),I.select(".nv-x.nv-axis").call(f);var L=I.select(".nv-x.nv-axis").selectAll("g");L.selectAll("line, text")}t&&(g.scale(d)._ticks(a.utils.calcTicksX(D/100,j)).tickSize(-E,0),I.select(".nv-y.nv-axis").attr("transform","translate(0,"+E+")"),I.select(".nv-y.nv-axis").call(g)),I.select(".nv-zeroLine line").attr("x1",d(0)).attr("x2",d(0)).attr("y1",0).attr("y2",-E),h.dispatch.on("stateChange",function(a){for(var c in a)v[c]=a[c];y.stateChange(v),b.update()}),i.dispatch.on("legendClick",function(a,c){if(a.disabled){switch(J=J.map(function(a){return a.disabled=!0,a}),a.disabled=!1,a.key){case"Grouped":case q.grouped:e.stacked(!1);break;case"Stacked":case q.stacked:e.stacked(!0)}v.stacked=e.stacked(),y.stateChange(v),u=e.stacked(),b.update()}}),y.on("changeState",function(a){"undefined"!=typeof a.disabled&&(j.forEach(function(b,c){b.disabled=a.disabled[c]}),v.disabled=a.disabled),"undefined"!=typeof a.stacked&&(e.stacked(a.stacked),v.stacked=a.stacked,u=a.stacked),b.update()})}),D.renderEnd("multibar horizontal chart immediate"),b}var c,d,e=a.models.multiBarHorizontal(),f=a.models.axis(),g=a.models.axis(),h=a.models.legend().height(30),i=a.models.legend().height(30),j=a.models.tooltip(),k={top:30,right:20,bottom:50,left:60},l=null,m=null,n=null,o=a.utils.defaultColor(),p=!0,q={},r=!0,s=!0,t=!0,u=!1,v=a.utils.state(),w=null,x=null,y=d3.dispatch("stateChange","changeState","renderEnd"),z=function(){return p?180:0},A=250;v.stacked=!1,e.stacked(u),f.orient("left").tickPadding(5).showMaxMin(!1).tickFormat(function(a){return a}),g.orient("bottom").tickFormat(d3.format(",.1f")),j.duration(0).valueFormatter(function(a,b){return g.tickFormat()(a,b)}).headerFormatter(function(a,b){return f.tickFormat()(a,b)}),i.updateState(!1);var B=function(a){return function(){return{active:a.map(function(a){return!a.disabled}),stacked:u}}},C=function(a){return function(b){void 0!==b.stacked&&(u=b.stacked),void 0!==b.active&&a.forEach(function(a,c){a.disabled=!b.active[c]})}},D=a.utils.renderWatch(y,A);return 
e.dispatch.on("elementMouseover.tooltip",function(a){a.value=b.x()(a.data),a.series={key:a.data.key,value:b.y()(a.data),color:a.color},j.data(a).hidden(!1)}),e.dispatch.on("elementMouseout.tooltip",function(a){j.hidden(!0)}),e.dispatch.on("elementMousemove.tooltip",function(a){j()}),b.dispatch=y,b.multibar=e,b.legend=h,b.controls=i,b.xAxis=f,b.yAxis=g,b.state=v,b.tooltip=j,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return m},set:function(a){m=a}},height:{get:function(){return n},set:function(a){n=a}},showLegend:{get:function(){return r},set:function(a){r=a}},showControls:{get:function(){return p},set:function(a){p=a}},controlLabels:{get:function(){return q},set:function(a){q=a}},showXAxis:{get:function(){return s},set:function(a){s=a}},showYAxis:{get:function(){return t},set:function(a){t=a}},defaultState:{get:function(){return w},set:function(a){w=a}},noData:{get:function(){return x},set:function(a){x=a}},margin:{get:function(){return k},set:function(a){void 0!==a.top&&(k.top=a.top,l=a.top),k.right=void 0!==a.right?a.right:k.right,k.bottom=void 0!==a.bottom?a.bottom:k.bottom,k.left=void 0!==a.left?a.left:k.left}},duration:{get:function(){return A},set:function(a){A=a,D.reset(A),e.duration(A),f.duration(A),g.duration(A)}},color:{get:function(){return o},set:function(b){o=a.utils.getColor(b),h.color(o)}},barColor:{get:function(){return e.barColor},set:function(a){e.barColor(a),h.color(function(a,b){return d3.rgb("#ccc").darker(1.5*b).toString()})}}}),a.utils.inheritOptions(b,e),a.utils.initOptions(b),b},a.models.multiChart=function(){"use strict";function b(k){return k.each(function(k){function o(a){var b=2===k[a.seriesIndex].yAxis?G:F;a.value=a.point.x,a.series={value:a.point.y,color:a.point.color,key:a.series.key},I.duration(0).headerFormatter(function(a,b){return E.tickFormat()(a,b)}).valueFormatter(function(a,c){return b.tickFormat()(a,c)}).data(a).hidden(!1)}function s(a){var b=2===k[a.seriesIndex].yAxis?G:F;a.value=a.point.x,a.series={value:a.point.y,color:a.point.color,key:a.series.key},I.duration(100).headerFormatter(function(a,b){return E.tickFormat()(a,b)}).valueFormatter(function(a,c){return b.tickFormat()(a,c)}).data(a).hidden(!1)}function J(a){var b=2===k[a.seriesIndex].yAxis?G:F;a.point.x=C.x()(a.point),a.point.y=C.y()(a.point),I.duration(0).headerFormatter(function(a,b){return E.tickFormat()(a,b)}).valueFormatter(function(a,c){return b.tickFormat()(a,c)}).data(a).hidden(!1)}function L(a){var b=2===k[a.data.series].yAxis?G:F;a.value=A.x()(a.data),a.series={value:A.y()(a.data),color:a.color,key:a.data.key},I.duration(0).headerFormatter(function(a,b){return E.tickFormat()(a,b)}).valueFormatter(function(a,c){return b.tickFormat()(a,c)}).data(a).hidden(!1)}function M(){for(var a=0,b=K.length;b>a;a++){var c=K[a];try{c.clearHighlights()}catch(d){}}}function N(a,b,c){for(var d=0,e=K.length;e>d;d++){var f=K[d];try{f.highlightPoint(a,b,c)}catch(g){}}}var O=d3.select(this);a.utils.initSVG(O),b.update=function(){O.transition().call(b)},b.container=this;var 
P=a.utils.availableWidth(h,O,e),Q=a.utils.availableHeight(i,O,e),R=k.filter(function(a){return"line"==a.type&&1==a.yAxis}),S=k.filter(function(a){return"line"==a.type&&2==a.yAxis}),T=k.filter(function(a){return"scatter"==a.type&&1==a.yAxis}),U=k.filter(function(a){return"scatter"==a.type&&2==a.yAxis}),V=k.filter(function(a){return"bar"==a.type&&1==a.yAxis}),W=k.filter(function(a){return"bar"==a.type&&2==a.yAxis}),X=k.filter(function(a){return"area"==a.type&&1==a.yAxis}),Y=k.filter(function(a){return"area"==a.type&&2==a.yAxis});if(!(k&&k.length&&k.filter(function(a){return a.values.length}).length))return a.utils.noData(b,O),b;O.selectAll(".nv-noData").remove();var Z=k.filter(function(a){return!a.disabled&&1==a.yAxis}).map(function(a){return a.values.map(function(a,b){return{x:l(a),y:m(a)}})}),$=k.filter(function(a){return!a.disabled&&2==a.yAxis}).map(function(a){return a.values.map(function(a,b){return{x:l(a),y:m(a)}})});t.domain(d3.extent(d3.merge(Z.concat($)),function(a){return a.x})).range([0,P]);var _=O.selectAll("g.wrap.multiChart").data([k]),aa=_.enter().append("g").attr("class","wrap nvd3 multiChart").append("g");aa.append("g").attr("class","nv-x nv-axis"),aa.append("g").attr("class","nv-y1 nv-axis"),aa.append("g").attr("class","nv-y2 nv-axis"),aa.append("g").attr("class","stack1Wrap"),aa.append("g").attr("class","stack2Wrap"),aa.append("g").attr("class","bars1Wrap"),aa.append("g").attr("class","bars2Wrap"),aa.append("g").attr("class","scatters1Wrap"),aa.append("g").attr("class","scatters2Wrap"),aa.append("g").attr("class","lines1Wrap"),aa.append("g").attr("class","lines2Wrap"),aa.append("g").attr("class","legendWrap"),aa.append("g").attr("class","nv-interactive");var ba=_.select("g"),ca=k.map(function(a,b){return k[b].color||g(a,b)});if(j){var da=H.align()?P/2:P,ea=H.align()?da:0;H.width(da),H.color(ca),ba.select(".legendWrap").datum(k.map(function(a){return a.originalKey=void 0===a.originalKey?a.key:a.originalKey,a.key=a.originalKey+(1==a.yAxis?"":r),a})).call(H),f||H.height()===e.top||(e.top=H.height(),Q=a.utils.availableHeight(i,O,e)),ba.select(".legendWrap").attr("transform","translate("+ea+","+-e.top+")")}else ba.select(".legendWrap").selectAll("*").remove();w.width(P).height(Q).interpolate(n).color(ca.filter(function(a,b){return!k[b].disabled&&1==k[b].yAxis&&"line"==k[b].type})),x.width(P).height(Q).interpolate(n).color(ca.filter(function(a,b){return!k[b].disabled&&2==k[b].yAxis&&"line"==k[b].type})),y.width(P).height(Q).color(ca.filter(function(a,b){return!k[b].disabled&&1==k[b].yAxis&&"scatter"==k[b].type})),z.width(P).height(Q).color(ca.filter(function(a,b){return!k[b].disabled&&2==k[b].yAxis&&"scatter"==k[b].type})),A.width(P).height(Q).color(ca.filter(function(a,b){return!k[b].disabled&&1==k[b].yAxis&&"bar"==k[b].type})),B.width(P).height(Q).color(ca.filter(function(a,b){return!k[b].disabled&&2==k[b].yAxis&&"bar"==k[b].type})),C.width(P).height(Q).interpolate(n).color(ca.filter(function(a,b){return!k[b].disabled&&1==k[b].yAxis&&"area"==k[b].type})),D.width(P).height(Q).interpolate(n).color(ca.filter(function(a,b){return!k[b].disabled&&2==k[b].yAxis&&"area"==k[b].type})),ba.attr("transform","translate("+e.left+","+e.top+")");var 
fa=ba.select(".lines1Wrap").datum(R.filter(function(a){return!a.disabled})),ga=ba.select(".scatters1Wrap").datum(T.filter(function(a){return!a.disabled})),ha=ba.select(".bars1Wrap").datum(V.filter(function(a){return!a.disabled})),ia=ba.select(".stack1Wrap").datum(X.filter(function(a){return!a.disabled})),ja=ba.select(".lines2Wrap").datum(S.filter(function(a){return!a.disabled})),ka=ba.select(".scatters2Wrap").datum(U.filter(function(a){return!a.disabled})),la=ba.select(".bars2Wrap").datum(W.filter(function(a){return!a.disabled})),ma=ba.select(".stack2Wrap").datum(Y.filter(function(a){return!a.disabled})),na=X.length?X.map(function(a){return a.values}).reduce(function(a,b){return a.map(function(a,c){return{x:a.x,y:a.y+b[c].y}})}).concat([{x:0,y:0}]):[],oa=Y.length?Y.map(function(a){return a.values}).reduce(function(a,b){return a.map(function(a,c){return{x:a.x,y:a.y+b[c].y}})}).concat([{x:0,y:0}]):[];u.domain(c||d3.extent(d3.merge(Z).concat(na),function(a){return a.y})).range([0,Q]),v.domain(d||d3.extent(d3.merge($).concat(oa),function(a){return a.y})).range([0,Q]),w.yDomain(u.domain()),y.yDomain(u.domain()),A.yDomain(u.domain()),C.yDomain(u.domain()),x.yDomain(v.domain()),z.yDomain(v.domain()),B.yDomain(v.domain()),D.yDomain(v.domain()),X.length&&d3.transition(ia).call(C),Y.length&&d3.transition(ma).call(D),V.length&&d3.transition(ha).call(A),W.length&&d3.transition(la).call(B),R.length&&d3.transition(fa).call(w),
-S.length&&d3.transition(ja).call(x),T.length&&d3.transition(ga).call(y),U.length&&d3.transition(ka).call(z),E._ticks(a.utils.calcTicksX(P/100,k)).tickSize(-Q,0),ba.select(".nv-x.nv-axis").attr("transform","translate(0,"+Q+")"),d3.transition(ba.select(".nv-x.nv-axis")).call(E),F._ticks(a.utils.calcTicksY(Q/36,k)).tickSize(-P,0),d3.transition(ba.select(".nv-y1.nv-axis")).call(F),G._ticks(a.utils.calcTicksY(Q/36,k)).tickSize(-P,0),d3.transition(ba.select(".nv-y2.nv-axis")).call(G),ba.select(".nv-y1.nv-axis").classed("nv-disabled",Z.length?!1:!0).attr("transform","translate("+t.range()[0]+",0)"),ba.select(".nv-y2.nv-axis").classed("nv-disabled",$.length?!1:!0).attr("transform","translate("+t.range()[1]+",0)"),H.dispatch.on("stateChange",function(a){b.update()}),q&&(p.width(P).height(Q).margin({left:e.left,top:e.top}).svgContainer(O).xScale(t),_.select(".nv-interactive").call(p)),q?(p.dispatch.on("elementMousemove",function(c){M();var d,e,f,h=[];k.filter(function(a,b){return a.seriesIndex=b,!a.disabled}).forEach(function(i,j){var k=t.domain(),l=i.values.filter(function(a,c){return b.x()(a,c)>=k[0]&&b.x()(a,c)<=k[1]});e=a.interactiveBisect(l,c.pointXValue,b.x());var m=l[e],n=b.y()(m,e);null!==n&&N(j,e,!0),void 0!==m&&(void 0===d&&(d=m),void 0===f&&(f=t(b.x()(m,e))),h.push({key:i.key,value:n,color:g(i,i.seriesIndex),data:m,yAxis:2==i.yAxis?G:F}))});var i=function(a,b){var c=h[b].yAxis;return null==a?"N/A":c.tickFormat()(a)};p.tooltip.headerFormatter(function(a,b){return E.tickFormat()(a,b)}).valueFormatter(p.tooltip.valueFormatter()||i).data({value:b.x()(d,e),index:e,series:h})(),p.renderGuideLine(f)}),p.dispatch.on("elementMouseout",function(a){M()})):(w.dispatch.on("elementMouseover.tooltip",o),x.dispatch.on("elementMouseover.tooltip",o),w.dispatch.on("elementMouseout.tooltip",function(a){I.hidden(!0)}),x.dispatch.on("elementMouseout.tooltip",function(a){I.hidden(!0)}),y.dispatch.on("elementMouseover.tooltip",s),z.dispatch.on("elementMouseover.tooltip",s),y.dispatch.on("elementMouseout.tooltip",function(a){I.hidden(!0)}),z.dispatch.on("elementMouseout.tooltip",function(a){I.hidden(!0)}),C.dispatch.on("elementMouseover.tooltip",J),D.dispatch.on("elementMouseover.tooltip",J),C.dispatch.on("elementMouseout.tooltip",function(a){I.hidden(!0)}),D.dispatch.on("elementMouseout.tooltip",function(a){I.hidden(!0)}),A.dispatch.on("elementMouseover.tooltip",L),B.dispatch.on("elementMouseover.tooltip",L),A.dispatch.on("elementMouseout.tooltip",function(a){I.hidden(!0)}),B.dispatch.on("elementMouseout.tooltip",function(a){I.hidden(!0)}),A.dispatch.on("elementMousemove.tooltip",function(a){I()}),B.dispatch.on("elementMousemove.tooltip",function(a){I()}))}),b}var c,d,e={top:30,right:20,bottom:50,left:60},f=null,g=a.utils.defaultColor(),h=null,i=null,j=!0,k=null,l=function(a){return a.x},m=function(a){return a.y},n="linear",o=!0,p=a.interactiveGuideline(),q=!1,r=" (right 
axis)",s=250,t=d3.scale.linear(),u=d3.scale.linear(),v=d3.scale.linear(),w=a.models.line().yScale(u).duration(s),x=a.models.line().yScale(v).duration(s),y=a.models.scatter().yScale(u).duration(s),z=a.models.scatter().yScale(v).duration(s),A=a.models.multiBar().stacked(!1).yScale(u).duration(s),B=a.models.multiBar().stacked(!1).yScale(v).duration(s),C=a.models.stackedArea().yScale(u).duration(s),D=a.models.stackedArea().yScale(v).duration(s),E=a.models.axis().scale(t).orient("bottom").tickPadding(5).duration(s),F=a.models.axis().scale(u).orient("left").duration(s),G=a.models.axis().scale(v).orient("right").duration(s),H=a.models.legend().height(30),I=a.models.tooltip(),J=d3.dispatch(),K=[w,x,y,z,A,B,C,D];return b.dispatch=J,b.legend=H,b.lines1=w,b.lines2=x,b.scatters1=y,b.scatters2=z,b.bars1=A,b.bars2=B,b.stack1=C,b.stack2=D,b.xAxis=E,b.yAxis1=F,b.yAxis2=G,b.tooltip=I,b.interactiveLayer=p,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return h},set:function(a){h=a}},height:{get:function(){return i},set:function(a){i=a}},showLegend:{get:function(){return j},set:function(a){j=a}},yDomain1:{get:function(){return c},set:function(a){c=a}},yDomain2:{get:function(){return d},set:function(a){d=a}},noData:{get:function(){return k},set:function(a){k=a}},interpolate:{get:function(){return n},set:function(a){n=a}},legendRightAxisHint:{get:function(){return r},set:function(a){r=a}},margin:{get:function(){return e},set:function(a){void 0!==a.top&&(e.top=a.top,f=a.top),e.right=void 0!==a.right?a.right:e.right,e.bottom=void 0!==a.bottom?a.bottom:e.bottom,e.left=void 0!==a.left?a.left:e.left}},color:{get:function(){return g},set:function(b){g=a.utils.getColor(b)}},x:{get:function(){return l},set:function(a){l=a,w.x(a),x.x(a),y.x(a),z.x(a),A.x(a),B.x(a),C.x(a),D.x(a)}},y:{get:function(){return m},set:function(a){m=a,w.y(a),x.y(a),y.y(a),z.y(a),C.y(a),D.y(a),A.y(a),B.y(a)}},useVoronoi:{get:function(){return o},set:function(a){o=a,w.useVoronoi(a),x.useVoronoi(a),C.useVoronoi(a),D.useVoronoi(a)}},useInteractiveGuideline:{get:function(){return q},set:function(a){q=a,q&&(w.interactive(!1),w.useVoronoi(!1),x.interactive(!1),x.useVoronoi(!1),C.interactive(!1),C.useVoronoi(!1),D.interactive(!1),D.useVoronoi(!1),y.interactive(!1),z.interactive(!1))}},duration:{get:function(){return s},set:function(a){s=a,[w,x,C,D,y,z,E,F,G].forEach(function(a){a.duration(s)})}}}),a.utils.initOptions(b),b},a.models.ohlcBar=function(){"use strict";function b(y){return y.each(function(b){k=d3.select(this);var y=a.utils.availableWidth(h,k,g),A=a.utils.availableHeight(i,k,g);a.utils.initSVG(k);var B=y/b[0].values.length*.9;l.domain(c||d3.extent(b[0].values.map(n).concat(t))),v?l.range(e||[.5*y/b[0].values.length,y*(b[0].values.length-.5)/b[0].values.length]):l.range(e||[5+B/2,y-B/2-5]),m.domain(d||[d3.min(b[0].values.map(s).concat(u)),d3.max(b[0].values.map(r).concat(u))]).range(f||[A,0]),l.domain()[0]===l.domain()[1]&&(l.domain()[0]?l.domain([l.domain()[0]-.01*l.domain()[0],l.domain()[1]+.01*l.domain()[1]]):l.domain([-1,1])),m.domain()[0]===m.domain()[1]&&(m.domain()[0]?m.domain([m.domain()[0]+.01*m.domain()[0],m.domain()[1]-.01*m.domain()[1]]):m.domain([-1,1]));var C=d3.select(this).selectAll("g.nv-wrap.nv-ohlcBar").data([b[0].values]),D=C.enter().append("g").attr("class","nvd3 nv-wrap 
nv-ohlcBar"),E=D.append("defs"),F=D.append("g"),G=C.select("g");F.append("g").attr("class","nv-ticks"),C.attr("transform","translate("+g.left+","+g.top+")"),k.on("click",function(a,b){z.chartClick({data:a,index:b,pos:d3.event,id:j})}),E.append("clipPath").attr("id","nv-chart-clip-path-"+j).append("rect"),C.select("#nv-chart-clip-path-"+j+" rect").attr("width",y).attr("height",A),G.attr("clip-path",w?"url(#nv-chart-clip-path-"+j+")":"");var H=C.select(".nv-ticks").selectAll(".nv-tick").data(function(a){return a});H.exit().remove(),H.enter().append("path").attr("class",function(a,b,c){return(p(a,b)>q(a,b)?"nv-tick negative":"nv-tick positive")+" nv-tick-"+c+"-"+b}).attr("d",function(a,b){return"m0,0l0,"+(m(p(a,b))-m(r(a,b)))+"l"+-B/2+",0l"+B/2+",0l0,"+(m(s(a,b))-m(p(a,b)))+"l0,"+(m(q(a,b))-m(s(a,b)))+"l"+B/2+",0l"+-B/2+",0z"}).attr("transform",function(a,b){return"translate("+l(n(a,b))+","+m(r(a,b))+")"}).attr("fill",function(a,b){return x[0]}).attr("stroke",function(a,b){return x[0]}).attr("x",0).attr("y",function(a,b){return m(Math.max(0,o(a,b)))}).attr("height",function(a,b){return Math.abs(m(o(a,b))-m(0))}),H.attr("class",function(a,b,c){return(p(a,b)>q(a,b)?"nv-tick negative":"nv-tick positive")+" nv-tick-"+c+"-"+b}),d3.transition(H).attr("transform",function(a,b){return"translate("+l(n(a,b))+","+m(r(a,b))+")"}).attr("d",function(a,c){var d=y/b[0].values.length*.9;return"m0,0l0,"+(m(p(a,c))-m(r(a,c)))+"l"+-d/2+",0l"+d/2+",0l0,"+(m(s(a,c))-m(p(a,c)))+"l0,"+(m(q(a,c))-m(s(a,c)))+"l"+d/2+",0l"+-d/2+",0z"})}),b}var c,d,e,f,g={top:0,right:0,bottom:0,left:0},h=null,i=null,j=Math.floor(1e4*Math.random()),k=null,l=d3.scale.linear(),m=d3.scale.linear(),n=function(a){return a.x},o=function(a){return a.y},p=function(a){return a.open},q=function(a){return a.close},r=function(a){return a.high},s=function(a){return a.low},t=[],u=[],v=!1,w=!0,x=a.utils.defaultColor(),y=!1,z=d3.dispatch("stateChange","changeState","renderEnd","chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout","elementMousemove");return b.highlightPoint=function(a,c){b.clearHighlights(),k.select(".nv-ohlcBar .nv-tick-0-"+a).classed("hover",c)},b.clearHighlights=function(){k.select(".nv-ohlcBar .nv-tick.hover").classed("hover",!1)},b.dispatch=z,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return h},set:function(a){h=a}},height:{get:function(){return i},set:function(a){i=a}},xScale:{get:function(){return l},set:function(a){l=a}},yScale:{get:function(){return m},set:function(a){m=a}},xDomain:{get:function(){return c},set:function(a){c=a}},yDomain:{get:function(){return d},set:function(a){d=a}},xRange:{get:function(){return e},set:function(a){e=a}},yRange:{get:function(){return f},set:function(a){f=a}},forceX:{get:function(){return t},set:function(a){t=a}},forceY:{get:function(){return u},set:function(a){u=a}},padData:{get:function(){return v},set:function(a){v=a}},clipEdge:{get:function(){return w},set:function(a){w=a}},id:{get:function(){return j},set:function(a){j=a}},interactive:{get:function(){return y},set:function(a){y=a}},x:{get:function(){return n},set:function(a){n=a}},y:{get:function(){return o},set:function(a){o=a}},open:{get:function(){return p()},set:function(a){p=a}},close:{get:function(){return q()},set:function(a){q=a}},high:{get:function(){return r},set:function(a){r=a}},low:{get:function(){return s},set:function(a){s=a}},margin:{get:function(){return g},set:function(a){g.top=void 0!=a.top?a.top:g.top,g.right=void 
0!=a.right?a.right:g.right,g.bottom=void 0!=a.bottom?a.bottom:g.bottom,g.left=void 0!=a.left?a.left:g.left}},color:{get:function(){return x},set:function(b){x=a.utils.getColor(b)}}}),a.utils.initOptions(b),b},a.models.parallelCoordinates=function(){"use strict";function b(B){return A.reset(),B.each(function(b){function A(a){return x(o.map(function(b){if(isNaN(a.values[b.key])||isNaN(parseFloat(a.values[b.key]))||O){var c=l[b.key].domain(),d=l[b.key].range(),e=c[0]-(c[1]-c[0])/9;if(v.indexOf(b.key)<0){var f=d3.scale.linear().domain([e,c[1]]).range([j-12,d[1]]);l[b.key].brush.y(f),v.push(b.key)}if(isNaN(a.values[b.key])||isNaN(parseFloat(a.values[b.key])))return[k(b.key),l[b.key](e)]}return void 0!==U&&(v.length>0||O?(U.style("display","inline"),V.style("display","inline")):(U.style("display","none"),V.style("display","none"))),[k(b.key),l[b.key](a.values[b.key])]}))}function B(a){s.forEach(function(b){var c=l[b.dimension].brush.y().domain();b.hasOnlyNaN&&(b.extent[1]=(l[b.dimension].domain()[1]-c[0])*(b.extent[1]-b.extent[0])/(N[b.dimension]-b.extent[0])+c[0]),b.hasNaN&&(b.extent[0]=c[0]),a&&l[b.dimension].brush.extent(b.extent)}),e.select(".nv-brushBackground").each(function(a){d3.select(this).call(l[a.key].brush)}).selectAll("rect").attr("x",-8).attr("width",16),F()}function C(){q===!1&&(q=!0,B(!0))}function D(){$=p.filter(function(a){return!l[a].brush.empty()}),_=$.map(function(a){return l[a].brush.extent()}),s=[],$.forEach(function(a,b){s[b]={dimension:a,extent:_[b],hasNaN:!1,hasOnlyNaN:!1}}),t=[],c.style("display",function(a){var b=$.every(function(b,c){return(isNaN(a.values[b])||isNaN(parseFloat(a.values[b])))&&_[c][0]==l[b].brush.y().domain()[0]?!0:_[c][0]<=a.values[b]&&a.values[b]<=_[c][1]&&!isNaN(parseFloat(a.values[b]))});return b&&t.push(a),b?null:"none"}),F(),z.brush({filters:s,active:t})}function E(){var a=$.length>0?!0:!1;s.forEach(function(a){a.extent[0]===l[a.dimension].brush.y().domain()[0]&&v.indexOf(a.dimension)>=0&&(a.hasNaN=!0),a.extent[1]<l[a.dimension].domain()[0]&&(a.hasOnlyNaN=!0)}),z.brushEnd(t,a)}function F(){e.select(".nv-axis").each(function(a,b){var c=s.filter(function(b){return b.dimension==a.key});P[a.key]=l[a.key].domain(),0!=c.length&&q&&(P[a.key]=[],c[0].extent[1]>l[a.key].domain()[0]&&(P[a.key]=[c[0].extent[1]]),c[0].extent[0]>=l[a.key].domain()[0]&&P[a.key].push(c[0].extent[0])),d3.select(this).call(y.scale(l[a.key]).tickFormat(a.format).tickValues(P[a.key]))})}function G(a){u[a.key]=this.parentNode.__origin__=k(a.key),d.attr("visibility","hidden")}function H(a){u[a.key]=Math.min(i,Math.max(0,this.parentNode.__origin__+=d3.event.x)),c.attr("d",A),o.sort(function(a,b){return J(a.key)-J(b.key)}),o.forEach(function(a,b){return a.currentPosition=b}),k.domain(o.map(function(a){return a.key})),e.attr("transform",function(a){return"translate("+J(a.key)+")"})}function I(a,b){delete this.parentNode.__origin__,delete u[a.key],d3.select(this.parentNode).attr("transform","translate("+k(a.key)+")"),c.attr("d",A),d.attr("d",A).attr("visibility",null),z.dimensionsOrder(o)}function J(a){var b=u[a];return null==b?k(a):b}var K=d3.select(this);if(i=a.utils.availableWidth(g,K,f),j=a.utils.availableHeight(h,K,f),a.utils.initSVG(K),void 0===b[0].values){var L=[];b.forEach(function(a){var b={},c=Object.keys(a);c.forEach(function(c){"name"!==c&&(b[c]=a[c])}),L.push({key:a.name,values:b})}),b=L}var M=b.map(function(a){return a.values});0===t.length&&(t=b),p=n.sort(function(a,b){return a.currentPosition-b.currentPosition}).map(function(a){return 
a.key}),o=n.filter(function(a){return!a.disabled}),k.rangePoints([0,i],1).domain(o.map(function(a){return a.key}));var N={},O=!1,P=[];p.forEach(function(a){var b=d3.extent(M,function(b){return+b[a]}),c=b[0],d=b[1],e=!1;(isNaN(c)||isNaN(d))&&(e=!0,c=0,d=0),c===d&&(c-=1,d+=1);var f=s.filter(function(b){return b.dimension==a});0!==f.length&&(e?(c=l[a].domain()[0],d=l[a].domain()[1]):!f[0].hasOnlyNaN&&q?(c=c>f[0].extent[0]?f[0].extent[0]:c,d=d<f[0].extent[1]?f[0].extent[1]:d):f[0].hasNaN&&(d=d<f[0].extent[1]?f[0].extent[1]:d,N[a]=l[a].domain()[1],O=!0)),l[a]=d3.scale.linear().domain([c,d]).range([.9*(j-12),0]),v=[],l[a].brush=d3.svg.brush().y(l[a]).on("brushstart",C).on("brush",D).on("brushend",E)});var Q=K.selectAll("g.nv-wrap.nv-parallelCoordinates").data([b]),R=Q.enter().append("g").attr("class","nvd3 nv-wrap nv-parallelCoordinates"),S=R.append("g"),T=Q.select("g");S.append("g").attr("class","nv-parallelCoordinates background"),S.append("g").attr("class","nv-parallelCoordinates foreground"),S.append("g").attr("class","nv-parallelCoordinates missingValuesline"),Q.attr("transform","translate("+f.left+","+f.top+")"),x.interpolate("cardinal").tension(w),y.orient("left");var U,V,W=d3.behavior.drag().on("dragstart",G).on("drag",H).on("dragend",I),X=k.range()[1]-k.range()[0];if(X=isNaN(X)?k.range()[0]:X,!isNaN(X)){var Y=[0+X/2,j-12,i-X/2,j-12];U=Q.select(".missingValuesline").selectAll("line").data([Y]),U.enter().append("line"),U.exit().remove(),U.attr("x1",function(a){return a[0]}).attr("y1",function(a){return a[1]}).attr("x2",function(a){return a[2]}).attr("y2",function(a){return a[3]}),V=Q.select(".missingValuesline").selectAll("text").data([m]),V.append("text").data([m]),V.enter().append("text"),V.exit().remove(),V.attr("y",j).attr("x",i-92-X/2).text(function(a){return a})}d=Q.select(".background").selectAll("path").data(b),d.enter().append("path"),d.exit().remove(),d.attr("d",A),c=Q.select(".foreground").selectAll("path").data(b),c.enter().append("path"),c.exit().remove(),c.attr("d",A).style("stroke-width",function(a,b){return isNaN(a.strokeWidth)&&(a.strokeWidth=1),a.strokeWidth}).attr("stroke",function(a,b){return a.color||r(a,b)}),c.on("mouseover",function(a,b){d3.select(this).classed("hover",!0).style("stroke-width",a.strokeWidth+2+"px").style("stroke-opacity",1),z.elementMouseover({label:a.name,color:a.color||r(a,b),values:a.values,dimensions:o})}),c.on("mouseout",function(a,b){d3.select(this).classed("hover",!1).style("stroke-width",a.strokeWidth+"px").style("stroke-opacity",.7),z.elementMouseout({label:a.name,index:b})}),c.on("mousemove",function(a,b){z.elementMousemove()}),c.on("click",function(a){z.elementClick({id:a.id})}),e=T.selectAll(".dimension").data(o);var Z=e.enter().append("g").attr("class","nv-parallelCoordinates dimension");e.attr("transform",function(a){return"translate("+k(a.key)+",0)"}),Z.append("g").attr("class","nv-axis"),Z.append("text").attr("class","nv-label").style("cursor","move").attr("dy","-1em").attr("text-anchor","middle").on("mouseover",function(a,b){z.elementMouseover({label:a.tooltip||a.key,color:a.color})}).on("mouseout",function(a,b){z.elementMouseout({label:a.tooltip})}).on("mousemove",function(a,b){z.elementMousemove()}).call(W),Z.append("g").attr("class","nv-brushBackground"),e.exit().remove(),e.select(".nv-label").text(function(a){return a.key}),B(q);var $=p.filter(function(a){return!l[a].brush.empty()}),_=$.map(function(a){return l[a].brush.extent()}),aa=t.slice(0);t=[],c.style("display",function(a){var 
b=$.every(function(b,c){return(isNaN(a.values[b])||isNaN(parseFloat(a.values[b])))&&_[c][0]==l[b].brush.y().domain()[0]?!0:_[c][0]<=a.values[b]&&a.values[b]<=_[c][1]&&!isNaN(parseFloat(a.values[b]))});return b&&t.push(a),b?null:"none"}),(s.length>0||!a.utils.arrayEquals(t,aa))&&z.activeChanged(t)}),b}var c,d,e,f={top:30,right:0,bottom:10,left:0},g=null,h=null,i=null,j=null,k=d3.scale.ordinal(),l={},m="undefined values",n=[],o=[],p=[],q=!0,r=a.utils.defaultColor(),s=[],t=[],u=[],v=[],w=1,x=d3.svg.line(),y=d3.svg.axis(),z=d3.dispatch("brushstart","brush","brushEnd","dimensionsOrder","stateChange","elementClick","elementMouseover","elementMouseout","elementMousemove","renderEnd","activeChanged"),A=a.utils.renderWatch(z);return b.dispatch=z,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return g},set:function(a){g=a}},height:{get:function(){return h},set:function(a){h=a}},dimensionData:{get:function(){return n},set:function(a){n=a}},displayBrush:{get:function(){return q},set:function(a){q=a}},filters:{get:function(){return s},set:function(a){s=a}},active:{get:function(){return t},set:function(a){t=a}},lineTension:{get:function(){return w},set:function(a){w=a}},undefinedValuesLabel:{get:function(){return m},set:function(a){m=a}},dimensions:{get:function(){return n.map(function(a){return a.key})},set:function(b){a.deprecated("dimensions","use dimensionData instead"),0===n.length?b.forEach(function(a){n.push({key:a})}):b.forEach(function(a,b){n[b].key=a})}},dimensionNames:{get:function(){return n.map(function(a){return a.key})},set:function(b){a.deprecated("dimensionNames","use dimensionData instead"),p=[],0===n.length?b.forEach(function(a){n.push({key:a})}):b.forEach(function(a,b){n[b].key=a})}},dimensionFormats:{get:function(){return n.map(function(a){return a.format})},set:function(b){a.deprecated("dimensionFormats","use dimensionData instead"),0===n.length?b.forEach(function(a){n.push({format:a})}):b.forEach(function(a,b){n[b].format=a})}},margin:{get:function(){return f},set:function(a){f.top=void 0!==a.top?a.top:f.top,f.right=void 0!==a.right?a.right:f.right,f.bottom=void 0!==a.bottom?a.bottom:f.bottom,f.left=void 0!==a.left?a.left:f.left}},color:{get:function(){return r},set:function(b){r=a.utils.getColor(b)}}}),a.utils.initOptions(b),b},a.models.parallelCoordinatesChart=function(){"use strict";function b(e){return s.reset(),s.models(c),e.each(function(e){var k=d3.select(this);a.utils.initSVG(k);var p=a.utils.availableWidth(h,k,f),q=a.utils.availableHeight(i,k,f);if(b.update=function(){k.call(b)},b.container=this,l.setter(u(m),b.update).getter(t(m)).update(),l.disabled=m.map(function(a){return!!a.disabled}),m=m.map(function(a){return a.disabled=!!a.disabled,a}),m.forEach(function(a,b){a.originalPosition=isNaN(a.originalPosition)?b:a.originalPosition,a.currentPosition=isNaN(a.currentPosition)?b:a.currentPosition}),!o){var s;o={};for(s in l)l[s]instanceof Array?o[s]=l[s].slice(0):o[s]=l[s]}if(!e||!e.length)return a.utils.noData(b,k),b;k.selectAll(".nv-noData").remove();var v=k.selectAll("g.nv-wrap.nv-parallelCoordinatesChart").data([e]),w=v.enter().append("g").attr("class","nvd3 nv-wrap nv-parallelCoordinatesChart").append("g"),x=v.select("g");w.append("g").attr("class","nv-parallelCoordinatesWrap"),w.append("g").attr("class","nv-legendWrap"),x.select("rect").attr("width",p).attr("height",q>0?q:0),j?(d.width(p).color(function(a){return"rgb(188,190,192)"}),x.select(".nv-legendWrap").datum(m.sort(function(a,b){return 
a.originalPosition-b.originalPosition})).call(d),g||d.height()===f.top||(f.top=d.height(),q=a.utils.availableHeight(i,k,f)),v.select(".nv-legendWrap").attr("transform","translate( 0 ,"+-f.top+")")):x.select(".nv-legendWrap").selectAll("*").remove(),v.attr("transform","translate("+f.left+","+f.top+")"),c.width(p).height(q).dimensionData(m).displayBrush(n);var y=x.select(".nv-parallelCoordinatesWrap ").datum(e);y.transition().call(c),c.dispatch.on("brushEnd",function(a,b){b?(n=!0,r.brushEnd(a)):n=!1}),d.dispatch.on("stateChange",function(a){for(var c in a)l[c]=a[c];r.stateChange(l),b.update()}),c.dispatch.on("dimensionsOrder",function(a){m.sort(function(a,b){return a.currentPosition-b.currentPosition});var b=!1;m.forEach(function(a,c){a.currentPosition=c,a.currentPosition!==a.originalPosition&&(b=!0)}),r.dimensionsOrder(m,b)}),r.on("changeState",function(a){"undefined"!=typeof a.disabled&&(m.forEach(function(b,c){b.disabled=a.disabled[c]}),l.disabled=a.disabled),b.update()})}),s.renderEnd("parraleleCoordinateChart immediate"),b}var c=a.models.parallelCoordinates(),d=a.models.legend(),e=a.models.tooltip(),f=(a.models.tooltip(),{top:0,right:0,bottom:0,left:0}),g=null,h=null,i=null,j=!0,k=a.utils.defaultColor(),l=a.utils.state(),m=[],n=!0,o=null,p=null,q="undefined",r=d3.dispatch("dimensionsOrder","brushEnd","stateChange","changeState","renderEnd"),s=a.utils.renderWatch(r),t=function(a){return function(){return{active:a.map(function(a){return!a.disabled})}}},u=function(a){return function(b){void 0!==b.active&&a.forEach(function(a,c){a.disabled=!b.active[c]})}};return e.contentGenerator(function(a){var b='<table><thead><tr><td class="legend-color-guide"><div style="background-color:'+a.color+'"></div></td><td><strong>'+a.key+"</strong></td></tr></thead>";return 0!==a.series.length&&(b+='<tbody><tr><td height ="10px"></td></tr>',a.series.forEach(function(a){b=b+'<tr><td class="legend-color-guide"><div style="background-color:'+a.color+'"></div></td><td class="key">'+a.key+'</td><td class="value">'+a.value+"</td></tr>"}),b+="</tbody>"),b+="</table>"}),c.dispatch.on("elementMouseover.tooltip",function(a){var b={key:a.label,color:a.color,series:[]};a.values&&(Object.keys(a.values).forEach(function(c){var d=a.dimensions.filter(function(a){return a.key===c})[0];if(d){var e;e=isNaN(a.values[c])||isNaN(parseFloat(a.values[c]))?q:d.format(a.values[c]),b.series.push({idx:d.currentPosition,key:c,value:e,color:d.color})}}),b.series.sort(function(a,b){return a.idx-b.idx})),e.data(b).hidden(!1)}),c.dispatch.on("elementMouseout.tooltip",function(a){e.hidden(!0)}),c.dispatch.on("elementMousemove.tooltip",function(){e()}),b.dispatch=r,b.parallelCoordinates=c,b.legend=d,b.tooltip=e,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return h},set:function(a){h=a}},height:{get:function(){return i},set:function(a){i=a}},showLegend:{get:function(){return j},set:function(a){j=a}},defaultState:{get:function(){return o},set:function(a){o=a}},dimensionData:{get:function(){return m},set:function(a){m=a}},displayBrush:{get:function(){return n},set:function(a){n=a}},noData:{get:function(){return p},set:function(a){p=a}},nanValue:{get:function(){return q},set:function(a){q=a}},margin:{get:function(){return f},set:function(a){void 0!==a.top&&(f.top=a.top,g=a.top),f.right=void 0!==a.right?a.right:f.right,f.bottom=void 0!==a.bottom?a.bottom:f.bottom,f.left=void 0!==a.left?a.left:f.left}},color:{get:function(){return 
k},set:function(b){k=a.utils.getColor(b),d.color(k),c.color(k)}}}),a.utils.inheritOptions(b,c),a.utils.initOptions(b),b},a.models.pie=function(){"use strict";function b(F){return E.reset(),F.each(function(b){function F(a,b){a.endAngle=isNaN(a.endAngle)?0:a.endAngle,a.startAngle=isNaN(a.startAngle)?0:a.startAngle,p||(a.innerRadius=0);var c=d3.interpolate(this._current,a);return this._current=c(0),function(a){return C[b](c(a))}}var G=d-c.left-c.right,H=e-c.top-c.bottom,I=Math.min(G,H)/2,J=[],K=[];if(i=d3.select(this),0===A.length)for(var L=I-I/5,M=y*I,N=0;N<b[0].length;N++)J.push(L),K.push(M);else r?(J=A.map(function(a){return(a.outer-a.outer/5)*I}),K=A.map(function(a){return(a.inner-a.inner/5)*I}),y=d3.min(A.map(function(a){return a.inner-a.inner/5}))):(J=A.map(function(a){return a.outer*I}),K=A.map(function(a){return a.inner*I}),y=d3.min(A.map(function(a){return a.inner})));a.utils.initSVG(i);var O=i.selectAll(".nv-wrap.nv-pie").data(b),P=O.enter().append("g").attr("class","nvd3 nv-wrap nv-pie nv-chart-"+h),Q=P.append("g"),R=O.select("g"),S=Q.append("g").attr("class","nv-pie");Q.append("g").attr("class","nv-pieLabels"),O.attr("transform","translate("+c.left+","+c.top+")"),R.select(".nv-pie").attr("transform","translate("+G/2+","+H/2+")"),R.select(".nv-pieLabels").attr("transform","translate("+G/2+","+H/2+")"),i.on("click",function(a,b){B.chartClick({data:a,index:b,pos:d3.event,id:h})}),C=[],D=[];for(var N=0;N<b[0].length;N++){var T=d3.svg.arc().outerRadius(J[N]),U=d3.svg.arc().outerRadius(J[N]+5);u!==!1&&(T.startAngle(u),U.startAngle(u)),w!==!1&&(T.endAngle(w),U.endAngle(w)),p&&(T.innerRadius(K[N]),U.innerRadius(K[N])),T.cornerRadius&&x&&(T.cornerRadius(x),U.cornerRadius(x)),C.push(T),D.push(U)}var V=d3.layout.pie().sort(null).value(function(a){return a.disabled?0:g(a)});V.padAngle&&v&&V.padAngle(v),p&&q&&(S.append("text").attr("class","nv-pie-title"),O.select(".nv-pie-title").style("text-anchor","middle").text(function(a){return q}).style("font-size",Math.min(G,H)*y*2/(q.length+2)+"px").attr("dy","0.35em").attr("transform",function(a,b){return"translate(0, "+s+")"}));var W=O.select(".nv-pie").selectAll(".nv-slice").data(V),X=O.select(".nv-pieLabels").selectAll(".nv-label").data(V);W.exit().remove(),X.exit().remove();var Y=W.enter().append("g");Y.attr("class","nv-slice"),Y.on("mouseover",function(a,b){d3.select(this).classed("hover",!0),r&&d3.select(this).select("path").transition().duration(70).attr("d",D[b]),B.elementMouseover({data:a.data,index:b,color:d3.select(this).style("fill"),percent:(a.endAngle-a.startAngle)/(2*Math.PI)})}),Y.on("mouseout",function(a,b){d3.select(this).classed("hover",!1),r&&d3.select(this).select("path").transition().duration(50).attr("d",C[b]),B.elementMouseout({data:a.data,index:b})}),Y.on("mousemove",function(a,b){B.elementMousemove({data:a.data,index:b})}),Y.on("click",function(a,b){var c=this;B.elementClick({data:a.data,index:b,color:d3.select(this).style("fill"),event:d3.event,element:c})}),Y.on("dblclick",function(a,b){B.elementDblClick({data:a.data,index:b,color:d3.select(this).style("fill")})}),W.attr("fill",function(a,b){return j(a.data,b)}),W.attr("stroke",function(a,b){return j(a.data,b)});Y.append("path").each(function(a){this._current=a});if(W.select("path").transition().duration(z).attr("d",function(a,b){return C[b](a)}).attrTween("d",F),l){for(var 
Z=[],N=0;N<b[0].length;N++)Z.push(C[N]),m?p&&(Z[N]=d3.svg.arc().outerRadius(C[N].outerRadius()),u!==!1&&Z[N].startAngle(u),w!==!1&&Z[N].endAngle(w)):p||Z[N].innerRadius(0);X.enter().append("g").classed("nv-label",!0).each(function(a,b){var c=d3.select(this);c.attr("transform",function(a,b){if(t){a.outerRadius=J[b]+10,a.innerRadius=J[b]+15;var c=(a.startAngle+a.endAngle)/2*(180/Math.PI);return(a.startAngle+a.endAngle)/2<Math.PI?c-=90:c+=90,"translate("+Z[b].centroid(a)+") rotate("+c+")"}return a.outerRadius=I+10,a.innerRadius=I+15,"translate("+Z[b].centroid(a)+")"}),c.append("rect").style("stroke","#fff").style("fill","#fff").attr("rx",3).attr("ry",3),c.append("text").style("text-anchor",t?(a.startAngle+a.endAngle)/2<Math.PI?"start":"end":"middle").style("fill","#000")});var $={},_=14,aa=140,ba=function(a){return Math.floor(a[0]/aa)*aa+","+Math.floor(a[1]/_)*_},ca=function(a){return(a.endAngle-a.startAngle)/(2*Math.PI)};X.watchTransition(E,"pie labels").attr("transform",function(a,b){if(t){a.outerRadius=J[b]+10,a.innerRadius=J[b]+15;var c=(a.startAngle+a.endAngle)/2*(180/Math.PI);return(a.startAngle+a.endAngle)/2<Math.PI?c-=90:c+=90,"translate("+Z[b].centroid(a)+") rotate("+c+")"}a.outerRadius=I+10,a.innerRadius=I+15;var d=Z[b].centroid(a),e=ca(a);if(a.value&&e>=o){var f=ba(d);$[f]&&(d[1]-=_),$[ba(d)]=!0}return"translate("+d+")"}),X.select(".nv-label text").style("text-anchor",function(a,b){return t?(a.startAngle+a.endAngle)/2<Math.PI?"start":"end":"middle"}).text(function(a,b){var c=ca(a),d="";if(!a.value||o>c)return"";if("function"==typeof n)d=n(a,b,{key:f(a.data),value:g(a.data),percent:k(c)});else switch(n){case"key":d=f(a.data);break;case"value":d=k(g(a.data));break;case"percent":d=d3.format("%")(c)}return d})}}),E.renderEnd("pie immediate"),b}var c={top:0,right:0,bottom:0,left:0},d=500,e=500,f=function(a){return a.x},g=function(a){return a.y},h=Math.floor(1e4*Math.random()),i=null,j=a.utils.defaultColor(),k=d3.format(",.2f"),l=!0,m=!1,n="key",o=.02,p=!1,q=!1,r=!0,s=0,t=!1,u=!1,v=!1,w=!1,x=0,y=.5,z=250,A=[],B=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout","elementMousemove","renderEnd"),C=[],D=[],E=a.utils.renderWatch(B);return b.dispatch=B,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{arcsRadius:{get:function(){return A},set:function(a){A=a}},width:{get:function(){return d},set:function(a){d=a}},height:{get:function(){return e},set:function(a){e=a}},showLabels:{get:function(){return l},set:function(a){l=a}},title:{get:function(){return q},set:function(a){q=a}},titleOffset:{get:function(){return s},set:function(a){s=a}},labelThreshold:{get:function(){return o},set:function(a){o=a}},valueFormat:{get:function(){return k},set:function(a){k=a}},x:{get:function(){return f},set:function(a){f=a}},id:{get:function(){return h},set:function(a){h=a}},endAngle:{get:function(){return w},set:function(a){w=a}},startAngle:{get:function(){return u},set:function(a){u=a}},padAngle:{get:function(){return v},set:function(a){v=a}},cornerRadius:{get:function(){return x},set:function(a){x=a}},donutRatio:{get:function(){return y},set:function(a){y=a}},labelsOutside:{get:function(){return m},set:function(a){m=a}},labelSunbeamLayout:{get:function(){return t},set:function(a){t=a}},donut:{get:function(){return p},set:function(a){p=a}},growOnHover:{get:function(){return r},set:function(a){r=a}},pieLabelsOutside:{get:function(){return m},set:function(b){m=b,a.deprecated("pieLabelsOutside","use labelsOutside 
instead")}},donutLabelsOutside:{get:function(){return m},set:function(b){m=b,a.deprecated("donutLabelsOutside","use labelsOutside instead")}},labelFormat:{get:function(){return k},set:function(b){k=b,a.deprecated("labelFormat","use valueFormat instead")}},margin:{get:function(){return c},set:function(a){c.top="undefined"!=typeof a.top?a.top:c.top,c.right="undefined"!=typeof a.right?a.right:c.right,c.bottom="undefined"!=typeof a.bottom?a.bottom:c.bottom,c.left="undefined"!=typeof a.left?a.left:c.left}},duration:{get:function(){return z},set:function(a){z=a,E.reset(z)}},y:{get:function(){return g},set:function(a){g=d3.functor(a)}},color:{get:function(){return j},set:function(b){j=a.utils.getColor(b)}},labelType:{get:function(){return n},set:function(a){n=a||"key"}}}),a.utils.initOptions(b),b},a.models.pieChart=function(){"use strict";function b(e){return s.reset(),s.models(c),e.each(function(e){var j=d3.select(this);a.utils.initSVG(j);var m=a.utils.availableWidth(h,j,f),p=a.utils.availableHeight(i,j,f);if(b.update=function(){j.transition().call(b)},b.container=this,n.setter(u(e),b.update).getter(t(e)).update(),n.disabled=e.map(function(a){return!!a.disabled}),!o){var q;o={};for(q in n)n[q]instanceof Array?o[q]=n[q].slice(0):o[q]=n[q]}if(!e||!e.length)return a.utils.noData(b,j),b;j.selectAll(".nv-noData").remove();var s=j.selectAll("g.nv-wrap.nv-pieChart").data([e]),v=s.enter().append("g").attr("class","nvd3 nv-wrap nv-pieChart").append("g"),w=s.select("g");
-if(v.append("g").attr("class","nv-pieWrap"),v.append("g").attr("class","nv-legendWrap"),k){if("top"===l)d.width(m).key(c.x()),s.select(".nv-legendWrap").datum(e).call(d),g||d.height()===f.top||(f.top=d.height(),p=a.utils.availableHeight(i,j,f)),s.select(".nv-legendWrap").attr("transform","translate(0,"+-f.top+")");else if("right"===l){var x=a.models.legend().width();x>m/2&&(x=m/2),d.height(p).key(c.x()),d.width(x),m-=d.width(),s.select(".nv-legendWrap").datum(e).call(d).attr("transform","translate("+m+",0)")}}else w.select(".nv-legendWrap").selectAll("*").remove();s.attr("transform","translate("+f.left+","+f.top+")"),c.width(m).height(p);var y=w.select(".nv-pieWrap").datum([e]);d3.transition(y).call(c),d.dispatch.on("stateChange",function(a){for(var c in a)n[c]=a[c];r.stateChange(n),b.update()}),r.on("changeState",function(a){"undefined"!=typeof a.disabled&&(e.forEach(function(b,c){b.disabled=a.disabled[c]}),n.disabled=a.disabled),b.update()})}),s.renderEnd("pieChart immediate"),b}var c=a.models.pie(),d=a.models.legend(),e=a.models.tooltip(),f={top:30,right:20,bottom:20,left:20},g=null,h=null,i=null,j=!1,k=!0,l="top",m=a.utils.defaultColor(),n=a.utils.state(),o=null,p=null,q=250,r=d3.dispatch("stateChange","changeState","renderEnd");e.duration(0).headerEnabled(!1).valueFormatter(function(a,b){return c.valueFormat()(a,b)});var s=a.utils.renderWatch(r),t=function(a){return function(){return{active:a.map(function(a){return!a.disabled})}}},u=function(a){return function(b){void 0!==b.active&&a.forEach(function(a,c){a.disabled=!b.active[c]})}};return c.dispatch.on("elementMouseover.tooltip",function(a){a.series={key:b.x()(a.data),value:b.y()(a.data),color:a.color,percent:a.percent},j||(delete a.percent,delete a.series.percent),e.data(a).hidden(!1)}),c.dispatch.on("elementMouseout.tooltip",function(a){e.hidden(!0)}),c.dispatch.on("elementMousemove.tooltip",function(a){e()}),b.legend=d,b.dispatch=r,b.pie=c,b.tooltip=e,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return h},set:function(a){h=a}},height:{get:function(){return i},set:function(a){i=a}},noData:{get:function(){return p},set:function(a){p=a}},showTooltipPercent:{get:function(){return j},set:function(a){j=a}},showLegend:{get:function(){return k},set:function(a){k=a}},legendPosition:{get:function(){return l},set:function(a){l=a}},defaultState:{get:function(){return o},set:function(a){o=a}},color:{get:function(){return m},set:function(a){m=a,d.color(m),c.color(m)}},duration:{get:function(){return q},set:function(a){q=a,s.reset(q),c.duration(q)}},margin:{get:function(){return f},set:function(a){void 0!==a.top&&(f.top=a.top,g=a.top),f.right=void 0!==a.right?a.right:f.right,f.bottom=void 0!==a.bottom?a.bottom:f.bottom,f.left=void 0!==a.left?a.left:f.left}}}),a.utils.inheritOptions(b,c),a.utils.initOptions(b),b},a.models.sankey=function(){"use strict";function b(){n.forEach(function(a){a.sourceLinks=[],a.targetLinks=[]}),o.forEach(function(a){var b=a.source,c=a.target;"number"==typeof b&&(b=a.source=n[a.source]),"number"==typeof c&&(c=a.target=n[a.target]),b.sourceLinks.push(a),c.targetLinks.push(a)})}function c(){n.forEach(function(a){a.value=Math.max(d3.sum(a.sourceLinks,i),d3.sum(a.targetLinks,i))})}function d(){for(var a,b=n,c=0;b.length&&c<n.length;)a=[],b.forEach(function(b){b.x=c,b.dx=k,b.sourceLinks.forEach(function(b){a.indexOf(b.target)<0&&a.push(b.target)})}),b=a,++c;p&&e(c),f((m[0]-k)/(c-1))}function e(a){n.forEach(function(b){b.sourceLinks.length||(b.x=a-1)})}function 
f(a){n.forEach(function(b){b.x*=a})}function g(a){function b(){var a=d3.min(g,function(a){return(m[1]-(a.length-1)*l)/d3.sum(a,i)});g.forEach(function(b){b.forEach(function(b,c){b.y=c,b.dy=b.value*a})}),o.forEach(function(b){b.dy=b.value*a})}function c(a){function b(a){return(a.source.y+a.sy+a.dy/2)*a.value}g.forEach(function(c,d){c.forEach(function(c){if(c.targetLinks.length){var d=d3.sum(c.targetLinks,b)/d3.sum(c.targetLinks,i);c.y+=(d-t(c))*a}})})}function d(a){function b(a){return(a.target.y+a.ty+a.dy/2)*a.value}g.slice().reverse().forEach(function(c){c.forEach(function(c){if(c.sourceLinks.length){var d=d3.sum(c.sourceLinks,b)/d3.sum(c.sourceLinks,i);c.y+=(d-t(c))*a}})})}function e(){g.forEach(function(a){var b,c,d,e=0,g=a.length;for(a.sort(f),d=0;g>d;++d)b=a[d],c=e-b.y,c>0&&(b.y+=c),e=b.y+b.dy+l;if(c=e-l-m[1],c>0)for(e=b.y-=c,d=g-2;d>=0;--d)b=a[d],c=b.y+b.dy+l-e,c>0&&(b.y-=c),e=b.y})}function f(a,b){return a.y-b.y}var g=d3.nest().key(function(a){return a.x}).sortKeys(d3.ascending).entries(n).map(function(a){return a.values});b(),e(),h();for(var j=1;a>0;--a)d(j*=.99),e(),h(),c(j),e(),h()}function h(){function a(a,b){return a.source.y-b.source.y}function b(a,b){return a.target.y-b.target.y}n.forEach(function(c){c.sourceLinks.sort(b),c.targetLinks.sort(a)}),n.forEach(function(a){var b=0,c=0;a.sourceLinks.forEach(function(a){a.sy=b,b+=a.dy}),a.targetLinks.forEach(function(a){a.ty=c,c+=a.dy})})}function i(a){return a.value}var j={},k=24,l=8,m=[1,1],n=[],o=[],p=!0,q=function(a){b(),c(),d(),g(a)},r=function(){h()},s=function(){function a(a){var c=a.source.x+a.source.dx,d=a.target.x,e=d3.interpolateNumber(c,d),f=e(b),g=e(1-b),h=a.source.y+a.sy+a.dy/2,i=a.target.y+a.ty+a.dy/2,j="M"+c+","+h+"C"+f+","+h+" "+g+","+i+" "+d+","+i;return j}var b=.5;return a.curvature=function(c){return arguments.length?(b=+c,a):b},a},t=function(a){return a.y+a.dy/2};return j.options=a.utils.optionsFunc.bind(j),j._options=Object.create({},{nodeWidth:{get:function(){return k},set:function(a){k=+a}},nodePadding:{get:function(){return l},set:function(a){l=a}},nodes:{get:function(){return n},set:function(a){n=a}},links:{get:function(){return o},set:function(a){o=a}},size:{get:function(){return m},set:function(a){m=a}},sinksRight:{get:function(){return p},set:function(a){p=a}},layout:{get:function(){q(32)},set:function(a){q(a)}},relayout:{get:function(){r()},set:function(a){}},center:{get:function(){return t()},set:function(a){"function"==typeof a&&(t=a)}},link:{get:function(){return s()},set:function(a){return"function"==typeof a&&(s=a),s()}}}),a.utils.initOptions(j),j},a.models.sankeyChart=function(){"use strict";function b(a){return a.each(function(b){function c(a){d3.select(this).attr("transform","translate("+a.x+","+(a.y=Math.max(0,Math.min(f-a.dy,d3.event.y)))+")"),d.relayout(),t.attr("d",s)}var i={nodes:[{node:1,name:"Test 1"},{node:2,name:"Test 2"},{node:3,name:"Test 3"},{node:4,name:"Test 4"},{node:5,name:"Test 5"},{node:6,name:"Test 6"}],links:[{source:0,target:1,value:2295},{source:0,target:5,value:1199},{source:1,target:2,value:1119},{source:1,target:5,value:1176},{source:2,target:3,value:487},{source:2,target:5,value:632},{source:3,target:4,value:301},{source:3,target:5,value:186}]},k=!1,l=!1;if(("object"==typeof b.nodes&&b.nodes.length)>=0&&("object"==typeof b.links&&b.links.length)>=0&&(k=!0),b.nodes&&b.nodes.length>0&&b.links&&b.links.length>0&&(l=!0),!k)return console.error("NVD3 Sankey chart error:","invalid data format for",b),console.info("Valid data format is: ",i,JSON.stringify(i)),r(a,"Error loading 
chart, data is invalid"),!1;if(!l)return r(a,"No data available"),!1;var m=a.append("svg").attr("width",e).attr("height",f).append("g").attr("class","nvd3 nv-wrap nv-sankeyChart");d.nodeWidth(g).nodePadding(h).size([e,f]);var s=d.link();d.nodes(b.nodes).links(b.links).layout(32).center(j);var t=m.append("g").selectAll(".link").data(b.links).enter().append("path").attr("class","link").attr("d",s).style("stroke-width",function(a){return Math.max(1,a.dy)}).sort(function(a,b){return b.dy-a.dy});t.append("title").text(n);var u=m.append("g").selectAll(".node").data(b.nodes).enter().append("g").attr("class","node").attr("transform",function(a){return"translate("+a.x+","+a.y+")"}).call(d3.behavior.drag().origin(function(a){return a}).on("dragstart",function(){this.parentNode.appendChild(this)}).on("drag",c));u.append("rect").attr("height",function(a){return a.dy}).attr("width",d.nodeWidth()).style("fill",o).style("stroke",p).append("title").text(q),u.append("text").attr("x",-6).attr("y",function(a){return a.dy/2}).attr("dy",".35em").attr("text-anchor","end").attr("transform",null).text(function(a){return a.name}).filter(function(a){return a.x<e/2}).attr("x",6+d.nodeWidth()).attr("text-anchor","start")}),b}var c={top:5,right:0,bottom:5,left:0},d=a.models.sankey(),e=600,f=400,g=36,h=40,i="units",j=void 0,k=d3.format(",.0f"),l=function(a){return k(a)+" "+i},m=d3.scale.category20(),n=function(a){return a.source.name+" → "+a.target.name+"\n"+l(a.value)},o=function(a){return a.color=m(a.name.replace(/ .*/,""))},p=function(a){return d3.rgb(a.color).darker(2)},q=function(a){return a.name+"\n"+l(a.value)},r=function(a,b){a.append("text").attr("x",0).attr("y",0).attr("class","nvd3-sankey-chart-error").attr("text-anchor","middle").text(b)};return b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{units:{get:function(){return i},set:function(a){i=a}},width:{get:function(){return e},set:function(a){e=a}},height:{get:function(){return f},set:function(a){f=a}},format:{get:function(){return l},set:function(a){l=a}},linkTitle:{get:function(){return n},set:function(a){n=a}},nodeWidth:{get:function(){return g},set:function(a){g=a}},nodePadding:{get:function(){return h},set:function(a){h=a}},center:{get:function(){return j},set:function(a){j=a}},margin:{get:function(){return c},set:function(a){c.top=void 0!==a.top?a.top:c.top,c.right=void 0!==a.right?a.right:c.right,c.bottom=void 0!==a.bottom?a.bottom:c.bottom,c.left=void 0!==a.left?a.left:c.left}},nodeStyle:{get:function(){return{}},set:function(a){o=void 0!==a.fillColor?a.fillColor:o,p=void 0!==a.strokeColor?a.strokeColor:p,q=void 0!==a.title?a.title:q}}}),a.utils.initOptions(b),b},a.models.scatter=function(){"use strict";function b(a){var b,c;return b=a[0].series+":"+a[1],c=Z[b]=Z[b]||{}}function c(a){var b;b=a[0].series+":"+a[1],delete Z[b]}function d(a){var c,d,e,f=b(a),g=!1;for(c=1;c<arguments.length;c+=2)d=arguments[c],e=arguments[c+1](a[0],a[1]),f[d]===e&&f.hasOwnProperty(d)||(f[d]=e,g=!0);return g}function e(b){return X.reset(),b.each(function(b){function T(){if(W=!1,!C)return!1;if(S===!0){var c=d3.merge(b.map(function(b,c){return b.values.map(function(b,d){var e=v(b,d),f=w(b,d);return[a.utils.NaNtoZero(s(e))+1e-4*Math.random(),a.utils.NaNtoZero(t(f))+1e-4*Math.random(),c,d,b]}).filter(function(a,b){return 
D(a[4],b)})}));if(0==c.length)return!1;c.length<3&&(c.push([s.range()[0]-20,t.range()[0]-20,null,null]),c.push([s.range()[1]+20,t.range()[1]+20,null,null]),c.push([s.range()[0]-20,t.range()[0]+20,null,null]),c.push([s.range()[1]+20,t.range()[1]-20,null,null]));var d=d3.geom.polygon([[-10,-10],[-10,n+10],[m+10,n+10],[m+10,-10]]),e=d3.geom.voronoi(c).map(function(a,b){return{data:d.clip(a),series:c[b][2],point:c[b][3]}});ea.select(".nv-point-paths").selectAll("path").remove();var f=ea.select(".nv-point-paths").selectAll("path").data(e),g=f.enter().append("svg:path").attr("d",function(a){return a&&a.data&&0!==a.data.length?"M"+a.data.join(",")+"Z":"M 0 0"}).attr("id",function(a,b){return"nv-path-"+b}).attr("clip-path",function(a,b){return"url(#nv-clip-"+q+"-"+b+")"});if(I&&g.style("fill",d3.rgb(230,230,230)).style("fill-opacity",.4).style("stroke-opacity",1).style("stroke",d3.rgb(200,200,200)),H){ea.select(".nv-point-clips").selectAll("*").remove();var h=ea.select(".nv-point-clips").selectAll("clipPath").data(c);h.enter().append("svg:clipPath").attr("id",function(a,b){return"nv-clip-"+q+"-"+b}).append("svg:circle").attr("cx",function(a){return a[0]}).attr("cy",function(a){return a[1]}).attr("r",J)}var i=function(a,c,d){if(W)return 0;var e=b[c.series];if(void 0!==e){var f=e.values[c.point];f.color=o(e,c.series),f.x=v(f),f.y=w(f);var g=r.node().getBoundingClientRect(),h=window.pageYOffset||document.documentElement.scrollTop,i=window.pageXOffset||document.documentElement.scrollLeft,j={left:s(v(f,c.point))+g.left+i+l.left+10,top:t(w(f,c.point))+g.top+h+l.top+10};d({point:f,series:e,pos:j,relativePos:[s(v(f,c.point))+l.left,t(w(f,c.point))+l.top],seriesIndex:c.series,pointIndex:c.point,event:d3.event,element:a})}};f.on("click",function(a){i(this,a,R.elementClick)}).on("dblclick",function(a){i(this,a,R.elementDblClick)}).on("mouseover",function(a){i(this,a,R.elementMouseover)}).on("mouseout",function(a,b){i(this,a,R.elementMouseout)})}else ea.select(".nv-groups").selectAll(".nv-group").selectAll(".nv-point").on("click",function(a,c){if(W||!b[a.series])return 0;var d=b[a.series],e=d.values[c],f=this;R.elementClick({point:e,series:d,pos:[s(v(e,c))+l.left,t(w(e,c))+l.top],relativePos:[s(v(e,c))+l.left,t(w(e,c))+l.top],seriesIndex:a.series,pointIndex:c,event:d3.event,element:f})}).on("dblclick",function(a,c){if(W||!b[a.series])return 0;var d=b[a.series],e=d.values[c];R.elementDblClick({point:e,series:d,pos:[s(v(e,c))+l.left,t(w(e,c))+l.top],relativePos:[s(v(e,c))+l.left,t(w(e,c))+l.top],seriesIndex:a.series,pointIndex:c})}).on("mouseover",function(a,c){if(W||!b[a.series])return 0;var d=b[a.series],e=d.values[c];R.elementMouseover({point:e,series:d,pos:[s(v(e,c))+l.left,t(w(e,c))+l.top],relativePos:[s(v(e,c))+l.left,t(w(e,c))+l.top],seriesIndex:a.series,pointIndex:c,color:o(a,c)})}).on("mouseout",function(a,c){if(W||!b[a.series])return 0;var d=b[a.series],e=d.values[c];R.elementMouseout({point:e,series:d,pos:[s(v(e,c))+l.left,t(w(e,c))+l.top],relativePos:[s(v(e,c))+l.left,t(w(e,c))+l.top],seriesIndex:a.series,pointIndex:c,color:o(a,c)})})}r=d3.select(this);var Z=a.utils.availableWidth(m,r,l),$=a.utils.availableHeight(n,r,l);a.utils.initSVG(r),b.forEach(function(a,b){a.values.forEach(function(a){a.series=b})});var _=e.yScale().name===d3.scale.log().name?!0:!1,aa=K&&L&&O?[]:d3.merge(b.map(function(a){return a.values.map(function(a,b){return{x:v(a,b),y:w(a,b),size:x(a,b)}})}));if(s.domain(K||d3.extent(aa.map(function(a){return 
a.x}).concat(z))),E&&b[0]?s.range(M||[(Z*F+Z)/(2*b[0].values.length),Z-Z*(1+F)/(2*b[0].values.length)]):s.range(M||[0,Z]),_){var ba=d3.min(aa.map(function(a){return 0!==a.y?a.y:void 0}));t.clamp(!0).domain(L||d3.extent(aa.map(function(a){return 0!==a.y?a.y:.1*ba}).concat(A))).range(N||[$,0])}else t.domain(L||d3.extent(aa.map(function(a){return a.y}).concat(A))).range(N||[$,0]);u.domain(O||d3.extent(aa.map(function(a){return a.size}).concat(B))).range(P||Y),Q=s.domain()[0]===s.domain()[1]||t.domain()[0]===t.domain()[1],s.domain()[0]===s.domain()[1]&&(s.domain()[0]?s.domain([s.domain()[0]-.01*s.domain()[0],s.domain()[1]+.01*s.domain()[1]]):s.domain([-1,1])),t.domain()[0]===t.domain()[1]&&(t.domain()[0]?t.domain([t.domain()[0]-.01*t.domain()[0],t.domain()[1]+.01*t.domain()[1]]):t.domain([-1,1])),isNaN(s.domain()[0])&&s.domain([-1,1]),isNaN(t.domain()[0])&&t.domain([-1,1]),f=f||s,g=g||t,h=h||u;var ca=s(1)!==f(1)||t(1)!==g(1)||u(1)!==h(1);i=i||m,j=j||n;var da=i!==m||j!==n,ea=r.selectAll("g.nv-wrap.nv-scatter").data([b]),fa=ea.enter().append("g").attr("class","nvd3 nv-wrap nv-scatter nv-chart-"+q),ga=fa.append("defs"),ha=fa.append("g"),ia=ea.select("g");ea.classed("nv-single-point",Q),ha.append("g").attr("class","nv-groups"),ha.append("g").attr("class","nv-point-paths"),fa.append("g").attr("class","nv-point-clips"),ea.attr("transform","translate("+l.left+","+l.top+")"),ga.append("clipPath").attr("id","nv-edge-clip-"+q).append("rect").attr("transform","translate( -10, -10)"),ea.select("#nv-edge-clip-"+q+" rect").attr("width",Z+20).attr("height",$>0?$+20:0),ia.attr("clip-path",G?"url(#nv-edge-clip-"+q+")":""),W=!0;var ja=ea.select(".nv-groups").selectAll(".nv-group").data(function(a){return a},function(a){return a.key});ja.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6),ja.exit().remove(),ja.attr("class",function(a,b){return(a.classed||"")+" nv-group nv-series-"+b}).classed("nv-noninteractive",!C).classed("hover",function(a){return a.hover}),ja.watchTransition(X,"scatter: groups").style("fill",function(a,b){return o(a,b)}).style("stroke",function(a,b){return a.pointBorderColor||p||o(a,b)}).style("stroke-opacity",1).style("fill-opacity",.5);var ka=ja.selectAll("path.nv-point").data(function(a){return a.values.map(function(a,b){return[a,b]}).filter(function(a,b){return D(a[0],b)})});if(ka.enter().append("path").attr("class",function(a){return"nv-point nv-point-"+a[1]}).style("fill",function(a){return a.color}).style("stroke",function(a){return a.color}).attr("transform",function(b){return"translate("+a.utils.NaNtoZero(f(v(b[0],b[1])))+","+a.utils.NaNtoZero(g(w(b[0],b[1])))+")"}).attr("d",a.utils.symbol().type(function(a){return y(a[0])}).size(function(a){return u(x(a[0],a[1]))})),ka.exit().each(c).remove(),ja.exit().selectAll("path.nv-point").watchTransition(X,"scatter exit").attr("transform",function(b){return"translate("+a.utils.NaNtoZero(s(v(b[0],b[1])))+","+a.utils.NaNtoZero(t(w(b[0],b[1])))+")"}).remove(),ka.filter(function(a){return ca||da||d(a,"x",v,"y",w)}).watchTransition(X,"scatter points").attr("transform",function(b){return"translate("+a.utils.NaNtoZero(s(v(b[0],b[1])))+","+a.utils.NaNtoZero(t(w(b[0],b[1])))+")"}),ka.filter(function(a){return ca||da||d(a,"shape",y,"size",x)}).watchTransition(X,"scatter points").attr("d",a.utils.symbol().type(function(a){return y(a[0])}).size(function(a){return u(x(a[0],a[1]))})),V){var la=ja.selectAll(".nv-label").data(function(a){return a.values.map(function(a,b){return[a,b]}).filter(function(a,b){return 
D(a[0],b)})});la.enter().append("text").style("fill",function(a,b){return a.color}).style("stroke-opacity",0).style("fill-opacity",1).attr("transform",function(b){var c=a.utils.NaNtoZero(f(v(b[0],b[1])))+Math.sqrt(u(x(b[0],b[1]))/Math.PI)+2;return"translate("+c+","+a.utils.NaNtoZero(g(w(b[0],b[1])))+")"}).text(function(a,b){return a[0].label}),la.exit().remove(),ja.exit().selectAll("path.nv-label").watchTransition(X,"scatter exit").attr("transform",function(b){var c=a.utils.NaNtoZero(s(v(b[0],b[1])))+Math.sqrt(u(x(b[0],b[1]))/Math.PI)+2;return"translate("+c+","+a.utils.NaNtoZero(t(w(b[0],b[1])))+")"}).remove(),la.each(function(a){d3.select(this).classed("nv-label",!0).classed("nv-label-"+a[1],!1).classed("hover",!1)}),la.watchTransition(X,"scatter labels").attr("transform",function(b){var c=a.utils.NaNtoZero(s(v(b[0],b[1])))+Math.sqrt(u(x(b[0],b[1]))/Math.PI)+2;return"translate("+c+","+a.utils.NaNtoZero(t(w(b[0],b[1])))+")"})}U?(clearTimeout(k),k=setTimeout(T,U)):T(),f=s.copy(),g=t.copy(),h=u.copy(),i=m,j=n}),X.renderEnd("scatter immediate"),e}var f,g,h,i,j,k,l={top:0,right:0,bottom:0,left:0},m=null,n=null,o=a.utils.defaultColor(),p=null,q=Math.floor(1e5*Math.random()),r=null,s=d3.scale.linear(),t=d3.scale.linear(),u=d3.scale.linear(),v=function(a){return a.x},w=function(a){return a.y},x=function(a){return a.size||1},y=function(a){return a.shape||"circle"},z=[],A=[],B=[],C=!0,D=function(a){return!a.notActive},E=!1,F=.1,G=!1,H=!0,I=!1,J=function(){return 25},K=null,L=null,M=null,N=null,O=null,P=null,Q=!1,R=d3.dispatch("elementClick","elementDblClick","elementMouseover","elementMouseout","renderEnd"),S=!0,T=250,U=300,V=!1,W=!1,X=a.utils.renderWatch(R,T),Y=[16,256],Z={};return e.dispatch=R,e.options=a.utils.optionsFunc.bind(e),e._calls=new function(){this.clearHighlights=function(){return a.dom.write(function(){r.selectAll(".nv-point.hover").classed("hover",!1)}),null},this.highlightPoint=function(b,c,d){a.dom.write(function(){r.select(".nv-groups").selectAll(".nv-series-"+b).selectAll(".nv-point-"+c).classed("hover",d)})}},R.on("elementMouseover.point",function(a){C&&e._calls.highlightPoint(a.seriesIndex,a.pointIndex,!0)}),R.on("elementMouseout.point",function(a){C&&e._calls.highlightPoint(a.seriesIndex,a.pointIndex,!1)}),e._options=Object.create({},{width:{get:function(){return m},set:function(a){m=a}},height:{get:function(){return n},set:function(a){n=a}},xScale:{get:function(){return s},set:function(a){s=a}},yScale:{get:function(){return t},set:function(a){t=a}},pointScale:{get:function(){return u},set:function(a){u=a}},xDomain:{get:function(){return K},set:function(a){K=a}},yDomain:{get:function(){return L},set:function(a){L=a}},pointDomain:{get:function(){return O},set:function(a){O=a}},xRange:{get:function(){return M},set:function(a){M=a}},yRange:{get:function(){return N},set:function(a){N=a}},pointRange:{get:function(){return P},set:function(a){P=a}},forceX:{get:function(){return z},set:function(a){z=a}},forceY:{get:function(){return A},set:function(a){A=a}},forcePoint:{get:function(){return B},set:function(a){B=a}},interactive:{get:function(){return C},set:function(a){C=a}},pointActive:{get:function(){return D},set:function(a){D=a}},padDataOuter:{get:function(){return F},set:function(a){F=a}},padData:{get:function(){return E},set:function(a){E=a}},clipEdge:{get:function(){return G},set:function(a){G=a}},clipVoronoi:{get:function(){return H},set:function(a){H=a}},clipRadius:{get:function(){return J},set:function(a){J=a}},showVoronoi:{get:function(){return 
I},set:function(a){I=a}},id:{get:function(){return q},set:function(a){q=a}},interactiveUpdateDelay:{get:function(){return U},set:function(a){U=a}},showLabels:{get:function(){return V},set:function(a){V=a}},pointBorderColor:{get:function(){return p},set:function(a){p=a}},x:{get:function(){return v},set:function(a){v=d3.functor(a)}},y:{get:function(){return w},set:function(a){w=d3.functor(a)}},pointSize:{get:function(){return x},set:function(a){x=d3.functor(a)}},pointShape:{get:function(){return y},set:function(a){y=d3.functor(a)}},margin:{get:function(){return l},set:function(a){l.top=void 0!==a.top?a.top:l.top,l.right=void 0!==a.right?a.right:l.right,l.bottom=void 0!==a.bottom?a.bottom:l.bottom,l.left=void 0!==a.left?a.left:l.left}},duration:{get:function(){return T},set:function(a){T=a,X.reset(T)}},color:{get:function(){return o},set:function(b){o=a.utils.getColor(b)}},useVoronoi:{get:function(){return S},set:function(a){S=a,S===!1&&(H=!1)}}}),a.utils.initOptions(e),e},a.models.scatterChart=function(){"use strict";function b(A){return F.reset(),F.models(c),u&&F.models(d),v&&F.models(e),r&&F.models(g),s&&F.models(h),A.each(function(A){n=d3.select(this),a.utils.initSVG(n);var I=a.utils.availableWidth(l,n,j),J=a.utils.availableHeight(m,n,j);if(b.update=function(){0===B?n.call(b):n.transition().duration(B).call(b)},b.container=this,x.setter(H(A),b.update).getter(G(A)).update(),x.disabled=A.map(function(a){return!!a.disabled}),!y){var K;y={};for(K in x)x[K]instanceof Array?y[K]=x[K].slice(0):y[K]=x[K]}if(!(A&&A.length&&A.filter(function(a){return a.values.length}).length))return a.utils.noData(b,n),F.renderEnd("scatter immediate"),b;n.selectAll(".nv-noData").remove(),p=c.xScale(),q=c.yScale();var L=n.selectAll("g.nv-wrap.nv-scatterChart").data([A]),M=L.enter().append("g").attr("class","nvd3 nv-wrap nv-scatterChart nv-chart-"+c.id()),N=M.append("g"),O=L.select("g");if(N.append("rect").attr("class","nvd3 nv-background").style("pointer-events","none"),N.append("g").attr("class","nv-x nv-axis"),N.append("g").attr("class","nv-y nv-axis"),N.append("g").attr("class","nv-scatterWrap"),N.append("g").attr("class","nv-regressionLinesWrap"),N.append("g").attr("class","nv-distWrap"),N.append("g").attr("class","nv-legendWrap"),w&&O.select(".nv-y.nv-axis").attr("transform","translate("+I+",0)"),t){var P=I;f.width(P),L.select(".nv-legendWrap").datum(A).call(f),k||f.height()===j.top||(j.top=f.height(),J=a.utils.availableHeight(m,n,j)),L.select(".nv-legendWrap").attr("transform","translate(0,"+-j.top+")")}else O.select(".nv-legendWrap").selectAll("*").remove();L.attr("transform","translate("+j.left+","+j.top+")"),c.width(I).height(J).color(A.map(function(a,b){return a.color=a.color||o(a,b),a.color}).filter(function(a,b){return!A[b].disabled})).showLabels(C),L.select(".nv-scatterWrap").datum(A.filter(function(a){return!a.disabled})).call(c),L.select(".nv-regressionLinesWrap").attr("clip-path","url(#nv-edge-clip-"+c.id()+")");var Q=L.select(".nv-regressionLinesWrap").selectAll(".nv-regLines").data(function(a){return a});Q.enter().append("g").attr("class","nv-regLines");var R=Q.selectAll(".nv-regLine").data(function(a){return[a]});R.enter().append("line").attr("class","nv-regLine").style("stroke-opacity",0),R.filter(function(a){return a.intercept&&a.slope}).watchTransition(F,"scatterPlusLineChart: regline").attr("x1",p.range()[0]).attr("x2",p.range()[1]).attr("y1",function(a,b){return q(p.domain()[0]*a.slope+a.intercept)}).attr("y2",function(a,b){return 
q(p.domain()[1]*a.slope+a.intercept)}).style("stroke",function(a,b,c){return o(a,c)}).style("stroke-opacity",function(a,b){return a.disabled||"undefined"==typeof a.slope||"undefined"==typeof a.intercept?0:1}),u&&(d.scale(p)._ticks(a.utils.calcTicksX(I/100,A)).tickSize(-J,0),O.select(".nv-x.nv-axis").attr("transform","translate(0,"+q.range()[0]+")").call(d)),v&&(e.scale(q)._ticks(a.utils.calcTicksY(J/36,A)).tickSize(-I,0),O.select(".nv-y.nv-axis").call(e)),r&&(g.getData(c.x()).scale(p).width(I).color(A.map(function(a,b){return a.color||o(a,b)}).filter(function(a,b){return!A[b].disabled})),N.select(".nv-distWrap").append("g").attr("class","nv-distributionX"),O.select(".nv-distributionX").attr("transform","translate(0,"+q.range()[0]+")").datum(A.filter(function(a){return!a.disabled})).call(g)),s&&(h.getData(c.y()).scale(q).width(J).color(A.map(function(a,b){return a.color||o(a,b)}).filter(function(a,b){return!A[b].disabled})),N.select(".nv-distWrap").append("g").attr("class","nv-distributionY"),O.select(".nv-distributionY").attr("transform","translate("+(w?I:-h.size())+",0)").datum(A.filter(function(a){return!a.disabled})).call(h)),f.dispatch.on("stateChange",function(a){for(var c in a)x[c]=a[c];z.stateChange(x),b.update()}),z.on("changeState",function(a){"undefined"!=typeof a.disabled&&(A.forEach(function(b,c){b.disabled=a.disabled[c]}),x.disabled=a.disabled),b.update()}),c.dispatch.on("elementMouseout.tooltip",function(a){i.hidden(!0),n.select(".nv-chart-"+c.id()+" .nv-series-"+a.seriesIndex+" .nv-distx-"+a.pointIndex).attr("y1",0),n.select(".nv-chart-"+c.id()+" .nv-series-"+a.seriesIndex+" .nv-disty-"+a.pointIndex).attr("x2",h.size())}),c.dispatch.on("elementMouseover.tooltip",function(a){n.select(".nv-series-"+a.seriesIndex+" .nv-distx-"+a.pointIndex).attr("y1",a.relativePos[1]-J),n.select(".nv-series-"+a.seriesIndex+" .nv-disty-"+a.pointIndex).attr("x2",a.relativePos[0]+g.size()),i.data(a).hidden(!1)}),D=p.copy(),E=q.copy()}),F.renderEnd("scatter with line immediate"),b}var c=a.models.scatter(),d=a.models.axis(),e=a.models.axis(),f=a.models.legend(),g=a.models.distribution(),h=a.models.distribution(),i=a.models.tooltip(),j={top:30,right:20,bottom:50,left:75},k=null,l=null,m=null,n=null,o=a.utils.defaultColor(),p=c.xScale(),q=c.yScale(),r=!1,s=!1,t=!0,u=!0,v=!0,w=!1,x=a.utils.state(),y=null,z=d3.dispatch("stateChange","changeState","renderEnd"),A=null,B=250,C=!1;c.xScale(p).yScale(q),d.orient("bottom").tickPadding(10),e.orient(w?"right":"left").tickPadding(10),g.axis("x"),h.axis("y"),i.headerFormatter(function(a,b){return d.tickFormat()(a,b)}).valueFormatter(function(a,b){return e.tickFormat()(a,b)});var D,E,F=a.utils.renderWatch(z,B),G=function(a){return function(){return{active:a.map(function(a){return!a.disabled})}}},H=function(a){return function(b){void 0!==b.active&&a.forEach(function(a,c){a.disabled=!b.active[c]})}};return b.dispatch=z,b.scatter=c,b.legend=f,b.xAxis=d,b.yAxis=e,b.distX=g,b.distY=h,b.tooltip=i,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return l},set:function(a){l=a}},height:{get:function(){return m},set:function(a){m=a}},container:{get:function(){return n},set:function(a){n=a}},showDistX:{get:function(){return r},set:function(a){r=a}},showDistY:{get:function(){return s},set:function(a){s=a}},showLegend:{get:function(){return t},set:function(a){t=a}},showXAxis:{get:function(){return u},set:function(a){u=a}},showYAxis:{get:function(){return v},set:function(a){v=a}},defaultState:{get:function(){return 
y},set:function(a){y=a}},noData:{get:function(){return A},set:function(a){A=a}},duration:{get:function(){return B},set:function(a){B=a}},showLabels:{get:function(){return C},set:function(a){C=a}},margin:{get:function(){return j},set:function(a){void 0!==a.top&&(j.top=a.top,k=a.top),j.right=void 0!==a.right?a.right:j.right,j.bottom=void 0!==a.bottom?a.bottom:j.bottom,j.left=void 0!==a.left?a.left:j.left}},rightAlignYAxis:{get:function(){return w},set:function(a){w=a,e.orient(a?"right":"left")}},color:{get:function(){return o},set:function(b){o=a.utils.getColor(b),f.color(o),g.color(o),h.color(o)}}}),a.utils.inheritOptions(b,c),a.utils.initOptions(b),b},a.models.sparkline=function(){"use strict";function b(k){return t.reset(),k.each(function(b){var k=h-g.left-g.right,s=i-g.top-g.bottom;j=d3.select(this),a.utils.initSVG(j),l.domain(c||d3.extent(b,n)).range(e||[0,k]),m.domain(d||d3.extent(b,o)).range(f||[s,0]);var t=j.selectAll("g.nv-wrap.nv-sparkline").data([b]),u=t.enter().append("g").attr("class","nvd3 nv-wrap nv-sparkline");u.append("g"),t.select("g");t.attr("transform","translate("+g.left+","+g.top+")");var v=t.selectAll("path").data(function(a){return[a]});v.enter().append("path"),v.exit().remove(),v.style("stroke",function(a,b){return a.color||p(a,b)}).attr("d",d3.svg.line().x(function(a,b){return l(n(a,b))}).y(function(a,b){return m(o(a,b))}));var w=t.selectAll("circle.nv-point").data(function(a){function b(b){if(-1!=b){var c=a[b];return c.pointIndex=b,c}return null}var c=a.map(function(a,b){return o(a,b)}),d=b(c.lastIndexOf(m.domain()[1])),e=b(c.indexOf(m.domain()[0])),f=b(c.length-1);return[q?e:null,q?d:null,r?f:null].filter(function(a){return null!=a})});w.enter().append("circle"),w.exit().remove(),w.attr("cx",function(a,b){return l(n(a,a.pointIndex))}).attr("cy",function(a,b){return m(o(a,a.pointIndex))}).attr("r",2).attr("class",function(a,b){return n(a,a.pointIndex)==l.domain()[1]?"nv-point nv-currentValue":o(a,a.pointIndex)==m.domain()[0]?"nv-point nv-minValue":"nv-point nv-maxValue"})}),t.renderEnd("sparkline immediate"),b}var c,d,e,f,g={top:2,right:0,bottom:2,left:0},h=400,i=32,j=null,k=!0,l=d3.scale.linear(),m=d3.scale.linear(),n=function(a){return a.x},o=function(a){return a.y},p=a.utils.getColor(["#000"]),q=!0,r=!0,s=d3.dispatch("renderEnd"),t=a.utils.renderWatch(s);return b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return h},set:function(a){h=a}},height:{get:function(){return i},set:function(a){i=a}},xDomain:{get:function(){return c},set:function(a){c=a}},yDomain:{get:function(){return d},set:function(a){d=a}},xRange:{get:function(){return e},set:function(a){e=a}},yRange:{get:function(){return f},set:function(a){f=a}},xScale:{get:function(){return l},set:function(a){l=a}},yScale:{get:function(){return m},set:function(a){m=a}},animate:{get:function(){return k},set:function(a){k=a}},showMinMaxPoints:{get:function(){return q},set:function(a){q=a}},showCurrentPoint:{get:function(){return r},set:function(a){r=a}},x:{get:function(){return n},set:function(a){n=d3.functor(a)}},y:{get:function(){return o},set:function(a){o=d3.functor(a)}},margin:{get:function(){return g},set:function(a){g.top=void 0!==a.top?a.top:g.top,g.right=void 0!==a.right?a.right:g.right,g.bottom=void 0!==a.bottom?a.bottom:g.bottom,g.left=void 0!==a.left?a.left:g.left}},color:{get:function(){return p},set:function(b){p=a.utils.getColor(b)}}}),b.dispatch=s,a.utils.initOptions(b),b},a.models.sparklinePlus=function(){"use strict";function b(p){return 
r.reset(),r.models(e),p.each(function(p){function q(){if(!j){var a=z.selectAll(".nv-hoverValue").data(i),b=a.enter().append("g").attr("class","nv-hoverValue").style("stroke-opacity",0).style("fill-opacity",0);a.exit().transition().duration(250).style("stroke-opacity",0).style("fill-opacity",0).remove(),a.attr("transform",function(a){return"translate("+c(e.x()(p[a],a))+",0)"}).transition().duration(250).style("stroke-opacity",1).style("fill-opacity",1),i.length&&(b.append("line").attr("x1",0).attr("y1",-f.top).attr("x2",0).attr("y2",u),
-b.append("text").attr("class","nv-xValue").attr("x",-6).attr("y",-f.top).attr("text-anchor","end").attr("dy",".9em"),z.select(".nv-hoverValue .nv-xValue").text(k(e.x()(p[i[0]],i[0]))),b.append("text").attr("class","nv-yValue").attr("x",6).attr("y",-f.top).attr("text-anchor","start").attr("dy",".9em"),z.select(".nv-hoverValue .nv-yValue").text(l(e.y()(p[i[0]],i[0]))))}}function r(){function a(a,b){for(var c=Math.abs(e.x()(a[0],0)-b),d=0,f=0;f<a.length;f++)Math.abs(e.x()(a[f],f)-b)<c&&(c=Math.abs(e.x()(a[f],f)-b),d=f);return d}if(!j){var b=d3.mouse(this)[0]-f.left;i=[a(p,Math.round(c.invert(b)))],q()}}var s=d3.select(this);a.utils.initSVG(s);var t=a.utils.availableWidth(g,s,f),u=a.utils.availableHeight(h,s,f);if(b.update=function(){s.call(b)},b.container=this,!p||!p.length)return a.utils.noData(b,s),b;s.selectAll(".nv-noData").remove();var v=e.y()(p[p.length-1],p.length-1);c=e.xScale(),d=e.yScale();var w=s.selectAll("g.nv-wrap.nv-sparklineplus").data([p]),x=w.enter().append("g").attr("class","nvd3 nv-wrap nv-sparklineplus"),y=x.append("g"),z=w.select("g");y.append("g").attr("class","nv-sparklineWrap"),y.append("g").attr("class","nv-valueWrap"),y.append("g").attr("class","nv-hoverArea"),w.attr("transform","translate("+f.left+","+f.top+")");var A=z.select(".nv-sparklineWrap");if(e.width(t).height(u),A.call(e),m){var B=z.select(".nv-valueWrap"),C=B.selectAll(".nv-currentValue").data([v]);C.enter().append("text").attr("class","nv-currentValue").attr("dx",o?-8:8).attr("dy",".9em").style("text-anchor",o?"end":"start"),C.attr("x",t+(o?f.right:0)).attr("y",n?function(a){return d(a)}:0).style("fill",e.color()(p[p.length-1],p.length-1)).text(l(v))}y.select(".nv-hoverArea").append("rect").on("mousemove",r).on("click",function(){j=!j}).on("mouseout",function(){i=[],q()}),z.select(".nv-hoverArea rect").attr("transform",function(a){return"translate("+-f.left+","+-f.top+")"}).attr("width",t+f.left+f.right).attr("height",u+f.top)}),r.renderEnd("sparklinePlus immediate"),b}var c,d,e=a.models.sparkline(),f={top:15,right:100,bottom:10,left:50},g=null,h=null,i=[],j=!1,k=d3.format(",r"),l=d3.format(",.2f"),m=!0,n=!0,o=!1,p=null,q=d3.dispatch("renderEnd"),r=a.utils.renderWatch(q);return b.dispatch=q,b.sparkline=e,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return g},set:function(a){g=a}},height:{get:function(){return h},set:function(a){h=a}},xTickFormat:{get:function(){return k},set:function(a){k=a}},yTickFormat:{get:function(){return l},set:function(a){l=a}},showLastValue:{get:function(){return m},set:function(a){m=a}},alignValue:{get:function(){return n},set:function(a){n=a}},rightAlignValue:{get:function(){return o},set:function(a){o=a}},noData:{get:function(){return p},set:function(a){p=a}},margin:{get:function(){return f},set:function(a){f.top=void 0!==a.top?a.top:f.top,f.right=void 0!==a.right?a.right:f.right,f.bottom=void 0!==a.bottom?a.bottom:f.bottom,f.left=void 0!==a.left?a.left:f.left}}}),a.utils.inheritOptions(b,e),a.utils.initOptions(b),b},a.models.stackedArea=function(){"use strict";function b(n){return v.reset(),v.models(s),n.each(function(n){var t=f-e.left-e.right,w=g-e.top-e.bottom;j=d3.select(this),a.utils.initSVG(j),c=s.xScale(),d=s.yScale();var x=n;n.forEach(function(a,b){a.seriesIndex=b,a.values=a.values.map(function(a,c){return a.index=c,a.seriesIndex=b,a})});var y=n.filter(function(a){return!a.disabled});n=d3.layout.stack().order(p).offset(o).values(function(a){return a.values}).x(k).y(l).out(function(a,b,c){a.display={y:c,y0:b}})(y);var 
z=j.selectAll("g.nv-wrap.nv-stackedarea").data([n]),A=z.enter().append("g").attr("class","nvd3 nv-wrap nv-stackedarea"),B=A.append("defs"),C=A.append("g"),D=z.select("g");C.append("g").attr("class","nv-areaWrap"),C.append("g").attr("class","nv-scatterWrap"),z.attr("transform","translate("+e.left+","+e.top+")"),0==s.forceY().length&&s.forceY().push(0),s.width(t).height(w).x(k).y(function(a){return void 0!==a.display?a.display.y+a.display.y0:void 0}).color(n.map(function(a,b){return a.color=a.color||h(a,a.seriesIndex),a.color}));var E=D.select(".nv-scatterWrap").datum(n);E.call(s),B.append("clipPath").attr("id","nv-edge-clip-"+i).append("rect"),z.select("#nv-edge-clip-"+i+" rect").attr("width",t).attr("height",w),D.attr("clip-path",r?"url(#nv-edge-clip-"+i+")":"");var F=d3.svg.area().defined(m).x(function(a,b){return c(k(a,b))}).y0(function(a){return d(a.display.y0)}).y1(function(a){return d(a.display.y+a.display.y0)}).interpolate(q),G=d3.svg.area().defined(m).x(function(a,b){return c(k(a,b))}).y0(function(a){return d(a.display.y0)}).y1(function(a){return d(a.display.y0)}),H=D.select(".nv-areaWrap").selectAll("path.nv-area").data(function(a){return a});H.enter().append("path").attr("class",function(a,b){return"nv-area nv-area-"+b}).attr("d",function(a,b){return G(a.values,a.seriesIndex)}).on("mouseover",function(a,b){d3.select(this).classed("hover",!0),u.areaMouseover({point:a,series:a.key,pos:[d3.event.pageX,d3.event.pageY],seriesIndex:a.seriesIndex})}).on("mouseout",function(a,b){d3.select(this).classed("hover",!1),u.areaMouseout({point:a,series:a.key,pos:[d3.event.pageX,d3.event.pageY],seriesIndex:a.seriesIndex})}).on("click",function(a,b){d3.select(this).classed("hover",!1),u.areaClick({point:a,series:a.key,pos:[d3.event.pageX,d3.event.pageY],seriesIndex:a.seriesIndex})}),H.exit().remove(),H.style("fill",function(a,b){return a.color||h(a,a.seriesIndex)}).style("stroke",function(a,b){return a.color||h(a,a.seriesIndex)}),H.watchTransition(v,"stackedArea path").attr("d",function(a,b){return F(a.values,b)}),s.dispatch.on("elementMouseover.area",function(a){D.select(".nv-chart-"+i+" .nv-area-"+a.seriesIndex).classed("hover",!0)}),s.dispatch.on("elementMouseout.area",function(a){D.select(".nv-chart-"+i+" .nv-area-"+a.seriesIndex).classed("hover",!1)}),b.d3_stackedOffset_stackPercent=function(a){var b,c,d,e=a.length,f=a[0].length,g=[];for(c=0;f>c;++c){for(b=0,d=0;b<x.length;b++)d+=l(x[b].values[c]);if(d)for(b=0;e>b;b++)a[b][c][1]/=d;else for(b=0;e>b;b++)a[b][c][1]=0}for(c=0;f>c;++c)g[c]=0;return g}}),v.renderEnd("stackedArea immediate"),b}var c,d,e={top:0,right:0,bottom:0,left:0},f=960,g=500,h=a.utils.defaultColor(),i=Math.floor(1e5*Math.random()),j=null,k=function(a){return a.x},l=function(a){return a.y},m=function(a,b){return!isNaN(l(a,b))&&null!==l(a,b)},n="stack",o="zero",p="default",q="linear",r=!1,s=a.models.scatter(),t=250,u=d3.dispatch("areaClick","areaMouseover","areaMouseout","renderEnd","elementClick","elementMouseover","elementMouseout");s.pointSize(2.2).pointDomain([2.2,2.2]);var v=a.utils.renderWatch(u,t);return b.dispatch=u,b.scatter=s,s.dispatch.on("elementClick",function(){u.elementClick.apply(this,arguments)}),s.dispatch.on("elementMouseover",function(){u.elementMouseover.apply(this,arguments)}),s.dispatch.on("elementMouseout",function(){u.elementMouseout.apply(this,arguments)}),b.interpolate=function(a){return arguments.length?(q=a,b):q},b.duration=function(a){return 
arguments.length?(t=a,v.reset(t),s.duration(t),b):t},b.dispatch=u,b.scatter=s,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return f},set:function(a){f=a}},height:{get:function(){return g},set:function(a){g=a}},defined:{get:function(){return m},set:function(a){m=a}},clipEdge:{get:function(){return r},set:function(a){r=a}},offset:{get:function(){return o},set:function(a){o=a}},order:{get:function(){return p},set:function(a){p=a}},interpolate:{get:function(){return q},set:function(a){q=a}},x:{get:function(){return k},set:function(a){k=d3.functor(a)}},y:{get:function(){return l},set:function(a){l=d3.functor(a)}},margin:{get:function(){return e},set:function(a){e.top=void 0!==a.top?a.top:e.top,e.right=void 0!==a.right?a.right:e.right,e.bottom=void 0!==a.bottom?a.bottom:e.bottom,e.left=void 0!==a.left?a.left:e.left}},color:{get:function(){return h},set:function(b){h=a.utils.getColor(b)}},style:{get:function(){return n},set:function(a){switch(n=a){case"stack":b.offset("zero"),b.order("default");break;case"stream":b.offset("wiggle"),b.order("inside-out");break;case"stream-center":b.offset("silhouette"),b.order("inside-out");break;case"expand":b.offset("expand"),b.order("default");break;case"stack_percent":b.offset(b.d3_stackedOffset_stackPercent),b.order("default")}}},duration:{get:function(){return t},set:function(a){t=a,v.reset(t),s.duration(t)}}}),a.utils.inheritOptions(b,s),a.utils.initOptions(b),b},a.models.stackedAreaChart=function(){"use strict";function b(k){return L.reset(),L.models(e),u&&L.models(f),v&&L.models(g),k.each(function(k){function D(){u&&X.select(".nv-focus .nv-x.nv-axis").attr("transform","translate(0,"+T+")").transition().duration(I).call(f)}function L(){if(v){if("expand"===e.style()||"stack_percent"===e.style()){var a=g.tickFormat();J&&a===P||(J=a),g.tickFormat(P)}else J&&(g.tickFormat(J),J=null);X.select(".nv-focus .nv-y.nv-axis").transition().duration(0).call(g)}}function Q(a){var b=X.select(".nv-focus .nv-stackedWrap").datum(k.filter(function(a){return!a.disabled}).map(function(b,c){return{key:b.key,area:b.area,classed:b.classed,values:b.values.filter(function(b,c){return e.x()(b,c)>=a[0]&&e.x()(b,c)<=a[1]}),disableTooltip:b.disableTooltip}}));b.transition().duration(I).call(e),D(),L()}var R=d3.select(this);a.utils.initSVG(R);var S=a.utils.availableWidth(o,R,m),T=a.utils.availableHeight(p,R,m)-(x?l.height():0);if(b.update=function(){R.transition().duration(I).call(b)},b.container=this,B.setter(O(k),b.update).getter(N(k)).update(),B.disabled=k.map(function(a){return!!a.disabled}),!C){var U;C={};for(U in B)B[U]instanceof Array?C[U]=B[U].slice(0):C[U]=B[U]}if(!(k&&k.length&&k.filter(function(a){return a.values.length}).length))return a.utils.noData(b,R),b;R.selectAll(".nv-noData").remove(),c=e.xScale(),d=e.yScale();var V=R.selectAll("g.nv-wrap.nv-stackedAreaChart").data([k]),W=V.enter().append("g").attr("class","nvd3 nv-wrap nv-stackedAreaChart").append("g"),X=V.select("g");W.append("g").attr("class","nv-legendWrap"),W.append("g").attr("class","nv-controlsWrap");var Y=W.append("g").attr("class","nv-focus");Y.append("g").attr("class","nv-background").append("rect"),Y.append("g").attr("class","nv-x nv-axis"),Y.append("g").attr("class","nv-y nv-axis"),Y.append("g").attr("class","nv-stackedWrap"),Y.append("g").attr("class","nv-interactive");W.append("g").attr("class","nv-focusWrap");if(s){var Z=r&&"top"===t?S-F:S;if(h.width(Z),X.select(".nv-legendWrap").datum(k).call(h),"bottom"===t){var 
$=(u?12:0)+10;m.bottom=Math.max(h.height()+$,m.bottom),T=a.utils.availableHeight(p,R,m)-(x?l.height():0);var _=T+$;X.select(".nv-legendWrap").attr("transform","translate(0,"+_+")")}else"top"===t&&(n||m.top==h.height()||(m.top=h.height(),T=a.utils.availableHeight(p,R,m)-(x?l.height():0)),X.select(".nv-legendWrap").attr("transform","translate("+(S-Z)+","+-m.top+")"))}else X.select(".nv-legendWrap").selectAll("*").remove();if(r){var aa=[{key:H.stacked||"Stacked",metaKey:"Stacked",disabled:"stack"!=e.style(),style:"stack"},{key:H.stream||"Stream",metaKey:"Stream",disabled:"stream"!=e.style(),style:"stream"},{key:H.expanded||"Expanded",metaKey:"Expanded",disabled:"expand"!=e.style(),style:"expand"},{key:H.stack_percent||"Stack %",metaKey:"Stack_Percent",disabled:"stack_percent"!=e.style(),style:"stack_percent"}];F=G.length/3*260,aa=aa.filter(function(a){return-1!==G.indexOf(a.metaKey)}),i.width(F).color(["#444","#444","#444"]),X.select(".nv-controlsWrap").datum(aa).call(i);var ba=Math.max(i.height(),s&&"top"===t?h.height():0);m.top!=ba&&(m.top=ba,T=a.utils.availableHeight(p,R,m)-(x?l.height():0)),X.select(".nv-controlsWrap").attr("transform","translate(0,"+-m.top+")")}else X.select(".nv-controlsWrap").selectAll("*").remove();V.attr("transform","translate("+m.left+","+m.top+")"),w&&X.select(".nv-y.nv-axis").attr("transform","translate("+S+",0)"),y&&(j.width(S).height(T).margin({left:m.left,top:m.top}).svgContainer(R).xScale(c),V.select(".nv-interactive").call(j)),X.select(".nv-focus .nv-background rect").attr("width",S).attr("height",T),e.width(S).height(T).color(k.map(function(a,b){return a.color||q(a,b)}).filter(function(a,b){return!k[b].disabled}));var ca=X.select(".nv-focus .nv-stackedWrap").datum(k.filter(function(a){return!a.disabled}));if(u&&f.scale(c)._ticks(a.utils.calcTicksX(S/100,k)).tickSize(-T,0),v){var da;da="wiggle"===e.offset()?0:a.utils.calcTicksY(T/36,k),g.scale(d)._ticks(da).tickSize(-S,0)}if(x){l.width(S),X.select(".nv-focusWrap").attr("transform","translate(0,"+(T+m.bottom+l.margin().top)+")").datum(k.filter(function(a){return!a.disabled})).call(l);var ea=l.brush.empty()?l.xDomain():l.brush.extent();null!==ea&&Q(ea)}else ca.transition().call(e),D(),L();e.dispatch.on("areaClick.toggle",function(a){1===k.filter(function(a){return!a.disabled}).length?k.forEach(function(a){a.disabled=!1}):k.forEach(function(b,c){b.disabled=c!=a.seriesIndex}),B.disabled=k.map(function(a){return!!a.disabled}),E.stateChange(B),b.update()}),h.dispatch.on("stateChange",function(a){for(var c in a)B[c]=a[c];E.stateChange(B),b.update()}),i.dispatch.on("legendClick",function(a,c){a.disabled&&(aa=aa.map(function(a){return a.disabled=!0,a}),a.disabled=!1,e.style(a.style),B.style=e.style(),E.stateChange(B),b.update())}),j.dispatch.on("elementMousemove",function(c){e.clearHighlights();var d,f,g,h=[],i=0,l=!0;if(k.filter(function(a,b){return a.seriesIndex=b,!a.disabled}).forEach(function(j,k){f=a.interactiveBisect(j.values,c.pointXValue,b.x());var m=j.values[f],n=b.y()(m,f);if(null!=n&&e.highlightPoint(k,f,!0),"undefined"!=typeof m){"undefined"==typeof d&&(d=m),"undefined"==typeof g&&(g=b.xScale()(b.x()(m,f)));var o="expand"==e.style()?m.display.y:b.y()(m,f);h.push({key:j.key,value:o,color:q(j,j.seriesIndex),point:m}),z&&"expand"!=e.style()&&null!=o&&(i+=o,l=!1)}}),h.reverse(),h.length>2){var m=b.yScale().invert(c.mouseY),n=null;h.forEach(function(a,b){m=Math.abs(m);var c=Math.abs(a.point.display.y0),d=Math.abs(a.point.display.y);return m>=c&&d+c>=m?void(n=b):void 
0}),null!=n&&(h[n].highlight=!0)}z&&"expand"!=e.style()&&h.length>=2&&!l&&h.push({key:A,value:i,total:!0});var o=b.x()(d,f),p=j.tooltip.valueFormatter();"expand"===e.style()||"stack_percent"===e.style()?(K||(K=p),p=d3.format(".1%")):K&&(p=K,K=null),j.tooltip.valueFormatter(p).data({value:o,series:h})(),j.renderGuideLine(g)}),j.dispatch.on("elementMouseout",function(a){e.clearHighlights()}),l.dispatch.on("onBrush",function(a){Q(a)}),E.on("changeState",function(a){"undefined"!=typeof a.disabled&&k.length===a.disabled.length&&(k.forEach(function(b,c){b.disabled=a.disabled[c]}),B.disabled=a.disabled),"undefined"!=typeof a.style&&(e.style(a.style),M=a.style),b.update()})}),L.renderEnd("stacked Area chart immediate"),b}var c,d,e=a.models.stackedArea(),f=a.models.axis(),g=a.models.axis(),h=a.models.legend(),i=a.models.legend(),j=a.interactiveGuideline(),k=a.models.tooltip(),l=a.models.focus(a.models.stackedArea()),m={top:10,right:25,bottom:50,left:60},n=null,o=null,p=null,q=a.utils.defaultColor(),r=!0,s=!0,t="top",u=!0,v=!0,w=!1,x=!1,y=!1,z=!0,A="TOTAL",B=a.utils.state(),C=null,D=null,E=d3.dispatch("stateChange","changeState","renderEnd"),F=250,G=["Stacked","Stream","Expanded"],H={},I=250;B.style=e.style(),f.orient("bottom").tickPadding(7),g.orient(w?"right":"left"),k.headerFormatter(function(a,b){return f.tickFormat()(a,b)}).valueFormatter(function(a,b){return g.tickFormat()(a,b)}),j.tooltip.headerFormatter(function(a,b){return f.tickFormat()(a,b)}).valueFormatter(function(a,b){return null==a?"N/A":g.tickFormat()(a,b)});var J=null,K=null;i.updateState(!1);var L=a.utils.renderWatch(E),M=e.style(),N=function(a){return function(){return{active:a.map(function(a){return!a.disabled}),style:e.style()}}},O=function(a){return function(b){void 0!==b.style&&(M=b.style),void 0!==b.active&&a.forEach(function(a,c){a.disabled=!b.active[c]})}},P=d3.format("%");return e.dispatch.on("elementMouseover.tooltip",function(a){a.point.x=e.x()(a.point),a.point.y=e.y()(a.point),k.data(a).hidden(!1)}),e.dispatch.on("elementMouseout.tooltip",function(a){k.hidden(!0)}),b.dispatch=E,b.stacked=e,b.legend=h,b.controls=i,b.xAxis=f,b.x2Axis=l.xAxis,b.yAxis=g,b.y2Axis=l.yAxis,b.interactiveLayer=j,b.tooltip=k,b.focus=l,b.dispatch=E,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{width:{get:function(){return o},set:function(a){o=a}},height:{get:function(){return p},set:function(a){p=a}},showLegend:{get:function(){return s},set:function(a){s=a}},legendPosition:{get:function(){return t},set:function(a){t=a}},showXAxis:{get:function(){return u},set:function(a){u=a}},showYAxis:{get:function(){return v},set:function(a){v=a}},defaultState:{get:function(){return C},set:function(a){C=a}},noData:{get:function(){return D},set:function(a){D=a}},showControls:{get:function(){return r},set:function(a){r=a}},controlLabels:{get:function(){return H},set:function(a){H=a}},controlOptions:{get:function(){return G},set:function(a){G=a}},showTotalInTooltip:{get:function(){return z},set:function(a){z=a}},totalLabel:{get:function(){return A},set:function(a){A=a}},focusEnable:{get:function(){return x},set:function(a){x=a}},focusHeight:{get:function(){return l.height()},set:function(a){l.height(a)}},brushExtent:{get:function(){return l.brushExtent()},set:function(a){l.brushExtent(a)}},margin:{get:function(){return m},set:function(a){void 0!==a.top&&(m.top=a.top,n=a.top),m.right=void 0!==a.right?a.right:m.right,m.bottom=void 0!==a.bottom?a.bottom:m.bottom,m.left=void 0!==a.left?a.left:m.left}},focusMargin:{get:function(){return 
l.margin},set:function(a){l.margin.top=void 0!==a.top?a.top:l.margin.top,l.margin.right=void 0!==a.right?a.right:l.margin.right,l.margin.bottom=void 0!==a.bottom?a.bottom:l.margin.bottom,l.margin.left=void 0!==a.left?a.left:l.margin.left}},duration:{get:function(){return I},set:function(a){I=a,L.reset(I),e.duration(I),f.duration(I),g.duration(I)}},color:{get:function(){return q},set:function(b){q=a.utils.getColor(b),h.color(q),e.color(q),l.color(q)}},x:{get:function(){return e.x()},set:function(a){e.x(a),l.x(a)}},y:{get:function(){return e.y()},set:function(a){e.y(a),l.y(a)}},rightAlignYAxis:{get:function(){return w},set:function(a){w=a,g.orient(w?"right":"left")}},useInteractiveGuideline:{get:function(){return y},set:function(a){y=!!a,b.interactive(!a),b.useVoronoi(!a),e.scatter.interactive(!a)}}}),a.utils.inheritOptions(b,e),a.utils.initOptions(b),b},a.models.stackedAreaWithFocusChart=function(){return a.models.stackedAreaChart().margin({bottom:30}).focusEnable(!0)},a.models.sunburst=function(){"use strict";function b(a){var b=c(a);return b>90?180:0}function c(a){var b=Math.max(0,Math.min(2*Math.PI,F(a.x))),c=Math.max(0,Math.min(2*Math.PI,F(a.x+a.dx))),d=(b+c)/2*(180/Math.PI)-90;return d}function d(a){var b=Math.max(0,Math.min(2*Math.PI,F(a.x))),c=Math.max(0,Math.min(2*Math.PI,F(a.x+a.dx)));return(c-b)/(2*Math.PI)}function e(a){var b=Math.max(0,Math.min(2*Math.PI,F(a.x))),c=Math.max(0,Math.min(2*Math.PI,F(a.x+a.dx))),d=c-b;return d>z}function f(a,b){var c=d3.interpolate(F.domain(),[l.x,l.x+l.dx]),d=d3.interpolate(G.domain(),[l.y,1]),e=d3.interpolate(G.range(),[l.y?20:0,o]);return 0===b?function(){return J(a)}:function(b){return F.domain(c(b)),G.domain(d(b)).range(e(b)),J(a)}}function g(a){var b=d3.interpolate({x:a.x0,dx:a.dx0,y:a.y0,dy:a.dy0},a);return function(c){var d=b(c);return a.x0=d.x,a.dx0=d.dx,a.y0=d.y,a.dy0=d.dy,J(d)}}function h(a){var b=B(a);I[b]||(I[b]={});var c=I[b];c.dx=a.dx,c.x=a.x,c.dy=a.dy,c.y=a.y}function i(a){a.forEach(function(a){var b=B(a),c=I[b];c?(a.dx0=c.dx,a.x0=c.x,a.dy0=c.dy,a.y0=c.y):(a.dx0=a.dx,a.x0=a.x,a.dy0=a.dy,a.y0=a.y),h(a)})}function j(a){var d=v.selectAll("text"),g=v.selectAll("path");d.transition().attr("opacity",0),l=a,g.transition().duration(D).attrTween("d",f).each("end",function(d){if(d.x>=a.x&&d.x<a.x+a.dx&&d.depth>=a.depth){var f=d3.select(this.parentNode),g=f.select("text");g.transition().duration(D).text(function(a){return y(a)}).attr("opacity",function(a){return e(a)?1:0}).attr("transform",function(){var e=this.getBBox().width;if(0===d.depth)return"translate("+e/2*-1+",0)";if(d.depth===a.depth)return"translate("+(G(d.y)+5)+",0)";var f=c(d),g=b(d);return 0===g?"rotate("+f+")translate("+(G(d.y)+5)+",0)":"rotate("+f+")translate("+(G(d.y)+e+5)+",0)rotate("+g+")"})}})}function k(f){return K.reset(),f.each(function(f){v=d3.select(this),m=a.utils.availableWidth(q,v,p),n=a.utils.availableHeight(r,v,p),o=Math.min(m,n)/2,G.range([0,o]);var h=v.select("g.nvd3.nv-wrap.nv-sunburst");h[0][0]?h.attr("transform","translate("+(m/2+p.left+p.right)+","+(n/2+p.top+p.bottom)+")"):h=v.append("g").attr("class","nvd3 nv-wrap nv-sunburst nv-chart-"+u).attr("transform","translate("+(m/2+p.left+p.right)+","+(n/2+p.top+p.bottom)+")"),v.on("click",function(a,b){E.chartClick({data:a,index:b,pos:d3.event,id:u})}),H.value(t[s]||t.count);var k=H.nodes(f[0]).reverse();i(k);var l=h.selectAll(".arc-container").data(k,B),z=l.enter().append("g").attr("class","arc-container");z.append("path").attr("d",J).style("fill",function(a){return 
a.color?a.color:w(C?(a.children?a:a.parent).name:a.name)}).style("stroke","#FFF").on("click",function(a,b){j(a),E.elementClick({data:a,index:b})}).on("mouseover",function(a,b){d3.select(this).classed("hover",!0).style("opacity",.8),E.elementMouseover({data:a,color:d3.select(this).style("fill"),percent:d(a)})}).on("mouseout",function(a,b){d3.select(this).classed("hover",!1).style("opacity",1),E.elementMouseout({data:a})}).on("mousemove",function(a,b){E.elementMousemove({data:a})}),l.each(function(a){d3.select(this).select("path").transition().duration(D).attrTween("d",g)}),x&&(l.selectAll("text").remove(),l.append("text").text(function(a){return y(a)}).transition().duration(D).attr("opacity",function(a){return e(a)?1:0}).attr("transform",function(a){var d=this.getBBox().width;if(0===a.depth)return"rotate(0)translate("+d/2*-1+",0)";var e=c(a),f=b(a);return 0===f?"rotate("+e+")translate("+(G(a.y)+5)+",0)":"rotate("+e+")translate("+(G(a.y)+d+5)+",0)rotate("+f+")"})),j(k[k.length-1]),l.exit().transition().duration(D).attr("opacity",0).each("end",function(a){var b=B(a);I[b]=void 0}).remove()}),K.renderEnd("sunburst immediate"),k}var l,m,n,o,p={top:0,right:0,bottom:0,left:0},q=600,r=600,s="count",t={count:function(a){return 1},value:function(a){return a.value||a.size},size:function(a){return a.value||a.size}},u=Math.floor(1e4*Math.random()),v=null,w=a.utils.defaultColor(),x=!1,y=function(a){return"count"===s?a.name+" #"+a.value:a.name+" "+(a.value||a.size)},z=.02,A=function(a,b){return a.name>b.name},B=function(a,b){return a.name},C=!0,D=500,E=d3.dispatch("chartClick","elementClick","elementDblClick","elementMousemove","elementMouseover","elementMouseout","renderEnd"),F=d3.scale.linear().range([0,2*Math.PI]),G=d3.scale.sqrt(),H=d3.layout.partition().sort(A),I={},J=d3.svg.arc().startAngle(function(a){return Math.max(0,Math.min(2*Math.PI,F(a.x)))}).endAngle(function(a){return Math.max(0,Math.min(2*Math.PI,F(a.x+a.dx)))}).innerRadius(function(a){return Math.max(0,G(a.y))}).outerRadius(function(a){return Math.max(0,G(a.y+a.dy))}),K=a.utils.renderWatch(E);return k.dispatch=E,k.options=a.utils.optionsFunc.bind(k),k._options=Object.create({},{width:{get:function(){return q},set:function(a){q=a}},height:{get:function(){return r},set:function(a){r=a}},mode:{get:function(){return s},set:function(a){s=a}},id:{get:function(){return u},set:function(a){u=a}},duration:{get:function(){return D},set:function(a){D=a}},groupColorByParent:{get:function(){return C},set:function(a){C=!!a}},showLabels:{get:function(){return x},set:function(a){x=!!a}},labelFormat:{get:function(){return y},set:function(a){y=a}},labelThreshold:{get:function(){return z},set:function(a){z=a}},sort:{get:function(){return A},set:function(a){A=a}},key:{get:function(){return B},set:function(a){B=a}},margin:{get:function(){return p},set:function(a){p.top=void 0!=a.top?a.top:p.top,p.right=void 0!=a.right?a.right:p.right,p.bottom=void 0!=a.bottom?a.bottom:p.bottom,p.left=void 0!=a.left?a.left:p.left}},color:{get:function(){return w},set:function(b){w=a.utils.getColor(b)}}}),a.utils.initOptions(k),k},a.models.sunburstChart=function(){"use strict";function b(d){return n.reset(),n.models(c),d.each(function(d){var h=d3.select(this);a.utils.initSVG(h);var i=a.utils.availableWidth(f,h,e),j=a.utils.availableHeight(g,h,e);return b.update=function(){0===l?h.call(b):h.transition().duration(l).call(b)},b.container=h,d&&d.length?(h.selectAll(".nv-noData").remove(),c.width(i).height(j).margin(e),void 
h.call(c)):(a.utils.noData(b,h),b)}),n.renderEnd("sunburstChart immediate"),b}var c=a.models.sunburst(),d=a.models.tooltip(),e={top:30,right:20,bottom:20,left:20},f=null,g=null,h=a.utils.defaultColor(),i=!1,j=(Math.round(1e5*Math.random()),null),k=null,l=250,m=d3.dispatch("stateChange","changeState","renderEnd"),n=a.utils.renderWatch(m);return d.duration(0).headerEnabled(!1).valueFormatter(function(a){return a}),c.dispatch.on("elementMouseover.tooltip",function(a){a.series={key:a.data.name,value:a.data.value||a.data.size,color:a.color,percent:a.percent},i||(delete a.percent,delete a.series.percent),d.data(a).hidden(!1)}),c.dispatch.on("elementMouseout.tooltip",function(a){d.hidden(!0)}),c.dispatch.on("elementMousemove.tooltip",function(a){d()}),b.dispatch=m,b.sunburst=c,b.tooltip=d,b.options=a.utils.optionsFunc.bind(b),b._options=Object.create({},{noData:{get:function(){return k},set:function(a){k=a}},defaultState:{get:function(){return j},set:function(a){j=a}},showTooltipPercent:{get:function(){return i},set:function(a){i=a}},color:{get:function(){return h},set:function(a){h=a,c.color(h)}},duration:{get:function(){return l},set:function(a){l=a,n.reset(l),c.duration(l)}},margin:{get:function(){return e},set:function(a){e.top=void 0!==a.top?a.top:e.top,e.right=void 0!==a.right?a.right:e.right,e.bottom=void 0!==a.bottom?a.bottom:e.bottom,e.left=void 0!==a.left?a.left:e.left,c.margin(e)}}}),a.utils.inheritOptions(b,c),a.utils.initOptions(b),b},a.version="1.8.5"}();
-//# sourceMappingURL=nv.d3.min.js.map
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map b/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map
deleted file mode 100644
index 594da5a3..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"file":"nv.d3.min.js","sources":["../src/core.js","../src/dom.js","../src/interactiveLayer.js","../src/tooltip.js","../src/utils.js","../src/models/axis.js","../src/models/boxPlot.js","../src/models/boxPlotChart.js","../src/models/bullet.js","../src/models/bulletChart.js","../src/models/candlestickBar.js","../src/models/cumulativeLineChart.js","../src/models/discreteBar.js","../src/models/discreteBarChart.js","../src/models/distribution.js","../src/models/focus.js","../src/models/forceDirectedGraph.js","../src/models/furiousLegend.js","../src/models/historicalBar.js","../src/models/historicalBarChart.js","../src/models/legend.js","../src/models/line.js","../src/models/lineChart.js","../src/models/linePlusBarChart.js","../src/models/multiBar.js","../src/models/multiBarChart.js","../src/models/multiBarHorizontal.js","../src/models/multiBarHorizontalChart.js","../src/models/multiChart.js","../src/models/ohlcBar.js","../src/models/parallelCoordinates.js","../src/models/parallelCoordinatesChart.js","../src/models/pie.js","../src/models/pieChart.js","../src/models/sankey.js","../src/models/sankeyChart.js","../src/models/scatter.js","../src/models/scatterChart.js","../src/models/sparkline.js","../src/models/sparklinePlus.js","../src/models/stackedArea.js","../src/models/stackedAreaChart.js","../src/models/sunburst.js","../src/models/sunburstChart.js"],"names":["nv","dev","tooltip","utils","models","charts","logs","dom","d3","require","dispatch","Function","prototype","bind","oThis","this","TypeError","aArgs","Array","slice","call","arguments","fToBind","fNOP","fBound","apply","concat","on","e","startTime","Date","endTime","totalTime","log","window","console","length","deprecated","name","info","warn","render","step","active","render_start","renderLoop","chart","graph","i","queue","generate","callback","splice","setTimeout","render_end","addGraph","obj","push","module","exports","write","undefined","fastdom","mutate","read","measure","interactiveGuideline","layer","selection","each","data","mouseHandler","d3mouse","mouse","mouseX","mouseY","subtractMargin","mouseOutAnyReason","isMSIE","event","offsetX","offsetY","target","tagName","className","baseVal","match","margin","left","top","type","availableWidth","availableHeight","relatedTarget","ownerSVGElement","nvPointerEventsClass","elementMouseout","renderGuideLine","hidden","scaleIsOrdinal","xScale","rangeBands","pointXValue","elementIndex","bisect","range","rangeBand","domain","invert","elementMousemove","elementDblclick","elementClick","elementMouseDown","elementMouseUp","container","select","width","height","wrap","selectAll","wrapEnter","enter","append","attr","svgContainer","guideLine","x","showGuideLine","line","NaNtoZero","String","d","exit","remove","scale","linear","ActiveXObject","duration","hideDelay","_","interactiveBisect","values","searchVal","xAccessor","_xAccessor","_cmp","v","bisector","index","max","currentValue","nextIndex","min","nextValue","Math","abs","nearestValueIndex","threshold","yDistMax","Infinity","indexToHighlight","forEach","delta","initTooltip","node","document","body","id","classes","style","classed","nvtooltip","enabled","dataSeriesExists","newContent","contentGenerator","innerHTML","positionTooltip","floor","random","gravity","distance","snapDistance","lastPosition","headerEnabled","valueFormatter","headerFormatter","keyFormatter","table","createElement","theadEnter","html","value","tbodyEnter","trowEnter","p","series","highlight","color","total","key","filter","percent","format","opacityScale","opaci
ty","outerHTML","footer","position","pos","clientX","clientY","getComputedStyle","transform","client","getBoundingClientRect","isArray","isObject","calcGravityOffset","tmp","offsetHeight","offsetWidth","clientWidth","documentElement","clientHeight","gravityOffset","interrupt","transition","delay","old_translate","new_translate","round","translateInterpolator","interpolateString","is_hidden","styleTween","options","optionsFunc","_options","Object","create","get","set","chartContainer","fixedTop","offset","point","y","initOptions","windowSize","size","innerWidth","innerHeight","compatMode","a","isFunction","isDate","toString","isNumber","isNaN","windowResize","handler","addEventListener","clear","removeEventListener","getColor","defaultColor","color_scale","ordinal","category20","customTheme","dictionary","getKey","defaultColors","defIndex","pjax","links","content","load","href","fragment","parentNode","replaceChild","history","pushState","textContent","preventDefault","state","calcApproxTextWidth","svgTextElem","text","fontSize","parseInt","replace","textLength","n","watchTransition","renderWatch","args","_duration","renderStack","self","model","__rendered","m","arg","renderEnd","indexOf","reset","pop","every","deepExtend","dst","sources","source","srcObj","_setState","_getState","init","changed","getter","fn","setter","update","_set","settings","JSON","stringify","change","map","calcTicksX","numTicks","numValues","stream_len","calcTicksY","initOption","_calls","_overrides","ops","getOwnPropertyNames","calls","inheritOptionsD3","d3_source","oplist","_d3options","unshift","rebind","arrayUnique","sort","item","symbolMap","symbol","t","s","svg","symbolTypes","functor","inheritOptions","inherited","_inherited","d3ops","initSVG","nvd3-svg","sanitizeHeight","sanitizeWidth","bottom","right","noData","opt","noDataText","wrapTicks","word","words","split","reverse","lineNumber","lineHeight","dy","parseFloat","tspan","join","getComputedTextLength","arrayEquals","array1","array2","l","axis","g","ticks","orient","scale0","fmt","tickFormat","axisLabel","axisLabelText","xLabelMargin","axisMaxMin","w","isOrdinal","showMaxMin","tickPadding","axisLabelDistance","maxTextWidth","textHeight","xTicks","rotateLabelsRule","rotateLabels","box","sin","PI","staggerLabels","rotateYLabel","maxMinRange","err","copy","boxPlot","xDomain","getX","xRange","yData","yDomain","yMin","yMax","q1","getQ1","q3","getQ3","wl","getWl","wh","getWh","olItems","getOlItems","getOlValue","yScale","yRange","xScale0","yScale0","boxplots","boxEnter","j","hover","f","box_width","maxBoxWidth","box_left","box_right","endpoint","elementMouseover","getQ2","outliers","getOlColor","getOlLabel","label","Q1","Q2","Q3","whisker_low","whisker_high","q2","itemColor","outlierValue","outlierLabel","outlierColor","boxPlotChart","boxplot","showXAxis","xAxis","showYAxis","yAxis","beforeUpdate","clamp","gEnter","defsEnter","rightAlignYAxis","barsWrap","datum","disabled","tickSize","evt","tooltipContent","bullet","sortLabels","labels","lz","b","iA","iB","descending","rangez","ranges","markerz","markers","markerLinez","markerLines","measurez","measures","rangeLabelz","rangeLabels","markerLabelz","markerLabels","markerLineLabelz","markerLineLabels","measureLabelz","measureLabels","x1","extent","merge","forceX","__chart__","il","rangeClassNames","legacyRangeClassNames","w1","xp1","h3","markerData","marker","markerLinesData","defaultRangeLabels","bulletChart","x0","title","subtitle","bulletWrap","tick","tickEnter","tickUpdate","timer","flush","candlestickBar","barW
idth","padData","getLow","forceY","getHigh","chartClick","clipEdge","tickGroups","getOpen","getClose","getY","open","close","high","low","interactive","highlightPoint","pointIndex","isHoverOver","clearHighlights","cumulativeLineChart","lines","dragStart","dragMove","dx","updateZero","dragEnd","stateChange","indexLine","oldDuration","stateSetter","stateGetter","defaultState","indexDrag","behavior","drag","rescaleY","seriesDomains","initialDomain","completeDomain","indexify","interactivePointerEvents","showLegend","legend","marginTop","showControls","controlsData","controls","rightAlign","tempDisabled","useInteractiveGuideline","interactiveLayer","display","linesWrap","seriesIndex","avgLineData","average","avgLines","getAvgLineY","yVal","_ticks","newState","singlePoint","pointXLocation","allData","yValue","domainExtent","xValue","idx","indexifyYGetter","indexValue","noErrorCheck","updateState","useVoronoi","discreteBar","seriesData","y0","showValues","groups","bars","barsEnter","element","stopPropagation","elementDblClick","valueFormat","rectClass","discreteBarChart","discretebar","wrapLabels","distribution","naxis","distWrap","dist","getData","focus","resizePath","updateBrushBG","brush","empty","brushExtent","brushBG","leftWidth","rightWidth","onBrush","shouldDispatch","contentWrap","syncBrushing","brushBGenter","gBrush","pointActive","interpolate","xTickFormat","yTickFormat","forceDirectedGraph","nodes","nodeFieldSet","Set","keys","add","force","layout","linkStrength","friction","linkDistance","linkDist","charge","theta","alpha","start","link","sqrt","radius","py","px","nodeColor","field","linkExtras","nodeExtras","furiousLegend","setTextColor","vers","expanded","disengaged","setBGColor","seriesShape","seriesEnter","property","seriesCheckbox","seriesText","legendMouseover","legendMouseout","legendClick","radioButtonMode","userDisabled","engaged","legendDblclick","versPadding","align","seriesWidths","legendText","maxKeyLength","trimmedKey","substring","nodeTextLength","Error","padding","seriesPerRow","legendWidth","columnWidths","k","reduce","prev","cur","array","xPositions","curX","ceil","xpos","ypos","newxpos","maxwidth","historicalBar","rval","historicalBarChart","bar_model","transitionDuration","tooltipHide","ohlcBarChart","ohlcBar","candlestickBarChart","setBGOpacity","insert","seriesBG","scatter","scatterWrap","strokeWidth","fillOpacity","areaPaths","isArea","area","defined","y1","linePaths","pointSize","pointDomain","lineChart","updateXAxis","updateYAxis","focusLinesWrap","disableTooltip","focusEnable","focusEnter","legendPosition","currentValues","pointYValue","defaultValueFormatter","yPos","x2Axis","y2Axis","focusHeight","focusShowAxisX","focusShowAxisY","focusMargin","lineWithFocusChart","linePlusBarChart","availableHeight2","x2","availableHeight1","bar","focusBarsWrap","dataBars","allDisabled","dataLines","dataLine","switchYAxisOrder","y1Axis","y2","barsOpacity","linesOpacity","y1Opacity","y2Opacity","margin2","y3","lines2","bars2","y4","series1","series2","contextEnter","legendXPosition","originalKey","legendRightAxisHint","legendLeftAxisHint","bars2Wrap","lines2Wrap","y3Axis","y4Axis","getBarsAxis","main","getLinesAxis","multiBar","nonStackableCount","hideable","stacked","parsed","stack","stackOffset","nonStackable","nonStackableSeries","posBase","negBase","groupSpacing","exitTransition","last_datalength","barColor","rgb","darker","barSelection","multiBarChart","multibar","controlWidth","controlLabels","grouped","getTranslate","staggerUp","staggerDown","totalInBetweenTicks","red
uceXTicks","multiBarHorizontal","valuePadding","getYerr","xerr","mid","path","yerr","showBarLabels","yErr","multiBarHorizontalChart","multiChart","mouseover_line","yaxis","yAxis2","yAxis1","mouseover_scatter","mouseover_stack","stack1","mouseover_bar","bars1","serieIndex","dataLines1","dataLines2","dataScatters1","dataScatters2","dataBars1","dataBars2","dataStack1","dataStack2","color_array","lines1","scatters1","scatters2","stack2","lines1Wrap","scatters1Wrap","bars1Wrap","stack1Wrap","scatters2Wrap","stack2Wrap","extraValue1","aVal","extraValue2","yScale1","yDomain1","yScale2","yDomain2","stackedArea","parallelCoordinates","enabledDimensions","displayMissingValuesline","axisWithUndefinedValues","newscale","missingValuesline","missingValueslineText","restoreBrush","visible","filters","brushDomain","dimension","hasOnlyNaN","oldDomainMaxValue","hasNaN","dimensions","updateTicks","brushstart","displayBrush","actives","dimensionNames","extents","foreground","isActive","brushend","hasActiveBrush","brushEnd","currentTicks","tickValues","dragging","__origin__","background","dimensionPosition","currentPosition","dimensionsOrder","newData","val","dataValues","dimensionData","rangePoints","onlyUndefinedValues","tension","lineTension","axisDrag","lineData","undefinedValuesLabel","dimensionsEnter","formerActive","activeChanged","dimensionFormats","parallelCoordinatesChart","originalPosition","parallelCoordinatesWrap","isSorted","nanValue","str","tp","dim","dd","pie","arcTween","endAngle","startAngle","donut","innerRadius","_current","arcs","arcsRadiusOuter","arcsRadiusInner","arcsRadius","outer","inner","donutRatio","growOnHover","g_pie","arcsOver","arc","outerRadius","arcOver","cornerRadius","padAngle","titleOffset","slices","pieLabels","ae","attrTween","showLabels","labelsArc","labelsOutside","group","labelSunbeamLayout","rotateAngle","centroid","labelLocationHash","avgHeight","avgWidth","createHashKey","coordinates","getSlicePercentage","center","labelThreshold","hashKey","labelType","pieLabelsOutside","donutLabelsOutside","labelFormat","pieChart","pieWrap","showTooltipPercent","sankey","computeNodeLinks","sourceLinks","targetLinks","computeNodeValues","sum","computeNodeBreadths","nextNodes","remainingNodes","nodeWidth","sinksRight","moveSinksRight","scaleNodeBreadths","kx","computeNodeDepths","iterations","initializeNodeDepth","ky","nodesByBreadth","nodePadding","relaxLeftToRight","weightedSource","sy","breadth","relaxRightToLeft","weightedTarget","ty","resolveCollisions","ascendingDepth","nest","sortKeys","ascending","entries","computeLinkDepths","ascendingSourceDepth","ascendingTargetDepth","relayout","xi","interpolateNumber","curvature","x3","linkPath","sankeyChart","dragmove","testData","isDataValid","dataAvailable","error","showError","linkTitle","origin","appendChild","nodeFillColor","nodeStrokeColor","nodeTitle","units","formatNumber","message","nodeStyle","fillColor","strokeColor","getCache","_cache","delCache","getDiffs","cache","diffs","hasOwnProperty","updateInteractiveLayer","needsUpdate","vertices","groupIndex","pX","pY","pointArray","bounds","geom","polygon","voronoi","clip","pointPaths","vPointPaths","showVoronoi","clipVoronoi","pointClips","clipRadius","mouseEventCallback","el","mDispatch","scrollTop","pageYOffset","scrollLeft","pageXOffset","relativePos","logScale","sizeDomain","getSize","padDataOuter","z","forceSize","sizeRange","_sizeRange_def","z0","scaleDiff","width0","height0","sizeDiff","pointBorderColor","points","getShape","titles","interactiveUpdateDelay","clearTimeout","
timeoutID","shape","notActive","pointScale","pointRange","forcePoint","pointShape","scatterChart","showDistX","distX","showDistY","distY","regWrap","regLine","intercept","slope","sparkline","paths","result","yValues","maxPoint","lastIndexOf","minPoint","currentPoint","showMinMaxPoints","showCurrentPoint","animate","sparklinePlus","updateValueLine","paused","hoverValue","hoverEnter","sparklineHover","getClosestIndex","closestIndex","sparklineWrap","showLastValue","valueWrap","rightAlignValue","alignValue","dataRaw","aseries","dataFiltered","order","out","zeroArea","areaMouseover","pageX","pageY","areaMouseout","areaClick","d3_stackedOffset_stackPercent","stackData","o","stackedAreaChart","currentFormat","oldYTickFormat","percentFormatter","stackedWrap","xAxisHeight","legendTop","metaKey","stream","stack_percent","controlOptions","requiredTop","valueSum","allNullValues","tooltipValue","showTotalInTooltip","stackedY0","stackedY","totalLabel","oldValueFormatter","stackedAreaWithFocusChart","sunburst","rotationToAvoidUpsideDown","centerAngle","computeCenterAngle","computeNodePercentage","labelThresholdMatched","arcTweenZoom","xd","yd","yr","arcTweenUpdate","ipo","dx0","dy0","updatePrevPosition","prevPositions","pP","storeRetrievePrevPositions","zoomClick","depth","arcText","getBBox","rotation","partition","modes","mode","cG","cGE","groupColorByParent","children","parent","count","d1","d2","sunburstChart"],"mappings":";;AAAA,YAEA,GAAIA,KAGJA,GAAGC,KAAM,EACTD,EAAGE,QAAUF,EAAGE,YAChBF,EAAGG,MAAQH,EAAGG,UACdH,EAAGI,OAASJ,EAAGI,WACfJ,EAAGK,UACHL,EAAGM,QACHN,EAAGO,OAGoB,mBAAb,SAAgD,mBAAd,UAA2C,mBAAR,MAC3EC,GAAKC,QAAO,OAGhBT,EAAGU,SAAWF,GAAGE,SAAQ,eAAiB,cAOrCC,SAASC,UAAUC,OACpBF,SAASC,UAAUC,KAAO,SAAUC,GAChC,GAAoB,kBAATC,MAEP,KAAM,IAAIC,WAAS,uEAGvB,IAAIC,GAAQC,MAAMN,UAAUO,MAAMC,KAAKC,UAAW,GAC9CC,EAAUP,KACVQ,EAAO,aACPC,EAAS,WACL,MAAOF,GAAQG,MAAMV,eAAgBQ,IAAQT,EACnCC,KACAD,EACNG,EAAMS,OAAOR,MAAMN,UAAUO,MAAMC,KAAKC,aAKpD,OAFAE,GAAKX,UAAYG,KAAKH,UACtBY,EAAOZ,UAAY,GAAIW,GAChBC,IAKXxB,EAAGC,MACHD,EAAGU,SAASiB,GAAE,eAAiB,SAASC,GACpC5B,EAAGM,KAAKuB,WAAa,GAAIC,QAG7B9B,EAAGU,SAASiB,GAAE,aAAe,SAASC,GAClC5B,EAAGM,KAAKyB,SAAW,GAAID,MACvB9B,EAAGM,KAAK0B,UAAYhC,EAAGM,KAAKyB,QAAU/B,EAAGM,KAAKuB,UAC9C7B,EAAGiC,IAAG,QAAUjC,EAAGM,KAAK0B,cAQhChC,EAAGiC,IAAM,WACL,GAAIjC,EAAGC,KAAOiC,OAAOC,SAAWA,QAAQF,KAAOE,QAAQF,IAAIR,MACvDU,QAAQF,IAAIR,MAAMU,QAASd,eAC1B,IAAIrB,EAAGC,KAAOiC,OAAOC,SAAiC,kBAAfA,SAAQF,KAAqBtB,SAASC,UAAUC,KAAM,CAC9F,GAAIoB,GAAMtB,SAASC,UAAUC,KAAKO,KAAKe,QAAQF,IAAKE,QACpDF,GAAIR,MAAMU,QAASd,WAEvB,MAAOA,WAAUA,UAAUe,OAAS,IAIxCpC,EAAGqC,WAAa,SAASC,EAAMC,GACvBJ,SAAWA,QAAQK,MACnBL,QAAQK,KAAI,kBAAqBF,EAAO,0BAA2BC,GAAQ,KAOnFvC,EAAGyC,OAAS,SAAgBC,GAExBA,EAAOA,GAAQ,EAEf1C,EAAGyC,OAAOE,QAAS,EACnB3C,EAAGU,SAASkC,cAEZ,IAAIC,GAAa,WAGb,IAAK,GAFDC,GAAOC,EAEFC,EAAI,EAAON,EAAJM,IAAaD,EAAQ/C,EAAGyC,OAAOQ,MAAMD,IAAKA,IACtDF,EAAQC,EAAMG,iBACHH,GAAMI,gBAAkB,WAAYJ,EAAMI,SAASL,EAGlE9C,GAAGyC,OAAOQ,MAAMG,OAAO,EAAGJ,GAEtBhD,EAAGyC,OAAOQ,MAAMb,OAChBiB,WAAWR,IAGX7C,EAAGU,SAAS4C,aACZtD,EAAGyC,OAAOE,QAAS,GAI3BU,YAAWR,IAGf7C,EAAGyC,OAAOE,QAAS,EACnB3C,EAAGyC,OAAOQ,SAmBVjD,EAAGuD,SAAW,SAASC,SACRnC,WAAU,UAAa,YAC9BmC,GAAON,SAAU7B,UAAU,GAAI8B,SAAU9B,UAAU,KAGvDrB,EAAGyC,OAAOQ,MAAMQ,KAAKD,GAEhBxD,EAAGyC,OAAOE,QACX3C,EAAGyC,UAKY,mBAAb,SAAgD,mBAAd,WAC1CiB,OAAOC,QAAU3D,GAGI,mBAAb,UACRkC,OAAOlC,GAAKA,GClJdA,EAAGO,IAAIqD,MAAQ,SAAST,GACvB,MAAuBU,UAAnB3B,OAAO4B,QACHA,QAAQC,OAAOZ,GAEhBA,KASRnD,EAAGO,IAAIyD,KAAO,SAASb,GACtB,MAAuBU,UAAnB3B,OAAO4B,QACHA,QAAQG,QAAQd,GAEjBA,KCfRnD,EAAGkE,qBAAuB,WACtB,YAkBA,SAASC,GAAMC,GACXA,EAAUC,KAAK,SAASC,GAapB,QAASC,KACL,GAA
IC,GAAUhE,GAAGiE,MAAM1D,MACnB2D,EAASF,EAAQ,GACjBG,EAASH,EAAQ,GACjBI,GAAiB,EACjBC,GAAoB,CAuCxB,IAtCIC,IAQAJ,EAASlE,GAAGuE,MAAMC,QAClBL,EAASnE,GAAGuE,MAAME,QAWa,QAA5BzE,GAAGuE,MAAMG,OAAOC,UACfP,GAAiB,GAGjBpE,GAAGuE,MAAMG,OAAOE,UAAUC,QAAQC,MAAK,eACvCT,GAAoB,IAKzBD,IACCF,GAAUa,EAAOC,KACjBb,GAAUY,EAAOE,KAMC,aAAlBjF,GAAGuE,MAAMW,MACG,EAAThB,GAAuB,EAATC,GACdD,EAASiB,GAAkBhB,EAASiB,GACnCpF,GAAGuE,MAAMc,eAA4DhC,SAA3CrD,GAAGuE,MAAMc,cAAcC,iBAClDjB,EACD,CAEF,GAAIC,GACItE,GAAGuE,MAAMc,eACqChC,SAA3CrD,GAAGuE,MAAMc,cAAcC,kBACejC,SAArCrD,GAAGuE,MAAMc,cAAcT,WACpB5E,GAAGuE,MAAMc,cAAcT,UAAUE,MAAMpF,EAAQ6F,uBAEtD,MASR,OANArF,GAASsF,iBACLtB,OAAQA,EACRC,OAAQA,IAEZR,EAAM8B,gBAAgB,UACtB/F,GAAQgG,QAAO,GAGfhG,EAAQgG,QAAO,EAInB,IAAIC,GAA8C,kBAAtBC,GAAOC,WAC/BC,EAAczC,MAGlB,IAAIsC,EAAgB,CAChB,GAAII,GAAe/F,GAAGgG,OAAOJ,EAAOK,QAAS/B,GAAU,CAEvD,MAAI0B,EAAOK,QAAQF,GAAgBH,EAAOM,aAAehC,GAUrD,MANAhE,GAASsF,iBACLtB,OAAQA,EACRC,OAAQA,IAEZR,EAAM8B,gBAAgB,UACtB/F,GAAQgG,QAAO,EARfI,GAAcF,EAAOO,SAASnG,GAAGgG,OAAOJ,EAAOK,QAAS/B,GAAU,OAatE4B,GAAcF,EAAOQ,OAAOlC,EAGhChE,GAASmG,kBACLnC,OAAQA,EACRC,OAAQA,EACR2B,YAAaA,IAIK,aAAlB9F,GAAGuE,MAAMW,MACThF,EAASoG,iBACLpC,OAAQA,EACRC,OAAQA,EACR2B,YAAaA,IAKC,UAAlB9F,GAAGuE,MAAMW,MACThF,EAASqG,cACLrC,OAAQA,EACRC,OAAQA,EACR2B,YAAaA,IAKC,cAAlB9F,GAAGuE,MAAMW,MACZhF,EAASsG,kBACRtC,OAAQA,EACRC,OAAQA,EACR2B,YAAaA,IAKO,YAAlB9F,GAAGuE,MAAMW,MACZhF,EAASuG,gBACRvC,OAAQA,EACRC,OAAQA,EACR2B,YAAaA,IAlJnB,GAAIY,GAAY1G,GAAG2G,OAAOpG,MACtB4E,EAAkByB,GAAS,IAAMxB,EAAmByB,GAAU,IAC9DC,EAAOJ,EAAUK,UAAS,qCACzBjD,MAAMA,IACPkD,EAAYF,EAAKG,QAChBC,OAAM,KAAMC,KAAI,QAAU,mCAC/BH,GAAUE,OAAM,KAAMC,KAAI,QAAO,2BAE5BC,IA+ILA,EACKjG,GAAE,YAAa4C,GACf5C,GAAE,YAAa4C,GAAc,GAC7B5C,GAAE,WAAa4C,GAAa,GAC5B5C,GAAE,YAAc4C,GAAa,GAC7B5C,GAAE,UAAY4C,GAAa,GAC3B5C,GAAE,WAAa4C,GACf5C,GAAE,QAAU4C,GAGjBJ,EAAM0D,UAAY,KAElB1D,EAAM8B,gBAAkB,SAAS6B,GACxBC,IACD5D,EAAM0D,WAAa1D,EAAM0D,UAAUF,KAAI,QAAWG,GACtD9H,EAAGO,IAAIqD,MAAM,WACT,GAAIoE,GAAOV,EAAKH,OAAM,4BACjBI,UAAS,QACTjD,KAAW,MAALwD,GAAc9H,EAAGG,MAAM8H,UAAUH,OAAUI,OACtDF,GAAKP,QACAC,OAAM,QACNC,KAAI,QAAU,gBACdA,KAAI,KAAO,SAASQ,GAAK,MAAOA,KAChCR,KAAI,KAAO,SAASQ,GAAK,MAAOA,KAChCR,KAAI,KAAO/B,GACX+B,KAAI,KAAM,GACfK,EAAKI,OAAOC,gBAnM5B,GAAI9C,IAAWC,KAAM,EAAGC,IAAK,GACrB2B,EAAQ,KACRC,EAAS,KACTjB,EAAS5F,GAAG8H,MAAMC,SAClB7H,EAAWF,GAAGE,SAAQ,mBAAqB,kBAAmB,eAAgB,kBAAmB,mBAAoB,kBACrHqH,GAAgB,EAChBH,EAAe,KACf1H,EAAUF,EAAGI,OAAOF,UACpB4E,EAAU5C,OAAOsG,aAyOzB,OAtOAtI,GACKuI,SAAS,GACTC,UAAU,GACVxC,QAAO,GA2LZ/B,EAAMzD,SAAWA,EACjByD,EAAMjE,QAAUA,EAEhBiE,EAAMoB,OAAS,SAASoD,GACpB,MAAKtH,WAAUe,QACfmD,EAAOE,IAA4B,mBAAZkD,GAAElD,IAAwBkD,EAAElD,IAASF,EAAOE,IACnEF,EAAOC,KAA4B,mBAAZmD,GAAEnD,KAAwBmD,EAAEnD,KAASD,EAAOC,KAC5DrB,GAHuBoB,GAMlCpB,EAAMiD,MAAQ,SAASuB,GACnB,MAAKtH,WAAUe,QACfgF,EAAQuB,EACDxE,GAFuBiD,GAKlCjD,EAAMkD,OAAS,SAASsB,GACpB,MAAKtH,WAAUe,QACfiF,EAASsB,EACFxE,GAFuBkD,GAKlClD,EAAMiC,OAAS,SAASuC,GACpB,MAAKtH,WAAUe,QACfgE,EAASuC,EACFxE,GAFuBiC,GAKlCjC,EAAM4D,cAAgB,SAASY,GAC3B,MAAKtH,WAAUe,QACf2F,EAAgBY,EACTxE,GAFuB4D,GAKlC5D,EAAMyD,aAAe,SAASe,GAC1B,MAAKtH,WAAUe,QACfwF,EAAee,EACRxE,GAFuByD,GAK3BzD,GAgBXnE,EAAG4I,kBAAoB,SAAUC,EAAQC,EAAWC,GAChD,YACA,MAAOF,YAAkB3H,QACrB,MAAO,KAEX,IAAI8H,EAEAA,GADqB,kBAAdD,GACM,SAASZ,GAClB,MAAOA,GAAEL,GAGAiB,CAEjB,IAAIE,GAAO,SAASd,EAAGe,GAUnB,MAAOF,GAAWb,GAAKe,GAGvB1C,EAAShG,GAAG2I,SAASF,GAAMzD,KAC3B4D,EAAQ5I,GAAG6I,KAAK,EAAG7C,EAAOqC,EAAOC,GAAa,IAC9CQ,EAAeN,EAAWH,EAAOO,GAMrC,IAJ4B,mBAAjBE,KACPA,EAAeF,GAGfE,IAAiBR,EACjB,MAAOM,EAGX,IAAIG,GAAY/I,GAAGgJ,KAAKJ,EAAM,EAAGP,EAAOzG,OAAS,IAC7CqH,EAAYT,EAAWH,EAAOU,GAMlC,OAJyB,mBAAdE,KACPA,EAAYF,GAGZG,KAAKC,IAAIF,EAAYX,IAAcY,KAAKC,IAAIL,EAAeR,
GACpDM,EAEAG,GASfvJ,EAAG4J,kBAAoB,SAAUf,EAAQC,EAAWe,GAChD,YACA,IAAIC,GAAWC,EAAAA,EAAUC,EAAmB,IAQ5C,OAPAnB,GAAOoB,QAAQ,SAAS9B,EAAEnF,GACtB,GAAIkH,GAAQR,KAAKC,IAAIb,EAAYX,EACvB,OAALA,GAAsB2B,GAATI,GAA6BL,EAARK,IACnCJ,EAAWI,EACXF,EAAmBhH,KAGpBgH,GCvUXhK,EAAGI,OAAOF,QAAU,WAChB,YAyPA,SAASiK,KACL,IAAKjK,IAAYA,EAAQkK,OAAQ,CAG7B,GAAI9F,IAAQ,EACZpE,GAAUM,GAAG2G,OAAOkD,SAASC,MAAMnD,OAAM,IAAKoD,GAAIjG,KAAKA,GAEvDpE,EAAQuH,QAAQC,OAAM,OACdC,KAAI,QAAU,cAAgB6C,EAAUA,EAAU,eAClD7C,KAAI,KAAO4C,GACXE,MAAK,MAAQ,GAAGA,MAAK,OAAS,GAC9BA,MAAK,UAAY,GACjBA,MAAK,WAAa,SAClBlD,UAAS,sBAAuBmD,QAAQ3E,GAAsB,GAC9D2E,QAAQ3E,GAAsB,GAEtC7F,EAAQkI,OAAOC,UAKvB,QAASsC,KACL,MAAKC,IACAC,EAAiBvG,IAEtBtE,EAAGO,IAAIqD,MAAM,WACTuG,GAIA,IAAIW,GAAaC,EAAiBzG,EAC9BwG,KACA5K,EAAQkK,OAAOY,UAAYF,GAG/BG,MAGGN,GAhBP,OAjQJ,GAAIJ,GAAK,aAAeb,KAAKwB,MAAsB,IAAhBxB,KAAKyB,UAChC7G,EAAO,KACP8G,EAAU,IACVC,EAAW,GACXC,EAAe,EACfd,EAAU,KACVtE,GAAS,EACTwC,EAAY,IACZxI,EAAU,KACVqL,GAAiB/F,KAAM,KAAMC,IAAK,MAClCmF,GAAU,EACVnC,EAAW,IACX+C,GAAgB,EAChBzF,EAAuB,yBAI3B0F,EAAiB,SAAStD,EAAGnF,GAC7B,MAAOmF,IAIPuD,EAAkB,SAASvD,GAC3B,MAAOA,IAGPwD,EAAe,SAASxD,EAAGnF,GAC3B,MAAOmF,IAKP4C,EAAmB,SAAS5C,GAC5B,GAAU,OAANA,EACA,MAAO,EAGX,IAAIyD,GAAQpL,GAAG2G,OAAOkD,SAASwB,cAAa,SAC5C,IAAIL,EAAe,CACf,GAAIM,GAAaF,EAAMrE,UAAS,SAC3BjD,MAAM6D,IACNV,QAAQC,OAAM,QAEnBoE,GAAWpE,OAAM,MACZA,OAAM,MACNC,KAAI,UAAY,GAChBD,OAAM,UACNgD,QAAO,WAAY,GACnBqB,KAAKL,EAAgBvD,EAAE6D,QAGhC,GAAIC,GAAaL,EAAMrE,UAAS,SAC3BjD,MAAM6D,IACNV,QAAQC,OAAM,SAEfwE,EAAYD,EAAW1E,UAAS,MAC3BjD,KAAK,SAAS6H,GAAK,MAAOA,GAAEC,SAC5B3E,QACAC,OAAM,MACNgD,QAAO,YAAc,SAASyB,GAAK,MAAOA,GAAEE,WAErDH,GAAUxE,OAAM,MACXgD,QAAO,sBAAsB,GAC7BhD,OAAM,OACN+C,MAAK,mBAAqB,SAAS0B,GAAK,MAAOA,GAAEG,QAEtDJ,EAAUxE,OAAM,MACXgD,QAAO,OAAO,GACdA,QAAO,QAAS,SAASyB,GAAK,QAASA,EAAEI,QACzCR,KAAK,SAASI,EAAGnJ,GAAK,MAAO2I,GAAaQ,EAAEK,IAAKxJ,KAEtDkJ,EAAUxE,OAAM,MACXgD,QAAO,SAAS,GAChBqB,KAAK,SAASI,EAAGnJ,GAAK,MAAOyI,GAAeU,EAAEH,MAAOhJ,KAE1DkJ,EAAUO,OAAO,SAAUN,EAAEnJ,GAAK,MAAqBa,UAAdsI,EAAEO,UAAyBhF,OAAM,MACrEgD,QAAO,WAAY,GACnBqB,KAAK,SAASI,EAAGnJ,GAAK,MAAO,IAAMxC,GAAGmM,OAAM,KAAMR,EAAEO,SAAW,MAEpER,EAAU3E,UAAS,MAAOlD,KAAK,SAAS8H,GACpC,GAAIA,EAAEE,UAAW,CACb,GAAIO,GAAepM,GAAG8H,MAAMC,SAAS5B,QAAQ,EAAE,IAAIF,OAAK,OAAS0F,EAAEG,QAC/DO,EAAU,EACdrM,IAAG2G,OAAOpG,MACL0J,MAAK,sBAAwBmC,EAAaC,IAC1CpC,MAAK,mBAAqBmC,EAAaC,MAKpD,IAAId,GAAOH,EAAMxB,OAAO0C,SAGxB,OAFiBjJ,UAAbsE,EAAE4E,SACFhB,GAAQ,uBAAyB5D,EAAE4E,OAAS,UACzChB,GAYPiB,EAAW,WACX,GAAIC,IACAzH,KAAmB,OAAbhF,GAAGuE,MAAiBvE,GAAGuE,MAAMmI,QAAU,EAC7CzH,IAAkB,OAAbjF,GAAGuE,MAAiBvE,GAAGuE,MAAMoI,QAAU,EAGhD,IAAgD,QAA7CC,iBAAiB/C,SAASC,MAAM+C,UAAqB,CAGpD,GAAIC,GAASjD,SAASC,KAAKiD,uBAC3BN,GAAIzH,MAAQ8H,EAAO9H,KACnByH,EAAIxH,KAAO6H,EAAO7H,IAGtB,MAAOwH,IAGPpC,EAAmB,SAAS1C,GAC5B,GAAIA,GAAKA,EAAEiE,OAAQ,CACf,GAAIpM,EAAGG,MAAMqN,QAAQrF,EAAEiE,QACnB,OAAO,CAGX,IAAIpM,EAAGG,MAAMsN,SAAStF,EAAEiE,QAEpB,MADAjE,GAAEiE,QAAUjE,EAAEiE,SACP,EAGf,OAAO,GAKPsB,EAAoB,SAAST,GAC7B,GAIIzH,GAAMC,EAAKkI,EAJXtG,EAASnH,EAAQkK,OAAOwD,aACxBxG,EAAQlH,EAAQkK,OAAOyD,YACvBC,EAAczD,SAAS0D,gBAAgBD,YACvCE,EAAe3D,SAAS0D,gBAAgBC,YAI5C,QAAQ5C,GACJ,IAAK,IACD5F,GAAS4B,EAAQiE,EACjB5F,IAAS4B,EAAS,GACf4F,EAAIzH,KAAOA,EAAO,IAAGA,EAAO6F,IAC3BsC,EAAMV,EAAIxH,IAAMA,GAAO,IAAGA,GAAOkI,IACjCA,EAAMV,EAAIxH,IAAMA,EAAM4B,GAAU2G,IAAcvI,GAAOkI,EAAMK,EAC/D,MACJ,KAAK,IACDxI,EAAO6F,EACP5F,IAAS4B,EAAS,GACd4F,EAAIzH,KAAOA,EAAO4B,EAAQ0G,IAAatI,GAAS4B,EAAQiE,IACvDsC,EAAMV,EAAIxH,IAAMA,GAAO,IAAGA,GAAOkI,IACjCA,EAAMV,EAAIxH,IAAMA,EAAM4B,GAAU2G,IAAcvI,GAAOkI,EAAMK,EAChE,MACJ,KAAK,IACDxI,IAAU4B,EAAQ,GAAK,EACvB3B,EAAM4F,EACF4B,EAAIxH,IAAMA,EAAM4B,EAAS2G,IAAcvI,GAAQ4B,EAASgE,IACvDsC,EAAMV,EAAIzH,KAA
OA,GAAQ,IAAGA,GAAQmI,IACpCA,EAAMV,EAAIzH,KAAOA,EAAO4B,GAAS0G,IAAatI,GAAQmI,EAAMG,EACjE,MACJ,KAAK,IACDtI,IAAU4B,EAAQ,GAClB3B,GAAQ4B,EAASgE,EACb4B,EAAIxH,IAAMA,EAAM,IAAGA,EAAM4F,IACxBsC,EAAMV,EAAIzH,KAAOA,GAAQ,IAAGA,GAAQmI,IACpCA,EAAMV,EAAIzH,KAAOA,EAAO4B,GAAS0G,IAAatI,GAAQmI,EAAMG,EACjE,MACJ,KAAK,SACDtI,IAAU4B,EAAQ,GAClB3B,IAAS4B,EAAS,EAClB,MACJ,SACI7B,EAAO,EACPC,EAAM,EAId,OAASD,KAAQA,EAAMC,IAAOA,IAM9BwF,EAAkB,WAClBjL,EAAGO,IAAIyD,KAAK,WACR,GAAIiJ,GAAMD,IACNiB,EAAgBP,EAAkBT,GAClCzH,EAAOyH,EAAIzH,KAAOyI,EAAczI,KAChCC,EAAMwH,EAAIxH,IAAMwI,EAAcxI,GAGlC,IAAIS,EACAhG,EACKgO,YACAC,aACAC,MAAM1F,GACND,SAAS,GACTgC,MAAK,UAAY,OACnB,CAEH,GAAI4D,GAAgB,aAAe9C,EAAa/F,KAAO,OAAS+F,EAAa9F,IAAM,MAC/E6I,EAAgB,aAAe5E,KAAK6E,MAAM/I,GAAQ,OAASkE,KAAK6E,MAAM9I,GAAO,MAC7E+I,EAAwBhO,GAAGiO,kBAAkBJ,EAAeC,GAC5DI,EAAYxO,EAAQuK,MAAK,WAAc,EAE3CvK,GACKgO,YACAC,aACA1F,SAASiG,EAAY,EAAIjG,GAEzBkG,WAAU,YAAc,SAAUxG,GAC/B,MAAOqG,IACR,aAEFG,WAAU,oBAAsB,SAAUxG,GACvC,MAAOqG,KAEV/D,MAAK,gBAAkB6D,GACvB7D,MAAK,UAAY,GAG1Bc,EAAa/F,KAAOA,EACpB+F,EAAa9F,IAAMA,IAuG3B,OAzDAkF,GAAU5E,qBAAuBA,EACjC4E,EAAUiE,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAK8J,GAE9CA,EAAUmE,SAAWC,OAAOC,WAExBvG,UAAWwG,IAAK,WAAW,MAAOxG,IAAYyG,IAAK,SAASvG,GAAGF,EAASE,IACxEyC,SAAU6D,IAAK,WAAW,MAAO7D,IAAW8D,IAAK,SAASvG,GAAGyC,EAAQzC,IACrE0C,UAAW4D,IAAK,WAAW,MAAO5D,IAAY6D,IAAK,SAASvG,GAAG0C,EAAS1C,IACxE2C,cAAe2D,IAAK,WAAW,MAAO3D,IAAgB4D,IAAK,SAASvG,GAAG2C,EAAa3C,IACpF6B,SAAUyE,IAAK,WAAW,MAAOzE,IAAW0E,IAAK,SAASvG,GAAG6B,EAAQ7B,IACrEiC,SAAUqE,IAAK,WAAW,MAAOrE,IAAWsE,IAAK,SAASvG,GAAGiC,EAAQjC,IACrED,WAAYuG,IAAK,WAAW,MAAOvG,IAAawG,IAAK,SAASvG,GAAGD,EAAUC,IAC3EoC,kBAAmBkE,IAAK,WAAW,MAAOlE,IAAoBmE,IAAK,SAASvG,GAAGoC,EAAiBpC,IAChG8C,gBAAiBwD,IAAK,WAAW,MAAOxD,IAAkByD,IAAK,SAASvG,GAAG8C,EAAe9C,IAC1F+C,iBAAkBuD,IAAK,WAAW,MAAOvD,IAAmBwD,IAAK,SAASvG,GAAG+C,EAAgB/C,IAC7FgD,cAAesD,IAAK,WAAW,MAAOtD,IAAgBuD,IAAK,SAASvG,GAAGgD,EAAahD,IACpF6C,eAAgByD,IAAK,WAAW,MAAOzD,IAAiB0D,IAAK,SAASvG,GAAG6C,EAAc7C,IACvFqE,UAAWiC,IAAK,WAAW,MAAOjC,IAAYkC,IAAK,SAASvG,GAAGqE,EAASrE,IAGxEwG,gBAAiBF,IAAK,WAAW,MAAO5E,UAASC,MAAQ4E,IAAK,SAASvG,GAEnE3I,EAAGqC,WAAU,iBAAmB,iCAEpC+M,UAAWH,IAAK,WAAW,MAAO,OAAQC,IAAK,SAASvG,GAEpD3I,EAAGqC,WAAU,WAAa,iCAE9BgN,QAASJ,IAAK,WAAW,OAAQzJ,KAAM,EAAGC,IAAK,IAAMyJ,IAAK,SAASvG,GAE/D3I,EAAGqC,WAAU,SAAW,0CAI5B6D,QAAS+I,IAAK,WAAW,MAAO/I,IAAUgJ,IAAK,SAASvG,GAChDzC,GAAUyC,IACVzC,IAAWyC,EACXgC,OAGRrG,MAAO2K,IAAK,WAAW,MAAO3K,IAAQ4K,IAAK,SAASvG,GAE5CA,EAAE2G,QACF3G,EAAEqD,MAAQrD,EAAE2G,MAAMxH,EAClBa,EAAEyD,OAASzD,EAAEyD,WACbzD,EAAEyD,OAAOJ,MAAQrD,EAAE2G,MAAMC,EACzB5G,EAAEyD,OAAOE,MAAQ3D,EAAE2G,MAAMhD,OAAS3D,EAAEyD,OAAOE,OAE/ChI,EAAOqE,IAIXyB,MAAO6E,IAAK,WAAW,MAAO/O,GAAQkK,QAAU8E,IAAK,SAASvG,MAC9D4B,IAAK0E,IAAK,WAAW,MAAO1E,IAAM2E,IAAK,SAASvG,QAGpD3I,EAAGG,MAAMqP,YAAY7E,GACdA,GC7VX3K,EAAGG,MAAMsP,WAAa,WAElB,GAAIC,IAAQtI,MAAO,IAAKC,OAAQ,IAGhC,OAAInF,QAAOyN,YAAczN,OAAO0N,aAC5BF,EAAKtI,MAAQlF,OAAOyN,WACpBD,EAAKrI,OAASnF,OAAO0N,YACd,GAIY,cAAnBvF,SAASwF,YACTxF,SAAS0D,iBACT1D,SAAS0D,gBAAgBF,aAEzB6B,EAAKtI,MAAQiD,SAAS0D,gBAAgBF,YACtC6B,EAAKrI,OAASgD,SAAS0D,gBAAgBH,aAChC,GAIPvD,SAASC,MAAQD,SAASC,KAAKuD,aAC/B6B,EAAKtI,MAAQiD,SAASC,KAAKuD,YAC3B6B,EAAKrI,OAASgD,SAASC,KAAKsD,aACrB,GAGJ,GAOX5N,EAAGG,MAAMqN,QAAUtM,MAAMsM,QACzBxN,EAAGG,MAAMsN,SAAW,SAASqC,GACzB,MAAa,QAANA,GAA2B,gBAANA,IAEhC9P,EAAGG,MAAM4P,WAAa,SAASD,GAC3B,MAAoB,kBAANA,IAElB9P,EAAGG,MAAM6P,OAAS,SAASF,GACvB,MAA4B,kBAArBG,SAAS7O,KAAK0O,IAEzB9P,EAAGG,MAAM+P,SAAW,SAASJ,GACzB,OAAQK,MAAML,IAAmB,gBAANA,IAO/B9P,EAAGG,MAAMiQ,aAAe,SAASC,GAO7B,MANInO,QAAOoO,iBACPpO,OAAOoO,iBAAgB,SAAWD,GAElCrQ,EAAGiC,IAAG,gDAAkDoO,IAIxDlN,SAAUkN,EACVE,MAAO,WACHrO,OAAOsO,oBAAmB,SAAWH,
MAYjDrQ,EAAGG,MAAMsQ,SAAW,SAASnE,GAEzB,GAAczI,SAAVyI,EACA,MAAOtM,GAAGG,MAAMuQ,cAGb,IAAG1Q,EAAGG,MAAMqN,QAAQlB,GAAQ,CAC/B,GAAIqE,GAAcnQ,GAAG8H,MAAMsI,UAAUnK,MAAM6F,EAC3C,OAAO,UAASnE,EAAGnF,GACf,GAAIwJ,GAAY3I,SAANb,EAAkBmF,EAAInF,CAChC,OAAOmF,GAAEmE,OAASqE,EAAYnE,IAOlC,MAAOF,IASftM,EAAGG,MAAMuQ,aAAe,WAEpB,MAAO1Q,GAAGG,MAAMsQ,SAASjQ,GAAG8H,MAAMuI,aAAapK,UAQnDzG,EAAGG,MAAM2Q,YAAc,SAASC,EAAYC,EAAQC,GAEhDD,EAASA,GAAU,SAAS5E,GAAU,MAAOA,GAAOI,KACpDyE,EAAgBA,GAAiBzQ,GAAG8H,MAAMuI,aAAapK,OAGvD,IAAIyK,GAAWD,EAAc7O,MAE7B,OAAO,UAASgK,EAAQhD,GACpB,GAAIoD,GAAMwE,EAAO5E,EACjB,OAAIpM,GAAGG,MAAM4P,WAAWgB,EAAWvE,IACxBuE,EAAWvE,KACS3I,SAApBkN,EAAWvE,GACXuE,EAAWvE,IAGb0E,IAEDA,EAAWD,EAAc7O,QAE7B8O,GAAsB,EACfD,EAAcC,MAWjClR,EAAGG,MAAMgR,KAAO,SAASC,EAAOC,GAE5B,GAAIC,GAAO,SAASC,GAChB/Q,GAAGuL,KAAKwF,EAAM,SAASC,GACnB,GAAItM,GAAS1E,GAAG2G,OAAOkK,GAASjH,MAChClF,GAAOuM,WAAWC,aACdlR,GAAG2G,OAAOqK,GAAUrK,OAAOkK,GAASjH,OACpClF,GACJlF,EAAGG,MAAMgR,KAAKC,EAAOC,KAI7B7Q,IAAG+G,UAAU6J,GAAOzP,GAAE,QAAU,WAC5BgQ,QAAQC,UAAU7Q,KAAKwQ,KAAMxQ,KAAK8Q,YAAa9Q,KAAKwQ,MACpDD,EAAKvQ,KAAKwQ,MACV/Q,GAAGuE,MAAM+M,mBAGbtR,GAAG2G,OAAOjF,QAAQP,GAAE,WAAa,WACzBnB,GAAGuE,MAAMgN,OACTT,EAAK9Q,GAAGuE,MAAMgN,UAW1B/R,EAAGG,MAAM6R,oBAAsB,SAAUC,GACrC,GAAIjS,EAAGG,MAAM4P,WAAWkC,EAAYxH,QAAUzK,EAAGG,MAAM4P,WAAWkC,EAAYC,MAAO,CACjF,GAAIC,GAAWC,SAASH,EAAYxH,MAAK,aAAc4H,QAAO,KAAI,IAAO,IACrEC,EAAaL,EAAYC,OAAO9P,MACpC,OAAOpC,GAAGG,MAAM8H,UAAUqK,EAAaH,EAAW,IAEtD,MAAO,IAOXnS,EAAGG,MAAM8H,UAAY,SAASsK,GAC1B,OAAKvS,EAAGG,MAAM+P,SAASqC,IAChBpC,MAAMoC,IACA,OAANA,GACAA,IAAMxI,EAAAA,GACNwI,MAAOxI,EAAAA,GAEH,EAEJwI,GAMX/R,GAAG4D,UAAUxD,UAAU4R,gBAAkB,SAASC,GAC9C,GAAIC,IAAQ3R,MAAMW,UAAUP,MAAMC,KAAKC,UAAW,GAClD,OAAOoR,GAAYtE,WAAW1M,MAAMgR,EAAaC,IAOrD1S,EAAGG,MAAMsS,YAAc,SAAS/R,EAAU+H,GACtC,KAAM1H,eAAgBf,GAAGG,MAAMsS,aAC3B,MAAO,IAAIzS,GAAGG,MAAMsS,YAAY/R,EAAU+H,EAG9C,IAAIkK,GAAyB9O,SAAb4E,EAAyBA,EAAW,IAChDmK,KACAC,EAAO9R,IAEXA,MAAKX,OAAS,SAASA,GAevB,MAdIA,MAAYe,MAAMC,KAAKC,UAAW,GAClCjB,EAAO6J,QAAQ,SAAS6I,GACpBA,EAAMC,YAAa,EACnB,SAAUC,GACNA,EAAEtS,SAASiB,GAAE,YAAc,SAASsR,GAChCD,EAAED,YAAa,EACfF,EAAKK,UAAS,YAEnBJ,GAECF,EAAYO,QAAQL,GAAS,GAC7BF,EAAYnP,KAAKqP,KAGtB/R,MAGPA,KAAKqS,MAAQ,SAAS3K,GACD5E,SAAb4E,IACAkK,EAAYlK,GAEhBmK,MAGJ7R,KAAKoN,WAAa,SAAS/J,EAAWsO,EAAMjK,GAcxC,GAbAiK,EAAOrR,UAAUe,OAAS,KAAOjB,MAAMC,KAAKC,UAAW,MAGnDoH,EADAiK,EAAKtQ,OAAS,EACHsQ,EAAKW,MAESxP,SAAd8O,EAA0BA,EAAY,IAErDvO,EAAU2O,YAAa,EAEnBH,EAAYO,QAAQ/O,GAAa,GACjCwO,EAAYnP,KAAKW,GAGJ,IAAbqE,EAIA,MAHArE,GAAU2O,YAAa,EACvB3O,EAAUgK,MAAQ,WAAa,MAAOrN,OACtCqD,EAAUqE,SAAW,WAAa,MAAO1H,OAClCqD,CAEkB,KAArBA,EAAUhC,OACVgC,EAAU2O,YAAa,EAChB3O,EAAUkP,MAAO,SAASnL,GAAI,OAAQA,EAAE/F,SAC/CgC,EAAU2O,YAAa,EAEvB3O,EAAU2O,YAAa,CAG3B,IAAIR,GAAI,CACR,OAAOnO,GACF+J,aACA1F,SAASA,GACTpE,KAAK,aAAckO,IACnBlO,KAAI,MAAQ,SAAS8D,EAAGnF,GACT,MAANuP,IACFnO,EAAU2O,YAAa,EACvBF,EAAKK,UAAUzR,MAAMV,KAAM2R,OAM/C3R,KAAKmS,UAAY,WACTN,EAAYU,MAAO,SAASnL,GAAI,MAAOA,GAAE4K,eACzCH,EAAY3I,QAAS,SAAS9B,GAAIA,EAAE4K,YAAa,IACjDrS,EAASwS,UAAUzR,MAAMV,KAAMM,cAY3CrB,EAAGG,MAAMoT,WAAa,SAASC,GAC3B,GAAIC,GAAUpS,UAAUe,OAAS,KAAOjB,MAAMC,KAAKC,UAAW,KAC9DoS,GAAQxJ,QAAQ,SAASyJ,GACrB,IAAK,GAAIlH,KAAOkH,GAAQ,CACpB,GAAIlG,GAAUxN,EAAGG,MAAMqN,QAAQgG,EAAIhH,IAC/BiB,EAAWzN,EAAGG,MAAMsN,SAAS+F,EAAIhH,IACjCmH,EAAS3T,EAAGG,MAAMsN,SAASiG,EAAOlH,GAElCiB,KAAaD,GAAWmG,EACxB3T,EAAGG,MAAMoT,WAAWC,EAAIhH,GAAMkH,EAAOlH,IAErCgH,EAAIhH,GAAOkH,EAAOlH,OAUlCxM,EAAGG,MAAM4R,MAAQ,WACb,KAAMhR,eAAgBf,GAAGG,MAAM4R,OAC3B,MAAO,IAAI/R,GAAGG,MAAM4R,KAExB,IAAIA,MAEA6B,EAAY,aACZC,EAAY,WAAY,UACxBC,EAAO,KACPC,EAAU,IAEdhT,MAAKL,SAAWF,GAAGE,SAAQ,SAAW,OAEtCK,KAAKL,SAASiB,GAAE,MAAQ,SAASoQ,GAC7B6B,EAAU7B,GAAO,KA
GrBhR,KAAKiT,OAAS,SAASC,GAEnB,MADAJ,GAAYI,EACLlT,MAGXA,KAAKmT,OAAS,SAASD,EAAI9Q,GAUvB,MATKA,KACDA,EAAW,cAEfyQ,EAAY,SAAS7B,EAAOoC,GACxBF,EAAGlC,GACCoC,GACAhR,KAGDpC,MAGXA,KAAK+S,KAAO,SAAS/B,GACjB+B,EAAOA,MACP9T,EAAGG,MAAMoT,WAAWO,EAAM/B,GAG9B,IAAIqC,GAAO,WACP,GAAIC,GAAWR,GAEf,IAAIS,KAAKC,UAAUF,KAAcC,KAAKC,UAAUxC,GAC5C,OAAO,CAGX,KAAK,GAAIvF,KAAO6H,GACOxQ,SAAfkO,EAAMvF,KACNuF,EAAMvF,OAEVuF,EAAMvF,GAAO6H,EAAS7H,GACtBuH,GAAU,CAEd,QAAO,EAGXhT,MAAKoT,OAAS,WACNL,IACAF,EAAUE,GAAM,GAChBA,EAAO,MAEPM,EAAKhT,KAAKL,OACVA,KAAKL,SAAS8T,OAAOzC,KAkBjC/R,EAAGG,MAAM0O,YAAc,SAAS6D,GAQ5B,MAPIA,IACAlS,GAAGiU,IAAI/B,GAAMzI,QAAO,SAAWuC,EAAIR,GAC3BhM,EAAGG,MAAM4P,WAAWhP,KAAKyL,KACzBzL,KAAKyL,GAAKR,IAEfnL,KAAKE,OAELA,MAWXf,EAAGG,MAAMuU,WAAa,SAASC,EAAUrQ,GAErC,GAAIsQ,GAAY,EACZ5R,EAAI,CACR,KAAKA,EAAGA,EAAIsB,EAAKlC,OAAQY,GAAK,EAAG,CAC7B,GAAI6R,GAAavQ,EAAKtB,IAAMsB,EAAKtB,GAAG6F,OAASvE,EAAKtB,GAAG6F,OAAOzG,OAAS,CACrEwS,GAAYC,EAAaD,EAAYC,EAAaD,EAWtD,MATA5U,GAAGiC,IAAG,8BAAgC0S,GACtC3U,EAAGiC,IAAG,gCAAkC2S,GAExCD,EAAWA,EAAWC,EAAYD,EAAWC,EAAY,EAAID,EAE7DA,EAAsB,EAAXA,EAAe,EAAIA,EAE9BA,EAAWjL,KAAKwB,MAAMyJ,GACtB3U,EAAGiC,IAAG,8BAAgC0S,GAC/BA,GAOX3U,EAAGG,MAAM2U,WAAa,SAASH,EAAUrQ,GAErC,MAAOtE,GAAGG,MAAMuU,WAAWC,EAAUrQ,IAYzCtE,EAAGG,MAAM4U,WAAa,SAASjS,EAAOR,GAE9BQ,EAAMkS,QAAUlS,EAAMkS,OAAO1S,GAC7BQ,EAAMR,GAAQQ,EAAMkS,OAAO1S,IAE3BQ,EAAMR,GAAQ,SAAUqG,GACpB,MAAKtH,WAAUe,QACfU,EAAMmS,WAAW3S,IAAQ,EACzBQ,EAAMgM,SAASxM,GAAQqG,EAChB7F,GAHuBA,EAAMgM,SAASxM,IAOjDQ,EAAK,IAAOR,GAAQ,SAASqG,GACzB,MAAKtH,WAAUe,QACVU,EAAMmS,WAAW3S,KAClBQ,EAAMgM,SAASxM,GAAQqG,GAEpB7F,GAJuBA,EAAMgM,SAASxM,MAazDtC,EAAGG,MAAMqP,YAAc,SAAS1M,GAC5BA,EAAMmS,WAAanS,EAAMmS,cACzB,IAAIC,GAAMnG,OAAOoG,oBAAoBrS,EAAMgM,cACvCsG,EAAQrG,OAAOoG,oBAAoBrS,EAAMkS,WAC7CE,GAAMA,EAAIxT,OAAO0T,EACjB,KAAK,GAAIpS,KAAKkS,GACVlV,EAAGG,MAAM4U,WAAWjS,EAAOoS,EAAIlS,KAUvChD,EAAGG,MAAMkV,iBAAmB,SAASnQ,EAAQoQ,EAAWC,GACpDrQ,EAAOsQ,WAAaD,EAAO7T,OAAOwD,EAAOsQ,gBACzCD,EAAOE,QAAQH,GACfC,EAAOE,QAAQvQ,GACf1E,GAAGkV,OAAOjU,MAAMV,KAAMwU,IAO1BvV,EAAGG,MAAMwV,YAAc,SAAS7F,GAC5B,MAAOA,GAAE8F,OAAOnJ,OAAO,SAASoJ,EAAM5I,GAClC,OAAQA,GAAO4I,GAAQ/F,EAAE7C,EAAM,MAUvCjN,EAAGG,MAAM2V,UAAYtV,GAAGiU,MAMxBzU,EAAGG,MAAM4V,OAAS,WAGd,QAASA,GAAO5N,EAAEnF,GACd,GAAIgT,GAAItQ,EAAKtE,KAAKL,KAAKoH,EAAEnF,GACrBiT,EAAIvG,EAAKtO,KAAKL,KAAKoH,EAAEnF,EACzB,OAAsC,KAAlCxC,GAAG0V,IAAIC,YAAYhD,QAAQ6C,GACpBxV,GAAG0V,IAAIH,SAASrQ,KAAKsQ,GAAGtG,KAAKuG,KAE7BjW,EAAGG,MAAM2V,UAAU7G,IAAI+G,GAAGC,GARzC,GAAIvQ,GACAgK,EAAO,EAoBX,OAVAqG,GAAOrQ,KAAO,SAASiD,GACnB,MAAKtH,WAAUe,QACfsD,EAAOlF,GAAG4V,QAAQzN,GACXoN,GAFuBrQ,GAIlCqQ,EAAOrG,KAAO,SAAS/G,GACnB,MAAKtH,WAAUe,QACfsN,EAAOlP,GAAG4V,QAAQzN,GACXoN,GAFuBrG,GAI3BqG,GAUX/V,EAAGG,MAAMkW,eAAiB,SAASnR,EAAQwO,GAEvC,GAAIwB,GAAMnG,OAAOoG,oBAAoBzB,EAAO5E,cACxCsG,EAAQrG,OAAOoG,oBAAoBzB,EAAOsB,YAC1CsB,EAAY5C,EAAO6C,eACnBC,EAAQ9C,EAAO8B,eACf9C,EAAOwC,EAAIxT,OAAO0T,GAAO1T,OAAO4U,GAAW5U,OAAO8U,EACtD9D,GAAK+C,QAAQ/B,GACbhB,EAAK+C,QAAQvQ,GACb1E,GAAGkV,OAAOjU,MAAMV,KAAM2R,GAEtBxN,EAAOqR,WAAavW,EAAGG,MAAMwV,YAAYT,EAAIxT,OAAO0T,GAAO1T,OAAO4U,GAAW5U,OAAOwT,GAAKxT,OAAOwD,EAAOqR,iBACvGrR,EAAOsQ,WAAaxV,EAAGG,MAAMwV,YAAYa,EAAM9U,OAAOwD,EAAOsQ,kBAOjExV,EAAGG,MAAMsW,QAAU,SAASP,GACxBA,EAAIxL,SAAOgM,YAAa,KAO5B1W,EAAGG,MAAMwW,eAAiB,SAAStP,EAAQH,GACvC,MAAQG,IAAU+K,SAASlL,EAAUuD,MAAK,UAAY,KAAO,KAOjEzK,EAAGG,MAAMyW,cAAgB,SAASxP,EAAOF,GACrC,MAAQE,IAASgL,SAASlL,EAAUuD,MAAK,SAAW,KAAO,KAO/DzK,EAAGG,MAAMyF,gBAAkB,SAASyB,EAAQH,EAAW3B,GACnD,MAAOmE,MAAKL,IAAI,EAAErJ,EAAGG,MAAMwW,eAAetP,EAAQH,GAAa3B,EAAOE,IAAMF,EAAOsR,SAMvF7W,EAAGG,MAAMwF,eAAiB,SAASyB,EAAOF,EAAW3B,GACjD,MAAOmE,MAAKL,IAAI,EAAErJ,EAAGG,MAAMyW,cAAcxP,EAAOF,GAAa3B,EAAOC,KAAOD,
EAAOuR,QAMtF9W,EAAGG,MAAM4W,OAAS,SAASjU,EAAOoE,GAC9B,GAAI8P,GAAMlU,EAAM8L,UACZrJ,EAASyR,EAAIzR,SACbwR,EAASC,EAAID,SACbzS,EAAkB,MAAVyS,GAAkB,uBAA0BA,GACpD1P,EAASrH,EAAGG,MAAMyF,gBAAgB,KAAMsB,EAAW3B,GACnD6B,EAAQpH,EAAGG,MAAMwF,eAAe,KAAMuB,EAAW3B,GACjDuC,EAAIvC,EAAOC,KAAO4B,EAAM,EACxBmI,EAAIhK,EAAOE,IAAM4B,EAAO,CAG5BH,GAAUK,UAAS,KAAMc,QAEzB,IAAI4O,GAAa/P,EAAUK,UAAS,cAAejD,KAAKA,EAExD2S,GAAWxP,QAAQC,OAAM,QACpBC,KAAI,QAAU,kBACdA,KAAI,KAAO,SACX8C,MAAK,cAAgB,UAE1BwM,EACKtP,KAAI,IAAMG,GACVH,KAAI,IAAM4H,GACV2C,KAAK,SAAS8D,GAAI,MAAOA,MAMlChW,EAAGG,MAAM+W,UAAY,SAAUhF,EAAM9K,GACjC8K,EAAK7N,KAAK,WAUN,IATA,GAEI8S,GAFAjF,EAAO1R,GAAG2G,OAAOpG,MACjBqW,EAAQlF,EAAKA,OAAOmF,MAAK,OAAQC,UAEjCtP,KACAuP,EAAa,EACbC,EAAa,IACbjI,EAAI2C,EAAKvK,KAAI,KACb8P,EAAKC,WAAWxF,EAAKvK,KAAI,OACzBgQ,EAAQzF,EAAKA,KAAK,MAAMxK,OAAM,SAAUC,KAAI,IAAM,GAAGA,KAAI,IAAM4H,GAAG5H,KAAI,KAAO8P,EAAK,MAC/EN,EAAOC,EAAM/D,OAChBrL,EAAKvE,KAAK0T,GACVQ,EAAMzF,KAAKlK,EAAK4P,KAAI,MAChBD,EAAMvN,OAAOyN,wBAA0BzQ,IACvCY,EAAKqL,MACLsE,EAAMzF,KAAKlK,EAAK4P,KAAI,MACpB5P,GAAQmP,GACRQ,EAAQzF,EAAKxK,OAAM,SAAUC,KAAI,IAAM,GAAGA,KAAI,IAAM4H,GAAG5H,KAAI,OAAS4P,EAAaC,EAAaC,EAAK,MAAMvF,KAAKiF,OAS9HnX,EAAGG,MAAM2X,YAAc,SAAUC,EAAQC,GACrC,GAAID,IAAWC,EACX,OAAO,CAEX,KAAKD,IAAWC,EACZ,OAAO,CAGX,IAAID,EAAO3V,QAAU4V,EAAO5V,OACxB,OAAO,CAEX,KAAK,GAAIY,GAAI,EACTiV,EAAIF,EAAO3V,OAAY6V,EAAJjV,EAAOA,IAE1B,GAAI+U,EAAO/U,YAAc9B,QAAS8W,EAAOhV,YAAc9B,QAEnD,IAAKlB,EAAG8X,YAAYC,EAAO/U,GAAIgV,EAAOhV,IAClC,OAAO,MACR,IAAI+U,EAAO/U,IAAMgV,EAAOhV,GAE3B,OAAO,CAGf,QAAO,GCpsBXhD,EAAGI,OAAO8X,KAAO,WACb,YAqCA,SAASpV,GAAMsB,GAgTX,MA/SAqO,GAAYW,QACZhP,EAAUC,KAAK,SAASC,GACpB,GAAI4C,GAAY1G,GAAG2G,OAAOpG,KAC1Bf,GAAGG,MAAMsW,QAAQvP,EAGjB,IAAII,GAAOJ,EAAUK,UAAS,qBAAsBjD,MAAMA,IACtDkD,EAAYF,EAAKG,QAAQC,OAAM,KAAMC,KAAI,QAAU,wBAEnDwQ,GADS3Q,EAAUE,OAAM,KACrBJ,EAAKH,OAAM,KAEL,QAAViR,EACAF,EAAKE,MAAMA,IACW,OAAjBF,EAAKG,UAAsC,UAAjBH,EAAKG,WACpCH,EAAKE,MAAM1O,KAAKC,IAAIrB,EAAM7B,QAAQ,GAAK6B,EAAM7B,QAAQ,IAAM,KAG/D0R,EAAE3F,gBAAgBC,EAAa,QAAQrR,KAAK8W,GAE5CI,EAASA,GAAUJ,EAAK5P,OAExB,IAAIiQ,GAAML,EAAKM,YACJ,OAAPD,IACAA,EAAMD,EAAOE,aAGjB,IAAIC,GAAYN,EAAE5Q,UAAS,qBACtBjD,MAAMoU,GAAiB,MAC5BD,GAAUrQ,OAAOC,SAGAxE,SAAbsO,GACAgG,EAAE5Q,UAAS,KAAMJ,OAAM,QAASsD,MAAK,YAAc0H,EAGvD,IAAIwG,GACAC,EACAC,CACJ,QAAQX,EAAKG,UACT,IAAK,MACDI,EAAUhR,QAAQC,OAAM,QAASC,KAAI,QAAU,gBACjDkR,EAAI,EACyB,IAAzBvQ,EAAM7B,QAAQrE,OAChByW,EAAIC,EAA+B,EAAnBxQ,EAAM7B,QAAQ,GAAS6B,EAAM5B,YAAc,EACzB,IAAzB4B,EAAM7B,QAAQrE,OACvByW,EAAIC,EAAYxQ,EAAM7B,QAAQ,GAAK6B,EAAM7B,QAAQ,GAAK6B,EAAM5B,YAAc4B,EAAM7B,QAAQ,GAC9E6B,EAAM7B,QAAQrE,OAAS,IACjCyW,EAAIvQ,EAAM7B,QAAQ6B,EAAM7B,QAAQrE,OAAO,IAAIkG,EAAM7B,QAAQ,GAAG6B,EAAM7B,QAAQ,KAE1EgS,EACK9Q,KAAI,cAAgB,UACpBA,KAAI,IAAM,GACVA,KAAI,IAAMkR,EAAE,GACbE,IACAH,EAAatR,EAAKC,UAAS,mBACtBjD,KAAKgE,EAAM3B,UAChBiS,EAAWnR,QAAQC,OAAM,KAAMC,KAAI,QAAS,SAASQ,EAAEnF,GAC/C,OAAO,gBAAe,kBAA0B,GAALA,EAAS,eAAa,gBAAmB4U,KAAI,OAC7FlQ,OAAM,QACTkR,EAAWxQ,OAAOC,SAClBuQ,EACKjR,KAAI,YAAc,SAASQ,EAAEnF,GAC1B,MAAO,aAAehD,EAAGG,MAAM8H,UAAUK,EAAMH,IAAM,QAExDhB,OAAM,QACNQ,KAAI,KAAO,UACXA,KAAI,KAAOuQ,EAAKc,eAChBrR,KAAI,cAAgB,UACpBuK,KAAK,SAAS/J,EAAEnF,GACb,GAAIkG,GAAIqP,EAAIpQ,EACZ,QAAO,GAAMe,GAAG5D,MAAK,OAAU,GAAK4D,IAE5C0P,EAAWpG,gBAAgBC,EAAa,eACnC9K,KAAI,YAAc,SAASQ,EAAEnF,GAC1B,MAAO,aAAehD,EAAGG,MAAM8H,UAAUK,EAAM7B,QAAQzD,IAAM,QAGzE,MACJ,KAAK,SACD2V,EAAeM,EAAoB,EACnC,IAAIC,GAAe,GACfC,EAAa,EACbC,EAASjB,EAAE5Q,UAAS,KAAMJ,OAAM,QAChCkS,EAAmB,EACvB,IAAIC,EAAa,IAAK,CAElBF,EAAOzR,KAAI,YAAc,IAEzByR,EAAO/U,KAAK,SAAS8D,EAAEnF,GACnB,GAAIuW,GAAMxY,KAAKwM,wBACXnG,EAAQmS,EAAInS,KAChB+R,GAAaI,EAAIlS,OACdD,EAAQ8R,IAAcA,EAAe9R,KAE5CiS,EAAmB,UAAYC,EAAe,OAASH,EAAW,EAAIjB,
EAAKc,eAAiB,GAE5F,IAAIQ,GAAM9P,KAAKC,IAAID,KAAK8P,IAAIF,EAAa5P,KAAK+P,GAAG,KACjDd,IAAgBa,EAAMA,EAAIN,EAAeA,GAAc,GAEvDE,EACKzR,KAAI,YAAc0R,GAClB5O,MAAK,cAAgB6O,EAAa,IAAM,EAAI,QAAU,WAEvDI,GACAN,EACKzR,KAAI,YAAc,SAASQ,EAAEnF,GAC1B,MAAO,gBAAkBA,EAAI,GAAK,EAAI,IAAM,MAAQ,MAG5DoW,EAAOzR,KAAI,YAAc,iBAGjC8Q,GAAUhR,QAAQC,OAAM,QAASC,KAAI,QAAU,gBAC/CkR,EAAI,EACyB,IAAzBvQ,EAAM7B,QAAQrE,OACdyW,EAAIC,EAA+B,EAAnBxQ,EAAM7B,QAAQ,GAAS6B,EAAM5B,YAAc,EAC3B,IAAzB4B,EAAM7B,QAAQrE,OACrByW,EAAIC,EAAYxQ,EAAM7B,QAAQ,GAAK6B,EAAM7B,QAAQ,GAAK6B,EAAM5B,YAAc4B,EAAM7B,QAAQ,GAChF6B,EAAM7B,QAAQrE,OAAS,IAC/ByW,EAAIvQ,EAAM7B,QAAQ6B,EAAM7B,QAAQrE,OAAO,IAAIkG,EAAM7B,QAAQ,GAAG6B,EAAM7B,QAAQ,KAE9EgS,EACK9Q,KAAI,cAAgB,UACpBA,KAAI,IAAMgR,GACVhR,KAAI,IAAMkR,EAAE,GACbE,IAEAH,EAAatR,EAAKC,UAAS,mBAEtBjD,MAAMgE,EAAM3B,SAAS,GAAI2B,EAAM3B,SAAS2B,EAAM3B,SAASvE,OAAS,KACrEwW,EAAWnR,QAAQC,OAAM,KAAMC,KAAI,QAAS,SAASQ,EAAEnF,GAC/C,OAAO,gBAAe,kBAA0B,GAALA,EAAS,eAAa,gBAAmB4U,KAAI,OAC7FlQ,OAAM,QACTkR,EAAWxQ,OAAOC,SAClBuQ,EACKjR,KAAI,YAAc,SAASQ,EAAEnF,GAC1B,MAAO,aAAehD,EAAGG,MAAM8H,UAAWK,EAAMH,IAAM2Q,EAAYxQ,EAAM5B,YAAc,EAAI,IAAO,QAEpGS,OAAM,QACNQ,KAAI,KAAO,SACXA,KAAI,IAAMuQ,EAAKc,eACfrR,KAAI,YAAc0R,GAClB5O,MAAK,cAAgB6O,EAAgBA,EAAa,IAAM,EAAI,QAAU,MAAS,UAC/EpH,KAAK,SAAS/J,EAAEnF,GACb,GAAIkG,GAAIqP,EAAIpQ,EACZ,QAAO,GAAMe,GAAG5D,MAAK,OAAU,GAAK4D,IAE5C0P,EAAWpG,gBAAgBC,EAAa,kBACnC9K,KAAI,YAAc,SAASQ,EAAEnF,GAC1B,MAAO,aAAehD,EAAGG,MAAM8H,UAAWK,EAAMH,IAAM2Q,EAAYxQ,EAAM5B,YAAc,EAAI,IAAO,QAI7G,MACJ,KAAK,QACD+R,EAAUhR,QAAQC,OAAM,QAASC,KAAI,QAAU,gBAC/C8Q,EACKhO,MAAK,cAAgBkP,EAAe,SAAW,SAC/ChS,KAAI,YAAcgS,EAAe,aAAe,IAChDhS,KAAI,IAAMgS,GAAiBjQ,KAAKL,IAAI9D,EAAOuR,MAAO1P,GAAS,IAAM6R,GAAqB,GAAM,KAC5FtR,KAAI,IAAMgS,EAAgBnZ,GAAG6I,IAAIf,EAAM7B,SAAW,EAAKyR,EAAKc,eAC7DD,IACAH,EAAatR,EAAKC,UAAS,mBACtBjD,KAAKgE,EAAM3B,UAChBiS,EAAWnR,QAAQC,OAAM,KAAMC,KAAI,QAAS,SAASQ,EAAEnF,GAC/C,OAAO,gBAAe,kBAA0B,GAALA,EAAS,eAAa,gBAAmB4U,KAAI,OAC7FlQ,OAAM,QACJ+C,MAAK,UAAY,GACtBmO,EAAWxQ,OAAOC,SAClBuQ,EACKjR,KAAI,YAAc,SAASQ,EAAEnF,GAC1B,MAAO,eAAiBhD,EAAGG,MAAM8H,UAAUK,EAAMH,IAAM,MAE1DhB,OAAM,QACNQ,KAAI,KAAO,SACXA,KAAI,IAAM,GACVA,KAAI,IAAMuQ,EAAKc,eACfvO,MAAK,cAAgB,SACrByH,KAAK,SAAS/J,EAAGnF,GACd,GAAIkG,GAAIqP,EAAIpQ,EACZ,QAAO,GAAMe,GAAG5D,MAAK,OAAU,GAAK4D,IAE5C0P,EAAWpG,gBAAgBC,EAAa,iBACnC9K,KAAI,YAAc,SAASQ,EAAEnF,GAC1B,MAAO,eAAiBhD,EAAGG,MAAM8H,UAAUK,EAAM7B,QAAQzD,IAAM,MAElEmE,OAAM,QACNsD,MAAK,UAAY,GAE1B,MACJ,KAAK,OASDgO,EAAUhR,QAAQC,OAAM,QAASC,KAAI,QAAU,gBAC/C8Q,EACKhO,MAAK,cAAgBkP,EAAe,SAAW,OAC/ChS,KAAI,YAAcgS,EAAe,cAAgB,IACjDhS,KAAI,IAAMgS,GAAiBjQ,KAAKL,IAAI9D,EAAOC,KAAM4B,GAAS,IAAM6R,GAAqB,GAAM,KAC3FtR,KAAI,IAAMgS,GAAiBnZ,GAAG6I,IAAIf,EAAM7B,SAAW,GAAMyR,EAAKc,eAC/DD,IACAH,EAAatR,EAAKC,UAAS,mBACtBjD,KAAKgE,EAAM3B,UAChBiS,EAAWnR,QAAQC,OAAM,KAAMC,KAAI,QAAS,SAASQ,EAAEnF,GAC/C,OAAO,gBAAe,kBAA0B,GAALA,EAAS,eAAa,gBAAmB4U,KAAI,OAC7FlQ,OAAM,QACJ+C,MAAK,UAAY,GACtBmO,EAAWxQ,OAAOC,SAClBuQ,EACKjR,KAAI,YAAc,SAASQ,EAAEnF,GAC1B,MAAO,eAAiBhD,EAAGG,MAAM8H,UAAUqQ,EAAOnQ,IAAM,MAE3DhB,OAAM,QACNQ,KAAI,KAAO,SACXA,KAAI,IAAM,GACVA,KAAI,KAAOuQ,EAAKc,eAChBrR,KAAI,cAAgB,OACpBuK,KAAK,SAAS/J,EAAEnF,GACb,GAAIkG,GAAIqP,EAAIpQ,EACZ,QAAO,GAAMe,GAAG5D,MAAK,OAAU,GAAK4D,IAE5C0P,EAAWpG,gBAAgBC,EAAa,iBACnC9K,KAAI,YAAc,SAASQ,EAAEnF,GAC1B,MAAO,eAAiBhD,EAAGG,MAAM8H,UAAUK,EAAM7B,QAAQzD,IAAM,MAElEmE,OAAM,QACNsD,MAAK,UAAY,IA2BlC,GAvBAgO,EAAUvG,KAAK,SAAS/J,GAAK,MAAOA,MAEhC4Q,GAAiC,SAAlBb,EAAKG,UAAyC,UAAlBH,EAAKG,WAEhDF,EAAE5Q,UAAS,KACNlD,KAAK,SAAS8D,EAAEnF,GACbxC,GAAG2G,OAAOpG,MAAMoG,OAAM,QAASQ,KAAI,UAAY,IAC3CW,EAAMH,GAAKG,EAAM7B,QAAQ,GAAK,IAAM6B,EAAMH,GAAKG,EAAM7B,QAAQ,GAAK,OAC9D0B,EAAI,OAAa,OAAJA,IACb3H,GAAG2G,OAAOpG,MAAM4G,KAAI,
UAAY,GAEpCnH,GAAG2G,OAAOpG,MAAMoG,OAAM,QAASQ,KAAI,UAAY,MAKvDW,EAAM3B,SAAS,IAAM2B,EAAM3B,SAAS,IAA2B,GAArB2B,EAAM3B,SAAS,IACzDW,EAAKC,UAAS,mBAAoBkD,MAAK,UAAY,SAAUtC,EAAGnF,GAC5D,MAAQA,GAAQ,EAAJ,KAKpB+V,IAAiC,QAAlBb,EAAKG,UAAwC,WAAlBH,EAAKG,UAAwB,CACvE,GAAIuB,KACJtS,GAAKC,UAAS,mBACTlD,KAAK,SAAS8D,EAAEnF,GACb,IACQA,EACA4W,EAAYnW,KAAK6E,EAAMH,GAAKpH,KAAKwM,wBAAwBnG,MAAQ,GAEjEwS,EAAYnW,KAAK6E,EAAMH,GAAKpH,KAAKwM,wBAAwBnG,MAAQ,GACxE,MAAOyS,GACA7W,EACA4W,EAAYnW,KAAK6E,EAAMH,GAAK,GAE5ByR,EAAYnW,KAAK6E,EAAMH,GAAK,MAI5CgQ,EAAE5Q,UAAS,KAAMlD,KAAK,SAAS8D,EAAGnF,IAC1BsF,EAAMH,GAAKyR,EAAY,IAAMtR,EAAMH,GAAKyR,EAAY,MAChDzR,EAAI,OAAa,OAAJA,EACb3H,GAAG2G,OAAOpG,MAAMsH,SAEhB7H,GAAG2G,OAAOpG,MAAMoG,OAAM,QAASkB,YAM/C8P,EAAE5Q,UAAS,SACNkF,OAAO,SAAUtE,GAMd,OAAQuP,WAAWhO,KAAK6E,MAAU,IAAJpG,GAAc,MAAmBtE,SAANsE,IAE5DuC,QAAO,QAAS,GAGrB4N,EAAShQ,EAAMwR,SAInBrH,EAAYS,UAAS,kBACdpQ,EA/UX,GAAIoV,GAAO1X,GAAG0V,IAAIgC,OACd5P,EAAQ9H,GAAG8H,MAAMC,SAEjBhD,GAAUE,IAAK,EAAGqR,MAAO,EAAGD,OAAQ,EAAGrR,KAAM,GAC3C4B,EAAQ,GACRC,EAAS,GACTqR,EAAgB,KAChBK,GAAa,EACbO,EAAe,EACfK,GAAe,EACfD,GAAgB,EAChBZ,GAAY,EACZV,EAAQ,KACRa,EAAoB,EACpB9G,EAAWtO,OACX4E,EAAW,IACX/H,EAAWF,GAAGE,SAAQ,YAE5BwX,GACK5P,MAAMA,GACN+P,OAAM,UACNG,WAAW,SAASrQ,GAAK,MAAOA,IAOrC,IAAImQ,GACA7F,EAAczS,EAAGG,MAAMsS,YAAY/R,EAAU+H,EAkWjD,OAxCA3F,GAAMoV,KAAOA,EACbpV,EAAMpC,SAAWA,EAEjBoC,EAAM8L,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKiC,GAC1CA,EAAMgM,SAAWC,OAAOC,WAEpBiK,mBAAoBhK,IAAK,WAAW,MAAOgK,IAAqB/J,IAAK,SAASvG,GAAGsQ,EAAkBtQ,IACnG+Q,eAAoBzK,IAAK,WAAW,MAAOyK,IAAiBxK,IAAK,SAASvG,GAAG+Q,EAAc/Q,IAC3F2Q,cAAoBrK,IAAK,WAAW,MAAOqK,IAAgBpK,IAAK,SAASvG,GAAG2Q,EAAa3Q,IACzFgR,cAAoB1K,IAAK,WAAW,MAAO0K,IAAgBzK,IAAK,SAASvG,GAAGgR,EAAahR,IACzFoQ,YAAoB9J,IAAK,WAAW,MAAO8J,IAAc7J,IAAK,SAASvG,GAAGoQ,EAAWpQ,IACrF8P,WAAoBxJ,IAAK,WAAW,MAAOyJ,IAAiBxJ,IAAK,SAASvG,GAAG+P,EAAc/P,IAC3FtB,QAAoB4H,IAAK,WAAW,MAAO5H,IAAU6H,IAAK,SAASvG,GAAGtB,EAAOsB,IAC7EyP,OAAoBnJ,IAAK,WAAW,MAAOmJ,IAASlJ,IAAK,SAASvG,GAAGyP,EAAMzP,IAC3EvB,OAAoB6H,IAAK,WAAW,MAAO7H,IAAS8H,IAAK,SAASvG,GAAGvB,EAAMuB,IAC3EwJ,UAAoBlD,IAAK,WAAW,MAAOkD,IAAYjD,IAAK,SAASvG,GAAGwJ,EAASxJ,IAGjFpD,QAAS0J,IAAK,WAAW,MAAO1J,IAAU2J,IAAK,SAASvG,GACpDpD,EAAOE,IAAmB5B,SAAV8E,EAAElD,IAAuBkD,EAAElD,IAASF,EAAOE,IAC3DF,EAAOuR,MAAqBjT,SAAZ8E,EAAEmO,MAAuBnO,EAAEmO,MAASvR,EAAOuR,MAC3DvR,EAAOsR,OAAsBhT,SAAb8E,EAAEkO,OAAuBlO,EAAEkO,OAAStR,EAAOsR,OAC3DtR,EAAOC,KAAoB3B,SAAX8E,EAAEnD,KAAuBmD,EAAEnD,KAASD,EAAOC,OAE/DiD,UAAWwG,IAAK,WAAW,MAAOxG,IAAYyG,IAAK,SAASvG,GACxDF,EAASE,EACT8J,EAAYW,MAAM3K,KAEtBH,OAAQ2G,IAAK,WAAW,MAAO3G,IAAS4G,IAAK,SAASvG,GAClDL,EAAQK,EACRuP,EAAK5P,MAAMA,GACXwQ,EAAwC,kBAArBxQ,GAAMjC,WACzBrG,EAAGG,MAAMkV,iBAAiBvS,EAAOwF,GAAO,SAAW,QAAS,YAAa,mBAIjFtI,EAAGG,MAAMqP,YAAY1M,GACrB9C,EAAGG,MAAMkV,iBAAiBvS,EAAOoV,GAAM,SAAW,aAAc,gBAAiB,WAAY,cAAe,eAC5GlY,EAAGG,MAAMkV,iBAAiBvS,EAAOwF,GAAO,SAAW,QAAS,YAAa,eAElExF,GCtYX9C,EAAGI,OAAO2Z,QAAU,WAChB,YAsCA,SAASjX,GAAMsB,GA+NX,MA9NAqO,GAAYW,QACZhP,EAAUC,KAAK,SAASC,GACpB,GAAIqB,GAAiByB,EAAQ7B,EAAOC,KAAOD,EAAOuR,MAC9ClR,EAAkByB,EAAS9B,EAAOE,IAAMF,EAAOsR,MAEnD3P,GAAY1G,GAAG2G,OAAOpG,MACtBf,EAAGG,MAAMsW,QAAQvP,GAGjBd,EAAOO,OAAOqT,GAAW1V,EAAKmQ,IAAI,SAAStM,EAAEnF,GAAK,MAAOiX,GAAK9R,EAAEnF,MAC3DqD,WAAW6T,IAAW,EAAGvU,GAAiB,GAG/C,IAAIwU,KACJ,KAAKC,EAAS,CAEV,GAAiBC,GAAMC,EAAnBzR,IACJvE,GAAK2F,QAAQ,SAAU9B,EAAGnF,GACtB,GAAIuX,GAAKC,EAAMrS,GAAIsS,EAAKC,EAAMvS,GAAIwS,EAAKC,EAAMzS,GAAI0S,EAAKC,EAAM3S,GACxD4S,EAAUC,EAAW7S,EACrB4S,IACAA,EAAQ9Q,QAAQ,SAAUrI,EAAGoB,GACzB6F,EAAOpF,KAAKwX,EAAWrZ,EAAGoB,EAAGa,WAGjC8W,GAAM9R,EAAOpF,KAAKkX,GAClBJ,GAAM1R,EAAOpF,KAAK8W,GAClBE,GAAM5R,EAAOpF,KAAKgX,GAClBI,GAAMhS,EAAOpF,KAAKoX,KAE1BR,EAAO7Z,GAAGgJ,IAAIX,GACdyR,E
AAO9Z,GAAG6I,IAAIR,GACdsR,GAAUE,EAAMC,GAGpBY,EAAOvU,OAAOyT,GAAWD,GACzBe,EAAOzU,MAAM0U,IAAWvV,EAAiB,IAGzCwV,EAAUA,GAAWhV,EACrBiV,EAAUA,GAAWH,EAAOpB,OAAOrT,OAAOyU,EAAO,GAAGA,EAAO,IAG3D,IAAI5T,GAAOJ,EAAUK,UAAS,aAAcjD,MAAMA,GAClCgD,GAAKG,QAAQC,OAAM,KAAMC,KAAI,QAAU,eACvDL,GAAKK,KAAI,YAAc,aAAepC,EAAOC,KAAO,IAAMD,EAAOE,IAAM,IAEvE,IAAI6V,GAAWhU,EAAKC,UAAS,eAAgBjD,KAAK,SAAS6D,GAAK,MAAOA,KACnEoT,EAAWD,EAAS7T,QAAQC,OAAM,KAAM+C,MAAK,iBAAmB,MAAMA,MAAK,eAAiB,KAChG6Q,GACK3T,KAAI,QAAU,cACdA,KAAI,YAAc,SAASQ,EAAEnF,EAAEwY,GAAK,MAAO,cAAgBpV,EAAO6T,EAAK9R,EAAEnF,IAA2B,IAArBoD,EAAOM,aAAsB,SAC5GgE,QAAO,QAAU,SAASvC,GAAK,MAAOA,GAAEsT,QAC7CH,EACK9I,gBAAgBC,EAAa,wBAC7BhI,MAAK,iBAAmB,GACxBA,MAAK,eAAiB,KACtB2D,MAAM,SAASjG,EAAEnF,GAAK,MAAOA,GAAIyF,EAAWnE,EAAKlC,SACjDuF,KAAI,YAAc,SAASQ,EAAEnF,GAC1B,MAAO,cAAgBoD,EAAO6T,EAAK9R,EAAEnF,IAA2B,IAArBoD,EAAOM,aAAsB,SAEhF4U,EAASlT,OAAOC,SAKhBkT,EAASlX,KAAK,SAAS8D,EAAEnF,GACrB,GAAIuW,GAAM/Y,GAAG2G,OAAOpG,OACnB6Z,EAAOE,GAAO7Q,QAAQ,SAAUyR,GAC7B,GAAa7X,SAAT6X,EAAEvT,IAA6B,OAATuT,EAAEvT,GAAa,CACrC,GAAIqE,GAAOkP,IAAMd,EAAS,MAAQ,MAClCrB,GAAI7R,OAAM,QACP+C,MAAK,SAAWgG,EAAStI,IAAMmE,EAAMnE,EAAEnF,IACvC2E,KAAI,QAAU,iCAAmC6E,GACpD+M,EAAI7R,OAAM,QACP+C,MAAK,SAAWgG,EAAStI,IAAMmE,EAAMnE,EAAEnF,IACvC2E,KAAI,QAAU,8BAAgC6E,OAK7D,IAAImP,GAAY,WAAa,MAAwB,QAAhBC,EAA4C,GAArBxV,EAAOM,YAAoBgD,KAAKF,IAAI,GAAyB,GAArBpD,EAAOM,cACvGmV,EAAY,WAAa,MAA4B,IAArBzV,EAAOM,YAAqBiV,IAAY,GACxEG,EAAY,WAAa,MAA4B,IAArB1V,EAAOM,YAAqBiV,IAAY,IAG3Ef,EAAOE,GAAO7Q,QAAQ,SAAUyR,GAC7B,GAAIlP,GAAOkP,IAAMd,EAAS,MAAQ,OAC9BmB,EAAYL,IAAMd,EAASJ,EAAQE,CACvCY,GAASnU,OAAM,sCAAyCqF,GACrDgG,gBAAgBC,EAAa,wBAC3B9K,KAAI,KAA4B,IAArBvB,EAAOM,aAClBiB,KAAI,KAAO,SAASQ,EAAEnF,GAAK,MAAOkY,GAAOQ,EAAEvT,MAC3CR,KAAI,KAA4B,IAArBvB,EAAOM,aAClBiB,KAAI,KAAO,SAASQ,EAAEnF,GAAK,MAAOkY,GAAOa,EAAS5T,MACvDmT,EAASnU,OAAM,mCAAsCqF,GAClDgG,gBAAgBC,EAAa,wBAC3B9K,KAAI,KAAOkU,GACXlU,KAAI,KAAO,SAASQ,EAAEnF,GAAK,MAAOkY,GAAOQ,EAAEvT,MAC3CR,KAAI,KAAOmU,GACXnU,KAAI,KAAO,SAASQ,EAAEnF,GAAK,MAAOkY,GAAOQ,EAAEvT,SAGnDyS,EAAOE,GAAO7Q,QAAQ,SAAUyR,GAC7B,GAAIlP,GAAOkP,IAAMd,EAAS,MAAQ,MAClCW,GAAShU,UAAS,eAAkBiF,GACjC7K,GAAE,YAAc,SAASwG,EAAEnF,EAAEwY,GAC1Bhb,GAAG2G,OAAOpG,MAAM2J,QAAO,SAAU,GACjChK,EAASsb,kBACL5P,QAAUI,IAAKkP,EAAEvT,GAAImE,MAAOmE,EAAStI,IAAMmE,EAAMnE,EAAEqT,IACnD5Z,EAAGpB,GAAGuE,UAGbpD,GAAE,WAAa,SAASwG,EAAEnF,EAAEwY,GACzBhb,GAAG2G,OAAOpG,MAAM2J,QAAO,SAAU,GACjChK,EAASsF,iBACLoG,QAAUI,IAAKkP,EAAEvT,GAAImE,MAAOmE,EAAStI,IAAMmE,EAAMnE,EAAEqT,IACnD5Z,EAAGpB,GAAGuE,UAGbpD,GAAE,YAAc,SAASwG,EAAEnF,GACxBtC,EAASmG,kBAAkBjF,EAAGpB,GAAGuE,YAK3CwW,EAAS7T,OAAM,QACVC,KAAI,QAAU,kBAEdhG,GAAE,YAAc,SAASwG,EAAEnF,GACxBxC,GAAG2G,OAAOpG,MAAM2J,QAAO,SAAU,GACjChK,EAASsb,kBACLxP,IAAKyN,EAAK9R,GACV6D,MAAOiO,EAAK9R,GACZiE,SACMI,IAAK,KAAMR,MAAO0O,EAAMvS,GAAImE,MAAOmE,EAAStI,IAAMmE,EAAMnE,EAAEnF,KAC1DwJ,IAAK,KAAMR,MAAOiQ,EAAM9T,GAAImE,MAAOmE,EAAStI,IAAMmE,EAAMnE,EAAEnF,KAC1DwJ,IAAK,KAAMR,MAAOwO,EAAMrS,GAAImE,MAAOmE,EAAStI,IAAMmE,EAAMnE,EAAEnF,KAEhEsB,KAAM6D,EACNiB,MAAOpG,EACPpB,EAAGpB,GAAGuE,UAGbpD,GAAE,WAAa,SAASwG,EAAEnF,GACvBxC,GAAG2G,OAAOpG,MAAM2J,QAAO,SAAU,GACjChK,EAASsF,iBACLwG,IAAKyN,EAAK9R,GACV6D,MAAOiO,EAAK9R,GACZiE,SACMI,IAAK,KAAMR,MAAO0O,EAAMvS,GAAImE,MAAOmE,EAAStI,IAAMmE,EAAMnE,EAAEnF,KAC1DwJ,IAAK,KAAMR,MAAOiQ,EAAM9T,GAAImE,MAAOmE,EAAStI,IAAMmE,EAAMnE,EAAEnF,KAC1DwJ,IAAK,KAAMR,MAAOwO,EAAMrS,GAAImE,MAAOmE,EAAStI,IAAMmE,EAAMnE,EAAEnF,KAEhEsB,KAAM6D,EACNiB,MAAOpG,EACPpB,EAAGpB,GAAGuE,UAGbpD,GAAE,YAAc,SAASwG,EAAEnF,GACxBtC,EAASmG,kBAAkBjF,EAAGpB,GAAGuE,UAIzCuW,EAASnU,OAAM,uBACZqL,gBAAgBC,EAAa,qBAC3B9K,KAAI,IAAM,SAASQ,EAAEnF,GAAK,MAAOkY,GAAOR,EAAMvS,MAC9CR,KAAI,QAAUgU,GACdhU,KAAI,IAAMkU,GA
[base64-VLQ source-map "mappings" payload of a deleted minified JavaScript webapp asset — opaque machine-generated data, elided]
qB,IAAjBA,IAAoBA,EAAe,GAE/BC,EAAc/lB,GAAkB8lB,EAAe,GAAI,CACvDE,KACAF,GAEA,KAAK,GAAIG,GAAI,EAAGA,EAAIX,EAAa7oB,OAAQwpB,IACjCX,EAAaW,IAAMD,EAAaC,EAAIH,IAAiB,KACrDE,EAAaC,EAAIH,GAAgBR,EAAaW,GAGtDF,GAAcC,EAAaE,OAAO,SAASC,EAAMC,EAAK3iB,EAAO4iB,GACzD,MAAOF,GAAOC,IAKtB,IAAK,GADDE,MACKjpB,EAAI,EAAGkpB,EAAO,EAAOT,EAAJzoB,EAAkBA,IACxCipB,EAAWjpB,GAAKkpB,EAChBA,GAAQP,EAAa3oB,EAGzBoJ,GACKzE,KAAI,YAAc,SAASQ,EAAGnF,GAC3B,MAAO,aAAeipB,EAAWjpB,EAAIyoB,GAAgB,KAAO,EAAI/hB,KAAKwB,MAAMlI,EAAIyoB,GAAgBV,GAAe,MAIlH5G,EACAhM,EAAExQ,KAAI,YAAc,cAAgBP,EAAQ7B,EAAOuR,MAAQ4U,GAAe,IAAMnmB,EAAOE,IAAM,KAG7F0S,EAAExQ,KAAI,YAAc,eAAsBpC,EAAOE,IAAM,KAG3D4B,EAAS9B,EAAOE,IAAMF,EAAOsR,OAAUnN,KAAKyiB,KAAKlB,EAAa7oB,OAASqpB,GAAgBV,MAEpF,CAEH,GAGIqB,GAHAC,EAAO,EACPC,EAAU,EACVC,EAAW,CAEfngB,GACKzE,KAAI,YAAc,SAASQ,EAAGnF,GAC3B,GAAIZ,GAAS5B,GAAG2G,OAAOpG,MAAMoG,OAAM,QAASiD,OAAOyN,wBAA0B2T,CAW7E,OAVAY,GAAOE,EAEHllB,EAAQ7B,EAAOC,KAAOD,EAAOuR,MAAQsV,EAAOhqB,IAC5CkqB,EAAUF,EAAO,EACjBC,GAAQtB,GAGZuB,GAAWlqB,EACPkqB,EAAUC,IAAUA,EAAWD,GAE5B,aAAeF,EAAO,IAAMC,EAAO,MAIlDlU,EAAExQ,KAAI,YAAc,cAAgBP,EAAQ7B,EAAOuR,MAAQyV,GAAY,IAAMhnB,EAAOE,IAAM,KAE1F4B,EAAS9B,EAAOE,IAAMF,EAAOsR,OAASwV,EAAO,GAGtC,WAARtC,GAECI,EACKxiB,KAAI,QAAU,SAASQ,EAAEnF,GACtB,MAAOunB,GAAW,GAAGvnB,GAAG6U,wBAA0B,KAErDlQ,KAAI,SAAW,IACfA,KAAI,IAAM,IACVA,KAAI,IAAM,KAGnBwiB,EACK1f,MAAK,OAASyf,GACdzf,MAAK,SAAW,SAAStC,EAAEnF,GAAK,MAAOmF,GAAEmE,OAASA,EAAMnE,EAAGnF,OAoB7DF,EA7SX,GAAIyC,IAAUE,IAAK,EAAGqR,MAAO,EAAGD,OAAQ,EAAGrR,KAAM,GAC3C4B,EAAQ,IACRC,EAAS,GACT2J,EAAS,SAAS7I,GAAK,MAAOA,GAAEqE,KAChCb,EAAe,SAAUxD,GAAK,MAAOA,IACrCmE,EAAQtM,EAAGG,MAAMsQ,WACjB0a,EAAe,GACfH,GAAQ,EACRQ,EAAU,GACVrH,GAAa,EACbwB,GAAc,EACdgF,GAAkB,EAClBX,GAAW,EACXtpB,EAAWF,GAAGE,SAAQ,cAAgB,iBAAkB,kBAAmB,iBAAkB,eAC7FqpB,EAAO,SAsUb,OAhCAjnB,GAAMpC,SAAWA,EACjBoC,EAAM8L,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKiC,GAE1CA,EAAMgM,SAAWC,OAAOC,WAEpB5H,OAAiB6H,IAAK,WAAW,MAAO7H,IAAS8H,IAAK,SAASvG,GAAGvB,EAAMuB,IACxEtB,QAAiB4H,IAAK,WAAW,MAAO5H,IAAU6H,IAAK,SAASvG,GAAGtB,EAAOsB,IAC1E6D,KAAiByC,IAAK,WAAW,MAAO+B,IAAU9B,IAAK,SAASvG,GAAGqI,EAAOrI,IAC1EgD,cAAiBsD,IAAK,WAAW,MAAOtD,IAAgBuD,IAAK,SAASvG,GAAGgD,EAAahD,IACtFqiB,OAAiB/b,IAAK,WAAW,MAAO+b,IAAS9b,IAAK,SAASvG,GAAGqiB,EAAMriB,IACxEwb,YAAiBlV,IAAK,WAAW,MAAOkV,IAAcjV,IAAK,SAASvG,GAAGwb,EAAWxb,IAClFwiB,cAAiBlc,IAAK,WAAW,MAAOkc,IAAgBjc,IAAK,SAASvG,GAAGwiB,EAAaxiB,IACtF6iB,SAAiBvc,IAAK,WAAW,MAAOuc,IAAWtc,IAAK,SAASvG,GAAG6iB,EAAQ7iB,IAC5Egd,aAAiB1W,IAAK,WAAW,MAAO0W,IAAezW,IAAK,SAASvG,GAAGgd,EAAYhd,IACpFgiB,iBAAiB1b,IAAK,WAAW,MAAO0b,IAAmBzb,IAAK,SAASvG,GAAGgiB,EAAgBhiB,IAC5FqhB,UAAiB/a,IAAK,WAAW,MAAO+a,IAAY9a,IAAK,SAASvG,GAAGqhB,EAASrhB,IAC9EohB,MAAiB9a,IAAK,WAAW,MAAO8a,IAAQ7a,IAAK,SAASvG,GAAGohB,EAAKphB,IAGtEpD,QAAS0J,IAAK,WAAW,MAAO1J,IAAU2J,IAAK,SAASvG,GACpDpD,EAAOE,IAAsB5B,SAAb8E,EAAElD,IAAuBkD,EAAElD,IAASF,EAAOE,IAC3DF,EAAOuR,MAAsBjT,SAAb8E,EAAEmO,MAAuBnO,EAAEmO,MAASvR,EAAOuR,MAC3DvR,EAAOsR,OAAsBhT,SAAb8E,EAAEkO,OAAuBlO,EAAEkO,OAAStR,EAAOsR,OAC3DtR,EAAOC,KAAsB3B,SAAb8E,EAAEnD,KAAuBmD,EAAEnD,KAASD,EAAOC,OAE/D8G,OAAS2C,IAAK,WAAW,MAAO3C,IAAS4C,IAAK,SAASvG,GACnD2D,EAAQtM,EAAGG,MAAMsQ,SAAS9H,OAIlC3I,EAAGG,MAAMqP,YAAY1M,GAEdA,GC1VX9C,EAAGI,OAAOosB,cAAgB,WACtB,YA8BA,SAAS1pB,GAAMsB,GA8IX,MA7IAA,GAAUC,KAAK,SAASC,GACpBmO,EAAYW,QAEZlM,EAAY1G,GAAG2G,OAAOpG,KACtB,IAAI4E,GAAiB3F,EAAGG,MAAMwF,eAAeyB,EAAOF,EAAW3B,GAC3DK,EAAkB5F,EAAGG,MAAMyF,gBAAgByB,EAAQH,EAAW3B,EAElEvF,GAAGG,MAAMsW,QAAQvP,GAGjBY,EAAEnB,OAAOqT,GAAWxZ,GAAGkf,OAAOpb,EAAK,GAAGuE,OAAO4L,IAAIwF,GAAMvY,OAAOke,KAE1DwB,EACAtZ,EAAErB,MAAMyT,IAA4B,GAAjBvU,EAAsBrB,EAAK,GAAGuE,OAAOzG,OAAQuD,GAAkBrB,EAAK,GAAGuE,OAAOzG,OAAS,IAAOkC,EAAK,GAAGuE,OAAOzG,SAEhI0F,
EAAErB,MAAMyT,IAAW,EAAGvU,IAE1B4J,EAAE5I,OAAOyT,GAAW5Z,GAAGkf,OAAOpb,EAAK,GAAGuE,OAAO4L,IAAIoN,GAAMngB,OAAO4f,KACzD7a,MAAM0U,IAAWvV,EAAiB,IAGnCkC,EAAEnB,SAAS,KAAOmB,EAAEnB,SAAS,KAC7BmB,EAAEnB,SAAS,GACPmB,EAAEnB,QAAQmB,EAAEnB,SAAS,GAAqB,IAAhBmB,EAAEnB,SAAS,GAAWmB,EAAEnB,SAAS,GAAqB,IAAhBmB,EAAEnB,SAAS,KACzEmB,EAAEnB,QAAM,GAAK,KAEnB4I,EAAE5I,SAAS,KAAO4I,EAAE5I,SAAS,KAC7B4I,EAAE5I,SAAS,GACP4I,EAAE5I,QAAQ4I,EAAE5I,SAAS,GAAqB,IAAhB4I,EAAE5I,SAAS,GAAW4I,EAAE5I,SAAS,GAAqB,IAAhB4I,EAAE5I,SAAS,KACzE4I,EAAE5I,QAAM,GAAK,IAGvB,IAAIW,GAAOJ,EAAUK,UAAS,8BAAiCgD,GAAIjG,MAAMA,EAAK,GAAGuE,SAC7ErB,EAAYF,EAAKG,QAAQC,OAAM,KAAMC,KAAI,QAAU,iCAAmC4C,GACtFkT,EAAYjW,EAAUE,OAAM,QAC5B8V,EAAShW,EAAUE,OAAM,KACzByQ,EAAI7Q,EAAKH,OAAM,IAEnBqW,GAAO9V,OAAM,KAAMC,KAAI,QAAU,WACjCL,EAAKK,KAAI,YAAc,aAAepC,EAAOC,KAAO,IAAMD,EAAOE,IAAM,KAEvEyB,EACKvF,GAAE,QAAU,SAASwG,EAAEnF,GACpBtC,EAAS8gB,YACLld,KAAM6D,EACNiB,MAAOpG,EACPiK,IAAKzM,GAAGuE,MACRwF,GAAIA,MAIhBkT,EAAU/V,OAAM,YACXC,KAAI,KAAO,sBAAwB4C,GACnC7C,OAAM,QAEXJ,EAAKH,OAAM,uBAA0BoD,EAAK,SACrC5C,KAAI,QAAUhC,GACdgC,KAAI,SAAW/B,GAEpBuS,EAAExQ,KAAI,YAAc8Z,EAAW,2BAA6BlX,EAAK,IAAM,GAEvE,IAAI2b,GAAO5e,EAAKH,OAAM,YAAaI,UAAS,WACvCjD,KAAK,SAAS6D,GAAK,MAAOA,IAAK,SAASA,EAAEnF,GAAI,MAAOiX,GAAK9R,EAAEnF,IACjEkjB,GAAK9d,OAAOC,SAEZ6d,EAAKze,QAAQC,OAAM,QACdC,KAAI,IAAM,GACVA,KAAI,IAAM,SAASQ,EAAEnF,GAAM,MAAOhD,GAAGG,MAAM8H,UAAUsH,EAAE7F,KAAKL,IAAI,EAAGwY,EAAK1Z,EAAEnF,QAC1E2E,KAAI,SAAW,SAASQ,EAAEnF,GAAK,MAAOhD,GAAGG,MAAM8H,UAAUyB,KAAKC,IAAI4F,EAAEsS,EAAK1Z,EAAEnF,IAAMuM,EAAE,OACnF5H,KAAI,YAAc,SAASQ,EAAEnF,GAAK,MAAO,cAAgB8E,EAAEmS,EAAK9R,EAAEnF,IAAM2C,EAAiBrB,EAAK,GAAGuE,OAAOzG,OAAS,KAAO,QACxHT,GAAE,YAAc,SAASwG,EAAEnF,GACnBkf,IACL1hB,GAAG2G,OAAOpG,MAAM2J,QAAO,SAAU,GACjChK,EAASsb,kBACL1X,KAAM6D,EACNiB,MAAOpG,EACPsJ,MAAO9L,GAAG2G,OAAOpG,MAAM0J,MAAK,aAInC9I,GAAE,WAAa,SAASwG,EAAEnF,GAClBkf,IACL1hB,GAAG2G,OAAOpG,MAAM2J,QAAO,SAAU,GACjChK,EAASsF,iBACL1B,KAAM6D,EACNiB,MAAOpG,EACPsJ,MAAO9L,GAAG2G,OAAOpG,MAAM0J,MAAK,aAGnC9I,GAAE,YAAc,SAASwG,EAAEnF,GACnBkf,GACLxhB,EAASmG,kBACLvC,KAAM6D,EACNiB,MAAOpG,EACPsJ,MAAO9L,GAAG2G,OAAOpG,MAAM0J,MAAK,YAGnC9I,GAAE,QAAU,SAASwG,EAAEnF,GACpB,GAAKkf,EAAL,CACA,GAAIkE,GAAUrlB,IACdL,GAASqG,cACLzC,KAAM6D,EACNiB,MAAOpG,EACPsJ,MAAO9L,GAAG2G,OAAOpG,MAAM0J,MAAK,QAC5B1F,MAAOvE,GAAGuE,MACVqhB,QAASA,IAEb5lB,GAAGuE,MAAMshB,qBAEZ1kB,GAAE,WAAa,SAASwG,EAAEnF,GAClBkf,IACLxhB,EAAS4lB,iBACLhiB,KAAM6D,EACNiB,MAAOpG,EACPsJ,MAAO9L,GAAG2G,OAAOpG,MAAM0J,MAAK,UAEhCjK,GAAGuE,MAAMshB,qBAGjBH,EACKve,KAAI,OAAS,SAASQ,EAAEnF,GAAK,MAAOsJ,GAAMnE,EAAGnF,KAC7C2E,KAAI,QAAU,SAASQ,EAAEnF,EAAEwY,GAAK,OAAQqG,EAAK1Z,EAAEnF,GAAK,EAAI,kBAAoB,mBAAqB,WAAawY,EAAI,IAAMxY,IACxHwP,gBAAgBC,EAAa,QAC7B9K,KAAI,YAAc,SAASQ,EAAEnF,GAAK,MAAO,cAAgB8E,EAAEmS,EAAK9R,EAAEnF,IAAM2C,EAAiBrB,EAAK,GAAGuE,OAAOzG,OAAS,KAAO,QAExHuF,KAAI,QAAWhC,EAAiBrB,EAAK,GAAGuE,OAAOzG,OAAU,IAE9D8jB,EAAK1T,gBAAgBC,EAAa,QAC7B9K,KAAI,IAAM,SAASQ,EAAEnF,GAClB,GAAIypB,GAAO5K,EAAK1Z,EAAEnF,GAAK,EACnBuM,EAAE,GACEA,EAAE,GAAKA,EAAEsS,EAAK1Z,EAAEnF,IAAM,EAC1BuM,EAAE,GAAK,EACPA,EAAEsS,EAAK1Z,EAAEnF,GACb,OAAOhD,GAAGG,MAAM8H,UAAUwkB,KAE7B9kB,KAAI,SAAW,SAASQ,EAAEnF,GAAK,MAAOhD,GAAGG,MAAM8H,UAAUyB,KAAKL,IAAIK,KAAKC,IAAI4F,EAAEsS,EAAK1Z,EAAEnF,IAAMuM,EAAE,IAAI,QAIzGkD,EAAYS,UAAS,2BACdpQ,EAtKX,GAcMkX,GACAI,EACAF,EACAiB,EAjBF5V,GAAUE,IAAK,EAAGqR,MAAO,EAAGD,OAAQ,EAAGrR,KAAM,GAC3C4B,EAAQ,KACRC,EAAS,KACTkD,EAAKb,KAAKwB,MAAsB,IAAhBxB,KAAKyB,UACrBjE,EAAY,KACZY,EAAItH,GAAG8H,MAAMC,SACbgH,EAAI/O,GAAG8H,MAAMC,SACb0R,EAAO,SAAS9R,GAAK,MAAOA,GAAEL,GAC9B+Z,EAAO,SAAS1Z,GAAK,MAAOA,GAAEoH,GAC9BqQ,KACA0B,GAAU,GACVF,GAAU,EACVK,GAAW,EACXnV,EAAQtM,EAAGG,MAAMuQ,eAKjBhQ,EAAWF,GAAGE,SAAQ,aAAe,eAAgB,
kBAAmB,mBAAoB,kBAAmB,mBAAoB,aACnIwhB,GAAc,EAGhBzP,EAAczS,EAAGG,MAAMsS,YAAY/R,EAAU,EA0MjD,OAtDAoC,GAAMqf,eAAiB,SAASC,EAAYC,GACxCnb,EACKC,OAAM,sBAAyBib,GAC/B1X,QAAO,QAAU2X,IAI1Bvf,EAAMwf,gBAAkB,WACpBpb,EACKC,OAAM,0BACNuD,QAAO,SAAU,IAQ1B5H,EAAMpC,SAAWA,EACjBoC,EAAM8L,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKiC,GAE1CA,EAAMgM,SAAWC,OAAOC,WAEpB5H,OAAU6H,IAAK,WAAW,MAAO7H,IAAS8H,IAAK,SAASvG,GAAGvB,EAAMuB,IACjEtB,QAAU4H,IAAK,WAAW,MAAO5H,IAAU6H,IAAK,SAASvG,GAAGtB,EAAOsB,IACnEiX,QAAU3Q,IAAK,WAAW,MAAO2Q,IAAU1Q,IAAK,SAASvG,GAAGiX,EAAOjX,IACnE2Y,QAAUrS,IAAK,WAAW,MAAOqS,IAAUpS,IAAK,SAASvG,GAAG2Y,EAAO3Y,IACnEyY,SAAUnS,IAAK,WAAW,MAAOmS,IAAWlS,IAAK,SAASvG,GAAGyY,EAAQzY,IACrEb,GAAUmH,IAAK,WAAW,MAAOgL,IAAQ/K,IAAK,SAASvG,GAAGsR,EAAKtR,IAC/D4G,GAAUN,IAAK,WAAW,MAAO4S,IAAQ3S,IAAK,SAASvG,GAAGkZ,EAAKlZ,IAC/DvC,QAAU6I,IAAK,WAAW,MAAOnH,IAAKoH,IAAK,SAASvG,GAAGb,EAAEa,IACzDuS,QAAUjM,IAAK,WAAW,MAAOM,IAAKL,IAAK,SAASvG,GAAG4G,EAAE5G,IACzDqR,SAAU/K,IAAK,WAAW,MAAO+K,IAAW9K,IAAK,SAASvG,GAAGqR,EAAQrR,IACrEyR,SAAUnL,IAAK,WAAW,MAAOmL,IAAWlL,IAAK,SAASvG,GAAGyR,EAAQzR,IACrEuR,QAAUjL,IAAK,WAAW,MAAOiL,IAAUhL,IAAK,SAASvG,GAAGuR,EAAOvR,IACnEwS,QAAUlM,IAAK,WAAW,MAAOkM,IAAUjM,IAAK,SAASvG,GAAGwS,EAAOxS,IACnE8Y,UAAcxS,IAAK,WAAW,MAAOwS,IAAYvS,IAAK,SAASvG,GAAG8Y,EAAS9Y,IAC3E4B,IAAc0E,IAAK,WAAW,MAAO1E,IAAM2E,IAAK,SAASvG,GAAG4B,EAAG5B,IAC/DuZ,aAAcjT,IAAK,WAAW,MAAOiT,IAAehT,IAAK,SAASvG,GAAGuZ,EAAYvZ,IAGjFpD,QAAS0J,IAAK,WAAW,MAAO1J,IAAU2J,IAAK,SAASvG,GACpDpD,EAAOE,IAAsB5B,SAAb8E,EAAElD,IAAuBkD,EAAElD,IAASF,EAAOE,IAC3DF,EAAOuR,MAAsBjT,SAAb8E,EAAEmO,MAAuBnO,EAAEmO,MAASvR,EAAOuR,MAC3DvR,EAAOsR,OAAsBhT,SAAb8E,EAAEkO,OAAuBlO,EAAEkO,OAAStR,EAAOsR,OAC3DtR,EAAOC,KAAsB3B,SAAb8E,EAAEnD,KAAuBmD,EAAEnD,KAASD,EAAOC,OAE/D8G,OAAS2C,IAAK,WAAW,MAAO3C,IAAS4C,IAAK,SAASvG,GACnD2D,EAAQtM,EAAGG,MAAMsQ,SAAS9H,OAIlC3I,EAAGG,MAAMqP,YAAY1M,GAEdA,GCvOX9C,EAAGI,OAAOssB,mBAAqB,SAASC,GACpC,YAqDA,SAAS7pB,GAAMsB,GAuNX,MAtNAA,GAAUC,KAAK,SAASC,GACpBmO,EAAYW,QACZX,EAAYrS,OAAO8lB,GACfhJ,GAAWzK,EAAYrS,OAAO+c,GAC9BC,GAAW3K,EAAYrS,OAAOid,EAElC,IAAInW,GAAY1G,GAAG2G,OAAOpG,KAE1Bf,GAAGG,MAAMsW,QAAQvP,EACjB,IAAIvB,GAAiB3F,EAAGG,MAAMwF,eAAeyB,EAAOF,EAAW3B,GAC3DK,EAAkB5F,EAAGG,MAAMyF,gBAAgByB,EAAQH,EAAW3B,EAQlE,IANAzC,EAAMqR,OAAS,WAAajN,EAAUiH,aAAa1F,SAASmkB,GAAoBxrB,KAAK0B,IACrFA,EAAMoE,UAAYnG,KAGlBgR,EAAM8L,SAAWvZ,EAAKmQ,IAAI,SAAStM,GAAK,QAASA,EAAE0V,YAE9CsF,EAAc,CACf,GAAI3W,EACJ2W,KACA,KAAK3W,IAAOuF,GACJA,EAAMvF,YAAgBtL,OACtBiiB,EAAa3W,GAAOuF,EAAMvF,GAAKrL,MAAM,GAErCgiB,EAAa3W,GAAOuF,EAAMvF,GAKtC,KAAKlI,GAASA,EAAKlC,QAAWkC,EAAKmI,OAAO,SAAStE,GAAK,MAAOA,GAAEU,OAAOzG,SAAUA,QAE9E,MADApC,GAAGG,MAAM4W,OAAOjU,EAAOoE,GAChBpE,CAEPoE,GAAUK,UAAS,cAAec,SAItCP,EAAIoe,EAAK9f,SACTmJ,EAAI2W,EAAKhL,QAGT,IAAI5T,GAAOJ,EAAUK,UAAS,mCAAoCjD,MAAMA,IACpEkZ,EAASlW,EAAKG,QAAQC,OAAM,KAAMC,KAAI,QAAU,sCAAsCD,OAAM,KAC5FyQ,EAAI7Q,EAAKH,OAAM,IAEnBqW,GAAO9V,OAAM,KAAMC,KAAI,QAAU,gBACjC6V,EAAO9V,OAAM,KAAMC,KAAI,QAAU,gBACjC6V,EAAO9V,OAAM,KAAMC,KAAI,QAAU,eACjC6V,EAAO9V,OAAM,KAAMC,KAAI,QAAU,iBACjC6V,EAAO9V,OAAM,KAAMC,KAAI,QAAU,kBAG5Bkc,GAGDC,EAAO1c,MAAMzB,GAEbwS,EAAEhR,OAAM,kBACHyW,MAAMtZ,GACNlD,KAAK0iB,GAELC,GAAaD,EAAOzc,WAAa9B,EAAOE,MACzCF,EAAOE,IAAMqe,EAAOzc,SACpBzB,EAAkB5F,EAAGG,MAAMyF,gBAAgByB,EAAQH,EAAW3B,IAGlE+B,EAAKH,OAAM,kBACNQ,KAAI,YAAc,gBAAmBpC,EAAOE,IAAK,MAdtD0S,EAAEhR,OAAM,kBAAmBI,UAAS,KAAMc,SAgB9Cf,EAAKK,KAAI,YAAc,aAAepC,EAAOC,KAAO,IAAMD,EAAOE,IAAM,KAEnEiY,GACAvF,EAAEhR,OAAM,iBACHQ,KAAI,YAAc,aAAehC,EAAiB,OAIvD0e,IACAC,EACKld,MAAMzB,GACN0B,OAAOzB,GACPL,QAAQC,KAAKD,EAAOC,KAAMC,IAAIF,EAAOE,MACrCmC,aAAaV,GACbd,OAAO0B,GACZR,EAAKH,OAAM,mBAAoB/F,KAAKkjB,IAExC4B,EACK9e,MAAMzB,GACN0B,OAAOzB,GACP0G,MAAMhI,EAAKmQ,IAAI,SAAStM,EAA
EnF,GACvB,MAAOmF,GAAEmE,OAASA,EAAMnE,EAAGnF,KAC5ByJ,OAAO,SAAStE,EAAEnF,GAAK,OAAQsB,EAAKtB,GAAG6a,WAE9C,IAAIF,GAAWxF,EAAEhR,OAAM,gBAClByW,MAAMtZ,EAAKmI,OAAO,SAAStE,GAAK,OAAQA,EAAE0V,WAC/CF,GAASxP,aAAa/M,KAAK8kB,GAGvBhJ,IACAC,EACK7U,MAAMR,GACNid,OAAQ/kB,EAAGG,MAAMuU,WAAW/O,EAAe,IAAKrB,IAChDwZ,UAAUlY,EAAiB,GAEhCuS,EAAEhR,OAAM,iBACHQ,KAAI,YAAc,eAAiB4H,EAAE9I,QAAQ,GAAK,KACvD0R,EAAEhR,OAAM,iBACHgH,aACA/M,KAAK+b,IAGVC,IACAC,EACK/U,MAAMiH,GACNwV,OAAQ/kB,EAAGG,MAAM2U,WAAWlP,EAAgB,GAAItB,IAChDwZ,UAAWnY,EAAgB,GAEhCwS,EAAEhR,OAAM,iBACHgH,aACA/M,KAAKic,IAOdiH,EAAiB5jB,SAASiB,GAAE,mBAAqB,SAASC,GACtDskB,EAAK5D,iBAEL,IAAI2C,GAAa7C,EAAY8C,EAAgBC,IAC7C7gB,GACKmI,OAAO,SAASL,EAAQpJ,GAErB,MADAoJ,GAAOqY,YAAczhB,GACboJ,EAAOyR,WAElB5T,QAAQ,SAASmC,EAAOpJ,GACrBof,EAAapiB,EAAG4I,kBAAkBwD,EAAOvD,OAAQjH,EAAE0E,YAAaxD,EAAMgF,KACtEoe,EAAK/D,eAAeC,GAAW,EAC/B,IAAI9S,GAAQlD,EAAOvD,OAAOuZ,EACZve,UAAVyL,IACgBzL,SAAhBohB,IAA2BA,EAAc3V,GACtBzL,SAAnBqhB,IAA8BA,EAAiBpiB,EAAMsD,SAAStD,EAAMgF,IAAIwH,EAAM8S,KAClF+C,EAAQ1hB,MACJ+I,IAAKJ,EAAOI,IACZR,MAAOlJ,EAAMyM,IAAID,EAAO8S,GACxB9V,MAAOA,EAAMF,EAAOA,EAAOqY,aAC3BngB,KAAM8H,EAAOvD,OAAOuZ,OAIhC,IAAIkD,GAASnI,EAAM3E,aAAa1V,EAAMgF,IAAImd,EAAY7C,GACtDkC,GAAiBpkB,QACZuL,eAAe,SAAStD,EAAEnF,GACvB,MAAOqa,GAAM7E,aAAarQ,KAE7B7D,MACG0H,MAAOsZ,EACPlc,MAAOgZ,EACPhW,OAAQ+Y,MAGhBb,EAAiBre,gBAAgBif,KAIrCZ,EAAiB5jB,SAASiB,GAAE,kBAAmB,SAASC,GACpDlB,EAASmsB,cACT3G,EAAK5D,oBAGTwB,EAAOpjB,SAASiB,GAAE,cAAgB,SAASwG,EAAEnF,GACzCmF,EAAE0V,UAAY1V,EAAE0V,SAEXvZ,EAAKmI,OAAO,SAAStE,GAAK,OAAQA,EAAE0V,WAAYzb,QACjDkC,EAAKmQ,IAAI,SAAStM,GAGd,MAFAA,GAAE0V,UAAW,EACbvW,EAAKC,UAAS,cAAemD,QAAO,YAAa,GAC1CvC,IAIf4J,EAAM8L,SAAWvZ,EAAKmQ,IAAI,SAAStM,GAAK,QAASA,EAAE0V,WACnDnd,EAASoiB,YAAY/Q,GAErB3N,EAAU+J,aAAa/M,KAAK0B,KAGhCghB,EAAOpjB,SAASiB,GAAE,iBAAmB,SAASwG,GAE1C7D,EAAK2F,QAAQ,SAAS9B,GAClBA,EAAE0V,UAAW,IAEjB1V,EAAE0V,UAAW,EAEb9L,EAAM8L,SAAWvZ,EAAKmQ,IAAI,SAAStM,GAAK,QAASA,EAAE0V,WACnDnd,EAASoiB,YAAY/Q,GACrBjP,EAAMqR,WAGVzT,EAASiB,GAAE,cAAgB,SAASC,GACN,mBAAfA,GAAEic,WACTvZ,EAAK2F,QAAQ,SAASmC,EAAOpJ,GACzBoJ,EAAOyR,SAAWjc,EAAEic,SAAS7a,KAGjC+O,EAAM8L,SAAWjc,EAAEic,UAGvB/a,EAAMqR,aAId1B,EAAYS,UAAS,gCACdpQ,EAtQX,GAmBMgF,GACAyH,EApBF2W,EAAOyG,GAAa3sB,EAAGI,OAAOosB,gBAC5BrP,EAAQnd,EAAGI,OAAO8X,OAClBmF,EAAQrd,EAAGI,OAAO8X,OAClB4L,EAAS9jB,EAAGI,OAAO0jB,SACnBQ,EAAmBtkB,EAAGkE,uBACtBhE,EAAUF,EAAGI,OAAOF,UAItBqF,GAAUE,IAAK,GAAIqR,MAAO,GAAID,OAAQ,GAAIrR,KAAM,IAC9Cue,EAAY,KACZzX,EAAQtM,EAAGG,MAAMuQ,eACjBtJ,EAAQ,KACRC,EAAS,KACTwc,GAAa,EACb3G,GAAY,EACZE,GAAY,EACZM,GAAkB,EAClB2G,GAA0B,EAG1BtS,KACAoR,EAAe,KACfpM,EAAS,KACTrW,EAAWF,GAAGE,SAAQ,cAAgB,cAAe,cAAe,aACpEksB,EAAqB,GAG3BzP,GAAM9E,OAAM,UAAWW,YAAY,GACnCqE,EAAMhF,OAAQ,EAAoB,QAAU,QAC5CnY,EACKuI,SAAS,GACT+C,eAAc,GACdC,eAAe,SAAStD,EAAGnF,GACxB,MAAOqa,GAAM7E,aAAarQ,EAAGnF,KAEhC0I,gBAAgB,SAASvD,EAAGnF,GACzB,MAAOma,GAAM3E,aAAarQ,EAAGnF,IAQrC,IAAIyP,GAAczS,EAAGG,MAAMsS,YAAY/R,EAAU,EA8SjD,OA9EAwlB,GAAKxlB,SAASiB,GAAE,2BAA6B,SAASoc,GAClDA,EAAW,QACPvR,IAAK1J,EAAMgF,IAAIiW,EAAIzZ,MACnB0H,MAAOlJ,EAAMyM,IAAIwO,EAAIzZ,MACrBgI,MAAOyR,EAAIzR,OAEfpM,EAAQoE,KAAKyZ,GAAK7X,QAAO,KAG7BggB,EAAKxlB,SAASiB,GAAE,0BAA4B,SAASoc,GACjD7d,EAAQgG,QAAO,KAGnBggB,EAAKxlB,SAASiB,GAAE,2BAA6B,SAASoc,GAClD7d,MAQJ4C,EAAMpC,SAAWA,EACjBoC,EAAMojB,KAAOA,EACbpjB,EAAMghB,OAASA,EACfhhB,EAAMqa,MAAQA,EACdra,EAAMua,MAAQA,EACdva,EAAMwhB,iBAAmBA,EACzBxhB,EAAM5C,QAAUA,EAEhB4C,EAAM8L,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKiC,GAE1CA,EAAMgM,SAAWC,OAAOC,WAEpB5H,OAAa6H,IAAK,WAAW,MAAO7H,IAAS8H,IAAK,SAASvG,GAAGvB,EAAMuB,IACpEtB,QAAa4H,IAAK,WAAW,MAAO5H,IAAU6H,IAAK,SAASvG,GAAGtB,EAAOsB,IACtEkb,YAAa5U,IAAK,WAAW,MAAO4U,IAAc3U,IAAK,SAASvG,GAAGkb,EAAWlb,IAC9E
uU,WAAYjO,IAAK,WAAW,MAAOiO,IAAahO,IAAK,SAASvG,GAAGuU,EAAUvU,IAC3EyU,WAAYnO,IAAK,WAAW,MAAOmO,IAAalO,IAAK,SAASvG,GAAGyU,EAAUzU,IAC3Ewa,cAAkBlU,IAAK,WAAW,MAAOkU,IAAgBjU,IAAK,SAASvG,GAAGwa,EAAaxa,IACvFoO,QAAY9H,IAAK,WAAW,MAAO8H,IAAU7H,IAAK,SAASvG,GAAGoO,EAAOpO,IAGrEpD,QAAS0J,IAAK,WAAW,MAAO1J,IAAU2J,IAAK,SAASvG,GACtC9E,SAAV8E,EAAElD,MACFF,EAAOE,IAAMkD,EAAElD,IACfse,EAAYpb,EAAElD,KAElBF,EAAOuR,MAAsBjT,SAAb8E,EAAEmO,MAAuBnO,EAAEmO,MAASvR,EAAOuR,MAC3DvR,EAAOsR,OAAsBhT,SAAb8E,EAAEkO,OAAuBlO,EAAEkO,OAAStR,EAAOsR,OAC3DtR,EAAOC,KAAsB3B,SAAb8E,EAAEnD,KAAuBmD,EAAEnD,KAASD,EAAOC,OAE/D8G,OAAS2C,IAAK,WAAW,MAAO3C,IAAS4C,IAAK,SAASvG,GACnD2D,EAAQtM,EAAGG,MAAMsQ,SAAS9H,GAC1Bmb,EAAOxX,MAAMA,GACb4Z,EAAK5Z,MAAMA,KAEf7D,UAAcwG,IAAK,WAAW,MAAO2d,IAAsB1d,IAAK,SAASvG,GACrEikB,EAAmBjkB,EACnB8J,EAAYW,MAAMwZ,GAClBvP,EAAM5U,SAASmkB,GACfzP,EAAM1U,SAASmkB,KAEnBlP,iBAAkBzO,IAAK,WAAW,MAAOyO,IAAmBxO,IAAK,SAASvG,GACtE+U,EAAkB/U,EAClB0U,EAAMhF,OAAQ,EAAM,QAAU,UAElCgM,yBAA0BpV,IAAK,WAAW,MAAOoV,IAA2BnV,IAAK,SAASvG,GACtF0b,EAA0B1b,EACtBA,KAAM,GACN7F,EAAMof,aAAY,OAK9BliB,EAAGG,MAAMkW,eAAevT,EAAOojB,GAC/BlmB,EAAGG,MAAMqP,YAAY1M,GAEdA,GAKX9C,EAAGI,OAAO0sB,aAAe,WACrB,GAAIhqB,GAAQ9C,EAAGI,OAAOssB,mBAAmB1sB,EAAGI,OAAO2sB,UAkBnD,OAfAjqB,GAAMuhB,yBAAwB,GAC9BvhB,EAAMwhB,iBAAiBpkB,QAAQ6K,iBAAiB,SAASzG,GAErD,GAAI6D,GAAI7D,EAAK8H,OAAO,GAAG9H,KAEnBgI,EAAQnE,EAAE2Z,KAAO3Z,EAAE4Z,MAAQ,SAAW,QAC1C,OAAO,sBACqBzV,EAAQ,KAAOhI,EAAK0H,MAAQ,qCAEzBlJ,EAAMua,MAAM7E,aAAarQ,EAAE2Z,MAAQ,oCAClChf,EAAMua,MAAM7E,aAAarQ,EAAE4Z,OAAS,kCACtCjf,EAAMua,MAAM7E,aAAarQ,EAAE6Z,MAAQ,kCACnClf,EAAMua,MAAM7E,aAAarQ,EAAE8Z,KAAO,uBAG7Dnf,GAIX9C,EAAGI,OAAO4sB,oBAAsB,WAC5B,GAAIlqB,GAAQ9C,EAAGI,OAAOssB,mBAAmB1sB,EAAGI,OAAO8gB,iBAkBnD,OAfApe,GAAMuhB,yBAAwB,GAC9BvhB,EAAMwhB,iBAAiBpkB,QAAQ6K,iBAAiB,SAASzG,GAErD,GAAI6D,GAAI7D,EAAK8H,OAAO,GAAG9H,KAEnBgI,EAAQnE,EAAE2Z,KAAO3Z,EAAE4Z,MAAQ,SAAW,QAC1C,OAAO,sBACqBzV,EAAQ,KAAOhI,EAAK0H,MAAQ,qCAEzBlJ,EAAMua,MAAM7E,aAAarQ,EAAE2Z,MAAQ,oCAClChf,EAAMua,MAAM7E,aAAarQ,EAAE4Z,OAAS,kCACtCjf,EAAMua,MAAM7E,aAAarQ,EAAE6Z,MAAQ,kCACnClf,EAAMua,MAAM7E,aAAarQ,EAAE8Z,KAAO;AnB5YxE,GmB+YWnf,GClZX9C,EAAGI,OAAO0jB,OAAS,WACf,YAuBA,SAAShhB,GAAMsB,GAoSX,QAAS0lB,GAAa3hB,EAAEnF,GACpB,MAAW,WAAR+mB,EAA0B,OAC1BC,EACQ7hB,EAAE8hB,WAAa,OAAS,OACvBD,EAAL,QACC7hB,EAAEmE,QAAOnE,EAAEmE,MAAQA,EAAMnE,EAAEnF,IACtBmF,EAAE0V,SAAW1V,EAAEmE,MAAQ,QAIxC,QAAS4d,GAAW/hB,EAAEnF,GAClB,MAAGgnB,IAAoB,WAARD,GACJ5hB,EAAE8hB,WAAa,OAEf9hB,EAAEmE,OAASA,EAAMnE,EAAEnF,GAKlC,QAASiqB,GAAa9kB,EAAEnF,GACpB,MAAGgnB,IAAoB,WAARD,EACJ,EAEE5hB,EAAE0V,SAAW,EAAI,EAIlC,MA9TAzZ,GAAUC,KAAK,SAASC,GACpB,GAAIqB,GAAiByB,EAAQ7B,EAAOC,KAAOD,EAAOuR,MAC9C5P,EAAY1G,GAAG2G,OAAOpG,KAC1Bf,GAAGG,MAAMsW,QAAQvP,EAGjB,IAAII,GAAOJ,EAAUK,UAAS,eAAgBjD,MAAMA,IAChDkZ,EAASlW,EAAKG,QAAQC,OAAM,KAAMC,KAAI,QAAU,kBAAkBD,OAAM,KACxEyQ,EAAI7Q,EAAKH,OAAM,IAEfgd,GACA7c,EAAKK,KAAI,YAAc,cAAkBpC,EAAOuR,MAAS,IAAMvR,EAAOE,IAAM,KAE5E6B,EAAKK,KAAI,YAAc,aAAepC,EAAOC,KAAO,IAAMD,EAAOE,IAAM,IAE3E,IAUI0kB,GAEAY,EAZA3e,EAAS+L,EAAE5Q,UAAS,cACnBjD,KAAK,SAAS6D,GACX,MAAW,WAAR4hB,EAA0B5hB,EAEtBA,EAAEsE,OAAO,SAAS8F,GACrB,MAAOyX,IAAW,GAAQzX,EAAE0X,eAIpCG,EAAche,EAAO3E,QAAQC,OAAM,KAAMC,KAAI,QAAU,YAI3D,QAAOoiB,GACH,IAAK,UACDgB,EAAc,EACd,MACJ,KAAK,UACDA,EAAc,GAGtB,GAAW,WAARhB,EACCK,EAAY1iB,OAAM,UACb+C,MAAK,eAAiB,GACtB9C,KAAI,QAAO,oBACXA,KAAI,IAAM,GAEfwiB,EAAc/d,EAAOjF,OAAM,yBACxB,IAAY,WAAR4iB,EAAmB,CAC1BK,EAAY1iB,OAAM,QACb+C,MAAK,eAAiB,GACtB9C,KAAI,QAAO,oBACXA,KAAI,KAAO,GACXA,KAAI,KAAO,GAChBwiB,EAAc/d,EAAOjF,OAAM,qBAE3BijB,EAAY1iB,OAAM,KACbC,KAAI,QAAU,gBACd0iB,SAAQ,YAAW,0KACnB1iB,KAAI,YAAc,8BAEvB,IAAI2iB,GAAiBle,EAAOjF,OAAM,gBAElCmjB,GAAejmB,KAAK,SAAS8D,EAAEnF,GAC3BxC,G
AAG2G,OAAOpG,MAAMwG,UAAS,QACpBI,KAAI,SAAWmiB,EAAa3hB,EAAEnF,MAI3ConB,EAAY1iB,OAAM,QACbC,KAAI,cAAgB,SACpBA,KAAI,QAAO,kBACXA,KAAI,KAAO,SACXA,KAAI,KAAO,IAEhB,IAAI4iB,GAAane,EAAOjF,OAAM,sBAE9BiF,GACKzK,GAAE,YAAc,SAASwG,EAAEnF,GACxBtC,EAAS8pB,gBAAgBriB,EAAEnF,KAE9BrB,GAAE,WAAa,SAASwG,EAAEnF,GACvBtC,EAAS+pB,eAAetiB,EAAEnF,KAE7BrB,GAAE,QAAU,SAASwG,EAAEnF,GACpBtC,EAASgqB,YAAYviB,EAAEnF,EAEvB,IAAIsB,GAAO8H,EAAO9H,MAClB,IAAIqhB,EAAa,CACb,GAAQ,WAALoE,EACKY,GAGArmB,EAAK2F,QAAQ,SAASmC,GAAUA,EAAOyR,UAAW,IAClD1V,EAAE0V,UAAW,IAGb1V,EAAE0V,UAAY1V,EAAE0V,SACZvZ,EAAKgP,MAAM,SAASlH,GAAU,MAAOA,GAAOyR,YAG5CvZ,EAAK2F,QAAQ,SAASmC,GAAUA,EAAOyR,UAAW,SAGvD,IAAW,WAARkM,EACN,GAAGC,EACC7hB,EAAE8hB,YAAc9hB,EAAE8hB,WAClB9hB,EAAEyiB,aAAiC/mB,QAAlBsE,EAAEyiB,eAA8BziB,EAAE0V,SAAW1V,EAAEyiB,aAChEziB,EAAE0V,SAAW1V,EAAE8hB,YAAc9hB,EAAEyiB,iBAC5B,KAAKZ,EAAU,CAClB7hB,EAAE0V,UAAY1V,EAAE0V,SAChB1V,EAAEyiB,aAAeziB,EAAE0V,QACnB,IAAIgN,GAAUvmB,EAAKmI,OAAO,SAAStE,GAAK,OAAQA,EAAE8hB,YAC9CY,GAAQvX,MAAM,SAASlH,GAAU,MAAOA,GAAOwe,gBAG/CtmB,EAAK2F,QAAQ,SAASmC,GAClBA,EAAOyR,SAAWzR,EAAOwe,cAAe,IAKxDlqB,EAASoiB,aACLjF,SAAUvZ,EAAKmQ,IAAI,SAAStM,GAAK,QAASA,EAAE0V,WAC5CoM,WAAY3lB,EAAKmQ,IAAI,SAAStM,GAAK,QAASA,EAAE8hB,kBAKzDtoB,GAAE,WAAa,SAASwG,EAAEnF,GACvB,IAAW,WAAR+mB,IAAqBC,KACxBtpB,EAASoqB,eAAe3iB,EAAEnF,GACtB2iB,GAAa,CAEb,GAAIrhB,GAAO8H,EAAO9H,MAGlBA,GAAK2F,QAAQ,SAASmC,GAClBA,EAAOyR,UAAW,EACP,WAARkM,IAAmB3d,EAAOwe,aAAexe,EAAOyR,YAEvD1V,EAAE0V,UAAW,EACF,WAARkM,IAAmB5hB,EAAEyiB,aAAeziB,EAAE0V,UACzCnd,EAASoiB,aACLjF,SAAUvZ,EAAKmQ,IAAI,SAAStM,GAAK,QAASA,EAAE0V,gBAK5DzR,EAAO1B,QAAO,cAAgB,SAASvC,GAAK,MAAOA,GAAEyiB,eACrDxe,EAAOhE,OAAOC,SAEdkiB,EACK5iB,KAAI,OAASmiB,GACb5X,KAAK,SAAU/J,GAAK,MAAOwD,GAAaqF,EAAO7I,KAIpD,IAAIujB,GAAc,CAClB,IAAIV,EAAO,CAEP,GAAIC,KACJ7e,GAAO/H,KAAK,SAAS8D,EAAEnF,GACnB,GAAIkoB,EACJ,IAAIvf,EAAaqF,EAAO7I,KAAOwD,EAAaqF,EAAO7I,IAAI/F,OAAS+oB,EAAc,CAC1E,GAAIC,GAAazf,EAAaqF,EAAO7I,IAAIkjB,UAAU,EAAGF,EACtDD,GAAa1qB,GAAG2G,OAAOpG,MAAMoG,OAAM,QAAS+K,KAAKkZ,EAAa,OAC9D5qB,GAAG2G,OAAOpG,MAAM2G,OAAM,aAAcwK,KAAKvG,EAAaqF,EAAO7I,SAE7D+iB,GAAa1qB,GAAG2G,OAAOpG,MAAMoG,OAAM,OAEvC,IAAImkB,EACJ,KAGI,GAFAA,EAAiBJ,EAAW9gB,OAAOyN,wBAEd,GAAlByT,EAAqB,KAAMC,SAElC,MAAM3pB,GACF0pB,EAAiBtrB,EAAGG,MAAM6R,oBAAoBkZ,GAGlDD,EAAaxnB,KAAK6nB,EAAiBE,IAGvC,IAAIC,GAAe,EACfE,IAGJ,KAFAD,EAAc,EAEQ/lB,EAAd+lB,GAAgCD,EAAeR,EAAa7oB,QAChEupB,EAAaF,GAAgBR,EAAaQ,GAC1CC,GAAeT,EAAaQ,IAIhC,KAFqB,IAAjBA,IAAoBA,EAAe,GAE/BC,EAAc/lB,GAAkB8lB,EAAe,GAAI,CACvDE,KACAF,GAEA,KAAK,GAAIG,GAAI,EAAGA,EAAIX,EAAa7oB,OAAQwpB,IACjCX,EAAaW,IAAMD,EAAaC,EAAIH,IAAiB,KACrDE,EAAaC,EAAIH,GAAgBR,EAAaW,GAGtDF,GAAcC,EAAaE,OAAO,SAASC,EAAMC,EAAK3iB,EAAO4iB,GACzD,MAAOF,GAAOC,IAKtB,IAAK,GADDE,MACKjpB,EAAI,EAAGkpB,EAAO,EAAOT,EAAJzoB,EAAkBA,IACxCipB,EAAWjpB,GAAKkpB,EAChBA,GAAQP,EAAa3oB,EAGzBoJ,GACKzE,KAAI,YAAc,SAASQ,EAAGnF,GAC3B,MAAO,aAAeipB,EAAWjpB,EAAIyoB,GAAgB,KAAO,EAAI/hB,KAAKwB,MAAMlI,EAAIyoB,GAAgBV,GAAe,MAIlH5G,EACAhM,EAAExQ,KAAI,YAAc,cAAgBP,EAAQ7B,EAAOuR,MAAQ4U,GAAe,IAAMnmB,EAAOE,IAAM,KAG7F0S,EAAExQ,KAAI,YAAc,eAAsBpC,EAAOE,IAAM,KAG3D4B,EAAS9B,EAAOE,IAAMF,EAAOsR,OAAUnN,KAAKyiB,KAAKlB,EAAa7oB,OAASqpB,GAAgBV,MAEpF,CAEH,GAGIqB,GAHAC,EAAO,EACPC,EAAU,EACVC,EAAW,CAEfngB,GACKzE,KAAI,YAAc,SAASQ,EAAGnF,GAC3B,GAAIZ,GAAS5B,GAAG2G,OAAOpG,MAAMoG,OAAM,QAASiD,OAAOyN,wBAA0B2T,CAc7E,OAbAY,GAAOE,EAEHllB,EAAQ7B,EAAOC,KAAOD,EAAOuR,MAAQsV,EAAOhqB,IAC5CkqB,EAAUF,EAAO,EACjBC,GAAQtB,GAGZuB,GAAWlqB,EACPkqB,EAAUC,IAAUA,EAAWD,GAElBF,EAAOG,EAArBb,IACCA,EAAcU,EAAOG,GAElB,aAAeH,EAAO,IAAMC,EAAO,MAIlDlU,EAAExQ,KAAI,YAAc,cAAgBP,EAAQ7B,EAAOuR,MAAQyV,GAAY,IAAMhnB,EAAOE,IAAM,KAE1F4B,EAAS9B,EAAOE,IAAMF,EAAOsR,OAASwV,EAAO,GAGjD,GAAW,WAARtC,
EAAmB,CAElBI,EACKxiB,KAAI,QAAU,SAASQ,EAAEnF,GACtB,MAAOunB,GAAW,GAAGvnB,GAAG6U,wBAA0B,KAErDlQ,KAAI,SAAW,IACfA,KAAI,IAAM,IACVA,KAAI,IAAM,KAGf6V,EAAO0P,OAAM,OAAM,gBACdvlB,KAAI,QAAU,gBACdA,KAAI,OAAS,QAEbA,KAAI,UAAW,EAEpB,IAAIwlB,GAAWhV,EAAEhR,OAAM,gBAEvBgmB,GACChf,aAAa1F,SAAS,KAClBd,KAAI,KAAOojB,GACXpjB,KAAI,QAAU+jB,EAAcX,EAAc,IAC1CpjB,KAAI,SAAWN,EAAS,IACxBM,KAAI,KAAOpC,EAAOE,IAAM,IACxBkC,KAAI,UAAYqiB,EAAW,EAAI,GAKxCG,EACK1f,MAAK,OAASyf,GACdzf,MAAK,eAAiBwiB,GACtBxiB,MAAK,SAAWyf,KA8BlBpnB,EAhVX,GAAIyC,IAAUE,IAAK,EAAGqR,MAAO,EAAGD,OAAQ,EAAGrR,KAAM,GAC3C4B,EAAQ,IACRC,EAAS,GACT2J,EAAS,SAAS7I,GAAK,MAAOA,GAAEqE,KAChCb,EAAe,SAAUxD,GAAK,MAAOA,IACrCmE,EAAQtM,EAAGG,MAAMsQ,WACjB0a,EAAe,GACfH,GAAQ,EACRQ,EAAU,GACVrH,GAAa,EACbwB,GAAc,EACdgF,GAAkB,EAClBX,GAAW,EACXtpB,EAAWF,GAAGE,SAAQ,cAAgB,iBAAkB,kBAAmB,iBAAkB,eAC7FqpB,EAAO,SAyWb,OAhCAjnB,GAAMpC,SAAWA,EACjBoC,EAAM8L,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKiC,GAE1CA,EAAMgM,SAAWC,OAAOC,WAEpB5H,OAAiB6H,IAAK,WAAW,MAAO7H,IAAS8H,IAAK,SAASvG,GAAGvB,EAAMuB,IACxEtB,QAAiB4H,IAAK,WAAW,MAAO5H,IAAU6H,IAAK,SAASvG,GAAGtB,EAAOsB,IAC1E6D,KAAiByC,IAAK,WAAW,MAAO+B,IAAU9B,IAAK,SAASvG,GAAGqI,EAAOrI,IAC1EgD,cAAiBsD,IAAK,WAAW,MAAOtD,IAAgBuD,IAAK,SAASvG,GAAGgD,EAAahD,IACtFqiB,OAAiB/b,IAAK,WAAW,MAAO+b,IAAS9b,IAAK,SAASvG,GAAGqiB,EAAMriB,IACxEwiB,cAAiBlc,IAAK,WAAW,MAAOkc,IAAgBjc,IAAK,SAASvG,GAAGwiB,EAAaxiB,IACtFwb,YAAiBlV,IAAK,WAAW,MAAOkV,IAAcjV,IAAK,SAASvG,GAAGwb,EAAWxb,IAClF6iB,SAAiBvc,IAAK,WAAW,MAAOuc,IAAWtc,IAAK,SAASvG,GAAG6iB,EAAQ7iB,IAC5Egd,aAAiB1W,IAAK,WAAW,MAAO0W,IAAezW,IAAK,SAASvG,GAAGgd,EAAYhd,IACpFgiB,iBAAiB1b,IAAK,WAAW,MAAO0b,IAAmBzb,IAAK,SAASvG,GAAGgiB,EAAgBhiB,IAC5FqhB,UAAiB/a,IAAK,WAAW,MAAO+a,IAAY9a,IAAK,SAASvG,GAAGqhB,EAASrhB,IAC9EohB,MAAiB9a,IAAK,WAAW,MAAO8a,IAAQ7a,IAAK,SAASvG,GAAGohB,EAAKphB,IAGtEpD,QAAS0J,IAAK,WAAW,MAAO1J,IAAU2J,IAAK,SAASvG,GACpDpD,EAAOE,IAAsB5B,SAAb8E,EAAElD,IAAuBkD,EAAElD,IAASF,EAAOE,IAC3DF,EAAOuR,MAAsBjT,SAAb8E,EAAEmO,MAAuBnO,EAAEmO,MAASvR,EAAOuR,MAC3DvR,EAAOsR,OAAsBhT,SAAb8E,EAAEkO,OAAuBlO,EAAEkO,OAAStR,EAAOsR,OAC3DtR,EAAOC,KAAsB3B,SAAb8E,EAAEnD,KAAuBmD,EAAEnD,KAASD,EAAOC,OAE/D8G,OAAS2C,IAAK,WAAW,MAAO3C,IAAS4C,IAAK,SAASvG,GACnD2D,EAAQtM,EAAGG,MAAMsQ,SAAS9H,OAIlC3I,EAAGG,MAAMqP,YAAY1M,GAEdA,GC7XX9C,EAAGI,OAAO4H,KAAO,WACb,YA6CA,SAASlF,GAAMsB,GA2HX,MA1HAqO,GAAYW,QACZX,EAAYrS,OAAOgtB,GACnBhpB,EAAUC,KAAK,SAASC,GACpB4C,EAAY1G,GAAG2G,OAAOpG,KACtB,IAAI4E,GAAiB3F,EAAGG,MAAMwF,eAAeyB,EAAOF,EAAW3B,GAC3DK,EAAkB5F,EAAGG,MAAMyF,gBAAgByB,EAAQH,EAAW3B,EAClEvF,GAAGG,MAAMsW,QAAQvP,GAGjBY,EAAIslB,EAAQhnB,SACZmJ,EAAI6d,EAAQlS,SAEZuF,EAAKA,GAAM3Y,EACXie,EAAKA,GAAMxW,CAGX,IAAIjI,GAAOJ,EAAUK,UAAS,qBAAsBjD,MAAMA,IACtDkD,EAAYF,EAAKG,QAAQC,OAAM,KAAMC,KAAI,QAAU,wBACnD8V,EAAYjW,EAAUE,OAAM,QAC5B8V,EAAShW,EAAUE,OAAM,KACzByQ,EAAI7Q,EAAKH,OAAM,IAEnBqW,GAAO9V,OAAM,KAAMC,KAAI,QAAU,aACjC6V,EAAO9V,OAAM,KAAMC,KAAI,QAAU,kBAEjCL,EAAKK,KAAI,YAAc,aAAepC,EAAOC,KAAO,IAAMD,EAAOE,IAAM,KAEvE2nB,EACKhmB,MAAMzB,GACN0B,OAAOzB,EAEZ,IAAIynB,GAAc/lB,EAAKH,OAAM,kBAC7BkmB,GAAYjsB,KAAKgsB,GAEjB3P,EAAU/V,OAAM,YACXC,KAAI,KAAO,gBAAkBylB,EAAQ7iB,MACrC7C,OAAM,QAEXJ,EAAKH,OAAM,iBAAoBimB,EAAQ7iB,KAAO,SACzC5C,KAAI,QAAUhC,GACdgC,KAAI,SAAY/B,EAAkB,EAAKA,EAAkB,GAE9DuS,EAAKxQ,KAAI,YAAc8Z,EAAW,qBAAuB2L,EAAQ7iB,KAAO,IAAM,IAC9E8iB,EACK1lB,KAAI,YAAc8Z,EAAW,qBAAuB2L,EAAQ7iB,KAAO,IAAM,GAE9E,IAAI0b,GAAS3e,EAAKH,OAAM,cAAeI,UAAS,aAC3CjD,KAAK,SAAS6D,GAAK,MAAOA,IAAK,SAASA,GAAK,MAAOA,GAAEqE,KAC3DyZ,GAAOxe,QAAQC,OAAM,KAChB+C,MAAK,iBAAmB,MACxBA,MAAK,eAAiB,SAAStC,GAAK,MAAOA,GAAEmlB,aAAeA,IAC5D7iB,MAAK,eAAiB,MAE3Bwb,EAAO7d,OAAOC,SAEd4d,EACKte,KAAI,QAAU,SAASQ,EAAEnF,GACtB,OAAQmF,EAAEuC,SAAW,IAAM,uBAAyB1H,IAEvD0H,QAAO,QAAU,SAASvC,GAAK,MAA
OA,GAAEsT,QACxChR,MAAK,OAAS,SAAStC,EAAEnF,GAAI,MAAOsJ,GAAMnE,EAAGnF,KAC7CyH,MAAK,SAAW,SAAStC,EAAEnF,GAAI,MAAOsJ,GAAMnE,EAAGnF,KACpDijB,EAAOzT,gBAAgBC,EAAa,gBAC/BhI,MAAK,iBAAmB,GACxBA,MAAK,eAAiB,SAAStC,GAAK,MAAOA,GAAEolB,aAAe,IAEjE,IAAIC,GAAYvH,EAAO1e,UAAS,gBAC3BjD,KAAK,SAAS6D,GAAK,MAAOslB,GAAOtlB,IAAMA,OAC5CqlB,GAAU/lB,QAAQC,OAAM,QACnBC,KAAI,QAAU,WACdA,KAAI,IAAM,SAASQ,GAChB,MAAO3H,IAAG0V,IAAIwX,OACTzF,YAAYA,GACZ0F,QAAQA,GACR7lB,EAAE,SAASK,EAAEnF,GAAK,MAAOhD,GAAGG,MAAM8H,UAAUwY,EAAGxG,EAAK9R,EAAEnF,OACtD+iB,GAAG,SAAS5d,EAAEnF,GAAK,MAAOhD,GAAGG,MAAM8H,UAAU8d,EAAGlE,EAAK1Z,EAAEnF,OACvD4qB,GAAG,SAASzlB,EAAEnF,GAAK,MAAO+iB,GAAIxW,EAAE5I,SAAS,IAAM,EAAI4I,EAAE5I,SAAS,IAAM,EAAI,EAAI4I,EAAE5I,SAAS,GAAK4I,EAAE5I,SAAS,MAEvGlF,MAAMV,MAAOoH,EAAEU,WAE5Bod,EAAO7d,OAAOb,UAAS,gBAClBc,SAELmlB,EAAUhb,gBAAgBC,EAAa,mBAClC9K,KAAI,IAAM,SAASQ,GAChB,MAAO3H,IAAG0V,IAAIwX,OACTzF,YAAYA,GACZ0F,QAAQA,GACR7lB,EAAE,SAASK,EAAEnF,GAAK,MAAOhD,GAAGG,MAAM8H,UAAUH,EAAEmS,EAAK9R,EAAEnF,OACrD+iB,GAAG,SAAS5d,EAAEnF,GAAK,MAAOhD,GAAGG,MAAM8H,UAAUsH,EAAEsS,EAAK1Z,EAAEnF,OACtD4qB,GAAG,SAASzlB,EAAEnF,GAAK,MAAOuM,GAAGA,EAAE5I,SAAS,IAAM,EAAI4I,EAAE5I,SAAS,IAAM,EAAI,EAAI4I,EAAE5I,SAAS,GAAK4I,EAAE5I,SAAS,MAEtGlF,MAAMV,MAAOoH,EAAEU,UAG5B,IAAIglB,GAAY5H,EAAO1e,UAAS,gBAC3BjD,KAAK,SAAS6D,GAAK,OAAQA,EAAEU,SAElCglB,GAAUpmB,QAAQC,OAAM,QACnBC,KAAI,QAAU,WACdA,KAAI,IACDnH,GAAG0V,IAAIlO,OACNigB,YAAYA,GACZ0F,QAAQA,GACR7lB,EAAE,SAASK,EAAEnF,GAAK,MAAOhD,GAAGG,MAAM8H,UAAUwY,EAAGxG,EAAK9R,EAAEnF,OACtDuM,EAAE,SAASpH,EAAEnF,GAAK,MAAOhD,GAAGG,MAAM8H,UAAU8d,EAAGlE,EAAK1Z,EAAEnF,QAG/D6qB,EAAUrb,gBAAgBC,EAAa,mBAClC9K,KAAI,IACDnH,GAAG0V,IAAIlO,OACNigB,YAAYA,GACZ0F,QAAQA,GACR7lB,EAAE,SAASK,EAAEnF,GAAK,MAAOhD,GAAGG,MAAM8H,UAAUH,EAAEmS,EAAK9R,EAAEnF,OACrDuM,EAAE,SAASpH,EAAEnF,GAAK,MAAOhD,GAAGG,MAAM8H,UAAUsH,EAAEsS,EAAK1Z,EAAEnF,QAI9Dyd,EAAK3Y,EAAEgS,OACPiM,EAAKxW,EAAEuK,SAEXrH,EAAYS,UAAS,kBACdpQ,EAnKX,GAcMgF,GACAyH,EAfD6d,EAAUptB,EAAGI,OAAOgtB,UAGrB7nB,GAAUE,IAAK,EAAGqR,MAAO,EAAGD,OAAQ,EAAGrR,KAAM,GAC3C4B,EAAQ,IACRC,EAAS,IACTH,EAAY,KACZomB,EAAc,IACdhhB,EAAQtM,EAAGG,MAAMuQ,eACjBuJ,EAAO,SAAS9R,GAAK,MAAOA,GAAEL,GAC9B+Z,EAAO,SAAS1Z,GAAK,MAAOA,GAAEoH,GAC9Boe,EAAU,SAASxlB,EAAEnF,GAAK,OAAQmN,MAAM0R,EAAK1Z,EAAEnF,KAAqB,OAAd6e,EAAK1Z,EAAEnF,IAC7DyqB,EAAS,SAAStlB,GAAK,MAAOA,GAAEulB,MAChCjM,GAAW,EAGXwG,EAAc,SACdxf,EAAW,IACX/H,EAAWF,GAAGE,SAAQ,eAAiB,mBAAoB,kBAAmB,YAGpF0sB,GACKU,UAAU,IACVC,aAAa,GAAG,KAUrB,IAAItN,GAAIsF,EACFtT,EAAczS,EAAGG,MAAMsS,YAAY/R,EAAU+H,EA0LnD,OAjDA3F,GAAMpC,SAAWA,EACjBoC,EAAMsqB,QAAUA,EAEhBA,EAAQ1sB,SAASiB,GAAE,eAAiB,WAAYjB,EAASqG,aAAatF,MAAMV,KAAMM,aAClF+rB,EAAQ1sB,SAASiB,GAAE,mBAAqB,WAAYjB,EAASsb,iBAAiBva,MAAMV,KAAMM,aAC1F+rB,EAAQ1sB,SAASiB,GAAE,kBAAoB,WAAYjB,EAASsF,gBAAgBvE,MAAMV,KAAMM,aAExFyB,EAAM8L,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKiC,GAE1CA,EAAMgM,SAAWC,OAAOC,WAEpB5H,OAAa6H,IAAK,WAAW,MAAO7H,IAAS8H,IAAK,SAASvG,GAAGvB,EAAMuB,IACpEtB,QAAa4H,IAAK,WAAW,MAAO5H,IAAU6H,IAAK,SAASvG,GAAGtB,EAAOsB,IACtEglB,SAAU1e,IAAK,WAAW,MAAO0e,IAAWze,IAAK,SAASvG,GAAGglB,EAAQhlB,IACrEsf,aAAmBhZ,IAAK,WAAW,MAAOgZ,IAAe/Y,IAAK,SAASvG,GAAGsf,EAAYtf,IACtF8Y,UAAcxS,IAAK,WAAW,MAAOwS,IAAYvS,IAAK,SAASvG,GAAG8Y,EAAS9Y,IAG3EpD,QAAS0J,IAAK,WAAW,MAAO1J,IAAU2J,IAAK,SAASvG,GACpDpD,EAAOE,IAAsB5B,SAAb8E,EAAElD,IAAuBkD,EAAElD,IAASF,EAAOE,IAC3DF,EAAOuR,MAAsBjT,SAAb8E,EAAEmO,MAAuBnO,EAAEmO,MAASvR,EAAOuR,MAC3DvR,EAAOsR,OAAsBhT,SAAb8E,EAAEkO,OAAuBlO,EAAEkO,OAAStR,EAAOsR,OAC3DtR,EAAOC,KAAsB3B,SAAb8E,EAAEnD,KAAuBmD,EAAEnD,KAASD,EAAOC,OAE/DiD,UAAWwG,IAAK,WAAW,MAAOxG,IAAYyG,IAAK,SAASvG,GACxDF,EAAWE,EACX8J,EAAYW,MAAM3K,GAClB2kB,EAAQ3kB,SAASA,KAErBglB,QAASxe,IAAK,WAAW,MAAOwe,IAAUve,IAAK,SAASvG,GACpD8kB,EA
ASjtB,GAAG4V,QAAQzN,KAExBb,GAAImH,IAAK,WAAW,MAAOgL,IAAQ/K,IAAK,SAASvG,GAC7CsR,EAAOtR,EACPykB,EAAQtlB,EAAEa,KAEd4G,GAAIN,IAAK,WAAW,MAAO4S,IAAQ3S,IAAK,SAASvG,GAC7CkZ,EAAOlZ,EACPykB,EAAQ7d,EAAE5G,KAEd2D,OAAS2C,IAAK,WAAW,MAAO3C,IAAS4C,IAAK,SAASvG,GACnD2D,EAAQtM,EAAGG,MAAMsQ,SAAS9H,GAC1BykB,EAAQ9gB,MAAMA,OAItBtM,EAAGG,MAAMkW,eAAevT,EAAOsqB,GAC/BptB,EAAGG,MAAMqP,YAAY1M,GAEdA,GCnOX9C,EAAGI,OAAO4tB,UAAY,WAClB,YA8EA,SAASlrB,GAAMsB,GA0VX,MAzVAqO,GAAYW,QACZX,EAAYrS,OAAOoiB,GACftF,GAAWzK,EAAYrS,OAAO+c,GAC9BC,GAAW3K,EAAYrS,OAAOid,GAElCjZ,EAAUC,KAAK,SAASC,GA6IpB,QAAS2pB,KACJ/Q,GACD/E,EAAEhR,OAAM,2BACLgH,aACA1F,SAASA,GACTrH,KAAK+b,GAKZ,QAAS+Q,KACJ9Q,GACDjF,EAAEhR,OAAM,2BACLgH,aACA1F,SAASA,GACTrH,KAAKic,GA8JZ,QAASqK,GAAQhI,GAEb,GAAIyO,GAAiBhW,EAAEhR,OAAM,2BACxByW,MACDtZ,EAAKmI,OAAO,SAAStE,GAAK,OAAQA,EAAE0V,WAC/BpJ,IAAI,SAAStM,EAAEnF,GACZ,OACIwJ,IAAKrE,EAAEqE,IACPkhB,KAAMvlB,EAAEulB,KACRhjB,QAASvC,EAAEuC,QACX7B,OAAQV,EAAEU,OAAO4D,OAAO,SAAStE,EAAEnF,GAC/B,MAAOwf,GAAM1a,IAAIK,EAAEnF,IAAM0c,EAAO,IAAM8C,EAAM1a,IAAIK,EAAEnF,IAAM0c,EAAO,KAEnE0O,eAAgBjmB,EAAEimB,kBAIlCD,GAAehgB,aAAa1F,SAASA,GAAUrH,KAAKohB,GAGpDyL,IACAC,IA9UJ,GAAIhnB,GAAY1G,GAAG2G,OAAOpG,KAC1Bf,GAAGG,MAAMsW,QAAQvP,EACjB,IAAIvB,GAAiB3F,EAAGG,MAAMwF,eAAeyB,EAAOF,EAAW3B,GAC3DK,EAAkB5F,EAAGG,MAAMyF,gBAAgByB,EAAQH,EAAW3B,IAAW8oB,EAAcpH,EAAM5f,SAAW,EAkB5G,IAjBAvE,EAAMqR,OAAS,WACM,IAAb1L,EACAvB,EAAU9F,KAAM0B,GAEhBoE,EAAUiH,aAAa1F,SAASA,GAAUrH,KAAK0B,IAGvDA,EAAMoE,UAAYnG,KAElBgR,EACKmC,OAAO+O,EAAY3e,GAAOxB,EAAMqR,QAChCH,OAAOkP,EAAY5e,IACnB6P,SAGLpC,EAAM8L,SAAWvZ,EAAKmQ,IAAI,SAAStM,GAAK,QAASA,EAAE0V,YAE9CsF,EAAc,CACf,GAAI3W,EACJ2W,KACA,KAAK3W,IAAOuF,GACJA,EAAMvF,YAAgBtL,OACtBiiB,EAAa3W,GAAOuF,EAAMvF,GAAKrL,MAAM,GAErCgiB,EAAa3W,GAAOuF,EAAMvF,GAKtC,KAAKlI,GAASA,EAAKlC,QAAWkC,EAAKmI,OAAO,SAAStE,GAAK,MAAOA,GAAEU,OAAOzG,SAAWA,QAE/E,MADApC,GAAGG,MAAM4W,OAAOjU,EAAOoE,GAChBpE,CAEPoE,GAAUK,UAAS,cAAec,SAItC4e,EAAMvmB,SAASiB,GAAE,UAAY,SAAS+d,GAClCgI,EAAQhI,KAIZ5X,EAAI0a,EAAMpc,SACVmJ,EAAIiT,EAAMtH,QAGV,IAAI5T,GAAOJ,EAAUK,UAAS,0BAA2BjD,MAAMA,IAC3DkZ,EAASlW,EAAKG,QAAQC,OAAM,KAAMC,KAAI,QAAU,6BAA6BD,OAAM,KACnFyQ,EAAI7Q,EAAKH,OAAM,IAEnBqW,GAAO9V,OAAM,KAAMC,KAAI,QAAU,gBAEjC,IAAI2mB,GAAa9Q,EAAO9V,OAAM,KAAMC,KAAI,QAAU,WAClD2mB,GAAW5mB,OAAM,KAAMC,KAAI,QAAU,iBAAiBD,OAAM,QAC5D4mB,EAAW5mB,OAAM,KAAMC,KAAI,QAAU,gBACrC2mB,EAAW5mB,OAAM,KAAMC,KAAI,QAAU,gBACrC2mB,EAAW5mB,OAAM,KAAMC,KAAI,QAAU,gBACrC2mB,EAAW5mB,OAAM,KAAMC,KAAI,QAAU,iBAElB6V,GAAO9V,OAAM,KAAMC,KAAI,QAAU,eAG/Ckc,IAGDC,EAAO1c,MAAMzB,GAEbwS,EAAEhR,OAAM,kBACHyW,MAAMtZ,GACNlD,KAAK0iB,GAEa,WAAnByK,EACAjnB,EAAKH,OAAM,kBACNQ,KAAI,YAAc,eAAiB/B,EAAgB,KAC9B,QAAnB2oB,IACFxK,GAAaD,EAAOzc,WAAa9B,EAAOE,MACzCF,EAAOE,IAAMqe,EAAOzc,SACpBzB,EAAkB5F,EAAGG,MAAMyF,gBAAgByB,EAAQH,EAAW3B,IAAW8oB,EAAcpH,EAAM5f,SAAW,IAG5GC,EAAKH,OAAM,kBACNQ,KAAI,YAAc,gBAAmBpC,EAAOE,IAAK,OAlB1D0S,EAAEhR,OAAM,kBAAmBI,UAAS,KAAMc,SAsB9Cf,EAAKK,KAAI,YAAc,aAAepC,EAAOC,KAAO,IAAMD,EAAOE,IAAM,KAEnEiY,GACAvF,EAAEhR,OAAM,iBACHQ,KAAI,YAAc,aAAehC,EAAiB,OAIvD0e,IACAC,EACKld,MAAMzB,GACN0B,OAAOzB,GACPL,QAAQC,KAAKD,EAAOC,KAAMC,IAAIF,EAAOE,MACrCmC,aAAaV,GACbd,OAAO0B,GACZR,EAAKH,OAAM,mBAAoB/F,KAAKkjB,IAGxCnM,EAAEhR,OAAM,iCACHQ,KAAI,QAAUhC,GACdgC,KAAI,SAAW/B,GAEpB4c,EACKpb,MAAMzB,GACN0B,OAAOzB,GACP0G,MAAMhI,EAAKmQ,IAAI,SAAStM,EAAEnF,GACvB,MAAOmF,GAAEmE,OAASA,EAAMnE,EAAGnF,KAC5ByJ,OAAO,SAAStE,EAAEnF,GAAK,OAAQsB,EAAKtB,GAAG6a,WAE9C,IAAI2G,GAAYrM,EAAEhR,OAAM,iBACnByW,MAAMtZ,EAAKmI,OAAO,SAAStE,GAAK,OAAQA,EAAE0V,WA+C/C,IA3CIX,GACAC,EACK7U,MAAMR,GACNid,OAAO/kB,EAAGG,MAAMuU,WAAW/O,EAAe,IAAKrB,IAC/CwZ,UAAUlY,EAAiB,GAGhCwX,GACAC,EACK/U,MAAMiH,GACNwV,OAAQ/kB,EAAGG,MAAM2U,WAAWlP,EAAgB,GAAItB,IAC
hDwZ,UAAWnY,EAAgB,GA0BpCwS,EAAEhR,OAAM,2BACHQ,KAAI,YAAc,eAAiB/B,EAAkB,KAKtDyoB,EAIG,CACHpH,EAAM7f,MAAMzB,GACZwS,EAAEhR,OAAM,iBACHQ,KAAI,YAAc,gBAAmB/B,EAAkBL,EAAOsR,OAASoQ,EAAM1hB,SAASE,KAAO,KAC7FmY,MAAMtZ,EAAKmI,OAAO,SAAStE,GAAK,OAAQA,EAAE0V,YAC1Czc,KAAK6lB,EACV,IAAIvH,GAASuH,EAAMG,MAAMC,QAAUJ,EAAMjN,UAAYiN,EAAMG,MAAM1H,QACnD,QAAXA,GACCgI,EAAQhI,OAXZ8E,GAAUpjB,KAAKohB,GACfyL,IACAC,GAgBJpK,GAAOpjB,SAASiB,GAAE,cAAgB,SAASqjB,GACvC,IAAK,GAAIxY,KAAOwY,GACZjT,EAAMvF,GAAOwY,EAASxY,EAC1B9L,GAASoiB,YAAY/Q,GACrBjP,EAAMqR,WAGVmQ,EAAiB5jB,SAASiB,GAAE,mBAAqB,SAASC,GACtD4gB,EAAMF,iBACN,IAAI2C,GAAa7C,EAAY8C,EAAgBC,IAmC7C,IAlCA7gB,EACKmI,OAAO,SAASL,EAAQpJ,GAErB,MADAoJ,GAAOqY,YAAczhB,GACboJ,EAAOyR,WAAazR,EAAOgiB,iBAEtCnkB,QAAQ,SAASmC,EAAOpJ,GACrB,GAAI0c,GAAS2O,EAAepH,EAAMG,MAAMC,QAAUJ,EAAM7gB,SAASO,SAAWsgB,EAAMG,MAAM1H,SAAY5X,EAAEnB,SAClG6nB,EAAgBpiB,EAAOvD,OAAO4D,OAAO,SAAStE,EAAEnF,GAGhD,MAAG0c,GAAO,IAAMA,EAAO,GACZ8C,EAAM1a,IAAIK,EAAEnF,IAAM0c,EAAO,IAAM8C,EAAM1a,IAAIK,EAAEnF,IAAM0c,EAAO,GAExD8C,EAAM1a,IAAIK,EAAEnF,IAAM0c,EAAO,IAAM8C,EAAM1a,IAAIK,EAAEnF,IAAM0c,EAAO,IAIvE0C,GAAapiB,EAAG4I,kBAAkB4lB,EAAe5sB,EAAE0E,YAAakc,EAAM1a,IACtE,IAAIwH,GAAQkf,EAAcpM,GACtBqM,EAAc3rB,EAAMyM,IAAID,EAAO8S,EACf,QAAhBqM,GACAjM,EAAML,eAAenf,EAAGof,GAAY,GAE1Bve,SAAVyL,IACgBzL,SAAhBohB,IAA2BA,EAAc3V,GACtBzL,SAAnBqhB,IAA8BA,EAAiBpiB,EAAMsD,SAAStD,EAAMgF,IAAIwH,EAAM8S,KAClF+C,EAAQ1hB,MACJ+I,IAAKJ,EAAOI,IACZR,MAAOyiB,EACPniB,MAAOA,EAAMF,EAAOA,EAAOqY,aAC3BngB,KAAMgL,OAId6V,EAAQ/iB,OAAS,EAAG,CACpB,GAAIgjB,GAAStiB,EAAMoY,SAAStU,OAAOhF,EAAE+C,QACjC0gB,EAAe3b,KAAKC,IAAI7G,EAAMoY,SAASvU,SAAS,GAAK7D,EAAMoY,SAASvU,SAAS,IAC7EkD,EAAY,IAAOwb,EACnBrb,EAAmBhK,EAAG4J,kBAAkBub,EAAQ1Q,IAAI,SAAStM,GAAG,MAAOA,GAAE6D,QAASoZ,EAAOvb,EACpE,QAArBG,IACAmb,EAAQnb,GAAkBqC,WAAY,GAG9C,GAAIqiB,GAAwB,SAASvmB,EAAEnF,GACnC,MAAY,OAALmF,EAAY,MAAQkV,EAAM7E,aAAarQ,GAGlDmc,GAAiBpkB,QACZuL,eAAe6Y,EAAiBpkB,QAAQuL,kBAAoBijB,GAC5DpqB,MACG0H,MAAOlJ,EAAMgF,IAAKmd,EAAY7C,GAC9BhZ,MAAOgZ,EACPhW,OAAQ+Y,MAGhBb,EAAiBre,gBAAgBif,KAIrCZ,EAAiB5jB,SAASiB,GAAE,eAAiB,SAASC,GAClD,GAAIsjB,GAAgBC,IAEpB7gB,GAAKmI,OAAO,SAASL,EAAQpJ,GAEzB,MADAoJ,GAAOqY,YAAczhB,GACboJ,EAAOyR,WAChB5T,QAAQ,SAASmC,GAChB,GAAIgW,GAAapiB,EAAG4I,kBAAkBwD,EAAOvD,OAAQjH,EAAE0E,YAAaxD,EAAMgF,KACtEwH,EAAQlD,EAAOvD,OAAOuZ,EAC1B,IAAqB,mBAAV9S,GAAX,CAC8B,mBAAnB4V,KAAgCA,EAAiBpiB,EAAMsD,SAAStD,EAAMgF,IAAIwH,EAAM8S,IAC3F,IAAIuM,GAAO7rB,EAAMoY,SAASpY,EAAMyM,IAAID,EAAM8S,GAC1C+C,GAAQ1hB,MACJ6L,MAAOA,EACP8S,WAAYA,EACZnV,KAAMiY,EAAgByJ,GACtBlK,YAAarY,EAAOqY,YACpBrY,OAAQA,OAIhBoW,EAAM9hB,SAASqG,aAAaoe,KAGhCb,EAAiB5jB,SAASiB,GAAE,kBAAmB,SAASC,GACpD4gB,EAAMF,oBAGV5hB,EAASiB,GAAE,cAAgB,SAASC,GACN,mBAAfA,GAAEic,UAA4BvZ,EAAKlC,SAAWR,EAAEic,SAASzb,SAChEkC,EAAK2F,QAAQ,SAASmC,EAAOpJ,GACzBoJ,EAAOyR,SAAWjc,EAAEic,SAAS7a,KAGjC+O,EAAM8L,SAAWjc,EAAEic,UAEvB/a,EAAMqR,aAgDd1B,EAAYS,UAAS,uBACdpQ,EAlaX,GAoBMgF,GACAyH,EArBFiT,EAAQxiB,EAAGI,OAAO4H,OAChBmV,EAAQnd,EAAGI,OAAO8X,OAClBmF,EAAQrd,EAAGI,OAAO8X,OAClB4L,EAAS9jB,EAAGI,OAAO0jB,SACnBQ,EAAmBtkB,EAAGkE,uBACtBhE,EAAUF,EAAGI,OAAOF,UACpB+mB,EAAQjnB,EAAGI,OAAO6mB,MAAMjnB,EAAGI,OAAO4H,QAGpCzC,GAAUE,IAAK,GAAIqR,MAAO,GAAID,OAAQ,GAAIrR,KAAM,IAC9Cue,EAAY,KACZzX,EAAQtM,EAAGG,MAAMuQ,eACjBtJ,EAAQ,KACRC,EAAS,KACTwc,GAAa,EACb0K,EAAiB,MACjBrR,GAAY,EACZE,GAAY,EACZM,GAAkB,EAClB2G,GAA0B,EAG1BgK,GAAc,EACdtc,EAAQ/R,EAAGG,MAAM4R,QACjBoR,EAAe,KACfpM,EAAS,KACTrW,EAAWF,GAAGE,SAAQ,cAAgB,cAAe,cAAe,cAAe,aACnF+H,EAAW,GAIjB0U,GAAM9E,OAAM,UAAWW,YAAY,GACnCqE,EAAMhF,OAAOqF,EAAkB,QAAU,QAEzC8E,EAAMf,UAAS,GAAMhZ,SAAS,GAE9BvI,EAAQuL,eAAe,SAAStD,EAAGnF,GAC/B,MAAOqa,GAAM7E,aAAarQ,EAAGnF,KAC9B0I,gBAAgB,SAASvD,EAAGnF,GAC3B,MAAOma,GAAM3E
,aAAarQ,EAAGnF,KAGjCshB,EAAiBpkB,QAAQuL,eAAe,SAAStD,EAAGnF,GAChD,MAAOqa,GAAM7E,aAAarQ,EAAGnF,KAC9B0I,gBAAgB,SAASvD,EAAGnF,GAC3B,MAAOma,GAAM3E,aAAarQ,EAAGnF,IAQjC,IAAIyP,GAAczS,EAAGG,MAAMsS,YAAY/R,EAAU+H,GAE7Cya,EAAc,SAAS5e,GACvB,MAAO,YACH,OACI3B,OAAQ2B,EAAKmQ,IAAI,SAAStM,GAAK,OAAQA,EAAE0V,cAKjDoF,EAAc,SAAS3e,GACvB,MAAO,UAASyN,GACSlO,SAAjBkO,EAAMpP,QACN2B,EAAK2F,QAAQ,SAASmC,EAAOpJ,GACzBoJ,EAAOyR,UAAY9L,EAAMpP,OAAOK,MAudhD,OAhHAwf,GAAM9hB,SAASiB,GAAE,2BAA6B,SAASoc,GAC/CA,EAAI3R,OAAOgiB,gBACXluB,EAAQoE,KAAKyZ,GAAK7X,QAAO,KAIjCsc,EAAM9hB,SAASiB,GAAE,0BAA4B,SAASoc,GAClD7d,EAAQgG,QAAO,KAQnBpD,EAAMpC,SAAWA,EACjBoC,EAAM0f,MAAQA,EACd1f,EAAMghB,OAASA,EACfhhB,EAAMmkB,MAAQA,EACdnkB,EAAMqa,MAAQA,EACdra,EAAM8rB,OAAS3H,EAAM9J,MACrBra,EAAMua,MAAQA,EACdva,EAAM+rB,OAAS5H,EAAM5J,MACrBva,EAAMwhB,iBAAmBA,EACzBxhB,EAAM5C,QAAUA,EAChB4C,EAAMiP,MAAQA,EACdjP,EAAMpC,SAAWA,EACjBoC,EAAM8L,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKiC,GAE1CA,EAAMgM,SAAWC,OAAOC,WAEpB5H,OAAa6H,IAAK,WAAW,MAAO7H,IAAS8H,IAAK,SAASvG,GAAGvB,EAAMuB,IACpEtB,QAAa4H,IAAK,WAAW,MAAO5H,IAAU6H,IAAK,SAASvG,GAAGtB,EAAOsB,IACtEkb,YAAa5U,IAAK,WAAW,MAAO4U,IAAc3U,IAAK,SAASvG,GAAGkb,EAAWlb,IAC9E4lB,gBAAiBtf,IAAK,WAAW,MAAOsf,IAAkBrf,IAAK,SAASvG,GAAG4lB,EAAe5lB,IAC1FuU,WAAiBjO,IAAK,WAAW,MAAOiO,IAAahO,IAAK,SAASvG,GAAGuU,EAAUvU,IAChFyU,WAAenO,IAAK,WAAW,MAAOmO,IAAalO,IAAK,SAASvG,GAAGyU,EAAUzU,IAC9Ewa,cAAkBlU,IAAK,WAAW,MAAOkU,IAAgBjU,IAAK,SAASvG,GAAGwa,EAAaxa,IACvFoO,QAAY9H,IAAK,WAAW,MAAO8H,IAAU7H,IAAK,SAASvG,GAAGoO,EAAOpO,IAErE0lB,aAAiBpf,IAAK,WAAW,MAAOof,IAAenf,IAAK,SAASvG,GAAG0lB,EAAY1lB,IACpFmmB,aAAkB7f,IAAK,WAAW,MAAOgY,GAAM5f,UAAY6H,IAAK,SAASvG,GAAGse,EAAM5f,OAAOsB,KACzFomB,gBAAoB9f,IAAK,WAAW,MAAOgY,GAAM/J,aAAehO,IAAK,SAASvG,GAAGse,EAAM/J,UAAUvU,KACjGqmB,gBAAoB/f,IAAK,WAAW,MAAOgY,GAAM7J,aAAelO,IAAK,SAASvG,GAAGse,EAAM7J,UAAUzU,KACjG2e,aAAcrY,IAAK,WAAW,MAAOgY,GAAMK,eAAiBpY,IAAK,SAASvG,GAAGse,EAAMK,YAAY3e,KAG/FsmB,aAAchgB,IAAK,WAAW,MAAOgY,GAAM1hB,QAAS2J,IAAK,SAASvG,GAChD9E,SAAV8E,EAAElD,MACFF,EAAOE,IAAMkD,EAAElD,IACfse,EAAYpb,EAAElD,KAElBwhB,EAAM1hB,OAAOuR,MAAsBjT,SAAb8E,EAAEmO,MAAuBnO,EAAEmO,MAASmQ,EAAM1hB,OAAOuR,MACvEmQ,EAAM1hB,OAAOsR,OAAsBhT,SAAb8E,EAAEkO,OAAuBlO,EAAEkO,OAASoQ,EAAM1hB,OAAOsR,OACvEoQ,EAAM1hB,OAAOC,KAAsB3B,SAAb8E,EAAEnD,KAAuBmD,EAAEnD,KAASyhB,EAAM1hB,OAAOC,OAE3ED,QAAS0J,IAAK,WAAW,MAAO1J,IAAU2J,IAAK,SAASvG,GACpDpD,EAAOE,IAAsB5B,SAAb8E,EAAElD,IAAuBkD,EAAElD,IAASF,EAAOE,IAC3DF,EAAOuR,MAAsBjT,SAAb8E,EAAEmO,MAAuBnO,EAAEmO,MAASvR,EAAOuR,MAC3DvR,EAAOsR,OAAsBhT,SAAb8E,EAAEkO,OAAuBlO,EAAEkO,OAAStR,EAAOsR,OAC3DtR,EAAOC,KAAsB3B,SAAb8E,EAAEnD,KAAuBmD,EAAEnD,KAASD,EAAOC,OAE/DiD,UAAWwG,IAAK,WAAW,MAAOxG,IAAYyG,IAAK,SAASvG,GACxDF,EAAWE,EACX8J,EAAYW,MAAM3K,GAClB+Z,EAAM/Z,SAASA,GACfwe,EAAMxe,SAASA,GACf0U,EAAM1U,SAASA,GACf4U,EAAM5U,SAASA,KAEnB6D,OAAS2C,IAAK,WAAW,MAAO3C,IAAS4C,IAAK,SAASvG,GACnD2D,EAAQtM,EAAGG,MAAMsQ,SAAS9H,GAC1Bmb,EAAOxX,MAAMA,GACbkW,EAAMlW,MAAMA,GACZ2a,EAAM3a,MAAMA,KAEhB2b,aAAchZ,IAAK,WAAW,MAAOuT,GAAMyF,eAAiB/Y,IAAK,SAASvG,GACtE6Z,EAAMyF,YAAYtf,GAClBse,EAAMgB,YAAYtf,KAEtBuf,aAAcjZ,IAAK,WAAW,MAAOkO,GAAM3E,cAAgBtJ,IAAK,SAASvG,GACrEwU,EAAM3E,WAAW7P,GACjBse,EAAMiB,YAAYvf,KAEtBwf,aAAclZ,IAAK,WAAW,MAAOoO,GAAM7E,cAAgBtJ,IAAK,SAASvG,GACrE0U,EAAM7E,WAAW7P,GACjBse,EAAMkB,YAAYxf,KAEtBb,GAAImH,IAAK,WAAW,MAAOuT,GAAM1a,KAAOoH,IAAK,SAASvG,GAClD6Z,EAAM1a,EAAEa,GACRse,EAAMnf,EAAEa,KAEZ4G,GAAIN,IAAK,WAAW,MAAOuT,GAAMjT,KAAOL,IAAK,SAASvG,GAClD6Z,EAAMjT,EAAE5G,GACRse,EAAM1X,EAAE5G,KAEZ+U,iBAAkBzO,IAAK,WAAW,MAAOyO,IAAmBxO,IAAK,SAASvG,GACtE+U,EAAkB/U,EAClB0U,EAAMhF,OAAQqF,EAAkB,QAAU,UAE9C2G,yBAA0BpV,IAAK,WAAW,MAAOoV,IAA2BnV,IAAK,SAASvG,GACtF0b,EAA0B1b,EACtB0b,IACA7B,EAAMN,aAAY,GAC
lBM,EAAMoD,YAAW,QAK7B5lB,EAAGG,MAAMkW,eAAevT,EAAO0f,GAC/BxiB,EAAGG,MAAMqP,YAAY1M,GAEdA,GAGX9C,EAAGI,OAAO8uB,mBAAqB,WAC7B,MAAOlvB,GAAGI,OAAO4tB,YACdzoB,QAASsR,OAAQ,KACjBwX,aAAa,ICviBlBruB,EAAGI,OAAO+uB,iBAAmB,WACzB,YA0GA,SAASrsB,GAAMsB,GAkaX,MAjaAA,GAAUC,KAAK,SAASC,GA+QpB,QAAS4iB,GAAW/e,GAChB,GAAIvG,KAAW,KAALuG,GACNL,EAAIlG,EAAI,EAAI,GACZ2N,EAAI6f,EAAmB,CAC3B,OAAO,IAAM,GAAMtnB,EAAK,IAAMyH,EACxB,YAAc3N,EAAI,IAAO,IAAMkG,EAAK,KAAOyH,EAAI,GAC/C,KAAO,EAAIA,EAAI,GACf,YAAc3N,EAAI,IAAM,GAAMkG,EAAK,IAAO,EAAIyH,EAC9C,KACO,IAAMzH,EAAK,KAAOyH,EAAI,GAC7B,KAAO,EAAIA,EAAI,GACf,IAAO,IAAMzH,EAAK,KAAOyH,EAAI,GAC7B,KAAO,EAAIA,EAAI,GAIzB,QAAS4X,KACAC,EAAMC,SAASD,EAAM1H,OAAO4H,GACjCC,GACKjjB,MAAM8iB,EAAMC,QAAUgI,EAAG1oB,SAAW2gB,IACpCjjB,KAAK,SAAS8D,EAAEnF,GACb,GAAIwkB,GAAY6H,EAAGlnB,EAAE,IAAMknB,EAAG5oB,QAAQ,GAClCghB,EAAa4H,EAAG5oB,QAAQ,GAAK4oB,EAAGlnB,EAAE,GACtC3H,IAAG2G,OAAOpG,MAAMoG,OAAM,SACjBQ,KAAI,QAAuB,EAAZ6f,EAAgB,EAAIA,GAExChnB,GAAG2G,OAAOpG,MAAMoG,OAAM,UACjBQ,KAAI,IAAM0nB,EAAGlnB,EAAE,KACfR,KAAI,QAAuB,EAAb8f,EAAiB,EAAIA,KAIpD,QAASC,KACLJ,EAAcF,EAAMC,QAAU,KAAOD,EAAM1H,SAC3CA,EAAS0H,EAAMC,QAAUgI,EAAG1oB,SAAWygB,EAAM1H,SAC7Chf,EAAS0mB,OAAO1H,OAAQA,EAAQ0H,MAAOA,IACvCD,IAGAjB,EACK9e,MAAMzB,GACN0B,OAAOioB,GACPhjB,MAAMhI,EAAKmQ,IAAI,SAAStM,EAAEnF,GACvB,MAAOmF,GAAEmE,OAASA,EAAMnE,EAAGnF,KAC5ByJ,OAAO,SAAStE,EAAEnF,GAAK,OAAQsB,EAAKtB,GAAG6a,UAAYvZ,EAAKtB,GAAGusB,OAElE/M,EACKpb,MAAMzB,GACN0B,OAAOioB,GACPhjB,MAAMhI,EAAKmQ,IAAI,SAAStM,EAAEnF,GACvB,MAAOmF,GAAEmE,OAASA,EAAMnE,EAAGnF,KAC5ByJ,OAAO,SAAStE,EAAEnF,GAAK,OAAQsB,EAAKtB,GAAG6a,WAAavZ,EAAKtB,GAAGusB,MAEnE,IAAIC,GAAgBrX,GAAEhR,OAAM,0BACvByW,MAAO6R,GAASrtB,OACbqtB,GACKhb,IAAI,SAAStM,EAAEnF,GACZ,OACIwJ,IAAKrE,EAAEqE,IACP3D,OAAQV,EAAEU,OAAO4D,OAAO,SAAStE,EAAEnF,GAC/B,MAAOkjB,GAAKpe,IAAIK,EAAEnF,IAAM0c,EAAO,IAAMwG,EAAKpe,IAAIK,EAAEnF,IAAM0c,EAAO,UANrD7W,aAY5BslB,EAAiBhW,GAAEhR,OAAM,2BACxByW,MAAM8R,EAAYC,MAAe9mB,YAC3B8mB,GACCljB,OAAO,SAASmjB,GAAY,OAAQA,EAAS/R,WAC7CpJ,IAAI,SAAStM,EAAEnF,GACX,OACI0qB,KAAMvlB,EAAEulB,KACRH,YAAaplB,EAAEolB,YACfD,YAAanlB,EAAEmlB,YACf9gB,IAAKrE,EAAEqE,IACP3D,OAAQV,EAAEU,OAAO4D,OAAO,SAAStE,EAAEnF,GAC/B,MAAOwf,GAAM1a,IAAIK,EAAEnF,IAAM0c,EAAO,IAAM8C,EAAM1a,IAAIK,EAAEnF,IAAM0c,EAAO,QAQnF5X,GADA2nB,GAASrtB,SAAWytB,EAChB3J,EAAK9f,SAELoc,EAAMpc,SAGd+W,EACK7U,MAAMR,GACNid,OAAQ/kB,EAAGG,MAAMuU,WAAW/O,EAAe,IAAKrB,IAChDwZ,UAAUwR,EAAkB,GAEjCnS,EAAMxW,QAAQ+C,KAAKyiB,KAAKzM,EAAO,IAAKhW,KAAKwB,MAAMwU,EAAO,MAEtDvH,GAAEhR,OAAM,iBAAkBgH,aAAa1F,SAASmkB,GAC3CxrB,KAAK+b,GAGVqS,EAAcrhB,aAAa1F,SAASmkB,GAAoBxrB,KAAK8kB,GAC7DiI,EAAehgB,aAAa1F,SAASmkB,GAAoBxrB,KAAKohB,GAG9DrK,GAAEhR,OAAM,2BACHQ,KAAI,YAAc,eAAiBimB,EAAGnnB,QAAQ,GAAK,KAExDqpB,EACKxnB,MAAMslB,GACN7I,OAAQ/kB,EAAGG,MAAM2U,WAAWwa,EAAiB,GAAIhrB,IACjDwZ,UAAUnY,EAAgB,GAC/BkpB,EACKvmB,MAAMynB,GACNhL,OAAQ/kB,EAAGG,MAAM2U,WAAWwa,EAAiB,GAAIhrB,IAGlDurB,EAGAhB,EAAO/Q,SAAS6R,GAAUvtB,OAAS,GAAKuD,EAAgB,GAFxDkpB,EAAO/Q,SAAS2R,GAASrtB,OAAS,GAAKuD,EAAgB,EAM3D,IAAIqqB,GAAcP,GAASrtB,OAAS,EAAI,EACpC6tB,EAAeN,GAAUvtB,SAAWstB,EAAYC,IAAa,EAAI,EAEjEO,EAAYL,EAAmBI,EAAeD,EAC9CG,EAAYN,EAAmBG,EAAcC,CAEjD9X,IAAEhR,OAAM,4BACHsD,MAAK,UAAYylB,GACtB/X,GAAEhR,OAAM,4BACHsD,MAAK,UAAY0lB,GACjBxoB,KAAI,YAAc,aAAeG,EAAErB,QAAQ,GAAK,OAErD0R,GAAEhR,OAAM,4BAA6BgH,aAAa1F,SAASmkB,GACtDxrB,KAAK0uB,GACV3X,GAAEhR,OAAM,4BAA6BgH,aAAa1F,SAASmkB,GACtDxrB,KAAKytB,GAzZd,GAAI3nB,GAAY1G,GAAG2G,OAAOpG,KAE1Bf,GAAGG,MAAMsW,QAAQvP,EACjB,IAAIvB,GAAiB3F,EAAGG,MAAMwF,eAAeyB,EAAOF,EAAW3B,GAC3D+pB,EAAmBtvB,EAAGG,MAAMyF,gBAAgByB,EAAQH,EAAW3B,IACxD8oB,EAAcS,EAAc,GACnCM,EAAmBN,EAAcsB,EAAQ3qB,IAAM2qB,EAAQvZ,MAa3D,IAXA/T,EAAMqR,OAAS,WAAajN,EAAUiH,aAAa1F,SAASmkB,GAAoBxrB,
KAAK0B,IACrFA,EAAMoE,UAAYnG,KAElBgR,EACKmC,OAAO+O,EAAY3e,GAAOxB,EAAMqR,QAChCH,OAAOkP,EAAY5e,IACnB6P,SAGLpC,EAAM8L,SAAWvZ,EAAKmQ,IAAI,SAAStM,GAAK,QAASA,EAAE0V,YAE9CsF,EAAc,CACf,GAAI3W,EACJ2W,KACA,KAAK3W,IAAOuF,GACJA,EAAMvF,YAAgBtL,OACtBiiB,EAAa3W,GAAOuF,EAAMvF,GAAKrL,MAAM,GAErCgiB,EAAa3W,GAAOuF,EAAMvF,GAKtC,KAAKlI,GAASA,EAAKlC,QAAWkC,EAAKmI,OAAO,SAAStE,GAAK,MAAOA,GAAEU,OAAOzG,SAAUA,QAE9E,MADApC,GAAGG,MAAM4W,OAAOjU,EAAOoE,GAChBpE,CAEPoE,GAAUK,UAAS,cAAec,QAItC,IAAIonB,IAAWnrB,EAAKmI,OAAO,SAAStE,GAAK,OAAQA,EAAE0V,UAAY1V,EAAEonB,MAC7DI,GAAYrrB,EAAKmI,OAAO,SAAStE,GAAK,OAAQA,EAAEonB,KAGhDznB,GADA2nB,GAASrtB,SAAWytB,EAChB3J,EAAK9f,SAELoc,EAAMpc,SAGdipB,EAAKT,EAAOtmB,QAGZslB,EAAKiC,EAAmBrN,EAAMtH,SAAWgL,EAAKhL,SAC9C6U,EAAKF,EAAmB3J,EAAKhL,SAAWsH,EAAMtH,SAC9CmV,EAAKR,EAAmBS,EAAOpV,SAAWqV,EAAMrV,SAChDsV,EAAKX,EAAmBU,EAAMrV,SAAWoV,EAAOpV,QAEhD,IAAIuV,IAAUnsB,EACTmI,OAAO,SAAStE,GAAK,OAAQA,EAAE0V,WAAagS,GAAoB1nB,EAAEonB,IAAMpnB,EAAEonB,OAC1E9a,IAAI,SAAStM,GACV,MAAOA,GAAEU,OAAO4L,IAAI,SAAStM,EAAEnF,GAC3B,OAAS8E,EAAGmS,EAAK9R,EAAEnF,GAAIuM,EAAGsS,EAAK1Z,EAAEnF,QAIzC0tB,GAAUpsB,EACTmI,OAAO,SAAStE,GAAK,OAAQA,EAAE0V,WAAagS,EAAmB1nB,EAAEonB,KAAOpnB,EAAEonB,OAC1E9a,IAAI,SAAStM,GACV,MAAOA,GAAEU,OAAO4L,IAAI,SAAStM,EAAEnF,GAC3B,OAAS8E,EAAGmS,EAAK9R,EAAEnF,GAAIuM,EAAGsS,EAAK1Z,EAAEnF,OAI7C8E,GAAErB,OAAO,EAAGd,IAEZ0pB,EAAK1oB,OAAOnG,GAAGkf,OAAOlf,GAAGmf,MAAM8Q,GAAQ/uB,OAAOgvB,KAAW,SAASvoB,GAAK,MAAOA,GAAEL,KAC3ErB,OAAO,EAAGd,GAGf,IAAI2B,IAAOJ,EAAUK,UAAS,4BAA6BjD,MAAMA,IAC7DkZ,GAASlW,GAAKG,QAAQC,OAAM,KAAMC,KAAI,QAAU,+BAA+BD,OAAM,KACrFyQ,GAAI7Q,GAAKH,OAAM,IAEnBqW,IAAO9V,OAAM,KAAMC,KAAI,QAAU,gBAGjC,IAAI2mB,IAAa9Q,GAAO9V,OAAM,KAAMC,KAAI,QAAU,WAClD2mB,IAAW5mB,OAAM,KAAMC,KAAI,QAAU,gBACrC2mB,GAAW5mB,OAAM,KAAMC,KAAI,QAAU,iBACrC2mB,GAAW5mB,OAAM,KAAMC,KAAI,QAAU,iBACrC2mB,GAAW5mB,OAAM,KAAMC,KAAI,QAAU,eACrC2mB,GAAW5mB,OAAM,KAAMC,KAAI,QAAU,eAGrC,IAAIgpB,IAAenT,GAAO9V,OAAM,KAAMC,KAAI,QAAU,aAapD,IAZAgpB,GAAajpB,OAAM,KAAMC,KAAI,QAAU,gBACvCgpB,GAAajpB,OAAM,KAAMC,KAAI,QAAU,iBACvCgpB,GAAajpB,OAAM,KAAMC,KAAI,QAAU,iBACvCgpB,GAAajpB,OAAM,KAAMC,KAAI,QAAU,eACvCgpB,GAAajpB,OAAM,KAAMC,KAAI,QAAU,gBACvCgpB,GAAajpB,OAAM,KAAMC,KAAI,QAAU,sBACvCgpB,GAAajpB,OAAM,KAAMC,KAAI,QAAU,iBAMlCkc,EAEE,CACH,GAAI6H,IAAc5H,EAAOkH,QAAUrlB,EAAiB,EAAIA,EACpDirB,GAAkB9M,EAAOkH,QAAUU,GAAc,CAErD5H,GAAO1c,MAAMskB,IAEbvT,GAAEhR,OAAM,kBACHyW,MAAMtZ,EAAKmQ,IAAI,SAASrI,GAOrB,MANAA,GAAOykB,YAAqChtB,SAAvBuI,EAAOykB,YAA4BzkB,EAAOI,IAAMJ,EAAOykB,YACzEhB,EACCzjB,EAAOI,IAAMJ,EAAOykB,aAAezkB,EAAOmjB,IAAMuB,EAAsBC,GAEtE3kB,EAAOI,IAAMJ,EAAOykB,aAAezkB,EAAOmjB,IAAMwB,EAAqBD,GAElE1kB,KAEVhL,KAAK0iB,GAELC,GAAaD,EAAOzc,WAAa9B,EAAOE,MACzCF,EAAOE,IAAMqe,EAAOzc,SAEpBioB,EAAmBtvB,EAAGG,MAAMyF,gBAAgByB,EAAQH,EAAW3B,GAAUupB,GAG7E3W,GAAEhR,OAAM,kBACHQ,KAAI,YAAc,aAAeipB,GAAkB,KAAQrrB,EAAOE,IAAK,SA1B5E0S,IAAEhR,OAAM,kBAAmBI,UAAS,KAAMc,QA6B9Cf,IAAKK,KAAI,YAAc,aAAepC,EAAOC,KAAO,IAAMD,EAAOE,IAAM,KAOvE0S,GAAEhR,OAAM,eAAgBsD,MAAK,UAAY4jB,EAAc,UAAY,QAEnEkC,EACKnpB,MAAMzB,GACN0B,OAAO+nB,GACP9iB,MAAMhI,EAAKmQ,IAAI,SAAUtM,EAAGnF,GACzB,MAAOmF,GAAEmE,OAASA,EAAMnE,EAAGnF,KAC5ByJ,OAAO,SAAUtE,EAAGnF,GACnB,OAAQsB,EAAKtB,GAAG6a,UAAYvZ,EAAKtB,GAAGusB,OAE5Ce,EACKlpB,MAAMzB,GACN0B,OAAO+nB,GACP9iB,MAAMhI,EAAKmQ,IAAI,SAAUtM,EAAGnF,GACzB,MAAOmF,GAAEmE,OAASA,EAAMnE,EAAGnF,KAC5ByJ,OAAO,SAAUtE,EAAGnF,GACnB,OAAQsB,EAAKtB,GAAG6a,WAAavZ,EAAKtB,GAAGusB,MAG7C,IAAIyB,IAAY7Y,GAAEhR,OAAM,4BACnByW,MAAM6R,GAASrtB,OAASqtB,KACpB5mB,aAELooB,GAAa9Y,GAAEhR,OAAM,6BACpByW,MAAM8R,EAAYC,MACV9mB,YACF8mB,GAAUljB,OAAO,SAASmjB,GACxB,OAAQA,EAAS/R,WAG9B1F,IAAEhR,OAAM,eACHQ,KAAI,YAAc,gBAAmB2nB,EAAmB/pB,EAAOsR,OAASuZ,EAAQ3qB,KAAO,KAE5FurB,GAAU7i
B,aAAa/M,KAAKmvB,GAC5BU,GAAW9iB,aAAa/M,KAAKkvB,GAGzBvB,IACAH,EACK7J,OAAQ/kB,EAAGG,MAAMuU,WAAW/O,EAAiB,IAAKrB,IAClDwZ,UAAUsR,EAAkB,GACjCjX,GAAEhR,OAAM,6BACHQ,KAAI,YAAc,eAAiB0oB,EAAG5pB,QAAQ,GAAK,KACxD0R,GAAEhR,OAAM,6BAA8BgH,aACjC/M,KAAKwtB,IAGVI,IACAkC,EACK5oB,MAAM+nB,GACNtL,OAAQqK,EAAmB,IAC3BtR,UAAWnY,EAAgB,GAChCwrB,EACK7oB,MAAMkoB,GACNzL,OAAQqK,EAAmB,IAC3BtR,SAAS2R,GAASrtB,OAAS,GAAKuD,EAAgB,GAErDwS,GAAEhR,OAAM,8BACHsD,MAAK,UAAYglB,GAASrtB,OAAS,EAAI,GACvCuF,KAAI,YAAc,eAAiB0nB,EAAG5oB,QAAQ,GAAK,KACxD0R,GAAEhR,OAAM,8BACHsD,MAAK,UAAYklB,GAAUvtB,OAAS,EAAI,GACxCuF,KAAI,YAAc,aAAe0nB,EAAG5oB,QAAQ,GAAK,OAEtD0R,GAAEhR,OAAM,8BAA+BgH,aAClC/M,KAAK8vB,GACV/Y,GAAEhR,OAAM,8BAA+BgH,aAClC/M,KAAK+vB,IAId/J,EAAMtf,EAAEunB,GAAI1tB,GAAE,QAAU+lB,GAEpBJ,GAAaF,EAAM1H,OAAO4H,EAE9B,IAAIC,IAAUpP,GAAEhR,OAAM,uBAAwBI,UAAS,KAClDjD,MAAMgjB,GAAeF,EAAM1H,WAE5BoI,GAAeP,GAAQ9f,QACtBC,OAAM,IAEXogB,IAAapgB,OAAM,QACdC,KAAI,QAAU,QACdA,KAAI,IAAM,GACVA,KAAI,IAAM,GACVA,KAAI,SAAWynB,GAEpBtH,GAAapgB,OAAM,QACdC,KAAI,QAAU,SACdA,KAAI,IAAM,GACVA,KAAI,IAAM,GACVA,KAAI,SAAWynB,EAEpB,IAAIrH,IAAS5P,GAAEhR,OAAM,kBAChB/F,KAAKgmB,EACVW,IAAOxgB,UAAS,QAEXI,KAAI,SAAWynB,GACpBrH,GAAOxgB,UAAS,WAAYG,OAAM,QAASC,KAAI,IAAMuf,GAMrDpD,EAAOpjB,SAASiB,GAAE,cAAgB,SAASqjB,GACvC,IAAK,GAAIxY,KAAOwY,GACZjT,EAAMvF,GAAOwY,EAASxY,EAC1B9L,GAASoiB,YAAY/Q,GACrBjP,EAAMqR,WAIVzT,EAASiB,GAAE,cAAgB,SAASC,GACN,mBAAfA,GAAEic,WACTvZ,EAAK2F,QAAQ,SAASmC,EAAOpJ,GACzBoJ,EAAOyR,SAAWjc,EAAEic,SAAS7a,KAEjC+O,EAAM8L,SAAWjc,EAAEic,UAEvB/a,EAAMqR,WAsJVuT,MAIG5kB,EAtgBX,GA4BM4c,GAEA5X,EACAunB,EACAzB,EACAmC,EACAM,EACAG,EAnCFhO,EAAQxiB,EAAGI,OAAO4H,OAChBsoB,EAAStwB,EAAGI,OAAO4H,OACnBke,EAAOlmB,EAAGI,OAAOosB,gBACjB+D,EAAQvwB,EAAGI,OAAOosB,gBAClBrP,EAAQnd,EAAGI,OAAO8X,OAClB0W,EAAS5uB,EAAGI,OAAO8X,OACnB4X,EAAS9vB,EAAGI,OAAO8X,OACnB2W,EAAS7uB,EAAGI,OAAO8X,OACnBgZ,EAASlxB,EAAGI,OAAO8X,OACnBiZ,EAASnxB,EAAGI,OAAO8X,OACnB4L,EAAS9jB,EAAGI,OAAO0jB,SACnBsD,EAAQ5mB,GAAG0V,IAAIkR,QACflnB,EAAUF,EAAGI,OAAOF,UAGtBqF,GAAUE,IAAK,GAAIqR,MAAO,GAAID,OAAQ,GAAIrR,KAAM,IAC9Cue,EAAY,KACZqM,GAAW3qB,IAAK,EAAGqR,MAAO,GAAID,OAAQ,GAAIrR,KAAM,IAChD4B,EAAQ,KACRC,EAAS,KACT4S,EAAO,SAAS9R,GAAK,MAAOA,GAAEL,GAC9B+Z,EAAO,SAAS1Z,GAAK,MAAOA,GAAEoH,GAC9BjD,EAAQtM,EAAGG,MAAMuQ,eACjBmT,GAAa,EACbwK,GAAc,EACdW,GAAiB,EACjBD,GAAiB,EACjBD,EAAc,GAEdxH,EAAc,KAOdvQ,EAAS,KACTrW,EAAWF,GAAGE,SAAQ,QAAU,cAAe,eAC/CksB,EAAqB,EACrB7a,EAAQ/R,EAAGG,MAAM4R,QACjBoR,EAAe,KACf4N,EAAqB,eACrBD,EAAsB,gBACtBjB,GAAmB,CAGzBrN,GAAMf,UAAS,GACf6O,EAAOpO,aAAY,GAEnBoO,EAAOtI,YAAY,SAAS7f,GAAK,OAAO,IACxCgV,EAAM9E,OAAM,UAAWW,YAAY,GACnC8W,EAAOzX,OAAM,QACbwW,EAAOxW,OAAM,SACbuW,EAAOvW,OAAM,UAAWW,YAAY,GACpCkY,EAAO7Y,OAAM,QACb8Y,EAAO9Y,OAAM,SAEbnY,EAAQsL,eAAc,GAAME,gBAAgB,SAASvD,EAAGnF,GACpD,MAAOma,GAAM3E,aAAarQ,EAAGnF,IAOjC,IAAIouB,GAAc,WACd,MAAOvB,IACCwB,KAAMxC,EAAQ5H,MAAOkK,IACrBE,KAAMvB,EAAQ7I,MAAOiK,IAG7BI,EAAe,WACf,MAAOzB,IACCwB,KAAMvB,EAAQ7I,MAAOiK,IACrBG,KAAMxC,EAAQ5H,MAAOkK,IAG7BjO,EAAc,SAAS5e,GACvB,MAAO,YACH,OACI3B,OAAQ2B,EAAKmQ,IAAI,SAAStM,GAAK,OAAQA,EAAE0V,cAKjDoF,EAAc,SAAS3e,GACvB,MAAO,UAASyN,GACSlO,SAAjBkO,EAAMpP,QACN2B,EAAK2F,QAAQ,SAASmC,EAAOpJ,GACzBoJ,EAAOyR,UAAY9L,EAAMpP,OAAOK,OAK5C0sB,EAAc,SAASprB,GACzB,MAAOA,GAAKgP,MAAM,SAASlH,GACzB,MAAOA,GAAOyR,WAojBlB,OAvIA2E,GAAM9hB,SAASiB,GAAE,2BAA6B,SAASoc,GACnD7d,EACKuI,SAAS,KACTgD,eAAe,SAAStD,EAAGnF,GACxB,MAAOsuB,KAAeD,KAAK7Y,aAAarQ,EAAGnF,KAE9CsB,KAAKyZ,GACL7X,QAAO,KAGhBsc,EAAM9hB,SAASiB,GAAE,0BAA4B,SAASoc,GAClD7d,EAAQgG,QAAO,KAGnBggB,EAAKxlB,SAASiB,GAAE,2BAA6B,SAASoc,GAClDA,EAAI/R,MAAQlJ,EAAMgF,IAAIiW,EAAIzZ,MAC1ByZ,EAAW,QACP/R,MAAOlJ,EAAMyM,IAAIwO,EAAIzZ,MACrBgI,MAAOyR,EAAIzR,OAEfpM,EACKuI,SAAS,GACTgD,eAAe,SA
[Remainder of hunk omitted: machine-generated base64-VLQ source-map mapping data from a deleted minified JavaScript web-UI asset; no human-readable content.]
B,EACtBnvB,EAAM,EACNH,EAAM,GAGNG,IAAQH,IACRG,GAAY,EACZH,GAAY,EAEhB,IAAIqS,GAAIkb,EAAQnqB,OAAO,SAAUmf,GAAK,MAAOA,GAAEkL,WAAa3uB,GAC3C,KAAbuT,EAAEtZ,SAEEu2B,GACAnvB,EAAM+F,EAAEpH,GAAGxB,SAAS,GACpB0C,EAAMkG,EAAEpH,GAAGxB,SAAS,KAGd+U,EAAE,GAAGqb,YAAcM,GACzB7tB,EAAMA,EAAMkS,EAAE,GAAGgE,OAAO,GAAKhE,EAAE,GAAGgE,OAAO,GAAKlW,EAC9CH,EAAMA,EAAMqS,EAAE,GAAGgE,OAAO,GAAKhE,EAAE,GAAGgE,OAAO,GAAKrW,GAGzCqS,EAAE,GAAGub,SACV5tB,EAAMA,EAAMqS,EAAE,GAAGgE,OAAO,GAAKhE,EAAE,GAAGgE,OAAO,GAAKrW,EAC9C2tB,EAAkB7uB,GAAKoH,EAAEpH,GAAGxB,SAAS,GACrC0vB,GAA2B,IAKnC9mB,EAAEpH,GAAK3H,GAAG8H,MAAMC,SACX5B,QAAQ6C,EAAKH,IACb5C,OAAgC,IAAxBb,EAAkB,IAAW,IAE1C0wB,KACA/mB,EAAEpH,GAAGif,MAAQ5mB,GAAG0V,IAAIkR,QAAQ7X,EAAEA,EAAEpH,IAAIxG,GAAE,aAAey1B,GAAYz1B,GAAE,QAAUylB,GAAOzlB,GAAE,WAAag2B,IAIvG,IAAIrwB,GAAOJ,EAAUK,UAAS,oCAAqCjD,MAAMA,IACrEkD,EAAYF,EAAKG,QAAQC,OAAM,KAAMC,KAAI,QAAU,uCACnD6V,EAAShW,EAAUE,OAAM,KACzByQ,EAAI7Q,EAAKH,OAAM,IAEnBqW,GAAO9V,OAAM,KAAMC,KAAI,QAAU,qCACjC6V,EAAO9V,OAAM,KAAMC,KAAI,QAAU,qCACjC6V,EAAO9V,OAAM,KAAMC,KAAI,QAAU,4CAEjCL,EAAKK,KAAI,YAAc,aAAepC,EAAOC,KAAO,IAAMD,EAAOE,IAAM,KAEvEuC,EAAKigB,YAAW,YAAa2Q,QAAQC,GACrC3gB,EAAKG,OAAM,OACX,IAMIme,GAAmBC,EANnBqC,EAAWt4B,GAAG6iB,SAASC,OACd3hB,GAAE,YAAc8gB,GAChB9gB,GAAE,OAAS+gB,GACX/gB,GAAE,UAAYkhB,GAIvBngB,EAAOoF,EAAErB,QAAQ,GAAKqB,EAAErB,QAAQ,EAEpC,IADA/D,EAAOyN,MAAMzN,GAAQoF,EAAErB,QAAQ,GAAK/D,GAC/ByN,MAAMzN,GAAO,CACd,GAAIq2B,IAAY,EAAIr2B,EAAO,EAAGkD,EAAkB,GAAID,EAAiBjD,EAAO,EAAGkD,EAAkB,GACjG4wB,GAAoBlvB,EAAKH,OAAM,sBAAuBI,UAAS,QAASjD,MAAMy0B,IAC9EvC,EAAkB/uB,QAAQC,OAAM,QAChC8uB,EAAkBpuB,OAAOC,SACzBmuB,EAAkB7uB,KAAI,KAAO,SAASQ,GAAK,MAAOA,GAAE,KAC3CR,KAAI,KAAO,SAASQ,GAAK,MAAOA,GAAE,KAClCR,KAAI,KAAO,SAASQ,GAAK,MAAOA,GAAE,KAClCR,KAAI,KAAO,SAASQ,GAAK,MAAOA,GAAE,KAG3CsuB,EAAwBnvB,EAAKH,OAAM,sBAAuBI,UAAS,QAASjD,MAAM00B,IAClFvC,EAAsB/uB,OAAM,QAASpD,MAAM00B,IAC3CvC,EAAsBhvB,QAAQC,OAAM,QACpC+uB,EAAsBruB,OAAOC,SAC7BouB,EAAsB9uB,KAAI,IAAM/B,GAEvB+B,KAAI,IAAMhC,EAAiB,GAAKjD,EAAO,GACvCwP,KAAK,SAAS/J,GAAK,MAAOA,KAGvC+vB,EAAa5wB,EAAKH,OAAM,eAAgBI,UAAS,QAASjD,KAAKA,GAC/D4zB,EAAWzwB,QAAQC,OAAM,QACzBwwB,EAAW9vB,OAAOC,SAClB6vB,EAAWvwB,KAAI,IAAM6rB,GAGrBiE,EAAanwB,EAAKH,OAAM,eAAgBI,UAAS,QAASjD,KAAKA,GAC/DmzB,EAAWhwB,QAAQC,OAAM,QACzB+vB,EAAWrvB,OAAOC,SAClBovB,EAAW9vB,KAAI,IAAM6rB,GAChB/oB,MAAK,eAAiB,SAAUtC,EAAGnF,GACY,MAA5CmN,OAAMhI,EAAEmlB,eAAgBnlB,EAAEmlB,YAAc,GAAWnlB,EAAEmlB,cACxD3lB,KAAI,SAAW,SAAUQ,EAAGnF,GAAK,MAAOmF,GAAEmE,OAASA,EAAMnE,EAAGnF,KACjEy0B,EAAW91B,GAAE,YAAc,SAAUwG,EAAGnF,GACpCxC,GAAG2G,OAAOpG,MAAM2J,QAAO,SAAU,GAAMD,MAAK,eAAiBtC,EAAEmlB,YAAc,EAAI,MAAM7iB,MAAK,iBAAmB,GAC/G/J,EAASsb,kBACLK,MAAOlU,EAAE7F,KACTgK,MAAOnE,EAAEmE,OAASA,EAAMnE,EAAGnF,GAC3B6F,OAAQV,EAAEU,OACVquB,WAAYd,MAIpBqB,EAAW91B,GAAE,WAAa,SAAUwG,EAAGnF,GACnCxC,GAAG2G,OAAOpG,MAAM2J,QAAO,SAAU,GAAOD,MAAK,eAAiBtC,EAAEmlB,YAAc,MAAM7iB,MAAK,iBAAmB,IAC5G/J,EAASsF,iBACLqW,MAAOlU,EAAE7F,KACT8G,MAAOpG,MAGfy0B,EAAW91B,GAAE,YAAc,SAAUwG,EAAGnF,GACpCtC,EAASmG,qBAEb4wB,EAAW91B,GAAE,QAAU,SAAUwG,GAC7BzH,EAASqG,cACLwD,GAAIpC,EAAEoC,OAId2sB,EAAa/e,EAAE5Q,UAAS,cAAejD,KAAK8xB,EAC5C,IAAI6C,GAAkB/B,EAAWzvB,QAAQC,OAAM,KAAMC,KAAI,QAAU,mCAEnEuvB,GAAWvvB,KAAI,YAAc,SAASQ,GAAK,MAAO,aAAeL,EAAEK,EAAEqE,KAAO,QAC5EysB,EAAgBvxB,OAAM,KAAMC,KAAI,QAAU,WAG1CsxB,EAAgBvxB,OAAM,QACjBC,KAAI,QAAU,YACd8C,MAAK,SAAW,QAChB9C,KAAI,KAAO,QACXA,KAAI,cAAgB,UACpBhG,GAAE,YAAc,SAASwG,EAAGnF,GACzBtC,EAASsb,kBACLK,MAAOlU,EAAEjI,SAAWiI,EAAEqE,IACtBF,MAAOnE,EAAEmE,UAGhB3K,GAAE,WAAa,SAASwG,EAAGnF,GACxBtC,EAASsF,iBACLqW,MAAOlU,EAAEjI,YAGhByB,GAAE,YAAc,SAAUwG,EAAGnF,GAC1BtC,EAASmG,qBAEZzF,KAAK03B,GAEVG,EAAgBvxB,OAAM,KAAMC,KAAI,QAAU,sBAC1CuvB,EAAW9uB,OAAOC,SAClB6uB,EAAW/v
B,OAAM,aAAc+K,KAAK,SAAU/J,GAAK,MAAOA,GAAEqE,MAG5DkqB,EAAaW,EAEb,IAAIC,GAAUC,EAAe9qB,OAAO,SAAUN,GAAK,OAAQoD,EAAEpD,GAAGib,MAAMC,UAC9DmQ,EAAUF,EAAQ7iB,IAAI,SAAUtI,GAAK,MAAOoD,GAAEpD,GAAGib,MAAM1H,WAC3DwZ,GAAev2B,EAAOxB,MAAM,EAGhCwB,MACA80B,EAAWhtB,MAAK,UAAY,SAAUtC,GAClC,GAAIuvB,GAAWJ,EAAQhkB,MAAM,SAAUnH,EAAGnJ,GACtC,OAAKmN,MAAMhI,EAAEU,OAAOsD,KAAOgE,MAAMuH,WAAWvP,EAAEU,OAAOsD,OAASqrB,EAAQx0B,GAAG,IAAMuM,EAAEpD,GAAGib,MAAM7X,IAAI5I,SAAS,IAC5F,EAEH6wB,EAAQx0B,GAAG,IAAMmF,EAAEU,OAAOsD,IAAMhE,EAAEU,OAAOsD,IAAMqrB,EAAQx0B,GAAG,KAAQmN,MAAMuH,WAAWvP,EAAEU,OAAOsD,MAIxG,OAFIurB,IACA/0B,EAAOc,KAAK0E,GACRuvB,EAAoB,KAAT,UAInBd,EAAQx0B,OAAS,IAAMpC,EAAGG,MAAM2X,YAAYnV,EAAQu2B,MACrDx4B,EAASy4B,cAAcx2B,KAmKvBG,EAhaX,GAkBM20B,GACAS,EACAhB,EApBF3xB,GAAUE,IAAK,GAAIqR,MAAO,EAAGD,OAAQ,GAAIrR,KAAM,GAC7C4B,EAAQ,KACRC,EAAS,KACT1B,EAAiB,KACjBC,EAAkB,KAClBkC,EAAItH,GAAG8H,MAAMsI,UACbrB,KACAypB,EAAuB,mBACvBP,KACArC,KACAmB,KACAF,GAAe,EACf/qB,EAAQtM,EAAGG,MAAMuQ,eACjBkmB,KACAj0B,KACAq1B,KACA1B,KACAuC,EAAc,EAId7wB,EAAOxH,GAAG0V,IAAIlO,OACdkQ,EAAO1X,GAAG0V,IAAIgC,OACdxX,EAAWF,GAAGE,SAAQ,aAAe,QAAS,WAAY,kBAAmB,cAAe,eAAgB,mBAAoB,kBAAmB,mBAAoB,YAAa,iBAOtL+R,EAAczS,EAAGG,MAAMsS,YAAY/R,EAkcvC,OAzDAoC,GAAMpC,SAAWA,EACjBoC,EAAM8L,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKiC,GAE1CA,EAAMgM,SAAWC,OAAOC,WAEpB5H,OAAgB6H,IAAK,WAAW,MAAO7H,IAAmB8H,IAAK,SAASvG,GAAGvB,EAAOuB,IAClFtB,QAAgB4H,IAAK,WAAW,MAAO5H,IAAmB6H,IAAK,SAASvG,GAAGtB,EAAQsB,IACnF8vB,eAAiBxpB,IAAK,WAAc,MAAOwpB,IAAkBvpB,IAAK,SAAUvG,GAAK8vB,EAAgB9vB,IACjG0uB,cAAgBpoB,IAAK,WAAc,MAAOooB,IAAiBnoB,IAAK,SAAUvG,GAAK0uB,EAAe1uB,IAC9FiuB,SAAW3nB,IAAK,WAAc,MAAO2nB,IAAY1nB,IAAK,SAAUvG,GAAKiuB,EAAUjuB,IAC/EhG,QAAUsM,IAAK,WAAc,MAAOtM,IAAWuM,IAAK,SAAUvG,GAAKhG,EAASgG,IAC5EkwB,aAAgB5pB,IAAK,WAAW,MAAO4pB,IAAmB3pB,IAAK,SAASvG,GAAGkwB,EAAclwB,IACzFqwB,sBAAwB/pB,IAAK,WAAW,MAAO+pB,IAAwB9pB,IAAK,SAASvG,GAAGqwB,EAAqBrwB,IAG7GuuB,YAAajoB,IAAK,WAAc,MAAOwpB,GAAchkB,IAAI,SAAUtM,GAAG,MAAOA,GAAEqE,OAAU0C,IAAK,SAAUvG,GAEpG3I,EAAGqC,WAAU,aAAe,6BACC,IAAzBo2B,EAAcr2B,OACduG,EAAEsB,QAAQ,SAAU2hB,GAAK6M,EAAch1B,MAAO+I,IAAKof,MAEnDjjB,EAAEsB,QAAQ,SAAU2hB,EAAG5oB,GAAKy1B,EAAcz1B,GAAGwJ,IAAKof,MAG1D2L,gBAAiBtoB,IAAK,WAAc,MAAOwpB,GAAchkB,IAAI,SAAUtM,GAAG,MAAOA,GAAEqE,OAAU0C,IAAK,SAAUvG,GAExG3I,EAAGqC,WAAU,iBAAmB,6BAChCk1B,KAC6B,IAAzBkB,EAAcr2B,OACduG,EAAEsB,QAAQ,SAAU2hB,GAAK6M,EAAch1B,MAAO+I,IAAKof,MAEnDjjB,EAAEsB,QAAQ,SAAU2hB,EAAG5oB,GAAKy1B,EAAcz1B,GAAGwJ,IAAMof,MAI3DwN,kBAAmBnqB,IAAK,WAAc,MAAOwpB,GAAchkB,IAAI,SAAUtM,GAAK,MAAOA,GAAEwE,UAAcuC,IAAK,SAAUvG,GAEhH3I,EAAGqC,WAAU,mBAAqB,6BACL,IAAzBo2B,EAAcr2B,OACduG,EAAEsB,QAAQ,SAAUyR,GAAK+c,EAAch1B,MAAOkJ,OAAQ+O,MAEtD/S,EAAEsB,QAAQ,SAAUyR,EAAG1Y,GAAKy1B,EAAcz1B,GAAG2J,OAAS+O,MAK9DnW,QAAS0J,IAAK,WAAW,MAAO1J,IAAU2J,IAAK,SAASvG,GACpDpD,EAAOE,IAAuB5B,SAAb8E,EAAElD,IAAuBkD,EAAElD,IAASF,EAAOE,IAC5DF,EAAOuR,MAAuBjT,SAAb8E,EAAEmO,MAAuBnO,EAAEmO,MAASvR,EAAOuR,MAC5DvR,EAAOsR,OAAuBhT,SAAb8E,EAAEkO,OAAuBlO,EAAEkO,OAAStR,EAAOsR,OAC5DtR,EAAOC,KAAuB3B,SAAb8E,EAAEnD,KAAuBmD,EAAEnD,KAASD,EAAOC,OAEhE8G,OAAS2C,IAAK,WAAW,MAAO3C,IAAS4C,IAAK,SAASvG,GACnD2D,EAAQtM,EAAGG,MAAMsQ,SAAS9H,OAGlC3I,EAAGG,MAAMqP,YAAY1M,GACdA,GCzeX9C,EAAGI,OAAOi5B,yBAA2B,WAC7B,YAsEA,SAASv2B,GAAMsB,GAgJX,MA/IAqO,GAAYW,QACZX,EAAYrS,OAAO+1B,GAEnB/xB,EAAUC,KAAK,SAASC,GACpB,GAAI4C,GAAY1G,GAAG2G,OAAOpG,KAC1Bf,GAAGG,MAAMsW,QAAQvP,EAEjB,IAEIvB,GAAiB3F,EAAGG,MAAMwF,eAAeyB,EAAOF,EAAW3B,GAC3DK,EAAkB5F,EAAGG,MAAMyF,gBAAgByB,EAAQH,EAAW3B,EAmBnE,IAjBCzC,EAAMqR,OAAS,WAAajN,EAAU9F,KAAK0B,IAC3CA,EAAMoE,UAAYnG,KAElBgR,EAAMmC,OAAO+O,EAAYwV,GAAgB31B,EAAMqR,QAC1CH,OAAOkP,EAAYuV,IACnBtkB,SAGLpC,EAAM8L,SAAW4a,EAAchkB,IAAI,SAAUtM,GAAK,QAASA,EAAE0V,WA
G7D4a,EAAgBA,EAAchkB,IAAI,SAAUtM,GAA+B,MAA3BA,GAAE0V,WAAa1V,EAAE0V,SAAiB1V,IAClFswB,EAAcxuB,QAAQ,SAAU9B,EAAGnF,GAC/BmF,EAAEmxB,iBAAmBnpB,MAAMhI,EAAEmxB,kBAAoBt2B,EAAImF,EAAEmxB,iBACvDnxB,EAAEiwB,gBAAkBjoB,MAAMhI,EAAEiwB,iBAAmBp1B,EAAImF,EAAEiwB,mBAGrDjV,EAAc,CACd,GAAI3W,EACJ2W,KACA,KAAI3W,IAAOuF,GACJA,EAAMvF,YAAgBtL,OACrBiiB,EAAa3W,GAAOuF,EAAMvF,GAAKrL,MAAM,GAErCgiB,EAAa3W,GAAOuF,EAAMvF,GAKtC,IAAIlI,IAASA,EAAKlC,OAEd,MADApC,GAAGG,MAAM4W,OAAOjU,EAAOoE,GAChBpE,CAEPoE,GAAUK,UAAS,cAAec,QAMtC,IAAIf,GAAOJ,EAAUK,UAAS,yCAA0CjD,MAAMA,IAC1EkZ,EAASlW,EAAKG,QAAQC,OAAM,KAAMC,KAAI,QAAU,4CAA4CD,OAAM,KAElGyQ,EAAI7Q,EAAKH,OAAM,IAEnBqW,GAAO9V,OAAM,KAAMC,KAAI,QAAU,8BACjC6V,EAAO9V,OAAM,KAAMC,KAAI,QAAU,iBAEjCwQ,EAAEhR,OAAM,QACHQ,KAAI,QAAUhC,GACdgC,KAAI,SAAY/B,EAAkB,EAAKA,EAAkB,GAGzDie,GAGDC,EAAO1c,MAAMzB,GACR2G,MAAM,SAAUnE,GAAK,MAAO,qBAEjCgQ,EAAEhR,OAAM,kBACHyW,MAAM6a,EAAc7iB,KAAK,SAAU9F,EAAGuO,GAAK,MAAOvO,GAAEwpB,iBAAmBjb,EAAEib,oBACzEl4B,KAAK0iB,GAELC,GAAaD,EAAOzc,WAAa9B,EAAOE,MACzCF,EAAOE,IAAMqe,EAAOzc,SACpBzB,EAAkB5F,EAAGG,MAAMyF,gBAAgByB,EAAQH,EAAW3B,IAElE+B,EAAKH,OAAM,kBACPQ,KAAI,YAAc,kBAAqBpC,EAAOE,IAAO,MAdzD0S,EAAEhR,OAAM,kBAAmBI,UAAS,KAAMc,SAgB9Cf,EAAKK,KAAI,YAAc,aAAepC,EAAOC,KAAO,IAAMD,EAAOE,IAAM,KAGvE0wB,EACK/uB,MAAMzB,GACN0B,OAAOzB,GACP6yB,cAAcA,GACdpB,aAAaA,EAExB,IAAIkC,GAA0BphB,EAAEhR,OAAM,gCAC7ByW,MAAMtZ,EAEfi1B,GAAwBprB,aAAa/M,KAAK+0B,GAM1CA,EAAoBz1B,SAASiB,GAAE,WAAa,SAAUgB,EAAQi1B,GACtDA,GACAP,GAAe,EACf32B,EAASm3B,SAASl1B,IAGlB00B,GAAe,IAIvBvT,EAAOpjB,SAASiB,GAAE,cAAgB,SAASqjB,GACvC,IAAI,GAAIxY,KAAOwY,GACXjT,EAAMvF,GAAOwY,EAASxY,EAE1B9L,GAASoiB,YAAY/Q,GACrBjP,EAAMqR,WAIVgiB,EAAoBz1B,SAASiB,GAAE,kBAAoB,SAAUC,GACzD62B,EAAc7iB,KAAK,SAAU9F,EAAGuO,GAAK,MAAOvO,GAAEsoB,gBAAkB/Z,EAAE+Z,iBAClE,IAAIoB,IAAW,CACff,GAAcxuB,QAAQ,SAAU9B,EAAGnF,GAC/BmF,EAAEiwB,gBAAkBp1B,EAChBmF,EAAEiwB,kBAAoBjwB,EAAEmxB,mBACxBE,GAAW,KAEnB94B,EAAS23B,gBAAgBI,EAAee,KAItC94B,EAASiB,GAAE,cAAgB,SAAUC,GAEP,mBAAfA,GAAEic,WACT4a,EAAcxuB,QAAQ,SAAUmC,EAAQpJ,GACpCoJ,EAAOyR,SAAWjc,EAAEic,SAAS7a,KAEjC+O,EAAM8L,SAAWjc,EAAEic,UAEvB/a,EAAMqR,aAId1B,EAAYS,UAAS,sCACdpQ,EAjNX,GAAIqzB,GAAsBn2B,EAAGI,OAAO+1B,sBAChCrS,EAAS9jB,EAAGI,OAAO0jB,SACnB5jB,EAAUF,EAAGI,OAAOF,UAGpBqF,GAFmBvF,EAAGI,OAAOF,WAElBuF,IAAK,EAAGqR,MAAO,EAAGD,OAAQ,EAAGrR,KAAM,IAChDue,EAAY,KACZ3c,EAAQ,KACRC,EAAS,KACTwc,GAAa,EACbvX,EAAQtM,EAAGG,MAAMuQ,eACjBqB,EAAQ/R,EAAGG,MAAM4R,QACjB0mB,KACApB,GAAe,EACflU,EAAe,KACfpM,EAAS,KACT0iB,EAAW,YACX/4B,EAAWF,GAAGE,SAAQ,kBAAoB,WAAY,cAAe,cAAe,aAUlF+R,EAAczS,EAAGG,MAAMsS,YAAY/R,GAEnCwiB,EAAc,SAAS5e,GACvB,MAAO,YACH,OACI3B,OAAQ2B,EAAKmQ,IAAI,SAAStM,GAAK,OAAQA,EAAE0V,cAKjDoF,EAAc,SAAS3e,GACvB,MAAO,UAASyN,GACQlO,SAAjBkO,EAAMpP,QACL2B,EAAK2F,QAAQ,SAASmC,EAAQpJ,GAC1BoJ,EAAOyR,UAAY9L,EAAMpP,OAAOK,MA2PhD,OArPA9C,GAAQ6K,iBAAiB,SAASzG,GAC9B,GAAIo1B,GAAM,iFAAmFp1B,EAAKgI,MAAQ,4BAA8BhI,EAAKkI,IAAM,6BAUnJ,OAT0B,KAAvBlI,EAAK8H,OAAOhK,SAEXs3B,GAAY,2CACZp1B,EAAK8H,OAAOnC,QAAQ,SAAS9B,GACzBuxB,EAAMA,EAAM,mEAAqEvxB,EAAEmE,MAAQ,gCAAkCnE,EAAEqE,IAAM,0BAA4BrE,EAAE6D,MAAQ,eAE/K0tB,GAAY,YAEhBA,GAAY,aA+JhBvD,EAAoBz1B,SAASiB,GAAE,2BAA6B,SAAUoc,GAClE,GAAI4b,IACAntB,IAAKuR,EAAI1B,MACT/P,MAAOyR,EAAIzR,MACXF,UAED2R,GAAIlV,SACHkG,OAAOyZ,KAAKzK,EAAIlV,QAAQoB,QAAQ,SAAU9B,GACtC,GAAIyxB,GAAM7b,EAAImZ,WAAWzqB,OAAO,SAAUotB,GAAK,MAAOA,GAAGrtB,MAAQrE,IAAK,EACtE,IAAGyxB,EAAG,CACF,GAAI1wB,EAEAA,GADAiH,MAAM4N,EAAIlV,OAAOV,KAAOgI,MAAMuH,WAAWqG,EAAIlV,OAAOV,KAChDsxB,EAEAG,EAAIjtB,OAAOoR,EAAIlV,OAAOV,IAE9BwxB,EAAGvtB,OAAO3I,MAAO8hB,IAAKqU,EAAIxB,gBAAiB5rB,IAAKrE,EAAG6D,MAAO9C,EAAGoD,MAAOstB,EAAIttB,WAGhFqtB,EAAGvtB,OAAOwJ,KAAK,SAAS9F,EAAEuO,GAAI,MAAOvO,GAAEyV,IAAMlH,EAAEkH,OAEnDrlB,EAAQoE,KA
AKq1B,GAAIzzB,QAAO,KAG5BiwB,EAAoBz1B,SAASiB,GAAE,0BAA4B,SAASoc,GAChE7d,EAAQgG,QAAO,KAGnBiwB,EAAoBz1B,SAASiB,GAAE,2BAA6B,WACxDzB,MAOJ4C,EAAMpC,SAAWA,EACjBoC,EAAMqzB,oBAAsBA,EAC5BrzB,EAAMghB,OAASA,EACfhhB,EAAM5C,QAAUA,EAChB4C,EAAM8L,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKiC,GAE1CA,EAAMgM,SAAWC,OAAOC,WAEpB5H,OAAS6H,IAAK,WAAc,MAAO7H,IAAU8H,IAAK,SAAUvG,GAAKvB,EAAQuB,IACzEtB,QAAU4H,IAAK,WAAc,MAAO5H,IAAW6H,IAAK,SAAUvG,GAAKtB,EAASsB,IAC5Ekb,YAAc5U,IAAK,WAAc,MAAO4U,IAAe3U,IAAK,SAAUvG,GAAKkb,EAAalb,IACxFwa,cAAgBlU,IAAK,WAAc,MAAOkU,IAAiBjU,IAAK,SAAUvG,GAAKwa,EAAexa,IAC9F8vB,eAAiBxpB,IAAK,WAAc,MAAOwpB,IAAkBvpB,IAAK,SAAUvG,GAAK8vB,EAAgB9vB,IACjG0uB,cAAgBpoB,IAAK,WAAc,MAAOooB,IAAiBnoB,IAAK,SAAUvG,GAAK0uB,EAAe1uB,IAC9FoO,QAAU9H,IAAK,WAAc,MAAO8H,IAAW7H,IAAK,SAAUvG,GAAKoO,EAASpO,IAC5E8wB,UAAYxqB,IAAK,WAAc,MAAOwqB,IAAavqB,IAAK,SAAUvG,GAAK8wB,EAAW9wB,IAGlFpD,QACI0J,IAAK,WAAc,MAAO1J,IAC1B2J,IAAK,SAAUvG,GACG9E,SAAV8E,EAAElD,MACFF,EAAOE,IAAMkD,EAAElD,IACfse,EAAYpb,EAAElD,KAElBF,EAAOuR,MAAoBjT,SAAZ8E,EAAEmO,MAAsBnO,EAAEmO,MAAQvR,EAAOuR,MACxDvR,EAAOsR,OAAsBhT,SAAb8E,EAAEkO,OAAuBlO,EAAEkO,OAAStR,EAAOsR,OAC3DtR,EAAOC,KAAkB3B,SAAX8E,EAAEnD,KAAqBmD,EAAEnD,KAAOD,EAAOC,OAG7D8G,OAAQ2C,IAAK,WAAW,MAAO3C,IAAS4C,IAAK,SAASvG,GAC9C2D,EAAQtM,EAAGG,MAAMsQ,SAAS9H,GAC1Bmb,EAAOxX,MAAMA,GACb6pB,EAAoB7pB,MAAMA,OAItCtM,EAAGG,MAAMkW,eAAevT,EAAOqzB,GAC/Bn2B,EAAGG,MAAMqP,YAAY1M,GAEdA,GC1Sf9C,EAAGI,OAAO05B,IAAM,WACZ,YA2CA,SAASh3B,GAAMsB,GA+TX,MA9TAqO,GAAYW,QACZhP,EAAUC,KAAK,SAASC,GAgTpB,QAASy1B,GAASjqB,EAAGyV,GACjBzV,EAAEkqB,SAAW7pB,MAAML,EAAEkqB,UAAY,EAAIlqB,EAAEkqB,SACvClqB,EAAEmqB,WAAa9pB,MAAML,EAAEmqB,YAAc,EAAInqB,EAAEmqB,WACtCC,IAAOpqB,EAAEqqB,YAAc,EAC5B,IAAIn3B,GAAIxC,GAAGynB,YAAYlnB,KAAKq5B,SAAUtqB,EAEtC,OADA/O,MAAKq5B,SAAWp3B,EAAE,GACX,SAAUgT,GACb,MAAOqkB,GAAK9U,GAAKviB,EAAEgT,KAtT3B,GAAIrQ,GAAiByB,EAAQ7B,EAAOC,KAAOD,EAAOuR,MAC5ClR,EAAkByB,EAAS9B,EAAOE,IAAMF,EAAOsR,OAC/CyS,EAAS5f,KAAKF,IAAI7D,EAAgBC,GAAmB,EACrD00B,KACAC,IAIN,IADArzB,EAAY1G,GAAG2G,OAAOpG,MACI,IAAtBy5B,EAAWp4B,OAGX,IAAK,GAFDq4B,GAAQnR,EAASA,EAAS,EAC1BoR,EAAQC,EAAarR,EAChBtmB,EAAI,EAAGA,EAAIsB,EAAK,GAAGlC,OAAQY,IAChCs3B,EAAgB72B,KAAKg3B,GACrBF,EAAgB92B,KAAKi3B,OAGtBE,IACCN,EAAkBE,EAAW/lB,IAAI,SAAUtM,GAAK,OAAQA,EAAEsyB,MAAQtyB,EAAEsyB,MAAQ,GAAKnR,IACjFiR,EAAkBC,EAAW/lB,IAAI,SAAUtM,GAAK,OAAQA,EAAEuyB,MAAQvyB,EAAEuyB,MAAQ,GAAKpR,IACjFqR,EAAan6B,GAAGgJ,IAAIgxB,EAAW/lB,IAAI,SAAUtM,GAAK,MAAQA,GAAEuyB,MAAQvyB,EAAEuyB,MAAQ,OAE9EJ,EAAkBE,EAAW/lB,IAAI,SAAUtM,GAAK,MAAOA,GAAEsyB,MAAQnR,IACjEiR,EAAkBC,EAAW/lB,IAAI,SAAUtM,GAAK,MAAOA,GAAEuyB,MAAQpR,IACjEqR,EAAan6B,GAAGgJ,IAAIgxB,EAAW/lB,IAAI,SAAUtM,GAAK,MAAOA,GAAEuyB,SAGnE16B,GAAGG,MAAMsW,QAAQvP,EAGjB,IAAII,GAAOJ,EAAUK,UAAS,mBAAoBjD,KAAKA,GACnDkD,EAAYF,EAAKG,QAAQC,OAAM,KAAMC,KAAI,QAAO,gCAAoC4C,GACpFiT,EAAShW,EAAUE,OAAM,KACzByQ,EAAI7Q,EAAKH,OAAM,KACf0zB,EAAQrd,EAAO9V,OAAM,KAAMC,KAAI,QAAU,SAC7C6V,GAAO9V,OAAM,KAAMC,KAAI,QAAU,gBAEjCL,EAAKK,KAAI,YAAc,aAAepC,EAAOC,KAAO,IAAMD,EAAOE,IAAM,KACvE0S,EAAEhR,OAAM,WAAYQ,KAAI,YAAc,aAAehC,EAAiB,EAAI,IAAMC,EAAkB,EAAI,KACtGuS,EAAEhR,OAAM,iBAAkBQ,KAAI,YAAc,aAAehC,EAAiB,EAAI,IAAMC,EAAkB,EAAI,KAG5GsB,EAAUvF,GAAE,QAAU,SAASwG,EAAEnF,GAC7BtC,EAAS8gB,YACLld,KAAM6D,EACNiB,MAAOpG,EACPiK,IAAKzM,GAAGuE,MACRwF,GAAIA,MAIZ8vB,KACAS,IACA,KAAK,GAAI93B,GAAI,EAAGA,EAAIsB,EAAK,GAAGlC,OAAQY,IAAK,CAErC,GAAI+3B,GAAMv6B,GAAG0V,IAAI6kB,MAAMC,YAAYV,EAAgBt3B,IAC/Ci4B,EAAUz6B,GAAG0V,IAAI6kB,MAAMC,YAAYV,EAAgBt3B,GAAK,EAExDi3B,MAAe,IACfc,EAAId,WAAWA,GACfgB,EAAQhB,WAAWA,IAEnBD,KAAa,IACbe,EAAIf,SAASA,GACbiB,EAAQjB,SAASA,IAEjBE,IACAa,EAAIZ,YAAYI,EAAgBv3B,IAChCi4B,EAAQd,YAAYI,EAAgBv3B,KAGpC+3B,EAAIG,cAAgBA,IACpBH,EAAIG,aAAaA,GACjBD,EAAQC,aA
AaA,IAGzBb,EAAK52B,KAAKs3B,GACVD,EAASr3B,KAAKw3B,GAIlB,GAAInB,GAAMt5B,GAAGmoB,OAAOmR,MACflkB,KAAK,MACL5J,MAAM,SAAS7D,GAAK,MAAOA,GAAE0V,SAAW,EAAIgE,EAAK1Z,IAGlD2xB,GAAIqB,UAAYA,GAChBrB,EAAIqB,SAASA,GAIbjB,GAASxZ,IACTma,EAAMnzB,OAAM,QAASC,KAAI,QAAU,gBAEnCL,EAAKH,OAAM,iBACNsD,MAAK,cAAgB,UACrByH,KAAK,SAAU/J,GACZ,MAAOuY,KAEVjW,MAAK,YAAef,KAAKF,IAAI7D,EAAgBC,GAAoB+0B,EAAa,GAAKja,EAAMte,OAAS,GAAK,MACvGuF,KAAI,KAAO,UACXA,KAAI,YAAc,SAASQ,EAAGnF,GAC3B,MAAO,gBAAiBo4B,EAAc,MAIlD,IAAIC,GAAS/zB,EAAKH,OAAM,WAAYI,UAAS,aAAcjD,KAAKw1B,GAC5DwB,EAAYh0B,EAAKH,OAAM,iBAAkBI,UAAS,aAAcjD,KAAKw1B,EAEzEuB,GAAOjzB,OAAOC,SACdizB,EAAUlzB,OAAOC,QAEjB,IAAIkzB,GAAKF,EAAO5zB,QAAQC,OAAM,IAC9B6zB,GAAG5zB,KAAI,QAAU,YACjB4zB,EAAG55B,GAAE,YAAc,SAASwG,EAAGnF,GAC3BxC,GAAG2G,OAAOpG,MAAM2J,QAAO,SAAU,GAC7BkwB,GACAp6B,GAAG2G,OAAOpG,MAAMoG,OAAM,QAASgH,aAC1B1F,SAAS,IACTd,KAAI,IAAMmzB,EAAS93B,IAE5BtC,EAASsb,kBACL1X,KAAM6D,EAAE7D,KACR8E,MAAOpG,EACPsJ,MAAO9L,GAAG2G,OAAOpG,MAAM0J,MAAK,QAC5BiC,SAAUvE,EAAE6xB,SAAW7xB,EAAE8xB,aAAe,EAAIvwB,KAAK+P,QAGzD8hB,EAAG55B,GAAE,WAAa,SAASwG,EAAGnF,GAC1BxC,GAAG2G,OAAOpG,MAAM2J,QAAO,SAAU,GAC7BkwB,GACAp6B,GAAG2G,OAAOpG,MAAMoG,OAAM,QAASgH,aAC1B1F,SAAS,IACTd,KAAI,IAAM0yB,EAAKr3B,IAExBtC,EAASsF,iBAAiB1B,KAAM6D,EAAE7D,KAAM8E,MAAOpG,MAEnDu4B,EAAG55B,GAAE,YAAc,SAASwG,EAAGnF,GAC3BtC,EAASmG,kBAAkBvC,KAAM6D,EAAE7D,KAAM8E,MAAOpG,MAEpDu4B,EAAG55B,GAAE,QAAU,SAASwG,EAAGnF,GACvB,GAAIojB,GAAUrlB,IACdL,GAASqG,cACLzC,KAAM6D,EAAE7D,KACR8E,MAAOpG,EACPsJ,MAAO9L,GAAG2G,OAAOpG,MAAM0J,MAAK,QAC5B1F,MAAOvE,GAAGuE,MACVqhB,QAASA,MAGjBmV,EAAG55B,GAAE,WAAa,SAASwG,EAAGnF,GAC1BtC,EAAS4lB,iBACLhiB,KAAM6D,EAAE7D,KACR8E,MAAOpG,EACPsJ,MAAO9L,GAAG2G,OAAOpG,MAAM0J,MAAK,YAIpC4wB,EAAO1zB,KAAI,OAAS,SAASQ,EAAEnF,GAAK,MAAOsJ,GAAMnE,EAAE7D,KAAMtB,KACzDq4B,EAAO1zB,KAAI,SAAW,SAASQ,EAAEnF,GAAK,MAAOsJ,GAAMnE,EAAE7D,KAAMtB,IAE/Cu4B,GAAG7zB,OAAM,QAASrD,KAAK,SAAS8D,GACxCpH,KAAKq5B,SAAWjyB,GASpB,IANAkzB,EAAOl0B,OAAM,QACRgH,aACA1F,SAASA,GACTd,KAAI,IAAM,SAAUQ,EAAGnF,GAAK,MAAOq3B,GAAKr3B,GAAGmF,KAC3CqzB,UAAS,IAAMzB,GAEhB0B,EAAY,CAGZ,IAAK,GADDC,MACK14B,EAAI,EAAGA,EAAIsB,EAAK,GAAGlC,OAAQY,IAChC04B,EAAUj4B,KAAK42B,EAAKr3B,IAEhB24B,EACIzB,IACAwB,EAAU14B,GAAKxC,GAAG0V,IAAI6kB,MAAMC,YAAYX,EAAKr3B,GAAGg4B,eAC5Cf,KAAe,GAAOyB,EAAU14B,GAAGi3B,WAAWA,GAC9CD,KAAa,GAAO0B,EAAU14B,GAAGg3B,SAASA,IAE1CE,GACJwB,EAAU14B,GAAGm3B,YAAY,EAIrCmB,GAAU7zB,QAAQC,OAAM,KAAMgD,QAAO,YAAY,GAAMrG,KAAK,SAAS8D,EAAEnF,GACnE,GAAI44B,GAAQp7B,GAAG2G,OAAOpG,KAEtB66B,GAAMj0B,KAAI,YAAc,SAAUQ,EAAGnF,GACjC,GAAI64B,EAAoB,CACpB1zB,EAAE6yB,YAAcV,EAAgBt3B,GAAK,GACrCmF,EAAEgyB,YAAcG,EAAgBt3B,GAAK,EACrC,IAAI84B,IAAe3zB,EAAE8xB,WAAa9xB,EAAE6xB,UAAY,GAAK,IAAMtwB,KAAK+P,GAMhE,QALKtR,EAAE8xB,WAAa9xB,EAAE6xB,UAAY,EAAItwB,KAAK+P,GACvCqiB,GAAe,GAEfA,GAAe,GAEZ,aAAeJ,EAAU14B,GAAG+4B,SAAS5zB,GAAK,YAAc2zB,EAAc,IAI7E,MAFA3zB,GAAE6yB,YAAc1R,EAAS,GACzBnhB,EAAEgyB,YAAc7Q,EAAS,GAClB,aAAeoS,EAAU14B,GAAG+4B,SAAS5zB,GAAK,MAIzDyzB,EAAMl0B,OAAM,QACP+C,MAAK,SAAW,QAChBA,MAAK,OAAS,QACd9C,KAAI,KAAO,GACXA,KAAI,KAAO,GAEhBi0B,EAAMl0B,OAAM,QACP+C,MAAK,cAAgBoxB,GAAuB1zB,EAAE8xB,WAAa9xB,EAAE6xB,UAAY,EAAItwB,KAAK+P,GAAK,QAAU,MAAS,UAC1GhP,MAAK,OAAS,SAGvB,IAAIuxB,MACAC,EAAY,GACZC,GAAW,IACXC,GAAgB,SAASC,GACzB,MAAO1yB,MAAKwB,MAAMkxB,EAAY,GAAGF,IAAYA,GAAW,IAAMxyB,KAAKwB,MAAMkxB,EAAY,GAAGH,GAAaA,GAErGI,GAAqB,SAASl0B,GAC9B,OAAQA,EAAE6xB,SAAW7xB,EAAE8xB,aAAe,EAAIvwB,KAAK+P,IAGnD6hB,GAAU9oB,gBAAgBC,EAAa,cAAc9K,KAAI,YAAc,SAAUQ,EAAGnF,GAChF,GAAI64B,EAAoB,CACpB1zB,EAAE6yB,YAAcV,EAAgBt3B,GAAK,GACrCmF,EAAEgyB,YAAcG,EAAgBt3B,GAAK,EACrC,IAAI84B,IAAe3zB,EAAE8xB,WAAa9xB,EAAE6xB,UAAY,GAAK,IAAMtwB,KAAK+P,GAMhE,QALKtR,EAAE8xB,WAAa9xB,EAAE6xB,UAAY,EAAItwB,KAAK+P,G
ACvCqiB,GAAe,GAEfA,GAAe,GAEZ,aAAeJ,EAAU14B,GAAG+4B,SAAS5zB,GAAK,YAAc2zB,EAAc,IAE7E3zB,EAAE6yB,YAAc1R,EAAS,GACzBnhB,EAAEgyB,YAAc7Q,EAAS,EAOzB,IAAIgT,GAASZ,EAAU14B,GAAG+4B,SAAS5zB,GAC/BuE,EAAU2vB,GAAmBl0B,EACjC,IAAIA,EAAE6D,OAASU,GAAW6vB,EAAgB,CACtC,GAAIC,GAAUL,GAAcG,EACxBN,GAAkBQ,KAClBF,EAAO,IAAML,GAEjBD,EAAkBG,GAAcG,KAAW,EAE/C,MAAO,aAAeA,EAAS,MAIvChB,EAAUn0B,OAAM,kBACXsD,MAAK,cAAgB,SAAStC,EAAEnF,GAE7B,MAAO64B,IAAuB1zB,EAAE8xB,WAAa9xB,EAAE6xB,UAAY,EAAItwB,KAAK+P,GAAK,QAAU,MAAS,WAE/FvH,KAAK,SAAS/J,EAAGnF,GACd,GAAI0J,GAAU2vB,GAAmBl0B,GAC7BkU,EAAQ,EACZ,KAAKlU,EAAE6D,OAAmBuwB,EAAV7vB,EAA0B,MAAO,EAEjD,IAAwB,kBAAd+vB,GACNpgB,EAAQogB,EAAUt0B,EAAGnF,GACjBwJ,IAAOyN,EAAK9R,EAAE7D,MACd0H,MAAS6V,EAAK1Z,EAAE7D,MAChBoI,QAAW6Z,EAAY7Z,SAG3B,QAAQ+vB,GACJ,IAAK,MACDpgB,EAAQpC,EAAK9R,EAAE7D,KACf,MACJ,KAAK,QACD+X,EAAQkK,EAAY1E,EAAK1Z,EAAE7D,MAC3B,MACJ,KAAK,UACD+X,EAAQ7b,GAAGmM,OAAM,KAAMD,GAInC,MAAO2P,QAwBvB5J,EAAYS,UAAS,iBACdpQ,EApWX,GAAIyC,IAAUE,IAAK,EAAGqR,MAAO,EAAGD,OAAQ,EAAGrR,KAAM,GAC3C4B,EAAQ,IACRC,EAAS,IACT4S,EAAO,SAAS9R,GAAK,MAAOA,GAAEL,GAC9B+Z,EAAO,SAAS1Z,GAAK,MAAOA,GAAEoH,GAC9BhF,EAAKb,KAAKwB,MAAsB,IAAhBxB,KAAKyB,UACrBjE,EAAY,KACZoF,EAAQtM,EAAGG,MAAMuQ,eACjB6V,EAAc/lB,GAAGmM,OAAM,QACvB8uB,GAAa,EACbE,GAAgB,EAChBc,EAAY,MACZF,EAAiB,IACjBrC,GAAQ,EACRxZ,GAAQ,EACRka,GAAc,EACdQ,EAAc,EACdS,GAAqB,EACrB5B,GAAa,EACbkB,GAAW,EACXnB,GAAW,EACXkB,EAAe,EACfP,EAAa,GACblyB,EAAW,IACX+xB,KACA95B,EAAWF,GAAGE,SAAQ,aAAe,eAAgB,kBAAmB,mBAAoB,kBAAmB,mBAAoB,aAGrI25B,KACAS,KAMAroB,EAAczS,EAAGG,MAAMsS,YAAY/R,EAwYvC,OAhEAoC,GAAMpC,SAAWA,EACjBoC,EAAM8L,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKiC,GAE1CA,EAAMgM,SAAWC,OAAOC,WAEpBwrB,YAAcvrB,IAAK,WAAc,MAAOurB,IAAetrB,IAAK,SAAUvG,GAAK6xB,EAAa7xB,IACxFvB,OAAa6H,IAAK,WAAW,MAAO7H,IAAS8H,IAAK,SAASvG,GAAGvB,EAAMuB,IACpEtB,QAAa4H,IAAK,WAAW,MAAO5H,IAAU6H,IAAK,SAASvG,GAAGtB,EAAOsB,IACtE8yB,YAAaxsB,IAAK,WAAW,MAAOwsB,IAAcvsB,IAAK,SAASvG,GAAG8yB,EAAW9yB,IAC9E+X,OAAazR,IAAK,WAAW,MAAOyR,IAASxR,IAAK,SAASvG,GAAG+X,EAAM/X,IACpEyyB,aAAiBnsB,IAAK,WAAW,MAAOmsB,IAAelsB,IAAK,SAASvG,GAAGyyB,EAAYzyB,IACpF4zB,gBAAiBttB,IAAK,WAAW,MAAOstB,IAAkBrtB,IAAK,SAASvG,GAAG4zB,EAAe5zB,IAC1F4d,aAAiBtX,IAAK,WAAW,MAAOsX,IAAerX,IAAK,SAASvG,GAAG4d,EAAY5d,IACpFb,GAAamH,IAAK,WAAW,MAAOgL,IAAQ/K,IAAK,SAASvG,GAAGsR,EAAKtR,IAClE4B,IAAa0E,IAAK,WAAW,MAAO1E,IAAM2E,IAAK,SAASvG,GAAG4B,EAAG5B,IAC9DqxB,UAAa/qB,IAAK,WAAW,MAAO+qB,IAAY9qB,IAAK,SAASvG,GAAGqxB,EAASrxB,IAC1EsxB,YAAahrB,IAAK,WAAW,MAAOgrB,IAAc/qB,IAAK,SAASvG,GAAGsxB,EAAWtxB,IAC9EwyB,UAAalsB,IAAK,WAAW,MAAOksB,IAAYjsB,IAAK,SAASvG,GAAGwyB,EAASxyB,IAC1EuyB,cAAejsB,IAAK,WAAW,MAAOisB,IAAgBhsB,IAAK,SAASvG,GAAGuyB,EAAavyB,IACpFgyB,YAAe1rB,IAAK,WAAW,MAAO0rB,IAAczrB,IAAK,SAASvG,GAAGgyB,EAAWhyB,IAChFgzB,eAAgB1sB,IAAK,WAAW,MAAO0sB,IAAiBzsB,IAAK,SAASvG,GAAGgzB,EAAchzB,IACvFkzB,oBAAqB5sB,IAAK,WAAW,MAAO4sB,IAAsB3sB,IAAK,SAASvG,GAAGkzB,EAAmBlzB,IACtGuxB,OAAqBjrB,IAAK,WAAW,MAAOirB,IAAShrB,IAAK,SAASvG,GAAGuxB,EAAMvxB,IAC5EiyB,aAAqB3rB,IAAK,WAAW,MAAO2rB,IAAe1rB,IAAK,SAASvG,GAAGiyB,EAAYjyB,IAGxF+zB,kBAAmBztB,IAAK,WAAW,MAAO0sB,IAAiBzsB,IAAK,SAASvG,GACrEgzB,EAAchzB,EACd3I,EAAGqC,WAAU,mBAAqB,+BAGtCs6B,oBAAqB1tB,IAAK,WAAW,MAAO0sB,IAAiBzsB,IAAK,SAASvG,GACvEgzB,EAAchzB,EACd3I,EAAGqC,WAAU,qBAAuB,+BAGxCu6B,aAAc3tB,IAAK,WAAY,MAAOsX,IAAerX,IAAK,SAASvG,GAC/D4d,EAAY5d,EACZ3I,EAAGqC,WAAU,cAAa,6BAI9BkD,QAAS0J,IAAK,WAAW,MAAO1J,IAAU2J,IAAK,SAASvG,GACpDpD,EAAOE,IAA4B,mBAAZkD,GAAElD,IAAwBkD,EAAElD,IAASF,EAAOE,IACnEF,EAAOuR,MAA4B,mBAAZnO,GAAEmO,MAAwBnO,EAAEmO,MAASvR,EAAOuR,MACnEvR,EAAOsR,OAA4B,mBAAZlO,GAAEkO,OAAwBlO,EAAEkO,OAAStR,EAAOsR,OACnEtR,EAAOC,KAA4B,mBAAZmD,GAAEnD,KAAwBmD,EAAEnD,KAASD,EAAOC,OAEvEiD,UAAWwG,IAAK,WAAW,MAAOxG,I
AAYyG,IAAK,SAASvG,GACxDF,EAAWE,EACX8J,EAAYW,MAAM3K,KAEtB8G,GAAIN,IAAK,WAAW,MAAO4S,IAAQ3S,IAAK,SAASvG,GAC7CkZ,EAAKrhB,GAAG4V,QAAQzN,KAEpB2D,OAAQ2C,IAAK,WAAW,MAAO3C,IAAS4C,IAAK,SAASvG,GAClD2D,EAAMtM,EAAGG,MAAMsQ,SAAS9H,KAE5B8zB,WAAqBxtB,IAAK,WAAW,MAAOwtB,IAAavtB,IAAK,SAASvG,GACnE8zB,EAAW9zB,GAAK,UAIxB3I,EAAGG,MAAMqP,YAAY1M,GACdA,GClbX9C,EAAGI,OAAOy8B,SAAW,WACjB,YA4DA,SAAS/5B,GAAMsB,GAkHX,MAjHAqO,GAAYW,QACZX,EAAYrS,OAAO05B,GAEnB11B,EAAUC,KAAK,SAASC,GACpB,GAAI4C,GAAY1G,GAAG2G,OAAOpG,KAC1Bf,GAAGG,MAAMsW,QAAQvP,EAEjB,IACIvB,GAAiB3F,EAAGG,MAAMwF,eAAeyB,EAAOF,EAAW3B,GAC3DK,EAAkB5F,EAAGG,MAAMyF,gBAAgByB,EAAQH,EAAW3B,EAYlE,IAVAzC,EAAMqR,OAAS,WAAajN,EAAUiH,aAAa/M,KAAK0B,IACxDA,EAAMoE,UAAYnG,KAElBgR,EAAMmC,OAAO+O,EAAY3e,GAAOxB,EAAMqR,QACjCH,OAAOkP,EAAY5e,IACnB6P,SAGLpC,EAAM8L,SAAWvZ,EAAKmQ,IAAI,SAAStM,GAAK,QAASA,EAAE0V,YAE9CsF,EAAc,CACf,GAAI3W,EACJ2W,KACA,KAAK3W,IAAOuF,GACJA,EAAMvF,YAAgBtL,OACtBiiB,EAAa3W,GAAOuF,EAAMvF,GAAKrL,MAAM,GAErCgiB,EAAa3W,GAAOuF,EAAMvF,GAKtC,IAAKlI,IAASA,EAAKlC,OAEf,MADApC,GAAGG,MAAM4W,OAAOjU,EAAOoE,GAChBpE,CAEPoE,GAAUK,UAAS,cAAec,QAItC,IAAIf,GAAOJ,EAAUK,UAAS,yBAA0BjD,MAAMA,IAC1DkZ,EAASlW,EAAKG,QAAQC,OAAM,KAAMC,KAAI,QAAU,4BAA4BD,OAAM,KAClFyQ,EAAI7Q,EAAKH,OAAM;AjCnG/B,AiCyGY,EjCzGV,CAAC,AiCqGSqW,EAAO9V,KjCrGT,CAAC,CAAC,AiCqGa,CjCrGZ,EAAE,CAAC,CiCqGeC,KAAI,CjCrGZ,CAAC,EAAE,CAAC,GAAG,AiCqGe,CjCrGd,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,EiCsG9B6V,EAAO9V,GjCtG8B,CAAC,GiCsGzB,GjCtG+B,EiCsGzBC,KAAI,QAAU,iBAG5Bkc,GAGD,GAAuB,QAAnB0K,EACAzK,EAAO1c,MAAOzB,GAAiB6G,IAAIstB,EAAIhyB,KAEvCR,EAAKH,OAAM,kBACNyW,MAAMtZ,GACNlD,KAAK0iB,GAELC,GAAaD,EAAOzc,WAAa9B,EAAOE,MACzCF,EAAOE,IAAMqe,EAAOzc,SACpBzB,EAAkB5F,EAAGG,MAAMyF,gBAAgByB,EAAQH,EAAW3B,IAGlE+B,EAAKH,OAAM,kBACNQ,KAAI,YAAc,gBAAmBpC,EAAOE,IAAK,SACnD,IAAuB,UAAnB8oB,EAA4B,CACnC,GAAI7C,GAAc1rB,EAAGI,OAAO0jB,SAAS1c,OACZskB,GAArB/lB,EAAiB,IACjB+lB,EAAe/lB,EAAiB,GAEpCme,EAAOzc,OAAOzB,GAAiB4G,IAAIstB,EAAIhyB,KACvCgc,EAAO1c,MAAMskB,GACb/lB,GAAkBme,EAAO1c,QAEzBE,EAAKH,OAAM,kBACNyW,MAAMtZ,GACNlD,KAAK0iB,GACLnc,KAAI,YAAc,aAAe,EAAiB,YA5B3DwQ,GAAEhR,OAAM,kBAAmBI,UAAS,KAAMc,QA+B9Cf,GAAKK,KAAI,YAAc,aAAepC,EAAOC,KAAO,IAAMD,EAAOE,IAAM,KAGvEq0B,EAAI1yB,MAAMzB,GAAgB0B,OAAOzB,EACjC,IAAIk3B,GAAU3kB,EAAEhR,OAAM,eAAgByW,OAAOtZ,GAC7C9D,IAAG2N,WAAW2uB,GAAS17B,KAAK04B,GAM5BhW,EAAOpjB,SAASiB,GAAE,cAAgB,SAASqjB,GACvC,IAAK,GAAIxY,KAAOwY,GACZjT,EAAMvF,GAAOwY,EAASxY,EAE1B9L,GAASoiB,YAAY/Q,GACrBjP,EAAMqR,WAIVzT,EAASiB,GAAE,cAAgB,SAASC,GACN,mBAAfA,GAAEic,WACTvZ,EAAK2F,QAAQ,SAASmC,EAAOpJ,GACzBoJ,EAAOyR,SAAWjc,EAAEic,SAAS7a,KAEjC+O,EAAM8L,SAAWjc,EAAEic,UAEvB/a,EAAMqR,aAId1B,EAAYS,UAAS,sBACdpQ,EAxKX,GAAIg3B,GAAM95B,EAAGI,OAAO05B,MAChBhW,EAAS9jB,EAAGI,OAAO0jB,SACnB5jB,EAAUF,EAAGI,OAAOF,UAEpBqF,GAAUE,IAAK,GAAIqR,MAAO,GAAID,OAAQ,GAAIrR,KAAM,IAC9Cue,EAAY,KACZ3c,EAAQ,KACRC,EAAS,KACT01B,GAAqB,EACrBlZ,GAAa,EACb0K,EAAiB,MACjBjiB,EAAQtM,EAAGG,MAAMuQ,eACjBqB,EAAQ/R,EAAGG,MAAM4R,QACjBoR,EAAe,KACfpM,EAAS,KACTtO,EAAW,IACX/H,EAAWF,GAAGE,SAAQ,cAAgB,cAAY,YAGxDR,GACKuI,SAAS,GACT+C,eAAc,GACdC,eAAe,SAAStD,EAAGnF,GACxB,MAAO82B,GAAIvT,cAAcpe,EAAGnF,IAOpC,IAAIyP,GAAczS,EAAGG,MAAMsS,YAAY/R,GAEnCwiB,EAAc,SAAS5e,GACvB,MAAO,YACH,OACI3B,OAAQ2B,EAAKmQ,IAAI,SAAStM,GAAK,OAAQA,EAAE0V,cAKjDoF,EAAc,SAAS3e,GACvB,MAAO,UAASyN,GACSlO,SAAjBkO,EAAMpP,QACN2B,EAAK2F,QAAQ,SAAUmC,EAAQpJ,GAC3BoJ,EAAOyR,UAAY9L,EAAMpP,OAAOK,MAsMhD,OAnEA82B,GAAIp5B,SAASiB,GAAE,2BAA6B,SAASoc,GACjDA,EAAW,QACPvR,IAAK1J,EAAMgF,IAAIiW,EAAIzZ,MACnB0H,MAAOlJ,EAAMyM,IAAIwO,EAAIzZ,MACrBgI,MAAOyR,EAAIzR,MACXI,QAASqR,EAAIrR,SAEZqwB,UACMhf,GAAIrR,cACJqR,GAAI3R,OAAOM,SAEtBxM,EAAQoE,KAAKyZ,GAAK7X,QAAO,KAG7B4zB,EAAIp5B,SAASiB,GAAE,0BAA4B,SAASoc,GAChD7d,EAAQgG,QAAO,
KAGnB4zB,EAAIp5B,SAASiB,GAAE,2BAA6B,SAASoc,GACjD7d,MAQJ4C,EAAMghB,OAASA,EACfhhB,EAAMpC,SAAWA,EACjBoC,EAAMg3B,IAAMA,EACZh3B,EAAM5C,QAAUA,EAChB4C,EAAM8L,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKiC,GAG1CA,EAAMgM,SAAWC,OAAOC,WAEpB5H,OAAqB6H,IAAK,WAAW,MAAO7H,IAAwB8H,IAAK,SAASvG,GAAGvB,EAAMuB,IAC3FtB,QAAqB4H,IAAK,WAAW,MAAO5H,IAAwB6H,IAAK,SAASvG,GAAGtB,EAAOsB,IAC5FoO,QAAqB9H,IAAK,WAAW,MAAO8H,IAAwB7H,IAAK,SAASvG,GAAGoO,EAAOpO,IAC5Fo0B,oBAAqB9tB,IAAK,WAAW,MAAO8tB,IAAwB7tB,IAAK,SAASvG,GAAGo0B,EAAmBp0B,IACxGkb,YAAqB5U,IAAK,WAAW,MAAO4U,IAAwB3U,IAAK,SAASvG,GAAGkb,EAAWlb,IAChG4lB,gBAAqBtf,IAAK,WAAW,MAAOsf,IAAwBrf,IAAK,SAASvG,GAAG4lB,EAAe5lB,IACpGwa,cAAqBlU,IAAK,WAAW,MAAOkU,IAAwBjU,IAAK,SAASvG,GAAGwa,EAAaxa,IAGlG2D,OAAQ2C,IAAK,WAAW,MAAO3C,IAAS4C,IAAK,SAASvG,GAClD2D,EAAQ3D,EACRmb,EAAOxX,MAAMA,GACbwtB,EAAIxtB,MAAMA,KAEd7D,UAAWwG,IAAK,WAAW,MAAOxG,IAAYyG,IAAK,SAASvG,GACxDF,EAAWE,EACX8J,EAAYW,MAAM3K,GAClBqxB,EAAIrxB,SAASA,KAEjBlD,QAAS0J,IAAK,WAAW,MAAO1J,IAAU2J,IAAK,SAASvG,GACtC9E,SAAV8E,EAAElD,MACFF,EAAOE,IAAMkD,EAAElD,IACfse,EAAYpb,EAAElD,KAElBF,EAAOuR,MAAsBjT,SAAb8E,EAAEmO,MAAuBnO,EAAEmO,MAASvR,EAAOuR,MAC3DvR,EAAOsR,OAAsBhT,SAAb8E,EAAEkO,OAAuBlO,EAAEkO,OAAStR,EAAOsR,OAC3DtR,EAAOC,KAAsB3B,SAAb8E,EAAEnD,KAAuBmD,EAAEnD,KAASD,EAAOC,SAGnExF,EAAGG,MAAMkW,eAAevT,EAAOg3B,GAC/B95B,EAAGG,MAAMqP,YAAY1M,GACdA,GCzPX9C,EAAGI,OAAO48B,OAAS,WACf,YAqEA,SAASC,KACL5U,EAAMpe,QAAQ,SAASG,GAEnBA,EAAK8yB,eAEL9yB,EAAK+yB,iBAET/rB,EAAMnH,QAAQ,SAASmf,GACnB,GAAI1V,GAAS0V,EAAK1V,OACdxO,EAASkkB,EAAKlkB,MACI,iBAAXwO,KAAqBA,EAAS0V,EAAK1V,OAAS2U,EAAMe,EAAK1V,SAC5C,gBAAXxO,KAAqBA,EAASkkB,EAAKlkB,OAASmjB,EAAMe,EAAKlkB,SAClEwO,EAAOwpB,YAAYz5B,KAAK2lB,GACxBlkB,EAAOi4B,YAAY15B,KAAK2lB,KAKhC,QAASgU,KACL/U,EAAMpe,QAAQ,SAASG,GACnBA,EAAK4B,MAAQtC,KAAKL,IACd7I,GAAG68B,IAAIjzB,EAAK8yB,YAAalxB,GACzBxL,GAAG68B,IAAIjzB,EAAK+yB,YAAanxB,MASrC,QAASsxB,KASL,IAPA,GACIC,GADAC,EAAiBnV,EAEjBvgB,EAAI,EAKD01B,EAAep7B,QAAU0F,EAAIugB,EAAMjmB,QACtCm7B,KACAC,EAAevzB,QAAQ,SAASG,GAC5BA,EAAKtC,EAAIA,EACTsC,EAAKuY,GAAK8a,EACVrzB,EAAK8yB,YAAYjzB,QAAQ,SAASmf,GAC1BmU,EAAUpqB,QAAQiW,EAAKlkB,QAAU,GACjCq4B,EAAU95B,KAAK2lB,EAAKlkB,YAIhCs4B,EAAiBD,IACfz1B,CAKF41B,IACAC,EAAe71B,GAGnB81B,GAAmBluB,EAAK,GAAK+tB,IAAc31B,EAAI,IAWnD,QAAS61B,GAAe71B,GACpBugB,EAAMpe,QAAQ,SAASG,GACdA,EAAK8yB,YAAY96B,SAClBgI,EAAKtC,EAAIA,EAAI,KAKzB,QAAS81B,GAAkBC,GACvBxV,EAAMpe,QAAQ,SAASG,GACnBA,EAAKtC,GAAK+1B,IAKlB,QAASC,GAAkBC,GAqBvB,QAASC,KAEL,GAAIC,GAAKz9B,GAAGgJ,IAAI00B,EAAgB,SAAS7V,GACrC,OAAQ3Y,EAAK,IAAM2Y,EAAMjmB,OAAS,GAAK+7B,GAAe39B,GAAG68B,IAAIhV,EAAOrc,IAGxEkyB,GAAej0B,QAAQ,SAASoe,GAC5BA,EAAMpe,QAAQ,SAASG,EAAMpH,GACzBoH,EAAKmF,EAAIvM,EACToH,EAAKqN,GAAKrN,EAAK4B,MAAQiyB,MAI/B7sB,EAAMnH,QAAQ,SAASmf,GACnBA,EAAK3R,GAAK2R,EAAKpd,MAAQiyB,IAI/B,QAASG,GAAiBlV,GAWtB,QAASmV,GAAejV,GACpB,OAAQA,EAAK1V,OAAOnE,EAAI6Z,EAAKkV,GAAKlV,EAAK3R,GAAK,GAAK2R,EAAKpd,MAX1DkyB,EAAej0B,QAAQ,SAASoe,EAAOkW,GACnClW,EAAMpe,QAAQ,SAASG,GACnB,GAAIA,EAAK+yB,YAAY/6B,OAAQ,CAEzB,GAAImN,GAAI/O,GAAG68B,IAAIjzB,EAAK+yB,YAAakB,GAAkB79B,GAAG68B,IAAIjzB,EAAK+yB,YAAanxB,EAC5E5B,GAAKmF,IAAMA,EAAI+sB,EAAOlyB,IAAS8e,OAU/C,QAASsV,GAAiBtV,GAWtB,QAASuV,GAAerV,GACpB,OAAQA,EAAKlkB,OAAOqK,EAAI6Z,EAAKsV,GAAKtV,EAAK3R,GAAK,GAAK2R,EAAKpd,MAX1DkyB,EAAe/8B,QAAQmW,UAAUrN,QAAQ,SAASoe,GAC9CA,EAAMpe,QAAQ,SAASG,GACnB,GAAIA,EAAK8yB,YAAY96B,OAAQ,CAEzB,GAAImN,GAAI/O,GAAG68B,IAAIjzB,EAAK8yB,YAAauB,GAAkBj+B,GAAG68B,IAAIjzB,EAAK8yB,YAAalxB,EAC5E5B,GAAKmF,IAAMA,EAAI+sB,EAAOlyB,IAAS8e,OAU/C,QAASyV,KACLT,EAAej0B,QAAQ,SAASoe,GAC5B,GAAIje,GACAqN,EAGAzU,EAFA+iB,EAAK,EACLxT,EAAI8V,EAAMjmB,MAKd,KADAimB,EAAMzS,KAAKgpB,GACN57B,EAAI,EAAOuP,EAAJvP,IAASA,EACjBoH,EAAOie,EAAMrlB,GACbyU,EAAKs
O,EAAK3b,EAAKmF,EACXkI,EAAK,IAAGrN,EAAKmF,GAAKkI,GACtBsO,EAAK3b,EAAKmF,EAAInF,EAAKqN,GAAK0mB,CAK5B,IADA1mB,EAAKsO,EAAKoY,EAAczuB,EAAK,GACzB+H,EAAK,EAIL,IAHAsO,EAAK3b,EAAKmF,GAAKkI,EAGVzU,EAAIuP,EAAI,EAAGvP,GAAK,IAAKA,EACtBoH,EAAOie,EAAMrlB,GACbyU,EAAKrN,EAAKmF,EAAInF,EAAKqN,GAAK0mB,EAAcpY,EAClCtO,EAAK,IAAGrN,EAAKmF,GAAKkI,GACtBsO,EAAK3b,EAAKmF,IAM1B,QAASqvB,GAAe9uB,EAAGuO,GACvB,MAAOvO,GAAEP,EAAI8O,EAAE9O,EAvGnB,GAAI2uB,GAAiB19B,GAAGq+B,OACnBryB,IAAI,SAASrE,GAAK,MAAOA,GAAEL,IAC3Bg3B,SAASt+B,GAAGu+B,WACZC,QAAQ3W,GACR5T,IAAI,SAAStM,GAAK,MAAOA,GAAEU,QAGhCm1B,KACAW,IACAM,GACA,KAAK,GAAI/V,GAAQ,EAAG6U,EAAa,IAAKA,EAClCS,EAAiBtV,GAAS,KAC1ByV,IACAM,IACAb,EAAiBlV,GACjByV,IACAM,IA6FR,QAASA,KAiBL,QAASC,GAAqBpvB,EAAGuO,GAC7B,MAAOvO,GAAE4D,OAAOnE,EAAI8O,EAAE3K,OAAOnE,EAGjC,QAAS4vB,GAAqBrvB,EAAGuO,GAC7B,MAAOvO,GAAE5K,OAAOqK,EAAI8O,EAAEnZ,OAAOqK,EArBjC8Y,EAAMpe,QAAQ,SAASG,GACnBA,EAAK8yB,YAAYtnB,KAAKupB,GACtB/0B,EAAK+yB,YAAYvnB,KAAKspB,KAE1B7W,EAAMpe,QAAQ,SAASG,GACnB,GAAIk0B,GAAK,EAAGI,EAAK,CACjBt0B,GAAK8yB,YAAYjzB,QAAQ,SAASmf,GAC9BA,EAAKkV,GAAKA,EACVA,GAAMlV,EAAK3R,KAEfrN,EAAK+yB,YAAYlzB,QAAQ,SAASmf,GAC9BA,EAAKsV,GAAKA,EACVA,GAAMtV,EAAK3R,OAcvB,QAASzL,GAAMlE,GACX,MAAOA,GAAEkE,MA7Rb,GAAIgxB,MACAS,EAAY,GACZU,EAAc,EACdzuB,GAAQ,EAAG,GACX2Y,KACAjX,KACAssB,GAAa,EAEb/U,EAAS,SAASoV,GAClBd,IACAG,IACAE,IACAQ,EAAkBC,IAGlBqB,EAAW,WACXH,KAIA7V,EAAO,WAGP,QAASA,GAAKjhB,GAEV,GAAIsY,GAAKtY,EAAEuL,OAAO5L,EAAIK,EAAEuL,OAAOiP,GAC3BlD,EAAKtX,EAAEjD,OAAO4C,EACdu3B,EAAK7+B,GAAG8+B,kBAAkB7e,EAAIhB,GAC9B4P,EAAKgQ,EAAGE,GACRC,EAAKH,EAAG,EAAIE,GACZxZ,EAAK5d,EAAEuL,OAAOnE,EAAIpH,EAAEm2B,GAAKn2B,EAAEsP,GAAK,EAChCmW,EAAKzlB,EAAEjD,OAAOqK,EAAIpH,EAAEu2B,GAAKv2B,EAAEsP,GAAK,EAChCgoB,EAAW,IAAMhf,EAAK,IAAMsF,EAC1B,IAAMsJ,EAAK,IAAMtJ,EACjB,IAAMyZ,EAAK,IAAM5R,EACjB,IAAMnO,EAAK,IAAMmO,CACvB,OAAO6R,GAfX,GAAIF,GAAY,EAwBhB,OANAnW,GAAKmW,UAAY,SAAS52B,GACtB,MAAKtH,WAAUe,QACfm9B,GAAa52B,EACNygB,GAFuBmW,GAK3BnW,GAIPkT,EAAS,SAASlyB,GAClB,MAAOA,GAAKmF,EAAInF,EAAKqN,GAAK,EAwQ9B,OA1BAulB,GAAOpuB,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKm8B,GAC3CA,EAAOluB,SAAWC,OAAOC,WACrByuB,WAAexuB,IAAK,WAAW,MAAOwuB,IAAevuB,IAAK,SAASvG,GAAG80B,GAAW90B,IACjFw1B,aAAelvB,IAAK,WAAW,MAAOkvB,IAAejvB,IAAK,SAASvG,GAAGw1B,EAAYx1B,IAClF0f,OAAepZ,IAAK,WAAW,MAAOoZ,IAAenZ,IAAK,SAASvG,GAAG0f,EAAM1f,IAC5EyI,OAAenC,IAAK,WAAW,MAAOmC,IAAelC,IAAK,SAASvG,GAAGyI,EAAMzI,IAC5E+G,MAAeT,IAAK,WAAW,MAAOS,IAAeR,IAAK,SAASvG,GAAG+G,EAAK/G,IAC3E+0B,YAAezuB,IAAK,WAAW,MAAOyuB,IAAexuB,IAAK,SAASvG,GAAG+0B,EAAW/0B,IAEjFggB,QAAe1Z,IAAK,WAAW0Z,EAAO,KAAezZ,IAAK,SAASvG,GAAGggB,EAAOhgB,KAC7Ey2B,UAAenwB,IAAK,WAAWmwB,KAAsBlwB,IAAK,SAASvG,MACnE2zB,QAAertB,IAAK,WAAW,MAAOqtB,MAAeptB,IAAK,SAASvG,GAC/C,kBAANA,KACN2zB,EAAO3zB,KAGfygB,MAAena,IAAK,WAAW,MAAOma,MAAela,IAAK,SAASvG,GAI/D,MAHgB,kBAANA,KACNygB,EAAKzgB,GAEFygB,QAIfppB,EAAGG,MAAMqP,YAAYwtB,GAEdA,GCrUXh9B,EAAGI,OAAOs/B,YAAc,WACpB,YAmDA,SAAS58B,GAAMsB,GAgJX,MA/IAA,GAAUC,KAAK,SAASC,GAqIpB,QAASq7B,GAASx3B,GACd3H,GAAG2G,OAAOpG,MAAM4G,KAAI,YACpB,aAAeQ,EAAEL,EAAI,KACjBK,EAAEoH,EAAI7F,KAAKL,IAAI,EAAGK,KAAKF,IAAInC,EAASc,EAAEsP,GAAIjX,GAAGuE,MAAMwK,KACnD,KACJytB,EAAOoC,WACPhW,EAAKzhB,KAAI,IAAM6rB,GAzInB,GAAIoM,IACAvX,QAEQje,KAAS,EAAG9H,KAAQ,WACpB8H,KAAS,EAAG9H,KAAQ,WACpB8H,KAAS,EAAG9H,KAAQ,WACpB8H,KAAS,EAAG9H,KAAQ,WACpB8H,KAAS,EAAG9H,KAAQ,WACpB8H,KAAS,EAAG9H,KAAQ,WAE5B8O,QAEQsC,OAAW,EAAGxO,OAAU,EAAG8G,MAAS,OACpC0H,OAAW,EAAGxO,OAAU,EAAG8G,MAAS,OACpC0H,OAAW,EAAGxO,OAAU,EAAG8G,MAAS,OACpC0H,OAAW,EAAGxO,OAAU,EAAG8G,MAAS,OACpC0H,OAAW,EAAGxO,OAAU,EAAG8G,MAAS,MACpC0H,OAAW,EAAGxO,OAAU,EAAG8G,MAAS,MACpC0H,OAAW,EAAGxO,OAAU,EAAG8G,MAAS,MACpC0H,OAAW,EAAGxO,OAAU,EAAG8G,MAAS,OAK5C6zB,GAAc,EACdC
,GAAgB,CAmBpB,KAf8B,gBAAlBx7B,GAAW,OAAmBA,EAAW,MAAGlC,SAAW,IACrC,gBAAlBkC,GAAW,OAAmBA,EAAW,MAAGlC,SAAW,IAE/Dy9B,GAAc,GAKdv7B,EAAW,OAAMA,EAAW,MAAGlC,OAAS,GACxCkC,EAAW,OAAMA,EAAW,MAAGlC,OAAS,IAExC09B,GAAgB,IAIhBD,EAIA,MAHA19B,SAAQ49B,MAAK,2BAA6B,0BAA2Bz7B,GACrEnC,QAAQI,KAAI,yBAA2Bq9B,EAAUtrB,KAAKC,UAAUqrB,IAChEI,EAAU57B,EAAW,yCACd,CAIX,KAAI07B,EAEA,MADAE,GAAU57B,EAAW,sBACd,CAMX,IAAI8R,GAAM9R,EAAUsD,OAAM,OACrBC,KAAI,QAAUP,GACdO,KAAI,SAAWN,GACfK,OAAM,KACNC,KAAI,QAAU,8BAGnBq1B,GACKS,UAAUA,GACVU,YAAYA,GACZzuB,MAAMtI,EAAOC,GAElB,IAAImsB,GAAOwJ,EAAO5T,MAElB4T,GACK3U,MAAM/jB,EAAK+jB,OACXjX,MAAM9M,EAAK8M,OACXuX,OAAO,IACP2T,OAAOA,EAGZ,IAAIlT,GAAOlT,EAAIxO,OAAM,KAAMH,UAAS,SAC/BjD,KAAKA,EAAK8M,OACV3J,QAAQC,OAAM,QACdC,KAAI,QAAU,QACdA,KAAI,IAAM6rB,GACV/oB,MAAK,eAAiB,SAAStC,GAAK,MAAOuB,MAAKL,IAAI,EAAGlB,EAAEsP,MAC7D7B,KAAK,SAAS9F,EAAEuO,GAAK,MAAOA,GAAE5G,GAAK3H,EAAE2H,IAGtC2R,GAAK1hB,OAAM,SACNwK,KAAK+tB,EAGV,IAAI71B,GAAO8L,EAAIxO,OAAM,KAAMH,UAAS,SAC/BjD,KAAKA,EAAK+jB,OACV5gB,QAAQC,OAAM,KACdC,KAAI,QAAU,QACdA,KAAI,YAAc,SAASQ,GAAK,MAAO,aAAeA,EAAEL,EAAI,IAAMK,EAAEoH,EAAI,MACxEnO,KACGZ,GAAG6iB,SACEC,OACA4c,OAAO,SAAS/3B,GAAK,MAAOA,KAC5BxG,GAAE,YAAc,WACbZ,KAAK0Q,WAAW0uB,YAAYp/B,QAE/BY,GAAE,OAASg+B,GAIxBv1B,GAAK1C,OAAM,QACNC,KAAI,SAAW,SAASQ,GAAK,MAAOA,GAAEsP,KACtC9P,KAAI,QAAUq1B,EAAOS,aACrBhzB,MAAK,OAAS21B,GACd31B,MAAK,SAAW41B,GAChB34B,OAAM,SACNwK,KAAKouB,GAGVl2B,EAAK1C,OAAM,QACNC,KAAI,IAAM,IACVA,KAAI,IAAM,SAASQ,GAAK,MAAOA,GAAEsP,GAAK,IACtC9P,KAAI,KAAO,SACXA,KAAI,cAAgB,OACpBA,KAAI,YAAc,MAClBuK,KAAK,SAAS/J,GAAK,MAAOA,GAAE7F,OAC5BmK,OAAO,SAAStE,GAAK,MAAOA,GAAEL,EAAIV,EAAQ,IAC1CO,KAAI,IAAM,EAAIq1B,EAAOS,aACrB91B,KAAI,cAAgB,WAatB7E,EAzLX,GAAIyC,IAAUE,IAAK,EAAGqR,MAAO,EAAGD,OAAQ,EAAGrR,KAAM,GAC3Cw3B,EAASh9B,EAAGI,OAAO48B,SACnB51B,EAAQ,IACRC,EAAS,IACTo2B,EAAY,GACZU,EAAe,GACfoC,EAAQ,QACRjE,EAASz4B,OAOX28B,EAAehgC,GAAGmM,OAAM,QACxBA,EAAS,SAASxE,GAClB,MAAOq4B,GAAar4B,GAAK,IAAMo4B,GAE/Bj0B,EAAQ9L,GAAG8H,MAAMuI,aACjBovB,EAAY,SAAS93B,GACrB,MAAOA,GAAEuL,OAAOpR,KAAO,MAAQ6F,EAAEjD,OAAO5C,KAAO,KAAOqK,EAAOxE,EAAE6D,QAE/Do0B,EAAgB,SAASj4B,GACzB,MAAOA,GAAEmE,MAAQA,EAAMnE,EAAE7F,KAAK+P,QAAO,MAAQ,MAE7CguB,EAAkB,SAASl4B,GAC3B,MAAO3H,IAAG8xB,IAAInqB,EAAEmE,OAAOimB,OAAO,IAE9B+N,EAAY,SAASn4B,GACrB,MAAOA,GAAE7F,KAAO,KAAOqK,EAAOxE,EAAE6D,QAGhCg0B,EAAY,SAAS5Z,EAASqa,GAC9Bra,EAAQ1e,OAAM,QACTC,KAAI,IAAM,GACVA,KAAI,IAAM,GACVA,KAAI,QAAU,2BACdA,KAAI,cAAgB,UACpBuK,KAAKuuB,GAwLd,OA9BA39B,GAAM8L,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKiC,GAE1CA,EAAMgM,SAAWC,OAAOC,WAEpBuxB,OAAkBtxB,IAAK,WAAW,MAAOsxB,IAAerxB,IAAK,SAASvG,GAAG43B,EAAM53B,IAC/EvB,OAAkB6H,IAAK,WAAW,MAAO7H,IAAe8H,IAAK,SAASvG,GAAGvB,EAAMuB,IAC/EtB,QAAkB4H,IAAK,WAAW,MAAO5H,IAAe6H,IAAK,SAASvG,GAAGtB,EAAOsB,IAChFgE,QAAkBsC,IAAK,WAAW,MAAOtC,IAAeuC,IAAK,SAASvG,GAAGgE,EAAOhE,IAChFs3B,WAAkBhxB,IAAK,WAAW,MAAOgxB,IAAe/wB,IAAK,SAASvG,GAAGs3B,EAAUt3B,IACnF80B,WAAkBxuB,IAAK,WAAW,MAAOwuB,IAAevuB,IAAK,SAASvG,GAAG80B,EAAU90B,IACnFw1B,aAAkBlvB,IAAK,WAAW,MAAOkvB,IAAejvB,IAAK,SAASvG,GAAGw1B,EAAYx1B,IACrF2zB,QAAkBrtB,IAAK,WAAW,MAAOqtB,IAAeptB,IAAK,SAASvG,GAAG2zB,EAAO3zB,IAGhFpD,QAAS0J,IAAK,WAAW,MAAO1J,IAAU2J,IAAK,SAASvG,GACpDpD,EAAOE,IAAsB5B,SAAb8E,EAAElD,IAAuBkD,EAAElD,IAASF,EAAOE,IAC3DF,EAAOuR,MAAsBjT,SAAb8E,EAAEmO,MAAuBnO,EAAEmO,MAASvR,EAAOuR,MAC3DvR,EAAOsR,OAAsBhT,SAAb8E,EAAEkO,OAAuBlO,EAAEkO,OAAStR,EAAOsR,OAC3DtR,EAAOC,KAAsB3B,SAAb8E,EAAEnD,KAAuBmD,EAAEnD,KAASD,EAAOC,OAE/Dk7B,WAAYzxB,IAAK,WAAW,UAAaC,IAAK,SAASvG,GACnDy3B,EAAoCv8B,SAAlB8E,EAAEg4B,UAA4Bh4B,EAAEg4B,UAAcP,EAChEC,EAAoCx8B,SAAlB8E,EAAEi4B,YAA4Bj4B,EAAEi4B,YAAcP,EAChEC,EAAoCz8B,SAAlB8E,EAAE+X,MAA4B/X,EAAE+X,MAAc4f,MAKxEtgC,EAAGG,MAAMqP,YAAY1M,GAEdA,GCxOX
9C,EAAGI,OAAOgtB,QAAU,WAChB,YA4DA,SAASyT,GAAS14B,GACd,GAAIqE,GAAK+rB,CAGT,OAFA/rB,GAAMrE,EAAE,GAAGiE,OAAS,IAAMjE,EAAE,GAC5BowB,EAAMuI,EAAOt0B,GAAOs0B,EAAOt0B,OAI/B,QAASu0B,GAAS54B,GACd,GAAIqE,EACJA,GAAMrE,EAAE,GAAGiE,OAAS,IAAMjE,EAAE,SACrB24B,GAAOt0B,GAGlB,QAASw0B,GAAS74B,GACd,GAAInF,GAAGwJ,EAAK+rB,EACR0I,EAAQJ,EAAS14B,GACjB+4B,GAAQ,CACZ,KAAKl+B,EAAI,EAAGA,EAAI3B,UAAUe,OAAQY,GAAK,EACnCwJ,EAAMnL,UAAU2B,GAChBu1B,EAAMl3B,UAAU2B,EAAI,GAAGmF,EAAE,GAAIA,EAAE,IAC3B84B,EAAMz0B,KAAS+rB,GAAQ0I,EAAME,eAAe30B,KAC5Cy0B,EAAMz0B,GAAO+rB,EACb2I,GAAQ,EAGhB,OAAOA,GAGX,QAASp+B,GAAMsB,GAwcX,MAvcAqO,GAAYW,QACZhP,EAAUC,KAAK,SAASC,GA2GpB,QAAS88B,KAKL,GAFAC,GAAc,GAETnf,EAAa,OAAO,CAGzB,IAAI0D,KAAe,EAAM,CACrB,GAAI0b,GAAW9gC,GAAGmf,MAAMrb,EAAKmQ,IAAI,SAASmnB,EAAO2F,GACzC,MAAO3F,GAAM/yB,OACR4L,IAAI,SAASnF,EAAO8S,GAKjB,GAAIof,GAAKvnB,EAAK3K,EAAM8S,GAChBqf,EAAK5f,EAAKvS,EAAM8S,EAEpB,QAAQpiB,EAAGG,MAAM8H,UAAUH,EAAE05B,IAAsB,KAAhB93B,KAAKyB,SAChCnL,EAAGG,MAAM8H,UAAUsH,EAAEkyB,IAAsB,KAAhB/3B,KAAKyB,SACpCo2B,EACAnf,EAAY9S,KAEnB7C,OAAO,SAASi1B,EAAYtf,GACzB,MAAO4F,GAAY0Z,EAAW,GAAItf,OAKlD,IAAuB,GAAnBkf,EAASl/B,OAAa,OAAO,CAC7Bk/B,GAASl/B,OAAS,IAElBk/B,EAAS79B,MAAMqE,EAAErB,QAAQ,GAAK,GAAI8I,EAAE9I,QAAQ,GAAK,GAAI,KAAM,OAC3D66B,EAAS79B,MAAMqE,EAAErB,QAAQ,GAAK,GAAI8I,EAAE9I,QAAQ,GAAK,GAAI,KAAM,OAC3D66B,EAAS79B,MAAMqE,EAAErB,QAAQ,GAAK,GAAI8I,EAAE9I,QAAQ,GAAK,GAAI,KAAM,OAC3D66B,EAAS79B,MAAMqE,EAAErB,QAAQ,GAAK,GAAI8I,EAAE9I,QAAQ,GAAK,GAAI,KAAM,OAK/D,IAAIk7B,GAASnhC,GAAGohC,KAAKC,UACjB,IAAI,MACJ,IAAKx6B,EAAS,KACbD,EAAQ,GAAGC,EAAS,KACpBD,EAAQ,GAAE,OAGX06B,EAAUthC,GAAGohC,KAAKE,QAAQR,GAAU7sB,IAAI,SAAStM,EAAGnF,GACpD,OACIsB,KAAQq9B,EAAOI,KAAK55B,GACpBiE,OAAUk1B,EAASt+B,GAAG,GACtBsM,MAASgyB,EAASt+B,GAAG,KAK7BsE,IAAKH,OAAM,mBAAoBI,UAAS,QAASc,QACjD,IAAI25B,GAAa16B,GAAKH,OAAM,mBAAoBI,UAAS,QAASjD,KAAKw9B,GACnEG,EAAcD,EACbv6B,QAAQC,OAAM,YACdC,KAAI,IAAM,SAASQ,GAChB,MAAKA,IAAMA,EAAE7D,MAA0B,IAAlB6D,EAAE7D,KAAKlC,OAGjB,IAAM+F,EAAE7D,KAAKsT,KAAI,KAAQ,IAFzB,UAIdjQ,KAAI,KAAO,SAASQ,EAAEnF,GACnB,MAAO,WAAWA,IACrB2E,KAAI,YAAc,SAASQ,EAAEnF,GAAK,MAAO,gBAAgBuH,EAAE,IAAKvH,EAAC,KAWtE,IAPIk/B,GACAD,EAAYx3B,MAAK,OAASjK,GAAG8xB,IAAI,IAAK,IAAK,MACtC7nB,MAAK,eAAiB,IACtBA,MAAK,iBAAmB,GACxBA,MAAK,SAAWjK,GAAG8xB,IAAI,IAAI,IAAI,MAGpC6P,EAAa,CAGb76B,GAAKH,OAAM,mBAAoBI,UAAS,KAAMc,QAC9C,IAAI+5B,GAAa96B,GAAKH,OAAM,mBAAoBI,UAAS,YAAajD,KAAKg9B,EACzDc,GACb36B,QAAQC,OAAM,gBACdC,KAAI,KAAO,SAASQ,EAAGnF,GAAK,MAAO,WAAWuH,EAAE,IAAKvH,IACrD0E,OAAM,cACNC,KAAI,KAAO,SAASQ,GAAK,MAAOA,GAAE,KAClCR,KAAI,KAAO,SAASQ,GAAK,MAAOA,GAAE,KAClCR,KAAI,IAAM06B,GAGnB,GAAIC,GAAqB,SAASC,EAAIp6B,EAAGq6B,GACrC,GAAInB,EAAa,MAAO,EACxB,IAAIj1B,GAAS9H,EAAK6D,EAAEiE,OACpB,IAAevI,SAAXuI,EAAJ,CACA,GAAIkD,GAASlD,EAAOvD,OAAOV,EAAEmH,MAC7BA,GAAY,MAAKhD,EAAMF,EAAQjE,EAAEiE,QAGjCkD,EAAQ,EAAK2K,EAAK3K,GAClBA,EAAQ,EAAKuS,EAAKvS,EAGlB,IAAIiK,GAAMrS,EAAUkD,OAAOmD,wBACvBk1B,EAAavgC,OAAOwgC,aAAer4B,SAAS0D,gBAAgB00B,UAC5DE,EAAazgC,OAAO0gC,aAAev4B,SAAS0D,gBAAgB40B,WAE5D11B,GACAzH,KAAMsC,EAAEmS,EAAK3K,EAAOnH,EAAEmH,QAAUiK,EAAI/T,KAAOm9B,EAAap9B,EAAOC,KAAO,GACtEC,IAAK8J,EAAEsS,EAAKvS,EAAOnH,EAAEmH,QAAUiK,EAAI9T,IAAMg9B,EAAYl9B,EAAOE,IAAM,GAGtE+8B,IACIlzB,MAAOA,EACPlD,OAAQA,EACRa,IAAKA,EACL41B,aAAc/6B,EAAEmS,EAAK3K,EAAOnH,EAAEmH,QAAU/J,EAAOC,KAAM+J,EAAEsS,EAAKvS,EAAOnH,EAAEmH,QAAU/J,EAAOE,KACtFgf,YAAatc,EAAEiE,OACfgW,WAAYja,EAAEmH,MACdvK,MAAOvE,GAAGuE,MACVqhB,QAASmc,KAIjBP,GACKrgC,GAAE,QAAU,SAASwG,GAClBm6B,EAAmBvhC,KAAMoH,EAAGzH,EAASqG,gBAExCpF,GAAE,WAAa,SAASwG,GACrBm6B,EAAmBvhC,KAAMoH,EAAGzH,EAAS4lB,mBAExC3kB,GAAE,YAAc,SAASwG,GACtBm6B,EAAmBvhC,KAAMoH,EAAGzH,EAASsb,oBAExCra,GAAE,WAAa,SAASwG,EAAGnF,GACxBs/B,EAAmBvhC,KAAMoH,EA
AGzH,EAASsF,uBAK7CsB,IAAKH,OAAM,cAAeI,UAAS,aAC9BA,UAAS,aAGT5F,GAAE,QAAU,SAASwG,EAAEnF,GAEpB,GAAIq+B,IAAgB/8B,EAAK6D,EAAEiE,QAAS,MAAO,EAC3C,IAAIA,GAAS9H,EAAK6D,EAAEiE,QAChBkD,EAASlD,EAAOvD,OAAO7F,GACvBojB,EAAUrlB,IACdL,GAASqG,cACLuI,MAAOA,EACPlD,OAAQA,EACRa,KAAMnF,EAAEmS,EAAK3K,EAAOtM,IAAMuC,EAAOC,KAAM+J,EAAEsS,EAAKvS,EAAOtM,IAAMuC,EAAOE,KAClEo9B,aAAc/6B,EAAEmS,EAAK3K,EAAOtM,IAAMuC,EAAOC,KAAM+J,EAAEsS,EAAKvS,EAAOtM,IAAMuC,EAAOE,KAC1Egf,YAAatc,EAAEiE,OACfgW,WAAYpf,EACZ+B,MAAOvE,GAAGuE,MACVqhB,QAASA,MAGhBzkB,GAAE,WAAa,SAASwG,EAAEnF,GACvB,GAAIq+B,IAAgB/8B,EAAK6D,EAAEiE,QAAS,MAAO,EAC3C,IAAIA,GAAS9H,EAAK6D,EAAEiE,QAChBkD,EAASlD,EAAOvD,OAAO7F,EAE3BtC,GAAS4lB,iBACLhX,MAAOA,EACPlD,OAAQA,EACRa,KAAMnF,EAAEmS,EAAK3K,EAAOtM,IAAMuC,EAAOC,KAAM+J,EAAEsS,EAAKvS,EAAOtM,IAAMuC,EAAOE,KAClEo9B,aAAc/6B,EAAEmS,EAAK3K,EAAOtM,IAAMuC,EAAOC,KAAM+J,EAAEsS,EAAKvS,EAAOtM,IAAMuC,EAAOE,KAC1Egf,YAAatc,EAAEiE,OACfgW,WAAYpf,MAGnBrB,GAAE,YAAc,SAASwG,EAAEnF,GACxB,GAAIq+B,IAAgB/8B,EAAK6D,EAAEiE,QAAS,MAAO,EAC3C,IAAIA,GAAS9H,EAAK6D,EAAEiE,QAChBkD,EAASlD,EAAOvD,OAAO7F,EAE3BtC,GAASsb,kBACL1M,MAAOA,EACPlD,OAAQA,EACRa,KAAMnF,EAAEmS,EAAK3K,EAAOtM,IAAMuC,EAAOC,KAAM+J,EAAEsS,EAAKvS,EAAOtM,IAAMuC,EAAOE,KAClEo9B,aAAc/6B,EAAEmS,EAAK3K,EAAOtM,IAAMuC,EAAOC,KAAM+J,EAAEsS,EAAKvS,EAAOtM,IAAMuC,EAAOE,KAC1Egf,YAAatc,EAAEiE,OACfgW,WAAYpf,EACZsJ,MAAOA,EAAMnE,EAAGnF,OAGvBrB,GAAE,WAAa,SAASwG,EAAEnF,GACvB,GAAIq+B,IAAgB/8B,EAAK6D,EAAEiE,QAAS,MAAO,EAC3C,IAAIA,GAAS9H,EAAK6D,EAAEiE,QAChBkD,EAASlD,EAAOvD,OAAO7F,EAE3BtC,GAASsF,iBACLsJ,MAAOA,EACPlD,OAAQA,EACRa,KAAMnF,EAAEmS,EAAK3K,EAAOtM,IAAMuC,EAAOC,KAAM+J,EAAEsS,EAAKvS,EAAOtM,IAAMuC,EAAOE,KAClEo9B,aAAc/6B,EAAEmS,EAAK3K,EAAOtM,IAAMuC,EAAOC,KAAM+J,EAAEsS,EAAKvS,EAAOtM,IAAMuC,EAAOE,KAC1Egf,YAAatc,EAAEiE,OACfgW,WAAYpf,EACZsJ,MAAOA,EAAMnE,EAAGnF,OAvTpCkE,EAAY1G,GAAG2G,OAAOpG,KACtB,IAAI4E,GAAiB3F,EAAGG,MAAMwF,eAAeyB,EAAOF,EAAW3B,GAC3DK,EAAkB5F,EAAGG,MAAMyF,gBAAgByB,EAAQH,EAAW3B,EAElEvF,GAAGG,MAAMsW,QAAQvP,GAGjB5C,EAAK2F,QAAQ,SAASmC,EAAQpJ,GAC1BoJ,EAAOvD,OAAOoB,QAAQ,SAASqF,GAC3BA,EAAMlD,OAASpJ,KAKvB,IAAI8/B,GAAWhgC,EAAMoY,SAAS5Y,OAAS9B,GAAG8H,MAAMrG,MAAMK,MAAO,GAAO,EAEhEwjB,GAAc9L,GAAWI,GAAW2oB,KACpCviC,GAAGmf,MACCrb,EAAKmQ,IAAI,SAAStM,GACd,MAAOA,GAAEU,OAAO4L,IAAI,SAAStM,EAAEnF,GAC3B,OAAS8E,EAAGmS,EAAK9R,EAAEnF,GAAIuM,EAAGsS,EAAK1Z,EAAEnF,GAAI0M,KAAMszB,EAAQ76B,EAAEnF,QAapE,IARD8E,EAAKnB,OAAOqT,GAAWxZ,GAAGkf,OAAOoG,GAAWrR,IAAI,SAAStM,GAAK,MAAOA,GAAEL,IAAMpG,OAAOke,KAEhFwB,GAAW9c,EAAK,GAChBwD,EAAErB,MAAMyT,KAAYvU,EAAiBs9B,EAAgBt9B,IAAmB,EAAGrB,EAAK,GAAGuE,OAAOzG,QAASuD,EAAiBA,GAAkB,EAAIs9B,IAAiB,EAAI3+B,EAAK,GAAGuE,OAAOzG,UAG9K0F,EAAErB,MAAMyT,IAAW,EAAGvU,IAErBm9B,EAAU,CACP,GAAIt5B,IAAMhJ,GAAGgJ,IAAIsc,GAAWrR,IAAI,SAAStM,GAAK,MAAY,KAARA,EAAEoH,EAAgBpH,EAAEoH,EAAxB,SAC9CA,GAAEgO,OAAM,GACH5W,OAAOyT,GAAW5Z,GAAGkf,OAAOoG,GAAWrR,IAAI,SAAStM,GACjD,MAAY,KAARA,EAAEoH,EAAgBpH,EAAEoH,EACN,GAAN/F,KACb9H,OAAO4f,KACT7a,MAAM0U,IAAWvV,EAAiB,QAEnC2J,GAAE5I,OAAOyT,GAAW5Z,GAAGkf,OAAOoG,GAAWrR,IAAI,SAAUtM,GAAK,MAAOA,GAAEoH,IAAK7N,OAAO4f,KAChF7a,MAAM0U,IAAWvV,EAAiB,GAG/Cs9B,GAAKv8B,OAAOo8B,GAAcviC,GAAGkf,OAAOoG,GAAWrR,IAAI,SAAStM,GAAK,MAAOA,GAAEuH,OAAQhO,OAAOyhC,KACpF18B,MAAM28B,GAAaC,GAGxBpe,EAAcnd,EAAEnB,SAAS,KAAOmB,EAAEnB,SAAS,IAAM4I,EAAE5I,SAAS,KAAO4I,EAAE5I,SAAS,GAE1EmB,EAAEnB,SAAS,KAAOmB,EAAEnB,SAAS,KAC7BmB,EAAEnB,SAAS,GACPmB,EAAEnB,QAAQmB,EAAEnB,SAAS,GAAqB,IAAhBmB,EAAEnB,SAAS,GAAWmB,EAAEnB,SAAS,GAAqB,IAAhBmB,EAAEnB,SAAS,KACzEmB,EAAEnB,QAAM,GAAK,KAEnB4I,EAAE5I,SAAS,KAAO4I,EAAE5I,SAAS,KAC7B4I,EAAE5I,SAAS,GACP4I,EAAE5I,QAAQ4I,EAAE5I,SAAS,GAAqB,IAAhB4I,EAAE5I,SAAS,GAAW4I,EAAE5I,SAAS,GAAqB,IAAhB4I,EAAE5I,SAAS,KACzE4I,EAAE5I,QAAM,GA
AK,KAElBwJ,MAAMrI,EAAEnB,SAAS,KAClBmB,EAAEnB,QAAM,GAAK,IAGZwJ,MAAMZ,EAAE5I,SAAS,KAClB4I,EAAE5I,QAAM,GAAK,IAGjB8Z,EAAKA,GAAM3Y,EACXie,EAAKA,GAAMxW,EACX+zB,EAAKA,GAAMJ,CAEX,IAAIK,IAAYz7B,EAAE,KAAO2Y,EAAG,IAAMlR,EAAE,KAAOwW,EAAG,IAAMmd,EAAE,KAAOI,EAAG,EAEhEE,GAASA,GAAUp8B,EACnBq8B,EAAUA,GAAWp8B,CAErB,IAAIq8B,IAAWF,IAAWp8B,GAASq8B,IAAYp8B,EAG3CC,GAAOJ,EAAUK,UAAS,wBAAyBjD,MAAMA,IACzDkD,GAAYF,GAAKG,QAAQC,OAAM,KAAMC,KAAI,QAAU,oCAAsC4C,GACzFkT,GAAYjW,GAAUE,OAAM,QAC5B8V,GAAShW,GAAUE,OAAM,KACzByQ,GAAI7Q,GAAKH,OAAM,IAEnBG,IAAKoD,QAAO,kBAAoBua,GAChCzH,GAAO9V,OAAM,KAAMC,KAAI,QAAU,aACjC6V,GAAO9V,OAAM,KAAMC,KAAI,QAAU,kBACjCH,GAAUE,OAAM,KAAMC,KAAI,QAAU,kBAEpCL,GAAKK,KAAI,YAAc,aAAepC,EAAOC,KAAO,IAAMD,EAAOE,IAAM,KAEvEgY,GAAU/V,OAAM,YACXC,KAAI,KAAO,gBAAkB4C,GAC7B7C,OAAM,QACNC,KAAI,YAAc,wBAEvBL,GAAKH,OAAM,iBAAoBoD,EAAK,SAC/B5C,KAAI,QAAUhC,EAAiB,IAC/BgC,KAAI,SAAY/B,EAAkB,EAAKA,EAAkB,GAAK,GAEnEuS,GAAExQ,KAAI,YAAc8Z,EAAW,qBAAuBlX,EAAK,IAAM,IAqNjE82B,GAAc,CACd,IAAIpb,IAAS3e,GAAKH,OAAM,cAAeI,UAAS,aAC3CjD,KAAK,SAAS6D,GAAK,MAAOA,IAAK,SAASA,GAAK,MAAOA,GAAEqE,KAC3DyZ,IAAOxe,QAAQC,OAAM,KAChB+C,MAAK,iBAAmB,MACxBA,MAAK,eAAiB,MAC3Bwb,GAAO7d,OACFC,SACL4d,GACKte,KAAI,QAAU,SAASQ,EAAEnF,GACtB,OAAQmF,EAAEuC,SAAW,IAAM,uBAAyB1H,IAEvD0H,QAAO,qBAAuBwX,GAC9BxX,QAAO,QAAU,SAASvC,GAAK,MAAOA,GAAEsT,QAC7CwK,GAAOzT,gBAAgBC,EAAa,mBAC/BhI,MAAK,OAAS,SAAStC,EAAEnF,GAAK,MAAOsJ,GAAMnE,EAAGnF,KAC9CyH,MAAK,SAAW,SAAStC,EAAEnF,GAAK,MAAOmF,GAAEw7B,kBAAoBA,GAAoBr3B,EAAMnE,EAAGnF,KAC1FyH,MAAK,iBAAmB,GACxBA,MAAK,eAAiB,GAG3B,IAAIm5B,IAAS3d,GAAO1e,UAAS,iBACxBjD,KAAK,SAAS6D,GACX,MAAOA,GAAEU,OAAO4L,IACZ,SAAUnF,EAAO8S,GACb,OAAQ9S,EAAO8S,KAChB3V,OACC,SAASi1B,EAAYtf,GACjB,MAAO4F,GAAY0Z,EAAW,GAAItf,MAyCtD,IAtCAwhB,GAAOn8B,QAAQC,OAAM,QAChBC,KAAI,QAAU,SAAUQ,GACrB,MAAO,qBAAuBA,EAAE,KAEnCsC,MAAK,OAAS,SAAUtC,GAAK,MAAOA,GAAEmE,QACtC7B,MAAK,SAAW,SAAUtC,GAAK,MAAOA,GAAEmE,QACxC3E,KAAI,YAAc,SAASQ,GACxB,MAAO,aAAenI,EAAGG,MAAM8H,UAAUwY,EAAGxG,EAAK9R,EAAE,GAAGA,EAAE,MAAQ,IAAMnI,EAAGG,MAAM8H,UAAU8d,EAAGlE,EAAK1Z,EAAE,GAAGA,EAAE,MAAQ,MAEnHR,KAAI,IACD3H,EAAGG,MAAM4V,SACRrQ,KAAK,SAASyC,GAAK,MAAO07B,GAAS17B,EAAE,MACrCuH,KAAK,SAASvH,GAAK,MAAO+6B,GAAEF,EAAQ76B,EAAE,GAAGA,EAAE,QAEpDy7B,GAAOx7B,OAAO/D,KAAK08B,GAAU14B,SAC7B4d,GAAO7d,OAAOb,UAAS,iBAClBiL,gBAAgBC,EAAa,gBAC7B9K,KAAI,YAAc,SAASQ,GACxB,MAAO,aAAenI,EAAGG,MAAM8H,UAAUH,EAAEmS,EAAK9R,EAAE,GAAGA,EAAE,MAAQ,IAAMnI,EAAGG,MAAM8H,UAAUsH,EAAEsS,EAAK1Z,EAAE,GAAGA,EAAE,MAAQ,MAEjHE,SAELu7B,GAAOn3B,OAAO,SAAUtE,GAAK,MAAOo7B,KAAaG,IAAY1C,EAAS74B,EAAG,IAAK8R,EAAM,IAAK4H,KACpFrP,gBAAgBC,EAAa,kBAC7B9K,KAAI,YAAc,SAASQ,GAExB,MAAO,aAAenI,EAAGG,MAAM8H,UAAUH,EAAEmS,EAAK9R,EAAE,GAAGA,EAAE,MAAQ,IAAMnI,EAAGG,MAAM8H,UAAUsH,EAAEsS,EAAK1Z,EAAE,GAAGA,EAAE,MAAQ,MAGtHy7B,GAAOn3B,OAAO,SAAUtE,GAAK,MAAOo7B,KAAaG,IAAY1C,EAAS74B,EAAG,QAAS07B,EAAU,OAAQb,KAC/FxwB,gBAAgBC,EAAa,kBAC7B9K,KAAI,IACD3H,EAAGG,MAAM4V,SACRrQ,KAAK,SAASyC,GAAK,MAAO07B,GAAS17B,EAAE,MACrCuH,KAAK,SAASvH,GAAK,MAAO+6B,GAAEF,EAAQ76B,EAAE,GAAGA,EAAE,QAIjDszB,EACH,CACI,GAAIqI,IAAU7d,GAAO1e,UAAS,aACzBjD,KAAK,SAAS6D,GACX,MAAOA,GAAEU,OAAO4L,IACZ,SAAUnF,EAAO8S,GACb,OAAQ9S,EAAO8S,KAChB3V,OACC,SAASi1B,EAAYtf,GACjB,MAAO4F,GAAY0Z,EAAW,GAAItf,MAItD0hB,IAAOr8B,QAAQC,OAAM,QAChB+C,MAAK,OAAS,SAAUtC,EAAEnF,GACvB,MAAOmF,GAAEmE,QACZ7B,MAAK,iBAAmB,GACxBA,MAAK,eAAiB,GACtB9C,KAAI,YAAc,SAASQ,GACxB,GAAIwa,GAAK3iB,EAAGG,MAAM8H,UAAUwY,EAAGxG,EAAK9R,EAAE,GAAGA,EAAE,MAAQuB,KAAK2f,KAAK6Z,EAAEF,EAAQ76B,EAAE,GAAGA,EAAE,KAAKuB,KAAK+P,IAAM,CAC9F,OAAO,aAAekJ,EAAK,IAAM3iB,EAAGG,MAAM8H,UAAU8d,EAAGlE,EAAK1Z,EAAE,GAAGA,EAAE,MAAQ,MAE9E+J,KAAK,SAAS/J,EAAEnF,GACb,MAAOmF,GAAE,GAAGkU,QAEpBynB,GAAO17B,OAAOC,SACd4d,GAAO7d,OAAOb,UAAS,iBAClB
iL,gBAAgBC,EAAa,gBAC7B9K,KAAI,YAAc,SAASQ,GACxB,GAAIwa,GAAK3iB,EAAGG,MAAM8H,UAAUH,EAAEmS,EAAK9R,EAAE,GAAGA,EAAE,MAAOuB,KAAK2f,KAAK6Z,EAAEF,EAAQ76B,EAAE,GAAGA,EAAE,KAAKuB,KAAK+P,IAAI,CAC1F,OAAO,aAAekJ,EAAK,IAAM3iB,EAAGG,MAAM8H,UAAUsH,EAAEsS,EAAK1Z,EAAE,GAAGA,EAAE,MAAQ,MAE7EE,SACNy7B,GAAOz/B,KAAK,SAAS8D,GAClB3H,GAAG2G,OAAOpG,MACP2J,QAAO,YAAa,GACpBA,QAAO,YAAevC,EAAE,IAAI,GAC5BuC,QAAO,SAAS,KAErBo5B,GAAOtxB,gBAAgBC,EAAa,kBAC/B9K,KAAI,YAAc,SAASQ,GACxB,GAAIwa,GAAK3iB,EAAGG,MAAM8H,UAAUH,EAAEmS,EAAK9R,EAAE,GAAGA,EAAE,MAAOuB,KAAK2f,KAAK6Z,EAAEF,EAAQ76B,EAAE,GAAGA,EAAE,KAAKuB,KAAK+P,IAAI,CAC1F,OAAO,aAAekJ,EAAK,IAAM3iB,EAAGG,MAAM8H,UAAUsH,EAAEsS,EAAK1Z,EAAE,GAAGA,EAAE,MAAQ,MAKlF47B,GAEAC,aAAaC,GACbA,EAAY5gC,WAAW+9B,EAAwB2C,IAI/C3C,IAIJ3gB,EAAK3Y,EAAEgS,OACPiM,EAAKxW,EAAEuK,OACPwpB,EAAKJ,EAAEppB,OAEP0pB,EAASp8B,EACTq8B,EAAUp8B,IAGdoL,EAAYS,UAAS,qBACdpQ,EA1hBX,GA4CI2d,GAAIsF,EAAIud,EACNE,EACAC,EACAQ,EA/CF1+B,GAAgBE,IAAK,EAAGqR,MAAO,EAAGD,OAAQ,EAAGrR,KAAM,GACjD4B,EAAe,KACfC,EAAe,KACfiF,EAAetM,EAAGG,MAAMuQ,eACxBizB,EAAmB,KACnBp5B,EAAeb,KAAKwB,MAAsB,IAAhBxB,KAAKyB,UAC/BjE,EAAe,KACfY,EAAetH,GAAG8H,MAAMC,SACxBgH,EAAe/O,GAAG8H,MAAMC,SACxB26B,EAAe1iC,GAAG8H,MAAMC,SACxB0R,EAAe,SAAS9R,GAAK,MAAOA,GAAEL,GACtC+Z,EAAe,SAAS1Z,GAAK,MAAOA,GAAEoH,GACtCyzB,EAAe,SAAS76B,GAAK,MAAOA,GAAEuH,MAAQ,GAC9Cm0B,EAAe,SAAS17B,GAAK,MAAOA,GAAE+7B,OAAS,UAC/CtkB,KACA0B,KACA6hB,KACAjhB,GAAe,EACf8F,EAAe,SAAS7f,GAAK,OAAQA,EAAEg8B,WACvC/iB,GAAe,EACf6hB,EAAe,GACfxhB,GAAe,EACf0gB,GAAe,EACfD,GAAe,EACfG,EAAe,WAAa,MAAO,KACnCroB,EAAe,KACfI,EAAe,KACfF,EAAe,KACfiB,EAAe,KACf4nB,EAAe,KACfK,EAAe,KACfne,GAAe,EACfvkB,EAAeF,GAAGE,SAAQ,eAAiB,kBAAmB,mBAAoB,kBAAmB,aACrGklB,GAAe,EACfnd,EAAe,IACfs7B,EAAyB,IACzBtI,GAAgB,EAYhB4F,GAAc,EACd5uB,EAAczS,EAAGG,MAAMsS,YAAY/R,EAAU+H,GAC7C46B,GAAkB,GAAI,KACtBvC,IAskBN,OAxFAh+B,GAAMpC,SAAWA,EACjBoC,EAAM8L,QAAU5O,EAAGG,MAAM0O,YAAYhO,KAAKiC,GAG1CA,EAAMkS,OAAS,GAAI,YACfjU,KAAKuhB,gBAAkB,WAInB,MAHAtiB,GAAGO,IAAIqD,MAAM,WACTsD,EAAUK,UAAS,mBAAoBmD,QAAO,SAAU,KAErD,MAEX3J,KAAKohB,eAAiB,SAAUsC,EAAarC,EAAYC,GACrDriB,EAAGO,IAAIqD,MAAM,WACTsD,EAAUC,OAAM,cACbI,UAAS,cAAiBkd,GAC1Bld,UAAS,aAAgB6a,GACzB1X,QAAO,QAAU2X,OAMhC3hB,EAASiB,GAAE,yBAA2B,SAASwG,GACvC+Z,GAAapf,EAAMkS,OAAOmN,eAAeha,EAAEsc,YAAYtc,EAAEia,YAAW,KAG5E1hB,EAASiB,GAAE,wBAA0B,SAASwG,GACtC+Z,GAAapf,EAAMkS,OAAOmN,eAAeha,EAAEsc,YAAYtc,EAAEia,YAAW,KAG5Etf,EAAMgM,SAAWC,OAAOC,WAEpB5H,OAAe6H,IAAK,WAAW,MAAO7H,IAAS8H,IAAK,SAASvG,GAAGvB,EAAMuB,IACtEtB,QAAe4H,IAAK,WAAW,MAAO5H,IAAU6H,IAAK,SAASvG,GAAGtB,EAAOsB,IACxEvC,QAAe6I,IAAK,WAAW,MAAOnH,IAAKoH,IAAK,SAASvG,GAAGb,EAAEa,IAC9DuS,QAAejM,IAAK,WAAW,MAAOM,IAAKL,IAAK,SAASvG,GAAG4G,EAAE5G,IAC9Dy7B,YAAen1B,IAAK,WAAW,MAAOi0B,IAAKh0B,IAAK,SAASvG,GAAGu6B,EAAEv6B,IAC9DqR,SAAe/K,IAAK,WAAW,MAAO+K,IAAW9K,IAAK,SAASvG,GAAGqR,EAAQrR,IAC1EyR,SAAenL,IAAK,WAAW,MAAOmL,IAAWlL,IAAK,SAASvG,GAAGyR,EAAQzR,IAC1EolB,aAAe9e,IAAK,WAAW,MAAO8zB,IAAc7zB,IAAK,SAASvG,GAAGo6B,EAAWp6B,IAChFuR,QAAejL,IAAK,WAAW,MAAOiL,IAAUhL,IAAK,SAASvG,GAAGuR,EAAOvR,IACxEwS,QAAelM,IAAK,WAAW,MAAOkM,IAAUjM,IAAK,SAASvG,GAAGwS,EAAOxS,IACxE07B,YAAep1B,IAAK,WAAW,MAAOm0B,IAAal0B,IAAK,SAASvG,GAAGy6B,EAAUz6B,IAC9EiX,QAAe3Q,IAAK,WAAW,MAAO2Q,IAAU1Q,IAAK,SAASvG,GAAGiX,EAAOjX,IACxE2Y,QAAerS,IAAK,WAAW,MAAOqS,IAAUpS,IAAK,SAASvG,GAAG2Y,EAAO3Y,IACxE27B,YAAer1B,IAAK,WAAW,MAAOk0B,IAAaj0B,IAAK,SAASvG,GAAGw6B,EAAUx6B,IAC9EuZ,aAAejT,IAAK,WAAW,MAAOiT,IAAehT,IAAK,SAASvG,GAAGuZ,EAAYvZ,IAClFqf,aAAe/Y,IAAK,WAAW,MAAO+Y,IAAe9Y,IAAK,SAASvG,GAAGqf,EAAYrf,IAClFs6B,cAAeh0B,IAAK,WAAW,MAAOg0B,IAAgB/zB,IAAK,SAASvG,GAAGs6B,EAAat6B,IACpFyY,SAAenS,IAAK,WAAW,MAAOmS,IAAWlS,IAAK,SAASvG,GAAGyY,EAAQzY,IAC1E8Y,UAAexS,IAAK,WAAW,MAAOwS,IA
AAoB,QAAQ,CAAC,eAAe,EAAE;AAC9C,wBAAwB,MAAM,CAAC,CAAC,MAAM,CAAC;AACvC,wBAAwB,MAAM,CAAC,CAAC,MAAM;AACtC,oBAAoB,GAAG;AACvB,oBAAoB,KAAK,CAAC,eAAe,CAAC,IAAI,EAAE,CAAC,EAAE,IAAI,CAAC,GAAG,CAAC,SAAS;AACrE,oBAAoB,OAAO,CAAC,MAAM,CAAC,IAAI,EAAE;AACzC,oBAAoB,MAAM,CAAC;AAC3B,gBAAgB,CAAC,CAAC,IAAI,CAAC,CAAC;AACxB,oBAAoB,OAAO,CAAC,MAAM,CAAC,KAAK,EAAE;AAC1C,gBAAgB,CAAC;AACjB;AACA;AACA,gBAAgB,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,QAAQ,EAAE;AAC7E,gBAAgB,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,SAAS,CAAC;AAC5C;AACA,gBAAgB,EAAE,CAAC,OAAO,CAAC,KAAK,CAAC,GAAG,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM;AACrD,gBAAgB,EAAE,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC;AACrC,oBAAoB,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,KAAK,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7E,oBAAoB,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,IAAI;AAC3D,oBAAoB,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,GAAG,YAAY,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,SAAS,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC;AACtF,wBAAwB,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,GAAG,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,KAAK,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC7F,oBAAoB,CAAC;AACrB,oBAAoB,IAAI,CAAC,CAAC;AAC1B,wBAAwB,QAAQ,CAAC,eAAe,EAAE;AAClD,4BAA4B,MAAM,CAAC,CAAC,MAAM,CAAC;AAC3C,4BAA4B,MAAM,CAAC,CAAC,MAAM;AAC1C,wBAAwB,GAAG;AAC3B,wBAAwB,KAAK,CAAC,eAAe,CAAC,IAAI,EAAE,CAAC,EAAE,IAAI,CAAC,GAAG,CAAC,SAAS;AACzE,wBAAwB,OAAO,CAAC,MAAM,CAAC,IAAI,EAAE;AAC7C,wBAAwB,MAAM,CAAC;AAC/B,oBAAoB,CAAC;AACrB,gBAAgB,CAAC;AACjB,gBAAgB,IAAI,CAAC,CAAC;AACtB,oBAAoB,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,MAAM,EAAE;AACxD,gBAAgB,CAAC;AACjB;AACA,gBAAgB,QAAQ,CAAC,gBAAgB,EAAE;AAC3C,oBAAoB,MAAM,CAAC,CAAC,MAAM,CAAC;AACnC,oBAAoB,MAAM,CAAC,CAAC,MAAM,CAAC;AACnC,oBAAoB,WAAW,CAAC,CAAC,WAAW;AAC5C,gBAAgB,GAAG;AACnB;AACA,gBAAgB,EAAE,EAAE,CAAC,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,eAAe;AACzE,gBAAgB,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACnD,oBAAoB,QAAQ,CAAC,eAAe,EAAE;AAC9C,wBAAwB,MAAM,CAAC,CAAC,MAAM,CAAC;AACvC,wBAAwB,MAAM,CAAC,CAAC,MAAM,CAAC;AACvC,wBAAwB,WAAW,CAAC,CAAC,WAAW;AAChD,oBAAoB,GAAG;AACvB,gBAAgB,CAAC;AACjB;AACA,gBAAgB,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,YAAY;AACrE,gBAAgB,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC;AAChD,oBAAoB,QAAQ,CAAC,YAAY,EAAE;AAC3C,wBAAwB,MAAM,CAAC,CAAC,MAAM,CAAC;AACvC,wBAAwB,MAAM,CAAC,CAAC,MAAM,CAAC;AACvC,wBAAwB,WAAW,CAAC,CAAC,WAAW;AAChD,oBAAoB,GAAG;AACvB,gBAAgB,CAAC;AACjB;AACA,gBAAgB,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,gBAAgB;AAC9E,gBAAgB,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC;AACpD,iBAAiB,QAAQ,CAAC,gBAAgB,EAAE;AAC5C,kBAAkB,MAAM,CAAC,CAAC,MAAM,CAAC;AACjC,kBAAkB,MAAM,CAAC,CAAC,MAAM,CAAC;AACjC,kBAAkB,WAAW,CAAC,CAAC,WAAW;AAC1C,iBAAiB,GAAG;AACpB,gBAAgB,CAAC;AACjB;AACA,gBAAgB,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,cAAc;AAC5E,gBAAgB,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC;AAClD,iBAAiB,QAAQ,CAAC,cAAc,EAAE;AAC1C,kBAAkB,MAAM,CAAC,CAAC,MAAM,CAAC;AACjC,kBAAkB,MAAM,CAAC,CAAC,MAAM,CAAC;AACjC,kBAAkB,WAAW,CAAC,CAAC,WAAW;AAC1C,iBAAiB,GAAG;AACpB,gBAAgB,CAAC;AACjB,YAAY,CAAC;AACb;AACA,YAAY,YAAY;AACxB,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,YAAY,CAAC;AAC7C,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,YAAY,CAAC,CAAC,IAAI,CAAC;AACnD,gBAAgB,CAAC,EAAE,EAAE,QAAQ,CAAC,CAAC,CAAC,YAAY,CAAC,IAAI,CAAC;AAClD,gBAAgB,CAAC,EAAE,EAAE,SAAS,CAAC,CA
AC,CAAC,YAAY,CAAC,IAAI,CAAC;AACnD,gBAAgB,CAAC,EAAE,EAAE,OAAO,CAAC,CAAC,CAAC,YAAY,CAAC,IAAI,CAAC;AACjD,gBAAgB,CAAC,EAAE,EAAE,QAAQ,CAAC,CAAC,CAAC,YAAY,CAAC;AAC7C,gBAAgB,CAAC,EAAE,EAAE,KAAK,EAAE,CAAC,YAAY,CAAC;AAC1C,YAAY,CAAC;AACb;AACA,YAAY,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC;AACnC,YAAY,EAAE,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC,SAAS,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC;AAChE,YAAY,KAAK,CAAC,eAAe,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,gBAAgB,EAAE,CAAC,EAAE,aAAa,CAAC,CAAC,MAAM,CAAC;AAC3C,gBAAgB,EAAE,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC,EAAE,CAAC,KAAK,CAAC,SAAS,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC;AAChF,gBAAgB,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,QAAQ,EAAE,CAAC,CAAC;AACzC,oBAAoB,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,oBAAoB,EAAE;AACtE,wBAAwB,CAAC,SAAS,EAAE,IAAI,EAAE;AAC1C,wBAAwB,CAAC,IAAI,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,MAAM,EAAE;AAClF,oBAAoB,IAAI,CAAC,KAAK,EAAE;AAChC,wBAAwB,CAAC,MAAM,EAAE,IAAI,EAAE;AACvC,wBAAwB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,SAAS,EAAE;AACtD,wBAAwB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,GAAG;AAC7D,wBAAwB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,GAAG;AAC7D,wBAAwB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,eAAe,CAAC;AACpD,wBAAwB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,EAAE;AACtC,oBAAoB,IAAI,CAAC,IAAI,GAAG,MAAM,GAAG;AACzC,gBAAgB,GAAG;AACnB,YAAY,CAAC;AACb,QAAQ,GAAG;AACX,IAAI,CAAC;AACL;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC;AAC5B;AACA,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAChC,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC7C,QAAQ,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC/E,QAAQ,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,GAAG,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAChF,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC5C,QAAQ,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;AAClB,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAChC,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC7C,QAAQ,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACnB,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAChC,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC7C,QAAQ,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACnB,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,KAAK,CAAC,aAAa,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACvC,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,aAAa,CAAC;AACpD,QAAQ,aAAa,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1B,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,KAAK,CAAC,YAAY,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACtC,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,YAAY,CAAC;AACnD,QAAQ,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC;AACzB,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;AACF;AACA,EAAE,CAAC,OAAO,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM
,CAAC,KAAK,CAAC,GAAG,CAAC,EAAE,CAAC,QAAQ,CAAC;AAC9G,CAAC,IAAI,CAAC,EAAE,CAAC,SAAS,CAAC,IAAI,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,OAAO,CAAC,KAAK,CAAC,EAAE,CAAC,MAAM,CAAC,GAAG,CAAC,MAAM,CAAC,KAAK,CAAC;AAC5G;AACA,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,EAAE,CAAC;AAC7E,CAAC,MAAM,CAAC,EAAE,CAAC,UAAU,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE,CAAC,EAAE,GAAG,CAAC,iBAAiB,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;AACrH,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC;AACpC;AACA,CAAC,IAAI,CAAC,KAAK,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,qBAAqB,CAAC,IAAI;AACvD;AACA,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,KAAK,CAAC,MAAM,CAAC;AAChC,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC;AACjH,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,UAAU,CAAC,MAAM,CAAC;AACzD,CAAC,EAAE;AACH,EAAE,CAAC,iBAAiB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAChE,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,UAAU,CAAC,KAAK,EAAE,CAAC,CAAC;AACtC,QAAQ,MAAM,CAAC,IAAI,CAAC;AACpB,IAAI,CAAC;AACL,IAAI,GAAG,CAAC,UAAU,CAAC;AACnB,IAAI,EAAE,CAAC,CAAC,MAAM,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC1C,QAAQ,UAAU,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAClC,YAAY,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACvB,QAAQ,CAAC;AACT,IAAI,CAAC,CAAC,IAAI,CAAC,CAAC;AACZ,QAAQ,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;AAC/B,IAAI,CAAC;AACL,IAAI,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,QAAQ,EAAE,CAAC,SAAS,CAAC,GAAG,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,KAAK,CAAC,IAAI;AAC7E,QAAQ,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC;AAC1D,QAAQ,EAAE;AACV,QAAQ,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,CAAC,OAAO,CAAC,UAAU,CAAC,GAAG;AACtE,QAAQ,EAAE,CAAC,QAAQ,CAAC,MAAM,CAAC,EAAE,CAAC,SAAS,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,QAAQ,CAAC,EAAE,CAAC,EAAE,CAAC,QAAQ;AAC5E,QAAQ,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,SAAS;AAC7E,QAAQ,EAAE,CAAC,CAAC,SAAS,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,EAAE;AACtE,QAAQ,EAAE,CAAC,WAAW,CAAC,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,QAAQ,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,KAAK,CAAC,OAAO;AAC5E,QAAQ,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,KAAK,CAAC,QAAQ,CAAC,EAAE,CAAC,QAAQ,CAAC;AAChD,QAAQ,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACjC,IAAI,EAAE;AACN;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,IAAI,EAAE,IAAI,CAAC;AACxC,IAAI,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AAC1D,IAAI,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,UAAU,CAAC,MAAM,CAAC,KAAK,GAAG;AACjD;AACA,IAAI,EAAE,CAAC,CAAC,MAAM,CAAC,YAAY,CAAC,GAAG,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC;AAC9C,QAAQ,YAAY,CAAC,CAAC,CAAC,KAAK,CAAC;AAC7B,IAAI,CAAC;AACL;AACA,IAAI,EAAE,CAAC,CAAC,YAAY,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC;AACrC,QAAQ,MAAM
,CAAC,KAAK,CAAC,CAAC,EAAE,KAAK,CAAC,KAAK,CAAC,KAAK;AACzC,IAAI,CAAC;AACL;AACA,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,EAAE,KAAK,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG;AACzD,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,UAAU,CAAC,MAAM,CAAC,SAAS,GAAG;AAClD;AACA,IAAI,EAAE,CAAC,CAAC,MAAM,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC;AAC3C,QAAQ,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;AAC9B,IAAI,CAAC;AACL;AACA,IAAI,EAAE,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC;AAChF,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC,CAAC,IAAI,CAAC,CAAC;AACZ,QAAQ,MAAM,CAAC,SAAS;AACxB,IAAI,CAAC;AACL,EAAE;AACF;AACA,EAAE;AACF,CAAC,OAAO,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,SAAS,CAAC;AACtE,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,SAAS,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,SAAS,EAAE;AAC/D,CAAC,SAAS,CAAC,CAAC,OAAO,CAAC,IAAI,CAAC;AACzB,CAAC,EAAE;AACH,EAAE,CAAC,iBAAiB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAChE,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB,IAAI,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC,IAAI,CAAC;AACrD,IAAI,MAAM,CAAC,OAAO,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClC,QAAQ,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5C,QAAQ,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AACnE,YAAY,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC;AAC7B,YAAY,gBAAgB,CAAC,CAAC,CAAC,CAAC,CAAC;AACjC,QAAQ,CAAC;AACT,IAAI,GAAG;AACP,IAAI,MAAM,CAAC,gBAAgB,CAAC;AAC5B,EAAE;AChVF;AACA,EAAE,CAAC,KAAK,CAAC,KAAK,CAAC,GAAG,CAAC,EAAE,CAAC,YAAY,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,SAAS,CAAC;AAC/D,CAAC,OAAO,CAAC,KAAK,CAAC;AACf,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,GAAG,OAAO,EAAE,CAAC,GAAG,QAAQ,CAAC,EAAE,CAAC;AACxD,CAAC,CAAC,IAAI,CAAC,YAAY,EAAE;AACrB;AACA,CAAC,GAAG,GAAG,IAAI,EAAE,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC,QAAQ,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC;AACjE,CAAC,EAAE;AACH,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAChC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,EAAE;AACN,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,OAAO,CAAC,EAAE,CAAC,SAAS,CAAC;AAC3F,IAAI,OAAO,CAAC,MAAM,CAAC,EAAE,CAAC,IAAI,CAAC;AAC3B,IAAI,CAAC;AACL,QAAQ,GAAG,CAAC,CAAC,CAAC,IAAI,EAAE;AACpB,QAAQ,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,EAAE;AAC7B,QAAQ,MAAM,CAAC,CAAC,CAAC;AACjB,YAAY,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,EAAE,GAAG,GAAG;AAC/D,YAAY,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,EAAE,GAAG,EAAE;AAC9D,QAAQ,CAAC;AACT,IAAI,CAAC;AACL,IAAI,EAAE;AACN,IAAI,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,SAAS,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,OAAO,EAAE,CAAC,MAAM,CAAC;AAC/H,QAAQ,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI;AACvB,QAAQ,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,UAAU,CAAC,GAAG,CAAC,OAAO,CAAC,EAAE,CAAC,UAAU,CAAC;AAC5F,QAAQ,CAAC,GAAG,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,GAAG,CAAC
,KAAK,CAAC,QAAQ,CAAC;AAChF,QAAQ,CAAC,GAAG,YAAY,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,CAAC,SAAS,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,OAAO,CAAC,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC;AACjI,QAAQ,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC,IAAI,EAAE,EAAE,CAAC,QAAQ,CAAC,UAAU,CAAC,GAAG,CAAC,OAAO,CAAC,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,OAAO,CAAC;AAClG,QAAQ,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC,IAAI,EAAE,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,KAAK,CAAC;AACtF,QAAQ,CAAC,GAAG,SAAS,CAAC,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,OAAO,CAAC,IAAI,GAAG;AAC5F,QAAQ,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC;AAC3D,QAAQ,CAAC,GAAG,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,QAAQ,CAAC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,EAAE,CAAC;AACzF,QAAQ,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC,IAAI,EAAE,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC;AAC7F,QAAQ,CAAC,GAAG,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,OAAO,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC;AAC/D,QAAQ,CAAC,GAAG,aAAa,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC;AACrE,QAAQ,CAAC,GAAG,oBAAoB,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,OAAO,CAAC,OAAO,CAAC,OAAO,CAAC,MAAM,CAAC,GAAG,CAAC,IAAI,CAAC,KAAK,CAAC,MAAM,CAAC;AACjI,IAAI,CAAC;AACL;AACA,IAAI,EAAE,CAAC,MAAM,CAAC,QAAQ,CAAC,GAAG,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,MAAM,CAAC;AACrD,IAAI,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACzC,QAAQ,MAAM,CAAC,CAAC,CAAC;AACjB,IAAI,EAAE;AACN;AACA,IAAI,EAAE,CAAC,MAAM,CAAC,QAAQ,CAAC,GAAG,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,KAAK,CAAC;AACpD,IAAI,GAAG,CAAC,eAAe,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACvC,QAAQ,MAAM,CAAC,CAAC,CAAC;AACjB,IAAI,EAAE;AACN;AACA,IAAI,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvC,QAAQ,MAAM,CAAC,CAAC,CAAC;AACjB,IAAI,EAAE;AACN;AACA,IAAI,EAAE,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,SAAS,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC;AAC5E,IAAI,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,QAAQ,CAAC,IAAI,CAAC,QAAQ,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,OAAO,CAAC;AACrE,IAAI,GAAG,CAAC,gBAAgB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC;AACzB,YAAY,MAAM,CAAC,GAAG;AACtB,QAAQ,CAAC;AACT;AACA,QAAQ,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,QAAQ,CAAC,aAAa,EAAE,KAAK,IAAI;AAC/D,QAAQ,EAAE,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC;AAC5B,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,KAAK,CAAC,SAAS,EAAE,KAAK,EAAE;AACrD,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE;AAC1B,gBAAgB,CAAC,KAAK,GAAG,MAAM,EAAE,KAAK,GAAG;AACzC;AACA,YAAY,UAAU,CAAC,MAAM,EAAE,EAAE,EAAE;AACnC,gBAAgB,CAAC,MAAM,EAAE,EAAE,EAAE;AAC7B,gBAAgB,CAAC,IAAI,EAAE,OAAO,EAAE,CAAC,CAAC,CAAC;AACnC,gBAAgB,CAAC,MAAM,EAAE,MAAM,EAAE;AACjC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,KAAK,EAAE,CAAC,IAAI,CAAC;AACzC,gBAAgB,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC,CAAC,KAAK,GAAG;AAChD,QAAQ,CAAC;AACT;AACA,QAAQ,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,KAAK,CAAC,SAAS,EAAE,KAAK,EAAE;AACjD,YAAY,CAAC,IAAI,EAAE,CAAC,EAAE;AACtB,YAAY,CAAC,KAAK,GAAG,MAAM,EAAE,KAAK,GAAG;AACrC;AACA,QAAQ,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,UAAU,CAAC,SAAS,EAA
E,EAAE,EAAE;AAClD,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,EAAE;AACrD,gBAAgB,CAAC,KAAK,EAAE;AACxB,gBAAgB,CAAC,MAAM,EAAE,EAAE,EAAE;AAC7B,gBAAgB,CAAC,OAAO,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,GAAG;AACzE;AACA,QAAQ,SAAS,CAAC,MAAM,EAAE,EAAE,EAAE;AAC9B,YAAY,CAAC,OAAO,EAAE,MAAM,CAAC,KAAK,CAAC,KAAK,EAAE,IAAI,CAAC;AAC/C,YAAY,CAAC,MAAM,EAAE,GAAG,EAAE;AAC1B,YAAY,CAAC,KAAK,EAAE,UAAU,CAAC,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,GAAG;AACtE;AACA,QAAQ,SAAS,CAAC,MAAM,EAAE,EAAE,EAAE;AAC9B,YAAY,CAAC,OAAO,EAAE,GAAG,EAAE,IAAI,CAAC;AAChC,YAAY,CAAC,OAAO,EAAE,KAAK,EAAE,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,KAAK,EAAE;AAC7D,YAAY,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI;AACnE;AACA,QAAQ,SAAS,CAAC,MAAM,EAAE,EAAE,EAAE;AAC9B,YAAY,CAAC,OAAO,EAAE,KAAK,EAAE,IAAI,CAAC;AAClC,YAAY,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,cAAc,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AACxE;AACA,QAAQ,SAAS,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,SAAS,CAAC,GAAG,MAAM,EAAE,EAAE,EAAE;AACxF,YAAY,CAAC,OAAO,EAAE,OAAO,EAAE,CAAC,IAAI,CAAC;AACrC,YAAY,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,MAAM,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG;AACnF;AACA,QAAQ,SAAS,CAAC,SAAS,EAAE,EAAE,GAAG,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAC9B,gBAAgB,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,GAAG,MAAM,EAAE,CAAC,CAAC,CAAC,GAAG,KAAK,IAAI,GAAG,EAAE,CAAC,CAAC,KAAK,GAAG;AAC3F,gBAAgB,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClC,gBAAgB,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC;AAC/B,oBAAoB,CAAC,KAAK,EAAE,MAAM,CAAC,MAAM,CAAC,KAAK,EAAE,CAAC,YAAY,CAAC,OAAO,EAAE;AACxE,oBAAoB,CAAC,KAAK,EAAE,MAAM,CAAC,GAAG,CAAC,KAAK,EAAE,CAAC,YAAY,CAAC,OAAO,EAAE;AACrE,gBAAgB,CAAC;AACjB,YAAY,CAAC;AACb,QAAQ,GAAG;AACX;AACA,QAAQ,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,GAAG,SAAS,CAAC;AAC1C,QAAQ,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC;AACnC,YAAY,IAAI,CAAC,EAAE,CAAC,EAAE,GAAG,CAAC,KAAK,EAAE,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,GAAG,GAAG;AACjE,QAAQ,MAAM,CAAC,IAAI,CAAC;AACpB;AACA,IAAI,EAAE;AACN;AACA,IAAI,EAAE;AACN,KAAK,QAAQ,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,QAAQ,CAAC,IAAI,CAAC;AAChF,KAAK,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC;AACrC,KAAK,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC;AACrB,QAAQ,IAAI,CAAC,CAAC,CAAC,OAAO,EAAE;AACxB,QAAQ,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC;AACrB,KAAK,CAAC;AACN,KAAK,EAAE;AACP,IAAI,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC/B,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC;AACnB,YAAY,IAAI,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3D,YAAY,GAAG,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;AACzD,QAAQ,EAAE;AACV;AACA,QAAQ,EAAE,CAAC,gBAAgB,CAAC,QAAQ,CAAC,IAAI,EAAE,SAAS,CAAC,EAAE,CAAC,CAAC,IAAI,EAAE,CAAC,CAAC;AACjE,YAAY,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,OAAO,CAAC,EAAE,CAAC,QAAQ;AAC3E,YAAY,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,IAAI,CAAC;AAChC,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,qBAAqB,GAAG;AAC/D,YAAY,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,
MAAM,CAAC,IAAI,CAAC;AACpC,YAAY,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,MAAM,CAAC,GAAG,CAAC;AAClC,QAAQ,CAAC;AACT;AACA,QAAQ,MAAM,CAAC,GAAG,CAAC;AACnB,IAAI,EAAE;AACN;AACA,IAAI,GAAG,CAAC,gBAAgB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC;AAC5B,YAAY,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC;AAC7C,gBAAgB,MAAM,CAAC,IAAI,CAAC;AAC5B,YAAY,CAAC;AACb,YAAY,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACvE,YAAY,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC;AAC9C,gBAAgB,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,EAAE;AACtC,gBAAgB,MAAM,CAAC,IAAI,CAAC;AAC5B,YAAY,CAAC;AACb,QAAQ,CAAC;AACT,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,EAAE,CAAC,UAAU,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,SAAS,CAAC,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,OAAO;AACrF,IAAI,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC;AAChC,IAAI,GAAG,CAAC,iBAAiB,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC3C,QAAQ,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,IAAI,GAAG,YAAY,CAAC;AACjD,YAAY,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,IAAI,GAAG,WAAW,CAAC;AAC/C,YAAY,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,UAAU,CAAC;AACzF,YAAY,YAAY,CAAC,CAAC,CAAC,QAAQ,CAAC,eAAe,CAAC,YAAY,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,UAAU,CAAC;AAC3F,YAAY,IAAI,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC;AAC3B;AACA,QAAQ,EAAE,CAAC,SAAS,CAAC,QAAQ,CAAC,KAAK,CAAC,EAAE,CAAC,OAAO;AAC9C,QAAQ,MAAM,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC;AAC1B,YAAY,IAAI,CAAC,CAAC,CAAC,EAAE;AACrB,gBAAgB,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC1C,gBAAgB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACrC,gBAAgB,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC;AACxD,gBAAgB,EAAE,EAAE,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC;AACzD,gBAAgB,EAAE,EAAE,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,YAAY,CAAC;AAC5F,gBAAgB,KAAK,CAAC;AACtB,YAAY,IAAI,CAAC,CAAC,CAAC,EAAE;AACrB,gBAAgB,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC;AAChC,gBAAgB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACrC,gBAAgB,EAAE,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC;AACrF,gBAAgB,EAAE,CAAC,EAAE,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC;AAC1D,gBAAgB,EAAE,CAAC,EAAE,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,YAAY,CAAC;AAC7F,gBAAgB,KAAK,CAAC;AACtB,YAAY,IAAI,CAAC,CAAC,CAAC,EAAE;AACrB,gBAAgB,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,aAAa,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC;AAC3F,gBAAgB,GAAG,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC/B,gBAAgB,EAAE,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC;AACrF,gBAAgB,EAAE,CAAC,EAAE,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,CAA
C,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,GAAG,CAAC;AAC7D,gBAAgB,EAAE,CAAC,EAAE,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,WAAW,CAAC;AAC7F,gBAAgB,KAAK,CAAC;AACtB,YAAY,IAAI,CAAC,CAAC,CAAC,EAAE;AACrB,gBAAgB,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AACrC,gBAAgB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC1C,gBAAgB,EAAE,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,QAAQ,CAAC;AACtD,gBAAgB,EAAE,CAAC,EAAE,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,GAAG,CAAC;AAC7D,gBAAgB,EAAE,CAAC,EAAE,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,WAAW,CAAC;AAC7F,gBAAgB,KAAK,CAAC;AACtB,YAAY,IAAI,CAAC,CAAC,MAAM,EAAE;AAC1B,gBAAgB,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AACrC,gBAAgB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACrC,gBAAgB,KAAK,CAAC;AACtB,YAAY,OAAO,CAAC;AACpB,gBAAgB,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AACzB,gBAAgB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC;AACxB,gBAAgB,KAAK,CAAC;AACtB,QAAQ,CAAC;AACT;AACA,QAAQ,MAAM,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,EAAE;AAC5C,IAAI,EAAE;AACN;AACA,IAAI,EAAE;AACN,KAAK,SAAS,CAAC,GAAG,CAAC,OAAO,CAAC,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,EAAE,CAAC,QAAQ,CAAC;AACrF,KAAK,EAAE;AACP,IAAI,GAAG,CAAC,eAAe,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACtC,QAAQ,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,QAAQ,EAAE,CAAC,CAAC;AAChC,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,QAAQ,GAAG;AACjC,gBAAgB,aAAa,CAAC,CAAC,CAAC,iBAAiB,CAAC,GAAG,EAAE;AACvD,gBAAgB,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,aAAa,CAAC,IAAI,CAAC;AACrD,gBAAgB,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,aAAa,CAAC,GAAG,CAAC;AAClD;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU;AACrD,YAAY,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC;AACzB,gBAAgB,OAAO;AACvB,oBAAoB,CAAC,SAAS,EAAE;AAChC,oBAAoB,CAAC,UAAU,EAAE;AACjC,oBAAoB,CAAC,KAAK,CAAC,SAAS,CAAC;AACrC,oBAAoB,CAAC,QAAQ,CAAC,CAAC,CAAC;AAChC,oBAAoB,CAAC,KAAK,EAAE,OAAO,EAAE,CAAC,CAAC,EAAE;AACzC,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,KAAK,EAAE,SAAS,EAAE,CAAC,OAAO,CAAC,MAAM,CAAC,EAAE,CAAC,MAAM,CAAC,GAAG,CAAC,KAAK;AACtF,gBAAgB,GAAG,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,YAAY,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE,GAAG;AACzG,gBAAgB,GAAG,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,GAAG;AACvG,gBAAgB,GAAG,CAAC,qBAAqB,CAAC,CAAC,CAAC,EAAE,CAAC,iBAAiB,CAAC,aAAa,CAAC,CAAC,aAAa,EAAE;AAC/F,gBAAgB,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,OAAO,CAAC,KAAK,EAAE,OAAO,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/D;AACA,gBAAgB,OAAO;AACvB,oBAAoB,CAAC,SAAS,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,WAAW;AAC9D,oBAAoB,CAAC,UAAU,EAAE;AACjC,oBAAoB,CAAC,QAAQ,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC;AACvD,oBAAoB,EAAE,CAAC,KAAK,CAAC,KAAK,CAAC,KAAK,CAAC,IAAI,CAAC,QAAQ,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,SAAS,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG;AAClG,oBAAoB,CAAC,UAAU,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC
;AAC3D,wBAAwB,MAAM,CAAC,qBAAqB,CAAC;AACrD,oBAAoB,EAAE,CAAC,CAAC,SAAS,EAAE;AACnC,oBAAoB,EAAE,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,MAAM,CAAC,SAAS,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,SAAS,CAAC;AAC9F,oBAAoB,CAAC,UAAU,GAAG,MAAM,CAAC,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACnE,wBAAwB,MAAM,CAAC,qBAAqB,CAAC;AACrD,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,KAAK,GAAG,EAAE,CAAC,SAAS,EAAE,CAAC,aAAa,CAAC;AAC1D,oBAAoB,CAAC,KAAK,EAAE,OAAO,EAAE,CAAC,CAAC,EAAE;AACzC,YAAY,CAAC;AACb;AACA,YAAY,YAAY,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC;AACrC,YAAY,YAAY,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC;AACnC,QAAQ,GAAG;AACX,IAAI,EAAE;AACN;AACA,IAAI,EAAE,CAAC,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,QAAQ,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC;AAClE,IAAI,QAAQ,CAAC,WAAW,EAAE,CAAC,CAAC;AAC5B,QAAQ,EAAE,CAAC,EAAE,OAAO,CAAC,EAAE,CAAC,CAAC,OAAO,CAAC,IAAI,GAAG,CAAC,CAAC;AAC1C,YAAY,EAAE,CAAC,MAAM,CAAC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC;AACjE;AACA,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC3B,YAAY,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,QAAQ,CAAC,IAAI,EAAE,MAAM,KAAK,EAAE,EAAE,IAAI,CAAC,IAAI,EAAE;AACzE;AACA,YAAY,OAAO,CAAC,KAAK,GAAG,MAAM,EAAE,GAAG,EAAE;AACzC,mBAAmB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,OAAO,GAAG;AACpF,mBAAmB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,EAAE,CAAC;AAClC,mBAAmB,CAAC,KAAK,EAAE,GAAG,EAAE,CAAC,CAAC,EAAE,KAAK,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC;AACpD,mBAAmB,CAAC,KAAK,EAAE,OAAO,EAAE,CAAC,CAAC,CAAC;AACvC,mBAAmB,CAAC,KAAK,EAAE,QAAQ,EAAE,CAAC,CAAC,KAAK,EAAE;AAC9C,mBAAmB,CAAC,SAAS,EAAE,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,GAAG,OAAO,CAAC,oBAAoB,CAAC,CAAC,IAAI,CAAC;AACvF,mBAAmB,CAAC,OAAO,CAAC,oBAAoB,CAAC,CAAC,IAAI,EAAE;AACxD;AACA,YAAY,OAAO,CAAC,IAAI,GAAG,MAAM,EAAE;AACnC,QAAQ,CAAC;AACT,IAAI,CAAC;AACL;AACA,IAAI,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC;AACrC,IAAI,QAAQ,CAAC,SAAS,EAAE,CAAC,CAAC;AAC1B,QAAQ,EAAE,CAAC,EAAE,OAAO,CAAC,CAAC,MAAM,CAAC;AAC7B,QAAQ,EAAE,CAAC,EAAE,gBAAgB,CAAC,IAAI,EAAE,CAAC,MAAM,CAAC;AAC5C;AACA,QAAQ,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,QAAQ,CAAC,EAAE,CAAC,CAAC;AAClC,YAAY,WAAW,GAAG;AAC1B,YAAY,EAAE,CAAC,QAAQ,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC;AACrD,YAAY,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,gBAAgB,CAAC,GAAG,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,IAAI;AACpG,YAAY,EAAE,SAAS,KAAK,CAAC,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC;AAC3E,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,gBAAgB,CAAC,IAAI,EAAE;AACpD,YAAY,EAAE,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC;AAC7B,gBAAgB,OAAO,CAAC,IAAI,GAAG,SAAS,CAAC,CAAC,CAAC,UAAU,CAAC;AACtD,YAAY,CAAC;AACb;AACA,YAAY,eAAe,GAAG;AAC9B,QAAQ,GAAG;AACX;AACA,QAAQ,MAAM,CAAC,SAAS,CAAC;AACzB,IAAI,CAAC;AACL;AACA,IAAI,SAAS,CAAC,oBAAoB,CAAC,CAAC,CAAC,oBAAoB,CAAC;AAC1D,IAAI,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,SAAS,EAAE;AAC7D;AACA,IAAI,SAAS,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AAC5C,QAAQ,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,KAAK,CAAC,OAAO;AACpC,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AACrF,QAAQ,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAClF,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAA
C,CAAC,IAAI;AACrF,QAAQ,YAAY,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,IAAI;AACjG,QAAQ,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAClF,QAAQ,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAClF,QAAQ,SAAS,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AACxF,QAAQ,gBAAgB,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,gBAAgB,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,gBAAgB,CAAC,CAAC,IAAI;AAC7G,QAAQ,cAAc,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,cAAc,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,cAAc,CAAC,CAAC,IAAI;AACvG,QAAQ,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,eAAe,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,eAAe,CAAC,CAAC,IAAI;AAC1G,QAAQ,YAAY,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,IAAI;AACjG,QAAQ,aAAa,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,aAAa,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,aAAa,CAAC,CAAC,IAAI;AACpG,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AACrF;AACA,QAAQ,EAAE,CAAC,UAAU,CAAC,OAAO;AAC7B,QAAQ,cAAc,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAClF,YAAY,EAAE,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACrC,YAAY,EAAE,CAAC,UAAU,EAAE,cAAc,EAAE,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AAC3E,QAAQ,GAAG;AACX,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,EAAE,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACrC,YAAY,EAAE,CAAC,UAAU,EAAE,QAAQ,EAAE,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AACrE,QAAQ,GAAG;AACX,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC9E,YAAY,EAAE,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACrC,YAAY,EAAE,CAAC,UAAU,EAAE,MAAM,EAAE,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,OAAO,CAAC,QAAQ,EAAE,CAAC,OAAO,GAAG;AAC5E,QAAQ,GAAG;AACX;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,KAAK,CAAC,KAAK;AACnC,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,EAAE,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9B,gBAAgB,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC;AAC7B,gBAAgB,SAAS,GAAG;AAC5B,YAAY,CAAC;AACb,QAAQ,GAAG;AACX,QAAQ,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC/D,YAAY,EAAE,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI;AAC3E,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;AAC1B,gBAAgB,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;AACpC,gBAAgB,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,GAAG;AAC1C,gBAAgB,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;AAC3C,gBAAgB,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AACjE,YAAY,CAAC;AACb,YAAY,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AACrB,QAAQ,GAAG;AACX;AACA,QAAQ,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,UAAU;AAC/B,QAAQ,IAAI,CAAC,CAAC,CAAC,
GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,CAAC,IAAI,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,KAAK;AAC5E,QAAQ,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,EAAE,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,IAAI;AAC7D,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,SAAS,EAAE;AACpC,IAAI,MAAM,CAAC,SAAS,CAAC;AACrB,EAAE;ACrWF;AACA;AACA,EAAE;AACF,IAAI,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,IAAI;AAC5B;AACA,OAAO,CAAC,MAAM,CAAC,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,KAAK,CAAC,UAAU;AAC/C,CAAC,EAAE;AACH,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAClC,IAAI,EAAE,CAAC,IAAI,CAAC,QAAQ;AACpB,IAAI,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE;AACzC;AACA,IAAI,EAAE,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,GAAG;AAC/B,IAAI,EAAE,CAAC,CAAC,MAAM,CAAC,UAAU,CAAC,EAAE,CAAC,MAAM,CAAC,WAAW,CAAC,CAAC,CAAC;AAClD,QAAQ,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,UAAU,CAAC;AACvC,QAAQ,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,WAAW,CAAC;AACzC,QAAQ,MAAM,CAAC,CAAC,IAAI,EAAE;AACtB,IAAI,CAAC;AACL;AACA,IAAI,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE;AAC5C,IAAI,EAAE,CAAC,CAAC,QAAQ,CAAC,UAAU,GAAG,UAAU,CAAC,CAAC,EAAE;AAC5C,QAAQ,QAAQ,CAAC,eAAe,CAAC,EAAE;AACnC,QAAQ,QAAQ,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC;AAChD;AACA,QAAQ,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC,eAAe,CAAC,WAAW,CAAC;AAC1D,QAAQ,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,eAAe,CAAC,YAAY,CAAC;AAC5D,QAAQ,MAAM,CAAC,CAAC,IAAI,EAAE;AACtB,IAAI,CAAC;AACL;AACA,IAAI,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI;AAC/B,IAAI,EAAE,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,EAAE,CAAC,QAAQ,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC;AACrD,QAAQ,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,WAAW,CAAC;AAC/C,QAAQ,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,YAAY,CAAC;AACjD,QAAQ,MAAM,CAAC,CAAC,IAAI,EAAE;AACtB,IAAI,CAAC;AACL;AACA,IAAI,MAAM,CAAC,CAAC,IAAI,EAAE;AAClB,EAAE;AACF;AACA;AACA,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,OAAO,CAAC,MAAM,GAAG,EAAE,UAAU,CAAC,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,MAAM;AACrE,MAAM,CAAC,OAAO,CAAC,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC;AACvD,CAAC,EAAE;AACH,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC;AACjC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACjC,IAAI,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,MAAM,EAAE;AAC/C,EAAE;AACF,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACnC,IAAI,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,EAAE;AACnC,EAAE;AACF,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,IAAI,MAAM,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE,MAAM,CAAC,IAAI,GAAG;AAChD,EAAE;AACF,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACjC,IAAI,MAAM,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,MAAM,EAAE;AAC9C,EAAE;AACF;AACA;AACA,EAAE;AACF,KAAK,CAAC,QAAQ,CAAC,QAAQ,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,OAAO;AACrD,CAAC,EAAE;AACH,EAAE,CAAC,KAAK,CAAC,YAAY,CAAC,CAAC,CAAC,QAAQ,CAAC,OAAO,CAAC,CAAC,CAAC;AAC3C,IAAI,EAAE,CAAC,CAAC,MAAM,CAAC,gBAAgB,CAAC,CAAC,CAAC;AAClC,QAAQ,MAAM,CAAC,gBAAgB,EAAE,MAAM,EAAE,CAAC,OAAO,EAAE;AACnD,IAAI,CAAC,CAAC,IAAI,CAAC,CAAC;AACZ,QAAQ,EAAE,CAAC,GAAG,EAAE,KAAK,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,OAAO,EAAE;AACzE,IAAI,CAAC;AACL,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,KAAK,CAAC,QAAQ,CAAC,EAAE,CAAC,MAAM,CAAC,GAAG,CAAC,MAAM,CAAC,KA
AK,CAAC,QAAQ,CAAC;AAC7E,IAAI,MAAM,CAAC,CAAC;AACZ,QAAQ,QAAQ,CAAC,CAAC,OAAO,CAAC;AAC1B,QAAQ,KAAK,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC3B,YAAY,MAAM,CAAC,mBAAmB,EAAE,MAAM,EAAE,CAAC,OAAO,EAAE;AAC1D,QAAQ,CAAC;AACT,IAAI,CAAC;AACL,EAAE;AACF;AACA;AACA,EAAE;AACF,SAAS,CAAC,UAAU,CAAC,GAAG,CAAC,EAAE,CAAC,SAAS,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,QAAQ,CAAC,EAAE,CAAC,MAAM,CAAC;AACtE,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,KAAK;AAClD,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,KAAK,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI;AAC1E,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,SAAS,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,IAAI,CAAC,QAAQ,CAAC,aAAa;AACtE,EAAE;AACF,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC;AACrC,IAAI,EAAE,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,IAAI;AACrD,IAAI,EAAE,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC;AAC9B,QAAQ,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,YAAY,GAAG;AACvC;AACA,IAAI,EAAE,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK;AACpD,IAAI,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,KAAK,EAAE,CAAC,CAAC;AACxC,QAAQ,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,OAAO,GAAG,KAAK,CAAC,KAAK,EAAE;AAC1D,QAAQ,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,YAAY,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,WAAW,CAAC,GAAG,EAAE;AAC/C,QAAQ,EAAE;AACV;AACA,IAAI,EAAE,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,GAAG,CAAC,EAAE;AACrE,IAAI,EAAE,QAAQ,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,SAAS,CAAC,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,IAAI;AAC/D,IAAI,CAAC,CAAC,IAAI,CAAC,CAAC;AACZ,QAAQ,EAAE,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK;AACjE,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL,EAAE;AACF;AACA;AACA,EAAE;AACF,OAAO,CAAC,KAAK,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,EAAE;AAC7D,CAAC,KAAK,GAAG,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,WAAW,CAAC,MAAM;AACtE,CAAC,EAAE;AACH,EAAE,CAAC,KAAK,CAAC,YAAY,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACpC,IAAI,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,QAAQ,CAAC;AACrE,IAAI,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,GAAG,KAAK,IAAI;AAC5D,EAAE;AACF;AACA;AACA,EAAE;AACF,OAAO,CAAC,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,CAAC,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,GAAG;AAC9E,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,aAAa,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,UAAU;AACnD,EAAE;AACF,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC,MAAM,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC;AACpE,IAAI,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,GAAG,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,SAAS;AACpD,IAAI,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,EAAE;AAC9D,IAAI,aAAa,CAAC,CAAC,CAAC,aAAa,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,GAAG,KAAK,GAAG;AACnE;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,EAAE,CAAC,OAAO,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC;AAClE,IAAI,GAAG,CAAC,QAAQ,CAAC,C
AAC,CAAC,aAAa,CAAC,MAAM,CAAC;AACxC;AACA,IAAI,MAAM,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;AACpC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,EAAE;AACjC,QAAQ,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,UAAU,CAAC,GAAG,GAAG,CAAC,CAAC;AACnD,YAAY,MAAM,CAAC,UAAU,CAAC,GAAG,IAAI;AACrC,QAAQ,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC;AACnD,YAAY,MAAM,CAAC,UAAU,CAAC,GAAG,EAAE;AACnC,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC;AAChB,YAAY,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,UAAU,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,OAAO,CAAC,KAAK;AAC1D,YAAY,EAAE,CAAC,EAAE,QAAQ,CAAC,CAAC,CAAC;AAC5B,gBAAgB,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,KAAK,CAAC,IAAI;AAC1D,gBAAgB,QAAQ,CAAC,CAAC,CAAC,aAAa,CAAC,MAAM,CAAC;AAChD,YAAY,CAAC;AACb,YAAY,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACpC,YAAY,MAAM,CAAC,aAAa,CAAC,QAAQ,EAAE;AAC3C,QAAQ,CAAC;AACT,IAAI,EAAE;AACN,EAAE;AACF;AACA;AACA,EAAE;AACF,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,QAAQ,CAAC,MAAM;AAC3E,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC1E,IAAI,CAAC,EAAE,CAAC,WAAW,CAAC,EAAE,CAAC,QAAQ,CAAC,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,MAAM;AAClD,EAAE;AACF,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC;AAC1C;AACA,IAAI,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AAC/B,QAAQ,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC;AAC1C,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,EAAE,IAAI,GAAG;AACnD,YAAY,MAAM,CAAC,UAAU,CAAC,YAAY,CAAC;AAC3C,gBAAgB,EAAE,CAAC,MAAM,CAAC,QAAQ,EAAE,MAAM,CAAC,OAAO,EAAE,IAAI,GAAG;AAC3D,gBAAgB,MAAM,EAAE;AACxB,YAAY,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,OAAO,EAAE;AAC1C,QAAQ,GAAG;AACX,IAAI,EAAE;AACN;AACA,IAAI,EAAE,CAAC,SAAS,CAAC,KAAK,EAAE,EAAE,EAAE,KAAK,EAAE,CAAC,QAAQ,EAAE,CAAC,CAAC;AAChD,QAAQ,OAAO,CAAC,SAAS,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,IAAI,CAAC,IAAI,EAAE;AAClE,QAAQ,IAAI,CAAC,IAAI,CAAC,IAAI,EAAE;AACxB,QAAQ,EAAE,CAAC,KAAK,CAAC,cAAc,GAAG;AAClC,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,EAAE,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,EAAE,CAAC,CAAC;AACjD,QAAQ,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7B,YAAY,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,KAAK,EAAE;AACjC,QAAQ,CAAC;AACT,IAAI,GAAG;AACP,EAAE;AACF;AACA;AACA,EAAE;AACF,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,MAAM,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC;AAC5E,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,SAAS,CAAC;AACzE,OAAO,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,eAAe;AACtD,EAAE;AACF,EAAE,CAAC,KAAK,CAAC,mBAAmB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC;AACvD,IAAI,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,WAAW,CAAC,IAAI,EAAE,CAAC,CAAC;AAC1F,QAAQ,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,WAAW,CAAC,KAAK,EAAE,IAAI,CAAC,IAAI,GAAG,OAAO,EAAE,EAAE,MAAM,CAAC,EAAE,EAAE;AACrF,QAAQ,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,WAAW,CAAC,IAAI,GAAG,MAAM,CAAC;AACnD,QAAQ,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,SAAS,CAAC,UAAU,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC/D,IAAI,CAAC;AACL,IAAI,MAAM,CAAC,CAAC,CAAC;AACb,EAAE;AACF;AACA;AACA,EAAE;AACF,OAAO,CAAC,IAAI,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,I
AAI,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,OAAO,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC;AAC/D,EAAE;AACF,EAAE,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAClC,IAAI,EAAE,CAAC,EAAE,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC;AAC7B,QAAQ,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC;AACnB,QAAQ,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI;AACrB,QAAQ,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,QAAQ;AACzB,QAAQ,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC;AAC7B;AACA,QAAQ,MAAM,CAAC,CAAC,CAAC;AACjB,IAAI,CAAC;AACL,IAAI,MAAM,CAAC,CAAC,CAAC;AACb,EAAE;AACF;AACA,EAAE;AACF,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,EAAE,CAAC,UAAU,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE;AAC/C,EAAE;AACF,EAAE,CAAC,SAAS,CAAC,SAAS,CAAC,eAAe,CAAC,CAAC,CAAC,QAAQ,CAAC,WAAW,EAAE;AAC/D,IAAI,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE,MAAM,IAAI,KAAK,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,GAAG;AAC1D,IAAI,MAAM,CAAC,WAAW,CAAC,UAAU,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,IAAI,EAAE;AAC3D,EAAE;AACF;AACA;AACA,EAAE;AACF,MAAM,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,SAAS;AACrD,EAAE;AACF,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC;AACrD,IAAI,EAAE,CAAC,GAAG,IAAI,CAAC,UAAU,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,EAAE,CAAC,CAAC;AAClD,QAAQ,MAAM,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC,QAAQ,EAAE;AAC5D,IAAI,CAAC;AACL;AACA,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC;AAC5D,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,GAAG;AACzB,IAAI,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC;AACpB;AACA,IAAI,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC;AACpC,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,KAAK,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE;AAC7C,QAAQ,MAAM,CAAC,OAAO,CAAC,QAAQ,CAAC,KAAK,EAAE;AACvC,YAAY,KAAK,CAAC,UAAU,CAAC,CAAC,CAAC,KAAK,CAAC;AACrC,YAAY,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzB,gBAAgB,CAAC,CAAC,QAAQ,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,GAAG,EAAE;AACzD,oBAAoB,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,IAAI,CAAC;AACxC,oBAAoB,IAAI,CAAC,SAAS,EAAE,KAAK,GAAG;AAC5C,gBAAgB,GAAG;AACnB,YAAY,GAAG,KAAK,EAAE;AACtB;AACA,YAAY,EAAE,CAAC,CAAC,WAAW,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,gBAAgB,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACxC,YAAY,CAAC;AACb,QAAQ,GAAG;AACX,IAAI,MAAM,CAAC,IAAI,CAAC;AAChB,IAAI,EAAE;AACN;AACA,IAAI,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC;AACrC,QAAQ,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC;AACrC,YAAY,SAAS,CAAC,CAAC,CAAC,QAAQ,CAAC;AACjC,QAAQ,CAAC;AACT,QAAQ,WAAW,CAAC,CAAC,CAAC,GAAG;AACzB,IAAI,EAAE;AACN;AACA,IAAI,IAAI,CAAC,UAAU,CAAC,CAAC,CAAC,QAAQ,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC;AAC3D,QAAQ,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,KAAK,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AACvE;AACA,QAAQ,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9B,YAAY,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,GAAG;AAClC,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC;AAChB,YAAY,QAAQ,CAAC,CAAC,CAAC,SAAS,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,GAAG,CAAC;AACjE,QAAQ,CAAC;AACT,QAAQ,SAAS,CAAC,UAAU,CAAC,CAAC,CAAC,KAAK,CAAC;AACrC;AACA,QAAQ,EAAE,CAAC,CAAC,WAAW,CAAC,OAAO,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,YAAY,WAAW,CAAC,IAAI,CAAC,SAAS,EAAE;AACxC,QAAQ,CAAC;AACT;AACA,QAAQ,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7B,YAAY,SAAS,CAAC,UAAU,CAAC,CAAC,CAAC,IAAI,CAAC;AACxC,YAAY,SAAS,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,EAAE;AAC1D,YAAY,SAAS,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAA
[base64 VLQ "mappings" data from a deleted minified JavaScript source-map asset omitted; no human-readable content]
AC,CAAC,CAAC,CAAC,CAAC,EAAE;AAClF,0BAA0B,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK;AACrC,sBAAsB,GAAG;AACzB,kBAAkB,EAAE;AACpB,kBAAkB,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACnD,sBAAsB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,KAAK,EAAE;AAC9D,sBAAsB,QAAQ,CAAC,eAAe,EAAE;AAChD,0BAA0B,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAClF,0BAA0B,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK;AACrC,sBAAsB,GAAG;AACzB,kBAAkB,EAAE;AACpB,kBAAkB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClD,sBAAsB,QAAQ,CAAC,gBAAgB,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,GAAG;AAC/D,kBAAkB,GAAG;AACrB,YAAY,GAAG;AACf;AACA,YAAY,EAAE,CAAC,KAAK;AACpB,YAAY,QAAQ,CAAC,MAAM,EAAE,IAAI,EAAE;AACnC,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,GAAG,EAAE;AAChD,gBAAgB,EAAE,CAAC,OAAO,CAAC,MAAM;AACjC,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAChD,oBAAoB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,IAAI,EAAE;AAC3D,oBAAoB,QAAQ,CAAC,gBAAgB,EAAE;AAC/C,wBAAwB,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE;AACrC,wBAAwB,KAAK,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE;AACvC,wBAAwB,MAAM,CAAC,CAAC,CAAC;AACjC,4BAA4B,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC7F,4BAA4B,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC7F,4BAA4B,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5F,wBAAwB,EAAE;AAC1B,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK;AACnC,oBAAoB,GAAG;AACvB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,oBAAoB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,KAAK,EAAE;AAC5D,oBAAoB,QAAQ,CAAC,eAAe,EAAE;AAC9C,wBAAwB,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE;AACrC,wBAAwB,KAAK,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE;AACvC,wBAAwB,MAAM,CAAC,CAAC,CAAC;AACjC,4BAA4B,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC7F,4BAA4B,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC7F,4BAA4B,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5F,wBAAwB,EAAE;AAC1B,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK;AACnC,oBAAoB,GAAG;AACvB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAChD,oBAAoB,QAAQ,CAAC,gBAAgB,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,GAAG;AAC7D,gBAAgB,GAAG;AACnB;AACA,YAAY,EAAE,CAAC,GAAG,CAAC,WAAW;AAC9B,YAAY,QAAQ,CAAC,MAAM,EAAE,IAAI,CAAC,EAAE,CAAC,OAAO,CAAC,GAAG,EAAE;AAClD,cAAc,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,KAAK,EAAE;AAChE,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,EAAE
;AACtE,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,SAAS,CAAC;AACzC,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC;AACrC,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE;AAC5G,gBAAgB,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAClF,gBAAgB,CAAC,KAAK,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AACrF;AACA,YAAY,EAAE,CAAC,MAAM,CAAC,IAAI;AAC1B,YAAY,QAAQ,CAAC,MAAM,EAAE,IAAI,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,MAAM,GAAG;AACvE;AACA,YAAY,QAAQ,CAAC,MAAM,EAAE,IAAI,CAAC,EAAE,CAAC,OAAO,CAAC,MAAM,EAAE;AACrD,cAAc,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,QAAQ,CAAC,IAAI,EAAE;AACxE,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC;AACrC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,EAAE;AACvE,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,SAAS,CAAC;AACtC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,GAAG;AACxE;AACA,YAAY,EAAE,CAAC,QAAQ;AACvB,YAAY,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,SAAS,GAAG,EAAE,CAAC,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACvF,gBAAgB,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG;AAC3C,YAAY,GAAG;AACf,YAAY,QAAQ,CAAC,KAAK,GAAG,MAAM,EAAE,MAAM,EAAE;AAC7C,gBAAgB,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC1F,gBAAgB,CAAC,KAAK,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5F,gBAAgB,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,EAAE,CAAC,IAAI,CAAC;AACvC,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClD,oBAAoB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,IAAI,EAAE;AAC3D,oBAAoB,QAAQ,CAAC,gBAAgB,EAAE;AAC/C,wBAAwB,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACnG,wBAAwB,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK;AACnC,oBAAoB,GAAG;AACvB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,oBAAoB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,KAAK,EAAE;AAC5D,oBAAoB,QAAQ,CAAC,eAAe,EAAE;AAC9C,wBAAwB,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACnG,wBAAwB,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK;AACnC,oBAAoB,GAAG;AACvB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAChD,oBAAoB,QAAQ,CAAC,gBAAgB,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,GAAG;AAC7D,gBAAgB,GAAG;AACnB,YAAY,QAAQ,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,OAAO,GAAG;AACzD,YAAY,QAAQ;AACpB,cAAc,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,OAAO,EAAE;AAC7E,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,MAAM,CAAC,SAAS,EAAE,CAAC,CA
AC,CAAC,CAAC,CAAC,EAAE,CAAC;AACtD,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE;AAClF,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG;AAChC,YAAY,QAAQ,CAAC,IAAI,GAAG,MAAM,GAAG;AACrC;AACA,YAAY,EAAE,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,WAAW,CAAC,EAAE,CAAC,MAAM;AAC/D,YAAY,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,GAAG;AACpC,YAAY,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,GAAG;AACpC,QAAQ,GAAG;AACX;AACA,QAAQ,WAAW,CAAC,SAAS,EAAE,EAAE,CAAC,OAAO,CAAC,SAAS,GAAG;AACtD,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AAClF,QAAQ,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACpF,QAAQ,WAAW,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AAC9F,QAAQ,CAAC,CAAC,WAAW,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,IAAI;AAChF,QAAQ,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACzE,QAAQ,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACzE,QAAQ,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACzE,QAAQ,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACzE,QAAQ,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACzE,QAAQ,SAAS,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AACzF,QAAQ,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC7F,QAAQ,YAAY,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC7F,QAAQ,YAAY,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC7F,QAAQ,YAAY,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC7F,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAChF,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAChF,QAAQ,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAClF,QAAQ,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAClF,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,
MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAChF,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAChF,QAAQ,EAAE,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,EAAE,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,IAAI;AAC5E,QAAQ,EAAE,CAAC,SAAS,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AAC3F,QAAQ,CAAC,CAAC,CAAC,CAAC;AACZ,YAAY,GAAG,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC7B,gBAAgB,OAAO,CAAC,IAAI,EAAE,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,UAAU,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,KAAK,CAAC,SAAS,CAAC,OAAO,IAAI;AAC9G,gBAAgB,MAAM,CAAC,GAAG;AAC1B,YAAY,EAAE;AACd,YAAY,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9B,gBAAgB,OAAO,CAAC,IAAI,EAAE,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,UAAU,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,KAAK,CAAC,SAAS,CAAC,OAAO,IAAI;AAC9G,YAAY,CAAC;AACb,QAAQ,EAAE;AACV,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC3E,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAClE,YAAY,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzC,QAAQ,GAAG;AACX,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACvE,YAAY,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACzB,YAAY,WAAW,CAAC,KAAK,CAAC,QAAQ,EAAE;AACxC,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC;AACA,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;ACpUF,EAAE,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACrC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,GAAG;AACtC,QAAQ,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,GAAG;AACjC,QAAQ,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,GAAG;AACjC;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,EAAE;AAC5D,QAAQ,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC;AACrB,QAAQ,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC;AACtB,QAAQ,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,GAAG;AACpC,QAAQ,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC;AACzB,QAAQ,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC;AACzB,QAAQ,eAAe,CAAC,CAAC,CAAC,KAAK,CAAC;AAChC,QAAQ,aAAa,CAAC,CAAC,CAAC,KAAK,CAAC;AAC9B,QAAQ,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,GAAG;AACtC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACb,QAAQ,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,SAAS,GAAG;AACtC,QAAQ,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,YAAY,EAAE,CAAC,CAAC,SAAS,GAAG;AAC5D,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC;AACvB;AACA,IAAI,KAAK;AACT,QAAQ,CAAC,MAAM,EAAE,MAAM,EAAE;AA
CzB,QAAQ,CAAC,UAAU,CAAC,KAAK,CAAC;AAC1B,QAAQ,CAAC,UAAU,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE;AAC7C,IAAI,CAAC;AACL,IAAI,KAAK;AACT,QAAQ,CAAC,MAAM,EAAE,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE;AACrD,QAAQ,CAAC,UAAU,CAAC,EAAE,CAAC,MAAM,IAAI,EAAE,GAAG;AACtC,IAAI,CAAC;AACL;AACA,IAAI,OAAO,CAAC,QAAQ,CAAC,CAAC,EAAE;AACxB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC,QAAQ,EAAE;AAC/D;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,WAAW,CAAC,MAAM,CAAC,OAAO,EAAE;AACpC,QAAQ,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,EAAE;AACjD,QAAQ,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,EAAE;AACjD;AACA,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC;AACzD,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,QAAQ,CAAC,SAAS,CAAC,KAAK,EAAE,KAAK,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AACpH,YAAY,GAAG,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,QAAQ,CAAC,SAAS,CAAC,KAAK,EAAE,MAAM,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AACtH;AACA,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACvC,gBAAgB,QAAQ,CAAC,YAAY,GAAG;AACxC,gBAAgB,SAAS,CAAC,UAAU,GAAG,QAAQ,CAAC,QAAQ,EAAE,IAAI,CAAC,KAAK,EAAE;AACtE,YAAY,EAAE;AACd,YAAY,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC;AACnC;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,QAAQ,CAAC,QAAQ,CAAC,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,CAAC;AACxG,YAAY,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,QAAQ,CAAC,EAAE,CAAC,OAAO,EAAE;AACnG,YAAY,EAAE,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC;AACxC,gBAAgB,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,GAAG,EAAE,CAAC,MAAM,GAAG,IAAI,EAAE,MAAM,GAAG;AAClF;AACA,gBAAgB,UAAU,CAAC,KAAK,GAAG,MAAM,EAAE,IAAI,EAAE;AACjD,oBAAoB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,MAAM,EAAE;AACpD,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,GAAG,GAAG,EAAE;AACxC,oBAAoB,CAAC,KAAK,EAAE,IAAI,CAAC,MAAM,EAAE,CAAC,CAAC,MAAM,GAAG;AACpD;AACA,gBAAgB,UAAU;AAC1B,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC;AAChE,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC;AAChE,oBAAoB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG;AACpD;AACA,gBAAgB,MAAM,CAAC,KAAK,CAAC;AAC7B,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,SAAS,CAAC,SAAS,GAAG,EAAE,CAAC,MAAM,GAAG,MAAM,GAAG;AAC3D,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B,YAAY,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG;AACjC,YAAY,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG,KAAK,CAAC,IAAI,EAAE;AAC7C;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,eAAe,GAAG,IAAI,EAAE,IAAI,GAAG;AACxF,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,eAAe,GAAG,MAAM,EAAE,CAAC,GAAG;AAC/G,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,EAAE,IAAI,GAAG;AA
ClD,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG;AAC7D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AAC5D,gBAAgB,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE;AACzD,gBAAgB,CAAC,MAAM,EAAE,IAAI,GAAG;AAChC;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,QAAQ,GAAG;AAC5D,YAAY,CAAC,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACrF;AACA,YAAY,EAAE,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC;AAClC,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACzC,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI;AAC9E,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AACtC,YAAY,OAAO,CAAC,KAAK,CAAC,cAAc,EAAE,MAAM,CAAC,eAAe,EAAE;AAClE;AACA,YAAY,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,QAAQ,EAAE;AACnD,gBAAgB,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG;AACvE;AACA,YAAY,QAAQ,CAAC,UAAU,GAAG,IAAI,CAAC,OAAO,EAAE;AAChD;AACA,YAAY,SAAS,CAAC,MAAM,EAAE,QAAQ,EAAE;AACxC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,GAAG;AAC9D,gBAAgB,CAAC,MAAM,EAAE,IAAI,GAAG;AAChC;AACA,YAAY,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE;AAClE,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACvE,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,EAAE,CAAC;AACnC,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AACtE;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,IAAI;AACzB,YAAY,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAC5B,gBAAgB,KAAK;AACrB,oBAAoB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7B,oBAAoB,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,cAAc,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AAC3E,oBAAoB,CAAC,QAAQ,EAAE,eAAe,CAAC,CAAC,CAAC,EAAE;AACnD;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK;AACjG,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG,IAAI,CAAC,KAAK,EAAE;AACtD;AACA,gBAAgB,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG,SAAS,EAAE,CAAC,GAAG;AACtE,gBAAgB,EAAE,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC;AACpC,oBAAoB,MAAM;AAC1B,wBAAwB,CAAC,SAAS,EAAE,IAAI,EAAE;AAC1C,wBAAwB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE;AACxH,gBAAgB,CAAC;AACjB,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAC5B,gBAAgB,KAAK;AACrB,oBAAoB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7B,oBAAoB,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,eAAe,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,IAAI,CAAC,MAAM,CAAC,IAAI;AAC9G,oBAAoB,CAAC,QAAQ,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE;AACnD;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,E
AAE,CAAC,IAAI,GAAG,IAAI,CAAC,KAAK,EAAE;AACtD,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,IAAI;AACxB,YAAY,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,QAAQ,CAAC,IAAI,EAAE;AACzC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC;AAC7B,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,cAAc,CAAC;AAC1C,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE;AACjC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE;AACjC,YAAY,CAAC;AACb;AACA,YAAY,8DAA8D;AAC1E,YAAY,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AAC5D,YAAY,8DAA8D;AAC1E,QAAQ,GAAG;AACX;AACA,QAAQ,WAAW,CAAC,SAAS,EAAE,EAAE,CAAC,OAAO,CAAC,KAAK,CAAC,SAAS,GAAG;AAC5D,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxD,IAAI,8DAA8D;AAClE;AACA,IAAI,OAAO,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AACnE,QAAQ,OAAO,CAAC,IAAI,CAAC,GAAG,EAAE,MAAM,CAAC,KAAK,EAAE;AACxC,IAAI,GAAG;AACP;AACA,IAAI,OAAO,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAClE,QAAQ,OAAO,CAAC,IAAI,CAAC,GAAG,EAAE,MAAM,CAAC,IAAI,EAAE;AACvC,IAAI,GAAG;AACP;AACA,IAAI,OAAO,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AACnE,QAAQ,OAAO,GAAG;AAClB,IAAI,GAAG;AACP;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC;AAC5B,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC;AAC5B;AACA,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACjF,QAAQ,MAAM,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACnF,QAAQ,aAAa,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,aAAa,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,aAAa,CAAC,CAAC,IAAI;AACpG,QAAQ,SAAS,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AACxF,QAAQ,SAAS,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AACxF,QAAQ,cAAc,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAC5F,QAAQ,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAClF;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC3E,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAA
C,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,GAAG;AACX,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACvE,YAAY,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACzB,YAAY,WAAW,CAAC,KAAK,CAAC,QAAQ,EAAE;AACxC,YAAY,OAAO,CAAC,QAAQ,CAAC,QAAQ,EAAE;AACvC,YAAY,KAAK,CAAC,QAAQ,CAAC,QAAQ,EAAE;AACrC,YAAY,KAAK,CAAC,QAAQ,CAAC,QAAQ,EAAE;AACrC,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAClE,YAAY,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzC,YAAY,OAAO,CAAC,KAAK,CAAC,KAAK,EAAE;AACjC,QAAQ,GAAG;AACX,QAAQ,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,eAAe,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACrF,YAAY,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC;AAChC,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG;AAClD,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,OAAO,EAAE;AAC5C,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC;AACA,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,CAAC;ACvOD;AACA,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,eAAe,CAAC,EAAE,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,cAAc;AAC3E,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,CAAC;AAChE,EAAE,CAAC,IAAI,GAAG,QAAQ,CAAC,gBAAgB,CAAC,GAAG,CAAC,QAAQ,CAAC,WAAW,CAAC;AAC7D;AACA,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC/B,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AACvD,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM;AAC9C,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,KAAK;AACzB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;AAClD,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC;AACrE,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClF,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC;AACtD,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC;AACjF,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;AACrF,QAAQ,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;AACjG,QAAQ,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;AACxF,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,EAAE;AACjG,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG
;AACrB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE;AACrB,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,IAAI;AAC3B,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,IAAI,MAAM,GAAG;AAChD,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,gBAAgB,EAAE,CAAC,CAAC,eAAe,EAAE,CAAC,CAAC,gBAAgB,EAAE;AAC3F,QAAQ,CAAC,CAAC,kBAAkB,CAAC,CAAC,CAAC,EAAE,OAAO,EAAE,CAAC,CAAC,IAAI,EAAE,CAAC,CAAC,OAAO,EAAE;AAC7D,QAAQ,CAAC,CAAC,qBAAqB,CAAC,CAAC,CAAC,EAAE,GAAG,EAAE,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,GAAG,EAAE;AACvD,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI;AACzB,QAAQ,CAAC;AACT;AACA,IAAI,QAAQ,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC,MAAM,EAAE;AACxC,QAAQ,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,GAAG;AAChC,QAAQ,MAAM,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACnC,YAAY,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,EAAE;AACnC,YAAY,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,EAAE;AACnC,YAAY,MAAM,CAAC,EAAE,CAAC,UAAU,CAAC,MAAM,CAAC,EAAE,EAAE,CAAC,MAAM,CAAC,EAAE,GAAG;AACzD,QAAQ,GAAG;AACX,IAAI,EAAE;AACN;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AACpE,gBAAgB,eAAe,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AACtE;AACA,YAAY,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AACxC,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC;AACA,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,GAAG;AACzD,gBAAgB,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,GAAG;AAC3D,gBAAgB,WAAW,CAAC,CAAC,CAAC,WAAW,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,GAAG;AACnE,gBAAgB,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,GAAG;AAC7D,gBAAgB,WAAW,CAAC,CAAC,CAAC,WAAW,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,GAAG;AACnE,gBAAgB,YAAY,CAAC,CAAC,CAAC,YAAY,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,GAAG;AACrE,gBAAgB,gBAAgB,CAAC,CAAC,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,GAAG;AAC7E,gBAAgB,aAAa,CAAC,CAAC,CAAC,aAAa,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,GAAG;AACvE;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,MAAM,CAAC,SAAS,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,MAAM;AAC3D,YAAY,UAAU,CAAC,WAAW,CAAC,CAAC,MAAM,EAAE;AAC5C,YAAY,UAAU,CAAC,YAAY,CAAC,CAAC,OAAO,EAAE;AAC9C,YAAY,UAAU,CAAC,gBAAgB,CAAC,CAAC,WAAW,EAAE;AACtD,YAAY,UAAU,CAAC,aAAa,CAAC,CAAC,QAAQ,EAAE;AAChD;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,MAAM,CAAC,UAAU;AACrC,YAAY,MAAM,CAAC,IAAI,CAAC,EAAE,CAAC,UAAU,EAAE;AACvC,YAAY,OAAO,CAAC,IAAI,CAAC,EAAE,CAAC,UAAU,EAAE;AACxC,YAAY,WAAW,CAAC,IAAI,CAAC,EAAE,CAAC,UAAU,EAAE;AAC5C,YAAY,QAAQ,CAAC,IAAI,CAAC,EAAE,CAAC,UAAU,EAAE;AACzC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B,YAAY,EAAE,CAAC,OAAO,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC;AACvC,YAAY,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,EAAE;AACtC,gBAAgB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,EAAE,MAAM,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC;AAChE,gBAAgB,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,cAAc,GAAG;AAC5E;AACA,YAAY,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC;AAC9D,YAAY,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,EAAE;AACxD,gBAAgB,CAAC,MAAM,EAAE,CAAC,CA
AC,CAAC,QAAQ,EAAE;AACtC,gBAAgB,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,IAAI;AACnC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,CAAC,KAAK,CAAC;AACnC,YAAY,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC;AAChC;AACA,YAAY,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,EAAE,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC;AACtD,gBAAgB,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,EAAE,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC;AACtD,gBAAgB,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE;AACrC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,MAAM,GAAG,IAAI,EAAE,CAAC,GAAG;AAC5E,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,MAAM,GAAG;AAC7F,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,GAAG;AAC/C,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI;AACrD,gBAAgB,GAAG,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,EAAE,CAAC,CAAC;AAC5D,gBAAgB,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE;AAC3B,oBAAoB,eAAe,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,EAAE,qBAAqB,CAAC,CAAC,EAAE;AAC7F,gBAAgB,CAAC;AACjB,gBAAgB,MAAM,CAAC,MAAM,EAAE,IAAI,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,eAAe,EAAE;AACrE,YAAY,CAAC;AACb;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,IAAI,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,OAAO,GAAG;AAC9D;AACA,YAAY,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACxF;AACA,YAAY,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,EAAE,CAAC,cAAc,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC;AAC9H,gBAAgB,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE;AACpE,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE;AACnE,gBAAgB,GAAG,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE;AACnE;AACA,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI;AACrD,gBAAgB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE;AACtC,gBAAgB,CAAC,CAAC,MAAM,EAAE,IAAI,CAAC,EAAE,CAAC,KAAK,EAAE,CAAC,CAAC;AAC3C,oBAAoB,CAAC,KAAK,CAAC,KAAK,CAAC;AACjC,oBAAoB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,eAAe,CAAC;AACpD,oBAAoB,CAAC,UAAU,EAAE;AACjC,oBAAoB,CAAC,QAAQ,CAAC,QAAQ,CAAC;AACvC,oBAAoB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,EAAE,CAAC,KAAK,EAAE;AAC7C,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,EAAE;AAC1C,YAAY,CAAC;AACb;AACA,YAAY,CAAC,CAAC,MAAM,EAAE,IAAI,CAAC,EAAE,CAAC,OAAO,EAAE;AACvC,gBAAgB,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,KAAK,CAAC;AACrC,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC7C,oBAAoB,QAAQ,CAAC,gBAAgB,EAAE;AAC/C,wBAAwB,KAAK,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC3C,wBAAwB,KAAK,CAAC,CAAC,aAAa,CAAC,CAA
C,CAAC,CAAC,EAAE,CAAC,CAAC,OAAO,EAAE;AAC7D,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,EAAE;AACtB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC7C,oBAAoB,QAAQ,CAAC,gBAAgB,EAAE;AAC/C,wBAAwB,KAAK,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC3C,wBAAwB,KAAK,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,OAAO,EAAE;AAC7D,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,EAAE;AACtB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC5C,oBAAoB,QAAQ,CAAC,eAAe,EAAE;AAC9C,wBAAwB,KAAK,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC3C,wBAAwB,KAAK,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,OAAO,EAAE;AAC7D,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,EAAE;AACtB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,UAAU,EAAE;AAC7B,gBAAgB,CAAC,QAAQ,CAAC,QAAQ,CAAC;AACnC,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7C,oBAAoB,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC3C,oBAAoB,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE;AAC9C,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,GAAG;AAC1C;AACA,YAAY,GAAG,CAAC,EAAE,CAAC,CAAC,EAAE,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1C;AACA,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;AACnE,gBAAgB,MAAM,CAAC,CAAC,KAAK,CAAC,CAAC,MAAM,CAAC,CAAC,KAAK,CAAC,CAAC,YAAY,CAAC,KAAK,EAAE;AAClE,YAAY,GAAG;AACf,YAAY,MAAM;AAClB,cAAc,CAAC,SAAS,EAAE,IAAI,CAAC,EAAE,CAAC,cAAc,EAAE;AAClD,cAAc,CAAC,IAAI,CAAC,UAAU,CAAC;AAC/B,cAAc,CAAC,KAAK,EAAE;AACtB,cAAc,CAAC,MAAM,EAAE,IAAI,EAAE;AAC7B,cAAc,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,cAAc,EAAE;AACjD,cAAc,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC/F,cAAc,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5C,gBAAgB,QAAQ,CAAC,gBAAgB,EAAE;AAC3C,kBAAkB,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;AACjC,kBAAkB,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,QAAQ,EAAE;AAC/C,kBAAkB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,GAAG;AACvD,kBAAkB,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,eAAe,CAAC,CAAC,CAAC;AACvD,gBAAgB,EAAE;AAClB;AACA,cAAc,EAAE;AAChB,cAAc,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5C,kBAAkB,QAAQ,CAAC,gBAAgB,EAAE;AAC7C,sBAAsB,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;AACrC,sBAAsB,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,QAAQ,EAAE;AACnD,sBAAsB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC1D,kBAAkB,EAAE;AACpB,cAAc,EAAE;AAChB,cAAc,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,kBAAkB,QAAQ,CAAC,eAAe,EAAE;AAC5C,sBAAsB,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;AACrC,sBAAsB,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,QAAQ,EAAE;AACnD,sBAAsB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC1D,kBAAkB,EAAE;AACpB,cAAc,GAAG;AACjB;AACA,YAAY,CAAC,CAAC,SAAS,EAAE,IAAI,CAAC,EAAE,CAAC,cAAc,EAAE;AACjD,cAAc,CAAC,IAAI,CAAC,UAAU,CAAC;AAC/B,cAAc,CAAC,UAAU,EAAE;AAC3B,cAAc,CAAC,QAAQ,CAAC,QAAQ,CAAC;AACjC,cAAc,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,CA
AC,CAAC,CAAC,GAAG,CAAC,GAAG;AACxH;AACA,YAAY,GAAG,CAAC,eAAe,CAAC,CAAC,CAAC,WAAW,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;AAC5E,gBAAgB,MAAM,CAAC,CAAC,KAAK,CAAC,CAAC,MAAM,CAAC,CAAC,KAAK,CAAC,CAAC,gBAAgB,CAAC,KAAK,EAAE;AACtE,YAAY,GAAG;AACf,YAAY,MAAM;AAClB,cAAc,CAAC,SAAS,EAAE,IAAI,CAAC,EAAE,CAAC,UAAU,EAAE;AAC9C,cAAc,CAAC,IAAI,CAAC,eAAe,CAAC;AACpC,cAAc,CAAC,KAAK,EAAE;AACtB,cAAc,CAAC,MAAM,EAAE,IAAI,EAAE;AAC7B,cAAc,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,GAAG;AACjC,cAAc,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,UAAU,EAAE;AAC7C,cAAc,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE;AAC7D,cAAc,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,EAAE;AAC9B,cAAc,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE;AAC7D,cAAc,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,cAAc,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5C,gBAAgB,QAAQ,CAAC,gBAAgB,EAAE;AAC3C,kBAAkB,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;AACjC,kBAAkB,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,QAAQ,EAAE;AAC/C,kBAAkB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,GAAG;AACvD,kBAAkB,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,eAAe,CAAC,CAAC,CAAC;AACvD,gBAAgB,EAAE;AAClB;AACA,cAAc,EAAE;AAChB,cAAc,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5C,kBAAkB,QAAQ,CAAC,gBAAgB,EAAE;AAC7C,sBAAsB,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;AACrC,sBAAsB,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,QAAQ,EAAE;AACnD,sBAAsB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC1D,kBAAkB,EAAE;AACpB,cAAc,EAAE;AAChB,cAAc,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,kBAAkB,QAAQ,CAAC,eAAe,EAAE;AAC5C,sBAAsB,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;AACrC,sBAAsB,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,QAAQ,EAAE;AACnD,sBAAsB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC1D,kBAAkB,EAAE;AACpB,cAAc,GAAG;AACjB;AACA,YAAY,CAAC,CAAC,SAAS,EAAE,IAAI,CAAC,EAAE,CAAC,UAAU,EAAE;AAC7C,cAAc,CAAC,IAAI,CAAC,eAAe,CAAC;AACpC,cAAc,CAAC,UAAU,EAAE;AAC3B,cAAc,CAAC,QAAQ,CAAC,QAAQ,CAAC;AACjC,cAAc,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE;AAC7D,cAAc,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG;AAC9D;AACA,YAAY,IAAI,CAAC,SAAS,GAAG,EAAE,CAAC,KAAK,EAAE;AACvC,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAChD,oBAAoB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,kBAAkB,CAAC,CAAC,EAAE;AACxE,oBAAoB,QAAQ,CAAC,gBAAgB,EAAE;AAC/C,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,KAAK,CAAC,CAAC,KAAK,CAAC;AACrC,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,EAAE;AACtB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC7C,oBAAoB,QAAQ,CAAC,gBAAgB,EAAE;AAC/C,wBAAwB,KAAK,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC3C,wBAAwB,KAAK,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,QAAQ,EAAE;AAC9D,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,EAAE;AACtB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,oBAAoB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,kBAAkB,CAAC,CAAC,EAAE;AACxE,oBAAoB,QAAQ,CAAC,eAAe,EAAE;AAC9C,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,KAAK,CAAC,CAAC,KAAK,CAAC;AACrC,w
BAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,EAAE;AACtB,gBAAgB,GAAG;AACnB,QAAQ,GAAG;AACX;AACA,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,CAAC,YAAY,CAAC,CAAC,IAAI,CAAC;AACxH,QAAQ,OAAO,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,QAAQ,CAAC,CAAC,IAAI,CAAC;AAClH,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,QAAQ,CAAC;AACpH,QAAQ,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACpF,QAAQ,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AAC/E,QAAQ,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAClF,QAAQ,UAAU,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC9F,QAAQ,QAAQ,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AACxF;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC3E,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,GAAG;AACX,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,CAAC,MAAM;AAC/F,YAAY,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACvB,YAAY,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,MAAM,EAAE;AAC9D,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAClE,YAAY,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzC,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;AACF;AACA;ACvTA;AACA,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,eAAe,CAAC,EAAE,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,cAAc;AAC3E,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,IAAI,CA
AC,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,CAAC;AAChE,EAAE,CAAC,IAAI,GAAG,QAAQ,CAAC,gBAAgB,CAAC,GAAG,CAAC,QAAQ,CAAC,WAAW,CAAC;AAC7D,EAAE,CAAC,MAAM,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACpC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,GAAG;AACpC,IAAI,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,GAAG;AACtC;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM;AAC5C,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,KAAK;AACzB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC;AAC7D,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;AAClD,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC;AACrE,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC;AACtD,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI;AACtB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE;AACrB,QAAQ,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,IAAI;AAC3B,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI;AACtB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI;AACvB,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE;AAClC,QAAQ,CAAC;AACT;AACA,IAAI,OAAO;AACX,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC;AACpB,QAAQ,CAAC,aAAa,CAAC,KAAK,EAAE;AAC9B;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAC5C,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC;AACA,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACnF,gBAAgB,eAAe,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AACtE,gBAAgB,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC;AAC5B;AACA,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,EAAE;AAC3D,YAAY,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC;AACnC;AACA,YAAY,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC;AAClE,YAAY,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC;AACjD,gBAAgB,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC;AACjD,gBAAgB,MAAM,CAAC,KAAK,CAAC;AAC7B,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,SAAS,CAAC,SAAS,GAAG,EAAE,CAAC,MAAM,GAAG,MAAM,GAAG;AAC3D,YAAY,CAAC;AACb;AACA,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,GAAG,IAAI,CAAC,EAAE,CAAC,UAAU,EAAE;AAC7E,gBAAgB,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,GAAG,IAAI,CAAC,EAAE,CAAC,UAAU,EAAE;AAC/E,gBAAgB,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,GAAG,IAAI,CAAC,EAAE,CAAC,UAAU,EAAE;AACjF;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,WAAW,GAAG,IAAI,EAAE,CAAC,GAAG;AACjF,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,WAAW,GAAG;AAClG,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,GAA
[Base64 VLQ source-map mappings from a minified JavaScript bundle's .js.map file deleted with the hadoop-ozone/hadoop-hdds web UI assets; unreadable machine-generated content elided.]
zJ,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC;AACnC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG;AACxB,QAAQ,CAAC;AACT;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC;AACf,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC,QAAQ,EAAE;AAC/D;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AACpE,gBAAgB,eAAe,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AACtE;AACA,YAAY,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AACxC,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC;AACA,YAAY,EAAE,GAAG,CAAC,MAAM,CAAC,KAAK,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,GAAG,CAAC,SAAS;AAC/D,YAAY,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,gBAAgB,MAAM,CAAC,MAAM,CAAC,OAAO,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC;AACvD,oBAAoB,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACrC,gBAAgB,GAAG;AACnB,YAAY,GAAG;AACf;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B,YAAY,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,WAAW,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,OAAO;AACpF,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,SAAS;AAChH,gBAAgB,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACtC,oBAAoB,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvD,wBAAwB,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC;AACvE,oBAAoB,EAAE;AACtB,gBAAgB,GAAG;AACnB;AACA,YAAY,CAAC,GAAG,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,EAAE,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AACxF,gBAAgB,CAAC,UAAU,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,cAAc,EAAE,CAAC,CAAC,CAAC,EAAE;AAC/D,YAAY,CAAC,GAAG,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,EAAE,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,MAAM,IAAI;AACnH;AACA,YAAY,EAAE,CAAC,EAAE,CAAC,UAAU,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,OAAO,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM;AAC9E,YAAY,EAAE,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,GAAG;AAC5H,YAAY,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG;AACzD;AACA,YAAY,EAAE,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE,CAAC,IAAI,CAAC,KAAK;AAC5C,YAAY,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AACzB,YAAY,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,GAAG,KAAK,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI;AACnD;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,WAAW,GAAG,IAAI,EAAE,IAAI,GAAG;AACpF,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,E
AAE,CAAC,WAAW,GAAG;AAClG,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,GAAG;AAC/C,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,MAAM,GAAG;AAC1D,YAAY,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACxF;AACA,YAAY,EAAE,IAAI,CAAC,CAAC,EAAE,CAAC,UAAU,CAAC,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,IAAI,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC,KAAK;AAC7G,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,MAAM,GAAG,SAAS,GAAG,EAAE,CAAC,KAAK,EAAE;AACzE,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG;AAC9E,YAAY,MAAM,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,EAAE;AACtC,gBAAgB,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AAC9C,gBAAgB,CAAC,KAAK,EAAE,IAAI,CAAC,OAAO,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE;AAC7C,YAAY,MAAM,CAAC,IAAI,EAAE;AACzB,gBAAgB,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE;AACzE,gBAAgB,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AAC9C,gBAAgB,CAAC,KAAK,EAAE,IAAI,CAAC,OAAO,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AAC5C,gBAAgB,CAAC,MAAM,GAAG;AAC1B,YAAY,MAAM;AAClB,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAClF,gBAAgB,CAAC,OAAO,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG;AAClE,YAAY,MAAM;AAClB,gBAAgB,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,MAAM,EAAE;AACpE,gBAAgB,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC;AAC3C,gBAAgB,CAAC,KAAK,EAAE,IAAI,CAAC,OAAO,EAAE,CAAC,CAAC,EAAE,EAAE;AAC5C;AACA,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,GAAG,EAAE;AACnD,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG;AACvD,YAAY,IAAI,CAAC,IAAI,GAAG,MAAM,GAAG;AACjC;AACA,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,EAAE;AACpD,gBAAgB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,oBAAoB,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AACnG,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,IAAI;AACnG,oBAAoB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,IAAI,EAAE;AAC3D,oBAAoB,QAAQ,CAAC,gBAAgB,EAAE;AAC/C,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,GAAG;AACvB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,oBAAoB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,KAAK,EAAE;AAC5D,oBAAoB,QAAQ,CAAC,eAAe,EAAE;AAC9C,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,GAAG;AACvB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAChD,oBAAoB,QAAQ,CAAC
,gBAAgB,EAAE;AAC/C,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,GAAG;AACvB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5C,oBAAoB,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC;AACvC,oBAAoB,QAAQ,CAAC,YAAY,EAAE;AAC3C,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,GAAG;AAC7D,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC;AACxC,wBAAwB,OAAO,CAAC,CAAC,OAAO;AACxC,oBAAoB,GAAG;AACvB,oBAAoB,EAAE,CAAC,KAAK,CAAC,eAAe,GAAG;AAC/C,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,oBAAoB,QAAQ,CAAC,eAAe,EAAE;AAC9C,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,GAAG;AACvB,oBAAoB,EAAE,CAAC,KAAK,CAAC,eAAe,GAAG;AAC/C,gBAAgB,GAAG;AACnB;AACA,YAAY,SAAS,CAAC,MAAM,EAAE,IAAI,EAAE;AACpC,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,CAAC,CAAC;AAClC,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;AACjE;AACA,YAAY,EAAE,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC;AAC7B,gBAAgB,SAAS,CAAC,MAAM,EAAE,IAAI,EAAE;AACxC,oBAAoB,CAAC,IAAI,EAAE,IAAI,CAAC,MAAM,EAAE,CAAC,CAAC,MAAM,EAAE;AAClD,gBAAgB,CAAC;AACjB;AACA,gBAAgB,IAAI,CAAC,MAAM,EAAE,IAAI,EAAE;AACnC,oBAAoB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,WAAW,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE;AAC1E,oBAAoB,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,IAAI,CAAC,IAAI,EAAE;AAC3E,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACtD,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACtG;AACA,gBAAgB,CAAC;AACjB,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,IAAI,CAAC,SAAS,EAAE,IAAI,GAAG,MAAM,GAAG;AAChD,YAAY,CAAC;AACb;AACA,YAAY,IAAI;AAChB,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC9G,gBAAgB,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC9E,gBAAgB,CAAC,KAAK,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAChF,gBAAgB,CAAC,MAAM,EAAE,IAAI,EAAE;AAC/B,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,SAAS,CAAC;AACzC,gBAAgB,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,IAAI,CAAC,IAAI,EAAE;AACvE,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE;AACjE,YAAY,IAAI,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,IAAI,EAAE;AAClE,gBAAgB,GAAG,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,MAAM,CAAC,EAAE;AACnF,gBAAgB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClD,oBAAoB,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC
,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC;AAClE,wBAAwB,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7C,4BAA4B,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClC,gCAAgC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACzD,4BAA4B,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;AAClF,4BAA4B,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG;AACzC;AACA,oBAAoB,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG;AACjE,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,MAAM,EAAE,IAAI,EAAE;AAC/B,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,oBAAoB,MAAM,EAAE,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC;AACtE,gBAAgB,GAAG;AACnB;AACA;AACA,YAAY,EAAE,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,WAAW,CAAC,EAAE,CAAC,MAAM;AAC/D,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG;AAC1B,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG;AAC1B;AACA,QAAQ,GAAG;AACX;AACA,QAAQ,WAAW,CAAC,SAAS,EAAE,WAAW,CAAC,SAAS,GAAG;AACvD,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AAC9E,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAChF,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAChF,QAAQ,UAAU,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC3F,QAAQ,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,IAAI;AAC5E,QAAQ,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,IAAI;AAC5E,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI;AACtE,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI;AACtE,QAAQ,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAClF,QAAQ,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAClF,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAChF,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAChF,QAAQ,WAAW,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AACjG,QAAQ,EAAE,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,QAAQ,G
AAG,MAAM,CAAC,EAAE,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,IAAI;AAC5E,QAAQ,SAAS,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AACxF;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC3E,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAClE,YAAY,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzC,QAAQ,GAAG;AACX,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACvE,YAAY,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACzB,YAAY,WAAW,CAAC,KAAK,CAAC,QAAQ,EAAE;AACxC,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC;AACA,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;AC7PF;AACA,EAAE,CAAC,MAAM,CAAC,gBAAgB,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACzC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,WAAW,EAAE;AAC7C,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAClC,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAClC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,EAAE;AAC9B,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,EAAE;AACvC,QAAQ,CAAC;AACT;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC;AAC3D,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI;AACtB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI;AACvB,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,EAAE;AACrC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,KAAK;AACrB,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,KAAK;AACjC,QAAQ,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,KAAK;AAC/B,QAAQ,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,KAAK;AAC5B,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC;AAC1B,QAAQ,CAAC,CAAC,CAAC;AACX,QAAQ,CAAC,CAAC,CAAC;AACX,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI;AACvB,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,YAAY,GAAG,SAAS,EAAE;AAC5D,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG;AACxB,QAAQ,CAAC;AACT;AACA,IAAI,KAAK;AACT,QAAQ,CAAC,MAAM,EAAE,MAAM,EAAE;AACzB,QAAQ,CAAC,UAAU,CAAC,KAAK,CAAC;AAC1B,QAAQ,CAAC,UAAU,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE;AAC7C,IAAI,CAAC;AACL,IAAI,KAAK;AACT,QAAQ,CAAC,MAAM,EAAE,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE;AACrD,QAAQ,CAAC,UAAU,CAAC,EAAE,CAAC,MAAM,IAAI,EAAE,GAAG;AACtC,IAAI,CAAC;AACL;AACA,IAAI,OAAO;AACX,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC;AACpB,QAAQ,CAAC,aAAa,CAAC,KAAK,CAAC;AAC7B,QAAQ,CA
AC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,YAAY,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5C,QAAQ,EAAE;AACV,QAAQ,CAAC,YAAY,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACtC,YAAY,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5C,QAAQ,GAAG;AACX;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC,QAAQ,EAAE;AAC/D;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,WAAW,CAAC,MAAM,CAAC,WAAW,EAAE;AACxC,QAAQ,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,EAAE;AACjD,QAAQ,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,EAAE;AACjD;AACA,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAC5C,gBAAgB,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC;AAC5B,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACnF,gBAAgB,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACtF;AACA,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACvC,gBAAgB,QAAQ,CAAC,YAAY,GAAG;AACxC,gBAAgB,SAAS,CAAC,UAAU,GAAG,QAAQ,CAAC,QAAQ,EAAE,IAAI,CAAC,KAAK,EAAE;AACtE,YAAY,EAAE;AACd,YAAY,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC;AACnC;AACA,YAAY,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC;AAClE,YAAY,EAAE,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC;AACvG,gBAAgB,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,SAAS,EAAE;AAClD,gBAAgB,MAAM,CAAC,KAAK,CAAC;AAC7B,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,SAAS,CAAC,SAAS,GAAG,EAAE,CAAC,MAAM,GAAG,MAAM,GAAG;AAC3D,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B,YAAY,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC,MAAM,GAAG;AACrC,YAAY,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC,MAAM,GAAG,KAAK,CAAC,IAAI,EAAE;AACjD;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,mBAAmB,GAAG,IAAI,EAAE,IAAI,GAAG;AAC5F,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,mBAAmB,GAAG,MAAM,EAAE,CAAC,GAAG;AACnH,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,EAAE,IAAI,GAAG;AAClD,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG;AAC7D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AAC5D,gBAAgB,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE;AACzD,gBAAgB,CAAC,MAAM,EAAE,IAAI,GAAG;AAChC;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,QAAQ,GAAG;AAC5D,KAAK,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,UAAU,GAAG;AACvD;AACA,YAAY,CAAC,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACrF;AACA,YAAY,EAAE,CAAC,MAAM;AACrB,YAAY,EAAE,CAAC,EAAE,UAAU,CAAC,CAAC,CAAC;AAC9B,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,GAAG,SAAS,MAAM,MAAM,GAAG;AACnE,YAAY,CAAC,CAAC,IAAI,CA
AC,CAAC;AACpB,gBAAgB,MAAM,CAAC,KAAK,CAAC,cAAc,EAAE;AAC7C;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,EAAE;AAC1C,oBAAoB,CAAC,KAAK,CAAC,IAAI,CAAC;AAChC,oBAAoB,CAAC,IAAI,CAAC,MAAM,EAAE;AAClC;AACA,gBAAgB,EAAE,CAAC,EAAE,SAAS,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC;AACnE,oBAAoB,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,GAAG;AACjD,oBAAoB,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AAC1F,gBAAgB,CAAC;AACjB;AACA,gBAAgB,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,EAAE;AAC7C,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,CAAC,CAAC,KAAK;AAC3E,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC;AAClC,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACzC,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI;AAC9E,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AACtC,YAAY,WAAW;AACvB,gBAAgB,CAAC,KAAK,CAAC,cAAc,CAAC;AACtC,gBAAgB,CAAC,MAAM,CAAC,eAAe,EAAE;AACzC;AACA,YAAY,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,QAAQ,EAAE;AACnD,gBAAgB,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI;AACxE;AACA,YAAY,QAAQ,CAAC,UAAU,GAAG,IAAI,CAAC,WAAW,EAAE;AACpD;AACA;AACA,YAAY,SAAS,CAAC,MAAM,EAAE,QAAQ,EAAE;AACxC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,WAAW,CAAC,EAAE,GAAG;AAClE,gBAAgB,CAAC,MAAM,EAAE,IAAI,GAAG;AAChC;AACA,YAAY,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,WAAW,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE;AACtE,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACvE,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,EAAE,CAAC;AACnC,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AACtE;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,IAAI;AACzB,YAAY,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAC5B,gBAAgB,KAAK;AACrB,oBAAoB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7B,oBAAoB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,cAAc,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AAC5E,oBAAoB,CAAC,QAAQ,EAAE,eAAe,CAAC,CAAC,CAAC,EAAE;AACnD;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACzC,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,WAAW,CAAC,UAAU,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK;AAC3I,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG,IAAI,CAAC,KAAK,EAAE;AACtD;AACA,gBAAgB,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG,SAAS,EAAE,CAAC,GAAG;AACtE,gBAAgB,EAAE,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC;AACpC,oBAAoB,MAAM;AAC1B,wBAAwB,CAAC,SAAS,EAAE,IAAI,EAAE;AAC1C,wBAAwB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE;AACvH,gBAAgB,CAAC;AACjB;AACA,gBAAgB,EAAE,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC;AACnC,oBAAoB,MAAM;AAC1B,wBAAwB,CAAC,SAAS,GAAG,IAAI,CAAC,IAAI,EAAE;AAChD,wBAAwB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CA
AC,CAAC,CAAC,CAAC,CAAC,GAAG;AAC9E,wBAAwB,CAAC,KAAK,EAAE,IAAI,CAAC,MAAM,EAAE,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,GAAG;AAClF,gBAAgB,CAAC;AACjB;AACA,gBAAgB,EAAE,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC;AACjC,oBAAoB,CAAC,CAAC,SAAS,GAAG,IAAI,CAAC,IAAI,EAAE;AAC7C,wBAAwB,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,SAAS,GAAG;AAC1E,gBAAgB,CAAC;AACjB,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAC5B,gBAAgB,KAAK;AACrB,oBAAoB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7B,oBAAoB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,eAAe,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AAC5E,oBAAoB,CAAC,QAAQ,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE;AACnD;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG,IAAI,CAAC,KAAK,EAAE;AACtD,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,IAAI;AACxB,YAAY,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,QAAQ,CAAC,IAAI,EAAE;AACzC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC;AAC7B,gBAAgB,CAAC,IAAI,EAAE,EAAE,GAAG,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,cAAc,CAAC;AAChF,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE;AACjC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE;AACjC,YAAY,CAAC;AACb,QAAQ,GAAG;AACX;AACA,QAAQ,WAAW,CAAC,SAAS,EAAE,WAAW,CAAC,KAAK,CAAC,SAAS,GAAG;AAC7D,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxD,IAAI,8DAA8D;AAClE;AACA,IAAI,WAAW,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AACvE,QAAQ,GAAG,EAAE,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC;AACzB,YAAY,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,GAAG,CAAC,IAAI,EAAE;AACrC,YAAY,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,GAAG,CAAC,IAAI,EAAE;AACvC,YAAY,KAAK,CAAC,CAAC,GAAG,CAAC,KAAK;AAC5B,QAAQ,EAAE;AACV,QAAQ,OAAO,CAAC,IAAI,CAAC,GAAG,EAAE,MAAM,CAAC,KAAK,EAAE;AACxC,IAAI,GAAG;AACP;AACA,IAAI,WAAW,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AACtE,QAAQ,OAAO,CAAC,MAAM,CAAC,IAAI,EAAE;AAC7B,IAAI,GAAG;AACP;AACA,IAAI,WAAW,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AACvE,QAAQ,OAAO,GAAG;AAClB,IAAI,GAAG;AACP;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,WAAW,CAAC,CAAC,CAAC,WAAW,CAAC;AACpC,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC;AAC5B;AACA,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACjF,QAAQ,MAAM,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACnF,CAAC,UAAU,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AACpF,QAAQ,aAAa,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,aAAa,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,aAAa,CAAC,CAAC,IAAI;AACpG,QAAQ,YAAY,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,IAAI;AAClG,QAAQ,UAAU,CAAC
,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,GAAG,CAAC,IAAI;AAC9F,QAAQ,SAAS,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AACxF,QAAQ,SAAS,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AACxF,QAAQ,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAClF;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC;AACtC,gBAAgB,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;AACnC,gBAAgB,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;AAClC,YAAY,CAAC;AACb,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,GAAG;AACX,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACvE,YAAY,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACzB,YAAY,WAAW,CAAC,KAAK,CAAC,QAAQ,EAAE;AACxC,YAAY,WAAW,CAAC,QAAQ,CAAC,QAAQ,EAAE;AAC3C,YAAY,KAAK,CAAC,QAAQ,CAAC,QAAQ,EAAE;AACrC,YAAY,KAAK,CAAC,QAAQ,CAAC,QAAQ,EAAE;AACrC,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAClE,YAAY,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzC,YAAY,WAAW,CAAC,KAAK,CAAC,KAAK,EAAE;AACrC,KAAK,MAAM,CAAC,KAAK,CAAC,KAAK,EAAE;AACzB,QAAQ,GAAG;AACX,QAAQ,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,eAAe,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACrF,YAAY,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC;AAChC,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG;AAClD,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,WAAW,EAAE;AAChD,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC;AACA,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,CAAC;AC1RD;AACA,EAAE,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACrC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AACvD,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE,WAAW,CAAC,KAAK,CAAC,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI;AAC3E,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AAClB,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,UAAU,CAAC,EAAE,CAAC,QAAQ;AAC5D,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;AAC1E,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,YAAY,EAAE;AACzC,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,EAAE;AACnC,QAAQ,CAAC,CAAC,MAAM;AAChB,QAAQ,CAAC
,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG;AACxB,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,SAAS,EAAE;AAC7C,QAAQ,CAAC;AACT;AACA,IAAI,8DAA8D;AAClE;AACA;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,MAAM,CAAC;AACf,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC,QAAQ,EAAE;AAC/D;AACA,IAAI,8DAA8D;AAClE;AACA;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,eAAe,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,EAAE;AACnH,gBAAgB,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAChD,gBAAgB,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAC5C,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC;AACA,YAAY,8DAA8D;AAC1E,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B;AACA,YAAY,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC;AACrC;AACA,YAAY,8DAA8D;AAC1E;AACA;AACA,YAAY,8DAA8D;AAC1E,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD;AACA,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,YAAY,GAAG,IAAI,EAAE,IAAI,GAAG;AAC7E,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,YAAY,GAAG;AAC3F,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,GAAG;AAC/C,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI;AACvF;AACA,YAAY,8DAA8D;AAC1E;AACA;AACA,YAAY,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACnD,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG;AAC9E;AACA,YAAY,QAAQ,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG;AACzC,YAAY,QAAQ;AACpB,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACjF,gBAAgB,CAAC,KAAK,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AACvE;AACA,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,SAAS,EAAE,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC;AAChE,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE;AACtD,YAAY,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,IAAI,EAAE;AACvC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE;AAChF,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE;AAChF,YAAY,WAAW,CAAC,UAAU,CAAC,QAAQ,CAAC,IAAI,GAAG,SAAS,EAAE,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC,CAAC,IAAI,CAAC,IAAI,EAAE;AACjG,gBAAgB,EAAE,CAAC,CAAC,UAAU,EAAE;AAChC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE;AAC/E,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,
CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE;AAC/E,gBAAgB,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC;AAC3C,gBAAgB,CAAC,MAAM,GAAG;AAC1B,YAAY,IAAI;AAChB,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACvG,gBAAgB,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;AACrC,gBAAgB,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACzC,YAAY,WAAW,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,EAAE;AAChD,gBAAgB,EAAE,CAAC,CAAC,UAAU,EAAE;AAChC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE;AAC/E,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE;AAC/E;AACA;AACA,YAAY,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,GAAG;AAClC;AACA,QAAQ,GAAG;AACX,QAAQ,WAAW,CAAC,SAAS,EAAE,YAAY,CAAC,SAAS,GAAG;AACxD,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B;AACA,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAChC,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC7C,QAAQ,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC/E,QAAQ,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AACjF,QAAQ,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAClF,QAAQ,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,GAAG,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAChF,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC5C,QAAQ,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;AAClB,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9B,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC3C,QAAQ,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AACjB,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9B,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC3C,QAAQ,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AACjB,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACjC,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,OAAO,CAAC;AAC9C,QAAQ,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,EAAE;AAChC,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC5C,QAAQ,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;AAClB,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC
,CAAC;AAC/B,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC5C,QAAQ,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACrC,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAClC,QAAQ,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC;AAC/C,QAAQ,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACrB,QAAQ,WAAW,CAAC,KAAK,CAAC,QAAQ,EAAE;AACpC,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,EAAE;AACN,IAAI,8DAA8D;AAClE;AACA;AACA,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,CAAC;AC/JD,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC,OAAO,CAAC,CAAC,CAAC;AACrC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAC7C,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAClC,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAClC,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,EAAE;AAChC,QAAQ,CAAC;AACT;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AACzD,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,YAAY,EAAE;AACzC,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI;AACtB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE;AACrB,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,KAAK;AAC3B,QAAQ,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,KAAK;AACjC,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI;AACtB,QAAQ,CAAC,CAAC,CAAC;AACX,QAAQ,CAAC,CAAC,CAAC;AACX,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,IAAI;AAC5B,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG;AACxB,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,KAAK,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC,SAAS,EAAE;AACjE,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,IAAI;AAC7B,QAAQ,CAAC;AACT;AACA,IAAI,OAAO,CAAC,WAAW,CAAC,KAAK,EAAE;AAC/B,IAAI,OAAO,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,GAAG;AACvD;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC,QAAQ,EAAE;AAC/D;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,WAAW,CAAC,MAAM,CAAC,OAAO,EAAE;AACpC,QAAQ,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,EAAE;AACjD,QAAQ,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,EAAE;AACjD;AACA,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAC5C,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACnF,gBAAgB,eAAe,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AACtE;AACA,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC;AACxC,gBAAgB,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACtC,oBAAoB,SAAS,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,EAAE;AAC5C,gBAAgB,CAAC,CAAC,IAAI,CAAC,CAAC;AACxB,oBAAoB,SAAS,CAAC,UAAU,GAAG,QAAQ,CAAC,QAAQ,EAAE,IAAI,CAAC,KAAK,EAAE;AAC1E,gBAAgB,CAAC;AACjB,YAAY,EAAE;AACd,YAAY,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC;AACnC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B,YAAY,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG;AACjC,YAAY,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG;AACjC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CA
AC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,GAAG,IAAI,EAAE,IAAI,GAAG;AACtE,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG;AAC7F,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACxF;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,UAAU,GAAG,MAAM,EAAE,IAAI,GAAG;AAC7E,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG;AAC7D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG;AAC7D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,WAAW,GAAG;AAC/D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,eAAe,GAAG;AACnE,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,GAAG;AAC9D;AACA,YAAY,EAAE,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC;AAClC,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACzC,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI;AAC9E,YAAY,CAAC;AACb;AACA,YAAY,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,CAAC,IAAI,EAAE;AAC3C,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,cAAc,CAAC;AAC9C,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,eAAe,EAAE;AACjD,gBAAgB;AAChB,YAAY,OAAO;AACnB,gBAAgB,CAAC,KAAK,CAAC,cAAc,CAAC;AACtC,gBAAgB,CAAC,MAAM,CAAC,eAAe,CAAC;AACxC,gBAAgB,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,oBAAoB,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAClD,gBAAgB,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AACxE;AACA,YAAY,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,WAAW,EAAE;AACzD,gBAAgB,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,IAAI;AACzE;AACA,YAAY,EAAE,CAAC,UAAU,CAAC,WAAW,EAAE,IAAI,CAAC,OAAO,EAAE;AACrD,YAAY;AACZ,YAAY,EAAE,CAAC,KAAK,CAAC,KAAK;AAC1B,YAAY,KAAK;AACjB,gBAAgB,CAAC,CAAC,CAAC,CAAC,CAAC;AACrB,gBAAgB,CAAC,EAAE,EAAE,KAAK,EAAE,CAAC,QAAQ,EAAE,CAAC,CAAC;AACzC,oBAAoB,OAAO,CAAC,YAAY,EAAE;AAC1C,gBAAgB,GAAG;AACnB;AACA,YAAY,KAAK,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,CAAC;AAC9C,gBAAgB,EAAE,CAAC,EAAE,YAAY,CAAC,CAAC,CAAC;AACpC,oBAAoB,QAAQ,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,IAAI;AAClF,gBAAgB,CAAC;AACjB,YAAY,GAAG;AACf;AACA,YAAY,EAAE,CAAC,CAAC,WAAW,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,WAAW,EAAE;AACvD;AACA,YAAY,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,eAAe,GAAG,SAAS,EAAE,CAAC,EAAE;AACxE,gBAAgB,CAAC,IAAI,EAAE,WAAW,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,KAAK;AACvD,IAAI;AACJ,YAAY,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,OAAO,CAAC,KAAK,EAAE;AAC9C,gBAAgB,CAAC,MAAM,EAAE,CAAC,GAAG;AAC7B;AACA,YAAY,YAAY,CAAC,MAAM,EAAE,IAAI,EAAE;AACvC,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,EAAE;AACtC,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AAC7B,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AAC7B,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,eAAe,EAAE;AACjD;AACA,YAAY,YAAY,CAAC,MAAM,EAAE,IAAI,EAAE;AACvC,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,KAAK,EAAE;AACvC,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AAC7B,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AAC7B,gBAAgB,CAAC,I
AAI,EAAE,MAAM,EAAE,CAAC,eAAe,EAAE;AACjD;AACA,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,EAAE;AACnD,gBAAgB,CAAC,IAAI,CAAC,KAAK,EAAE;AAC7B,YAAY,MAAM,CAAC,SAAS,EAAE,IAAI,EAAE;AACpC,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,eAAe,EAAE;AACjD,YAAY,MAAM,CAAC,SAAS,GAAG,MAAM,GAAG,MAAM,EAAE,IAAI,GAAG,IAAI,EAAE,CAAC,EAAE,CAAC,UAAU,EAAE;AAC7E;AACA,YAAY,OAAO,CAAC,IAAI,EAAE;AAC1B;AACA,YAAY,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,CAAC,IAAI,EAAE;AAC3C,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,cAAc,CAAC;AAC9C,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,eAAe,EAAE;AACjD;AACA,YAAY,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAC5B,gBAAgB,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC;AAC9B,oBAAoB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,cAAc,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AAC5E,oBAAoB,CAAC,QAAQ,EAAE,eAAe,CAAC,CAAC,CAAC,EAAE;AACnD,EAAE;AACF,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACzC,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK;AAC5E,gBAAgB,EAAE,CAAC,UAAU,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG;AACxD,oBAAoB,CAAC,IAAI,CAAC,KAAK,EAAE;AACjC,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAC5B,gBAAgB,KAAK;AACrB,oBAAoB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7B,oBAAoB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,eAAe,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AAC5E,oBAAoB,CAAC,QAAQ,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE;AACnD;AACA,gBAAgB,EAAE,CAAC,UAAU,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG;AACxD,oBAAoB,CAAC,IAAI,CAAC,KAAK,EAAE;AACjC,YAAY,CAAC;AACb,YAAY;AACZ,YAAY,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACrC,gBAAgB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK;AACxE;AACA,YAAY,8DAA8D;AAC1E,YAAY,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AAC5D,YAAY,8DAA8D;AAC1E;AACA,YAAY,8DAA8D;AAC1E,YAAY,EAAE,CAAC,SAAS;AACxB,YAAY,8DAA8D;AAC1E,IAAI;AACJ,YAAY,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,IAAI,GAAG,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,WAAW,EAAE;AAC7E,YAAY,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC;AACpC,gBAAgB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG;AACpC,oBAAoB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACnC,oBAAoB,CAAC,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5C,gBAAgB,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC;AAChD,oBAAoB,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvE,oBAAoB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvC,oBAAoB,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvE,oBAAoB,CAAC,CAAC,CAAC,CAAC,CAAC;AACzB,oBAAoB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACrD,oBAAoB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvC,oBAAoB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,
[Base64 VLQ "mappings" payload from a deleted, machine-generated JavaScript source map (bundled web asset) omitted — no human-readable content.]
D,gBAAgB,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI;AACxE,YAAY,QAAQ,CAAC,UAAU,GAAG,IAAI,CAAC,IAAI,EAAE;AAC7C;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,IAAI;AACzB,YAAY,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAC5B,gBAAgB,KAAK;AACrB,oBAAoB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7B,oBAAoB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,cAAc,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AAC5E,oBAAoB,CAAC,QAAQ,EAAE,eAAe,CAAC,CAAC,CAAC,EAAE;AACnD;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACzC,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK;AAC5E,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACzC,oBAAoB,CAAC,UAAU,EAAE;AACjC,oBAAoB,CAAC,IAAI,CAAC,KAAK,EAAE;AACjC,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAC5B,gBAAgB,KAAK;AACrB,oBAAoB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7B,oBAAoB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,eAAe,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AAC5E,oBAAoB,CAAC,QAAQ,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE;AACnD;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACzC,oBAAoB,CAAC,UAAU,EAAE;AACjC,oBAAoB,CAAC,IAAI,CAAC,KAAK,EAAE;AACjC,YAAY,CAAC;AACb;AACA,YAAY,8DAA8D;AAC1E,YAAY,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AAC5D,YAAY,8DAA8D;AAC1E;AACA,YAAY,gBAAgB,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1E,gBAAgB,IAAI,CAAC,eAAe,GAAG;AACvC;AACA,gBAAgB,GAAG,CAAC,WAAW,CAAC,CAAC,UAAU,CAAC,CAAC,cAAc,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG;AAC1E,gBAAgB,IAAI;AACpB,oBAAoB,CAAC,MAAM,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,wBAAwB,MAAM,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,wBAAwB,MAAM,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC;AAChD,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,wBAAwB,UAAU,CAAC,CAAC,CAAC,EAAE,CAAC,iBAAiB,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,KAAK,CAAC,CAAC,IAAI;AACnG,wBAAwB,IAAI,CAAC,cAAc,CAAC,UAAU,CAAC,IAAI,EAAE;AAC7D,wBAAwB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,UAAU,EAAE;AAC9D,wBAAwB,EAAE,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,MAAM,CAAC;AACxD,wBAAwB,EAAE,CAAC,CAAC,WAAW,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC;AAC3E,wBAAwB,EAAE,CAAC,CAAC,cAAc,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,GAAG,KAAK,CAAC,CAAC,GAAG,KAAK,CAAC,UAAU,GAAG;AACvH,wBAAwB,OAAO,CAAC,IAAI,EAAE;AACtC,4BAA4B,GAAG,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC5C,4BAA4B,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC,UAAU,EAAE;AAChE,4BAA4B,KAAK,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,MAAM,CAAC,WAAW,EAAE;AACpE,4BAA4B,IAAI,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,UAAU,CAAC;AAC3D,wBAAwB,GAAG;AAC3B,oBAAoB,GAAG;AACvB;AACA,gBAAgB,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,UAAU,GAAG,KAAK,CAAC,CAAC,GAAG,WAAW,CAAC,UAAU,GAAG;AACnF,gBAAgB,gBAAgB,CAAC,OAAO;AACxC,oBAAoB,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACnD,wBAAwB,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,EAAE;AACrD,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,EAAE;AAC3B,wBAAwB,KAAK,CAAC,CAAC,MAAM,CAAC;AACtC,wBAAwB,KAAK,CAAC,CAAC,UAAU,CAAC;AAC1C,wBAAwB,MAAM,CAAC,CAAC,OAAO;AACvC,oBAAoB,KAAK;AACzB;AACA,gBAAgB,gBAAgB,CAAC,eAAe,CAAC,cAAc,EAAE;AACjE;AACA,YAAY,GAAG;AACf;AACA,YAAY,gBAAgB,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,EAAE,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACxE,gBAAgB,QAAQ,CAAC,WAAW,GAAG;AACvC,gBAAgB,IAAI,CAAC,eAAe,GAAG;AACvC,YAAY,GAAG;AACf;AACA,YAAY,MAAM,CAAC,QAAQ,CAAC,EAAE,EAAE,WAAW,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,
CAAC,CAAC;AAC7D,gBAAgB,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC;AACzC;AACA,gBAAgB,EAAE,CAAC,EAAE,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC;AAC9E,oBAAoB,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1C,wBAAwB,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC;AAC3C,wBAAwB,IAAI,CAAC,SAAS,GAAG,EAAE,CAAC,MAAM,GAAG,OAAO,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE;AAChF,wBAAwB,MAAM,CAAC,CAAC,CAAC;AACjC,oBAAoB,GAAG;AACvB,gBAAgB,CAAC;AACjB;AACA,gBAAgB,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG;AAC/E,gBAAgB,QAAQ,CAAC,WAAW,CAAC,KAAK,EAAE;AAC5C;AACA,gBAAgB,SAAS,CAAC,UAAU,GAAG,IAAI,CAAC,KAAK,EAAE;AACnD,YAAY,GAAG;AACf;AACA,YAAY,MAAM,CAAC,QAAQ,CAAC,EAAE,EAAE,cAAc,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9D,gBAAgB,EAAE,MAAM,CAAC,QAAQ,CAAC,MAAM,CAAC,MAAM,CAAC,MAAM,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,MAAM,CAAC;AAC/F,gBAAgB,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1C,oBAAoB,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC;AACtC,gBAAgB,GAAG;AACnB,gBAAgB,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC;AACnC;AACA,gBAAgB,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG;AAC/E,gBAAgB,QAAQ,CAAC,WAAW,CAAC,KAAK,EAAE;AAC5C,gBAAgB,KAAK,CAAC,MAAM,GAAG;AAC/B,YAAY,GAAG;AACf;AACA,YAAY,QAAQ,CAAC,EAAE,EAAE,WAAW,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,gBAAgB,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC;AACxD,oBAAoB,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACrD,wBAAwB,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACxD,oBAAoB,GAAG;AACvB;AACA,oBAAoB,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC;AAChD,gBAAgB,CAAC;AACjB;AACA,gBAAgB,KAAK,CAAC,MAAM,GAAG;AAC/B,YAAY,GAAG;AACf,QAAQ,GAAG;AACX;AACA,QAAQ,WAAW,CAAC,SAAS,EAAE,kBAAkB,CAAC,SAAS,GAAG;AAC9D,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxD,IAAI,8DAA8D;AAClE;AACA,IAAI,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAChE,QAAQ,GAAG,EAAE,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC;AACzB,YAAY,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,GAAG,CAAC,IAAI,EAAE;AACrC,YAAY,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,GAAG,CAAC,IAAI,EAAE;AACvC,YAAY,KAAK,CAAC,CAAC,GAAG,CAAC,KAAK;AAC5B,QAAQ,EAAE;AACV,QAAQ,OAAO,CAAC,IAAI,CAAC,GAAG,EAAE,MAAM,CAAC,KAAK,EAAE;AACxC,IAAI,GAAG;AACP;AACA,IAAI,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC/D,QAAQ,OAAO,CAAC,MAAM,CAAC,IAAI,EAAE;AAC7B,IAAI,GAAG;AACP;AACA,IAAI,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAChE,QAAQ,OAAO,GAAG;AAClB,IAAI,GAAG;AACP;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,UAAU;AACpC,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC;AACtB,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,gBAAgB,CAAC,CAAC,CAAC,gBAAgB,CAAC;AAC9C,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC;AAC5B;AACA,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACx
C,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACjF,QAAQ,MAAM,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACnF,QAAQ,UAAU,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC3F,QAAQ,SAAS,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AACxF,QAAQ,SAAS,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AACxF,QAAQ,YAAY,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,IAAI;AACpG,QAAQ,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAClF;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC;AACtC,gBAAgB,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;AACnC,gBAAgB,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;AAClC,YAAY,CAAC;AACb,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAClE,YAAY,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzC,YAAY,MAAM,CAAC,KAAK,CAAC,KAAK,EAAE;AAChC,YAAY,IAAI,CAAC,KAAK,CAAC,KAAK,EAAE;AAC9B,QAAQ,GAAG;AACX,QAAQ,QAAQ,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,kBAAkB,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACpF,YAAY,kBAAkB,CAAC,CAAC,CAAC;AACjC,YAAY,WAAW,CAAC,KAAK,CAAC,kBAAkB,EAAE;AAClD,YAAY,KAAK,CAAC,QAAQ,CAAC,kBAAkB,EAAE;AAC/C,YAAY,KAAK,CAAC,QAAQ,CAAC,kBAAkB,EAAE;AAC/C,QAAQ,GAAG;AACX,QAAQ,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,eAAe,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACrF,YAAY,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC;AAChC,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG;AAClD,QAAQ,GAAG;AACX,QAAQ,uBAAuB,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,uBAAuB,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACrG,YAAY,uBAAuB,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC;AAC7B,gBAAgB,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AACzC,YAAY,CAAC;AACb,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,IAAI,EAAE;AACzC,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC;AACA,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;AACF;AACA;AACA,EAAE,CAAC,SAAS,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,UAAU,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM;AACtE,EAAE,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACrC,IAAI,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,kBAAkB,CAAC,EAAE,CAAC,MAAM,CA
AC,OAAO,IAAI;AAClE;AACA,IAAI,EAAE,CAAC,OAAO,CAAC,OAAO,CAAC,OAAO,CAAC,KAAK,CAAC,EAAE,CAAC,IAAI,CAAC,QAAQ,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC;AAClE,IAAI,KAAK,CAAC,uBAAuB,CAAC,IAAI,EAAE;AACxC,IAAI,KAAK,CAAC,gBAAgB,CAAC,OAAO,CAAC,gBAAgB,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACpE,QAAQ,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,IAAI,CAAC,KAAK;AAC1D,QAAQ,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,EAAE,IAAI,CAAC;AACpC,QAAQ,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG;AACpD,QAAQ,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,EAAE;AAC3D,QAAQ,MAAM,CAAC,EAAE,CAAC,CAAC;AACnB,YAAY,EAAE,EAAE,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC,CAAC;AACzE,YAAY,EAAE,KAAK,EAAE,CAAC,CAAC;AACvB,YAAY,EAAE,EAAE,EAAE,EAAE,CAAC,IAAI,GAAG,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,CAAC;AACxF,YAAY,EAAE,EAAE,EAAE,EAAE,CAAC,KAAK,GAAG,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,CAAC;AAC1F,YAAY,EAAE,EAAE,EAAE,EAAE,CAAC,IAAI,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,CAAC;AACvF,YAAY,EAAE,EAAE,EAAE,EAAE,CAAC,GAAG,GAAG,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,CAAC;AACtF,YAAY,GAAG,KAAK,GAAG;AACvB,IAAI,GAAG;AACP,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;AACF;AACA,EAAE,CAAC,gBAAgB,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,UAAU,CAAC,KAAK,CAAC,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM;AACpF,EAAE,CAAC,MAAM,CAAC,mBAAmB,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC5C,IAAI,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,kBAAkB,CAAC,EAAE,CAAC,MAAM,CAAC,cAAc,IAAI;AACzE;AACA,IAAI,EAAE,CAAC,OAAO,CAAC,OAAO,CAAC,OAAO,CAAC,KAAK,CAAC,EAAE,CAAC,IAAI,CAAC,QAAQ,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC;AAClE,IAAI,KAAK,CAAC,uBAAuB,CAAC,IAAI,EAAE;AACxC,IAAI,KAAK,CAAC,gBAAgB,CAAC,OAAO,CAAC,gBAAgB,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACpE,QAAQ,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,IAAI,CAAC,KAAK;AAC1D,QAAQ,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,EAAE,IAAI,CAAC;AACpC,QAAQ,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG;AACpD,QAAQ,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,EAAE;AAC3D,QAAQ,MAAM,CAAC,EAAE,CAAC,CAAC;AACnB,YAAY,EAAE,EAAE,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC,CAAC;AACzE,YAAY,EAAE,KAAK,EAAE,CAAC,CAAC;AACvB,YAAY,EAAE,EAAE,EAAE,EAAE,CAAC,IAAI,GAAG,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,CAAC;AACxF,YAAY,EAAE,EAAE,EAAE,EAAE,CAAC,KAAK,GAAG,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,CAAC;AAC1F,YAAY,EAAE,EAAE,EAAE,EAAE,CAAC,IAAI,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,CAAC;AACvF,YAAY,EAAE,EAAE,EAAE,EAAE,C
AAC,GAAG,GAAG,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,CAAC;AACtF,YAAY,GAAG,KAAK,GAAG;AACvB,IAAI,GAAG;AACP,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;ACnZF,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC/B,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AACvD,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG;AACrB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE;AACrB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AAC/C,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC;AAClD,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,EAAE;AACrC,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,OAAO,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,CAAC,OAAO;AAC3D,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI;AACtB,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,MAAM,CAAC,GAAG,CAAC,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,OAAO;AACvG,QAAQ,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,IAAI;AAC3B,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,IAAI,GAAG,EAAE,EAAE,CAAC,IAAI,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,QAAQ,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,QAAQ,CAAC;AAChH,QAAQ,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,KAAK,GAAG,EAAE,EAAE,CAAC,IAAI,CAAC,CAAC,QAAQ,CAAC,MAAM,CAAC,KAAK,CAAC,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC;AACtJ,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,KAAK;AAC1B,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,WAAW,EAAE,CAAC,CAAC,cAAc,EAAE,CAAC,CAAC,eAAe,EAAE,CAAC,CAAC,cAAc,EAAE,CAAC,CAAC,WAAW,EAAE;AACrH,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,EAAE,OAAO,CAAC,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,GAAG,CAAC,CAAC,OAAO,CAAC;AAChE,QAAQ,CAAC;AACT;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AACpE,gBAAgB,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAC5C,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,MAAM,GAAG,IAAI,EAAE,IAAI,GAAG;AACvE,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,MAAM,GAAG,MAAM,EAAE,CAAC,GAAG;AAC9F,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,EAAE,CAAC,CAAC,UAAU,CAAC;AAC3B,gBAAgB,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACjG,YAAY,IAAI;AAChB,gBAAgB,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AAC5F;AACA,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,GAAG,EAAE,CAAC,MAAM,EAAE;AAClD,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACnC,oBAAoB,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,EAA
E,CAAC,MAAM,CAAC,CAAC,CAAC;AACnD;AACA,oBAAoB,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,wBAAwB,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC;AAC/D,oBAAoB,GAAG;AACvB,gBAAgB,GAAG;AACnB;AACA,YAAY,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,MAAM,GAAG;AACpF,YAAY,GAAG,CAAC,WAAW,CAAC;AAC5B;AACA,YAAY,GAAG,CAAC,WAAW,CAAC;AAC5B,YAAY,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC;AAC1B,gBAAgB,IAAI,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC;AAChC,oBAAoB,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC;AACrC,oBAAoB,KAAK,CAAC;AAC1B,gBAAgB,IAAI,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC;AAChC,oBAAoB,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC;AACrC,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC;AACnC,gBAAgB,WAAW,CAAC,MAAM,EAAE,MAAM,EAAE;AAC5C,oBAAoB,CAAC,KAAK,EAAE,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC;AAC7C,oBAAoB,CAAC,IAAI,EAAE,KAAK,GAAG,EAAE,CAAC,MAAM,CAAC,MAAM,EAAE;AACrD,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE;AAClC;AACA,gBAAgB,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,GAAG,EAAE,CAAC,MAAM,CAAC,MAAM,GAAG;AACjE,YAAY,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC;AAC3C,gBAAgB,WAAW,CAAC,MAAM,EAAE,IAAI,EAAE;AAC1C,oBAAoB,CAAC,KAAK,EAAE,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC;AAC7C,oBAAoB,CAAC,IAAI,EAAE,KAAK,GAAG,EAAE,CAAC,MAAM,CAAC,MAAM,EAAE;AACrD,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC;AAClC,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,EAAE;AACnC,gBAAgB,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,GAAG,EAAE,CAAC,MAAM,CAAC,MAAM,GAAG;AACjE;AACA,gBAAgB,WAAW,CAAC,MAAM,EAAE,CAAC,EAAE;AACvC,oBAAoB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,GAAG,EAAE;AAClD,oBAAoB,CAAC,QAAQ,EAAE,SAAS,IAAI,IAAI,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,GAAG,IAAI,IAAI,EAAE,IAAI,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,EAAE,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,KAAK,IAAI,IAAI,GAAG;AACnN,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,EAAE,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,IAAI;AACtE;AACA,gBAAgB,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,GAAG,EAAE,CAAC,KAAK,CAAC,GAAG,GAAG;AACpE;AACA,gBAAgB,cAAc,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACnD,oBAAoB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE;AACrD,wBAAwB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,GAAG;AAC3D,gBAAgB,GAAG;AACnB,YAAY,CAAC;AACb;AACA,YAAY,WAAW,CAAC,MAAM,EAAE,IAAI,EAAE;AACtC,gBAAgB,CAAC,IAAI,EAAE,IAAI,CAAC,MAAM,EAAE,CAAC,CAAC,KAAK,EAAE;AAC7C,gBAAgB,CAAC,IAAI,EAAE,KAAK,GAAG,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAC/C,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,EAAE,IAAI,EAAE;AACpC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,GAAG;AACjC;AACA,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,EAAE,IAAI,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,GAAG;AAClE;AACA,YAAY,MAAM;AAClB,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAChD,oBAAoB,QAAQ,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE,EAAE,IAAI,CAAC,CAAC,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,OAAO;AACpG,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,oBAAoB,QAAQ,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,EAAE;AACjD,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5C,oBAAoB,QAAQ,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,EAAE;AAC9C,oBAAoB,EAAE,CAAC,IAAI,CAAC,IAA
I,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ;AACvE,oBAAoB,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,GAAG;AAC7C,oBAAoB,EAAE,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC;AACtC,wBAAwB,EAAE,CAAC,IAAI,CAAC,GAAG,OAAO,EAAE,CAAC,CAAC;AAC9C,4BAA4B,EAAE,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC;AAClD,gCAAgC,EAAE,KAAK,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,QAAQ,CAAC;AAClF,gCAAgC,EAAE,EAAE,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC;AAClE,gCAAgC,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,GAAG;AACzF,gCAAgC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC;AACnD,4BAA4B,CAAC;AAC7B,4BAA4B,IAAI,CAAC,CAAC;AAClC,gCAAgC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC;AACzD,gCAAgC,EAAE,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,QAAQ,GAAG,CAAC,CAAC;AAC7F,oCAAoC,EAAE,GAAG,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,MAAM;AACrG,oCAAoC,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC;AAC7E,oCAAoC,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,KAAK,GAAG;AAC9F,gCAAgC,CAAC;AACjC,4BAA4B,CAAC;AAC7B,wBAAwB,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC;AACtD,4BAA4B,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC;AAC1C,gCAAgC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC;AAC7D,gCAAgC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,EAAE,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC;AAC7G,gCAAgC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC,CAAC,YAAY,CAAC;AAC5E,4BAA4B,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,QAAQ,CAAC,CAAC,CAAC;AACnD,gCAAgC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC;AACzD,gCAAgC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC5D,gCAAgC,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,GAAG;AACjG,gCAAgC,EAAE,CAAC,CAAC,OAAO,CAAC,KAAK,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,YAAY,CAAC,GAAG,CAAC,CAAC;AACrG,oCAAoC,EAAE,GAAG,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,MAAM;AACrG,oCAAoC,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC;AAC7E,oCAAoC,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC;AACnE,wCAAwC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC,CAAC,KAAK,CAAC;AACtF,oCAAoC,GAAG;AACvC,gCAAgC,CAAC;AACjC,4BAA4B,CAAC;AAC7B,wBAAwB,CAAC;AACzB,wBAAwB,QAAQ,CAAC,WAAW,EAAE;AAC9C,4BAA4B,QAAQ,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG;AACpF,4BAA4B,UAAU,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,UAAU,CAAC,EAAE;AACvF,wBAAwB,GAAG;AAC3B;AACA,oBAAoB,CAAC;AACrB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,oBAAoB,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC;AAC7D,oBAAoB,QAAQ,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,EAAE;AACjD,oBAAoB,EAAE,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC;AACtC,wBAAwB,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ;AAC3E,wBAAwB,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,GAAG;AACjD,wBAAwB,EAAE,GAAG,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,GAAG,CAAC;AACz
F,wBAAwB,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,CAAC;AAC5G,wBAAwB,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC;AACvD,4BAA4B,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC;AACnD,4BAA4B,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC;AACxF,wBAAwB,GAAG;AAC3B,wBAAwB,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC;AAC3C,wBAAwB,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC1E,wBAAwB,QAAQ,CAAC,WAAW,EAAE;AAC9C,4BAA4B,QAAQ,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,QAAQ,CAAC,EAAE;AACnF,wBAAwB,GAAG;AAC3B,oBAAoB,CAAC;AACrB,gBAAgB,GAAG;AACnB;AACA,YAAY,MAAM,CAAC,OAAO,EAAE,EAAE,CAAC,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,YAAY,CAAC,GAAG;AACjF,YAAY,MAAM,CAAC,IAAI,GAAG,MAAM,GAAG;AACnC;AACA,YAAY,UAAU;AACtB,gBAAgB,CAAC,IAAI,EAAE,IAAI,EAAE,CAAC,YAAY,CAAC;AAC3C,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,GAAG;AACvE;AACA,YAAY,EAAE,IAAI,CAAC,CAAC,SAAS,CAAC,KAAK,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,UAAU,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,CAAC;AACtH,YAAY,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,EAAE;AAChD,YAAY,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC;AAChC,YAAY,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;AACxB;AACA,gBAAgB,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,GAAG;AACtC,gBAAgB,MAAM,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3C,oBAAoB,GAAG,CAAC,UAAU,CAAC;AACnC,oBAAoB,EAAE,CAAC,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC;AACnG,wBAAwB,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC,GAAG,SAAS,CAAC,CAAC,CAAC,CAAC,YAAY,EAAE;AAC5F,wBAAwB,UAAU,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,MAAM,EAAE,IAAI,GAAG,IAAI,CAAC,UAAU,CAAC,CAAC,CAAC,OAAO;AAC7F,wBAAwB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,MAAM,EAAE,GAAG,CAAC,KAAK,GAAG,IAAI,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC,IAAI;AAC1F,oBAAoB,CAAC,CAAC,IAAI,CAAC,CAAC;AAC5B,wBAAwB,UAAU,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,MAAM,EAAE,IAAI,GAAG;AACpE,oBAAoB,CAAC;AACrB,oBAAoB,GAAG,CAAC,cAAc,CAAC;AACvC,oBAAoB,GAAG,CAAC,CAAC;AACzB,wBAAwB,cAAc,CAAC,CAAC,CAAC,UAAU,CAAC,IAAI,GAAG,qBAAqB,GAAG;AACnF,wBAAwB,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,UAAU,CAAC,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,EAAE,CAAC,WAAW,CAAC,CAAC,OAAO;AAClI,wBAAwB,EAAE,CAAC,cAAc,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,GAAG;AAC9D,oBAAoB,CAAC;AACrB,oBAAoB,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9B,wBAAwB,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,mBAAmB,CAAC,UAAU,EAAE;AAClF,oBAAoB,CAAC;AACrB;AACA,oBAAoB,YAAY,CAAC,IAAI,CAAC,cAAc,CAAC,CAAC,CAAC,OAAO,EAAE;AAChE,gBAAgB,GAAG;AACnB;AACA,gBAAgB,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC;AACrC,gBAAgB,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,GAAG;AACtC,gBAAgB,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC;AAChC;AACA,gBAAgB,KAAK,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,cAAc,CAAC,EAAE,CAAC,YAAY,CAAC,CAAC,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC,CAAC;AAC7F,oBAAoB,YAAY,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,YAAY,EAAE;AAC5E,oBAAoB,WAAW,CAAC,EAAE,CAAC,YAAY,CAAC,YAAY,IAAI;AAChE,gBAAgB,CAAC;AACjB,gBAAgB,EAAE,CAAC,CAAC,YAAY,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,OAAO,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG;AACz
F;AACA,gBAAgB,KAAK,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,cAAc,CAAC,EAAE,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5E,oBAAoB,YAAY,CAAC,CAAC,CAAC,GAAG;AACtC,oBAAoB,YAAY,GAAG;AACnC;AACA,oBAAoB,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AACnE,wBAAwB,EAAE,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;AACrF,4BAA4B,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,EAAE;AAC7E,oBAAoB,CAAC;AACrB;AACA,oBAAoB,WAAW,CAAC,CAAC,CAAC,YAAY,CAAC,MAAM,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;AACzF,wBAAwB,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC;AAC1C,oBAAoB,GAAG;AACvB,gBAAgB,CAAC;AACjB;AACA,gBAAgB,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,GAAG;AACpC,gBAAgB,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AAClE,oBAAoB,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC;AACzC,oBAAoB,IAAI,CAAC,EAAE,CAAC,YAAY,CAAC,CAAC,EAAE;AAC5C,gBAAgB,CAAC;AACjB;AACA,gBAAgB,MAAM;AACtB,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvD,wBAAwB,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,IAAI;AAC1I,oBAAoB,GAAG;AACvB;AACA,gBAAgB,EAAE,QAAQ,CAAC,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,QAAQ,CAAC,MAAM,CAAC,GAAG,CAAC,KAAK,CAAC,KAAK;AACjF,gBAAgB,EAAE,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC;AACjC,oBAAoB,CAAC,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACtH,gBAAgB,CAAC;AACjB,gBAAgB,IAAI,CAAC,CAAC;AACtB,oBAAoB,CAAC,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AAChF,gBAAgB,CAAC;AACjB;AACA,gBAAgB,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,WAAW,EAAE;AACpH;AACA,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB;AACA,gBAAgB,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7B,oBAAoB,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC;AAChC,oBAAoB,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACjC,oBAAoB,IAAI,CAAC;AACzB,gBAAgB,MAAM;AACtB,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvD,wBAAwB,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,MAAM,EAAE,IAAI,GAAG,IAAI,GAAG,qBAAqB,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC;AAC7G,wBAAwB,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC;AACvC;AACA,wBAAwB,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC;AACjF,4BAA4B,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,4BAA4B,IAAI,CAAC,EAAE,CAAC,WAAW,CAAC;AAChD,wBAAwB,CAAC;AACzB;AACA,wBAAwB,OAAO,CAAC,EAAE,CAAC,MAAM,CAAC;AAC1C,wBAAwB,EAAE,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,OAAO,CAAC;AACnE;AACA,wBAAwB,EAAE,CAAC,WAAW,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC;AAC3D,4BAA4B,WAAW,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC1D,wBAAwB,CAAC;AACzB,wBAAwB,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI;AACtE,oBAAoB,GAAG;AACvB;AACA,gBAAgB,EAAE,QAAQ,CAAC,MA
AM,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,QAAQ,CAAC,MAAM,CAAC,GAAG,CAAC,KAAK,CAAC,KAAK;AACjF,gBAAgB,CAAC,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AAC/G;AACA,gBAAgB,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC;AAChE,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC;AACnC,gBAAgB,EAAE,CAAC,IAAI,CAAC,UAAU,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC,MAAM;AACvD,gBAAgB,WAAW;AAC3B,oBAAoB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClD,wBAAwB,MAAM,CAAC,UAAU,CAAC,CAAC,EAAE,CAAC,EAAE,qBAAqB,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;AAC7E,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,EAAE,CAAC;AACvC,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;AAClC,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,EAAE;AACpC;AACA,gBAAgB,EAAE,CAAC,GAAG,CAAC,UAAU,CAAC,GAAG,CAAC,GAAG,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC;AAC9D,gBAAgB,MAAM,CAAC,MAAM,EAAE,IAAI,IAAI,KAAK,CAAC,KAAK,EAAE;AACpD,oBAAoB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,EAAE;AAClD,oBAAoB,CAAC,IAAI,EAAE,IAAI,EAAE,CAAC,EAAE,GAAG,EAAE;AACzC,oBAAoB,EAAE,CAAC,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,EAAE,GAAG,EAAE;AAC9C,oBAAoB,CAAC,IAAI,EAAE,OAAO,EAAE,CAAC,EAAE;AACvC;AACA,gBAAgB,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,MAAM,CAAC,EAAE,GAAG;AACzD;AACA,gBAAgB,QAAQ;AACxB,gBAAgB,CAAC,UAAU,GAAG,QAAQ,CAAC,GAAG,CAAC;AAC3C,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,WAAW,CAAC,CAAC;AAC7C,oBAAoB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,WAAW,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC;AAClE,oBAAoB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC;AAChD,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC;AAChD,oBAAoB,CAAC,IAAI,EAAE,OAAO,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACvD;AACA;AACA,YAAY,CAAC;AACb;AACA,YAAY,WAAW;AACvB,gBAAgB,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,UAAU,CAAC;AAC1C,gBAAgB,CAAC,KAAK,EAAE,IAAI,CAAC,OAAO,EAAE,CAAC,YAAY,CAAC;AACpD,gBAAgB,CAAC,KAAK,EAAE,MAAM,EAAE,CAAC,UAAU,EAAE;AAC7C,QAAQ,GAAG;AACX;AACA,QAAQ,QAAQ,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpC,YAAY,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,MAAM,CAAC,EAAE,GAAG,EAAE;AAChD,YAAY,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC;AAC1B,gBAAgB,MAAM,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,EAAE,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE,GAAG,EAAE;AACtD,YAAY,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,QAAQ,CAAC,CAAC,CAAC;AACnC,gBAAgB,EAAE,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AAClD,gBAAgB,MAAM,CAAC,EAAE,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,GAAG,EAAE;AACvD,YAAY,CAAC;AACb,QAAQ,CAAC;AACT;AACA,QAAQ,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClC,YAAY,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC;AAC/C,gBAAgB,MAAM,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,EAAE,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AACrE,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AAC7C,YAAY,CAAC;AACb,QAAQ,CAAC;AACT;AACA;AACA,QAAQ,QAAQ,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpC,YAAY,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC;AAC/C,gBAAgB,MAAM,CAAC,CAAC,CAAC;AACzB,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,MAAM,CAAC,EAAE,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,C
AAC;AAC5C,YAAY,CAAC;AACb,QAAQ,CAAC;AACT;AACA,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACrF,QAAQ,MAAM,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACvF,QAAQ,GAAG,CAAC,YAAY,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACvF,QAAQ,YAAY,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,IAAI;AACnG,QAAQ,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACrF,QAAQ,YAAY,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,IAAI;AACnG,QAAQ,UAAU,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC/F,QAAQ,OAAO,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AACzF,QAAQ,WAAW,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AACjG,QAAQ,eAAe,EAAE,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,eAAe,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,eAAe,CAAC,CAAC,IAAI;AACzG,QAAQ,QAAQ,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AAC3F,QAAQ,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,IAAI;AACnF;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC3E,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAClE,YAAY,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzC,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC;AACA,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;AC/XF;AACA,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC7B,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,EAAE,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,EAAE;AACtC,QAAQ,CAAC;AACT;AACA,IAAI,GAAG,
CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AACvD,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG;AACrB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG;AACtB,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3B,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,YAAY,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC,CAAC,KAAK;AAC5E,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK;AAC5F,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK;AAC5F,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,UAAU,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,OAAO;AACjJ,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI;AAC/F,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACvE,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,GAAG,CAAC,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,EAAE;AAChD,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,GAAG,CAAC,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,EAAE;AAChD,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,CAAC,aAAa;AACnE,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG;AACxB,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,YAAY,EAAE,CAAC,CAAC,gBAAgB,EAAE,CAAC,CAAC,eAAe,EAAE,CAAC,CAAC,SAAS,EAAE;AACpG,QAAQ,CAAC;AACT;AACA,IAAI,OAAO;AACX,QAAQ,CAAC,SAAS,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,IAAI;AACtC,QAAQ,CAAC,WAAW,EAAE,EAAE,CAAC,GAAG,EAAE,CAAC,EAAE,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,WAAW,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,QAAQ;AAClH,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE;AACA;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,MAAM;AAC9C,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC;AAChE,QAAQ,CAAC;AACT;AACA,IAAI,8DAA8D;AAClE;AACA;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,WAAW,CAAC,MAAM,CAAC,OAAO,EAAE;AACpC,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AACxC,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACnF,gBAAgB,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACtF,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B,YAAY,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG;AACjC,YAAY,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG;AACjC;AACA,YAAY,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AACzB,YAAY,EAAE,CAAC,CAAC,C
AAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AACzB;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,GAAG,IAAI,EAAE,IAAI,GAAG;AAC7E,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,GAAG;AAC3F,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,IAAI,GAAG;AACrD,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,GAAG;AAC/C,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,MAAM,GAAG;AAC1D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,WAAW,GAAG;AAC/D;AACA,YAAY,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACxF;AACA,YAAY,OAAO;AACnB,gBAAgB,CAAC,KAAK,CAAC,cAAc,CAAC;AACtC,gBAAgB,CAAC,MAAM,CAAC,eAAe,EAAE;AACzC;AACA,YAAY,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,WAAW,GAAG;AAC7D,YAAY,WAAW,CAAC,IAAI,CAAC,OAAO,EAAE;AACtC;AACA,YAAY,SAAS,CAAC,MAAM,EAAE,QAAQ,EAAE;AACxC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,GAAG;AAC3D,gBAAgB,CAAC,MAAM,EAAE,IAAI,GAAG;AAChC;AACA,YAAY,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE;AAClE,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,cAAc,CAAC;AAC9C,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,EAAE;AAC7E;AACA,YAAY,CAAC,GAAG,CAAC,IAAI,EAAE,IAAI,CAAC,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI;AAC9F,YAAY,WAAW;AACvB,gBAAgB,CAAC,IAAI,EAAE,IAAI,CAAC,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI;AAC9F;AACA,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,MAAM,GAAG,SAAS,GAAG,EAAE,CAAC,KAAK,EAAE;AACzE,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG;AAC9E,YAAY,MAAM,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,EAAE;AACtC,gBAAgB,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AAC9C,gBAAgB,CAAC,KAAK,EAAE,MAAM,CAAC,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,WAAW,CAAC,EAAE,CAAC,WAAW,CAAC,EAAE;AAC3F,gBAAgB,CAAC,KAAK,EAAE,IAAI,CAAC,OAAO,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE;AAC7C;AACA,YAAY,MAAM,CAAC,IAAI,GAAG,MAAM,GAAG;AACnC;AACA,YAAY,MAAM;AAClB,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,oBAAoB,MAAM,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1E,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,OAAO,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE;AACjE,gBAAgB,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACnE,gBAAgB,CAAC,KAAK,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI;AACrE,YAAY,MAAM,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE;AAC/D,gBAAgB,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,C
AAC,CAAC,CAAC;AAC3C,gBAAgB,CAAC,KAAK,EAAE,IAAI,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,WAAW,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG;AAClF;AACA,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,MAAM,CAAC,SAAS,EAAE,IAAI,CAAC,EAAE,CAAC,IAAI,EAAE;AAC5D,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI;AACvJ,YAAY,SAAS,CAAC,KAAK,GAAG,MAAM,EAAE,IAAI,EAAE;AAC5C,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACzC,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,oBAAoB,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,EAAE;AACxC,wBAAwB,CAAC,WAAW,CAAC,WAAW,CAAC;AACjD,wBAAwB,CAAC,OAAO,CAAC,OAAO,CAAC;AACzC,wBAAwB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,SAAS,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE;AACtF,wBAAwB,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,SAAS,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE;AACvF,wBAAwB,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACvI,wBAAwB,GAAG,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI;AACtH,wBAAwB,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,EAAE;AAChD,gBAAgB,GAAG;AACnB,YAAY,MAAM,CAAC,IAAI,GAAG,SAAS,EAAE,IAAI,CAAC,EAAE,CAAC,IAAI,EAAE;AACnD,gBAAgB,CAAC,MAAM,GAAG;AAC1B;AACA,YAAY,SAAS,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,SAAS,EAAE;AACrE,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,oBAAoB,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,EAAE;AACxC,wBAAwB,CAAC,WAAW,CAAC,WAAW,CAAC;AACjD,wBAAwB,CAAC,OAAO,CAAC,OAAO,CAAC;AACzC,wBAAwB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE;AACrF,wBAAwB,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE;AACtF,wBAAwB,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACtI,wBAAwB,GAAG,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI;AACtH,wBAAwB,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,EAAE;AAChD,gBAAgB,GAAG;AACnB;AACA,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,MAAM,CAAC,SAAS,EAAE,IAAI,CAAC,EAAE,CAAC,IAAI,EAAE;AAC5D,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,GAAG;AACzD;AACA,YAAY,SAAS,CAAC,KAAK,GAAG,MAAM,EAAE,IAAI,EAAE;AAC5C,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CA
[Elided: base64-VLQ "mappings" payload from a minified JavaScript source-map file, apparently one of the bundled static web-UI assets deleted along with the hadoop-hdds/hadoop-ozone subprojects; the encoded mapping data contains no human-readable content.]
KAAK,CAAC,CAAC,CAAC,CAAC,IAAI;AAC7C,gBAAgB,EAAE,CAAC,CAAC,QAAQ,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC;AAC3D,oBAAoB,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,GAAG;AACtC,gBAAgB,CAAC,CAAC,IAAI,CAAC,CAAC;AACxB,oBAAoB,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,GAAG;AACvC,gBAAgB,CAAC;AACjB;AACA,gBAAgB,KAAK;AACrB,oBAAoB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7B,oBAAoB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,cAAc,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AAC5E,oBAAoB,CAAC,QAAQ,EAAE,gBAAgB,CAAC,CAAC,CAAC,EAAE;AACpD;AACA,gBAAgB,KAAK,CAAC,MAAM,EAAE,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,KAAK;AAC5E;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG,UAAU,GAAG,QAAQ,CAAC,kBAAkB,CAAC;AACnF,oBAAoB,CAAC,IAAI,CAAC,KAAK,EAAE;AACjC;AACA,gBAAgB,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK;AACrD,gBAAgB,aAAa,CAAC,UAAU,GAAG,QAAQ,CAAC,kBAAkB,EAAE,IAAI,CAAC,IAAI,EAAE;AACnF,gBAAgB,cAAc,CAAC,UAAU,GAAG,QAAQ,CAAC,kBAAkB,EAAE,IAAI,CAAC,KAAK,EAAE;AACrF;AACA,gBAAgB,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,IAAI;AACvD,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACnD,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK;AAC7E;AACA,gBAAgB,MAAM;AACtB,oBAAoB,CAAC,KAAK,CAAC,EAAE,CAAC;AAC9B,oBAAoB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,gBAAgB,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AAC7E,oBAAoB,CAAC,QAAQ,EAAE,cAAc,CAAC,CAAC,CAAC,EAAE;AAClD,gBAAgB,MAAM;AACtB,oBAAoB,CAAC,KAAK,CAAC,EAAE,CAAC;AAC9B,oBAAoB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,gBAAgB,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE;AAC9E;AACA,gBAAgB,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI;AACxD,gBAAgB,EAAE,EAAE,gBAAgB,CAAC,CAAC,CAAC;AACvC,oBAAoB,MAAM,CAAC,QAAQ,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE;AAC9E,gBAAgB,CAAC,CAAC,IAAI,CAAC,CAAC;AACxB,oBAAoB,MAAM,CAAC,QAAQ,CAAC,SAAS,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE;AAC/E,gBAAgB,CAAC;AACjB;AACA,gBAAgB,EAAE,CAAC,SAAS,CAAC,OAAO,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI;AAChD,gBAAgB,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1D,gBAAgB,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,WAAW,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvF;AACA,gBAAgB,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,WAAW,CAAC;AAC9E,gBAAgB,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,YAAY,CAAC;AAC9E;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,EAAE;AACpD,oBAAoB,CAAC,KAAK,EAAE,OAAO,EAAE,CAAC,SAAS,EAAE;AACjD,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,EAAE;AACpD,oBAAoB,CAAC,KAAK,EAAE,OAAO,EAAE,CAAC,SAAS,CAAC;AAChD,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI;AAC5E;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,GAAG,UAAU,GAAG,QAAQ,CAAC,kBAAkB,CAAC;AAC9F,oBAAoB,CAAC,IAAI,CAAC,MAAM,EAAE;AAClC,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,GAAG,UAAU,GAAG,QAAQ,CAAC,kBAAkB,CAAC;AAC9F,oBAAoB,CAAC,IAAI,CAAC,MAAM,EAAE;AAClC,YAAY,CAAC;AACb;AACA,YAAY,OAAO,GAAG;AACtB;AACA,QAAQ,GAAG;AACX;AACA,QAAQ,MAAM
,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxD,IAAI,8DAA8D;AAClE;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AACjE,QAAQ,OAAO;AACf,YAAY,CAAC,QAAQ,CAAC,GAAG,CAAC;AAC1B,YAAY,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5C,gBAAgB,MAAM,CAAC,YAAY,GAAG,IAAI,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC9D,YAAY,EAAE;AACd,YAAY,CAAC,IAAI,CAAC,GAAG,CAAC;AACtB,YAAY,CAAC,MAAM,CAAC,KAAK,EAAE;AAC3B,IAAI,GAAG;AACP;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAChE,QAAQ,OAAO,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5B,IAAI,GAAG;AACP;AACA,IAAI,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAChE,QAAQ,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,GAAG,CAAC,IAAI,EAAE;AACxC,QAAQ,GAAG,EAAE,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC;AACzB,YAAY,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,GAAG,CAAC,IAAI,EAAE;AACvC,YAAY,KAAK,CAAC,CAAC,GAAG,CAAC,KAAK;AAC5B,QAAQ,EAAE;AACV,QAAQ,OAAO;AACf,YAAY,CAAC,QAAQ,CAAC,CAAC,CAAC;AACxB,YAAY,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5C,gBAAgB,MAAM,CAAC,WAAW,GAAG,IAAI,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC7D,YAAY,EAAE;AACd,YAAY,CAAC,IAAI,CAAC,GAAG,CAAC;AACtB,YAAY,CAAC,MAAM,CAAC,KAAK,EAAE;AAC3B,IAAI,GAAG;AACP;AACA,IAAI,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC/D,QAAQ,OAAO,CAAC,MAAM,CAAC,IAAI,EAAE;AAC7B,IAAI,GAAG;AACP;AACA,IAAI,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAChE,QAAQ,OAAO,GAAG;AAClB,IAAI,GAAG;AACP;AACA,IAAI,8DAA8D;AAClE;AACA;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,UAAU;AACpC,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC;AACtB,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC;AAC5B;AACA,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACjF,QAAQ,MAAM,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACnF,QAAQ,UAAU,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC3F,QAAQ,WAAW,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AACjG,QAAQ,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAClF,QAAQ,WAAW,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MA
AM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AACjG,QAAQ,WAAW,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AACjG,QAAQ,cAAc,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,cAAc,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,cAAc,CAAC,CAAC,IAAI;AAC1G,QAAQ,cAAc,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,cAAc,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,cAAc,CAAC,CAAC,IAAI;AAC1G,QAAQ,kBAAkB,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,kBAAkB,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,kBAAkB,CAAC,CAAC,IAAI;AACtH,QAAQ,mBAAmB,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,mBAAmB,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,mBAAmB,CAAC,CAAC,IAAI;AACzH;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC;AACtC,gBAAgB,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;AACnC,gBAAgB,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;AAClC,YAAY,CAAC;AACb,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,GAAG;AACX,QAAQ,WAAW,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzE,YAAY,OAAO,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC;AAC7E,YAAY,OAAO,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,OAAO,CAAC,KAAK,CAAC;AAC/E,YAAY,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC;AAChF,YAAY,OAAO,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,OAAO,CAAC,IAAI,CAAC;AAC9E,QAAQ,GAAG;AACX,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,kBAAkB,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACjF,YAAY,kBAAkB,CAAC,CAAC,CAAC,CAAC,CAAC;AACnC,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAClE,YAAY,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzC,YAAY,MAAM,CAAC,KAAK,CAAC,KAAK,EAAE;AAChC,QAAQ,GAAG;AACX,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC5D,YAAY,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AACrB,YAAY,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AACvB,YAAY,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACxB,YAAY,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE;AACtB,YAAY,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AACvB,QAAQ,GAAG;AACX,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC5D,YAAY,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AACrB,YAAY,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AACvB,YAAY,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACxB,YAAY,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE;AACtB,YAAY,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AACvB,QAAQ,GAAG;AACX,QAAQ,gBAAgB,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,gBAAgB,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC1F,YAAY,EAAE,C
AAC,MAAM,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,KAAK;AACnD,YAAY,EAAE,CAAC,gBAAgB,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,gBAAgB,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC;AAChC,gBAAgB,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAChC,gBAAgB,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC;AAC5B;AACA,gBAAgB,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC;AAChC,gBAAgB,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAChC,gBAAgB,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC;AAC5B,YAAY,CAAC;AACb,YAAY,gBAAgB,CAAC,CAAC,CAAC;AAC/B;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,IAAI,GAAG;AAClC,YAAY,MAAM,CAAC,MAAM,EAAE,KAAK,GAAG;AACnC,YAAY,MAAM,CAAC,MAAM,EAAE,IAAI,GAAG;AAClC,YAAY,MAAM,CAAC,MAAM,EAAE,KAAK,GAAG;AACnC,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,KAAK,EAAE;AAC1C,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC;AACA,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;AC5pBF;AACA,EAAE,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACjC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AACvD,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG;AACrB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG;AACtB,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,OAAO,EAAE;AAChC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,EAAE;AAC/B,QAAQ,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,MAAM,CAAC,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG;AACxG,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3C,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3C,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,OAAO,EAAE,CAAC,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,IAAI,CAAC,EAAE,CAAC,MAAM;AACnJ,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI;AACzB,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,KAAK;AACzB,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,OAAO,CAAC,CAAC,UAAU,EAAE,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,QAAQ;AAChH,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,YAAY,EAAE;AACzC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,KAAK;AAC1B,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,MAAM,CAAC,GAAG,CAAC,OAAO,CAAC,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,KAAK,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,KAAK;AACrG,QAAQ,CAAC,CAAC,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,WAAW,CAAC,IAAI,CAAC,QAAQ,CAAC,EAAE,CAAC,WAAW,CAAC,IAAI,CAAC,uBAAuB,CAAC,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,QAAQ;AAC5H,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG;AACxB,QAAQ,CAAC,CAAC,OAAO;AACjB,QAAQ,CAAC,CAAC,OAAO;AACjB,QAAQ,CAAC,CAAC,MAAM;AAChB,QAAQ,CAAC,CAAC,MAAM;AAChB,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5B,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5B,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,UAAU,EAAE,CAAC,CAAC,YAAY,EAAE,CAAC,CAAC,eAAe,EAAE,CAAC,CAAC,gBAAgB,EAAE,CAAC,CAAC,eAAe,EAAE,CAAC,CAAC,gBAAgB,EAAE,CAAC,CAAC,SAAS,EAAE;AACzJ,QAAQ,CAAC;AACT;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAA
I,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,MAAM;AAC9C,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC;AAChE,QAAQ,CAAC;AACT;AACA,IAAI,GAAG,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5B;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AACpE,gBAAgB,eAAe,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AACtE;AACA,YAAY,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AACxC,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC,YAAY,GAAG,CAAC,iBAAiB,CAAC,CAAC,CAAC,CAAC,CAAC;AACtC,YAAY,EAAE,CAAC,IAAI,CAAC,QAAQ,CAAC,OAAO,CAAC,GAAG,CAAC,YAAY,CAAC,GAAG,CAAC,MAAM,CAAC,QAAQ;AACzE,YAAY,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,gBAAgB,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACpF,oBAAoB,MAAM,CAAC,IAAI,CAAC;AAChC,gBAAgB,MAAM,CAAC,KAAK,CAAC;AAC7B,YAAY,EAAE;AACd;AACA,YAAY,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE;AACrD,gBAAgB,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACxD,wBAAwB,MAAM,CAAC,CAAC;AAChC,4BAA4B,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACnC,4BAA4B,CAAC,CAAC,CAAC,CAAC,CAAC;AACjC,4BAA4B,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC;AAC7C,4BAA4B,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE;AACtC,wBAAwB,GAAG;AAC3B,gBAAgB,IAAI;AACpB;AACA,YAAY,EAAE,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC;AAC1B,gBAAgB,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,KAAK,EAAE;AAC9C,oBAAoB,CAAC,MAAM,CAAC,WAAW,CAAC;AACxC,oBAAoB,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE;AAC3D,oBAAoB,CAAC,CAAC,CAAC,IAAI,CAAC;AAC5B,gBAAgB,EAAE,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,EAAE;AAC7D;AACA,gBAAgB,MAAM,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE;AACnD,oBAAoB,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI;AACrE,oBAAoB,EAAE,CAAC,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC,CAAC;AAC9C,wBAAwB,IAAI,CAAC,CAAC,EAAE,kBAAkB,CAAC,CAAC,CAAC,iBAAiB,GAAG;AACzE,wBAAwB,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE;AAC5C,oBAAoB,CAAC,CAAC,IAAI,CAAC,CAAC;AAC5B,wBAAwB,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,YAAY,CAAC,OAAO;AACrF,wBAAwB,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,YAAY,EAAE;AACjE,4BAA4B,MAAM,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE;AAC/D,gCAAgC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC;AAClE,gCAAgC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClD,4BAA4B,GAAG;AAC/B,wBAAwB,CAAC;AACzB,oBAAoB,CAAC;AACrB,gBAAgB,GAAG;AACnB,gBAAgB,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC;AAC9B,YAAY,CAAC;AACb,YAAY,EAAE,GAAG,CAAC,MAAM,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,GAAG,CAAC,SAAS;AACvE,YAAY,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,gBAAgB,MAAM,CAAC,MAAM,CAAC,OAAO,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC;AACvD,oBAAoB,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACrC,oBAAoB,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC3C,
gBAAgB,GAAG;AACnB,YAAY,GAAG;AACf;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,KAAK,CAAC,QAAQ;AAC/C,YAAY,EAAE,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7C,gBAAgB,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClD,oBAAoB,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,oBAAoB,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC;AAC/C,wBAAwB,EAAE,CAAC,EAAE,IAAI,CAAC,GAAG,EAAE,YAAY,CAAC,CAAC,CAAC;AACtD,4BAA4B,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC;AAC/C,4BAA4B,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AACnD,4BAA4B,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC;AACzC,gCAAgC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC;AAC/C,gCAAgC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC;AAC3D,4BAA4B,CAAC,CAAC,IAAI;AAClC,4BAA4B,CAAC;AAC7B,gCAAgC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC;AACxD,gCAAgC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC;AAC3D,4BAA4B,CAAC;AAC7B,wBAAwB,CAAC;AACzB;AACA,oBAAoB,GAAG;AACvB,gBAAgB,GAAG;AACnB,YAAY,CAAC;AACb,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B,YAAY,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,WAAW,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,OAAO;AACpF,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,SAAS;AAChH,gBAAgB,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC;AAC3C,oBAAoB,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvD,wBAAwB,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;AAC1F,oBAAoB,EAAE;AACtB,gBAAgB,GAAG;AACnB;AACA,YAAY,CAAC,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,EAAE,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AACrF,gBAAgB,CAAC,UAAU,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,cAAc,EAAE,CAAC,YAAY,EAAE;AACzE;AACA,YAAY,CAAC,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,EAAE,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAChF,gBAAgB,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACjC,gBAAgB,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,MAAM,CAAC,KAAK,CAAC,EAAE,CAAC,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,SAAS;AACxE,gBAAgB,EAAE,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,EAAE,YAAY,CAAC,CAAC,CAAC;AAC3D,oBAAoB,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACjC,wBAAwB,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACrC,oBAAoB,CAAC,CAAC,IAAI,CAAC,CAAC;AAC5B,wBAAwB,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3C,oBAAoB,CAAC;AACrB,gBAAgB,CAAC;AACjB,gBAAgB,MAAM,CAAC,MAAM,CAAC;AAC9B,YAAY,GAAG,MAAM,CAAC,MAAM,GAAG;AAC/B,YAAY,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG;AACnD;AACA,YAAY,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,QAAQ,CAAC,MAAM,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,KAAK;AAC3H,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,EAAE;AAChD,gBAAgB,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC;AAC/B,oBAAoB,CAAC,CAAC,MAAM,EAAE,CAAC,CA
AC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE;AAC1G,oBAAoB,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,GAAG;AACvC;AACA,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,EAAE;AAChD,gBAAgB,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC;AAC/B,oBAAoB,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE;AAC1G,oBAAoB,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,GAAG;AACvC;AACA,YAAY,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AACzB,YAAY,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AACzB;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,QAAQ,GAAG,IAAI,EAAE,IAAI,GAAG;AACjF,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,QAAQ,GAAG;AAC/F,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,IAAI,GAAG;AACrD,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,GAAG;AAC/C,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,MAAM,GAAG;AAC1D,YAAY,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACxF;AACA,YAAY,SAAS,CAAC,MAAM,EAAE,QAAQ,EAAE;AACxC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;AACjD,gBAAgB,CAAC,MAAM,EAAE,IAAI,GAAG;AAChC,YAAY,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE;AACxD,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,cAAc,CAAC;AAC9C,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,eAAe,EAAE;AACjD;AACA,YAAY,CAAC,CAAC,IAAI,EAAE,IAAI,CAAC,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI;AACjF;AACA,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,MAAM,GAAG,SAAS,GAAG,EAAE,CAAC,KAAK,EAAE;AACzE,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG;AAC5E,YAAY,MAAM,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,EAAE;AACtC,gBAAgB,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AAC9C,gBAAgB,CAAC,KAAK,EAAE,IAAI,CAAC,OAAO,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE;AAC7C;AACA,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,WAAW;AAC5C,gBAAgB,CAAC,UAAU,CAAC,MAAM,CAAC,IAAI,GAAG,SAAS,EAAE,IAAI,CAAC,EAAE,CAAC,GAAG,GAAG,CAAC,CAAC,YAAY,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,EAAE;AAC5G,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,oBAAoB,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;AAC1C,oBAAoB,EAAE,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC;AAClC,wBAAwB,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,EAAE,YAAY,CAAC,CAAC,CAAC;AAC7E,4BAA4B,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,EAAE;AAC5C,wBAAwB,CAAC;AACzB,oBAAoB,CAAC;AACrB,oBAAoB,MAAM,CAAC,IAAI,CAAC;AAChC,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,IAAI,EAAE
,MAAM,EAAE,CAAC,CAAC,CAAC;AAClC,gBAAgB,CAAC,MAAM,GAAG;AAC1B,YAAY,EAAE,CAAC,CAAC,cAAc,CAAC,KAAK,CAAC;AACrC,gBAAgB,cAAc,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,oBAAoB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3E,oBAAoB,MAAM,CAAC,KAAK,CAAC;AACjC,gBAAgB,GAAG;AACnB,YAAY,MAAM;AAClB,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAClF,gBAAgB,CAAC,OAAO,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE;AACjE,gBAAgB,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACnE,gBAAgB,CAAC,KAAK,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AACtE,YAAY,MAAM;AAClB,gBAAgB,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC;AAC3C,gBAAgB,CAAC,KAAK,EAAE,IAAI,CAAC,OAAO,EAAE,CAAC,WAAW,EAAE;AACpD;AACA,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,SAAS,EAAE,IAAI,CAAC,EAAE,CAAC,GAAG,EAAE;AACtD,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,QAAQ,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG;AACtG,YAAY,IAAI,CAAC,IAAI,GAAG,MAAM,GAAG;AACjC;AACA,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,IAAI,EAAE;AACvD,oBAAoB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,GAAG;AACjH,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAChD,wBAAwB,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;AACxG,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE;AAC/G,oBAAoB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,CAAC,CAAC;AACtC,oBAAoB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,EAAE;AACnI,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,EAAE;AACrG,gBAAgB,CAAC;AACjB,YAAY,IAAI;AAChB,gBAAgB,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE,EAAE;AAC1E,gBAAgB,CAAC,KAAK,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE;AAC3E,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,IAAI;AACnG,oBAAoB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,IAAI,EAAE;AAC3D,oBAAoB,QAAQ,CAAC,gBAAgB,EAAE;AAC/C,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,C
AAC,CAAC;AACjC,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,GAAG;AACvB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,oBAAoB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,KAAK,EAAE;AAC5D,oBAAoB,QAAQ,CAAC,eAAe,EAAE;AAC9C,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,GAAG;AACvB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAChD,oBAAoB,QAAQ,CAAC,gBAAgB,EAAE;AAC/C,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,GAAG;AACvB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5C,oBAAoB,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC;AACvC,oBAAoB,QAAQ,CAAC,YAAY,EAAE;AAC3C,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,GAAG;AAC7D,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC;AACxC,wBAAwB,OAAO,CAAC,CAAC,OAAO;AACxC,oBAAoB,GAAG;AACvB,oBAAoB,EAAE,CAAC,KAAK,CAAC,eAAe,GAAG;AAC/C,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,oBAAoB,QAAQ,CAAC,eAAe,EAAE;AAC9C,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AAC5D,oBAAoB,GAAG;AACvB,oBAAoB,EAAE,CAAC,KAAK,CAAC,eAAe,GAAG;AAC/C,gBAAgB,GAAG;AACnB,YAAY,IAAI;AAChB,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,GAAG;AAC7G,gBAAgB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,EAAE;AACjG;AACA,YAAY,EAAE,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC;AAC3B,gBAAgB,EAAE,CAAC,EAAE,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG;AAC/E,gBAAgB,IAAI;AACpB,oBAAoB,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,EAAE,QAAQ,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,GAAG,CAAC,CAAC,GAAG,EAAE,QAAQ,GAAG,CAAC,EAAE;AACvM,oBAAoB,CAAC,KAAK,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,EAAE,QAAQ,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,GAAG,CAAC,CAAC,GAAG,EAAE,QAAQ,GAAG,CAAC,GAAG;AAC1M,YAAY,CAAC;AACb;AACA,YAAY,GAAG,CAAC,YAAY,CAAC,CAAC;AAC9B,gBAAgB,IAAI,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,EAAE;AACtF,oBAAoB,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1C,wBAAwB,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,MAAM,CAAC;AACpE,oBAAoB,GAAG;AACvB,YAAY,EAAE,CAAC,CAAC,OAAO,EAAE;AACzB,gBAAgB,YAAY;AAC5B,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAA
E,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAChD,wBAAwB,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AACrC,wBAAwB,EAAE,CAAC,EAAE,CAAC,SAAS,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,MAAM;AAC/E,wBAAwB,EAAE,CAAC,EAAE,IAAI,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,CAAC;AACpD,4BAA4B,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE;AAC3C,wBAAwB,CAAC,CAAC,IAAI,CAAC,CAAC;AAChC,4BAA4B,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC/C,gCAAgC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5C,4BAA4B,CAAC,CAAC,IAAI,CAAC,CAAC;AACpC,gCAAgC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC9D,oCAAoC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,gCAAgC,CAAC,CAAC,IAAI,CAAC,CAAC;AACxC,oCAAoC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AAC9D,gCAAgC,CAAC;AACjC,4BAA4B,CAAC;AAC7B,wBAAwB,CAAC;AACzB,wBAAwB,MAAM,CAAC,IAAI,CAAC;AACpC,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACrD,wBAAwB,EAAE,CAAC,EAAE,IAAI,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,CAAC;AACpD,4BAA4B,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,GAAG,CAAC,CAAC,EAAE;AAChF,wBAAwB,CAAC,CAAC,IAAI,CAAC,CAAC;AAChC,4BAA4B,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;AACnF,wBAAwB,CAAC;AACzB,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAChD,wBAAwB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;AACtC,wBAAwB,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,CAAC;AACnD,4BAA4B,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC;AAC3E,4BAA4B,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,iBAAiB,EAAE;AACnE,gCAAgC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,kBAAkB,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,IAAI,iBAAiB,CAAC,CAAC,EAAE;AACzG,4BAA4B,CAAC;AAC7B,wBAAwB,CAAC;AACzB,wBAAwB,MAAM,CAAC,KAAK,CAAC;AACrC,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACnD,wBAAwB,EAAE,CAAC,EAAE,IAAI,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,CAAC;AACpD,4BAA4B,MAAM,CAAC,CAAC,CAAC,SAAS,GAAG;AACjD,wBAAwB,CAAC,CAAC,IAAI,CAAC,CAAC;AAChC,4BAA4B,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,WAAW,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,KAAK;AACjF,4BAA4B,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,iBAAiB,EAAE;AAC5E,4BAA4B,EAAE,CAAC,SAAS,CAAC,CAAC,YAAY,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,IAAI,CAAC,KAAK;AAC/F,4BAA4B,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,SAAS;AACjD,4BAA4B,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,iBAAiB,CAAC,CAAC,CAAC;AACpE,gCAAgC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,IAAI,iBAAiB,CAAC,CAAC,EAAE;AAC5E,4BAA4B,CAAC;AAC7B,4BAA4B,MAAM,CAAC,KAAK,CAAC;AACzC,wBAAwB,CAAC;AACzB,oBAAoB,GAAG;AACvB,YAAY,CAAC;AACb,YAAY,IAAI,CAAC,CAAC;AAClB,gBAAgB,YAAY;AAC5B,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,wBAAwB,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC;AACtE,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC;AAC/D,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,wBAAwB,MAAM,
CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,4BAA4B,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClC,gCAAgC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACzD,4BAA4B,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACtC,4BAA4B,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AAC9C,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACnD,wBAAwB,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;AAC9E,oBAAoB,GAAG;AACvB,YAAY,CAAC;AACb;AACA,YAAY,EAAE,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,WAAW,CAAC,EAAE,CAAC,MAAM;AAC/D,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG;AAC1B,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG;AAC1B;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,UAAU,CAAC,YAAY;AACnF,YAAY,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC;AAC5C,gBAAgB,eAAe,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,MAAM,CAAC;AACxD,YAAY,CAAC;AACb;AACA,QAAQ,GAAG;AACX;AACA,QAAQ,WAAW,CAAC,SAAS,EAAE,QAAQ,CAAC,SAAS,GAAG;AACpD;AACA,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B;AACA,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AAC9E,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAChF,QAAQ,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,IAAI;AAC5E,QAAQ,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,IAAI;AAC5E,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI;AACtE,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI;AACtE,QAAQ,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAClF,QAAQ,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAClF,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAChF,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAChF,QAAQ,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAChF,QAAQ,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAClF,QAAQ,WAAW,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AAC9F,QAAQ,QAAQ,CAAC,IAAI,CAAC,GAAG,CAAC,C
AAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AACxF,QAAQ,QAAQ,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AACxF,QAAQ,EAAE,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,EAAE,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,IAAI;AAC5E,QAAQ,QAAQ,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AACxF,QAAQ,YAAY,EAAE,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,IAAI;AAChG,QAAQ,WAAW,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AAC9F;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC3E,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,GAAG;AACX,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACvE,YAAY,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACzB,YAAY,WAAW,CAAC,KAAK,CAAC,QAAQ,EAAE;AACxC,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAClE,YAAY,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzC,QAAQ,GAAG;AACX,QAAQ,QAAQ,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACxE,YAAY,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC;AACvD,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC;AACA,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;AC7aF,EAAE,CAAC,MAAM,CAAC,aAAa,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACtC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,QAAQ,EAAE;AACvC,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAClC,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAClC,QAAQ,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC,EAAE,CAAC,oBAAoB,EAAE;AACtD,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,EAAE;AACrC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,EAAE;AACvC,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,EAAE;AACvC,QAAQ,CAAC;AACT;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC;AAC3D,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI;AACtB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI;AACvB,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,YAAY,EAAE;AACzC,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,IAAI;AAC7B,QAAQ,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,EAAE;AAC5B,QAAQ,CAAC,CAAC,UAAU,CA
AC,CAAC,CAAC,IAAI;AAC3B,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,KAAK;AACjC,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,IAAI,CAAC,KAAK;AAC/E,QAAQ,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,KAAK;AAC/B,QAAQ,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,KAAK;AAC5B,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC;AAC1B,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,GAAG,CAAC,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,EAAE;AAChD,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,GAAG,CAAC,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,EAAE;AAChD,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,KAAK,EAAE;AAClC,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,IAAI;AAC7B,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI;AACvB,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,WAAW,EAAE,CAAC,CAAC,WAAW,EAAE,CAAC,CAAC,SAAS,EAAE;AAC3E,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACrE,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG;AACxB,QAAQ,CAAC,CAAC,uBAAuB,CAAC,CAAC,CAAC,KAAK;AACzC,QAAQ,CAAC;AACT;AACA,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,UAAU,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,aAAa;AAC7E;AACA,IAAI,QAAQ,CAAC,OAAO,CAAC,KAAK,EAAE;AAC5B,IAAI,KAAK;AACT,QAAQ,CAAC,MAAM,EAAE,MAAM,EAAE;AACzB,QAAQ,CAAC,WAAW,CAAC,CAAC,CAAC;AACvB,QAAQ,CAAC,UAAU,CAAC,KAAK,CAAC;AAC1B,QAAQ,CAAC,UAAU,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE;AAC7C,IAAI,CAAC;AACL,IAAI,KAAK;AACT,QAAQ,CAAC,MAAM,EAAE,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE;AACrD,QAAQ,CAAC,UAAU,CAAC,EAAE,CAAC,MAAM,IAAI,EAAE,GAAG;AACtC,IAAI,CAAC;AACL;AACA,IAAI,OAAO;AACX,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC;AACpB,QAAQ,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,YAAY,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5C,QAAQ,EAAE;AACV,QAAQ,CAAC,eAAe,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACzC,YAAY,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5C,QAAQ,GAAG;AACX;AACA,IAAI,gBAAgB,CAAC,OAAO;AAC5B,QAAQ,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,YAAY,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAChE,QAAQ,EAAE;AACV,QAAQ,CAAC,eAAe,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACzC,YAAY,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5C,QAAQ,GAAG;AACX;AACA,IAAI,gBAAgB,CAAC,OAAO;AAC5B,QAAQ,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACzC,YAAY,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAChE,QAAQ,EAAE;AACV,QAAQ,CAAC,eAAe,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1C,YAAY,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5C,QAAQ,GAAG;AACX;AACA,IAAI,gBAAgB,CAAC,OAAO;AAC5B,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC;AACpB,QAAQ,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,YAAY,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5C,QAAQ,EAAE;AACV,QAAQ,CAAC,eAAe,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACzC,YAAY,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5C,QAAQ,GAAG;AACX;AACA,IAAI,QAAQ,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,EAAE;AACrD,IAAI,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,KAAK,CAAC
;AACxB;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,QAAQ,MAAM,CAAC,QAAQ,GAAG;AAC1B,YAAY,MAAM,CAAC,CAAC;AACpB,gBAAgB,MAAM,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG;AACrE,gBAAgB,OAAO,CAAC,CAAC,OAAO;AAChC,YAAY,EAAE;AACd,QAAQ,CAAC;AACT,IAAI,EAAE;AACN;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,QAAQ,MAAM,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC;AAChC,YAAY,EAAE,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,GAAG,CAAC,SAAS,CAAC;AAC5C,gBAAgB,OAAO,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC;AACxC,YAAY,EAAE,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC;AAC3C,gBAAgB,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,oBAAoB,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,EAAE;AACvD,gBAAgB,GAAG;AACnB,QAAQ,CAAC;AACT,IAAI,EAAE;AACN;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,WAAW,CAAC,MAAM,CAAC,QAAQ,EAAE;AACrC,QAAQ,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,EAAE;AACjD,QAAQ,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,EAAE;AACjD;AACA,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAC5C,gBAAgB,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC;AAC5B,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACnF,gBAAgB,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACtF;AACA,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACvC,gBAAgB,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AACnC,oBAAoB,SAAS,CAAC,IAAI,CAAC,KAAK,EAAE;AAC1C,gBAAgB,IAAI;AACpB,oBAAoB,SAAS,CAAC,UAAU,EAAE;AAC1C,wBAAwB,CAAC,QAAQ,CAAC,QAAQ,CAAC;AAC3C,wBAAwB,CAAC,IAAI,CAAC,KAAK,EAAE;AACrC,YAAY,EAAE;AACd,YAAY,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC;AACnC;AACA,YAAY,KAAK;AACjB,gBAAgB,CAAC,MAAM,CAAC,WAAW,CAAC,IAAI,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC;AACxD,gBAAgB,CAAC,MAAM,CAAC,WAAW,CAAC,IAAI,EAAE;AAC1C,gBAAgB,CAAC,MAAM,GAAG;AAC1B;AACA,YAAY,EAAE,CAAC,UAAU,CAAC,GAAG,CAAC,KAAK,CAAC,gBAAgB;AACpD,YAAY,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG;AAC3E;AACA,YAAY,EAAE,CAAC,EAAE,YAAY,CAAC,CAAC,CAAC;AAChC,gBAAgB,GAAG,CAAC,GAAG,CAAC;AACxB,gBAAgB,YAAY,CAAC,CAAC,CAAC,GAAG;AAClC,gBAAgB,GAAG,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC;AACpC,oBAAoB,EAAE,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,UAAU,CAAC,KAAK,CAAC;AACpD,wBAAwB,YAAY,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,EAAE,KAAK,CAAC,CAAC,EAAE;AAChE,oBAAoB,IAAI;AACxB,wBAAwB,YAAY,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,EAAE;AACvD,gBAAgB,CAAC;AACjB,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC;AACjE,YAAY,EAAE,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC;AACvG,gBAAgB,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC;AACjD,gBAAgB,MAAM,CAAC,KAAK,CAAC;AAC7B,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,SAAS,CAAC,SAAS,GAAG,EAAE,CAAC,MAAM,GAAG,MAAM,GAAG;AAC3D,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B,YAAY,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,MAAM,GAAG;AAClC,YAAY,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,MAAM,GAAG;AAClC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAA
AAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACjF,YAAY,QAAQ,CAAC,QAAQ,CAAC,CAAC,EAAE;AACjC,YAAY,MAAM,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,GAAG,GAAG,GAAG,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,QAAQ,KAAK;AAC3F,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,QAAQ,EAAE;AAC7C,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC;AACA,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;AC1XF,EAAE,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACnC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,EAAE;AAC5D,QAAQ,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC;AACzB,QAAQ,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,YAAY,GAAG;AACxC,QAAQ,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC;AACrB,QAAQ,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC;AACtB,QAAQ,UAAU,CAAC,CAAC,CAAC,IAAI,CAAC;AAC1B,QAAQ,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC;AACtB,QAAQ,QAAQ,CAAC;AACjB,QAAQ,QAAQ,CAAC;AACjB,QAAQ,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC1C,QAAQ,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACzC,QAAQ,WAAW,CAAC,CAAC,CAAC,CAAC,MAAM,EAAE;AAC/B,QAAQ,UAAU,CAAC,CAAC,CAAC,IAAI,CAAC;AAC1B,QAAQ,gBAAgB,CAAC,CAAC,CAAC,EAAE,CAAC,oBAAoB,GAAG;AACrD,QAAQ,uBAAuB,CAAC,CAAC,CAAC,KAAK,CAAC;AACxC,QAAQ,mBAAmB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,GAAG;AAC9C,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG;AACtB,QAAQ,CAAC;AACT;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,GAAG;AAC9B,QAAQ,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,GAAG;AACpC,QAAQ,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,GAAG;AACpC;AACA,QAAQ,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,GAAG,MAAM,CAAC,OAAO,EAAE,QAAQ,CAAC,QAAQ,EAAE;AACrE,QAAQ,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,GAAG,MAAM,CAAC,OAAO,EAAE,QAAQ,CAAC,QAAQ,EAAE;AACrE;AACA,QAAQ,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,GAAG,MAAM,CAAC,OAAO,EAAE,QAAQ,CAAC,QAAQ,EAAE;AAC3E,QAAQ,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,GAAG,MAAM,CAAC,OAAO,EAAE,QAAQ,CAAC,QAAQ,EAAE;AAC3E;AACA,QAAQ,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,QAAQ,GAAG,OAAO,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,QAAQ,CAAC,QAAQ,EAAE;AACvF,QAAQ,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,QAAQ,GAAG,OAAO,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,QAAQ,CAAC,QAAQ,EAAE;AACvF;AACA,QAAQ,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,WAAW,GAAG,MAAM,CAAC,OAAO,EAAE,QAAQ,CAAC,QAAQ,EAAE;AAC5E,QAAQ,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,WAAW,GAAG,MAAM,CAAC,OAAO,EAAE,QAAQ,CAAC,QAAQ,EAAE;AAC5E;AACA,QAAQ,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,GAAG,KAAK,CAAC,CAAC,EAAE,MAAM,EAAE,MAAM,GAAG,WAAW,CAAC,CAAC,EAAE,QAAQ,CAAC,QAAQ,EAAE;AAC7F,QAAQ,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,GAAG,KAAK,CAAC,OAAO,EAAE,MAAM,EAAE,IAAI,GAAG,QAAQ,CAAC,QAAQ,EAAE;AACnF,QAAQ,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,GAAG,KAAK,CAAC,OAAO,EAAE,MAAM,EAAE,KAAK,GAAG,QAAQ,CAAC,QAAQ,EAAE;AACpF;AACA,QAAQ,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,GAAG,MAAM,CAAC,EAAE,EAAE;AAC/C,QAAQ,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,GAAG;AACtC,QAAQ,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,GAAG;AACjC;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,SAAS,CAAC,CAAC,KAAK,CAAC,CAAC,K
AAK,CAAC,CAAC,MAAM,CAAC,CAAC,MAAM,EAAE;AACtF;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAC5C,gBAAgB,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC;AAC5B,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC;AACA,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC,UAAU,GAAG,IAAI,CAAC,KAAK,EAAE,CAAC,EAAE;AAC9E,YAAY,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC;AACnC;AACA,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACnF,gBAAgB,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACtF;AACA,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,GAAG;AAChG,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,GAAG;AAChG,YAAY,GAAG,CAAC,aAAa,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,GAAG;AACtG,YAAY,GAAG,CAAC,aAAa,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,GAAG;AACtG,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,EAAE,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,GAAG,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,GAAG;AAChG,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,EAAE,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,GAAG,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,GAAG;AAChG,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,GAAG;AAChG,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,GAAG;AAChG;AACA,YAAY,EAAE,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC;AACjE,YAAY,EAAE,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC;AACvG,gBAAgB,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,SAAS,EAAE;AAClD,gBAAgB,MAAM,CAAC,KAAK,CAAC;AAC7B,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,SAAS,CAAC,SAAS,GAAG,EAAE,CAAC,MAAM,GAAG,MAAM,GAAG;AAC3D,YAAY,CAAC;AACb;AACA,YAAY,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,EAAE;AACvF,gBAAgB,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAClC,oBAAoB,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvD,wBAAwB,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AACzD,oBAAoB,EAAE;AACtB,gBAAgB,GAAG;AACnB;AACA,YAAY,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,EAAE;AACvF,gBAAgB,CAAC,GAAG,CAA
C,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAClC,oBAAoB,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvD,wBAAwB,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AACzD,oBAAoB,EAAE;AACtB,gBAAgB,GAAG;AACnB;AACA,YAAY,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AACjG,gBAAgB,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,cAAc,GAAG;AAC5C;AACA,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,IAAI,CAAC,UAAU,GAAG,IAAI,EAAE,IAAI,GAAG;AAC7E,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,UAAU,GAAG,MAAM,EAAE,CAAC,GAAG;AACpG;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG;AAC7D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,GAAG;AAC9D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,GAAG;AAC9D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,UAAU,GAAG;AAC3D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,UAAU,GAAG;AAC3D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,SAAS,GAAG;AAC1D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,SAAS,GAAG;AAC1D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,aAAa,GAAG;AAC9D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,aAAa,GAAG;AAC9D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,UAAU,GAAG;AAC3D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,UAAU,GAAG;AAC3D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,UAAU,GAAG;AAC3D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,WAAW,GAAG;AAC/D;AACA,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACtD,gBAAgB,MAAM,CAAC,IAAI,CAAC,CAAC,EAAE,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACpD,YAAY,GAAG;AACf;AACA,YAAY,EAAE,CAAC,MAAM;AACrB,YAAY,EAAE,CAAC,EAAE,UAAU,CAAC,CAAC,CAAC;AAC9B,gBAAgB,CAAC,CAAC,MAAM,GAAG,UAAU,GAAG,SAAS,MAAM,MAAM,GAAG;AAChE,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC;AACvF,gBAAgB,GAAG,CAAC,eAAe,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC;AACvE;AACA,gBAAgB,MAAM,CAAC,KAAK,CAAC,WAAW,EAAE;AAC1C,gBAAgB,MAAM,CAAC,KAAK,CAAC,WAAW,EAAE;AAC1C;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,UAAU,EAAE;AACvC,oBAAoB,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC;AACtD,wBAAwB,MAAM,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,WAAW,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,WAAW,CAAC;AAChH,wBAAwB,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,mBAAmB,EAAE;AACzG,wBAAwB,MAAM,CAAC,MAAM,CAAC;AACtC,oBAAoB,GAAG;AACvB,oBAAoB,CAAC,IAAI,CAAC,MAAM,EAAE;AAClC;AACA,gBAAgB,EAAE,CAAC,EAAE,SAAS,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC;AACnE,oBAAoB,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,GAAG;AACjD,oBAAoB,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AA
C1F,gBAAgB,CAAC;AACjB;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,UAAU,EAAE;AACvC,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,CAAC,CAAC,MAAM;AAClG,YAAY,CAAC;AACb;AACA,YAAY,MAAM;AAClB,gBAAgB,CAAC,KAAK,CAAC,cAAc,CAAC;AACtC,gBAAgB,CAAC,MAAM,CAAC,eAAe,CAAC;AACxC,gBAAgB,CAAC,WAAW,CAAC,WAAW,CAAC;AACzC,gBAAgB,CAAC,KAAK,CAAC,WAAW,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,KAAK;AACtI,YAAY,MAAM;AAClB,gBAAgB,CAAC,KAAK,CAAC,cAAc,CAAC;AACtC,gBAAgB,CAAC,MAAM,CAAC,eAAe,CAAC;AACxC,gBAAgB,CAAC,WAAW,CAAC,WAAW,CAAC;AACzC,gBAAgB,CAAC,KAAK,CAAC,WAAW,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,KAAK;AACtI,YAAY,SAAS;AACrB,gBAAgB,CAAC,KAAK,CAAC,cAAc,CAAC;AACtC,gBAAgB,CAAC,MAAM,CAAC,eAAe,CAAC;AACxC,gBAAgB,CAAC,KAAK,CAAC,WAAW,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,KAAK;AACzI,YAAY,SAAS;AACrB,gBAAgB,CAAC,KAAK,CAAC,cAAc,CAAC;AACtC,gBAAgB,CAAC,MAAM,CAAC,eAAe,CAAC;AACxC,gBAAgB,CAAC,KAAK,CAAC,WAAW,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,KAAK;AACzI,YAAY,KAAK;AACjB,gBAAgB,CAAC,KAAK,CAAC,cAAc,CAAC;AACtC,gBAAgB,CAAC,MAAM,CAAC,eAAe,CAAC;AACxC,gBAAgB,CAAC,KAAK,CAAC,WAAW,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,GAAG,KAAK;AACrI,YAAY,KAAK;AACjB,gBAAgB,CAAC,KAAK,CAAC,cAAc,CAAC;AACtC,gBAAgB,CAAC,MAAM,CAAC,eAAe,CAAC;AACxC,gBAAgB,CAAC,KAAK,CAAC,WAAW,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,GAAG,KAAK;AACrI,YAAY,MAAM;AAClB,gBAAgB,CAAC,KAAK,CAAC,cAAc,CAAC;AACtC,gBAAgB,CAAC,MAAM,CAAC,eAAe,CAAC;AACxC,gBAAgB,CAAC,WAAW,CAAC,WAAW,CAAC;AACzC,gBAAgB,CAAC,KAAK,CAAC,WAAW,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,KAAK;AACtI,YAAY,MAAM;AAClB,gBAAgB,CAAC,KAAK,CAAC,cAAc,CAAC;AACtC,gBAAgB,CAAC,MAAM,CAAC,eAAe,CAAC;AACxC,gBAAgB,CAAC,WAAW,CAAC,WAAW,CAAC;AACzC,gBAAgB,CAAC,KAAK,CAAC,WAAW,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,KAAK;AACtI;AACA,YAAY,CAAC,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACrF;AACA,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,UAAU,EAAE;AACpD,gBAAgB,CAAC,KAAK,CAAC,UAAU,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,C
AAC,QAAQ,IAAI;AAC3E,YAAY,GAAG,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,aAAa,EAAE;AAC1D,gBAAgB,CAAC,KAAK,CAAC,aAAa,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,IAAI;AAC9E,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,SAAS,EAAE;AAClD,gBAAgB,CAAC,KAAK,CAAC,SAAS,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,IAAI;AAC1E,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,UAAU,EAAE;AACpD,gBAAgB,CAAC,KAAK,CAAC,UAAU,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,IAAI;AAC3E,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,UAAU,EAAE;AACpD,gBAAgB,CAAC,KAAK,CAAC,UAAU,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,IAAI;AAC3E,YAAY,GAAG,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,aAAa,EAAE;AAC1D,gBAAgB,CAAC,KAAK,CAAC,aAAa,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,IAAI;AAC9E,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,SAAS,EAAE;AAClD,gBAAgB,CAAC,KAAK,CAAC,SAAS,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,IAAI;AAC1E,YAAY,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,UAAU,EAAE;AACpD,gBAAgB,CAAC,KAAK,CAAC,UAAU,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,IAAI;AAC3E;AACA,YAAY,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,MAAM,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE;AACpH,gBAAgB,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG;AACtF,YAAY,GAAG,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG;AACzC,YAAY,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,MAAM,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE;AACpH,gBAAgB,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG;AACtF,YAAY,GAAG,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG;AACzC;AACA,YAAY,OAAO,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,OAAO,EAAE,MAAM,CAAC,WAAW,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACtH,gBAAgB,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,eAAe,GAAG;AAC7C;AACA,YAAY,OAAO,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,OAAO,EAAE,MAAM,CAAC,WAAW,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACtH,gBAAgB,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,eAAe,GAAG;AAC7C;AACA,YAAY,MAAM,CAAC,OAAO,CAAC,OAAO,CAAC,MAAM,IAAI;AAC7C,YAAY,SAAS,CAAC,OAAO,CAAC,OAAO,CAAC,MAAM,IAAI;AAChD,YAAY,KAAK,CAAC,OAAO,CAAC,OAAO,CAAC,MAAM,IAAI;AAC5C,YAAY,MAAM,CAAC,OAAO,CAAC,OAAO,CAAC,MAAM,IAAI;AAC7C;AACA,YAAY,MAAM,CAAC,OAAO,CAAC,OAAO,CAAC,MAAM,IAAI;AAC7C,YAAY,SAAS,CAAC,OAAO,CAAC,OAAO,CAAC,MAAM,IAAI;AAChD,YAAY,KAAK,CAAC,OAAO,CAAC,OAAO,CAAC,MAAM,IAAI;AAC5C,YAAY,MAAM,CAAC,OAAO,CAAC,OAAO,CAAC,MAAM,IAAI;AAC7C;AACA,YAAY,EAAE,CAAC,UAAU,CAAC,MAAM,EAAE,EAAE,CAAC,UAAU,CAAC,UAAU,EAAE,IAAI,CAAC,MAAM,GAAG;AAC1E,YAAY,EAAE,CAAC,UAAU,CAAC,MAAM,EAAE,EAAE,CAAC,UAAU,CAAC,UAAU,EAAE,IAAI,CAAC,MAAM,GAAG;AAC1E;AACA,YAAY,EAAE,CAAC,SAAS,CAAC,MAAM,EAAE,EAAE,CAAC,UAAU,CAAC,SAAS,EAAE,IAAI,CAAC,KAAK,GAAG;AACvE,YAAY,EAAE,CAAC,SAAS,CAAC,MAAM,EAAE,EAAE,CAAC,UAAU,CAAC,SAAS,EAAE,IAAI,CAAC,KAAK,GAAG;AACvE;AACA,YAAY,EAAE,CAAC,UAAU,CAAC,MA
AM,EAAE,EAAE,CAAC,UAAU,CAAC,UAAU,EAAE,IAAI,CAAC,MAAM,GAAG;AAC1E,YAAY,EAAE,CAAC,UAAU,CAAC,MAAM,EAAE,EAAE,CAAC,UAAU,CAAC,UAAU,EAAE,IAAI,CAAC,MAAM,GAAG;AAC1E;AACA,YAAY,EAAE,CAAC,aAAa,CAAC,MAAM,EAAE,EAAE,CAAC,UAAU,CAAC,aAAa,EAAE,IAAI,CAAC,SAAS,GAAG;AACnF,YAAY,EAAE,CAAC,aAAa,CAAC,MAAM,EAAE,EAAE,CAAC,UAAU,CAAC,aAAa,EAAE,IAAI,CAAC,SAAS,GAAG;AACnF;AACA,YAAY,KAAK;AACjB,gBAAgB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,cAAc,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AACxE,gBAAgB,CAAC,QAAQ,EAAE,eAAe,CAAC,CAAC,CAAC,EAAE;AAC/C;AACA,YAAY,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACrC,gBAAgB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,KAAK;AAC3E,YAAY,EAAE,CAAC,UAAU,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG;AACpD,gBAAgB,CAAC,IAAI,CAAC,KAAK,EAAE;AAC7B;AACA,YAAY,MAAM;AAClB,gBAAgB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,eAAe,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AACxE,gBAAgB,CAAC,QAAQ,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE;AAC/C;AACA;AACA,YAAY,EAAE,CAAC,UAAU,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,GAAG;AACrD,gBAAgB,CAAC,IAAI,CAAC,MAAM,EAAE;AAC9B;AACA,YAAY,MAAM;AAClB,gBAAgB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,eAAe,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AACxE,gBAAgB,CAAC,QAAQ,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE;AAC/C;AACA,YAAY,EAAE,CAAC,UAAU,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,GAAG;AACrD,gBAAgB,CAAC,IAAI,CAAC,MAAM,EAAE;AAC9B;AACA,YAAY,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,EAAE;AACtC,gBAAgB,CAAC,OAAO,EAAE,EAAE,CAAC,QAAQ,EAAE,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC;AACtE,gBAAgB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI;AACxE;AACA,YAAY,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,EAAE;AACtC,gBAAgB,CAAC,OAAO,EAAE,EAAE,CAAC,QAAQ,EAAE,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC;AACtE,gBAAgB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI;AACxE;AACA,YAAY,MAAM,CAAC,QAAQ,CAAC,EAAE,EAAE,WAAW,EAAE,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC;AAClE,gBAAgB,KAAK,CAAC,MAAM,GAAG;AAC/B,YAAY,GAAG;AACf;AACA,YAAY,EAAE,CAAC,uBAAuB,EAAE;AACxC,gBAAgB,gBAAgB;AAChC,oBAAoB,CAAC,KAAK,CAAC,cAAc,CAAC;AAC1C,oBAAoB,CAAC,MAAM,CAAC,eAAe,CAAC;AAC5C,oBAAoB,CAAC,MAAM,EAAE,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,EAAE;AAC/D,oBAAoB,CAAC,YAAY,CAAC,SAAS,CAAC;AAC5C,oBAAoB,CAAC,MAAM,CAAC,CAAC,EAAE;AAC/B,gBAAgB,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,WAAW,GAAG,IAAI,CAAC,gBAAgB,EAAE;AACtE,YAAY,CAAC;AACb;AACA,YAAY,8DAA8D;AAC1E,YAAY,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,WAAW;AACzC,YAAY,8DAA8D;AAC1E;AACA,YAAY,QAAQ,CAAC,cAAc,CAAC,GAAG,CAAC,CAAC,CAAC;AAC1C,gBAAgB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,WAAW,EAAE,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAChF,gBAAgB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC;AACxC,gBAAgB,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC;AAC9B,oBAAoB,KAAK,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC;AACvC,oBAAoB,KAAK,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC;AAC3C,oBAAoB,GAAG,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG;AACvC,gBAAgB,EAAE;AAClB,gBAAgB,OAAO;AACvB,oBAAoB,CAAC,QAAQ,CAAC,CAAC,CAAC;AAChC,oBAAoB,CAAC,eAAe,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACrD,qBAAqB,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AACrD,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,wBA
AwB,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AACxD,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,CAAC,GAAG,CAAC;AAC9B,oBAAoB,CAAC,MAAM,CAAC,KAAK,EAAE;AACnC,YAAY,CAAC;AACb;AACA,YAAY,QAAQ,CAAC,iBAAiB,CAAC,GAAG,CAAC,CAAC,CAAC;AAC7C,gBAAgB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,WAAW,EAAE,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAChF,gBAAgB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC;AACxC,gBAAgB,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC;AAC9B,oBAAoB,KAAK,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC;AACvC,oBAAoB,KAAK,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC;AAC3C,oBAAoB,GAAG,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG;AACvC,gBAAgB,EAAE;AAClB,gBAAgB,OAAO;AACvB,oBAAoB,CAAC,QAAQ,CAAC,GAAG,CAAC;AAClC,oBAAoB,CAAC,eAAe,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACrD,qBAAqB,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AACrD,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,wBAAwB,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AACxD,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,CAAC,GAAG,CAAC;AAC9B,oBAAoB,CAAC,MAAM,CAAC,KAAK,EAAE;AACnC,YAAY,CAAC;AACb;AACA,YAAY,QAAQ,CAAC,eAAe,CAAC,GAAG,CAAC,CAAC,CAAC;AAC3C,gBAAgB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,WAAW,EAAE,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAChF,gBAAgB,GAAG,CAAC,KAAK,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,GAAG,GAAG,CAAC,KAAK,EAAE;AACvD,gBAAgB,GAAG,CAAC,KAAK,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,GAAG,GAAG,CAAC,KAAK,EAAE;AACvD,gBAAgB,OAAO;AACvB,oBAAoB,CAAC,QAAQ,CAAC,CAAC,CAAC;AAChC,oBAAoB,CAAC,eAAe,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACrD,qBAAqB,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AACrD,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,wBAAwB,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AACxD,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,CAAC,GAAG,CAAC;AAC9B,oBAAoB,CAAC,MAAM,CAAC,KAAK,EAAE;AACnC,YAAY,CAAC;AACb;AACA,YAAY,QAAQ,CAAC,aAAa,CAAC,GAAG,CAAC,CAAC,CAAC;AACzC,gBAAgB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM,EAAE,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAChF;AACA,gBAAgB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,GAAG,CAAC,IAAI,EAAE;AAChD,gBAAgB,GAAG,EAAE,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC;AACjC,oBAAoB,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,GAAG,CAAC,IAAI,EAAE;AAC/C,oBAAoB,KAAK,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC;AACrC,oBAAoB,GAAG,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG;AACrC,gBAAgB,EAAE;AAClB,gBAAgB,OAAO;AACvB,oBAAoB,CAAC,QAAQ,CAAC,CAAC,CAAC;AAChC,oBAAoB,CAAC,eAAe,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACrD,qBAAqB,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AACrD,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,wBAAwB,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AACxD,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,CAAC,GAAG,CAAC;AAC9B,oBAAoB,CAAC,MAAM,CAAC,KAAK,EAAE;AACnC,YAAY,CAAC;AACb;AACA;AACA;AACA,YAAY,QAAQ,CAAC,eAAe,EAAE,CAAC,CAAC;AACxC,cAAc,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI;AAC1D,gBAAgB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE;AACtC,gBAAgB,GAAG,CAAC,CAAC;AACrB,kBAAkB,KAAK,CAAC,eAAe,GAAG;AAC1C,gBAAgB,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG;AAC5B,cAAc,CAAC;AACf,YAAY,CAAC;AACb;AACA,YAAY,QAAQ,CAAC,cAAc,CAAC,UAAU,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,EAAE;AAC/D,cAAc,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,
CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI;AAC1D,gBAAgB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE;AACtC,gBAAgB,GAAG,CAAC,CAAC;AACrB,kBAAkB,KAAK,CAAC,cAAc,CAAC,UAAU,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,EAAE;AAClE,gBAAgB,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG;AAC5B,cAAc,CAAC;AACf,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,uBAAuB,EAAE;AACxC,gBAAgB,gBAAgB,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9E,oBAAoB,eAAe,GAAG;AACtC,oBAAoB,GAAG,CAAC,WAAW,CAAC,CAAC,UAAU,CAAC,CAAC,cAAc,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG;AAC9E,oBAAoB,IAAI;AACxB,oBAAoB,CAAC,MAAM,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,wBAAwB,MAAM,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,wBAAwB,MAAM,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC;AAChD,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,wBAAwB,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG;AAChD,wBAAwB,GAAG,CAAC,aAAa,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAChF,4BAA4B,MAAM,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,EAAE;AAC9F,wBAAwB,GAAG;AAC3B;AACA,wBAAwB,UAAU,CAAC,CAAC,CAAC,EAAE,CAAC,iBAAiB,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,KAAK,CAAC,CAAC,IAAI;AACnG,wBAAwB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,aAAa,CAAC,UAAU,EAAE;AAC9D,wBAAwB,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC,UAAU,EAAE;AACvE,wBAAwB,EAAE,CAAC,CAAC,WAAW,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC;AACnD,4BAA4B,cAAc,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,IAAI,EAAE;AAChE,wBAAwB,CAAC;AACzB,wBAAwB,EAAE,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,MAAM,CAAC;AACxD,wBAAwB,EAAE,CAAC,CAAC,WAAW,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC;AAC3E,wBAAwB,EAAE,CAAC,CAAC,cAAc,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,KAAK,CAAC,UAAU,GAAG;AAC1G,wBAAwB,OAAO,CAAC,IAAI,EAAE;AACtC,4BAA4B,GAAG,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC5C,4BAA4B,KAAK,CAAC,CAAC,WAAW,CAAC;AAC/C,4BAA4B,KAAK,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,MAAM,CAAC,WAAW,EAAE;AACpE,4BAA4B,IAAI,CAAC,CAAC,KAAK,CAAC;AACxC,4BAA4B,KAAK,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM;AACtE,wBAAwB,GAAG;AAC3B,oBAAoB,GAAG;AACvB;AACA,oBAAoB,GAAG,CAAC,qBAAqB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/D,wBAAwB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,EAAE,KAAK,CAAC;AACrD,wBAAwB,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,EAAE;AACzE,oBAAoB,EAAE;AACtB;AACA,oBAAoB,gBAAgB,CAAC,OAAO;AAC5C,wBAAwB,CAAC,eAAe,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACzD,4BAA4B,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5D,wBAAwB,EAAE;AAC1B,wBAAwB,CAAC,cAAc,CAAC,gBAAgB,CAAC,OAAO,CAAC,cAAc,EAAE,CAAC,EAAE,CAAC,qBAAqB,CAAC;AAC3G,wBAAwB,CAAC,IAAI,EAAE;AAC/B,4BAA4B,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,WAAW,CAAC,UAAU,CAAC,EAAE;AACvE,4BAA4B,KAAK,CAAC,CAAC,UAAU,CAAC;AAC9C,4BAA4B,MAAM,CAAC,CAAC,OAAO;AAC3C,wBAAwB,KAAK;AAC7B;AACA,oBAAoB,gBAAgB,CAAC,eAAe,CAAC,cAAc,EAAE;AACrE,gBAAgB,GAAG;AACnB;AACA,gBAAgB,gBAAgB,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,EAAE,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5E,oBAAoB,eAAe,GAAG;AACtC,gBAAgB,GAAG;AACnB,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,MAAM,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,cAAc,EAAE;AAC/E,gBAAgB,MAAM,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,cAAc,EAAE;AAC/E,gBAAgB,MAAM,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,C
AAC;AAC7E,oBAAoB,OAAO,CAAC,MAAM,CAAC,IAAI,CAAC;AACxC,gBAAgB,GAAG;AACnB,gBAAgB,MAAM,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC7E,oBAAoB,OAAO,CAAC,MAAM,CAAC,IAAI,CAAC;AACxC,gBAAgB,GAAG;AACnB;AACA,gBAAgB,SAAS,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,iBAAiB,EAAE;AACrF,gBAAgB,SAAS,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,iBAAiB,EAAE;AACrF,gBAAgB,SAAS,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAChF,oBAAoB,OAAO,CAAC,MAAM,CAAC,IAAI,CAAC;AACxC,gBAAgB,GAAG;AACnB,gBAAgB,SAAS,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAChF,oBAAoB,OAAO,CAAC,MAAM,CAAC,IAAI,CAAC;AACxC,gBAAgB,GAAG;AACnB;AACA,gBAAgB,MAAM,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,eAAe,EAAE;AAChF,gBAAgB,MAAM,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,eAAe,EAAE;AAChF,gBAAgB,MAAM,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC7E,oBAAoB,OAAO,CAAC,MAAM,CAAC,IAAI,CAAC;AACxC,gBAAgB,GAAG;AACnB,gBAAgB,MAAM,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC7E,oBAAoB,OAAO,CAAC,MAAM,CAAC,IAAI,CAAC;AACxC,gBAAgB,GAAG;AACnB;AACA,gBAAgB,KAAK,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,aAAa,EAAE;AAC7E,gBAAgB,KAAK,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,aAAa,EAAE;AAC7E;AACA,gBAAgB,KAAK,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC5E,oBAAoB,OAAO,CAAC,MAAM,CAAC,IAAI,EAAE;AACzC,gBAAgB,GAAG;AACnB,gBAAgB,KAAK,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC5E,oBAAoB,OAAO,CAAC,MAAM,CAAC,IAAI,EAAE;AACzC,gBAAgB,GAAG;AACnB,gBAAgB,KAAK,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC7E,oBAAoB,OAAO,GAAG;AAC9B,gBAAgB,GAAG;AACnB,gBAAgB,KAAK,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC7E,oBAAoB,OAAO,GAAG;AAC9B,gBAAgB,GAAG;AACnB,YAAY,CAAC;AACb,QAAQ,GAAG;AACX;AACA,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,GAAG,CAAC,OAAO;AACjC,IAAI,8DAA8D;AAClE;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;AAChC,IAAI,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;AAChC,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC;AAC5B,IAAI,KAAK,CAAC,gBAAgB,CAAC,CAAC,CAAC,gBAAgB,CAAC;AAC9C;AACA,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACjF,QAAQ,MAAM,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACnF,QAAQ,UAAU,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAA
C,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC3F,QAAQ,QAAQ,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AAC1F,QAAQ,QAAQ,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AACxF,QAAQ,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAClF,QAAQ,WAAW,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AACjG,QAAQ,mBAAmB,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,mBAAmB,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,mBAAmB,CAAC,CAAC,IAAI;AACzH;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC;AACtC,gBAAgB,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;AACnC,gBAAgB,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;AAClC,YAAY,CAAC;AACb,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAClE,YAAY,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzC,QAAQ,GAAG;AACX,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC5D,YAAY,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AACrB,YAAY,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACxB,YAAY,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACxB,YAAY,SAAS,CAAC,CAAC,CAAC,CAAC,EAAE;AAC3B,YAAY,SAAS,CAAC,CAAC,CAAC,CAAC,EAAE;AAC3B,YAAY,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AACvB,YAAY,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AACvB,YAAY,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACxB,YAAY,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACxB,QAAQ,GAAG;AACX,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC5D,YAAY,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AACrB,YAAY,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACxB,YAAY,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACxB,YAAY,SAAS,CAAC,CAAC,CAAC,CAAC,EAAE;AAC3B,YAAY,SAAS,CAAC,CAAC,CAAC,CAAC,EAAE;AAC3B,YAAY,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACxB,YAAY,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACxB,YAAY,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AACvB,YAAY,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AACvB,QAAQ,GAAG;AACX,QAAQ,UAAU,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC3E,YAAY,UAAU,CAAC,CAAC,CAAC;AACzB,YAAY,MAAM,CAAC,UAAU,CAAC,CAAC,EAAE;AACjC,YAAY,MAAM,CAAC,UAAU,CAAC,CAAC,EAAE;AACjC,YAAY,MAAM,CAAC,UAAU,CAAC,CAAC,EAAE;AACjC,YAAY,MAAM,CAAC,UAAU,CAAC,CAAC,EAAE;AACjC,QAAQ,GAAG;AACX;AACA,QAAQ,uBAAuB,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,uBAAuB,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACrG,YAAY,uBAAuB,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,YAAY,EAAE,CAAC,CAAC,uBAAuB,CAAC,CAAC,CAAC;AAC1C,gBAAgB,MAAM,CAAC,WAAW,CAAC,KAAK,EAAE;AAC1C,gBAAgB,MAAM,CAAC,UAAU,CAAC,KAAK,EAAE;AACzC,gBAAgB,MAAM,CAAC,WAAW,CAAC,KAAK,EAAE;AAC1C,gBAAgB,MAAM,CAAC,UAAU,CAAC,KAAK,EAAE;AACzC,gBAAgB,MAAM,CAAC,WAAW,CAAC,KAAK,EAAE;AAC1C,
gBAAgB,MAAM,CAAC,UAAU,CAAC,KAAK,EAAE;AACzC,gBAAgB,MAAM,CAAC,WAAW,CAAC,KAAK,EAAE;AAC1C,gBAAgB,MAAM,CAAC,UAAU,CAAC,KAAK,EAAE;AACzC,gBAAgB,SAAS,CAAC,WAAW,CAAC,KAAK,EAAE;AAC7C,gBAAgB,SAAS,CAAC,WAAW,CAAC,KAAK,EAAE;AAC7C,YAAY,CAAC;AACb,QAAQ,GAAG;AACX;AACA,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACxE,YAAY,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACzB,YAAY,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,SAAS,CAAC,CAAC,KAAK,CAAC,CAAC,MAAM,CAAC,CAAC,MAAM,EAAE,OAAO,CAAC,QAAQ,CAAC,KAAK,EAAE;AAClH,cAAc,KAAK,CAAC,QAAQ,CAAC,QAAQ,EAAE;AACvC,YAAY,GAAG;AACf,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC;AACA,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;ACrlBF;AACA,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAChC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AACvD,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI;AACtB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI;AACvB,QAAQ,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,MAAM,CAAC,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG;AACxG,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,EAAE;AAC/B,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,EAAE;AAC/B,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3C,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3C,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC;AACjD,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC;AACnD,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC;AACjD,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AAC/C,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE;AACrB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE;AACrB,QAAQ,CAAC,CAAC,OAAO,KAAK,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,KAAK;AACtI,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI;AACzB,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,YAAY,EAAE;AACzC,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,KAAK;AAC7B,QAAQ,CAAC,CAAC,OAAO;AACjB,QAAQ,CAAC,CAAC,OAAO;AACjB,QAAQ,CAAC,CAAC,MAAM;AAChB,QAAQ,CAAC,CAAC,MAAM;AAChB,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,WAAW,EAAE,CAAC,CAAC,WAAW,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,UAAU,EAAE,CAAC,CAAC,YAAY,EAAE,CAAC,CAAC,eAAe,EAAE,CAAC,CAAC,gBAAgB,EAAE,CAAC,CAAC,eAAe,EAAE,CAAC,CAAC,gBAAgB,EAAE;AACvL,QAAQ,CAAC;AACT;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AACxC,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,
CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACnF,gBAAgB,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACtF;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC;AAC9B,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClE;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B,YAAY,CAAC,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,CAAC,IAAI,EAAE,MAAM,CAAC,MAAM,CAAC,CAAC,GAAG;AACrF;AACA,YAAY,EAAE,CAAC,CAAC,OAAO,CAAC;AACxB,gBAAgB,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,MAAM,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,MAAM,CAAC,GAAG;AAC1J,YAAY,IAAI;AAChB,gBAAgB,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AACvE;AACA,YAAY,CAAC,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,CAAC;AACjC,oBAAoB,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,GAAG;AACtE,oBAAoB,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,CAAC,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE;AACtE,gBAAgB,CAAC;AACjB,YAAY,EAAE,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG;AACpD;AACA,YAAY,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,QAAQ,CAAC,MAAM,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,KAAK;AAC3H,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,EAAE;AAChD,gBAAgB,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC;AAC/B,oBAAoB,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE;AAC1G,oBAAoB,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,GAAG;AACvC;AACA,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,EAAE;AAChD,gBAAgB,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC;AAC/B,oBAAoB,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE;AAC1G,oBAAoB,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,GAAG;AACvC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,OAAO,GAAG,IAAI,EAAE,IAAI,CAAC,CAAC,EAAE,MAAM,GAAG;AAChG,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,OAAO,GAAG;AAC9F,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,IAAI,GAAG;AACrD,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,GAAG;AAC/C,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,GAAG;AACzD;AACA,YAAY,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK
;AACxF;AACA,YAAY,SAAS;AACrB,gBAAgB,CAAC,EAAE,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5C,oBAAoB,QAAQ,CAAC,UAAU,EAAE;AACzC,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,GAAG,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC;AACtC,wBAAwB,EAAE,CAAC,CAAC,EAAE;AAC9B,oBAAoB,GAAG;AACvB,gBAAgB,GAAG;AACnB;AACA,YAAY,SAAS,CAAC,MAAM,EAAE,QAAQ,EAAE;AACxC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;AACvD,gBAAgB,CAAC,MAAM,EAAE,IAAI,GAAG;AAChC;AACA,YAAY,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE;AAC9D,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,cAAc,CAAC;AAC9C,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,eAAe,EAAE;AACjD;AACA,YAAY,CAAC,GAAG,CAAC,IAAI,EAAE,IAAI,CAAC,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI;AAC1F;AACA,YAAY,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,KAAK,GAAG,SAAS,GAAG,EAAE,CAAC,IAAI,EAAE;AACtE,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG;AAChD,YAAY,KAAK,CAAC,IAAI,GAAG,MAAM,GAAG;AAClC;AACA,YAAY,KAAK,CAAC,KAAK,GAAG,MAAM,EAAE,IAAI,EAAE;AACxC,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC/J,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1C,oBAAoB,MAAM,CAAC,CAAC,EAAE,CAAC,GAAG,EAAE;AACpC,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE;AAC1C,4BAA4B,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,GAAG;AAC9C,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7B,wBAAwB,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,CAAC,CAAC,EAAE,EAAE,CAAC;AAC/B,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,wBAAwB,CAAC,CAAC,EAAE,GAAG,EAAE;AACjC,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,GAAG;AAC5D,wBAAwB,CAAC,CAAC,CAAC,EAAE,EAAE;AAC/B,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE;AAC3C,4BAA4B,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG;AAC7C,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7B,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,wBAAwB,CAAC,CAAC,EAAE,EAAE,CAAC;AAC/B,wBAAwB,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,CAAC,CAAC,EAAE,EAAE,EAAE;AAChC,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE;AACvH,gBAAgB,CAAC,IAAI,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,EAAE;AACjE,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,EAAE;AACnE,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;AAC9B,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE;AAC/E,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,
CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG;AACxF;AACA,YAAY,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,UAAU,CAAC,EAAE,CAAC,GAAG,CAAC,SAAS;AAC7D,YAAY,KAAK,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,gBAAgB,MAAM,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5H,YAAY,GAAG;AACf;AACA,YAAY,EAAE,CAAC,UAAU,CAAC,KAAK,CAAC;AAChC,gBAAgB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE;AACvH,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1C,oBAAoB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1E,oBAAoB,MAAM,CAAC,CAAC,EAAE,CAAC,GAAG,EAAE;AACpC,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAE;AAC1C,4BAA4B,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,GAAG;AAC9C,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7B,wBAAwB,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,CAAC,CAAC,EAAE,EAAE,CAAC;AAC/B,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,wBAAwB,CAAC,CAAC,EAAE,GAAG,EAAE;AACjC,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACzC,4BAA4B,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,GAAG;AAC9C,wBAAwB,CAAC,CAAC,CAAC,EAAE,EAAE;AAC/B,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE;AAC3C,4BAA4B,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG;AAC7C,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7B,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,wBAAwB,CAAC,CAAC,EAAE,EAAE,CAAC;AAC/B,wBAAwB,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,CAAC,CAAC,EAAE,EAAE,EAAE;AAChC,gBAAgB,GAAG;AACnB,QAAQ,GAAG;AACX;AACA,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA;AACA,IAAI,EAAE,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,CAAC,EAAE,CAAC,SAAS,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC;AAC5E,IAAI,KAAK,CAAC,cAAc,CAAC,CAAC,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC;AAC9D,QAAQ,KAAK,CAAC,eAAe,GAAG;AAChC,QAAQ,SAAS,CAAC,MAAM,GAAG,EAAE,CAAC,OAAO,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,UAAU,CAAC;AAChE,YAAY,CAAC,OAAO,EAAE,KAAK,EAAE,CAAC,WAAW,CAAC;AAC1C,QAAQ,CAAC;AACT,IAAI,EAAE;AACN;AACA,IAAI,KAAK,CAAC,eAAe,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACxC,QAAQ,SAAS,CAAC,MAAM,GAAG,EAAE,CAAC,OAAO,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,KAAK,EAAE;AACtD,YAAY,CAAC,OAAO,EAAE,KAAK,EAAE,CAAC,KAAK,CAAC;AACpC,QAAQ,CAAC;AACT,IAAI,EAAE;AACN;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AAC/E,QAAQ,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACjF,QAAQ,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,
AAC;AAC3C,gBAAgB,GAAG,CAAC,QAAQ,CAAC,QAAQ,EAAE;AACvC,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,SAAS,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACpE,YAAY,EAAE,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC;AACjC,gBAAgB,KAAK,CAAC,MAAM,EAAE,IAAI,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,GAAG;AACnE;AACA,gBAAgB,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,GAAG,CAAC,KAAK,EAAE;AAC5C,oBAAoB,CAAC,KAAK,EAAE,IAAI,CAAC,MAAM,EAAE,CAAC,CAAC,MAAM,EAAE;AACnD,oBAAoB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,wBAAwB,MAAM,CAAC,KAAK,CAAC;AACrC,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,KAAK,EAAE,IAAI,CAAC,IAAI,EAAE,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,cAAc,CAAC,CAAC,eAAe,EAAE,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE;AACjI,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,UAAU,CAAC,MAAM,CAAC,IAAI;AAC5E,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvD,wBAAwB,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,WAAW,CAAC,CAAC,CAAC,IAAI;AAClE,oBAAoB,GAAG;AACvB,YAAY,CAAC;AACb;AACA,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,GAAG,GAAG,SAAS,GAAG,EAAE,CAAC,KAAK,GAAG,IAAI,CAAC,GAAG,EAAE;AACjF,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,SAAS,GAAG,SAAS,GAAG,EAAE,CAAC,KAAK,GAAG,IAAI,CAAC,GAAG,EAAE;AAC1F;AACA,YAAY,MAAM,CAAC,IAAI,GAAG,MAAM,GAAG;AACnC,YAAY,SAAS,CAAC,IAAI,GAAG,MAAM,GAAG;AACtC;AACA,YAAY,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG;AAChD,YAAY,EAAE,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,GAAG;AACzC,YAAY,EAAE,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,gBAAgB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,IAAI,EAAE;AACvD,gBAAgB,EAAE,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC;AAClC,oBAAoB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,MAAM,EAAE,IAAI,GAAG,UAAU,EAAE;AAC/D,wBAAwB,CAAC,QAAQ,CAAC,EAAE,CAAC;AACrC,wBAAwB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,GAAG;AAChD,gBAAgB,CAAC;AACjB,gBAAgB,QAAQ,CAAC,gBAAgB,EAAE;AAC3C,oBAAoB,IAAI,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC;AACjC,oBAAoB,KAAK,CAAC,CAAC,CAAC,CAAC;AAC7B,oBAAoB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,GAAG;AACzD,oBAAoB,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC;AACxE,gBAAgB,GAAG;AACnB,YAAY,GAAG;AACf,YAAY,EAAE,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,gBAAgB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,KAAK,EAAE;AACxD,gBAAgB,EAAE,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC;AAClC,oBAAoB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,MAAM,EAAE,IAAI,GAAG,UAAU,EAAE;AAC/D,wBAAwB,CAAC,QAAQ,CAAC,EAAE,CAAC;AACrC,wBAAwB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,GAAG;AAC5C,gBAAgB,CAAC;AACjB,gBAAgB,QAAQ,CAAC,eAAe,EAAE,IAAI,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG;AACnE,YAAY,GAAG;AACf,YAAY,EAAE,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,gBAAgB,QAAQ,CAAC,gBAAgB,EAAE,IAAI,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG;AACpE,YAAY,GAAG;AACf,YAAY,EAAE,CAAC,EAAE,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3C,gBAAgB,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC;AACnC,gBAAgB,QAAQ,CAAC,YAAY,EAAE;AACvC,oBAAoB,IAAI,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC;AACjC,oBAAoB,KAAK,CAAC,CAAC,CAAC,CAAC;AAC7B,oBAAoB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,GAAG;AACz
D,oBAAoB,KAAK,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC;AACpC,oBAAoB,OAAO,CAAC,CAAC,OAAO;AACpC,gBAAgB,GAAG;AACnB,YAAY,GAAG;AACf,YAAY,EAAE,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,gBAAgB,QAAQ,CAAC,eAAe,EAAE;AAC1C,oBAAoB,IAAI,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC;AACjC,oBAAoB,KAAK,CAAC,CAAC,CAAC,CAAC;AAC7B,oBAAoB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE;AACxD,gBAAgB,GAAG;AACnB,YAAY,GAAG;AACf;AACA,YAAY,MAAM,CAAC,IAAI,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG;AAC5E,YAAY,MAAM,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG;AAC9E;AACA,YAAY,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,EAAE,IAAI,GAAG,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5D,gBAAgB,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAClC,YAAY,GAAG;AACf;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,IAAI,EAAE;AACjC,gBAAgB,CAAC,UAAU,EAAE;AAC7B,gBAAgB,CAAC,QAAQ,CAAC,QAAQ,CAAC;AACnC,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE;AAClE,gBAAgB,CAAC,SAAS,EAAE,CAAC,EAAE,CAAC,QAAQ,EAAE;AAC1C;AACA,YAAY,EAAE,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC;AAC7B,gBAAgB,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,KAAK;AAC7C,gBAAgB,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,GAAG;AACnC,gBAAgB,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AAC1D,oBAAoB,SAAS,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,GAAG;AAC5C;AACA,oBAAoB,EAAE,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC;AACxC,wBAAwB,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;AACpC,4BAA4B,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,GAAG,GAAG,WAAW,CAAC,IAAI,CAAC,CAAC,EAAE,WAAW,IAAI;AAC3F,4BAA4B,EAAE,CAAC,CAAC,UAAU,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,UAAU,CAAC,UAAU,EAAE;AAC1F,4BAA4B,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,QAAQ,CAAC,QAAQ,EAAE;AACpF,wBAAwB,CAAC;AACzB,oBAAoB,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,KAAK,CAAC,CAAC,CAAC;AACxC,4BAA4B,SAAS,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,EAAE;AACxD,oBAAoB,CAAC;AACrB,gBAAgB,CAAC;AACjB;AACA,gBAAgB,SAAS,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,OAAO,EAAE,EAAE,CAAC,KAAK,EAAE,IAAI,EAAE,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3F,oBAAoB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAChD;AACA,oBAAoB,KAAK,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7D,wBAAwB,EAAE,CAAC,CAAC,kBAAkB,CAAC,CAAC,CAAC;AACjD,4BAA4B,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,UAAU;AAC5F,4BAA4B,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,UAAU;AAC5F,4BAA4B,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,EAAE;AAChG,4BAA4B,EAAE,CAAC,EAAE,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC;AAC5E,gCAAgC,WAAW,CAAC,EAAE,CAAC,EAAE,CAAC;AAClD,4BAA4B,CAAC,CAAC,IAAI,CAAC,CAAC;AACpC,gCAAgC,WAAW,CAAC,EAAE,CAAC,EAAE,CAAC;AAClD,4BAA4B,CAAC;AAC7B,4BAA4B,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,IAAI;AAC7G,wBAAwB,CAAC,CAAC,IAAI,CAAC,CAAC;AAChC,4BAA4B,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,
MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,UAAU;AAChF,4BAA4B,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,UAAU;AAChF,4BAA4B,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AAChF,wBAAwB,CAAC;AACzB,oBAAoB,GAAG;AACvB;AACA,oBAAoB,KAAK,CAAC,MAAM,EAAE,IAAI,EAAE;AACxC,wBAAwB,CAAC,KAAK,EAAE,MAAM,EAAE,CAAC,EAAE,GAAG,EAAE;AAChD,wBAAwB,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,EAAE,GAAG,EAAE;AAC9C,wBAAwB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC;AACtC,wBAAwB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,EAAE;AACvC;AACA,oBAAoB,KAAK,CAAC,MAAM,EAAE,IAAI,EAAE;AACxC,wBAAwB,CAAC,KAAK,EAAE,IAAI,CAAC,MAAM,EAAE,CAAC,kBAAkB,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,EAAE,CAAC,EAAE,MAAM,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,EAAE,CAAC,UAAU,CAAC,OAAO;AAChN,wBAAwB,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,EAAE,GAAG,EAAE;AAC9C,gBAAgB,GAAG;AACnB;AACA,gBAAgB,GAAG,CAAC,iBAAiB,CAAC,CAAC,CAAC,GAAG;AAC3C,gBAAgB,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC;AACnC,gBAAgB,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC;AACnC,gBAAgB,GAAG,CAAC,aAAa,CAAC,CAAC,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC,CAAC;AAC3D,oBAAoB,MAAM,CAAC,IAAI,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;AACnI,gBAAgB,EAAE;AAClB,gBAAgB,GAAG,CAAC,kBAAkB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACtD,oBAAoB,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,EAAE;AACvE,gBAAgB,EAAE;AAClB;AACA,gBAAgB,SAAS,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,GAAG,CAAC,MAAM,GAAG,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACxG,oBAAoB,EAAE,CAAC,CAAC,kBAAkB,CAAC,CAAC,CAAC;AAC7C,wBAAwB,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,UAAU;AACxF,wBAAwB,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,UAAU;AACxF,wBAAwB,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,EAAE;AAC5F,wBAAwB,EAAE,CAAC,EAAE,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC;AACxE,4BAA4B,WAAW,CAAC,EAAE,CAAC,EAAE,CAAC;AAC9C,wBAAwB,CAAC,CAAC,IAAI,CAAC,CAAC;AAChC,4BAA4B,WAAW,CAAC,EAAE,CAAC,EAAE,CAAC;AAC9C,wBAAwB,CAAC;AACzB,wBAAwB,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,IAAI;AACzG,oBAAoB,CAAC,CAAC,IAAI,CAAC,CAAC;AAC5B,wBAAwB,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,UAAU;AAC5E,wBAAwB,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,UAAU;AAC5E;AACA,wBAAwB,EAAE;AAC1B,wBAAwB,WAAW,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,OAAO,CAAC,WAAW,CAAC;AAC9G,wBAAwB,IAAI,CAAC,KAAK,CAAC,QAAQ,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,OAAO,CAAC;AAC5G,wBAAwB,MAAM,CAAC,GA
AG,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,EAAE,CAAC,MAAM,CAAC,GAAG,CAAC,OAAO,CAAC;AAC5E,wBAAwB,EAAE;AAC1B,wBAAwB,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,EAAE;AAC9D,wBAAwB,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,kBAAkB,CAAC,CAAC,EAAE;AAC5D,wBAAwB,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,cAAc,CAAC,CAAC,CAAC;AACnE,4BAA4B,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,aAAa,CAAC,MAAM,EAAE;AAChE,4BAA4B,EAAE,CAAC,CAAC,iBAAiB,CAAC,OAAO,EAAE,CAAC,CAAC;AAC7D,gCAAgC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,SAAS,CAAC;AACvD,4BAA4B,CAAC;AAC7B,4BAA4B,iBAAiB,CAAC,aAAa,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC;AAC5E,wBAAwB,CAAC;AACzB,wBAAwB,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG;AAC1D,oBAAoB,CAAC;AACrB,gBAAgB,GAAG;AACnB;AACA,gBAAgB,SAAS,CAAC,MAAM,GAAG,EAAE,CAAC,KAAK,CAAC,IAAI,EAAE;AAClD,oBAAoB,CAAC,KAAK,EAAE,IAAI,CAAC,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACzD,wBAAwB,EAAE,MAAM,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,EAAE,CAAC,UAAU,CAAC,OAAO;AAC3F,wBAAwB,MAAM,CAAC,kBAAkB,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,EAAE;AAC7H,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1C,wBAAwB,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,kBAAkB,CAAC,CAAC,EAAE;AAC5D,wBAAwB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG;AACvC,wBAAwB,EAAE,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,MAAM,CAAC,GAAG;AAC5E;AACA,wBAAwB,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC7D,4BAA4B,KAAK,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACrD,gCAAgC,CAAC,GAAG,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,EAAE;AACpD,gCAAgC,CAAC,KAAK,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,EAAE;AACtD,gCAAgC,CAAC,OAAO,EAAE,CAAC,WAAW,CAAC,OAAO,CAAC;AAC/D,4BAA4B,GAAG;AAC/B,wBAAwB,CAAC,CAAC,IAAI,CAAC,CAAC;AAChC,4BAA4B,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAChD,gCAAgC,IAAI,CAAC,CAAC,GAAG,EAAE;AAC3C,oCAAoC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,EAAE;AACzD,oCAAoC,KAAK,CAAC;AAC1C,gCAAgC,IAAI,CAAC,CAAC,KAAK,EAAE;AAC7C,oCAAoC,KAAK,CAAC,CAAC,CAAC,WAAW,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,GAAG;AACtE,oCAAoC,KAAK,CAAC;AAC1C,gCAAgC,IAAI,CAAC,CAAC,OAAO,EAAE;AAC/C,oCAAoC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,MAAM,OAAO,EAAE;AACpE,oCAAoC,KAAK,CAAC;AAC1C,4BAA4B,CAAC;AAC7B,wBAAwB,CAAC;AACzB,wBAAwB,MAAM,CAAC,KAAK,CAAC;AACrC,oBAAoB,EAAE;AACtB,gBAAgB,CAAC;AACjB,YAAY,CAAC;AACb;AACA;AACA,YAAY,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,UAAU,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,OAAO,CAAC;AAChF,YAAY,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,gBAAgB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;AACxE,gBAAgB,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5C,YAAY,CAAC;AACb;AACA,YAAY,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC;AACvC,gBAAgB,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC;AAChE,gBAAgB,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC;AACtE,gBAAgB,EAAE,CAAC,EAAE,KAAK,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,gBAAgB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,WAAW,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC
,EAAE;AACzD,gBAAgB,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACrC,gBAAgB,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACrC,oBAAoB,MAAM,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,GAAG;AAC3C,gBAAgB,EAAE;AAClB,YAAY,CAAC;AACb,QAAQ,GAAG;AACX;AACA,QAAQ,WAAW,CAAC,SAAS,EAAE,GAAG,CAAC,SAAS,GAAG;AAC/C,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,UAAU,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACvG,QAAQ,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACjF,QAAQ,MAAM,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACnF,QAAQ,UAAU,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC3F,QAAQ,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACjF,QAAQ,WAAW,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AACjG,QAAQ,cAAc,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,cAAc,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,cAAc,CAAC,CAAC,IAAI;AACvG,QAAQ,WAAW,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AACjG,QAAQ,CAAC,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,IAAI;AAC/E,QAAQ,EAAE,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,EAAE,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,IAAI;AAC3E,QAAQ,QAAQ,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AACvF,QAAQ,UAAU,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC3F,QAAQ,QAAQ,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AACvF,QAAQ,YAAY,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,IAAI;AACjG,QAAQ,UAAU,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC7F,QAAQ,aAAa,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,aAAa,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,aAAa,CAAC,CAAC,IAAI;AACpG,QAAQ,kBAAkB,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,kBAAkB,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,kBAAkB,CAAC,CAAC,IAAI;AACnH,QAAQ,KAAK,CAAC,cAAc,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACzF,QAAQ,WAAW,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AACrG;AACA,QAAQ,EAAE,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClC,QAAQ,gBAAgB,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,
MAAM,CAAC,aAAa,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACpF,YAAY,aAAa,CAAC,CAAC,CAAC;AAC5B,YAAY,EAAE,CAAC,UAAU,EAAE,gBAAgB,EAAE,CAAC,CAAC,GAAG,CAAC,aAAa,CAAC,OAAO,GAAG;AAC3E,QAAQ,GAAG;AACX,QAAQ,EAAE,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClC,QAAQ,kBAAkB,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,aAAa,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACtF,YAAY,aAAa,CAAC,CAAC,CAAC;AAC5B,YAAY,EAAE,CAAC,UAAU,EAAE,kBAAkB,EAAE,CAAC,CAAC,GAAG,CAAC,aAAa,CAAC,OAAO,GAAG;AAC7E,QAAQ,GAAG;AACX,QAAQ,EAAE,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACjC,QAAQ,WAAW,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,CAAC,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/E,YAAY,WAAW,CAAC,CAAC,CAAC;AAC1B,YAAY,EAAE,CAAC,UAAU,EAAE,WAAW,GAAG,GAAG,CAAC,WAAW,CAAC,OAAO,GAAG;AACnE,QAAQ,GAAG;AACX;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AACnF,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AACrF,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AACtF,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,GAAG,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AACpF,QAAQ,GAAG;AACX,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACvE,YAAY,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACzB,YAAY,WAAW,CAAC,KAAK,CAAC,QAAQ,EAAE;AACxC,QAAQ,GAAG;AACX,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAC5D,YAAY,IAAI,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,EAAE;AAC/B,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACjE,YAAY,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACvC,QAAQ,GAAG;AACX,QAAQ,SAAS,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAClF,YAAY,SAAS,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,GAAG,EAAE;AAClC,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;ACnbF,EAAE,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACjC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,GAAG,GAAG;AAC9B,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,GAAG;AACpC,IAAI,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,GAAG;AACtC;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC;AAC3D,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI;AACtB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI;AACvB,QAAQ,CAAC,CAAC,kBAAkB,CAAC,CAAC,CAAC,KAAK;AACpC,QAAQ,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,IAAI;AAC3B,QAAQ,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;AAChC,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,YAAY,EAAE;AACzC,QAAQ,CAAC,CAA
C,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,KAAK,EAAE;AAClC,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,IAAI;AAC7B,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI;AACvB,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG;AACxB,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,WAAW,EAAE,CAAC,CAAC,WAAW,GAAG,SAAS,EAAE;AAC1E,QAAQ,CAAC;AACT;AACA,IAAI,OAAO;AACX,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC;AACpB,QAAQ,CAAC,aAAa,CAAC,KAAK,CAAC;AAC7B,QAAQ,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,YAAY,MAAM,CAAC,GAAG,CAAC,WAAW,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC3C,QAAQ,GAAG;AACX;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,EAAE;AACrD;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,QAAQ,MAAM,CAAC,QAAQ,GAAG;AAC1B,YAAY,MAAM,CAAC,CAAC;AACpB,gBAAgB,MAAM,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,EAAE;AACpE,YAAY,EAAE;AACd,QAAQ,CAAC;AACT,IAAI,EAAE;AACN;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,QAAQ,MAAM,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC;AAChC,YAAY,EAAE,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC;AAC7C,gBAAgB,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACnD,oBAAoB,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,EAAE;AACvD,gBAAgB,GAAG;AACnB,YAAY,CAAC;AACb,QAAQ,CAAC;AACT,IAAI,EAAE;AACN;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,KAAK,CAAC,QAAQ;AACrB,IAAI,8DAA8D;AAClE;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,WAAW,CAAC,MAAM,CAAC,GAAG,EAAE;AAChC;AACA,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAC5C,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC;AACA,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC;AAC5B,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACnF,gBAAgB,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACtF;AACA,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC,UAAU,GAAG,IAAI,CAAC,KAAK,EAAE,CAAC,EAAE;AAC9E,YAAY,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC;AACnC;AACA,YAAY,KAAK,CAAC,MAAM,CAAC,WAAW,CAAC,IAAI,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC;AACzD,gBAAgB,CAAC,MAAM,CAAC,WAAW,CAAC,IAAI,EAAE;AAC1C,gBAAgB,CAAC,MAAM,GAAG;AAC1B;AACA,YAAY,EAAE,GAAG,CAAC,KAAK,CAAC,QAAQ;AAChC,YAAY,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG;AAC3E;AACA,YAAY,EAAE,CAAC,EAAE,YAAY,CAAC,CAAC,CAAC;AAChC,gBAAgB,GAAG,CAAC,GAAG,CAAC;AACxB,gBAAgB,YAAY,CAAC,CAAC,CAAC,GAAG;AAClC,gBAAgB,GAAG,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC;AACpC,oBAAoB,EAAE,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,UAAU,CAAC,KAAK,CAAC;AACpD,wBAAwB,YAAY,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,EAAE,KAAK,CAAC,CAAC,EAAE;AAChE,oBAAoB,IAAI;AACxB,wBAAwB,YAAY,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,EAAE;AACvD,gBAAgB,CAAC;AACjB,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC;AAClE,YAAY,EAAE,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC;AACxC,gBAAgB,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,SAAS,EAAE;AAClD,gBAAgB,MAAM,CAAC,KAAK,CAAC;AAC7B,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,SAAS,CAAC,SAAS,GAAG,EAAE,CAAC,MAAM,GAAG,MAAM,GAAG;AAC3D,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GA
AG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,QAAQ,GAAG,IAAI,EAAE,IAAI,GAAG;AACjF,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,QAAQ,GAAG,MAAM,EAAE,CAAC,GAAG;AACxG,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,OAAO,GAAG;AAC3D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,UAAU,GAAG;AAC9D;AACA,YAAY,EAAE,CAAC,MAAM;AACrB,YAAY,EAAE,CAAC,EAAE,UAAU,CAAC,CAAC,CAAC;AAC9B,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,GAAG,SAAS,MAAM,MAAM,GAAG;AACnE,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,EAAE,CAAC,CAAC,cAAc,CAAC,GAAG,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC;AAC/C,oBAAoB,MAAM,CAAC,KAAK,CAAC,CAAC,cAAc,CAAC,EAAE,GAAG,CAAC,GAAG,CAAC,CAAC,IAAI;AAChE;AACA,oBAAoB,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,EAAE;AACjD,wBAAwB,CAAC,KAAK,CAAC,IAAI,CAAC;AACpC,wBAAwB,CAAC,IAAI,CAAC,MAAM,EAAE;AACtC;AACA,oBAAoB,EAAE,CAAC,EAAE,SAAS,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC;AACvE,wBAAwB,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,GAAG;AACrD,wBAAwB,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AAC9F,oBAAoB,CAAC;AACrB;AACA,oBAAoB,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,EAAE;AACjD,wBAAwB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,CAAC,CAAC,MAAM;AAChF,gBAAgB,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,cAAc,CAAC,GAAG,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC;AACxD,oBAAoB,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,GAAG,KAAK,GAAG;AACjE,oBAAoB,EAAE,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC;AAC3D,wBAAwB,WAAW,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1D,oBAAoB,CAAC;AACrB,oBAAoB,MAAM,CAAC,MAAM,CAAC,eAAe,EAAE,GAAG,CAAC,GAAG,CAAC,CAAC,IAAI;AAChE,oBAAoB,MAAM,CAAC,KAAK,CAAC,WAAW,EAAE;AAC9C,oBAAoB,cAAc,CAAC,EAAE,CAAC,MAAM,CAAC,KAAK,GAAG;AACrD;AACA,oBAAoB,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,EAAE;AACjD,wBAAwB,CAAC,KAAK,CAAC,IAAI,CAAC;AACpC,wBAAwB,CAAC,IAAI,CAAC,MAAM,CAAC;AACrC,wBAAwB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,GAAG,CAAC,IAAI;AACnF,gBAAgB,CAAC;AACjB,YAAY,CAAC;AACb,YAAY,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACxF;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AACtC,YAAY,GAAG,CAAC,KAAK,CAAC,cAAc,EAAE,MAAM,CAAC,eAAe,EAAE;AAC9D,YAAY,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,OAAO,GAAG,KAAK,EAAE,IAAI,GAAG;AAChE,YAAY,EAAE,CAAC,UAAU,CAAC,OAAO,EAAE,IAAI,CAAC,GAAG,EAAE;AAC7C;AACA,YAAY,8DAA8D;AAC1E,YAAY,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AAC5D,YAAY,8DAA8D;AAC1E;AACA,YAAY,MAAM,CAAC,QAAQ,CAAC,EAAE,EAAE,WAAW,EAAE,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC;AAClE,gBAAgB,GAAG,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC;AAC3C,oBAAoB,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG,EAAE;AAC/C,gBAAgB,CAAC;AACjB,gBAAgB,QAAQ,CAAC,WAAW,CAAC,KAAK,EAAE;AAC5C,gBAAgB,KAAK,CAAC,MAAM,GAAG;AAC/B,YAAY,GAAG;AACf;AACA,YAAY,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,OAAO;AACvE,YAAY,QAAQ,CAAC,EAAE,EAAE,WAAW,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,gBAAgB,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,S
AAS,EAAE,CAAC,CAAC;AACxD,oBAAoB,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACrD,wBAAwB,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACxD,oBAAoB,GAAG;AACvB,oBAAoB,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC;AAChD,gBAAgB,CAAC;AACjB,gBAAgB,KAAK,CAAC,MAAM,GAAG;AAC/B,YAAY,GAAG;AACf,QAAQ,GAAG;AACX;AACA,QAAQ,WAAW,CAAC,SAAS,EAAE,QAAQ,CAAC,SAAS,GAAG;AACpD,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxD,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC/D,QAAQ,GAAG,EAAE,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC;AACzB,YAAY,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,GAAG,CAAC,IAAI,EAAE;AACrC,YAAY,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,GAAG,CAAC,IAAI,EAAE;AACvC,YAAY,KAAK,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC;AAC7B,YAAY,OAAO,CAAC,CAAC,GAAG,CAAC,OAAO;AAChC,QAAQ,EAAE;AACV,QAAQ,EAAE,CAAC,EAAE,kBAAkB,CAAC,CAAC,CAAC;AAClC,YAAY,MAAM,CAAC,GAAG,CAAC,OAAO,CAAC;AAC/B,YAAY,MAAM,CAAC,GAAG,CAAC,MAAM,CAAC,OAAO,CAAC;AACtC,QAAQ,CAAC;AACT,QAAQ,OAAO,CAAC,IAAI,CAAC,GAAG,EAAE,MAAM,CAAC,KAAK,EAAE;AACxC,IAAI,GAAG;AACP;AACA,IAAI,GAAG,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC9D,QAAQ,OAAO,CAAC,MAAM,CAAC,IAAI,EAAE;AAC7B,IAAI,GAAG;AACP;AACA,IAAI,GAAG,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC/D,QAAQ,OAAO,GAAG;AAClB,IAAI,GAAG;AACP;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,UAAU;AACpC,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC;AACpB,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC;AAC5B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,aAAa,CAAC,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,SAAS;AAC/E,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,cAAc,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,gBAAgB,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACxG,QAAQ,MAAM,CAAC,aAAa,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,eAAe,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACzG,QAAQ,MAAM,CAAC,aAAa,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,eAAe,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACzG,QAAQ,kBAAkB,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,kBAAkB,GAAG,GAAG,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,kBAAkB,CAAC,CAAC,IAAI;AACrH,QAAQ,UAAU,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,WAAW,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC7G,QAAQ,cAAc,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,cAAc,GAAG,OAAO,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,cAAc,CAAC,CAAC,IAAI;AACjH,QAAQ,YAAY,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,YAAY,GAAG,SAAS,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,IAAI;AAC/G;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACjE,YAAY,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;AACtB,YAAY,MAAM,CAAC,KAAK,CAAC,KAAK,EAAE;AAChC,YAAY,GAAG,CAAC,KAAK,CAAC,KAAK,EAA
E;AAC7B,QAAQ,GAAG;AACX,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACvE,YAAY,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACzB,YAAY,WAAW,CAAC,KAAK,CAAC,QAAQ,EAAE;AACxC,YAAY,GAAG,CAAC,QAAQ,CAAC,QAAQ,EAAE;AACnC,QAAQ,GAAG;AACX,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC;AACtC,gBAAgB,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;AACnC,gBAAgB,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;AAClC,YAAY,CAAC;AACb,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,EAAE;AACV,IAAI,GAAG;AACP,IAAI,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,GAAG,EAAE;AACxC,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;AC1PF,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC/B,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,EAAE,CAAC,OAAO,CAAC;AACf,IAAI,EAAE,CAAC,CAAC,CAAC,KAAK,GAAG,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC;AAC3C,IAAI,EAAE,CAAC,CAAC,CAAC,KAAK,GAAG,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,MAAM;AAC7D;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG;AACpB,QAAQ,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC;AACvB,QAAQ,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC;AACxB,QAAQ,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACtB,QAAQ,KAAK,CAAC,CAAC,CAAC,GAAG;AACnB,QAAQ,KAAK,CAAC,CAAC,CAAC,GAAG;AACnB,QAAQ,UAAU,CAAC,CAAC,CAAC,IAAI,CAAC;AAC1B;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC;AACvC,QAAQ,gBAAgB,GAAG;AAC3B,QAAQ,iBAAiB,GAAG;AAC5B,QAAQ,mBAAmB,GAAG;AAC9B,QAAQ,iBAAiB,CAAC,UAAU,EAAE;AACtC,IAAI,EAAE;AACN;AACA,IAAI,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC/B,QAAQ,iBAAiB,GAAG;AAC5B,IAAI,EAAE;AACN;AACA,IAAI,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,OAAO,CAAC,SAAS,CAAC;AACxF,IAAI,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC3B,QAAQ,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3B;AACA,QAAQ,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1B;AACA,YAAY,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC;AAC9C,gBAAgB,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC;AAChC,gBAAgB,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,iBAAiB,CAAC,EAAE,CAAC,CAAC,EAAE,EAAE;AAClD,gBAAgB,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,SAAS,EAAE;AACnC,gBAAgB,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE;AACvC,gBAAgB,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;AAClD,gBAAgB,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;AAClD,YAAY,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE;AAC9C,gBAAgB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE;AACrC,gBAAgB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EA
AE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE;AACrC,gBAAgB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC;AACtC,YAAY,MAAM,CAAC,QAAQ,CAAC;AAC5B,QAAQ,CAAC;AACT;AACA,QAAQ,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACtC,YAAY,EAAE,CAAC,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,SAAS,CAAC;AACpD,YAAY,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3B,YAAY,MAAM,CAAC,IAAI,CAAC;AACxB,QAAQ,EAAE;AACV;AACA,QAAQ,MAAM,CAAC,IAAI,CAAC;AACpB,IAAI,EAAE;AACN;AACA,IAAI,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC;AAC1C,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACjC,QAAQ,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;AACpC,IAAI,EAAE;AACN;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,WAAW,CAAC,GAAG,CAAC,WAAW,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC;AAC9D,IAAI,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,OAAO,CAAC;AAC/E,IAAI,QAAQ,CAAC,gBAAgB,EAAE,CAAC,CAAC;AACjC,QAAQ,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,YAAY,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,MAAM,CAAC;AACnD,YAAY,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,GAAG;AAClC,YAAY,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,MAAM,CAAC;AACnD,YAAY,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,GAAG;AAClC,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC;AACrC,gBAAgB,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC;AACrC,YAAY,EAAE,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,MAAM,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE;AACtF,YAAY,EAAE,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,MAAM,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE;AACtF,YAAY,MAAM,CAAC,WAAW,CAAC,IAAI,CAAC,IAAI,EAAE;AAC1C,YAAY,MAAM,CAAC,WAAW,CAAC,IAAI,CAAC,IAAI,EAAE;AAC1C,QAAQ,GAAG;AACX,IAAI,CAAC;AACL;AACA,IAAI,EAAE,CAAC,OAAO,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,OAAO,CAAC,GAAG,CAAC,UAAU,CAAC,KAAK,CAAC;AAC7E,IAAI,QAAQ,CAAC,iBAAiB,EAAE,CAAC,CAAC;AAClC,QAAQ,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,YAAY,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC;AAClC,gBAAgB,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,KAAK,EAAE;AAChD,gBAAgB,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,KAAK,CAAC;AAC/C,YAAY,EAAE;AACd,QAAQ,GAAG;AACX,IAAI,CAAC;AACL;AACA,IAAI,EAAE,CAAC,WAAW,CAAC,MAAM,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC;AACjE,IAAI,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,OAAO,CAAC,OAAO,CAAC,EAAE,CAAC,QAAQ,CAAC,SAAS,CAAC,IAAI,CAAC,GAAG,CAAC;AAC7E,IAAI,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,QAAQ,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,KAAK;AACpE,IAAI,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,OAAO,CAAC,OAAO,CAAC;AACrE,IAAI,QAAQ,CAAC,mBAAmB,EAAE,CAAC,CAAC;AACpC,QAAQ,EAAE;AACV,QAAQ,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,KAAK,CAAC;AACnC,YAAY,SAAS,CAAC;AACtB,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClB;AACA,QAAQ,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC;AACnC,QAAQ,EAAE,CAAC,IAAI,CAAC,QAAQ,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE,CAAC,QAAQ,CAAC,OAAO,CAAC,KAAK,CAAC;AACpG
,QAAQ,EAAE;AACV,QAAQ,KAAK,CAAC,CAAC,cAAc,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC;AAC3D,YAAY,SAAS,CAAC,CAAC,CAAC,GAAG;AAC3B,YAAY,cAAc,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACnD,gBAAgB,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3B,gBAAgB,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC;AACpC,gBAAgB,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACzD,oBAAoB,EAAE,CAAC,CAAC,SAAS,CAAC,OAAO,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7D,wBAAwB,SAAS,CAAC,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE;AACpD,oBAAoB,CAAC;AACrB,gBAAgB,GAAG;AACnB,YAAY,GAAG;AACf,YAAY,cAAc,CAAC,CAAC,CAAC,SAAS,CAAC;AACvC,YAAY,EAAE,CAAC,CAAC;AAChB,YAAY,EAAE;AACd,QAAQ,CAAC;AACT;AACA,QAAQ,EAAE,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC;AAC1D,QAAQ,EAAE,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC;AACzB,YAAY,cAAc,CAAC,CAAC,EAAE;AAC9B,QAAQ,CAAC;AACT;AACA,QAAQ,iBAAiB,EAAE,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AAC3D,IAAI,CAAC;AACL;AACA,IAAI,QAAQ,CAAC,gBAAgB,EAAE,CAAC,CAAC;AACjC,QAAQ,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,YAAY,EAAE,CAAC,EAAE,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC,CAAC;AAC3C,gBAAgB,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1F,YAAY,CAAC;AACb,QAAQ,GAAG;AACX,IAAI,CAAC;AACL;AACA,IAAI,QAAQ,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC;AAChC,QAAQ,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,YAAY,EAAE,CAAC,EAAE,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC,CAAC;AAC3C,gBAAgB,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,YAAY,CAAC;AACb,QAAQ,GAAG;AACX,IAAI,CAAC;AACL;AACA,IAAI,QAAQ,CAAC,iBAAiB,CAAC,EAAE,CAAC,CAAC,CAAC;AACpC,QAAQ,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,YAAY,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC;AACzB,QAAQ,GAAG;AACX,IAAI,CAAC;AACL;AACA,IAAI,EAAE,CAAC,OAAO,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC;AACpD,IAAI,QAAQ,CAAC,iBAAiB,CAAC,UAAU,CAAC,CAAC,CAAC;AAC5C,QAAQ,EAAE,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,MAAM,CAAC;AACjC,QAAQ,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACtC,YAAY,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC7C,YAAY,CAAC,QAAQ,CAAC,EAAE,CAAC,SAAS,CAAC;AACnC,YAAY,CAAC,OAAO,CAAC,KAAK,CAAC;AAC3B,YAAY,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,GAAG;AACnD;AACA,QAAQ,EAAE;AACV,QAAQ,mBAAmB,GAAG;AAC9B,QAAQ,iBAAiB,GAAG;AAC5B,QAAQ,iBAAiB,GAAG;AAC5B,QAAQ,GAAG,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,CAAC;AAC3D,YAAY,gBAAgB,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,EAAE,EAAE;AAC3C,YAAY,iBAAiB,GAAG;AAChC,YAAY,iBAAiB,GAAG;AAChC,YAAY,gBAAgB,CAAC,KAAK,EAAE;AACpC,YAAY,iBAAiB,GAAG;AAChC,YAAY,iBAAiB,GAAG;AAChC,QAAQ,CAAC;AACT;AACA,QAAQ,QAAQ,CAAC,mBAAmB,EAAE,CAAC,CAAC;AACxC,YAAY,EAAE,CAAC,SAAS,CAAC,QAAQ,CAAC,OAAO,CAAC,MAAM,CAAC;AACjD,YAAY,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,cAAc,CAAC,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7D,gBAAgB,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,KAAK,EAAE;AAC3F,YAAY,GAAG;AACf;AACA,YAAY,cAAc,CAAC,OAAO,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC;AACpD,gBAAgB,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,oBAAoB,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,C
AAC,CAAC;AAC/B,oBAAoB,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC;AAC9C,gBAAgB,GAAG;AACnB,YAAY,GAAG;AACf;AACA,YAAY,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AAC1C,gBAAgB,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC;AAC1C,YAAY,GAAG;AACf,QAAQ,CAAC;AACT;AACA,QAAQ,QAAQ,CAAC,gBAAgB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC1C,YAAY,cAAc,CAAC,OAAO,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC;AAC7D,gBAAgB,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AAC9C,oBAAoB,EAAE,CAAC,CAAC,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC,CAAC;AAClD,wBAAwB,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,OAAO,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,QAAQ,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC;AAC/G,wBAAwB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,KAAK,EAAE;AAC3G,wBAAwB,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC;AAC7D,oBAAoB,CAAC;AACrB,gBAAgB,GAAG;AACnB,YAAY,GAAG;AACf;AACA,YAAY,QAAQ,CAAC,cAAc,CAAC,IAAI,CAAC,CAAC,CAAC;AAC3C,gBAAgB,MAAM,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC;AAC5E,YAAY,CAAC;AACb,QAAQ,CAAC;AACT;AACA,QAAQ,QAAQ,CAAC,gBAAgB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC1C,YAAY,cAAc,CAAC,KAAK,GAAG,OAAO,GAAG,OAAO,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC;AACtE,gBAAgB,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AAC9C,oBAAoB,EAAE,CAAC,CAAC,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC,CAAC;AAClD,wBAAwB,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,OAAO,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,SAAS,CAAC,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC;AACzG,wBAAwB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,KAAK,EAAE;AAC3G,wBAAwB,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC;AAC7D,oBAAoB,CAAC;AACrB,gBAAgB,GAAG;AACnB,YAAY,GAAG;AACf;AACA,YAAY,QAAQ,CAAC,cAAc,CAAC,IAAI,CAAC,CAAC,CAAC;AAC3C,gBAAgB,MAAM,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC;AAC5E,YAAY,CAAC;AACb,QAAQ,CAAC;AACT;AACA,QAAQ,QAAQ,CAAC,iBAAiB,EAAE,CAAC,CAAC;AACtC,YAAY,cAAc,CAAC,OAAO,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC;AACpD,gBAAgB,GAAG,CAAC,IAAI,CAAC;AACzB,oBAAoB,EAAE,CAAC;AACvB,oBAAoB,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3B,oBAAoB,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC;AACrC,oBAAoB,CAAC,CAAC;AACtB;AACA,gBAAgB,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,WAAW,CAAC,KAAK,CAAC,IAAI,CAAC;AACnD,gBAAgB,KAAK,CAAC,IAAI,CAAC,cAAc,EAAE;AAC3C,gBAAgB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;AACzC,oBAAoB,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE;AACpC,oBAAoB,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AACrC,oBAAoB,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC;AAC7C,oBAAoB,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,WAAW,CAAC;AACxD,gBAAgB,CAAC;AACjB;AACA,gBAAgB,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC;AACnF,gBAAgB,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE;AAChD,gBAAgB,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7B,
oBAAoB,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC;AACtC;AACA,oBAAoB,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,WAAW,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC;AAC1D,oBAAoB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;AAClD,wBAAwB,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE;AACxC,wBAAwB,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC;AACjE,wBAAwB,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC;AACjD,wBAAwB,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AACpC,oBAAoB,CAAC;AACrB,gBAAgB,CAAC;AACjB,YAAY,GAAG;AACf,QAAQ,CAAC;AACT;AACA,QAAQ,QAAQ,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvC,YAAY,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7B,QAAQ,CAAC;AACT,IAAI,CAAC;AACL;AACA,IAAI,EAAE,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC;AACvF,IAAI,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC;AACvD,IAAI,QAAQ,CAAC,iBAAiB,EAAE,CAAC,CAAC;AAClC,QAAQ,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,YAAY,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC,oBAAoB,EAAE;AACxD,YAAY,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC,oBAAoB,EAAE;AACxD,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,YAAY,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,YAAY,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACrD,gBAAgB,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;AAC7B,gBAAgB,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC;AAC9B,YAAY,GAAG;AACf,YAAY,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACrD,gBAAgB,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;AAC7B,gBAAgB,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC;AAC9B,YAAY,GAAG;AACf,QAAQ,GAAG;AACX;AACA,QAAQ,QAAQ,CAAC,oBAAoB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7C,YAAY,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC;AAC3C,QAAQ,CAAC;AACT;AACA,QAAQ,QAAQ,CAAC,oBAAoB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7C,YAAY,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC;AAC3C,QAAQ,CAAC;AACT,IAAI,CAAC;AACL;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,QAAQ,CAAC;AAC/B,IAAI,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;AACvB,QAAQ,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC;AACvB,IAAI,CAAC;AACL;AACA,IAAI,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,MAAM,EAAE;AACvD,IAAI,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACzC,QAAQ,SAAS,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,GAAG,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,EAAE,CAAC,IAAI;AAC9F,QAAQ,WAAW,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AAC/F,QAAQ,KAAK,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,OAAO,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACzF,QAAQ,KAAK,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,CAAC,GAAG,MAAM,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACzF,QAAQ,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,QAAQ,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,IAAI;AACxF,QAAQ,UAAU,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,EAAE,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC9F;AACA,QAAQ,MAAM,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,EAAE,IAAI,SAAS,GAAG,CAAC
[... base64-VLQ "mappings" payload of a deleted minified JavaScript source-map file (web UI bundle removed with the hadoop-ozone/hadoop-hdds subprojects), elided ...]
AC,MAAM,CAAC,YAAY,EAAE;AACjD,QAAQ,CAAC,CAAC,OAAO,MAAM,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,EAAE;AAC5C,QAAQ,CAAC;AACT;AACA,IAAI,GAAG,CAAC,MAAM,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC;AACjE,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,KAAK,QAAQ,CAAC,CAAC,IAAI;AAC7B,QAAQ,CAAC,CAAC,MAAM,OAAO,CAAC,CAAC,IAAI;AAC7B,QAAQ,CAAC,CAAC,SAAS,IAAI,CAAC,CAAC,IAAI;AAC7B,QAAQ,CAAC,CAAC,KAAK,QAAQ,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,YAAY,EAAE;AAChD,QAAQ,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,OAAO,CAAC,MAAM,EAAE;AACzC,QAAQ,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,OAAO,CAAC,MAAM,EAAE;AACzC,QAAQ,CAAC,CAAC,SAAS,IAAI,CAAC,CAAC,KAAK;AAC9B,QAAQ,CAAC,CAAC,SAAS,IAAI,CAAC,CAAC,KAAK;AAC9B,QAAQ,CAAC,CAAC,UAAU,GAAG,CAAC,CAAC,IAAI;AAC7B,QAAQ,CAAC,CAAC,SAAS,IAAI,CAAC,CAAC,IAAI;AAC7B,QAAQ,CAAC,CAAC,SAAS,IAAI,CAAC,CAAC,IAAI;AAC7B,QAAQ,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,KAAK;AACjC,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,KAAK,EAAE;AAClC,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,IAAI;AAC7B,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,WAAW,EAAE,CAAC,CAAC,WAAW,EAAE,CAAC,CAAC,SAAS,EAAE;AAC3E,QAAQ,CAAC,CAAC,MAAM,OAAO,CAAC,CAAC,IAAI;AAC7B,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG;AACxB,QAAQ,CAAC,CAAC,UAAU,IAAI,CAAC,CAAC,KAAK;AAC/B,QAAQ,CAAC;AACT;AACA,IAAI,OAAO,CAAC,MAAM,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,EAAE;AAChC,IAAI,KAAK,CAAC,MAAM,EAAE,MAAM,GAAG,WAAW,CAAC,EAAE,EAAE;AAC3C,IAAI,KAAK;AACT,QAAQ,CAAC,MAAM,EAAE,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE;AACrD,QAAQ,CAAC,WAAW,CAAC,EAAE,CAAC;AACxB,IAAI,CAAC;AACL,IAAI,KAAK,CAAC,IAAI,EAAE,CAAC,GAAG;AACpB,IAAI,KAAK,CAAC,IAAI,EAAE,CAAC,GAAG;AACpB,IAAI,OAAO;AACX,QAAQ,CAAC,eAAe,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACzC,YAAY,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5C,QAAQ,EAAE;AACV,QAAQ,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,YAAY,MAAM,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;AAC5C,QAAQ,GAAG;AACX;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,EAAE,CAAC,CAAC,EAAE;AACd,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC,QAAQ,EAAE;AACjE;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,QAAQ,MAAM,CAAC,QAAQ,GAAG;AAC1B,YAAY,MAAM,CAAC,CAAC;AACpB,gBAAgB,MAAM,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,EAAE;AACpE,YAAY,EAAE;AACd,QAAQ,CAAC;AACT,IAAI,EAAE;AACN;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,QAAQ,MAAM,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC;AAChC,YAAY,EAAE,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC;AAC3C,gBAAgB,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,oBAAoB,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,EAAE;AACvD,gBAAgB,GAAG;AACnB,QAAQ,CAAC;AACT,IAAI,EAAE;AACN;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,WAAW,CAAC,MAAM,CAAC,OAAO,EAAE;AACpC,QAAQ,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,EAAE;AACjD,QAAQ,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,EAAE;AACjD,QAAQ,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,EAAE;AACjD,QAAQ,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,EAAE;AACjD;AACA,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC;AAC5B;AACA,YAAY,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AACxC,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE
;AACxC;AACA,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACnF,gBAAgB,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACtF;AACA,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACvC,gBAAgB,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AACnC,oBAAoB,SAAS,CAAC,IAAI,CAAC,KAAK,EAAE;AAC1C,gBAAgB,IAAI;AACpB,oBAAoB,SAAS,CAAC,UAAU,GAAG,QAAQ,CAAC,QAAQ,EAAE,IAAI,CAAC,KAAK,EAAE;AAC1E,YAAY,EAAE;AACd,YAAY,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC;AACnC;AACA,YAAY,KAAK;AACjB,gBAAgB,CAAC,MAAM,CAAC,WAAW,CAAC,IAAI,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC;AACxD,gBAAgB,CAAC,MAAM,CAAC,WAAW,CAAC,IAAI,EAAE;AAC1C,gBAAgB,CAAC,MAAM,GAAG;AAC1B;AACA,YAAY,EAAE,CAAC,UAAU,CAAC,GAAG,CAAC,KAAK,CAAC,gBAAgB;AACpD,YAAY,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG;AAC3E;AACA,YAAY,EAAE,CAAC,EAAE,YAAY,CAAC,CAAC,CAAC;AAChC,gBAAgB,GAAG,CAAC,GAAG,CAAC;AACxB,gBAAgB,YAAY,CAAC,CAAC,CAAC,GAAG;AAClC,gBAAgB,GAAG,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC;AACpC,oBAAoB,EAAE,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,UAAU,CAAC,KAAK,CAAC;AACpD,wBAAwB,YAAY,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,EAAE,KAAK,CAAC,CAAC,EAAE;AAChE,oBAAoB,IAAI;AACxB,wBAAwB,YAAY,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,EAAE;AACvD,gBAAgB,CAAC;AACjB,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC;AACjE,YAAY,EAAE,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC;AACvG,gBAAgB,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,SAAS,EAAE;AAClD,gBAAgB,WAAW,CAAC,SAAS,EAAE,OAAO,CAAC,SAAS,GAAG;AAC3D,gBAAgB,MAAM,CAAC,KAAK,CAAC;AAC7B,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,SAAS,CAAC,SAAS,GAAG,EAAE,CAAC,MAAM,GAAG,MAAM,GAAG;AAC3D,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B,YAAY,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG;AACjC,YAAY,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG;AACjC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,YAAY,GAAG,IAAI,EAAE,IAAI,GAAG;AACrF,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,YAAY,CAAC,EAAE,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,IAAI;AAC5H,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,GAAG;AAC/C,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,EAAE,CAAC,UAAU,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM;AAC5C,YAAY,MAAM,CAAC,MAAM,EAAE,IAAI,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,UAAU,GAAG,KAAK,EAAE,OAAO,CAAC,MAAM,GAAG,IAAI,GAAG;AACrG;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG;AAC7D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,GAAG;AAC7D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,WAAW,GAAG;AAC/D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,mBAAmB,GAAG;AACvE,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,QAAQ,GAAG;AAC5D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,UAAU,GAAG;AAC9D;AACA,YAAY,EAAE,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC;AAClC,gBAAgB,CAAC,CAAC,
MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACzC,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI;AAC9E,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,MAAM;AACrB,YAAY,EAAE,CAAC,EAAE,UAAU,CAAC,CAAC,CAAC;AAC9B,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,GAAG,SAAS,MAAM,MAAM,GAAG;AACnE,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,cAAc,CAAC;AACjD,gBAAgB,MAAM,CAAC,KAAK,CAAC,WAAW,EAAE;AAC1C;AACA,gBAAgB,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,EAAE;AAC7C,oBAAoB,CAAC,KAAK,CAAC,IAAI,CAAC;AAChC,oBAAoB,CAAC,IAAI,CAAC,MAAM,EAAE;AAClC;AACA,gBAAgB,EAAE,CAAC,EAAE,SAAS,CAAC,EAAE,CAAC,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC;AACnE,oBAAoB,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,GAAG;AACjD,oBAAoB,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AAC1F,gBAAgB,CAAC;AACjB;AACA,gBAAgB,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,EAAE;AAC7C,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,CAAC,CAAC,MAAM;AACjF,YAAY,CAAC;AACb;AACA,YAAY,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACxF;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AACtC,YAAY,OAAO;AACnB,gBAAgB,CAAC,KAAK,CAAC,cAAc,CAAC;AACtC,gBAAgB,CAAC,MAAM,CAAC,eAAe,CAAC;AACxC,gBAAgB,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,oBAAoB,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACrD,oBAAoB,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC;AACnC,gBAAgB,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,QAAQ,CAAC,GAAG;AACtE,gBAAgB,CAAC,UAAU,CAAC,UAAU,EAAE;AACxC;AACA,YAAY,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,WAAW,EAAE;AAC1C,gBAAgB,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG;AACvE,gBAAgB,CAAC,IAAI,CAAC,OAAO,EAAE;AAC/B;AACA;AACA,YAAY,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,mBAAmB,EAAE;AAClD,gBAAgB,CAAC,IAAI,EAAE,IAAI,CAAC,IAAI,EAAE,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK;AAC9E;AACA,YAAY,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC,mBAAmB,GAAG,SAAS,GAAG,EAAE,CAAC,QAAQ,EAAE;AAC1F,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpC,oBAAoB,MAAM,CAAC,CAAC,CAAC;AAC7B,gBAAgB,GAAG;AACnB;AACA,YAAY,OAAO,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,QAAQ,GAAG;AACrE;AACA,YAAY,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC,SAAS,GAAG,EAAE,CAAC,OAAO,EAAE;AAC1D,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpC,oBAAoB,MAAM,CAAC,CAAC,CAAC,CAAC;AAC9B,gBAAgB,GAAG;AACnB;AACA,YAAY,OAAO,CAAC,KAAK,EAAE;AAC3B,gBAAgB,CAAC,MAAM,EAAE,IAAI,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,OAAO,EAAE;AAC3D,gBAAgB,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,CAAC,EAAE;AAC5C;AACA,YAAY,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,IAAI,CAAC,KAAK,CAAC,GAAG,CAAC,SAAS,CAAC,EAAE,CAAC,GAAG;AACxE,YAAY,OAAO,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,gBAAgB,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC;AAC9C,YAAY,EAAE;AACd,gBAAgB,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC,CAAC,oBAAoB,CAAC,CAAC,OAAO,EAAE;AAC9E,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,EAAE;AACzC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,EAAE;AACzC,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC,CA
AC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7C,oBAAoB,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;AACnE,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7C,oBAAoB,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;AACnE,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,KAAK,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACrD,oBAAoB,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACtC,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1D,oBAAoB,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACvH,gBAAgB,GAAG;AACnB;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,IAAI;AACzB,YAAY,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAC5B,gBAAgB,KAAK;AACrB,oBAAoB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7B,oBAAoB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,cAAc,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AAC5E,oBAAoB,CAAC,QAAQ,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,EAAE;AACrD;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACzC,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI;AAC3E,oBAAoB,CAAC,IAAI,CAAC,KAAK,EAAE;AACjC,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAC5B,gBAAgB,KAAK;AACrB,oBAAoB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7B,oBAAoB,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,eAAe,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;AAC5E,oBAAoB,CAAC,QAAQ,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE;AACnD;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,EAAE;AACzC,oBAAoB,CAAC,IAAI,CAAC,KAAK,EAAE;AACjC,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,YAAY;AACjC,YAAY,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAC5B,gBAAgB,KAAK;AACrB,oBAAoB,CAAC,OAAO,CAAC,OAAO,CAAC,CAAC,GAAG;AACzC,oBAAoB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7B,oBAAoB,CAAC,KAAK,CAAC,cAAc,CAAC;AAC1C,oBAAoB,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACnD,wBAAwB,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACtD,oBAAoB,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,QAAQ,CAAC,IAAI;AAC3E,gBAAgB,MAAM,CAAC,MAAM,GAAG,EAAE,CAAC,QAAQ,GAAG,MAAM,EAAE,CAAC,EAAE;AACzD,oBAAoB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,aAAa,GAAG;AACvD,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,aAAa,EAAE;AAC7C,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI;AAC3E,oBAAoB,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG;AAC3E,oBAAoB,CAAC,IAAI,CAAC,KAAK,EAAE;AACjC,YAAY,CAAC;AACb;AACA,YAAY,EAAE,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC;AAC5B,gBAAgB,KAAK;AACrB,oBAAoB,CAAC,OAAO,CAAC,OAAO,CAAC,CAAC,GAAG;AACzC,oBAAoB,CAAC,KAAK,CAAC,CAAC,CAAC;AAC7B,oBAAoB,CAAC,KAAK,CAAC,eAAe,CAAC;AAC3C,oBAAoB,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACnD,wBAAwB,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACtD,oBAAoB,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,QAAQ,CAAC,IAAI;AAC3E,gBAAgB,MAAM,CAAC,MAAM,GAAG,EAAE,CAAC,QAAQ,GAAG,MAAM,EAAE,CAAC,EAAE;AACzD,oBAAoB,CAA
C,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,aAAa,GAAG;AACvD,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,aAAa,EAAE;AAC7C,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG;AAClH,oBAAoB,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG;AAC3E,oBAAoB,CAAC,IAAI,CAAC,KAAK,EAAE;AACjC,YAAY,CAAC;AACb;AACA,YAAY,8DAA8D;AAC1E,YAAY,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AAC5D,YAAY,8DAA8D;AAC1E;AACA,YAAY,MAAM,CAAC,QAAQ,CAAC,EAAE,EAAE,WAAW,EAAE,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC;AAClE,gBAAgB,GAAG,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,QAAQ,CAAC;AACzC,oBAAoB,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG,EAAE;AAC/C,gBAAgB,QAAQ,CAAC,WAAW,CAAC,KAAK,EAAE;AAC5C,gBAAgB,KAAK,CAAC,MAAM,GAAG;AAC/B,YAAY,GAAG;AACf;AACA,YAAY,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,CAAC,OAAO;AACvE,YAAY,QAAQ,CAAC,EAAE,EAAE,WAAW,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,gBAAgB,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC;AACxD,oBAAoB,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACrD,wBAAwB,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACxD,oBAAoB,GAAG;AACvB,oBAAoB,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC;AAChD,gBAAgB,CAAC;AACjB,gBAAgB,KAAK,CAAC,MAAM,GAAG;AAC/B,YAAY,GAAG;AACf;AACA,YAAY,EAAE,CAAC,SAAS,CAAC,KAAK,CAAC,eAAe,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,KAAK,CAAC,KAAK;AAC1G,YAAY,OAAO,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC1E,gBAAgB,OAAO,CAAC,MAAM,CAAC,IAAI,EAAE;AACrC,gBAAgB,SAAS,CAAC,MAAM,GAAG,EAAE,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,UAAU,CAAC;AACjI,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,EAAE;AACnC,gBAAgB,SAAS,CAAC,MAAM,GAAG,EAAE,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,UAAU,CAAC;AACjI,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,KAAK,CAAC,IAAI,IAAI;AAC9C,YAAY,GAAG;AACf;AACA,YAAY,OAAO,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AAC3E,gBAAgB,SAAS,CAAC,MAAM,GAAG,EAAE,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,UAAU,CAAC;AAClG,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,eAAe,EAAE;AACtE,gBAAgB,SAAS,CAAC,MAAM,GAAG,EAAE,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,UAAU,CAAC;AAClG,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,IAAI;AACnE,gBAAgB,OAAO,CAAC,IAAI,CAAC,GAAG,EAAE,MAAM,CAAC,KAAK,EAAE;AAChD,YAAY,GAAG;AACf;AACA,YAAY,EAAE,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,WAAW,CAAC,EAAE,CAAC,MAAM;AAC/D,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG;AAC1B,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG;AAC1B;AACA,QAAQ,GAAG;AACX;AACA,QAAQ,WAAW,CAAC,SAAS,EAAE,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,GAAG;AAC7D,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC
,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,UAAU;AACpC,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC;AAC5B,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxB,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC;AAC5B;AACA,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACjF,QAAQ,MAAM,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACnF,QAAQ,SAAS,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AACzF,QAAQ,SAAS,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AACzF,QAAQ,SAAS,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AACzF,QAAQ,UAAU,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC3F,QAAQ,SAAS,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AACzF,QAAQ,SAAS,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,IAAI;AACzF,QAAQ,YAAY,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,IAAI;AACrG,QAAQ,MAAM,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACnF,QAAQ,QAAQ,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AACvF,QAAQ,UAAU,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAC3F;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC;AACtC,gBAAgB,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;AACnC,gBAAgB,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;AAClC,YAAY,CAAC;AACb,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,GAAG;AACX,QAAQ,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,eAAe,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACrF,YAAY,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC;AAChC,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG;AA
ClD,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACjE,YAAY,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzC,YAAY,MAAM,CAAC,KAAK,CAAC,KAAK,EAAE;AAChC,YAAY,KAAK,CAAC,KAAK,CAAC,KAAK,EAAE;AAC/B,YAAY,KAAK,CAAC,KAAK,CAAC,KAAK,EAAE;AAC/B,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,OAAO,EAAE;AAC5C,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;ACxYF;AACA,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AAClC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AACvD,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG;AACrB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE;AACrB,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI;AACxB,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,EAAE;AAC/B,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,MAAM,EAAE;AAC/B,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3C,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3C,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,IAAI,GAAG,GAAG;AAC7C,QAAQ,CAAC,CAAC,OAAO;AACjB,QAAQ,CAAC,CAAC,OAAO;AACjB,QAAQ,CAAC,CAAC,MAAM;AAChB,QAAQ,CAAC,CAAC,MAAM;AAChB,QAAQ,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC,IAAI;AACjC,QAAQ,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC,IAAI;AACjC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,SAAS,EAAE;AAC7C,QAAQ,CAAC;AACT;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,EAAE;AACrD,IAAI;AACJ,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AACpE,gBAAgB,eAAe,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AACtE;AACA,YAAY,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AACxC,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B,YAAY,CAAC,GAAG,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,EAAE;AAC1D,gBAAgB,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,cAAc,GAAG;AACtD;AACA,YAAY,CAAC,GAAG,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,EAAE;AAC1D,gBAAgB,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG;AACvD;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,SAAS,GAAG,IAAI,EAAE,IAAI,GAAG;AAClF,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,SAAS,GAAG;AAChG,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,GAAG;AAC/C,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI;A
ACvF;AACA,YAAY,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,EAAE,IAAI,EAAE;AAC9C,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG;AAClD,YAAY,KAAK,CAAC,KAAK,GAAG,MAAM,EAAE,IAAI,GAAG;AACzC,YAAY,KAAK,CAAC,IAAI,GAAG,MAAM,GAAG;AAClC,YAAY,KAAK;AACjB,gBAAgB,CAAC,KAAK,EAAE,MAAM,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AACjF,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,EAAE;AACxC,oBAAoB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE;AAC7D,oBAAoB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE;AAC7D,YAAY,EAAE;AACd;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC;AAClF,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,EAAE,MAAM,CAAC,EAAE,CAAC,KAAK,EAAE;AAC1D,gBAAgB,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACtC,oBAAoB,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG;AACjF,oBAAoB,QAAQ,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC,CAAC;AAChD,wBAAwB,EAAE,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1C,4BAA4B,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD,4BAA4B,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,KAAK,CAAC;AACtD,4BAA4B,MAAM,CAAC,MAAM,CAAC;AAC1C,wBAAwB,CAAC,CAAC,IAAI,CAAC,CAAC;AAChC,4BAA4B,MAAM,CAAC,IAAI,CAAC;AACxC,wBAAwB,CAAC;AACzB,oBAAoB,CAAC;AACrB,oBAAoB,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,UAAU,CAAC,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,IAAI;AAClF,wBAAwB,QAAQ,CAAC,CAAC,CAAC,UAAU,CAAC,OAAO,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,IAAI;AAC9E,wBAAwB,YAAY,CAAC,CAAC,CAAC,UAAU,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AACtE,oBAAoB,MAAM,CAAC,EAAE,gBAAgB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,IAAI,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,IAAI;AAC3L,gBAAgB,GAAG;AACnB,YAAY,MAAM,CAAC,KAAK,GAAG,MAAM,EAAE,MAAM,GAAG;AAC5C,YAAY,MAAM,CAAC,IAAI,GAAG,MAAM,GAAG;AACnC,YAAY,MAAM;AAClB,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,EAAE,CAAC,EAAE;AAC7E,gBAAgB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,EAAE,CAAC,EAAE;AAC7E,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AAC7B,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC9C,oBAAoB,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,YAAY,CAAC,CAAC,CAAC;AAChG,4BAA4B,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC,QAAQ,CAAC;AACpH,gBAAgB,GAAG;AACnB,QAAQ,GAAG;AACX,QAAQ;AACR,QAAQ,WAAW,CAAC,SAAS,EAAE,SAAS,CAAC,SAAS,GAAG;AACrD,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,I
AAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,YAAY,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACvF,QAAQ,MAAM,CAAC,WAAW,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACzF,QAAQ,OAAO,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAC3F,QAAQ,OAAO,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAC3F,QAAQ,MAAM,CAAC,WAAW,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACzF,QAAQ,MAAM,CAAC,WAAW,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACzF,QAAQ,MAAM,CAAC,WAAW,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI;AAC/E,QAAQ,MAAM,CAAC,WAAW,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI;AAC/E,QAAQ,OAAO,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,IAAI;AAC3F,QAAQ,gBAAgB,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,gBAAgB,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,gBAAgB,CAAC,CAAC,IAAI;AAC7G,QAAQ,gBAAgB,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,gBAAgB,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,gBAAgB,CAAC,CAAC,IAAI;AAC7G;AACA,QAAQ,EAAE,OAAO,CAAC,OAAO;AACzB,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,KAAK;AAClF,QAAQ,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,KAAK;AAClF;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC3E,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AAClE,YAAY,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACzC,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;AC7IF;AACA,EAAE,CAAC,MAAM,CAAC,aAAa,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACtC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,SAAS,GAAG;AAC1C;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,K
AAK,CAAC,CAAC,GAAG,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC;AAC5D,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI;AACtB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI;AACvB,QAAQ,CAAC,CAAC,CAAC;AACX,QAAQ,CAAC,CAAC,CAAC;AACX,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE;AACpB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK;AACxB,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,GAAG,CAAC,EAAE;AACvC,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,IAAI,EAAE,EAAE;AACzC,QAAQ,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,IAAI;AAC9B,QAAQ,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,IAAI;AAC3B,QAAQ,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,KAAK;AACjC,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI;AACvB,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,SAAS,EAAE;AAC7C,QAAQ,CAAC;AACT,QAAQ;AACR,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,EAAE;AACrD;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,WAAW,CAAC,MAAM,CAAC,SAAS,EAAE;AACtC,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAC5C,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC;AACA,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACnF,gBAAgB,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACtF;AACA,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC,IAAI,CAAC,KAAK,EAAE,CAAC,EAAE;AACjE,YAAY,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC;AACnC;AACA,YAAY,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC;AAClE,YAAY,EAAE,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC;AACxC,gBAAgB,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC;AACjD,gBAAgB,MAAM,CAAC,KAAK,CAAC;AAC7B,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,SAAS,CAAC,SAAS,GAAG,EAAE,CAAC,MAAM,GAAG,MAAM,GAAG;AAC3D,YAAY,CAAC;AACb;AACA,YAAY,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,EAAE;AACjF;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B,YAAY,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,GAAG;AACnC,YAAY,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,GAAG;AACnC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,aAAa,GAAG,IAAI,EAAE,IAAI,GAAG;AACtF,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,GAAG,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,aAAa,GAAG;AACpG,YAAY,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,GAAG;AAC/C,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,GAAG;AACrC;AACA,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,aAAa,GAAG;AACjE,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,SAAS,GAAG;AAC7D,YAAY,MAAM,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,SAAS,GAAG;AAC7D;AACA,YAAY,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACxF;AACA,YAAY,EAAE,CAAC,IAAI,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AACtC,YAAY,GAAG,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,aAAa,GAAG;AAC9D;AACA,YAAY,SAAS,CAAC,KAAK,CAAC,cAAc,EAAE,MAAM,CAAC,eAAe,EAAE;AACpE,YAAY,aAAa,CAAC,IAAI,CAAC,SAAS,EAAE;AAC1C;AACA,YAAY,EAAE,CAAC,CAAC,aAAa,CAAC,CAAC,
CAAC;AAChC,gBAAgB,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,SAAS,GAAG;AAC1D,gBAAgB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,SAAS,CAAC,SAAS,GAAG,EAAE,CAAC,YAAY,EAAE;AACnE,oBAAoB,CAAC,IAAI,EAAE,YAAY,GAAG;AAC1C;AACA,gBAAgB,KAAK,CAAC,KAAK,GAAG,MAAM,EAAE,IAAI,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,YAAY,EAAE;AAC7E,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACzD,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,EAAE,GAAG,EAAE;AACvC,oBAAoB,CAAC,KAAK,EAAE,IAAI,CAAC,MAAM,EAAE,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG;AAC7E;AACA,gBAAgB,KAAK;AACrB,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,EAAE;AACrF,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,UAAU,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1D,wBAAwB,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;AACnC,oBAAoB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC1B,oBAAoB,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,KAAK,GAAG,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AAC7F,oBAAoB,CAAC,IAAI,CAAC,WAAW,CAAC,YAAY,GAAG;AACrD,YAAY,CAAC;AACb;AACA,YAAY,MAAM,CAAC,MAAM,GAAG,EAAE,CAAC,SAAS,GAAG,MAAM,EAAE,IAAI,EAAE;AACzD,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,cAAc,CAAC;AAChD,gBAAgB,CAAC,EAAE,EAAE,KAAK,EAAE,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE;AAC7D,gBAAgB,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,eAAe,GAAG,CAAC,GAAG;AAC/E;AACA,YAAY,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,SAAS,CAAC,IAAI,EAAE;AAC1C,gBAAgB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE;AAChH,gBAAgB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,cAAc,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC3E,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,CAAC,eAAe,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,EAAE;AAC9D;AACA,YAAY,EAAE,KAAK,CAAC,EAAE,CAAC,SAAS,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,KAAK,EAAE,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG;AAC3F,YAAY,QAAQ,CAAC,eAAe,EAAE,CAAC,CAAC;AACxC,gBAAgB,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC;AACnC;AACA,gBAAgB,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,GAAG,EAAE,CAAC,UAAU,GAAG,IAAI,CAAC,KAAK,EAAE;AAC3E;AACA,gBAAgB,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,UAAU,CAAC,KAAK,EAAE;AACnD,oBAAoB,CAAC,MAAM,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,UAAU,EAAE;AAC/D,oBAAoB,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC;AAC/C,oBAAoB,CAAC,KAAK,EAAE,IAAI,CAAC,OAAO,EAAE,CAAC,CAAC,EAAE;AAC9C;AACA,gBAAgB,UAAU,CAAC,IAAI,EAAE;AACjC,oBAAoB,CAAC,UAAU,GAAG,QAAQ,CAAC,GAAG,CAAC;AAC/C,oBAAoB,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC;AAC/C,oBAAoB,CAAC,KAAK,EAAE,IAAI,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC;AAC7C,oBAAoB,CAAC,MAAM,GAAG;AAC9B;AACA,gBAAgB,UAAU;AAC1B,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE;AACjH,oBAAoB,CAAC,UAAU,GAAG,QAAQ,CAAC,GAAG,CAAC;AAC/C,oBAAoB,CAAC,KAAK,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC;AAC/C,oBAAoB,CAAC,KAAK,EAAE,IAAI,CAAC,OAAO,EAAE,CAAC,CAAC,EAAE;AAC9C;AACA,gBAAgB,EAAE,CAAC,EAAE,KAAK,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC;AAC1C;AACA,gBAAgB,UAAU,CAAC,MAAM,EAAE,IAAI,EAAE;AACzC,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC;AAClC,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC5C,oBAAoB,CAA
C,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC;AAClC,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,eAAe,EAAE;AACjD;AACA,gBAAgB,UAAU,CAAC,MAAM,EAAE,IAAI,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,MAAM,EAAE;AACpE,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;AAClC,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC3C,oBAAoB,CAAC,IAAI,EAAE,IAAI,CAAC,MAAM,EAAE,CAAC,CAAC,GAAG,EAAE;AAC/C,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,EAAE,GAAG,GAAG;AACxC;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,CAAC,CAAC,EAAE,CAAC,MAAM,EAAE;AACrD,oBAAoB,CAAC,IAAI,CAAC,WAAW,CAAC,SAAS,CAAC,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,KAAK;AAChF;AACA,gBAAgB,UAAU,CAAC,MAAM,EAAE,IAAI,GAAG,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,EAAE,CAAC,MAAM,EAAE;AACpE,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;AACjC,oBAAoB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC3C,oBAAoB,CAAC,IAAI,EAAE,IAAI,CAAC,MAAM,EAAE,CAAC,CAAC,KAAK,EAAE;AACjD,oBAAoB,CAAC,IAAI,EAAE,EAAE,EAAE,CAAC,EAAE,GAAG,GAAG;AACxC;AACA,gBAAgB,CAAC,CAAC,MAAM,GAAG,EAAE,CAAC,UAAU,CAAC,CAAC,EAAE,CAAC,MAAM,EAAE;AACrD,oBAAoB,CAAC,IAAI,CAAC,WAAW,CAAC,SAAS,CAAC,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,KAAK;AAChF,YAAY,CAAC;AACb;AACA,YAAY,QAAQ,CAAC,cAAc,EAAE,CAAC,CAAC;AACvC,gBAAgB,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,MAAM,CAAC;AACnC;AACA,gBAAgB,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC1D;AACA,gBAAgB,QAAQ,CAAC,eAAe,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACnD,oBAAoB,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC3E,oBAAoB,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC;AACzC,oBAAoB,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI;AAC1D,wBAAwB,EAAE,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC;AACjF,4BAA4B,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;AAC/E,4BAA4B,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7C,wBAAwB,CAAC;AACzB,oBAAoB,CAAC;AACrB,oBAAoB,MAAM,CAAC,YAAY,CAAC;AACxC,gBAAgB,CAAC;AACjB;AACA,gBAAgB,KAAK,CAAC,CAAC,CAAC,CAAC,eAAe,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,KAAK;AAC3E,gBAAgB,eAAe,GAAG;AAClC,YAAY,CAAC;AACb;AACA,QAAQ,GAAG;AACX,QAAQ,WAAW,CAAC,SAAS,EAAE,aAAa,CAAC,SAAS,GAAG;AACzD,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,UAAU;AACpC,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;AAChC;AACA,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,WAAW,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACtF,QAAQ,MAAM,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACxF,QAAQ,WAAW,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AAClG,QAAQ,WAAW,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAA
C,CAAC,EAAE,WAAW,CAAC,CAAC,IAAI;AAClG,QAAQ,aAAa,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,aAAa,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,aAAa,CAAC,CAAC,IAAI;AACtG,QAAQ,UAAU,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,IAAI;AAChG,QAAQ,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,eAAe,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,eAAe,CAAC,CAAC,IAAI;AAC1G,QAAQ,MAAM,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACxF;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC3E,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,SAAS,EAAE;AAC9C,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC;AACA,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;ACxNF;AACA,EAAE,CAAC,MAAM,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACpC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AACvD,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG;AACrB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG;AACtB,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,YAAY,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,IAAI,CAAC,QAAQ,CAAC,GAAG,CAAC,KAAK;AAC/E,QAAQ,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,MAAM,CAAC,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG;AACvG,QAAQ,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI;AAC1B,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK;AAC5F,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK;AAC5F,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,UAAU,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,OAAO;AACjJ,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;AACzB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC;AACzB,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC;AAC3B,QAAQ,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,CAAC,aAAa;AACpE,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,EAAE,C
AAC,IAAI,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;AACvE,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,GAAG,CAAC,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,EAAE;AAChD,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,GAAG,CAAC,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,EAAE;AAChD,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,EAAE;AACvC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG;AACxB,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,EAAE,CAAC,QAAQ,EAAE,SAAS,EAAE,CAAC,CAAC,aAAa,EAAE,CAAC,CAAC,YAAY,GAAG,SAAS,EAAE,CAAC,CAAC,YAAY,EAAE,CAAC,CAAC,gBAAgB,EAAE,CAAC,CAAC,eAAe,EAAE;AAClJ,QAAQ,CAAC;AACT;AACA,IAAI,OAAO;AACX,QAAQ,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,IAAI;AACvC,QAAQ,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,OAAO;AAChE,IAAI,CAAC;AACL;AACA,IAAI,qCAAqC;AACzC,KAAK,CAAC,CAAC,MAAM,CAAC;AACd,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC;AAC1B,KAAK,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC;AACzB,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC,EAAE,CAAC,GAAG,EAAE;AACrC,KAAK,CAAC,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC;AACvC,KAAK,CAAC;AACN,KAAK,CAAC,CAAC,KAAK,CAAC;AACb,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC;AAC9B,KAAK,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC;AAChC,KAAK,qCAAqC;AAC1C;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC,QAAQ,EAAE;AAC/D;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,WAAW,CAAC,MAAM,CAAC,OAAO,EAAE;AACpC,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AACpE,gBAAgB,eAAe,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AACtE;AACA,YAAY,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AACxC,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,MAAM;AAC3B,YAAY,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG;AACjC,YAAY,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG;AACjC;AACA,YAAY,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC;AAC/B,YAAY,EAAE,CAAC,SAAS,CAAC,KAAK,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,EAAE,CAAC,MAAM,CAAC,KAAK,GAAG,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,KAAK;AACtG,YAAY,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/C,gBAAgB,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC;AACxC,gBAAgB,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpE,oBAAoB,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;AAChC,oBAAoB,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC;AACtC,oBAAoB,MAAM,CAAC,CAAC,CAAC;AAC7B,gBAAgB,GAAG;AACnB,YAAY,GAAG;AACf;AACA,YAAY,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC;AAC7D,gBAAgB,MAAM,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC;AACxC,YAAY,GAAG;AACf;AACA,YAAY,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,KAAK,EAAE;AACpC,gBAAgB,CAAC,KAAK,CAAC,KAAK,CAAC;AAC7B,gBAAgB,CAAC,MAAM,CAAC,MAAM,CAAC;AAC/B,gBAAgB,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,EAAE,EAAE,IAAI,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,aAAa,CAAC,EAAE,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO;AAC1H,gBAAgB,CAAC,CAAC,CAAC,IAAI,CAAC;AACxB,gBAAgB,CAAC,CAAC,CAAC,IAAI,CAAC;AACxB,gBAAgB,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACzC,oBAAoB,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;AACjC,wBAAwB,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7B,wBAAwB,EAAE,CAAC,CAAC,EAAE;AAC9B,oBAAoB,
KAAK,GAAG;AAC5B;AACA,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AACxC,YAAY,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AAC/E,YAAY,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AAClF,YAAY,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,cAAc,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACnE;AACA,YAAY,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,MAAM,GAAG;AACjC;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK;AACrD,YAAY,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,QAAQ,GAAG;AACtE,YAAY,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC/B,gBAAgB,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,EAAE;AAC5C,oBAAoB,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;AAC7E,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,EAAE,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,KAAK;AAC9K,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,IAAI,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,EAAE,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,KAAK;AAC9K,YAAY,CAAC;AACb;AACA,YAAY,SAAS,CAAC,EAAE,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACnD,gBAAgB,QAAQ,CAAC,UAAU,EAAE;AACrC,oBAAoB,IAAI,CAAC,CAAC,CAAC,CAAC;AAC5B,oBAAoB,KAAK,CAAC,CAAC,CAAC,CAAC;AAC7B,oBAAoB,GAAG,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC;AAClC,oBAAoB,EAAE,CAAC,CAAC,EAAE;AAC1B,gBAAgB,GAAG;AACnB,YAAY,GAAG;AACf;AACA,YAAY,SAAS,CAAC,KAAK,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,KAAK,EAAE,KAAK,IAAI;AAC3D;AACA,YAAY,EAAE,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,KAAK,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK;AACnE,YAAY,EAAE,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,IAAI,CAAC;AACtD,YAAY,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,SAAS,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,OAAO,EAAE;AAC1D;AACA,YAAY,0BAA0B,CAAC,KAAK,EAAE;AAC9C,YAAY,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,GAAG,GAAG,CAAC,SAAS,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC;AACtE;AACA,YAAY,EAAE,MAAM,CAAC,GAAG,CAAC,UAAU;AACnC,YAAY,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,EAAE;AAChC,gBAAgB,CAAC,MAAM,EAAE,CAAC,EAAE;AAC5B,gBAAgB,CAAC,IAAI,EAAE,KAAK,GAAG,GAAG,CAAC,SAAS,EAAE;AAC9C;AACA,YAAY,GAAG,CAAC,MAAM,EAAE,IAAI,EAAE;AAC9B,gBAAgB,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC;AAC/B,gBAAgB,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAC7C,oBAAoB,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;AAClC,wBAAwB,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC;AACvC,oBAAoB,CAAC;AACrB,oBAAoB,IAAI,CAAC,EAAE,CAAC,CAAC,kBAAkB,CAAC,CAAC,CAAC;AAClD,wBAAwB,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,EAAE,IAAI,EAAE;AACvE,oBAAoB,CAAC;AACrB,oBAAoB,IAAI,CAAC,CAAC;AAC1B,wBAAwB,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,EAAE;AAC7C,oBAAoB,CAAC;AACrB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,KAAK,EAAE,MAAM,EAAE,CAAC,EAAE,GAAG,EAAE;AACxC,gBAAgB,CAAC,EAAE,EAAE,KAAK,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE;AAC3C,oBAAoB,SAAS,CAAC,CAAC,EAAE;
AACjC,oBAAoB,QAAQ,CAAC,YAAY,EAAE;AAC3C,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,CAAC;AAChC,oBAAoB,EAAE;AACtB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE;AAC/C,oBAAoB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,IAAI,EAAE,KAAK,EAAE,OAAO,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE;AACjF,oBAAoB,QAAQ,CAAC,gBAAgB,EAAE;AAC/C,wBAAwB,IAAI,CAAC,CAAC,CAAC,CAAC;AAChC,wBAAwB,KAAK,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,IAAI,GAAG;AAC7D,wBAAwB,OAAO,CAAC,CAAC,qBAAqB,CAAC,CAAC,CAAC;AACzD,oBAAoB,GAAG;AACvB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE;AAC9C,oBAAoB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,KAAK,EAAE,KAAK,EAAE,OAAO,EAAE,CAAC,CAAC,EAAE;AAChF,oBAAoB,QAAQ,CAAC,eAAe,EAAE;AAC9C,wBAAwB,IAAI,CAAC,CAAC,CAAC;AAC/B,oBAAoB,GAAG;AACvB,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,EAAE;AAC/C,oBAAoB,QAAQ,CAAC,gBAAgB,EAAE;AAC/C,wBAAwB,IAAI,CAAC,CAAC,CAAC;AAC/B,oBAAoB,GAAG;AACvB,gBAAgB,GAAG;AACnB;AACA,YAAY,GAAG,SAAS,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,SAAS,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI;AACjE,YAAY,GAAG,KAAK,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,SAAS,EAAE,IAAI,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC;AAChE,YAAY,GAAG,OAAO,CAAC,SAAS,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC;AACzE,YAAY,EAAE,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,EAAE;AAChC,gBAAgB,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE;AAC9C,oBAAoB,CAAC,UAAU,EAAE;AACjC,oBAAoB,CAAC,QAAQ,CAAC,QAAQ,CAAC;AACvC,oBAAoB,CAAC,SAAS,EAAE,CAAC,EAAE,CAAC,cAAc,EAAE;AACpD,YAAY,GAAG;AACf;AACA,YAAY,EAAE,CAAC,UAAU,EAAE;AAC3B,gBAAgB,EAAE,MAAM,CAAC,MAAM,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI;AACvD,gBAAgB,EAAE,CAAC,SAAS,EAAE,IAAI,GAAG,MAAM,GAAG;AAC9C;AACA,gBAAgB,EAAE,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,KAAK,CAAC,IAAI;AAChE,gBAAgB,EAAE,CAAC,MAAM,EAAE,IAAI,EAAE;AACjC,oBAAoB,CAAC,IAAI,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,WAAW,CAAC,CAAC,GAAG;AAC/D,oBAAoB,CAAC,UAAU,EAAE;AACjC,oBAAoB,CAAC,QAAQ,CAAC,QAAQ,CAAC;AACvC,oBAAoB,CAAC,IAAI,EAAE,OAAO,EAAE,CAAC,QAAQ,CAAC,CAAC,EAAE;AACjD,wBAAwB,EAAE,CAAC,qBAAqB,CAAC,CAAC,EAAE,CAAC,CAAC;AACtD,4BAA4B,MAAM,CAAC,CAAC,CAAC;AACrC,wBAAwB,CAAC;AACzB,wBAAwB,IAAI,CAAC,CAAC;AAC9B,4BAA4B,MAAM,CAAC,CAAC,CAAC;AACrC,wBAAwB,CAAC;AACzB,oBAAoB,EAAE;AACtB,oBAAoB,CAAC,IAAI,EAAE,SAAS,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,wBAAwB,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI,CAAC,OAAO,GAAG,KAAK,CAAC;AACzD,wBAAwB,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,EAAE;AAC1C,4BAA4B,MAAM,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG;AACpF,wBAAwB,CAAC;AACzB,wBAAwB,IAAI,CAAC,CAAC;AAC9B,4BAA4B,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,kBAAkB,CAAC,CAAC,EAAE;AACpE,4BAA4B,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,yBAAyB,CAAC,CAAC,EAAE;AACxE,4BAA4B,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC;AACjD,gCAAgC,MAAM,CAAC,CAAC,MAAM,GAAG,CAAC,WAAW,CAAC,GAAG,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG;AACpG,4BAA4B,CAAC;AAC7B,4BAA4B,IAAI,CAAC,CAAC;AAClC,gCAAgC,MAAM,CAAC,CAAC,MAAM,GAAG,CAAC,WAAW,CAAC,GAAG,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI;AACpI,4BAA4B,CAAC;AAC7B,wBAAwB,CAAC;AACzB,oBAAoB,GAAG;AACvB,YAAY,CAAC;AACb;AACA,YAAY,
EAAE,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,OAAO,CAAC;AAC9D,YAAY,SAAS,CAAC,KAAK,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;AAC9C;AACA;AACA,YAAY,EAAE,MAAM,CAAC,SAAS,CAAC,QAAQ,CAAC,GAAG;AAC3C,YAAY,EAAE,CAAC,IAAI,EAAE;AACrB,gBAAgB,CAAC,UAAU,EAAE;AAC7B,gBAAgB,CAAC,QAAQ,CAAC,QAAQ,CAAC;AACnC,gBAAgB,CAAC,IAAI,EAAE,OAAO,EAAE,CAAC,CAAC;AAClC,gBAAgB,CAAC,IAAI,EAAE,GAAG,EAAE,QAAQ,CAAC,CAAC,EAAE;AACxC,oBAAoB,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE;AACnC,oBAAoB,aAAa,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;AACjD,gBAAgB,EAAE;AAClB,gBAAgB,CAAC,MAAM,GAAG;AAC1B,QAAQ,GAAG;AACX;AACA;AACA,QAAQ,WAAW,CAAC,SAAS,EAAE,QAAQ,CAAC,SAAS,GAAG;AACpD,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,IAAI;AACjF,QAAQ,MAAM,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACnF,QAAQ,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,IAAI;AAC/E,QAAQ,EAAE,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,EAAE,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,IAAI;AAC3E,QAAQ,QAAQ,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,IAAI;AACvF,QAAQ,kBAAkB,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,kBAAkB,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,kBAAkB,GAAG,CAAC,IAAI;AACrH,QAAQ,UAAU,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,UAAU,GAAG,CAAC,GAAG;AAC5F,QAAQ,WAAW,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,WAAW,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,GAAG;AAC7F,QAAQ,cAAc,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,cAAc,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,cAAc,CAAC,CAAC,GAAG;AACtG,QAAQ,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG;AACxE,QAAQ,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,GAAG,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,GAAG,CAAC,CAAC,GAAG;AACrE,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,EAAE,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC1E,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC5E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC7E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,EAAE,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC3E,QAAQ,GAAG;AACX,QAAQ,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACjE,
YAAY,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,EAAE;AACvC,QAAQ,EAAE;AACV,IAAI,GAAG;AACP;AACA,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB,EAAE;AChYF,EAAE,CAAC,MAAM,CAAC,aAAa,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACtC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE;AACjB;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ;AAC7C,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,QAAQ,GAAG;AACxC,IAAI,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,OAAO,GAAG;AACtC;AACA,IAAI,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC;AAC3D,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI;AACtB,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI;AACvB,QAAQ,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,YAAY,EAAE;AACzC,QAAQ,CAAC,CAAC,kBAAkB,CAAC,CAAC,CAAC,KAAK;AACpC,QAAQ,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC;AACjD,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,IAAI;AAC7B,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI;AACvB,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,GAAG;AACxB,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,EAAE,WAAW,EAAE,CAAC,CAAC,WAAW,GAAG,SAAS,GAAG;AAC3E;AACA;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,OAAO,CAAC,SAAS;AACxB,IAAI,8DAA8D;AAClE;AACA,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,EAAE;AACrD;AACA,IAAI,OAAO;AACX,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC;AACpB,QAAQ,CAAC,aAAa,CAAC,KAAK,CAAC;AAC7B,QAAQ,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AAChD;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,KAAK,CAAC,QAAQ;AACrB,IAAI,8DAA8D;AAClE;AACA,IAAI,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC;AAC/B,QAAQ,WAAW,CAAC,KAAK,GAAG;AAC5B,QAAQ,WAAW,CAAC,MAAM,CAAC,QAAQ,EAAE;AACrC;AACA,QAAQ,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC;AACvC,YAAY,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,CAAC,IAAI,EAAE;AAC5C;AACA,YAAY,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,EAAE;AACxC;AACA,YAAY,GAAG,CAAC,cAAc,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACnF,YAAY,GAAG,CAAC,eAAe,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,MAAM,EAAE;AACtF;AACA,YAAY,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC;AACvC,gBAAgB,EAAE,CAAC,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC;AACrC,oBAAoB,SAAS,CAAC,IAAI,CAAC,KAAK,EAAE;AAC1C,gBAAgB,CAAC,CAAC,IAAI,CAAC,CAAC;AACxB,oBAAoB,SAAS,CAAC,UAAU,GAAG,QAAQ,CAAC,QAAQ,EAAE,IAAI,CAAC,KAAK,EAAE;AAC1E,gBAAgB,CAAC;AACjB,YAAY,EAAE;AACd,YAAY,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;AACxC;AACA,YAAY,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC;AAClE,YAAY,EAAE,CAAC,EAAE,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC;AACxC,gBAAgB,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,SAAS,EAAE;AAClD,gBAAgB,MAAM,CAAC,KAAK,CAAC;AAC7B,YAAY,CAAC,CAAC,IAAI,CAAC,CAAC;AACpB,gBAAgB,SAAS,CAAC,SAAS,GAAG,EAAE,CAAC,MAAM,GAAG,MAAM,GAAG;AAC3D,YAAY,CAAC;AACb;AACA,YAAY,QAAQ,CAAC,KAAK,CAAC,cAAc,EAAE,MAAM,CAAC,eAAe,EAAE,MAAM,CAAC,MAAM,EAAE;AAClF,YAAY,SAAS,CAAC,IAAI,CAAC,QAAQ,EAAE;AACrC,QAAQ,GAAG;AACX;AACA,QAAQ,WAAW,CAAC,SAAS,EAAE,aAAa,CAAC,SAAS,GAAG;AACzD,QAAQ,MAAM,CAAC,KAAK,CAAC;AACrB,IAAI,CAAC;AACL;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC;AACxD,IAAI,8DAA8D;AAClE;AACA,IAAI,QAAQ,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AACpE,QAAQ,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC;AACtB,YAA
Y,GAAG,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC;AAC/B,YAAY,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,EAAE;AACrD,YAAY,KAAK,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC;AAC7B,YAAY,OAAO,CAAC,CAAC,GAAG,CAAC,OAAO;AAChC,QAAQ,EAAE;AACV,QAAQ,EAAE,CAAC,EAAE,kBAAkB,CAAC,CAAC,CAAC;AAClC,YAAY,MAAM,CAAC,GAAG,CAAC,OAAO,CAAC;AAC/B,YAAY,MAAM,CAAC,GAAG,CAAC,MAAM,CAAC,OAAO,CAAC;AACtC,QAAQ,CAAC;AACT,QAAQ,OAAO,CAAC,IAAI,CAAC,GAAG,EAAE,MAAM,CAAC,KAAK,EAAE;AACxC,IAAI,GAAG;AACP;AACA,IAAI,QAAQ,CAAC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AACnE,QAAQ,OAAO,CAAC,MAAM,CAAC,IAAI,EAAE;AAC7B,IAAI,GAAG;AACP;AACA,IAAI,QAAQ,CAAC,QAAQ,CAAC,EAAE,EAAE,gBAAgB,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;AACpE,QAAQ,OAAO,GAAG;AAClB,IAAI,GAAG;AACP;AACA,IAAI,8DAA8D;AAClE,IAAI,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS;AAC9B,IAAI,8DAA8D;AAClE;AACA,IAAI,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,UAAU;AACpC,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,QAAQ,CAAC;AAC9B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC;AAC5B,IAAI,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE;AACrD;AACA,IAAI,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,aAAa,CAAC,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,SAAS;AAC/E,IAAI,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,IAAI,CAAC,CAAC;AACxC,QAAQ,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,SAAS,CAAC,MAAM;AAC5D,QAAQ,MAAM,CAAC,aAAa,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,eAAe,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,IAAI;AACzG,QAAQ,YAAY,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,YAAY,GAAG,SAAS,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,YAAY,CAAC,CAAC,IAAI;AAC/G,QAAQ,kBAAkB,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,kBAAkB,GAAG,GAAG,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,kBAAkB,CAAC,CAAC,IAAI;AACrH;AACA,QAAQ,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM;AACzD,QAAQ,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACjE,YAAY,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;AACtB,YAAY,QAAQ,CAAC,KAAK,CAAC,KAAK,EAAE;AAClC,QAAQ,GAAG;AACX,QAAQ,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACvE,YAAY,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;AACzB,YAAY,WAAW,CAAC,KAAK,CAAC,QAAQ,EAAE;AACxC,YAAY,QAAQ,CAAC,QAAQ,CAAC,QAAQ,EAAE;AACxC,QAAQ,GAAG;AACX,QAAQ,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE;AACnE,YAAY,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;AAC3E,YAAY,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;AAC7E,YAAY,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AAC9E,YAAY,MAAM,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC;AAC5E,YAAY,QAAQ,CAAC,MAAM,CAAC,MAAM,EAAE;AACpC,QAAQ,EAAE;AACV,IAAI,GAAG;AACP,IAAI,EAAE,CAAC,KAAK,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,QAAQ,EAAE;AAC7C,IAAI,EAAE,CAAC,KAAK,CAAC,WAAW,CAAC,KAAK,EAAE;AAChC,IAAI,MAAM,CAAC,KAAK,CAAC;AACjB;AACA,EAAE","sourcesContent":["\n// set up main nv object\nvar nv = {};\n\n// the major global objects under the nv namespace\nnv.dev = false; //set false when in 
production\nnv.tooltip = nv.tooltip || {}; // For the tooltip system\nnv.utils = nv.utils || {}; // Utility subsystem\nnv.models = nv.models || {}; //stores all the possible models/components\nnv.charts = {}; //stores all the ready to use charts\nnv.logs = {}; //stores some statistics and potential error messages\nnv.dom = {}; //DOM manipulation functions\n\n// Node/CommonJS - require D3\nif (typeof(module) !== 'undefined' && typeof(exports) !== 'undefined' && typeof(d3) == 'undefined') {\n    d3 = require('d3');\n}\n\nnv.dispatch = d3.dispatch('render_start', 'render_end');\n\n// Function bind polyfill\n// Needed ONLY for phantomJS as it's missing until version 2.0 which is unreleased as of this comment\n// https://github.com/ariya/phantomjs/issues/10522\n// http://kangax.github.io/compat-table/es5/#Function.prototype.bind\n// phantomJS is used for running the test suite\nif (!Function.prototype.bind) {\n    Function.prototype.bind = function (oThis) {\n        if (typeof this !== \"function\") {\n            // closest thing possible to the ECMAScript 5 internal IsCallable function\n            throw new TypeError(\"Function.prototype.bind - what is trying to be bound is not callable\");\n        }\n\n        var aArgs = Array.prototype.slice.call(arguments, 1),\n            fToBind = this,\n            fNOP = function () {},\n            fBound = function () {\n                return fToBind.apply(this instanceof fNOP && oThis\n                        ? this\n                        : oThis,\n                    aArgs.concat(Array.prototype.slice.call(arguments)));\n            };\n\n        fNOP.prototype = this.prototype;\n        fBound.prototype = new fNOP();\n        return fBound;\n    };\n}\n\n//  Development render timers - disabled if dev = false\nif (nv.dev) {\n    nv.dispatch.on('render_start', function(e) {\n        nv.logs.startTime = +new Date();\n    });\n\n    nv.dispatch.on('render_end', function(e) {\n        nv.logs.endTime = +new Date();\n        nv.logs.totalTime = nv.logs.endTime - nv.logs.startTime;\n        nv.log('total', nv.logs.totalTime); // used for development, to keep track of graph generation times\n    });\n}\n\n// Logs all arguments, and returns the last so you can test things in place\n// Note: in IE8 console.log is an object not a function, and if modernizr is used\n// then calling Function.prototype.bind with with anything other than a function\n// causes a TypeError to be thrown.\nnv.log = function() {\n    if (nv.dev && window.console && console.log && console.log.apply)\n        console.log.apply(console, arguments);\n    else if (nv.dev && window.console && typeof console.log == \"function\" && Function.prototype.bind) {\n        var log = Function.prototype.bind.call(console.log, console);\n        log.apply(console, arguments);\n    }\n    return arguments[arguments.length - 1];\n};\n\n// print console warning, should be used by deprecated functions\nnv.deprecated = function(name, info) {\n    if (console && console.warn) {\n        console.warn('nvd3 warning: `' + name + '` has been deprecated. 
', info || '');\n    }\n};\n\n// The nv.render function is used to queue up chart rendering\n// in non-blocking async functions.\n// When all queued charts are done rendering, nv.dispatch.render_end is invoked.\nnv.render = function render(step) {\n    // number of graphs to generate in each timeout loop\n    step = step || 1;\n\n    nv.render.active = true;\n    nv.dispatch.render_start();\n\n    var renderLoop = function() {\n        var chart, graph;\n\n        for (var i = 0; i < step && (graph = nv.render.queue[i]); i++) {\n            chart = graph.generate();\n            if (typeof graph.callback == typeof(Function)) graph.callback(chart);\n        }\n\n        nv.render.queue.splice(0, i);\n\n        if (nv.render.queue.length) {\n            setTimeout(renderLoop);\n        }\n        else {\n            nv.dispatch.render_end();\n            nv.render.active = false;\n        }\n    };\n\n    setTimeout(renderLoop);\n};\n\nnv.render.active = false;\nnv.render.queue = [];\n\n/*\nAdds a chart to the async rendering queue. This method can take arguments in two forms:\nnv.addGraph({\n    generate: <Function>\n    callback: <Function>\n})\n\nor\n\nnv.addGraph(<generate Function>, <callback Function>)\n\nThe generate function should contain code that creates the NVD3 model, sets options\non it, adds data to an SVG element, and invokes the chart model. The generate function\nshould return the chart model.  See examples/lineChart.html for a usage example.\n\nThe callback function is optional, and it is called when the generate function completes.\n*/\nnv.addGraph = function(obj) {\n    if (typeof arguments[0] === typeof(Function)) {\n        obj = {generate: arguments[0], callback: arguments[1]};\n    }\n\n    nv.render.queue.push(obj);\n\n    if (!nv.render.active) {\n        nv.render();\n    }\n};\n\n// Node/CommonJS exports\nif (typeof(module) !== 'undefined' && typeof(exports) !== 'undefined') {\n  module.exports = nv;\n}\n\nif (typeof(window) !== 'undefined') {\n  window.nv = nv;\n}\n","/* Facade for queueing DOM write operations\r\n * with Fastdom (https://github.com/wilsonpage/fastdom)\r\n * if available.\r\n * This could easily be extended to support alternate\r\n * implementations in the future.\r\n */\r\nnv.dom.write = function(callback) {\r\n\tif (window.fastdom !== undefined) {\r\n\t\treturn fastdom.mutate(callback);\r\n\t}\r\n\treturn callback();\r\n};\r\n\r\n/* Facade for queueing DOM read operations\r\n * with Fastdom (https://github.com/wilsonpage/fastdom)\r\n * if available.\r\n * This could easily be extended to support alternate\r\n * implementations in the future.\r\n */\r\nnv.dom.read = function(callback) {\r\n\tif (window.fastdom !== undefined) {\r\n\t\treturn fastdom.measure(callback);\r\n\t}\r\n\treturn callback();\r\n};\r\n","/* Utility class to handle creation of an interactive layer.\n This places a rectangle on top of the chart. When you mouse move over it, it sends a dispatch\n containing the X-coordinate. It can also render a vertical line where the mouse is located.\n\n dispatch.elementMousemove is the important event to latch onto.  It is fired whenever the mouse moves over\n the rectangle. The dispatch is given one object which contains the mouseX/Y location.\n It also has 'pointXValue', which is the conversion of mouseX to the x-axis scale.\n */\nnv.interactiveGuideline = function() {\n    \"use strict\";\n\n    var margin = { left: 0, top: 0 } //Pass the chart's top and left magins. 
Used to calculate the mouseX/Y.\n        ,   width = null\n        ,   height = null\n        ,   xScale = d3.scale.linear()\n        ,   dispatch = d3.dispatch('elementMousemove', 'elementMouseout', 'elementClick', 'elementDblclick', 'elementMouseDown', 'elementMouseUp')\n        ,   showGuideLine = true\n        ,   svgContainer = null // Must pass the chart's svg, we'll use its mousemove event.\n        ,   tooltip = nv.models.tooltip()\n        ,   isMSIE =  window.ActiveXObject// Checkt if IE by looking for activeX. (excludes IE11)\n    ;\n\n    tooltip\n        .duration(0)\n        .hideDelay(0)\n        .hidden(false);\n\n    function layer(selection) {\n        selection.each(function(data) {\n            var container = d3.select(this);\n            var availableWidth = (width || 960), availableHeight = (height || 400);\n            var wrap = container.selectAll(\"g.nv-wrap.nv-interactiveLineLayer\")\n                .data([data]);\n            var wrapEnter = wrap.enter()\n                .append(\"g\").attr(\"class\", \" nv-wrap nv-interactiveLineLayer\");\n            wrapEnter.append(\"g\").attr(\"class\",\"nv-interactiveGuideLine\");\n\n            if (!svgContainer) {\n                return;\n            }\n\n            function mouseHandler() {\n                var d3mouse = d3.mouse(this);\n                var mouseX = d3mouse[0];\n                var mouseY = d3mouse[1];\n                var subtractMargin = true;\n                var mouseOutAnyReason = false;\n                if (isMSIE) {\n                    /*\n                     D3.js (or maybe SVG.getScreenCTM) has a nasty bug in Internet Explorer 10.\n                     d3.mouse() returns incorrect X,Y mouse coordinates when mouse moving\n                     over a rect in IE 10.\n                     However, d3.event.offsetX/Y also returns the mouse coordinates\n                     relative to the triggering <rect>. So we use offsetX/Y on IE.\n                     */\n                    mouseX = d3.event.offsetX;\n                    mouseY = d3.event.offsetY;\n\n                    /*\n                     On IE, if you attach a mouse event listener to the <svg> container,\n                     it will actually trigger it for all the child elements (like <path>, <circle>, etc).\n                     When this happens on IE, the offsetX/Y is set to where ever the child element\n                     is located.\n                     As a result, we do NOT need to subtract margins to figure out the mouse X/Y\n                     position under this scenario. 
Removing the line below *will* cause\n                     the interactive layer to not work right on IE.\n                     */\n                    if(d3.event.target.tagName !== \"svg\") {\n                        subtractMargin = false;\n                    }\n\n                    if (d3.event.target.className.baseVal.match(\"nv-legend\")) {\n                        mouseOutAnyReason = true;\n                    }\n\n                }\n\n                if(subtractMargin) {\n                    mouseX -= margin.left;\n                    mouseY -= margin.top;\n                }\n\n                /* If mouseX/Y is outside of the chart's bounds,\n                 trigger a mouseOut event.\n                 */\n                if (d3.event.type === 'mouseout'\n                    || mouseX < 0 || mouseY < 0\n                    || mouseX > availableWidth || mouseY > availableHeight\n                    || (d3.event.relatedTarget && d3.event.relatedTarget.ownerSVGElement === undefined)\n                    || mouseOutAnyReason\n                    ) {\n\n                    if (isMSIE) {\n                        if (d3.event.relatedTarget\n                            && d3.event.relatedTarget.ownerSVGElement === undefined\n                            && (d3.event.relatedTarget.className === undefined\n                                || d3.event.relatedTarget.className.match(tooltip.nvPointerEventsClass))) {\n\n                            return;\n                        }\n                    }\n                    dispatch.elementMouseout({\n                        mouseX: mouseX,\n                        mouseY: mouseY\n                    });\n                    layer.renderGuideLine(null); //hide the guideline\n                    tooltip.hidden(true);\n                    return;\n                } else {\n                    tooltip.hidden(false);\n                }\n\n\n                var scaleIsOrdinal = typeof xScale.rangeBands === 'function';\n                var pointXValue = undefined;\n\n                // Ordinal scale has no invert method\n                if (scaleIsOrdinal) {\n                    var elementIndex = d3.bisect(xScale.range(), mouseX) - 1;\n                    // Check if mouseX is in the range band\n                    if (xScale.range()[elementIndex] + xScale.rangeBand() >= mouseX) {\n                        pointXValue = xScale.domain()[d3.bisect(xScale.range(), mouseX) - 1];\n                    }\n                    else {\n                        dispatch.elementMouseout({\n                            mouseX: mouseX,\n                            mouseY: mouseY\n                        });\n                        layer.renderGuideLine(null); //hide the guideline\n                        tooltip.hidden(true);\n                        return;\n                    }\n                }\n                else {\n                    pointXValue = xScale.invert(mouseX);\n                }\n\n                dispatch.elementMousemove({\n                    mouseX: mouseX,\n                    mouseY: mouseY,\n                    pointXValue: pointXValue\n                });\n\n                //If user double clicks the layer, fire a elementDblclick\n                if (d3.event.type === \"dblclick\") {\n                    dispatch.elementDblclick({\n                        mouseX: mouseX,\n                        mouseY: mouseY,\n                        pointXValue: pointXValue\n                    });\n                }\n\n                // if user 
single clicks the layer, fire elementClick\n                if (d3.event.type === 'click') {\n                    dispatch.elementClick({\n                        mouseX: mouseX,\n                        mouseY: mouseY,\n                        pointXValue: pointXValue\n                    });\n                }\n\n                // if user presses mouse down the layer, fire elementMouseDown\n                if (d3.event.type === 'mousedown') {\n                \tdispatch.elementMouseDown({\n                \t\tmouseX: mouseX,\n                \t\tmouseY: mouseY,\n                \t\tpointXValue: pointXValue\n                \t});\n                }\n\n                // if user presses mouse down the layer, fire elementMouseUp\n                if (d3.event.type === 'mouseup') {\n                \tdispatch.elementMouseUp({\n                \t\tmouseX: mouseX,\n                \t\tmouseY: mouseY,\n                \t\tpointXValue: pointXValue\n                \t});\n                }\n            }\n\n            svgContainer\n                .on(\"touchmove\",mouseHandler)\n                .on(\"mousemove\",mouseHandler, true)\n                .on(\"mouseout\" ,mouseHandler,true)\n                .on(\"mousedown\" ,mouseHandler,true)\n                .on(\"mouseup\" ,mouseHandler,true)\n                .on(\"dblclick\" ,mouseHandler)\n                .on(\"click\", mouseHandler)\n            ;\n\n            layer.guideLine = null;\n            //Draws a vertical guideline at the given X postion.\n            layer.renderGuideLine = function(x) {\n                if (!showGuideLine) return;\n                if (layer.guideLine && layer.guideLine.attr(\"x1\") === x) return;\n                nv.dom.write(function() {\n                    var line = wrap.select(\".nv-interactiveGuideLine\")\n                        .selectAll(\"line\")\n                        .data((x != null) ? [nv.utils.NaNtoZero(x)] : [], String);\n                    line.enter()\n                        .append(\"line\")\n                        .attr(\"class\", \"nv-guideline\")\n                        .attr(\"x1\", function(d) { return d;})\n                        .attr(\"x2\", function(d) { return d;})\n                        .attr(\"y1\", availableHeight)\n                        .attr(\"y2\",0);\n                    line.exit().remove();\n                });\n            }\n        });\n    }\n\n    layer.dispatch = dispatch;\n    layer.tooltip = tooltip;\n\n    layer.margin = function(_) {\n        if (!arguments.length) return margin;\n        margin.top    = typeof _.top    != 'undefined' ? _.top    : margin.top;\n        margin.left   = typeof _.left   != 'undefined' ? 
_.left   : margin.left;\n        return layer;\n    };\n\n    layer.width = function(_) {\n        if (!arguments.length) return width;\n        width = _;\n        return layer;\n    };\n\n    layer.height = function(_) {\n        if (!arguments.length) return height;\n        height = _;\n        return layer;\n    };\n\n    layer.xScale = function(_) {\n        if (!arguments.length) return xScale;\n        xScale = _;\n        return layer;\n    };\n\n    layer.showGuideLine = function(_) {\n        if (!arguments.length) return showGuideLine;\n        showGuideLine = _;\n        return layer;\n    };\n\n    layer.svgContainer = function(_) {\n        if (!arguments.length) return svgContainer;\n        svgContainer = _;\n        return layer;\n    };\n\n    return layer;\n};\n\n/* Utility class that uses d3.bisect to find the index in a given array, where a search value can be inserted.\n This is different from normal bisectLeft; this function finds the nearest index to insert the search value.\n\n For instance, lets say your array is [1,2,3,5,10,30], and you search for 28.\n Normal d3.bisectLeft will return 4, because 28 is inserted after the number 10.  But interactiveBisect will return 5\n because 28 is closer to 30 than 10.\n\n Unit tests can be found in: interactiveBisectTest.html\n\n Has the following known issues:\n * Will not work if the data points move backwards (ie, 10,9,8,7, etc) or if the data points are in random order.\n * Won't work if there are duplicate x coordinate values.\n */\nnv.interactiveBisect = function (values, searchVal, xAccessor) {\n    \"use strict\";\n    if (! (values instanceof Array)) {\n        return null;\n    }\n    var _xAccessor;\n    if (typeof xAccessor !== 'function') {\n        _xAccessor = function(d) {\n            return d.x;\n        }\n    } else {\n        _xAccessor = xAccessor;\n    }\n    var _cmp = function(d, v) {\n        // Accessors are no longer passed the index of the element along with\n        // the element itself when invoked by d3.bisector.\n        //\n        // Starting at D3 v3.4.4, d3.bisector() started inspecting the\n        // function passed to determine if it should consider it an accessor\n        // or a comparator. 
This meant that accessors that take two arguments\n        // (expecting an index as the second parameter) are treated as\n        // comparators where the second argument is the search value against\n        // which the first argument is compared.\n        return _xAccessor(d) - v;\n    };\n\n    var bisect = d3.bisector(_cmp).left;\n    var index = d3.max([0, bisect(values,searchVal) - 1]);\n    var currentValue = _xAccessor(values[index]);\n\n    if (typeof currentValue === 'undefined') {\n        currentValue = index;\n    }\n\n    if (currentValue === searchVal) {\n        return index; //found exact match\n    }\n\n    var nextIndex = d3.min([index+1, values.length - 1]);\n    var nextValue = _xAccessor(values[nextIndex]);\n\n    if (typeof nextValue === 'undefined') {\n        nextValue = nextIndex;\n    }\n\n    if (Math.abs(nextValue - searchVal) >= Math.abs(currentValue - searchVal)) {\n        return index;\n    } else {\n        return nextIndex\n    }\n};\n\n/*\n Returns the index in the array \"values\" that is closest to searchVal.\n Only returns an index if searchVal is within some \"threshold\".\n Otherwise, returns null.\n */\nnv.nearestValueIndex = function (values, searchVal, threshold) {\n    \"use strict\";\n    var yDistMax = Infinity, indexToHighlight = null;\n    values.forEach(function(d,i) {\n        var delta = Math.abs(searchVal - d);\n        if ( d != null && delta <= yDistMax && delta < threshold) {\n            yDistMax = delta;\n            indexToHighlight = i;\n        }\n    });\n    return indexToHighlight;\n};\n","\n/* Model which can be instantiated to handle tooltip rendering.\n Example usage:\n var tip = nv.models.tooltip().gravity('w').distance(23)\n .data(myDataObject);\n\n tip();    //just invoke the returned function to render tooltip.\n */\nnv.models.tooltip = function() {\n    \"use strict\";\n\n    /*\n    Tooltip data. If data is given in the proper format, a consistent tooltip is generated.\n    Example Format of data:\n    {\n        key: \"Date\",\n        value: \"August 2009\",\n        series: [\n            {key: \"Series 1\", value: \"Value 1\", color: \"#000\"},\n            {key: \"Series 2\", value: \"Value 2\", color: \"#00f\"}\n        ]\n    }\n    */\n    var id = \"nvtooltip-\" + Math.floor(Math.random() * 100000) // Generates a unique id when you create a new tooltip() object.\n        ,   data = null\n        ,   gravity = 'w'   // Can be 'n','s','e','w'. Determines how tooltip is positioned.\n        ,   distance = 25 // Distance to offset tooltip from the mouse location.\n        ,   snapDistance = 0   // Tolerance allowed before tooltip is moved from its current position (creates 'snapping' effect)\n        ,   classes = null  // Attaches additional CSS classes to the tooltip DIV that is created.\n        ,   hidden = true  // Start off hidden, toggle with hide/show functions below.\n        ,   hideDelay = 200  // Delay (in ms) before the tooltip hides after calling hide().\n        ,   tooltip = null // d3 select of the tooltip div.\n        ,   lastPosition = { left: null, top: null } // Last position the tooltip was in.\n        ,   enabled = true  // True -> tooltips are rendered. 
False -> don't render tooltips.\n        ,   duration = 100 // Tooltip movement duration, in ms.\n        ,   headerEnabled = true // If is to show the tooltip header.\n        ,   nvPointerEventsClass = \"nv-pointer-events-none\" // CSS class to specify whether element should not have mouse events.\n    ;\n\n    // Format function for the tooltip values column.\n    var valueFormatter = function(d, i) {\n        return d;\n    };\n\n    // Format function for the tooltip header value.\n    var headerFormatter = function(d) {\n        return d;\n    };\n\n    var keyFormatter = function(d, i) {\n        return d;\n    };\n\n    // By default, the tooltip model renders a beautiful table inside a DIV.\n    // You can override this function if a custom tooltip is desired.\n    var contentGenerator = function(d) {\n        if (d === null) {\n            return '';\n        }\n\n        var table = d3.select(document.createElement(\"table\"));\n        if (headerEnabled) {\n            var theadEnter = table.selectAll(\"thead\")\n                .data([d])\n                .enter().append(\"thead\");\n\n            theadEnter.append(\"tr\")\n                .append(\"td\")\n                .attr(\"colspan\", 3)\n                .append(\"strong\")\n                .classed(\"x-value\", true)\n                .html(headerFormatter(d.value));\n        }\n\n        var tbodyEnter = table.selectAll(\"tbody\")\n            .data([d])\n            .enter().append(\"tbody\");\n\n        var trowEnter = tbodyEnter.selectAll(\"tr\")\n                .data(function(p) { return p.series})\n                .enter()\n                .append(\"tr\")\n                .classed(\"highlight\", function(p) { return p.highlight});\n\n        trowEnter.append(\"td\")\n            .classed(\"legend-color-guide\",true)\n            .append(\"div\")\n            .style(\"background-color\", function(p) { return p.color});\n\n        trowEnter.append(\"td\")\n            .classed(\"key\",true)\n            .classed(\"total\",function(p) { return !!p.total})\n            .html(function(p, i) { return keyFormatter(p.key, i)});\n\n        trowEnter.append(\"td\")\n            .classed(\"value\",true)\n            .html(function(p, i) { return valueFormatter(p.value, i) });\n\n        trowEnter.filter(function (p,i) { return p.percent !== undefined }).append(\"td\")\n            .classed(\"percent\", true)\n            .html(function(p, i) { return \"(\" + d3.format('%')(p.percent) + \")\" });\n\n        trowEnter.selectAll(\"td\").each(function(p) {\n            if (p.highlight) {\n                var opacityScale = d3.scale.linear().domain([0,1]).range([\"#fff\",p.color]);\n                var opacity = 0.6;\n                d3.select(this)\n                    .style(\"border-bottom-color\", opacityScale(opacity))\n                    .style(\"border-top-color\", opacityScale(opacity))\n                ;\n            }\n        });\n\n        var html = table.node().outerHTML;\n        if (d.footer !== undefined)\n            html += \"<div class='footer'>\" + d.footer + \"</div>\";\n        return html;\n\n    };\n\n    /*\n     Function that returns the position (relative to the viewport/document.body)\n     the tooltip should be placed in.\n     Should return: {\n        left: <leftPos>,\n        top: <topPos>\n     }\n     */\n    var position = function() {\n        var pos = {\n            left: d3.event !== null ? d3.event.clientX : 0,\n            top: d3.event !== null ? 
d3.event.clientY : 0\n        };\n\n        if(getComputedStyle(document.body).transform != 'none') {\n            // Take the offset into account, as now the tooltip is relative\n            // to document.body.\n            var client = document.body.getBoundingClientRect();\n            pos.left -= client.left;\n            pos.top -= client.top;\n        }\n\n        return pos;\n    };\n\n    var dataSeriesExists = function(d) {\n        if (d && d.series) {\n            if (nv.utils.isArray(d.series)) {\n                return true;\n            }\n            // if object, it's okay just convert to array of the object\n            if (nv.utils.isObject(d.series)) {\n                d.series = [d.series];\n                return true;\n            }\n        }\n        return false;\n    };\n\n    // Calculates the gravity offset of the tooltip. Parameter is position of tooltip\n    // relative to the viewport.\n    var calcGravityOffset = function(pos) {\n        var height = tooltip.node().offsetHeight,\n            width = tooltip.node().offsetWidth,\n            clientWidth = document.documentElement.clientWidth, // Don't want scrollbars.\n            clientHeight = document.documentElement.clientHeight, // Don't want scrollbars.\n            left, top, tmp;\n\n        // calculate position based on gravity\n        switch (gravity) {\n            case 'e':\n                left = - width - distance;\n                top = - (height / 2);\n                if(pos.left + left < 0) left = distance;\n                if((tmp = pos.top + top) < 0) top -= tmp;\n                if((tmp = pos.top + top + height) > clientHeight) top -= tmp - clientHeight;\n                break;\n            case 'w':\n                left = distance;\n                top = - (height / 2);\n                if (pos.left + left + width > clientWidth) left = - width - distance;\n                if ((tmp = pos.top + top) < 0) top -= tmp;\n                if ((tmp = pos.top + top + height) > clientHeight) top -= tmp - clientHeight;\n                break;\n            case 'n':\n                left = - (width / 2) - 5; // - 5 is an approximation of the mouse's height.\n                top = distance;\n                if (pos.top + top + height > clientHeight) top = - height - distance;\n                if ((tmp = pos.left + left) < 0) left -= tmp;\n                if ((tmp = pos.left + left + width) > clientWidth) left -= tmp - clientWidth;\n                break;\n            case 's':\n                left = - (width / 2);\n                top = - height - distance;\n                if (pos.top + top < 0) top = distance;\n                if ((tmp = pos.left + left) < 0) left -= tmp;\n                if ((tmp = pos.left + left + width) > clientWidth) left -= tmp - clientWidth;\n                break;\n            case 'center':\n                left = - (width / 2);\n                top = - (height / 2);\n                break;\n            default:\n                left = 0;\n                top = 0;\n                break;\n        }\n\n        return { 'left': left, 'top': top };\n    };\n\n    /*\n     Positions the tooltip in the correct place, as given by the position() function.\n     */\n    var positionTooltip = function() {\n        nv.dom.read(function() {\n            var pos = position(),\n                gravityOffset = calcGravityOffset(pos),\n                left = pos.left + gravityOffset.left,\n                top = pos.top + gravityOffset.top;\n\n            // delay hiding a bit to avoid 
flickering\n            if (hidden) {\n                tooltip\n                    .interrupt()\n                    .transition()\n                    .delay(hideDelay)\n                    .duration(0)\n                    .style('opacity', 0);\n            } else {\n                // using tooltip.style('transform') returns values un-usable for tween\n                var old_translate = 'translate(' + lastPosition.left + 'px, ' + lastPosition.top + 'px)';\n                var new_translate = 'translate(' + Math.round(left) + 'px, ' + Math.round(top) + 'px)';\n                var translateInterpolator = d3.interpolateString(old_translate, new_translate);\n                var is_hidden = tooltip.style('opacity') < 0.1;\n\n                tooltip\n                    .interrupt() // cancel running transitions\n                    .transition()\n                    .duration(is_hidden ? 0 : duration)\n                    // using tween since some versions of d3 can't auto-tween a translate on a div\n                    .styleTween('transform', function (d) {\n                        return translateInterpolator;\n                    }, 'important')\n                    // Safari has its own `-webkit-transform` and does not support `transform`\n                    .styleTween('-webkit-transform', function (d) {\n                        return translateInterpolator;\n                    })\n                    .style('-ms-transform', new_translate)\n                    .style('opacity', 1);\n            }\n\n            lastPosition.left = left;\n            lastPosition.top = top;\n        });\n    };\n\n    // Creates new tooltip container, or uses existing one on DOM.\n    function initTooltip() {\n        if (!tooltip || !tooltip.node()) {\n            // Create new tooltip div if it doesn't exist on DOM.\n\n            var data = [1];\n            tooltip = d3.select(document.body).select('#'+id).data(data);\n\n            tooltip.enter().append('div')\n                   .attr(\"class\", \"nvtooltip \" + (classes ? 
classes : \"xy-tooltip\"))\n                   .attr(\"id\", id)\n                   .style(\"top\", 0).style(\"left\", 0)\n                   .style('opacity', 0)\n                   .style('position', 'fixed')\n                   .selectAll(\"div, table, td, tr\").classed(nvPointerEventsClass, true)\n                   .classed(nvPointerEventsClass, true);\n\n            tooltip.exit().remove()\n        }\n    }\n\n    // Draw the tooltip onto the DOM.\n    function nvtooltip() {\n        if (!enabled) return;\n        if (!dataSeriesExists(data)) return;\n\n        nv.dom.write(function () {\n            initTooltip();\n            // Generate data and set it into tooltip.\n            // Bonus - If you override contentGenerator and return falsey you can use something like\n            //         React or Knockout to bind the data for your tooltip.\n            var newContent = contentGenerator(data);\n            if (newContent) {\n                tooltip.node().innerHTML = newContent;\n            }\n\n            positionTooltip();\n        });\n\n        return nvtooltip;\n    }\n\n    nvtooltip.nvPointerEventsClass = nvPointerEventsClass;\n    nvtooltip.options = nv.utils.optionsFunc.bind(nvtooltip);\n\n    nvtooltip._options = Object.create({}, {\n        // simple read/write options\n        duration: {get: function(){return duration;}, set: function(_){duration=_;}},\n        gravity: {get: function(){return gravity;}, set: function(_){gravity=_;}},\n        distance: {get: function(){return distance;}, set: function(_){distance=_;}},\n        snapDistance: {get: function(){return snapDistance;}, set: function(_){snapDistance=_;}},\n        classes: {get: function(){return classes;}, set: function(_){classes=_;}},\n        enabled: {get: function(){return enabled;}, set: function(_){enabled=_;}},\n        hideDelay: {get: function(){return hideDelay;}, set: function(_){hideDelay=_;}},\n        contentGenerator: {get: function(){return contentGenerator;}, set: function(_){contentGenerator=_;}},\n        valueFormatter: {get: function(){return valueFormatter;}, set: function(_){valueFormatter=_;}},\n        headerFormatter: {get: function(){return headerFormatter;}, set: function(_){headerFormatter=_;}},\n        keyFormatter: {get: function(){return keyFormatter;}, set: function(_){keyFormatter=_;}},\n        headerEnabled: {get: function(){return headerEnabled;}, set: function(_){headerEnabled=_;}},\n        position: {get: function(){return position;}, set: function(_){position=_;}},\n\n        // Deprecated options\n        chartContainer: {get: function(){return document.body;}, set: function(_){\n            // deprecated after 1.8.3\n            nv.deprecated('chartContainer', 'feature removed after 1.8.3');\n        }},\n        fixedTop: {get: function(){return null;}, set: function(_){\n            // deprecated after 1.8.1\n            nv.deprecated('fixedTop', 'feature removed after 1.8.1');\n        }},\n        offset: {get: function(){return {left: 0, top: 0};}, set: function(_){\n            // deprecated after 1.8.1\n            nv.deprecated('offset', 'use chart.tooltip.distance() instead');\n        }},\n\n        // options with extra logic\n        hidden: {get: function(){return hidden;}, set: function(_){\n            if (hidden != _) {\n                hidden = !!_;\n                nvtooltip();\n            }\n        }},\n        data: {get: function(){return data;}, set: function(_){\n            // if showing a single data point, adjust data format with 
that\n            if (_.point) {\n                _.value = _.point.x;\n                _.series = _.series || {};\n                _.series.value = _.point.y;\n                _.series.color = _.point.color || _.series.color;\n            }\n            data = _;\n        }},\n\n        // read only properties\n        node: {get: function(){return tooltip.node();}, set: function(_){}},\n        id: {get: function(){return id;}, set: function(_){}}\n    });\n\n    nv.utils.initOptions(nvtooltip);\n    return nvtooltip;\n};\n","\n\n/*\nGets the browser window size\n\nReturns object with height and width properties\n */\nnv.utils.windowSize = function() {\n    // Sane defaults\n    var size = {width: 640, height: 480};\n\n    // Most recent browsers use\n    if (window.innerWidth && window.innerHeight) {\n        size.width = window.innerWidth;\n        size.height = window.innerHeight;\n        return (size);\n    }\n\n    // IE can use depending on mode it is in\n    if (document.compatMode=='CSS1Compat' &&\n        document.documentElement &&\n        document.documentElement.offsetWidth ) {\n\n        size.width = document.documentElement.offsetWidth;\n        size.height = document.documentElement.offsetHeight;\n        return (size);\n    }\n\n    // Earlier IE uses Doc.body\n    if (document.body && document.body.offsetWidth) {\n        size.width = document.body.offsetWidth;\n        size.height = document.body.offsetHeight;\n        return (size);\n    }\n\n    return (size);\n};\n\n\n/* handle dumb browser quirks...  isinstance breaks if you use frames\ntypeof returns 'object' for null, NaN is a number, etc.\n */\nnv.utils.isArray = Array.isArray;\nnv.utils.isObject = function(a) {\n    return a !== null && typeof a === 'object';\n};\nnv.utils.isFunction = function(a) {\n    return typeof a === 'function';\n};\nnv.utils.isDate = function(a) {\n    return toString.call(a) === '[object Date]';\n};\nnv.utils.isNumber = function(a) {\n    return !isNaN(a) && typeof a === 'number';\n};\n\n\n/*\nBinds callback function to run when window is resized\n */\nnv.utils.windowResize = function(handler) {\n    if (window.addEventListener) {\n        window.addEventListener('resize', handler);\n    } else {\n        nv.log(\"ERROR: Failed to bind to window.resize with: \", handler);\n    }\n    // return object with clear function to remove the single added callback.\n    return {\n        callback: handler,\n        clear: function() {\n            window.removeEventListener('resize', handler);\n        }\n    }\n};\n\n\n/*\nBackwards compatible way to implement more d3-like coloring of graphs.\nCan take in nothing, an array, or a function/scale\nTo use a normal scale, get the range and pass that because we must be able\nto take two arguments and use the index to keep backward compatibility\n*/\nnv.utils.getColor = function(color) {\n    //if you pass in nothing, get default colors back\n    if (color === undefined) {\n        return nv.utils.defaultColor();\n\n    //if passed an array, turn it into a color scale\n    } else if(nv.utils.isArray(color)) {\n        var color_scale = d3.scale.ordinal().range(color);\n        return function(d, i) {\n            var key = i === undefined ? 
d : i;\n            return d.color || color_scale(key);\n        };\n\n    //if passed a function or scale, return it, or whatever it may be\n    //external libs, such as angularjs-nvd3-directives use this\n    } else {\n        //can't really help it if someone passes rubbish as color\n        return color;\n    }\n};\n\n\n/*\nDefault color chooser uses a color scale of 20 colors from D3\n https://github.com/mbostock/d3/wiki/Ordinal-Scales#categorical-colors\n */\nnv.utils.defaultColor = function() {\n    // get range of the scale so we'll turn it into our own function.\n    return nv.utils.getColor(d3.scale.category20().range());\n};\n\n\n/*\nReturns a color function that takes the result of 'getKey' for each series and\nlooks for a corresponding color from the dictionary\n*/\nnv.utils.customTheme = function(dictionary, getKey, defaultColors) {\n    // use default series.key if getKey is undefined\n    getKey = getKey || function(series) { return series.key };\n    defaultColors = defaultColors || d3.scale.category20().range();\n\n    // start at end of default color list and walk back to index 0\n    var defIndex = defaultColors.length;\n\n    return function(series, index) {\n        var key = getKey(series);\n        if (nv.utils.isFunction(dictionary[key])) {\n            return dictionary[key]();\n        } else if (dictionary[key] !== undefined) {\n            return dictionary[key];\n        } else {\n            // no match in dictionary, use a default color\n            if (!defIndex) {\n                // used all the default colors, start over\n                defIndex = defaultColors.length;\n            }\n            defIndex = defIndex - 1;\n            return defaultColors[defIndex];\n        }\n    };\n};\n\n\n/*\nFrom the PJAX example on d3js.org. While this is not really directly needed,\nit's a very cool method for doing pjax; I may expand upon it a little bit,\nand I'm open to suggestions on anything that may be useful\n*/\nnv.utils.pjax = function(links, content) {\n\n    var load = function(href) {\n        d3.html(href, function(fragment) {\n            var target = d3.select(content).node();\n            target.parentNode.replaceChild(\n                d3.select(fragment).select(content).node(),\n                target);\n            nv.utils.pjax(links, content);\n        });\n    };\n\n    d3.selectAll(links).on(\"click\", function() {\n        history.pushState(this.href, this.textContent, this.href);\n        load(this.href);\n        d3.event.preventDefault();\n    });\n\n    d3.select(window).on(\"popstate\", function() {\n        if (d3.event.state) {\n            load(d3.event.state);\n        }\n    });\n};\n\n\n/*\nFor when we want to approximate the width in pixels for an SVG:text element.\nMost common instance is when the element is in a display:none; container.\nFormula is: text.length * font-size * constant_factor\n*/\nnv.utils.calcApproxTextWidth = function (svgTextElem) {\n    if (nv.utils.isFunction(svgTextElem.style) && nv.utils.isFunction(svgTextElem.text)) {\n        var fontSize = parseInt(svgTextElem.style(\"font-size\").replace(\"px\",\"\"), 10);\n        var textLength = svgTextElem.text().length;\n        return nv.utils.NaNtoZero(textLength * fontSize * 0.5);\n    }\n    return 0;\n};\n\n\n/*\nConvert numbers that are undefined, null or NaN to zero.\n*/\nnv.utils.NaNtoZero = function(n) {\n    if (!nv.utils.isNumber(n)\n        || isNaN(n)\n        || n === null\n        || n === Infinity\n        || n === -Infinity) {\n\n        return 
0;\n    }\n    return n;\n};\n\n/*\nAdd a way to watch for d3 transition ends to d3\n*/\nd3.selection.prototype.watchTransition = function(renderWatch){\n    var args = [this].concat([].slice.call(arguments, 1));\n    return renderWatch.transition.apply(renderWatch, args);\n};\n\n\n/*\nHelper object to watch when d3 has rendered something\n*/\nnv.utils.renderWatch = function(dispatch, duration) {\n    if (!(this instanceof nv.utils.renderWatch)) {\n        return new nv.utils.renderWatch(dispatch, duration);\n    }\n\n    var _duration = duration !== undefined ? duration : 250;\n    var renderStack = [];\n    var self = this;\n\n    this.models = function(models) {\n        models = [].slice.call(arguments, 0);\n        models.forEach(function(model){\n            model.__rendered = false;\n            (function(m){\n                m.dispatch.on('renderEnd', function(arg){\n                    m.__rendered = true;\n                    self.renderEnd('model');\n                });\n            })(model);\n\n            if (renderStack.indexOf(model) < 0) {\n                renderStack.push(model);\n            }\n        });\n    return this;\n    };\n\n    this.reset = function(duration) {\n        if (duration !== undefined) {\n            _duration = duration;\n        }\n        renderStack = [];\n    };\n\n    this.transition = function(selection, args, duration) {\n        args = arguments.length > 1 ? [].slice.call(arguments, 1) : [];\n\n        if (args.length > 1) {\n            duration = args.pop();\n        } else {\n            duration = _duration !== undefined ? _duration : 250;\n        }\n        selection.__rendered = false;\n\n        if (renderStack.indexOf(selection) < 0) {\n            renderStack.push(selection);\n        }\n\n        if (duration === 0) {\n            selection.__rendered = true;\n            selection.delay = function() { return this; };\n            selection.duration = function() { return this; };\n            return selection;\n        } else {\n            if (selection.length === 0) {\n                selection.__rendered = true;\n            } else if (selection.every( function(d){ return !d.length; } )) {\n                selection.__rendered = true;\n            } else {\n                selection.__rendered = false;\n            }\n\n            var n = 0;\n            return selection\n                .transition()\n                .duration(duration)\n                .each(function(){ ++n; })\n                .each('end', function(d, i) {\n                    if (--n === 0) {\n                        selection.__rendered = true;\n                        self.renderEnd.apply(this, args);\n                    }\n                });\n        }\n    };\n\n    this.renderEnd = function() {\n        if (renderStack.every( function(d){ return d.__rendered; } )) {\n            renderStack.forEach( function(d){ d.__rendered = false; });\n            dispatch.renderEnd.apply(this, arguments);\n        }\n    }\n\n};\n\n\n/*\nTakes multiple objects and combines them into the first one (dst)\nexample:  nv.utils.deepExtend({a: 1}, {a: 2, b: 3}, {c: 4});\ngives:  {a: 2, b: 3, c: 4}\n*/\nnv.utils.deepExtend = function(dst){\n    var sources = arguments.length > 1 ? 
[].slice.call(arguments, 1) : [];\n    sources.forEach(function(source) {\n        for (var key in source) {\n            var isArray = nv.utils.isArray(dst[key]);\n            var isObject = nv.utils.isObject(dst[key]);\n            var srcObj = nv.utils.isObject(source[key]);\n\n            if (isObject && !isArray && srcObj) {\n                nv.utils.deepExtend(dst[key], source[key]);\n            } else {\n                dst[key] = source[key];\n            }\n        }\n    });\n};\n\n\n/*\nstate utility object, used to track d3 states in the models\n*/\nnv.utils.state = function(){\n    if (!(this instanceof nv.utils.state)) {\n        return new nv.utils.state();\n    }\n    var state = {};\n    var _self = this;\n    var _setState = function(){};\n    var _getState = function(){ return {}; };\n    var init = null;\n    var changed = null;\n\n    this.dispatch = d3.dispatch('change', 'set');\n\n    this.dispatch.on('set', function(state){\n        _setState(state, true);\n    });\n\n    this.getter = function(fn){\n        _getState = fn;\n        return this;\n    };\n\n    this.setter = function(fn, callback) {\n        if (!callback) {\n            callback = function(){};\n        }\n        _setState = function(state, update){\n            fn(state);\n            if (update) {\n                callback();\n            }\n        };\n        return this;\n    };\n\n    this.init = function(state){\n        init = init || {};\n        nv.utils.deepExtend(init, state);\n    };\n\n    var _set = function(){\n        var settings = _getState();\n\n        if (JSON.stringify(settings) === JSON.stringify(state)) {\n            return false;\n        }\n\n        for (var key in settings) {\n            if (state[key] === undefined) {\n                state[key] = {};\n            }\n            state[key] = settings[key];\n            changed = true;\n        }\n        return true;\n    };\n\n    this.update = function(){\n        if (init) {\n            _setState(init, false);\n            init = null;\n        }\n        if (_set.call(this)) {\n            this.dispatch.change(state);\n        }\n    };\n\n};\n\n\n/*\nSnippet of code you can insert into each nv.models.* to give you the ability to\ndo things like:\nchart.options({\n  showXAxis: true,\n  tooltips: true\n});\n\nTo enable in the chart:\nchart.options = nv.utils.optionsFunc.bind(chart);\n*/\nnv.utils.optionsFunc = function(args) {\n    if (args) {\n        d3.map(args).forEach((function(key,value) {\n            if (nv.utils.isFunction(this[key])) {\n                this[key](value);\n            }\n        }).bind(this));\n    }\n    return this;\n};\n\n\n/*\nnumTicks:  requested number of ticks\ndata:  the chart data\n\nreturns the number of ticks to actually use on X axis, based on chart data\nto avoid duplicate ticks with the same value\n*/\nnv.utils.calcTicksX = function(numTicks, data) {\n    // find max number of values from all data streams\n    var numValues = 1;\n    var i = 0;\n    for (i; i < data.length; i += 1) {\n        var stream_len = data[i] && data[i].values ? data[i].values.length : 0;\n        numValues = stream_len > numValues ? stream_len : numValues;\n    }\n    nv.log(\"Requested number of ticks: \", numTicks);\n    nv.log(\"Calculated max values to be: \", numValues);\n    // make sure we don't have more ticks than values to avoid duplicates\n    numTicks = numTicks > numValues ? numTicks = numValues - 1 : numTicks;\n    // make sure we have at least one tick\n    numTicks = numTicks < 1 ? 
1 : numTicks;\n    // make sure it's an integer\n    numTicks = Math.floor(numTicks);\n    nv.log(\"Calculating tick count as: \", numTicks);\n    return numTicks;\n};\n\n\n/*\nreturns number of ticks to actually use on Y axis, based on chart data\n*/\nnv.utils.calcTicksY = function(numTicks, data) {\n    // currently uses the same logic but we can adjust here if needed later\n    return nv.utils.calcTicksX(numTicks, data);\n};\n\n\n/*\nAdd a particular option from an options object onto chart\nOptions exposed on a chart are a getter/setter function that returns chart\non set to mimic typical d3 option chaining, e.g. svg.option1('a').option2('b');\n\noption objects should be generated via Object.create() to provide\nthe option of manipulating data via get/set functions.\n*/\nnv.utils.initOption = function(chart, name) {\n    // if it's a call option, just call it directly, otherwise do get/set\n    if (chart._calls && chart._calls[name]) {\n        chart[name] = chart._calls[name];\n    } else {\n        chart[name] = function (_) {\n            if (!arguments.length) return chart._options[name];\n            chart._overrides[name] = true;\n            chart._options[name] = _;\n            return chart;\n        };\n        // calling the option as _option is a no-op if the option was already set,\n        // so nvd3 can set options internally but then stop if set manually\n        chart['_' + name] = function(_) {\n            if (!arguments.length) return chart._options[name];\n            if (!chart._overrides[name]) {\n                chart._options[name] = _;\n            }\n            return chart;\n        }\n    }\n};\n\n\n/*\nAdd all options in an options object to the chart\n*/\nnv.utils.initOptions = function(chart) {\n    chart._overrides = chart._overrides || {};\n    var ops = Object.getOwnPropertyNames(chart._options || {});\n    var calls = Object.getOwnPropertyNames(chart._calls || {});\n    ops = ops.concat(calls);\n    for (var i in ops) {\n        nv.utils.initOption(chart, ops[i]);\n    }\n};\n\n\n/*\nInherit options from a D3 object\nd3.rebind makes calling the function on target actually call it on source\nAlso use _d3options so we can track what we inherit for documentation and chained inheritance\n*/\nnv.utils.inheritOptionsD3 = function(target, d3_source, oplist) {\n    target._d3options = oplist.concat(target._d3options || []);\n    oplist.unshift(d3_source);\n    oplist.unshift(target);\n    d3.rebind.apply(this, oplist);\n};\n\n\n/*\nRemove duplicates from an array\n*/\nnv.utils.arrayUnique = function(a) {\n    return a.sort().filter(function(item, pos) {\n        return !pos || item != a[pos - 1];\n    });\n};\n\n\n/*\nKeeps a list of custom symbols to draw from in addition to d3.svg.symbol\nNecessary since d3 doesn't let you extend its list -_-\nAdd new symbols by doing nv.utils.symbolMap.set('name', function(size){...});\n*/\nnv.utils.symbolMap = d3.map();\n\n\n/*\nReplaces d3.svg.symbol so that we can look both there and in our own map\n */\nnv.utils.symbol = function() {\n    var type,\n        size = d3.functor(64); // default size wrapped with d3.functor so it can be invoked like the type accessor\n    function symbol(d,i) {\n        var t = type.call(this,d,i);\n        var s = size.call(this,d,i);\n        if (d3.svg.symbolTypes.indexOf(t) !== -1) {\n            return d3.svg.symbol().type(t).size(s)();\n        } else {\n            return nv.utils.symbolMap.get(t)(s);\n        }\n    }\n    symbol.type = function(_) {\n        if (!arguments.length) return type;\n        type = d3.functor(_);\n        return symbol;\n    };\n    symbol.size = 
function(_) {\n        if (!arguments.length) return size;\n        size = d3.functor(_);\n        return symbol;\n    };\n    return symbol;\n};\n\n\n/*\nInherit option getter/setter functions from source to target\nd3.rebind makes calling the function on target actually call it on source\nAlso track via _inherited and _d3options so we can track what we inherit\nfor documentation generation purposes and chained inheritance\n*/\nnv.utils.inheritOptions = function(target, source) {\n    // inherit all the things\n    var ops = Object.getOwnPropertyNames(source._options || {});\n    var calls = Object.getOwnPropertyNames(source._calls || {});\n    var inherited = source._inherited || [];\n    var d3ops = source._d3options || [];\n    var args = ops.concat(calls).concat(inherited).concat(d3ops);\n    args.unshift(source);\n    args.unshift(target);\n    d3.rebind.apply(this, args);\n    // pass along the lists to keep track of them, don't allow duplicates\n    target._inherited = nv.utils.arrayUnique(ops.concat(calls).concat(inherited).concat(ops).concat(target._inherited || []));\n    target._d3options = nv.utils.arrayUnique(d3ops.concat(target._d3options || []));\n};\n\n\n/*\nRuns common initialize code on the svg before the chart builds\n*/\nnv.utils.initSVG = function(svg) {\n    svg.classed({'nvd3-svg':true});\n};\n\n\n/*\nSanitize and provide default for the container height.\n*/\nnv.utils.sanitizeHeight = function(height, container) {\n    return (height || parseInt(container.style('height'), 10) || 400);\n};\n\n\n/*\nSanitize and provide default for the container width.\n*/\nnv.utils.sanitizeWidth = function(width, container) {\n    return (width || parseInt(container.style('width'), 10) || 960);\n};\n\n\n/*\nCalculate the available height for a chart.\n*/\nnv.utils.availableHeight = function(height, container, margin) {\n    return Math.max(0,nv.utils.sanitizeHeight(height, container) - margin.top - margin.bottom);\n};\n\n/*\nCalculate the available width for a chart.\n*/\nnv.utils.availableWidth = function(width, container, margin) {\n    return Math.max(0,nv.utils.sanitizeWidth(width, container) - margin.left - margin.right);\n};\n\n/*\nClear any rendered chart components and display a chart's 'noData' message\n*/\nnv.utils.noData = function(chart, container) {\n    var opt = chart.options(),\n        margin = opt.margin(),\n        noData = opt.noData(),\n        data = (noData == null) ? 
[\"No Data Available.\"] : [noData],\n        height = nv.utils.availableHeight(null, container, margin),\n        width = nv.utils.availableWidth(null, container, margin),\n        x = margin.left + width/2,\n        y = margin.top + height/2;\n\n    //Remove any previously created chart components\n    container.selectAll('g').remove();\n\n    var noDataText = container.selectAll('.nv-noData').data(data);\n\n    noDataText.enter().append('text')\n        .attr('class', 'nvd3 nv-noData')\n        .attr('dy', '-.7em')\n        .style('text-anchor', 'middle');\n\n    noDataText\n        .attr('x', x)\n        .attr('y', y)\n        .text(function(t){ return t; });\n};\n\n/*\n Wrap long labels.\n */\nnv.utils.wrapTicks = function (text, width) {\n    text.each(function() {\n        var text = d3.select(this),\n            words = text.text().split(/\\s+/).reverse(),\n            word,\n            line = [],\n            lineNumber = 0,\n            lineHeight = 1.1,\n            y = text.attr(\"y\"),\n            dy = parseFloat(text.attr(\"dy\")),\n            tspan = text.text(null).append(\"tspan\").attr(\"x\", 0).attr(\"y\", y).attr(\"dy\", dy + \"em\");\n        while (word = words.pop()) {\n            line.push(word);\n            tspan.text(line.join(\" \"));\n            if (tspan.node().getComputedTextLength() > width) {\n                line.pop();\n                tspan.text(line.join(\" \"));\n                line = [word];\n                tspan = text.append(\"tspan\").attr(\"x\", 0).attr(\"y\", y).attr(\"dy\", ++lineNumber * lineHeight + dy + \"em\").text(word);\n            }\n        }\n    });\n};\n\n/*\nCheck equality of 2 array\n*/\nnv.utils.arrayEquals = function (array1, array2) {\n    if (array1 === array2)\n        return true;\n\n    if (!array1 || !array2)\n        return false;\n\n    // compare lengths - can save a lot of time\n    if (array1.length != array2.length)\n        return false;\n\n    for (var i = 0,\n        l = array1.length; i < l; i++) {\n        // Check if we have nested arrays\n        if (array1[i] instanceof Array && array2[i] instanceof Array) {\n            // recurse into the nested arrays\n            if (!nv.arrayEquals(array1[i], array2[i]))\n                return false;\n        } else if (array1[i] != array2[i]) {\n            // Warning - two different object instances will never be equal: {x:20} != {x:20}\n            return false;\n        }\n    }\n    return true;\n};\n","nv.models.axis = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var axis = d3.svg.axis();\n    var scale = d3.scale.linear();\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0}\n        , width = 75 //only used for tickLabel currently\n        , height = 60 //only used for tickLabel currently\n        , axisLabelText = null\n        , showMaxMin = true //TODO: showMaxMin should be disabled on all ordinal scaled axes\n        , rotateLabels = 0\n        , rotateYLabel = true\n        , staggerLabels = false\n        , isOrdinal = false\n        , ticks = null\n        , axisLabelDistance = 0\n        , fontSize = undefined\n        , duration = 250\n        , dispatch = d3.dispatch('renderEnd')\n        ;\n    axis\n        .scale(scale)\n        .orient('bottom')\n        .tickFormat(function(d) { return d })\n    ;\n\n    
//============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var scale0;\n    var renderWatch = nv.utils.renderWatch(dispatch, duration);\n\n    function chart(selection) {\n        renderWatch.reset();\n        selection.each(function(data) {\n            var container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-axis').data([data]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-axis');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            if (ticks !== null)\n                axis.ticks(ticks);\n            else if (axis.orient() == 'top' || axis.orient() == 'bottom')\n                axis.ticks(Math.abs(scale.range()[1] - scale.range()[0]) / 100);\n\n            //TODO: consider calculating width/height based on whether or not label is added, for reference in charts using this component\n            g.watchTransition(renderWatch, 'axis').call(axis);\n\n            scale0 = scale0 || axis.scale();\n\n            var fmt = axis.tickFormat();\n            if (fmt == null) {\n                fmt = scale0.tickFormat();\n            }\n\n            var axisLabel = g.selectAll('text.nv-axislabel')\n                .data([axisLabelText || null]);\n            axisLabel.exit().remove();\n\n            //only skip when fontSize is undefined so it can be cleared with a null or blank string\n            if (fontSize !== undefined) {\n                g.selectAll('g').select(\"text\").style('font-size', fontSize);\n            }\n\n            var xLabelMargin;\n            var axisMaxMin;\n            var w;\n            switch (axis.orient()) {\n                case 'top':\n                    axisLabel.enter().append('text').attr('class', 'nv-axislabel');\n                  w = 0;\n                  if (scale.range().length === 1) {\n                    w = isOrdinal ? scale.range()[0] * 2 + scale.rangeBand() : 0;\n                  } else if (scale.range().length === 2) {\n                    w = isOrdinal ? scale.range()[0] + scale.range()[1] + scale.rangeBand() : scale.range()[1];\n                  } else if ( scale.range().length > 2){\n                    w = scale.range()[scale.range().length-1]+(scale.range()[1]-scale.range()[0]);\n                  };\n                    axisLabel\n                        .attr('text-anchor', 'middle')\n                        .attr('y', 0)\n                        .attr('x', w/2);\n                    if (showMaxMin) {\n                        axisMaxMin = wrap.selectAll('g.nv-axisMaxMin')\n                            .data(scale.domain());\n                        axisMaxMin.enter().append('g').attr('class',function(d,i){\n                                return ['nv-axisMaxMin','nv-axisMaxMin-x',(i == 0 ? 
'nv-axisMin-x':'nv-axisMax-x')].join(' ')\n                        }).append('text');\n                        axisMaxMin.exit().remove();\n                        axisMaxMin\n                            .attr('transform', function(d,i) {\n                                return 'translate(' + nv.utils.NaNtoZero(scale(d)) + ',0)'\n                            })\n                            .select('text')\n                            .attr('dy', '-0.5em')\n                            .attr('y', -axis.tickPadding())\n                            .attr('text-anchor', 'middle')\n                            .text(function(d,i) {\n                                var v = fmt(d);\n                                return ('' + v).match('NaN') ? '' : v;\n                            });\n                        axisMaxMin.watchTransition(renderWatch, 'min-max top')\n                            .attr('transform', function(d,i) {\n                                return 'translate(' + nv.utils.NaNtoZero(scale.range()[i]) + ',0)'\n                            });\n                    }\n                    break;\n                case 'bottom':\n                    xLabelMargin = axisLabelDistance + 36;\n                    var maxTextWidth = 30;\n                    var textHeight = 0;\n                    var xTicks = g.selectAll('g').select(\"text\");\n                    var rotateLabelsRule = '';\n                    if (rotateLabels%360) {\n                        //Reset transform on ticks so textHeight can be calculated correctly\n                        xTicks.attr('transform', ''); \n                        //Calculate the longest xTick width\n                        xTicks.each(function(d,i){\n                            var box = this.getBoundingClientRect();\n                            var width = box.width;\n                            textHeight = box.height;\n                            if(width > maxTextWidth) maxTextWidth = width;\n                        });\n                        rotateLabelsRule = 'rotate(' + rotateLabels + ' 0,' + (textHeight/2 + axis.tickPadding()) + ')';\n                        //Convert to radians before calculating sin. Add 30 to margin for healthy padding.\n                        var sin = Math.abs(Math.sin(rotateLabels*Math.PI/180));\n                        xLabelMargin = (sin ? sin*maxTextWidth : maxTextWidth)+30;\n                        //Rotate all xTicks\n                        xTicks\n                            .attr('transform', rotateLabelsRule)\n                            .style('text-anchor', rotateLabels%360 > 0 ? 'start' : 'end');\n                    } else {\n                        if (staggerLabels) {\n                            xTicks\n                                .attr('transform', function(d,i) {\n                                    return 'translate(0,' + (i % 2 == 0 ? '0' : '12') + ')'\n                                });\n                        } else {\n                            xTicks.attr('transform', \"translate(0,0)\");\n                        }\n                    }\n                    axisLabel.enter().append('text').attr('class', 'nv-axislabel');\n                    w = 0;\n                    if (scale.range().length === 1) {\n                        w = isOrdinal ? scale.range()[0] * 2 + scale.rangeBand() : 0;\n                    } else if (scale.range().length === 2) {\n                        w = isOrdinal ? 
scale.range()[0] + scale.range()[1] + scale.rangeBand() : scale.range()[1];\n                    } else if ( scale.range().length > 2){\n                        w = scale.range()[scale.range().length-1]+(scale.range()[1]-scale.range()[0]);\n                    };\n                    axisLabel\n                        .attr('text-anchor', 'middle')\n                        .attr('y', xLabelMargin)\n                        .attr('x', w/2);\n                    if (showMaxMin) {\n                        //if (showMaxMin && !isOrdinal) {\n                        axisMaxMin = wrap.selectAll('g.nv-axisMaxMin')\n                            //.data(scale.domain())\n                            .data([scale.domain()[0], scale.domain()[scale.domain().length - 1]]);\n                        axisMaxMin.enter().append('g').attr('class',function(d,i){\n                                return ['nv-axisMaxMin','nv-axisMaxMin-x',(i == 0 ? 'nv-axisMin-x':'nv-axisMax-x')].join(' ')\n                        }).append('text');\n                        axisMaxMin.exit().remove();\n                        axisMaxMin\n                            .attr('transform', function(d,i) {\n                                return 'translate(' + nv.utils.NaNtoZero((scale(d) + (isOrdinal ? scale.rangeBand() / 2 : 0))) + ',0)'\n                            })\n                            .select('text')\n                            .attr('dy', '.71em')\n                            .attr('y', axis.tickPadding())\n                            .attr('transform', rotateLabelsRule)\n                            .style('text-anchor', rotateLabels ? (rotateLabels%360 > 0 ? 'start' : 'end') : 'middle')\n                            .text(function(d,i) {\n                                var v = fmt(d);\n                                return ('' + v).match('NaN') ? '' : v;\n                            });\n                        axisMaxMin.watchTransition(renderWatch, 'min-max bottom')\n                            .attr('transform', function(d,i) {\n                                return 'translate(' + nv.utils.NaNtoZero((scale(d) + (isOrdinal ? scale.rangeBand() / 2 : 0))) + ',0)'\n                            });\n                    }\n\n                    break;\n                case 'right':\n                    axisLabel.enter().append('text').attr('class', 'nv-axislabel');\n                    axisLabel\n                        .style('text-anchor', rotateYLabel ? 'middle' : 'begin')\n                        .attr('transform', rotateYLabel ? 'rotate(90)' : '')\n                        .attr('y', rotateYLabel ? (-Math.max(margin.right, width) + 12 - (axisLabelDistance || 0)) : -10) //TODO: consider calculating this based on largest tick width... OR at least expose this on chart\n                        .attr('x', rotateYLabel ? (d3.max(scale.range()) / 2) : axis.tickPadding());\n                    if (showMaxMin) {\n                        axisMaxMin = wrap.selectAll('g.nv-axisMaxMin')\n                            .data(scale.domain());\n                       \taxisMaxMin.enter().append('g').attr('class',function(d,i){\n                                return ['nv-axisMaxMin','nv-axisMaxMin-y',(i == 0 ? 
'nv-axisMin-y':'nv-axisMax-y')].join(' ')\n                        }).append('text')\n                            .style('opacity', 0);\n                        axisMaxMin.exit().remove();\n                        axisMaxMin\n                            .attr('transform', function(d,i) {\n                                return 'translate(0,' + nv.utils.NaNtoZero(scale(d)) + ')'\n                            })\n                            .select('text')\n                            .attr('dy', '.32em')\n                            .attr('y', 0)\n                            .attr('x', axis.tickPadding())\n                            .style('text-anchor', 'start')\n                            .text(function(d, i) {\n                                var v = fmt(d);\n                                return ('' + v).match('NaN') ? '' : v;\n                            });\n                        axisMaxMin.watchTransition(renderWatch, 'min-max right')\n                            .attr('transform', function(d,i) {\n                                return 'translate(0,' + nv.utils.NaNtoZero(scale.range()[i]) + ')'\n                            })\n                            .select('text')\n                            .style('opacity', 1);\n                    }\n                    break;\n                case 'left':\n                    /*\n                     //For dynamically placing the label. Can be used with dynamically-sized chart axis margins\n                     var yTicks = g.selectAll('g').select(\"text\");\n                     yTicks.each(function(d,i){\n                     var labelPadding = this.getBoundingClientRect().width + axis.tickPadding() + 16;\n                     if(labelPadding > width) width = labelPadding;\n                     });\n                     */\n                    axisLabel.enter().append('text').attr('class', 'nv-axislabel');\n                    axisLabel\n                        .style('text-anchor', rotateYLabel ? 'middle' : 'end')\n                        .attr('transform', rotateYLabel ? 'rotate(-90)' : '')\n                        .attr('y', rotateYLabel ? (-Math.max(margin.left, width) + 25 - (axisLabelDistance || 0)) : -10)\n                        .attr('x', rotateYLabel ? (-d3.max(scale.range()) / 2) : -axis.tickPadding());\n                    if (showMaxMin) {\n                        axisMaxMin = wrap.selectAll('g.nv-axisMaxMin')\n                            .data(scale.domain());\n                        axisMaxMin.enter().append('g').attr('class',function(d,i){\n                                return ['nv-axisMaxMin','nv-axisMaxMin-y',(i == 0 ? 'nv-axisMin-y':'nv-axisMax-y')].join(' ')\n                        }).append('text')\n                            .style('opacity', 0);\n                        axisMaxMin.exit().remove();\n                        axisMaxMin\n                            .attr('transform', function(d,i) {\n                                return 'translate(0,' + nv.utils.NaNtoZero(scale0(d)) + ')'\n                            })\n                            .select('text')\n                            .attr('dy', '.32em')\n                            .attr('y', 0)\n                            .attr('x', -axis.tickPadding())\n                            .attr('text-anchor', 'end')\n                            .text(function(d,i) {\n                                var v = fmt(d);\n                                return ('' + v).match('NaN') ? 
'' : v;\n                            });\n                        axisMaxMin.watchTransition(renderWatch, 'min-max right')\n                            .attr('transform', function(d,i) {\n                                return 'translate(0,' + nv.utils.NaNtoZero(scale.range()[i]) + ')'\n                            })\n                            .select('text')\n                            .style('opacity', 1);\n                    }\n                    break;\n            }\n            axisLabel.text(function(d) { return d });\n\n            if (showMaxMin && (axis.orient() === 'left' || axis.orient() === 'right')) {\n                //check if max and min overlap other values, if so, hide the values that overlap\n                g.selectAll('g') // the g's wrapping each tick\n                    .each(function(d,i) {\n                        d3.select(this).select('text').attr('opacity', 1);\n                        if (scale(d) < scale.range()[1] + 10 || scale(d) > scale.range()[0] - 10) { // 10 is assuming text height is 16... if d is 0, leave it!\n                            if (d > 1e-10 || d < -1e-10) // accounts for minor floating point errors... though could be problematic if the scale is EXTREMELY SMALL\n                                d3.select(this).attr('opacity', 0);\n\n                            d3.select(this).select('text').attr('opacity', 0); // Don't remove the ZERO line!!\n                        }\n                    });\n\n                //if Max and Min = 0 only show min, Issue #281\n                if (scale.domain()[0] == scale.domain()[1] && scale.domain()[0] == 0) {\n                    wrap.selectAll('g.nv-axisMaxMin').style('opacity', function (d, i) {\n                        return !i ? 1 : 0\n                    });\n                }\n            }\n\n            if (showMaxMin && (axis.orient() === 'top' || axis.orient() === 'bottom')) {\n                var maxMinRange = [];\n                wrap.selectAll('g.nv-axisMaxMin')\n                    .each(function(d,i) {\n                        try {\n                            if (i) // i== 1, max position\n                                maxMinRange.push(scale(d) - this.getBoundingClientRect().width - 4);  //assuming the max and min labels are as wide as the next tick (with an extra 4 pixels just in case)\n                            else // i==0, min position\n                                maxMinRange.push(scale(d) + this.getBoundingClientRect().width + 4)\n                        }catch (err) {\n                            if (i) // i== 1, max position\n                                maxMinRange.push(scale(d) - 4);  //assuming the max and min labels are as wide as the next tick (with an extra 4 pixels just in case)\n                            else // i==0, min position\n                                maxMinRange.push(scale(d) + 4);\n                        }\n                    });\n                // the g's wrapping each tick\n                g.selectAll('g').each(function(d, i) {\n                    if (scale(d) < maxMinRange[0] || scale(d) > maxMinRange[1]) {\n                        if (d > 1e-10 || d < -1e-10) // accounts for minor floating point errors... 
though could be problematic if the scale is EXTREMELY SMALL\n                            d3.select(this).remove();\n                        else\n                            d3.select(this).select('text').remove(); // Don't remove the ZERO line!!\n                    }\n                });\n            }\n\n            //Highlight zero tick line\n            g.selectAll('.tick')\n                .filter(function (d) {\n                    /*\n                    The filter needs to return only ticks at or near zero.\n                    Numbers like 0.00001 need to count as zero as well,\n                    and the arithmetic trick below solves that.\n                    */\n                    return !parseFloat(Math.round(d * 100000) / 1000000) && (d !== undefined)\n                }) \n                .classed('zero', true);\n            \n            //store old scales for use in transitions on update\n            scale0 = scale.copy();\n\n        });\n\n        renderWatch.renderEnd('axis immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    // expose chart's sub-components\n    chart.axis = axis;\n    chart.dispatch = dispatch;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        axisLabelDistance: {get: function(){return axisLabelDistance;}, set: function(_){axisLabelDistance=_;}},\n        staggerLabels:     {get: function(){return staggerLabels;}, set: function(_){staggerLabels=_;}},\n        rotateLabels:      {get: function(){return rotateLabels;}, set: function(_){rotateLabels=_;}},\n        rotateYLabel:      {get: function(){return rotateYLabel;}, set: function(_){rotateYLabel=_;}},\n        showMaxMin:        {get: function(){return showMaxMin;}, set: function(_){showMaxMin=_;}},\n        axisLabel:         {get: function(){return axisLabelText;}, set: function(_){axisLabelText=_;}},\n        height:            {get: function(){return height;}, set: function(_){height=_;}},\n        ticks:             {get: function(){return ticks;}, set: function(_){ticks=_;}},\n        width:             {get: function(){return width;}, set: function(_){width=_;}},\n        fontSize:          {get: function(){return fontSize;}, set: function(_){fontSize=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top !== undefined    ? _.top    : margin.top;\n            margin.right  = _.right !== undefined  ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left !== undefined   ? 
_.left   : margin.left;\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration=_;\n            renderWatch.reset(duration);\n        }},\n        scale: {get: function(){return scale;}, set: function(_){\n            scale = _;\n            axis.scale(scale);\n            isOrdinal = typeof scale.rangeBands === 'function';\n            nv.utils.inheritOptionsD3(chart, scale, ['domain', 'range', 'rangeBand', 'rangeBands']);\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n    nv.utils.inheritOptionsD3(chart, axis, ['orient', 'tickValues', 'tickSubdivide', 'tickSize', 'tickPadding', 'tickFormat']);\n    nv.utils.inheritOptionsD3(chart, scale, ['domain', 'range', 'rangeBand', 'rangeBands']);\n\n    return chart;\n};\n","nv.models.boxPlot = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0},\n        width = 960,\n        height = 500,\n        id = Math.floor(Math.random() * 10000), // Create semi-unique ID in case user doesn't select one\n        xScale = d3.scale.ordinal(),\n        yScale = d3.scale.linear(),\n        getX  = function(d) { return d.label }, // Default data model selectors.\n        getQ1 = function(d) { return d.values.Q1 },\n        getQ2 = function(d) { return d.values.Q2 },\n        getQ3 = function(d) { return d.values.Q3 },\n        getWl = function(d) { return d.values.whisker_low },\n        getWh = function(d) { return d.values.whisker_high },\n        getColor = function(d) { return d.color },\n        getOlItems  = function(d) { return d.values.outliers },\n        getOlValue = function(d, i, j) { return d },\n        getOlLabel = function(d, i, j) { return d },\n        getOlColor = function(d, i, j) { return undefined },\n        color = nv.utils.defaultColor(),\n        container = null,\n        xDomain, xRange,\n        yDomain, yRange,\n        dispatch = d3.dispatch('elementMouseover', 'elementMouseout', 'elementMousemove', 'renderEnd'),\n        duration = 250,\n        maxBoxWidth = null;\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var xScale0, yScale0;\n    var renderWatch = nv.utils.renderWatch(dispatch, duration);\n\n    function chart(selection) {\n        renderWatch.reset();\n        selection.each(function(data) {\n            var availableWidth = width - margin.left - margin.right,\n                availableHeight = height - margin.top - margin.bottom;\n\n            container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            // Setup Scales\n            xScale.domain(xDomain || data.map(function(d,i) { return getX(d,i); }))\n                .rangeBands(xRange || [0, availableWidth], 0.1);\n\n            // if we know yDomain, no need to calculate\n            var yData = []\n            if (!yDomain) {\n                // (y-range is based on quartiles, whiskers and outliers)\n                var values = [], yMin, yMax;\n                data.forEach(function (d, i) {\n                    var q1 = getQ1(d), q3 = getQ3(d), wl = getWl(d), wh = getWh(d);\n                    var olItems = getOlItems(d);\n                    if (olItems) {\n                        olItems.forEach(function (e, i) {\n                  
          values.push(getOlValue(e, i, undefined));\n                        });\n                    }\n                    if (wl) { values.push(wl) }\n                    if (q1) { values.push(q1) }\n                    if (q3) { values.push(q3) }\n                    if (wh) { values.push(wh) }\n                });\n                yMin = d3.min(values);\n                yMax = d3.max(values);\n                yData = [ yMin, yMax ] ;\n            }\n\n            yScale.domain(yDomain || yData);\n            yScale.range(yRange || [availableHeight, 0]);\n\n            //store old scales if they exist\n            xScale0 = xScale0 || xScale;\n            yScale0 = yScale0 || yScale.copy().range([yScale(0),yScale(0)]);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap').data([data]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap');\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            var boxplots = wrap.selectAll('.nv-boxplot').data(function(d) { return d });\n            var boxEnter = boxplots.enter().append('g').style('stroke-opacity', 1e-6).style('fill-opacity', 1e-6);\n            boxplots\n                .attr('class', 'nv-boxplot')\n                .attr('transform', function(d,i,j) { return 'translate(' + (xScale(getX(d,i)) + xScale.rangeBand() * 0.05) + ', 0)'; })\n                .classed('hover', function(d) { return d.hover });\n            boxplots\n                .watchTransition(renderWatch, 'nv-boxplot: boxplots')\n                .style('stroke-opacity', 1)\n                .style('fill-opacity', 0.75)\n                .delay(function(d,i) { return i * duration / data.length })\n                .attr('transform', function(d,i) {\n                    return 'translate(' + (xScale(getX(d,i)) + xScale.rangeBand() * 0.05) + ', 0)';\n                });\n            boxplots.exit().remove();\n\n            // ----- add the SVG elements for each boxPlot -----\n\n            // conditionally append whisker lines\n            boxEnter.each(function(d,i) {\n                var box = d3.select(this);\n                [getWl, getWh].forEach(function (f) {\n                    if (f(d) !== undefined && f(d) !== null) {\n                        var key = (f === getWl) ? 'low' : 'high';\n                        box.append('line')\n                          .style('stroke', getColor(d) || color(d,i))\n                          .attr('class', 'nv-boxplot-whisker nv-boxplot-' + key);\n                        box.append('line')\n                          .style('stroke', getColor(d) || color(d,i))\n                          .attr('class', 'nv-boxplot-tick nv-boxplot-' + key);\n                    }\n                });\n            });\n\n            var box_width = function() { return (maxBoxWidth === null ? xScale.rangeBand() * 0.9 : Math.min(75, xScale.rangeBand() * 0.9)); };\n            var box_left  = function() { return xScale.rangeBand() * 0.45 - box_width()/2; };\n            var box_right = function() { return xScale.rangeBand() * 0.45 + box_width()/2; };\n\n            // update whisker lines and ticks\n            [getWl, getWh].forEach(function (f) {\n                var key = (f === getWl) ? 'low' : 'high';\n                var endpoint = (f === getWl) ? 
getQ1 : getQ3;\n                boxplots.select('line.nv-boxplot-whisker.nv-boxplot-' + key)\n                  .watchTransition(renderWatch, 'nv-boxplot: boxplots')\n                    .attr('x1', xScale.rangeBand() * 0.45 )\n                    .attr('y1', function(d,i) { return yScale(f(d)); })\n                    .attr('x2', xScale.rangeBand() * 0.45 )\n                    .attr('y2', function(d,i) { return yScale(endpoint(d)); });\n                boxplots.select('line.nv-boxplot-tick.nv-boxplot-' + key)\n                  .watchTransition(renderWatch, 'nv-boxplot: boxplots')\n                    .attr('x1', box_left )\n                    .attr('y1', function(d,i) { return yScale(f(d)); })\n                    .attr('x2', box_right )\n                    .attr('y2', function(d,i) { return yScale(f(d)); });\n            });\n\n            [getWl, getWh].forEach(function (f) {\n                var key = (f === getWl) ? 'low' : 'high';\n                boxEnter.selectAll('.nv-boxplot-' + key)\n                  .on('mouseover', function(d,i,j) {\n                      d3.select(this).classed('hover', true);\n                      dispatch.elementMouseover({\n                          series: { key: f(d), color: getColor(d) || color(d,j) },\n                          e: d3.event\n                      });\n                  })\n                  .on('mouseout', function(d,i,j) {\n                      d3.select(this).classed('hover', false);\n                      dispatch.elementMouseout({\n                          series: { key: f(d), color: getColor(d) || color(d,j) },\n                          e: d3.event\n                      });\n                  })\n                  .on('mousemove', function(d,i) {\n                      dispatch.elementMousemove({e: d3.event});\n                  });\n            });\n\n            // boxes\n            boxEnter.append('rect')\n                .attr('class', 'nv-boxplot-box')\n                // tooltip events\n                .on('mouseover', function(d,i) {\n                    d3.select(this).classed('hover', true);\n                    dispatch.elementMouseover({\n                        key: getX(d),\n                        value: getX(d),\n                        series: [\n                            { key: 'Q3', value: getQ3(d), color: getColor(d) || color(d,i) },\n                            { key: 'Q2', value: getQ2(d), color: getColor(d) || color(d,i) },\n                            { key: 'Q1', value: getQ1(d), color: getColor(d) || color(d,i) }\n                        ],\n                        data: d,\n                        index: i,\n                        e: d3.event\n                    });\n                })\n                .on('mouseout', function(d,i) {\n                    d3.select(this).classed('hover', false);\n                    dispatch.elementMouseout({\n                        key: getX(d),\n                        value: getX(d),\n                        series: [\n                            { key: 'Q3', value: getQ3(d), color: getColor(d) || color(d,i) },\n                            { key: 'Q2', value: getQ2(d), color: getColor(d) || color(d,i) },\n                            { key: 'Q1', value: getQ1(d), color: getColor(d) || color(d,i) }\n                        ],\n                        data: d,\n                        index: i,\n                        e: d3.event\n                    });\n                })\n                .on('mousemove', function(d,i) {\n                    
dispatch.elementMousemove({e: d3.event});\n                });\n\n            // box transitions\n            boxplots.select('rect.nv-boxplot-box')\n              .watchTransition(renderWatch, 'nv-boxplot: boxes')\n                .attr('y', function(d,i) { return yScale(getQ3(d)); })\n                .attr('width', box_width)\n                .attr('x', box_left )\n                .attr('height', function(d,i) { return Math.abs(yScale(getQ3(d)) - yScale(getQ1(d))) || 1 })\n                .style('fill', function(d,i) { return getColor(d) || color(d,i) })\n                .style('stroke', function(d,i) { return getColor(d) || color(d,i) });\n\n            // median line\n            boxEnter.append('line').attr('class', 'nv-boxplot-median');\n\n            boxplots.select('line.nv-boxplot-median')\n              .watchTransition(renderWatch, 'nv-boxplot: boxplots line')\n                .attr('x1', box_left)\n                .attr('y1', function(d,i) { return yScale(getQ2(d)); })\n                .attr('x2', box_right)\n                .attr('y2', function(d,i) { return yScale(getQ2(d)); });\n\n            // outliers\n            var outliers = boxplots.selectAll('.nv-boxplot-outlier').data(function(d) {\n                return getOlItems(d) || [];\n            });\n            outliers.enter().append('circle')\n                .style('fill', function(d,i,j) { return getOlColor(d,i,j) || color(d,j) })\n                .style('stroke', function(d,i,j) { return getOlColor(d,i,j) || color(d,j) })\n                .style('z-index', 9000)\n                .on('mouseover', function(d,i,j) {\n                    d3.select(this).classed('hover', true);\n                    dispatch.elementMouseover({\n                        series: { key: getOlLabel(d,i,j), color: getOlColor(d,i,j) || color(d,j) },\n                        e: d3.event\n                    });\n                })\n                .on('mouseout', function(d,i,j) {\n                    d3.select(this).classed('hover', false);\n                    dispatch.elementMouseout({\n                        series: { key: getOlLabel(d,i,j), color: getOlColor(d,i,j) || color(d,j) },\n                        e: d3.event\n                    });\n                })\n                .on('mousemove', function(d,i) {\n                    dispatch.elementMousemove({e: d3.event});\n                });\n            outliers.attr('class', 'nv-boxplot-outlier');\n            outliers\n              .watchTransition(renderWatch, 'nv-boxplot: nv-boxplot-outlier')\n                .attr('cx', xScale.rangeBand() * 0.45)\n                .attr('cy', function(d,i,j) { return yScale(getOlValue(d,i,j)); })\n                .attr('r', '3');\n            outliers.exit().remove();\n\n            //store old scales for use in transitions on update\n            xScale0 = xScale.copy();\n            yScale0 = yScale.copy();\n        });\n\n        renderWatch.renderEnd('nv-boxplot immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:       {get: function(){return width;}, set: function(_){width=_;}},\n        height:      {get: function(){return height;}, set: function(_){height=_;}},\n        
maxBoxWidth: {get: function(){return maxBoxWidth;}, set: function(_){maxBoxWidth=_;}},\n        x:           {get: function(){return getX;}, set: function(_){getX=_;}},\n        q1: {get: function(){return getQ1;}, set: function(_){getQ1=_;}},\n        q2: {get: function(){return getQ2;}, set: function(_){getQ2=_;}},\n        q3: {get: function(){return getQ3;}, set: function(_){getQ3=_;}},\n        wl: {get: function(){return getWl;}, set: function(_){getWl=_;}},\n        wh: {get: function(){return getWh;}, set: function(_){getWh=_;}},\n        itemColor:    {get: function(){return getColor;}, set: function(_){getColor=_;}},\n        outliers:     {get: function(){return getOlItems;}, set: function(_){getOlItems=_;}},\n        outlierValue: {get: function(){return getOlValue;}, set: function(_){getOlValue=_;}},\n        outlierLabel: {get: function(){return getOlLabel;}, set: function(_){getOlLabel=_;}},\n        outlierColor: {get: function(){return getOlColor;}, set: function(_){getOlColor=_;}},\n        xScale:  {get: function(){return xScale;}, set: function(_){xScale=_;}},\n        yScale:  {get: function(){return yScale;}, set: function(_){yScale=_;}},\n        xDomain: {get: function(){return xDomain;}, set: function(_){xDomain=_;}},\n        yDomain: {get: function(){return yDomain;}, set: function(_){yDomain=_;}},\n        xRange:  {get: function(){return xRange;}, set: function(_){xRange=_;}},\n        yRange:  {get: function(){return yRange;}, set: function(_){yRange=_;}},\n        id:          {get: function(){return id;}, set: function(_){id=_;}},\n        // rectClass: {get: function(){return rectClass;}, set: function(_){rectClass=_;}},\n        y: {\n            get: function() {\n                console.warn('BoxPlot \\'y\\' chart option is deprecated. Please use model overrides instead.');\n                return {};\n            },\n            set: function(_) {\n                console.warn('BoxPlot \\'y\\' chart option is deprecated. Please use model overrides instead.');\n            }\n        },\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","nv.models.boxPlotChart = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var boxplot = nv.models.boxPlot(),\n        xAxis = nv.models.axis(),\n        yAxis = nv.models.axis();\n\n    var margin = {top: 15, right: 10, bottom: 50, left: 60},\n        width = null,\n        height = null,\n        color = nv.utils.getColor(),\n        showXAxis = true,\n        showYAxis = true,\n        rightAlignYAxis = false,\n        staggerLabels = false,\n        tooltip = nv.models.tooltip(),\n        x, y,\n        noData = 'No Data Available.',\n        dispatch = d3.dispatch('beforeUpdate', 'renderEnd'),\n        duration = 250;\n\n    xAxis\n        .orient('bottom')\n        .showMaxMin(false)\n        .tickFormat(function(d) { return d })\n    ;\n    yAxis\n        .orient((rightAlignYAxis) ? 'right' : 'left')\n        .tickFormat(d3.format(',.1f'))\n    ;\n\n    tooltip.duration(0);\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch, duration);\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(boxplot);\n        if (showXAxis) renderWatch.models(xAxis);\n        if (showYAxis) renderWatch.models(yAxis);\n\n        selection.each(function(data) {\n            var container = d3.select(this), that = this;\n            nv.utils.initSVG(container);\n            var availableWidth = (width  || parseInt(container.style('width')) || 960) - margin.left - margin.right;\n            var availableHeight = (height || parseInt(container.style('height')) || 400) - margin.top - margin.bottom;\n\n            chart.update = function() {\n                dispatch.beforeUpdate();\n                container.transition().duration(duration).call(chart);\n            };\n            chart.container = this;\n\n            // TODO still need to find a way to validate quartile data presence using boxPlot callbacks.\n            // Display No Data message if there's nothing to show. 
(quartiles required at minimum).\n            if (!data || !data.length) {\n                var noDataText = container.selectAll('.nv-noData').data([noData]);\n\n                noDataText.enter().append('text')\n                    .attr('class', 'nvd3 nv-noData')\n                    .attr('dy', '-.7em')\n                    .style('text-anchor', 'middle');\n\n                noDataText\n                    .attr('x', margin.left + availableWidth / 2)\n                    .attr('y', margin.top + availableHeight / 2)\n                    .text(function(d) { return d });\n\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            // Setup Scales\n            x = boxplot.xScale();\n            y = boxplot.yScale().clamp(true);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-boxPlotWithAxes').data([data]);\n            var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-boxPlotWithAxes').append('g');\n            var defsEnter = gEnter.append('defs');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-x nv-axis');\n            gEnter.append('g').attr('class', 'nv-y nv-axis')\n                .append('g').attr('class', 'nv-zeroLine')\n                .append('line');\n\n            gEnter.append('g').attr('class', 'nv-barsWrap');\n            g.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            if (rightAlignYAxis) {\n                g.select('.nv-y.nv-axis')\n                    .attr('transform', 'translate(' + availableWidth + ',0)');\n            }\n\n            // Main Chart Component(s)\n            boxplot.width(availableWidth).height(availableHeight);\n\n            var barsWrap = g.select('.nv-barsWrap')\n                .datum(data.filter(function(d) { return !d.disabled }))\n\n            barsWrap.transition().call(boxplot);\n\n            defsEnter.append('clipPath')\n                .attr('id', 'nv-x-label-clip-' + boxplot.id())\n                .append('rect');\n\n            g.select('#nv-x-label-clip-' + boxplot.id() + ' rect')\n                .attr('width', x.rangeBand() * (staggerLabels ? 2 : 1))\n                .attr('height', 16)\n                .attr('x', -x.rangeBand() / (staggerLabels ? 1 : 2 ));\n\n            // Setup Axes\n            if (showXAxis) {\n                xAxis\n                    .scale(x)\n                    .ticks( nv.utils.calcTicksX(availableWidth/100, data) )\n                    .tickSize(-availableHeight, 0);\n\n                g.select('.nv-x.nv-axis').attr('transform', 'translate(0,' + y.range()[0] + ')');\n                g.select('.nv-x.nv-axis').call(xAxis);\n\n                var xTicks = g.select('.nv-x.nv-axis').selectAll('g');\n                if (staggerLabels) {\n                    xTicks\n                        .selectAll('text')\n                        .attr('transform', function(d,i,j) { return 'translate(0,' + (j % 2 === 0 ? 
'5' : '17') + ')' })\n                }\n            }\n\n            if (showYAxis) {\n                yAxis\n                    .scale(y)\n                    .ticks( Math.floor(availableHeight/36) ) // can't use nv.utils.calcTicksY with Object data\n                    .tickSize( -availableWidth, 0);\n\n                g.select('.nv-y.nv-axis').call(yAxis);\n            }\n\n            // Zero line\n            g.select('.nv-zeroLine line')\n                .attr('x1',0)\n                .attr('x2',availableWidth)\n                .attr('y1', y(0))\n                .attr('y2', y(0))\n            ;\n\n            //============================================================\n            // Event Handling/Dispatching (in chart's scope)\n            //------------------------------------------------------------\n        });\n\n        renderWatch.renderEnd('nv-boxplot chart immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Event Handling/Dispatching (out of chart's scope)\n    //------------------------------------------------------------\n\n    boxplot.dispatch.on('elementMouseover.tooltip', function(evt) {\n        tooltip.data(evt).hidden(false);\n    });\n\n    boxplot.dispatch.on('elementMouseout.tooltip', function(evt) {\n        tooltip.data(evt).hidden(true);\n    });\n\n    boxplot.dispatch.on('elementMousemove.tooltip', function(evt) {\n        tooltip();\n    });\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.boxplot = boxplot;\n    chart.xAxis = xAxis;\n    chart.yAxis = yAxis;\n    chart.tooltip = tooltip;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        staggerLabels: {get: function(){return staggerLabels;}, set: function(_){staggerLabels=_;}},\n        showXAxis: {get: function(){return showXAxis;}, set: function(_){showXAxis=_;}},\n        showYAxis: {get: function(){return showYAxis;}, set: function(_){showYAxis=_;}},\n        tooltipContent:    {get: function(){return tooltip;}, set: function(_){tooltip=_;}},\n        noData:    {get: function(){return noData;}, set: function(_){noData=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n            boxplot.duration(duration);\n            xAxis.duration(duration);\n            yAxis.duration(duration);\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n            boxplot.color(color);\n        }},\n        rightAlignYAxis: {get: function(){return rightAlignYAxis;}, set: function(_){\n            rightAlignYAxis = _;\n            yAxis.orient( (_) ? 'right' : 'left');\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, boxplot);\n    nv.utils.initOptions(chart);\n\n    return chart;\n}\n","\n// Chart design based on the recommendations of Stephen Few. Implementation\n// based on the work of Clint Ivy, Jamie Love, and Jason Davies.\n// http://projects.instantcognition.com/protovis/bulletchart/\n\nnv.models.bullet = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0}\n        , orient = 'left' // TODO top & bottom\n        , reverse = false\n        , ranges = function(d) { return d.ranges }\n        , markers = function(d) { return d.markers ? d.markers : [] }\n        , markerLines = function(d) { return d.markerLines ? d.markerLines : [0] }\n        , measures = function(d) { return d.measures }\n        , rangeLabels = function(d) { return d.rangeLabels ? d.rangeLabels : [] }\n        , markerLabels = function(d) { return d.markerLabels ? d.markerLabels : []  }\n        , markerLineLabels = function(d) { return d.markerLineLabels ? d.markerLineLabels : []  }\n        , measureLabels = function(d) { return d.measureLabels ? d.measureLabels : []  }\n        , forceX = [0] // List of numbers to Force into the X scale (ie. 
0, or a max / min, etc.)\n        , width = 380\n        , height = 30\n        , container = null\n        , tickFormat = null\n        , color = nv.utils.getColor(['#1f77b4'])\n        , dispatch = d3.dispatch('elementMouseover', 'elementMouseout', 'elementMousemove')\n        , defaultRangeLabels = [\"Maximum\", \"Mean\", \"Minimum\"]\n        , legacyRangeClassNames = [\"Max\", \"Avg\", \"Min\"]\n        , duration = 1000\n        ;\n\n    function sortLabels(labels, values){\n        var lz = labels.slice();\n        labels.sort(function(a, b){\n            var iA = lz.indexOf(a);\n            var iB = lz.indexOf(b);\n            return d3.descending(values[iA], values[iB]);\n        });\n    };\n\n    function chart(selection) {\n        selection.each(function(d, i) {\n            var availableWidth = width - margin.left - margin.right,\n                availableHeight = height - margin.top - margin.bottom;\n\n            container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            var rangez = ranges.call(this, d, i).slice(),\n                markerz = markers.call(this, d, i).slice(),\n                markerLinez = markerLines.call(this, d, i).slice(),\n                measurez = measures.call(this, d, i).slice(),\n                rangeLabelz = rangeLabels.call(this, d, i).slice(),\n                markerLabelz = markerLabels.call(this, d, i).slice(),\n                markerLineLabelz = markerLineLabels.call(this, d, i).slice(),\n                measureLabelz = measureLabels.call(this, d, i).slice();\n\n            // Sort labels according to their sorted values\n            sortLabels(rangeLabelz, rangez);\n            sortLabels(markerLabelz, markerz);\n            sortLabels(markerLineLabelz, markerLinez);\n            sortLabels(measureLabelz, measurez);\n\n            // sort values descending\n            rangez.sort(d3.descending);\n            markerz.sort(d3.descending);\n            markerLinez.sort(d3.descending);\n            measurez.sort(d3.descending);\n\n            // Setup Scales\n            // Compute the new x-scale.\n            var x1 = d3.scale.linear()\n                .domain( d3.extent(d3.merge([forceX, rangez])) )\n                .range(reverse ? 
[availableWidth, 0] : [0, availableWidth]);\n\n            // Retrieve the old x-scale, if this is an update.\n            var x0 = this.__chart__ || d3.scale.linear()\n                .domain([0, Infinity])\n                .range(x1.range());\n\n            // Stash the new scale.\n            this.__chart__ = x1;\n\n            var rangeMin = d3.min(rangez), //rangez[2]\n                rangeMax = d3.max(rangez), //rangez[0]\n                rangeAvg = rangez[1];\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-bullet').data([d]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-bullet');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            for(var i=0,il=rangez.length; i<il; i++){\n                var rangeClassNames = 'nv-range nv-range'+i;\n                if(i <= 2){\n                    rangeClassNames = rangeClassNames + ' nv-range'+legacyRangeClassNames[i];\n                }\n                gEnter.append('rect').attr('class', rangeClassNames);\n            }\n\n            gEnter.append('rect').attr('class', 'nv-measure');\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            var w0 = function(d) { return Math.abs(x0(d) - x0(0)) }, // TODO: could optimize by precalculating x0(0) and x1(0)\n                w1 = function(d) { return Math.abs(x1(d) - x1(0)) };\n            var xp0 = function(d) { return d < 0 ? x0(d) : x0(0) },\n                xp1 = function(d) { return d < 0 ? x1(d) : x1(0) };\n\n            for(var i=0,il=rangez.length; i<il; i++){\n                var range = rangez[i];\n                g.select('rect.nv-range'+i)\n                    .datum(range)\n                    .attr('height', availableHeight)\n                    .transition()\n                    .duration(duration)\n                    .attr('width', w1(range))\n                    .attr('x', xp1(range))\n            }\n\n            g.select('rect.nv-measure')\n                .style('fill', color)\n                .attr('height', availableHeight / 3)\n                .attr('y', availableHeight / 3)\n                .on('mouseover', function() {\n                    dispatch.elementMouseover({\n                        value: measurez[0],\n                        label: measureLabelz[0] || 'Current',\n                        color: d3.select(this).style(\"fill\")\n                    })\n                })\n                .on('mousemove', function() {\n                    dispatch.elementMousemove({\n                        value: measurez[0],\n                        label: measureLabelz[0] || 'Current',\n                        color: d3.select(this).style(\"fill\")\n                    })\n                })\n                .on('mouseout', function() {\n                    dispatch.elementMouseout({\n                        value: measurez[0],\n                        label: measureLabelz[0] || 'Current',\n                        color: d3.select(this).style(\"fill\")\n                    })\n                })\n                .transition()\n                .duration(duration)\n                // compare and position by the first measure's value, not the array itself\n                .attr('width', measurez[0] < 0 ?\n                    x1(0) - x1(measurez[0])\n                    : x1(measurez[0]) - x1(0))\n                .attr('x', xp1(measurez[0]));\n\n            var h3 =  availableHeight / 6;\n\n            var markerData = markerz.map( function(marker, index) {\n                
return {value: marker, label: markerLabelz[index]}\n            });\n            gEnter\n              .selectAll(\"path.nv-markerTriangle\")\n              .data(markerData)\n              .enter()\n              .append('path')\n              .attr('class', 'nv-markerTriangle')\n              .attr('d', 'M0,' + h3 + 'L' + h3 + ',' + (-h3) + ' ' + (-h3) + ',' + (-h3) + 'Z')\n              .on('mouseover', function(d) {\n                dispatch.elementMouseover({\n                  value: d.value,\n                  label: d.label || 'Previous',\n                  color: d3.select(this).style(\"fill\"),\n                  pos: [x1(d.value), availableHeight/2]\n                })\n\n              })\n              .on('mousemove', function(d) {\n                  dispatch.elementMousemove({\n                      value: d.value,\n                      label: d.label || 'Previous',\n                      color: d3.select(this).style(\"fill\")\n                  })\n              })\n              .on('mouseout', function(d, i) {\n                  dispatch.elementMouseout({\n                      value: d.value,\n                      label: d.label || 'Previous',\n                      color: d3.select(this).style(\"fill\")\n                  })\n              });\n\n            g.selectAll(\"path.nv-markerTriangle\")\n              .data(markerData)\n              .transition()\n              .duration(duration)\n              .attr('transform', function(d) { return 'translate(' + x1(d.value) + ',' + (availableHeight / 2) + ')' });\n\n            var markerLinesData = markerLinez.map( function(marker, index) {\n                return {value: marker, label: markerLineLabelz[index]}\n            });\n            gEnter\n              .selectAll(\"line.nv-markerLine\")\n              .data(markerLinesData)\n              .enter()\n              .append('line')\n              .attr('cursor', '')\n              .attr('class', 'nv-markerLine')\n              .attr('x1', function(d) { return x1(d.value) })\n              .attr('y1', '2')\n              .attr('x2', function(d) { return x1(d.value) })\n              .attr('y2', availableHeight - 2)\n              .on('mouseover', function(d) {\n                dispatch.elementMouseover({\n                  value: d.value,\n                  label: d.label || 'Previous',\n                  color: d3.select(this).style(\"fill\"),\n                  pos: [x1(d.value), availableHeight/2]\n                })\n\n              })\n              .on('mousemove', function(d) {\n                  dispatch.elementMousemove({\n                      value: d.value,\n                      label: d.label || 'Previous',\n                      color: d3.select(this).style(\"fill\")\n                  })\n              })\n              .on('mouseout', function(d, i) {\n                  dispatch.elementMouseout({\n                      value: d.value,\n                      label: d.label || 'Previous',\n                      color: d3.select(this).style(\"fill\")\n                  })\n              });\n\n            g.selectAll(\"line.nv-markerLine\")\n              .data(markerLinesData)\n              .transition()\n              .duration(duration)\n              .attr('x1', function(d) { return x1(d.value) })\n              .attr('x2', function(d) { return x1(d.value) });\n\n            wrap.selectAll('.nv-range')\n                .on('mouseover', function(d,i) {\n                    var label = rangeLabelz[i] || defaultRangeLabels[i];\n                    
dispatch.elementMouseover({\n                        value: d,\n                        label: label,\n                        color: d3.select(this).style(\"fill\")\n                    })\n                })\n                .on('mousemove', function() {\n                    dispatch.elementMousemove({\n                        value: measurez[0],\n                        label: measureLabelz[0] || 'Previous',\n                        color: d3.select(this).style(\"fill\")\n                    })\n                })\n                .on('mouseout', function(d,i) {\n                    var label = rangeLabelz[i] || defaultRangeLabels[i];\n                    dispatch.elementMouseout({\n                        value: d,\n                        label: label,\n                        color: d3.select(this).style(\"fill\")\n                    })\n                });\n        });\n\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        ranges:      {get: function(){return ranges;}, set: function(_){ranges=_;}}, // ranges (bad, satisfactory, good)\n        markers:     {get: function(){return markers;}, set: function(_){markers=_;}}, // markers (previous, goal)\n        measures: {get: function(){return measures;}, set: function(_){measures=_;}}, // measures (actual, forecast)\n        forceX:      {get: function(){return forceX;}, set: function(_){forceX=_;}},\n        width:    {get: function(){return width;}, set: function(_){width=_;}},\n        height:    {get: function(){return height;}, set: function(_){height=_;}},\n        tickFormat:    {get: function(){return tickFormat;}, set: function(_){tickFormat=_;}},\n        duration:    {get: function(){return duration;}, set: function(_){duration=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? _.left   : margin.left;\n        }},\n        orient: {get: function(){return orient;}, set: function(_){ // left, right, top, bottom\n            orient = _;\n            reverse = orient == 'right' || orient == 'bottom';\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n    return chart;\n};\n\n\n","\n// Chart design based on the recommendations of Stephen Few. 
Implementation\n// based on the work of Clint Ivy, Jamie Love, and Jason Davies.\n// http://projects.instantcognition.com/protovis/bulletchart/\nnv.models.bulletChart = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var bullet = nv.models.bullet();\n    var tooltip = nv.models.tooltip();\n\n    var orient = 'left' // TODO top & bottom\n        , reverse = false\n        , margin = {top: 5, right: 40, bottom: 20, left: 120}\n        , ranges = function(d) { return d.ranges }\n        , markers = function(d) { return d.markers ? d.markers : [] }\n        , measures = function(d) { return d.measures }\n        , width = null\n        , height = 55\n        , tickFormat = null\n        , ticks = null\n        , noData = null\n        , dispatch = d3.dispatch()\n        ;\n\n    tooltip\n        .duration(0)\n        .headerEnabled(false);\n\n    function chart(selection) {\n        selection.each(function(d, i) {\n            var container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = height - margin.top - margin.bottom,\n                that = this;\n\n            chart.update = function() { chart(selection) };\n            chart.container = this;\n\n            // Display No Data message if there's nothing to show.\n            if (!d || !ranges.call(this, d, i)) {\n                nv.utils.noData(chart, container)\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            var rangez = ranges.call(this, d, i).slice().sort(d3.descending),\n                markerz = markers.call(this, d, i).slice().sort(d3.descending),\n                measurez = measures.call(this, d, i).slice().sort(d3.descending);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-bulletChart').data([d]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-bulletChart');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-bulletWrap');\n            gEnter.append('g').attr('class', 'nv-titles');\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            // Compute the new x-scale.\n            var x1 = d3.scale.linear()\n                .domain([0, Math.max(rangez[0], (markerz[0] || 0), measurez[0])])  // TODO: need to allow forceX and forceY, and xDomain, yDomain\n                .range(reverse ? 
[availableWidth, 0] : [0, availableWidth]);\n\n            // Retrieve the old x-scale, if this is an update.\n            var x0 = this.__chart__ || d3.scale.linear()\n                .domain([0, Infinity])\n                .range(x1.range());\n\n            // Stash the new scale.\n            this.__chart__ = x1;\n\n            var w0 = function(d) { return Math.abs(x0(d) - x0(0)) }, // TODO: could optimize by precalculating x0(0) and x1(0)\n                w1 = function(d) { return Math.abs(x1(d) - x1(0)) };\n\n            var title = gEnter.select('.nv-titles').append('g')\n                .attr('text-anchor', 'end')\n                .attr('transform', 'translate(-6,' + (height - margin.top - margin.bottom) / 2 + ')');\n            title.append('text')\n                .attr('class', 'nv-title')\n                .text(function(d) { return d.title; });\n\n            title.append('text')\n                .attr('class', 'nv-subtitle')\n                .attr('dy', '1em')\n                .text(function(d) { return d.subtitle; });\n\n            bullet\n                .width(availableWidth)\n                .height(availableHeight);\n\n            var bulletWrap = g.select('.nv-bulletWrap');\n            d3.transition(bulletWrap).call(bullet);\n\n            // Compute the tick format.\n            var format = tickFormat || x1.tickFormat( availableWidth / 100 );\n\n            // Update the tick groups.\n            var tick = g.selectAll('g.nv-tick')\n                .data(x1.ticks( ticks ? ticks : (availableWidth / 50) ), function(d) {\n                    return this.textContent || format(d);\n                });\n\n            // Initialize the ticks with the old scale, x0.\n            var tickEnter = tick.enter().append('g')\n                .attr('class', 'nv-tick')\n                .attr('transform', function(d) { return 'translate(' + x0(d) + ',0)' })\n                .style('opacity', 1e-6);\n\n            tickEnter.append('line')\n                .attr('y1', availableHeight)\n                .attr('y2', availableHeight * 7 / 6);\n\n            tickEnter.append('text')\n                .attr('text-anchor', 'middle')\n                .attr('dy', '1em')\n                .attr('y', availableHeight * 7 / 6)\n                .text(format);\n\n            // Transition the updating ticks to the new scale, x1.\n            var tickUpdate = d3.transition(tick)\n                .transition()\n                .duration(bullet.duration())\n                .attr('transform', function(d) { return 'translate(' + x1(d) + ',0)' })\n                .style('opacity', 1);\n\n            tickUpdate.select('line')\n                .attr('y1', availableHeight)\n                .attr('y2', availableHeight * 7 / 6);\n\n            tickUpdate.select('text')\n                .attr('y', availableHeight * 7 / 6);\n\n            // Transition the exiting ticks to the new scale, x1.\n            d3.transition(tick.exit())\n                .transition()\n                .duration(bullet.duration())\n                .attr('transform', function(d) { return 'translate(' + x1(d) + ',0)' })\n                .style('opacity', 1e-6)\n                .remove();\n        });\n\n        d3.timer.flush();\n        return chart;\n    }\n\n    //============================================================\n    // Event Handling/Dispatching (out of chart's scope)\n    //------------------------------------------------------------\n\n    bullet.dispatch.on('elementMouseover.tooltip', function(evt) {\n        evt['series'] 
= {\n            key: evt.label,\n            value: evt.value,\n            color: evt.color\n        };\n        tooltip.data(evt).hidden(false);\n    });\n\n    bullet.dispatch.on('elementMouseout.tooltip', function(evt) {\n        tooltip.hidden(true);\n    });\n\n    bullet.dispatch.on('elementMousemove.tooltip', function(evt) {\n        tooltip();\n    });\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.bullet = bullet;\n    chart.dispatch = dispatch;\n    chart.tooltip = tooltip;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        ranges:      {get: function(){return ranges;}, set: function(_){ranges=_;}}, // ranges (bad, satisfactory, good)\n        markers:     {get: function(){return markers;}, set: function(_){markers=_;}}, // markers (previous, goal)\n        measures: {get: function(){return measures;}, set: function(_){measures=_;}}, // measures (actual, forecast)\n        width:    {get: function(){return width;}, set: function(_){width=_;}},\n        height:    {get: function(){return height;}, set: function(_){height=_;}},\n        tickFormat:    {get: function(){return tickFormat;}, set: function(_){tickFormat=_;}},\n        ticks:    {get: function(){return ticks;}, set: function(_){ticks=_;}},\n        noData:    {get: function(){return noData;}, set: function(_){noData=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        orient: {get: function(){return orient;}, set: function(_){ // left, right, top, bottom\n            orient = _;\n            reverse = orient == 'right' || orient == 'bottom';\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, bullet);\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n\n\n","\nnv.models.candlestickBar = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0}\n        , width = null\n        , height = null\n        , id = Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one\n        , container\n        , x = d3.scale.linear()\n        , y = d3.scale.linear()\n        , getX = function(d) { return d.x }\n        , getY = function(d) { return d.y }\n        , getOpen = function(d) { return d.open }\n        , getClose = function(d) { return d.close }\n        , getHigh = function(d) { return d.high }\n        , getLow = function(d) { return d.low }\n        , forceX = []\n        , forceY = []\n        , padData     = false // If true, adds half a data points width to front and back, for lining up a line chart with a bar chart\n        , clipEdge = true\n        , color = nv.utils.defaultColor()\n        , interactive = false\n        , xDomain\n        , yDomain\n        , xRange\n        , yRange\n        , dispatch = d3.dispatch('stateChange', 'changeState', 'renderEnd', 'chartClick', 'elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout', 'elementMousemove')\n        ;\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    function chart(selection) {\n        selection.each(function(data) {\n            container = d3.select(this);\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            nv.utils.initSVG(container);\n\n            // Width of the candlestick bars.\n            var barWidth = (availableWidth / data[0].values.length) * .45;\n\n            // Setup Scales\n            x.domain(xDomain || d3.extent(data[0].values.map(getX).concat(forceX) ));\n\n            if (padData)\n                x.range(xRange || [availableWidth * .5 / data[0].values.length, availableWidth * (data[0].values.length - .5)  / data[0].values.length ]);\n            else\n                x.range(xRange || [5 + barWidth / 2, availableWidth - barWidth / 2 - 5]);\n\n            y.domain(yDomain || [\n                    d3.min(data[0].values.map(getLow).concat(forceY)),\n                    d3.max(data[0].values.map(getHigh).concat(forceY))\n                ]\n            ).range(yRange || [availableHeight, 0]);\n\n            // If scale's domain don't have a range, slightly adjust to make one... 
so a chart can show a single data point\n            if (x.domain()[0] === x.domain()[1])\n                x.domain()[0] ?\n                    x.domain([x.domain()[0] - x.domain()[0] * 0.01, x.domain()[1] + x.domain()[1] * 0.01])\n                    : x.domain([-1,1]);\n\n            if (y.domain()[0] === y.domain()[1])\n                y.domain()[0] ?\n                    y.domain([y.domain()[0] + y.domain()[0] * 0.01, y.domain()[1] - y.domain()[1] * 0.01])\n                    : y.domain([-1,1]);\n\n            // Setup containers and skeleton of chart\n            var wrap = d3.select(this).selectAll('g.nv-wrap.nv-candlestickBar').data([data[0].values]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-candlestickBar');\n            var defsEnter = wrapEnter.append('defs');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-ticks');\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            container\n                .on('click', function(d,i) {\n                    dispatch.chartClick({\n                        data: d,\n                        index: i,\n                        pos: d3.event,\n                        id: id\n                    });\n                });\n\n            defsEnter.append('clipPath')\n                .attr('id', 'nv-chart-clip-path-' + id)\n                .append('rect');\n\n            wrap.select('#nv-chart-clip-path-' + id + ' rect')\n                .attr('width', availableWidth)\n                .attr('height', availableHeight);\n\n            g   .attr('clip-path', clipEdge ? 'url(#nv-chart-clip-path-' + id + ')' : '');\n\n            var ticks = wrap.select('.nv-ticks').selectAll('.nv-tick')\n                .data(function(d) { return d });\n            ticks.exit().remove();\n\n            var tickGroups = ticks.enter().append('g');\n\n            // The colors are currently controlled by CSS.\n            ticks\n                .attr('class', function(d, i, j) { return (getOpen(d, i) > getClose(d, i) ? 'nv-tick negative' : 'nv-tick positive') + ' nv-tick-' + j + '-' + i});\n\n            var lines = tickGroups.append('line')\n                .attr('class', 'nv-candlestick-lines')\n                .attr('transform', function(d, i) { return 'translate(' + x(getX(d, i)) + ',0)'; })\n                .attr('x1', 0)\n                .attr('y1', function(d, i) { return y(getHigh(d, i)); })\n                .attr('x2', 0)\n                .attr('y2', function(d, i) { return y(getLow(d, i)); });\n\n            var rects = tickGroups.append('rect')\n                .attr('class', 'nv-candlestick-rects nv-bars')\n                .attr('transform', function(d, i) {\n                    return 'translate(' + (x(getX(d, i)) - barWidth/2) + ','\n                    + (y(getY(d, i)) - (getOpen(d, i) > getClose(d, i) ? (y(getClose(d, i)) - y(getOpen(d, i))) : 0))\n                    + ')';\n                })\n                .attr('x', 0)\n                .attr('y', 0)\n                .attr('width', barWidth)\n                .attr('height', function(d, i) {\n                    var open = getOpen(d, i);\n                    var close = getClose(d, i);\n                    return open > close ? 
y(close) - y(open) : y(open) - y(close);\n                });\n\n            ticks.select('.nv-candlestick-lines').transition()\n                .attr('transform', function(d, i) { return 'translate(' + x(getX(d, i)) + ',0)'; })\n                .attr('x1', 0)\n                .attr('y1', function(d, i) { return y(getHigh(d, i)); })\n                .attr('x2', 0)\n                .attr('y2', function(d, i) { return y(getLow(d, i)); });\n\n            ticks.select('.nv-candlestick-rects').transition()\n                .attr('transform', function(d, i) {\n                    return 'translate(' + (x(getX(d, i)) - barWidth/2) + ','\n                    + (y(getY(d, i)) - (getOpen(d, i) > getClose(d, i) ? (y(getClose(d, i)) - y(getOpen(d, i))) : 0))\n                    + ')';\n                })\n                .attr('x', 0)\n                .attr('y', 0)\n                .attr('width', barWidth)\n                .attr('height', function(d, i) {\n                    var open = getOpen(d, i);\n                    var close = getClose(d, i);\n                    return open > close ? y(close) - y(open) : y(open) - y(close);\n                });\n        });\n\n        return chart;\n    }\n\n\n    //Create methods to allow outside functions to highlight a specific bar.\n    chart.highlightPoint = function(pointIndex, isHoverOver) {\n        chart.clearHighlights();\n        container.select(\".nv-candlestickBar .nv-tick-0-\" + pointIndex)\n            .classed(\"hover\", isHoverOver)\n        ;\n    };\n\n    chart.clearHighlights = function() {\n        container.select(\".nv-candlestickBar .nv-tick.hover\")\n            .classed(\"hover\", false)\n        ;\n    };\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:    {get: function(){return width;}, set: function(_){width=_;}},\n        height:   {get: function(){return height;}, set: function(_){height=_;}},\n        xScale:   {get: function(){return x;}, set: function(_){x=_;}},\n        yScale:   {get: function(){return y;}, set: function(_){y=_;}},\n        xDomain:  {get: function(){return xDomain;}, set: function(_){xDomain=_;}},\n        yDomain:  {get: function(){return yDomain;}, set: function(_){yDomain=_;}},\n        xRange:   {get: function(){return xRange;}, set: function(_){xRange=_;}},\n        yRange:   {get: function(){return yRange;}, set: function(_){yRange=_;}},\n        forceX:   {get: function(){return forceX;}, set: function(_){forceX=_;}},\n        forceY:   {get: function(){return forceY;}, set: function(_){forceY=_;}},\n        padData:  {get: function(){return padData;}, set: function(_){padData=_;}},\n        clipEdge: {get: function(){return clipEdge;}, set: function(_){clipEdge=_;}},\n        id:       {get: function(){return id;}, set: function(_){id=_;}},\n        interactive: {get: function(){return interactive;}, set: function(_){interactive=_;}},\n\n        x:     {get: function(){return getX;}, set: function(_){getX=_;}},\n        y:     {get: function(){return getY;}, set: function(_){getY=_;}},\n        open:  {get: function(){return getOpen;}, set: function(_){getOpen=_;}},\n        close: {get: function(){return getClose;}, set: function(_){getClose=_;}},\n        high:  
{get: function(){return getHigh;}, set: function(_){getHigh=_;}},\n        low:   {get: function(){return getLow;}, set: function(_){getLow=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    != undefined ? _.top    : margin.top;\n            margin.right  = _.right  != undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom != undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   != undefined ? _.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n    return chart;\n};\n","\nnv.models.cumulativeLineChart = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var lines = nv.models.line()\n        , xAxis = nv.models.axis()\n        , yAxis = nv.models.axis()\n        , legend = nv.models.legend()\n        , controls = nv.models.legend()\n        , interactiveLayer = nv.interactiveGuideline()\n        , tooltip = nv.models.tooltip()\n        ;\n\n    var margin = {top: 30, right: 30, bottom: 50, left: 60}\n        , marginTop = null\n        , color = nv.utils.defaultColor()\n        , width = null\n        , height = null\n        , showLegend = true\n        , showXAxis = true\n        , showYAxis = true\n        , rightAlignYAxis = false\n        , showControls = true\n        , useInteractiveGuideline = false\n        , rescaleY = true\n        , x //can be accessed via chart.xScale()\n        , y //can be accessed via chart.yScale()\n        , id = lines.id()\n        , state = nv.utils.state()\n        , defaultState = null\n        , noData = null\n        , average = function(d) { return d.average }\n        , dispatch = d3.dispatch('stateChange', 'changeState', 'renderEnd')\n        , transitionDuration = 250\n        , duration = 250\n        , noErrorCheck = false  //if set to TRUE, will bypass an error check in the indexify function.\n        ;\n\n    state.index = 0;\n    state.rescaleY = rescaleY;\n\n    xAxis.orient('bottom').tickPadding(7);\n    yAxis.orient((rightAlignYAxis) ? 
'right' : 'left');\n\n    tooltip.valueFormatter(function(d, i) {\n        return yAxis.tickFormat()(d, i);\n    }).headerFormatter(function(d, i) {\n        return xAxis.tickFormat()(d, i);\n    });\n\n    controls.updateState(false);\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var dx = d3.scale.linear()\n        , index = {i: 0, x: 0}\n        , renderWatch = nv.utils.renderWatch(dispatch, duration)\n        ;\n\n    var stateGetter = function(data) {\n        return function(){\n            return {\n                active: data.map(function(d) { return !d.disabled }),\n                index: index.i,\n                rescaleY: rescaleY\n            };\n        }\n    };\n\n    var stateSetter = function(data) {\n        return function(state) {\n            if (state.index !== undefined)\n                index.i = state.index;\n            if (state.rescaleY !== undefined)\n                rescaleY = state.rescaleY;\n            if (state.active !== undefined)\n                data.forEach(function(series,i) {\n                    series.disabled = !state.active[i];\n                });\n        }\n    };\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(lines);\n        if (showXAxis) renderWatch.models(xAxis);\n        if (showYAxis) renderWatch.models(yAxis);\n        selection.each(function(data) {\n            var container = d3.select(this);\n            nv.utils.initSVG(container);\n            container.classed('nv-chart-' + id, true);\n            var that = this;\n\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            chart.update = function() {\n                if (duration === 0)\n                    container.call(chart);\n                else\n                    container.transition().duration(duration).call(chart)\n            };\n            chart.container = this;\n\n            state\n                .setter(stateSetter(data), chart.update)\n                .getter(stateGetter(data))\n                .update();\n\n            // DEPRECATED set state.disabled\n            state.disabled = data.map(function(d) { return !!d.disabled });\n\n            if (!defaultState) {\n                var key;\n                defaultState = {};\n                for (key in state) {\n                    if (state[key] instanceof Array)\n                        defaultState[key] = state[key].slice(0);\n                    else\n                        defaultState[key] = state[key];\n                }\n            }\n\n            var indexDrag = d3.behavior.drag()\n                .on('dragstart', dragStart)\n                .on('drag', dragMove)\n                .on('dragend', dragEnd);\n\n\n            function dragStart(d,i) {\n                d3.select(chart.container)\n                    .style('cursor', 'ew-resize');\n            }\n\n            function dragMove(d,i) {\n                index.x = d3.event.x;\n                index.i = Math.round(dx.invert(index.x));\n                updateZero();\n            }\n\n            function dragEnd(d,i) {\n                d3.select(chart.container)\n                    .style('cursor', 'auto');\n\n                // update state and send stateChange with new index\n                state.index = index.i;\n                
dispatch.stateChange(state);\n            }\n\n            // Display No Data message if there's nothing to show.\n            if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) {\n                nv.utils.noData(chart, container)\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            // Setup Scales\n            x = lines.xScale();\n            y = lines.yScale();\n\n            if (!rescaleY) {\n                var seriesDomains = data\n                    .filter(function(series) { return !series.disabled })\n                    .map(function(series,i) {\n                        var initialDomain = d3.extent(series.values, lines.y());\n\n                        //account for series being disabled when losing 95% or more\n                        if (initialDomain[0] < -.95) initialDomain[0] = -.95;\n\n                        return [\n                                (initialDomain[0] - initialDomain[1]) / (1 + initialDomain[1]),\n                                (initialDomain[1] - initialDomain[0]) / (1 + initialDomain[0])\n                        ];\n                    });\n\n                var completeDomain = [\n                    d3.min(seriesDomains, function(d) { return d[0] }),\n                    d3.max(seriesDomains, function(d) { return d[1] })\n                ];\n\n                lines.yDomain(completeDomain);\n            } else {\n                lines.yDomain(null);\n            }\n\n            dx.domain([0, data[0].values.length - 1]) //Assumes all series have same length\n                .range([0, availableWidth])\n                .clamp(true);\n\n            var data = indexify(index.i, data);\n\n            // Setup containers and skeleton of chart\n            var interactivePointerEvents = (useInteractiveGuideline) ? 
\"none\" : \"all\";\n            var wrap = container.selectAll('g.nv-wrap.nv-cumulativeLine').data([data]);\n            var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-cumulativeLine').append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-interactive');\n            gEnter.append('g').attr('class', 'nv-x nv-axis').style(\"pointer-events\",\"none\");\n            gEnter.append('g').attr('class', 'nv-y nv-axis');\n            gEnter.append('g').attr('class', 'nv-background');\n            gEnter.append('g').attr('class', 'nv-linesWrap').style(\"pointer-events\",interactivePointerEvents);\n            gEnter.append('g').attr('class', 'nv-avgLinesWrap').style(\"pointer-events\",\"none\");\n            gEnter.append('g').attr('class', 'nv-legendWrap');\n            gEnter.append('g').attr('class', 'nv-controlsWrap');\n\n            // Legend\n            if (!showLegend) {\n                g.select('.nv-legendWrap').selectAll('*').remove();\n            } else {\n                legend.width(availableWidth);\n\n                g.select('.nv-legendWrap')\n                    .datum(data)\n                    .call(legend);\n\n                if (!marginTop && legend.height() !== margin.top) {\n                    margin.top = legend.height();\n                    availableHeight = nv.utils.availableHeight(height, container, margin);\n                }\n\n                g.select('.nv-legendWrap')\n                    .attr('transform', 'translate(0,' + (-margin.top) +')')\n            }\n\n            // Controls\n            if (!showControls) {\n                 g.select('.nv-controlsWrap').selectAll('*').remove();\n            } else {\n                var controlsData = [\n                    { key: 'Re-scale y-axis', disabled: !rescaleY }\n                ];\n\n                controls\n                    .width(140)\n                    .color(['#444', '#444', '#444'])\n                    .rightAlign(false)\n                    .margin({top: 5, right: 0, bottom: 5, left: 20})\n                ;\n\n                g.select('.nv-controlsWrap')\n                    .datum(controlsData)\n                    .attr('transform', 'translate(0,' + (-margin.top) +')')\n                    .call(controls);\n            }\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            if (rightAlignYAxis) {\n                g.select(\".nv-y.nv-axis\")\n                    .attr(\"transform\", \"translate(\" + availableWidth + \",0)\");\n            }\n\n            // Show error if series goes below 100%\n            var tempDisabled = data.filter(function(d) { return d.tempDisabled });\n\n            wrap.select('.tempDisabled').remove(); //clean-up and prevent duplicates\n            if (tempDisabled.length) {\n                wrap.append('text').attr('class', 'tempDisabled')\n                    .attr('x', availableWidth / 2)\n                    .attr('y', '-.71em')\n                    .style('text-anchor', 'end')\n                    .text(tempDisabled.map(function(d) { return d.key }).join(', ') + ' values cannot be calculated for this time period.');\n            }\n\n            //Set up interactive layer\n            if (useInteractiveGuideline) {\n                interactiveLayer\n                    .width(availableWidth)\n                    .height(availableHeight)\n                    .margin({left:margin.left,top:margin.top})\n                    
.svgContainer(container)\n                    .xScale(x);\n                wrap.select(\".nv-interactive\").call(interactiveLayer);\n            }\n\n            gEnter.select('.nv-background')\n                .append('rect');\n\n            g.select('.nv-background rect')\n                .attr('width', availableWidth)\n                .attr('height', availableHeight);\n\n            lines\n                //.x(function(d) { return d.x })\n                .y(function(d) { return d.display.y })\n                .width(availableWidth)\n                .height(availableHeight)\n                .color(data.map(function(d,i) {\n                    return d.color || color(d, i);\n                }).filter(function(d,i) { return !data[i].disabled && !data[i].tempDisabled; }));\n\n            var linesWrap = g.select('.nv-linesWrap')\n                .datum(data.filter(function(d) { return  !d.disabled && !d.tempDisabled }));\n\n            linesWrap.call(lines);\n\n            //Store a series index number in the data array.\n            data.forEach(function(d,i) {\n                d.seriesIndex = i;\n            });\n\n            var avgLineData = data.filter(function(d) {\n                return !d.disabled && !!average(d);\n            });\n\n            var avgLines = g.select(\".nv-avgLinesWrap\").selectAll(\"line\")\n                .data(avgLineData, function(d) { return d.key; });\n\n            var getAvgLineY = function(d) {\n                //If average lines go off the svg element, clamp them to the svg bounds.\n                var yVal = y(average(d));\n                if (yVal < 0) return 0;\n                if (yVal > availableHeight) return availableHeight;\n                return yVal;\n            };\n\n            avgLines.enter()\n                .append('line')\n                .style('stroke-width',2)\n                .style('stroke-dasharray','10,10')\n                .style('stroke',function (d,i) {\n                    return lines.color()(d,d.seriesIndex);\n                })\n                .attr('x1',0)\n                .attr('x2',availableWidth)\n                .attr('y1', getAvgLineY)\n                .attr('y2', getAvgLineY);\n\n            avgLines\n                .style('stroke-opacity',function(d){\n                    //If average lines go offscreen, make them transparent\n                    var yVal = y(average(d));\n                    if (yVal < 0 || yVal > availableHeight) return 0;\n                    return 1;\n                })\n                .attr('x1',0)\n                .attr('x2',availableWidth)\n                .attr('y1', getAvgLineY)\n                .attr('y2', getAvgLineY);\n\n            avgLines.exit().remove();\n\n            //Create index line\n            var indexLine = linesWrap.selectAll('.nv-indexLine')\n                .data([index]);\n            indexLine.enter().append('rect').attr('class', 'nv-indexLine')\n                .attr('width', 3)\n                .attr('x', -2)\n                .attr('fill', 'red')\n                .attr('fill-opacity', .5)\n                .style(\"pointer-events\",\"all\")\n                .call(indexDrag);\n\n            indexLine\n                .attr('transform', function(d) { return 'translate(' + dx(d.i) + ',0)' })\n                .attr('height', availableHeight);\n\n            // Setup Axes\n            if (showXAxis) {\n                xAxis\n                    .scale(x)\n                    ._ticks( nv.utils.calcTicksX(availableWidth/70, data) )\n                    
.tickSize(-availableHeight, 0);\n\n                g.select('.nv-x.nv-axis')\n                    .attr('transform', 'translate(0,' + y.range()[0] + ')');\n                g.select('.nv-x.nv-axis')\n                    .call(xAxis);\n            }\n\n            if (showYAxis) {\n                yAxis\n                    .scale(y)\n                    ._ticks( nv.utils.calcTicksY(availableHeight/36, data) )\n                    .tickSize( -availableWidth, 0);\n\n                g.select('.nv-y.nv-axis')\n                    .call(yAxis);\n            }\n\n            //============================================================\n            // Event Handling/Dispatching (in chart's scope)\n            //------------------------------------------------------------\n\n            function updateZero() {\n                indexLine\n                    .data([index]);\n\n                //When dragging the index line, turn off line transitions.\n                // Then turn them back on when done dragging.\n                var oldDuration = chart.duration();\n                chart.duration(0);\n                chart.update();\n                chart.duration(oldDuration);\n            }\n\n            g.select('.nv-background rect')\n                .on('click', function() {\n                    index.x = d3.mouse(this)[0];\n                    index.i = Math.round(dx.invert(index.x));\n\n                    // update state and send stateChange with new index\n                    state.index = index.i;\n                    dispatch.stateChange(state);\n\n                    updateZero();\n                });\n\n            lines.dispatch.on('elementClick', function(e) {\n                index.i = e.pointIndex;\n                index.x = dx(index.i);\n\n                // update state and send stateChange with new index\n                state.index = index.i;\n                dispatch.stateChange(state);\n\n                updateZero();\n            });\n\n            controls.dispatch.on('legendClick', function(d,i) {\n                d.disabled = !d.disabled;\n                rescaleY = !d.disabled;\n\n                state.rescaleY = rescaleY;\n                dispatch.stateChange(state);\n                chart.update();\n            });\n\n            legend.dispatch.on('stateChange', function(newState) {\n                for (var key in newState)\n                    state[key] = newState[key];\n                dispatch.stateChange(state);\n                chart.update();\n            });\n\n            interactiveLayer.dispatch.on('elementMousemove', function(e) {\n                lines.clearHighlights();\n                var singlePoint, pointIndex, pointXLocation, allData = [];\n\n                data\n                    .filter(function(series, i) {\n                        series.seriesIndex = i;\n                        return !series.disabled;\n                    })\n                    .forEach(function(series,i) {\n                        pointIndex = nv.interactiveBisect(series.values, e.pointXValue, chart.x());\n                        lines.highlightPoint(i, pointIndex, true);\n                        var point = series.values[pointIndex];\n                        if (typeof point === 'undefined') return;\n                        if (typeof singlePoint === 'undefined') singlePoint = point;\n                        if (typeof pointXLocation === 'undefined') pointXLocation = chart.xScale()(chart.x()(point,pointIndex));\n                        allData.push({\n                  
          key: series.key,\n                            value: chart.y()(point, pointIndex),\n                            color: color(series,series.seriesIndex)\n                        });\n                    });\n\n                //Highlight the tooltip entry based on which point the mouse is closest to.\n                if (allData.length > 2) {\n                    var yValue = chart.yScale().invert(e.mouseY);\n                    var domainExtent = Math.abs(chart.yScale().domain()[0] - chart.yScale().domain()[1]);\n                    var threshold = 0.03 * domainExtent;\n                    var indexToHighlight = nv.nearestValueIndex(allData.map(function(d){return d.value}),yValue,threshold);\n                    if (indexToHighlight !== null)\n                        allData[indexToHighlight].highlight = true;\n                }\n\n                var xValue = xAxis.tickFormat()(chart.x()(singlePoint,pointIndex), pointIndex);\n                interactiveLayer.tooltip\n                    .valueFormatter(function(d,i) {\n                        return yAxis.tickFormat()(d);\n                    })\n                    .data(\n                    {\n                        value: xValue,\n                        series: allData\n                    }\n                )();\n\n                interactiveLayer.renderGuideLine(pointXLocation);\n            });\n\n            interactiveLayer.dispatch.on(\"elementMouseout\",function(e) {\n                lines.clearHighlights();\n            });\n\n            // Update chart from a state object passed to event handler\n            dispatch.on('changeState', function(e) {\n                if (typeof e.disabled !== 'undefined') {\n                    data.forEach(function(series,i) {\n                        series.disabled = e.disabled[i];\n                    });\n\n                    state.disabled = e.disabled;\n                }\n\n                if (typeof e.index !== 'undefined') {\n                    index.i = e.index;\n                    index.x = dx(index.i);\n\n                    state.index = e.index;\n\n                    indexLine\n                        .data([index]);\n                }\n\n                if (typeof e.rescaleY !== 'undefined') {\n                    rescaleY = e.rescaleY;\n                }\n\n                chart.update();\n            });\n\n        });\n\n        renderWatch.renderEnd('cumulativeLineChart immediate');\n\n        return chart;\n    }\n\n    //============================================================\n    // Event Handling/Dispatching (out of chart's scope)\n    //------------------------------------------------------------\n\n    lines.dispatch.on('elementMouseover.tooltip', function(evt) {\n        var point = {\n            x: chart.x()(evt.point),\n            y: chart.y()(evt.point),\n            color: evt.point.color\n        };\n        evt.point = point;\n        tooltip.data(evt).hidden(false);\n    });\n\n    lines.dispatch.on('elementMouseout.tooltip', function(evt) {\n        tooltip.hidden(true)\n    });\n\n    //============================================================\n    // Functions\n    //------------------------------------------------------------\n\n    var indexifyYGetter = null;\n    /* Normalize the data according to an index point. 
*/\n    function indexify(idx, data) {\n        if (!indexifyYGetter) indexifyYGetter = lines.y();\n        return data.map(function(line, i) {\n            if (!line.values) {\n                return line;\n            }\n            var indexValue = line.values[idx];\n            if (indexValue == null) {\n                return line;\n            }\n            var v = indexifyYGetter(indexValue, idx);\n\n            //TODO: implement the check below, and disable a series if it loses 100% or more, since that causes a divide-by-zero issue\n            if (v < -.95 && !noErrorCheck) {\n                //if a series loses more than 100%, the calculation fails; anything close can cause major distortion (but is mathematically correct until it hits 100%)\n\n                line.tempDisabled = true;\n                return line;\n            }\n\n            line.tempDisabled = false;\n\n            line.values = line.values.map(function(point, pointIndex) {\n                point.display = {'y': (indexifyYGetter(point, pointIndex) - v) / (1 + v) };\n                return point;\n            });\n\n            return line;\n        });\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    // expose chart's sub-components\n    chart.dispatch = dispatch;\n    chart.lines = lines;\n    chart.legend = legend;\n    chart.controls = controls;\n    chart.xAxis = xAxis;\n    chart.yAxis = yAxis;\n    chart.interactiveLayer = interactiveLayer;\n    chart.state = state;\n    chart.tooltip = tooltip;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        rescaleY:     {get: function(){return rescaleY;}, set: function(_){rescaleY=_;}},\n        showControls:     {get: function(){return showControls;}, set: function(_){showControls=_;}},\n        showLegend: {get: function(){return showLegend;}, set: function(_){showLegend=_;}},\n        average: {get: function(){return average;}, set: function(_){average=_;}},\n        defaultState:    {get: function(){return defaultState;}, set: function(_){defaultState=_;}},\n        noData:    {get: function(){return noData;}, set: function(_){noData=_;}},\n        showXAxis:    {get: function(){return showXAxis;}, set: function(_){showXAxis=_;}},\n        showYAxis:    {get: function(){return showYAxis;}, set: function(_){showYAxis=_;}},\n        noErrorCheck:    {get: function(){return noErrorCheck;}, set: function(_){noErrorCheck=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            if (_.top !== undefined) {\n                margin.top = _.top;\n                marginTop = _.top;\n            }\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n            legend.color(color);\n        }},\n        useInteractiveGuideline: {get: function(){return useInteractiveGuideline;}, set: function(_){\n            useInteractiveGuideline = _;\n            if (_ === true) {\n                chart.interactive(false);\n                chart.useVoronoi(false);\n            }\n        }},\n        rightAlignYAxis: {get: function(){return rightAlignYAxis;}, set: function(_){\n            rightAlignYAxis = _;\n            yAxis.orient( (_) ? 'right' : 'left');\n        }},\n        duration:    {get: function(){return duration;}, set: function(_){\n            duration = _;\n            lines.duration(duration);\n            xAxis.duration(duration);\n            yAxis.duration(duration);\n            renderWatch.reset(duration);\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, lines);\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","//TODO: consider deprecating by adding necessary features to multiBar model\nnv.models.discreteBar = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0}\n        , width = 960\n        , height = 500\n        , id = Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one\n        , container\n        , x = d3.scale.ordinal()\n        , y = d3.scale.linear()\n        , getX = function(d) { return d.x }\n        , getY = function(d) { return d.y }\n        , forceY = [0] // 0 is forced by default.. this makes sense for the majority of bar graphs... user can always do chart.forceY([]) to remove\n        , color = nv.utils.defaultColor()\n        , showValues = false\n        , valueFormat = d3.format(',.2f')\n        , xDomain\n        , yDomain\n        , xRange\n        , yRange\n        , dispatch = d3.dispatch('chartClick', 'elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout', 'elementMousemove', 'renderEnd')\n        , rectClass = 'discreteBar'\n        , duration = 250\n        ;\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var x0, y0;\n    var renderWatch = nv.utils.renderWatch(dispatch, duration);\n\n    function chart(selection) {\n        renderWatch.reset();\n        selection.each(function(data) {\n            var availableWidth = width - margin.left - margin.right,\n                availableHeight = height - margin.top - margin.bottom;\n\n            container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            //add series index to each data point for reference\n            data.forEach(function(series, i) {\n                series.values.forEach(function(point) {\n                    point.series = i;\n                });\n            });\n\n            // Setup Scales\n            // remap and flatten the data for use in calculating the scales' domains\n            var seriesData = (xDomain && yDomain) ? 
[] : // if we know xDomain and yDomain, no need to calculate\n                data.map(function(d) {\n                    return d.values.map(function(d,i) {\n                        return { x: getX(d,i), y: getY(d,i), y0: d.y0 }\n                    })\n                });\n\n            x   .domain(xDomain || d3.merge(seriesData).map(function(d) { return d.x }))\n                .rangeBands(xRange || [0, availableWidth], .1);\n            y   .domain(yDomain || d3.extent(d3.merge(seriesData).map(function(d) { return d.y }).concat(forceY)));\n\n            // If showValues, pad the Y axis range to account for label height\n            if (showValues) y.range(yRange || [availableHeight - (y.domain()[0] < 0 ? 12 : 0), y.domain()[1] > 0 ? 12 : 0]);\n            else y.range(yRange || [availableHeight, 0]);\n\n            //store old scales if they exist\n            x0 = x0 || x;\n            y0 = y0 || y.copy().range([y(0),y(0)]);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-discretebar').data([data]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-discretebar');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-groups');\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            //TODO: by definition, the discrete bar should not have multiple groups, will modify/remove later\n            var groups = wrap.select('.nv-groups').selectAll('.nv-group')\n                .data(function(d) { return d }, function(d) { return d.key });\n            groups.enter().append('g')\n                .style('stroke-opacity', 1e-6)\n                .style('fill-opacity', 1e-6);\n            groups.exit()\n                .watchTransition(renderWatch, 'discreteBar: exit groups')\n                .style('stroke-opacity', 1e-6)\n                .style('fill-opacity', 1e-6)\n                .remove();\n            groups\n                .attr('class', function(d,i) { return 'nv-group nv-series-' + i })\n                .classed('hover', function(d) { return d.hover });\n            groups\n                .watchTransition(renderWatch, 'discreteBar: groups')\n                .style('stroke-opacity', 1)\n                .style('fill-opacity', .75);\n\n            var bars = groups.selectAll('g.nv-bar')\n                .data(function(d) { return d.values });\n            bars.exit().remove();\n\n            var barsEnter = bars.enter().append('g')\n                .attr('transform', function(d,i,j) {\n                    return 'translate(' + (x(getX(d,i)) + x.rangeBand() * .05 ) + ', ' + y(0) + ')'\n                })\n                .on('mouseover', function(d,i) { //TODO: figure out why j works above, but not here\n                    d3.select(this).classed('hover', true);\n                    dispatch.elementMouseover({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                })\n                .on('mouseout', function(d,i) {\n                    d3.select(this).classed('hover', false);\n                    dispatch.elementMouseout({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                })\n                .on('mousemove', 
function(d,i) {\n                    dispatch.elementMousemove({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                })\n                .on('click', function(d,i) {\n                    var element = this;\n                    dispatch.elementClick({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\"),\n                        event: d3.event,\n                        element: element\n                    });\n                    d3.event.stopPropagation();\n                })\n                .on('dblclick', function(d,i) {\n                    dispatch.elementDblClick({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                    d3.event.stopPropagation();\n                });\n\n            barsEnter.append('rect')\n                .attr('height', 0)\n                .attr('width', x.rangeBand() * .9 / data.length )\n\n            if (showValues) {\n                barsEnter.append('text')\n                    .attr('text-anchor', 'middle')\n                ;\n\n                bars.select('text')\n                    .text(function(d,i) { return valueFormat(getY(d,i)) })\n                    .watchTransition(renderWatch, 'discreteBar: bars text')\n                    .attr('x', x.rangeBand() * .9 / 2)\n                    .attr('y', function(d,i) { return getY(d,i) < 0 ? y(getY(d,i)) - y(0) + 12 : -4 })\n\n                ;\n            } else {\n                bars.selectAll('text').remove();\n            }\n\n            bars\n                .attr('class', function(d,i) { return getY(d,i) < 0 ? 
'nv-bar negative' : 'nv-bar positive' })\n                .style('fill', function(d,i) { return d.color || color(d,i) })\n                .style('stroke', function(d,i) { return d.color || color(d,i) })\n                .select('rect')\n                .attr('class', rectClass)\n                .watchTransition(renderWatch, 'discreteBar: bars rect')\n                .attr('width', x.rangeBand() * .9 / data.length);\n            bars.watchTransition(renderWatch, 'discreteBar: bars')\n                //.delay(function(d,i) { return i * 1200 / data[0].values.length })\n                .attr('transform', function(d,i) {\n                    var left = x(getX(d,i)) + x.rangeBand() * .05,\n                        top = getY(d,i) < 0 ?\n                            y(0) :\n                                y(0) - y(getY(d,i)) < 1 ?\n                            y(0) - 1 : //make 1 px positive bars show up above y=0\n                            y(getY(d,i));\n\n                    return 'translate(' + left + ', ' + top + ')'\n                })\n                .select('rect')\n                .attr('height', function(d,i) {\n                    return  Math.max(Math.abs(y(getY(d,i)) - y(0)), 1)\n                });\n\n\n            //store old scales for use in transitions on update\n            x0 = x.copy();\n            y0 = y.copy();\n\n        });\n\n        renderWatch.renderEnd('discreteBar immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:   {get: function(){return width;}, set: function(_){width=_;}},\n        height:  {get: function(){return height;}, set: function(_){height=_;}},\n        forceY:  {get: function(){return forceY;}, set: function(_){forceY=_;}},\n        showValues: {get: function(){return showValues;}, set: function(_){showValues=_;}},\n        x:       {get: function(){return getX;}, set: function(_){getX=_;}},\n        y:       {get: function(){return getY;}, set: function(_){getY=_;}},\n        xScale:  {get: function(){return x;}, set: function(_){x=_;}},\n        yScale:  {get: function(){return y;}, set: function(_){y=_;}},\n        xDomain: {get: function(){return xDomain;}, set: function(_){xDomain=_;}},\n        yDomain: {get: function(){return yDomain;}, set: function(_){yDomain=_;}},\n        xRange:  {get: function(){return xRange;}, set: function(_){xRange=_;}},\n        yRange:  {get: function(){return yRange;}, set: function(_){yRange=_;}},\n        valueFormat:    {get: function(){return valueFormat;}, set: function(_){valueFormat=_;}},\n        id:          {get: function(){return id;}, set: function(_){id=_;}},\n        rectClass: {get: function(){return rectClass;}, set: function(_){rectClass=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","\nnv.models.discreteBarChart = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var discretebar = nv.models.discreteBar()\n        , xAxis = nv.models.axis()\n        , yAxis = nv.models.axis()\n\t, legend = nv.models.legend()\n        , tooltip = nv.models.tooltip()\n        ;\n\n    var margin = {top: 15, right: 10, bottom: 50, left: 60}\n        , marginTop = null\n        , width = null\n        , height = null\n        , color = nv.utils.getColor()\n\t, showLegend = false\n        , showXAxis = true\n        , showYAxis = true\n        , rightAlignYAxis = false\n        , staggerLabels = false\n        , wrapLabels = false\n        , rotateLabels = 0\n        , x\n        , y\n        , noData = null\n        , dispatch = d3.dispatch('beforeUpdate','renderEnd')\n        , duration = 250\n        ;\n\n    xAxis\n        .orient('bottom')\n        .showMaxMin(false)\n        .tickFormat(function(d) { return d })\n    ;\n    yAxis\n        .orient((rightAlignYAxis) ? 'right' : 'left')\n        .tickFormat(d3.format(',.1f'))\n    ;\n\n    tooltip\n        .duration(0)\n        .headerEnabled(false)\n        .valueFormatter(function(d, i) {\n            return yAxis.tickFormat()(d, i);\n        })\n        .keyFormatter(function(d, i) {\n            return xAxis.tickFormat()(d, i);\n        });\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch, duration);\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(discretebar);\n        if (showXAxis) renderWatch.models(xAxis);\n        if (showYAxis) renderWatch.models(yAxis);\n\n        selection.each(function(data) {\n            var container = d3.select(this),\n                that = this;\n            nv.utils.initSVG(container);\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            chart.update = function() {\n                dispatch.beforeUpdate();\n                container.transition().duration(duration).call(chart);\n            };\n            chart.container = this;\n\n            // Display No Data message if there's nothing to show.\n            if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) {\n                nv.utils.noData(chart, container);\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            // Setup Scales\n            x = discretebar.xScale();\n            y = discretebar.yScale().clamp(true);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-discreteBarWithAxes').data([data]);\n            var gEnter = 
wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-discreteBarWithAxes').append('g');\n            var defsEnter = gEnter.append('defs');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-x nv-axis');\n            gEnter.append('g').attr('class', 'nv-y nv-axis')\n                .append('g').attr('class', 'nv-zeroLine')\n                .append('line');\n\n            gEnter.append('g').attr('class', 'nv-barsWrap');\n\t    gEnter.append('g').attr('class', 'nv-legendWrap');\n\n            g.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            // Legend\n            if (!showLegend) {\n                g.select('.nv-legendWrap').selectAll('*').remove();\n            } else {\n                legend.width(availableWidth);\n\n                g.select('.nv-legendWrap')\n                    .datum(data)\n                    .call(legend);\n\n                if (!marginTop && legend.height() !== margin.top) {\n                    margin.top = legend.height();\n                    availableHeight = nv.utils.availableHeight(height, container, margin);\n                }\n\n                wrap.select('.nv-legendWrap')\n                    .attr('transform', 'translate(0,' + (-margin.top) +')')\n            }\n\n            if (rightAlignYAxis) {\n                g.select(\".nv-y.nv-axis\")\n                    .attr(\"transform\", \"translate(\" + availableWidth + \",0)\");\n            }\n\n            // Main Chart Component(s)\n            discretebar\n                .width(availableWidth)\n                .height(availableHeight);\n\n            var barsWrap = g.select('.nv-barsWrap')\n                .datum(data.filter(function(d) { return !d.disabled }));\n\n            barsWrap.transition().call(discretebar);\n\n\n            defsEnter.append('clipPath')\n                .attr('id', 'nv-x-label-clip-' + discretebar.id())\n                .append('rect');\n\n            g.select('#nv-x-label-clip-' + discretebar.id() + ' rect')\n                .attr('width', x.rangeBand() * (staggerLabels ? 2 : 1))\n                .attr('height', 16)\n                .attr('x', -x.rangeBand() / (staggerLabels ? 1 : 2 ));\n\n            // Setup Axes\n            if (showXAxis) {\n                xAxis\n                    .scale(x)\n                    ._ticks( nv.utils.calcTicksX(availableWidth/100, data) )\n                    .tickSize(-availableHeight, 0);\n\n                g.select('.nv-x.nv-axis')\n                    .attr('transform', 'translate(0,' + (y.range()[0] + ((discretebar.showValues() && y.domain()[0] < 0) ? 16 : 0)) + ')');\n                g.select('.nv-x.nv-axis').call(xAxis);\n\n                var xTicks = g.select('.nv-x.nv-axis').selectAll('g');\n                if (staggerLabels) {\n                    xTicks\n                        .selectAll('text')\n                        .attr('transform', function(d,i,j) { return 'translate(0,' + (j % 2 == 0 ? '5' : '17') + ')' })\n                }\n\n                if (rotateLabels) {\n                    xTicks\n                        .selectAll('.tick text')\n                        .attr('transform', 'rotate(' + rotateLabels + ' 0,0)')\n                        .style('text-anchor', rotateLabels > 0 ? 
'start' : 'end');\n                }\n\n                if (wrapLabels) {\n                    g.selectAll('.tick text')\n                        .call(nv.utils.wrapTicks, chart.xAxis.rangeBand())\n                }\n            }\n\n            if (showYAxis) {\n                yAxis\n                    .scale(y)\n                    ._ticks( nv.utils.calcTicksY(availableHeight/36, data) )\n                    .tickSize( -availableWidth, 0);\n\n                g.select('.nv-y.nv-axis').call(yAxis);\n            }\n\n            // Zero line\n            g.select(\".nv-zeroLine line\")\n                .attr(\"x1\",0)\n                .attr(\"x2\",(rightAlignYAxis) ? -availableWidth : availableWidth)\n                .attr(\"y1\", y(0))\n                .attr(\"y2\", y(0))\n            ;\n        });\n\n        renderWatch.renderEnd('discreteBar chart immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Event Handling/Dispatching (out of chart's scope)\n    //------------------------------------------------------------\n\n    discretebar.dispatch.on('elementMouseover.tooltip', function(evt) {\n        evt['series'] = {\n            key: chart.x()(evt.data),\n            value: chart.y()(evt.data),\n            color: evt.color\n        };\n        tooltip.data(evt).hidden(false);\n    });\n\n    discretebar.dispatch.on('elementMouseout.tooltip', function(evt) {\n        tooltip.hidden(true);\n    });\n\n    discretebar.dispatch.on('elementMousemove.tooltip', function(evt) {\n        tooltip();\n    });\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.discretebar = discretebar;\n    chart.legend = legend;\n    chart.xAxis = xAxis;\n    chart.yAxis = yAxis;\n    chart.tooltip = tooltip;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n\tshowLegend: {get: function(){return showLegend;}, set: function(_){showLegend=_;}},\n        staggerLabels: {get: function(){return staggerLabels;}, set: function(_){staggerLabels=_;}},\n        rotateLabels:  {get: function(){return rotateLabels;}, set: function(_){rotateLabels=_;}},\n        wrapLabels:  {get: function(){return wrapLabels;}, set: function(_){wrapLabels=!!_;}},\n        showXAxis: {get: function(){return showXAxis;}, set: function(_){showXAxis=_;}},\n        showYAxis: {get: function(){return showYAxis;}, set: function(_){showYAxis=_;}},\n        noData:    {get: function(){return noData;}, set: function(_){noData=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            if (_.top !== undefined) {\n                margin.top = _.top;\n                marginTop = _.top;\n            }\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n            discretebar.duration(duration);\n            xAxis.duration(duration);\n            yAxis.duration(duration);\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n            discretebar.color(color);\n\t    legend.color(color);\n        }},\n        rightAlignYAxis: {get: function(){return rightAlignYAxis;}, set: function(_){\n            rightAlignYAxis = _;\n            yAxis.orient( (_) ? 'right' : 'left');\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, discretebar);\n    nv.utils.initOptions(chart);\n\n    return chart;\n}\n","\nnv.models.distribution = function() {\n    \"use strict\";\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0}\n        , width = 400 //technically width or height depending on x or y....\n        , size = 8\n        , axis = 'x' // 'x' or 'y'... horizontal or vertical\n        , getData = function(d) { return d[axis] }  // defaults d.x or d.y\n        , color = nv.utils.defaultColor()\n        , scale = d3.scale.linear()\n        , domain\n        , duration = 250\n        , dispatch = d3.dispatch('renderEnd')\n        ;\n\n    //============================================================\n\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var scale0;\n    var renderWatch = nv.utils.renderWatch(dispatch, duration);\n\n    //============================================================\n\n\n    function chart(selection) {\n        renderWatch.reset();\n        selection.each(function(data) {\n            var availableLength = width - (axis === 'x' ? margin.left + margin.right : margin.top + margin.bottom),\n                naxis = axis == 'x' ? 
'y' : 'x',\n                container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            //------------------------------------------------------------\n            // Setup Scales\n\n            scale0 = scale0 || scale;\n\n            //------------------------------------------------------------\n\n\n            //------------------------------------------------------------\n            // Setup containers and skeleton of chart\n\n            var wrap = container.selectAll('g.nv-distribution').data([data]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-distribution');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')')\n\n            //------------------------------------------------------------\n\n\n            var distWrap = g.selectAll('g.nv-dist')\n                .data(function(d) { return d }, function(d) { return d.key });\n\n            distWrap.enter().append('g');\n            distWrap\n                .attr('class', function(d,i) { return 'nv-dist nv-series-' + i })\n                .style('stroke', function(d,i) { return color(d, i) });\n\n            var dist = distWrap.selectAll('line.nv-dist' + axis)\n                .data(function(d) { return d.values })\n            dist.enter().append('line')\n                .attr(axis + '1', function(d,i) { return scale0(getData(d,i)) })\n                .attr(axis + '2', function(d,i) { return scale0(getData(d,i)) })\n            renderWatch.transition(distWrap.exit().selectAll('line.nv-dist' + axis), 'dist exit')\n                // .transition()\n                .attr(axis + '1', function(d,i) { return scale(getData(d,i)) })\n                .attr(axis + '2', function(d,i) { return scale(getData(d,i)) })\n                .style('stroke-opacity', 0)\n                .remove();\n            dist\n                .attr('class', function(d,i) { return 'nv-dist' + axis + ' nv-dist' + axis + '-' + i })\n                .attr(naxis + '1', 0)\n                .attr(naxis + '2', size);\n            renderWatch.transition(dist, 'dist')\n                // .transition()\n                .attr(axis + '1', function(d,i) { return scale(getData(d,i)) })\n                .attr(axis + '2', function(d,i) { return scale(getData(d,i)) })\n\n\n            scale0 = scale.copy();\n\n        });\n        renderWatch.renderEnd('distribution immediate');\n        return chart;\n    }\n\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n    chart.options = nv.utils.optionsFunc.bind(chart);\n    chart.dispatch = dispatch;\n\n    chart.margin = function(_) {\n        if (!arguments.length) return margin;\n        margin.top    = typeof _.top    != 'undefined' ? _.top    : margin.top;\n        margin.right  = typeof _.right  != 'undefined' ? _.right  : margin.right;\n        margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom;\n        margin.left   = typeof _.left   != 'undefined' ? 
_.left   : margin.left;\n        return chart;\n    };\n\n    chart.width = function(_) {\n        if (!arguments.length) return width;\n        width = _;\n        return chart;\n    };\n\n    chart.axis = function(_) {\n        if (!arguments.length) return axis;\n        axis = _;\n        return chart;\n    };\n\n    chart.size = function(_) {\n        if (!arguments.length) return size;\n        size = _;\n        return chart;\n    };\n\n    chart.getData = function(_) {\n        if (!arguments.length) return getData;\n        getData = d3.functor(_);\n        return chart;\n    };\n\n    chart.scale = function(_) {\n        if (!arguments.length) return scale;\n        scale = _;\n        return chart;\n    };\n\n    chart.color = function(_) {\n        if (!arguments.length) return color;\n        color = nv.utils.getColor(_);\n        return chart;\n    };\n\n    chart.duration = function(_) {\n        if (!arguments.length) return duration;\n        duration = _;\n        renderWatch.reset(duration);\n        return chart;\n    };\n    //============================================================\n\n\n    return chart;\n}\n","nv.models.focus = function(content) {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var content = content || nv.models.line()\n        , xAxis = nv.models.axis()\n        , yAxis = nv.models.axis()\n        , brush = d3.svg.brush()\n        ;\n\n    var margin = {top: 10, right: 0, bottom: 30, left: 0}\n        , color = nv.utils.defaultColor()\n        , width = null\n        , height = 70\n        , showXAxis = true\n        , showYAxis = false\n        , rightAlignYAxis = false\n        , ticks = null\n        , x\n        , y\n        , brushExtent = null\n        , duration = 250\n        , dispatch = d3.dispatch('brush', 'onBrush', 'renderEnd')\n        , syncBrushing = true\n        ;\n\n    content.interactive(false);\n    content.pointActive(function(d) { return false; });\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch, duration);\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(content);\n        if (showXAxis) renderWatch.models(xAxis);\n        if (showYAxis) renderWatch.models(yAxis);\n\n        selection.each(function(data) {\n            var container = d3.select(this);\n            nv.utils.initSVG(container);\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = height - margin.top - margin.bottom;\n\n            chart.update = function() { \n                if( duration === 0 ) {\n                    container.call( chart );\n                } else {\n                    container.transition().duration(duration).call(chart);\n                }\n            };\n            chart.container = this;\n\n            // Setup Scales\n            x = content.xScale();\n            y = content.yScale();\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-focus').data([data]);\n            var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-focus').append('g');\n            var g = wrap.select('g');\n\n            wrap.attr('transform', 
'translate(' + margin.left + ',' + margin.top + ')');\n\n            gEnter.append('g').attr('class', 'nv-background').append('rect');\n            gEnter.append('g').attr('class', 'nv-x nv-axis');\n            gEnter.append('g').attr('class', 'nv-y nv-axis');\n            gEnter.append('g').attr('class', 'nv-contentWrap');\n            gEnter.append('g').attr('class', 'nv-brushBackground');\n            gEnter.append('g').attr('class', 'nv-x nv-brush');\n\n            if (rightAlignYAxis) {\n                g.select(\".nv-y.nv-axis\")\n                    .attr(\"transform\", \"translate(\" + availableWidth + \",0)\");\n            }\n\n            g.select('.nv-background rect')\n                .attr('width', availableWidth)\n                .attr('height', availableHeight);\n                \n            content\n                .width(availableWidth)\n                .height(availableHeight)\n                .color(data.map(function(d,i) {\n                    return d.color || color(d, i);\n                }).filter(function(d,i) { return !data[i].disabled; }));\n\n            var contentWrap = g.select('.nv-contentWrap')\n                .datum(data.filter(function(d) { return !d.disabled; }));\n\n            d3.transition(contentWrap).call(content);\n            \n            // Setup Brush\n            brush\n                .x(x)\n                .on('brush', function() {\n                    onBrush(syncBrushing);\n                });\n\n            brush.on('brushend', function () {\n                if (!syncBrushing) {\n                    dispatch.onBrush(brush.empty() ? x.domain() : brush.extent());\n                }\n            });\n\n            if (brushExtent) brush.extent(brushExtent);\n\n            var brushBG = g.select('.nv-brushBackground').selectAll('g')\n                .data([brushExtent || brush.extent()]);\n    \n            var brushBGenter = brushBG.enter()\n                .append('g');\n\n            brushBGenter.append('rect')\n                .attr('class', 'left')\n                .attr('x', 0)\n                .attr('y', 0)\n                .attr('height', availableHeight);\n\n            brushBGenter.append('rect')\n                .attr('class', 'right')\n                .attr('x', 0)\n                .attr('y', 0)\n                .attr('height', availableHeight);\n\n            var gBrush = g.select('.nv-x.nv-brush')\n                .call(brush);\n            gBrush.selectAll('rect')\n                .attr('height', availableHeight);\n            gBrush.selectAll('.resize').append('path').attr('d', resizePath);\n\n            onBrush(true);\n\n            g.select('.nv-background rect')\n                .attr('width', availableWidth)\n                .attr('height', availableHeight);\n\n            if (showXAxis) {\n                xAxis.scale(x)\n                    ._ticks( nv.utils.calcTicksX(availableWidth/100, data) )\n                    .tickSize(-availableHeight, 0);\n  \n                g.select('.nv-x.nv-axis')\n                    .attr('transform', 'translate(0,' + y.range()[0] + ')');\n                d3.transition(g.select('.nv-x.nv-axis'))\n                    .call(xAxis);\n            }\n\n            if (showYAxis) {\n                yAxis\n                    .scale(y)\n                    ._ticks( nv.utils.calcTicksY(availableHeight/36, data) )\n                    .tickSize( -availableWidth, 0);\n\n                d3.transition(g.select('.nv-y.nv-axis'))\n                    .call(yAxis);\n            }\n            \n       
     g.select('.nv-x.nv-axis')\n                .attr('transform', 'translate(0,' + y.range()[0] + ')');\n\n            //============================================================\n            // Event Handling/Dispatching (in chart's scope)\n            //------------------------------------------------------------\n\n            //============================================================\n            // Functions\n            //------------------------------------------------------------\n    \n            // Taken from crossfilter (http://square.github.com/crossfilter/)\n            function resizePath(d) {\n                var e = +(d == 'e'),\n                    x = e ? 1 : -1,\n                    y = availableHeight / 3;\n                return 'M' + (0.5 * x) + ',' + y\n                    + 'A6,6 0 0 ' + e + ' ' + (6.5 * x) + ',' + (y + 6)\n                    + 'V' + (2 * y - 6)\n                    + 'A6,6 0 0 ' + e + ' ' + (0.5 * x) + ',' + (2 * y)\n                    + 'Z'\n                    + 'M' + (2.5 * x) + ',' + (y + 8)\n                    + 'V' + (2 * y - 8)\n                    + 'M' + (4.5 * x) + ',' + (y + 8)\n                    + 'V' + (2 * y - 8);\n            }\n    \n    \n            function updateBrushBG() {\n                if (!brush.empty()) brush.extent(brushExtent);\n                brushBG\n                    .data([brush.empty() ? x.domain() : brushExtent])\n                    .each(function(d,i) {\n                        var leftWidth = x(d[0]) - x.range()[0],\n                            rightWidth = availableWidth - x(d[1]);\n                        d3.select(this).select('.left')\n                            .attr('width',  leftWidth < 0 ? 0 : leftWidth);\n    \n                        d3.select(this).select('.right')\n                            .attr('x', x(d[1]))\n                            .attr('width', rightWidth < 0 ? 0 : rightWidth);\n                    });\n            }\n\n\n            function onBrush(shouldDispatch) {\n                brushExtent = brush.empty() ? null : brush.extent();\n                var extent = brush.empty() ? 
x.domain() : brush.extent();\n                dispatch.brush({extent: extent, brush: brush});\n                updateBrushBG();\n                if (shouldDispatch) {\n                    dispatch.onBrush(extent);\n                }\n            }\n        });\n\n        renderWatch.renderEnd('focus immediate');\n        return chart;\n    }\n\n\n    //============================================================\n    // Event Handling/Dispatching (out of chart's scope)\n    //------------------------------------------------------------\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    // expose chart's sub-components\n    chart.dispatch = dispatch;\n    chart.content = content;\n    chart.brush = brush;\n    chart.xAxis = xAxis;\n    chart.yAxis = yAxis;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        showXAxis:      {get: function(){return showXAxis;}, set: function(_){showXAxis=_;}},\n        showYAxis:    {get: function(){return showYAxis;}, set: function(_){showYAxis=_;}},\n        brushExtent: {get: function(){return brushExtent;}, set: function(_){brushExtent=_;}},\n        syncBrushing: {get: function(){return syncBrushing;}, set: function(_){syncBrushing=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? _.left   : margin.left;\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n            content.duration(duration);\n            xAxis.duration(duration);\n            yAxis.duration(duration);\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n            content.color(color);\n        }},\n        interpolate: {get: function(){return content.interpolate();}, set: function(_){\n            content.interpolate(_);\n        }},\n        xTickFormat: {get: function(){return xAxis.tickFormat();}, set: function(_){\n            xAxis.tickFormat(_);\n        }},\n        yTickFormat: {get: function(){return yAxis.tickFormat();}, set: function(_){\n            yAxis.tickFormat(_);\n        }},\n        x: {get: function(){return content.x();}, set: function(_){\n            content.x(_);\n        }},\n        y: {get: function(){return content.y();}, set: function(_){\n            content.y(_);\n        }},\n        rightAlignYAxis: {get: function(){return rightAlignYAxis;}, set: function(_){\n            rightAlignYAxis = _;\n            yAxis.orient( rightAlignYAxis ? 
'right' : 'left');\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, content);\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","nv.models.forceDirectedGraph = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n    var margin = {top: 2, right: 0, bottom: 2, left: 0}\n        , width = 400\n        , height = 32\n        , container = null\n        , dispatch = d3.dispatch('renderEnd')\n        , color = nv.utils.getColor(['#000'])\n        , tooltip      = nv.models.tooltip()\n        , noData = null\n        // Force directed graph specific parameters [default values]\n        , linkStrength = 0.1\n        , friction = 0.9\n        , linkDist = 30\n        , charge = -120\n        , gravity = 0.1\n        , theta = 0.8\n        , alpha = 0.1\n        , radius = 5\n        // Node accessors exposed through the x/y options below\n        , getX = function(d) { return d.x }\n        , getY = function(d) { return d.y }\n        // These functions allow adding extra attributes to nodes and links\n        ,nodeExtras = function(nodes) { /* Do nothing */ }\n        ,linkExtras = function(links) { /* Do nothing */ }\n        ;\n\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch);\n\n    function chart(selection) {\n        renderWatch.reset();\n\n        selection.each(function(data) {\n          container = d3.select(this);\n          nv.utils.initSVG(container);\n\n          var availableWidth = nv.utils.availableWidth(width, container, margin),\n              availableHeight = nv.utils.availableHeight(height, container, margin);\n\n          container\n                  .attr(\"width\", availableWidth)\n                  .attr(\"height\", availableHeight);\n\n          // Display No Data message if there's nothing to show.\n          if (!data || !data.links || !data.nodes) {\n              nv.utils.noData(chart, container);\n              return chart;\n          } else {\n              container.selectAll('.nv-noData').remove();\n          }\n          container.selectAll('*').remove();\n\n          // Collect names of all fields in the nodes\n          var nodeFieldSet = new Set();\n          data.nodes.forEach(function(node) {\n            var keys = Object.keys(node);\n            keys.forEach(function(key) {\n              nodeFieldSet.add(key);\n            });\n          });\n\n          var force = d3.layout.force()\n                .nodes(data.nodes)\n                .links(data.links)\n                .size([availableWidth, availableHeight])\n                .linkStrength(linkStrength)\n                .friction(friction)\n                .linkDistance(linkDist)\n                .charge(charge)\n                .gravity(gravity)\n                .theta(theta)\n                .alpha(alpha)\n                .start();\n\n          var link = container.selectAll(\".link\")\n                .data(data.links)\n                .enter().append(\"line\")\n                .attr(\"class\", \"nv-force-link\")\n                .style(\"stroke-width\", function(d) { return Math.sqrt(d.value); });\n\n          var node = container.selectAll(\".node\")\n                .data(data.nodes)\n                .enter()\n                .append(\"g\")\n                .attr(\"class\", \"nv-force-node\")\n                .call(force.drag);\n\n          node\n            .append(\"circle\")\n            
.attr(\"r\", radius)\n            .style(\"fill\", function(d) { return color(d) } )\n            .on(\"mouseover\", function(evt) {\n              container.select('.nv-series-' + evt.seriesIndex + ' .nv-distx-' + evt.pointIndex)\n                  .attr('y1', evt.py);\n              container.select('.nv-series-' + evt.seriesIndex + ' .nv-disty-' + evt.pointIndex)\n                  .attr('x2', evt.px);\n\n              // Add 'series' object to\n              var nodeColor = color(evt);\n              evt.series = [];\n              nodeFieldSet.forEach(function(field) {\n                evt.series.push({\n                  color: nodeColor,\n                  key:   field,\n                  value: evt[field]\n                });\n              });\n              tooltip.data(evt).hidden(false);\n            })\n            .on(\"mouseout\",  function(d) {\n              tooltip.hidden(true);\n            });\n\n          tooltip.headerFormatter(function(d) {return \"Node\";});\n\n          // Apply extra attributes to nodes and links (if any)\n          linkExtras(link);\n          nodeExtras(node);\n\n          force.on(\"tick\", function() {\n              link.attr(\"x1\", function(d) { return d.source.x; })\n                  .attr(\"y1\", function(d) { return d.source.y; })\n                  .attr(\"x2\", function(d) { return d.target.x; })\n                  .attr(\"y2\", function(d) { return d.target.y; });\n\n              node.attr(\"transform\", function(d) {\n                return \"translate(\" + d.x + \", \" + d.y + \")\";\n              });\n            });\n        });\n\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:     {get: function(){return width;}, set: function(_){width=_;}},\n        height:    {get: function(){return height;}, set: function(_){height=_;}},\n\n        // Force directed graph specific parameters\n        linkStrength:{get: function(){return linkStrength;}, set: function(_){linkStrength=_;}},\n        friction:    {get: function(){return friction;}, set: function(_){friction=_;}},\n        linkDist:    {get: function(){return linkDist;}, set: function(_){linkDist=_;}},\n        charge:      {get: function(){return charge;}, set: function(_){charge=_;}},\n        gravity:     {get: function(){return gravity;}, set: function(_){gravity=_;}},\n        theta:       {get: function(){return theta;}, set: function(_){theta=_;}},\n        alpha:       {get: function(){return alpha;}, set: function(_){alpha=_;}},\n        radius:      {get: function(){return radius;}, set: function(_){radius=_;}},\n\n        //functor options\n        x: {get: function(){return getX;}, set: function(_){getX=d3.functor(_);}},\n        y: {get: function(){return getY;}, set: function(_){getY=d3.functor(_);}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }},\n        noData:    {get: function(){return noData;}, set: function(_){noData=_;}},\n        nodeExtras: {get: function(){return nodeExtras;}, set: function(_){\n            nodeExtras = _;\n        }},\n        linkExtras: {get: function(){return linkExtras;}, set: function(_){\n            linkExtras = _;\n        }}\n    });\n\n    chart.dispatch = dispatch;\n    chart.tooltip = tooltip;\n    nv.utils.initOptions(chart);\n    return chart;\n};\n","nv.models.furiousLegend = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 5, right: 0, bottom: 5, left: 0}\n        , width = 400\n        , height = 20\n        , getKey = function(d) { return d.key }\n        , keyFormatter = function (d) { return d }\n        , color = nv.utils.getColor()\n        , maxKeyLength = 20 //default value for key lengths\n        , align = true\n        , padding = 28 //define how much space between legend items. - recommend 32 for furious version\n        , rightAlign = true\n        , updateState = true   //If true, legend will update data.disabled and trigger a 'stateChange' dispatch.\n        , radioButtonMode = false   //If true, clicking legend items will cause it to behave like a radio button. (only one can be selected at a time)\n        , expanded = false\n        , dispatch = d3.dispatch('legendClick', 'legendDblclick', 'legendMouseover', 'legendMouseout', 'stateChange')\n        , vers = 'classic' //Options are \"classic\" and \"furious\"\n        ;\n\n    function chart(selection) {\n        selection.each(function(data) {\n            var availableWidth = width - margin.left - margin.right,\n                container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-legend').data([data]);\n            var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-legend').append('g');\n            var g = wrap.select('g');\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            var series = g.selectAll('.nv-series')\n                .data(function(d) {\n                    if(vers != 'furious') return d;\n\n                    return d.filter(function(n) {\n                        return expanded ? 
true : !n.disengaged;\n                    });\n                });\n            var seriesEnter = series.enter().append('g').attr('class', 'nv-series')\n\n            var seriesShape;\n\n            if(vers == 'classic') {\n                seriesEnter.append('circle')\n                    .style('stroke-width', 2)\n                    .attr('class','nv-legend-symbol')\n                    .attr('r', 5);\n\n                seriesShape = series.select('circle');\n            } else if (vers == 'furious') {\n                seriesEnter.append('rect')\n                    .style('stroke-width', 2)\n                    .attr('class','nv-legend-symbol')\n                    .attr('rx', 3)\n                    .attr('ry', 3);\n\n                seriesShape = series.select('rect');\n\n                seriesEnter.append('g')\n                    .attr('class', 'nv-check-box')\n                    .property('innerHTML','<path d=\"M0.5,5 L22.5,5 L22.5,26.5 L0.5,26.5 L0.5,5 Z\" class=\"nv-box\"></path><path d=\"M5.5,12.8618467 L11.9185089,19.2803556 L31,0.198864511\" class=\"nv-check\"></path>')\n                    .attr('transform', 'translate(-10,-8)scale(0.5)');\n\n                var seriesCheckbox = series.select('.nv-check-box');\n\n                seriesCheckbox.each(function(d,i) {\n                    d3.select(this).selectAll('path')\n                        .attr('stroke', setTextColor(d,i));\n                });\n            }\n\n            seriesEnter.append('text')\n                .attr('text-anchor', 'start')\n                .attr('class','nv-legend-text')\n                .attr('dy', '.32em')\n                .attr('dx', '8');\n\n            var seriesText = series.select('text.nv-legend-text');\n\n            series\n                .on('mouseover', function(d,i) {\n                    dispatch.legendMouseover(d,i);  //TODO: Make consistent with other event objects\n                })\n                .on('mouseout', function(d,i) {\n                    dispatch.legendMouseout(d,i);\n                })\n                .on('click', function(d,i) {\n                    dispatch.legendClick(d,i);\n                    // make sure we re-get data in case it was modified\n                    var data = series.data();\n                    if (updateState) {\n                        if(vers =='classic') {\n                            if (radioButtonMode) {\n                                //Radio button mode: set every series to disabled,\n                                //  and enable the clicked series.\n                                data.forEach(function(series) { series.disabled = true});\n                                d.disabled = false;\n                            }\n                            else {\n                                d.disabled = !d.disabled;\n                                if (data.every(function(series) { return series.disabled})) {\n                                    //the default behavior of NVD3 legends is, if every single series\n                                    // is disabled, turn all series' back on.\n                                    data.forEach(function(series) { series.disabled = false});\n                                }\n                            }\n                        } else if(vers == 'furious') {\n                            if(expanded) {\n                                d.disengaged = !d.disengaged;\n                                d.userDisabled = d.userDisabled == undefined ? 
!!d.disabled : d.userDisabled;\n                                d.disabled = d.disengaged || d.userDisabled;\n                            } else if (!expanded) {\n                                d.disabled = !d.disabled;\n                                d.userDisabled = d.disabled;\n                                var engaged = data.filter(function(d) { return !d.disengaged; });\n                                if (engaged.every(function(series) { return series.userDisabled })) {\n                                    //the default behavior of NVD3 legends is, if every single series\n                                    // is disabled, turn all series' back on.\n                                    data.forEach(function(series) {\n                                        series.disabled = series.userDisabled = false;\n                                    });\n                                }\n                            }\n                        }\n                        dispatch.stateChange({\n                            disabled: data.map(function(d) { return !!d.disabled }),\n                            disengaged: data.map(function(d) { return !!d.disengaged })\n                        });\n\n                    }\n                })\n                .on('dblclick', function(d,i) {\n                    if(vers == 'furious' && expanded) return;\n                    dispatch.legendDblclick(d,i);\n                    if (updateState) {\n                        // make sure we re-get data in case it was modified\n                        var data = series.data();\n                        //the default behavior of NVD3 legends, when double clicking one,\n                        // is to set all other series' to false, and make the double clicked series enabled.\n                        data.forEach(function(series) {\n                            series.disabled = true;\n                            if(vers == 'furious') series.userDisabled = series.disabled;\n                        });\n                        d.disabled = false;\n                        if(vers == 'furious') d.userDisabled = d.disabled;\n                        dispatch.stateChange({\n                            disabled: data.map(function(d) { return !!d.disabled })\n                        });\n                    }\n                });\n\n            series.classed('nv-disabled', function(d) { return d.userDisabled });\n            series.exit().remove();\n\n            seriesText\n                .attr('fill', setTextColor)\n                .text(function (d) { return keyFormatter(getKey(d)) });\n\n            //TODO: implement fixed-width and max-width options (max-width is especially useful with the align option)\n            // NEW ALIGNING CODE, TODO: clean up\n\n            var versPadding;\n            switch(vers) {\n                case 'furious' :\n                    versPadding = 23;\n                    break;\n                case 'classic' :\n                    versPadding = 20;\n            }\n\n            if (align) {\n\n                var seriesWidths = [];\n                series.each(function(d,i) {\n                    var legendText;\n                    if (keyFormatter(getKey(d)) && keyFormatter(getKey(d)).length > maxKeyLength) {\n                        var trimmedKey = keyFormatter(getKey(d)).substring(0, maxKeyLength);\n                        legendText = d3.select(this).select('text').text(trimmedKey + \"...\");\n                        
d3.select(this).append(\"svg:title\").text(keyFormatter(getKey(d)));\n                    } else {\n                        legendText = d3.select(this).select('text');\n                    }\n                    var nodeTextLength;\n                    try {\n                        nodeTextLength = legendText.node().getComputedTextLength();\n                        // If the legendText is display:none'd (nodeTextLength == 0), simulate an error so we approximate, instead\n                        if(nodeTextLength <= 0) throw Error();\n                    }\n                    catch(e) {\n                        nodeTextLength = nv.utils.calcApproxTextWidth(legendText);\n                    }\n\n                    seriesWidths.push(nodeTextLength + padding);\n                });\n\n                var seriesPerRow = 0;\n                var legendWidth = 0;\n                var columnWidths = [];\n\n                while ( legendWidth < availableWidth && seriesPerRow < seriesWidths.length) {\n                    columnWidths[seriesPerRow] = seriesWidths[seriesPerRow];\n                    legendWidth += seriesWidths[seriesPerRow++];\n                }\n                if (seriesPerRow === 0) seriesPerRow = 1; //minimum of one series per row\n\n                while ( legendWidth > availableWidth && seriesPerRow > 1 ) {\n                    columnWidths = [];\n                    seriesPerRow--;\n\n                    for (var k = 0; k < seriesWidths.length; k++) {\n                        if (seriesWidths[k] > (columnWidths[k % seriesPerRow] || 0) )\n                            columnWidths[k % seriesPerRow] = seriesWidths[k];\n                    }\n\n                    legendWidth = columnWidths.reduce(function(prev, cur, index, array) {\n                        return prev + cur;\n                    });\n                }\n\n                var xPositions = [];\n                for (var i = 0, curX = 0; i < seriesPerRow; i++) {\n                    xPositions[i] = curX;\n                    curX += columnWidths[i];\n                }\n\n                series\n                    .attr('transform', function(d, i) {\n                        return 'translate(' + xPositions[i % seriesPerRow] + ',' + (5 + Math.floor(i / seriesPerRow) * versPadding) + ')';\n                    });\n\n                //position legend as far right as possible within the total width\n                if (rightAlign) {\n                    g.attr('transform', 'translate(' + (width - margin.right - legendWidth) + ',' + margin.top + ')');\n                }\n                else {\n                    g.attr('transform', 'translate(0' + ',' + margin.top + ')');\n                }\n\n                height = margin.top + margin.bottom + (Math.ceil(seriesWidths.length / seriesPerRow) * versPadding);\n\n            } else {\n\n                var ypos = 5,\n                    newxpos = 5,\n                    maxwidth = 0,\n                    xpos;\n                series\n                    .attr('transform', function(d, i) {\n                        var length = d3.select(this).select('text').node().getComputedTextLength() + padding;\n                        xpos = newxpos;\n\n                        if (width < margin.left + margin.right + xpos + length) {\n                            newxpos = xpos = 5;\n                            ypos += versPadding;\n                        }\n\n                        newxpos += length;\n                        if (newxpos > maxwidth) maxwidth = newxpos;\n\n            
            return 'translate(' + xpos + ',' + ypos + ')';\n                    });\n\n                //position legend as far right as possible within the total width\n                g.attr('transform', 'translate(' + (width - margin.right - maxwidth) + ',' + margin.top + ')');\n\n                height = margin.top + margin.bottom + ypos + 15;\n            }\n\n            if(vers == 'furious') {\n                // Size rectangles after text is placed\n                seriesShape\n                    .attr('width', function(d,i) {\n                        return seriesText[0][i].getComputedTextLength() + 27;\n                    })\n                    .attr('height', 18)\n                    .attr('y', -9)\n                    .attr('x', -15)\n            }\n\n            seriesShape\n                .style('fill', setBGColor)\n                .style('stroke', function(d,i) { return d.color || color(d, i) });\n        });\n\n        function setTextColor(d,i) {\n            if(vers != 'furious') return '#000';\n            if(expanded) {\n                return d.disengaged ? color(d,i) : '#fff';\n            } else if (!expanded) {\n                return !!d.disabled ? color(d,i) : '#fff';\n            }\n        }\n\n        function setBGColor(d,i) {\n            if(expanded && vers == 'furious') {\n                return d.disengaged ? '#fff' : color(d,i);\n            } else {\n                return !!d.disabled ? '#fff' : color(d,i);\n            }\n        }\n\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:          {get: function(){return width;}, set: function(_){width=_;}},\n        height:         {get: function(){return height;}, set: function(_){height=_;}},\n        key:            {get: function(){return getKey;}, set: function(_){getKey=_;}},\n        keyFormatter:   {get: function(){return keyFormatter;}, set: function(_){keyFormatter=_;}},\n        align:          {get: function(){return align;}, set: function(_){align=_;}},\n        rightAlign:     {get: function(){return rightAlign;}, set: function(_){rightAlign=_;}},\n        maxKeyLength:   {get: function(){return maxKeyLength;}, set: function(_){maxKeyLength=_;}},\n        padding:        {get: function(){return padding;}, set: function(_){padding=_;}},\n        updateState:    {get: function(){return updateState;}, set: function(_){updateState=_;}},\n        radioButtonMode:{get: function(){return radioButtonMode;}, set: function(_){radioButtonMode=_;}},\n        expanded:       {get: function(){return expanded;}, set: function(_){expanded=_;}},\n        vers:           {get: function(){return vers;}, set: function(_){vers=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","//TODO: consider deprecating and using multibar with single series for this\nnv.models.historicalBar = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0}\n        , width = null\n        , height = null\n        , id = Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one\n        , container = null\n        , x = d3.scale.linear()\n        , y = d3.scale.linear()\n        , getX = function(d) { return d.x }\n        , getY = function(d) { return d.y }\n        , forceX = []\n        , forceY = [0]\n        , padData = false\n        , clipEdge = true\n        , color = nv.utils.defaultColor()\n        , xDomain\n        , yDomain\n        , xRange\n        , yRange\n        , dispatch = d3.dispatch('chartClick', 'elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout', 'elementMousemove', 'renderEnd')\n        , interactive = true\n        ;\n\n    var renderWatch = nv.utils.renderWatch(dispatch, 0);\n\n    function chart(selection) {\n        selection.each(function(data) {\n            renderWatch.reset();\n\n            container = d3.select(this);\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            nv.utils.initSVG(container);\n\n            // Setup Scales\n            x.domain(xDomain || d3.extent(data[0].values.map(getX).concat(forceX) ));\n\n            if (padData)\n                x.range(xRange || [availableWidth * .5 / data[0].values.length, availableWidth * (data[0].values.length - .5)  / data[0].values.length ]);\n            else\n                x.range(xRange || [0, availableWidth]);\n\n            y.domain(yDomain || d3.extent(data[0].values.map(getY).concat(forceY) ))\n                .range(yRange || [availableHeight, 0]);\n\n            // If scale's domain don't have a range, slightly adjust to make one... 
so a chart can show a single data point\n            if (x.domain()[0] === x.domain()[1])\n                x.domain()[0] ?\n                    x.domain([x.domain()[0] - x.domain()[0] * 0.01, x.domain()[1] + x.domain()[1] * 0.01])\n                    : x.domain([-1,1]);\n\n            if (y.domain()[0] === y.domain()[1])\n                y.domain()[0] ?\n                    y.domain([y.domain()[0] + y.domain()[0] * 0.01, y.domain()[1] - y.domain()[1] * 0.01])\n                    : y.domain([-1,1]);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-historicalBar-' + id).data([data[0].values]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-historicalBar-' + id);\n            var defsEnter = wrapEnter.append('defs');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-bars');\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            container\n                .on('click', function(d,i) {\n                    dispatch.chartClick({\n                        data: d,\n                        index: i,\n                        pos: d3.event,\n                        id: id\n                    });\n                });\n\n            defsEnter.append('clipPath')\n                .attr('id', 'nv-chart-clip-path-' + id)\n                .append('rect');\n\n            wrap.select('#nv-chart-clip-path-' + id + ' rect')\n                .attr('width', availableWidth)\n                .attr('height', availableHeight);\n\n            g.attr('clip-path', clipEdge ? 'url(#nv-chart-clip-path-' + id + ')' : '');\n\n            var bars = wrap.select('.nv-bars').selectAll('.nv-bar')\n                .data(function(d) { return d }, function(d,i) {return getX(d,i)});\n            bars.exit().remove();\n\n            bars.enter().append('rect')\n                .attr('x', 0 )\n                .attr('y', function(d,i) {  return nv.utils.NaNtoZero(y(Math.max(0, getY(d,i)))) })\n                .attr('height', function(d,i) { return nv.utils.NaNtoZero(Math.abs(y(getY(d,i)) - y(0))) })\n                .attr('transform', function(d,i) { return 'translate(' + (x(getX(d,i)) - availableWidth / data[0].values.length * .45) + ',0)'; })\n                .on('mouseover', function(d,i) {\n                    if (!interactive) return;\n                    d3.select(this).classed('hover', true);\n                    dispatch.elementMouseover({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n\n                })\n                .on('mouseout', function(d,i) {\n                    if (!interactive) return;\n                    d3.select(this).classed('hover', false);\n                    dispatch.elementMouseout({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                })\n                .on('mousemove', function(d,i) {\n                    if (!interactive) return;\n                    dispatch.elementMousemove({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                })\n                .on('click', function(d,i) {\n                    if 
(!interactive) return;\n                    var element = this;\n                    dispatch.elementClick({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\"),\n                        event: d3.event,\n                        element: element\n                    });\n                    d3.event.stopPropagation();\n                })\n                .on('dblclick', function(d,i) {\n                    if (!interactive) return;\n                    dispatch.elementDblClick({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                    d3.event.stopPropagation();\n                });\n\n            bars\n                .attr('fill', function(d,i) { return color(d, i); })\n                .attr('class', function(d,i,j) { return (getY(d,i) < 0 ? 'nv-bar negative' : 'nv-bar positive') + ' nv-bar-' + j + '-' + i })\n                .watchTransition(renderWatch, 'bars')\n                .attr('transform', function(d,i) { return 'translate(' + (x(getX(d,i)) - availableWidth / data[0].values.length * .45) + ',0)'; })\n                //TODO: better width calculations that don't assume always uniform data spacing\n                .attr('width', (availableWidth / data[0].values.length) * .9 );\n\n            bars.watchTransition(renderWatch, 'bars')\n                .attr('y', function(d,i) {\n                    var rval = getY(d,i) < 0 ?\n                        y(0) :\n                            y(0) - y(getY(d,i)) < 1 ?\n                        y(0) - 1 :\n                        y(getY(d,i));\n                    return nv.utils.NaNtoZero(rval);\n                })\n                .attr('height', function(d,i) { return nv.utils.NaNtoZero(Math.max(Math.abs(y(getY(d,i)) - y(0)),1)) });\n\n        });\n\n        renderWatch.renderEnd('historicalBar immediate');\n        return chart;\n    }\n\n    //Create methods to allow outside functions to highlight a specific bar.\n    chart.highlightPoint = function(pointIndex, isHoverOver) {\n        container\n            .select(\".nv-bars .nv-bar-0-\" + pointIndex)\n            .classed(\"hover\", isHoverOver)\n        ;\n    };\n\n    chart.clearHighlights = function() {\n        container\n            .select(\".nv-bars .nv-bar.hover\")\n            .classed(\"hover\", false)\n        ;\n    };\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:   {get: function(){return width;}, set: function(_){width=_;}},\n        height:  {get: function(){return height;}, set: function(_){height=_;}},\n        forceX:  {get: function(){return forceX;}, set: function(_){forceX=_;}},\n        forceY:  {get: function(){return forceY;}, set: function(_){forceY=_;}},\n        padData: {get: function(){return padData;}, set: function(_){padData=_;}},\n        x:       {get: function(){return getX;}, set: function(_){getX=_;}},\n        y:       {get: function(){return getY;}, set: function(_){getY=_;}},\n        xScale:  {get: function(){return x;}, set: function(_){x=_;}},\n        yScale:  {get: function(){return y;}, set: 
function(_){y=_;}},\n        xDomain: {get: function(){return xDomain;}, set: function(_){xDomain=_;}},\n        yDomain: {get: function(){return yDomain;}, set: function(_){yDomain=_;}},\n        xRange:  {get: function(){return xRange;}, set: function(_){xRange=_;}},\n        yRange:  {get: function(){return yRange;}, set: function(_){yRange=_;}},\n        clipEdge:    {get: function(){return clipEdge;}, set: function(_){clipEdge=_;}},\n        id:          {get: function(){return id;}, set: function(_){id=_;}},\n        interactive: {get: function(){return interactive;}, set: function(_){interactive=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? _.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","\nnv.models.historicalBarChart = function(bar_model) {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var bars = bar_model || nv.models.historicalBar()\n        , xAxis = nv.models.axis()\n        , yAxis = nv.models.axis()\n        , legend = nv.models.legend()\n        , interactiveLayer = nv.interactiveGuideline()\n        , tooltip = nv.models.tooltip()\n        ;\n\n\n    var margin = {top: 30, right: 90, bottom: 50, left: 90}\n        , marginTop = null\n        , color = nv.utils.defaultColor()\n        , width = null\n        , height = null\n        , showLegend = false\n        , showXAxis = true\n        , showYAxis = true\n        , rightAlignYAxis = false\n        , useInteractiveGuideline = false\n        , x\n        , y\n        , state = {}\n        , defaultState = null\n        , noData = null\n        , dispatch = d3.dispatch('tooltipHide', 'stateChange', 'changeState', 'renderEnd')\n        , transitionDuration = 250\n        ;\n\n    xAxis.orient('bottom').tickPadding(7);\n    yAxis.orient( (rightAlignYAxis) ? 
'right' : 'left');\n    tooltip\n        .duration(0)\n        .headerEnabled(false)\n        .valueFormatter(function(d, i) {\n            return yAxis.tickFormat()(d, i);\n        })\n        .headerFormatter(function(d, i) {\n            return xAxis.tickFormat()(d, i);\n        });\n\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch, 0);\n\n    function chart(selection) {\n        selection.each(function(data) {\n            renderWatch.reset();\n            renderWatch.models(bars);\n            if (showXAxis) renderWatch.models(xAxis);\n            if (showYAxis) renderWatch.models(yAxis);\n\n            var container = d3.select(this),\n                that = this;\n            nv.utils.initSVG(container);\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            chart.update = function() { container.transition().duration(transitionDuration).call(chart) };\n            chart.container = this;\n\n            //set state.disabled\n            state.disabled = data.map(function(d) { return !!d.disabled });\n\n            if (!defaultState) {\n                var key;\n                defaultState = {};\n                for (key in state) {\n                    if (state[key] instanceof Array)\n                        defaultState[key] = state[key].slice(0);\n                    else\n                        defaultState[key] = state[key];\n                }\n            }\n\n            // Display noData message if there's nothing to show.\n            if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) {\n                nv.utils.noData(chart, container)\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            // Setup Scales\n            x = bars.xScale();\n            y = bars.yScale();\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-historicalBarChart').data([data]);\n            var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-historicalBarChart').append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-x nv-axis');\n            gEnter.append('g').attr('class', 'nv-y nv-axis');\n            gEnter.append('g').attr('class', 'nv-barsWrap');\n            gEnter.append('g').attr('class', 'nv-legendWrap');\n            gEnter.append('g').attr('class', 'nv-interactive');\n\n            // Legend\n            if (!showLegend) {\n                g.select('.nv-legendWrap').selectAll('*').remove();\n            } else {\n                legend.width(availableWidth);\n\n                g.select('.nv-legendWrap')\n                    .datum(data)\n                    .call(legend);\n\n                if (!marginTop && legend.height() !== margin.top) {\n                    margin.top = legend.height();\n                    availableHeight = nv.utils.availableHeight(height, container, margin);\n                }\n\n                wrap.select('.nv-legendWrap')\n                    .attr('transform', 'translate(0,' + (-margin.top) +')')\n            }\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n      
      if (rightAlignYAxis) {\n                g.select(\".nv-y.nv-axis\")\n                    .attr(\"transform\", \"translate(\" + availableWidth + \",0)\");\n            }\n\n            //Set up interactive layer\n            if (useInteractiveGuideline) {\n                interactiveLayer\n                    .width(availableWidth)\n                    .height(availableHeight)\n                    .margin({left:margin.left, top:margin.top})\n                    .svgContainer(container)\n                    .xScale(x);\n                wrap.select(\".nv-interactive\").call(interactiveLayer);\n            }\n            bars\n                .width(availableWidth)\n                .height(availableHeight)\n                .color(data.map(function(d,i) {\n                    return d.color || color(d, i);\n                }).filter(function(d,i) { return !data[i].disabled }));\n\n            var barsWrap = g.select('.nv-barsWrap')\n                .datum(data.filter(function(d) { return !d.disabled }));\n            barsWrap.transition().call(bars);\n\n            // Setup Axes\n            if (showXAxis) {\n                xAxis\n                    .scale(x)\n                    ._ticks( nv.utils.calcTicksX(availableWidth/100, data) )\n                    .tickSize(-availableHeight, 0);\n\n                g.select('.nv-x.nv-axis')\n                    .attr('transform', 'translate(0,' + y.range()[0] + ')');\n                g.select('.nv-x.nv-axis')\n                    .transition()\n                    .call(xAxis);\n            }\n\n            if (showYAxis) {\n                yAxis\n                    .scale(y)\n                    ._ticks( nv.utils.calcTicksY(availableHeight/36, data) )\n                    .tickSize( -availableWidth, 0);\n\n                g.select('.nv-y.nv-axis')\n                    .transition()\n                    .call(yAxis);\n            }\n\n            //============================================================\n            // Event Handling/Dispatching (in chart's scope)\n            //------------------------------------------------------------\n\n            interactiveLayer.dispatch.on('elementMousemove', function(e) {\n                bars.clearHighlights();\n\n                var singlePoint, pointIndex, pointXLocation, allData = [];\n                data\n                    .filter(function(series, i) {\n                        series.seriesIndex = i;\n                        return !series.disabled;\n                    })\n                    .forEach(function(series,i) {\n                        pointIndex = nv.interactiveBisect(series.values, e.pointXValue, chart.x());\n                        bars.highlightPoint(pointIndex,true);\n                        var point = series.values[pointIndex];\n                        if (point === undefined) return;\n                        if (singlePoint === undefined) singlePoint = point;\n                        if (pointXLocation === undefined) pointXLocation = chart.xScale()(chart.x()(point,pointIndex));\n                        allData.push({\n                            key: series.key,\n                            value: chart.y()(point, pointIndex),\n                            color: color(series,series.seriesIndex),\n                            data: series.values[pointIndex]\n                        });\n                    });\n\n                var xValue = xAxis.tickFormat()(chart.x()(singlePoint,pointIndex));\n                interactiveLayer.tooltip\n                    
.valueFormatter(function(d,i) {\n                        return yAxis.tickFormat()(d);\n                    })\n                    .data({\n                        value: xValue,\n                        index: pointIndex,\n                        series: allData\n                    })();\n\n                interactiveLayer.renderGuideLine(pointXLocation);\n\n            });\n\n            interactiveLayer.dispatch.on(\"elementMouseout\",function(e) {\n                dispatch.tooltipHide();\n                bars.clearHighlights();\n            });\n\n            legend.dispatch.on('legendClick', function(d,i) {\n                d.disabled = !d.disabled;\n\n                if (!data.filter(function(d) { return !d.disabled }).length) {\n                    data.map(function(d) {\n                        d.disabled = false;\n                        wrap.selectAll('.nv-series').classed('disabled', false);\n                        return d;\n                    });\n                }\n\n                state.disabled = data.map(function(d) { return !!d.disabled });\n                dispatch.stateChange(state);\n\n                selection.transition().call(chart);\n            });\n\n            legend.dispatch.on('legendDblclick', function(d) {\n                //Double clicking should always enable current series, and disabled all others.\n                data.forEach(function(d) {\n                    d.disabled = true;\n                });\n                d.disabled = false;\n\n                state.disabled = data.map(function(d) { return !!d.disabled });\n                dispatch.stateChange(state);\n                chart.update();\n            });\n\n            dispatch.on('changeState', function(e) {\n                if (typeof e.disabled !== 'undefined') {\n                    data.forEach(function(series,i) {\n                        series.disabled = e.disabled[i];\n                    });\n\n                    state.disabled = e.disabled;\n                }\n\n                chart.update();\n            });\n        });\n\n        renderWatch.renderEnd('historicalBarChart immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Event Handling/Dispatching (out of chart's scope)\n    //------------------------------------------------------------\n\n    bars.dispatch.on('elementMouseover.tooltip', function(evt) {\n        evt['series'] = {\n            key: chart.x()(evt.data),\n            value: chart.y()(evt.data),\n            color: evt.color\n        };\n        tooltip.data(evt).hidden(false);\n    });\n\n    bars.dispatch.on('elementMouseout.tooltip', function(evt) {\n        tooltip.hidden(true);\n    });\n\n    bars.dispatch.on('elementMousemove.tooltip', function(evt) {\n        tooltip();\n    });\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    // expose chart's sub-components\n    chart.dispatch = dispatch;\n    chart.bars = bars;\n    chart.legend = legend;\n    chart.xAxis = xAxis;\n    chart.yAxis = yAxis;\n    chart.interactiveLayer = interactiveLayer;\n    chart.tooltip = tooltip;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return 
height;}, set: function(_){height=_;}},\n        showLegend: {get: function(){return showLegend;}, set: function(_){showLegend=_;}},\n        showXAxis: {get: function(){return showXAxis;}, set: function(_){showXAxis=_;}},\n        showYAxis: {get: function(){return showYAxis;}, set: function(_){showYAxis=_;}},\n        defaultState:    {get: function(){return defaultState;}, set: function(_){defaultState=_;}},\n        noData:    {get: function(){return noData;}, set: function(_){noData=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            if (_.top !== undefined) {\n                margin.top = _.top;\n                marginTop = _.top;\n            }\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? _.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n            legend.color(color);\n            bars.color(color);\n        }},\n        duration:    {get: function(){return transitionDuration;}, set: function(_){\n            transitionDuration=_;\n            renderWatch.reset(transitionDuration);\n            yAxis.duration(transitionDuration);\n            xAxis.duration(transitionDuration);\n        }},\n        rightAlignYAxis: {get: function(){return rightAlignYAxis;}, set: function(_){\n            rightAlignYAxis = _;\n            yAxis.orient( (_) ? 'right' : 'left');\n        }},\n        useInteractiveGuideline: {get: function(){return useInteractiveGuideline;}, set: function(_){\n            useInteractiveGuideline = _;\n            if (_ === true) {\n                chart.interactive(false);\n            }\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, bars);\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n\n\n// ohlcChart is just a historical chart with ohlc bars and some tweaks\nnv.models.ohlcBarChart = function() {\n    var chart = nv.models.historicalBarChart(nv.models.ohlcBar());\n\n    // special default tooltip since we show multiple values per x\n    chart.useInteractiveGuideline(true);\n    chart.interactiveLayer.tooltip.contentGenerator(function(data) {\n        // we assume only one series exists for this chart\n        var d = data.series[0].data;\n        // match line colors as defined in nv.d3.css\n        var color = d.open < d.close ? 
\"2ca02c\" : \"d62728\";\n        return '' +\n            '<h3 style=\"color: #' + color + '\">' + data.value + '</h3>' +\n            '<table>' +\n            '<tr><td>open:</td><td>' + chart.yAxis.tickFormat()(d.open) + '</td></tr>' +\n            '<tr><td>close:</td><td>' + chart.yAxis.tickFormat()(d.close) + '</td></tr>' +\n            '<tr><td>high</td><td>' + chart.yAxis.tickFormat()(d.high) + '</td></tr>' +\n            '<tr><td>low:</td><td>' + chart.yAxis.tickFormat()(d.low) + '</td></tr>' +\n            '</table>';\n    });\n    return chart;\n};\n\n// candlestickChart is just a historical chart with candlestick bars and some tweaks\nnv.models.candlestickBarChart = function() {\n    var chart = nv.models.historicalBarChart(nv.models.candlestickBar());\n\n    // special default tooltip since we show multiple values per x\n    chart.useInteractiveGuideline(true);\n    chart.interactiveLayer.tooltip.contentGenerator(function(data) {\n        // we assume only one series exists for this chart\n        var d = data.series[0].data;\n        // match line colors as defined in nv.d3.css\n        var color = d.open < d.close ? \"2ca02c\" : \"d62728\";\n        return '' +\n            '<h3 style=\"color: #' + color + '\">' + data.value + '</h3>' +\n            '<table>' +\n            '<tr><td>open:</td><td>' + chart.yAxis.tickFormat()(d.open) + '</td></tr>' +\n            '<tr><td>close:</td><td>' + chart.yAxis.tickFormat()(d.close) + '</td></tr>' +\n            '<tr><td>high</td><td>' + chart.yAxis.tickFormat()(d.high) + '</td></tr>' +\n            '<tr><td>low:</td><td>' + chart.yAxis.tickFormat()(d.low) + '</td></tr>' +\n            '</table>';\n    });\n    return chart;\n};\n","nv.models.legend = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 5, right: 0, bottom: 5, left: 0}\n        , width = 400\n        , height = 20\n        , getKey = function(d) { return d.key }\n        , keyFormatter = function (d) { return d }\n        , color = nv.utils.getColor()\n        , maxKeyLength = 20 //default value for key lengths\n        , align = true\n        , padding = 32 //define how much space between legend items. - recommend 32 for furious version\n        , rightAlign = true\n        , updateState = true   //If true, legend will update data.disabled and trigger a 'stateChange' dispatch.\n        , radioButtonMode = false   //If true, clicking legend items will cause it to behave like a radio button. 
(only one can be selected at a time)\n        , expanded = false\n        , dispatch = d3.dispatch('legendClick', 'legendDblclick', 'legendMouseover', 'legendMouseout', 'stateChange')\n        , vers = 'classic' //Options are \"classic\" and \"furious\"\n        ;\n\n    function chart(selection) {\n        selection.each(function(data) {\n            var availableWidth = width - margin.left - margin.right,\n                container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-legend').data([data]);\n            var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-legend').append('g');\n            var g = wrap.select('g');\n\n            if (rightAlign)\n                wrap.attr('transform', 'translate(' + (- margin.right) + ',' + margin.top + ')');\n            else\n                wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            var series = g.selectAll('.nv-series')\n                .data(function(d) {\n                    if(vers != 'furious') return d;\n\n                    return d.filter(function(n) {\n                        return expanded ? true : !n.disengaged;\n                    });\n                });\n\n            var seriesEnter = series.enter().append('g').attr('class', 'nv-series');\n            var seriesShape;\n\n            var versPadding;\n            switch(vers) {\n                case 'furious' :\n                    versPadding = 23;\n                    break;\n                case 'classic' :\n                    versPadding = 20;\n            }\n\n            if(vers == 'classic') {\n                seriesEnter.append('circle')\n                    .style('stroke-width', 2)\n                    .attr('class','nv-legend-symbol')\n                    .attr('r', 5);\n\n                seriesShape = series.select('.nv-legend-symbol');\n            } else if (vers == 'furious') {\n                seriesEnter.append('rect')\n                    .style('stroke-width', 2)\n                    .attr('class','nv-legend-symbol')\n                    .attr('rx', 3)\n                    .attr('ry', 3);\n                seriesShape = series.select('.nv-legend-symbol');\n\n                seriesEnter.append('g')\n                    .attr('class', 'nv-check-box')\n                    .property('innerHTML','<path d=\"M0.5,5 L22.5,5 L22.5,26.5 L0.5,26.5 L0.5,5 Z\" class=\"nv-box\"></path><path d=\"M5.5,12.8618467 L11.9185089,19.2803556 L31,0.198864511\" class=\"nv-check\"></path>')\n                    .attr('transform', 'translate(-10,-8)scale(0.5)');\n\n                var seriesCheckbox = series.select('.nv-check-box');\n\n                seriesCheckbox.each(function(d,i) {\n                    d3.select(this).selectAll('path')\n                        .attr('stroke', setTextColor(d,i));\n                });\n            }\n\n            seriesEnter.append('text')\n                .attr('text-anchor', 'start')\n                .attr('class','nv-legend-text')\n                .attr('dy', '.32em')\n                .attr('dx', '8');\n\n            var seriesText = series.select('text.nv-legend-text');\n\n            series\n                .on('mouseover', function(d,i) {\n                    dispatch.legendMouseover(d,i);  //TODO: Make consistent with other event objects\n                })\n                .on('mouseout', function(d,i) {\n                    dispatch.legendMouseout(d,i);\n        
        })\n                .on('click', function(d,i) {\n                    dispatch.legendClick(d,i);\n                    // make sure we re-get data in case it was modified\n                    var data = series.data();\n                    if (updateState) {\n                        if(vers =='classic') {\n                            if (radioButtonMode) {\n                                //Radio button mode: set every series to disabled,\n                                //  and enable the clicked series.\n                                data.forEach(function(series) { series.disabled = true});\n                                d.disabled = false;\n                            }\n                            else {\n                                d.disabled = !d.disabled;\n                                if (data.every(function(series) { return series.disabled})) {\n                                    //the default behavior of NVD3 legends is, if every single series\n                                    // is disabled, turn all series' back on.\n                                    data.forEach(function(series) { series.disabled = false});\n                                }\n                            }\n                        } else if(vers == 'furious') {\n                            if(expanded) {\n                                d.disengaged = !d.disengaged;\n                                d.userDisabled = d.userDisabled == undefined ? !!d.disabled : d.userDisabled;\n                                d.disabled = d.disengaged || d.userDisabled;\n                            } else if (!expanded) {\n                                d.disabled = !d.disabled;\n                                d.userDisabled = d.disabled;\n                                var engaged = data.filter(function(d) { return !d.disengaged; });\n                                if (engaged.every(function(series) { return series.userDisabled })) {\n                                    //the default behavior of NVD3 legends is, if every single series\n                                    // is disabled, turn all series' back on.\n                                    data.forEach(function(series) {\n                                        series.disabled = series.userDisabled = false;\n                                    });\n                                }\n                            }\n                        }\n                        dispatch.stateChange({\n                            disabled: data.map(function(d) { return !!d.disabled }),\n                            disengaged: data.map(function(d) { return !!d.disengaged })\n                        });\n\n                    }\n                })\n                .on('dblclick', function(d,i) {\n                    if(vers == 'furious' && expanded) return;\n                    dispatch.legendDblclick(d,i);\n                    if (updateState) {\n                        // make sure we re-get data in case it was modified\n                        var data = series.data();\n                        //the default behavior of NVD3 legends, when double clicking one,\n                        // is to set all other series' to false, and make the double clicked series enabled.\n                        data.forEach(function(series) {\n                            series.disabled = true;\n                            if(vers == 'furious') series.userDisabled = series.disabled;\n                        });\n                        d.disabled = false;\n                        
if(vers == 'furious') d.userDisabled = d.disabled;\n                        dispatch.stateChange({\n                            disabled: data.map(function(d) { return !!d.disabled })\n                        });\n                    }\n                });\n\n            series.classed('nv-disabled', function(d) { return d.userDisabled });\n            series.exit().remove();\n\n            seriesText\n                .attr('fill', setTextColor)\n                .text(function (d) { return keyFormatter(getKey(d)) });\n\n            //TODO: implement fixed-width and max-width options (max-width is especially useful with the align option)\n            // NEW ALIGNING CODE, TODO: clean up\n            var legendWidth = 0;\n            if (align) {\n\n                var seriesWidths = [];\n                series.each(function(d,i) {\n                    var legendText;\n                    if (keyFormatter(getKey(d)) && keyFormatter(getKey(d)).length > maxKeyLength) {\n                        var trimmedKey = keyFormatter(getKey(d)).substring(0, maxKeyLength);\n                        legendText = d3.select(this).select('text').text(trimmedKey + \"...\");\n                        d3.select(this).append(\"svg:title\").text(keyFormatter(getKey(d)));\n                    } else {\n                        legendText = d3.select(this).select('text');\n                    }\n                    var nodeTextLength;\n                    try {\n                        nodeTextLength = legendText.node().getComputedTextLength();\n                        // If the legendText is display:none'd (nodeTextLength == 0), simulate an error so we approximate, instead\n                        if(nodeTextLength <= 0) throw Error();\n                    }\n                    catch(e) {\n                        nodeTextLength = nv.utils.calcApproxTextWidth(legendText);\n                    }\n\n                    seriesWidths.push(nodeTextLength + padding);\n                });\n\n                var seriesPerRow = 0;\n                var columnWidths = [];\n                legendWidth = 0;\n\n                while ( legendWidth < availableWidth && seriesPerRow < seriesWidths.length) {\n                    columnWidths[seriesPerRow] = seriesWidths[seriesPerRow];\n                    legendWidth += seriesWidths[seriesPerRow++];\n                }\n                if (seriesPerRow === 0) seriesPerRow = 1; //minimum of one series per row\n\n                while ( legendWidth > availableWidth && seriesPerRow > 1 ) {\n                    columnWidths = [];\n                    seriesPerRow--;\n\n                    for (var k = 0; k < seriesWidths.length; k++) {\n                        if (seriesWidths[k] > (columnWidths[k % seriesPerRow] || 0) )\n                            columnWidths[k % seriesPerRow] = seriesWidths[k];\n                    }\n\n                    legendWidth = columnWidths.reduce(function(prev, cur, index, array) {\n                        return prev + cur;\n                    });\n                }\n\n                var xPositions = [];\n                for (var i = 0, curX = 0; i < seriesPerRow; i++) {\n                    xPositions[i] = curX;\n                    curX += columnWidths[i];\n                }\n\n                series\n                    .attr('transform', function(d, i) {\n                        return 'translate(' + xPositions[i % seriesPerRow] + ',' + (5 + Math.floor(i / seriesPerRow) * versPadding) + ')';\n                    });\n\n                //position legend 
as far right as possible within the total width\n                if (rightAlign) {\n                    g.attr('transform', 'translate(' + (width - margin.right - legendWidth) + ',' + margin.top + ')');\n                }\n                else {\n                    g.attr('transform', 'translate(0' + ',' + margin.top + ')');\n                }\n\n                height = margin.top + margin.bottom + (Math.ceil(seriesWidths.length / seriesPerRow) * versPadding);\n\n            } else {\n\n                var ypos = 5,\n                    newxpos = 5,\n                    maxwidth = 0,\n                    xpos;\n                series\n                    .attr('transform', function(d, i) {\n                        var length = d3.select(this).select('text').node().getComputedTextLength() + padding;\n                        xpos = newxpos;\n\n                        if (width < margin.left + margin.right + xpos + length) {\n                            newxpos = xpos = 5;\n                            ypos += versPadding;\n                        }\n\n                        newxpos += length;\n                        if (newxpos > maxwidth) maxwidth = newxpos;\n\n                        if(legendWidth < xpos + maxwidth) {\n                            legendWidth = xpos + maxwidth;\n                        }\n                        return 'translate(' + xpos + ',' + ypos + ')';\n                    });\n\n                //position legend as far right as possible within the total width\n                g.attr('transform', 'translate(' + (width - margin.right - maxwidth) + ',' + margin.top + ')');\n\n                height = margin.top + margin.bottom + ypos + 15;\n            }\n\n            if(vers == 'furious') {\n                // Size rectangles after text is placed\n                seriesShape\n                    .attr('width', function(d,i) {\n                        return seriesText[0][i].getComputedTextLength() + 27;\n                    })\n                    .attr('height', 18)\n                    .attr('y', -9)\n                    .attr('x', -15);\n\n                // The background for the expanded legend (UI)\n                gEnter.insert('rect',':first-child')\n                    .attr('class', 'nv-legend-bg')\n                    .attr('fill', '#eee')\n                    // .attr('stroke', '#444')\n                    .attr('opacity',0);\n\n                var seriesBG = g.select('.nv-legend-bg');\n\n                seriesBG\n                .transition().duration(300)\n                    .attr('x', -versPadding )\n                    .attr('width', legendWidth + versPadding - 12)\n                    .attr('height', height + 10)\n                    .attr('y', -margin.top - 10)\n                    .attr('opacity', expanded ? 1 : 0);\n\n\n            }\n\n            seriesShape\n                .style('fill', setBGColor)\n                .style('fill-opacity', setBGOpacity)\n                .style('stroke', setBGColor);\n        });\n\n        function setTextColor(d,i) {\n            if(vers != 'furious') return '#000';\n            if(expanded) {\n                return d.disengaged ? '#000' : '#fff';\n            } else if (!expanded) {\n                if(!d.color) d.color = color(d,i);\n                return !!d.disabled ? d.color : '#fff';\n            }\n        }\n\n        function setBGColor(d,i) {\n            if(expanded && vers == 'furious') {\n                return d.disengaged ? 
'#eee' : d.color || color(d,i);\n            } else {\n                return d.color || color(d,i);\n            }\n        }\n\n\n        function setBGOpacity(d,i) {\n            if(expanded && vers == 'furious') {\n                return 1;\n            } else {\n                return !!d.disabled ? 0 : 1;\n            }\n        }\n\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:          {get: function(){return width;}, set: function(_){width=_;}},\n        height:         {get: function(){return height;}, set: function(_){height=_;}},\n        key:            {get: function(){return getKey;}, set: function(_){getKey=_;}},\n        keyFormatter:   {get: function(){return keyFormatter;}, set: function(_){keyFormatter=_;}},\n        align:          {get: function(){return align;}, set: function(_){align=_;}},\n        maxKeyLength:   {get: function(){return maxKeyLength;}, set: function(_){maxKeyLength=_;}},\n        rightAlign:     {get: function(){return rightAlign;}, set: function(_){rightAlign=_;}},\n        padding:        {get: function(){return padding;}, set: function(_){padding=_;}},\n        updateState:    {get: function(){return updateState;}, set: function(_){updateState=_;}},\n        radioButtonMode:{get: function(){return radioButtonMode;}, set: function(_){radioButtonMode=_;}},\n        expanded:       {get: function(){return expanded;}, set: function(_){expanded=_;}},\n        vers:           {get: function(){return vers;}, set: function(_){vers=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","\nnv.models.line = function() {\n    \"use strict\";\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var  scatter = nv.models.scatter()\n        ;\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0}\n        , width = 960\n        , height = 500\n        , container = null\n        , strokeWidth = 1.5\n        , color = nv.utils.defaultColor() // a function that returns a color\n        , getX = function(d) { return d.x } // accessor to get the x value from a data point\n        , getY = function(d) { return d.y } // accessor to get the y value from a data point\n        , defined = function(d,i) { return !isNaN(getY(d,i)) && getY(d,i) !== null } // allows a line to be not continuous when it is not defined\n        , isArea = function(d) { return d.area } // decides if a line is an area or just a line\n        , clipEdge = false // if true, masks lines within x and y scale\n        , x //can be accessed via chart.xScale()\n        , y //can be accessed via chart.yScale()\n        , interpolate = \"linear\" // controls the line interpolation\n        , duration = 250\n        , dispatch = d3.dispatch('elementClick', 'elementMouseover', 'elementMouseout', 'renderEnd')\n        ;\n\n    scatter\n        .pointSize(16) // default size\n        .pointDomain([16,256]) //set to speed up calculation, needs to be unset if there is a custom size accessor\n    ;\n\n    //============================================================\n\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var x0, y0 //used to store previous scales\n        , renderWatch = nv.utils.renderWatch(dispatch, duration)\n        ;\n\n    //============================================================\n\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(scatter);\n        selection.each(function(data) {\n            container = d3.select(this);\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n            nv.utils.initSVG(container);\n\n            // Setup Scales\n            x = scatter.xScale();\n            y = scatter.yScale();\n\n            x0 = x0 || x;\n            y0 = y0 || y;\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-line').data([data]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-line');\n            var defsEnter = wrapEnter.append('defs');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-groups');\n            gEnter.append('g').attr('class', 'nv-scatterWrap');\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            scatter\n                .width(availableWidth)\n                .height(availableHeight);\n\n            var scatterWrap = wrap.select('.nv-scatterWrap');\n            
scatterWrap.call(scatter);\n\n            defsEnter.append('clipPath')\n                .attr('id', 'nv-edge-clip-' + scatter.id())\n                .append('rect');\n\n            wrap.select('#nv-edge-clip-' + scatter.id() + ' rect')\n                .attr('width', availableWidth)\n                .attr('height', (availableHeight > 0) ? availableHeight : 0);\n\n            g   .attr('clip-path', clipEdge ? 'url(#nv-edge-clip-' + scatter.id() + ')' : '');\n            scatterWrap\n                .attr('clip-path', clipEdge ? 'url(#nv-edge-clip-' + scatter.id() + ')' : '');\n\n            var groups = wrap.select('.nv-groups').selectAll('.nv-group')\n                .data(function(d) { return d }, function(d) { return d.key });\n            groups.enter().append('g')\n                .style('stroke-opacity', 1e-6)\n                .style('stroke-width', function(d) { return d.strokeWidth || strokeWidth })\n                .style('fill-opacity', 1e-6);\n\n            groups.exit().remove();\n\n            groups\n                .attr('class', function(d,i) {\n                    return (d.classed || '') + ' nv-group nv-series-' + i;\n                })\n                .classed('hover', function(d) { return d.hover })\n                .style('fill', function(d,i){ return color(d, i) })\n                .style('stroke', function(d,i){ return color(d, i)});\n            groups.watchTransition(renderWatch, 'line: groups')\n                .style('stroke-opacity', 1)\n                .style('fill-opacity', function(d) { return d.fillOpacity || .5});\n\n            var areaPaths = groups.selectAll('path.nv-area')\n                .data(function(d) { return isArea(d) ? [d] : [] }); // this is done differently than lines because I need to check if series is an area\n            areaPaths.enter().append('path')\n                .attr('class', 'nv-area')\n                .attr('d', function(d) {\n                    return d3.svg.area()\n                        .interpolate(interpolate)\n                        .defined(defined)\n                        .x(function(d,i) { return nv.utils.NaNtoZero(x0(getX(d,i))) })\n                        .y0(function(d,i) { return nv.utils.NaNtoZero(y0(getY(d,i))) })\n                        .y1(function(d,i) { return y0( y.domain()[0] <= 0 ? y.domain()[1] >= 0 ? 0 : y.domain()[1] : y.domain()[0] ) })\n                        //.y1(function(d,i) { return y0(0) }) //assuming 0 is within y domain.. may need to tweak this\n                        .apply(this, [d.values])\n                });\n            groups.exit().selectAll('path.nv-area')\n                .remove();\n\n            areaPaths.watchTransition(renderWatch, 'line: areaPaths')\n                .attr('d', function(d) {\n                    return d3.svg.area()\n                        .interpolate(interpolate)\n                        .defined(defined)\n                        .x(function(d,i) { return nv.utils.NaNtoZero(x(getX(d,i))) })\n                        .y0(function(d,i) { return nv.utils.NaNtoZero(y(getY(d,i))) })\n                        .y1(function(d,i) { return y( y.domain()[0] <= 0 ? y.domain()[1] >= 0 ? 0 : y.domain()[1] : y.domain()[0] ) })\n                        //.y1(function(d,i) { return y0(0) }) //assuming 0 is within y domain.. 
may need to tweak this\n                        .apply(this, [d.values])\n                });\n\n            var linePaths = groups.selectAll('path.nv-line')\n                .data(function(d) { return [d.values] });\n\n            linePaths.enter().append('path')\n                .attr('class', 'nv-line')\n                .attr('d',\n                    d3.svg.line()\n                    .interpolate(interpolate)\n                    .defined(defined)\n                    .x(function(d,i) { return nv.utils.NaNtoZero(x0(getX(d,i))) })\n                    .y(function(d,i) { return nv.utils.NaNtoZero(y0(getY(d,i))) })\n            );\n\n            linePaths.watchTransition(renderWatch, 'line: linePaths')\n                .attr('d',\n                    d3.svg.line()\n                    .interpolate(interpolate)\n                    .defined(defined)\n                    .x(function(d,i) { return nv.utils.NaNtoZero(x(getX(d,i))) })\n                    .y(function(d,i) { return nv.utils.NaNtoZero(y(getY(d,i))) })\n            );\n\n            //store old scales for use in transitions on update\n            x0 = x.copy();\n            y0 = y.copy();\n        });\n        renderWatch.renderEnd('line immediate');\n        return chart;\n    }\n\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.scatter = scatter;\n    // Pass through events\n    scatter.dispatch.on('elementClick', function(){ dispatch.elementClick.apply(this, arguments); });\n    scatter.dispatch.on('elementMouseover', function(){ dispatch.elementMouseover.apply(this, arguments); });\n    scatter.dispatch.on('elementMouseout', function(){ dispatch.elementMouseout.apply(this, arguments); });\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        defined: {get: function(){return defined;}, set: function(_){defined=_;}},\n        interpolate:      {get: function(){return interpolate;}, set: function(_){interpolate=_;}},\n        clipEdge:    {get: function(){return clipEdge;}, set: function(_){clipEdge=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n            scatter.duration(duration);\n        }},\n        isArea: {get: function(){return isArea;}, set: function(_){\n            isArea = d3.functor(_);\n        }},\n        x: {get: function(){return getX;}, set: function(_){\n            getX = _;\n            scatter.x(_);\n        }},\n        y: {get: function(){return getY;}, set: function(_){\n            getY = _;\n            scatter.y(_);\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n            scatter.color(color);\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, scatter);\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","nv.models.lineChart = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var lines = nv.models.line()\n        , xAxis = nv.models.axis()\n        , yAxis = nv.models.axis()\n        , legend = nv.models.legend()\n        , interactiveLayer = nv.interactiveGuideline()\n        , tooltip = nv.models.tooltip()\n        , focus = nv.models.focus(nv.models.line())\n        ;\n\n    var margin = {top: 30, right: 20, bottom: 50, left: 60}\n        , marginTop = null\n        , color = nv.utils.defaultColor()\n        , width = null\n        , height = null\n        , showLegend = true\n        , legendPosition = 'top'\n        , showXAxis = true\n        , showYAxis = true\n        , rightAlignYAxis = false\n        , useInteractiveGuideline = false\n        , x\n        , y\n        , focusEnable = false\n        , state = nv.utils.state()\n        , defaultState = null\n        , noData = null\n        , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'stateChange', 'changeState', 'renderEnd')\n        , duration = 250\n        ;\n\n    // set options on sub-objects for this chart\n    xAxis.orient('bottom').tickPadding(7);\n    yAxis.orient(rightAlignYAxis ? 
'right' : 'left');\n\n    lines.clipEdge(true).duration(0);\n\n    tooltip.valueFormatter(function(d, i) {\n        return yAxis.tickFormat()(d, i);\n    }).headerFormatter(function(d, i) {\n        return xAxis.tickFormat()(d, i);\n    });\n\n    interactiveLayer.tooltip.valueFormatter(function(d, i) {\n        return yAxis.tickFormat()(d, i);\n    }).headerFormatter(function(d, i) {\n        return xAxis.tickFormat()(d, i);\n    });\n\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch, duration);\n\n    var stateGetter = function(data) {\n        return function(){\n            return {\n                active: data.map(function(d) { return !d.disabled; })\n            };\n        };\n    };\n\n    var stateSetter = function(data) {\n        return function(state) {\n            if (state.active !== undefined)\n                data.forEach(function(series,i) {\n                    series.disabled = !state.active[i];\n                });\n        };\n    };\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(lines);\n        if (showXAxis) renderWatch.models(xAxis);\n        if (showYAxis) renderWatch.models(yAxis);\n\n        selection.each(function(data) {\n            var container = d3.select(this);\n            nv.utils.initSVG(container);\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin) - (focusEnable ? focus.height() : 0);\n            chart.update = function() {\n                if( duration === 0 ) {\n                    container.call( chart );\n                } else {\n                    container.transition().duration(duration).call(chart);\n                }\n            };\n            chart.container = this;\n\n            state\n                .setter(stateSetter(data), chart.update)\n                .getter(stateGetter(data))\n                .update();\n\n            // DEPRECATED set state.disabled\n            state.disabled = data.map(function(d) { return !!d.disabled; });\n\n            if (!defaultState) {\n                var key;\n                defaultState = {};\n                for (key in state) {\n                    if (state[key] instanceof Array)\n                        defaultState[key] = state[key].slice(0);\n                    else\n                        defaultState[key] = state[key];\n                }\n            }\n\n            // Display noData message if there's nothing to show.\n            if (!data || !data.length || !data.filter(function(d) { return d.values.length; }).length) {\n                nv.utils.noData(chart, container);\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            /* Update `main' graph on brush update. 
*/\n            focus.dispatch.on(\"onBrush\", function(extent) {\n                onBrush(extent);\n            });\n\n            // Setup Scales\n            x = lines.xScale();\n            y = lines.yScale();\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-lineChart').data([data]);\n            var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-lineChart').append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-legendWrap');\n\n            var focusEnter = gEnter.append('g').attr('class', 'nv-focus');\n            focusEnter.append('g').attr('class', 'nv-background').append('rect');\n            focusEnter.append('g').attr('class', 'nv-x nv-axis');\n            focusEnter.append('g').attr('class', 'nv-y nv-axis');\n            focusEnter.append('g').attr('class', 'nv-linesWrap');\n            focusEnter.append('g').attr('class', 'nv-interactive');\n\n            var contextEnter = gEnter.append('g').attr('class', 'nv-focusWrap');\n\n            // Legend\n            if (!showLegend) {\n                g.select('.nv-legendWrap').selectAll('*').remove();\n            } else {\n                legend.width(availableWidth);\n\n                g.select('.nv-legendWrap')\n                    .datum(data)\n                    .call(legend);\n\n                if (legendPosition === 'bottom') {\n                    wrap.select('.nv-legendWrap')\n                        .attr('transform', 'translate(0,' + availableHeight +')');\n                } else if (legendPosition === 'top') {\n                    if (!marginTop && legend.height() !== margin.top) {\n                        margin.top = legend.height();\n                        availableHeight = nv.utils.availableHeight(height, container, margin) - (focusEnable ? 
focus.height() : 0);\n                    }\n\n                    wrap.select('.nv-legendWrap')\n                        .attr('transform', 'translate(0,' + (-margin.top) +')');\n                }\n            }\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            if (rightAlignYAxis) {\n                g.select(\".nv-y.nv-axis\")\n                    .attr(\"transform\", \"translate(\" + availableWidth + \",0)\");\n            }\n\n            //Set up interactive layer\n            if (useInteractiveGuideline) {\n                interactiveLayer\n                    .width(availableWidth)\n                    .height(availableHeight)\n                    .margin({left:margin.left, top:margin.top})\n                    .svgContainer(container)\n                    .xScale(x);\n                wrap.select(\".nv-interactive\").call(interactiveLayer);\n            }\n\n            g.select('.nv-focus .nv-background rect')\n                .attr('width', availableWidth)\n                .attr('height', availableHeight);\n\n            lines\n                .width(availableWidth)\n                .height(availableHeight)\n                .color(data.map(function(d,i) {\n                    return d.color || color(d, i);\n                }).filter(function(d,i) { return !data[i].disabled; }));\n\n            var linesWrap = g.select('.nv-linesWrap')\n                .datum(data.filter(function(d) { return !d.disabled; }));\n\n\n            // Setup Main (Focus) Axes\n            if (showXAxis) {\n                xAxis\n                    .scale(x)\n                    ._ticks(nv.utils.calcTicksX(availableWidth/100, data) )\n                    .tickSize(-availableHeight, 0);\n            }\n\n            if (showYAxis) {\n                yAxis\n                    .scale(y)\n                    ._ticks( nv.utils.calcTicksY(availableHeight/36, data) )\n                    .tickSize( -availableWidth, 0);\n            }\n\n            //============================================================\n            // Update Axes\n            //============================================================\n            function updateXAxis() {\n              if(showXAxis) {\n                g.select('.nv-focus .nv-x.nv-axis')\n                  .transition()\n                  .duration(duration)\n                  .call(xAxis)\n                ;\n              }\n            }\n\n            function updateYAxis() {\n              if(showYAxis) {\n                g.select('.nv-focus .nv-y.nv-axis')\n                  .transition()\n                  .duration(duration)\n                  .call(yAxis)\n                ;\n              }\n            }\n\n            g.select('.nv-focus .nv-x.nv-axis')\n                .attr('transform', 'translate(0,' + availableHeight + ')');\n\n            //============================================================\n            // Update Focus\n            //============================================================\n            if(!focusEnable) {\n                linesWrap.call(lines);\n                updateXAxis();\n                updateYAxis();\n            } else {\n                focus.width(availableWidth);\n                g.select('.nv-focusWrap')\n                    .attr('transform', 'translate(0,' + ( availableHeight + margin.bottom + focus.margin().top) + ')')\n                    .datum(data.filter(function(d) { return !d.disabled; }))\n                    .call(focus);\n                var 
extent = focus.brush.empty() ? focus.xDomain() : focus.brush.extent();\n                if(extent !== null){\n                    onBrush(extent);\n                }\n            }\n            //============================================================\n            // Event Handling/Dispatching (in chart's scope)\n            //------------------------------------------------------------\n\n            legend.dispatch.on('stateChange', function(newState) {\n                for (var key in newState)\n                    state[key] = newState[key];\n                dispatch.stateChange(state);\n                chart.update();\n            });\n\n            interactiveLayer.dispatch.on('elementMousemove', function(e) {\n                lines.clearHighlights();\n                var singlePoint, pointIndex, pointXLocation, allData = [];\n                data\n                    .filter(function(series, i) {\n                        series.seriesIndex = i;\n                        return !series.disabled && !series.disableTooltip;\n                    })\n                    .forEach(function(series,i) {\n                        var extent = focusEnable ? (focus.brush.empty() ? focus.xScale().domain() : focus.brush.extent()) : x.domain();\n                        var currentValues = series.values.filter(function(d,i) {\n                            // Checks if the x point is between the extents, handling case where extent[0] is greater than extent[1]\n                            // (e.g. x domain is manually set to reverse the x-axis)\n                            if(extent[0] <= extent[1]) {\n                                return lines.x()(d,i) >= extent[0] && lines.x()(d,i) <= extent[1];\n                            } else {\n                                return lines.x()(d,i) >= extent[1] && lines.x()(d,i) <= extent[0];\n                            }\n                        });\n\n                        pointIndex = nv.interactiveBisect(currentValues, e.pointXValue, lines.x());\n                        var point = currentValues[pointIndex];\n                        var pointYValue = chart.y()(point, pointIndex);\n                        if (pointYValue !== null) {\n                            lines.highlightPoint(i, pointIndex, true);\n                        }\n                        if (point === undefined) return;\n                        if (singlePoint === undefined) singlePoint = point;\n                        if (pointXLocation === undefined) pointXLocation = chart.xScale()(chart.x()(point,pointIndex));\n                        allData.push({\n                            key: series.key,\n                            value: pointYValue,\n                            color: color(series,series.seriesIndex),\n                            data: point\n                        });\n                    });\n                //Highlight the tooltip entry based on which point the mouse is closest to.\n                if (allData.length > 2) {\n                    var yValue = chart.yScale().invert(e.mouseY);\n                    var domainExtent = Math.abs(chart.yScale().domain()[0] - chart.yScale().domain()[1]);\n                    var threshold = 0.03 * domainExtent;\n                    var indexToHighlight = nv.nearestValueIndex(allData.map(function(d){return d.value;}),yValue,threshold);\n                    if (indexToHighlight !== null)\n                        allData[indexToHighlight].highlight = true;\n                }\n\n                var defaultValueFormatter = 
function(d,i) {\n                    return d == null ? \"N/A\" : yAxis.tickFormat()(d);\n                };\n\n                interactiveLayer.tooltip\n                    .valueFormatter(interactiveLayer.tooltip.valueFormatter() || defaultValueFormatter)\n                    .data({\n                        value: chart.x()( singlePoint,pointIndex ),\n                        index: pointIndex,\n                        series: allData\n                    })();\n\n                interactiveLayer.renderGuideLine(pointXLocation);\n\n            });\n\n            interactiveLayer.dispatch.on('elementClick', function(e) {\n                var pointXLocation, allData = [];\n\n                data.filter(function(series, i) {\n                    series.seriesIndex = i;\n                    return !series.disabled;\n                }).forEach(function(series) {\n                    var pointIndex = nv.interactiveBisect(series.values, e.pointXValue, chart.x());\n                    var point = series.values[pointIndex];\n                    if (typeof point === 'undefined') return;\n                    if (typeof pointXLocation === 'undefined') pointXLocation = chart.xScale()(chart.x()(point,pointIndex));\n                    var yPos = chart.yScale()(chart.y()(point,pointIndex));\n                    allData.push({\n                        point: point,\n                        pointIndex: pointIndex,\n                        pos: [pointXLocation, yPos],\n                        seriesIndex: series.seriesIndex,\n                        series: series\n                    });\n                });\n\n                lines.dispatch.elementClick(allData);\n            });\n\n            interactiveLayer.dispatch.on(\"elementMouseout\",function(e) {\n                lines.clearHighlights();\n            });\n\n            dispatch.on('changeState', function(e) {\n                if (typeof e.disabled !== 'undefined' && data.length === e.disabled.length) {\n                    data.forEach(function(series,i) {\n                        series.disabled = e.disabled[i];\n                    });\n\n                    state.disabled = e.disabled;\n                }\n                chart.update();\n            });\n\n            //============================================================\n            // Functions\n            //------------------------------------------------------------\n\n            // Taken from crossfilter (http://square.github.com/crossfilter/)\n            function resizePath(d) {\n                var e = +(d == 'e'),\n                    x = e ? 
1 : -1,\n                    y = availableHeight / 3;\n                return 'M' + (0.5 * x) + ',' + y\n                    + 'A6,6 0 0 ' + e + ' ' + (6.5 * x) + ',' + (y + 6)\n                    + 'V' + (2 * y - 6)\n                    + 'A6,6 0 0 ' + e + ' ' + (0.5 * x) + ',' + (2 * y)\n                    + 'Z'\n                    + 'M' + (2.5 * x) + ',' + (y + 8)\n                    + 'V' + (2 * y - 8)\n                    + 'M' + (4.5 * x) + ',' + (y + 8)\n                    + 'V' + (2 * y - 8);\n            }\n\n            function onBrush(extent) {\n                // Update Main (Focus)\n                var focusLinesWrap = g.select('.nv-focus .nv-linesWrap')\n                    .datum(\n                    data.filter(function(d) { return !d.disabled; })\n                        .map(function(d,i) {\n                            return {\n                                key: d.key,\n                                area: d.area,\n                                classed: d.classed,\n                                values: d.values.filter(function(d,i) {\n                                    return lines.x()(d,i) >= extent[0] && lines.x()(d,i) <= extent[1];\n                                }),\n                                disableTooltip: d.disableTooltip\n                            };\n                        })\n                );\n                focusLinesWrap.transition().duration(duration).call(lines);\n\n                // Update Main (Focus) Axes\n                updateXAxis();\n                updateYAxis();\n            }\n        });\n\n        renderWatch.renderEnd('lineChart immediate');\n        return chart;\n    }\n\n\n    //============================================================\n    // Event Handling/Dispatching (out of chart's scope)\n    //------------------------------------------------------------\n\n    lines.dispatch.on('elementMouseover.tooltip', function(evt) {\n        if(!evt.series.disableTooltip){\n            tooltip.data(evt).hidden(false);\n        }\n    });\n\n    lines.dispatch.on('elementMouseout.tooltip', function(evt) {\n        tooltip.hidden(true);\n    });\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    // expose chart's sub-components\n    chart.dispatch = dispatch;\n    chart.lines = lines;\n    chart.legend = legend;\n    chart.focus = focus;\n    chart.xAxis = xAxis;\n    chart.x2Axis = focus.xAxis\n    chart.yAxis = yAxis;\n    chart.y2Axis = focus.yAxis\n    chart.interactiveLayer = interactiveLayer;\n    chart.tooltip = tooltip;\n    chart.state = state;\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        showLegend: {get: function(){return showLegend;}, set: function(_){showLegend=_;}},\n        legendPosition: {get: function(){return legendPosition;}, set: function(_){legendPosition=_;}},\n        showXAxis:      {get: function(){return showXAxis;}, set: function(_){showXAxis=_;}},\n        showYAxis:    {get: function(){return showYAxis;}, set: function(_){showYAxis=_;}},\n        defaultState:    {get: function(){return defaultState;}, set: function(_){defaultState=_;}},\n        
noData:    {get: function(){return noData;}, set: function(_){noData=_;}},\n        // Focus options, mostly passed onto focus model.\n        focusEnable:    {get: function(){return focusEnable;}, set: function(_){focusEnable=_;}},\n        focusHeight:     {get: function(){return focus.height();}, set: function(_){focus.height(_);}},\n        focusShowAxisX:    {get: function(){return focus.showXAxis();}, set: function(_){focus.showXAxis(_);}},\n        focusShowAxisY:    {get: function(){return focus.showYAxis();}, set: function(_){focus.showYAxis(_);}},\n        brushExtent: {get: function(){return focus.brushExtent();}, set: function(_){focus.brushExtent(_);}},\n\n        // options that require extra logic in the setter\n        focusMargin: {get: function(){return focus.margin}, set: function(_){\n            if (_.top !== undefined) {\n                margin.top = _.top;\n                marginTop = _.top;\n            }\n            focus.margin.right  = _.right  !== undefined ? _.right  : focus.margin.right;\n            focus.margin.bottom = _.bottom !== undefined ? _.bottom : focus.margin.bottom;\n            focus.margin.left   = _.left   !== undefined ? _.left   : focus.margin.left;\n        }},\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? _.left   : margin.left;\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n            lines.duration(duration);\n            focus.duration(duration);\n            xAxis.duration(duration);\n            yAxis.duration(duration);\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n            legend.color(color);\n            lines.color(color);\n            focus.color(color);\n        }},\n        interpolate: {get: function(){return lines.interpolate();}, set: function(_){\n            lines.interpolate(_);\n            focus.interpolate(_);\n        }},\n        xTickFormat: {get: function(){return xAxis.tickFormat();}, set: function(_){\n            xAxis.tickFormat(_);\n            focus.xTickFormat(_);\n        }},\n        yTickFormat: {get: function(){return yAxis.tickFormat();}, set: function(_){\n            yAxis.tickFormat(_);\n            focus.yTickFormat(_);\n        }},\n        x: {get: function(){return lines.x();}, set: function(_){\n            lines.x(_);\n            focus.x(_);\n        }},\n        y: {get: function(){return lines.y();}, set: function(_){\n            lines.y(_);\n            focus.y(_);\n        }},\n        rightAlignYAxis: {get: function(){return rightAlignYAxis;}, set: function(_){\n            rightAlignYAxis = _;\n            yAxis.orient( rightAlignYAxis ? 
'right' : 'left');\n        }},\n        useInteractiveGuideline: {get: function(){return useInteractiveGuideline;}, set: function(_){\n            useInteractiveGuideline = _;\n            if (useInteractiveGuideline) {\n                lines.interactive(false);\n                lines.useVoronoi(false);\n            }\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, lines);\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n\nnv.models.lineWithFocusChart = function() {\n  return nv.models.lineChart()\n    .margin({ bottom: 30 })\n    .focusEnable( true );\n};\n","nv.models.linePlusBarChart = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var lines = nv.models.line()\n        , lines2 = nv.models.line()\n        , bars = nv.models.historicalBar()\n        , bars2 = nv.models.historicalBar()\n        , xAxis = nv.models.axis()\n        , x2Axis = nv.models.axis()\n        , y1Axis = nv.models.axis()\n        , y2Axis = nv.models.axis()\n        , y3Axis = nv.models.axis()\n        , y4Axis = nv.models.axis()\n        , legend = nv.models.legend()\n        , brush = d3.svg.brush()\n        , tooltip = nv.models.tooltip()\n        ;\n\n    var margin = {top: 30, right: 30, bottom: 30, left: 60}\n        , marginTop = null\n        , margin2 = {top: 0, right: 30, bottom: 20, left: 60}\n        , width = null\n        , height = null\n        , getX = function(d) { return d.x }\n        , getY = function(d) { return d.y }\n        , color = nv.utils.defaultColor()\n        , showLegend = true\n        , focusEnable = true\n        , focusShowAxisY = false\n        , focusShowAxisX = true\n        , focusHeight = 50\n        , extent\n        , brushExtent = null\n        , x\n        , x2\n        , y1\n        , y2\n        , y3\n        , y4\n        , noData = null\n        , dispatch = d3.dispatch('brush', 'stateChange', 'changeState')\n        , transitionDuration = 0\n        , state = nv.utils.state()\n        , defaultState = null\n        , legendLeftAxisHint = ' (left axis)'\n        , legendRightAxisHint = ' (right axis)'\n        , switchYAxisOrder = false\n        ;\n\n    lines.clipEdge(true);\n    lines2.interactive(false);\n    // We don't want any points emitted for the focus chart's scatter graph.\n    lines2.pointActive(function(d) { return false });\n    xAxis.orient('bottom').tickPadding(5);\n    y1Axis.orient('left');\n    y2Axis.orient('right');\n    x2Axis.orient('bottom').tickPadding(5);\n    y3Axis.orient('left');\n    y4Axis.orient('right');\n\n    tooltip.headerEnabled(true).headerFormatter(function(d, i) {\n        return xAxis.tickFormat()(d, i);\n    });\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var getBarsAxis = function() {\n        return switchYAxisOrder\n            ? { main: y2Axis, focus: y4Axis }\n            : { main: y1Axis, focus: y3Axis }\n    }\n\n    var getLinesAxis = function() {\n        return switchYAxisOrder\n            ? 
{ main: y1Axis, focus: y3Axis }\n            : { main: y2Axis, focus: y4Axis }\n    }\n\n    var stateGetter = function(data) {\n        return function(){\n            return {\n                active: data.map(function(d) { return !d.disabled })\n            };\n        }\n    };\n\n    var stateSetter = function(data) {\n        return function(state) {\n            if (state.active !== undefined)\n                data.forEach(function(series,i) {\n                    series.disabled = !state.active[i];\n                });\n        }\n    };\n\n    var allDisabled = function(data) {\n      return data.every(function(series) {\n        return series.disabled;\n      });\n    }\n\n    function chart(selection) {\n        selection.each(function(data) {\n            var container = d3.select(this),\n                that = this;\n            nv.utils.initSVG(container);\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight1 = nv.utils.availableHeight(height, container, margin)\n                    - (focusEnable ? focusHeight : 0),\n                availableHeight2 = focusHeight - margin2.top - margin2.bottom;\n\n            chart.update = function() { container.transition().duration(transitionDuration).call(chart); };\n            chart.container = this;\n\n            state\n                .setter(stateSetter(data), chart.update)\n                .getter(stateGetter(data))\n                .update();\n\n            // DEPRECATED set state.disabled\n            state.disabled = data.map(function(d) { return !!d.disabled });\n\n            if (!defaultState) {\n                var key;\n                defaultState = {};\n                for (key in state) {\n                    if (state[key] instanceof Array)\n                        defaultState[key] = state[key].slice(0);\n                    else\n                        defaultState[key] = state[key];\n                }\n            }\n\n            // Display No Data message if there's nothing to show.\n            if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) {\n                nv.utils.noData(chart, container)\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            // Setup Scales\n            var dataBars = data.filter(function(d) { return !d.disabled && d.bar });\n            var dataLines = data.filter(function(d) { return !d.bar }); // removed the !d.disabled clause here to fix Issue #240\n\n            if (dataBars.length && !switchYAxisOrder) {\n                x = bars.xScale();\n            } else {\n                x = lines.xScale();\n            }\n\n            x2 = x2Axis.scale();\n\n            // select the scales and series based on the position of the yAxis\n            y1 = switchYAxisOrder ? lines.yScale() : bars.yScale();\n            y2 = switchYAxisOrder ? bars.yScale() : lines.yScale();\n            y3 = switchYAxisOrder ? lines2.yScale() : bars2.yScale();\n            y4 = switchYAxisOrder ? bars2.yScale() : lines2.yScale();\n\n            var series1 = data\n                .filter(function(d) { return !d.disabled && (switchYAxisOrder ? 
!d.bar : d.bar) })\n                .map(function(d) {\n                    return d.values.map(function(d,i) {\n                        return { x: getX(d,i), y: getY(d,i) }\n                    })\n                });\n\n            var series2 = data\n                .filter(function(d) { return !d.disabled && (switchYAxisOrder ? d.bar : !d.bar) })\n                .map(function(d) {\n                    return d.values.map(function(d,i) {\n                        return { x: getX(d,i), y: getY(d,i) }\n                    })\n                });\n\n            x.range([0, availableWidth]);\n\n            x2  .domain(d3.extent(d3.merge(series1.concat(series2)), function(d) { return d.x } ))\n                .range([0, availableWidth]);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-linePlusBar').data([data]);\n            var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-linePlusBar').append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-legendWrap');\n\n            // this is the main chart\n            var focusEnter = gEnter.append('g').attr('class', 'nv-focus');\n            focusEnter.append('g').attr('class', 'nv-x nv-axis');\n            focusEnter.append('g').attr('class', 'nv-y1 nv-axis');\n            focusEnter.append('g').attr('class', 'nv-y2 nv-axis');\n            focusEnter.append('g').attr('class', 'nv-barsWrap');\n            focusEnter.append('g').attr('class', 'nv-linesWrap');\n\n            // context chart is where you can focus in\n            var contextEnter = gEnter.append('g').attr('class', 'nv-context');\n            contextEnter.append('g').attr('class', 'nv-x nv-axis');\n            contextEnter.append('g').attr('class', 'nv-y1 nv-axis');\n            contextEnter.append('g').attr('class', 'nv-y2 nv-axis');\n            contextEnter.append('g').attr('class', 'nv-barsWrap');\n            contextEnter.append('g').attr('class', 'nv-linesWrap');\n            contextEnter.append('g').attr('class', 'nv-brushBackground');\n            contextEnter.append('g').attr('class', 'nv-x nv-brush');\n\n            //============================================================\n            // Legend\n            //------------------------------------------------------------\n\n            if (!showLegend) {\n                g.select('.nv-legendWrap').selectAll('*').remove();\n            } else {\n                var legendWidth = legend.align() ? availableWidth / 2 : availableWidth;\n                var legendXPosition = legend.align() ? legendWidth : 0;\n\n                legend.width(legendWidth);\n\n                g.select('.nv-legendWrap')\n                    .datum(data.map(function(series) {\n                        series.originalKey = series.originalKey === undefined ? series.key : series.originalKey;\n                        if(switchYAxisOrder) {\n                            series.key = series.originalKey + (series.bar ? legendRightAxisHint : legendLeftAxisHint);\n                        } else {\n                            series.key = series.originalKey + (series.bar ? legendLeftAxisHint : legendRightAxisHint);\n                        }\n                        return series;\n                    }))\n                    .call(legend);\n\n                if (!marginTop && legend.height() !== margin.top) {\n                    margin.top = legend.height();\n                    // FIXME: shouldn't this be \"- (focusEnabled ? 
focusHeight : 0)\"?\n                    availableHeight1 = nv.utils.availableHeight(height, container, margin) - focusHeight;\n                }\n\n                g.select('.nv-legendWrap')\n                    .attr('transform', 'translate(' + legendXPosition + ',' + (-margin.top) +')');\n            }\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            //============================================================\n            // Context chart (focus chart) components\n            //------------------------------------------------------------\n\n            // hide or show the focus context chart\n            g.select('.nv-context').style('display', focusEnable ? 'initial' : 'none');\n\n            bars2\n                .width(availableWidth)\n                .height(availableHeight2)\n                .color(data.map(function (d, i) {\n                    return d.color || color(d, i);\n                }).filter(function (d, i) {\n                    return !data[i].disabled && data[i].bar\n                }));\n            lines2\n                .width(availableWidth)\n                .height(availableHeight2)\n                .color(data.map(function (d, i) {\n                    return d.color || color(d, i);\n                }).filter(function (d, i) {\n                    return !data[i].disabled && !data[i].bar\n                }));\n\n            var bars2Wrap = g.select('.nv-context .nv-barsWrap')\n                .datum(dataBars.length ? dataBars : [\n                    {values: []}\n                ]);\n            var lines2Wrap = g.select('.nv-context .nv-linesWrap')\n                .datum(allDisabled(dataLines) ?\n                       [{values: []}] :\n                       dataLines.filter(function(dataLine) {\n                         return !dataLine.disabled;\n                       }));\n\n            g.select('.nv-context')\n                .attr('transform', 'translate(0,' + ( availableHeight1 + margin.bottom + margin2.top) + ')');\n\n            bars2Wrap.transition().call(bars2);\n            lines2Wrap.transition().call(lines2);\n\n            // context (focus chart) axis controls\n            if (focusShowAxisX) {\n                x2Axis\n                    ._ticks( nv.utils.calcTicksX(availableWidth / 100, data))\n                    .tickSize(-availableHeight2, 0);\n                g.select('.nv-context .nv-x.nv-axis')\n                    .attr('transform', 'translate(0,' + y3.range()[0] + ')');\n                g.select('.nv-context .nv-x.nv-axis').transition()\n                    .call(x2Axis);\n            }\n\n            if (focusShowAxisY) {\n                y3Axis\n                    .scale(y3)\n                    ._ticks( availableHeight2 / 36 )\n                    .tickSize( -availableWidth, 0);\n                y4Axis\n                    .scale(y4)\n                    ._ticks( availableHeight2 / 36 )\n                    .tickSize(dataBars.length ? 0 : -availableWidth, 0); // Show the y2 rules only if y1 has none\n\n                g.select('.nv-context .nv-y3.nv-axis')\n                    .style('opacity', dataBars.length ? 1 : 0)\n                    .attr('transform', 'translate(0,' + x2.range()[0] + ')');\n                g.select('.nv-context .nv-y2.nv-axis')\n                    .style('opacity', dataLines.length ? 
1 : 0)\n                    .attr('transform', 'translate(' + x2.range()[1] + ',0)');\n\n                g.select('.nv-context .nv-y1.nv-axis').transition()\n                    .call(y3Axis);\n                g.select('.nv-context .nv-y2.nv-axis').transition()\n                    .call(y4Axis);\n            }\n\n            // Setup Brush\n            brush.x(x2).on('brush', onBrush);\n\n            if (brushExtent) brush.extent(brushExtent);\n\n            var brushBG = g.select('.nv-brushBackground').selectAll('g')\n                .data([brushExtent || brush.extent()]);\n\n            var brushBGenter = brushBG.enter()\n                .append('g');\n\n            brushBGenter.append('rect')\n                .attr('class', 'left')\n                .attr('x', 0)\n                .attr('y', 0)\n                .attr('height', availableHeight2);\n\n            brushBGenter.append('rect')\n                .attr('class', 'right')\n                .attr('x', 0)\n                .attr('y', 0)\n                .attr('height', availableHeight2);\n\n            var gBrush = g.select('.nv-x.nv-brush')\n                .call(brush);\n            gBrush.selectAll('rect')\n                //.attr('y', -5)\n                .attr('height', availableHeight2);\n            gBrush.selectAll('.resize').append('path').attr('d', resizePath);\n\n            //============================================================\n            // Event Handling/Dispatching (in chart's scope)\n            //------------------------------------------------------------\n\n            legend.dispatch.on('stateChange', function(newState) {\n                for (var key in newState)\n                    state[key] = newState[key];\n                dispatch.stateChange(state);\n                chart.update();\n            });\n\n            // Update chart from a state object passed to event handler\n            dispatch.on('changeState', function(e) {\n                if (typeof e.disabled !== 'undefined') {\n                    data.forEach(function(series,i) {\n                        series.disabled = e.disabled[i];\n                    });\n                    state.disabled = e.disabled;\n                }\n                chart.update();\n            });\n\n            //============================================================\n            // Functions\n            //------------------------------------------------------------\n\n            // Taken from crossfilter (http://square.github.com/crossfilter/)\n            function resizePath(d) {\n                var e = +(d == 'e'),\n                    x = e ? 1 : -1,\n                    y = availableHeight2 / 3;\n                return 'M' + (.5 * x) + ',' + y\n                    + 'A6,6 0 0 ' + e + ' ' + (6.5 * x) + ',' + (y + 6)\n                    + 'V' + (2 * y - 6)\n                    + 'A6,6 0 0 ' + e + ' ' + (.5 * x) + ',' + (2 * y)\n                    + 'Z'\n                    + 'M' + (2.5 * x) + ',' + (y + 8)\n                    + 'V' + (2 * y - 8)\n                    + 'M' + (4.5 * x) + ',' + (y + 8)\n                    + 'V' + (2 * y - 8);\n            }\n\n\n            function updateBrushBG() {\n                if (!brush.empty()) brush.extent(brushExtent);\n                brushBG\n                    .data([brush.empty() ? 
x2.domain() : brushExtent])\n                    .each(function(d,i) {\n                        var leftWidth = x2(d[0]) - x2.range()[0],\n                            rightWidth = x2.range()[1] - x2(d[1]);\n                        d3.select(this).select('.left')\n                            .attr('width',  leftWidth < 0 ? 0 : leftWidth);\n\n                        d3.select(this).select('.right')\n                            .attr('x', x2(d[1]))\n                            .attr('width', rightWidth < 0 ? 0 : rightWidth);\n                    });\n            }\n\n            function onBrush() {\n                brushExtent = brush.empty() ? null : brush.extent();\n                extent = brush.empty() ? x2.domain() : brush.extent();\n                dispatch.brush({extent: extent, brush: brush});\n                updateBrushBG();\n\n                // Prepare Main (Focus) Bars and Lines\n                bars\n                    .width(availableWidth)\n                    .height(availableHeight1)\n                    .color(data.map(function(d,i) {\n                        return d.color || color(d, i);\n                    }).filter(function(d,i) { return !data[i].disabled && data[i].bar }));\n\n                lines\n                    .width(availableWidth)\n                    .height(availableHeight1)\n                    .color(data.map(function(d,i) {\n                        return d.color || color(d, i);\n                    }).filter(function(d,i) { return !data[i].disabled && !data[i].bar }));\n\n                var focusBarsWrap = g.select('.nv-focus .nv-barsWrap')\n                    .datum(!dataBars.length ? [{values:[]}] :\n                        dataBars\n                            .map(function(d,i) {\n                                return {\n                                    key: d.key,\n                                    values: d.values.filter(function(d,i) {\n                                        return bars.x()(d,i) >= extent[0] && bars.x()(d,i) <= extent[1];\n                                    })\n                                }\n                            })\n                );\n\n                var focusLinesWrap = g.select('.nv-focus .nv-linesWrap')\n                    .datum(allDisabled(dataLines) ? 
[{values:[]}] :\n                           dataLines\n                           .filter(function(dataLine) { return !dataLine.disabled; })\n                           .map(function(d,i) {\n                                return {\n                                    area: d.area,\n                                    fillOpacity: d.fillOpacity,\n                                    strokeWidth: d.strokeWidth,\n                                    key: d.key,\n                                    values: d.values.filter(function(d,i) {\n                                        return lines.x()(d,i) >= extent[0] && lines.x()(d,i) <= extent[1];\n                                    })\n                                }\n                            })\n                );\n\n                // Update Main (Focus) X Axis\n                if (dataBars.length && !switchYAxisOrder) {\n                    x = bars.xScale();\n                } else {\n                    x = lines.xScale();\n                }\n\n                xAxis\n                    .scale(x)\n                    ._ticks( nv.utils.calcTicksX(availableWidth/100, data) )\n                    .tickSize(-availableHeight1, 0);\n\n                xAxis.domain([Math.ceil(extent[0]), Math.floor(extent[1])]);\n\n                g.select('.nv-x.nv-axis').transition().duration(transitionDuration)\n                    .call(xAxis);\n\n                // Update Main (Focus) Bars and Lines\n                focusBarsWrap.transition().duration(transitionDuration).call(bars);\n                focusLinesWrap.transition().duration(transitionDuration).call(lines);\n\n                // Setup and Update Main (Focus) Y Axes\n                g.select('.nv-focus .nv-x.nv-axis')\n                    .attr('transform', 'translate(0,' + y1.range()[0] + ')');\n\n                y1Axis\n                    .scale(y1)\n                    ._ticks( nv.utils.calcTicksY(availableHeight1/36, data) )\n                    .tickSize(-availableWidth, 0);\n                y2Axis\n                    .scale(y2)\n                    ._ticks( nv.utils.calcTicksY(availableHeight1/36, data) );\n\n                // Show the y2 rules only if y1 has none\n                if(!switchYAxisOrder) {\n                    y2Axis.tickSize(dataBars.length ? 0 : -availableWidth, 0);\n                } else {\n                    y2Axis.tickSize(dataLines.length ? 0 : -availableWidth, 0);\n                }\n\n                // Calculate opacity of the axis\n                var barsOpacity = dataBars.length ? 1 : 0;\n                var linesOpacity = dataLines.length && !allDisabled(dataLines) ? 1 : 0;\n\n                var y1Opacity = switchYAxisOrder ? linesOpacity : barsOpacity;\n                var y2Opacity = switchYAxisOrder ? 
barsOpacity : linesOpacity;\n\n                g.select('.nv-focus .nv-y1.nv-axis')\n                    .style('opacity', y1Opacity);\n                g.select('.nv-focus .nv-y2.nv-axis')\n                    .style('opacity', y2Opacity)\n                    .attr('transform', 'translate(' + x.range()[1] + ',0)');\n\n                g.select('.nv-focus .nv-y1.nv-axis').transition().duration(transitionDuration)\n                    .call(y1Axis);\n                g.select('.nv-focus .nv-y2.nv-axis').transition().duration(transitionDuration)\n                    .call(y2Axis);\n            }\n\n            onBrush();\n\n        });\n\n        return chart;\n    }\n\n    //============================================================\n    // Event Handling/Dispatching (out of chart's scope)\n    //------------------------------------------------------------\n\n    lines.dispatch.on('elementMouseover.tooltip', function(evt) {\n        tooltip\n            .duration(100)\n            .valueFormatter(function(d, i) {\n                return getLinesAxis().main.tickFormat()(d, i);\n            })\n            .data(evt)\n            .hidden(false);\n    });\n\n    lines.dispatch.on('elementMouseout.tooltip', function(evt) {\n        tooltip.hidden(true)\n    });\n\n    bars.dispatch.on('elementMouseover.tooltip', function(evt) {\n        evt.value = chart.x()(evt.data);\n        evt['series'] = {\n            value: chart.y()(evt.data),\n            color: evt.color\n        };\n        tooltip\n            .duration(0)\n            .valueFormatter(function(d, i) {\n                return getBarsAxis().main.tickFormat()(d, i);\n            })\n            .data(evt)\n            .hidden(false);\n    });\n\n    bars.dispatch.on('elementMouseout.tooltip', function(evt) {\n        tooltip.hidden(true);\n    });\n\n    bars.dispatch.on('elementMousemove.tooltip', function(evt) {\n        tooltip();\n    });\n\n    //============================================================\n\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    // expose chart's sub-components\n    chart.dispatch = dispatch;\n    chart.legend = legend;\n    chart.lines = lines;\n    chart.lines2 = lines2;\n    chart.bars = bars;\n    chart.bars2 = bars2;\n    chart.xAxis = xAxis;\n    chart.x2Axis = x2Axis;\n    chart.y1Axis = y1Axis;\n    chart.y2Axis = y2Axis;\n    chart.y3Axis = y3Axis;\n    chart.y4Axis = y4Axis;\n    chart.tooltip = tooltip;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        showLegend: {get: function(){return showLegend;}, set: function(_){showLegend=_;}},\n        brushExtent:    {get: function(){return brushExtent;}, set: function(_){brushExtent=_;}},\n        noData:    {get: function(){return noData;}, set: function(_){noData=_;}},\n        focusEnable:    {get: function(){return focusEnable;}, set: function(_){focusEnable=_;}},\n        focusHeight:    {get: function(){return focusHeight;}, set: function(_){focusHeight=_;}},\n        focusShowAxisX:    {get: function(){return focusShowAxisX;}, set: function(_){focusShowAxisX=_;}},\n        focusShowAxisY:    {get: function(){return focusShowAxisY;}, 
set: function(_){focusShowAxisY=_;}},\n        legendLeftAxisHint:    {get: function(){return legendLeftAxisHint;}, set: function(_){legendLeftAxisHint=_;}},\n        legendRightAxisHint:    {get: function(){return legendRightAxisHint;}, set: function(_){legendRightAxisHint=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            if (_.top !== undefined) {\n                margin.top = _.top;\n                marginTop = _.top;\n            }\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? _.left   : margin.left;\n        }},\n        focusMargin: {get: function(){return margin2;}, set: function(_){\n            margin2.top    = _.top    !== undefined ? _.top    : margin2.top;\n            margin2.right  = _.right  !== undefined ? _.right  : margin2.right;\n            margin2.bottom = _.bottom !== undefined ? _.bottom : margin2.bottom;\n            margin2.left   = _.left   !== undefined ? _.left   : margin2.left;\n        }},\n        duration: {get: function(){return transitionDuration;}, set: function(_){\n            transitionDuration = _;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n            legend.color(color);\n        }},\n        x: {get: function(){return getX;}, set: function(_){\n            getX = _;\n            lines.x(_);\n            lines2.x(_);\n            bars.x(_);\n            bars2.x(_);\n        }},\n        y: {get: function(){return getY;}, set: function(_){\n            getY = _;\n            lines.y(_);\n            lines2.y(_);\n            bars.y(_);\n            bars2.y(_);\n        }},\n        switchYAxisOrder:    {get: function(){return switchYAxisOrder;}, set: function(_){\n            // Switch the tick format for the yAxis\n            if(switchYAxisOrder !== _) {\n                var y1 = y1Axis;\n                y1Axis = y2Axis;\n                y2Axis = y1;\n\n                var y3 = y3Axis;\n                y3Axis = y4Axis;\n                y4Axis = y3;\n            }\n            switchYAxisOrder=_;\n\n            y1Axis.orient('left');\n            y2Axis.orient('right');\n            y3Axis.orient('left');\n            y4Axis.orient('right');\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, lines);\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","\nnv.models.multiBar = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0}\n        , width = 960\n        , height = 500\n        , x = d3.scale.ordinal()\n        , y = d3.scale.linear()\n        , id = Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one\n        , container = null\n        , getX = function(d) { return d.x }\n        , getY = function(d) { return d.y }\n        , forceY = [0] // 0 is forced by default.. this makes sense for the majority of bar graphs... 
user can always do chart.forceY([]) to remove\n        , clipEdge = true\n        , stacked = false\n        , stackOffset = 'zero' // options include 'silhouette', 'wiggle', 'expand', 'zero', or a custom function\n        , color = nv.utils.defaultColor()\n        , hideable = false\n        , barColor = null // adding the ability to set the color for each rather than the whole group\n        , disabled // used in conjunction with barColor to communicate from multiBarHorizontalChart what series are disabled\n        , duration = 500\n        , xDomain\n        , yDomain\n        , xRange\n        , yRange\n        , groupSpacing = 0.1\n        , fillOpacity = 0.75\n        , dispatch = d3.dispatch('chartClick', 'elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout', 'elementMousemove', 'renderEnd')\n        ;\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var x0, y0 //used to store previous scales\n        , renderWatch = nv.utils.renderWatch(dispatch, duration)\n        ;\n\n    var last_datalength = 0;\n\n    function chart(selection) {\n        renderWatch.reset();\n        selection.each(function(data) {\n            var availableWidth = width - margin.left - margin.right,\n                availableHeight = height - margin.top - margin.bottom;\n\n            container = d3.select(this);\n            nv.utils.initSVG(container);\n            var nonStackableCount = 0;\n            // This function defines the requirements for render complete\n            var endFn = function(d, i) {\n                if (d.series === data.length - 1 && i === data[0].values.length - 1)\n                    return true;\n                return false;\n            };\n\n            if(hideable && data.length) hideable = [{\n                values: data[0].values.map(function(d) {\n                        return {\n                            x: d.x,\n                            y: 0,\n                            series: d.series,\n                            size: 0.01\n                        };}\n                )}];\n\n            if (stacked) {\n                var parsed = d3.layout.stack()\n                    .offset(stackOffset)\n                    .values(function(d){ return d.values })\n                    .y(getY)\n                (!data.length && hideable ? 
hideable : data);\n\n                parsed.forEach(function(series, i){\n                    // if series is non-stackable, use un-parsed data\n                    if (series.nonStackable) {\n                        data[i].nonStackableSeries = nonStackableCount++;\n                        parsed[i] = data[i];\n                    } else {\n                        // don't stack this series on top of the nonStackable series\n                        if (i > 0 && parsed[i - 1].nonStackable){\n                            parsed[i].values.map(function(d,j){\n                                d.y0 -= parsed[i - 1].values[j].y;\n                                d.y1 = d.y0 + d.y;\n                            });\n                        }\n                    }\n                });\n                data = parsed;\n            }\n            //add series index and key to each data point for reference\n            data.forEach(function(series, i) {\n                series.values.forEach(function(point) {\n                    point.series = i;\n                    point.key = series.key;\n                });\n            });\n\n            // HACK for negative value stacking\n            if (stacked && data.length > 0) {\n                data[0].values.map(function(d,i) {\n                    var posBase = 0, negBase = 0;\n                    data.map(function(d, idx) {\n                        if (!data[idx].nonStackable) {\n                            var f = d.values[i]\n                            f.size = Math.abs(f.y);\n                            if (f.y<0)  {\n                                f.y1 = negBase;\n                                negBase = negBase - f.size;\n                            } else\n                            {\n                                f.y1 = f.size + posBase;\n                                posBase = posBase + f.size;\n                            }\n                        }\n\n                    });\n                });\n            }\n            // Setup Scales\n            // remap and flatten the data for use in calculating the scales' domains\n            var seriesData = (xDomain && yDomain) ? [] : // if we know xDomain and yDomain, no need to calculate\n                data.map(function(d, idx) {\n                    return d.values.map(function(d,i) {\n                        return { x: getX(d,i), y: getY(d,i), y0: d.y0, y1: d.y1, idx:idx }\n                    })\n                });\n\n            x.domain(xDomain || d3.merge(seriesData).map(function(d) { return d.x }))\n                .rangeBands(xRange || [0, availableWidth], groupSpacing);\n\n            y.domain(yDomain || d3.extent(d3.merge(seriesData).map(function(d) {\n                var domain = d.y;\n                // increase the domain range if this series is stackable\n                if (stacked && !data[d.idx].nonStackable) {\n                    if (d.y > 0){\n                        domain = d.y1\n                    } else {\n                        domain = d.y1 + d.y\n                    }\n                }\n                return domain;\n            }).concat(forceY)))\n            .range(yRange || [availableHeight, 0]);\n\n            // If the scale's domain doesn't have a range, slightly adjust to make one... 
so a chart can show a single data point\n            if (x.domain()[0] === x.domain()[1])\n                x.domain()[0] ?\n                    x.domain([x.domain()[0] - x.domain()[0] * 0.01, x.domain()[1] + x.domain()[1] * 0.01])\n                    : x.domain([-1,1]);\n\n            if (y.domain()[0] === y.domain()[1])\n                y.domain()[0] ?\n                    y.domain([y.domain()[0] + y.domain()[0] * 0.01, y.domain()[1] - y.domain()[1] * 0.01])\n                    : y.domain([-1,1]);\n\n            x0 = x0 || x;\n            y0 = y0 || y;\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-multibar').data([data]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-multibar');\n            var defsEnter = wrapEnter.append('defs');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-groups');\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            defsEnter.append('clipPath')\n                .attr('id', 'nv-edge-clip-' + id)\n                .append('rect');\n            wrap.select('#nv-edge-clip-' + id + ' rect')\n                .attr('width', availableWidth)\n                .attr('height', availableHeight);\n\n            g.attr('clip-path', clipEdge ? 'url(#nv-edge-clip-' + id + ')' : '');\n\n            var groups = wrap.select('.nv-groups').selectAll('.nv-group')\n                .data(function(d) { return d }, function(d,i) { return i });\n            groups.enter().append('g')\n                .style('stroke-opacity', 1e-6)\n                .style('fill-opacity', 1e-6);\n\n            var exitTransition = renderWatch\n                .transition(groups.exit().selectAll('rect.nv-bar'), 'multibarExit', Math.min(100, duration))\n                .attr('y', function(d, i, j) {\n                    var yVal = y0(0) || 0;\n                    if (stacked) {\n                        if (data[d.series] && !data[d.series].nonStackable) {\n                            yVal = y0(d.y0);\n                        }\n                    }\n                    return yVal;\n                })\n                .attr('height', 0)\n                .remove();\n            if (exitTransition.delay)\n                exitTransition.delay(function(d,i) {\n                    var delay = i * (duration / (last_datalength + 1)) - i;\n                    return delay;\n                });\n            groups\n                .attr('class', function(d,i) { return 'nv-group nv-series-' + i })\n                .classed('hover', function(d) { return d.hover })\n                .style('fill', function(d,i){ return color(d, i) })\n                .style('stroke', function(d,i){ return color(d, i) });\n            groups\n                .style('stroke-opacity', 1)\n                .style('fill-opacity', fillOpacity);\n\n            var bars = groups.selectAll('rect.nv-bar')\n                .data(function(d) { return (hideable && !data.length) ? hideable.values : d.values });\n            bars.exit().remove();\n\n            var barsEnter = bars.enter().append('rect')\n                    .attr('class', function(d,i) { return getY(d,i) < 0 ? 'nv-bar negative' : 'nv-bar positive'})\n                    .attr('x', function(d,i,j) {\n                        return stacked && !data[j].nonStackable ? 
0 : (j * x.rangeBand() / data.length )\n                    })\n                    .attr('y', function(d,i,j) { return y0(stacked && !data[j].nonStackable ? d.y0 : 0) || 0 })\n                    .attr('height', 0)\n                    .attr('width', function(d,i,j) { return x.rangeBand() / (stacked && !data[j].nonStackable ? 1 : data.length) })\n                    .attr('transform', function(d,i) { return 'translate(' + x(getX(d,i)) + ',0)'; })\n                ;\n            bars\n                .style('fill', function(d,i,j){ return color(d, j, i);  })\n                .style('stroke', function(d,i,j){ return color(d, j, i); })\n                .on('mouseover', function(d,i) { //TODO: figure out why j works above, but not here\n                    d3.select(this).classed('hover', true);\n                    dispatch.elementMouseover({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                })\n                .on('mouseout', function(d,i) {\n                    d3.select(this).classed('hover', false);\n                    dispatch.elementMouseout({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                })\n                .on('mousemove', function(d,i) {\n                    dispatch.elementMousemove({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                })\n                .on('click', function(d,i) {\n                    var element = this;\n                    dispatch.elementClick({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\"),\n                        event: d3.event,\n                        element: element\n                    });\n                    d3.event.stopPropagation();\n                })\n                .on('dblclick', function(d,i) {\n                    dispatch.elementDblClick({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                    d3.event.stopPropagation();\n                });\n            bars\n                .attr('class', function(d,i) { return getY(d,i) < 0 ? 
'nv-bar negative' : 'nv-bar positive'})\n                .attr('transform', function(d,i) { return 'translate(' + x(getX(d,i)) + ',0)'; })\n\n            if (barColor) {\n                if (!disabled) disabled = data.map(function() { return true });\n                bars\n                    .style('fill', function(d,i,j) { return d3.rgb(barColor(d,i)).darker(  disabled.map(function(d,i) { return i }).filter(function(d,i){ return !disabled[i]  })[j]   ).toString(); })\n                    .style('stroke', function(d,i,j) { return d3.rgb(barColor(d,i)).darker(  disabled.map(function(d,i) { return i }).filter(function(d,i){ return !disabled[i]  })[j]   ).toString(); });\n            }\n\n            var barSelection =\n                bars.watchTransition(renderWatch, 'multibar', Math.min(250, duration))\n                    .delay(function(d,i) {\n                        return i * duration / data[0].values.length;\n                    });\n            if (stacked){\n                barSelection\n                    .attr('y', function(d,i,j) {\n                        var yVal = 0;\n                        // if stackable, stack it on top of the previous series\n                        if (!data[j].nonStackable) {\n                            yVal = y(d.y1);\n                        } else {\n                            if (getY(d,i) < 0){\n                                yVal = y(0);\n                            } else {\n                                if (y(0) - y(getY(d,i)) < -1){\n                                    yVal = y(0) - 1;\n                                } else {\n                                    yVal = y(getY(d, i)) || 0;\n                                }\n                            }\n                        }\n                        return yVal;\n                    })\n                    .attr('height', function(d,i,j) {\n                        if (!data[j].nonStackable) {\n                            return Math.max(Math.abs(y(d.y+d.y0) - y(d.y0)), 0);\n                        } else {\n                            return Math.max(Math.abs(y(getY(d,i)) - y(0)), 0) || 0;\n                        }\n                    })\n                    .attr('x', function(d,i,j) {\n                        var width = 0;\n                        if (data[j].nonStackable) {\n                            width = d.series * x.rangeBand() / data.length;\n                            if (data.length !== nonStackableCount){\n                                width = data[j].nonStackableSeries * x.rangeBand()/(nonStackableCount*2);\n                            }\n                        }\n                        return width;\n                    })\n                    .attr('width', function(d,i,j){\n                        if (!data[j].nonStackable) {\n                            return x.rangeBand();\n                        } else {\n                            // if all series are nonStackable, take the full width\n                            var width = (x.rangeBand() / nonStackableCount);\n                            // otherwise, the nonStackable bars will only take half the width\n                            // of the x rangeBand\n                            if (data.length !== nonStackableCount) {\n                                width = x.rangeBand()/(nonStackableCount*2);\n                            }\n                            return width;\n                        }\n                    });\n            }\n            else {\n                barSelection\n          
          .attr('x', function(d,i) {\n                        return d.series * x.rangeBand() / data.length;\n                    })\n                    .attr('width', x.rangeBand() / data.length)\n                    .attr('y', function(d,i) {\n                        return getY(d,i) < 0 ?\n                            y(0) :\n                                y(0) - y(getY(d,i)) < 1 ?\n                            y(0) - 1 :\n                            y(getY(d,i)) || 0;\n                    })\n                    .attr('height', function(d,i) {\n                        return Math.max(Math.abs(y(getY(d,i)) - y(0)),1) || 0;\n                    });\n            }\n\n            //store old scales for use in transitions on update\n            x0 = x.copy();\n            y0 = y.copy();\n\n            // keep track of the last data value length for transition calculations\n            if (data[0] && data[0].values) {\n                last_datalength = data[0].values.length;\n            }\n\n        });\n\n        renderWatch.renderEnd('multibar immediate');\n\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:   {get: function(){return width;}, set: function(_){width=_;}},\n        height:  {get: function(){return height;}, set: function(_){height=_;}},\n        x:       {get: function(){return getX;}, set: function(_){getX=_;}},\n        y:       {get: function(){return getY;}, set: function(_){getY=_;}},\n        xScale:  {get: function(){return x;}, set: function(_){x=_;}},\n        yScale:  {get: function(){return y;}, set: function(_){y=_;}},\n        xDomain: {get: function(){return xDomain;}, set: function(_){xDomain=_;}},\n        yDomain: {get: function(){return yDomain;}, set: function(_){yDomain=_;}},\n        xRange:  {get: function(){return xRange;}, set: function(_){xRange=_;}},\n        yRange:  {get: function(){return yRange;}, set: function(_){yRange=_;}},\n        forceY:  {get: function(){return forceY;}, set: function(_){forceY=_;}},\n        stacked: {get: function(){return stacked;}, set: function(_){stacked=_;}},\n        stackOffset: {get: function(){return stackOffset;}, set: function(_){stackOffset=_;}},\n        clipEdge:    {get: function(){return clipEdge;}, set: function(_){clipEdge=_;}},\n        disabled:    {get: function(){return disabled;}, set: function(_){disabled=_;}},\n        id:          {get: function(){return id;}, set: function(_){id=_;}},\n        hideable:    {get: function(){return hideable;}, set: function(_){hideable=_;}},\n        groupSpacing:{get: function(){return groupSpacing;}, set: function(_){groupSpacing=_;}},\n        fillOpacity: {get: function(){return fillOpacity;}, set: function(_){fillOpacity=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }},\n        barColor:  {get: function(){return barColor;}, set: function(_){\n            barColor = _ ? nv.utils.getColor(_) : null;\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","nv.models.multiBarChart = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var multibar = nv.models.multiBar()\n        , xAxis = nv.models.axis()\n        , yAxis = nv.models.axis()\n        , interactiveLayer = nv.interactiveGuideline()\n        , legend = nv.models.legend()\n        , controls = nv.models.legend()\n        , tooltip = nv.models.tooltip()\n        ;\n\n    var margin = {top: 30, right: 20, bottom: 50, left: 60}\n        , marginTop = null\n        , width = null\n        , height = null\n        , color = nv.utils.defaultColor()\n        , showControls = true\n        , controlLabels = {}\n        , showLegend = true\n        , showXAxis = true\n        , showYAxis = true\n        , rightAlignYAxis = false\n        , reduceXTicks = true // if false a tick will show for every data point\n        , staggerLabels = false\n        , wrapLabels = false\n        , rotateLabels = 0\n        , x //can be accessed via chart.xScale()\n        , y //can be accessed via chart.yScale()\n        , state = nv.utils.state()\n        , defaultState = null\n        , noData = null\n        , dispatch = d3.dispatch('stateChange', 'changeState', 'renderEnd')\n        , controlWidth = function() { return showControls ? 180 : 0 }\n        , duration = 250\n        , useInteractiveGuideline = false\n        ;\n\n    state.stacked = false; // DEPRECATED Maintained for backward compatibility\n\n    multibar.stacked(false);\n    xAxis\n        .orient('bottom')\n        .tickPadding(7)\n        .showMaxMin(false)\n        .tickFormat(function(d) { return d })\n    ;\n    yAxis\n        .orient((rightAlignYAxis) ? 'right' : 'left')\n        .tickFormat(d3.format(',.1f'))\n    ;\n\n    tooltip\n        .duration(0)\n        .valueFormatter(function(d, i) {\n            return yAxis.tickFormat()(d, i);\n        })\n        .headerFormatter(function(d, i) {\n            return xAxis.tickFormat()(d, i);\n        });\n\n    interactiveLayer.tooltip\n        .valueFormatter(function(d, i) {\n            return d == null ? \"N/A\" : yAxis.tickFormat()(d, i);\n        })\n        .headerFormatter(function(d, i) {\n            return xAxis.tickFormat()(d, i);\n        });\n\n    interactiveLayer.tooltip\n        .duration(0)\n        .valueFormatter(function(d, i) {\n            return yAxis.tickFormat()(d, i);\n        })\n        .headerFormatter(function(d, i) {\n            return xAxis.tickFormat()(d, i);\n        });\n\n    controls.updateState(false);\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch);\n    var stacked = false;\n\n    var stateGetter = function(data) {\n        return function(){\n            return {\n                active: data.map(function(d) { return !d.disabled }),\n                stacked: stacked\n            };\n        }\n    };\n\n    var stateSetter = function(data) {\n        return function(state) {\n            if (state.stacked !== undefined)\n                stacked = state.stacked;\n            if (state.active !== undefined)\n                data.forEach(function(series,i) {\n                    series.disabled = !state.active[i];\n                });\n        }\n    };\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(multibar);\n        if (showXAxis) renderWatch.models(xAxis);\n        if (showYAxis) renderWatch.models(yAxis);\n\n        selection.each(function(data) {\n            var container = d3.select(this),\n                that = this;\n            nv.utils.initSVG(container);\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            chart.update = function() {\n                if (duration === 0)\n                    container.call(chart);\n                else\n                    container.transition()\n                        .duration(duration)\n                        .call(chart);\n            };\n            chart.container = this;\n\n            state\n                .setter(stateSetter(data), chart.update)\n                .getter(stateGetter(data))\n                .update();\n\n            // DEPRECATED set state.disabled\n            state.disabled = data.map(function(d) { return !!d.disabled });\n\n            if (!defaultState) {\n                var key;\n                defaultState = {};\n                for (key in state) {\n                    if (state[key] instanceof Array)\n                        defaultState[key] = state[key].slice(0);\n                    else\n                        defaultState[key] = state[key];\n                }\n            }\n\n            // Display noData message if there's nothing to show.\n            if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) {\n                nv.utils.noData(chart, container)\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            // Setup Scales\n            x = multibar.xScale();\n            y = multibar.yScale();\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-multiBarWithLegend').data([data]);\n            var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-multiBarWithLegend').append('g');\n            var g = 
wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-x nv-axis');\n            gEnter.append('g').attr('class', 'nv-y nv-axis');\n            gEnter.append('g').attr('class', 'nv-barsWrap');\n            gEnter.append('g').attr('class', 'nv-legendWrap');\n            gEnter.append('g').attr('class', 'nv-controlsWrap');\n            gEnter.append('g').attr('class', 'nv-interactive');\n\n            // Legend\n            if (!showLegend) {\n                g.select('.nv-legendWrap').selectAll('*').remove();\n            } else {\n                legend.width(availableWidth - controlWidth());\n\n                g.select('.nv-legendWrap')\n                    .datum(data)\n                    .call(legend);\n\n                if (!marginTop && legend.height() !== margin.top) {\n                    margin.top = legend.height();\n                    availableHeight = nv.utils.availableHeight(height, container, margin);\n                }\n\n                g.select('.nv-legendWrap')\n                    .attr('transform', 'translate(' + controlWidth() + ',' + (-margin.top) +')');\n            }\n\n            // Controls\n            if (!showControls) {\n                 g.select('.nv-controlsWrap').selectAll('*').remove();\n            } else {\n                var controlsData = [\n                    { key: controlLabels.grouped || 'Grouped', disabled: multibar.stacked() },\n                    { key: controlLabels.stacked || 'Stacked', disabled: !multibar.stacked() }\n                ];\n\n                controls.width(controlWidth()).color(['#444', '#444', '#444']);\n                g.select('.nv-controlsWrap')\n                    .datum(controlsData)\n                    .attr('transform', 'translate(0,' + (-margin.top) +')')\n                    .call(controls);\n            }\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n            if (rightAlignYAxis) {\n                g.select(\".nv-y.nv-axis\")\n                    .attr(\"transform\", \"translate(\" + availableWidth + \",0)\");\n            }\n\n            // Main Chart Component(s)\n            multibar\n                .disabled(data.map(function(series) { return series.disabled }))\n                .width(availableWidth)\n                .height(availableHeight)\n                .color(data.map(function(d,i) {\n                    return d.color || color(d, i);\n                }).filter(function(d,i) { return !data[i].disabled }));\n\n\n            var barsWrap = g.select('.nv-barsWrap')\n                .datum(data.filter(function(d) { return !d.disabled }));\n\n            barsWrap.call(multibar);\n\n            // Setup Axes\n            if (showXAxis) {\n                xAxis\n                    .scale(x)\n                    ._ticks( nv.utils.calcTicksX(availableWidth/100, data) )\n                    .tickSize(-availableHeight, 0);\n\n                g.select('.nv-x.nv-axis')\n                    .attr('transform', 'translate(0,' + y.range()[0] + ')');\n                g.select('.nv-x.nv-axis')\n                    .call(xAxis);\n\n                var xTicks = g.select('.nv-x.nv-axis > g').selectAll('g');\n\n                xTicks\n                    .selectAll('line, text')\n                    .style('opacity', 1)\n\n                if (staggerLabels) {\n                    var getTranslate = function(x,y) {\n                        return \"translate(\" + x + \",\" + y + \")\";\n                    };\n\n                    var staggerUp = 5, 
staggerDown = 17;  //pixels to stagger by\n                    // Issue #140\n                    xTicks\n                        .selectAll(\"text\")\n                        .attr('transform', function(d,i,j) {\n                            return  getTranslate(0, (j % 2 == 0 ? staggerUp : staggerDown));\n                        });\n\n                    var totalInBetweenTicks = d3.selectAll(\".nv-x.nv-axis .nv-wrap g g text\")[0].length;\n                    g.selectAll(\".nv-x.nv-axis .nv-axisMaxMin text\")\n                        .attr(\"transform\", function(d,i) {\n                            return getTranslate(0, (i === 0 || totalInBetweenTicks % 2 !== 0) ? staggerDown : staggerUp);\n                        });\n                }\n\n                if (wrapLabels) {\n                    g.selectAll('.tick text')\n                        .call(nv.utils.wrapTicks, chart.xAxis.rangeBand())\n                }\n\n                if (reduceXTicks)\n                    xTicks\n                        .filter(function(d,i) {\n                            return i % Math.ceil(data[0].values.length / (availableWidth / 100)) !== 0;\n                        })\n                        .selectAll('text, line')\n                        .style('opacity', 0);\n\n                if(rotateLabels)\n                    xTicks\n                        .selectAll('.tick text')\n                        .attr('transform', 'rotate(' + rotateLabels + ' 0,0)')\n                        .style('text-anchor', rotateLabels > 0 ? 'start' : 'end');\n\n                g.select('.nv-x.nv-axis').selectAll('g.nv-axisMaxMin text')\n                    .style('opacity', 1);\n            }\n\n            if (showYAxis) {\n                yAxis\n                    .scale(y)\n                    ._ticks( nv.utils.calcTicksY(availableHeight/36, data) )\n                    .tickSize( -availableWidth, 0);\n\n                g.select('.nv-y.nv-axis')\n                    .call(yAxis);\n            }\n\n            //Set up interactive layer\n            if (useInteractiveGuideline) {\n                interactiveLayer\n                    .width(availableWidth)\n                    .height(availableHeight)\n                    .margin({left:margin.left, top:margin.top})\n                    .svgContainer(container)\n                    .xScale(x);\n                wrap.select(\".nv-interactive\").call(interactiveLayer);\n            }\n\n            //============================================================\n            // Event Handling/Dispatching (in chart's scope)\n            //------------------------------------------------------------\n\n            legend.dispatch.on('stateChange', function(newState) {\n                for (var key in newState)\n                    state[key] = newState[key];\n                dispatch.stateChange(state);\n                chart.update();\n            });\n\n            controls.dispatch.on('legendClick', function(d,i) {\n                if (!d.disabled) return;\n                controlsData = controlsData.map(function(s) {\n                    s.disabled = true;\n                    return s;\n                });\n                d.disabled = false;\n\n                switch (d.key) {\n                    case 'Grouped':\n                    case controlLabels.grouped:\n                        multibar.stacked(false);\n                        break;\n                    case 'Stacked':\n                    case controlLabels.stacked:\n                        
multibar.stacked(true);\n                        break;\n                }\n\n                state.stacked = multibar.stacked();\n                dispatch.stateChange(state);\n                chart.update();\n            });\n\n            // Update chart from a state object passed to event handler\n            dispatch.on('changeState', function(e) {\n                if (typeof e.disabled !== 'undefined') {\n                    data.forEach(function(series,i) {\n                        series.disabled = e.disabled[i];\n                    });\n                    state.disabled = e.disabled;\n                }\n                if (typeof e.stacked !== 'undefined') {\n                    multibar.stacked(e.stacked);\n                    state.stacked = e.stacked;\n                    stacked = e.stacked;\n                }\n                chart.update();\n            });\n\n            if (useInteractiveGuideline) {\n                interactiveLayer.dispatch.on('elementMousemove', function(e) {\n                    if (e.pointXValue == undefined) return;\n\n                    var singlePoint, pointIndex, pointXLocation, xValue, allData = [];\n                    data\n                        .filter(function(series, i) {\n                            series.seriesIndex = i;\n                            return !series.disabled;\n                        })\n                        .forEach(function(series,i) {\n                            pointIndex = x.domain().indexOf(e.pointXValue)\n\n                            var point = series.values[pointIndex];\n                            if (point === undefined) return;\n\n                            xValue = point.x;\n                            if (singlePoint === undefined) singlePoint = point;\n                            if (pointXLocation === undefined) pointXLocation = e.mouseX\n                            allData.push({\n                                key: series.key,\n                                value: chart.y()(point, pointIndex),\n                                color: color(series,series.seriesIndex),\n                                data: series.values[pointIndex]\n                            });\n                        });\n\n                    interactiveLayer.tooltip\n                        .data({\n                            value: xValue,\n                            index: pointIndex,\n                            series: allData\n                        })();\n\n                    interactiveLayer.renderGuideLine(pointXLocation);\n                });\n\n                interactiveLayer.dispatch.on(\"elementMouseout\",function(e) {\n                    interactiveLayer.tooltip.hidden(true);\n                });\n            }\n            else {\n                multibar.dispatch.on('elementMouseover.tooltip', function(evt) {\n                    evt.value = chart.x()(evt.data);\n                    evt['series'] = {\n                        key: evt.data.key,\n                        value: chart.y()(evt.data),\n                        color: evt.color\n                    };\n                    tooltip.data(evt).hidden(false);\n                });\n\n                multibar.dispatch.on('elementMouseout.tooltip', function(evt) {\n                    tooltip.hidden(true);\n                });\n\n                multibar.dispatch.on('elementMousemove.tooltip', function(evt) {\n                    tooltip();\n                });\n            }\n        });\n\n        renderWatch.renderEnd('multibarchart immediate');\n 
       return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    // expose chart's sub-components\n    chart.dispatch = dispatch;\n    chart.multibar = multibar;\n    chart.legend = legend;\n    chart.controls = controls;\n    chart.xAxis = xAxis;\n    chart.yAxis = yAxis;\n    chart.state = state;\n    chart.tooltip = tooltip;\n    chart.interactiveLayer = interactiveLayer;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        showLegend: {get: function(){return showLegend;}, set: function(_){showLegend=_;}},\n        showControls: {get: function(){return showControls;}, set: function(_){showControls=_;}},\n        controlLabels: {get: function(){return controlLabels;}, set: function(_){controlLabels=_;}},\n        showXAxis:      {get: function(){return showXAxis;}, set: function(_){showXAxis=_;}},\n        showYAxis:    {get: function(){return showYAxis;}, set: function(_){showYAxis=_;}},\n        defaultState:    {get: function(){return defaultState;}, set: function(_){defaultState=_;}},\n        noData:    {get: function(){return noData;}, set: function(_){noData=_;}},\n        reduceXTicks:    {get: function(){return reduceXTicks;}, set: function(_){reduceXTicks=_;}},\n        rotateLabels:    {get: function(){return rotateLabels;}, set: function(_){rotateLabels=_;}},\n        staggerLabels:    {get: function(){return staggerLabels;}, set: function(_){staggerLabels=_;}},\n        wrapLabels:   {get: function(){return wrapLabels;}, set: function(_){wrapLabels=!!_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            if (_.top !== undefined) {\n                margin.top = _.top;\n                marginTop = _.top;\n            }\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? _.left   : margin.left;\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            multibar.duration(duration);\n            xAxis.duration(duration);\n            yAxis.duration(duration);\n            renderWatch.reset(duration);\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n            legend.color(color);\n        }},\n        rightAlignYAxis: {get: function(){return rightAlignYAxis;}, set: function(_){\n            rightAlignYAxis = _;\n            yAxis.orient( rightAlignYAxis ? 
'right' : 'left');\n        }},\n        useInteractiveGuideline: {get: function(){return useInteractiveGuideline;}, set: function(_){\n            useInteractiveGuideline = _;\n        }},\n        barColor:  {get: function(){return multibar.barColor;}, set: function(_){\n            multibar.barColor(_);\n            legend.color(function(d,i) {return d3.rgb('#ccc').darker(i * 1.5).toString();})\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, multibar);\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","\nnv.models.multiBarHorizontal = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0}\n        , width = 960\n        , height = 500\n        , id = Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one\n        , container = null\n        , x = d3.scale.ordinal()\n        , y = d3.scale.linear()\n        , getX = function(d) { return d.x }\n        , getY = function(d) { return d.y }\n        , getYerr = function(d) { return d.yErr }\n        , forceY = [0] // 0 is forced by default.. this makes sense for the majority of bar graphs... user can always do chart.forceY([]) to remove\n        , color = nv.utils.defaultColor()\n        , barColor = null // adding the ability to set the color for each rather than the whole group\n        , disabled // used in conjunction with barColor to communicate from multiBarHorizontalChart what series are disabled\n        , stacked = false\n        , showValues = false\n        , showBarLabels = false\n        , valuePadding = 60\n        , groupSpacing = 0.1\n        , fillOpacity = 0.75\n        , valueFormat = d3.format(',.2f')\n        , delay = 1200\n        , xDomain\n        , yDomain\n        , xRange\n        , yRange\n        , duration = 250\n        , dispatch = d3.dispatch('chartClick', 'elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout', 'elementMousemove', 'renderEnd')\n        ;\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var x0, y0; //used to store previous scales\n    var renderWatch = nv.utils.renderWatch(dispatch, duration);\n\n    function chart(selection) {\n        renderWatch.reset();\n        selection.each(function(data) {\n            var availableWidth = width - margin.left - margin.right,\n                availableHeight = height - margin.top - margin.bottom;\n\n            container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            if (stacked)\n                data = d3.layout.stack()\n                    .offset('zero')\n                    .values(function(d){ return d.values })\n                    .y(getY)\n                (data);\n\n            //add series index and key to each data point for reference\n            data.forEach(function(series, i) {\n                series.values.forEach(function(point) {\n                    point.series = i;\n                    point.key = series.key;\n                });\n            });\n\n            // HACK for negative value stacking\n            if (stacked)\n                data[0].values.map(function(d,i) {\n                    var posBase = 0, negBase = 0;\n                    data.map(function(d) {\n                        
var f = d.values[i]\n                        f.size = Math.abs(f.y);\n                        if (f.y<0)  {\n                            f.y1 = negBase - f.size;\n                            negBase = negBase - f.size;\n                        } else\n                        {\n                            f.y1 = posBase;\n                            posBase = posBase + f.size;\n                        }\n                    });\n                });\n\n            // Setup Scales\n            // remap and flatten the data for use in calculating the scales' domains\n            var seriesData = (xDomain && yDomain) ? [] : // if we know xDomain and yDomain, no need to calculate\n                data.map(function(d) {\n                    return d.values.map(function(d,i) {\n                        return { x: getX(d,i), y: getY(d,i), y0: d.y0, y1: d.y1 }\n                    })\n                });\n\n            x.domain(xDomain || d3.merge(seriesData).map(function(d) { return d.x }))\n                .rangeBands(xRange || [0, availableHeight], groupSpacing);\n\n            y.domain(yDomain || d3.extent(d3.merge(seriesData).map(function(d) { return stacked ? (d.y > 0 ? d.y1 + d.y : d.y1 ) : d.y }).concat(forceY)))\n\n            if (showValues && !stacked)\n                y.range(yRange || [(y.domain()[0] < 0 ? valuePadding : 0), availableWidth - (y.domain()[1] > 0 ? valuePadding : 0) ]);\n            else\n                y.range(yRange || [0, availableWidth]);\n\n            x0 = x0 || x;\n            y0 = y0 || d3.scale.linear().domain(y.domain()).range([y(0),y(0)]);\n\n            // Setup containers and skeleton of chart\n            var wrap = d3.select(this).selectAll('g.nv-wrap.nv-multibarHorizontal').data([data]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-multibarHorizontal');\n            var defsEnter = wrapEnter.append('defs');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-groups');\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            var groups = wrap.select('.nv-groups').selectAll('.nv-group')\n                .data(function(d) { return d }, function(d,i) { return i });\n            groups.enter().append('g')\n                .style('stroke-opacity', 1e-6)\n                .style('fill-opacity', 1e-6);\n            groups.exit().watchTransition(renderWatch, 'multibarhorizontal: exit groups')\n                .style('stroke-opacity', 1e-6)\n                .style('fill-opacity', 1e-6)\n                .remove();\n            groups\n                .attr('class', function(d,i) { return 'nv-group nv-series-' + i })\n                .classed('hover', function(d) { return d.hover })\n                .style('fill', function(d,i){ return color(d, i) })\n                .style('stroke', function(d,i){ return color(d, i) });\n            groups.watchTransition(renderWatch, 'multibarhorizontal: groups')\n                .style('stroke-opacity', 1)\n                .style('fill-opacity', fillOpacity);\n\n            var bars = groups.selectAll('g.nv-bar')\n                .data(function(d) { return d.values });\n            bars.exit().remove();\n\n            var barsEnter = bars.enter().append('g')\n                .attr('transform', function(d,i,j) {\n                    return 'translate(' + y0(stacked ? d.y0 : 0) + ',' + (stacked ? 
0 : (j * x.rangeBand() / data.length ) + x(getX(d,i))) + ')'\n                });\n\n            barsEnter.append('rect')\n                .attr('width', 0)\n                .attr('height', x.rangeBand() / (stacked ? 1 : data.length) )\n\n            bars\n                .on('mouseover', function(d,i) { //TODO: figure out why j works above, but not here\n                    d3.select(this).classed('hover', true);\n                    dispatch.elementMouseover({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                })\n                .on('mouseout', function(d,i) {\n                    d3.select(this).classed('hover', false);\n                    dispatch.elementMouseout({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                })\n                .on('mousemove', function(d,i) {\n                    dispatch.elementMousemove({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                })\n                .on('click', function(d,i) {\n                    var element = this;\n                    dispatch.elementClick({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\"),\n                        event: d3.event,\n                        element: element\n                    });\n                    d3.event.stopPropagation();\n                })\n                .on('dblclick', function(d,i) {\n                    dispatch.elementDblClick({\n                        data: d,\n                        index: i,\n                        color: d3.select(this).style(\"fill\")\n                    });\n                    d3.event.stopPropagation();\n                });\n\n            if (getYerr(data[0],0)) {\n                barsEnter.append('polyline');\n\n                bars.select('polyline')\n                    .attr('fill', 'none')\n                    .attr('points', function(d,i) {\n                        var xerr = getYerr(d,i)\n                            , mid = 0.8 * x.rangeBand() / ((stacked ? 1 : data.length) * 2);\n                        xerr = xerr.length ? xerr : [-Math.abs(xerr), Math.abs(xerr)];\n                        xerr = xerr.map(function(e) { return y(e) - y(0); });\n                        var a = [[xerr[0],-mid], [xerr[0],mid], [xerr[0],0], [xerr[1],0], [xerr[1],-mid], [xerr[1],mid]];\n                        return a.map(function (path) { return path.join(',') }).join(' ');\n                    })\n                    .attr('transform', function(d,i) {\n                        var mid = x.rangeBand() / ((stacked ? 1 : data.length) * 2);\n                        return 'translate(' + (getY(d,i) < 0 ? 
0 : y(getY(d,i)) - y(0)) + ', ' + mid + ')'\n                    });\n            }\n\n            barsEnter.append('text');\n\n            if (showValues && !stacked) {\n                bars.select('text')\n                    .attr('text-anchor', function(d,i) { return getY(d,i) < 0 ? 'end' : 'start' })\n                    .attr('y', x.rangeBand() / (data.length * 2))\n                    .attr('dy', '.32em')\n                    .text(function(d,i) {\n                        var t = valueFormat(getY(d,i))\n                            , yerr = getYerr(d,i);\n                        if (yerr === undefined)\n                            return t;\n                        if (!yerr.length)\n                            return t + '±' + valueFormat(Math.abs(yerr));\n                        return t + '+' + valueFormat(Math.abs(yerr[1])) + '-' + valueFormat(Math.abs(yerr[0]));\n                    });\n                bars.watchTransition(renderWatch, 'multibarhorizontal: bars')\n                    .select('text')\n                    .attr('x', function(d,i) { return getY(d,i) < 0 ? -4 : y(getY(d,i)) - y(0) + 4 })\n            } else {\n                bars.selectAll('text').text('');\n            }\n\n            if (showBarLabels && !stacked) {\n                barsEnter.append('text').classed('nv-bar-label',true);\n                bars.select('text.nv-bar-label')\n                    .attr('text-anchor', function(d,i) { return getY(d,i) < 0 ? 'start' : 'end' })\n                    .attr('y', x.rangeBand() / (data.length * 2))\n                    .attr('dy', '.32em')\n                    .text(function(d,i) { return getX(d,i) });\n                bars.watchTransition(renderWatch, 'multibarhorizontal: bars')\n                    .select('text.nv-bar-label')\n                    .attr('x', function(d,i) { return getY(d,i) < 0 ? y(0) - y(getY(d,i)) + 4 : -4 });\n            }\n            else {\n                bars.selectAll('text.nv-bar-label').text('');\n            }\n\n            bars\n                .attr('class', function(d,i) { return getY(d,i) < 0 ? 'nv-bar negative' : 'nv-bar positive'})\n\n            if (barColor) {\n                if (!disabled) disabled = data.map(function() { return true });\n                bars\n                    .style('fill', function(d,i,j) { return d3.rgb(barColor(d,i)).darker(  disabled.map(function(d,i) { return i }).filter(function(d,i){ return !disabled[i]  })[j]   ).toString(); })\n                    .style('stroke', function(d,i,j) { return d3.rgb(barColor(d,i)).darker(  disabled.map(function(d,i) { return i }).filter(function(d,i){ return !disabled[i]  })[j]   ).toString(); });\n            }\n\n            if (stacked)\n                bars.watchTransition(renderWatch, 'multibarhorizontal: bars')\n                    .attr('transform', function(d,i) {\n                        return 'translate(' + y(d.y1) + ',' + x(getX(d,i)) + ')'\n                    })\n                    .select('rect')\n                    .attr('width', function(d,i) {\n                        return Math.abs(y(getY(d,i) + d.y0) - y(d.y0)) || 0\n                    })\n                    .attr('height', x.rangeBand() );\n            else\n                bars.watchTransition(renderWatch, 'multibarhorizontal: bars')\n                    .attr('transform', function(d,i) {\n                        //TODO: stacked must be all positive or all negative, not both?\n                        return 'translate(' +\n                            (getY(d,i) < 0 ? 
y(getY(d,i)) : y(0))\n                            + ',' +\n                            (d.series * x.rangeBand() / data.length\n                                +\n                                x(getX(d,i)) )\n                            + ')'\n                    })\n                    .select('rect')\n                    .attr('height', x.rangeBand() / data.length )\n                    .attr('width', function(d,i) {\n                        return Math.max(Math.abs(y(getY(d,i)) - y(0)),1) || 0\n                    });\n\n            //store old scales for use in transitions on update\n            x0 = x.copy();\n            y0 = y.copy();\n\n        });\n\n        renderWatch.renderEnd('multibarHorizontal immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:   {get: function(){return width;}, set: function(_){width=_;}},\n        height:  {get: function(){return height;}, set: function(_){height=_;}},\n        x:       {get: function(){return getX;}, set: function(_){getX=_;}},\n        y:       {get: function(){return getY;}, set: function(_){getY=_;}},\n        yErr:       {get: function(){return getYerr;}, set: function(_){getYerr=_;}},\n        xScale:  {get: function(){return x;}, set: function(_){x=_;}},\n        yScale:  {get: function(){return y;}, set: function(_){y=_;}},\n        xDomain: {get: function(){return xDomain;}, set: function(_){xDomain=_;}},\n        yDomain: {get: function(){return yDomain;}, set: function(_){yDomain=_;}},\n        xRange:  {get: function(){return xRange;}, set: function(_){xRange=_;}},\n        yRange:  {get: function(){return yRange;}, set: function(_){yRange=_;}},\n        forceY:  {get: function(){return forceY;}, set: function(_){forceY=_;}},\n        stacked: {get: function(){return stacked;}, set: function(_){stacked=_;}},\n        showValues: {get: function(){return showValues;}, set: function(_){showValues=_;}},\n        // this shows the group name, seems pointless?\n        //showBarLabels:    {get: function(){return showBarLabels;}, set: function(_){showBarLabels=_;}},\n        disabled:     {get: function(){return disabled;}, set: function(_){disabled=_;}},\n        id:           {get: function(){return id;}, set: function(_){id=_;}},\n        valueFormat:  {get: function(){return valueFormat;}, set: function(_){valueFormat=_;}},\n        valuePadding: {get: function(){return valuePadding;}, set: function(_){valuePadding=_;}},\n        groupSpacing: {get: function(){return groupSpacing;}, set: function(_){groupSpacing=_;}},\n        fillOpacity:  {get: function(){return fillOpacity;}, set: function(_){fillOpacity=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }},\n        barColor:  {get: function(){return barColor;}, set: function(_){\n            barColor = _ ? nv.utils.getColor(_) : null;\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","\nnv.models.multiBarHorizontalChart = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var multibar = nv.models.multiBarHorizontal()\n        , xAxis = nv.models.axis()\n        , yAxis = nv.models.axis()\n        , legend = nv.models.legend().height(30)\n        , controls = nv.models.legend().height(30)\n        , tooltip = nv.models.tooltip()\n        ;\n\n    var margin = {top: 30, right: 20, bottom: 50, left: 60}\n        , marginTop = null\n        , width = null\n        , height = null\n        , color = nv.utils.defaultColor()\n        , showControls = true\n        , controlLabels = {}\n        , showLegend = true\n        , showXAxis = true\n        , showYAxis = true\n        , stacked = false\n        , x //can be accessed via chart.xScale()\n        , y //can be accessed via chart.yScale()\n        , state = nv.utils.state()\n        , defaultState = null\n        , noData = null\n        , dispatch = d3.dispatch('stateChange', 'changeState','renderEnd')\n        , controlWidth = function() { return showControls ? 180 : 0 }\n        , duration = 250\n        ;\n\n    state.stacked = false; // DEPRECATED Maintained for backward compatibility\n\n    multibar.stacked(stacked);\n\n    xAxis\n        .orient('left')\n        .tickPadding(5)\n        .showMaxMin(false)\n        .tickFormat(function(d) { return d })\n    ;\n    yAxis\n        .orient('bottom')\n        .tickFormat(d3.format(',.1f'))\n    ;\n\n    tooltip\n        .duration(0)\n        .valueFormatter(function(d, i) {\n            return yAxis.tickFormat()(d, i);\n        })\n        .headerFormatter(function(d, i) {\n            return xAxis.tickFormat()(d, i);\n        });\n\n    controls.updateState(false);\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var stateGetter = function(data) {\n        return function(){\n            return {\n                active: data.map(function(d) { return !d.disabled }),\n                stacked: stacked\n            };\n        }\n    };\n\n    var stateSetter = function(data) {\n        return function(state) {\n            if (state.stacked !== undefined)\n                stacked = state.stacked;\n            if (state.active !== undefined)\n                data.forEach(function(series,i) {\n                    series.disabled = !state.active[i];\n                });\n        }\n    };\n\n    var renderWatch = nv.utils.renderWatch(dispatch, duration);\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(multibar);\n        if (showXAxis) renderWatch.models(xAxis);\n        if (showYAxis) renderWatch.models(yAxis);\n\n        selection.each(function(data) {\n            var container = d3.select(this),\n                
that = this;\n            nv.utils.initSVG(container);\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            chart.update = function() { container.transition().duration(duration).call(chart) };\n            chart.container = this;\n\n            stacked = multibar.stacked();\n\n            state\n                .setter(stateSetter(data), chart.update)\n                .getter(stateGetter(data))\n                .update();\n\n            // DEPRECATED set state.disabled\n            state.disabled = data.map(function(d) { return !!d.disabled });\n\n            if (!defaultState) {\n                var key;\n                defaultState = {};\n                for (key in state) {\n                    if (state[key] instanceof Array)\n                        defaultState[key] = state[key].slice(0);\n                    else\n                        defaultState[key] = state[key];\n                }\n            }\n\n            // Display No Data message if there's nothing to show.\n            if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) {\n                nv.utils.noData(chart, container)\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            // Setup Scales\n            x = multibar.xScale();\n            y = multibar.yScale().clamp(true);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-multiBarHorizontalChart').data([data]);\n            var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-multiBarHorizontalChart').append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-x nv-axis');\n            gEnter.append('g').attr('class', 'nv-y nv-axis')\n                .append('g').attr('class', 'nv-zeroLine')\n                .append('line');\n            gEnter.append('g').attr('class', 'nv-barsWrap');\n            gEnter.append('g').attr('class', 'nv-legendWrap');\n            gEnter.append('g').attr('class', 'nv-controlsWrap');\n\n            // Legend\n            if (!showLegend) {\n                g.select('.nv-legendWrap').selectAll('*').remove();\n            } else {\n                legend.width(availableWidth - controlWidth());\n\n                g.select('.nv-legendWrap')\n                    .datum(data)\n                    .call(legend);\n\n                if (!marginTop && legend.height() !== margin.top) {\n                    margin.top = legend.height();\n                    availableHeight = nv.utils.availableHeight(height, container, margin);\n                }\n\n                g.select('.nv-legendWrap')\n                    .attr('transform', 'translate(' + controlWidth() + ',' + (-margin.top) +')');\n            }\n\n            // Controls\n            if (!showControls) {\n                 g.select('.nv-controlsWrap').selectAll('*').remove();\n            } else {\n                var controlsData = [\n                    { key: controlLabels.grouped || 'Grouped', disabled: multibar.stacked() },\n                    { key: controlLabels.stacked || 'Stacked', disabled: !multibar.stacked() }\n                ];\n\n                controls.width(controlWidth()).color(['#444', '#444', '#444']);\n                g.select('.nv-controlsWrap')\n                    
.datum(controlsData)\n                    .attr('transform', 'translate(0,' + (-margin.top) +')')\n                    .call(controls);\n            }\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            // Main Chart Component(s)\n            multibar\n                .disabled(data.map(function(series) { return series.disabled }))\n                .width(availableWidth)\n                .height(availableHeight)\n                .color(data.map(function(d,i) {\n                    return d.color || color(d, i);\n                }).filter(function(d,i) { return !data[i].disabled }));\n\n            var barsWrap = g.select('.nv-barsWrap')\n                .datum(data.filter(function(d) { return !d.disabled }));\n\n            barsWrap.transition().call(multibar);\n\n            // Setup Axes\n            if (showXAxis) {\n                xAxis\n                    .scale(x)\n                    ._ticks( nv.utils.calcTicksY(availableHeight/24, data) )\n                    .tickSize(-availableWidth, 0);\n\n                g.select('.nv-x.nv-axis').call(xAxis);\n\n                var xTicks = g.select('.nv-x.nv-axis').selectAll('g');\n\n                xTicks\n                    .selectAll('line, text');\n            }\n\n            if (showYAxis) {\n                yAxis\n                    .scale(y)\n                    ._ticks( nv.utils.calcTicksX(availableWidth/100, data) )\n                    .tickSize( -availableHeight, 0);\n\n                g.select('.nv-y.nv-axis')\n                    .attr('transform', 'translate(0,' + availableHeight + ')');\n                g.select('.nv-y.nv-axis').call(yAxis);\n            }\n\n            // Zero line\n            g.select(\".nv-zeroLine line\")\n                .attr(\"x1\", y(0))\n                .attr(\"x2\", y(0))\n                .attr(\"y1\", 0)\n                .attr(\"y2\", -availableHeight)\n            ;\n\n            //============================================================\n            // Event Handling/Dispatching (in chart's scope)\n            //------------------------------------------------------------\n\n            legend.dispatch.on('stateChange', function(newState) {\n                for (var key in newState)\n                    state[key] = newState[key];\n                dispatch.stateChange(state);\n                chart.update();\n            });\n\n            controls.dispatch.on('legendClick', function(d,i) {\n                if (!d.disabled) return;\n                controlsData = controlsData.map(function(s) {\n                    s.disabled = true;\n                    return s;\n                });\n                d.disabled = false;\n\n                switch (d.key) {\n                    case 'Grouped':\n                    case controlLabels.grouped:\n                        multibar.stacked(false);\n                        break;\n                    case 'Stacked':\n                    case controlLabels.stacked:\n                        multibar.stacked(true);\n                        break;\n                }\n\n                state.stacked = multibar.stacked();\n                dispatch.stateChange(state);\n                stacked = multibar.stacked();\n\n                chart.update();\n            });\n\n            // Update chart from a state object passed to event handler\n            dispatch.on('changeState', function(e) {\n\n                if (typeof e.disabled !== 'undefined') {\n                    
data.forEach(function(series,i) {\n                        series.disabled = e.disabled[i];\n                    });\n\n                    state.disabled = e.disabled;\n                }\n\n                if (typeof e.stacked !== 'undefined') {\n                    multibar.stacked(e.stacked);\n                    state.stacked = e.stacked;\n                    stacked = e.stacked;\n                }\n\n                chart.update();\n            });\n        });\n        renderWatch.renderEnd('multibar horizontal chart immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Event Handling/Dispatching (out of chart's scope)\n    //------------------------------------------------------------\n\n    multibar.dispatch.on('elementMouseover.tooltip', function(evt) {\n        evt.value = chart.x()(evt.data);\n        evt['series'] = {\n            key: evt.data.key,\n            value: chart.y()(evt.data),\n            color: evt.color\n        };\n        tooltip.data(evt).hidden(false);\n    });\n\n    multibar.dispatch.on('elementMouseout.tooltip', function(evt) {\n        tooltip.hidden(true);\n    });\n\n    multibar.dispatch.on('elementMousemove.tooltip', function(evt) {\n        tooltip();\n    });\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    // expose chart's sub-components\n    chart.dispatch = dispatch;\n    chart.multibar = multibar;\n    chart.legend = legend;\n    chart.controls = controls;\n    chart.xAxis = xAxis;\n    chart.yAxis = yAxis;\n    chart.state = state;\n    chart.tooltip = tooltip;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        showLegend: {get: function(){return showLegend;}, set: function(_){showLegend=_;}},\n        showControls: {get: function(){return showControls;}, set: function(_){showControls=_;}},\n        controlLabels: {get: function(){return controlLabels;}, set: function(_){controlLabels=_;}},\n        showXAxis:      {get: function(){return showXAxis;}, set: function(_){showXAxis=_;}},\n        showYAxis:    {get: function(){return showYAxis;}, set: function(_){showYAxis=_;}},\n        defaultState:    {get: function(){return defaultState;}, set: function(_){defaultState=_;}},\n        noData:    {get: function(){return noData;}, set: function(_){noData=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            if (_.top !== undefined) {\n                margin.top = _.top;\n                marginTop = _.top;\n            }\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n            multibar.duration(duration);\n            xAxis.duration(duration);\n            yAxis.duration(duration);\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n            legend.color(color);\n        }},\n        barColor:  {get: function(){return multibar.barColor;}, set: function(_){\n            multibar.barColor(_);\n            legend.color(function(d,i) {return d3.rgb('#ccc').darker(i * 1.5).toString();})\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, multibar);\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","nv.models.multiChart = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 30, right: 20, bottom: 50, left: 60},\n        marginTop = null,\n        color = nv.utils.defaultColor(),\n        width = null,\n        height = null,\n        showLegend = true,\n        noData = null,\n        yDomain1,\n        yDomain2,\n        getX = function(d) { return d.x },\n        getY = function(d) { return d.y},\n        interpolate = 'linear',\n        useVoronoi = true,\n        interactiveLayer = nv.interactiveGuideline(),\n        useInteractiveGuideline = false,\n        legendRightAxisHint = ' (right axis)',\n        duration = 250\n        ;\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var x = d3.scale.linear(),\n        yScale1 = d3.scale.linear(),\n        yScale2 = d3.scale.linear(),\n\n        lines1 = nv.models.line().yScale(yScale1).duration(duration),\n        lines2 = nv.models.line().yScale(yScale2).duration(duration),\n\n        scatters1 = nv.models.scatter().yScale(yScale1).duration(duration),\n        scatters2 = nv.models.scatter().yScale(yScale2).duration(duration),\n\n        bars1 = nv.models.multiBar().stacked(false).yScale(yScale1).duration(duration),\n        bars2 = nv.models.multiBar().stacked(false).yScale(yScale2).duration(duration),\n\n        stack1 = nv.models.stackedArea().yScale(yScale1).duration(duration),\n        stack2 = nv.models.stackedArea().yScale(yScale2).duration(duration),\n\n        xAxis = nv.models.axis().scale(x).orient('bottom').tickPadding(5).duration(duration),\n        yAxis1 = nv.models.axis().scale(yScale1).orient('left').duration(duration),\n        yAxis2 = nv.models.axis().scale(yScale2).orient('right').duration(duration),\n\n        legend = nv.models.legend().height(30),\n        tooltip = nv.models.tooltip(),\n        dispatch = d3.dispatch();\n\n    var charts = [lines1, lines2, scatters1, scatters2, bars1, bars2, stack1, stack2];\n\n    function chart(selection) {\n        selection.each(function(data) {\n            var container = d3.select(this),\n                that = this;\n            nv.utils.initSVG(container);\n\n            chart.update = function() { container.transition().call(chart); };\n            chart.container = this;\n\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            
var dataLines1 = data.filter(function(d) {return d.type == 'line' && d.yAxis == 1});\n            var dataLines2 = data.filter(function(d) {return d.type == 'line' && d.yAxis == 2});\n            var dataScatters1 = data.filter(function(d) {return d.type == 'scatter' && d.yAxis == 1});\n            var dataScatters2 = data.filter(function(d) {return d.type == 'scatter' && d.yAxis == 2});\n            var dataBars1 =  data.filter(function(d) {return d.type == 'bar'  && d.yAxis == 1});\n            var dataBars2 =  data.filter(function(d) {return d.type == 'bar'  && d.yAxis == 2});\n            var dataStack1 = data.filter(function(d) {return d.type == 'area' && d.yAxis == 1});\n            var dataStack2 = data.filter(function(d) {return d.type == 'area' && d.yAxis == 2});\n\n            // Display noData message if there's nothing to show.\n            if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) {\n                nv.utils.noData(chart, container);\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            var series1 = data.filter(function(d) {return !d.disabled && d.yAxis == 1})\n                .map(function(d) {\n                    return d.values.map(function(d,i) {\n                        return { x: getX(d), y: getY(d) }\n                    })\n                });\n\n            var series2 = data.filter(function(d) {return !d.disabled && d.yAxis == 2})\n                .map(function(d) {\n                    return d.values.map(function(d,i) {\n                        return { x: getX(d), y: getY(d) }\n                    })\n                });\n\n            x   .domain(d3.extent(d3.merge(series1.concat(series2)), function(d) { return d.x }))\n                .range([0, availableWidth]);\n\n            var wrap = container.selectAll('g.wrap.multiChart').data([data]);\n            var gEnter = wrap.enter().append('g').attr('class', 'wrap nvd3 multiChart').append('g');\n\n            gEnter.append('g').attr('class', 'nv-x nv-axis');\n            gEnter.append('g').attr('class', 'nv-y1 nv-axis');\n            gEnter.append('g').attr('class', 'nv-y2 nv-axis');\n            gEnter.append('g').attr('class', 'stack1Wrap');\n            gEnter.append('g').attr('class', 'stack2Wrap');\n            gEnter.append('g').attr('class', 'bars1Wrap');\n            gEnter.append('g').attr('class', 'bars2Wrap');\n            gEnter.append('g').attr('class', 'scatters1Wrap');\n            gEnter.append('g').attr('class', 'scatters2Wrap');\n            gEnter.append('g').attr('class', 'lines1Wrap');\n            gEnter.append('g').attr('class', 'lines2Wrap');\n            gEnter.append('g').attr('class', 'legendWrap');\n            gEnter.append('g').attr('class', 'nv-interactive');\n\n            var g = wrap.select('g');\n\n            var color_array = data.map(function(d,i) {\n                return data[i].color || color(d, i);\n            });\n\n            // Legend\n            if (!showLegend) {\n                g.select('.legendWrap').selectAll('*').remove();\n            } else {\n                var legendWidth = legend.align() ? availableWidth / 2 : availableWidth;\n                var legendXPosition = legend.align() ? 
legendWidth : 0;\n\n                legend.width(legendWidth);\n                legend.color(color_array);\n\n                g.select('.legendWrap')\n                    .datum(data.map(function(series) {\n                        series.originalKey = series.originalKey === undefined ? series.key : series.originalKey;\n                        series.key = series.originalKey + (series.yAxis == 1 ? '' : legendRightAxisHint);\n                        return series;\n                    }))\n                    .call(legend);\n\n                if (!marginTop && legend.height() !== margin.top) {\n                    margin.top = legend.height();\n                    availableHeight = nv.utils.availableHeight(height, container, margin);\n                }\n\n                g.select('.legendWrap')\n                    .attr('transform', 'translate(' + legendXPosition + ',' + (-margin.top) +')');\n            }\n\n            lines1\n                .width(availableWidth)\n                .height(availableHeight)\n                .interpolate(interpolate)\n                .color(color_array.filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 1 && data[i].type == 'line'}));\n            lines2\n                .width(availableWidth)\n                .height(availableHeight)\n                .interpolate(interpolate)\n                .color(color_array.filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 2 && data[i].type == 'line'}));\n            scatters1\n                .width(availableWidth)\n                .height(availableHeight)\n                .color(color_array.filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 1 && data[i].type == 'scatter'}));\n            scatters2\n                .width(availableWidth)\n                .height(availableHeight)\n                .color(color_array.filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 2 && data[i].type == 'scatter'}));\n            bars1\n                .width(availableWidth)\n                .height(availableHeight)\n                .color(color_array.filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 1 && data[i].type == 'bar'}));\n            bars2\n                .width(availableWidth)\n                .height(availableHeight)\n                .color(color_array.filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 2 && data[i].type == 'bar'}));\n            stack1\n                .width(availableWidth)\n                .height(availableHeight)\n                .interpolate(interpolate)\n                .color(color_array.filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 1 && data[i].type == 'area'}));\n            stack2\n                .width(availableWidth)\n                .height(availableHeight)\n                .interpolate(interpolate)\n                .color(color_array.filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 2 && data[i].type == 'area'}));\n\n            g.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            var lines1Wrap = g.select('.lines1Wrap')\n                .datum(dataLines1.filter(function(d){return !d.disabled}));\n            var scatters1Wrap = g.select('.scatters1Wrap')\n                .datum(dataScatters1.filter(function(d){return !d.disabled}));\n            var bars1Wrap = g.select('.bars1Wrap')\n                .datum(dataBars1.filter(function(d){return !d.disabled}));\n            var stack1Wrap = g.select('.stack1Wrap')\n     
           .datum(dataStack1.filter(function(d){return !d.disabled}));\n            var lines2Wrap = g.select('.lines2Wrap')\n                .datum(dataLines2.filter(function(d){return !d.disabled}));\n            var scatters2Wrap = g.select('.scatters2Wrap')\n                .datum(dataScatters2.filter(function(d){return !d.disabled}));\n            var bars2Wrap = g.select('.bars2Wrap')\n                .datum(dataBars2.filter(function(d){return !d.disabled}));\n            var stack2Wrap = g.select('.stack2Wrap')\n                .datum(dataStack2.filter(function(d){return !d.disabled}));\n\n            var extraValue1 = dataStack1.length ? dataStack1.map(function(a){return a.values}).reduce(function(a,b){\n                return a.map(function(aVal,i){return {x: aVal.x, y: aVal.y + b[i].y}})\n            }).concat([{x:0, y:0}]) : [];\n            var extraValue2 = dataStack2.length ? dataStack2.map(function(a){return a.values}).reduce(function(a,b){\n                return a.map(function(aVal,i){return {x: aVal.x, y: aVal.y + b[i].y}})\n            }).concat([{x:0, y:0}]) : [];\n\n            yScale1 .domain(yDomain1 || d3.extent(d3.merge(series1).concat(extraValue1), function(d) { return d.y } ))\n                .range([0, availableHeight]);\n\n            yScale2 .domain(yDomain2 || d3.extent(d3.merge(series2).concat(extraValue2), function(d) { return d.y } ))\n                .range([0, availableHeight]);\n\n            lines1.yDomain(yScale1.domain());\n            scatters1.yDomain(yScale1.domain());\n            bars1.yDomain(yScale1.domain());\n            stack1.yDomain(yScale1.domain());\n\n            lines2.yDomain(yScale2.domain());\n            scatters2.yDomain(yScale2.domain());\n            bars2.yDomain(yScale2.domain());\n            stack2.yDomain(yScale2.domain());\n\n            if(dataStack1.length){d3.transition(stack1Wrap).call(stack1);}\n            if(dataStack2.length){d3.transition(stack2Wrap).call(stack2);}\n\n            if(dataBars1.length){d3.transition(bars1Wrap).call(bars1);}\n            if(dataBars2.length){d3.transition(bars2Wrap).call(bars2);}\n\n            if(dataLines1.length){d3.transition(lines1Wrap).call(lines1);}\n            if(dataLines2.length){d3.transition(lines2Wrap).call(lines2);}\n\n            if(dataScatters1.length){d3.transition(scatters1Wrap).call(scatters1);}\n            if(dataScatters2.length){d3.transition(scatters2Wrap).call(scatters2);}\n\n            xAxis\n                ._ticks( nv.utils.calcTicksX(availableWidth/100, data) )\n                .tickSize(-availableHeight, 0);\n\n            g.select('.nv-x.nv-axis')\n                .attr('transform', 'translate(0,' + availableHeight + ')');\n            d3.transition(g.select('.nv-x.nv-axis'))\n                .call(xAxis);\n\n            yAxis1\n                ._ticks( nv.utils.calcTicksY(availableHeight/36, data) )\n                .tickSize( -availableWidth, 0);\n\n\n            d3.transition(g.select('.nv-y1.nv-axis'))\n                .call(yAxis1);\n\n            yAxis2\n                ._ticks( nv.utils.calcTicksY(availableHeight/36, data) )\n                .tickSize( -availableWidth, 0);\n\n            d3.transition(g.select('.nv-y2.nv-axis'))\n                .call(yAxis2);\n\n            g.select('.nv-y1.nv-axis')\n                .classed('nv-disabled', series1.length ? 
false : true)\n                .attr('transform', 'translate(' + x.range()[0] + ',0)');\n\n            g.select('.nv-y2.nv-axis')\n                .classed('nv-disabled', series2.length ? false : true)\n                .attr('transform', 'translate(' + x.range()[1] + ',0)');\n\n            legend.dispatch.on('stateChange', function(newState) {\n                chart.update();\n            });\n\n            if(useInteractiveGuideline){\n                interactiveLayer\n                    .width(availableWidth)\n                    .height(availableHeight)\n                    .margin({left:margin.left, top:margin.top})\n                    .svgContainer(container)\n                    .xScale(x);\n                wrap.select(\".nv-interactive\").call(interactiveLayer);\n            }\n\n            //============================================================\n            // Event Handling/Dispatching\n            //------------------------------------------------------------\n\n            function mouseover_line(evt) {\n                var yaxis = data[evt.seriesIndex].yAxis === 2 ? yAxis2 : yAxis1;\n                evt.value = evt.point.x;\n                evt.series = {\n                    value: evt.point.y,\n                    color: evt.point.color,\n                    key: evt.series.key\n                };\n                tooltip\n                    .duration(0)\n                    .headerFormatter(function(d, i) {\n                    \treturn xAxis.tickFormat()(d, i);\n                    })\n                    .valueFormatter(function(d, i) {\n                        return yaxis.tickFormat()(d, i);\n                    })\n                    .data(evt)\n                    .hidden(false);\n            }\n\n            function mouseover_scatter(evt) {\n                var yaxis = data[evt.seriesIndex].yAxis === 2 ? yAxis2 : yAxis1;\n                evt.value = evt.point.x;\n                evt.series = {\n                    value: evt.point.y,\n                    color: evt.point.color,\n                    key: evt.series.key\n                };\n                tooltip\n                    .duration(100)\n                    .headerFormatter(function(d, i) {\n                    \treturn xAxis.tickFormat()(d, i);\n                    })\n                    .valueFormatter(function(d, i) {\n                        return yaxis.tickFormat()(d, i);\n                    })\n                    .data(evt)\n                    .hidden(false);\n            }\n\n            function mouseover_stack(evt) {\n                var yaxis = data[evt.seriesIndex].yAxis === 2 ? yAxis2 : yAxis1;\n                evt.point['x'] = stack1.x()(evt.point);\n                evt.point['y'] = stack1.y()(evt.point);\n                tooltip\n                    .duration(0)\n                    .headerFormatter(function(d, i) {\n                    \treturn xAxis.tickFormat()(d, i);\n                    })\n                    .valueFormatter(function(d, i) {\n                        return yaxis.tickFormat()(d, i);\n                    })\n                    .data(evt)\n                    .hidden(false);\n            }\n\n            function mouseover_bar(evt) {\n                var yaxis = data[evt.data.series].yAxis === 2 ? 
yAxis2 : yAxis1;\n\n                evt.value = bars1.x()(evt.data);\n                evt['series'] = {\n                    value: bars1.y()(evt.data),\n                    color: evt.color,\n                    key: evt.data.key\n                };\n                tooltip\n                    .duration(0)\n                    .headerFormatter(function(d, i) {\n                    \treturn xAxis.tickFormat()(d, i);\n                    })\n                    .valueFormatter(function(d, i) {\n                        return yaxis.tickFormat()(d, i);\n                    })\n                    .data(evt)\n                    .hidden(false);\n            }\n\n\n\n            function clearHighlights() {\n              for(var i=0, il=charts.length; i < il; i++){\n                var chart = charts[i];\n                try {\n                  chart.clearHighlights();\n                } catch(e){}\n              }\n            }\n\n            function highlightPoint(serieIndex, pointIndex, b){\n              for(var i=0, il=charts.length; i < il; i++){\n                var chart = charts[i];\n                try {\n                  chart.highlightPoint(serieIndex, pointIndex, b);\n                } catch(e){}\n              }\n            }\n\n            if(useInteractiveGuideline){\n                interactiveLayer.dispatch.on('elementMousemove', function(e) {\n                    clearHighlights();\n                    var singlePoint, pointIndex, pointXLocation, allData = [];\n                    data\n                    .filter(function(series, i) {\n                        series.seriesIndex = i;\n                        return !series.disabled;\n                    })\n                    .forEach(function(series,i) {\n                        var extent = x.domain();\n                        var currentValues = series.values.filter(function(d,i) {\n                            return chart.x()(d,i) >= extent[0] && chart.x()(d,i) <= extent[1];\n                        });\n\n                        pointIndex = nv.interactiveBisect(currentValues, e.pointXValue, chart.x());\n                        var point = currentValues[pointIndex];\n                        var pointYValue = chart.y()(point, pointIndex);\n                        if (pointYValue !== null) {\n                            highlightPoint(i, pointIndex, true);\n                        }\n                        if (point === undefined) return;\n                        if (singlePoint === undefined) singlePoint = point;\n                        if (pointXLocation === undefined) pointXLocation = x(chart.x()(point,pointIndex));\n                        allData.push({\n                            key: series.key,\n                            value: pointYValue,\n                            color: color(series,series.seriesIndex),\n                            data: point,\n                            yAxis: series.yAxis == 2 ? yAxis2 : yAxis1\n                        });\n                    });\n\n                    var defaultValueFormatter = function(d,i) {\n                        var yAxis = allData[i].yAxis;\n                        return d == null ? 
\"N/A\" : yAxis.tickFormat()(d);\n                    };\n\n                    interactiveLayer.tooltip\n                        .headerFormatter(function(d, i) {\n                            return xAxis.tickFormat()(d, i);\n                        })\n                        .valueFormatter(interactiveLayer.tooltip.valueFormatter() || defaultValueFormatter)\n                        .data({\n                            value: chart.x()( singlePoint,pointIndex ),\n                            index: pointIndex,\n                            series: allData\n                        })();\n\n                    interactiveLayer.renderGuideLine(pointXLocation);\n                });\n\n                interactiveLayer.dispatch.on(\"elementMouseout\",function(e) {\n                    clearHighlights();\n                });\n            } else {\n                lines1.dispatch.on('elementMouseover.tooltip', mouseover_line);\n                lines2.dispatch.on('elementMouseover.tooltip', mouseover_line);\n                lines1.dispatch.on('elementMouseout.tooltip', function(evt) {\n                    tooltip.hidden(true)\n                });\n                lines2.dispatch.on('elementMouseout.tooltip', function(evt) {\n                    tooltip.hidden(true)\n                });\n\n                scatters1.dispatch.on('elementMouseover.tooltip', mouseover_scatter);\n                scatters2.dispatch.on('elementMouseover.tooltip', mouseover_scatter);\n                scatters1.dispatch.on('elementMouseout.tooltip', function(evt) {\n                    tooltip.hidden(true)\n                });\n                scatters2.dispatch.on('elementMouseout.tooltip', function(evt) {\n                    tooltip.hidden(true)\n                });\n\n                stack1.dispatch.on('elementMouseover.tooltip', mouseover_stack);\n                stack2.dispatch.on('elementMouseover.tooltip', mouseover_stack);\n                stack1.dispatch.on('elementMouseout.tooltip', function(evt) {\n                    tooltip.hidden(true)\n                });\n                stack2.dispatch.on('elementMouseout.tooltip', function(evt) {\n                    tooltip.hidden(true)\n                });\n\n                bars1.dispatch.on('elementMouseover.tooltip', mouseover_bar);\n                bars2.dispatch.on('elementMouseover.tooltip', mouseover_bar);\n\n                bars1.dispatch.on('elementMouseout.tooltip', function(evt) {\n                    tooltip.hidden(true);\n                });\n                bars2.dispatch.on('elementMouseout.tooltip', function(evt) {\n                    tooltip.hidden(true);\n                });\n                bars1.dispatch.on('elementMousemove.tooltip', function(evt) {\n                    tooltip();\n                });\n                bars2.dispatch.on('elementMousemove.tooltip', function(evt) {\n                    tooltip();\n                });\n            }\n        });\n\n        return chart;\n    }\n\n    //============================================================\n    // Global getters and setters\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.legend = legend;\n    chart.lines1 = lines1;\n    chart.lines2 = lines2;\n    chart.scatters1 = scatters1;\n    chart.scatters2 = scatters2;\n    chart.bars1 = bars1;\n    chart.bars2 = bars2;\n    chart.stack1 = stack1;\n    chart.stack2 = stack2;\n    chart.xAxis = xAxis;\n    chart.yAxis1 = yAxis1;\n    chart.yAxis2 = yAxis2;\n    chart.tooltip = 
tooltip;\n    chart.interactiveLayer = interactiveLayer;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        showLegend: {get: function(){return showLegend;}, set: function(_){showLegend=_;}},\n        yDomain1:      {get: function(){return yDomain1;}, set: function(_){yDomain1=_;}},\n        yDomain2:    {get: function(){return yDomain2;}, set: function(_){yDomain2=_;}},\n        noData:    {get: function(){return noData;}, set: function(_){noData=_;}},\n        interpolate:    {get: function(){return interpolate;}, set: function(_){interpolate=_;}},\n        legendRightAxisHint:    {get: function(){return legendRightAxisHint;}, set: function(_){legendRightAxisHint=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            if (_.top !== undefined) {\n                margin.top = _.top;\n                marginTop = _.top;\n            }\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? _.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }},\n        x: {get: function(){return getX;}, set: function(_){\n            getX = _;\n            lines1.x(_);\n            lines2.x(_);\n            scatters1.x(_);\n            scatters2.x(_);\n            bars1.x(_);\n            bars2.x(_);\n            stack1.x(_);\n            stack2.x(_);\n        }},\n        y: {get: function(){return getY;}, set: function(_){\n            getY = _;\n            lines1.y(_);\n            lines2.y(_);\n            scatters1.y(_);\n            scatters2.y(_);\n            stack1.y(_);\n            stack2.y(_);\n            bars1.y(_);\n            bars2.y(_);\n        }},\n        useVoronoi: {get: function(){return useVoronoi;}, set: function(_){\n            useVoronoi=_;\n            lines1.useVoronoi(_);\n            lines2.useVoronoi(_);\n            stack1.useVoronoi(_);\n            stack2.useVoronoi(_);\n        }},\n\n        useInteractiveGuideline: {get: function(){return useInteractiveGuideline;}, set: function(_){\n            useInteractiveGuideline = _;\n            if (useInteractiveGuideline) {\n                lines1.interactive(false);\n                lines1.useVoronoi(false);\n                lines2.interactive(false);\n                lines2.useVoronoi(false);\n                stack1.interactive(false);\n                stack1.useVoronoi(false);\n                stack2.interactive(false);\n                stack2.useVoronoi(false);\n                scatters1.interactive(false);\n                scatters2.interactive(false);\n            }\n        }},\n\n        duration: {get: function(){return duration;}, set: function(_) {\n            duration = _;\n            [lines1, lines2, stack1, stack2, scatters1, scatters2, xAxis, yAxis1, yAxis2].forEach(function(model){\n              model.duration(duration);\n            });\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","\nnv.models.ohlcBar = function() {\n    \"use 
strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0}\n        , width = null\n        , height = null\n        , id = Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one\n        , container = null\n        , x = d3.scale.linear()\n        , y = d3.scale.linear()\n        , getX = function(d) { return d.x }\n        , getY = function(d) { return d.y }\n        , getOpen = function(d) { return d.open }\n        , getClose = function(d) { return d.close }\n        , getHigh = function(d) { return d.high }\n        , getLow = function(d) { return d.low }\n        , forceX = []\n        , forceY = []\n        , padData     = false // If true, adds half a data points width to front and back, for lining up a line chart with a bar chart\n        , clipEdge = true\n        , color = nv.utils.defaultColor()\n        , interactive = false\n        , xDomain\n        , yDomain\n        , xRange\n        , yRange\n        , dispatch = d3.dispatch('stateChange', 'changeState', 'renderEnd', 'chartClick', 'elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout', 'elementMousemove')\n        ;\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    function chart(selection) {\n        selection.each(function(data) {\n            container = d3.select(this);\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            nv.utils.initSVG(container);\n\n            // ohlc bar width.\n            var w = (availableWidth / data[0].values.length) * .9;\n\n            // Setup Scales\n            x.domain(xDomain || d3.extent(data[0].values.map(getX).concat(forceX) ));\n\n            if (padData)\n                x.range(xRange || [availableWidth * .5 / data[0].values.length, availableWidth * (data[0].values.length - .5)  / data[0].values.length ]);\n            else\n                x.range(xRange || [5 + w/2, availableWidth - w/2 - 5]);\n\n            y.domain(yDomain || [\n                    d3.min(data[0].values.map(getLow).concat(forceY)),\n                    d3.max(data[0].values.map(getHigh).concat(forceY))\n                ]\n            ).range(yRange || [availableHeight, 0]);\n\n            // If scale's domain don't have a range, slightly adjust to make one... 
so a chart can show a single data point\n            if (x.domain()[0] === x.domain()[1])\n                x.domain()[0] ?\n                    x.domain([x.domain()[0] - x.domain()[0] * 0.01, x.domain()[1] + x.domain()[1] * 0.01])\n                    : x.domain([-1,1]);\n\n            if (y.domain()[0] === y.domain()[1])\n                y.domain()[0] ?\n                    y.domain([y.domain()[0] + y.domain()[0] * 0.01, y.domain()[1] - y.domain()[1] * 0.01])\n                    : y.domain([-1,1]);\n\n            // Setup containers and skeleton of chart\n            var wrap = d3.select(this).selectAll('g.nv-wrap.nv-ohlcBar').data([data[0].values]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-ohlcBar');\n            var defsEnter = wrapEnter.append('defs');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-ticks');\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            container\n                .on('click', function(d,i) {\n                    dispatch.chartClick({\n                        data: d,\n                        index: i,\n                        pos: d3.event,\n                        id: id\n                    });\n                });\n\n            defsEnter.append('clipPath')\n                .attr('id', 'nv-chart-clip-path-' + id)\n                .append('rect');\n\n            wrap.select('#nv-chart-clip-path-' + id + ' rect')\n                .attr('width', availableWidth)\n                .attr('height', availableHeight);\n\n            g   .attr('clip-path', clipEdge ? 'url(#nv-chart-clip-path-' + id + ')' : '');\n\n            var ticks = wrap.select('.nv-ticks').selectAll('.nv-tick')\n                .data(function(d) { return d });\n            ticks.exit().remove();\n\n            ticks.enter().append('path')\n                .attr('class', function(d,i,j) { return (getOpen(d,i) > getClose(d,i) ? 'nv-tick negative' : 'nv-tick positive') + ' nv-tick-' + j + '-' + i })\n                .attr('d', function(d,i) {\n                    return 'm0,0l0,'\n                        + (y(getOpen(d,i))\n                            - y(getHigh(d,i)))\n                        + 'l'\n                        + (-w/2)\n                        + ',0l'\n                        + (w/2)\n                        + ',0l0,'\n                        + (y(getLow(d,i)) - y(getOpen(d,i)))\n                        + 'l0,'\n                        + (y(getClose(d,i))\n                            - y(getLow(d,i)))\n                        + 'l'\n                        + (w/2)\n                        + ',0l'\n                        + (-w/2)\n                        + ',0z';\n                })\n                .attr('transform', function(d,i) { return 'translate(' + x(getX(d,i)) + ',' + y(getHigh(d,i)) + ')'; })\n                .attr('fill', function(d,i) { return color[0]; })\n                .attr('stroke', function(d,i) { return color[0]; })\n                .attr('x', 0 )\n                .attr('y', function(d,i) {  return y(Math.max(0, getY(d,i))) })\n                .attr('height', function(d,i) { return Math.abs(y(getY(d,i)) - y(0)) });\n\n            // the bar colors are controlled by CSS currently\n            ticks.attr('class', function(d,i,j) {\n                return (getOpen(d,i) > getClose(d,i) ? 
'nv-tick negative' : 'nv-tick positive') + ' nv-tick-' + j + '-' + i;\n            });\n\n            d3.transition(ticks)\n                .attr('transform', function(d,i) { return 'translate(' + x(getX(d,i)) + ',' + y(getHigh(d,i)) + ')'; })\n                .attr('d', function(d,i) {\n                    var w = (availableWidth / data[0].values.length) * .9;\n                    return 'm0,0l0,'\n                        + (y(getOpen(d,i))\n                            - y(getHigh(d,i)))\n                        + 'l'\n                        + (-w/2)\n                        + ',0l'\n                        + (w/2)\n                        + ',0l0,'\n                        + (y(getLow(d,i))\n                            - y(getOpen(d,i)))\n                        + 'l0,'\n                        + (y(getClose(d,i))\n                            - y(getLow(d,i)))\n                        + 'l'\n                        + (w/2)\n                        + ',0l'\n                        + (-w/2)\n                        + ',0z';\n                });\n        });\n\n        return chart;\n    }\n\n\n    //Create methods to allow outside functions to highlight a specific bar.\n    chart.highlightPoint = function(pointIndex, isHoverOver) {\n        chart.clearHighlights();\n        container.select(\".nv-ohlcBar .nv-tick-0-\" + pointIndex)\n            .classed(\"hover\", isHoverOver)\n        ;\n    };\n\n    chart.clearHighlights = function() {\n        container.select(\".nv-ohlcBar .nv-tick.hover\")\n            .classed(\"hover\", false)\n        ;\n    };\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:    {get: function(){return width;}, set: function(_){width=_;}},\n        height:   {get: function(){return height;}, set: function(_){height=_;}},\n        xScale:   {get: function(){return x;}, set: function(_){x=_;}},\n        yScale:   {get: function(){return y;}, set: function(_){y=_;}},\n        xDomain:  {get: function(){return xDomain;}, set: function(_){xDomain=_;}},\n        yDomain:  {get: function(){return yDomain;}, set: function(_){yDomain=_;}},\n        xRange:   {get: function(){return xRange;}, set: function(_){xRange=_;}},\n        yRange:   {get: function(){return yRange;}, set: function(_){yRange=_;}},\n        forceX:   {get: function(){return forceX;}, set: function(_){forceX=_;}},\n        forceY:   {get: function(){return forceY;}, set: function(_){forceY=_;}},\n        padData:  {get: function(){return padData;}, set: function(_){padData=_;}},\n        clipEdge: {get: function(){return clipEdge;}, set: function(_){clipEdge=_;}},\n        id:       {get: function(){return id;}, set: function(_){id=_;}},\n        interactive: {get: function(){return interactive;}, set: function(_){interactive=_;}},\n\n        x:     {get: function(){return getX;}, set: function(_){getX=_;}},\n        y:     {get: function(){return getY;}, set: function(_){getY=_;}},\n        open:  {get: function(){return getOpen();}, set: function(_){getOpen=_;}},\n        close: {get: function(){return getClose();}, set: function(_){getClose=_;}},\n        high:  {get: function(){return getHigh;}, set: function(_){getHigh=_;}},\n        low:   {get: function(){return 
getLow;}, set: function(_){getLow=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    != undefined ? _.top    : margin.top;\n            margin.right  = _.right  != undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom != undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   != undefined ? _.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n    return chart;\n};\n","// Code adapted from Jason Davies' \"Parallel Coordinates\"\n// http://bl.ocks.org/jasondavies/1341281\nnv.models.parallelCoordinates = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 30, right: 0, bottom: 10, left: 0}\n        , width = null\n        , height = null\n        , availableWidth = null\n        , availableHeight = null\n        , x = d3.scale.ordinal()\n        , y = {}\n        , undefinedValuesLabel = \"undefined values\"\n        , dimensionData = []\n        , enabledDimensions = []\n        , dimensionNames = []\n        , displayBrush = true\n        , color = nv.utils.defaultColor()\n        , filters = []\n        , active = []\n        , dragging = []\n        , axisWithUndefinedValues = []\n        , lineTension = 1\n        , foreground\n        , background\n        , dimensions\n        , line = d3.svg.line()\n        , axis = d3.svg.axis()\n        , dispatch = d3.dispatch('brushstart', 'brush', 'brushEnd', 'dimensionsOrder', \"stateChange\", 'elementClick', 'elementMouseover', 'elementMouseout', 'elementMousemove', 'renderEnd', 'activeChanged')\n        ;\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch);\n\n    function chart(selection) {\n        renderWatch.reset();\n        selection.each(function(data) {\n            var container = d3.select(this);\n            availableWidth = nv.utils.availableWidth(width, container, margin);\n            availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            nv.utils.initSVG(container);\n\n           //Convert old data to new format (name, values)\n            if (data[0].values === undefined) {\n                var newData = [];\n                data.forEach(function (d) {\n                        var val = {};\n                        var key = Object.keys(d);\n                        key.forEach(function (k) { if (k !== \"name\") val[k] = d[k] });\n                        newData.push({ key: d.name, values: val });\n                });\n                data = newData;\n            }\n\n            var dataValues = data.map(function (d) {return d.values});\n            if (active.length === 0) {\n                active = data;\n            }; //set all active before first brush call\n            \n            dimensionNames = dimensionData.sort(function (a, b) { return a.currentPosition - b.currentPosition; }).map(function (d) { return d.key });\n            enabledDimensions = dimensionData.filter(function (d) { return !d.disabled; });\n            \n 
           // Setup Scales\n            x.rangePoints([0, availableWidth], 1).domain(enabledDimensions.map(function (d) { return d.key; }));\n\n            //Set as true if all values on an axis are missing.\n            // Extract the list of dimensions and create a scale for each.\n            var oldDomainMaxValue = {};\n            var displayMissingValuesline = false;\n            var currentTicks = [];\n            \n            dimensionNames.forEach(function(d) {\n                var extent = d3.extent(dataValues, function (p) { return +p[d]; });\n                var min = extent[0];\n                var max = extent[1];\n                var onlyUndefinedValues = false;\n                //If there are no values to display on an axis, set the extent to 0\n                if (isNaN(min) || isNaN(max)) {\n                    onlyUndefinedValues = true;\n                    min = 0;\n                    max = 0;\n                }\n                //Scale the axis if there is only one value\n                if (min === max) {\n                    min = min - 1;\n                    max = max + 1;\n                }\n                var f = filters.filter(function (k) { return k.dimension == d; });\n                if (f.length !== 0) {\n                    //If there are only NaN values, keep the existing domain.\n                    if (onlyUndefinedValues) {\n                        min = y[d].domain()[0];\n                        max = y[d].domain()[1];\n                    }\n                        //If the brush extent is > max (< min), keep the extent value.\n                    else if (!f[0].hasOnlyNaN && displayBrush) {\n                        min = min > f[0].extent[0] ? f[0].extent[0] : min;\n                        max = max < f[0].extent[1] ? f[0].extent[1] : max;\n                    }\n                        //If there are NaN values brushed, be sure the brush extent is on the domain.\n                    else if (f[0].hasNaN) {\n                        max = max < f[0].extent[1] ? 
f[0].extent[1] : max;\n                        oldDomainMaxValue[d] = y[d].domain()[1];\n                        displayMissingValuesline = true;\n                    }\n                }\n                //Use 90% of (availableHeight - 12) for the axis range, 12 representing the space necessary to display \"undefined values\" text.\n                //The remaining 10% are used to display the missingValue line.\n                y[d] = d3.scale.linear()\n                    .domain([min, max])\n                    .range([(availableHeight - 12) * 0.9, 0]);\n\n                axisWithUndefinedValues = [];\n                y[d].brush = d3.svg.brush().y(y[d]).on('brushstart', brushstart).on('brush', brush).on('brushend', brushend);\n            });\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-parallelCoordinates').data([data]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-parallelCoordinates');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-parallelCoordinates background');\n            gEnter.append('g').attr('class', 'nv-parallelCoordinates foreground');\n            gEnter.append('g').attr('class', 'nv-parallelCoordinates missingValuesline');\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            line.interpolate('cardinal').tension(lineTension);\n            axis.orient('left');\n            var axisDrag = d3.behavior.drag()\n                        .on('dragstart', dragStart)\n                        .on('drag', dragMove)\n                        .on('dragend', dragEnd);\n\n            //Add missing value line at the bottom of the chart\n            var missingValuesline, missingValueslineText;\n            var step = x.range()[1] - x.range()[0];\n            step = isNaN(step) ? 
x.range()[0] : step;\n            if (!isNaN(step)) {\n                var lineData = [0 + step / 2, availableHeight - 12, availableWidth - step / 2, availableHeight - 12];\n                missingValuesline = wrap.select('.missingValuesline').selectAll('line').data([lineData]);\n                missingValuesline.enter().append('line');\n                missingValuesline.exit().remove();\n                missingValuesline.attr(\"x1\", function(d) { return d[0]; })\n                        .attr(\"y1\", function(d) { return d[1]; })\n                        .attr(\"x2\", function(d) { return d[2]; })\n                        .attr(\"y2\", function(d) { return d[3]; });\n    \n                //Add the text \"undefined values\" under the missing value line\n                missingValueslineText = wrap.select('.missingValuesline').selectAll('text').data([undefinedValuesLabel]);\n                missingValueslineText.append('text').data([undefinedValuesLabel]);\n                missingValueslineText.enter().append('text');\n                missingValueslineText.exit().remove();\n                missingValueslineText.attr(\"y\", availableHeight)\n                        //To have the text right-aligned with the missingValues line, subtract 92, representing the text size.\n                        .attr(\"x\", availableWidth - 92 - step / 2)\n                        .text(function(d) { return d; });\n            }\n            // Add grey background lines for context.\n            background = wrap.select('.background').selectAll('path').data(data);\n            background.enter().append('path');\n            background.exit().remove();\n            background.attr('d', path);\n\n            // Add blue foreground lines for focus.\n            foreground = wrap.select('.foreground').selectAll('path').data(data);\n            foreground.enter().append('path')\n            foreground.exit().remove();\n            foreground.attr('d', path)\n                .style(\"stroke-width\", function (d, i) {\n                if (isNaN(d.strokeWidth)) { d.strokeWidth = 1;} return d.strokeWidth;})\n                .attr('stroke', function (d, i) { return d.color || color(d, i); });\n            foreground.on(\"mouseover\", function (d, i) {\n                d3.select(this).classed('hover', true).style(\"stroke-width\", d.strokeWidth + 2 + \"px\").style(\"stroke-opacity\", 1);\n                dispatch.elementMouseover({\n                    label: d.name,\n                    color: d.color || color(d, i),\n                    values: d.values,\n                    dimensions: enabledDimensions\n                });\n\n            });\n            foreground.on(\"mouseout\", function (d, i) {\n                d3.select(this).classed('hover', false).style(\"stroke-width\", d.strokeWidth + \"px\").style(\"stroke-opacity\", 0.7);\n                dispatch.elementMouseout({\n                    label: d.name,\n                    index: i\n                });\n            });\n            foreground.on('mousemove', function (d, i) {\n                dispatch.elementMousemove();\n            });\n            foreground.on('click', function (d) {\n                dispatch.elementClick({\n                    id: d.id\n                });\n            });\n            // Add a group element for each dimension.\n            dimensions = g.selectAll('.dimension').data(enabledDimensions);\n            var dimensionsEnter = dimensions.enter().append('g').attr('class', 'nv-parallelCoordinates dimension');\n\n            
dimensions.attr('transform', function(d) { return 'translate(' + x(d.key) + ',0)'; });\n            dimensionsEnter.append('g').attr('class', 'nv-axis');\n\n            // Add an axis and title.\n            dimensionsEnter.append('text')\n                .attr('class', 'nv-label')\n                .style(\"cursor\", \"move\")\n                .attr('dy', '-1em')\n                .attr('text-anchor', 'middle')\n                .on(\"mouseover\", function(d, i) {\n                    dispatch.elementMouseover({\n                        label: d.tooltip || d.key,\n                        color: d.color \n                    });\n                })\n                .on(\"mouseout\", function(d, i) {\n                    dispatch.elementMouseout({\n                        label: d.tooltip\n                    });\n                })\n                .on('mousemove', function (d, i) {\n                    dispatch.elementMousemove();\n                })\n                .call(axisDrag);\n\n            dimensionsEnter.append('g').attr('class', 'nv-brushBackground');\n            dimensions.exit().remove();\n            dimensions.select('.nv-label').text(function (d) { return d.key });\n\n            // Add and store a brush for each axis.\n            restoreBrush(displayBrush);\n\n            var actives = dimensionNames.filter(function (p) { return !y[p].brush.empty(); }),\n                    extents = actives.map(function (p) { return y[p].brush.extent(); });\n            var formerActive = active.slice(0);\n\n            //Restore active values\n            active = [];\n            foreground.style(\"display\", function (d) {\n                var isActive = actives.every(function (p, i) {\n                    if ((isNaN(d.values[p]) || isNaN(parseFloat(d.values[p]))) && extents[i][0] == y[p].brush.y().domain()[0]) {\n                        return true;\n                    }\n                    return (extents[i][0] <= d.values[p] && d.values[p] <= extents[i][1]) && !isNaN(parseFloat(d.values[p]));\n                });\n                if (isActive)\n                    active.push(d);\n                return !isActive ? 
\"none\" : null;\n\n            });\n\n            if (filters.length > 0 || !nv.utils.arrayEquals(active, formerActive)) {\n               dispatch.activeChanged(active);\n            }\n\n            // Returns the path for a given data point.\n            function path(d) {\n                return line(enabledDimensions.map(function (p) {\n                    //If value if missing, put the value on the missing value line\n                    if (isNaN(d.values[p.key]) || isNaN(parseFloat(d.values[p.key])) || displayMissingValuesline) {\n                        var domain = y[p.key].domain();\n                        var range = y[p.key].range();\n                        var min = domain[0] - (domain[1] - domain[0]) / 9;\n\n                        //If it's not already the case, allow brush to select undefined values\n                        if (axisWithUndefinedValues.indexOf(p.key) < 0) {\n\n                            var newscale = d3.scale.linear().domain([min, domain[1]]).range([availableHeight - 12, range[1]]);\n                            y[p.key].brush.y(newscale);\n                            axisWithUndefinedValues.push(p.key);\n                        }\n                        if (isNaN(d.values[p.key]) || isNaN(parseFloat(d.values[p.key]))) {\n                            return [x(p.key), y[p.key](min)];\n                        }\n                    }\n\n                    //If parallelCoordinate contain missing values show the missing values line otherwise, hide it.\n                    if (missingValuesline !== undefined) {\n                        if (axisWithUndefinedValues.length > 0 || displayMissingValuesline) {\n                            missingValuesline.style(\"display\", \"inline\");\n                            missingValueslineText.style(\"display\", \"inline\");\n                        } else {\n                            missingValuesline.style(\"display\", \"none\");\n                            missingValueslineText.style(\"display\", \"none\");\n                        }\n                    }\n                    return [x(p.key), y[p.key](d.values[p.key])];\n                }));\n            }\n\n            function restoreBrush(visible) {\n                filters.forEach(function (f) {\n                    //If filter brushed NaN values, keep the brush on the bottom of the axis.\n                    var brushDomain = y[f.dimension].brush.y().domain();\n                    if (f.hasOnlyNaN) {\n                        f.extent[1] = (y[f.dimension].domain()[1] - brushDomain[0]) * (f.extent[1] - f.extent[0]) / (oldDomainMaxValue[f.dimension] - f.extent[0]) + brushDomain[0];\n                    }\n                    if (f.hasNaN) {\n                        f.extent[0] = brushDomain[0];\n                    }\n                    if (visible)\n                        y[f.dimension].brush.extent(f.extent);\n                });\n                \n                dimensions.select('.nv-brushBackground')\n                    .each(function (d) {\n                        d3.select(this).call(y[d.key].brush);\n\n                    })\n                    .selectAll('rect')\n                    .attr('x', -8)\n                    .attr('width', 16);\n                \n                updateTicks();\n            }\n            \n            // Handles a brush event, toggling the display of foreground lines.\n            function brushstart() {\n                //If brush aren't visible, show it before brushing again.\n                if (displayBrush === 
false) {\n                    displayBrush = true;\n                    restoreBrush(true);\n                }\n            }\n            \n            // Handles a brush event, toggling the display of foreground lines.\n            function brush() {\n                actives = dimensionNames.filter(function (p) { return !y[p].brush.empty(); });\n                extents = actives.map(function(p) { return y[p].brush.extent(); });\n\n                filters = []; //erase current filters\n                actives.forEach(function(d,i) {\n                    filters[i] = {\n                        dimension: d,\n                        extent: extents[i],\n                        hasNaN: false,\n                        hasOnlyNaN: false\n                    }\n                });\n\n                active = []; //erase current active list\n                foreground.style('display', function(d) {\n                    var isActive = actives.every(function(p, i) {\n                        if ((isNaN(d.values[p]) || isNaN(parseFloat(d.values[p]))) && extents[i][0] == y[p].brush.y().domain()[0]) return true;\n                        return (extents[i][0] <= d.values[p] && d.values[p] <= extents[i][1]) && !isNaN(parseFloat(d.values[p]));\n                    });\n                    if (isActive) active.push(d);\n                    return isActive ? null : 'none';\n                });\n                \n                updateTicks();\n                \n                dispatch.brush({\n                    filters: filters,\n                    active: active\n                });\n            }\n            function brushend() {\n                var hasActiveBrush = actives.length > 0 ? true : false;\n                filters.forEach(function (f) {\n                    if (f.extent[0] === y[f.dimension].brush.y().domain()[0] && axisWithUndefinedValues.indexOf(f.dimension) >= 0)\n                        f.hasNaN = true;\n                    if (f.extent[1] < y[f.dimension].domain()[0])\n                        f.hasOnlyNaN = true;\n                });\n                dispatch.brushEnd(active, hasActiveBrush);\n            }           \n            function updateTicks() {\n                dimensions.select('.nv-axis')\n                    .each(function (d, i) {\n                        var f = filters.filter(function (k) { return k.dimension == d.key; });\n                        currentTicks[d.key] = y[d.key].domain();\n                        \n                        //If brush are available, display brush extent\n                        if (f.length != 0 && displayBrush)\n                        {\n                            currentTicks[d.key] = [];\n                            if (f[0].extent[1] > y[d.key].domain()[0]) \n                                currentTicks[d.key] = [f[0].extent[1]];\n                            if (f[0].extent[0] >= y[d.key].domain()[0])\n                                currentTicks[d.key].push(f[0].extent[0]);    \n                        }\n                            \n                        d3.select(this).call(axis.scale(y[d.key]).tickFormat(d.format).tickValues(currentTicks[d.key]));\n                });\n            }\n            function dragStart(d) {\n                dragging[d.key] = this.parentNode.__origin__ = x(d.key);\n                background.attr(\"visibility\", \"hidden\");\n            }\n            function dragMove(d) {\n                dragging[d.key] = Math.min(availableWidth, Math.max(0, this.parentNode.__origin__ += d3.event.x));\n      
          foreground.attr(\"d\", path);\n                enabledDimensions.sort(function (a, b) { return dimensionPosition(a.key) - dimensionPosition(b.key); });\n                enabledDimensions.forEach(function (d, i) { return d.currentPosition = i; });\n                x.domain(enabledDimensions.map(function (d) { return d.key; }));\n                dimensions.attr(\"transform\", function(d) { return \"translate(\" + dimensionPosition(d.key) + \")\"; });\n            }\n            function dragEnd(d, i) {\n                delete this.parentNode.__origin__;\n                delete dragging[d.key];\n                d3.select(this.parentNode).attr(\"transform\", \"translate(\" + x(d.key) + \")\");\n                foreground\n                  .attr(\"d\", path);\n                background\n                  .attr(\"d\", path)\n                  .attr(\"visibility\", null);\n\n                dispatch.dimensionsOrder(enabledDimensions);\n            }\n            function dimensionPosition(d) {\n                var v = dragging[d];\n                return v == null ? x(d) : v;\n            }\n        });\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:         {get: function(){return width;},           set: function(_){width= _;}},\n        height:        {get: function(){return height;},          set: function(_){height= _;}},\n        dimensionData: { get: function () { return dimensionData; }, set: function (_) { dimensionData = _; } },\n        displayBrush: { get: function () { return displayBrush; }, set: function (_) { displayBrush = _; } },\n        filters: { get: function () { return filters; }, set: function (_) { filters = _; } },\n        active: { get: function () { return active; }, set: function (_) { active = _; } },\n        lineTension:   {get: function(){return lineTension;},     set: function(_){lineTension = _;}},\n        undefinedValuesLabel : {get: function(){return undefinedValuesLabel;}, set: function(_){undefinedValuesLabel=_;}},\n        \n        // deprecated options\n        dimensions: {get: function () { return dimensionData.map(function (d){return d.key}); }, set: function (_) {\n            // deprecated after 1.8.1\n            nv.deprecated('dimensions', 'use dimensionData instead');\n            if (dimensionData.length === 0) {\n                _.forEach(function (k) { dimensionData.push({ key: k }) })\n            } else {\n                _.forEach(function (k, i) { dimensionData[i].key= k })\n            }\n        }},\n        dimensionNames: {get: function () { return dimensionData.map(function (d){return d.key}); }, set: function (_) {\n            // deprecated after 1.8.1\n            nv.deprecated('dimensionNames', 'use dimensionData instead');\n            dimensionNames = [];\n            if (dimensionData.length === 0) {\n                _.forEach(function (k) { dimensionData.push({ key: k }) })\n            } else {\n                _.forEach(function (k, i) { dimensionData[i].key = k })\n            }\n \n        }},\n        dimensionFormats: {get: function () { return dimensionData.map(function (d) { return d.format }); }, set: function (_) {\n            // deprecated after 
1.8.1\n            nv.deprecated('dimensionFormats', 'use dimensionData instead');\n            if (dimensionData.length === 0) {\n                _.forEach(function (f) { dimensionData.push({ format: f }) })\n            } else {\n                _.forEach(function (f, i) { dimensionData[i].format = f })\n            }\n\n        }},\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    =  _.top    !== undefined ? _.top    : margin.top;\n            margin.right  =  _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom =  _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   =  _.left   !== undefined ? _.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }}\n    });\n    nv.utils.initOptions(chart);\n    return chart;\n};\n","nv.models.parallelCoordinatesChart = function () {\n        \"use strict\";\n        //============================================================\n        // Public Variables with Default Settings\n        //------------------------------------------------------------\n\n        var parallelCoordinates = nv.models.parallelCoordinates()\n        var legend = nv.models.legend()\n        var tooltip = nv.models.tooltip();\n        var dimensionTooltip = nv.models.tooltip();\n\n        var margin = { top: 0, right: 0, bottom: 0, left: 0 }\n        , marginTop = null\n        , width = null\n        , height = null\n        , showLegend = true\n        , color = nv.utils.defaultColor()\n        , state = nv.utils.state()\n        , dimensionData = []\n        , displayBrush = true\n        , defaultState = null\n        , noData = null\n        , nanValue = \"undefined\"\n        , dispatch = d3.dispatch('dimensionsOrder', 'brushEnd', 'stateChange', 'changeState', 'renderEnd')\n        , controlWidth = function () { return showControls ? 
180 : 0 }\n        ;\n\n\t    //============================================================\n\n\t\t//============================================================\n        // Private Variables\n        //------------------------------------------------------------\n\n        var renderWatch = nv.utils.renderWatch(dispatch);\n\n        var stateGetter = function(data) {\n            return function() {\n                return {\n                    active: data.map(function(d) { return !d.disabled })\n                };\n            }\n        };\n\n        var stateSetter = function(data) {\n            return function(state) {\n                if(state.active !== undefined) {\n                    data.forEach(function(series, i) {\n                        series.disabled = !state.active[i];\n                    });\n                }\n            }\n        };\n\n        tooltip.contentGenerator(function(data) {\n            var str = '<table><thead><tr><td class=\"legend-color-guide\"><div style=\"background-color:' + data.color + '\"></div></td><td><strong>' + data.key + '</strong></td></tr></thead>';\n            if(data.series.length !== 0)\n            {\n                str = str + '<tbody><tr><td height =\"10px\"></td></tr>';\n                data.series.forEach(function(d){\n                    str = str + '<tr><td class=\"legend-color-guide\"><div style=\"background-color:' + d.color + '\"></div></td><td class=\"key\">' + d.key + '</td><td class=\"value\">' + d.value + '</td></tr>';\n                });\n                str = str + '</tbody>';\n            }\n            str = str + '</table>';\n            return str;\n        });\n\n        //============================================================\n        // Chart function\n        //------------------------------------------------------------\n\n        function chart(selection) {\n            renderWatch.reset();\n            renderWatch.models(parallelCoordinates);\n\n            selection.each(function(data) {\n                var container = d3.select(this);\n                nv.utils.initSVG(container);\n\n                var that = this;\n\n                var availableWidth = nv.utils.availableWidth(width, container, margin),\n                    availableHeight = nv.utils.availableHeight(height, container, margin);\n\n                chart.update = function() { container.call(chart); };\n                chart.container = this;\n\n                state.setter(stateSetter(dimensionData), chart.update)\n                    .getter(stateGetter(dimensionData))\n                    .update();\n\n                //set state.disabled\n                state.disabled = dimensionData.map(function (d) { return !!d.disabled });\n\n                //Keep dimensions position in memory\n                dimensionData = dimensionData.map(function (d) {d.disabled = !!d.disabled; return d});\n                dimensionData.forEach(function (d, i) {\n                    d.originalPosition = isNaN(d.originalPosition) ? i : d.originalPosition;\n                    d.currentPosition = isNaN(d.currentPosition) ? 
i : d.currentPosition;\n                });\n\n               if (!defaultState) {\n                    var key;\n                    defaultState = {};\n                    for(key in state) {\n                        if(state[key] instanceof Array)\n                            defaultState[key] = state[key].slice(0);\n                        else\n                            defaultState[key] = state[key];\n                    }\n                }\n\n                // Display No Data message if there's nothing to show.\n                if(!data || !data.length) {\n                    nv.utils.noData(chart, container);\n                    return chart;\n                } else {\n                    container.selectAll('.nv-noData').remove();\n                }\n\n                //------------------------------------------------------------\n                // Setup containers and skeleton of chart\n\n                var wrap = container.selectAll('g.nv-wrap.nv-parallelCoordinatesChart').data([data]);\n                var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-parallelCoordinatesChart').append('g');\n\n                var g = wrap.select('g');\n\n                gEnter.append('g').attr('class', 'nv-parallelCoordinatesWrap');\n                gEnter.append('g').attr('class', 'nv-legendWrap');\n\n                g.select(\"rect\")\n                    .attr(\"width\", availableWidth)\n                    .attr(\"height\", (availableHeight > 0) ? availableHeight : 0);\n\n                // Legend\n                if (!showLegend) {\n                    g.select('.nv-legendWrap').selectAll('*').remove();\n                } else {\n                    legend.width(availableWidth)\n                        .color(function (d) { return \"rgb(188,190,192)\"; });\n\n                    g.select('.nv-legendWrap')\n                        .datum(dimensionData.sort(function (a, b) { return a.originalPosition - b.originalPosition; }))\n                        .call(legend);\n\n                    if (!marginTop && legend.height() !== margin.top) {\n                        margin.top = legend.height();\n                        availableHeight = nv.utils.availableHeight(height, container, margin);\n                    }\n                    wrap.select('.nv-legendWrap')\n                       .attr('transform', 'translate( 0 ,' + (-margin.top) + ')');\n                }\n                wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n                // Main Chart Component(s)\n                parallelCoordinates\n                    .width(availableWidth)\n                    .height(availableHeight)\n                    .dimensionData(dimensionData)\n                    .displayBrush(displayBrush);\n\n\t\t        var parallelCoordinatesWrap = g.select('.nv-parallelCoordinatesWrap ')\n                  .datum(data);\n\n\t\t        parallelCoordinatesWrap.transition().call(parallelCoordinates);\n\n\t\t\t\t//============================================================\n                // Event Handling/Dispatching (in chart's scope)\n                //------------------------------------------------------------\n                //Display reset brush button\n\t\t        parallelCoordinates.dispatch.on('brushEnd', function (active, hasActiveBrush) {\n\t\t            if (hasActiveBrush) {\n\t\t                displayBrush = true;\n\t\t                dispatch.brushEnd(active);\n\t\t            } else {\n\n\t\t                displayBrush = 
false;\n\t\t            }\n\t\t        });\n\n\t\t        legend.dispatch.on('stateChange', function(newState) {\n\t\t            for(var key in newState) {\n\t\t                state[key] = newState[key];\n\t\t            }\n\t\t            dispatch.stateChange(state);\n\t\t            chart.update();\n\t\t        });\n\n                //Update dimensions order and display reset sorting button\n\t\t        parallelCoordinates.dispatch.on('dimensionsOrder', function (e) {\n\t\t            dimensionData.sort(function (a, b) { return a.currentPosition - b.currentPosition; });\n\t\t            var isSorted = false;\n\t\t            dimensionData.forEach(function (d, i) {\n\t\t                d.currentPosition = i;\n\t\t                if (d.currentPosition !== d.originalPosition)\n\t\t                    isSorted = true;\n\t\t            });\n\t\t            dispatch.dimensionsOrder(dimensionData, isSorted);\n\t\t        });\n\n\t\t\t\t// Update chart from a state object passed to event handler\n                dispatch.on('changeState', function (e) {\n\n                    if (typeof e.disabled !== 'undefined') {\n                        dimensionData.forEach(function (series, i) {\n                            series.disabled = e.disabled[i];\n                        });\n                        state.disabled = e.disabled;\n                    }\n                    chart.update();\n                });\n            });\n\n            renderWatch.renderEnd('parraleleCoordinateChart immediate');\n            return chart;\n        }\n\n\t\t//============================================================\n        // Event Handling/Dispatching (out of chart's scope)\n        //------------------------------------------------------------\n\n        parallelCoordinates.dispatch.on('elementMouseover.tooltip', function (evt) {\n            var tp = {\n                key: evt.label,\n                color: evt.color,\n                series: []\n             }\n            if(evt.values){\n                Object.keys(evt.values).forEach(function (d) {\n                    var dim = evt.dimensions.filter(function (dd) {return dd.key === d;})[0];\n                    if(dim){\n                        var v;\n                        if (isNaN(evt.values[d]) || isNaN(parseFloat(evt.values[d]))) {\n                            v = nanValue;\n                        } else {\n                            v = dim.format(evt.values[d]);\n                        }\n                        tp.series.push({ idx: dim.currentPosition, key: d, value: v, color: dim.color });\n                    }\n                });\n                tp.series.sort(function(a,b) {return a.idx - b.idx});\n             }\n            tooltip.data(tp).hidden(false);\n        });\n\n        parallelCoordinates.dispatch.on('elementMouseout.tooltip', function(evt) {\n            tooltip.hidden(true)\n        });\n\n        parallelCoordinates.dispatch.on('elementMousemove.tooltip', function () {\n            tooltip();\n        });\n\t\t //============================================================\n        // Expose Public Variables\n        //------------------------------------------------------------\n\n\t\t// expose chart's sub-components\n        chart.dispatch = dispatch;\n        chart.parallelCoordinates = parallelCoordinates;\n        chart.legend = legend;\n        chart.tooltip = tooltip;\n        chart.options = nv.utils.optionsFunc.bind(chart);\n\n        chart._options = Object.create({}, {\n            // simple 
options, just get/set the necessary values\n            width: { get: function () { return width; }, set: function (_) { width = _; } },\n            height: { get: function () { return height; }, set: function (_) { height = _; } },\n            showLegend: { get: function () { return showLegend; }, set: function (_) { showLegend = _; } },\n            defaultState: { get: function () { return defaultState; }, set: function (_) { defaultState = _; } },\n            dimensionData: { get: function () { return dimensionData; }, set: function (_) { dimensionData = _; } },\n            displayBrush: { get: function () { return displayBrush; }, set: function (_) { displayBrush = _; } },\n            noData: { get: function () { return noData; }, set: function (_) { noData = _; } },\n            nanValue: { get: function () { return nanValue; }, set: function (_) { nanValue = _; } },\n\n            // options that require extra logic in the setter\n            margin: {\n                get: function () { return margin; },\n                set: function (_) {\n                    if (_.top !== undefined) {\n                        margin.top = _.top;\n                        marginTop = _.top;\n                    }\n                    margin.right = _.right !== undefined ? _.right : margin.right;\n                    margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n                    margin.left = _.left !== undefined ? _.left : margin.left;\n                }\n            },\n            color: {get: function(){return color;}, set: function(_){\n                    color = nv.utils.getColor(_);\n                    legend.color(color);\n                    parallelCoordinates.color(color);\n                }}\n        });\n\n        nv.utils.inheritOptions(chart, parallelCoordinates);\n        nv.utils.initOptions(chart);\n\n        return chart;\n    };\n","nv.models.pie = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0}\n        , width = 500\n        , height = 500\n        , getX = function(d) { return d.x }\n        , getY = function(d) { return d.y }\n        , id = Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one\n        , container = null\n        , color = nv.utils.defaultColor()\n        , valueFormat = d3.format(',.2f')\n        , showLabels = true\n        , labelsOutside = false\n        , labelType = \"key\"\n        , labelThreshold = .02 //if slice percentage is under this, don't show label\n        , donut = false\n        , title = false\n        , growOnHover = true\n        , titleOffset = 0\n        , labelSunbeamLayout = false\n        , startAngle = false\n        , padAngle = false\n        , endAngle = false\n        , cornerRadius = 0\n        , donutRatio = 0.5\n        , duration = 250\n        , arcsRadius = []\n        , dispatch = d3.dispatch('chartClick', 'elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout', 'elementMousemove', 'renderEnd')\n        ;\n\n    var arcs = [];\n    var arcsOver = [];\n\n    //============================================================\n    // chart function\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch);\n\n    function chart(selection) {\n        
renderWatch.reset();\n        selection.each(function(data) {\n            var availableWidth = width - margin.left - margin.right\n                , availableHeight = height - margin.top - margin.bottom\n                , radius = Math.min(availableWidth, availableHeight) / 2\n                , arcsRadiusOuter = []\n                , arcsRadiusInner = []\n                ;\n\n            container = d3.select(this)\n            if (arcsRadius.length === 0) {\n                var outer = radius - radius / 5;\n                var inner = donutRatio * radius;\n                for (var i = 0; i < data[0].length; i++) {\n                    arcsRadiusOuter.push(outer);\n                    arcsRadiusInner.push(inner);\n                }\n            } else {\n                if(growOnHover){\n                    arcsRadiusOuter = arcsRadius.map(function (d) { return (d.outer - d.outer / 5) * radius; });\n                    arcsRadiusInner = arcsRadius.map(function (d) { return (d.inner - d.inner / 5) * radius; });\n                    donutRatio = d3.min(arcsRadius.map(function (d) { return (d.inner - d.inner / 5); }));\n                } else {\n                    arcsRadiusOuter = arcsRadius.map(function (d) { return d.outer * radius; });\n                    arcsRadiusInner = arcsRadius.map(function (d) { return d.inner * radius; });\n                    donutRatio = d3.min(arcsRadius.map(function (d) { return d.inner; }));\n                }\n            }\n            nv.utils.initSVG(container);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('.nv-wrap.nv-pie').data(data);\n            var wrapEnter = wrap.enter().append('g').attr('class','nvd3 nv-wrap nv-pie nv-chart-' + id);\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n            var g_pie = gEnter.append('g').attr('class', 'nv-pie');\n            gEnter.append('g').attr('class', 'nv-pieLabels');\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n            g.select('.nv-pie').attr('transform', 'translate(' + availableWidth / 2 + ',' + availableHeight / 2 + ')');\n            g.select('.nv-pieLabels').attr('transform', 'translate(' + availableWidth / 2 + ',' + availableHeight / 2 + ')');\n\n            //\n            container.on('click', function(d,i) {\n                dispatch.chartClick({\n                    data: d,\n                    index: i,\n                    pos: d3.event,\n                    id: id\n                });\n            });\n\n            arcs = [];\n            arcsOver = [];\n            for (var i = 0; i < data[0].length; i++) {\n\n                var arc = d3.svg.arc().outerRadius(arcsRadiusOuter[i]);\n                var arcOver = d3.svg.arc().outerRadius(arcsRadiusOuter[i] + 5);\n\n                if (startAngle !== false) {\n                    arc.startAngle(startAngle);\n                    arcOver.startAngle(startAngle);\n                }\n                if (endAngle !== false) {\n                    arc.endAngle(endAngle);\n                    arcOver.endAngle(endAngle);\n                }\n                if (donut) {\n                    arc.innerRadius(arcsRadiusInner[i]);\n                    arcOver.innerRadius(arcsRadiusInner[i]);\n                }\n\n                if (arc.cornerRadius && cornerRadius) {\n                    arc.cornerRadius(cornerRadius);\n                    arcOver.cornerRadius(cornerRadius);\n                }\n\n           
     arcs.push(arc);\n                arcsOver.push(arcOver);\n            }\n\n            // Setup the Pie chart and choose the data element\n            var pie = d3.layout.pie()\n                .sort(null)\n                .value(function(d) { return d.disabled ? 0 : getY(d) });\n\n            // padAngle added in d3 3.5\n            if (pie.padAngle && padAngle) {\n                pie.padAngle(padAngle);\n            }\n\n            // if title is specified and donut, put it in the middle\n            if (donut && title) {\n                g_pie.append(\"text\").attr('class', 'nv-pie-title');\n\n                wrap.select('.nv-pie-title')\n                    .style(\"text-anchor\", \"middle\")\n                    .text(function (d) {\n                        return title;\n                    })\n                    .style(\"font-size\", (Math.min(availableWidth, availableHeight)) * donutRatio * 2 / (title.length + 2) + \"px\")\n                    .attr(\"dy\", \"0.35em\") // trick to vertically center text\n                    .attr('transform', function(d, i) {\n                        return 'translate(0, '+ titleOffset + ')';\n                    });\n            }\n\n            var slices = wrap.select('.nv-pie').selectAll('.nv-slice').data(pie);\n            var pieLabels = wrap.select('.nv-pieLabels').selectAll('.nv-label').data(pie);\n\n            slices.exit().remove();\n            pieLabels.exit().remove();\n\n            var ae = slices.enter().append('g');\n            ae.attr('class', 'nv-slice');\n            ae.on('mouseover', function(d, i) {\n                d3.select(this).classed('hover', true);\n                if (growOnHover) {\n                    d3.select(this).select(\"path\").transition()\n                        .duration(70)\n                        .attr(\"d\", arcsOver[i]);\n                }\n                dispatch.elementMouseover({\n                    data: d.data,\n                    index: i,\n                    color: d3.select(this).style(\"fill\"),\n                    percent: (d.endAngle - d.startAngle) / (2 * Math.PI)\n                });\n            });\n            ae.on('mouseout', function(d, i) {\n                d3.select(this).classed('hover', false);\n                if (growOnHover) {\n                    d3.select(this).select(\"path\").transition()\n                        .duration(50)\n                        .attr(\"d\", arcs[i]);\n                }\n                dispatch.elementMouseout({data: d.data, index: i});\n            });\n            ae.on('mousemove', function(d, i) {\n                dispatch.elementMousemove({data: d.data, index: i});\n            });\n            ae.on('click', function(d, i) {\n                var element = this;\n                dispatch.elementClick({\n                    data: d.data,\n                    index: i,\n                    color: d3.select(this).style(\"fill\"),\n                    event: d3.event,\n                    element: element\n                });\n            });\n            ae.on('dblclick', function(d, i) {\n                dispatch.elementDblClick({\n                    data: d.data,\n                    index: i,\n                    color: d3.select(this).style(\"fill\")\n                });\n            });\n\n            slices.attr('fill', function(d,i) { return color(d.data, i); });\n            slices.attr('stroke', function(d,i) { return color(d.data, i); });\n\n            var paths = ae.append('path').each(function(d) {\n                
this._current = d;\n            });\n\n            slices.select('path')\n                .transition()\n                .duration(duration)\n                .attr('d', function (d, i) { return arcs[i](d); })\n                .attrTween('d', arcTween);\n\n            if (showLabels) {\n                // This does the normal label\n                var labelsArc = [];\n                for (var i = 0; i < data[0].length; i++) {\n                    labelsArc.push(arcs[i]);\n\n                    if (labelsOutside) {\n                        if (donut) {\n                            labelsArc[i] = d3.svg.arc().outerRadius(arcs[i].outerRadius());\n                            if (startAngle !== false) labelsArc[i].startAngle(startAngle);\n                            if (endAngle !== false) labelsArc[i].endAngle(endAngle);\n                        }\n                    } else if (!donut) {\n                            labelsArc[i].innerRadius(0);\n                    }\n                }\n\n                pieLabels.enter().append(\"g\").classed(\"nv-label\",true).each(function(d,i) {\n                    var group = d3.select(this);\n\n                    group.attr('transform', function (d, i) {\n                        if (labelSunbeamLayout) {\n                            d.outerRadius = arcsRadiusOuter[i] + 10; // Set Outer Coordinate\n                            d.innerRadius = arcsRadiusOuter[i] + 15; // Set Inner Coordinate\n                            var rotateAngle = (d.startAngle + d.endAngle) / 2 * (180 / Math.PI);\n                            if ((d.startAngle + d.endAngle) / 2 < Math.PI) {\n                                rotateAngle -= 90;\n                            } else {\n                                rotateAngle += 90;\n                            }\n                            return 'translate(' + labelsArc[i].centroid(d) + ') rotate(' + rotateAngle + ')';\n                        } else {\n                            d.outerRadius = radius + 10; // Set Outer Coordinate\n                            d.innerRadius = radius + 15; // Set Inner Coordinate\n                            return 'translate(' + labelsArc[i].centroid(d) + ')'\n                        }\n                    });\n\n                    group.append('rect')\n                        .style('stroke', '#fff')\n                        .style('fill', '#fff')\n                        .attr(\"rx\", 3)\n                        .attr(\"ry\", 3);\n\n                    group.append('text')\n                        .style('text-anchor', labelSunbeamLayout ? ((d.startAngle + d.endAngle) / 2 < Math.PI ? 
'start' : 'end') : 'middle') //center the text on it's origin or begin/end if orthogonal aligned\n                        .style('fill', '#000')\n                });\n\n                var labelLocationHash = {};\n                var avgHeight = 14;\n                var avgWidth = 140;\n                var createHashKey = function(coordinates) {\n                    return Math.floor(coordinates[0]/avgWidth) * avgWidth + ',' + Math.floor(coordinates[1]/avgHeight) * avgHeight;\n                };\n                var getSlicePercentage = function(d) {\n                    return (d.endAngle - d.startAngle) / (2 * Math.PI);\n                };\n\n                pieLabels.watchTransition(renderWatch, 'pie labels').attr('transform', function (d, i) {\n                    if (labelSunbeamLayout) {\n                        d.outerRadius = arcsRadiusOuter[i] + 10; // Set Outer Coordinate\n                        d.innerRadius = arcsRadiusOuter[i] + 15; // Set Inner Coordinate\n                        var rotateAngle = (d.startAngle + d.endAngle) / 2 * (180 / Math.PI);\n                        if ((d.startAngle + d.endAngle) / 2 < Math.PI) {\n                            rotateAngle -= 90;\n                        } else {\n                            rotateAngle += 90;\n                        }\n                        return 'translate(' + labelsArc[i].centroid(d) + ') rotate(' + rotateAngle + ')';\n                    } else {\n                        d.outerRadius = radius + 10; // Set Outer Coordinate\n                        d.innerRadius = radius + 15; // Set Inner Coordinate\n\n                        /*\n                        Overlapping pie labels are not good. What this attempts to do is, prevent overlapping.\n                        Each label location is hashed, and if a hash collision occurs, we assume an overlap.\n                        Adjust the label's y-position to remove the overlap.\n                        */\n                        var center = labelsArc[i].centroid(d);\n                        var percent = getSlicePercentage(d);\n                        if (d.value && percent >= labelThreshold) {\n                            var hashKey = createHashKey(center);\n                            if (labelLocationHash[hashKey]) {\n                                center[1] -= avgHeight;\n                            }\n                            labelLocationHash[createHashKey(center)] = true;\n                        }\n                        return 'translate(' + center + ')'\n                    }\n                });\n\n                pieLabels.select(\".nv-label text\")\n                    .style('text-anchor', function(d,i) {\n                        //center the text on it's origin or begin/end if orthogonal aligned\n                        return labelSunbeamLayout ? ((d.startAngle + d.endAngle) / 2 < Math.PI ? 
'start' : 'end') : 'middle';\n                    })\n                    .text(function(d, i) {\n                        var percent = getSlicePercentage(d);\n                        var label = '';\n                        if (!d.value || percent < labelThreshold) return '';\n\n                        if(typeof labelType === 'function') {\n                            label = labelType(d, i, {\n                                'key': getX(d.data),\n                                'value': getY(d.data),\n                                'percent': valueFormat(percent)\n                            });\n                        } else {\n                            switch (labelType) {\n                                case 'key':\n                                    label = getX(d.data);\n                                    break;\n                                case 'value':\n                                    label = valueFormat(getY(d.data));\n                                    break;\n                                case 'percent':\n                                    label = d3.format('%')(percent);\n                                    break;\n                            }\n                        }\n                        return label;\n                    })\n                ;\n            }\n\n\n            // Computes the angle of an arc, converting from radians to degrees.\n            function angle(d) {\n                var a = (d.startAngle + d.endAngle) * 90 / Math.PI - 90;\n                return a > 90 ? a - 180 : a;\n            }\n\n            function arcTween(a, idx) {\n                a.endAngle = isNaN(a.endAngle) ? 0 : a.endAngle;\n                a.startAngle = isNaN(a.startAngle) ? 0 : a.startAngle;\n                if (!donut) a.innerRadius = 0;\n                var i = d3.interpolate(this._current, a);\n                this._current = i(0);\n                return function (t) {\n                    return arcs[idx](i(t));\n                };\n            }\n        });\n\n        renderWatch.renderEnd('pie immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        arcsRadius: { get: function () { return arcsRadius; }, set: function (_) { arcsRadius = _; } },\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        showLabels: {get: function(){return showLabels;}, set: function(_){showLabels=_;}},\n        title:      {get: function(){return title;}, set: function(_){title=_;}},\n        titleOffset:    {get: function(){return titleOffset;}, set: function(_){titleOffset=_;}},\n        labelThreshold: {get: function(){return labelThreshold;}, set: function(_){labelThreshold=_;}},\n        valueFormat:    {get: function(){return valueFormat;}, set: function(_){valueFormat=_;}},\n        x:          {get: function(){return getX;}, set: function(_){getX=_;}},\n        id:         {get: function(){return id;}, set: function(_){id=_;}},\n        endAngle:   {get: function(){return endAngle;}, set: function(_){endAngle=_;}},\n        startAngle: {get: function(){return startAngle;}, set: 
function(_){startAngle=_;}},\n        padAngle:   {get: function(){return padAngle;}, set: function(_){padAngle=_;}},\n        cornerRadius: {get: function(){return cornerRadius;}, set: function(_){cornerRadius=_;}},\n        donutRatio:   {get: function(){return donutRatio;}, set: function(_){donutRatio=_;}},\n        labelsOutside: {get: function(){return labelsOutside;}, set: function(_){labelsOutside=_;}},\n        labelSunbeamLayout: {get: function(){return labelSunbeamLayout;}, set: function(_){labelSunbeamLayout=_;}},\n        donut:              {get: function(){return donut;}, set: function(_){donut=_;}},\n        growOnHover:        {get: function(){return growOnHover;}, set: function(_){growOnHover=_;}},\n\n        // depreciated after 1.7.1\n        pieLabelsOutside: {get: function(){return labelsOutside;}, set: function(_){\n            labelsOutside=_;\n            nv.deprecated('pieLabelsOutside', 'use labelsOutside instead');\n        }},\n        // depreciated after 1.7.1\n        donutLabelsOutside: {get: function(){return labelsOutside;}, set: function(_){\n            labelsOutside=_;\n            nv.deprecated('donutLabelsOutside', 'use labelsOutside instead');\n        }},\n        // deprecated after 1.7.1\n        labelFormat: {get: function(){ return valueFormat;}, set: function(_) {\n            valueFormat=_;\n            nv.deprecated('labelFormat','use valueFormat instead');\n        }},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = typeof _.top    != 'undefined' ? _.top    : margin.top;\n            margin.right  = typeof _.right  != 'undefined' ? _.right  : margin.right;\n            margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom;\n            margin.left   = typeof _.left   != 'undefined' ? 
_.left   : margin.left;\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n        }},\n        y: {get: function(){return getY;}, set: function(_){\n            getY=d3.functor(_);\n        }},\n        color: {get: function(){return color;}, set: function(_){\n            color=nv.utils.getColor(_);\n        }},\n        labelType:          {get: function(){return labelType;}, set: function(_){\n            labelType= _ || 'key';\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n    return chart;\n};\n","nv.models.pieChart = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var pie = nv.models.pie();\n    var legend = nv.models.legend();\n    var tooltip = nv.models.tooltip();\n\n    var margin = {top: 30, right: 20, bottom: 20, left: 20}\n        , marginTop = null\n        , width = null\n        , height = null\n        , showTooltipPercent = false\n        , showLegend = true\n        , legendPosition = \"top\"\n        , color = nv.utils.defaultColor()\n        , state = nv.utils.state()\n        , defaultState = null\n        , noData = null\n        , duration = 250\n        , dispatch = d3.dispatch('stateChange', 'changeState','renderEnd')\n        ;\n\n    tooltip\n        .duration(0)\n        .headerEnabled(false)\n        .valueFormatter(function(d, i) {\n            return pie.valueFormat()(d, i);\n        });\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch);\n\n    var stateGetter = function(data) {\n        return function(){\n            return {\n                active: data.map(function(d) { return !d.disabled })\n            };\n        }\n    };\n\n    var stateSetter = function(data) {\n        return function(state) {\n            if (state.active !== undefined) {\n                data.forEach(function (series, i) {\n                    series.disabled = !state.active[i];\n                });\n            }\n        }\n    };\n\n    //============================================================\n    // Chart function\n    //------------------------------------------------------------\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(pie);\n\n        selection.each(function(data) {\n            var container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            var that = this;\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            chart.update = function() { container.transition().call(chart); };\n            chart.container = this;\n\n            state.setter(stateSetter(data), chart.update)\n                .getter(stateGetter(data))\n                .update();\n\n            //set state.disabled\n            state.disabled = data.map(function(d) { return !!d.disabled });\n\n            if (!defaultState) {\n                var key;\n                defaultState = {};\n                for (key in state) {\n                    if (state[key] instanceof Array)\n                        defaultState[key] = 
state[key].slice(0);\n                    else\n                        defaultState[key] = state[key];\n                }\n            }\n\n            // Display No Data message if there's nothing to show.\n            if (!data || !data.length) {\n                nv.utils.noData(chart, container);\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-pieChart').data([data]);\n            var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-pieChart').append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-pieWrap');\n            gEnter.append('g').attr('class', 'nv-legendWrap');\n\n            // Legend\n            if (!showLegend) {\n                g.select('.nv-legendWrap').selectAll('*').remove();\n            } else {\n                if (legendPosition === \"top\") {\n                    legend.width( availableWidth ).key(pie.x());\n\n                    wrap.select('.nv-legendWrap')\n                        .datum(data)\n                        .call(legend);\n\n                    if (!marginTop && legend.height() !== margin.top) {\n                        margin.top = legend.height();\n                        availableHeight = nv.utils.availableHeight(height, container, margin);\n                    }\n\n                    wrap.select('.nv-legendWrap')\n                        .attr('transform', 'translate(0,' + (-margin.top) +')');\n                } else if (legendPosition === \"right\") {\n                    var legendWidth = nv.models.legend().width();\n                    if (availableWidth / 2 < legendWidth) {\n                        legendWidth = (availableWidth / 2)\n                    }\n                    legend.height(availableHeight).key(pie.x());\n                    legend.width(legendWidth);\n                    availableWidth -= legend.width();\n\n                    wrap.select('.nv-legendWrap')\n                        .datum(data)\n                        .call(legend)\n                        .attr('transform', 'translate(' + (availableWidth) +',0)');\n                }\n            }\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            // Main Chart Component(s)\n            pie.width(availableWidth).height(availableHeight);\n            var pieWrap = g.select('.nv-pieWrap').datum([data]);\n            d3.transition(pieWrap).call(pie);\n\n            //============================================================\n            // Event Handling/Dispatching (in chart's scope)\n            //------------------------------------------------------------\n\n            legend.dispatch.on('stateChange', function(newState) {\n                for (var key in newState) {\n                    state[key] = newState[key];\n                }\n                dispatch.stateChange(state);\n                chart.update();\n            });\n\n            // Update chart from a state object passed to event handler\n            dispatch.on('changeState', function(e) {\n                if (typeof e.disabled !== 'undefined') {\n                    data.forEach(function(series,i) {\n                        series.disabled = e.disabled[i];\n                    });\n                    state.disabled = e.disabled;\n                }\n                chart.update();\n           
 });\n        });\n\n        renderWatch.renderEnd('pieChart immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Event Handling/Dispatching (out of chart's scope)\n    //------------------------------------------------------------\n\n    pie.dispatch.on('elementMouseover.tooltip', function(evt) {\n        evt['series'] = {\n            key: chart.x()(evt.data),\n            value: chart.y()(evt.data),\n            color: evt.color,\n            percent: evt.percent\n        };\n        if (!showTooltipPercent) {\n            delete evt.percent;\n            delete evt.series.percent;\n        }\n        tooltip.data(evt).hidden(false);\n    });\n\n    pie.dispatch.on('elementMouseout.tooltip', function(evt) {\n        tooltip.hidden(true);\n    });\n\n    pie.dispatch.on('elementMousemove.tooltip', function(evt) {\n        tooltip();\n    });\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    // expose chart's sub-components\n    chart.legend = legend;\n    chart.dispatch = dispatch;\n    chart.pie = pie;\n    chart.tooltip = tooltip;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    // use Object get/set functionality to map between vars and chart functions\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:              {get: function(){return width;},                set: function(_){width=_;}},\n        height:             {get: function(){return height;},               set: function(_){height=_;}},\n        noData:             {get: function(){return noData;},               set: function(_){noData=_;}},\n        showTooltipPercent: {get: function(){return showTooltipPercent;},   set: function(_){showTooltipPercent=_;}},\n        showLegend:         {get: function(){return showLegend;},           set: function(_){showLegend=_;}},\n        legendPosition:     {get: function(){return legendPosition;},       set: function(_){legendPosition=_;}},\n        defaultState:       {get: function(){return defaultState;},         set: function(_){defaultState=_;}},\n\n        // options that require extra logic in the setter\n        color: {get: function(){return color;}, set: function(_){\n            color = _;\n            legend.color(color);\n            pie.color(color);\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n            pie.duration(duration);\n        }},\n        margin: {get: function(){return margin;}, set: function(_){\n            if (_.top !== undefined) {\n                margin.top = _.top;\n                marginTop = _.top;\n            }\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }}\n    });\n    nv.utils.inheritOptions(chart, pie);\n    nv.utils.initOptions(chart);\n    return chart;\n};\n","nv.models.sankey = function() {\n    'use strict';\n\n    // Sources:\n    // - https://bost.ocks.org/mike/sankey/\n    // - https://github.com/soxofaan/d3-plugin-captain-sankey\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var sankey = {},\n        nodeWidth = 24,\n        nodePadding = 8,\n        size = [1, 1],\n        nodes = [],\n        links = [],\n        sinksRight = true;\n\n    var layout = function(iterations) {\n        computeNodeLinks();\n        computeNodeValues();\n        computeNodeBreadths();\n        computeNodeDepths(iterations);\n    };\n\n    var relayout = function() {\n        computeLinkDepths();\n    };\n\n    // SVG path data generator, to be used as 'd' attribute on 'path' element selection.\n    var link = function() {\n        var curvature = .5;\n\n        function link(d) {\n\n            var x0 = d.source.x + d.source.dx,\n                x1 = d.target.x,\n                xi = d3.interpolateNumber(x0, x1),\n                x2 = xi(curvature),\n                x3 = xi(1 - curvature),\n                y0 = d.source.y + d.sy + d.dy / 2,\n                y1 = d.target.y + d.ty + d.dy / 2;\n            var linkPath = 'M' + x0 + ',' + y0\n                + 'C' + x2 + ',' + y0\n                + ' ' + x3 + ',' + y1\n                + ' ' + x1 + ',' + y1;\n            return linkPath;\n        }\n\n        link.curvature = function(_) {\n            if (!arguments.length) return curvature;\n            curvature = +_;\n            return link;\n        };\n\n        return link;\n    };\n\n    // Y-position of the middle of a node.\n    var center = function(node) {\n        return node.y + node.dy / 2;\n    };\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    // Populate the sourceLinks and targetLinks for each node.\n    // Also, if the source and target are not objects, assume they are indices.\n    function computeNodeLinks() {\n        nodes.forEach(function(node) {\n            // Links that have this node as source.\n            node.sourceLinks = [];\n            // Links that have this node as target.\n            node.targetLinks = [];\n        });\n        links.forEach(function(link) {\n            var source = link.source,\n                target = link.target;\n            if (typeof source === 'number') source = link.source = nodes[link.source];\n            if (typeof target === 'number') target = link.target = nodes[link.target];\n            source.sourceLinks.push(link);\n            target.targetLinks.push(link);\n        });\n    }\n\n    // Compute the value (size) of each node by summing the associated links.\n    function computeNodeValues() {\n        nodes.forEach(function(node) {\n            node.value = Math.max(\n                d3.sum(node.sourceLinks, value),\n                d3.sum(node.targetLinks, value)\n            );\n        });\n    }\n\n    // Iteratively assign the breadth (x-position) for each node.\n    // Nodes are assigned the maximum breadth of incoming neighbors plus one;\n    // nodes with no incoming links are assigned breadth zero, while\n    // nodes with no outgoing links are assigned the maximum 
breadth.\n    function computeNodeBreadths() {\n        //\n        var remainingNodes = nodes,\n            nextNodes,\n            x = 0;\n\n        // Work from left to right.\n        // Keep updating the breadth (x-position) of nodes that are target of recently updated nodes.\n        //\n        while (remainingNodes.length && x < nodes.length) {\n            nextNodes = [];\n            remainingNodes.forEach(function(node) {\n                node.x = x;\n                node.dx = nodeWidth;\n                node.sourceLinks.forEach(function(link) {\n                    if (nextNodes.indexOf(link.target) < 0) {\n                        nextNodes.push(link.target);\n                    }\n                });\n            });\n            remainingNodes = nextNodes;\n            ++x;\n            //\n        }\n\n        // Optionally move pure sinks always to the right.\n        if (sinksRight) {\n            moveSinksRight(x);\n        }\n\n        scaleNodeBreadths((size[0] - nodeWidth) / (x - 1));\n    }\n\n    function moveSourcesRight() {\n        nodes.forEach(function(node) {\n            if (!node.targetLinks.length) {\n                node.x = d3.min(node.sourceLinks, function(d) { return d.target.x; }) - 1;\n            }\n        });\n    }\n\n    function moveSinksRight(x) {\n        nodes.forEach(function(node) {\n            if (!node.sourceLinks.length) {\n                node.x = x - 1;\n            }\n        });\n    }\n\n    function scaleNodeBreadths(kx) {\n        nodes.forEach(function(node) {\n            node.x *= kx;\n        });\n    }\n\n    // Compute the depth (y-position) for each node.\n    function computeNodeDepths(iterations) {\n        // Group nodes by breadth.\n        var nodesByBreadth = d3.nest()\n            .key(function(d) { return d.x; })\n            .sortKeys(d3.ascending)\n            .entries(nodes)\n            .map(function(d) { return d.values; });\n\n        //\n        initializeNodeDepth();\n        resolveCollisions();\n        computeLinkDepths();\n        for (var alpha = 1; iterations > 0; --iterations) {\n            relaxRightToLeft(alpha *= .99);\n            resolveCollisions();\n            computeLinkDepths();\n            relaxLeftToRight(alpha);\n            resolveCollisions();\n            computeLinkDepths();\n        }\n\n        function initializeNodeDepth() {\n            // Calculate vertical scaling factor.\n            var ky = d3.min(nodesByBreadth, function(nodes) {\n                return (size[1] - (nodes.length - 1) * nodePadding) / d3.sum(nodes, value);\n            });\n\n            nodesByBreadth.forEach(function(nodes) {\n                nodes.forEach(function(node, i) {\n                    node.y = i;\n                    node.dy = node.value * ky;\n                });\n            });\n\n            links.forEach(function(link) {\n                link.dy = link.value * ky;\n            });\n        }\n\n        function relaxLeftToRight(alpha) {\n            nodesByBreadth.forEach(function(nodes, breadth) {\n                nodes.forEach(function(node) {\n                    if (node.targetLinks.length) {\n                        // Value-weighted average of the y-position of source node centers linked to this node.\n                        var y = d3.sum(node.targetLinks, weightedSource) / d3.sum(node.targetLinks, value);\n                        node.y += (y - center(node)) * alpha;\n                    }\n                });\n            });\n\n            function weightedSource(link) {\n
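                // [editor's note] weightedSource returns each incoming link's
                // y-midpoint weighted by its value, so the division in
                // relaxLeftToRight above is a value-weighted mean. For example,
                // two incoming links centered at y=10 (value 3) and y=40
                // (value 1) pull the node toward (10*3 + 40*1) / (3+1) = 17.5,
                // scaled by the decaying alpha (hypothetical numbers, for
                // illustration only).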
         return (link.source.y + link.sy + link.dy / 2) * link.value;\n            }\n        }\n\n        function relaxRightToLeft(alpha) {\n            nodesByBreadth.slice().reverse().forEach(function(nodes) {\n                nodes.forEach(function(node) {\n                    if (node.sourceLinks.length) {\n                        // Value-weighted average of the y-positions of target nodes linked to this node.\n                        var y = d3.sum(node.sourceLinks, weightedTarget) / d3.sum(node.sourceLinks, value);\n                        node.y += (y - center(node)) * alpha;\n                    }\n                });\n            });\n\n            function weightedTarget(link) {\n                return (link.target.y + link.ty + link.dy / 2) * link.value;\n            }\n        }\n\n        function resolveCollisions() {\n            nodesByBreadth.forEach(function(nodes) {\n                var node,\n                    dy,\n                    y0 = 0,\n                    n = nodes.length,\n                    i;\n\n                // Push any overlapping nodes down.\n                nodes.sort(ascendingDepth);\n                for (i = 0; i < n; ++i) {\n                    node = nodes[i];\n                    dy = y0 - node.y;\n                    if (dy > 0) node.y += dy;\n                    y0 = node.y + node.dy + nodePadding;\n                }\n\n                // If the bottommost node goes outside the bounds, push it back up.\n                dy = y0 - nodePadding - size[1];\n                if (dy > 0) {\n                    y0 = node.y -= dy;\n\n                    // Push any overlapping nodes back up.\n                    for (i = n - 2; i >= 0; --i) {\n                        node = nodes[i];\n                        dy = node.y + node.dy + nodePadding - y0;\n                        if (dy > 0) node.y -= dy;\n                        y0 = node.y;\n                    }\n                }\n            });\n        }\n\n        function ascendingDepth(a, b) {\n            return a.y - b.y;\n        }\n    }\n\n    // Compute y-offset of the source endpoint (sy) and target endpoints (ty) of links,\n    // relative to the source/target node's y-position.\n    function computeLinkDepths() {\n        nodes.forEach(function(node) {\n            node.sourceLinks.sort(ascendingTargetDepth);\n            node.targetLinks.sort(ascendingSourceDepth);\n        });\n        nodes.forEach(function(node) {\n            var sy = 0, ty = 0;\n            node.sourceLinks.forEach(function(link) {\n                link.sy = sy;\n                sy += link.dy;\n            });\n            node.targetLinks.forEach(function(link) {\n                link.ty = ty;\n                ty += link.dy;\n            });\n        });\n\n        function ascendingSourceDepth(a, b) {\n            return a.source.y - b.source.y;\n        }\n\n        function ascendingTargetDepth(a, b) {\n            return a.target.y - b.target.y;\n        }\n    }\n\n    // Value property accessor.\n    function value(x) {\n        return x.value;\n    }\n\n    sankey.options = nv.utils.optionsFunc.bind(sankey);\n    sankey._options = Object.create({}, {\n        nodeWidth:    {get: function(){return nodeWidth;},   set: function(_){nodeWidth=+_;}},\n        nodePadding:  {get: function(){return nodePadding;}, set: function(_){nodePadding=_;}},\n        nodes:        {get: function(){return nodes;},       set: function(_){nodes=_;}},\n        links:        {get: function(){return links ;},      set: 
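        // [editor's note] `nodes` and `links` are the sankey's only required
        // inputs; each link may reference its endpoints either as node objects
        // or as indices into the nodes array (resolved in computeNodeLinks).
        // A minimal input sketch (hypothetical data, not executed here):
        //
        //   sankey.nodes([{name: 'a'}, {name: 'b'}])
        //         .links([{source: 0, target: 1, value: 10}])
        //         .layout(32);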
function(_){links=_;}},\n        size:         {get: function(){return size;},        set: function(_){size=_;}},\n        sinksRight:   {get: function(){return sinksRight;},  set: function(_){sinksRight=_;}},\n\n        layout:       {get: function(){layout(32);},         set: function(_){layout(_);}},\n        relayout:     {get: function(){relayout();},         set: function(_){}},\n        center:       {get: function(){return center();},    set: function(_){\n            if(typeof _ === 'function'){\n                center=_;\n            }\n        }},\n        link:         {get: function(){return link();},      set: function(_){\n            if(typeof _ === 'function'){\n                link=_;\n            }\n            return link();\n        }}\n    });\n\n    nv.utils.initOptions(sankey);\n\n    return sankey;\n};\n","nv.models.sankeyChart = function() {\n    \"use strict\";\n\n    // Sources:\n    // - https://bost.ocks.org/mike/sankey/\n    // - https://github.com/soxofaan/d3-plugin-captain-sankey\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 5, right: 0, bottom: 5, left: 0}\n        , sankey = nv.models.sankey()\n        , width = 600\n        , height = 400\n        , nodeWidth = 36\n        , nodePadding =  40\n        , units = 'units'\n        , center = undefined\n        ;\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var formatNumber = d3.format(',.0f');    // zero decimal places\n    var format = function(d) {\n        return formatNumber(d) + ' ' + units;\n    };\n    var color = d3.scale.category20();\n    var linkTitle = function(d){\n        return d.source.name + ' → ' + d.target.name + '\\n' + format(d.value);\n    };\n    var nodeFillColor = function(d){\n        return d.color = color(d.name.replace(/ .*/, ''));\n    };\n    var nodeStrokeColor = function(d){\n        return d3.rgb(d.color).darker(2);\n    };\n    var nodeTitle = function(d){\n        return d.name + '\\n' + format(d.value);\n    };\n\n    var showError = function(element, message) {\n        element.append('text')\n            .attr('x', 0)\n            .attr('y', 0)\n            .attr('class', 'nvd3-sankey-chart-error')\n            .attr('text-anchor', 'middle')\n            .text(message);\n    };\n\n    function chart(selection) {\n        selection.each(function(data) {\n\n            var testData = {\n                nodes:\n                    [\n                        {'node': 1, 'name': 'Test 1'},\n                        {'node': 2, 'name': 'Test 2'},\n                        {'node': 3, 'name': 'Test 3'},\n                        {'node': 4, 'name': 'Test 4'},\n                        {'node': 5, 'name': 'Test 5'},\n                        {'node': 6, 'name': 'Test 6'}\n                    ],\n                links:\n                    [\n                        {'source': 0, 'target': 1, 'value': 2295},\n                        {'source': 0, 'target': 5, 'value': 1199},\n                        {'source': 1, 'target': 2, 'value': 1119},\n                        {'source': 1, 'target': 5, 'value': 1176},\n                        {'source': 2, 'target': 3, 'value': 487},\n                        {'source': 2, 'target': 5, 'value': 632},\n                        {'source': 3, 'target': 4, 
'value': 301},\n                        {'source': 3, 'target': 5, 'value': 186}\n                    ]\n            };\n\n            // Error handling\n            var isDataValid = false;\n            var dataAvailable = false;\n\n            // check if data is valid\n            if(\n                typeof data['nodes'] === 'object' && data['nodes'].length >= 0 &&\n                typeof data['links'] === 'object' && data['links'].length >= 0\n            ){\n                isDataValid = true;\n            }\n\n            // check if data is available\n            if(\n                data['nodes'] && data['nodes'].length > 0 &&\n                data['links'] && data['links'].length > 0\n            ) {\n                dataAvailable = true;\n            }\n\n            // show error\n            if(!isDataValid) {\n                console.error('NVD3 Sankey chart error:', 'invalid data format for', data);\n                console.info('Valid data format is: ', testData, JSON.stringify(testData));\n                showError(selection, 'Error loading chart, data is invalid');\n                return false;\n            }\n\n            // TODO use nv.utils.noData\n            if(!dataAvailable) {\n                showError(selection, 'No data available');\n                return false;\n            }\n\n            // No errors, continue\n\n            // append the svg canvas to the page\n            var svg = selection.append('svg')\n                .attr('width', width)\n                .attr('height', height)\n                .append('g')\n                .attr('class', 'nvd3 nv-wrap nv-sankeyChart');\n\n            // Set the sankey diagram properties\n            sankey\n                .nodeWidth(nodeWidth)\n                .nodePadding(nodePadding)\n                .size([width, height]);\n\n            var path = sankey.link();\n\n            sankey\n                .nodes(data.nodes)\n                .links(data.links)\n                .layout(32)\n                .center(center);\n\n            // add in the links\n            var link = svg.append('g').selectAll('.link')\n                .data(data.links)\n                .enter().append('path')\n                .attr('class', 'link')\n                .attr('d', path)\n                .style('stroke-width', function(d) { return Math.max(1, d.dy); })\n                .sort(function(a,b) { return b.dy - a.dy; });\n\n            // add the link titles\n            link.append('title')\n                .text(linkTitle);\n\n            // add in the nodes\n            var node = svg.append('g').selectAll('.node')\n                .data(data.nodes)\n                .enter().append('g')\n                .attr('class', 'node')\n                .attr('transform', function(d) { return 'translate(' + d.x + ',' + d.y + ')'; })\n                .call(\n                    d3.behavior\n                        .drag()\n                        .origin(function(d) { return d; })\n                        .on('dragstart', function() {\n                            this.parentNode.appendChild(this);\n                        })\n                        .on('drag', dragmove)\n                );\n\n            // add the rectangles for the nodes\n            node.append('rect')\n                .attr('height', function(d) { return d.dy; })\n                .attr('width', sankey.nodeWidth())\n                .style('fill', nodeFillColor)\n                .style('stroke', nodeStrokeColor)\n                .append('title')\n
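                // [editor's note] appending an SVG <title> child gives native
                // browser tooltips on hover; nodeTitle (bound just below)
                // formats it as "<name>\n<value> units" via the chart's
                // format/units helpers.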
.text(nodeTitle);\n\n            // add in the title for the nodes\n            node.append('text')\n                .attr('x', -6)\n                .attr('y', function(d) { return d.dy / 2; })\n                .attr('dy', '.35em')\n                .attr('text-anchor', 'end')\n                .attr('transform', null)\n                .text(function(d) { return d.name; })\n                .filter(function(d) { return d.x < width / 2; })\n                .attr('x', 6 + sankey.nodeWidth())\n                .attr('text-anchor', 'start');\n\n            // the function for moving the nodes\n            function dragmove(d) {\n                d3.select(this).attr('transform',\n                'translate(' + d.x + ',' + (\n                    d.y = Math.max(0, Math.min(height - d.dy, d3.event.y))\n                ) + ')');\n                sankey.relayout();\n                link.attr('d', path);\n            }\n        });\n\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        units:           {get: function(){return units;},       set: function(_){units=_;}},\n        width:           {get: function(){return width;},       set: function(_){width=_;}},\n        height:          {get: function(){return height;},      set: function(_){height=_;}},\n        format:          {get: function(){return format;},      set: function(_){format=_;}},\n        linkTitle:       {get: function(){return linkTitle;},   set: function(_){linkTitle=_;}},\n        nodeWidth:       {get: function(){return nodeWidth;},   set: function(_){nodeWidth=_;}},\n        nodePadding:     {get: function(){return nodePadding;}, set: function(_){nodePadding=_;}},\n        center:          {get: function(){return center},       set: function(_){center=_}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? _.left   : margin.left;\n        }},\n        nodeStyle: {get: function(){return {};}, set: function(_){\n            nodeFillColor   = _.fillColor   !== undefined ? _.fillColor   : nodeFillColor;\n            nodeStrokeColor = _.strokeColor !== undefined ? _.strokeColor : nodeStrokeColor;\n            nodeTitle       = _.title       !== undefined ? 
_.title       : nodeTitle;\n        }}\n\n    });\n\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","\nnv.models.scatter = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin       = {top: 0, right: 0, bottom: 0, left: 0}\n        , width        = null\n        , height       = null\n        , color        = nv.utils.defaultColor() // chooses color\n        , pointBorderColor = null\n        , id           = Math.floor(Math.random() * 100000) //Create semi-unique ID in case user doesn't select one\n        , container    = null\n        , x            = d3.scale.linear()\n        , y            = d3.scale.linear()\n        , z            = d3.scale.linear() //linear because d3.svg.shape.size is treated as area\n        , getX         = function(d) { return d.x } // accessor to get the x value\n        , getY         = function(d) { return d.y } // accessor to get the y value\n        , getSize      = function(d) { return d.size || 1} // accessor to get the point size\n        , getShape     = function(d) { return d.shape || 'circle' } // accessor to get point shape\n        , forceX       = [] // List of numbers to Force into the X scale (i.e. 0, or a max / min, etc.)\n        , forceY       = [] // List of numbers to Force into the Y scale\n        , forceSize    = [] // List of numbers to Force into the Size scale\n        , interactive  = true // If true, plots a voronoi overlay for advanced point intersection\n        , pointActive  = function(d) { return !d.notActive } // any points that return false will be filtered out\n        , padData      = false // If true, adds half a data point's width to front and back, for lining up a line chart with a bar chart\n        , padDataOuter = .1 //outerPadding to imitate ordinal scale outer padding\n        , clipEdge     = false // if true, masks points within x and y scale\n        , clipVoronoi  = true // if true, masks each point with a circle... 
can turn off to slightly increase performance\n        , showVoronoi  = false // display the voronoi areas\n        , clipRadius   = function() { return 25 } // function to get the radius for voronoi point clips\n        , xDomain      = null // Override x domain (skips the calculation from data)\n        , yDomain      = null // Override y domain\n        , xRange       = null // Override x range\n        , yRange       = null // Override y range\n        , sizeDomain   = null // Override point size domain\n        , sizeRange    = null\n        , singlePoint  = false\n        , dispatch     = d3.dispatch('elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout', 'renderEnd')\n        , useVoronoi   = true\n        , duration     = 250\n        , interactiveUpdateDelay = 300\n        , showLabels    = false\n        ;\n\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var x0, y0, z0 // used to store previous scales\n        , width0\n        , height0\n        , timeoutID\n        , needsUpdate = false // Flag for when the points are visually updating, but the interactive layer is behind, to disable tooltips\n        , renderWatch = nv.utils.renderWatch(dispatch, duration)\n        , _sizeRange_def = [16, 256]\n        , _cache = {}\n        ;\n\n    function getCache(d) {\n        var key, val;\n        key = d[0].series + ':' + d[1];\n        val = _cache[key] = _cache[key] || {};\n        return val;\n    }\n\n    function delCache(d) {\n        var key, val;\n        key = d[0].series + ':' + d[1];\n        delete _cache[key];\n    }\n\n    function getDiffs(d) {\n        var i, key, val,\n            cache = getCache(d),\n            diffs = false;\n        for (i = 1; i < arguments.length; i += 2) {\n            key = arguments[i];\n            val = arguments[i + 1](d[0], d[1]);\n            if (cache[key] !== val || !cache.hasOwnProperty(key)) {\n                cache[key] = val;\n                diffs = true;\n            }\n        }\n        return diffs;\n    }\n\n    function chart(selection) {\n        renderWatch.reset();\n        selection.each(function(data) {\n            container = d3.select(this);\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            nv.utils.initSVG(container);\n\n            //add series index to each data point for reference\n            data.forEach(function(series, i) {\n                series.values.forEach(function(point) {\n                    point.series = i;\n                });\n            });\n\n            // Setup Scales\n            var logScale = chart.yScale().name === d3.scale.log().name ? true : false;\n            // remap and flatten the data for use in calculating the scales' domains\n            var seriesData = (xDomain && yDomain && sizeDomain) ? [] : // if we know xDomain and yDomain and sizeDomain, no need to calculate.... 
if Size is constant remember to set sizeDomain to speed up performance\n                d3.merge(\n                    data.map(function(d) {\n                        return d.values.map(function(d,i) {\n                            return { x: getX(d,i), y: getY(d,i), size: getSize(d,i) }\n                        })\n                    })\n                );\n\n            x   .domain(xDomain || d3.extent(seriesData.map(function(d) { return d.x; }).concat(forceX)))\n\n            if (padData && data[0])\n                x.range(xRange || [(availableWidth * padDataOuter +  availableWidth) / (2 *data[0].values.length), availableWidth - availableWidth * (1 + padDataOuter) / (2 * data[0].values.length)  ]);\n            //x.range([availableWidth * .5 / data[0].values.length, availableWidth * (data[0].values.length - .5)  / data[0].values.length ]);\n            else\n                x.range(xRange || [0, availableWidth]);\n\n             if (logScale) {\n                    var min = d3.min(seriesData.map(function(d) { if (d.y !== 0) return d.y; }));\n                    y.clamp(true)\n                        .domain(yDomain || d3.extent(seriesData.map(function(d) {\n                            if (d.y !== 0) return d.y;\n                            else return min * 0.1;\n                        }).concat(forceY)))\n                        .range(yRange || [availableHeight, 0]);\n                } else {\n                        y.domain(yDomain || d3.extent(seriesData.map(function (d) { return d.y;}).concat(forceY)))\n                        .range(yRange || [availableHeight, 0]);\n                }\n\n            z   .domain(sizeDomain || d3.extent(seriesData.map(function(d) { return d.size }).concat(forceSize)))\n                .range(sizeRange || _sizeRange_def);\n\n            // If scale's domain don't have a range, slightly adjust to make one... 
so a chart can show a single data point\n            singlePoint = x.domain()[0] === x.domain()[1] || y.domain()[0] === y.domain()[1];\n\n            if (x.domain()[0] === x.domain()[1])\n                x.domain()[0] ?\n                    x.domain([x.domain()[0] - x.domain()[0] * 0.01, x.domain()[1] + x.domain()[1] * 0.01])\n                    : x.domain([-1,1]);\n\n            if (y.domain()[0] === y.domain()[1])\n                y.domain()[0] ?\n                    y.domain([y.domain()[0] - y.domain()[0] * 0.01, y.domain()[1] + y.domain()[1] * 0.01])\n                    : y.domain([-1,1]);\n\n            if ( isNaN(x.domain()[0])) {\n                x.domain([-1,1]);\n            }\n\n            if ( isNaN(y.domain()[0])) {\n                y.domain([-1,1]);\n            }\n\n            x0 = x0 || x;\n            y0 = y0 || y;\n            z0 = z0 || z;\n\n            var scaleDiff = x(1) !== x0(1) || y(1) !== y0(1) || z(1) !== z0(1);\n\n            width0 = width0 || width;\n            height0 = height0 || height;\n\n            var sizeDiff = width0 !== width || height0 !== height;\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-scatter').data([data]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-scatter nv-chart-' + id);\n            var defsEnter = wrapEnter.append('defs');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            wrap.classed('nv-single-point', singlePoint);\n            gEnter.append('g').attr('class', 'nv-groups');\n            gEnter.append('g').attr('class', 'nv-point-paths');\n            wrapEnter.append('g').attr('class', 'nv-point-clips');\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            defsEnter.append('clipPath')\n                .attr('id', 'nv-edge-clip-' + id)\n                .append('rect')\n                .attr('transform', 'translate( -10, -10)');\n                \n            wrap.select('#nv-edge-clip-' + id + ' rect')\n                .attr('width', availableWidth + 20)\n                .attr('height', (availableHeight > 0) ? availableHeight + 20 : 0);\n\n            g.attr('clip-path', clipEdge ? 
'url(#nv-edge-clip-' + id + ')' : '');\n\n            function updateInteractiveLayer() {\n                // Always clear needs-update flag regardless of whether or not\n                // we will actually do anything (avoids needless invocations).\n                needsUpdate = false;\n\n                if (!interactive) return false;\n\n                // inject series and point index for reference into voronoi\n                if (useVoronoi === true) {\n                    var vertices = d3.merge(data.map(function(group, groupIndex) {\n                            return group.values\n                                .map(function(point, pointIndex) {\n                                    // *Adding noise to make duplicates very unlikely\n                                    // *Injecting series and point index for reference\n                                    /* *Adding a 'jitter' to the points, because there's an issue in d3.geom.voronoi.\n                                     */\n                                    var pX = getX(point,pointIndex);\n                                    var pY = getY(point,pointIndex);\n\n                                    return [nv.utils.NaNtoZero(x(pX))+ Math.random() * 1e-4,\n                                            nv.utils.NaNtoZero(y(pY))+ Math.random() * 1e-4,\n                                        groupIndex,\n                                        pointIndex, point]; //temp hack to add noise until I think of a better way so there are no duplicates\n                                })\n                                .filter(function(pointArray, pointIndex) {\n                                    return pointActive(pointArray[4], pointIndex); // Issue #237.. move filter to after map, so pointIndex is correct!\n                                })\n                        })\n                    );\n\n                    if (vertices.length == 0) return false;  // No active points, we're done\n                    if (vertices.length < 3) {\n                        // Issue #283 - Adding 2 dummy points to the voronoi b/c voronoi requires min 3 points to work\n                        vertices.push([x.range()[0] - 20, y.range()[0] - 20, null, null]);\n                        vertices.push([x.range()[1] + 20, y.range()[1] + 20, null, null]);\n                        vertices.push([x.range()[0] - 20, y.range()[0] + 20, null, null]);\n                        vertices.push([x.range()[1] + 20, y.range()[1] - 20, null, null]);\n                    }\n\n                    // keep voronoi sections from going more than 10 outside of graph\n                    // to avoid overlap with other things like legend etc\n                    var bounds = d3.geom.polygon([\n                        [-10,-10],\n                        [-10,height + 10],\n                        [width + 10,height + 10],\n                        [width + 10,-10]\n                    ]);\n\n                    var voronoi = d3.geom.voronoi(vertices).map(function(d, i) {\n                        return {\n                            'data': bounds.clip(d),\n                            'series': vertices[i][2],\n                            'point': vertices[i][3]\n                        }\n                    });\n\n                    // nuke all voronoi paths on reload and recreate them\n                    wrap.select('.nv-point-paths').selectAll('path').remove();\n                    var pointPaths = wrap.select('.nv-point-paths').selectAll('path').data(voronoi);\n                   
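                    // [editor's note] This is the classic voronoi hover-target
                    // technique: every pixel of the plot maps to its nearest
                    // point, so small points get large, easy-to-hit mouse
                    // targets. d3.geom.voronoi (d3 v3) returns one polygon per
                    // input vertex as an array of [x, y] pairs, which
                    // bounds.clip() above trims to the padded chart rectangle
                    // before the polygons are drawn as invisible <path>
                    // elements below.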
 var vPointPaths = pointPaths\n                        .enter().append(\"svg:path\")\n                        .attr(\"d\", function(d) {\n                            if (!d || !d.data || d.data.length === 0)\n                                return 'M 0 0';\n                            else\n                                return \"M\" + d.data.join(\",\") + \"Z\";\n                        })\n                        .attr(\"id\", function(d,i) {\n                            return \"nv-path-\"+i; })\n                        .attr(\"clip-path\", function(d,i) { return \"url(#nv-clip-\"+id+\"-\"+i+\")\"; })\n                        ;\n\n                    // good for debugging point hover issues\n                    if (showVoronoi) {\n                        vPointPaths.style(\"fill\", d3.rgb(230, 230, 230))\n                            .style('fill-opacity', 0.4)\n                            .style('stroke-opacity', 1)\n                            .style(\"stroke\", d3.rgb(200,200,200));\n                    }\n\n                    if (clipVoronoi) {\n                        // voronoi sections are already set to clip,\n                        // just create the circles with the IDs they expect\n                        wrap.select('.nv-point-clips').selectAll('*').remove(); // must do * since it has sub-dom\n                        var pointClips = wrap.select('.nv-point-clips').selectAll('clipPath').data(vertices);\n                        var vPointClips = pointClips\n                            .enter().append(\"svg:clipPath\")\n                            .attr(\"id\", function(d, i) { return \"nv-clip-\"+id+\"-\"+i;})\n                            .append(\"svg:circle\")\n                            .attr('cx', function(d) { return d[0]; })\n                            .attr('cy', function(d) { return d[1]; })\n                            .attr('r', clipRadius);\n                    }\n\n                    var mouseEventCallback = function(el, d, mDispatch) {\n                        if (needsUpdate) return 0;\n                        var series = data[d.series];\n                        if (series === undefined) return;\n                        var point  = series.values[d.point];\n                        point['color'] = color(series, d.series);\n\n                        // standardize attributes for tooltip.\n                        point['x'] = getX(point);\n                        point['y'] = getY(point);\n\n                        // can't just get box of event node since it's actually a voronoi polygon\n                        var box = container.node().getBoundingClientRect();\n                        var scrollTop  = window.pageYOffset || document.documentElement.scrollTop;\n                        var scrollLeft = window.pageXOffset || document.documentElement.scrollLeft;\n\n                        var pos = {\n                            left: x(getX(point, d.point)) + box.left + scrollLeft + margin.left + 10,\n                            top: y(getY(point, d.point)) + box.top + scrollTop + margin.top + 10\n                        };\n\n                        mDispatch({\n                            point: point,\n                            series: series,\n                            pos: pos,\n                            relativePos: [x(getX(point, d.point)) + margin.left, y(getY(point, d.point)) + margin.top],\n                            seriesIndex: d.series,\n                            pointIndex: d.point,\n                            event: d3.event,\n         
element: el\n                        });\n                    };\n\n                    pointPaths\n                        .on('click', function(d) {\n                            mouseEventCallback(this, d, dispatch.elementClick);\n                        })\n                        .on('dblclick', function(d) {\n                            mouseEventCallback(this, d, dispatch.elementDblClick);\n                        })\n                        .on('mouseover', function(d) {\n                            mouseEventCallback(this, d, dispatch.elementMouseover);\n                        })\n                        .on('mouseout', function(d, i) {\n                            mouseEventCallback(this, d, dispatch.elementMouseout);\n                        });\n\n                } else {\n                    // add event handlers to points instead of voronoi paths\n                    wrap.select('.nv-groups').selectAll('.nv-group')\n                        .selectAll('.nv-point')\n                        //.data(dataWithPoints)\n                        //.style('pointer-events', 'auto') // reactivate events, disabled by css\n                        .on('click', function(d,i) {\n                            //nv.log('test', d, i);\n                            if (needsUpdate || !data[d.series]) return 0; //check if this is a dummy point\n                            var series = data[d.series],\n                                point  = series.values[i];\n                            var element = this;\n                            dispatch.elementClick({\n                                point: point,\n                                series: series,\n                                pos: [x(getX(point, i)) + margin.left, y(getY(point, i)) + margin.top], //TODO: make this pos based on the page\n                                relativePos: [x(getX(point, i)) + margin.left, y(getY(point, i)) + margin.top],\n                                seriesIndex: d.series,\n                                pointIndex: i,\n                                event: d3.event,\n                                element: element\n                            });\n                        })\n                        .on('dblclick', function(d,i) {\n                            if (needsUpdate || !data[d.series]) return 0; //check if this is a dummy point\n                            var series = data[d.series],\n                                point  = series.values[i];\n\n                            dispatch.elementDblClick({\n                                point: point,\n                                series: series,\n                                pos: [x(getX(point, i)) + margin.left, y(getY(point, i)) + margin.top],//TODO: make this pos based on the page\n                                relativePos: [x(getX(point, i)) + margin.left, y(getY(point, i)) + margin.top],\n                                seriesIndex: d.series,\n                                pointIndex: i\n                            });\n                        })\n                        .on('mouseover', function(d,i) {\n                            if (needsUpdate || !data[d.series]) return 0; //check if this is a dummy point\n                            var series = data[d.series],\n                                point  = series.values[i];\n\n                            dispatch.elementMouseover({\n                                point: point,\n                                series: series,\n                                pos: 
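                                // [editor's note] every handler above dispatches
                                // the same payload shape: {point, series, pos,
                                // relativePos, seriesIndex, pointIndex}. A host
                                // page can subscribe directly, e.g.:
                                //   chart.dispatch.on('elementClick', function(e) {
                                //     console.log(e.seriesIndex, e.pointIndex);
                                //   });
                                // (sketch only; `chart` here is the scatter model)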
[x(getX(point, i)) + margin.left, y(getY(point, i)) + margin.top],//TODO: make this pos based on the page\n                                relativePos: [x(getX(point, i)) + margin.left, y(getY(point, i)) + margin.top],\n                                seriesIndex: d.series,\n                                pointIndex: i,\n                                color: color(d, i)\n                            });\n                        })\n                        .on('mouseout', function(d,i) {\n                            if (needsUpdate || !data[d.series]) return 0; //check if this is a dummy point\n                            var series = data[d.series],\n                                point  = series.values[i];\n\n                            dispatch.elementMouseout({\n                                point: point,\n                                series: series,\n                                pos: [x(getX(point, i)) + margin.left, y(getY(point, i)) + margin.top],//TODO: make this pos based on the page\n                                relativePos: [x(getX(point, i)) + margin.left, y(getY(point, i)) + margin.top],\n                                seriesIndex: d.series,\n                                pointIndex: i,\n                                color: color(d, i)\n                            });\n                        });\n                }\n            }\n\n            needsUpdate = true;\n            var groups = wrap.select('.nv-groups').selectAll('.nv-group')\n                .data(function(d) { return d }, function(d) { return d.key });\n            groups.enter().append('g')\n                .style('stroke-opacity', 1e-6)\n                .style('fill-opacity', 1e-6);\n            groups.exit()\n                .remove();\n            groups\n                .attr('class', function(d,i) {\n                    return (d.classed || '') + ' nv-group nv-series-' + i;\n                })\n                .classed('nv-noninteractive', !interactive)\n                .classed('hover', function(d) { return d.hover });\n            groups.watchTransition(renderWatch, 'scatter: groups')\n                .style('fill', function(d,i) { return color(d, i) })\n                .style('stroke', function(d,i) { return d.pointBorderColor || pointBorderColor || color(d, i) })\n                .style('stroke-opacity', 1)\n                .style('fill-opacity', .5);\n\n            // create the points, maintaining their IDs from the original data set\n            var points = groups.selectAll('path.nv-point')\n                .data(function(d) {\n                    return d.values.map(\n                        function (point, pointIndex) {\n                            return [point, pointIndex]\n                        }).filter(\n                            function(pointArray, pointIndex) {\n                                return pointActive(pointArray[0], pointIndex)\n                            })\n                    });\n            points.enter().append('path')\n                .attr('class', function (d) {\n                    return 'nv-point nv-point-' + d[1];\n                })\n                .style('fill', function (d) { return d.color })\n                .style('stroke', function (d) { return d.color })\n                .attr('transform', function(d) {\n                    return 'translate(' + nv.utils.NaNtoZero(x0(getX(d[0],d[1]))) + ',' + nv.utils.NaNtoZero(y0(getY(d[0],d[1]))) + ')'\n                })\n                .attr('d',\n                    nv.utils.symbol()\n
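                    // [editor's note] nv.utils.symbol() works like d3.svg.symbol:
                    // .size() is an *area* in px^2 (hence the z scale's default
                    // [16, 256] range above), not a radius; the label code below
                    // recovers a radius with Math.sqrt(area / Math.PI).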
       .type(function(d) { return getShape(d[0]); })\n                    .size(function(d) { return z(getSize(d[0],d[1])) })\n            );\n            points.exit().each(delCache).remove();\n            groups.exit().selectAll('path.nv-point')\n                .watchTransition(renderWatch, 'scatter exit')\n                .attr('transform', function(d) {\n                    return 'translate(' + nv.utils.NaNtoZero(x(getX(d[0],d[1]))) + ',' + nv.utils.NaNtoZero(y(getY(d[0],d[1]))) + ')'\n                })\n                .remove();\n            // Update points position only if \"x\" or \"y\" have changed\n            points.filter(function (d) { return scaleDiff || sizeDiff || getDiffs(d, 'x', getX, 'y', getY); })\n                .watchTransition(renderWatch, 'scatter points')\n                .attr('transform', function(d) {\n                    //nv.log(d, getX(d[0],d[1]), x(getX(d[0],d[1])));\n                    return 'translate(' + nv.utils.NaNtoZero(x(getX(d[0],d[1]))) + ',' + nv.utils.NaNtoZero(y(getY(d[0],d[1]))) + ')'\n                });\n            // Update points appearance only if \"shape\" or \"size\" have changed\n            points.filter(function (d) { return scaleDiff || sizeDiff || getDiffs(d, 'shape', getShape, 'size', getSize); })\n                .watchTransition(renderWatch, 'scatter points')\n                .attr('d',\n                    nv.utils.symbol()\n                    .type(function(d) { return getShape(d[0]); })\n                    .size(function(d) { return z(getSize(d[0],d[1])) })\n            );\n\n            // add a label to the scatter chart\n            if(showLabels)\n            {\n                var titles =  groups.selectAll('.nv-label')\n                    .data(function(d) {\n                        return d.values.map(\n                            function (point, pointIndex) {\n                                return [point, pointIndex]\n                            }).filter(\n                                function(pointArray, pointIndex) {\n                                    return pointActive(pointArray[0], pointIndex)\n                                })\n                        });\n\n                titles.enter().append('text')\n                    .style('fill', function (d,i) {\n                        return d.color })\n                    .style('stroke-opacity', 0)\n                    .style('fill-opacity', 1)\n                    .attr('transform', function(d) {\n                        var dx = nv.utils.NaNtoZero(x0(getX(d[0],d[1]))) + Math.sqrt(z(getSize(d[0],d[1]))/Math.PI) + 2;\n                        return 'translate(' + dx + ',' + nv.utils.NaNtoZero(y0(getY(d[0],d[1]))) + ')';\n                    })\n                    .text(function(d,i){\n                        return d[0].label;});\n\n                titles.exit().remove();\n                groups.exit().selectAll('path.nv-label')\n                    .watchTransition(renderWatch, 'scatter exit')\n                    .attr('transform', function(d) {\n                        var dx = nv.utils.NaNtoZero(x(getX(d[0],d[1])))+ Math.sqrt(z(getSize(d[0],d[1]))/Math.PI)+2;\n                        return 'translate(' + dx + ',' + nv.utils.NaNtoZero(y(getY(d[0],d[1]))) + ')';\n                    })\n                    .remove();\n               titles.each(function(d) {\n                  d3.select(this)\n                    .classed('nv-label', true)\n                    .classed('nv-label-' + d[1], false)\n
.classed('hover',false);\n                });\n                titles.watchTransition(renderWatch, 'scatter labels')\n                    .attr('transform', function(d) {\n                        var dx = nv.utils.NaNtoZero(x(getX(d[0],d[1])))+ Math.sqrt(z(getSize(d[0],d[1]))/Math.PI)+2;\n                        return 'translate(' + dx + ',' + nv.utils.NaNtoZero(y(getY(d[0],d[1]))) + ')'\n                    });\n            }\n\n            // Delay updating the invisible interactive layer for smoother animation\n            if( interactiveUpdateDelay )\n            {\n                clearTimeout(timeoutID); // stop repeat calls to updateInteractiveLayer\n                timeoutID = setTimeout(updateInteractiveLayer, interactiveUpdateDelay );\n            }\n            else\n            {\n                updateInteractiveLayer();\n            }\n\n            //store old scales for use in transitions on update\n            x0 = x.copy();\n            y0 = y.copy();\n            z0 = z.copy();\n\n            width0 = width;\n            height0 = height;\n\n        });\n        renderWatch.renderEnd('scatter immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    // utility function calls provided by this chart\n    chart._calls = new function() {\n        this.clearHighlights = function () {\n            nv.dom.write(function() {\n                container.selectAll(\".nv-point.hover\").classed(\"hover\", false);\n            });\n            return null;\n        };\n        this.highlightPoint = function (seriesIndex, pointIndex, isHoverOver) {\n            nv.dom.write(function() {\n                container.select('.nv-groups')\n                  .selectAll(\".nv-series-\" + seriesIndex)\n                  .selectAll(\".nv-point-\" + pointIndex)\n                  .classed(\"hover\", isHoverOver);\n            });\n        };\n    };\n\n    // trigger calls from events too\n    dispatch.on('elementMouseover.point', function(d) {\n        if (interactive) chart._calls.highlightPoint(d.seriesIndex,d.pointIndex,true);\n    });\n\n    dispatch.on('elementMouseout.point', function(d) {\n        if (interactive) chart._calls.highlightPoint(d.seriesIndex,d.pointIndex,false);\n    });\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:        {get: function(){return width;}, set: function(_){width=_;}},\n        height:       {get: function(){return height;}, set: function(_){height=_;}},\n        xScale:       {get: function(){return x;}, set: function(_){x=_;}},\n        yScale:       {get: function(){return y;}, set: function(_){y=_;}},\n        pointScale:   {get: function(){return z;}, set: function(_){z=_;}},\n        xDomain:      {get: function(){return xDomain;}, set: function(_){xDomain=_;}},\n        yDomain:      {get: function(){return yDomain;}, set: function(_){yDomain=_;}},\n        pointDomain:  {get: function(){return sizeDomain;}, set: function(_){sizeDomain=_;}},\n        xRange:       {get: function(){return xRange;}, set: function(_){xRange=_;}},\n        yRange:       {get: function(){return yRange;}, set: function(_){yRange=_;}},\n        pointRange:   {get: function(){return sizeRange;}, set: function(_){sizeRange=_;}},\n        forceX:       
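        // [editor's note] forceX/forceY/forcePoint (below) append fixed values
        // to the data extent before the domain is computed, which is how NVD3
        // charts pin an axis to zero. Sketch: chart.forceY([0]) guarantees the
        // y domain includes 0 even when every data point is positive.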
{get: function(){return forceX;}, set: function(_){forceX=_;}},\n        forceY:       {get: function(){return forceY;}, set: function(_){forceY=_;}},\n        forcePoint:   {get: function(){return forceSize;}, set: function(_){forceSize=_;}},\n        interactive:  {get: function(){return interactive;}, set: function(_){interactive=_;}},\n        pointActive:  {get: function(){return pointActive;}, set: function(_){pointActive=_;}},\n        padDataOuter: {get: function(){return padDataOuter;}, set: function(_){padDataOuter=_;}},\n        padData:      {get: function(){return padData;}, set: function(_){padData=_;}},\n        clipEdge:     {get: function(){return clipEdge;}, set: function(_){clipEdge=_;}},\n        clipVoronoi:  {get: function(){return clipVoronoi;}, set: function(_){clipVoronoi=_;}},\n        clipRadius:   {get: function(){return clipRadius;}, set: function(_){clipRadius=_;}},\n        showVoronoi:   {get: function(){return showVoronoi;}, set: function(_){showVoronoi=_;}},\n        id:           {get: function(){return id;}, set: function(_){id=_;}},\n        interactiveUpdateDelay: {get:function(){return interactiveUpdateDelay;}, set: function(_){interactiveUpdateDelay=_;}},\n        showLabels: {get: function(){return showLabels;}, set: function(_){ showLabels = _;}},\n        pointBorderColor: {get: function(){return pointBorderColor;}, set: function(_){pointBorderColor=_;}},\n\n        // simple functor options\n        x:     {get: function(){return getX;}, set: function(_){getX = d3.functor(_);}},\n        y:     {get: function(){return getY;}, set: function(_){getY = d3.functor(_);}},\n        pointSize: {get: function(){return getSize;}, set: function(_){getSize = d3.functor(_);}},\n        pointShape: {get: function(){return getShape;}, set: function(_){getShape = d3.functor(_);}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n        }},\n        color: {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }},\n        useVoronoi: {get: function(){return useVoronoi;}, set: function(_){\n            useVoronoi = _;\n            if (useVoronoi === false) {\n                clipVoronoi = false;\n            }\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n    return chart;\n};\n","\nnv.models.scatterChart = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var scatter      = nv.models.scatter()\n        , xAxis        = nv.models.axis()\n        , yAxis        = nv.models.axis()\n        , legend       = nv.models.legend()\n        , distX        = nv.models.distribution()\n        , distY        = nv.models.distribution()\n        , tooltip      = nv.models.tooltip()\n        ;\n\n    var margin       = {top: 30, right: 20, bottom: 50, left: 75}\n        , marginTop = null\n        , width        = null\n        , height       = null\n        , container    = null\n        , color        = nv.utils.defaultColor()\n        , x            = scatter.xScale()\n        , y            = scatter.yScale()\n        , showDistX    = false\n        , showDistY    = false\n        , showLegend   = true\n        , showXAxis    = true\n        , showYAxis    = true\n        , rightAlignYAxis = false\n        , state = nv.utils.state()\n        , defaultState = null\n        , dispatch = d3.dispatch('stateChange', 'changeState', 'renderEnd')\n        , noData       = null\n        , duration = 250\n        , showLabels    = false\n        ;\n\n    scatter.xScale(x).yScale(y);\n    xAxis.orient('bottom').tickPadding(10);\n    yAxis\n        .orient((rightAlignYAxis) ? 
'right' : 'left')\n        .tickPadding(10)\n    ;\n    distX.axis('x');\n    distY.axis('y');\n    tooltip\n        .headerFormatter(function(d, i) {\n            return xAxis.tickFormat()(d, i);\n        })\n        .valueFormatter(function(d, i) {\n            return yAxis.tickFormat()(d, i);\n        });\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var x0, y0\n        , renderWatch = nv.utils.renderWatch(dispatch, duration);\n\n    var stateGetter = function(data) {\n        return function(){\n            return {\n                active: data.map(function(d) { return !d.disabled })\n            };\n        }\n    };\n\n    var stateSetter = function(data) {\n        return function(state) {\n            if (state.active !== undefined)\n                data.forEach(function(series,i) {\n                    series.disabled = !state.active[i];\n                });\n        }\n    };\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(scatter);\n        if (showXAxis) renderWatch.models(xAxis);\n        if (showYAxis) renderWatch.models(yAxis);\n        if (showDistX) renderWatch.models(distX);\n        if (showDistY) renderWatch.models(distY);\n\n        selection.each(function(data) {\n            var that = this;\n\n            container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            chart.update = function() {\n                if (duration === 0)\n                    container.call(chart);\n                else\n                    container.transition().duration(duration).call(chart);\n            };\n            chart.container = this;\n\n            state\n                .setter(stateSetter(data), chart.update)\n                .getter(stateGetter(data))\n                .update();\n\n            // DEPRECATED set state.disabled\n            state.disabled = data.map(function(d) { return !!d.disabled });\n\n            if (!defaultState) {\n                var key;\n                defaultState = {};\n                for (key in state) {\n                    if (state[key] instanceof Array)\n                        defaultState[key] = state[key].slice(0);\n                    else\n                        defaultState[key] = state[key];\n                }\n            }\n\n            // Display noData message if there's nothing to show.\n            if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) {\n                nv.utils.noData(chart, container);\n                renderWatch.renderEnd('scatter immediate');\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            // Setup Scales\n            x = scatter.xScale();\n            y = scatter.yScale();\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-scatterChart').data([data]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-scatterChart nv-chart-' + scatter.id());\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            // background for pointer events\n
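            // [editor's note] the selectAll/data([data])/enter pattern above
            // makes the skeleton idempotent: the first call creates the g-layers,
            // and every subsequent chart.update() sees an empty enter selection
            // and leaves them untouched. The rect appended next is the background
            // named in the comment above.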
gEnter.append('rect').attr('class', 'nvd3 nv-background').style(\"pointer-events\",\"none\");\n\n            gEnter.append('g').attr('class', 'nv-x nv-axis');\n            gEnter.append('g').attr('class', 'nv-y nv-axis');\n            gEnter.append('g').attr('class', 'nv-scatterWrap');\n            gEnter.append('g').attr('class', 'nv-regressionLinesWrap');\n            gEnter.append('g').attr('class', 'nv-distWrap');\n            gEnter.append('g').attr('class', 'nv-legendWrap');\n\n            if (rightAlignYAxis) {\n                g.select(\".nv-y.nv-axis\")\n                    .attr(\"transform\", \"translate(\" + availableWidth + \",0)\");\n            }\n\n            // Legend\n            if (!showLegend) {\n                g.select('.nv-legendWrap').selectAll('*').remove();\n            } else {\n                var legendWidth = availableWidth;\n                legend.width(legendWidth);\n\n                wrap.select('.nv-legendWrap')\n                    .datum(data)\n                    .call(legend);\n\n                if (!marginTop && legend.height() !== margin.top) {\n                    margin.top = legend.height();\n                    availableHeight = nv.utils.availableHeight(height, container, margin);\n                }\n\n                wrap.select('.nv-legendWrap')\n                    .attr('transform', 'translate(0' + ',' + (-margin.top) +')');\n            }\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            // Main Chart Component(s)\n            scatter\n                .width(availableWidth)\n                .height(availableHeight)\n                .color(data.map(function(d,i) {\n                    d.color = d.color || color(d, i);\n                    return d.color;\n                }).filter(function(d,i) { return !data[i].disabled }))\n                .showLabels(showLabels);\n\n            wrap.select('.nv-scatterWrap')\n                .datum(data.filter(function(d) { return !d.disabled }))\n                .call(scatter);\n\n\n            wrap.select('.nv-regressionLinesWrap')\n                .attr('clip-path', 'url(#nv-edge-clip-' + scatter.id() + ')');\n\n            var regWrap = wrap.select('.nv-regressionLinesWrap').selectAll('.nv-regLines')\n                .data(function (d) {\n                    return d;\n                });\n\n            regWrap.enter().append('g').attr('class', 'nv-regLines');\n\n            var regLine = regWrap.selectAll('.nv-regLine')\n                .data(function (d) {\n                    return [d]\n                });\n\n            regLine.enter()\n                .append('line').attr('class', 'nv-regLine')\n                .style('stroke-opacity', 0);\n\n            // don't add lines unless we have slope and intercept to use\n            regLine.filter(function(d) {\n                return d.intercept && d.slope;\n            })\n                .watchTransition(renderWatch, 'scatterPlusLineChart: regline')\n                .attr('x1', x.range()[0])\n                .attr('x2', x.range()[1])\n                .attr('y1', function (d, i) {\n                    return y(x.domain()[0] * d.slope + d.intercept)\n                })\n                .attr('y2', function (d, i) {\n                    return y(x.domain()[1] * d.slope + d.intercept)\n                })\n                .style('stroke', function (d, i, j) {\n                    return color(d, j)\n                })\n                .style('stroke-opacity', function (d, i) {\n                   
 return (d.disabled || typeof d.slope === 'undefined' || typeof d.intercept === 'undefined') ? 0 : 1\n                });\n\n            // Setup Axes\n            if (showXAxis) {\n                xAxis\n                    .scale(x)\n                    ._ticks( nv.utils.calcTicksX(availableWidth/100, data) )\n                    .tickSize( -availableHeight , 0);\n\n                g.select('.nv-x.nv-axis')\n                    .attr('transform', 'translate(0,' + y.range()[0] + ')')\n                    .call(xAxis);\n            }\n\n            if (showYAxis) {\n                yAxis\n                    .scale(y)\n                    ._ticks( nv.utils.calcTicksY(availableHeight/36, data) )\n                    .tickSize( -availableWidth, 0);\n\n                g.select('.nv-y.nv-axis')\n                    .call(yAxis);\n            }\n\n            // Setup Distribution\n            if (showDistX) {\n                distX\n                    .getData(scatter.x())\n                    .scale(x)\n                    .width(availableWidth)\n                    .color(data.map(function(d,i) {\n                        return d.color || color(d, i);\n                    }).filter(function(d,i) { return !data[i].disabled }));\n                gEnter.select('.nv-distWrap').append('g')\n                    .attr('class', 'nv-distributionX');\n                g.select('.nv-distributionX')\n                    .attr('transform', 'translate(0,' + y.range()[0] + ')')\n                    .datum(data.filter(function(d) { return !d.disabled }))\n                    .call(distX);\n            }\n\n            if (showDistY) {\n                distY\n                    .getData(scatter.y())\n                    .scale(y)\n                    .width(availableHeight)\n                    .color(data.map(function(d,i) {\n                        return d.color || color(d, i);\n                    }).filter(function(d,i) { return !data[i].disabled }));\n                gEnter.select('.nv-distWrap').append('g')\n                    .attr('class', 'nv-distributionY');\n                g.select('.nv-distributionY')\n                    .attr('transform', 'translate(' + (rightAlignYAxis ? 
availableWidth : -distY.size() ) + ',0)')\n                    .datum(data.filter(function(d) { return !d.disabled }))\n                    .call(distY);\n            }\n\n            //============================================================\n            // Event Handling/Dispatching (in chart's scope)\n            //------------------------------------------------------------\n\n            legend.dispatch.on('stateChange', function(newState) {\n                for (var key in newState)\n                    state[key] = newState[key];\n                dispatch.stateChange(state);\n                chart.update();\n            });\n\n            // Update chart from a state object passed to event handler\n            dispatch.on('changeState', function(e) {\n                if (typeof e.disabled !== 'undefined') {\n                    data.forEach(function(series,i) {\n                        series.disabled = e.disabled[i];\n                    });\n                    state.disabled = e.disabled;\n                }\n                chart.update();\n            });\n\n            // mouseover needs availableHeight so we just keep scatter mouse events inside the chart block\n            scatter.dispatch.on('elementMouseout.tooltip', function(evt) {\n                tooltip.hidden(true);\n                container.select('.nv-chart-' + scatter.id() + ' .nv-series-' + evt.seriesIndex + ' .nv-distx-' + evt.pointIndex)\n                    .attr('y1', 0);\n                container.select('.nv-chart-' + scatter.id() + ' .nv-series-' + evt.seriesIndex + ' .nv-disty-' + evt.pointIndex)\n                    .attr('x2', distY.size());\n            });\n\n            scatter.dispatch.on('elementMouseover.tooltip', function(evt) {\n                container.select('.nv-series-' + evt.seriesIndex + ' .nv-distx-' + evt.pointIndex)\n                    .attr('y1', evt.relativePos[1] - availableHeight);\n                container.select('.nv-series-' + evt.seriesIndex + ' .nv-disty-' + evt.pointIndex)\n                    .attr('x2', evt.relativePos[0] + distX.size());\n                tooltip.data(evt).hidden(false);\n            });\n\n            //store old scales for use in transitions on update\n            x0 = x.copy();\n            y0 = y.copy();\n\n        });\n\n        renderWatch.renderEnd('scatter with line immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    // expose chart's sub-components\n    chart.dispatch = dispatch;\n    chart.scatter = scatter;\n    chart.legend = legend;\n    chart.xAxis = xAxis;\n    chart.yAxis = yAxis;\n    chart.distX = distX;\n    chart.distY = distY;\n    chart.tooltip = tooltip;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        container:  {get: function(){return container;}, set: function(_){container=_;}},\n        showDistX:  {get: function(){return showDistX;}, set: function(_){showDistX=_;}},\n        showDistY:  {get: function(){return showDistY;}, set: function(_){showDistY=_;}},\n        showLegend: {get: function(){return showLegend;}, set: function(_){showLegend=_;}},\n        showXAxis:  {get: 
function(){return showXAxis;}, set: function(_){showXAxis=_;}},\n        showYAxis:  {get: function(){return showYAxis;}, set: function(_){showYAxis=_;}},\n        defaultState:     {get: function(){return defaultState;}, set: function(_){defaultState=_;}},\n        noData:     {get: function(){return noData;}, set: function(_){noData=_;}},\n        duration:   {get: function(){return duration;}, set: function(_){duration=_;}},\n        showLabels: {get: function(){return showLabels;}, set: function(_){showLabels=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            if (_.top !== undefined) {\n                margin.top = _.top;\n                marginTop = _.top;\n            }\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? _.left   : margin.left;\n        }},\n        rightAlignYAxis: {get: function(){return rightAlignYAxis;}, set: function(_){\n            rightAlignYAxis = _;\n            yAxis.orient( (_) ? 'right' : 'left');\n        }},\n        color: {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n            legend.color(color);\n            distX.color(color);\n            distY.color(color);\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, scatter);\n    nv.utils.initOptions(chart);\n    return chart;\n};\n","\nnv.models.sparkline = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 2, right: 0, bottom: 2, left: 0}\n        , width = 400\n        , height = 32\n        , container = null\n        , animate = true\n        , x = d3.scale.linear()\n        , y = d3.scale.linear()\n        , getX = function(d) { return d.x }\n        , getY = function(d) { return d.y }\n        , color = nv.utils.getColor(['#000'])\n        , xDomain\n        , yDomain\n        , xRange\n        , yRange\n        , showMinMaxPoints = true\n        , showCurrentPoint = true\n        , dispatch = d3.dispatch('renderEnd')\n        ;\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch);\n    \n    function chart(selection) {\n        renderWatch.reset();\n        selection.each(function(data) {\n            var availableWidth = width - margin.left - margin.right,\n                availableHeight = height - margin.top - margin.bottom;\n\n            container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            // Setup Scales\n            x   .domain(xDomain || d3.extent(data, getX ))\n                .range(xRange || [0, availableWidth]);\n\n            y   .domain(yDomain || d3.extent(data, getY ))\n                .range(yRange || [availableHeight, 0]);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-sparkline').data([data]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-sparkline');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            
wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')')\n\n            var paths = wrap.selectAll('path')\n                .data(function(d) { return [d] });\n            paths.enter().append('path');\n            paths.exit().remove();\n            paths\n                .style('stroke', function(d,i) { return d.color || color(d, i) })\n                .attr('d', d3.svg.line()\n                    .x(function(d,i) { return x(getX(d,i)) })\n                    .y(function(d,i) { return y(getY(d,i)) })\n            );\n\n            // TODO: Add CURRENT data point (Need Min, Max, Current / Most recent)\n            var points = wrap.selectAll('circle.nv-point')\n                .data(function(data) {\n                    var yValues = data.map(function(d, i) { return getY(d,i); });\n                    function pointIndex(index) {\n                        if (index != -1) {\n                            var result = data[index];\n                            result.pointIndex = index;\n                            return result;\n                        } else {\n                            return null;\n                        }\n                    }\n                    var maxPoint = pointIndex(yValues.lastIndexOf(y.domain()[1])),\n                        minPoint = pointIndex(yValues.indexOf(y.domain()[0])),\n                        currentPoint = pointIndex(yValues.length - 1);\n                    return [(showMinMaxPoints ? minPoint : null), (showMinMaxPoints ? maxPoint : null), (showCurrentPoint ? currentPoint : null)].filter(function (d) {return d != null;});\n                });\n            points.enter().append('circle');\n            points.exit().remove();\n            points\n                .attr('cx', function(d,i) { return x(getX(d,d.pointIndex)) })\n                .attr('cy', function(d,i) { return y(getY(d,d.pointIndex)) })\n                .attr('r', 2)\n                .attr('class', function(d,i) {\n                    return getX(d, d.pointIndex) == x.domain()[1] ? 'nv-point nv-currentValue' :\n                            getY(d, d.pointIndex) == y.domain()[0] ? 
'nv-point nv-minValue' : 'nv-point nv-maxValue'\n                });\n        });\n        \n        renderWatch.renderEnd('sparkline immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:            {get: function(){return width;}, set: function(_){width=_;}},\n        height:           {get: function(){return height;}, set: function(_){height=_;}},\n        xDomain:          {get: function(){return xDomain;}, set: function(_){xDomain=_;}},\n        yDomain:          {get: function(){return yDomain;}, set: function(_){yDomain=_;}},\n        xRange:           {get: function(){return xRange;}, set: function(_){xRange=_;}},\n        yRange:           {get: function(){return yRange;}, set: function(_){yRange=_;}},\n        xScale:           {get: function(){return x;}, set: function(_){x=_;}},\n        yScale:           {get: function(){return y;}, set: function(_){y=_;}},\n        animate:          {get: function(){return animate;}, set: function(_){animate=_;}},\n        showMinMaxPoints: {get: function(){return showMinMaxPoints;}, set: function(_){showMinMaxPoints=_;}},\n        showCurrentPoint: {get: function(){return showCurrentPoint;}, set: function(_){showCurrentPoint=_;}},\n\n        //functor options\n        x: {get: function(){return getX;}, set: function(_){getX=d3.functor(_);}},\n        y: {get: function(){return getY;}, set: function(_){getY=d3.functor(_);}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }}\n    });\n\n    chart.dispatch = dispatch;\n    nv.utils.initOptions(chart);\n    return chart;\n};\n","\nnv.models.sparklinePlus = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var sparkline = nv.models.sparkline();\n\n    var margin = {top: 15, right: 100, bottom: 10, left: 50}\n        , width = null\n        , height = null\n        , x\n        , y\n        , index = []\n        , paused = false\n        , xTickFormat = d3.format(',r')\n        , yTickFormat = d3.format(',.2f')\n        , showLastValue = true\n        , alignValue = true\n        , rightAlignValue = false\n        , noData = null\n        , dispatch = d3.dispatch('renderEnd')\n        ;\n        \n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch);\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(sparkline);\n        selection.each(function(data) {\n            var container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            chart.update = function() { container.call(chart); };\n            chart.container = this;\n\n            // Display No Data message if there's nothing to show.\n            if (!data || !data.length) {\n                nv.utils.noData(chart, container)\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            var currentValue = sparkline.y()(data[data.length-1], data.length-1);\n\n            // Setup Scales\n            x = sparkline.xScale();\n            y = sparkline.yScale();\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-sparklineplus').data([data]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-sparklineplus');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-sparklineWrap');\n            gEnter.append('g').attr('class', 'nv-valueWrap');\n            gEnter.append('g').attr('class', 'nv-hoverArea');\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            // Main Chart Component(s)\n            var sparklineWrap = g.select('.nv-sparklineWrap');\n\n            sparkline.width(availableWidth).height(availableHeight);\n            sparklineWrap.call(sparkline);\n\n            if (showLastValue) {\n                var valueWrap = g.select('.nv-valueWrap');\n                var value = valueWrap.selectAll('.nv-currentValue')\n                    .data([currentValue]);\n\n                value.enter().append('text').attr('class', 'nv-currentValue')\n                    .attr('dx', rightAlignValue ? -8 : 8)\n                    .attr('dy', '.9em')\n                    .style('text-anchor', rightAlignValue ? 
'end' : 'start');\n\n                value\n                    .attr('x', availableWidth + (rightAlignValue ? margin.right : 0))\n                    .attr('y', alignValue ? function (d) {\n                        return y(d)\n                    } : 0)\n                    .style('fill', sparkline.color()(data[data.length - 1], data.length - 1))\n                    .text(yTickFormat(currentValue));\n            }\n\n            gEnter.select('.nv-hoverArea').append('rect')\n                .on('mousemove', sparklineHover)\n                .on('click', function() { paused = !paused })\n                .on('mouseout', function() { index = []; updateValueLine(); });\n\n            g.select('.nv-hoverArea rect')\n                .attr('transform', function(d) { return 'translate(' + -margin.left + ',' + -margin.top + ')' })\n                .attr('width', availableWidth + margin.left + margin.right)\n                .attr('height', availableHeight + margin.top);\n\n            //index is currently global (within the chart), may or may not keep it that way\n            function updateValueLine() {\n                if (paused) return;\n\n                var hoverValue = g.selectAll('.nv-hoverValue').data(index);\n\n                var hoverEnter = hoverValue.enter()\n                    .append('g').attr('class', 'nv-hoverValue')\n                    .style('stroke-opacity', 0)\n                    .style('fill-opacity', 0);\n\n                hoverValue.exit()\n                    .transition().duration(250)\n                    .style('stroke-opacity', 0)\n                    .style('fill-opacity', 0)\n                    .remove();\n\n                hoverValue\n                    .attr('transform', function(d) { return 'translate(' + x(sparkline.x()(data[d],d)) + ',0)' })\n                    .transition().duration(250)\n                    .style('stroke-opacity', 1)\n                    .style('fill-opacity', 1);\n\n                if (!index.length) return;\n\n                hoverEnter.append('line')\n                    .attr('x1', 0)\n                    .attr('y1', -margin.top)\n                    .attr('x2', 0)\n                    .attr('y2', availableHeight);\n\n                hoverEnter.append('text').attr('class', 'nv-xValue')\n                    .attr('x', -6)\n                    .attr('y', -margin.top)\n                    .attr('text-anchor', 'end')\n                    .attr('dy', '.9em');\n\n                g.select('.nv-hoverValue .nv-xValue')\n                    .text(xTickFormat(sparkline.x()(data[index[0]], index[0])));\n\n                hoverEnter.append('text').attr('class', 'nv-yValue')\n                    .attr('x', 6)\n                    .attr('y', -margin.top)\n                    .attr('text-anchor', 'start')\n                    .attr('dy', '.9em');\n\n                g.select('.nv-hoverValue .nv-yValue')\n                    .text(yTickFormat(sparkline.y()(data[index[0]], index[0])));\n            }\n\n            function sparklineHover() {\n                if (paused) return;\n\n                var pos = d3.mouse(this)[0] - margin.left;\n\n                function getClosestIndex(data, x) {\n                    var distance = Math.abs(sparkline.x()(data[0], 0) - x);\n                    var closestIndex = 0;\n                    for (var i = 0; i < data.length; i++){\n                        if (Math.abs(sparkline.x()(data[i], i) - x) < distance) {\n                            distance = Math.abs(sparkline.x()(data[i], i) - x);\n                    
        closestIndex = i;\n                        }\n                    }\n                    return closestIndex;\n                }\n\n                index = [getClosestIndex(data, Math.round(x.invert(pos)))];\n                updateValueLine();\n            }\n\n        });\n        renderWatch.renderEnd('sparklinePlus immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    // expose chart's sub-components\n    chart.dispatch = dispatch;\n    chart.sparkline = sparkline;\n\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:           {get: function(){return width;}, set: function(_){width=_;}},\n        height:          {get: function(){return height;}, set: function(_){height=_;}},\n        xTickFormat:     {get: function(){return xTickFormat;}, set: function(_){xTickFormat=_;}},\n        yTickFormat:     {get: function(){return yTickFormat;}, set: function(_){yTickFormat=_;}},\n        showLastValue:   {get: function(){return showLastValue;}, set: function(_){showLastValue=_;}},\n        alignValue:      {get: function(){return alignValue;}, set: function(_){alignValue=_;}},\n        rightAlignValue: {get: function(){return rightAlignValue;}, set: function(_){rightAlignValue=_;}},\n        noData:          {get: function(){return noData;}, set: function(_){noData=_;}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, sparkline);\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","\nnv.models.stackedArea = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0}\n        , width = 960\n        , height = 500\n        , color = nv.utils.defaultColor() // a function that computes the color\n        , id = Math.floor(Math.random() * 100000) //Create semi-unique ID in case user doesn't select one\n        , container = null\n        , getX = function(d) { return d.x } // accessor to get the x value from a data point\n        , getY = function(d) { return d.y } // accessor to get the y value from a data point\n        , defined = function(d,i) { return !isNaN(getY(d,i)) && getY(d,i) !== null } // allows a line to be not continuous when it is not defined\n        , style = 'stack'\n        , offset = 'zero'\n        , order = 'default'\n        , interpolate = 'linear'  // controls the line interpolation\n        , clipEdge = false // if true, masks lines within x and y scale\n        , x //can be accessed via chart.xScale()\n        , y //can be accessed via chart.yScale()\n        , scatter = nv.models.scatter()\n        , duration = 250\n        , dispatch =  d3.dispatch('areaClick', 'areaMouseover', 'areaMouseout','renderEnd', 'elementClick', 'elementMouseover', 'elementMouseout')\n        ;\n\n    scatter\n        .pointSize(2.2) // default size\n        .pointDomain([2.2, 2.2]) // all the same size by default\n    ;\n\n    /************************************\n     * offset:\n     *   'wiggle' (stream)\n     *   'zero' (stacked)\n     *   'expand' (normalize to 100%)\n     *   'silhouette' (simple centered)\n     *\n     * order:\n     *   'inside-out' (stream)\n     *   'default' (input order)\n     ************************************/\n\n    var renderWatch = nv.utils.renderWatch(dispatch, duration);\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(scatter);\n        selection.each(function(data) {\n            var availableWidth = width - margin.left - margin.right,\n                availableHeight = height - margin.top - margin.bottom;\n\n            container = d3.select(this);\n            nv.utils.initSVG(container);\n\n            // Setup Scales\n            x = scatter.xScale();\n            y = scatter.yScale();\n\n            var dataRaw = data;\n            // Injecting point index into each point because d3.layout.stack().out does not give index\n            data.forEach(function(aseries, i) {\n                aseries.seriesIndex = i;\n                aseries.values = aseries.values.map(function(d, j) {\n                    d.index = j;\n                    d.seriesIndex = i;\n                    return d;\n                });\n            });\n\n            var dataFiltered = data.filter(function(series) {\n                return !series.disabled;\n            });\n\n            data = d3.layout.stack()\n                .order(order)\n                .offset(offset)\n                .values(function(d) { return d.values })  //TODO: make values customizable in EVERY model in this fashion\n                .x(getX)\n                .y(getY)\n                .out(function(d, y0, y) {\n                    d.display = {\n                     
   y: y,\n                        y0: y0\n                    };\n                })\n            (dataFiltered);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-stackedarea').data([data]);\n            var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-stackedarea');\n            var defsEnter = wrapEnter.append('defs');\n            var gEnter = wrapEnter.append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-areaWrap');\n            gEnter.append('g').attr('class', 'nv-scatterWrap');\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n            \n            // If the user has not specified forceY, make sure 0 is included in the domain\n            // Otherwise, use user-specified values for forceY\n            if (scatter.forceY().length == 0) {\n                scatter.forceY().push(0);\n            }\n            \n            scatter\n                .width(availableWidth)\n                .height(availableHeight)\n                .x(getX)\n                .y(function(d) {\n                    if (d.display !== undefined) { return d.display.y + d.display.y0; }\n                })\n                .color(data.map(function(d,i) {\n                    d.color = d.color || color(d, d.seriesIndex);\n                    return d.color;\n                }));\n\n            var scatterWrap = g.select('.nv-scatterWrap')\n                .datum(data);\n\n            scatterWrap.call(scatter);\n\n            defsEnter.append('clipPath')\n                .attr('id', 'nv-edge-clip-' + id)\n                .append('rect');\n\n            wrap.select('#nv-edge-clip-' + id + ' rect')\n                .attr('width', availableWidth)\n                .attr('height', availableHeight);\n\n            g.attr('clip-path', clipEdge ? 
'url(#nv-edge-clip-' + id + ')' : '');\n\n            var area = d3.svg.area()\n                .defined(defined)\n                .x(function(d,i)  { return x(getX(d,i)) })\n                .y0(function(d) {\n                    return y(d.display.y0)\n                })\n                .y1(function(d) {\n                    return y(d.display.y + d.display.y0)\n                })\n                .interpolate(interpolate);\n\n            var zeroArea = d3.svg.area()\n                .defined(defined)\n                .x(function(d,i)  { return x(getX(d,i)) })\n                .y0(function(d) { return y(d.display.y0) })\n                .y1(function(d) { return y(d.display.y0) });\n\n            var path = g.select('.nv-areaWrap').selectAll('path.nv-area')\n                .data(function(d) { return d });\n\n            path.enter().append('path').attr('class', function(d,i) { return 'nv-area nv-area-' + i })\n                .attr('d', function(d,i){\n                    return zeroArea(d.values, d.seriesIndex);\n                })\n                .on('mouseover', function(d,i) {\n                    d3.select(this).classed('hover', true);\n                    dispatch.areaMouseover({\n                        point: d,\n                        series: d.key,\n                        pos: [d3.event.pageX, d3.event.pageY],\n                        seriesIndex: d.seriesIndex\n                    });\n                })\n                .on('mouseout', function(d,i) {\n                    d3.select(this).classed('hover', false);\n                    dispatch.areaMouseout({\n                        point: d,\n                        series: d.key,\n                        pos: [d3.event.pageX, d3.event.pageY],\n                        seriesIndex: d.seriesIndex\n                    });\n                })\n                .on('click', function(d,i) {\n                    d3.select(this).classed('hover', false);\n                    dispatch.areaClick({\n                        point: d,\n                        series: d.key,\n                        pos: [d3.event.pageX, d3.event.pageY],\n                        seriesIndex: d.seriesIndex\n                    });\n                });\n\n            path.exit().remove();\n            path.style('fill', function(d,i){\n                    return d.color || color(d, d.seriesIndex)\n                })\n                .style('stroke', function(d,i){ return d.color || color(d, d.seriesIndex) });\n            path.watchTransition(renderWatch,'stackedArea path')\n                .attr('d', function(d,i) {\n                    return area(d.values,i)\n                });\n\n            //============================================================\n            // Event Handling/Dispatching (in chart's scope)\n            //------------------------------------------------------------\n\n            scatter.dispatch.on('elementMouseover.area', function(e) {\n                g.select('.nv-chart-' + id + ' .nv-area-' + e.seriesIndex).classed('hover', true);\n            });\n            scatter.dispatch.on('elementMouseout.area', function(e) {\n                g.select('.nv-chart-' + id + ' .nv-area-' + e.seriesIndex).classed('hover', false);\n            });\n\n            //Special offset functions\n            chart.d3_stackedOffset_stackPercent = function(stackData) {\n                var n = stackData.length,    //How many series\n                    m = stackData[0].length,     //how many points per series\n                    i,\n               
     j,\n                    o,\n                    y0 = [];\n\n                for (j = 0; j < m; ++j) { //Looping through all points\n                    for (i = 0, o = 0; i < dataRaw.length; i++) { //looping through all series\n                        o += getY(dataRaw[i].values[j]); //total y value of all series at a certain point in time.\n                    }\n\n                    if (o) for (i = 0; i < n; i++) { //(total y value of all series at point in time i) != 0\n                        stackData[i][j][1] /= o;\n                    } else { //(total y value of all series at point in time i) == 0\n                        for (i = 0; i < n; i++) {\n                            stackData[i][j][1] = 0;\n                        }\n                    }\n                }\n                for (j = 0; j < m; ++j) y0[j] = 0;\n                return y0;\n            };\n\n        });\n\n        renderWatch.renderEnd('stackedArea immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Global getters and setters\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.scatter = scatter;\n\n    scatter.dispatch.on('elementClick', function(){ dispatch.elementClick.apply(this, arguments); });\n    scatter.dispatch.on('elementMouseover', function(){ dispatch.elementMouseover.apply(this, arguments); });\n    scatter.dispatch.on('elementMouseout', function(){ dispatch.elementMouseout.apply(this, arguments); });\n\n    chart.interpolate = function(_) {\n        if (!arguments.length) return interpolate;\n        interpolate = _;\n        return chart;\n    };\n\n    chart.duration = function(_) {\n        if (!arguments.length) return duration;\n        duration = _;\n        renderWatch.reset(duration);\n        scatter.duration(duration);\n        return chart;\n    };\n\n    chart.dispatch = dispatch;\n    chart.scatter = scatter;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        defined: {get: function(){return defined;}, set: function(_){defined=_;}},\n        clipEdge: {get: function(){return clipEdge;}, set: function(_){clipEdge=_;}},\n        offset:      {get: function(){return offset;}, set: function(_){offset=_;}},\n        order:    {get: function(){return order;}, set: function(_){order=_;}},\n        interpolate:    {get: function(){return interpolate;}, set: function(_){interpolate=_;}},\n\n        // simple functor options\n        x:     {get: function(){return getX;}, set: function(_){getX = d3.functor(_);}},\n        y:     {get: function(){return getY;}, set: function(_){getY = d3.functor(_);}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? 
_.left   : margin.left;\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n        }},\n        style: {get: function(){return style;}, set: function(_){\n            style = _;\n            switch (style) {\n                case 'stack':\n                    chart.offset('zero');\n                    chart.order('default');\n                    break;\n                case 'stream':\n                    chart.offset('wiggle');\n                    chart.order('inside-out');\n                    break;\n                case 'stream-center':\n                    chart.offset('silhouette');\n                    chart.order('inside-out');\n                    break;\n                case 'expand':\n                    chart.offset('expand');\n                    chart.order('default');\n                    break;\n                case 'stack_percent':\n                    chart.offset(chart.d3_stackedOffset_stackPercent);\n                    chart.order('default');\n                    break;\n            }\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n            scatter.duration(duration);\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, scatter);\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n","\nnv.models.stackedAreaChart = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var stacked = nv.models.stackedArea()\n        , xAxis = nv.models.axis()\n        , yAxis = nv.models.axis()\n        , legend = nv.models.legend()\n        , controls = nv.models.legend()\n        , interactiveLayer = nv.interactiveGuideline()\n        , tooltip = nv.models.tooltip()\n        , focus = nv.models.focus(nv.models.stackedArea())\n        ;\n\n    var margin = {top: 10, right: 25, bottom: 50, left: 60}\n        , marginTop = null\n        , width = null\n        , height = null\n        , color = nv.utils.defaultColor()\n        , showControls = true\n        , showLegend = true\n        , legendPosition = 'top'\n        , showXAxis = true\n        , showYAxis = true\n        , rightAlignYAxis = false\n        , focusEnable = false\n        , useInteractiveGuideline = false\n        , showTotalInTooltip = true\n        , totalLabel = 'TOTAL'\n        , x //can be accessed via chart.xScale()\n        , y //can be accessed via chart.yScale()\n        , state = nv.utils.state()\n        , defaultState = null\n        , noData = null\n        , dispatch = d3.dispatch('stateChange', 'changeState','renderEnd')\n        , controlWidth = 250\n        , controlOptions = ['Stacked','Stream','Expanded']\n        , controlLabels = {}\n        , duration = 250\n        ;\n\n    state.style = stacked.style();\n    xAxis.orient('bottom').tickPadding(7);\n    yAxis.orient((rightAlignYAxis) ? 'right' : 'left');\n\n    tooltip\n        .headerFormatter(function(d, i) {\n            return xAxis.tickFormat()(d, i);\n        })\n        .valueFormatter(function(d, i) {\n            return yAxis.tickFormat()(d, i);\n        });\n\n    interactiveLayer.tooltip\n        .headerFormatter(function(d, i) {\n            return xAxis.tickFormat()(d, i);\n        })\n        .valueFormatter(function(d, i) {\n            return d == null ? 
\"N/A\" : yAxis.tickFormat()(d, i);\n        });\n\n    var oldYTickFormat = null,\n        oldValueFormatter = null;\n\n    controls.updateState(false);\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch);\n    var style = stacked.style();\n\n    var stateGetter = function(data) {\n        return function(){\n            return {\n                active: data.map(function(d) { return !d.disabled }),\n                style: stacked.style()\n            };\n        }\n    };\n\n    var stateSetter = function(data) {\n        return function(state) {\n            if (state.style !== undefined)\n                style = state.style;\n            if (state.active !== undefined)\n                data.forEach(function(series,i) {\n                    series.disabled = !state.active[i];\n                });\n        }\n    };\n\n    var percentFormatter = d3.format('%');\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(stacked);\n        if (showXAxis) renderWatch.models(xAxis);\n        if (showYAxis) renderWatch.models(yAxis);\n\n        selection.each(function(data) {\n            var container = d3.select(this),\n                that = this;\n            nv.utils.initSVG(container);\n\n            var availableWidth = nv.utils.availableWidth(width, container, margin),\n                availableHeight = nv.utils.availableHeight(height, container, margin) - (focusEnable ? focus.height() : 0);\n\n            chart.update = function() { container.transition().duration(duration).call(chart); };\n            chart.container = this;\n\n            state\n                .setter(stateSetter(data), chart.update)\n                .getter(stateGetter(data))\n                .update();\n\n            // DEPRECATED set state.disabled\n            state.disabled = data.map(function(d) { return !!d.disabled });\n\n            if (!defaultState) {\n                var key;\n                defaultState = {};\n                for (key in state) {\n                    if (state[key] instanceof Array)\n                        defaultState[key] = state[key].slice(0);\n                    else\n                        defaultState[key] = state[key];\n                }\n            }\n\n            // Display No Data message if there's nothing to show.\n            if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) {\n                nv.utils.noData(chart, container)\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n            // Setup Scales\n            x = stacked.xScale();\n            y = stacked.yScale();\n\n            // Setup containers and skeleton of chart\n            var wrap = container.selectAll('g.nv-wrap.nv-stackedAreaChart').data([data]);\n            var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-stackedAreaChart').append('g');\n            var g = wrap.select('g');\n\n            gEnter.append('g').attr('class', 'nv-legendWrap');\n            gEnter.append('g').attr('class', 'nv-controlsWrap');\n\n            var focusEnter = gEnter.append('g').attr('class', 'nv-focus');\n            focusEnter.append('g').attr('class', 'nv-background').append('rect');\n            focusEnter.append('g').attr('class', 'nv-x nv-axis');\n            
focusEnter.append('g').attr('class', 'nv-y nv-axis');\n            focusEnter.append('g').attr('class', 'nv-stackedWrap');\n            focusEnter.append('g').attr('class', 'nv-interactive');\n\n            // g.select(\"rect\").attr(\"width\",availableWidth).attr(\"height\",availableHeight);\n\n            var contextEnter = gEnter.append('g').attr('class', 'nv-focusWrap');\n\n            // Legend\n            if (!showLegend) {\n                g.select('.nv-legendWrap').selectAll('*').remove();\n            } else {\n                var legendWidth = (showControls && legendPosition === 'top') ? availableWidth - controlWidth : availableWidth;\n\n                legend.width(legendWidth);\n                g.select('.nv-legendWrap').datum(data).call(legend);\n\n                if (legendPosition === 'bottom') {\n                \t// constant from axis.js, plus some margin for better layout\n                \tvar xAxisHeight = (showXAxis ? 12 : 0) + 10;\n                   \tmargin.bottom = Math.max(legend.height() + xAxisHeight, margin.bottom);\n                   \tavailableHeight = nv.utils.availableHeight(height, container, margin) - (focusEnable ? focus.height() : 0);\n                \tvar legendTop = availableHeight + xAxisHeight;\n                    g.select('.nv-legendWrap')\n                        .attr('transform', 'translate(0,' + legendTop +')');\n                } else if (legendPosition === 'top') {\n                    if (!marginTop && margin.top != legend.height()) {\n                        margin.top = legend.height();\n                        availableHeight = nv.utils.availableHeight(height, container, margin) - (focusEnable ? focus.height() : 0);\n                    }\n\n                    g.select('.nv-legendWrap')\n                    \t.attr('transform', 'translate(' + (availableWidth-legendWidth) + ',' + (-margin.top) +')');\n                }\n            }\n\n            // Controls\n            if (!showControls) {\n                 g.select('.nv-controlsWrap').selectAll('*').remove();\n            } else {\n                var controlsData = [\n                    {\n                        key: controlLabels.stacked || 'Stacked',\n                        metaKey: 'Stacked',\n                        disabled: stacked.style() != 'stack',\n                        style: 'stack'\n                    },\n                    {\n                        key: controlLabels.stream || 'Stream',\n                        metaKey: 'Stream',\n                        disabled: stacked.style() != 'stream',\n                        style: 'stream'\n                    },\n                    {\n                        key: controlLabels.expanded || 'Expanded',\n                        metaKey: 'Expanded',\n                        disabled: stacked.style() != 'expand',\n                        style: 'expand'\n                    },\n                    {\n                        key: controlLabels.stack_percent || 'Stack %',\n                        metaKey: 'Stack_Percent',\n                        disabled: stacked.style() != 'stack_percent',\n                        style: 'stack_percent'\n                    }\n                ];\n\n                controlWidth = (controlOptions.length/3) * 260;\n                controlsData = controlsData.filter(function(d) {\n                    return controlOptions.indexOf(d.metaKey) !== -1;\n                });\n\n                controls\n                    .width( controlWidth )\n                    .color(['#444', '#444', 
'#444']);\n\n                g.select('.nv-controlsWrap')\n                    .datum(controlsData)\n                    .call(controls);\n\n                var requiredTop = Math.max(controls.height(), showLegend && (legendPosition === 'top') ? legend.height() : 0);\n\n                if ( margin.top != requiredTop ) {\n                    margin.top = requiredTop;\n                    availableHeight = nv.utils.availableHeight(height, container, margin) - (focusEnable ? focus.height() : 0);\n                }\n\n                g.select('.nv-controlsWrap')\n                    .attr('transform', 'translate(0,' + (-margin.top) +')');\n            }\n\n            wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n            if (rightAlignYAxis) {\n                g.select(\".nv-y.nv-axis\")\n                    .attr(\"transform\", \"translate(\" + availableWidth + \",0)\");\n            }\n\n            //Set up interactive layer\n            if (useInteractiveGuideline) {\n                interactiveLayer\n                    .width(availableWidth)\n                    .height(availableHeight)\n                    .margin({left: margin.left, top: margin.top})\n                    .svgContainer(container)\n                    .xScale(x);\n                wrap.select(\".nv-interactive\").call(interactiveLayer);\n            }\n\n            g.select('.nv-focus .nv-background rect')\n                .attr('width', availableWidth)\n                .attr('height', availableHeight);\n\n            stacked\n                .width(availableWidth)\n                .height(availableHeight)\n                .color(data.map(function(d,i) {\n                    return d.color || color(d, i);\n                }).filter(function(d,i) { return !data[i].disabled; }));\n\n            var stackedWrap = g.select('.nv-focus .nv-stackedWrap')\n                .datum(data.filter(function(d) { return !d.disabled; }));\n\n            // Setup Axes\n            if (showXAxis) {\n                xAxis.scale(x)\n                    ._ticks( nv.utils.calcTicksX(availableWidth/100, data) )\n                    .tickSize( -availableHeight, 0);\n            }\n\n            if (showYAxis) {\n                var ticks;\n                if (stacked.offset() === 'wiggle') {\n                    ticks = 0;\n                }\n                else {\n                    ticks = nv.utils.calcTicksY(availableHeight/36, data);\n                }\n                yAxis.scale(y)\n                    ._ticks(ticks)\n                    .tickSize(-availableWidth, 0);\n            }\n\n            //============================================================\n            // Update Axes\n            //============================================================\n            function updateXAxis() {\n                if(showXAxis) {\n                    g.select('.nv-focus .nv-x.nv-axis')\n                        .attr('transform', 'translate(0,' + availableHeight + ')')\n                        .transition()\n                        .duration(duration)\n                        .call(xAxis)\n                        ;\n                }\n            }\n\n            function updateYAxis() {\n                if(showYAxis) {\n                    if (stacked.style() === 'expand' || stacked.style() === 'stack_percent') {\n                        var currentFormat = yAxis.tickFormat();\n\n                        if ( !oldYTickFormat || currentFormat !== percentFormatter )\n                            
oldYTickFormat = currentFormat;\n\n                        //Forces the yAxis to use percentage in 'expand' mode.\n                        yAxis.tickFormat(percentFormatter);\n                    }\n                    else {\n                        if (oldYTickFormat) {\n                            yAxis.tickFormat(oldYTickFormat);\n                            oldYTickFormat = null;\n                        }\n                    }\n\n                    g.select('.nv-focus .nv-y.nv-axis')\n                    .transition().duration(0)\n                    .call(yAxis);\n                }\n            }\n\n            //============================================================\n            // Update Focus\n            //============================================================\n            if(!focusEnable) {\n                stackedWrap.transition().call(stacked);\n                updateXAxis();\n                updateYAxis();\n            } else {\n                focus.width(availableWidth);\n                g.select('.nv-focusWrap')\n                    .attr('transform', 'translate(0,' + ( availableHeight + margin.bottom + focus.margin().top) + ')')\n                    .datum(data.filter(function(d) { return !d.disabled; }))\n                    .call(focus);\n                var extent = focus.brush.empty() ? focus.xDomain() : focus.brush.extent();\n                if(extent !== null){\n                    onBrush(extent);\n                }\n            }\n\n            //============================================================\n            // Event Handling/Dispatching (in chart's scope)\n            //------------------------------------------------------------\n\n            stacked.dispatch.on('areaClick.toggle', function(e) {\n                if (data.filter(function(d) { return !d.disabled }).length === 1)\n                    data.forEach(function(d) {\n                        d.disabled = false;\n                    });\n                else\n                    data.forEach(function(d,i) {\n                        d.disabled = (i != e.seriesIndex);\n                    });\n\n                state.disabled = data.map(function(d) { return !!d.disabled });\n                dispatch.stateChange(state);\n\n                chart.update();\n            });\n\n            legend.dispatch.on('stateChange', function(newState) {\n                for (var key in newState)\n                    state[key] = newState[key];\n                dispatch.stateChange(state);\n                chart.update();\n            });\n\n            controls.dispatch.on('legendClick', function(d,i) {\n                if (!d.disabled) return;\n\n                controlsData = controlsData.map(function(s) {\n                    s.disabled = true;\n                    return s;\n                });\n                d.disabled = false;\n\n                stacked.style(d.style);\n\n\n                state.style = stacked.style();\n                dispatch.stateChange(state);\n\n                chart.update();\n            });\n\n            interactiveLayer.dispatch.on('elementMousemove', function(e) {\n                stacked.clearHighlights();\n                var singlePoint, pointIndex, pointXLocation, allData = [], valueSum = 0, allNullValues = true;\n                data\n                    .filter(function(series, i) {\n                        series.seriesIndex = i;\n                        return !series.disabled;\n                    })\n                    .forEach(function(series,i) 
{\n                        pointIndex = nv.interactiveBisect(series.values, e.pointXValue, chart.x());\n                        var point = series.values[pointIndex];\n                        var pointYValue = chart.y()(point, pointIndex);\n                        if (pointYValue != null) {\n                            stacked.highlightPoint(i, pointIndex, true);\n                        }\n                        if (typeof point === 'undefined') return;\n                        if (typeof singlePoint === 'undefined') singlePoint = point;\n                        if (typeof pointXLocation === 'undefined') pointXLocation = chart.xScale()(chart.x()(point,pointIndex));\n\n                        //If we are in 'expand' mode, use the stacked percent value instead of raw value.\n                        var tooltipValue = (stacked.style() == 'expand') ? point.display.y : chart.y()(point,pointIndex);\n                        allData.push({\n                            key: series.key,\n                            value: tooltipValue,\n                            color: color(series,series.seriesIndex),\n                            point: point\n                        });\n\n                        if (showTotalInTooltip && stacked.style() != 'expand' && tooltipValue != null) {\n                          valueSum += tooltipValue;\n                          allNullValues = false;\n                        };\n                    });\n\n                allData.reverse();\n\n                //Highlight the tooltip entry based on which stack the mouse is closest to.\n                if (allData.length > 2) {\n                    var yValue = chart.yScale().invert(e.mouseY);\n                    var yDistMax = Infinity, indexToHighlight = null;\n                    allData.forEach(function(series,i) {\n\n                        //To handle situation where the stacked area chart is negative, we need to use absolute values\n                        //when checking if the mouse Y value is within the stack area.\n                        yValue = Math.abs(yValue);\n                        var stackedY0 = Math.abs(series.point.display.y0);\n                        var stackedY = Math.abs(series.point.display.y);\n                        if ( yValue >= stackedY0 && yValue <= (stackedY + stackedY0))\n                        {\n                            indexToHighlight = i;\n                            return;\n                        }\n                    });\n                    if (indexToHighlight != null)\n                        allData[indexToHighlight].highlight = true;\n                }\n\n                //If we are not in 'expand' mode, add a 'Total' row to the tooltip.\n                if (showTotalInTooltip && stacked.style() != 'expand' && allData.length >= 2 && !allNullValues) {\n                    allData.push({\n                        key: totalLabel,\n                        value: valueSum,\n                        total: true\n                    });\n                }\n\n                var xValue = chart.x()(singlePoint,pointIndex);\n\n                var valueFormatter = interactiveLayer.tooltip.valueFormatter();\n                // Keeps track of the tooltip valueFormatter if the chart changes to expanded view\n                if (stacked.style() === 'expand' || stacked.style() === 'stack_percent') {\n                    if ( !oldValueFormatter ) {\n                        oldValueFormatter = valueFormatter;\n                    }\n                    //Forces the tooltip to use 
percentage in 'expand' mode.\n                    valueFormatter = d3.format(\".1%\");\n                }\n                else {\n                    if (oldValueFormatter) {\n                        valueFormatter = oldValueFormatter;\n                        oldValueFormatter = null;\n                    }\n                }\n\n                interactiveLayer.tooltip\n                    .valueFormatter(valueFormatter)\n                    .data(\n                    {\n                        value: xValue,\n                        series: allData\n                    }\n                )();\n\n                interactiveLayer.renderGuideLine(pointXLocation);\n\n            });\n\n            interactiveLayer.dispatch.on(\"elementMouseout\",function(e) {\n                stacked.clearHighlights();\n            });\n\n            /* Update `main' graph on brush update. */\n            focus.dispatch.on(\"onBrush\", function(extent) {\n                onBrush(extent);\n            });\n\n            // Update chart from a state object passed to event handler\n            dispatch.on('changeState', function(e) {\n\n                if (typeof e.disabled !== 'undefined' && data.length === e.disabled.length) {\n                    data.forEach(function(series,i) {\n                        series.disabled = e.disabled[i];\n                    });\n\n                    state.disabled = e.disabled;\n                }\n\n                if (typeof e.style !== 'undefined') {\n                    stacked.style(e.style);\n                    style = e.style;\n                }\n\n                chart.update();\n            });\n\n            //============================================================\n            // Functions\n            //------------------------------------------------------------\n\n            function onBrush(extent) {\n                // Update Main (Focus)\n                var stackedWrap = g.select('.nv-focus .nv-stackedWrap')\n                    .datum(\n                    data.filter(function(d) { return !d.disabled; })\n                        .map(function(d,i) {\n                            return {\n                                key: d.key,\n                                area: d.area,\n                                classed: d.classed,\n                                values: d.values.filter(function(d,i) {\n                                    return stacked.x()(d,i) >= extent[0] && stacked.x()(d,i) <= extent[1];\n                                }),\n                                disableTooltip: d.disableTooltip\n                            };\n                        })\n                );\n                stackedWrap.transition().duration(duration).call(stacked);\n\n                // Update Main (Focus) Axes\n                updateXAxis();\n                updateYAxis();\n            }\n\n        });\n\n        renderWatch.renderEnd('stacked Area chart immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Event Handling/Dispatching (out of chart's scope)\n    //------------------------------------------------------------\n\n    stacked.dispatch.on('elementMouseover.tooltip', function(evt) {\n        evt.point['x'] = stacked.x()(evt.point);\n        evt.point['y'] = stacked.y()(evt.point);\n        tooltip.data(evt).hidden(false);\n    });\n\n    stacked.dispatch.on('elementMouseout.tooltip', function(evt) {\n        tooltip.hidden(true)\n    });\n    
//============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    // expose chart's sub-components\n    chart.dispatch = dispatch;\n    chart.stacked = stacked;\n    chart.legend = legend;\n    chart.controls = controls;\n    chart.xAxis = xAxis;\n    chart.x2Axis = focus.xAxis;\n    chart.yAxis = yAxis;\n    chart.y2Axis = focus.yAxis;\n    chart.interactiveLayer = interactiveLayer;\n    chart.tooltip = tooltip;\n    chart.focus = focus;\n\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        showLegend: {get: function(){return showLegend;}, set: function(_){showLegend=_;}},\n        legendPosition: {get: function(){return legendPosition;}, set: function(_){legendPosition=_;}},\n        showXAxis:      {get: function(){return showXAxis;}, set: function(_){showXAxis=_;}},\n        showYAxis:    {get: function(){return showYAxis;}, set: function(_){showYAxis=_;}},\n        defaultState:    {get: function(){return defaultState;}, set: function(_){defaultState=_;}},\n        noData:    {get: function(){return noData;}, set: function(_){noData=_;}},\n        showControls:    {get: function(){return showControls;}, set: function(_){showControls=_;}},\n        controlLabels:    {get: function(){return controlLabels;}, set: function(_){controlLabels=_;}},\n        controlOptions:    {get: function(){return controlOptions;}, set: function(_){controlOptions=_;}},\n        showTotalInTooltip:      {get: function(){return showTotalInTooltip;}, set: function(_){showTotalInTooltip=_;}},\n        totalLabel:      {get: function(){return totalLabel;}, set: function(_){totalLabel=_;}},\n        focusEnable:    {get: function(){return focusEnable;}, set: function(_){focusEnable=_;}},\n        focusHeight:     {get: function(){return focus.height();}, set: function(_){focus.height(_);}},\n        brushExtent: {get: function(){return focus.brushExtent();}, set: function(_){focus.brushExtent(_);}},\n\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            if (_.top !== undefined) {\n                margin.top = _.top;\n                marginTop = _.top;\n            }\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? _.left   : margin.left;\n        }},\n        focusMargin: {get: function(){return focus.margin}, set: function(_){\n            focus.margin.top    = _.top    !== undefined ? _.top    : focus.margin.top;\n            focus.margin.right  = _.right  !== undefined ? _.right  : focus.margin.right;\n            focus.margin.bottom = _.bottom !== undefined ? _.bottom : focus.margin.bottom;\n            focus.margin.left   = _.left   !== undefined ? 
_.left   : focus.margin.left;\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n            stacked.duration(duration);\n            xAxis.duration(duration);\n            yAxis.duration(duration);\n        }},\n        color:  {get: function(){return color;}, set: function(_){\n            color = nv.utils.getColor(_);\n            legend.color(color);\n            stacked.color(color);\n            focus.color(color);\n        }},\n        x: {get: function(){return stacked.x();}, set: function(_){\n            stacked.x(_);\n            focus.x(_);\n        }},\n        y: {get: function(){return stacked.y();}, set: function(_){\n            stacked.y(_);\n            focus.y(_);\n        }},\n        rightAlignYAxis: {get: function(){return rightAlignYAxis;}, set: function(_){\n            rightAlignYAxis = _;\n            yAxis.orient( rightAlignYAxis ? 'right' : 'left');\n        }},\n        useInteractiveGuideline: {get: function(){return useInteractiveGuideline;}, set: function(_){\n            useInteractiveGuideline = !!_;\n            chart.interactive(!_);\n            chart.useVoronoi(!_);\n            stacked.scatter.interactive(!_);\n        }}\n    });\n\n    nv.utils.inheritOptions(chart, stacked);\n    nv.utils.initOptions(chart);\n\n    return chart;\n};\n\nnv.models.stackedAreaWithFocusChart = function() {\n  return nv.models.stackedAreaChart()\n    .margin({ bottom: 30 })\n    .focusEnable( true );\n};\n","// based on http://bl.ocks.org/kerryrodden/477c1bfb081b783f80ad\nnv.models.sunburst = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var margin = {top: 0, right: 0, bottom: 0, left: 0}\n        , width = 600\n        , height = 600\n        , mode = \"count\"\n        , modes = {count: function(d) { return 1; }, value: function(d) { return d.value || d.size }, size: function(d) { return d.value || d.size }}\n        , id = Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one\n        , container = null\n        , color = nv.utils.defaultColor()\n        , showLabels = false\n        , labelFormat = function(d){if(mode === 'count'){return d.name + ' #' + d.value}else{return d.name + ' ' + (d.value || d.size)}}\n        , labelThreshold = 0.02\n        , sort = function(d1, d2){return d1.name > d2.name;}\n        , key = function(d,i){return d.name;}\n        , groupColorByParent = true\n        , duration = 500\n        , dispatch = d3.dispatch('chartClick', 'elementClick', 'elementDblClick', 'elementMousemove', 'elementMouseover', 'elementMouseout', 'renderEnd');\n\n    //============================================================\n    // aux functions and setup\n    //------------------------------------------------------------\n\n    var x = d3.scale.linear().range([0, 2 * Math.PI]);\n    var y = d3.scale.sqrt();\n\n    var partition = d3.layout.partition().sort(sort);\n\n    var node, availableWidth, availableHeight, radius;\n    var prevPositions = {};\n\n    var arc = d3.svg.arc()\n        .startAngle(function(d) {return Math.max(0, Math.min(2 * Math.PI, x(d.x))) })\n        .endAngle(function(d) {return Math.max(0, Math.min(2 * Math.PI, x(d.x + d.dx))) })\n        .innerRadius(function(d) {return Math.max(0, y(d.y)) })\n        
.outerRadius(function(d) {return Math.max(0, y(d.y + d.dy)) });\n\n    function rotationToAvoidUpsideDown(d) {\n        var centerAngle = computeCenterAngle(d);\n        if(centerAngle > 90){\n            return 180;\n        }\n        else {\n            return 0;\n        }\n    }\n\n    function computeCenterAngle(d) {\n        var startAngle = Math.max(0, Math.min(2 * Math.PI, x(d.x)));\n        var endAngle = Math.max(0, Math.min(2 * Math.PI, x(d.x + d.dx)));\n        var centerAngle = (((startAngle + endAngle) / 2) * (180 / Math.PI)) - 90;\n        return centerAngle;\n    }\n\n    function computeNodePercentage(d) {\n        var startAngle = Math.max(0, Math.min(2 * Math.PI, x(d.x)));\n        var endAngle = Math.max(0, Math.min(2 * Math.PI, x(d.x + d.dx)));\n        return (endAngle - startAngle) / (2 * Math.PI);\n    }\n\n    function labelThresholdMatched(d) {\n        var startAngle = Math.max(0, Math.min(2 * Math.PI, x(d.x)));\n        var endAngle = Math.max(0, Math.min(2 * Math.PI, x(d.x + d.dx)));\n\n        var size = endAngle - startAngle;\n        return size > labelThreshold;\n    }\n\n    // When zooming: interpolate the scales.\n    function arcTweenZoom(e,i) {\n        var xd = d3.interpolate(x.domain(), [node.x, node.x + node.dx]),\n        yd = d3.interpolate(y.domain(), [node.y, 1]),\n        yr = d3.interpolate(y.range(), [node.y ? 20 : 0, radius]);\n\n        if (i === 0) {\n            return function() {return arc(e);}\n        }\n        else {\n            return function (t) {\n                x.domain(xd(t));\n                y.domain(yd(t)).range(yr(t));\n                return arc(e);\n            }\n        };\n    }\n\n    function arcTweenUpdate(d) {\n        var ipo = d3.interpolate({x: d.x0, dx: d.dx0, y: d.y0, dy: d.dy0}, d);\n\n        return function (t) {\n            var b = ipo(t);\n\n            d.x0 = b.x;\n            d.dx0 = b.dx;\n            d.y0 = b.y;\n            d.dy0 = b.dy;\n\n            return arc(b);\n        };\n    }\n\n    function updatePrevPosition(node) {\n        var k = key(node);\n        if(! 
prevPositions[k]) prevPositions[k] = {};\n        var pP = prevPositions[k];\n        pP.dx = node.dx;\n        pP.x = node.x;\n        pP.dy = node.dy;\n        pP.y = node.y;\n    }\n\n    function storeRetrievePrevPositions(nodes) {\n        nodes.forEach(function(n){\n            var k = key(n);\n            var pP = prevPositions[k];\n            //console.log(k,n,pP);\n            if( pP ){\n                n.dx0 = pP.dx;\n                n.x0 = pP.x;\n                n.dy0 = pP.dy;\n                n.y0 = pP.y;\n            }\n            else {\n                n.dx0 = n.dx;\n                n.x0 = n.x;\n                n.dy0 = n.dy;\n                n.y0 = n.y;\n            }\n            updatePrevPosition(n);\n        });\n    }\n\n    function zoomClick(d) {\n        var labels = container.selectAll('text')\n        var path = container.selectAll('path')\n\n        // fade out all text elements\n        labels.transition().attr(\"opacity\",0);\n\n        // to allow reference to the new center node\n        node = d;\n\n        path.transition()\n            .duration(duration)\n            .attrTween(\"d\", arcTweenZoom)\n            .each('end', function(e) {\n                // partially taken from here: http://bl.ocks.org/metmajer/5480307\n                // check if the animated element's data e lies within the visible angle span given in d\n                if(e.x >= d.x && e.x < (d.x + d.dx) ){\n                    if(e.depth >= d.depth){\n                        // get a selection of the associated text element\n                        var parentNode = d3.select(this.parentNode);\n                        var arcText = parentNode.select('text');\n\n                        // fade in the text element and recalculate positions\n                        arcText.transition().duration(duration)\n                        .text( function(e){return labelFormat(e) })\n                        .attr(\"opacity\", function(d){\n                            if(labelThresholdMatched(d)) {\n                                return 1;\n                            }\n                            else {\n                                return 0;\n                            }\n                        })\n                        .attr(\"transform\", function() {\n                            var width = this.getBBox().width;\n                            if(e.depth === 0)\n                            return \"translate(\" + (width / 2 * - 1) + \",0)\";\n                            else if(e.depth === d.depth){\n                                return \"translate(\" + (y(e.y) + 5) + \",0)\";\n                            }\n                            else {\n                                var centerAngle = computeCenterAngle(e);\n                                var rotation = rotationToAvoidUpsideDown(e);\n                                if (rotation === 0) {\n                                    return 'rotate('+ centerAngle +')translate(' + (y(e.y) + 5) + ',0)';\n                                }\n                                else {\n                                    return 'rotate('+ centerAngle +')translate(' + (y(e.y) + width + 5) + ',0)rotate(' + rotation + ')';\n                                }\n                            }\n                        });\n                    }\n                }\n            })\n    }\n\n    //============================================================\n    // chart function\n    //------------------------------------------------------------\n    var 
renderWatch = nv.utils.renderWatch(dispatch);\n\n    function chart(selection) {\n        renderWatch.reset();\n\n        selection.each(function(data) {\n            container = d3.select(this);\n            availableWidth = nv.utils.availableWidth(width, container, margin);\n            availableHeight = nv.utils.availableHeight(height, container, margin);\n            radius = Math.min(availableWidth, availableHeight) / 2;\n\n            y.range([0, radius]);\n\n            // Setup containers and skeleton of chart\n            var wrap = container.select('g.nvd3.nv-wrap.nv-sunburst');\n            if( !wrap[0][0] ) {\n                wrap = container.append('g')\n                    .attr('class', 'nvd3 nv-wrap nv-sunburst nv-chart-' + id)\n                    .attr('transform', 'translate(' + ((availableWidth / 2) + margin.left + margin.right) + ',' + ((availableHeight / 2) + margin.top + margin.bottom) + ')');\n            } else {\n                wrap.attr('transform', 'translate(' + ((availableWidth / 2) + margin.left + margin.right) + ',' + ((availableHeight / 2) + margin.top + margin.bottom) + ')');\n            }\n\n            container.on('click', function (d, i) {\n                dispatch.chartClick({\n                    data: d,\n                    index: i,\n                    pos: d3.event,\n                    id: id\n                });\n            });\n\n            partition.value(modes[mode] || modes[\"count\"]);\n\n            //reverse the drawing order so that the labels of inner\n            //arcs are drawn on top of the outer arcs.\n            var nodes = partition.nodes(data[0]).reverse()\n\n            storeRetrievePrevPositions(nodes);\n            var cG = wrap.selectAll('.arc-container').data(nodes, key)\n\n            //handle new datapoints\n            var cGE = cG.enter()\n                .append(\"g\")\n                .attr(\"class\",'arc-container')\n\n            cGE.append(\"path\")\n                .attr(\"d\", arc)\n                .style(\"fill\", function (d) {\n                    if (d.color) {\n                        return d.color;\n                    }\n                    else if (groupColorByParent) {\n                        return color((d.children ? 
d : d.parent).name);\n                    }\n                    else {\n                        return color(d.name);\n                    }\n                })\n                .style(\"stroke\", \"#FFF\")\n                .on(\"click\", function(d,i){\n                    zoomClick(d);\n                    dispatch.elementClick({\n                        data: d,\n                        index: i\n                    })\n                })\n                .on('mouseover', function(d,i){\n                    d3.select(this).classed('hover', true).style('opacity', 0.8);\n                    dispatch.elementMouseover({\n                        data: d,\n                        color: d3.select(this).style(\"fill\"),\n                        percent: computeNodePercentage(d)\n                    });\n                })\n                .on('mouseout', function(d,i){\n                    d3.select(this).classed('hover', false).style('opacity', 1);\n                    dispatch.elementMouseout({\n                        data: d\n                    });\n                })\n                .on('mousemove', function(d,i){\n                    dispatch.elementMousemove({\n                        data: d\n                    });\n                });\n\n            ///Iterating via each and selecting based on the this\n            ///makes it work ... a cG.selectAll('path') doesn't.\n            ///Without iteration the data (in the element) didn't update.\n            cG.each(function(d){\n                d3.select(this).select('path')\n                    .transition()\n                    .duration(duration)\n                    .attrTween('d', arcTweenUpdate);\n            });\n\n            if(showLabels){\n                //remove labels first and add them back\n                cG.selectAll('text').remove();\n\n                //this way labels are on top of newly added arcs\n                cG.append('text')\n                    .text( function(e){ return labelFormat(e)})\n                    .transition()\n                    .duration(duration)\n                    .attr(\"opacity\", function(d){\n                        if(labelThresholdMatched(d)) {\n                            return 1;\n                        }\n                        else {\n                            return 0;\n                        }\n                    })\n                    .attr(\"transform\", function(d) {\n                        var width = this.getBBox().width;\n                        if(d.depth === 0){\n                            return \"rotate(0)translate(\" + (width / 2 * -1) + \",0)\";\n                        }\n                        else {\n                            var centerAngle = computeCenterAngle(d);\n                            var rotation = rotationToAvoidUpsideDown(d);\n                            if (rotation === 0) {\n                                return 'rotate('+ centerAngle +')translate(' + (y(d.y) + 5) + ',0)';\n                            }\n                            else {\n                                return 'rotate('+ centerAngle +')translate(' + (y(d.y) + width + 5) + ',0)rotate(' + rotation + ')';\n                            }\n                        }\n                    });\n            }\n\n            //zoom out to the center when the data is updated.\n            zoomClick(nodes[nodes.length - 1])\n\n\n            //remove unmatched elements ...\n            cG.exit()\n                .transition()\n                .duration(duration)\n                
.attr('opacity',0)\n                .each('end',function(d){\n                    var k = key(d);\n                    prevPositions[k] = undefined;\n                })\n                .remove();\n        });\n\n\n        renderWatch.renderEnd('sunburst immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    chart.dispatch = dispatch;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        width:      {get: function(){return width;}, set: function(_){width=_;}},\n        height:     {get: function(){return height;}, set: function(_){height=_;}},\n        mode:       {get: function(){return mode;}, set: function(_){mode=_;}},\n        id:         {get: function(){return id;}, set: function(_){id=_;}},\n        duration:   {get: function(){return duration;}, set: function(_){duration=_;}},\n        groupColorByParent: {get: function(){return groupColorByParent;}, set: function(_){groupColorByParent=!!_;}},\n        showLabels: {get: function(){return showLabels;}, set: function(_){showLabels=!!_}},\n        labelFormat: {get: function(){return labelFormat;}, set: function(_){labelFormat=_}},\n        labelThreshold: {get: function(){return labelThreshold;}, set: function(_){labelThreshold=_}},\n        sort: {get: function(){return sort;}, set: function(_){sort=_}},\n        key: {get: function(){return key;}, set: function(_){key=_}},\n        // options that require extra logic in the setter\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    != undefined ? _.top    : margin.top;\n            margin.right  = _.right  != undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom != undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   != undefined ? 
_.left   : margin.left;\n        }},\n        color: {get: function(){return color;}, set: function(_){\n            color=nv.utils.getColor(_);\n        }}\n    });\n\n    nv.utils.initOptions(chart);\n    return chart;\n};\n","nv.models.sunburstChart = function() {\n    \"use strict\";\n\n    //============================================================\n    // Public Variables with Default Settings\n    //------------------------------------------------------------\n\n    var sunburst = nv.models.sunburst();\n    var tooltip = nv.models.tooltip();\n\n    var margin = {top: 30, right: 20, bottom: 20, left: 20}\n        , width = null\n        , height = null\n        , color = nv.utils.defaultColor()\n        , showTooltipPercent = false\n        , id = Math.round(Math.random() * 100000)\n        , defaultState = null\n        , noData = null\n        , duration = 250\n        , dispatch = d3.dispatch('stateChange', 'changeState','renderEnd');\n\n\n    //============================================================\n    // Private Variables\n    //------------------------------------------------------------\n\n    var renderWatch = nv.utils.renderWatch(dispatch);\n\n    tooltip\n        .duration(0)\n        .headerEnabled(false)\n        .valueFormatter(function(d){return d;});\n\n    //============================================================\n    // Chart function\n    //------------------------------------------------------------\n\n    function chart(selection) {\n        renderWatch.reset();\n        renderWatch.models(sunburst);\n\n        selection.each(function(data) {\n            var container = d3.select(this);\n\n            nv.utils.initSVG(container);\n\n            var availableWidth = nv.utils.availableWidth(width, container, margin);\n            var availableHeight = nv.utils.availableHeight(height, container, margin);\n\n            chart.update = function() {\n                if (duration === 0) {\n                    container.call(chart);\n                } else {\n                    container.transition().duration(duration).call(chart);\n                }\n            };\n            chart.container = container;\n\n            // Display No Data message if there's nothing to show.\n            if (!data || !data.length) {\n                nv.utils.noData(chart, container);\n                return chart;\n            } else {\n                container.selectAll('.nv-noData').remove();\n            }\n\n            sunburst.width(availableWidth).height(availableHeight).margin(margin);\n            container.call(sunburst);\n        });\n\n        renderWatch.renderEnd('sunburstChart immediate');\n        return chart;\n    }\n\n    //============================================================\n    // Event Handling/Dispatching (out of chart's scope)\n    //------------------------------------------------------------\n\n    sunburst.dispatch.on('elementMouseover.tooltip', function(evt) {\n        evt.series = {\n            key: evt.data.name,\n            value: (evt.data.value || evt.data.size),\n            color: evt.color,\n            percent: evt.percent\n        };\n        if (!showTooltipPercent) {\n            delete evt.percent;\n            delete evt.series.percent;\n        }\n        tooltip.data(evt).hidden(false);\n    });\n\n    sunburst.dispatch.on('elementMouseout.tooltip', function(evt) {\n        tooltip.hidden(true);\n    });\n\n    sunburst.dispatch.on('elementMousemove.tooltip', function(evt) {\n        tooltip();\n    });\n\n    
//============================================================\n    // Expose Public Variables\n    //------------------------------------------------------------\n\n    // expose chart's sub-components\n    chart.dispatch = dispatch;\n    chart.sunburst = sunburst;\n    chart.tooltip = tooltip;\n    chart.options = nv.utils.optionsFunc.bind(chart);\n\n    // use Object get/set functionality to map between vars and chart functions\n    chart._options = Object.create({}, {\n        // simple options, just get/set the necessary values\n        noData:             {get: function(){return noData;},               set: function(_){noData=_;}},\n        defaultState:       {get: function(){return defaultState;},         set: function(_){defaultState=_;}},\n        showTooltipPercent: {get: function(){return showTooltipPercent;},   set: function(_){showTooltipPercent=_;}},\n\n        // options that require extra logic in the setter\n        color: {get: function(){return color;}, set: function(_){\n            color = _;\n            sunburst.color(color);\n        }},\n        duration: {get: function(){return duration;}, set: function(_){\n            duration = _;\n            renderWatch.reset(duration);\n            sunburst.duration(duration);\n        }},\n        margin: {get: function(){return margin;}, set: function(_){\n            margin.top    = _.top    !== undefined ? _.top    : margin.top;\n            margin.right  = _.right  !== undefined ? _.right  : margin.right;\n            margin.bottom = _.bottom !== undefined ? _.bottom : margin.bottom;\n            margin.left   = _.left   !== undefined ? _.left   : margin.left;\n            sunburst.margin(margin);\n        }}\n    });\n    nv.utils.inheritOptions(chart, sunburst);\n    nv.utils.initOptions(chart);\n    return chart;\n\n};\n"]}
\ No newline at end of file
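
The deleted nvd3 models above (stackedAreaChart, sunburst, sunburstChart) all expose their settings through a chart._options map of {get, set} pairs that nv.utils.initOptions turns into chainable accessor functions. Below is a minimal sketch of that pattern in plain JavaScript; initOptions and demoChart here are illustrative stand-ins for this sketch, not nvd3's actual utilities.

    // Turn every {get, set} entry in chart._options into a chainable accessor.
    function initOptions(chart) {
        Object.keys(chart._options).forEach(function (name) {
            var spec = chart._options[name];
            chart[name] = function (value) {
                if (!arguments.length) return spec.get(); // getter form: chart.width()
                spec.set(value);                          // setter form: chart.width(600)
                return chart;                             // return chart to allow chaining
            };
        });
        return chart;
    }

    // Usage, mirroring how the deleted models declare their simple options:
    function demoChart() {
        var width = 600, height = 400;
        function chart() { /* rendering would happen here */ }
        chart._options = {
            width:  {get: function () { return width; },  set: function (v) { width = v; }},
            height: {get: function () { return height; }, set: function (v) { height = v; }}
        };
        return initOptions(chart);
    }

    var c = demoChart().width(800).height(300);
    console.log(c.width(), c.height()); // 800 300
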
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css
deleted file mode 100644
index 271ac74..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- *   Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
-*/
-body {
-    padding: 40px;
-    padding-top: 60px;
-}
-.starter-template {
-    padding: 40px 15px;
-    text-align: center;
-}
-
-
-.btn {
-    border: 0 none;
-    font-weight: 700;
-    letter-spacing: 1px;
-    text-transform: uppercase;
-}
-
-.btn:focus, .btn:active:focus, .btn.active:focus {
-    outline: 0 none;
-}
-
-.table-striped > tbody > tr:nth-child(2n+1).selectedtag > td:hover {
-    background-color: #3276b1;
-}
-.table-striped > tbody > tr:nth-child(2n+1).selectedtag > td {
-    background-color: #3276b1;
-}
-.tagPanel tr.selectedtag td {
-    background-color: #3276b1;
-}
-.top-buffer { margin-top:4px; }
-
-
-.sortorder:after {
-    content: '\25b2';   /* BLACK UP-POINTING TRIANGLE */
-}
-.sortorder.reverse:after {
-    content: '\25bc';   /* BLACK DOWN-POINTING TRIANGLE */
-}
-
-.wrap-table{
-    word-wrap: break-word;
-    table-layout: fixed;
-}
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
deleted file mode 100644
index a31078c..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
+++ /dev/null
@@ -1,385 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-(function() {
-  "use strict";
-
-  var isIgnoredJmxKeys = function(key) {
-    return key == 'name' || key == 'modelerType' || key == "$$hashKey" ||
-      key.match(/tag.*/);
-  };
-  angular.module('ozone', ['nvd3', 'ngRoute']);
-  angular.module('ozone').config(function($routeProvider) {
-    $routeProvider
-      .when("/", {
-        templateUrl: "main.html"
-      })
-      .when("/metrics/rpc", {
-        template: "<rpc-metrics></rpc-metrics>"
-      })
-      .when("/config", {
-        template: "<config></config>"
-      })
-  });
-  angular.module('ozone').component('overview', {
-    templateUrl: 'static/templates/overview.html',
-    transclude: true,
-    controller: function($http) {
-      var ctrl = this;
-      $http.get("jmx?qry=Hadoop:service=*,name=*,component=ServerRuntime")
-        .then(function(result) {
-          ctrl.jmx = result.data.beans[0]
-        })
-    }
-  });
-  angular.module('ozone').component('jvmParameters', {
-    templateUrl: 'static/templates/jvm.html',
-    controller: function($http) {
-      var ctrl = this;
-      $http.get("jmx?qry=java.lang:type=Runtime")
-        .then(function(result) {
-          ctrl.jmx = result.data.beans[0];
-
-          //convert array to a map
-          var systemProperties = {};
-          for (var idx in ctrl.jmx.SystemProperties) {
-            var item = ctrl.jmx.SystemProperties[idx];
-            systemProperties[item.key.replace(/\./g, "_")] = item.value;
-          }
-          ctrl.jmx.SystemProperties = systemProperties;
-        })
-    }
-  });
-
-  angular.module('ozone').component('rpcMetrics', {
-    template: '<h1>Rpc metrics</h1><tabs>' +
-      '<pane ng-repeat="metric in $ctrl.metrics" ' +
-      'title="{{metric[\'tag.serverName\']}} ({{metric[\'tag.port\']}})">' +
-      '<rpc-metric jmxdata="metric"></rpc-metric></pane>' +
-      '</tabs>',
-    controller: function($http) {
-      var ctrl = this;
-      $http.get("jmx?qry=Hadoop:service=*,name=RpcActivityForPort*")
-        .then(function(result) {
-          ctrl.metrics = result.data.beans;
-        })
-    }
-  });
-  angular.module('ozone').component('rpcMetric', {
-    bindings: {
-      jmxdata: '<'
-    },
-    templateUrl: 'static/templates/rpc-metrics.html',
-    controller: function() {
-      var ctrl = this;
-
-
-      ctrl.percentileGraphOptions = {
-        chart: {
-          type: 'discreteBarChart',
-          height: 450,
-          margin: {
-            top: 20,
-            right: 20,
-            bottom: 50,
-            left: 55
-          },
-          x: function(d) {
-            return d.label;
-          },
-          y: function(d) {
-            return d.value;
-          },
-          showValues: true,
-          valueFormat: function(d) {
-            return d3.format(',.1f')(d);
-          },
-          duration: 500,
-          xAxis: {
-            axisLabel: 'Percentage'
-          },
-          yAxis: {
-            axisLabel: 'Latency (ms)',
-            axisLabelDistance: -10
-          }
-        }
-      };
-
-      ctrl.$onChanges = function(data) {
-        var groupedMetrics = {}
-
-        var createPercentageMetrics = function(metricName, window) {
-          groupedMetrics.percentiles = groupedMetrics['percentiles'] || {}
-          groupedMetrics.percentiles[window] = groupedMetrics.percentiles[window] || {};
-          groupedMetrics.percentiles[window][metricName] = groupedMetrics.percentiles[window][metricName] || {
-            graphdata: [{
-              key: window,
-              values: []
-            }],
-            numOps: 0
-          };
-
-        };
-        var metrics = ctrl.jmxdata;
-        for (var key in metrics) {
-          var percentile = key.match(/(.*Time)(\d+s)(\d+th)PercentileLatency/);
-          var percentileNumOps = key.match(/(.*Time)(\d+s)NumOps/);
-          var successFailures = key.match(/(.*)(Success|Failures)/);
-          var numAverages = key.match(/(.*Time)(NumOps|AvgTime)/);
-          if (percentile) {
-            var metricName = percentile[1];
-            var window = percentile[2];
-            var percentage = percentile[3]
-            createPercentageMetrics(metricName, window);
-
-
-            groupedMetrics.percentiles[window][metricName].graphdata[0]
-              .values.push({
-                label: percentage,
-                value: metrics[key]
-              })
-          } else if (successFailures) {
-            var metricName = successFailures[1];
-            groupedMetrics.successfailures = groupedMetrics['successfailures'] || {}
-            groupedMetrics.successfailures[metricName] = groupedMetrics.successfailures[metricName] || {
-              success: 0,
-              failures: 0
-            };
-            if (successFailures[2] == 'Success') {
-              groupedMetrics.successfailures[metricName].success = metrics[key];
-            } else {
-              groupedMetrics.successfailures[metricName].failures = metrics[key];
-            }
-
-          } else if (numAverages) {
-            var metricName = numAverages[1];
-            groupedMetrics.numavgs = groupedMetrics['numavgs'] || {}
-            groupedMetrics.numavgs[metricName] = groupedMetrics.numavgs[metricName] || {
-              numOps: 0,
-              avgTime: 0
-            };
-            if (numAverages[2] == 'NumOps') {
-              groupedMetrics.numavgs[metricName].numOps = metrics[key];
-            } else {
-              groupedMetrics.numavgs[metricName].avgTime = metrics[key];
-            }
-
-          } else if (percentileNumOps) {
-            var metricName = percentileNumOps[1];
-            var window = percentileNumOps[2];
-            createPercentageMetrics(metricName, window);
-            groupedMetrics.percentiles[window][metricName].numOps = metrics[key];
-          } else if (isIgnoredJmxKeys(key)) {
-            //ignore
-          } else {
-            groupedMetrics.others = groupedMetrics.others || [];
-            groupedMetrics.others.push({
-              'key': key,
-              'value': metrics[key]
-            });
-          }
-
-        }
-        ctrl.metrics = groupedMetrics;
-      };
-
-    }
-  });
-  angular.module('ozone')
-    .component('tabs', {
-      transclude: true,
-      controller: function($scope) {
-        var ctrl = this;
-        var panes = this.panes = [];
-        this.select = function(pane) {
-          angular.forEach(panes, function(pane) {
-            pane.selected = false;
-          });
-          pane.selected = true;
-        };
-        this.addPane = function(pane) {
-          if (panes.length === 0) {
-            this.select(pane);
-          }
-          panes.push(pane);
-        };
-        this.click = function(pane) {
-          ctrl.select(pane);
-        }
-      },
-      template: '<div class="nav navtabs"><div class="row"><ul' +
-        ' class="nav nav-pills">' +
-        '<li ng-repeat="pane in $ctrl.panes" ng-class="{active:pane.selected}">' +
-        '<a href="" ng-click="$ctrl.click(pane)">{{pane.title}}</a> ' +
-        '</li> </ul></div><br/><div class="tab-content" ng-transclude></div> </div>'
-    })
-    .component('pane', {
-      transclude: true,
-      require: {
-        tabsCtrl: '^tabs'
-      },
-      bindings: {
-        title: '@'
-      },
-      controller: function() {
-        this.$onInit = function() {
-          this.tabsCtrl.addPane(this);
-        };
-      },
-      template: '<div class="tab-pane" ng-if="$ctrl.selected" ng-transclude></div>'
-    });
-
-  angular.module('ozone').component('navmenu', {
-    bindings: {
-      metrics: '<'
-    },
-    templateUrl: 'static/templates/menu.html',
-    controller: function($http) {
-      var ctrl = this;
-      ctrl.docs = false;
-      $http.head("docs/index.html")
-        .then(function(result) {
-          ctrl.docs = true;
-        }, function() {
-          ctrl.docs = false;
-        });
-    }
-  });
-
-  angular.module('ozone').component('config', {
-    templateUrl: 'static/templates/config.html',
-    controller: function($scope, $http) {
-      var ctrl = this;
-      ctrl.selectedTags = [];
-      ctrl.configArray = [];
-
-      $http.get("conf?cmd=getOzoneTags")
-        .then(function(response) {
-          ctrl.tags = response.data;
-          var excludedTags = ['CBLOCK', 'OM', 'SCM'];
-          for (var i = 0; i < excludedTags.length; i++) {
-            var idx = ctrl.tags.indexOf(excludedTags[i]);
-            // Remove the excluded tags from the selectable tag list
-            if (idx > -1) {
-              ctrl.tags.splice(idx, 1);
-            }
-          }
-          ctrl.loadAll();
-        });
-
-      ctrl.convertToArray = function(srcObj) {
-        ctrl.keyTagMap = {};
-        for (var idx in srcObj) {
-          //console.log("Adding keys for "+idx)
-          for (var key in srcObj[idx]) {
-
-            if (ctrl.keyTagMap.hasOwnProperty(key)) {
-              ctrl.keyTagMap[key]['tag'].push(idx);
-            } else {
-              var newProp = {};
-              newProp['name'] = key;
-              newProp['value'] = srcObj[idx][key];
-              newProp['tag'] = [];
-              newProp['tag'].push(idx);
-              ctrl.keyTagMap[key] = newProp;
-            }
-          }
-        }
-      }
-
-      ctrl.loadAll = function() {
-        $http.get("conf?cmd=getPropertyByTag&tags=OM,SCM," + ctrl.tags)
-          .then(function(response) {
-
-            ctrl.convertToArray(response.data);
-            ctrl.configs = Object.values(ctrl.keyTagMap);
-            ctrl.component = 'All';
-            ctrl.sortBy('name');
-          });
-      };
-
-      ctrl.filterTags = function() {
-        if (!ctrl.selectedTags) {
-          return true;
-        }
-
-        if (ctrl.selectedTags.length < 1 && ctrl.component == 'All') {
-          return true;
-        }
-
-        ctrl.configs = ctrl.configs.filter(function(item) {
-
-          if (ctrl.component != 'All' && (item['tag'].indexOf(ctrl
-              .component) < 0)) {
-            return false;
-          }
-
-          if (ctrl.selectedTags.length < 1) {
-            return true;
-          }
-          for (var tag in item['tag']) {
-            tag = item['tag'][tag];
-            if (ctrl.selectedTags.indexOf(tag) > -1) {
-              return true;
-            }
-          }
-          return false;
-        });
-
-      };
-      ctrl.configFilter = function(config) {
-        return false;
-      };
-      ctrl.selected = function(tag) {
-        return ctrl.selectedTags.includes(tag);
-      };
-
-      ctrl.switchto = function(tag) {
-        ctrl.component = tag;
-        ctrl.reloadConfig();
-      };
-
-      ctrl.select = function(tag) {
-        var tagIdx = ctrl.selectedTags.indexOf(tag);
-        if (tagIdx > -1) {
-          ctrl.selectedTags.splice(tagIdx, 1);
-        } else {
-          ctrl.selectedTags.push(tag);
-        }
-        ctrl.reloadConfig();
-      };
-
-      ctrl.reloadConfig = function() {
-        ctrl.configs = [];
-        ctrl.configs = Object.values(ctrl.keyTagMap);
-        ctrl.filterTags();
-      };
-
-      ctrl.sortBy = function(field) {
-        ctrl.reverse = (ctrl.propertyName === field) ? !ctrl.reverse : false;
-        ctrl.propertyName = field;
-      };
-
-      ctrl.allSelected = function(comp) {
-        //console.log("Adding key for compo ->"+comp)
-        return ctrl.component == comp;
-      };
-
-    }
-  });
-
-})();
\ No newline at end of file
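
The deleted rpcMetric controller above groups flat JMX counter names into chart inputs with a handful of regular expressions in its $onChanges handler. A minimal sketch of that grouping, using assumed sample key names (real keys come from Hadoop's RpcActivityForPort beans):

    // Group percentile-latency keys by window and metric name, and
    // NumOps/AvgTime pairs by metric name, following the deleted handler.
    var metrics = {
        'RpcProcessingTime60s90thPercentileLatency': 12.5,
        'RpcProcessingTime60s99thPercentileLatency': 48.0,
        'RpcProcessingTimeNumOps': 1000,
        'RpcProcessingTimeAvgTime': 3.2
    };

    var grouped = {percentiles: {}, numavgs: {}};
    Object.keys(metrics).forEach(function (key) {
        var p = key.match(/(.*Time)(\d+s)(\d+th)PercentileLatency/);
        if (p) {
            var byWindow = grouped.percentiles[p[2]] = grouped.percentiles[p[2]] || {};
            var entry = byWindow[p[1]] = byWindow[p[1]] || {values: []};
            entry.values.push({label: p[3], value: metrics[key]});
            return;
        }
        var na = key.match(/(.*Time)(NumOps|AvgTime)/);
        if (na) {
            var m = grouped.numavgs[na[1]] = grouped.numavgs[na[1]] || {};
            m[na[2] === 'NumOps' ? 'numOps' : 'avgTime'] = metrics[key];
        }
    });

    console.log(JSON.stringify(grouped, null, 2));
    // percentiles['60s']['RpcProcessingTime'].values -> [{90th: 12.5}, {99th: 48.0}]
    // numavgs['RpcProcessingTime'] -> {numOps: 1000, avgTime: 3.2}
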
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
deleted file mode 100644
index b52f653..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
+++ /dev/null
@@ -1,91 +0,0 @@
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-
-<div class="row top-buffer">
-  <div class="col-md-2">
-    <input type="text" class="form-control" placeholder="Search Properties"
-           name="search" ng-model="search.$">
-  </div>
-  <div class="col-md-10">
-    <div class="btn-group btn-group-justified">
-      <a class="btn"
-         ng-class="$ctrl.allSelected('All') ? 'btn-primary' :'btn-secondary'"
-         ng-click="$ctrl.switchto('All')">All
-      </a>
-      <a class="btn"
-         ng-class="$ctrl.allSelected('OM') ? 'btn-primary' :'btn-secondary'"
-         ng-click="$ctrl.switchto('OM')">OM</a>
-      <a class="btn"
-         ng-class="$ctrl.allSelected('SCM') ? 'btn-primary' :'btn-secondary'"
-         ng-click="$ctrl.switchto('SCM')">SCM</a>
-    </div>
-  </div>
-</div>
-<div class="row">
-  <div class="col-md-2">
-
-    <table class="table table-striped table-condensed tagPanel">
-      <colgroup>
-        <col class="col-md-12">
-      </colgroup>
-      <thead>
-      <tr>
-        <th>Tag</th>
-      </tr>
-      </thead>
-      <tbody>
-      <tr ng-click="$ctrl.select(tag)"
-          ng-class="$ctrl.selected(tag) ? 'selectedtag':''"
-          ng-repeat="tag in $ctrl.tags">
-        <td>{{tag}}</td>
-      </tr>
-      </tbody>
-    </table>
-  </div>
-  <div class="col-md-10">
-    <table class="table table-striped table-condensed table-hover wrap-table">
-      <thead>
-      <tr>
-        <th class="col-md-3" >
-          <a href="javascript:void(0)" ng-click="$ctrl.sortBy('name')">Property</a>
-          <span class="sortorder" ng-show="$ctrl.propertyName === 'name'"
-                ng-class="{reverse: $ctrl.reverse}">
-              </span>
-        </th>
-        <th class="col-md-2" style="word-wrap: break-word;">
-          <a href="javascript:void(0)" ng-click="$ctrl.sortBy('value')">Value</a>
-          <span class="sortorder" ng-show="$ctrl.propertyName === 'value'"
-                ng-class="{reverse: $ctrl.reverse}"></span>
-        </th>
-        <th class="col-md-7">
-          <a href="javascript:void(0)" ng-click="$ctrl.sortBy('description')">Description</a>
-          <span class="sortorder" ng-show="$ctrl.propertyName === 'description'"
-                ng-class="{reverse: reverse}"></span>
-        </th>
-      </tr>
-      </thead>
-      <tbody>
-      <tr
-          ng-repeat="config in $ctrl.configs | filter:search | orderBy:$ctrl.propertyName:$ctrl.reverse">
-        <td style="word-wrap: break-word;">{{config.name}}</td>
-        <td style="word-wrap: break-word;">{{config.value}}</td>
-        <td style="word-wrap: break-word;">{{config.description}}</td>
-      </tr>
-      </tbody>
-    </table>
-  </div>
-</div>
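
The sortBy handler in the deleted config controller pairs with the orderBy filter in the table above: clicking the header of the already-sorted column flips the direction, while clicking a new column sorts it ascending. The same two-field state machine in isolation, in plain JavaScript with names taken from the deleted code:

    var ctrl = {propertyName: null, reverse: false};

    // Toggle direction on a repeated click, reset to ascending on a new column.
    function sortBy(field) {
        ctrl.reverse = (ctrl.propertyName === field) ? !ctrl.reverse : false;
        ctrl.propertyName = field;
    }

    sortBy('name');  // {propertyName: 'name',  reverse: false}
    sortBy('name');  // {propertyName: 'name',  reverse: true}
    sortBy('value'); // {propertyName: 'value', reverse: false}
    console.log(ctrl);
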
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html
deleted file mode 100644
index c1f7d16..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html
+++ /dev/null
@@ -1,26 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<table class="table table-bordered table-striped">
-    <tr>
-        <th>JVM:</th>
-        <td>{{$ctrl.jmx.SystemProperties.java_vm_name}} {{$ctrl.jmx.SystemProperties.java_vm_version}}</td>
-    </tr>
-    <tr>
-        <th>Input arguments:</th>
-        <td>{{$ctrl.jmx.InputArguments}}</td>
-    </tr>
-</table>
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html
deleted file mode 100644
index 95f1b484..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html
+++ /dev/null
@@ -1,60 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<div id="navbar" class="collapse navbar-collapse">
-    <ul class="nav navbar-nav" id="ui-tabs">
-        <li>
-            <a class="dropdown-toggle"
-               id="metricsMenu"
-               data-toggle="dropdown"
-               aria-haspopup="true"
-               aria-expanded="true">
-                Metrics
-                <span class="caret"></span>
-            </a>
-            <ul
-                class="dropdown-menu"
-                aria-labelledby="metricsMenu">
-                <li ng-repeat="(name, url) in $ctrl.metrics">
-                    <a ng-href="{{url}}">{{name}}<span
-                        aria-hidden="true"></span></a></li>
-            </ul>
-        </li>
-        <li><a href="#!/config">Configuration</a></li>
-        <li ng-show="$ctrl.docs"><a href="/docs">Documentation</a></li>
-        <li>
-            <a class="dropdown-toggle"
-               id="toolsMenu"
-               data-toggle="dropdown"
-               aria-haspopup="true"
-               aria-expanded="true"
-               >
-                Common tools
-                <span class="caret"></span>
-            </a>
-            <ul class="dropdown-menu" aria-labelledby="toolsMenu">
-                <li><a href="jmx">JMX <span
-                        aria-hidden="true"></span></a></li>
-                <li><a href="conf">Config <span
-                        aria-hidden="true"></a></li>
-                <li><a href="stacks">Stacks <span
-                        aria-hidden="true"></a></li>
-                <li><a href="logLevel">Log levels <span
-                        aria-hidden="true"></a></li>
-            </ul>
-        </li>
-    </ul>
-</div><!--/.nav-collapse -->
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html
deleted file mode 100644
index 30e2d26..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html
+++ /dev/null
@@ -1,39 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<h1>Overview</h1>
-<table class="table table-bordered table-striped">
-    <tbody>
-    <tr>
-        <th>Started:</th>
-        <td>{{$ctrl.jmx.StartedTimeInMillis | date : 'medium'}}</td>
-    </tr>
-    <tr>
-        <th>Version:</th>
-        <td>{{$ctrl.jmx.Version}}</td>
-    </tr>
-    <tr>
-        <th>Compiled:</th>
-        <td>{{$ctrl.jmx.CompileInfo}}</td>
-    </tr>
-    </tbody>
-</table>
-
-<h2>JVM parameters</h2>
-
-<jvm-parameters></jvm-parameters>
-
-<div ng-transclude></div>
\ No newline at end of file
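
The overview and rpcMetrics components in the deleted ozone.js read Hadoop's /jmx servlet, which filters MBeans by an ObjectName pattern passed in the qry parameter and answers with a JSON body of the form {"beans": [...]}. A minimal standalone sketch of the same call; the base URL and host are placeholders, and fetch assumes Node 18+ or a browser:

    // Fetch MBeans matching an ObjectName pattern from a Hadoop /jmx servlet.
    async function fetchBeans(baseUrl, query) {
        var res = await fetch(baseUrl + '/jmx?qry=' + encodeURIComponent(query));
        if (!res.ok) throw new Error('jmx request failed: ' + res.status);
        var body = await res.json();
        return body.beans; // the deleted overview component reads beans[0]
    }

    // The query the deleted overview component issues:
    // fetchBeans('http://scm-host:9876', 'Hadoop:service=*,name=*,component=ServerRuntime')
    //     .then(function (beans) { console.log(beans[0]); });
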
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html
deleted file mode 100644
index facb152..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html
+++ /dev/null
@@ -1,87 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<div ng-hide="$ctrl.metrics.percentiles" class="alert alert-info">
-    Please set <b>rpc.metrics.quantile.enable</b> to <b>true</b> and define the
-    intervals in seconds by setting <b>rpc.metrics.percentiles.intervals</b>
-    (e.g. set to <b>60,300</b>) in your hdfs-site.xml
-    to display the Hadoop RPC-related graphs.
-</div>
-<div ng-repeat="(window,windowed) in $ctrl.metrics.percentiles">
-    <h2>{{window}} window</h2>
-    <p>Quantiles based on a fixed {{window}} window, recalculated once every
-        {{window}}.</p>
-
-    <div class="row">
-        <div class="col-md-6 col-lg-4"
-             ng-repeat="(metric,percentiles) in windowed">
-            <h3>{{metric}}</h3>
-            <p>{{percentiles.numOps}} samples</p>
-            <nvd3 options="$ctrl.percentileGraphOptions"
-                  data="percentiles.graphdata"></nvd3>
-        </div>
-    </div>
-
-</div>
-<div class="row">
-    <div ng-show="$ctrl.metrics.numavgs" class="col-md-6">
-        <h2>Number of ops / Averages</h2>
-
-        <table class="table table-bordered table-striped">
-            <thead>
-            <tr>
-                <th>Metric name</th>
-                <th>Number of ops</th>
-                <th>Average time (ms)</th>
-            </tr>
-            </thead>
-            <tr ng-repeat="(key,metric) in $ctrl.metrics.numavgs">
-                <td>{{key}}</td>
-                <td>{{metric.numOps | number}}</td>
-                <td>{{metric.avgTime | number:2}}</td>
-            </tr>
-        </table>
-    </div>
-    <div ng-show="$ctrl.metrics.successfailures" class="col-md-6">
-        <h2>Success / Failures</h2>
-
-        <table class="table table-bordered table-striped">
-            <thead>
-            <tr>
-                <th>Metric name</th>
-                <th>Success</th>
-                <th>Failures</th>
-            </tr>
-            </thead>
-
-            <tr ng-repeat="(key,metric) in $ctrl.metrics.successfailures">
-                <td>{{key}}</td>
-                <td>{{metric.success}}</td>
-                <td>{{metric.failures}}</td>
-            </tr>
-        </table>
-    </div>
-</div>
-<div ng-show="$ctrl.metrics.others">
-    <h2>Other JMX Metrics</h2>
-
-    <table class="table">
-        <tr ng-repeat="metric in $ctrl.metrics.others">
-            <td>{{metric.key}}</td>
-            <td>{{metric.value}}</td>
-        </tr>
-    </table>
-</div>
\ No newline at end of file
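The alert in the template above tells operators how to enable the percentile
graphs. A minimal sketch of the equivalent programmatic setup, assuming the
stock Hadoop Configuration API (in practice the keys go into hdfs-site.xml,
as the template says):

    import org.apache.hadoop.conf.Configuration;

    public class EnableRpcQuantiles {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Turn on quantile (percentile) collection for RPC metrics.
        conf.setBoolean("rpc.metrics.quantile.enable", true);
        // Roll percentiles over 60s and 300s windows, matching the
        // example values suggested in the template above.
        conf.set("rpc.metrics.percentiles.intervals", "60,300");
      }
    }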
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java
deleted file mode 100644
index c6eae0e..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test common Ozone/HDDS web server methods.
- */
-public class TestBaseHttpServer {
-  @Test
-  public void getBindAddress() throws Exception {
-    Configuration conf = new Configuration();
-    conf.set("enabled", "false");
-
-    BaseHttpServer baseHttpServer = new BaseHttpServer(conf, "test") {
-      @Override
-      protected String getHttpAddressKey() {
-        return null;
-      }
-
-      @Override
-      protected String getHttpsAddressKey() {
-        return null;
-      }
-
-      @Override
-      protected String getHttpBindHostKey() {
-        return null;
-      }
-
-      @Override
-      protected String getHttpsBindHostKey() {
-        return null;
-      }
-
-      @Override
-      protected String getBindHostDefault() {
-        return null;
-      }
-
-      @Override
-      protected int getHttpBindPortDefault() {
-        return 0;
-      }
-
-      @Override
-      protected int getHttpsBindPortDefault() {
-        return 0;
-      }
-
-      @Override
-      protected String getKeytabFile() {
-        return null;
-      }
-
-      @Override
-      protected String getSpnegoPrincipal() {
-        return null;
-      }
-
-      @Override
-      protected String getEnabledKey() {
-        return "enabled";
-      }
-    };
-
-    conf.set("addresskey", "0.0.0.0:1234");
-
-    Assert.assertEquals("/0.0.0.0:1234", baseHttpServer
-        .getBindAddress("bindhostkey", "addresskey",
-            "default", 65).toString());
-
-    conf.set("bindhostkey", "1.2.3.4");
-
-    Assert.assertEquals("/1.2.3.4:1234", baseHttpServer
-        .getBindAddress("bindhostkey", "addresskey",
-            "default", 65).toString());
-  }
-
-}
\ No newline at end of file
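The two assertions above pin down the lookup order: the port always comes
from the address key, while an explicit bind-host key overrides the host
part of the address. A hedged sketch of that resolution, with hypothetical
helper names (this is not the actual BaseHttpServer code):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;

    // Hypothetical sketch of the resolution the test exercises.
    static InetSocketAddress resolveBindAddress(Configuration conf,
        String bindHostKey, String addressKey,
        String defaultHost, int defaultPort) {
      String address = conf.getTrimmed(addressKey);   // e.g. "0.0.0.0:1234"
      String host = defaultHost;
      int port = defaultPort;
      if (address != null && address.contains(":")) {
        host = address.substring(0, address.indexOf(':'));
        port = Integer.parseInt(address.substring(address.indexOf(':') + 1));
      }
      // An explicit bind host wins over the host part of the address.
      String bindHost = conf.getTrimmed(bindHostKey);
      if (bindHost != null && !bindHost.isEmpty()) {
        host = bindHost;
      }
      return new InetSocketAddress(host, port);       // "/1.2.3.4:1234"
    }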
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestProfileServlet.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestProfileServlet.java
deleted file mode 100644
index 1c4adf6..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestProfileServlet.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hdds.server.ProfileServlet.Event;
-import org.apache.hadoop.hdds.server.ProfileServlet.Output;
-
-import org.junit.Test;
-
-/**
- * Test ProfileServlet file name generation and validation.
- */
-public class TestProfileServlet {
-
-  @Test
-  public void testNameValidation() throws IOException {
-    ProfileServlet.validateFileName(
-        ProfileServlet.generateFileName(1, Output.SVG, Event.ALLOC));
-
-    ProfileServlet.validateFileName(
-        ProfileServlet.generateFileName(23, Output.COLLAPSED,
-            Event.L1_DCACHE_LOAD_MISSES));
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void testNameValidationWithNewLine() throws IOException {
-    ProfileServlet.validateFileName(
-        "test\n" + ProfileServlet.generateFileName(1, Output.SVG, Event.ALLOC));
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void testNameValidationWithSlash() throws IOException {
-    ProfileServlet.validateFileName(
-        "../" + ProfileServlet.generateFileName(1, Output.SVG, Event.ALLOC));
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java
deleted file mode 100644
index f2683b5..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-
-import org.apache.hadoop.metrics2.MetricsInfo;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.MetricsTag;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test the Prometheus metrics sink.
- */
-public class TestPrometheusMetricsSink {
-
-  @Test
-  public void testPublish() throws IOException {
-    //GIVEN
-    MetricsSystem metrics = DefaultMetricsSystem.instance();
-
-    metrics.init("test");
-    PrometheusMetricsSink sink = new PrometheusMetricsSink();
-    metrics.register("Prometheus", "Prometheus", sink);
-    TestMetrics testMetrics = metrics
-        .register("TestMetrics", "Testing metrics", new TestMetrics());
-
-    metrics.start();
-    testMetrics.numBucketCreateFails.incr();
-    metrics.publishMetricsNow();
-    ByteArrayOutputStream stream = new ByteArrayOutputStream();
-    OutputStreamWriter writer = new OutputStreamWriter(stream, UTF_8);
-
-    //WHEN
-    sink.writeMetrics(writer);
-    writer.flush();
-
-    //THEN
-    String writtenMetrics = stream.toString(UTF_8.name());
-    Assert.assertTrue(
-        "The expected metric line is missing from prometheus metrics output",
-        writtenMetrics.contains(
-            "test_metrics_num_bucket_create_fails{context=\"dfs\"")
-    );
-
-    metrics.stop();
-    metrics.shutdown();
-  }
-
-  @Test
-  public void testPublishWithSameName() throws IOException {
-    //GIVEN
-    MetricsSystem metrics = DefaultMetricsSystem.instance();
-
-    metrics.init("test");
-    PrometheusMetricsSink sink = new PrometheusMetricsSink();
-    metrics.register("Prometheus", "Prometheus", sink);
-    metrics.register("FooBar", "fooBar", (MetricsSource) (collector, all) -> {
-      collector.addRecord("RpcMetrics").add(new MetricsTag(PORT_INFO, "1234"))
-          .addGauge(COUNTER_INFO, 123).endRecord();
-
-      collector.addRecord("RpcMetrics").add(new MetricsTag(
-          PORT_INFO, "2345")).addGauge(COUNTER_INFO, 234).endRecord();
-    });
-
-    metrics.start();
-    metrics.publishMetricsNow();
-
-    ByteArrayOutputStream stream = new ByteArrayOutputStream();
-    OutputStreamWriter writer = new OutputStreamWriter(stream, UTF_8);
-
-    //WHEN
-    sink.writeMetrics(writer);
-    writer.flush();
-
-    //THEN
-    String writtenMetrics = stream.toString(UTF_8.name());
-    Assert.assertTrue(
-        "The expected metric line is missing from prometheus metrics output",
-        writtenMetrics.contains(
-            "rpc_metrics_counter{port=\"2345\""));
-
-    Assert.assertTrue(
-        "The expected metric line is missing from prometheus metrics "
-            + "output",
-        writtenMetrics.contains(
-            "rpc_metrics_counter{port=\"1234\""));
-
-    metrics.stop();
-    metrics.shutdown();
-  }
-
-  @Test
-  public void testNamingCamelCase() {
-    PrometheusMetricsSink sink = new PrometheusMetricsSink();
-
-    Assert.assertEquals("rpc_time_some_metrics",
-        sink.prometheusName("RpcTime", "SomeMetrics"));
-
-    Assert.assertEquals("om_rpc_time_om_info_keys",
-        sink.prometheusName("OMRpcTime", "OMInfoKeys"));
-
-    Assert.assertEquals("rpc_time_small",
-        sink.prometheusName("RpcTime", "small"));
-  }
-
-  @Test
-  public void testNamingRocksDB() {
-    //RocksDB metrics are handled differently.
-    PrometheusMetricsSink sink = new PrometheusMetricsSink();
-    Assert.assertEquals("rocksdb_om.db_num_open_connections",
-        sink.prometheusName("Rocksdb_om.db", "num_open_connections"));
-  }
-
-  @Test
-  public void testNamingPipeline() {
-    PrometheusMetricsSink sink = new PrometheusMetricsSink();
-
-    String recordName = "SCMPipelineMetrics";
-    String metricName = "NumBlocksAllocated-"
-        + "RATIS-THREE-47659e3d-40c9-43b3-9792-4982fc279aba";
-    Assert.assertEquals(
-        "scm_pipeline_metrics_"
-            + "num_blocks_allocated_"
-            + "ratis_three_47659e3d_40c9_43b3_9792_4982fc279aba",
-        sink.prometheusName(recordName, metricName));
-  }
-
-  @Test
-  public void testNamingSpaces() {
-    PrometheusMetricsSink sink = new PrometheusMetricsSink();
-
-    String recordName = "JvmMetrics";
-    String metricName = "GcTimeMillisG1 Young Generation";
-    Assert.assertEquals(
-        "jvm_metrics_gc_time_millis_g1_young_generation",
-        sink.prometheusName(recordName, metricName));
-  }
-
-  /**
-   * Example metric pojo.
-   */
-  @Metrics(about = "Test Metrics", context = "dfs")
-  private static class TestMetrics {
-
-    @Metric
-    private MutableCounterLong numBucketCreateFails;
-  }
-
-  public static final MetricsInfo PORT_INFO = new MetricsInfo() {
-    @Override
-    public String name() {
-      return "PORT";
-    }
-
-    @Override
-    public String description() {
-      return "port";
-    }
-  };
-
-  public static final MetricsInfo COUNTER_INFO = new MetricsInfo() {
-    @Override
-    public String name() {
-      return "COUNTER";
-    }
-
-    @Override
-    public String description() {
-      return "counter";
-    }
-  };
-
-}
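The naming tests above pin down the conversion to Prometheus-style
snake_case: camel-case words split on case boundaries, acronym runs such as
"OM" stay together, and dashes and spaces become underscores. A hedged
regex-based sketch that satisfies the camel-case, pipeline and spaces cases
(the RocksDB record above is special-cased by the real sink and is not
covered here):

    // Hypothetical sketch; the real PrometheusMetricsSink.prometheusName
    // may differ in details.
    static String toPrometheusName(String recordName, String metricName) {
      String s = recordName + "_" + metricName;
      // Split an acronym run from the following word: "OMRpc" -> "OM_Rpc".
      s = s.replaceAll("([A-Z]+)([A-Z][a-z])", "$1_$2");
      // Split lower-to-upper boundaries: "RpcTime" -> "Rpc_Time".
      s = s.replaceAll("([a-z0-9])([A-Z])", "$1_$2");
      // Normalize dashes, dots and spaces to underscores.
      s = s.replaceAll("[^A-Za-z0-9_]", "_");
      return s.toLowerCase();
    }

For example, toPrometheusName("OMRpcTime", "OMInfoKeys") yields
"om_rpc_time_om_info_keys", matching the assertion above.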
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java
deleted file mode 100644
index 9735d2c..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.test.PathUtils;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.File;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Unit tests for {@link ServerUtils}.
- */
-public class TestServerUtils {
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  /**
-   * Test {@link ServerUtils#getScmDbDir}.
-   */
-  @Test
-  public void testGetScmDbDir() {
-    final File testDir = PathUtils.getTestDir(TestServerUtils.class);
-    final File dbDir = new File(testDir, "scmDbDir");
-    final File metaDir = new File(testDir, "metaDir");
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath());
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
-
-    try {
-      assertFalse(metaDir.exists());
-      assertFalse(dbDir.exists());
-      assertEquals(dbDir, ServerUtils.getScmDbDir(conf));
-      assertTrue(dbDir.exists());
-      assertFalse(metaDir.exists());
-    } finally {
-      FileUtils.deleteQuietly(dbDir);
-    }
-  }
-
-  /**
-   * Test {@link ServerUtils#getScmDbDir} with fallback to OZONE_METADATA_DIRS
-   * when OZONE_SCM_DB_DIRS is undefined.
-   */
-  @Test
-  public void testGetScmDbDirWithFallback() {
-    final File testDir = PathUtils.getTestDir(TestServerUtils.class);
-    final File metaDir = new File(testDir, "metaDir");
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
-    try {
-      assertFalse(metaDir.exists());
-      assertEquals(metaDir, ServerUtils.getScmDbDir(conf));
-      assertTrue(metaDir.exists());
-    } finally {
-      FileUtils.deleteQuietly(metaDir);
-    }
-  }
-
-  @Test
-  public void testNoScmDbDirConfigured() {
-    thrown.expect(IllegalArgumentException.class);
-    ServerUtils.getScmDbDir(new OzoneConfiguration());
-  }
-
-  @Test
-  public void ozoneMetadataDirIsMandatory() {
-    thrown.expect(IllegalArgumentException.class);
-    ServerUtils.getOzoneMetaDirPath(new OzoneConfiguration());
-  }
-
-  @Test
-  public void ozoneMetadataDirAcceptsSingleItem() {
-    final File testDir = PathUtils.getTestDir(TestServerUtils.class);
-    final File metaDir = new File(testDir, "metaDir");
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
-
-    try {
-      assertFalse(metaDir.exists());
-      assertEquals(metaDir, ServerUtils.getOzoneMetaDirPath(conf));
-      assertTrue(metaDir.exists());
-    } finally {
-      FileUtils.deleteQuietly(metaDir);
-    }
-  }
-
-  @Test
-  public void ozoneMetadataDirRejectsList() {
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, "/data/meta1,/data/meta2");
-    thrown.expect(IllegalArgumentException.class);
-
-    ServerUtils.getOzoneMetaDirPath(conf);
-  }
-
-}
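The directory tests above encode a small resolution protocol:
OZONE_SCM_DB_DIRS wins when set, otherwise the code falls back to
OZONE_METADATA_DIRS, which is mandatory and must name a single directory;
the chosen directory is created on first access. A hedged sketch of that
logic (not the actual ServerUtils implementation):

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.HddsConfigKeys;
    import org.apache.hadoop.hdds.scm.ScmConfigKeys;

    // Hypothetical sketch of the fallback the tests exercise.
    static File resolveScmDbDir(Configuration conf) {
      String dbDirs = conf.getTrimmed(ScmConfigKeys.OZONE_SCM_DB_DIRS);
      if (dbDirs != null && !dbDirs.isEmpty()) {
        File dir = new File(dbDirs);
        dir.mkdirs();                      // created on first access
        return dir;
      }
      String metaDirs = conf.getTrimmed(HddsConfigKeys.OZONE_METADATA_DIRS);
      if (metaDirs == null || metaDirs.isEmpty() || metaDirs.contains(",")) {
        // Mandatory, and a list of directories is rejected.
        throw new IllegalArgumentException(
            HddsConfigKeys.OZONE_METADATA_DIRS
                + " must be set to a single directory");
      }
      File dir = new File(metaDirs);
      dir.mkdirs();
      return dir;
    }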
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java
deleted file mode 100644
index 3f34a70..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Dummy event handler for tests; collects all the received events.
- */
-public class EventHandlerStub<PAYLOAD> implements EventHandler<PAYLOAD> {
-
-  private List<PAYLOAD> receivedEvents = new ArrayList<>();
-
-  @Override
-  public void onMessage(PAYLOAD payload, EventPublisher publisher) {
-    receivedEvents.add(payload);
-  }
-
-  public List<PAYLOAD> getReceivedEvents() {
-    return receivedEvents;
-  }
-}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
deleted file mode 100644
index 0c1200f..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-
-/**
- * Testing the basic functionality of the event queue.
- */
-public class TestEventQueue {
-
-  private static final Event<Long> EVENT1 =
-      new TypedEvent<>(Long.class, "SCM_EVENT1");
-  private static final Event<Long> EVENT2 =
-      new TypedEvent<>(Long.class, "SCM_EVENT2");
-
-  private static final Event<Long> EVENT3 =
-      new TypedEvent<>(Long.class, "SCM_EVENT3");
-  private static final Event<Long> EVENT4 =
-      new TypedEvent<>(Long.class, "SCM_EVENT4");
-
-  private EventQueue queue;
-
-  @Before
-  public void startEventQueue() {
-    DefaultMetricsSystem.initialize(getClass().getSimpleName());
-    queue = new EventQueue();
-  }
-
-  @After
-  public void stopEventQueue() {
-    DefaultMetricsSystem.shutdown();
-    queue.close();
-  }
-
-  @Test
-  public void simpleEvent() {
-
-    final long[] result = new long[2];
-
-    queue.addHandler(EVENT1, (payload, publisher) -> result[0] = payload);
-
-    queue.fireEvent(EVENT1, 11L);
-    queue.processAll(1000);
-    Assert.assertEquals(11, result[0]);
-
-  }
-
-  @Test
-  public void multipleSubscriber() {
-    final long[] result = new long[2];
-    queue.addHandler(EVENT2, (payload, publisher) -> result[0] = payload);
-
-    queue.addHandler(EVENT2, (payload, publisher) -> result[1] = payload);
-
-    queue.fireEvent(EVENT2, 23L);
-    queue.processAll(1000);
-    Assert.assertEquals(23, result[0]);
-    Assert.assertEquals(23, result[1]);
-
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
deleted file mode 100644
index bb05ef4..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-import org.junit.Test;
-
-/**
- * More realistic event test that sends an event from one listener.
- */
-public class TestEventQueueChain {
-
-  private static final Event<FailedNode> DECOMMISSION =
-      new TypedEvent<>(FailedNode.class);
-
-  private static final Event<FailedNode> DECOMMISSION_START =
-      new TypedEvent<>(FailedNode.class);
-
-  @Test
-  public void simpleEvent() {
-    EventQueue queue = new EventQueue();
-
-    queue.addHandler(DECOMMISSION, new PipelineManager());
-    queue.addHandler(DECOMMISSION_START, new NodeWatcher());
-
-    queue.fireEvent(DECOMMISSION, new FailedNode("node1"));
-
-    queue.processAll(5000);
-  }
-
-
-  static class FailedNode {
-    private final String nodeId;
-
-    FailedNode(String nodeId) {
-      this.nodeId = nodeId;
-    }
-
-    String getNodeId() {
-      return nodeId;
-    }
-  }
-
-  private static class PipelineManager implements EventHandler<FailedNode> {
-
-    @Override
-    public void onMessage(FailedNode message, EventPublisher publisher) {
-
-      System.out.println(
-          "Closing pipelines for all pipelines including node: " + message
-              .getNodeId());
-
-      publisher.fireEvent(DECOMMISSION_START, message);
-    }
-
-  }
-
-  private static class NodeWatcher implements EventHandler<FailedNode> {
-
-    @Override
-    public void onMessage(FailedNode message, EventPublisher publisher) {
-      System.out.println("Clear timer");
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
deleted file mode 100644
index dcbcdb0..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-import org.apache.hadoop.hdds.HddsIdFactory;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.ozone.lease.LeaseManager;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.List;
-import java.util.Objects;
-
-/**
- * Test the basic functionality of the event watcher.
- */
-public class TestEventWatcher {
-
-  private static final TypedEvent<UnderreplicatedEvent> WATCH_UNDER_REPLICATED =
-      new TypedEvent<>(UnderreplicatedEvent.class);
-
-  private static final TypedEvent<UnderreplicatedEvent> UNDER_REPLICATED =
-      new TypedEvent<>(UnderreplicatedEvent.class);
-
-  private static final TypedEvent<ReplicationCompletedEvent>
-      REPLICATION_COMPLETED = new TypedEvent<>(ReplicationCompletedEvent.class);
-
-  private LeaseManager<Long> leaseManager;
-
-  @Before
-  public void startLeaseManager() {
-    DefaultMetricsSystem.instance();
-    leaseManager = new LeaseManager<>("Test", 2000L);
-    leaseManager.start();
-  }
-
-  @After
-  public void stopLeaseManager() {
-    leaseManager.shutdown();
-    DefaultMetricsSystem.shutdown();
-  }
-
-  @Test
-  public void testEventHandling() throws InterruptedException {
-    EventQueue queue = new EventQueue();
-
-    EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
-        replicationWatcher = createEventWatcher();
-
-    EventHandlerStub<UnderreplicatedEvent> underReplicatedEvents =
-        new EventHandlerStub<>();
-
-    queue.addHandler(UNDER_REPLICATED, underReplicatedEvents);
-
-    replicationWatcher.start(queue);
-
-    long id1 = HddsIdFactory.getLongId();
-    long id2 = HddsIdFactory.getLongId();
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED,
-        new UnderreplicatedEvent(id1, "C1"));
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED,
-        new UnderreplicatedEvent(id2, "C2"));
-
-    Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size());
-
-    Thread.sleep(1000);
-
-    queue.fireEvent(REPLICATION_COMPLETED,
-        new ReplicationCompletedEvent(id1, "C2", "D1"));
-
-    Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size());
-
-    Thread.sleep(1500);
-
-    queue.processAll(1000L);
-
-    Assert.assertEquals(1, underReplicatedEvents.getReceivedEvents().size());
-    Assert.assertEquals(id2,
-        underReplicatedEvents.getReceivedEvents().get(0).id);
-
-  }
-
-  @Test
-  public void testInprogressFilter() throws InterruptedException {
-
-    EventQueue queue = new EventQueue();
-
-    EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
-        replicationWatcher = createEventWatcher();
-
-    EventHandlerStub<UnderreplicatedEvent> underReplicatedEvents =
-        new EventHandlerStub<>();
-
-    queue.addHandler(UNDER_REPLICATED, underReplicatedEvents);
-
-    replicationWatcher.start(queue);
-
-    UnderreplicatedEvent event1 =
-        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1");
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED, event1);
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED,
-        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C2"));
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED,
-        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1"));
-
-    queue.processAll(1000L);
-    Thread.sleep(1000L);
-    List<UnderreplicatedEvent> c1todo = replicationWatcher
-        .getTimeoutEvents(e -> e.containerId.equalsIgnoreCase("C1"));
-
-    Assert.assertEquals(2, c1todo.size());
-    Assert.assertTrue(replicationWatcher.contains(event1));
-    Thread.sleep(1500L);
-
-    c1todo = replicationWatcher
-        .getTimeoutEvents(e -> e.containerId.equalsIgnoreCase("C1"));
-    Assert.assertEquals(0, c1todo.size());
-    Assert.assertFalse(replicationWatcher.contains(event1));
-
-  }
-
-  @Test
-  public void testMetrics() throws InterruptedException {
-
-    DefaultMetricsSystem.initialize("test");
-
-    EventQueue queue = new EventQueue();
-
-    EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
-        replicationWatcher = createEventWatcher();
-
-    EventHandlerStub<UnderreplicatedEvent> underReplicatedEvents =
-        new EventHandlerStub<>();
-
-    queue.addHandler(UNDER_REPLICATED, underReplicatedEvents);
-
-    replicationWatcher.start(queue);
-
-    //send 3 events to track 3 in-progress activities
-    UnderreplicatedEvent event1 =
-        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1");
-
-    UnderreplicatedEvent event2 =
-        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C2");
-
-    UnderreplicatedEvent event3 =
-        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1");
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED, event1);
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED, event2);
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED, event3);
-
-    //1st event is completed, no need to track it any more
-    ReplicationCompletedEvent event1Completed =
-        new ReplicationCompletedEvent(event1.id, "C1", "D1");
-
-    queue.fireEvent(REPLICATION_COMPLETED, event1Completed);
-
-    //lease manager timeout = 2000L
-    Thread.sleep(3 * 2000L);
-
-    queue.processAll(2000L);
-
-    //At this point three in-progress activities have been tracked with
-    // three UnderreplicatedEvents: the first one completed and the
-    // remaining two timed out (the lease manager timeout is 2000 ms).
-
-    EventWatcherMetrics metrics = replicationWatcher.getMetrics();
-
-    //3 events are received
-    Assert.assertEquals(3, metrics.getTrackedEvents().value());
-
-    //completed + timed out = all messages
-    Assert.assertEquals(
-        "number of timed out and completed messages should be the same as the"
-            + " all messages",
-        metrics.getTrackedEvents().value(),
-        metrics.getCompletedEvents().value() + metrics.getTimedOutEvents()
-            .value());
-
-    //_at least_ two are timed out.
-    Assert.assertTrue("At least two events should be timed out.",
-        metrics.getTimedOutEvents().value() >= 2);
-
-    DefaultMetricsSystem.shutdown();
-  }
-
-  private EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
-      createEventWatcher() {
-    return new CommandWatcherExample(WATCH_UNDER_REPLICATED,
-        REPLICATION_COMPLETED, leaseManager);
-  }
-
-  private static class CommandWatcherExample
-      extends EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent> {
-
-    CommandWatcherExample(Event<UnderreplicatedEvent> startEvent,
-        Event<ReplicationCompletedEvent> completionEvent,
-        LeaseManager<Long> leaseManager) {
-      super("TestCommandWatcher", startEvent, completionEvent, leaseManager);
-    }
-
-    @Override
-    protected void onTimeout(EventPublisher publisher,
-        UnderreplicatedEvent payload) {
-      publisher.fireEvent(UNDER_REPLICATED, payload);
-    }
-
-    @Override
-    protected void onFinished(EventPublisher publisher,
-        UnderreplicatedEvent payload) {
-      //Good job. We did it.
-    }
-
-    @Override
-    public EventWatcherMetrics getMetrics() {
-      return super.getMetrics();
-    }
-  }
-
-  private static class ReplicationCompletedEvent
-      implements IdentifiableEventPayload {
-
-    private final long id;
-
-    private final String containerId;
-
-    private final String datanodeId;
-
-    ReplicationCompletedEvent(long id, String containerId,
-        String datanodeId) {
-      this.id = id;
-      this.containerId = containerId;
-      this.datanodeId = datanodeId;
-    }
-
-    @Override
-    public long getId() {
-      return id;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (this == o) {
-        return true;
-      }
-      if (o == null || getClass() != o.getClass()) {
-        return false;
-      }
-      ReplicationCompletedEvent that = (ReplicationCompletedEvent) o;
-      return Objects.equals(containerId, that.containerId) && Objects
-          .equals(datanodeId, that.datanodeId);
-    }
-
-    @Override
-    public int hashCode() {
-
-      return Objects.hash(containerId, datanodeId);
-    }
-  }
-
-  private static class UnderreplicatedEvent
-
-      implements IdentifiableEventPayload {
-
-    private final long id;
-
-    private final String containerId;
-
-    UnderreplicatedEvent(long id, String containerId) {
-      this.containerId = containerId;
-      this.id = id;
-    }
-
-    @Override
-    public long getId() {
-      return id;
-    }
-  }
-
-}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java
deleted file mode 100644
index 720dd6f..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- *     http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-/**
- * Tests for Event Watcher.
- */
-package org.apache.hadoop.hdds.server.events;
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/test/resources/ozone-site.xml b/hadoop-hdds/framework/src/test/resources/ozone-site.xml
deleted file mode 100644
index 77dd7ef..0000000
--- a/hadoop-hdds/framework/src/test/resources/ozone-site.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-</configuration>
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
deleted file mode 100644
index a174337..0000000
--- a/hadoop-hdds/pom.xml
+++ /dev/null
@@ -1,406 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-main-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-    <relativePath>../pom.ozone.xml</relativePath>
-  </parent>
-
-  <artifactId>hadoop-hdds</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Project</description>
-  <name>Apache Hadoop HDDS</name>
-  <packaging>pom</packaging>
-
-  <modules>
-    <module>client</module>
-    <module>common</module>
-    <module>framework</module>
-    <module>container-service</module>
-    <module>server-scm</module>
-    <module>tools</module>
-    <module>docs</module>
-    <module>config</module>
-  </modules>
-
-  <properties>
-    <!-- version for hdds/ozone components -->
-    <hdds.version>0.5.0-SNAPSHOT</hdds.version>
-
-    <!-- Apache Ratis version -->
-    <ratis.version>0.5.0-201fc85-SNAPSHOT</ratis.version>
-
-    <bouncycastle.version>1.60</bouncycastle.version>
-
-    <protobuf-maven-plugin.version>0.5.1</protobuf-maven-plugin.version>
-    <os-maven-plugin.version>1.5.0.Final</os-maven-plugin.version>
-
-    <maven-surefire-plugin.version>3.0.0-M1</maven-surefire-plugin.version>
-
-    <junit.jupiter.version>5.3.1</junit.jupiter.version>
-  </properties>
-  <repositories>
-    <repository>
-      <id>apache.snapshots.https</id>
-      <url>https://repository.apache.org/content/repositories/snapshots</url>
-    </repository>
-  </repositories>
-  <pluginRepositories>
-    <pluginRepository>
-      <id>apache.snapshots.https</id>
-      <url>https://repository.apache.org/content/repositories/snapshots</url>
-    </pluginRepository>
-  </pluginRepositories>
-  <dependencyManagement>
-    <dependencies>
-
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-common</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-client</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-tools</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-server-framework</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-server-scm</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-container-service</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-docs</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-config</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-container-service</artifactId>
-        <version>${hdds.version}</version>
-        <type>test-jar</type>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-server-scm</artifactId>
-        <type>test-jar</type>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.openjdk.jmh</groupId>
-        <artifactId>jmh-core</artifactId>
-        <version>1.19</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.openjdk.jmh</groupId>
-        <artifactId>jmh-generator-annprocess</artifactId>
-        <version>1.19</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.ratis</groupId>
-        <artifactId>ratis-proto-shaded</artifactId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-common</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-client</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-server</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-netty</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-grpc</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.bouncycastle</groupId>
-        <artifactId>bcprov-jdk15on</artifactId>
-        <version>${bouncycastle.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.bouncycastle</groupId>
-        <artifactId>bcpkix-jdk15on</artifactId>
-        <version>${bouncycastle.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.junit.jupiter</groupId>
-        <artifactId>junit-jupiter-api</artifactId>
-        <version>${junit.jupiter.version}</version>
-        <scope>test</scope>
-      </dependency>
-    </dependencies>
-  </dependencyManagement>
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>${hadoop.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <version>${hadoop.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs-client</artifactId>
-      <version>${hadoop.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>com.squareup.okhttp</groupId>
-          <artifactId>okhttp</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>${hadoop.version}</version>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <version>${hadoop.version}</version>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>info.picocli</groupId>
-      <artifactId>picocli</artifactId>
-      <version>3.9.6</version>
-    </dependency>
-    <dependency>
-      <groupId>com.google.protobuf</groupId>
-      <artifactId>protobuf-java</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-  <build>
-    <plugins>
-
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-enforcer-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>depcheck</id>
-            <configuration>
-              <rules>
-                <DependencyConvergence>
-                  <uniqueVersions>false</uniqueVersions>
-                </DependencyConvergence>
-              </rules>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>**/hs_err*.log</exclude>
-            <exclude>**/target/**</exclude>
-            <exclude>.gitattributes</exclude>
-            <exclude>.idea/**</exclude>
-            <exclude>src/main/resources/webapps/static/angular-1.6.4.min.js</exclude>
-            <exclude>src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js</exclude>
-            <exclude>src/main/resources/webapps/static/angular-route-1.6.4.min.js</exclude>
-            <exclude>src/main/resources/webapps/static/d3-3.5.17.min.js</exclude>
-            <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.css.map</exclude>
-            <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.css</exclude>
-            <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.js.map</exclude>
-            <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.js</exclude>
-            <exclude>src/main/resources/webapps/static/jquery-3.4.1.min.js</exclude>
-            <exclude>src/main/resources/webapps/static/bootstrap-3.4.1/**</exclude>
-            <exclude>src/test/resources/additionalfields.container</exclude>
-            <exclude>src/test/resources/incorrect.checksum.container</exclude>
-            <exclude>src/test/resources/incorrect.container</exclude>
-            <exclude>src/test/resources/test.db.ini</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-classpath-descriptor</id>
-            <phase>package</phase>
-            <goals>
-              <goal>build-classpath</goal>
-            </goals>
-            <configuration>
-              <outputFile>${project.build.directory}/classpath</outputFile>
-              <prefix>$HDDS_LIB_JARS_DIR</prefix>
-              <outputFilterFile>true</outputFilterFile>
-              <includeScope>runtime</includeScope>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>attach-classpath-artifact</id>
-            <phase>package</phase>
-            <goals>
-              <goal>attach-artifact</goal>
-            </goals>
-            <configuration>
-              <artifacts>
-                <artifact>
-                  <file>${project.build.directory}/classpath</file>
-                  <type>cp</type>
-                  <classifier>classpath</classifier>
-                </artifact>
-              </artifacts>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-  <profiles>
-    <profile>
-      <id>parallel-tests</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-maven-plugins</artifactId>
-            <executions>
-              <execution>
-                <id>parallel-tests-createdir</id>
-                <goals>
-                  <goal>parallel-tests-createdir</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-surefire-plugin</artifactId>
-            <configuration>
-              <forkCount>${testsThreadCount}</forkCount>
-              <reuseForks>false</reuseForks>
-              <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
-              <systemPropertyVariables>
-                <testsThreadCount>${testsThreadCount}</testsThreadCount>
-                <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
-                <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
-                <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
-
-                <!-- This is intentionally the same directory for all JUnit -->
-                <!-- forks, for use in the very rare situation that -->
-                <!-- concurrent tests need to coordinate, such as using lock -->
-                <!-- files. -->
-                <test.build.shared.data>${test.build.data}</test.build.shared.data>
-
-                <!-- Due to a Maven quirk, setting this to just -->
-                <!-- surefire.forkNumber won't do the parameter substitution. -->
-                <!-- Putting a prefix in front of it like "fork-" makes it -->
-                <!-- work. -->
-                <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
-              </systemPropertyVariables>
-            </configuration>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
-</project>
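The parallel-tests profile above hands each surefire fork its own data,
build and tmp directories, plus a fork-unique id. A hypothetical test could
consume those properties like this (property names come from the profile
above; the fallback values are assumptions for illustration):

    import java.io.File;

    // test.build.data is already fork-scoped by the profile above.
    File scratch = new File(
        System.getProperty("test.build.data", "target/test/data"));
    scratch.mkdirs();  // each fork writes under its own directory
    // test.unique.fork.id gives a usable per-fork token; the "fork-"
    // prefix works around the Maven substitution quirk noted above.
    String forkId = System.getProperty("test.unique.fork.id", "fork-1");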
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
deleted file mode 100644
index 68a5cd8..0000000
--- a/hadoop-hdds/server-scm/pom.xml
+++ /dev/null
@@ -1,153 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdds</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-hdds-server-scm</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Storage Container Manager Server</description>
-  <name>Apache Hadoop HDDS SCM Server</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-container-service</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-client</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-framework</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-docs</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-container-service</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-
-    <dependency>
-      <groupId>org.hamcrest</groupId>
-      <artifactId>hamcrest-core</artifactId>
-      <version>1.3</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>io.dropwizard.metrics</groupId>
-      <artifactId>metrics-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.assertj</groupId>
-      <artifactId>assertj-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-generator-annprocess</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.hamcrest</groupId>
-      <artifactId>hamcrest-all</artifactId>
-      <version>1.3</version>
-    </dependency>
-    <dependency>
-      <groupId>org.bouncycastle</groupId>
-      <artifactId>bcprov-jdk15on</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.github.spotbugs</groupId>
-      <artifactId>spotbugs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>copy-common-html</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>unpack</goal>
-            </goals>
-            <configuration>
-              <artifactItems>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-hdds-server-framework</artifactId>
-                  <outputDirectory>${project.build.outputDirectory}
-                  </outputDirectory>
-                  <includes>webapps/static/**/*.*</includes>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-hdds-docs</artifactId>
-                  <outputDirectory>${project.build.outputDirectory}/webapps/scm
-                  </outputDirectory>
-                  <includes>docs/**/*.*</includes>
-                </artifactItem>
-              </artifactItems>
-              <overWriteSnapshots>true</overWriteSnapshots>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-    <testResources>
-      <testResource>
-        <directory>${basedir}/../../hadoop-hdds/common/src/main/resources</directory>
-      </testResource>
-      <testResource>
-        <directory>${basedir}/src/test/resources</directory>
-      </testResource>
-    </testResources>
-  </build>
-</project>
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
deleted file mode 100644
index 426341a..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.safemode.Precheck;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * SCM utility class.
- */
-public final class ScmUtils {
-  private static final Logger LOG = LoggerFactory
-      .getLogger(ScmUtils.class);
-
-  private ScmUtils() {
-  }
-
-  /**
-   * Perform all prechecks for the given SCM operation.
-   *
-   * @param operation the SCM operation being attempted
-   * @param preChecks prechecks to be performed
-   */
-  public static void preCheck(ScmOps operation, Precheck... preChecks)
-      throws SCMException {
-    for (Precheck preCheck : preChecks) {
-      preCheck.check(operation);
-    }
-  }
-
-}
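For context on the precheck pattern removed here, a minimal usage sketch, using only names that appear elsewhere in this patch (the SafeModePrecheck constructor and the ScmOps values are the ones BlockManagerImpl uses below):

    // Guard a block allocation behind the safe-mode precheck, the same
    // pattern BlockManagerImpl.allocateBlock follows further down.
    SafeModePrecheck safeModePrecheck = new SafeModePrecheck(conf);
    ScmUtils.preCheck(ScmOps.allocateBlock, safeModePrecheck);
    // If SCM is still in safe mode, preCheck propagates the SCMException
    // thrown by the precheck and the operation is rejected before any
    // state changes.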
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java
deleted file mode 100644
index 0bdbeb8..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.block;
-
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- *
- *  Block APIs.
- *  Containers are transparent to these APIs.
- */
-public interface BlockManager extends Closeable {
-  /**
-   * Allocates a new block for a given size.
-   * @param size - Block Size
-   * @param type Replication Type
-   * @param factor - Replication Factor
-   * @param owner - owner of the block
-   * @param excludeList List of datanodes/containers to exclude during block
-   *                    allocation.
-   * @return AllocatedBlock
-   * @throws IOException
-   */
-  AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, String owner,
-      ExcludeList excludeList) throws IOException;
-
-  /**
-   * Deletes a list of blocks in an atomic operation. Internally, SCM
-   * writes these blocks into a {@link DeletedBlockLog} and deletes them
-   * from the SCM DB. If this is successful, the given blocks enter the
-   * pending deletion state and become invisible in the SCM namespace.
-   *
-   * @param blockIDs block IDs. This is often the list of blocks of
-   *                 a particular object key.
-   * @throws IOException if an exception happens; none of the blocks is deleted.
-   */
-  void deleteBlocks(List<BlockID> blockIDs) throws IOException;
-
-  /**
-   * @return the block deletion transaction log maintained by SCM.
-   */
-  DeletedBlockLog getDeletedBlockLog();
-
-  /**
-   * Start block manager background services.
-   * @throws IOException
-   */
-  void start() throws IOException;
-
-  /**
-   * Shutdown block manager background services.
-   * @throws IOException
-   */
-  void stop() throws IOException;
-
-  /**
-   * @return the block deleting service executed in SCM.
-   */
-  SCMBlockDeletingService getSCMBlockDeletingService();
-
-  /**
-   * Set SafeMode status.
-   *
-   * @param safeModeStatus
-   */
-  void setSafeModeStatus(boolean safeModeStatus);
-}
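A usage sketch for the interface above. `blockManager` is a placeholder, and the AllocatedBlock/BlockID accessors are assumed from this snapshot's builder calls rather than confirmed by this hunk:

    // Allocate one 256 MB RATIS/THREE block for owner "ozone", then
    // schedule it for deletion again. allocateBlock may return null if no
    // container can be found (see BlockManagerImpl below).
    AllocatedBlock block = blockManager.allocateBlock(
        256L * 1024 * 1024,
        HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.THREE,
        "ozone",
        new ExcludeList());
    ContainerBlockID cbid = block.getContainerBlockID();  // assumed getter
    blockManager.deleteBlocks(Collections.singletonList(
        new BlockID(cbid.getContainerID(), cbid.getLocalID())));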
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
deleted file mode 100644
index 4c182c3..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ /dev/null
@@ -1,362 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.block;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.concurrent.TimeUnit;
-import javax.management.ObjectName;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.client.ContainerBlockID;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmUtils;
-import org.apache.hadoop.hdds.scm.safemode.SafeModePrecheck;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.hdds.utils.UniqueId;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .INVALID_BLOCK_SIZE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
-
-
-/** Block Manager manages the block access for SCM. */
-public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BlockManagerImpl.class);
-  // TODO : FIX ME : Hard coding the owner.
-  // Currently only user of the block service is Ozone, CBlock manages blocks
-  // by itself and does not rely on the Block service offered by SCM.
-
-  private final PipelineManager pipelineManager;
-  private final ContainerManager containerManager;
-
-  private final long containerSize;
-
-  private final DeletedBlockLog deletedBlockLog;
-  private final SCMBlockDeletingService blockDeletingService;
-
-  private ObjectName mxBean;
-  private SafeModePrecheck safeModePrecheck;
-
-  /**
-   * Constructor.
-   *
-   * @param conf - configuration.
-   * @param scm - the StorageContainerManager instance.
-   */
-  public BlockManagerImpl(final Configuration conf,
-                          final StorageContainerManager scm) {
-    Objects.requireNonNull(scm, "SCM cannot be null");
-    this.pipelineManager = scm.getPipelineManager();
-    this.containerManager = scm.getContainerManager();
-
-    this.containerSize = (long)conf.getStorageSize(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
-        StorageUnit.BYTES);
-
-    mxBean = MBeans.register("BlockManager", "BlockManagerImpl", this);
-
-    // SCM block deleting transaction log and deleting service.
-    deletedBlockLog = new DeletedBlockLogImpl(conf, scm.getContainerManager(),
-        scm.getScmMetadataStore());
-    long svcInterval =
-        conf.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-            OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
-            TimeUnit.MILLISECONDS);
-    long serviceTimeout =
-        conf.getTimeDuration(
-            OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
-            OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
-            TimeUnit.MILLISECONDS);
-    blockDeletingService =
-        new SCMBlockDeletingService(deletedBlockLog, containerManager,
-            scm.getScmNodeManager(), scm.getEventQueue(), svcInterval,
-            serviceTimeout, conf);
-    safeModePrecheck = new SafeModePrecheck(conf);
-  }
-
-  /**
-   * Start block manager services.
-   *
-   * @throws IOException
-   */
-  public void start() throws IOException {
-    this.blockDeletingService.start();
-  }
-
-  /**
-   * Shutdown block manager services.
-   *
-   * @throws IOException
-   */
-  public void stop() throws IOException {
-    this.blockDeletingService.shutdown();
-    this.close();
-  }
-
-  /**
-   * Allocates a block in a container and returns that info.
-   *
-   * @param size - Block Size
-   * @param type Replication Type
-   * @param factor - Replication Factor
-   * @param owner - owner of the block
-   * @param excludeList List of datanodes/containers to exclude during block
-   *                    allocation.
-   * @return Allocated block
-   * @throws IOException on failure.
-   */
-  @Override
-  public AllocatedBlock allocateBlock(final long size, ReplicationType type,
-      ReplicationFactor factor, String owner, ExcludeList excludeList)
-      throws IOException {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Size;{} , type : {}, factor : {} ", size, type, factor);
-    }
-    ScmUtils.preCheck(ScmOps.allocateBlock, safeModePrecheck);
-    if (size < 0 || size > containerSize) {
-      LOG.warn("Invalid block size requested : {}", size);
-      throw new SCMException("Unsupported block size: " + size,
-          INVALID_BLOCK_SIZE);
-    }
-
-    /*
-      Here is the high level logic.
-
-      1. We try to find pipelines in open state.
-
-      2. If there are no pipelines in OPEN state, then we try to create one.
-
-      3. We allocate a block from the available containers in the selected
-      pipeline.
-
-      TODO : #CLUTIL Support random picking of two containers from the list.
-      So we can use different kind of policies.
-    */
-
-    ContainerInfo containerInfo;
-
-    while (true) {
-      List<Pipeline> availablePipelines =
-          pipelineManager
-              .getPipelines(type, factor, Pipeline.PipelineState.OPEN,
-                  excludeList.getDatanodes(), excludeList.getPipelineIds());
-      Pipeline pipeline = null;
-      if (availablePipelines.size() == 0) {
-        try {
-          // TODO: #CLUTIL Remove creation logic when all replication types and
-          // factors are handled by pipeline creator
-          pipeline = pipelineManager.createPipeline(type, factor);
-        } catch (IOException e) {
-          LOG.warn("Pipeline creation failed for type:{} factor:{}. Retrying " +
-                  "get pipelines call once.", type, factor, e);
-          availablePipelines = pipelineManager
-              .getPipelines(type, factor, Pipeline.PipelineState.OPEN,
-                  excludeList.getDatanodes(), excludeList.getPipelineIds());
-          if (availablePipelines.size() == 0) {
-            LOG.info("Could not find available pipeline of type:{} and " +
-                "factor:{} even after retrying", type, factor);
-            break;
-          }
-        }
-      }
-
-      if (null == pipeline) {
-        // TODO: #CLUTIL Make the selection policy driven.
-        pipeline = availablePipelines
-            .get((int) (Math.random() * availablePipelines.size()));
-      }
-
-      // look for OPEN containers that match the criteria.
-      containerInfo = containerManager.getMatchingContainer(size, owner,
-          pipeline, excludeList.getContainerIds());
-
-      if (containerInfo != null) {
-        return newBlock(containerInfo);
-      }
-    }
-
-    // We have tried all the strategies we know, but somehow we are not
-    // able to get a container for this block. Log that info and return null.
-    LOG.error(
-        "Unable to allocate a block for the size: {}, type: {}, factor: {}",
-        size, type, factor);
-    return null;
-  }
-
-  /**
-   * newBlock - returns a new block assigned to a container.
-   *
-   * @param containerInfo - Container Info.
-   * @return AllocatedBlock
-   */
-  private AllocatedBlock newBlock(ContainerInfo containerInfo) {
-    try {
-      final Pipeline pipeline = pipelineManager
-          .getPipeline(containerInfo.getPipelineID());
-      // TODO : Revisit this local ID allocation when HA is added.
-      long localID = UniqueId.next();
-      long containerID = containerInfo.getContainerID();
-      AllocatedBlock.Builder abb =  new AllocatedBlock.Builder()
-          .setContainerBlockID(new ContainerBlockID(containerID, localID))
-          .setPipeline(pipeline);
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("New block allocated : {} Container ID: {}", localID,
-            containerID);
-      }
-      pipelineManager.incNumBlocksAllocatedMetric(pipeline.getId());
-      return abb.build();
-    } catch (PipelineNotFoundException ex) {
-      LOG.error("Pipeline Machine count is zero.", ex);
-      return null;
-    }
-  }
-
-  /**
-   * Deletes a list of blocks in an atomic operation. Internally, SCM writes
-   * these blocks into a {@link DeletedBlockLog} and deletes them from the
-   * SCM DB. If this is successful, the given blocks enter the pending
-   * deletion state and become invisible in the SCM namespace.
-   *
-   * @param blockIDs block IDs. This is often the list of blocks of a
-   * particular object key.
-   * @throws IOException if an exception happens; none of the blocks is
-   * deleted.
-   */
-  @Override
-  public void deleteBlocks(List<BlockID> blockIDs) throws IOException {
-    ScmUtils.preCheck(ScmOps.deleteBlock, safeModePrecheck);
-
-    LOG.info("Deleting blocks {}", StringUtils.join(",", blockIDs));
-    Map<Long, List<Long>> containerBlocks = new HashMap<>();
-    // TODO: track the block size info so that we can reclaim the container
-    // TODO: used space when the block is deleted.
-    for (BlockID block : blockIDs) {
-      // Merge blocks to a container to blocks mapping,
-      // prepare to persist this info to the deletedBlocksLog.
-      long containerID = block.getContainerID();
-      if (containerBlocks.containsKey(containerID)) {
-        containerBlocks.get(containerID).add(block.getLocalID());
-      } else {
-        List<Long> item = new ArrayList<>();
-        item.add(block.getLocalID());
-        containerBlocks.put(containerID, item);
-      }
-    }
-
-    try {
-      deletedBlockLog.addTransactions(containerBlocks);
-    } catch (IOException e) {
-      throw new IOException(
-          "Skip writing the deleted blocks info to"
-              + " the delLog because addTransaction fails. Batch skipped: "
-              + StringUtils.join(",", blockIDs), e);
-    }
-    // TODO: Container report handling of the deleted blocks:
-    // Remove tombstone and update open container usage.
-    // We will revisit this when the closed container replication is done.
-  }
-
-  @Override
-  public DeletedBlockLog getDeletedBlockLog() {
-    return this.deletedBlockLog;
-  }
-
-  /**
-   * Close the resources for BlockManager.
-   *
-   * @throws IOException
-   */
-  @Override
-  public void close() throws IOException {
-    if (deletedBlockLog != null) {
-      deletedBlockLog.close();
-    }
-    blockDeletingService.shutdown();
-    if (mxBean != null) {
-      MBeans.unregister(mxBean);
-      mxBean = null;
-    }
-  }
-
-  @Override
-  public int getOpenContainersNo() {
-    return 0;
-    // TODO : FIX ME : The open container being a single number does not make
-    // sense.
-    // We have to get open containers by Replication Type and Replication
-    // factor. Hence returning 0 for now.
-    // containers.get(HddsProtos.LifeCycleState.OPEN).size();
-  }
-
-  @Override
-  public SCMBlockDeletingService getSCMBlockDeletingService() {
-    return this.blockDeletingService;
-  }
-
-  @Override
-  public void setSafeModeStatus(boolean safeModeStatus) {
-    this.safeModePrecheck.setInSafeMode(safeModeStatus);
-  }
-
-  /**
-   * Returns status of scm safe mode determined by SAFE_MODE_STATUS event.
-   * */
-  public boolean isScmInSafeMode() {
-    return this.safeModePrecheck.isInSafeMode();
-  }
-
-  /**
-   * Get class logger.
-   * */
-  public static Logger getLogger() {
-    return LOG;
-  }
-
-}
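One note on the grouping loop in deleteBlocks above: the containsKey/else branch is the pre-Java-8 idiom; a behaviorally equivalent sketch with Map.computeIfAbsent, for readers comparing this against later Ozone code:

    // Group local block IDs by container ID, creating each list lazily.
    Map<Long, List<Long>> containerBlocks = new HashMap<>();
    for (BlockID block : blockIDs) {
      containerBlocks
          .computeIfAbsent(block.getContainerID(), id -> new ArrayList<>())
          .add(block.getLocalID());
    }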
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockmanagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockmanagerMXBean.java
deleted file mode 100644
index 23c6983..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockmanagerMXBean.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.block;
-
-
-/**
- * JMX interface for the block manager.
- */
-public interface BlockmanagerMXBean {
-
-  /**
-   * Number of open containers managed by the block manager.
-   */
-  int getOpenContainersNo();
-}
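BlockManagerImpl registers this bean via MBeans.register("BlockManager", "BlockManagerImpl", this), so a JMX client could have read it roughly as sketched below; the "Hadoop:service=...,name=..." ObjectName convention of Hadoop's MBeans helper is assumed, not shown in this patch:

    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    ObjectName name =
        new ObjectName("Hadoop:service=BlockManager,name=BlockManagerImpl");
    // getOpenContainersNo() surfaces as the "OpenContainersNo" attribute.
    int openContainers =
        (Integer) server.getAttribute(name, "OpenContainersNo");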
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
deleted file mode 100644
index ce65a70..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.block;
-
-import com.google.common.collect.ArrayListMultimap;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-
-/**
- * A wrapper class to hold info about a datanode and all the deleted block
- * transactions that will be sent to it.
- */
-public class DatanodeDeletedBlockTransactions {
-  private int nodeNum;
-  // The throttle size for each datanode.
-  private int maximumAllowedTXNum;
-  // Current counter of inserted TX.
-  private int currentTXNum;
-  private ContainerManager containerManager;
-  // A list of TXs mapped to a certain datanode ID.
-  private final ArrayListMultimap<UUID, DeletedBlocksTransaction>
-      transactions;
-
-  DatanodeDeletedBlockTransactions(ContainerManager containerManager,
-      int maximumAllowedTXNum, int nodeNum) {
-    this.transactions = ArrayListMultimap.create();
-    this.containerManager = containerManager;
-    this.maximumAllowedTXNum = maximumAllowedTXNum;
-    this.nodeNum = nodeNum;
-  }
-
-  public boolean addTransaction(DeletedBlocksTransaction tx,
-      Set<UUID> dnsWithTransactionCommitted) {
-    try {
-      boolean success = false;
-      final ContainerID id = ContainerID.valueof(tx.getContainerID());
-      final ContainerInfo container = containerManager.getContainer(id);
-      final Set<ContainerReplica> replicas = containerManager
-          .getContainerReplicas(id);
-      if (!container.isOpen()) {
-        for (ContainerReplica replica : replicas) {
-          UUID dnID = replica.getDatanodeDetails().getUuid();
-          if (dnsWithTransactionCommitted == null ||
-              !dnsWithTransactionCommitted.contains(dnID)) {
-            // Transaction need not be sent to dns which have
-            // already committed it
-            success = addTransactionToDN(dnID, tx);
-          }
-        }
-      }
-      return success;
-    } catch (IOException e) {
-      SCMBlockDeletingService.LOG.warn("Got container info error.", e);
-      return false;
-    }
-  }
-
-  private boolean addTransactionToDN(UUID dnID, DeletedBlocksTransaction tx) {
-    if (transactions.containsKey(dnID)) {
-      List<DeletedBlocksTransaction> txs = transactions.get(dnID);
-      if (txs != null && txs.size() < maximumAllowedTXNum) {
-        boolean hasContained = false;
-        for (DeletedBlocksTransaction t : txs) {
-          if (t.getContainerID() == tx.getContainerID()) {
-            hasContained = true;
-            break;
-          }
-        }
-
-        if (!hasContained) {
-          txs.add(tx);
-          currentTXNum++;
-          SCMBlockDeletingService.LOG
-              .debug("Transaction added: {} <- TX({})", dnID, tx.getTxID());
-          return true;
-        }
-      }
-    } else {
-      currentTXNum++;
-      transactions.put(dnID, tx);
-      SCMBlockDeletingService.LOG
-          .debug("Transaction added: {} <- TX({})", dnID, tx.getTxID());
-      return true;
-    }
-    return false;
-  }
-
-  Set<UUID> getDatanodeIDs() {
-    return transactions.keySet();
-  }
-
-  boolean isEmpty() {
-    return transactions.isEmpty();
-  }
-
-  boolean hasTransactions(UUID dnId) {
-    return transactions.containsKey(dnId) &&
-        !transactions.get(dnId).isEmpty();
-  }
-
-  List<DeletedBlocksTransaction> getDatanodeTransactions(UUID dnId) {
-    return transactions.get(dnId);
-  }
-
-  List<String> getTransactionIDList(UUID dnId) {
-    if (hasTransactions(dnId)) {
-      return transactions.get(dnId).stream()
-          .map(DeletedBlocksTransaction::getTxID).map(String::valueOf)
-          .collect(Collectors.toList());
-    } else {
-      return Collections.emptyList();
-    }
-  }
-
-  boolean isFull() {
-    return currentTXNum >= maximumAllowedTXNum * nodeNum;
-  }
-
-  int getTXNum() {
-    return currentTXNum;
-  }
-}
\ No newline at end of file
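The throttle arithmetic in isFull() deserves a worked example; this sketch uses only the constructor and accessors defined above:

    // 3 healthy datanodes, at most 20 TXs buffered per datanode.
    DatanodeDeletedBlockTransactions txs =
        new DatanodeDeletedBlockTransactions(containerManager, 20, 3);
    // isFull() turns true once currentTXNum >= 20 * 3 = 60, i.e. the
    // holder stops accepting after 60 transactions in total, regardless
    // of how they are spread across the 3 nodes.
    boolean full = txs.isFull();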
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
deleted file mode 100644
index db6c1c5..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.block;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto
-    .DeleteBlockTransactionResult;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-/**
- * The DeletedBlockLog is a persisted log in SCM that keeps track of
- * container blocks which are under deletion. It maintains info
- * about under-deletion container blocks as notified by OM,
- * and the state of how they are processed.
- */
-public interface DeletedBlockLog extends Closeable {
-
-  /**
-   * Scans the entire log once and adds TXs to the given
-   * DatanodeDeletedBlockTransactions. Once DatanodeDeletedBlockTransactions
-   * is full, the scan stops.
-   * @param transactions the holder into which the scanned TXs are added.
-   * @return Mapping from containerId to latest transactionId for the container.
-   * @throws IOException
-   */
-  Map<Long, Long> getTransactions(DatanodeDeletedBlockTransactions transactions)
-      throws IOException;
-
-  /**
-   * Returns all failed transactions in the log. A transaction is considered
-   * failed if it has been sent more than the MAX_RETRY limit, after which
-   * its count is reset to -1.
-   *
-   * @return a list of failed deleted block transactions.
-   * @throws IOException
-   */
-  List<DeletedBlocksTransaction> getFailedTransactions()
-      throws IOException;
-
-  /**
-   * Increments the count for the given list of transactions by 1.
-   * The log maintains a valid range of counts for each transaction,
-   * [0, MAX_RETRY]. If a count exceeds this range, it is reset to -1 to
-   * indicate the transaction is no longer valid.
-   *
-   * @param txIDs - transaction ID.
-   */
-  void incrementCount(List<Long> txIDs)
-      throws IOException;
-
-  /**
-   * Committing a transaction means deleting all footprints of the
-   * transaction from the log. This method doesn't guarantee that all
-   * transactions can be successfully deleted; it tolerates failures and
-   * makes a best effort.
-   *  @param transactionResults - delete block transaction results.
-   * @param dnID - ID of datanode which acknowledges the delete block command.
-   */
-  void commitTransactions(List<DeleteBlockTransactionResult> transactionResults,
-      UUID dnID);
-
-  /**
-   * Creates a block deletion transaction and adds that into the log.
-   *
-   * @param containerID - container ID.
-   * @param blocks - blocks that belong to the same container.
-   *
-   * @throws IOException
-   */
-  void addTransaction(long containerID, List<Long> blocks)
-      throws IOException;
-
-  /**
-   * Creates block deletion transactions for a set of containers,
-   * adds them into the log and persists them atomically. An object key
-   * might be stored in multiple containers and multiple blocks, so
-   * this API ensures that these updates are done in an atomic manner:
-   * if any of them fails, the entire operation fails without
-   * any updates to the log. Note, this doesn't mean only one transaction
-   * is created; it creates multiple transactions (depending on the
-   * number of containers) together (on success) or none (on failure).
-   *
-   * @param containerBlocksMap a map of containerBlocks.
-   * @throws IOException
-   */
-  void addTransactions(Map<Long, List<Long>> containerBlocksMap)
-      throws IOException;
-
-  /**
-   * Returns the total number of valid transactions. A transaction is
-   * considered to be valid as long as its count is in range [0, MAX_RETRY].
-   *
-   * @return number of valid transactions.
-   * @throws IOException
-   */
-  int getNumOfValidTransactions() throws IOException;
-}
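To make the retry-count lifecycle concrete, a sketch assuming MAX_RETRY = 5 (`deletedBlockLog` and `txID` are placeholders; the real limit comes from the OZONE_SCM_BLOCK_DELETION_MAX_RETRY setting used by the implementation below):

    // Each failed resend bumps the count: 0 -> 1 -> ... -> 5. One more
    // increment pushes it past MAX_RETRY and the log resets it to -1;
    // from then on the TX is only visible via getFailedTransactions().
    deletedBlockLog.incrementCount(Collections.singletonList(txID));
    List<DeletedBlocksTransaction> failed =
        deletedBlockLog.getFailedTransactions();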
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
deleted file mode 100644
index 7c920ba..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ /dev/null
@@ -1,366 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.block;
-
-import com.google.common.collect.Lists;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.stream.Collectors;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto
-    .DeleteBlockTransactionResult;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.scm.command
-    .CommandStatusReportHandler.DeleteBlockStatus;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.eclipse.jetty.util.ConcurrentHashSet;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static java.lang.Math.min;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT;
-
-/**
- * An implementation class of {@link DeletedBlockLog} that uses a
- * K/V db to maintain block deletion transactions between SCM and datanodes.
- * This is a very basic implementation: it simply scans the log,
- * memorizes the position scanned last time, and uses this to
- * determine where the next scan starts. It has no notion of the weight
- * of each transaction, so as long as a transaction is still valid, all
- * transactions get the same chance to be retrieved, which only depends on
- * the natural order of the transaction IDs.
- */
-public class DeletedBlockLogImpl
-    implements DeletedBlockLog, EventHandler<DeleteBlockStatus> {
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(DeletedBlockLogImpl.class);
-
-  private final int maxRetry;
-  private final ContainerManager containerManager;
-  private final SCMMetadataStore scmMetadataStore;
-  private final Lock lock;
-  // Maps txId to set of DNs which are successful in committing the transaction
-  private Map<Long, Set<UUID>> transactionToDNsCommitMap;
-
-  public DeletedBlockLogImpl(Configuration conf,
-                             ContainerManager containerManager,
-                             SCMMetadataStore scmMetadataStore) {
-    maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY,
-        OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT);
-    this.containerManager = containerManager;
-    this.scmMetadataStore = scmMetadataStore;
-    this.lock = new ReentrantLock();
-
-    // transactionToDNsCommitMap is updated only when
-    // transaction is added to the log and when it is removed.
-
-    // maps transaction to dns which have committed it.
-    transactionToDNsCommitMap = new ConcurrentHashMap<>();
-  }
-
-
-  @Override
-  public List<DeletedBlocksTransaction> getFailedTransactions()
-      throws IOException {
-    lock.lock();
-    try {
-      final List<DeletedBlocksTransaction> failedTXs = Lists.newArrayList();
-      try (TableIterator<Long,
-          ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
-               scmMetadataStore.getDeletedBlocksTXTable().iterator()) {
-        while (iter.hasNext()) {
-          DeletedBlocksTransaction delTX = iter.next().getValue();
-          if (delTX.getCount() == -1) {
-            failedTXs.add(delTX);
-          }
-        }
-      }
-      return failedTXs;
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @param txIDs - transaction ID.
-   * @throws IOException
-   */
-  @Override
-  public void incrementCount(List<Long> txIDs) throws IOException {
-    for (Long txID : txIDs) {
-      lock.lock();
-      try {
-        DeletedBlocksTransaction block =
-            scmMetadataStore.getDeletedBlocksTXTable().get(txID);
-        if (block == null) {
-          // Should we make this an error ? How can we not find the deleted
-          // TXID?
-          LOG.warn("Deleted TXID not found.");
-          continue;
-        }
-        DeletedBlocksTransaction.Builder builder = block.toBuilder();
-        int currentCount = block.getCount();
-        if (currentCount > -1) {
-          builder.setCount(++currentCount);
-        }
-        // If the retry count exceeds the maxRetry value, set it to -1 to
-        // stop retrying; admins can analyze those blocks and purge them
-        // manually via SCMCli.
-        if (currentCount > maxRetry) {
-          builder.setCount(-1);
-        }
-        scmMetadataStore.getDeletedBlocksTXTable().put(txID,
-            builder.build());
-      } catch (IOException ex) {
-        LOG.warn("Cannot increase count for txID " + txID, ex);
-        // We do not throw error here, since we don't want to abort the loop.
-        // Just log and continue processing the rest of txids.
-      } finally {
-        lock.unlock();
-      }
-    }
-  }
-
-
-  private DeletedBlocksTransaction constructNewTransaction(long txID,
-                                                           long containerID,
-                                                           List<Long> blocks) {
-    return DeletedBlocksTransaction.newBuilder()
-        .setTxID(txID)
-        .setContainerID(containerID)
-        .addAllLocalID(blocks)
-        .setCount(0)
-        .build();
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @param transactionResults - transaction IDs.
-   * @param dnID               - Id of Datanode which has acknowledged
-   *                           a delete block command.
-   * @throws IOException
-   */
-  @Override
-  public void commitTransactions(
-      List<DeleteBlockTransactionResult> transactionResults, UUID dnID) {
-    lock.lock();
-    try {
-      Set<UUID> dnsWithCommittedTxn;
-      for (DeleteBlockTransactionResult transactionResult :
-          transactionResults) {
-        if (isTransactionFailed(transactionResult)) {
-          continue;
-        }
-        try {
-          long txID = transactionResult.getTxID();
-          // set of dns which have successfully committed transaction txId.
-          dnsWithCommittedTxn = transactionToDNsCommitMap.get(txID);
-          final ContainerID containerId = ContainerID.valueof(
-              transactionResult.getContainerID());
-          if (dnsWithCommittedTxn == null) {
-            LOG.warn("Transaction txId={} commit by dnId={} for containerID={} "
-                    + "failed. Corresponding entry not found.", txID, dnID,
-                containerId);
-            return;
-          }
-
-          dnsWithCommittedTxn.add(dnID);
-          final ContainerInfo container =
-              containerManager.getContainer(containerId);
-          final Set<ContainerReplica> replicas =
-              containerManager.getContainerReplicas(containerId);
-          // The delete entry can be safely removed from the log if all the
-          // corresponding nodes commit the txn. It is required to check that
-          // the nodes returned in the pipeline match the replication factor.
-          if (min(replicas.size(), dnsWithCommittedTxn.size())
-              >= container.getReplicationFactor().getNumber()) {
-            List<UUID> containerDns = replicas.stream()
-                .map(ContainerReplica::getDatanodeDetails)
-                .map(DatanodeDetails::getUuid)
-                .collect(Collectors.toList());
-            if (dnsWithCommittedTxn.containsAll(containerDns)) {
-              transactionToDNsCommitMap.remove(txID);
-              LOG.debug("Purging txId={} from block deletion log", txID);
-              scmMetadataStore.getDeletedBlocksTXTable().delete(txID);
-            }
-          }
-          LOG.debug("Datanode txId={} containerId={} committed by dnId={}",
-              txID, containerId, dnID);
-        } catch (IOException e) {
-          LOG.warn("Could not commit delete block transaction: " +
-              transactionResult.getTxID(), e);
-        }
-      }
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  private boolean isTransactionFailed(DeleteBlockTransactionResult result) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(
-          "Got block deletion ACK from datanode, TXIDs={}, " + "success={}",
-          result.getTxID(), result.getSuccess());
-    }
-    if (!result.getSuccess()) {
-      LOG.warn("Got failed ACK for TXID={}, prepare to resend the "
-          + "TX in next interval", result.getTxID());
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @param containerID - container ID.
-   * @param blocks      - blocks that belong to the same container.
-   * @throws IOException
-   */
-  @Override
-  public void addTransaction(long containerID, List<Long> blocks)
-      throws IOException {
-    lock.lock();
-    try {
-      Long nextTXID = scmMetadataStore.getNextDeleteBlockTXID();
-      DeletedBlocksTransaction tx =
-          constructNewTransaction(nextTXID, containerID, blocks);
-      scmMetadataStore.getDeletedBlocksTXTable().put(nextTXID, tx);
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  @Override
-  public int getNumOfValidTransactions() throws IOException {
-    lock.lock();
-    try {
-      final AtomicInteger num = new AtomicInteger(0);
-      try (TableIterator<Long,
-          ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
-               scmMetadataStore.getDeletedBlocksTXTable().iterator()) {
-        while (iter.hasNext()) {
-          DeletedBlocksTransaction delTX = iter.next().getValue();
-          if (delTX.getCount() > -1) {
-            num.incrementAndGet();
-          }
-        }
-      }
-      return num.get();
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @param containerBlocksMap a map of containerBlocks.
-   * @throws IOException
-   */
-  @Override
-  public void addTransactions(Map<Long, List<Long>> containerBlocksMap)
-      throws IOException {
-    lock.lock();
-    try {
-      BatchOperation batch = scmMetadataStore.getStore().initBatchOperation();
-      for (Map.Entry<Long, List<Long>> entry : containerBlocksMap.entrySet()) {
-        long nextTXID = scmMetadataStore.getNextDeleteBlockTXID();
-        DeletedBlocksTransaction tx = constructNewTransaction(nextTXID,
-            entry.getKey(), entry.getValue());
-        scmMetadataStore.getDeletedBlocksTXTable().putWithBatch(batch,
-            nextTXID, tx);
-      }
-      scmMetadataStore.getStore().commitBatchOperation(batch);
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-  }
-
-  @Override
-  public Map<Long, Long> getTransactions(
-      DatanodeDeletedBlockTransactions transactions) throws IOException {
-    lock.lock();
-    try {
-      Map<Long, Long> deleteTransactionMap = new HashMap<>();
-      try (TableIterator<Long,
-          ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
-               scmMetadataStore.getDeletedBlocksTXTable().iterator()) {
-        while (iter.hasNext()) {
-          Table.KeyValue<Long, DeletedBlocksTransaction> keyValue =
-              iter.next();
-          DeletedBlocksTransaction block = keyValue.getValue();
-          if (block.getCount() > -1 && block.getCount() <= maxRetry) {
-            if (transactions.addTransaction(block,
-                transactionToDNsCommitMap.get(block.getTxID()))) {
-              deleteTransactionMap.put(block.getContainerID(),
-                  block.getTxID());
-              transactionToDNsCommitMap
-                  .putIfAbsent(block.getTxID(), new ConcurrentHashSet<>());
-            }
-          }
-        }
-      }
-      return deleteTransactionMap;
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  @Override
-  public void onMessage(DeleteBlockStatus deleteBlockStatus,
-                        EventPublisher publisher) {
-    ContainerBlocksDeletionACKProto ackProto =
-        deleteBlockStatus.getCmdStatus().getBlockDeletionAck();
-    commitTransactions(ackProto.getResultsList(),
-        UUID.fromString(ackProto.getDnId()));
-  }
-}
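The commit-quorum check in commitTransactions compresses into one line of arithmetic; a worked instance for a RATIS/THREE container:

    int replicaCount = 3;       // replicas SCM knows for the container
    int committedDns = 2;       // datanodes that have ACKed this TX so far
    int replicationFactor = 3;  // container.getReplicationFactor().getNumber()
    // min(3, 2) = 2 < 3: the TX stays in the log until the third replica's
    // datanode commits; only then is it purged from the TX table.
    boolean purgeable = Math.min(replicaCount, committedDns)
        >= replicationFactor;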
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java
deleted file mode 100644
index 4090f6b..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.block;
-
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-
-/**
- * Event handler for PendingDeleteStatusList events.
- */
-public class PendingDeleteHandler implements
-    EventHandler<PendingDeleteStatusList> {
-
-  private SCMBlockDeletingService scmBlockDeletingService;
-
-  public PendingDeleteHandler(
-      SCMBlockDeletingService scmBlockDeletingService) {
-    this.scmBlockDeletingService = scmBlockDeletingService;
-  }
-
-  @Override
-  public void onMessage(PendingDeleteStatusList pendingDeleteStatusList,
-      EventPublisher publisher) {
-    scmBlockDeletingService.handlePendingDeletes(pendingDeleteStatusList);
-  }
-}
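A sketch of how this handler is wired up; SCMEvents.PENDING_DELETE_STATUS is assumed as the event name, since the registration site is outside this hunk:

    EventQueue eventQueue = new EventQueue();
    eventQueue.addHandler(SCMEvents.PENDING_DELETE_STATUS,
        new PendingDeleteHandler(scmBlockDeletingService));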
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java
deleted file mode 100644
index ee64c48..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.block;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Pending Deletes in the block space.
- */
-public class PendingDeleteStatusList {
-
-  private List<PendingDeleteStatus> pendingDeleteStatuses;
-  private DatanodeDetails datanodeDetails;
-
-  public PendingDeleteStatusList(DatanodeDetails datanodeDetails) {
-    this.datanodeDetails = datanodeDetails;
-    pendingDeleteStatuses = new ArrayList<>();
-  }
-
-  public void addPendingDeleteStatus(long dnDeleteTransactionId,
-      long scmDeleteTransactionId, long containerId) {
-    pendingDeleteStatuses.add(
-        new PendingDeleteStatus(dnDeleteTransactionId, scmDeleteTransactionId,
-            containerId));
-  }
-
-  /**
-   * Status of pending deletes.
-   */
-  public static class PendingDeleteStatus {
-    private long dnDeleteTransactionId;
-    private long scmDeleteTransactionId;
-    private long containerId;
-
-    public PendingDeleteStatus(long dnDeleteTransactionId,
-        long scmDeleteTransactionId, long containerId) {
-      this.dnDeleteTransactionId = dnDeleteTransactionId;
-      this.scmDeleteTransactionId = scmDeleteTransactionId;
-      this.containerId = containerId;
-    }
-
-    public long getDnDeleteTransactionId() {
-      return dnDeleteTransactionId;
-    }
-
-    public long getScmDeleteTransactionId() {
-      return scmDeleteTransactionId;
-    }
-
-    public long getContainerId() {
-      return containerId;
-    }
-
-  }
-
-  public List<PendingDeleteStatus> getPendingDeleteStatuses() {
-    return pendingDeleteStatuses;
-  }
-
-  public int getNumPendingDeletes() {
-    return pendingDeleteStatuses.size();
-  }
-
-  public DatanodeDetails getDatanodeDetails() {
-    return datanodeDetails;
-  }
-}
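A brief sketch tying this holder to the handler above and the service in the next file; the numeric IDs are illustrative only:

    // Datanode dnDetails has applied delete TXs only up to 100 for
    // container 5, while SCM has issued up to TX 108.
    PendingDeleteStatusList list = new PendingDeleteStatusList(dnDetails);
    list.addPendingDeleteStatus(100L, 108L, 5L);
    // SCMBlockDeletingService.handlePendingDeletes logs the mismatch per
    // container.
    scmBlockDeletingService.handlePendingDeletes(list);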
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
deleted file mode 100644
index 74db22d..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.block;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.BackgroundService;
-import org.apache.hadoop.hdds.utils.BackgroundTask;
-import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
-import org.apache.hadoop.hdds.utils.BackgroundTaskResult.EmptyTaskResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT;
-
-/**
- * A background service running in SCM to delete blocks. This service scans
- * the block deletion log at a certain interval and caches block deletion
- * commands in {@link org.apache.hadoop.hdds.scm.node.CommandQueue};
- * asynchronously, the SCM HB thread polls the cached commands and sends
- * them to datanodes for physical processing.
- */
-public class SCMBlockDeletingService extends BackgroundService {
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(SCMBlockDeletingService.class);
-
-  // ThreadPoolSize=2, 1 for scheduler and the other for the scanner.
-  private static final int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 2;
-  private final DeletedBlockLog deletedBlockLog;
-  private final ContainerManager containerManager;
-  private final NodeManager nodeManager;
-  private final EventPublisher eventPublisher;
-
-  // The block delete limit size is dynamically calculated based on the
-  // container delete limit size
-  // (ozone.block.deleting.container.limit.per.interval) configured for the
-  // datanode. To ensure DNs do not wait for delete commands, we multiply
-  // this value by a factor of 2 as the final limit TX size for each node.
-  // Currently we implement a throttle algorithm that throttles delete
-  // blocks for each datanode. Each node is limited to the calculated size.
-  // First, current node info is fetched from the nodemanager, then the
-  // entire delLog is scanned from beginning to end. If one node reaches the
-  // maximum value, its records will be skipped. If not, scanning continues
-  // until it reaches the maximum value. Once all nodes are full, the scan
-  // stops.
-  private int blockDeleteLimitSize;
-
-  public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog,
-      ContainerManager containerManager, NodeManager nodeManager,
-      EventPublisher eventPublisher, long interval, long serviceTimeout,
-      Configuration conf) {
-    super("SCMBlockDeletingService", interval, TimeUnit.MILLISECONDS,
-        BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
-    this.deletedBlockLog = deletedBlockLog;
-    this.containerManager = containerManager;
-    this.nodeManager = nodeManager;
-    this.eventPublisher = eventPublisher;
-
-    int containerLimit = conf.getInt(
-        OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL,
-        OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT);
-    Preconditions.checkArgument(containerLimit > 0,
-        "Container limit size should be positive.");
-    // Multiply the container limit value by a factor of 2 to ensure DNs
-    // do not wait for orders.
-    this.blockDeleteLimitSize = containerLimit * 2;
-  }
-
-  @Override
-  public BackgroundTaskQueue getTasks() {
-    BackgroundTaskQueue queue = new BackgroundTaskQueue();
-    queue.add(new DeletedBlockTransactionScanner());
-    return queue;
-  }
-
-  public void handlePendingDeletes(PendingDeleteStatusList deletionStatusList) {
-    DatanodeDetails dnDetails = deletionStatusList.getDatanodeDetails();
-    for (PendingDeleteStatusList.PendingDeleteStatus deletionStatus :
-        deletionStatusList.getPendingDeleteStatuses()) {
-      LOG.info(
-          "Block deletion txnID mismatch in datanode {} for containerID {}."
-              + " Datanode delete txnID: {}, SCM txnID: {}",
-          dnDetails.getUuid(), deletionStatus.getContainerId(),
-          deletionStatus.getDnDeleteTransactionId(),
-          deletionStatus.getScmDeleteTransactionId());
-    }
-  }
-
-  private class DeletedBlockTransactionScanner
-      implements BackgroundTask<EmptyTaskResult> {
-
-    @Override
-    public int getPriority() {
-      return 1;
-    }
-
-    @Override
-    public EmptyTaskResult call() throws Exception {
-      int dnTxCount = 0;
-      long startTime = Time.monotonicNow();
-      // Scan the SCM DB at the HB interval and collect a throttled list of
-      // blocks to delete.
-      LOG.debug("Running DeletedBlockTransactionScanner");
-      DatanodeDeletedBlockTransactions transactions = null;
-      List<DatanodeDetails> datanodes = nodeManager.getNodes(NodeState.HEALTHY);
-      Map<Long, Long> transactionMap = null;
-      if (datanodes != null) {
-        transactions = new DatanodeDeletedBlockTransactions(containerManager,
-            blockDeleteLimitSize, datanodes.size());
-        try {
-          transactionMap = deletedBlockLog.getTransactions(transactions);
-        } catch (IOException e) {
-          // We may tolerate a number of failures for some time,
-          // but if it continues to fail, at some point we need to raise
-          // an exception and probably fail the SCM. At present, it simply
-          // continues to retry the scanning.
-          LOG.error("Failed to get block deletion transactions from delTX log",
-              e);
-        }
-        LOG.debug("Scanned deleted blocks log and got {} delTX to process.",
-            transactions.getTXNum());
-      }
-
-      if (transactions != null && !transactions.isEmpty()) {
-        for (UUID dnId : transactions.getDatanodeIDs()) {
-          List<DeletedBlocksTransaction> dnTXs = transactions
-              .getDatanodeTransactions(dnId);
-          if (dnTXs != null && !dnTXs.isEmpty()) {
-            dnTxCount += dnTXs.size();
-            // TODO: commandQueue needs a cap.
-            // We should stop caching new commands if the number of unprocessed
-            // commands exceeds a limit, e.g. 50. If a datanode goes offline
-            // for some time, the cached commands may flood the queue.
-            eventPublisher.fireEvent(SCMEvents.RETRIABLE_DATANODE_COMMAND,
-                new CommandForDatanode<>(dnId, new DeleteBlocksCommand(dnTXs)));
-            if (LOG.isDebugEnabled()) {
-              LOG.debug(
-                  "Added delete block command for datanode {} in the queue," +
-                      " number of delete block transactions: {}, TxID list: {}",
-                  dnId, dnTXs.size(), String.join(",",
-                      transactions.getTransactionIDList(dnId)));
-            }
-          }
-        }
-        containerManager.updateDeleteTransactionId(transactionMap);
-      }
-
-      if (dnTxCount > 0) {
-        LOG.info(
-            "Totally added {} delete blocks command for"
-                + " {} datanodes, task elapsed time: {}ms",
-            dnTxCount, transactions.getDatanodeIDs().size(),
-            Time.monotonicNow() - startTime);
-      }
-
-      return EmptyTaskResult.newResult();
-    }
-  }
-
-  @VisibleForTesting
-  public void setBlockDeleteTXNum(int numTXs) {
-    blockDeleteLimitSize = numTXs;
-  }
-}
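
The comment block in the deleted SCMBlockDeletingService describes a per-datanode throttle: each interval, the delete log is scanned and every datanode is assigned at most a fixed number of delete transactions, with full nodes skipped. As a reference for readers, here is a minimal self-contained sketch of that calculation; all names (ThrottleSketch, throttle, delLogByNode) are illustrative and not part of the deleted code.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Illustrative stand-in for the per-datanode delete-transaction throttle.
    public final class ThrottleSketch {

      // Assigns at most limitPerNode transactions to each node; once a node
      // reaches the limit, its remaining records are skipped, as described above.
      static Map<String, List<Long>> throttle(
          Map<String, List<Long>> delLogByNode, int limitPerNode) {
        Map<String, List<Long>> assigned = new HashMap<>();
        for (Map.Entry<String, List<Long>> entry : delLogByNode.entrySet()) {
          List<Long> txs = entry.getValue();
          int cap = Math.min(limitPerNode, txs.size());
          assigned.put(entry.getKey(), new ArrayList<>(txs.subList(0, cap)));
        }
        return assigned;
      }

      public static void main(String[] args) {
        int containerLimit = 10;                       // per-interval config value
        int blockDeleteLimitSize = containerLimit * 2; // factor-2 headroom, as above
        Map<String, List<Long>> delLog = new HashMap<>();
        delLog.put("dn-1", Arrays.asList(1L, 2L, 3L));
        System.out.println(throttle(delLog, blockDeleteLimitSize));
      }
    }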
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/package-info.java
deleted file mode 100644
index e1bfdff..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.block;
-/**
- * This package contains routines to manage the block location and
- * mapping inside SCM.
- */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
deleted file mode 100644
index e909865..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.command;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatus;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .CommandStatusReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-
-/**
- * Handles CommandStatusReports from datanode.
- */
-public class CommandStatusReportHandler implements
-    EventHandler<CommandStatusReportFromDatanode> {
-
-  private static final Logger LOGGER = LoggerFactory
-      .getLogger(CommandStatusReportHandler.class);
-
-  @Override
-  public void onMessage(CommandStatusReportFromDatanode report,
-      EventPublisher publisher) {
-    Preconditions.checkNotNull(report);
-    List<CommandStatus> cmdStatusList = report.getReport().getCmdStatusList();
-    Preconditions.checkNotNull(cmdStatusList);
-    if (LOGGER.isTraceEnabled()) {
-      LOGGER.trace("Processing command status report for dn: {}", report
-          .getDatanodeDetails());
-    }
-
-    // Route command status to its watchers.
-    cmdStatusList.forEach(cmdStatus -> {
-      if (LOGGER.isTraceEnabled()) {
-        LOGGER.trace("Emitting command status for id:{} type: {}", cmdStatus
-            .getCmdId(), cmdStatus.getType());
-      }
-      if (cmdStatus.getType() == SCMCommandProto.Type.deleteBlocksCommand) {
-        if (cmdStatus.getStatus() == CommandStatus.Status.EXECUTED) {
-          publisher.fireEvent(SCMEvents.DELETE_BLOCK_STATUS,
-              new DeleteBlockStatus(cmdStatus));
-        }
-      } else {
-        LOGGER.debug("CommandStatus of type:{} not handled in " +
-            "CommandStatusReportHandler.", cmdStatus.getType());
-      }
-    });
-  }
-
-  /**
-   * Wrapper event for CommandStatus.
-   */
-  public static class CommandStatusEvent implements IdentifiableEventPayload {
-    private CommandStatus cmdStatus;
-
-    CommandStatusEvent(CommandStatus cmdStatus) {
-      this.cmdStatus = cmdStatus;
-    }
-
-    public CommandStatus getCmdStatus() {
-      return cmdStatus;
-    }
-
-    @Override
-    public String toString() {
-      return "CommandStatusEvent:" + cmdStatus.toString();
-    }
-
-    @Override
-    public long getId() {
-      return cmdStatus.getCmdId();
-    }
-  }
-
-  /**
-   * Wrapper event for DeleteBlock Command.
-   */
-  public static class DeleteBlockStatus extends CommandStatusEvent {
-    public DeleteBlockStatus(CommandStatus cmdStatus) {
-      super(cmdStatus);
-    }
-  }
-
-}
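
The handler above follows a common SCM pattern: filter raw CommandStatus entries out of a heartbeat report and re-fire the interesting ones as typed wrapper events. A condensed sketch of that pattern follows; the enums and the publisher lambda are stand-ins, not the real HDDS types.

    import java.util.Arrays;
    import java.util.List;
    import java.util.function.BiConsumer;

    public final class StatusRoutingSketch {

      enum Type { deleteBlocksCommand, closeContainerCommand }
      enum Status { PENDING, EXECUTED, FAILED }

      static final class CmdStatus {
        final long id; final Type type; final Status status;
        CmdStatus(long id, Type type, Status status) {
          this.id = id; this.type = type; this.status = status;
        }
      }

      // Only EXECUTED deleteBlocksCommand statuses are re-fired, as in the
      // deleted onMessage above; everything else is ignored (or just logged).
      static void route(List<CmdStatus> report,
          BiConsumer<String, CmdStatus> publisher) {
        for (CmdStatus s : report) {
          if (s.type == Type.deleteBlocksCommand && s.status == Status.EXECUTED) {
            publisher.accept("DELETE_BLOCK_STATUS", s);
          }
        }
      }

      public static void main(String[] args) {
        route(Arrays.asList(new CmdStatus(1, Type.deleteBlocksCommand, Status.EXECUTED)),
            (topic, payload) -> System.out.println(topic + " <- cmdId " + payload.id));
      }
    }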
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java
deleted file mode 100644
index ba17fb9..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- * <p>
- * This package contains classes for the commands issued from SCM to DataNodes.
- */
-
-/**
- * This package contains classes for the commands issued from SCM to DataNodes.
- */
-package org.apache.hadoop.hdds.scm.command;
-/*
- * Classes related to commands issued from SCM to DataNode.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
deleted file mode 100644
index 59be36b..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
-import org.slf4j.Logger;
-
-import java.io.IOException;
-import java.util.UUID;
-import java.util.function.Supplier;
-
-/**
- * Base class for all the container report handlers.
- */
-public class AbstractContainerReportHandler {
-
-  private final ContainerManager containerManager;
-  private final Logger logger;
-
-  /**
-   * Constructs AbstractContainerReportHandler instance with the
-   * given ContainerManager instance.
-   *
-   * @param containerManager ContainerManager
-   * @param logger Logger to be used for logging
-   */
-  AbstractContainerReportHandler(final ContainerManager containerManager,
-                                 final Logger logger) {
-    Preconditions.checkNotNull(containerManager);
-    Preconditions.checkNotNull(logger);
-    this.containerManager = containerManager;
-    this.logger = logger;
-  }
-
-  /**
-   * Process the given ContainerReplica received from specified datanode.
-   *
-   * @param datanodeDetails DatanodeDetails of the node which reported
-   *                        this replica
-   * @param replicaProto ContainerReplica
-   *
-   * @throws IOException In case of any Exception while processing the report
-   */
-  void processContainerReplica(final DatanodeDetails datanodeDetails,
-                               final ContainerReplicaProto replicaProto)
-      throws IOException {
-    final ContainerID containerId = ContainerID
-        .valueof(replicaProto.getContainerID());
-    final ContainerReplica replica = ContainerReplica.newBuilder()
-        .setContainerID(containerId)
-        .setContainerState(replicaProto.getState())
-        .setDatanodeDetails(datanodeDetails)
-        .setOriginNodeId(UUID.fromString(replicaProto.getOriginNodeId()))
-        .setSequenceId(replicaProto.getBlockCommitSequenceId())
-        .build();
-
-    if (logger.isDebugEnabled()) {
-      logger.debug("Processing replica of container {} from datanode {}",
-          containerId, datanodeDetails);
-    }
-    // The synchronized block should be replaced by a container lock
-    // once we have introduced locking inside ContainerInfo.
-    synchronized (containerManager.getContainer(containerId)) {
-      updateContainerStats(containerId, replicaProto);
-      updateContainerState(datanodeDetails, containerId, replica);
-      containerManager.updateContainerReplica(containerId, replica);
-    }
-  }
-
-  /**
-   * Update the container stats if they are lagging behind the stats in the
-   * reported replica.
-   *
-   * @param containerId ID of the container
-   * @param replicaProto Container Replica information
-   * @throws ContainerNotFoundException If the container is not present
-   */
-  private void updateContainerStats(final ContainerID containerId,
-                                    final ContainerReplicaProto replicaProto)
-      throws ContainerNotFoundException {
-
-    if (!isUnhealthy(replicaProto::getState)) {
-      final ContainerInfo containerInfo = containerManager
-          .getContainer(containerId);
-
-      if (containerInfo.getSequenceId() <
-          replicaProto.getBlockCommitSequenceId()) {
-        containerInfo.updateSequenceId(
-            replicaProto.getBlockCommitSequenceId());
-      }
-      if (containerInfo.getUsedBytes() < replicaProto.getUsed()) {
-        containerInfo.setUsedBytes(replicaProto.getUsed());
-      }
-      if (containerInfo.getNumberOfKeys() < replicaProto.getKeyCount()) {
-        containerInfo.setNumberOfKeys(replicaProto.getKeyCount());
-      }
-    }
-  }
-
-  /**
-   * Updates the container state based on the given replica state.
-   *
-   * @param datanode Datanode from which the report is received
-   * @param containerId ID of the container
-   * @param replica ContainerReplica
-   * @throws IOException In case of Exception
-   */
-  private void updateContainerState(final DatanodeDetails datanode,
-                                    final ContainerID containerId,
-                                    final ContainerReplica replica)
-      throws IOException {
-
-    final ContainerInfo container = containerManager
-        .getContainer(containerId);
-
-    switch (container.getState()) {
-    case OPEN:
-      /*
-       * If the state of a container is OPEN, datanodes cannot report
-       * any other state.
-       */
-      if (replica.getState() != State.OPEN) {
-        logger.warn("Container {} is in OPEN state, but the datanode {} " +
-            "reports an {} replica.", containerId,
-            datanode, replica.getState());
-        // Should we take some action?
-      }
-      break;
-    case CLOSING:
-      /*
-       * When the container is in CLOSING state the replicas can be in any
-       * of the following states:
-       *
-       * - OPEN
-       * - CLOSING
-       * - QUASI_CLOSED
-       * - CLOSED
-       *
-       * If all the replicas are either in OPEN or CLOSING state, do nothing.
-       *
-       * If the replica is in QUASI_CLOSED state, move the container to
-       * QUASI_CLOSED state.
-       *
-       * If the replica is in CLOSED state, mark the container as CLOSED.
-       *
-       */
-
-      if (replica.getState() == State.QUASI_CLOSED) {
-        logger.info("Moving container {} to QUASI_CLOSED state, datanode {} " +
-                "reported QUASI_CLOSED replica.", containerId, datanode);
-        containerManager.updateContainerState(containerId,
-            LifeCycleEvent.QUASI_CLOSE);
-      }
-
-      if (replica.getState() == State.CLOSED) {
-        logger.info("Moving container {} to CLOSED state, datanode {} " +
-            "reported CLOSED replica.", containerId, datanode);
-        Preconditions.checkArgument(replica.getSequenceId()
-            == container.getSequenceId());
-        containerManager.updateContainerState(containerId,
-            LifeCycleEvent.CLOSE);
-      }
-
-      break;
-    case QUASI_CLOSED:
-      /*
-       * The container is in QUASI_CLOSED state, which means that at least
-       * one of the replicas was QUASI_CLOSED.
-       *
-       * Now replicas can be in any of the following states.
-       *
-       * 1. OPEN
-       * 2. CLOSING
-       * 3. QUASI_CLOSED
-       * 4. CLOSED
-       *
-       * If at least one of the replicas is in CLOSED state, mark the
-       * container as CLOSED.
-       *
-       */
-      if (replica.getState() == State.CLOSED) {
-        logger.info("Moving container {} to CLOSED state, datanode {} " +
-            "reported CLOSED replica.", containerId, datanode);
-        Preconditions.checkArgument(replica.getSequenceId()
-            == container.getSequenceId());
-        containerManager.updateContainerState(containerId,
-            LifeCycleEvent.FORCE_CLOSE);
-      }
-      break;
-    case CLOSED:
-      /*
-       * The container is already in CLOSED state; do nothing.
-       */
-      break;
-    case DELETING:
-      throw new UnsupportedOperationException(
-          "Unsupported container state 'DELETING'.");
-    case DELETED:
-      throw new UnsupportedOperationException(
-          "Unsupported container state 'DELETED'.");
-    default:
-      break;
-    }
-  }
-
-  /**
-   * Returns true if the container replica is marked UNHEALTHY.
-   *
-   * @param replicaState State of the container replica.
-   * @return true if unhealthy, false otherwise
-   */
-  private boolean isUnhealthy(final Supplier<State> replicaState) {
-    return replicaState.get() == ContainerReplicaProto.State.UNHEALTHY;
-  }
-
-  /**
-   * Return ContainerManager.
-   * @return {@link ContainerManager}
-   */
-  protected ContainerManager getContainerManager() {
-    return containerManager;
-  }
-
-}
\ No newline at end of file
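
The case-by-case comments in the deleted updateContainerState amount to a small transition table from (container state, replica state) to a lifecycle event. A compact sketch of that table, with illustrative enum and method names:

    public final class TransitionSketch {

      enum ContainerState { OPEN, CLOSING, QUASI_CLOSED, CLOSED }
      enum ReplicaState { OPEN, CLOSING, QUASI_CLOSED, CLOSED }
      enum Event { NONE, QUASI_CLOSE, CLOSE, FORCE_CLOSE }

      // Mirrors the deleted switch: OPEN only warns, CLOSING promotes on
      // QUASI_CLOSED/CLOSED replicas, QUASI_CLOSED force-closes on CLOSED.
      static Event eventFor(ContainerState container, ReplicaState replica) {
        switch (container) {
        case CLOSING:
          if (replica == ReplicaState.QUASI_CLOSED) {
            return Event.QUASI_CLOSE;
          }
          return replica == ReplicaState.CLOSED ? Event.CLOSE : Event.NONE;
        case QUASI_CLOSED:
          return replica == ReplicaState.CLOSED ? Event.FORCE_CLOSE : Event.NONE;
        default:
          return Event.NONE;
        }
      }

      public static void main(String[] args) {
        System.out.println(eventFor(ContainerState.CLOSING, ReplicaState.CLOSED));
        // prints CLOSE
      }
    }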
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
deleted file mode 100644
index fd73711..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * <p>http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * <p>Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
-
-/**
- * In case of a node failure, volume failure, volume out of space, node
- * out of space, etc., CLOSE_CONTAINER will be triggered.
- * CloseContainerEventHandler is the handler for CLOSE_CONTAINER.
- * When a close container event is fired, a close command for the container
- * should be sent to all the datanodes in the pipeline and containerStateManager
- * needs to update the container state to Closing.
- */
-public class CloseContainerEventHandler implements EventHandler<ContainerID> {
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(CloseContainerEventHandler.class);
-
-  private final PipelineManager pipelineManager;
-  private final ContainerManager containerManager;
-
-  public CloseContainerEventHandler(final PipelineManager pipelineManager,
-      final ContainerManager containerManager) {
-    this.pipelineManager = pipelineManager;
-    this.containerManager = containerManager;
-  }
-
-  @Override
-  public void onMessage(ContainerID containerID, EventPublisher publisher) {
-    LOG.info("Close container Event triggered for container : {}", containerID);
-    try {
-      // If the container is in OPEN state, FINALIZE it.
-      if (containerManager.getContainer(containerID).getState()
-          == LifeCycleState.OPEN) {
-        containerManager.updateContainerState(
-            containerID, LifeCycleEvent.FINALIZE);
-      }
-
-      // ContainerInfo has to be read again after the above state change.
-      final ContainerInfo container = containerManager
-          .getContainer(containerID);
-      // Send close command to datanodes, if the container is in CLOSING state
-      if (container.getState() == LifeCycleState.CLOSING) {
-
-        final CloseContainerCommand closeContainerCommand =
-            new CloseContainerCommand(
-                containerID.getId(), container.getPipelineID());
-
-        getNodes(container).forEach(node -> publisher.fireEvent(
-            DATANODE_COMMAND,
-            new CommandForDatanode<>(node.getUuid(), closeContainerCommand)));
-      } else {
-        LOG.warn("Cannot close container {}, which is in {} state.",
-            containerID, container.getState());
-      }
-
-    } catch (IOException ex) {
-      LOG.error("Failed to close the container {}.", containerID, ex);
-    }
-  }
-
-  /**
-   * Returns the list of Datanodes where this container lives.
-   *
-   * @param container ContainerInfo
-   * @return list of DatanodeDetails
-   * @throws ContainerNotFoundException
-   */
-  private List<DatanodeDetails> getNodes(final ContainerInfo container)
-      throws ContainerNotFoundException {
-    try {
-      return pipelineManager.getPipeline(container.getPipelineID()).getNodes();
-    } catch (PipelineNotFoundException ex) {
-      // Use container replica if the pipeline is not available.
-      return containerManager.getContainerReplicas(container.containerID())
-          .stream()
-          .map(ContainerReplica::getDatanodeDetails)
-          .collect(Collectors.toList());
-    }
-  }
-
-}
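
The deleted getNodes encodes a simple fallback: ask the pipeline for the container's datanodes, and if the pipeline is already gone, derive the node list from the known replicas instead. A generic sketch of that shape; the exception and all names here are stand-ins for PipelineNotFoundException and the HDDS types.

    import java.util.Arrays;
    import java.util.List;
    import java.util.NoSuchElementException;
    import java.util.function.Supplier;

    public final class NodeLookupSketch {

      // Try the primary source first; fall back to the secondary on "not found".
      static List<String> nodes(Supplier<List<String>> pipelineNodes,
                                Supplier<List<String>> replicaNodes) {
        try {
          return pipelineNodes.get();
        } catch (NoSuchElementException e) { // stand-in for PipelineNotFoundException
          return replicaNodes.get();
        }
      }

      public static void main(String[] args) {
        List<String> result = nodes(
            () -> { throw new NoSuchElementException("pipeline closed"); },
            () -> Arrays.asList("dn-1", "dn-2", "dn-3"));
        System.out.println(result); // [dn-1, dn-2, dn-3] from the replica fallback
      }
    }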
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
deleted file mode 100644
index e79f268..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .ContainerActionsFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Handles container reports from datanode.
- */
-public class ContainerActionsHandler implements
-    EventHandler<ContainerActionsFromDatanode> {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      ContainerActionsHandler.class);
-
-  @Override
-  public void onMessage(
-      ContainerActionsFromDatanode containerReportFromDatanode,
-      EventPublisher publisher) {
-    DatanodeDetails dd = containerReportFromDatanode.getDatanodeDetails();
-    for (ContainerAction action : containerReportFromDatanode.getReport()
-        .getContainerActionsList()) {
-      ContainerID containerId = ContainerID.valueof(action.getContainerID());
-      switch (action.getAction()) {
-      case CLOSE:
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Closing container {} in datanode {} because the" +
-              " container is {}.", containerId, dd, action.getReason());
-        }
-        publisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerId);
-        break;
-      default:
-        LOG.warn("Invalid action {} with reason {}, from datanode {}. ",
-            action.getAction(), action.getReason(), dd); }
-    }
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
deleted file mode 100644
index f9488e2..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-// TODO: Write extensive java doc.
-// This is the main interface of ContainerManager.
-/**
- * The ContainerManager interface maintains the mapping from container names
- * to pipelines. This is used by SCM when allocating new locations and when
- * looking up a key.
- */
-public interface ContainerManager extends Closeable {
-
-
-  /**
-   * Returns all the container Ids managed by ContainerManager.
-   *
-   * @return Set of ContainerID
-   */
-  Set<ContainerID> getContainerIDs();
-
-  /**
-   * Returns all the containers managed by ContainerManager.
-   *
-   * @return List of ContainerInfo
-   */
-  List<ContainerInfo> getContainers();
-
-  /**
-   * Returns all the containers which are in the specified state.
-   *
-   * @return List of ContainerInfo
-   */
-  List<ContainerInfo> getContainers(HddsProtos.LifeCycleState state);
-
-  /**
-   * Returns the number of containers in the given
-   *  {@link org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState}.
-   *
-   * @return Number of containers
-   */
-  Integer getContainerCountByState(HddsProtos.LifeCycleState state);
-
-  /**
-   * Returns the ContainerInfo from the container ID.
-   *
-   * @param containerID - ID of container.
-   * @return - ContainerInfo such as creation state and the pipeline.
-   * @throws IOException
-   */
-  ContainerInfo getContainer(ContainerID containerID)
-      throws ContainerNotFoundException;
-
-  /**
-   * Returns containers matching the given conditions.
-   * Searches container IDs starting from the given start ID (exclusive);
-   * the size of the search range cannot exceed the
-   * value of count.
-   *
-   * @param startContainerID start containerID, >= 0;
-   * start searching at the head if 0.
-   * @param count count must be >= 0.
-   *              Usually count is set to a very large value instead of
-   *              being unlimited, in case the db is very big.
-   *
-   * @return a list of container.
-   * @throws IOException
-   */
-  List<ContainerInfo> listContainer(ContainerID startContainerID, int count);
-
-  /**
-   * Allocates a new container for a given keyName and replication factor.
-   *
-   * @param replicationFactor - replication factor of the container.
-   * @param owner - the owner of the container.
-   * @return - ContainerInfo.
-   * @throws IOException
-   */
-  ContainerInfo allocateContainer(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor replicationFactor, String owner)
-      throws IOException;
-
-  /**
-   * Deletes a container from SCM.
-   *
-   * @param containerID - Container ID
-   * @throws IOException
-   */
-  void deleteContainer(ContainerID containerID) throws IOException;
-
-  /**
-   * Update container state.
-   * @param containerID - Container ID
-   * @param event - container life cycle event
-   * @return - new container state
-   * @throws IOException
-   */
-  HddsProtos.LifeCycleState updateContainerState(ContainerID containerID,
-      HddsProtos.LifeCycleEvent event) throws IOException;
-
-  /**
-   * Returns the latest list of replicas for given containerId.
-   *
-   * @param containerID Container ID
-   * @return Set of ContainerReplica
-   */
-  Set<ContainerReplica> getContainerReplicas(ContainerID containerID)
-      throws ContainerNotFoundException;
-
-  /**
-   * Adds a container Replica for the given Container.
-   *
-   * @param containerID Container ID
-   * @param replica ContainerReplica
-   */
-  void updateContainerReplica(ContainerID containerID, ContainerReplica replica)
-      throws ContainerNotFoundException;
-
-  /**
-   * Remove a container Replica from a given Container.
-   *
-   * @param containerID Container ID
-   * @param replica ContainerReplica to remove
-   * @throws ContainerReplicaNotFoundException if the replica is not found.
-   */
-  void removeContainerReplica(ContainerID containerID, ContainerReplica replica)
-      throws ContainerNotFoundException, ContainerReplicaNotFoundException;
-
-  /**
-   * Update deleteTransactionId according to deleteTransactionMap.
-   *
-   * @param deleteTransactionMap Maps the containerId to latest delete
-   *                             transaction id for the container.
-   * @throws IOException
-   */
-  void updateDeleteTransactionId(Map<Long, Long> deleteTransactionMap)
-      throws IOException;
-
-  /**
-   * Returns ContainerInfo which matches the requirements.
-   * @param size - the amount of space required in the container
-   * @param owner - the user which requires space in its owned container
-   * @param pipeline - pipeline to which the container should belong
-   * @return ContainerInfo for the matching container.
-   */
-  ContainerInfo getMatchingContainer(long size, String owner,
-      Pipeline pipeline);
-
-  /**
-   * Returns ContainerInfo which matches the requirements.
-   * @param size - the amount of space required in the container
-   * @param owner - the user which requires space in its owned container
-   * @param pipeline - pipeline to which the container should belong.
-   * @param excludedContainerIDS - containerIds to be excluded.
-   * @return ContainerInfo for the matching container.
-   */
-  ContainerInfo getMatchingContainer(long size, String owner,
-      Pipeline pipeline, List<ContainerID> excludedContainerIDS);
-
-  /**
-   * After the report processor handler completes, call this to notify the
-   * container manager to increment metrics.
-   * @param isFullReport true if a full container report was processed
-   * @param success true if the report was processed successfully
-   */
-  void notifyContainerReportProcessing(boolean isFullReport, boolean success);
-}
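
The listContainer contract above (start ID exclusive, at most count results) is essentially a paged scan over an ordered ID space. A self-contained sketch of those semantics using a TreeMap; the data here is made up.

    import java.util.List;
    import java.util.TreeMap;
    import java.util.stream.Collectors;

    public final class ListContainerSketch {

      public static void main(String[] args) {
        TreeMap<Long, String> containers = new TreeMap<>();
        for (long id = 1; id <= 8; id++) {
          containers.put(id, "container-" + id);
        }

        long startContainerID = 3; // exclusive, per the javadoc above
        int count = 3;             // hard cap on the page size

        List<String> page = containers.tailMap(startContainerID, false).values()
            .stream()
            .limit(count)
            .collect(Collectors.toList());

        System.out.println(page); // [container-4, container-5, container-6]
      }
    }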
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
deleted file mode 100644
index 8bfcb84..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.builder.CompareToBuilder;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-
-import java.util.Optional;
-import java.util.UUID;
-
-/**
- * In-memory state of a container replica.
- */
-public final class ContainerReplica implements Comparable<ContainerReplica> {
-
-  private final ContainerID containerID;
-  private final ContainerReplicaProto.State state;
-  private final DatanodeDetails datanodeDetails;
-  private final UUID placeOfBirth;
-
-  private Long sequenceId;
-
-
-  private ContainerReplica(final ContainerID containerID,
-      final ContainerReplicaProto.State state, final DatanodeDetails datanode,
-      final UUID originNodeId) {
-    this.containerID = containerID;
-    this.state = state;
-    this.datanodeDetails = datanode;
-    this.placeOfBirth = originNodeId;
-  }
-
-  private void setSequenceId(Long seqId) {
-    sequenceId = seqId;
-  }
-
-  /**
-   * Returns the DatanodeDetails to which this replica belongs.
-   *
-   * @return DatanodeDetails
-   */
-  public DatanodeDetails getDatanodeDetails() {
-    return datanodeDetails;
-  }
-
-  /**
-   * Returns the UUID of Datanode where this replica originated.
-   *
-   * @return UUID
-   */
-  public UUID getOriginDatanodeId() {
-    return placeOfBirth;
-  }
-
-  /**
-   * Returns the state of this replica.
-   *
-   * @return replica state
-   */
-  public ContainerReplicaProto.State getState() {
-    return state;
-  }
-
-  /**
-   * Returns the Sequence Id of this replica.
-   *
-   * @return Sequence Id
-   */
-  public Long getSequenceId() {
-    return sequenceId;
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(61, 71)
-        .append(containerID)
-        .append(datanodeDetails)
-        .toHashCode();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    final ContainerReplica that = (ContainerReplica) o;
-
-    return new EqualsBuilder()
-        .append(containerID, that.containerID)
-        .append(datanodeDetails, that.datanodeDetails)
-        .isEquals();
-  }
-
-  @Override
-  public int compareTo(ContainerReplica that) {
-    Preconditions.checkNotNull(that);
-    return new CompareToBuilder()
-        .append(this.containerID, that.containerID)
-        .append(this.datanodeDetails, that.datanodeDetails)
-        .build();
-  }
-
-  /**
-   * Returns a new Builder to construct ContainerReplica.
-   *
-   * @return ContainerReplicaBuilder
-   */
-  public static ContainerReplicaBuilder newBuilder() {
-    return new ContainerReplicaBuilder();
-  }
-
-  @Override
-  public String toString() {
-    return "ContainerReplica{" +
-        "containerID=" + containerID +
-        ", datanodeDetails=" + datanodeDetails +
-        ", placeOfBirth=" + placeOfBirth +
-        ", sequenceId=" + sequenceId +
-        '}';
-  }
-
-  /**
-   * Used for building ContainerReplica instance.
-   */
-  public static class ContainerReplicaBuilder {
-
-    private ContainerID containerID;
-    private ContainerReplicaProto.State state;
-    private DatanodeDetails datanode;
-    private UUID placeOfBirth;
-    private Long sequenceId;
-
-    /**
-     * Set Container Id.
-     *
-     * @param cID ContainerID
-     * @return ContainerReplicaBuilder
-     */
-    public ContainerReplicaBuilder setContainerID(
-        final ContainerID cID) {
-      this.containerID = cID;
-      return this;
-    }
-
-    public ContainerReplicaBuilder setContainerState(
-        final ContainerReplicaProto.State  containerState) {
-      state = containerState;
-      return this;
-    }
-
-    /**
-     * Set DatanodeDetails.
-     *
-     * @param datanodeDetails DatanodeDetails
-     * @return ContainerReplicaBuilder
-     */
-    public ContainerReplicaBuilder setDatanodeDetails(
-        DatanodeDetails datanodeDetails) {
-      datanode = datanodeDetails;
-      return this;
-    }
-
-    /**
-     * Set replica origin node id.
-     *
-     * @param originNodeId origin node UUID
-     * @return ContainerReplicaBuilder
-     */
-    public ContainerReplicaBuilder setOriginNodeId(UUID originNodeId) {
-      placeOfBirth = originNodeId;
-      return this;
-    }
-
-    /**
-     * Set sequence Id of the replica.
-     *
-     * @param seqId container sequence Id
-     * @return ContainerReplicaBuilder
-     */
-    public ContainerReplicaBuilder setSequenceId(long seqId) {
-      sequenceId = seqId;
-      return this;
-    }
-
-    /**
-     * Constructs new ContainerReplicaBuilder.
-     *
-     * @return ContainerReplicaBuilder
-     */
-    public ContainerReplica build() {
-      Preconditions.checkNotNull(containerID,
-          "Container Id can't be null");
-      Preconditions.checkNotNull(state,
-          "Container state can't be null");
-      Preconditions.checkNotNull(datanode,
-          "DatanodeDetails can't be null");
-      ContainerReplica replica = new ContainerReplica(
-          containerID, state, datanode,
-          Optional.ofNullable(placeOfBirth).orElse(datanode.getUuid()));
-      Optional.ofNullable(sequenceId).ifPresent(replica::setSequenceId);
-      return replica;
-    }
-  }
-
-
-}
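
For reference, a typical construction of the class deleted above via its builder. The IDs are made-up values and the datanodeDetails variable is assumed to be in scope; ContainerID.valueof, the proto State, and the builder methods are the deleted APIs shown above.

    // Assumes a DatanodeDetails instance named datanodeDetails is in scope.
    ContainerReplica replica = ContainerReplica.newBuilder()
        .setContainerID(ContainerID.valueof(42L))
        .setContainerState(ContainerReplicaProto.State.CLOSED)
        .setDatanodeDetails(datanodeDetails)
        .setSequenceId(1001L)  // optional; stays null if never set
        .build();              // originNodeId defaults to the datanode's own UUID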
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
deleted file mode 100644
index 2227df6..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.scm.block.PendingDeleteStatusList;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .ContainerReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-/**
- * Handles container reports from datanode.
- */
-public class ContainerReportHandler extends AbstractContainerReportHandler
-    implements EventHandler<ContainerReportFromDatanode> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerReportHandler.class);
-
-  private final NodeManager nodeManager;
-  private final ContainerManager containerManager;
-
-  /**
-   * Constructs ContainerReportHandler instance with the
-   * given NodeManager and ContainerManager instance.
-   *
-   * @param nodeManager NodeManager instance
-   * @param containerManager ContainerManager instance
-   */
-  public ContainerReportHandler(final NodeManager nodeManager,
-                                final ContainerManager containerManager) {
-    super(containerManager, LOG);
-    this.nodeManager = nodeManager;
-    this.containerManager = containerManager;
-  }
-
-  /**
-   * Process the container reports from datanodes.
-   *
-   * @param reportFromDatanode Container Report
-   * @param publisher EventPublisher reference
-   */
-  @Override
-  public void onMessage(final ContainerReportFromDatanode reportFromDatanode,
-                        final EventPublisher publisher) {
-
-    final DatanodeDetails datanodeDetails =
-        reportFromDatanode.getDatanodeDetails();
-    final ContainerReportsProto containerReport =
-        reportFromDatanode.getReport();
-
-    try {
-      final List<ContainerReplicaProto> replicas =
-          containerReport.getReportsList();
-      final Set<ContainerID> containersInSCM =
-          nodeManager.getContainers(datanodeDetails);
-
-      final Set<ContainerID> containersInDn = replicas.parallelStream()
-          .map(ContainerReplicaProto::getContainerID)
-          .map(ContainerID::valueof).collect(Collectors.toSet());
-
-      final Set<ContainerID> missingReplicas = new HashSet<>(containersInSCM);
-      missingReplicas.removeAll(containersInDn);
-
-      processContainerReplicas(datanodeDetails, replicas);
-      processMissingReplicas(datanodeDetails, missingReplicas);
-      updateDeleteTransaction(datanodeDetails, replicas, publisher);
-
-      /*
-       * Update the latest set of containers for this datanode in
-       * NodeManager
-       */
-      nodeManager.setContainers(datanodeDetails, containersInDn);
-
-      containerManager.notifyContainerReportProcessing(true, true);
-    } catch (NodeNotFoundException ex) {
-      containerManager.notifyContainerReportProcessing(true, false);
-      LOG.error("Received container report from unknown datanode {} {}",
-          datanodeDetails, ex);
-    }
-
-  }
-
-  /**
-   * Processes the ContainerReport.
-   *
-   * @param datanodeDetails Datanode from which this report was received
-   * @param replicas list of ContainerReplicaProto
-   */
-  private void processContainerReplicas(final DatanodeDetails datanodeDetails,
-      final List<ContainerReplicaProto> replicas) {
-    for (ContainerReplicaProto replicaProto : replicas) {
-      try {
-        processContainerReplica(datanodeDetails, replicaProto);
-      } catch (ContainerNotFoundException e) {
-        LOG.error("Received container report for an unknown container" +
-                " {} from datanode {}.", replicaProto.getContainerID(),
-            datanodeDetails, e);
-      } catch (IOException e) {
-        LOG.error("Exception while processing container report for container" +
-                " {} from datanode {}.", replicaProto.getContainerID(),
-            datanodeDetails, e);
-      }
-    }
-  }
-
-  /**
-   * Process the missing replica on the given datanode.
-   *
-   * @param datanodeDetails DatanodeDetails
-   * @param missingReplicas ContainerID which are missing on the given datanode
-   */
-  private void processMissingReplicas(final DatanodeDetails datanodeDetails,
-                                      final Set<ContainerID> missingReplicas) {
-    for (ContainerID id : missingReplicas) {
-      try {
-        containerManager.getContainerReplicas(id).stream()
-            .filter(replica -> replica.getDatanodeDetails()
-                .equals(datanodeDetails)).findFirst()
-            .ifPresent(replica -> {
-              try {
-                containerManager.removeContainerReplica(id, replica);
-              } catch (ContainerNotFoundException |
-                  ContainerReplicaNotFoundException ignored) {
-                // This should not happen, but even if it happens, not an issue
-              }
-            });
-      } catch (ContainerNotFoundException e) {
-        LOG.warn("Cannot remove container replica, container {} not found.",
-            id, e);
-      }
-    }
-  }
-
-  /**
-   * Updates the Delete Transaction Id for the given datanode.
-   *
-   * @param datanodeDetails DatanodeDetails
-   * @param replicas List of ContainerReplicaProto
-   * @param publisher EventPublisher reference
-   */
-  private void updateDeleteTransaction(final DatanodeDetails datanodeDetails,
-      final List<ContainerReplicaProto> replicas,
-      final EventPublisher publisher) {
-    final PendingDeleteStatusList pendingDeleteStatusList =
-        new PendingDeleteStatusList(datanodeDetails);
-    for (ContainerReplicaProto replica : replicas) {
-      try {
-        final ContainerInfo containerInfo = containerManager.getContainer(
-            ContainerID.valueof(replica.getContainerID()));
-        if (containerInfo.getDeleteTransactionId() >
-            replica.getDeleteTransactionId()) {
-          pendingDeleteStatusList.addPendingDeleteStatus(
-              replica.getDeleteTransactionId(),
-              containerInfo.getDeleteTransactionId(),
-              containerInfo.getContainerID());
-        }
-      } catch (ContainerNotFoundException cnfe) {
-        LOG.warn("Cannot update pending delete transaction for " +
-            "container #{}. Reason: container missing.",
-            replica.getContainerID());
-      }
-    }
-    if (pendingDeleteStatusList.getNumPendingDeletes() > 0) {
-      publisher.fireEvent(SCMEvents.PENDING_DELETE_STATUS,
-          pendingDeleteStatusList);
-    }
-  }
-}
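
The missing-replica computation in the deleted onMessage is a plain set difference between SCM's view and the datanode's latest report. A tiny runnable sketch of that step; the container IDs are arbitrary.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public final class MissingReplicaSketch {

      public static void main(String[] args) {
        Set<Long> containersInScm = new HashSet<>(Arrays.asList(1L, 2L, 3L, 4L));
        Set<Long> containersInDn = new HashSet<>(Arrays.asList(2L, 3L));

        // Replicas SCM expects on this node but the node no longer reports.
        Set<Long> missingReplicas = new HashSet<>(containersInScm);
        missingReplicas.removeAll(containersInDn);

        System.out.println(missingReplicas); // [1, 4]
      }
    }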
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
deleted file mode 100644
index 7dde8d7..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ /dev/null
@@ -1,535 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .FAILED_TO_CHANGE_CONTAINER_STATE;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.states.ContainerState;
-import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.ozone.common.statemachine
-    .InvalidStateTransitionException;
-import org.apache.hadoop.ozone.common.statemachine.StateMachine;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AtomicLongMap;
-
-/**
- * A container state manager keeps track of container states and returns
- * containers that match various queries.
- * <p>
- * This state machine is driven by a combination of server and client actions.
- * <p>
- * This is how a create container happens: 1. When a container is created, the
- * Server (or SCM) marks that Container as being in the ALLOCATED state. In this
- * state, SCM has chosen a pipeline for the container to live on. However, the
- * container is not created yet. This container along with the pipeline is
- * returned to the client.
- * <p>
- * 2. When the client sees the Container state as ALLOCATED, it understands the
- * container needs to be created on the specified pipeline. The client lets the
- * SCM know that it saw this flag and is initiating the create on the data nodes.
- * <p>
- * This is done by calling notifyObjectCreation(ContainerName,
- * BEGIN_CREATE). When SCM gets this call, SCM puts the container state
- * into CREATING. All this state means is that SCM told the client to create a
- * container and the client saw that request.
- * <p>
- * 3. Then client makes calls to datanodes directly, asking the datanodes to
- * create the container. This is done with the help of pipeline that supports
- * this container.
- * <p>
- * 4. Once the creation of the container is complete, the client will make
- * another call to the SCM, this time specifying the containerName and the
- * COMPLETE_CREATE as the Event.
- * <p>
- * 5. With COMPLETE_CREATE event, the container moves to an Open State. This is
- * the state when clients can write to a container.
- * <p>
- * 6. If the client does not respond with the COMPLETE_CREATE event within a
- * certain time, the state machine times out and triggers a delete operation of
- * the container.
- * <p>
- * Please see the function initializeStateMachine below to see how this looks in
- * code.
- * <p>
- * Reusing existing container :
- * <p>
- * The create container call is not made all the time; the system tries to use
- * open containers as much as possible. So in those cases, it looks through the
- * list of open containers and will return containers that match the specific
- * signature.
- * <p>
- * Please note : Logically there are 3 separate state machines in the case of
- * containers.
- * <p>
- * The Create State Machine -- Commented extensively above.
- * <p>
- * Open/Close State Machine - Once the container is in the Open State,
- * eventually it will be closed, once sufficient data has been written to it.
- * <p>
- * TimeOut Delete Container State Machine - if the container creation times out,
- * then Container State manager decides to delete the container.
- */
-public class ContainerStateManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerStateManager.class);
-
-  private final StateMachine<HddsProtos.LifeCycleState,
-      HddsProtos.LifeCycleEvent> stateMachine;
-
-  private final long containerSize;
-  private final ConcurrentHashMap<ContainerState, ContainerID> lastUsedMap;
-  private final ContainerStateMap containers;
-  private final AtomicLong containerCount;
-  private final AtomicLongMap<LifeCycleState> containerStateCount =
-      AtomicLongMap.create();
-
-  /**
-   * Constructs a Container State Manager that tracks all containers owned by
-   * SCM for the purpose of allocation of blocks.
-   * <p>
-   * TODO : Add Container Tags so we know which containers are owned by SCM.
-   */
-  @SuppressWarnings("unchecked")
-  public ContainerStateManager(final Configuration configuration) {
-
-    // Initialize the container state machine.
-    final Set<HddsProtos.LifeCycleState> finalStates = new HashSet<>();
-
-    // These are the steady states of a container.
-    finalStates.add(LifeCycleState.OPEN);
-    finalStates.add(LifeCycleState.CLOSED);
-    finalStates.add(LifeCycleState.DELETED);
-
-    this.stateMachine = new StateMachine<>(LifeCycleState.OPEN,
-        finalStates);
-    initializeStateMachine();
-
-    this.containerSize = (long) configuration.getStorageSize(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
-        StorageUnit.BYTES);
-
-    this.lastUsedMap = new ConcurrentHashMap<>();
-    this.containerCount = new AtomicLong(0);
-    this.containers = new ContainerStateMap();
-  }
-
-  /*
-   *
-   * Event and State Transition Mapping:
-   *
-   * State: OPEN         ----------------> CLOSING
-   * Event:                    FINALIZE
-   *
-   * State: CLOSING      ----------------> QUASI_CLOSED
-   * Event:                  QUASI_CLOSE
-   *
-   * State: CLOSING      ----------------> CLOSED
-   * Event:                     CLOSE
-   *
-   * State: QUASI_CLOSED ----------------> CLOSED
-   * Event:                  FORCE_CLOSE
-   *
-   * State: CLOSED       ----------------> DELETING
-   * Event:                    DELETE
-   *
-   * State: DELETING     ----------------> DELETED
-   * Event:                    CLEANUP
-   *
-   *
-   * Container State Flow:
-   *
-   * [OPEN]--------------->[CLOSING]--------------->[QUASI_CLOSED]
-   *          (FINALIZE)      |      (QUASI_CLOSE)        |
-   *                          |                           |
-   *                          |                           |
-   *                  (CLOSE) |             (FORCE_CLOSE) |
-   *                          |                           |
-   *                          |                           |
-   *                          +--------->[CLOSED]<--------+
-   *                                        |
-   *                                (DELETE)|
-   *                                        |
-   *                                        |
-   *                                   [DELETING]
-   *                                        |
-   *                              (CLEANUP) |
-   *                                        |
-   *                                        V
-   *                                    [DELETED]
-   *
-   */
-  private void initializeStateMachine() {
-    stateMachine.addTransition(LifeCycleState.OPEN,
-        LifeCycleState.CLOSING,
-        LifeCycleEvent.FINALIZE);
-
-    stateMachine.addTransition(LifeCycleState.CLOSING,
-        LifeCycleState.QUASI_CLOSED,
-        LifeCycleEvent.QUASI_CLOSE);
-
-    stateMachine.addTransition(LifeCycleState.CLOSING,
-        LifeCycleState.CLOSED,
-        LifeCycleEvent.CLOSE);
-
-    stateMachine.addTransition(LifeCycleState.QUASI_CLOSED,
-        LifeCycleState.CLOSED,
-        LifeCycleEvent.FORCE_CLOSE);
-
-    stateMachine.addTransition(LifeCycleState.CLOSED,
-        LifeCycleState.DELETING,
-        LifeCycleEvent.DELETE);
-
-    stateMachine.addTransition(LifeCycleState.DELETING,
-        LifeCycleState.DELETED,
-        LifeCycleEvent.CLEANUP);
-  }
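
For reference, the transition table above can be walked end to end. A minimal
sketch, assuming the stateMachine configured in initializeStateMachine() and
handling the checked InvalidStateTransitionException that getNextState
declares:

LifeCycleState s = LifeCycleState.OPEN;
try {
  s = stateMachine.getNextState(s, LifeCycleEvent.FINALIZE); // CLOSING
  s = stateMachine.getNextState(s, LifeCycleEvent.CLOSE);    // CLOSED
  s = stateMachine.getNextState(s, LifeCycleEvent.DELETE);   // DELETING
  s = stateMachine.getNextState(s, LifeCycleEvent.CLEANUP);  // DELETED
} catch (InvalidStateTransitionException e) {
  // An event with no registered transition, e.g. DELETE on an OPEN
  // container, lands here.
  throw new IllegalStateException(e);
}
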
-
-
-  void loadContainer(final ContainerInfo containerInfo) throws SCMException {
-    containers.addContainer(containerInfo);
-    containerCount.set(Long.max(
-        containerInfo.getContainerID(), containerCount.get()));
-    containerStateCount.incrementAndGet(containerInfo.getState());
-  }
-
-  /**
-   * Allocates a new container based on the type, replication etc.
-   *
-   * @param pipelineManager - Pipeline Manager class.
-   * @param type - Replication type.
-   * @param replicationFactor - Replication factor.
-   * @param owner - Owner of the container.
-   * @return ContainerInfo for the allocated container.
-   * @throws IOException on Failure.
-   */
-  ContainerInfo allocateContainer(final PipelineManager pipelineManager,
-      final HddsProtos.ReplicationType type,
-      final HddsProtos.ReplicationFactor replicationFactor, final String owner)
-      throws IOException {
-
-    Pipeline pipeline;
-    try {
-      // TODO: #CLUTIL remove creation logic when all replication types and
-      // factors are handled by pipeline creator job.
-      pipeline = pipelineManager.createPipeline(type, replicationFactor);
-    } catch (IOException e) {
-      final List<Pipeline> pipelines = pipelineManager
-          .getPipelines(type, replicationFactor, Pipeline.PipelineState.OPEN);
-      if (pipelines.isEmpty()) {
-        throw new IOException("Could not allocate container. Cannot get any" +
-            " matching pipeline for Type:" + type +
-            ", Factor:" + replicationFactor + ", State:PipelineState.OPEN");
-      }
-      pipeline = pipelines.get((int) containerCount.get() % pipelines.size());
-    }
-    synchronized (pipeline) {
-      return allocateContainer(pipelineManager, owner, pipeline);
-    }
-  }
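
The fallback path above picks an existing OPEN pipeline round-robin, keyed by
the running container count. A worked example with hypothetical numbers:

// Hypothetical values: 7 containers allocated so far, 3 OPEN pipelines.
long allocatedSoFar = 7;
int openPipelineCount = 3;
// Mirrors pipelines.get((int) containerCount.get() % pipelines.size()):
int index = (int) allocatedSoFar % openPipelineCount; // 7 % 3 = 1
// The pipeline at index 1 of the returned list hosts the new container.
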
-
-  /**
-   * Allocates a new container based on the type, replication etc.
-   * This method should be called only after the lock on the pipeline is held
-   * on which the container will be allocated.
-   *
-   * @param pipelineManager   - Pipeline Manager class.
-   * @param owner             - Owner of the container.
-   * @param pipeline          - Pipeline to which the container needs to be
-   *                          allocated.
-   * @return ContainerInfo for the allocated container.
-   * @throws IOException on Failure.
-   */
-  ContainerInfo allocateContainer(
-      final PipelineManager pipelineManager, final String owner,
-      Pipeline pipeline) throws IOException {
-    Preconditions.checkNotNull(pipeline,
-        "Pipeline couldn't be found for the new container. "
-            + "Do you have enough nodes?");
-
-    final long containerID = containerCount.incrementAndGet();
-    final ContainerInfo containerInfo = new ContainerInfo.Builder()
-        .setState(LifeCycleState.OPEN)
-        .setPipelineID(pipeline.getId())
-        .setUsedBytes(0)
-        .setNumberOfKeys(0)
-        .setStateEnterTime(Time.monotonicNow())
-        .setOwner(owner)
-        .setContainerID(containerID)
-        .setDeleteTransactionId(0)
-        .setReplicationFactor(pipeline.getFactor())
-        .setReplicationType(pipeline.getType())
-        .build();
-    Preconditions.checkNotNull(containerInfo);
-    containers.addContainer(containerInfo);
-    pipelineManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(containerID));
-    containerStateCount.incrementAndGet(containerInfo.getState());
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("New container allocated: {}", containerInfo);
-    }
-    return containerInfo;
-  }
-
-  /**
-   * Update the Container State to the next state.
-   *
-   * @param containerID - ContainerID
-   * @param event - LifeCycle Event
-   * @throws SCMException  on Failure.
-   */
-  void updateContainerState(final ContainerID containerID,
-      final HddsProtos.LifeCycleEvent event)
-      throws SCMException, ContainerNotFoundException {
-    final ContainerInfo info = containers.getContainerInfo(containerID);
-    try {
-      final LifeCycleState oldState = info.getState();
-      final LifeCycleState newState = stateMachine.getNextState(
-          info.getState(), event);
-      containers.updateState(containerID, info.getState(), newState);
-      containerStateCount.incrementAndGet(newState);
-      containerStateCount.decrementAndGet(oldState);
-    } catch (InvalidStateTransitionException ex) {
-      String error = String.format("Failed to update container state %s, " +
-              "reason: invalid state transition from state: %s upon " +
-              "event: %s.",
-          containerID, info.getState(), event);
-      LOG.error(error);
-      throw new SCMException(error, FAILED_TO_CHANGE_CONTAINER_STATE);
-    }
-  }
-
-  /**
-   * Update deleteTransactionId for a container.
-   *
-   * @param deleteTransactionMap maps containerId to its new
-   *                             deleteTransactionID
-   */
-  void updateDeleteTransactionId(
-      final Map<Long, Long> deleteTransactionMap) {
-    deleteTransactionMap.forEach((k, v) -> {
-      try {
-        containers.getContainerInfo(ContainerID.valueof(k))
-            .updateDeleteTransactionId(v);
-      } catch (ContainerNotFoundException e) {
-        LOG.warn("Exception while updating delete transaction id.", e);
-      }
-    });
-  }
-
-
-  /**
-   * Return a container matching the attributes specified.
-   *
-   * @param size         - Space needed in the Container.
-   * @param owner        - Owner of the container - A specific nameservice.
-   * @param pipelineID   - ID of the pipeline
-   * @param containerIDs - Set of containerIDs to choose from
-   * @return ContainerInfo, or null if no match is found.
-   */
-  ContainerInfo getMatchingContainer(final long size, String owner,
-      PipelineID pipelineID, NavigableSet<ContainerID> containerIDs) {
-    if (containerIDs.isEmpty()) {
-      return null;
-    }
-
-    // Get the last used container and find container above the last used
-    // container ID.
-    final ContainerState key = new ContainerState(owner, pipelineID);
-    final ContainerID lastID =
-        lastUsedMap.getOrDefault(key, containerIDs.first());
-
-    // There is a small issue here. The first time, we will skip the first
-    // container. But in most cases it will not matter.
-    NavigableSet<ContainerID> resultSet = containerIDs.tailSet(lastID, false);
-    if (resultSet.isEmpty()) {
-      resultSet = containerIDs;
-    }
-
-    ContainerInfo selectedContainer =
-        findContainerWithSpace(size, resultSet, owner, pipelineID);
-    if (selectedContainer == null) {
-
-      // If we did not find any space in the tailSet, we need to look for
-      // space in the headSet. We pass true to handle the case of a lone
-      // container that has space: we skipped the last used container on the
-      // assumption that other containers with space exist, but with a single
-      // container that assumption fails. Hence we include the last used
-      // container as the last element in the sorted set.
-
-      resultSet = containerIDs.headSet(lastID, true);
-      selectedContainer =
-          findContainerWithSpace(size, resultSet, owner, pipelineID);
-    }
-
-    return selectedContainer;
-  }
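
To make the tailSet/headSet wrap-around concrete, a self-contained sketch; a
plain TreeSet<Long> stands in for the ContainerID set, and hasSpace is a
hypothetical stand-in for findContainerWithSpace:

import java.util.NavigableSet;
import java.util.TreeSet;

public final class WrapAroundScan {

  // Hypothetical stand-in for findContainerWithSpace: even IDs "have space".
  static boolean hasSpace(long id) {
    return id % 2 == 0;
  }

  static Long findIn(NavigableSet<Long> set) {
    for (long id : set) {
      if (hasSpace(id)) {
        return id;
      }
    }
    return null;
  }

  static Long getMatching(NavigableSet<Long> ids, long lastUsed) {
    // Scan strictly above the last used ID first...
    NavigableSet<Long> result = ids.tailSet(lastUsed, false);
    if (result.isEmpty()) {
      result = ids;
    }
    Long selected = findIn(result);
    if (selected == null) {
      // ...then wrap around, including lastUsed itself this time.
      selected = findIn(ids.headSet(lastUsed, true));
    }
    return selected;
  }

  public static void main(String[] args) {
    NavigableSet<Long> ids = new TreeSet<>(java.util.List.of(1L, 3L, 4L, 7L));
    // tailSet {7} has no space, so the scan wraps around and prints 4.
    System.out.println(getMatching(ids, 4L));
  }
}
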
-
-  private ContainerInfo findContainerWithSpace(final long size,
-      final NavigableSet<ContainerID> searchSet, final String owner,
-      final PipelineID pipelineID) {
-    try {
-      // Get the container with space to meet our request.
-      for (ContainerID id : searchSet) {
-        final ContainerInfo containerInfo = containers.getContainerInfo(id);
-        if (containerInfo.getUsedBytes() + size <= this.containerSize) {
-          containerInfo.updateLastUsedTime();
-          return containerInfo;
-        }
-      }
-    } catch (ContainerNotFoundException e) {
-      // This should not happen!
-      LOG.warn("Exception while finding container with space", e);
-    }
-    return null;
-  }
-
-  Set<ContainerID> getAllContainerIDs() {
-    return containers.getAllContainerIDs();
-  }
-
-  /**
-   * Returns Containers by State.
-   *
-   * @param state - State - Open, Closed etc.
-   * @return List of containers by state.
-   */
-  Set<ContainerID> getContainerIDsByState(final LifeCycleState state) {
-    return containers.getContainerIDsByState(state);
-  }
-
-  /**
-   * Get count of containers in the current {@link LifeCycleState}.
-   *
-   * @param state {@link LifeCycleState}
-   * @return Count of containers
-   */
-  Integer getContainerCountByState(final LifeCycleState state) {
-    return (int) containerStateCount.get(state);
-  }
-
-  /**
-   * Returns a set of ContainerIDs that match the Container.
-   *
-   * @param owner  Owner of the Containers.
-   * @param type - Replication Type of the containers
-   * @param factor - Replication factor of the containers.
-   * @param state - Current State, like Open, Close etc.
-   * @return Set of containers that match the specific query parameters.
-   */
-  NavigableSet<ContainerID> getMatchingContainerIDs(final String owner,
-      final ReplicationType type, final ReplicationFactor factor,
-      final LifeCycleState state) {
-    return containers.getMatchingContainerIDs(state, owner,
-        factor, type);
-  }
-
-  /**
-   * Returns the ContainerInfo for the given container ID.
-   * @param containerID ID of the container
-   * @return ContainerInfo of the container
-   * @throws ContainerNotFoundException if the container does not exist
-   */
-  ContainerInfo getContainer(final ContainerID containerID)
-      throws ContainerNotFoundException {
-    return containers.getContainerInfo(containerID);
-  }
-
-  void close() throws IOException {
-  }
-
-  /**
-   * Returns the latest set of replicas for the given containerID. Throws a
-   * ContainerNotFoundException if no entry is found for the given
-   * containerID.
-   *
-   * @param containerID - ID of the container.
-   * @return Set of ContainerReplica for the container.
-   */
-  Set<ContainerReplica> getContainerReplicas(
-      final ContainerID containerID) throws ContainerNotFoundException {
-    return containers.getContainerReplicas(containerID);
-  }
-
-  /**
-   * Add a container replica for the given DataNode.
-   *
-   * @param containerID - ID of the container.
-   * @param replica - replica to be added.
-   */
-  void updateContainerReplica(final ContainerID containerID,
-      final ContainerReplica replica) throws ContainerNotFoundException {
-    containers.updateContainerReplica(containerID, replica);
-  }
-
-  /**
-   * Remove a container replica for the given DataNode.
-   *
-   * @param containerID - ID of the container.
-   * @param replica - replica to be removed.
-   */
-  void removeContainerReplica(final ContainerID containerID,
-      final ContainerReplica replica)
-      throws ContainerNotFoundException, ContainerReplicaNotFoundException {
-    containers.removeContainerReplica(containerID, replica);
-  }
-
-  void removeContainer(final ContainerID containerID)
-      throws ContainerNotFoundException {
-    containers.removeContainer(containerID);
-  }
-
-  /**
-   * Update the lastUsedMap with the given ContainerState and ContainerID.
-   * @param pipelineID - ID of the pipeline.
-   * @param containerID - ID of the container.
-   * @param owner - Owner of the container.
-   */
-  public synchronized void updateLastUsedMap(PipelineID pipelineID,
-      ContainerID containerID, String owner) {
-    lastUsedMap.put(new ContainerState(owner, pipelineID),
-        containerID);
-  }
-
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
deleted file mode 100644
index b581000..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos
-    .ContainerReplicaProto;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .IncrementalContainerReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Handles incremental container reports from datanodes.
- */
-public class IncrementalContainerReportHandler extends
-    AbstractContainerReportHandler
-    implements EventHandler<IncrementalContainerReportFromDatanode> {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      IncrementalContainerReportHandler.class);
-
-  private final NodeManager nodeManager;
-
-  public IncrementalContainerReportHandler(
-      final NodeManager nodeManager,
-      final ContainerManager containerManager)  {
-    super(containerManager, LOG);
-    this.nodeManager = nodeManager;
-  }
-
-  @Override
-  public void onMessage(final IncrementalContainerReportFromDatanode report,
-                        final EventPublisher publisher) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Processing incremental container report from data node {}",
-          report.getDatanodeDetails().getUuid());
-    }
-
-    boolean success = true;
-    for (ContainerReplicaProto replicaProto :
-        report.getReport().getReportList()) {
-      try {
-        final DatanodeDetails dd = report.getDatanodeDetails();
-        final ContainerID id = ContainerID.valueof(
-            replicaProto.getContainerID());
-        nodeManager.addContainer(dd, id);
-        processContainerReplica(dd, replicaProto);
-      } catch (ContainerNotFoundException e) {
-        success = false;
-        LOG.warn("Container {} not found!", replicaProto.getContainerID());
-      } catch (NodeNotFoundException ex) {
-        success = false;
-        LOG.error("Received ICR from unknown datanode {} {}",
-            report.getDatanodeDetails(), ex);
-      } catch (IOException e) {
-        success = false;
-        LOG.error("Exception while processing ICR for container {}",
-            replicaProto.getContainerID());
-      }
-    }
-
-    getContainerManager().notifyContainerReportProcessing(false, success);
-
-  }
-
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
deleted file mode 100644
index 5540d73..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
+++ /dev/null
@@ -1,875 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.StringJoiner;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.function.Consumer;
-import java.util.function.Predicate;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hdds.conf.Config;
-import org.apache.hadoop.hdds.conf.ConfigGroup;
-import org.apache.hadoop.hdds.conf.ConfigType;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsInfo;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.ozone.lock.LockManager;
-import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.util.ExitUtil;
-import org.apache.hadoop.util.Time;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.protobuf.GeneratedMessage;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
-import static org.apache.hadoop.hdds.conf.ConfigTag.SCM;
-import org.apache.ratis.util.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Replication Manager (RM) is responsible for making sure that containers
- * are properly replicated. Replication Manager deals only with
- * QUASI_CLOSED / CLOSED containers.
- */
-public class ReplicationManager implements MetricsSource {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ReplicationManager.class);
-
-  public static final String METRICS_SOURCE_NAME = "SCMReplicationManager";
-
-  /**
-   * Reference to the ContainerManager.
-   */
-  private final ContainerManager containerManager;
-
-  /**
-   * PlacementPolicy which is used to identify where a container
-   * should be replicated.
-   */
-  private final ContainerPlacementPolicy containerPlacement;
-
-  /**
-   * EventPublisher to fire Replicate and Delete container events.
-   */
-  private final EventPublisher eventPublisher;
-
-  /**
-   * Used for locking a container using its ID while processing it.
-   */
-  private final LockManager<ContainerID> lockManager;
-
-  /**
-   * This is used for tracking container replication commands which are issued
-   * by ReplicationManager and not yet complete.
-   */
-  private final Map<ContainerID, List<InflightAction>> inflightReplication;
-
-  /**
-   * This is used for tracking container deletion commands which are issued
-   * by ReplicationManager and not yet complete.
-   */
-  private final Map<ContainerID, List<InflightAction>> inflightDeletion;
-
-  /**
-   * ReplicationManager specific configuration.
-   */
-  private final ReplicationManagerConfiguration conf;
-
-  /**
-   * ReplicationMonitor thread wakes up at the configured interval and
-   * processes all the containers.
-   */
-  private Thread replicationMonitor;
-
-  /**
-   * Flag used for checking if the ReplicationMonitor thread is running or
-   * not.
-   */
-  private volatile boolean running;
-
-  /**
-   * Constructs ReplicationManager instance with the given configuration.
-   *
-   * @param conf OzoneConfiguration
-   * @param containerManager ContainerManager
-   * @param containerPlacement ContainerPlacementPolicy
-   * @param eventPublisher EventPublisher
-   */
-  public ReplicationManager(final ReplicationManagerConfiguration conf,
-                            final ContainerManager containerManager,
-                            final ContainerPlacementPolicy containerPlacement,
-                            final EventPublisher eventPublisher,
-                            final LockManager<ContainerID> lockManager) {
-    this.containerManager = containerManager;
-    this.containerPlacement = containerPlacement;
-    this.eventPublisher = eventPublisher;
-    this.lockManager = lockManager;
-    this.conf = conf;
-    this.running = false;
-    this.inflightReplication = new ConcurrentHashMap<>();
-    this.inflightDeletion = new ConcurrentHashMap<>();
-  }
-
-  /**
-   * Starts Replication Monitor thread.
-   */
-  public synchronized void start() {
-
-    if (!isRunning()) {
-      DefaultMetricsSystem.instance().register(METRICS_SOURCE_NAME,
-          "SCM Replication manager (closed container replication) related "
-              + "metrics",
-          this);
-      LOG.info("Starting Replication Monitor Thread.");
-      running = true;
-      replicationMonitor = new Thread(this::run);
-      replicationMonitor.setName("ReplicationMonitor");
-      replicationMonitor.setDaemon(true);
-      replicationMonitor.start();
-    } else {
-      LOG.info("Replication Monitor Thread is already running.");
-    }
-  }
-
-  /**
-   * Returns true if the Replication Monitor Thread is running.
-   *
-   * @return true if running, false otherwise
-   */
-  public boolean isRunning() {
-    if (!running) {
-      synchronized (this) {
-        return replicationMonitor != null
-            && replicationMonitor.isAlive();
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Process all the containers immediately.
-   */
-  @VisibleForTesting
-  @SuppressFBWarnings(value="NN_NAKED_NOTIFY",
-      justification="Used only for testing")
-  public synchronized void processContainersNow() {
-    notify();
-  }
-
-  /**
-   * Stops Replication Monitor thread.
-   */
-  public synchronized void stop() {
-    if (running) {
-      LOG.info("Stopping Replication Monitor Thread.");
-      inflightReplication.clear();
-      inflightDeletion.clear();
-      running = false;
-      notify();
-    } else {
-      LOG.info("Replication Monitor Thread is not running.");
-    }
-  }
-
-  /**
-   * ReplicationMonitor thread runnable. This wakes up at the configured
-   * interval and processes all the containers in the system.
-   */
-  private synchronized void run() {
-    try {
-      while (running) {
-        final long start = Time.monotonicNow();
-        final Set<ContainerID> containerIds =
-            containerManager.getContainerIDs();
-        containerIds.forEach(this::processContainer);
-
-        LOG.info("Replication Monitor Thread took {} milliseconds for" +
-                " processing {} containers.", Time.monotonicNow() - start,
-            containerIds.size());
-
-        wait(conf.getInterval());
-      }
-    } catch (Throwable t) {
-      // When we get runtime exception, we should terminate SCM.
-      LOG.error("Exception in Replication Monitor Thread.", t);
-      ExitUtil.terminate(1, t);
-    }
-  }
-
-  /**
-   * Process the given container.
-   *
-   * @param id ContainerID
-   */
-  private void processContainer(ContainerID id) {
-    lockManager.lock(id);
-    try {
-      final ContainerInfo container = containerManager.getContainer(id);
-      final Set<ContainerReplica> replicas = containerManager
-          .getContainerReplicas(container.containerID());
-      final LifeCycleState state = container.getState();
-
-      /*
-       * We don't take any action if the container is in OPEN state.
-       */
-      if (state == LifeCycleState.OPEN) {
-        return;
-      }
-
-      /*
-       * If the container is in CLOSING state, the replicas can be either
-       * in OPEN or in CLOSING state. In both of these cases
-       * we have to resend the close container command to the datanodes.
-       */
-      if (state == LifeCycleState.CLOSING) {
-        replicas.forEach(replica -> sendCloseCommand(
-            container, replica.getDatanodeDetails(), false));
-        return;
-      }
-
-      /*
-       * If the container is in QUASI_CLOSED state, check and close the
-       * container if possible.
-       */
-      if (state == LifeCycleState.QUASI_CLOSED &&
-          canForceCloseContainer(container, replicas)) {
-        forceCloseContainer(container, replicas);
-        return;
-      }
-
-      /*
-       * Before processing the container we have to reconcile the
-       * inflightReplication and inflightDeletion actions.
-       *
-       * We remove the entry from inflightReplication and inflightDeletion
-       * list, if the operation is completed or if it has timed out.
-       */
-      updateInflightAction(container, inflightReplication,
-          action -> replicas.stream()
-              .anyMatch(r -> r.getDatanodeDetails().equals(action.datanode)));
-
-      updateInflightAction(container, inflightDeletion,
-          action -> replicas.stream()
-              .noneMatch(r -> r.getDatanodeDetails().equals(action.datanode)));
-
-
-      /*
-       * We don't have to take any action if the container is healthy.
-       *
-       * According to ReplicationMonitor, a container is considered healthy
-       * if the container is either in QUASI_CLOSED or in CLOSED state and
-       * has the exact number of replicas in the same state.
-       */
-      if (isContainerHealthy(container, replicas)) {
-        return;
-      }
-
-      /*
-       * Check if the container is under replicated and take appropriate
-       * action.
-       */
-      if (isContainerUnderReplicated(container, replicas)) {
-        handleUnderReplicatedContainer(container, replicas);
-        return;
-      }
-
-      /*
-       * Check if the container is over replicated and take appropriate
-       * action.
-       */
-      if (isContainerOverReplicated(container, replicas)) {
-        handleOverReplicatedContainer(container, replicas);
-        return;
-      }
-
-      /*
-       * The container is neither under nor over replicated, and yet the
-       * container is not healthy. This means that the container has an
-       * unhealthy/corrupted replica.
-       */
-      handleUnstableContainer(container, replicas);
-
-    } catch (ContainerNotFoundException ex) {
-      LOG.warn("Missing container {}.", id);
-    } finally {
-      lockManager.unlock(id);
-    }
-  }
-
-  /**
-   * Reconciles the InflightActions for a given container.
-   *
-   * @param container Container to update
-   * @param inflightActions inflightReplication (or) inflightDeletion
-   * @param filter filter to check if the operation is completed
-   */
-  private void updateInflightAction(final ContainerInfo container,
-      final Map<ContainerID, List<InflightAction>> inflightActions,
-      final Predicate<InflightAction> filter) {
-    final ContainerID id = container.containerID();
-    final long deadline = Time.monotonicNow() - conf.getEventTimeout();
-    if (inflightActions.containsKey(id)) {
-      final List<InflightAction> actions = inflightActions.get(id);
-      actions.removeIf(action -> action.time < deadline);
-      actions.removeIf(filter);
-      if (actions.isEmpty()) {
-        inflightActions.remove(id);
-      }
-    }
-  }
-
-  /**
-   * Returns true if the container is healthy according to ReplicationMonitor.
-   *
-   * According to ReplicationMonitor, a container is considered healthy if
-   * it has the exact number of replicas, all in the same state as the
-   * container.
-   *
-   * @param container Container to check
-   * @param replicas Set of ContainerReplicas
-   * @return true if the container is healthy, false otherwise
-   */
-  private boolean isContainerHealthy(final ContainerInfo container,
-                                     final Set<ContainerReplica> replicas) {
-    return container.getReplicationFactor().getNumber() == replicas.size() &&
-        replicas.stream().allMatch(
-            r -> compareState(container.getState(), r.getState()));
-  }
-
-  /**
-   * Checks if the container is under replicated or not.
-   *
-   * @param container Container to check
-   * @param replicas Set of ContainerReplicas
-   * @return true if the container is under replicated, false otherwise
-   */
-  private boolean isContainerUnderReplicated(final ContainerInfo container,
-      final Set<ContainerReplica> replicas) {
-    return container.getReplicationFactor().getNumber() >
-        getReplicaCount(container.containerID(), replicas);
-  }
-
-  /**
-   * Checks if the container is over replicated or not.
-   *
-   * @param container Container to check
-   * @param replicas Set of ContainerReplicas
-   * @return true if the container is over replicated, false otherwise
-   */
-  private boolean isContainerOverReplicated(final ContainerInfo container,
-      final Set<ContainerReplica> replicas) {
-    return container.getReplicationFactor().getNumber() <
-        getReplicaCount(container.containerID(), replicas);
-  }
-
-  /**
-   * Returns the replication count of the given container. This also
-   * considers inflight replication and deletion.
-   *
-   * @param id ContainerID
-   * @param replicas Set of existing replicas
-   * @return number of estimated replicas for this container
-   */
-  private int getReplicaCount(final ContainerID id,
-                              final Set<ContainerReplica> replicas) {
-    return replicas.size()
-        + inflightReplication.getOrDefault(id, Collections.emptyList()).size()
-        - inflightDeletion.getOrDefault(id, Collections.emptyList()).size();
-  }
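
The effective count folds inflight work into the decision. A worked example
with hypothetical numbers:

int reported = 3;      // replicas currently reported by datanodes
int replicating = 1;   // inflight replication commands, not yet complete
int deleting = 2;      // inflight deletion commands, not yet complete
int effective = reported + replicating - deleting; // 3 + 1 - 2 = 2
// A factor-3 container with an effective count of 2 is treated as under
// replicated, so one more replication command would be issued.
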
-
-  /**
-   * Returns true if more than 50% of the container replicas with unique
-   * originNodeId are in QUASI_CLOSED state.
-   *
-   * @param container Container to check
-   * @param replicas Set of ContainerReplicas
-   * @return true if we can force close the container, false otherwise
-   */
-  private boolean canForceCloseContainer(final ContainerInfo container,
-      final Set<ContainerReplica> replicas) {
-    Preconditions.assertTrue(container.getState() ==
-        LifeCycleState.QUASI_CLOSED);
-    final int replicationFactor = container.getReplicationFactor().getNumber();
-    final long uniqueQuasiClosedReplicaCount = replicas.stream()
-        .filter(r -> r.getState() == State.QUASI_CLOSED)
-        .map(ContainerReplica::getOriginDatanodeId)
-        .distinct()
-        .count();
-    return uniqueQuasiClosedReplicaCount > (replicationFactor / 2);
-  }
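
A self-contained sketch of the quorum check above, with origin-node UUIDs
standing in for full ContainerReplica objects (hypothetical values):

import java.util.List;
import java.util.UUID;

public final class QuorumCheck {
  public static void main(String[] args) {
    UUID x = UUID.randomUUID();
    UUID y = UUID.randomUUID();
    // Three QUASI_CLOSED replicas, but two of them were copied from the
    // same origin node x, so only two unique origins exist.
    List<UUID> quasiClosedOrigins = List.of(x, x, y);
    int replicationFactor = 3;
    long unique = quasiClosedOrigins.stream().distinct().count(); // 2
    // Integer division: 3 / 2 == 1, and 2 > 1 -> force close is allowed.
    System.out.println(unique > replicationFactor / 2); // true
  }
}
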
-
-  /**
-   * Force close the container replica(s) with the highest sequence ID.
-   *
-   * <p>
-   *   Note: We should force close the container only if >50% (quorum)
-   *   of replicas with unique originNodeId are in QUASI_CLOSED state.
-   * </p>
-   *
-   * @param container ContainerInfo
-   * @param replicas Set of ContainerReplicas
-   */
-  private void forceCloseContainer(final ContainerInfo container,
-                                   final Set<ContainerReplica> replicas) {
-    Preconditions.assertTrue(container.getState() ==
-        LifeCycleState.QUASI_CLOSED);
-
-    final List<ContainerReplica> quasiClosedReplicas = replicas.stream()
-        .filter(r -> r.getState() == State.QUASI_CLOSED)
-        .collect(Collectors.toList());
-
-    final Long sequenceId = quasiClosedReplicas.stream()
-        .map(ContainerReplica::getSequenceId)
-        .max(Long::compare)
-        .orElse(-1L);
-
-    LOG.info("Force closing container {} with BCSID {}," +
-        " which is in QUASI_CLOSED state.",
-        container.containerID(), sequenceId);
-
-    quasiClosedReplicas.stream()
-        .filter(r -> sequenceId != -1L)
-        .filter(replica -> replica.getSequenceId().equals(sequenceId))
-        .forEach(replica -> sendCloseCommand(
-            container, replica.getDatanodeDetails(), true));
-  }
-
-  /**
-   * If the given container is under replicated, identify a new set of
-   * datanode(s) to replicate the container using ContainerPlacementPolicy
-   * and send replicate container command to the identified datanode(s).
-   *
-   * @param container ContainerInfo
-   * @param replicas Set of ContainerReplicas
-   */
-  private void handleUnderReplicatedContainer(final ContainerInfo container,
-      final Set<ContainerReplica> replicas) {
-    LOG.debug("Handling underreplicated container: {}",
-        container.getContainerID());
-    try {
-      final ContainerID id = container.containerID();
-      final List<DatanodeDetails> deletionInFlight = inflightDeletion
-          .getOrDefault(id, Collections.emptyList())
-          .stream()
-          .map(action -> action.datanode)
-          .collect(Collectors.toList());
-      final List<DatanodeDetails> source = replicas.stream()
-          .filter(r ->
-              r.getState() == State.QUASI_CLOSED ||
-              r.getState() == State.CLOSED)
-          .filter(r -> !deletionInFlight.contains(r.getDatanodeDetails()))
-          .sorted((r1, r2) -> r2.getSequenceId().compareTo(r1.getSequenceId()))
-          .map(ContainerReplica::getDatanodeDetails)
-          .collect(Collectors.toList());
-      if (!source.isEmpty()) {
-        final int replicationFactor = container
-            .getReplicationFactor().getNumber();
-        final int delta = replicationFactor - getReplicaCount(id, replicas);
-        final List<DatanodeDetails> excludeList = replicas.stream()
-            .map(ContainerReplica::getDatanodeDetails)
-            .collect(Collectors.toList());
-        List<InflightAction> actionList = inflightReplication.get(id);
-        if (actionList != null) {
-          actionList.stream().map(r -> r.datanode)
-              .forEach(excludeList::add);
-        }
-        final List<DatanodeDetails> selectedDatanodes = containerPlacement
-            .chooseDatanodes(excludeList, null, delta,
-                container.getUsedBytes());
-
-        LOG.info("Container {} is under replicated. Expected replica count" +
-                " is {}, but found {}.", id, replicationFactor,
-            replicationFactor - delta);
-
-        for (DatanodeDetails datanode : selectedDatanodes) {
-          sendReplicateCommand(container, datanode, source);
-        }
-      } else {
-        LOG.warn("Cannot replicate container {}, no healthy replica found.",
-            container.containerID());
-      }
-    } catch (IOException ex) {
-      LOG.warn("Exception while replicating container {}.",
-          container.getContainerID(), ex);
-    }
-  }
-
-  /**
-   * If the given container is over replicated, identify the datanode(s)
-   * to delete the container and send delete container command to the
-   * identified datanode(s).
-   *
-   * @param container ContainerInfo
-   * @param replicas Set of ContainerReplicas
-   */
-  private void handleOverReplicatedContainer(final ContainerInfo container,
-      final Set<ContainerReplica> replicas) {
-
-    final ContainerID id = container.containerID();
-    final int replicationFactor = container.getReplicationFactor().getNumber();
-    // Don't consider inflight replication while calculating excess here.
-    final int excess = replicas.size() - replicationFactor -
-        inflightDeletion.getOrDefault(id, Collections.emptyList()).size();
-
-    if (excess > 0) {
-
-      LOG.info("Container {} is over replicated. Expected replica count" +
-              " is {}, but found {}.", id, replicationFactor,
-          replicationFactor + excess);
-
-      final Map<UUID, ContainerReplica> uniqueReplicas =
-          new LinkedHashMap<>();
-
-      replicas.stream()
-          .filter(r -> compareState(container.getState(), r.getState()))
-          .forEach(r -> uniqueReplicas
-              .putIfAbsent(r.getOriginDatanodeId(), r));
-
-      // Retain one healthy replica per origin node Id.
-      final List<ContainerReplica> eligibleReplicas = new ArrayList<>(replicas);
-      eligibleReplicas.removeAll(uniqueReplicas.values());
-
-      final List<ContainerReplica> unhealthyReplicas = eligibleReplicas
-          .stream()
-          .filter(r -> !compareState(container.getState(), r.getState()))
-          .collect(Collectors.toList());
-
-      //Move the unhealthy replicas to the front of eligible replicas to delete
-      eligibleReplicas.removeAll(unhealthyReplicas);
-      eligibleReplicas.addAll(0, unhealthyReplicas);
-
-      for (int i = 0; i < excess; i++) {
-        sendDeleteCommand(container,
-            eligibleReplicas.get(i).getDatanodeDetails(), true);
-      }
-    }
-  }
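
A worked example of the excess computation above, with hypothetical numbers:

int reported = 5;        // replicas reported for the container
int factor = 3;          // replication factor
int inflightDeletes = 1; // deletions already in flight
int excess = reported - factor - inflightDeletes; // 5 - 3 - 1 = 1
// One more delete command is sent, preferring unhealthy replicas and
// keeping one healthy replica per unique origin node.
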
-
-  /**
-   * Handles an unstable container.
-   * A container is inconsistent if any replica's state doesn't
-   * match the container state. We have to take appropriate action
-   * based on the state of each replica.
-   *
-   * @param container ContainerInfo
-   * @param replicas Set of ContainerReplicas
-   */
-  private void handleUnstableContainer(final ContainerInfo container,
-      final Set<ContainerReplica> replicas) {
-    // Find unhealthy replicas
-    List<ContainerReplica> unhealthyReplicas = replicas.stream()
-        .filter(r -> !compareState(container.getState(), r.getState()))
-        .collect(Collectors.toList());
-
-    Iterator<ContainerReplica> iterator = unhealthyReplicas.iterator();
-    while (iterator.hasNext()) {
-      final ContainerReplica replica = iterator.next();
-      final State state = replica.getState();
-      if (state == State.OPEN || state == State.CLOSING) {
-        sendCloseCommand(container, replica.getDatanodeDetails(), false);
-        iterator.remove();
-      }
-
-      if (state == State.QUASI_CLOSED) {
-        // Send force close command if the BCSID matches
-        if (container.getSequenceId() == replica.getSequenceId()) {
-          sendCloseCommand(container, replica.getDatanodeDetails(), true);
-          iterator.remove();
-        }
-      }
-    }
-
-    // Now we are left with the replicas which are either unhealthy or
-    // the BCSID doesn't match. These replicas should be deleted.
-
-    /*
-     * If we have unhealthy replicas, deleting one makes the container
-     * under replicated, and we then re-replicate the healthy copy.
-     *
-     * We also make sure that we delete only one unhealthy replica at a time.
-     *
-     * If there are two unhealthy replicas:
-     *  - Delete first unhealthy replica
-     *  - Re-replicate the healthy copy
-     *  - Delete second unhealthy replica
-     *  - Re-replicate the healthy copy
-     *
-     * Note: Only one action will be executed in a single ReplicationMonitor
-     *       iteration. So to complete all the above actions we need four
-     *       ReplicationMonitor iterations.
-     */
-
-    unhealthyReplicas.stream().findFirst().ifPresent(replica ->
-        sendDeleteCommand(container, replica.getDatanodeDetails(), false));
-
-  }
-
-  /**
-   * Sends close container command for the given container to the given
-   * datanode.
-   *
-   * @param container Container to be closed
-   * @param datanode The datanode on which the container
-   *                  has to be closed
-   * @param force Should be set to true if we want to close a
-   *               QUASI_CLOSED container
-   */
-  private void sendCloseCommand(final ContainerInfo container,
-                                final DatanodeDetails datanode,
-                                final boolean force) {
-
-    LOG.info("Sending close container command for container {}" +
-            " to datanode {}.", container.containerID(), datanode);
-
-    CloseContainerCommand closeContainerCommand =
-        new CloseContainerCommand(container.getContainerID(),
-            container.getPipelineID(), force);
-    eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND,
-        new CommandForDatanode<>(datanode.getUuid(), closeContainerCommand));
-  }
-
-  /**
-   * Sends replicate container command for the given container to the given
-   * datanode.
-   *
-   * @param container Container to be replicated
-   * @param datanode The destination datanode to replicate
-   * @param sources List of source nodes from where we can replicate
-   */
-  private void sendReplicateCommand(final ContainerInfo container,
-                                    final DatanodeDetails datanode,
-                                    final List<DatanodeDetails> sources) {
-
-    LOG.info("Sending replicate container command for container {}" +
-            " to datanode {}", container.containerID(), datanode);
-
-    final ContainerID id = container.containerID();
-    final ReplicateContainerCommand replicateCommand =
-        new ReplicateContainerCommand(id.getId(), sources);
-    inflightReplication.computeIfAbsent(id, k -> new ArrayList<>());
-    sendAndTrackDatanodeCommand(datanode, replicateCommand,
-        action -> inflightReplication.get(id).add(action));
-  }
-
-  /**
-   * Sends delete container command for the given container to the given
-   * datanode.
-   *
-   * @param container Container to be deleted
-   * @param datanode The datanode on which the replica should be deleted
-   * @param force Should be set to true to delete an OPEN replica
-   */
-  private void sendDeleteCommand(final ContainerInfo container,
-                                 final DatanodeDetails datanode,
-                                 final boolean force) {
-
-    LOG.info("Sending delete container command for container {}" +
-            " to datanode {}", container.containerID(), datanode);
-
-    final ContainerID id = container.containerID();
-    final DeleteContainerCommand deleteCommand =
-        new DeleteContainerCommand(id.getId(), force);
-    inflightDeletion.computeIfAbsent(id, k -> new ArrayList<>());
-    sendAndTrackDatanodeCommand(datanode, deleteCommand,
-        action -> inflightDeletion.get(id).add(action));
-  }
-
-  /**
-   * Creates CommandForDatanode with the given SCMCommand and fires
-   * DATANODE_COMMAND event to event queue.
-   *
-   * Tracks the command using the given tracker.
-   *
-   * @param datanode Datanode to which the command has to be sent
-   * @param command SCMCommand to be sent
-   * @param tracker Tracker which tracks the inflight actions
-   * @param <T> Type of SCMCommand
-   */
-  private <T extends GeneratedMessage> void sendAndTrackDatanodeCommand(
-      final DatanodeDetails datanode,
-      final SCMCommand<T> command,
-      final Consumer<InflightAction> tracker) {
-    final CommandForDatanode<T> datanodeCommand =
-        new CommandForDatanode<>(datanode.getUuid(), command);
-    eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND, datanodeCommand);
-    tracker.accept(new InflightAction(datanode, Time.monotonicNow()));
-  }
-
-  /**
-   * Compares the container state with the replica state.
-   *
-   * @param containerState ContainerState
-   * @param replicaState ReplicaState
-   * @return true if the state matches, false otherwise
-   */
-  private static boolean compareState(final LifeCycleState containerState,
-                                      final State replicaState) {
-    switch (containerState) {
-    case OPEN:
-      return replicaState == State.OPEN;
-    case CLOSING:
-      return replicaState == State.CLOSING;
-    case QUASI_CLOSED:
-      return replicaState == State.QUASI_CLOSED;
-    case CLOSED:
-      return replicaState == State.CLOSED;
-    case DELETING:
-      return false;
-    case DELETED:
-      return false;
-    default:
-      return false;
-    }
-  }
-
-  @Override
-  public void getMetrics(MetricsCollector collector, boolean all) {
-    collector.addRecord(ReplicationManager.class.getSimpleName())
-        .addGauge(ReplicationManagerMetrics.INFLIGHT_REPLICATION,
-            inflightReplication.size())
-        .addGauge(ReplicationManagerMetrics.INFLIGHT_DELETION,
-            inflightDeletion.size())
-        .endRecord();
-  }
-
-  /**
-   * Wrapper class to hold the InflightAction with its start time.
-   */
-  private static final class InflightAction {
-
-    private final DatanodeDetails datanode;
-    private final long time;
-
-    private InflightAction(final DatanodeDetails datanode,
-                           final long time) {
-      this.datanode = datanode;
-      this.time = time;
-    }
-  }
-
-  /**
-   * Configuration used by the Replication Manager.
-   */
-  @ConfigGroup(prefix = "hdds.scm.replication")
-  public static class ReplicationManagerConfiguration {
-    /**
-     * The frequency in which ReplicationMonitor thread should run.
-     */
-    private long interval = 5 * 60 * 1000;
-
-    /**
-     * Timeout for container replication & deletion command issued by
-     * ReplicationManager.
-     */
-    private long eventTimeout = 10 * 60 * 1000;
-
-    @Config(key = "thread.interval",
-        type = ConfigType.TIME,
-        defaultValue = "300s",
-        tags = {SCM, OZONE},
-        description = "When a heartbeat from the data node arrives on SCM, "
-            + "It is queued for processing with the time stamp of when the "
-            + "heartbeat arrived. There is a heartbeat processing thread "
-            + "inside "
-            + "SCM that runs at a specified interval. This value controls how "
-            + "frequently this thread is run.\n\n"
-            + "There are some assumptions build into SCM such as this "
-            + "value should allow the heartbeat processing thread to run at "
-            + "least three times more frequently than heartbeats and at least "
-            + "five times more than stale node detection time. "
-            + "If you specify a wrong value, SCM will gracefully refuse to "
-            + "run. "
-            + "For more info look at the node manager tests in SCM.\n"
-            + "\n"
-            + "In short, you don't need to change this."
-    )
-    public void setInterval(long interval) {
-      this.interval = interval;
-    }
-
-    @Config(key = "event.timeout",
-        type = ConfigType.TIME,
-        defaultValue = "10m",
-        tags = {SCM, OZONE},
-        description = "Timeout for the container replication/deletion commands "
-            + "sent  to datanodes. After this timeout the command will be "
-            + "retried.")
-    public void setEventTimeout(long eventTimeout) {
-      this.eventTimeout = eventTimeout;
-    }
-
-    public long getInterval() {
-      return interval;
-    }
-
-    public long getEventTimeout() {
-      return eventTimeout;
-    }
-  }
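
Given the @ConfigGroup prefix and @Config keys above, the fully qualified
setting names resolve to hdds.scm.replication.thread.interval and
hdds.scm.replication.event.timeout. A minimal sketch of overriding them,
assuming an OzoneConfiguration is available:

OzoneConfiguration conf = new OzoneConfiguration();
// Hypothetical overrides; defaults are 300s and 10m respectively.
conf.set("hdds.scm.replication.thread.interval", "120s");
conf.set("hdds.scm.replication.event.timeout", "15m");
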
-
-  /**
-   * Metric name definitions for Replication manager.
-   */
-  public enum ReplicationManagerMetrics implements MetricsInfo {
-
-    INFLIGHT_REPLICATION("Tracked inflight container replication requests."),
-    INFLIGHT_DELETION("Tracked inflight container deletion requests.");
-
-    private final String desc;
-
-    ReplicationManagerMetrics(String desc) {
-      this.desc = desc;
-    }
-
-    @Override
-    public String description() {
-      return desc;
-    }
-
-    @Override
-    public String toString() {
-      return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
-          .add("name=" + name())
-          .add("description=" + desc)
-          .toString();
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
deleted file mode 100644
index 470d4eb..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ /dev/null
@@ -1,592 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * <p>http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * <p>Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.metrics.SCMContainerManagerMetrics;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.hdds.utils.BatchOperation;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.Objects;
-import java.util.Set;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB;
-
-/**
- * SCMContainerManager maintains the mapping from a container to its
- * pipeline. This is used by SCM when allocating new locations and when
- * looking up a key.
- */
-public class SCMContainerManager implements ContainerManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SCMContainerManager.class);
-
-  private final Lock lock;
-  private final MetadataStore containerStore;
-  private final PipelineManager pipelineManager;
-  private final ContainerStateManager containerStateManager;
-  private final int numContainerPerOwnerInPipeline;
-
-  private final SCMContainerManagerMetrics scmContainerManagerMetrics;
-
-  /**
-   * Constructs a mapping class that maintains the mapping between
-   * containers and pipelines.
-   *
-   * @param conf - Configuration.
-   * @param nodeManager - NodeManager so that we can get the nodes that are
-   * healthy to place new containers.
-   * @param pipelineManager - PipelineManager.
-   * @param eventPublisher - EventPublisher to fire events.
-   * @throws IOException on Failure.
-   */
-  @SuppressWarnings("unchecked")
-  public SCMContainerManager(final Configuration conf,
-      final NodeManager nodeManager, PipelineManager pipelineManager,
-      final EventPublisher eventPublisher) throws IOException {
-
-    final File metaDir = ServerUtils.getScmDbDir(conf);
-    final File containerDBPath = new File(metaDir, SCM_CONTAINER_DB);
-    final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
-        OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-
-    this.containerStore = MetadataStoreBuilder.newBuilder()
-        .setConf(conf)
-        .setDbFile(containerDBPath)
-        .setCacheSize(cacheSize * OzoneConsts.MB)
-        .build();
-
-    this.lock = new ReentrantLock();
-    this.pipelineManager = pipelineManager;
-    this.containerStateManager = new ContainerStateManager(conf);
-    this.numContainerPerOwnerInPipeline = conf
-        .getInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
-            ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT);
-
-    loadExistingContainers();
-
-    scmContainerManagerMetrics = SCMContainerManagerMetrics.create();
-  }
-
-  private void loadExistingContainers() throws IOException {
-    List<Map.Entry<byte[], byte[]>> range = containerStore
-        .getSequentialRangeKVs(null, Integer.MAX_VALUE, null);
-    for (Map.Entry<byte[], byte[]> entry : range) {
-      ContainerInfo container = ContainerInfo.fromProtobuf(
-          ContainerInfoProto.PARSER.parseFrom(entry.getValue()));
-      Preconditions.checkNotNull(container);
-      containerStateManager.loadContainer(container);
-      if (container.getState() == LifeCycleState.OPEN) {
-        pipelineManager.addContainerToPipeline(container.getPipelineID(),
-            ContainerID.valueof(container.getContainerID()));
-      }
-    }
-  }
-
-  @VisibleForTesting
-  // TODO: remove this later.
-  public ContainerStateManager getContainerStateManager() {
-    return containerStateManager;
-  }
-
-  @Override
-  public Set<ContainerID> getContainerIDs() {
-    lock.lock();
-    try {
-      return containerStateManager.getAllContainerIDs();
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  @Override
-  public List<ContainerInfo> getContainers() {
-    lock.lock();
-    try {
-      return containerStateManager.getAllContainerIDs().stream().map(id -> {
-        try {
-          return containerStateManager.getContainer(id);
-        } catch (ContainerNotFoundException e) {
-          // Should not happen: we hold the lock, so the ID set and the
-          // state map stay in sync.
-          return null;
-        }
-      }).filter(Objects::nonNull).collect(Collectors.toList());
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  @Override
-  public List<ContainerInfo> getContainers(LifeCycleState state) {
-    lock.lock();
-    try {
-      return containerStateManager.getContainerIDsByState(state).stream()
-          .map(id -> {
-            try {
-              return containerStateManager.getContainer(id);
-            } catch (ContainerNotFoundException e) {
-              // Should not happen: we hold the lock, so no one can remove
-              // the container in between.
-              return null;
-            }
-          }).filter(Objects::nonNull).collect(Collectors.toList());
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * Get number of containers in the given state.
-   *
-   * @param state {@link LifeCycleState}
-   * @return Count
-   */
-  public Integer getContainerCountByState(LifeCycleState state) {
-    return containerStateManager.getContainerCountByState(state);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public ContainerInfo getContainer(final ContainerID containerID)
-      throws ContainerNotFoundException {
-    return containerStateManager.getContainer(containerID);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<ContainerInfo> listContainer(ContainerID startContainerID,
-      int count) {
-    lock.lock();
-    try {
-      scmContainerManagerMetrics.incNumListContainersOps();
-      final long startId = startContainerID == null ?
-          0 : startContainerID.getId();
-      final List<ContainerID> containersIds =
-          new ArrayList<>(containerStateManager.getAllContainerIDs());
-      Collections.sort(containersIds);
-
-      return containersIds.stream()
-          .filter(id -> id.getId() > startId)
-          .limit(count)
-          .map(id -> {
-            try {
-              return containerStateManager.getContainer(id);
-            } catch (ContainerNotFoundException ex) {
-              // This can never happen: while we hold the lock, no one else
-              // can remove the container after we got the container IDs.
-              LOG.warn("Container Missing.", ex);
-              return null;
-            }
-          }).collect(Collectors.toList());
-    } finally {
-      lock.unlock();
-    }
-  }
-
-
-  /**
-   * Allocates a new container.
-   *
-   * @param type - replication type of the container.
-   * @param replicationFactor - replication factor of the container.
-   * @param owner - The string name of the Service that owns this container.
-   * @return - ContainerInfo of the newly allocated container.
-   * @throws IOException on failure.
-   */
-  @Override
-  public ContainerInfo allocateContainer(final ReplicationType type,
-      final ReplicationFactor replicationFactor, final String owner)
-      throws IOException {
-    try {
-      lock.lock();
-      ContainerInfo containerInfo = null;
-      try {
-        containerInfo =
-            containerStateManager.allocateContainer(pipelineManager, type,
-              replicationFactor, owner);
-      } catch (IOException ex) {
-        scmContainerManagerMetrics.incNumFailureCreateContainers();
-        throw ex;
-      }
-      // Add container to DB.
-      try {
-        addContainerToDB(containerInfo);
-      } catch (IOException ex) {
-        // When adding to DB failed, we are removing from containerStateMap.
-        // We should also remove from pipeline2Container Map in
-        // PipelineStateManager.
-        pipelineManager.removeContainerFromPipeline(
-            containerInfo.getPipelineID(),
-            new ContainerID(containerInfo.getContainerID()));
-        throw ex;
-      }
-      return containerInfo;
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * Deletes a container from SCM.
-   *
-   * @param containerID - Container ID
-   * @throws IOException if the container doesn't exist or the container
-   *                     store failed to delete the specified key.
-   */
-  @Override
-  public void deleteContainer(ContainerID containerID) throws IOException {
-    lock.lock();
-    try {
-      containerStateManager.removeContainer(containerID);
-      final byte[] dbKey = Longs.toByteArray(containerID.getId());
-      final byte[] containerBytes = containerStore.get(dbKey);
-      if (containerBytes != null) {
-        containerStore.delete(dbKey);
-      } else {
-        // The container was tracked in memory but is missing from the store.
-        LOG.warn("Unable to remove the container {} from container store," +
-                " it's missing!", containerID);
-      }
-      scmContainerManagerMetrics.incNumSuccessfulDeleteContainers();
-    } catch (ContainerNotFoundException cnfe) {
-      scmContainerManagerMetrics.incNumFailureDeleteContainers();
-      throw new SCMException(
-          "Failed to delete container " + containerID + ", reason : " +
-              "container doesn't exist.",
-          SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER);
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * {@inheritDoc} Used by client to update container state on SCM.
-   */
-  @Override
-  public HddsProtos.LifeCycleState updateContainerState(
-      ContainerID containerID, HddsProtos.LifeCycleEvent event)
-      throws IOException {
-    // Should we return the updated ContainerInfo instead of LifeCycleState?
-    lock.lock();
-    try {
-      final ContainerInfo container = containerStateManager
-          .getContainer(containerID);
-      final LifeCycleState oldState = container.getState();
-      containerStateManager.updateContainerState(containerID, event);
-      final LifeCycleState newState = container.getState();
-
-      if (oldState == LifeCycleState.OPEN && newState != LifeCycleState.OPEN) {
-        pipelineManager
-            .removeContainerFromPipeline(container.getPipelineID(),
-                containerID);
-      }
-      final byte[] dbKey = Longs.toByteArray(containerID.getId());
-      containerStore.put(dbKey, container.getProtobuf().toByteArray());
-      return newState;
-    } catch (ContainerNotFoundException cnfe) {
-      throw new SCMException(
-          "Failed to update container state"
-              + containerID
-              + ", reason : container doesn't exist.",
-          SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER);
-    } finally {
-      lock.unlock();
-    }
-  }
-
-
-  /**
-   * Update deleteTransactionId according to deleteTransactionMap.
-   *
-   * @param deleteTransactionMap Maps the containerId to latest delete
-   *                             transaction id for the container.
-   * @throws IOException on failure to update the container store.
-   */
-  public void updateDeleteTransactionId(Map<Long, Long> deleteTransactionMap)
-      throws IOException {
-    if (deleteTransactionMap == null) {
-      return;
-    }
-
-    lock.lock();
-    try {
-      BatchOperation batch = new BatchOperation();
-      for (Map.Entry<Long, Long> entry : deleteTransactionMap.entrySet()) {
-        long containerID = entry.getKey();
-        byte[] dbKey = Longs.toByteArray(containerID);
-        byte[] containerBytes = containerStore.get(dbKey);
-        if (containerBytes == null) {
-          throw new SCMException(
-              "Failed to increment number of deleted blocks for container "
-                  + containerID + ", reason : " + "container doesn't exist.",
-              SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER);
-        }
-        ContainerInfo containerInfo = ContainerInfo.fromProtobuf(
-            HddsProtos.ContainerInfoProto.parseFrom(containerBytes));
-        containerInfo.updateDeleteTransactionId(entry.getValue());
-        batch.put(dbKey, containerInfo.getProtobuf().toByteArray());
-      }
-      containerStore.writeBatch(batch);
-      containerStateManager
-          .updateDeleteTransactionId(deleteTransactionMap);
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * Return a container matching the attributes specified.
-   *
-   * @param sizeRequired - Space needed in the Container.
-   * @param owner        - Owner of the container - A specific nameservice.
-   * @param pipeline     - Pipeline to which the container should belong.
-   * @return ContainerInfo, or null if no match is found.
-   */
-  public ContainerInfo getMatchingContainer(final long sizeRequired,
-      String owner, Pipeline pipeline) {
-    return getMatchingContainer(sizeRequired, owner, pipeline, Collections
-        .emptyList());
-  }
-
-  public ContainerInfo getMatchingContainer(final long sizeRequired,
-      String owner, Pipeline pipeline, List<ContainerID> excludedContainers) {
-    NavigableSet<ContainerID> containerIDs;
-    try {
-      synchronized (pipeline) {
-        //TODO: #CLUTIL See if lock is required here
-        containerIDs =
-            pipelineManager.getContainersInPipeline(pipeline.getId());
-
-        containerIDs = getContainersForOwner(containerIDs, owner);
-        if (containerIDs.size() < numContainerPerOwnerInPipeline) {
-          // TODO: #CLUTIL Maybe we can add selection logic inside synchronized
-          // as well
-          ContainerInfo containerInfo =
-              containerStateManager.allocateContainer(pipelineManager, owner,
-                  pipeline);
-          // Add to DB
-          addContainerToDB(containerInfo);
-          containerStateManager.updateLastUsedMap(pipeline.getId(),
-              containerInfo.containerID(), owner);
-          return containerInfo;
-        }
-      }
-
-      containerIDs.removeAll(excludedContainers);
-      ContainerInfo containerInfo =
-          containerStateManager.getMatchingContainer(sizeRequired, owner,
-              pipeline.getId(), containerIDs);
-      if (containerInfo == null) {
-        synchronized (pipeline) {
-          containerInfo =
-              containerStateManager.allocateContainer(pipelineManager, owner,
-                  pipeline);
-          // Add to DB
-          addContainerToDB(containerInfo);
-        }
-      }
-      containerStateManager.updateLastUsedMap(pipeline.getId(),
-          containerInfo.containerID(), owner);
-      // TODO: #CLUTIL cleanup entries in lastUsedMap
-      return containerInfo;
-    } catch (Exception e) {
-      LOG.warn("Container allocation failed for pipeline={} requiredSize={} {}",
-          pipeline, sizeRequired, e);
-      return null;
-    }
-  }
-
-  /**
-   * Add newly allocated container to container DB.
-   * @param containerInfo - ContainerInfo to persist.
-   * @throws IOException on failure to write to the container store.
-   */
-  private void addContainerToDB(ContainerInfo containerInfo)
-      throws IOException {
-    try {
-      final byte[] containerIDBytes = Longs.toByteArray(
-          containerInfo.getContainerID());
-      containerStore.put(containerIDBytes,
-          containerInfo.getProtobuf().toByteArray());
-      // Incrementing here, as allocateBlock to create a container calls
-      // getMatchingContainer() and finally calls this API to add newly
-      // created container to DB.
-      // Even allocateContainer calls this API to add newly allocated
-      // container to DB. So we need to increment metrics here.
-      scmContainerManagerMetrics.incNumSuccessfulCreateContainers();
-    } catch (IOException ex) {
-      // If adding to containerStore fails, we should remove the container
-      // from in-memory map.
-      scmContainerManagerMetrics.incNumFailureCreateContainers();
-      LOG.error("Add Container to DB failed for ContainerID #{}",
-          containerInfo.getContainerID(), ex);
-      try {
-        containerStateManager.removeContainer(containerInfo.containerID());
-      } catch (ContainerNotFoundException cnfe) {
-        // This should not happen, as we are removing right after adding to
-        // the container state map.
-      }
-      throw ex;
-    }
-  }
-
-  /**
-   * Returns the container IDs matching the specified owner.
-   * @param containerIDs - candidate container IDs; filtered in place.
-   * @param owner - owner to match.
-   * @return NavigableSet of matching ContainerIDs.
-   */
-  private NavigableSet<ContainerID> getContainersForOwner(
-      NavigableSet<ContainerID> containerIDs, String owner) {
-    Iterator<ContainerID> containerIDIterator = containerIDs.iterator();
-    while (containerIDIterator.hasNext()) {
-      ContainerID cid = containerIDIterator.next();
-      try {
-        if (!getContainer(cid).getOwner().equals(owner)) {
-          containerIDIterator.remove();
-        }
-      } catch (ContainerNotFoundException e) {
-        LOG.error("Could not find container info for container id={} {}", cid,
-            e);
-        containerIDIterator.remove();
-      }
-    }
-    return containerIDs;
-  }
-
-  /**
-   * Returns the latest set of replicas for the given containerID. Throws a
-   * ContainerNotFoundException if no entry is found for the containerID.
-   *
-   * @param containerID - ID of the container.
-   * @return Set of ContainerReplica.
-   */
-  public Set<ContainerReplica> getContainerReplicas(
-      final ContainerID containerID) throws ContainerNotFoundException {
-    return containerStateManager.getContainerReplicas(containerID);
-  }
-
-  /**
-   * Add a container replica for the given DataNode.
-   *
-   * @param containerID - ID of the container.
-   * @param replica - replica to add.
-   */
-  public void updateContainerReplica(final ContainerID containerID,
-      final ContainerReplica replica) throws ContainerNotFoundException {
-    containerStateManager.updateContainerReplica(containerID, replica);
-  }
-
-  /**
-   * Remove a container replica for the given DataNode.
-   *
-   * @param containerID - ID of the container.
-   * @param replica - replica to remove.
-   */
-  public void removeContainerReplica(final ContainerID containerID,
-      final ContainerReplica replica)
-      throws ContainerNotFoundException, ContainerReplicaNotFoundException {
-    containerStateManager.removeContainerReplica(containerID, replica);
-  }
-
-  /**
-   * Closes this ContainerManager and releases the resources associated with
-   * it: the container state manager, the container store and the metrics
-   * source. If it is already closed, invoking this method has no effect.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-    if (containerStateManager != null) {
-      containerStateManager.close();
-    }
-    if (containerStore != null) {
-      containerStore.close();
-    }
-
-    if (scmContainerManagerMetrics != null) {
-      this.scmContainerManagerMetrics.unRegister();
-    }
-  }
-
-  public void notifyContainerReportProcessing(boolean isFullReport,
-      boolean success) {
-    if (isFullReport) {
-      if (success) {
-        scmContainerManagerMetrics.incNumContainerReportsProcessedSuccessful();
-      } else {
-        scmContainerManagerMetrics.incNumContainerReportsProcessedFailed();
-      }
-    } else {
-      if (success) {
-        scmContainerManagerMetrics.incNumICRReportsProcessedSuccessful();
-      } else {
-        scmContainerManagerMetrics.incNumICRReportsProcessedFailed();
-      }
-    }
-  }
-}
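
The pagination in listContainer above is simple cursor-style paging over a sorted snapshot of IDs. Below is a minimal, self-contained sketch of the same pattern, with plain longs standing in for ContainerID; ListContainerSketch and listIds are illustrative names, not part of the deleted code.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Set;
    import java.util.TreeSet;
    import java.util.stream.Collectors;

    /** Minimal sketch of listContainer-style cursor pagination over IDs. */
    public final class ListContainerSketch {

      /** Return up to 'count' IDs strictly greater than 'startId'. */
      static List<Long> listIds(Set<Long> allIds, Long startId, int count) {
        final long start = (startId == null) ? 0 : startId;
        // Snapshot and sort the IDs, as SCMContainerManager does under its lock.
        List<Long> sorted = new ArrayList<>(allIds);
        Collections.sort(sorted);
        return sorted.stream()
            .filter(id -> id > start)
            .limit(count)
            .collect(Collectors.toList());
      }

      public static void main(String[] args) {
        Set<Long> ids = new TreeSet<>(List.of(5L, 1L, 4L, 2L, 3L));
        // Page of size 2 starting after ID 2 -> prints [3, 4]
        System.out.println(listIds(ids, 2L, 2));
      }
    }

The snapshot-then-filter approach is what makes the "can never happen" comment in listContainer hold: the lock guarantees the IDs collected in the snapshot stay resolvable for the duration of the call.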
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
deleted file mode 100644
index ee02bbd..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-/**
- * This package contains the class that closes a container, that is, moves a
- * container from the open state to the closed state.
- */
-package org.apache.hadoop.hdds.scm.container.closer;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java
deleted file mode 100644
index e9a2579..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.metrics;
-
-
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-
-/**
- * Class contains metrics related to ContainerManager.
- */
-@Metrics(about = "SCM ContainerManager metrics", context = "ozone")
-public final class SCMContainerManagerMetrics {
-
-  private static final String SOURCE_NAME =
-      SCMContainerManagerMetrics.class.getSimpleName();
-
-  // These metrics are reset to zero after a restart. They count the
-  // successful and failed create/delete container operations in SCM.
-
-  private @Metric MutableCounterLong numSuccessfulCreateContainers;
-  private @Metric MutableCounterLong numFailureCreateContainers;
-  private @Metric MutableCounterLong numSuccessfulDeleteContainers;
-  private @Metric MutableCounterLong numFailureDeleteContainers;
-  private @Metric MutableCounterLong numListContainerOps;
-
-
-  private @Metric MutableCounterLong numContainerReportsProcessedSuccessful;
-  private @Metric MutableCounterLong numContainerReportsProcessedFailed;
-  private @Metric MutableCounterLong numICRReportsProcessedSuccessful;
-  private @Metric MutableCounterLong numICRReportsProcessedFailed;
-
-  private SCMContainerManagerMetrics() {
-  }
-
-  /**
-   * Create and return metrics instance.
-   * @return SCMContainerManagerMetrics
-   */
-  public static SCMContainerManagerMetrics create() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME, "SCM ContainerManager Metrics",
-        new SCMContainerManagerMetrics());
-  }
-
-  /**
-   * Unregister metrics.
-   */
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE_NAME);
-  }
-
-  public void incNumSuccessfulCreateContainers() {
-    this.numSuccessfulCreateContainers.incr();
-  }
-
-  public void incNumFailureCreateContainers() {
-    this.numFailureCreateContainers.incr();
-  }
-
-  public void incNumSuccessfulDeleteContainers() {
-    this.numSuccessfulDeleteContainers.incr();
-  }
-
-  public void incNumFailureDeleteContainers() {
-    this.numFailureDeleteContainers.incr();
-  }
-
-  public void incNumListContainersOps() {
-    this.numListContainerOps.incr();
-  }
-
-  public void incNumContainerReportsProcessedSuccessful() {
-    this.numContainerReportsProcessedSuccessful.incr();
-  }
-
-  public void incNumContainerReportsProcessedFailed() {
-    this.numContainerReportsProcessedFailed.incr();
-  }
-
-  public void incNumICRReportsProcessedSuccessful() {
-    this.numICRReportsProcessedSuccessful.incr();
-  }
-
-  public void incNumICRReportsProcessedFailed() {
-    this.numICRReportsProcessedFailed.incr();
-  }
-
-  public long getNumContainerReportsProcessedSuccessful() {
-    return numContainerReportsProcessedSuccessful.value();
-  }
-
-  public long getNumContainerReportsProcessedFailed() {
-    return numContainerReportsProcessedFailed.value();
-  }
-
-  public long getNumICRReportsProcessedSuccessful() {
-    return numICRReportsProcessedSuccessful.value();
-  }
-
-  public long getNumICRReportsProcessedFailed() {
-    return numICRReportsProcessedFailed.value();
-  }
-
-  public long getNumSuccessfulCreateContainers() {
-    return numSuccessfulCreateContainers.value();
-  }
-
-  public long getNumFailureCreateContainers() {
-    return numFailureCreateContainers.value();
-  }
-
-  public long getNumSuccessfulDeleteContainers() {
-    return numSuccessfulDeleteContainers.value();
-  }
-
-  public long getNumFailureDeleteContainers() {
-    return numFailureDeleteContainers.value();
-  }
-
-  public long getNumListContainersOps() {
-    return numListContainerOps.value();
-  }
-
-}
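
The register/unregister pattern used by SCMContainerManagerMetrics generalizes to any counter source. A minimal sketch using the same hadoop-common metrics2 calls follows; ExampleMetrics and numOps are illustrative names.

    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;

    /** Hypothetical counter source following the same lifecycle pattern. */
    @Metrics(about = "Example metrics", context = "ozone")
    public final class ExampleMetrics {
      private static final String SOURCE_NAME =
          ExampleMetrics.class.getSimpleName();

      // Annotated counters are instantiated by the metrics system on register.
      private @Metric MutableCounterLong numOps;

      private ExampleMetrics() {
      }

      /** Register this source with the default metrics system. */
      public static ExampleMetrics create() {
        MetricsSystem ms = DefaultMetricsSystem.instance();
        return ms.register(SOURCE_NAME, "Example metrics",
            new ExampleMetrics());
      }

      public void incNumOps() {
        numOps.incr();
      }

      public long getNumOps() {
        return numOps.value();
      }

      /** Unregister, mirroring unRegister() above. */
      public void unRegister() {
        DefaultMetricsSystem.instance().unregisterSource(SOURCE_NAME);
      }
    }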
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/package-info.java
deleted file mode 100644
index 3198de1..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.metrics;
-
-/*
- * This package contains StorageContainerManager metric classes.
- */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
deleted file mode 100644
index 3f8d056..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-/**
- * This package contains routines to manage the container location and
- * mapping inside SCM.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
deleted file mode 100644
index 18ec2c3..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.reflect.Constructor;
-
-/**
- * A factory to create a container placement policy instance based on the
- * configuration property ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY.
- */
-public final class ContainerPlacementPolicyFactory {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerPlacementPolicyFactory.class);
-
-  private static final Class<? extends ContainerPlacementPolicy>
-      OZONE_SCM_CONTAINER_PLACEMENT_IMPL_DEFAULT =
-      SCMContainerPlacementRandom.class;
-
-  private ContainerPlacementPolicyFactory() {
-  }
-
-  public static ContainerPlacementPolicy getPolicy(Configuration conf,
-      final NodeManager nodeManager, NetworkTopology clusterMap,
-      final boolean fallback, SCMContainerPlacementMetrics metrics)
-      throws SCMException {
-    final Class<? extends ContainerPlacementPolicy> placementClass = conf
-        .getClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-            OZONE_SCM_CONTAINER_PLACEMENT_IMPL_DEFAULT,
-            ContainerPlacementPolicy.class);
-    Constructor<? extends ContainerPlacementPolicy> constructor;
-    try {
-      constructor = placementClass.getDeclaredConstructor(NodeManager.class,
-          Configuration.class, NetworkTopology.class, boolean.class,
-          SCMContainerPlacementMetrics.class);
-      LOG.info("Created container placement policy of type {}",
-          placementClass.getCanonicalName());
-    } catch (NoSuchMethodException e) {
-      String msg = "Failed to find constructor(NodeManager, Configuration, " +
-          "NetworkTopology, boolean, SCMContainerPlacementMetrics) for class " +
-          placementClass.getCanonicalName();
-      LOG.error(msg);
-      throw new SCMException(msg,
-          SCMException.ResultCodes.FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY);
-    }
-
-    try {
-      return constructor.newInstance(nodeManager, conf, clusterMap, fallback,
-          metrics);
-    } catch (Exception e) {
-      throw new RuntimeException("Failed to instantiate class " +
-          placementClass.getCanonicalName(), e);
-    }
-  }
-}
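
The factory above follows a common pattern: resolve a class from configuration, look up a fixed constructor shape, then instantiate reflectively. A dependency-free sketch of the same pattern is below; PlacementPolicy, RandomPolicy and the Integer-argument constructor are illustrative assumptions, not the deleted API.

    import java.lang.reflect.Constructor;

    public final class PolicyFactorySketch {
      /** Illustrative policy contract. */
      public interface PlacementPolicy {
        String name();
      }

      /** Illustrative default implementation with the expected constructor. */
      public static final class RandomPolicy implements PlacementPolicy {
        private final int seed;
        public RandomPolicy(Integer seed) {
          this.seed = seed;
        }
        @Override public String name() {
          return "random(" + seed + ")";
        }
      }

      static PlacementPolicy getPolicy(String className, Integer seed)
          throws ReflectiveOperationException {
        // Resolve the implementation class, falling back to the default.
        Class<? extends PlacementPolicy> clazz;
        if (className == null) {
          clazz = RandomPolicy.class;
        } else {
          clazz = Class.forName(className).asSubclass(PlacementPolicy.class);
        }
        // Look up the expected constructor shape, then instantiate.
        Constructor<? extends PlacementPolicy> ctor =
            clazz.getDeclaredConstructor(Integer.class);
        return ctor.newInstance(seed);
      }

      public static void main(String[] args) throws Exception {
        System.out.println(getPolicy(null, 42).name()); // prints random(42)
      }
    }

Failing fast with a descriptive message when the constructor shape is missing, as the deleted factory does, keeps misconfigured policy class names from surfacing later as opaque reflection errors.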
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java
deleted file mode 100644
index 77cdd83..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.stream.Collectors;
-
-/**
- * SCMCommonPolicy implements a set of invariants which are common to all
- * container placement policies, and acts as a repository of helper
- * functions which are shared by those policies.
- */
-public abstract class SCMCommonPolicy implements ContainerPlacementPolicy {
-  @VisibleForTesting
-  static final Logger LOG =
-      LoggerFactory.getLogger(SCMCommonPolicy.class);
-  private final NodeManager nodeManager;
-  private final Random rand;
-  private final Configuration conf;
-
-  /**
-   * Constructs SCM Common Policy Class.
-   *
-   * @param nodeManager NodeManager
-   * @param conf Configuration class.
-   */
-  public SCMCommonPolicy(NodeManager nodeManager, Configuration conf) {
-    this.nodeManager = nodeManager;
-    this.rand = new Random();
-    this.conf = conf;
-  }
-
-  /**
-   * Return node manager.
-   *
-   * @return node manager
-   */
-  public NodeManager getNodeManager() {
-    return nodeManager;
-  }
-
-  /**
-   * Returns the Random Object.
-   *
-   * @return rand
-   */
-  public Random getRand() {
-    return rand;
-  }
-
-  /**
-   * Get Config.
-   *
-   * @return Configuration
-   */
-  public Configuration getConf() {
-    return conf;
-  }
-
-  /**
-   * Given the replication factor and size required, return a set of
-   * datanodes that satisfies the node count and size requirements.
-   * <p>
-   * Here are some invariants of container placement.
-   * <p>
-   * 1. We place containers only on healthy nodes.
-   * 2. We place containers on nodes with enough space for that container.
-   * 3. If a set of containers is requested, we either meet the required
-   * number of nodes or we fail that request.
-   *
-   * @param excludedNodes - datanodes with existing replicas
-   * @param favoredNodes - list of nodes preferred.
-   * @param nodesRequired - number of datanodes required.
-   * @param sizeRequired - size required for the container or block.
-   * @return list of datanodes chosen.
-   * @throws SCMException SCM exception.
-   */
-  @Override
-  public List<DatanodeDetails> chooseDatanodes(
-      List<DatanodeDetails> excludedNodes, List<DatanodeDetails> favoredNodes,
-      int nodesRequired, final long sizeRequired) throws SCMException {
-    List<DatanodeDetails> healthyNodes =
-        nodeManager.getNodes(HddsProtos.NodeState.HEALTHY);
-    if (excludedNodes != null) {
-      healthyNodes.removeAll(excludedNodes);
-    }
-    String msg;
-    if (healthyNodes.size() == 0) {
-      msg = "No healthy node found to allocate container.";
-      LOG.error(msg);
-      throw new SCMException(msg, SCMException.ResultCodes
-          .FAILED_TO_FIND_HEALTHY_NODES);
-    }
-
-    if (healthyNodes.size() < nodesRequired) {
-      msg = String.format("Not enough healthy nodes to allocate container. "
-              + "%d datanodes required. Found %d",
-          nodesRequired, healthyNodes.size());
-      LOG.error(msg);
-      throw new SCMException(msg,
-          SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
-    }
-    List<DatanodeDetails> healthyList = healthyNodes.stream().filter(d ->
-        hasEnoughSpace(d, sizeRequired)).collect(Collectors.toList());
-
-    if (healthyList.size() < nodesRequired) {
-      msg = String.format("Unable to find enough nodes that meet the space " +
-              "requirement of %d bytes in healthy node set." +
-              " Nodes required: %d Found: %d",
-          sizeRequired, nodesRequired, healthyList.size());
-      LOG.error(msg);
-      throw new SCMException(msg,
-          SCMException.ResultCodes.FAILED_TO_FIND_NODES_WITH_SPACE);
-    }
-    return healthyList;
-  }
-
-  /**
-   * Returns true if this node has enough space to meet our requirement.
-   *
-   * @param datanodeDetails DatanodeDetails
-   * @param sizeRequired - size required for the container or block.
-   * @return true if we have enough space.
-   */
-  boolean hasEnoughSpace(DatanodeDetails datanodeDetails,
-      long sizeRequired) {
-    SCMNodeMetric nodeMetric = nodeManager.getNodeStat(datanodeDetails);
-    return (nodeMetric != null) && (nodeMetric.get() != null)
-        && nodeMetric.get().getRemaining().hasResources(sizeRequired);
-  }
-
-  /**
-   * This function invokes the derived class's chooseNode function to build a
-   * list of nodes. Then it verifies that the invoked policy was able to
-   * return the expected number of nodes.
-   *
-   * @param nodesRequired - Nodes Required
-   * @param healthyNodes - List of candidate healthy nodes.
-   * @return List of Datanodes that can be used for placement.
-   * @throws SCMException if the required number of nodes cannot be found.
-   */
-  public List<DatanodeDetails> getResultSet(
-      int nodesRequired, List<DatanodeDetails> healthyNodes)
-      throws SCMException {
-    List<DatanodeDetails> results = new ArrayList<>();
-    for (int x = 0; x < nodesRequired; x++) {
-      // invoke the choose function defined in the derived classes.
-      DatanodeDetails nodeId = chooseNode(healthyNodes);
-      if (nodeId != null) {
-        results.add(nodeId);
-      }
-    }
-
-    if (results.size() < nodesRequired) {
-      LOG.error("Unable to find the required number of healthy nodes that " +
-              "meet the criteria. Required nodes: {}, Found nodes: {}",
-          nodesRequired, results.size());
-      throw new SCMException("Unable to find required number of nodes.",
-          SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
-    }
-    return results;
-  }
-
-  /**
-   * Choose a datanode according to the policy, this function is implemented
-   * by the actual policy class. For example, PlacementCapacity or
-   * PlacementRandom.
-   *
-   * @param healthyNodes - Set of healthy nodes we can choose from.
-   * @return DatanodeDetails
-   */
-  public abstract DatanodeDetails chooseNode(
-      List<DatanodeDetails> healthyNodes);
-}
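
The base class above is a template method: getResultSet drives the selection loop while subclasses supply chooseNode. A minimal sketch of that split follows, with plain strings standing in for DatanodeDetails and FirstFitPolicy as an illustrative subclass.

    import java.util.ArrayList;
    import java.util.List;

    public final class PlacementSketch {
      /** Base policy: shared result-set assembly, per-policy node choice. */
      public abstract static class CommonPolicy {
        public List<String> getResultSet(int nodesRequired,
            List<String> healthyNodes) {
          List<String> results = new ArrayList<>();
          for (int x = 0; x < nodesRequired; x++) {
            // Delegate each pick to the concrete policy.
            String node = chooseNode(healthyNodes);
            if (node != null) {
              results.add(node);
            }
          }
          if (results.size() < nodesRequired) {
            throw new IllegalStateException("Unable to find required nodes.");
          }
          return results;
        }

        public abstract String chooseNode(List<String> healthyNodes);
      }

      /** Illustrative policy: take the first remaining candidate. */
      public static final class FirstFitPolicy extends CommonPolicy {
        @Override public String chooseNode(List<String> healthyNodes) {
          return healthyNodes.isEmpty() ? null : healthyNodes.remove(0);
        }
      }

      public static void main(String[] args) {
        List<String> nodes = new ArrayList<>(List.of("dn1", "dn2", "dn3"));
        System.out.println(new FirstFitPolicy().getResultSet(2, nodes));
        // prints [dn1, dn2]
      }
    }

Note that chooseNode removes the picked node from the candidate list, just as the capacity policy below does, so repeated picks never return duplicates.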
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java
deleted file mode 100644
index 85d281c..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Container placement policy that randomly chooses datanodes with enough
- * remaining space to satisfy the size constraints.
- * <p>
- * The algorithm is as follows: pick 2 random nodes from a given pool of nodes
- * and then pick the node with the lower utilization. This gives nodes with
- * lower utilization a higher probability of being picked.
- * <p>
- * For those wondering why we choose two nodes randomly and keep the node
- * with lower utilization: links to the original papers are in HDFS-11564.
- * <p>
- * A brief summary -- rank the nodes from lowest utilized to highest
- * utilized; there are (s * (s - 1)) / 2 ways to build distinct pairs of
- * nodes, and in s - k of those pairs the rank k node is the less utilized
- * member. So the probability of picking the rank k node is
- * (2 * (s - k)) / (s * (s - 1)).
- * <p>
- * In English, there is a much higher probability of picking less utilized
- * nodes as compared to nodes with higher utilization, since we pick 2 nodes
- * and then keep the node with lower utilization.
- * <p>
- * This avoids the issue of users adding new nodes into the cluster and HDFS
- * sending all traffic to those nodes if we only use a capacity based
- * allocation scheme. Unless those nodes are part of the sampled pair of
- * nodes, newer nodes will not be in the running to get the container.
- * <p>
- * This leads to an I/O pattern where the lower utilized nodes are favoured
- * more than higher utilized nodes, but part of the I/O will still go to the
- * older, higher utilized nodes.
- * <p>
- * With this algorithm in place, our hope is that the balancer tool needs to
- * do little or no work and the cluster will achieve a balanced distribution
- * over time.
- */
-public final class SCMContainerPlacementCapacity extends SCMCommonPolicy {
-  @VisibleForTesting
-  static final Logger LOG =
-      LoggerFactory.getLogger(SCMContainerPlacementCapacity.class);
-
-  /**
-   * Constructs a container placement policy that considers only capacity;
-   * that is, this policy tries to place containers based on node weight.
-   * The remaining parameters are accepted to match the constructor shape
-   * expected by the factory and are not used by this policy.
-   *
-   * @param nodeManager Node Manager
-   * @param conf Configuration
-   */
-  public SCMContainerPlacementCapacity(final NodeManager nodeManager,
-      final Configuration conf, final NetworkTopology networkTopology,
-      final boolean fallback, final SCMContainerPlacementMetrics metrics) {
-    super(nodeManager, conf);
-  }
-
-  /**
-   * Called by SCM to choose datanodes.
-   *
-   *
-   * @param excludedNodes - list of the datanodes to exclude.
-   * @param favoredNodes - list of nodes preferred.
-   * @param nodesRequired - number of datanodes required.
-   * @param sizeRequired - size required for the container or block.
-   * @return List of datanodes.
-   * @throws SCMException  SCMException
-   */
-  @Override
-  public List<DatanodeDetails> chooseDatanodes(
-      List<DatanodeDetails> excludedNodes, List<DatanodeDetails> favoredNodes,
-      final int nodesRequired, final long sizeRequired) throws SCMException {
-    List<DatanodeDetails> healthyNodes = super.chooseDatanodes(excludedNodes,
-        favoredNodes, nodesRequired, sizeRequired);
-    if (healthyNodes.size() == nodesRequired) {
-      return healthyNodes;
-    }
-    return getResultSet(nodesRequired, healthyNodes);
-  }
-
-  /**
-   * Find a node from the healthy list and return it after removing it from the
-   * list that we are operating on.
-   *
-   * @param healthyNodes - List of healthy nodes that meet the size
-   * requirement.
-   * @return DatanodeDetails that is chosen.
-   */
-  @Override
-  public DatanodeDetails chooseNode(List<DatanodeDetails> healthyNodes) {
-    int firstNodeNdx = getRand().nextInt(healthyNodes.size());
-    int secondNodeNdx = getRand().nextInt(healthyNodes.size());
-
-    DatanodeDetails datanodeDetails;
-    // There is a possibility that both numbers will be the same.
-    // If that is so, we just return that node.
-    if (firstNodeNdx == secondNodeNdx) {
-      datanodeDetails = healthyNodes.get(firstNodeNdx);
-    } else {
-      DatanodeDetails firstNodeDetails = healthyNodes.get(firstNodeNdx);
-      DatanodeDetails secondNodeDetails = healthyNodes.get(secondNodeNdx);
-      SCMNodeMetric firstNodeMetric =
-          getNodeManager().getNodeStat(firstNodeDetails);
-      SCMNodeMetric secondNodeMetric =
-          getNodeManager().getNodeStat(secondNodeDetails);
-      datanodeDetails = firstNodeMetric.isGreater(secondNodeMetric.get())
-          ? firstNodeDetails : secondNodeDetails;
-    }
-    healthyNodes.remove(datanodeDetails);
-    return datanodeDetails;
-  }
-}
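
The two-choices bias described in the class comment is easy to check empirically. Here is a small simulation sketch, where a node's rank doubles as its utilization, so a lower index means a less utilized node; like chooseNode above, it samples with replacement.

    import java.util.Random;

    public final class TwoChoicesSketch {
      public static void main(String[] args) {
        final int s = 10;            // nodes ranked 0 (least utilized)
                                     // to s - 1 (most utilized)
        final int trials = 1_000_000;
        final long[] picks = new long[s];
        Random rand = new Random(7);
        for (int t = 0; t < trials; t++) {
          int a = rand.nextInt(s);
          int b = rand.nextInt(s);
          // Keep the less utilized of the two samples
          // (if both are the same node, that node is kept).
          picks[Math.min(a, b)]++;
        }
        // Lower-ranked (less utilized) nodes are chosen noticeably more often.
        for (int k = 0; k < s; k++) {
          System.out.printf("rank %d: %.3f%n", k, picks[k] / (double) trials);
        }
      }
    }

Running it shows the pick frequency falling roughly linearly with rank, which is the bias the class comment derives analytically.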
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
deleted file mode 100644
index fb709b1..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsInfo;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.Interns;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-
-/**
- * This class maintains topology-aware container placement statistics.
- */
-@Metrics(about = "SCM Container Placement Metrics", context = "ozone")
-public class SCMContainerPlacementMetrics implements MetricsSource {
-  public static final String SOURCE_NAME =
-      SCMContainerPlacementMetrics.class.getSimpleName();
-  private static final MetricsInfo RECORD_INFO = Interns.info(SOURCE_NAME,
-      "SCM Container Placement Metrics");
-  private static MetricsRegistry registry;
-
-  // total datanode allocation request count
-  @Metric private MutableCounterLong datanodeRequestCount;
-  // datanode allocation attempt count, including success, fallback and failed
-  @Metric private MutableCounterLong datanodeChooseAttemptCount;
-  // datanode successful allocation count
-  @Metric private MutableCounterLong datanodeChooseSuccessCount;
-  // datanode allocated with some allocation constraints compromised
-  @Metric private MutableCounterLong datanodeChooseFallbackCount;
-
-  public SCMContainerPlacementMetrics() {
-  }
-
-  public static SCMContainerPlacementMetrics create() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    registry = new MetricsRegistry(RECORD_INFO);
-    return ms.register(SOURCE_NAME, "SCM Container Placement Metrics",
-        new SCMContainerPlacementMetrics());
-  }
-
-  public void incrDatanodeRequestCount(long count) {
-    this.datanodeRequestCount.incr(count);
-  }
-
-  public void incrDatanodeChooseSuccessCount() {
-    this.datanodeChooseSuccessCount.incr(1);
-  }
-
-  public void incrDatanodeChooseFallbackCount() {
-    this.datanodeChooseFallbackCount.incr(1);
-  }
-
-  public void incrDatanodeChooseAttemptCount() {
-    this.datanodeChooseAttemptCount.incr(1);
-  }
-
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE_NAME);
-  }
-
-  @VisibleForTesting
-  public long getDatanodeRequestCount() {
-    return this.datanodeRequestCount.value();
-  }
-
-  @VisibleForTesting
-  public long getDatanodeChooseSuccessCount() {
-    return this.datanodeChooseSuccessCount.value();
-  }
-
-  @VisibleForTesting
-  public long getDatanodeChooseFallbackCount() {
-    return this.datanodeChooseFallbackCount.value();
-  }
-
-  @VisibleForTesting
-  public long getDatanodeChooseAttemptCount() {
-    return this.datanodeChooseAttemptCount.value();
-  }
-
-  @Override
-  public void getMetrics(MetricsCollector collector, boolean all) {
-    registry.snapshot(collector.addRecord(registry.info().name()), true);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
deleted file mode 100644
index 6d49459..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
+++ /dev/null
@@ -1,348 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.net.NetConstants;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.net.Node;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-/**
- * Container placement policy that chooses datanodes with network topology
- * awareness, together with enough space to satisfy the size constraints.
- * <p>
- * This placement policy complies with the algorithm used in HDFS. With the
- * default of 3 replicas, two replicas will be on the same rack and the third
- * one will be on a different rack.
- * <p>
- * This implementation applies to network topologies like "/rack/node". It is
- * not recommended if the network topology has more layers.
- */
-public final class SCMContainerPlacementRackAware extends SCMCommonPolicy {
-  @VisibleForTesting
-  static final Logger LOG =
-      LoggerFactory.getLogger(SCMContainerPlacementRackAware.class);
-  private final NetworkTopology networkTopology;
-  private boolean fallback;
-  private static final int RACK_LEVEL = 1;
-  private static final int MAX_RETRY = 3;
-  private final SCMContainerPlacementMetrics metrics;
-
-  /**
-   * Constructs a Container Placement with rack awareness.
-   *
-   * @param nodeManager Node Manager
-   * @param conf Configuration
-   * @param networkTopology Network topology of the cluster.
-   * @param fallback Whether to relax constraints to choose a datanode when
-   *                 there is no node which satisfies all constraints.
-   *                 Basically, false for open container placement, and true
-   *                 for closed container placement.
-   * @param metrics Placement metrics to update.
-   */
-  public SCMContainerPlacementRackAware(final NodeManager nodeManager,
-      final Configuration conf, final NetworkTopology networkTopology,
-      final boolean fallback, final SCMContainerPlacementMetrics metrics) {
-    super(nodeManager, conf);
-    this.networkTopology = networkTopology;
-    this.fallback = fallback;
-    this.metrics = metrics;
-  }
-
-  /**
-   * Called by SCM to choose datanodes.
-   * There are two scenarios: one is choosing all nodes for a new pipeline;
-   * another is choosing nodes to meet a replication requirement.
-   *
-   * @param excludedNodes - list of the datanodes to exclude.
-   * @param favoredNodes - list of nodes preferred. This is a hint to the
-   *                     allocator; whether the favored nodes will be used
-   *                     depends on whether the nodes meet the allocator's
-   *                     requirement.
-   * @param nodesRequired - number of datanodes required.
-   * @param sizeRequired - size required for the container or block.
-   * @return List of datanodes.
-   * @throws SCMException  SCMException
-   */
-  @Override
-  public List<DatanodeDetails> chooseDatanodes(
-      List<DatanodeDetails> excludedNodes, List<DatanodeDetails> favoredNodes,
-      int nodesRequired, final long sizeRequired) throws SCMException {
-    Preconditions.checkArgument(nodesRequired > 0);
-    metrics.incrDatanodeRequestCount(nodesRequired);
-    int datanodeCount = networkTopology.getNumOfLeafNode(NetConstants.ROOT);
-    int excludedNodesCount = excludedNodes == null ? 0 : excludedNodes.size();
-    if (datanodeCount < nodesRequired + excludedNodesCount) {
-      throw new SCMException("Not enough datanodes to choose. " +
-          "TotalNode = " + datanodeCount +
-          ", RequiredNode = " + nodesRequired +
-          ", ExcludedNode = " + excludedNodesCount, null);
-    }
-    List<DatanodeDetails> mutableFavoredNodes = favoredNodes;
-    // sanity check of favoredNodes
-    if (mutableFavoredNodes != null && excludedNodes != null) {
-      mutableFavoredNodes = new ArrayList<>();
-      mutableFavoredNodes.addAll(favoredNodes);
-      mutableFavoredNodes.removeAll(excludedNodes);
-    }
-    int favoredNodeNum = mutableFavoredNodes == null ? 0 :
-        mutableFavoredNodes.size();
-
-    List<Node> chosenNodes = new ArrayList<>();
-    int favorIndex = 0;
-    if (excludedNodes == null || excludedNodes.isEmpty()) {
-      // choose all nodes for a new pipeline case
-      // choose first datanode from scope ROOT or from favoredNodes if not null
-      Node favoredNode = favoredNodeNum > favorIndex ?
-          mutableFavoredNodes.get(favorIndex) : null;
-      Node firstNode;
-      if (favoredNode != null) {
-        firstNode = favoredNode;
-        favorIndex++;
-      } else {
-        firstNode = chooseNode(null, null, sizeRequired);
-      }
-      chosenNodes.add(firstNode);
-      nodesRequired--;
-      if (nodesRequired == 0) {
-        return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0]));
-      }
-
-      // choose second datanode on the same rack as first one
-      favoredNode = favoredNodeNum > favorIndex ?
-          mutableFavoredNodes.get(favorIndex) : null;
-      Node secondNode;
-      if (favoredNode != null &&
-          networkTopology.isSameParent(firstNode, favoredNode)) {
-        secondNode = favoredNode;
-        favorIndex++;
-      } else {
-        secondNode = chooseNode(chosenNodes, firstNode, sizeRequired);
-      }
-      chosenNodes.add(secondNode);
-      nodesRequired--;
-      if (nodesRequired == 0) {
-        return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0]));
-      }
-
-      // choose remaining datanodes on different rack as first and second
-      return chooseNodes(null, chosenNodes, mutableFavoredNodes, favorIndex,
-          nodesRequired, sizeRequired);
-    } else {
-      List<Node> mutableExcludedNodes = new ArrayList<>();
-      mutableExcludedNodes.addAll(excludedNodes);
-      // choose node to meet replication requirement
-      // case 1: one excluded node, choose one on the same rack as the excluded
-      // node, choose others on different racks.
-      Node favoredNode;
-      if (excludedNodes.size() == 1) {
-        favoredNode = favoredNodeNum > favorIndex ?
-            mutableFavoredNodes.get(favorIndex) : null;
-        Node firstNode;
-        if (favoredNode != null &&
-            networkTopology.isSameParent(excludedNodes.get(0), favoredNode)) {
-          firstNode = favoredNode;
-          favorIndex++;
-        } else {
-          firstNode = chooseNode(mutableExcludedNodes, excludedNodes.get(0),
-              sizeRequired);
-        }
-        chosenNodes.add(firstNode);
-        nodesRequired--;
-        if (nodesRequired == 0) {
-          return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0]));
-        }
-        // choose remaining nodes on different racks
-        return chooseNodes(null, chosenNodes, mutableFavoredNodes, favorIndex,
-            nodesRequired, sizeRequired);
-      }
-      // case 2: two or more excluded nodes; if any two of them are on the
-      // same rack, then choose all remaining nodes on different racks;
-      // otherwise, choose one on the same rack as one of the excluded nodes
-      // and the remaining ones on different racks.
-      for(int i = 0; i < excludedNodesCount; i++) {
-        for (int j = i + 1; j < excludedNodesCount; j++) {
-          if (networkTopology.isSameParent(
-              excludedNodes.get(i), excludedNodes.get(j))) {
-            // choose remaining nodes on different racks
-            return chooseNodes(mutableExcludedNodes, chosenNodes,
-                mutableFavoredNodes, favorIndex, nodesRequired, sizeRequired);
-          }
-        }
-      }
-      // choose one datanode on the same rack as one of the excluded nodes
-      favoredNode = favoredNodeNum > favorIndex ?
-          mutableFavoredNodes.get(favorIndex) : null;
-      Node secondNode;
-      if (favoredNode != null && networkTopology.isSameParent(
-          mutableExcludedNodes.get(0), favoredNode)) {
-        secondNode = favoredNode;
-        favorIndex++;
-      } else {
-        secondNode =
-            chooseNode(chosenNodes, mutableExcludedNodes.get(0), sizeRequired);
-      }
-      chosenNodes.add(secondNode);
-      mutableExcludedNodes.add(secondNode);
-      nodesRequired--;
-      if (nodesRequired == 0) {
-        return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0]));
-      }
-      // choose remaining nodes on different racks
-      return chooseNodes(mutableExcludedNodes, chosenNodes, mutableFavoredNodes,
-          favorIndex, nodesRequired, sizeRequired);
-    }
-  }
-
-  @Override
-  public DatanodeDetails chooseNode(List<DatanodeDetails> healthyNodes) {
-    return null;
-  }
-
-  /**
-   * Chooses a datanode which meets the requirements. If no node meets all
-   * the requirements, a fallback choosing process is used, depending on
-   * whether fallback was allowed when this class was instantiated.
-   *
-   * @param excludedNodes - list of the datanodes to be excluded. Can be null.
-   * @param affinityNode - the chosen node should be on the same rack as
-   *                    affinityNode. Can be null.
-   * @param sizeRequired - size required for the container or block.
-   * @return the chosen datanode.
-   * @throws SCMException if no qualifying datanode can be found.
-   */
-  private Node chooseNode(List<Node> excludedNodes, Node affinityNode,
-      long sizeRequired) throws SCMException {
-    int ancestorGen = RACK_LEVEL;
-    int maxRetry = MAX_RETRY;
-    List<String> excludedNodesForCapacity = null;
-    boolean isFallbacked = false;
-    while (true) {
-      metrics.incrDatanodeChooseAttemptCount();
-      Node node = networkTopology.chooseRandom(NetConstants.ROOT,
-          excludedNodesForCapacity, excludedNodes, affinityNode, ancestorGen);
-      if (node == null) {
-        // cannot find a node which meets all constraints
-        LOG.warn("Failed to find the datanode for container. excludedNodes:" +
-            (excludedNodes == null ? "" : excludedNodes.toString()) +
-            ", affinityNode:" +
-            (affinityNode == null ? "" : affinityNode.getNetworkFullPath()));
-        if (fallback) {
-          isFallbacked = true;
-          // fallback, don't consider the affinity node
-          if (affinityNode != null) {
-            affinityNode = null;
-            continue;
-          }
-          // fallback, don't consider cross rack
-          if (ancestorGen == RACK_LEVEL) {
-            ancestorGen--;
-            continue;
-          }
-        }
-        // no constraints left to relax, or fallback is disabled
-        throw new SCMException("No satisfied datanode to meet the" +
-            " excludedNodes and affinityNode constraints.", null);
-      }
-      if (hasEnoughSpace((DatanodeDetails)node, sizeRequired)) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Datanode {} is chosen for container. Required size is {}",
-              node.toString(), sizeRequired);
-        }
-        metrics.incrDatanodeChooseSuccessCount();
-        if (isFallbacked) {
-          metrics.incrDatanodeChooseFallbackCount();
-        }
-        return node;
-      } else {
-        maxRetry--;
-        if (maxRetry == 0) {
-          // avoid the infinite loop
-          String errMsg = "No satisfied datanode to meet the space constrains. "
-              + " sizeRequired: " + sizeRequired;
-          LOG.info(errMsg);
-          throw new SCMException(errMsg, null);
-        }
-        if (excludedNodesForCapacity == null) {
-          excludedNodesForCapacity = new ArrayList<>();
-        }
-        excludedNodesForCapacity.add(node.getNetworkFullPath());
-      }
-    }
-  }
-
-  /**
-   * Chooses a batch of datanodes on different racks from excludedNodes and
-   * chosenNodes.
-   *
-   * @param excludedNodes - list of the datanodes to be excluded. Can be null.
-   * @param chosenNodes - list of nodes already chosen. These nodes should also
-   *                    be excluded. Cannot be null.
-   * @param favoredNodes - list of favoredNodes. It's a hint. Whether the nodes
-   *                     are chosen depends on whether they meet the
-   *                     constraints. Can be null.
-   * @param favorIndex - the node index of favoredNodes which is not chosen yet.
-   * @param nodesRequired - number of datanodes required.
-   * @param sizeRequired - size required for the container or block.
-   * @return List of chosen datanodes.
-   * @throws SCMException if the required number of datanodes cannot be found.
-   */
-  private List<DatanodeDetails> chooseNodes(List<Node> excludedNodes,
-      List<Node> chosenNodes, List<DatanodeDetails> favoredNodes,
-      int favorIndex, int nodesRequired, long sizeRequired)
-      throws SCMException {
-    Preconditions.checkArgument(chosenNodes != null);
-    List<Node> excludedNodeList = excludedNodes != null ?
-        excludedNodes : chosenNodes;
-    int favoredNodeNum = favoredNodes == null ? 0 : favoredNodes.size();
-    while (true) {
-      Node favoredNode = favoredNodeNum > favorIndex ?
-          favoredNodes.get(favorIndex) : null;
-      Node chosenNode;
-      if (favoredNode != null && networkTopology.isSameParent(
-          excludedNodeList.get(excludedNodeList.size() - 1), favoredNode)) {
-        chosenNode = favoredNode;
-        favorIndex++;
-      } else {
-        chosenNode = chooseNode(excludedNodeList, null, sizeRequired);
-      }
-      excludedNodeList.add(chosenNode);
-      if (excludedNodeList != chosenNodes) {
-        chosenNodes.add(chosenNode);
-      }
-      nodesRequired--;
-      if (nodesRequired == 0) {
-        return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0]));
-      }
-    }
-  }
-}
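For illustration, a minimal, self-contained sketch (plain Java, not the HDDS API) of the rack-aware pattern the code above implements: first replica on any rack, second replica on the same rack as the first, remaining replicas on other racks. The topology map and node names here are hypothetical.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class RackAwareSketch {
      public static void main(String[] args) {
        // Hypothetical topology: rack path -> datanodes on that rack.
        Map<String, List<String>> racks = new LinkedHashMap<>();
        racks.put("/rack1", Arrays.asList("dn1", "dn2"));
        racks.put("/rack2", Arrays.asList("dn3", "dn4"));
        racks.put("/rack3", Arrays.asList("dn5", "dn6"));

        int nodesRequired = 3;
        List<String> chosen = new ArrayList<>();

        // First replica: any rack (the real code also consults favoredNodes).
        String firstRack = "/rack1";
        chosen.add(racks.get(firstRack).get(0));

        // Second replica: the same rack as the first.
        chosen.add(racks.get(firstRack).get(1));

        // Remaining replicas: racks different from the first.
        for (Map.Entry<String, List<String>> e : racks.entrySet()) {
          if (chosen.size() == nodesRequired) {
            break;
          }
          if (!e.getKey().equals(firstRack)) {
            chosen.add(e.getValue().get(0));
          }
        }
        System.out.println("Chosen replicas: " + chosen); // [dn1, dn2, dn3]
      }
    }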
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
deleted file mode 100644
index 6b1a5c8..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-
-/**
- * Container placement policy that randomly chooses healthy datanodes.
- * This is very similar to the current HDFS placement. That is, we
- * just randomly place containers without any consideration of utilization.
- * <p>
- * That means we rely on balancer to achieve even distribution of data.
- * Balancer will need to support containers as a feature before this class
- * can be practically used.
- */
-public final class SCMContainerPlacementRandom extends SCMCommonPolicy
-    implements ContainerPlacementPolicy {
-  @VisibleForTesting
-  static final Logger LOG =
-      LoggerFactory.getLogger(SCMContainerPlacementRandom.class);
-
-  /**
-   * Constructs a random Block Placement policy.
-   *
-   * @param nodeManager nodeManager
-   * @param conf Config
-   * @param networkTopology network topology; not used by this policy.
-   * @param fallback whether fallback is allowed; not used by this policy.
-   * @param metrics placement metrics; not used by this policy.
-   */
-  public SCMContainerPlacementRandom(final NodeManager nodeManager,
-      final Configuration conf, final NetworkTopology networkTopology,
-      final boolean fallback, final SCMContainerPlacementMetrics metrics) {
-    super(nodeManager, conf);
-  }
-
-  /**
-   * Called by the SCM to choose datanodes.
-   *
-   * @param excludedNodes - list of the datanodes to exclude.
-   * @param favoredNodes - list of nodes preferred.
-   * @param nodesRequired - number of datanodes required.
-   * @param sizeRequired - size required for the container or block.
-   * @return List of Datanodes.
-   * @throws SCMException if enough healthy datanodes are not available.
-   */
-  @Override
-  public List<DatanodeDetails> chooseDatanodes(
-      List<DatanodeDetails> excludedNodes, List<DatanodeDetails> favoredNodes,
-      final int nodesRequired, final long sizeRequired) throws SCMException {
-    List<DatanodeDetails> healthyNodes =
-        super.chooseDatanodes(excludedNodes, favoredNodes, nodesRequired,
-            sizeRequired);
-
-    if (healthyNodes.size() == nodesRequired) {
-      return healthyNodes;
-    }
-    return getResultSet(nodesRequired, healthyNodes);
-  }
-
-  /**
-   * Just chooses a node randomly and removes it from the set of nodes we
-   * can choose from.
-   *
-   * @param healthyNodes - all healthy datanodes.
-   * @return one randomly chosen datanode from the list of healthy datanodes.
-   */
-  @Override
-  public DatanodeDetails chooseNode(final List<DatanodeDetails> healthyNodes) {
-    DatanodeDetails selectedNode =
-        healthyNodes.get(getRand().nextInt(healthyNodes.size()));
-    healthyNodes.remove(selectedNode);
-    return selectedNode;
-  }
-}
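The selection step above is sampling without replacement: pick a random index, then remove the node so it cannot be picked twice. A self-contained sketch of the same idea (names here are illustrative, not part of the HDDS API):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Random;

    public class RandomPlacementSketch {
      static <T> List<T> pick(List<T> healthy, int required, Random rand) {
        List<T> pool = new ArrayList<>(healthy); // copy; don't mutate caller's list
        List<T> result = new ArrayList<>();
        for (int i = 0; i < required && !pool.isEmpty(); i++) {
          // Random index, then remove, so no node is selected twice.
          result.add(pool.remove(rand.nextInt(pool.size())));
        }
        return result;
      }

      public static void main(String[] args) {
        List<String> nodes = Arrays.asList("dn1", "dn2", "dn3", "dn4");
        System.out.println(pick(nodes, 3, new Random()));
      }
    }

Note that the real chooseNode() mutates the caller's healthyNodes list in place; the sketch copies it first, which is the safer default outside this class.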
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java
deleted file mode 100644
index 1cb810d..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-// Various placement algorithms.
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java
deleted file mode 100644
index b8e8998..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.placement.metrics;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import java.io.IOException;
-
-/**
- * This class represents the SCM container stat.
- */
-public class ContainerStat {
-  /**
-   * The maximum container size.
-   */
-  @JsonProperty("Size")
-  private LongMetric size;
-
-  /**
-   * The number of bytes used by the container.
-   */
-  @JsonProperty("Used")
-  private LongMetric used;
-
-  /**
-   * The number of keys in the container.
-   */
-  @JsonProperty("KeyCount")
-  private LongMetric keyCount;
-
-  /**
-   * The number of bytes read from the container.
-   */
-  @JsonProperty("ReadBytes")
-  private LongMetric readBytes;
-
-  /**
-   * The number of bytes written into the container.
-   */
-  @JsonProperty("WriteBytes")
-  private LongMetric writeBytes;
-
-  /**
-   * The number of times the container is read.
-   */
-  @JsonProperty("ReadCount")
-  private LongMetric readCount;
-
-  /**
-   * The number of times the container is written into.
-   */
-  @JsonProperty("WriteCount")
-  private LongMetric writeCount;
-
-  public ContainerStat() {
-    this(0L, 0L, 0L, 0L, 0L, 0L, 0L);
-  }
-
-  public ContainerStat(long size, long used, long keyCount, long readBytes,
-      long writeBytes, long readCount, long writeCount) {
-    Preconditions.checkArgument(size >= 0,
-        "Container size cannot be negative.");
-    Preconditions.checkArgument(used >= 0,
-        "Used space cannot be negative.");
-    Preconditions.checkArgument(keyCount >= 0,
-        "Key count cannot be negative.");
-    Preconditions.checkArgument(readBytes >= 0,
-        "Read bytes cannot be negative.");
-    Preconditions.checkArgument(writeBytes >= 0,
-        "Write bytes cannot be negative.");
-    Preconditions.checkArgument(readCount >= 0,
-        "Read count cannot be negative.");
-    Preconditions.checkArgument(writeCount >= 0,
-        "Write count cannot be negative.");
-
-    this.size = new LongMetric(size);
-    this.used = new LongMetric(used);
-    this.keyCount = new LongMetric(keyCount);
-    this.readBytes = new LongMetric(readBytes);
-    this.writeBytes = new LongMetric(writeBytes);
-    this.readCount = new LongMetric(readCount);
-    this.writeCount = new LongMetric(writeCount);
-  }
-
-  public LongMetric getSize() {
-    return size;
-  }
-
-  public LongMetric getUsed() {
-    return used;
-  }
-
-  public LongMetric getKeyCount() {
-    return keyCount;
-  }
-
-  public LongMetric getReadBytes() {
-    return readBytes;
-  }
-
-  public LongMetric getWriteBytes() {
-    return writeBytes;
-  }
-
-  public LongMetric getReadCount() {
-    return readCount;
-  }
-
-  public LongMetric getWriteCount() {
-    return writeCount;
-  }
-
-  public void add(ContainerStat stat) {
-    if (stat == null) {
-      return;
-    }
-
-    this.size.add(stat.getSize().get());
-    this.used.add(stat.getUsed().get());
-    this.keyCount.add(stat.getKeyCount().get());
-    this.readBytes.add(stat.getReadBytes().get());
-    this.writeBytes.add(stat.getWriteBytes().get());
-    this.readCount.add(stat.getReadCount().get());
-    this.writeCount.add(stat.getWriteCount().get());
-  }
-
-  public void subtract(ContainerStat stat) {
-    if (stat == null) {
-      return;
-    }
-
-    this.size.subtract(stat.getSize().get());
-    this.used.subtract(stat.getUsed().get());
-    this.keyCount.subtract(stat.getKeyCount().get());
-    this.readBytes.subtract(stat.getReadBytes().get());
-    this.writeBytes.subtract(stat.getWriteBytes().get());
-    this.readCount.subtract(stat.getReadCount().get());
-    this.writeCount.subtract(stat.getWriteCount().get());
-  }
-
-  public String toJsonString() {
-    try {
-      return JsonUtils.toJsonString(this);
-    } catch (IOException ignored) {
-      return null;
-    }
-  }
-}
\ No newline at end of file
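Assuming the ContainerStat class above is on the classpath, aggregating and rolling back a per-datanode report might look like this (all values are made up):

    import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;

    public class ContainerStatDemo {
      public static void main(String[] args) {
        ContainerStat total = new ContainerStat(); // all-zero running total
        // Constructor order: size, used, keyCount, readBytes, writeBytes,
        // readCount, writeCount.
        ContainerStat report = new ContainerStat(
            5_368_709_120L, 1_048_576L, 42L, 2048L, 4096L, 7L, 9L);
        total.add(report);      // fold a datanode's report into the total
        total.subtract(report); // back it out, e.g. when the node is removed
        System.out.println(total.toJsonString());
      }
    }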
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java
deleted file mode 100644
index 5305942..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.placement.metrics;
-
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-
-/**
- * DatanodeMetric acts as the basis for all the metrics that are used in
- * comparing two datanodes.
- */
-public interface DatanodeMetric<T, S>  {
-
-  /**
-   * Some syntactic sugar over Comparable interface. This makes code easier to
-   * read.
-   *
-   * @param o - Other Object
-   * @return - True if *this* object is greater than argument.
-   */
-  boolean isGreater(T o);
-
-  /**
-   * Inverse of isGreater.
-   *
-   * @param o - other object.
-   * @return True if *this* object is Lesser than argument.
-   */
-  boolean isLess(T o);
-
-  /**
-   * Returns true if the object has the same values. Because of issues with
-   * equals, and loss of type information this interface supports isEqual.
-   *
-   * @param o object to compare.
-   * @return True, if the values match.
-   */
-  boolean isEqual(T o);
-
-  /**
-   * A resource check, defined by resourceNeeded.
-   * For example, S could be the bytes required,
-   * and the DatanodeMetric replies whether that requirement can be met.
-   *
-   * @param resourceNeeded -  ResourceNeeded in its own metric.
-   * @return boolean, True if this resource requirement can be met.
-   */
-  boolean hasResources(S resourceNeeded) throws SCMException;
-
-  /**
-   * Returns the metric.
-   *
-   * @return T, the object that represents this metric.
-   */
-  T get();
-
-  /**
-   * Sets the value of this metric.
-   *
-   * @param value - value of the metric.
-   */
-  void set(T value);
-
-  /**
-   * Adds a value to the base.
-   * @param value - value
-   */
-  void add(T value);
-
-  /**
-   * Subtracts a value.
-   * @param value value
-   */
-  void subtract(T value);
-
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java
deleted file mode 100644
index e1c8f87..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.placement.metrics;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility;
-
-/**
- * A helper class for all metrics based on Longs.
- */
-@JsonAutoDetect(fieldVisibility = Visibility.ANY)
-public class LongMetric implements DatanodeMetric<Long, Long> {
-  private Long value;
-
-  /**
-   * Constructs a long Metric.
-   *
-   * @param value Value for this metric.
-   */
-  public LongMetric(Long value) {
-    this.value = value;
-  }
-
-  /**
-   * Some syntactic sugar over Comparable interface. This makes code easier to
-   * read.
-   *
-   * @param o - Other Object
-   * @return - True if *this* object is greater than argument.
-   */
-  @Override
-  public boolean isGreater(Long o) {
-    return compareTo(o) > 0;
-  }
-
-  /**
-   * Inverse of isGreater.
-   *
-   * @param o - other object.
-   * @return True if *this* object is Lesser than argument.
-   */
-  @Override
-  public boolean isLess(Long o) {
-    return compareTo(o) < 0;
-  }
-
-  /**
-   * Returns true if the object has the same values. Because of issues with
-   * equals, and loss of type information this interface supports isEqual.
-   *
-   * @param o object to compare.
-   * @return True, if the values match.
-   */
-  @Override
-  public boolean isEqual(Long o) {
-    return compareTo(o) == 0;
-  }
-
-  /**
-   * A resource check, defined by resourceNeeded.
-   * For example, S could be the bytes required,
-   * and the DatanodeMetric replies whether that requirement can be met.
-   *
-   * @param resourceNeeded -  ResourceNeeded in its own metric.
-   * @return boolean, True if this resource requirement can be met.
-   */
-  @Override
-  public boolean hasResources(Long resourceNeeded) {
-    return isGreater(resourceNeeded);
-  }
-
-  /**
-   * Returns the metric.
-   *
-   * @return T, the object that represents this metric.
-   */
-  @Override
-  public Long get() {
-    return this.value;
-  }
-
-  /**
-   * Sets the value of this metric.
-   *
-   * @param setValue - value of the metric.
-   */
-  @Override
-  public void set(Long setValue) {
-    this.value = setValue;
-  }
-
-  /**
-   * Adds a value to the base.
-   *
-   * @param addValue - value
-   */
-  @Override
-  public void add(Long addValue) {
-    this.value += addValue;
-  }
-
-  /**
-   * Subtracts a value.
-   *
-   * @param subValue value
-   */
-  @Override
-  public void subtract(Long subValue) {
-    this.value -= subValue;
-  }
-
-  /**
-   * Compares this object with the specified object for order.  Returns a
-   * negative integer, zero, or a positive integer as this object is less
-   * than, equal to, or greater than the specified object.
-   *
-   * @param o the object to be compared.
-   * @return a negative integer, zero, or a positive integer as this object is
-   * less than, equal to, or greater than the specified object.
-   * @throws NullPointerException if the specified object is null
-   * @throws ClassCastException   if the specified object's type prevents it
-   *                              from being compared to this object.
-   */
-  public int compareTo(Long o) {
-    return Long.compare(this.value, o);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    LongMetric that = (LongMetric) o;
-
-    return value != null ? value.equals(that.value) : that.value == null;
-  }
-
-  @Override
-  public int hashCode() {
-    return value != null ? value.hashCode() : 0;
-  }
-}
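A small usage sketch of LongMetric, assuming the class above is on the classpath:

    import org.apache.hadoop.hdds.scm.container.placement.metrics.LongMetric;

    public class LongMetricDemo {
      public static void main(String[] args) {
        LongMetric remaining = new LongMetric(100L);
        remaining.subtract(30L);                         // 70 left
        System.out.println(remaining.hasResources(50L)); // true: 70 > 50
        System.out.println(remaining.isLess(100L));      // true: 70 < 100
      }
    }

Note that hasResources() delegates to isGreater(), a strict comparison: a metric exactly equal to the requirement reports false.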
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java
deleted file mode 100644
index d6857d3..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.placement.metrics;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * Interface that defines Node Stats.
- */
-interface NodeStat {
-  /**
-   * Get capacity of the node.
-   * @return capacity of the node.
-   */
-  LongMetric getCapacity();
-
-  /**
-   * Get the used space of the node.
-   * @return the used space of the node.
-   */
-  LongMetric getScmUsed();
-
-  /**
-   * Get the remaining space of the node.
-   * @return the remaining space of the node.
-   */
-  LongMetric getRemaining();
-
-  /**
-   * Set the total/used/remaining space.
-   * @param capacity - total space.
-   * @param used - used space.
-   * @param remain - remaining space.
-   */
-  @VisibleForTesting
-  void set(long capacity, long used, long remain);
-
-  /**
-   * Adding of the stat.
-   * @param stat - stat to be added.
-   * @return updated node stat.
-   */
-  NodeStat add(NodeStat stat);
-
-  /**
-   * Subtract of the stat.
-   * @param stat - stat to be subtracted.
-   * @return updated nodestat.
-   */
-  NodeStat subtract(NodeStat stat);
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java
deleted file mode 100644
index e4dd9aa..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.placement.metrics;
-
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
-
-/**
- * This class is for maintaining StorageContainerManager statistics.
- */
-@Metrics(about="Storage Container Manager Metrics", context="dfs")
-public class SCMMetrics {
-  public static final String SOURCE_NAME =
-      SCMMetrics.class.getSimpleName();
-
-  /**
-   * Container stat metrics, the meaning of following metrics
-   * can be found in {@link ContainerStat}.
-   */
-  @Metric private MutableGaugeLong lastContainerReportSize;
-  @Metric private MutableGaugeLong lastContainerReportUsed;
-  @Metric private MutableGaugeLong lastContainerReportKeyCount;
-  @Metric private MutableGaugeLong lastContainerReportReadBytes;
-  @Metric private MutableGaugeLong lastContainerReportWriteBytes;
-  @Metric private MutableGaugeLong lastContainerReportReadCount;
-  @Metric private MutableGaugeLong lastContainerReportWriteCount;
-
-  @Metric private MutableCounterLong containerReportSize;
-  @Metric private MutableCounterLong containerReportUsed;
-  @Metric private MutableCounterLong containerReportKeyCount;
-  @Metric private MutableCounterLong containerReportReadBytes;
-  @Metric private MutableCounterLong containerReportWriteBytes;
-  @Metric private MutableCounterLong containerReportReadCount;
-  @Metric private MutableCounterLong containerReportWriteCount;
-
-  public SCMMetrics() {
-  }
-
-  public static SCMMetrics create() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME, "Storage Container Manager Metrics",
-        new SCMMetrics());
-  }
-
-  public void setLastContainerReportSize(long size) {
-    this.lastContainerReportSize.set(size);
-  }
-
-  public void setLastContainerReportUsed(long used) {
-    this.lastContainerReportUsed.set(used);
-  }
-
-  public void setLastContainerReportKeyCount(long keyCount) {
-    this.lastContainerReportKeyCount.set(keyCount);
-  }
-
-  public void setLastContainerReportReadBytes(long readBytes) {
-    this.lastContainerReportReadBytes.set(readBytes);
-  }
-
-  public void setLastContainerReportWriteBytes(long writeBytes) {
-    this.lastContainerReportWriteBytes.set(writeBytes);
-  }
-
-  public void setLastContainerReportReadCount(long readCount) {
-    this.lastContainerReportReadCount.set(readCount);
-  }
-
-  public void setLastContainerReportWriteCount(long writeCount) {
-    this.lastContainerReportWriteCount.set(writeCount);
-  }
-
-  public void incrContainerReportSize(long size) {
-    this.containerReportSize.incr(size);
-  }
-
-  public void incrContainerReportUsed(long used) {
-    this.containerReportUsed.incr(used);
-  }
-
-  public void incrContainerReportKeyCount(long keyCount) {
-    this.containerReportKeyCount.incr(keyCount);
-  }
-
-  public void incrContainerReportReadBytes(long readBytes) {
-    this.containerReportReadBytes.incr(readBytes);
-  }
-
-  public void incrContainerReportWriteBytes(long writeBytes) {
-    this.containerReportWriteBytes.incr(writeBytes);
-  }
-
-  public void incrContainerReportReadCount(long readCount) {
-    this.containerReportReadCount.incr(readCount);
-  }
-
-  public void incrContainerReportWriteCount(long writeCount) {
-    this.containerReportWriteCount.incr(writeCount);
-  }
-
-  public void setLastContainerStat(ContainerStat newStat) {
-    this.lastContainerReportSize.set(newStat.getSize().get());
-    this.lastContainerReportUsed.set(newStat.getUsed().get());
-    this.lastContainerReportKeyCount.set(newStat.getKeyCount().get());
-    this.lastContainerReportReadBytes.set(newStat.getReadBytes().get());
-    this.lastContainerReportWriteBytes.set(newStat.getWriteBytes().get());
-    this.lastContainerReportReadCount.set(newStat.getReadCount().get());
-    this.lastContainerReportWriteCount.set(newStat.getWriteCount().get());
-  }
-
-  public void incrContainerStat(ContainerStat deltaStat) {
-    this.containerReportSize.incr(deltaStat.getSize().get());
-    this.containerReportUsed.incr(deltaStat.getUsed().get());
-    this.containerReportKeyCount.incr(deltaStat.getKeyCount().get());
-    this.containerReportReadBytes.incr(deltaStat.getReadBytes().get());
-    this.containerReportWriteBytes.incr(deltaStat.getWriteBytes().get());
-    this.containerReportReadCount.incr(deltaStat.getReadCount().get());
-    this.containerReportWriteCount.incr(deltaStat.getWriteCount().get());
-  }
-
-  public void decrContainerStat(ContainerStat deltaStat) {
-    this.containerReportSize.incr(-1 * deltaStat.getSize().get());
-    this.containerReportUsed.incr(-1 * deltaStat.getUsed().get());
-    this.containerReportKeyCount.incr(-1 * deltaStat.getKeyCount().get());
-    this.containerReportReadBytes.incr(-1 * deltaStat.getReadBytes().get());
-    this.containerReportWriteBytes.incr(-1 * deltaStat.getWriteBytes().get());
-    this.containerReportReadCount.incr(-1 * deltaStat.getReadCount().get());
-    this.containerReportWriteCount.incr(-1 * deltaStat.getWriteCount().get());
-  }
-
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE_NAME);
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
deleted file mode 100644
index a886084..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
+++ /dev/null
@@ -1,224 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.placement.metrics;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-/**
- * SCM Node Metric that is used in the placement classes.
- */
-public class SCMNodeMetric  implements DatanodeMetric<SCMNodeStat, Long> {
-  private SCMNodeStat stat;
-
-  /**
-   * Constructs an SCMNode Metric.
-   *
-   * @param stat - SCMNodeStat.
-   */
-  public SCMNodeMetric(SCMNodeStat stat) {
-    this.stat = stat;
-  }
-
-  /**
-   * Constructs an SCMNodeMetric from the capacity, used and remaining
-   * space on a datanode.
-   *
-   * @param capacity in bytes
-   * @param used in bytes
-   * @param remaining in bytes
-   */
-  @VisibleForTesting
-  public SCMNodeMetric(long capacity, long used, long remaining) {
-    this.stat = new SCMNodeStat();
-    this.stat.set(capacity, used, remaining);
-  }
-
-  /**
-   * Returns true if *this* node is more utilized than the argument, i.e.
-   * its used-to-capacity ratio is higher.
-   *
-   * @param o - Other Object
-   * @return - True if *this* object is greater than argument.
-   */
-  @Override
-  public boolean isGreater(SCMNodeStat o) {
-    Preconditions.checkNotNull(this.stat, "Argument cannot be null");
-    Preconditions.checkNotNull(o, "Argument cannot be null");
-
-    // if zero, replace with 1 for the division to work.
-    long thisDenominator = (this.stat.getCapacity().get() == 0)
-        ? 1 : this.stat.getCapacity().get();
-    long otherDenominator = (o.getCapacity().get() == 0)
-        ? 1 : o.getCapacity().get();
-
-    float thisNodeWeight =
-        stat.getScmUsed().get() / (float) thisDenominator;
-
-    float oNodeWeight =
-        o.getScmUsed().get() / (float) otherDenominator;
-
-    if (Math.abs(thisNodeWeight - oNodeWeight) > 0.000001) {
-      return thisNodeWeight > oNodeWeight;
-    }
-    // if these nodes have similar weights then return the node with more
-    // free space as the greater node.
-    return stat.getRemaining().isGreater(o.getRemaining().get());
-  }
-
-  /**
-   * Inverse of isGreater.
-   *
-   * @param o - other object.
-   * @return True if *this* object is Lesser than argument.
-   */
-  @Override
-  public boolean isLess(SCMNodeStat o) {
-    Preconditions.checkNotNull(o, "Argument cannot be null");
-
-    // if zero, replace with 1 for the division to work.
-    long thisDenominator = (this.stat.getCapacity().get() == 0)
-        ? 1 : this.stat.getCapacity().get();
-    long otherDenominator = (o.getCapacity().get() == 0)
-        ? 1 : o.getCapacity().get();
-
-    float thisNodeWeight =
-        stat.getScmUsed().get() / (float) thisDenominator;
-
-    float oNodeWeight =
-        o.getScmUsed().get() / (float) otherDenominator;
-
-    if (Math.abs(thisNodeWeight - oNodeWeight) > 0.000001) {
-      return thisNodeWeight < oNodeWeight;
-    }
-
-    // if these nodes have similar weights then return the node with less
-    // free space as the lesser node.
-    return stat.getRemaining().isLess(o.getRemaining().get());
-  }
-
-  /**
-   * Returns true if the object has the same values. Because of issues with
-   * equals, and loss of type information this interface supports isEqual.
-   *
-   * @param o object to compare.
-   * @return True, if the values match.
-   * TODO : Consider if it makes sense to add remaining to this equation.
-   */
-  @Override
-  public boolean isEqual(SCMNodeStat o) {
-    float thisNodeWeight = stat.getScmUsed().get() / (float)
-        stat.getCapacity().get();
-    float oNodeWeight = o.getScmUsed().get() / (float) o.getCapacity().get();
-    return Math.abs(thisNodeWeight - oNodeWeight) < 0.000001;
-  }
-
-  /**
-   * A resource check, defined by resourceNeeded.
-   * For example, S could be the bytes required,
-   * and the DatanodeMetric replies whether that requirement can be met.
-   *
-   * @param resourceNeeded -  ResourceNeeded in its own metric.
-   * @return boolean, True if this resource requirement can be met.
-   */
-  @Override
-  public boolean hasResources(Long resourceNeeded) {
-    return false;
-  }
-
-  /**
-   * Returns the metric.
-   *
-   * @return T, the object that represents this metric.
-   */
-  @Override
-  public SCMNodeStat get() {
-    return stat;
-  }
-
-  /**
-   * Sets the value of this metric.
-   *
-   * @param value - value of the metric.
-   */
-  @Override
-  public void set(SCMNodeStat value) {
-    stat.set(value.getCapacity().get(), value.getScmUsed().get(),
-        value.getRemaining().get());
-  }
-
-  /**
-   * Adds a value to the base.
-   *
-   * @param value - value
-   */
-  @Override
-  public void add(SCMNodeStat value) {
-    stat.add(value);
-  }
-
-  /**
-   * Subtracts a value.
-   *
-   * @param value value
-   */
-  @Override
-  public void subtract(SCMNodeStat value) {
-    stat.subtract(value);
-  }
-
-  /**
-   * Compares this object with the specified object for order.  Returns a
-   * negative integer, zero, or a positive integer as this object is less
-   * than, equal to, or greater than the specified object.
-   *
-   * @param o the object to be compared.
-   * @return a negative integer, zero, or a positive integer as this object is
-   * less than, equal to, or greater than the specified object.
-   * @throws NullPointerException if the specified object is null
-   * @throws ClassCastException   if the specified object's type prevents it
-   *                              from being compared to this object.
-   */
-  //@Override
-  public int compareTo(SCMNodeStat o) {
-    if (isEqual(o)) {
-      return 0;
-    }
-    if (isGreater(o)) {
-      return 1;
-    } else {
-      return -1;
-    }
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    SCMNodeMetric that = (SCMNodeMetric) o;
-
-    return stat != null ? stat.equals(that.stat) : that.stat == null;
-  }
-
-  @Override
-  public int hashCode() {
-    return stat != null ? stat.hashCode() : 0;
-  }
-}
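A sketch of how placement code can compare two nodes with SCMNodeMetric, assuming the class above is on the classpath (capacity, used and remaining are made-up byte counts):

    import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;

    public class NodeMetricDemo {
      public static void main(String[] args) {
        SCMNodeMetric a = new SCMNodeMetric(100L, 30L, 70L); // 30% used
        SCMNodeMetric b = new SCMNodeMetric(100L, 60L, 40L); // 60% used
        // b is "greater" because its used/capacity ratio is higher, so a
        // policy preferring emptier nodes would place new data on a.
        System.out.println(b.isGreater(a.get())); // true
        System.out.println(a.isLess(b.get()));    // true
      }
    }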
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
deleted file mode 100644
index 962bbb4..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.placement.metrics;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-/**
- * This class represents the SCM node stat.
- */
-public class SCMNodeStat implements NodeStat {
-  private LongMetric capacity;
-  private LongMetric scmUsed;
-  private LongMetric remaining;
-
-  public SCMNodeStat() {
-    this(0L, 0L, 0L);
-  }
-
-  public SCMNodeStat(SCMNodeStat other) {
-    this(other.capacity.get(), other.scmUsed.get(), other.remaining.get());
-  }
-
-  public SCMNodeStat(long capacity, long used, long remaining) {
-    Preconditions.checkArgument(capacity >= 0,
-        "Capacity cannot be negative.");
-    Preconditions.checkArgument(used >= 0,
-        "Used space cannot be negative.");
-    Preconditions.checkArgument(remaining >= 0,
-        "Remaining space cannot be negative.");
-    this.capacity = new LongMetric(capacity);
-    this.scmUsed = new LongMetric(used);
-    this.remaining = new LongMetric(remaining);
-  }
-
-  /**
-   * @return the total configured capacity of the node.
-   */
-  @Override
-  public LongMetric getCapacity() {
-    return capacity;
-  }
-
-  /**
-   * @return the total SCM used space on the node.
-   */
-  @Override
-  public LongMetric getScmUsed() {
-    return scmUsed;
-  }
-
-  /**
-   * @return the total remaining space available on the node.
-   */
-  @Override
-  public LongMetric getRemaining() {
-    return remaining;
-  }
-
-  /**
-   * Set the capacity, used and remaining space on a datanode.
-   *
-   * @param newCapacity in bytes
-   * @param newUsed in bytes
-   * @param newRemaining in bytes
-   */
-  @Override
-  @VisibleForTesting
-  public void set(long newCapacity, long newUsed, long newRemaining) {
-    Preconditions.checkArgument(newCapacity >= 0,
-        "Capacity cannot be negative.");
-    Preconditions.checkArgument(newUsed >= 0,
-        "Used space cannot be negative.");
-    Preconditions.checkArgument(newRemaining >= 0,
-        "Remaining space cannot be negative.");
-
-    this.capacity = new LongMetric(newCapacity);
-    this.scmUsed = new LongMetric(newUsed);
-    this.remaining = new LongMetric(newRemaining);
-  }
-
-  /**
-   * Adds a new NodeStat to the existing values of the node.
-   *
-   * @param stat NodeStat.
-   * @return SCMNodeStat
-   */
-  @Override
-  public SCMNodeStat add(NodeStat stat) {
-    this.capacity.set(this.getCapacity().get() + stat.getCapacity().get());
-    this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get());
-    this.remaining.set(this.getRemaining().get() + stat.getRemaining().get());
-    return this;
-  }
-
-  /**
-   * Subtracts the stat values from the existing NodeStat.
-   *
-   * @param stat SCMNodeStat.
-   * @return Modified SCMNodeStat
-   */
-  @Override
-  public SCMNodeStat subtract(NodeStat stat) {
-    this.capacity.set(this.getCapacity().get() - stat.getCapacity().get());
-    this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get());
-    this.remaining.set(this.getRemaining().get() - stat.getRemaining().get());
-    return this;
-  }
-
-  @Override
-  public boolean equals(Object to) {
-    if (to instanceof SCMNodeStat) {
-      SCMNodeStat tempStat = (SCMNodeStat) to;
-      return capacity.isEqual(tempStat.getCapacity().get()) &&
-          scmUsed.isEqual(tempStat.getScmUsed().get()) &&
-          remaining.isEqual(tempStat.getRemaining().get());
-    }
-    return false;
-  }
-
-  @Override
-  public int hashCode() {
-    return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get());
-  }
-}
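A sketch of aggregating per-node stats with SCMNodeStat, assuming the class above is on the classpath; add() and subtract() return this, so calls chain:

    import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;

    public class NodeStatDemo {
      public static void main(String[] args) {
        SCMNodeStat cluster = new SCMNodeStat(); // starts at 0/0/0
        SCMNodeStat dn1 = new SCMNodeStat(100L, 30L, 70L);
        SCMNodeStat dn2 = new SCMNodeStat(200L, 50L, 150L);
        cluster.add(dn1).add(dn2);                        // aggregate both
        System.out.println(cluster.getCapacity().get());  // 300
        cluster.subtract(dn1);                            // dn1 removed
        System.out.println(cluster.getRemaining().get()); // 150
      }
    }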
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java
deleted file mode 100644
index 4a81d69..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.placement.metrics;
-
-// Various metrics supported by Datanode and used by SCM in the placement
-// strategy.
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java
deleted file mode 100644
index dc54d9b..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.placement;
-// Classes related to container placement.
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java
deleted file mode 100644
index 92a30d5..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-
-import javax.management.ObjectName;
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.metrics2.util.MBeans;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.utils.Scheduler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Event listener to track the current state of replication.
- */
-public class ReplicationActivityStatus implements
-    ReplicationActivityStatusMXBean, Closeable {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ReplicationActivityStatus.class);
-
-  private Scheduler scheduler;
-  private AtomicBoolean replicationEnabled = new AtomicBoolean();
-  private ObjectName jmxObjectName;
-
-  public ReplicationActivityStatus(Scheduler scheduler) {
-    this.scheduler = scheduler;
-  }
-
-  @Override
-  public boolean isReplicationEnabled() {
-    return replicationEnabled.get();
-  }
-
-  @VisibleForTesting
-  @Override
-  public void setReplicationEnabled(boolean enabled) {
-    replicationEnabled.set(enabled);
-  }
-
-  @VisibleForTesting
-  public void enableReplication() {
-    replicationEnabled.set(true);
-  }
-
-  public void start() {
-    try {
-      this.jmxObjectName =
-          MBeans.register(
-              "StorageContainerManager", "ReplicationActivityStatus", this);
-    } catch (Exception ex) {
-      LOG.error("JMX bean for ReplicationActivityStatus can't be registered",
-          ex);
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (this.jmxObjectName != null) {
-      MBeans.unregister(jmxObjectName);
-    }
-  }
-
-  /**
-   * Waits for
-   * {@link HddsConfigKeys#HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT} and sets
-   * replicationEnabled so that the replication monitor thread starts.
-   */
-  public void fireReplicationStart(boolean safeModeStatus,
-      long waitTime) {
-    if (!safeModeStatus) {
-      scheduler.schedule(() -> {
-        setReplicationEnabled(true);
-        LOG.info("Replication Timer sleep for {} ms completed. Enable "
-            + "Replication", waitTime);
-      }, waitTime, TimeUnit.MILLISECONDS);
-    }
-  }
-}
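A sketch of the lifecycle above; construction of the Scheduler is elided because its API is defined elsewhere in HDDS, and the wait time is an arbitrary example:

    import java.io.IOException;
    import org.apache.hadoop.hdds.scm.container.replication.ReplicationActivityStatus;
    import org.apache.hadoop.hdds.utils.Scheduler;

    public class ReplicationStatusDemo {
      static void demo(Scheduler scheduler) throws IOException {
        ReplicationActivityStatus status =
            new ReplicationActivityStatus(scheduler);
        status.start(); // registers the ReplicationActivityStatus JMX bean
        // Safe mode just ended: enable replication after a 5 second delay.
        status.fireReplicationStart(false, 5_000L);
        // ... later, during shutdown:
        status.close(); // unregisters the JMX bean
      }
    }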
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java
deleted file mode 100644
index 164bd24..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-
-/**
- * JMX interface to monitor replication status.
- */
-public interface ReplicationActivityStatusMXBean {
-
-  boolean isReplicationEnabled();
-
-  void setReplicationEnabled(boolean enabled);
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
deleted file mode 100644
index 934b01e..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.replication;
-
-/**
- * HDDS (Closed) Container replication related classes.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
deleted file mode 100644
index af44a8a..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-package org.apache.hadoop.hdds.scm.container.states;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.TreeSet;
-
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .FAILED_TO_CHANGE_CONTAINER_STATE;
-
-/**
- * Each Attribute that we manage for a container is maintained as a map.
- * <p>
- * Currently we manage the following attributes for a container.
- * <p>
- * 1. StateMap - LifeCycleState -> Set of ContainerIDs
- * 2. TypeMap  - ReplicationType -> Set of ContainerIDs
- * 3. OwnerMap - OwnerNames -> Set of ContainerIDs
- * 4. FactorMap - ReplicationFactor -> Set of ContainerIDs
- * <p>
- * This means that for a cluster size of 750 PB, assuming an average
- * container size of 5 GB, we will have around 150 million containers.
- * <p>
- * That implies that these maps will take around 2/3 GB of RAM which will be
- * pinned down in the SCM. This is deemed acceptable since we can tune the
- * container size -- say we make it 10 GB average size, then we can deal with
- * a cluster size of 1.5 exabytes with the same metadata in SCM's memory.
- * <p>
- * Please note: **This class is not thread safe**. It used to be thread safe,
- * but while benchmarking we found that ContainerStateMap would take 5
- * locks for a single container insert. Without the locks in this class we
- * are able to perform about 540K operations per second; with the locks it
- * goes down to 246K operations per second. Hence we rely on the
- * ContainerStateMap locks to maintain the consistency of data in these
- * classes too, since ContainerAttribute is only used by the
- * ContainerStateMap class.
- */
-public class ContainerAttribute<T> {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerAttribute.class);
-
-  private final Map<T, NavigableSet<ContainerID>> attributeMap;
-  private static final NavigableSet<ContainerID> EMPTY_SET =  Collections
-      .unmodifiableNavigableSet(new TreeSet<>());
-
-  /**
-   * Creates a Container Attribute map from an existing Map.
-   *
-   * @param attributeMap - AttributeMap
-   */
-  public ContainerAttribute(Map<T, NavigableSet<ContainerID>> attributeMap) {
-    this.attributeMap = attributeMap;
-  }
-
-  /**
-   * Create an empty Container Attribute map.
-   */
-  public ContainerAttribute() {
-    this.attributeMap = new HashMap<>();
-  }
-
-  /**
-   * Insert or update the value in the Attribute map.
-   *
-   * @param key - The key to the set where the ContainerID should exist.
-   * @param value - Actual Container ID.
-   * @throws SCMException - on Error
-   */
-  public boolean insert(T key, ContainerID value) throws SCMException {
-    Preconditions.checkNotNull(key);
-    Preconditions.checkNotNull(value);
-
-    if (attributeMap.containsKey(key)) {
-      if (attributeMap.get(key).add(value)) {
-        return true; // we inserted the value as it doesn't exist in the set.
-      } else { // Failure indicates that this ContainerID exists in the Set
-        if (!attributeMap.get(key).remove(value)) {
-          LOG.error("Failure to remove the object from the Map.Key:{}, " +
-              "ContainerID: {}", key, value);
-          throw new SCMException("Failure to remove the object from the Map",
-              FAILED_TO_CHANGE_CONTAINER_STATE);
-        }
-        attributeMap.get(key).add(value);
-        return true;
-      }
-    } else {
-      // This key does not exist, we need to allocate this key in the map.
-      // TODO: Replace TreeSet with FoldedTreeSet from HDFS Utils.
-      // Skipping for now, since FoldedTreeSet does not have implementations
-      // for headSet and TailSet. We need those calls.
-      this.attributeMap.put(key, new TreeSet<>());
-      // This should not fail, we just allocated this object.
-      attributeMap.get(key).add(value);
-      return true;
-    }
-  }
-
-  /**
-   * Returns true if we have this bucket in the attribute map.
-   *
-   * @param key - Key to lookup
-   * @return true if we have the key
-   */
-  public boolean hasKey(T key) {
-    Preconditions.checkNotNull(key);
-    return this.attributeMap.containsKey(key);
-  }
-
-  /**
-   * Returns true if we have the key and the containerID in the bucket.
-   *
-   * @param key - Key to the bucket
-   * @param id - container ID that we want to lookup
-   * @return true if both the key and the container ID are present
-   */
-  public boolean hasContainerID(T key, ContainerID id) {
-    Preconditions.checkNotNull(key);
-    Preconditions.checkNotNull(id);
-
-    return this.attributeMap.containsKey(key) &&
-        this.attributeMap.get(key).contains(id);
-  }
-
-  /**
-   * Returns true if we have the key and the containerID in the bucket.
-   *
-   * @param key - Key to the bucket
-   * @param id - container ID that we want to lookup
-   * @return true if both the key and the container ID are present
-   */
-  public boolean hasContainerID(T key, int id) {
-    return hasContainerID(key, ContainerID.valueof(id));
-  }
-
-  /**
-   * Clears all entries for this key type.
-   *
-   * @param key - Key that identifies the Set.
-   */
-  public void clearSet(T key) {
-    Preconditions.checkNotNull(key);
-
-    if (attributeMap.containsKey(key)) {
-      attributeMap.get(key).clear();
-    } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("key: {} does not exist in the attributeMap", key);
-      }
-    }
-  }
-
-  /**
-   * Removes a container ID from the set pointed by the key.
-   *
-   * @param key - key to identify the set.
-   * @param value - Container ID
-   */
-  public boolean remove(T key, ContainerID value) {
-    Preconditions.checkNotNull(key);
-    Preconditions.checkNotNull(value);
-
-    if (attributeMap.containsKey(key)) {
-      if (!attributeMap.get(key).remove(value)) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("ContainerID: {} does not exist in the set pointed by " +
-              "key:{}", value, key);
-        }
-        return false;
-      }
-      return true;
-    } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("key: {} does not exist in the attributeMap", key);
-      }
-      return false;
-    }
-  }
-
-  /**
-   * Returns the collection that maps to the given key.
-   *
-   * @param key - Key to the bucket.
-   * @return Underlying Set in immutable form.
-   */
-  public NavigableSet<ContainerID> getCollection(T key) {
-    Preconditions.checkNotNull(key);
-
-    if (this.attributeMap.containsKey(key)) {
-      return Collections.unmodifiableNavigableSet(this.attributeMap.get(key));
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("No such Key. Key {}", key);
-    }
-    return EMPTY_SET;
-  }
-
-  /**
-   * Moves a ContainerID from one bucket to another.
-   *
-   * @param currentKey - Current Key
-   * @param newKey - newKey
-   * @param value - ContainerID
-   * @throws SCMException on Error
-   */
-  public void update(T currentKey, T newKey, ContainerID value)
-      throws SCMException {
-    Preconditions.checkNotNull(currentKey);
-    Preconditions.checkNotNull(newKey);
-
-    boolean removed = false;
-    try {
-      removed = remove(currentKey, value);
-      if (!removed) {
-        throw new SCMException("Unable to find key in the current key bucket",
-            FAILED_TO_CHANGE_CONTAINER_STATE);
-      }
-      insert(newKey, value);
-    } catch (SCMException ex) {
-      // if we removed the key, insert it back to original bucket, since the
-      // next insert failed.
-      LOG.error("error in update.", ex);
-      if (removed) {
-        insert(currentKey, value);
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("reinserted the removed key. {}", currentKey);
-        }
-      }
-      throw ex;
-    }
-  }
-}
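
The reverse-index pattern the deleted ContainerAttribute<T> implements -- one NavigableSet of container IDs per attribute value, so "all containers with attribute X" is a single map lookup -- can be sketched in isolation. A minimal standalone illustration with JDK-only types; the names here are illustrative, not part of the HDDS API:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.NavigableSet;
    import java.util.TreeSet;

    // Minimal sketch of the attribute -> sorted-ID-set index; not HDDS code.
    final class AttributeIndex<T> {
      private final Map<T, NavigableSet<Long>> index = new HashMap<>();

      // computeIfAbsent collapses the "allocate bucket on first use" branch
      // that the original insert() spells out by hand.
      boolean insert(T key, long id) {
        return index.computeIfAbsent(key, k -> new TreeSet<>()).add(id);
      }

      boolean remove(T key, long id) {
        NavigableSet<Long> bucket = index.get(key);
        return bucket != null && bucket.remove(id);
      }

      // Callers get an unmodifiable view, mirroring getCollection() above.
      NavigableSet<Long> get(T key) {
        NavigableSet<Long> bucket = index.get(key);
        return bucket == null
            ? Collections.emptyNavigableSet()
            : Collections.unmodifiableNavigableSet(bucket);
      }
    }
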
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerQueryKey.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerQueryKey.java
deleted file mode 100644
index cd49115..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerQueryKey.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.container.states;
-
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-/**
- * Key for the Caching layer for Container Query.
- */
-public class ContainerQueryKey {
-  private final HddsProtos.LifeCycleState state;
-  private final String owner;
-  private final HddsProtos.ReplicationFactor factor;
-  private final HddsProtos.ReplicationType type;
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    ContainerQueryKey that = (ContainerQueryKey) o;
-
-    return new EqualsBuilder()
-        .append(getState(), that.getState())
-        .append(getOwner(), that.getOwner())
-        .append(getFactor(), that.getFactor())
-        .append(getType(), that.getType())
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(61, 71)
-        .append(getState())
-        .append(getOwner())
-        .append(getFactor())
-        .append(getType())
-        .toHashCode();
-  }
-
-  /**
-   * Constructor for ContainerQueryKey.
-   * @param state LifeCycleState
-   * @param owner - Name of the Owner.
-   * @param factor Replication Factor.
-   * @param type - Replication Type.
-   */
-  public ContainerQueryKey(HddsProtos.LifeCycleState state, String owner,
-      HddsProtos.ReplicationFactor factor, HddsProtos.ReplicationType type) {
-    this.state = state;
-    this.owner = owner;
-    this.factor = factor;
-    this.type = type;
-  }
-
-  /**
-   * Returns the state of containers which this key represents.
-   * @return LifeCycleState
-   */
-  public HddsProtos.LifeCycleState getState() {
-    return state;
-  }
-
-  /**
-   * Returns the owner of containers which this key represents.
-   * @return Owner
-   */
-  public String getOwner() {
-    return owner;
-  }
-
-  /**
-   * Returns the replication factor of containers which this key represents.
-   * @return ReplicationFactor
-   */
-  public HddsProtos.ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  /**
-   * Returns the replication type of containers which this key represents.
-   * @return ReplicationType
-   */
-  public HddsProtos.ReplicationType getType() {
-    return type;
-  }
-}
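
ContainerQueryKey exists so that the four query dimensions can collapse into a single hash-map key: two keys built from equal fields must be equal and hash alike, otherwise the result cache can never hit. A hedged sketch of that contract using only the JDK, with simplified String fields standing in for the HddsProtos enums:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Objects;

    public final class QueryKeyDemo {
      // Value object: equality and hash are derived from all four fields.
      static final class QueryKey {
        final String state, owner, factor, type;
        QueryKey(String state, String owner, String factor, String type) {
          this.state = state; this.owner = owner;
          this.factor = factor; this.type = type;
        }
        @Override public boolean equals(Object o) {
          if (this == o) return true;
          if (!(o instanceof QueryKey)) return false;
          QueryKey k = (QueryKey) o;
          return state.equals(k.state) && owner.equals(k.owner)
              && factor.equals(k.factor) && type.equals(k.type);
        }
        @Override public int hashCode() {
          return Objects.hash(state, owner, factor, type);
        }
      }

      public static void main(String[] args) {
        Map<QueryKey, String> cache = new HashMap<>();
        cache.put(new QueryKey("OPEN", "om1", "THREE", "RATIS"), "cached");
        // A distinct instance with equal fields finds the same slot.
        System.out.println(
            cache.get(new QueryKey("OPEN", "om1", "THREE", "RATIS")));
      }
    }
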
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java
deleted file mode 100644
index e4e8ed3..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.container.states;
-
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-
-/**
- * Class that acts as the container state.
- */
-public class ContainerState {
-  private final String owner;
-  private final PipelineID pipelineID;
-
-  /**
-   * Constructs a ContainerState.
-   *
-   * @param owner - Container Owners
-   * @param pipelineID - ID of the pipeline
-   */
-  public ContainerState(String owner, PipelineID pipelineID) {
-    this.pipelineID = pipelineID;
-    this.owner = owner;
-  }
-
-  public String getOwner() {
-    return owner;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    ContainerState that = (ContainerState) o;
-
-    return new EqualsBuilder()
-        .append(owner, that.owner)
-        .append(pipelineID, that.pipelineID)
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(137, 757)
-        .append(owner)
-        .append(pipelineID)
-        .toHashCode();
-  }
-
-  @Override
-  public String toString() {
-    return "ContainerKey{" +
-        ", owner=" + owner +
-        ", pipelineID=" + pipelineID +
-        '}';
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
deleted file mode 100644
index 5fc9400..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ /dev/null
@@ -1,545 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.container.states;
-
-import com.google.common.base.Preconditions;
-
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
-import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerReplicaNotFoundException;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Set;
-import java.util.Collections;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.TreeSet;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.concurrent.ConcurrentHashMap;
-
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .CONTAINER_EXISTS;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .FAILED_TO_CHANGE_CONTAINER_STATE;
-
-/**
- * Container State Map acts like a unified map for various attributes that are
- * used to select containers when we need allocated blocks.
- * <p>
- * This class provides the ability to query 5 classes of attributes. They are
- * <p>
- * 1. LifeCycleStates - LifeCycle States of a container describe which state
- * the container is in. For example, a container needs to be in the Open state
- * for a client to be able to write to it.
- * <p>
- * 2. Owners - Each instance of a Name service -- for example, the Namenode of
- * HDFS, the Ozone Manager (OM) of Ozone, or CBlockServer -- is an owner. It is
- * possible to have many OMs for an Ozone cluster and only one SCM. But SCM
- * keeps the data from each OM in a separate bucket, never mixing them. To
- * write data, often we have to find all open containers for a specific owner.
- * <p>
- * 3. ReplicationType - The clients are allowed to specify what kind of
- * replication pipeline they want to use. Each Container exists on top of a
- * pipeline, so we need to get ReplicationType that is specified by the user.
- * <p>
- * 4. ReplicationFactor - The replication factor represents how many copies
- * of the data should be made; right now we support two values, ONE
- * replica and THREE replicas. The user can specify how many copies should be
- * made for an Ozone key.
- * <p>
- * The most common access pattern of this class is to select a container based
- * on all these parameters, for example, when allocating a block we will
- * select a container that belongs to user1, with Ratis replication which can
- * make 3 copies of data. By default we look for open containers; if we
- * cannot find them, we allocate new containers.
- */
-public class ContainerStateMap {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerStateMap.class);
-
-  private final static NavigableSet<ContainerID> EMPTY_SET  =
-      Collections.unmodifiableNavigableSet(new TreeSet<>());
-
-  private final ContainerAttribute<LifeCycleState> lifeCycleStateMap;
-  private final ContainerAttribute<String> ownerMap;
-  private final ContainerAttribute<ReplicationFactor> factorMap;
-  private final ContainerAttribute<ReplicationType> typeMap;
-  private final Map<ContainerID, ContainerInfo> containerMap;
-  private final Map<ContainerID, Set<ContainerReplica>> replicaMap;
-  private final Map<ContainerQueryKey, NavigableSet<ContainerID>> resultCache;
-
-  // Container State Map lock should be held before calling into
-  // Update ContainerAttributes. The consistency of ContainerAttributes is
-  // protected by this lock.
-  private final ReadWriteLock lock;
-
-  /**
-   * Create a ContainerStateMap.
-   */
-  public ContainerStateMap() {
-    this.lifeCycleStateMap = new ContainerAttribute<>();
-    this.ownerMap = new ContainerAttribute<>();
-    this.factorMap = new ContainerAttribute<>();
-    this.typeMap = new ContainerAttribute<>();
-    this.containerMap = new ConcurrentHashMap<>();
-    this.lock = new ReentrantReadWriteLock();
-    this.replicaMap = new ConcurrentHashMap<>();
-    this.resultCache = new ConcurrentHashMap<>();
-  }
-
-  /**
-   * Adds a ContainerInfo Entry in the ContainerStateMap.
-   *
-   * @param info - container info
-   * @throws SCMException - if the container already exists.
-   */
-  public void addContainer(final ContainerInfo info)
-      throws SCMException {
-    Preconditions.checkNotNull(info, "Container Info cannot be null");
-    Preconditions.checkArgument(info.getReplicationFactor().getNumber() > 0,
-        "ExpectedReplicaCount should be greater than 0");
-
-    lock.writeLock().lock();
-    try {
-      final ContainerID id = info.containerID();
-      if (containerMap.putIfAbsent(id, info) != null) {
-        LOG.debug("Duplicate container ID detected. {}", id);
-        throw new
-            SCMException("Duplicate container ID detected.",
-            CONTAINER_EXISTS);
-      }
-
-      lifeCycleStateMap.insert(info.getState(), id);
-      ownerMap.insert(info.getOwner(), id);
-      factorMap.insert(info.getReplicationFactor(), id);
-      typeMap.insert(info.getReplicationType(), id);
-      replicaMap.put(id, ConcurrentHashMap.newKeySet());
-
-      // Flush the cache of this container type, will be added later when
-      // get container queries are executed.
-      flushCache(info);
-      LOG.trace("Created container with {} successfully.", id);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Removes a Container Entry from ContainerStateMap.
-   *
-   * @param containerID - ContainerID
-   * @throws ContainerNotFoundException - if the container does not exist.
-   */
-  public void removeContainer(final ContainerID containerID)
-      throws ContainerNotFoundException {
-    Preconditions.checkNotNull(containerID, "ContainerID cannot be null");
-    lock.writeLock().lock();
-    try {
-      checkIfContainerExist(containerID);
-      // Should we revert back to the original state if any of the below
-      // remove operation fails?
-      final ContainerInfo info = containerMap.remove(containerID);
-      lifeCycleStateMap.remove(info.getState(), containerID);
-      ownerMap.remove(info.getOwner(), containerID);
-      factorMap.remove(info.getReplicationFactor(), containerID);
-      typeMap.remove(info.getReplicationType(), containerID);
-      // Flush the cache of this container type.
-      flushCache(info);
-      LOG.trace("Removed container with {} successfully.", containerID);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Returns the latest state of Container from SCM's Container State Map.
-   *
-   * @param containerID - ContainerID
-   * @return container info, if found.
-   */
-  public ContainerInfo getContainerInfo(final ContainerID containerID)
-      throws ContainerNotFoundException {
-    lock.readLock().lock();
-    try {
-      checkIfContainerExist(containerID);
-      return containerMap.get(containerID);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Returns the latest set of replicas for the given containerID. Throws a
-   * ContainerNotFoundException if no entry is found for the given containerID.
-   *
-   * @param containerID
-   * @return Set<ContainerReplica>
-   */
-  public Set<ContainerReplica> getContainerReplicas(
-      final ContainerID containerID) throws ContainerNotFoundException {
-    Preconditions.checkNotNull(containerID);
-    lock.readLock().lock();
-    try {
-      checkIfContainerExist(containerID);
-      return Collections
-          .unmodifiableSet(replicaMap.get(containerID));
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Adds or updates the given replica for the given containerID, replacing
-   * any previously recorded replica from the same datanode.
-   *
-   * @param containerID
-   * @param replica
-   */
-  public void updateContainerReplica(final ContainerID containerID,
-      final ContainerReplica replica) throws ContainerNotFoundException {
-    Preconditions.checkNotNull(containerID);
-    lock.writeLock().lock();
-    try {
-      checkIfContainerExist(containerID);
-      Set<ContainerReplica> replicas = replicaMap.get(containerID);
-      replicas.remove(replica);
-      replicas.add(replica);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Removes a container replica for the given DataNode.
-   *
-   * @param containerID
-   * @param replica
-   * @throws ContainerReplicaNotFoundException if the replica is not found.
-   */
-  public void removeContainerReplica(final ContainerID containerID,
-      final ContainerReplica replica)
-      throws ContainerNotFoundException, ContainerReplicaNotFoundException {
-    Preconditions.checkNotNull(containerID);
-    Preconditions.checkNotNull(replica);
-
-    lock.writeLock().lock();
-    try {
-      checkIfContainerExist(containerID);
-      if (!replicaMap.get(containerID).remove(replica)) {
-        throw new ContainerReplicaNotFoundException(
-            "Container #"
-                + containerID.getId() + ", replica: " + replica);
-      }
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Updates the container info of an existing container.
-   * @param info ContainerInfo.
-   */
-  public void updateContainerInfo(final ContainerInfo info)
-      throws ContainerNotFoundException {
-    lock.writeLock().lock();
-    try {
-      Preconditions.checkNotNull(info);
-      checkIfContainerExist(info.containerID());
-      final ContainerInfo currentInfo = containerMap.get(info.containerID());
-      flushCache(info, currentInfo);
-      containerMap.put(info.containerID(), info);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Update the State of a container.
-   *
-   * @param containerID - ContainerID
-   * @param currentState - CurrentState
-   * @param newState - NewState.
-   * @throws SCMException - in case of failure.
-   */
-  public void updateState(ContainerID containerID, LifeCycleState currentState,
-      LifeCycleState newState) throws SCMException, ContainerNotFoundException {
-    Preconditions.checkNotNull(currentState);
-    Preconditions.checkNotNull(newState);
-    lock.writeLock().lock();
-    try {
-      checkIfContainerExist(containerID);
-      final ContainerInfo currentInfo = containerMap.get(containerID);
-      try {
-        currentInfo.setState(newState);
-
-        // We update two places before this update is done; these can
-        // fail independently, so the code needs to handle that.
-
-        // We update the attribute map first; if that fails it throws an
-        // exception, so there is nothing to undo. If it succeeds, we record
-        // that the lifecycle state has been updated in the map, and then
-        // update the container state. If this second update fails, we attempt
-        // to roll back the earlier change. If the rollback also fails, we can
-        // be left in an inconsistent state.
-
-        lifeCycleStateMap.update(currentState, newState, containerID);
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Updated the container {} to new state. Old = {}, new = " +
-              "{}", containerID, currentState, newState);
-        }
-
-        // Just flush both old and new data sets from the result cache.
-        flushCache(currentInfo);
-      } catch (SCMException ex) {
-        LOG.error("Unable to update the container state. {}", ex);
-        // we need to revert the change in this attribute since we are not
-        // able to update the hash table.
-        LOG.info("Reverting the update to lifecycle state. Moving back to " +
-                "old state. Old = {}, Attempted state = {}", currentState,
-            newState);
-
-        currentInfo.setState(currentState);
-
-        // If this line throws, the state map can be left in an inconsistent
-        // state: we will have modified the attribute map, but the container
-        // state will not be in sync, since we were not able to put it into
-        // the hash table.
-        lifeCycleStateMap.update(newState, currentState, containerID);
-
-        throw new SCMException("Updating the container map failed.", ex,
-            FAILED_TO_CHANGE_CONTAINER_STATE);
-      }
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  public Set<ContainerID> getAllContainerIDs() {
-    return Collections.unmodifiableSet(containerMap.keySet());
-  }
-
-  /**
-   * Returns a list of containers owned by a name service.
-   *
-   * @param ownerName - Name of the NameService.
-   * @return - NavigableSet of ContainerIDs.
-   */
-  NavigableSet<ContainerID> getContainerIDsByOwner(final String ownerName) {
-    Preconditions.checkNotNull(ownerName);
-    lock.readLock().lock();
-    try {
-      return ownerMap.getCollection(ownerName);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Returns Containers in the System by the Type.
-   *
-   * @param type - Replication type -- StandAlone, Ratis etc.
-   * @return NavigableSet
-   */
-  NavigableSet<ContainerID> getContainerIDsByType(final ReplicationType type) {
-    Preconditions.checkNotNull(type);
-    lock.readLock().lock();
-    try {
-      return typeMap.getCollection(type);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Returns Containers by replication factor.
-   *
-   * @param factor - Replication Factor.
-   * @return NavigableSet.
-   */
-  NavigableSet<ContainerID> getContainerIDsByFactor(
-      final ReplicationFactor factor) {
-    Preconditions.checkNotNull(factor);
-    lock.readLock().lock();
-    try {
-      return factorMap.getCollection(factor);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Returns Containers by State.
-   *
-   * @param state - State - Open, Closed etc.
-   * @return List of containers by state.
-   */
-  public NavigableSet<ContainerID> getContainerIDsByState(
-      final LifeCycleState state) {
-    Preconditions.checkNotNull(state);
-    lock.readLock().lock();
-    try {
-      return lifeCycleStateMap.getCollection(state);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Gets the containers that match the following filters.
-   *
-   * @param state - LifeCycleState
-   * @param owner - Owner
-   * @param factor - Replication Factor
-   * @param type - Replication Type
-   * @return NavigableSet of matching ContainerIDs, empty if no container
-   * satisfies the criteria.
-   */
-  public NavigableSet<ContainerID> getMatchingContainerIDs(
-      final LifeCycleState state, final String owner,
-      final ReplicationFactor factor, final ReplicationType type) {
-
-    Preconditions.checkNotNull(state, "State cannot be null");
-    Preconditions.checkNotNull(owner, "Owner cannot be null");
-    Preconditions.checkNotNull(factor, "Factor cannot be null");
-    Preconditions.checkNotNull(type, "Type cannot be null");
-
-    lock.readLock().lock();
-    try {
-      final ContainerQueryKey queryKey =
-          new ContainerQueryKey(state, owner, factor, type);
-      if (resultCache.containsKey(queryKey)) {
-        return resultCache.get(queryKey);
-      }
-
-      // If we cannot meet any one condition we return EMPTY_SET immediately,
-      // since when we intersect these sets the result will be empty if any
-      // one of them is empty.
-      final NavigableSet<ContainerID> stateSet =
-          lifeCycleStateMap.getCollection(state);
-      if (stateSet.size() == 0) {
-        return EMPTY_SET;
-      }
-
-      final NavigableSet<ContainerID> ownerSet =
-          ownerMap.getCollection(owner);
-      if (ownerSet.size() == 0) {
-        return EMPTY_SET;
-      }
-
-      final NavigableSet<ContainerID> factorSet =
-          factorMap.getCollection(factor);
-      if (factorSet.size() == 0) {
-        return EMPTY_SET;
-      }
-
-      final NavigableSet<ContainerID> typeSet =
-          typeMap.getCollection(type);
-      if (typeSet.size() == 0) {
-        return EMPTY_SET;
-      }
-
-
-      // if we add more constraints we will just add those sets here..
-      final NavigableSet<ContainerID>[] sets = sortBySize(stateSet,
-          ownerSet, factorSet, typeSet);
-
-      NavigableSet<ContainerID> currentSet = sets[0];
-      // We take the smallest set and intersect against the larger sets. This
-      // allows us to reduce the lookups to the least possible number.
-      for (int x = 1; x < sets.length; x++) {
-        currentSet = intersectSets(currentSet, sets[x]);
-      }
-      resultCache.put(queryKey, currentSet);
-      return currentSet;
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Calculates the intersection between sets and returns a new set.
-   *
-   * @param smaller - First Set
-   * @param bigger - Second Set
-   * @return resultSet which is the intersection of these two sets.
-   */
-  private NavigableSet<ContainerID> intersectSets(
-      final NavigableSet<ContainerID> smaller,
-      final NavigableSet<ContainerID> bigger) {
-    Preconditions.checkState(smaller.size() <= bigger.size(),
-        "This function assumes the first set is lesser or equal to second " +
-            "set");
-    final NavigableSet<ContainerID> resultSet = new TreeSet<>();
-    for (ContainerID id : smaller) {
-      if (bigger.contains(id)) {
-        resultSet.add(id);
-      }
-    }
-    return resultSet;
-  }
-
-  /**
-   * Sorts a list of Sets based on Size. This is useful when we are
-   * intersecting the sets.
-   *
-   * @param sets - varargs of sets
-   * @return Returns a sorted array of sets based on the size of the set.
-   */
-  @SuppressWarnings("unchecked")
-  private NavigableSet<ContainerID>[] sortBySize(
-      final NavigableSet<ContainerID>... sets) {
-    for (int x = 0; x < sets.length - 1; x++) {
-      for (int y = 0; y < sets.length - x - 1; y++) {
-        if (sets[y].size() > sets[y + 1].size()) {
-          final NavigableSet temp = sets[y];
-          sets[y] = sets[y + 1];
-          sets[y + 1] = temp;
-        }
-      }
-    }
-    return sets;
-  }
-
-  private void flushCache(final ContainerInfo... containerInfos) {
-    for (ContainerInfo containerInfo : containerInfos) {
-      final ContainerQueryKey key = new ContainerQueryKey(
-          containerInfo.getState(),
-          containerInfo.getOwner(),
-          containerInfo.getReplicationFactor(),
-          containerInfo.getReplicationType());
-      resultCache.remove(key);
-    }
-  }
-
-  private void checkIfContainerExist(ContainerID containerID)
-      throws ContainerNotFoundException {
-    if (!containerMap.containsKey(containerID)) {
-      throw new ContainerNotFoundException("#" + containerID.getId());
-    }
-  }
-
-}
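
The core of getMatchingContainerIDs() above is set intersection ordered by size: bail out as soon as any attribute set is empty, then probe from the smallest set outward so membership checks are minimized. A self-contained sketch of that strategy, JDK-only, with Long IDs standing in for ContainerID:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;
    import java.util.NavigableSet;
    import java.util.TreeSet;

    final class SmallestFirstIntersection {
      static NavigableSet<Long> intersect(List<NavigableSet<Long>> sets) {
        // Any empty input makes the whole intersection empty, so bail early.
        for (NavigableSet<Long> s : sets) {
          if (s.isEmpty()) {
            return Collections.emptyNavigableSet();
          }
        }
        // Probe from the smallest set outward to minimize membership checks.
        List<NavigableSet<Long>> sorted = new ArrayList<>(sets);
        sorted.sort(Comparator.comparingInt(NavigableSet::size));
        NavigableSet<Long> result = new TreeSet<>(sorted.get(0));
        for (int i = 1; i < sorted.size(); i++) {
          result.retainAll(sorted.get(i)); // keep IDs present in every set
        }
        return result;
      }
    }
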
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
deleted file mode 100644
index 8ad1c8b..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-/**
- * Container States package.
- */
-package org.apache.hadoop.hdds.scm.container.states;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
deleted file mode 100644
index 43d396e0..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.events;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.block.PendingDeleteStatusList;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
-import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .IncrementalContainerReportFromDatanode;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-        .PipelineReportFromDatanode;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .PipelineActionsFromDatanode;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .ContainerActionsFromDatanode;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .CommandStatusReportFromDatanode;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .ContainerReportFromDatanode;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .NodeReportFromDatanode;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer
-    .NodeRegistrationContainerReport;
-import org.apache.hadoop.hdds.server.events.Event;
-import org.apache.hadoop.hdds.server.events.TypedEvent;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-
-/**
- * Class that acts as the namespace for all SCM Events.
- */
-public final class SCMEvents {
-
-  /**
- * NodeReports are sent out by Datanodes. This report is received by
- * SCMDatanodeHeartbeatDispatcher and a NodeReport event is generated.
-   */
-  public static final TypedEvent<NodeReportFromDatanode> NODE_REPORT =
-      new TypedEvent<>(NodeReportFromDatanode.class, "Node_Report");
-
-  /**
-   * Event generated on DataNode registration.
-   */
-  public static final TypedEvent<NodeRegistrationContainerReport>
-      NODE_REGISTRATION_CONT_REPORT = new TypedEvent<>(
-      NodeRegistrationContainerReport.class,
-      "Node_Registration_Container_Report");
-
-  /**
- * ContainerReports are sent out by Datanodes. This report is received by
- * SCMDatanodeHeartbeatDispatcher and a Container_Report event is generated.
-   */
-  public static final TypedEvent<ContainerReportFromDatanode> CONTAINER_REPORT =
-      new TypedEvent<>(ContainerReportFromDatanode.class, "Container_Report");
-
-  /**
- * IncrementalContainerReports are sent out by Datanodes.
- * This report is received by SCMDatanodeHeartbeatDispatcher and an
- * Incremental_Container_Report event is generated.
-   */
-  public static final TypedEvent<IncrementalContainerReportFromDatanode>
-      INCREMENTAL_CONTAINER_REPORT = new TypedEvent<>(
-      IncrementalContainerReportFromDatanode.class,
-      "Incremental_Container_Report");
-
-  /**
- * ContainerActions are sent by Datanodes. This event is received by
- * SCMDatanodeHeartbeatDispatcher and a CONTAINER_ACTIONS event is generated.
-   */
-  public static final TypedEvent<ContainerActionsFromDatanode>
-      CONTAINER_ACTIONS = new TypedEvent<>(ContainerActionsFromDatanode.class,
-      "Container_Actions");
-
-  /**
- * PipelineReports are sent out by Datanodes. This report is received by
- * SCMDatanodeHeartbeatDispatcher and a Pipeline_Report event is generated.
-   */
-  public static final TypedEvent<PipelineReportFromDatanode> PIPELINE_REPORT =
-          new TypedEvent<>(PipelineReportFromDatanode.class, "Pipeline_Report");
-
-  /**
-   * PipelineReport processed by pipeline report handler. This event is
-   * received by HealthyPipelineSafeModeRule.
-   */
-  public static final TypedEvent<PipelineReportFromDatanode>
-      PROCESSED_PIPELINE_REPORT = new TypedEvent<>(
-          PipelineReportFromDatanode.class, "Processed_Pipeline_Report");
-
-  /**
- * PipelineActions are sent by Datanodes. This event is received by
- * SCMDatanodeHeartbeatDispatcher and a PIPELINE_ACTIONS event is generated.
-   */
-  public static final TypedEvent<PipelineActionsFromDatanode>
-      PIPELINE_ACTIONS = new TypedEvent<>(PipelineActionsFromDatanode.class,
-      "Pipeline_Actions");
-
-  /**
- * A command status report will be sent by datanodes. This report is received
- * by SCMDatanodeHeartbeatDispatcher and a CommandReport event is generated.
-   */
-  public static final TypedEvent<CommandStatusReportFromDatanode>
-      CMD_STATUS_REPORT =
-      new TypedEvent<>(CommandStatusReportFromDatanode.class,
-          "Cmd_Status_Report");
-
-  /**
- * Whenever a command for a Datanode needs to be issued by any component
- * inside SCM, a Datanode_Command event is generated. NodeManager listens to
- * these events and dispatches them to the Datanode for further processing.
-   */
-  public static final Event<CommandForDatanode> DATANODE_COMMAND =
-      new TypedEvent<>(CommandForDatanode.class, "Datanode_Command");
-
-  public static final TypedEvent<CommandForDatanode>
-      RETRIABLE_DATANODE_COMMAND =
-      new TypedEvent<>(CommandForDatanode.class, "Retriable_Datanode_Command");
-
-  /**
- * A Close Container event can be triggered under many conditions. Some of
- * them are: 1. A container is full, so we stop writing further information
- * to that container. The DNs let SCM know the current state and send an
- * informational message that allows SCM to close the container.
- * <p>
- * 2. If a pipeline is open -- for example, Ratis -- and a single node fails,
- * we will proactively close these containers.
-   * <p>
-   * Once a command is dispatched to DN, we will also listen to updates from the
-   * datanode which lets us know that this command completed or timed out.
-   */
-  public static final TypedEvent<ContainerID> CLOSE_CONTAINER =
-      new TypedEvent<>(ContainerID.class, "Close_Container");
-
-  /**
-   * This event will be triggered whenever a new datanode is registered with
-   * SCM.
-   */
-  public static final TypedEvent<DatanodeDetails> NEW_NODE =
-      new TypedEvent<>(DatanodeDetails.class, "New_Node");
-
-  /**
-   * This event will be triggered whenever a datanode is moved from healthy to
-   * stale state.
-   */
-  public static final TypedEvent<DatanodeDetails> STALE_NODE =
-      new TypedEvent<>(DatanodeDetails.class, "Stale_Node");
-
-  /**
-   * This event will be triggered whenever a datanode is moved from stale to
-   * dead state.
-   */
-  public static final TypedEvent<DatanodeDetails> DEAD_NODE =
-      new TypedEvent<>(DatanodeDetails.class, "Dead_Node");
-
-  /**
-   * This event will be triggered whenever a datanode is moved from non-healthy
-   * state to healthy state.
-   */
-  public static final TypedEvent<DatanodeDetails> NON_HEALTHY_TO_HEALTHY_NODE =
-      new TypedEvent<>(DatanodeDetails.class, "NON_HEALTHY_TO_HEALTHY_NODE");
-
-  /**
-   * This event will be triggered by CommandStatusReportHandler whenever a
-   * status for DeleteBlock SCMCommand is received.
-   */
-  public static final TypedEvent<CommandStatusReportHandler.DeleteBlockStatus>
-      DELETE_BLOCK_STATUS =
-      new TypedEvent<>(CommandStatusReportHandler.DeleteBlockStatus.class,
-          "Delete_Block_Status");
-
-  /**
- * This event will be triggered while processing container reports from a DN
- * when the deleteTransactionID of a container in the report does not match
- * the deleteTransactionID on the SCM.
-   */
-  public static final Event<PendingDeleteStatusList> PENDING_DELETE_STATUS =
-      new TypedEvent<>(PendingDeleteStatusList.class, "Pending_Delete_Status");
-
-  public static final TypedEvent<SafeModeStatus> SAFE_MODE_STATUS =
-      new TypedEvent<>(SafeModeStatus.class);
-
-  /**
-   * Private Ctor. Never Constructed.
-   */
-  private SCMEvents() {
-  }
-
-}
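
SCMEvents relies on TypedEvent to tie each event constant to its payload class, so publishers and subscribers agree on the payload type at compile time. A generic, hedged sketch of that pattern; this is not the hdds EventQueue API, only the idea behind it:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Consumer;

    final class TypedEventBus {
      // The event constant carries its payload type as a type parameter.
      static final class EventType<P> {
        final String name;
        EventType(String name) { this.name = name; }
      }

      private final Map<EventType<?>, List<Consumer<?>>> handlers =
          new HashMap<>();

      <P> void subscribe(EventType<P> type, Consumer<P> handler) {
        handlers.computeIfAbsent(type, t -> new ArrayList<>()).add(handler);
      }

      @SuppressWarnings("unchecked") // safe: handlers are keyed by payload type
      <P> void fire(EventType<P> type, P payload) {
        for (Consumer<?> h : handlers.getOrDefault(type, new ArrayList<>())) {
          ((Consumer<P>) h).accept(payload);
        }
      }

      public static void main(String[] args) {
        TypedEventBus bus = new TypedEventBus();
        // Mirrors e.g. STALE_NODE: this constant only accepts String payloads.
        EventType<String> staleNode = new EventType<>("Stale_Node");
        bus.subscribe(staleNode, node -> System.out.println("stale: " + node));
        bus.fire(staleNode, "datanode-42");
      }
    }
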
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
deleted file mode 100644
index 46181a3..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Events Package contains all the Events used by SCM internally to
- * communicate between different sub-systems that make up SCM.
- */
-package org.apache.hadoop.hdds.scm.events;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java
deleted file mode 100644
index 62eb0f2..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.metadata;
-
-import java.io.IOException;
-import java.math.BigInteger;
-import org.apache.hadoop.hdds.utils.db.Codec;
-
-/**
- * Encode and decode BigInteger.
- */
-public class BigIntegerCodec implements Codec<BigInteger> {
-  @Override
-  public byte[] toPersistedFormat(BigInteger object) throws IOException {
-    return object.toByteArray();
-  }
-
-  @Override
-  public BigInteger fromPersistedFormat(byte[] rawData) throws IOException {
-    return new BigInteger(rawData);
-  }
-}
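
A Codec like the one above is only correct if decoding inverts encoding for every value, including zero and negatives; BigInteger.toByteArray() produces two's-complement big-endian bytes, which new BigInteger(byte[]) reverses exactly. A small JDK-only round-trip check of that property:

    import java.math.BigInteger;

    public final class BigIntegerRoundTrip {
      public static void main(String[] args) {
        String[] samples = {"0", "-1", "42", "123456789012345678901234567890"};
        for (String v : samples) {
          BigInteger original = new BigInteger(v);
          byte[] persisted = original.toByteArray();       // encode
          BigInteger restored = new BigInteger(persisted); // decode
          if (!original.equals(restored)) {
            throw new AssertionError("round trip failed for " + v);
          }
        }
        System.out.println("all round trips OK");
      }
    }
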
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/DeletedBlocksTransactionCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/DeletedBlocksTransactionCodec.java
deleted file mode 100644
index f825025..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/DeletedBlocksTransactionCodec.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.metadata;
-
-
-import com.google.protobuf.InvalidProtocolBufferException;
-import java.io.IOException;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.utils.db.Codec;
-
-/**
- * Codec for Persisting the DeletedBlocks.
- */
-public class DeletedBlocksTransactionCodec
-    implements Codec<DeletedBlocksTransaction> {
-  @Override
-  public byte[] toPersistedFormat(DeletedBlocksTransaction object)
-      throws IOException {
-    return object.toByteArray();
-  }
-
-  @Override
-  public DeletedBlocksTransaction fromPersistedFormat(byte[] rawData)
-      throws IOException {
-    try {
-      return DeletedBlocksTransaction.parseFrom(rawData);
-    } catch (InvalidProtocolBufferException e) {
-      throw new IllegalArgumentException(
-          "Can't convert rawBytes to DeletedBlocksTransaction.", e);
-    }
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/LongCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/LongCodec.java
deleted file mode 100644
index 1692320..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/LongCodec.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.metadata;
-
-import com.google.common.primitives.Longs;
-import java.io.IOException;
-import org.apache.hadoop.hdds.utils.db.Codec;
-
-/**
- * Codec for persisting Long values.
- */
-public class LongCodec implements Codec<Long> {
-
-  @Override
-  public byte[] toPersistedFormat(Long object) throws IOException {
-    return Longs.toByteArray(object);
-  }
-
-  @Override
-  public Long fromPersistedFormat(byte[] rawData) throws IOException {
-    return Longs.fromByteArray(rawData);
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java
deleted file mode 100644
index 1150316..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.metadata;
-
-import java.math.BigInteger;
-import java.security.cert.X509Certificate;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import java.io.IOException;
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-
-/**
- * Generic interface for data stores for SCM.
- * This is similar to the OMMetadataStore class,
- * where we write objects into some underlying storage system.
- */
-public interface SCMMetadataStore {
-  /**
-   * Start metadata manager.
-   *
-   * @param configuration - Configuration
-   * @throws IOException - Unable to start metadata store.
-   */
-  void start(OzoneConfiguration configuration) throws IOException;
-
-  /**
-   * Stop metadata manager.
-   */
-  void stop() throws Exception;
-
-  /**
-   * Get metadata store.
-   *
-   * @return metadata store.
-   */
-  @VisibleForTesting
-  DBStore getStore();
-
-  /**
-   * A Table that keeps the deleted blocks lists and transactions.
-   *
-   * @return Table
-   */
-  Table<Long, DeletedBlocksTransaction> getDeletedBlocksTXTable();
-
-  /**
-   * Returns the current TXID for the deleted blocks.
-   *
-   * @return Long
-   */
-  Long getCurrentTXID();
-
-  /**
-   * Returns the next TXID for the Deleted Blocks.
-   *
-   * @return Long.
-   */
-  Long getNextDeleteBlockTXID();
-
-  /**
-   * A table that maintains all the valid certificates issued by the SCM CA.
-   *
-   * @return Table
-   */
-  Table<BigInteger, X509Certificate> getValidCertsTable();
-
-  /**
-   * A Table that maintains all revoked certificates until they expire.
-   *
-   * @return Table.
-   */
-  Table<BigInteger, X509Certificate> getRevokedCertsTable();
-
-  /**
-   * Returns the list of Certificates of a specific type.
-   *
-   * @param certType - CertType.
-   * @return TableIterator over the certificates of the given type.
-   */
-  TableIterator getAllCerts(CertificateStore.CertType certType);
-
-
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java
deleted file mode 100644
index eff7a98..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.metadata;
-
-import java.io.File;
-import java.math.BigInteger;
-import java.nio.file.Paths;
-import java.security.cert.X509Certificate;
-import java.util.concurrent.atomic.AtomicLong;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import java.io.IOException;
-import org.apache.hadoop.hdds.security.x509.certificate.authority
-    .CertificateStore;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.OzoneConsts.SCM_DB_NAME;
-
-/**
- * A RocksDB based implementation of SCM Metadata Store.
- * <p>
- * +---------------+------------------+-------------------------+
- * | Column Family |    Key           |          Value          |
- * +---------------+------------------+-------------------------+
- * | DeletedBlocks | TXID(Long)       | DeletedBlockTransaction |
- * +---------------+------------------+-------------------------+
- * | ValidCerts    | Serial (BigInt)  | X509Certificate         |
- * +---------------+------------------+-------------------------+
- * |RevokedCerts   | Serial (BigInt)  | X509Certificate         |
- * +---------------+------------------+-------------------------+
- */
-public class SCMMetadataStoreRDBImpl implements SCMMetadataStore {
-
-  private static final String DELETED_BLOCKS_TABLE = "deletedBlocks";
-  private Table deletedBlocksTable;
-
-  private static final String VALID_CERTS_TABLE = "validCerts";
-  private Table validCertsTable;
-
-  private static final String REVOKED_CERTS_TABLE = "revokedCerts";
-  private Table revokedCertsTable;
-
-
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SCMMetadataStoreRDBImpl.class);
-  private DBStore store;
-  private final OzoneConfiguration configuration;
-  private final AtomicLong txID;
-
-  /**
-   * Constructs the metadata store and starts the DB Services.
-   *
-   * @param config - Ozone Configuration.
-   * @throws IOException - on Failure.
-   */
-  public SCMMetadataStoreRDBImpl(OzoneConfiguration config)
-      throws IOException {
-    this.configuration = config;
-    start(this.configuration);
-    this.txID = new AtomicLong(this.getLargestRecordedTXID());
-  }
-
-  @Override
-  public void start(OzoneConfiguration config)
-      throws IOException {
-    if (this.store == null) {
-      File metaDir = ServerUtils.getScmDbDir(configuration);
-
-      this.store = DBStoreBuilder.newBuilder(configuration)
-          .setName(SCM_DB_NAME)
-          .setPath(Paths.get(metaDir.getPath()))
-          .addTable(DELETED_BLOCKS_TABLE)
-          .addTable(VALID_CERTS_TABLE)
-          .addTable(REVOKED_CERTS_TABLE)
-          .addCodec(DeletedBlocksTransaction.class,
-              new DeletedBlocksTransactionCodec())
-          .addCodec(Long.class, new LongCodec())
-          .addCodec(BigInteger.class, new BigIntegerCodec())
-          .addCodec(X509Certificate.class, new X509CertificateCodec())
-          .build();
-
-      deletedBlocksTable = this.store.getTable(DELETED_BLOCKS_TABLE,
-          Long.class, DeletedBlocksTransaction.class);
-      checkTableStatus(deletedBlocksTable, DELETED_BLOCKS_TABLE);
-
-      validCertsTable = this.store.getTable(VALID_CERTS_TABLE,
-          BigInteger.class, X509Certificate.class);
-      checkTableStatus(validCertsTable, VALID_CERTS_TABLE);
-
-      revokedCertsTable = this.store.getTable(REVOKED_CERTS_TABLE,
-          BigInteger.class, X509Certificate.class);
-      checkTableStatus(revokedCertsTable, REVOKED_CERTS_TABLE);
-    }
-  }
-
-  @Override
-  public void stop() throws Exception {
-    if (store != null) {
-      store.close();
-      store = null;
-    }
-  }
-
-  @Override
-  public DBStore getStore() {
-    return this.store;
-  }
-
-  @Override
-  public Table<Long, DeletedBlocksTransaction> getDeletedBlocksTXTable() {
-    return deletedBlocksTable;
-  }
-
-  @Override
-  public Long getNextDeleteBlockTXID() {
-    return this.txID.incrementAndGet();
-  }
-
-  @Override
-  public Table<BigInteger, X509Certificate> getValidCertsTable() {
-    return validCertsTable;
-  }
-
-  @Override
-  public Table<BigInteger, X509Certificate> getRevokedCertsTable() {
-    return revokedCertsTable;
-  }
-
-  @Override
-  public TableIterator getAllCerts(CertificateStore.CertType certType) {
-    if(certType == CertificateStore.CertType.VALID_CERTS) {
-      return validCertsTable.iterator();
-    }
-
-    if(certType == CertificateStore.CertType.REVOKED_CERTS) {
-      return revokedCertsTable.iterator();
-    }
-
-    return null;
-  }
-
-  @Override
-  public Long getCurrentTXID() {
-    return this.txID.get();
-  }
-
-  /**
-   * Returns the largest recorded TXID from the DB.
-   *
-   * @return Long
-   * @throws IOException
-   */
-  private Long getLargestRecordedTXID() throws IOException {
-    try (TableIterator<Long, DeletedBlocksTransaction> txIter =
-             deletedBlocksTable.iterator()) {
-      txIter.seekToLast();
-      Long txid = txIter.key();
-      if (txid != null) {
-        return txid;
-      }
-    }
-    return 0L;
-  }
-
-
-  private void checkTableStatus(Table table, String name) throws IOException {
-    String logMessage = "Unable to get a reference to %s table. Cannot " +
-        "continue.";
-    String errMsg = "Inconsistent DB state, Table - %s. Please check the" +
-        " logs for more info.";
-    if (table == null) {
-      LOG.error(String.format(logMessage, name));
-      throw new IOException(String.format(errMsg, name));
-    }
-  }
-
-}
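
The constructor above seeds its AtomicLong from getLargestRecordedTXID(), so delete-block transaction ids keep increasing across restarts. A hedged sketch of that behaviour, assuming conf points at the same SCM metadata directory for both instantiations and tx is a DeletedBlocksTransaction built elsewhere:

    SCMMetadataStore store = new SCMMetadataStoreRDBImpl(conf);
    Long txid = store.getNextDeleteBlockTXID();   // e.g. 1 on a fresh DB
    store.getDeletedBlocksTXTable().put(txid, tx);
    store.stop();

    // Re-opening the store resumes from the largest persisted TXID,
    // recovered via TableIterator.seekToLast() in getLargestRecordedTXID().
    store = new SCMMetadataStoreRDBImpl(conf);
    assert store.getCurrentTXID().equals(txid);
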
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
deleted file mode 100644
index b21103e..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.metadata;
-
-import java.io.IOException;
-import java.nio.charset.Charset;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.hdds.utils.db.Codec;
-
-/**
- * Encodes and Decodes X509Certificate Class.
- */
-public class X509CertificateCodec implements Codec<X509Certificate> {
-  @Override
-  public byte[] toPersistedFormat(X509Certificate object) throws IOException {
-    try {
-      return CertificateCodec.getPEMEncodedString(object)
-          .getBytes(Charset.forName("UTF-8"));
-    } catch (SCMSecurityException exp) {
-      throw new IOException(exp);
-    }
-  }
-
-  @Override
-  public X509Certificate fromPersistedFormat(byte[] rawData)
-      throws IOException {
-    try {
-      String s = new String(rawData, Charset.forName("UTF-8"));
-      return CertificateCodec.getX509Certificate(s);
-    } catch (CertificateException exp) {
-      throw new IOException(exp);
-    }
-  }
-}
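
The codec persists each certificate as UTF-8 PEM text. For readers outside the Ozone tree, the decode half can be reproduced with the plain JDK; this is an illustrative sketch, not the codec's actual implementation, which goes through CertificateCodec:

    import java.io.ByteArrayInputStream;
    import java.security.cert.CertificateFactory;
    import java.security.cert.X509Certificate;

    static X509Certificate decodePem(byte[] rawData) throws Exception {
      // CertificateFactory accepts the Base64 PEM form directly.
      CertificateFactory factory = CertificateFactory.getInstance("X.509");
      return (X509Certificate) factory.generateCertificate(
          new ByteArrayInputStream(rawData));
    }
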
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java
deleted file mode 100644
index 23e8aaa..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Metadata layer for SCM.
- */
-package org.apache.hadoop.hdds.scm.metadata;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
deleted file mode 100644
index eb6dc0d..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.node;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.util.Time;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * Command Queue is a queue of commands for the datanode.
- * <p>
- * Node manager, container manager and Ozone manager can queue commands for
- * datanodes into this queue. These commands will be sent in the order in
- * which they were queued.
- */
-public class CommandQueue {
-  // This list is used as default return value.
-  private static final List<SCMCommand> DEFAULT_LIST = new ArrayList<>();
-  private final Map<UUID, Commands> commandMap;
-  private final Lock lock;
-  private long commandsInQueue;
-
-  /**
-   * Returns number of commands in queue.
-   * @return Command Count.
-   */
-  public long getCommandsInQueue() {
-    return commandsInQueue;
-  }
-
-  /**
-   * Constructs a Command Queue.
-   * TODO : Add a flusher thread that throws away commands older than a certain
-   * time period.
-   */
-  public CommandQueue() {
-    commandMap = new HashMap<>();
-    lock = new ReentrantLock();
-    commandsInQueue = 0;
-  }
-
-  /**
-   * This function is used only for test purposes.
-   */
-  @VisibleForTesting
-  public void clear() {
-    lock.lock();
-    try {
-      commandMap.clear();
-      commandsInQueue = 0;
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * Returns a list of commands for the datanode to execute. If there are no
-   * commands, an empty list is returned; otherwise the current set of
-   * commands is returned and the entry for the datanode is cleared.
-   *
-   * @param datanodeUuid Datanode UUID
-   * @return List of SCM Commands.
-   */
-  @SuppressWarnings("unchecked")
-  List<SCMCommand> getCommand(final UUID datanodeUuid) {
-    lock.lock();
-    try {
-      Commands cmds = commandMap.remove(datanodeUuid);
-      List<SCMCommand> cmdList = null;
-      if(cmds != null) {
-        cmdList = cmds.getCommands();
-        commandsInQueue -= cmdList.size() > 0 ? cmdList.size() : 0;
-        // A post condition really.
-        Preconditions.checkState(commandsInQueue >= 0);
-      }
-      return cmds == null ? DEFAULT_LIST : cmdList;
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * Adds a command to the SCM queue, to be sent to the datanode.
-   *
-   * @param datanodeUuid DatanodeDetails.Uuid
-   * @param command    - Command
-   */
-  public void addCommand(final UUID datanodeUuid, final SCMCommand
-      command) {
-    lock.lock();
-    try {
-      if (commandMap.containsKey(datanodeUuid)) {
-        commandMap.get(datanodeUuid).add(command);
-      } else {
-        commandMap.put(datanodeUuid, new Commands(command));
-      }
-      commandsInQueue++;
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * Class that stores commands for a datanode.
-   */
-  private static class Commands {
-    private long updateTime;
-    private long readTime;
-    private List<SCMCommand> commands;
-
-    /**
-     * Constructs a Commands class.
-     */
-    Commands() {
-      commands = new ArrayList<>();
-      updateTime = 0;
-      readTime = 0;
-    }
-
-    /**
-     * Creates the object and populates with the command.
-     * @param command command to add to queue.
-     */
-    Commands(SCMCommand command) {
-      this();
-      this.add(command);
-    }
-
-    /**
-     * Gets the last time the commands for this node were updated.
-     * @return Time stamp
-     */
-    public long getUpdateTime() {
-      return updateTime;
-    }
-
-    /**
-     * Gets the last read time.
-     * @return last time when these commands were read from this queue.
-     */
-    public long getReadTime() {
-      return readTime;
-    }
-
-    /**
-     * Adds a command to the list.
-     *
-     * @param command SCMCommand
-     */
-    public void add(SCMCommand command) {
-      this.commands.add(command);
-      updateTime = Time.monotonicNow();
-    }
-
-    /**
-     * Returns the commands for this datanode.
-     * @return command list.
-     */
-    public List<SCMCommand> getCommands() {
-      List<SCMCommand> temp = this.commands;
-      this.commands = new ArrayList<>();
-      readTime = Time.monotonicNow();
-      return temp;
-    }
-  }
-}
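
A short usage sketch of the class above, written from within the org.apache.hadoop.hdds.scm.node package since getCommand is package-private. cmd stands for any concrete SCMCommand implementation; its construction is elided here:

    CommandQueue queue = new CommandQueue();
    UUID datanode = UUID.randomUUID();
    queue.addCommand(datanode, cmd);

    // The heartbeat path drains the per-datanode entry in one shot.
    List<SCMCommand> pending = queue.getCommand(datanode);  // [cmd]
    assert queue.getCommandsInQueue() == 0;
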
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
deleted file mode 100644
index d06ea2a..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.util.Time;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-/**
- * This class extends the primary identifier of a Datanode with ephemeral
- * state, e.g. last reported time, usage information, etc.
- */
-public class DatanodeInfo extends DatanodeDetails {
-
-  private final ReadWriteLock lock;
-
-  private volatile long lastHeartbeatTime;
-  private long lastStatsUpdatedTime;
-
-  private List<StorageReportProto> storageReports;
-
-  /**
-   * Constructs DatanodeInfo from DatanodeDetails.
-   *
-   * @param datanodeDetails Details about the datanode
-   */
-  public DatanodeInfo(DatanodeDetails datanodeDetails) {
-    super(datanodeDetails);
-    this.lock = new ReentrantReadWriteLock();
-    this.lastHeartbeatTime = Time.monotonicNow();
-    this.storageReports = Collections.emptyList();
-  }
-
-  /**
-   * Updates the last heartbeat time with current time.
-   */
-  public void updateLastHeartbeatTime() {
-    try {
-      lock.writeLock().lock();
-      lastHeartbeatTime = Time.monotonicNow();
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Returns the last heartbeat time.
-   *
-   * @return last heartbeat time.
-   */
-  public long getLastHeartbeatTime() {
-    try {
-      lock.readLock().lock();
-      return lastHeartbeatTime;
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Updates the datanode storage reports.
-   *
-   * @param reports list of storage report
-   */
-  public void updateStorageReports(List<StorageReportProto> reports) {
-    try {
-      lock.writeLock().lock();
-      lastStatsUpdatedTime = Time.monotonicNow();
-      storageReports = reports;
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Returns the storage reports associated with this datanode.
-   *
-   * @return list of storage report
-   */
-  public List<StorageReportProto> getStorageReports() {
-    try {
-      lock.readLock().lock();
-      return storageReports;
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Returns the last updated time of datanode info.
-   * @return the last updated time of datanode info.
-   */
-  public long getLastStatsUpdatedTime() {
-    return lastStatsUpdatedTime;
-  }
-
-  @Override
-  public int hashCode() {
-    return super.hashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    return super.equals(obj);
-  }
-
-}
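
Because lastHeartbeatTime comes from Time.monotonicNow(), staleness checks must compare against the same monotonic clock. A sketch of the kind of check NodeStateManager layers on top of this class; the interval value here is illustrative, not the configured default:

    DatanodeInfo node = new DatanodeInfo(datanodeDetails);
    node.updateLastHeartbeatTime();

    long staleIntervalMs = 90_000L;  // illustrative threshold
    boolean stale =
        Time.monotonicNow() - node.getLastHeartbeatTime() > staleIntervalMs;
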
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
deleted file mode 100644
index 17e1fed..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import java.io.IOException;
-import java.util.Optional;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerException;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER;
-
-/**
- * Handles Dead Node event.
- */
-public class DeadNodeHandler implements EventHandler<DatanodeDetails> {
-
-  private final NodeManager nodeManager;
-  private final PipelineManager pipelineManager;
-  private final ContainerManager containerManager;
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DeadNodeHandler.class);
-
-  public DeadNodeHandler(final NodeManager nodeManager,
-                         final PipelineManager pipelineManager,
-                         final ContainerManager containerManager) {
-    this.nodeManager = nodeManager;
-    this.pipelineManager = pipelineManager;
-    this.containerManager = containerManager;
-  }
-
-  @Override
-  public void onMessage(final DatanodeDetails datanodeDetails,
-                        final EventPublisher publisher) {
-
-    try {
-
-      /*
-       * We should have already destroyed all the pipelines on this datanode
-       * when it was marked as stale. Destroy pipeline should also have closed
-       * all the containers on this datanode.
-       *
-       * Ideally we should not have any pipeline or OPEN containers now.
-       *
-       * To be on a safer side, we double check here and take appropriate
-       * action.
-       */
-
-      destroyPipelines(datanodeDetails);
-      closeContainers(datanodeDetails, publisher);
-
-      // Remove the container replicas associated with the dead node.
-      removeContainerReplicas(datanodeDetails);
-
-    } catch (NodeNotFoundException ex) {
-      // This should not happen, we cannot get a dead node event for an
-      // unregistered datanode!
-      LOG.error("DeadNode event for a unregistered node: {}!", datanodeDetails);
-    }
-  }
-
-  /**
-   * Destroys all the pipelines on the given datanode if there are any.
-   *
-   * @param datanodeDetails DatanodeDetails
-   */
-  private void destroyPipelines(final DatanodeDetails datanodeDetails) {
-    Optional.ofNullable(nodeManager.getPipelines(datanodeDetails))
-        .ifPresent(pipelines ->
-            pipelines.forEach(id -> {
-              try {
-                pipelineManager.finalizeAndDestroyPipeline(
-                    pipelineManager.getPipeline(id), false);
-              } catch (PipelineNotFoundException ignore) {
-                // Pipeline is not there in pipeline manager,
-                // should we care?
-              } catch (IOException ex) {
-                LOG.warn("Exception while finalizing pipeline {}",
-                    id, ex);
-              }
-            }));
-  }
-
-  /**
-   * Sends CloseContainerCommand to all the open containers on the
-   * given datanode.
-   *
-   * @param datanodeDetails DatanodeDetails
-   * @param publisher EventPublisher
-   * @throws NodeNotFoundException
-   */
-  private void closeContainers(final DatanodeDetails datanodeDetails,
-                               final EventPublisher publisher)
-      throws NodeNotFoundException {
-    nodeManager.getContainers(datanodeDetails)
-        .forEach(id -> {
-          try {
-            final ContainerInfo container = containerManager.getContainer(id);
-            if (container.getState() == HddsProtos.LifeCycleState.OPEN) {
-              publisher.fireEvent(CLOSE_CONTAINER, id);
-            }
-          } catch (ContainerNotFoundException cnfe) {
-            LOG.warn("Container {} is not managed by ContainerManager.",
-                id, cnfe);
-          }
-        });
-  }
-
-  /**
-   * Removes the ContainerReplica of the dead datanode from the containers
-   * which are hosted by that datanode.
-   *
-   * @param datanodeDetails DatanodeDetails
-   * @throws NodeNotFoundException
-   */
-  private void removeContainerReplicas(final DatanodeDetails datanodeDetails)
-      throws NodeNotFoundException {
-    nodeManager.getContainers(datanodeDetails)
-        .forEach(id -> {
-          try {
-            final ContainerInfo container = containerManager.getContainer(id);
-            // Identify and remove the ContainerReplica of dead node
-            containerManager.getContainerReplicas(id)
-                .stream()
-                .filter(r -> r.getDatanodeDetails().equals(datanodeDetails))
-                .findFirst()
-                .ifPresent(replica -> {
-                  try {
-                    containerManager.removeContainerReplica(id, replica);
-                  } catch (ContainerException ex) {
-                    LOG.warn("Exception while removing container replica #{} " +
-                        "of container {}.", replica, container, ex);
-                  }
-                });
-          } catch (ContainerNotFoundException cnfe) {
-            LOG.warn("Container {} is not managed by ContainerManager.",
-                id, cnfe);
-          }
-        });
-  }
-
-
-}
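
DeadNodeHandler is driven by SCM's event infrastructure rather than called directly. A hedged wiring sketch, assuming the EventQueue publisher from org.apache.hadoop.hdds.server.events and its addHandler/fireEvent methods as used elsewhere in this codebase:

    EventQueue eventQueue = new EventQueue();
    eventQueue.addHandler(SCMEvents.DEAD_NODE,
        new DeadNodeHandler(nodeManager, pipelineManager, containerManager));

    // Fired by the node-health machinery once a node misses the dead-node
    // deadline; the handler then cleans up pipelines, containers, replicas.
    eventQueue.fireEvent(SCMEvents.DEAD_NODE, datanodeDetails);
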
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
deleted file mode 100644
index 1dc924b..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-
-/**
- * Handles New Node event.
- */
-public class NewNodeHandler implements EventHandler<DatanodeDetails> {
-
-  private final PipelineManager pipelineManager;
-  private final Configuration conf;
-
-  public NewNodeHandler(PipelineManager pipelineManager, Configuration conf) {
-    this.pipelineManager = pipelineManager;
-    this.conf = conf;
-  }
-
-  @Override
-  public void onMessage(DatanodeDetails datanodeDetails,
-                        EventPublisher publisher) {
-    pipelineManager.triggerPipelineCreation();
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
deleted file mode 100644
index fd8bb87..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-
-import java.io.Closeable;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-
-/**
- * A node manager supports a simple interface for managing a datanode.
- * <p>
- * 1. A datanode registers with the NodeManager.
- * <p>
- * 2. If the node is allowed to register, we add that to the nodes that we need
- * to keep track of.
- * <p>
- * 3. A heartbeat is made by the node at a fixed frequency.
- * <p>
- * 4. A node can be in any of these 4 states: {HEALTHY, STALE, DEAD,
- * DECOMMISSIONED}
- * <p>
- * HEALTHY - A datanode that is regularly heartbeating us.
- *
- * STALE - A datanode for which we have missed a few heartbeats.
- *
- * DEAD - A datanode that we have not heard from for a while.
- *
- * DECOMMISSIONED - Someone told us to remove this node from the tracking
- * list, by calling removeNode. We will throw away this node's info soon.
- */
-public interface NodeManager extends StorageContainerNodeProtocol,
-    EventHandler<CommandForDatanode>, NodeManagerMXBean, Closeable {
-
-  /**
-   * Gets all datanodes that are in the given state and currently
-   * communicating with SCM.
-   * @param nodeState - State of the node
-   * @return List of datanodes in the given state.
-   */
-  List<DatanodeDetails> getNodes(NodeState nodeState);
-
-  /**
-   * Returns the number of datanodes that are in the given state.
-   * @param nodeState - State of the node
-   * @return int -- count
-   */
-  int getNodeCount(NodeState nodeState);
-
-  /**
-   * Get all datanodes known to SCM.
-   *
-   * @return List of DatanodeDetails known to SCM.
-   */
-  List<DatanodeDetails> getAllNodes();
-
-  /**
-   * Returns the aggregated node stats.
-   * @return the aggregated node stats.
-   */
-  SCMNodeStat getStats();
-
-  /**
-   * Return a map of node stats.
-   * @return a map of individual node stats (live/stale but not dead).
-   */
-  Map<DatanodeDetails, SCMNodeStat> getNodeStats();
-
-  /**
-   * Return the node stat of the specified datanode.
-   * @param datanodeDetails DatanodeDetails.
-   * @return node stat if it is live/stale, null if it is decommissioned or
-   * doesn't exist.
-   */
-  SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails);
-
-  /**
-   * Returns the node state of a specific node.
-   * @param datanodeDetails DatanodeDetails
-   * @return Healthy/Stale/Dead.
-   */
-  NodeState getNodeState(DatanodeDetails datanodeDetails);
-
-  /**
-   * Get set of pipelines a datanode is part of.
-   * @param datanodeDetails DatanodeDetails
-   * @return Set of PipelineID
-   */
-  Set<PipelineID> getPipelines(DatanodeDetails datanodeDetails);
-
-  /**
-   * Add pipeline information in the NodeManager.
-   * @param pipeline - Pipeline to be added
-   */
-  void addPipeline(Pipeline pipeline);
-
-  /**
-   * Remove a pipeline information from the NodeManager.
-   * @param pipeline - Pipeline to be removed
-   */
-  void removePipeline(Pipeline pipeline);
-
-  /**
-   * Adds the given container to the specified datanode.
-   *
-   * @param datanodeDetails - DatanodeDetails
-   * @param containerId - containerID
-   * @throws NodeNotFoundException - if datanode is not known. For new datanode
-   *                        use addDatanodeInContainerMap call.
-   */
-  void addContainer(DatanodeDetails datanodeDetails,
-                    ContainerID containerId) throws NodeNotFoundException;
-
-  /**
-   * Remaps datanode to containers mapping to the new set of containers.
-   * @param datanodeDetails - DatanodeDetails
-   * @param containerIds - Set of containerIDs
-   * @throws NodeNotFoundException - if datanode is not known. For new datanode
-   *                        use addDatanodeInContainerMap call.
-   */
-  void setContainers(DatanodeDetails datanodeDetails,
-      Set<ContainerID> containerIds) throws NodeNotFoundException;
-
-  /**
-   * Return set of containerIDs available on a datanode.
-   * @param datanodeDetails DatanodeDetails
-   * @return set of containerIDs
-   */
-  Set<ContainerID> getContainers(DatanodeDetails datanodeDetails)
-      throws NodeNotFoundException;
-
-  /**
-   * Adds a {@link SCMCommand} to the command queue, which is
-   * handled by the heartbeat thread asynchronously.
-   * @param dnId datanode uuid
-   * @param command - SCM command to be queued
-   */
-  void addDatanodeCommand(UUID dnId, SCMCommand command);
-
-  /**
-   * Process node report.
-   *
-   * @param datanodeDetails
-   * @param nodeReport
-   */
-  void processNodeReport(DatanodeDetails datanodeDetails,
-                         NodeReportProto nodeReport);
-
-  /**
-   * Get list of SCMCommands in the Command Queue for a particular Datanode.
-   * @param dnID - Datanode uuid.
-   * @return list of commands
-   */
-  // TODO: We can give a better name to this method!
-  List<SCMCommand> getCommandQueue(UUID dnID);
-
-  /**
-   * Given datanode uuid, returns the DatanodeDetails for the node.
-   *
-   * @param uuid datanode uuid
-   * @return the given datanode, or null if not found
-   */
-  DatanodeDetails getNodeByUuid(String uuid);
-
-  /**
-   * Given a datanode address (IP address or hostname), returns a list of
-   * DatanodeDetails for the datanodes running at that address.
-   *
-   * @param address datanode address
-   * @return list of matching datanodes, or an empty list if none found
-   */
-  List<DatanodeDetails> getNodesByAddress(String address);
-}
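
A sketch of how SCM-side code typically drives this interface. nodeManager is an already-constructed implementation, cmd an SCMCommand built elsewhere, and the enclosing method is assumed to declare throws NodeNotFoundException:

    // Snapshot-style reads: counts and lists may be mutually inconsistent.
    List<DatanodeDetails> healthy = nodeManager.getNodes(NodeState.HEALTHY);

    for (DatanodeDetails dn : healthy) {
      Set<ContainerID> containers = nodeManager.getContainers(dn);
      // ... placement / replication decisions based on containers ...
    }

    // Queue a command; it is delivered on the next heartbeat response.
    nodeManager.addDatanodeCommand(healthy.get(0).getUuid(), cmd);
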
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
deleted file mode 100644
index e1b51ef..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-import java.util.Map;
-
-/**
- *
- * This is the JMX management interface for node manager information.
- */
-@InterfaceAudience.Private
-public interface NodeManagerMXBean {
-
-  /**
-   * Get the number of datanodes in each state.
-   *
-   * @return A mapping from node state to the number of nodes in that state
-   */
-  Map<String, Integer> getNodeCount();
-
-  /**
-   * Get the disk metrics like capacity, usage and remaining based on the
-   * storage type.
-   */
-  Map<String, Long> getNodeInfo();
-
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
deleted file mode 100644
index 71e1b07..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .NodeReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Handles Node Reports from datanodes.
- */
-public class NodeReportHandler implements EventHandler<NodeReportFromDatanode> {
-
-  private static final Logger LOGGER = LoggerFactory
-      .getLogger(NodeReportHandler.class);
-  private final NodeManager nodeManager;
-
-  public NodeReportHandler(NodeManager nodeManager) {
-    Preconditions.checkNotNull(nodeManager);
-    this.nodeManager = nodeManager;
-  }
-
-  @Override
-  public void onMessage(NodeReportFromDatanode nodeReportFromDatanode,
-      EventPublisher publisher) {
-    Preconditions.checkNotNull(nodeReportFromDatanode);
-    DatanodeDetails dn = nodeReportFromDatanode.getDatanodeDetails();
-    Preconditions.checkNotNull(dn, "NodeReport is "
-        + "missing DatanodeDetails.");
-    nodeManager
-        .processNodeReport(dn, nodeReportFromDatanode.getReport());
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
deleted file mode 100644
index 954cb0e..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ /dev/null
@@ -1,765 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.node.states.*;
-import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap;
-import org.apache.hadoop.hdds.server.events.Event;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.common.statemachine
-    .InvalidStateTransitionException;
-import org.apache.hadoop.ozone.common.statemachine.StateMachine;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.util.*;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Predicate;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DEADNODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * NodeStateManager maintains the state of all the datanodes in the cluster.
- * All node state changes should happen only via NodeStateManager. It also
- * runs a heartbeat thread which periodically updates the node state.
- * <p>
- * The getNode(byState) functions make a copy of the node map and then create
- * a list based on that. It should be assumed that these get functions always
- * report *stale* information. For example, getting the deadNodeCount followed
- * by getNodes(DEAD) could very well produce a totally different count. Also,
- * getNodeCount(HEALTHY) + getNodeCount(DEAD) + getNodeCount(STALE) is not
- * guaranteed to add up to the total number of nodes that we know of. Please
- * treat all get functions in this file as a snapshot of information that may
- * be inconsistent as soon as you read it.
- */
-public class NodeStateManager implements Runnable, Closeable {
-
-  /**
-   * Node's life cycle events.
-   */
-  private enum NodeLifeCycleEvent {
-    TIMEOUT, RESTORE, RESURRECT, DECOMMISSION, DECOMMISSIONED
-  }
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(NodeStateManager.class);
-
-  /**
-   * StateMachine for node lifecycle.
-   */
-  private final StateMachine<NodeState, NodeLifeCycleEvent> stateMachine;
-  /**
-   * This is the map which maintains the current state of all datanodes.
-   */
-  private final NodeStateMap nodeStateMap;
-  /**
-   * Maintains the mapping from node to pipelines a node is part of.
-   */
-  private final Node2PipelineMap node2PipelineMap;
-  /**
-   * Used for publishing node state change events.
-   */
-  private final EventPublisher eventPublisher;
-  /**
-   * Maps the event to be triggered when a node state is updated.
-   */
-  private final Map<NodeState, Event<DatanodeDetails>> state2EventMap;
-  /**
-   * ExecutorService used for scheduling heartbeat processing thread.
-   */
-  private final ScheduledExecutorService executorService;
-  /**
-   * The interval at which the heartbeat processing thread runs.
-   */
-  private final long heartbeatCheckerIntervalMs;
-  /**
-   * The timeout value which will be used for marking a datanode as stale.
-   */
-  private final long staleNodeIntervalMs;
-  /**
-   * The timeout value which will be used for marking a datanode as dead.
-   */
-  private final long deadNodeIntervalMs;
-
-  /**
-   * The future is used to pause/unpause the scheduled checks.
-   */
-  private ScheduledFuture<?> healthCheckFuture;
-
-  /**
-   * Test utility - tracks if health check has been paused (unit tests).
-   */
-  private boolean checkPaused;
-
-  /**
-   * Timestamp of the latest heartbeat check process.
-   */
-  private long lastHealthCheck;
-
-  /**
-   * Number of times the heartbeat check was skipped.
-   */
-  private long skippedHealthChecks;
-
-  /**
-   * Constructs a NodeStateManager instance with the given configuration.
-   *
-   * @param conf Configuration
-   */
-  public NodeStateManager(Configuration conf, EventPublisher eventPublisher) {
-    this.nodeStateMap = new NodeStateMap();
-    this.node2PipelineMap = new Node2PipelineMap();
-    this.eventPublisher = eventPublisher;
-    this.state2EventMap = new HashMap<>();
-    initialiseState2EventMap();
-    Set<NodeState> finalStates = new HashSet<>();
-    finalStates.add(NodeState.DECOMMISSIONED);
-    this.stateMachine = new StateMachine<>(NodeState.HEALTHY, finalStates);
-    initializeStateMachine();
-    heartbeatCheckerIntervalMs = HddsServerUtil
-        .getScmheartbeatCheckerInterval(conf);
-    staleNodeIntervalMs = HddsServerUtil.getStaleNodeInterval(conf);
-    deadNodeIntervalMs = HddsServerUtil.getDeadNodeInterval(conf);
-    Preconditions.checkState(heartbeatCheckerIntervalMs > 0,
-        OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL + " should be greater than 0.");
-    Preconditions.checkState(staleNodeIntervalMs < deadNodeIntervalMs,
-        OZONE_SCM_STALENODE_INTERVAL + " should be less than" +
-            OZONE_SCM_DEADNODE_INTERVAL);
-    executorService = HadoopExecutors.newScheduledThreadPool(1,
-        new ThreadFactoryBuilder().setDaemon(true)
-            .setNameFormat("SCM Heartbeat Processing Thread - %d").build());
-
-    skippedHealthChecks = 0;
-    checkPaused = false; // accessed only from test functions
-
-    scheduleNextHealthCheck();
-  }
-
-  /**
-   * Populates state2event map.
-   */
-  private void initialiseState2EventMap() {
-    state2EventMap.put(NodeState.STALE, SCMEvents.STALE_NODE);
-    state2EventMap.put(NodeState.DEAD, SCMEvents.DEAD_NODE);
-    state2EventMap
-        .put(NodeState.HEALTHY, SCMEvents.NON_HEALTHY_TO_HEALTHY_NODE);
-  }
-
-  /*
-   *
-   * Node and State Transition Mapping:
-   *
-   * State: HEALTHY         -------------------> STALE
-   * Event:                       TIMEOUT
-   *
-   * State: STALE           -------------------> DEAD
-   * Event:                       TIMEOUT
-   *
-   * State: STALE           -------------------> HEALTHY
-   * Event:                       RESTORE
-   *
-   * State: DEAD            -------------------> HEALTHY
-   * Event:                       RESURRECT
-   *
-   * State: HEALTHY         -------------------> DECOMMISSIONING
-   * Event:                     DECOMMISSION
-   *
-   * State: STALE           -------------------> DECOMMISSIONING
-   * Event:                     DECOMMISSION
-   *
-   * State: DEAD            -------------------> DECOMMISSIONING
-   * Event:                     DECOMMISSION
-   *
-   * State: DECOMMISSIONING -------------------> DECOMMISSIONED
-   * Event:                     DECOMMISSIONED
-   *
-   *  Node State Flow
-   *
-   *  +--------------------------------------------------------+
-   *  |                                     (RESURRECT)        |
-   *  |   +--------------------------+                         |
-   *  |   |      (RESTORE)           |                         |
-   *  |   |                          |                         |
-   *  V   V                          |                         |
-   * [HEALTHY]------------------->[STALE]------------------->[DEAD]
-   *    |         (TIMEOUT)          |         (TIMEOUT)       |
-   *    |                            |                         |
-   *    |                            |                         |
-   *    |                            |                         |
-   *    |                            |                         |
-   *    | (DECOMMISSION)             | (DECOMMISSION)          | (DECOMMISSION)
-   *    |                            V                         |
-   *    +------------------->[DECOMMISSIONING]<----------------+
-   *                                 |
-   *                                 | (DECOMMISSIONED)
-   *                                 |
-   *                                 V
-   *                          [DECOMMISSIONED]
-   *
-   */
-
-  /**
-   * Initializes the lifecycle of node state machine.
-   */
-  private void initializeStateMachine() {
-    stateMachine.addTransition(
-        NodeState.HEALTHY, NodeState.STALE, NodeLifeCycleEvent.TIMEOUT);
-    stateMachine.addTransition(
-        NodeState.STALE, NodeState.DEAD, NodeLifeCycleEvent.TIMEOUT);
-    stateMachine.addTransition(
-        NodeState.STALE, NodeState.HEALTHY, NodeLifeCycleEvent.RESTORE);
-    stateMachine.addTransition(
-        NodeState.DEAD, NodeState.HEALTHY, NodeLifeCycleEvent.RESURRECT);
-    stateMachine.addTransition(
-        NodeState.HEALTHY, NodeState.DECOMMISSIONING,
-        NodeLifeCycleEvent.DECOMMISSION);
-    stateMachine.addTransition(
-        NodeState.STALE, NodeState.DECOMMISSIONING,
-        NodeLifeCycleEvent.DECOMMISSION);
-    stateMachine.addTransition(
-        NodeState.DEAD, NodeState.DECOMMISSIONING,
-        NodeLifeCycleEvent.DECOMMISSION);
-    stateMachine.addTransition(
-        NodeState.DECOMMISSIONING, NodeState.DECOMMISSIONED,
-        NodeLifeCycleEvent.DECOMMISSIONED);
-
-  }
-
-  /**
-   * Adds a new node to the state manager.
-   *
-   * @param datanodeDetails DatanodeDetails
-   *
-   * @throws NodeAlreadyExistsException if the node is already present
-   */
-  public void addNode(DatanodeDetails datanodeDetails)
-      throws NodeAlreadyExistsException {
-    nodeStateMap.addNode(datanodeDetails, stateMachine.getInitialState());
-    eventPublisher.fireEvent(SCMEvents.NEW_NODE, datanodeDetails);
-  }
-
-  /**
-   * Adds a pipeline in the node2PipelineMap.
-   * @param pipeline - Pipeline to be added
-   */
-  public void addPipeline(Pipeline pipeline) {
-    node2PipelineMap.addPipeline(pipeline);
-  }
-
-  /**
-   * Get information about the node.
-   *
-   * @param datanodeDetails DatanodeDetails
-   *
-   * @return DatanodeInfo
-   *
-   * @throws NodeNotFoundException if the node is not present
-   */
-  public DatanodeInfo getNode(DatanodeDetails datanodeDetails)
-      throws NodeNotFoundException {
-    return nodeStateMap.getNodeInfo(datanodeDetails.getUuid());
-  }
-
-  /**
-   * Updates the last heartbeat time of the node.
-   *
-   * @throws NodeNotFoundException if the node is not present
-   */
-  public void updateLastHeartbeatTime(DatanodeDetails datanodeDetails)
-      throws NodeNotFoundException {
-    nodeStateMap.getNodeInfo(datanodeDetails.getUuid())
-        .updateLastHeartbeatTime();
-  }
-
-  /**
-   * Returns the current state of the node.
-   *
-   * @param datanodeDetails DatanodeDetails
-   *
-   * @return NodeState
-   *
-   * @throws NodeNotFoundException if the node is not present
-   */
-  public NodeState getNodeState(DatanodeDetails datanodeDetails)
-      throws NodeNotFoundException {
-    return nodeStateMap.getNodeState(datanodeDetails.getUuid());
-  }
-
-  /**
-   * Returns all the nodes which are in the healthy state.
-   *
-   * @return list of healthy nodes
-   */
-  public List<DatanodeInfo> getHealthyNodes() {
-    return getNodes(NodeState.HEALTHY);
-  }
-
-  /**
-   * Returns all the nodes which are in the stale state.
-   *
-   * @return list of stale nodes
-   */
-  public List<DatanodeInfo> getStaleNodes() {
-    return getNodes(NodeState.STALE);
-  }
-
-  /**
-   * Returns all the nodes which are in the dead state.
-   *
-   * @return list of dead nodes
-   */
-  public List<DatanodeInfo> getDeadNodes() {
-    return getNodes(NodeState.DEAD);
-  }
-
-  /**
-   * Returns all the nodes which are in the specified state.
-   *
-   * @param state NodeState
-   *
-   * @return list of nodes
-   */
-  public List<DatanodeInfo> getNodes(NodeState state) {
-    List<DatanodeInfo> nodes = new ArrayList<>();
-    nodeStateMap.getNodes(state).forEach(
-        uuid -> {
-          try {
-            nodes.add(nodeStateMap.getNodeInfo(uuid));
-          } catch (NodeNotFoundException e) {
-            // This should not happen unless something other than
-            // NodeStateManager is directly modifying NodeStateMap and removed
-            // the node entry after we got the list of UUIDs.
-            LOG.error("Inconsistent NodeStateMap! " + nodeStateMap);
-          }
-        });
-    return nodes;
-  }
-
-  /**
-   * Returns all the nodes which have registered with NodeStateManager.
-   *
-   * @return all the managed nodes
-   */
-  public List<DatanodeInfo> getAllNodes() {
-    List<DatanodeInfo> nodes = new ArrayList<>();
-    nodeStateMap.getAllNodes().forEach(
-        uuid -> {
-          try {
-            nodes.add(nodeStateMap.getNodeInfo(uuid));
-          } catch (NodeNotFoundException e) {
-            // This should not happen unless something other than
-            // NodeStateManager is directly modifying NodeStateMap and removed
-            // the node entry after we got the list of UUIDs.
-            LOG.error("Inconsistent NodeStateMap! " + nodeStateMap);
-          }
-        });
-    return nodes;
-  }
-
-  /**
-   * Gets the set of PipelineIDs a datanode belongs to.
-   * @param dnId - Datanode ID
-   * @return Set of PipelineID
-   */
-  public Set<PipelineID> getPipelineByDnID(UUID dnId) {
-    return node2PipelineMap.getPipelines(dnId);
-  }
-
-  /**
-   * Returns the count of healthy nodes.
-   *
-   * @return healthy node count
-   */
-  public int getHealthyNodeCount() {
-    return getNodeCount(NodeState.HEALTHY);
-  }
-
-  /**
-   * Returns the count of stale nodes.
-   *
-   * @return stale node count
-   */
-  public int getStaleNodeCount() {
-    return getNodeCount(NodeState.STALE);
-  }
-
-  /**
-   * Returns the count of dead nodes.
-   *
-   * @return dead node count
-   */
-  public int getDeadNodeCount() {
-    return getNodeCount(NodeState.DEAD);
-  }
-
-  /**
-   * Returns the count of nodes in specified state.
-   *
-   * @param state NodeState
-   *
-   * @return node count
-   */
-  public int getNodeCount(NodeState state) {
-    return nodeStateMap.getNodeCount(state);
-  }
-
-  /**
-   * Returns the count of all nodes managed by NodeStateManager.
-   *
-   * @return node count
-   */
-  public int getTotalNodeCount() {
-    return nodeStateMap.getTotalNodeCount();
-  }
-
-  /**
-   * Removes a pipeline from the node2PipelineMap.
-   * @param pipeline - Pipeline to be removed
-   */
-  public void removePipeline(Pipeline pipeline) {
-    node2PipelineMap.removePipeline(pipeline);
-  }
-
-  /**
-   * Adds the given container to the specified datanode.
-   *
-   * @param uuid - datanode uuid
-   * @param containerId - containerID
-   * @throws NodeNotFoundException - if datanode is not known. For new datanode
-   *                        use addDatanodeInContainerMap call.
-   */
-  public void addContainer(final UUID uuid,
-                           final ContainerID containerId)
-      throws NodeNotFoundException {
-    nodeStateMap.addContainer(uuid, containerId);
-  }
-
-  /**
-   * Update set of containers available on a datanode.
-   * @param uuid - DatanodeID
-   * @param containerIds - Set of containerIDs
-   * @throws NodeNotFoundException - if datanode is not known.
-   */
-  public void setContainers(UUID uuid, Set<ContainerID> containerIds)
-      throws NodeNotFoundException {
-    nodeStateMap.setContainers(uuid, containerIds);
-  }
-
-  /**
-   * Return set of containerIDs available on a datanode.
-   * @param uuid - DatanodeID
-   * @return - set of containerIDs
-   */
-  public Set<ContainerID> getContainers(UUID uuid)
-      throws NodeNotFoundException {
-    return nodeStateMap.getContainers(uuid);
-  }
-
-  /**
-   * Moves stale or dead nodes to healthy if we get a heartbeat from them.
-   * Moves healthy nodes to stale if needed.
-   * Moves stale nodes to dead if needed.
-   *
-   * @see Thread#run()
-   */
-  @Override
-  public void run() {
-
-    if (shouldSkipCheck()) {
-      skippedHealthChecks++;
-      LOG.info("Detected long delay in scheduling HB processing thread. "
-          + "Skipping heartbeat checks for one iteration.");
-    } else {
-      checkNodesHealth();
-    }
-
-    // We purposefully make this non-deterministic. Instead of using
-    // scheduleAtFixedRate we just go to sleep and wake up at the next
-    // rendezvous point, which is currentTime + heartbeatCheckerIntervalMs.
-    // As a result we are no longer heartbeat-checking at a fixed cadence,
-    // but at clock tick + time taken to do the work.
-    //
-    // This time taken to work can skew the heartbeat processor thread.
-    // We don't care, for the following reasons:
-    //
-    // 1. checkerInterval is generally many magnitudes faster than the
-    // datanode HB frequency.
-    //
-    // 2. If we have too many nodes, the SCM could end up doing nothing but
-    // HB processing, which could starve the SCM's CPU. With this approach
-    // we always guarantee that the HB thread sleeps for a little while.
-    //
-    // 3. It is possible that we will never finish processing the HBs in
-    // this thread. But that means we have a misconfigured system, and we
-    // warn the users by logging that information.
-    //
-    // 4. And the most important reason: heartbeats are not blocked even if
-    // this thread does not run; they still go into the processing queue.
-    scheduleNextHealthCheck();
-  }
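
The fixed-delay pattern described in the comment above can be distilled into a short, self-contained sketch (the class and interval names here are illustrative, not part of this file): each run re-arms itself only after the work completes, so a slow check can never pile up executions the way scheduleAtFixedRate can.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical stand-in for the health-check loop sketched above.
public class FixedDelayTask implements Runnable {
  private static final long INTERVAL_MS = 1000; // assumed interval
  private final ScheduledExecutorService executor =
      Executors.newSingleThreadScheduledExecutor();

  public void start() {
    executor.schedule(this, INTERVAL_MS, TimeUnit.MILLISECONDS);
  }

  @Override
  public void run() {
    try {
      doOneRoundOfChecks(); // may take a variable amount of time
    } finally {
      // Re-arm only after the work is done: the next run starts
      // INTERVAL_MS after this one finishes, so runs never overlap.
      if (!executor.isShutdown()) {
        executor.schedule(this, INTERVAL_MS, TimeUnit.MILLISECONDS);
      }
    }
  }

  private void doOneRoundOfChecks() {
    // placeholder for the actual health checks
  }
}
```
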
-
-  private void checkNodesHealth() {
-
-    /*
-     *
-     *          staleNodeDeadline                healthyNodeDeadline
-     *                 |                                  |
-     *      Dead       |             Stale                |     Healthy
-     *      Node       |             Node                 |     Node
-     *      Window     |             Window               |     Window
-     * ----------------+----------------------------------+------------------->
-     *                      >>-->> time-line >>-->>
-     *
-     * Here is the logic for computing the health of a node.
-     *
-     * 1. We get the current time and look back at the time when we last
-     *    got a heartbeat from the node.
-     *
-     * 2. If the last heartbeat falls within the healthy-node window, we
-     *    mark the node as healthy.
-     *
-     * 3. If the last HB timestamp is older and falls within the stale-node
-     *    window, we mark the node as stale.
-     *
-     * 4. If the last HB time is older than the stale window, the node is
-     *    marked as dead.
-     *
-     * Processing starts from the current time and looks backwards in time.
-     */
-    long processingStartTime = Time.monotonicNow();
-    // After this time node is considered to be stale.
-    long healthyNodeDeadline = processingStartTime - staleNodeIntervalMs;
-    // After this time node is considered to be dead.
-    long staleNodeDeadline = processingStartTime - deadNodeIntervalMs;
-
-    Predicate<Long> healthyNodeCondition =
-        (lastHbTime) -> lastHbTime >= healthyNodeDeadline;
-    // staleNodeCondition is a superset: it matches both stale and dead nodes
-    Predicate<Long> staleNodeCondition =
-        (lastHbTime) -> lastHbTime < healthyNodeDeadline;
-    Predicate<Long> deadNodeCondition =
-        (lastHbTime) -> lastHbTime < staleNodeDeadline;
-    try {
-      for (NodeState state : NodeState.values()) {
-        List<UUID> nodes = nodeStateMap.getNodes(state);
-        for (UUID id : nodes) {
-          DatanodeInfo node = nodeStateMap.getNodeInfo(id);
-          switch (state) {
-          case HEALTHY:
-            // Move the node to STALE if the last heartbeat is older than the
-            // configured stale-node interval.
-            updateNodeState(node, staleNodeCondition, state,
-                  NodeLifeCycleEvent.TIMEOUT);
-            break;
-          case STALE:
-            // Move the node to DEAD if the last heartbeat is older than the
-            // configured dead-node interval.
-            updateNodeState(node, deadNodeCondition, state,
-                NodeLifeCycleEvent.TIMEOUT);
-            // Restore the node if we have received a heartbeat within the
-            // configured stale-node interval.
-            updateNodeState(node, healthyNodeCondition, state,
-                NodeLifeCycleEvent.RESTORE);
-            break;
-          case DEAD:
-            // Resurrect the node if we have received a heartbeat within the
-            // configured stale-node interval.
-            updateNodeState(node, healthyNodeCondition, state,
-                NodeLifeCycleEvent.RESURRECT);
-            break;
-            // We don't do anything for DECOMMISSIONING and DECOMMISSIONED in
-            // heartbeat processing.
-          case DECOMMISSIONING:
-          case DECOMMISSIONED:
-          default:
-          }
-        }
-      }
-    } catch (NodeNotFoundException e) {
-      // This should not happen unless someone else other than
-      // NodeStateManager is directly modifying NodeStateMap and removed
-      // the node entry after we got the list of UUIDs.
-      LOG.error("Inconsistent NodeStateMap! " + nodeStateMap);
-    }
-    long processingEndTime = Time.monotonicNow();
-    //If we have taken too much time for HB processing, log that information.
-    if ((processingEndTime - processingStartTime) >
-        heartbeatCheckerIntervalMs) {
-      LOG.error("Total time spend processing datanode HB's is greater than " +
-              "configured values for datanode heartbeats. Please adjust the" +
-              " heartbeat configs. Time Spend on HB processing: {} seconds " +
-              "Datanode heartbeat Interval: {} seconds.",
-          TimeUnit.MILLISECONDS
-              .toSeconds(processingEndTime - processingStartTime),
-          heartbeatCheckerIntervalMs);
-    }
-
-  }
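
To make the window arithmetic above concrete, here is a small worked example with assumed intervals (90 seconds to stale, 10 minutes to dead; both values are illustrative, not the defaults):

```java
class HealthWindowDemo {
  public static void main(String[] args) {
    long staleNodeIntervalMs = 90_000L;  // illustrative, not the default
    long deadNodeIntervalMs = 600_000L;  // illustrative, not the default
    long now = 1_000_000L;               // stand-in for Time.monotonicNow()

    long healthyNodeDeadline = now - staleNodeIntervalMs; // 910_000
    long staleNodeDeadline = now - deadNodeIntervalMs;    // 400_000

    long lastHbTime = now - 120_000L;    // last heartbeat 2 minutes ago
    if (lastHbTime >= healthyNodeDeadline) {
      System.out.println("HEALTHY");
    } else if (lastHbTime >= staleNodeDeadline) {
      System.out.println("STALE");       // fires: 400_000 <= 880_000 < 910_000
    } else {
      System.out.println("DEAD");
    }
  }
}
```
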
-
-  private void scheduleNextHealthCheck() {
-
-    if (!Thread.currentThread().isInterrupted() &&
-        !executorService.isShutdown()) {
-      //BUGBUG: The returned future needs to be checked here to make sure
-      // the exceptions are handled correctly.
-      healthCheckFuture = executorService.schedule(this,
-          heartbeatCheckerIntervalMs, TimeUnit.MILLISECONDS);
-    } else {
-      LOG.warn("Current Thread is interrupted, shutting down HB processing " +
-          "thread for Node Manager.");
-    }
-
-    lastHealthCheck = Time.monotonicNow();
-  }
-
-  /**
-   * If the time since the last check exceeds the stale/dead node interval,
-   * skip the check; such long delays might be caused by a JVM pause, and SCM
-   * cannot make reliable conclusions about datanode health in such
-   * situations.
-   * @return true if the HB checks should be skipped for this iteration
-   */
-  private boolean shouldSkipCheck() {
-
-    long currentTime = Time.monotonicNow();
-    long minInterval = Math.min(staleNodeIntervalMs, deadNodeIntervalMs);
-
-    return ((currentTime - lastHealthCheck) >= minInterval);
-  }
-
-  /**
-   * Updates the node state if the condition satisfies.
-   *
-   * @param node DatanodeInfo
-   * @param condition condition to check
-   * @param state current state of node
-   * @param lifeCycleEvent NodeLifeCycleEvent to be applied if condition
-   *                       matches
-   *
-   * @throws NodeNotFoundException if the node is not present
-   */
-  private void updateNodeState(DatanodeInfo node, Predicate<Long> condition,
-      NodeState state, NodeLifeCycleEvent lifeCycleEvent)
-      throws NodeNotFoundException {
-    try {
-      if (condition.test(node.getLastHeartbeatTime())) {
-        NodeState newState = stateMachine.getNextState(state, lifeCycleEvent);
-        nodeStateMap.updateNodeState(node.getUuid(), state, newState);
-        if (state2EventMap.containsKey(newState)) {
-          eventPublisher.fireEvent(state2EventMap.get(newState), node);
-        }
-      }
-    } catch (InvalidStateTransitionException e) {
-      LOG.warn("Invalid state transition of node {}." +
-              " Current state: {}, life cycle event: {}",
-          node, state, lifeCycleEvent);
-    }
-  }
-
-  @Override
-  public void close() {
-    executorService.shutdown();
-    try {
-      if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
-        executorService.shutdownNow();
-      }
-
-      if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
-        LOG.error("Unable to shutdown NodeStateManager properly.");
-      }
-    } catch (InterruptedException e) {
-      executorService.shutdownNow();
-      Thread.currentThread().interrupt();
-    }
-  }
-
-  /**
-   * Test Utility : return number of times heartbeat check was skipped.
-   * @return : count of times HB process was skipped
-   */
-  @VisibleForTesting
-  long getSkippedHealthChecks() {
-    return skippedHealthChecks;
-  }
-
-  /**
-   * Test Utility : Pause the periodic node hb check.
-   * @return ScheduledFuture for the scheduled check that got cancelled.
-   */
-  @VisibleForTesting
-  ScheduledFuture pause() {
-
-    if (executorService.isShutdown() || checkPaused) {
-      return null;
-    }
-
-    checkPaused = healthCheckFuture.cancel(false);
-
-    return healthCheckFuture;
-  }
-
-  /**
-   * Test utility : unpause the periodic node hb check.
-   * @return ScheduledFuture for the next scheduled check
-   */
-  @VisibleForTesting
-  ScheduledFuture unpause() {
-
-    if (executorService.isShutdown()) {
-      return null;
-    }
-
-    if (checkPaused) {
-      Preconditions.checkState(((healthCheckFuture == null)
-          || healthCheckFuture.isCancelled()
-          || healthCheckFuture.isDone()));
-
-      checkPaused = false;
-      /*
-       * We do not call scheduleNextHealthCheck because we are
-       * not updating the lastHealthCheck timestamp.
-       */
-      healthCheckFuture = executorService.schedule(this,
-          heartbeatCheckerIntervalMs, TimeUnit.MILLISECONDS);
-    }
-
-    return healthCheckFuture;
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
deleted file mode 100644
index 5976c17..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-
-/**
- * Handles the event fired when a non-healthy node becomes healthy again.
- */
-public class NonHealthyToHealthyNodeHandler
-    implements EventHandler<DatanodeDetails> {
-
-  private final PipelineManager pipelineManager;
-  private final Configuration conf;
-
-  public NonHealthyToHealthyNodeHandler(
-      PipelineManager pipelineManager, OzoneConfiguration conf) {
-    this.pipelineManager = pipelineManager;
-    this.conf = conf;
-  }
-
-  @Override
-  public void onMessage(DatanodeDetails datanodeDetails,
-      EventPublisher publisher) {
-    pipelineManager.triggerPipelineCreation();
-  }
-}
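
This handler only fires pipeline creation; the interesting part is the pattern it plugs into, where handlers are registered per event type and invoked on publish. A minimal generic sketch of that pattern follows (the SimpleEventBus type is illustrative, not the HDDS event queue API):

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

// Minimal per-event-type handler registry, illustrating the dispatch
// pattern; not the actual org.apache.hadoop.hdds.server.events API.
class SimpleEventBus<E> {
  private final Map<String, List<Consumer<E>>> handlers = new HashMap<>();

  void addHandler(String eventType, Consumer<E> handler) {
    handlers.computeIfAbsent(eventType, k -> new ArrayList<>()).add(handler);
  }

  void fireEvent(String eventType, E payload) {
    for (Consumer<E> h : handlers.getOrDefault(eventType, new ArrayList<>())) {
      h.accept(payload);
    }
  }
}

// Usage mirroring onMessage() above: a node returning to HEALTHY
// triggers pipeline creation.
//   bus.addHandler("NON_HEALTHY_TO_HEALTHY",
//       dn -> pipelineManager.triggerPipelineCreation());
```
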
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
deleted file mode 100644
index e1e1d6c..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ /dev/null
@@ -1,684 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.node;
-
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.LinkedList;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ScheduledFuture;
-import java.util.stream.Collectors;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.scm.VersionInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.net.CachedDNSToSwitchMapping;
-import org.apache.hadoop.net.DNSToSwitchMapping;
-import org.apache.hadoop.net.TableMapping;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.protocol.VersionResponse;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.util.ReflectionUtils;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Maintains information about the Datanodes on SCM side.
- * <p>
- * Heartbeat handling in SCM is very simple compared to the HDFS
- * HeartbeatManager.
- * <p>
- * The getNode(byState) functions make a copy of the node maps and then create
- * a list from that copy. It should be assumed that these get functions always
- * report *stale* information. For example, getting the deadNodeCount followed
- * by getNodes(DEAD) could very well produce a totally different count.
- * Likewise, getNodeCount(HEALTHY) + getNodeCount(DEAD) + getNodeCount(STALE)
- * is not guaranteed to add up to the total number of nodes that we know of.
- * Please treat all get functions in this file as snapshots of information
- * that may be inconsistent as soon as you read it.
- */
-public class SCMNodeManager implements NodeManager {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SCMNodeManager.class);
-
-  private final NodeStateManager nodeStateManager;
-  private final VersionInfo version;
-  private final CommandQueue commandQueue;
-  private final SCMNodeMetrics metrics;
-  // Node manager MXBean
-  private ObjectName nmInfoBean;
-  private final SCMStorageConfig scmStorageConfig;
-  private final NetworkTopology clusterMap;
-  private final DNSToSwitchMapping dnsToSwitchMapping;
-  private final boolean useHostname;
-  private final ConcurrentHashMap<String, Set<String>> dnsToUuidMap =
-      new ConcurrentHashMap<>();
-
-  /**
-   * Constructs the SCM node manager.
-   */
-  public SCMNodeManager(OzoneConfiguration conf,
-      SCMStorageConfig scmStorageConfig, EventPublisher eventPublisher,
-      NetworkTopology networkTopology) {
-    this.nodeStateManager = new NodeStateManager(conf, eventPublisher);
-    this.version = VersionInfo.getLatestVersion();
-    this.commandQueue = new CommandQueue();
-    this.scmStorageConfig = scmStorageConfig;
-    LOG.info("Entering startup safe mode.");
-    registerMXBean();
-    this.metrics = SCMNodeMetrics.create(this);
-    this.clusterMap = networkTopology;
-    Class<? extends DNSToSwitchMapping> dnsToSwitchMappingClass =
-        conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
-            TableMapping.class, DNSToSwitchMapping.class);
-    DNSToSwitchMapping newInstance = ReflectionUtils.newInstance(
-        dnsToSwitchMappingClass, conf);
-    this.dnsToSwitchMapping =
-        ((newInstance instanceof CachedDNSToSwitchMapping) ? newInstance
-            : new CachedDNSToSwitchMapping(newInstance));
-    this.useHostname = conf.getBoolean(
-        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
-        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
-  }
-
-  private void registerMXBean() {
-    this.nmInfoBean = MBeans.register("SCMNodeManager",
-        "SCMNodeManagerInfo", this);
-  }
-
-  private void unregisterMXBean() {
-    if (this.nmInfoBean != null) {
-      MBeans.unregister(this.nmInfoBean);
-      this.nmInfoBean = null;
-    }
-  }
-
-
-  /**
-   * Returns all datanodes that are in the given state. This function works by
-   * taking a snapshot of the current collection and then returning a list
-   * built from that snapshot. This means the real map might have changed by
-   * the time we return this list.
-   *
-   * @return List of Datanodes that are known to SCM in the requested state.
-   */
-  @Override
-  public List<DatanodeDetails> getNodes(NodeState nodestate) {
-    return nodeStateManager.getNodes(nodestate).stream()
-        .map(node -> (DatanodeDetails)node).collect(Collectors.toList());
-  }
-
-  /**
-   * Returns all datanodes that are known to SCM.
-   *
-   * @return List of DatanodeDetails
-   */
-  @Override
-  public List<DatanodeDetails> getAllNodes() {
-    return nodeStateManager.getAllNodes().stream()
-        .map(node -> (DatanodeDetails)node).collect(Collectors.toList());
-  }
-
-  /**
-   * Returns the number of datanodes in the given state.
-   *
-   * @return count
-   */
-  @Override
-  public int getNodeCount(NodeState nodestate) {
-    return nodeStateManager.getNodeCount(nodestate);
-  }
-
-  /**
-   * Returns the node state of a specific node.
-   *
-   * @param datanodeDetails Datanode Details
-   * @return Healthy/Stale/Dead/Unknown.
-   */
-  @Override
-  public NodeState getNodeState(DatanodeDetails datanodeDetails) {
-    try {
-      return nodeStateManager.getNodeState(datanodeDetails);
-    } catch (NodeNotFoundException e) {
-      // TODO: should we throw NodeNotFoundException?
-      return null;
-    }
-  }
-
-  /**
-   * Closes this node manager and releases any system resources associated
-   * with it. If it is already closed then invoking this method has no effect.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-    unregisterMXBean();
-    metrics.unRegister();
-    nodeStateManager.close();
-  }
-
-  /**
-   * Gets the version info from SCM.
-   *
-   * @param versionRequest - version Request.
-   * @return - SCM version info and other information required by the
-   * datanode.
-   */
-  @Override
-  public VersionResponse getVersion(SCMVersionRequestProto versionRequest) {
-    return VersionResponse.newBuilder()
-        .setVersion(this.version.getVersion())
-        .addValue(OzoneConsts.SCM_ID,
-            this.scmStorageConfig.getScmId())
-        .addValue(OzoneConsts.CLUSTER_ID, this.scmStorageConfig.getClusterID())
-        .build();
-  }
-
-  /**
-   * Register the node if the node finds that it is not registered with any
-   * SCM.
-   *
-   * @param datanodeDetails - Send datanodeDetails with Node info.
-   *                   This function generates and assigns a new datanode ID
-   *                   for the datanode. This allows SCM to be run independent
-   *                   of Namenode if required.
-   * @param nodeReport NodeReport.
-   *
-   * @return SCMHeartbeatResponseProto
-   */
-  @Override
-  public RegisteredCommand register(
-      DatanodeDetails datanodeDetails, NodeReportProto nodeReport,
-      PipelineReportsProto pipelineReportsProto) {
-
-    InetAddress dnAddress = Server.getRemoteIp();
-    if (dnAddress != null) {
-      // Mostly called inside an RPC, update ip and peer hostname
-      datanodeDetails.setHostName(dnAddress.getHostName());
-      datanodeDetails.setIpAddress(dnAddress.getHostAddress());
-    }
-    try {
-      String dnsName;
-      String networkLocation;
-      datanodeDetails.setNetworkName(datanodeDetails.getUuidString());
-      if (useHostname) {
-        dnsName = datanodeDetails.getHostName();
-      } else {
-        dnsName = datanodeDetails.getIpAddress();
-      }
-      networkLocation = nodeResolve(dnsName);
-      if (networkLocation != null) {
-        datanodeDetails.setNetworkLocation(networkLocation);
-      }
-      nodeStateManager.addNode(datanodeDetails);
-      clusterMap.add(datanodeDetails);
-      addEntryTodnsToUuidMap(dnsName, datanodeDetails.getUuidString());
-      // Updating Node Report, as registration is successful
-      processNodeReport(datanodeDetails, nodeReport);
-      LOG.info("Registered Data node : {}", datanodeDetails);
-    } catch (NodeAlreadyExistsException e) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Datanode is already registered. Datanode: {}",
-            datanodeDetails.toString());
-      }
-    }
-
-    return RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success)
-        .setDatanode(datanodeDetails)
-        .setClusterID(this.scmStorageConfig.getClusterID())
-        .build();
-  }
-
-  /**
-   * Add an entry to the dnsToUuidMap, which maps hostname / IP to the DNs
-   * running on that host. As each address can have many DNs running on it,
-   * this is a one-to-many mapping.
-   * @param dnsName String representing the hostname or IP of the node
-   * @param uuid String representing the UUID of the registered node.
-   */
-  @SuppressFBWarnings(value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION",
-      justification="The method is synchronized and this is the only place "+
-          "dnsToUuidMap is modified")
-  private synchronized void addEntryTodnsToUuidMap(
-      String dnsName, String uuid) {
-    Set<String> dnList = dnsToUuidMap.get(dnsName);
-    if (dnList == null) {
-      dnList = ConcurrentHashMap.newKeySet();
-      dnsToUuidMap.put(dnsName, dnList);
-    }
-    dnList.add(uuid);
-  }
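
The same one-to-many insert can be written without the synchronized method by letting ConcurrentHashMap do the atomic set creation; a sketch (the helper class is hypothetical, not part of this file):

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

class DnsToUuidIndex {
  private final ConcurrentHashMap<String, Set<String>> dnsToUuidMap =
      new ConcurrentHashMap<>();

  void add(String dnsName, String uuid) {
    // computeIfAbsent atomically creates the per-host set on first use;
    // ConcurrentHashMap.newKeySet() returns a thread-safe Set.
    dnsToUuidMap
        .computeIfAbsent(dnsName, k -> ConcurrentHashMap.newKeySet())
        .add(uuid);
  }
}
```
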
-
-  /**
-   * Send heartbeat to indicate the datanode is alive and doing well.
-   *
-   * @param datanodeDetails - DatanodeDetailsProto.
-   * @return SCMheartbeat response.
-   */
-  @Override
-  public List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails) {
-    Preconditions.checkNotNull(datanodeDetails, "Heartbeat is missing " +
-        "DatanodeDetails.");
-    try {
-      nodeStateManager.updateLastHeartbeatTime(datanodeDetails);
-      metrics.incNumHBProcessed();
-    } catch (NodeNotFoundException e) {
-      metrics.incNumHBProcessingFailed();
-      LOG.error("SCM trying to process heartbeat from an " +
-          "unregistered node {}. Ignoring the heartbeat.", datanodeDetails);
-    }
-    return commandQueue.getCommand(datanodeDetails.getUuid());
-  }
-
-  @Override
-  public Boolean isNodeRegistered(DatanodeDetails datanodeDetails) {
-    try {
-      nodeStateManager.getNode(datanodeDetails);
-      return true;
-    } catch (NodeNotFoundException e) {
-      return false;
-    }
-  }
-
-  /**
-   * Process node report.
-   *
-   * @param datanodeDetails
-   * @param nodeReport
-   */
-  @Override
-  public void processNodeReport(DatanodeDetails datanodeDetails,
-                                NodeReportProto nodeReport) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Processing node report from [datanode={}]",
-          datanodeDetails.getHostName());
-    }
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("HB is received from [datanode={}]: <json>{}</json>",
-          datanodeDetails.getHostName(),
-          nodeReport.toString().replaceAll("\n", "\\\\n"));
-    }
-    try {
-      DatanodeInfo datanodeInfo = nodeStateManager.getNode(datanodeDetails);
-      if (nodeReport != null) {
-        datanodeInfo.updateStorageReports(nodeReport.getStorageReportList());
-        metrics.incNumNodeReportProcessed();
-      }
-    } catch (NodeNotFoundException e) {
-      metrics.incNumNodeReportProcessingFailed();
-      LOG.warn("Got node report from unregistered datanode {}",
-          datanodeDetails);
-    }
-  }
-
-  /**
-   * Returns the aggregated node stats.
-   * @return the aggregated node stats.
-   */
-  @Override
-  public SCMNodeStat getStats() {
-    long capacity = 0L;
-    long used = 0L;
-    long remaining = 0L;
-
-    for (SCMNodeStat stat : getNodeStats().values()) {
-      capacity += stat.getCapacity().get();
-      used += stat.getScmUsed().get();
-      remaining += stat.getRemaining().get();
-    }
-    return new SCMNodeStat(capacity, used, remaining);
-  }
-
-  /**
-   * Return a map of node stats.
-   * @return a map of individual node stats (live/stale but not dead).
-   */
-  @Override
-  public Map<DatanodeDetails, SCMNodeStat> getNodeStats() {
-
-    final Map<DatanodeDetails, SCMNodeStat> nodeStats = new HashMap<>();
-
-    final List<DatanodeInfo> healthyNodes =  nodeStateManager
-        .getNodes(NodeState.HEALTHY);
-    final List<DatanodeInfo> staleNodes = nodeStateManager
-        .getNodes(NodeState.STALE);
-    final List<DatanodeInfo> datanodes = new ArrayList<>(healthyNodes);
-    datanodes.addAll(staleNodes);
-
-    for (DatanodeInfo dnInfo : datanodes) {
-      SCMNodeStat nodeStat = getNodeStatInternal(dnInfo);
-      if (nodeStat != null) {
-        nodeStats.put(dnInfo, nodeStat);
-      }
-    }
-    return nodeStats;
-  }
-
-  /**
-   * Return the node stat of the specified datanode.
-   * @param datanodeDetails - datanode ID.
-   * @return node stat if it is live/stale, null if it is decommissioned or
-   * doesn't exist.
-   */
-  @Override
-  public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) {
-    final SCMNodeStat nodeStat = getNodeStatInternal(datanodeDetails);
-    return nodeStat != null ? new SCMNodeMetric(nodeStat) : null;
-  }
-
-  private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) {
-    try {
-      long capacity = 0L;
-      long used = 0L;
-      long remaining = 0L;
-
-      final DatanodeInfo datanodeInfo = nodeStateManager
-          .getNode(datanodeDetails);
-      final List<StorageReportProto> storageReportProtos = datanodeInfo
-          .getStorageReports();
-      for (StorageReportProto reportProto : storageReportProtos) {
-        capacity += reportProto.getCapacity();
-        used += reportProto.getScmUsed();
-        remaining += reportProto.getRemaining();
-      }
-      return new SCMNodeStat(capacity, used, remaining);
-    } catch (NodeNotFoundException e) {
-      LOG.warn("Cannot generate NodeStat, datanode {} not found.",
-          datanodeDetails.getUuid());
-      return null;
-    }
-  }
-
-  @Override
-  public Map<String, Integer> getNodeCount() {
-    Map<String, Integer> nodeCountMap = new HashMap<String, Integer>();
-    for(NodeState state : NodeState.values()) {
-      nodeCountMap.put(state.toString(), getNodeCount(state));
-    }
-    return nodeCountMap;
-  }
-
-  // We should introduce DISK, SSD, etc., notion in
-  // SCMNodeStat and try to use it.
-  @Override
-  public Map<String, Long> getNodeInfo() {
-    long diskCapacity = 0L;
-    long diskUsed = 0L;
-    long diskRemaining = 0L;
-
-    long ssdCapacity = 0L;
-    long ssdUsed = 0L;
-    long ssdRemaining = 0L;
-
-    List<DatanodeInfo> healthyNodes =  nodeStateManager
-        .getNodes(NodeState.HEALTHY);
-    List<DatanodeInfo> staleNodes = nodeStateManager
-        .getNodes(NodeState.STALE);
-
-    List<DatanodeInfo> datanodes = new ArrayList<>(healthyNodes);
-    datanodes.addAll(staleNodes);
-
-    for (DatanodeInfo dnInfo : datanodes) {
-      List<StorageReportProto> storageReportProtos = dnInfo.getStorageReports();
-      for (StorageReportProto reportProto : storageReportProtos) {
-        if (reportProto.getStorageType() ==
-            StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK) {
-          diskCapacity += reportProto.getCapacity();
-          diskRemaining += reportProto.getRemaining();
-          diskUsed += reportProto.getScmUsed();
-        } else if (reportProto.getStorageType() ==
-            StorageContainerDatanodeProtocolProtos.StorageTypeProto.SSD) {
-          ssdCapacity += reportProto.getCapacity();
-          ssdRemaining += reportProto.getRemaining();
-          ssdUsed += reportProto.getScmUsed();
-        }
-      }
-    }
-
-    Map<String, Long> nodeInfo = new HashMap<>();
-    nodeInfo.put("DISKCapacity", diskCapacity);
-    nodeInfo.put("DISKUsed", diskUsed);
-    nodeInfo.put("DISKRemaining", diskRemaning);
-
-    nodeInfo.put("SSDCapacity", ssdCapacity);
-    nodeInfo.put("SSDUsed", ssdUsed);
-    nodeInfo.put("SSDRemaining", ssdRemaining);
-    return nodeInfo;
-  }
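
Following the TODO above, the per-storage-type bookkeeping can be generalized with an EnumMap instead of hard-coded DISK/SSD fields; a sketch with simplified stand-in types (not the protobuf types used here):

```java
import java.util.EnumMap;
import java.util.List;
import java.util.Map;

class StorageTotals {
  enum StorageType { DISK, SSD, ARCHIVE } // simplified stand-in

  static class Report {                   // stand-in for StorageReportProto
    StorageType type;
    long capacity, used, remaining;
  }

  static class Totals {
    long capacity, used, remaining;
  }

  static Map<StorageType, Totals> aggregate(List<Report> reports) {
    Map<StorageType, Totals> totals = new EnumMap<>(StorageType.class);
    for (Report r : reports) {
      // One Totals entry per storage type; no per-type fields needed.
      Totals t = totals.computeIfAbsent(r.type, k -> new Totals());
      t.capacity += r.capacity;
      t.used += r.used;
      t.remaining += r.remaining;
    }
    return totals;
  }
}
```
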
-
-
-  /**
-   * Get set of pipelines a datanode is part of.
-   * @param datanodeDetails - datanodeID
-   * @return Set of PipelineID
-   */
-  @Override
-  public Set<PipelineID> getPipelines(DatanodeDetails datanodeDetails) {
-    return nodeStateManager.getPipelineByDnID(datanodeDetails.getUuid());
-  }
-
-
-  /**
-   * Add pipeline information in the NodeManager.
-   * @param pipeline - Pipeline to be added
-   */
-  @Override
-  public void addPipeline(Pipeline pipeline) {
-    nodeStateManager.addPipeline(pipeline);
-  }
-
-  /**
-   * Remove a pipeline information from the NodeManager.
-   * @param pipeline - Pipeline to be removed
-   */
-  @Override
-  public void removePipeline(Pipeline pipeline) {
-    nodeStateManager.removePipeline(pipeline);
-  }
-
-  @Override
-  public void addContainer(final DatanodeDetails datanodeDetails,
-                           final ContainerID containerId)
-      throws NodeNotFoundException {
-    nodeStateManager.addContainer(datanodeDetails.getUuid(), containerId);
-  }
-
-  /**
-   * Update set of containers available on a datanode.
-   * @param datanodeDetails - DatanodeID
-   * @param containerIds - Set of containerIDs
-   * @throws NodeNotFoundException - if datanode is not known. For new datanode
-   *                        use addDatanodeInContainerMap call.
-   */
-  @Override
-  public void setContainers(DatanodeDetails datanodeDetails,
-      Set<ContainerID> containerIds) throws NodeNotFoundException {
-    nodeStateManager.setContainers(datanodeDetails.getUuid(),
-        containerIds);
-  }
-
-  /**
-   * Return set of containerIDs available on a datanode.
-   * @param datanodeDetails - DatanodeID
-   * @return - set of containerIDs
-   */
-  @Override
-  public Set<ContainerID> getContainers(DatanodeDetails datanodeDetails)
-      throws NodeNotFoundException {
-    return nodeStateManager.getContainers(datanodeDetails.getUuid());
-  }
-
-  // TODO:
-  // Since datanode commands are added through event queue, onMessage method
-  // should take care of adding commands to command queue.
-  // Refactor and remove all the usage of this method and delete this method.
-  @Override
-  public void addDatanodeCommand(UUID dnId, SCMCommand command) {
-    this.commandQueue.addCommand(dnId, command);
-  }
-
-  /**
-   * This method is called by EventQueue whenever someone adds a new
-   * DATANODE_COMMAND to the Queue.
-   *
-   * @param commandForDatanode DatanodeCommand
-   * @param ignored publisher
-   */
-  @Override
-  public void onMessage(CommandForDatanode commandForDatanode,
-      EventPublisher ignored) {
-    addDatanodeCommand(commandForDatanode.getDatanodeId(),
-        commandForDatanode.getCommand());
-  }
-
-  @Override
-  public List<SCMCommand> getCommandQueue(UUID dnID) {
-    return commandQueue.getCommand(dnID);
-  }
-
-  /**
-   * Given datanode uuid, returns the DatanodeDetails for the node.
-   *
-   * @param uuid datanode UUID string
-   * @return the given datanode, or null if not found
-   */
-  @Override
-  public DatanodeDetails getNodeByUuid(String uuid) {
-    if (Strings.isNullOrEmpty(uuid)) {
-      LOG.warn("uuid is null");
-      return null;
-    }
-    DatanodeDetails temp = DatanodeDetails.newBuilder().setUuid(uuid).build();
-    try {
-      return nodeStateManager.getNode(temp);
-    } catch (NodeNotFoundException e) {
-      LOG.warn("Cannot find node for uuid {}", uuid);
-      return null;
-    }
-  }
-
-  /**
-   * Given datanode address(Ipaddress or hostname), return a list of
-   * DatanodeDetails for the datanodes registered on that address.
-   *
-   * @param address datanode address
-   * @return the given datanode, or empty list if none found
-   */
-  @Override
-  public List<DatanodeDetails> getNodesByAddress(String address) {
-    List<DatanodeDetails> results = new LinkedList<>();
-    if (Strings.isNullOrEmpty(address)) {
-      LOG.warn("address is null");
-      return results;
-    }
-    Set<String> uuids = dnsToUuidMap.get(address);
-    if (uuids == null) {
-      LOG.warn("Cannot find node for address {}", address);
-      return results;
-    }
-
-    for (String uuid : uuids) {
-      DatanodeDetails temp = DatanodeDetails.newBuilder().setUuid(uuid).build();
-      try {
-        results.add(nodeStateManager.getNode(temp));
-      } catch (NodeNotFoundException e) {
-        LOG.warn("Cannot find node for uuid {}", uuid);
-      }
-    }
-    return results;
-  }
-
-  private String nodeResolve(String hostname) {
-    List<String> hosts = new ArrayList<>(1);
-    hosts.add(hostname);
-    List<String> resolvedHosts = dnsToSwitchMapping.resolve(hosts);
-    if (resolvedHosts != null && !resolvedHosts.isEmpty()) {
-      String location = resolvedHosts.get(0);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Resolve datanode {} return location {}", hostname, location);
-      }
-      return location;
-    } else {
-      LOG.error("Node {} Resolution failed. Please make sure that DNS table " +
-          "mapping or configured mapping is functional.", hostname);
-      return null;
-    }
-  }
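
For context, nodeResolve() only works if a switch mapping is configured. A minimal sketch of wiring the standard TableMapping implementation follows (the key names are the usual Hadoop topology keys; the file path and hostnames are illustrative):

```java
import org.apache.hadoop.conf.Configuration;

class TopologySetup {
  static Configuration topologyConf() {
    Configuration conf = new Configuration();
    // Standard Hadoop switch-mapping keys; the values are illustrative.
    conf.set("net.topology.node.switch.mapping.impl",
        "org.apache.hadoop.net.TableMapping");
    conf.set("net.topology.table.file.name", "/etc/hadoop/topology.table");
    // topology.table would contain lines such as:
    //   dn1.example.com /rack1
    //   10.0.0.12       /rack2
    return conf;
  }
}
```
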
-
-  /**
-   * Test utility to stop heartbeat check process.
-   * @return ScheduledFuture of next scheduled check that got cancelled.
-   */
-  @VisibleForTesting
-  ScheduledFuture pauseHealthCheck() {
-    return nodeStateManager.pause();
-  }
-
-  /**
-   * Test utility to resume the paused heartbeat check process.
-   * @return ScheduledFuture of the next scheduled check
-   */
-  @VisibleForTesting
-  ScheduledFuture unpauseHealthCheck() {
-    return nodeStateManager.unpause();
-  }
-
-  /**
-   * Test utility to get the count of skipped heartbeat check iterations.
-   * @return count of skipped heartbeat check iterations
-   */
-  @VisibleForTesting
-  long getSkippedHealthChecks() {
-    return nodeStateManager.getSkippedHealthChecks();
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
deleted file mode 100644
index 1596523..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONED;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONING;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
-
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsInfo;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.Interns;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-
-/**
- * This class maintains Node related metrics.
- */
-@InterfaceAudience.Private
-@Metrics(about = "SCM NodeManager Metrics", context = "ozone")
-public final class SCMNodeMetrics implements MetricsSource {
-
-  private static final String SOURCE_NAME =
-      SCMNodeMetrics.class.getSimpleName();
-
-  private @Metric MutableCounterLong numHBProcessed;
-  private @Metric MutableCounterLong numHBProcessingFailed;
-  private @Metric MutableCounterLong numNodeReportProcessed;
-  private @Metric MutableCounterLong numNodeReportProcessingFailed;
-
-  private final MetricsRegistry registry;
-  private final NodeManagerMXBean managerMXBean;
-  private final MetricsInfo recordInfo = Interns.info("SCMNodeManager",
-      "SCM NodeManager metrics");
-
-  /** Private constructor. */
-  private SCMNodeMetrics(NodeManagerMXBean managerMXBean) {
-    this.managerMXBean = managerMXBean;
-    this.registry = new MetricsRegistry(recordInfo);
-  }
-
-  /**
-   * Create and returns SCMNodeMetrics instance.
-   *
-   * @return SCMNodeMetrics
-   */
-  public static SCMNodeMetrics create(NodeManagerMXBean managerMXBean) {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME, "SCM NodeManager Metrics",
-        new SCMNodeMetrics(managerMXBean));
-  }
-
-  /**
-   * Unregister the metrics instance.
-   */
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE_NAME);
-  }
-
-  /**
-   * Increments number of heartbeat processed count.
-   */
-  void incNumHBProcessed() {
-    numHBProcessed.incr();
-  }
-
-  /**
-   * Increments number of heartbeat processing failed count.
-   */
-  void incNumHBProcessingFailed() {
-    numHBProcessingFailed.incr();
-  }
-
-  /**
-   * Increments number of node report processed count.
-   */
-  void incNumNodeReportProcessed() {
-    numNodeReportProcessed.incr();
-  }
-
-  /**
-   * Increments number of node report processing failed count.
-   */
-  void incNumNodeReportProcessingFailed() {
-    numNodeReportProcessingFailed.incr();
-  }
-
-  /**
-   * Get aggregated counter and gauge metrics.
-   */
-  @Override
-  @SuppressWarnings("SuspiciousMethodCalls")
-  public void getMetrics(MetricsCollector collector, boolean all) {
-    Map<String, Integer> nodeCount = managerMXBean.getNodeCount();
-    Map<String, Long> nodeInfo = managerMXBean.getNodeInfo();
-
-    registry.snapshot(
-        collector.addRecord(registry.info()) // Add annotated ones first
-            .addGauge(Interns.info(
-                "HealthyNodes",
-                "Number of healthy datanodes"),
-                nodeCount.get(HEALTHY.toString()))
-            .addGauge(Interns.info("StaleNodes",
-                "Number of stale datanodes"),
-                nodeCount.get(STALE.toString()))
-            .addGauge(Interns.info("DeadNodes",
-                "Number of dead datanodes"),
-                nodeCount.get(DEAD.toString()))
-            .addGauge(Interns.info("DecommissioningNodes",
-                "Number of decommissioning datanodes"),
-                nodeCount.get(DECOMMISSIONING.toString()))
-            .addGauge(Interns.info("DecommissionedNodes",
-                "Number of decommissioned datanodes"),
-                nodeCount.get(DECOMMISSIONED.toString()))
-            .addGauge(Interns.info("DiskCapacity",
-                "Total disk capacity"),
-                nodeInfo.get("DISKCapacity"))
-            .addGauge(Interns.info("DiskUsed",
-                "Total disk capacity used"),
-                nodeInfo.get("DISKUsed"))
-            .addGauge(Interns.info("DiskRemaining",
-                "Total disk capacity remaining"),
-                nodeInfo.get("DISKRemaining"))
-            .addGauge(Interns.info("SSDCapacity",
-                "Total ssd capacity"),
-                nodeInfo.get("SSDCapacity"))
-            .addGauge(Interns.info("SSDUsed",
-                "Total ssd capacity used"),
-                nodeInfo.get("SSDUsed"))
-            .addGauge(Interns.info("SSDRemaining",
-                "Total disk capacity remaining"),
-                nodeInfo.get("SSDRemaining")),
-        all);
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
deleted file mode 100644
index 32ecbad..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
-
-import java.util.Set;
-import java.util.UUID;
-
-/**
- * This is the JMX management interface for node manager information.
- */
-@InterfaceAudience.Private
-public interface SCMNodeStorageStatMXBean {
-  /**
-   * Get the capacity of the dataNode.
-   * @param datanodeID Datanode Id
-   * @return long
-   */
-  long getCapacity(UUID datanodeID);
-
-  /**
-   * Returns the remaining space of a Datanode.
-   * @param datanodeId Datanode Id
-   * @return long
-   */
-  long getRemainingSpace(UUID datanodeId);
-
-
-  /**
-   * Returns used space in bytes of a Datanode.
-   * @return long
-   */
-  long getUsedSpace(UUID datanodeId);
-
-  /**
-   * Returns the total capacity of all dataNodes.
-   * @return long
-   */
-  long getTotalCapacity();
-
-  /**
-   * Returns the total Used Space in all Datanodes.
-   * @return long
-   */
-  long getTotalSpaceUsed();
-
-  /**
-   * Returns the total Remaining Space in all Datanodes.
-   * @return long
-   */
-  long getTotalFreeSpace();
-
-  /**
-   * Returns the set of disks for a given Datanode.
-   * @return set of storage volumes
-   */
-  Set<StorageLocationReport> getStorageVolumes(UUID datanodeId);
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
deleted file mode 100644
index 1b0e5b5..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.NO_SUCH_DATANODE;
-
-/**
- * This data structure maintains the disk space capacity, disk usage and free
- * space availability per Datanode.
- * This information is built from the DN node reports.
- */
-public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
-  static final Logger LOG =
-      LoggerFactory.getLogger(SCMNodeStorageStatMap.class);
-
-  private final double warningUtilizationThreshold;
-  private final double criticalUtilizationThreshold;
-
-  private final Map<UUID, Set<StorageLocationReport>> scmNodeStorageReportMap;
-  // NodeStorageInfo MXBean
-  private ObjectName scmNodeStorageInfoBean;
-  /**
-   * constructs the scmNodeStorageReportMap object.
-   */
-  public SCMNodeStorageStatMap(OzoneConfiguration conf) {
-    scmNodeStorageReportMap = new ConcurrentHashMap<>();
-    warningUtilizationThreshold = conf.getDouble(
-        OzoneConfigKeys.
-            HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD,
-        OzoneConfigKeys.
-            HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD_DEFAULT);
-    criticalUtilizationThreshold = conf.getDouble(
-        OzoneConfigKeys.
-            HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD,
-        OzoneConfigKeys.
-            HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT);
-  }
-
-  /**
-   * Enum that describes what we should do at various thresholds.
-   */
-  public enum UtilizationThreshold {
-    NORMAL, WARN, CRITICAL;
-  }
-
-  /**
-   * Returns true if this is a datanode that is already tracked by
-   * scmNodeStorageReportMap.
-   *
-   * @param datanodeID - UUID of the Datanode.
-   * @return True if this is tracked, false if this map does not know about it.
-   */
-  public boolean isKnownDatanode(UUID datanodeID) {
-    Preconditions.checkNotNull(datanodeID);
-    return scmNodeStorageReportMap.containsKey(datanodeID);
-  }
-
-  public List<UUID> getDatanodeList(
-      UtilizationThreshold threshold) {
-    return scmNodeStorageReportMap.entrySet().stream().filter(
-        entry -> (isThresholdReached(threshold,
-            getScmUsedratio(getUsedSpace(entry.getKey()),
-                getCapacity(entry.getKey())))))
-        .map(Map.Entry::getKey)
-        .collect(Collectors.toList());
-  }
-
-
-
-  /**
-   * Inserts a new datanode into the scmNodeStorageReportMap.
-   *
-   * @param datanodeID -- Datanode UUID
-   * @param report - set of StorageReports.
-   */
-  public void insertNewDatanode(UUID datanodeID,
-      Set<StorageLocationReport> report) throws SCMException {
-    Preconditions.checkNotNull(report);
-    Preconditions.checkState(report.size() != 0);
-    Preconditions.checkNotNull(datanodeID);
-    synchronized (scmNodeStorageReportMap) {
-      if (isKnownDatanode(datanodeID)) {
-        throw new SCMException("Node already exists in the map",
-            DUPLICATE_DATANODE);
-      }
-      scmNodeStorageReportMap.putIfAbsent(datanodeID, report);
-    }
-  }
-
-  //TODO: This should be called once SCMNodeManager gets Started.
-  private void registerMXBean() {
-    this.scmNodeStorageInfoBean = MBeans.register("StorageContainerManager",
-        "scmNodeStorageInfo", this);
-  }
-
-  //TODO: Unregister call should happen as a part of SCMNodeManager shutdown.
-  private void unregisterMXBean() {
-    if(this.scmNodeStorageInfoBean != null) {
-      MBeans.unregister(this.scmNodeStorageInfoBean);
-      this.scmNodeStorageInfoBean = null;
-    }
-  }
-  /**
-   * Updates the storage report set of an existing DN.
-   *
-   * @param datanodeID - UUID of DN.
-   * @param report - set of Storage Reports for the Datanode.
-   * @throws SCMException - if we don't know about this datanode; for a new
-   *                        DN use insertNewDatanode.
-   */
-  public void updateDatanodeMap(UUID datanodeID,
-      Set<StorageLocationReport> report) throws SCMException {
-    Preconditions.checkNotNull(datanodeID);
-    Preconditions.checkNotNull(report);
-    Preconditions.checkState(report.size() != 0);
-    synchronized (scmNodeStorageReportMap) {
-      if (!scmNodeStorageReportMap.containsKey(datanodeID)) {
-        throw new SCMException("No such datanode", NO_SUCH_DATANODE);
-      }
-      scmNodeStorageReportMap.put(datanodeID, report);
-    }
-  }
-
-  public StorageReportResult processNodeReport(UUID datanodeID,
-      StorageContainerDatanodeProtocolProtos.NodeReportProto nodeReport)
-      throws IOException {
-    Preconditions.checkNotNull(datanodeID);
-    Preconditions.checkNotNull(nodeReport);
-
-    long totalCapacity = 0;
-    long totalRemaining = 0;
-    long totalScmUsed = 0;
-    Set<StorageLocationReport> storagReportSet = new HashSet<>();
-    Set<StorageLocationReport> fullVolumeSet = new HashSet<>();
-    Set<StorageLocationReport> failedVolumeSet = new HashSet<>();
-    List<StorageReportProto>
-        storageReports = nodeReport.getStorageReportList();
-    for (StorageReportProto report : storageReports) {
-      StorageLocationReport storageReport =
-          StorageLocationReport.getFromProtobuf(report);
-      storageReportSet.add(storageReport);
-      if (report.hasFailed() && report.getFailed()) {
-        failedVolumeSet.add(storageReport);
-      } else if (isThresholdReached(UtilizationThreshold.CRITICAL,
-          getScmUsedratio(report.getScmUsed(), report.getCapacity()))) {
-        fullVolumeSet.add(storageReport);
-      }
-      totalCapacity += report.getCapacity();
-      totalRemaining += report.getRemaining();
-      totalScmUsed += report.getScmUsed();
-    }
-
-    if (!isKnownDatanode(datanodeID)) {
-      insertNewDatanode(datanodeID, storageReportSet);
-    } else {
-      updateDatanodeMap(datanodeID, storageReportSet);
-    }
-    if (isThresholdReached(UtilizationThreshold.CRITICAL,
-        getScmUsedratio(totalScmUsed, totalCapacity))) {
-      LOG.warn("Datanode {} is out of storage space. Capacity: {}, Used: {}",
-          datanodeID, totalCapacity, totalScmUsed);
-      return StorageReportResult.ReportResultBuilder.newBuilder()
-          .setStatus(ReportStatus.DATANODE_OUT_OF_SPACE)
-          .setFullVolumeSet(fullVolumeSet).setFailedVolumeSet(failedVolumeSet)
-          .build();
-    }
-    if (isThresholdReached(UtilizationThreshold.WARN,
-        getScmUsedratio(totalScmUsed, totalCapacity))) {
-      LOG.warn("Datanode {} is low on storage space. Capacity: {}, Used: {}",
-          datanodeID, totalCapacity, totalScmUsed);
-    }
-
-    if (failedVolumeSet.isEmpty() && !fullVolumeSet.isEmpty()) {
-      return StorageReportResult.ReportResultBuilder.newBuilder()
-          .setStatus(ReportStatus.STORAGE_OUT_OF_SPACE)
-          .setFullVolumeSet(fullVolumeSet).build();
-    }
-
-    if (!failedVolumeSet.isEmpty() && fullVolumeSet.isEmpty()) {
-      return StorageReportResult.ReportResultBuilder.newBuilder()
-          .setStatus(ReportStatus.FAILED_STORAGE)
-          .setFailedVolumeSet(failedVolumeSet).build();
-    }
-    if (!failedVolumeSet.isEmpty() && !fullVolumeSet.isEmpty()) {
-      return StorageReportResult.ReportResultBuilder.newBuilder()
-          .setStatus(ReportStatus.FAILED_AND_OUT_OF_SPACE_STORAGE)
-          .setFailedVolumeSet(failedVolumeSet).setFullVolumeSet(fullVolumeSet)
-          .build();
-    }
-    return StorageReportResult.ReportResultBuilder.newBuilder()
-        .setStatus(ReportStatus.ALL_IS_WELL).build();
-  }
-
-  private boolean isThresholdReached(UtilizationThreshold threshold,
-      double scmUsedratio) {
-    switch (threshold) {
-    case NORMAL:
-      return scmUsedratio < warningUtilizationThreshold;
-    case WARN:
-      return scmUsedratio >= warningUtilizationThreshold
-          && scmUsedratio < criticalUtilizationThreshold;
-    case CRITICAL:
-      return scmUsedratio >= criticalUtilizationThreshold;
-    default:
-      throw new RuntimeException("Unknown UtilizationThreshold value");
-    }
-  }
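
A quick worked example of the classification above, under assumed thresholds of 0.75 (warning) and 0.90 (critical); both values are illustrative, not the configured defaults:

```java
class ThresholdDemo {
  public static void main(String[] args) {
    double warn = 0.75, critical = 0.90; // assumed threshold values
    double ratio = 0.82;                 // sample scmUsed ratio

    if (ratio >= critical) {
      System.out.println("CRITICAL");
    } else if (ratio >= warn) {
      System.out.println("WARN");        // fires: 0.75 <= 0.82 < 0.90
    } else {
      System.out.println("NORMAL");
    }
  }
}
```
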
-
-  @Override
-  public long getCapacity(UUID dnId) {
-    long capacity = 0;
-    Set<StorageLocationReport> reportSet = scmNodeStorageReportMap.get(dnId);
-    for (StorageLocationReport report : reportSet) {
-      capacity += report.getCapacity();
-    }
-    return capacity;
-  }
-
-  @Override
-  public long getRemainingSpace(UUID dnId) {
-    long remaining = 0;
-    Set<StorageLocationReport> reportSet = scmNodeStorageReportMap.get(dnId);
-    for (StorageLocationReport report : reportSet) {
-      remaining += report.getRemaining();
-    }
-    return remaining;
-  }
-
-  @Override
-  public long getUsedSpace(UUID dnId) {
-    long scmUsed = 0;
-    Set<StorageLocationReport> reportSet = scmNodeStorageReportMap.get(dnId);
-    for (StorageLocationReport report : reportSet) {
-      scmUsed += report.getScmUsed();
-    }
-    return scmUsed;
-  }
-
-  @Override
-  public long getTotalCapacity() {
-    long capacity = 0;
-    Set<UUID> dnIdSet = scmNodeStorageReportMap.keySet();
-    for (UUID id : dnIdSet) {
-      capacity += getCapacity(id);
-    }
-    return capacity;
-  }
-
-  @Override
-  public long getTotalSpaceUsed() {
-    long scmUsed = 0;
-    Set<UUID> dnIdSet = scmNodeStorageReportMap.keySet();
-    for (UUID id : dnIdSet) {
-      scmUsed += getUsedSpace(id);
-    }
-    return scmUsed;
-  }
-
-  @Override
-  public long getTotalFreeSpace() {
-    long remaining = 0;
-    Set<UUID> dnIdSet = scmNodeStorageReportMap.keySet();
-    for (UUID id : dnIdSet) {
-      remaining += getRemainingSpace(id);
-    }
-    return remaining;
-  }
-
-  /**
-   * Removes the datanode from scmNodeStorageReportMap.
-   * @param datanodeID - Datanode UUID.
-   * @throws SCMException in case the datanode is not found in the map.
-   */
-  public void removeDatanode(UUID datanodeID) throws SCMException {
-    Preconditions.checkNotNull(datanodeID);
-    synchronized (scmNodeStorageReportMap) {
-      if (!scmNodeStorageReportMap.containsKey(datanodeID)) {
-        throw new SCMException("No such datanode", NO_SUCH_DATANODE);
-      }
-      scmNodeStorageReportMap.remove(datanodeID);
-    }
-  }
-
-  /**
-   * Returns the set of storage volumes for a datanode.
-   * @param datanodeID - Datanode UUID.
-   * @return set of storage volumes.
-   */
-
-  @Override
-  public Set<StorageLocationReport> getStorageVolumes(UUID datanodeID) {
-    return scmNodeStorageReportMap.get(datanodeID);
-  }
-
-
-  /**
-   * Truncates to 4 decimal places, since uncontrolled precision is
-   * sometimes counterintuitive to what users expect.
-   * @param value - double.
-   * @return double.
-   */
-  private double truncateDecimals(double value) {
-    final int multiplier = 10000;
-    return (double) ((long) (value * multiplier)) / multiplier;
-  }
-
-  /**
-   * Gets the scmUsed ratio.
-   */
-  public double getScmUsedRatio(long scmUsed, long capacity) {
-    return truncateDecimals(scmUsed / (double) capacity);
-  }
-  /**
-   * Results possible from processing a node report by
-   * SCMNodeStorageStatMap.
-   */
-  public enum ReportStatus {
-    ALL_IS_WELL,
-    DATANODE_OUT_OF_SPACE,
-    STORAGE_OUT_OF_SPACE,
-    FAILED_STORAGE,
-    FAILED_AND_OUT_OF_SPACE_STORAGE
-  }
-
-}
\ No newline at end of file
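For context, the isThresholdReached/getScmUsedRatio pair removed above amounts to a three-band utilization check over a truncated used/capacity ratio. A minimal, self-contained sketch of that idea; the threshold values here are assumptions for illustration, not the configured SCM defaults:

```java
// Sketch only: three-band utilization check mirroring the removed
// isThresholdReached/getScmUsedRatio logic. Threshold values are
// illustrative assumptions, not the real SCM configuration.
public final class UtilizationCheckSketch {
  enum Band { NORMAL, WARN, CRITICAL }

  private static final double WARN_THRESHOLD = 0.75;      // assumed
  private static final double CRITICAL_THRESHOLD = 0.95;  // assumed

  // Used/capacity ratio truncated to 4 decimal places, as above.
  static double usedRatio(long used, long capacity) {
    final int multiplier = 10000;
    return (double) ((long) (used / (double) capacity * multiplier))
        / multiplier;
  }

  static Band classify(double ratio) {
    if (ratio >= CRITICAL_THRESHOLD) {
      return Band.CRITICAL;
    }
    return ratio >= WARN_THRESHOLD ? Band.WARN : Band.NORMAL;
  }

  public static void main(String[] args) {
    System.out.println(classify(usedRatio(960, 1000))); // CRITICAL
    System.out.println(classify(usedRatio(800, 1000))); // WARN
    System.out.println(classify(usedRatio(100, 1000))); // NORMAL
  }
}
```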
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
deleted file mode 100644
index 26e8f5f..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Set;
-
-/**
- * Handles the stale node event.
- */
-public class StaleNodeHandler implements EventHandler<DatanodeDetails> {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(StaleNodeHandler.class);
-
-  private final NodeManager nodeManager;
-  private final PipelineManager pipelineManager;
-  private final Configuration conf;
-
-  public StaleNodeHandler(NodeManager nodeManager,
-      PipelineManager pipelineManager, OzoneConfiguration conf) {
-    this.nodeManager = nodeManager;
-    this.pipelineManager = pipelineManager;
-    this.conf = conf;
-  }
-
-  @Override
-  public void onMessage(DatanodeDetails datanodeDetails,
-      EventPublisher publisher) {
-    Set<PipelineID> pipelineIds =
-        nodeManager.getPipelines(datanodeDetails);
-    LOG.info("Datanode {} moved to stale state. Finalizing its pipelines {}",
-        datanodeDetails, pipelineIds);
-    for (PipelineID pipelineID : pipelineIds) {
-      try {
-        Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
-        pipelineManager.finalizeAndDestroyPipeline(pipeline, true);
-      } catch (IOException e) {
-        LOG.info("Could not finalize pipeline={} for dn={}", pipelineID,
-            datanodeDetails);
-      }
-    }
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
deleted file mode 100644
index 0b63ceb..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
+++ /dev/null
@@ -1,87 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
-
-import java.util.Set;
-
-/**
- * A storage report gets processed by SCMNodeStorageStatMap and returns
- * this report result class.
- */
-public class StorageReportResult {
-  private SCMNodeStorageStatMap.ReportStatus status;
-  private Set<StorageLocationReport> fullVolumes;
-  private Set<StorageLocationReport> failedVolumes;
-
-  StorageReportResult(SCMNodeStorageStatMap.ReportStatus status,
-      Set<StorageLocationReport> fullVolumes,
-      Set<StorageLocationReport> failedVolumes) {
-    this.status = status;
-    this.fullVolumes = fullVolumes;
-    this.failedVolumes = failedVolumes;
-  }
-
-  public SCMNodeStorageStatMap.ReportStatus getStatus() {
-    return status;
-  }
-
-  public Set<StorageLocationReport> getFullVolumes() {
-    return fullVolumes;
-  }
-
-  public Set<StorageLocationReport> getFailedVolumes() {
-    return failedVolumes;
-  }
-
-  static class ReportResultBuilder {
-    private SCMNodeStorageStatMap.ReportStatus status;
-    private Set<StorageLocationReport> fullVolumes;
-    private Set<StorageLocationReport> failedVolumes;
-
-    static ReportResultBuilder newBuilder() {
-      return new ReportResultBuilder();
-    }
-
-    public ReportResultBuilder setStatus(
-        SCMNodeStorageStatMap.ReportStatus newstatus) {
-      this.status = newstatus;
-      return this;
-    }
-
-    public ReportResultBuilder setFullVolumeSet(
-        Set<StorageLocationReport> fullVolumesSet) {
-      this.fullVolumes = fullVolumesSet;
-      return this;
-    }
-
-    public ReportResultBuilder setFailedVolumeSet(
-        Set<StorageLocationReport> failedVolumesSet) {
-      this.failedVolumes = failedVolumesSet;
-      return this;
-    }
-
-    StorageReportResult build() {
-      return new StorageReportResult(status, fullVolumes, failedVolumes);
-    }
-  }
-}
\ No newline at end of file
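The class above is a classic immutable-result-plus-builder pair. A compact, self-contained sketch of the pattern; the names and fields are simplified illustrations, not the removed API:

```java
import java.util.Collections;
import java.util.Set;

// Sketch of the immutable-result-plus-builder pattern used by
// StorageReportResult. Names and fields are simplified illustrations.
final class ResultSketch {
  enum Status { ALL_IS_WELL, FAILED_STORAGE }

  private final Status status;
  private final Set<String> failedVolumes;

  private ResultSketch(Status status, Set<String> failedVolumes) {
    this.status = status;
    this.failedVolumes = failedVolumes;
  }

  static final class Builder {
    private Status status = Status.ALL_IS_WELL;
    private Set<String> failedVolumes = Collections.emptySet();

    Builder setStatus(Status s) { this.status = s; return this; }
    Builder setFailedVolumes(Set<String> v) { this.failedVolumes = v; return this; }
    ResultSketch build() { return new ResultSketch(status, failedVolumes); }
  }

  public static void main(String[] args) {
    ResultSketch r = new Builder()
        .setStatus(Status.FAILED_STORAGE)
        .setFailedVolumes(Set.of("/data/disk1"))
        .build();
    System.out.println(r.status + " " + r.failedVolumes);
  }
}
```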
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java
deleted file mode 100644
index d6a8ad0..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.node;
-
-/**
- * The node package deals with node management.
- * <p/>
- * The node manager takes care of node registrations, removal of nodes, and
- * handling of heartbeats.
- * <p/>
- * The node manager maintains statistics that get sent as part of
- * heartbeats.
- * <p/>
- * The container manager polls the node manager to learn the state of the
- * datanodes that it is interested in.
- * <p/>
- */
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
deleted file mode 100644
index c0f46f1..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.node.states;
-
-import java.util.HashSet;
-import java.util.Set;
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .NO_SUCH_DATANODE;
-
-/**
- * This data structure maintains the list of containers that is on a datanode.
- * This information is built from the DN container reports.
- */
-public class Node2ContainerMap extends Node2ObjectsMap<ContainerID> {
-
-  /**
-   * Constructs a Node2ContainerMap Object.
-   */
-  public Node2ContainerMap() {
-    super();
-  }
-
-  /**
-   * Returns an empty set if there are no containers associated with this
-   * datanode ID.
-   *
-   * @param datanode - UUID
-   * @return Set of containers, possibly empty.
-   */
-  public Set<ContainerID> getContainers(UUID datanode) {
-    return getObjects(datanode);
-  }
-
-  /**
-   * Insert a new datanode into Node2Container Map.
-   *
-   * @param datanodeID   -- Datanode UUID
-   * @param containerIDs - Set of ContainerIDs.
-   */
-  @Override
-  public void insertNewDatanode(UUID datanodeID, Set<ContainerID> containerIDs)
-      throws SCMException {
-    super.insertNewDatanode(datanodeID, containerIDs);
-  }
-
-  /**
-   * Updates the Container list of an existing DN.
-   *
-   * @param datanodeID - UUID of DN.
-   * @param containers - Set of containers that are present on the DN.
-   * @throws SCMException - if we don't know about this datanode, for new DN
-   *                        use addDatanodeInContainerMap.
-   */
-  public void setContainersForDatanode(UUID datanodeID,
-      Set<ContainerID> containers) throws SCMException {
-    Preconditions.checkNotNull(datanodeID);
-    Preconditions.checkNotNull(containers);
-    if (dn2ObjectMap
-        .computeIfPresent(datanodeID, (k, v) -> new HashSet<>(containers))
-        == null) {
-      throw new SCMException("No such datanode", NO_SUCH_DATANODE);
-    }
-  }
-
-  @VisibleForTesting
-  @Override
-  public int size() {
-    return dn2ObjectMap.size();
-  }
-}
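setContainersForDatanode above leans on a ConcurrentHashMap detail: computeIfPresent returns null exactly when the key is absent, which doubles as the unknown-datanode check. A self-contained sketch of that idiom, with invented identifiers:

```java
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of the computeIfPresent idiom from setContainersForDatanode:
// a null return means the key was never present, i.e. unknown datanode.
final class ReplaceIfKnownSketch {
  public static void main(String[] args) {
    Map<String, Set<Integer>> containersByDn = new ConcurrentHashMap<>();
    containersByDn.put("dn-1", new HashSet<>(Set.of(1, 2)));

    // Known datanode: the whole set is replaced atomically.
    Set<Integer> updated = containersByDn
        .computeIfPresent("dn-1", (k, v) -> new HashSet<>(Set.of(3)));
    System.out.println(updated); // [3]

    // Unknown datanode: the mapping function never runs and null comes
    // back, which is where the removed code threw NO_SUCH_DATANODE.
    System.out.println(containersByDn
        .computeIfPresent("dn-9", (k, v) -> new HashSet<>())); // null
  }
}
```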
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
deleted file mode 100644
index 37525b0..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.node.states;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-
-import java.util.UUID;
-import java.util.Set;
-import java.util.Map;
-import java.util.TreeSet;
-import java.util.HashSet;
-import java.util.Collections;
-
-import java.util.concurrent.ConcurrentHashMap;
-
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE;
-
-/**
- * This data structure maintains the set of objects of type T that are
- * present on a datanode. Subclasses specialize it for containers and
- * pipelines.
- */
-public class Node2ObjectsMap<T> {
-
-  @SuppressWarnings("visibilitymodifier")
-  protected final Map<UUID, Set<T>> dn2ObjectMap;
-
-  /**
-   * Constructs a Node2ContainerMap Object.
-   */
-  public Node2ObjectsMap() {
-    dn2ObjectMap = new ConcurrentHashMap<>();
-  }
-
-  /**
-   * Returns true if this is a datanode that is already tracked by this map.
-   *
-   * @param datanodeID - UUID of the Datanode.
-   * @return True if this is tracked, false if this map does not know about it.
-   */
-  public boolean isKnownDatanode(UUID datanodeID) {
-    Preconditions.checkNotNull(datanodeID);
-    return dn2ObjectMap.containsKey(datanodeID);
-  }
-
-  /**
-   * Inserts a new datanode into the map.
-   *
-   * @param datanodeID   -- Datanode UUID
-   * @param containerIDs - Set of object IDs present on the datanode.
-   */
-  public void insertNewDatanode(UUID datanodeID, Set<T> containerIDs)
-      throws SCMException {
-    Preconditions.checkNotNull(containerIDs);
-    Preconditions.checkNotNull(datanodeID);
-    if (dn2ObjectMap.putIfAbsent(datanodeID, new HashSet<>(containerIDs))
-        != null) {
-      throw new SCMException("Node already exists in the map",
-          DUPLICATE_DATANODE);
-    }
-  }
-
-  /**
-   * Removes datanode Entry from the map.
-   *
-   * @param datanodeID - Datanode ID.
-   */
-  void removeDatanode(UUID datanodeID) {
-    Preconditions.checkNotNull(datanodeID);
-    dn2ObjectMap.computeIfPresent(datanodeID, (k, v) -> null);
-  }
-
-  /**
-   * Returns an empty set if there are no objects associated with this
-   * datanode ID.
-   *
-   * @param datanode - UUID
-   * @return Set of objects, possibly empty.
-   */
-  Set<T> getObjects(UUID datanode) {
-    Preconditions.checkNotNull(datanode);
-    final Set<T> s = dn2ObjectMap.get(datanode);
-    return s != null
-        ? Collections.unmodifiableSet(s) : Collections.emptySet();
-  }
-
-  public ReportResult.ReportResultBuilder<T> newBuilder() {
-    return new ReportResult.ReportResultBuilder<>();
-  }
-
-  public ReportResult<T> processReport(UUID datanodeID, Set<T> objects) {
-    Preconditions.checkNotNull(datanodeID);
-    Preconditions.checkNotNull(objects);
-
-    if (!isKnownDatanode(datanodeID)) {
-      return newBuilder()
-          .setStatus(ReportResult.ReportStatus.NEW_DATANODE_FOUND)
-          .setNewEntries(objects)
-          .build();
-    }
-
-    // Conditions like zero-length container sets are handled by removeAll.
-    Set<T> currentSet = dn2ObjectMap.get(datanodeID);
-    TreeSet<T> newObjects = new TreeSet<>(objects);
-    newObjects.removeAll(currentSet);
-
-    TreeSet<T> missingObjects = new TreeSet<>(currentSet);
-    missingObjects.removeAll(objects);
-
-    if (newObjects.isEmpty() && missingObjects.isEmpty()) {
-      return newBuilder()
-          .setStatus(ReportResult.ReportStatus.ALL_IS_WELL)
-          .build();
-    }
-
-    if (newObjects.isEmpty() && !missingObjects.isEmpty()) {
-      return newBuilder()
-          .setStatus(ReportResult.ReportStatus.MISSING_ENTRIES)
-          .setMissingEntries(missingObjects)
-          .build();
-    }
-
-    if (!newObjects.isEmpty() && missingObjects.isEmpty()) {
-      return newBuilder()
-          .setStatus(ReportResult.ReportStatus.NEW_ENTRIES_FOUND)
-          .setNewEntries(newObjects)
-          .build();
-    }
-
-    if (!newObjects.isEmpty() && !missingObjects.isEmpty()) {
-      return newBuilder()
-          .setStatus(ReportResult.ReportStatus.MISSING_AND_NEW_ENTRIES_FOUND)
-          .setNewEntries(newObjects)
-          .setMissingEntries(missingObjects)
-          .build();
-    }
-
-    // Default status; also keeps the compiler happy.
-    return newBuilder()
-        .setStatus(ReportResult.ReportStatus.ALL_IS_WELL)
-        .build();
-  }
-
-  @VisibleForTesting
-  public int size() {
-    return dn2ObjectMap.size();
-  }
-}
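The core of processReport above is two set differences: reported-minus-tracked yields the new entries, tracked-minus-reported yields the missing ones. A runnable sketch of just that arithmetic; the sample IDs are made up:

```java
import java.util.Set;
import java.util.TreeSet;

// Sketch of the set arithmetic inside processReport: entries reported
// but not tracked are "new", entries tracked but not reported are
// "missing". The IDs below are invented for illustration.
final class ReportDiffSketch {
  public static void main(String[] args) {
    Set<String> tracked = Set.of("c1", "c2", "c3");   // current map state
    Set<String> reported = Set.of("c2", "c3", "c4");  // incoming report

    Set<String> newEntries = new TreeSet<>(reported);
    newEntries.removeAll(tracked);                    // [c4]

    Set<String> missingEntries = new TreeSet<>(tracked);
    missingEntries.removeAll(reported);               // [c1]

    System.out.println("new=" + newEntries + " missing=" + missingEntries);
  }
}
```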
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
deleted file mode 100644
index f8633f9..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.node.states;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * This data structure maintains the list of pipelines which the given
- * datanode is a part of. This information will be added whenever a new
- * pipeline allocation happens.
- *
- * <p>TODO: this information needs to be regenerated from pipeline reports
- * on SCM restart
- */
-public class Node2PipelineMap extends Node2ObjectsMap<PipelineID> {
-
-  /** Constructs a Node2PipelineMap Object. */
-  public Node2PipelineMap() {
-    super();
-  }
-
-  /**
-   * Returns an empty set if there are no pipelines associated with this
-   * datanode ID.
-   *
-   * @param datanode - UUID
-   * @return Set of pipelines, possibly empty.
-   */
-  public Set<PipelineID> getPipelines(UUID datanode) {
-    return getObjects(datanode);
-  }
-
-  /**
-   * Adds a pipeline entry to a given datanode in the map.
-   *
-   * @param pipeline Pipeline to be added
-   */
-  public synchronized void addPipeline(Pipeline pipeline) {
-    for (DatanodeDetails details : pipeline.getNodes()) {
-      UUID dnId = details.getUuid();
-      dn2ObjectMap.computeIfAbsent(dnId, k -> ConcurrentHashMap.newKeySet())
-          .add(pipeline.getId());
-    }
-  }
-
-  public synchronized void removePipeline(Pipeline pipeline) {
-    for (DatanodeDetails details : pipeline.getNodes()) {
-      UUID dnId = details.getUuid();
-      dn2ObjectMap.computeIfPresent(dnId,
-          (k, v) -> {
-            v.remove(pipeline.getId());
-            return v;
-          });
-    }
-  }
-}
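addPipeline above relies on computeIfAbsent plus ConcurrentHashMap.newKeySet() to build a thread-safe multimap lazily. A minimal sketch of that idiom, with invented identifiers:

```java
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of the lazy thread-safe multimap idiom from addPipeline:
// computeIfAbsent installs a concurrent key-set on first use, so
// concurrent adds for the same datanode are safe.
final class MultiMapSketch {
  public static void main(String[] args) {
    Map<String, Set<String>> pipelinesByDn = new ConcurrentHashMap<>();

    for (String dn : new String[] {"dn-1", "dn-2", "dn-1"}) {
      pipelinesByDn
          .computeIfAbsent(dn, k -> ConcurrentHashMap.newKeySet())
          .add("pipeline-7");
    }
    // e.g. {dn-1=[pipeline-7], dn-2=[pipeline-7]}
    System.out.println(pipelinesByDn);
  }
}
```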
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java
deleted file mode 100644
index aa5c382..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.node.states;
-
-/**
- * This exception represents that there is already a node added to NodeStateMap
- * with the same UUID.
- */
-public class NodeAlreadyExistsException extends NodeException {
-
-  /**
-   * Constructs a {@code NodeAlreadyExistsException} with {@code null}
-   * as its error detail message.
-   */
-  public NodeAlreadyExistsException() {
-    super();
-  }
-
-  /**
-   * Constructs a {@code NodeAlreadyExistsException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public NodeAlreadyExistsException(String message) {
-    super(message);
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java
deleted file mode 100644
index c67b55d..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.node.states;
-
-/**
- * This exception represents all node related exceptions in NodeStateMap.
- */
-public class NodeException extends Exception {
-
-  /**
-   * Constructs a {@code NodeException} with {@code null}
-   * as its error detail message.
-   */
-  public NodeException() {
-    super();
-  }
-
-  /**
-   * Constructs a {@code NodeException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public NodeException(String message) {
-    super(message);
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java
deleted file mode 100644
index c44a08c..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.node.states;
-
-/**
- * This exception represents that the node that is being accessed does not
- * exist in NodeStateMap.
- */
-public class NodeNotFoundException extends NodeException {
-
-
-  /**
-   * Constructs a {@code NodeNotFoundException} with {@code null}
-   * as its error detail message.
-   */
-  public NodeNotFoundException() {
-    super();
-  }
-
-  /**
-   * Constructs a {@code NodeNotFoundException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public NodeNotFoundException(String message) {
-    super(message);
-  }
-
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
deleted file mode 100644
index 0c1ab2c..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
+++ /dev/null
@@ -1,312 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node.states;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-/**
- * Maintains the state of datanodes in SCM. This class should only be used by
- * NodeStateManager to maintain the state. If anyone wants to change the
- * state of a node, they should go through NodeStateManager; do not use
- * this class directly.
- */
-public class NodeStateMap {
-
-  /**
-   * Node id to node info map.
-   */
-  private final ConcurrentHashMap<UUID, DatanodeInfo> nodeMap;
-  /**
-   * Maps each node state to the set of nodes currently in that state.
-   */
-  private final ConcurrentHashMap<NodeState, Set<UUID>> stateMap;
-  /**
-   * Node to set of containers on the node.
-   */
-  private final ConcurrentHashMap<UUID, Set<ContainerID>> nodeToContainer;
-
-  private final ReadWriteLock lock;
-
-  /**
-   * Creates a new instance of NodeStateMap with no nodes.
-   */
-  public NodeStateMap() {
-    lock = new ReentrantReadWriteLock();
-    nodeMap = new ConcurrentHashMap<>();
-    stateMap = new ConcurrentHashMap<>();
-    nodeToContainer = new ConcurrentHashMap<>();
-    initStateMap();
-  }
-
-  /**
-   * Initializes the state map with available states.
-   */
-  private void initStateMap() {
-    for (NodeState state : NodeState.values()) {
-      stateMap.put(state, new HashSet<>());
-    }
-  }
-
-  /**
-   * Adds a node to NodeStateMap.
-   *
-   * @param datanodeDetails DatanodeDetails
-   * @param nodeState initial NodeState
-   *
-   * @throws NodeAlreadyExistsException if the node already exist
-   */
-  public void addNode(DatanodeDetails datanodeDetails, NodeState nodeState)
-      throws NodeAlreadyExistsException {
-    lock.writeLock().lock();
-    try {
-      UUID id = datanodeDetails.getUuid();
-      if (nodeMap.containsKey(id)) {
-        throw new NodeAlreadyExistsException("Node UUID: " + id);
-      }
-      nodeMap.put(id, new DatanodeInfo(datanodeDetails));
-      nodeToContainer.put(id, Collections.emptySet());
-      stateMap.get(nodeState).add(id);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Updates the node state.
-   *
-   * @param nodeId Node Id
-   * @param currentState current state
-   * @param newState new state
-   *
-   * @throws NodeNotFoundException if the node is not present
-   */
-  public void updateNodeState(UUID nodeId, NodeState currentState,
-                              NodeState newState) throws NodeNotFoundException {
-    lock.writeLock().lock();
-    try {
-      checkIfNodeExist(nodeId);
-      if (stateMap.get(currentState).remove(nodeId)) {
-        stateMap.get(newState).add(nodeId);
-      } else {
-        throw new NodeNotFoundException("Node UUID: " + nodeId +
-            ", not found in state: " + currentState);
-      }
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Returns DatanodeInfo for the given node id.
-   *
-   * @param uuid Node Id
-   *
-   * @return DatanodeInfo of the node
-   *
-   * @throws NodeNotFoundException if the node is not present
-   */
-  public DatanodeInfo getNodeInfo(UUID uuid) throws NodeNotFoundException {
-    lock.readLock().lock();
-    try {
-      checkIfNodeExist(uuid);
-      return nodeMap.get(uuid);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-
-  /**
-   * Returns the list of node ids which are in the specified state.
-   *
-   * @param state NodeState
-   *
-   * @return list of node ids
-   */
-  public List<UUID> getNodes(NodeState state) {
-    lock.readLock().lock();
-    try {
-      return new ArrayList<>(stateMap.get(state));
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Returns the list of all the node ids.
-   *
-   * @return list of all the node ids
-   */
-  public List<UUID> getAllNodes() {
-    lock.readLock().lock();
-    try {
-      return new ArrayList<>(nodeMap.keySet());
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Returns the count of nodes in the specified state.
-   *
-   * @param state NodeState
-   *
-   * @return Number of nodes in the specified state
-   */
-  public int getNodeCount(NodeState state) {
-    lock.readLock().lock();
-    try {
-      return stateMap.get(state).size();
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Returns the total node count.
-   *
-   * @return node count
-   */
-  public int getTotalNodeCount() {
-    lock.readLock().lock();
-    try {
-      return nodeMap.size();
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Returns the current state of the node.
-   *
-   * @param uuid node id
-   *
-   * @return NodeState
-   *
-   * @throws NodeNotFoundException if the node is not found
-   */
-  public NodeState getNodeState(UUID uuid) throws NodeNotFoundException {
-    lock.readLock().lock();
-    try {
-      checkIfNodeExist(uuid);
-      for (Map.Entry<NodeState, Set<UUID>> entry : stateMap.entrySet()) {
-        if (entry.getValue().contains(uuid)) {
-          return entry.getKey();
-        }
-      }
-      throw new NodeNotFoundException("Node not found in node state map." +
-          " UUID: " + uuid);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Adds the given container to the specified datanode.
-   *
-   * @param uuid - datanode uuid
-   * @param containerId - containerID
-   * @throws NodeNotFoundException - if the datanode is not known; for a
-   *                        new datanode use addNode.
-   */
-  public void addContainer(final UUID uuid,
-                           final ContainerID containerId)
-      throws NodeNotFoundException {
-    lock.writeLock().lock();
-    try {
-      checkIfNodeExist(uuid);
-      nodeToContainer.get(uuid).add(containerId);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  public void setContainers(UUID uuid, Set<ContainerID> containers)
-      throws NodeNotFoundException {
-    lock.writeLock().lock();
-    try {
-      checkIfNodeExist(uuid);
-      nodeToContainer.put(uuid, containers);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  public Set<ContainerID> getContainers(UUID uuid)
-      throws NodeNotFoundException {
-    lock.readLock().lock();
-    try {
-      checkIfNodeExist(uuid);
-      return Collections.unmodifiableSet(nodeToContainer.get(uuid));
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  public void removeContainer(UUID uuid, ContainerID containerID) throws
-      NodeNotFoundException {
-    lock.writeLock().lock();
-    try {
-      checkIfNodeExist(uuid);
-      nodeToContainer.get(uuid).remove(containerID);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Since we don't hold a global lock while constructing this string, the
-   * result may be inconsistent if the state of a node changes while it is
-   * being built. It should only be used for logging; do not parse this
-   * string or use it for any critical calculations.
-   *
-   * @return current state of NodeStateMap
-   */
-  @Override
-  public String toString() {
-    StringBuilder builder = new StringBuilder();
-    builder.append("Total number of nodes: ").append(getTotalNodeCount());
-    for (NodeState state : NodeState.values()) {
-      builder.append("Number of nodes in ").append(state).append(" state: ")
-          .append(getNodeCount(state));
-    }
-    return builder.toString();
-  }
-
-  /**
-   * Throws NodeNotFoundException if the Node for given id doesn't exist.
-   *
-   * @param uuid Node UUID
-   * @throws NodeNotFoundException If the node is missing.
-   */
-  private void checkIfNodeExist(UUID uuid) throws NodeNotFoundException {
-    if (!nodeToContainer.containsKey(uuid)) {
-      throw new NodeNotFoundException("Node UUID: " + uuid);
-    }
-  }
-}
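Every accessor in NodeStateMap follows the same discipline: read lock for queries, write lock for mutations, unlock in a finally block. A stripped-down sketch of that pattern; the registry and state names are illustrative:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch of the lock discipline used throughout NodeStateMap: read
// lock for queries, write lock for mutations, unlock in finally.
final class GuardedRegistrySketch {
  private final Map<String, String> stateByNode = new HashMap<>();
  private final ReadWriteLock lock = new ReentrantReadWriteLock();

  void setState(String node, String state) {
    lock.writeLock().lock();
    try {
      stateByNode.put(node, state);
    } finally {
      lock.writeLock().unlock();
    }
  }

  List<String> nodes() {
    lock.readLock().lock();
    try {
      // Copy under the lock so callers get a stable snapshot.
      return new ArrayList<>(stateByNode.keySet());
    } finally {
      lock.readLock().unlock();
    }
  }

  public static void main(String[] args) {
    GuardedRegistrySketch r = new GuardedRegistrySketch();
    r.setState("dn-1", "HEALTHY");
    System.out.println(r.nodes()); // [dn-1]
  }
}
```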
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
deleted file mode 100644
index 0c7610f..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.node.states;
-
-import java.util.Collections;
-import java.util.Set;
-
-import com.google.common.base.Preconditions;
-
-/**
- * A container/pipeline report gets processed by Node2ContainerMap or
- * Node2PipelineMap and produces this report result class.
- */
-public final class ReportResult<T> {
-  private ReportStatus status;
-  private Set<T> missingEntries;
-  private Set<T> newEntries;
-
-  private ReportResult(ReportStatus status,
-      Set<T> missingEntries,
-      Set<T> newEntries) {
-    this.status = status;
-    Preconditions.checkNotNull(missingEntries);
-    Preconditions.checkNotNull(newEntries);
-    this.missingEntries = missingEntries;
-    this.newEntries = newEntries;
-  }
-
-  public ReportStatus getStatus() {
-    return status;
-  }
-
-  public Set<T> getMissingEntries() {
-    return missingEntries;
-  }
-
-  public Set<T> getNewEntries() {
-    return newEntries;
-  }
-
-  /**
-   * Builder for the result of processing a report for a node2Object map.
-   * @param <T> type of the entries in the report.
-   */
-  public static class ReportResultBuilder<T> {
-    private ReportStatus status;
-    private Set<T> missingEntries;
-    private Set<T> newEntries;
-
-    public ReportResultBuilder<T> setStatus(
-        ReportStatus newStatus) {
-      this.status = newStatus;
-      return this;
-    }
-
-    public ReportResultBuilder<T> setMissingEntries(
-        Set<T> missingEntriesList) {
-      this.missingEntries = missingEntriesList;
-      return this;
-    }
-
-    public ReportResultBuilder<T> setNewEntries(
-        Set<T> newEntriesList) {
-      this.newEntries = newEntriesList;
-      return this;
-    }
-
-    public ReportResult<T> build() {
-
-      Set<T> nullSafeMissingEntries = this.missingEntries;
-      Set<T> nullSafeNewEntries = this.newEntries;
-      if (nullSafeNewEntries == null) {
-        nullSafeNewEntries = Collections.emptySet();
-      }
-      if (nullSafeMissingEntries == null) {
-        nullSafeMissingEntries = Collections.emptySet();
-      }
-      return new ReportResult<T>(status, nullSafeMissingEntries,
-              nullSafeNewEntries);
-    }
-  }
-
-  /**
-   * Results possible from processing a report.
-   */
-  public enum ReportStatus {
-    ALL_IS_WELL,
-    MISSING_ENTRIES,
-    NEW_ENTRIES_FOUND,
-    MISSING_AND_NEW_ENTRIES_FOUND,
-    NEW_DATANODE_FOUND,
-  }
-}
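build() above defaults null entry sets to empty sets before handing them to the null-checking constructor. On Java 9+ the same guard is a single library call; a small sketch, with a helper name of our choosing:

```java
import java.util.Collections;
import java.util.Objects;
import java.util.Set;

// Sketch: the manual null-to-empty defaulting in build() expressed
// with Objects.requireNonNullElse (Java 9+). The helper name is ours.
final class OrEmptySketch {
  static <T> Set<T> orEmpty(Set<T> s) {
    return Objects.requireNonNullElse(s, Collections.emptySet());
  }

  public static void main(String[] args) {
    System.out.println(orEmpty(null));         // []
    System.out.println(orEmpty(Set.of("a")));  // [a]
  }
}
```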
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
deleted file mode 100644
index c429c5c..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-/**
- * Node States package.
- */
-package org.apache.hadoop.hdds.scm.node.states;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
deleted file mode 100644
index 4669e74..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-/*
- * This package contains StorageContainerManager classes.
- */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
deleted file mode 100644
index 6873566..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.utils.Scheduler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * Implements the API for running background pipeline creation jobs.
- */
-class BackgroundPipelineCreator {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BackgroundPipelineCreator.class);
-
-  private final Scheduler scheduler;
-  private final AtomicBoolean isPipelineCreatorRunning;
-  private final PipelineManager pipelineManager;
-  private final Configuration conf;
-
-  BackgroundPipelineCreator(PipelineManager pipelineManager,
-      Scheduler scheduler, Configuration conf) {
-    this.pipelineManager = pipelineManager;
-    this.conf = conf;
-    this.scheduler = scheduler;
-    isPipelineCreatorRunning = new AtomicBoolean(false);
-  }
-
-  private boolean shouldSchedulePipelineCreator() {
-    return isPipelineCreatorRunning.compareAndSet(false, true);
-  }
-
-  /**
-   * Schedules a fixed interval job to create pipelines.
-   */
-  void startFixedIntervalPipelineCreator() {
-    long intervalInMillis = conf
-        .getTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL,
-            ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL_DEFAULT,
-            TimeUnit.MILLISECONDS);
-    // TODO: #CLUTIL We can start the job asap
-    scheduler.scheduleWithFixedDelay(() -> {
-      if (!shouldSchedulePipelineCreator()) {
-        return;
-      }
-      createPipelines();
-    }, 0, intervalInMillis, TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Triggers pipeline creation via background thread.
-   */
-  void triggerPipelineCreation() {
-    // TODO: #CLUTIL introduce a better mechanism to not have more than one
-    // job of a particular type running, probably via ratis.
-    if (!shouldSchedulePipelineCreator()) {
-      return;
-    }
-    scheduler.schedule(this::createPipelines, 0, TimeUnit.MILLISECONDS);
-  }
-
-  private void createPipelines() {
-    // TODO: #CLUTIL Different replication factor may need to be supported
-    HddsProtos.ReplicationType type = HddsProtos.ReplicationType.valueOf(
-        conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE,
-            OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT));
-
-    for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
-        .values()) {
-      while (true) {
-        try {
-          if (scheduler.isClosed()) {
-            break;
-          }
-          pipelineManager.createPipeline(type, factor);
-        } catch (IOException ioe) {
-          break;
-        } catch (Throwable t) {
-          LOG.error("Error while creating pipelines {}", t);
-          break;
-        }
-      }
-    }
-    isPipelineCreatorRunning.set(false);
-  }
-}
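shouldSchedulePipelineCreator above is a single-flight guard: compareAndSet(false, true) admits one run at a time, and the flag is cleared when the job finishes. A self-contained sketch of the same guard using plain java.util.concurrent rather than the removed hdds Scheduler:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of the single-flight guard from BackgroundPipelineCreator:
// compareAndSet(false, true) admits one run at a time; the flag is
// reset when the job finishes. Plain JDK scheduling stands in for the
// removed hdds Scheduler.
final class SingleFlightSketch {
  private static final AtomicBoolean RUNNING = new AtomicBoolean(false);

  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService pool = Executors.newScheduledThreadPool(2);
    Runnable job = () -> {
      if (!RUNNING.compareAndSet(false, true)) {
        return; // another run is already in flight
      }
      try {
        System.out.println("creating pipelines...");
      } finally {
        RUNNING.set(false);
      }
    };
    pool.scheduleWithFixedDelay(job, 0, 100, TimeUnit.MILLISECONDS);
    Thread.sleep(350);
    pool.shutdown();
  }
}
```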
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/InsufficientDatanodesException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/InsufficientDatanodesException.java
deleted file mode 100644
index a6a5a69..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/InsufficientDatanodesException.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import java.io.IOException;
-
-/**
- * Exception thrown when there are not enough Datanodes to create a pipeline.
- */
-public class InsufficientDatanodesException extends IOException {
-
-
-  public InsufficientDatanodesException() {
-    super();
-  }
-
-  public InsufficientDatanodesException(String message) {
-    super(message);
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
deleted file mode 100644
index 8d497fa..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
-
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Handles pipeline actions from datanode.
- */
-public class PipelineActionHandler
-    implements EventHandler<PipelineActionsFromDatanode> {
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(PipelineActionHandler.class);
-
-  private final PipelineManager pipelineManager;
-  private final Configuration ozoneConf;
-
-  public PipelineActionHandler(PipelineManager pipelineManager,
-      OzoneConfiguration conf) {
-    this.pipelineManager = pipelineManager;
-    this.ozoneConf = conf;
-  }
-
-  @Override
-  public void onMessage(PipelineActionsFromDatanode report,
-      EventPublisher publisher) {
-    for (PipelineAction action : report.getReport().getPipelineActionsList()) {
-      if (action.getAction() == PipelineAction.Action.CLOSE) {
-        PipelineID pipelineID = null;
-        try {
-          pipelineID = PipelineID.
-              getFromProtobuf(action.getClosePipeline().getPipelineID());
-          Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
-          LOG.error("Received pipeline action {} for {} from datanode {}. " +
-                  "Reason : {}", action.getAction(), pipeline,
-              report.getDatanodeDetails(),
-              action.getClosePipeline().getDetailedReason());
-          pipelineManager.finalizeAndDestroyPipeline(pipeline, true);
-        } catch (IOException ioe) {
-          LOG.error("Could not execute pipeline action={} pipeline={} {}",
-              action, pipelineID, ioe);
-        }
-      } else {
-        LOG.error("unknown pipeline action:{}" + action.getAction());
-      }
-    }
-  }
-}
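SLF4J placeholders are filled from arguments, not string concatenation; mixing the two leaves the {} literal unfilled and builds the string even when the level is disabled. A minimal sketch of the difference, with an invented logger class:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Sketch of SLF4J parameterized logging: arguments fill {} slots and
// the message is only assembled when the level is enabled.
final class LoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

  public static void main(String[] args) {
    String action = "CLOSE";
    LOG.error("Unknown pipeline action: {}", action);    // placeholder filled
    // Anti-pattern: concatenation bypasses the placeholder, so the
    // literal "{}" is printed and the string is always built.
    // LOG.error("Unknown pipeline action:{}" + action);
  }
}
```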
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
deleted file mode 100644
index 77e037a..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.ratis.grpc.GrpcTlsConfig;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Creates pipelines based on the replication type.
- */
-public final class PipelineFactory {
-
-  private Map<ReplicationType, PipelineProvider> providers;
-
-  PipelineFactory(NodeManager nodeManager, PipelineStateManager stateManager,
-      Configuration conf, GrpcTlsConfig tlsConfig) {
-    providers = new HashMap<>();
-    providers.put(ReplicationType.STAND_ALONE,
-        new SimplePipelineProvider(nodeManager));
-    providers.put(ReplicationType.RATIS,
-        new RatisPipelineProvider(nodeManager, stateManager, conf, tlsConfig));
-  }
-
-  @VisibleForTesting
-  void setProvider(ReplicationType replicationType,
-                     PipelineProvider provider) {
-    providers.put(replicationType, provider);
-  }
-
-  public Pipeline create(ReplicationType type, ReplicationFactor factor)
-      throws IOException {
-    return providers.get(type).create(factor);
-  }
-
-  public Pipeline create(ReplicationType type, ReplicationFactor factor,
-      List<DatanodeDetails> nodes) {
-    return providers.get(type).create(factor, nodes);
-  }
-
-  public void shutdown() {
-    providers.values().forEach(PipelineProvider::shutdown);
-  }
-}
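PipelineFactory is a small strategy registry: one provider per replication type, looked up from a map at call time. A compact sketch of the dispatch pattern; the enum mirrors the STAND_ALONE/RATIS split while the provider bodies are placeholders:

```java
import java.util.EnumMap;
import java.util.Map;
import java.util.function.Supplier;

// Sketch of the dispatch-by-type registry behind PipelineFactory.
// Provider bodies are placeholders, not the removed implementations.
final class FactoryDispatchSketch {
  enum ReplicationType { STAND_ALONE, RATIS }

  private final Map<ReplicationType, Supplier<String>> providers =
      new EnumMap<>(ReplicationType.class);

  FactoryDispatchSketch() {
    providers.put(ReplicationType.STAND_ALONE, () -> "simple pipeline");
    providers.put(ReplicationType.RATIS, () -> "ratis pipeline");
  }

  String create(ReplicationType type) {
    return providers.get(type).get();
  }

  public static void main(String[] args) {
    System.out.println(new FactoryDispatchSketch().create(ReplicationType.RATIS));
  }
}
```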
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
deleted file mode 100644
index 9ba5f31..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.ratis.grpc.GrpcTlsConfig;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.List;
-import java.util.NavigableSet;
-
-/**
- * Interface which exposes the API for pipeline management.
- */
-public interface PipelineManager extends Closeable, PipelineManagerMXBean {
-
-  Pipeline createPipeline(ReplicationType type, ReplicationFactor factor)
-      throws IOException;
-
-  Pipeline createPipeline(ReplicationType type, ReplicationFactor factor,
-      List<DatanodeDetails> nodes);
-
-  Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException;
-
-  List<Pipeline> getPipelines();
-
-  List<Pipeline> getPipelines(ReplicationType type);
-
-  List<Pipeline> getPipelines(ReplicationType type,
-      ReplicationFactor factor);
-
-  List<Pipeline> getPipelines(ReplicationType type,
-      ReplicationFactor factor, Pipeline.PipelineState state);
-
-  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor,
-      Pipeline.PipelineState state, Collection<DatanodeDetails> excludeDns,
-      Collection<PipelineID> excludePipelines);
-
-  void addContainerToPipeline(PipelineID pipelineID, ContainerID containerID)
-      throws IOException;
-
-  void removeContainerFromPipeline(PipelineID pipelineID,
-      ContainerID containerID) throws IOException;
-
-  NavigableSet<ContainerID> getContainersInPipeline(PipelineID pipelineID)
-      throws IOException;
-
-  int getNumberOfContainers(PipelineID pipelineID) throws IOException;
-
-  void openPipeline(PipelineID pipelineId) throws IOException;
-
-  void finalizeAndDestroyPipeline(Pipeline pipeline, boolean onTimeout)
-      throws IOException;
-
-  void startPipelineCreator();
-
-  void triggerPipelineCreation();
-
-  void incNumBlocksAllocatedMetric(PipelineID id);
-
-  /**
-   * Activates a dormant pipeline.
-   *
-   * @param pipelineID ID of the pipeline to activate.
-   * @throws IOException in case of any Exception
-   */
-  void activatePipeline(PipelineID pipelineID) throws IOException;
-
-  /**
-   * Deactivates an active pipeline.
-   *
-   * @param pipelineID ID of the pipeline to deactivate.
-   * @throws IOException in case of any Exception
-   */
-  void deactivatePipeline(PipelineID pipelineID) throws IOException;
-
-  GrpcTlsConfig getGrpcTlsConfig();
-}
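
A hypothetical walk-through of the lifecycle this interface defines, assuming an already-constructed manager and container ID; it compiles against the interface above but is illustrative rather than actual SCM code.

```java
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;

import java.io.IOException;

final class PipelineLifecycleSketch {
  static void demo(PipelineManager manager, ContainerID container)
      throws IOException {
    // 1. Allocate a three-node Ratis pipeline.
    Pipeline pipeline = manager.createPipeline(
        ReplicationType.RATIS, ReplicationFactor.THREE);
    // 2. Bind a container to it; blocks for that container are written here.
    manager.addContainerToPipeline(pipeline.getId(), container);
    // 3. Retire the pipeline: close it and tear it down on the datanodes.
    //    The boolean is the 'onTimeout' flag from the interface above.
    manager.finalizeAndDestroyPipeline(pipeline, false);
  }
}
```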
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java
deleted file mode 100644
index 77a7a81..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-import java.util.Map;
-
-/**
- * This is the JMX management interface for information related to
- * PipelineManager.
- */
-@InterfaceAudience.Private
-public interface PipelineManagerMXBean {
-
-  /**
-   * Returns the number of pipelines in each state.
-   * @return map from pipeline state to the number of pipelines in that state
-   */
-  Map<String, Integer> getPipelineInfo();
-
-}
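
`PipelineManagerMXBean` is registered through Hadoop's `MBeans.register` helper (see SCMPipelineManager below). A minimal plain-JDK sketch of the same exposure, with illustrative names:

```java
import java.lang.management.ManagementFactory;
import java.util.HashMap;
import java.util.Map;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public final class MXBeanSketch {
  public interface DemoMXBean {
    Map<String, Integer> getPipelineInfo();
  }

  public static final class Demo implements DemoMXBean {
    @Override
    public Map<String, Integer> getPipelineInfo() {
      Map<String, Integer> info = new HashMap<>();
      info.put("OPEN", 2);
      info.put("CLOSED", 1);
      return info;
    }
  }

  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = new ObjectName("demo:type=PipelineInfo");
    server.registerMBean(new Demo(), name);
    // The Map attribute surfaces as TabularData, e.g. in jconsole.
    System.out.println(server.getAttribute(name, "PipelineInfo"));
  }
}
```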
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
deleted file mode 100644
index a0ce216..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Interface for creating pipelines.
- */
-public interface PipelineProvider {
-
-  Pipeline create(ReplicationFactor factor) throws IOException;
-
-  Pipeline create(ReplicationFactor factor, List<DatanodeDetails> nodes);
-
-  void shutdown();
-}
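
A sketch of a test-double implementation of this interface: it fabricates OPEN pipelines over supplied nodes and skips any RPC. The class is hypothetical, in the spirit of the mock providers used by tests, but builds on the `Pipeline` builder API shown elsewhere in this patch.

```java
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;

import java.util.List;

/** Test double: hands back pre-baked pipelines, no Ratis RPC involved. */
final class StubPipelineProvider implements PipelineProvider {
  private final List<DatanodeDetails> fixedNodes;

  StubPipelineProvider(List<DatanodeDetails> fixedNodes) {
    this.fixedNodes = fixedNodes;
  }

  @Override
  public Pipeline create(ReplicationFactor factor) {
    return create(factor, fixedNodes.subList(0, factor.getNumber()));
  }

  @Override
  public Pipeline create(ReplicationFactor factor,
      List<DatanodeDetails> nodes) {
    return Pipeline.newBuilder()
        .setId(PipelineID.randomId())
        .setState(Pipeline.PipelineState.OPEN)
        .setType(ReplicationType.RATIS)
        .setFactor(factor)
        .setNodes(nodes)
        .build();
  }

  @Override
  public void shutdown() {
    // nothing to release
  }
}
```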
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
deleted file mode 100644
index 793f4e2..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReport;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.server
-    .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Objects;
-
-/**
- * Handles Pipeline Reports from datanode.
- */
-public class PipelineReportHandler implements
-    EventHandler<PipelineReportFromDatanode> {
-
-  private static final Logger LOGGER = LoggerFactory
-      .getLogger(PipelineReportHandler.class);
-  private final PipelineManager pipelineManager;
-  private final Configuration conf;
-  private final SCMSafeModeManager scmSafeModeManager;
-  private final boolean pipelineAvailabilityCheck;
-
-  public PipelineReportHandler(SCMSafeModeManager scmSafeModeManager,
-      PipelineManager pipelineManager,
-      Configuration conf) {
-    Preconditions.checkNotNull(pipelineManager);
-    Objects.requireNonNull(scmSafeModeManager);
-    this.scmSafeModeManager = scmSafeModeManager;
-    this.pipelineManager = pipelineManager;
-    this.conf = conf;
-    this.pipelineAvailabilityCheck = conf.getBoolean(
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK,
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT);
-
-  }
-
-  @Override
-  public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode,
-      EventPublisher publisher) {
-    Preconditions.checkNotNull(pipelineReportFromDatanode);
-    DatanodeDetails dn = pipelineReportFromDatanode.getDatanodeDetails();
-    PipelineReportsProto pipelineReport =
-        pipelineReportFromDatanode.getReport();
-    Preconditions.checkNotNull(dn, "Pipeline Report is "
-        + "missing DatanodeDetails.");
-    if (LOGGER.isTraceEnabled()) {
-      LOGGER.trace("Processing pipeline report for dn: {}", dn);
-    }
-    for (PipelineReport report : pipelineReport.getPipelineReportList()) {
-      try {
-        processPipelineReport(report, dn);
-      } catch (IOException e) {
-        LOGGER.error("Could not process pipeline report={} from dn={} {}",
-            report, dn, e);
-      }
-    }
-    if (pipelineAvailabilityCheck && scmSafeModeManager.getInSafeMode()) {
-      publisher.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT,
-          pipelineReportFromDatanode);
-    }
-
-  }
-
-  private void processPipelineReport(PipelineReport report, DatanodeDetails dn)
-      throws IOException {
-    PipelineID pipelineID = PipelineID.getFromProtobuf(report.getPipelineID());
-    Pipeline pipeline;
-    try {
-      pipeline = pipelineManager.getPipeline(pipelineID);
-    } catch (PipelineNotFoundException e) {
-      RatisPipelineUtils.destroyPipeline(dn, pipelineID, conf,
-          pipelineManager.getGrpcTlsConfig());
-      return;
-    }
-
-    if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) {
-      LOGGER.info("Pipeline {} reported by {}", pipeline.getId(), dn);
-      pipeline.reportDatanode(dn);
-      if (pipeline.isHealthy()) {
-        // if all the dns have reported, pipeline can be moved to OPEN state
-        pipelineManager.openPipeline(pipelineID);
-      }
-    } else {
-      // In OPEN state case just report the datanode
-      pipeline.reportDatanode(dn);
-    }
-  }
-}
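
The decision logic in `processPipelineReport` condenses into a small, dependency-free table, useful for reasoning about the handler; names here are illustrative, not the Ozone types.

```java
import java.util.HashSet;
import java.util.Set;

final class ReportDecisionSketch {
  enum State { ALLOCATED, OPEN, CLOSED }
  enum Action { DESTROY_STALE_PIPELINE, OPEN_PIPELINE, RECORD_DATANODE }

  static Action onReport(boolean knownToScm, State state,
                         Set<String> reportedDns, int factor, String dn) {
    if (!knownToScm) {
      // SCM has no record: the datanode is running a stale pipeline.
      return Action.DESTROY_STALE_PIPELINE;
    }
    reportedDns.add(dn);
    if (state == State.ALLOCATED && reportedDns.size() == factor) {
      // Every member has reported: safe to move ALLOCATED -> OPEN.
      return Action.OPEN_PIPELINE;
    }
    return Action.RECORD_DATANODE;
  }

  public static void main(String[] args) {
    Set<String> seen = new HashSet<>();
    System.out.println(onReport(true, State.ALLOCATED, seen, 3, "dn1"));
    seen.add("dn2");
    System.out.println(onReport(true, State.ALLOCATED, seen, 3, "dn3"));
    System.out.println(onReport(false, State.OPEN, seen, 3, "dn4"));
  }
}
```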
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
deleted file mode 100644
index 7615057..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.List;
-import java.util.NavigableSet;
-
-/**
- * Manages the state of pipelines in SCM. All write operations like pipeline
- * creation, removal and updates should come via SCMPipelineManager.
- * The PipelineStateMap class holds the data structures related to pipelines
- * and their state; SCMPipelineManager protects all read and write operations
- * on it with a read-write lock.
- */
-class PipelineStateManager {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(PipelineStateManager.class);
-
-  private final PipelineStateMap pipelineStateMap;
-
-  PipelineStateManager(Configuration conf) {
-    this.pipelineStateMap = new PipelineStateMap();
-  }
-
-  void addPipeline(Pipeline pipeline) throws IOException {
-    pipelineStateMap.addPipeline(pipeline);
-    if (pipeline.getPipelineState() == PipelineState.OPEN) {
-      LOG.info("Created pipeline " + pipeline);
-    }
-  }
-
-  void addContainerToPipeline(PipelineID pipelineId, ContainerID containerID)
-      throws IOException {
-    pipelineStateMap.addContainerToPipeline(pipelineId, containerID);
-  }
-
-  Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException {
-    return pipelineStateMap.getPipeline(pipelineID);
-  }
-
-  public List<Pipeline> getPipelines() {
-    return pipelineStateMap.getPipelines();
-  }
-
-  List<Pipeline> getPipelines(ReplicationType type) {
-    return pipelineStateMap.getPipelines(type);
-  }
-
-  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor) {
-    return pipelineStateMap.getPipelines(type, factor);
-  }
-
-  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor,
-      PipelineState state) {
-    return pipelineStateMap.getPipelines(type, factor, state);
-  }
-
-  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor,
-      PipelineState state, Collection<DatanodeDetails> excludeDns,
-      Collection<PipelineID> excludePipelines) {
-    return pipelineStateMap
-        .getPipelines(type, factor, state, excludeDns, excludePipelines);
-  }
-
-  List<Pipeline> getPipelines(ReplicationType type, PipelineState... states) {
-    return pipelineStateMap.getPipelines(type, states);
-  }
-
-  NavigableSet<ContainerID> getContainers(PipelineID pipelineID)
-      throws IOException {
-    return pipelineStateMap.getContainers(pipelineID);
-  }
-
-  int getNumberOfContainers(PipelineID pipelineID) throws IOException {
-    return pipelineStateMap.getNumberOfContainers(pipelineID);
-  }
-
-  Pipeline removePipeline(PipelineID pipelineID) throws IOException {
-    Pipeline pipeline = pipelineStateMap.removePipeline(pipelineID);
-    LOG.info("Pipeline {} removed from db", pipeline);
-    return pipeline;
-  }
-
-  void removeContainerFromPipeline(PipelineID pipelineID,
-      ContainerID containerID) throws IOException {
-    pipelineStateMap.removeContainerFromPipeline(pipelineID, containerID);
-  }
-
-  Pipeline finalizePipeline(PipelineID pipelineId)
-      throws PipelineNotFoundException {
-    Pipeline pipeline = pipelineStateMap.getPipeline(pipelineId);
-    if (!pipeline.isClosed()) {
-      pipeline = pipelineStateMap
-          .updatePipelineState(pipelineId, PipelineState.CLOSED);
-      LOG.info("Pipeline {} moved to CLOSED state", pipeline);
-    }
-    return pipeline;
-  }
-
-  Pipeline openPipeline(PipelineID pipelineId) throws IOException {
-    Pipeline pipeline = pipelineStateMap.getPipeline(pipelineId);
-    if (pipeline.isClosed()) {
-      throw new IOException("Closed pipeline can not be opened");
-    }
-    if (pipeline.getPipelineState() == PipelineState.ALLOCATED) {
-      pipeline = pipelineStateMap
-          .updatePipelineState(pipelineId, PipelineState.OPEN);
-      LOG.info("Pipeline {} moved to OPEN state", pipeline.toString());
-    }
-    return pipeline;
-  }
-
-  /**
-   * Activates a dormant pipeline.
-   *
-   * @param pipelineID ID of the pipeline to activate.
-   * @throws IOException in case of any Exception
-   */
-  public void activatePipeline(PipelineID pipelineID)
-      throws IOException {
-    pipelineStateMap
-        .updatePipelineState(pipelineID, PipelineState.OPEN);
-  }
-
-  /**
-   * Deactivates an active pipeline.
-   *
-   * @param pipelineID ID of the pipeline to deactivate.
-   * @throws IOException in case of any Exception
-   */
-  public void deactivatePipeline(PipelineID pipelineID)
-      throws IOException {
-    pipelineStateMap
-        .updatePipelineState(pipelineID, PipelineState.DORMANT);
-  }
-}
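
The transitions this class enforces form a small state machine: ALLOCATED moves to OPEN via `openPipeline`, OPEN and DORMANT toggle via activate/deactivate, `finalizePipeline` drives any non-closed pipeline to CLOSED, and CLOSED is terminal. A minimal sketch (illustrative enum, not the Ozone `PipelineState`):

```java
enum PipelineStateSketch {
  ALLOCATED, OPEN, DORMANT, CLOSED;

  PipelineStateSketch open() {
    if (this == CLOSED) {
      throw new IllegalStateException("Closed pipeline can not be opened");
    }
    // Only the ALLOCATED -> OPEN transition changes state here.
    return this == ALLOCATED ? OPEN : this;
  }

  PipelineStateSketch finalizePipeline() {
    return CLOSED; // idempotent: already-closed pipelines stay closed
  }

  PipelineStateSketch deactivate() { return DORMANT; }

  PipelineStateSketch activate() { return OPEN; }
}
```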
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
deleted file mode 100644
index 443378c..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
+++ /dev/null
@@ -1,420 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import com.google.common.base.Preconditions;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.function.Predicate;
-import java.util.stream.Collectors;
-
-/**
- * Holds the data structures which maintain the information about pipeline and
- * its state.
- * Invariant: If a pipeline exists in PipelineStateMap, both pipelineMap and
- * pipeline2container would have a non-null mapping for it.
- */
-class PipelineStateMap {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      PipelineStateMap.class);
-
-  private final Map<PipelineID, Pipeline> pipelineMap;
-  private final Map<PipelineID, NavigableSet<ContainerID>> pipeline2container;
-  private final Map<PipelineQuery, List<Pipeline>> query2OpenPipelines;
-
-  PipelineStateMap() {
-
-    // TODO: Use TreeMap for range operations?
-    pipelineMap = new HashMap<>();
-    pipeline2container = new HashMap<>();
-    query2OpenPipelines = new HashMap<>();
-    initializeQueryMap();
-
-  }
-
-  private void initializeQueryMap() {
-    for (ReplicationType type : ReplicationType.values()) {
-      for (ReplicationFactor factor : ReplicationFactor.values()) {
-        query2OpenPipelines
-            .put(new PipelineQuery(type, factor), new CopyOnWriteArrayList<>());
-      }
-    }
-  }
-
-  /**
-   * Adds the provided pipeline to the data structures.
-   *
-   * @param pipeline - Pipeline to add
-   * @throws IOException if pipeline with provided pipelineID already exists
-   */
-  void addPipeline(Pipeline pipeline) throws IOException {
-    Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
-    Preconditions.checkArgument(
-        pipeline.getNodes().size() == pipeline.getFactor().getNumber(),
-        String.format("Nodes size=%d, replication factor=%d do not match ",
-                pipeline.getNodes().size(), pipeline.getFactor().getNumber()));
-
-    if (pipelineMap.putIfAbsent(pipeline.getId(), pipeline) != null) {
-      LOG.warn("Duplicate pipeline ID detected. {}", pipeline.getId());
-      throw new IOException(String
-          .format("Duplicate pipeline ID %s detected.", pipeline.getId()));
-    }
-    pipeline2container.put(pipeline.getId(), new TreeSet<>());
-    if (pipeline.getPipelineState() == PipelineState.OPEN) {
-      query2OpenPipelines.get(new PipelineQuery(pipeline)).add(pipeline);
-    }
-  }
-
-  /**
-   * Add container to an existing pipeline.
-   *
-   * @param pipelineID - PipelineID of the pipeline to which container is added
-   * @param containerID - ContainerID of the container to add
-   * @throws IOException if pipeline is in closed state or does not exist
-   */
-  void addContainerToPipeline(PipelineID pipelineID, ContainerID containerID)
-      throws IOException {
-    Preconditions.checkNotNull(pipelineID,
-        "Pipeline Id cannot be null");
-    Preconditions.checkNotNull(containerID,
-        "Container Id cannot be null");
-
-    Pipeline pipeline = getPipeline(pipelineID);
-    if (pipeline.isClosed()) {
-      throw new IOException(String
-          .format("Cannot add container to pipeline=%s in closed state",
-              pipelineID));
-    }
-    pipeline2container.get(pipelineID).add(containerID);
-  }
-
-  /**
-   * Get pipeline corresponding to specified pipelineID.
-   *
-   * @param pipelineID - PipelineID of the pipeline to be retrieved
-   * @return Pipeline
-   * @throws IOException if pipeline is not found
-   */
-  Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException {
-    Preconditions.checkNotNull(pipelineID,
-        "Pipeline Id cannot be null");
-
-    Pipeline pipeline = pipelineMap.get(pipelineID);
-    if (pipeline == null) {
-      throw new PipelineNotFoundException(
-          String.format("%s not found", pipelineID));
-    }
-    return pipeline;
-  }
-
-  /**
-   * Get list of pipelines in SCM.
-   * @return List of pipelines
-   */
-  public List<Pipeline> getPipelines() {
-    return new ArrayList<>(pipelineMap.values());
-  }
-
-  /**
-   * Get pipelines corresponding to specified replication type.
-   *
-   * @param type - ReplicationType
-   * @return List of pipelines which have the specified replication type
-   */
-  List<Pipeline> getPipelines(ReplicationType type) {
-    Preconditions.checkNotNull(type, "Replication type cannot be null");
-
-    return pipelineMap.values().stream()
-        .filter(p -> p.getType().equals(type))
-        .collect(Collectors.toList());
-  }
-
-  /**
-   * Get pipelines corresponding to specified replication type and factor.
-   *
-   * @param type - ReplicationType
-   * @param factor - ReplicationFactor
-   * @return List of pipelines with specified replication type and factor
-   */
-  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor) {
-    Preconditions.checkNotNull(type, "Replication type cannot be null");
-    Preconditions.checkNotNull(factor, "Replication factor cannot be null");
-
-    return pipelineMap.values().stream()
-        .filter(pipeline -> pipeline.getType() == type
-            && pipeline.getFactor() == factor)
-        .collect(Collectors.toList());
-  }
-
-  /**
-   * Get list of pipelines corresponding to specified replication type and
-   * pipeline states.
-   *
-   * @param type - ReplicationType
-   * @param states - Array of required PipelineState
-   * @return List of pipelines with specified replication type and states
-   */
-  List<Pipeline> getPipelines(ReplicationType type, PipelineState... states) {
-    Preconditions.checkNotNull(type, "Replication type cannot be null");
-    Preconditions.checkNotNull(states, "Pipeline state cannot be null");
-
-    Set<PipelineState> pipelineStates = new HashSet<>();
-    pipelineStates.addAll(Arrays.asList(states));
-    return pipelineMap.values().stream().filter(
-        pipeline -> pipeline.getType() == type && pipelineStates
-            .contains(pipeline.getPipelineState()))
-        .collect(Collectors.toList());
-  }
-
-  /**
-   * Get list of pipelines corresponding to specified replication type,
-   * replication factor and pipeline state.
-   *
-   * @param type - ReplicationType
-   * @param factor - ReplicationFactor
-   * @param state - Required PipelineState
-   * @return List of pipelines with specified replication type,
-   * replication factor and pipeline state
-   */
-  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor,
-      PipelineState state) {
-    Preconditions.checkNotNull(type, "Replication type cannot be null");
-    Preconditions.checkNotNull(factor, "Replication factor cannot be null");
-    Preconditions.checkNotNull(state, "Pipeline state cannot be null");
-
-    if (state == PipelineState.OPEN) {
-      return Collections.unmodifiableList(
-          query2OpenPipelines.get(new PipelineQuery(type, factor)));
-    }
-    return pipelineMap.values().stream().filter(
-        pipeline -> pipeline.getType() == type
-            && pipeline.getPipelineState() == state
-            && pipeline.getFactor() == factor)
-        .collect(Collectors.toList());
-  }
-
-  /**
-   * Get list of pipelines corresponding to specified replication type,
-   * replication factor and pipeline state, excluding the given datanodes
-   * and pipelines.
-   *
-   * @param type - ReplicationType
-   * @param factor - ReplicationFactor
-   * @param state - Required PipelineState
-   * @param excludeDns - list of datanodes to exclude
-   * @param excludePipelines - pipelines to exclude
-   * @return List of pipelines with specified replication type,
-   * replication factor and pipeline state
-   */
-  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor,
-      PipelineState state, Collection<DatanodeDetails> excludeDns,
-      Collection<PipelineID> excludePipelines) {
-    Preconditions.checkNotNull(type, "Replication type cannot be null");
-    Preconditions.checkNotNull(factor, "Replication factor cannot be null");
-    Preconditions.checkNotNull(state, "Pipeline state cannot be null");
-    Preconditions
-        .checkNotNull(excludeDns, "Datanode exclude list cannot be null");
-    Preconditions
-        .checkNotNull(excludePipelines, "Pipeline exclude list cannot be null");
-    return getPipelines(type, factor, state).stream().filter(
-        pipeline -> !discardPipeline(pipeline, excludePipelines)
-            && !discardDatanode(pipeline, excludeDns))
-        .collect(Collectors.toList());
-  }
-
-  private boolean discardPipeline(Pipeline pipeline,
-      Collection<PipelineID> excludePipelines) {
-    if (excludePipelines.isEmpty()) {
-      return false;
-    }
-    Predicate<PipelineID> predicate = p -> p.equals(pipeline.getId());
-    return excludePipelines.parallelStream().anyMatch(predicate);
-  }
-
-  private boolean discardDatanode(Pipeline pipeline,
-      Collection<DatanodeDetails> excludeDns) {
-    if (excludeDns.isEmpty()) {
-      return false;
-    }
-    boolean discard = false;
-    for (DatanodeDetails dn : pipeline.getNodes()) {
-      Predicate<DatanodeDetails> predicate = p -> p.equals(dn);
-      discard = excludeDns.parallelStream().anyMatch(predicate);
-      if (discard) {
-        break;
-      }
-    }
-    return discard;
-  }
-
-  /**
-   * Get set of containerIDs corresponding to a pipeline.
-   *
-   * @param pipelineID - PipelineID
-   * @return Set of containerIDs belonging to the pipeline
-   * @throws IOException if pipeline is not found
-   */
-  NavigableSet<ContainerID> getContainers(PipelineID pipelineID)
-      throws PipelineNotFoundException {
-    Preconditions.checkNotNull(pipelineID,
-        "Pipeline Id cannot be null");
-
-    NavigableSet<ContainerID> containerIDs = pipeline2container.get(pipelineID);
-    if (containerIDs == null) {
-      throw new PipelineNotFoundException(
-          String.format("%s not found", pipelineID));
-    }
-    return new TreeSet<>(containerIDs);
-  }
-
-  /**
-   * Get number of containers corresponding to a pipeline.
-   *
-   * @param pipelineID - PipelineID
-   * @return Number of containers belonging to the pipeline
-   * @throws IOException if pipeline is not found
-   */
-  int getNumberOfContainers(PipelineID pipelineID)
-      throws PipelineNotFoundException {
-    Preconditions.checkNotNull(pipelineID,
-        "Pipeline Id cannot be null");
-
-    Set<ContainerID> containerIDs = pipeline2container.get(pipelineID);
-    if (containerIDs == null) {
-      throw new PipelineNotFoundException(
-          String.format("%s not found", pipelineID));
-    }
-    return containerIDs.size();
-  }
-
-  /**
-   * Remove pipeline from the data structures.
-   *
-   * @param pipelineID - PipelineID of the pipeline to be removed
-   * @throws IOException if the pipeline is not closed or does not exist
-   */
-  Pipeline removePipeline(PipelineID pipelineID) throws IOException {
-    Preconditions.checkNotNull(pipelineID, "Pipeline Id cannot be null");
-
-    Pipeline pipeline = getPipeline(pipelineID);
-    if (!pipeline.isClosed()) {
-      throw new IOException(
-          String.format("Pipeline with %s is not yet closed", pipelineID));
-    }
-
-    pipelineMap.remove(pipelineID);
-    pipeline2container.remove(pipelineID);
-    return pipeline;
-  }
-
-  /**
-   * Remove container from a pipeline.
-   *
-   * @param pipelineID - PipelineID of the pipeline from which container needs
-   *                   to be removed
-   * @param containerID - ContainerID of the container to remove
-   * @throws IOException if pipeline does not exist
-   */
-  void removeContainerFromPipeline(PipelineID pipelineID,
-      ContainerID containerID) throws IOException {
-    Preconditions.checkNotNull(pipelineID,
-        "Pipeline Id cannot be null");
-    Preconditions.checkNotNull(containerID,
-        "container Id cannot be null");
-
-    Set<ContainerID> containerIDs = pipeline2container.get(pipelineID);
-    if (containerIDs == null) {
-      throw new PipelineNotFoundException(
-          String.format("%s not found", pipelineID));
-    }
-    containerIDs.remove(containerID);
-  }
-
-  /**
-   * Updates the state of pipeline.
-   *
-   * @param pipelineID - PipelineID of the pipeline whose state needs
-   *                   to be updated
-   * @param state - new state of the pipeline
-   * @return Pipeline with the updated state
-   * @throws IOException if pipeline does not exist
-   */
-  Pipeline updatePipelineState(PipelineID pipelineID, PipelineState state)
-      throws PipelineNotFoundException {
-    Preconditions.checkNotNull(pipelineID, "Pipeline Id cannot be null");
-    Preconditions.checkNotNull(state, "Pipeline LifeCycleState cannot be null");
-
-    final Pipeline pipeline = getPipeline(pipelineID);
-    Pipeline updatedPipeline = pipelineMap.compute(pipelineID,
-        (id, p) -> Pipeline.newBuilder(pipeline).setState(state).build());
-    PipelineQuery query = new PipelineQuery(pipeline);
-    if (updatedPipeline.getPipelineState() == PipelineState.OPEN) {
-      // for transition to OPEN state add pipeline to query2OpenPipelines
-      query2OpenPipelines.get(query).add(updatedPipeline);
-    } else {
-      // for transition from OPEN to CLOSED state remove pipeline from
-      // query2OpenPipelines
-      query2OpenPipelines.get(query).remove(pipeline);
-    }
-    return updatedPipeline;
-  }
-
-  private static class PipelineQuery {
-    private ReplicationType type;
-    private ReplicationFactor factor;
-
-    PipelineQuery(ReplicationType type, ReplicationFactor factor) {
-      this.type = Preconditions.checkNotNull(type);
-      this.factor = Preconditions.checkNotNull(factor);
-    }
-
-    PipelineQuery(Pipeline pipeline) {
-      type = pipeline.getType();
-      factor = pipeline.getFactor();
-    }
-
-    @Override
-    @SuppressFBWarnings("NP_EQUALS_SHOULD_HANDLE_NULL_ARGUMENT")
-    public boolean equals(Object other) {
-      if (this == other) {
-        return true;
-      }
-      if (!this.getClass().equals(other.getClass())) {
-        return false;
-      }
-      PipelineQuery otherQuery = (PipelineQuery) other;
-      return type == otherQuery.type && factor == otherQuery.factor;
-    }
-
-    @Override
-    public int hashCode() {
-      return new HashCodeBuilder()
-          .append(type)
-          .append(factor)
-          .toHashCode();
-    }
-  }
-}
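
The `query2OpenPipelines` map is a secondary index: alongside the primary id-to-pipeline map, a precomputed (type, factor) bucket of OPEN pipelines keeps the hot allocation query from scanning every pipeline. A dependency-free sketch of the idea, with a string key standing in for `PipelineQuery`:

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;

final class OpenPipelineIndexSketch {
  // (type, factor) collapsed into one key; PipelineQuery plays this role.
  private final Map<String, List<String>> openByQuery = new HashMap<>();

  private static String key(String type, int factor) {
    return type + "/" + factor;
  }

  void onStateChange(String pipelineId, String type, int factor,
      boolean nowOpen) {
    List<String> bucket = openByQuery.computeIfAbsent(
        key(type, factor), k -> new CopyOnWriteArrayList<>());
    if (nowOpen) {
      bucket.add(pipelineId);    // pipeline just moved into OPEN
    } else {
      bucket.remove(pipelineId); // pipeline left OPEN (finalized/closed)
    }
  }

  List<String> openPipelines(String type, int factor) {
    // O(1) bucket lookup instead of scanning every pipeline.
    return openByQuery.getOrDefault(key(type, factor),
        Collections.<String>emptyList());
  }
}
```

`CopyOnWriteArrayList` mirrors the original's choice: reads dominate, so the OPEN bucket can be handed out (wrapped unmodifiable) without copying.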
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
deleted file mode 100644
index 0324a58..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ /dev/null
@@ -1,256 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState;
-import org.apache.hadoop.io.MultipleIOException;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.grpc.GrpcTlsConfig;
-import org.apache.ratis.protocol.RaftClientReply;
-import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.RaftPeer;
-import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.util.TimeDuration;
-import org.apache.ratis.util.function.CheckedBiConsumer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ForkJoinPool;
-import java.util.concurrent.ForkJoinWorkerThread;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-/**
- * Implements the API for creating Ratis pipelines.
- */
-public class RatisPipelineProvider implements PipelineProvider {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RatisPipelineProvider.class);
-
-  private final NodeManager nodeManager;
-  private final PipelineStateManager stateManager;
-  private final Configuration conf;
-
-  // Parallelism is 3, as Ratis currently creates 1- and 3-node pipelines.
-  private final int parallelismForPool = 3;
-
-  private final ForkJoinPool.ForkJoinWorkerThreadFactory factory =
-      (pool -> {
-        final ForkJoinWorkerThread worker = ForkJoinPool.
-            defaultForkJoinWorkerThreadFactory.newThread(pool);
-        worker.setName("RATISCREATEPIPELINE" + worker.getPoolIndex());
-        return worker;
-      });
-
-  private final ForkJoinPool forkJoinPool = new ForkJoinPool(
-      parallelismForPool, factory, null, false);
-  private final GrpcTlsConfig tlsConfig;
-
-  RatisPipelineProvider(NodeManager nodeManager,
-      PipelineStateManager stateManager, Configuration conf,
-      GrpcTlsConfig tlsConfig) {
-    this.nodeManager = nodeManager;
-    this.stateManager = stateManager;
-    this.conf = conf;
-    this.tlsConfig = tlsConfig;
-  }
-
-  /**
-   * Create pluggable container placement policy implementation instance.
-   *
-   * @param nodeManager - SCM node manager.
-   * @param conf - configuration.
-   * @return SCM container placement policy implementation instance.
-   */
-  @SuppressWarnings("unchecked")
-  // TODO: should we rename ContainerPlacementPolicy to PipelinePlacementPolicy?
-  private static ContainerPlacementPolicy createContainerPlacementPolicy(
-      final NodeManager nodeManager, final Configuration conf) {
-    Class<? extends ContainerPlacementPolicy> implClass =
-        (Class<? extends ContainerPlacementPolicy>) conf.getClass(
-            ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-            SCMContainerPlacementRandom.class);
-
-    try {
-      Constructor<? extends ContainerPlacementPolicy> ctor =
-          implClass.getDeclaredConstructor(NodeManager.class,
-              Configuration.class);
-      return ctor.newInstance(nodeManager, conf);
-    } catch (RuntimeException e) {
-      throw e;
-    } catch (InvocationTargetException e) {
-      throw new RuntimeException(implClass.getName()
-          + " could not be constructed.", e.getCause());
-    } catch (Exception e) {
-      throw new IllegalArgumentException("Unable to load " +
-          "ContainerPlacementPolicy", e);
-    }
-  }
-
-  @Override
-  public Pipeline create(ReplicationFactor factor) throws IOException {
-    // Get set of datanodes already used for ratis pipeline
-    Set<DatanodeDetails> dnsUsed = new HashSet<>();
-    stateManager.getPipelines(ReplicationType.RATIS, factor).stream().filter(
-        p -> p.getPipelineState().equals(PipelineState.OPEN) ||
-            p.getPipelineState().equals(PipelineState.DORMANT) ||
-            p.getPipelineState().equals(PipelineState.ALLOCATED))
-        .forEach(p -> dnsUsed.addAll(p.getNodes()));
-
-    // Get list of healthy nodes
-    List<DatanodeDetails> dns =
-        nodeManager.getNodes(NodeState.HEALTHY)
-            .parallelStream()
-            .filter(dn -> !dnsUsed.contains(dn))
-            .limit(factor.getNumber())
-            .collect(Collectors.toList());
-    if (dns.size() < factor.getNumber()) {
-      String e = String
-          .format("Cannot create pipeline of factor %d using %d nodes.",
-              factor.getNumber(), dns.size());
-      throw new InsufficientDatanodesException(e);
-    }
-
-    Pipeline pipeline = Pipeline.newBuilder()
-        .setId(PipelineID.randomId())
-        .setState(PipelineState.OPEN)
-        .setType(ReplicationType.RATIS)
-        .setFactor(factor)
-        .setNodes(dns)
-        .build();
-    initializePipeline(pipeline);
-    return pipeline;
-  }
-
-  @Override
-  public Pipeline create(ReplicationFactor factor,
-      List<DatanodeDetails> nodes) {
-    return Pipeline.newBuilder()
-        .setId(PipelineID.randomId())
-        .setState(PipelineState.OPEN)
-        .setType(ReplicationType.RATIS)
-        .setFactor(factor)
-        .setNodes(nodes)
-        .build();
-  }
-
-  @Override
-  public void shutdown() {
-    forkJoinPool.shutdownNow();
-    try {
-      forkJoinPool.awaitTermination(60, TimeUnit.SECONDS);
-    } catch (Exception e) {
-      LOG.error("Unexpected exception occurred during shutdown of " +
-              "RatisPipelineProvider", e);
-    }
-  }
-
-  protected void initializePipeline(Pipeline pipeline) throws IOException {
-    final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group);
-    }
-    callRatisRpc(pipeline.getNodes(),
-        (raftClient, peer) -> {
-          RaftClientReply reply = raftClient.groupAdd(group, peer.getId());
-          if (reply == null || !reply.isSuccess()) {
-            String msg = "Pipeline initialization failed for pipeline:"
-                + pipeline.getId() + " node:" + peer.getId();
-            LOG.error(msg);
-            throw new IOException(msg);
-          }
-        });
-  }
-
-  private void callRatisRpc(List<DatanodeDetails> datanodes,
-      CheckedBiConsumer<RaftClient, RaftPeer, IOException> rpc)
-      throws IOException {
-    if (datanodes.isEmpty()) {
-      return;
-    }
-
-    final String rpcType = conf
-        .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
-            ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
-    final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(conf);
-    final List<IOException> exceptions =
-        Collections.synchronizedList(new ArrayList<>());
-    final int maxOutstandingRequests =
-        HddsClientUtils.getMaxOutstandingRequests(conf);
-    final TimeDuration requestTimeout =
-        RatisHelper.getClientRequestTimeout(conf);
-    try {
-      forkJoinPool.submit(() -> {
-        datanodes.parallelStream().forEach(d -> {
-          final RaftPeer p = RatisHelper.toRaftPeer(d);
-          try (RaftClient client = RatisHelper
-              .newRaftClient(SupportedRpcType.valueOfIgnoreCase(rpcType), p,
-                  retryPolicy, maxOutstandingRequests, tlsConfig,
-                  requestTimeout)) {
-            rpc.accept(client, p);
-          } catch (IOException ioe) {
-            String errMsg =
-                "Failed invoke Ratis rpc " + rpc + " for " + d.getUuid();
-            LOG.error(errMsg, ioe);
-            exceptions.add(new IOException(errMsg, ioe));
-          }
-        });
-      }).get();
-    } catch (ExecutionException | RejectedExecutionException ex) {
-      LOG.error(ex.getClass().getName() + " exception occurred during " +
-          "createPipeline", ex);
-      throw new IOException(ex.getClass().getName() + " exception occurred " +
-          "during createPipeline", ex);
-    } catch (InterruptedException ex) {
-      Thread.currentThread().interrupt();
-      throw new IOException("Interrupt exception occurred during " +
-          "createPipeline", ex);
-    }
-    if (!exceptions.isEmpty()) {
-      throw MultipleIOException.createIOException(exceptions);
-    }
-  }
-}
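
The provider names its ForkJoin workers so pipeline-creation RPCs stand out in thread dumps. A stand-alone sketch of that thread-factory trick:

```java
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinWorkerThread;

public final class NamedPoolSketch {
  public static void main(String[] args) throws Exception {
    ForkJoinPool.ForkJoinWorkerThreadFactory factory = pool -> {
      ForkJoinWorkerThread worker =
          ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool);
      worker.setName("RATISCREATEPIPELINE" + worker.getPoolIndex());
      return worker;
    };
    // Parallelism 3 matches the two pipeline sizes (1- and 3-node) created.
    ForkJoinPool pool = new ForkJoinPool(3, factory, null, false);
    pool.submit(() ->
        System.out.println("running on " + Thread.currentThread().getName()))
        .get();
    pool.shutdownNow();
  }
}
```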
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
deleted file mode 100644
index 20fa092..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.grpc.GrpcTlsConfig;
-import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.protocol.RaftPeer;
-import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.util.TimeDuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * Utility class for Ratis pipelines. Contains methods to create and destroy
- * ratis pipelines.
- */
-public final class RatisPipelineUtils {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RatisPipelineUtils.class);
-
-  private RatisPipelineUtils() {
-  }
-
-  /**
-   * Removes pipeline from SCM. Sends ratis command to destroy pipeline on all
-   * the datanodes.
-   *
-   * @param pipeline      - Pipeline to be destroyed
-   * @param ozoneConf     - Ozone configuration
-   * @param grpcTlsConfig - gRPC TLS configuration for the Ratis client
-   */
-  static void destroyPipeline(Pipeline pipeline, Configuration ozoneConf,
-      GrpcTlsConfig grpcTlsConfig) {
-    final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("destroying pipeline:{} with {}", pipeline.getId(), group);
-    }
-    for (DatanodeDetails dn : pipeline.getNodes()) {
-      try {
-        destroyPipeline(dn, pipeline.getId(), ozoneConf, grpcTlsConfig);
-      } catch (IOException e) {
-        LOG.warn("Pipeline destroy failed for pipeline={} dn={}",
-            pipeline.getId(), dn);
-      }
-    }
-  }
-
-  /**
-   * Sends ratis command to destroy pipeline on the given datanode.
-   *
-   * @param dn         - Datanode on which pipeline needs to be destroyed
-   * @param pipelineID - ID of pipeline to be destroyed
-   * @param ozoneConf  - Ozone configuration
-   * @param grpcTlsConfig - grpc tls configuration
-   * @throws IOException if the groupRemove call to the datanode fails
-   */
-  static void destroyPipeline(DatanodeDetails dn, PipelineID pipelineID,
-      Configuration ozoneConf, GrpcTlsConfig grpcTlsConfig) throws IOException {
-    final String rpcType = ozoneConf
-        .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
-            ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
-    final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
-    final RaftPeer p = RatisHelper.toRaftPeer(dn);
-    final int maxOutstandingRequests =
-        HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
-    final TimeDuration requestTimeout =
-        RatisHelper.getClientRequestTimeout(ozoneConf);
-    try(RaftClient client = RatisHelper
-        .newRaftClient(SupportedRpcType.valueOfIgnoreCase(rpcType), p,
-            retryPolicy, maxOutstandingRequests, grpcTlsConfig,
-            requestTimeout)) {
-      client.groupRemove(RaftGroupId.valueOf(pipelineID.getId()),
-          true, p.getId());
-    }
-  }
-}
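
`destroyPipeline(Pipeline, ...)` fans out best-effort: each node is tried in turn, and a failure is logged rather than propagated so one unreachable datanode cannot block teardown of the rest. A dependency-free restatement under illustrative names:

```java
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.function.Consumer;

final class BestEffortTeardownSketch {
  interface NodeCall { void destroyOn(String node) throws IOException; }

  static void destroyOnAll(List<String> nodes, NodeCall call,
                           Consumer<String> warn) {
    for (String node : nodes) {
      try {
        call.destroyOn(node);
      } catch (IOException e) {
        // Mirror of the LOG.warn above: record the failure and keep going.
        warn.accept("destroy failed on " + node + ": " + e.getMessage());
      }
    }
  }

  public static void main(String[] args) {
    destroyOnAll(Arrays.asList("dn1", "dn2", "dn3"),
        node -> {
          if (node.equals("dn2")) {
            throw new IOException("timeout");
          }
        },
        System.err::println);
  }
}
```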
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
deleted file mode 100644
index 0964f6d..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
+++ /dev/null
@@ -1,469 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
-import org.apache.hadoop.hdds.utils.Scheduler;
-import org.apache.ratis.grpc.GrpcTlsConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.Set;
-import java.util.Collection;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import static org.apache.hadoop.hdds.scm
-    .ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm
-    .ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.ozone.OzoneConsts.SCM_PIPELINE_DB;
-
-/**
- * Implements the API needed for management of pipelines. All the write operations
- * for pipelines must come via PipelineManager. It synchronises all write
- * and read operations via a ReadWriteLock.
- */
-public class SCMPipelineManager implements PipelineManager {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SCMPipelineManager.class);
-
-  private final ReadWriteLock lock;
-  private final PipelineFactory pipelineFactory;
-  private final PipelineStateManager stateManager;
-  private final BackgroundPipelineCreator backgroundPipelineCreator;
-  private Scheduler scheduler;
-  private MetadataStore pipelineStore;
-
-  private final EventPublisher eventPublisher;
-  private final NodeManager nodeManager;
-  private final SCMPipelineMetrics metrics;
-  private final Configuration conf;
-  // Pipeline Manager MXBean
-  private ObjectName pmInfoBean;
-  private GrpcTlsConfig grpcTlsConfig;
-
-  public SCMPipelineManager(Configuration conf, NodeManager nodeManager,
-      EventPublisher eventPublisher, GrpcTlsConfig grpcTlsConfig)
-      throws IOException {
-    this.lock = new ReentrantReadWriteLock();
-    this.conf = conf;
-    this.stateManager = new PipelineStateManager(conf);
-    this.pipelineFactory = new PipelineFactory(nodeManager, stateManager,
-        conf, grpcTlsConfig);
-    // TODO: See if thread priority needs to be set for these threads
-    scheduler = new Scheduler("RatisPipelineUtilsThread", false, 1);
-    this.backgroundPipelineCreator =
-        new BackgroundPipelineCreator(this, scheduler, conf);
-    int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
-        OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-    final File metaDir = ServerUtils.getScmDbDir(conf);
-    final File pipelineDBPath = new File(metaDir, SCM_PIPELINE_DB);
-    this.pipelineStore =
-        MetadataStoreBuilder.newBuilder()
-            .setCreateIfMissing(true)
-            .setConf(conf)
-            .setDbFile(pipelineDBPath)
-            .setCacheSize(cacheSize * OzoneConsts.MB)
-            .build();
-    this.eventPublisher = eventPublisher;
-    this.nodeManager = nodeManager;
-    this.metrics = SCMPipelineMetrics.create();
-    this.pmInfoBean = MBeans.register("SCMPipelineManager",
-        "SCMPipelineManagerInfo", this);
-    initializePipelineState();
-    this.grpcTlsConfig = grpcTlsConfig;
-  }
-
-  public PipelineStateManager getStateManager() {
-    return stateManager;
-  }
-
-  @VisibleForTesting
-  public void setPipelineProvider(ReplicationType replicationType,
-                                  PipelineProvider provider) {
-    pipelineFactory.setProvider(replicationType, provider);
-  }
-
-  private void initializePipelineState() throws IOException {
-    if (pipelineStore.isEmpty()) {
-      LOG.info("No pipeline exists in current db");
-      return;
-    }
-    List<Map.Entry<byte[], byte[]>> pipelines =
-        pipelineStore.getSequentialRangeKVs(null, Integer.MAX_VALUE,
-            (MetadataKeyFilters.MetadataKeyFilter[])null);
-
-    for (Map.Entry<byte[], byte[]> entry : pipelines) {
-      HddsProtos.Pipeline.Builder pipelineBuilder = HddsProtos.Pipeline
-          .newBuilder(HddsProtos.Pipeline.PARSER.parseFrom(entry.getValue()));
-      Pipeline pipeline = Pipeline.getFromProtobuf(pipelineBuilder.setState(
-          HddsProtos.PipelineState.PIPELINE_ALLOCATED).build());
-      Preconditions.checkNotNull(pipeline);
-      stateManager.addPipeline(pipeline);
-      nodeManager.addPipeline(pipeline);
-    }
-  }
-
-  @Override
-  public synchronized Pipeline createPipeline(
-      ReplicationType type, ReplicationFactor factor) throws IOException {
-    lock.writeLock().lock();
-    try {
-      Pipeline pipeline = pipelineFactory.create(type, factor);
-      pipelineStore.put(pipeline.getId().getProtobuf().toByteArray(),
-          pipeline.getProtobufMessage().toByteArray());
-      stateManager.addPipeline(pipeline);
-      nodeManager.addPipeline(pipeline);
-      metrics.incNumPipelineCreated();
-      metrics.createPerPipelineMetrics(pipeline);
-      return pipeline;
-    } catch (InsufficientDatanodesException idEx) {
-      throw idEx;
-    } catch (IOException ex) {
-      metrics.incNumPipelineCreationFailed();
-      throw ex;
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public Pipeline createPipeline(ReplicationType type, ReplicationFactor factor,
-                                 List<DatanodeDetails> nodes) {
-    // This is mostly used to create dummy pipelines for SimplePipelines.
-    // Metrics are not updated for SimplePipelines.
-    lock.writeLock().lock();
-    try {
-      return pipelineFactory.create(type, factor, nodes);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public Pipeline getPipeline(PipelineID pipelineID)
-      throws PipelineNotFoundException {
-    lock.readLock().lock();
-    try {
-      return stateManager.getPipeline(pipelineID);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  @Override
-  public List<Pipeline> getPipelines() {
-    lock.readLock().lock();
-    try {
-      return stateManager.getPipelines();
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  @Override
-  public List<Pipeline> getPipelines(ReplicationType type) {
-    lock.readLock().lock();
-    try {
-      return stateManager.getPipelines(type);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  @Override
-  public List<Pipeline> getPipelines(ReplicationType type,
-      ReplicationFactor factor) {
-    lock.readLock().lock();
-    try {
-      return stateManager.getPipelines(type, factor);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  @Override
-  public List<Pipeline> getPipelines(ReplicationType type,
-      ReplicationFactor factor, Pipeline.PipelineState state) {
-    lock.readLock().lock();
-    try {
-      return stateManager.getPipelines(type, factor, state);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  @Override
-  public List<Pipeline> getPipelines(ReplicationType type,
-      ReplicationFactor factor, Pipeline.PipelineState state,
-      Collection<DatanodeDetails> excludeDns,
-      Collection<PipelineID> excludePipelines) {
-    lock.readLock().lock();
-    try {
-      return stateManager
-          .getPipelines(type, factor, state, excludeDns, excludePipelines);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  @Override
-  public void addContainerToPipeline(PipelineID pipelineID,
-      ContainerID containerID) throws IOException {
-    lock.writeLock().lock();
-    try {
-      stateManager.addContainerToPipeline(pipelineID, containerID);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public void removeContainerFromPipeline(PipelineID pipelineID,
-      ContainerID containerID) throws IOException {
-    lock.writeLock().lock();
-    try {
-      stateManager.removeContainerFromPipeline(pipelineID, containerID);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public NavigableSet<ContainerID> getContainersInPipeline(
-      PipelineID pipelineID) throws IOException {
-    lock.readLock().lock();
-    try {
-      return stateManager.getContainers(pipelineID);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  @Override
-  public int getNumberOfContainers(PipelineID pipelineID) throws IOException {
-    return stateManager.getNumberOfContainers(pipelineID);
-  }
-
-  @Override
-  public void openPipeline(PipelineID pipelineId) throws IOException {
-    lock.writeLock().lock();
-    try {
-      Pipeline pipeline = stateManager.openPipeline(pipelineId);
-      metrics.createPerPipelineMetrics(pipeline);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Finalizes a pipeline in the SCM: removes the pipeline and makes an RPC
-   * call to destroy it on the datanodes, either immediately or after a
-   * timeout, depending on the onTimeout parameter.
-   *
-   * @param pipeline        - Pipeline to be destroyed
-   * @param onTimeout       - if true, the pipeline is destroyed on the
-   *                        datanodes only after the timeout elapses
-   * @throws IOException
-   */
-  @Override
-  public void finalizeAndDestroyPipeline(Pipeline pipeline, boolean onTimeout)
-      throws IOException {
-    LOG.info("destroying pipeline:{}", pipeline);
-    finalizePipeline(pipeline.getId());
-    if (onTimeout) {
-      long pipelineDestroyTimeoutInMillis =
-          conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT,
-              ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT,
-              TimeUnit.MILLISECONDS);
-      scheduler.schedule(() -> destroyPipeline(pipeline),
-          pipelineDestroyTimeoutInMillis, TimeUnit.MILLISECONDS, LOG,
-          String.format("Destroy pipeline failed for pipeline:%s", pipeline));
-    } else {
-      destroyPipeline(pipeline);
-    }
-  }
-
-  @Override
-  public Map<String, Integer> getPipelineInfo() {
-    final Map<String, Integer> pipelineInfo = new HashMap<>();
-    for (Pipeline.PipelineState state : Pipeline.PipelineState.values()) {
-      pipelineInfo.put(state.toString(), 0);
-    }
-    stateManager.getPipelines().forEach(pipeline ->
-        pipelineInfo.computeIfPresent(
-            pipeline.getPipelineState().toString(), (k, v) -> v + 1));
-    return pipelineInfo;
-  }
-
-  /**
-   * Schedules a fixed interval job to create pipelines.
-   */
-  @Override
-  public void startPipelineCreator() {
-    backgroundPipelineCreator.startFixedIntervalPipelineCreator();
-  }
-
-  /**
-   * Triggers pipeline creation after the specified time.
-   */
-  @Override
-  public void triggerPipelineCreation() {
-    backgroundPipelineCreator.triggerPipelineCreation();
-  }
-
-  /**
-   * Activates a dormant pipeline.
-   *
-   * @param pipelineID ID of the pipeline to activate.
-   * @throws IOException in case of any Exception
-   */
-  @Override
-  public void activatePipeline(PipelineID pipelineID)
-      throws IOException {
-    stateManager.activatePipeline(pipelineID);
-  }
-
-  /**
-   * Deactivates an active pipeline.
-   *
-   * @param pipelineID ID of the pipeline to deactivate.
-   * @throws IOException in case of any Exception
-   */
-  @Override
-  public void deactivatePipeline(PipelineID pipelineID)
-      throws IOException {
-    stateManager.deactivatePipeline(pipelineID);
-  }
-
-  /**
-   * Moves the pipeline to CLOSED state and sends close container command for
-   * all the containers in the pipeline.
-   *
-   * @param pipelineId - ID of the pipeline to be moved to CLOSED state.
-   * @throws IOException
-   */
-  private void finalizePipeline(PipelineID pipelineId) throws IOException {
-    lock.writeLock().lock();
-    try {
-      stateManager.finalizePipeline(pipelineId);
-      Set<ContainerID> containerIDs = stateManager.getContainers(pipelineId);
-      for (ContainerID containerID : containerIDs) {
-        eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID);
-      }
-      metrics.removePipelineMetrics(pipelineId);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Removes the pipeline from the SCM. For Ratis pipelines, sends a Ratis
-   * command to destroy the pipeline on all the datanodes.
-   *
-   * @param pipeline        - Pipeline to be destroyed
-   * @throws IOException
-   */
-  private void destroyPipeline(Pipeline pipeline) throws IOException {
-    RatisPipelineUtils.destroyPipeline(pipeline, conf, grpcTlsConfig);
-    // remove the pipeline from the pipeline manager
-    removePipeline(pipeline.getId());
-    triggerPipelineCreation();
-  }
-
-  /**
-   * Removes the pipeline from the db and pipeline state map.
-   *
-   * @param pipelineId - ID of the pipeline to be removed
-   * @throws IOException
-   */
-  private void removePipeline(PipelineID pipelineId) throws IOException {
-    lock.writeLock().lock();
-    try {
-      pipelineStore.delete(pipelineId.getProtobuf().toByteArray());
-      Pipeline pipeline = stateManager.removePipeline(pipelineId);
-      nodeManager.removePipeline(pipeline);
-      metrics.incNumPipelineDestroyed();
-    } catch (IOException ex) {
-      metrics.incNumPipelineDestroyFailed();
-      throw ex;
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public void incNumBlocksAllocatedMetric(PipelineID id) {
-    metrics.incNumBlocksAllocated(id);
-  }
-
-  @Override
-  public GrpcTlsConfig getGrpcTlsConfig() {
-    return grpcTlsConfig;
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (scheduler != null) {
-      scheduler.close();
-      scheduler = null;
-    }
-
-    if (pipelineStore != null) {
-      pipelineStore.close();
-      pipelineStore = null;
-    }
-    if (pmInfoBean != null) {
-      MBeans.unregister(this.pmInfoBean);
-      pmInfoBean = null;
-    }
-    if (metrics != null) {
-      metrics.unRegister();
-    }
-    // shutdown pipeline provider.
-    pipelineFactory.shutdown();
-  }
-}
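For reference, the manager above takes the shared read lock for every accessor and the exclusive write lock for every mutation, so readers never observe a partially applied change. A minimal, self-contained sketch of that guard pattern, using hypothetical names (LockedRegistry, entries) rather than the SCM types:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public final class LockedRegistry<K, V> {
  private final ReadWriteLock lock = new ReentrantReadWriteLock();
  private final Map<K, V> entries = new HashMap<>();

  // Mutations take the write lock so readers never see partial updates.
  public void put(K key, V value) {
    lock.writeLock().lock();
    try {
      entries.put(key, value);
    } finally {
      lock.writeLock().unlock();
    }
  }

  // Reads take the shared read lock, allowing concurrent lookups.
  public V get(K key) {
    lock.readLock().lock();
    try {
      return entries.get(key);
    } finally {
      lock.readLock().unlock();
    }
  }
}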
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java
deleted file mode 100644
index d0f7f6e..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.Interns;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * This class maintains Pipeline related metrics.
- */
-@InterfaceAudience.Private
-@Metrics(about = "SCM PipelineManager Metrics", context = "ozone")
-public final class SCMPipelineMetrics implements MetricsSource {
-
-  private static final String SOURCE_NAME =
-      SCMPipelineMetrics.class.getSimpleName();
-
-  private MetricsRegistry registry;
-
-  private @Metric MutableCounterLong numPipelineCreated;
-  private @Metric MutableCounterLong numPipelineCreationFailed;
-  private @Metric MutableCounterLong numPipelineDestroyed;
-  private @Metric MutableCounterLong numPipelineDestroyFailed;
-  private @Metric MutableCounterLong numPipelineReportProcessed;
-  private @Metric MutableCounterLong numPipelineReportProcessingFailed;
-  private Map<PipelineID, MutableCounterLong> numBlocksAllocated;
-
-  /** Private constructor. */
-  private SCMPipelineMetrics() {
-    this.registry = new MetricsRegistry(SOURCE_NAME);
-    numBlocksAllocated = new ConcurrentHashMap<>();
-  }
-
-  /**
-   * Creates and returns an SCMPipelineMetrics instance.
-   *
-   * @return SCMPipelineMetrics
-   */
-  public static SCMPipelineMetrics create() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME, "SCM PipelineManager Metrics",
-        new SCMPipelineMetrics());
-  }
-
-  /**
-   * Unregister the metrics instance.
-   */
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE_NAME);
-  }
-
-  @Override
-  @SuppressWarnings("SuspiciousMethodCalls")
-  public void getMetrics(MetricsCollector collector, boolean all) {
-    MetricsRecordBuilder recordBuilder = collector.addRecord(SOURCE_NAME);
-    numPipelineCreated.snapshot(recordBuilder, true);
-    numPipelineCreationFailed.snapshot(recordBuilder, true);
-    numPipelineDestroyed.snapshot(recordBuilder, true);
-    numPipelineDestroyFailed.snapshot(recordBuilder, true);
-    numPipelineReportProcessed.snapshot(recordBuilder, true);
-    numPipelineReportProcessingFailed.snapshot(recordBuilder, true);
-    numBlocksAllocated
-        .forEach((pid, metric) -> metric.snapshot(recordBuilder, true));
-  }
-
-  void createPerPipelineMetrics(Pipeline pipeline) {
-    numBlocksAllocated.put(pipeline.getId(), new MutableCounterLong(Interns
-        .info(getBlockAllocationMetricName(pipeline),
-            "Number of blocks allocated in pipeline " + pipeline.getId()), 0L));
-  }
-
-  public static String getBlockAllocationMetricName(Pipeline pipeline) {
-    return "NumBlocksAllocated-" + pipeline.getType() + "-" + pipeline
-        .getFactor() + "-" + pipeline.getId().getId();
-  }
-
-  void removePipelineMetrics(PipelineID pipelineID) {
-    numBlocksAllocated.remove(pipelineID);
-  }
-
-  /**
-   * Increments number of blocks allocated for the pipeline.
-   */
-  void incNumBlocksAllocated(PipelineID pipelineID) {
-    Optional.ofNullable(numBlocksAllocated.get(pipelineID)).ifPresent(
-        MutableCounterLong::incr);
-  }
-
-  /**
-   * Increments the successful pipeline creation count.
-   */
-  void incNumPipelineCreated() {
-    numPipelineCreated.incr();
-  }
-
-  /**
-   * Increments the failed pipeline creation count.
-   */
-  void incNumPipelineCreationFailed() {
-    numPipelineCreationFailed.incr();
-  }
-
-  /**
-   * Increments the successful pipeline destroy count.
-   */
-  void incNumPipelineDestroyed() {
-    numPipelineDestroyed.incr();
-  }
-
-  /**
-   * Increments the failed pipeline destroy count.
-   */
-  void incNumPipelineDestroyFailed() {
-    numPipelineDestroyFailed.incr();
-  }
-
-  /**
-   * Increments the processed pipeline report count.
-   */
-  void incNumPipelineReportProcessed() {
-    numPipelineReportProcessed.incr();
-  }
-
-  /**
-   * Increments the failed pipeline report processing count.
-   */
-  void incNumPipelineReportProcessingFailed() {
-    numPipelineReportProcessingFailed.incr();
-  }
-}
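The per-pipeline block-allocation counters above boil down to a concurrent map of counters keyed by pipeline ID. A stripped-down sketch using only JDK types (PerKeyCounters and its methods are illustrative stand-ins, not Hadoop metrics APIs):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

public final class PerKeyCounters<K> {
  private final Map<K, LongAdder> counters = new ConcurrentHashMap<>();

  // Create the counter when the key is first registered, as
  // createPerPipelineMetrics does for a new pipeline.
  public void register(K key) {
    counters.putIfAbsent(key, new LongAdder());
  }

  // Increment only if the key is registered, mirroring the null-guarded
  // increment in incNumBlocksAllocated above.
  public void increment(K key) {
    LongAdder counter = counters.get(key);
    if (counter != null) {
      counter.increment();
    }
  }

  public void remove(K key) {
    counters.remove(key);
  }

  public long value(K key) {
    LongAdder counter = counters.get(key);
    return counter == null ? 0L : counter.sum();
  }
}

LongAdder is used here only for cheap concurrent increments; the real class instead wires MutableCounterLong instances into the Hadoop metrics system so they are snapshotted by getMetrics.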
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java
deleted file mode 100644
index ab98dfa..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * Implements the API for creating stand-alone pipelines.
- */
-public class SimplePipelineProvider implements PipelineProvider {
-
-  private final NodeManager nodeManager;
-
-  public SimplePipelineProvider(NodeManager nodeManager) {
-    this.nodeManager = nodeManager;
-  }
-
-  @Override
-  public Pipeline create(ReplicationFactor factor) throws IOException {
-    List<DatanodeDetails> dns =
-        nodeManager.getNodes(NodeState.HEALTHY);
-    if (dns.size() < factor.getNumber()) {
-      String e = String
-          .format("Cannot create pipeline of factor %d using %d nodes.",
-              factor.getNumber(), dns.size());
-      throw new IOException(e);
-    }
-
-    Collections.shuffle(dns);
-    return Pipeline.newBuilder()
-        .setId(PipelineID.randomId())
-        .setState(PipelineState.OPEN)
-        .setType(ReplicationType.STAND_ALONE)
-        .setFactor(factor)
-        .setNodes(dns.subList(0, factor.getNumber()))
-        .build();
-  }
-
-  @Override
-  public Pipeline create(ReplicationFactor factor,
-      List<DatanodeDetails> nodes) {
-    return Pipeline.newBuilder()
-        .setId(PipelineID.randomId())
-        .setState(PipelineState.OPEN)
-        .setType(ReplicationType.STAND_ALONE)
-        .setFactor(factor)
-        .setNodes(nodes)
-        .build();
-  }
-
-  @Override
-  public void shutdown() {
-    // Do nothing.
-  }
-}
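Node selection in the provider above is a shuffle of the healthy-node list followed by taking the first factor-many entries. A self-contained sketch of that step, assuming a hypothetical pickNodes helper:

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public final class NodeSelection {
  // Randomly pick 'count' distinct nodes, failing when too few are available.
  static <T> List<T> pickNodes(List<T> healthy, int count) throws IOException {
    if (healthy.size() < count) {
      throw new IOException(String.format(
          "Cannot pick %d nodes from %d available.", count, healthy.size()));
    }
    List<T> shuffled = new ArrayList<>(healthy);  // avoid mutating the input
    Collections.shuffle(shuffled);
    return shuffled.subList(0, count);
  }

  public static void main(String[] args) throws IOException {
    System.out.println(pickNodes(Arrays.asList("dn1", "dn2", "dn3"), 2));
  }
}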
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
deleted file mode 100644
index 51adc88..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.pipeline;
-/**
- * Ozone supports different kinds of pipelines: a replication pipeline can
- * be built on Ratis, the simple stand-alone protocol, or some other
- * protocol. All pipeline managers, the entities in charge of pipelines,
- * reside in this package.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
deleted file mode 100644
index 2d14fa6..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.protocol;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto.ResponseCode;
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetOMCertRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityResponse;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.Status;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
-import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
-import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
-
-/**
- * This class is the server-side translator that forwards requests received on
- * {@link SCMSecurityProtocolPB} to the {@link
- * SCMSecurityProtocol} server implementation.
- */
-public class SCMSecurityProtocolServerSideTranslatorPB
-    implements SCMSecurityProtocolPB {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SCMSecurityProtocolServerSideTranslatorPB.class);
-
-  private final SCMSecurityProtocol impl;
-
-  private OzoneProtocolMessageDispatcher<SCMSecurityRequest,
-      SCMSecurityResponse>
-      dispatcher;
-
-  public SCMSecurityProtocolServerSideTranslatorPB(SCMSecurityProtocol impl,
-      ProtocolMessageMetrics messageMetrics) {
-    this.impl = impl;
-    this.dispatcher =
-        new OzoneProtocolMessageDispatcher<>("ScmSecurityProtocol",
-            messageMetrics, LOG);
-  }
-
-  @Override
-  public SCMSecurityResponse submitRequest(RpcController controller,
-      SCMSecurityRequest request) throws ServiceException {
-    return dispatcher.processRequest(request, this::processRequest,
-        request.getCmdType(), request.getTraceID());
-  }
-
-  public SCMSecurityResponse processRequest(SCMSecurityRequest request)
-      throws ServiceException {
-    try {
-      switch (request.getCmdType()) {
-      case GetCertificate:
-        return SCMSecurityResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setGetCertResponseProto(
-                getCertificate(request.getGetCertificateRequest()))
-            .build();
-      case GetCACertificate:
-        return SCMSecurityResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setGetCertResponseProto(
-                getCACertificate(request.getGetCACertificateRequest()))
-            .build();
-      case GetOMCertificate:
-        return SCMSecurityResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setGetCertResponseProto(
-                getOMCertificate(request.getGetOMCertRequest()))
-            .build();
-      case GetDataNodeCertificate:
-        return SCMSecurityResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setGetCertResponseProto(
-                getDataNodeCertificate(request.getGetDataNodeCertRequest()))
-            .build();
-      default:
-        throw new IllegalArgumentException(
-            "Unknown request type: " + request.getCmdType());
-      }
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  /**
-   * Gets the SCM-signed certificate for a DataNode.
-   *
-   * @param request certificate request from the DataNode
-   * @return SCMGetCertResponseProto containing the signed certificate.
-   */
-  public SCMGetCertResponseProto getDataNodeCertificate(
-      SCMGetDataNodeCertRequestProto request)
-      throws IOException {
-
-    String certificate = impl
-        .getDataNodeCertificate(request.getDatanodeDetails(),
-            request.getCSR());
-    SCMGetCertResponseProto.Builder builder =
-        SCMGetCertResponseProto
-            .newBuilder()
-            .setResponseCode(ResponseCode.success)
-            .setX509Certificate(certificate)
-            .setX509CACertificate(impl.getCACertificate());
-
-    return builder.build();
-
-  }
-
-  /**
-   * Gets the SCM-signed certificate for OzoneManager.
-   *
-   * @param request certificate request from the OzoneManager
-   * @return SCMGetCertResponseProto containing the signed certificate.
-   */
-  public SCMGetCertResponseProto getOMCertificate(
-      SCMGetOMCertRequestProto request) throws IOException {
-    String certificate = impl
-        .getOMCertificate(request.getOmDetails(),
-            request.getCSR());
-    SCMGetCertResponseProto.Builder builder =
-        SCMGetCertResponseProto
-            .newBuilder()
-            .setResponseCode(ResponseCode.success)
-            .setX509Certificate(certificate)
-            .setX509CACertificate(impl.getCACertificate());
-    return builder.build();
-
-  }
-
-  public SCMGetCertResponseProto getCertificate(
-      SCMGetCertificateRequestProto request) throws IOException {
-
-    String certificate = impl.getCertificate(request.getCertSerialId());
-    SCMGetCertResponseProto.Builder builder =
-        SCMGetCertResponseProto
-            .newBuilder()
-            .setResponseCode(ResponseCode.success)
-            .setX509Certificate(certificate);
-    return builder.build();
-
-  }
-
-  public SCMGetCertResponseProto getCACertificate(
-      SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto request)
-      throws IOException {
-
-    String certificate = impl.getCACertificate();
-    SCMGetCertResponseProto.Builder builder =
-        SCMGetCertResponseProto
-            .newBuilder()
-            .setResponseCode(ResponseCode.success)
-            .setX509Certificate(certificate);
-    return builder.build();
-
-  }
-
-}
\ No newline at end of file
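The processRequest method above is a straight switch on the request's command type, with each arm building a typed response and checked IO failures wrapped for the RPC layer. A generic sketch of that dispatch shape (CmdType, Request, and process are stand-ins, not the protobuf-generated types):

import java.io.IOException;

public final class CommandDispatch {
  enum CmdType { GET_CERT, GET_CA_CERT }

  static final class Request {
    final CmdType cmdType;
    Request(CmdType cmdType) {
      this.cmdType = cmdType;
    }
  }

  // Each command type maps to one handler; unknown types fail fast, just as
  // the translator above throws IllegalArgumentException.
  static String process(Request request) throws IOException {
    switch (request.cmdType) {
    case GET_CERT:
      return "signed-certificate";
    case GET_CA_CERT:
      return "ca-certificate";
    default:
      throw new IllegalArgumentException(
          "Unknown request type: " + request.cmdType);
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(process(new Request(CmdType.GET_CA_CERT)));
  }
}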
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
deleted file mode 100644
index b6ce067..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.protocol;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateBlockResponse;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteKeyBlocksResultProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SortDatanodesRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SortDatanodesResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Status;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is the server-side translator that forwards requests received on
- * {@link ScmBlockLocationProtocolPB} to the
- * {@link ScmBlockLocationProtocol} server implementation.
- */
-@InterfaceAudience.Private
-public final class ScmBlockLocationProtocolServerSideTranslatorPB
-    implements ScmBlockLocationProtocolPB {
-
-  private final ScmBlockLocationProtocol impl;
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(ScmBlockLocationProtocolServerSideTranslatorPB.class);
-
-  private final OzoneProtocolMessageDispatcher<SCMBlockLocationRequest,
-      SCMBlockLocationResponse>
-      dispatcher;
-
-  /**
-   * Creates a new ScmBlockLocationProtocolServerSideTranslatorPB.
-   *
-   * @param impl {@link ScmBlockLocationProtocol} server implementation
-   * @param metrics metrics for the protocol messages
-   */
-  public ScmBlockLocationProtocolServerSideTranslatorPB(
-      ScmBlockLocationProtocol impl,
-      ProtocolMessageMetrics metrics) throws IOException {
-    this.impl = impl;
-    dispatcher = new OzoneProtocolMessageDispatcher<>(
-        "BlockLocationProtocol", metrics, LOG);
-
-  }
-
-  private SCMBlockLocationResponse.Builder createSCMBlockResponse(
-      ScmBlockLocationProtocolProtos.Type cmdType,
-      String traceID) {
-    return SCMBlockLocationResponse.newBuilder()
-        .setCmdType(cmdType)
-        .setTraceID(traceID);
-  }
-
-  @Override
-  public SCMBlockLocationResponse send(RpcController controller,
-      SCMBlockLocationRequest request) throws ServiceException {
-    return dispatcher.processRequest(
-        request,
-        this::processMessage,
-        request.getCmdType(),
-        request.getTraceID());
-  }
-
-  private SCMBlockLocationResponse processMessage(
-      SCMBlockLocationRequest request) throws ServiceException {
-    SCMBlockLocationResponse.Builder response = createSCMBlockResponse(
-        request.getCmdType(),
-        request.getTraceID());
-    response.setSuccess(true);
-    response.setStatus(Status.OK);
-
-    try {
-      switch (request.getCmdType()) {
-      case AllocateScmBlock:
-        response.setAllocateScmBlockResponse(
-            allocateScmBlock(request.getAllocateScmBlockRequest()));
-        break;
-      case DeleteScmKeyBlocks:
-        response.setDeleteScmKeyBlocksResponse(
-            deleteScmKeyBlocks(request.getDeleteScmKeyBlocksRequest()));
-        break;
-      case GetScmInfo:
-        response.setGetScmInfoResponse(
-            getScmInfo(request.getGetScmInfoRequest()));
-        break;
-      case SortDatanodes:
-        response.setSortDatanodesResponse(
-            sortDatanodes(request.getSortDatanodesRequest()));
-        break;
-      default:
-        // Should never happen
-        throw new IOException("Unknown Operation " + request.getCmdType() +
-            " in ScmBlockLocationProtocol");
-      }
-    } catch (IOException e) {
-      response.setSuccess(false);
-      response.setStatus(exceptionToResponseStatus(e));
-      if (e.getMessage() != null) {
-        response.setMessage(e.getMessage());
-      }
-    }
-
-    return response.build();
-  }
-
-  private Status exceptionToResponseStatus(IOException ex) {
-    if (ex instanceof SCMException) {
-      return Status.values()[((SCMException) ex).getResult().ordinal()];
-    } else {
-      return Status.INTERNAL_ERROR;
-    }
-  }
-
-  public AllocateScmBlockResponseProto allocateScmBlock(
-      AllocateScmBlockRequestProto request)
-      throws IOException {
-    List<AllocatedBlock> allocatedBlocks =
-        impl.allocateBlock(request.getSize(),
-            request.getNumBlocks(), request.getType(),
-            request.getFactor(), request.getOwner(),
-            ExcludeList.getFromProtoBuf(request.getExcludeList()));
-
-    AllocateScmBlockResponseProto.Builder builder =
-        AllocateScmBlockResponseProto.newBuilder();
-
-    if (allocatedBlocks.size() < request.getNumBlocks()) {
-      throw new SCMException("Allocated " + allocatedBlocks.size() +
-          " blocks. Requested " + request.getNumBlocks() + " blocks",
-          SCMException.ResultCodes.FAILED_TO_ALLOCATE_ENOUGH_BLOCKS);
-    }
-    for (AllocatedBlock block : allocatedBlocks) {
-      builder.addBlocks(AllocateBlockResponse.newBuilder()
-          .setContainerBlockID(block.getBlockID().getProtobuf())
-          .setPipeline(block.getPipeline().getProtobufMessage()));
-    }
-
-    return builder.build();
-  }
-
-  public DeleteScmKeyBlocksResponseProto deleteScmKeyBlocks(
-      DeleteScmKeyBlocksRequestProto req)
-      throws IOException {
-    DeleteScmKeyBlocksResponseProto.Builder resp =
-        DeleteScmKeyBlocksResponseProto.newBuilder();
-
-    List<BlockGroup> infoList = req.getKeyBlocksList().stream()
-        .map(BlockGroup::getFromProto).collect(Collectors.toList());
-    final List<DeleteBlockGroupResult> results =
-        impl.deleteKeyBlocks(infoList);
-    for (DeleteBlockGroupResult result : results) {
-      DeleteKeyBlocksResultProto.Builder deleteResult =
-          DeleteKeyBlocksResultProto
-              .newBuilder()
-              .setObjectKey(result.getObjectKey())
-              .addAllBlockResults(result.getBlockResultProtoList());
-      resp.addResults(deleteResult.build());
-    }
-    return resp.build();
-  }
-
-  public HddsProtos.GetScmInfoResponseProto getScmInfo(
-      HddsProtos.GetScmInfoRequestProto req)
-      throws IOException {
-    ScmInfo scmInfo = impl.getScmInfo();
-    return HddsProtos.GetScmInfoResponseProto.newBuilder()
-        .setClusterId(scmInfo.getClusterId())
-        .setScmId(scmInfo.getScmId())
-        .build();
-  }
-
-  public SortDatanodesResponseProto sortDatanodes(
-      SortDatanodesRequestProto request) throws ServiceException {
-    SortDatanodesResponseProto.Builder resp =
-        SortDatanodesResponseProto.newBuilder();
-    try {
-      List<String> nodeList = request.getNodeNetworkNameList();
-      final List<DatanodeDetails> results =
-          impl.sortDatanodes(nodeList, request.getClient());
-      if (results != null && !results.isEmpty()) {
-        results.forEach(dn -> resp.addNode(dn.getProtoBufMessage()));
-      }
-      return resp.build();
-    } catch (IOException ex) {
-      throw new ServiceException(ex);
-    }
-  }
-}
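Note that processMessage above reports failures in-band: success, status, and message travel inside the response rather than as an RPC exception. A minimal sketch of that error-envelope shape, with stand-in Request-handler, Response, and Status types:

import java.io.IOException;

public final class ErrorEnvelope {
  enum Status { OK, INTERNAL_ERROR }

  static final class Response {
    final Status status;
    final String message;
    Response(Status status, String message) {
      this.status = status;
      this.message = message;
    }
  }

  interface Handler {
    String handle() throws IOException;
  }

  // Run the handler; success and failure both come back as a Response, so
  // the RPC layer never sees the exception itself.
  static Response dispatch(Handler handler) {
    try {
      return new Response(Status.OK, handler.handle());
    } catch (IOException e) {
      return new Response(Status.INTERNAL_ERROR,
          e.getMessage() == null ? "" : e.getMessage());
    }
  }

  public static void main(String[] args) {
    Response ok = dispatch(() -> "allocated");
    Response failed = dispatch(() -> { throw new IOException("no space"); });
    System.out.println(ok.status + " / " + failed.status);
  }
}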
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
deleted file mode 100644
index 0d2f470..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,393 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.protocol;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ActivatePipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ActivatePipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ClosePipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ClosePipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse.Status;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerResponseProto;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
-import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is the server-side translator that forwards requests received on
- * {@link StorageContainerLocationProtocolPB} to the
- * {@link StorageContainerLocationProtocol} server implementation.
- */
-@InterfaceAudience.Private
-public final class StorageContainerLocationProtocolServerSideTranslatorPB
-    implements StorageContainerLocationProtocolPB {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(
-          StorageContainerLocationProtocolServerSideTranslatorPB.class);
-
-  private final StorageContainerLocationProtocol impl;
-
-  private OzoneProtocolMessageDispatcher<ScmContainerLocationRequest,
-      ScmContainerLocationResponse>
-      dispatcher;
-
-  /**
-   * Creates a new StorageContainerLocationProtocolServerSideTranslatorPB.
-   *
-   * @param impl            {@link StorageContainerLocationProtocol} server
-   *                        implementation
-   * @param protocolMetrics metrics for the protocol messages
-   */
-  public StorageContainerLocationProtocolServerSideTranslatorPB(
-      StorageContainerLocationProtocol impl,
-      ProtocolMessageMetrics protocolMetrics) throws IOException {
-    this.impl = impl;
-    this.dispatcher =
-        new OzoneProtocolMessageDispatcher<>("ScmContainerLocation",
-            protocolMetrics, LOG);
-  }
-
-  @Override
-  public ScmContainerLocationResponse submitRequest(RpcController controller,
-      ScmContainerLocationRequest request) throws ServiceException {
-    return dispatcher
-        .processRequest(request, this::processRequest, request.getCmdType(),
-            request.getTraceID());
-  }
-
-  public ScmContainerLocationResponse processRequest(
-      ScmContainerLocationRequest request) throws ServiceException {
-    try {
-      switch (request.getCmdType()) {
-      case AllocateContainer:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setContainerResponse(
-                allocateContainer(request.getContainerRequest()))
-            .build();
-      case GetContainer:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setGetContainerResponse(
-                getContainer(request.getGetContainerRequest()))
-            .build();
-      case GetContainerWithPipeline:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setGetContainerWithPipelineResponse(getContainerWithPipeline(
-                request.getGetContainerWithPipelineRequest()))
-            .build();
-      case ListContainer:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setScmListContainerResponse(listContainer(
-                request.getScmListContainerRequest()))
-            .build();
-      case QueryNode:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setNodeQueryResponse(queryNode(request.getNodeQueryRequest()))
-            .build();
-      case NotifyObjectStageChange:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setObjectStageChangeResponse(notifyObjectStageChange(
-                request.getObjectStageChangeRequest()))
-            .build();
-      case ListPipelines:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setListPipelineResponse(listPipelines(
-                request.getListPipelineRequest()))
-            .build();
-      case ActivatePipeline:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setActivatePipelineResponse(activatePipeline(
-                request.getActivatePipelineRequest()))
-            .build();
-      case GetScmInfo:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setGetScmInfoResponse(getScmInfo(
-                request.getGetScmInfoRequest()))
-            .build();
-      case InSafeMode:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setInSafeModeResponse(inSafeMode(
-                request.getInSafeModeRequest()))
-            .build();
-      case ForceExitSafeMode:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setForceExitSafeModeResponse(forceExitSafeMode(
-                request.getForceExitSafeModeRequest()))
-            .build();
-      case StartReplicationManager:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setStartReplicationManagerResponse(startReplicationManager(
-                request.getStartReplicationManagerRequest()))
-            .build();
-      case StopReplicationManager:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setStopReplicationManagerResponse(stopReplicationManager(
-                request.getStopReplicationManagerRequest()))
-            .build();
-      case GetReplicationManagerStatus:
-        return ScmContainerLocationResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setReplicationManagerStatusResponse(getReplicationManagerStatus(
-                request.getSeplicationManagerStatusRequest()))
-            .build();
-      default:
-        throw new IllegalArgumentException(
-            "Unknown command type: " + request.getCmdType());
-      }
-
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  public ContainerResponseProto allocateContainer(ContainerRequestProto request)
-      throws IOException {
-    ContainerWithPipeline containerWithPipeline = impl
-        .allocateContainer(request.getReplicationType(),
-            request.getReplicationFactor(), request.getOwner());
-    return ContainerResponseProto.newBuilder()
-        .setContainerWithPipeline(containerWithPipeline.getProtobuf())
-        .setErrorCode(ContainerResponseProto.Error.success)
-        .build();
-
-  }
-
-  public GetContainerResponseProto getContainer(
-      GetContainerRequestProto request) throws IOException {
-    ContainerInfo container = impl.getContainer(request.getContainerID());
-    return GetContainerResponseProto.newBuilder()
-        .setContainerInfo(container.getProtobuf())
-        .build();
-  }
-
-  public GetContainerWithPipelineResponseProto getContainerWithPipeline(
-      GetContainerWithPipelineRequestProto request)
-      throws IOException {
-    ContainerWithPipeline container = impl
-        .getContainerWithPipeline(request.getContainerID());
-    return GetContainerWithPipelineResponseProto.newBuilder()
-        .setContainerWithPipeline(container.getProtobuf())
-        .build();
-  }
-
-  public SCMListContainerResponseProto listContainer(
-      SCMListContainerRequestProto request) throws IOException {
-
-    long startContainerID = 0;
-    int count = -1;
-
-    // Arguments check.
-    if (request.hasStartContainerID()) {
-      // Start container ID is given.
-      startContainerID = request.getStartContainerID();
-    }
-    count = request.getCount();
-    List<ContainerInfo> containerList =
-        impl.listContainer(startContainerID, count);
-    SCMListContainerResponseProto.Builder builder =
-        SCMListContainerResponseProto.newBuilder();
-    for (ContainerInfo container : containerList) {
-      builder.addContainers(container.getProtobuf());
-    }
-    return builder.build();
-  }
-
-  public SCMDeleteContainerResponseProto deleteContainer(
-      SCMDeleteContainerRequestProto request)
-      throws IOException {
-    impl.deleteContainer(request.getContainerID());
-    return SCMDeleteContainerResponseProto.newBuilder().build();
-
-  }
-
-  public NodeQueryResponseProto queryNode(
-      StorageContainerLocationProtocolProtos.NodeQueryRequestProto request)
-      throws IOException {
-
-    HddsProtos.NodeState nodeState = request.getState();
-    List<HddsProtos.Node> datanodes = impl.queryNode(nodeState,
-        request.getScope(), request.getPoolName());
-    return NodeQueryResponseProto.newBuilder()
-        .addAllDatanodes(datanodes)
-        .build();
-
-  }
-
-  public ObjectStageChangeResponseProto notifyObjectStageChange(
-      ObjectStageChangeRequestProto request)
-      throws IOException {
-    impl.notifyObjectStageChange(request.getType(), request.getId(),
-        request.getOp(), request.getStage());
-    return ObjectStageChangeResponseProto.newBuilder().build();
-  }
-
-  public ListPipelineResponseProto listPipelines(
-      ListPipelineRequestProto request)
-      throws IOException {
-    ListPipelineResponseProto.Builder builder = ListPipelineResponseProto
-        .newBuilder();
-    List<Pipeline> pipelines = impl.listPipelines();
-    for (Pipeline pipeline : pipelines) {
-      HddsProtos.Pipeline protobufMessage = pipeline.getProtobufMessage();
-      builder.addPipelines(protobufMessage);
-    }
-    return builder.build();
-  }
-
-  public ActivatePipelineResponseProto activatePipeline(
-      ActivatePipelineRequestProto request)
-      throws IOException {
-    impl.activatePipeline(request.getPipelineID());
-    return ActivatePipelineResponseProto.newBuilder().build();
-  }
-
-  public DeactivatePipelineResponseProto deactivatePipeline(
-      DeactivatePipelineRequestProto request)
-      throws IOException {
-    impl.deactivatePipeline(request.getPipelineID());
-    return DeactivatePipelineResponseProto.newBuilder().build();
-  }
-
-  public ClosePipelineResponseProto closePipeline(
-      RpcController controller, ClosePipelineRequestProto request)
-      throws IOException {
-
-    impl.closePipeline(request.getPipelineID());
-    return ClosePipelineResponseProto.newBuilder().build();
-
-  }
-
-  public HddsProtos.GetScmInfoResponseProto getScmInfo(
-      HddsProtos.GetScmInfoRequestProto req)
-      throws IOException {
-    ScmInfo scmInfo = impl.getScmInfo();
-    return HddsProtos.GetScmInfoResponseProto.newBuilder()
-        .setClusterId(scmInfo.getClusterId())
-        .setScmId(scmInfo.getScmId())
-        .build();
-
-  }
-
-  public InSafeModeResponseProto inSafeMode(
-      InSafeModeRequestProto request) throws IOException {
-
-    return InSafeModeResponseProto.newBuilder()
-        .setInSafeMode(impl.inSafeMode()).build();
-
-  }
-
-  public ForceExitSafeModeResponseProto forceExitSafeMode(
-      ForceExitSafeModeRequestProto request)
-      throws IOException {
-    return ForceExitSafeModeResponseProto.newBuilder()
-        .setExitedSafeMode(impl.forceExitSafeMode()).build();
-
-  }
-
-  public StartReplicationManagerResponseProto startReplicationManager(
-      StartReplicationManagerRequestProto request)
-      throws IOException {
-    impl.startReplicationManager();
-    return StartReplicationManagerResponseProto.newBuilder().build();
-  }
-
-  public StopReplicationManagerResponseProto stopReplicationManager(
-      StopReplicationManagerRequestProto request)
-      throws IOException {
-    impl.stopReplicationManager();
-    return StopReplicationManagerResponseProto.newBuilder().build();
-
-  }
-
-  public ReplicationManagerStatusResponseProto getReplicationManagerStatus(
-      ReplicationManagerStatusRequestProto request)
-      throws IOException {
-    return ReplicationManagerStatusResponseProto.newBuilder()
-        .setIsRunning(impl.getReplicationManagerStatus()).build();
-  }
-
-}
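The listContainer method above pages results with an optional start ID plus a count. A small sketch of that pagination contract over a sorted map (ContainerStore is a hypothetical stand-in for the SCM container state, not an actual HDDS class):

import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

public final class ContainerStore {
  private final NavigableMap<Long, String> containers = new TreeMap<>();

  public void put(long id, String info) {
    containers.put(id, info);
  }

  // Return up to 'count' entries with IDs >= startId, in ID order, so a
  // caller can page through by passing the last seen ID plus one.
  public List<String> list(long startId, int count) {
    List<String> page = new ArrayList<>();
    for (String info : containers.tailMap(startId, true).values()) {
      if (page.size() >= count) {
        break;
      }
      page.add(info);
    }
    return page;
  }
}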
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java
deleted file mode 100644
index 411f22e..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.protocol;
-/**
- * RPC/protobuf specific translator classes for SCM protocol.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
deleted file mode 100644
index 4944017..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.ratis;
-
-/**
- * This package contains classes related to Apache Ratis for SCM.
- */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
deleted file mode 100644
index 8eadeb3..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.safemode;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicLong;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer
-    .NodeRegistrationContainerReport;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.hdds.server.events.TypedEvent;
-
-/**
- * Class defining Safe mode exit criteria for Containers.
- */
-public class ContainerSafeModeRule extends
-    SafeModeExitRule<NodeRegistrationContainerReport>{
-
-  // Required cutoff % for containers with at least 1 reported replica.
-  private double safeModeCutoff;
-  // Containers read from the SCM DB (excluding OPEN/CLOSING containers).
-  private Map<Long, ContainerInfo> containerMap;
-  private double maxContainer;
-
-  private AtomicLong containerWithMinReplicas = new AtomicLong(0);
-
-  public ContainerSafeModeRule(String ruleName, EventQueue eventQueue,
-      Configuration conf,
-      List<ContainerInfo> containers, SCMSafeModeManager manager) {
-    super(manager, ruleName, eventQueue);
-    safeModeCutoff = conf.getDouble(
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT,
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT);
-
-    Preconditions.checkArgument(
-        (safeModeCutoff >= 0.0 && safeModeCutoff <= 1.0),
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT  +
-            " value should be >= 0.0 and <= 1.0");
-
-    containerMap = new ConcurrentHashMap<>();
-    containers.forEach(container -> {
-      // There can be containers in OPEN/CLOSING state which were never
-      // created by the client. We are not considering these containers for
-      // now. These containers can be handled by tracking pipelines.
-
-      Optional.ofNullable(container.getState())
-          .filter(state -> state != HddsProtos.LifeCycleState.OPEN)
-          .filter(state -> state != HddsProtos.LifeCycleState.CLOSING)
-          .ifPresent(s -> containerMap.put(container.getContainerID(),
-              container));
-    });
-    maxContainer = containerMap.size();
-    long cutOff = (long) Math.ceil(maxContainer * safeModeCutoff);
-    getSafeModeMetrics().setNumContainerWithOneReplicaReportedThreshold(cutOff);
-  }
-
-
-  @Override
-  protected TypedEvent<NodeRegistrationContainerReport> getEventType() {
-    return SCMEvents.NODE_REGISTRATION_CONT_REPORT;
-  }
-
-
-  @Override
-  protected boolean validate() {
-    return getCurrentContainerThreshold() >= safeModeCutoff;
-  }
-
-  @VisibleForTesting
-  public double getCurrentContainerThreshold() {
-    if (maxContainer == 0) {
-      return 1;
-    }
-    return (containerWithMinReplicas.doubleValue() / maxContainer);
-  }
-
-  @Override
-  protected void process(NodeRegistrationContainerReport reportsProto) {
-
-    reportsProto.getReport().getReportsList().forEach(c -> {
-      if (containerMap.containsKey(c.getContainerID())) {
-        if(containerMap.remove(c.getContainerID()) != null) {
-          containerWithMinReplicas.getAndAdd(1);
-          getSafeModeMetrics()
-              .incCurrentContainersWithOneReplicaReportedCount();
-        }
-      }
-    });
-
-    if (scmInSafeMode()) {
-      SCMSafeModeManager.getLogger().info(
-          "SCM in safe mode. {} % containers have at least one"
-              + " reported replica.",
-          (containerWithMinReplicas.doubleValue() / maxContainer) * 100);
-    }
-  }
-
-  @Override
-  protected void cleanup() {
-    containerMap.clear();
-  }
-}
\ No newline at end of file
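
The deleted ContainerSafeModeRule boils down to simple cutoff arithmetic: the exit threshold is Math.ceil(maxContainer * safeModeCutoff), and the rule validates once the fraction of containers with at least one reported replica reaches the cutoff. Here is a standalone sketch of that arithmetic, using plain longs in place of ContainerInfo and 0.99 as an assumed stand-in for the configured cutoff.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

// Standalone sketch of the container cutoff arithmetic; 0.99 is an assumption.
final class ContainerCutoffSketch {
  public static void main(String[] args) {
    double safeModeCutoff = 0.99;
    Map<Long, String> containerMap = new HashMap<>();
    containerMap.put(1L, "CLOSED");
    containerMap.put(2L, "CLOSED");
    containerMap.put(3L, "CLOSED");
    double maxContainer = containerMap.size();
    long cutOff = (long) Math.ceil(maxContainer * safeModeCutoff);
    System.out.println("need reports for " + cutOff + " containers");

    AtomicLong containerWithMinReplicas = new AtomicLong(0);
    // A container is counted once, on its first reported replica.
    for (long reportedId : new long[] {1L, 2L, 3L}) {
      if (containerMap.remove(reportedId) != null) {
        containerWithMinReplicas.getAndAdd(1);
      }
    }
    double threshold = containerWithMinReplicas.doubleValue() / maxContainer;
    System.out.println("threshold=" + threshold
        + " exit=" + (threshold >= safeModeCutoff)); // exit=true
  }
}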
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java
deleted file mode 100644
index 1029d71..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.safemode;
-
-import java.util.HashSet;
-import java.util.UUID;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer.NodeRegistrationContainerReport;
-
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.hdds.server.events.TypedEvent;
-
-/**
- * Class defining Safe mode exit criteria according to number of DataNodes
- * registered with SCM.
- */
-public class DataNodeSafeModeRule extends
-    SafeModeExitRule<NodeRegistrationContainerReport>{
-
-  // Min DataNodes required to exit safe mode.
-  private int requiredDns;
-  private int registeredDns = 0;
-  // Set to track registered DataNodes.
-  private HashSet<UUID> registeredDnSet;
-
-  public DataNodeSafeModeRule(String ruleName, EventQueue eventQueue,
-      Configuration conf,
-      SCMSafeModeManager manager) {
-    super(manager, ruleName, eventQueue);
-    requiredDns = conf.getInt(
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE,
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE_DEFAULT);
-    registeredDnSet = new HashSet<>(requiredDns * 2);
-  }
-
-  @Override
-  protected TypedEvent<NodeRegistrationContainerReport> getEventType() {
-    return SCMEvents.NODE_REGISTRATION_CONT_REPORT;
-  }
-
-  @Override
-  protected boolean validate() {
-    return registeredDns >= requiredDns;
-  }
-
-  @Override
-  protected void process(NodeRegistrationContainerReport reportsProto) {
-
-    registeredDnSet.add(reportsProto.getDatanodeDetails().getUuid());
-    registeredDns = registeredDnSet.size();
-
-    if (scmInSafeMode()) {
-      SCMSafeModeManager.getLogger().info(
-          "SCM in safe mode. {} DataNodes registered, {} required.",
-          registeredDns, requiredDns);
-    }
-
-  }
-
-  @Override
-  protected void cleanup() {
-    registeredDnSet.clear();
-  }
-}
\ No newline at end of file
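
DataNodeSafeModeRule above is the simplest rule: registrations are deduplicated through a set of datanode UUIDs, so a node that re-registers never inflates the count. A standalone sketch follows, with requiredDns = 1 assumed purely for illustration.

import java.util.HashSet;
import java.util.Set;
import java.util.UUID;

// Standalone sketch of the registration-count rule; requiredDns is assumed.
final class DataNodeRuleSketch {
  public static void main(String[] args) {
    int requiredDns = 1;
    Set<UUID> registeredDnSet = new HashSet<>();
    UUID dn = UUID.randomUUID();
    registeredDnSet.add(dn);
    registeredDnSet.add(dn); // duplicate registration, the set ignores it
    int registeredDns = registeredDnSet.size();
    System.out.println("registered=" + registeredDns
        + " validated=" + (registeredDns >= requiredDns)); // validated=true
  }
}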
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java
deleted file mode 100644
index 7a00d76..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.safemode;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
-
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.hdds.server.events.TypedEvent;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Class defining Safe mode exit criteria for Pipelines.
- *
- * This rule defines the percentage of healthy pipelines that need to be
- * reported. Once safe mode exit happens, this rule ensures that writes can
- * go through in the cluster.
- */
-public class HealthyPipelineSafeModeRule
-    extends SafeModeExitRule<PipelineReportFromDatanode>{
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(HealthyPipelineSafeModeRule.class);
-  private final PipelineManager pipelineManager;
-  private final int healthyPipelineThresholdCount;
-  private int currentHealthyPipelineCount = 0;
-  private final Set<DatanodeDetails> processedDatanodeDetails =
-      new HashSet<>();
-
-  HealthyPipelineSafeModeRule(String ruleName, EventQueue eventQueue,
-      PipelineManager pipelineManager,
-      SCMSafeModeManager manager, Configuration configuration) {
-    super(manager, ruleName, eventQueue);
-    this.pipelineManager = pipelineManager;
-    double healthyPipelinesPercent =
-        configuration.getDouble(HddsConfigKeys.
-                HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT,
-            HddsConfigKeys.
-                HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT_DEFAULT);
-
-    Preconditions.checkArgument(
-        (healthyPipelinesPercent >= 0.0 && healthyPipelinesPercent <= 1.0),
-        HddsConfigKeys.
-            HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT
-            + " value should be >= 0.0 and <= 1.0");
-
-    // As we want to wait for 3 node pipelines
-    int pipelineCount =
-        pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE).size();
-
-    // This value will be zero when the pipeline count is 0.
-    // On a freshly installed cluster, there will be zero pipelines in the
-    // SCM pipeline DB.
-    healthyPipelineThresholdCount =
-        (int) Math.ceil(healthyPipelinesPercent * pipelineCount);
-
-    LOG.info(" Total pipeline count is {}, healthy pipeline " +
-        "threshold count is {}", pipelineCount, healthyPipelineThresholdCount);
-
-    getSafeModeMetrics().setNumHealthyPipelinesThreshold(
-        healthyPipelineThresholdCount);
-  }
-
-  @Override
-  protected TypedEvent<PipelineReportFromDatanode> getEventType() {
-    return SCMEvents.PROCESSED_PIPELINE_REPORT;
-  }
-
-  @Override
-  protected boolean validate() {
-    return currentHealthyPipelineCount >= healthyPipelineThresholdCount;
-  }
-
-  @Override
-  protected void process(PipelineReportFromDatanode
-      pipelineReportFromDatanode) {
-
-    // When SCM stays in safe mode for a long time, an already registered
-    // datanode can send its pipeline report again, and the pipeline handler
-    // fires another processed-report event. Such a repeat report from the
-    // same datanode must not be counted again in the threshold calculation.
-    Preconditions.checkNotNull(pipelineReportFromDatanode);
-    DatanodeDetails dnDetails = pipelineReportFromDatanode.getDatanodeDetails();
-    if (!processedDatanodeDetails.contains(
-        pipelineReportFromDatanode.getDatanodeDetails())) {
-
-      Pipeline pipeline;
-      PipelineReportsProto pipelineReport =
-          pipelineReportFromDatanode.getReport();
-
-      for (PipelineReport report : pipelineReport.getPipelineReportList()) {
-        PipelineID pipelineID = PipelineID
-            .getFromProtobuf(report.getPipelineID());
-        try {
-          pipeline = pipelineManager.getPipeline(pipelineID);
-        } catch (PipelineNotFoundException e) {
-          continue;
-        }
-
-        if (pipeline.getFactor() == HddsProtos.ReplicationFactor.THREE &&
-            pipeline.getPipelineState() == Pipeline.PipelineState.OPEN) {
-          // If the pipeline is in the OPEN state, all 3 datanodes have
-          // reported for this pipeline.
-          currentHealthyPipelineCount++;
-          getSafeModeMetrics().incCurrentHealthyPipelinesCount();
-        }
-      }
-      if (scmInSafeMode()) {
-        SCMSafeModeManager.getLogger().info(
-            "SCM in safe mode. Healthy pipelines reported count is {}, " +
-                "required healthy pipeline reported count is {}",
-            currentHealthyPipelineCount, healthyPipelineThresholdCount);
-      }
-
-      processedDatanodeDetails.add(dnDetails);
-    }
-
-  }
-
-  @Override
-  protected void cleanup() {
-    processedDatanodeDetails.clear();
-  }
-
-  @VisibleForTesting
-  public int getCurrentHealthyPipelineCount() {
-    return currentHealthyPipelineCount;
-  }
-
-  @VisibleForTesting
-  public int getHealthyPipelineThresholdCount() {
-    return healthyPipelineThresholdCount;
-  }
-}
\ No newline at end of file
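
The counting in the deleted HealthyPipelineSafeModeRule has two guards: a report is processed at most once per datanode, and only OPEN three-replica pipelines count toward Math.ceil(healthyPipelinesPercent * pipelineCount). Below is a standalone sketch with simplified stand-in types (string datanode IDs, a two-value State enum, and 0.10 as an assumed percent).

import java.util.HashSet;
import java.util.Set;

// Standalone sketch of the healthy-pipeline counting; types are stand-ins.
final class HealthyPipelineSketch {
  enum State { OPEN, ALLOCATED }

  public static void main(String[] args) {
    double healthyPipelinesPercent = 0.10; // assumed value
    int pipelineCount = 5;
    int thresholdCount = (int) Math.ceil(healthyPipelinesPercent * pipelineCount);

    Set<String> processedDatanodes = new HashSet<>();
    int currentHealthyPipelineCount = 0;
    // dn-1 reports an OPEN pipeline twice; the repeat report is skipped.
    String[][] reports = { {"dn-1", "OPEN"}, {"dn-1", "OPEN"} };
    for (String[] r : reports) {
      if (processedDatanodes.add(r[0]) && State.valueOf(r[1]) == State.OPEN) {
        currentHealthyPipelineCount++;
      }
    }
    System.out.println(currentHealthyPipelineCount >= thresholdCount); // true
  }
}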
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java
deleted file mode 100644
index 841d8ff..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.safemode;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReport;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.
-    PipelineReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.hdds.server.events.TypedEvent;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * This rule covers whether at least one datanode has reported for each
- * pipeline. It ensures that, for all open containers, at least one replica
- * is available for reads when we exit safe mode.
- */
-public class OneReplicaPipelineSafeModeRule extends
-    SafeModeExitRule<PipelineReportFromDatanode> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OneReplicaPipelineSafeModeRule.class);
-
-  private int thresholdCount;
-  private Set<PipelineID> reportedPipelineIDSet = new HashSet<>();
-  private final PipelineManager pipelineManager;
-  private int currentReportedPipelineCount = 0;
-
-
-  public OneReplicaPipelineSafeModeRule(String ruleName, EventQueue eventQueue,
-      PipelineManager pipelineManager,
-      SCMSafeModeManager safeModeManager, Configuration configuration) {
-    super(safeModeManager, ruleName, eventQueue);
-    this.pipelineManager = pipelineManager;
-
-    double percent =
-        configuration.getDouble(
-            HddsConfigKeys.HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT,
-            HddsConfigKeys.
-                HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT_DEFAULT);
-
-    Preconditions.checkArgument((percent >= 0.0 && percent <= 1.0),
-        HddsConfigKeys.
-            HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT  +
-            " value should be >= 0.0 and <= 1.0");
-
-    int totalPipelineCount =
-        pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE).size();
-
-    thresholdCount = (int) Math.ceil(percent * totalPipelineCount);
-
-    LOG.info(" Total pipeline count is {}, pipeline's with atleast one " +
-        "datanode reported threshold count is {}", totalPipelineCount,
-        thresholdCount);
-
-    getSafeModeMetrics().setNumPipelinesWithAtleastOneReplicaReportedThreshold(
-        thresholdCount);
-
-  }
-
-  @Override
-  protected TypedEvent<PipelineReportFromDatanode> getEventType() {
-    return SCMEvents.PROCESSED_PIPELINE_REPORT;
-  }
-
-  @Override
-  protected boolean validate() {
-    return currentReportedPipelineCount >= thresholdCount;
-  }
-
-  @Override
-  protected void process(PipelineReportFromDatanode
-      pipelineReportFromDatanode) {
-    Pipeline pipeline;
-    Preconditions.checkNotNull(pipelineReportFromDatanode);
-    PipelineReportsProto pipelineReport =
-        pipelineReportFromDatanode.getReport();
-
-    for (PipelineReport report : pipelineReport.getPipelineReportList()) {
-      PipelineID pipelineID = PipelineID
-          .getFromProtobuf(report.getPipelineID());
-      try {
-        pipeline = pipelineManager.getPipeline(pipelineID);
-      } catch (PipelineNotFoundException e) {
-        continue;
-      }
-
-      if (pipeline.getFactor() == HddsProtos.ReplicationFactor.THREE &&
-          !reportedPipelineIDSet.contains(pipelineID)) {
-        reportedPipelineIDSet.add(pipelineID);
-        getSafeModeMetrics()
-            .incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount();
-      }
-    }
-
-    currentReportedPipelineCount = reportedPipelineIDSet.size();
-
-    if (scmInSafeMode()) {
-      SCMSafeModeManager.getLogger().info(
-          "SCM in safe mode. Pipelines with atleast one datanode reported " +
-              "count is {}, required atleast one datanode reported per " +
-              "pipeline count is {}",
-          currentReportedPipelineCount, thresholdCount);
-    }
-
-  }
-
-  @Override
-  protected void cleanup() {
-    reportedPipelineIDSet.clear();
-  }
-
-  @VisibleForTesting
-  public int getThresholdCount() {
-    return thresholdCount;
-  }
-
-  @VisibleForTesting
-  public int getCurrentReportedPipelineCount() {
-    return currentReportedPipelineCount;
-  }
-
-}
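
OneReplicaPipelineSafeModeRule differs from the healthy-pipeline rule in what it deduplicates: it tracks pipeline IDs, so a pipeline counts once no matter how many of its datanodes report. A standalone sketch follows, with string pipeline IDs standing in for PipelineID and 0.90 assumed for the configured percent.

import java.util.HashSet;
import java.util.Set;

// Standalone sketch of the one-replica-per-pipeline counting; 0.90 is assumed.
final class OneReplicaPipelineSketch {
  public static void main(String[] args) {
    double percent = 0.90;
    int totalPipelineCount = 3;
    int thresholdCount = (int) Math.ceil(percent * totalPipelineCount);

    Set<String> reportedPipelineIdSet = new HashSet<>();
    // All three datanodes of p1 report, but p1 still counts only once.
    for (String pipelineId : new String[] {"p1", "p1", "p1", "p2", "p3"}) {
      reportedPipelineIdSet.add(pipelineId);
    }
    int currentReportedPipelineCount = reportedPipelineIdSet.size();
    System.out.println(currentReportedPipelineCount >= thresholdCount); // true
  }
}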
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/Precheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/Precheck.java
deleted file mode 100644
index 12c6c31..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/Precheck.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.safemode;
-
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-
-/**
- * Precheck for SCM operations.
- */
-public interface Precheck<T> {
-  boolean check(T t) throws SCMException;
-  String type();
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java
deleted file mode 100644
index a22d162..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.safemode;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * StorageContainerManager enters safe mode on startup to allow the system to
- * reach a stable state before becoming fully functional. SCM waits for
- * certain resources to be reported before coming out of safe mode.
- *
- * SafeModeExitRule defines the format for new rules which must be satisfied
- * to exit safe mode.
- *
- * Current SafeMode rules:
- * 1. ContainerSafeModeRule:
- * On every new datanode registration, SCM fires
- * {@link SCMEvents#NODE_REGISTRATION_CONT_REPORT}. This rule handles that
- * event: it processes the report, increments the containerWithMinReplicas
- * count when the reported replica is in the containerMap, and then validates
- * whether the cutoff threshold for containers is met.
- *
- * 2. DataNodeSafeModeRule:
- * On every new datanode registration, SCM fires
- * {@link SCMEvents#NODE_REGISTRATION_CONT_REPORT}. This rule handles that
- * event: it processes the report, adds the node to its reported node set if
- * it is new, and then validates whether the cutoff threshold for the minimum
- * number of registered datanodes is met.
- *
- * 3. HealthyPipelineSafeModeRule:
- * Once the pipelineReportHandler processes the
- * {@link SCMEvents#PIPELINE_REPORT}, it fires
- * {@link SCMEvents#PROCESSED_PIPELINE_REPORT}. This rule handles that
- * event: it processes the report, increments the current healthy pipeline
- * count if the pipeline is healthy, and then validates whether the cutoff
- * threshold for healthy pipelines is met.
- *
- * 4. OneReplicaPipelineSafeModeRule:
- * Once the pipelineReportHandler processes the
- * {@link SCMEvents#PIPELINE_REPORT}, it fires
- * {@link SCMEvents#PROCESSED_PIPELINE_REPORT}. This rule handles that
- * event: it processes the report, adds the reported pipeline to the
- * reported pipeline set, and then validates whether the cutoff threshold
- * for one replica per pipeline is met.
- *
- */
-public class SCMSafeModeManager {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SCMSafeModeManager.class);
-  private final boolean isSafeModeEnabled;
-  private AtomicBoolean inSafeMode = new AtomicBoolean(true);
-
-  private Map<String, SafeModeExitRule> exitRules = new HashMap<>(1);
-  private Configuration config;
-  private static final String CONT_EXIT_RULE = "ContainerSafeModeRule";
-  private static final String DN_EXIT_RULE = "DataNodeSafeModeRule";
-  private static final String HEALTHY_PIPELINE_EXIT_RULE =
-      "HealthyPipelineSafeModeRule";
-  private static final String ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE =
-      "AtleastOneDatanodeReportedRule";
-
-  private Set<String> validatedRules = new HashSet<>();
-
-  private final EventQueue eventPublisher;
-  private final PipelineManager pipelineManager;
-
-  private final SafeModeMetrics safeModeMetrics;
-
-  public SCMSafeModeManager(Configuration conf,
-      List<ContainerInfo> allContainers, PipelineManager pipelineManager,
-      EventQueue eventQueue) {
-    this.config = conf;
-    this.pipelineManager = pipelineManager;
-    this.eventPublisher = eventQueue;
-    this.isSafeModeEnabled = conf.getBoolean(
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED,
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED_DEFAULT);
-
-
-    if (isSafeModeEnabled) {
-      this.safeModeMetrics = SafeModeMetrics.create();
-      ContainerSafeModeRule containerSafeModeRule =
-          new ContainerSafeModeRule(CONT_EXIT_RULE, eventQueue, config,
-              allContainers, this);
-      DataNodeSafeModeRule dataNodeSafeModeRule =
-          new DataNodeSafeModeRule(DN_EXIT_RULE, eventQueue, config, this);
-      exitRules.put(CONT_EXIT_RULE, containerSafeModeRule);
-      exitRules.put(DN_EXIT_RULE, dataNodeSafeModeRule);
-      if (conf.getBoolean(
-          HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK,
-          HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT)
-          && pipelineManager != null) {
-        HealthyPipelineSafeModeRule healthyPipelineSafeModeRule =
-            new HealthyPipelineSafeModeRule(HEALTHY_PIPELINE_EXIT_RULE,
-                eventQueue, pipelineManager,
-                this, config);
-        OneReplicaPipelineSafeModeRule oneReplicaPipelineSafeModeRule =
-            new OneReplicaPipelineSafeModeRule(
-                ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE, eventQueue,
-                pipelineManager, this, conf);
-        exitRules.put(HEALTHY_PIPELINE_EXIT_RULE, healthyPipelineSafeModeRule);
-        exitRules.put(ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE,
-            oneReplicaPipelineSafeModeRule);
-      }
-      emitSafeModeStatus();
-    } else {
-      this.safeModeMetrics = null;
-      exitSafeMode(eventQueue);
-    }
-  }
-
-  public void stop() {
-    if (isSafeModeEnabled) {
-      this.safeModeMetrics.unRegister();
-    }
-  }
-
-  public SafeModeMetrics getSafeModeMetrics() {
-    return safeModeMetrics;
-  }
-
-  /**
-   * Emit Safe mode status.
-   */
-  @VisibleForTesting
-  public void emitSafeModeStatus() {
-    eventPublisher.fireEvent(SCMEvents.SAFE_MODE_STATUS,
-        new SafeModeStatus(getInSafeMode()));
-  }
-
-
-  public synchronized void validateSafeModeExitRules(String ruleName,
-      EventPublisher eventQueue) {
-
-    if (exitRules.get(ruleName) != null) {
-      validatedRules.add(ruleName);
-    } else {
-      // This should never happen
-      LOG.error("No Such Exit rule {}", ruleName);
-    }
-
-
-    if (validatedRules.size() == exitRules.size()) {
-      // All rules are satisfied, we can exit safe mode.
-      LOG.info("ScmSafeModeManager, all rules are successfully validated");
-      exitSafeMode(eventQueue);
-    }
-
-  }
-
-  /**
-   * Exit safe mode. It does following actions:
-   * 1. Set safe mode status to false.
-   * 2. Emits START_REPLICATION for ReplicationManager.
-   * 3. Cleanup resources.
-   * 4. Emit safe mode status.
-   * @param eventQueue
-   */
-  @VisibleForTesting
-  public void exitSafeMode(EventPublisher eventQueue) {
-    LOG.info("SCM exiting safe mode.");
-    setInSafeMode(false);
-
-    // TODO: Remove handler registration as there is no need to listen to
-    // register events anymore.
-
-    emitSafeModeStatus();
-    // TODO: #CLUTIL if we reenter safe mode the fixed interval pipeline
-    // creation job needs to stop
-    pipelineManager.startPipelineCreator();
-  }
-
-  public boolean getInSafeMode() {
-    if (!isSafeModeEnabled) {
-      return false;
-    }
-    return inSafeMode.get();
-  }
-
-  /**
-   * Set safe mode status.
-   */
-  public void setInSafeMode(boolean inSafeMode) {
-    this.inSafeMode.set(inSafeMode);
-  }
-
-  public static Logger getLogger() {
-    return LOG;
-  }
-
-  @VisibleForTesting
-  public double getCurrentContainerThreshold() {
-    return ((ContainerSafeModeRule) exitRules.get(CONT_EXIT_RULE))
-        .getCurrentContainerThreshold();
-  }
-
-  @VisibleForTesting
-  public HealthyPipelineSafeModeRule getHealthyPipelineSafeModeRule() {
-    return (HealthyPipelineSafeModeRule)
-        exitRules.get(HEALTHY_PIPELINE_EXIT_RULE);
-  }
-
-  @VisibleForTesting
-  public OneReplicaPipelineSafeModeRule getOneReplicaPipelineSafeModeRule() {
-    return (OneReplicaPipelineSafeModeRule)
-        exitRules.get(ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE);
-  }
-
-
-  /**
-   * Class used during SafeMode status event.
-   */
-  public static class SafeModeStatus {
-
-    private boolean safeModeStatus;
-    public SafeModeStatus(boolean safeModeState) {
-      this.safeModeStatus = safeModeState;
-    }
-
-    public boolean getSafeModeStatus() {
-      return safeModeStatus;
-    }
-  }
-
-}
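
The exit bookkeeping in the deleted SCMSafeModeManager is a name-keyed rule map plus a set of validated rule names; safe mode flips off once the two collections have equal size. Here is a minimal standalone sketch of that flow, with rule bodies elided and Runnable standing in for SafeModeExitRule.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;

// Standalone sketch of the manager's rule map and exit check.
final class SafeModeManagerSketch {
  private final Map<String, Runnable> exitRules = new HashMap<>();
  private final Set<String> validatedRules = new HashSet<>();
  private final AtomicBoolean inSafeMode = new AtomicBoolean(true);

  void register(String ruleName) {
    exitRules.put(ruleName, () -> { }); // rule body elided
  }

  synchronized void validateSafeModeExitRules(String ruleName) {
    if (exitRules.containsKey(ruleName)) {
      validatedRules.add(ruleName);
    }
    if (validatedRules.size() == exitRules.size()) {
      inSafeMode.set(false); // all rules satisfied: exit safe mode
    }
  }

  public static void main(String[] args) {
    SafeModeManagerSketch m = new SafeModeManagerSketch();
    m.register("ContainerSafeModeRule");
    m.register("DataNodeSafeModeRule");
    m.validateSafeModeExitRules("ContainerSafeModeRule");
    m.validateSafeModeExitRules("DataNodeSafeModeRule");
    System.out.println("inSafeMode=" + m.inSafeMode.get()); // false
  }
}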
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java
deleted file mode 100644
index 05e84db..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.safemode;
-
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.hdds.server.events.TypedEvent;
-
-/**
- * Abstract class for SafeModeExitRules. When a new rule is added, the new
- * rule should extend this abstract class.
- *
- * Each rule should:
- * 1. Add a handler for the event it is looking for during the
- * initialization of the rule.
- * 2. Add itself to the list of rules in SCMSafeModeManager.
- *
- *
- * @param <T>
- */
-public abstract class SafeModeExitRule<T> implements EventHandler<T> {
-
-  private final SCMSafeModeManager safeModeManager;
-  private final String ruleName;
-
-  public SafeModeExitRule(SCMSafeModeManager safeModeManager,
-      String ruleName, EventQueue eventQueue) {
-    this.safeModeManager = safeModeManager;
-    this.ruleName = ruleName;
-    eventQueue.addHandler(getEventType(), this);
-  }
-
-  /**
-   * Returns the name of this SafeModeExitRule.
-   * @return ruleName
-   */
-  public String getRuleName() {
-    return ruleName;
-  }
-
-  /**
-   * Returns the event type this safe mode exit rule handles.
-   * @return TypedEvent
-   */
-  protected abstract TypedEvent<T> getEventType();
-
-  /**
-   * Validates this rule. If the rule's condition is met, returns true, else
-   * returns false.
-   * @return boolean
-   */
-  protected abstract boolean validate();
-
-  /**
-   * Actual processing logic for this rule.
-   * @param report
-   */
-  protected abstract void process(T report);
-
-  /**
-   * Cleanup actions that need to be done once this rule is satisfied.
-   */
-  protected abstract void cleanup();
-
-  @Override
-  public final void onMessage(T report, EventPublisher publisher) {
-
-    // TODO: once we can remove handlers, we can remove the getInSafeMode check
-
-    if (scmInSafeMode()) {
-      if (validate()) {
-        safeModeManager.validateSafeModeExitRules(ruleName, publisher);
-        cleanup();
-        return;
-      }
-
-      process(report);
-
-      if (validate()) {
-        safeModeManager.validateSafeModeExitRules(ruleName, publisher);
-        cleanup();
-      }
-    }
-  }
-
-  /**
-   * Return true if SCM is in safe mode, else false.
-   * @return boolean
-   */
-  protected boolean scmInSafeMode() {
-    return safeModeManager.getInSafeMode();
-  }
-
-  protected SafeModeMetrics getSafeModeMetrics() {
-    return safeModeManager.getSafeModeMetrics();
-  }
-
-}
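
SafeModeExitRule#onMessage above is a template method: it validates before and after process(report), so a rule that is already satisfied, or becomes satisfied by the current report, checks in with the manager exactly once and then cleans up. Below is a standalone sketch of that control flow, with a boolean flag standing in for the manager callback.

// Standalone sketch of the validate/process/validate template method.
abstract class ExitRuleSketch<T> {
  private boolean satisfied;

  protected abstract boolean validate();
  protected abstract void process(T report);
  protected abstract void cleanup();

  final void onMessage(T report) {
    if (satisfied) {
      return; // mirrors the scmInSafeMode() guard: satisfied rules ignore events
    }
    if (validate()) {
      markSatisfied();
      return;
    }
    process(report);
    if (validate()) {
      markSatisfied();
    }
  }

  private void markSatisfied() {
    satisfied = true; // stands in for safeModeManager.validateSafeModeExitRules
    cleanup();
  }
}

// A trivial concrete rule: satisfied after two reports of any kind.
final class TwoReportsRule extends ExitRuleSketch<String> {
  private int count;

  @Override protected boolean validate() { return count >= 2; }
  @Override protected void process(String report) { count++; }
  @Override protected void cleanup() { System.out.println("satisfied, cleaned up"); }

  public static void main(String[] args) {
    TwoReportsRule rule = new TwoReportsRule();
    rule.onMessage("report-1");
    rule.onMessage("report-2"); // second report satisfies the rule
  }
}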
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java
deleted file mode 100644
index b9e5333..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * <p>http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * <p>Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.safemode;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.scm.block.BlockManager;
-import org.apache.hadoop.hdds.scm.container.ReplicationManager;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Objects;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * Class to handle the activities needed to be performed after exiting safe
- * mode.
- */
-public class SafeModeHandler implements EventHandler<SafeModeStatus> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SafeModeHandler.class);
-
-  private final SCMClientProtocolServer scmClientProtocolServer;
-  private final BlockManager scmBlockManager;
-  private final long waitTime;
-  private final AtomicBoolean isInSafeMode = new AtomicBoolean(true);
-  private final ReplicationManager replicationManager;
-
-  private final PipelineManager scmPipelineManager;
-
-  /**
-   * SafeModeHandler, to handle the logic once we exit safe mode.
-   * @param configuration
-   * @param clientProtocolServer
-   * @param blockManager
-   * @param replicationManager
-   */
-  public SafeModeHandler(Configuration configuration,
-      SCMClientProtocolServer clientProtocolServer,
-      BlockManager blockManager,
-      ReplicationManager replicationManager, PipelineManager pipelineManager) {
-    Objects.requireNonNull(configuration, "Configuration cannot be null");
-    Objects.requireNonNull(clientProtocolServer, "SCMClientProtocolServer " +
-        "object cannot be null");
-    Objects.requireNonNull(blockManager, "BlockManager object cannot be null");
-    Objects.requireNonNull(replicationManager, "ReplicationManager " +
-        "object cannot be null");
-    Objects.requireNonNull(pipelineManager, "PipelineManager object cannot " +
-        "be null");
-    this.waitTime = configuration.getTimeDuration(
-        HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
-        HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    this.scmClientProtocolServer = clientProtocolServer;
-    this.scmBlockManager = blockManager;
-    this.replicationManager = replicationManager;
-    this.scmPipelineManager = pipelineManager;
-
-    final boolean safeModeEnabled = configuration.getBoolean(
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED,
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED_DEFAULT);
-    isInSafeMode.set(safeModeEnabled);
-
-  }
-
-
-
-  /**
-   * Set SafeMode status based on
-   * {@link org.apache.hadoop.hdds.scm.events.SCMEvents#SAFE_MODE_STATUS}.
-   *
-   * Informs BlockManager, SCMClientProtocolServer and replication activity
-   * about the safe mode status.
-   *
-   * @param safeModeStatus
-   * @param publisher
-   */
-  @Override
-  public void onMessage(SafeModeStatus safeModeStatus,
-      EventPublisher publisher) {
-
-    isInSafeMode.set(safeModeStatus.getSafeModeStatus());
-    scmClientProtocolServer.setSafeModeStatus(isInSafeMode.get());
-    scmBlockManager.setSafeModeStatus(isInSafeMode.get());
-
-    if (!isInSafeMode.get()) {
-      final Thread safeModeExitThread = new Thread(() -> {
-        try {
-          Thread.sleep(waitTime);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-        }
-        replicationManager.start();
-        cleanupPipelines();
-      });
-
-      safeModeExitThread.setDaemon(true);
-      safeModeExitThread.start();
-    }
-
-  }
-
-  private void cleanupPipelines() {
-    List<Pipeline> pipelineList = scmPipelineManager.getPipelines();
-    pipelineList.forEach((pipeline) -> {
-      try {
-        if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) {
-          scmPipelineManager.finalizeAndDestroyPipeline(pipeline, false);
-        }
-      } catch (IOException ex) {
-        LOG.error("Finalize and destroy pipeline failed for pipeline "
-            + pipeline.toString(), ex);
-      }
-    });
-  }
-
-  public boolean getSafeModeStatus() {
-    return isInSafeMode.get();
-  }
-
-
-}
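
The deleted SafeModeHandler defers its post-exit work to a daemon thread that sleeps for the configured wait time before starting the ReplicationManager, keeping the event-dispatch thread unblocked. A standalone sketch of that pattern follows; the 100 ms wait and the println stand in for the real configuration and start() call.

import java.util.concurrent.atomic.AtomicBoolean;

// Standalone sketch of the delayed post-exit work; waitTimeMs is assumed.
final class SafeModeExitThreadSketch {
  public static void main(String[] args) throws InterruptedException {
    AtomicBoolean isInSafeMode = new AtomicBoolean(false); // status just flipped
    long waitTimeMs = 100;

    if (!isInSafeMode.get()) {
      Thread safeModeExitThread = new Thread(() -> {
        try {
          Thread.sleep(waitTimeMs);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
        System.out.println("starting replication manager (stand-in)");
      });
      safeModeExitThread.setDaemon(true);
      safeModeExitThread.start();
      safeModeExitThread.join(); // only so the demo prints before the JVM exits
    }
  }
}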
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java
deleted file mode 100644
index 80b8257..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.safemode;
-
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-
-/**
- * This class is used for maintaining SafeMode metric information, which can
- * be used for monitoring during SCM startup when SCM is still in SafeMode.
- */
-public class SafeModeMetrics {
-  private static final String SOURCE_NAME =
-      SafeModeMetrics.class.getSimpleName();
-
-
-  // All of these values are set when safe mode is enabled.
-  private @Metric MutableCounterLong
-      numContainerWithOneReplicaReportedThreshold;
-  private @Metric MutableCounterLong
-      currentContainersWithOneReplicaReportedCount;
-
-  // The metrics below have non-zero values only when
-  // hdds.scm.safemode.pipeline-availability.check is enabled; otherwise
-  // they remain zero.
-  private @Metric MutableCounterLong numHealthyPipelinesThreshold;
-  private @Metric MutableCounterLong currentHealthyPipelinesCount;
-  private @Metric MutableCounterLong
-      numPipelinesWithAtleastOneReplicaReportedThreshold;
-  private @Metric MutableCounterLong
-      currentPipelinesWithAtleastOneReplicaReportedCount;
-
-  public static SafeModeMetrics create() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME,
-        "SCM Safemode Metrics",
-        new SafeModeMetrics());
-  }
-
-  public void setNumHealthyPipelinesThreshold(long val) {
-    this.numHealthyPipelinesThreshold.incr(val);
-  }
-
-  public void incCurrentHealthyPipelinesCount() {
-    this.currentHealthyPipelinesCount.incr();
-  }
-
-  public void setNumPipelinesWithAtleastOneReplicaReportedThreshold(long val) {
-    this.numPipelinesWithAtleastOneReplicaReportedThreshold.incr(val);
-  }
-
-  public void incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount() {
-    this.currentPipelinesWithAtleastOneReplicaReportedCount.incr();
-  }
-
-  public void setNumContainerWithOneReplicaReportedThreshold(long val) {
-    this.numContainerWithOneReplicaReportedThreshold.incr(val);
-  }
-
-  public void incCurrentContainersWithOneReplicaReportedCount() {
-    this.currentContainersWithOneReplicaReportedCount.incr();
-  }
-
-  public MutableCounterLong getNumHealthyPipelinesThreshold() {
-    return numHealthyPipelinesThreshold;
-  }
-
-  public MutableCounterLong getCurrentHealthyPipelinesCount() {
-    return currentHealthyPipelinesCount;
-  }
-
-  public MutableCounterLong
-      getNumPipelinesWithAtleastOneReplicaReportedThreshold() {
-    return numPipelinesWithAtleastOneReplicaReportedThreshold;
-  }
-
-  public MutableCounterLong getCurrentPipelinesWithAtleastOneReplicaCount() {
-    return currentPipelinesWithAtleastOneReplicaReportedCount;
-  }
-
-  public MutableCounterLong getNumContainerWithOneReplicaReportedThreshold() {
-    return numContainerWithOneReplicaReportedThreshold;
-  }
-
-  public MutableCounterLong getCurrentContainersWithOneReplicaReportedCount() {
-    return currentContainersWithOneReplicaReportedCount;
-  }
-
-
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE_NAME);
-  }
-}
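
Note the two counter idioms in the deleted metrics class: the set*Threshold methods write a threshold once by incrementing a fresh counter by the full value, while the inc* methods bump a running counter by one per report. A standalone sketch with AtomicLong standing in for MutableCounterLong:

import java.util.concurrent.atomic.AtomicLong;

// Standalone sketch of the threshold-vs-current counter idioms.
final class SafeModeCounterSketch {
  public static void main(String[] args) {
    AtomicLong numHealthyPipelinesThreshold = new AtomicLong();
    AtomicLong currentHealthyPipelinesCount = new AtomicLong();

    numHealthyPipelinesThreshold.addAndGet(5); // like setNumHealthyPipelinesThreshold(5)
    currentHealthyPipelinesCount.incrementAndGet(); // one healthy pipeline seen

    System.out.println(currentHealthyPipelinesCount.get()
        + "/" + numHealthyPipelinesThreshold.get()); // 1/5
  }
}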
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java
deleted file mode 100644
index b63d04e..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.safemode;
-
-import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
-
-/**
- * Safe mode pre-check for SCM operations.
- */
-public class SafeModePrecheck implements Precheck<ScmOps> {
-
-  private AtomicBoolean inSafeMode;
-  public static final String PRECHECK_TYPE = "SafeModePrecheck";
-
-  public SafeModePrecheck(Configuration conf) {
-    boolean safeModeEnabled = conf.getBoolean(
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED,
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED_DEFAULT);
-    inSafeMode = new AtomicBoolean(safeModeEnabled);
-  }
-
-  @Override
-  public boolean check(ScmOps op) throws SCMException {
-    if (inSafeMode.get() && SafeModeRestrictedOps
-        .isRestrictedInSafeMode(op)) {
-      throw new SCMException("SafeModePrecheck failed for " + op,
-          ResultCodes.SAFE_MODE_EXCEPTION);
-    }
-    return inSafeMode.get();
-  }
-
-  @Override
-  public String type() {
-    return PRECHECK_TYPE;
-  }
-
-  public boolean isInSafeMode() {
-    return inSafeMode.get();
-  }
-
-  public void setInSafeMode(boolean inSafeMode) {
-    this.inSafeMode.set(inSafeMode);
-  }
-}
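
SafeModePrecheck composes with the restricted-ops set defined in the next file: an operation in that set throws while the safe-mode flag is up, anything else passes through. Here is a standalone sketch of the guard, with a plain enum and IllegalStateException standing in for ScmOps and SCMException.

import java.util.EnumSet;
import java.util.concurrent.atomic.AtomicBoolean;

// Standalone sketch of the safe-mode precheck guard; types are stand-ins.
final class PrecheckSketch {
  enum Op { allocateBlock, allocateContainer, queryNode }

  private static final EnumSet<Op> RESTRICTED =
      EnumSet.of(Op.allocateBlock, Op.allocateContainer);
  private final AtomicBoolean inSafeMode = new AtomicBoolean(true);

  boolean check(Op op) {
    if (inSafeMode.get() && RESTRICTED.contains(op)) {
      throw new IllegalStateException("SafeModePrecheck failed for " + op);
    }
    return inSafeMode.get();
  }

  public static void main(String[] args) {
    PrecheckSketch precheck = new PrecheckSketch();
    System.out.println(precheck.check(Op.queryNode)); // allowed: true
    try {
      precheck.check(Op.allocateBlock);
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage()); // rejected while in safe mode
    }
  }
}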
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRestrictedOps.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRestrictedOps.java
deleted file mode 100644
index 5f516e4..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRestrictedOps.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.safemode;
-
-import java.util.EnumSet;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
-
-/**
- * Operations restricted in SCM safe mode.
- */
-public final class SafeModeRestrictedOps {
-  private static EnumSet<ScmOps> restrictedOps = EnumSet.noneOf(ScmOps.class);
-
-  private SafeModeRestrictedOps() {
-  }
-
-  static {
-    restrictedOps.add(ScmOps.allocateBlock);
-    restrictedOps.add(ScmOps.allocateContainer);
-  }
-
-  public static boolean isRestrictedInSafeMode(ScmOps opName) {
-    return restrictedOps.contains(opName);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/package-info.java
deleted file mode 100644
index b5fd826..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.safemode;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
deleted file mode 100644
index 9c69758..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ /dev/null
@@ -1,365 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the
- * License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.net.Node;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.ozone.audit.AuditAction;
-import org.apache.hadoop.ozone.audit.AuditEventStatus;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.AuditLoggerType;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.audit.Auditor;
-import org.apache.hadoop.ozone.audit.SCMAction;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB;
-
-import com.google.common.collect.Maps;
-import com.google.protobuf.BlockingService;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY;
-import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer;
-import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The SCM block protocol is the protocol used by the Namenode and the
- * OzoneManager to get blocks from SCM.
- */
-public class SCMBlockProtocolServer implements
-    ScmBlockLocationProtocol, Auditor {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SCMBlockProtocolServer.class);
-
-  private static final AuditLogger AUDIT =
-      new AuditLogger(AuditLoggerType.SCMLOGGER);
-
-  private final StorageContainerManager scm;
-  private final OzoneConfiguration conf;
-  private final RPC.Server blockRpcServer;
-  private final InetSocketAddress blockRpcAddress;
-  private final ProtocolMessageMetrics
-      protocolMessageMetrics;
-
-  /**
-   * The RPC server that listens to requests from block service clients.
-   */
-  public SCMBlockProtocolServer(OzoneConfiguration conf,
-      StorageContainerManager scm) throws IOException {
-    this.scm = scm;
-    this.conf = conf;
-    final int handlerCount =
-        conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY,
-            OZONE_SCM_HANDLER_COUNT_DEFAULT);
-
-    RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-
-    protocolMessageMetrics =
-        ProtocolMessageMetrics.create("ScmBlockLocationProtocol",
-            "SCM Block location protocol counters",
-            ScmBlockLocationProtocolProtos.Type.values());
-
-    // SCM Block Service RPC.
-    BlockingService blockProtoPbService =
-        ScmBlockLocationProtocolProtos.ScmBlockLocationProtocolService
-            .newReflectiveBlockingService(
-                new ScmBlockLocationProtocolServerSideTranslatorPB(this,
-                    protocolMessageMetrics));
-
-    final InetSocketAddress scmBlockAddress = HddsServerUtil
-        .getScmBlockClientBindAddress(conf);
-    blockRpcServer =
-        startRpcServer(
-            conf,
-            scmBlockAddress,
-            ScmBlockLocationProtocolPB.class,
-            blockProtoPbService,
-            handlerCount);
-    blockRpcAddress =
-        updateRPCListenAddress(
-            conf, OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, scmBlockAddress,
-            blockRpcServer);
-    if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
-        false)) {
-      blockRpcServer.refreshServiceAcl(conf, SCMPolicyProvider.getInstance());
-    }
-  }
-
-  public RPC.Server getBlockRpcServer() {
-    return blockRpcServer;
-  }
-
-  public InetSocketAddress getBlockRpcAddress() {
-    return blockRpcAddress;
-  }
-
-  public void start() {
-    protocolMessageMetrics.register();
-    LOG.info(
-        StorageContainerManager.buildRpcServerStartMessage(
-            "RPC server for Block Protocol", getBlockRpcAddress()));
-    getBlockRpcServer().start();
-  }
-
-  public void stop() {
-    try {
-      protocolMessageMetrics.unregister();
-      LOG.info("Stopping the RPC server for Block Protocol");
-      getBlockRpcServer().stop();
-    } catch (Exception ex) {
-      LOG.error("Block Protocol RPC stop failed.", ex);
-    }
-    IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager());
-  }
-
-  public void join() throws InterruptedException {
-    LOG.trace("Join RPC server for Block Protocol");
-    getBlockRpcServer().join();
-  }
-
-  @Override
-  public List<AllocatedBlock> allocateBlock(long size, int num,
-      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
-      String owner, ExcludeList excludeList) throws IOException {
-    Map<String, String> auditMap = Maps.newHashMap();
-    auditMap.put("size", String.valueOf(size));
-    auditMap.put("type", type.name());
-    auditMap.put("factor", factor.name());
-    auditMap.put("owner", owner);
-    List<AllocatedBlock> blocks = new ArrayList<>(num);
-    boolean auditSuccess = true;
-    try {
-      for (int i = 0; i < num; i++) {
-        AllocatedBlock block = scm.getScmBlockManager()
-            .allocateBlock(size, type, factor, owner, excludeList);
-        if (block != null) {
-          blocks.add(block);
-        }
-      }
-      return blocks;
-    } catch (Exception ex) {
-      auditSuccess = false;
-      AUDIT.logWriteFailure(
-          buildAuditMessageForFailure(SCMAction.ALLOCATE_BLOCK, auditMap, ex)
-      );
-      throw ex;
-    } finally {
-      if(auditSuccess) {
-        AUDIT.logWriteSuccess(
-            buildAuditMessageForSuccess(SCMAction.ALLOCATE_BLOCK, auditMap)
-        );
-      }
-    }
-  }
-
-  /**
-   * Delete blocks for a set of object keys.
-   *
-   * @param keyBlocksInfoList list of block keys with object keys to delete.
-   * @return deletion results.
-   */
-  @Override
-  public List<DeleteBlockGroupResult> deleteKeyBlocks(
-      List<BlockGroup> keyBlocksInfoList) throws IOException {
-    LOG.info("SCM is informed by OM to delete {} blocks", keyBlocksInfoList
-        .size());
-    List<DeleteBlockGroupResult> results = new ArrayList<>();
-    Map<String, String> auditMap = Maps.newHashMap();
-    for (BlockGroup keyBlocks : keyBlocksInfoList) {
-      ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result resultCode;
-      try {
-        // Delete the blocks in a single atomic operation so that we never
-        // end up with only some of the blocks deleted, which would leave
-        // the key in an inconsistent state.
-        auditMap.put("keyBlockToDelete", keyBlocks.toString());
-        scm.getScmBlockManager().deleteBlocks(keyBlocks.getBlockIDList());
-        resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult
-            .Result.success;
-        AUDIT.logWriteSuccess(
-            buildAuditMessageForSuccess(SCMAction.DELETE_KEY_BLOCK, auditMap)
-        );
-      } catch (SCMException scmEx) {
-        LOG.warn("Fail to delete block: {}", keyBlocks.getGroupID(), scmEx);
-        AUDIT.logWriteFailure(
-            buildAuditMessageForFailure(SCMAction.DELETE_KEY_BLOCK, auditMap,
-                scmEx)
-        );
-        switch (scmEx.getResult()) {
-        case SAFE_MODE_EXCEPTION:
-          resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult
-              .Result.safeMode;
-          break;
-        case FAILED_TO_FIND_BLOCK:
-          resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult
-              .Result.errorNotFound;
-          break;
-        default:
-          resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult
-              .Result.unknownFailure;
-        }
-      } catch (IOException ex) {
-        LOG.warn("Fail to delete blocks for object key: {}", keyBlocks
-            .getGroupID(), ex);
-        AUDIT.logWriteFailure(
-            buildAuditMessageForFailure(SCMAction.DELETE_KEY_BLOCK, auditMap,
-                ex)
-        );
-        resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult
-            .Result.unknownFailure;
-      }
-      List<DeleteBlockResult> blockResultList = new ArrayList<>();
-      for (BlockID blockKey : keyBlocks.getBlockIDList()) {
-        blockResultList.add(new DeleteBlockResult(blockKey, resultCode));
-      }
-      results.add(new DeleteBlockGroupResult(keyBlocks.getGroupID(),
-          blockResultList));
-    }
-    return results;
-  }
-
-  @Override
-  public ScmInfo getScmInfo() throws IOException {
-    boolean auditSuccess = true;
-    try{
-      ScmInfo.Builder builder =
-          new ScmInfo.Builder()
-              .setClusterId(scm.getScmStorageConfig().getClusterID())
-              .setScmId(scm.getScmStorageConfig().getScmId());
-      return builder.build();
-    } catch (Exception ex) {
-      auditSuccess = false;
-      AUDIT.logReadFailure(
-          buildAuditMessageForFailure(SCMAction.GET_SCM_INFO, null, ex)
-      );
-      throw ex;
-    } finally {
-      if(auditSuccess) {
-        AUDIT.logReadSuccess(
-            buildAuditMessageForSuccess(SCMAction.GET_SCM_INFO, null)
-        );
-      }
-    }
-  }
-
-  @Override
-  public List<DatanodeDetails> sortDatanodes(List<String> nodes,
-      String clientMachine) throws IOException {
-    boolean auditSuccess = true;
-    try{
-      NodeManager nodeManager = scm.getScmNodeManager();
-      Node client = null;
-      List<DatanodeDetails> possibleClients =
-          nodeManager.getNodesByAddress(clientMachine);
-      if (!possibleClients.isEmpty()) {
-        client = possibleClients.get(0);
-      }
-      List<Node> nodeList = new ArrayList<>();
-      nodes.stream().forEach(uuid -> {
-        DatanodeDetails node = nodeManager.getNodeByUuid(uuid);
-        if (node != null) {
-          nodeList.add(node);
-        }
-      });
-      List<? extends Node> sortedNodeList = scm.getClusterMap()
-          .sortByDistanceCost(client, nodeList, nodes.size());
-      List<DatanodeDetails> ret = new ArrayList<>();
-      sortedNodeList.stream().forEach(node -> ret.add((DatanodeDetails)node));
-      return ret;
-    } catch (Exception ex) {
-      auditSuccess = false;
-      AUDIT.logReadFailure(
-          buildAuditMessageForFailure(SCMAction.SORT_DATANODE, null, ex)
-      );
-      throw ex;
-    } finally {
-      if(auditSuccess) {
-        AUDIT.logReadSuccess(
-            buildAuditMessageForSuccess(SCMAction.SORT_DATANODE, null)
-        );
-      }
-    }
-  }
-
-  @Override
-  public AuditMessage buildAuditMessageForSuccess(
-      AuditAction op, Map<String, String> auditMap) {
-    return new AuditMessage.Builder()
-        .setUser((Server.getRemoteUser() == null) ? null :
-            Server.getRemoteUser().getUserName())
-        .atIp((Server.getRemoteIp() == null) ? null :
-            Server.getRemoteIp().getHostAddress())
-        .forOperation(op.getAction())
-        .withParams(auditMap)
-        .withResult(AuditEventStatus.SUCCESS.toString())
-        .withException(null)
-        .build();
-  }
-
-  @Override
-  public AuditMessage buildAuditMessageForFailure(AuditAction op, Map<String,
-      String> auditMap, Throwable throwable) {
-    return new AuditMessage.Builder()
-        .setUser((Server.getRemoteUser() == null) ? null :
-            Server.getRemoteUser().getUserName())
-        .atIp((Server.getRemoteIp() == null) ? null :
-            Server.getRemoteIp().getHostAddress())
-        .forOperation(op.getAction())
-        .withParams(auditMap)
-        .withResult(AuditEventStatus.FAILURE.toString())
-        .withException(throwable)
-        .build();
-  }
-
-  @Override
-  public void close() throws IOException {
-    stop();
-  }
-}
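For reference, a minimal sketch of how the removed SCMBlockProtocolServer was
driven through its public API (illustrative only: the helper class, the block
size, and the owner string are assumptions; the allocateBlock signature
matches the deleted code above, and conf/scm are assumed to come from an
already-initialized mini cluster):

    import java.util.List;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
    import org.apache.hadoop.hdds.scm.server.SCMBlockProtocolServer;
    import org.apache.hadoop.hdds.scm.server.StorageContainerManager;

    final class BlockServerSketch {
      static List<AllocatedBlock> allocateOneBlock(OzoneConfiguration conf,
          StorageContainerManager scm) throws Exception {
        SCMBlockProtocolServer server = new SCMBlockProtocolServer(conf, scm);
        server.start();
        try {
          // Ask SCM for one 256 MB RATIS/THREE block (example values);
          // failed allocations are audited and rethrown by the server.
          return server.allocateBlock(256L * 1024 * 1024, 1,
              HddsProtos.ReplicationType.RATIS,
              HddsProtos.ReplicationFactor.THREE,
              "example-owner", new ExcludeList());
        } finally {
          server.stop();
        }
      }
    }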
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
deleted file mode 100644
index b23d938..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.server;
-
-import java.io.IOException;
-import java.math.BigInteger;
-import java.security.cert.X509Certificate;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A Certificate Store class that persists certificates issued by SCM CA.
- */
-public class SCMCertStore implements CertificateStore {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SCMCertStore.class);
-  private final SCMMetadataStore scmMetadataStore;
-  private final Lock lock;
-
-  public SCMCertStore(SCMMetadataStore dbStore) {
-    this.scmMetadataStore = dbStore;
-    lock = new ReentrantLock();
-
-  }
-
-  @Override
-  public void storeValidCertificate(BigInteger serialID,
-                                    X509Certificate certificate)
-      throws IOException {
-    lock.lock();
-    try {
-      // This makes sure that no certificate IDs are reusable.
-      if ((getCertificateByID(serialID, CertType.VALID_CERTS) == null) &&
-          (getCertificateByID(serialID, CertType.REVOKED_CERTS) == null)) {
-        scmMetadataStore.getValidCertsTable().put(serialID, certificate);
-      } else {
-        throw new SCMSecurityException("Conflicting certificate ID");
-      }
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  @Override
-  public void revokeCertificate(BigInteger serialID) throws IOException {
-    lock.lock();
-    try {
-      X509Certificate cert = getCertificateByID(serialID, CertType.VALID_CERTS);
-      if (cert == null) {
-        LOG.error("trying to revoke a certificate that is not valid. Serial: " +
-            "{}", serialID.toString());
-        throw new SCMSecurityException("Trying to revoke an invalid " +
-            "certificate.");
-      }
-      // TODO: Check if we are trying to revoke an expired certificate.
-
-      if (getCertificateByID(serialID, CertType.REVOKED_CERTS) != null) {
-        LOG.error("Trying to revoke a certificate that is already revoked.");
-        throw new SCMSecurityException("Trying to revoke an already revoked " +
-            "certificate.");
-      }
-
-      // Let us do this in a transaction.
-      try (BatchOperation batch =
-               scmMetadataStore.getStore().initBatchOperation()) {
-        scmMetadataStore.getRevokedCertsTable()
-            .putWithBatch(batch, serialID, cert);
-        scmMetadataStore.getValidCertsTable().deleteWithBatch(batch, serialID);
-        scmMetadataStore.getStore().commitBatchOperation(batch);
-      }
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  @Override
-  public void removeExpiredCertificate(BigInteger serialID)
-      throws IOException {
-    // TODO: Later this will allow removal of expired certificates from
-    // the system.
-  }
-
-  @Override
-  public X509Certificate getCertificateByID(BigInteger serialID,
-                                            CertType certType)
-      throws IOException {
-    if (certType == CertType.VALID_CERTS) {
-      return scmMetadataStore.getValidCertsTable().get(serialID);
-    } else {
-      return scmMetadataStore.getRevokedCertsTable().get(serialID);
-    }
-  }
-}
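Similarly, a short sketch of the removed SCMCertStore's store/revoke flow
(illustrative only: the helper class is an assumption, and the nested
CertificateStore.CertType location is inferred from the unqualified CertType
references above):

    import java.math.BigInteger;
    import java.security.cert.X509Certificate;
    import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
    import org.apache.hadoop.hdds.scm.server.SCMCertStore;
    import org.apache.hadoop.hdds.security.x509.certificate.authority
        .CertificateStore;

    final class CertStoreSketch {
      static void storeAndRevoke(SCMMetadataStore metadataStore,
          X509Certificate cert) throws Exception {
        CertificateStore store = new SCMCertStore(metadataStore);
        BigInteger serial = cert.getSerialNumber();

        // Persist the certificate; reusing an existing serial ID throws
        // SCMSecurityException, so IDs are never reusable.
        store.storeValidCertificate(serial, cert);

        // Revocation moves the entry from the valid table to the revoked
        // table in one batch operation.
        store.revokeCertificate(serial);
      }
    }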
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
deleted file mode 100644
index 9c27f6a..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ /dev/null
@@ -1,610 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license
- * agreements. See the NOTICE file distributed with this work for additional
- * information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache
- * License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the
- * License. You may obtain a
- * copy of the License at
- *
- * <p>http://www.apache.org/licenses/LICENSE-2.0
- *
- * <p>Unless required by applicable law or agreed to in writing, software
- * distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-import com.google.protobuf.BlockingService;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.ScmUtils;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.hdds.scm.safemode.SafeModePrecheck;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
-import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.ozone.audit.AuditAction;
-import org.apache.hadoop.ozone.audit.AuditEventStatus;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.AuditLoggerType;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.audit.Auditor;
-import org.apache.hadoop.ozone.audit.SCMAction;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocolServerSideTranslatorPB;
-import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos
-    .StorageContainerLocationProtocolService.newReflectiveBlockingService;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CLIENT_ADDRESS_KEY;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HANDLER_COUNT_KEY;
-import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
-import static org.apache.hadoop.hdds.scm.server.StorageContainerManager
-    .startRpcServer;
-
-/**
- * The RPC server that listens to requests from clients.
- */
-public class SCMClientProtocolServer implements
-    StorageContainerLocationProtocol, Auditor {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SCMClientProtocolServer.class);
-  private static final AuditLogger AUDIT =
-      new AuditLogger(AuditLoggerType.SCMLOGGER);
-  private final RPC.Server clientRpcServer;
-  private final InetSocketAddress clientRpcAddress;
-  private final StorageContainerManager scm;
-  private final OzoneConfiguration conf;
-  private SafeModePrecheck safeModePrecheck;
-  private final ProtocolMessageMetrics protocolMetrics;
-
-  public SCMClientProtocolServer(OzoneConfiguration conf,
-      StorageContainerManager scm) throws IOException {
-    this.scm = scm;
-    this.conf = conf;
-    safeModePrecheck = new SafeModePrecheck(conf);
-    final int handlerCount =
-        conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY,
-            OZONE_SCM_HANDLER_COUNT_DEFAULT);
-    RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-
-    protocolMetrics = ProtocolMessageMetrics
-        .create("ScmContainerLocationProtocol",
-            "SCM ContainerLocation protocol metrics",
-            StorageContainerLocationProtocolProtos.Type.values());
-
-    // SCM Container Service RPC
-    BlockingService storageProtoPbService =
-        newReflectiveBlockingService(
-            new StorageContainerLocationProtocolServerSideTranslatorPB(this,
-                protocolMetrics));
-
-    final InetSocketAddress scmAddress = HddsServerUtil
-        .getScmClientBindAddress(conf);
-    clientRpcServer =
-        startRpcServer(
-            conf,
-            scmAddress,
-            StorageContainerLocationProtocolPB.class,
-            storageProtoPbService,
-            handlerCount);
-    clientRpcAddress =
-        updateRPCListenAddress(conf, OZONE_SCM_CLIENT_ADDRESS_KEY,
-            scmAddress, clientRpcServer);
-    if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
-        false)) {
-      clientRpcServer.refreshServiceAcl(conf, SCMPolicyProvider.getInstance());
-    }
-  }
-
-  public RPC.Server getClientRpcServer() {
-    return clientRpcServer;
-  }
-
-  public InetSocketAddress getClientRpcAddress() {
-    return clientRpcAddress;
-  }
-
-  public void start() {
-    protocolMetrics.register();
-    LOG.info(
-        StorageContainerManager.buildRpcServerStartMessage(
-            "RPC server for Client ", getClientRpcAddress()));
-    getClientRpcServer().start();
-  }
-
-  public void stop() {
-    protocolMetrics.unregister();
-    try {
-      LOG.info("Stopping the RPC server for Client Protocol");
-      getClientRpcServer().stop();
-    } catch (Exception ex) {
-      LOG.error("Client Protocol RPC stop failed.", ex);
-    }
-    IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager());
-  }
-
-  public void join() throws InterruptedException {
-    LOG.trace("Join RPC server for Client Protocol");
-    getClientRpcServer().join();
-  }
-
-  @VisibleForTesting
-  public String getRpcRemoteUsername() {
-    UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser();
-    return user == null ? null : user.getUserName();
-  }
-
-  @Override
-  public ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType
-      replicationType, HddsProtos.ReplicationFactor factor,
-      String owner) throws IOException {
-    ScmUtils.preCheck(ScmOps.allocateContainer, safeModePrecheck);
-    getScm().checkAdminAccess(getRpcRemoteUsername());
-
-    final ContainerInfo container = scm.getContainerManager()
-        .allocateContainer(replicationType, factor, owner);
-    final Pipeline pipeline = scm.getPipelineManager()
-        .getPipeline(container.getPipelineID());
-    return new ContainerWithPipeline(container, pipeline);
-  }
-
-  @Override
-  public ContainerInfo getContainer(long containerID) throws IOException {
-    String remoteUser = getRpcRemoteUsername();
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = Maps.newHashMap();
-    auditMap.put("containerID", String.valueOf(containerID));
-    getScm().checkAdminAccess(remoteUser);
-    try {
-      return scm.getContainerManager()
-          .getContainer(ContainerID.valueof(containerID));
-    } catch (IOException ex) {
-      auditSuccess = false;
-      AUDIT.logReadFailure(
-          buildAuditMessageForFailure(SCMAction.GET_CONTAINER, auditMap, ex)
-      );
-      throw ex;
-    } finally {
-      if(auditSuccess) {
-        AUDIT.logReadSuccess(
-            buildAuditMessageForSuccess(SCMAction.GET_CONTAINER, auditMap)
-        );
-      }
-    }
-
-  }
-
-  @Override
-  public ContainerWithPipeline getContainerWithPipeline(long containerID)
-      throws IOException {
-    final ContainerID cid = ContainerID.valueof(containerID);
-    try {
-      final ContainerInfo container = scm.getContainerManager()
-          .getContainer(cid);
-
-      if (safeModePrecheck.isInSafeMode()) {
-        if (container.isOpen()) {
-          if (!hasRequiredReplicas(container)) {
-            throw new SCMException("Open container " + containerID + " doesn't"
-                + " have enough replicas to service this operation in "
-                + "Safe mode.", ResultCodes.SAFE_MODE_EXCEPTION);
-          }
-        }
-      }
-      getScm().checkAdminAccess(null);
-
-      Pipeline pipeline;
-      try {
-        pipeline = container.isOpen() ? scm.getPipelineManager()
-            .getPipeline(container.getPipelineID()) : null;
-      } catch (PipelineNotFoundException ex) {
-        // The pipeline is destroyed.
-        pipeline = null;
-      }
-
-      if (pipeline == null) {
-        pipeline = scm.getPipelineManager().createPipeline(
-            HddsProtos.ReplicationType.STAND_ALONE,
-            container.getReplicationFactor(),
-            scm.getContainerManager()
-                .getContainerReplicas(cid).stream()
-                .map(ContainerReplica::getDatanodeDetails)
-                .collect(Collectors.toList()));
-      }
-
-      AUDIT.logReadSuccess(buildAuditMessageForSuccess(
-          SCMAction.GET_CONTAINER_WITH_PIPELINE,
-          Collections.singletonMap("containerID", cid.toString())));
-
-      return new ContainerWithPipeline(container, pipeline);
-    } catch (IOException ex) {
-      AUDIT.logReadFailure(buildAuditMessageForFailure(
-          SCMAction.GET_CONTAINER_WITH_PIPELINE,
-          Collections.singletonMap("containerID", cid.toString()), ex));
-      throw ex;
-    }
-  }
-
-  /**
-   * Check whether the container's reported replica count is greater than
-   * or equal to the required replication factor.
-   */
-  private boolean hasRequiredReplicas(ContainerInfo contInfo) {
-    try{
-      return getScm().getContainerManager()
-          .getContainerReplicas(contInfo.containerID())
-          .size() >= contInfo.getReplicationFactor().getNumber();
-    } catch (ContainerNotFoundException ex) {
-      // getContainerReplicas throws an exception if no replicas exist for
-      // the given container.
-      return false;
-    }
-  }
-
-  @Override
-  public List<ContainerInfo> listContainer(long startContainerID,
-      int count) throws IOException {
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = Maps.newHashMap();
-    auditMap.put("startContainerID", String.valueOf(startContainerID));
-    auditMap.put("count", String.valueOf(count));
-    try {
-      // To allow startContainerID to take the value "0", "null" is
-      // assigned so that it is handled in the
-      // scm.getContainerManager().listContainer method.
-      final ContainerID containerId = startContainerID != 0 ? ContainerID
-          .valueof(startContainerID) : null;
-      return scm.getContainerManager().
-          listContainer(containerId, count);
-    } catch (Exception ex) {
-      auditSuccess = false;
-      AUDIT.logReadFailure(
-          buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex));
-      throw ex;
-    } finally {
-      if(auditSuccess) {
-        AUDIT.logReadSuccess(
-            buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap));
-      }
-    }
-
-  }
-
-  @Override
-  public void deleteContainer(long containerID) throws IOException {
-    String remoteUser = getRpcRemoteUsername();
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = Maps.newHashMap();
-    auditMap.put("containerID", String.valueOf(containerID));
-    auditMap.put("remoteUser", remoteUser);
-    try {
-      getScm().checkAdminAccess(remoteUser);
-      scm.getContainerManager().deleteContainer(
-          ContainerID.valueof(containerID));
-    } catch (Exception ex) {
-      auditSuccess = false;
-      AUDIT.logWriteFailure(
-          buildAuditMessageForFailure(SCMAction.DELETE_CONTAINER, auditMap, ex)
-      );
-      throw ex;
-    } finally {
-      if(auditSuccess) {
-        AUDIT.logWriteSuccess(
-            buildAuditMessageForSuccess(SCMAction.DELETE_CONTAINER, auditMap)
-        );
-      }
-    }
-  }
-
-  @Override
-  public List<HddsProtos.Node> queryNode(HddsProtos.NodeState state,
-      HddsProtos.QueryScope queryScope, String poolName) throws
-      IOException {
-
-    if (queryScope == HddsProtos.QueryScope.POOL) {
-      throw new IllegalArgumentException("Not Supported yet");
-    }
-
-    List<HddsProtos.Node> result = new ArrayList<>();
-    queryNode(state).forEach(node -> result.add(HddsProtos.Node.newBuilder()
-        .setNodeID(node.getProtoBufMessage())
-        .addNodeStates(state)
-        .build()));
-
-    return result;
-
-  }
-
-  @Override
-  public void notifyObjectStageChange(StorageContainerLocationProtocolProtos
-      .ObjectStageChangeRequestProto.Type type, long id,
-      StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto.Op
-          op, StorageContainerLocationProtocolProtos
-      .ObjectStageChangeRequestProto.Stage stage) throws IOException {
-
-    LOG.info("Object type {} id {} op {} new stage {}", type, id, op,
-        stage);
-    if (type == StorageContainerLocationProtocolProtos
-        .ObjectStageChangeRequestProto.Type.container) {
-      if (op == StorageContainerLocationProtocolProtos
-          .ObjectStageChangeRequestProto.Op.close) {
-        if (stage == StorageContainerLocationProtocolProtos
-            .ObjectStageChangeRequestProto.Stage.begin) {
-          scm.getContainerManager()
-              .updateContainerState(ContainerID.valueof(id),
-                  HddsProtos.LifeCycleEvent.FINALIZE);
-        } else {
-          scm.getContainerManager()
-              .updateContainerState(ContainerID.valueof(id),
-                  HddsProtos.LifeCycleEvent.CLOSE);
-        }
-      }
-    } // else if (type == ObjectStageChangeRequestProto.Type.pipeline) {
-    // TODO: pipeline state update will be addressed in future patch.
-    // }
-
-  }
-
-  @Override
-  public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
-      throws IOException {
-    // TODO: will be addressed in future patch.
-    // This is needed only for debugging purposes to make sure cluster is
-    // working correctly.
-    return null;
-  }
-
-  @Override
-  public List<Pipeline> listPipelines() {
-    AUDIT.logReadSuccess(
-        buildAuditMessageForSuccess(SCMAction.LIST_PIPELINE, null));
-    return scm.getPipelineManager().getPipelines();
-  }
-
-  @Override
-  public void activatePipeline(HddsProtos.PipelineID pipelineID)
-      throws IOException {
-    AUDIT.logReadSuccess(buildAuditMessageForSuccess(
-        SCMAction.ACTIVATE_PIPELINE, null));
-    scm.getPipelineManager().activatePipeline(
-        PipelineID.getFromProtobuf(pipelineID));
-  }
-
-  @Override
-  public void deactivatePipeline(HddsProtos.PipelineID pipelineID)
-      throws IOException {
-    AUDIT.logReadSuccess(buildAuditMessageForSuccess(
-        SCMAction.DEACTIVATE_PIPELINE, null));
-    scm.getPipelineManager().deactivatePipeline(
-        PipelineID.getFromProtobuf(pipelineID));
-  }
-
-  @Override
-  public void closePipeline(HddsProtos.PipelineID pipelineID)
-      throws IOException {
-    Map<String, String> auditMap = Maps.newHashMap();
-    auditMap.put("pipelineID", pipelineID.getId());
-    PipelineManager pipelineManager = scm.getPipelineManager();
-    Pipeline pipeline =
-        pipelineManager.getPipeline(PipelineID.getFromProtobuf(pipelineID));
-    pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
-    AUDIT.logWriteSuccess(
-        buildAuditMessageForSuccess(SCMAction.CLOSE_PIPELINE, null)
-    );
-  }
-
-  @Override
-  public ScmInfo getScmInfo() throws IOException {
-    boolean auditSuccess = true;
-    try{
-      ScmInfo.Builder builder =
-          new ScmInfo.Builder()
-              .setClusterId(scm.getScmStorageConfig().getClusterID())
-              .setScmId(scm.getScmStorageConfig().getScmId());
-      return builder.build();
-    } catch (Exception ex) {
-      auditSuccess = false;
-      AUDIT.logReadFailure(
-          buildAuditMessageForFailure(SCMAction.GET_SCM_INFO, null, ex)
-      );
-      throw ex;
-    } finally {
-      if(auditSuccess) {
-        AUDIT.logReadSuccess(
-            buildAuditMessageForSuccess(SCMAction.GET_SCM_INFO, null)
-        );
-      }
-    }
-  }
-
-  /**
-   * Check if SCM is in safe mode.
-   *
-   * @return true if SCM is in safe mode, false otherwise.
-   * @throws IOException
-   */
-  @Override
-  public boolean inSafeMode() throws IOException {
-    AUDIT.logReadSuccess(
-        buildAuditMessageForSuccess(SCMAction.IN_SAFE_MODE, null)
-    );
-    return scm.isInSafeMode();
-  }
-
-  /**
-   * Force SCM out of Safe mode.
-   *
-   * @return true if the operation is successful.
-   * @throws IOException
-   */
-  @Override
-  public boolean forceExitSafeMode() throws IOException {
-    AUDIT.logWriteSuccess(
-        buildAuditMessageForSuccess(SCMAction.FORCE_EXIT_SAFE_MODE, null)
-    );
-    return scm.exitSafeMode();
-  }
-
-  @Override
-  public void startReplicationManager() {
-    AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
-        SCMAction.START_REPLICATION_MANAGER, null));
-    scm.getReplicationManager().start();
-  }
-
-  @Override
-  public void stopReplicationManager() {
-    AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
-        SCMAction.STOP_REPLICATION_MANAGER, null));
-    scm.getReplicationManager().stop();
-  }
-
-  @Override
-  public boolean getReplicationManagerStatus() {
-    AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
-        SCMAction.GET_REPLICATION_MANAGER_STATUS, null));
-    return scm.getReplicationManager().isRunning();
-  }
-
-  /**
-   * Queries a list of Nodes that match a set of statuses.
-   *
-   * <p>For example, if the nodeStatuses is HEALTHY and RAFT_MEMBER, then
-   * this call will return all healthy nodes which are members of a Raft
-   * pipeline.
-   *
-   * <p>Right now we don't support boolean operators, so we assume an AND
-   * operation between the given statuses.
-   *
-   * @param state - NodeStates.
-   * @return List of Datanodes.
-   */
-  public List<DatanodeDetails> queryNode(HddsProtos.NodeState state) {
-    Preconditions.checkNotNull(state, "Node Query set cannot be null");
-    return new ArrayList<>(queryNodeState(state));
-  }
-
-  @VisibleForTesting
-  public StorageContainerManager getScm() {
-    return scm;
-  }
-
-  /**
-   * Get the current safe mode status.
-   */
-  public boolean getSafeModeStatus() {
-    return safeModePrecheck.isInSafeMode();
-  }
-
-
-  /**
-   * Query the System for Nodes.
-   *
-   * @param nodeState - NodeState that we are interested in matching.
-   * @return Set of Datanodes that match the NodeState.
-   */
-  private Set<DatanodeDetails> queryNodeState(HddsProtos.NodeState nodeState) {
-    Set<DatanodeDetails> returnSet = new TreeSet<>();
-    List<DatanodeDetails> tmp = scm.getScmNodeManager().getNodes(nodeState);
-    if ((tmp != null) && (tmp.size() > 0)) {
-      returnSet.addAll(tmp);
-    }
-    return returnSet;
-  }
-
-  @Override
-  public AuditMessage buildAuditMessageForSuccess(
-      AuditAction op, Map<String, String> auditMap) {
-    return new AuditMessage.Builder()
-        .setUser((Server.getRemoteUser() == null) ? null :
-            Server.getRemoteUser().getUserName())
-        .atIp((Server.getRemoteIp() == null) ? null :
-            Server.getRemoteIp().getHostAddress())
-        .forOperation(op.getAction())
-        .withParams(auditMap)
-        .withResult(AuditEventStatus.SUCCESS.toString())
-        .withException(null)
-        .build();
-  }
-
-  @Override
-  public AuditMessage buildAuditMessageForFailure(AuditAction op, Map<String,
-      String> auditMap, Throwable throwable) {
-    return new AuditMessage.Builder()
-        .setUser((Server.getRemoteUser() == null) ? null :
-            Server.getRemoteUser().getUserName())
-        .atIp((Server.getRemoteIp() == null) ? null :
-            Server.getRemoteIp().getHostAddress())
-        .forOperation(op.getAction())
-        .withParams(auditMap)
-        .withResult(AuditEventStatus.FAILURE.toString())
-        .withException(throwable)
-        .build();
-  }
-
-  @Override
-  public void close() throws IOException {
-    stop();
-  }
-
-  /**
-   * Set SafeMode status.
-   *
-   * @param safeModeStatus - true if SCM should be in safe mode.
-   */
-  public void setSafeModeStatus(boolean safeModeStatus) {
-    safeModePrecheck.setInSafeMode(safeModeStatus);
-  }
-}
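A brief sketch of the removed SCMClientProtocolServer lifecycle and its
safe-mode handling (illustrative only: the helper class is an assumption,
and conf/scm are assumed to be initialized elsewhere):

    import java.util.List;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
    import org.apache.hadoop.hdds.scm.server.StorageContainerManager;

    final class ClientServerSketch {
      static List<ContainerInfo> listFirstContainers(OzoneConfiguration conf,
          StorageContainerManager scm) throws Exception {
        SCMClientProtocolServer server =
            new SCMClientProtocolServer(conf, scm);
        server.start();
        try {
          // Writes such as allocateContainer are rejected by the
          // SafeModePrecheck while SCM is still in safe mode.
          if (server.inSafeMode()) {
            server.forceExitSafeMode();
          }
          // startContainerID == 0 means "list from the beginning".
          return server.listContainer(0, 10);
        } finally {
          server.stop();
        }
      }
    }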
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java
deleted file mode 100644
index 9bbabd1..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.server;
-
-
-import org.apache.hadoop.hdds.scm.block.BlockManager;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.ReplicationManager;
-import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.security.x509.certificate.authority
-    .CertificateServer;
-
-/**
- * This class acts as an SCM builder class. It is important for us from a
- * resilience perspective of SCM: it allows us to swap out the different
- * managers and replace them with our own managers in the testing phase.
- * <p>
- * At some point in the future, we will make all these managers dynamically
- * loadable, so other developers can extend SCM by replacing various managers.
- * <p>
- * TODO: Add different config keys, so that we can load different managers at
- * run time. This will make it easy to extend SCM without having to replace
- * the whole SCM each time.
- * <p>
- * Different Managers supported by this builder are:
- * NodeManager scmNodeManager;
- * PipelineManager pipelineManager;
- * ContainerManager containerManager;
- * BlockManager scmBlockManager;
- * ReplicationManager replicationManager;
- * SCMSafeModeManager scmSafeModeManager;
- * CertificateServer certificateServer;
- * SCMMetadata scmMetadataStore.
- *
- * If any of these are *not* specified, then the default versions of these
- * managers are used by SCM.
- *
- */
-public final class SCMConfigurator {
-  private NodeManager scmNodeManager;
-  private PipelineManager pipelineManager;
-  private ContainerManager containerManager;
-  private BlockManager scmBlockManager;
-  private ReplicationManager replicationManager;
-  private SCMSafeModeManager scmSafeModeManager;
-  private CertificateServer certificateServer;
-  private SCMMetadataStore metadataStore;
-  private NetworkTopology networkTopology;
-
-  /**
-   * Allows user to specify a version of Node manager to use with this SCM.
-   * @param scmNodeManager - Node Manager.
-   */
-  public void setScmNodeManager(NodeManager scmNodeManager) {
-    this.scmNodeManager = scmNodeManager;
-  }
-
-  /**
-   * Allows user to specify a custom version of PipelineManager to use with
-   * this SCM.
-   * @param pipelineManager - Pipeline Manager.
-   */
-  public void setPipelineManager(PipelineManager pipelineManager) {
-    this.pipelineManager = pipelineManager;
-  }
-
-  /**
-   *  Allows user to specify a custom version of containerManager to use with
-   *  this SCM.
-   * @param containerManager - Container Manager.
-   */
-  public void setContainerManager(ContainerManager containerManager) {
-    this.containerManager = containerManager;
-  }
-
-  /**
-   *  Allows user to specify a custom version of Block Manager to use with
-   *  this SCM.
-   * @param scmBlockManager - Block Manager
-   */
-  public void setScmBlockManager(BlockManager scmBlockManager) {
-    this.scmBlockManager = scmBlockManager;
-  }
-
-  /**
-   * Allows user to specify a custom version of Replication Manager to use
-   * with this SCM.
-   * @param replicationManager - replication Manager.
-   */
-  public void setReplicationManager(ReplicationManager replicationManager) {
-    this.replicationManager = replicationManager;
-  }
-
-  /**
-   * Allows user to specify a custom version of Safe Mode Manager to use
-   * with this SCM.
-   * @param scmSafeModeManager - SafeMode Manager.
-   */
-  public void setScmSafeModeManager(SCMSafeModeManager scmSafeModeManager) {
-    this.scmSafeModeManager = scmSafeModeManager;
-  }
-
-  /**
-   * Allows user to specify a custom version of Certificate Server to use
-   * with this SCM.
-   * @param certificateAuthority - Certificate server.
-   */
-  public void setCertificateServer(CertificateServer certificateAuthority) {
-    this.certificateServer = certificateAuthority;
-  }
-
-  /**
-   * Allows user to specify a custom version of Metadata Store to be used
-   * with this SCM.
-   * @param scmMetadataStore - scm metadata store.
-   */
-  public void setMetadataStore(SCMMetadataStore scmMetadataStore) {
-    this.metadataStore = scmMetadataStore;
-  }
-
-  /**
-   * Allows user to specify a custom version of Network Topology Cluster
-   * to be used with this SCM.
-   * @param networkTopology - network topology cluster.
-   */
-  public void setNetworkTopology(NetworkTopology networkTopology) {
-    this.networkTopology = networkTopology;
-  }
-
-  /**
-   * Gets SCM Node Manager.
-   * @return Node Manager.
-   */
-  public NodeManager getScmNodeManager() {
-    return scmNodeManager;
-  }
-
-  /**
-   * Get Pipeline Manager.
-   * @return pipeline manager.
-   */
-  public PipelineManager getPipelineManager() {
-    return pipelineManager;
-  }
-
-  /**
-   * Get Container Manager.
-   * @return Container Manager.
-   */
-  public ContainerManager getContainerManager() {
-    return containerManager;
-  }
-
-  /**
-   * Get SCM Block Manager.
-   * @return Block Manager.
-   */
-  public BlockManager getScmBlockManager() {
-    return scmBlockManager;
-  }
-
-  /**
-   * Get Replication Manager.
-   * @return Replication Manager.
-   */
-  public ReplicationManager getReplicationManager() {
-    return replicationManager;
-  }
-
-  /**
-   * Gets Safe Mode Manager.
-   * @return Safe Mode manager.
-   */
-  public SCMSafeModeManager getScmSafeModeManager() {
-    return scmSafeModeManager;
-  }
-
-  /**
-   * Get Certificate Manager.
-   * @return Certificate Manager.
-   */
-  public CertificateServer getCertificateServer() {
-    return certificateServer;
-  }
-
-  /**
-   * Get Metadata Store.
-   * @return SCMMetadataStore.
-   */
-  public SCMMetadataStore getMetadataStore() {
-    return metadataStore;
-  }
-
-  /**
-   * Get network topology cluster tree.
-   * @return NetworkTopology.
-   */
-  public NetworkTopology getNetworkTopology() {
-    return networkTopology;
-  }
-}
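The class comment above describes swapping managers for testing; a minimal
sketch of that pattern (illustrative only: the configurator-accepting
StorageContainerManager constructor is an assumption, and mockNodeManager
stands for any test double):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.node.NodeManager;
    import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
    import org.apache.hadoop.hdds.scm.server.StorageContainerManager;

    final class ConfiguratorSketch {
      static StorageContainerManager scmWithCustomNodeManager(
          OzoneConfiguration conf, NodeManager mockNodeManager)
          throws Exception {
        SCMConfigurator configurator = new SCMConfigurator();
        // Only the managers set here are overridden; SCM falls back to its
        // default implementation for every manager left null.
        configurator.setScmNodeManager(mockNodeManager);
        return new StorageContainerManager(conf, configurator);
      }
    }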
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java
deleted file mode 100644
index 5e8e137..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETING;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.QUASI_CLOSED;
-
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.Interns;
-
-/**
- * Metrics source to report number of containers in different states.
- */
-@InterfaceAudience.Private
-@Metrics(about = "SCM Container Manager Metrics", context = "ozone")
-public class SCMContainerMetrics implements MetricsSource {
-
-  private final SCMMXBean scmmxBean;
-  private static final String SOURCE =
-      SCMContainerMetrics.class.getSimpleName();
-
-  public SCMContainerMetrics(SCMMXBean scmmxBean) {
-    this.scmmxBean = scmmxBean;
-  }
-
-  public static SCMContainerMetrics create(SCMMXBean scmmxBean) {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE, "Storage " +
-        "Container Manager Metrics", new SCMContainerMetrics(scmmxBean));
-  }
-
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE);
-  }
-
-  @Override
-  @SuppressWarnings("SuspiciousMethodCalls")
-  public void getMetrics(MetricsCollector collector, boolean all) {
-    Map<String, Integer> stateCount = scmmxBean.getContainerStateCount();
-
-    collector.addRecord(SOURCE)
-        .addGauge(Interns.info("OpenContainers",
-            "Number of open containers"),
-            stateCount.get(OPEN.toString()))
-        .addGauge(Interns.info("ClosingContainers",
-            "Number of containers in closing state"),
-            stateCount.get(CLOSING.toString()))
-        .addGauge(Interns.info("QuasiClosedContainers",
-            "Number of containers in quasi closed state"),
-            stateCount.get(QUASI_CLOSED.toString()))
-        .addGauge(Interns.info("ClosedContainers",
-            "Number of containers in closed state"),
-            stateCount.get(CLOSED.toString()))
-        .addGauge(Interns.info("DeletingContainers",
-            "Number of containers in deleting state"),
-            stateCount.get(DELETING.toString()))
-        .addGauge(Interns.info("DeletedContainers",
-            "Number of containers in deleted state"),
-            stateCount.get(DELETED.toString()));
-  }
-}
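A short sketch of the removed SCMContainerMetrics registration lifecycle
(illustrative only: the helper class is an assumption, and mxBean stands for
the SCM's SCMMXBean implementation):

    import org.apache.hadoop.hdds.scm.server.SCMContainerMetrics;
    import org.apache.hadoop.hdds.scm.server.SCMMXBean;

    final class ContainerMetricsSketch {
      static void publish(SCMMXBean mxBean) {
        // Registers the source with the default metrics system; getMetrics()
        // then emits one gauge per container lifecycle state (open, closing,
        // quasi-closed, closed, deleting, deleted).
        SCMContainerMetrics metrics = SCMContainerMetrics.create(mxBean);
        try {
          // ... metrics are collected periodically by the metrics system ...
        } finally {
          metrics.unRegister();
        }
      }
    }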
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
deleted file mode 100644
index 9f6077b..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ /dev/null
@@ -1,289 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineActionsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerActionsProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-
-import com.google.protobuf.GeneratedMessage;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_ACTIONS;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents
-    .INCREMENTAL_CONTAINER_REPORT;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.CMD_STATUS_REPORT;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_ACTIONS;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_REPORT;
-
-/**
- * This class is responsible for dispatching heartbeat from datanode to
- * appropriate EventHandler at SCM.
- */
-public final class SCMDatanodeHeartbeatDispatcher {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SCMDatanodeHeartbeatDispatcher.class);
-
-  private final NodeManager nodeManager;
-  private final EventPublisher eventPublisher;
-
-
-  public SCMDatanodeHeartbeatDispatcher(NodeManager nodeManager,
-                                        EventPublisher eventPublisher) {
-    Preconditions.checkNotNull(nodeManager);
-    Preconditions.checkNotNull(eventPublisher);
-    this.nodeManager = nodeManager;
-    this.eventPublisher = eventPublisher;
-  }
-
-
-  /**
-   * Dispatches heartbeat to registered event handlers.
-   *
-   * @param heartbeat heartbeat to be dispatched.
-   *
-   * @return list of SCMCommand
-   */
-  public List<SCMCommand> dispatch(SCMHeartbeatRequestProto heartbeat) {
-    DatanodeDetails datanodeDetails =
-        DatanodeDetails.getFromProtoBuf(heartbeat.getDatanodeDetails());
-    List<SCMCommand> commands;
-
-    // If node is not registered, ask the node to re-register. Do not process
-    // Heartbeat for unregistered nodes.
-    if (!nodeManager.isNodeRegistered(datanodeDetails)) {
-      LOG.info("SCM received heartbeat from an unregistered datanode {}. " +
-          "Asking datanode to re-register.", datanodeDetails);
-      UUID dnID = datanodeDetails.getUuid();
-      nodeManager.addDatanodeCommand(dnID, new ReregisterCommand());
-
-      commands = nodeManager.getCommandQueue(dnID);
-
-    } else {
-
-      // should we dispatch heartbeat through eventPublisher?
-      commands = nodeManager.processHeartbeat(datanodeDetails);
-      if (heartbeat.hasNodeReport()) {
-        LOG.debug("Dispatching Node Report.");
-        eventPublisher.fireEvent(
-            NODE_REPORT,
-            new NodeReportFromDatanode(
-                datanodeDetails,
-                heartbeat.getNodeReport()));
-      }
-
-      if (heartbeat.hasContainerReport()) {
-        LOG.debug("Dispatching Container Report.");
-        eventPublisher.fireEvent(
-            CONTAINER_REPORT,
-            new ContainerReportFromDatanode(
-                datanodeDetails,
-                heartbeat.getContainerReport()));
-
-      }
-
-      final List<IncrementalContainerReportProto> icrs =
-          heartbeat.getIncrementalContainerReportList();
-
-      if (icrs.size() > 0) {
-        LOG.debug("Dispatching ICRs.");
-        for (IncrementalContainerReportProto icr : icrs) {
-          eventPublisher.fireEvent(INCREMENTAL_CONTAINER_REPORT,
-              new IncrementalContainerReportFromDatanode(
-                  datanodeDetails, icr));
-        }
-      }
-
-      if (heartbeat.hasContainerActions()) {
-        LOG.debug("Dispatching Container Actions.");
-        eventPublisher.fireEvent(
-            CONTAINER_ACTIONS,
-            new ContainerActionsFromDatanode(
-                datanodeDetails,
-                heartbeat.getContainerActions()));
-      }
-
-      if (heartbeat.hasPipelineReports()) {
-        LOG.debug("Dispatching Pipeline Report.");
-        eventPublisher.fireEvent(
-            PIPELINE_REPORT,
-            new PipelineReportFromDatanode(
-                datanodeDetails,
-                heartbeat.getPipelineReports()));
-
-      }
-
-      if (heartbeat.hasPipelineActions()) {
-        LOG.debug("Dispatching Pipeline Actions.");
-        eventPublisher.fireEvent(
-            PIPELINE_ACTIONS,
-            new PipelineActionsFromDatanode(
-                datanodeDetails,
-                heartbeat.getPipelineActions()));
-      }
-
-      if (heartbeat.getCommandStatusReportsCount() != 0) {
-        for (CommandStatusReportsProto commandStatusReport : heartbeat
-            .getCommandStatusReportsList()) {
-          eventPublisher.fireEvent(
-              CMD_STATUS_REPORT,
-              new CommandStatusReportFromDatanode(
-                  datanodeDetails,
-                  commandStatusReport));
-        }
-      }
-    }
-
-    return commands;
-  }
-
-  /**
-   * Wrapper class for events with the datanode origin.
-   */
-  public static class ReportFromDatanode<T extends GeneratedMessage> {
-
-    private final DatanodeDetails datanodeDetails;
-
-    private final T report;
-
-    public ReportFromDatanode(DatanodeDetails datanodeDetails, T report) {
-      this.datanodeDetails = datanodeDetails;
-      this.report = report;
-    }
-
-    public DatanodeDetails getDatanodeDetails() {
-      return datanodeDetails;
-    }
-
-    public T getReport() {
-      return report;
-    }
-  }
-
-  /**
-   * Node report event payload with origin.
-   */
-  public static class NodeReportFromDatanode
-      extends ReportFromDatanode<NodeReportProto> {
-
-    public NodeReportFromDatanode(DatanodeDetails datanodeDetails,
-        NodeReportProto report) {
-      super(datanodeDetails, report);
-    }
-  }
-
-  /**
-   * Container report event payload with origin.
-   */
-  public static class ContainerReportFromDatanode
-      extends ReportFromDatanode<ContainerReportsProto> {
-
-    public ContainerReportFromDatanode(DatanodeDetails datanodeDetails,
-        ContainerReportsProto report) {
-      super(datanodeDetails, report);
-    }
-  }
-
-  /**
-   * Incremental Container report event payload with origin.
-   */
-  public static class IncrementalContainerReportFromDatanode
-      extends ReportFromDatanode<IncrementalContainerReportProto> {
-
-    public IncrementalContainerReportFromDatanode(
-        DatanodeDetails datanodeDetails,
-        IncrementalContainerReportProto report) {
-      super(datanodeDetails, report);
-    }
-  }
-
-  /**
-   * Container action event payload with origin.
-   */
-  public static class ContainerActionsFromDatanode
-      extends ReportFromDatanode<ContainerActionsProto> {
-
-    public ContainerActionsFromDatanode(DatanodeDetails datanodeDetails,
-                                       ContainerActionsProto actions) {
-      super(datanodeDetails, actions);
-    }
-  }
-
-  /**
-   * Pipeline report event payload with origin.
-   */
-  public static class PipelineReportFromDatanode
-          extends ReportFromDatanode<PipelineReportsProto> {
-
-    public PipelineReportFromDatanode(DatanodeDetails datanodeDetails,
-                                      PipelineReportsProto report) {
-      super(datanodeDetails, report);
-    }
-  }
-
-  /**
-   * Pipeline action event payload with origin.
-   */
-  public static class PipelineActionsFromDatanode
-      extends ReportFromDatanode<PipelineActionsProto> {
-
-    public PipelineActionsFromDatanode(DatanodeDetails datanodeDetails,
-        PipelineActionsProto actions) {
-      super(datanodeDetails, actions);
-    }
-  }
-
-  /**
-   * Command status report event payload with origin.
-   */
-  public static class CommandStatusReportFromDatanode
-      extends ReportFromDatanode<CommandStatusReportsProto> {
-
-    public CommandStatusReportFromDatanode(DatanodeDetails datanodeDetails,
-        CommandStatusReportsProto report) {
-      super(datanodeDetails, report);
-    }
-  }
-
-}
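
For reference, the dispatcher above turns each section of a heartbeat into a
typed event on the SCM event queue; handlers registered in
StorageContainerManager consume them. A minimal sketch of such a consumer,
assuming the EventHandler#onMessage(payload, publisher) callback of the hdds
server framework and the repeated storageReport field of NodeReportProto
(neither is shown in this hunk):

    import org.apache.hadoop.hdds.protocol.proto
        .StorageContainerDatanodeProtocolProtos.NodeReportProto;
    import org.apache.hadoop.hdds.scm.server
        .SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
    import org.apache.hadoop.hdds.server.events.EventHandler;
    import org.apache.hadoop.hdds.server.events.EventPublisher;

    /** Hypothetical handler that just logs each NODE_REPORT event. */
    public class LoggingNodeReportHandler
        implements EventHandler<NodeReportFromDatanode> {

      @Override
      public void onMessage(NodeReportFromDatanode event,
          EventPublisher publisher) {
        NodeReportProto report = event.getReport();
        System.out.println("Node report from "
            + event.getDatanodeDetails().getUuid()
            + " with " + report.getStorageReportCount()
            + " storage report(s)");
      }
    }

Registration would mirror the real handlers, e.g.
eventQueue.addHandler(SCMEvents.NODE_REPORT, new LoggingNodeReportHandler()).
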
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
deleted file mode 100644
index 530c0a6..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ /dev/null
@@ -1,404 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license
- * agreements. See the NOTICE file distributed with this work for additional
- * information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache
- * License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the
- * License. You may obtain a
- * copy of the License at
- *
- * <p>http://www.apache.org/licenses/LICENSE-2.0
- *
- * <p>Unless required by applicable law or agreed to in writing, software
- * distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReregisterCommandProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.ozone.audit.AuditAction;
-import org.apache.hadoop.ozone.audit.AuditEventStatus;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.AuditLoggerType;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.audit.Auditor;
-import org.apache.hadoop.ozone.audit.SCMAction;
-import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
-import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
-import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
-import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
-import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
-import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-import com.google.protobuf.BlockingService;
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.closeContainerCommand;
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.deleteBlocksCommand;
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.deleteContainerCommand;
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.replicateContainerCommand;
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.reregisterCommand;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_REPORT;
-import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer;
-import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Protocol Handler for Datanode Protocol.
- */
-public class SCMDatanodeProtocolServer implements
-    StorageContainerDatanodeProtocol, Auditor {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      SCMDatanodeProtocolServer.class);
-
-  private static final AuditLogger AUDIT =
-      new AuditLogger(AuditLoggerType.SCMLOGGER);
-
-  /**
-   * The RPC server that listens to requests from DataNodes.
-   */
-  private final RPC.Server datanodeRpcServer;
-
-  private final StorageContainerManager scm;
-  private final InetSocketAddress datanodeRpcAddress;
-  private final SCMDatanodeHeartbeatDispatcher heartbeatDispatcher;
-  private final EventPublisher eventPublisher;
-  private final ProtocolMessageMetrics protocolMessageMetrics;
-
-  public SCMDatanodeProtocolServer(final OzoneConfiguration conf,
-      StorageContainerManager scm, EventPublisher eventPublisher)
-      throws IOException {
-
-    Preconditions.checkNotNull(scm, "SCM cannot be null");
-    Preconditions.checkNotNull(eventPublisher, "EventPublisher cannot be null");
-
-    this.scm = scm;
-    this.eventPublisher = eventPublisher;
-    final int handlerCount =
-        conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY,
-            OZONE_SCM_HANDLER_COUNT_DEFAULT);
-
-    heartbeatDispatcher = new SCMDatanodeHeartbeatDispatcher(
-        scm.getScmNodeManager(), eventPublisher);
-
-    RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
-        ProtobufRpcEngine.class);
-
-    protocolMessageMetrics = ProtocolMessageMetrics
-        .create("SCMDatanodeProtocol", "SCM Datanode protocol",
-            StorageContainerDatanodeProtocolProtos.Type.values());
-
-    BlockingService dnProtoPbService =
-        StorageContainerDatanodeProtocolProtos
-            .StorageContainerDatanodeProtocolService
-            .newReflectiveBlockingService(
-                new StorageContainerDatanodeProtocolServerSideTranslatorPB(
-                    this, protocolMessageMetrics));
-
-    InetSocketAddress datanodeRpcAddr =
-        HddsServerUtil.getScmDataNodeBindAddress(conf);
-
-    datanodeRpcServer =
-        startRpcServer(
-            conf,
-            datanodeRpcAddr,
-            StorageContainerDatanodeProtocolPB.class,
-            dnProtoPbService,
-            handlerCount);
-
-    datanodeRpcAddress =
-        updateRPCListenAddress(
-            conf, OZONE_SCM_DATANODE_ADDRESS_KEY, datanodeRpcAddr,
-            datanodeRpcServer);
-
-    if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
-        false)) {
-      datanodeRpcServer.refreshServiceAcl(conf,
-          SCMPolicyProvider.getInstance());
-    }
-  }
-
-  public void start() {
-    LOG.info(
-        StorageContainerManager.buildRpcServerStartMessage(
-            "RPC server for DataNodes", datanodeRpcAddress));
-    protocolMessageMetrics.register();
-    datanodeRpcServer.start();
-  }
-
-  public InetSocketAddress getDatanodeRpcAddress() {
-    return datanodeRpcAddress;
-  }
-
-  @Override
-  public SCMVersionResponseProto getVersion(SCMVersionRequestProto
-      versionRequest)
-      throws IOException {
-    boolean auditSuccess = true;
-    try {
-      return scm.getScmNodeManager().getVersion(versionRequest)
-              .getProtobufMessage();
-    } catch (Exception ex) {
-      auditSuccess = false;
-      AUDIT.logReadFailure(
-          buildAuditMessageForFailure(SCMAction.GET_VERSION, null, ex));
-      throw ex;
-    } finally {
-      if (auditSuccess) {
-        AUDIT.logReadSuccess(
-            buildAuditMessageForSuccess(SCMAction.GET_VERSION, null));
-      }
-    }
-  }
-
-  @Override
-  public SCMRegisteredResponseProto register(
-      HddsProtos.DatanodeDetailsProto datanodeDetailsProto,
-      NodeReportProto nodeReport,
-      ContainerReportsProto containerReportsProto,
-          PipelineReportsProto pipelineReportsProto)
-      throws IOException {
-    DatanodeDetails datanodeDetails = DatanodeDetails
-        .getFromProtoBuf(datanodeDetailsProto);
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = Maps.newHashMap();
-    auditMap.put("datanodeDetails", datanodeDetails.toString());
-
-    // TODO: Return the list of nodes that form the SCM HA.
-    RegisteredCommand registeredCommand = scm.getScmNodeManager()
-        .register(datanodeDetails, nodeReport, pipelineReportsProto);
-    if (registeredCommand.getError()
-        == SCMRegisteredResponseProto.ErrorCode.success) {
-      eventPublisher.fireEvent(CONTAINER_REPORT,
-          new SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode(
-              datanodeDetails, containerReportsProto));
-      eventPublisher.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
-          new NodeRegistrationContainerReport(datanodeDetails,
-              containerReportsProto));
-      eventPublisher.fireEvent(PIPELINE_REPORT,
-              new PipelineReportFromDatanode(datanodeDetails,
-                      pipelineReportsProto));
-    }
-    try {
-      return getRegisteredResponse(registeredCommand);
-    } catch (Exception ex) {
-      auditSuccess = false;
-      AUDIT.logWriteFailure(
-          buildAuditMessageForFailure(SCMAction.REGISTER, auditMap, ex));
-      throw ex;
-    } finally {
-      if (auditSuccess) {
-        AUDIT.logWriteSuccess(
-            buildAuditMessageForSuccess(SCMAction.REGISTER, auditMap));
-      }
-    }
-  }
-
-  @VisibleForTesting
-  public static SCMRegisteredResponseProto getRegisteredResponse(
-      RegisteredCommand cmd) {
-    return cmd.getProtoBufMessage();
-  }
-
-  @Override
-  public SCMHeartbeatResponseProto sendHeartbeat(
-      SCMHeartbeatRequestProto heartbeat) throws IOException {
-    List<SCMCommandProto> cmdResponses = new ArrayList<>();
-    for (SCMCommand cmd : heartbeatDispatcher.dispatch(heartbeat)) {
-      cmdResponses.add(getCommandResponse(cmd));
-    }
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = Maps.newHashMap();
-    auditMap.put("datanodeUUID", heartbeat.getDatanodeDetails().getUuid());
-    auditMap.put("command", flatten(cmdResponses.toString()));
-    try {
-      return SCMHeartbeatResponseProto.newBuilder()
-          .setDatanodeUUID(heartbeat.getDatanodeDetails().getUuid())
-          .addAllCommands(cmdResponses).build();
-    } catch (Exception ex) {
-      auditSuccess = false;
-      AUDIT.logWriteFailure(
-          buildAuditMessageForFailure(SCMAction.SEND_HEARTBEAT, auditMap, ex)
-      );
-      throw ex;
-    } finally {
-      if (auditSuccess) {
-        AUDIT.logWriteSuccess(
-            buildAuditMessageForSuccess(SCMAction.SEND_HEARTBEAT, auditMap)
-        );
-      }
-    }
-  }
-
-  /**
-   * Builds the SCMCommandProto response for the given SCM command.
-   *
-   * @param cmd - the SCMCommand to convert.
-   * @return SCMCommandProto
-   * @throws IOException if updating the deleted block log fails.
-   */
-  @VisibleForTesting
-  public SCMCommandProto getCommandResponse(SCMCommand cmd)
-      throws IOException {
-    SCMCommandProto.Builder builder =
-        SCMCommandProto.newBuilder();
-    switch (cmd.getType()) {
-    case reregisterCommand:
-      return builder
-          .setCommandType(reregisterCommand)
-          .setReregisterCommandProto(ReregisterCommandProto
-              .getDefaultInstance())
-          .build();
-    case deleteBlocksCommand:
-      // Once SCM sends out the deletion message, increment the count.
-      // This is done here instead of when SCM receives the ACK, because
-      // the DN might not be able to respond with the ACK for some time.
-      // If it times out, SCM needs to re-send the message a few more times.
-      List<Long> txs =
-          ((DeleteBlocksCommand) cmd)
-              .blocksTobeDeleted()
-              .stream()
-              .map(tx -> tx.getTxID())
-              .collect(Collectors.toList());
-      scm.getScmBlockManager().getDeletedBlockLog().incrementCount(txs);
-      return builder
-          .setCommandType(deleteBlocksCommand)
-          .setDeleteBlocksCommandProto(((DeleteBlocksCommand) cmd).getProto())
-          .build();
-    case closeContainerCommand:
-      return builder
-          .setCommandType(closeContainerCommand)
-          .setCloseContainerCommandProto(
-              ((CloseContainerCommand) cmd).getProto())
-          .build();
-    case deleteContainerCommand:
-      return builder.setCommandType(deleteContainerCommand)
-          .setDeleteContainerCommandProto(
-              ((DeleteContainerCommand) cmd).getProto())
-          .build();
-    case replicateContainerCommand:
-      return builder
-          .setCommandType(replicateContainerCommand)
-          .setReplicateContainerCommandProto(
-              ((ReplicateContainerCommand)cmd).getProto())
-          .build();
-    default:
-      throw new IllegalArgumentException("Scm command " +
-          cmd.getType().toString() + " is not implemented");
-    }
-  }
-
-
-  public void join() throws InterruptedException {
-    LOG.trace("Join RPC server for DataNodes");
-    datanodeRpcServer.join();
-  }
-
-  public void stop() {
-    try {
-      LOG.info("Stopping the RPC server for DataNodes");
-      datanodeRpcServer.stop();
-    } catch (Exception ex) {
-      LOG.error(" datanodeRpcServer stop failed.", ex);
-    }
-    IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager());
-    protocolMessageMetrics.unregister();
-  }
-
-  @Override
-  public AuditMessage buildAuditMessageForSuccess(
-      AuditAction op, Map<String, String> auditMap) {
-    return new AuditMessage.Builder()
-        .setUser((Server.getRemoteUser() == null) ? null :
-            Server.getRemoteUser().getUserName())
-        .atIp((Server.getRemoteIp() == null) ? null :
-            Server.getRemoteIp().getHostAddress())
-        .forOperation(op.getAction())
-        .withParams(auditMap)
-        .withResult(AuditEventStatus.SUCCESS.toString())
-        .withException(null)
-        .build();
-  }
-
-  @Override
-  public AuditMessage buildAuditMessageForFailure(AuditAction op, Map<String,
-      String> auditMap, Throwable throwable) {
-    return new AuditMessage.Builder()
-        .setUser((Server.getRemoteUser() == null) ? null :
-            Server.getRemoteUser().getUserName())
-        .atIp((Server.getRemoteIp() == null) ? null :
-            Server.getRemoteIp().getHostAddress())
-        .forOperation(op.getAction())
-        .withParams(auditMap)
-        .withResult(AuditEventStatus.FAILURE.toString())
-        .withException(throwable)
-        .build();
-  }
-
-  private static String flatten(String input) {
-    return input
-        .replaceAll(System.lineSeparator(), " ")
-        .trim()
-        .replaceAll(" +", " ");
-  }
-
-  /**
-   * Container report event fired when a datanode registers.
-   */
-  public static class NodeRegistrationContainerReport extends
-      ReportFromDatanode<ContainerReportsProto> {
-
-    public NodeRegistrationContainerReport(DatanodeDetails datanodeDetails,
-        ContainerReportsProto report) {
-      super(datanodeDetails, report);
-    }
-  }
-
-}
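
The server above completes the heartbeat round trip: queued SCMCommands are
translated into SCMCommandProto entries on the response. A hedged sketch of
the datanode-facing half of that exchange, assuming
DatanodeDetails#getProtoBufMessage() (not shown in this hunk); the proto
field names follow the getters and builders used above:

    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto
        .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
    import org.apache.hadoop.hdds.protocol.proto
        .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
    import org.apache.hadoop.hdds.protocol.proto
        .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
    import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;

    /** Hypothetical caller-side helper for a single heartbeat. */
    final class HeartbeatSketch {
      static void heartbeatOnce(StorageContainerDatanodeProtocol scm,
          DatanodeDetails dn) throws IOException {
        SCMHeartbeatRequestProto request =
            SCMHeartbeatRequestProto.newBuilder()
                .setDatanodeDetails(dn.getProtoBufMessage())
                .build();
        SCMHeartbeatResponseProto response = scm.sendHeartbeat(request);
        for (SCMCommandProto cmd : response.getCommandsList()) {
          // Dispatch on cmd.getCommandType(): reregisterCommand,
          // deleteBlocksCommand, closeContainerCommand, and so on.
        }
      }
    }

Note that an unregistered datanode gets a reregisterCommand back instead of
having its reports processed, as implemented in the dispatcher earlier.
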
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
deleted file mode 100644
index 13b5551..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.server;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.server.ServiceRuntimeInfo;
-
-import java.util.Map;
-
-/**
- * This is the JMX management interface for SCM information.
- */
-@InterfaceAudience.Private
-public interface SCMMXBean extends ServiceRuntimeInfo {
-
-  /**
-   * Get the SCM RPC server port that is used to listen to datanode requests.
-   * @return SCM datanode RPC server port
-   */
-  String getDatanodeRpcPort();
-
-  /**
-   * Get the SCM RPC server port that is used to listen to client requests.
-   * @return SCM client RPC server port
-   */
-  String getClientRpcPort();
-
-  /**
-   * Get container report info that includes container IO stats of nodes.
-   * @return mapping from datanode UUID to its container report as a JSON string
-   */
-  Map<String, String> getContainerReport();
-
-  /**
-   * Returns safe mode status.
-   * @return boolean
-   */
-  boolean isInSafeMode();
-
-  /**
-   * Returns the current safe mode container threshold.
-   * @return double
-   */
-  double getSafeModeCurrentContainerThreshold();
-
-  /**
-   * Returns the container count for each state.
-   */
-  Map<String, Integer> getContainerStateCount();
-}
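
Since SCMMXBean extends ServiceRuntimeInfo and is registered as a JMX bean,
its attributes can be read through the standard platform MBean server. A
hedged sketch; the ObjectName below is an assumption, as the real name is
whatever the MBeans.register(...) call in StorageContainerManager chose:

    import java.lang.management.ManagementFactory;
    import javax.management.JMX;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    /** Hypothetical in-process JMX reader for the interface above. */
    final class ScmJmxSketch {
      public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        // Assumed name; check the actual registration in SCM.
        ObjectName name = new ObjectName(
            "Hadoop:service=StorageContainerManager,"
                + "name=StorageContainerManagerInfo");
        SCMMXBean scm = JMX.newMXBeanProxy(server, name, SCMMXBean.class);
        System.out.println("safe mode: " + scm.isInSafeMode()
            + ", datanode RPC port: " + scm.getDatanodeRpcPort());
      }
    }
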
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java
deleted file mode 100644
index b21a722..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
-import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.apache.hadoop.security.authorize.Service;
-
-import java.util.concurrent.atomic.AtomicReference;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_BLOCK_PROTOCOL_ACL;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_CONTAINER_PROTOCOL_ACL;
-
-/**
- * {@link PolicyProvider} for SCM protocols.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public final class SCMPolicyProvider extends PolicyProvider {
-
-  private static AtomicReference<SCMPolicyProvider> atomicReference =
-      new AtomicReference<>();
-
-  private SCMPolicyProvider() {
-  }
-
-  @Private
-  @Unstable
-  public static SCMPolicyProvider getInstance() {
-    if (atomicReference.get() == null) {
-      atomicReference.compareAndSet(null, new SCMPolicyProvider());
-    }
-    return atomicReference.get();
-  }
-
-  private static final Service[] SCM_SERVICES =
-      new Service[]{
-          new Service(
-              HDDS_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL,
-              StorageContainerDatanodeProtocol.class),
-          new Service(
-              HDDS_SECURITY_CLIENT_SCM_CONTAINER_PROTOCOL_ACL,
-              StorageContainerLocationProtocol.class),
-          new Service(
-              HDDS_SECURITY_CLIENT_SCM_BLOCK_PROTOCOL_ACL,
-              ScmBlockLocationProtocol.class),
-          new Service(
-              HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL,
-              SCMSecurityProtocol.class),
-      };
-
-  @SuppressFBWarnings("EI_EXPOSE_REP")
-  @Override
-  public Service[] getServices() {
-    return SCM_SERVICES;
-  }
-
-}
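
getInstance() above deliberately tolerates a construction race instead of
synchronizing: several threads may each build an instance, but compareAndSet
lets exactly one win, and every caller then sees the same reference. The
same idiom in isolation, as a plain-JDK sketch:

    import java.util.concurrent.atomic.AtomicReference;

    /** Generic sketch of the race-tolerant lazy singleton idiom. */
    final class LazySingleton {
      private static final AtomicReference<LazySingleton> REF =
          new AtomicReference<>();

      private LazySingleton() {
      }

      static LazySingleton getInstance() {
        if (REF.get() == null) {
          // Losing this race is harmless; the extra instance is discarded.
          REF.compareAndSet(null, new LazySingleton());
        }
        return REF.get();
      }
    }
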
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
deleted file mode 100644
index c4b4efd..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-import com.google.protobuf.BlockingService;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.util.Objects;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
-import org.apache.hadoop.hdds.scm.protocol.SCMSecurityProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer;
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
-import org.apache.hadoop.security.KerberosInfo;
-
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateApprover.ApprovalType.KERBEROS_TRUSTED;
-
-/**
- * Server side of the protocol used to perform security-related operations
- * with SCM.
- */
-@KerberosInfo(
-    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
-@InterfaceAudience.Private
-public class SCMSecurityProtocolServer implements SCMSecurityProtocol {
-
-  private static final Logger LOGGER = LoggerFactory
-      .getLogger(SCMSecurityProtocolServer.class);
-  private final SecurityConfig config;
-  private final CertificateServer certificateServer;
-  private final RPC.Server rpcServer;
-  private final InetSocketAddress rpcAddress;
-  private final ProtocolMessageMetrics metrics;
-
-  SCMSecurityProtocolServer(OzoneConfiguration conf,
-      CertificateServer certificateServer) throws IOException {
-    this.config = new SecurityConfig(conf);
-    this.certificateServer = certificateServer;
-
-    final int handlerCount =
-        conf.getInt(ScmConfigKeys.OZONE_SCM_SECURITY_HANDLER_COUNT_KEY,
-            ScmConfigKeys.OZONE_SCM_SECURITY_HANDLER_COUNT_DEFAULT);
-    rpcAddress = HddsServerUtil
-        .getScmSecurityInetAddress(conf);
-    // SCM security service RPC service.
-    RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
-        ProtobufRpcEngine.class);
-    metrics = new ProtocolMessageMetrics("ScmSecurityProtocol",
-        "SCM Security protocol metrics",
-        SCMSecurityProtocolProtos.Type.values());
-    BlockingService secureProtoPbService =
-        SCMSecurityProtocolProtos.SCMSecurityProtocolService
-            .newReflectiveBlockingService(
-                new SCMSecurityProtocolServerSideTranslatorPB(this, metrics));
-    this.rpcServer =
-        StorageContainerManager.startRpcServer(
-            conf,
-            rpcAddress,
-            SCMSecurityProtocolPB.class,
-            secureProtoPbService,
-            handlerCount);
-    if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
-        false)) {
-      rpcServer.refreshServiceAcl(conf, SCMPolicyProvider.getInstance());
-    }
-  }
-
-  /**
-   * Get SCM signed certificate for DataNode.
-   *
-   * @param dnDetails   - DataNode Details.
-   * @param certSignReq - Certificate signing request.
-   * @return String - SCM-signed, PEM-encoded certificate.
-   */
-  @Override
-  public String getDataNodeCertificate(
-      DatanodeDetailsProto dnDetails,
-      String certSignReq) throws IOException {
-    LOGGER.info("Processing CSR for dn {}, UUID: {}", dnDetails.getHostName(),
-        dnDetails.getUuid());
-    Objects.requireNonNull(dnDetails);
-    Future<X509CertificateHolder> future =
-        certificateServer.requestCertificate(certSignReq,
-            KERBEROS_TRUSTED);
-
-    try {
-      return CertificateCodec.getPEMEncodedString(future.get());
-    } catch (InterruptedException | ExecutionException e) {
-      LOGGER.error("getDataNodeCertificate operation failed. ", e);
-      throw new IOException("getDataNodeCertificate operation failed. ", e);
-    }
-  }
-
-  /**
-   * Get SCM signed certificate for OM.
-   *
-   * @param omDetails   - OzoneManager Details.
-   * @param certSignReq - Certificate signing request.
-   * @return String - SCM-signed, PEM-encoded certificate.
-   */
-  @Override
-  public String getOMCertificate(OzoneManagerDetailsProto omDetails,
-      String certSignReq) throws IOException {
-    LOGGER.info("Processing CSR for om {}, UUID: {}", omDetails.getHostName(),
-        omDetails.getUuid());
-    Objects.requireNonNull(omDetails);
-    Future<X509CertificateHolder> future =
-        certificateServer.requestCertificate(certSignReq,
-            KERBEROS_TRUSTED);
-
-    try {
-      return CertificateCodec.getPEMEncodedString(future.get());
-    } catch (InterruptedException | ExecutionException e) {
-      LOGGER.error("getOMCertificate operation failed. ", e);
-      throw new IOException("getOMCertificate operation failed. ", e);
-    }
-  }
-
-  /**
-   * Get SCM signed certificate with given serial id.
-   *
-   * @param certSerialId - Certificate serial id.
-   * @return String - PEM-encoded, SCM-signed certificate.
-   */
-  @Override
-  public String getCertificate(String certSerialId) throws IOException {
-    LOGGER.debug("Getting certificate with certificate serial id",
-        certSerialId);
-    try {
-      X509Certificate certificate =
-          certificateServer.getCertificate(certSerialId);
-      if (certificate != null) {
-        return CertificateCodec.getPEMEncodedString(certificate);
-      }
-    } catch (CertificateException e) {
-      LOGGER.error("getCertificate operation failed. ", e);
-      throw new IOException("getCertificate operation failed. ", e);
-    }
-    LOGGER.debug("Certificate with serial id {} not found.", certSerialId);
-    throw new IOException("Certificate not found");
-  }
-
-  /**
-   * Get the CA certificate used by SCM to sign certificates.
-   *
-   * @return String - PEM-encoded root CA certificate.
-   */
-  @Override
-  public String getCACertificate() throws IOException {
-    LOGGER.debug("Getting CA certificate.");
-    try {
-      return CertificateCodec.getPEMEncodedString(
-          certificateServer.getCACertificate());
-    } catch (CertificateException e) {
-      LOGGER.error("getRootCertificate operation failed. ", e);
-      throw new IOException("getRootCertificate operation failed. ", e);
-    }
-  }
-
-  public RPC.Server getRpcServer() {
-    return rpcServer;
-  }
-
-  public InetSocketAddress getRpcAddress() {
-    return rpcAddress;
-  }
-
-  public void start() {
-    LOGGER.info(StorageContainerManager.buildRpcServerStartMessage(
-        "RPC server for SCMSecurityProtocolServer", getRpcAddress()));
-    metrics.register();
-    getRpcServer().start();
-  }
-
-  public void stop() {
-    try {
-      LOGGER.info("Stopping the SCMSecurityProtocolServer.");
-      metrics.unregister();
-      getRpcServer().stop();
-    } catch (Exception ex) {
-      LOGGER.error("SCMSecurityProtocolServer stop failed.", ex);
-    }
-  }
-
-  public void join() throws InterruptedException {
-    LOGGER.trace("Join RPC server for SCMSecurityProtocolServer.");
-    getRpcServer().join();
-  }
-
-}
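
The certificate methods above implement a simple CSR exchange: the caller
sends a PEM-encoded PKCS#10 request and receives a PEM-encoded signed
certificate. A hedged sketch of the client half, assuming
DatanodeDetails#getProtoBufMessage() (not shown in this hunk); CSR
generation itself is out of scope here:

    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;

    /** Hypothetical client-side helper for the datanode CSR exchange. */
    final class CsrSketch {
      static String signDatanodeCsr(SCMSecurityProtocol scmSecurity,
          DatanodeDetails dn, String csrPem) throws IOException {
        // The server forwards the CSR to its CertificateServer with
        // KERBEROS_TRUSTED approval and returns the signed certificate
        // as a PEM string, which the caller can persist locally.
        return scmSecurity.getDataNodeCertificate(
            dn.getProtoBufMessage(), csrPem);
      }
    }
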
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java
deleted file mode 100644
index 7d84fc0..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license
- * agreements. See the NOTICE file distributed with this work for additional
- * information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache
- * License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the
- * License. You may obtain a
- * copy of the License at
- *
- * <p>http://www.apache.org/licenses/LICENSE-2.0
- *
- * <p>Unless required by applicable law or agreed to in writing, software
- * distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import java.io.IOException;
-
-/**
- * This interface is used by the StorageContainerManager to allow
- * dependencies to be injected into the CLI class.
- */
-public interface SCMStarterInterface {
-
-  void start(OzoneConfiguration conf) throws Exception;
-  boolean init(OzoneConfiguration conf, String clusterId)
-      throws IOException;
-  String generateClusterId();
-}
\ No newline at end of file
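
The point of the interface above is testability: the CLI can be driven by a
stub instead of booting a real StorageContainerManager. A hypothetical no-op
implementation, for illustration only:

    import java.io.IOException;
    import java.util.UUID;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    /** Hypothetical stub starter a CLI test could inject. */
    class StubSCMStarter implements SCMStarterInterface {
      boolean startCalled = false;

      @Override
      public void start(OzoneConfiguration conf) {
        startCalled = true; // record the call instead of starting servers
      }

      @Override
      public boolean init(OzoneConfiguration conf, String clusterId)
          throws IOException {
        return true; // pretend initialization succeeded
      }

      @Override
      public String generateClusterId() {
        return UUID.randomUUID().toString();
      }
    }
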
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java
deleted file mode 100644
index 73f9cbe..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.ozone.common.Storage;
-
-import java.io.IOException;
-import java.util.Properties;
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
-import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR;
-
-/**
- * SCMStorageConfig is responsible for managing the storage directories
- * used by the SCM.
- */
-public class SCMStorageConfig extends Storage {
-
-  /**
-   * Construct SCMStorageConfig.
-   * @throws IOException if any directories are inaccessible.
-   */
-  public SCMStorageConfig(OzoneConfiguration conf) throws IOException {
-    super(NodeType.SCM, ServerUtils.getScmDbDir(conf), STORAGE_DIR);
-  }
-
-  public void setScmId(String scmId) throws IOException {
-    if (getState() == StorageState.INITIALIZED) {
-      throw new IOException("SCM is already initialized.");
-    } else {
-      getStorageInfo().setProperty(SCM_ID, scmId);
-    }
-  }
-
-  /**
-   * Retrieves the SCM ID from the version file.
-   * @return SCM_ID
-   */
-  public String getScmId() {
-    return getStorageInfo().getProperty(SCM_ID);
-  }
-
-  @Override
-  protected Properties getNodeProperties() {
-    String scmId = getScmId();
-    if (scmId == null) {
-      scmId = UUID.randomUUID().toString();
-    }
-    Properties scmProperties = new Properties();
-    scmProperties.setProperty(SCM_ID, scmId);
-    return scmProperties;
-  }
-
-}
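
SCMStorageConfig stores its identifiers as properties of the VERSION file
managed by the Storage base class (also removed by this patch). A hedged
sketch of the 'ozone scm --init' write path; setClusterId() and
initialize() are assumed from the Storage base class and are not shown in
this hunk:

    import java.io.IOException;
    import java.util.UUID;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.common.Storage.StorageState;

    /** Hypothetical sketch of first-time SCM storage initialization. */
    final class ScmInitSketch {
      static void initOnce(OzoneConfiguration conf) throws IOException {
        SCMStorageConfig storage = new SCMStorageConfig(conf);
        if (storage.getState() != StorageState.INITIALIZED) {
          storage.setClusterId(UUID.randomUUID().toString()); // assumed API
          storage.setScmId(UUID.randomUUID().toString());
          storage.initialize(); // assumed to persist the VERSION file
        }
        System.out.println("SCM id: " + storage.getScmId());
      }
    }
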
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
deleted file mode 100644
index af65e13..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ /dev/null
@@ -1,1103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license
- * agreements. See the NOTICE file distributed with this work for additional
- * information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache
- * License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the
- * License. You may obtain a
- * copy of the License at
- *
- * <p>http://www.apache.org/licenses/LICENSE-2.0
- *
- * <p>Unless required by applicable law or agreed to in writing, software
- * distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
-import com.google.protobuf.BlockingService;
-import java.util.Objects;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.block.BlockManager;
-import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
-import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl;
-import org.apache.hadoop.hdds.scm.block.PendingDeleteHandler;
-import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicyFactory;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .SCMContainerPlacementMetrics;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
-import org.apache.hadoop.hdds.scm.safemode.SafeModeHandler;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
-import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
-import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
-import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
-import org.apache.hadoop.hdds.scm.container.IncrementalContainerReportHandler;
-import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
-import org.apache.hadoop.hdds.scm.container.ReplicationManager;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
-import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
-import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreRDBImpl;
-import org.apache.hadoop.hdds.scm.node.DeadNodeHandler;
-import org.apache.hadoop.hdds.scm.node.NewNodeHandler;
-import org.apache.hadoop.hdds.scm.node.NonHealthyToHealthyNodeHandler;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodeReportHandler;
-import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
-import org.apache.hadoop.hdds.scm.node.StaleNodeHandler;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineActionHandler;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineReportHandler;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.DefaultCAServer;
-import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.ozone.common.Storage.StorageState;
-import org.apache.hadoop.ozone.lease.LeaseManager;
-import org.apache.hadoop.ozone.lock.LockManager;
-import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.util.JvmPauseMonitor;
-import org.apache.hadoop.hdds.utils.HddsVersionInfo;
-import org.apache.ratis.grpc.GrpcTlsConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-
-/**
- * StorageContainerManager is the main entry point for the service that
- * provides information about which datanodes host which containers.
- *
- * <p>DataNodes report to StorageContainerManager using heartbeat messages.
- * SCM allocates containers and returns a pipeline.
- *
- * <p>A client, once it gets a pipeline (a list of datanodes), will connect
- * to the datanodes and create a container, which can then be used to store
- * data.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
-public final class StorageContainerManager extends ServiceRuntimeInfoImpl
-    implements SCMMXBean {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(StorageContainerManager.class);
-
-  /**
-   * SCM metrics.
-   */
-  private static SCMMetrics metrics;
-
-  /*
-   * RPC Endpoints exposed by SCM.
-   */
-  private final SCMDatanodeProtocolServer datanodeProtocolServer;
-  private final SCMBlockProtocolServer blockProtocolServer;
-  private final SCMClientProtocolServer clientProtocolServer;
-  private SCMSecurityProtocolServer securityProtocolServer;
-
-  /*
-   * State Managers of SCM.
-   */
-  private NodeManager scmNodeManager;
-  private PipelineManager pipelineManager;
-  private ContainerManager containerManager;
-  private BlockManager scmBlockManager;
-  private final SCMStorageConfig scmStorageConfig;
-
-  private SCMMetadataStore scmMetadataStore;
-
-  private final EventQueue eventQueue;
-  /*
-   * HTTP endpoint for JMX access.
-   */
-  private final StorageContainerManagerHttpServer httpServer;
-  /**
-   * SCM super user.
-   */
-  private final String scmUsername;
-  private final Collection<String> scmAdminUsernames;
-  /**
-   * SCM mxbean.
-   */
-  private ObjectName scmInfoBeanName;
-  /**
-   * Key = DatanodeUuid, value = ContainerStat.
-   */
-  private Cache<String, ContainerStat> containerReportCache;
-
-  private ReplicationManager replicationManager;
-
-  private final LeaseManager<Long> commandWatcherLeaseManager;
-
-  private SCMSafeModeManager scmSafeModeManager;
-  private CertificateServer certificateServer;
-  private GrpcTlsConfig grpcTlsConfig;
-
-  private JvmPauseMonitor jvmPauseMonitor;
-  private final OzoneConfiguration configuration;
-  private final SafeModeHandler safeModeHandler;
-  private SCMContainerMetrics scmContainerMetrics;
-  private MetricsSystem ms;
-
-  /**
-   *  Network topology Map.
-   */
-  private NetworkTopology clusterMap;
-
-  /**
-   * Creates a new StorageContainerManager. Configuration will be
-   * updated with information on the actual listening addresses used
-   * for RPC servers.
-   *
-   * @param conf configuration
-   */
-  public StorageContainerManager(OzoneConfiguration conf)
-      throws IOException, AuthenticationException {
-    // default empty configurator means default managers will be used.
-    this(conf, new SCMConfigurator());
-  }
-
-
-  /**
-   * This constructor offers finer control over how SCM comes up.
-   * To use it, create an SCMConfigurator and set the managers that SCM
-   * should use; any manager left unset falls back to its default
-   * implementation.
-   *
-   * @param conf - Configuration
-   * @param configurator - configurator
-   */
-  public StorageContainerManager(OzoneConfiguration conf,
-                                 SCMConfigurator configurator)
-      throws IOException, AuthenticationException  {
-    super(HddsVersionInfo.HDDS_VERSION_INFO);
-
-    Objects.requireNonNull(configurator, "configurator cannot be null");
-    Objects.requireNonNull(conf, "configuration cannot be null");
-
-    configuration = conf;
-    initMetrics();
-    initContainerReportCache(conf);
-    /*
-     * It is assumed that the 'ozone scm --init' command creates the SCM
-     * storage config.
-     */
-    scmStorageConfig = new SCMStorageConfig(conf);
-    if (scmStorageConfig.getState() != StorageState.INITIALIZED) {
-      LOG.error("Please make sure you have run \'ozone scm --init\' " +
-          "command to generate all the required metadata.");
-      throw new SCMException("SCM not initialized due to storage config " +
-          "failure.", ResultCodes.SCM_NOT_INITIALIZED);
-    }
-
-    /**
-     * Important: this initialization sequence is assumed by some of our
-     * tests. TestSecureOzoneCluster assumes that security checks pass
-     * before any artifacts like the SCM DB are created, so please don't
-     * add any other initialization above the security checks.
-     */
-    if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
-      loginAsSCMUser(conf);
-    }
-
-    // Creates the SCM DBs, or opens them if they already exist.
-    // A valid pointer to the store is required by all the other services below.
-    initalizeMetadataStore(conf, configurator);
-
-    // Authenticate SCM if security is enabled; this initialization can only
-    // be done after the metadata store is initialized.
-    if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
-      initializeCAnSecurityProtocol(conf, configurator);
-    } else {
-      // if no Security, we do not create a Certificate Server at all.
-      // This allows user to boot SCM without security temporarily
-      // and then come back and enable it without any impact.
-      certificateServer = null;
-      securityProtocolServer = null;
-    }
-
-    eventQueue = new EventQueue();
-    long watcherTimeout =
-        conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
-            HDDS_SCM_WATCHER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
-    commandWatcherLeaseManager = new LeaseManager<>("CommandWatcher",
-        watcherTimeout);
-    initializeSystemManagers(conf, configurator);
-
-    CloseContainerEventHandler closeContainerHandler =
-        new CloseContainerEventHandler(pipelineManager, containerManager);
-    NodeReportHandler nodeReportHandler =
-        new NodeReportHandler(scmNodeManager);
-    PipelineReportHandler pipelineReportHandler =
-        new PipelineReportHandler(scmSafeModeManager, pipelineManager, conf);
-    CommandStatusReportHandler cmdStatusReportHandler =
-        new CommandStatusReportHandler();
-
-    NewNodeHandler newNodeHandler = new NewNodeHandler(pipelineManager, conf);
-    StaleNodeHandler staleNodeHandler =
-        new StaleNodeHandler(scmNodeManager, pipelineManager, conf);
-    DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager,
-        pipelineManager, containerManager);
-    NonHealthyToHealthyNodeHandler nonHealthyToHealthyNodeHandler =
-        new NonHealthyToHealthyNodeHandler(pipelineManager, conf);
-    ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
-    PendingDeleteHandler pendingDeleteHandler =
-        new PendingDeleteHandler(scmBlockManager.getSCMBlockDeletingService());
-
-    ContainerReportHandler containerReportHandler =
-        new ContainerReportHandler(scmNodeManager, containerManager);
-
-    IncrementalContainerReportHandler incrementalContainerReportHandler =
-        new IncrementalContainerReportHandler(
-            scmNodeManager, containerManager);
-
-    PipelineActionHandler pipelineActionHandler =
-        new PipelineActionHandler(pipelineManager, conf);
-
-
-    RetriableDatanodeEventWatcher retriableDatanodeEventWatcher =
-        new RetriableDatanodeEventWatcher<>(
-            SCMEvents.RETRIABLE_DATANODE_COMMAND,
-            SCMEvents.DELETE_BLOCK_STATUS,
-            commandWatcherLeaseManager);
-    retriableDatanodeEventWatcher.start(eventQueue);
-
-    scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys
-        .OZONE_ADMINISTRATORS);
-    scmUsername = UserGroupInformation.getCurrentUser().getUserName();
-    if (!scmAdminUsernames.contains(scmUsername)) {
-      scmAdminUsernames.add(scmUsername);
-    }
-
-    datanodeProtocolServer = new SCMDatanodeProtocolServer(conf, this,
-        eventQueue);
-    blockProtocolServer = new SCMBlockProtocolServer(conf, this);
-    clientProtocolServer = new SCMClientProtocolServer(conf, this);
-    httpServer = new StorageContainerManagerHttpServer(conf);
-
-    safeModeHandler = new SafeModeHandler(configuration,
-        clientProtocolServer, scmBlockManager, replicationManager,
-        pipelineManager);
-
-    eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, scmNodeManager);
-    eventQueue.addHandler(SCMEvents.RETRIABLE_DATANODE_COMMAND, scmNodeManager);
-    eventQueue.addHandler(SCMEvents.NODE_REPORT, nodeReportHandler);
-    eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportHandler);
-    eventQueue.addHandler(SCMEvents.INCREMENTAL_CONTAINER_REPORT,
-        incrementalContainerReportHandler);
-    eventQueue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler);
-    eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler);
-    eventQueue.addHandler(SCMEvents.NEW_NODE, newNodeHandler);
-    eventQueue.addHandler(SCMEvents.STALE_NODE, staleNodeHandler);
-    eventQueue.addHandler(SCMEvents.NON_HEALTHY_TO_HEALTHY_NODE,
-        nonHealthyToHealthyNodeHandler);
-    eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
-    eventQueue.addHandler(SCMEvents.CMD_STATUS_REPORT, cmdStatusReportHandler);
-    eventQueue
-        .addHandler(SCMEvents.PENDING_DELETE_STATUS, pendingDeleteHandler);
-    eventQueue.addHandler(SCMEvents.DELETE_BLOCK_STATUS,
-        (DeletedBlockLogImpl) scmBlockManager.getDeletedBlockLog());
-    eventQueue.addHandler(SCMEvents.PIPELINE_ACTIONS, pipelineActionHandler);
-    eventQueue.addHandler(SCMEvents.PIPELINE_REPORT, pipelineReportHandler);
-    eventQueue.addHandler(SCMEvents.SAFE_MODE_STATUS, safeModeHandler);
-    registerMXBean();
-    registerMetricsSource(this);
-  }
-
-  /**
-   * This function initializes the following managers. If the configurator
-   * specifies a value, we use it; otherwise we fall back to the default.
-   *
-   *  Node Manager
-   *  Pipeline Manager
-   *  Container Manager
-   *  Block Manager
-   *  Replication Manager
-   *  Safe Mode Manager
-   *
-   * @param conf - Ozone Configuration.
-   * @param configurator - A customizer which allows different managers to be
-   *                    used if needed.
-   * @throws IOException - on Failure.
-   */
-  private void initializeSystemManagers(OzoneConfiguration conf,
-                                       SCMConfigurator configurator)
-      throws IOException {
-    if (configurator.getNetworkTopology() != null) {
-      clusterMap = configurator.getNetworkTopology();
-    } else {
-      clusterMap = new NetworkTopologyImpl(conf);
-    }
-
-    if(configurator.getScmNodeManager() != null) {
-      scmNodeManager = configurator.getScmNodeManager();
-    } else {
-      scmNodeManager = new SCMNodeManager(
-          conf, scmStorageConfig, eventQueue, clusterMap);
-    }
-
-    SCMContainerPlacementMetrics placementMetrics =
-        SCMContainerPlacementMetrics.create();
-    ContainerPlacementPolicy containerPlacementPolicy =
-        ContainerPlacementPolicyFactory.getPolicy(conf, scmNodeManager,
-            clusterMap, true, placementMetrics);
-
-    if (configurator.getPipelineManager() != null) {
-      pipelineManager = configurator.getPipelineManager();
-    } else {
-      pipelineManager =
-          new SCMPipelineManager(conf, scmNodeManager, eventQueue,
-              grpcTlsConfig);
-    }
-
-    if (configurator.getContainerManager() != null) {
-      containerManager = configurator.getContainerManager();
-    } else {
-      containerManager = new SCMContainerManager(
-          conf, scmNodeManager, pipelineManager, eventQueue);
-    }
-
-    if (configurator.getScmBlockManager() != null) {
-      scmBlockManager = configurator.getScmBlockManager();
-    } else {
-      scmBlockManager = new BlockManagerImpl(conf, this);
-    }
-    if (configurator.getReplicationManager() != null) {
-      replicationManager = configurator.getReplicationManager();
-    }  else {
-      replicationManager = new ReplicationManager(
-          conf.getObject(ReplicationManagerConfiguration.class),
-          containerManager,
-          containerPlacementPolicy,
-          eventQueue,
-          new LockManager<>(conf));
-    }
-    if(configurator.getScmSafeModeManager() != null) {
-      scmSafeModeManager = configurator.getScmSafeModeManager();
-    } else {
-      scmSafeModeManager = new SCMSafeModeManager(conf,
-          containerManager.getContainers(), pipelineManager, eventQueue);
-    }
-  }
-
-  /**
-   * If security is enabled, we need to have the Security Protocol and a
-   * default CA. This function initializes those values based on the
-   * configurator.
-   *
-   * @param conf - Config
-   * @param configurator - configurator
-   * @throws IOException - on Failure
-   * @throws AuthenticationException - on Failure
-   */
-  private void initializeCAnSecurityProtocol(OzoneConfiguration conf,
-      SCMConfigurator configurator) throws IOException {
-    if(configurator.getCertificateServer() != null) {
-      this.certificateServer = configurator.getCertificateServer();
-    } else {
-      // This assumes that SCM init has run, and DB metadata stores are created.
-      certificateServer = initializeCertificateServer(
-          getScmStorageConfig().getClusterID(),
-          getScmStorageConfig().getScmId());
-    }
-    // TODO: Support Intermediary CAs in future.
-    certificateServer.init(new SecurityConfig(conf),
-        CertificateServer.CAType.SELF_SIGNED_CA);
-    securityProtocolServer = new SCMSecurityProtocolServer(conf,
-        certificateServer);
-
-    grpcTlsConfig = RatisHelper
-        .createTlsClientConfigForSCM(new SecurityConfig(conf),
-            certificateServer);
-  }
-
-  /**
-   * Init the metadata store based on the configurator.
-   * @param conf - Config
-   * @param configurator - configurator
-   * @throws IOException - on Failure
-   */
-  private void initalizeMetadataStore(OzoneConfiguration conf,
-                                      SCMConfigurator configurator)
-      throws IOException {
-    if(configurator.getMetadataStore() != null) {
-      scmMetadataStore = configurator.getMetadataStore();
-    } else {
-      scmMetadataStore = new SCMMetadataStoreRDBImpl(conf);
-      if (scmMetadataStore == null) {
-        throw new SCMException("Unable to initialize metadata store",
-            ResultCodes.SCM_NOT_INITIALIZED);
-      }
-    }
-  }
-
-  /**
-   * Login as the configured user for SCM.
-   *
-   * @param conf
-   */
-  private void loginAsSCMUser(Configuration conf)
-      throws IOException, AuthenticationException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Ozone security is enabled. Attempting login for SCM user. "
-              + "Principal: {}, keytab: {}",
-          conf.get(HDDS_SCM_KERBEROS_PRINCIPAL_KEY),
-          conf.get(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY));
-    }
-
-    if (SecurityUtil.getAuthenticationMethod(conf).equals(
-        AuthenticationMethod.KERBEROS)) {
-      UserGroupInformation.setConfiguration(conf);
-      InetSocketAddress socAddr = HddsServerUtil
-          .getScmBlockClientBindAddress(conf);
-      SecurityUtil.login(conf, HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
-          HDDS_SCM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
-    } else {
-      throw new AuthenticationException(SecurityUtil.getAuthenticationMethod(
-          conf) + " authentication method not supported. "
-          + "SCM user login failed.");
-    }
-    LOG.info("SCM login successful.");
-  }
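
loginAsSCMUser only succeeds when Hadoop security is configured for Kerberos, so a secure SCM needs the authentication method plus the SCM principal and keytab set before startup. A hedged configuration sketch: the literal key strings below are assumptions standing in for the HDDS_SCM_KERBEROS_* constants read above, and the principal/keytab values are placeholders.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class ScmSecurityConfSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("hadoop.security.authentication", "kerberos");
    // Assumed key names; the SCM reads them via the constants referenced above.
    conf.set("hdds.scm.kerberos.principal", "scm/_HOST@EXAMPLE.COM");
    conf.set("hdds.scm.kerberos.keytab.file", "/etc/security/keytabs/scm.keytab");
  }
}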
-
-  /**
-   * This function creates/initializes a certificate server as needed.
-   * This function is idempotent, so calling this again and again after the
-   * server is initialized is not a problem.
-   *
-   * @param clusterID - Cluster ID
-   * @param scmID     - SCM ID
-   * @return the initialized certificate server
-   */
-  private CertificateServer initializeCertificateServer(String clusterID,
-      String scmID) throws IOException {
-    // TODO: Support Certificate Server loading via Class Name loader.
-    // So it is easy to use different Certificate Servers if needed.
-    String subject = "scm@" + InetAddress.getLocalHost().getHostName();
-    if(this.scmMetadataStore == null) {
-      LOG.error("Cannot initialize Certificate Server without a valid meta " +
-          "data layer.");
-      throw new SCMException("Cannot initialize CA without a valid metadata " +
-          "store", ResultCodes.SCM_NOT_INITIALIZED);
-    }
-    SCMCertStore certStore = new SCMCertStore(this.scmMetadataStore);
-    return new DefaultCAServer(subject, clusterID, scmID, certStore);
-  }
-
-  /**
-   * Builds a message for logging startup information about an RPC server.
-   *
-   * @param description RPC server description
-   * @param addr        RPC server listening address
-   * @return server startup message
-   */
-  public static String buildRpcServerStartMessage(String description,
-                                                  InetSocketAddress addr) {
-    return addr != null
-        ? String.format("%s is listening at %s", description, addr.toString())
-        : String.format("%s not started", description);
-  }
-
-  /**
-   * Starts an RPC server, if configured.
-   *
-   * @param conf configuration
-   * @param addr configured address of RPC server
-   * @param protocol RPC protocol provided by RPC server
-   * @param instance RPC protocol implementation instance
-   * @param handlerCount RPC server handler count
-   * @return RPC server
-   * @throws IOException if there is an I/O error while creating RPC server
-   */
-  public static RPC.Server startRpcServer(
-      OzoneConfiguration conf,
-      InetSocketAddress addr,
-      Class<?> protocol,
-      BlockingService instance,
-      int handlerCount)
-      throws IOException {
-    RPC.Server rpcServer =
-        new RPC.Builder(conf)
-            .setProtocol(protocol)
-            .setInstance(instance)
-            .setBindAddress(addr.getHostString())
-            .setPort(addr.getPort())
-            .setNumHandlers(handlerCount)
-            .setVerbose(false)
-            .setSecretManager(null)
-            .build();
-
-    DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
-    return rpcServer;
-  }
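
startRpcServer is the shared builder through which the SCM protocol servers create their RPC endpoints. A hedged usage fragment; 'protocolClass' and 'service' are hypothetical placeholders for a real protobuf-generated protocol interface and its BlockingService binding, which a concrete server would supply:

// Sketch only: 'protocolClass' and 'service' are hypothetical placeholders.
InetSocketAddress bindAddr = new InetSocketAddress("0.0.0.0", 9860);
RPC.Server server =
    startRpcServer(conf, bindAddr, protocolClass, service, 10);
server.start();
LOG.info(buildRpcServerStartMessage("Sample RPC server",
    server.getListenerAddress()));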
-
-  /**
-   * Create an SCM instance based on the supplied configuration.
-   *
-   * @param conf        HDDS configuration
-   * @return SCM instance
-   * @throws IOException if SCM creation fails due to an I/O error
-   * @throws AuthenticationException if SCM user login fails
-   */
-  public static StorageContainerManager createSCM(
-      OzoneConfiguration conf)
-      throws IOException, AuthenticationException {
-    if (!HddsUtils.isHddsEnabled(conf)) {
-      System.err.println(
-          "SCM cannot be started in secure mode or when " + OZONE_ENABLED + "" +
-              " is set to false");
-      System.exit(1);
-    }
-    return new StorageContainerManager(conf);
-  }
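
createSCM is also the programmatic entry point used by the starter class later in this diff and by test clusters. A hedged sketch of embedding it, assuming ozone.enabled has been set and scmInit has already created the on-disk version file:

OzoneConfiguration conf = new OzoneConfiguration();
conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
StorageContainerManager scm = StorageContainerManager.createSCM(conf);
scm.start();   // brings up the RPC, HTTP and block manager services
scm.join();    // blocks until the protocol servers shut down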
-
-  /**
-   * Routine to set up the Version info for StorageContainerManager.
-   *
-   * @param conf OzoneConfiguration
-   * @return true if SCM initialization is successful, false otherwise.
-   * @throws IOException if init fails due to I/O error
-   */
-  public static boolean scmInit(OzoneConfiguration conf,
-      String clusterId) throws IOException {
-    SCMStorageConfig scmStorageConfig = new SCMStorageConfig(conf);
-    StorageState state = scmStorageConfig.getState();
-    if (state != StorageState.INITIALIZED) {
-      try {
-        if (clusterId != null && !clusterId.isEmpty()) {
-          scmStorageConfig.setClusterId(clusterId);
-        }
-        scmStorageConfig.initialize();
-        System.out.println(
-            "SCM initialization succeeded."
-                + "Current cluster id for sd="
-                + scmStorageConfig.getStorageDir()
-                + ";cid="
-                + scmStorageConfig.getClusterID());
-        return true;
-      } catch (IOException ioe) {
-        LOG.error("Could not initialize SCM version file", ioe);
-        return false;
-      }
-    } else {
-      System.out.println(
-          "SCM already initialized. Reusing existing"
-              + " cluster id for sd="
-              + scmStorageConfig.getStorageDir()
-              + ";cid="
-              + scmStorageConfig.getClusterID());
-      return true;
-    }
-  }
-
-  /**
-   * Initialize SCM metrics.
-   */
-  public static void initMetrics() {
-    metrics = SCMMetrics.create();
-  }
-
-  /**
-   * Return SCM metrics instance.
-   */
-  public static SCMMetrics getMetrics() {
-    return metrics == null ? SCMMetrics.create() : metrics;
-  }
-
-  public SCMStorageConfig getScmStorageConfig() {
-    return scmStorageConfig;
-  }
-
-  public SCMDatanodeProtocolServer getDatanodeProtocolServer() {
-    return datanodeProtocolServer;
-  }
-
-  public SCMBlockProtocolServer getBlockProtocolServer() {
-    return blockProtocolServer;
-  }
-
-  public SCMClientProtocolServer getClientProtocolServer() {
-    return clientProtocolServer;
-  }
-
-  public SCMSecurityProtocolServer getSecurityProtocolServer() {
-    return securityProtocolServer;
-  }
-
-  /**
-   * Initialize the cache of container reports sent from datanodes.
-   *
-   * @param conf the Ozone configuration
-   */
-  private void initContainerReportCache(OzoneConfiguration conf) {
-    containerReportCache =
-        CacheBuilder.newBuilder()
-            .expireAfterAccess(Long.MAX_VALUE, TimeUnit.MILLISECONDS)
-            .maximumSize(Integer.MAX_VALUE)
-            .removalListener(
-                new RemovalListener<String, ContainerStat>() {
-                  @Override
-                  public void onRemoval(
-                      RemovalNotification<String, ContainerStat>
-                          removalNotification) {
-                    synchronized (containerReportCache) {
-                      ContainerStat stat = removalNotification.getValue();
-                      // remove invalid container report
-                      metrics.decrContainerStat(stat);
-                      if (LOG.isDebugEnabled()) {
-                        LOG.debug("Remove expired container stat entry for " +
-                            "datanode: {}.", removalNotification.getKey());
-                      }
-                    }
-                  }
-                })
-            .build();
-  }
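
The report cache above is a Guava cache used as an effectively unbounded map; its removal listener keeps the aggregate ContainerStat metrics consistent whenever an entry is evicted or explicitly invalidated. A self-contained sketch of the same removal-listener pattern with plain types:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import java.util.concurrent.TimeUnit;

public final class RemovalListenerSketch {
  public static void main(String[] args) {
    // The listener fires for evictions and explicit invalidations alike.
    RemovalListener<String, Long> listener = notification ->
        System.out.println("removed " + notification.getKey()
            + " because " + notification.getCause());
    Cache<String, Long> cache = CacheBuilder.newBuilder()
        .expireAfterAccess(10, TimeUnit.MINUTES)
        .maximumSize(1000)
        .removalListener(listener)
        .build();
    cache.put("datanode-1", 42L);
    cache.invalidate("datanode-1"); // triggers the listener
  }
}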
-
-  private void registerMXBean() {
-    final Map<String, String> jmxProperties = new HashMap<>();
-    jmxProperties.put("component", "ServerRuntime");
-    this.scmInfoBeanName = HddsUtils.registerWithJmxProperties(
-        "StorageContainerManager", "StorageContainerManagerInfo",
-        jmxProperties, this);
-  }
-
-  private void registerMetricsSource(SCMMXBean scmMBean) {
-    scmContainerMetrics = SCMContainerMetrics.create(scmMBean);
-  }
-
-  private void unregisterMXBean() {
-    if (this.scmInfoBeanName != null) {
-      MBeans.unregister(this.scmInfoBeanName);
-      this.scmInfoBeanName = null;
-    }
-  }
-
-  @VisibleForTesting
-  public ContainerInfo getContainerInfo(long containerID) throws
-      IOException {
-    return containerManager.getContainer(ContainerID.valueof(containerID));
-  }
-
-  /**
-   * Returns listening address of StorageLocation Protocol RPC server.
-   *
-   * @return listen address of StorageLocation RPC server
-   */
-  @VisibleForTesting
-  public InetSocketAddress getClientRpcAddress() {
-    return getClientProtocolServer().getClientRpcAddress();
-  }
-
-  @Override
-  public String getClientRpcPort() {
-    InetSocketAddress addr = getClientRpcAddress();
-    return addr == null ? "0" : Integer.toString(addr.getPort());
-  }
-
-  /**
-   * Returns listening address of StorageDatanode Protocol RPC server.
-   *
-   * @return Address where datanodes are communicating.
-   */
-  public InetSocketAddress getDatanodeRpcAddress() {
-    return getDatanodeProtocolServer().getDatanodeRpcAddress();
-  }
-
-  @Override
-  public String getDatanodeRpcPort() {
-    InetSocketAddress addr = getDatanodeRpcAddress();
-    return addr == null ? "0" : Integer.toString(addr.getPort());
-  }
-
-  /**
-   * Start service.
-   */
-  public void start() throws IOException {
-    LOG.info(
-        buildRpcServerStartMessage(
-            "StorageContainerLocationProtocol RPC server",
-            getClientRpcAddress()));
-
-    ms = HddsUtils.initializeMetrics(configuration, "StorageContainerManager");
-
-    commandWatcherLeaseManager.start();
-    getClientProtocolServer().start();
-
-    LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " +
-        "server", getBlockProtocolServer().getBlockRpcAddress()));
-    getBlockProtocolServer().start();
-
-    LOG.info(buildRpcServerStartMessage("ScmDatanodeProtocl RPC " +
-        "server", getDatanodeProtocolServer().getDatanodeRpcAddress()));
-    getDatanodeProtocolServer().start();
-    if (getSecurityProtocolServer() != null) {
-      getSecurityProtocolServer().start();
-    }
-
-    httpServer.start();
-    scmBlockManager.start();
-
-    // Start jvm monitor
-    jvmPauseMonitor = new JvmPauseMonitor();
-    jvmPauseMonitor.init(configuration);
-    jvmPauseMonitor.start();
-
-    setStartTime();
-  }
-
-  /**
-   * Stop service.
-   */
-  public void stop() {
-
-    try {
-      LOG.info("Stopping Replication Manager Service.");
-      replicationManager.stop();
-    } catch (Exception ex) {
-      LOG.error("Replication manager service stop failed.", ex);
-    }
-
-    try {
-      LOG.info("Stopping Lease Manager of the command watchers");
-      commandWatcherLeaseManager.shutdown();
-    } catch (Exception ex) {
-      LOG.error("Lease Manager of the command watchers stop failed");
-    }
-
-    try {
-      LOG.info("Stopping datanode service RPC server");
-      getDatanodeProtocolServer().stop();
-
-    } catch (Exception ex) {
-      LOG.error("Storage Container Manager datanode RPC stop failed.", ex);
-    }
-
-    try {
-      LOG.info("Stopping block service RPC server");
-      getBlockProtocolServer().stop();
-    } catch (Exception ex) {
-      LOG.error("Storage Container Manager blockRpcServer stop failed.", ex);
-    }
-
-    try {
-      LOG.info("Stopping the StorageContainerLocationProtocol RPC server");
-      getClientProtocolServer().stop();
-    } catch (Exception ex) {
-      LOG.error("Storage Container Manager clientRpcServer stop failed.", ex);
-    }
-
-    try {
-      LOG.info("Stopping Storage Container Manager HTTP server.");
-      httpServer.stop();
-    } catch (Exception ex) {
-      LOG.error("Storage Container Manager HTTP server stop failed.", ex);
-    }
-
-    if (getSecurityProtocolServer() != null) {
-      getSecurityProtocolServer().stop();
-    }
-
-    try {
-      LOG.info("Stopping Block Manager Service.");
-      scmBlockManager.stop();
-    } catch (Exception ex) {
-      LOG.error("SCM block manager service stop failed.", ex);
-    }
-
-    if (containerReportCache != null) {
-      containerReportCache.invalidateAll();
-      containerReportCache.cleanUp();
-    }
-
-    if (metrics != null) {
-      metrics.unRegister();
-    }
-
-    unregisterMXBean();
-    if (scmContainerMetrics != null) {
-      scmContainerMetrics.unRegister();
-    }
-
-    // Event queue must be stopped before the DB store is closed at the end.
-    try {
-      LOG.info("Stopping SCM Event Queue.");
-      eventQueue.close();
-    } catch (Exception ex) {
-      LOG.error("SCM Event Queue stop failed", ex);
-    }
-
-    if (jvmPauseMonitor != null) {
-      jvmPauseMonitor.stop();
-    }
-    IOUtils.cleanupWithLogger(LOG, containerManager);
-    IOUtils.cleanupWithLogger(LOG, pipelineManager);
-
-    try {
-      scmMetadataStore.stop();
-    } catch (Exception ex) {
-      LOG.error("SCM Metadata store stop failed", ex);
-    }
-
-    if (ms != null) {
-      ms.stop();
-    }
-
-    scmSafeModeManager.stop();
-  }
-
-  /**
-   * Wait until service has completed shutdown.
-   */
-  public void join() {
-    try {
-      getBlockProtocolServer().join();
-      getClientProtocolServer().join();
-      getDatanodeProtocolServer().join();
-      if (getSecurityProtocolServer() != null) {
-        getSecurityProtocolServer().join();
-      }
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      LOG.info("Interrupted during StorageContainerManager join.");
-    }
-  }
-
-  /**
-   * Returns the number of datanodes that are communicating with SCM.
-   *
-   * @param nodestate Healthy, Dead etc.
-   * @return int -- count
-   */
-  public int getNodeCount(NodeState nodestate) {
-    return scmNodeManager.getNodeCount(nodestate);
-  }
-
-  /**
-   * Returns SCM container manager.
-   */
-  @VisibleForTesting
-  public ContainerManager getContainerManager() {
-    return containerManager;
-  }
-
-  /**
-   * Returns node manager.
-   *
-   * @return - Node Manager
-   */
-  @VisibleForTesting
-  public NodeManager getScmNodeManager() {
-    return scmNodeManager;
-  }
-
-  /**
-   * Returns pipeline manager.
-   *
-   * @return - Pipeline Manager
-   */
-  @VisibleForTesting
-  public PipelineManager getPipelineManager() {
-    return pipelineManager;
-  }
-
-  @VisibleForTesting
-  public BlockManager getScmBlockManager() {
-    return scmBlockManager;
-  }
-
-  @VisibleForTesting
-  public SafeModeHandler getSafeModeHandler() {
-    return safeModeHandler;
-  }
-
-  @VisibleForTesting
-  public SCMSafeModeManager getScmSafeModeManager() {
-    return scmSafeModeManager;
-  }
-
-  @VisibleForTesting
-  public ReplicationManager getReplicationManager() {
-    return replicationManager;
-  }
-
-  public void checkAdminAccess(String remoteUser) throws IOException {
-    if (remoteUser != null) {
-      if (!scmAdminUsernames.contains(remoteUser)) {
-        throw new IOException(
-            "Access denied for user " + remoteUser + ". Superuser privilege " +
-                "is required.");
-      }
-    }
-  }
-
-  /**
-   * Invalidate the container stat entry for the given datanode.
-   *
-   * @param datanodeUuid UUID of the datanode whose entry should be removed
-   */
-  public void removeContainerReport(String datanodeUuid) {
-    synchronized (containerReportCache) {
-      containerReportCache.invalidate(datanodeUuid);
-    }
-  }
-
-  /**
-   * Get the container stat of the specified datanode.
-   *
-   * @param datanodeUuid UUID of the datanode
-   * @return the cached container stat, or null if none is present
-   */
-  public ContainerStat getContainerReport(String datanodeUuid) {
-    ContainerStat stat = null;
-    synchronized (containerReportCache) {
-      stat = containerReportCache.getIfPresent(datanodeUuid);
-    }
-
-    return stat;
-  }
-
-  /**
-   * Returns a view of the container stat entries. Modifications made to
-   * the map will directly affect the cache.
-   *
-   * @return a live map view of the container report cache
-   */
-  public ConcurrentMap<String, ContainerStat> getContainerReportCache() {
-    return containerReportCache.asMap();
-  }
-
-  @Override
-  public Map<String, String> getContainerReport() {
-    Map<String, String> id2StatMap = new HashMap<>();
-    synchronized (containerReportCache) {
-      ConcurrentMap<String, ContainerStat> map = containerReportCache.asMap();
-      for (Map.Entry<String, ContainerStat> entry : map.entrySet()) {
-        id2StatMap.put(entry.getKey(), entry.getValue().toJsonString());
-      }
-    }
-
-    return id2StatMap;
-  }
-
-  /**
-   * Returns the live safe mode container threshold.
-   *
-   * @return double
-   */
-  @Override
-  public double getSafeModeCurrentContainerThreshold() {
-    return getCurrentContainerThreshold();
-  }
-
-  /**
-   * Returns safe mode status.
-   * @return boolean
-   */
-  @Override
-  public boolean isInSafeMode() {
-    return scmSafeModeManager.getInSafeMode();
-  }
-
-  /**
-   * Returns EventPublisher.
-   */
-  public EventPublisher getEventQueue() {
-    return eventQueue;
-  }
-
-  /**
-   * Force SCM out of safe mode.
-   */
-  public boolean exitSafeMode() {
-    scmSafeModeManager.exitSafeMode(eventQueue);
-    return true;
-  }
-
-  @VisibleForTesting
-  public double getCurrentContainerThreshold() {
-    return scmSafeModeManager.getCurrentContainerThreshold();
-  }
-
-  @Override
-  public Map<String, Integer> getContainerStateCount() {
-    Map<String, Integer> nodeStateCount = new HashMap<>();
-    for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) {
-      nodeStateCount.put(state.toString(),
-          containerManager.getContainerCountByState(state));
-    }
-    return nodeStateCount;
-  }
-
-  /**
-   * Returns the SCM metadata Store.
-   * @return SCMMetadataStore
-   */
-  public SCMMetadataStore getScmMetadataStore() {
-    return scmMetadataStore;
-  }
-
-  /**
-   * Returns the SCM network topology cluster.
-   * @return NetworkTopology
-   */
-  public NetworkTopology getClusterMap() {
-    return this.clusterMap;
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
deleted file mode 100644
index dce2a45..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.server.BaseHttpServer;
-
-import java.io.IOException;
-
-/**
- * HttpServer2 wrapper for the Ozone Storage Container Manager.
- */
-public class StorageContainerManagerHttpServer extends BaseHttpServer {
-
-  public StorageContainerManagerHttpServer(Configuration conf)
-      throws IOException {
-    super(conf, "scm");
-  }
-
-  @Override protected String getHttpAddressKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpBindHostKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_KEY;
-  }
-
-  @Override protected String getHttpsAddressKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpsBindHostKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTPS_BIND_HOST_KEY;
-  }
-
-  @Override protected String getBindHostDefault() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_DEFAULT;
-  }
-
-  @Override protected int getHttpBindPortDefault() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected int getHttpsBindPortDefault() {
-    return ScmConfigKeys.OZONE_SCM_HTTPS_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected String getKeytabFile() {
-    return ScmConfigKeys.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY;
-  }
-
-  @Override protected String getSpnegoPrincipal() {
-    return ScmConfigKeys.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY;
-  }
-
-  @Override protected String getEnabledKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_ENABLED_KEY;
-  }
-
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
deleted file mode 100644
index 62910f2..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license
- * agreements. See the NOTICE file distributed with this work for additional
- * information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache
- * License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the
- * License. You may obtain a
- * copy of the License at
- *
- * <p>http://www.apache.org/licenses/LICENSE-2.0
- *
- * <p>Unless required by applicable law or agreed to in writing, software
- * distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ozone.common.StorageInfo;
-import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-
-import java.io.IOException;
-
-/**
- * This class provides a command line interface to start the SCM
- * using Picocli.
- */
-@Command(name = "ozone scm",
-    hidden = true, description = "Start or initialize the scm server.",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true)
-public class StorageContainerManagerStarter extends GenericCli {
-
-  private OzoneConfiguration conf;
-  private SCMStarterInterface receiver;
-  private static final Logger LOG =
-      LoggerFactory.getLogger(StorageContainerManagerStarter.class);
-
-  public static void main(String[] args) throws Exception {
-    TracingUtil.initTracing("StorageContainerManager");
-    new StorageContainerManagerStarter(
-        new StorageContainerManagerStarter.SCMStarterHelper()).run(args);
-  }
-
-  public StorageContainerManagerStarter(SCMStarterInterface receiverObj) {
-    super();
-    receiver = receiverObj;
-  }
-
-  @Override
-  public Void call() throws Exception {
-    commonInit();
-    startScm();
-    return null;
-  }
-
-  /**
-   * This function implements a sub-command to generate a new
-   * cluster ID from the command line.
-   */
-  @CommandLine.Command(name = "--genclusterid",
-      customSynopsis = "ozone scm [global options] --genclusterid [options]",
-      hidden = false,
-      description = "Generate a new Cluster ID",
-      mixinStandardHelpOptions = true,
-      versionProvider = HddsVersionProvider.class)
-  public void generateClusterId() {
-    commonInit();
-    System.out.println("Generating new cluster id:");
-    System.out.println(receiver.generateClusterId());
-  }
-
-  /**
-   * This function implements a sub-command to allow the SCM to be
-   * initialized from the command line.
-   *
-   * @param clusterId - Cluster ID to use when initializing. If null,
-   *                  a random ID will be generated and used.
-   */
-  @CommandLine.Command(name = "--init",
-      customSynopsis = "ozone scm [global options] --init [options]",
-      hidden = false,
-      description = "Initialize the SCM if not already initialized",
-      mixinStandardHelpOptions = true,
-      versionProvider = HddsVersionProvider.class)
-  public void initScm(@CommandLine.Option(names = { "--clusterid" },
-      description = "Optional: The cluster id to use when formatting SCM",
-      paramLabel = "id") String clusterId)
-      throws Exception {
-    commonInit();
-    boolean result = receiver.init(conf, clusterId);
-    if (!result) {
-      throw new IOException("scm init failed");
-    }
-  }
-
-  /**
-   * This function is used by the command line to start the SCM.
-   */
-  private void startScm() throws Exception {
-    receiver.start(conf);
-  }
-
-  /**
-   * This function should be called by each command to ensure the configuration
-   * is set and print the startup banner message.
-   */
-  private void commonInit() {
-    conf = createOzoneConfiguration();
-
-    String[] originalArgs = getCmd().getParseResult().originalArgs()
-        .toArray(new String[0]);
-    StringUtils.startupShutdownMessage(StorageContainerManager.class,
-        originalArgs, LOG);
-  }
-
-  /**
-   * This static class wraps the external dependencies needed for this command
-   * to execute its tasks. This allows the dependencies to be injected for unit
-   * testing.
-   */
-  static class SCMStarterHelper implements SCMStarterInterface {
-
-    public void start(OzoneConfiguration conf) throws Exception {
-      StorageContainerManager stm = StorageContainerManager.createSCM(conf);
-      stm.start();
-      stm.join();
-    }
-
-    public boolean init(OzoneConfiguration conf, String clusterId)
-        throws IOException {
-      return StorageContainerManager.scmInit(conf, clusterId);
-    }
-
-    public String generateClusterId() {
-      return StorageInfo.newClusterID();
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java
deleted file mode 100644
index fe07272..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license
- * agreements. See the NOTICE file distributed with this work for additional
- * information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache
- * License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the
- * License. You may obtain a
- * copy of the License at
- *
- * <p>http://www.apache.org/licenses/LICENSE-2.0
- *
- * <p>Unless required by applicable law or agreed to in writing, software
- * distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.server;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java
deleted file mode 100644
index 2a50bca..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.CommandStatusEvent;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.server.events.Event;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventWatcher;
-import org.apache.hadoop.ozone.lease.LeaseManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * EventWatcher for start events and completion events with payloads of type
- * CommandForDatanode and CommandStatusEvent respectively.
- */
-public class RetriableDatanodeEventWatcher<T extends CommandStatusEvent>
-    extends EventWatcher<CommandForDatanode, T> {
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(RetriableDatanodeEventWatcher.class);
-
-  public RetriableDatanodeEventWatcher(Event<CommandForDatanode> startEvent,
-      Event<T> completionEvent, LeaseManager<Long> leaseManager) {
-    super(startEvent, completionEvent, leaseManager);
-  }
-
-  @Override
-  protected void onTimeout(EventPublisher publisher,
-      CommandForDatanode payload) {
-    LOG.info("RetriableDatanodeCommand type={} with id={} timed out. Retrying.",
-        payload.getCommand().getType(), payload.getId());
-    //put back to the original queue
-    publisher.fireEvent(SCMEvents.RETRIABLE_DATANODE_COMMAND, payload);
-  }
-
-  @Override
-  protected void onFinished(EventPublisher publisher,
-      CommandForDatanode payload) {
-
-  }
-}
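
An EventWatcher pairs a start event with a completion event under a lease: if the completion does not arrive before the lease expires, onTimeout fires and this watcher re-queues the datanode command. A hedged wiring sketch mirroring the constructor call earlier in this diff; the LeaseManager(String, long) constructor is assumed from its use elsewhere in HDDS, 'eventQueue' stands in for the SCM's EventQueue instance, and the timeout value is illustrative:

// 'eventQueue' is assumed to be the SCM's EventQueue instance.
LeaseManager<Long> leaseManager =
    new LeaseManager<>("CommandWatcher", 30000L /* ms, illustrative */);
leaseManager.start();

RetriableDatanodeEventWatcher watcher =
    new RetriableDatanodeEventWatcher<>(
        SCMEvents.RETRIABLE_DATANODE_COMMAND,
        SCMEvents.DELETE_BLOCK_STATUS,
        leaseManager);
watcher.start(eventQueue); // begins tracking start/completion pairs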
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java
deleted file mode 100644
index b1d2838..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html
deleted file mode 100644
index 1c5a334..0000000
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html
+++ /dev/null
@@ -1,76 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
-        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<html lang="en">
-<head>
-    <meta charset="utf-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
-    <meta name="description" content="HDFS Storage Container Manager">
-
-    <title>HDFS Storage Container Manager</title>
-
-    <link href="static/bootstrap-3.4.1/css/bootstrap.min.css" rel="stylesheet">
-    <link href="static/hadoop.css" rel="stylesheet">
-    <link href="static/nvd3-1.8.5.min.css" rel="stylesheet">
-
-    <link href="static/ozone.css" rel="stylesheet">
-
-</head>
-
-<body ng-app="scm">
-
-<header class="navbar navbar-inverse navbar-fixed-top bs-docs-nav">
-    <div class="container-fluid">
-        <div class="navbar-header">
-            <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar"
-                    aria-expanded="false" aria-controls="navbar">
-                <span class="sr-only">Toggle navigation</span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-            </button>
-            <a class="navbar-brand" href="#">HDFS SCM</a>
-        </div>
-
-
-        <navmenu
-                metrics="{ 'Rpc metrics' : '#!/metrics/rpc'}"></navmenu>
-
-
-    </div>
-</header>
-
-<div class="container-fluid" style="margin: 12pt">
-
-    <ng-view></ng-view>
-
-</div><!-- /.container -->
-
-<script src="static/jquery-3.4.1.min.js"></script>
-<script src="static/angular-1.6.4.min.js"></script>
-<script src="static/angular-route-1.6.4.min.js"></script>
-<script src="static/d3-3.5.17.min.js"></script>
-<script src="static/nvd3-1.8.5.min.js"></script>
-<script src="static/angular-nvd3-1.0.9.min.js"></script>
-<script src="static/ozone.js"></script>
-<script src="scm.js"></script>
-<script src="static/bootstrap-3.4.1/js/bootstrap.min.js"></script>
-</body>
-</html>
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html
deleted file mode 100644
index 2666f81..0000000
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html
+++ /dev/null
@@ -1,20 +0,0 @@
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-<overview>
-    <scm-overview>
-    </scm-overview>
-</overview>
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
deleted file mode 100644
index 38ce638..0000000
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
+++ /dev/null
@@ -1,44 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<h2>Node counts</h2>
-
-<table class="table table-bordered table-striped" class="col-md-6">
-    <tbody>
-    <tr ng-repeat="typestat in $ctrl.nodemanagermetrics.NodeCount | orderBy:'key':false:$ctrl.nodeOrder">
-        <td>{{typestat.key}}</td>
-        <td>{{typestat.value}}</td>
-    </tr>
-    </tbody>
-</table>
-
-<h2>Status</h2>
-<table class="table table-bordered table-striped" class="col-md-6">
-    <tbody>
-    <tr>
-        <td>Client Rpc port</td>
-        <td>{{$ctrl.overview.jmx.ClientRpcPort}}</td>
-    </tr>
-    <tr>
-        <td>Datanode Rpc port</td>
-        <td>{{$ctrl.overview.jmx.DatanodeRpcPort}}</td>
-    </tr>
-    <tr>
-        <td>Node Manager: Safe mode status</td>
-        <td>{{$ctrl.scmmetrics.InSafeMode}}</td>
-    </tr>
-    </tbody>
-</table>
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
deleted file mode 100644
index 2942a56..0000000
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-(function () {
-    "use strict";
-    angular.module('scm', ['ozone', 'nvd3']);
-
-    angular.module('scm').component('scmOverview', {
-        templateUrl: 'scm-overview.html',
-        require: {
-            overview: "^overview"
-        },
-        controller: function ($http) {
-            var ctrl = this;
-            $http.get("jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo")
-                .then(function (result) {
-                    ctrl.nodemanagermetrics = result.data.beans[0];
-                });
-            $http.get("jmx?qry=Hadoop:service=StorageContainerManager,name=StorageContainerManagerInfo,component=ServerRuntime")
-                .then(function (result) {
-                    ctrl.scmmetrics = result.data.beans[0];
-                });
-
-            var statusSortOrder = {
-                "HEALTHY": "a",
-                "STALE": "b",
-                "DEAD": "c",
-                "UNKNOWN": "z",
-                "DECOMMISSIONING": "x",
-                "DECOMMISSIONED": "y"
-            };
-            ctrl.nodeOrder = function (v1, v2) {
-                // Statuses with no defined sort order will compare as "undefined".
-                return ("" + statusSortOrder[v1.value]).localeCompare("" + statusSortOrder[v2.value]);
-            };
-
-        }
-    });
-
-})();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java
deleted file mode 100644
index 6e01e53..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java
+++ /dev/null
@@ -1,308 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
-
-import java.net.InetSocketAddress;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-
-import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses;
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Test the HDDS server side utilities.
- */
-public class HddsServerUtilTest {
-
-  @Rule
-  public Timeout timeout = new Timeout(300000);
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  /**
-   * Verify DataNode endpoint lookup failure if neither the client nor
-   * datanode endpoint is configured.
-   */
-  @Test
-  public void testMissingScmDataNodeAddress() {
-    final Configuration conf = new OzoneConfiguration();
-    thrown.expect(IllegalArgumentException.class);
-    HddsServerUtil.getScmAddressForDataNodes(conf);
-  }
-
-  /**
-   * Verify that the datanode endpoint is parsed correctly.
-   * This tests the logic used by the DataNodes to determine which address
-   * to connect to.
-   */
-  @Test
-  public void testGetScmDataNodeAddress() {
-    final Configuration conf = new OzoneConfiguration();
-
-    // First try a client address with just a host name. Verify it falls
-    // back to the default port.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4");
-    InetSocketAddress addr = HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertThat(addr.getHostString(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(
-        ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-
-    // Next try a client address with just a host name and port.
-    // Verify the port is ignored and the default DataNode port is used.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    addr = HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertThat(addr.getHostString(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(
-        ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-
-    // Set both OZONE_SCM_CLIENT_ADDRESS_KEY and
-    // OZONE_SCM_DATANODE_ADDRESS_KEY.
-    // Verify that the latter overrides and the port number is still the
-    // default.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "5.6.7.8");
-    addr =
-        HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertThat(addr.getHostString(), is("5.6.7.8"));
-    assertThat(addr.getPort(), is(
-        ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-
-    // Set both OZONE_SCM_CLIENT_ADDRESS_KEY and
-    // OZONE_SCM_DATANODE_ADDRESS_KEY.
-    // Verify that the latter overrides and the port number from the latter is
-    // used.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "5.6.7.8:200");
-    addr = HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertThat(addr.getHostString(), is("5.6.7.8"));
-    assertThat(addr.getPort(), is(200));
-  }
-
-
-  /**
-   * Verify that the client endpoint bind address is computed correctly.
-   * This tests the logic used by the SCM to determine its own bind address.
-   */
-  @Test
-  public void testScmClientBindHostDefault() {
-    final Configuration conf = new OzoneConfiguration();
-
-    // The bind host should be 0.0.0.0 unless OZONE_SCM_CLIENT_BIND_HOST_KEY
-    // is set differently.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4");
-    InetSocketAddress addr = HddsServerUtil.getScmClientBindAddress(conf);
-    assertThat(addr.getHostString(), is("0.0.0.0"));
-    assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
-
-    // The bind host should be 0.0.0.0 unless OZONE_SCM_CLIENT_BIND_HOST_KEY
-    // is set differently. The port number from OZONE_SCM_CLIENT_ADDRESS_KEY
-    // should be respected.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200");
-    addr = HddsServerUtil.getScmClientBindAddress(conf);
-    assertThat(addr.getHostString(), is("0.0.0.0"));
-    assertThat(addr.getPort(), is(100));
-
-    // OZONE_SCM_CLIENT_BIND_HOST_KEY should be respected.
-    // Port number should be default if none is specified via
-    // OZONE_SCM_DATANODE_ADDRESS_KEY.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4");
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY, "5.6.7.8");
-    addr = HddsServerUtil.getScmClientBindAddress(conf);
-    assertThat(addr.getHostString(), is("5.6.7.8"));
-    assertThat(addr.getPort(), is(
-        ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
-
-    // OZONE_SCM_CLIENT_BIND_HOST_KEY should be respected.
-    // Port number from OZONE_SCM_CLIENT_ADDRESS_KEY should be
-    // respected.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200");
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY, "5.6.7.8");
-    addr = HddsServerUtil.getScmClientBindAddress(conf);
-    assertThat(addr.getHostString(), is("5.6.7.8"));
-    assertThat(addr.getPort(), is(100));
-  }
-
-  /**
-   * Verify that the DataNode endpoint bind address is computed correctly.
-   * This tests the logic used by the SCM to determine its own bind address.
-   */
-  @Test
-  public void testScmDataNodeBindHostDefault() {
-    final Configuration conf = new OzoneConfiguration();
-
-    // The bind host should be 0.0.0.0 unless OZONE_SCM_DATANODE_BIND_HOST_KEY
-    // is set differently.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4");
-    InetSocketAddress addr = HddsServerUtil.getScmDataNodeBindAddress(conf);
-    assertThat(addr.getHostString(), is("0.0.0.0"));
-    assertThat(addr.getPort(), is(
-        ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-
-    // The bind host should be 0.0.0.0 unless OZONE_SCM_DATANODE_BIND_HOST_KEY
-    // is set differently. The port number from OZONE_SCM_DATANODE_ADDRESS_KEY
-    // should be respected.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200");
-    addr = HddsServerUtil.getScmDataNodeBindAddress(conf);
-    assertThat(addr.getHostString(), is("0.0.0.0"));
-    assertThat(addr.getPort(), is(200));
-
-    // OZONE_SCM_DATANODE_BIND_HOST_KEY should be respected.
-    // Port number should be default if none is specified via
-    // OZONE_SCM_DATANODE_ADDRESS_KEY.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY, "5.6.7.8");
-    addr = HddsServerUtil.getScmDataNodeBindAddress(conf);
-    assertThat(addr.getHostString(), is("5.6.7.8"));
-    assertThat(addr.getPort(), is(
-        ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-
-    // OZONE_SCM_DATANODE_BIND_HOST_KEY should be respected.
-    // Port number from OZONE_SCM_DATANODE_ADDRESS_KEY should be
-    // respected.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY, "5.6.7.8");
-    addr = HddsServerUtil.getScmDataNodeBindAddress(conf);
-    assertThat(addr.getHostString(), is("5.6.7.8"));
-    assertThat(addr.getPort(), is(200));
-  }
-
-  @Test
-  public void testGetSCMAddresses() {
-    final Configuration conf = new OzoneConfiguration();
-    Collection<InetSocketAddress> addresses = null;
-    InetSocketAddress addr = null;
-    Iterator<InetSocketAddress> it = null;
-
-    // Verify valid IP address setup
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "1.2.3.4");
-    addresses = getSCMAddresses(conf);
-    assertThat(addresses.size(), is(1));
-    addr = addresses.iterator().next();
-    assertThat(addr.getHostName(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT));
-
-    // Verify valid hostname setup
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1");
-    addresses = getSCMAddresses(conf);
-    assertThat(addresses.size(), is(1));
-    addr = addresses.iterator().next();
-    assertThat(addr.getHostName(), is("scm1"));
-    assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT));
-
-    // Verify valid hostname and port
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234");
-    addresses = getSCMAddresses(conf);
-    assertThat(addresses.size(), is(1));
-    addr = addresses.iterator().next();
-    assertThat(addr.getHostName(), is("scm1"));
-    assertThat(addr.getPort(), is(1234));
-
-    final HashMap<String, Integer> hostsAndPorts =
-        new HashMap<String, Integer>();
-    hostsAndPorts.put("scm1", 1234);
-    hostsAndPorts.put("scm2", 2345);
-    hostsAndPorts.put("scm3", 3456);
-
-    // Verify multiple hosts and port
-    conf.setStrings(
-        ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234,scm2:2345,scm3:3456");
-    addresses = getSCMAddresses(conf);
-    assertThat(addresses.size(), is(3));
-    it = addresses.iterator();
-    HashMap<String, Integer> expected1 = new HashMap<>(hostsAndPorts);
-    while(it.hasNext()) {
-      InetSocketAddress current = it.next();
-      assertTrue(expected1.remove(current.getHostName(),
-          current.getPort()));
-    }
-    assertTrue(expected1.isEmpty());
-
-    // Verify names with spaces
-    conf.setStrings(
-        ScmConfigKeys.OZONE_SCM_NAMES, " scm1:1234, scm2:2345 , scm3:3456 ");
-    addresses = getSCMAddresses(conf);
-    assertThat(addresses.size(), is(3));
-    it = addresses.iterator();
-    HashMap<String, Integer> expected2 = new HashMap<>(hostsAndPorts);
-    while (it.hasNext()) {
-      InetSocketAddress current = it.next();
-      assertTrue(expected2.remove(current.getHostName(),
-          current.getPort()));
-    }
-    assertTrue(expected2.isEmpty());
-
-    // Verify empty value
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "");
-    try {
-      addresses = getSCMAddresses(conf);
-      fail("Empty value should cause an IllegalArgumentException");
-    } catch (Exception e) {
-      assertTrue(e instanceof IllegalArgumentException);
-    }
-
-    // Verify invalid hostname
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "s..x..:1234");
-    try {
-      addresses = getSCMAddresses(conf);
-      fail("An invalid hostname should cause an IllegalArgumentException");
-    } catch (Exception e) {
-      assertTrue(e instanceof IllegalArgumentException);
-    }
-
-    // Verify invalid port
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm:xyz");
-    try {
-      addresses = getSCMAddresses(conf);
-      fail("An invalid port should cause an IllegalArgumentException");
-    } catch (Exception e) {
-      assertTrue(e instanceof IllegalArgumentException);
-    }
-
-    // Verify a mixed case (valid and invalid value both appears)
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234, scm:xyz");
-    try {
-      addresses = getSCMAddresses(conf);
-      fail("An invalid value should cause an IllegalArgumentException");
-    } catch (Exception e) {
-      assertTrue(e instanceof IllegalArgumentException);
-    }
-  }
-
-}
\ No newline at end of file
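For reference, a minimal sketch of what the multi-address parsing verified above looks like at a call site. The helper is the same statically imported getSCMAddresses the test exercises; the host names and ports are illustrative:

    OzoneConfiguration conf = new OzoneConfiguration();
    // Whitespace around entries is tolerated; entries without an explicit
    // port fall back to ScmConfigKeys.OZONE_SCM_DEFAULT_PORT.
    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234, scm2, scm3:3456");
    for (InetSocketAddress scm : getSCMAddresses(conf)) {
      System.out.println(scm.getHostName() + ":" + scm.getPort());
    }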
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
deleted file mode 100644
index 38f78ad..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer
-    .NodeRegistrationContainerReport;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-
-/**
- * Stateless helper functions for Hdds tests.
- */
-public final class HddsTestUtils {
-
-  private HddsTestUtils() {
-  }
-
-  /**
-   * Create NodeRegistrationContainerReport object.
-   *
-   * @param numOfContainers number of containers to be included in report.
-   * @return NodeRegistrationContainerReport
-   */
-  public static NodeRegistrationContainerReport
-      createNodeRegistrationContainerReport(int numOfContainers) {
-    return new NodeRegistrationContainerReport(
-        TestUtils.randomDatanodeDetails(),
-        TestUtils.getRandomContainerReports(numOfContainers));
-  }
-
-  /**
-   * Create NodeRegistrationContainerReport object.
-   *
-   * @param dnContainers List of containers to be included in report
-   * @return NodeRegistrationContainerReport
-   */
-  public static NodeRegistrationContainerReport
-      createNodeRegistrationContainerReport(List<ContainerInfo> dnContainers) {
-    List<StorageContainerDatanodeProtocolProtos.ContainerReplicaProto>
-        containers = new ArrayList<>();
-    dnContainers.forEach(c -> {
-      containers.add(TestUtils.getRandomContainerInfo(c.getContainerID()));
-    });
-    return new NodeRegistrationContainerReport(
-        TestUtils.randomDatanodeDetails(),
-        TestUtils.getContainerReports(containers));
-  }
-
-  public static StorageContainerManager getScm(OzoneConfiguration conf)
-      throws IOException, AuthenticationException {
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
-    conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-    conf.setBoolean(OZONE_ENABLED, true);
-    SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-    if (scmStore.getState() != Storage.StorageState.INITIALIZED) {
-      String clusterId = UUID.randomUUID().toString();
-      String scmId = UUID.randomUUID().toString();
-      scmStore.setClusterId(clusterId);
-      scmStore.setScmId(scmId);
-      // writes the version file properties
-      scmStore.initialize();
-    }
-    return StorageContainerManager.createSCM(conf);
-  }
-
-  /**
-   * Creates list of ContainerInfo.
-   *
-   * @param numContainers number of ContainerInfo to be included in list.
-   * @return {@literal List<ContainerInfo>}
-   */
-  public static List<ContainerInfo> getContainerInfo(int numContainers) {
-    List<ContainerInfo> containerInfoList = new ArrayList<>();
-    for (int i = 0; i < numContainers; i++) {
-      ContainerInfo.Builder builder = new ContainerInfo.Builder();
-      containerInfoList.add(builder
-          .setContainerID(RandomUtils.nextLong())
-          .build());
-    }
-    return containerInfoList;
-  }
-
-}
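A hedged sketch of how a test might use the getScm helper deleted above. The metadata path is illustrative, OZONE_METADATA_DIRS must point at a writable directory for SCMStorageConfig to initialize, and the start()/stop() lifecycle is assumed to follow the usual Hadoop service pattern:

    OzoneConfiguration conf = new OzoneConfiguration();
    // Illustrative path; real tests use a per-test temporary folder.
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, "/tmp/scm-test-meta");
    // getScm() pins client/block/datanode/HTTP endpoints to ephemeral
    // ports and writes the SCM version file on first use.
    StorageContainerManager scm = HddsTestUtils.getScm(conf);
    scm.start();
    try {
      // ... exercise the SCM under test ...
    } finally {
      scm.stop();
    }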
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsWhiteboxTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsWhiteboxTestUtils.java
deleted file mode 100644
index abb9668..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsWhiteboxTestUtils.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import java.lang.reflect.Field;
-
-
-/**
- * This class includes some functions copied from Mockito's
- * Whitebox class for portability reasons.
- *
- * Whitebox methods are accessed differently in different
- * versions of Hadoop. Specifically, the availability of the class
- * changed from Apache Hadoop 3.1.0 to Hadoop 3.2.0.
- *
- * Duplicating the test code is ugly but it allows building
- * HDDS portably.
- */
-public final class HddsWhiteboxTestUtils {
-
-  /**
-   * Private constructor to disallow construction.
-   */
-  private HddsWhiteboxTestUtils() {
-  }
-
-  /**
-   * Get the field of the target object.
-   * @param target target object
-   * @param field field name
-   * @return the field of the object
-   */
-  public static Object getInternalState(Object target, String field) {
-    Class<?> c = target.getClass();
-    try {
-      Field f = getFieldFromHierarchy(c, field);
-      f.setAccessible(true);
-      return f.get(target);
-    } catch (Exception e) {
-      throw new RuntimeException(
-          "Unable to set internal state on a private field.", e);
-    }
-  }
-
-  /**
-   * Set the field of the target object.
-   * @param target target object
-   * @param field field name
-   * @param value value to set
-   */
-  public static void setInternalState(
-      Object target, String field, Object value) {
-    Class<?> c = target.getClass();
-    try {
-      Field f = getFieldFromHierarchy(c, field);
-      f.setAccessible(true);
-      f.set(target, value);
-    } catch (Exception e) {
-      throw new RuntimeException(
-          "Unable to set internal state on a private field.", e);
-    }
-  }
-
-  private static Field getFieldFromHierarchy(Class<?> clazz, String field) {
-    Field f = getField(clazz, field);
-    while (f == null && clazz != Object.class) {
-      clazz = clazz.getSuperclass();
-      f = getField(clazz, field);
-    }
-    if (f == null) {
-      throw new RuntimeException(
-          "You want me to set value to this field: '" + field +
-              "' on this class: '" + clazz.getSimpleName() +
-              "' but this field is not declared within hierarchy " +
-              "of this class!");
-    }
-    return f;
-  }
-
-  private static Field getField(Class<?> clazz, String field) {
-    try {
-      return clazz.getDeclaredField(field);
-    } catch (NoSuchFieldException e) {
-      return null;
-    }
-  }
-}
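A minimal usage sketch for the reflection helpers deleted above; the Counter class is hypothetical and exists only to illustrate the get/set round trip:

    public class WhiteboxExample {
      // Hypothetical target type with no accessors.
      private static class Counter {
        private int count = 1;
      }

      public static void main(String[] args) {
        Counter c = new Counter();
        // Read the private field without a getter.
        int before = (int) HddsWhiteboxTestUtils.getInternalState(c, "count");
        // Overwrite the private field without a setter.
        HddsWhiteboxTestUtils.setInternalState(c, "count", before + 41);
        System.out.println(HddsWhiteboxTestUtils.getInternalState(c, "count")); // 42
      }
    }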
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
deleted file mode 100644
index 54cc398..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.test.PathUtils;
-
-import org.apache.commons.io.FileUtils;
-import static org.junit.Assert.assertTrue;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Unit tests for {@link HddsServerUtil}.
- */
-public class TestHddsServerUtils {
-  public static final Logger LOG = LoggerFactory.getLogger(
-      TestHddsServerUtils.class);
-
-  @Rule
-  public Timeout timeout = new Timeout(300_000);
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  /**
-   * Test getting OZONE_SCM_DATANODE_ADDRESS_KEY with port.
-   */
-  @Test
-  @SuppressWarnings("StringSplitter")
-  public void testGetDatanodeAddressWithPort() {
-    final String scmHost = "host123:100";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost);
-    final InetSocketAddress address =
-        HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertEquals(address.getHostName(), scmHost.split(":")[0]);
-    assertEquals(address.getPort(), Integer.parseInt(scmHost.split(":")[1]));
-  }
-
-  /**
-   * Test getting OZONE_SCM_DATANODE_ADDRESS_KEY without port.
-   */
-  @Test
-  public void testGetDatanodeAddressWithoutPort() {
-    final String scmHost = "host123";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost);
-    final InetSocketAddress address =
-        HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertEquals(scmHost, address.getHostName());
-    assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
-  }
-
-  /**
-   * When OZONE_SCM_DATANODE_ADDRESS_KEY is undefined, test fallback to
-   * OZONE_SCM_CLIENT_ADDRESS_KEY.
-   */
-  @Test
-  public void testDatanodeAddressFallbackToClientNoPort() {
-    final String scmHost = "host123";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
-    final InetSocketAddress address =
-        HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertEquals(scmHost, address.getHostName());
-    assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
-  }
-
-  /**
-   * When OZONE_SCM_DATANODE_ADDRESS_KEY is undefined, test fallback to
-   * OZONE_SCM_CLIENT_ADDRESS_KEY. Port number defined by
-   * OZONE_SCM_CLIENT_ADDRESS_KEY should be ignored.
-   */
-  @Test
-  @SuppressWarnings("StringSplitter")
-  public void testDatanodeAddressFallbackToClientWithPort() {
-    final String scmHost = "host123:100";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
-    final InetSocketAddress address =
-        HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertEquals(address.getHostName(), scmHost.split(":")[0]);
-    assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
-  }
-
-  /**
-   * When OZONE_SCM_DATANODE_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
-   * are undefined, test fallback to OZONE_SCM_NAMES.
-   */
-  @Test
-  public void testDatanodeAddressFallbackToScmNamesNoPort() {
-    final String scmHost = "host123";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_NAMES, scmHost);
-    final InetSocketAddress address =
-        HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertEquals(scmHost, address.getHostName());
-    assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
-  }
-
-  /**
-   * When OZONE_SCM_DATANODE_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
-   * are undefined, test fallback to OZONE_SCM_NAMES. Port number
-   * defined by OZONE_SCM_NAMES should be ignored.
-   */
-  @Test
-  @SuppressWarnings("StringSplitter")
-  public void testDatanodeAddressFallbackToScmNamesWithPort() {
-    final String scmHost = "host123:100";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_NAMES, scmHost);
-    final InetSocketAddress address =
-        HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertEquals(address.getHostName(), scmHost.split(":")[0]);
-    assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
-  }
-
-  /**
-   * getScmAddressForDataNodes should fail when OZONE_SCM_NAMES has
-   * multiple addresses.
-   */
-  @Test
-  public void testClientFailsWithMultipleScmNames() {
-    final String scmHost = "host123,host456";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_NAMES, scmHost);
-    thrown.expect(IllegalArgumentException.class);
-    HddsServerUtil.getScmAddressForDataNodes(conf);
-  }
-
-  /**
-   * Test {@link ServerUtils#getScmDbDir}.
-   */
-  @Test
-  public void testGetScmDbDir() {
-    final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class);
-    final File dbDir = new File(testDir, "scmDbDir");
-    final File metaDir = new File(testDir, "metaDir");   // should be ignored.
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath());
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
-
-    try {
-      assertEquals(dbDir, ServerUtils.getScmDbDir(conf));
-      assertTrue(dbDir.exists());          // should have been created.
-    } finally {
-      FileUtils.deleteQuietly(dbDir);
-    }
-  }
-
-  /**
-   * Test {@link ServerUtils#getScmDbDir} with fallback to OZONE_METADATA_DIRS
-   * when OZONE_SCM_DB_DIRS is undefined.
-   */
-  @Test
-  public void testGetScmDbDirWithFallback() {
-    final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class);
-    final File metaDir = new File(testDir, "metaDir");
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
-    try {
-      assertEquals(metaDir, ServerUtils.getScmDbDir(conf));
-      assertTrue(metaDir.exists());        // should have been created.
-    } finally {
-      FileUtils.deleteQuietly(metaDir);
-    }
-  }
-
-  @Test
-  public void testNoScmDbDirConfigured() {
-    thrown.expect(IllegalArgumentException.class);
-    ServerUtils.getScmDbDir(new OzoneConfiguration());
-  }
-
-  @Test
-  public void testGetStaleNodeInterval() {
-    final Configuration conf = new OzoneConfiguration();
-
-    // Set OZONE_SCM_STALENODE_INTERVAL to 300s, which is larger
-    // than the maximum allowed value.
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 300, TimeUnit.SECONDS);
-    conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100);
-    // the max limit value will be returned
-    assertEquals(100000, HddsServerUtil.getStaleNodeInterval(conf));
-
-    // Set OZONE_SCM_STALENODE_INTERVAL to 10ms, which is smaller
-    // than the minimum allowed value.
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 10,
-        TimeUnit.MILLISECONDS);
-    conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100);
-    // the min limit value will be returned
-    assertEquals(90000, HddsServerUtil.getStaleNodeInterval(conf));
-  }
-}
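The fallback chain these tests pin down, condensed into a single sketch; the key names come from the tests, the host values are illustrative:

    OzoneConfiguration conf = new OzoneConfiguration();
    // Highest precedence: the dedicated datanode endpoint (its port is honoured).
    conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, "scm-a:100");
    // Fallback 1: the client endpoint (host only; its port is ignored).
    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "scm-b:200");
    // Fallback 2: OZONE_SCM_NAMES (single entry only; its port is also ignored).
    conf.set(OZONE_SCM_NAMES, "scm-c:300");
    // With all three set, this resolves to scm-a:100.
    InetSocketAddress addr = HddsServerUtil.getScmAddressForDataNodes(conf);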
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
deleted file mode 100644
index 0b3edcc..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManagerHttpServer;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpConfig.Policy;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URL;
-import java.net.URLConnection;
-import java.util.Arrays;
-import java.util.Collection;
-
-/**
- * Tests the HTTP server of SCM with various HTTP policies.
- */
-@RunWith(value = Parameterized.class)
-public class TestStorageContainerManagerHttpServer {
-  private static final String BASEDIR = GenericTestUtils
-      .getTempPath(TestStorageContainerManagerHttpServer.class.getSimpleName());
-  private static String keystoresDir;
-  private static String sslConfDir;
-  private static Configuration conf;
-  private static URLConnectionFactory connectionFactory;
-
-  @Parameters public static Collection<Object[]> policy() {
-    Object[][] params = new Object[][] {
-        {HttpConfig.Policy.HTTP_ONLY},
-        {HttpConfig.Policy.HTTPS_ONLY},
-        {HttpConfig.Policy.HTTP_AND_HTTPS} };
-    return Arrays.asList(params);
-  }
-
-  private final HttpConfig.Policy policy;
-
-  public TestStorageContainerManagerHttpServer(Policy policy) {
-    super();
-    this.policy = policy;
-  }
-
-  @BeforeClass public static void setUp() throws Exception {
-    File base = new File(BASEDIR);
-    FileUtil.fullyDelete(base);
-    base.mkdirs();
-    conf = new Configuration();
-    keystoresDir = new File(BASEDIR).getAbsolutePath();
-    sslConfDir = KeyStoreTestUtil.getClasspathDir(
-        TestStorageContainerManagerHttpServer.class);
-    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
-    connectionFactory =
-        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
-    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getClientSSLConfigFileName());
-    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getServerSSLConfigFileName());
-  }
-
-  @AfterClass public static void tearDown() throws Exception {
-    FileUtil.fullyDelete(new File(BASEDIR));
-    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
-  }
-
-  @Test public void testHttpPolicy() throws Exception {
-    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
-    conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "localhost:0");
-    conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0");
-
-    StorageContainerManagerHttpServer server = null;
-    try {
-      server = new StorageContainerManagerHttpServer(conf);
-      server.start();
-
-      Assert.assertTrue(implies(policy.isHttpEnabled(),
-          canAccess("http", server.getHttpAddress())));
-      Assert.assertTrue(implies(policy.isHttpEnabled() &&
-              !policy.isHttpsEnabled(),
-          !canAccess("https", server.getHttpsAddress())));
-
-      Assert.assertTrue(implies(policy.isHttpsEnabled(),
-          canAccess("https", server.getHttpsAddress())));
-      Assert.assertTrue(implies(policy.isHttpsEnabled() &&
-              !policy.isHttpEnabled(),
-          !canAccess("http", server.getHttpAddress())));
-
-    } finally {
-      if (server != null) {
-        server.stop();
-      }
-    }
-  }
-
-  private static boolean canAccess(String scheme, InetSocketAddress addr) {
-    if (addr == null) {
-      return false;
-    }
-    try {
-      URL url =
-          new URL(scheme + "://" + NetUtils.getHostPortString(addr) + "/jmx");
-      URLConnection conn = connectionFactory.openConnection(url);
-      conn.connect();
-      conn.getContent();
-    } catch (IOException e) {
-      return false;
-    }
-    return true;
-  }
-
-  private static boolean implies(boolean a, boolean b) {
-    return !a || b;
-  }
-}
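A sketch of forcing an HTTPS-only SCM web endpoint outside the parameterized test, using the same keys; the port is illustrative and the keystore/SSL setup (done in setUp above) is omitted:

    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY,
        HttpConfig.Policy.HTTPS_ONLY.name());
    conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:9879");
    StorageContainerManagerHttpServer server =
        new StorageContainerManagerHttpServer(conf);
    server.start();
    // Under HTTPS_ONLY the plain HTTP endpoint never binds, so the test's
    // canAccess("http", server.getHttpAddress()) check fails as expected.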
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
deleted file mode 100644
index 37321d7..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ /dev/null
@@ -1,597 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineAction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineActionsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReport;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
-import org.apache.hadoop.hdds.scm.server
-    .SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
-import org.apache.hadoop.hdds.scm.server
-    .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol
-    .proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol
-    .proto.StorageContainerDatanodeProtocolProtos.CommandStatus;
-import org.apache.hadoop.hdds.protocol
-    .proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageTypeProto;
-import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
-import org.apache.hadoop.security.authentication.client
-    .AuthenticationException;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.ThreadLocalRandom;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-
-/**
- * Stateless helper functions to handle SCM/datanode connections.
- */
-public final class TestUtils {
-
-  private static ThreadLocalRandom random = ThreadLocalRandom.current();
-
-  private TestUtils() {
-  }
-
-  /**
-   * Creates DatanodeDetails with random UUID.
-   *
-   * @return DatanodeDetails
-   */
-  public static DatanodeDetails randomDatanodeDetails() {
-    return createDatanodeDetails(UUID.randomUUID());
-  }
-
-  /**
-   * Creates DatanodeDetails with a random UUID and the given hostname and
-   * network location.
-   *
-   * @param hostname hostname of the Datanode
-   * @param loc      network location of the Datanode
-   *
-   * @return DatanodeDetails
-   */
-  public static DatanodeDetails createDatanodeDetails(String hostname,
-       String loc) {
-    String ipAddress = random.nextInt(256)
-        + "." + random.nextInt(256)
-        + "." + random.nextInt(256)
-        + "." + random.nextInt(256);
-    return createDatanodeDetails(UUID.randomUUID().toString(), hostname,
-        ipAddress, loc);
-  }
-
-  /**
-   * Creates DatanodeDetails using the given UUID.
-   *
-   * @param uuid Datanode's UUID
-   *
-   * @return DatanodeDetails
-   */
-  public static DatanodeDetails createDatanodeDetails(UUID uuid) {
-    String ipAddress = random.nextInt(256)
-        + "." + random.nextInt(256)
-        + "." + random.nextInt(256)
-        + "." + random.nextInt(256);
-    return createDatanodeDetails(uuid.toString(), "localhost" + "-" + ipAddress,
-        ipAddress, null);
-  }
-
-  /**
-   * Generates DatanodeDetails from RegisteredCommand.
-   *
-   * @param registeredCommand registration response from SCM
-   *
-   * @return DatanodeDetails
-   */
-  public static DatanodeDetails getDatanodeDetails(
-      RegisteredCommand registeredCommand) {
-    return createDatanodeDetails(
-        registeredCommand.getDatanode().getUuidString(),
-        registeredCommand.getDatanode().getHostName(),
-        registeredCommand.getDatanode().getIpAddress(),
-        null);
-  }
-
-  /**
-   * Creates DatanodeDetails with the given information.
-   *
-   * @param uuid      Datanode's UUID
-   * @param hostname  hostname of Datanode
-   * @param ipAddress ip address of Datanode
-   * @param networkLocation network location of Datanode
-   *
-   * @return DatanodeDetails
-   */
-  public static DatanodeDetails createDatanodeDetails(String uuid,
-      String hostname, String ipAddress, String networkLocation) {
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, 0);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, 0);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, 0);
-    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
-    builder.setUuid(uuid)
-        .setHostName(hostname)
-        .setIpAddress(ipAddress)
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort)
-        .setNetworkLocation(networkLocation);
-    return builder.build();
-  }
-
-  /**
-   * Creates a random DatanodeDetails and register it with the given
-   * NodeManager.
-   *
-   * @param nodeManager NodeManager
-   *
-   * @return DatanodeDetails
-   */
-  public static DatanodeDetails createRandomDatanodeAndRegister(
-      SCMNodeManager nodeManager) {
-    return getDatanodeDetails(
-        nodeManager.register(randomDatanodeDetails(), null,
-                getRandomPipelineReports()));
-  }
-
-  /**
-   * Get specified number of DatanodeDetails and register them with node
-   * manager.
-   *
-   * @param nodeManager node manager to register the datanode ids.
-   * @param count       number of DatanodeDetails needed.
-   *
-   * @return list of DatanodeDetails
-   */
-  public static List<DatanodeDetails> getListOfRegisteredDatanodeDetails(
-      SCMNodeManager nodeManager, int count) {
-    ArrayList<DatanodeDetails> datanodes = new ArrayList<>();
-    for (int i = 0; i < count; i++) {
-      datanodes.add(createRandomDatanodeAndRegister(nodeManager));
-    }
-    return datanodes;
-  }
-
-  /**
-   * Generates a random NodeReport.
-   *
-   * @return NodeReportProto
-   */
-  public static NodeReportProto getRandomNodeReport() {
-    return getRandomNodeReport(1);
-  }
-
-  /**
-   * Generates a random NodeReport with the given number of storage reports.
-   *
-   * @param numberOfStorageReport number of storage report this node report
-   *                              should have
-   * @return NodeReportProto
-   */
-  public static NodeReportProto getRandomNodeReport(int numberOfStorageReport) {
-    UUID nodeId = UUID.randomUUID();
-    return getRandomNodeReport(nodeId, File.separator + nodeId,
-        numberOfStorageReport);
-  }
-
-  /**
-   * Generates a random NodeReport for the given nodeId with the given
-   * base path and number of storage reports.
-   *
-   * @param nodeId                datanode id
-   * @param basePath              base path of storage directory
-   * @param numberOfStorageReport number of storage report
-   *
-   * @return NodeReportProto
-   */
-  public static NodeReportProto getRandomNodeReport(UUID nodeId,
-      String basePath, int numberOfStorageReport) {
-    List<StorageReportProto> storageReports = new ArrayList<>();
-    for (int i = 0; i < numberOfStorageReport; i++) {
-      storageReports.add(getRandomStorageReport(nodeId,
-          basePath + File.separator + i));
-    }
-    return createNodeReport(storageReports);
-  }
-
-  /**
-   * Creates NodeReport with the given storage reports.
-   *
-   * @param reports one or more storage report
-   *
-   * @return NodeReportProto
-   */
-  public static NodeReportProto createNodeReport(
-      StorageReportProto... reports) {
-    return createNodeReport(Arrays.asList(reports));
-  }
-
-  /**
-   * Creates NodeReport with the given storage reports.
-   *
-   * @param reports storage reports to be included in the node report.
-   *
-   * @return NodeReportProto
-   */
-  public static NodeReportProto createNodeReport(
-      List<StorageReportProto> reports) {
-    NodeReportProto.Builder nodeReport = NodeReportProto.newBuilder();
-    nodeReport.addAllStorageReport(reports);
-    return nodeReport.build();
-  }
-
-  /**
-   * Generates random storage report.
-   *
-   * @param nodeId datanode id to which the storage report belongs
-   * @param path   path of the storage
-   *
-   * @return StorageReportProto
-   */
-  public static StorageReportProto getRandomStorageReport(UUID nodeId,
-      String path) {
-    return createStorageReport(nodeId, path,
-        random.nextInt(1000),
-        random.nextInt(500),
-        random.nextInt(500),
-        StorageTypeProto.DISK);
-  }
-
-  /**
-   * Creates storage report with the given information.
-   *
-   * @param nodeId    datanode id
-   * @param path      storage dir
-   * @param capacity  storage size
-   * @param used      space used
-   * @param remaining space remaining
-   * @param type      type of storage
-   *
-   * @return StorageReportProto
-   */
-  public static StorageReportProto createStorageReport(UUID nodeId, String path,
-      long capacity, long used, long remaining, StorageTypeProto type) {
-    Preconditions.checkNotNull(nodeId);
-    Preconditions.checkNotNull(path);
-    StorageReportProto.Builder srb = StorageReportProto.newBuilder();
-    srb.setStorageUuid(nodeId.toString())
-        .setStorageLocation(path)
-        .setCapacity(capacity)
-        .setScmUsed(used)
-        .setRemaining(remaining);
-    StorageTypeProto storageTypeProto =
-        type == null ? StorageTypeProto.DISK : type;
-    srb.setStorageType(storageTypeProto);
-    return srb.build();
-  }
-
-
-  /**
-   * Generates random container reports.
-   *
-   * @return ContainerReportsProto
-   */
-  public static ContainerReportsProto getRandomContainerReports() {
-    return getRandomContainerReports(1);
-  }
-
-  /**
-   * Generates random container report with the given number of containers.
-   *
-   * @param numberOfContainers number of containers to be in container report
-   *
-   * @return ContainerReportsProto
-   */
-  public static ContainerReportsProto getRandomContainerReports(
-      int numberOfContainers) {
-    List<ContainerReplicaProto> containerInfos = new ArrayList<>();
-    for (int i = 0; i < numberOfContainers; i++) {
-      containerInfos.add(getRandomContainerInfo(i));
-    }
-    return getContainerReports(containerInfos);
-  }
-
-
-  public static PipelineReportsProto getRandomPipelineReports() {
-    return PipelineReportsProto.newBuilder().build();
-  }
-
-  public static PipelineReportFromDatanode getPipelineReportFromDatanode(
-      DatanodeDetails dn, PipelineID... pipelineIDs) {
-    PipelineReportsProto.Builder reportBuilder =
-        PipelineReportsProto.newBuilder();
-    for (PipelineID pipelineID : pipelineIDs) {
-      reportBuilder.addPipelineReport(
-          PipelineReport.newBuilder().setPipelineID(pipelineID.getProtobuf()));
-    }
-    return new PipelineReportFromDatanode(dn, reportBuilder.build());
-  }
-
-  public static PipelineActionsFromDatanode getPipelineActionFromDatanode(
-      DatanodeDetails dn, PipelineID... pipelineIDs) {
-    PipelineActionsProto.Builder actionsProtoBuilder =
-        PipelineActionsProto.newBuilder();
-    for (PipelineID pipelineID : pipelineIDs) {
-      ClosePipelineInfo closePipelineInfo =
-          ClosePipelineInfo.newBuilder().setPipelineID(pipelineID.getProtobuf())
-              .setReason(ClosePipelineInfo.Reason.PIPELINE_FAILED)
-              .setDetailedReason("").build();
-      actionsProtoBuilder.addPipelineActions(PipelineAction.newBuilder()
-          .setClosePipeline(closePipelineInfo)
-          .setAction(PipelineAction.Action.CLOSE)
-          .build());
-    }
-    return new PipelineActionsFromDatanode(dn, actionsProtoBuilder.build());
-  }
-
-  /**
-   * Creates container report with the given ContainerInfo(s).
-   *
-   * @param containerInfos one or more ContainerInfo
-   *
-   * @return ContainerReportsProto
-   */
-  public static ContainerReportsProto getContainerReports(
-      ContainerReplicaProto... containerInfos) {
-    return getContainerReports(Arrays.asList(containerInfos));
-  }
-
-  /**
-   * Creates container report with the given ContainerInfo(s).
-   *
-   * @param containerInfos list of ContainerInfo
-   *
-   * @return ContainerReportsProto
-   */
-  public static ContainerReportsProto getContainerReports(
-      List<ContainerReplicaProto> containerInfos) {
-    ContainerReportsProto.Builder
-        reportsBuilder = ContainerReportsProto.newBuilder();
-    for (ContainerReplicaProto containerInfo : containerInfos) {
-      reportsBuilder.addReports(containerInfo);
-    }
-    return reportsBuilder.build();
-  }
-
-  /**
-   * Generates random ContainerInfo.
-   *
-   * @param containerId container id of the ContainerInfo
-   *
-   * @return ContainerReplicaProto
-   */
-  public static ContainerReplicaProto getRandomContainerInfo(
-      long containerId) {
-    return createContainerInfo(containerId,
-        OzoneConsts.GB * 5,
-        random.nextLong(1000),
-        OzoneConsts.GB * random.nextInt(5),
-        random.nextLong(1000),
-        OzoneConsts.GB * random.nextInt(2),
-        random.nextLong(1000),
-        OzoneConsts.GB * random.nextInt(5));
-  }
-
-  /**
-   * Creates ContainerInfo with the given details.
-   *
-   * @param containerId id of the container
-   * @param size        size of container
-   * @param keyCount    number of keys
-   * @param bytesUsed   bytes used by the container
-   * @param readCount   number of reads
-   * @param readBytes   bytes read
-   * @param writeCount  number of writes
-   * @param writeBytes  bytes written
-   *
-   * @return ContainerReplicaProto
-   */
-  @SuppressWarnings("parameternumber")
-  public static ContainerReplicaProto createContainerInfo(
-      long containerId, long size, long keyCount, long bytesUsed,
-      long readCount, long readBytes, long writeCount, long writeBytes) {
-    return ContainerReplicaProto.newBuilder()
-        .setContainerID(containerId)
-        .setState(ContainerReplicaProto.State.OPEN)
-        .setSize(size)
-        .setKeyCount(keyCount)
-        .setUsed(bytesUsed)
-        .setReadCount(readCount)
-        .setReadBytes(readBytes)
-        .setWriteCount(writeCount)
-        .setWriteBytes(writeBytes)
-        .build();
-  }
-
-  /**
-   * Create Command Status report object.
-   * @return CommandStatusReportsProto
-   */
-  public static CommandStatusReportsProto createCommandStatusReport(
-      List<CommandStatus> reports) {
-    CommandStatusReportsProto.Builder report = CommandStatusReportsProto
-        .newBuilder();
-    report.addAllCmdStatus(reports);
-    return report.build();
-  }
-
-  public static org.apache.hadoop.hdds.scm.container.ContainerInfo
-      allocateContainer(ContainerManager containerManager)
-      throws IOException {
-    return containerManager
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, "root");
-
-  }
-
-  public static void closeContainer(ContainerManager containerManager,
-      ContainerID id) throws IOException {
-    containerManager.updateContainerState(
-        id, HddsProtos.LifeCycleEvent.FINALIZE);
-    containerManager.updateContainerState(
-        id, HddsProtos.LifeCycleEvent.CLOSE);
-
-  }
-
-  /**
-   * Moves the container to the quasi-closed state.
-   * @param containerManager ContainerManager tracking the container
-   * @param id               id of the container to quasi-close
-   * @throws IOException if a state transition fails
-   */
-  public static void quasiCloseContainer(ContainerManager containerManager,
-      ContainerID id) throws IOException {
-    containerManager.updateContainerState(
-        id, HddsProtos.LifeCycleEvent.FINALIZE);
-    containerManager.updateContainerState(
-        id, HddsProtos.LifeCycleEvent.QUASI_CLOSE);
-
-  }
-
-  /**
-   * Constructs and returns a StorageContainerManager instance using the given
-   * configuration. The ports used by this StorageContainerManager are
-   * randomly selected from free ports available.
-   *
-   * @param conf OzoneConfiguration
-   * @return StorageContainerManager instance
-   * @throws IOException
-   * @throws AuthenticationException
-   */
-  public static StorageContainerManager getScm(OzoneConfiguration conf)
-      throws IOException, AuthenticationException {
-    return getScm(conf, new SCMConfigurator());
-  }
-
-  /**
-   * Constructs and returns a StorageContainerManager instance using the given
-   * configuration and the configurator. The ports used by this
-   * StorageContainerManager are randomly selected from free ports available.
-   *
-   * @param conf OzoneConfiguration
-   * @param configurator SCMConfigurator
-   * @return StorageContainerManager instance
-   * @throws IOException
-   * @throws AuthenticationException
-   */
-  public static StorageContainerManager getScm(OzoneConfiguration conf,
-                                               SCMConfigurator configurator)
-      throws IOException, AuthenticationException {
-    conf.setBoolean(OZONE_ENABLED, true);
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
-    conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-    SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-    if (scmStore.getState() != Storage.StorageState.INITIALIZED) {
-      String clusterId = UUID.randomUUID().toString();
-      String scmId = UUID.randomUUID().toString();
-      scmStore.setClusterId(clusterId);
-      scmStore.setScmId(scmId);
-      // writes the version file properties
-      scmStore.initialize();
-    }
-    return new StorageContainerManager(conf, configurator);
-  }
-
-  public static ContainerInfo getContainer(
-      final HddsProtos.LifeCycleState state) {
-    return new ContainerInfo.Builder()
-        .setContainerID(RandomUtils.nextLong())
-        .setReplicationType(HddsProtos.ReplicationType.RATIS)
-        .setReplicationFactor(HddsProtos.ReplicationFactor.THREE)
-        .setState(state)
-        .setSequenceId(10000L)
-        .setOwner("TEST")
-        .build();
-  }
-
-  public static Set<ContainerReplica> getReplicas(
-      final ContainerID containerId,
-      final ContainerReplicaProto.State state,
-      final DatanodeDetails... datanodeDetails) {
-    return getReplicas(containerId, state, 10000L, datanodeDetails);
-  }
-
-  public static Set<ContainerReplica> getReplicas(
-      final ContainerID containerId,
-      final ContainerReplicaProto.State state,
-      final long sequenceId,
-      final DatanodeDetails... datanodeDetails) {
-    Set<ContainerReplica> replicas = new HashSet<>();
-    for (DatanodeDetails datanode : datanodeDetails) {
-      replicas.add(getReplicas(containerId, state,
-          sequenceId, datanode.getUuid(), datanode));
-    }
-    return replicas;
-  }
-
-  public static ContainerReplica getReplicas(
-      final ContainerID containerId,
-      final ContainerReplicaProto.State state,
-      final long sequenceId,
-      final UUID originNodeId,
-      final DatanodeDetails datanodeDetails) {
-    return ContainerReplica.newBuilder()
-        .setContainerID(containerId)
-        .setContainerState(state)
-        .setDatanodeDetails(datanodeDetails)
-        .setOriginNodeId(originNodeId)
-        .setSequenceId(sequenceId)
-        .build();
-  }
-
-}
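A short sketch composing the helpers deleted above into the report objects SCM consumes from a datanode heartbeat; all sizes and paths are arbitrary:

    DatanodeDetails dn = TestUtils.randomDatanodeDetails();
    // One 100 GB disk with 10 GB used and 90 GB remaining.
    StorageReportProto disk = TestUtils.createStorageReport(
        dn.getUuid(), "/data/disk0", 100L * OzoneConsts.GB,
        10L * OzoneConsts.GB, 90L * OzoneConsts.GB, StorageTypeProto.DISK);
    NodeReportProto nodeReport = TestUtils.createNodeReport(disk);
    // A container report carrying five random open replicas.
    ContainerReportsProto containerReports =
        TestUtils.getRandomContainerReports(5);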
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
deleted file mode 100644
index e5c4766..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
+++ /dev/null
@@ -1,327 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.block;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
-import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.TemporaryFolder;
-
-import static org.apache.hadoop.ozone.OzoneConsts.GB;
-import static org.apache.hadoop.ozone.OzoneConsts.MB;
-
-
-/**
- * Tests for SCM Block Manager.
- */
-public class TestBlockManager {
-  private StorageContainerManager scm;
-  private SCMContainerManager mapping;
-  private MockNodeManager nodeManager;
-  private PipelineManager pipelineManager;
-  private BlockManagerImpl blockManager;
-  private File testDir;
-  private final static long DEFAULT_BLOCK_SIZE = 128 * MB;
-  private static HddsProtos.ReplicationFactor factor;
-  private static HddsProtos.ReplicationType type;
-  private static String containerOwner = "OZONE";
-  private static EventQueue eventQueue;
-  private int numContainerPerOwnerInPipeline;
-  private OzoneConfiguration conf;
-  private SafeModeStatus safeModeStatus = new SafeModeStatus(false);
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Before
-  public void setUp() throws Exception {
-    conf = SCMTestUtils.getConf();
-    numContainerPerOwnerInPipeline = conf.getInt(
-        ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
-        ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT);
-
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, folder.newFolder().toString());
-
-    // Override the default Node Manager in SCM with this Mock Node Manager.
-    nodeManager = new MockNodeManager(true, 10);
-    SCMConfigurator configurator = new SCMConfigurator();
-    configurator.setScmNodeManager(nodeManager);
-    scm = TestUtils.getScm(conf, configurator);
-
-    // Initialize these fields so that the tests can pass.
-    mapping = (SCMContainerManager) scm.getContainerManager();
-    pipelineManager = scm.getPipelineManager();
-    blockManager = (BlockManagerImpl) scm.getScmBlockManager();
-
-    eventQueue = new EventQueue();
-    eventQueue.addHandler(SCMEvents.SAFE_MODE_STATUS,
-        scm.getSafeModeHandler());
-    CloseContainerEventHandler closeContainerHandler =
-        new CloseContainerEventHandler(pipelineManager, mapping);
-    eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler);
-    if (conf.getBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
-        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT)) {
-      factor = HddsProtos.ReplicationFactor.THREE;
-      type = HddsProtos.ReplicationType.RATIS;
-    } else {
-      factor = HddsProtos.ReplicationFactor.ONE;
-      type = HddsProtos.ReplicationType.STAND_ALONE;
-    }
-  }
-
-  @After
-  public void cleanup() throws IOException {
-    scm.stop();
-  }
-
-  @Test
-  public void testAllocateBlock() throws Exception {
-    eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus);
-    GenericTestUtils.waitFor(() -> {
-      return !blockManager.isScmInSafeMode();
-    }, 10, 1000 * 5);
-    AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE,
-        type, factor, containerOwner, new ExcludeList());
-    Assert.assertNotNull(block);
-  }
-
-  @Test
-  public void testAllocateBlockInParallel() throws Exception {
-    eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus);
-    GenericTestUtils.waitFor(() -> {
-      return !blockManager.isScmInSafeMode();
-    }, 10, 1000 * 5);
-    int threadCount = 20;
-    List<ExecutorService> executors = new ArrayList<>(threadCount);
-    for (int i = 0; i < threadCount; i++) {
-      executors.add(Executors.newSingleThreadExecutor());
-    }
-    List<CompletableFuture<AllocatedBlock>> futureList =
-        new ArrayList<>(threadCount);
-    for (int i = 0; i < threadCount; i++) {
-      final CompletableFuture<AllocatedBlock> future =
-          new CompletableFuture<>();
-      CompletableFuture.supplyAsync(() -> {
-        try {
-          future.complete(blockManager
-              .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
-                  new ExcludeList()));
-        } catch (IOException e) {
-          future.completeExceptionally(e);
-        }
-        return future;
-      }, executors.get(i));
-      futureList.add(future);
-    }
-    try {
-      CompletableFuture
-          .allOf(futureList.toArray(new CompletableFuture[futureList.size()]))
-          .get();
-    } catch (Exception e) {
-      Assert.fail("testAllocateBlockInParallel failed");
-    }
-  }
-
-  @Test
-  public void testAllocateOversizedBlock() throws Exception {
-    eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus);
-    GenericTestUtils.waitFor(() -> {
-      return !blockManager.isScmInSafeMode();
-    }, 10, 1000 * 5);
-    long size = 6 * GB;
-    thrown.expectMessage("Unsupported block size");
-    blockManager.allocateBlock(size, type, factor, containerOwner,
-        new ExcludeList());
-  }
-
-
-  @Test
-  public void testAllocateBlockFailureInSafeMode() throws Exception {
-    eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS,
-        new SafeModeStatus(true));
-    GenericTestUtils.waitFor(() -> {
-      return blockManager.isScmInSafeMode();
-    }, 10, 1000 * 5);
-    // Test1: In safe mode expect an SCMException.
-    thrown.expectMessage("SafeModePrecheck failed for "
-        + "allocateBlock");
-    blockManager.allocateBlock(DEFAULT_BLOCK_SIZE,
-        type, factor, containerOwner, new ExcludeList());
-  }
-
-  @Test
-  public void testAllocateBlockSucInSafeMode() throws Exception {
-    // Test2: Exit safe mode and then try allocateBock again.
-    eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus);
-    GenericTestUtils.waitFor(() -> {
-      return !blockManager.isScmInSafeMode();
-    }, 10, 1000 * 5);
-    Assert.assertNotNull(blockManager.allocateBlock(DEFAULT_BLOCK_SIZE,
-        type, factor, containerOwner, new ExcludeList()));
-  }
-
-  @Test(timeout = 10000)
-  public void testMultipleBlockAllocation()
-      throws IOException, TimeoutException, InterruptedException {
-    eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus);
-    GenericTestUtils
-        .waitFor(() -> !blockManager.isScmInSafeMode(), 10, 1000 * 5);
-
-    pipelineManager.createPipeline(type, factor);
-    pipelineManager.createPipeline(type, factor);
-
-    AllocatedBlock allocatedBlock = blockManager
-        .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
-            new ExcludeList());
-    // block should be allocated in different pipelines
-    GenericTestUtils.waitFor(() -> {
-      try {
-        AllocatedBlock block = blockManager
-            .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
-                new ExcludeList());
-        return !block.getPipeline().getId()
-            .equals(allocatedBlock.getPipeline().getId());
-      } catch (IOException e) {
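-        // Allocation may fail transiently; waitFor() retries until timeout.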
-      }
-      return false;
-    }, 100, 1000);
-  }
-
-  private boolean verifyNumberOfContainersInPipelines(
-      int numContainersPerPipeline) {
-    try {
-      for (Pipeline pipeline : pipelineManager.getPipelines(type, factor)) {
-        if (pipelineManager.getNumberOfContainers(pipeline.getId())
-            != numContainersPerPipeline) {
-          return false;
-        }
-      }
-    } catch (IOException e) {
-      return false;
-    }
-    return true;
-  }
-
-  @Test(timeout = 10000)
-  public void testMultipleBlockAllocationWithClosedContainer()
-      throws IOException, TimeoutException, InterruptedException {
-    eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus);
-    GenericTestUtils
-        .waitFor(() -> !blockManager.isScmInSafeMode(), 10, 1000 * 5);
-
-    // create pipelines
-    for (int i = 0;
-         i < nodeManager.getNodes(HddsProtos.NodeState.HEALTHY).size(); i++) {
-      pipelineManager.createPipeline(type, factor);
-    }
-
-    // wait till each pipeline has the configured number of containers.
-    // After this each pipeline has numContainerPerOwnerInPipeline containers
-    // for each owner
-    GenericTestUtils.waitFor(() -> {
-      try {
-        blockManager
-            .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
-                new ExcludeList());
-      } catch (IOException e) {  // ignored: waitFor will retry
-      }
-      return verifyNumberOfContainersInPipelines(
-          numContainerPerOwnerInPipeline);
-    }, 10, 1000);
-
-    // close all the containers in all the pipelines
-    for (Pipeline pipeline : pipelineManager.getPipelines(type, factor)) {
-      for (ContainerID cid : pipelineManager
-          .getContainersInPipeline(pipeline.getId())) {
-        eventQueue.fireEvent(SCMEvents.CLOSE_CONTAINER, cid);
-      }
-    }
-    // wait till no containers are left in the pipelines
-    GenericTestUtils
-        .waitFor(() -> verifyNumberOfContainersInPipelines(0), 10, 5000);
-
-    // allocate block so that each pipeline has the configured number of
-    // containers.
-    GenericTestUtils.waitFor(() -> {
-      try {
-        blockManager
-            .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
-                new ExcludeList());
-      } catch (IOException e) {  // ignored: waitFor will retry
-      }
-      return verifyNumberOfContainersInPipelines(
-          numContainerPerOwnerInPipeline);
-    }, 10, 1000);
-  }
-
-  @Test(timeout = 10000)
-  public void testBlockAllocationWithNoAvailablePipelines()
-      throws IOException, TimeoutException, InterruptedException {
-    eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus);
-    GenericTestUtils
-        .waitFor(() -> !blockManager.isScmInSafeMode(), 10, 1000 * 5);
-
-    for (Pipeline pipeline : pipelineManager.getPipelines()) {
-      pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
-    }
-    Assert.assertEquals(0, pipelineManager.getPipelines(type, factor).size());
-    Assert.assertNotNull(blockManager
-        .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
-            new ExcludeList()));
-    Assert.assertEquals(1, pipelineManager.getPipelines(type, factor).size());
-  }
-
-}
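
Note: the hand-rolled future completion in testAllocateBlockInParallel above can be expressed more directly by returning the value from supplyAsync. A minimal, self-contained sketch of that pattern (the allocate() helper is a hypothetical stand-in for BlockManager#allocateBlock, not the real API):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class ParallelAllocationSketch {

  // Hypothetical stand-in for blockManager.allocateBlock(...).
  private static String allocate(int i) {
    return "block-" + i;
  }

  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(10);
    List<CompletableFuture<String>> futures = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
      final int id = i;
      // supplyAsync already returns a CompletableFuture; any exception is
      // wrapped in a CompletionException and rethrown by join().
      futures.add(CompletableFuture.supplyAsync(() -> allocate(id), pool));
    }
    // Wait for all allocations; a single failed future fails the whole join.
    CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
    pool.shutdown();
  }
}

With this shape there is no separate future to complete by hand, and failures propagate without an explicit completeExceptionally call.
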
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
deleted file mode 100644
index 5982b4f..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ /dev/null
@@ -1,437 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.block;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto
-    .DeleteBlockTransactionResult;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Mockito.when;
-
-/**
- * Tests for DeletedBlockLog.
- */
-public class TestDeletedBlockLog {
-
-  private static DeletedBlockLogImpl deletedBlockLog;
-  private OzoneConfiguration conf;
-  private File testDir;
-  private ContainerManager containerManager;
-  private StorageContainerManager scm;
-  private List<DatanodeDetails> dnList;
-
-  @Before
-  public void setup() throws Exception {
-    testDir = GenericTestUtils.getTestDir(
-        TestDeletedBlockLog.class.getSimpleName());
-    conf = new OzoneConfiguration();
-    conf.set(OZONE_ENABLED, "true");
-    conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    scm = TestUtils.getScm(conf);
-    containerManager = Mockito.mock(SCMContainerManager.class);
-    deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager,
-        scm.getScmMetadataStore());
-    dnList = new ArrayList<>(3);
-    setupContainerManager();
-  }
-
-  private void setupContainerManager() throws IOException {
-    dnList.add(
-        DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString())
-            .build());
-    dnList.add(
-        DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString())
-            .build());
-    dnList.add(
-        DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString())
-            .build());
-
-    final ContainerInfo container =
-        new ContainerInfo.Builder().setContainerID(1)
-            .setReplicationFactor(ReplicationFactor.THREE)
-            .setState(HddsProtos.LifeCycleState.CLOSED)
-            .build();
-    final Set<ContainerReplica> replicaSet = dnList.stream()
-        .map(datanodeDetails -> ContainerReplica.newBuilder()
-            .setContainerID(container.containerID())
-            .setContainerState(ContainerReplicaProto.State.OPEN)
-            .setDatanodeDetails(datanodeDetails)
-            .build())
-        .collect(Collectors.toSet());
-
-    when(containerManager.getContainerReplicas(anyObject()))
-        .thenReturn(replicaSet);
-    when(containerManager.getContainer(anyObject()))
-        .thenReturn(container);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    deletedBlockLog.close();
-    scm.stop();
-    scm.join();
-    FileUtils.deleteDirectory(testDir);
-  }
-
-  private Map<Long, List<Long>> generateData(int dataSize) {
-    Map<Long, List<Long>> blockMap = new HashMap<>();
-    Random random = new Random(1);
-    int containerIDBase = random.nextInt(100);
-    int localIDBase = random.nextInt(1000);
-    for (int i = 0; i < dataSize; i++) {
-      long containerID = containerIDBase + i;
-      List<Long> blocks = new ArrayList<>();
-      int blockSize = random.nextInt(30) + 1;
-      for (int j = 0; j < blockSize; j++)  {
-        long localID = localIDBase + j;
-        blocks.add(localID);
-      }
-      blockMap.put(containerID, blocks);
-    }
-    return blockMap;
-  }
-
-  private void commitTransactions(
-      List<DeleteBlockTransactionResult> transactionResults,
-      DatanodeDetails... dns) {
-    for (DatanodeDetails dnDetails : dns) {
-      deletedBlockLog
-          .commitTransactions(transactionResults, dnDetails.getUuid());
-    }
-  }
-
-  private void commitTransactions(
-      List<DeleteBlockTransactionResult> transactionResults) {
-    commitTransactions(transactionResults,
-        dnList.toArray(new DatanodeDetails[3]));
-  }
-
-  private void commitTransactions(
-      Collection<DeletedBlocksTransaction> deletedBlocksTransactions,
-      DatanodeDetails... dns) {
-    commitTransactions(deletedBlocksTransactions.stream()
-        .map(this::createDeleteBlockTransactionResult)
-        .collect(Collectors.toList()), dns);
-  }
-
-  private void commitTransactions(
-      Collection<DeletedBlocksTransaction> deletedBlocksTransactions) {
-    commitTransactions(deletedBlocksTransactions.stream()
-        .map(this::createDeleteBlockTransactionResult)
-        .collect(Collectors.toList()));
-  }
-
-  private DeleteBlockTransactionResult createDeleteBlockTransactionResult(
-      DeletedBlocksTransaction transaction) {
-    return DeleteBlockTransactionResult.newBuilder()
-        .setContainerID(transaction.getContainerID()).setSuccess(true)
-        .setTxID(transaction.getTxID()).build();
-  }
-
-  private List<DeletedBlocksTransaction> getTransactions(
-      int maximumAllowedTXNum) throws IOException {
-    DatanodeDeletedBlockTransactions transactions =
-        new DatanodeDeletedBlockTransactions(containerManager,
-            maximumAllowedTXNum, 3);
-    deletedBlockLog.getTransactions(transactions);
-    return transactions.getDatanodeTransactions(dnList.get(0).getUuid());
-  }
-
-  @Test
-  public void testIncrementCount() throws Exception {
-    int maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
-
-    // Create 30 TXs in the log.
-    for (Map.Entry<Long, List<Long>> entry : generateData(30).entrySet()){
-      deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
-    }
-
-    // This will return all TXs, 30 in total.
-    List<DeletedBlocksTransaction> blocks =
-        getTransactions(40);
-    List<Long> txIDs = blocks.stream().map(DeletedBlocksTransaction::getTxID)
-        .collect(Collectors.toList());
-
-    for (int i = 0; i < maxRetry; i++) {
-      deletedBlockLog.incrementCount(txIDs);
-    }
-
-    // Increment one more time so the count exceeds maxRetry.
-    // On this call, count is set to -1, meaning the TX has permanently failed.
-    deletedBlockLog.incrementCount(txIDs);
-    blocks = getTransactions(40);
-    for (DeletedBlocksTransaction block : blocks) {
-      Assert.assertEquals(-1, block.getCount());
-    }
-
-    // Once all TXs have failed, getTransactions will always return nothing.
-    blocks = getTransactions(40);
-    Assert.assertEquals(0, blocks.size());
-  }
-
-  @Test
-  public void testCommitTransactions() throws Exception {
-    for (Map.Entry<Long, List<Long>> entry : generateData(50).entrySet()){
-      deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
-    }
-    List<DeletedBlocksTransaction> blocks =
-        getTransactions(20);
-    // Add an invalid txn.
-    blocks.add(
-        DeletedBlocksTransaction.newBuilder().setContainerID(1).setTxID(70)
-            .setCount(0).addLocalID(0).build());
-    commitTransactions(blocks);
-    blocks.remove(blocks.size() - 1);
-
-    blocks = getTransactions(50);
-    Assert.assertEquals(30, blocks.size());
-    commitTransactions(blocks, dnList.get(1), dnList.get(2),
-        DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString())
-            .build());
-
-    blocks = getTransactions(50);
-    Assert.assertEquals(30, blocks.size());
-    commitTransactions(blocks, dnList.get(0));
-
-    blocks = getTransactions(50);
-    Assert.assertEquals(0, blocks.size());
-  }
-
-  @Test
-  public void testRandomOperateTransactions() throws Exception {
-    Random random = new Random();
-    int added = 0, committed = 0;
-    List<DeletedBlocksTransaction> blocks = new ArrayList<>();
-    List<Long> txIDs = new ArrayList<>();
-    byte[] latestTxid = DFSUtil.string2Bytes("#LATEST_TXID#");
-    MetadataKeyFilters.MetadataKeyFilter avoidLatestTxid =
-        (preKey, currentKey, nextKey) ->
-            !Arrays.equals(latestTxid, currentKey);
-    // Randomly add/get/commit/increase transactions.
-    for (int i = 0; i < 100; i++) {
-      int state = random.nextInt(4);
-      if (state == 0) {
-        for (Map.Entry<Long, List<Long>> entry :
-            generateData(10).entrySet()){
-          deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
-        }
-        added += 10;
-      } else if (state == 1) {
-        blocks = getTransactions(20);
-        txIDs = new ArrayList<>();
-        for (DeletedBlocksTransaction block : blocks) {
-          txIDs.add(block.getTxID());
-        }
-        deletedBlockLog.incrementCount(txIDs);
-      } else if (state == 2) {
-        commitTransactions(blocks);
-        committed += blocks.size();
-        blocks = new ArrayList<>();
-      } else {
-        // verify the number of added and committed.
-        try (TableIterator<Long,
-            ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
-            scm.getScmMetadataStore().getDeletedBlocksTXTable().iterator()) {
-          AtomicInteger count = new AtomicInteger();
-          iter.forEachRemaining((keyValue) -> count.incrementAndGet());
-          Assert.assertEquals(added, count.get() + committed);
-        }
-      }
-    }
-    blocks = getTransactions(1000);
-    commitTransactions(blocks);
-  }
-
-  @Test
-  public void testPersistence() throws Exception {
-    for (Map.Entry<Long, List<Long>> entry : generateData(50).entrySet()){
-      deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
-    }
-    // Close the DB and reopen it to make sure
-    // transactions are stored persistently.
-    deletedBlockLog.close();
-    deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager,
-        scm.getScmMetadataStore());
-    List<DeletedBlocksTransaction> blocks =
-        getTransactions(10);
-    commitTransactions(blocks);
-    blocks = getTransactions(100);
-    Assert.assertEquals(40, blocks.size());
-    commitTransactions(blocks);
-  }
-
-  @Test
-  public void testDeletedBlockTransactions() throws IOException {
-    int txNum = 10;
-    int maximumAllowedTXNum = 5;
-    List<DeletedBlocksTransaction> blocks = null;
-    List<Long> containerIDs = new LinkedList<>();
-    DatanodeDetails dnId1 = dnList.get(0), dnId2 = dnList.get(1);
-
-    int count = 0;
-    long containerID = 0L;
-
-    // Create {txNum} TXs in the log.
-    for (Map.Entry<Long, List<Long>> entry : generateData(txNum)
-        .entrySet()) {
-      count++;
-      containerID = entry.getKey();
-      containerIDs.add(containerID);
-      deletedBlockLog.addTransaction(containerID, entry.getValue());
-
-      // make TX[1-6] for datanode1; TX[7-10] for datanode2
-      if (count <= (maximumAllowedTXNum + 1)) {
-        mockContainerInfo(containerID, dnId1);
-      } else {
-        mockContainerInfo(containerID, dnId2);
-      }
-    }
-
-    DatanodeDeletedBlockTransactions transactions =
-        new DatanodeDeletedBlockTransactions(containerManager,
-            maximumAllowedTXNum, 2);
-    deletedBlockLog.getTransactions(transactions);
-
-    for (UUID id : transactions.getDatanodeIDs()) {
-      List<DeletedBlocksTransaction> txs = transactions
-          .getDatanodeTransactions(id);
-      // commit, which removes the TXs fetched for this datanode
-      commitTransactions(txs);
-    }
-
-    blocks = getTransactions(txNum);
-    // One transaction should remain, since dnId1 has already reached
-    // the maximum allowed number of TXs (5).
-    Assert.assertEquals(1, blocks.size());
-
-    Assert.assertFalse(transactions.isFull());
-    // The number of TXs for dnId1 won't exceed the maximum value.
-    Assert.assertEquals(maximumAllowedTXNum,
-        transactions.getDatanodeTransactions(dnId1.getUuid()).size());
-
-    int size = transactions.getDatanodeTransactions(dnId2.getUuid()).size();
-    // Adding a duplicate container for dnId2 should fail.
-    DeletedBlocksTransaction.Builder builder =
-        DeletedBlocksTransaction.newBuilder();
-    builder.setTxID(11);
-    builder.setContainerID(containerID);
-    builder.setCount(0);
-    transactions.addTransaction(builder.build(),
-        null);
-
-    // The number of TXs for dnId2 should not change.
-    Assert.assertEquals(size,
-        transactions.getDatanodeTransactions(dnId2.getUuid()).size());
-
-    // Add a new TX for dnId2; dnId2 then reaches the maximum value.
-    containerID = RandomUtils.nextLong();
-    builder = DeletedBlocksTransaction.newBuilder();
-    builder.setTxID(12);
-    builder.setContainerID(containerID);
-    builder.setCount(0);
-    mockContainerInfo(containerID, dnId2);
-    transactions.addTransaction(builder.build(),
-        null);
-    // Since all nodes are full, the transactions object is full.
-    Assert.assertTrue(transactions.isFull());
-  }
-
-  private void mockContainerInfo(long containerID, DatanodeDetails dd)
-      throws IOException {
-    List<DatanodeDetails> dns = Collections.singletonList(dd);
-    Pipeline pipeline = Pipeline.newBuilder()
-            .setType(ReplicationType.STAND_ALONE)
-            .setFactor(ReplicationFactor.ONE)
-            .setState(Pipeline.PipelineState.OPEN)
-            .setId(PipelineID.randomId())
-            .setNodes(dns)
-            .build();
-
-    ContainerInfo.Builder builder = new ContainerInfo.Builder();
-    builder.setPipelineID(pipeline.getId())
-        .setReplicationType(pipeline.getType())
-        .setReplicationFactor(pipeline.getFactor());
-
-    ContainerInfo containerInfo = builder.build();
-    Mockito.doReturn(containerInfo).when(containerManager)
-        .getContainer(ContainerID.valueof(containerID));
-
-    final Set<ContainerReplica> replicaSet = dns.stream()
-        .map(datanodeDetails -> ContainerReplica.newBuilder()
-            .setContainerID(containerInfo.containerID())
-            .setContainerState(ContainerReplicaProto.State.OPEN)
-            .setDatanodeDetails(datanodeDetails)
-            .build())
-        .collect(Collectors.toSet());
-    when(containerManager.getContainerReplicas(
-        ContainerID.valueof(containerID)))
-        .thenReturn(replicaSet);
-  }
-}
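
Note: testIncrementCount above exercises a retry-count convention: a transaction's count grows with each failed round until it passes the configured maximum, after which it is pinned to -1 and skipped by subsequent reads. A minimal sketch of that state machine, assuming -1 marks a permanently failed TX (the class and method names here are illustrative, not the DeletedBlockLogImpl API):

import java.util.HashMap;
import java.util.Map;

public final class RetryCountSketch {
  private final Map<Long, Integer> counts = new HashMap<>();
  private final int maxRetry;

  RetryCountSketch(int maxRetry) {
    this.maxRetry = maxRetry;
  }

  void incrementCount(long txID) {
    int c = counts.getOrDefault(txID, 0);
    if (c == -1) {
      return; // already marked as permanently failed
    }
    c++;
    // Pin to -1 once the retry budget is exhausted.
    counts.put(txID, c > maxRetry ? -1 : c);
  }

  boolean isEligibleForRetry(long txID) {
    return counts.getOrDefault(txID, 0) != -1;
  }

  public static void main(String[] args) {
    RetryCountSketch log = new RetryCountSketch(20);
    for (int i = 0; i <= 20; i++) { // maxRetry increments plus one more
      log.incrementCount(1L);
    }
    System.out.println(log.isEligibleForRetry(1L)); // false
  }
}
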
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
deleted file mode 100644
index a67df69..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Make checkstyle happy.
- * */
-package org.apache.hadoop.hdds.scm.block;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
deleted file mode 100644
index 8877b2b..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.command;
-
-import org.apache.hadoop.hdds.HddsIdFactory;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatus;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .CommandStatusReportFromDatanode;
-
-import org.apache.hadoop.hdds.server.events.Event;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-
-/**
- * Unit test for command status report handler.
- */
-public class TestCommandStatusReportHandler implements EventPublisher {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(TestCommandStatusReportHandler.class);
-  private CommandStatusReportHandler cmdStatusReportHandler;
-
-  @Before
-  public void setup() {
-    cmdStatusReportHandler = new CommandStatusReportHandler();
-  }
-
-  @Test
-  public void testCommandStatusReport() {
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(LOG);
-
-    CommandStatusReportFromDatanode report = this.getStatusReport(Collections
-        .emptyList());
-    cmdStatusReportHandler.onMessage(report, this);
-    assertFalse(logCapturer.getOutput().contains("Delete_Block_Status"));
-    assertFalse(logCapturer.getOutput().contains("Replicate_Command_Status"));
-
-    report = this.getStatusReport(this.getCommandStatusList());
-    cmdStatusReportHandler.onMessage(report, this);
-    assertTrue(logCapturer.getOutput().contains("firing event of type " +
-        "Delete_Block_Status"));
-    assertTrue(logCapturer.getOutput().contains("type: " +
-        "deleteBlocksCommand"));
-
-  }
-
-  private CommandStatusReportFromDatanode getStatusReport(
-      List<CommandStatus> reports) {
-    CommandStatusReportsProto report = TestUtils.createCommandStatusReport(
-        reports);
-    DatanodeDetails dn = TestUtils.randomDatanodeDetails();
-    return new SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode(
-        dn, report);
-  }
-
-  @Override
-  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void
-      fireEvent(EVENT_TYPE event, PAYLOAD payload) {
-    LOG.info("firing event of type {}, payload {}", event.getName(), payload
-        .toString());
-  }
-
-  private List<CommandStatus> getCommandStatusList() {
-    List<CommandStatus> reports = new ArrayList<>(3);
-
-    // Add status messages for the delete block and replicate container
-    // commands.
-    CommandStatus.Builder builder = CommandStatus.newBuilder();
-
-    builder.setCmdId(HddsIdFactory.getLongId())
-        .setStatus(CommandStatus.Status.EXECUTED)
-        .setType(Type.deleteBlocksCommand);
-    reports.add(builder.build());
-
-    builder.setMsg("Not enough space")
-        .setCmdId(HddsIdFactory.getLongId())
-        .setStatus(CommandStatus.Status.FAILED)
-        .setType(Type.replicateContainerCommand);
-    reports.add(builder.build());
-    return reports;
-  }
-
-}
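
Note: the test above verifies handler behaviour indirectly by capturing log output rather than mocking collaborators. A minimal sketch of that style, assuming hadoop-common's test utilities are on the classpath:

import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class LogCaptureSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(LogCaptureSketch.class);

  public static void main(String[] args) {
    GenericTestUtils.LogCapturer capturer =
        GenericTestUtils.LogCapturer.captureLogs(LOG);
    // The code under test logs as a side effect of handling the event.
    LOG.info("firing event of type {}", "Delete_Block_Status");
    // Assert on the captured output instead of on a mock interaction.
    if (!capturer.getOutput().contains("Delete_Block_Status")) {
      throw new AssertionError("expected the event to be logged");
    }
  }
}

Log-capture assertions are brittle against message changes, which is why the assertions above match only short, stable substrings.
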
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java
deleted file mode 100644
index f529c20..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Make CheckStyle Happy.
- */
-package org.apache.hadoop.hdds.scm.command;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
deleted file mode 100644
index 6f5d435..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ /dev/null
@@ -1,584 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.net.NetConstants;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.net.Node;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap;
-import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.protocol.VersionResponse;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.assertj.core.util.Preconditions;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
-
-/**
- * Test Helper for testing container Mapping.
- */
-public class MockNodeManager implements NodeManager {
-  private final static NodeData[] NODES = {
-      new NodeData(10L * OzoneConsts.TB, OzoneConsts.GB),
-      new NodeData(64L * OzoneConsts.TB, 100 * OzoneConsts.GB),
-      new NodeData(128L * OzoneConsts.TB, 256 * OzoneConsts.GB),
-      new NodeData(40L * OzoneConsts.TB, OzoneConsts.TB),
-      new NodeData(256L * OzoneConsts.TB, 200 * OzoneConsts.TB),
-      new NodeData(20L * OzoneConsts.TB, 10 * OzoneConsts.GB),
-      new NodeData(32L * OzoneConsts.TB, 16 * OzoneConsts.TB),
-      new NodeData(OzoneConsts.TB, 900 * OzoneConsts.GB),
-      new NodeData(OzoneConsts.TB, 900 * OzoneConsts.GB, NodeData.STALE),
-      new NodeData(OzoneConsts.TB, 200L * OzoneConsts.GB, NodeData.STALE),
-      new NodeData(OzoneConsts.TB, 200L * OzoneConsts.GB, NodeData.DEAD)
-  };
-  private final List<DatanodeDetails> healthyNodes;
-  private final List<DatanodeDetails> staleNodes;
-  private final List<DatanodeDetails> deadNodes;
-  private final Map<DatanodeDetails, SCMNodeStat> nodeMetricMap;
-  private final SCMNodeStat aggregateStat;
-  private boolean safemode;
-  private final Map<UUID, List<SCMCommand>> commandMap;
-  private final Node2PipelineMap node2PipelineMap;
-  private final Node2ContainerMap node2ContainerMap;
-  private NetworkTopology clusterMap;
-  private ConcurrentHashMap<String, Set<String>> dnsToUuidMap;
-
-  public MockNodeManager(boolean initializeFakeNodes, int nodeCount) {
-    this.healthyNodes = new LinkedList<>();
-    this.staleNodes = new LinkedList<>();
-    this.deadNodes = new LinkedList<>();
-    this.nodeMetricMap = new HashMap<>();
-    this.node2PipelineMap = new Node2PipelineMap();
-    this.node2ContainerMap = new Node2ContainerMap();
-    this.dnsToUuidMap = new ConcurrentHashMap<>();
-    aggregateStat = new SCMNodeStat();
-    if (initializeFakeNodes) {
-      for (int x = 0; x < nodeCount; x++) {
-        DatanodeDetails dd = TestUtils.randomDatanodeDetails();
-        register(dd, null, null);
-        populateNodeMetric(dd, x);
-      }
-    }
-    safemode = false;
-    this.commandMap = new HashMap<>();
-  }
-
-  /**
-   * Invoked from ctor to create some node Metrics.
-   *
-   * @param datanodeDetails - Datanode details
-   */
-  private void populateNodeMetric(DatanodeDetails datanodeDetails, int x) {
-    SCMNodeStat newStat = new SCMNodeStat();
-    long remaining =
-        NODES[x % NODES.length].capacity - NODES[x % NODES.length].used;
-    newStat.set(
-        (NODES[x % NODES.length].capacity),
-        (NODES[x % NODES.length].used), remaining);
-    this.nodeMetricMap.put(datanodeDetails, newStat);
-    aggregateStat.add(newStat);
-
-    if (NODES[x % NODES.length].getCurrentState() == NodeData.HEALTHY) {
-      healthyNodes.add(datanodeDetails);
-    }
-
-    if (NODES[x % NODES.length].getCurrentState() == NodeData.STALE) {
-      staleNodes.add(datanodeDetails);
-    }
-
-    if (NODES[x % NODES.length].getCurrentState() == NodeData.DEAD) {
-      deadNodes.add(datanodeDetails);
-    }
-
-  }
-
-  /**
-   * Sets the safe mode value.
-   * @param safemode boolean
-   */
-  public void setSafemode(boolean safemode) {
-    this.safemode = safemode;
-  }
-
-  /**
-   * Gets all live Datanodes that are currently communicating with SCM.
-   *
-   * @param nodestate - State of the node
-   * @return List of Datanodes that are Heartbeating SCM.
-   */
-  @Override
-  public List<DatanodeDetails> getNodes(HddsProtos.NodeState nodestate) {
-    if (nodestate == HEALTHY) {
-      return healthyNodes;
-    }
-
-    if (nodestate == STALE) {
-      return staleNodes;
-    }
-
-    if (nodestate == DEAD) {
-      return deadNodes;
-    }
-
-    return null;
-  }
-
-  /**
-   * Returns the Number of Datanodes that are communicating with SCM.
-   *
-   * @param nodestate - State of the node
-   * @return int -- count
-   */
-  @Override
-  public int getNodeCount(HddsProtos.NodeState nodestate) {
-    List<DatanodeDetails> nodes = getNodes(nodestate);
-    if (nodes != null) {
-      return nodes.size();
-    }
-    return 0;
-  }
-
-  /**
-   * Get all datanodes known to SCM.
-   *
-   * @return List of DatanodeDetails known to SCM.
-   */
-  @Override
-  public List<DatanodeDetails> getAllNodes() {
-    return new ArrayList<>(nodeMetricMap.keySet());
-  }
-
-  /**
-   * Returns the aggregated node stats.
-   * @return the aggregated node stats.
-   */
-  @Override
-  public SCMNodeStat getStats() {
-    return aggregateStat;
-  }
-
-  /**
-   * Return a map of nodes to their stats.
-   * @return a list of individual node stats (live/stale but not dead).
-   */
-  @Override
-  public Map<DatanodeDetails, SCMNodeStat> getNodeStats() {
-    return nodeMetricMap;
-  }
-
-  /**
-   * Return the node stat of the specified datanode.
-   * @param datanodeDetails - datanode details.
-   * @return node stat if it is live/stale, null if it is decommissioned or
-   * doesn't exist.
-   */
-  @Override
-  public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) {
-    SCMNodeStat stat = nodeMetricMap.get(datanodeDetails);
-    if (stat == null) {
-      return null;
-    }
-    return new SCMNodeMetric(stat);
-  }
-
-  /**
-   * Returns the node state of a specific node.
-   *
-   * @param dd - DatanodeDetails
-   * @return Healthy/Stale/Dead.
-   */
-  @Override
-  public HddsProtos.NodeState getNodeState(DatanodeDetails dd) {
-    return null;
-  }
-
-  /**
-   * Get set of pipelines a datanode is part of.
-   * @param dnId - datanodeID
-   * @return Set of PipelineID
-   */
-  @Override
-  public Set<PipelineID> getPipelines(DatanodeDetails dnId) {
-    return node2PipelineMap.getPipelines(dnId.getUuid());
-  }
-
-  /**
-   * Add pipeline information in the NodeManager.
-   * @param pipeline - Pipeline to be added
-   */
-  @Override
-  public void addPipeline(Pipeline pipeline) {
-    node2PipelineMap.addPipeline(pipeline);
-  }
-
-  /**
-   * Remove a pipeline information from the NodeManager.
-   * @param pipeline - Pipeline to be removed
-   */
-  @Override
-  public void removePipeline(Pipeline pipeline) {
-    node2PipelineMap.removePipeline(pipeline);
-  }
-
-  @Override
-  public void addContainer(DatanodeDetails dd,
-                           ContainerID containerId)
-      throws NodeNotFoundException {
-    try {
-      Set<ContainerID> set = node2ContainerMap.getContainers(dd.getUuid());
-      set.add(containerId);
-      node2ContainerMap.setContainersForDatanode(dd.getUuid(), set);
-    } catch (SCMException e) {
-      e.printStackTrace();
-    }
-  }
-
-  @Override
-  public void addDatanodeCommand(UUID dnId, SCMCommand command) {
-    if (commandMap.containsKey(dnId)) {
-      List<SCMCommand> commandList = commandMap.get(dnId);
-      Preconditions.checkNotNull(commandList);
-      commandList.add(command);
-    } else {
-      List<SCMCommand> commandList = new LinkedList<>();
-      commandList.add(command);
-      commandMap.put(dnId, commandList);
-    }
-  }
-
-  /**
-   * Empty implementation for processNodeReport.
-   *
-   * @param dnUuid
-   * @param nodeReport
-   */
-  @Override
-  public void processNodeReport(DatanodeDetails dnUuid,
-      NodeReportProto nodeReport) {
-    // do nothing
-  }
-
-  /**
-   * Update set of containers available on a datanode.
-   * @param uuid - DatanodeID
-   * @param containerIds - Set of containerIDs
-   * @throws NodeNotFoundException - if the datanode is not known. For a new
-   *                                 datanode use addDatanodeInContainerMap
-   *                                 call.
-   */
-  @Override
-  public void setContainers(DatanodeDetails uuid, Set<ContainerID> containerIds)
-      throws NodeNotFoundException {
-    try {
-      node2ContainerMap.setContainersForDatanode(uuid.getUuid(), containerIds);
-    } catch (SCMException e) {
-      throw new NodeNotFoundException(e.getMessage());
-    }
-  }
-
-  /**
-   * Return set of containerIDs available on a datanode.
-   * @param uuid - DatanodeID
-   * @return - set of containerIDs
-   */
-  @Override
-  public Set<ContainerID> getContainers(DatanodeDetails uuid) {
-    return node2ContainerMap.getContainers(uuid.getUuid());
-  }
-
-  // Returns the number of commands that are queued to this node manager.
-  public int getCommandCount(DatanodeDetails dd) {
-    List<SCMCommand> list = commandMap.get(dd.getUuid());
-    return (list == null) ? 0 : list.size();
-  }
-
-  public void clearCommandQueue(UUID dnId) {
-    if (commandMap.containsKey(dnId)) {
-      commandMap.put(dnId, new LinkedList<>());
-    }
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated with it. If
-   * the stream is already closed then invoking this method has no effect.
-   * <p>
-   * <p> As noted in {@link AutoCloseable#close()}, cases where the close may
-   * fail require careful attention. It is strongly advised to relinquish the
-   * underlying resources and to internally <em>mark</em> the {@code Closeable}
-   * as closed, prior to throwing the {@code IOException}.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-
-  }
-
-  /**
-   * Gets the version info from SCM.
-   *
-   * @param versionRequest - version Request.
-   * @return - returns SCM version info and other required information needed by
-   * datanode.
-   */
-  @Override
-  public VersionResponse getVersion(SCMVersionRequestProto versionRequest) {
-    return null;
-  }
-
-  /**
-   * Register the node if the node finds that it is not registered with any
-   * SCM.
-   *
-   * @param datanodeDetails DatanodeDetails
-   * @param nodeReport NodeReportProto
-   * @return SCMHeartbeatResponseProto
-   */
-  @Override
-  public RegisteredCommand register(DatanodeDetails datanodeDetails,
-      NodeReportProto nodeReport, PipelineReportsProto pipelineReportsProto) {
-    try {
-      node2ContainerMap.insertNewDatanode(datanodeDetails.getUuid(),
-          Collections.emptySet());
-      addEntryToDnsToUuidMap(datanodeDetails.getIpAddress(),
-          datanodeDetails.getUuidString());
-      if (clusterMap != null) {
-        datanodeDetails.setNetworkName(datanodeDetails.getUuidString());
-        clusterMap.add(datanodeDetails);
-      }
-    } catch (SCMException e) {
-      e.printStackTrace();
-    }
-    return null;
-  }
-
-  /**
-   * Add an entry to the dnsToUuidMap, which maps hostname / IP to the DNs
-   * running on that host. As each address can have many DNs running on it,
-   * this is a one to many mapping.
-   * @param dnsName String representing the hostname or IP of the node
-   * @param uuid String representing the UUID of the registered node.
-   */
-  private synchronized void addEntryToDnsToUuidMap(
-      String dnsName, String uuid) {
-    Set<String> dnList = dnsToUuidMap.get(dnsName);
-    if (dnList == null) {
-      dnList = ConcurrentHashMap.newKeySet();
-      dnsToUuidMap.put(dnsName, dnList);
-    }
-    dnList.add(uuid);
-  }
-
-  /**
-   * Send heartbeat to indicate the datanode is alive and doing well.
-   *
-   * @param datanodeDetails - Datanode ID.
-   * @return SCM heartbeat response list
-   */
-  @Override
-  public List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails) {
-    return null;
-  }
-
-  @Override
-  public Boolean isNodeRegistered(
-      DatanodeDetails datanodeDetails) {
-    return null;
-  }
-
-  @Override
-  public Map<String, Integer> getNodeCount() {
-    Map<String, Integer> nodeCountMap = new HashMap<String, Integer>();
-    for (HddsProtos.NodeState state : HddsProtos.NodeState.values()) {
-      nodeCountMap.put(state.toString(), getNodeCount(state));
-    }
-    return nodeCountMap;
-  }
-
-  @Override
-  public Map<String, Long> getNodeInfo() {
-    Map<String, Long> nodeInfo = new HashMap<>();
-    nodeInfo.put("Capacity", aggregateStat.getCapacity().get());
-    nodeInfo.put("Used", aggregateStat.getScmUsed().get());
-    nodeInfo.put("Remaining", aggregateStat.getRemaining().get());
-    return nodeInfo;
-  }
-
-  /**
-   * Makes it easy to add a container.
-   *
-   * @param datanodeDetails datanode details
-   * @param size number of bytes.
-   */
-  public void addContainer(DatanodeDetails datanodeDetails, long size) {
-    SCMNodeStat stat = this.nodeMetricMap.get(datanodeDetails);
-    if (stat != null) {
-      aggregateStat.subtract(stat);
-      stat.getCapacity().add(size);
-      aggregateStat.add(stat);
-      nodeMetricMap.put(datanodeDetails, stat);
-    }
-  }
-
-  /**
-   * Makes it easy to simulate a delete of a container.
-   *
-   * @param datanodeDetails datanode Details
-   * @param size number of bytes.
-   */
-  public void delContainer(DatanodeDetails datanodeDetails, long size) {
-    SCMNodeStat stat = this.nodeMetricMap.get(datanodeDetails);
-    if (stat != null) {
-      aggregateStat.subtract(stat);
-      stat.getCapacity().subtract(size);
-      aggregateStat.add(stat);
-      nodeMetricMap.put(datanodeDetails, stat);
-    }
-  }
-
-  @Override
-  public void onMessage(CommandForDatanode commandForDatanode,
-                        EventPublisher publisher) {
-    addDatanodeCommand(commandForDatanode.getDatanodeId(),
-        commandForDatanode.getCommand());
-  }
-
-  @Override
-  public List<SCMCommand> getCommandQueue(UUID dnID) {
-    return null;
-  }
-
-  @Override
-  public DatanodeDetails getNodeByUuid(String uuid) {
-    Node node = clusterMap.getNode(NetConstants.DEFAULT_RACK + "/" + uuid);
-    return node == null ? null : (DatanodeDetails)node;
-  }
-
-  @Override
-  public List<DatanodeDetails> getNodesByAddress(String address) {
-    List<DatanodeDetails> results = new LinkedList<>();
-    Set<String> uuids = dnsToUuidMap.get(address);
-    if (uuids == null) {
-      return results;
-    }
-    for(String uuid : uuids) {
-      DatanodeDetails dn = getNodeByUuid(uuid);
-      if (dn != null) {
-        results.add(dn);
-      }
-    }
-    return results;
-  }
-
-  public void setNetworkTopology(NetworkTopology topology) {
-    this.clusterMap = topology;
-  }
-
-  /**
-   * A holder declaring capacity, usage, and state values for the fake
-   * nodes, so that our tests won't fail.
-   */
-  private static class NodeData {
-    public static final long HEALTHY = 1;
-    public static final long STALE = 2;
-    public static final long DEAD = 3;
-
-    private long capacity;
-    private long used;
-
-    private long currentState;
-
-    /**
-     * By default nodes are healthy.
-     * @param capacity
-     * @param used
-     */
-    NodeData(long capacity, long used) {
-      this(capacity, used, HEALTHY);
-    }
-
-    /**
-     * Constructs a nodeDefinition.
-     *
-     * @param capacity capacity.
-     * @param used used.
-     * @param currentState - Healthy, Stale and DEAD nodes.
-     */
-    NodeData(long capacity, long used, long currentState) {
-      this.capacity = capacity;
-      this.used = used;
-      this.currentState = currentState;
-    }
-
-    public long getCapacity() {
-      return capacity;
-    }
-
-    public void setCapacity(long capacity) {
-      this.capacity = capacity;
-    }
-
-    public long getUsed() {
-      return used;
-    }
-
-    public void setUsed(long used) {
-      this.used = used;
-    }
-
-    public long getCurrentState() {
-      return currentState;
-    }
-
-    public void setCurrentState(long currentState) {
-      this.currentState = currentState;
-    }
-
-  }
-}
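
Note: addEntryToDnsToUuidMap above implements a one-to-many host-to-UUID map with an explicit null check. The same get-or-create step can be done atomically with computeIfAbsent; a minimal, self-contained sketch (class and method names here are illustrative):

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public final class DnsToUuidSketch {
  private final ConcurrentHashMap<String, Set<String>> dnsToUuid =
      new ConcurrentHashMap<>();

  void add(String host, String uuid) {
    // Atomically create the per-host set on first use, then add the UUID.
    dnsToUuid.computeIfAbsent(host, k -> ConcurrentHashMap.newKeySet())
        .add(uuid);
  }

  Set<String> lookup(String host) {
    return dnsToUuid.getOrDefault(host, Set.of());
  }

  public static void main(String[] args) {
    DnsToUuidSketch map = new DnsToUuidSketch();
    map.add("10.0.0.1", "dn-1");
    map.add("10.0.0.1", "dn-2"); // second datanode on the same host
    System.out.println(map.lookup("10.0.0.1").size()); // 2
  }
}
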
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
deleted file mode 100644
index a8364a4..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
-
-/**
- * Tests the closeContainerEventHandler class.
- */
-public class TestCloseContainerEventHandler {
-
-  private static Configuration configuration;
-  private static MockNodeManager nodeManager;
-  private static SCMPipelineManager pipelineManager;
-  private static SCMContainerManager containerManager;
-  private static long size;
-  private static File testDir;
-  private static EventQueue eventQueue;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    configuration = SCMTestUtils.getConf();
-    size = (long)configuration.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
-        OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
-    testDir = GenericTestUtils
-        .getTestDir(TestCloseContainerEventHandler.class.getSimpleName());
-    configuration
-        .set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    nodeManager = new MockNodeManager(true, 10);
-    // Create the event queue before handing it to the pipeline manager.
-    eventQueue = new EventQueue();
-    pipelineManager =
-        new SCMPipelineManager(configuration, nodeManager, eventQueue, null);
-    PipelineProvider mockRatisProvider =
-        new MockRatisPipelineProvider(nodeManager,
-            pipelineManager.getStateManager(), configuration);
-    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-        mockRatisProvider);
-    containerManager = new
-        SCMContainerManager(configuration, nodeManager,
-        pipelineManager, new EventQueue());
-    eventQueue.addHandler(CLOSE_CONTAINER,
-        new CloseContainerEventHandler(pipelineManager, containerManager));
-    eventQueue.addHandler(DATANODE_COMMAND, nodeManager);
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    if (containerManager != null) {
-      containerManager.close();
-    }
-    FileUtil.fullyDelete(testDir);
-  }
-
-  @Test
-  public void testIfCloseContainerEventHandlerInvoked() {
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(CloseContainerEventHandler.LOG);
-    eventQueue.fireEvent(CLOSE_CONTAINER,
-        new ContainerID(Math.abs(RandomUtils.nextInt())));
-    eventQueue.processAll(1000);
-    Assert.assertTrue(logCapturer.getOutput()
-        .contains("Close container Event triggered for container"));
-  }
-
-  @Test
-  public void testCloseContainerEventWithInvalidContainer() {
-    long id = Math.abs(RandomUtils.nextInt());
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(CloseContainerEventHandler.LOG);
-    eventQueue.fireEvent(CLOSE_CONTAINER,
-        new ContainerID(id));
-    eventQueue.processAll(1000);
-    Assert.assertTrue(logCapturer.getOutput()
-        .contains("Failed to close the container"));
-  }
-
-  @Test
-  public void testCloseContainerEventWithValidContainers() throws IOException {
-
-    ContainerInfo container = containerManager
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.ONE, "ozone");
-    ContainerID id = container.containerID();
-    DatanodeDetails datanode = pipelineManager
-        .getPipeline(container.getPipelineID()).getFirstNode();
-    int closeCount = nodeManager.getCommandCount(datanode);
-    eventQueue.fireEvent(CLOSE_CONTAINER, id);
-    eventQueue.processAll(1000);
-    Assert.assertEquals(closeCount + 1,
-        nodeManager.getCommandCount(datanode));
-    Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
-        containerManager.getContainer(id).getState());
-  }
-
-  @Test
-  public void testCloseContainerEventWithRatis() throws IOException {
-
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(CloseContainerEventHandler.LOG);
-    ContainerInfo container = containerManager
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, "ozone");
-    ContainerID id = container.containerID();
-    int[] closeCount = new int[3];
-    eventQueue.fireEvent(CLOSE_CONTAINER, id);
-    eventQueue.processAll(1000);
-    int i = 0;
-    for (DatanodeDetails details : pipelineManager
-        .getPipeline(container.getPipelineID()).getNodes()) {
-      closeCount[i] = nodeManager.getCommandCount(details);
-      i++;
-    }
-    i = 0;
-    for (DatanodeDetails details : pipelineManager
-        .getPipeline(container.getPipelineID()).getNodes()) {
-      Assert.assertEquals(closeCount[i], nodeManager.getCommandCount(details));
-      i++;
-    }
-    eventQueue.fireEvent(CLOSE_CONTAINER, id);
-    eventQueue.processAll(1000);
-    i = 0;
-    // Make sure close is queued for each datanode on the pipeline
-    for (DatanodeDetails details : pipelineManager
-        .getPipeline(container.getPipelineID()).getNodes()) {
-      Assert.assertEquals(closeCount[i] + 1,
-          nodeManager.getCommandCount(details));
-      Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
-          containerManager.getContainer(id).getState());
-      i++;
-    }
-  }
-}
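
Note: these tests all follow a fire-then-drain pattern: fireEvent only enqueues, and handler effects are observed after processAll returns. A toy illustration of that contract (this is not the hdds EventQueue API, just the shape the tests rely on):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.function.Consumer;

public final class TinyEventQueue<T> {
  private final List<Consumer<T>> handlers = new ArrayList<>();
  private final Deque<T> pending = new ArrayDeque<>();

  void addHandler(Consumer<T> handler) {
    handlers.add(handler);
  }

  void fireEvent(T payload) {
    pending.add(payload); // queued, not yet delivered
  }

  void processAll() {
    while (!pending.isEmpty()) {
      T payload = pending.poll();
      for (Consumer<T> h : handlers) {
        h.accept(payload);
      }
    }
  }

  public static void main(String[] args) {
    TinyEventQueue<Long> queue = new TinyEventQueue<>();
    queue.addHandler(id -> System.out.println("close container " + id));
    queue.fireEvent(1L);
    queue.processAll(); // the handler runs here, not at fireEvent
  }
}
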
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
deleted file mode 100644
index 09daa59..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerActionsProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerActionsFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-/**
- * Tests ContainerActionsHandler.
- */
-public class TestContainerActionsHandler {
-
-  @Test
-  public void testCloseContainerAction() {
-    EventQueue queue = new EventQueue();
-    ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
-    CloseContainerEventHandler closeContainerEventHandler = Mockito.mock(
-        CloseContainerEventHandler.class);
-    queue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerEventHandler);
-    queue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler);
-
-    ContainerAction action = ContainerAction.newBuilder()
-        .setContainerID(1L)
-        .setAction(ContainerAction.Action.CLOSE)
-        .setReason(ContainerAction.Reason.CONTAINER_FULL)
-        .build();
-
-    ContainerActionsProto cap = ContainerActionsProto.newBuilder()
-        .addContainerActions(action)
-        .build();
-
-    ContainerActionsFromDatanode containerActions =
-        new ContainerActionsFromDatanode(
-            TestUtils.randomDatanodeDetails(), cap);
-
-    queue.fireEvent(SCMEvents.CONTAINER_ACTIONS, containerActions);
-    queue.processAll(1000L);
-    verify(closeContainerEventHandler, times(1))
-        .onMessage(ContainerID.valueof(1L), queue);
-
-  }
-
-}
\ No newline at end of file
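The test above verifies that a CONTAINER_ACTIONS event carrying a CLOSE action is translated into a CLOSE_CONTAINER event for the affected container. The handler logic it exercises is essentially a dispatch loop; the following is an illustrative re-implementation of that shape, not the deleted ContainerActionsHandler itself:

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerActionsFromDatanode;
import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;

// Illustrative sketch of the dispatch the test asserts.
public class CloseActionDispatchSketch
    implements EventHandler<ContainerActionsFromDatanode> {
  @Override
  public void onMessage(ContainerActionsFromDatanode actions,
      EventPublisher publisher) {
    for (ContainerAction action :
        actions.getReport().getContainerActionsList()) {
      if (action.getAction() == ContainerAction.Action.CLOSE) {
        // Re-fire as a typed CLOSE_CONTAINER event, which is exactly what
        // the mocked CloseContainerEventHandler above receives.
        publisher.fireEvent(SCMEvents.CLOSE_CONTAINER,
            ContainerID.valueof(action.getContainerID()));
      }
    }
  }
}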
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
deleted file mode 100644
index 41585bc..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ /dev/null
@@ -1,510 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
-import org.apache.hadoop.hdds.scm.server
-    .SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.Set;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static org.apache.hadoop.hdds.scm.TestUtils.getReplicas;
-import static org.apache.hadoop.hdds.scm.TestUtils.getContainer;
-
-/**
- * Test the behaviour of the ContainerReportHandler.
- */
-public class TestContainerReportHandler {
-
-  private NodeManager nodeManager;
-  private ContainerManager containerManager;
-  private ContainerStateManager containerStateManager;
-  private EventPublisher publisher;
-
-  @Before
-  public void setup() throws IOException {
-    final Configuration conf = new OzoneConfiguration();
-    this.nodeManager = new MockNodeManager(true, 10);
-    this.containerManager = Mockito.mock(ContainerManager.class);
-    this.containerStateManager = new ContainerStateManager(conf);
-    this.publisher = Mockito.mock(EventPublisher.class);
-
-
-    Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class)))
-        .thenAnswer(invocation -> containerStateManager
-            .getContainer((ContainerID)invocation.getArguments()[0]));
-
-    Mockito.when(containerManager.getContainerReplicas(
-        Mockito.any(ContainerID.class)))
-        .thenAnswer(invocation -> containerStateManager
-            .getContainerReplicas((ContainerID)invocation.getArguments()[0]));
-
-    Mockito.doAnswer(invocation -> {
-      containerStateManager
-          .updateContainerState((ContainerID)invocation.getArguments()[0],
-              (HddsProtos.LifeCycleEvent)invocation.getArguments()[1]);
-      return null;
-    }).when(containerManager).updateContainerState(
-        Mockito.any(ContainerID.class),
-        Mockito.any(HddsProtos.LifeCycleEvent.class));
-
-    Mockito.doAnswer(invocation -> {
-      containerStateManager.updateContainerReplica(
-          (ContainerID) invocation.getArguments()[0],
-          (ContainerReplica) invocation.getArguments()[1]);
-      return null;
-    }).when(containerManager).updateContainerReplica(
-        Mockito.any(ContainerID.class), Mockito.any(ContainerReplica.class));
-
-    Mockito.doAnswer(invocation -> {
-      containerStateManager.removeContainerReplica(
-          (ContainerID) invocation.getArguments()[0],
-          (ContainerReplica) invocation.getArguments()[1]);
-      return null;
-    }).when(containerManager).removeContainerReplica(
-        Mockito.any(ContainerID.class), Mockito.any(ContainerReplica.class));
-
-  }
-
-  @After
-  public void tearDown() throws IOException {
-    containerStateManager.close();
-  }
-
-  @Test
-  public void testUnderReplicatedContainer()
-      throws NodeNotFoundException, ContainerNotFoundException, SCMException {
-
-    final ContainerReportHandler reportHandler = new ContainerReportHandler(
-        nodeManager, containerManager);
-    final Iterator<DatanodeDetails> nodeIterator = nodeManager.getNodes(
-        NodeState.HEALTHY).iterator();
-    final DatanodeDetails datanodeOne = nodeIterator.next();
-    final DatanodeDetails datanodeTwo = nodeIterator.next();
-    final DatanodeDetails datanodeThree = nodeIterator.next();
-
-    final ContainerInfo containerOne = getContainer(LifeCycleState.CLOSED);
-    final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED);
-    final Set<ContainerID> containerIDSet = Stream.of(
-        containerOne.containerID(), containerTwo.containerID())
-        .collect(Collectors.toSet());
-
-    nodeManager.setContainers(datanodeOne, containerIDSet);
-    nodeManager.setContainers(datanodeTwo, containerIDSet);
-    nodeManager.setContainers(datanodeThree, containerIDSet);
-
-    containerStateManager.loadContainer(containerOne);
-    containerStateManager.loadContainer(containerTwo);
-
-    getReplicas(containerOne.containerID(),
-        ContainerReplicaProto.State.CLOSED,
-        datanodeOne, datanodeTwo, datanodeThree)
-        .forEach(r -> {
-          try {
-            containerStateManager.updateContainerReplica(
-                containerOne.containerID(), r);
-          } catch (ContainerNotFoundException ignored) {
-
-          }
-        });
-
-    getReplicas(containerTwo.containerID(),
-        ContainerReplicaProto.State.CLOSED,
-        datanodeOne, datanodeTwo, datanodeThree)
-        .forEach(r -> {
-          try {
-            containerStateManager.updateContainerReplica(
-                containerTwo.containerID(), r);
-          } catch (ContainerNotFoundException ignored) {
-
-          }
-        });
-
-
-    // SCM expects both containerOne and containerTwo to be on all three
-    // datanodes: datanodeOne, datanodeTwo and datanodeThree.
-
-    // Now datanodeOne sends a container report in which containerOne is
-    // missing.
-
-    // containerOne becomes under replicated.
-    final ContainerReportsProto containerReport = getContainerReportsProto(
-        containerTwo.containerID(), ContainerReplicaProto.State.CLOSED,
-        datanodeOne.getUuidString());
-    final ContainerReportFromDatanode containerReportFromDatanode =
-        new ContainerReportFromDatanode(datanodeOne, containerReport);
-    reportHandler.onMessage(containerReportFromDatanode, publisher);
-    Assert.assertEquals(2, containerManager.getContainerReplicas(
-        containerOne.containerID()).size());
-
-  }
-
-  @Test
-  public void testOverReplicatedContainer() throws NodeNotFoundException,
-      SCMException, ContainerNotFoundException {
-
-    final ContainerReportHandler reportHandler = new ContainerReportHandler(
-        nodeManager, containerManager);
-
-    final Iterator<DatanodeDetails> nodeIterator = nodeManager.getNodes(
-        NodeState.HEALTHY).iterator();
-    final DatanodeDetails datanodeOne = nodeIterator.next();
-    final DatanodeDetails datanodeTwo = nodeIterator.next();
-    final DatanodeDetails datanodeThree = nodeIterator.next();
-    final DatanodeDetails datanodeFour = nodeIterator.next();
-
-    final ContainerInfo containerOne = getContainer(LifeCycleState.CLOSED);
-    final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED);
-
-    final Set<ContainerID> containerIDSet = Stream.of(
-        containerOne.containerID(), containerTwo.containerID())
-        .collect(Collectors.toSet());
-
-    nodeManager.setContainers(datanodeOne, containerIDSet);
-    nodeManager.setContainers(datanodeTwo, containerIDSet);
-    nodeManager.setContainers(datanodeThree, containerIDSet);
-
-    containerStateManager.loadContainer(containerOne);
-    containerStateManager.loadContainer(containerTwo);
-
-    getReplicas(containerOne.containerID(),
-        ContainerReplicaProto.State.CLOSED,
-        datanodeOne, datanodeTwo, datanodeThree)
-        .forEach(r -> {
-          try {
-            containerStateManager.updateContainerReplica(
-                containerOne.containerID(), r);
-          } catch (ContainerNotFoundException ignored) {
-
-          }
-        });
-
-    getReplicas(containerTwo.containerID(),
-        ContainerReplicaProto.State.CLOSED,
-        datanodeOne, datanodeTwo, datanodeThree)
-        .forEach(r -> {
-          try {
-            containerStateManager.updateContainerReplica(
-                containerTwo.containerID(), r);
-          } catch (ContainerNotFoundException ignored) {
-
-          }
-        });
-
-
-    // SCM expects both containerOne and containerTwo to be on all three
-    // datanodes: datanodeOne, datanodeTwo and datanodeThree.
-
-    // Now datanodeFour sends a container report that includes containerOne.
-
-    // containerOne becomes over replicated.
-
-    final ContainerReportsProto containerReport = getContainerReportsProto(
-        containerOne.containerID(), ContainerReplicaProto.State.CLOSED,
-        datanodeFour.getUuidString());
-    final ContainerReportFromDatanode containerReportFromDatanode =
-        new ContainerReportFromDatanode(datanodeFour, containerReport);
-    reportHandler.onMessage(containerReportFromDatanode, publisher);
-
-    Assert.assertEquals(4, containerManager.getContainerReplicas(
-        containerOne.containerID()).size());
-  }
-
-
-  @Test
-  public void testClosingToClosed() throws NodeNotFoundException, IOException {
-    /*
-     * The container is in CLOSING state and all the replicas are in
-     * OPEN/CLOSING state.
-     *
-     * The datanode reports that one of the replicas is now CLOSED.
-     *
-     * In this case SCM should mark the container as CLOSED.
-     */
-
-    final ContainerReportHandler reportHandler = new ContainerReportHandler(
-        nodeManager, containerManager);
-
-    final Iterator<DatanodeDetails> nodeIterator = nodeManager.getNodes(
-        NodeState.HEALTHY).iterator();
-    final DatanodeDetails datanodeOne = nodeIterator.next();
-    final DatanodeDetails datanodeTwo = nodeIterator.next();
-    final DatanodeDetails datanodeThree = nodeIterator.next();
-
-    final ContainerInfo containerOne = getContainer(LifeCycleState.CLOSING);
-    final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED);
-
-    final Set<ContainerID> containerIDSet = Stream.of(
-        containerOne.containerID(), containerTwo.containerID())
-        .collect(Collectors.toSet());
-
-    final Set<ContainerReplica> containerOneReplicas = getReplicas(
-        containerOne.containerID(),
-        ContainerReplicaProto.State.CLOSING,
-        datanodeOne);
-
-    containerOneReplicas.addAll(getReplicas(
-        containerOne.containerID(),
-        ContainerReplicaProto.State.OPEN,
-        datanodeTwo, datanodeThree));
-
-    final Set<ContainerReplica> containerTwoReplicas = getReplicas(
-        containerTwo.containerID(),
-        ContainerReplicaProto.State.CLOSED,
-        datanodeOne, datanodeTwo, datanodeThree);
-
-    nodeManager.setContainers(datanodeOne, containerIDSet);
-    nodeManager.setContainers(datanodeTwo, containerIDSet);
-    nodeManager.setContainers(datanodeThree, containerIDSet);
-
-    containerStateManager.loadContainer(containerOne);
-    containerStateManager.loadContainer(containerTwo);
-
-    containerOneReplicas.forEach(r -> {
-      try {
-        containerStateManager.updateContainerReplica(
-            containerOne.containerID(), r);
-      } catch (ContainerNotFoundException ignored) {
-
-      }
-    });
-
-    containerTwoReplicas.forEach(r -> {
-      try {
-        containerStateManager.updateContainerReplica(
-            containerTwo.containerID(), r);
-      } catch (ContainerNotFoundException ignored) {
-
-      }
-    });
-
-
-    final ContainerReportsProto containerReport = getContainerReportsProto(
-        containerOne.containerID(), ContainerReplicaProto.State.CLOSED,
-        datanodeOne.getUuidString());
-    final ContainerReportFromDatanode containerReportFromDatanode =
-        new ContainerReportFromDatanode(datanodeOne, containerReport);
-    reportHandler.onMessage(containerReportFromDatanode, publisher);
-
-    Assert.assertEquals(LifeCycleState.CLOSED, containerOne.getState());
-  }
-
-  @Test
-  public void testClosingToQuasiClosed()
-      throws NodeNotFoundException, IOException {
-    /*
-     * The container is in CLOSING state and all the replicas are in
-     * OPEN/CLOSING state.
-     *
-     * The datanode reports that the replica is now QUASI_CLOSED.
-     *
-     * In this case SCM should move the container to QUASI_CLOSED.
-     */
-
-    final ContainerReportHandler reportHandler = new ContainerReportHandler(
-        nodeManager, containerManager);
-
-    final Iterator<DatanodeDetails> nodeIterator = nodeManager.getNodes(
-        NodeState.HEALTHY).iterator();
-    final DatanodeDetails datanodeOne = nodeIterator.next();
-    final DatanodeDetails datanodeTwo = nodeIterator.next();
-    final DatanodeDetails datanodeThree = nodeIterator.next();
-
-    final ContainerInfo containerOne = getContainer(LifeCycleState.CLOSING);
-    final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED);
-
-    final Set<ContainerID> containerIDSet = Stream.of(
-        containerOne.containerID(), containerTwo.containerID())
-        .collect(Collectors.toSet());
-
-    final Set<ContainerReplica> containerOneReplicas = getReplicas(
-        containerOne.containerID(),
-        ContainerReplicaProto.State.CLOSING,
-        datanodeOne, datanodeTwo);
-    containerOneReplicas.addAll(getReplicas(
-        containerOne.containerID(),
-        ContainerReplicaProto.State.OPEN,
-        datanodeThree));
-    final Set<ContainerReplica> containerTwoReplicas = getReplicas(
-        containerTwo.containerID(),
-        ContainerReplicaProto.State.CLOSED,
-        datanodeOne, datanodeTwo, datanodeThree);
-
-    nodeManager.setContainers(datanodeOne, containerIDSet);
-    nodeManager.setContainers(datanodeTwo, containerIDSet);
-    nodeManager.setContainers(datanodeThree, containerIDSet);
-
-    containerStateManager.loadContainer(containerOne);
-    containerStateManager.loadContainer(containerTwo);
-
-    containerOneReplicas.forEach(r -> {
-      try {
-        containerStateManager.updateContainerReplica(
-            containerOne.containerID(), r);
-      } catch (ContainerNotFoundException ignored) {
-
-      }
-    });
-
-    containerTwoReplicas.forEach(r -> {
-      try {
-        containerStateManager.updateContainerReplica(
-            containerTwo.containerID(), r);
-      } catch (ContainerNotFoundException ignored) {
-
-      }
-    });
-
-
-    final ContainerReportsProto containerReport = getContainerReportsProto(
-        containerOne.containerID(), ContainerReplicaProto.State.QUASI_CLOSED,
-        datanodeOne.getUuidString());
-    final ContainerReportFromDatanode containerReportFromDatanode =
-        new ContainerReportFromDatanode(datanodeOne, containerReport);
-    reportHandler.onMessage(containerReportFromDatanode, publisher);
-
-    Assert.assertEquals(LifeCycleState.QUASI_CLOSED, containerOne.getState());
-  }
-
-  @Test
-  public void testQuasiClosedToClosed()
-      throws NodeNotFoundException, IOException {
-    /*
-     * The container is in QUASI_CLOSED state.
-     *  - One of the replicas is in QUASI_CLOSED state
-     *  - The other two replicas are in OPEN/CLOSING state
-     *
-     * The datanode reports the second replica is now CLOSED.
-     *
-     * In this case SCM should CLOSE the container.
-     */
-
-    final ContainerReportHandler reportHandler = new ContainerReportHandler(
-        nodeManager, containerManager);
-    final Iterator<DatanodeDetails> nodeIterator = nodeManager.getNodes(
-        NodeState.HEALTHY).iterator();
-
-    final DatanodeDetails datanodeOne = nodeIterator.next();
-    final DatanodeDetails datanodeTwo = nodeIterator.next();
-    final DatanodeDetails datanodeThree = nodeIterator.next();
-
-    final ContainerInfo containerOne =
-        getContainer(LifeCycleState.QUASI_CLOSED);
-    final ContainerInfo containerTwo =
-        getContainer(LifeCycleState.CLOSED);
-
-    final Set<ContainerID> containerIDSet = Stream.of(
-        containerOne.containerID(), containerTwo.containerID())
-        .collect(Collectors.toSet());
-    final Set<ContainerReplica> containerOneReplicas = getReplicas(
-        containerOne.containerID(),
-        ContainerReplicaProto.State.QUASI_CLOSED,
-        10000L,
-        datanodeOne);
-    containerOneReplicas.addAll(getReplicas(
-        containerOne.containerID(),
-        ContainerReplicaProto.State.CLOSING,
-        datanodeTwo, datanodeThree));
-    final Set<ContainerReplica> containerTwoReplicas = getReplicas(
-        containerTwo.containerID(),
-        ContainerReplicaProto.State.CLOSED,
-        datanodeOne, datanodeTwo, datanodeThree);
-
-    nodeManager.setContainers(datanodeOne, containerIDSet);
-    nodeManager.setContainers(datanodeTwo, containerIDSet);
-    nodeManager.setContainers(datanodeThree, containerIDSet);
-
-    containerStateManager.loadContainer(containerOne);
-    containerStateManager.loadContainer(containerTwo);
-
-    containerOneReplicas.forEach(r -> {
-      try {
-        containerStateManager.updateContainerReplica(
-            containerOne.containerID(), r);
-      } catch (ContainerNotFoundException ignored) {
-
-      }
-    });
-
-    containerTwoReplicas.forEach(r -> {
-      try {
-        containerStateManager.updateContainerReplica(
-            containerTwo.containerID(), r);
-      } catch (ContainerNotFoundException ignored) {
-
-      }
-    });
-
-
-    final ContainerReportsProto containerReport = getContainerReportsProto(
-        containerOne.containerID(), ContainerReplicaProto.State.CLOSED,
-        datanodeOne.getUuidString());
-
-    final ContainerReportFromDatanode containerReportFromDatanode =
-        new ContainerReportFromDatanode(datanodeOne, containerReport);
-    reportHandler.onMessage(containerReportFromDatanode, publisher);
-
-    Assert.assertEquals(LifeCycleState.CLOSED, containerOne.getState());
-  }
-
-  private static ContainerReportsProto getContainerReportsProto(
-      final ContainerID containerId, final ContainerReplicaProto.State state,
-      final String originNodeId) {
-    final ContainerReportsProto.Builder crBuilder =
-        ContainerReportsProto.newBuilder();
-    final ContainerReplicaProto replicaProto =
-        ContainerReplicaProto.newBuilder()
-            .setContainerID(containerId.getId())
-            .setState(state)
-            .setOriginNodeId(originNodeId)
-            .setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
-            .setSize(5368709120L)
-            .setUsed(2000000000L)
-            .setKeyCount(100000000L)
-            .setReadCount(100000000L)
-            .setWriteCount(100000000L)
-            .setReadBytes(2000000000L)
-            .setWriteBytes(2000000000L)
-            .setBlockCommitSequenceId(10000L)
-            .setDeleteTransactionId(0)
-            .build();
-    return crBuilder.addReports(replicaProto).build();
-  }
-
-}
\ No newline at end of file
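The under- and over-replication tests above boil down to comparing the observed replica set against the expected replication factor: two of three replicas means under replicated, four of three means over replicated. A minimal, self-contained sketch of that classification (ReplicationState and classify are hypothetical names for illustration, not HDDS API):

import java.util.Set;

final class ReplicationCheckSketch {
  enum ReplicationState { UNDER_REPLICATED, HEALTHY, OVER_REPLICATED }

  // Classify a container purely by replica count, as the assertions in the
  // two tests above do.
  static ReplicationState classify(Set<?> replicas, int replicationFactor) {
    if (replicas.size() < replicationFactor) {
      return ReplicationState.UNDER_REPLICATED;
    }
    return replicas.size() == replicationFactor
        ? ReplicationState.HEALTHY
        : ReplicationState.OVER_REPLICATED;
  }
}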
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
deleted file mode 100644
index 5c4617c..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Set;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.scm.TestUtils;
-
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import static org.mockito.Mockito.when;
-
-/**
- * Tests ContainerStateManager.
- */
-public class TestContainerStateManager {
-
-  private ContainerStateManager containerStateManager;
-
-  @Before
-  public void init() throws IOException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    containerStateManager = new ContainerStateManager(conf);
-
-  }
-
-  @Test
-  public void checkReplicationStateOK() throws IOException {
-    //GIVEN
-    ContainerInfo c1 = allocateContainer();
-
-    DatanodeDetails d1 = TestUtils.randomDatanodeDetails();
-    DatanodeDetails d2 = TestUtils.randomDatanodeDetails();
-    DatanodeDetails d3 = TestUtils.randomDatanodeDetails();
-
-    addReplica(c1, d1);
-    addReplica(c1, d2);
-    addReplica(c1, d3);
-
-    //WHEN
-    Set<ContainerReplica> replicas = containerStateManager
-        .getContainerReplicas(c1.containerID());
-
-    //THEN
-    Assert.assertEquals(3, replicas.size());
-  }
-
-  @Test
-  public void checkReplicationStateMissingReplica() throws IOException {
-    //GIVEN
-
-    ContainerInfo c1 = allocateContainer();
-
-    DatanodeDetails d1 = TestUtils.randomDatanodeDetails();
-    DatanodeDetails d2 = TestUtils.randomDatanodeDetails();
-
-    addReplica(c1, d1);
-    addReplica(c1, d2);
-
-    //WHEN
-    Set<ContainerReplica> replicas = containerStateManager
-        .getContainerReplicas(c1.containerID());
-
-    Assert.assertEquals(2, replicas.size());
-    Assert.assertEquals(3, c1.getReplicationFactor().getNumber());
-  }
-
-  private void addReplica(ContainerInfo cont, DatanodeDetails node)
-      throws ContainerNotFoundException {
-    ContainerReplica replica = ContainerReplica.newBuilder()
-        .setContainerID(cont.containerID())
-        .setContainerState(ContainerReplicaProto.State.CLOSED)
-        .setDatanodeDetails(node)
-        .build();
-    containerStateManager
-        .updateContainerReplica(cont.containerID(), replica);
-  }
-
-  private ContainerInfo allocateContainer() throws IOException {
-
-    PipelineManager pipelineManager = Mockito.mock(SCMPipelineManager.class);
-
-    Pipeline pipeline =
-        Pipeline.newBuilder().setState(Pipeline.PipelineState.CLOSED)
-            .setId(PipelineID.randomId())
-            .setType(HddsProtos.ReplicationType.STAND_ALONE)
-            .setFactor(HddsProtos.ReplicationFactor.THREE)
-            .setNodes(new ArrayList<>()).build();
-
-    when(pipelineManager.createPipeline(HddsProtos.ReplicationType.STAND_ALONE,
-        HddsProtos.ReplicationFactor.THREE)).thenReturn(pipeline);
-
-    return containerStateManager.allocateContainer(pipelineManager,
-        HddsProtos.ReplicationType.STAND_ALONE,
-        HddsProtos.ReplicationFactor.THREE, "root");
-
-  }
-
-}
\ No newline at end of file
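A recurring pattern in these deleted tests is a mocked facade (ContainerManager) whose calls are forwarded to a real implementation (ContainerStateManager) via Mockito's thenAnswer/doAnswer. A generic, self-contained sketch of that delegation pattern; the Store interface and key/value names are hypothetical:

import java.util.HashMap;
import java.util.Map;

import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

final class MockDelegationSketch {
  interface Store {
    String get(String key);
    void put(String key, String value);
  }

  public static void main(String[] args) {
    Map<String, String> backing = new HashMap<>();
    Store store = mock(Store.class);
    // Reads on the mock consult the real backing map...
    when(store.get(anyString()))
        .thenAnswer(inv -> backing.get((String) inv.getArguments()[0]));
    // ...and void writes update it; the answer returns null, as Mockito
    // expects for void methods.
    doAnswer(inv -> {
      backing.put((String) inv.getArguments()[0],
          (String) inv.getArguments()[1]);
      return null;
    }).when(store).put(anyString(), anyString());

    store.put("k", "v");
    System.out.println(store.get("k")); // prints "v"
  }
}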
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
deleted file mode 100644
index 9468954..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .IncrementalContainerReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.Set;
-
-import static org.apache.hadoop.hdds.scm.TestUtils.getContainer;
-import static org.apache.hadoop.hdds.scm.TestUtils.getReplicas;
-
-/**
- * Test cases to verify the functionality of IncrementalContainerReportHandler.
- */
-public class TestIncrementalContainerReportHandler {
-
-  private NodeManager nodeManager;
-  private ContainerManager containerManager;
-  private ContainerStateManager containerStateManager;
-  private EventPublisher publisher;
-
-  @Before
-  public void setup() throws IOException {
-    final Configuration conf = new OzoneConfiguration();
-    this.containerManager = Mockito.mock(ContainerManager.class);
-    this.nodeManager = Mockito.mock(NodeManager.class);
-    this.containerStateManager = new ContainerStateManager(conf);
-    this.publisher = Mockito.mock(EventPublisher.class);
-
-
-    Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class)))
-        .thenAnswer(invocation -> containerStateManager
-            .getContainer((ContainerID)invocation.getArguments()[0]));
-
-    Mockito.when(containerManager.getContainerReplicas(
-        Mockito.any(ContainerID.class)))
-        .thenAnswer(invocation -> containerStateManager
-            .getContainerReplicas((ContainerID)invocation.getArguments()[0]));
-
-    Mockito.doAnswer(invocation -> {
-      containerStateManager
-          .updateContainerState((ContainerID)invocation.getArguments()[0],
-              (HddsProtos.LifeCycleEvent)invocation.getArguments()[1]);
-      return null;
-    }).when(containerManager).updateContainerState(
-        Mockito.any(ContainerID.class),
-        Mockito.any(HddsProtos.LifeCycleEvent.class));
-
-  }
-
-  @After
-  public void tearDown() throws IOException {
-    containerStateManager.close();
-  }
-
-
-  @Test
-  public void testClosingToClosed() throws IOException {
-    final IncrementalContainerReportHandler reportHandler =
-        new IncrementalContainerReportHandler(nodeManager, containerManager);
-    final ContainerInfo container = getContainer(LifeCycleState.CLOSING);
-    final DatanodeDetails datanodeOne = TestUtils.randomDatanodeDetails();
-    final DatanodeDetails datanodeTwo = TestUtils.randomDatanodeDetails();
-    final DatanodeDetails datanodeThree = TestUtils.randomDatanodeDetails();
-    final Set<ContainerReplica> containerReplicas = getReplicas(
-        container.containerID(),
-        ContainerReplicaProto.State.CLOSING,
-        datanodeOne, datanodeTwo, datanodeThree);
-
-    containerStateManager.loadContainer(container);
-    containerReplicas.forEach(r -> {
-      try {
-        containerStateManager.updateContainerReplica(
-            container.containerID(), r);
-      } catch (ContainerNotFoundException ignored) {
-
-      }
-    });
-
-    final IncrementalContainerReportProto containerReport =
-        getIncrementalContainerReportProto(container.containerID(),
-            ContainerReplicaProto.State.CLOSED,
-            datanodeOne.getUuidString());
-    final IncrementalContainerReportFromDatanode icrFromDatanode =
-        new IncrementalContainerReportFromDatanode(
-            datanodeOne, containerReport);
-    reportHandler.onMessage(icrFromDatanode, publisher);
-    Assert.assertEquals(LifeCycleState.CLOSED, container.getState());
-  }
-
-  @Test
-  public void testClosingToQuasiClosed() throws IOException {
-    final IncrementalContainerReportHandler reportHandler =
-        new IncrementalContainerReportHandler(nodeManager, containerManager);
-    final ContainerInfo container = getContainer(LifeCycleState.CLOSING);
-    final DatanodeDetails datanodeOne = TestUtils.randomDatanodeDetails();
-    final DatanodeDetails datanodeTwo = TestUtils.randomDatanodeDetails();
-    final DatanodeDetails datanodeThree = TestUtils.randomDatanodeDetails();
-    final Set<ContainerReplica> containerReplicas = getReplicas(
-        container.containerID(),
-        ContainerReplicaProto.State.CLOSING,
-        datanodeOne, datanodeTwo, datanodeThree);
-
-    containerStateManager.loadContainer(container);
-    containerReplicas.forEach(r -> {
-      try {
-        containerStateManager.updateContainerReplica(
-            container.containerID(), r);
-      } catch (ContainerNotFoundException ignored) {
-
-      }
-    });
-
-
-    final IncrementalContainerReportProto containerReport =
-        getIncrementalContainerReportProto(container.containerID(),
-            ContainerReplicaProto.State.QUASI_CLOSED,
-            datanodeOne.getUuidString());
-    final IncrementalContainerReportFromDatanode icrFromDatanode =
-        new IncrementalContainerReportFromDatanode(
-            datanodeOne, containerReport);
-    reportHandler.onMessage(icrFromDatanode, publisher);
-    Assert.assertEquals(LifeCycleState.QUASI_CLOSED, container.getState());
-  }
-
-  @Test
-  public void testQuasiClosedToClosed() throws IOException {
-    final IncrementalContainerReportHandler reportHandler =
-        new IncrementalContainerReportHandler(nodeManager, containerManager);
-    final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED);
-    final DatanodeDetails datanodeOne = TestUtils.randomDatanodeDetails();
-    final DatanodeDetails datanodeTwo = TestUtils.randomDatanodeDetails();
-    final DatanodeDetails datanodeThree = TestUtils.randomDatanodeDetails();
-    final Set<ContainerReplica> containerReplicas = getReplicas(
-        container.containerID(),
-        ContainerReplicaProto.State.CLOSING,
-        datanodeOne, datanodeTwo);
-    containerReplicas.addAll(getReplicas(
-        container.containerID(),
-        ContainerReplicaProto.State.QUASI_CLOSED,
-        datanodeThree));
-
-    containerStateManager.loadContainer(container);
-    containerReplicas.forEach(r -> {
-      try {
-        containerStateManager.updateContainerReplica(
-            container.containerID(), r);
-      } catch (ContainerNotFoundException ignored) {
-
-      }
-    });
-
-    final IncrementalContainerReportProto containerReport =
-        getIncrementalContainerReportProto(container.containerID(),
-            ContainerReplicaProto.State.CLOSED,
-            datanodeThree.getUuidString());
-    final IncrementalContainerReportFromDatanode icr =
-        new IncrementalContainerReportFromDatanode(
-            datanodeThree, containerReport);
-    reportHandler.onMessage(icr, publisher);
-    Assert.assertEquals(LifeCycleState.CLOSED, container.getState());
-  }
-
-  private static IncrementalContainerReportProto
-      getIncrementalContainerReportProto(
-          final ContainerID containerId,
-          final ContainerReplicaProto.State state,
-          final String originNodeId) {
-    final IncrementalContainerReportProto.Builder crBuilder =
-        IncrementalContainerReportProto.newBuilder();
-    final ContainerReplicaProto replicaProto =
-        ContainerReplicaProto.newBuilder()
-            .setContainerID(containerId.getId())
-            .setState(state)
-            .setOriginNodeId(originNodeId)
-            .setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
-            .setSize(5368709120L)
-            .setUsed(2000000000L)
-            .setKeyCount(100000000L)
-            .setReadCount(100000000L)
-            .setWriteCount(100000000L)
-            .setReadBytes(2000000000L)
-            .setWriteBytes(2000000000L)
-            .setBlockCommitSequenceId(10000L)
-            .setDeleteTransactionId(0)
-            .build();
-    return crBuilder.addReport(replicaProto).build();
-  }
-}
\ No newline at end of file
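The incremental-report tests above assert a small state machine: a CLOSING container moves to CLOSED or QUASI_CLOSED depending on the replica state a datanode reports, and a QUASI_CLOSED container is force-closed once a CLOSED replica is reported. A self-contained, illustrative sketch of those transitions; the enums below are simplified stand-ins, not the HddsProtos types:

final class TransitionSketch {
  enum ContainerState { CLOSING, QUASI_CLOSED, CLOSED }
  enum ReplicaState { OPEN, CLOSING, QUASI_CLOSED, CLOSED }

  // Compute the container state implied by a single replica report,
  // mirroring the three transitions the tests above assert.
  static ContainerState onReplicaReport(ContainerState current,
      ReplicaState reported) {
    switch (current) {
    case CLOSING:
      if (reported == ReplicaState.CLOSED) {
        return ContainerState.CLOSED;
      }
      if (reported == ReplicaState.QUASI_CLOSED) {
        return ContainerState.QUASI_CLOSED;
      }
      return current;
    case QUASI_CLOSED:
      return reported == ReplicaState.CLOSED
          ? ContainerState.CLOSED : current;
    default:
      return current;
    }
  }
}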
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
deleted file mode 100644
index 1631447..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
+++ /dev/null
@@ -1,662 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.lock.LockManager;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import static org.apache.hadoop.hdds.scm.TestUtils.createDatanodeDetails;
-import static org.apache.hadoop.hdds.scm.TestUtils.getContainer;
-import static org.apache.hadoop.hdds.scm.TestUtils.getReplicas;
-import static org.apache.hadoop.hdds.scm.TestUtils.randomDatanodeDetails;
-
-/**
- * Test cases to verify the functionality of ReplicationManager.
- */
-public class TestReplicationManager {
-
-  private ReplicationManager replicationManager;
-  private ContainerStateManager containerStateManager;
-  private ContainerPlacementPolicy containerPlacementPolicy;
-  private EventQueue eventQueue;
-  private DatanodeCommandHandler datanodeCommandHandler;
-
-  @Before
-  public void setup() throws IOException, InterruptedException {
-    final Configuration conf = new OzoneConfiguration();
-    final ContainerManager containerManager =
-        Mockito.mock(ContainerManager.class);
-    eventQueue = new EventQueue();
-    containerStateManager = new ContainerStateManager(conf);
-
-    datanodeCommandHandler = new DatanodeCommandHandler();
-    eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, datanodeCommandHandler);
-
-    Mockito.when(containerManager.getContainerIDs())
-        .thenAnswer(invocation -> containerStateManager.getAllContainerIDs());
-
-    Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class)))
-        .thenAnswer(invocation -> containerStateManager
-            .getContainer((ContainerID)invocation.getArguments()[0]));
-
-    Mockito.when(containerManager.getContainerReplicas(
-        Mockito.any(ContainerID.class)))
-        .thenAnswer(invocation -> containerStateManager
-            .getContainerReplicas((ContainerID)invocation.getArguments()[0]));
-
-    containerPlacementPolicy = Mockito.mock(ContainerPlacementPolicy.class);
-
-    Mockito.when(containerPlacementPolicy.chooseDatanodes(
-        Mockito.anyListOf(DatanodeDetails.class),
-        Mockito.anyListOf(DatanodeDetails.class),
-        Mockito.anyInt(), Mockito.anyLong()))
-        .thenAnswer(invocation -> {
-          int count = (int) invocation.getArguments()[2];
-          return IntStream.range(0, count)
-              .mapToObj(i -> randomDatanodeDetails())
-              .collect(Collectors.toList());
-        });
-
-    replicationManager = new ReplicationManager(
-        new ReplicationManagerConfiguration(),
-        containerManager,
-        containerPlacementPolicy,
-        eventQueue,
-        new LockManager<>(conf));
-    replicationManager.start();
-    Thread.sleep(100L);
-  }
-
-
-  /**
-   * Checks that restarting the ReplicationManager works.
-   */
-  @Test
-  public void testReplicationManagerRestart() throws InterruptedException {
-    Assert.assertTrue(replicationManager.isRunning());
-    replicationManager.stop();
-    // Stop is a non-blocking call; it might take some time for the
-    // ReplicationManager to shut down.
-    Thread.sleep(500);
-    Assert.assertFalse(replicationManager.isRunning());
-    replicationManager.start();
-    Assert.assertTrue(replicationManager.isRunning());
-  }
-
-  /**
-   * Open containers are not handled by ReplicationManager.
-   * This test case makes sure that ReplicationManager doesn't take
-   * any action on OPEN containers.
-   */
-  @Test
-  public void testOpenContainer() throws SCMException, InterruptedException {
-    final ContainerInfo container = getContainer(LifeCycleState.OPEN);
-    containerStateManager.loadContainer(container);
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(0, datanodeCommandHandler.getInvocation());
-
-  }
-
-  /**
-   * If the container is in CLOSING state we resend close container command
-   * to all the datanodes.
-   */
-  @Test
-  public void testClosingContainer() throws
-      SCMException, ContainerNotFoundException, InterruptedException {
-    final ContainerInfo container = getContainer(LifeCycleState.CLOSING);
-    final ContainerID id = container.containerID();
-
-    containerStateManager.loadContainer(container);
-
-    // Two replicas in CLOSING state
-    final Set<ContainerReplica> replicas = getReplicas(id, State.CLOSING,
-        randomDatanodeDetails(),
-        randomDatanodeDetails());
-
-    // One replica in OPEN state
-    final DatanodeDetails datanode = randomDatanodeDetails();
-    replicas.addAll(getReplicas(id, State.OPEN, datanode));
-
-    for (ContainerReplica replica : replicas) {
-      containerStateManager.updateContainerReplica(id, replica);
-    }
-
-    final int currentCloseCommandCount = datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.closeContainerCommand);
-
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(currentCloseCommandCount + 3, datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.closeContainerCommand));
-
-    // Update the OPEN to CLOSING
-    for (ContainerReplica replica : getReplicas(id, State.CLOSING, datanode)) {
-      containerStateManager.updateContainerReplica(id, replica);
-    }
-
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(currentCloseCommandCount + 6, datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.closeContainerCommand));
-  }
-
-
-  /**
-   * The container is QUASI_CLOSED but two of the replicas are still in
-   * OPEN state. ReplicationManager should resend the close command to those
-   * datanodes.
-   */
-  @Test
-  public void testQuasiClosedContainerWithTwoOpenReplica() throws
-      SCMException, ContainerNotFoundException, InterruptedException {
-    final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED);
-    final ContainerID id = container.containerID();
-    final UUID originNodeId = UUID.randomUUID();
-    final ContainerReplica replicaOne = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaTwo = getReplicas(
-        id, State.OPEN, 1000L, originNodeId, randomDatanodeDetails());
-    final DatanodeDetails datanodeDetails = randomDatanodeDetails();
-    final ContainerReplica replicaThree = getReplicas(
-        id, State.OPEN, 1000L, datanodeDetails.getUuid(), datanodeDetails);
-
-    containerStateManager.loadContainer(container);
-    containerStateManager.updateContainerReplica(id, replicaOne);
-    containerStateManager.updateContainerReplica(id, replicaTwo);
-    containerStateManager.updateContainerReplica(id, replicaThree);
-
-    final int currentCloseCommandCount = datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.closeContainerCommand);
-    // Two of the replicas are in OPEN state
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(currentCloseCommandCount + 2, datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.closeContainerCommand));
-    Assert.assertTrue(datanodeCommandHandler.received(
-        SCMCommandProto.Type.closeContainerCommand,
-        replicaTwo.getDatanodeDetails()));
-    Assert.assertTrue(datanodeCommandHandler.received(
-        SCMCommandProto.Type.closeContainerCommand,
-        replicaThree.getDatanodeDetails()));
-  }
-
-  /**
-   * When the container is in QUASI_CLOSED state and all the replicas are
-   * also in QUASI_CLOSED state, and there is no quorum to force close
-   * the container, ReplicationManager will not do anything.
-   */
-  @Test
-  public void testHealthyQuasiClosedContainer() throws
-      SCMException, ContainerNotFoundException, InterruptedException {
-    final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED);
-    final ContainerID id = container.containerID();
-    final UUID originNodeId = UUID.randomUUID();
-    final ContainerReplica replicaOne = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaTwo = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaThree = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-
-    containerStateManager.loadContainer(container);
-    containerStateManager.updateContainerReplica(id, replicaOne);
-    containerStateManager.updateContainerReplica(id, replicaTwo);
-    containerStateManager.updateContainerReplica(id, replicaThree);
-
-    // All the QUASI_CLOSED replicas have the same originNodeId, so the
-    // container will not be closed. ReplicationManager should take no action.
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(0, datanodeCommandHandler.getInvocation());
-  }
-
-  /**
-   * When a container is QUASI_CLOSED and we don't have a quorum to force
-   * close the container, all of its replicas should be in QUASI_CLOSED
-   * state; otherwise ReplicationManager will take action.
-   *
-   * In this test case we make one of the replicas unhealthy; ReplicationManager
-   * should then send a delete container command to the datanode that has the
-   * unhealthy replica.
-   */
-  @Test
-  public void testQuasiClosedContainerWithUnhealthyReplica()
-      throws SCMException, ContainerNotFoundException, InterruptedException,
-      ContainerReplicaNotFoundException {
-    final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED);
-    final ContainerID id = container.containerID();
-    final UUID originNodeId = UUID.randomUUID();
-    final ContainerReplica replicaOne = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaTwo = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaThree = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-
-    containerStateManager.loadContainer(container);
-    containerStateManager.updateContainerReplica(id, replicaOne);
-    containerStateManager.updateContainerReplica(id, replicaTwo);
-    containerStateManager.updateContainerReplica(id, replicaThree);
-
-    final int currentDeleteCommandCount = datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand);
-    final int currentReplicateCommandCount = datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand);
-
-    // All the QUASI_CLOSED replicas have the same originNodeId, so the
-    // container will not be closed. ReplicationManager should take no action.
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(0, datanodeCommandHandler.getInvocation());
-
-    // Make the first replica unhealthy
-    final ContainerReplica unhealthyReplica = getReplicas(
-        id, State.UNHEALTHY, 1000L, originNodeId,
-        replicaOne.getDatanodeDetails());
-    containerStateManager.updateContainerReplica(id, unhealthyReplica);
-
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(currentDeleteCommandCount + 1, datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand));
-    Assert.assertTrue(datanodeCommandHandler.received(
-        SCMCommandProto.Type.deleteContainerCommand,
-        replicaOne.getDatanodeDetails()));
-
-    // Now we delete the unhealthy replica from the in-memory state.
-    containerStateManager.removeContainerReplica(id, replicaOne);
-
-    // The container is under replicated as the unhealthy replica is removed.
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-
-    // We should get replicate command
-    Assert.assertEquals(currentReplicateCommandCount + 1,
-        datanodeCommandHandler.getInvocationCount(
-            SCMCommandProto.Type.replicateContainerCommand));
-  }
-
-  /**
-   * When a QUASI_CLOSED container is over replicated, ReplicationManager
-   * deletes the excess replicas.
-   */
-  @Test
-  public void testOverReplicatedQuasiClosedContainer() throws
-      SCMException, ContainerNotFoundException, InterruptedException {
-    final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED);
-    final ContainerID id = container.containerID();
-    final UUID originNodeId = UUID.randomUUID();
-    final ContainerReplica replicaOne = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaTwo = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaThree = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaFour = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-
-    containerStateManager.loadContainer(container);
-    containerStateManager.updateContainerReplica(id, replicaOne);
-    containerStateManager.updateContainerReplica(id, replicaTwo);
-    containerStateManager.updateContainerReplica(id, replicaThree);
-    containerStateManager.updateContainerReplica(id, replicaFour);
-
-    final int currentDeleteCommandCount = datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand);
-
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(currentDeleteCommandCount + 1, datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand));
-  }
-
-  /**
-   * When a QUASI_CLOSED container is over replicated, ReplicationManager
-   * deletes the excess replicas. While choosing the replica for deletion
-   * ReplicationManager should prioritize unhealthy replica over QUASI_CLOSED
-   * replica.
-   */
-  @Test
-  public void testOverReplicatedQuasiClosedContainerWithUnhealthyReplica()
-      throws SCMException, ContainerNotFoundException, InterruptedException {
-    final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED);
-    final ContainerID id = container.containerID();
-    final UUID originNodeId = UUID.randomUUID();
-    final ContainerReplica replicaOne = getReplicas(
-        id, State.UNHEALTHY, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaTwo = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaThree = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaFour = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-
-    containerStateManager.loadContainer(container);
-    containerStateManager.updateContainerReplica(id, replicaOne);
-    containerStateManager.updateContainerReplica(id, replicaTwo);
-    containerStateManager.updateContainerReplica(id, replicaThree);
-    containerStateManager.updateContainerReplica(id, replicaFour);
-
-    final int currentDeleteCommandCount = datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand);
-
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(currentDeleteCommandCount + 1, datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand));
-    Assert.assertTrue(datanodeCommandHandler.received(
-        SCMCommandProto.Type.deleteContainerCommand,
-        replicaOne.getDatanodeDetails()));
-  }
-
-  /**
-   * ReplicationManager should replicate a QUASI_CLOSED container if it is
-   * under replicated.
-   */
-  @Test
-  public void testUnderReplicatedQuasiClosedContainer() throws
-      SCMException, ContainerNotFoundException, InterruptedException {
-    final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED);
-    final ContainerID id = container.containerID();
-    final UUID originNodeId = UUID.randomUUID();
-    final ContainerReplica replicaOne = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaTwo = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-
-    containerStateManager.loadContainer(container);
-    containerStateManager.updateContainerReplica(id, replicaOne);
-    containerStateManager.updateContainerReplica(id, replicaTwo);
-
-    final int currentReplicateCommandCount = datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand);
-
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(currentReplicateCommandCount + 1,
-        datanodeCommandHandler.getInvocationCount(
-            SCMCommandProto.Type.replicateContainerCommand));
-  }
-
-  /**
-   * When a QUASI_CLOSED container is under replicated, ReplicationManager
-   * should re-replicate it. If there are any unhealthy replicas, they have
-   * to be deleted.
-   *
-   * In this test case, the container is QUASI_CLOSED and is under replicated
-   * and also has an unhealthy replica.
-   *
-   * In the first iteration of ReplicationManager, it should re-replicate
-   * the container so that it has enough replicas.
-   *
-   * In the second iteration, ReplicationManager should delete the unhealthy
-   * replica.
-   *
-   * In the third iteration, ReplicationManager will re-replicate as the
-   * container has again become under replicated after the unhealthy
-   * replica has been deleted.
-   *
-   */
-  @Test
-  public void testUnderReplicatedQuasiClosedContainerWithUnhealthyReplica()
-      throws SCMException, ContainerNotFoundException, InterruptedException,
-      ContainerReplicaNotFoundException {
-    final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED);
-    final ContainerID id = container.containerID();
-    final UUID originNodeId = UUID.randomUUID();
-    final ContainerReplica replicaOne = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaTwo = getReplicas(
-        id, State.UNHEALTHY, 1000L, originNodeId, randomDatanodeDetails());
-
-    containerStateManager.loadContainer(container);
-    containerStateManager.updateContainerReplica(id, replicaOne);
-    containerStateManager.updateContainerReplica(id, replicaTwo);
-
-    final int currentReplicateCommandCount = datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand);
-    final int currentDeleteCommandCount = datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand);
-
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(currentReplicateCommandCount + 1,
-        datanodeCommandHandler.getInvocationCount(
-            SCMCommandProto.Type.replicateContainerCommand));
-
-    Optional<CommandForDatanode> replicateCommand = datanodeCommandHandler
-        .getReceivedCommands().stream()
-        .filter(c -> c.getCommand().getType()
-            .equals(SCMCommandProto.Type.replicateContainerCommand))
-        .findFirst();
-
-    Assert.assertTrue(replicateCommand.isPresent());
-
-    DatanodeDetails newNode = createDatanodeDetails(
-        replicateCommand.get().getDatanodeId());
-    ContainerReplica newReplica = getReplicas(
-        id, State.QUASI_CLOSED, 1000L, originNodeId, newNode);
-    containerStateManager.updateContainerReplica(id, newReplica);
-
-    /*
-     * We have reported the replica to SCM; in the next ReplicationManager
-     * iteration it should delete the unhealthy replica.
-     */
-
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(currentDeleteCommandCount + 1, datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand));
-    // replicaTwo, the unhealthy replica, should be deleted
-    Assert.assertTrue(datanodeCommandHandler.received(
-        SCMCommandProto.Type.deleteContainerCommand,
-        replicaTwo.getDatanodeDetails()));
-
-    containerStateManager.removeContainerReplica(id, replicaTwo);
-
-    /*
-     * We have now removed the unhealthy replica; the next iteration of
-     * ReplicationManager should re-replicate the container as it
-     * is under replicated again
-     */
-
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(currentReplicateCommandCount + 2,
-        datanodeCommandHandler.getInvocationCount(
-            SCMCommandProto.Type.replicateContainerCommand));
-  }
-
-
-  /**
-   * When a container is QUASI_CLOSED and more than 50% of its replicas
-   * are in QUASI_CLOSED state with unique origin node ids,
-   * ReplicationManager should force close the replica(s) with the
-   * highest BCSID.
-   */
-  @Test
-  public void testQuasiClosedToClosed() throws
-      SCMException, ContainerNotFoundException, InterruptedException {
-    final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED);
-    final ContainerID id = container.containerID();
-    final Set<ContainerReplica> replicas = getReplicas(id, State.QUASI_CLOSED,
-        randomDatanodeDetails(),
-        randomDatanodeDetails(),
-        randomDatanodeDetails());
-    containerStateManager.loadContainer(container);
-    for (ContainerReplica replica : replicas) {
-      containerStateManager.updateContainerReplica(id, replica);
-    }
-
-    final int currentCloseCommandCount = datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.closeContainerCommand);
-
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-
-    // All the replicas have the same BCSID, so all of them will be closed.
-    Assert.assertEquals(currentCloseCommandCount + 3, datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.closeContainerCommand));
-
-  }
-
-
-  /**
-   * ReplicationManager should not take any action if the container is
-   * CLOSED and healthy.
-   */
-  @Test
-  public void testHealthyClosedContainer()
-      throws SCMException, ContainerNotFoundException, InterruptedException {
-    final ContainerInfo container = getContainer(LifeCycleState.CLOSED);
-    final ContainerID id = container.containerID();
-    final Set<ContainerReplica> replicas = getReplicas(id, State.CLOSED,
-        randomDatanodeDetails(),
-        randomDatanodeDetails(),
-        randomDatanodeDetails());
-
-    containerStateManager.loadContainer(container);
-    for (ContainerReplica replica : replicas) {
-      containerStateManager.updateContainerReplica(id, replica);
-    }
-
-    replicationManager.processContainersNow();
-    // Wait for EventQueue to call the event handler
-    Thread.sleep(100L);
-    Assert.assertEquals(0, datanodeCommandHandler.getInvocation());
-  }
-
-  @Test
-  public void testGeneratedConfig() {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-
-    ReplicationManagerConfiguration rmc =
-        ozoneConfiguration.getObject(ReplicationManagerConfiguration.class);
-
-    // The default is not included in ozone-site.xml; it is generated from
-    // the annotation into ozone-site-generated.xml, which should be loaded
-    // by OzoneConfiguration.
-    Assert.assertEquals(600000, rmc.getEventTimeout());
-
-  }
-
-  @After
-  public void teardown() throws IOException {
-    containerStateManager.close();
-    replicationManager.stop();
-  }
-
-  private class DatanodeCommandHandler implements
-      EventHandler<CommandForDatanode> {
-
-    private AtomicInteger invocation = new AtomicInteger(0);
-    private Map<SCMCommandProto.Type, AtomicInteger> commandInvocation =
-        new HashMap<>();
-    private List<CommandForDatanode> commands = new ArrayList<>();
-
-    @Override
-    public void onMessage(final CommandForDatanode command,
-                          final EventPublisher publisher) {
-      final SCMCommandProto.Type type = command.getCommand().getType();
-      commandInvocation.computeIfAbsent(type, k -> new AtomicInteger(0));
-      commandInvocation.get(type).incrementAndGet();
-      invocation.incrementAndGet();
-      commands.add(command);
-    }
-
-    private int getInvocation() {
-      return invocation.get();
-    }
-
-    private int getInvocationCount(SCMCommandProto.Type type) {
-      return commandInvocation.containsKey(type) ?
-          commandInvocation.get(type).get() : 0;
-    }
-
-    private List<CommandForDatanode> getReceivedCommands() {
-      return commands;
-    }
-
-    /**
-     * Returns true if the command handler has received the given
-     * command type for the provided datanode.
-     *
-     * @param type Command Type
-     * @param datanode DatanodeDetails
-     * @return True if command was received, false otherwise
-     */
-    private boolean received(final SCMCommandProto.Type type,
-                             final DatanodeDetails datanode) {
-      return commands.stream().anyMatch(dc ->
-          dc.getCommand().getType().equals(type) &&
-              dc.getDatanodeId().equals(datanode.getUuid()));
-    }
-  }
-
-}
\ No newline at end of file
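The TestReplicationManager cases above all probe the same per-container decision order: bring the replica count up to the replication factor first, then delete unhealthy replicas, then re-replicate again once the count drops. A minimal sketch of that ordering follows, assuming a replication factor of 3 as in the tests; ReplicaState, Action and decide() are hypothetical names, not the actual ReplicationManager API.

    import java.util.List;

    final class ReplicationDecisionSketch {

      enum ReplicaState { CLOSED, QUASI_CLOSED, UNHEALTHY }
      enum Action { REPLICATE, DELETE_UNHEALTHY, NONE }

      static final int REPLICATION_FACTOR = 3; // assumed, as in the tests

      // One iteration of the manager for a single container.
      static Action decide(List<ReplicaState> replicas) {
        if (replicas.size() < REPLICATION_FACTOR) {
          return Action.REPLICATE;          // iterations 1 and 3 of the test
        }
        if (replicas.contains(ReplicaState.UNHEALTHY)) {
          return Action.DELETE_UNHEALTHY;   // iteration 2 of the test
        }
        return Action.NONE;                 // healthy container: no action
      }
    }

Under this ordering a QUASI_CLOSED container with one QUASI_CLOSED and one UNHEALTHY replica is first replicated up to three replicas, then stripped of the unhealthy one, then replicated again, matching the three iterations asserted in testUnderReplicatedQuasiClosedContainerWithUnhealthyReplica.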
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
deleted file mode 100644
index 75a1ad3..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
+++ /dev/null
@@ -1,318 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.UUID;
-import java.util.Iterator;
-import java.util.Optional;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-/**
- * Tests for SCMContainerManager.
- */
-public class TestSCMContainerManager {
-  private static SCMContainerManager containerManager;
-  private static MockNodeManager nodeManager;
-  private static PipelineManager pipelineManager;
-  private static File testDir;
-  private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
-  private static Random random;
-
-  private static final long TIMEOUT = 10000;
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-  @BeforeClass
-  public static void setUp() throws Exception {
-    Configuration conf = SCMTestUtils.getConf();
-
-    testDir = GenericTestUtils
-        .getTestDir(TestSCMContainerManager.class.getSimpleName());
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        testDir.getAbsolutePath());
-    conf.setTimeDuration(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT,
-        TIMEOUT,
-        TimeUnit.MILLISECONDS);
-    boolean folderExisted = testDir.exists() || testDir.mkdirs();
-    if (!folderExisted) {
-      throw new IOException("Unable to create test directory path");
-    }
-    nodeManager = new MockNodeManager(true, 10);
-    pipelineManager =
-        new SCMPipelineManager(conf, nodeManager, new EventQueue(), null);
-    containerManager = new SCMContainerManager(conf, nodeManager,
-        pipelineManager, new EventQueue());
-    xceiverClientManager = new XceiverClientManager(conf);
-    random = new Random();
-  }
-
-  @AfterClass
-  public static void cleanup() throws IOException {
-    if(containerManager != null) {
-      containerManager.close();
-    }
-    if (pipelineManager != null) {
-      pipelineManager.close();
-    }
-    FileUtil.fullyDelete(testDir);
-  }
-
-  @Before
-  public void clearSafeMode() {
-    nodeManager.setSafemode(false);
-  }
-
-  @Test
-  public void testAllocateContainer() throws Exception {
-    ContainerInfo containerInfo = containerManager.allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(),
-        containerOwner);
-    Assert.assertNotNull(containerInfo);
-  }
-
-  @Test
-  public void testAllocateContainerDistributesAllocation() throws Exception {
-    /* This is a weak test; ideally we would verify the allocation
-    distribution statistically (e.g. via z-score, or by checking that there
-    are no 3-sigma outliers). For now it only checks that more than 5
-    separate nodes from the list of 10 datanodes were allocated a
-    container.
-     */
-    Set<UUID> pipelineList = new TreeSet<>();
-    for (int x = 0; x < 30; x++) {
-      ContainerInfo containerInfo = containerManager.allocateContainer(
-          xceiverClientManager.getType(),
-          xceiverClientManager.getFactor(),
-          containerOwner);
-
-      Assert.assertNotNull(containerInfo);
-      Assert.assertNotNull(containerInfo.getPipelineID());
-      pipelineList.add(pipelineManager.getPipeline(
-          containerInfo.getPipelineID()).getFirstNode()
-          .getUuid());
-    }
-    Assert.assertTrue(pipelineList.size() > 5);
-  }
-
-  @Test
-  public void testAllocateContainerInParallel() throws Exception {
-    int threadCount = 20;
-    List<ExecutorService> executors = new ArrayList<>(threadCount);
-    for (int i = 0; i < threadCount; i++) {
-      executors.add(Executors.newSingleThreadExecutor());
-    }
-    List<CompletableFuture<ContainerInfo>> futureList =
-        new ArrayList<>(threadCount);
-    for (int i = 0; i < threadCount; i++) {
-      final CompletableFuture<ContainerInfo> future = new CompletableFuture<>();
-      CompletableFuture.supplyAsync(() -> {
-        try {
-          ContainerInfo containerInfo = containerManager
-              .allocateContainer(xceiverClientManager.getType(),
-                  xceiverClientManager.getFactor(), containerOwner);
-
-          Assert.assertNotNull(containerInfo);
-          Assert.assertNotNull(containerInfo.getPipelineID());
-          future.complete(containerInfo);
-          return containerInfo;
-        } catch (IOException e) {
-          future.completeExceptionally(e);
-        }
-        return future;
-      }, executors.get(i));
-      futureList.add(future);
-    }
-    try {
-      CompletableFuture
-          .allOf(futureList.toArray(new CompletableFuture[futureList.size()]))
-          .get();
-    } catch (Exception e) {
-      Assert.fail("testAllocateBlockInParallel failed");
-    }
-  }
-
-  @Test
-  public void testGetContainer() throws IOException {
-    ContainerInfo containerInfo = containerManager.allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(),
-        containerOwner);
-    Assert.assertNotNull(containerInfo);
-    Pipeline pipeline  = pipelineManager
-        .getPipeline(containerInfo.getPipelineID());
-    Assert.assertNotNull(pipeline);
-    Assert.assertEquals(containerInfo,
-        containerManager.getContainer(containerInfo.containerID()));
-  }
-
-  @Test
-  public void testGetContainerWithPipeline() throws Exception {
-    ContainerInfo contInfo = containerManager
-        .allocateContainer(xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(), containerOwner);
-    // Add dummy replicas for container.
-    Iterator<DatanodeDetails> nodes = pipelineManager
-        .getPipeline(contInfo.getPipelineID()).getNodes().iterator();
-    DatanodeDetails dn1 = nodes.next();
-    containerManager.updateContainerState(contInfo.containerID(),
-        LifeCycleEvent.FINALIZE);
-    containerManager
-        .updateContainerState(contInfo.containerID(), LifeCycleEvent.CLOSE);
-    ContainerInfo finalContInfo = contInfo;
-    Assert.assertEquals(0,
-        containerManager.getContainerReplicas(
-            finalContInfo.containerID()).size());
-
-    containerManager.updateContainerReplica(contInfo.containerID(),
-        ContainerReplica.newBuilder().setContainerID(contInfo.containerID())
-            .setContainerState(ContainerReplicaProto.State.CLOSED)
-            .setDatanodeDetails(dn1).build());
-
-    Assert.assertEquals(1,
-        containerManager.getContainerReplicas(
-            finalContInfo.containerID()).size());
-
-    contInfo = containerManager.getContainer(contInfo.containerID());
-    Assert.assertEquals(contInfo.getState(), LifeCycleState.CLOSED);
-    // After closing the container, we should get the replicas and construct
-    // a standalone pipeline instead of a Ratis pipeline.
-
-    Set<DatanodeDetails> replicaNodes = containerManager
-        .getContainerReplicas(contInfo.containerID())
-        .stream().map(ContainerReplica::getDatanodeDetails)
-        .collect(Collectors.toSet());
-    Assert.assertTrue(replicaNodes.contains(dn1));
-  }
-
-  @Test
-  public void testGetContainerReplicaWithParallelUpdate() throws Exception {
-    testGetContainerWithPipeline();
-    final Optional<ContainerID> id = containerManager.getContainerIDs()
-        .stream().findFirst();
-    Assert.assertTrue(id.isPresent());
-    final ContainerID cId = id.get();
-    final Optional<ContainerReplica> replica = containerManager
-        .getContainerReplicas(cId).stream().findFirst();
-    Assert.assertTrue(replica.isPresent());
-    final ContainerReplica cReplica = replica.get();
-    final AtomicBoolean runUpdaterThread =
-        new AtomicBoolean(true);
-
-    Thread updaterThread = new Thread(() -> {
-      while (runUpdaterThread.get()) {
-        try {
-          containerManager.removeContainerReplica(cId, cReplica);
-          containerManager.updateContainerReplica(cId, cReplica);
-        } catch (ContainerException e) {
-          Assert.fail("Container Exception: " + e.getMessage());
-        }
-      }
-    });
-
-    updaterThread.setDaemon(true);
-    updaterThread.start();
-
-    IntStream.range(0, 100).forEach(i -> {
-      try {
-        Assert.assertNotNull(containerManager
-            .getContainerReplicas(cId)
-            .stream().map(ContainerReplica::getDatanodeDetails)
-            .collect(Collectors.toSet()));
-      } catch (ContainerNotFoundException e) {
-        Assert.fail("Missing Container " + id);
-      }
-    });
-    runUpdaterThread.set(false);
-  }
-
-  @Test
-  public void testGetNonExistentContainer() {
-    try {
-      containerManager.getContainer(ContainerID.valueof(
-          random.nextInt() & Integer.MAX_VALUE));
-      Assert.fail();
-    } catch (ContainerNotFoundException ex) {
-      // Success!
-    }
-  }
-
-  @Test
-  public void testCloseContainer() throws IOException {
-    ContainerID id = createContainer().containerID();
-    containerManager.updateContainerState(id,
-        HddsProtos.LifeCycleEvent.FINALIZE);
-    containerManager.updateContainerState(id,
-        HddsProtos.LifeCycleEvent.CLOSE);
-    ContainerInfo closedContainer = containerManager.getContainer(id);
-    Assert.assertEquals(LifeCycleState.CLOSED, closedContainer.getState());
-  }
-
-  /**
-   * Allocates a new container through SCMContainerManager.
-   * @throws IOException if the allocation fails
-   */
-  private ContainerInfo createContainer()
-      throws IOException {
-    nodeManager.setSafemode(false);
-    return containerManager
-        .allocateContainer(xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(), containerOwner);
-  }
-
-}
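testAllocateContainerInParallel above uses a standard fan-out/join pattern: each allocation runs on its own executor, the results are collected as CompletableFutures, and allOf().get() surfaces the first failure. A condensed, self-contained sketch of the same pattern; allocate() here is a stand-in for containerManager.allocateContainer(type, factor, owner), not the real API.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    final class ParallelAllocationSketch {
      // Stand-in for containerManager.allocateContainer(...).
      static String allocate(int i) {
        return "container-" + i;
      }

      public static void main(String[] args) throws Exception {
        int threadCount = 20;
        ExecutorService pool = Executors.newFixedThreadPool(threadCount);
        List<CompletableFuture<String>> futures = new ArrayList<>();
        for (int i = 0; i < threadCount; i++) {
          final int n = i;
          // An exception thrown by allocate() completes the future
          // exceptionally instead of being swallowed by the worker thread.
          futures.add(CompletableFuture.supplyAsync(() -> allocate(n), pool));
        }
        // Join on all futures; get() rethrows the first allocation failure.
        CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get();
        pool.shutdown();
      }
    }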
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
deleted file mode 100644
index 2f35719..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Make CheckStyle happy.
- */
-package org.apache.hadoop.hdds.scm.container.closer;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
deleted file mode 100644
index f93aea6..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Make CheckStyle happy.
- */
-package org.apache.hadoop.hdds.scm.container;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
deleted file mode 100644
index 18c4a64..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
-import org.apache.hadoop.hdds.scm.net.NodeSchema;
-import org.apache.hadoop.hdds.scm.net.NodeSchemaManager;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Mockito.when;
-
-/**
- * Tests for the SCM container placement factory.
- */
-public class TestContainerPlacementFactory {
-  // network topology cluster
-  private NetworkTopology cluster;
-  // datanodes array list
-  private List<DatanodeDetails> datanodes = new ArrayList<>();
-  // node storage capacity
-  private final long storageCapacity = 100L;
-  // configuration
-  private Configuration conf;
-  // node manager
-  private NodeManager nodeManager;
-
-  @Before
-  public void setup() {
-    //initialize network topology instance
-    conf = new OzoneConfiguration();
-  }
-
-  @Test
-  public void testRackAwarePolicy() throws IOException {
-    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-        SCMContainerPlacementRackAware.class.getName());
-
-    NodeSchema[] schemas = new NodeSchema[]
-        {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA};
-    NodeSchemaManager.getInstance().init(schemas, true);
-    cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
-
-    // build datanodes, and network topology
-    String rack = "/rack";
-    String hostname = "node";
-    for (int i = 0; i < 15; i++) {
-      // 3 racks in total, each with 5 datanodes
-      DatanodeDetails node = TestUtils.createDatanodeDetails(
-          hostname + i, rack + (i / 5));
-      datanodes.add(node);
-      cluster.add(node);
-    }
-
-    // create mock node manager
-    nodeManager = Mockito.mock(NodeManager.class);
-    when(nodeManager.getNodes(NodeState.HEALTHY))
-        .thenReturn(new ArrayList<>(datanodes));
-    when(nodeManager.getNodeStat(anyObject()))
-        .thenReturn(new SCMNodeMetric(storageCapacity, 0L, 100L));
-    when(nodeManager.getNodeStat(datanodes.get(2)))
-        .thenReturn(new SCMNodeMetric(storageCapacity, 90L, 10L));
-    when(nodeManager.getNodeStat(datanodes.get(3)))
-        .thenReturn(new SCMNodeMetric(storageCapacity, 80L, 20L));
-    when(nodeManager.getNodeStat(datanodes.get(4)))
-        .thenReturn(new SCMNodeMetric(storageCapacity, 70L, 30L));
-
-    ContainerPlacementPolicy policy = ContainerPlacementPolicyFactory
-        .getPolicy(conf, nodeManager, cluster, true,
-            SCMContainerPlacementMetrics.create());
-
-    int nodeNum = 3;
-    List<DatanodeDetails> datanodeDetails =
-        policy.chooseDatanodes(null, null, nodeNum, 15);
-    Assert.assertEquals(nodeNum, datanodeDetails.size());
-    Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0),
-        datanodeDetails.get(1)));
-    Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0),
-        datanodeDetails.get(2)));
-    Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1),
-        datanodeDetails.get(2)));
-  }
-
-  @Test
-  public void testDefaultPolicy() throws IOException {
-    ContainerPlacementPolicy policy = ContainerPlacementPolicyFactory
-        .getPolicy(conf, null, null, true, null);
-    Assert.assertSame(SCMContainerPlacementRandom.class, policy.getClass());
-  }
-
-  /**
-   * A dummy container placement implementation for test.
-   */
-  public static class DummyImpl implements ContainerPlacementPolicy {
-    @Override
-    public List<DatanodeDetails> chooseDatanodes(
-        List<DatanodeDetails> excludedNodes, List<DatanodeDetails> favoredNodes,
-        int nodesRequired, long sizeRequired) {
-      return null;
-    }
-  }
-
-  @Test(expected = SCMException.class)
-  public void testConstructorNotFound() throws SCMException {
-    // set a placement class which doesn't implement the right constructor
-    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-        DummyImpl.class.getName());
-    ContainerPlacementPolicyFactory.getPolicy(conf, null, null, true, null);
-  }
-
-  @Test(expected = RuntimeException.class)
-  public void testClassNotImplemented() throws SCMException {
-    // set a placement class that does not exist
-    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-        "org.apache.hadoop.hdds.scm.container.placement.algorithm.HelloWorld");
-    ContainerPlacementPolicyFactory.getPolicy(conf, null, null, true, null);
-  }
-}
\ No newline at end of file
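testConstructorNotFound and testClassNotImplemented above pin down the factory's reflective loading path: resolve the configured class name, then look up a constructor with the expected signature. A rough sketch of that flow, under the assumption that the factory works roughly like this; getPolicy() is illustrative and the real ContainerPlacementPolicyFactory signature differs.

    import java.lang.reflect.Constructor;

    final class PlacementFactorySketch {
      static Object getPolicy(String className) throws Exception {
        // A nonexistent class (testClassNotImplemented) fails here with
        // ClassNotFoundException, surfaced to callers as a RuntimeException.
        Class<?> clazz = Class.forName(className);
        // A class without the expected constructor (testConstructorNotFound,
        // e.g. DummyImpl above) fails here with NoSuchMethodException,
        // surfaced as an SCMException.
        Constructor<?> ctor = clazz.getDeclaredConstructor();
        return ctor.newInstance();
      }
    }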
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
deleted file mode 100644
index 00ec398..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-
-import org.junit.Assert;
-import org.junit.Test;
-import static org.mockito.Matchers.anyObject;
-import org.mockito.Mockito;
-import static org.mockito.Mockito.when;
-
-/**
- * Tests for capacity-based SCM container placement.
- */
-public class TestSCMContainerPlacementCapacity {
-  @Test
-  public void chooseDatanodes() throws SCMException {
-    //given
-    Configuration conf = new OzoneConfiguration();
-
-    List<DatanodeDetails> datanodes = new ArrayList<>();
-    for (int i = 0; i < 7; i++) {
-      datanodes.add(TestUtils.randomDatanodeDetails());
-    }
-
-    NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
-    when(mockNodeManager.getNodes(NodeState.HEALTHY))
-        .thenReturn(new ArrayList<>(datanodes));
-
-    when(mockNodeManager.getNodeStat(anyObject()))
-        .thenReturn(new SCMNodeMetric(100L, 0L, 100L));
-    when(mockNodeManager.getNodeStat(datanodes.get(2)))
-        .thenReturn(new SCMNodeMetric(100L, 90L, 10L));
-    when(mockNodeManager.getNodeStat(datanodes.get(3)))
-        .thenReturn(new SCMNodeMetric(100L, 80L, 20L));
-    when(mockNodeManager.getNodeStat(datanodes.get(4)))
-        .thenReturn(new SCMNodeMetric(100L, 70L, 30L));
-
-    SCMContainerPlacementCapacity scmContainerPlacementRandom =
-        new SCMContainerPlacementCapacity(mockNodeManager, conf, null, true,
-            null);
-
-    List<DatanodeDetails> existingNodes = new ArrayList<>();
-    existingNodes.add(datanodes.get(0));
-    existingNodes.add(datanodes.get(1));
-
-    Map<DatanodeDetails, Integer> selectedCount = new HashMap<>();
-    for (DatanodeDetails datanode : datanodes) {
-      selectedCount.put(datanode, 0);
-    }
-
-    for (int i = 0; i < 1000; i++) {
-
-      //when
-      List<DatanodeDetails> datanodeDetails = scmContainerPlacementRandom
-          .chooseDatanodes(existingNodes, null, 1, 15);
-
-      //then
-      Assert.assertEquals(1, datanodeDetails.size());
-      DatanodeDetails datanode0Details = datanodeDetails.get(0);
-
-      Assert.assertNotEquals(
-          "Datanode 0 should not been selected: excluded by parameter",
-          datanodes.get(0), datanode0Details);
-      Assert.assertNotEquals(
-          "Datanode 1 should not been selected: excluded by parameter",
-          datanodes.get(1), datanode0Details);
-      Assert.assertNotEquals(
-          "Datanode 2 should not been selected: not enough space there",
-          datanodes.get(2), datanode0Details);
-
-      selectedCount
-          .put(datanode0Details, selectedCount.get(datanode0Details) + 1);
-
-    }
-
-    // Datanode 4 has less space and should be selected fewer times.
-    Assert.assertTrue(selectedCount.get(datanodes.get(3)) > selectedCount
-        .get(datanodes.get(6)));
-    Assert.assertTrue(selectedCount.get(datanodes.get(4)) > selectedCount
-        .get(datanodes.get(6)));
-  }
-}
\ No newline at end of file
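The 1000-iteration loop above is a statistical check: over many placements, the selection counts should reflect the nodes' free space rather than being uniform. One cheap way to get such capacity skew is the power-of-two-choices technique: sample two random candidates and keep the one with more free space. The sketch below shows that technique in isolation; it is not claimed to be SCMContainerPlacementCapacity's exact algorithm.

    import java.util.List;
    import java.util.Random;
    import java.util.function.ToLongFunction;

    final class TwoChoicePlacementSketch {
      // Sample two random candidates (possibly the same one) and keep the
      // one with more free space; cheap, yet biases selection by capacity.
      static <N> N choose(List<N> candidates, ToLongFunction<N> freeSpace,
                          Random random) {
        N first = candidates.get(random.nextInt(candidates.size()));
        N second = candidates.get(random.nextInt(candidates.size()));
        return freeSpace.applyAsLong(first) >= freeSpace.applyAsLong(second)
            ? first : second;
      }
    }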
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
deleted file mode 100644
index 2d8b816..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
+++ /dev/null
@@ -1,375 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.net.NetConstants;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
-import org.apache.hadoop.hdds.scm.net.NodeSchema;
-import org.apache.hadoop.hdds.scm.net.NodeSchemaManager;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.mockito.Mockito;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
-import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Mockito.when;
-
-/**
- * Tests for the rack-aware SCM container placement.
- */
-@RunWith(Parameterized.class)
-public class TestSCMContainerPlacementRackAware {
-  private NetworkTopology cluster;
-  private Configuration conf;
-  private NodeManager nodeManager;
-  private Integer datanodeCount;
-  private List<DatanodeDetails> datanodes = new ArrayList<>();
-  // policy with fallback capability
-  private SCMContainerPlacementRackAware policy;
-  // policy prohibit fallback
-  private SCMContainerPlacementRackAware policyNoFallback;
-  // node storage capacity
-  private static final long STORAGE_CAPACITY = 100L;
-  private SCMContainerPlacementMetrics metrics;
-  private static final int NODE_PER_RACK = 5;
-
-  public TestSCMContainerPlacementRackAware(Integer count) {
-    this.datanodeCount = count;
-  }
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> setupDatanodes() {
-    return Arrays.asList(new Object[][]{{3}, {4}, {5}, {6}, {7}, {8}, {9},
-        {10}, {11}, {12}, {13}, {14}, {15}});
-  }
-
-  @Before
-  public void setup() {
-    //initialize network topology instance
-    conf = new OzoneConfiguration();
-    NodeSchema[] schemas = new NodeSchema[]
-        {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA};
-    NodeSchemaManager.getInstance().init(schemas, true);
-    cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
-
-    // build datanodes, and network topology
-    String rack = "/rack";
-    String hostname = "node";
-    for (int i = 0; i < datanodeCount; i++) {
-      // Up to 3 racks, each with up to 5 datanodes
-      DatanodeDetails node = TestUtils.createDatanodeDetails(
-          hostname + i, rack + (i / NODE_PER_RACK));
-      datanodes.add(node);
-      cluster.add(node);
-    }
-
-    // create mock node manager
-    nodeManager = Mockito.mock(NodeManager.class);
-    when(nodeManager.getNodes(NodeState.HEALTHY))
-        .thenReturn(new ArrayList<>(datanodes));
-    when(nodeManager.getNodeStat(anyObject()))
-        .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 0L, 100L));
-    if (datanodeCount > 4) {
-      when(nodeManager.getNodeStat(datanodes.get(2)))
-          .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 90L, 10L));
-      when(nodeManager.getNodeStat(datanodes.get(3)))
-          .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 80L, 20L));
-      when(nodeManager.getNodeStat(datanodes.get(4)))
-          .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 70L, 30L));
-    } else if (datanodeCount > 3) {
-      when(nodeManager.getNodeStat(datanodes.get(2)))
-          .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 90L, 10L));
-      when(nodeManager.getNodeStat(datanodes.get(3)))
-          .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 80L, 20L));
-    } else if (datanodeCount > 2) {
-      when(nodeManager.getNodeStat(datanodes.get(2)))
-          .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 84L, 16L));
-    }
-
-    // create placement policy instances
-    metrics = SCMContainerPlacementMetrics.create();
-    policy = new SCMContainerPlacementRackAware(
-        nodeManager, conf, cluster, true, metrics);
-    policyNoFallback = new SCMContainerPlacementRackAware(
-        nodeManager, conf, cluster, false, metrics);
-  }
-
-
-  @Test
-  public void chooseNodeWithNoExcludedNodes() throws SCMException {
-    // test choosing new datanodes for new pipeline cases
-    // 1 replica
-    int nodeNum = 1;
-    List<DatanodeDetails> datanodeDetails =
-        policy.chooseDatanodes(null, null, nodeNum, 15);
-    Assert.assertEquals(nodeNum, datanodeDetails.size());
-
-    // 2 replicas
-    nodeNum = 2;
-    datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15);
-    Assert.assertEquals(nodeNum, datanodeDetails.size());
-    Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0),
-        datanodeDetails.get(1)) || (datanodeCount % NODE_PER_RACK == 1));
-
-    //  3 replicas
-    nodeNum = 3;
-    datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15);
-    Assert.assertEquals(nodeNum, datanodeDetails.size());
-    // requires at least 2 racks for the following statements
-    assumeTrue(datanodeCount > NODE_PER_RACK &&
-        datanodeCount % NODE_PER_RACK > 1);
-    Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0),
-        datanodeDetails.get(1)));
-    Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0),
-        datanodeDetails.get(2)));
-    Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1),
-        datanodeDetails.get(2)));
-
-    //  4 replicas
-    nodeNum = 4;
-    datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15);
-    Assert.assertEquals(nodeNum, datanodeDetails.size());
-    // requires at least 2 racks and enough datanodes for the following statements
-    assumeTrue(datanodeCount > NODE_PER_RACK + 1);
-    Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0),
-        datanodeDetails.get(1)));
-    Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0),
-        datanodeDetails.get(2)));
-    Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1),
-        datanodeDetails.get(2)));
-  }
-
-  @Test
-  public void chooseNodeWithExcludedNodes() throws SCMException {
-    // test choosing new datanodes for an under-replicated pipeline
-    // 3 replicas, two existing datanodes on same rack
-    assumeTrue(datanodeCount > NODE_PER_RACK);
-    int nodeNum = 1;
-    List<DatanodeDetails> excludedNodes = new ArrayList<>();
-
-    excludedNodes.add(datanodes.get(0));
-    excludedNodes.add(datanodes.get(1));
-    List<DatanodeDetails> datanodeDetails = policy.chooseDatanodes(
-        excludedNodes, null, nodeNum, 15);
-    Assert.assertEquals(nodeNum, datanodeDetails.size());
-    Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0),
-        excludedNodes.get(0)));
-    Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0),
-        excludedNodes.get(1)));
-
-    // 3 replicas, one existing datanode
-    nodeNum = 2;
-    excludedNodes.clear();
-    excludedNodes.add(datanodes.get(0));
-    datanodeDetails = policy.chooseDatanodes(
-        excludedNodes, null, nodeNum, 15);
-    Assert.assertEquals(nodeNum, datanodeDetails.size());
-    Assert.assertTrue(cluster.isSameParent(
-        datanodeDetails.get(0), excludedNodes.get(0)) ||
-        cluster.isSameParent(datanodeDetails.get(1), excludedNodes.get(0)));
-
-    // 3 replicas, two existing datanodes on different rack
-    nodeNum = 1;
-    excludedNodes.clear();
-    excludedNodes.add(datanodes.get(0));
-    excludedNodes.add(datanodes.get(5));
-    datanodeDetails = policy.chooseDatanodes(
-        excludedNodes, null, nodeNum, 15);
-    Assert.assertEquals(nodeNum, datanodeDetails.size());
-    Assert.assertTrue(cluster.isSameParent(
-        datanodeDetails.get(0), excludedNodes.get(0)) ||
-        cluster.isSameParent(datanodeDetails.get(0), excludedNodes.get(1)));
-  }
-
-  @Test
-  public void testFallback() throws SCMException {
-    // 5 replicas, but only 3 racks. The policy with fallback should still
-    // allocate the 5th datanode even though it breaks the rack rule (first
-    // 2 replicas on the same rack, others on different racks).
-    assumeTrue(datanodeCount > NODE_PER_RACK * 2 &&
-        (datanodeCount % NODE_PER_RACK > 1));
-    int nodeNum = 5;
-    List<DatanodeDetails>  datanodeDetails =
-        policy.chooseDatanodes(null, null, nodeNum, 15);
-    Assert.assertEquals(nodeNum, datanodeDetails.size());
-    Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0),
-        datanodeDetails.get(1)));
-    Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0),
-        datanodeDetails.get(2)));
-    Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1),
-        datanodeDetails.get(2)));
-    Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0),
-        datanodeDetails.get(3)));
-    Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(2),
-        datanodeDetails.get(3)));
-
-    // get metrics
-    long totalRequest = metrics.getDatanodeRequestCount();
-    long successCount = metrics.getDatanodeChooseSuccessCount();
-    long tryCount = metrics.getDatanodeChooseAttemptCount();
-    long compromiseCount = metrics.getDatanodeChooseFallbackCount();
-
-    // verify metrics
-    Assert.assertEquals(nodeNum, totalRequest);
-    Assert.assertEquals(nodeNum, successCount);
-    Assert.assertTrue(tryCount > nodeNum);
-    Assert.assertTrue(compromiseCount >= 1);
-  }
-
-  @Test
-  public void testNoFallback() throws SCMException {
-    assumeTrue(datanodeCount > (NODE_PER_RACK * 2) &&
-        (datanodeCount <= NODE_PER_RACK * 3));
-    // 5 replicas, but only 3 racks; the policy that prohibits fallback should fail.
-    int nodeNum = 5;
-    try {
-      policyNoFallback.chooseDatanodes(null, null, nodeNum, 15);
-      fail("Fallback prohibited, this call should fail");
-    } catch (Exception e) {
-      assertTrue(e.getClass().getSimpleName().equals("SCMException"));
-    }
-
-    // get metrics
-    long totalRequest = metrics.getDatanodeRequestCount();
-    long successCount = metrics.getDatanodeChooseSuccessCount();
-    long tryCount = metrics.getDatanodeChooseAttemptCount();
-    long compromiseCount = metrics.getDatanodeChooseFallbackCount();
-
-    Assert.assertEquals(nodeNum, totalRequest);
-    Assert.assertTrue(successCount >= 3);
-    Assert.assertTrue(tryCount >= nodeNum);
-    Assert.assertEquals(0, compromiseCount);
-  }
-
-  @Test
-  public void chooseNodeWithFavoredNodes() throws SCMException {
-    int nodeNum = 1;
-    List<DatanodeDetails> excludedNodes = new ArrayList<>();
-    List<DatanodeDetails> favoredNodes = new ArrayList<>();
-
-    // no excludedNodes, only favoredNodes
-    favoredNodes.add(datanodes.get(0));
-    List<DatanodeDetails> datanodeDetails = policy.chooseDatanodes(
-        excludedNodes, favoredNodes, nodeNum, 15);
-    Assert.assertEquals(nodeNum, datanodeDetails.size());
-    Assert.assertEquals(favoredNodes.get(0).getNetworkFullPath(),
-        datanodeDetails.get(0).getNetworkFullPath());
-
-    // No overlap between excludedNodes and favoredNodes; favoredNodes can
-    // be chosen.
-    excludedNodes.clear();
-    favoredNodes.clear();
-    excludedNodes.add(datanodes.get(0));
-    favoredNodes.add(datanodes.get(2));
-    datanodeDetails = policy.chooseDatanodes(
-        excludedNodes, favoredNodes, nodeNum, 15);
-    Assert.assertEquals(nodeNum, datanodeDetails.size());
-    Assert.assertEquals(favoredNodes.get(0).getNetworkFullPath(),
-        datanodeDetails.get(0).getNetworkFullPath());
-
-    // There is overlap between excludedNodes and favoredNodes; favoredNodes
-    // should not be chosen.
-    excludedNodes.clear();
-    favoredNodes.clear();
-    excludedNodes.add(datanodes.get(0));
-    favoredNodes.add(datanodes.get(0));
-    datanodeDetails = policy.chooseDatanodes(
-        excludedNodes, favoredNodes, nodeNum, 15);
-    Assert.assertEquals(nodeNum, datanodeDetails.size());
-    Assert.assertNotEquals(favoredNodes.get(0).getNetworkFullPath(),
-        datanodeDetails.get(0).getNetworkFullPath());
-  }
-
-  @Test
-  public void testNoInfiniteLoop() throws SCMException {
-    int nodeNum = 1;
-
-    try {
-      // request storage space larger than node capability
-      policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 15);
-      fail("Storage requested exceeds capacity, this call should fail");
-    } catch (Exception e) {
-      assertTrue(e.getClass().getSimpleName().equals("SCMException"));
-    }
-
-    // get metrics
-    long totalRequest = metrics.getDatanodeRequestCount();
-    long successCount = metrics.getDatanodeChooseSuccessCount();
-    long tryCount = metrics.getDatanodeChooseAttemptCount();
-    long compromiseCount = metrics.getDatanodeChooseFallbackCount();
-
-    Assert.assertEquals(nodeNum, totalRequest);
-    Assert.assertEquals(0, successCount);
-    Assert.assertTrue(tryCount > nodeNum);
-    Assert.assertEquals(0, compromiseCount);
-  }
-
-  @Test
-  public void testDatanodeWithDefaultNetworkLocation() throws SCMException {
-    String hostname = "node";
-    List<DatanodeDetails> dataList = new ArrayList<>();
-    NetworkTopology clusterMap =
-        new NetworkTopologyImpl(NodeSchemaManager.getInstance());
-    for (int i = 0; i < 15; i++) {
-      // 15 datanodes, all placed in the default network location
-      DatanodeDetails node = TestUtils.createDatanodeDetails(
-          hostname + i, null);
-      dataList.add(node);
-      clusterMap.add(node);
-    }
-    Assert.assertEquals(dataList.size(), StringUtils.countMatches(
-        clusterMap.toString(), NetConstants.DEFAULT_RACK));
-
-    // choose nodes to host 3 replicas
-    int nodeNum = 3;
-    SCMContainerPlacementRackAware newPolicy =
-        new SCMContainerPlacementRackAware(nodeManager, conf, clusterMap, true,
-            metrics);
-    List<DatanodeDetails> datanodeDetails =
-        newPolicy.chooseDatanodes(null, null, nodeNum, 15);
-    Assert.assertEquals(nodeNum, datanodeDetails.size());
-    Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0),
-        datanodeDetails.get(1)));
-    Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0),
-        datanodeDetails.get(2)));
-    Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(1),
-        datanodeDetails.get(2)));
-  }
-}
\ No newline at end of file
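The rack-aware tests above all revolve around one invariant: the first two replicas share a rack, every further replica lands on a fresh rack, and the fallback flag decides whether the rule may be relaxed (counted by the fallback metric) or must fail when racks run out. A compact sketch of that invariant; chooseRacks() and its parameters are hypothetical, not the SCMContainerPlacementRackAware API.

    import java.util.ArrayList;
    import java.util.LinkedHashSet;
    import java.util.List;

    final class RackRuleSketch {
      static List<String> chooseRacks(List<String> racks, int replicas,
                                      boolean fallback) {
        List<String> distinct = new ArrayList<>(new LinkedHashSet<>(racks));
        List<String> chosen = new ArrayList<>();
        int next = 0;
        for (int i = 0; i < replicas; i++) {
          if (i < 2) {
            chosen.add(distinct.get(0));        // first two share a rack
          } else if (++next < distinct.size()) {
            chosen.add(distinct.get(next));     // one new rack per replica
          } else if (fallback) {
            chosen.add(distinct.get(0));        // compromise: reuse a rack
          } else {
            throw new IllegalStateException("not enough racks, no fallback");
          }
        }
        return chosen;
      }
    }

With 3 racks and 5 replicas this yields [r0, r0, r1, r2, r0] when fallback is enabled and throws otherwise, which is the split that testFallback and testNoFallback assert through the compromise count.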
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
deleted file mode 100644
index 43e3a8d..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-
-import org.junit.Assert;
-import org.junit.Test;
-import static org.mockito.Matchers.anyObject;
-import org.mockito.Mockito;
-import static org.mockito.Mockito.when;
-
-/**
- * Test for the random container placement.
- */
-public class TestSCMContainerPlacementRandom {
-
-  @Test
-  public void chooseDatanodes() throws SCMException {
-    //given
-    Configuration conf = new OzoneConfiguration();
-
-    List<DatanodeDetails> datanodes = new ArrayList<>();
-    for (int i = 0; i < 5; i++) {
-      datanodes.add(TestUtils.randomDatanodeDetails());
-    }
-
-    NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
-    when(mockNodeManager.getNodes(NodeState.HEALTHY))
-        .thenReturn(new ArrayList<>(datanodes));
-
-    when(mockNodeManager.getNodeStat(anyObject()))
-        .thenReturn(new SCMNodeMetric(100L, 0L, 100L));
-    when(mockNodeManager.getNodeStat(datanodes.get(2)))
-        .thenReturn(new SCMNodeMetric(100L, 90L, 10L));
-
-    SCMContainerPlacementRandom scmContainerPlacementRandom =
-        new SCMContainerPlacementRandom(mockNodeManager, conf, null, true,
-            null);
-
-    List<DatanodeDetails> existingNodes = new ArrayList<>();
-    existingNodes.add(datanodes.get(0));
-    existingNodes.add(datanodes.get(1));
-
-    for (int i = 0; i < 100; i++) {
-      //when
-      List<DatanodeDetails> datanodeDetails = scmContainerPlacementRandom
-          .chooseDatanodes(existingNodes, null, 1, 15);
-
-      //then
-      Assert.assertEquals(1, datanodeDetails.size());
-      DatanodeDetails datanode0Details = datanodeDetails.get(0);
-
-      Assert.assertNotEquals(
-          "Datanode 0 should not been selected: excluded by parameter",
-          datanodes.get(0), datanode0Details);
-      Assert.assertNotEquals(
-          "Datanode 1 should not been selected: excluded by parameter",
-          datanodes.get(1), datanode0Details);
-      Assert.assertNotEquals(
-          "Datanode 2 should not been selected: not enough space there",
-          datanodes.get(2), datanode0Details);
-
-    }
-  }
-}
\ No newline at end of file
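The random-placement test above only checks exclusion behavior: nodes passed in as existing replicas and nodes without enough free space must never be returned. That reduces to a filter-then-pick-uniformly flow, sketched here with assumed names; chooseOne() and hasSpace are illustrative, not the SCMContainerPlacementRandom API.

    import java.util.List;
    import java.util.Random;
    import java.util.function.Predicate;
    import java.util.stream.Collectors;

    final class RandomPlacementSketch {
      // Drop excluded nodes and nodes lacking space, then pick uniformly.
      static <N> N chooseOne(List<N> healthy, List<N> excluded,
                             Predicate<N> hasSpace, Random random) {
        List<N> candidates = healthy.stream()
            .filter(n -> !excluded.contains(n)) // excluded by parameter
            .filter(hasSpace)                   // needs enough free space
            .collect(Collectors.toList());
        if (candidates.isEmpty()) {
          throw new IllegalStateException("no datanode satisfies the request");
        }
        return candidates.get(random.nextInt(candidates.size()));
      }
    }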
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
deleted file mode 100644
index 1423c99..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Test classes for SCM replication functionality.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-// Test classes for Replication functionality.
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
deleted file mode 100644
index 63cc9bf..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.container.states;
-
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.util.Arrays;
-import java.util.List;
-
-/**
- * Test ContainerAttribute management.
- */
-public class TestContainerAttribute {
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  @Test
-  public void testInsert() throws SCMException {
-    ContainerAttribute<Integer> containerAttribute = new ContainerAttribute<>();
-    ContainerID id = new ContainerID(42);
-    containerAttribute.insert(1, id);
-    Assert.assertEquals(1,
-        containerAttribute.getCollection(1).size());
-    Assert.assertTrue(containerAttribute.getCollection(1).contains(id));
-
-    // Insert again and verify that it overwrites an existing value.
-    ContainerID newId =
-        new ContainerID(42);
-    containerAttribute.insert(1, newId);
-    Assert.assertEquals(1,
-        containerAttribute.getCollection(1).size());
-    Assert.assertTrue(containerAttribute.getCollection(1).contains(newId));
-  }
-
-  @Test
-  public void testHasKey() throws SCMException {
-    ContainerAttribute<Integer> containerAttribute = new ContainerAttribute<>();
-
-    for (int x = 1; x < 42; x++) {
-      containerAttribute.insert(1, new ContainerID(x));
-    }
-    Assert.assertTrue(containerAttribute.hasKey(1));
-    for (int x = 1; x < 42; x++) {
-      Assert.assertTrue(containerAttribute.hasContainerID(1, x));
-    }
-
-    Assert.assertFalse(containerAttribute.hasContainerID(1,
-        new ContainerID(42)));
-  }
-
-  @Test
-  public void testClearSet() throws SCMException {
-    List<String> keyslist = Arrays.asList("Key1", "Key2", "Key3");
-    ContainerAttribute<String> containerAttribute = new ContainerAttribute<>();
-    for (String k : keyslist) {
-      for (int x = 1; x < 101; x++) {
-        containerAttribute.insert(k, new ContainerID(x));
-      }
-    }
-    for (String k : keyslist) {
-      Assert.assertEquals(100,
-          containerAttribute.getCollection(k).size());
-    }
-    containerAttribute.clearSet("Key1");
-    Assert.assertEquals(0,
-        containerAttribute.getCollection("Key1").size());
-  }
-
-  @Test
-  public void testRemove() throws SCMException {
-
-    List<String> keyslist = Arrays.asList("Key1", "Key2", "Key3");
-    ContainerAttribute<String> containerAttribute = new ContainerAttribute<>();
-
-    for (String k : keyslist) {
-      for (int x = 1; x < 101; x++) {
-        containerAttribute.insert(k, new ContainerID(x));
-      }
-    }
-    for (int x = 1; x < 101; x += 2) {
-      containerAttribute.remove("Key1", new ContainerID(x));
-    }
-
-    for (int x = 1; x < 101; x += 2) {
-      Assert.assertFalse(containerAttribute.hasContainerID("Key1",
-          new ContainerID(x)));
-    }
-
-    Assert.assertEquals(100,
-        containerAttribute.getCollection("Key2").size());
-
-    Assert.assertEquals(100,
-        containerAttribute.getCollection("Key3").size());
-
-    Assert.assertEquals(50,
-        containerAttribute.getCollection("Key1").size());
-  }
-
-  @Test
-  public void testUpdate() throws SCMException {
-    String key1 = "Key1";
-    String key2 = "Key2";
-    String key3 = "Key3";
-
-    ContainerAttribute<String> containerAttribute = new ContainerAttribute<>();
-    ContainerID id = new ContainerID(42);
-
-    containerAttribute.insert(key1, id);
-    Assert.assertTrue(containerAttribute.hasContainerID(key1, id));
-    Assert.assertFalse(containerAttribute.hasContainerID(key2, id));
-
-    // This should move the id from key1 bucket to key2 bucket.
-    containerAttribute.update(key1, key2, id);
-    Assert.assertFalse(containerAttribute.hasContainerID(key1, id));
-    Assert.assertTrue(containerAttribute.hasContainerID(key2, id));
-
-    // This should fail since we cannot find this id in the key3 bucket.
-    thrown.expect(SCMException.class);
-    containerAttribute.update(key3, key1, id);
-  }
-}
\ No newline at end of file
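TestContainerAttribute above pins down the bucket semantics: inserting an equal ID under the same key keeps the bucket size at one, clearSet empties a single bucket, remove drops individual IDs, and update moves an ID between buckets and fails when the source bucket does not hold it. A minimal sketch of those semantics (AttributeMapSketch is illustrative; IllegalStateException stands in for SCMException):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Key -> set-of-container-IDs map with move-between-buckets semantics.
final class AttributeMapSketch<K> {

  private final Map<K, Set<Long>> buckets = new HashMap<>();

  void insert(K key, long id) {
    buckets.computeIfAbsent(key, k -> new HashSet<>()).add(id);
  }

  boolean has(K key, long id) {
    Set<Long> bucket = buckets.get(key);
    return bucket != null && bucket.contains(id);
  }

  void clearSet(K key) {
    buckets.remove(key);
  }

  void remove(K key, long id) {
    Set<Long> bucket = buckets.get(key);
    if (bucket != null) {
      bucket.remove(id);
    }
  }

  // Move 'id' from one bucket to another; the move must fail when the
  // source bucket does not actually hold the ID.
  void update(K from, K to, long id) {
    Set<Long> source = buckets.get(from);
    if (source == null || !source.remove(id)) {
      throw new IllegalStateException(
          "Container " + id + " not found under key " + from);
    }
    insert(to, id);
  }
}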
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
deleted file mode 100644
index 795dfc1..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Make CheckStyle Happy.
- */
-package org.apache.hadoop.hdds.scm.container.states;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
deleted file mode 100644
index 26ffd8d..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.test.PathUtils;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.mockito.Mockito;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test for different container placement policies.
- */
-public class TestContainerPlacement {
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  /**
-   * Returns a new copy of Configuration.
-   *
-   * @return Config
-   */
-  OzoneConfiguration getConf() {
-    return new OzoneConfiguration();
-  }
-
-  /**
-   * Creates a NodeManager.
-   *
-   * @param config - Config for the node manager.
-   * @return SCMNodeManager
-   * @throws IOException
-   */
-
-  SCMNodeManager createNodeManager(OzoneConfiguration config)
-      throws IOException {
-    EventQueue eventQueue = new EventQueue();
-    eventQueue.addHandler(SCMEvents.NEW_NODE,
-        Mockito.mock(NewNodeHandler.class));
-    eventQueue.addHandler(SCMEvents.STALE_NODE,
-        Mockito.mock(StaleNodeHandler.class));
-    eventQueue.addHandler(SCMEvents.DEAD_NODE,
-        Mockito.mock(DeadNodeHandler.class));
-
-    SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class);
-    Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1");
-
-    SCMNodeManager nodeManager = new SCMNodeManager(config,
-        storageConfig, eventQueue, null);
-    return nodeManager;
-  }
-
-  SCMContainerManager createContainerManager(Configuration config,
-      NodeManager scmNodeManager) throws IOException {
-    EventQueue eventQueue = new EventQueue();
-    final int cacheSize = config.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
-        OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-    PipelineManager pipelineManager =
-        new SCMPipelineManager(config, scmNodeManager, eventQueue, null);
-    return new SCMContainerManager(config, scmNodeManager, pipelineManager,
-        eventQueue);
-
-  }
-
-  /**
-   * Test capacity-based container placement policy with node reports.
-   *
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws TimeoutException
-   */
-  @Test
-  @Ignore
-  public void testContainerPlacementCapacity() throws IOException,
-      InterruptedException, TimeoutException {
-    OzoneConfiguration conf = getConf();
-    final int nodeCount = 4;
-    final long capacity = 10L * OzoneConsts.GB;
-    final long used = 2L * OzoneConsts.GB;
-    final long remaining = capacity - used;
-
-    final File testDir = PathUtils.getTestDir(
-        TestContainerPlacement.class);
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        testDir.getAbsolutePath());
-    conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-        SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
-
-    SCMNodeManager nodeManager = createNodeManager(conf);
-    SCMContainerManager containerManager =
-        createContainerManager(conf, nodeManager);
-    List<DatanodeDetails> datanodes =
-        TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount);
-    XceiverClientManager xceiverClientManager = null;
-    try {
-      for (DatanodeDetails datanodeDetails : datanodes) {
-        nodeManager.processHeartbeat(datanodeDetails);
-      }
-
-      //TODO: wait for heartbeat to be processed
-      Thread.sleep(4 * 1000);
-      assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
-      assertEquals(capacity * nodeCount,
-          (long) nodeManager.getStats().getCapacity().get());
-      assertEquals(used * nodeCount,
-          (long) nodeManager.getStats().getScmUsed().get());
-      assertEquals(remaining * nodeCount,
-          (long) nodeManager.getStats().getRemaining().get());
-
-      xceiverClientManager = new XceiverClientManager(new OzoneConfiguration());
-
-      ContainerInfo container = containerManager
-          .allocateContainer(
-          xceiverClientManager.getType(),
-          xceiverClientManager.getFactor(), "OZONE");
-      assertEquals(xceiverClientManager.getFactor().getNumber(),
-          containerManager.getContainerReplicas(
-              container.containerID()).size());
-    } finally {
-      IOUtils.closeQuietly(containerManager);
-      IOUtils.closeQuietly(nodeManager);
-      if (xceiverClientManager != null) {
-        xceiverClientManager.close();
-      }
-      FileUtil.fullyDelete(testDir);
-    }
-  }
-}
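The ignored test above configures SCMContainerPlacementCapacity as the placement policy and expects allocation to succeed once node stats are reported. One common capacity-aware strategy, shown here as an illustration rather than the exact HDDS algorithm, is two-choice sampling: draw two candidates at random and keep the one with more free space, which biases placement toward emptier nodes:

import java.util.List;
import java.util.Random;

// Two-choice, capacity-biased selection sketch. 'free[i]' is the free
// space of nodes.get(i); both inputs are simplified stand-ins.
final class CapacityPlacementSketch {

  private final Random random = new Random();

  int chooseOne(List<String> nodes, long[] free) {
    int first = random.nextInt(nodes.size());
    int second = random.nextInt(nodes.size());
    // Keep whichever candidate has more room; ties go to the first draw.
    return free[first] >= free[second] ? first : second;
  }
}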
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
deleted file mode 100644
index 7657b54..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ /dev/null
@@ -1,227 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Set;
-import java.util.UUID;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.scm.HddsTestUtils;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
-import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .NodeReportFromDatanode;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.security.authentication.client
-    .AuthenticationException;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-/**
- * Test DeadNodeHandler.
- */
-public class TestDeadNodeHandler {
-
-  private StorageContainerManager scm;
-  private SCMNodeManager nodeManager;
-  private ContainerManager containerManager;
-  private NodeReportHandler nodeReportHandler;
-  private DeadNodeHandler deadNodeHandler;
-  private EventPublisher publisher;
-  private EventQueue eventQueue;
-  private String storageDir;
-
-  @Before
-  public void setup() throws IOException, AuthenticationException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    storageDir = GenericTestUtils.getTempPath(
-        TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID());
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
-    eventQueue = new EventQueue();
-    scm = HddsTestUtils.getScm(conf);
-    nodeManager = (SCMNodeManager) scm.getScmNodeManager();
-    SCMPipelineManager manager =
-        (SCMPipelineManager)scm.getPipelineManager();
-    PipelineProvider mockRatisProvider =
-        new MockRatisPipelineProvider(nodeManager, manager.getStateManager(),
-            conf);
-    manager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-        mockRatisProvider);
-    containerManager = scm.getContainerManager();
-    deadNodeHandler = new DeadNodeHandler(nodeManager,
-        Mockito.mock(PipelineManager.class), containerManager);
-    eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
-    publisher = Mockito.mock(EventPublisher.class);
-    nodeReportHandler = new NodeReportHandler(nodeManager);
-  }
-
-  @After
-  public void teardown() {
-    scm.stop();
-    scm.join();
-    FileUtil.fullyDelete(new File(storageDir));
-  }
-
-  @Test
-  public void testOnMessage() throws IOException, NodeNotFoundException {
-    //GIVEN
-    DatanodeDetails datanode1 = TestUtils.randomDatanodeDetails();
-    DatanodeDetails datanode2 = TestUtils.randomDatanodeDetails();
-    DatanodeDetails datanode3 = TestUtils.randomDatanodeDetails();
-
-    String storagePath = GenericTestUtils.getRandomizedTempPath()
-        .concat("/" + datanode1.getUuidString());
-
-    StorageReportProto storageOne = TestUtils.createStorageReport(
-        datanode1.getUuid(), storagePath, 100, 10, 90, null);
-
-    // The standalone pipeline now excludes nodes which are already used,
-    // which is the proper behavior. Adding 9 datanodes for now to make the
-    // test case happy.
-
-    nodeManager.register(datanode1,
-        TestUtils.createNodeReport(storageOne), null);
-    nodeManager.register(datanode2,
-        TestUtils.createNodeReport(storageOne), null);
-    nodeManager.register(datanode3,
-        TestUtils.createNodeReport(storageOne), null);
-
-    nodeManager.register(TestUtils.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null);
-    nodeManager.register(TestUtils.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null);
-    nodeManager.register(TestUtils.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null);
-
-    nodeManager.register(TestUtils.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null);
-    nodeManager.register(TestUtils.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null);
-    nodeManager.register(TestUtils.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null);
-
-    ContainerInfo container1 =
-        TestUtils.allocateContainer(containerManager);
-    ContainerInfo container2 =
-        TestUtils.allocateContainer(containerManager);
-    ContainerInfo container3 =
-        TestUtils.allocateContainer(containerManager);
-    ContainerInfo container4 =
-        TestUtils.allocateContainer(containerManager);
-
-    registerContainers(datanode1, container1, container2, container4);
-    registerContainers(datanode2, container1, container2);
-    registerContainers(datanode3, container3);
-
-    registerReplicas(containerManager, container1, datanode1, datanode2);
-    registerReplicas(containerManager, container2, datanode1, datanode2);
-    registerReplicas(containerManager, container3, datanode3);
-    registerReplicas(containerManager, container4, datanode1);
-
-    TestUtils.closeContainer(containerManager, container1.containerID());
-    TestUtils.closeContainer(containerManager, container2.containerID());
-    TestUtils.quasiCloseContainer(containerManager, container3.containerID());
-
-    deadNodeHandler.onMessage(datanode1, publisher);
-
-    Set<ContainerReplica> container1Replicas = containerManager
-        .getContainerReplicas(new ContainerID(container1.getContainerID()));
-    Assert.assertEquals(1, container1Replicas.size());
-    Assert.assertEquals(datanode2,
-        container1Replicas.iterator().next().getDatanodeDetails());
-
-    Set<ContainerReplica> container2Replicas = containerManager
-        .getContainerReplicas(new ContainerID(container2.getContainerID()));
-    Assert.assertEquals(1, container2Replicas.size());
-    Assert.assertEquals(datanode2,
-        container2Replicas.iterator().next().getDatanodeDetails());
-
-    Set<ContainerReplica> container3Replicas = containerManager
-            .getContainerReplicas(new ContainerID(container3.getContainerID()));
-    Assert.assertEquals(1, container3Replicas.size());
-    Assert.assertEquals(datanode3,
-        container3Replicas.iterator().next().getDatanodeDetails());
-  }
-
-  private void registerReplicas(ContainerManager contManager,
-      ContainerInfo container, DatanodeDetails... datanodes)
-      throws ContainerNotFoundException {
-    for (DatanodeDetails datanode : datanodes) {
-      contManager.updateContainerReplica(
-          new ContainerID(container.getContainerID()),
-          ContainerReplica.newBuilder()
-              .setContainerState(ContainerReplicaProto.State.OPEN)
-              .setContainerID(container.containerID())
-              .setDatanodeDetails(datanode).build());
-    }
-  }
-
-  /**
-   * Update the set of containers available on the given datanode.
-   * @param datanode - datanode to update.
-   * @param containers - containers hosted on that datanode.
-   * @throws NodeNotFoundException if the datanode is not registered.
-   */
-  private void registerContainers(DatanodeDetails datanode,
-      ContainerInfo... containers)
-      throws NodeNotFoundException {
-    nodeManager
-        .setContainers(datanode,
-            Arrays.stream(containers)
-                .map(container -> new ContainerID(container.getContainerID()))
-                .collect(Collectors.toSet()));
-  }
-
-  private NodeReportFromDatanode getNodeReport(DatanodeDetails dn,
-      StorageReportProto... reports) {
-    NodeReportProto nodeReportProto = TestUtils.createNodeReport(reports);
-    return new NodeReportFromDatanode(dn, nodeReportProto);
-  }
-}
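The assertions above reduce to one invariant: after the dead-node event for datanode1, its replicas vanish from every container it hosted, while replicas held by surviving nodes remain. A minimal sketch of that invariant (the map below is a simplified stand-in for ContainerManager's replica bookkeeping):

import java.util.Map;
import java.util.Set;

// containerId -> UUIDs of datanodes currently holding a replica.
final class DeadNodeSketch {

  private final Map<Long, Set<String>> replicasByContainer;

  DeadNodeSketch(Map<Long, Set<String>> replicasByContainer) {
    this.replicasByContainer = replicasByContainer;
  }

  // On a dead-node event, drop the dead datanode from every container's
  // replica set; replicas on surviving datanodes are left untouched.
  void onDeadNode(String deadDatanodeUuid) {
    for (Set<String> holders : replicasByContainer.values()) {
      holders.remove(deadDatanodeUuid);
    }
  }
}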
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
deleted file mode 100644
index 88de27d..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.node;
-
-import java.io.IOException;
-import java.util.UUID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.server.events.Event;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Test for the Node Report Handler.
- */
-public class TestNodeReportHandler implements EventPublisher {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(TestNodeReportHandler.class);
-  private NodeReportHandler nodeReportHandler;
-  private SCMNodeManager nodeManager;
-  private String storagePath = GenericTestUtils.getRandomizedTempPath()
-      .concat("/" + UUID.randomUUID().toString());
-
-  @Before
-  public void resetEventCollector() throws IOException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class);
-    Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1");
-    nodeManager =
-        new SCMNodeManager(conf, storageConfig, new EventQueue(), Mockito.mock(
-            NetworkTopology.class));
-    nodeReportHandler = new NodeReportHandler(nodeManager);
-  }
-
-  @Test
-  public void testNodeReport() throws IOException {
-    DatanodeDetails dn = TestUtils.randomDatanodeDetails();
-    StorageReportProto storageOne = TestUtils
-        .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null);
-
-    SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn);
-    Assert.assertNull(nodeMetric);
-
-    nodeManager.register(dn, getNodeReport(dn, storageOne).getReport(), null);
-    nodeMetric = nodeManager.getNodeStat(dn);
-
-    Assert.assertEquals(100L, (long) nodeMetric.get().getCapacity().get());
-    Assert.assertEquals(90L, (long) nodeMetric.get().getRemaining().get());
-    Assert.assertEquals(10L, (long) nodeMetric.get().getScmUsed().get());
-
-    StorageReportProto storageTwo = TestUtils
-        .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null);
-    nodeReportHandler.onMessage(
-        getNodeReport(dn, storageOne, storageTwo), this);
-    nodeMetric = nodeManager.getNodeStat(dn);
-
-    Assert.assertEquals(200L, (long) nodeMetric.get().getCapacity().get());
-    Assert.assertEquals(180L, (long) nodeMetric.get().getRemaining().get());
-    Assert.assertEquals(20L, (long) nodeMetric.get().getScmUsed().get());
-
-  }
-
-  private NodeReportFromDatanode getNodeReport(DatanodeDetails dn,
-      StorageReportProto... reports) {
-    NodeReportProto nodeReportProto = TestUtils.createNodeReport(reports);
-    return new NodeReportFromDatanode(dn, nodeReportProto);
-  }
-
-  @Override
-  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
-      EVENT_TYPE event, PAYLOAD payload) {
-    LOG.info("Event is published: {}", payload);
-  }
-}
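testNodeReport checks the aggregation arithmetic: a node's stats are the sums over its storage reports, so one 100/10/90 volume report gives capacity 100, used 10, remaining 90, and a follow-up report carrying two such volumes gives 200/20/180. A sketch of that summing (the fields and report shape are assumptions, not the SCMNodeStat API):

// Sum capacity/used/remaining across a node's storage reports.
final class NodeStatSketch {

  long capacity;
  long used;
  long remaining;

  // Each report row is {capacity, used, remaining} for one volume.
  void apply(long[][] reports) {
    capacity = 0;
    used = 0;
    remaining = 0;
    for (long[] report : reports) {
      capacity += report[0];
      used += report[1];
      remaining += report[2];
    }
  }
}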
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
deleted file mode 100644
index db76d66..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ /dev/null
@@ -1,1225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.scm.HddsTestUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.PathUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static java.util.concurrent.TimeUnit.SECONDS;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
-    .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
-    .NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DEADNODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test the SCM Node Manager class.
- */
-public class TestSCMNodeManager {
-
-  private File testDir;
-  private StorageContainerManager scm;
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  @BeforeClass
-  public static void init() throws IOException {
-  }
-
-  @Before
-  public void setup() {
-    testDir = PathUtils.getTestDir(
-        TestSCMNodeManager.class);
-  }
-
-  @After
-  public void cleanup() {
-    if (scm != null) {
-      scm.stop();
-      scm.join();
-    }
-    FileUtil.fullyDelete(testDir);
-  }
-
-  /**
-   * Returns a new copy of Configuration.
-   *
-   * @return Config
-   */
-  OzoneConfiguration getConf() {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        testDir.getAbsolutePath());
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
-        TimeUnit.MILLISECONDS);
-    return conf;
-  }
-
-  /**
-   * Creates a NodeManager.
-   *
-   * @param config - Config for the node manager.
-   * @return SCMNodeManager
-   * @throws IOException
-   */
-
-  SCMNodeManager createNodeManager(OzoneConfiguration config)
-      throws IOException, AuthenticationException {
-    scm = HddsTestUtils.getScm(config);
-    return (SCMNodeManager) scm.getScmNodeManager();
-  }
-
-  /**
-   * Tests that the node manager handles heartbeats correctly and comes out
-   * of safe mode.
-   *
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws TimeoutException
-   */
-  @Test
-  public void testScmHeartbeat()
-      throws IOException, InterruptedException, AuthenticationException {
-
-    try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
-      int registeredNodes = 5;
-      // Send some heartbeats from different nodes.
-      for (int x = 0; x < registeredNodes; x++) {
-        DatanodeDetails datanodeDetails = TestUtils
-            .createRandomDatanodeAndRegister(nodeManager);
-        nodeManager.processHeartbeat(datanodeDetails);
-      }
-
-      //TODO: wait for heartbeat to be processed
-      Thread.sleep(4 * 1000);
-      assertTrue("Heartbeat thread should have picked up the" +
-              "scheduled heartbeats.",
-          nodeManager.getAllNodes().size() == registeredNodes);
-    }
-  }
-
-  /**
-   * Asserts that if we send no heartbeats, the node manager stays in safe mode.
-   *
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws TimeoutException
-   */
-  @Test
-  public void testScmNoHeartbeats()
-      throws IOException, InterruptedException, AuthenticationException {
-
-    try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
-      //TODO: wait for heartbeat to be processed
-      Thread.sleep(4 * 1000);
-      assertTrue("No heartbeats, 0 nodes should be registered",
-          nodeManager.getAllNodes().size() == 0);
-    }
-  }
-
-  /**
-   * Asserts that adding heartbeats after shutdown does not work. This implies
-   * that heartbeat thread has been shutdown safely by closing the node
-   * manager.
-   *
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws TimeoutException
-   */
-  @Test
-  public void testScmShutdown()
-      throws IOException, InterruptedException, AuthenticationException {
-    OzoneConfiguration conf = getConf();
-    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
-        100, TimeUnit.MILLISECONDS);
-    SCMNodeManager nodeManager = createNodeManager(conf);
-    DatanodeDetails datanodeDetails = TestUtils
-        .createRandomDatanodeAndRegister(nodeManager);
-    nodeManager.close();
-
-    // These should never be processed.
-    nodeManager.processHeartbeat(datanodeDetails);
-
-    // Let us just wait for 2 seconds to prove that HBs are not processed.
-    Thread.sleep(2 * 1000);
-
-    //TODO: add assertion
-  }
-
-  /**
-   * Asserts that we detect as many healthy nodes as we have generated
-   * heartbeats for.
-   *
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws TimeoutException
-   */
-  @Test
-  public void testScmHealthyNodeCount()
-      throws IOException, InterruptedException, AuthenticationException {
-    OzoneConfiguration conf = getConf();
-    final int count = 10;
-
-    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-
-      for (int x = 0; x < count; x++) {
-        DatanodeDetails datanodeDetails = TestUtils
-            .createRandomDatanodeAndRegister(nodeManager);
-        nodeManager.processHeartbeat(datanodeDetails);
-      }
-      //TODO: wait for heartbeat to be processed
-      Thread.sleep(4 * 1000);
-      assertEquals(count, nodeManager.getNodeCount(HEALTHY));
-    }
-  }
-
-  /**
-   * Asserts that a stale-node interval more than 5 times the heartbeat
-   * processing interval is accepted as a sane value.
-   *
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws TimeoutException
-   */
-  @Test
-  public void testScmSanityOfUserConfig2()
-      throws IOException, AuthenticationException {
-    OzoneConfiguration conf = getConf();
-    final int interval = 100;
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS);
-
-    // This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL
-    // and 3 times more than HDDS_HEARTBEAT_INTERVAL.
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000, MILLISECONDS);
-    createNodeManager(conf).close();
-  }
-
-  /**
-   * Asserts that a single node moves from Healthy to stale node, then from
-   * stale node to dead node if it misses enough heartbeats.
-   *
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws TimeoutException
-   */
-  @Test
-  public void testScmDetectStaleAndDeadNode()
-      throws IOException, InterruptedException, AuthenticationException {
-    final int interval = 100;
-    final int nodeCount = 10;
-
-    OzoneConfiguration conf = getConf();
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
-        MILLISECONDS);
-    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
-    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
-
-
-    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      List<DatanodeDetails> nodeList = createNodeSet(nodeManager, nodeCount);
-
-
-      DatanodeDetails staleNode = TestUtils.createRandomDatanodeAndRegister(
-          nodeManager);
-
-      // Heartbeat once
-      nodeManager.processHeartbeat(staleNode);
-
-      // Heartbeat all other nodes.
-      for (DatanodeDetails dn : nodeList) {
-        nodeManager.processHeartbeat(dn);
-      }
-
-      // Wait for 2 seconds and heartbeat the good nodes again.
-      Thread.sleep(2 * 1000);
-
-      for (DatanodeDetails dn : nodeList) {
-        nodeManager.processHeartbeat(dn);
-      }
-
-      // Wait another 2 seconds (4 seconds in total) to make sure that the
-      // node moves into the stale state.
-      Thread.sleep(2 * 1000);
-      List<DatanodeDetails> staleNodeList = nodeManager.getNodes(STALE);
-      assertEquals("Expected to find 1 stale node",
-          1, nodeManager.getNodeCount(STALE));
-      assertEquals("Expected to find 1 stale node",
-          1, staleNodeList.size());
-      assertEquals("Stale node is not the expected ID", staleNode
-          .getUuid(), staleNodeList.get(0).getUuid());
-      Thread.sleep(1000);
-
-      // heartbeat good nodes again.
-      for (DatanodeDetails dn : nodeList) {
-        nodeManager.processHeartbeat(dn);
-      }
-
-      // 6 seconds is the dead window for this test, so we wait a total of
-      // 7 seconds to make sure that the node moves into the dead state.
-      Thread.sleep(2 * 1000);
-
-      // The stale node should have moved out of the stale state by now.
-      staleNodeList = nodeManager.getNodes(STALE);
-      assertEquals("Expected to find no stale nodes",
-          0, nodeManager.getNodeCount(STALE));
-      assertEquals("Expected to find no stale nodes",
-          0, staleNodeList.size());
-
-      // Check for the dead node now.
-      List<DatanodeDetails> deadNodeList = nodeManager.getNodes(DEAD);
-      assertEquals("Expected to find 1 dead node", 1,
-          nodeManager.getNodeCount(DEAD));
-      assertEquals("Expected to find 1 dead node",
-          1, deadNodeList.size());
-      assertEquals("Dead node is not the expected ID", staleNode
-          .getUuid(), deadNodeList.get(0).getUuid());
-    }
-  }
-
-  /**
-   * Simulate a JVM Pause by pausing the health check process
-   * Ensure that none of the nodes with heartbeats become Dead or Stale.
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws AuthenticationException
-   */
-  @Test
-  public void testScmHandleJvmPause()
-      throws IOException, InterruptedException, AuthenticationException {
-    final int healthCheckInterval = 200; // milliseconds
-    final int heartbeatInterval = 1; // seconds
-    final int staleNodeInterval = 3; // seconds
-    final int deadNodeInterval = 6; // seconds
-    ScheduledFuture schedFuture;
-
-    OzoneConfiguration conf = getConf();
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
-        healthCheckInterval, MILLISECONDS);
-    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL,
-        heartbeatInterval, SECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL,
-        staleNodeInterval, SECONDS);
-    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL,
-        deadNodeInterval, SECONDS);
-
-    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      DatanodeDetails node1 =
-          TestUtils.createRandomDatanodeAndRegister(nodeManager);
-      DatanodeDetails node2 =
-          TestUtils.createRandomDatanodeAndRegister(nodeManager);
-
-      nodeManager.processHeartbeat(node1);
-      nodeManager.processHeartbeat(node2);
-
-      // Sleep so that heartbeat processing thread gets to run.
-      Thread.sleep(1000);
-
-      //Assert all nodes are healthy.
-      assertEquals(2, nodeManager.getAllNodes().size());
-      assertEquals(2, nodeManager.getNodeCount(HEALTHY));
-
-      /**
-       * Simulate a JVM Pause and subsequent handling in following steps:
-       * Step 1 : stop heartbeat check process for stale node interval
-       * Step 2 : resume heartbeat check
-       * Step 3 : wait for 1 iteration of heartbeat check thread
-       * Step 4 : retrieve the state of all nodes - assert all are HEALTHY
-       * Step 5 : heartbeat for node1
-       * [TODO : what if there is scheduling delay of test thread in Step 5?]
-       * Step 6 : wait for some time to allow iterations of check process
-       * Step 7 : retrieve the state of all nodes -  assert node2 is STALE
-       * and node1 is HEALTHY
-       */
-
-      // Step 1 : stop health check process (simulate JVM pause)
-      nodeManager.pauseHealthCheck();
-      Thread.sleep(MILLISECONDS.convert(staleNodeInterval, SECONDS));
-
-      // Step 2 : resume health check
-      assertTrue("Unexpected, already skipped heartbeat checks",
-          (nodeManager.getSkippedHealthChecks() == 0));
-      schedFuture = nodeManager.unpauseHealthCheck();
-
-      // Step 3 : wait for 1 iteration of health check
-      try {
-        schedFuture.get();
-        assertTrue("We did not skip any heartbeat checks",
-            nodeManager.getSkippedHealthChecks() > 0);
-      } catch (ExecutionException e) {
-        assertEquals("Unexpected exception waiting for Scheduled Health Check",
-            0, 1);
-      }
-
-      // Step 4 : all nodes should still be HEALTHY
-      assertEquals(2, nodeManager.getAllNodes().size());
-      assertEquals(2, nodeManager.getNodeCount(HEALTHY));
-
-      // Step 5 : heartbeat for node1
-      nodeManager.processHeartbeat(node1);
-
-      // Step 6 : wait for health check process to run
-      Thread.sleep(1000);
-
-      // Step 7 : node2 should transition to STALE
-      assertEquals(1, nodeManager.getNodeCount(HEALTHY));
-      assertEquals(1, nodeManager.getNodeCount(STALE));
-    }
-  }
-
-  /**
-   * Check for NPE when datanodeDetails is passed null for sendHeartbeat.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testScmCheckForErrorOnNullDatanodeDetails()
-      throws IOException, AuthenticationException {
-    try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
-      nodeManager.processHeartbeat(null);
-    } catch (NullPointerException npe) {
-      GenericTestUtils.assertExceptionContains("Heartbeat is missing " +
-          "DatanodeDetails.", npe);
-    }
-  }
-
-  /**
-   * Asserts that a dead node, a stale node and healthy nodes co-exist. The
-   * counts, lists and node IDs match the expected node states.
-   * <p/>
-   * This test is pretty complicated because it explores all states of Node
-   * manager in a single test. Please read thru the comments to get an idea of
-   * the current state of the node Manager.
-   * <p/>
-   * This test is written like a state machine to avoid threads and concurrency
-   * issues. This test is replicated below with the use of threads. Avoiding
-   * threads makes it easy to debug the state machine.
-   *
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws TimeoutException
-   */
-  /**
-   * These values are very important. Here is what they mean so you don't
-   * have to look them up while reading this code.
-   *
-   *  OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - This is the frequency of the
-   *  HB processing thread that is running in the SCM. This thread must run
-   *  for the SCM to process the heartbeats.
-   *
-   *  OZONE_SCM_HEARTBEAT_INTERVAL - This is the frequency at which
-   *  datanodes will send heartbeats to SCM. Please note: This is the only
-   *  config value for the node manager that is specified in seconds. We
-   *  don't want SCM heartbeat resolution to be finer than seconds.
-   *  In this test it is not used, but we are forced to set it because we
-   *  have validation code that checks Stale Node interval and Dead Node
-   *  interval is larger than the value of
-   *  OZONE_SCM_HEARTBEAT_INTERVAL.
-   *
-   *  OZONE_SCM_STALENODE_INTERVAL - This is the time that must elapse
-   *  from the last heartbeat for us to mark a node as stale. In this test
-   *  we set that to 3. That is, if a node has not heartbeated SCM for the
-   *  last 3 seconds, we will mark it as stale.
-   *
-   *  OZONE_SCM_DEADNODE_INTERVAL - This is the time that must elapse
-   *  from the last heartbeat for a node to be marked dead. We have an
-   *  additional constraint that this must be at least 2 times bigger than
-   *  Stale node Interval.
-   *
-   *  With these we are trying to explore the state of this cluster with
-   *  various timeouts. Each section is commented so that you can keep
-   *  track of the state of the cluster nodes.
-   *
-   */
-
-  @Test
-  public void testScmClusterIsInExpectedState1()
-      throws IOException, InterruptedException, AuthenticationException {
-    OzoneConfiguration conf = getConf();
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
-        MILLISECONDS);
-    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
-    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
-
-
-    /**
-     * Cluster state: Healthy: All nodes are heartbeating like normal.
-     */
-    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      DatanodeDetails healthyNode =
-          TestUtils.createRandomDatanodeAndRegister(nodeManager);
-      DatanodeDetails staleNode =
-          TestUtils.createRandomDatanodeAndRegister(nodeManager);
-      DatanodeDetails deadNode =
-          TestUtils.createRandomDatanodeAndRegister(nodeManager);
-      nodeManager.processHeartbeat(healthyNode);
-      nodeManager.processHeartbeat(staleNode);
-      nodeManager.processHeartbeat(deadNode);
-
-      // Sleep so that heartbeat processing thread gets to run.
-      Thread.sleep(500);
-
-      //Assert all nodes are healthy.
-      assertEquals(3, nodeManager.getAllNodes().size());
-      assertEquals(3, nodeManager.getNodeCount(HEALTHY));
-
-      /**
-       * Cluster state: Quiesced: We are going to sleep for 3 seconds, which
-       * means that no node is heartbeating. All nodes should move to Stale.
-       */
-      Thread.sleep(3 * 1000);
-      assertEquals(3, nodeManager.getAllNodes().size());
-      assertEquals(3, nodeManager.getNodeCount(STALE));
-
-
-      /**
-       * Cluster State : Move healthy node back to healthy state, move other 2
-       * nodes to Stale State.
-       *
-       * We heartbeat healthy node after 1 second and let other 2 nodes elapse
-       * the 3 second windows.
-       */
-
-      nodeManager.processHeartbeat(healthyNode);
-      nodeManager.processHeartbeat(staleNode);
-      nodeManager.processHeartbeat(deadNode);
-
-      Thread.sleep(1500);
-      nodeManager.processHeartbeat(healthyNode);
-      Thread.sleep(2 * 1000);
-      assertEquals(1, nodeManager.getNodeCount(HEALTHY));
-
-
-      // 3.5 seconds from the last heartbeat for the stale and dead nodes, so
-      // those 2 nodes must move to the stale state and the healthy node must
-      // remain in the healthy state.
-      List<DatanodeDetails> healthyList = nodeManager.getNodes(HEALTHY);
-      assertEquals("Expected one healthy node", 1, healthyList.size());
-      assertEquals("Healthy node is not the expected ID", healthyNode
-          .getUuid(), healthyList.get(0).getUuid());
-
-      assertEquals(2, nodeManager.getNodeCount(STALE));
-
-      /**
-       * Cluster State: Allow healthyNode to remain in healthy state and
-       * staleNode to move to stale state and deadNode to move to dead state.
-       */
-
-      nodeManager.processHeartbeat(healthyNode);
-      nodeManager.processHeartbeat(staleNode);
-      Thread.sleep(1500);
-      nodeManager.processHeartbeat(healthyNode);
-      Thread.sleep(2 * 1000);
-
-      // 3.5 seconds have elapsed for stale node, so it moves into Stale.
-      // 7 seconds have elapsed for dead node, so it moves into dead.
-      // 2 seconds have elapsed for healthy node, so it stays in healthy state.
-      healthyList = nodeManager.getNodes(HEALTHY);
-      List<DatanodeDetails> staleList = nodeManager.getNodes(STALE);
-      List<DatanodeDetails> deadList = nodeManager.getNodes(DEAD);
-
-      assertEquals(3, nodeManager.getAllNodes().size());
-      assertEquals(1, nodeManager.getNodeCount(HEALTHY));
-      assertEquals(1, nodeManager.getNodeCount(STALE));
-      assertEquals(1, nodeManager.getNodeCount(DEAD));
-
-      assertEquals("Expected one healthy node",
-          1, healthyList.size());
-      assertEquals("Healthy node is not the expected ID", healthyNode
-          .getUuid(), healthyList.get(0).getUuid());
-
-      assertEquals("Expected one stale node",
-          1, staleList.size());
-      assertEquals("Stale node is not the expected ID", staleNode
-          .getUuid(), staleList.get(0).getUuid());
-
-      assertEquals("Expected one dead node",
-          1, deadList.size());
-      assertEquals("Dead node is not the expected ID", deadNode
-          .getUuid(), deadList.get(0).getUuid());
-      /**
-       * Cluster State : let us heartbeat all the nodes and verify that we get
-       * back all the nodes in healthy state.
-       */
-      nodeManager.processHeartbeat(healthyNode);
-      nodeManager.processHeartbeat(staleNode);
-      nodeManager.processHeartbeat(deadNode);
-      Thread.sleep(500);
-      //Assert all nodes are healthy.
-      assertEquals(3, nodeManager.getAllNodes().size());
-      assertEquals(3, nodeManager.getNodeCount(HEALTHY));
-    }
-  }
-
-  /**
-   * Heartbeat a given set of nodes at a specified frequency.
-   *
-   * @param manager       - Node Manager
-   * @param list          - List of datanodeIDs
-   * @param sleepDuration - Duration to sleep between heartbeats.
-   * @throws InterruptedException
-   */
-  private void heartbeatNodeSet(SCMNodeManager manager,
-                                List<DatanodeDetails> list,
-                                int sleepDuration) throws InterruptedException {
-    while (!Thread.currentThread().isInterrupted()) {
-      for (DatanodeDetails dn : list) {
-        manager.processHeartbeat(dn);
-      }
-      Thread.sleep(sleepDuration);
-    }
-  }
-
-  /**
-   * Create and register a set of nodes.
-   *
-   * @param count  - number of nodes.
-   * @return List of Nodes.
-   */
-  private List<DatanodeDetails> createNodeSet(SCMNodeManager nodeManager, int
-      count) {
-    List<DatanodeDetails> list = new ArrayList<>();
-    for (int x = 0; x < count; x++) {
-      DatanodeDetails datanodeDetails = TestUtils
-          .createRandomDatanodeAndRegister(nodeManager);
-      list.add(datanodeDetails);
-    }
-    return list;
-  }
-
-  /**
-   * Tells us whether we found the right number of nodes in a given state.
-   *
-   * @param nodeManager - node manager
-   * @param count       - number of nodes to look for.
-   * @return true if we found the expected number.
-   */
-  private boolean findNodes(NodeManager nodeManager, int count,
-      HddsProtos.NodeState state) {
-    return count == nodeManager.getNodeCount(state);
-  }
-
-  /**
-   * Asserts that we can create a set of nodes that send its heartbeats from
-   * different threads and NodeManager behaves as expected.
-   *
-   * @throws IOException
-   * @throws InterruptedException
-   */
-  @Test
-  public void testScmClusterIsInExpectedState2()
-      throws IOException, InterruptedException, TimeoutException,
-      AuthenticationException {
-    final int healthyCount = 5000;
-    final int staleCount = 100;
-    final int deadCount = 10;
-
-    OzoneConfiguration conf = getConf();
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
-        MILLISECONDS);
-    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
-    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
-
-
-    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      List<DatanodeDetails> healthyNodeList = createNodeSet(nodeManager,
-          healthyCount);
-      List<DatanodeDetails> staleNodeList = createNodeSet(nodeManager,
-          staleCount);
-      List<DatanodeDetails> deadNodeList = createNodeSet(nodeManager,
-          deadCount);
-
-      Runnable healthyNodeTask = () -> {
-        try {
-          // 2 second heartbeat makes these nodes stay healthy.
-          heartbeatNodeSet(nodeManager, healthyNodeList, 2 * 1000);
-        } catch (InterruptedException ignored) {
-        }
-      };
-
-      Runnable staleNodeTask = () -> {
-        try {
-          // 4 second heartbeat makes these nodes go to stale and back to
-          // healthy again.
-          heartbeatNodeSet(nodeManager, staleNodeList, 4 * 1000);
-        } catch (InterruptedException ignored) {
-        }
-      };
-
-
-      // No thread here; heartbeat the node manager just once, so that these
-      // nodes will eventually be marked as dead.
-      for (DatanodeDetails dn : deadNodeList) {
-        nodeManager.processHeartbeat(dn);
-      }
-
-
-      Thread thread1 = new Thread(healthyNodeTask);
-      thread1.setDaemon(true);
-      thread1.start();
-
-
-      Thread thread2 = new Thread(staleNodeTask);
-      thread2.setDaemon(true);
-      thread2.start();
-
-      Thread.sleep(10 * 1000);
-
-      // Assert all healthy nodes are healthy now. This has to be a
-      // greater-than check since stale nodes can be healthy at check time.
-
-      assertTrue(nodeManager.getNodeCount(HEALTHY) >= healthyCount);
-
-      assertEquals(deadCount, nodeManager.getNodeCount(DEAD));
-
-      List<DatanodeDetails> deadList = nodeManager.getNodes(DEAD);
-
-      for (DatanodeDetails node : deadList) {
-        assertTrue(deadNodeList.contains(node));
-      }
-
-
-
-      // Checking stale nodes is tricky since they have to move between
-      // healthy and stale to avoid becoming dead nodes. So we search for
-      // that state for a while; if we don't find that state, waitFor will
-      // throw.
-      GenericTestUtils.waitFor(() -> findNodes(nodeManager, staleCount, STALE),
-          500, 4 * 1000);
-
-      thread1.interrupt();
-      thread2.interrupt();
-    }
-  }
-
-  /**
-   * Asserts that we can handle 6000+ nodes heartbeating SCM.
-   *
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws TimeoutException
-   */
-  @Test
-  public void testScmCanHandleScale()
-      throws IOException, InterruptedException, TimeoutException,
-      AuthenticationException {
-    final int healthyCount = 3000;
-    final int staleCount = 3000;
-    OzoneConfiguration conf = getConf();
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
-        MILLISECONDS);
-    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1,
-        SECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000,
-        MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6 * 1000,
-        MILLISECONDS);
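-
-    // Same state machine as the previous test: nodes heartbeating every 2s
-    // stay healthy, while a 4s cadence keeps nodes oscillating between stale
-    // and healthy.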
-
-    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      List<DatanodeDetails> healthyList = createNodeSet(nodeManager,
-          healthyCount);
-      List<DatanodeDetails> staleList = createNodeSet(nodeManager,
-          staleCount);
-
-      Runnable healthyNodeTask = () -> {
-        try {
-          heartbeatNodeSet(nodeManager, healthyList, 2 * 1000);
-        } catch (InterruptedException ignored) {
-
-        }
-      };
-
-      Runnable staleNodeTask = () -> {
-        try {
-          heartbeatNodeSet(nodeManager, staleList, 4 * 1000);
-        } catch (InterruptedException ignored) {
-        }
-      };
-
-      Thread thread1 = new Thread(healthyNodeTask);
-      thread1.setDaemon(true);
-      thread1.start();
-
-      Thread thread2 = new Thread(staleNodeTask);
-      thread2.setDaemon(true);
-      thread2.start();
-      Thread.sleep(3 * 1000);
-
-      GenericTestUtils.waitFor(() -> findNodes(nodeManager, staleCount, STALE),
-          500, 20 * 1000);
-      assertEquals("Node count mismatch",
-          healthyCount + staleCount, nodeManager.getAllNodes().size());
-
-      thread1.interrupt();
-      thread2.interrupt();
-    }
-  }
-
-  /**
-   * Test multiple nodes sending initial heartbeat with their node report.
-   *
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws TimeoutException
-   */
-  @Test
-  @Ignore
-  // TODO: Enable this after we implement NodeReportEvent handler.
-  public void testScmStatsFromNodeReport()
-      throws IOException, InterruptedException, AuthenticationException {
-    OzoneConfiguration conf = getConf();
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000,
-        MILLISECONDS);
-    final int nodeCount = 10;
-    final long capacity = 2000;
-    final long used = 100;
-    final long remaining = capacity - used;
-
-    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      for (int x = 0; x < nodeCount; x++) {
-        DatanodeDetails datanodeDetails = TestUtils
-            .createRandomDatanodeAndRegister(nodeManager);
-        UUID dnId = datanodeDetails.getUuid();
-        long free = capacity - used;
-        String storagePath = testDir.getAbsolutePath() + "/" + dnId;
-        StorageReportProto report = TestUtils
-            .createStorageReport(dnId, storagePath, capacity, used, free, null);
-        nodeManager.processHeartbeat(datanodeDetails);
-      }
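-      // Note: the storage report built above is not delivered to SCM yet;
-      // wiring node reports in is what the TODO on this test waits for.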
-      //TODO: wait for heartbeat to be processed
-      Thread.sleep(4 * 1000);
-      assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
-      assertEquals(capacity * nodeCount, (long) nodeManager.getStats()
-          .getCapacity().get());
-      assertEquals(used * nodeCount, (long) nodeManager.getStats()
-          .getScmUsed().get());
-      assertEquals(remaining * nodeCount, (long) nodeManager.getStats()
-          .getRemaining().get());
-    }
-  }
-
-  /**
-   * Test single-node stat updates based on node reports across different
-   * heartbeat states (healthy, stale and dead).
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws TimeoutException
-   */
-  @Test
-  @Ignore
-  // TODO: Enable this after we implement NodeReportEvent handler.
-  public void testScmNodeReportUpdate()
-      throws IOException, InterruptedException, TimeoutException,
-      AuthenticationException {
-    OzoneConfiguration conf = getConf();
-    final int heartbeatCount = 5;
-    final int nodeCount = 1;
-    final int interval = 100;
-
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
-        MILLISECONDS);
-    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
-    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
-
-    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      DatanodeDetails datanodeDetails =
-          TestUtils.createRandomDatanodeAndRegister(nodeManager);
-      final long capacity = 2000;
-      final long usedPerHeartbeat = 100;
-      UUID dnId = datanodeDetails.getUuid();
-      for (int x = 0; x < heartbeatCount; x++) {
-        long scmUsed = x * usedPerHeartbeat;
-        long remaining = capacity - scmUsed;
-        String storagePath = testDir.getAbsolutePath() + "/" + dnId;
-        StorageReportProto report = TestUtils
-            .createStorageReport(dnId, storagePath, capacity, scmUsed,
-                remaining, null);
-
-        nodeManager.processHeartbeat(datanodeDetails);
-        Thread.sleep(100);
-      }
-
-      final long expectedScmUsed = usedPerHeartbeat * (heartbeatCount - 1);
-      final long expectedRemaining = capacity - expectedScmUsed;
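-
-      // x runs from 0 to heartbeatCount - 1, so the final report carries
-      // scmUsed = usedPerHeartbeat * (heartbeatCount - 1); that is the value
-      // SCM is expected to converge to.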
-
-      GenericTestUtils.waitFor(
-          () -> nodeManager.getStats().getScmUsed().get() == expectedScmUsed,
-          100, 4 * 1000);
-
-      long foundCapacity = nodeManager.getStats().getCapacity().get();
-      assertEquals(capacity, foundCapacity);
-
-      long foundScmUsed = nodeManager.getStats().getScmUsed().get();
-      assertEquals(expectedScmUsed, foundScmUsed);
-
-      long foundRemaining = nodeManager.getStats().getRemaining().get();
-      assertEquals(expectedRemaining, foundRemaining);
-
-      // Test NodeManager#getNodeStats
-      assertEquals(nodeCount, nodeManager.getNodeStats().size());
-      long nodeCapacity = nodeManager.getNodeStat(datanodeDetails).get()
-          .getCapacity().get();
-      assertEquals(capacity, nodeCapacity);
-
-      foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get().getScmUsed()
-          .get();
-      assertEquals(expectedScmUsed, foundScmUsed);
-
-      foundRemaining = nodeManager.getNodeStat(datanodeDetails).get()
-          .getRemaining().get();
-      assertEquals(expectedRemaining, foundRemaining);
-
-      // Compare the result from
-      // NodeManager#getNodeStats and NodeManager#getNodeStat
-      SCMNodeStat stat1 = nodeManager.getNodeStats().
-          get(datanodeDetails.getUuid());
-      SCMNodeStat stat2 = nodeManager.getNodeStat(datanodeDetails).get();
-      assertEquals(stat1, stat2);
-
-      // Wait up to 4s so that the node becomes stale.
-      // Verify that the usage info is unchanged.
-      GenericTestUtils.waitFor(
-          () -> nodeManager.getNodeCount(STALE) == 1, 100,
-          4 * 1000);
-      assertEquals(nodeCount, nodeManager.getNodeStats().size());
-
-      foundCapacity = nodeManager.getNodeStat(datanodeDetails).get()
-          .getCapacity().get();
-      assertEquals(capacity, foundCapacity);
-      foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get()
-          .getScmUsed().get();
-      assertEquals(expectedScmUsed, foundScmUsed);
-
-      foundRemaining = nodeManager.getNodeStat(datanodeDetails).get().
-          getRemaining().get();
-      assertEquals(expectedRemaining, foundRemaining);
-
-      // Wait up to 4 more seconds so the node becomes dead.
-      // Verify that the usage info is updated.
-      GenericTestUtils.waitFor(
-          () -> nodeManager.getNodeCount(DEAD) == 1, 100,
-          4 * 1000);
-
-      assertEquals(0, nodeManager.getNodeStats().size());
-      foundCapacity = nodeManager.getStats().getCapacity().get();
-      assertEquals(0, foundCapacity);
-
-      foundScmUsed = nodeManager.getStats().getScmUsed().get();
-      assertEquals(0, foundScmUsed);
-
-      foundRemaining = nodeManager.getStats().getRemaining().get();
-      assertEquals(0, foundRemaining);
-
-      nodeManager.processHeartbeat(datanodeDetails);
-
-      // Wait up to 5 seconds so that the dead node becomes healthy again.
-      // Verify that the usage info is updated.
-      GenericTestUtils.waitFor(
-          () -> nodeManager.getNodeCount(HEALTHY) == 1,
-          100, 5 * 1000);
-      GenericTestUtils.waitFor(
-          () -> nodeManager.getStats().getScmUsed().get() == expectedScmUsed,
-          100, 4 * 1000);
-      assertEquals(nodeCount, nodeManager.getNodeStats().size());
-      foundCapacity = nodeManager.getNodeStat(datanodeDetails).get()
-          .getCapacity().get();
-      assertEquals(capacity, foundCapacity);
-      foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get().getScmUsed()
-          .get();
-      assertEquals(expectedScmUsed, foundScmUsed);
-      foundRemaining = nodeManager.getNodeStat(datanodeDetails).get()
-          .getRemaining().get();
-      assertEquals(expectedRemaining, foundRemaining);
-    }
-  }
-
-  @Test
-  public void testHandlingSCMCommandEvent()
-      throws IOException, AuthenticationException {
-    OzoneConfiguration conf = getConf();
-    conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
-        100, TimeUnit.MILLISECONDS);
-
-    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-    UUID dnId = datanodeDetails.getUuid();
-    String storagePath = testDir.getAbsolutePath() + "/" + dnId;
-    StorageReportProto report =
-        TestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null);
-
-    EventQueue eq = new EventQueue();
-    try (SCMNodeManager nodemanager = createNodeManager(conf)) {
-      eq.addHandler(DATANODE_COMMAND, nodemanager);
-
-      nodemanager
-          .register(datanodeDetails, TestUtils.createNodeReport(report),
-                  TestUtils.getRandomPipelineReports());
-      eq.fireEvent(DATANODE_COMMAND,
-          new CommandForDatanode<>(datanodeDetails.getUuid(),
-              new CloseContainerCommand(1L,
-                  PipelineID.randomId())));
-
-      eq.processAll(1000L);
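-      // processHeartbeat returns the commands queued for this datanode, so
-      // the CloseContainerCommand fired above is expected back here.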
-      List<SCMCommand> command =
-          nodemanager.processHeartbeat(datanodeDetails);
-      Assert.assertEquals(1, command.size());
-      Assert.assertEquals(CloseContainerCommand.class,
-          command.get(0).getClass());
-    } catch (IOException e) {
-      e.printStackTrace();
-      throw e;
-    }
-  }
-
-  /**
-   * Test adding a node to the network topology during node registration.
-   * The datanode uses its IP address to resolve its network location.
-   */
-  @Test
-  public void testScmRegisterNodeWithIpAddress()
-      throws IOException, InterruptedException, AuthenticationException {
-    testScmRegisterNodeWithNetworkTopology(false);
-  }
-
-  /**
-   * Test adding a node to the network topology during node registration.
-   * The datanode uses its hostname to resolve its network location.
-   */
-  @Test
-  public void testScmRegisterNodeWithHostname()
-      throws IOException, InterruptedException, AuthenticationException {
-    testScmRegisterNodeWithNetworkTopology(true);
-  }
-
-  /**
-   * Test getNodesByAddress when using IPs.
-   *
-   */
-  @Test
-  public void testgetNodesByAddressWithIpAddress()
-      throws IOException, InterruptedException, AuthenticationException {
-    testGetNodesByAddress(false);
-  }
-
-  /**
-   * Test getNodesByAddress when using hostnames.
-   */
-  @Test
-  public void testgetNodesByAddressWithHostname()
-      throws IOException, InterruptedException, AuthenticationException {
-    testGetNodesByAddress(true);
-  }
-
-  /**
-   * Test adding nodes to a 4-layer network topology during node registration.
-   */
-  @Test
-  public void testScmRegisterNodeWith4LayerNetworkTopology()
-      throws IOException, InterruptedException, AuthenticationException {
-    OzoneConfiguration conf = getConf();
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000,
-        MILLISECONDS);
-
-    // create table mapping file
-    String[] hostNames = {"host1", "host2", "host3", "host4"};
-    String[] ipAddress = {"1.2.3.4", "2.3.4.5", "3.4.5.6", "4.5.6.7"};
-    String mapFile = this.getClass().getClassLoader()
-        .getResource("nodegroup-mapping").getPath();
-
-    // create and register nodes
-    conf.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
-        "org.apache.hadoop.net.TableMapping");
-    conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile);
-    conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE,
-        "network-topology-nodegroup.xml");
-    final int nodeCount = hostNames.length;
-    // use default IP address to resolve node
-    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      DatanodeDetails[] nodes = new DatanodeDetails[nodeCount];
-      for (int i = 0; i < nodeCount; i++) {
-        DatanodeDetails node = TestUtils.createDatanodeDetails(
-            UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
-        nodeManager.register(node, null, null);
-        nodes[i] = node;
-      }
-
-      // verify network topology cluster has all the registered nodes
-      Thread.sleep(4 * 1000);
-      NetworkTopology clusterMap = scm.getClusterMap();
-      assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
-      assertEquals(nodeCount, clusterMap.getNumOfLeafNode(""));
-      assertEquals(4, clusterMap.getMaxLevel());
-      List<DatanodeDetails> nodeList = nodeManager.getAllNodes();
-      nodeList.stream().forEach(node ->
-          Assert.assertTrue(node.getNetworkLocation().startsWith("/rack1/ng")));
-    }
-  }
-
-  private void testScmRegisterNodeWithNetworkTopology(boolean useHostname)
-      throws IOException, InterruptedException, AuthenticationException {
-    OzoneConfiguration conf = getConf();
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000,
-        MILLISECONDS);
-
-    // create table mapping file
-    String[] hostNames = {"host1", "host2", "host3", "host4"};
-    String[] ipAddress = {"1.2.3.4", "2.3.4.5", "3.4.5.6", "4.5.6.7"};
-    String mapFile = this.getClass().getClassLoader()
-        .getResource("rack-mapping").getPath();
-
-    // create and register nodes
-    conf.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
-        "org.apache.hadoop.net.TableMapping");
-    conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile);
-    if (useHostname) {
-      conf.set(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, "true");
-    }
-    final int nodeCount = hostNames.length;
-    // resolve network location by IP address, or by hostname if configured
-    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      DatanodeDetails[] nodes = new DatanodeDetails[nodeCount];
-      for (int i = 0; i < nodeCount; i++) {
-        DatanodeDetails node = TestUtils.createDatanodeDetails(
-            UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
-        nodeManager.register(node, null, null);
-        nodes[i] = node;
-      }
-
-      // verify network topology cluster has all the registered nodes
-      Thread.sleep(4 * 1000);
-      NetworkTopology clusterMap = scm.getClusterMap();
-      assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
-      assertEquals(nodeCount, clusterMap.getNumOfLeafNode(""));
-      assertEquals(3, clusterMap.getMaxLevel());
-      List<DatanodeDetails> nodeList = nodeManager.getAllNodes();
-      nodeList.stream().forEach(node ->
-          Assert.assertTrue(node.getNetworkLocation().equals("/rack1")));
-
-      // test get node
-      if (useHostname) {
-        Arrays.stream(hostNames).forEach(hostname ->
-            Assert.assertNotEquals(0, nodeManager.getNodesByAddress(hostname)
-                .size()));
-      } else {
-        Arrays.stream(ipAddress).forEach(ip ->
-            Assert.assertNotEquals(0, nodeManager.getNodesByAddress(ip)
-                .size()));
-      }
-    }
-  }
-
-  /**
-   * Test getNodesByAddress for nodes that share a hostname or IP address.
-   */
-  private void testGetNodesByAddress(boolean useHostname)
-      throws IOException, InterruptedException, AuthenticationException {
-    OzoneConfiguration conf = getConf();
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000,
-        MILLISECONDS);
-
-    // create a set of hosts - note two hosts on "host1"
-    String[] hostNames = {"host1", "host1", "host2", "host3", "host4"};
-    String[] ipAddress =
-        {"1.2.3.4", "1.2.3.4", "2.3.4.5", "3.4.5.6", "4.5.6.7"};
-
-    if (useHostname) {
-      conf.set(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, "true");
-    }
-    final int nodeCount = hostNames.length;
-    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      DatanodeDetails[] nodes = new DatanodeDetails[nodeCount];
-      for (int i = 0; i < nodeCount; i++) {
-        DatanodeDetails node = TestUtils.createDatanodeDetails(
-            UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
-        nodeManager.register(node, null, null);
-      }
-      // test get node
-      Assert.assertEquals(0, nodeManager.getNodesByAddress(null).size());
-      if (useHostname) {
-        Assert.assertEquals(2,
-            nodeManager.getNodesByAddress("host1").size());
-        Assert.assertEquals(1, nodeManager.getNodesByAddress("host2").size());
-        Assert.assertEquals(0, nodeManager.getNodesByAddress("unknown").size());
-      } else {
-        Assert.assertEquals(2,
-            nodeManager.getNodesByAddress("1.2.3.4").size());
-        Assert.assertEquals(1, nodeManager.getNodesByAddress("2.3.4.5").size());
-        Assert.assertEquals(0, nodeManager.getNodesByAddress("1.9.8.7").size());
-      }
-    }
-  }
-
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
deleted file mode 100644
index e12c643..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ /dev/null
@@ -1,262 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.Set;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.io.IOException;
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * Test Node Storage Map.
- */
-public class TestSCMNodeStorageStatMap {
-  private final static int DATANODE_COUNT = 100;
-  private final long capacity = 10L * OzoneConsts.GB;
-  private final long used = 2L * OzoneConsts.GB;
-  private final long remaining = capacity - used;
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-  private final Map<UUID, Set<StorageLocationReport>> testData =
-      new ConcurrentHashMap<>();
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  private void generateData() {
-    for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
-      UUID dnId = UUID.randomUUID();
-      Set<StorageLocationReport> reportSet = new HashSet<>();
-      String path = GenericTestUtils.getTempPath(
-          TestSCMNodeStorageStatMap.class.getSimpleName() + "-" +
-              Integer.toString(dnIndex));
-      StorageLocationReport.Builder builder =
-          StorageLocationReport.newBuilder();
-      builder.setStorageType(StorageType.DISK).setId(dnId.toString())
-          .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
-          .setCapacity(capacity).setFailed(false);
-      reportSet.add(builder.build());
-      testData.put(UUID.randomUUID(), reportSet);
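-      // Note: the map key is a fresh UUID, distinct from the dnId used in
-      // the report itself; the tests only rely on the key for lookups.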
-    }
-  }
-
-  private UUID getFirstKey() {
-    return testData.keySet().iterator().next();
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    generateData();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-  }
-
-  @Test
-  public void testIsKnownDatanode() throws SCMException {
-    SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
-    UUID knownNode = getFirstKey();
-    UUID unknownNode = UUID.randomUUID();
-    Set<StorageLocationReport> report = testData.get(knownNode);
-    map.insertNewDatanode(knownNode, report);
-    Assert.assertTrue("Not able to detect a known node",
-        map.isKnownDatanode(knownNode));
-    Assert.assertFalse("Unknown node detected",
-        map.isKnownDatanode(unknownNode));
-  }
-
-  @Test
-  public void testInsertNewDatanode() throws SCMException {
-    SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
-    UUID knownNode = getFirstKey();
-    Set<StorageLocationReport> report = testData.get(knownNode);
-    map.insertNewDatanode(knownNode, report);
-    Assert.assertEquals(map.getStorageVolumes(knownNode),
-        testData.get(knownNode));
-    thrown.expect(SCMException.class);
-    thrown.expectMessage("already exists");
-    map.insertNewDatanode(knownNode, report);
-  }
-
-  @Test
-  public void testUpdateUnknownDatanode() throws SCMException {
-    SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
-    UUID unknownNode = UUID.randomUUID();
-    String path = GenericTestUtils.getTempPath(
-        TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + unknownNode
-            .toString());
-    Set<StorageLocationReport> reportSet = new HashSet<>();
-    StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
-    builder.setStorageType(StorageType.DISK).setId(unknownNode.toString())
-        .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
-        .setCapacity(capacity).setFailed(false);
-    reportSet.add(builder.build());
-    thrown.expect(SCMException.class);
-    thrown.expectMessage("No such datanode");
-    map.updateDatanodeMap(unknownNode, reportSet);
-  }
-
-  @Test
-  public void testProcessNodeReportCheckOneNode() throws IOException {
-    UUID key = getFirstKey();
-    List<StorageReportProto> reportList = new ArrayList<>();
-    Set<StorageLocationReport> reportSet = testData.get(key);
-    SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
-    map.insertNewDatanode(key, reportSet);
-    Assert.assertTrue(map.isKnownDatanode(key));
-    UUID storageId = UUID.randomUUID();
-    String path =
-        GenericTestUtils.getRandomizedTempPath().concat("/" + storageId);
-    StorageLocationReport report = reportSet.iterator().next();
-    long reportCapacity = report.getCapacity();
-    long reportScmUsed = report.getScmUsed();
-    long reportRemaining = report.getRemaining();
-    StorageReportProto storageReport = TestUtils.createStorageReport(storageId,
-        path, reportCapacity, reportScmUsed, reportRemaining, null);
-    StorageReportResult result =
-        map.processNodeReport(key, TestUtils.createNodeReport(storageReport));
-    Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL,
-        result.getStatus());
-    StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb =
-        NodeReportProto.newBuilder();
-    StorageReportProto srb = reportSet.iterator().next().getProtoBufMessage();
-    reportList.add(srb);
-    result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
-    Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL,
-        result.getStatus());
-
-    reportList.add(TestUtils
-        .createStorageReport(UUID.randomUUID(), path, reportCapacity,
-            reportCapacity, 0, null));
-    result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
-    Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE,
-        result.getStatus());
-    // Mark a disk as failed.
-    StorageReportProto srb2 = StorageReportProto.newBuilder()
-        .setStorageUuid(UUID.randomUUID().toString())
-        .setStorageLocation(srb.getStorageLocation()).setScmUsed(reportCapacity)
-        .setCapacity(reportCapacity).setRemaining(0).setFailed(true).build();
-    reportList.add(srb2);
-    nrb.addAllStorageReport(reportList);
-    result = map.processNodeReport(key, nrb.addStorageReport(srb).build());
-    Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus
-        .FAILED_AND_OUT_OF_SPACE_STORAGE, result.getStatus());
-
-  }
-
-  @Test
-  public void testProcessMultipleNodeReports() throws SCMException {
-    SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
-    int counter = 1;
-    // Insert all testData into the SCMNodeStorageStatMap Map.
-    for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData
-        .entrySet()) {
-      map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue());
-    }
-    Assert.assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity());
-    Assert.assertEquals(DATANODE_COUNT * remaining, map.getTotalFreeSpace());
-    Assert.assertEquals(DATANODE_COUNT * used, map.getTotalSpaceUsed());
-
-    // update 1/4th of the datanodes to be full
-    for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData
-        .entrySet()) {
-      Set<StorageLocationReport> reportSet = new HashSet<>();
-      String path = GenericTestUtils.getTempPath(
-          TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + keyEntry
-              .getKey().toString());
-      StorageLocationReport.Builder builder =
-          StorageLocationReport.newBuilder();
-      builder.setStorageType(StorageType.DISK)
-          .setId(keyEntry.getKey().toString()).setStorageLocation(path)
-          .setScmUsed(capacity).setRemaining(0).setCapacity(capacity)
-          .setFailed(false);
-      reportSet.add(builder.build());
-
-      map.updateDatanodeMap(keyEntry.getKey(), reportSet);
-      counter++;
-      if (counter > DATANODE_COUNT / 4) {
-        break;
-      }
-    }
-    Assert.assertEquals(DATANODE_COUNT / 4,
-        map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.CRITICAL)
-            .size());
-    Assert.assertEquals(0,
-        map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.WARN)
-            .size());
-    Assert.assertEquals(0.75 * DATANODE_COUNT,
-        map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL)
-            .size(), 0);
-
-    Assert.assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity(), 0);
-    Assert.assertEquals(0.75 * DATANODE_COUNT * remaining,
-        map.getTotalFreeSpace(), 0);
-    Assert.assertEquals(
-        0.75 * DATANODE_COUNT * used + (0.25 * DATANODE_COUNT * capacity),
-        map.getTotalSpaceUsed(), 0);
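-    // 3/4 of the datanodes still report the original usage, while the 1/4
-    // marked full report their entire capacity as used, hence the mixed sum.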
-    counter = 1;
-    // Remove 1/4 of the DataNodes from the Map
-    for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData
-        .entrySet()) {
-      map.removeDatanode(keyEntry.getKey());
-      counter++;
-      if (counter > DATANODE_COUNT / 4) {
-        break;
-      }
-    }
-
-    Assert.assertEquals(0,
-        map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.CRITICAL)
-            .size());
-    Assert.assertEquals(0,
-        map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.WARN)
-            .size());
-    Assert.assertEquals(0.75 * DATANODE_COUNT,
-        map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL)
-            .size(), 0);
-
-    Assert
-        .assertEquals(0.75 * DATANODE_COUNT * capacity, map.getTotalCapacity(),
-            0);
-    Assert.assertEquals(0.75 * DATANODE_COUNT * remaining,
-        map.getTotalFreeSpace(), 0);
-    Assert
-        .assertEquals(0.75 * DATANODE_COUNT * used, map.getTotalSpaceUsed(), 0);
-
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
deleted file mode 100644
index 9bce94b..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.scm.HddsTestUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .NodeReportFromDatanode;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.security.authentication.client
-    .AuthenticationException;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.UUID;
-
-/**
- * Verifies the statistics in NodeManager.
- */
-public class TestStatisticsUpdate {
-
-  private NodeManager nodeManager;
-  private NodeReportHandler nodeReportHandler;
-
-  @Before
-  public void setup() throws IOException, AuthenticationException {
-    final OzoneConfiguration conf = new OzoneConfiguration();
-    final String storageDir = GenericTestUtils.getTempPath(
-        TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID());
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
-    conf.set(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, "100ms");
-    conf.set(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, "50ms");
-    conf.set(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, "1s");
-    conf.set(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, "2s");
-    final EventQueue eventQueue = new EventQueue();
-    final StorageContainerManager scm = HddsTestUtils.getScm(conf);
-    nodeManager = scm.getScmNodeManager();
-    final DeadNodeHandler deadNodeHandler = new DeadNodeHandler(
-        nodeManager, Mockito.mock(PipelineManager.class),
-        scm.getContainerManager());
-    eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
-    nodeReportHandler = new NodeReportHandler(nodeManager);
-  }
-
-  @Test
-  public void testStatisticsUpdate() throws Exception {
-    //GIVEN
-    DatanodeDetails datanode1 = TestUtils.randomDatanodeDetails();
-    DatanodeDetails datanode2 = TestUtils.randomDatanodeDetails();
-
-    String storagePath1 = GenericTestUtils.getRandomizedTempPath()
-        .concat("/" + datanode1.getUuidString());
-    String storagePath2 = GenericTestUtils.getRandomizedTempPath()
-        .concat("/" + datanode2.getUuidString());
-
-    StorageReportProto storageOne = TestUtils.createStorageReport(
-        datanode1.getUuid(), storagePath1, 100, 10, 90, null);
-    StorageReportProto storageTwo = TestUtils.createStorageReport(
-        datanode2.getUuid(), storagePath2, 200, 20, 180, null);
-
-    nodeManager.register(datanode1,
-        TestUtils.createNodeReport(storageOne), null);
-    nodeManager.register(datanode2,
-        TestUtils.createNodeReport(storageTwo), null);
-
-    NodeReportProto nodeReportProto1 = TestUtils.createNodeReport(storageOne);
-    NodeReportProto nodeReportProto2 = TestUtils.createNodeReport(storageTwo);
-
-    nodeReportHandler.onMessage(
-        new NodeReportFromDatanode(datanode1, nodeReportProto1),
-        Mockito.mock(EventPublisher.class));
-    nodeReportHandler.onMessage(
-        new NodeReportFromDatanode(datanode2, nodeReportProto2),
-        Mockito.mock(EventPublisher.class));
-
-    SCMNodeStat stat = nodeManager.getStats();
-    Assert.assertEquals(300L, stat.getCapacity().get().longValue());
-    Assert.assertEquals(270L, stat.getRemaining().get().longValue());
-    Assert.assertEquals(30L, stat.getScmUsed().get().longValue());
-
-    SCMNodeMetric nodeStat = nodeManager.getNodeStat(datanode1);
-    Assert.assertEquals(100L, nodeStat.get().getCapacity().get().longValue());
-    Assert.assertEquals(90L, nodeStat.get().getRemaining().get().longValue());
-    Assert.assertEquals(10L, nodeStat.get().getScmUsed().get().longValue());
-
-    //TODO: Support logic to mark a node as dead in NodeManager.
-
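-    // Only datanode2 keeps heartbeating; datanode1 misses the 2s dead-node
-    // window, so its 100/90/10 contribution should drop out of the totals.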
-    nodeManager.processHeartbeat(datanode2);
-    Thread.sleep(1000);
-    nodeManager.processHeartbeat(datanode2);
-    Thread.sleep(1000);
-    nodeManager.processHeartbeat(datanode2);
-    Thread.sleep(1000);
-    nodeManager.processHeartbeat(datanode2);
-    // THEN statistics in SCM should change.
-    stat = nodeManager.getStats();
-    Assert.assertEquals(200L, stat.getCapacity().get().longValue());
-    Assert.assertEquals(180L,
-        stat.getRemaining().get().longValue());
-    Assert.assertEquals(20L, stat.getScmUsed().get().longValue());
-  }
-
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
deleted file mode 100644
index dfd8397..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Make CheckStyle Happy.
- */
-package org.apache.hadoop.hdds.scm.node;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
deleted file mode 100644
index 77ed907..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.node.states;
-
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * Test class for Node2ContainerMap.
- */
-public class TestNode2ContainerMap {
-  private final static int DATANODE_COUNT = 300;
-  private final static int CONTAINER_COUNT = 1000;
-  private final Map<UUID, TreeSet<ContainerID>> testData = new
-      ConcurrentHashMap<>();
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  private void generateData() {
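-    // Each datanode gets a contiguous, disjoint range of container IDs
-    // starting at dnIndex * CONTAINER_COUNT + 1.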
-    for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
-      TreeSet<ContainerID> currentSet = new TreeSet<>();
-      for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) {
-        long currentCnIndex = (long) (dnIndex * CONTAINER_COUNT) + cnIndex;
-        currentSet.add(new ContainerID(currentCnIndex));
-      }
-      testData.put(UUID.randomUUID(), currentSet);
-    }
-  }
-
-  private UUID getFirstKey() {
-    return testData.keySet().iterator().next();
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    generateData();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-  }
-
-  @Test
-  public void testIsKnownDatanode() throws SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-    UUID knownNode = getFirstKey();
-    UUID unknownNode = UUID.randomUUID();
-    Set<ContainerID> containerIDs = testData.get(knownNode);
-    map.insertNewDatanode(knownNode, containerIDs);
-    Assert.assertTrue("Not able to detect a known node",
-        map.isKnownDatanode(knownNode));
-    Assert.assertFalse("Unknown node detected",
-        map.isKnownDatanode(unknownNode));
-  }
-
-  @Test
-  public void testInsertNewDatanode() throws SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-    UUID knownNode = getFirstKey();
-    Set<ContainerID> containerIDs = testData.get(knownNode);
-    map.insertNewDatanode(knownNode, containerIDs);
-    Set<ContainerID> readSet = map.getContainers(knownNode);
-
-    // Assert that all elements are present in the set that we read back from
-    // node map.
-    Set<ContainerID> newSet = new TreeSet<>(readSet);
-    Assert.assertTrue(newSet.removeAll(containerIDs));
-    Assert.assertEquals(0, newSet.size());
-
-    thrown.expect(SCMException.class);
-    thrown.expectMessage("already exists");
-    map.insertNewDatanode(knownNode, containerIDs);
-
-    map.removeDatanode(knownNode);
-    map.insertNewDatanode(knownNode, containerIDs);
-
-  }
-
-  @Test
-  public void testProcessReportCheckOneNode() throws SCMException {
-    UUID key = getFirstKey();
-    Set<ContainerID> values = testData.get(key);
-    Node2ContainerMap map = new Node2ContainerMap();
-    map.insertNewDatanode(key, values);
-    Assert.assertTrue(map.isKnownDatanode(key));
-    ReportResult result = map.processReport(key, values);
-    Assert.assertEquals(ReportResult.ReportStatus.ALL_IS_WELL,
-        result.getStatus());
-  }
-
-  @Test
-  public void testUpdateDatanodeMap() throws SCMException {
-    UUID datanodeId = getFirstKey();
-    Set<ContainerID> values = testData.get(datanodeId);
-    Node2ContainerMap map = new Node2ContainerMap();
-    map.insertNewDatanode(datanodeId, values);
-    Assert.assertTrue(map.isKnownDatanode(datanodeId));
-    Assert.assertEquals(CONTAINER_COUNT, map.getContainers(datanodeId).size());
-
-    //remove one container
-    values.remove(values.iterator().next());
-    Assert.assertEquals(CONTAINER_COUNT - 1, values.size());
-    Assert.assertEquals(CONTAINER_COUNT, map.getContainers(datanodeId).size());
-
-    map.setContainersForDatanode(datanodeId, values);
-
-    Assert.assertEquals(values.size(), map.getContainers(datanodeId).size());
-    Assert.assertEquals(values, map.getContainers(datanodeId));
-  }
-
-  @Test
-  public void testProcessReportInsertAll() throws SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-
-    for (Map.Entry<UUID, TreeSet<ContainerID>> keyEntry : testData.entrySet()) {
-      map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue());
-    }
-    // Assert all Keys are known datanodes.
-    for (UUID key : testData.keySet()) {
-      Assert.assertTrue(map.isKnownDatanode(key));
-    }
-  }
-
-  /*
-  For processReport we have to test the following scenarios.
-
-  1. New Datanode - A new datanode appears and we have to add it to the
-  SCM's Node2Container Map.
-
-  2. New Container - A datanode exists, but a new container is added to that
-  DN. We need to detect that and return a list of added containers.
-
-  3. Missing Container - A datanode exists, but one of the expected containers
-  on that datanode is missing. We need to detect that.
-
-  4. We get a container report that has both missing and new containers.
-  We need to return separate lists for these.
-  */
-
-  /**
-   * Assert that we are able to detect the addition of a new datanode.
-   *
-   * @throws SCMException
-   */
-  @Test
-  public void testProcessReportDetectNewDataNode() throws SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-    // If we attempt to process a node that is not present in the map,
-    // we get a result back that says NEW_DATANODE_FOUND.
-    UUID key = getFirstKey();
-    TreeSet<ContainerID> values = testData.get(key);
-    ReportResult result = map.processReport(key, values);
-    Assert.assertEquals(ReportResult.ReportStatus.NEW_DATANODE_FOUND,
-        result.getStatus());
-    Assert.assertEquals(result.getNewEntries().size(), values.size());
-  }
-
-  /**
-   * This test asserts that processReport is able to detect new containers
-   * when they are added to a datanode. For that we populate the DN with a
-   * list of containerIDs, then add a few more containers and make sure that
-   * we are able to detect them.
-   *
-   * @throws SCMException
-   */
-  @Test
-  public void testProcessReportDetectNewContainers() throws SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-    UUID key = getFirstKey();
-    TreeSet<ContainerID> values = testData.get(key);
-    map.insertNewDatanode(key, values);
-
-    final int newCount = 100;
-    ContainerID last = values.last();
-    TreeSet<ContainerID> addedContainers = new TreeSet<>();
-    for (int x = 1; x <= newCount; x++) {
-      long cTemp = last.getId() + x;
-      addedContainers.add(new ContainerID(cTemp));
-    }
-
-    // This set is the super set of existing containers and new containers.
-    TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
-    newContainersSet.addAll(addedContainers);
-
-    ReportResult result = map.processReport(key, newContainersSet);
-
-    // Assert that the report detects the newly added containers.
-    Assert.assertEquals(ReportResult.ReportStatus.NEW_ENTRIES_FOUND,
-        result.getStatus());
-
-    Assert.assertEquals(addedContainers.size(),
-        result.getNewEntries().size());
-
-    // Assert that the new entries are exactly the containers we added.
-    Assert.assertTrue("Not all added containers were reported.",
-        result.getNewEntries().removeAll(addedContainers));
-  }
-
-  /**
-   * This test asserts that processReport is able to detect missing containers
-   * when they are absent from a report.
-   *
-   * @throws SCMException
-   */
-  @Test
-  public void testProcessReportDetectMissingContainers() throws SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-    UUID key = getFirstKey();
-    TreeSet<ContainerID> values = testData.get(key);
-    map.insertNewDatanode(key, values);
-
-    final int removeCount = 100;
-    Random r = new Random();
-
-    ContainerID first = values.first();
-    TreeSet<ContainerID> removedContainers = new TreeSet<>();
-
-    // Pick random containers to remove; it is ok to collide, no issues.
-    for (int x = 0; x < removeCount; x++) {
-      int startBase = (int) first.getId();
-      long cTemp = r.nextInt(values.size());
-      removedContainers.add(new ContainerID(cTemp + startBase));
-    }
-
-    // This set is a new set with some containers removed.
-    TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
-    newContainersSet.removeAll(removedContainers);
-
-    ReportResult result = map.processReport(key, newContainersSet);
-
-
-    // Assert that the report flags the removed containers as missing.
-    Assert.assertEquals(ReportResult.ReportStatus.MISSING_ENTRIES,
-        result.getStatus());
-    Assert.assertEquals(removedContainers.size(),
-        result.getMissingEntries().size());
-
-    // Assert that the missing entries are exactly the containers we removed.
-    Assert.assertTrue("Not all missing containers were found.",
-        result.getMissingEntries().removeAll(removedContainers));
-  }
-
-  @Test
-  public void testProcessReportDetectNewAndMissingContainers() throws
-      SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-    UUID key = getFirstKey();
-    TreeSet<ContainerID> values = testData.get(key);
-    map.insertNewDatanode(key, values);
-
-    Set<ContainerID> insertedSet = new TreeSet<>();
-    // Insert containers 1..30
-    for (int x = 1; x <= 30; x++) {
-      insertedSet.add(new ContainerID(x));
-    }
-
-
-    final int removeCount = 100;
-    Random r = new Random();
-
-    ContainerID first = values.first();
-    TreeSet<ContainerID> removedContainers = new TreeSet<>();
-
-    // Pick random containers to remove; it is ok to collide, no issues.
-    for (int x = 0; x < removeCount; x++) {
-      int startBase = (int) first.getId();
-      long cTemp = r.nextInt(values.size());
-      removedContainers.add(new ContainerID(cTemp + startBase));
-    }
-
-    Set<ContainerID> newSet = new TreeSet<>(values);
-    newSet.addAll(insertedSet);
-    newSet.removeAll(removedContainers);
-
-    ReportResult result = map.processReport(key, newSet);
-
-
-    Assert.assertEquals(
-        ReportResult.ReportStatus.MISSING_AND_NEW_ENTRIES_FOUND,
-        result.getStatus());
-    Assert.assertEquals(removedContainers.size(),
-        result.getMissingEntries().size());
-
-
-    // Assert that the missing entries are exactly the containers we removed.
-    Assert.assertTrue("Not all missing containers were found.",
-        result.getMissingEntries().removeAll(removedContainers));
-
-    Assert.assertEquals(insertedSet.size(),
-        result.getNewEntries().size());
-
-    // Assert that the new entries are exactly the containers we inserted.
-    Assert.assertTrue("Not all inserted containers were found.",
-        result.getNewEntries().removeAll(insertedSet));
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
deleted file mode 100644
index 6610fcd..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Test Node2Container Map.
- */
-package org.apache.hadoop.hdds.scm.node.states;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java
deleted file mode 100644
index da05c59..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-/**
- * SCM tests
- */
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
deleted file mode 100644
index 01c53ba..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-
-import java.io.IOException;
-
-/**
- * Mock Ratis Pipeline Provider for Mock Nodes.
- */
-public class MockRatisPipelineProvider extends RatisPipelineProvider {
-
-  public MockRatisPipelineProvider(NodeManager nodeManager,
-                            PipelineStateManager stateManager,
-                            Configuration conf) {
-    super(nodeManager, stateManager, conf, null);
-  }
-
-  protected void initializePipeline(Pipeline pipeline) throws IOException {
-    // Do nothing as the datanodes do not exist.
-  }
-
-  @Override
-  public void shutdown() {
-    // Do nothing.
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
deleted file mode 100644
index 94c3039..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.safemode;
-
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.PipelineReport;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.HddsTestUtils;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
-import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-/**
- * This class tests the HealthyPipelineSafeMode rule.
- */
-public class TestHealthyPipelineSafeModeRule {
-
-  @Test
-  public void testHealthyPipelineSafeModeRuleWithNoPipelines()
-      throws Exception {
-
-    String storageDir = GenericTestUtils.getTempPath(
-        TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID());
-    try {
-      EventQueue eventQueue = new EventQueue();
-      List<ContainerInfo> containers = new ArrayList<>();
-      containers.addAll(HddsTestUtils.getContainerInfo(1));
-
-      OzoneConfiguration config = new OzoneConfiguration();
-      MockNodeManager nodeManager = new MockNodeManager(true, 0);
-      config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
-      // enable pipeline check
-      config.setBoolean(
-          HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
-
-
-      SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
-          nodeManager, eventQueue, null);
-      PipelineProvider mockRatisProvider =
-          new MockRatisPipelineProvider(nodeManager,
-              pipelineManager.getStateManager(), config);
-      pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-          mockRatisProvider);
-      SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(
-          config, containers, pipelineManager, eventQueue);
-
-      HealthyPipelineSafeModeRule healthyPipelineSafeModeRule =
-          scmSafeModeManager.getHealthyPipelineSafeModeRule();
-
-      // This should be immediately satisfied, as no pipelines are there yet.
-      Assert.assertTrue(healthyPipelineSafeModeRule.validate());
-    } finally {
-      FileUtil.fullyDelete(new File(storageDir));
-    }
-
-  }
-
-
-  @Test
-  public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception {
-
-    String storageDir = GenericTestUtils.getTempPath(
-        TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID());
-
-    try {
-      EventQueue eventQueue = new EventQueue();
-      List<ContainerInfo> containers = new ArrayList<>();
-      containers.addAll(HddsTestUtils.getContainerInfo(1));
-
-      OzoneConfiguration config = new OzoneConfiguration();
-
-      // In MockNodeManager the first 8 nodes are healthy, the next 2 are
-      // stale and the last one is dead, and this pattern repeats. So for
-      // 12 nodes: 9 healthy, 2 stale and 1 dead.
-      MockNodeManager nodeManager = new MockNodeManager(true, 12);
-      config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
-      // enable pipeline check
-      config.setBoolean(
-          HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
-
-
-      SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
-          nodeManager, eventQueue, null);
-
-      PipelineProvider mockRatisProvider =
-          new MockRatisPipelineProvider(nodeManager,
-              pipelineManager.getStateManager(), config);
-      pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-          mockRatisProvider);
-
-      // Create 3 pipelines
-      Pipeline pipeline1 =
-          pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
-      Pipeline pipeline2 =
-          pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
-      Pipeline pipeline3 =
-          pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
-
-
-      SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(
-          config, containers, pipelineManager, eventQueue);
-
-      HealthyPipelineSafeModeRule healthyPipelineSafeModeRule =
-          scmSafeModeManager.getHealthyPipelineSafeModeRule();
-
-
-      // No datanode has sent a pipeline report yet.
-      Assert.assertFalse(healthyPipelineSafeModeRule.validate());
-
-      // Fire a pipeline report from all datanodes in the first pipeline.
-      // With 3 pipelines, 10% is 0.3, which rounds up to 1 with ceil, so
-      // validate() should return true once the report event has fired.
-
-
-      // This test runs without the pipeline report handler, so the created
-      // pipelines are not moved to the allocated state; marking pipelines
-      // healthy is the report handler's job. For simplicity the pipelines
-      // are left in the open state in the pipeline manager.
-
-      firePipelineEvent(pipeline1, eventQueue);
-      GenericTestUtils.waitFor(() -> healthyPipelineSafeModeRule.validate(),
-          1000, 5000);
-    } finally {
-      FileUtil.fullyDelete(new File(storageDir));
-    }
-
-  }
-
-
-  @Test
-  public void testHealthyPipelineSafeModeRuleWithMixedPipelines()
-      throws Exception {
-
-    String storageDir = GenericTestUtils.getTempPath(
-        TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID());
-
-    try {
-      EventQueue eventQueue = new EventQueue();
-      List<ContainerInfo> containers = new ArrayList<>();
-      containers.addAll(HddsTestUtils.getContainerInfo(1));
-
-      OzoneConfiguration config = new OzoneConfiguration();
-
-      // In MockNodeManager the first 8 nodes are healthy, the next 2 are
-      // stale and the last one is dead, and this pattern repeats. So for
-      // 12 nodes: 9 healthy, 2 stale and 1 dead.
-      MockNodeManager nodeManager = new MockNodeManager(true, 12);
-      config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
-      // enable pipeline check
-      config.setBoolean(
-          HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
-
-
-      SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
-          nodeManager, eventQueue, null);
-      PipelineProvider mockRatisProvider =
-          new MockRatisPipelineProvider(nodeManager,
-              pipelineManager.getStateManager(), config);
-      pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-          mockRatisProvider);
-
-      // Create 3 pipelines
-      Pipeline pipeline1 =
-          pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.ONE);
-      Pipeline pipeline2 =
-          pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
-      Pipeline pipeline3 =
-          pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
-
-
-      SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(
-          config, containers, pipelineManager, eventQueue);
-
-      HealthyPipelineSafeModeRule healthyPipelineSafeModeRule =
-          scmSafeModeManager.getHealthyPipelineSafeModeRule();
-
-
-      // No datanode has sent a pipeline report yet.
-      Assert.assertFalse(healthyPipelineSafeModeRule.validate());
-
-
-      GenericTestUtils.LogCapturer logCapturer =
-          GenericTestUtils.LogCapturer.captureLogs(LoggerFactory.getLogger(
-              SCMSafeModeManager.class));
-
-      // Fire a pipeline report event for the Ratis factor ONE pipeline;
-      // validate() should still return false.
-      firePipelineEvent(pipeline1, eventQueue);
-
-      GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains(
-          "reported count is 0"),
-          1000, 5000);
-      Assert.assertFalse(healthyPipelineSafeModeRule.validate());
-
-      firePipelineEvent(pipeline2, eventQueue);
-      firePipelineEvent(pipeline3, eventQueue);
-
-      GenericTestUtils.waitFor(() -> healthyPipelineSafeModeRule.validate(),
-          1000, 5000);
-
-    } finally {
-      FileUtil.fullyDelete(new File(storageDir));
-    }
-
-  }
-
-
-  private void firePipelineEvent(Pipeline pipeline, EventQueue eventQueue) {
-    PipelineReportsProto.Builder reportBuilder = PipelineReportsProto
-        .newBuilder();
-
-    reportBuilder.addPipelineReport(PipelineReport.newBuilder()
-        .setPipelineID(pipeline.getId().getProtobuf()));
-
-    // The pipeline is already in the open state, so firing the report
-    // from a single datanode is sufficient here.
-    eventQueue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT,
-        new SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode(
-            pipeline.getNodes().get(0), reportBuilder.build()));
-  }
-
-}
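
For reference, the 10% arithmetic the comments in the test above rely on reduces to a ceil over the pipeline count. A minimal sketch, assuming a hypothetical helper rather than the rule's real API:

    // Hypothetical helper mirroring the healthy pipeline rule's threshold
    // math: with 3 pipelines and a 10% threshold, ceil(0.3) = 1 report.
    static int pipelineThreshold(int pipelineCount, double thresholdPct) {
      return (int) Math.ceil(pipelineCount * thresholdPct);
    }
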
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java
deleted file mode 100644
index ca54d05..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.safemode;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReport;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.HddsTestUtils;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * This class tests OneReplicaPipelineSafeModeRule.
- */
-public class TestOneReplicaPipelineSafeModeRule {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-  private OneReplicaPipelineSafeModeRule rule;
-  private SCMPipelineManager pipelineManager;
-  private EventQueue eventQueue;
-
-
-  private void setup(int nodes, int pipelineFactorThreeCount,
-      int pipelineFactorOneCount) throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.setBoolean(
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
-    ozoneConfiguration.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        folder.newFolder().toString());
-
-    List<ContainerInfo> containers = new ArrayList<>();
-    containers.addAll(HddsTestUtils.getContainerInfo(1));
-    MockNodeManager mockNodeManager = new MockNodeManager(true, nodes);
-
-    eventQueue = new EventQueue();
-    pipelineManager =
-        new SCMPipelineManager(ozoneConfiguration, mockNodeManager,
-            eventQueue, null);
-
-    PipelineProvider mockRatisProvider =
-        new MockRatisPipelineProvider(mockNodeManager,
-            pipelineManager.getStateManager(), ozoneConfiguration);
-    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-        mockRatisProvider);
-
-    createPipelines(pipelineFactorThreeCount,
-        HddsProtos.ReplicationFactor.THREE);
-    createPipelines(pipelineFactorOneCount,
-        HddsProtos.ReplicationFactor.ONE);
-
-    SCMSafeModeManager scmSafeModeManager =
-        new SCMSafeModeManager(ozoneConfiguration, containers,
-            pipelineManager, eventQueue);
-
-    rule = scmSafeModeManager.getOneReplicaPipelineSafeModeRule();
-  }
-
-  @Test
-  public void testOneReplicaPipelineRule() throws Exception {
-
-    // With 30 nodes we can create 7 pipelines with replication factor 3.
-    // (In the mock node manager, for every 10 nodes, 7 are healthy, 2 are
-    // stale and 1 is dead.)
-    int nodes = 30;
-    int pipelineFactorThreeCount = 7;
-    int pipelineCountOne = 0;
-    setup(nodes, pipelineFactorThreeCount, pipelineCountOne);
-
-    GenericTestUtils.LogCapturer logCapturer =
-        GenericTestUtils.LogCapturer.captureLogs(
-            LoggerFactory.getLogger(SCMSafeModeManager.class));
-
-    List<Pipeline> pipelines = pipelineManager.getPipelines();
-    for (int i = 0; i < pipelineFactorThreeCount - 1; i++) {
-      firePipelineEvent(pipelines.get(i));
-    }
-
-    // 90% of 7, taken with ceil, is 7, so after only 6 pipeline reports
-    // the rule's validate() should still be false.
-
-    GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains(
-        "reported count is 6"), 1000, 5000);
-
-    Assert.assertFalse(rule.validate());
-
-    // Fire the last pipeline report event from a datanode.
-    firePipelineEvent(pipelines.get(pipelineFactorThreeCount - 1));
-
-    GenericTestUtils.waitFor(() -> rule.validate(), 1000, 5000);
-
-  }
-
-
-  @Test
-  public void testOneReplicaPipelineRuleMixedPipelines() throws Exception {
-
-    // With 30 nodes we can create 7 pipelines with replication factor 3.
-    // (In the mock node manager, for every 10 nodes, 7 are healthy, 2 are
-    // stale and 1 is dead.)
-    int nodes = 30;
-    int pipelineCountThree = 7;
-    int pipelineCountOne = 21;
-
-    setup(nodes, pipelineCountThree, pipelineCountOne);
-
-    GenericTestUtils.LogCapturer logCapturer =
-        GenericTestUtils.LogCapturer.captureLogs(
-            LoggerFactory.getLogger(SCMSafeModeManager.class));
-
-    List<Pipeline> pipelines =
-        pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.ONE);
-    for (int i = 0; i < pipelineCountOne; i++) {
-      firePipelineEvent(pipelines.get(i));
-    }
-
-    GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains(
-        "reported count is 0"), 1000, 5000);
-
-    // Only the factor ONE Ratis pipelines have reported, so validate()
-    // should still be false.
-    Assert.assertFalse(rule.validate());
-
-    pipelines =
-        pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
-    for (int i = 0; i < pipelineCountThree - 1; i++) {
-      firePipelineEvent(pipelines.get(i));
-    }
-
-    GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains(
-        "reported count is 6"), 1000, 5000);
-
-    // Fire the last pipeline report event from a datanode.
-    firePipelineEvent(pipelines.get(pipelineCountThree - 1));
-
-    GenericTestUtils.waitFor(() -> rule.validate(), 1000, 5000);
-
-  }
-
-
-
-  private void createPipelines(int count,
-      HddsProtos.ReplicationFactor factor) throws Exception {
-    for (int i = 0; i < count; i++) {
-      pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-          factor);
-    }
-  }
-
-  private void firePipelineEvent(Pipeline pipeline) {
-    PipelineReportsProto.Builder reportBuilder =
-        PipelineReportsProto.newBuilder();
-
-    reportBuilder.addPipelineReport(PipelineReport.newBuilder()
-        .setPipelineID(pipeline.getId().getProtobuf()));
-
-    if (pipeline.getFactor() == HddsProtos.ReplicationFactor.THREE) {
-      eventQueue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT,
-          new SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode(
-              pipeline.getNodes().get(0), reportBuilder.build()));
-      eventQueue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT,
-          new SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode(
-              pipeline.getNodes().get(1), reportBuilder.build()));
-      eventQueue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT,
-          new SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode(
-              pipeline.getNodes().get(2), reportBuilder.build()));
-    } else {
-      eventQueue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT,
-          new SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode(
-              pipeline.getNodes().get(0), reportBuilder.build()));
-    }
-  }
-}
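
The mixed-pipeline case above turns on factor ONE pipelines being excluded from the rule's count. A sketch of that filter, reusing the Pipeline and HddsProtos types imported by the deleted test; the helper itself is hypothetical:

    // Hypothetical filter: only factor THREE pipelines count toward the
    // one-replica rule, which is why the 21 factor ONE reports above
    // leave the reported count at 0.
    static long countedPipelines(java.util.List<Pipeline> pipelines) {
      return pipelines.stream()
          .filter(p -> p.getFactor() == HddsProtos.ReplicationFactor.THREE)
          .count();
    }
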
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
deleted file mode 100644
index 247b38a..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.safemode;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.HddsTestUtils;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.junit.rules.Timeout;
-import org.mockito.Mockito;
-
-/** Test class for SCMSafeModeManager.
- */
-public class TestSCMSafeModeManager {
-
-  private static EventQueue queue;
-  private SCMSafeModeManager scmSafeModeManager;
-  private static Configuration config;
-  private List<ContainerInfo> containers = Collections.emptyList();
-
-  @Rule
-  public Timeout timeout = new Timeout(1000 * 300);
-
-  @Rule
-  public final TemporaryFolder tempDir = new TemporaryFolder();
-
-  @BeforeClass
-  public static void setUp() {
-    queue = new EventQueue();
-    config = new OzoneConfiguration();
-  }
-
-  @Test
-  public void testSafeModeState() throws Exception {
-    // Test 1: test for 0 containers
-    testSafeMode(0);
-
-    // Test 2: test for 20 containers
-    testSafeMode(20);
-  }
-
-  @Test
-  public void testSafeModeStateWithNullContainers() {
-    new SCMSafeModeManager(config, Collections.emptyList(),
-        null, queue);
-  }
-
-  private void testSafeMode(int numContainers) throws Exception {
-    containers = new ArrayList<>();
-    containers.addAll(HddsTestUtils.getContainerInfo(numContainers));
-
-    // Currently only containers that are not in the open state are counted.
-    for (ContainerInfo container : containers) {
-      container.setState(HddsProtos.LifeCycleState.CLOSED);
-    }
-    scmSafeModeManager = new SCMSafeModeManager(
-        config, containers, null, queue);
-
-    assertTrue(scmSafeModeManager.getInSafeMode());
-    queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
-        HddsTestUtils.createNodeRegistrationContainerReport(containers));
-
-    long cutOff = (long) Math.ceil(numContainers * config.getDouble(
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT,
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT));
-
-    Assert.assertEquals(cutOff, scmSafeModeManager.getSafeModeMetrics()
-        .getNumContainerWithOneReplicaReportedThreshold().value());
-
-    GenericTestUtils.waitFor(() -> {
-      return !scmSafeModeManager.getInSafeMode();
-    }, 100, 1000 * 5);
-
-    Assert.assertEquals(cutOff, scmSafeModeManager.getSafeModeMetrics()
-        .getCurrentContainersWithOneReplicaReportedCount().value());
-
-  }
-
-  @Test
-  public void testSafeModeExitRule() throws Exception {
-    containers = new ArrayList<>();
-    int numContainers = 100;
-    containers.addAll(HddsTestUtils.getContainerInfo(numContainers));
-    // Assign the CLOSED state to the containers so they are included in
-    // the safe mode container list.
-    for (ContainerInfo container : containers) {
-      container.setState(HddsProtos.LifeCycleState.CLOSED);
-    }
-    scmSafeModeManager = new SCMSafeModeManager(
-        config, containers, null, queue);
-
-    long cutOff = (long) Math.ceil(numContainers * config.getDouble(
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT,
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT));
-
-    Assert.assertEquals(cutOff, scmSafeModeManager.getSafeModeMetrics()
-        .getNumContainerWithOneReplicaReportedThreshold().value());
-
-    assertTrue(scmSafeModeManager.getInSafeMode());
-
-    testContainerThreshold(containers.subList(0, 25), 0.25);
-    Assert.assertEquals(25, scmSafeModeManager.getSafeModeMetrics()
-        .getCurrentContainersWithOneReplicaReportedCount().value());
-    assertTrue(scmSafeModeManager.getInSafeMode());
-    testContainerThreshold(containers.subList(25, 50), 0.50);
-    Assert.assertEquals(50, scmSafeModeManager.getSafeModeMetrics()
-        .getCurrentContainersWithOneReplicaReportedCount().value());
-    assertTrue(scmSafeModeManager.getInSafeMode());
-    testContainerThreshold(containers.subList(50, 75), 0.75);
-    Assert.assertEquals(75, scmSafeModeManager.getSafeModeMetrics()
-        .getCurrentContainersWithOneReplicaReportedCount().value());
-    assertTrue(scmSafeModeManager.getInSafeMode());
-    testContainerThreshold(containers.subList(75, 100), 1.0);
-    Assert.assertEquals(100, scmSafeModeManager.getSafeModeMetrics()
-        .getCurrentContainersWithOneReplicaReportedCount().value());
-
-    GenericTestUtils.waitFor(() -> {
-      return !scmSafeModeManager.getInSafeMode();
-    }, 100, 1000 * 5);
-  }
-
-
-  private OzoneConfiguration createConf(double healthyPercent,
-      double oneReplicaPercent) throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        tempDir.newFolder().toString());
-    conf.setBoolean(
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK,
-        true);
-    conf.setDouble(HddsConfigKeys.
-        HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT, healthyPercent);
-    conf.setDouble(HddsConfigKeys.
-        HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT, oneReplicaPercent);
-
-    return conf;
-  }
-
-  @Test
-  public void testSafeModeExitRuleWithPipelineAvailabilityCheck()
-      throws Exception {
-    testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 30, 8, 0.90, 1);
-    testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0.10, 0.9);
-    testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 30, 8, 0, 0.9);
-    testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0, 0);
-    testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0, 0.5);
-  }
-
-  @Test
-  public void testFailWithIncorrectValueForHealthyPipelinePercent()
-      throws Exception {
-    try {
-      OzoneConfiguration conf = createConf(100,
-          0.9);
-      MockNodeManager mockNodeManager = new MockNodeManager(true, 10);
-      PipelineManager pipelineManager = new SCMPipelineManager(conf,
-          mockNodeManager, queue, null);
-      scmSafeModeManager = new SCMSafeModeManager(
-          conf, containers, pipelineManager, queue);
-      fail("testFailWithIncorrectValueForHealthyPipelinePercent");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" +
-          " 1.0", ex);
-    }
-  }
-
-  @Test
-  public void testFailWithIncorrectValueForOneReplicaPipelinePercent()
-      throws Exception {
-    try {
-      OzoneConfiguration conf = createConf(0.9,
-          200);
-      MockNodeManager mockNodeManager = new MockNodeManager(true, 10);
-      PipelineManager pipelineManager = new SCMPipelineManager(conf,
-          mockNodeManager, queue, null);
-      scmSafeModeManager = new SCMSafeModeManager(
-          conf, containers, pipelineManager, queue);
-      fail("testFailWithIncorrectValueForOneReplicaPipelinePercent");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" +
-          " 1.0", ex);
-    }
-  }
-
-  @Test
-  public void testFailWithIncorrectValueForSafeModePercent() throws Exception {
-    try {
-      OzoneConfiguration conf = createConf(0.9, 0.1);
-      conf.setDouble(HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, -1.0);
-      MockNodeManager mockNodeManager = new MockNodeManager(true, 10);
-      PipelineManager pipelineManager = new SCMPipelineManager(conf,
-          mockNodeManager, queue, null);
-      scmSafeModeManager = new SCMSafeModeManager(
-          conf, containers, pipelineManager, queue);
-      fail("testFailWithIncorrectValueForSafeModePercent");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" +
-          " 1.0", ex);
-    }
-  }
-
-
-  public void testSafeModeExitRuleWithPipelineAvailabilityCheck(
-      int containerCount, int nodeCount, int pipelineCount,
-      double healthyPipelinePercent, double oneReplicaPercent)
-      throws Exception {
-
-    OzoneConfiguration conf = createConf(healthyPipelinePercent,
-        oneReplicaPercent);
-
-    containers = new ArrayList<>();
-    containers.addAll(HddsTestUtils.getContainerInfo(containerCount));
-
-    MockNodeManager mockNodeManager = new MockNodeManager(true, nodeCount);
-    SCMPipelineManager pipelineManager = new SCMPipelineManager(conf,
-        mockNodeManager, queue, null);
-    PipelineProvider mockRatisProvider =
-        new MockRatisPipelineProvider(mockNodeManager,
-            pipelineManager.getStateManager(), config);
-    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-        mockRatisProvider);
-
-
-    for (int i = 0; i < pipelineCount; i++) {
-      pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE);
-    }
-
-    for (ContainerInfo container : containers) {
-      container.setState(HddsProtos.LifeCycleState.CLOSED);
-    }
-
-    scmSafeModeManager = new SCMSafeModeManager(conf, containers,
-        pipelineManager, queue);
-
-    assertTrue(scmSafeModeManager.getInSafeMode());
-    testContainerThreshold(containers, 1.0);
-
-    List<Pipeline> pipelines = pipelineManager.getPipelines();
-
-    int healthyPipelineThresholdCount =
-        scmSafeModeManager.getHealthyPipelineSafeModeRule()
-            .getHealthyPipelineThresholdCount();
-    int oneReplicaThresholdCount =
-        scmSafeModeManager.getOneReplicaPipelineSafeModeRule()
-            .getThresholdCount();
-
-    Assert.assertEquals(healthyPipelineThresholdCount,
-        scmSafeModeManager.getSafeModeMetrics()
-            .getNumHealthyPipelinesThreshold().value());
-
-    Assert.assertEquals(oneReplicaThresholdCount,
-        scmSafeModeManager.getSafeModeMetrics()
-            .getNumPipelinesWithAtleastOneReplicaReportedThreshold().value());
-
-    // Even if no pipelines exist and the thresholds are set to zero, an
-    // event still arrives when a datanode registers; validate() then
-    // returns true and the rule is added to validatedRules.
-    if (Math.max(healthyPipelinePercent, oneReplicaThresholdCount) == 0) {
-      firePipelineEvent(pipelines.get(0));
-    }
-
-    for (int i = 0; i < Math.max(healthyPipelineThresholdCount,
-        oneReplicaThresholdCount); i++) {
-      firePipelineEvent(pipelines.get(i));
-
-      if (i < healthyPipelineThresholdCount) {
-        checkHealthy(i + 1);
-        Assert.assertEquals(i + 1,
-            scmSafeModeManager.getSafeModeMetrics()
-                .getCurrentHealthyPipelinesCount().value());
-      }
-
-      if (i < oneReplicaThresholdCount) {
-        checkOpen(i + 1);
-        Assert.assertEquals(i + 1,
-            scmSafeModeManager.getSafeModeMetrics()
-                .getCurrentPipelinesWithAtleastOneReplicaCount().value());
-      }
-    }
-
-    Assert.assertEquals(healthyPipelineThresholdCount,
-        scmSafeModeManager.getSafeModeMetrics()
-            .getCurrentHealthyPipelinesCount().value());
-
-    Assert.assertEquals(oneReplicaThresholdCount,
-        scmSafeModeManager.getSafeModeMetrics()
-            .getCurrentPipelinesWithAtleastOneReplicaCount().value());
-
-
-    GenericTestUtils.waitFor(() -> {
-      return !scmSafeModeManager.getInSafeMode();
-    }, 100, 1000 * 5);
-  }
-
-  private void checkHealthy(int expectedCount) throws Exception{
-    GenericTestUtils.waitFor(() -> scmSafeModeManager
-            .getHealthyPipelineSafeModeRule()
-            .getCurrentHealthyPipelineCount() == expectedCount,
-        100,  5000);
-  }
-
-  private void checkOpen(int expectedCount) throws Exception {
-    GenericTestUtils.waitFor(() -> scmSafeModeManager
-            .getOneReplicaPipelineSafeModeRule()
-            .getCurrentReportedPipelineCount() == expectedCount,
-        1000,  5000);
-  }
-
-  private void firePipelineEvent(Pipeline pipeline) throws Exception {
-    PipelineReportsProto.Builder reportBuilder =
-        PipelineReportsProto.newBuilder();
-
-    reportBuilder.addPipelineReport(PipelineReport.newBuilder()
-        .setPipelineID(pipeline.getId().getProtobuf()));
-    queue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT,
-        new PipelineReportFromDatanode(pipeline.getNodes().get(0),
-            reportBuilder.build()));
-  }
-
-
-  @Test
-  public void testDisableSafeMode() {
-    OzoneConfiguration conf = new OzoneConfiguration(config);
-    conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, false);
-    PipelineManager pipelineManager = Mockito.mock(PipelineManager.class);
-    Mockito.doNothing().when(pipelineManager).startPipelineCreator();
-    scmSafeModeManager =
-        new SCMSafeModeManager(conf, containers, pipelineManager, queue);
-    assertFalse(scmSafeModeManager.getInSafeMode());
-  }
-
-  @Test
-  public void testSafeModeDataNodeExitRule() throws Exception {
-    containers = new ArrayList<>();
-    testSafeModeDataNodes(0);
-    testSafeModeDataNodes(3);
-    testSafeModeDataNodes(5);
-  }
-
-  /**
-   * Check that containers in Allocated state are not considered while
-   * computing percentage of containers with at least 1 reported replica in
-   * safe mode exit rule.
-   */
-  @Test
-  public void testContainerSafeModeRule() throws Exception {
-    containers = new ArrayList<>();
-    // Add 100 containers to the list of containers in SCM
-    containers.addAll(HddsTestUtils.getContainerInfo(25 * 4));
-    // Assign the CLOSED state to the first 25 containers and the OPEN
-    // state to the rest.
-    for (ContainerInfo container : containers.subList(0, 25)) {
-      container.setState(HddsProtos.LifeCycleState.CLOSED);
-    }
-    for (ContainerInfo container : containers.subList(25, 100)) {
-      container.setState(HddsProtos.LifeCycleState.OPEN);
-    }
-
-    scmSafeModeManager = new SCMSafeModeManager(
-        config, containers, null, queue);
-
-    assertTrue(scmSafeModeManager.getInSafeMode());
-
-    // When 10 CLOSED containers are reported by DNs, the computed container
-    // threshold should be 10/25, as there are only 25 CLOSED containers.
-    // Containers in the OPEN state should not contribute to the container
-    // count when SCMSafeModeManager calculates the container threshold.
-    testContainerThreshold(containers.subList(0, 10), 0.4);
-    assertTrue(scmSafeModeManager.getInSafeMode());
-
-    // When the remaining 15 CLOSED containers are reported by DNs, the
-    // container threshold should be (10+15)/25.
-    testContainerThreshold(containers.subList(10, 25), 1.0);
-
-    GenericTestUtils.waitFor(() -> {
-      return !scmSafeModeManager.getInSafeMode();
-    }, 100, 1000 * 5);
-  }
-
-  private void testSafeModeDataNodes(int numOfDns) throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration(config);
-    conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, numOfDns);
-    scmSafeModeManager = new SCMSafeModeManager(
-        conf, containers, null, queue);
-
-    // Assert SCM is in Safe mode.
-    assertTrue(scmSafeModeManager.getInSafeMode());
-
-    // Register all DataNodes except last one and assert SCM is in safe mode.
-    for (int i = 0; i < numOfDns-1; i++) {
-      queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
-          HddsTestUtils.createNodeRegistrationContainerReport(containers));
-      assertTrue(scmSafeModeManager.getInSafeMode());
-      assertTrue(scmSafeModeManager.getCurrentContainerThreshold() == 1);
-    }
-
-    if (numOfDns == 0) {
-      GenericTestUtils.waitFor(() -> {
-        return scmSafeModeManager.getInSafeMode();
-      }, 10, 1000 * 10);
-      return;
-    }
-    // Register last DataNode and check that SCM is out of Safe mode.
-    queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
-        HddsTestUtils.createNodeRegistrationContainerReport(containers));
-    GenericTestUtils.waitFor(() -> {
-      return !scmSafeModeManager.getInSafeMode();
-    }, 10, 1000 * 10);
-  }
-
-  private void testContainerThreshold(List<ContainerInfo> dnContainers,
-      double expectedThreshold)
-      throws Exception {
-    queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
-        HddsTestUtils.createNodeRegistrationContainerReport(dnContainers));
-    GenericTestUtils.waitFor(() -> {
-      double threshold = scmSafeModeManager.getCurrentContainerThreshold();
-      return threshold == expectedThreshold;
-    }, 100, 2000 * 9);
-  }
-
-  @Test
-  public void testSafeModePipelineExitRule() throws Exception {
-    containers = new ArrayList<>();
-    containers.addAll(HddsTestUtils.getContainerInfo(25 * 4));
-    String storageDir = GenericTestUtils.getTempPath(
-        TestSCMSafeModeManager.class.getName() + UUID.randomUUID());
-    try {
-      MockNodeManager nodeManager = new MockNodeManager(true, 3);
-      config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
-      // enable pipeline check
-      config.setBoolean(
-          HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
-
-      SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
-          nodeManager, queue, null);
-
-      PipelineProvider mockRatisProvider =
-          new MockRatisPipelineProvider(nodeManager,
-              pipelineManager.getStateManager(), config);
-      pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-          mockRatisProvider);
-
-      Pipeline pipeline = pipelineManager.createPipeline(
-          HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE);
-      PipelineReportsProto.Builder reportBuilder = PipelineReportsProto
-          .newBuilder();
-      reportBuilder.addPipelineReport(PipelineReport.newBuilder()
-          .setPipelineID(pipeline.getId().getProtobuf()));
-
-      scmSafeModeManager = new SCMSafeModeManager(
-          config, containers, pipelineManager, queue);
-
-      queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
-          HddsTestUtils.createNodeRegistrationContainerReport(containers));
-      assertTrue(scmSafeModeManager.getInSafeMode());
-
-      // Trigger the processed pipeline report event
-      queue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT,
-          new PipelineReportFromDatanode(pipeline.getNodes().get(0),
-              reportBuilder.build()));
-
-      GenericTestUtils.waitFor(() -> {
-        return !scmSafeModeManager.getInSafeMode();
-      }, 100, 1000 * 10);
-      pipelineManager.close();
-    } finally {
-      config.setBoolean(
-          HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK,
-          false);
-      FileUtil.fullyDelete(new File(storageDir));
-    }
-  }
-}
\ No newline at end of file
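
The cutOff asserted in the tests above is ceil(numContainers * threshold), and testContainerSafeModeRule shows that only CLOSED containers enter the denominator. A minimal sketch of the exit check under those assumptions (illustrative, not SCM code):

    // Illustrative: 10 reported of 25 CLOSED containers yields 0.4, so
    // with a cutoff near 1.0 SCM stays in safe mode until all report.
    static boolean containerRuleSatisfied(int reported, int closedTotal,
        double thresholdPct) {
      double current =
          closedTotal == 0 ? 1.0 : (double) reported / closedTotal;
      return current >= thresholdPct;
    }
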
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSafeModeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSafeModeHandler.java
deleted file mode 100644
index 5572e9a..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSafeModeHandler.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.safemode;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.block.BlockManager;
-import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.ReplicationManager;
-import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
-import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.lock.LockManager;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.util.HashSet;
-
-/**
- * Tests SafeModeHandler behavior.
- */
-public class TestSafeModeHandler {
-
-
-  private OzoneConfiguration configuration;
-  private SCMClientProtocolServer scmClientProtocolServer;
-  private ReplicationManager replicationManager;
-  private BlockManager blockManager;
-  private SafeModeHandler safeModeHandler;
-  private EventQueue eventQueue;
-  private SCMSafeModeManager.SafeModeStatus safeModeStatus;
-  private PipelineManager scmPipelineManager;
-
-  public void setup(boolean enabled) {
-    configuration = new OzoneConfiguration();
-    configuration.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED,
-        enabled);
-    configuration.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
-        "3s");
-    scmClientProtocolServer =
-        Mockito.mock(SCMClientProtocolServer.class);
-    eventQueue = new EventQueue();
-    final ContainerManager containerManager =
-        Mockito.mock(ContainerManager.class);
-    Mockito.when(containerManager.getContainerIDs())
-        .thenReturn(new HashSet<>());
-    replicationManager = new ReplicationManager(
-        new ReplicationManagerConfiguration(),
-        containerManager, Mockito.mock(ContainerPlacementPolicy.class),
-        eventQueue, new LockManager(configuration));
-    scmPipelineManager = Mockito.mock(SCMPipelineManager.class);
-    blockManager = Mockito.mock(BlockManagerImpl.class);
-    safeModeHandler =
-        new SafeModeHandler(configuration, scmClientProtocolServer,
-            blockManager, replicationManager, scmPipelineManager);
-
-    eventQueue.addHandler(SCMEvents.SAFE_MODE_STATUS, safeModeHandler);
-    safeModeStatus = new SCMSafeModeManager.SafeModeStatus(false);
-
-  }
-
-  @Test
-  public void testSafeModeHandlerWithSafeModeEnabled() throws Exception {
-    setup(true);
-
-    Assert.assertTrue(safeModeHandler.getSafeModeStatus());
-
-    eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus);
-
-    GenericTestUtils.waitFor(() -> !safeModeHandler.getSafeModeStatus(),
-        1000, 5000);
-
-    Assert.assertFalse(scmClientProtocolServer.getSafeModeStatus());
-    Assert.assertFalse(((BlockManagerImpl) blockManager).isScmInSafeMode());
-    GenericTestUtils.waitFor(() ->
-            replicationManager.isRunning(), 1000, 5000);
-  }
-
-
-  @Test
-  public void testSafeModeHandlerWithSafeModeDisabled() throws Exception {
-
-    setup(false);
-
-    Assert.assertFalse(safeModeHandler.getSafeModeStatus());
-
-    eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus);
-
-    Assert.assertFalse(safeModeHandler.getSafeModeStatus());
-    Assert.assertFalse(scmClientProtocolServer.getSafeModeStatus());
-    Assert.assertFalse(((BlockManagerImpl) blockManager).isScmInSafeMode());
-    GenericTestUtils.waitFor(() ->
-        replicationManager.isRunning(), 1000, 5000);
-  }
-}
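
The setup() wiring above follows the generic EventQueue pattern: register a handler for an event type, then fire payloads at it. A minimal standalone handler in that style; the class is hypothetical and the getter name on SafeModeStatus is assumed:

    import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
    import org.apache.hadoop.hdds.server.events.EventHandler;
    import org.apache.hadoop.hdds.server.events.EventPublisher;

    // Hypothetical handler, registered the same way the deleted test
    // registers SafeModeHandler via eventQueue.addHandler(...).
    class LoggingSafeModeHandler
        implements EventHandler<SCMSafeModeManager.SafeModeStatus> {
      @Override
      public void onMessage(SCMSafeModeManager.SafeModeStatus status,
          EventPublisher publisher) {
        // getSafeModeStatus() is assumed to expose the boolean passed to
        // the SafeModeStatus constructor.
        System.out.println("safe mode = " + status.getSafeModeStatus());
      }
    }
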
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/package-info.java
deleted file mode 100644
index 098c68b..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.safemode;
-/**
- * SCM Safe mode tests.
- */
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
deleted file mode 100644
index d2044f5..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.server;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-/**
- * Test class for {@link SCMBlockProtocolServer}.
- */
-public class TestSCMBlockProtocolServer {
-  private OzoneConfiguration config;
-  private SCMBlockProtocolServer server;
-  private StorageContainerManager scm;
-  private NodeManager nodeManager;
-  private ScmBlockLocationProtocolServerSideTranslatorPB service;
-  private final int nodeCount = 10;
-
-  @Before
-  public void setUp() throws Exception {
-    config = new OzoneConfiguration();
-    File dir = GenericTestUtils.getRandomizedTestDir();
-    config.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
-    SCMConfigurator configurator = new SCMConfigurator();
-    scm = TestUtils.getScm(config, configurator);
-    scm.start();
-    scm.exitSafeMode();
-    // add nodes to scm node manager
-    nodeManager = scm.getScmNodeManager();
-    for (int i = 0; i < nodeCount; i++) {
-      nodeManager.register(TestUtils.randomDatanodeDetails(), null, null);
-
-    }
-    server = scm.getBlockProtocolServer();
-    service = new ScmBlockLocationProtocolServerSideTranslatorPB(server,
-        Mockito.mock(ProtocolMessageMetrics.class));
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (scm != null) {
-      scm.stop();
-      scm.join();
-    }
-  }
-
-  @Test
-  public void testSortDatanodes() throws Exception {
-    List<String> nodes = new ArrayList<>();
-    nodeManager.getAllNodes().stream().forEach(
-        node -> nodes.add(node.getNetworkName()));
-
-    // sort normal datanodes
-    String client;
-    client = nodes.get(0);
-    List<DatanodeDetails> datanodeDetails =
-        server.sortDatanodes(nodes, client);
-    System.out.println("client = " + client);
-    datanodeDetails.stream().forEach(
-        node -> System.out.println(node.toString()));
-    Assert.assertTrue(datanodeDetails.size() == nodeCount);
-
-    // illegal client 1
-    client += "X";
-    datanodeDetails = server.sortDatanodes(nodes, client);
-    System.out.println("client = " + client);
-    datanodeDetails.stream().forEach(
-        node -> System.out.println(node.toString()));
-    Assert.assertTrue(datanodeDetails.size() == nodeCount);
-    // illegal client 2
-    client = "/default-rack";
-    datanodeDetails = server.sortDatanodes(nodes, client);
-    System.out.println("client = " + client);
-    datanodeDetails.stream().forEach(
-        node -> System.out.println(node.toString()));
-    Assert.assertTrue(datanodeDetails.size() == nodeCount);
-
-    // unknown node to sort
-    nodes.add(UUID.randomUUID().toString());
-    ScmBlockLocationProtocolProtos.SortDatanodesRequestProto request =
-        ScmBlockLocationProtocolProtos.SortDatanodesRequestProto
-            .newBuilder()
-            .addAllNodeNetworkName(nodes)
-            .setClient(client)
-            .build();
-    ScmBlockLocationProtocolProtos.SortDatanodesResponseProto resp =
-        service.sortDatanodes(request);
-    Assert.assertTrue(resp.getNodeList().size() == nodeCount);
-    System.out.println("client = " + client);
-    resp.getNodeList().stream().forEach(
-        node -> System.out.println(node.getNetworkName()));
-
-    // all unknown nodes
-    nodes.clear();
-    nodes.add(UUID.randomUUID().toString());
-    nodes.add(UUID.randomUUID().toString());
-    nodes.add(UUID.randomUUID().toString());
-    request = ScmBlockLocationProtocolProtos.SortDatanodesRequestProto
-        .newBuilder()
-        .addAllNodeNetworkName(nodes)
-        .setClient(client)
-        .build();
-    resp = service.sortDatanodes(request);
-    System.out.println("client = " + client);
-    Assert.assertTrue(resp.getNodeList().size() == 0);
-    resp.getNodeList().stream().forEach(
-        node -> System.out.println(node.getNetworkName()));
-  }
-}
\ No newline at end of file
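
The assertions above pin down a contract rather than an algorithm: unknown node names are dropped from the result, while an unrecognized client location degrades to returning all known nodes rather than failing. A distilled sketch of that contract (not the real implementation):

    // Contract sketch: keep only registered nodes; all-unknown input
    // yields an empty list, matching the last assertion above.
    static java.util.List<String> sortDatanodesContract(
        java.util.List<String> knownNodes,
        java.util.List<String> requestedNodes) {
      java.util.List<String> result =
          new java.util.ArrayList<>(requestedNodes);
      result.retainAll(knownNodes);
      return result;
    }
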
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java
deleted file mode 100644
index 23568d8..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.server;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.block.BlockManager;
-import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
-import org.apache.hadoop.hdds.scm.safemode.SafeModeHandler;
-import org.apache.hadoop.hdds.scm.container.ReplicationManager;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-/**
- * Test class for {@link SCMClientProtocolServer}.
- */
-public class TestSCMClientProtocolServer {
-  private SCMClientProtocolServer scmClientProtocolServer;
-  private OzoneConfiguration config;
-  private EventQueue eventQueue;
-
-  @Before
-  public void setUp() throws Exception {
-    config = new OzoneConfiguration();
-    config.set(OZONE_SCM_CLIENT_ADDRESS_KEY,
-        OZONE_SCM_CLIENT_BIND_HOST_DEFAULT + ":0");
-    eventQueue = new EventQueue();
-    scmClientProtocolServer = new SCMClientProtocolServer(config, null);
-    BlockManager blockManager = Mockito.mock(BlockManagerImpl.class);
-    ReplicationManager replicationManager =
-        Mockito.mock(ReplicationManager.class);
-    PipelineManager pipelineManager = Mockito.mock(SCMPipelineManager.class);
-    SafeModeHandler safeModeHandler = new SafeModeHandler(config,
-        scmClientProtocolServer, blockManager, replicationManager,
-        pipelineManager);
-    eventQueue.addHandler(SCMEvents.SAFE_MODE_STATUS, safeModeHandler);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-  }
-
-  @Test
-  public void testAllocateContainerFailureInSafeMode() throws Exception {
-    LambdaTestUtils.intercept(SCMException.class,
-        "SafeModePrecheck failed for allocateContainer", () -> {
-          scmClientProtocolServer.allocateContainer(
-              ReplicationType.STAND_ALONE, ReplicationFactor.ONE, "");
-        });
-  }
-}
\ No newline at end of file
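
The intercepted message comes from SCM's safe-mode precheck on mutating RPCs. A minimal sketch of that guard; the message follows the test above, and the ResultCodes constant is an assumption:

    // Sketch of the precheck that rejects allocateContainer in safe mode.
    static void safeModePreCheck(String op, boolean inSafeMode)
        throws SCMException {
      if (inSafeMode) {
        // SCMException.ResultCodes.SAFE_MODE_EXCEPTION is assumed here.
        throw new SCMException("SafeModePrecheck failed for " + op,
            SCMException.ResultCodes.SAFE_MODE_EXCEPTION);
      }
    }
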
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMContainerMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMContainerMetrics.java
deleted file mode 100644
index 0a2eeef..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMContainerMetrics.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyInt;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsInfo;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.lib.Interns;
-import org.junit.Test;
-
-/**
- * Test metrics that represent container states.
- */
-public class TestSCMContainerMetrics {
-  @Test
-  public void testSCMContainerMetrics() {
-    SCMMXBean scmmxBean = mock(SCMMXBean.class);
-
-    Map<String, Integer> stateInfo = new HashMap<String, Integer>() {{
-        put(HddsProtos.LifeCycleState.OPEN.toString(), 2);
-        put(HddsProtos.LifeCycleState.CLOSING.toString(), 3);
-        put(HddsProtos.LifeCycleState.QUASI_CLOSED.toString(), 4);
-        put(HddsProtos.LifeCycleState.CLOSED.toString(), 5);
-        put(HddsProtos.LifeCycleState.DELETING.toString(), 6);
-        put(HddsProtos.LifeCycleState.DELETED.toString(), 7);
-      }};
-
-
-    when(scmmxBean.getContainerStateCount()).thenReturn(stateInfo);
-
-    MetricsRecordBuilder mb = mock(MetricsRecordBuilder.class);
-    when(mb.addGauge(any(MetricsInfo.class), anyInt())).thenReturn(mb);
-
-    MetricsCollector metricsCollector = mock(MetricsCollector.class);
-    when(metricsCollector.addRecord(anyString())).thenReturn(mb);
-
-    SCMContainerMetrics containerMetrics = new SCMContainerMetrics(scmmxBean);
-
-    containerMetrics.getMetrics(metricsCollector, true);
-
-    verify(mb, times(1)).addGauge(Interns.info("OpenContainers",
-        "Number of open containers"), 2);
-    verify(mb, times(1)).addGauge(Interns.info("ClosingContainers",
-        "Number of containers in closing state"), 3);
-    verify(mb, times(1)).addGauge(Interns.info("QuasiClosedContainers",
-        "Number of containers in quasi closed state"), 4);
-    verify(mb, times(1)).addGauge(Interns.info("ClosedContainers",
-        "Number of containers in closed state"), 5);
-    verify(mb, times(1)).addGauge(Interns.info("DeletingContainers",
-        "Number of containers in deleting state"), 6);
-    verify(mb, times(1)).addGauge(Interns.info("DeletedContainers",
-        "Number of containers in deleted state"), 7);
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
deleted file mode 100644
index eac8c90..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.scm.server.
-    SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .ContainerReportFromDatanode;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .NodeReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.Event;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.CMD_STATUS_REPORT;
-
-/**
- * This class tests the behavior of SCMDatanodeHeartbeatDispatcher.
- */
-public class TestSCMDatanodeHeartbeatDispatcher {
-
-
-  @Test
-  public void testNodeReportDispatcher() throws IOException {
-
-    AtomicInteger eventReceived = new AtomicInteger();
-
-    NodeReportProto nodeReport = NodeReportProto.getDefaultInstance();
-
-    NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
-    Mockito.when(mockNodeManager.isNodeRegistered(Mockito.any()))
-        .thenReturn(true);
-
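-    // Inline EventPublisher that verifies every fired event is a NODE_REPORT
-    // carrying the expected payload.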
-    SCMDatanodeHeartbeatDispatcher dispatcher =
-        new SCMDatanodeHeartbeatDispatcher(mockNodeManager,
-            new EventPublisher() {
-          @Override
-          public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
-              EVENT_TYPE event, PAYLOAD payload) {
-            Assert.assertEquals(event, NODE_REPORT);
-            eventReceived.incrementAndGet();
-            Assert.assertEquals(nodeReport,
-                ((NodeReportFromDatanode)payload).getReport());
-
-          }
-        });
-
-    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-
-    SCMHeartbeatRequestProto heartbeat =
-        SCMHeartbeatRequestProto.newBuilder()
-        .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
-        .setNodeReport(nodeReport)
-        .build();
-    dispatcher.dispatch(heartbeat);
-    Assert.assertEquals(1, eventReceived.get());
-
-
-  }
-
-  @Test
-  public void testContainerReportDispatcher() throws IOException {
-
-
-    AtomicInteger eventReceived = new AtomicInteger();
-
-    ContainerReportsProto containerReport =
-        ContainerReportsProto.getDefaultInstance();
-    CommandStatusReportsProto commandStatusReport =
-        CommandStatusReportsProto.getDefaultInstance();
-
-    NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
-    Mockito.when(mockNodeManager.isNodeRegistered(Mockito.any()))
-        .thenReturn(true);
-
-    SCMDatanodeHeartbeatDispatcher dispatcher =
-        new SCMDatanodeHeartbeatDispatcher(
-            mockNodeManager,
-            new EventPublisher() {
-          @Override
-          public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
-              EVENT_TYPE event, PAYLOAD payload) {
-            Assert.assertTrue(
-                event.equals(CONTAINER_REPORT)
-                    || event.equals(CMD_STATUS_REPORT));
-
-            if (payload instanceof ContainerReportFromDatanode) {
-              Assert.assertEquals(containerReport,
-                  ((ContainerReportFromDatanode) payload).getReport());
-            }
-            if (payload instanceof CommandStatusReportFromDatanode) {
-              Assert.assertEquals(commandStatusReport,
-                  ((CommandStatusReportFromDatanode) payload).getReport());
-            }
-            eventReceived.incrementAndGet();
-          }
-        });
-
-    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-
-    SCMHeartbeatRequestProto heartbeat =
-        SCMHeartbeatRequestProto.newBuilder()
-            .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
-            .setContainerReport(containerReport)
-            .addCommandStatusReports(commandStatusReport)
-            .build();
-    dispatcher.dispatch(heartbeat);
-    Assert.assertEquals(2, eventReceived.get());
-
-
-  }
-
-  /**
-   * Asserts SCM informs datanodes to re-register on a restart.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testScmHeartbeatAfterRestart() throws Exception {
-
-    NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
-    SCMDatanodeHeartbeatDispatcher dispatcher =
-        new SCMDatanodeHeartbeatDispatcher(
-            mockNodeManager, Mockito.mock(EventPublisher.class));
-
-    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-
-    SCMHeartbeatRequestProto heartbeat =
-        SCMHeartbeatRequestProto.newBuilder()
-            .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
-            .build();
-
-    dispatcher.dispatch(heartbeat);
-    // If SCM receives heartbeat from a node after it restarts and the node
-    // is not registered, it should send a Re-Register command back to the node.
-    Mockito.verify(mockNodeManager, Mockito.times(1)).addDatanodeCommand(
-        Mockito.any(UUID.class), Mockito.any(ReregisterCommand.class));
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java
deleted file mode 100644
index 8040cb4..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-/**
- * Test class for {@link SCMSecurityProtocolServer}.
- * */
-public class TestSCMSecurityProtocolServer {
-  private SCMSecurityProtocolServer securityProtocolServer;
-  private OzoneConfiguration config;
-
-  @Rule
-  public Timeout timeout = new Timeout(1000 * 20);
-
-  @Before
-  public void setUp() throws Exception {
-    config = new OzoneConfiguration();
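-    // Bind the security server to an ephemeral port (":0") so that tests
-    // do not collide on a fixed port.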
-    config.set(OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY,
-        OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT + ":0");
-    securityProtocolServer = new SCMSecurityProtocolServer(config, null);
-  }
-
-  @After
-  public void tearDown() {
-    if (securityProtocolServer != null) {
-      securityProtocolServer.stop();
-      securityProtocolServer = null;
-    }
-    config = null;
-  }
-
-  @Test
-  public void testStart() {
-    securityProtocolServer.start();
-  }
-
-  @Test
-  public void testStop() {
-    securityProtocolServer.stop();
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java
deleted file mode 100644
index 60a56e3..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import static org.junit.Assert.*;
-
-
-/**
- * This class is used to test the StorageContainerManagerStarter using a mock
- * class to avoid starting any services and hence just test the CLI component.
- */
-public class TestStorageContainerManagerStarter {
-
-  private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
-  private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
-  private final PrintStream originalOut = System.out;
-  private final PrintStream originalErr = System.err;
-
-  private MockSCMStarter mock;
-
-  @Before
-  public void setUpStreams() {
-    System.setOut(new PrintStream(outContent));
-    System.setErr(new PrintStream(errContent));
-    mock = new MockSCMStarter();
-  }
-
-  @After
-  public void restoreStreams() {
-    System.setOut(originalOut);
-    System.setErr(originalErr);
-  }
-
-  @Test
-  public void testCallsStartWhenServerStarted() throws Exception {
-    executeCommand();
-    assertTrue(mock.startCalled);
-  }
-
-  @Test
-  public void testExceptionThrownWhenStartFails() throws Exception {
-    mock.throwOnStart = true;
-    try {
-      executeCommand();
-      fail("Exception show have been thrown");
-    } catch (Exception e) {
-      assertTrue(true);
-    }
-  }
-
-  @Test
-  public void testStartNotCalledWithInvalidParam() throws Exception {
-    executeCommand("--invalid");
-    assertFalse(mock.startCalled);
-  }
-
-  @Test
-  public void testPassingInitSwitchCallsInit() {
-    executeCommand("--init");
-    assertTrue(mock.initCalled);
-  }
-
-  @Test
-  public void testInitSwitchAcceptsClusterIdSSwitch() {
-    executeCommand("--init", "--clusterid=abcdefg");
-    assertEquals("abcdefg", mock.clusterId);
-  }
-
-  @Test
-  public void testInitSwitchWithInvalidParamDoesNotRun() {
-    executeCommand("--init", "--clusterid=abcdefg", "--invalid");
-    assertFalse(mock.initCalled);
-  }
-
-  @Test
-  public void testUnSuccessfulInitThrowsException() {
-    mock.throwOnInit = true;
-    try {
-      executeCommand("--init");
-      fail("Exception show have been thrown");
-    } catch (Exception e) {
-      assertTrue(true);
-    }
-  }
-
-  @Test
-  public void testGenClusterIdRunsGenerate() {
-    executeCommand("--genclusterid");
-    assertTrue(mock.generateCalled);
-  }
-
-  @Test
-  public void testGenClusterIdWithInvalidParamDoesNotRun() {
-    executeCommand("--genclusterid", "--invalid");
-    assertFalse(mock.generateCalled);
-  }
-
-  @Test
-  public void testUsagePrintedOnInvalidInput() {
-    executeCommand("--invalid");
-    Pattern p = Pattern.compile("^Unknown option:.*--invalid.*\nUsage");
-    Matcher m = p.matcher(errContent.toString());
-    assertTrue(m.find());
-  }
-
-  private void executeCommand(String... args) {
-    new StorageContainerManagerStarter(mock).execute(args);
-  }
-
-  static class MockSCMStarter implements SCMStarterInterface {
-
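-    // Records which lifecycle methods were invoked instead of starting
-    // any real services.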
-    private boolean initStatus = true;
-    private boolean throwOnStart = false;
-    private boolean throwOnInit  = false;
-    private boolean startCalled = false;
-    private boolean initCalled = false;
-    private boolean generateCalled = false;
-    private String clusterId = null;
-
-    public void start(OzoneConfiguration conf) throws Exception {
-      if (throwOnStart) {
-        throw new Exception("Simulated error on start");
-      }
-      startCalled = true;
-    }
-
-    public boolean init(OzoneConfiguration conf, String cid)
-        throws IOException {
-      if (throwOnInit) {
-        throw new IOException("Simulated error on init");
-      }
-      initCalled = true;
-      clusterId = cid;
-      return initStatus;
-    }
-
-    public String generateClusterId() {
-      generateCalled = true;
-      return "static-cluster-id";
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
deleted file mode 100644
index 6b493ed..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ /dev/null
@@ -1,542 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common;
-
-import java.util.List;
-import java.util.Map;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.DeleteBlocksCommandProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.VersionInfo;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.states.endpoint
-    .HeartbeatEndpointTask;
-import org.apache.hadoop.ozone.container.common.states.endpoint
-    .RegisterEndpointTask;
-import org.apache.hadoop.ozone.container.common.states.endpoint
-    .VersionEndpointTask;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.PathUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-import static org.mockito.Mockito.mock;
-
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.ozone.container.common.ContainerTestUtils
-    .createEndpoint;
-import static org.hamcrest.Matchers.lessThanOrEqualTo;
-import static org.mockito.Mockito.when;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-/**
- * Tests the endpoints.
- */
-public class TestEndPoint {
-  private static InetSocketAddress serverAddress;
-  private static RPC.Server scmServer;
-  private static ScmTestMock scmServerImpl;
-  private static File testDir;
-  private static Configuration config;
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    if (scmServer != null) {
-      scmServer.stop();
-    }
-    FileUtil.fullyDelete(testDir);
-  }
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    serverAddress = SCMTestUtils.getReuseableAddress();
-    scmServerImpl = new ScmTestMock();
-    scmServer = SCMTestUtils.startScmRpcServer(SCMTestUtils.getConf(),
-        scmServerImpl, serverAddress, 10);
-    testDir = PathUtils.getTestDir(TestEndPoint.class);
-    config = SCMTestUtils.getConf();
-    config.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath());
-    config.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    config
-        .setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
-    config.set(HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL, "1s");
-  }
-
-  /**
-   * This test asserts that we are able to make a version call to the SCM
-   * server and get back the expected values.
-   */
-  @Test
-  public void testGetVersion() throws Exception {
-    try (EndpointStateMachine rpcEndPoint =
-             createEndpoint(SCMTestUtils.getConf(),
-                 serverAddress, 1000)) {
-      SCMVersionResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .getVersion(null);
-      Assert.assertNotNull(responseProto);
-      Assert.assertEquals(VersionInfo.DESCRIPTION_KEY,
-          responseProto.getKeys(0).getKey());
-      Assert.assertEquals(VersionInfo.getLatestVersion().getDescription(),
-          responseProto.getKeys(0).getValue());
-    }
-  }
-
-  /**
-   * We make the getVersion RPC call, but via the VersionEndpointTask, which
-   * is how the state machine would make the call.
-   */
-  @Test
-  public void testGetVersionTask() throws Exception {
-    OzoneConfiguration conf = SCMTestUtils.getConf();
-    try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
-        serverAddress, 1000)) {
-      DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-      OzoneContainer ozoneContainer = new OzoneContainer(
-          datanodeDetails, conf, getContext(datanodeDetails), null);
-      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
-          conf, ozoneContainer);
-      EndpointStateMachine.EndPointStates newState = versionTask.call();
-
-      // if version call worked the endpoint should automatically move to the
-      // next state.
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER,
-          newState);
-
-      // Now rpcEndpoint should remember the version it got from SCM
-      Assert.assertNotNull(rpcEndPoint.getVersion());
-    }
-  }
-
-  @Test
-  public void testCheckVersionResponse() throws Exception {
-    OzoneConfiguration conf = SCMTestUtils.getConf();
-    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
-        true);
-    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
-        true);
-    try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
-        serverAddress, 1000)) {
-      GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-          .captureLogs(VersionEndpointTask.LOG);
-      DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-      OzoneContainer ozoneContainer = new OzoneContainer(
-          datanodeDetails, conf, getContext(datanodeDetails), null);
-      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
-          conf, ozoneContainer);
-      EndpointStateMachine.EndPointStates newState = versionTask.call();
-
-      // if version call worked the endpoint should automatically move to the
-      // next state.
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER,
-          newState);
-
-      // Now rpcEndpoint should remember the version it got from SCM
-      Assert.assertNotNull(rpcEndPoint.getVersion());
-
-      // Now change the server scmId, so the datanode scmId will be
-      // different from the SCM server response scmId.
-      String newScmId = UUID.randomUUID().toString();
-      scmServerImpl.setScmId(newScmId);
-      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      newState = versionTask.call();
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.SHUTDOWN,
-            newState);
-      List<HddsVolume> volumesList = ozoneContainer.getVolumeSet()
-          .getFailedVolumesList();
-      Assert.assertEquals(1, volumesList.size());
-      File expectedScmDir = new File(volumesList.get(0).getHddsRootDir(),
-          scmServerImpl.getScmId());
-      Assert.assertTrue(logCapturer.getOutput().contains("expected scm " +
-          "directory " + expectedScmDir.getAbsolutePath() + " does not " +
-          "exist"));
-      Assert.assertEquals(0,
-          ozoneContainer.getVolumeSet().getVolumesList().size());
-      Assert.assertEquals(1,
-          ozoneContainer.getVolumeSet().getFailedVolumesList().size());
-
-    }
-  }
-
-
-
-  /**
-   * This test makes a call to an endpoint where there is no SCM server. We
-   * expect the versionTask to be able to handle it.
-   */
-  @Test
-  public void testGetVersionToInvalidEndpoint() throws Exception {
-    OzoneConfiguration conf = SCMTestUtils.getConf();
-    InetSocketAddress nonExistentServerAddress = SCMTestUtils
-        .getReuseableAddress();
-    try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
-        nonExistentServerAddress, 1000)) {
-      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-      OzoneContainer ozoneContainer = new OzoneContainer(
-          datanodeDetails, conf, getContext(datanodeDetails), null);
-      VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
-          conf, ozoneContainer);
-      EndpointStateMachine.EndPointStates newState = versionTask.call();
-
-      // This version call did NOT work, so endpoint should remain in the same
-      // state.
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION,
-          newState);
-    }
-  }
-
-  /**
-   * This test makes a getVersion RPC call, but the DummyStorageServer is
-   * going to respond a little slowly. We will assert that we are still in
-   * the GETVERSION state after the timeout.
-   */
-  @Test
-  public void testGetVersionAssertRpcTimeOut() throws Exception {
-    final long rpcTimeout = 1000;
-    final long tolerance = 100;
-    OzoneConfiguration conf = SCMTestUtils.getConf();
-
-    try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
-        serverAddress, (int) rpcTimeout)) {
-      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-      OzoneContainer ozoneContainer = new OzoneContainer(
-          datanodeDetails, conf, getContext(datanodeDetails), null);
-      VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
-          conf, ozoneContainer);
-
-      scmServerImpl.setRpcResponseDelay(1500);
-      long start = Time.monotonicNow();
-      EndpointStateMachine.EndPointStates newState = versionTask.call();
-      long end = Time.monotonicNow();
-      scmServerImpl.setRpcResponseDelay(0);
-      Assert.assertThat(end - start, lessThanOrEqualTo(rpcTimeout + tolerance));
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION,
-          newState);
-    }
-  }
-
-  @Test
-  public void testRegister() throws Exception {
-    DatanodeDetails nodeToRegister = TestUtils.randomDatanodeDetails();
-    try (EndpointStateMachine rpcEndPoint = createEndpoint(
-        SCMTestUtils.getConf(), serverAddress, 1000)) {
-      SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .register(nodeToRegister.getProtoBufMessage(), TestUtils
-                  .createNodeReport(
-                      getStorageReports(nodeToRegister.getUuid())),
-              TestUtils.getRandomContainerReports(10),
-                  TestUtils.getRandomPipelineReports());
-      Assert.assertNotNull(responseProto);
-      Assert.assertEquals(nodeToRegister.getUuidString(),
-          responseProto.getDatanodeUUID());
-      Assert.assertNotNull(responseProto.getClusterID());
-      Assert.assertEquals(10, scmServerImpl.
-          getContainerCountsForDatanode(nodeToRegister));
-      Assert.assertEquals(1, scmServerImpl.getNodeReportsCount(nodeToRegister));
-    }
-  }
-
-  private StorageReportProto getStorageReports(UUID id) {
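-    // Fabricate a single storage report: capacity 100, used 10, remaining 90.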
-    String storagePath = testDir.getAbsolutePath() + "/" + id;
-    return TestUtils.createStorageReport(id, storagePath, 100, 10, 90, null);
-  }
-
-  private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress,
-      int rpcTimeout, boolean clearDatanodeDetails) throws Exception {
-    Configuration conf = SCMTestUtils.getConf();
-    EndpointStateMachine rpcEndPoint =
-        createEndpoint(conf,
-            scmAddress, rpcTimeout);
-    rpcEndPoint.setState(EndpointStateMachine.EndPointStates.REGISTER);
-    OzoneContainer ozoneContainer = mock(OzoneContainer.class);
-    when(ozoneContainer.getNodeReport()).thenReturn(TestUtils
-        .createNodeReport(getStorageReports(UUID.randomUUID())));
-    ContainerController controller = Mockito.mock(ContainerController.class);
-    when(controller.getContainerReport()).thenReturn(
-        TestUtils.getRandomContainerReports(10));
-    when(ozoneContainer.getController()).thenReturn(controller);
-    when(ozoneContainer.getPipelineReport()).thenReturn(
-            TestUtils.getRandomPipelineReports());
-    RegisterEndpointTask endpointTask =
-        new RegisterEndpointTask(rpcEndPoint, conf, ozoneContainer,
-            mock(StateContext.class));
-    if (!clearDatanodeDetails) {
-      DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-      endpointTask.setDatanodeDetails(datanodeDetails);
-    }
-    endpointTask.call();
-    return rpcEndPoint;
-  }
-
-  @Test
-  public void testRegisterTask() throws Exception {
-    try (EndpointStateMachine rpcEndpoint =
-             registerTaskHelper(serverAddress, 1000, false)) {
-      // Successful register should move us to Heartbeat state.
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT,
-          rpcEndpoint.getState());
-    }
-  }
-
-  @Test
-  public void testRegisterToInvalidEndpoint() throws Exception {
-    InetSocketAddress address = SCMTestUtils.getReuseableAddress();
-    try (EndpointStateMachine rpcEndpoint =
-             registerTaskHelper(address, 1000, false)) {
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER,
-          rpcEndpoint.getState());
-    }
-  }
-
-  @Test
-  public void testRegisterNoContainerID() throws Exception {
-    InetSocketAddress address = SCMTestUtils.getReuseableAddress();
-    try (EndpointStateMachine rpcEndpoint =
-             registerTaskHelper(address, 1000, true)) {
-      // No container ID, therefore we tell the datanode that we would like
-      // it to shut down.
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.SHUTDOWN,
-          rpcEndpoint.getState());
-    }
-  }
-
-  @Test
-  public void testRegisterRpcTimeout() throws Exception {
-    final long rpcTimeout = 1000;
-    final long tolerance = 200;
-    scmServerImpl.setRpcResponseDelay(1500);
-    long start = Time.monotonicNow();
-    registerTaskHelper(serverAddress, 1000, false).close();
-    long end = Time.monotonicNow();
-    scmServerImpl.setRpcResponseDelay(0);
-    Assert.assertThat(end - start, lessThanOrEqualTo(rpcTimeout + tolerance));
-  }
-
-  @Test
-  public void testHeartbeat() throws Exception {
-    DatanodeDetails dataNode = TestUtils.randomDatanodeDetails();
-    try (EndpointStateMachine rpcEndPoint =
-             createEndpoint(SCMTestUtils.getConf(),
-                 serverAddress, 1000)) {
-      SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
-          .setDatanodeDetails(dataNode.getProtoBufMessage())
-          .setNodeReport(TestUtils.createNodeReport(
-              getStorageReports(UUID.randomUUID())))
-          .build();
-
-      SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .sendHeartbeat(request);
-      Assert.assertNotNull(responseProto);
-      Assert.assertEquals(0, responseProto.getCommandsCount());
-    }
-  }
-
-  @Test
-  public void testHeartbeatWithCommandStatusReport() throws Exception {
-    DatanodeDetails dataNode = TestUtils.randomDatanodeDetails();
-    try (EndpointStateMachine rpcEndPoint =
-        createEndpoint(SCMTestUtils.getConf(),
-            serverAddress, 1000)) {
-      // Add some scmCommands for heartbeat response
-      addScmCommands();
-
-
-      SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
-          .setDatanodeDetails(dataNode.getProtoBufMessage())
-          .setNodeReport(TestUtils.createNodeReport(
-              getStorageReports(UUID.randomUUID())))
-          .build();
-
-      SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .sendHeartbeat(request);
-      assertNotNull(responseProto);
-      assertEquals(3, responseProto.getCommandsCount());
-      assertEquals(0, scmServerImpl.getCommandStatusReportCount());
-
-      // Send heartbeat again from heartbeat endpoint task
-      final StateContext stateContext = heartbeatTaskHelper(
-          serverAddress, 3000);
-      Map<Long, CommandStatus> map = stateContext.getCommandStatusMap();
-      assertNotNull(map);
-      assertEquals("Should have 1 objects", 1, map.size());
-      assertTrue(map.containsKey(3L));
-      assertEquals(Type.deleteBlocksCommand, map.get(3L).getType());
-      assertEquals(Status.PENDING, map.get(3L).getStatus());
-
-      scmServerImpl.clearScmCommandRequests();
-    }
-  }
-
-  private void addScmCommands() {
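-    // Queue three commands (cmdIds 1-3): closeContainer, replicateContainer
-    // and deleteBlocks.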
-    SCMCommandProto closeCommand = SCMCommandProto.newBuilder()
-        .setCloseContainerCommandProto(
-            CloseContainerCommandProto.newBuilder().setCmdId(1)
-        .setContainerID(1)
-        .setPipelineID(PipelineID.randomId().getProtobuf())
-        .build())
-        .setCommandType(Type.closeContainerCommand)
-        .build();
-    SCMCommandProto replicationCommand = SCMCommandProto.newBuilder()
-        .setReplicateContainerCommandProto(
-            ReplicateContainerCommandProto.newBuilder()
-        .setCmdId(2)
-        .setContainerID(2)
-        .build())
-        .setCommandType(Type.replicateContainerCommand)
-        .build();
-    SCMCommandProto deleteBlockCommand = SCMCommandProto.newBuilder()
-        .setDeleteBlocksCommandProto(
-            DeleteBlocksCommandProto.newBuilder()
-                .setCmdId(3)
-                .addDeletedBlocksTransactions(
-                    DeletedBlocksTransaction.newBuilder()
-                        .setContainerID(45)
-                        .setCount(1)
-                        .setTxID(23)
-                        .build())
-                .build())
-        .setCommandType(Type.deleteBlocksCommand)
-        .build();
-    scmServerImpl.addScmCommandRequest(closeCommand);
-    scmServerImpl.addScmCommandRequest(deleteBlockCommand);
-    scmServerImpl.addScmCommandRequest(replicationCommand);
-  }
-
-  private StateContext heartbeatTaskHelper(InetSocketAddress scmAddress,
-      int rpcTimeout) throws Exception {
-    Configuration conf = SCMTestUtils.getConf();
-    conf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath());
-    conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    // The mini Ozone cluster will not come up unless this flag is true,
-    // since Ratis will exit if the server port cannot be bound. We can
-    // remove this hard coding once we fix the Ratis default behaviour.
-    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
-
-
-    // Create a datanode state machine for stateContext used by endpoint task
-    try (DatanodeStateMachine stateMachine = new DatanodeStateMachine(
-        TestUtils.randomDatanodeDetails(), conf, null, null);
-         EndpointStateMachine rpcEndPoint =
-            createEndpoint(conf, scmAddress, rpcTimeout)) {
-      HddsProtos.DatanodeDetailsProto datanodeDetailsProto =
-          TestUtils.randomDatanodeDetails().getProtoBufMessage();
-      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.HEARTBEAT);
-
-      final StateContext stateContext =
-          new StateContext(conf, DatanodeStateMachine.DatanodeStates.RUNNING,
-              stateMachine);
-
-      HeartbeatEndpointTask endpointTask =
-          new HeartbeatEndpointTask(rpcEndPoint, conf, stateContext);
-      endpointTask.setDatanodeDetailsProto(datanodeDetailsProto);
-      endpointTask.call();
-      Assert.assertNotNull(endpointTask.getDatanodeDetailsProto());
-
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT,
-          rpcEndPoint.getState());
-      return stateContext;
-    }
-  }
-
-  @Test
-  public void testHeartbeatTask() throws Exception {
-    heartbeatTaskHelper(serverAddress, 1000);
-  }
-
-  @Test
-  public void testHeartbeatTaskToInvalidNode() throws Exception {
-    InetSocketAddress invalidAddress = SCMTestUtils.getReuseableAddress();
-    heartbeatTaskHelper(invalidAddress, 1000);
-  }
-
-  @Test
-  public void testHeartbeatTaskRpcTimeOut() throws Exception {
-    final long rpcTimeout = 1000;
-    final long tolerance = 200;
-    scmServerImpl.setRpcResponseDelay(1500);
-    long start = Time.monotonicNow();
-    InetSocketAddress invalidAddress = SCMTestUtils.getReuseableAddress();
-    heartbeatTaskHelper(invalidAddress, 1000);
-    long end = Time.monotonicNow();
-    scmServerImpl.setRpcResponseDelay(0);
-    Assert.assertThat(end - start,
-        lessThanOrEqualTo(rpcTimeout + tolerance));
-  }
-
-  private StateContext getContext(DatanodeDetails datanodeDetails) {
-    DatanodeStateMachine stateMachine = Mockito.mock(
-        DatanodeStateMachine.class);
-    StateContext context = Mockito.mock(StateContext.class);
-    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
-    Mockito.when(context.getParent()).thenReturn(stateMachine);
-    return context;
-  }
-
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
deleted file mode 100644
index da2ae843..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Make CheckStyle Happy.
- */
-package org.apache.hadoop.ozone.container.common;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
deleted file mode 100644
index f0b1cbb..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.placement;
-
-import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .SCMContainerPlacementRandom;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Asserts that allocation strategy works as expected.
- */
-public class TestContainerPlacement {
-
-  private DescriptiveStatistics computeStatistics(NodeManager nodeManager) {
-    DescriptiveStatistics descriptiveStatistics = new DescriptiveStatistics();
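-    // Each healthy node contributes its utilization ratio (used / capacity).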
-    for (DatanodeDetails dd : nodeManager.getNodes(HEALTHY)) {
-      float weightedValue =
-          nodeManager.getNodeStat(dd).get().getScmUsed().get() / (float)
-              nodeManager.getNodeStat(dd).get().getCapacity().get();
-      descriptiveStatistics.addValue(weightedValue);
-    }
-    return descriptiveStatistics;
-  }
-
-  /**
-   * This test simulates lots of Cluster I/O and updates the metadata in SCM.
-   * We simulate adding and removing containers from the cluster. It asserts
-   * that our placement algorithm has taken the capacity of nodes into
-   * consideration by asserting that the standard deviation of used space on
-   * these nodes has improved.
-   */
-  @Test
-  public void testCapacityPlacementYieldsBetterDataDistribution() throws
-      SCMException {
-    final int opsCount = 200 * 1000;
-    final int nodesRequired = 3;
-    Random random = new Random();
-
-    // The nature of init code in MockNodeManager yields similar clusters.
-    MockNodeManager nodeManagerCapacity = new MockNodeManager(true, 100);
-    MockNodeManager nodeManagerRandom = new MockNodeManager(true, 100);
-    DescriptiveStatistics beforeCapacity =
-        computeStatistics(nodeManagerCapacity);
-    DescriptiveStatistics beforeRandom = computeStatistics(nodeManagerRandom);
-
-    // Assert that our initial cluster layouts are similar.
-    assertEquals(beforeCapacity.getStandardDeviation(), beforeRandom
-        .getStandardDeviation(), 0.001);
-
-    SCMContainerPlacementCapacity capacityPlacer = new
-        SCMContainerPlacementCapacity(nodeManagerCapacity, new Configuration(),
-        null, true, null);
-    SCMContainerPlacementRandom randomPlacer = new
-        SCMContainerPlacementRandom(nodeManagerRandom, new Configuration(),
-        null, true, null);
-
-    for (int x = 0; x < opsCount; x++) {
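-      // Pick a random container size between 0 and 99 GB.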
-      long containerSize = random.nextInt(100) * OzoneConsts.GB;
-      List<DatanodeDetails> nodesCapacity =
-          capacityPlacer.chooseDatanodes(new ArrayList<>(), null, nodesRequired,
-              containerSize);
-      assertEquals(nodesRequired, nodesCapacity.size());
-
-      List<DatanodeDetails> nodesRandom =
-          randomPlacer.chooseDatanodes(nodesCapacity, null, nodesRequired,
-              containerSize);
-
-      // One fifth of all calls are delete
-      if (x % 5 == 0) {
-        deleteContainer(nodeManagerCapacity, nodesCapacity, containerSize);
-        deleteContainer(nodeManagerRandom, nodesRandom, containerSize);
-      } else {
-        createContainer(nodeManagerCapacity, nodesCapacity, containerSize);
-        createContainer(nodeManagerRandom, nodesRandom, containerSize);
-      }
-    }
-    DescriptiveStatistics postCapacity = computeStatistics(nodeManagerCapacity);
-    DescriptiveStatistics postRandom = computeStatistics(nodeManagerRandom);
-
-    // This is a very bold claim, and needs a large number of I/O operations.
-    // The claim in this assertion is that we improved the data distribution
-    // of this cluster in relation to the start state of the cluster.
-    Assert.assertTrue(beforeCapacity.getStandardDeviation() >
-        postCapacity.getStandardDeviation());
-
-    // This asserts that capacity placement yields a better placement
-    // algorithm than random placement, since both clusters started from an
-    // identical state.
-
-    Assert.assertTrue(postRandom.getStandardDeviation() >
-        postCapacity.getStandardDeviation());
-  }
-
-  private void deleteContainer(MockNodeManager nodeManager,
-      List<DatanodeDetails> nodes, long containerSize) {
-    for (DatanodeDetails dd : nodes) {
-      nodeManager.delContainer(dd, containerSize);
-    }
-  }
-
-  private void createContainer(MockNodeManager nodeManager,
-      List<DatanodeDetails> nodes, long containerSize) {
-    for (DatanodeDetails dd : nodes) {
-      nodeManager.addContainer(dd, containerSize);
-    }
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
deleted file mode 100644
index 328ba30..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.placement;
-
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.junit.Test;
-
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Tests for the metrics that support placement.
- */
-public class TestDatanodeMetrics {
-  @Test
-  public void testSCMNodeMetric() {
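-    // A node stat with capacity 100, used 10 and remaining 90.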
-    SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L);
-    assertEquals(100L, (long) stat.getCapacity().get());
-    assertEquals(10L, (long) stat.getScmUsed().get());
-    assertEquals(90L, (long) stat.getRemaining().get());
-    SCMNodeMetric metric = new SCMNodeMetric(stat);
-
-    SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L);
-    assertEquals(100L, (long) newStat.getCapacity().get());
-    assertEquals(10L, (long) newStat.getScmUsed().get());
-    assertEquals(90L, (long) newStat.getRemaining().get());
-
-    SCMNodeMetric newMetric = new SCMNodeMetric(newStat);
-    assertTrue(metric.isEqual(newMetric.get()));
-
-    newMetric.add(stat);
-    assertTrue(newMetric.isGreater(metric.get()));
-
-    SCMNodeMetric zeroMetric = new SCMNodeMetric(new SCMNodeStat());
-    // Assert we can handle zero capacity.
-    assertTrue(metric.isGreater(zeroMetric.get()));
-
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
deleted file mode 100644
index ddd751c..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Make CheckStyle Happy.
- */
-package org.apache.hadoop.ozone.container.placement;
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
deleted file mode 100644
index 318c54d..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-// Test classes for replication.
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
deleted file mode 100644
index 0ecff3f5..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ /dev/null
@@ -1,330 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.testutils;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.scm.node.CommandQueue;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.protocol.VersionResponse;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.LinkedList;
-
-/**
- * A Node Manager to test replication.
- */
-public class ReplicationNodeManagerMock implements NodeManager {
-  private final Map<DatanodeDetails, NodeState> nodeStateMap;
-  private final CommandQueue commandQueue;
-
-  /**
-   * Creates a mock node manager from a map of datanodes to current states.
-   * @param nodeState A node state map.
-   * @param commandQueue A command queue for datanode commands.
-   */
-  public ReplicationNodeManagerMock(Map<DatanodeDetails, NodeState> nodeState,
-                                    CommandQueue commandQueue) {
-    Preconditions.checkNotNull(nodeState);
-    this.nodeStateMap = nodeState;
-    this.commandQueue = commandQueue;
-  }
-
-  /**
-   * Get the number of data nodes in each state.
-   *
-   * @return A mapping from state to the number of nodes in that state.
-   */
-  @Override
-  public Map<String, Integer> getNodeCount() {
-    return null;
-  }
-
-  @Override
-  public Map<String, Long> getNodeInfo() {
-    return null;
-  }
-
-  /**
-   * Gets all Live Datanodes that are currently communicating with SCM.
-   *
-   * @param nodestate - State of the node
-   * @return List of Datanodes that are Heartbeating SCM.
-   */
-  @Override
-  public List<DatanodeDetails> getNodes(NodeState nodestate) {
-    return null;
-  }
-
-  /**
-   * Returns the Number of Datanodes that are communicating with SCM.
-   *
-   * @param nodestate - State of the node
-   * @return int -- count
-   */
-  @Override
-  public int getNodeCount(NodeState nodestate) {
-    return 0;
-  }
-
-  /**
-   * Get all datanodes known to SCM.
-   *
-   * @return List of DatanodeDetails known to SCM.
-   */
-  @Override
-  public List<DatanodeDetails> getAllNodes() {
-    return null;
-  }
-
-  /**
-   * Returns the aggregated node stats.
-   *
-   * @return the aggregated node stats.
-   */
-  @Override
-  public SCMNodeStat getStats() {
-    return null;
-  }
-
-  /**
-   * Return a map of node stats.
-   *
-   * @return a map of individual node stats (live/stale but not dead).
-   */
-  @Override
-  public Map<DatanodeDetails, SCMNodeStat> getNodeStats() {
-    return null;
-  }
-
-  /**
-   * Return the node stat of the specified datanode.
-   *
-   * @param dd - datanode details.
-   * @return node stat if it is live/stale, null if it is decommissioned or
-   * doesn't exist.
-   */
-  @Override
-  public SCMNodeMetric getNodeStat(DatanodeDetails dd) {
-    return null;
-  }
-
-
-  /**
-   * Returns the node state of a specific node.
-   *
-   * @param dd - DatanodeDetails
-   * @return Healthy/Stale/Dead.
-   */
-  @Override
-  public NodeState getNodeState(DatanodeDetails dd) {
-    return nodeStateMap.get(dd);
-  }
-
-  /**
-   * Get set of pipelines a datanode is part of.
-   * @param dnId - datanode details.
-   * @return Set of PipelineID
-   */
-  @Override
-  public Set<PipelineID> getPipelines(DatanodeDetails dnId) {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Add pipeline information in the NodeManager.
-   * @param pipeline - Pipeline to be added
-   */
-  @Override
-  public void addPipeline(Pipeline pipeline) {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Remove a pipeline information from the NodeManager.
-   * @param pipeline - Pipeline to be removed
-   */
-  @Override
-  public void removePipeline(Pipeline pipeline) {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  @Override
-  public void addContainer(DatanodeDetails datanodeDetails,
-                           ContainerID containerId)
-      throws NodeNotFoundException {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Update set of containers available on a datanode.
-   * @param uuid - datanode details.
-   * @param containerIds - Set of containerIDs
-   * @throws NodeNotFoundException - if datanode is not known. For new datanode
-   *                                 use addDatanodeInContainerMap call.
-   */
-  @Override
-  public void setContainers(DatanodeDetails uuid, Set<ContainerID> containerIds)
-      throws NodeNotFoundException {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Return set of containerIDs available on a datanode.
-   * @param uuid - datanode details.
-   * @return - set of containerIDs
-   */
-  @Override
-  public Set<ContainerID> getContainers(DatanodeDetails uuid) {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated
-   * with it. If the stream is already closed then invoking this
-   * method has no effect.
-   * <p>
-   * <p> As noted in {@link AutoCloseable#close()}, cases where the
-   * close may fail require careful attention. It is strongly advised
-   * to relinquish the underlying resources and to internally
-   * <em>mark</em> the {@code Closeable} as closed, prior to throwing
-   * the {@code IOException}.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-
-  }
-
-  /**
-   * Gets the version info from SCM.
-   *
-   * @param versionRequest - version Request.
-   * @return - returns SCM version info and other required information needed by
-   * datanode.
-   */
-  @Override
-  public VersionResponse getVersion(SCMVersionRequestProto versionRequest) {
-    return null;
-  }
-
-  /**
-   * Register the node if the node finds that it is not registered with any SCM.
-   *
-   * @param dd DatanodeDetails
-   * @param nodeReport NodeReportProto
-   * @param pipelineReportsProto PipelineReportsProto
-   * @return RegisteredCommand
-   */
-  @Override
-  public RegisteredCommand register(DatanodeDetails dd,
-                                    NodeReportProto nodeReport,
-                                    PipelineReportsProto pipelineReportsProto) {
-    return null;
-  }
-
-  /**
-   * Send heartbeat to indicate the datanode is alive and doing well.
-   *
-   * @param dd - Datanode Details.
-   * @return List of SCMCommands from the heartbeat response.
-   */
-  @Override
-  public List<SCMCommand> processHeartbeat(DatanodeDetails dd) {
-    return null;
-  }
-
-  @Override
-  public Boolean isNodeRegistered(
-      DatanodeDetails datanodeDetails) {
-    return null;
-  }
-
-  /**
-   * Clears all nodes from the node Manager.
-   */
-  public void clearMap() {
-    this.nodeStateMap.clear();
-  }
-
-  /**
-   * Adds a node to the existing Node manager. This is used only for test
-   * purposes.
-   * @param id DatanodeDetails
-   * @param state State you want to put that node to.
-   */
-  public void addNode(DatanodeDetails id, NodeState state) {
-    nodeStateMap.put(id, state);
-  }
-
-  @Override
-  public void addDatanodeCommand(UUID dnId, SCMCommand command) {
-    this.commandQueue.addCommand(dnId, command);
-  }
-
-  /**
-   * Empty implementation for processNodeReport.
-   * @param dnUuid - datanode that sent the report (ignored).
-   * @param nodeReport - the node report (ignored).
-   */
-  @Override
-  public void processNodeReport(DatanodeDetails dnUuid,
-                                NodeReportProto nodeReport) {
-    // do nothing.
-  }
-
-  @Override
-  public void onMessage(CommandForDatanode commandForDatanode,
-                        EventPublisher publisher) {
-    // do nothing.
-  }
-
-  @Override
-  public List<SCMCommand> getCommandQueue(UUID dnID) {
-    return null;
-  }
-
-  @Override
-  public DatanodeDetails getNodeByUuid(String address) {
-    return null;
-  }
-
-  @Override
-  public List<DatanodeDetails> getNodesByAddress(String address) {
-    return new LinkedList<>();
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
deleted file mode 100644
index 4e8a90b..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.testutils;
-// Helper classes for ozone and container tests.
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/resources/nodegroup-mapping b/hadoop-hdds/server-scm/src/test/resources/nodegroup-mapping
deleted file mode 100644
index 01f7d5d..0000000
--- a/hadoop-hdds/server-scm/src/test/resources/nodegroup-mapping
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-host1   /rack1/ng1
-host2   /rack1/ng1
-host3   /rack1/ng2
-host4   /rack1/ng2
-1.2.3.4 /rack1/ng1
-2.3.4.5 /rack1/ng1
-3.4.5.6 /rack1/ng2
-4.5.6.7 /rack1/ng2
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/resources/rack-mapping b/hadoop-hdds/server-scm/src/test/resources/rack-mapping
deleted file mode 100644
index 47eac97..0000000
--- a/hadoop-hdds/server-scm/src/test/resources/rack-mapping
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-host1   /rack1
-host2   /rack1
-host3   /rack1
-host4   /rack1
-1.2.3.4 /rack1
-2.3.4.5 /rack1
-3.4.5.6 /rack1
-4.5.6.7 /rack1
\ No newline at end of file
diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml
deleted file mode 100644
index 6f0be62..0000000
--- a/hadoop-hdds/tools/pom.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdds</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-
-  <artifactId>hadoop-hdds-tools</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Tools</description>
-  <name>Apache Hadoop HDDS Tools</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>commons-cli</groupId>
-      <artifactId>commons-cli</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.xerial</groupId>
-      <artifactId>sqlite-jdbc</artifactId>
-      <version>3.25.2</version>
-    </dependency>
-
-  </dependencies>
-</project>
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java
deleted file mode 100644
index f42a8f8..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.cli.MissingSubcommandException;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-import java.util.concurrent.Callable;
-
-/**
- * Subcommand to group replication manager related operations.
- */
-@Command(
-    name = "replicationmanager",
-    description = "ReplicationManager specific operations",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class,
-    subcommands = {
-        ReplicationManagerStartSubcommand.class,
-        ReplicationManagerStopSubcommand.class,
-        ReplicationManagerStatusSubcommand.class
-    })
-public class ReplicationManagerCommands implements Callable<Void> {
-
-  @ParentCommand
-  private SCMCLI parent;
-
-  public SCMCLI getParent() {
-    return parent;
-  }
-
-  @Override
-  public Void call() throws Exception {
-    throw new MissingSubcommandException(
-        this.parent.getCmd().getSubcommands().get("replicationmanager"));
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java
deleted file mode 100644
index 1adec6b..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-import java.util.concurrent.Callable;
-
-/**
- * This is the handler that processes the replication manager start command.
- */
-@Command(
-    name = "start",
-    description = "Start ReplicationManager",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class ReplicationManagerStartSubcommand implements Callable<Void> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ReplicationManagerStartSubcommand.class);
-
-  @ParentCommand
-  private ReplicationManagerCommands parent;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-      scmClient.startReplicationManager();
-      LOG.info("Starting ReplicationManager...");
-      return null;
-    }
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java
deleted file mode 100644
index 2ebf28c..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-import java.util.concurrent.Callable;
-
-/**
- * This is the handler that processes the replication manager status command.
- */
-@Command(
-    name = "status",
-    description = "Check if ReplicationManager is running or not",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class ReplicationManagerStatusSubcommand implements Callable<Void> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ReplicationManagerStatusSubcommand.class);
-
-  @ParentCommand
-  private ReplicationManagerCommands parent;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-
-      boolean execReturn = scmClient.getReplicationManagerStatus();
-
-      // Report whether the ReplicationManager is currently running.
-      if (execReturn) {
-        LOG.info("ReplicationManager is Running.");
-      } else {
-        LOG.info("ReplicationManager is Not Running.");
-      }
-      return null;
-    }
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java
deleted file mode 100644
index 7cafd01..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-import java.util.concurrent.Callable;
-
-/**
- * This is the handler that processes the replication manager stop command.
- */
-@Command(
-    name = "stop",
-    description = "Stop ReplicationManager",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class ReplicationManagerStopSubcommand implements Callable<Void> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ReplicationManagerStopSubcommand.class);
-
-  @ParentCommand
-  private ReplicationManagerCommands parent;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-      scmClient.stopReplicationManager();
-      LOG.info("Stopping ReplicationManager...");
-      LOG.info("Requested SCM to stop ReplicationManager, " +
-          "it might take some time for the ReplicationManager to stop.");
-      return null;
-    }
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
deleted file mode 100644
index 0b5c18e..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.cli.container.ContainerCommands;
-import org.apache.hadoop.hdds.scm.cli.pipeline.PipelineCommands;
-import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.NativeCodeLoader;
-
-import org.apache.commons.lang3.StringUtils;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.hdds.HddsUtils.getScmSecurityClient;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CLIENT_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CONTAINER_SIZE_DEFAULT;
-import org.apache.log4j.ConsoleAppender;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-
-/**
- * This class is the CLI of SCM.
- */
-@Command(name = "ozone scmcli", hidden = true, description =
-    "Developer tools to handle SCM specific "
-        + "operations.",
-    versionProvider = HddsVersionProvider.class,
-    subcommands = {
-        SafeModeCommands.class,
-        ContainerCommands.class,
-        PipelineCommands.class,
-        TopologySubcommand.class,
-        ReplicationManagerCommands.class
-    },
-    mixinStandardHelpOptions = true)
-public class SCMCLI extends GenericCli {
-
-  @Option(names = {"--scm"}, description = "The destination scm (host:port)")
-  private String scm = "";
-
-  /**
-   * Main entry point for the SCM shell command handling.
-   *
-   * @param argv - command line argument strings.
-   * @throws Exception
-   */
-  public static void main(String[] argv) throws Exception {
-
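-    // Reset log4j so CLI output goes straight to the console with a bare
-    // "%m%n" pattern, and silence native code loader warnings.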
-    LogManager.resetConfiguration();
-    Logger.getRootLogger().setLevel(Level.INFO);
-    Logger.getRootLogger()
-        .addAppender(new ConsoleAppender(new PatternLayout("%m%n")));
-    Logger.getLogger(NativeCodeLoader.class).setLevel(Level.ERROR);
-
-    new SCMCLI().run(argv);
-  }
-
-  public ScmClient createScmClient()
-      throws IOException {
-
-    OzoneConfiguration ozoneConf = createOzoneConfiguration();
-    if (StringUtils.isNotEmpty(scm)) {
-      ozoneConf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scm);
-    }
-    if (!HddsUtils.getHostNameFromConfigKeys(ozoneConf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY).isPresent()) {
-
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY
-              + " should be set in ozone-site.xml or with the --scm option");
-    }
-
-    long version = RPC.getProtocolVersion(
-        StorageContainerLocationProtocolPB.class);
-    InetSocketAddress scmAddress =
-        getScmAddressForClients(ozoneConf);
-    int containerSizeGB = (int) ozoneConf.getStorageSize(
-        OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT,
-        StorageUnit.GB);
-    ContainerOperationClient
-        .setContainerSizeB(containerSizeGB * OzoneConsts.GB);
-
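-    // Build the protobuf RPC proxy to SCM, wrap it in the client-side
-    // translator, and add tracing around the resulting protocol client.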
-    RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    StorageContainerLocationProtocol client =
-        TracingUtil.createProxy(
-        new StorageContainerLocationProtocolClientSideTranslatorPB(
-            RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
-                scmAddress, UserGroupInformation.getCurrentUser(), ozoneConf,
-                NetUtils.getDefaultSocketFactory(ozoneConf),
-                Client.getRpcTimeout(ozoneConf))),
-            StorageContainerLocationProtocol.class, ozoneConf);
-
-    XceiverClientManager xceiverClientManager = null;
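-    // In secure clusters, fetch the cluster CA certificate from SCM so that
-    // the Xceiver client can validate TLS connections to datanodes.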
-    if (OzoneSecurityUtil.isSecurityEnabled(ozoneConf)) {
-      SecurityConfig securityConfig = new SecurityConfig(ozoneConf);
-      SCMSecurityProtocol scmSecurityProtocolClient = getScmSecurityClient(
-          (OzoneConfiguration) securityConfig.getConfiguration());
-      String caCertificate =
-          scmSecurityProtocolClient.getCACertificate();
-      xceiverClientManager = new XceiverClientManager(ozoneConf,
-          OzoneConfiguration.of(ozoneConf).getObject(XceiverClientManager
-              .ScmClientConfig.class), caCertificate);
-    } else {
-      xceiverClientManager = new XceiverClientManager(ozoneConf);
-    }
-    return new ContainerOperationClient(client, xceiverClientManager);
-  }
-
-  public void checkContainerExists(ScmClient scmClient, long containerId)
-      throws IOException {
-    ContainerInfo container = scmClient.getContainer(containerId);
-    if (container == null) {
-      throw new IllegalArgumentException("No such container " + containerId);
-    }
-  }
-
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java
deleted file mode 100644
index f969f4c..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * This is the handler that processes the safe mode check command.
- */
-@Command(
-    name = "status",
-    description = "Check if SCM is in safe mode",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class SafeModeCheckSubcommand implements Callable<Void> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SafeModeCheckSubcommand.class);
-
-  @ParentCommand
-  private SafeModeCommands parent;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-
-      boolean execReturn = scmClient.inSafeMode();
-
-      // Report the current safe mode status.
-      if (execReturn) {
-        LOG.info("SCM is in safe mode.");
-      } else {
-        LOG.info("SCM is out of safe mode.");
-      }
-      return null;
-    }
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java
deleted file mode 100644
index 3a9a63c..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.cli.MissingSubcommandException;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * Subcommand to group safe mode related operations.
- */
-@Command(
-    name = "safemode",
-    description = "Safe mode specific operations",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class,
-    subcommands = {
-        SafeModeCheckSubcommand.class,
-        SafeModeExitSubcommand.class,
-    })
-public class SafeModeCommands implements Callable<Void> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SafeModeCommands.class);
-
-  @ParentCommand
-  private SCMCLI parent;
-
-  public SCMCLI getParent() {
-    return parent;
-  }
-
-  @Override
-  public Void call() throws Exception {
-    throw new MissingSubcommandException(
-        this.parent.getCmd().getSubcommands().get("safemode"));
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java
deleted file mode 100644
index 9f1db45..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * This is the handler that processes the safe mode exit command.
- */
-@Command(
-    name = "exit",
-    description = "Force SCM out of safe mode",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class SafeModeExitSubcommand implements Callable<Void> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SafeModeExitSubcommand.class);
-
-  @ParentCommand
-  private SafeModeCommands parent;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-
-      boolean execReturn = scmClient.forceExitSafeMode();
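-      // Assumption: forceExitSafeMode() returns true once SCM has left safe
-      // mode; only the success case is logged here.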
-      if (execReturn) {
-        LOG.info("SCM exited safe mode successfully.");
-      }
-      return null;
-    }
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java
deleted file mode 100644
index 7de2e4b..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.cli;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import picocli.CommandLine;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONED;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONING;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.TreeSet;
-import java.util.concurrent.Callable;
-
-/**
- * Handler of printTopology command.
- */
-@CommandLine.Command(
-    name = "printTopology",
-    description = "Print a tree of the network topology as reported by SCM",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class TopologySubcommand implements Callable<Void> {
-
-  @CommandLine.ParentCommand
-  private SCMCLI parent;
-
-  private static final List<HddsProtos.NodeState> stateArray =
-      new ArrayList<>();
-
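-  // Node states are queried and printed in this fixed order.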
-  static {
-    stateArray.add(HEALTHY);
-    stateArray.add(STALE);
-    stateArray.add(DEAD);
-    stateArray.add(DECOMMISSIONING);
-    stateArray.add(DECOMMISSIONED);
-  }
-
-  @CommandLine.Option(names = {"-o", "--order"},
-      description = "Print Topology ordered by network location")
-  private boolean order;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.createScmClient()) {
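-      // Query the cluster for nodes in each known state and print every
-      // non-empty group.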
-      for (HddsProtos.NodeState state : stateArray) {
-        List<HddsProtos.Node> nodes = scmClient.queryNode(state,
-            HddsProtos.QueryScope.CLUSTER, "");
-        if (nodes != null && !nodes.isEmpty()) {
-          // show node state
-          System.out.println("State = " + state.toString());
-          if (order) {
-            printOrderedByLocation(nodes);
-          } else {
-            printNodesWithLocation(nodes);
-          }
-        }
-      }
-      return null;
-    }
-  }
-
-  // Format
-  // Location: rack1
-  //  ipAddress(hostName)
-  private void printOrderedByLocation(List<HddsProtos.Node> nodes) {
-    HashMap<String, TreeSet<DatanodeDetails>> tree =
-        new HashMap<>();
-    for (HddsProtos.Node node : nodes) {
-      String location = node.getNodeID().getNetworkLocation();
-      if (location == null) {
-        location = "NA"; // group nodes with no reported location together
-      }
-      tree.computeIfAbsent(location, key -> new TreeSet<>())
-          .add(DatanodeDetails.getFromProtoBuf(node.getNodeID()));
-    }
-    ArrayList<String> locations = new ArrayList<>(tree.keySet());
-    Collections.sort(locations);
-
-    locations.forEach(location -> {
-      System.out.println("Location: " + location);
-      tree.get(location).forEach(node -> {
-        System.out.println(" " + node.getIpAddress() + "(" + node.getHostName()
-            + ")");
-      });
-    });
-  }
-
-  // Format "ipAddress(hostName)    networkLocation"
-  private void printNodesWithLocation(Collection<HddsProtos.Node> nodes) {
-    nodes.forEach(node -> {
-      System.out.print(" " + node.getNodeID().getIpAddress() + "(" +
-          node.getNodeID().getHostName() + ")");
-      System.out.println("    " +
-          (node.getNodeID().getNetworkLocation() != null ?
-              node.getNodeID().getNetworkLocation() : "NA"));
-    });
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java
deleted file mode 100644
index 4bf2013..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli.container;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * The handler of the close container command.
- */
-@Command(
-    name = "close",
-    description = "close container",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class CloseSubcommand implements Callable<Void> {
-
-  @ParentCommand
-  private ContainerCommands parent;
-
-  @Parameters(description = "Id of the container to close")
-  private long containerId;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-      parent.getParent().checkContainerExists(scmClient, containerId);
-      scmClient.closeContainer(containerId);
-      return null;
-    }
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java
deleted file mode 100644
index bf17bfd..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli.container;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.cli.MissingSubcommandException;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-import java.util.concurrent.Callable;
-
-/**
- * Subcommand to group container related operations.
- */
-@Command(
-    name = "container",
-    description = "Container specific operations",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class,
-    subcommands = {
-        ListSubcommand.class,
-        InfoSubcommand.class,
-        DeleteSubcommand.class,
-        CreateSubcommand.class,
-        CloseSubcommand.class
-    })
-public class ContainerCommands implements Callable<Void> {
-
-  @ParentCommand
-  private SCMCLI parent;
-
-  public SCMCLI getParent() {
-    return parent;
-  }
-
-  @Override
-  public Void call() throws Exception {
-    throw new MissingSubcommandException(
-        this.parent.getCmd().getSubcommands().get("container"));
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java
deleted file mode 100644
index eb79e50..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli.container;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .ContainerWithPipeline;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * This is the handler that processes the container creation command.
- */
-@Command(
-    name = "create",
-    description = "Create container",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class CreateSubcommand implements Callable<Void> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(CreateSubcommand.class);
-
-  @ParentCommand
-  private ContainerCommands parent;
-
-  @Option(names = {"-o", "--owner"},
-      description = "Owner of the new container",
-      defaultValue = "OZONE", required = false)
-  private String owner;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-      ContainerWithPipeline container = scmClient.createContainer(owner);
-      LOG.info("Container {} is created.",
-          container.getContainerInfo().getContainerID());
-      return null;
-    }
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java
deleted file mode 100644
index 4989e03..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.cli.container;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.Parameters;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * This is the handler that processes the delete container command.
- */
-@Command(
-    name = "delete",
-    description = "Delete container",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class DeleteSubcommand implements Callable<Void> {
-
-  @Parameters(description = "Id of the container to delete")
-  private long containerId;
-
-  @Option(names = {"-f",
-      "--force"}, description = "forcibly delete the container")
-  private boolean force;
-
-  @ParentCommand
-  private ContainerCommands parent;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-      parent.getParent().checkContainerExists(scmClient, containerId);
-      scmClient.deleteContainer(containerId, force);
-      return null;
-    }
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
deleted file mode 100644
index 31fdb1d..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli.container;
-
-import java.util.concurrent.Callable;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerDataProto;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .ContainerWithPipeline;
-
-import com.google.common.base.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * This is the handler that processes the container info command.
- */
-@Command(
-    name = "info",
-    description = "Show information about a specific container",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class InfoSubcommand implements Callable<Void> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(InfoSubcommand.class);
-
-  @ParentCommand
-  private ContainerCommands parent;
-
-  @Parameters(description = "Decimal id of the container.")
-  private long containerID;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-      ContainerWithPipeline container =
-          scmClient.getContainerWithPipeline(containerID);
-      Preconditions.checkNotNull(container, "Container cannot be null");
-
-      ContainerDataProto containerData = scmClient.readContainer(container
-          .getContainerInfo().getContainerID(), container.getPipeline());
-
-      // Print container report info.
-      LOG.info("Container id: {}", containerID);
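-      // Any container state other than OPEN is reported as CLOSED here.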
-      String openStatus =
-          containerData.getState() == ContainerDataProto.State.OPEN ? "OPEN" :
-              "CLOSED";
-      LOG.info("Container State: {}", openStatus);
-      LOG.info("Container Path: {}", containerData.getContainerPath());
-
-      // Output meta data.
-      String metadataStr = containerData.getMetadataList().stream().map(
-          p -> p.getKey() + ":" + p.getValue())
-          .collect(Collectors.joining(", "));
-      LOG.info("Container Metadata: {}", metadataStr);
-
-      // Print pipeline of an existing container.
-      String machinesStr = container.getPipeline().getNodes().stream().map(
-              DatanodeDetails::getHostName).collect(Collectors.joining(","));
-      LOG.info("Datanodes: [{}]", machinesStr);
-      return null;
-    }
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
deleted file mode 100644
index 5169c80..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli.container;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Help.Visibility;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * This is the handler that processes the container list command.
- */
-@Command(
-    name = "list",
-    description = "List containers",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class ListSubcommand implements Callable<Void> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ListSubcommand.class);
-
-  @ParentCommand
-  private ContainerCommands parent;
-
-  @Option(names = {"-s", "--start"},
-      description = "Container id to start the iteration", required = true)
-  private long startId = 1;
-
-  @Option(names = {"-c", "--count"},
-      description = "Maximum number of containers to list",
-      defaultValue = "20", showDefaultValue = Visibility.ALWAYS)
-  private int count = 20;
-
-  private void outputContainerInfo(ContainerInfo containerInfo)
-      throws IOException {
-    // Print container report info.
-    LOG.info("{}", containerInfo.toJsonString());
-  }
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-
-      List<ContainerInfo> containerList =
-          scmClient.listContainer(startId, count);
-
-      // Output data list
-      for (ContainerInfo container : containerList) {
-        outputContainerInfo(container);
-      }
-      return null;
-    }
-  }
-}
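
For reference, a minimal sketch (not part of this patch) of paging through the full container set using the same client call as ListSubcommand above. It assumes ContainerInfo.getContainerID() returns the numeric container ID and that listContainer returns containers with IDs at or after the given start ID; the enclosing method is assumed to declare IOException.

    long nextId = 1;
    while (true) {
      // Fetch the next batch of up to 20 containers, as the subcommand does.
      List<ContainerInfo> batch = scmClient.listContainer(nextId, 20);
      if (batch.isEmpty()) {
        break; // no more containers
      }
      for (ContainerInfo c : batch) {
        System.out.println(c.toJsonString());
      }
      // Advance past the last ID seen so the next call fetches fresh results.
      nextId = batch.get(batch.size() - 1).getContainerID() + 1;
    }
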
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java
deleted file mode 100644
index ff8adbc..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains all of the container related scm commands.
- */
-package org.apache.hadoop.hdds.scm.cli.container;
\ No newline at end of file
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java
deleted file mode 100644
index d358b3c..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * SCM related cli tools.
- */
-package org.apache.hadoop.hdds.scm.cli;
\ No newline at end of file
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java
deleted file mode 100644
index ec4b1b7..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.cli.pipeline;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import picocli.CommandLine;
-
-import java.util.concurrent.Callable;
-
-/**
- * Handler of activate pipeline command.
- */
-@CommandLine.Command(
-    name = "activate",
-    description = "Activates the given Pipeline",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class ActivatePipelineSubcommand implements Callable<Void> {
-
-  @CommandLine.ParentCommand
-  private PipelineCommands parent;
-
-  @CommandLine.Parameters(description = "ID of the pipeline to activate")
-  private String pipelineId;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-      scmClient.activatePipeline(
-          HddsProtos.PipelineID.newBuilder().setId(pipelineId).build());
-      return null;
-    }
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java
deleted file mode 100644
index 89a280e..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.cli.pipeline;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import picocli.CommandLine;
-
-import java.util.concurrent.Callable;
-
-/**
- * Handler of close pipeline command.
- */
-@CommandLine.Command(
-    name = "close",
-    description = "Close pipeline",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class ClosePipelineSubcommand implements Callable<Void> {
-
-  @CommandLine.ParentCommand
-  private PipelineCommands parent;
-
-  @CommandLine.Parameters(description = "ID of the pipeline to close")
-  private String pipelineId;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-      scmClient.closePipeline(
-          HddsProtos.PipelineID.newBuilder().setId(pipelineId).build());
-      return null;
-    }
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java
deleted file mode 100644
index 4f4f741..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.cli.pipeline;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import picocli.CommandLine;
-
-import java.util.concurrent.Callable;
-
-/**
- * Handler of deactivate pipeline command.
- */
-@CommandLine.Command(
-    name = "deactivate",
-    description = "Deactivates the given Pipeline",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class DeactivatePipelineSubcommand implements Callable<Void> {
-
-  @CommandLine.ParentCommand
-  private PipelineCommands parent;
-
-  @CommandLine.Parameters(description = "ID of the pipeline to deactivate")
-  private String pipelineId;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-      scmClient.deactivatePipeline(
-          HddsProtos.PipelineID.newBuilder().setId(pipelineId).build());
-      return null;
-    }
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
deleted file mode 100644
index 8b3b1b3..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.cli.pipeline;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import picocli.CommandLine;
-
-import java.util.concurrent.Callable;
-
-/**
- * Handler of list pipelines command.
- */
-@CommandLine.Command(
-    name = "list",
-    description = "List all active pipelines",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class ListPipelinesSubcommand implements Callable<Void> {
-
-  @CommandLine.ParentCommand
-  private PipelineCommands parent;
-
-  @CommandLine.Option(names = {"-ffc", "--filterByFactor"},
-      description = "Filter listed pipelines by Factor(ONE/one)",
-      defaultValue = "",
-      required = false)
-  private String factor;
-
-  @CommandLine.Option(names = {"-fst", "--filterByState"},
-      description = "Filter listed pipelines by State(OPEN/CLOSE)",
-      defaultValue = "",
-      required = false)
-  private String state;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.getParent().createScmClient()) {
-      if (isNullOrEmpty(factor) && isNullOrEmpty(state)) {
-        scmClient.listPipelines().forEach(System.out::println);
-      } else {
-        scmClient.listPipelines().stream()
-            .filter(p -> (isNullOrEmpty(factor)
-                || p.getFactor().toString().equalsIgnoreCase(factor))
-                && (isNullOrEmpty(state)
-                || p.getPipelineState().toString().equalsIgnoreCase(state)))
-            .forEach(System.out::println);
-      }
-      return null;
-    }
-  }
-
-  protected static boolean isNullOrEmpty(String str) {
-    return str == null || str.trim().isEmpty();
-  }
-}
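
For reference, a minimal sketch (not part of this patch) of the same state filtering done directly against the client API used above; it assumes a ScmClient obtained as in the subcommand:

    // Print only pipelines whose state matches OPEN, ignoring case.
    scmClient.listPipelines().stream()
        .filter(p -> p.getPipelineState().toString().equalsIgnoreCase("OPEN"))
        .forEach(System.out::println);
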
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java
deleted file mode 100644
index 948a51a..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli.pipeline;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.cli.MissingSubcommandException;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-import java.util.concurrent.Callable;
-
-/**
- * Subcommand to group pipeline related operations.
- */
-@Command(
-    name = "pipeline",
-    description = "Pipeline specific operations",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class,
-    subcommands = {
-        ListPipelinesSubcommand.class,
-        ActivatePipelineSubcommand.class,
-        DeactivatePipelineSubcommand.class,
-        ClosePipelineSubcommand.class
-    })
-public class PipelineCommands implements Callable<Void> {
-
-  @ParentCommand
-  private SCMCLI parent;
-
-  public SCMCLI getParent() {
-    return parent;
-  }
-
-  @Override
-  public Void call() throws Exception {
-    throw new MissingSubcommandException(
-        this.parent.getCmd().getSubcommands().get("pipeline"));
-  }
-}
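
For reference, a hypothetical sketch (not part of this patch) of how picocli dispatches these nested subcommands; it assumes a picocli version that provides CommandLine.execute and that SCMCLI is constructible as shown:

    // "pipeline" resolves to PipelineCommands, "list" to ListPipelinesSubcommand.
    CommandLine cmd = new CommandLine(new SCMCLI());
    int exitCode = cmd.execute("pipeline", "list", "--filterByState", "OPEN");
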
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/package-info.java
deleted file mode 100644
index 64924d1..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains all of the pipeline related scm commands.
- */
-package org.apache.hadoop.hdds.scm.cli.pipeline;
\ No newline at end of file
diff --git a/hadoop-ozone/.gitignore b/hadoop-ozone/.gitignore
deleted file mode 100644
index 93c6831..0000000
--- a/hadoop-ozone/.gitignore
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*~
-*.pyc
-.blockade
-.cache
-__pycache__
diff --git a/hadoop-ozone/Jenkinsfile b/hadoop-ozone/Jenkinsfile
deleted file mode 100644
index 0055486..0000000
--- a/hadoop-ozone/Jenkinsfile
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-node("ubuntu") {
-    docker.image('elek/ozone-build').pull()
-    docker.image('elek/ozone-build').inside("--privileged") {
-
-        stage('Checkout') {
-            checkout scm
-            //use this for external Jenkinsfile builds
-            //checkout poll: false, scm: [$class: 'GitSCM', branches: [[name: env.branch]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: 'github-token', url: "https://github.com/${organization}/${repository}.git"]]]
-
-        }
-
-        stage('Clean') {
-            status = sh returnStatus: true, script: 'mvn clean -P hdds -am -pl :hadoop-ozone-dist '
-        }
-
-        stageRunner('Author', "author", {})
-
-        stageRunner('Licence', "rat", {
-            archiveArtifacts 'target/rat-aggregated.txt'
-        }, 'artifact/target/rat-aggregated.txt/*view*/')
-
-        stageRunner('Build', "build", {})
-
-        stageRunner('Findbugs', "findbugs", {
-            archiveArtifacts 'target/findbugs-all.txt'
-
-        }, 'artifact/target/findbugs-all.txt/*view*/')
-
-        stageRunner('Checkstyle', "checkstyle", {
-            checkstyle canComputeNew: false, canRunOnFailed: true, defaultEncoding: '', healthy: '', pattern: '**/checkstyle-errors.xml', unHealthy: ''
-        }, 'checkstyleResult')
-
-        stageRunner('Acceptance', "acceptance", {
-             archiveArtifacts 'hadoop-ozone/dist/target/ozone-0.4.0-SNAPSHOT/smoketest/result/**'
-        })
-
-        stageRunner('Unit test', "unit", {
-            junit '**/target/surefire-reports/*.xml'
-        }, 'testReport/')
-
-    }
-
-}
-
-def stageRunner(name, type, processResult, url = '') {
-    try {
-        stage(name) {
-            prStatusStart(type)
-            status = sh returnStatus: true, script: 'hadoop-ozone/dev-support/checks/' + type + '.sh'
-            processResult()
-            prStatusResult(status, type, url)
-        }
-        return true
-    } catch (RuntimeException ex) {
-        currentBuild.result = "FAILED"
-        return false
-    }
-}
-
-def githubStatus(name, status, description, url='') {
-  commitId = sh(returnStdout: true, script: 'git rev-parse HEAD')
-  context = 'ci/ozone/' + name
-  if (url) {
-    githubNotify account: 'apache', context: context, credentialsId: 'github-pr-ozone', description: description, repo: 'hadoop', sha: commitId, status: status, targetUrl: url
-  } else {
-    githubNotify account: 'apache', context: context, credentialsId: 'github-pr-ozone', description: description, repo: 'hadoop', sha: commitId, status: status
-  }
-}
-def prStatusStart(name) {
-    githubStatus(name, "PENDING", name + " is started")
-}
-
-def prStatusResult(responseCode, name, url = '') {
-    status = "ERROR"
-    desc = "failed"
-    if (responseCode == 0) {
-        status = "SUCCESS"
-        desc = "passed"
-    }
-    message = name + " check is " + desc
-    if (url) {
-        githubStatus(name, status, message, env.BUILD_URL + url)
-    } else {
-        githubStatus(name, status, message)
-    }
-
-    if (responseCode != 0) {
-        throw new RuntimeException(message)
-    }
-}
diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml
deleted file mode 100644
index 2fefd8b..0000000
--- a/hadoop-ozone/client/pom.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-client</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Client</description>
-  <name>Apache Hadoop Ozone Client</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
deleted file mode 100644
index 5bae15d..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.OzoneAcl;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * This class encapsulates the arguments that are
- * required for creating a bucket.
- */
-public final class BucketArgs {
-
-  /**
-   * ACL Information.
-   */
-  private List<OzoneAcl> acls;
-  /**
-   * Bucket Version flag.
-   */
-  private Boolean versioning;
-  /**
-   * Type of storage to be used for this bucket.
-   * [RAM_DISK, SSD, DISK, ARCHIVE]
-   */
-  private StorageType storageType;
-
-  /**
-   * Custom key/value metadata.
-   */
-  private Map<String, String> metadata;
-
-  /**
-   * Bucket encryption key name.
-   */
-  private String bucketEncryptionKey;
-
-  /**
-   * Private constructor, constructed via builder.
-   * @param versioning Bucket version flag.
-   * @param storageType Storage type to be used.
-   * @param acls list of ACLs.
-   * @param metadata map of bucket metadata
-   * @param bucketEncryptionKey bucket encryption key name
-   */
-  private BucketArgs(Boolean versioning, StorageType storageType,
-                     List<OzoneAcl> acls, Map<String, String> metadata,
-                     String bucketEncryptionKey) {
-    this.acls = acls;
-    this.versioning = versioning;
-    this.storageType = storageType;
-    this.metadata = metadata;
-    this.bucketEncryptionKey = bucketEncryptionKey;
-  }
-
-  /**
-   * Returns true if bucket version is enabled, else false.
-   * @return isVersionEnabled
-   */
-  public Boolean getVersioning() {
-    return versioning;
-  }
-
-  /**
-   * Returns the type of storage to be used.
-   * @return StorageType
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Returns the ACLs associated with this bucket.
-   * @return {@literal List<OzoneAcl>}
-   */
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-
-  /**
-   * Custom metadata for the buckets.
-   *
-   * @return key value map
-   */
-  public Map<String, String> getMetadata() {
-    return metadata;
-  }
-
-  /**
-   * Returns the bucket encryption key name.
-   * @return bucket encryption key
-   */
-  public String getEncryptionKey() {
-    return bucketEncryptionKey;
-  }
-
-  /**
-   * Returns a new builder that builds a BucketArgs.
-   *
-   * @return Builder
-   */
-  public static BucketArgs.Builder newBuilder() {
-    return new BucketArgs.Builder();
-  }
-
-  /**
-   * Builder for BucketArgs.
-   */
-  public static class Builder {
-    private Boolean versioning;
-    private StorageType storageType;
-    private List<OzoneAcl> acls;
-    private Map<String, String> metadata;
-    private String bucketEncryptionKey;
-
-    public Builder() {
-      metadata = new HashMap<>();
-    }
-
-    public BucketArgs.Builder setVersioning(Boolean versionFlag) {
-      this.versioning = versionFlag;
-      return this;
-    }
-
-    public BucketArgs.Builder setStorageType(StorageType storage) {
-      this.storageType = storage;
-      return this;
-    }
-
-    public BucketArgs.Builder setAcls(List<OzoneAcl> listOfAcls) {
-      this.acls = listOfAcls;
-      return this;
-    }
-
-    public BucketArgs.Builder addMetadata(String key, String value) {
-      this.metadata.put(key, value);
-      return this;
-    }
-
-    public BucketArgs.Builder setBucketEncryptionKey(String bek) {
-      this.bucketEncryptionKey = bek;
-      return this;
-    }
-    /**
-     * Constructs the BucketArgs.
-     * @return instance of BucketArgs.
-     */
-    public BucketArgs build() {
-      return new BucketArgs(versioning, storageType, acls, metadata,
-          bucketEncryptionKey);
-    }
-  }
-}
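
For reference, a minimal usage sketch (not part of this patch) of the builder above; the metadata key/value pair and encryption key name are hypothetical:

    BucketArgs args = BucketArgs.newBuilder()
        .setVersioning(true)
        .setStorageType(StorageType.SSD)
        .addMetadata("owner", "example")        // hypothetical metadata
        .setBucketEncryptionKey("bucketKey1")   // hypothetical key name
        .build();
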
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
deleted file mode 100644
index 2db4a6d..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ /dev/null
@@ -1,498 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NoSuchElementException;
-import java.util.Objects;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Strings;
-import org.apache.hadoop.security.token.Token;
-
-/**
- * ObjectStore class is responsible for the client operations that can be
- * performed on Ozone Object Store.
- */
-public class ObjectStore {
-
-  /**
-   * The proxy used for connecting to the cluster and perform
-   * client operations.
-   */
-  // TODO: remove rest api and client
-  private final ClientProtocol proxy;
-
-  /**
-   * Cache size to be used for listVolume calls.
-   */
-  private int listCacheSize;
-
-  /**
-   * Creates an instance of ObjectStore.
-   * @param conf Configuration object.
-   * @param proxy ClientProtocol proxy.
-   */
-  public ObjectStore(Configuration conf, ClientProtocol proxy) {
-    this.proxy = TracingUtil.createProxy(proxy, ClientProtocol.class, conf);
-    this.listCacheSize = HddsClientUtils.getListCacheSize(conf);
-  }
-
-  @VisibleForTesting
-  protected ObjectStore() {
-    proxy = null;
-  }
-
-  @VisibleForTesting
-  public ClientProtocol getClientProxy() {
-    return proxy;
-  }
-
-  /**
-   * Creates the volume with default values.
-   * @param volumeName Name of the volume to be created.
-   * @throws IOException
-   */
-  public void createVolume(String volumeName) throws IOException {
-    proxy.createVolume(volumeName);
-  }
-
-  /**
-   * Creates the volume.
-   * @param volumeName Name of the volume to be created.
-   * @param volumeArgs Volume properties.
-   * @throws IOException
-   */
-  public void createVolume(String volumeName, VolumeArgs volumeArgs)
-      throws IOException {
-    proxy.createVolume(volumeName, volumeArgs);
-  }
-
-  /**
-   * Creates an S3 bucket inside Ozone manager and creates the mapping needed
-   * to access via both S3 and Ozone.
-   * @param userName - S3 user name.
-   * @param s3BucketName - S3 bucket Name.
-   * @throws IOException - On failure, e.g. if the bucket already exists.
-   */
-  public void createS3Bucket(String userName, String s3BucketName) throws
-      IOException {
-    proxy.createS3Bucket(userName, s3BucketName);
-  }
-
-  /**
-   * Deletes an s3 bucket and removes mapping of Ozone volume/bucket.
-   * @param bucketName - S3 Bucket Name.
-   * @throws  IOException in case the bucket cannot be deleted.
-   */
-  public void deleteS3Bucket(String bucketName) throws IOException {
-    proxy.deleteS3Bucket(bucketName);
-  }
-
-  /**
-   * Returns the Ozone namespace mapping for the S3 bucket, in the form
-   * "volume/bucket".
-   * @param s3BucketName  - S3 Bucket Name.
-   * @return String - The Ozone canonical name for this s3 bucket. This
-   * string is useful for mounting an OzoneFS.
-   * @throws IOException - Error is thrown if the s3 bucket does not exist.
-   */
-  public String getOzoneBucketMapping(String s3BucketName) throws IOException {
-    return proxy.getOzoneBucketMapping(s3BucketName);
-  }
-
-  /**
-   * Returns the corresponding Ozone volume given an S3 Bucket.
-   * @param s3BucketName - S3Bucket Name.
-   * @return String - Ozone Volume name.
-   * @throws IOException - Throws if the s3Bucket does not exist.
-   */
-  @SuppressWarnings("StringSplitter")
-  public String getOzoneVolumeName(String s3BucketName) throws IOException {
-    String mapping = getOzoneBucketMapping(s3BucketName);
-    return mapping.split("/")[0];
-
-  }
-
-  /**
-   * Returns the corresponding Ozone bucket name for the given S3 bucket.
-   * @param s3BucketName - S3Bucket Name.
-   * @return String - Ozone bucket Name.
-   * @throws IOException - Throws if the s3bucket does not exist.
-   */
-  @SuppressWarnings("StringSplitter")
-  public String getOzoneBucketName(String s3BucketName) throws IOException {
-    String mapping = getOzoneBucketMapping(s3BucketName);
-    return mapping.split("/")[1];
-  }
-
-  /**
-   * Returns the volume information.
-   * @param volumeName Name of the volume.
-   * @return OzoneVolume
-   * @throws IOException
-   */
-  public OzoneVolume getVolume(String volumeName) throws IOException {
-    OzoneVolume volume = proxy.getVolumeDetails(volumeName);
-    return volume;
-  }
-
-  public S3SecretValue getS3Secret(String kerberosID) throws IOException {
-    return proxy.getS3Secret(kerberosID);
-  }
-
-  /**
-   * Returns Iterator to iterate over all buckets for a user.
-   * The result can be restricted using a bucket prefix; all buckets are
-   * returned if the prefix is null.
-   *
-   * @param userName user name
-   * @param bucketPrefix Bucket prefix to match
-   * @return {@code Iterator<OzoneBucket>}
-   */
-  public Iterator<? extends OzoneBucket> listS3Buckets(String userName,
-                                                       String bucketPrefix) {
-    return listS3Buckets(userName, bucketPrefix, null);
-  }
-
-  /**
-   * Returns Iterator to iterate over all buckets after prevBucket for a
-   * specific user. If prevBucket is null, iteration starts from the first
-   * bucket. The result can be restricted using a bucket prefix; all buckets
-   * are returned if the prefix is null.
-   *
-   * @param userName user name
-   * @param bucketPrefix Bucket prefix to match
-   * @param prevBucket Buckets are listed after this bucket
-   * @return {@code Iterator<OzoneBucket>}
-   */
-  public Iterator<? extends OzoneBucket> listS3Buckets(String userName,
-                                                       String bucketPrefix,
-                                                       String prevBucket) {
-    return new S3BucketIterator(userName, bucketPrefix, prevBucket);
-  }
-
-  /**
-   * Returns Iterator to iterate over all the volumes in object store.
-   * The result can be restricted using a volume prefix; all volumes are
-   * returned if the prefix is null.
-   *
-   * @param volumePrefix Volume prefix to match
-   * @return {@code Iterator<OzoneVolume>}
-   */
-  public Iterator<? extends OzoneVolume> listVolumes(String volumePrefix)
-      throws IOException {
-    return listVolumes(volumePrefix, null);
-  }
-
-  /**
-   * Returns Iterator to iterate over all the volumes after prevVolume in object
-   * store. If prevVolume is null it iterates from the first volume.
-   * The result can be restricted using a volume prefix; all volumes are
-   * returned if the prefix is null.
-   *
-   * @param volumePrefix Volume prefix to match
-   * @param prevVolume Volumes will be listed after this volume name
-   * @return {@code Iterator<OzoneVolume>}
-   */
-  public Iterator<? extends OzoneVolume> listVolumes(String volumePrefix,
-      String prevVolume) throws IOException {
-    return new VolumeIterator(null, volumePrefix, prevVolume);
-  }
-
-  /**
-   * Returns Iterator to iterate over the list of volumes after prevVolume owned
-   * by a specific user. The result can be restricted using a volume prefix;
-   * all volumes are returned if the prefix is null. If user is null or
-   * empty, the volumes owned by the current user are listed.
-   *
-   * @param user User Name
-   * @param volumePrefix Volume prefix to match
-   * @param prevVolume Volumes will be listed after this volume name
-   * @return {@code Iterator<OzoneVolume>}
-   */
-  public Iterator<? extends OzoneVolume> listVolumesByUser(String user,
-      String volumePrefix, String prevVolume)
-      throws IOException {
-    if(Strings.isNullOrEmpty(user)) {
-      user = UserGroupInformation.getCurrentUser().getShortUserName();
-    }
-    return new VolumeIterator(user, volumePrefix, prevVolume);
-  }
-
-  /**
-   * Deletes the volume.
-   * @param volumeName Name of the volume.
-   * @throws IOException
-   */
-  public void deleteVolume(String volumeName) throws IOException {
-    proxy.deleteVolume(volumeName);
-  }
-
-  public KeyProvider getKeyProvider() throws IOException {
-    return proxy.getKeyProvider();
-  }
-
-  public URI getKeyProviderUri() throws IOException {
-    return proxy.getKeyProviderUri();
-  }
-
-  /**
-   * An Iterator to iterate over {@link OzoneVolume} list.
-   */
-  private class VolumeIterator implements Iterator<OzoneVolume> {
-
-    private String user = null;
-    private String volPrefix = null;
-
-    private Iterator<OzoneVolume> currentIterator;
-    private OzoneVolume currentValue;
-
-    /**
-     * Creates an Iterator to iterate over all volumes after
-     * prevVolume of the user. If prevVolume is null it iterates from the
-     * first volume. The returned volumes match volume prefix.
-     * @param user user name
-     * @param volPrefix volume prefix to match
-     * @param prevVolume volumes are listed after this volume
-     */
-    VolumeIterator(String user, String volPrefix, String prevVolume) {
-      this.user = user;
-      this.volPrefix = volPrefix;
-      this.currentValue = null;
-      this.currentIterator = getNextListOfVolumes(prevVolume).iterator();
-    }
-
-    @Override
-    public boolean hasNext() {
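-      // If the current batch is exhausted, fetch the next batch starting
-      // after the last volume seen; an empty next batch ends the iteration.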
-      if(!currentIterator.hasNext()) {
-        currentIterator = getNextListOfVolumes(
-            currentValue != null ? currentValue.getName() : null)
-            .iterator();
-      }
-      return currentIterator.hasNext();
-    }
-
-    @Override
-    public OzoneVolume next() {
-      if(hasNext()) {
-        currentValue = currentIterator.next();
-        return currentValue;
-      }
-      throw new NoSuchElementException();
-    }
-
-    /**
-     * Returns the next batch of volumes using the proxy.
-     * @param prevVolume previous volume, this will be excluded from the result
-     * @return {@code List<OzoneVolume>}
-     */
-    private List<OzoneVolume> getNextListOfVolumes(String prevVolume) {
-      try {
-        // If user is null, list all volumes.
-        if(user != null) {
-          return proxy.listVolumes(user, volPrefix, prevVolume, listCacheSize);
-        }
-        return proxy.listVolumes(volPrefix, prevVolume, listCacheSize);
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-  }
-
-  /**
-   * An Iterator to iterate over {@link OzoneBucket} list.
-   */
-  public class S3BucketIterator implements Iterator<OzoneBucket> {
-
-    private String bucketPrefix = null;
-    private String userName;
-
-    private Iterator<OzoneBucket> currentIterator;
-    private OzoneBucket currentValue;
-
-    /**
-     * Creates an Iterator to iterate over all buckets after prevBucket for
-     * a user. If prevBucket is null it returns an iterator which list all
-     * the buckets of the user.
-     * The returned buckets match bucket prefix.
-     * @param user user name; must not be null
-     * @param bucketPrefix bucket prefix to match
-     * @param prevBucket buckets are listed after this bucket
-     */
-    public S3BucketIterator(String user, String bucketPrefix, String
-        prevBucket) {
-      Objects.requireNonNull(user);
-      this.userName = user;
-      this.bucketPrefix = bucketPrefix;
-      this.currentValue = null;
-      this.currentIterator = getNextListOfS3Buckets(prevBucket).iterator();
-    }
-
-    @Override
-    public boolean hasNext() {
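-      // If the current batch is exhausted, fetch the next batch starting
-      // after the last bucket seen; an empty next batch ends the iteration.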
-      if(!currentIterator.hasNext()) {
-        currentIterator = getNextListOfS3Buckets(
-            currentValue != null ? currentValue.getName() : null)
-            .iterator();
-      }
-      return currentIterator.hasNext();
-    }
-
-    @Override
-    public OzoneBucket next() {
-      if(hasNext()) {
-        currentValue = currentIterator.next();
-        return currentValue;
-      }
-      throw new NoSuchElementException();
-    }
-
-    /**
-     * Gets the next batch of buckets using the proxy.
-     * @param prevBucket previous bucket; excluded from the result
-     * @return {@code List<OzoneBucket>}
-     */
-    private List<OzoneBucket> getNextListOfS3Buckets(String prevBucket) {
-      try {
-        return proxy.listS3Buckets(userName, bucketPrefix, prevBucket,
-            listCacheSize);
-      } catch (OMException e) {
-        if (e.getResult() == ResultCodes.VOLUME_NOT_FOUND) {
-          return new ArrayList<>();
-        } else {
-          throw new RuntimeException(e);
-        }
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-  }
-
-  /**
-   * Get a valid Delegation Token.
-   *
-   * @param renewer the designated renewer for the token
-   * @return Token<OzoneTokenIdentifier>
-   * @throws IOException
-   */
-  public Token<OzoneTokenIdentifier> getDelegationToken(Text renewer)
-      throws IOException {
-    return proxy.getDelegationToken(renewer);
-  }
-
-  /**
-   * Renew an existing delegation token.
-   *
-   * @param token delegation token obtained earlier
-   * @return the new expiration time
-   * @throws IOException
-   */
-  public long renewDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws IOException {
-    return proxy.renewDelegationToken(token);
-  }
-
-  /**
-   * Cancel an existing delegation token.
-   *
-   * @param token delegation token
-   * @throws IOException
-   */
-  public void cancelDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws IOException {
-    proxy.cancelDelegationToken(token);
-  }
-
-  /**
-   * @return canonical service name of ozone delegation token.
-   */
-  public String getCanonicalServiceName() {
-    return proxy.getCanonicalServiceName();
-  }
-
-  /**
-   * Add acl for Ozone object. Return true if acl is added successfully else
-   * false.
-   * @param obj Ozone object for which acl should be added.
-   * @param acl ozone acl to be added.
-   * @return true if acl is added successfully, else false.
-   * @throws IOException if there is error.
-   * */
-  public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    return proxy.addAcl(obj, acl);
-  }
-
-  /**
-   * Remove acl for Ozone object. Return true if acl is removed successfully
-   * else false.
-   *
-   * @param obj Ozone object.
-   * @param acl Ozone acl to be removed.
-   * @return true if acl is removed successfully, else false.
-   * @throws IOException if there is error.
-   */
-  public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    return proxy.removeAcl(obj, acl);
-  }
-
-  /**
-   * Sets ACLs for the given Ozone object. This operation resets the ACL of
-   * the object to the list of ACLs provided in the argument.
-   *
-   * @param obj Ozone object.
-   * @param acls List of acls.
-   * @return true if acls are set successfully, else false.
-   * @throws IOException if there is error.
-   */
-  public boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException {
-    return proxy.setAcl(obj, acls);
-  }
-
-  /**
-   * Returns list of ACLs for given Ozone object.
-   *
-   * @param obj Ozone object.
-   * @return list of ACLs for the given object.
-   * @throws IOException if there is error.
-   */
-  public List<OzoneAcl> getAcl(OzoneObj obj) throws IOException {
-    return proxy.getAcl(obj);
-  }
-
-}
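
For reference, a minimal usage sketch (not part of this patch) of the ObjectStore API above; the volume name is hypothetical and the store instance is assumed to come from an OzoneClient. The enclosing method is assumed to declare IOException.

    store.createVolume("vol1");
    OzoneVolume volume = store.getVolume("vol1");
    // Iterate over all volumes whose names start with "vol".
    Iterator<? extends OzoneVolume> volumes = store.listVolumes("vol");
    while (volumes.hasNext()) {
      System.out.println(volumes.next().getName());
    }
    store.deleteVolume("vol1");
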
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
deleted file mode 100644
index bcd7152..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ /dev/null
@@ -1,624 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.ozone.om.helpers.WithMetadata;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-
-/**
- * A class that encapsulates OzoneBucket.
- */
-public class OzoneBucket extends WithMetadata {
-
-  /**
-   * The proxy used for connecting to the cluster and perform
-   * client operations.
-   */
-  private final ClientProtocol proxy;
-  /**
-   * Name of the volume to which the bucket belongs.
-   */
-  private final String volumeName;
-  /**
-   * Name of the bucket.
-   */
-  private final String name;
-  /**
-   * Default replication factor to be used while creating keys.
-   */
-  private final ReplicationFactor defaultReplication;
-
-  /**
-   * Default replication type to be used while creating keys.
-   */
-  private final ReplicationType defaultReplicationType;
-
-  /**
-   * Type of storage to be used for this bucket.
-   * [RAM_DISK, SSD, DISK, ARCHIVE]
-   */
-  private StorageType storageType;
-
-  /**
-   * Bucket Version flag.
-   */
-  private Boolean versioning;
-
-  /**
-   * Cache size to be used for listKey calls.
-   */
-  private int listCacheSize;
-
-  /**
-   * Creation time of the bucket.
-   */
-  private long creationTime;
-
-  /**
-   * Bucket Encryption key name if bucket encryption is enabled.
-   */
-  private String encryptionKeyName;
-
-  private OzoneObj ozoneObj;
-
-  private OzoneBucket(Configuration conf, String volumeName,
-      String bucketName, ReplicationFactor defaultReplication,
-      ReplicationType defaultReplicationType, ClientProtocol proxy) {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    this.volumeName = volumeName;
-    this.name = bucketName;
-    if (defaultReplication == null) {
-      this.defaultReplication = ReplicationFactor.valueOf(conf.getInt(
-          OzoneConfigKeys.OZONE_REPLICATION,
-          OzoneConfigKeys.OZONE_REPLICATION_DEFAULT));
-    } else {
-      this.defaultReplication = defaultReplication;
-    }
-
-    if (defaultReplicationType == null) {
-      this.defaultReplicationType = ReplicationType.valueOf(conf.get(
-          OzoneConfigKeys.OZONE_REPLICATION_TYPE,
-          OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT));
-    } else {
-      this.defaultReplicationType = defaultReplicationType;
-    }
-    this.proxy = proxy;
-    this.ozoneObj = OzoneObjInfo.Builder.newBuilder()
-        .setBucketName(bucketName)
-        .setVolumeName(volumeName)
-        .setResType(OzoneObj.ResourceType.BUCKET)
-        .setStoreType(OzoneObj.StoreType.OZONE).build();
-  }
-  @SuppressWarnings("parameternumber")
-  public OzoneBucket(Configuration conf, ClientProtocol proxy,
-      String volumeName, String bucketName, StorageType storageType,
-      Boolean versioning, long creationTime, Map<String, String> metadata,
-      String encryptionKeyName) {
-    this(conf, volumeName, bucketName, null, null, proxy);
-    this.storageType = storageType;
-    this.versioning = versioning;
-    this.listCacheSize = HddsClientUtils.getListCacheSize(conf);
-    this.creationTime = creationTime;
-    this.metadata = metadata;
-    this.encryptionKeyName = encryptionKeyName;
-  }
-
-  /**
-   * Constructs OzoneBucket instance.
-   * @param conf Configuration object.
-   * @param proxy ClientProtocol proxy.
-   * @param volumeName Name of the volume the bucket belongs to.
-   * @param bucketName Name of the bucket.
-   * @param storageType StorageType of the bucket.
-   * @param versioning versioning status of the bucket.
-   * @param creationTime creation time of the bucket.
-   */
-  @SuppressWarnings("parameternumber")
-  public OzoneBucket(Configuration conf, ClientProtocol proxy,
-      String volumeName, String bucketName, StorageType storageType,
-      Boolean versioning, long creationTime, Map<String, String> metadata) {
-    this(conf, volumeName, bucketName, null, null, proxy);
-    this.storageType = storageType;
-    this.versioning = versioning;
-    this.listCacheSize = HddsClientUtils.getListCacheSize(conf);
-    this.creationTime = creationTime;
-    this.metadata = metadata;
-  }
-
-  @VisibleForTesting
-  @SuppressWarnings("parameternumber")
-  OzoneBucket(String volumeName, String name,
-      ReplicationFactor defaultReplication,
-      ReplicationType defaultReplicationType, StorageType storageType,
-      Boolean versioning, long creationTime) {
-    this.proxy = null;
-    this.volumeName = volumeName;
-    this.name = name;
-    this.defaultReplication = defaultReplication;
-    this.defaultReplicationType = defaultReplicationType;
-    this.storageType = storageType;
-    this.versioning = versioning;
-    this.creationTime = creationTime;
-    this.ozoneObj = OzoneObjInfo.Builder.newBuilder()
-        .setBucketName(name)
-        .setVolumeName(volumeName)
-        .setResType(OzoneObj.ResourceType.BUCKET)
-        .setStoreType(OzoneObj.StoreType.OZONE).build();
-  }
-
-  /**
-   * Returns Volume Name.
-   *
-   * @return volumeName
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Returns Bucket Name.
-   *
-   * @return bucketName
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Returns ACLs associated with the Bucket.
-   *
-   * @return acls
-   */
-  @JsonIgnore
-  public List<OzoneAcl> getAcls() throws IOException {
-    return proxy.getAcl(ozoneObj);
-  }
-
-  /**
-   * Returns StorageType of the Bucket.
-   *
-   * @return storageType
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Returns Versioning associated with the Bucket.
-   *
-   * @return versioning
-   */
-  public Boolean getVersioning() {
-    return versioning;
-  }
-
-  /**
-   * Returns creation time of the Bucket.
-   *
-   * @return creation time of the bucket
-   */
-  public long getCreationTime() {
-    return creationTime;
-  }
-
-  /**
-   * Return the bucket encryption key name.
-   * @return the bucket encryption key name
-   */
-  public String getEncryptionKeyName() {
-    return encryptionKeyName;
-  }
-
-  /**
-   * Adds ACLs to the Bucket.
-   * @param addAcl ACL to be added
-   * @return true - if acl is successfully added, false if acl already exists
-   * for the bucket.
-   * @throws IOException
-   */
-  public boolean addAcls(OzoneAcl addAcl) throws IOException {
-    return proxy.addAcl(ozoneObj, addAcl);
-  }
-
-  /**
-   * Removes ACLs from the bucket.
-   * @return true - if acl is successfully removed, false if acl to be
-   * removed does not exist for the bucket.
-   * @throws IOException
-   */
-  public boolean removeAcls(OzoneAcl removeAcl) throws IOException {
-    return proxy.removeAcl(ozoneObj, removeAcl);
-  }
-
-  /**
-   * Sets/Changes the storage type of the bucket.
-   * @param newStorageType Storage type to be set
-   * @throws IOException
-   */
-  public void setStorageType(StorageType newStorageType) throws IOException {
-    proxy.setBucketStorageType(volumeName, name, newStorageType);
-    storageType = newStorageType;
-  }
-
-  /**
-   * Enable/Disable versioning of the bucket.
-   * @param newVersioning true to enable versioning, false to disable
-   * @throws IOException
-   */
-  public void setVersioning(Boolean newVersioning) throws IOException {
-    proxy.setBucketVersioning(volumeName, name, newVersioning);
-    versioning = newVersioning;
-  }
-
-  /**
-   * Creates a new key in the bucket, using the configured default
-   * replication type and factor.
-   * @param key Name of the key to be created.
-   * @param size Size of the data the key will point to.
-   * @return OzoneOutputStream to which the data has to be written.
-   * @throws IOException
-   */
-  public OzoneOutputStream createKey(String key, long size)
-      throws IOException {
-    return createKey(key, size, defaultReplicationType, defaultReplication,
-        new HashMap<>());
-  }
-
-  /**
-   * Creates a new key in the bucket.
-   * @param key Name of the key to be created.
-   * @param size Size of the data the key will point to.
-   * @param type Replication type to be used.
-   * @param factor Replication factor of the key.
-   * @return OzoneOutputStream to which the data has to be written.
-   * @throws IOException
-   */
-  public OzoneOutputStream createKey(String key, long size,
-                                     ReplicationType type,
-                                     ReplicationFactor factor,
-                                     Map<String, String> keyMetadata)
-      throws IOException {
-    return proxy
-        .createKey(volumeName, name, key, size, type, factor, keyMetadata);
-  }
-
-  /**
-   * Reads an existing key from the bucket.
-   * @param key Name of the key to be read.
-   * @return OzoneInputStream the stream using which the data can be read.
-   * @throws IOException
-   */
-  public OzoneInputStream readKey(String key) throws IOException {
-    return proxy.getKey(volumeName, name, key);
-  }
-
-  /**
-   * Returns information about the key.
-   * @param key Name of the key.
-   * @return OzoneKeyDetails Information about the key.
-   * @throws IOException
-   */
-  public OzoneKeyDetails getKey(String key) throws IOException {
-    return proxy.getKeyDetails(volumeName, name, key);
-  }
-
-  /**
-   * Returns Iterator to iterate over all keys in the bucket.
-   * The result can be restricted using a key prefix; all keys are
-   * returned if the prefix is null.
-   *
-   * @param keyPrefix Key prefix to match
-   * @return {@code Iterator<OzoneKey>}
-   */
-  public Iterator<? extends OzoneKey> listKeys(String keyPrefix) {
-    return listKeys(keyPrefix, null);
-  }
-
-  /**
-   * Returns Iterator to iterate over all keys after prevKey in the bucket.
-   * If prevKey is null it iterates from the first key in the bucket.
-   * The result can be restricted using a key prefix; all keys are
-   * returned if the prefix is null.
-   *
-   * @param keyPrefix Key prefix to match
-   * @param prevKey Keys will be listed after this key name
-   * @return {@code Iterator<OzoneKey>}
-   */
-  public Iterator<? extends OzoneKey> listKeys(String keyPrefix,
-      String prevKey) {
-    return new KeyIterator(keyPrefix, prevKey);
-  }
-
-  /**
-   * Deletes key from the bucket.
-   * @param key Name of the key to be deleted.
-   * @throws IOException
-   */
-  public void deleteKey(String key) throws IOException {
-    proxy.deleteKey(volumeName, name, key);
-  }
-
-  public void renameKey(String fromKeyName, String toKeyName)
-      throws IOException {
-    proxy.renameKey(volumeName, name, fromKeyName, toKeyName);
-  }
-
-  /**
-   * Initiate multipart upload for a specified key.
-   * @param keyName Name of the key to be uploaded.
-   * @param type Replication type to be used.
-   * @param factor Replication factor of the key.
-   * @return OmMultipartInfo
-   * @throws IOException
-   */
-  public OmMultipartInfo initiateMultipartUpload(String keyName,
-                                                 ReplicationType type,
-                                                 ReplicationFactor factor)
-      throws IOException {
-    return proxy.initiateMultipartUpload(volumeName, name, keyName, type,
-        factor);
-  }
-
-  /**
-   * Initiate multipart upload for a specified key, with default replication
-   * type RATIS and with replication factor THREE.
-   * @param key Name of the key to be created.
-   * @return OmMultipartInfo.
-   * @throws IOException
-   */
-  public OmMultipartInfo initiateMultipartUpload(String key)
-      throws IOException {
-    return initiateMultipartUpload(key, defaultReplicationType,
-        defaultReplication);
-  }
-
-  /**
-   * Create a part key for a multipart upload key.
-   * @param key Name of the multipart upload key.
-   * @param size Size of the part data.
-   * @param partNumber Part number of this part.
-   * @param uploadID Upload ID returned when the upload was initiated.
-   * @return OzoneOutputStream to which the part data has to be written.
-   * @throws IOException
-   */
-  public OzoneOutputStream createMultipartKey(String key, long size,
-                                              int partNumber, String uploadID)
-      throws IOException {
-    return proxy.createMultipartKey(volumeName, name, key, size, partNumber,
-        uploadID);
-  }
-
-  /**
-   * Complete Multipart upload. This will combine all the parts and make the
-   * key visible in ozone.
-   * @param key Name of the multipart upload key.
-   * @param uploadID Upload ID returned when the upload was initiated.
-   * @param partsMap Map of part number to part name for the uploaded parts.
-   * @return OmMultipartUploadCompleteInfo
-   * @throws IOException
-   */
-  public OmMultipartUploadCompleteInfo completeMultipartUpload(String key,
-      String uploadID, Map<Integer, String> partsMap) throws IOException {
-    return proxy.completeMultipartUpload(volumeName, name, key, uploadID,
-        partsMap);
-  }
-
-  /**
-   * Abort multipart upload request.
-   * @param keyName Name of the multipart upload key.
-   * @param uploadID Upload ID returned when the upload was initiated.
-   * @throws IOException
-   */
-  public void abortMultipartUpload(String keyName, String uploadID) throws
-      IOException {
-    proxy.abortMultipartUpload(volumeName, name, keyName, uploadID);
-  }
-
-  /**
-   * Returns list of parts of a multipart upload key.
-   * @param keyName Name of the multipart upload key.
-   * @param uploadID Upload ID returned when the upload was initiated.
-   * @param partNumberMarker Parts are listed after this part number.
-   * @param maxParts Maximum number of parts to return in one call.
-   * @return OzoneMultipartUploadPartListParts
-   */
-  public OzoneMultipartUploadPartListParts listParts(String keyName,
-      String uploadID, int partNumberMarker, int maxParts)  throws IOException {
-    // A key can have at most 10000 parts, so an iterator is not used here;
-    // it can be added later if needed. Even if all 10000 parts are requested
-    // in a single RPC call, the response is only about 0.6 MB, assuming each
-    // part entry is roughly 60 bytes (replication type size ignored).
-
-    return proxy.listParts(volumeName, name, keyName, uploadID,
-              partNumberMarker, maxParts);
-  }
-
-  /**
-   * OzoneFS api to get file status for an entry.
-   *
-   * @param keyName Key name
-   * @throws OMException if file does not exist
-   *                     if bucket does not exist
-   * @throws IOException if there is error in the db
-   *                     invalid arguments
-   */
-  public OzoneFileStatus getFileStatus(String keyName) throws IOException {
-    return proxy.getOzoneFileStatus(volumeName, name, keyName);
-  }
-
-  /**
-   * OzoneFS API to create a directory. Missing parent directories of the
-   * input directory are created as well.
-   *
-   * @param keyName Key name
-   * @throws OMException if any entry in the path exists as a file
-   *                     if bucket does not exist
-   * @throws IOException if there is error in the db
-   *                     invalid arguments
-   */
-  public void createDirectory(String keyName) throws IOException {
-    proxy.createDirectory(volumeName, name, keyName);
-  }
-
-  /**
-   * OzoneFS API to create an input stream for a file.
-   *
-   * @param keyName Key name
-   * @throws OMException if given key is not found or it is not a file
-   *                     if bucket does not exist
-   * @throws IOException if there is error in the db
-   *                     invalid arguments
-   */
-  public OzoneInputStream readFile(String keyName) throws IOException {
-    return proxy.readFile(volumeName, name, keyName);
-  }
-
-  /**
-   * OzoneFS API to create an output stream for a file.
-   *
-   * @param keyName   Key name
-   * @param size      Size of the data the file will hold.
-   * @param type      Replication type to be used.
-   * @param factor    Replication factor of the file.
-   * @param overWrite if true, an existing file at the location is overwritten
-   * @param recursive if true, the file is created even if parent directories
-   *                    do not exist
-   * @throws OMException if given key is a directory
-   *                     if file exists and isOverwrite flag is false
-   *                     if an ancestor exists as a file
-   *                     if bucket does not exist
-   * @throws IOException if there is error in the db
-   *                     invalid arguments
-   */
-  public OzoneOutputStream createFile(String keyName, long size,
-      ReplicationType type, ReplicationFactor factor, boolean overWrite,
-      boolean recursive) throws IOException {
-    return proxy
-        .createFile(volumeName, name, keyName, size, type, factor, overWrite,
-            recursive);
-  }
-
-  /**
-   * List the status for a file or a directory and its contents.
-   *
-   * @param keyName    Absolute path of the entry to be listed
-   * @param recursive  For a directory if true all the descendants of a
-   *                   particular directory are listed
-   * @param startKey   Key from which listing needs to start. If startKey exists
-   *                   its status is included in the final list.
-   * @param numEntries Number of entries to list from the start key
-   * @return list of file status
-   */
-  public List<OzoneFileStatus> listStatus(String keyName, boolean recursive,
-      String startKey, long numEntries) throws IOException {
-    return proxy
-        .listStatus(volumeName, name, keyName, recursive, startKey, numEntries);
-  }
-
-  /**
-   * Return with the list of the in-flight multipart uploads.
-   *
-   * @param prefix Optional string to filter for the selected keys.
-   */
-  public OzoneMultipartUploadList listMultipartUploads(String prefix)
-      throws IOException {
-    return proxy.listMultipartUploads(volumeName, getName(), prefix);
-  }
-
-  /**
-   * An Iterator to iterate over {@link OzoneKey} list.
-   */
-  private class KeyIterator implements Iterator<OzoneKey> {
-
-    private String keyPrefix = null;
-
-    private Iterator<OzoneKey> currentIterator;
-    private OzoneKey currentValue;
-
-    /**
-     * Creates an Iterator to iterate over all keys after prevKey in the bucket.
-     * If prevKey is null it iterates from the first key in the bucket.
-     * The returned keys match the key prefix.
-     * @param keyPrefix Key prefix to match
-     * @param prevKey Keys are listed after this key name
-     */
-    KeyIterator(String keyPrefix, String prevKey) {
-      this.keyPrefix = keyPrefix;
-      this.currentValue = null;
-      this.currentIterator = getNextListOfKeys(prevKey).iterator();
-    }
-
-    @Override
-    public boolean hasNext() {
-      if (!currentIterator.hasNext()) {
-        currentIterator = getNextListOfKeys(
-            currentValue != null ? currentValue.getName() : null)
-            .iterator();
-      }
-      return currentIterator.hasNext();
-    }
-
-    @Override
-    public OzoneKey next() {
-      if (hasNext()) {
-        currentValue = currentIterator.next();
-        return currentValue;
-      }
-      throw new NoSuchElementException();
-    }
-
-    /**
-     * Gets the next batch of keys using the proxy.
-     * @param prevKey Keys are listed after this key name
-     * @return {@code List<OzoneKey>}
-     */
-    private List<OzoneKey> getNextListOfKeys(String prevKey) {
-      try {
-        return proxy.listKeys(volumeName, name, keyPrefix, prevKey,
-            listCacheSize);
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-  }
-}
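For context, a minimal sketch of how the OzoneBucket key API removed above was typically used. The bucket instance, key name, and data are hypothetical, and error handling is elided:

    // Assumes an OzoneBucket obtained via OzoneVolume#getBucket.
    byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
    // Write a key with the default replication (RATIS, factor THREE).
    OzoneOutputStream out = bucket.createKey("key1", data.length);
    out.write(data);
    out.close();
    // Read the key back.
    OzoneInputStream in = bucket.readKey("key1");
    byte[] buffer = new byte[data.length];
    in.read(buffer);
    in.close();
    // List keys under a prefix; a null prefix lists all keys.
    Iterator<? extends OzoneKey> keys = bucket.listKeys("key");
    while (keys.hasNext()) {
      System.out.println(keys.next().getName());
    }
    bucket.deleteKey("key1");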
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java
deleted file mode 100644
index 0d65d73..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-
-import java.io.Closeable;
-import java.io.IOException;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * OzoneClient connects to an Ozone cluster and
- * performs basic operations.
- */
-public class OzoneClient implements Closeable {
-
-  /*
-   * OzoneClient connects to an Ozone cluster and
-   * performs basic operations.
-   *
-   * +-------------+     +---+   +-------------------------------------+
-   * | OzoneClient | --> | C |   | Object Store                        |
-   * |_____________|     | l |   |  +-------------------------------+  |
-   *                     | i |   |  | Volume(s)                     |  |
-   *                     | e |   |  |   +------------------------+  |  |
-   *                     | n |   |  |   | Bucket(s)              |  |  |
-   *                     | t |   |  |   |   +------------------+ |  |  |
-   *                     |   |   |  |   |   | Key -> Value (s) | |  |  |
-   *                     | P |-->|  |   |   |                  | |  |  |
-   *                     | r |   |  |   |   |__________________| |  |  |
-   *                     | o |   |  |   |                        |  |  |
-   *                     | t |   |  |   |________________________|  |  |
-   *                     | o |   |  |                               |  |
-   *                     | c |   |  |_______________________________|  |
-   *                     | o |   |                                     |
-   *                     | l |   |_____________________________________|
-   *                     |___|
-   * Example:
-   * ObjectStore store = client.getObjectStore();
-   * store.createVolume("volume one", VolumeArgs);
-   * OzoneVolume volume = store.getVolume("volume one");
-   * volume.setQuota("10 GB");
-   * volume.createBucket("bucket one", BucketArgs);
-   * OzoneBucket bucket = volume.getBucket("bucket one");
-   * bucket.setVersioning(true);
-   * OzoneOutputStream os = bucket.createKey("key one", 1024);
-   * os.write(byte[]);
-   * os.close();
-   * OzoneInputStream is = bucket.readKey("key one");
-   * is.read();
-   * is.close();
-   * bucket.deleteKey("key one");
-   * volume.deleteBucket("bucket one");
-   * store.deleteVolume("volume one");
-   * client.close();
-   */
-
-  private final ClientProtocol proxy;
-  private final ObjectStore objectStore;
-
-  /**
-   * Creates a new OzoneClient object, generally constructed
-   * using {@link OzoneClientFactory}.
-   * @param conf Configuration object
-   * @param proxy ClientProtocol proxy instance
-   */
-  public OzoneClient(Configuration conf, ClientProtocol proxy) {
-    this.proxy = proxy;
-    this.objectStore = new ObjectStore(conf, this.proxy);
-  }
-
-  @VisibleForTesting
-  protected OzoneClient(ObjectStore objectStore) {
-    this.objectStore = objectStore;
-    this.proxy = null;
-  }
-
-  /**
-   * Returns the object store associated with the Ozone Cluster.
-   * @return ObjectStore
-   */
-  public ObjectStore getObjectStore() {
-    return objectStore;
-  }
-
-  /**
-   * Closes the client and all the underlying resources.
-   * @throws IOException
-   */
-  @Override
-  public void close() throws IOException {
-    proxy.close();
-  }
-}
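A short sketch of the removed OzoneClient entry point, mirroring the example in the class comment above; the names are placeholders, and the single-argument createVolume overload is assumed to exist on ObjectStore:

    // OzoneClient is Closeable, so try-with-resources releases the proxy.
    try (OzoneClient client = OzoneClientFactory.getClient()) {
      ObjectStore store = client.getObjectStore();
      store.createVolume("vol1");
      OzoneVolume volume = store.getVolume("vol1");
      volume.createBucket("bucket1");
    }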
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java
deleted file mode 100644
index 2e9080a..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.client;
-
-/**
- * This exception is thrown by the Ozone Clients.
- */
-public class OzoneClientException extends Exception {
-  public OzoneClientException() {
-  }
-
-  public OzoneClientException(String s) {
-    super(s);
-  }
-
-  public OzoneClientException(String s, Throwable throwable) {
-    super(s, throwable);
-  }
-
-  public OzoneClientException(Throwable throwable) {
-    super(throwable);
-  }
-
-  public OzoneClientException(String s, Throwable throwable, boolean b,
-      boolean b1) {
-    super(s, throwable, b, b1);
-  }
-}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
deleted file mode 100644
index caf989e..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ /dev/null
@@ -1,268 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import java.io.IOException;
-import java.lang.reflect.Proxy;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-
-import com.google.common.base.Preconditions;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Factory class to create OzoneClients.
- */
-public final class OzoneClientFactory {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      OzoneClientFactory.class);
-
-  /**
-   * Private constructor, class is not meant to be instantiated.
-   */
-  private OzoneClientFactory() { }
-
-  /**
-   * Constructs and returns an OzoneClient with default configuration.
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getClient() throws IOException {
-    LOG.info("Creating OzoneClient with default configuration.");
-    return getClient(new OzoneConfiguration());
-  }
-
-  /**
-   * Constructs and returns an OzoneClient based on the configuration object.
-   * Protocol type is decided by <code>ozone.client.protocol</code>.
-   *
-   * @param config
-   *        Configuration to be used for OzoneClient creation
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getClient(Configuration config)
-      throws IOException {
-    Preconditions.checkNotNull(config);
-    return getClient(getClientProtocol(config), config);
-  }
-
-  /**
-   * Returns an OzoneClient which will use RPC protocol.
-   *
-   * @param omHost
-   *        hostname of the OzoneManager to connect to.
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRpcClient(String omHost)
-      throws IOException {
-    Configuration config = new OzoneConfiguration();
-    int port = OmUtils.getOmRpcPort(config);
-    return getRpcClient(omHost, port, config);
-  }
-
-  /**
-   * Returns an OzoneClient which will use RPC protocol.
-   *
-   * @param omHost
-   *        hostname of the OzoneManager to connect to.
-   *
-   * @param omRpcPort
-   *        RPC port of OzoneManager.
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRpcClient(String omHost, Integer omRpcPort)
-      throws IOException {
-    return getRpcClient(omHost, omRpcPort, new OzoneConfiguration());
-  }
-
-  /**
-   * Returns an OzoneClient which will use RPC protocol.
-   *
-   * @param omHost
-   *        hostname of the OzoneManager to connect to.
-   *
-   * @param omRpcPort
-   *        RPC port of OzoneManager.
-   *
-   * @param omServiceId
-   *        Service ID of OzoneManager HA cluster.
-   *
-   * @param config
-   *        Configuration to be used for OzoneClient creation
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRpcClient(String omHost, Integer omRpcPort,
-      String omServiceId, Configuration config) throws IOException {
-    Preconditions.checkNotNull(omHost);
-    Preconditions.checkNotNull(omRpcPort);
-    Preconditions.checkNotNull(omServiceId);
-    Preconditions.checkNotNull(config);
-    config.set(OZONE_OM_ADDRESS_KEY, omHost + ":" + omRpcPort);
-    return getRpcClient(omServiceId, config);
-  }
-
-  /**
-   * Returns an OzoneClient which will use RPC protocol.
-   *
-   * @param omHost
-   *        hostname of the OzoneManager to connect to.
-   *
-   * @param omRpcPort
-   *        RPC port of OzoneManager.
-   *
-   * @param config
-   *        Configuration to be used for OzoneClient creation
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRpcClient(String omHost, Integer omRpcPort,
-                                         Configuration config)
-      throws IOException {
-    Preconditions.checkNotNull(omHost);
-    Preconditions.checkNotNull(omRpcPort);
-    Preconditions.checkNotNull(config);
-    config.set(OZONE_OM_ADDRESS_KEY, omHost + ":" + omRpcPort);
-    return getRpcClient(config);
-  }
-
-  /**
-   * Returns an OzoneClient which will use RPC protocol.
-   *
-   * @param omServiceId
-   *        Service ID of OzoneManager HA cluster.
-   *
-   * @param config
-   *        Configuration to be used for OzoneClient creation
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRpcClient(String omServiceId,
-      Configuration config) throws IOException {
-    Preconditions.checkNotNull(omServiceId);
-    Preconditions.checkNotNull(config);
-    // Won't set OZONE_OM_ADDRESS_KEY here since service id is passed directly,
-    // leaving OZONE_OM_ADDRESS_KEY value as is.
-    return getClient(getClientProtocol(config, omServiceId), config);
-  }
-
-  /**
-   * Returns an OzoneClient which will use RPC protocol.
-   *
-   * @param config
-   *        Configuration to be used for OzoneClient creation
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRpcClient(Configuration config)
-      throws IOException {
-    Preconditions.checkNotNull(config);
-    return getClient(getClientProtocol(config),
-        config);
-  }
-
-  /**
-   * Creates OzoneClient with the given ClientProtocol and Configuration.
-   *
-   * @param clientProtocol
-   *        Protocol to be used by the OzoneClient
-   *
-   * @param config
-   *        Configuration to be used for OzoneClient creation
-   */
-  private static OzoneClient getClient(ClientProtocol clientProtocol,
-                                       Configuration config) {
-    OzoneClientInvocationHandler clientHandler =
-        new OzoneClientInvocationHandler(clientProtocol);
-    ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
-        OzoneClientInvocationHandler.class.getClassLoader(),
-        new Class<?>[]{ClientProtocol.class}, clientHandler);
-    return new OzoneClient(config, proxy);
-  }
-
-  /**
-   * Returns an instance of the ClientProtocol class.
-   *
-   * @param config
-   *        Configuration used to initialize ClientProtocol.
-   *
-   * @return ClientProtocol
-   *
-   * @throws IOException
-   */
-  private static ClientProtocol getClientProtocol(Configuration config)
-      throws IOException {
-    return getClientProtocol(config, null);
-  }
-
-  /**
-   * Returns an instance of the ClientProtocol class.
-   *
-   * @param config
-   *        Configuration used to initialize ClientProtocol.
-   *
-   * @param omServiceId
-   *        Service ID of the OzoneManager HA cluster, may be null.
-   *
-   * @return ClientProtocol
-   *
-   * @throws IOException
-   */
-  private static ClientProtocol getClientProtocol(Configuration config,
-      String omServiceId) throws IOException {
-    try {
-      return new RpcClient(config, omServiceId);
-    } catch (Exception e) {
-      final String message = "Couldn't create RpcClient protocol";
-      LOG.error(message + " exception: ", e);
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      } else {
-        throw new IOException(message, e);
-      }
-    }
-  }
-
-}
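The factory overloads removed above were the supported ways to obtain a client; a sketch, with host, port, and service id as placeholders:

    // Default configuration, read from ozone-site.xml on the classpath.
    OzoneClient a = OzoneClientFactory.getClient();
    // Explicit OzoneManager endpoint; OZONE_OM_ADDRESS_KEY is set internally.
    OzoneClient b = OzoneClientFactory.getRpcClient("om.example.com", 9862);
    // HA cluster addressed by service id; the OM address key is left as is.
    OzoneClient c = OzoneClientFactory.getRpcClient("om-service-1",
        new OzoneConfiguration());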
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java
deleted file mode 100644
index cdc7702..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.reflect.InvocationHandler;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-
-/**
- * Invocation Handler for the ozone client, which dispatches calls to the
- * underlying ClientProtocol implementation.
- */
-public class OzoneClientInvocationHandler implements InvocationHandler {
-
-  private static final Logger LOG = LoggerFactory.getLogger(OzoneClient.class);
-  private final ClientProtocol target;
-
-  /**
-   * Constructs OzoneClientInvocationHandler with the proxy.
-   * @param target ClientProtocol implementation to which calls are dispatched.
-   */
-  public OzoneClientInvocationHandler(ClientProtocol target) {
-    this.target = target;
-  }
-
-  @Override
-  public Object invoke(Object proxy, Method method, Object[] args)
-      throws Throwable {
-    LOG.trace("Invoking method {} on target {}", method, target);
-    try {
-      long startTime = Time.monotonicNow();
-      Object result = method.invoke(target, args);
-      LOG.debug("Call: {} took {} ms", method,
-          Time.monotonicNow() - startTime);
-      return result;
-    } catch (InvocationTargetException iEx) {
-      throw iEx.getCause();
-    }
-  }
-}
\ No newline at end of file
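The handler above is the standard JDK dynamic-proxy pattern. As a self-contained illustration of the same technique (not Ozone code), a generic timing wrapper could look like:

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import java.lang.reflect.Proxy;

    final class TimingProxy {
      // Wraps any interface implementation so each call is timed, unwrapping
      // InvocationTargetException to rethrow the original cause, as above.
      @SuppressWarnings("unchecked")
      static <T> T wrap(Class<T> iface, T target) {
        InvocationHandler handler = (proxy, method, args) -> {
          long start = System.nanoTime();
          try {
            return method.invoke(target, args);
          } catch (InvocationTargetException e) {
            throw e.getCause();
          } finally {
            System.out.printf("%s took %d us%n", method.getName(),
                (System.nanoTime() - start) / 1000);
          }
        };
        return (T) Proxy.newProxyInstance(iface.getClassLoader(),
            new Class<?>[]{iface}, handler);
      }
    }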
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
deleted file mode 100644
index 8531bfb..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.client;
-
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-
-/** A utility class for OzoneClient. */
-public final class OzoneClientUtils {
-
-  private OzoneClientUtils() {}
-
-  public static RetryPolicy createRetryPolicy(int maxRetryCount,
-      long retryInterval) {
-    // retry with fixed sleep between retries
-    return RetryPolicies.retryUpToMaximumCountWithFixedSleep(
-        maxRetryCount, retryInterval, TimeUnit.MILLISECONDS);
-  }
-
-}
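createRetryPolicy simply delegates to Hadoop's RetryPolicies; a hypothetical use, wrapping an existing ClientProtocol instance (rawProxy is a placeholder) so calls are retried up to three times with a one-second sleep:

    // RetryPolicy and RetryProxy are from org.apache.hadoop.io.retry.
    RetryPolicy policy = OzoneClientUtils.createRetryPolicy(3, 1000);
    ClientProtocol retrying = (ClientProtocol) RetryProxy.create(
        ClientProtocol.class, rawProxy, policy);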
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
deleted file mode 100644
index d654a60..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import org.apache.hadoop.hdds.client.ReplicationType;
-
-/**
- * A class that encapsulates OzoneKey.
- */
-public class OzoneKey {
-
-  /**
-   * Name of the Volume the Key belongs to.
-   */
-  private final String volumeName;
-  /**
-   * Name of the Bucket the Key belongs to.
-   */
-  private final String bucketName;
-  /**
-   * Name of the Key.
-   */
-  private final String name;
-  /**
-   * Size of the data.
-   */
-  private final long dataSize;
-  /**
-   * Creation time of the key.
-   */
-  private long creationTime;
-  /**
-   * Modification time of the key.
-   */
-  private long modificationTime;
-
-  private ReplicationType replicationType;
-
-  private int replicationFactor;
-
-  /**
-   * Constructs OzoneKey from OmKeyInfo.
-   *
-   */
-  @SuppressWarnings("parameternumber")
-  public OzoneKey(String volumeName, String bucketName,
-                  String keyName, long size, long creationTime,
-                  long modificationTime, ReplicationType type,
-                  int replicationFactor) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.name = keyName;
-    this.dataSize = size;
-    this.creationTime = creationTime;
-    this.modificationTime = modificationTime;
-    this.replicationType = type;
-    this.replicationFactor = replicationFactor;
-  }
-
-  /**
-   * Returns Volume Name associated with the Key.
-   *
-   * @return volumeName
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Returns Bucket Name associated with the Key.
-   *
-   * @return bucketName
-   */
-  public String getBucketName(){
-    return bucketName;
-  }
-
-  /**
-   * Returns the Key Name.
-   *
-   * @return keyName
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Returns the size of the data.
-   *
-   * @return dataSize
-   */
-  public long getDataSize() {
-    return dataSize;
-  }
-
-  /**
-   * Returns the creation time of the key.
-   *
-   * @return creation time
-   */
-  public long getCreationTime() {
-    return creationTime;
-  }
-
-  /**
-   * Returns the modification time of the key.
-   *
-   * @return modification time
-   */
-  public long getModificationTime() {
-    return modificationTime;
-  }
-
-  /**
-   * Returns the replication type of the key.
-   *
-   * @return replicationType
-   */
-  public ReplicationType getReplicationType() {
-    return replicationType;
-  }
-
-  /**
-   * Returns the replication factor of the key.
-   *
-   * @return replicationFactor
-   */
-  public int getReplicationFactor() {
-    return replicationFactor;
-  }
-
-}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java
deleted file mode 100644
index a57b663..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdds.client.ReplicationType;
-
-import java.util.List;
-import java.util.Map;
-
-/**
- * A class that encapsulates OzoneKeyLocation.
- */
-public class OzoneKeyDetails extends OzoneKey {
-
-  /**
-   * A list of block location information to specify replica locations.
-   */
-  private List<OzoneKeyLocation> ozoneKeyLocations;
-
-  private Map<String, String> metadata;
-
-  private FileEncryptionInfo feInfo;
-
-  /**
-   * Constructs OzoneKeyDetails from OmKeyInfo.
-   */
-  @SuppressWarnings("parameternumber")
-  public OzoneKeyDetails(String volumeName, String bucketName, String keyName,
-                         long size, long creationTime, long modificationTime,
-                         List<OzoneKeyLocation> ozoneKeyLocations,
-                         ReplicationType type, Map<String, String> metadata,
-                         FileEncryptionInfo feInfo, int replicationFactor) {
-    super(volumeName, bucketName, keyName, size, creationTime,
-        modificationTime, type, replicationFactor);
-    this.ozoneKeyLocations = ozoneKeyLocations;
-    this.metadata = metadata;
-    this.feInfo = feInfo;
-  }
-
-  /**
-   * Returns the location detail information of the specific Key.
-   */
-  public List<OzoneKeyLocation> getOzoneKeyLocations() {
-    return ozoneKeyLocations;
-  }
-
-  public Map<String, String> getMetadata() {
-    return metadata;
-  }
-
-  public FileEncryptionInfo getFileEncryptionInfo() {
-    return feInfo;
-  }
-
-  /**
-   * Set details of key location.
-   * @param ozoneKeyLocations - details of key location
-   */
-  public void setOzoneKeyLocations(List<OzoneKeyLocation> ozoneKeyLocations) {
-    this.ozoneKeyLocations = ozoneKeyLocations;
-  }
-}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyLocation.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyLocation.java
deleted file mode 100644
index 0ff8ba7..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyLocation.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-/**
- * One key can be stored in one or more containers as one or more blocks.
- * This class represents one such block instance.
- */
-public class OzoneKeyLocation {
-  /**
-   * The container in which this key is stored.
-   */
-  private final long containerID;
-  /**
-   * The block inside the container in which this key is stored.
-   */
-  private final long localID;
-  /**
-   * Data length of this key replica.
-   */
-  private final long length;
-  /**
-   * Offset of this key.
-   */
-  private final long offset;
-
-  /**
-   * Constructs OzoneKeyLocation.
-   */
-  public OzoneKeyLocation(long containerID, long localID,
-                  long length, long offset) {
-    this.containerID = containerID;
-    this.localID = localID;
-    this.length = length;
-    this.offset = offset;
-  }
-
-  /**
-   * Returns the containerID of this Key.
-   */
-  public long getContainerID() {
-    return containerID;
-  }
-
-  /**
-   * Returns the localID of this Key.
-   */
-  public long getLocalID() {
-    return localID;
-  }
-
-  /**
-   * Returns the length of this Key.
-   */
-  public long getLength() {
-    return length;
-  }
-
-  /**
-   * Returns the offset of this Key.
-   */
-  public long getOffset() {
-    return offset;
-  }
-
-}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUpload.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUpload.java
deleted file mode 100644
index 6eb76c4..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUpload.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import java.time.Instant;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-
-/**
- * Information about one initiated multipart upload.
- */
-public class OzoneMultipartUpload {
-
-  private String volumeName;
-
-  private String bucketName;
-
-  private String keyName;
-
-  private String uploadId;
-
-  private Instant creationTime;
-
-  private ReplicationType replicationType;
-
-  private ReplicationFactor replicationFactor;
-
-  public OzoneMultipartUpload(String volumeName, String bucketName,
-      String keyName, String uploadId, Instant creationTime,
-      ReplicationType replicationType,
-      ReplicationFactor replicationFactor) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.keyName = keyName;
-    this.uploadId = uploadId;
-    this.creationTime = creationTime;
-    this.replicationType = replicationType;
-    this.replicationFactor = replicationFactor;
-  }
-
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  public String getUploadId() {
-    return uploadId;
-  }
-
-  public Instant getCreationTime() {
-    return creationTime;
-  }
-
-  public void setCreationTime(Instant creationTime) {
-    this.creationTime = creationTime;
-  }
-
-  public ReplicationType getReplicationType() {
-    return replicationType;
-  }
-
-  public ReplicationFactor getReplicationFactor() {
-    return replicationFactor;
-  }
-}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java
deleted file mode 100644
index 38377eb..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import java.util.List;
-
-import com.google.common.base.Preconditions;
-
-/**
- * List of in-flight multipart (MPU) uploads.
- */
-public class OzoneMultipartUploadList {
-
-  private List<OzoneMultipartUpload> uploads;
-
-  public OzoneMultipartUploadList(
-      List<OzoneMultipartUpload> uploads) {
-    Preconditions.checkNotNull(uploads);
-    this.uploads = uploads;
-  }
-
-  public List<OzoneMultipartUpload> getUploads() {
-    return uploads;
-  }
-
-  public void setUploads(
-      List<OzoneMultipartUpload> uploads) {
-    this.uploads = uploads;
-  }
-}
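A sketch of listing in-flight uploads through the removed OzoneBucket#listMultipartUploads; the bucket and prefix are placeholders:

    OzoneMultipartUploadList inFlight = bucket.listMultipartUploads("big");
    for (OzoneMultipartUpload upload : inFlight.getUploads()) {
      System.out.println(upload.getKeyName() + " -> " + upload.getUploadId());
    }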
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java
deleted file mode 100644
index 7ce3148..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Class that represents Multipart upload List parts response.
- */
-public class OzoneMultipartUploadPartListParts {
-
-  private ReplicationType replicationType;
-
-  private ReplicationFactor replicationFactor;
-
-  // When a list is truncated, this element specifies the last part in the list,
-  // as well as the value to use for the part-number-marker request parameter
-  // in a subsequent request.
-  private int nextPartNumberMarker;
-  // Indicates whether the returned list of parts is truncated. A true value
-  // indicates that the list was truncated.
-  // A list can be truncated if the number of parts exceeds the limit
-  // returned in the MaxParts element.
-  private boolean truncated;
-  private List<PartInfo> partInfoList = new ArrayList<>();
-
-  public OzoneMultipartUploadPartListParts(ReplicationType type,
-      ReplicationFactor factor,
-      int nextMarker, boolean truncate) {
-    this.replicationType = type;
-    this.nextPartNumberMarker = nextMarker;
-    this.truncated = truncate;
-    this.replicationFactor = factor;
-  }
-
-  public void addAllParts(List<PartInfo> partInfos) {
-    partInfoList.addAll(partInfos);
-  }
-
-  public void addPart(PartInfo partInfo) {
-    this.partInfoList.add(partInfo);
-  }
-
-  public ReplicationType getReplicationType() {
-    return replicationType;
-  }
-
-  public int getNextPartNumberMarker() {
-    return nextPartNumberMarker;
-  }
-
-  public boolean isTruncated() {
-    return truncated;
-  }
-
-  public List<PartInfo> getPartInfoList() {
-    return partInfoList;
-  }
-
-  public ReplicationFactor getReplicationFactor() {
-    return replicationFactor;
-  }
-
-  /**
-   * Class that represents each Part information of a multipart upload part.
-   */
-  public static class PartInfo {
-
-    private int partNumber;
-    private String partName;
-    private long modificationTime;
-    private long size;
-
-    public PartInfo(int number, String name, long time, long size) {
-      this.partNumber = number;
-      this.partName = name;
-      this.modificationTime = time;
-      this.size = size;
-    }
-
-    public int getPartNumber() {
-      return partNumber;
-    }
-
-    public String getPartName() {
-      return partName;
-    }
-
-    public long getModificationTime() {
-      return modificationTime;
-    }
-
-    public long getSize() {
-      return size;
-    }
-  }
-}
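Putting the multipart pieces together, a sketch of the removed MPU flow on OzoneBucket. The part data is hypothetical, and OzoneOutputStream is assumed to expose the committed part name via getCommitUploadPartInfo() after close, as the contemporaneous client did:

    OmMultipartInfo info = bucket.initiateMultipartUpload("bigkey");
    String uploadID = info.getUploadID();
    byte[] partData = new byte[5 * 1024 * 1024];
    Map<Integer, String> parts = new HashMap<>();
    for (int partNumber = 1; partNumber <= 2; partNumber++) {
      OzoneOutputStream out = bucket.createMultipartKey(
          "bigkey", partData.length, partNumber, uploadID);
      out.write(partData);
      out.close();  // the commit info is available only after close
      parts.put(partNumber, out.getCommitUploadPartInfo().getPartName());
    }
    bucket.completeMultipartUpload("bigkey", uploadID, parts);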
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
deleted file mode 100644
index f2bdfdd..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
+++ /dev/null
@@ -1,328 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.client.OzoneQuota;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.om.helpers.WithMetadata;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-/**
- * A class that encapsulates OzoneVolume.
- */
-public class OzoneVolume extends WithMetadata {
-
-  /**
-   * The proxy used for connecting to the cluster and performing
-   * client operations.
-   */
-  private final ClientProtocol proxy;
-
-  /**
-   * Name of the Volume.
-   */
-  private final String name;
-
-  /**
-   * Admin Name of the Volume.
-   */
-  private String admin;
-  /**
-   * Owner of the Volume.
-   */
-  private String owner;
-  /**
-   * Quota allocated for the Volume.
-   */
-  private long quotaInBytes;
-  /**
-   * Creation time of the volume.
-   */
-  private long creationTime;
-  /**
-   * Volume ACLs.
-   */
-  private List<OzoneAcl> acls;
-
-  private int listCacheSize;
-
-  /**
-   * Constructs OzoneVolume instance.
-   * @param conf Configuration object.
-   * @param proxy ClientProtocol proxy.
-   * @param name Name of the volume.
-   * @param admin Volume admin.
-   * @param owner Volume owner.
-   * @param quotaInBytes Volume quota in bytes.
-   * @param creationTime creation time of the volume
-   * @param acls ACLs associated with the volume.
-   * @param metadata custom key value metadata.
-   */
-  @SuppressWarnings("parameternumber")
-  public OzoneVolume(Configuration conf, ClientProtocol proxy, String name,
-                     String admin, String owner, long quotaInBytes,
-                     long creationTime, List<OzoneAcl> acls,
-                     Map<String, String> metadata) {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    this.proxy = proxy;
-    this.name = name;
-    this.admin = admin;
-    this.owner = owner;
-    this.quotaInBytes = quotaInBytes;
-    this.creationTime = creationTime;
-    this.acls = acls;
-    this.listCacheSize = HddsClientUtils.getListCacheSize(conf);
-    this.metadata = metadata;
-  }
-
-  @SuppressWarnings("parameternumber")
-  public OzoneVolume(Configuration conf, ClientProtocol proxy, String name,
-                     String admin, String owner, long quotaInBytes,
-                     long creationTime, List<OzoneAcl> acls) {
-    this(conf, proxy, name, admin, owner, quotaInBytes, creationTime, acls,
-        new HashMap<>());
-  }
-
-  @VisibleForTesting
-  protected OzoneVolume(String name, String admin, String owner,
-      long quotaInBytes,
-      long creationTime, List<OzoneAcl> acls) {
-    this.proxy = null;
-    this.name = name;
-    this.admin = admin;
-    this.owner = owner;
-    this.quotaInBytes = quotaInBytes;
-    this.creationTime = creationTime;
-    this.acls = acls;
-    this.metadata = new HashMap<>();
-  }
-
-  /**
-   * Returns Volume name.
-   *
-   * @return volumeName
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Returns Volume's admin name.
-   *
-   * @return adminName
-   */
-  public String getAdmin() {
-    return admin;
-  }
-
-  /**
-   * Returns Volume's owner name.
-   *
-   * @return ownerName
-   */
-  public String getOwner() {
-    return owner;
-  }
-
-  /**
-   * Returns Quota allocated for the Volume in bytes.
-   *
-   * @return quotaInBytes
-   */
-  public long getQuota() {
-    return quotaInBytes;
-  }
-
-  /**
-   * Returns creation time of the volume.
-   *
-   * @return creation time.
-   */
-  public long getCreationTime() {
-    return creationTime;
-  }
-
-  /**
-   * Returns OzoneAcl list associated with the Volume.
-   *
-   * @return aclMap
-   */
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-
-  /**
-   * Sets/Changes the owner of this Volume.
-   * @param owner new owner
-   * @throws IOException
-   */
-  public void setOwner(String owner) throws IOException {
-    proxy.setVolumeOwner(name, owner);
-    this.owner = owner;
-  }
-
-  /**
-   * Sets/Changes the quota of this Volume.
-   * @param quota new quota
-   * @throws IOException
-   */
-  public void setQuota(OzoneQuota quota) throws IOException {
-    proxy.setVolumeQuota(name, quota);
-    this.quotaInBytes = quota.sizeInBytes();
-  }
-
-  /**
-   * Creates a new Bucket in this Volume, with default values.
-   * @param bucketName Name of the Bucket
-   * @throws IOException
-   */
-  public void createBucket(String bucketName)
-      throws IOException {
-    proxy.createBucket(name, bucketName);
-  }
-
-  /**
-   * Creates a new Bucket in this Volume, with properties set in bucketArgs.
-   * @param bucketName Name of the Bucket
-   * @param bucketArgs Properties to be set
-   * @throws IOException
-   */
-  public void createBucket(String bucketName, BucketArgs bucketArgs)
-      throws IOException {
-    proxy.createBucket(name, bucketName, bucketArgs);
-  }
-
-  /**
-   * Get the Bucket from this Volume.
-   * @param bucketName Name of the Bucket
-   * @return OzoneBucket
-   * @throws IOException
-   */
-  public OzoneBucket getBucket(String bucketName) throws IOException {
-    return proxy.getBucketDetails(name, bucketName);
-  }
-
-  /**
-   * Returns Iterator to iterate over all buckets in the volume.
-   * The result can be restricted using a bucket prefix; all buckets are
-   * returned if the prefix is null.
-   *
-   * @param bucketPrefix Bucket prefix to match
-   * @return {@code Iterator<OzoneBucket>}
-   */
-  public Iterator<? extends OzoneBucket> listBuckets(String bucketPrefix) {
-    return listBuckets(bucketPrefix, null);
-  }
-
-  /**
-   * Returns Iterator to iterate over all buckets after prevBucket in the
-   * volume.
-   * If prevBucket is null it iterates from the first bucket in the volume.
-   * The result can be restricted using a bucket prefix; all buckets are
-   * returned if the prefix is null.
-   *
-   * @param bucketPrefix Bucket prefix to match
-   * @param prevBucket Buckets are listed after this bucket
-   * @return {@code Iterator<OzoneBucket>}
-   */
-  public Iterator<? extends OzoneBucket> listBuckets(String bucketPrefix,
-      String prevBucket) {
-    return new BucketIterator(bucketPrefix, prevBucket);
-  }
-
-  /**
-   * Deletes the Bucket from this Volume.
-   * @param bucketName Name of the Bucket
-   * @throws IOException
-   */
-  public void deleteBucket(String bucketName) throws IOException {
-    proxy.deleteBucket(name, bucketName);
-  }
-
-  /**
-   * An Iterator to iterate over {@link OzoneBucket} list.
-   */
-  private class BucketIterator implements Iterator<OzoneBucket> {
-
-    private String bucketPrefix = null;
-
-    private Iterator<OzoneBucket> currentIterator;
-    private OzoneBucket currentValue;
-
-    /**
-     * Creates an Iterator to iterate over all buckets after prevBucket in
-     * the volume.
-     * If prevBucket is null it iterates from the first bucket in the volume.
-     * The returned buckets match the bucket prefix.
-     * @param bucketPrefix Bucket prefix to match
-     * @param prevBucket Buckets are listed after this bucket name
-     */
-    BucketIterator(String bucketPrefix, String prevBucket) {
-      this.bucketPrefix = bucketPrefix;
-      this.currentValue = null;
-      this.currentIterator = getNextListOfBuckets(prevBucket).iterator();
-    }
-
-    @Override
-    public boolean hasNext() {
-      if (!currentIterator.hasNext()) {
-        currentIterator = getNextListOfBuckets(
-            currentValue != null ? currentValue.getName() : null)
-            .iterator();
-      }
-      return currentIterator.hasNext();
-    }
-
-    @Override
-    public OzoneBucket next() {
-      if (hasNext()) {
-        currentValue = currentIterator.next();
-        return currentValue;
-      }
-      throw new NoSuchElementException();
-    }
-
-    /**
-     * Gets the next batch of buckets using the proxy.
-     * @param prevBucket Buckets are listed after this bucket name
-     * @return {@code List<OzoneBucket>}
-     */
-    private List<OzoneBucket> getNextListOfBuckets(String prevBucket) {
-      try {
-        return proxy.listBuckets(name, bucketPrefix, prevBucket, listCacheSize);
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-  }
-}
\ No newline at end of file
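A sketch of bucket management through the removed OzoneVolume methods; names are placeholders:

    volume.createBucket("b1");
    OzoneBucket b1 = volume.getBucket("b1");
    // Iterate buckets whose names start with "b"; a null prefix lists all.
    Iterator<? extends OzoneBucket> it = volume.listBuckets("b");
    while (it.hasNext()) {
      System.out.println(it.next().getName());
    }
    volume.deleteBucket("b1");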
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
deleted file mode 100644
index 359e195..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * This class encapsulates the arguments that are
- * required for creating a volume.
- */
-public final class VolumeArgs {
-
-  private final String admin;
-  private final String owner;
-  private final String quota;
-  private final List<OzoneAcl> acls;
-  private Map<String, String> metadata;
-
-  /**
-   * Private constructor, constructed via builder.
-   * @param admin Administrator's name.
-   * @param owner Volume owner's name
-   * @param quota Volume Quota.
-   * @param acls User to access rights map.
-   */
-  private VolumeArgs(String admin,
-      String owner,
-      String quota,
-      List<OzoneAcl> acls,
-      Map<String, String> metadata) {
-    this.admin = admin;
-    this.owner = owner;
-    this.quota = quota;
-    this.acls = acls;
-    this.metadata = metadata;
-  }
-
-  /**
-   * Returns the Admin Name.
-   * @return String.
-   */
-  public String getAdmin() {
-    return admin;
-  }
-
-  /**
-   * Returns the owner Name.
-   * @return String
-   */
-  public String getOwner() {
-    return owner;
-  }
-
-  /**
-   * Returns Volume Quota.
-   * @return Quota.
-   */
-  public String getQuota() {
-    return quota;
-  }
-
-  /**
-   * Return custom key value map.
-   *
-   * @return metadata
-   */
-  public Map<String, String> getMetadata() {
-    return metadata;
-  }
-
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-  /**
-   * Returns a new builder that builds a {@link VolumeArgs}.
-   *
-   * @return Builder
-   */
-  public static VolumeArgs.Builder newBuilder() {
-    return new VolumeArgs.Builder();
-  }
-
-  /**
-   * Builder for VolumeArgs.
-   */
-  public static class Builder {
-    private String adminName;
-    private String ownerName;
-    private String volumeQuota;
-    private List<OzoneAcl> listOfAcls;
-    private Map<String, String> metadata = new HashMap<>();
-
-
-    public VolumeArgs.Builder setAdmin(String admin) {
-      this.adminName = admin;
-      return this;
-    }
-
-    public VolumeArgs.Builder setOwner(String owner) {
-      this.ownerName = owner;
-      return this;
-    }
-
-    public VolumeArgs.Builder setQuota(String quota) {
-      this.volumeQuota = quota;
-      return this;
-    }
-
-    public VolumeArgs.Builder addMetadata(String key, String value) {
-      metadata.put(key, value);
-      return this;
-    }
-    public VolumeArgs.Builder setAcls(List<OzoneAcl> acls)
-        throws IOException {
-      this.listOfAcls = acls;
-      return this;
-    }
-
-    /**
-     * Constructs a VolumeArgs instance.
-     * @return VolumeArgs.
-     */
-    public VolumeArgs build() {
-      return new VolumeArgs(adminName, ownerName, volumeQuota, listOfAcls,
-          metadata);
-    }
-  }
-
-}
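The deleted VolumeArgs is a plain immutable value object constructed through its nested Builder. A short construction sketch (all values here are illustrative, not taken from the patch):

    // Hedged sketch: building volume-creation arguments with the removed API.
    VolumeArgs args = VolumeArgs.newBuilder()
        .setAdmin("hdfs")            // administrator principal
        .setOwner("alice")           // volume owner
        .setQuota("100GB")           // quota expressed as a string
        .addMetadata("team", "analytics")
        .build();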
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java
deleted file mode 100644
index 8381be0..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.client.io;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ChecksumType;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
-import org.apache.hadoop.hdds.scm.storage.BufferPool;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-
-import java.util.Collection;
-
-/**
- * Helper class that wraps a {@link BlockOutputStream}; used by
- * {@link KeyOutputStream} to write a single block of a key.
- */
-public final class BlockOutputStreamEntry extends OutputStream {
-
-  private OutputStream outputStream;
-  private BlockID blockID;
-  private final String key;
-  private final XceiverClientManager xceiverClientManager;
-  private final Pipeline pipeline;
-  private final ChecksumType checksumType;
-  private final int bytesPerChecksum;
-  private final int chunkSize;
-  // total number of bytes that should be written to this stream
-  private final long length;
-  // the current position of this stream 0 <= currentPosition < length
-  private long currentPosition;
-  private Token<OzoneBlockTokenIdentifier> token;
-
-  private final long streamBufferFlushSize;
-  private final long streamBufferMaxSize;
-  private final long watchTimeout;
-  private BufferPool bufferPool;
-
-  @SuppressWarnings("parameternumber")
-  private BlockOutputStreamEntry(BlockID blockID, String key,
-      XceiverClientManager xceiverClientManager,
-      Pipeline pipeline, String requestId, int chunkSize,
-      long length, long streamBufferFlushSize, long streamBufferMaxSize,
-      long watchTimeout, BufferPool bufferPool,
-      ChecksumType checksumType, int bytesPerChecksum,
-      Token<OzoneBlockTokenIdentifier> token) {
-    this.outputStream = null;
-    this.blockID = blockID;
-    this.key = key;
-    this.xceiverClientManager = xceiverClientManager;
-    this.pipeline = pipeline;
-    this.chunkSize = chunkSize;
-    this.token = token;
-    this.length = length;
-    this.currentPosition = 0;
-    this.streamBufferFlushSize = streamBufferFlushSize;
-    this.streamBufferMaxSize = streamBufferMaxSize;
-    this.watchTimeout = watchTimeout;
-    this.bufferPool = bufferPool;
-    this.checksumType = checksumType;
-    this.bytesPerChecksum = bytesPerChecksum;
-  }
-
-  long getLength() {
-    return length;
-  }
-
-  Token<OzoneBlockTokenIdentifier> getToken() {
-    return token;
-  }
-
-  long getRemaining() {
-    return length - currentPosition;
-  }
-
-  /**
-   * BlockOutputStream is initialized in this function. This makes sure that
-   * xceiverClient initialization is not done during preallocation and only
-   * done when data is written.
-   * @throws IOException if xceiverClient initialization fails
-   */
-  private void checkStream() throws IOException {
-    if (this.outputStream == null) {
-      if (getToken() != null) {
-        UserGroupInformation.getCurrentUser().addToken(getToken());
-      }
-      this.outputStream =
-          new BlockOutputStream(blockID, xceiverClientManager,
-              pipeline, chunkSize, streamBufferFlushSize,
-              streamBufferMaxSize, watchTimeout, bufferPool, checksumType,
-              bytesPerChecksum);
-    }
-  }
-
-
-  @Override
-  public void write(int b) throws IOException {
-    checkStream();
-    outputStream.write(b);
-    this.currentPosition += 1;
-  }
-
-  @Override
-  public void write(byte[] b, int off, int len) throws IOException {
-    checkStream();
-    outputStream.write(b, off, len);
-    this.currentPosition += len;
-  }
-
-  @Override
-  public void flush() throws IOException {
-    if (this.outputStream != null) {
-      this.outputStream.flush();
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (this.outputStream != null) {
-      this.outputStream.close();
-      // after closing the BlockOutputStream, the blockID would have been
-      // reconstructed with the updated bcsId
-      this.blockID = ((BlockOutputStream) outputStream).getBlockID();
-    }
-  }
-
-  boolean isClosed() {
-    if (outputStream != null) {
-      return  ((BlockOutputStream) outputStream).isClosed();
-    }
-    return false;
-  }
-
-  long getTotalAckDataLength() {
-    if (outputStream != null) {
-      BlockOutputStream out = (BlockOutputStream) this.outputStream;
-      blockID = out.getBlockID();
-      return out.getTotalAckDataLength();
-    } else {
-      // For a preallocated block for which no write has been initiated,
-      // the OutputStream will be null here.
-      // In such cases, the default blockCommitSequenceId will be 0.
-      return 0;
-    }
-  }
-
-  Collection<DatanodeDetails> getFailedServers() throws IOException {
-    if (outputStream != null) {
-      BlockOutputStream out = (BlockOutputStream) this.outputStream;
-      return out.getFailedServers();
-    }
-    return null;
-  }
-
-  long getWrittenDataLength() throws IOException {
-    if (outputStream != null) {
-      BlockOutputStream out = (BlockOutputStream) this.outputStream;
-      return out.getWrittenDataLength();
-    } else {
-      // For a preallocated block for which no write has been initiated,
-      // the OutputStream will be null here.
-      // In such cases, the written data length is 0.
-      return 0;
-    }
-  }
-
-  void cleanup(boolean invalidateClient) throws IOException {
-    checkStream();
-    BlockOutputStream out = (BlockOutputStream) this.outputStream;
-    out.cleanup(invalidateClient);
-
-  }
-
-  void writeOnRetry(long len) throws IOException {
-    checkStream();
-    BlockOutputStream out = (BlockOutputStream) this.outputStream;
-    out.writeOnRetry(len);
-    this.currentPosition += len;
-
-  }
-
-  /**
-   * Builder class for BlockOutputStreamEntry.
-   */
-  public static class Builder {
-
-    private BlockID blockID;
-    private String key;
-    private XceiverClientManager xceiverClientManager;
-    private Pipeline pipeline;
-    private String requestId;
-    private int chunkSize;
-    private long length;
-    private long streamBufferFlushSize;
-    private long streamBufferMaxSize;
-    private long watchTimeout;
-    private BufferPool bufferPool;
-    private Token<OzoneBlockTokenIdentifier> token;
-    private ChecksumType checksumType;
-    private int bytesPerChecksum;
-
-    public Builder setChecksumType(ChecksumType type) {
-      this.checksumType = type;
-      return this;
-    }
-
-    public Builder setBytesPerChecksum(int bytes) {
-      this.bytesPerChecksum = bytes;
-      return this;
-    }
-
-    public Builder setBlockID(BlockID bID) {
-      this.blockID = bID;
-      return this;
-    }
-
-    public Builder setKey(String keys) {
-      this.key = keys;
-      return this;
-    }
-
-    public Builder setXceiverClientManager(XceiverClientManager
-        xClientManager) {
-      this.xceiverClientManager = xClientManager;
-      return this;
-    }
-
-    public Builder setPipeline(Pipeline ppln) {
-      this.pipeline = ppln;
-      return this;
-    }
-
-    public Builder setRequestId(String request) {
-      this.requestId = request;
-      return this;
-    }
-
-    public Builder setChunkSize(int cSize) {
-      this.chunkSize = cSize;
-      return this;
-    }
-
-    public Builder setLength(long len) {
-      this.length = len;
-      return this;
-    }
-
-    public Builder setStreamBufferFlushSize(long bufferFlushSize) {
-      this.streamBufferFlushSize = bufferFlushSize;
-      return this;
-    }
-
-    public Builder setStreamBufferMaxSize(long bufferMaxSize) {
-      this.streamBufferMaxSize = bufferMaxSize;
-      return this;
-    }
-
-    public Builder setWatchTimeout(long timeout) {
-      this.watchTimeout = timeout;
-      return this;
-    }
-
-    public Builder setbufferPool(BufferPool pool) {
-      this.bufferPool = pool;
-      return this;
-    }
-
-    public Builder setToken(Token<OzoneBlockTokenIdentifier> bToken) {
-      this.token = bToken;
-      return this;
-    }
-
-    public BlockOutputStreamEntry build() {
-      return new BlockOutputStreamEntry(blockID, key,
-          xceiverClientManager, pipeline, requestId, chunkSize,
-          length, streamBufferFlushSize, streamBufferMaxSize, watchTimeout,
-          bufferPool, checksumType, bytesPerChecksum, token);
-    }
-  }
-
-  @VisibleForTesting
-  public OutputStream getOutputStream() {
-    return outputStream;
-  }
-
-  public BlockID getBlockID() {
-    return blockID;
-  }
-
-  public String getKey() {
-    return key;
-  }
-
-  public XceiverClientManager getXceiverClientManager() {
-    return xceiverClientManager;
-  }
-
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  public int getChunkSize() {
-    return chunkSize;
-  }
-
-  public long getCurrentPosition() {
-    return currentPosition;
-  }
-
-  public long getStreamBufferFlushSize() {
-    return streamBufferFlushSize;
-  }
-
-  public long getStreamBufferMaxSize() {
-    return streamBufferMaxSize;
-  }
-
-  public long getWatchTimeout() {
-    return watchTimeout;
-  }
-
-  public BufferPool getBufferPool() {
-    return bufferPool;
-  }
-
-  public void setCurrentPosition(long curPosition) {
-    this.currentPosition = curPosition;
-  }
-}
-
-
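The checkStream() method above implements a lazy-initialization idiom: the underlying BlockOutputStream, and with it the XceiverClient connection, is created only on the first write, so preallocated blocks that are never written never open a datanode connection. A generic sketch of the same idiom with simplified placeholder types (not the Ozone classes themselves):

    import java.io.IOException;
    import java.io.OutputStream;

    // Placeholder sketch of the lazy-init pattern used by
    // BlockOutputStreamEntry; create() stands in for the expensive
    // BlockOutputStream construction.
    abstract class LazyOutputStream extends OutputStream {
      private OutputStream delegate; // null until the first write

      protected abstract OutputStream create() throws IOException;

      private void ensureOpen() throws IOException {
        if (delegate == null) {
          delegate = create(); // connection setup deferred to first write
        }
      }

      @Override
      public void write(int b) throws IOException {
        ensureOpen();
        delegate.write(b);
      }

      @Override
      public void close() throws IOException {
        if (delegate != null) { // never-written streams close as a no-op
          delegate.close();
        }
      }
    }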
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
deleted file mode 100644
index b179ca5..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
+++ /dev/null
@@ -1,354 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.client.io;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.storage.BufferPool;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.om.helpers.*;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.ListIterator;
-
-/**
- * This class manages the stream entries list and handles block allocation
- * from OzoneManager.
- */
-public class BlockOutputStreamEntryPool {
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(BlockOutputStreamEntryPool.class);
-
-  private final List<BlockOutputStreamEntry> streamEntries;
-  private int currentStreamIndex;
-  private final OzoneManagerProtocol omClient;
-  private final OmKeyArgs keyArgs;
-  private final XceiverClientManager xceiverClientManager;
-  private final int chunkSize;
-  private final String requestID;
-  private final long streamBufferFlushSize;
-  private final long streamBufferMaxSize;
-  private final long watchTimeout;
-  private final long blockSize;
-  private final int bytesPerChecksum;
-  private final ContainerProtos.ChecksumType checksumType;
-  private final BufferPool bufferPool;
-  private OmMultipartCommitUploadPartInfo commitUploadPartInfo;
-  private final long openID;
-  private ExcludeList excludeList;
-
-  @SuppressWarnings("parameternumber")
-  public BlockOutputStreamEntryPool(OzoneManagerProtocol omClient,
-      int chunkSize, String requestId, HddsProtos.ReplicationFactor factor,
-      HddsProtos.ReplicationType type, long bufferFlushSize, long bufferMaxSize,
-      long size, long watchTimeout, ContainerProtos.ChecksumType checksumType,
-      int bytesPerChecksum, String uploadID, int partNumber,
-      boolean isMultipart, OmKeyInfo info,
-      XceiverClientManager xceiverClientManager, long openID) {
-    streamEntries = new ArrayList<>();
-    currentStreamIndex = 0;
-    this.omClient = omClient;
-    this.keyArgs = new OmKeyArgs.Builder().setVolumeName(info.getVolumeName())
-        .setBucketName(info.getBucketName()).setKeyName(info.getKeyName())
-        .setType(type).setFactor(factor).setDataSize(info.getDataSize())
-        .setIsMultipartKey(isMultipart).setMultipartUploadID(uploadID)
-        .setMultipartUploadPartNumber(partNumber).build();
-    this.xceiverClientManager = xceiverClientManager;
-    this.chunkSize = chunkSize;
-    this.requestID = requestId;
-    this.streamBufferFlushSize = bufferFlushSize;
-    this.streamBufferMaxSize = bufferMaxSize;
-    this.blockSize = size;
-    this.watchTimeout = watchTimeout;
-    this.bytesPerChecksum = bytesPerChecksum;
-    this.checksumType = checksumType;
-    this.openID = openID;
-    this.excludeList = new ExcludeList();
-
-    Preconditions.checkState(chunkSize > 0);
-    Preconditions.checkState(streamBufferFlushSize > 0);
-    Preconditions.checkState(streamBufferMaxSize > 0);
-    Preconditions.checkState(blockSize > 0);
-    Preconditions.checkState(streamBufferFlushSize % chunkSize == 0);
-    Preconditions.checkState(streamBufferMaxSize % streamBufferFlushSize == 0);
-    Preconditions.checkState(blockSize % streamBufferMaxSize == 0);
-    this.bufferPool =
-        new BufferPool(chunkSize, (int) streamBufferMaxSize / chunkSize,
-            xceiverClientManager.byteBufferToByteStringConversion());
-  }
-
-  /**
-   * A constructor for testing purposes only.
-   *
-   * @see KeyOutputStream#KeyOutputStream()
-   */
-  @VisibleForTesting
-  BlockOutputStreamEntryPool() {
-    streamEntries = new ArrayList<>();
-    omClient = null;
-    keyArgs = null;
-    xceiverClientManager = null;
-    chunkSize = 0;
-    requestID = null;
-    streamBufferFlushSize = 0;
-    streamBufferMaxSize = 0;
-    bufferPool = new BufferPool(chunkSize, 1);
-    watchTimeout = 0;
-    blockSize = 0;
-    this.checksumType = ContainerProtos.ChecksumType.valueOf(
-        OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT);
-    this.bytesPerChecksum = OzoneConfigKeys
-        .OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT_BYTES; // Default is 1MB
-    currentStreamIndex = 0;
-    openID = -1;
-  }
-
-  /**
-   * When a key is opened, it is possible that there are some blocks already
-   * allocated to it for this open session. In this case, to make use of these
-   * blocks, we need to add these blocks to stream entries. However, a key's
-   * version also includes blocks from previous versions, and we need to avoid
-   * adding these old blocks to stream entries, because these old blocks should
-   * not be picked for writes. To do this, the following method ensures that
-   * only blocks created in this particular open version are added to stream
-   * entries.
-   *
-   * @param version the set of blocks that are pre-allocated.
-   * @param openVersion the version corresponding to the pre-allocation.
-   * @throws IOException
-   */
-  public void addPreallocateBlocks(OmKeyLocationInfoGroup version,
-      long openVersion) throws IOException {
-    // The server may return any number of blocks (zero or more); add only
-    // the blocks allocated in this open session (block createVersion equal
-    // to the open session version).
-    for (OmKeyLocationInfo subKeyInfo : version.getLocationList()) {
-      if (subKeyInfo.getCreateVersion() == openVersion) {
-        addKeyLocationInfo(subKeyInfo);
-      }
-    }
-  }
-
-  private void addKeyLocationInfo(OmKeyLocationInfo subKeyInfo)
-      throws IOException {
-    Preconditions.checkNotNull(subKeyInfo.getPipeline());
-    UserGroupInformation.getCurrentUser().addToken(subKeyInfo.getToken());
-    BlockOutputStreamEntry.Builder builder =
-        new BlockOutputStreamEntry.Builder()
-            .setBlockID(subKeyInfo.getBlockID())
-            .setKey(keyArgs.getKeyName())
-            .setXceiverClientManager(xceiverClientManager)
-            .setPipeline(subKeyInfo.getPipeline())
-            .setRequestId(requestID)
-            .setChunkSize(chunkSize)
-            .setLength(subKeyInfo.getLength())
-            .setStreamBufferFlushSize(streamBufferFlushSize)
-            .setStreamBufferMaxSize(streamBufferMaxSize)
-            .setWatchTimeout(watchTimeout)
-            .setbufferPool(bufferPool)
-            .setChecksumType(checksumType)
-            .setBytesPerChecksum(bytesPerChecksum)
-            .setToken(subKeyInfo.getToken());
-    streamEntries.add(builder.build());
-  }
-
-  public List<OmKeyLocationInfo> getLocationInfoList()  {
-    List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
-    for (BlockOutputStreamEntry streamEntry : streamEntries) {
-      long length = streamEntry.getCurrentPosition();
-
-      // Commit only those blocks to OzoneManager which are not empty
-      if (length != 0) {
-        OmKeyLocationInfo info =
-            new OmKeyLocationInfo.Builder().setBlockID(streamEntry.getBlockID())
-                .setLength(streamEntry.getCurrentPosition()).setOffset(0)
-                .setToken(streamEntry.getToken())
-                .setPipeline(streamEntry.getPipeline()).build();
-        locationInfoList.add(info);
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "block written " + streamEntry.getBlockID() + ", length " + length
-                + " bcsID " + streamEntry.getBlockID()
-                .getBlockCommitSequenceId());
-      }
-    }
-    return locationInfoList;
-  }
-
-  /**
-   * Discards the subsequent preallocated blocks and removes their stream
-   * entries from the streamEntries list for the container which is closed.
-   * @param containerID id of the closed container
-   * @param pipelineId id of the associated pipeline
-   */
-  void discardPreallocatedBlocks(long containerID, PipelineID pipelineId) {
-    // currentStreamIndex < streamEntries.size() signifies that there are
-    // still preallocated blocks available.
-
-    // This will be called only to discard the next subsequent unused blocks
-    // in the streamEntryList.
-    if (currentStreamIndex + 1 < streamEntries.size()) {
-      ListIterator<BlockOutputStreamEntry> streamEntryIterator =
-          streamEntries.listIterator(currentStreamIndex + 1);
-      while (streamEntryIterator.hasNext()) {
-        BlockOutputStreamEntry streamEntry = streamEntryIterator.next();
-        Preconditions.checkArgument(streamEntry.getCurrentPosition() == 0);
-        if ((pipelineId != null && streamEntry.getPipeline().getId()
-            .equals(pipelineId)) || (containerID != -1
-            && streamEntry.getBlockID().getContainerID() == containerID)) {
-          streamEntryIterator.remove();
-        }
-      }
-    }
-  }
-
-  List<BlockOutputStreamEntry> getStreamEntries() {
-    return streamEntries;
-  }
-
-  XceiverClientManager getXceiverClientManager() {
-    return xceiverClientManager;
-  }
-
-  String getKeyName() {
-    return keyArgs.getKeyName();
-  }
-
-  long getKeyLength() {
-    return streamEntries.stream().mapToLong(e -> e.getCurrentPosition()).sum();
-  }
-  /**
-   * Contacts OM to get a new block and appends it with the next index (e.g.
-   * the first block has index = 0, the second index = 1, etc.).
-   *
-   * The returned block is wrapped in a new BlockOutputStreamEntry to write to.
-   *
-   * @throws IOException
-   */
-  private void allocateNewBlock() throws IOException {
-    OmKeyLocationInfo subKeyInfo =
-        omClient.allocateBlock(keyArgs, openID, excludeList);
-    addKeyLocationInfo(subKeyInfo);
-  }
-
-
-  void commitKey(long offset) throws IOException {
-    if (keyArgs != null) {
-      // in test, this could be null
-      long length = getKeyLength();
-      Preconditions.checkArgument(offset == length);
-      keyArgs.setDataSize(length);
-      keyArgs.setLocationInfoList(getLocationInfoList());
-      // When the key is a multipart upload part, we should not commit the
-      // key, as this is not an actual key; it is just a partial key of a
-      // large file.
-      if (keyArgs.getIsMultipartKey()) {
-        commitUploadPartInfo =
-            omClient.commitMultipartUploadPart(keyArgs, openID);
-      } else {
-        omClient.commitKey(keyArgs, openID);
-      }
-    } else {
-      LOG.warn("Closing KeyOutputStream, but key args is null");
-    }
-  }
-
-  public BlockOutputStreamEntry getCurrentStreamEntry() {
-    if (streamEntries.isEmpty() || streamEntries.size() <= currentStreamIndex) {
-      return null;
-    } else {
-      return streamEntries.get(currentStreamIndex);
-    }
-  }
-
-  BlockOutputStreamEntry allocateBlockIfNeeded() throws IOException {
-    BlockOutputStreamEntry streamEntry = getCurrentStreamEntry();
-    if (streamEntry != null && streamEntry.isClosed()) {
-      // a stream entry gets closed either because:
-      // a. the stream got full, or
-      // b. it encountered an exception
-      currentStreamIndex++;
-    }
-    if (streamEntries.size() <= currentStreamIndex) {
-      Preconditions.checkNotNull(omClient);
-      // allocate a new block; if an exception happens, log an error and
-      // throw the exception to the caller directly, and the write fails.
-      int succeededAllocates = 0;
-      try {
-        allocateNewBlock();
-        succeededAllocates += 1;
-      } catch (IOException ioe) {
-        LOG.error("Try to allocate more blocks for write failed, already "
-            + "allocated {} blocks for this write.", succeededAllocates, ioe);
-        throw ioe;
-      }
-    }
-    // in theory, this condition should never be violated due to the check
-    // above; still do a sanity check.
-    Preconditions.checkArgument(currentStreamIndex < streamEntries.size());
-    return streamEntries.get(currentStreamIndex);
-  }
-
-  long computeBufferData() {
-    return bufferPool.computeBufferData();
-  }
-
-  void cleanup() {
-    if (excludeList != null) {
-      excludeList.clear();
-      excludeList = null;
-    }
-    if (bufferPool != null) {
-      bufferPool.clearBufferPool();
-    }
-
-    if (streamEntries != null) {
-      streamEntries.clear();
-    }
-  }
-
-  public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() {
-    return commitUploadPartInfo;
-  }
-
-  public ExcludeList getExcludeList() {
-    return excludeList;
-  }
-
-  public long getStreamBufferMaxSize() {
-    return streamBufferMaxSize;
-  }
-
-  boolean isEmpty() {
-    return streamEntries.isEmpty();
-  }
-}
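The Preconditions chain in the pool's constructor encodes a strict divisibility hierarchy: chunkSize divides streamBufferFlushSize, which divides streamBufferMaxSize, which divides blockSize, so a block always flushes in whole chunks and commits in whole flush units. The same invariants restated as a standalone check (a sketch; the deleted class enforces them with Guava Preconditions as shown above):

    // Hedged restatement of the constructor's buffer-size invariants.
    final class BufferSizeInvariants {
      static void validate(int chunkSize, long flushSize, long maxSize,
          long blockSize) {
        if (chunkSize <= 0 || flushSize <= 0 || maxSize <= 0 || blockSize <= 0) {
          throw new IllegalStateException("all sizes must be positive");
        }
        if (flushSize % chunkSize != 0) {
          throw new IllegalStateException(
              "stream buffer flush size must be a multiple of chunk size");
        }
        if (maxSize % flushSize != 0) {
          throw new IllegalStateException(
              "stream buffer max size must be a multiple of flush size");
        }
        if (blockSize % maxSize != 0) {
          throw new IllegalStateException(
              "block size must be a multiple of stream buffer max size");
        }
      }
    }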
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java
deleted file mode 100644
index ecbb329..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.client.io;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.fs.Seekable;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.storage.BlockInputStream;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-/**
- * Maintains a list of BlockInputStreams, one per block of the key, and
- * reads based on offset.
- */
-public class KeyInputStream extends InputStream implements Seekable {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeyInputStream.class);
-
-  private static final int EOF = -1;
-
-  private String key;
-  private long length = 0;
-  private boolean closed = false;
-
-  // List of BlockInputStreams, one for each block in the key
-  private final List<BlockInputStream> blockStreams;
-
-  // blockOffsets[i] stores the index of the first data byte in
-  // blockStream w.r.t the key data.
-  // For example, let’s say the block size is 200 bytes and block[0] stores
-  // data from indices 0 - 199, block[1] from indices 200 - 399 and so on.
-  // Then, blockOffset[0] = 0 (the offset of the first byte of data in
-  // block[0]), blockOffset[1] = 200 and so on.
-  private long[] blockOffsets = null;
-
-  // Index of the blockStream corresponding to the current position of the
-  // KeyInputStream i.e. offset of the data to be read next
-  private int blockIndex;
-
-  // Tracks the blockIndex corresponding to the last seeked position so that it
-  // can be reset if a new position is seeked.
-  private int blockIndexOfPrevPosition;
-
-  public KeyInputStream() {
-    blockStreams = new ArrayList<>();
-    blockIndex = 0;
-  }
-
-  /**
-   * For each block in keyInfo, add a BlockInputStream to blockStreams.
-   */
-  public static LengthInputStream getFromOmKeyInfo(OmKeyInfo keyInfo,
-      XceiverClientManager xceiverClientManager,
-      boolean verifyChecksum) {
-    List<OmKeyLocationInfo> keyLocationInfos = keyInfo
-        .getLatestVersionLocations().getBlocksLatestVersionOnly();
-
-    KeyInputStream keyInputStream = new KeyInputStream();
-    keyInputStream.initialize(keyInfo.getKeyName(), keyLocationInfos,
-        xceiverClientManager, verifyChecksum);
-
-    return new LengthInputStream(keyInputStream, keyInputStream.length);
-  }
-
-  private synchronized void initialize(String keyName,
-      List<OmKeyLocationInfo> blockInfos,
-      XceiverClientManager xceiverClientManager,
-      boolean verifyChecksum) {
-    this.key = keyName;
-    this.blockOffsets = new long[blockInfos.size()];
-    long keyLength = 0;
-    for (int i = 0; i < blockInfos.size(); i++) {
-      OmKeyLocationInfo omKeyLocationInfo = blockInfos.get(i);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding stream for accessing {}. The stream will be " +
-            "initialized later.", omKeyLocationInfo);
-      }
-
-      addStream(omKeyLocationInfo, xceiverClientManager,
-          verifyChecksum);
-
-      this.blockOffsets[i] = keyLength;
-      keyLength += omKeyLocationInfo.getLength();
-    }
-    this.length = keyLength;
-  }
-
-  /**
-   * Append another BlockInputStream to the end of the list. Note that the
-   * BlockInputStream is only created here and not initialized. The
-   * BlockInputStream is initialized when a read operation is performed on
-   * the block for the first time.
-   */
-  private synchronized void addStream(OmKeyLocationInfo blockInfo,
-      XceiverClientManager xceiverClientMngr,
-      boolean verifyChecksum) {
-    blockStreams.add(new BlockInputStream(blockInfo.getBlockID(),
-        blockInfo.getLength(), blockInfo.getPipeline(), blockInfo.getToken(),
-        verifyChecksum, xceiverClientMngr));
-  }
-
-  @VisibleForTesting
-  public void addStream(BlockInputStream blockInputStream) {
-    blockStreams.add(blockInputStream);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public synchronized int read() throws IOException {
-    byte[] buf = new byte[1];
-    if (read(buf, 0, 1) == EOF) {
-      return EOF;
-    }
-    return Byte.toUnsignedInt(buf[0]);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public synchronized int read(byte[] b, int off, int len) throws IOException {
-    checkOpen();
-    if (b == null) {
-      throw new NullPointerException();
-    }
-    if (off < 0 || len < 0 || len > b.length - off) {
-      throw new IndexOutOfBoundsException();
-    }
-    if (len == 0) {
-      return 0;
-    }
-    int totalReadLen = 0;
-    while (len > 0) {
-      // if we are at the last block and have read the entire block, return
-      if (blockStreams.size() == 0 ||
-          (blockStreams.size() - 1 <= blockIndex &&
-              blockStreams.get(blockIndex)
-                  .getRemaining() == 0)) {
-        return totalReadLen == 0 ? EOF : totalReadLen;
-      }
-
-      // Get the current blockStream and read data from it
-      BlockInputStream current = blockStreams.get(blockIndex);
-      int numBytesToRead = Math.min(len, (int)current.getRemaining());
-      int numBytesRead = current.read(b, off, numBytesToRead);
-      if (numBytesRead != numBytesToRead) {
-        // This implies that there is either data loss or corruption in the
-        // chunk entries. Even EOF in the current stream would be covered in
-        // this case.
-        throw new IOException(String.format(
-            "Inconsistent read for blockID=%s length=%d numBytesRead=%d",
-            current.getBlockID(), current.getLength(), numBytesRead));
-      }
-      totalReadLen += numBytesRead;
-      off += numBytesRead;
-      len -= numBytesRead;
-      if (current.getRemaining() <= 0 &&
-          ((blockIndex + 1) < blockStreams.size())) {
-        blockIndex += 1;
-      }
-    }
-    return totalReadLen;
-  }
-
-  /**
-   * Seeks the KeyInputStream to the specified position. This involves 2 steps:
-   *    1. Updating the blockIndex to the blockStream corresponding to the
-   *    seeked position.
-   *    2. Seeking the corresponding blockStream to the adjusted position.
-   *
-   * For example, let’s say the block size is 200 bytes and block[0] stores
-   * data from indices 0 - 199, block[1] from indices 200 - 399 and so on.
-   * Let’s say we seek to position 240. In the first step, the blockIndex
-   * would be updated to 1 as indices 200 - 399 reside in blockStream[1]. In
-   * the second step, the blockStream[1] would be seeked to position 40 (=
-   * 240 - blockOffset[1] (= 200)).
-   */
-  @Override
-  public synchronized void seek(long pos) throws IOException {
-    checkOpen();
-    if (pos < 0 || pos >= length) {
-      if (pos == 0) {
-        // It is possible for length and pos to be zero in which case
-        // seek should return instead of throwing exception
-        return;
-      }
-      throw new EOFException(
-          "EOF encountered at pos: " + pos + " for key: " + key);
-    }
-
-    // 1. Update the blockIndex
-    if (blockIndex >= blockStreams.size()) {
-      blockIndex = Arrays.binarySearch(blockOffsets, pos);
-    } else if (pos < blockOffsets[blockIndex]) {
-      blockIndex =
-          Arrays.binarySearch(blockOffsets, 0, blockIndex, pos);
-    } else if (pos >= blockOffsets[blockIndex] + blockStreams
-        .get(blockIndex).getLength()) {
-      blockIndex = Arrays
-          .binarySearch(blockOffsets, blockIndex + 1,
-              blockStreams.size(), pos);
-    }
-    if (blockIndex < 0) {
-      // Binary search returns -insertionPoint - 1  if element is not present
-      // in the array. insertionPoint is the point at which element would be
-      // inserted in the sorted array. We need to adjust the blockIndex
-      // accordingly so that blockIndex = insertionPoint - 1
-      blockIndex = -blockIndex - 2;
-    }
-
-    // Reset the previous blockStream's position
-    blockStreams.get(blockIndexOfPrevPosition).resetPosition();
-
-    // 2. Seek the blockStream to the adjusted position
-    blockStreams.get(blockIndex).seek(pos - blockOffsets[blockIndex]);
-    blockIndexOfPrevPosition = blockIndex;
-  }
-
-  @Override
-  public synchronized long getPos() throws IOException {
-    return length == 0 ? 0 : blockOffsets[blockIndex] +
-        blockStreams.get(blockIndex).getPos();
-  }
-
-  @Override
-  public boolean seekToNewSource(long targetPos) throws IOException {
-    return false;
-  }
-
-  @Override
-  public int available() throws IOException {
-    checkOpen();
-    long remaining = length - getPos();
-    return remaining <= Integer.MAX_VALUE ? (int) remaining : Integer.MAX_VALUE;
-  }
-
-  @Override
-  public void close() throws IOException {
-    closed = true;
-    for (BlockInputStream blockStream : blockStreams) {
-      blockStream.close();
-    }
-  }
-
-  /**
-   * Verify that the input stream is open. Non-blocking; this gives
-   * the last observed state of the {@link #closed} field.
-   * @throws IOException if the connection is closed.
-   */
-  private void checkOpen() throws IOException {
-    if (closed) {
-      throw new IOException(
-          ": " + FSExceptionMessages.STREAM_IS_CLOSED + " Key: " + key);
-    }
-  }
-
-  @VisibleForTesting
-  public synchronized int getCurrentStreamIndex() {
-    return blockIndex;
-  }
-
-  @VisibleForTesting
-  public long getRemainingOfIndex(int index) throws IOException {
-    return blockStreams.get(index).getRemaining();
-  }
-}
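The seek() implementation above reduces to one mapping: given the sorted blockOffsets array, find the block containing an absolute key position, correcting the negative insertion-point encoding that Arrays.binarySearch returns for non-boundary positions. That mapping in isolation (a sketch over a plain long[]):

    import java.util.Arrays;

    final class BlockIndexSketch {
      // blockOffsets[i] is the key offset of the first byte of block i,
      // sorted ascending; pos must satisfy 0 <= pos < key length.
      static int blockIndexFor(long[] blockOffsets, long pos) {
        int idx = Arrays.binarySearch(blockOffsets, pos);
        if (idx < 0) {
          // binarySearch returns -(insertionPoint) - 1 on a miss; the
          // containing block is insertionPoint - 1, i.e. -idx - 2.
          idx = -idx - 2;
        }
        return idx;
      }
    }

For the javadoc's example of 200-byte blocks, blockOffsets = {0, 200, 400} and pos = 240 give a binarySearch result of -3, so the function returns index 1, the block holding bytes 200-399.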
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
deleted file mode 100644
index fd503c3..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ /dev/null
@@ -1,629 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.client.io;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ozone.om.helpers.*;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.ratis.protocol.AlreadyClosedException;
-import org.apache.ratis.protocol.RaftRetryFailureException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.util.List;
-import java.util.Collection;
-import java.util.Map;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-
-/**
- * Maintains a list of BlockOutputStreamEntries and writes based on offset.
- *
- * Note that this may write to multiple containers in one write call. In case
- * the first container succeeded but later ones failed, the succeeded writes
- * are not rolled back.
- *
- * TODO : currently does not support multi-threaded access.
- */
-public class KeyOutputStream extends OutputStream {
-
-  /**
-   * Defines stream action while calling handleFlushOrClose.
-   */
-  enum StreamAction {
-    FLUSH, CLOSE, FULL
-  }
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(KeyOutputStream.class);
-
-  private boolean closed;
-  private FileEncryptionInfo feInfo;
-  private final Map<Class<? extends Throwable>, RetryPolicy> retryPolicyMap;
-  private int retryCount;
-  private long offset;
-  private final BlockOutputStreamEntryPool blockOutputStreamEntryPool;
-
-  /**
-   * A constructor for testing purposes only.
-   */
-  @VisibleForTesting
-  public KeyOutputStream() {
-    closed = false;
-    this.retryPolicyMap = HddsClientUtils.getExceptionList()
-        .stream()
-        .collect(Collectors.toMap(Function.identity(),
-            e -> RetryPolicies.TRY_ONCE_THEN_FAIL));
-    retryCount = 0;
-    offset = 0;
-    blockOutputStreamEntryPool = new BlockOutputStreamEntryPool();
-  }
-
-  @VisibleForTesting
-  public List<BlockOutputStreamEntry> getStreamEntries() {
-    return blockOutputStreamEntryPool.getStreamEntries();
-  }
-
-  @VisibleForTesting
-  public XceiverClientManager getXceiverClientManager() {
-    return blockOutputStreamEntryPool.getXceiverClientManager();
-  }
-
-  @VisibleForTesting
-  public List<OmKeyLocationInfo> getLocationInfoList() {
-    return blockOutputStreamEntryPool.getLocationInfoList();
-  }
-
-  @VisibleForTesting
-  public int getRetryCount() {
-    return retryCount;
-  }
-
-  @SuppressWarnings("parameternumber")
-  public KeyOutputStream(OpenKeySession handler,
-      XceiverClientManager xceiverClientManager,
-      OzoneManagerProtocol omClient, int chunkSize,
-      String requestId, ReplicationFactor factor, ReplicationType type,
-      long bufferFlushSize, long bufferMaxSize, long size, long watchTimeout,
-      ChecksumType checksumType, int bytesPerChecksum,
-      String uploadID, int partNumber, boolean isMultipart,
-      int maxRetryCount, long retryInterval) {
-    OmKeyInfo info = handler.getKeyInfo();
-    blockOutputStreamEntryPool =
-        new BlockOutputStreamEntryPool(omClient, chunkSize, requestId, factor,
-            type, bufferFlushSize, bufferMaxSize, size, watchTimeout,
-            checksumType, bytesPerChecksum, uploadID, partNumber, isMultipart,
-            info, xceiverClientManager, handler.getId());
-    // Retrieve the file encryption key info; null if the file is not in an
-    // encrypted bucket.
-    this.feInfo = info.getFileEncryptionInfo();
-    this.retryPolicyMap = HddsClientUtils.getRetryPolicyByException(
-        maxRetryCount, retryInterval);
-    this.retryCount = 0;
-  }
-
-  /**
-   * When a key is opened, it is possible that there are some blocks already
-   * allocated to it for this open session. In this case, to make use of these
-   * blocks, we need to add these blocks to stream entries. However, a key's
-   * version also includes blocks from previous versions, and we need to avoid
-   * adding these old blocks to stream entries, because these old blocks should
-   * not be picked for writes. To do this, the following method ensures that
-   * only blocks created in this particular open version are added to stream
-   * entries.
-   *
-   * @param version the set of blocks that are pre-allocated.
-   * @param openVersion the version corresponding to the pre-allocation.
-   * @throws IOException
-   */
-  public void addPreallocateBlocks(OmKeyLocationInfoGroup version,
-      long openVersion) throws IOException {
-    blockOutputStreamEntryPool.addPreallocateBlocks(version, openVersion);
-  }
-
-  @Override
-  public void write(int b) throws IOException {
-    byte[] buf = new byte[1];
-    buf[0] = (byte) b;
-    write(buf, 0, 1);
-  }
-
-  /**
-   * Tries to write the byte sequence b[off:off+len) to the streams.
-   *
-   * NOTE: Throws an exception if the data cannot fit into the remaining
-   * space, in which case nothing is written.
-   * TODO: May need to revisit this behaviour.
-   *
-   * @param b byte data
-   * @param off starting offset
-   * @param len length to write
-   * @throws IOException
-   */
-  @Override
-  public void write(byte[] b, int off, int len)
-      throws IOException {
-    checkNotClosed();
-    if (b == null) {
-      throw new NullPointerException();
-    }
-    if ((off < 0) || (off > b.length) || (len < 0) || ((off + len) > b.length)
-        || ((off + len) < 0)) {
-      throw new IndexOutOfBoundsException();
-    }
-    if (len == 0) {
-      return;
-    }
-    handleWrite(b, off, len, false);
-  }
-
-  private void handleWrite(byte[] b, int off, long len, boolean retry)
-      throws IOException {
-    while (len > 0) {
-      try {
-        BlockOutputStreamEntry current =
-            blockOutputStreamEntryPool.allocateBlockIfNeeded();
-        // The length (len) will be in the int range if the call happens
-        // through the write API of BlockOutputStream. The length can be in
-        // the long range if it comes via the exception path.
-        int writeLen = Math.min((int) len, (int) current.getRemaining());
-        long currentPos = current.getWrittenDataLength();
-        try {
-          if (retry) {
-            current.writeOnRetry(len);
-          } else {
-            current.write(b, off, writeLen);
-            offset += writeLen;
-          }
-        } catch (IOException ioe) {
-          // for the current iteration, totalDataWritten - currentPos gives the
-          // amount of data already written to the buffer
-
-          // In the retry path, the total data to be written will always be
-          // less than or equal to the max length of the allocated buffer.
-          // The len specified here is the sum of the data lengths of the
-          // buffers.
-          Preconditions.checkState(!retry || len <= blockOutputStreamEntryPool
-              .getStreamBufferMaxSize());
-          int dataWritten = (int) (current.getWrittenDataLength() - currentPos);
-          writeLen = retry ? (int) len : dataWritten;
-          // In retry path, the data written is already accounted in offset.
-          if (!retry) {
-            offset += writeLen;
-          }
-          LOG.debug("writeLen {}, total len {}", writeLen, len);
-          handleException(current, ioe);
-        }
-        if (current.getRemaining() <= 0) {
-          // since the current block is already written close the stream.
-          handleFlushOrClose(StreamAction.FULL);
-        }
-        len -= writeLen;
-        off += writeLen;
-      } catch (Exception e) {
-        markStreamClosed();
-        throw e;
-      }
-    }
-  }
-
-  /**
-   * It performs the following actions:
-   * a. Updates the committed length for the current stream at the datanode.
-   * b. Reads the data from the underlying buffer and writes it to the next
-   * stream.
-   *
-   * @param streamEntry StreamEntry
-   * @param exception   actual exception that occurred
-   * @throws IOException Throws IOException if Write fails
-   */
-  private void handleException(BlockOutputStreamEntry streamEntry,
-      IOException exception) throws IOException {
-    Throwable t = HddsClientUtils.checkForException(exception);
-    Preconditions.checkNotNull(t);
-    boolean retryFailure = checkForRetryFailure(t);
-    boolean containerExclusionException = false;
-    if (!retryFailure) {
-      containerExclusionException = checkIfContainerToExclude(t);
-    }
-    Pipeline pipeline = streamEntry.getPipeline();
-    PipelineID pipelineId = pipeline.getId();
-    long totalSuccessfulFlushedData = streamEntry.getTotalAckDataLength();
-    //set the correct length for the current stream
-    streamEntry.setCurrentPosition(totalSuccessfulFlushedData);
-    long bufferedDataLen = blockOutputStreamEntryPool.computeBufferData();
-    if (containerExclusionException) {
-      LOG.debug(
-          "Encountered exception {}. The last committed block length is {}, "
-              + "uncommitted data length is {} retry count {}", exception,
-          totalSuccessfulFlushedData, bufferedDataLen, retryCount);
-    } else {
-      LOG.warn(
-          "Encountered exception {} on the pipeline {}. "
-              + "The last committed block length is {}, "
-              + "uncommitted data length is {} retry count {}", exception,
-          pipeline, totalSuccessfulFlushedData, bufferedDataLen, retryCount);
-    }
-    Preconditions.checkArgument(
-        bufferedDataLen <= blockOutputStreamEntryPool.getStreamBufferMaxSize());
-    Preconditions.checkArgument(
-        offset - blockOutputStreamEntryPool.getKeyLength() == bufferedDataLen);
-    long containerId = streamEntry.getBlockID().getContainerID();
-    Collection<DatanodeDetails> failedServers = streamEntry.getFailedServers();
-    Preconditions.checkNotNull(failedServers);
-    ExcludeList excludeList = blockOutputStreamEntryPool.getExcludeList();
-    if (!failedServers.isEmpty()) {
-      excludeList.addDatanodes(failedServers);
-    }
-
-    // if the container needs to be excluded, add the container to the
-    // exclusion list; otherwise add the pipeline to the exclusion list
-    if (containerExclusionException) {
-      excludeList.addConatinerId(ContainerID.valueof(containerId));
-    } else {
-      excludeList.addPipeline(pipelineId);
-    }
-    // just clean up the current stream.
-    streamEntry.cleanup(retryFailure);
-
-    // discard all subsequent blocks on the containers and pipelines which
-    // are in the exclude list so that the very next retry never writes
-    // data to the closed container/pipeline
-    if (containerExclusionException) {
-      // discard subsequent pre allocated blocks from the streamEntries list
-      // from the closed container
-      blockOutputStreamEntryPool
-          .discardPreallocatedBlocks(streamEntry.getBlockID().getContainerID(),
-              null);
-    } else {
-      // In case there is a timeout exception, or watch-for-commit happened
-      // over a majority, or the client connection to the pipeline leader
-      // failed, just discard all the preallocated blocks on this pipeline.
-      // The next block allocation will exclude this specific pipeline.
-      // This ensures that if a two-way commit happens, it cannot span
-      // multiple blocks.
-      blockOutputStreamEntryPool
-          .discardPreallocatedBlocks(-1, pipelineId);
-    }
-    if (bufferedDataLen > 0) {
-      // If the data is still cached in the underlying stream, we need to
-      // allocate new block and write this data in the datanode.
-      handleRetry(exception, bufferedDataLen);
-      // reset the retryCount after handling the exception
-      retryCount = 0;
-    }
-  }
-
-  private void markStreamClosed() {
-    blockOutputStreamEntryPool.cleanup();
-    closed = true;
-  }
-
-  private void handleRetry(IOException exception, long len) throws IOException {
-    RetryPolicy retryPolicy = retryPolicyMap
-        .get(HddsClientUtils.checkForException(exception).getClass());
-    if (retryPolicy == null) {
-      retryPolicy = retryPolicyMap.get(Exception.class);
-    }
-    RetryPolicy.RetryAction action;
-    try {
-      action = retryPolicy.shouldRetry(exception, retryCount, 0, true);
-    } catch (Exception e) {
-      throw e instanceof IOException ? (IOException) e : new IOException(e);
-    }
-    if (action.action == RetryPolicy.RetryAction.RetryDecision.FAIL) {
-      String msg = "";
-      if (action.reason != null) {
-        msg = "Retry request failed. " + action.reason;
-        LOG.error(msg, exception);
-      }
-      throw new IOException(msg, exception);
-    }
-
-    // Throw the exception if the thread is interrupted
-    if (Thread.currentThread().isInterrupted()) {
-      LOG.warn("Interrupted while trying for retry");
-      throw exception;
-    }
-    Preconditions.checkArgument(
-        action.action == RetryPolicy.RetryAction.RetryDecision.RETRY);
-    if (action.delayMillis > 0) {
-      try {
-        Thread.sleep(action.delayMillis);
-      } catch (InterruptedException e) {
-        throw (IOException) new InterruptedIOException(
-            "Interrupted: action=" + action + ", retry policy=" + retryPolicy)
-            .initCause(e);
-      }
-    }
-    retryCount++;
-    LOG.trace("Retrying Write request. Already tried " + retryCount
-        + " time(s); retry policy is " + retryPolicy);
-    handleWrite(null, 0, len, true);
-  }
-
-  /**
-   * Checks if the provided exception signifies retry failure in ratis client.
-   * In case of retry failure, ratis client throws RaftRetryFailureException
-   * and all subsequent operations fail with AlreadyClosedException.
-   */
-  private boolean checkForRetryFailure(Throwable t) {
-    return t instanceof RaftRetryFailureException
-        || t instanceof AlreadyClosedException;
-  }
-
-  // Every container-specific exception from the datanode will be seen as
-  // StorageContainerException
-  private boolean checkIfContainerToExclude(Throwable t) {
-    return t instanceof StorageContainerException;
-  }
-
-  @Override
-  public void flush() throws IOException {
-    checkNotClosed();
-    handleFlushOrClose(StreamAction.FLUSH);
-  }
-
-  /**
-   * Close or Flush the latest outputStream depending upon the action.
-   * This function gets called when, while a write is going on, the current
-   * stream gets full or the client makes an explicit flush or close request.
-   * When the stream gets full and we try to close it, we might hit an
-   * exception; in the exception handling path we write the data residing in
-   * the buffer pool to a new block. When the data gets written to the new
-   * stream, it will be at most half full, so we should just write the data
-   * and not close the stream, as the block won't be completely full.
-   *
-   * @param op Flag which decides whether to call close or flush on the
-   *           outputStream.
-   * @throws IOException In case, flush or close fails with exception.
-   */
-  private void handleFlushOrClose(StreamAction op) throws IOException {
-    if (blockOutputStreamEntryPool.isEmpty()) {
-      return;
-    }
-    while (true) {
-      try {
-        BlockOutputStreamEntry entry =
-            blockOutputStreamEntryPool.getCurrentStreamEntry();
-        if (entry != null) {
-          try {
-            Collection<DatanodeDetails> failedServers =
-                entry.getFailedServers();
-            // failed servers can be null in case there is no data written in
-            // the stream
-            if (failedServers != null && !failedServers.isEmpty()) {
-              blockOutputStreamEntryPool.getExcludeList()
-                  .addDatanodes(failedServers);
-            }
-            switch (op) {
-            case CLOSE:
-              entry.close();
-              break;
-            case FULL:
-              if (entry.getRemaining() == 0) {
-                entry.close();
-              }
-              break;
-            case FLUSH:
-              entry.flush();
-              break;
-            default:
-              throw new IOException("Invalid Operation");
-            }
-          } catch (IOException ioe) {
-            handleException(entry, ioe);
-            continue;
-          }
-        }
-        break;
-      } catch (Exception e) {
-        markStreamClosed();
-        throw e;
-      }
-    }
-  }
-
-  /**
-   * Commits the key to OM; this adds the blocks as the new key blocks.
-   *
-   * @throws IOException if closing the streams or committing the key fails.
-   */
-  @Override
-  public void close() throws IOException {
-    if (closed) {
-      return;
-    }
-    closed = true;
-    try {
-      handleFlushOrClose(StreamAction.CLOSE);
-      blockOutputStreamEntryPool.commitKey(offset);
-    } finally {
-      blockOutputStreamEntryPool.cleanup();
-    }
-  }
-
-  public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() {
-    return blockOutputStreamEntryPool.getCommitUploadPartInfo();
-  }
-
-  public FileEncryptionInfo getFileEncryptionInfo() {
-    return feInfo;
-  }
-
-  @VisibleForTesting
-  public ExcludeList getExcludeList() {
-    return blockOutputStreamEntryPool.getExcludeList();
-  }
-
-  /**
-   * Builder class of KeyOutputStream.
-   */
-  public static class Builder {
-    private OpenKeySession openHandler;
-    private XceiverClientManager xceiverManager;
-    private OzoneManagerProtocol omClient;
-    private int chunkSize;
-    private String requestID;
-    private ReplicationType type;
-    private ReplicationFactor factor;
-    private long streamBufferFlushSize;
-    private long streamBufferMaxSize;
-    private long blockSize;
-    private long watchTimeout;
-    private ChecksumType checksumType;
-    private int bytesPerChecksum;
-    private String multipartUploadID;
-    private int multipartNumber;
-    private boolean isMultipartKey;
-    private int maxRetryCount;
-    private long retryInterval;
-
-    public Builder setMultipartUploadID(String uploadID) {
-      this.multipartUploadID = uploadID;
-      return this;
-    }
-
-    public Builder setMultipartNumber(int partNumber) {
-      this.multipartNumber = partNumber;
-      return this;
-    }
-
-    public Builder setHandler(OpenKeySession handler) {
-      this.openHandler = handler;
-      return this;
-    }
-
-    public Builder setXceiverClientManager(XceiverClientManager manager) {
-      this.xceiverManager = manager;
-      return this;
-    }
-
-    public Builder setOmClient(OzoneManagerProtocol client) {
-      this.omClient = client;
-      return this;
-    }
-
-    public Builder setChunkSize(int size) {
-      this.chunkSize = size;
-      return this;
-    }
-
-    public Builder setRequestID(String id) {
-      this.requestID = id;
-      return this;
-    }
-
-    public Builder setType(ReplicationType replicationType) {
-      this.type = replicationType;
-      return this;
-    }
-
-    public Builder setFactor(ReplicationFactor replicationFactor) {
-      this.factor = replicationFactor;
-      return this;
-    }
-
-    public Builder setStreamBufferFlushSize(long size) {
-      this.streamBufferFlushSize = size;
-      return this;
-    }
-
-    public Builder setStreamBufferMaxSize(long size) {
-      this.streamBufferMaxSize = size;
-      return this;
-    }
-
-    public Builder setBlockSize(long size) {
-      this.blockSize = size;
-      return this;
-    }
-
-    public Builder setWatchTimeout(long timeout) {
-      this.watchTimeout = timeout;
-      return this;
-    }
-
-    public Builder setChecksumType(ChecksumType cType) {
-      this.checksumType = cType;
-      return this;
-    }
-
-    public Builder setBytesPerChecksum(int bytes) {
-      this.bytesPerChecksum = bytes;
-      return this;
-    }
-
-    public Builder setIsMultipartKey(boolean isMultipart) {
-      this.isMultipartKey = isMultipart;
-      return this;
-    }
-
-    public Builder setMaxRetryCount(int maxCount) {
-      this.maxRetryCount = maxCount;
-      return this;
-    }
-
-    public Builder setRetryInterval(long retryIntervalInMS) {
-      this.retryInterval = retryIntervalInMS;
-      return this;
-    }
-
-    public KeyOutputStream build() {
-      return new KeyOutputStream(openHandler, xceiverManager, omClient,
-          chunkSize, requestID, factor, type, streamBufferFlushSize,
-          streamBufferMaxSize, blockSize, watchTimeout, checksumType,
-          bytesPerChecksum, multipartUploadID, multipartNumber, isMultipartKey,
-          maxRetryCount, retryInterval);
-    }
-  }
-
-  /**
-   * Verify that the output stream is open. Non blocking; this gives
-   * the last state of the volatile {@link #closed} field.
-   * @throws IOException if the connection is closed.
-   */
-  private void checkNotClosed() throws IOException {
-    if (closed) {
-      throw new IOException(
-          ": " + FSExceptionMessages.STREAM_IS_CLOSED + " Key: "
-              + blockOutputStreamEntryPool.getKeyName());
-    }
-  }
-}
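For orientation, here is a minimal, hypothetical sketch of how a caller might assemble this stream through the Builder removed above. The openSession, clientManager and omClient handles are assumed to come from an OM openKey call and an XceiverClientManager created elsewhere; all sizes and settings are illustrative, not defaults.

    // Hypothetical Builder usage; every handle and value below is assumed.
    KeyOutputStream keyOut = new KeyOutputStream.Builder()
        .setHandler(openSession)              // OpenKeySession from OM
        .setXceiverClientManager(clientManager)
        .setOmClient(omClient)
        .setChunkSize(16 * 1024 * 1024)
        .setType(ReplicationType.RATIS)
        .setFactor(ReplicationFactor.THREE)
        .setStreamBufferFlushSize(64L * 1024 * 1024)
        .setStreamBufferMaxSize(128L * 1024 * 1024)
        .setBlockSize(256L * 1024 * 1024)
        .setMaxRetryCount(5)
        .setRetryInterval(1000)
        .build();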
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
deleted file mode 100644
index a69740f..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.io;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * OzoneInputStream is used to read data from Ozone.
- * It uses {@link KeyInputStream} for reading the data.
- */
-public class OzoneInputStream extends InputStream {
-
-  private final InputStream inputStream;
-
-  /**
-   * Constructs an OzoneInputStream that wraps the given stream,
-   * typically a KeyInputStream.
-   *
-   * @param inputStream the underlying stream to read from
-   */
-  public OzoneInputStream(InputStream inputStream) {
-    this.inputStream = inputStream;
-  }
-
-  @Override
-  public int read() throws IOException {
-    return inputStream.read();
-  }
-
-  @Override
-  public int read(byte[] b, int off, int len) throws IOException {
-    return inputStream.read(b, off, len);
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    inputStream.close();
-  }
-
-  @Override
-  public int available() throws IOException {
-    return inputStream.available();
-  }
-
-  public InputStream getInputStream() {
-    return inputStream;
-  }
-}
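Since OzoneInputStream is a thin wrapper that delegates read, available and close to the underlying stream, a consumer can treat it as a plain java.io.InputStream. A hedged sketch, where `bucket` is an assumed OzoneBucket handle and "key1" a hypothetical key name:

    // Illustrative read loop; `bucket` and "key1" are assumptions.
    try (OzoneInputStream in = bucket.readKey("key1")) {
      byte[] buffer = new byte[4096];
      int n;
      while ((n = in.read(buffer, 0, buffer.length)) != -1) {
        // process buffer[0..n)
      }
    } // close() is forwarded to the wrapped KeyInputStream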
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java
deleted file mode 100644
index e4a7d6a..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.io;
-
-import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * OzoneOutputStream is used to write data into Ozone.
- * It uses {@link KeyOutputStream} for writing the data.
- */
-public class OzoneOutputStream extends OutputStream {
-
-  private final OutputStream outputStream;
-
-  /**
-   * Constructs an OzoneOutputStream that wraps the given stream,
-   * typically a KeyOutputStream.
-   *
-   * @param outputStream the underlying stream to write to
-   */
-  public OzoneOutputStream(OutputStream outputStream) {
-    this.outputStream = outputStream;
-  }
-
-  @Override
-  public void write(int b) throws IOException {
-    outputStream.write(b);
-  }
-
-  @Override
-  public void write(byte[] b, int off, int len) throws IOException {
-    outputStream.write(b, off, len);
-  }
-
-  @Override
-  public synchronized void flush() throws IOException {
-    outputStream.flush();
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    //commitKey can be done here, if needed.
-    outputStream.close();
-  }
-
-  public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() {
-    if (outputStream instanceof KeyOutputStream) {
-      return ((KeyOutputStream) outputStream).getCommitUploadPartInfo();
-    }
-    // Otherwise return null.
-    return null;
-  }
-
-  public OutputStream getOutputStream() {
-    return outputStream;
-  }
-}
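The write side mirrors the read side: closing the OzoneOutputStream closes the wrapped KeyOutputStream, which commits the key to OM. A hedged sketch, where `bucket` is again an assumed OzoneBucket handle and the replication settings are purely illustrative:

    // Illustrative write path; `bucket`, the key name and the replication
    // settings are assumptions.
    byte[] data = "hello ozone".getBytes(StandardCharsets.UTF_8);
    try (OzoneOutputStream out = bucket.createKey("key1", data.length,
        ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>())) {
      out.write(data);
    } // close() commits the key via the wrapped KeyOutputStream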
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java
deleted file mode 100644
index 493ece8..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.io;
-
-/**
- * This package contains Ozone I/O classes.
- */
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java
deleted file mode 100644
index 7e2591a..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-/**
- * This package contains Ozone Client classes.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
deleted file mode 100644
index 1b8f5bb..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ /dev/null
@@ -1,648 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.protocol;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.*;
-import org.apache.hadoop.hdds.client.OzoneQuota;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.security.token.Token;
-
-/**
- * An implementer of this interface is capable of connecting to an Ozone
- * cluster and performing client operations. The protocol used for
- * communication is determined by the implementation class specified by the
- * property <code>ozone.client.protocol</code>. The built-in implementations
- * include {@link org.apache.hadoop.ozone.client.rpc.RpcClient} for RPC and
- * {@link org.apache.hadoop.ozone.client.rest.RestClient} for REST.
- */
-@KerberosInfo(serverPrincipal = OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY)
-public interface ClientProtocol {
-
-  /**
-   * Creates a new Volume.
-   * @param volumeName Name of the Volume
-   * @throws IOException
-   */
-  void createVolume(String volumeName)
-      throws IOException;
-
-  /**
-   * Creates a new Volume with properties set in VolumeArgs.
-   * @param volumeName Name of the Volume
-   * @param args Properties to be set for the Volume
-   * @throws IOException
-   */
-  void createVolume(String volumeName, VolumeArgs args)
-      throws IOException;
-
-  /**
-   * Sets the owner of volume.
-   * @param volumeName Name of the Volume
-   * @param owner to be set for the Volume
-   * @throws IOException
-   */
-  void setVolumeOwner(String volumeName, String owner) throws IOException;
-
-  /**
-   * Set Volume Quota.
-   * @param volumeName Name of the Volume
-   * @param quota Quota to be set for the Volume
-   * @throws IOException
-   */
-  void setVolumeQuota(String volumeName, OzoneQuota quota)
-      throws IOException;
-
-  /**
-   * Returns {@link OzoneVolume}.
-   * @param volumeName Name of the Volume
-   * @return {@link OzoneVolume}
-   * @throws IOException
-   * */
-  OzoneVolume getVolumeDetails(String volumeName)
-      throws IOException;
-
-  /**
-   * Checks if a Volume exists and whether the user with the specified role
-   * has access to the Volume.
-   * @param volumeName Name of the Volume
-   * @param acl requested acls which need to be checked for access
-   * @return Boolean - True if the user with the role can access the volume.
-   * This is possible for owners of the volume and admin users
-   * @throws IOException
-   */
-  boolean checkVolumeAccess(String volumeName, OzoneAcl acl)
-      throws IOException;
-
-  /**
-   * Deletes an empty Volume.
-   * @param volumeName Name of the Volume
-   * @throws IOException
-   */
-  void deleteVolume(String volumeName) throws IOException;
-
-  /**
-   * Lists all volumes in the cluster that match the volumePrefix; the
-   * size of the returned list depends on maxListResult. If the volume
-   * prefix is null, all volumes are returned. The caller has to make
-   * multiple calls to read all volumes.
-   *
-   * @param volumePrefix Volume prefix to match
-   * @param prevVolume Starting point of the list, this volume is excluded
-   * @param maxListResult Max number of volumes to return.
-   * @return {@code List<OzoneVolume>}
-   * @throws IOException
-   */
-  List<OzoneVolume> listVolumes(String volumePrefix, String prevVolume,
-                                int maxListResult)
-      throws IOException;
-
-  /**
-   * Lists all volumes in the cluster that are owned by the specified user
-   * and match the volumePrefix; the size of the returned list depends on
-   * maxListResult. If the user is null, volumes owned by the current user
-   * are returned. If the volume prefix is null, all volumes are returned.
-   * The caller has to make multiple calls to read all volumes.
-   *
-   * @param user User Name
-   * @param volumePrefix Volume prefix to match
-   * @param prevVolume Starting point of the list, this volume is excluded
-   * @param maxListResult Max number of volumes to return.
-   * @return {@code List<OzoneVolume>}
-   * @throws IOException
-   */
-  List<OzoneVolume> listVolumes(String user, String volumePrefix,
-                                    String prevVolume, int maxListResult)
-      throws IOException;
-
-  /**
-   * Creates a new Bucket in the Volume.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @throws IOException
-   */
-  void createBucket(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Creates a new Bucket in the Volume, with properties set in BucketArgs.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param bucketArgs Bucket Arguments
-   * @throws IOException
-   */
-  void createBucket(String volumeName, String bucketName,
-                    BucketArgs bucketArgs)
-      throws IOException;
-
-
-  /**
-   * Enables or disables Bucket Versioning.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param versioning True to enable Versioning, False to disable.
-   * @throws IOException
-   */
-  void setBucketVersioning(String volumeName, String bucketName,
-                           Boolean versioning)
-      throws IOException;
-
-  /**
-   * Sets the Storage Type of a Bucket.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param storageType StorageType to be set
-   * @throws IOException
-   */
-  void setBucketStorageType(String volumeName, String bucketName,
-                            StorageType storageType)
-      throws IOException;
-
-  /**
-   * Deletes a bucket if it is empty.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @throws IOException
-   */
-  void deleteBucket(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Checks that the bucket exists and the user has read access
-   * to the bucket; throws an exception otherwise.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @throws IOException
-   */
-  void checkBucketAccess(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Returns {@link OzoneBucket}.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @return {@link OzoneBucket}
-   * @throws IOException
-   */
-  OzoneBucket getBucketDetails(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Returns the list of buckets in the volume that match the bucketPrefix;
-   * the size of the returned list depends on maxListResult. The caller has
-   * to make multiple calls to read all buckets.
-   * @param volumeName Name of the Volume
-   * @param bucketPrefix Bucket prefix to match
-   * @param prevBucket Starting point of the list, this bucket is excluded
-   * @param maxListResult Max number of buckets to return.
-   * @return {@code List<OzoneBucket>}
-   * @throws IOException
-   */
-  List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix,
-                                String prevBucket, int maxListResult)
-      throws IOException;
-
-  /**
-   * Writes a key in an existing bucket.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param keyName Name of the Key
-   * @param size Size of the data
-   * @param type Replication Type
-   * @param factor Replication Factor
-   * @param metadata custom key value metadata
-   * @return {@link OzoneOutputStream}
-   * @throws IOException
-   */
-  OzoneOutputStream createKey(String volumeName, String bucketName,
-                              String keyName, long size, ReplicationType type,
-                              ReplicationFactor factor,
-                              Map<String, String> metadata)
-      throws IOException;
-
-  /**
-   * Reads a key from an existing bucket.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param keyName Name of the Key
-   * @return {@link OzoneInputStream}
-   * @throws IOException
-   */
-  OzoneInputStream getKey(String volumeName, String bucketName, String keyName)
-      throws IOException;
-
-
-  /**
-   * Deletes an existing key.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param keyName Name of the Key
-   * @throws IOException
-   */
-  void deleteKey(String volumeName, String bucketName, String keyName)
-      throws IOException;
-
-  /**
-   * Renames an existing key within a bucket.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param fromKeyName Name of the Key to be renamed
-   * @param toKeyName New name to be used for the Key
-   * @throws IOException
-   */
-  void renameKey(String volumeName, String bucketName, String fromKeyName,
-      String toKeyName) throws IOException;
-
-  /**
-   * Returns the list of keys in {Volume/Bucket} that match the keyPrefix;
-   * the size of the returned list depends on maxListResult. The caller has
-   * to make multiple calls to read all keys.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param keyPrefix Key prefix to match
-   * @param prevKey Starting point of the list, this key is excluded
-   * @param maxListResult Max number of keys to return.
-   * @return {@code List<OzoneKey>}
-   * @throws IOException
-   */
-  List<OzoneKey> listKeys(String volumeName, String bucketName,
-                          String keyPrefix, String prevKey, int maxListResult)
-      throws IOException;
-
-
-  /**
-   * Get OzoneKey.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param keyName Key name
-   * @return {@link OzoneKey}
-   * @throws IOException
-   */
-  OzoneKeyDetails getKeyDetails(String volumeName, String bucketName,
-                                String keyName)
-      throws IOException;
-
-  /**
-   * Creates an S3 bucket inside Ozone manager and creates the mapping needed
-   * to access via both S3 and Ozone.
-   * @param userName - S3 user name.
-   * @param s3BucketName - S3 bucket Name.
-   * @throws IOException - on failure, for example if the bucket already exists.
-   */
-  void createS3Bucket(String userName, String s3BucketName) throws IOException;
-
-  /**
-   * Deletes an s3 bucket and removes mapping of Ozone volume/bucket.
-   * @param bucketName - S3 Bucket Name.
-   * @throws  IOException in case the bucket cannot be deleted.
-   */
-  void deleteS3Bucket(String bucketName) throws IOException;
-
-
-  /**
-   * Returns the Ozone Namespace for the S3Bucket. It will return the
-   * OzoneVolume/OzoneBucketName.
-   * @param s3BucketName  - S3 Bucket Name.
-   * @return String - The Ozone canonical name for this s3 bucket. This
-   * string is useful for mounting an OzoneFS.
-   * @throws IOException - an error is thrown if the s3bucket does not exist.
-   */
-  String getOzoneBucketMapping(String s3BucketName) throws IOException;
-
-  /**
-   * Returns the corresponding Ozone volume given an S3 Bucket.
-   * @param s3BucketName - S3Bucket Name.
-   * @return String - Ozone Volume name.
-   * @throws IOException - Throws if the s3Bucket does not exist.
-   */
-  String getOzoneVolumeName(String s3BucketName) throws IOException;
-
-  /**
-   * Returns the corresponding Ozone bucket name for the given S3 bucket.
-   * @param s3BucketName - S3Bucket Name.
-   * @return String - Ozone bucket Name.
-   * @throws IOException - Throws if the s3bucket does not exist.
-   */
-  String getOzoneBucketName(String s3BucketName) throws IOException;
-
-  /**
-   * Returns the list of S3 buckets after prevBucket for a specific user.
-   * If prevBucket is null, all the buckets of the user are listed. The
-   * result can be restricted using a bucket prefix; all buckets are
-   * returned if the bucket prefix is null.
-   *
-   * @param userName user name
-   * @param bucketPrefix Bucket prefix to match
-   * @param prevBucket Buckets are listed after this bucket
-   * @param maxListResult Max number of buckets to return.
-   * @return {@code List<OzoneBucket>}
-   * @throws IOException
-   */
-  List<OzoneBucket> listS3Buckets(String userName, String bucketPrefix,
-                                String prevBucket, int maxListResult)
-      throws IOException;
-
-  /**
-   * Close and release the resources.
-   */
-  void close() throws IOException;
-
-  /**
-   * Initiate Multipart upload.
-   * @param volumeName
-   * @param bucketName
-   * @param keyName
-   * @param type
-   * @param factor
-   * @return {@link OmMultipartInfo}
-   * @throws IOException
-   */
-  OmMultipartInfo initiateMultipartUpload(String volumeName, String
-      bucketName, String keyName, ReplicationType type, ReplicationFactor
-      factor) throws IOException;
-
-  /**
-   * Create a part key for a multipart upload key.
-   * @param volumeName
-   * @param bucketName
-   * @param keyName
-   * @param size
-   * @param partNumber
-   * @param uploadID
-   * @return OzoneOutputStream
-   * @throws IOException
-   */
-  OzoneOutputStream createMultipartKey(String volumeName, String bucketName,
-                                       String keyName, long size,
-                                       int partNumber, String uploadID)
-      throws IOException;
-
-  /**
-   * Complete Multipart upload. This will combine all the parts and make the
-   * key visible in ozone.
-   * @param volumeName
-   * @param bucketName
-   * @param keyName
-   * @param uploadID
-   * @param partsMap
-   * @return OmMultipartUploadCompleteInfo
-   * @throws IOException
-   */
-  OmMultipartUploadCompleteInfo completeMultipartUpload(String volumeName,
-      String bucketName, String keyName, String uploadID,
-      Map<Integer, String> partsMap) throws IOException;
-
-  /**
-   * Abort Multipart upload request for the given key with given uploadID.
-   * @param volumeName
-   * @param bucketName
-   * @param keyName
-   * @param uploadID
-   * @throws IOException
-   */
-  void abortMultipartUpload(String volumeName,
-      String bucketName, String keyName, String uploadID) throws IOException;
-
-  /**
-   * Returns list of parts of a multipart upload key.
-   * @param volumeName
-   * @param bucketName
-   * @param keyName
-   * @param uploadID
-   * @param partNumberMarker - only parts with a part number greater than
-   * this marker are returned.
-   * @param maxParts
-   * @return OzoneMultipartUploadPartListParts
-   */
-  OzoneMultipartUploadPartListParts listParts(String volumeName,
-      String bucketName, String keyName, String uploadID, int partNumberMarker,
-      int maxParts)  throws IOException;
-
-  /**
-   * Returns the in-flight multipart uploads.
-   */
-  OzoneMultipartUploadList listMultipartUploads(String volumeName,
-      String bucketName, String prefix) throws IOException;
-
-  /**
-   * Get a valid Delegation Token.
-   *
-   * @param renewer the designated renewer for the token
-   * @return Token<OzoneTokenIdentifier>
-   * @throws IOException
-   */
-  Token<OzoneTokenIdentifier> getDelegationToken(Text renewer)
-      throws IOException;
-
-  /**
-   * Renew an existing delegation token.
-   *
-   * @param token delegation token obtained earlier
-   * @return the new expiration time
-   * @throws IOException
-   */
-  long renewDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws IOException;
-
-  /**
-   * Cancel an existing delegation token.
-   *
-   * @param token delegation token
-   * @throws IOException
-   */
-  void cancelDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws IOException;
-
-  /**
-   * Returns the S3 secret for the given Kerberos user.
-   * @param kerberosID
-   * @return S3SecretValue
-   * @throws IOException
-   */
-  S3SecretValue getS3Secret(String kerberosID) throws IOException;
-
-  @VisibleForTesting
-  OMFailoverProxyProvider getOMProxyProvider();
-
-  /**
-   * Get KMS client provider.
-   * @return KMS client provider.
-   * @throws IOException
-   */
-  KeyProvider getKeyProvider() throws IOException;
-
-  /**
-   * Get KMS client provider uri.
-   * @return KMS client provider uri.
-   * @throws IOException
-   */
-  URI getKeyProviderUri() throws IOException;
-
-  /**
-   * Get CanonicalServiceName for ozone delegation token.
-   * @return Canonical Service Name of ozone delegation token.
-   */
-  String getCanonicalServiceName();
-
-  /**
-   * Get the Ozone File Status for a particular Ozone key.
-   *
-   * @param volumeName volume name.
-   * @param bucketName bucket name.
-   * @param keyName    key name.
-   * @return OzoneFileStatus for the key.
-   * @throws OMException if the file or the bucket does not exist
-   * @throws IOException if there is an error in the db or the
-   *                     arguments are invalid
-   */
-  OzoneFileStatus getOzoneFileStatus(String volumeName, String bucketName,
-      String keyName) throws IOException;
-
-  /**
-   * Creates directory with keyName as the absolute path for the directory.
-   *
-   * @param volumeName Volume name
-   * @param bucketName Bucket name
-   * @param keyName    Absolute path for the directory
-   * @throws OMException if any entry in the path exists as a file,
-   *                     or if the bucket does not exist
-   * @throws IOException if there is an error in the db or the
-   *                     arguments are invalid
-   */
-  void createDirectory(String volumeName, String bucketName, String keyName)
-      throws IOException;
-
-  /**
-   * Creates an input stream for reading file contents.
-   *
-   * @param volumeName Volume name
-   * @param bucketName Bucket name
-   * @param keyName    Absolute path of the file to be read
-   * @return Input stream for reading the file
-   * @throws OMException if any entry in the path exists as a file,
-   *                     or if the bucket does not exist
-   * @throws IOException if there is an error in the db or the
-   *                     arguments are invalid
-   */
-  OzoneInputStream readFile(String volumeName, String bucketName,
-      String keyName) throws IOException;
-
-  /**
-   * Creates an output stream for writing to a file.
-   *
-   * @param volumeName Volume name
-   * @param bucketName Bucket name
-   * @param keyName    Absolute path of the file to be written
-   * @param size       Size of data to be written
-   * @param type       Replication Type
-   * @param factor     Replication Factor
-   * @param overWrite  if true, an existing file at the location will be
-   *                   overwritten
-   * @param recursive  if true, the file is created even if parent
-   *                   directories do not exist
-   * @return Output stream for writing to the file
-   * @throws OMException if the given key is a directory,
-   *                     if the file exists and the overWrite flag is false,
-   *                     if an ancestor exists as a file,
-   *                     or if the bucket does not exist
-   * @throws IOException if there is an error in the db or the
-   *                     arguments are invalid
-   */
-  @SuppressWarnings("checkstyle:parameternumber")
-  OzoneOutputStream createFile(String volumeName, String bucketName,
-      String keyName, long size, ReplicationType type, ReplicationFactor factor,
-      boolean overWrite, boolean recursive) throws IOException;
-
-  /**
-   * List the status for a file or a directory and its contents.
-   *
-   * @param volumeName Volume name
-   * @param bucketName Bucket name
-   * @param keyName    Absolute path of the entry to be listed
-   * @param recursive  For a directory if true all the descendants of a
-   *                   particular directory are listed
-   * @param startKey   Key from which listing needs to start. If startKey exists
-   *                   its status is included in the final list.
-   * @param numEntries Number of entries to list from the start key
-   * @return list of file status
-   */
-  List<OzoneFileStatus> listStatus(String volumeName, String bucketName,
-      String keyName, boolean recursive, String startKey, long numEntries)
-      throws IOException;
-
-
-  /**
-   * Adds an acl for the Ozone object. Returns true if the acl is added
-   * successfully, else false.
-   * @param obj Ozone object for which the acl should be added.
-   * @param acl ozone acl to be added.
-   *
-   * @throws IOException if there is error.
-   * */
-  boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException;
-
-  /**
-   * Removes an acl from the Ozone object. Returns true if the acl is
-   * removed successfully, else false.
-   * @param obj Ozone object.
-   * @param acl Ozone acl to be removed.
-   *
-   * @throws IOException if there is error.
-   * */
-  boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException;
-
-  /**
-   * Sets the acls for the given Ozone object. This operation resets the
-   * ACLs of the object to the list of ACLs provided in the argument.
-   * @param obj Ozone object.
-   * @param acls List of acls.
-   *
-   * @throws IOException if there is error.
-   * */
-  boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException;
-
-  /**
-   * Returns list of ACLs for given Ozone object.
-   * @param obj Ozone object.
-   *
-   * @throws IOException if there is error.
-   * */
-  List<OzoneAcl> getAcl(OzoneObj obj) throws IOException;
-
-}
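To make the contract above concrete, here is a hedged, end-to-end sketch of the typical call sequence against a ClientProtocol implementation. `proto` is assumed to be an RpcClient (or any other implementation), and every name and value is illustrative:

    // Assumed: proto is a ClientProtocol implementation obtained elsewhere.
    proto.createVolume("vol1");
    proto.createBucket("vol1", "bucket1");

    byte[] value = "value".getBytes(StandardCharsets.UTF_8);
    try (OzoneOutputStream out = proto.createKey("vol1", "bucket1", "key1",
        value.length, ReplicationType.RATIS, ReplicationFactor.THREE,
        new HashMap<>())) {
      out.write(value);
    }

    try (OzoneInputStream in = proto.getKey("vol1", "bucket1", "key1")) {
      // consume the stream
    }

    proto.deleteKey("vol1", "bucket1", "key1");
    proto.deleteBucket("vol1", "bucket1");
    proto.deleteVolume("vol1");
    proto.close();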
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java
deleted file mode 100644
index f4890a1..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.protocol;
-
-/**
- * This package contains Ozone client protocol library classes.
- */
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java
deleted file mode 100644
index 6be7770..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.CipherSuite;
-import org.apache.hadoop.crypto.CryptoCodec;
-import org.apache.hadoop.crypto.CryptoProtocolVersion;
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.KMSUtil;
-
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.net.URI;
-import java.nio.charset.StandardCharsets;
-import java.security.GeneralSecurityException;
-
-/**
- * KMS utility class for Ozone Data Encryption At-Rest.
- */
-public final class OzoneKMSUtil {
-
-  private static final String UTF8_CSN = StandardCharsets.UTF_8.name();
-  private static final String O3_KMS_PREFIX = "ozone-kms-";
-  private static String keyProviderUriKeyName =
-      "hadoop.security.key.provider.path";
-
-  private OzoneKMSUtil() {
-  }
-
-  public static KeyProvider.KeyVersion decryptEncryptedDataEncryptionKey(
-      FileEncryptionInfo feInfo, KeyProvider keyProvider) throws IOException {
-    if (keyProvider == null) {
-      throw new IOException("No KeyProvider is configured, " +
-          "cannot access an encrypted file");
-    } else {
-      EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption(
-          feInfo.getKeyName(), feInfo.getEzKeyVersionName(), feInfo.getIV(),
-          feInfo.getEncryptedDataEncryptionKey());
-
-      try {
-        KeyProviderCryptoExtension cryptoProvider = KeyProviderCryptoExtension
-            .createKeyProviderCryptoExtension(keyProvider);
-        return cryptoProvider.decryptEncryptedKey(ekv);
-      } catch (GeneralSecurityException gse) {
-        throw new IOException(gse);
-      }
-    }
-  }
-
-  /**
-   * Returns a key to map an ozone uri to a key provider uri.
-   * Tasks will look up this key to find the key provider.
-   */
-  public static Text getKeyProviderMapKey(URI namespaceUri) {
-    return new Text(O3_KMS_PREFIX + namespaceUri.getScheme()
-        +"://" + namespaceUri.getAuthority());
-  }
-
-  public static String bytes2String(byte[] bytes) {
-    return bytes2String(bytes, 0, bytes.length);
-  }
-
-  private static String bytes2String(byte[] bytes, int offset, int length) {
-    try {
-      return new String(bytes, offset, length, UTF8_CSN);
-    } catch (UnsupportedEncodingException e) {
-      throw new IllegalArgumentException("UTF8 encoding is not supported", e);
-    }
-  }
-
-  public static URI getKeyProviderUri(UserGroupInformation ugi,
-      URI namespaceUri, String kmsUriSrv, Configuration conf)
-      throws IOException {
-    URI keyProviderUri = null;
-    Credentials credentials = ugi.getCredentials();
-    Text credsKey = null;
-    if (namespaceUri != null) {
-      // from ugi
-      credsKey = getKeyProviderMapKey(namespaceUri);
-      byte[] keyProviderUriBytes = credentials.getSecretKey(credsKey);
-      if (keyProviderUriBytes != null) {
-        keyProviderUri = URI.create(bytes2String(keyProviderUriBytes));
-      }
-    }
-    if (keyProviderUri == null) {
-      // from client conf
-      if (kmsUriSrv == null) {
-        keyProviderUri = KMSUtil.getKeyProviderUri(
-            conf, keyProviderUriKeyName);
-      } else if (!kmsUriSrv.isEmpty()) {
-        // from om server
-        keyProviderUri = URI.create(kmsUriSrv);
-      }
-    }
-    // put back into UGI
-    if (keyProviderUri != null && credsKey != null) {
-      credentials.addSecretKey(
-          credsKey, DFSUtil.string2Bytes(keyProviderUri.toString()));
-    }
-
-    return keyProviderUri;
-  }
-
-  public static KeyProvider getKeyProvider(final Configuration conf,
-      final URI serverProviderUri) throws IOException {
-    if (serverProviderUri == null) {
-      throw new IOException("KMS serverProviderUri is not configured.");
-    }
-    return KMSUtil.createKeyProviderFromUri(conf, serverProviderUri);
-  }
-
-  public static CryptoProtocolVersion getCryptoProtocolVersion(
-      FileEncryptionInfo feInfo) throws IOException {
-    CryptoProtocolVersion version = feInfo.getCryptoProtocolVersion();
-    if (!CryptoProtocolVersion.supports(version)) {
-      throw new IOException("Client does not support specified " +
-              "CryptoProtocolVersion " + version.getDescription() +
-              " version number" + version.getVersion());
-    } else {
-      return version;
-    }
-  }
-
-  public static void checkCryptoProtocolVersion(
-          FileEncryptionInfo feInfo) throws IOException {
-    CryptoProtocolVersion version = feInfo.getCryptoProtocolVersion();
-    if (!CryptoProtocolVersion.supports(version)) {
-      throw new IOException("Client does not support specified " +
-              "CryptoProtocolVersion " + version.getDescription() +
-              " version number" + version.getVersion());
-    }
-  }
-
-  public static CryptoCodec getCryptoCodec(Configuration conf,
-      FileEncryptionInfo feInfo) throws IOException {
-    CipherSuite suite = feInfo.getCipherSuite();
-    if (suite.equals(CipherSuite.UNKNOWN)) {
-      throw new IOException("NameNode specified unknown CipherSuite with ID " +
-              suite.getUnknownValue() + ", cannot instantiate CryptoCodec.");
-    } else {
-      CryptoCodec codec = CryptoCodec.getInstance(conf, suite);
-      if (codec == null) {
-        throw new OMException("No configuration found for the cipher suite " +
-                suite.getConfigSuffix() + " prefixed with " +
-                "hadoop.security.crypto.codec.classes. Please see the" +
-                " example configuration hadoop.security.crypto.codec.classes." +
-                "EXAMPLE CIPHER SUITE at core-default.xml for details.",
-                OMException.ResultCodes.UNKNOWN_CIPHER_SUITE);
-      } else {
-        return codec;
-      }
-    }
-  }
-}
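Putting the utilities above together: a client decrypting a key's data-encryption key resolves the provider URI (UGI credentials first, then the client configuration or the URI served by OM), builds a KeyProvider and decrypts the EDEK. A hedged sketch; `conf`, `namespaceUri`, `kmsUriFromOm` and `feInfo` are assumed inputs:

    // Resolution order: UGI credentials, then client conf / OM-served URI.
    URI providerUri = OzoneKMSUtil.getKeyProviderUri(
        UserGroupInformation.getCurrentUser(), namespaceUri, kmsUriFromOm,
        conf);
    KeyProvider provider = OzoneKMSUtil.getKeyProvider(conf, providerUri);
    KeyProvider.KeyVersion dek =
        OzoneKMSUtil.decryptEncryptedDataEncryptionKey(feInfo, provider);
    // dek.getMaterial() now holds the plaintext data-encryption key bytes.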
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
deleted file mode 100644
index 06351ab..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ /dev/null
@@ -1,1177 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.crypto.CryptoInputStream;
-import org.apache.hadoop.crypto.CryptoOutputStream;
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ChecksumType;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.ozone.client.*;
-import org.apache.hadoop.hdds.client.OzoneQuota;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.io.KeyInputStream;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.LengthInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
-import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
-import org.apache.hadoop.ozone.om.helpers.OmPartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.ozone.om.protocolPB
-    .OzoneManagerProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.ozone.security.GDPRSymmetricKey;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.io.Text;
-import org.apache.logging.log4j.util.Strings;
-import org.apache.ratis.protocol.ClientId;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.crypto.Cipher;
-import javax.crypto.CipherInputStream;
-import javax.crypto.CipherOutputStream;
-import java.io.IOException;
-import java.net.URI;
-import java.security.InvalidKeyException;
-import java.security.SecureRandom;
-import java.util.*;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
-
-/**
- * Ozone RPC Client implementation. It connects to OM, SCM and DataNodes
- * to execute client calls, using the RPC protocol for communication
- * with the servers.
- */
-public class RpcClient implements ClientProtocol {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RpcClient.class);
-
-  private final OzoneConfiguration conf;
-  private final OzoneManagerProtocol ozoneManagerClient;
-  private final XceiverClientManager xceiverClientManager;
-  private final int chunkSize;
-  private final ChecksumType checksumType;
-  private final int bytesPerChecksum;
-  private boolean verifyChecksum;
-  private final UserGroupInformation ugi;
-  private final ACLType userRights;
-  private final ACLType groupRights;
-  private final long streamBufferFlushSize;
-  private final long streamBufferMaxSize;
-  private final long blockSize;
-  private final long watchTimeout;
-  private final ClientId clientId = ClientId.randomId();
-  private final int maxRetryCount;
-  private final long retryInterval;
-  private Text dtService;
-  private final boolean topologyAwareReadEnabled;
-
-  /**
-   * Creates RpcClient instance with the given configuration.
-   * @param conf Configuration
-   * @param omServiceId OM HA Service ID, set this to null if not HA
-   * @throws IOException
-   */
-  public RpcClient(Configuration conf, String omServiceId) throws IOException {
-    Preconditions.checkNotNull(conf);
-    this.conf = new OzoneConfiguration(conf);
-    this.ugi = UserGroupInformation.getCurrentUser();
-    // Get default acl rights for user and group.
-    OzoneAclConfig aclConfig = this.conf.getObject(OzoneAclConfig.class);
-    this.userRights = aclConfig.getUserDefaultRights();
-    this.groupRights = aclConfig.getGroupDefaultRights();
-
-    this.ozoneManagerClient = TracingUtil.createProxy(
-        new OzoneManagerProtocolClientSideTranslatorPB(
-            this.conf, clientId.toString(), omServiceId, ugi),
-        OzoneManagerProtocol.class, conf
-    );
-
-    ServiceInfoEx serviceInfoEx = ozoneManagerClient.getServiceInfo();
-    String caCertPem = null;
-    if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
-      caCertPem = serviceInfoEx.getCaCertificate();
-    }
-
-    this.xceiverClientManager = new XceiverClientManager(conf,
-        OzoneConfiguration.of(conf).getObject(XceiverClientManager.
-            ScmClientConfig.class), caCertPem);
-
-    int configuredChunkSize = (int) conf
-        .getStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
-            ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES);
-    if (configuredChunkSize > OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) {
-      LOG.warn("The chunk size ({}) is not allowed to be more than"
-              + " the maximum size ({}),"
-              + " resetting to the maximum size.",
-          configuredChunkSize, OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE);
-      chunkSize = OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE;
-    } else {
-      chunkSize = configuredChunkSize;
-    }
-    streamBufferFlushSize = (long) conf
-        .getStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE,
-            OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE_DEFAULT,
-            StorageUnit.BYTES);
-    streamBufferMaxSize = (long) conf
-        .getStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE,
-            OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE_DEFAULT,
-            StorageUnit.BYTES);
-    blockSize = (long) conf.getStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE,
-        OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES);
-    watchTimeout =
-        conf.getTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT,
-            OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT_DEFAULT,
-            TimeUnit.MILLISECONDS);
-
-    int configuredChecksumSize = (int) conf.getStorageSize(
-        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM,
-        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT,
-        StorageUnit.BYTES);
-    if (configuredChecksumSize <
-        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE) {
-      LOG.warn("The checksum size ({}) is not allowed to be less than the " +
-              "minimum size ({}), resetting to the minimum size.",
-          configuredChecksumSize,
-          OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE);
-      bytesPerChecksum =
-          OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE;
-    } else {
-      bytesPerChecksum = configuredChecksumSize;
-    }
-
-    String checksumTypeStr = conf.get(
-        OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE,
-        OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT);
-    checksumType = ChecksumType.valueOf(checksumTypeStr);
-    this.verifyChecksum =
-        conf.getBoolean(OzoneConfigKeys.OZONE_CLIENT_VERIFY_CHECKSUM,
-            OzoneConfigKeys.OZONE_CLIENT_VERIFY_CHECKSUM_DEFAULT);
-    maxRetryCount =
-        conf.getInt(OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES, OzoneConfigKeys.
-            OZONE_CLIENT_MAX_RETRIES_DEFAULT);
-    retryInterval = OzoneUtils.getTimeDurationInMS(conf,
-        OzoneConfigKeys.OZONE_CLIENT_RETRY_INTERVAL,
-        OzoneConfigKeys.OZONE_CLIENT_RETRY_INTERVAL_DEFAULT);
-    dtService = getOMProxyProvider().getCurrentProxyDelegationToken();
-    topologyAwareReadEnabled = conf.getBoolean(
-        OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY,
-        OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT);
-  }
-
-  @Override
-  public void createVolume(String volumeName) throws IOException {
-    createVolume(volumeName, VolumeArgs.newBuilder().build());
-  }
-
-  @Override
-  public void createVolume(String volumeName, VolumeArgs volArgs)
-      throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName);
-    Preconditions.checkNotNull(volArgs);
-
-    String admin = volArgs.getAdmin() == null ?
-        ugi.getUserName() : volArgs.getAdmin();
-    String owner = volArgs.getOwner() == null ?
-        ugi.getUserName() : volArgs.getOwner();
-    long quota = volArgs.getQuota() == null ?
-        OzoneConsts.MAX_QUOTA_IN_BYTES :
-        OzoneQuota.parseQuota(volArgs.getQuota()).sizeInBytes();
-    List<OzoneAcl> listOfAcls = new ArrayList<>();
-    //User ACL
-    listOfAcls.add(new OzoneAcl(ACLIdentityType.USER,
-            owner, userRights, ACCESS));
-    //Group ACLs of the User
-    List<String> userGroups = Arrays.asList(UserGroupInformation
-        .createRemoteUser(owner).getGroupNames());
-    userGroups.forEach(group -> listOfAcls.add(
-        new OzoneAcl(ACLIdentityType.GROUP, group, groupRights, ACCESS)));
-    //ACLs from VolumeArgs
-    if(volArgs.getAcls() != null) {
-      listOfAcls.addAll(volArgs.getAcls());
-    }
-
-    OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder();
-    builder.setVolume(volumeName);
-    builder.setAdminName(admin);
-    builder.setOwnerName(owner);
-    builder.setQuotaInBytes(quota);
-    builder.addAllMetadata(volArgs.getMetadata());
-
-    //Remove duplicates and add ACLs
-    for (OzoneAcl ozoneAcl :
-        listOfAcls.stream().distinct().collect(Collectors.toList())) {
-      builder.addOzoneAcls(OzoneAcl.toProtobuf(ozoneAcl));
-    }
-
-    if (volArgs.getQuota() == null) {
-      LOG.info("Creating Volume: {}, with {} as owner.", volumeName, owner);
-    } else {
-      LOG.info("Creating Volume: {}, with {} as owner "
-              + "and quota set to {} bytes.", volumeName, owner, quota);
-    }
-    ozoneManagerClient.createVolume(builder.build());
-  }
-
-  @Override
-  public void setVolumeOwner(String volumeName, String owner)
-      throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName);
-    Preconditions.checkNotNull(owner);
-    ozoneManagerClient.setOwner(volumeName, owner);
-  }
-
-  @Override
-  public void setVolumeQuota(String volumeName, OzoneQuota quota)
-      throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName);
-    Preconditions.checkNotNull(quota);
-    long quotaInBytes = quota.sizeInBytes();
-    ozoneManagerClient.setQuota(volumeName, quotaInBytes);
-  }
-
-  @Override
-  public OzoneVolume getVolumeDetails(String volumeName)
-      throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName);
-    OmVolumeArgs volume = ozoneManagerClient.getVolumeInfo(volumeName);
-    return new OzoneVolume(
-        conf,
-        this,
-        volume.getVolume(),
-        volume.getAdminName(),
-        volume.getOwnerName(),
-        volume.getQuotaInBytes(),
-        volume.getCreationTime(),
-        volume.getAclMap().ozoneAclGetProtobuf().stream().
-            map(OzoneAcl::fromProtobuf).collect(Collectors.toList()),
-        volume.getMetadata());
-  }
-
-  @Override
-  public boolean checkVolumeAccess(String volumeName, OzoneAcl acl)
-      throws IOException {
-    throw new UnsupportedOperationException("Not yet implemented.");
-  }
-
-  @Override
-  public void deleteVolume(String volumeName) throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName);
-    ozoneManagerClient.deleteVolume(volumeName);
-  }
-
-  @Override
-  public List<OzoneVolume> listVolumes(String volumePrefix, String prevVolume,
-                                       int maxListResult)
-      throws IOException {
-    List<OmVolumeArgs> volumes = ozoneManagerClient.listAllVolumes(
-        volumePrefix, prevVolume, maxListResult);
-
-    return volumes.stream().map(volume -> new OzoneVolume(
-        conf,
-        this,
-        volume.getVolume(),
-        volume.getAdminName(),
-        volume.getOwnerName(),
-        volume.getQuotaInBytes(),
-        volume.getCreationTime(),
-        volume.getAclMap().ozoneAclGetProtobuf().stream().
-            map(OzoneAcl::fromProtobuf).collect(Collectors.toList())))
-        .collect(Collectors.toList());
-  }
-
-  @Override
-  public List<OzoneVolume> listVolumes(String user, String volumePrefix,
-                                       String prevVolume, int maxListResult)
-      throws IOException {
-    List<OmVolumeArgs> volumes = ozoneManagerClient.listVolumeByUser(
-        user, volumePrefix, prevVolume, maxListResult);
-
-    return volumes.stream().map(volume -> new OzoneVolume(
-        conf,
-        this,
-        volume.getVolume(),
-        volume.getAdminName(),
-        volume.getOwnerName(),
-        volume.getQuotaInBytes(),
-        volume.getCreationTime(),
-        volume.getAclMap().ozoneAclGetProtobuf().stream().
-            map(OzoneAcl::fromProtobuf).collect(Collectors.toList()),
-        volume.getMetadata()))
-        .collect(Collectors.toList());
-  }
-
-  @Override
-  public void createBucket(String volumeName, String bucketName)
-      throws IOException {
-    // Use default bucket args; the current user's default acls are
-    // added in createBucket below.
-    createBucket(volumeName, bucketName,
-        BucketArgs.newBuilder().build());
-  }
-
-  @Override
-  public void createBucket(
-      String volumeName, String bucketName, BucketArgs bucketArgs)
-      throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    Preconditions.checkNotNull(bucketArgs);
-
-    Boolean isVersionEnabled = bucketArgs.getVersioning() == null ?
-        Boolean.FALSE : bucketArgs.getVersioning();
-    StorageType storageType = bucketArgs.getStorageType() == null ?
-        StorageType.DEFAULT : bucketArgs.getStorageType();
-    BucketEncryptionKeyInfo bek = null;
-    if (bucketArgs.getEncryptionKey() != null) {
-      bek = new BucketEncryptionKeyInfo.Builder()
-          .setKeyName(bucketArgs.getEncryptionKey()).build();
-    }
-
-    List<OzoneAcl> listOfAcls = getAclList();
-    //ACLs from BucketArgs
-    if(bucketArgs.getAcls() != null) {
-      listOfAcls.addAll(bucketArgs.getAcls());
-    }
-
-    OmBucketInfo.Builder builder = OmBucketInfo.newBuilder();
-    builder.setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setIsVersionEnabled(isVersionEnabled)
-        .addAllMetadata(bucketArgs.getMetadata())
-        .setStorageType(storageType)
-        .setAcls(listOfAcls.stream().distinct().collect(Collectors.toList()));
-
-    if (bek != null) {
-      builder.setBucketEncryptionKey(bek);
-    }
-
-    LOG.info("Creating Bucket: {}/{}, with Versioning {} and " +
-            "Storage Type set to {} and Encryption set to {} ",
-        volumeName, bucketName, isVersionEnabled, storageType, bek != null);
-    ozoneManagerClient.createBucket(builder.build());
-  }
-
-  /**
-   * Helper function to get default acl list for current user.
-   *
-   * @return listOfAcls
-   */
-  private List<OzoneAcl> getAclList() {
-    return OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(),
-        userRights, groupRights);
-  }
-
-  /**
-   * Get a valid Delegation Token.
-   *
-   * @param renewer the designated renewer for the token
-   * @return Token<OzoneTokenIdentifier>
-   * @throws IOException
-   */
-  @Override
-  public Token<OzoneTokenIdentifier> getDelegationToken(Text renewer)
-      throws IOException {
-
-    Token<OzoneTokenIdentifier> token =
-        ozoneManagerClient.getDelegationToken(renewer);
-    if (token != null) {
-      token.setService(dtService);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Created token {} for dtService {}", token, dtService);
-      }
-    } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Cannot get ozone delegation token for renewer {} to " +
-            "access service {}", renewer, dtService);
-      }
-    }
-    return token;
-  }
-
-  /**
-   * Renew an existing delegation token.
-   *
-   * @param token delegation token obtained earlier
-   * @return the new expiration time
-   * @throws IOException
-   */
-  @Override
-  public long renewDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws IOException {
-    return ozoneManagerClient.renewDelegationToken(token);
-  }
-
-  /**
-   * Cancel an existing delegation token.
-   *
-   * @param token delegation token
-   * @throws IOException
-   */
-  @Override
-  public void cancelDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws IOException {
-    ozoneManagerClient.cancelDelegationToken(token);
-  }
-
-  /**
-   * Returns the S3 secret for a given Kerberos user.
-   * @param kerberosID the Kerberos principal of the user
-   * @return S3SecretValue
-   * @throws IOException
-   */
-  @Override
-  public S3SecretValue getS3Secret(String kerberosID) throws IOException {
-    Preconditions.checkArgument(Strings.isNotBlank(kerberosID),
-        "kerberosID cannot be null or empty.");
-
-    return ozoneManagerClient.getS3Secret(kerberosID);
-  }
-
-  @Override
-  @VisibleForTesting
-  public OMFailoverProxyProvider getOMProxyProvider() {
-    return ozoneManagerClient.getOMFailoverProxyProvider();
-  }
-
-  @Override
-  public void setBucketVersioning(
-      String volumeName, String bucketName, Boolean versioning)
-      throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    Preconditions.checkNotNull(versioning);
-    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
-    builder.setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setIsVersionEnabled(versioning);
-    ozoneManagerClient.setBucketProperty(builder.build());
-  }
-
-  @Override
-  public void setBucketStorageType(
-      String volumeName, String bucketName, StorageType storageType)
-      throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    Preconditions.checkNotNull(storageType);
-    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
-    builder.setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setStorageType(storageType);
-    ozoneManagerClient.setBucketProperty(builder.build());
-  }
-
-  @Override
-  public void deleteBucket(
-      String volumeName, String bucketName) throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    ozoneManagerClient.deleteBucket(volumeName, bucketName);
-  }
-
-  @Override
-  public void checkBucketAccess(
-      String volumeName, String bucketName) throws IOException {
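-    // No-op in this client implementation.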
-
-  }
-
-  @Override
-  public OzoneBucket getBucketDetails(
-      String volumeName, String bucketName) throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    OmBucketInfo bucketInfo =
-        ozoneManagerClient.getBucketInfo(volumeName, bucketName);
-    return new OzoneBucket(
-        conf,
-        this,
-        bucketInfo.getVolumeName(),
-        bucketInfo.getBucketName(),
-        bucketInfo.getStorageType(),
-        bucketInfo.getIsVersionEnabled(),
-        bucketInfo.getCreationTime(),
-        bucketInfo.getMetadata(),
-        bucketInfo.getEncryptionKeyInfo() != null ? bucketInfo
-            .getEncryptionKeyInfo().getKeyName() : null);
-  }
-
-  @Override
-  public List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix,
-                                       String prevBucket, int maxListResult)
-      throws IOException {
-    List<OmBucketInfo> buckets = ozoneManagerClient.listBuckets(
-        volumeName, prevBucket, bucketPrefix, maxListResult);
-
-    return buckets.stream().map(bucket -> new OzoneBucket(
-        conf,
-        this,
-        bucket.getVolumeName(),
-        bucket.getBucketName(),
-        bucket.getStorageType(),
-        bucket.getIsVersionEnabled(),
-        bucket.getCreationTime(),
-        bucket.getMetadata(),
-        bucket.getEncryptionKeyInfo() != null ? bucket
-            .getEncryptionKeyInfo().getKeyName() : null))
-        .collect(Collectors.toList());
-  }
-
-  @Override
-  public OzoneOutputStream createKey(
-      String volumeName, String bucketName, String keyName, long size,
-      ReplicationType type, ReplicationFactor factor,
-      Map<String, String> metadata)
-      throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    HddsClientUtils.checkNotNull(keyName, type, factor);
-    String requestId = UUID.randomUUID().toString();
-
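-    // When the GDPR flag is set, generate a GDPR symmetric key and store
-    // its details in the key metadata.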
-    if(Boolean.valueOf(metadata.get(OzoneConsts.GDPR_FLAG))){
-      try{
-        GDPRSymmetricKey gKey = new GDPRSymmetricKey(new SecureRandom());
-        metadata.putAll(gKey.getKeyDetails());
-      }catch (Exception e) {
-        if(e instanceof InvalidKeyException &&
-            e.getMessage().contains("Illegal key size or default parameters")) {
-          LOG.error("Missing Unlimited Strength Policy jars. Please install " +
-              "Java Cryptography Extension (JCE) Unlimited Strength " +
-              "Jurisdiction Policy Files");
-        }
-        throw new IOException(e);
-      }
-    }
-
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setDataSize(size)
-        .setType(HddsProtos.ReplicationType.valueOf(type.toString()))
-        .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue()))
-        .addAllMetadata(metadata)
-        .setAcls(getAclList())
-        .build();
-
-    OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
-    return createOutputStream(openKey, requestId, type, factor);
-  }
-
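-  /**
-   * Decrypts the encrypted data encryption key (DEK) carried in the file
-   * encryption info, using the configured KMS key provider.
-   */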
-  private KeyProvider.KeyVersion getDEK(FileEncryptionInfo feInfo)
-      throws IOException {
-    // check crypto protocol version
-    OzoneKMSUtil.checkCryptoProtocolVersion(feInfo);
-    KeyProvider.KeyVersion decrypted;
-    decrypted = OzoneKMSUtil.decryptEncryptedDataEncryptionKey(feInfo,
-        getKeyProvider());
-    return decrypted;
-  }
-
-  @Override
-  public OzoneInputStream getKey(
-      String volumeName, String bucketName, String keyName)
-      throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    Preconditions.checkNotNull(keyName);
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .setSortDatanodesInPipeline(topologyAwareReadEnabled)
-        .build();
-    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
-    return createInputStream(keyInfo);
-  }
-
-  @Override
-  public void deleteKey(
-      String volumeName, String bucketName, String keyName)
-      throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    Preconditions.checkNotNull(keyName);
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .build();
-    ozoneManagerClient.deleteKey(keyArgs);
-  }
-
-  @Override
-  public void renameKey(String volumeName, String bucketName,
-      String fromKeyName, String toKeyName) throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    HddsClientUtils.checkNotNull(fromKeyName, toKeyName);
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(fromKeyName)
-        .build();
-    ozoneManagerClient.renameKey(keyArgs, toKeyName);
-  }
-
-  @Override
-  public List<OzoneKey> listKeys(String volumeName, String bucketName,
-                                 String keyPrefix, String prevKey,
-                                 int maxListResult)
-      throws IOException {
-    List<OmKeyInfo> keys = ozoneManagerClient.listKeys(
-        volumeName, bucketName, prevKey, keyPrefix, maxListResult);
-
-    return keys.stream().map(key -> new OzoneKey(
-        key.getVolumeName(),
-        key.getBucketName(),
-        key.getKeyName(),
-        key.getDataSize(),
-        key.getCreationTime(),
-        key.getModificationTime(),
-        ReplicationType.valueOf(key.getType().toString()),
-        key.getFactor().getNumber()))
-        .collect(Collectors.toList());
-  }
-
-  @Override
-  public OzoneKeyDetails getKeyDetails(
-      String volumeName, String bucketName, String keyName)
-      throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-    Preconditions.checkNotNull(keyName);
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .setSortDatanodesInPipeline(topologyAwareReadEnabled)
-        .build();
-    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
-
-    List<OzoneKeyLocation> ozoneKeyLocations = new ArrayList<>();
-    keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().forEach(
-        (a) -> ozoneKeyLocations.add(new OzoneKeyLocation(a.getContainerID(),
-            a.getLocalID(), a.getLength(), a.getOffset())));
-    return new OzoneKeyDetails(keyInfo.getVolumeName(), keyInfo.getBucketName(),
-        keyInfo.getKeyName(), keyInfo.getDataSize(), keyInfo.getCreationTime(),
-        keyInfo.getModificationTime(), ozoneKeyLocations, ReplicationType
-        .valueOf(keyInfo.getType().toString()), keyInfo.getMetadata(),
-        keyInfo.getFileEncryptionInfo(), keyInfo.getFactor().getNumber());
-  }
-
-  @Override
-  public void createS3Bucket(String userName, String s3BucketName)
-      throws IOException {
-    Preconditions.checkArgument(Strings.isNotBlank(userName), "user name " +
-        "cannot be null or empty.");
-
-    Preconditions.checkArgument(Strings.isNotBlank(s3BucketName), "bucket " +
-        "name cannot be null or empty.");
-    ozoneManagerClient.createS3Bucket(userName, s3BucketName);
-  }
-
-  @Override
-  public void deleteS3Bucket(String s3BucketName)
-      throws IOException {
-    Preconditions.checkArgument(Strings.isNotBlank(s3BucketName), "bucket " +
-        "name cannot be null or empty.");
-    ozoneManagerClient.deleteS3Bucket(s3BucketName);
-  }
-
-  @Override
-  public String getOzoneBucketMapping(String s3BucketName) throws IOException {
-    Preconditions.checkArgument(Strings.isNotBlank(s3BucketName), "bucket " +
-        "name cannot be null or empty.");
-    return ozoneManagerClient.getOzoneBucketMapping(s3BucketName);
-  }
-
-  @Override
-  @SuppressWarnings("StringSplitter")
-  public String getOzoneVolumeName(String s3BucketName) throws IOException {
-    String mapping = getOzoneBucketMapping(s3BucketName);
-    return mapping.split("/")[0];
-
-  }
-
-  @Override
-  @SuppressWarnings("StringSplitter")
-  public String getOzoneBucketName(String s3BucketName) throws IOException {
-    String mapping = getOzoneBucketMapping(s3BucketName);
-    return mapping.split("/")[1];
-  }
-
-  @Override
-  public List<OzoneBucket> listS3Buckets(String userName, String bucketPrefix,
-                                         String prevBucket, int maxListResult)
-      throws IOException {
-    List<OmBucketInfo> buckets = ozoneManagerClient.listS3Buckets(
-        userName, prevBucket, bucketPrefix, maxListResult);
-
-    return buckets.stream().map(bucket -> new OzoneBucket(
-        conf,
-        this,
-        bucket.getVolumeName(),
-        bucket.getBucketName(),
-        bucket.getStorageType(),
-        bucket.getIsVersionEnabled(),
-        bucket.getCreationTime(),
-        bucket.getMetadata(),
-        bucket.getEncryptionKeyInfo() != null ?
-            bucket.getEncryptionKeyInfo().getKeyName(): null))
-        .collect(Collectors.toList());
-  }
-
-  @Override
-  public void close() throws IOException {
-    IOUtils.cleanupWithLogger(LOG, ozoneManagerClient);
-    IOUtils.cleanupWithLogger(LOG, xceiverClientManager);
-  }
-
-  @Override
-  public OmMultipartInfo initiateMultipartUpload(String volumeName,
-                                                String bucketName,
-                                                String keyName,
-                                                ReplicationType type,
-                                                ReplicationFactor factor)
-      throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    HddsClientUtils.checkNotNull(keyName, type, factor);
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setType(HddsProtos.ReplicationType.valueOf(type.toString()))
-        .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue()))
-        .setAcls(getAclList())
-        .build();
-    OmMultipartInfo multipartInfo = ozoneManagerClient
-        .initiateMultipartUpload(keyArgs);
-    return multipartInfo;
-  }
-
-  @Override
-  public OzoneOutputStream createMultipartKey(String volumeName,
-                                              String bucketName,
-                                              String keyName,
-                                              long size,
-                                              int partNumber,
-                                              String uploadID)
-      throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    HddsClientUtils.checkNotNull(keyName, uploadID);
-    Preconditions.checkArgument(partNumber > 0 && partNumber <=10000, "Part " +
-        "number should be greater than zero and less than or equal to 10000");
-    Preconditions.checkArgument(size >=0, "size should be greater than or " +
-        "equal to zero");
-    String requestId = UUID.randomUUID().toString();
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setDataSize(size)
-        .setIsMultipartKey(true)
-        .setMultipartUploadID(uploadID)
-        .setMultipartUploadPartNumber(partNumber)
-        .setAcls(getAclList())
-        .build();
-
-    OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
-    KeyOutputStream keyOutputStream =
-        new KeyOutputStream.Builder()
-            .setHandler(openKey)
-            .setXceiverClientManager(xceiverClientManager)
-            .setOmClient(ozoneManagerClient)
-            .setChunkSize(chunkSize)
-            .setRequestID(requestId)
-            .setType(openKey.getKeyInfo().getType())
-            .setFactor(openKey.getKeyInfo().getFactor())
-            .setStreamBufferFlushSize(streamBufferFlushSize)
-            .setStreamBufferMaxSize(streamBufferMaxSize)
-            .setWatchTimeout(watchTimeout)
-            .setBlockSize(blockSize)
-            .setBytesPerChecksum(bytesPerChecksum)
-            .setChecksumType(checksumType)
-            .setMultipartNumber(partNumber)
-            .setMultipartUploadID(uploadID)
-            .setIsMultipartKey(true)
-            .setMaxRetryCount(maxRetryCount)
-            .setRetryInterval(retryInterval)
-            .build();
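-    // Register the blocks pre-allocated by the OM with the output stream.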
-    keyOutputStream.addPreallocateBlocks(
-        openKey.getKeyInfo().getLatestVersionLocations(),
-        openKey.getOpenVersion());
-    return new OzoneOutputStream(keyOutputStream);
-  }
-
-  @Override
-  public OmMultipartUploadCompleteInfo completeMultipartUpload(
-      String volumeName, String bucketName, String keyName, String uploadID,
-      Map<Integer, String> partsMap) throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    HddsClientUtils.checkNotNull(keyName, uploadID);
-
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setMultipartUploadID(uploadID)
-        .setAcls(getAclList())
-        .build();
-
-    OmMultipartUploadCompleteList
-        omMultipartUploadCompleteList = new OmMultipartUploadCompleteList(
-        partsMap);
-
-    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo =
-        ozoneManagerClient.completeMultipartUpload(keyArgs,
-            omMultipartUploadCompleteList);
-
-    return omMultipartUploadCompleteInfo;
-
-  }
-
-  @Override
-  public void abortMultipartUpload(String volumeName,
-       String bucketName, String keyName, String uploadID) throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    HddsClientUtils.checkNotNull(keyName, uploadID);
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setMultipartUploadID(uploadID)
-        .build();
-    ozoneManagerClient.abortMultipartUpload(omKeyArgs);
-  }
-
-  @Override
-  public OzoneMultipartUploadPartListParts listParts(String volumeName,
-      String bucketName, String keyName, String uploadID, int partNumberMarker,
-      int maxParts)  throws IOException {
-    HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    HddsClientUtils.checkNotNull(uploadID);
-    Preconditions.checkArgument(maxParts > 0, "Max Parts Should be greater " +
-        "than zero");
-    Preconditions.checkArgument(partNumberMarker >= 0, "Part Number Marker " +
-        "Should be greater than or equal to zero, as part numbers starts from" +
-        " 1 and ranges till 10000");
-    OmMultipartUploadListParts omMultipartUploadListParts =
-        ozoneManagerClient.listParts(volumeName, bucketName, keyName,
-            uploadID, partNumberMarker, maxParts);
-
-    OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
-        new OzoneMultipartUploadPartListParts(ReplicationType
-            .fromProto(omMultipartUploadListParts.getReplicationType()),
-            ReplicationFactor
-                .fromProto(omMultipartUploadListParts.getReplicationFactor()),
-            omMultipartUploadListParts.getNextPartNumberMarker(),
-            omMultipartUploadListParts.isTruncated());
-
-    for (OmPartInfo omPartInfo : omMultipartUploadListParts.getPartInfoList()) {
-      ozoneMultipartUploadPartListParts.addPart(
-          new OzoneMultipartUploadPartListParts.PartInfo(
-              omPartInfo.getPartNumber(), omPartInfo.getPartName(),
-              omPartInfo.getModificationTime(), omPartInfo.getSize()));
-    }
-    return ozoneMultipartUploadPartListParts;
-
-  }
-
-  @Override
-  public OzoneMultipartUploadList listMultipartUploads(String volumeName,
-      String bucketName, String prefix) throws IOException {
-
-    OmMultipartUploadList omMultipartUploadList =
-        ozoneManagerClient.listMultipartUploads(volumeName, bucketName, prefix);
-    List<OzoneMultipartUpload> uploads = omMultipartUploadList.getUploads()
-        .stream()
-        .map(upload -> new OzoneMultipartUpload(upload.getVolumeName(),
-            upload.getBucketName(),
-            upload.getKeyName(),
-            upload.getUploadId(),
-            upload.getCreationTime(),
-            ReplicationType.fromProto(upload.getReplicationType()),
-            ReplicationFactor.fromProto(upload.getReplicationFactor())))
-        .collect(Collectors.toList());
-    OzoneMultipartUploadList result = new OzoneMultipartUploadList(uploads);
-    return result;
-  }
-
-  @Override
-  public OzoneFileStatus getOzoneFileStatus(String volumeName,
-      String bucketName, String keyName) throws IOException {
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .build();
-    return ozoneManagerClient.getFileStatus(keyArgs);
-  }
-
-  @Override
-  public void createDirectory(String volumeName, String bucketName,
-      String keyName) throws IOException {
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setAcls(getAclList())
-        .build();
-    ozoneManagerClient.createDirectory(keyArgs);
-  }
-
-  @Override
-  public OzoneInputStream readFile(String volumeName, String bucketName,
-      String keyName) throws IOException {
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setSortDatanodesInPipeline(topologyAwareReadEnabled)
-        .build();
-    OmKeyInfo keyInfo = ozoneManagerClient.lookupFile(keyArgs);
-    return createInputStream(keyInfo);
-  }
-
-  @Override
-  public OzoneOutputStream createFile(String volumeName, String bucketName,
-      String keyName, long size, ReplicationType type, ReplicationFactor factor,
-      boolean overWrite, boolean recursive) throws IOException {
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setDataSize(size)
-        .setType(HddsProtos.ReplicationType.valueOf(type.name()))
-        .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue()))
-        .setAcls(getAclList())
-        .build();
-    OpenKeySession keySession =
-        ozoneManagerClient.createFile(keyArgs, overWrite, recursive);
-    return createOutputStream(keySession, UUID.randomUUID().toString(), type,
-        factor);
-  }
-
-  @Override
-  public List<OzoneFileStatus> listStatus(String volumeName, String bucketName,
-      String keyName, boolean recursive, String startKey, long numEntries)
-      throws IOException {
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .build();
-    return ozoneManagerClient
-        .listStatus(keyArgs, recursive, startKey, numEntries);
-  }
-
-  /**
-   * Add an acl for the Ozone object. Returns true if the acl is added
-   * successfully, false otherwise.
-   *
-   * @param obj Ozone object for which acl should be added.
-   * @param acl ozone acl to be added.
-   * @throws IOException if there is error.
-   */
-  @Override
-  public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    return ozoneManagerClient.addAcl(obj, acl);
-  }
-
-  /**
-   * Remove an acl from the Ozone object. Returns true if the acl is removed
-   * successfully, false otherwise.
-   *
-   * @param obj Ozone object.
-   * @param acl Ozone acl to be removed.
-   * @throws IOException if there is error.
-   */
-  @Override
-  public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    return ozoneManagerClient.removeAcl(obj, acl);
-  }
-
-  /**
-   * Set the acls for a given Ozone object. This operation resets the ACLs of
-   * the object to the list of ACLs provided in the argument.
-   *
-   * @param obj Ozone object.
-   * @param acls List of acls.
-   * @throws IOException if there is error.
-   */
-  @Override
-  public boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException {
-    return ozoneManagerClient.setAcl(obj, acls);
-  }
-
-  /**
-   * Returns list of ACLs for given Ozone object.
-   *
-   * @param obj Ozone object.
-   * @throws IOException if there is error.
-   */
-  @Override
-  public List<OzoneAcl> getAcl(OzoneObj obj) throws IOException {
-    return ozoneManagerClient.getAcl(obj);
-  }
-
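-  /**
-   * Wraps the key's data stream as needed: KMS-encrypted keys get a
-   * CryptoInputStream, GDPR-flagged keys a CipherInputStream, and plain
-   * keys are returned unwrapped.
-   */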
-  private OzoneInputStream createInputStream(OmKeyInfo keyInfo)
-      throws IOException {
-    LengthInputStream lengthInputStream = KeyInputStream
-        .getFromOmKeyInfo(keyInfo, xceiverClientManager,
-            verifyChecksum);
-    FileEncryptionInfo feInfo = keyInfo.getFileEncryptionInfo();
-    if (feInfo != null) {
-      final KeyProvider.KeyVersion decrypted = getDEK(feInfo);
-      final CryptoInputStream cryptoIn =
-          new CryptoInputStream(lengthInputStream.getWrappedStream(),
-              OzoneKMSUtil.getCryptoCodec(conf, feInfo),
-              decrypted.getMaterial(), feInfo.getIV());
-      return new OzoneInputStream(cryptoIn);
-    } else {
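-      // GDPR keys are decrypted client-side with the symmetric key stored
-      // in the key metadata.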
-      try{
-        GDPRSymmetricKey gk;
-        Map<String, String> keyInfoMetadata = keyInfo.getMetadata();
-        if(Boolean.valueOf(keyInfoMetadata.get(OzoneConsts.GDPR_FLAG))){
-          gk = new GDPRSymmetricKey(
-              keyInfoMetadata.get(OzoneConsts.GDPR_SECRET),
-              keyInfoMetadata.get(OzoneConsts.GDPR_ALGORITHM)
-          );
-          gk.getCipher().init(Cipher.DECRYPT_MODE, gk.getSecretKey());
-          return new OzoneInputStream(
-              new CipherInputStream(lengthInputStream, gk.getCipher()));
-        }
-      }catch (Exception ex){
-        throw new IOException(ex);
-      }
-    }
-    return new OzoneInputStream(lengthInputStream.getWrappedStream());
-  }
-
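-  /**
-   * Builds the KeyOutputStream for an open key session, registers its
-   * pre-allocated blocks, and wraps it for encryption (KMS or GDPR) when
-   * required.
-   */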
-  private OzoneOutputStream createOutputStream(OpenKeySession openKey,
-      String requestId, ReplicationType type, ReplicationFactor factor)
-      throws IOException {
-    KeyOutputStream keyOutputStream =
-        new KeyOutputStream.Builder()
-            .setHandler(openKey)
-            .setXceiverClientManager(xceiverClientManager)
-            .setOmClient(ozoneManagerClient)
-            .setChunkSize(chunkSize)
-            .setRequestID(requestId)
-            .setType(HddsProtos.ReplicationType.valueOf(type.toString()))
-            .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue()))
-            .setStreamBufferFlushSize(streamBufferFlushSize)
-            .setStreamBufferMaxSize(streamBufferMaxSize)
-            .setWatchTimeout(watchTimeout)
-            .setBlockSize(blockSize)
-            .setChecksumType(checksumType)
-            .setBytesPerChecksum(bytesPerChecksum)
-            .setMaxRetryCount(maxRetryCount)
-            .setRetryInterval(retryInterval)
-            .build();
-    keyOutputStream
-        .addPreallocateBlocks(openKey.getKeyInfo().getLatestVersionLocations(),
-            openKey.getOpenVersion());
-    final FileEncryptionInfo feInfo = keyOutputStream.getFileEncryptionInfo();
-    if (feInfo != null) {
-      KeyProvider.KeyVersion decrypted = getDEK(feInfo);
-      final CryptoOutputStream cryptoOut =
-          new CryptoOutputStream(keyOutputStream,
-              OzoneKMSUtil.getCryptoCodec(conf, feInfo),
-              decrypted.getMaterial(), feInfo.getIV());
-      return new OzoneOutputStream(cryptoOut);
-    } else {
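-      // GDPR keys are encrypted client-side with the symmetric key stored
-      // in the key metadata.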
-      try{
-        GDPRSymmetricKey gk;
-        Map<String, String> openKeyMetadata =
-            openKey.getKeyInfo().getMetadata();
-        if(Boolean.valueOf(openKeyMetadata.get(OzoneConsts.GDPR_FLAG))){
-          gk = new GDPRSymmetricKey(
-              openKeyMetadata.get(OzoneConsts.GDPR_SECRET),
-              openKeyMetadata.get(OzoneConsts.GDPR_ALGORITHM)
-          );
-          gk.getCipher().init(Cipher.ENCRYPT_MODE, gk.getSecretKey());
-          return new OzoneOutputStream(
-              new CipherOutputStream(keyOutputStream, gk.getCipher()));
-        }
-      }catch (Exception ex){
-        throw new IOException(ex);
-      }
-
-      return new OzoneOutputStream(keyOutputStream);
-    }
-  }
-
-  @Override
-  public KeyProvider getKeyProvider() throws IOException {
-    return OzoneKMSUtil.getKeyProvider(conf, getKeyProviderUri());
-  }
-
-  @Override
-  public URI getKeyProviderUri() throws IOException {
-    // TODO: fix me to support kms instances for different OMs
-    return OzoneKMSUtil.getKeyProviderUri(ugi,
-        null, null, conf);
-  }
-
-  @Override
-  public String getCanonicalServiceName() {
-    return (dtService != null) ? dtService.toString() : null;
-  }
-}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/package-info.java
deleted file mode 100644
index 0fcc3fc..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-/**
- * This package contains Ozone rpc client library classes.
- */
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
deleted file mode 100644
index ff4aeb3..0000000
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
-
-import java.net.InetSocketAddress;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-
-/**
- * This test class verifies the parsing of SCM endpoint config settings. The
- * parsing logic is in
- * {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}.
- */
-public class TestHddsClientUtils {
-  @Rule
-  public Timeout timeout = new Timeout(300000);
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  /**
-   * Verify client endpoint lookup failure if it is not configured.
-   */
-  @Test
-  public void testMissingScmClientAddress() {
-    final Configuration conf = new OzoneConfiguration();
-    thrown.expect(IllegalArgumentException.class);
-    HddsUtils.getScmAddressForClients(conf);
-  }
-
-  /**
-   * Verify that the client endpoint can be correctly parsed from
-   * configuration.
-   */
-  @Test
-  public void testGetScmClientAddress() {
-    final Configuration conf = new OzoneConfiguration();
-
-    // First try a client address with just a host name. Verify it falls
-    // back to the default port.
-    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4");
-    InetSocketAddress addr = HddsUtils.getScmAddressForClients(conf);
-    assertThat(addr.getHostString(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(OZONE_SCM_CLIENT_PORT_DEFAULT));
-
-    // Next try a client address with a host name and port. Verify both
-    // are used correctly.
-    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    addr = HddsUtils.getScmAddressForClients(conf);
-    assertThat(addr.getHostString(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(100));
-  }
-
-  @Test
-  public void testGetOmSocketAddress() {
-    final Configuration conf = new OzoneConfiguration();
-
-    // First try a client address with just a host name. Verify it falls
-    // back to the default port.
-    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4");
-    InetSocketAddress addr = OmUtils.getOmAddress(conf);
-    assertThat(addr.getHostString(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT));
-
-    // Next try a client address with a host name and port. Verify both
-    // are used correctly.
-    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4:100");
-    addr = OmUtils.getOmAddress(conf);
-    assertThat(addr.getHostString(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(100));
-
-    // Assert that we are able to use default configs if no value is specified.
-    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "");
-    addr = OmUtils.getOmAddress(conf);
-    assertThat(addr.getHostString(), is("0.0.0.0"));
-    assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT));
-  }
-
-  @Test
-  public void testBlockClientFallbackToClientNoPort() {
-    // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY is undefined it should
-    // fallback to OZONE_SCM_CLIENT_ADDRESS_KEY.
-    final String scmHost = "host123";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
-    final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients(
-        conf);
-    assertEquals(scmHost, address.getHostName());
-    assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
-  }
-
-  @Test
-  @SuppressWarnings("StringSplitter")
-  public void testBlockClientFallbackToClientWithPort() {
-    // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY is undefined it should
-    // fallback to OZONE_SCM_CLIENT_ADDRESS_KEY.
-    //
-    // Verify that the OZONE_SCM_CLIENT_ADDRESS_KEY port number is ignored,
-    // if present. Instead we should use OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT.
-    final String scmHost = "host123:100";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
-    final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients(
-        conf);
-    assertEquals(scmHost.split(":")[0], address.getHostName());
-    assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
-  }
-
-  @Test
-  public void testBlockClientFallbackToScmNamesNoPort() {
-    // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
-    // are undefined it should fallback to OZONE_SCM_NAMES.
-    final String scmHost = "host456";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_NAMES, scmHost);
-    final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients(
-        conf);
-    assertEquals(scmHost, address.getHostName());
-    assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
-  }
-
-  @Test
-  @SuppressWarnings("StringSplitter")
-  public void testBlockClientFallbackToScmNamesWithPort() {
-    // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
-    // are undefined it should fallback to OZONE_SCM_NAMES.
-    //
-    // Verify that the OZONE_SCM_NAMES port number is ignored, if present.
-    // Instead we should use OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT.
-    final String scmHost = "host456:200";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_NAMES, scmHost);
-    final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients(
-        conf);
-    assertEquals(scmHost.split(":")[0], address.getHostName());
-    assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
-  }
-
-  @Test
-  public void testClientFallbackToScmNamesNoPort() {
-    // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, it should fallback
-    // to OZONE_SCM_NAMES.
-    final String scmHost = "host456";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_NAMES, scmHost);
-    final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf);
-    assertEquals(scmHost, address.getHostName());
-    assertEquals(OZONE_SCM_CLIENT_PORT_DEFAULT, address.getPort());
-  }
-
-  @Test
-  @SuppressWarnings("StringSplitter")
-  public void testClientFallbackToScmNamesWithPort() {
-    // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, it should fallback
-    // to OZONE_SCM_NAMES.
-    //
-    // Verify that the OZONE_SCM_NAMES port number is ignored, if present.
-    // Instead we should use OZONE_SCM_CLIENT_PORT_DEFAULT.
-    final String scmHost = "host456:300";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_NAMES, scmHost);
-    final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf);
-    assertEquals(scmHost.split(":")[0], address.getHostName());
-    assertEquals(OZONE_SCM_CLIENT_PORT_DEFAULT, address.getPort());
-  }
-
-  @Test
-  public void testBlockClientFailsWithMultipleScmNames() {
-    // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
-    // are undefined, fail if OZONE_SCM_NAMES has multiple SCMs.
-    final String scmHost = "host123,host456";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_NAMES, scmHost);
-    thrown.expect(IllegalArgumentException.class);
-    HddsUtils.getScmAddressForBlockClients(conf);
-  }
-
-  @Test
-  public void testClientFailsWithMultipleScmNames() {
-    // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, fail if OZONE_SCM_NAMES
-    // has multiple SCMs.
-    final String scmHost = "host123,host456";
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_NAMES, scmHost);
-    thrown.expect(IllegalArgumentException.class);
-    HddsUtils.getScmAddressForClients(conf);
-  }
-}
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java
deleted file mode 100644
index be63eab..0000000
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-/**
- * This package contains test classes for Ozone Client.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
deleted file mode 100644
index 49fb5e3..0000000
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-/**
- * Test class for {@link OzoneKMSUtil}.
- */
-public class TestOzoneKMSUtil {
-  private OzoneConfiguration config;
-
-  @Before
-  public void setUp() {
-    config = new OzoneConfiguration();
-    config.setBoolean(OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY, true);
-  }
-
-  @Test
-  public void getKeyProvider() {
-    try {
-      OzoneKMSUtil.getKeyProvider(config, null);
-      fail("Expected IOException.");
-    } catch (IOException ioe) {
-      assertEquals(ioe.getMessage(), "KMS serverProviderUri is " +
-          "not configured.");
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/common/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/common/dev-support/findbugsExcludeFile.xml
deleted file mode 100644
index df58f36..0000000
--- a/hadoop-ozone/common/dev-support/findbugsExcludeFile.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<FindBugsFilter>
-  <Match>
-    <Package name="org.apache.hadoop.ozone.protocol.proto"/>
-  </Match>
-</FindBugsFilter>
diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml
deleted file mode 100644
index 09ac27a..0000000
--- a/hadoop-ozone/common/pom.xml
+++ /dev/null
@@ -1,189 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-common</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Common</description>
-  <name>Apache Hadoop Ozone Common</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-
-    <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-compress</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs-client</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>com.squareup.okhttp</groupId>
-          <artifactId>okhttp</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-framework</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-container-service</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-tools</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <resources>
-      <resource>
-        <directory>${basedir}/src/main/resources</directory>
-        <excludes>
-          <exclude>ozone-version-info.properties</exclude>
-        </excludes>
-        <filtering>false</filtering>
-      </resource>
-      <resource>
-        <directory>${basedir}/src/main/resources</directory>
-        <includes>
-          <include>ozone-version-info.properties</include>
-        </includes>
-        <filtering>true</filtering>
-      </resource>
-    </resources>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
-        <executions>
-          <execution>
-            <id>version-info</id>
-            <phase>generate-resources</phase>
-            <goals>
-              <goal>version-info</goal>
-            </goals>
-            <configuration>
-              <source>
-                <directory>${basedir}/../</directory>
-                <includes>
-                  <include>*/src/main/java/**/*.java</include>
-                  <include>*/src/main/proto/*.proto</include>
-                </includes>
-              </source>
-            </configuration>
-          </execution>
-          <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
-            <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>
-                  ${basedir}/../../hadoop-hdds/common/src/main/proto/
-                </param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>OzoneManagerProtocol.proto</include>
-                </includes>
-              </source>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-  <profiles>
-    <profile>
-      <id>k8s-dev</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>io.fabric8</groupId>
-            <artifactId>docker-maven-plugin</artifactId>
-            <version>0.29.0</version>
-            <configuration>
-              <images>
-                <image>
-                  <name>${user.name}/ozone:${project.version}</name>
-                  <build>
-                    <dockerFileDir>${project.basedir}</dockerFileDir>
-                  </build>
-                </image>
-              </images>
-            </configuration>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
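-  <!-- Illustrative note (not from the original file): the k8s-dev profile
-       above could be activated with a standard Maven profile switch, e.g.
-         mvn package -Pk8s-dev -DskipTests
-       which would build a ${user.name}/ozone:${project.version} docker image
-       from the Dockerfile in ${project.basedir}. -->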
-</project>
diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
deleted file mode 100755
index cd8f202..0000000
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ /dev/null
@@ -1,312 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The name of the script being executed.
-HADOOP_SHELL_EXECNAME="ozone"
-MYNAME="${BASH_SOURCE-$0}"
-JVM_PID="$$"
-## @description  build up the ozone command's usage text.
-## @audience     public
-## @stability    stable
-## @replaceable  no
-function hadoop_usage
-{
-  hadoop_add_option "--buildpaths" "attempt to add class files from build tree"
-  hadoop_add_option "--daemon (start|status|stop)" "operate on a daemon"
-  hadoop_add_option "--hostnames list[,of,host,names]" "hosts to use in worker mode"
-  hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
-  hadoop_add_option "--loglevel level" "set the log4j level for this command"
-  hadoop_add_option "--workers" "turn on worker mode"
-
-  hadoop_add_subcommand "auditparser" client "runs audit parser tool"
-  hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
-  hadoop_add_subcommand "datanode" daemon "run a HDDS datanode"
-  hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
-  hadoop_add_subcommand "freon" client "runs an ozone data generator"
-  hadoop_add_subcommand "fs" client "run a filesystem command on Ozone file system. Equivalent to 'hadoop fs'"
-  hadoop_add_subcommand "genconf" client "generate minimally required ozone configs and output to ozone-site.xml in specified path"
-  hadoop_add_subcommand "genesis" client "runs a collection of ozone benchmarks to help with tuning."
-  hadoop_add_subcommand "getconf" client "get ozone config values from configuration"
-  hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode."
-  hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data"
-  hadoop_add_subcommand "om" daemon "Ozone Manager"
-  hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service"
-  hadoop_add_subcommand "s3g" daemon "run the S3 compatible REST gateway"
-  hadoop_add_subcommand "csi" daemon "run the standalone CSI daemon"
-  hadoop_add_subcommand "recon" daemon "run the Recon service"
-  hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager"
-  hadoop_add_subcommand "sh" client "command line interface for object store operations"
-  hadoop_add_subcommand "s3" client "command line interface for s3 related operations"
-  hadoop_add_subcommand "insight" client "tool to get runtime opeartion information"
-  hadoop_add_subcommand "version" client "print the version"
-  hadoop_add_subcommand "dtutil" client "operations related to delegation tokens"
-  hadoop_add_subcommand "upgrade" client "HDFS to Ozone in-place upgrade tool"
-
-  hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
-}
-
-## @description  Default command handler for the ozone command
-## @audience     public
-## @stability    stable
-## @replaceable  no
-## @param        CLI arguments
-function ozonecmd_case
-{
-  subcmd=$1
-  shift
-
-  ozone_default_log4j="${HADOOP_CONF_DIR}/log4j.properties"
-  ozone_shell_log4j="${HADOOP_CONF_DIR}/ozone-shell-log4j.properties"
-  if [ ! -f "${ozone_shell_log4j}" ]; then
-    ozone_shell_log4j=${ozone_default_log4j}
-  fi
-
-  case ${subcmd} in
-    auditparser)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.audit.parser.AuditParser
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
-    ;;
-    classpath)
-      if [[ "$#" -gt 0 ]]; then
-        OZONE_RUN_ARTIFACT_NAME="$1"
-        HADOOP_CLASSNAME="org.apache.hadoop.util.Classpath"
-        # Remove the artifact name and replace it with --glob
-        # (we need at least one argument to execute the Classpath helper class).
-        HADOOP_SUBCMD_ARGS[0]="--glob"
-      else
-        hadoop_finalize
-        echo "Usage: ozone classpath <ARTIFACTNAME>"
-        echo "Where the artifact name is one of:"
-        echo ""
-        ls -1 "${HADOOP_HDFS_HOME}/share/ozone/classpath/" | sed 's/\.classpath$//'
-        exit 1
-      fi
-    ;;
-    datanode)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      # Add JVM parameter (org.apache.ratis.thirdparty.io.netty.allocator.useCacheForAllThreads=false)
-      # for disabling netty PooledByteBufAllocator thread caches for non-netty threads.
-      # This parameter significantly reduces GC pressure for Datanode.
-      # Corresponding Ratis issue https://issues.apache.org/jira/browse/RATIS-534.
-      HDDS_DN_OPTS="${HDDS_DN_OPTS} -Dlog4j.configurationFile=${HADOOP_CONF_DIR}/dn-audit-log4j2.properties -Dorg.apache.ratis.thirdparty.io.netty.allocator.useCacheForAllThreads=false"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HDDS_DN_OPTS}"
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.HddsDatanodeService
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-datanode"
-    ;;
-    envvars)
-      echo "JAVA_HOME='${JAVA_HOME}'"
-      echo "HADOOP_HDFS_HOME='${HADOOP_HDFS_HOME}'"
-      echo "HDFS_DIR='${HDFS_DIR}'"
-      echo "HDFS_LIB_JARS_DIR='${HDFS_LIB_JARS_DIR}'"
-      echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
-      echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
-      echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
-      echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
-      if [[ -n "${QATESTMODE}" ]]; then
-        echo "MYNAME=${MYNAME}"
-        echo "HADOOP_SHELL_EXECNAME=${HADOOP_SHELL_EXECNAME}"
-      fi
-      exit 0
-    ;;
-    freon)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.Freon
-      OZONE_FREON_OPTS="${OZONE_FREON_OPTS} -Dhadoop.log.file=ozone-freon.log -Dlog4j.configuration=file:${ozone_shell_log4j}"
-      HADOOP_OPTS="${HADOOP_OPTS} ${OZONE_FREON_OPTS}"
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
-    ;;
-    genesis)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.genesis.Genesis
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
-    ;;
-    getconf)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
-    ;;
-    om)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.om.OzoneManagerStarter
-      HDFS_OM_OPTS="${HDFS_OM_OPTS} -Dlog4j.configurationFile=${HADOOP_CONF_DIR}/om-audit-log4j2.properties"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_OM_OPTS}"
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager"
-    ;;
-    sh | shell)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.OzoneShell
-      HDFS_OM_SH_OPTS="${HDFS_OM_SH_OPTS} -Dhadoop.log.file=ozone-shell.log -Dlog4j.configuration=file:${ozone_shell_log4j}"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_OM_SH_OPTS}"
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager"
-    ;;
-    s3)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.s3.S3Shell
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager"
-    ;;
-    scm)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.hdds.scm.server.StorageContainerManagerStarter'
-      hadoop_debug "Appending HDFS_STORAGECONTAINERMANAGER_OPTS onto HADOOP_OPTS"
-      HDFS_STORAGECONTAINERMANAGER_OPTS="${HDFS_STORAGECONTAINERMANAGER_OPTS} -Dlog4j.configurationFile=${HADOOP_CONF_DIR}/scm-audit-log4j2.properties"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_STORAGECONTAINERMANAGER_OPTS}"
-      OZONE_RUN_ARTIFACT_NAME="hadoop-hdds-server-scm"
-    ;;
-    s3g)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.ozone.s3.Gateway'
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-s3gateway"
-    ;;
-    csi)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.ozone.csi.CsiServer'
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-csi"
-    ;;
-    recon)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.ozone.recon.ReconServer'
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-recon"
-    ;;
-    fs)
-      HADOOP_CLASSNAME=org.apache.hadoop.fs.ozone.OzoneFsShell
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
-    ;;
-    scmcli)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdds.scm.cli.SCMCLI
-      HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_SCM_CLI_OPTS}"
-      OZONE_RUN_ARTIFACT_NAME="hadoop-hdds-tools"
-    ;;
-    insight)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.insight.Insight
-      HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_SCM_CLI_OPTS}"
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-insight"
-    ;;
-    version)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.util.OzoneVersionInfo
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-common"
-    ;;
-    genconf)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.genconf.GenerateOzoneRequiredConfigurations
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
-    ;;
-    dtutil)
-      HADOOP_CLASSNAME=org.apache.hadoop.security.token.DtUtilShell
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
-    ;;
-    upgrade)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.upgrade.InPlaceUpgrade
-      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-upgrade"
-    ;;
-    *)
-      HADOOP_CLASSNAME="${subcmd}"
-      if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then
-        hadoop_exit_with_usage 1
-      fi
-    ;;
-  esac
-}
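-
-# Illustrative dispatch sketch (comment only, derived from the case above):
-# a command such as
-#   ozone --daemon start scm
-# takes the "scm" branch, so the JVM entry point becomes
-# org.apache.hadoop.hdds.scm.server.StorageContainerManagerStarter and the
-# classpath is later resolved from the hadoop-hdds-server-scm descriptor.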
-
-# let's locate libexec...
-if [[ -n "${HADOOP_HOME}" ]]; then
-  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
-else
-  bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
-  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
-fi
-
-HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
-# shellcheck disable=SC2034
-HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" ]]; then
-  # shellcheck source=./hadoop-ozone/common/src/main/bin/ozone-config.sh
-  . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh"
-else
-  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/ozone-config.sh." 2>&1
-  exit 1
-fi
-
-# now that we have support code, let's abs MYNAME so we can use it later
-MYNAME=$(hadoop_abs "${MYNAME}")
-
-if [[ $# = 0 ]]; then
-  hadoop_exit_with_usage 1
-fi
-
-HADOOP_SUBCMD=$1
-shift
-
-
-if hadoop_need_reexec ozone "${HADOOP_SUBCMD}"; then
-  hadoop_uservar_su ozone "${HADOOP_SUBCMD}" \
-    "${MYNAME}" \
-    "--reexec" \
-    "${HADOOP_USER_PARAMS[@]}"
-  exit $?
-fi
-
-hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
-
-HADOOP_SUBCMD_ARGS=("$@")
-
-if declare -f ozone_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
-  hadoop_debug "Calling dynamically: ozone_subcommand_${HADOOP_SUBCMD} ${HADOOP_SUBCMD_ARGS[*]}"
-  "ozone_subcommand_${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
-else
-  ozonecmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
-fi
-
-
-#
-# Setting up classpath based on the generate classpath descriptors
-#
-if [ ! "$OZONE_RUN_ARTIFACT_NAME" ]; then
-   echo "ERROR: Ozone components require to set OZONE_RUN_ARTIFACT_NAME to set the classpath"
-   exit -1
-fi
-export HDDS_LIB_JARS_DIR="${HADOOP_HDFS_HOME}/share/ozone/lib"
-CLASSPATH_FILE="${HADOOP_HDFS_HOME}/share/ozone/classpath/${OZONE_RUN_ARTIFACT_NAME}.classpath"
-if [ ! "$CLASSPATH_FILE" ]; then
-   echo "ERROR: Classpath file descriptor $CLASSPATH_FILE is missing"
-   exit -1
-fi
-# shellcheck disable=SC1090,SC2086
-source $CLASSPATH_FILE
-OIFS=$IFS
-IFS=':'
-# shellcheck disable=SC2154
-for jar in $classpath; do
-   hadoop_add_classpath "$jar"
-done
-hadoop_add_classpath "${HADOOP_HDFS_HOME}/share/ozone/web"
-
-# We need to add the artifact manually as it's not part of the generated classpath descriptor
-ARTIFACT_LIB_DIR="${HADOOP_HDFS_HOME}/share/ozone/lib"
-MAIN_ARTIFACT=$(find "$ARTIFACT_LIB_DIR" -name "${OZONE_RUN_ARTIFACT_NAME}-*.jar")
-if [ ! "$MAIN_ARTIFACT" ]; then
-   echo "ERROR: Component jar file $MAIN_ARTIFACT is missing from ${HADOOP_HDFS_HOME}/share/ozone/lib"
-fi
-hadoop_add_classpath "${MAIN_ARTIFACT}"
-IFS=$OIFS
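-
-# Sketch of an assumed descriptor layout (illustrative, not verbatim): each
-# share/ozone/classpath/<artifact>.classpath file sourced above is expected to
-# define a single colon-separated "classpath" variable, for example:
-#   classpath=$HDDS_LIB_JARS_DIR/hadoop-hdds-common-0.5.0.jar:$HDDS_LIB_JARS_DIR/ratis-server-0.4.0.jar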
-
-
-hadoop_add_client_opts
-
-if [[ ${HADOOP_WORKER_MODE} = true ]]; then
-  hadoop_common_worker_mode_execute "${HADOOP_HDFS_HOME}/bin/ozone" "${HADOOP_USER_PARAMS[@]}"
-  exit $?
-fi
-
-hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
-
-# everything is in globals at this point, so call the generic handler
-hadoop_generic_java_subcmd_handler
diff --git a/hadoop-ozone/common/src/main/bin/ozone-config.sh b/hadoop-ozone/common/src/main/bin/ozone-config.sh
deleted file mode 100755
index 5ccb646..0000000
--- a/hadoop-ozone/common/src/main/bin/ozone-config.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# included in all the ozone scripts with source command
-# should not be executed directly
-
-function hadoop_subproject_init
-{
-  if [[ -z "${HADOOP_OZONE_ENV_PROCESSED}" ]]; then
-    if [[ -e "${HADOOP_CONF_DIR}/ozone-env.sh" ]]; then
-      . "${HADOOP_CONF_DIR}/ozone-env.sh"
-      export HADOOP_OZONE_ENV_PROCESSED=true
-    fi
-  fi
-  HADOOP_OZONE_HOME="${HADOOP_OZONE_HOME:-$HADOOP_HOME}"
-
-}
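-
-# Minimal sketch of an ozone-env.sh consumed above (illustrative only; the
-# variable values are assumptions, not shipped defaults):
-#   export HADOOP_OZONE_HOME=/opt/hadoop-ozone
-#   export HDFS_OM_OPTS="-Xmx4g"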
-
-if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
-  _hd_this="${BASH_SOURCE-$0}"
-  HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hd_this}")" >/dev/null && pwd -P)
-fi
-
-# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
-
-if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
-   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
-  . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
-elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
-  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
-elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
-  . "${HADOOP_HOME}/libexec/hadoop-config.sh"
-else
-  echo "ERROR: Hadoop common not found." 2>&1
-  exit 1
-fi
-
-# HADOOP_OZONE_DELEGATED_CLASSES defines a list of classes which will be loaded by the
-# default class loader of the application instead of the isolated class loader. This helps
-# avoid incompatibilities when running Hadoop 3.x + Ozone against an older Hadoop version.
-#export HADOOP_OZONE_DELEGATED_CLASSES=
-
diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh b/hadoop-ozone/common/src/main/bin/start-ozone.sh
deleted file mode 100755
index 9ddaab6..0000000
--- a/hadoop-ozone/common/src/main/bin/start-ozone.sh
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Start hadoop hdfs and ozone daemons.
-# Run this on master node.
-## @description  usage info
-## @audience     private
-## @stability    evolving
-## @replaceable  no
-function hadoop_usage
-{
-  echo "Usage: start-ozone.sh"
-}
-
-this="${BASH_SOURCE-$0}"
-bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
-
-# let's locate libexec...
-if [[ -n "${HADOOP_HOME}" ]]; then
-  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
-else
-  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
-fi
-
-HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
-# shellcheck disable=SC2034
-HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" ]]; then
-  # shellcheck disable=SC1090
-  . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh"
-elif [[ -f "${bin}/../libexec/ozone-config.sh" ]]; then
-  HADOOP_HOME="${bin}/../"
-  HADOOP_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
-  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
-  . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh"
-else
-  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/ozone-config.sh." 2>&1
-  exit 1
-fi
-
-# get arguments
-if [[ $# -ge 1 ]]; then
-  startOpt="$1"
-  shift
-  case "$startOpt" in
-    -upgrade)
-      nameStartOpt="$startOpt"
-    ;;
-    -rollback)
-      dataStartOpt="$startOpt"
-    ;;
-    *)
-      hadoop_exit_with_usage 1
-    ;;
-  esac
-fi
-
-#Add other possible options
-nameStartOpt="$nameStartOpt $*"
-
-SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
-
-#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED}
-# == "true" ]]; then
-#  echo "Ozone is not supported in a security enabled cluster."
-#  exit 1
-#fi
-
-#SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-#SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
-#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
-#  echo "Ozone is not supported in a security enabled cluster."
-#  exit 1
-#fi
-
-#---------------------------------------------------------
-# Check if ozone is enabled
-OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
-if [[ "${OZONE_ENABLED}" != "true" ]]; then
-  echo "Operation is not supported because ozone is not enabled."
-  exit 1
-fi
-
-#---------------------------------------------------------
-# datanodes (using default workers file)
-
-echo "Starting datanodes"
-hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/ozone" \
-    --workers \
-    --config "${HADOOP_CONF_DIR}" \
-    --daemon start \
-    datanode ${dataStartOpt}
-(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
-
-#---------------------------------------------------------
-# Ozone ozonemanager nodes
-OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -ozonemanagers 2>/dev/null)
-echo "Starting Ozone Manager nodes [${OM_NODES}]"
-if [[ "${OM_NODES}" == "0.0.0.0" ]]; then
-  OM_NODES=$(hostname)
-fi
-
-hadoop_uservar_su hdfs om "${HADOOP_HDFS_HOME}/bin/ozone" \
-  --workers \
-  --config "${HADOOP_CONF_DIR}" \
-  --hostnames "${OM_NODES}" \
-  --daemon start \
-  om
-
-(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
-
-#---------------------------------------------------------
-# Ozone storagecontainermanager nodes
-SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -storagecontainermanagers 2>/dev/null)
-echo "Starting storage container manager nodes [${SCM_NODES}]"
-hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/ozone" \
-  --workers \
-  --config "${HADOOP_CONF_DIR}" \
-  --hostnames "${SCM_NODES}" \
-  --daemon start \
-  scm
-
-(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
-
-exit ${HADOOP_JUMBO_RETCOUNTER}
diff --git a/hadoop-ozone/common/src/main/bin/stop-ozone.sh b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
deleted file mode 100755
index c07d42b..0000000
--- a/hadoop-ozone/common/src/main/bin/stop-ozone.sh
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Stop hdfs and ozone daemons.
-# Run this on master node.
-## @description  usage info
-## @audience     private
-## @stability    evolving
-## @replaceable  no
-function hadoop_usage
-{
-  echo "Usage: stop-ozone.sh"
-}
-
-this="${BASH_SOURCE-$0}"
-bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
-
-# let's locate libexec...
-if [[ -n "${HADOOP_HOME}" ]]; then
-  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
-else
-  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
-fi
-
-HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
-# shellcheck disable=SC2034
-HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" ]]; then
-  # shellcheck disable=SC1090
-  . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh"
-else
-  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/ozone-config.sh." 2>&1
-  exit 1
-fi
-
-#SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-#SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
-#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
-#  echo "Ozone is not supported in a security enabled cluster."
-#  exit 1
-#fi
-
-#---------------------------------------------------------
-# Check if ozone is enabled
-OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
-if [[ "${OZONE_ENABLED}" != "true" ]]; then
-  echo "Operation is not supported because ozone is not enabled."
-  exit 1
-fi
-
-#---------------------------------------------------------
-# datanodes (using default workers file)
-
-echo "Stopping datanodes"
-
-hadoop_uservar_su ozone datanode "${HADOOP_HDFS_HOME}/bin/ozone" \
-  --workers \
-  --config "${HADOOP_CONF_DIR}" \
-  --daemon stop \
-  datanode
-
-#---------------------------------------------------------
-# Ozone Manager nodes
-OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -ozonemanagers 2>/dev/null)
-echo "Stopping Ozone Manager nodes [${OM_NODES}]"
-if [[ "${OM_NODES}" == "0.0.0.0" ]]; then
-  OM_NODES=$(hostname)
-fi
-
-hadoop_uservar_su hdfs om "${HADOOP_HDFS_HOME}/bin/ozone" \
-  --workers \
-  --config "${HADOOP_CONF_DIR}" \
-  --hostnames "${OM_NODES}" \
-  --daemon stop \
-  om
-
-#---------------------------------------------------------
-# Ozone storagecontainermanager nodes
-SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -storagecontainermanagers 2>/dev/null)
-echo "Stopping storage container manager nodes [${SCM_NODES}]"
-hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/ozone" \
-  --workers \
-  --config "${HADOOP_CONF_DIR}" \
-  --hostnames "${SCM_NODES}" \
-  --daemon stop \
-  scm
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java
deleted file mode 100644
index 8d5ca6d..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.protocol;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto;
-
-/**
- * Ozone specific storage types.
- */
-public enum StorageType {
-  RAM_DISK,
-  SSD,
-  DISK,
-  ARCHIVE;
-
-  public static final StorageType DEFAULT = DISK;
-
-  public StorageTypeProto toProto() {
-    switch (this) {
-    case DISK:
-      return StorageTypeProto.DISK;
-    case SSD:
-      return StorageTypeProto.SSD;
-    case ARCHIVE:
-      return StorageTypeProto.ARCHIVE;
-    case RAM_DISK:
-      return StorageTypeProto.RAM_DISK;
-    default:
-      throw new IllegalStateException(
-          "BUG: StorageType not found, type=" + this);
-    }
-  }
-
-  public static StorageType valueOf(StorageTypeProto type) {
-    switch (type) {
-    case DISK:
-      return DISK;
-    case SSD:
-      return SSD;
-    case ARCHIVE:
-      return ARCHIVE;
-    case RAM_DISK:
-      return RAM_DISK;
-    default:
-      throw new IllegalStateException(
-          "BUG: StorageTypeProto not found, type=" + type);
-    }
-  }
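-
-  // Illustrative round trip based on the two conversions above:
-  //   StorageTypeProto p = StorageType.SSD.toProto();
-  //   StorageType t = StorageType.valueOf(p);  // t == StorageType.SSD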
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
deleted file mode 100644
index 89d7de0..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.protocol;
-/**
- * Helper classes for the hdds protocol.
- */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
deleted file mode 100644
index 8e129c9d..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ /dev/null
@@ -1,528 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import com.google.common.base.Joiner;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.InetSocketAddress;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.security.SecureRandom;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Optional;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import com.google.common.base.Strings;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveOutputStream;
-import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
-import org.apache.commons.compress.compressors.CompressorException;
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-import org.apache.commons.compress.compressors.CompressorStreamFactory;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-
-import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
-import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_BIND_HOST_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTPS_BIND_PORT_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_PORT_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Stateless helper functions for the server and client side of OM
- * communication.
- */
-public final class OmUtils {
-  public static final Logger LOG = LoggerFactory.getLogger(OmUtils.class);
-  private static final SecureRandom SRAND = new SecureRandom();
-  private static byte[] randomBytes = new byte[32];
-
-  private OmUtils() {
-  }
-
-  /**
-   * Retrieve the socket address that is used by OM.
-   * @param conf configuration
-   * @return Target InetSocketAddress for the OM service endpoint.
-   */
-  public static InetSocketAddress getOmAddress(Configuration conf) {
-    return NetUtils.createSocketAddr(getOmRpcAddress(conf));
-  }
-
-  /**
-   * Retrieve the socket address that is used by OM.
-   * @param conf configuration
-   * @return RPC address of the OM as a host:port string.
-   */
-  public static String getOmRpcAddress(Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        OZONE_OM_ADDRESS_KEY);
-
-    return host.orElse(OZONE_OM_BIND_HOST_DEFAULT) + ":" +
-        getOmRpcPort(conf);
-  }
-
-  /**
-   * Retrieve the socket address that is used by OM as specified by the confKey.
-   * Return null if the specified conf key is not set.
-   * @param conf configuration
-   * @param confKey configuration key to lookup address from
-   * @return host:port address string for the OM RPC server, or null.
-   */
-  public static String getOmRpcAddress(Configuration conf, String confKey) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf, confKey);
-
-    if (host.isPresent()) {
-      return host.get() + ":" + getOmRpcPort(conf, confKey);
-    } else {
-      // The specified confKey is not set
-      return null;
-    }
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to OM.
-   * @param conf
-   * @return Target InetSocketAddress for the OM service endpoint.
-   */
-  public static InetSocketAddress getOmAddressForClients(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        OZONE_OM_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      throw new IllegalArgumentException(
-          OZONE_OM_ADDRESS_KEY + " must be defined. See" +
-              " https://wiki.apache.org/hadoop/Ozone#Configuration for" +
-              " details on configuring Ozone.");
-    }
-
-    return NetUtils.createSocketAddr(
-        host.get() + ":" + getOmRpcPort(conf));
-  }
-
-  /**
-   * Returns true if OZONE_OM_SERVICE_IDS_KEY is defined and not empty.
-   * @param conf Configuration
-   * @return true if OZONE_OM_SERVICE_IDS_KEY is defined and not empty;
-   * else false.
-   */
-  public static boolean isServiceIdsDefined(Configuration conf) {
-    String val = conf.get(OZONE_OM_SERVICE_IDS_KEY);
-    return val != null && val.length() > 0;
-  }
-
-  /**
-   * Returns true if HA for OzoneManager is configured for the given service id.
-   * @param conf Configuration
-   * @param serviceId OM HA cluster service ID
-   * @return true if HA is configured in the configuration; else false.
-   */
-  public static boolean isOmHAServiceId(Configuration conf, String serviceId) {
-    Collection<String> omServiceIds = conf.getTrimmedStringCollection(
-        OZONE_OM_SERVICE_IDS_KEY);
-    return omServiceIds.contains(serviceId);
-  }
-
-  public static int getOmRpcPort(Configuration conf) {
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        OZONE_OM_ADDRESS_KEY);
-    return port.orElse(OZONE_OM_PORT_DEFAULT);
-  }
-
-  /**
-   * Retrieve the port that is used by OM as specified by the confKey.
-   * Return default port if port is not specified in the confKey.
-   * @param conf configuration
-   * @param confKey configuration key to lookup address from
-   * @return Port on which the OM RPC server will listen
-   */
-  public static int getOmRpcPort(Configuration conf, String confKey) {
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf, confKey);
-    return port.orElse(OZONE_OM_PORT_DEFAULT);
-  }
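-
-  // Example (illustrative; the hostname is hypothetical): with
-  // ozone.om.address=om1.example.com:9862 the port helpers above resolve
-  // 9862; if the configured value carries no port, they fall back to
-  // OZONE_OM_PORT_DEFAULT.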
-
-  public static int getOmRestPort(Configuration conf) {
-    // If no port number is specified then we'll just try the default
-    // HTTP BindPort.
-    final Optional<Integer> port =
-        getPortNumberFromConfigKeys(conf, OZONE_OM_HTTP_ADDRESS_KEY);
-    return port.orElse(OZONE_OM_HTTP_BIND_PORT_DEFAULT);
-  }
-
-  /**
-   * Get the location where OM should store its metadata directories.
-   * Fall back to OZONE_METADATA_DIRS if not defined.
-   *
-   * @param conf - Config
-   * @return File path, after creating all the required Directories.
-   */
-  public static File getOmDbDir(Configuration conf) {
-    return ServerUtils.getDBPath(conf, OMConfigKeys.OZONE_OM_DB_DIRS);
-  }
-
-  /**
-   * Checks if the OM request is read only or not.
-   * @param omRequest OMRequest proto
-   * @return True if it is read-only, false otherwise.
-   */
-  public static boolean isReadOnly(
-      OzoneManagerProtocolProtos.OMRequest omRequest) {
-    OzoneManagerProtocolProtos.Type cmdType = omRequest.getCmdType();
-    switch (cmdType) {
-    case CheckVolumeAccess:
-    case InfoVolume:
-    case ListVolume:
-    case InfoBucket:
-    case ListBuckets:
-    case LookupKey:
-    case ListKeys:
-    case InfoS3Bucket:
-    case ListS3Buckets:
-    case ServiceList:
-    case ListMultiPartUploadParts:
-    case GetFileStatus:
-    case LookupFile:
-    case ListStatus:
-    case GetAcl:
-    case DBUpdates:
-    case ListMultipartUploads:
-      return true;
-    case CreateVolume:
-    case SetVolumeProperty:
-    case DeleteVolume:
-    case CreateBucket:
-    case SetBucketProperty:
-    case DeleteBucket:
-    case CreateKey:
-    case RenameKey:
-    case DeleteKey:
-    case CommitKey:
-    case AllocateBlock:
-    case CreateS3Bucket:
-    case DeleteS3Bucket:
-    case InitiateMultiPartUpload:
-    case CommitMultiPartUpload:
-    case CompleteMultiPartUpload:
-    case AbortMultiPartUpload:
-    case GetS3Secret:
-    case GetDelegationToken:
-    case RenewDelegationToken:
-    case CancelDelegationToken:
-    case CreateDirectory:
-    case CreateFile:
-    case RemoveAcl:
-    case SetAcl:
-    case AddAcl:
-    case PurgeKeys:
-      return false;
-    default:
-      LOG.error("CmdType {} is not categorized as readOnly or not.", cmdType);
-      return false;
-    }
-  }
-
-  public static byte[] getMD5Digest(String input) throws IOException {
-    try {
-      MessageDigest md = MessageDigest.getInstance(OzoneConsts.MD5_HASH);
-      return md.digest(input.getBytes(StandardCharsets.UTF_8));
-    } catch (NoSuchAlgorithmException ex) {
-      throw new IOException("Error creating an instance of MD5 digest.\n" +
-          "This could possibly indicate a faulty JRE");
-    }
-  }
-
-  public static byte[] getSHADigest() throws IOException {
-    try {
-      SRAND.nextBytes(randomBytes);
-      MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-      return sha.digest(randomBytes);
-    } catch (NoSuchAlgorithmException ex) {
-      throw new IOException("Error creating an instance of SHA-256 digest.\n" +
-          "This could possibly indicate a faulty JRE");
-    }
-  }
-
-  /**
-   * Add a non-empty, non-null suffix to a key.
-   */
-  private static String addSuffix(String key, String suffix) {
-    if (suffix == null || suffix.isEmpty()) {
-      return key;
-    }
-    assert !suffix.startsWith(".") :
-        "suffix '" + suffix + "' should not already have '.' prepended.";
-    return key + "." + suffix;
-  }
-
-  /**
-   * Concatenate list of suffix strings '.' separated.
-   */
-  private static String concatSuffixes(String... suffixes) {
-    if (suffixes == null) {
-      return null;
-    }
-    return Joiner.on(".").skipNulls().join(suffixes);
-  }
-
-  /**
-   * Return configuration key of format key.suffix1.suffix2...suffixN.
-   */
-  public static String addKeySuffixes(String key, String... suffixes) {
-    String keySuffix = concatSuffixes(suffixes);
-    return addSuffix(key, keySuffix);
-  }
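-
-  // Example derived from the helpers above:
-  //   addKeySuffixes("ozone.om.address", "serviceId1", "node1")
-  //   returns "ozone.om.address.serviceId1.node1".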
-
-  /**
-   * Match input address to local address.
-   * Return true if it matches, false otherwise.
-   */
-  public static boolean isAddressLocal(InetSocketAddress addr) {
-    return NetUtils.isLocalAddress(addr.getAddress());
-  }
-
-  /**
-   * Get a collection of all omNodeIds for the given omServiceId.
-   */
-  public static Collection<String> getOMNodeIds(Configuration conf,
-      String omServiceId) {
-    String key = addSuffix(OZONE_OM_NODES_KEY, omServiceId);
-    return conf.getTrimmedStringCollection(key);
-  }
-
-  /**
-   * @return <code>coll</code> if it is non-null and non-empty. Otherwise,
-   * returns a list with a single null value.
-   */
-  public static Collection<String> emptyAsSingletonNull(Collection<String>
-      coll) {
-    if (coll == null || coll.isEmpty()) {
-      return Collections.singletonList(null);
-    } else {
-      return coll;
-    }
-  }
-
-  /**
-   * Write OM DB Checkpoint to an output stream as a compressed file (tgz).
-   * @param checkpoint checkpoint file
-   * @param destination destination output stream.
-   * @throws IOException
-   */
-  public static void writeOmDBCheckpointToStream(DBCheckpoint checkpoint,
-                                                 OutputStream destination)
-      throws IOException {
-
-    try (CompressorOutputStream gzippedOut = new CompressorStreamFactory()
-        .createCompressorOutputStream(CompressorStreamFactory.GZIP,
-            destination)) {
-
-      try (ArchiveOutputStream archiveOutputStream =
-               new TarArchiveOutputStream(gzippedOut)) {
-
-        Path checkpointPath = checkpoint.getCheckpointLocation();
-        // Close the stream returned by Files.list to avoid leaking a
-        // directory handle.
-        try (Stream<Path> files = Files.list(checkpointPath)) {
-          for (Path path : files.collect(Collectors.toList())) {
-            if (path != null) {
-              Path fileName = path.getFileName();
-              if (fileName != null) {
-                includeFile(path.toFile(), fileName.toString(),
-                    archiveOutputStream);
-              }
-            }
-          }
-        }
-      }
-    } catch (CompressorException e) {
-      throw new IOException(
-          "Can't compress the checkpoint: " +
-              checkpoint.getCheckpointLocation(), e);
-    }
-  }
-
-  private static void includeFile(File file, String entryName,
-                           ArchiveOutputStream archiveOutputStream)
-      throws IOException {
-    ArchiveEntry archiveEntry =
-        archiveOutputStream.createArchiveEntry(file, entryName);
-    archiveOutputStream.putArchiveEntry(archiveEntry);
-    try (FileInputStream fis = new FileInputStream(file)) {
-      IOUtils.copy(fis, archiveOutputStream);
-    }
-    archiveOutputStream.closeArchiveEntry();
-  }
-
-  /**
-   * If an OM conf is only set with the key suffixed with the OM Node ID,
-   * return the set value.
-   * @return the value if it is set for the key suffixed with the OM Node ID,
-   * else null.
-   */
-  public static String getConfSuffixedWithOMNodeId(Configuration conf,
-      String confKey, String omServiceID, String omNodeId) {
-    String suffixedConfKey = OmUtils.addKeySuffixes(
-        confKey, omServiceID, omNodeId);
-    String confValue = conf.getTrimmed(suffixedConfKey);
-    if (StringUtils.isNotEmpty(confValue)) {
-      return confValue;
-    }
-    return null;
-  }
-
-  /**
-   * Returns the http address of peer OM node.
-   * @param conf Configuration
-   * @param omNodeId peer OM node ID
-   * @param omNodeHostAddr peer OM node host address
-   * @return http address of peer OM node in the format <hostName>:<port>
-   */
-  public static String getHttpAddressForOMPeerNode(Configuration conf,
-      String omServiceId, String omNodeId, String omNodeHostAddr) {
-    final Optional<String> bindHost = getHostNameFromConfigKeys(conf,
-        addKeySuffixes(OZONE_OM_HTTP_BIND_HOST_KEY, omServiceId, omNodeId));
-
-    final Optional<Integer> addressPort = getPortNumberFromConfigKeys(conf,
-        addKeySuffixes(OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId));
-
-    final Optional<String> addressHost = getHostNameFromConfigKeys(conf,
-        addKeySuffixes(OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId));
-
-    String hostName = bindHost.orElse(addressHost.orElse(omNodeHostAddr));
-
-    return hostName + ":" + addressPort.orElse(OZONE_OM_HTTP_BIND_PORT_DEFAULT);
-  }
-
-  /**
-   * Returns the https address of peer OM node.
-   * @param conf Configuration
-   * @param omNodeId peer OM node ID
-   * @param omNodeHostAddr peer OM node host address
-   * @return https address of peer OM node in the format <hostName>:<port>
-   */
-  public static String getHttpsAddressForOMPeerNode(Configuration conf,
-      String omServiceId, String omNodeId, String omNodeHostAddr) {
-    final Optional<String> bindHost = getHostNameFromConfigKeys(conf,
-        addKeySuffixes(OZONE_OM_HTTPS_BIND_HOST_KEY, omServiceId, omNodeId));
-
-    final Optional<Integer> addressPort = getPortNumberFromConfigKeys(conf,
-        addKeySuffixes(OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId));
-
-    final Optional<String> addressHost = getHostNameFromConfigKeys(conf,
-        addKeySuffixes(OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId));
-
-    String hostName = bindHost.orElse(addressHost.orElse(omNodeHostAddr));
-
-    return hostName + ":" +
-        addressPort.orElse(OZONE_OM_HTTPS_BIND_PORT_DEFAULT);
-  }
-
-  /**
-   * Get the local directory where ratis logs will be stored.
-   */
-  public static String getOMRatisDirectory(Configuration conf) {
-    String storageDir = conf.get(OMConfigKeys.OZONE_OM_RATIS_STORAGE_DIR);
-
-    if (Strings.isNullOrEmpty(storageDir)) {
-      storageDir = HddsServerUtil.getDefaultRatisDirectory(conf);
-    }
-    return storageDir;
-  }
-
-  public static String getOMRatisSnapshotDirectory(Configuration conf) {
-    String snapshotDir = conf.get(OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_DIR);
-
-    if (Strings.isNullOrEmpty(snapshotDir)) {
-      snapshotDir = Paths.get(getOMRatisDirectory(conf),
-          "snapshot").toString();
-    }
-    return snapshotDir;
-  }
-
-  public static File createOMDir(String dirPath) {
-    File dirFile = new File(dirPath);
-    if (!dirFile.exists() && !dirFile.mkdirs()) {
-      throw new IllegalArgumentException("Unable to create path: " + dirFile);
-    }
-    return dirFile;
-  }
-
-  /**
-   * Prepares key info to be moved to deletedTable.
-   * 1. It strips GDPR metadata from key info
-   * 2. For given object key, if the repeatedOmKeyInfo instance is null, it
-   * implies that no entry for the object key exists in deletedTable so we
-   * create a new instance to include this key, else we update the existing
-   * repeatedOmKeyInfo instance.
-   * @param keyInfo args supplied by client
-   * @param repeatedOmKeyInfo key details from deletedTable
-   * @return {@link RepeatedOmKeyInfo}
-   * @throws IOException if an I/O error occurs while checking for the key
-   */
-  public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo,
-      RepeatedOmKeyInfo repeatedOmKeyInfo) throws IOException{
-    // If this key is in a GDPR enforced bucket, then before moving
-    // KeyInfo to deletedTable, remove the GDPR related metadata from
-    // KeyInfo.
-    if (Boolean.parseBoolean(keyInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) {
-      keyInfo.getMetadata().remove(OzoneConsts.GDPR_FLAG);
-      keyInfo.getMetadata().remove(OzoneConsts.GDPR_ALGORITHM);
-      keyInfo.getMetadata().remove(OzoneConsts.GDPR_SECRET);
-    }
-
-    if(repeatedOmKeyInfo == null) {
-      //The key doesn't exist in deletedTable, so create a new instance.
-      repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
-    } else {
-      //The key exists in deletedTable, so update existing instance.
-      repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
-    }
-
-    return repeatedOmKeyInfo;
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
deleted file mode 100644
index 6a74342..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-package org.apache.hadoop.ozone;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.google.protobuf.ByteString;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.List;
-import java.util.Objects;
-import java.util.stream.Collectors;
-
-/**
- * OzoneACL classes define bucket ACLs used in OZONE.
- *
- * ACLs in Ozone follow this pattern.
- * <ul>
- * <li>user:name:rw
- * <li>group:name:rw
- * <li>world::rw
- * </ul>
- */
-@JsonIgnoreProperties(value = {"aclBitSet"})
-public class OzoneAcl {
-
-  private static final String ACL_SCOPE_REGEX = ".*\\[(ACCESS|DEFAULT)\\]";
-  private ACLIdentityType type;
-  private String name;
-  private BitSet aclBitSet;
-  private AclScope aclScope;
-  private static final List<ACLType> EMPTY_LIST = new ArrayList<>(0);
-  public static final BitSet ZERO_BITSET = new BitSet(0);
-
-  /**
-   * Default constructor.
-   */
-  public OzoneAcl() {
-  }
-
-  /**
-   * Constructor for OzoneAcl.
-   *
-   * @param type   - Type
-   * @param name   - Name of user
-   * @param acl    - Rights
-   * @param scope  - AclScope
-   */
-  public OzoneAcl(ACLIdentityType type, String name, ACLType acl,
-      AclScope scope) {
-    this.name = name;
-    this.aclBitSet = new BitSet(ACLType.getNoOfAcls());
-    aclBitSet.set(acl.ordinal(), true);
-    this.type = type;
-    if (type == ACLIdentityType.WORLD || type == ACLIdentityType.ANONYMOUS) {
-      if (!name.equals(ACLIdentityType.WORLD.name()) &&
-          !name.equals(ACLIdentityType.ANONYMOUS.name()) &&
-          name.length() != 0) {
-        throw new IllegalArgumentException("Unexpected name:{" + name +
-            "} for type WORLD, ANONYMOUS. It should be WORLD & " +
-            "ANONYMOUS respectively.");
-      }
-      // For type WORLD and ANONYMOUS we allow only one acl to be set.
-      this.name = type.name();
-    }
-    if (((type == ACLIdentityType.USER) || (type == ACLIdentityType.GROUP))
-        && (name.length() == 0)) {
-      throw new IllegalArgumentException("User or group name is required");
-    }
-    aclScope = scope;
-  }
-
-  /**
-   * Constructor for OzoneAcl.
-   *
-   * @param type   - Type
-   * @param name   - Name of user
-   * @param acls   - Rights
-   * @param scope  - AclScope
-   */
-  public OzoneAcl(ACLIdentityType type, String name, BitSet acls,
-      AclScope scope) {
-    Objects.requireNonNull(type);
-    Objects.requireNonNull(acls);
-
-    if(acls.cardinality() > ACLType.getNoOfAcls()) {
-      throw new IllegalArgumentException("Acl bitset passed has unexpected " +
-          "size. bitset size:" + acls.cardinality() + ", bitset:"
-          + acls.toString());
-    }
-    this.aclBitSet = (BitSet) acls.clone();
-
-    this.name = name;
-    this.type = type;
-    if (type == ACLIdentityType.WORLD || type == ACLIdentityType.ANONYMOUS) {
-      if (!name.equals(ACLIdentityType.WORLD.name()) &&
-          !name.equals(ACLIdentityType.ANONYMOUS.name()) &&
-          name.length() != 0) {
-        throw new IllegalArgumentException("Unexpected name:{" + name +
-            "} for type WORLD, ANONYMOUS. It should be WORLD & " +
-            "ANONYMOUS respectively.");
-      }
-      // For type WORLD and ANONYMOUS we allow only one acl to be set.
-      this.name = type.name();
-    }
-    if (((type == ACLIdentityType.USER) || (type == ACLIdentityType.GROUP))
-        && (name.length() == 0)) {
-      throw new IllegalArgumentException("User or group name is required");
-    }
-    aclScope = scope;
-  }
-
-  /**
-   * Parses an ACL string and returns the ACL object. If acl scope is not
-   * passed in input string then scope is set to ACCESS.
-   *
-   * @param acl - Acl String, e.g. user:anu:rw
-   *
-   * @return - Ozone ACLs
-   */
-  public static OzoneAcl parseAcl(String acl)
-      throws IllegalArgumentException {
-    if ((acl == null) || acl.isEmpty()) {
-      throw new IllegalArgumentException("ACLs cannot be null or empty");
-    }
-    String[] parts = acl.trim().split(":");
-    if (parts.length < 3) {
-      throw new IllegalArgumentException("ACLs are not in expected format");
-    }
-
-    ACLIdentityType aclType = ACLIdentityType.valueOf(parts[0].toUpperCase());
-    BitSet acls = new BitSet(ACLType.getNoOfAcls());
-
-    String bits = parts[2];
-
-    // Default acl scope is ACCESS.
-    AclScope aclScope = AclScope.ACCESS;
-
-    // Check if acl string contains scope info.
-    if(parts[2].matches(ACL_SCOPE_REGEX)) {
-      int indexOfOpenBracket = parts[2].indexOf("[");
-      bits = parts[2].substring(0, indexOfOpenBracket);
-      aclScope = AclScope.valueOf(parts[2].substring(indexOfOpenBracket + 1,
-          parts[2].indexOf("]")));
-    }
-
-    // Set all acl bits.
-    for (char ch : bits.toCharArray()) {
-      acls.set(ACLType.getACLRight(String.valueOf(ch)).ordinal());
-    }
-
-    // TODO : Support sanitation of these user names by calling into
-    // userAuth Interface.
-    return new OzoneAcl(aclType, parts[1], acls, aclScope);
-  }
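-
-  // Example based on the parsing above (assuming 'r' and 'w' map to the READ
-  // and WRITE rights in ACLType):
-  //   parseAcl("user:anu:rw[DEFAULT]") yields type=USER, name="anu",
-  //   rights={READ, WRITE}, scope=DEFAULT; without the "[...]" suffix the
-  //   scope defaults to ACCESS.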
-
-  /**
-   * Parses an ACL string and returns the ACL object.
-   *
-   * @param acls - comma-separated Acl String, e.g. user:anu:rw
-   *
-   * @return - Ozone ACLs
-   */
-  public static List<OzoneAcl> parseAcls(String acls)
-      throws IllegalArgumentException {
-    if ((acls == null) || acls.isEmpty()) {
-      throw new IllegalArgumentException("ACLs cannot be null or empty");
-    }
-    String[] parts = acls.trim().split(",");
-    if (parts.length < 1) {
-      throw new IllegalArgumentException("ACLs are not in expected format");
-    }
-    List<OzoneAcl> ozAcls = new ArrayList<>();
-
-    for(String acl:parts) {
-      ozAcls.add(parseAcl(acl));
-    }
-    return ozAcls;
-  }
-
-  public static OzoneAclInfo toProtobuf(OzoneAcl acl) {
-    OzoneAclInfo.Builder builder = OzoneAclInfo.newBuilder()
-        .setName(acl.getName())
-        .setType(OzoneAclType.valueOf(acl.getType().name()))
-        .setAclScope(OzoneAclScope.valueOf(acl.getAclScope().name()))
-        .setRights(ByteString.copyFrom(acl.getAclBitSet().toByteArray()));
-    return builder.build();
-  }
-
-  public static OzoneAcl fromProtobuf(OzoneAclInfo protoAcl) {
-    BitSet aclRights = BitSet.valueOf(protoAcl.getRights().toByteArray());
-    return new OzoneAcl(ACLIdentityType.valueOf(protoAcl.getType().name()),
-        protoAcl.getName(), aclRights,
-        AclScope.valueOf(protoAcl.getAclScope().name()));
-  }
-
-  /**
-   * Helper function to convert a proto message of type {@link OzoneAclInfo}
-   * to {@link OzoneAcl} with acl scope of type ACCESS.
-   *
-   * @param protoAcl
-   * @return OzoneAcl
-   * */
-  public static OzoneAcl fromProtobufWithAccessType(OzoneAclInfo protoAcl) {
-    BitSet aclRights = BitSet.valueOf(protoAcl.getRights().toByteArray());
-    return new OzoneAcl(ACLIdentityType.valueOf(protoAcl.getType().name()),
-        protoAcl.getName(), aclRights, AclScope.ACCESS);
-  }
-
-  /**
-   * Helper function to convert an {@link OzoneAcl} to a proto message of type
-   * {@link OzoneAclInfo} with acl scope of type ACCESS.
-   *
-   * @param acl acl to convert
-   * @return OzoneAclInfo
-   */
-  public static OzoneAclInfo toProtobufWithAccessType(OzoneAcl acl) {
-    OzoneAclInfo.Builder builder = OzoneAclInfo.newBuilder()
-        .setName(acl.getName())
-        .setType(OzoneAclType.valueOf(acl.getType().name()))
-        .setAclScope(OzoneAclScope.ACCESS)
-        .setRights(ByteString.copyFrom(acl.getAclBitSet().toByteArray()));
-    return builder.build();
-  }
-
-  public AclScope getAclScope() {
-    return aclScope;
-  }
-
-  @Override
-  public String toString() {
-    return type + ":" + name + ":" + ACLType.getACLString(aclBitSet)
-        + "[" + aclScope + "]";
-  }
-
-  /**
-   * Returns a hash code value for the object. This method is
-   * supported for the benefit of hash tables.
-   *
-   * @return a hash code value for this object.
-   *
-   * @see Object#equals(Object)
-   * @see System#identityHashCode
-   */
-  @Override
-  public int hashCode() {
-    return Objects.hash(this.getName(), this.getAclBitSet(),
-                        this.getType().toString(), this.getAclScope());
-  }
-
-  /**
-   * Returns name.
-   *
-   * @return name
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Returns Rights.
-   *
-   * @return - Rights
-   */
-  public BitSet getAclBitSet() {
-    return aclBitSet;
-  }
-
-  public List<ACLType> getAclList() {
-    if (aclBitSet != null) {
-      return aclBitSet.stream().mapToObj(a ->
-          ACLType.values()[a]).collect(Collectors.toList());
-    }
-    return EMPTY_LIST;
-  }
-
-  /**
-   * Returns Type.
-   *
-   * @return type
-   */
-  public ACLIdentityType getType() {
-    return type;
-  }
-
-  /**
-   * Indicates whether some other object is "equal to" this one.
-   *
-   * @param obj the reference object with which to compare.
-   *
-   * @return {@code true} if this object is the same as the obj
-   * argument; {@code false} otherwise.
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) {
-      return false;
-    }
-    if (getClass() != obj.getClass()) {
-      return false;
-    }
-    OzoneAcl otherAcl = (OzoneAcl) obj;
-    return otherAcl.getName().equals(this.getName()) &&
-        otherAcl.getType().equals(this.getType()) &&
-        otherAcl.getAclBitSet().equals(this.getAclBitSet()) &&
-        otherAcl.getAclScope().equals(this.getAclScope());
-  }
-
-  public OzoneAcl setAclScope(AclScope scope) {
-    this.aclScope = scope;
-    return this;
-  }
-
-  /**
-   * Scope of ozone acl.
-   * */
-  public enum AclScope {
-    ACCESS,
-    DEFAULT;
-  }
-}
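
For reference, the OzoneAcl class deleted above parses ACL strings of the
form type:name:rights, with an optional [SCOPE] suffix. A minimal round-trip
sketch of the removed API (hypothetical variable names, assuming the class as
it existed before this deletion):

    // Scope defaults to ACCESS when no [SCOPE] suffix is present.
    OzoneAcl acl = OzoneAcl.parseAcl("user:anu:rw[DEFAULT]");
    String rendered = acl.toString();          // "user:anu:rw[DEFAULT]"
    // The protobuf round trip preserves type, name, rights and scope.
    OzoneAclInfo proto = OzoneAcl.toProtobuf(acl);
    OzoneAcl back = OzoneAcl.fromProtobuf(proto);
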
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneIllegalArgumentException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneIllegalArgumentException.java
deleted file mode 100644
index e732dc2..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneIllegalArgumentException.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Indicates that a method has been passed an illegal or invalid argument.
- * This exception is thrown instead of IllegalArgumentException to
- * differentiate exceptions thrown by Hadoop from those thrown by the JDK.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class OzoneIllegalArgumentException extends IllegalArgumentException {
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * Constructs exception with the specified detail message.
-   * @param message detailed message.
-   */
-  public OzoneIllegalArgumentException(final String message) {
-    super(message);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
deleted file mode 100644
index 89c5dfa..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-/**
- * Enum to define Audit Action types for OzoneManager.
- */
-public enum OMAction implements AuditAction {
-
-  // WRITE Actions
-  ALLOCATE_BLOCK,
-  ALLOCATE_KEY,
-  COMMIT_KEY,
-  CREATE_VOLUME,
-  CREATE_BUCKET,
-  DELETE_VOLUME,
-  DELETE_BUCKET,
-  DELETE_KEY,
-  RENAME_KEY,
-  SET_OWNER,
-  SET_QUOTA,
-  UPDATE_VOLUME,
-  UPDATE_BUCKET,
-  UPDATE_KEY,
-  PURGE_KEYS,
-
-  // S3 Bucket
-  CREATE_S3_BUCKET,
-  DELETE_S3_BUCKET,
-
-  // READ Actions
-  CHECK_VOLUME_ACCESS,
-  LIST_BUCKETS,
-  LIST_VOLUMES,
-  LIST_KEYS,
-  READ_VOLUME,
-  READ_BUCKET,
-  READ_KEY,
-  LIST_S3BUCKETS,
-  INITIATE_MULTIPART_UPLOAD,
-  COMMIT_MULTIPART_UPLOAD_PARTKEY,
-  COMPLETE_MULTIPART_UPLOAD,
-  LIST_MULTIPART_UPLOAD_PARTS,
-  LIST_MULTIPART_UPLOADS,
-  ABORT_MULTIPART_UPLOAD,
-
-  //ACL Actions
-  ADD_ACL,
-  GET_ACL,
-  SET_ACL,
-  REMOVE_ACL,
-
-  //FS Actions
-  GET_FILE_STATUS,
-  CREATE_DIRECTORY,
-  CREATE_FILE,
-  LOOKUP_FILE,
-  LIST_STATUS,
-
-  GET_S3_SECRET;
-
-  @Override
-  public String getAction() {
-    return this.toString();
-  }
-
-}
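
The OMAction enum deleted above serializes each audit action to its enum
name; a one-line sketch of the getAction() contract:

    // getAction() delegates to toString(), i.e. the constant's name.
    String action = OMAction.CREATE_VOLUME.getAction();  // "CREATE_VOLUME"
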
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
deleted file mode 100644
index 0f88790..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit;
-/**
- * This package defines OMAction, an implementation of AuditAction.
- * OMAction defines the audit action types for the various actions that
- * are audited in OzoneManager.
- */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java
deleted file mode 100644
index baf1887..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.io;
-
-import java.io.FilterInputStream;
-import java.io.InputStream;
-
-/**
- * An input stream with length.
- */
-public class LengthInputStream extends FilterInputStream {
-
-  private final long length;
-
-  /**
-   * Create a stream.
-   * @param in the underlying input stream.
-   * @param length the length of the stream.
-   */
-  public LengthInputStream(InputStream in, long length) {
-    super(in);
-    this.length = length;
-  }
-
-  /** @return the length. */
-  public long getLength() {
-    return length;
-  }
-
-  public InputStream getWrappedStream() {
-    return in;
-  }
-}
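
A minimal usage sketch of the LengthInputStream wrapper deleted above, which
pairs a stream with a known length without consuming it (imports from java.io
and java.nio.charset assumed; the byte array is illustrative):

    byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
    LengthInputStream lis =
        new LengthInputStream(new ByteArrayInputStream(data), data.length);
    long length = lis.getLength();                // 5, nothing is read
    InputStream wrapped = lis.getWrappedStream(); // the underlying stream
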
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java
deleted file mode 100644
index ece1ff4..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.client.io;
-
-/**
- * I/O-related Ozone helper classes.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
deleted file mode 100644
index 3c60e59..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.freon;
-
-import java.io.IOException;
-import java.io.PrintStream;
-import java.net.InetSocketAddress;
-import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-/**
- * CLI utility to print out ozone related configuration.
- */
-public class OzoneGetConf extends Configured implements Tool {
-
-  private static final String DESCRIPTION = "ozone getconf is a utility for "
-      + "getting configuration information from the config file.\n";
-
-  enum Command {
-    INCLUDE_FILE("-includeFile",
-        "gets the include file path that defines the datanodes " +
-            "that can join the cluster."),
-    EXCLUDE_FILE("-excludeFile",
-        "gets the exclude file path that defines the datanodes " +
-            "that need to decommissioned."),
-    OZONEMANAGER("-ozonemanagers",
-        "gets list of Ozone Manager nodes in the cluster"),
-    STORAGECONTAINERMANAGER("-storagecontainermanagers",
-        "gets list of ozone storage container manager nodes in the cluster"),
-    CONFKEY("-confKey [key]", "gets a specific key from the configuration");
-
-    private static final Map<String, OzoneGetConf.CommandHandler> HANDLERS;
-
-    static {
-      HANDLERS = new HashMap<String, OzoneGetConf.CommandHandler>();
-      HANDLERS.put(StringUtils.toLowerCase(OZONEMANAGER.getName()),
-          new OzoneManagersCommandHandler());
-      HANDLERS.put(StringUtils.toLowerCase(STORAGECONTAINERMANAGER.getName()),
-          new StorageContainerManagersCommandHandler());
-      HANDLERS.put(StringUtils.toLowerCase(CONFKEY.getName()),
-          new PrintConfKeyCommandHandler());
-    }
-
-    private final String cmd;
-    private final String description;
-
-    Command(String cmd, String description) {
-      this.cmd = cmd;
-      this.description = description;
-    }
-
-    public String getName() {
-      return cmd.split(" ")[0];
-    }
-
-    public String getUsage() {
-      return cmd;
-    }
-
-    public String getDescription() {
-      return description;
-    }
-
-    public static OzoneGetConf.CommandHandler getHandler(String cmd) {
-      return HANDLERS.get(StringUtils.toLowerCase(cmd));
-    }
-  }
-
-  static final String USAGE;
-  static {
-    HdfsConfiguration.init();
-
-    /* Initialize USAGE based on Command values */
-    StringBuilder usage = new StringBuilder(DESCRIPTION);
-    usage.append("\nozone getconf \n");
-    for (OzoneGetConf.Command cmd : OzoneGetConf.Command.values()) {
-      usage.append("\t[" + cmd.getUsage() + "]\t\t\t" + cmd.getDescription()
-          + "\n");
-    }
-    USAGE = usage.toString();
-  }
-
-  /**
-   * Handler that returns the value for the key corresponding to the
-   * {@link OzoneGetConf.Command}.
-   */
-  static class CommandHandler {
-
-    @SuppressWarnings("visibilitymodifier")
-    protected String key; // Configuration key to lookup
-
-    CommandHandler() {
-      this(null);
-    }
-
-    CommandHandler(String key) {
-      this.key = key;
-    }
-
-    final int doWork(OzoneGetConf tool, String[] args) {
-      try {
-        checkArgs(args);
-
-        return doWorkInternal(tool, args);
-      } catch (Exception e) {
-        tool.printError(e.getMessage());
-      }
-      return -1;
-    }
-
-    protected void checkArgs(String[] args) {
-      if (args.length > 0) {
-        throw new HadoopIllegalArgumentException(
-            "Did not expect argument: " + args[0]);
-      }
-    }
-
-
-    /** Method to be overridden by subclasses for specific behavior. */
-    int doWorkInternal(OzoneGetConf tool, String[] args) throws Exception {
-
-      String value = tool.getConf().getTrimmed(key);
-      if (value != null) {
-        tool.printOut(value);
-        return 0;
-      }
-      tool.printError("Configuration " + key + " is missing.");
-      return -1;
-    }
-  }
-
-  static class PrintConfKeyCommandHandler extends OzoneGetConf.CommandHandler {
-    @Override
-    protected void checkArgs(String[] args) {
-      if (args.length != 1) {
-        throw new HadoopIllegalArgumentException(
-            "usage: " + OzoneGetConf.Command.CONFKEY.getUsage());
-      }
-    }
-
-    @Override
-    int doWorkInternal(OzoneGetConf tool, String[] args) throws Exception {
-      this.key = args[0];
-      return super.doWorkInternal(tool, args);
-    }
-  }
-
-  private final PrintStream out; // Stream for printing command output
-  private final PrintStream err; // Stream for printing error
-
-  protected OzoneGetConf(Configuration conf) {
-    this(conf, System.out, System.err);
-  }
-
-  protected OzoneGetConf(Configuration conf, PrintStream out, PrintStream err) {
-    super(conf);
-    this.out = out;
-    this.err = err;
-  }
-
-  void printError(String message) {
-    err.println(message);
-  }
-
-  void printOut(String message) {
-    out.println(message);
-  }
-
-  private void printUsage() {
-    printError(USAGE);
-  }
-
-  /**
-   * Main method that runs the tool for the given arguments.
-   * @param args arguments
-   * @return exit status of the command
-   */
-  private int doWork(String[] args) {
-    if (args.length >= 1) {
-      OzoneGetConf.CommandHandler handler =
-          OzoneGetConf.Command.getHandler(args[0]);
-      if (handler != null) {
-        return handler.doWork(this, Arrays.copyOfRange(args, 1, args.length));
-      }
-    }
-    printUsage();
-    return -1;
-  }
-
-  @Override
-  public int run(final String[] args) throws Exception {
-    return SecurityUtil.doAsCurrentUser(
-          new PrivilegedExceptionAction<Integer>() {
-            @Override
-            public Integer run() throws Exception {
-              return doWork(args);
-            }
-          });
-  }
-
-  /**
-   * Handler for {@link Command#STORAGECONTAINERMANAGER}.
-   */
-  static class StorageContainerManagersCommandHandler extends CommandHandler {
-
-    @Override
-    public int doWorkInternal(OzoneGetConf tool, String[] args)
-        throws IOException {
-      Collection<InetSocketAddress> addresses = HddsUtils
-          .getSCMAddresses(tool.getConf());
-
-      for (InetSocketAddress addr : addresses) {
-        tool.printOut(addr.getHostName());
-      }
-      return 0;
-    }
-  }
-
-  /**
-   * Handler for {@link Command#OZONEMANAGER}.
-   */
-  static class OzoneManagersCommandHandler extends CommandHandler {
-    @Override
-    public int doWorkInternal(OzoneGetConf tool, String[] args)
-        throws IOException {
-      tool.printOut(OmUtils.getOmAddress(tool.getConf()).getHostName());
-      return 0;
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
-    Configuration conf = new Configuration();
-    conf.addResource(new OzoneConfiguration());
-    int res = ToolRunner.run(new OzoneGetConf(conf), args);
-    System.exit(res);
-  }
-}
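
A sketch of invoking the OzoneGetConf tool deleted above, mirroring its
main() method; note the constructors are protected, so this only compiles
from within the same package, and -confKey is one of the Command flags
defined in the file:

    Configuration conf = new Configuration();
    conf.addResource(new OzoneConfiguration());
    // Prints the value of ozone.om.address, or an error if it is unset.
    int rc = ToolRunner.run(new OzoneGetConf(conf),
        new String[] {"-confKey", "ozone.om.address"});
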
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java
deleted file mode 100644
index 150c64e..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.freon;
-/**
- * Classes related to Ozone tools.
- */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
deleted file mode 100644
index dcb9b5c..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import java.util.concurrent.TimeUnit;
-
-import org.apache.ratis.util.TimeDuration;
-
-/**
- * Ozone Manager Constants.
- */
-public final class OMConfigKeys {
-  /**
-   * Never constructed.
-   */
-  private OMConfigKeys() {
-  }
-
-  // Location where the OM stores its DB files. In the future we may support
-  // multiple entries for performance (sharding).
-  public static final String OZONE_OM_DB_DIRS = "ozone.om.db.dirs";
-
-  public static final String OZONE_OM_HANDLER_COUNT_KEY =
-      "ozone.om.handler.count.key";
-  public static final int OZONE_OM_HANDLER_COUNT_DEFAULT = 20;
-
-  public static final String OZONE_OM_SERVICE_IDS_KEY =
-      "ozone.om.service.ids";
-  public static final String OZONE_OM_NODES_KEY =
-      "ozone.om.nodes";
-  public static final String OZONE_OM_NODE_ID_KEY =
-      "ozone.om.node.id";
-
-  public static final String OZONE_OM_ADDRESS_KEY =
-      "ozone.om.address";
-  public static final String OZONE_OM_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-  public static final int OZONE_OM_PORT_DEFAULT = 9862;
-
-  public static final String OZONE_OM_HTTP_ENABLED_KEY =
-      "ozone.om.http.enabled";
-  public static final String OZONE_OM_HTTP_BIND_HOST_KEY =
-      "ozone.om.http-bind-host";
-  public static final String OZONE_OM_HTTPS_BIND_HOST_KEY =
-      "ozone.om.https-bind-host";
-  public static final String OZONE_OM_HTTP_ADDRESS_KEY =
-      "ozone.om.http-address";
-  public static final String OZONE_OM_HTTPS_ADDRESS_KEY =
-      "ozone.om.https-address";
-  public static final String OZONE_OM_KEYTAB_FILE =
-      "ozone.om.keytab.file";
-  public static final String OZONE_OM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
-  public static final int OZONE_OM_HTTP_BIND_PORT_DEFAULT = 9874;
-  public static final int OZONE_OM_HTTPS_BIND_PORT_DEFAULT = 9875;
-
-  // LevelDB cache file uses an off-heap cache in LevelDB of 128 MB.
-  public static final String OZONE_OM_DB_CACHE_SIZE_MB =
-      "ozone.om.db.cache.size.mb";
-  public static final int OZONE_OM_DB_CACHE_SIZE_DEFAULT = 128;
-
-  public static final String OZONE_OM_USER_MAX_VOLUME =
-      "ozone.om.user.max.volume";
-  public static final int OZONE_OM_USER_MAX_VOLUME_DEFAULT = 1024;
-
-  public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
-      "ozone.key.deleting.limit.per.task";
-  public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
-
-  public static final String OZONE_OM_METRICS_SAVE_INTERVAL =
-      "ozone.om.save.metrics.interval";
-  public static final String OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT = "5m";
-
-  /**
-   * OM Ratis related configurations.
-   */
-  public static final String OZONE_OM_RATIS_ENABLE_KEY
-      = "ozone.om.ratis.enable";
-  public static final boolean OZONE_OM_RATIS_ENABLE_DEFAULT
-      = false;
-  public static final String OZONE_OM_RATIS_PORT_KEY
-      = "ozone.om.ratis.port";
-  public static final int OZONE_OM_RATIS_PORT_DEFAULT
-      = 9872;
-  public static final String OZONE_OM_RATIS_RPC_TYPE_KEY
-      = "ozone.om.ratis.rpc.type";
-  public static final String OZONE_OM_RATIS_RPC_TYPE_DEFAULT
-      = "GRPC";
-
-  // OM Ratis Log configurations
-  public static final String OZONE_OM_RATIS_STORAGE_DIR
-      = "ozone.om.ratis.storage.dir";
-  public static final String OZONE_OM_RATIS_SEGMENT_SIZE_KEY
-      = "ozone.om.ratis.segment.size";
-  public static final String OZONE_OM_RATIS_SEGMENT_SIZE_DEFAULT
-      = "16KB";
-  public static final String OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY
-      = "ozone.om.ratis.segment.preallocated.size";
-  public static final String OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT
-      = "16KB";
-
-  // OM Ratis Log Appender configurations
-  public static final String
-      OZONE_OM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS =
-      "ozone.om.ratis.log.appender.queue.num-elements";
-  public static final int
-      OZONE_OM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1024;
-  public static final String OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT =
-      "ozone.om.ratis.log.appender.queue.byte-limit";
-  public static final String
-      OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB";
-  public static final String OZONE_OM_RATIS_LOG_PURGE_GAP =
-      "ozone.om.ratis.log.purge.gap";
-  public static final int OZONE_OM_RATIS_LOG_PURGE_GAP_DEFAULT = 1000000;
-
-  // OM Snapshot configurations
-  public static final String OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY
-      = "ozone.om.ratis.snapshot.auto.trigger.threshold";
-  public static final long
-      OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_DEFAULT
-      = 400000;
-
-  // OM Ratis server configurations
-  public static final String OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_KEY
-      = "ozone.om.ratis.server.request.timeout";
-  public static final TimeDuration
-      OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT
-      = TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
-  public static final String
-      OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_KEY
-      = "ozone.om.ratis.server.retry.cache.timeout";
-  public static final TimeDuration
-      OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT
-      = TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS);
-  public static final String OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY
-      = "ozone.om.ratis.minimum.timeout";
-  public static final TimeDuration OZONE_OM_RATIS_MINIMUM_TIMEOUT_DEFAULT
-      = TimeDuration.valueOf(1, TimeUnit.SECONDS);
-
-  // OM Ratis client configurations
-  public static final String OZONE_OM_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY
-      = "ozone.om.ratis.client.request.timeout.duration";
-  public static final TimeDuration
-      OZONE_OM_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
-      = TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
-  public static final String OZONE_OM_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY
-      = "ozone.om.ratis.client.request.max.retries";
-  public static final int OZONE_OM_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT
-      = 180;
-  public static final String OZONE_OM_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY
-      = "ozone.om.ratis.client.request.retry.interval";
-  public static final TimeDuration
-      OZONE_OM_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT
-      = TimeDuration.valueOf(100, TimeUnit.MILLISECONDS);
-
-  // OM Ratis Leader Election configurations
-  public static final String
-      OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
-      "ozone.om.leader.election.minimum.timeout.duration";
-  public static final TimeDuration
-      OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(1, TimeUnit.SECONDS);
-  public static final String OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_KEY
-      = "ozone.om.ratis.server.failure.timeout.duration";
-  public static final TimeDuration
-      OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT
-      = TimeDuration.valueOf(120, TimeUnit.SECONDS);
-
-  // OM Leader server role check interval
-  public static final String OZONE_OM_RATIS_SERVER_ROLE_CHECK_INTERVAL_KEY
-      = "ozone.om.ratis.server.role.check.interval";
-  public static final TimeDuration
-      OZONE_OM_RATIS_SERVER_ROLE_CHECK_INTERVAL_DEFAULT
-      = TimeDuration.valueOf(15, TimeUnit.SECONDS);
-
-  // OM SnapshotProvider configurations
-  public static final String OZONE_OM_RATIS_SNAPSHOT_DIR =
-      "ozone.om.ratis.snapshot.dir";
-  public static final String OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_KEY =
-      "ozone.om.snapshot.provider.socket.timeout";
-  public static final TimeDuration
-      OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT =
-      TimeDuration.valueOf(5000, TimeUnit.MILLISECONDS);
-
-  public static final String OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_KEY =
-      "ozone.om.snapshot.provider.connection.timeout";
-  public static final TimeDuration
-      OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT =
-      TimeDuration.valueOf(5000, TimeUnit.MILLISECONDS);
-
-  public static final String OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_KEY =
-      "ozone.om.snapshot.provider.request.timeout";
-  public static final TimeDuration
-      OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT =
-      TimeDuration.valueOf(5000, TimeUnit.MILLISECONDS);
-
-  public static final String OZONE_OM_KERBEROS_KEYTAB_FILE_KEY = "ozone.om."
-      + "kerberos.keytab.file";
-  public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om"
-      + ".kerberos.principal";
-  public static final String OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE =
-      "ozone.om.http.kerberos.keytab";
-  public static final String OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY
-      = "ozone.om.http.kerberos.principal";
-  // Delegation token related keys
-  public static final String  DELEGATION_REMOVER_SCAN_INTERVAL_KEY =
-      "ozone.manager.delegation.remover.scan.interval";
-  public static final long    DELEGATION_REMOVER_SCAN_INTERVAL_DEFAULT =
-      60*60*1000;
-  public static final String  DELEGATION_TOKEN_RENEW_INTERVAL_KEY =
-      "ozone.manager.delegation.token.renew-interval";
-  public static final long    DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT =
-      24*60*60*1000;  // 1 day = 86400000 ms
-  public static final String  DELEGATION_TOKEN_MAX_LIFETIME_KEY =
-      "ozone.manager.delegation.token.max-lifetime";
-  public static final long    DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT =
-      7*24*60*60*1000; // 7 days
-
-  public static final String OZONE_DB_CHECKPOINT_TRANSFER_RATE_KEY =
-      "ozone.manager.db.checkpoint.transfer.bandwidthPerSec";
-  public static final long OZONE_DB_CHECKPOINT_TRANSFER_RATE_DEFAULT =
-      0;  //no throttling
-
-  // Comma separated acls (users, groups) allowing clients accessing
-  // OM client protocol
-  // when hadoop.security.authorization is true, this needs to be set in
-  // hadoop-policy.xml, "*" allows all users/groups to access.
-  public static final String OZONE_OM_SECURITY_CLIENT_PROTOCOL_ACL =
-      "ozone.om.security.client.protocol.acl";
-}
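
The OMConfigKeys constants deleted above are read through the standard
Hadoop Configuration getters; a minimal sketch using the handler-count key
and default from the file:

    OzoneConfiguration conf = new OzoneConfiguration();
    int handlers = conf.getInt(
        OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY,
        OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEFAULT);  // 20 when unset
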
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
deleted file mode 100644
index 673d26a..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ /dev/null
@@ -1,339 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .UserVolumeInfo;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.hadoop.hdds.utils.db.Table;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * OM metadata manager interface.
- */
-public interface OMMetadataManager {
-  /**
-   * Start metadata manager.
-   *
-   * @param configuration
-   * @throws IOException
-   */
-  void start(OzoneConfiguration configuration) throws IOException;
-
-  /**
-   * Stop metadata manager.
-   */
-  void stop() throws Exception;
-
-  /**
-   * Get metadata store.
-   *
-   * @return metadata store.
-   */
-  @VisibleForTesting
-  DBStore getStore();
-
-  /**
-   * Returns the OzoneManagerLock used on Metadata DB.
-   *
-   * @return OzoneManagerLock
-   */
-  OzoneManagerLock getLock();
-
-  /**
-   * Given a volume return the corresponding DB key.
-   *
-   * @param volume - Volume name
-   */
-  String getVolumeKey(String volume);
-
-  /**
-   * Given a user return the corresponding DB key.
-   *
-   * @param user - User name
-   */
-  String getUserKey(String user);
-
-  /**
-   * Given a volume and bucket, return the corresponding DB key.
-   *
-   * @param volume - Volume name
-   * @param bucket - Bucket name
-   */
-  String getBucketKey(String volume, String bucket);
-
-  /**
-   * Given a volume, bucket and a key, return the corresponding DB key.
-   *
-   * @param volume - volume name
-   * @param bucket - bucket name
-   * @param key    - key name
-   * @return DB key as String.
-   */
-
-  String getOzoneKey(String volume, String bucket, String key);
-
-  /**
-   * Given a volume, bucket and a key, return the corresponding DB directory
-   * key.
-   *
-   * @param volume - volume name
-   * @param bucket - bucket name
-   * @param key    - key name
-   * @return DB directory key as String.
-   */
-  String getOzoneDirKey(String volume, String bucket, String key);
-
-
-  /**
-   * Returns the DB key name of an open key in the OM metadata store: the
-   * #open# prefix followed by the actual key name.
-   *
-   * @param volume - volume name
-   * @param bucket - bucket name
-   * @param key - key name
-   * @param id - the id for this open
-   * @return bytes of DB key.
-   */
-  String getOpenKey(String volume, String bucket, String key, long id);
-
-  /**
-   * Given a volume, check if it is empty, i.e., there are no buckets inside it.
-   *
-   * @param volume - Volume name
-   */
-  boolean isVolumeEmpty(String volume) throws IOException;
-
-  /**
-   * Given a volume/bucket, check if it is empty, i.e., there are no keys
-   * it.
-   *
-   * @param volume - Volume name
-   * @param bucket - Bucket name
-   * @return true if the bucket is empty
-   */
-  boolean isBucketEmpty(String volume, String bucket) throws IOException;
-
-  /**
-   * Returns a list of buckets represented by {@link OmBucketInfo} in the given
-   * volume.
-   *
-   * @param volumeName the name of the volume. This argument is required;
-   * this method returns buckets only in this given volume.
-   * @param startBucket the start bucket name. Only the buckets whose name is
-   * after this value will be included in the result. This key is excluded from
-   * the result.
-   * @param bucketPrefix bucket name prefix. Only the buckets whose name has
-   * this prefix will be included in the result.
-   * @param maxNumOfBuckets the maximum number of buckets to return. It ensures
-   * the size of the result will not exceed this limit.
-   * @return a list of buckets.
-   * @throws IOException
-   */
-  List<OmBucketInfo> listBuckets(String volumeName, String startBucket,
-      String bucketPrefix, int maxNumOfBuckets)
-      throws IOException;
-
-  /**
-   * Returns a list of keys represented by {@link OmKeyInfo} in the given
-   * bucket.
-   *
-   * @param volumeName the name of the volume.
-   * @param bucketName the name of the bucket.
-   * @param startKey the start key name, only the keys whose name is after this
-   * value will be included in the result. This key is excluded from the
-   * result.
-   * @param keyPrefix key name prefix, only the keys whose name has this prefix
-   * will be included in the result.
-   * @param maxKeys the maximum number of keys to return. It ensures the size of
-   * the result will not exceed this limit.
-   * @return a list of keys.
-   * @throws IOException
-   */
-  List<OmKeyInfo> listKeys(String volumeName,
-      String bucketName, String startKey, String keyPrefix, int maxKeys)
-      throws IOException;
-
-  /**
-   * Returns a list of volumes owned by a given user; if user is null, returns
-   * all volumes.
-   *
-   * @param userName volume owner
-   * @param prefix the volume prefix used to filter the listing result.
-   * @param startKey the start volume name determines where to start listing
-   * from, this key is excluded from the result.
-   * @param maxKeys the maximum number of volumes to return.
-   * @return a list of {@link OmVolumeArgs}
-   * @throws IOException
-   */
-  List<OmVolumeArgs> listVolumes(String userName, String prefix,
-      String startKey, int maxKeys) throws IOException;
-
-  /**
-   * Returns a list of pending deletion key info, up to the given count.
-   * Each entry is a {@link BlockGroup}, which contains the info about the key
-   * name and all its associated block IDs. A pending deletion key is stored
-   * with the #deleting# prefix in the OM DB.
-   *
-   * @param count max number of keys to return.
-   * @return a list of {@link BlockGroup} representing keys and blocks.
-   * @throws IOException
-   */
-  List<BlockGroup> getPendingDeletionKeys(int count) throws IOException;
-
-  /**
-   * Returns a list of info for all still-open keys, containing each key name
-   * and all its associated block IDs. A pending open key has the prefix
-   * #open# in the OM DB.
-   *
-   * @return a list of {@link BlockGroup} representing keys and blocks.
-   * @throws IOException
-   */
-  List<BlockGroup> getExpiredOpenKeys() throws IOException;
-
-  /**
-   * Returns the user Table.
-   *
-   * @return UserTable.
-   */
-  Table<String, UserVolumeInfo> getUserTable();
-
-  /**
-   * Returns the Volume Table.
-   *
-   * @return VolumeTable.
-   */
-  Table<String, OmVolumeArgs> getVolumeTable();
-
-  /**
-   * Returns the BucketTable.
-   *
-   * @return BucketTable.
-   */
-  Table<String, OmBucketInfo> getBucketTable();
-
-  /**
-   * Returns the KeyTable.
-   *
-   * @return KeyTable.
-   */
-  Table<String, OmKeyInfo> getKeyTable();
-
-  /**
-   * Get Deleted Table.
-   *
-   * @return Deleted Table.
-   */
-  Table<String, RepeatedOmKeyInfo> getDeletedTable();
-
-  /**
-   * Gets the OpenKeyTable.
-   *
-   * @return Table.
-   */
-  Table<String, OmKeyInfo> getOpenKeyTable();
-
-  /**
-   * Gets the DelegationTokenTable.
-   *
-   * @return Table.
-   */
-  Table<OzoneTokenIdentifier, Long> getDelegationTokenTable();
-
-  /**
-   * Gets the S3Bucket to Ozone Volume/bucket mapping table.
-   *
-   * @return Table.
-   */
-
-  Table<String, String> getS3Table();
-
-  /**
-   * Gets the Ozone prefix path to its acl mapping table.
-   * @return Table.
-   */
-  Table<String, OmPrefixInfo> getPrefixTable();
-
-  /**
-   * Returns the DB key name of a multipart upload key in OM metadata store.
-   *
-   * @param volume - volume name
-   * @param bucket - bucket name
-   * @param key - key name
-   * @param uploadId - the upload id for this key
-   * @return bytes of DB key.
-   */
-  String getMultipartKey(String volume, String bucket, String key, String
-      uploadId);
-
-
-  /**
-   * Gets the multipart info table which holds the information about
-   * multipart upload information of the keys.
-   * @return Table
-   */
-  Table<String, OmMultipartKeyInfo> getMultipartInfoTable();
-
-  /**
-   * Gets the S3 Secrets table.
-   * @return Table
-   */
-  Table<String, S3SecretValue> getS3SecretTable();
-
-  /**
-   * Returns the number of rows in a table. This should not be used for very
-   * large tables.
-   * @param table
-   * @return long
-   * @throws IOException
-   */
-  <KEY, VALUE> long countRowsInTable(Table<KEY, VALUE> table)
-      throws IOException;
-
-  /**
-   * Returns an estimated number of rows in a table.  This is much quicker
-   * than {@link OMMetadataManager#countRowsInTable} but the result can be
-   * inaccurate.
-   * @param table Table
-   * @return long Estimated number of rows in the table.
-   * @throws IOException
-   */
-  <KEY, VALUE> long countEstimatedRowsInTable(Table<KEY, VALUE> table)
-      throws IOException;
-
-  /**
-   * Returns the existing multipart upload keys, which include volumeName,
-   * bucketName and keyName.
-   */
-  List<String> getMultipartUploadKeys(String volumeName,
-      String bucketName, String prefix) throws IOException;
-}
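
A hypothetical sketch against the OMMetadataManager interface deleted above,
showing the paged listBuckets() contract (metadataManager is an assumed
implementation; passing null as startBucket to list from the beginning is an
assumption, since the Javadoc only specifies non-empty values):

    List<OmBucketInfo> page = metadataManager.listBuckets(
        "vol1",   // volume to list (required)
        null,     // startBucket: assumed to mean "from the beginning"
        "logs-",  // only buckets whose names have this prefix
        100);     // upper bound on the result size
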
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java
deleted file mode 100644
index 3ca8cbb..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-
-import java.io.IOException;
-/**
- * Interface to manage S3 secrets.
- */
-public interface S3SecretManager {
-
-  S3SecretValue getS3Secret(String kerberosID) throws IOException;
-
-  /**
-   * API to get the S3 secret for the given awsAccessKey.
-   * @param awsAccessKey the AWS access key to look up
-   */
-  String getS3UserSecretString(String awsAccessKey) throws IOException;
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
deleted file mode 100644
index fb566582..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.ozone.om;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.security.OzoneSecurityException;
-import org.apache.logging.log4j.util.Strings;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_SECRET_LOCK;
-import static org.apache.hadoop.ozone.security.OzoneSecurityException.ResultCodes.S3_SECRET_NOT_FOUND;
-
-/**
- * S3 Secret manager.
- */
-public class S3SecretManagerImpl implements S3SecretManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(S3SecretManagerImpl.class);
-  /**
-   * OMMetadataManager is used for accessing OM MetadataDB and ReadWriteLock.
-   */
-  private final OMMetadataManager omMetadataManager;
-  private final OzoneConfiguration configuration;
-
-  /**
-   * Constructs S3SecretManager.
-   * @param configuration ozone configuration
-   * @param omMetadataManager the OM metadata manager
-   */
-  public S3SecretManagerImpl(OzoneConfiguration configuration,
-      OMMetadataManager omMetadataManager) {
-    this.configuration = configuration;
-    this.omMetadataManager = omMetadataManager;
-  }
-
-  @Override
-  public S3SecretValue getS3Secret(String kerberosID) throws IOException {
-    Preconditions.checkArgument(Strings.isNotBlank(kerberosID),
-        "kerberosID cannot be null or empty.");
-    S3SecretValue result = null;
-    omMetadataManager.getLock().acquireLock(S3_SECRET_LOCK, kerberosID);
-    try {
-      S3SecretValue s3Secret =
-          omMetadataManager.getS3SecretTable().get(kerberosID);
-      if (s3Secret == null) {
-        byte[] secret = OmUtils.getSHADigest();
-        result = new S3SecretValue(kerberosID, DigestUtils.sha256Hex(secret));
-        omMetadataManager.getS3SecretTable().put(kerberosID, result);
-      } else {
-        return s3Secret;
-      }
-    } finally {
-      omMetadataManager.getLock().releaseLock(S3_SECRET_LOCK, kerberosID);
-    }
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Secret for accessKey:{}, proto:{}", kerberosID, result);
-    }
-    return result;
-  }
-
-  @Override
-  public String getS3UserSecretString(String kerberosID)
-      throws IOException {
-    Preconditions.checkArgument(Strings.isNotBlank(kerberosID),
-        "awsAccessKeyId cannot be null or empty.");
-    LOG.trace("Get secret for awsAccessKey:{}", kerberosID);
-
-    S3SecretValue s3Secret;
-    omMetadataManager.getLock().acquireLock(S3_SECRET_LOCK, kerberosID);
-    try {
-      s3Secret = omMetadataManager.getS3SecretTable().get(kerberosID);
-      if (s3Secret == null) {
-        throw new OzoneSecurityException("S3 secret not found for " +
-            "awsAccessKeyId " + kerberosID, S3_SECRET_NOT_FOUND);
-      }
-    } finally {
-      omMetadataManager.getLock().releaseLock(S3_SECRET_LOCK, kerberosID);
-    }
-
-    return s3Secret.getAwsSecret();
-  }
-
-  public OMMetadataManager getOmMetadataManager() {
-    return omMetadataManager;
-  }
-}
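
A sketch of the get-or-create secret flow implemented above (configuration
and omMetadataManager are assumed to already exist; the kerberos ID is
illustrative):

    S3SecretManager secrets =
        new S3SecretManagerImpl(configuration, omMetadataManager);
    // Returns the stored secret, or generates and persists one under
    // the S3_SECRET_LOCK.
    S3SecretValue secret = secrets.getS3Secret("testuser@EXAMPLE.COM");
    // Throws OzoneSecurityException(S3_SECRET_NOT_FOUND) when absent.
    String awsSecret = secrets.getS3UserSecretString("testuser@EXAMPLE.COM");
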
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmBucketInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmBucketInfoCodec.java
deleted file mode 100644
index 8f4d0fc..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmBucketInfoCodec.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.codec;
-
-import java.io.IOException;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.hdds.utils.db.Codec;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-
-/**
- * Codec to encode OmBucketInfo as byte array.
- */
-public class OmBucketInfoCodec implements Codec<OmBucketInfo> {
-
-  @Override
-  public byte[] toPersistedFormat(OmBucketInfo object) throws IOException {
-    Preconditions
-        .checkNotNull(object, "Null object can't be converted to byte array.");
-    return object.getProtobuf().toByteArray();
-  }
-
-  @Override
-  public OmBucketInfo fromPersistedFormat(byte[] rawData) throws IOException {
-    Preconditions
-        .checkNotNull(rawData,
-            "Null byte array can't converted to real object.");
-    try {
-      return OmBucketInfo.getFromProtobuf(BucketInfo.parseFrom(rawData));
-    } catch (InvalidProtocolBufferException e) {
-      throw new IllegalArgumentException(
-          "Can't encode the the raw data from the byte array", e);
-    }
-  }
-
-}
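
The codec deleted above and the codecs that follow all implement the same
two-method Codec contract; a round-trip sketch for the bucket codec
(bucketInfo is an assumed OmBucketInfo instance, and both calls may throw
IOException):

    OmBucketInfoCodec codec = new OmBucketInfoCodec();
    byte[] bytes = codec.toPersistedFormat(bucketInfo);      // protobuf bytes
    OmBucketInfo decoded = codec.fromPersistedFormat(bytes); // parse back
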
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
deleted file mode 100644
index 0c52a24..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.codec;
-
-import java.io.IOException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
-import org.apache.hadoop.hdds.utils.db.Codec;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-
-/**
- * Codec to encode OmKeyInfo as byte array.
- */
-public class OmKeyInfoCodec implements Codec<OmKeyInfo> {
-
-  @Override
-  public byte[] toPersistedFormat(OmKeyInfo object) throws IOException {
-    Preconditions
-        .checkNotNull(object, "Null object can't be converted to byte array.");
-    return object.getProtobuf().toByteArray();
-  }
-
-  @Override
-  public OmKeyInfo fromPersistedFormat(byte[] rawData) throws IOException {
-    Preconditions
-        .checkNotNull(rawData,
-            "Null byte array can't converted to real object.");
-    try {
-      return OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(rawData));
-    } catch (InvalidProtocolBufferException e) {
-      throw new IllegalArgumentException(
-          "Can't encode the the raw data from the byte array", e);
-    }
-  }
-
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmMultipartKeyInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmMultipartKeyInfoCodec.java
deleted file mode 100644
index 4f6a7b1..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmMultipartKeyInfoCodec.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.codec;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-import java.io.IOException;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.hdds.utils.db.Codec;
-
-
-/**
- * Codec to encode OmMultipartKeyInfo as byte array.
- */
-public class OmMultipartKeyInfoCodec implements Codec<OmMultipartKeyInfo> {
-
-  @Override
-  public byte[] toPersistedFormat(OmMultipartKeyInfo object)
-      throws IOException {
-    Preconditions.checkNotNull(object,
-        "Null object can't be converted to byte array.");
-    return object.getProto().toByteArray();
-
-  }
-
-  /**
-   * Constructs {@link OmMultipartKeyInfo} from a byte[]. Throws an
-   * IllegalArgumentException if the bytes cannot be parsed.
-   */
-  @Override
-  public OmMultipartKeyInfo fromPersistedFormat(byte[] rawData)
-      throws IOException {
-    Preconditions.checkNotNull(rawData,
-        "Null byte array can't converted to real object.");
-    try {
-      return OmMultipartKeyInfo.getFromProto(OzoneManagerProtocolProtos
-          .MultipartKeyInfo.parseFrom(rawData));
-    } catch (InvalidProtocolBufferException e) {
-      throw new IllegalArgumentException(
-          "Can't encode the the raw data from the byte array", e);
-    }
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java
deleted file mode 100644
index df3c90d..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.codec;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrefixInfo;
-
-import org.apache.hadoop.hdds.utils.db.Codec;
-
-import java.io.IOException;
-
-/**
- * Codec to encode OmPrefixInfo as a byte array.
- */
-public class OmPrefixInfoCodec implements Codec<OmPrefixInfo> {
-
-  @Override
-  public byte[] toPersistedFormat(OmPrefixInfo object) throws IOException {
-    Preconditions
-        .checkNotNull(object, "Null object can't be converted to byte array.");
-    return object.getProtobuf().toByteArray();
-  }
-
-  @Override
-  public OmPrefixInfo fromPersistedFormat(byte[] rawData) throws IOException {
-    Preconditions
-        .checkNotNull(rawData,
-            "Null byte array can't converted to real object.");
-    try {
-      return OmPrefixInfo.getFromProtobuf(PrefixInfo.parseFrom(rawData));
-    } catch (InvalidProtocolBufferException e) {
-      throw new IllegalArgumentException(
-          "Can't encode the the raw data from the byte array", e);
-    }
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmVolumeArgsCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmVolumeArgsCodec.java
deleted file mode 100644
index e283e92..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmVolumeArgsCodec.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.codec;
-
-import java.io.IOException;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.hdds.utils.db.Codec;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-
-/**
- * Codec to encode OmVolumeArgs as a byte array.
- */
-public class OmVolumeArgsCodec implements Codec<OmVolumeArgs> {
-
-  @Override
-  public byte[] toPersistedFormat(OmVolumeArgs object) throws IOException {
-    Preconditions
-        .checkNotNull(object, "Null object can't be converted to byte array.");
-    return object.getProtobuf().toByteArray();
-  }
-
-  @Override
-  public OmVolumeArgs fromPersistedFormat(byte[] rawData) throws IOException {
-    Preconditions
-        .checkNotNull(rawData,
-            "Null byte array can't converted to real object.");
-    try {
-      return OmVolumeArgs.getFromProtobuf(VolumeInfo.parseFrom(rawData));
-    } catch (InvalidProtocolBufferException e) {
-      throw new IllegalArgumentException(
-          "Can't encode the the raw data from the byte array", e);
-    }
-  }
-
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java
deleted file mode 100644
index a0ef4a5..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om.codec;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.hadoop.hdds.utils.db.Codec;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .RepeatedKeyInfo;
-
-import java.io.IOException;
-
-/**
- * Codec to encode RepeatedOmKeyInfo as a byte array.
- */
-public class RepeatedOmKeyInfoCodec implements Codec<RepeatedOmKeyInfo> {
-  @Override
-  public byte[] toPersistedFormat(RepeatedOmKeyInfo object)
-      throws IOException {
-    Preconditions.checkNotNull(object,
-        "Null object can't be converted to byte array.");
-    return object.getProto().toByteArray();
-  }
-
-  @Override
-  public RepeatedOmKeyInfo fromPersistedFormat(byte[] rawData)
-      throws IOException {
-    Preconditions.checkNotNull(rawData,
-        "Null byte array can't converted to real object.");
-    try {
-      return RepeatedOmKeyInfo.getFromProto(RepeatedKeyInfo.parseFrom(rawData));
-    } catch (InvalidProtocolBufferException e) {
-      throw new IllegalArgumentException(
-          "Can't encode the the raw data from the byte array", e);
-    }
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/S3SecretValueCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/S3SecretValueCodec.java
deleted file mode 100644
index 7ea3fda..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/S3SecretValueCodec.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.codec;
-
-import java.io.IOException;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.hdds.utils.db.Codec;
-
-/**
- * Codec to encode S3SecretValue as a byte array.
- */
-public class S3SecretValueCodec implements Codec<S3SecretValue> {
-
-  @Override
-  public byte[] toPersistedFormat(S3SecretValue object) throws IOException {
-    Preconditions
-        .checkNotNull(object, "Null object can't be converted to byte array.");
-    return object.getProtobuf().toByteArray();
-  }
-
-  @Override
-  public S3SecretValue fromPersistedFormat(byte[] rawData) throws IOException {
-    Preconditions
-        .checkNotNull(rawData,
-            "Null byte array can't converted to real object.");
-    try {
-      return S3SecretValue.fromProtobuf(
-          OzoneManagerProtocolProtos.S3Secret.parseFrom(rawData));
-    } catch (InvalidProtocolBufferException e) {
-      throw new IllegalArgumentException(
-          "Can't encode the the raw data from the byte array", e);
-    }
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java
deleted file mode 100644
index 626fa01..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.codec;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.hdds.utils.db.Codec;
-
-import java.io.IOException;
-
-/**
- * Codec to encode OzoneTokenIdentifier as a byte array.
- */
-public class TokenIdentifierCodec implements Codec<OzoneTokenIdentifier> {
-
-  @Override
-  public byte[] toPersistedFormat(OzoneTokenIdentifier object) {
-    Preconditions
-        .checkNotNull(object, "Null object can't be converted to byte array.");
-    return object.getBytes();
-  }
-
-  @Override
-  public OzoneTokenIdentifier fromPersistedFormat(byte[] rawData)
-      throws IOException {
-    Preconditions.checkNotNull(rawData,
-        "Null byte array can't converted to real object.");
-    try {
-      return OzoneTokenIdentifier.readProtoBuf(rawData);
-    } catch (InvalidProtocolBufferException e) {
-      throw new IllegalArgumentException(
-          "Can't encode the the raw data from the byte array", e);
-    }
-  }
-
-}
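
This codec differs slightly from its siblings: serialization delegates to the
identifier's own getBytes(), while deserialization goes through
OzoneTokenIdentifier.readProtoBuf(). A short sketch exercising both
directions (the wrapper class is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec;
    import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;

    final class TokenCodecCheck {
      private TokenCodecCheck() {
      }

      // Round-trips a token identifier through the codec: getBytes() on the
      // way out, readProtoBuf() on the way back in.
      static OzoneTokenIdentifier roundTrip(OzoneTokenIdentifier id)
          throws IOException {
        TokenIdentifierCodec codec = new TokenIdentifierCodec();
        return codec.fromPersistedFormat(codec.toPersistedFormat(id));
      }
    }
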
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/UserVolumeInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/UserVolumeInfoCodec.java
deleted file mode 100644
index 2545454..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/UserVolumeInfoCodec.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.codec;
-
-import java.io.IOException;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.UserVolumeInfo;
-import org.apache.hadoop.hdds.utils.db.Codec;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-
-/**
- * Codec to encode UserVolumeInfo as a byte array.
- */
-public class UserVolumeInfoCodec implements Codec<UserVolumeInfo> {
-
-  @Override
-  public byte[] toPersistedFormat(UserVolumeInfo object) throws IOException {
-    Preconditions
-        .checkNotNull(object, "Null object can't be converted to byte array.");
-    return object.toByteArray();
-  }
-
-  @Override
-  public UserVolumeInfo fromPersistedFormat(byte[] rawData) throws IOException {
-    Preconditions
-        .checkNotNull(rawData,
-            "Null byte array can't converted to real object.");
-    try {
-      return UserVolumeInfo.parseFrom(rawData);
-    } catch (InvalidProtocolBufferException e) {
-      throw new IllegalArgumentException(
-          "Can't encode the the raw data from the byte array", e);
-    }
-  }
-
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java
deleted file mode 100644
index df6ed9c..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Utility classes to encode/decode DTO objects to/from byte arrays.
- */
-package org.apache.hadoop.ozone.om.codec;
\ No newline at end of file
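
The package description above summarizes the role of these classes: per-type
converters between OM DTOs and the byte arrays stored in RocksDB tables. A
hypothetical registry shape, illustrative only (the actual table wiring lived
in the OM metadata manager and is not shown in this hunk):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hdds.utils.db.Codec;

    // Illustrative: a type-to-codec lookup similar in spirit to how the OM
    // metadata store typed its RocksDB tables.
    public final class SimpleCodecRegistry {
      private final Map<Class<?>, Codec<?>> codecs = new HashMap<>();

      public <T> void register(Class<T> type, Codec<T> codec) {
        codecs.put(type, codec);
      }

      @SuppressWarnings("unchecked")
      public <T> byte[] serialize(Class<T> type, T value) throws IOException {
        return ((Codec<T>) codecs.get(type)).toPersistedFormat(value);
      }

      @SuppressWarnings("unchecked")
      public <T> T deserialize(Class<T> type, byte[] raw) throws IOException {
        return ((Codec<T>) codecs.get(type)).fromPersistedFormat(raw);
      }
    }
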
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/NotLeaderException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/NotLeaderException.java
deleted file mode 100644
index 974ab0e..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/NotLeaderException.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.exceptions;
-
-import java.io.IOException;
-
-/**
- * Exception thrown by
- * {@link org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB} when
- * a read request is received by a non-leader OM node.
- */
-public class NotLeaderException extends IOException {
-
-  private final String currentPeerId;
-  private final String leaderPeerId;
-
-  public NotLeaderException(String currentPeerIdStr) {
-    super("OM " + currentPeerIdStr + " is not the leader. Could not " +
-        "determine the leader node.");
-    this.currentPeerId = currentPeerIdStr;
-    this.leaderPeerId = null;
-  }
-
-  public NotLeaderException(String currentPeerIdStr,
-      String suggestedLeaderPeerIdStr) {
-    super("OM " + currentPeerIdStr + " is not the leader. Suggested leader is "
-        + suggestedLeaderPeerIdStr);
-    this.currentPeerId = currentPeerIdStr;
-    this.leaderPeerId = suggestedLeaderPeerIdStr;
-  }
-
-  public String getSuggestedLeaderNodeId() {
-    return leaderPeerId;
-  }
-}
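
NotLeaderException optionally carries a suggested leader id, which the
client-side failover machinery (see OMFailoverProxyProvider later in this
change) uses to jump directly to the right node. A minimal handling sketch;
the OmCall interface and wrapper class are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.ozone.om.exceptions.NotLeaderException;
    import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;

    final class LeaderAwareCall {
      interface OmCall {
        void call() throws IOException;
      }

      private LeaderAwareCall() {
      }

      // Runs one OM call; on NotLeaderException, steers the proxy provider
      // toward the suggested leader (or the next peer if none was given).
      static void runOnce(OmCall omCall, OMFailoverProxyProvider provider)
          throws IOException {
        try {
          omCall.call();
        } catch (NotLeaderException e) {
          provider.performFailoverIfRequired(e.getSuggestedLeaderNodeId());
        }
      }
    }
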
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
deleted file mode 100644
index 268471a..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
+++ /dev/null
@@ -1,216 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.exceptions;
-
-import java.io.IOException;
-
-/**
- * Exception thrown by Ozone Manager.
- */
-public class OMException extends IOException {
-
-  public static final String STATUS_CODE = "STATUS_CODE=";
-  private final OMException.ResultCodes result;
-
-  /**
-   * Constructs an {@code OMException} with {@code null}
-   * as its error detail message.
-   */
-  public OMException(OMException.ResultCodes result) {
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code OMException} with the specified detail message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the
-   * {@link #getMessage()} method)
-   */
-  public OMException(String message, OMException.ResultCodes result) {
-    super(message);
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code OMException} with the specified detail message
-   * and cause.
-   * <p>
-   * <p> Note that the detail message associated with {@code cause} is
-   * <i>not</i> automatically incorporated into this exception's detail
-   * message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the
-   * {@link #getMessage()} method)
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   * @since 1.6
-   */
-  public OMException(String message, Throwable cause,
-      OMException.ResultCodes result) {
-    super(message, cause);
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code OMException} with the specified cause and a
-   * detail message of {@code (cause==null ? null : cause.toString())}
-   * (which typically contains the class and detail message of {@code cause}).
-   * This constructor is useful for IO exceptions that are little more
-   * than wrappers for other throwables.
-   *
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   * @since 1.6
-   */
-  public OMException(Throwable cause, OMException.ResultCodes result) {
-    super(cause);
-    this.result = result;
-  }
-
-  /**
-   * Returns resultCode.
-   * @return ResultCode
-   */
-  public OMException.ResultCodes getResult() {
-    return result;
-  }
-
-  @Override
-  public String toString() {
-    return result + " " + super.toString();
-  }
-  /**
-   * Error codes to make it easy to decode these exceptions.
-   */
-  public enum ResultCodes {
-
-    OK,
-
-    VOLUME_NOT_UNIQUE,
-
-    VOLUME_NOT_FOUND,
-
-    VOLUME_NOT_EMPTY,
-
-    VOLUME_ALREADY_EXISTS,
-
-    USER_NOT_FOUND,
-
-    USER_TOO_MANY_VOLUMES,
-
-    BUCKET_NOT_FOUND,
-
-    BUCKET_NOT_EMPTY,
-
-    BUCKET_ALREADY_EXISTS,
-
-    KEY_ALREADY_EXISTS,
-
-    KEY_NOT_FOUND,
-
-    INVALID_KEY_NAME,
-
-    ACCESS_DENIED,
-
-    INTERNAL_ERROR,
-
-    KEY_ALLOCATION_ERROR,
-
-    KEY_DELETION_ERROR,
-
-    KEY_RENAME_ERROR,
-
-    METADATA_ERROR,
-
-    OM_NOT_INITIALIZED,
-
-    SCM_VERSION_MISMATCH_ERROR,
-
-    S3_BUCKET_NOT_FOUND,
-
-    S3_BUCKET_ALREADY_EXISTS,
-
-    INITIATE_MULTIPART_UPLOAD_ERROR,
-
-    MULTIPART_UPLOAD_PARTFILE_ERROR,
-
-    NO_SUCH_MULTIPART_UPLOAD_ERROR,
-
-    MISMATCH_MULTIPART_LIST,
-
-    MISSING_UPLOAD_PARTS,
-
-    COMPLETE_MULTIPART_UPLOAD_ERROR,
-
-    ENTITY_TOO_SMALL,
-
-    ABORT_MULTIPART_UPLOAD_FAILED,
-
-    S3_SECRET_NOT_FOUND,
-
-    INVALID_AUTH_METHOD,
-
-    INVALID_TOKEN,
-
-    TOKEN_EXPIRED,
-
-    TOKEN_ERROR_OTHER,
-
-    LIST_MULTIPART_UPLOAD_PARTS_FAILED,
-
-    SCM_IN_SAFE_MODE,
-
-    INVALID_REQUEST,
-
-    BUCKET_ENCRYPTION_KEY_NOT_FOUND,
-
-    UNKNOWN_CIPHER_SUITE,
-
-    INVALID_KMS_PROVIDER,
-
-    TOKEN_CREATION_ERROR,
-
-    FILE_NOT_FOUND,
-
-    DIRECTORY_NOT_FOUND,
-
-    FILE_ALREADY_EXISTS,
-
-    NOT_A_FILE,
-
-    PERMISSION_DENIED, // Error codes used during acl validation
-
-    TIMEOUT, // Error codes used during acl validation
-
-    PREFIX_NOT_FOUND,
-
-    S3_BUCKET_INVALID_LENGTH,
-
-    RATIS_ERROR, // Error in Ratis server
-
-    INVALID_PATH_IN_ACL_REQUEST, // Error code when path name is invalid during
-    // acl requests.
-
-    USER_MISMATCH // Error code when the requested user name differs from
-    // the remote user.
-  }
-}
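
The ResultCodes enum lets callers branch on a stable error code instead of
parsing exception text. A usage sketch; the helper methods and the retry
policy shown are illustrative, not part of the deleted API:

    import org.apache.hadoop.ozone.om.exceptions.OMException;
    import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;

    final class OmErrorHandling {
      private OmErrorHandling() {
      }

      // Throw with a ResultCode so callers can branch on getResult().
      static void requireVolume(boolean volumeExists, String volume)
          throws OMException {
        if (!volumeExists) {
          throw new OMException("Volume " + volume + " not found",
              ResultCodes.VOLUME_NOT_FOUND);
        }
      }

      // Illustrative policy only: treat Ratis and timeout errors as
      // retriable and everything else as fatal.
      static boolean isRetriable(OMException e) {
        return e.getResult() == ResultCodes.RATIS_ERROR
            || e.getResult() == ResultCodes.TIMEOUT;
      }
    }
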
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java
deleted file mode 100644
index 5091545..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.exceptions;
-// Exceptions thrown by the OM.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
deleted file mode 100644
index 32684de..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
+++ /dev/null
@@ -1,294 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.ha;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.retry.FailoverProxyProvider;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-
-/**
- * A failover proxy provider implementation which allows clients to configure
- * multiple OMs to connect to. In case of OM failover, the client can try
- * connecting to another OM node from the list of proxies.
- */
-public class OMFailoverProxyProvider implements
-    FailoverProxyProvider<OzoneManagerProtocolPB>, Closeable {
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(OMFailoverProxyProvider.class);
-
-  // Map of OMNodeID to its proxy
-  private Map<String, ProxyInfo<OzoneManagerProtocolPB>> omProxies;
-  private Map<String, OMProxyInfo> omProxyInfos;
-  private List<String> omNodeIDList;
-
-  private String currentProxyOMNodeId;
-  private int currentProxyIndex;
-
-  private final Configuration conf;
-  private final long omVersion;
-  private final UserGroupInformation ugi;
-  private final Text delegationTokenService;
-
-  private final String omServiceId;
-
-  public OMFailoverProxyProvider(OzoneConfiguration configuration,
-      UserGroupInformation ugi, String omServiceId) throws IOException {
-    this.conf = configuration;
-    this.omVersion = RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
-    this.ugi = ugi;
-    this.omServiceId = omServiceId;
-    loadOMClientConfigs(conf, this.omServiceId);
-    this.delegationTokenService = computeDelegationTokenService();
-
-    currentProxyIndex = 0;
-    currentProxyOMNodeId = omNodeIDList.get(currentProxyIndex);
-  }
-
-  public OMFailoverProxyProvider(OzoneConfiguration configuration,
-      UserGroupInformation ugi) throws IOException {
-    this(configuration, ugi, null);
-  }
-
-  private void loadOMClientConfigs(Configuration config, String omSvcId)
-      throws IOException {
-    this.omProxies = new HashMap<>();
-    this.omProxyInfos = new HashMap<>();
-    this.omNodeIDList = new ArrayList<>();
-
-    Collection<String> omServiceIds = Collections.singletonList(omSvcId);
-
-    for (String serviceId : OmUtils.emptyAsSingletonNull(omServiceIds)) {
-      Collection<String> omNodeIds = OmUtils.getOMNodeIds(config, serviceId);
-
-      for (String nodeId : OmUtils.emptyAsSingletonNull(omNodeIds)) {
-
-        String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
-            serviceId, nodeId);
-        String rpcAddrStr = OmUtils.getOmRpcAddress(config, rpcAddrKey);
-        if (rpcAddrStr == null) {
-          continue;
-        }
-
-        OMProxyInfo omProxyInfo = new OMProxyInfo(nodeId, rpcAddrStr);
-
-        if (omProxyInfo.getAddress() != null) {
-
-          ProxyInfo<OzoneManagerProtocolPB> proxyInfo =
-              new ProxyInfo<>(null, omProxyInfo.toString());
-
-          // For a non-HA OM setup, nodeId might be null. If so, we assign it
-          // a dummy value
-          if (nodeId == null) {
-            nodeId = OzoneConsts.OM_NODE_ID_DUMMY;
-          }
-          omProxies.put(nodeId, proxyInfo);
-          omProxyInfos.put(nodeId, omProxyInfo);
-          omNodeIDList.add(nodeId);
-        } else {
-          LOG.error("Failed to create OM proxy for {} at address {}",
-              nodeId, rpcAddrStr);
-        }
-      }
-    }
-
-    if (omProxies.isEmpty()) {
-      throw new IllegalArgumentException("Could not find any configured " +
-          "addresses for OM. Please configure the system with "
-          + OZONE_OM_ADDRESS_KEY);
-    }
-  }
-
-  @VisibleForTesting
-  public synchronized String getCurrentProxyOMNodeId() {
-    return currentProxyOMNodeId;
-  }
-
-  private OzoneManagerProtocolPB createOMProxy(InetSocketAddress omAddress)
-      throws IOException {
-    RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
-        ProtobufRpcEngine.class);
-    return RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, omAddress, ugi,
-        conf, NetUtils.getDefaultSocketFactory(conf),
-        Client.getRpcTimeout(conf));
-  }
-
-  /**
-   * Get the proxy object which should be used until the next failover event
-   * occurs. The RPC proxy object is initialized lazily.
-   * @return the OM proxy object to invoke methods upon
-   */
-  @Override
-  public synchronized ProxyInfo getProxy() {
-    ProxyInfo currentProxyInfo = omProxies.get(currentProxyOMNodeId);
-    createOMProxyIfNeeded(currentProxyInfo, currentProxyOMNodeId);
-    return currentProxyInfo;
-  }
-
-  /**
-   * Creates proxy object if it does not already exist.
-   */
-  private void createOMProxyIfNeeded(ProxyInfo proxyInfo,
-      String nodeId) {
-    if (proxyInfo.proxy == null) {
-      InetSocketAddress address = omProxyInfos.get(nodeId).getAddress();
-      try {
-        proxyInfo.proxy = createOMProxy(address);
-      } catch (IOException ioe) {
-        LOG.error("{} Failed to create RPC proxy to OM at {}",
-            this.getClass().getSimpleName(), address, ioe);
-        throw new RuntimeException(ioe);
-      }
-    }
-  }
-
-  public Text getCurrentProxyDelegationToken() {
-    return delegationTokenService;
-  }
-
-  private Text computeDelegationTokenService() {
-    // For HA, this returns a comma-separated list of the delegation token
-    // services of all OMs.
-    StringBuilder rpcAddress = new StringBuilder();
-    int count = 0;
-    for (Map.Entry<String, OMProxyInfo> omProxyInfoEntry :
-        omProxyInfos.entrySet()) {
-      count++;
-      rpcAddress.append(
-          omProxyInfoEntry.getValue().getDelegationTokenService());
-
-      if (omProxyInfos.size() != count) {
-        rpcAddress.append(",");
-      }
-    }
-
-    return new Text(rpcAddress.toString());
-  }
-
-
-  /**
-   * Called whenever an error warrants failing over, as determined by the
-   * retry policy.
-   */
-  @Override
-  public void performFailover(OzoneManagerProtocolPB currentProxy) {
-    int newProxyIndex = incrementProxyIndex();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Failing over OM proxy to index: {}, nodeId: {}",
-          newProxyIndex, omNodeIDList.get(newProxyIndex));
-    }
-  }
-
-  /**
-   * Update the proxy index to the next proxy in the list.
-   * @return the new proxy index
-   */
-  private synchronized int incrementProxyIndex() {
-    currentProxyIndex = (currentProxyIndex + 1) % omProxies.size();
-    currentProxyOMNodeId = omNodeIDList.get(currentProxyIndex);
-    return currentProxyIndex;
-  }
-
-  @Override
-  public Class<OzoneManagerProtocolPB> getInterface() {
-    return OzoneManagerProtocolPB.class;
-  }
-
-  /**
-   * Performs failover if the leaderOMNodeId returned through OMResponse does
-   * not match the current leaderOMNodeId cached by the proxy provider.
-   */
-  public void performFailoverIfRequired(String newLeaderOMNodeId) {
-    if (newLeaderOMNodeId == null) {
-      LOG.debug("No suggested leader nodeId. Performing failover to next peer" +
-          " node");
-      performFailover(null);
-    } else {
-      if (updateLeaderOMNodeId(newLeaderOMNodeId)) {
-        LOG.debug("Failing over OM proxy to nodeId: {}", newLeaderOMNodeId);
-      }
-    }
-  }
-
-  /**
-   * Failover to the OM proxy specified by the new leader OMNodeId.
-   * @param newLeaderOMNodeId OMNodeId to failover to.
-   * @return true if failover is successful, false otherwise.
-   */
-  synchronized boolean updateLeaderOMNodeId(String newLeaderOMNodeId) {
-    if (!currentProxyOMNodeId.equals(newLeaderOMNodeId)) {
-      if (omProxies.containsKey(newLeaderOMNodeId)) {
-        currentProxyOMNodeId = newLeaderOMNodeId;
-        currentProxyIndex = omNodeIDList.indexOf(currentProxyOMNodeId);
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Close all the proxy objects which have been opened over the lifetime of
-   * the proxy provider.
-   */
-  @Override
-  public synchronized void close() throws IOException {
-    for (ProxyInfo<OzoneManagerProtocolPB> proxy : omProxies.values()) {
-      OzoneManagerProtocolPB omProxy = proxy.proxy;
-      if (omProxy != null) {
-        RPC.stopProxy(omProxy);
-      }
-    }
-  }
-
-  @VisibleForTesting
-  public List<ProxyInfo> getOMProxies() {
-    return new ArrayList<>(omProxies.values());
-  }
-
-  @VisibleForTesting
-  public List<OMProxyInfo> getOMProxyInfos() {
-    return new ArrayList<>(omProxyInfos.values());
-  }
-}
-
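
The provider implements Hadoop's generic FailoverProxyProvider, so clients
normally consume it through RetryProxy rather than calling performFailover
directly. A wiring sketch, assuming the standard org.apache.hadoop.io.retry
API; the factory class and maxFailovers parameter are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;
    import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
    import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
    import org.apache.hadoop.security.UserGroupInformation;

    final class OmProxyFactory {
      private OmProxyFactory() {
      }

      // Wraps the failover provider in a retrying dynamic proxy, so each
      // RPC transparently fails over between the configured OM nodes.
      static OzoneManagerProtocolPB createRetryProxy(OzoneConfiguration conf,
          String omServiceId, int maxFailovers) throws IOException {
        OMFailoverProxyProvider provider = new OMFailoverProxyProvider(conf,
            UserGroupInformation.getCurrentUser(), omServiceId);
        RetryPolicy retryPolicy = RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailovers);
        return (OzoneManagerProtocolPB) RetryProxy.create(
            OzoneManagerProtocolPB.class, provider, retryPolicy);
      }
    }
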
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMProxyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMProxyInfo.java
deleted file mode 100644
index b429ca0..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMProxyInfo.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.ha;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityUtil;
-
-import java.net.InetSocketAddress;
-
-/**
- * Class to store OM proxy information.
- */
-public class OMProxyInfo {
-  private String nodeId;
-  private String rpcAddrStr;
-  private InetSocketAddress rpcAddr;
-  private Text dtService;
-
-  OMProxyInfo(String nodeID, String rpcAddress) {
-    this.nodeId = nodeID;
-    this.rpcAddrStr = rpcAddress;
-    this.rpcAddr = NetUtils.createSocketAddr(rpcAddrStr);
-    this.dtService = SecurityUtil.buildTokenService(rpcAddr);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder()
-        .append("nodeId=")
-        .append(nodeId)
-        .append(",nodeAddress=")
-        .append(rpcAddrStr);
-    return sb.toString();
-  }
-
-  public InetSocketAddress getAddress() {
-    return rpcAddr;
-  }
-
-  public Text getDelegationTokenService() {
-    return dtService;
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
deleted file mode 100644
index a95f09f..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.ha;
-
-/**
- * This package contains the Ozone client's OM proxy classes.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java
deleted file mode 100644
index e1ae0bb..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.crypto.CipherSuite;
-import org.apache.hadoop.crypto.CryptoProtocolVersion;
-
-/**
- * Encryption key info for bucket encryption key.
- */
-public class BucketEncryptionKeyInfo {
-  private final CryptoProtocolVersion version;
-  private final CipherSuite suite;
-  private final String keyName;
-
-  public BucketEncryptionKeyInfo(
-      CryptoProtocolVersion version, CipherSuite suite,
-      String keyName) {
-    this.version = version;
-    this.suite = suite;
-    this.keyName = keyName;
-  }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  public CipherSuite getSuite() {
-    return suite;
-  }
-
-  public CryptoProtocolVersion getVersion() {
-    return version;
-  }
-
-  /**
-   * Builder for BucketEncryptionKeyInfo.
-   */
-  public static class Builder {
-    private CryptoProtocolVersion version;
-    private CipherSuite suite;
-    private String keyName;
-
-    public Builder setKeyName(String name) {
-      this.keyName = name;
-      return this;
-    }
-
-    public Builder setSuite(CipherSuite cs) {
-      this.suite = cs;
-      return this;
-    }
-
-    public Builder setVersion(CryptoProtocolVersion ver) {
-      this.version = ver;
-      return this;
-    }
-
-    public BucketEncryptionKeyInfo build() {
-      return new BucketEncryptionKeyInfo(version, suite, keyName);
-    }
-  }
-}
\ No newline at end of file
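
A short builder-usage sketch; the key name and wrapper class are
illustrative, and in practice the suite/version pair would come from the
KMS-backed bucket-creation path:

    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.crypto.CryptoProtocolVersion;
    import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;

    final class BekSample {
      private BekSample() {
      }

      // Builds an immutable BucketEncryptionKeyInfo via its Builder.
      static BucketEncryptionKeyInfo sample() {
        return new BucketEncryptionKeyInfo.Builder()
            .setKeyName("bucketKey1")
            .setSuite(CipherSuite.AES_CTR_NOPADDING)
            .setVersion(CryptoProtocolVersion.ENCRYPTION_ZONES)
            .build();
      }
    }
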
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/EncryptionBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/EncryptionBucketInfo.java
deleted file mode 100644
index 0f82fe5..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/EncryptionBucketInfo.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.crypto.CipherSuite;
-import org.apache.hadoop.crypto.CryptoProtocolVersion;
-
-/**
- * A simple class for representing an encryption bucket. Presently an encryption
- * bucket only has a path (the root of the encryption zone), a key name, and a
- * unique id. The id is used to implement batched listing of encryption zones.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class EncryptionBucketInfo {
-
-  private final CipherSuite suite;
-  private final CryptoProtocolVersion version;
-  private final String keyName;
-
-  private final long id;
-  private final String path;
-
-  public EncryptionBucketInfo(long id, String path, CipherSuite suite,
-                        CryptoProtocolVersion version, String keyName) {
-    this.id = id;
-    this.path = path;
-    this.suite = suite;
-    this.version = version;
-    this.keyName = keyName;
-  }
-
-  public long getId() {
-    return id;
-  }
-
-  public String getPath() {
-    return path;
-  }
-
-  public CipherSuite getSuite() {
-    return suite;
-  }
-
-  public CryptoProtocolVersion getVersion() {
-    return version;
-  }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(13, 31)
-        .append(id)
-        .append(path)
-        .append(suite)
-        .append(version)
-        .append(keyName)
-        .toHashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) {
-      return false;
-    }
-    if (obj == this) {
-      return true;
-    }
-    if (obj.getClass() != getClass()) {
-      return false;
-    }
-
-    EncryptionBucketInfo rhs = (EncryptionBucketInfo) obj;
-    return new EqualsBuilder().
-        append(id, rhs.id).
-        append(path, rhs.path).
-        append(suite, rhs.suite).
-        append(version, rhs.version).
-        append(keyName, rhs.keyName).
-        isEquals();
-  }
-
-  @Override
-  public String toString() {
-    return "EncryptionBucketInfo [id=" + id +
-        ", path=" + path +
-        ", suite=" + suite +
-        ", version=" + version +
-        ", keyName=" + keyName + "]";
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java
deleted file mode 100644
index 4da8d2b..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
-
-/**
- * Convert from/to hdds KeyValue protobuf structure.
- */
-public final class KeyValueUtil {
-  private KeyValueUtil() {
-  }
-
-  /**
-   * Parse Key,Value map data from protobuf representation.
-   */
-  public static Map<String, String> getFromProtobuf(List<KeyValue> metadata) {
-    return metadata.stream()
-        .collect(Collectors.toMap(KeyValue::getKey,
-            KeyValue::getValue));
-  }
-
-  /**
-   * Encode key value map to protobuf.
-   */
-  public static List<KeyValue> toProtobuf(Map<String, String> keyValueMap) {
-    List<KeyValue> metadataList = new LinkedList<>();
-    for (Map.Entry<String, String> entry : keyValueMap.entrySet()) {
-      metadataList.add(KeyValue.newBuilder().setKey(entry.getKey()).
-          setValue(entry.getValue()).build());
-    }
-    return metadataList;
-  }
-}
\ No newline at end of file
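
KeyValueUtil is the bridge between Java maps and the repeated KeyValue
protobuf field used for custom metadata. A round-trip sketch (the wrapper
class is illustrative):

    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
    import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;

    final class MetadataRoundTrip {
      private MetadataRoundTrip() {
      }

      // Encodes custom metadata to the wire form and back. The entries
      // survive the round trip, but iteration order is not preserved since
      // getFromProtobuf collects into an unordered map.
      static Map<String, String> roundTrip(Map<String, String> metadata) {
        List<KeyValue> wire = KeyValueUtil.toProtobuf(metadata);
        return KeyValueUtil.getFromProtobuf(wire);
      }
    }
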
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java
deleted file mode 100644
index c1930c8..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.ratis.RaftConfigKeys;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.conf.RaftProperties;
-import org.apache.ratis.grpc.GrpcConfigKeys;
-import org.apache.ratis.protocol.Message;
-import org.apache.ratis.protocol.RaftClientReply;
-import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.RaftPeerId;
-import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.util.SizeInBytes;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Ratis helper methods for OM Ratis server and client.
- */
-public final class OMRatisHelper {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      OMRatisHelper.class);
-
-  private OMRatisHelper() {
-  }
-
-  /**
-   * Creates a new RaftClient object.
-   *
-   * @param rpcType     RPC type used to reach the Ratis server
-   * @param omId        OM id of the client
-   * @param group       RaftGroup
-   * @param retryPolicy Retry policy
-   * @param conf        Configuration, used to size gRPC messages
-   * @return RaftClient object
-   */
-  public static RaftClient newRaftClient(RpcType rpcType, String omId,
-      RaftGroup group, RetryPolicy retryPolicy, Configuration conf) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, omId, group);
-    }
-    final RaftProperties properties = new RaftProperties();
-    RaftConfigKeys.Rpc.setType(properties, rpcType);
-
-    final int raftSegmentPreallocatedSize = (int) conf.getStorageSize(
-        OMConfigKeys.OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT,
-        StorageUnit.BYTES);
-    GrpcConfigKeys.setMessageSizeMax(
-        properties, SizeInBytes.valueOf(raftSegmentPreallocatedSize));
-
-    return RaftClient.newBuilder()
-        .setRaftGroup(group)
-        .setLeaderId(getRaftPeerId(omId))
-        .setProperties(properties)
-        .setRetryPolicy(retryPolicy)
-        .build();
-  }
-
-  static RaftPeerId getRaftPeerId(String omId) {
-    return RaftPeerId.valueOf(omId);
-  }
-
-  public static ByteString convertRequestToByteString(OMRequest request) {
-    byte[] requestBytes = request.toByteArray();
-    return ByteString.copyFrom(requestBytes);
-  }
-
-  public static OMRequest convertByteStringToOMRequest(ByteString byteString)
-      throws InvalidProtocolBufferException {
-    byte[] bytes = byteString.toByteArray();
-    return OMRequest.parseFrom(bytes);
-  }
-
-  public static Message convertResponseToMessage(OMResponse response) {
-    byte[] requestBytes = response.toByteArray();
-    return Message.valueOf(ByteString.copyFrom(requestBytes));
-  }
-
-  public static OMResponse getOMResponseFromRaftClientReply(
-      RaftClientReply reply) throws InvalidProtocolBufferException {
-    byte[] bytes = reply.getMessage().getContent().toByteArray();
-    return OMResponse.newBuilder(OMResponse.parseFrom(bytes))
-        .setLeaderOMNodeId(reply.getReplierId())
-        .build();
-  }
-}
\ No newline at end of file
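
Note the two protobuf runtimes in play above: OMRequest/OMResponse use plain
com.google.protobuf, while Ratis messages use the shaded
org.apache.ratis.thirdparty copy, which is why requests cross the Ratis
boundary as raw bytes. A conversion sketch (the wrapper class is
illustrative):

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
    import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

    final class RatisPayloadRoundTrip {
      private RatisPayloadRoundTrip() {
      }

      // Converts an OM request into the shaded-protobuf ByteString that
      // Ratis replicates, then parses it back on the other side.
      static OMRequest roundTrip(OMRequest request)
          throws InvalidProtocolBufferException {
        ByteString payload = OMRatisHelper.convertRequestToByteString(request);
        return OMRatisHelper.convertByteStringToOMRequest(payload);
      }
    }
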
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
deleted file mode 100644
index aa6e8f5..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.Auditable;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs;
-
-import com.google.common.base.Preconditions;
-
-/**
- * A class that encapsulates Bucket Arguments.
- */
-public final class OmBucketArgs extends WithMetadata implements Auditable {
-  /**
-   * Name of the volume to which the bucket belongs.
-   */
-  private final String volumeName;
-  /**
-   * Name of the bucket.
-   */
-  private final String bucketName;
-  /**
-   * Bucket Version flag.
-   */
-  private Boolean isVersionEnabled;
-  /**
-   * Type of storage to be used for this bucket.
-   * [RAM_DISK, SSD, DISK, ARCHIVE]
-   */
-  private StorageType storageType;
-
-  /**
-   * Private constructor, constructed via builder.
-   * @param volumeName - Volume name.
-   * @param bucketName - Bucket name.
-   * @param isVersionEnabled - Bucket version flag.
-   * @param storageType - Storage type to be used.
-   * @param metadata - Custom key-value metadata.
-   */
-  private OmBucketArgs(String volumeName, String bucketName,
-      Boolean isVersionEnabled, StorageType storageType,
-      Map<String, String> metadata) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.isVersionEnabled = isVersionEnabled;
-    this.storageType = storageType;
-    this.metadata = metadata;
-  }
-
-  /**
-   * Returns the Volume Name.
-   * @return String.
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Returns the Bucket Name.
-   * @return String
-   */
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  /**
-   * Returns true if bucket version is enabled, else false.
-   * @return isVersionEnabled
-   */
-  public Boolean getIsVersionEnabled() {
-    return isVersionEnabled;
-  }
-
-  /**
-   * Returns the type of storage to be used.
-   * @return StorageType
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Returns a new builder that builds an OmBucketArgs.
-   *
-   * @return Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  @Override
-  public Map<String, String> toAuditMap() {
-    Map<String, String> auditMap = new LinkedHashMap<>();
-    auditMap.put(OzoneConsts.VOLUME, this.volumeName);
-    auditMap.put(OzoneConsts.BUCKET, this.bucketName);
-    auditMap.put(OzoneConsts.GDPR_FLAG,
-        this.metadata.get(OzoneConsts.GDPR_FLAG));
-    auditMap.put(OzoneConsts.IS_VERSION_ENABLED,
-                String.valueOf(this.isVersionEnabled));
-    if (this.storageType != null) {
-      auditMap.put(OzoneConsts.STORAGE_TYPE, this.storageType.name());
-    }
-    return auditMap;
-  }
-
-  /**
-   * Builder for OmBucketArgs.
-   */
-  public static class Builder {
-    private String volumeName;
-    private String bucketName;
-    private Boolean isVersionEnabled;
-    private StorageType storageType;
-    private Map<String, String> metadata;
-
-    public Builder setVolumeName(String volume) {
-      this.volumeName = volume;
-      return this;
-    }
-
-    public Builder setBucketName(String bucket) {
-      this.bucketName = bucket;
-      return this;
-    }
-
-    public Builder setIsVersionEnabled(Boolean versionFlag) {
-      this.isVersionEnabled = versionFlag;
-      return this;
-    }
-
-    public Builder addMetadata(Map<String, String> metadataMap) {
-      this.metadata = metadataMap;
-      return this;
-    }
-
-    public Builder setStorageType(StorageType storage) {
-      this.storageType = storage;
-      return this;
-    }
-
-    /**
-     * Constructs the OmBucketArgs.
-     * @return instance of OmBucketArgs.
-     */
-    public OmBucketArgs build() {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      return new OmBucketArgs(volumeName, bucketName, isVersionEnabled,
-          storageType, metadata);
-    }
-  }
-
-  /**
-   * Creates BucketArgs protobuf from OmBucketArgs.
-   */
-  public BucketArgs getProtobuf() {
-    BucketArgs.Builder builder = BucketArgs.newBuilder();
-    builder.setVolumeName(volumeName)
-        .setBucketName(bucketName);
-    if (isVersionEnabled != null) {
-      builder.setIsVersionEnabled(isVersionEnabled);
-    }
-    if (storageType != null) {
-      builder.setStorageType(storageType.toProto());
-    }
-    return builder.build();
-  }
-
-  /**
-   * Parses BucketArgs protobuf and creates OmBucketArgs.
-   * @param bucketArgs - BucketArgs protobuf message.
-   * @return instance of OmBucketArgs
-   */
-  public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) {
-    return new OmBucketArgs(bucketArgs.getVolumeName(),
-        bucketArgs.getBucketName(),
-        bucketArgs.hasIsVersionEnabled() ?
-            bucketArgs.getIsVersionEnabled() : null,
-        bucketArgs.hasStorageType() ? StorageType.valueOf(
-            bucketArgs.getStorageType()) : null,
-        KeyValueUtil.getFromProtobuf(bucketArgs.getMetadataList()));
-  }
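-
-  // A hypothetical round-trip sketch (assumes an existing OmBucketArgs named
-  // args; illustrative only):
-  //
-  //   BucketArgs proto = args.getProtobuf();
-  //   OmBucketArgs copy = OmBucketArgs.getFromProtobuf(proto);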
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
deleted file mode 100644
index eb10802..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
+++ /dev/null
@@ -1,369 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.Auditable;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .BucketInfo;
-import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
-
-import com.google.common.base.Preconditions;
-
-/**
- * A class that encapsulates Bucket Info.
- */
-public final class OmBucketInfo extends WithMetadata implements Auditable {
-  /**
-   * Name of the volume to which the bucket belongs.
-   */
-  private final String volumeName;
-  /**
-   * Name of the bucket.
-   */
-  private final String bucketName;
-  /**
-   * ACL Information.
-   */
-  private List<OzoneAcl> acls;
-  /**
-   * Bucket Version flag.
-   */
-  private Boolean isVersionEnabled;
-  /**
-   * Type of storage to be used for this bucket.
-   * [RAM_DISK, SSD, DISK, ARCHIVE]
-   */
-  private StorageType storageType;
-  /**
-   * Creation time of bucket.
-   */
-  private final long creationTime;
-
-  /**
-   * Bucket encryption key info if encryption is enabled.
-   */
-  private BucketEncryptionKeyInfo bekInfo;
-
-  /**
-   * Private constructor, constructed via builder.
-   * @param volumeName - Volume name.
-   * @param bucketName - Bucket name.
-   * @param acls - list of ACLs.
-   * @param isVersionEnabled - Bucket version flag.
-   * @param storageType - Storage type to be used.
-   * @param creationTime - Bucket creation time.
-   * @param metadata - metadata.
-   * @param bekInfo - bucket encryption key info.
-   */
-  @SuppressWarnings("checkstyle:ParameterNumber")
-  private OmBucketInfo(String volumeName,
-                       String bucketName,
-                       List<OzoneAcl> acls,
-                       boolean isVersionEnabled,
-                       StorageType storageType,
-                       long creationTime,
-                       Map<String, String> metadata,
-                       BucketEncryptionKeyInfo bekInfo) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.acls = acls;
-    this.isVersionEnabled = isVersionEnabled;
-    this.storageType = storageType;
-    this.creationTime = creationTime;
-    this.metadata = metadata;
-    this.bekInfo = bekInfo;
-  }
-
-  /**
-   * Returns the Volume Name.
-   * @return String.
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Returns the Bucket Name.
-   * @return String
-   */
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  /**
-   * Returns the ACLs associated with this bucket.
-   * @return {@literal List<OzoneAcl>}
-   */
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-
-  /**
-   * Adds an OzoneAcl to the existing ACL list.
-   * @param ozoneAcl - ACL to add.
-   * @return true if the ACL was added, false if it already exists in the
-   * ACL list.
-   */
-  public boolean addAcl(OzoneAcl ozoneAcl) {
-    return OzoneAclUtil.addAcl(acls, ozoneAcl);
-  }
-
-  /**
-   * Removes an OzoneAcl from the existing ACL list.
-   * @param ozoneAcl - ACL to remove.
-   * @return true if the ACL was removed, false if it is not present in the
-   * existing ACL list.
-   */
-  public boolean removeAcl(OzoneAcl ozoneAcl) {
-    return OzoneAclUtil.removeAcl(acls, ozoneAcl);
-  }
-
-  /**
-   * Replaces the existing ACL list with the given ACLs.
-   * @param ozoneAcls - new list of ACLs.
-   * @return true if the ACLs were reset successfully.
-   */
-  public boolean setAcls(List<OzoneAcl> ozoneAcls) {
-    return OzoneAclUtil.setAcl(acls, ozoneAcls);
-  }
-
-  /**
-   * Returns true if bucket version is enabled, else false.
-   * @return isVersionEnabled
-   */
-  public boolean getIsVersionEnabled() {
-    return isVersionEnabled;
-  }
-
-  /**
-   * Returns the type of storage to be used.
-   * @return StorageType
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Returns creation time.
-   *
-   * @return long
-   */
-  public long getCreationTime() {
-    return creationTime;
-  }
-
-  /**
-   * Returns bucket encryption key info.
-   * @return bucket encryption key info
-   */
-  public BucketEncryptionKeyInfo getEncryptionKeyInfo() {
-    return bekInfo;
-  }
-
-  /**
-   * Returns a new builder class that builds an OmBucketInfo.
-   *
-   * @return Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  @Override
-  public Map<String, String> toAuditMap() {
-    Map<String, String> auditMap = new LinkedHashMap<>();
-    auditMap.put(OzoneConsts.VOLUME, this.volumeName);
-    auditMap.put(OzoneConsts.BUCKET, this.bucketName);
-    auditMap.put(OzoneConsts.GDPR_FLAG,
-        this.metadata.get(OzoneConsts.GDPR_FLAG));
-    auditMap.put(OzoneConsts.ACLS,
-        (this.acls != null) ? this.acls.toString() : null);
-    auditMap.put(OzoneConsts.IS_VERSION_ENABLED,
-        String.valueOf(this.isVersionEnabled));
-    auditMap.put(OzoneConsts.STORAGE_TYPE,
-        (this.storageType != null) ? this.storageType.name() : null);
-    auditMap.put(OzoneConsts.CREATION_TIME, String.valueOf(this.creationTime));
-    return auditMap;
-  }
-
-  /**
-   * Builder for OmBucketInfo.
-   */
-  public static class Builder {
-    private String volumeName;
-    private String bucketName;
-    private List<OzoneAcl> acls;
-    private Boolean isVersionEnabled;
-    private StorageType storageType;
-    private long creationTime;
-    private Map<String, String> metadata;
-    private BucketEncryptionKeyInfo bekInfo;
-
-    public Builder() {
-      //Default values
-      this.acls = new LinkedList<>();
-      this.isVersionEnabled = false;
-      this.storageType = StorageType.DISK;
-      this.metadata = new HashMap<>();
-    }
-
-    public Builder setVolumeName(String volume) {
-      this.volumeName = volume;
-      return this;
-    }
-
-    public Builder setBucketName(String bucket) {
-      this.bucketName = bucket;
-      return this;
-    }
-
-    public Builder setAcls(List<OzoneAcl> listOfAcls) {
-      if (listOfAcls != null) {
-        this.acls.addAll(listOfAcls);
-      }
-      return this;
-    }
-
-    public Builder setIsVersionEnabled(Boolean versionFlag) {
-      this.isVersionEnabled = versionFlag;
-      return this;
-    }
-
-    public Builder setStorageType(StorageType storage) {
-      this.storageType = storage;
-      return this;
-    }
-
-    public Builder setCreationTime(long createdOn) {
-      this.creationTime = createdOn;
-      return this;
-    }
-
-    public Builder addMetadata(String key, String value) {
-      metadata.put(key, value);
-      return this;
-    }
-
-    public Builder addAllMetadata(Map<String, String> additionalMetadata) {
-      if (additionalMetadata != null) {
-        metadata.putAll(additionalMetadata);
-      }
-      return this;
-    }
-
-    public Builder setBucketEncryptionKey(
-        BucketEncryptionKeyInfo info) {
-      this.bekInfo = info;
-      return this;
-    }
-
-    /**
-     * Constructs the OmBucketInfo.
-     * @return instance of OmBucketInfo.
-     */
-    public OmBucketInfo build() {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(acls);
-      Preconditions.checkNotNull(isVersionEnabled);
-      Preconditions.checkNotNull(storageType);
-
-      return new OmBucketInfo(volumeName, bucketName, acls,
-          isVersionEnabled, storageType, creationTime, metadata, bekInfo);
-    }
-  }
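-
-  // A hypothetical usage sketch of the builder (names and timestamp are
-  // illustrative, not taken from this class):
-  //
-  //   OmBucketInfo info = OmBucketInfo.newBuilder()
-  //       .setVolumeName("vol1")
-  //       .setBucketName("bucket1")
-  //       .setStorageType(StorageType.DISK)
-  //       .setCreationTime(System.currentTimeMillis())
-  //       .build();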
-
-  /**
-   * Creates BucketInfo protobuf from OmBucketInfo.
-   */
-  public BucketInfo getProtobuf() {
-    BucketInfo.Builder bib =  BucketInfo.newBuilder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .addAllAcls(OzoneAclUtil.toProtobuf(acls))
-        .setIsVersionEnabled(isVersionEnabled)
-        .setStorageType(storageType.toProto())
-        .setCreationTime(creationTime)
-        .addAllMetadata(KeyValueUtil.toProtobuf(metadata));
-    if (bekInfo != null && bekInfo.getKeyName() != null) {
-      bib.setBeinfo(OMPBHelper.convert(bekInfo));
-    }
-    return bib.build();
-  }
-
-  /**
-   * Parses BucketInfo protobuf and creates OmBucketInfo.
-   * @param bucketInfo - BucketInfo protobuf message.
-   * @return instance of OmBucketInfo
-   */
-  public static OmBucketInfo getFromProtobuf(BucketInfo bucketInfo) {
-    OmBucketInfo.Builder obib = OmBucketInfo.newBuilder()
-        .setVolumeName(bucketInfo.getVolumeName())
-        .setBucketName(bucketInfo.getBucketName())
-        .setAcls(bucketInfo.getAclsList().stream().map(
-            OzoneAcl::fromProtobuf).collect(Collectors.toList()))
-        .setIsVersionEnabled(bucketInfo.getIsVersionEnabled())
-        .setStorageType(StorageType.valueOf(bucketInfo.getStorageType()))
-        .setCreationTime(bucketInfo.getCreationTime());
-    if (bucketInfo.getMetadataList() != null) {
-      obib.addAllMetadata(KeyValueUtil
-          .getFromProtobuf(bucketInfo.getMetadataList()));
-    }
-    if (bucketInfo.hasBeinfo()) {
-      obib.setBucketEncryptionKey(OMPBHelper.convert(bucketInfo.getBeinfo()));
-    }
-    return obib.build();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    OmBucketInfo that = (OmBucketInfo) o;
-    return creationTime == that.creationTime &&
-        volumeName.equals(that.volumeName) &&
-        bucketName.equals(that.bucketName) &&
-        Objects.equals(acls, that.acls) &&
-        Objects.equals(isVersionEnabled, that.isVersionEnabled) &&
-        storageType == that.storageType &&
-        Objects.equals(metadata, that.metadata) &&
-        Objects.equals(bekInfo, that.bekInfo);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(volumeName, bucketName);
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
deleted file mode 100644
index 6bca3aa..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.Auditable;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Args for a key. Clients use this to specify a key's attributes on key
- * creation (putKey()).
- */
-public final class OmKeyArgs implements Auditable {
-  private final String volumeName;
-  private final String bucketName;
-  private final String keyName;
-  private long dataSize;
-  private final ReplicationType type;
-  private final ReplicationFactor factor;
-  private List<OmKeyLocationInfo> locationInfoList;
-  private final boolean isMultipartKey;
-  private final String multipartUploadID;
-  private final int multipartUploadPartNumber;
-  private Map<String, String> metadata;
-  private boolean refreshPipeline;
-  private boolean sortDatanodesInPipeline;
-  private List<OzoneAcl> acls;
-
-  @SuppressWarnings("parameternumber")
-  private OmKeyArgs(String volumeName, String bucketName, String keyName,
-      long dataSize, ReplicationType type, ReplicationFactor factor,
-      List<OmKeyLocationInfo> locationInfoList, boolean isMultipart,
-      String uploadID, int partNumber,
-      Map<String, String> metadataMap, boolean refreshPipeline,
-      List<OzoneAcl> acls, boolean sortDatanode) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.keyName = keyName;
-    this.dataSize = dataSize;
-    this.type = type;
-    this.factor = factor;
-    this.locationInfoList = locationInfoList;
-    this.isMultipartKey = isMultipart;
-    this.multipartUploadID = uploadID;
-    this.multipartUploadPartNumber = partNumber;
-    this.metadata = metadataMap;
-    this.refreshPipeline = refreshPipeline;
-    this.acls = acls;
-    this.sortDatanodesInPipeline = sortDatanode;
-  }
-
-  public boolean getIsMultipartKey() {
-    return isMultipartKey;
-  }
-
-  public String getMultipartUploadID() {
-    return multipartUploadID;
-  }
-
-  public int getMultipartUploadPartNumber() {
-    return multipartUploadPartNumber;
-  }
-
-  public ReplicationType getType() {
-    return type;
-  }
-
-  public ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  public long getDataSize() {
-    return dataSize;
-  }
-
-  public void setDataSize(long size) {
-    dataSize = size;
-  }
-
-  public Map<String, String> getMetadata() {
-    return metadata;
-  }
-
-  public void setMetadata(Map<String, String> metadata) {
-    this.metadata = metadata;
-  }
-
-  public void setLocationInfoList(List<OmKeyLocationInfo> locationInfoList) {
-    this.locationInfoList = locationInfoList;
-  }
-
-  public List<OmKeyLocationInfo> getLocationInfoList() {
-    return locationInfoList;
-  }
-
-  public boolean getRefreshPipeline() {
-    return refreshPipeline;
-  }
-
-  public boolean getSortDatanodes() {
-    return sortDatanodesInPipeline;
-  }
-
-  @Override
-  public Map<String, String> toAuditMap() {
-    Map<String, String> auditMap = new LinkedHashMap<>();
-    auditMap.put(OzoneConsts.VOLUME, this.volumeName);
-    auditMap.put(OzoneConsts.BUCKET, this.bucketName);
-    auditMap.put(OzoneConsts.KEY, this.keyName);
-    auditMap.put(OzoneConsts.DATA_SIZE, String.valueOf(this.dataSize));
-    auditMap.put(OzoneConsts.REPLICATION_TYPE,
-        (this.type != null) ? this.type.name() : null);
-    auditMap.put(OzoneConsts.REPLICATION_FACTOR,
-        (this.factor != null) ? this.factor.name() : null);
-    auditMap.put(OzoneConsts.KEY_LOCATION_INFO,
-        (this.locationInfoList != null) ? locationInfoList.toString() : null);
-    return auditMap;
-  }
-
-  @VisibleForTesting
-  public void addLocationInfo(OmKeyLocationInfo locationInfo) {
-    if (this.locationInfoList == null) {
-      locationInfoList = new ArrayList<>();
-    }
-    locationInfoList.add(locationInfo);
-  }
-
-  /**
-   * Builder class of OmKeyArgs.
-   */
-  public static class Builder {
-    private String volumeName;
-    private String bucketName;
-    private String keyName;
-    private long dataSize;
-    private ReplicationType type;
-    private ReplicationFactor factor;
-    private List<OmKeyLocationInfo> locationInfoList;
-    private boolean isMultipartKey;
-    private String multipartUploadID;
-    private int multipartUploadPartNumber;
-    private Map<String, String> metadata = new HashMap<>();
-    private boolean refreshPipeline;
-    private boolean sortDatanodesInPipeline;
-    private List<OzoneAcl> acls;
-
-    public Builder setVolumeName(String volume) {
-      this.volumeName = volume;
-      return this;
-    }
-
-    public Builder setBucketName(String bucket) {
-      this.bucketName = bucket;
-      return this;
-    }
-
-    public Builder setKeyName(String key) {
-      this.keyName = key;
-      return this;
-    }
-
-    public Builder setDataSize(long size) {
-      this.dataSize = size;
-      return this;
-    }
-
-    public Builder setType(ReplicationType replicationType) {
-      this.type = replicationType;
-      return this;
-    }
-
-    public Builder setFactor(ReplicationFactor replicationFactor) {
-      this.factor = replicationFactor;
-      return this;
-    }
-
-    public Builder setLocationInfoList(List<OmKeyLocationInfo> locationInfos) {
-      this.locationInfoList = locationInfos;
-      return this;
-    }
-
-    public Builder setAcls(List<OzoneAcl> listOfAcls) {
-      this.acls = listOfAcls;
-      return this;
-    }
-
-    public Builder setIsMultipartKey(boolean isMultipart) {
-      this.isMultipartKey = isMultipart;
-      return this;
-    }
-
-    public Builder setMultipartUploadID(String uploadID) {
-      this.multipartUploadID = uploadID;
-      return this;
-    }
-
-    public Builder setMultipartUploadPartNumber(int partNumber) {
-      this.multipartUploadPartNumber = partNumber;
-      return this;
-    }
-
-    public Builder addMetadata(String key, String value) {
-      this.metadata.put(key, value);
-      return this;
-    }
-
-    public Builder addAllMetadata(Map<String, String> metadatamap) {
-      this.metadata.putAll(metadatamap);
-      return this;
-    }
-
-    public Builder setRefreshPipeline(boolean refresh) {
-      this.refreshPipeline = refresh;
-      return this;
-    }
-
-    public Builder setSortDatanodesInPipeline(boolean sort) {
-      this.sortDatanodesInPipeline = sort;
-      return this;
-    }
-
-    public OmKeyArgs build() {
-      return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, type,
-          factor, locationInfoList, isMultipartKey, multipartUploadID,
-          multipartUploadPartNumber, metadata, refreshPipeline, acls,
-          sortDatanodesInPipeline);
-    }
-
-  }
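-
-  // A hypothetical usage sketch for key creation arguments (names, size and
-  // replication settings are illustrative only):
-  //
-  //   OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-  //       .setVolumeName("vol1")
-  //       .setBucketName("bucket1")
-  //       .setKeyName("key1")
-  //       .setDataSize(1024)
-  //       .setType(ReplicationType.RATIS)
-  //       .setFactor(ReplicationFactor.THREE)
-  //       .build();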
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
deleted file mode 100644
index 83adee9..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ /dev/null
@@ -1,421 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
-import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
-import org.apache.hadoop.util.Time;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Args for a key block. The block instance for the key requested in putKey.
- * This is returned from OM to the client, and the client uses this class to
- * talk to the datanode. This is also the metadata written to om.db on the
- * server side.
- */
-public final class OmKeyInfo extends WithMetadata {
-  private final String volumeName;
-  private final String bucketName;
-  // name of key client specified
-  private String keyName;
-  private long dataSize;
-  private List<OmKeyLocationInfoGroup> keyLocationVersions;
-  private final long creationTime;
-  private long modificationTime;
-  private HddsProtos.ReplicationType type;
-  private HddsProtos.ReplicationFactor factor;
-  private FileEncryptionInfo encInfo;
-  /**
-   * ACL Information.
-   */
-  private List<OzoneAcl> acls;
-
-  @SuppressWarnings("parameternumber")
-  OmKeyInfo(String volumeName, String bucketName, String keyName,
-      List<OmKeyLocationInfoGroup> versions, long dataSize,
-      long creationTime, long modificationTime,
-      HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor,
-      Map<String, String> metadata,
-      FileEncryptionInfo encInfo, List<OzoneAcl> acls) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.keyName = keyName;
-    this.dataSize = dataSize;
-    // it is important that the versions are ordered from old to new.
-    // Do this sanity check when versions are loaded while creating OmKeyInfo.
-    // TODO : this check is not strictly necessary; it is here only because
-    // versioning is still a work in progress. Remove the following check
-    // once versioning is complete and proven to function correctly.
-    long currentVersion = -1;
-    for (OmKeyLocationInfoGroup version : versions) {
-      Preconditions.checkArgument(
-            currentVersion + 1 == version.getVersion());
-      currentVersion = version.getVersion();
-    }
-    this.keyLocationVersions = versions;
-    this.creationTime = creationTime;
-    this.modificationTime = modificationTime;
-    this.factor = factor;
-    this.type = type;
-    this.metadata = metadata;
-    this.encInfo = encInfo;
-    this.acls = acls;
-  }
-
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  public HddsProtos.ReplicationType getType() {
-    return type;
-  }
-
-  public HddsProtos.ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  public void setKeyName(String keyName) {
-    this.keyName = keyName;
-  }
-
-  public long getDataSize() {
-    return dataSize;
-  }
-
-  public void setDataSize(long size) {
-    this.dataSize = size;
-  }
-
-  public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() {
-    return keyLocationVersions.isEmpty() ? null :
-        keyLocationVersions.get(keyLocationVersions.size() - 1);
-  }
-
-  public List<OmKeyLocationInfoGroup> getKeyLocationVersions() {
-    return keyLocationVersions;
-  }
-
-  public void updateModificationTime() {
-    this.modificationTime = Time.monotonicNow();
-  }
-
-  /**
-   * Updates the length of each block in the given list.
-   * This will be called when the key is being committed to OzoneManager.
-   *
-   * @param locationInfoList list of locationInfo
-   */
-  public void updateLocationInfoList(List<OmKeyLocationInfo> locationInfoList) {
-    OmKeyLocationInfoGroup keyLocationInfoGroup = getLatestVersionLocations();
-    long latestVersion = keyLocationInfoGroup.getVersion();
-    List<OmKeyLocationInfo> currentList =
-        keyLocationInfoGroup.getLocationList();
-    List<OmKeyLocationInfo> latestVersionList =
-        keyLocationInfoGroup.getBlocksLatestVersionOnly();
-    // Updates the location list of the latest version only with the
-    // given locationInfoList.
-    // TODO : The originally allocated list and the updated list here may
-    // differ, as the containers on the Datanode on which the blocks were
-    // pre-allocated might get closed. The diff of blocks between these two
-    // lists needs to be garbage collected in case the ozone client dies.
-    currentList.removeAll(latestVersionList);
-    // set each of the locationInfo object to the latest version
-    locationInfoList.forEach(omKeyLocationInfo -> omKeyLocationInfo
-        .setCreateVersion(latestVersion));
-    currentList.addAll(locationInfoList);
-  }
-
-  /**
-   * Append a set of blocks to the latest version. Note that these blocks are
-   * part of the latest version, not a new version.
-   *
-   * @param newLocationList the list of new blocks to be added.
-   * @param updateTime if true, will update modification time.
-   * @throws IOException
-   */
-  public synchronized void appendNewBlocks(
-      List<OmKeyLocationInfo> newLocationList, boolean updateTime)
-      throws IOException {
-    if (keyLocationVersions.isEmpty()) {
-      throw new IOException("Appending new block, but no version exists");
-    }
-    OmKeyLocationInfoGroup currentLatestVersion =
-        keyLocationVersions.get(keyLocationVersions.size() - 1);
-    currentLatestVersion.appendNewBlocks(newLocationList);
-    if (updateTime) {
-      setModificationTime(Time.now());
-    }
-  }
-
-  /**
-   * Adds a new set of blocks. The new blocks are added by appending a new
-   * version to the overall version list.
-   *
-   * @param newLocationList the list of new blocks to be added.
-   * @param updateTime - if true, updates modification time.
-   * @throws IOException
-   */
-  public synchronized long addNewVersion(
-      List<OmKeyLocationInfo> newLocationList, boolean updateTime)
-      throws IOException {
-    long latestVersionNum;
-    if (keyLocationVersions.isEmpty()) {
-      // no version exists; these blocks are the very first version.
-      keyLocationVersions.add(new OmKeyLocationInfoGroup(0, newLocationList));
-      latestVersionNum = 0;
-    } else {
-      // it is important that the new version is always at the tail of the
-      // list
-      OmKeyLocationInfoGroup currentLatestVersion =
-          keyLocationVersions.get(keyLocationVersions.size() - 1);
-      // the new version is created based on the current latest version
-      OmKeyLocationInfoGroup newVersion =
-          currentLatestVersion.generateNextVersion(newLocationList);
-      keyLocationVersions.add(newVersion);
-      latestVersionNum = newVersion.getVersion();
-    }
-
-    if (updateTime) {
-      setModificationTime(Time.now());
-    }
-    return latestVersionNum;
-  }
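-
-  // A hypothetical sketch of the versioning semantics (keyInfo, blocksA,
-  // blocksB and blocksC are illustrative, not part of this class):
-  //
-  //   long v0 = keyInfo.addNewVersion(blocksA, false);  // v0 == 0
-  //   long v1 = keyInfo.addNewVersion(blocksB, false);  // v1 == 1; version 1
-  //                                                     // holds blocksA + blocksB
-  //   keyInfo.appendNewBlocks(blocksC, false);          // extends version 1,
-  //                                                     // no new version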
-
-  public long getCreationTime() {
-    return creationTime;
-  }
-
-  public long getModificationTime() {
-    return modificationTime;
-  }
-
-  public void setModificationTime(long modificationTime) {
-    this.modificationTime = modificationTime;
-  }
-
-  public FileEncryptionInfo getFileEncryptionInfo() {
-    return encInfo;
-  }
-
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-
-  public boolean addAcl(OzoneAcl acl) {
-    return OzoneAclUtil.addAcl(acls, acl);
-  }
-
-  public boolean removeAcl(OzoneAcl acl) {
-    return OzoneAclUtil.removeAcl(acls, acl);
-  }
-
-  public boolean setAcls(List<OzoneAcl> newAcls) {
-    return OzoneAclUtil.setAcl(acls, newAcls);
-  }
-
-  /**
-   * Builder of OmKeyInfo.
-   */
-  public static class Builder {
-    private String volumeName;
-    private String bucketName;
-    private String keyName;
-    private long dataSize;
-    private List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups =
-        new ArrayList<>();
-    private long creationTime;
-    private long modificationTime;
-    private HddsProtos.ReplicationType type;
-    private HddsProtos.ReplicationFactor factor;
-    private Map<String, String> metadata;
-    private FileEncryptionInfo encInfo;
-    private List<OzoneAcl> acls;
-
-    public Builder() {
-      this.metadata = new HashMap<>();
-      omKeyLocationInfoGroups = new ArrayList<>();
-      acls = new ArrayList<>();
-    }
-
-    public Builder setVolumeName(String volume) {
-      this.volumeName = volume;
-      return this;
-    }
-
-    public Builder setBucketName(String bucket) {
-      this.bucketName = bucket;
-      return this;
-    }
-
-    public Builder setKeyName(String key) {
-      this.keyName = key;
-      return this;
-    }
-
-    public Builder setOmKeyLocationInfos(
-        List<OmKeyLocationInfoGroup> omKeyLocationInfoList) {
-      this.omKeyLocationInfoGroups = omKeyLocationInfoList;
-      return this;
-    }
-
-    public Builder setDataSize(long size) {
-      this.dataSize = size;
-      return this;
-    }
-
-    public Builder setCreationTime(long crTime) {
-      this.creationTime = crTime;
-      return this;
-    }
-
-    public Builder setModificationTime(long mTime) {
-      this.modificationTime = mTime;
-      return this;
-    }
-
-    public Builder setReplicationFactor(HddsProtos.ReplicationFactor replFact) {
-      this.factor = replFact;
-      return this;
-    }
-
-    public Builder setReplicationType(HddsProtos.ReplicationType replType) {
-      this.type = replType;
-      return this;
-    }
-
-    public Builder addMetadata(String key, String value) {
-      metadata.put(key, value);
-      return this;
-    }
-
-    public Builder addAllMetadata(Map<String, String> newMetadata) {
-      metadata.putAll(newMetadata);
-      return this;
-    }
-
-    public Builder setFileEncryptionInfo(FileEncryptionInfo feInfo) {
-      this.encInfo = feInfo;
-      return this;
-    }
-
-    public Builder setAcls(List<OzoneAcl> listOfAcls) {
-      if (listOfAcls != null) {
-        this.acls.addAll(listOfAcls);
-      }
-      return this;
-    }
-
-    public OmKeyInfo build() {
-      return new OmKeyInfo(
-          volumeName, bucketName, keyName, omKeyLocationInfoGroups,
-          dataSize, creationTime, modificationTime, type, factor, metadata,
-          encInfo, acls);
-    }
-  }
-
-  public KeyInfo getProtobuf() {
-    long latestVersion = keyLocationVersions.isEmpty() ? -1 :
-        keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion();
-    KeyInfo.Builder kb = KeyInfo.newBuilder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setDataSize(dataSize)
-        .setFactor(factor)
-        .setType(type)
-        .addAllKeyLocationList(keyLocationVersions.stream()
-            .map(OmKeyLocationInfoGroup::getProtobuf)
-            .collect(Collectors.toList()))
-        .setLatestVersion(latestVersion)
-        .setCreationTime(creationTime)
-        .setModificationTime(modificationTime)
-        .addAllMetadata(KeyValueUtil.toProtobuf(metadata))
-        .addAllAcls(OzoneAclUtil.toProtobuf(acls));
-    if (encInfo != null) {
-      kb.setFileEncryptionInfo(OMPBHelper.convert(encInfo));
-    }
-    return kb.build();
-  }
-
-  public static OmKeyInfo getFromProtobuf(KeyInfo keyInfo) {
-    return new OmKeyInfo.Builder()
-        .setVolumeName(keyInfo.getVolumeName())
-        .setBucketName(keyInfo.getBucketName())
-        .setKeyName(keyInfo.getKeyName())
-        .setOmKeyLocationInfos(keyInfo.getKeyLocationListList().stream()
-            .map(OmKeyLocationInfoGroup::getFromProtobuf)
-            .collect(Collectors.toList()))
-        .setDataSize(keyInfo.getDataSize())
-        .setCreationTime(keyInfo.getCreationTime())
-        .setModificationTime(keyInfo.getModificationTime())
-        .setReplicationType(keyInfo.getType())
-        .setReplicationFactor(keyInfo.getFactor())
-        .addAllMetadata(KeyValueUtil.getFromProtobuf(keyInfo.getMetadataList()))
-        .setFileEncryptionInfo(keyInfo.hasFileEncryptionInfo() ?
-            OMPBHelper.convert(keyInfo.getFileEncryptionInfo()): null)
-        .setAcls(OzoneAclUtil.fromProtobuf(keyInfo.getAclsList()))
-        .build();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    OmKeyInfo omKeyInfo = (OmKeyInfo) o;
-    return dataSize == omKeyInfo.dataSize &&
-        creationTime == omKeyInfo.creationTime &&
-        modificationTime == omKeyInfo.modificationTime &&
-        volumeName.equals(omKeyInfo.volumeName) &&
-        bucketName.equals(omKeyInfo.bucketName) &&
-        keyName.equals(omKeyInfo.keyName) &&
-        Objects
-            .equals(keyLocationVersions, omKeyInfo.keyLocationVersions) &&
-        type == omKeyInfo.type &&
-        factor == omKeyInfo.factor &&
-        Objects.equals(metadata, omKeyInfo.metadata) &&
-        Objects.equals(acls, omKeyInfo.acls);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(volumeName, bucketName, keyName);
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
deleted file mode 100644
index b81fcd0..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;
-import org.apache.hadoop.security.token.Token;
-
-import java.util.Objects;
-
-/**
- * One key can be too large to fit in one container, in which case it gets
- * split into a number of subkeys. This class represents one such subkey
- * instance.
- */
-public final class OmKeyLocationInfo {
-  private final BlockID blockID;
-  // the length of this subkey.
-  private long length;
-  private final long offset;
-  // Block token, required for client authentication when security is enabled.
-  private Token<OzoneBlockTokenIdentifier> token;
-  // the version number indicating when this block was added
-  private long createVersion;
-
-  private Pipeline pipeline;
-
-  private OmKeyLocationInfo(BlockID blockID, Pipeline pipeline, long length,
-                            long offset) {
-    this.blockID = blockID;
-    this.pipeline = pipeline;
-    this.length = length;
-    this.offset = offset;
-  }
-
-  private OmKeyLocationInfo(BlockID blockID, Pipeline pipeline, long length,
-      long offset, Token<OzoneBlockTokenIdentifier> token) {
-    this.blockID = blockID;
-    this.pipeline = pipeline;
-    this.length = length;
-    this.offset = offset;
-    this.token = token;
-  }
-
-  public void setCreateVersion(long version) {
-    createVersion = version;
-  }
-
-  public long getCreateVersion() {
-    return createVersion;
-  }
-
-  public BlockID getBlockID() {
-    return blockID;
-  }
-
-  public long getContainerID() {
-    return blockID.getContainerID();
-  }
-
-  public long getLocalID() {
-    return blockID.getLocalID();
-  }
-
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  public long getLength() {
-    return length;
-  }
-
-  public void setLength(long length) {
-    this.length = length;
-  }
-
-  public long getOffset() {
-    return offset;
-  }
-
-  public long getBlockCommitSequenceId() {
-    return blockID.getBlockCommitSequenceId();
-  }
-
-  public Token<OzoneBlockTokenIdentifier> getToken() {
-    return token;
-  }
-
-  public void setToken(Token<OzoneBlockTokenIdentifier> token) {
-    this.token = token;
-  }
-
-  public void setPipeline(Pipeline pipeline) {
-    this.pipeline = pipeline;
-  }
-
-  /**
-   * Builder of OmKeyLocationInfo.
-   */
-  public static class Builder {
-    private BlockID blockID;
-    private long length;
-    private long offset;
-    private Token<OzoneBlockTokenIdentifier> token;
-    private Pipeline pipeline;
-
-    public Builder setBlockID(BlockID blockId) {
-      this.blockID = blockId;
-      return this;
-    }
-
-    @SuppressWarnings("checkstyle:hiddenfield")
-    public Builder setPipeline(Pipeline pipeline) {
-      this.pipeline = pipeline;
-      return this;
-    }
-
-    public Builder setLength(long len) {
-      this.length = len;
-      return this;
-    }
-
-    public Builder setOffset(long off) {
-      this.offset = off;
-      return this;
-    }
-
-    public Builder setToken(Token<OzoneBlockTokenIdentifier> bToken) {
-      this.token = bToken;
-      return this;
-    }
-
-    public OmKeyLocationInfo build() {
-      if (token == null) {
-        return new OmKeyLocationInfo(blockID, pipeline, length, offset);
-      } else {
-        return new OmKeyLocationInfo(blockID, pipeline, length, offset, token);
-      }
-    }
-  }
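-
-  // A hypothetical usage sketch (the container ID, local ID and sizes are
-  // illustrative only):
-  //
-  //   OmKeyLocationInfo loc = new OmKeyLocationInfo.Builder()
-  //       .setBlockID(new BlockID(1L, 100L))
-  //       .setLength(4096L)
-  //       .setOffset(0L)
-  //       .build();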
-
-  public KeyLocation getProtobuf() {
-    KeyLocation.Builder builder = KeyLocation.newBuilder()
-        .setBlockID(blockID.getProtobuf())
-        .setLength(length)
-        .setOffset(offset)
-        .setCreateVersion(createVersion);
-    if (this.token != null) {
-      builder.setToken(this.token.toTokenProto());
-    }
-    try {
-      builder.setPipeline(pipeline.getProtobufMessage());
-    } catch (UnknownPipelineStateException e) {
-      //TODO: fix me: we should not return KeyLocation without pipeline.
-    }
-    return builder.build();
-  }
-
-  private static Pipeline getPipeline(KeyLocation keyLocation) {
-    try {
-      return keyLocation.hasPipeline() ?
-          Pipeline.getFromProtobuf(keyLocation.getPipeline()) : null;
-    } catch (UnknownPipelineStateException e) {
-      return null;
-    }
-  }
-
-  public static OmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) {
-    OmKeyLocationInfo info = new OmKeyLocationInfo(
-        BlockID.getFromProtobuf(keyLocation.getBlockID()),
-        getPipeline(keyLocation),
-        keyLocation.getLength(),
-        keyLocation.getOffset());
-    if (keyLocation.hasToken()) {
-      info.token = new Token<>(keyLocation.getToken());
-    }
-    info.setCreateVersion(keyLocation.getCreateVersion());
-    return info;
-  }
-
-  @Override
-  public String toString() {
-    return "{blockID={containerID=" + blockID.getContainerID() +
-        ", localID=" + blockID.getLocalID() + "}" +
-        ", length=" + length +
-        ", offset=" + offset +
-        ", token=" + token +
-        ", pipeline=" + pipeline +
-        ", createVersion=" + createVersion + '}';
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    OmKeyLocationInfo that = (OmKeyLocationInfo) o;
-    return length == that.length &&
-        offset == that.offset &&
-        createVersion == that.createVersion &&
-        Objects.equals(blockID, that.blockID) &&
-        Objects.equals(token, that.token) &&
-        Objects.equals(pipeline, that.pipeline);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(blockID, length, offset, token, createVersion,
-        pipeline);
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
deleted file mode 100644
index 8bdcee38..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * A list of key locations. This class represents one single version of the
- * blocks of a key.
- */
-public class OmKeyLocationInfoGroup {
-  private final long version;
-  private final List<OmKeyLocationInfo> locationList;
-
-  public OmKeyLocationInfoGroup(long version,
-                                List<OmKeyLocationInfo> locations) {
-    this.version = version;
-    this.locationList = locations;
-  }
-
-  /**
-   * Return only the blocks that are created in the most recent version.
-   *
-   * @return the list of blocks that are created in the latest version.
-   */
-  public List<OmKeyLocationInfo> getBlocksLatestVersionOnly() {
-    List<OmKeyLocationInfo> list = new ArrayList<>();
-    locationList.stream().filter(x -> x.getCreateVersion() == version)
-        .forEach(list::add);
-    return list;
-  }
-
-  public long getVersion() {
-    return version;
-  }
-
-  public List<OmKeyLocationInfo> getLocationList() {
-    return locationList;
-  }
-
-  public KeyLocationList getProtobuf() {
-    return KeyLocationList.newBuilder()
-        .setVersion(version)
-        .addAllKeyLocations(
-            locationList.stream().map(OmKeyLocationInfo::getProtobuf)
-                .collect(Collectors.toList()))
-        .build();
-  }
-
-  public static OmKeyLocationInfoGroup getFromProtobuf(
-      KeyLocationList keyLocationList) {
-    return new OmKeyLocationInfoGroup(
-        keyLocationList.getVersion(),
-        keyLocationList.getKeyLocationsList().stream()
-            .map(OmKeyLocationInfo::getFromProtobuf)
-            .collect(Collectors.toList()));
-  }
-
-  /**
-   * Given a list of new block locations, generates a new version list based
-   * upon this one.
-   *
-   * @param newLocationList a list of new locations to be added.
-   * @return a new OmKeyLocationInfoGroup representing the next version.
-   */
-  OmKeyLocationInfoGroup generateNextVersion(
-      List<OmKeyLocationInfo> newLocationList) throws IOException {
-    // TODO : revisit whether this method can be made more efficient.
-    // One potential inefficiency is that later versions always include the
-    // older ones, e.g. if v1 has B1, then v2, v3... will all contain B1 and
-    // only add more blocks.
-    List<OmKeyLocationInfo> newList = new ArrayList<>(locationList);
-    for (OmKeyLocationInfo newInfo : newLocationList) {
-      // all these new blocks will have addVersion of current version + 1
-      newInfo.setCreateVersion(version + 1);
-      newList.add(newInfo);
-    }
-    return new OmKeyLocationInfoGroup(version + 1, newList);
-  }
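-
-  // A hypothetical sketch (assumes a group at version n holding block B1):
-  // generateNextVersion([B2]) returns a new group at version n + 1 whose
-  // location list is [B1, B2], with B2's createVersion set to n + 1;
-  // appendNewBlocks([B3]) instead adds B3 to this group at version n.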
-
-  void appendNewBlocks(List<OmKeyLocationInfo> newLocationList)
-      throws IOException {
-    for (OmKeyLocationInfo info : newLocationList) {
-      info.setCreateVersion(version);
-      locationList.add(info);
-    }
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("version:").append(version).append(" ");
-    for (OmKeyLocationInfo kli : locationList) {
-      sb.append(kli.getLocalID()).append(" || ");
-    }
-    return sb.toString();
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java
deleted file mode 100644
index 646cb42..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-/**
- * This class holds information about the response from the commit multipart
- * upload part request.
- */
-public class OmMultipartCommitUploadPartInfo {
-
-  private final String partName;
-
-  public OmMultipartCommitUploadPartInfo(String name) {
-    this.partName = name;
-  }
-
-  public String getPartName() {
-    return partName;
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartInfo.java
deleted file mode 100644
index 98913d3..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartInfo.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-/**
- * Class which holds information about the response of the initiate multipart
- * upload request.
- */
-public class OmMultipartInfo {
-
-  private String volumeName;
-  private String bucketName;
-  private String keyName;
-  private String uploadID;
-
-  /**
-   * Construct OmMultipartInfo object which holds information about the
-   * response from the initiate multipart upload request.
-   * @param volume - volume name.
-   * @param bucket - bucket name.
-   * @param key - key name.
-   * @param id - multipart upload ID.
-   */
-  public OmMultipartInfo(String volume, String bucket, String key, String id) {
-    this.volumeName = volume;
-    this.bucketName = bucket;
-    this.keyName = key;
-    this.uploadID = id;
-  }
-
-  /**
-   * Return volume name.
-   * @return volumeName
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Return bucket name.
-   * @return bucketName
-   */
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  /**
-   * Return key name.
-   * @return keyName
-   */
-  public String getKeyName() {
-    return keyName;
-  }
-
-  /**
-   * Return uploadID.
-   * @return uploadID
-   */
-  public String getUploadID() {
-    return uploadID;
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
deleted file mode 100644
index 80123fd..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .MultipartKeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .PartKeyInfo;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.TreeMap;
-
-/**
- * This class represents multipart upload information for a key; it holds
- * the upload part information of the key.
- */
-public class OmMultipartKeyInfo {
-  private String uploadID;
-  private TreeMap<Integer, PartKeyInfo> partKeyInfoList;
-
-  /**
-   * Construct OmMultipartKeyInfo object which holds multipart upload
-   * information for a key.
-   * @param id - multipart upload ID.
-   * @param list - map of part number to PartKeyInfo.
-   */
-  public OmMultipartKeyInfo(String id, Map<Integer, PartKeyInfo> list) {
-    this.uploadID = id;
-    this.partKeyInfoList = new TreeMap<>(list);
-  }
-
-  /**
-   * Returns the uploadID for this multipart upload of a key.
-   * @return uploadID
-   */
-  public String getUploadID() {
-    return uploadID;
-  }
-
-  public TreeMap<Integer, PartKeyInfo> getPartKeyInfoMap() {
-    return partKeyInfoList;
-  }
-
-  public void addPartKeyInfo(int partNumber, PartKeyInfo partKeyInfo) {
-    this.partKeyInfoList.put(partNumber, partKeyInfo);
-  }
-
-  public PartKeyInfo getPartKeyInfo(int partNumber) {
-    return partKeyInfoList.get(partNumber);
-  }
-
-  /**
-   * Constructs OmMultipartKeyInfo from a MultipartKeyInfo proto object.
-   * @param multipartKeyInfo - MultipartKeyInfo protobuf message.
-   * @return OmMultipartKeyInfo
-   */
-  public static OmMultipartKeyInfo getFromProto(MultipartKeyInfo
-                                                 multipartKeyInfo) {
-    Map<Integer, PartKeyInfo> list = new HashMap<>();
-    multipartKeyInfo.getPartKeyInfoListList().forEach(partKeyInfo
-        -> list.put(partKeyInfo.getPartNumber(), partKeyInfo));
-    return new OmMultipartKeyInfo(multipartKeyInfo.getUploadID(), list);
-  }
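-
-  // A hypothetical round-trip sketch (assumes an existing OmMultipartKeyInfo
-  // named info; illustrative only):
-  //
-  //   MultipartKeyInfo proto = info.getProto();
-  //   OmMultipartKeyInfo copy = OmMultipartKeyInfo.getFromProto(proto);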
-
-  /**
-   * Constructs a MultipartKeyInfo proto from this object.
-   * @return MultipartKeyInfo
-   */
-  public MultipartKeyInfo getProto() {
-    MultipartKeyInfo.Builder builder = MultipartKeyInfo.newBuilder()
-        .setUploadID(uploadID);
-    partKeyInfoList.forEach((key, value) -> builder.addPartKeyInfoList(value));
-    return builder.build();
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (this == other) {
-      return true;
-    }
-    return other instanceof OmMultipartKeyInfo && uploadID.equals(
-        ((OmMultipartKeyInfo)other).getUploadID());
-  }
-
-  @Override
-  public int hashCode() {
-    return uploadID.hashCode();
-  }
-
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUpload.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUpload.java
deleted file mode 100644
index 9d2d2ae..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUpload.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-import java.time.Instant;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-
-/**
- * Information about one initiated multipart upload.
- */
-public class OmMultipartUpload {
-
-  private String volumeName;
-
-  private String bucketName;
-
-  private String keyName;
-
-  private String uploadId;
-
-  private Instant creationTime;
-
-  private HddsProtos.ReplicationType replicationType;
-
-  private HddsProtos.ReplicationFactor replicationFactor;
-
-  public OmMultipartUpload(String volumeName, String bucketName,
-      String keyName, String uploadId) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.keyName = keyName;
-    this.uploadId = uploadId;
-  }
-
-  public OmMultipartUpload(String volumeName, String bucketName,
-      String keyName, String uploadId, Instant creationDate) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.keyName = keyName;
-    this.uploadId = uploadId;
-    this.creationTime = creationDate;
-  }
-
-  public OmMultipartUpload(String volumeName, String bucketName,
-      String keyName, String uploadId, Instant creationTime,
-      ReplicationType replicationType,
-      ReplicationFactor replicationFactor) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.keyName = keyName;
-    this.uploadId = uploadId;
-    this.creationTime = creationTime;
-    this.replicationType = replicationType;
-    this.replicationFactor = replicationFactor;
-  }
-
-  public static OmMultipartUpload from(String key) {
-    String[] split = key.split(OM_KEY_PREFIX);
-    if (split.length < 5) {
-      throw new IllegalArgumentException("Key " + key
-          + " doesn't have enough segments to be a valid multipart upload key");
-    }
-    String uploadId = split[split.length - 1];
-    String volume = split[1];
-    String bucket = split[2];
-    return new OmMultipartUpload(volume, bucket,
-        key.substring(volume.length() + bucket.length() + 3,
-            key.length() - uploadId.length() - 1), uploadId);
-  }
-
-  public String getDbKey() {
-    return OmMultipartUpload
-        .getDbKey(volumeName, bucketName, keyName, uploadId);
-  }
-
-  public static String getDbKey(String volume, String bucket, String key,
-      String uploadId) {
-    return getDbKey(volume, bucket, key) + OM_KEY_PREFIX + uploadId;
-  }
-
-  public static String getDbKey(String volume, String bucket, String key) {
-    return OM_KEY_PREFIX + volume + OM_KEY_PREFIX + bucket +
-        OM_KEY_PREFIX + key;
-  }
-
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  public String getUploadId() {
-    return uploadId;
-  }
-
-  public Instant getCreationTime() {
-    return creationTime;
-  }
-
-  public void setCreationTime(Instant creationTime) {
-    this.creationTime = creationTime;
-  }
-
-  public ReplicationType getReplicationType() {
-    return replicationType;
-  }
-
-  public void setReplicationType(
-      ReplicationType replicationType) {
-    this.replicationType = replicationType;
-  }
-
-  public ReplicationFactor getReplicationFactor() {
-    return replicationFactor;
-  }
-
-  public void setReplicationFactor(
-      ReplicationFactor replicationFactor) {
-    this.replicationFactor = replicationFactor;
-  }
-}
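The getDbKey()/from() pair above round-trips a multipart DB key of the form
OM_KEY_PREFIX + volume + OM_KEY_PREFIX + bucket + OM_KEY_PREFIX + keyName +
OM_KEY_PREFIX + uploadId. A self-contained sketch of the parsing arithmetic,
assuming OM_KEY_PREFIX is "/" (path values are illustrative):

    public class MultipartDbKeySketch {
      public static void main(String[] args) {
        String dbKey = "/vol1/bucket1/dir1/file1/upload-123";
        String[] split = dbKey.split("/");
        // split[0] is the empty segment before the leading delimiter,
        // so a valid key yields at least 5 segments.
        String volume = split[1];                  // "vol1"
        String bucket = split[2];                  // "bucket1"
        String uploadId = split[split.length - 1]; // "upload-123"
        // The "+ 3" in from() skips the three delimiters that precede
        // the key name; the "- 1" drops the one before the upload id.
        String keyName = dbKey.substring(
            volume.length() + bucket.length() + 3,
            dbKey.length() - uploadId.length() - 1); // "dir1/file1"
        System.out.println(volume + "/" + bucket + "/" + keyName
            + " uploadId=" + uploadId);
      }
    }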
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteInfo.java
deleted file mode 100644
index 71ce882..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteInfo.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-/**
- * This class holds information about the response of a complete
- * multipart upload request.
- */
-public class OmMultipartUploadCompleteInfo {
-
-  private String volume;
-  private String bucket;
-  private String key;
-  private String hash; // this is used as ETag for S3.
-
-  public OmMultipartUploadCompleteInfo(String volumeName, String bucketName,
-                                       String keyName, String md5) {
-    this.volume = volumeName;
-    this.bucket = bucketName;
-    this.key = keyName;
-    this.hash = md5;
-  }
-
-  public String getVolume() {
-    return volume;
-  }
-
-  public void setVolume(String volume) {
-    this.volume = volume;
-  }
-
-  public String getBucket() {
-    return bucket;
-  }
-
-  public void setBucket(String bucket) {
-    this.bucket = bucket;
-  }
-
-  public String getKey() {
-    return key;
-  }
-
-  public void setKey(String key) {
-    this.key = key;
-  }
-
-  public String getHash() {
-    return hash;
-  }
-
-  public void setHash(String hash) {
-    this.hash = hash;
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java
deleted file mode 100644
index 50c0a47..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-/**
- * This class represents the part list that is required for a complete
- * multipart upload request.
- */
-public class OmMultipartUploadCompleteList {
-
-  private final TreeMap<Integer, String> multipartMap;
-
-  /**
-   * Construct OmMultipartUploadCompleteList, which holds the multipart map
-   * of part number to part name.
-   * @param partMap
-   */
-  public OmMultipartUploadCompleteList(Map<Integer, String> partMap) {
-    this.multipartMap = new TreeMap<>(partMap);
-  }
-
-  /**
-   * Return multipartMap, a map of part number to part name.
-   * @return multipartMap
-   */
-  public TreeMap<Integer, String> getMultipartMap() {
-    return multipartMap;
-  }
-
-  /**
-   * Construct Part list from the multipartMap.
-   * @return List<Part>
-   */
-  public List<Part> getPartsList() {
-    List<Part> partList = new ArrayList<>();
-    multipartMap.forEach((partNumber, partName) -> partList.add(Part
-        .newBuilder().setPartName(partName).setPartNumber(partNumber).build()));
-    return partList;
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadList.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadList.java
deleted file mode 100644
index 0c13a0d..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadList.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-import java.util.List;
-
-/**
- * List of in-flight multipart uploads (MPU).
- */
-public class OmMultipartUploadList {
-
-  private List<OmMultipartUpload> uploads;
-
-  public OmMultipartUploadList(
-      List<OmMultipartUpload> uploads) {
-    this.uploads = uploads;
-  }
-
-  public List<OmMultipartUpload> getUploads() {
-    return uploads;
-  }
-
-  public void setUploads(
-      List<OmMultipartUpload> uploads) {
-    this.uploads = uploads;
-  }
-
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java
deleted file mode 100644
index ba0cd42..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .PartInfo;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Class that represents the response for listing the parts of a multipart
- * upload key.
- */
-public class OmMultipartUploadListParts {
-
-  private HddsProtos.ReplicationType replicationType;
-
-  private HddsProtos.ReplicationFactor replicationFactor;
-
-  // When a list is truncated, this element specifies the last part in the
-  // list, as well as the value to use for the part-number-marker request
-  // parameter in a subsequent request.
-  private int nextPartNumberMarker;
-  // Indicates whether the returned list of parts is truncated. A true value
-  // indicates that the list was truncated. A list can be truncated if the
-  // number of parts exceeds the limit returned in the MaxParts element.
-  private boolean truncated;
-
-  private final List<OmPartInfo> partInfoList = new ArrayList<>();
-
-  public OmMultipartUploadListParts(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor,
-      int nextMarker, boolean truncate) {
-    this.replicationType = type;
-    this.replicationFactor = factor;
-
-    this.nextPartNumberMarker = nextMarker;
-    this.truncated = truncate;
-  }
-
-  public void addPart(OmPartInfo partInfo) {
-    partInfoList.add(partInfo);
-  }
-
-  public HddsProtos.ReplicationType getReplicationType() {
-    return replicationType;
-  }
-
-  public int getNextPartNumberMarker() {
-    return nextPartNumberMarker;
-  }
-
-  public boolean isTruncated() {
-    return truncated;
-  }
-
-  public void setReplicationType(HddsProtos.ReplicationType replicationType) {
-    this.replicationType = replicationType;
-  }
-
-  public List<OmPartInfo> getPartInfoList() {
-    return partInfoList;
-  }
-
-  public ReplicationFactor getReplicationFactor() {
-    return replicationFactor;
-  }
-
-  public void addPartList(List<OmPartInfo> partInfos) {
-    this.partInfoList.addAll(partInfos);
-  }
-
-  public void addProtoPartList(List<PartInfo> partInfos) {
-    partInfos.forEach(partInfo -> partInfoList.add(new OmPartInfo(
-        partInfo.getPartNumber(), partInfo.getPartName(),
-        partInfo.getModificationTime(), partInfo.getSize())));
-  }
-}
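A sketch of how a caller might page through parts using the markers above;
fetchPage is a hypothetical stand-in for the OzoneManager list-parts call,
not an API from this patch:

    // Hypothetical paging loop over list-parts responses.
    int marker = 0;
    boolean truncated;
    do {
      OmMultipartUploadListParts page = fetchPage(marker); // hypothetical
      for (OmPartInfo part : page.getPartInfoList()) {
        System.out.println(part.getPartNumber() + " -> " + part.getPartName());
      }
      // When truncated, nextPartNumberMarker feeds the next request.
      marker = page.getNextPartNumberMarker();
      truncated = page.isTruncated();
    } while (truncated);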
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
deleted file mode 100644
index b4f0d16..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-import com.google.protobuf.ByteString;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import java.util.BitSet;
-import java.util.Collection;
-import java.util.List;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Objects;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE;
-
-/**
- * This helper class keeps a map of all users and their permissions.
- */
-@SuppressWarnings("ProtocolBufferOrdinal")
-public class OmOzoneAclMap {
-  // per Acl Type user:rights map
-  private ArrayList<Map<String, BitSet>> accessAclMap;
-  private List<OzoneAclInfo> defaultAclList;
-
-  OmOzoneAclMap() {
-    accessAclMap = new ArrayList<>();
-    defaultAclList = new ArrayList<>();
-    for (OzoneAclType aclType : OzoneAclType.values()) {
-      accessAclMap.add(aclType.ordinal(), new HashMap<>());
-    }
-  }
-
-  private Map<String, BitSet> getAccessAclMap(OzoneAclType type) {
-    return accessAclMap.get(type.ordinal());
-  }
-
-  // For a given acl type and user, get the stored acl
-  private BitSet getAcl(OzoneAclType type, String user) {
-    return getAccessAclMap(type).get(user);
-  }
-
-  public List<OzoneAcl> getAcl() {
-    List<OzoneAcl> acls = new ArrayList<>();
-
-    acls.addAll(getAccessAcls());
-    acls.addAll(defaultAclList.stream().map(a ->
-        OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()));
-    return acls;
-  }
-
-  private Collection<? extends OzoneAcl> getAccessAcls() {
-    List<OzoneAcl> acls = new ArrayList<>();
-    for (OzoneAclType type : OzoneAclType.values()) {
-      accessAclMap.get(type.ordinal()).entrySet().stream().
-          forEach(entry -> acls.add(new OzoneAcl(ACLIdentityType.
-              valueOf(type.name()), entry.getKey(), entry.getValue(),
-              OzoneAcl.AclScope.ACCESS)));
-    }
-    return acls;
-  }
-
-  // Add a new acl to the map
-  public void addAcl(OzoneAcl acl) throws OMException {
-    Objects.requireNonNull(acl, "Acl should not be null.");
-    if (acl.getAclScope().equals(OzoneAcl.AclScope.DEFAULT)) {
-      defaultAclList.add(OzoneAcl.toProtobuf(acl));
-      return;
-    }
-
-    OzoneAclType aclType = OzoneAclType.valueOf(acl.getType().name());
-    if (!getAccessAclMap(aclType).containsKey(acl.getName())) {
-      getAccessAclMap(aclType).put(acl.getName(), acl.getAclBitSet());
-    } else {
-      // Check if we are adding new rights to existing acl.
-      BitSet temp = (BitSet) acl.getAclBitSet().clone();
-      BitSet curRights = (BitSet) getAccessAclMap(aclType).
-          get(acl.getName()).clone();
-      temp.or(curRights);
-
-      if (temp.equals(curRights)) {
-        // throw exception if acl is already added.
-        throw new OMException("Acl " + acl + " already exist.",
-            INVALID_REQUEST);
-      }
-      getAccessAclMap(aclType).replace(acl.getName(), temp);
-    }
-  }
-
-  // Clear all existing acls and replace them with the given list
-  public void setAcls(List<OzoneAcl> acls) throws OMException {
-    Objects.requireNonNull(acls, "Acls should not be null.");
-    // Remove all Acls.
-    for (OzoneAclType type : OzoneAclType.values()) {
-      accessAclMap.get(type.ordinal()).clear();
-    }
-    // Add acls.
-    for (OzoneAcl acl : acls) {
-      addAcl(acl);
-    }
-  }
-
-  // Remove an acl from the map
-  public void removeAcl(OzoneAcl acl) throws OMException {
-    Objects.requireNonNull(acl, "Acl should not be null.");
-    if (acl.getAclScope().equals(OzoneAcl.AclScope.DEFAULT)) {
-      defaultAclList.remove(OzoneAcl.toProtobuf(acl));
-      return;
-    }
-
-    OzoneAclType aclType = OzoneAclType.valueOf(acl.getType().name());
-    if (getAccessAclMap(aclType).containsKey(acl.getName())) {
-      BitSet aclRights = getAccessAclMap(aclType).get(acl.getName());
-      BitSet bits = (BitSet) acl.getAclBitSet().clone();
-      bits.and(aclRights);
-
-      if (bits.equals(ZERO_BITSET)) {
-        // throw exception if acl doesn't exist.
-        throw new OMException("Acl [" + acl + "] doesn't exist.",
-            INVALID_REQUEST);
-      }
-
-      acl.getAclBitSet().and(aclRights);
-      aclRights.xor(acl.getAclBitSet());
-
-      // Remove the acl as all rights are already set to 0.
-      if (aclRights.equals(ZERO_BITSET)) {
-        getAccessAclMap(aclType).remove(acl.getName());
-      }
-    } else {
-      // throw exception if acl doesn't exist.
-      throw new OMException("Acl [" + acl + "] doesn't exist.",
-          INVALID_REQUEST);
-    }
-  }
-
-  // Add a new acl to the map
-  public void addAcl(OzoneAclInfo acl) throws OMException {
-    Objects.requireNonNull(acl, "Acl should not be null.");
-    if (acl.getAclScope().equals(OzoneAclInfo.OzoneAclScope.DEFAULT)) {
-      defaultAclList.add(acl);
-      return;
-    }
-
-    if (!getAccessAclMap(acl.getType()).containsKey(acl.getName())) {
-      BitSet acls = BitSet.valueOf(acl.getRights().toByteArray());
-      getAccessAclMap(acl.getType()).put(acl.getName(), acls);
-    } else {
-      // throw exception if acl is already added.
-
-      throw new OMException("Acl " + acl + " already exist.", INVALID_REQUEST);
-    }
-  }
-
-  // for a given acl, check if the user has access rights
-  public boolean hasAccess(OzoneAclInfo acl) {
-    if (acl == null) {
-      return false;
-    }
-
-    BitSet aclBitSet = getAcl(acl.getType(), acl.getName());
-    if (aclBitSet == null) {
-      return false;
-    }
-    BitSet result = BitSet.valueOf(acl.getRights().toByteArray());
-    result.and(aclBitSet);
-    return (!result.equals(ZERO_BITSET) || aclBitSet.get(ALL.ordinal()))
-        && !aclBitSet.get(NONE.ordinal());
-  }
-
-  /**
-   * For a given acl, check if the user has access rights.
-   * Acls are checked in the following order:
-   * 1. Acls for USER.
-   * 2. Acls for GROUPS.
-   * 3. Acls for WORLD.
-   * 4. Acls for ANONYMOUS.
-   * @param acl
-   * @param ugi
-   *
-   * @return true if given ugi has acl set, else false.
-   * */
-  public boolean hasAccess(ACLType acl, UserGroupInformation ugi) {
-    if (acl == null) {
-      return false;
-    }
-    if (ugi == null) {
-      return false;
-    }
-
-    // Check acls in user acl list.
-    return checkAccessForOzoneAclType(OzoneAclType.USER, acl, ugi)
-        || checkAccessForOzoneAclType(OzoneAclType.GROUP, acl, ugi)
-        || checkAccessForOzoneAclType(OzoneAclType.WORLD, acl, ugi)
-        || checkAccessForOzoneAclType(OzoneAclType.ANONYMOUS, acl, ugi);
-  }
-
-  /**
-   * Helper function to check acl access for OzoneAclType.
-   * */
-  private boolean checkAccessForOzoneAclType(OzoneAclType identityType,
-      ACLType acl, UserGroupInformation ugi) {
-
-    switch (identityType) {
-    case USER:
-      return OzoneAclUtil.checkIfAclBitIsSet(acl, getAcl(identityType,
-          ugi.getUserName()));
-    case GROUP:
-      // Check access for user groups.
-      for (String userGroup : ugi.getGroupNames()) {
-        if (OzoneAclUtil.checkIfAclBitIsSet(acl, getAcl(identityType,
-            userGroup))) {
-          // Return true if any user group has required permission.
-          return true;
-        }
-      }
-      break;
-    default:
-      // For types WORLD and ANONYMOUS, the acl type name is used as the name.
-      if (OzoneAclUtil.checkIfAclBitIsSet(acl, getAcl(identityType,
-          identityType.name()))) {
-        return true;
-      }
-
-    }
-    return false;
-  }
-
-  // Convert this map to OzoneAclInfo Protobuf List
-  public List<OzoneAclInfo> ozoneAclGetProtobuf() {
-    List<OzoneAclInfo> aclList = new LinkedList<>();
-    for (OzoneAclType type : OzoneAclType.values()) {
-      for (Map.Entry<String, BitSet> entry :
-          accessAclMap.get(type.ordinal()).entrySet()) {
-        OzoneAclInfo.Builder builder = OzoneAclInfo.newBuilder()
-            .setName(entry.getKey())
-            .setType(type)
-            .setAclScope(OzoneAclScope.ACCESS)
-            .setRights(ByteString.copyFrom(entry.getValue().toByteArray()));
-
-        aclList.add(builder.build());
-      }
-    }
-    aclList.addAll(defaultAclList);
-    return aclList;
-  }
-
-  // Create map from list of OzoneAclInfos
-  public static OmOzoneAclMap ozoneAclGetFromProtobuf(
-      List<OzoneAclInfo> aclList) throws OMException {
-    OmOzoneAclMap aclMap = new OmOzoneAclMap();
-    for (OzoneAclInfo acl : aclList) {
-      aclMap.addAcl(acl);
-    }
-    return aclMap;
-  }
-
-  public Collection<? extends OzoneAcl> getAclsByScope(OzoneAclScope scope) {
-    if (scope.equals(OzoneAclScope.DEFAULT)) {
-      return defaultAclList.stream().map(a ->
-          OzoneAcl.fromProtobuf(a)).collect(Collectors.toList());
-    } else {
-      return getAcl();
-    }
-  }
-
-  public List<OzoneAclInfo> getDefaultAclList() {
-    return defaultAclList;
-  }
-}
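The access decision in hasAccess(OzoneAclInfo) above reduces to BitSet
arithmetic: the requested bits are ANDed with the stored bits, a non-empty
intersection or a stored ALL bit grants access, and a stored NONE bit vetoes
it. A self-contained sketch of that rule with stand-in ordinals (the real
ones come from ACLType):

    import java.util.BitSet;

    public class AclBitSketch {
      static final int READ = 0, WRITE = 1, ALL = 2, NONE = 3;

      static boolean hasAccess(BitSet requested, BitSet stored) {
        BitSet result = (BitSet) requested.clone();
        result.and(stored);
        // Mirrors hasAccess(): intersection non-empty or ALL set,
        // and NONE not set.
        return (!result.isEmpty() || stored.get(ALL)) && !stored.get(NONE);
      }

      public static void main(String[] args) {
        BitSet stored = new BitSet();
        stored.set(READ);
        BitSet wantRead = new BitSet();
        wantRead.set(READ);
        BitSet wantWrite = new BitSet();
        wantWrite.set(WRITE);
        System.out.println(hasAccess(wantRead, stored));  // true
        System.out.println(hasAccess(wantWrite, stored)); // false
      }
    }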
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java
deleted file mode 100644
index 2d753a5..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartInfo;
-
-/**
- * Class that defines information about each part of a multipart upload key.
- */
-public class OmPartInfo {
-  private int partNumber;
-  private String partName;
-  private long modificationTime;
-  private long size;
-
-  public OmPartInfo(int number, String name, long time, long size) {
-    this.partNumber = number;
-    this.partName = name;
-    this.modificationTime = time;
-    this.size = size;
-  }
-
-  public int getPartNumber() {
-    return partNumber;
-  }
-
-  public String getPartName() {
-    return partName;
-  }
-
-  public long getModificationTime() {
-    return modificationTime;
-  }
-
-  public long getSize() {
-    return size;
-  }
-
-  public PartInfo getProto() {
-    return PartInfo.newBuilder().setPartNumber(partNumber).setPartName(partName)
-       .setModificationTime(modificationTime)
-       .setSize(size).build();
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java
deleted file mode 100644
index 26b5b1d7..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java
+++ /dev/null
@@ -1,183 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrefixInfo;
-
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-
-/**
- * Wrapper class for Ozone prefix path info, currently mainly targeted at
- * ACLs, but extensible for other OzFS optimizations in the future.
- */
-// TODO: support Auditable interface
-public final class OmPrefixInfo extends WithMetadata {
-
-  private String name;
-  private List<OzoneAcl> acls;
-
-  public OmPrefixInfo(String name, List<OzoneAcl> acls,
-      Map<String, String> metadata) {
-    this.name = name;
-    this.acls = acls;
-    this.metadata = metadata;
-  }
-
-  /**
-   * Returns the ACLs associated with this prefix.
-   * @return {@literal List<OzoneAcl>}
-   */
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-
-  public boolean addAcl(OzoneAcl acl) {
-    return OzoneAclUtil.addAcl(acls, acl);
-  }
-
-  public boolean removeAcl(OzoneAcl acl) {
-    return OzoneAclUtil.removeAcl(acls, acl);
-  }
-
-  public boolean setAcls(List<OzoneAcl> newAcls) {
-    return OzoneAclUtil.setAcl(acls, newAcls);
-  }
-
-  /**
-   * Returns the name of the prefix path.
-   * @return name of the prefix path.
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Returns new builder class that builds a OmPrefixInfo.
-   *
-   * @return Builder
-   */
-  public static OmPrefixInfo.Builder newBuilder() {
-    return new OmPrefixInfo.Builder();
-  }
-
-  /**
-   * Builder for OmPrefixInfo.
-   */
-  public static class Builder {
-    private String name;
-    private List<OzoneAcl> acls;
-    private Map<String, String> metadata;
-
-    public Builder() {
-      //Default values
-      this.acls = new LinkedList<>();
-      this.metadata = new HashMap<>();
-    }
-
-    public Builder setAcls(List<OzoneAcl> listOfAcls) {
-      if (listOfAcls != null) {
-        acls.addAll(listOfAcls);
-      }
-      return this;
-    }
-
-    public Builder setName(String n) {
-      this.name = n;
-      return this;
-    }
-
-    public OmPrefixInfo.Builder addMetadata(String key, String value) {
-      metadata.put(key, value);
-      return this;
-    }
-
-    public OmPrefixInfo.Builder addAllMetadata(
-        Map<String, String> additionalMetadata) {
-      if (additionalMetadata != null) {
-        metadata.putAll(additionalMetadata);
-      }
-      return this;
-    }
-
-    /**
-     * Constructs the OmPrefixInfo.
-     * @return instance of OmPrefixInfo.
-     */
-    public OmPrefixInfo build() {
-      Preconditions.checkNotNull(name);
-      return new OmPrefixInfo(name, acls, metadata);
-    }
-  }
-
-  /**
-   * Creates PrefixInfo protobuf from OmPrefixInfo.
-   */
-  public PrefixInfo getProtobuf() {
-    PrefixInfo.Builder pib =  PrefixInfo.newBuilder().setName(name)
-        .addAllMetadata(KeyValueUtil.toProtobuf(metadata));
-    if (acls != null) {
-      pib.addAllAcls(OzoneAclUtil.toProtobuf(acls));
-    }
-    return pib.build();
-  }
-
-  /**
-   * Parses PrefixInfo protobuf and creates OmPrefixInfo.
-   * @param prefixInfo
-   * @return instance of OmPrefixInfo
-   */
-  public static OmPrefixInfo getFromProtobuf(PrefixInfo prefixInfo) {
-    OmPrefixInfo.Builder opib = OmPrefixInfo.newBuilder()
-        .setName(prefixInfo.getName());
-    if (prefixInfo.getMetadataList() != null) {
-      opib.addAllMetadata(KeyValueUtil
-          .getFromProtobuf(prefixInfo.getMetadataList()));
-    }
-    if (prefixInfo.getAclsList() != null) {
-      opib.setAcls(OzoneAclUtil.fromProtobuf(prefixInfo.getAclsList()));
-    }
-    return opib.build();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    OmPrefixInfo that = (OmPrefixInfo) o;
-    return name.equals(that.name) &&
-        Objects.equals(acls, that.acls) &&
-        Objects.equals(metadata, that.metadata);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(name);
-  }
-}
-
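A short usage sketch of the builder and proto round-trip above; the prefix
path and metadata entry are illustrative values only:

    OmPrefixInfo prefixInfo = OmPrefixInfo.newBuilder()
        .setName("/vol1/bucket1/dir1/")       // illustrative prefix path
        .addMetadata("owner", "hadoop")       // hypothetical metadata entry
        .build();
    PrefixInfo proto = prefixInfo.getProtobuf();
    OmPrefixInfo roundTrip = OmPrefixInfo.getFromProtobuf(proto);
    // equals() compares name, acls, and metadata, so this holds:
    assert prefixInfo.equals(roundTrip);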
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
deleted file mode 100644
index 6453e8e..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
+++ /dev/null
@@ -1,359 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.Auditable;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
-
-import com.google.common.base.Preconditions;
-
-
-/**
- * A class that encapsulates volume arguments (OmVolumeArgs).
- */
-public final class OmVolumeArgs extends WithMetadata implements Auditable {
-  private final String adminName;
-  private String ownerName;
-  private final String volume;
-  private long creationTime;
-  private long quotaInBytes;
-  private final OmOzoneAclMap aclMap;
-  private long objectID;
-  private long updateID;
-
-  /**
-   * Set the Object ID. If this value is already set then this function throws.
-   * The field cannot be final because OmVolumeArgs is deserialized from
-   * the protobuf in many places in the code, and the object ID must be
-   * set after it is deserialized.
-   *
-   * @param obId - long
-   */
-  public void setObjectID(long obId) {
-    if(this.objectID != 0) {
-      throw new UnsupportedOperationException("Attempt to modify object ID " +
-          "which is not zero. Current Object ID is " + this.objectID);
-    }
-    this.objectID = obId;
-  }
-
-  /**
-   * Returns a monotonically increasing ID, that denotes the last update.
-   * Each time an update happens, this ID is incremented.
-   * @return long
-   */
-  public long getUpdateID() {
-    return updateID;
-  }
-
-  /**
-   * Sets the update ID. For each modification of this object, we will set
-   * this to a value greater than the current value.
-   * @param updateID  long
-   */
-  public void setUpdateID(long updateID) {
-    this.updateID = updateID;
-  }
-
-  /**
-   * An immutable identity field for this object.
-   * @return  long.
-   */
-  public long getObjectID() {
-    return objectID;
-  }
-
-  /**
-   * Private constructor, constructed via builder.
-   * @param adminName  - Administrator's name.
-   * @param ownerName  - Volume owner's name
-   * @param volume - volume name
-   * @param quotaInBytes - Volume Quota in bytes.
-   * @param metadata - metadata map for custom key/value data.
-   * @param aclMap - User to access rights map.
-   * @param creationTime - Volume creation time.
-   * @param  objectID - ID of this object.
-   * @param updateID - A sequence number that denotes the last update on this
-   * object. This is a monotonically increasing number.
-   */
-  @SuppressWarnings({"checkstyle:ParameterNumber", "This is invoked from a " +
-      "builder."})
-  private OmVolumeArgs(String adminName, String ownerName, String volume,
-                       long quotaInBytes, Map<String, String> metadata,
-                       OmOzoneAclMap aclMap, long creationTime, long objectID,
-                      long updateID) {
-    this.adminName = adminName;
-    this.ownerName = ownerName;
-    this.volume = volume;
-    this.quotaInBytes = quotaInBytes;
-    this.metadata = metadata;
-    this.aclMap = aclMap;
-    this.creationTime = creationTime;
-    this.objectID = objectID;
-    this.updateID = updateID;
-  }
-
-
-  public void setOwnerName(String newOwner) {
-    this.ownerName = newOwner;
-  }
-
-  public void setQuotaInBytes(long quotaInBytes) {
-    this.quotaInBytes = quotaInBytes;
-  }
-
-  public void setCreationTime(long time) {
-    this.creationTime = time;
-  }
-
-  public void addAcl(OzoneAcl acl) throws OMException {
-    this.aclMap.addAcl(acl);
-  }
-
-  public void setAcls(List<OzoneAcl> acls) throws OMException {
-    this.aclMap.setAcls(acls);
-  }
-
-  public void removeAcl(OzoneAcl acl) throws OMException {
-    this.aclMap.removeAcl(acl);
-  }
-
-  /**
-   * Returns the Admin Name.
-   * @return String.
-   */
-  public String getAdminName() {
-    return adminName;
-  }
-
-  /**
-   * Returns the owner Name.
-   * @return String
-   */
-  public String getOwnerName() {
-    return ownerName;
-  }
-
-  /**
-   * Returns the volume Name.
-   * @return String
-   */
-  public String getVolume() {
-    return volume;
-  }
-
-  /**
-   * Returns creation time.
-   * @return long
-   */
-  public long getCreationTime() {
-    return creationTime;
-  }
-
-  /**
-   * Returns Quota in Bytes.
-   * @return long, Quota in bytes.
-   */
-  public long getQuotaInBytes() {
-    return quotaInBytes;
-  }
-
-  public OmOzoneAclMap getAclMap() {
-    return aclMap;
-  }
-  /**
-   * Returns new builder class that builds a OmVolumeArgs.
-   *
-   * @return Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  @Override
-  public Map<String, String> toAuditMap() {
-    Map<String, String> auditMap = new LinkedHashMap<>();
-    auditMap.put(OzoneConsts.ADMIN, this.adminName);
-    auditMap.put(OzoneConsts.OWNER, this.ownerName);
-    auditMap.put(OzoneConsts.VOLUME, this.volume);
-    auditMap.put(OzoneConsts.CREATION_TIME, String.valueOf(this.creationTime));
-    auditMap.put(OzoneConsts.QUOTA_IN_BYTES, String.valueOf(this.quotaInBytes));
-    auditMap.put(OzoneConsts.OBJECT_ID, String.valueOf(this.getObjectID()));
-    auditMap.put(OzoneConsts.UPDATE_ID, String.valueOf(this.getUpdateID()));
-    return auditMap;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    OmVolumeArgs that = (OmVolumeArgs) o;
-    return Objects.equals(this.objectID, that.objectID);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(this.objectID);
-  }
-
-  /**
-   * Builder for OmVolumeArgs.
-   */
-  public static class Builder {
-    private String adminName;
-    private String ownerName;
-    private String volume;
-    private long creationTime;
-    private long quotaInBytes;
-    private Map<String, String> metadata;
-    private OmOzoneAclMap aclMap;
-    private long objectID;
-    private long updateID;
-
-    /**
-     * Sets the Object ID for this object. Object IDs are unique and
-     * immutable identifiers for each object in the system.
-     * @param objectID - long
-     */
-    public void setObjectID(long objectID) {
-      this.objectID = objectID;
-    }
-
-    /**
-     * Sets the update ID for this Object. Update IDs are monotonically
-     * increasing values which are updated each time there is an update.
-     * @param updateID - long
-     */
-    public void setUpdateID(long updateID) {
-      this.updateID = updateID;
-    }
-
-    /**
-     * Constructs a builder.
-     */
-    public Builder() {
-      metadata = new HashMap<>();
-      aclMap = new OmOzoneAclMap();
-    }
-
-    public Builder setAdminName(String admin) {
-      this.adminName = admin;
-      return this;
-    }
-
-    public Builder setOwnerName(String owner) {
-      this.ownerName = owner;
-      return this;
-    }
-
-    public Builder setVolume(String volumeName) {
-      this.volume = volumeName;
-      return this;
-    }
-
-    public Builder setCreationTime(long createdOn) {
-      this.creationTime = createdOn;
-      return this;
-    }
-
-    public Builder setQuotaInBytes(long quota) {
-      this.quotaInBytes = quota;
-      return this;
-    }
-
-    public Builder addMetadata(String key, String value) {
-      metadata.put(key, value); // overwrite if present.
-      return this;
-    }
-
-    public Builder addAllMetadata(Map<String, String> additionalMetaData) {
-      if (additionalMetaData != null) {
-        metadata.putAll(additionalMetaData);
-      }
-      return this;
-    }
-
-    public Builder addOzoneAcls(OzoneAclInfo acl) throws IOException {
-      aclMap.addAcl(acl);
-      return this;
-    }
-
-    /**
-     * Constructs an OmVolumeArgs object.
-     * @return OmVolumeArgs.
-     */
-    public OmVolumeArgs build() {
-      Preconditions.checkNotNull(adminName);
-      Preconditions.checkNotNull(ownerName);
-      Preconditions.checkNotNull(volume);
-      return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes,
-          metadata, aclMap, creationTime, objectID, updateID);
-    }
-
-  }
-
-  public VolumeInfo getProtobuf() {
-    List<OzoneAclInfo> aclList = aclMap.ozoneAclGetProtobuf();
-    return VolumeInfo.newBuilder()
-        .setAdminName(adminName)
-        .setOwnerName(ownerName)
-        .setVolume(volume)
-        .setQuotaInBytes(quotaInBytes)
-        .addAllMetadata(KeyValueUtil.toProtobuf(metadata))
-        .addAllVolumeAcls(aclList)
-        .setCreationTime(
-            creationTime == 0 ? System.currentTimeMillis() : creationTime)
-        .setObjectID(objectID)
-        .setUpdateID(updateID)
-        .build();
-  }
-
-  public static OmVolumeArgs getFromProtobuf(VolumeInfo volInfo)
-      throws OMException {
-    OmOzoneAclMap aclMap =
-        OmOzoneAclMap.ozoneAclGetFromProtobuf(volInfo.getVolumeAclsList());
-    return new OmVolumeArgs(
-        volInfo.getAdminName(),
-        volInfo.getOwnerName(),
-        volInfo.getVolume(),
-        volInfo.getQuotaInBytes(),
-        KeyValueUtil.getFromProtobuf(volInfo.getMetadataList()),
-        aclMap,
-        volInfo.getCreationTime(),
-        volInfo.getObjectID(),
-        volInfo.getUpdateID());
-  }
-}
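A brief sketch of the one-shot setObjectID() contract described above; the
names are illustrative:

    OmVolumeArgs volArgs = OmVolumeArgs.newBuilder()
        .setAdminName("admin")
        .setOwnerName("owner")
        .setVolume("vol1")
        .build();
    volArgs.setObjectID(42L);    // first assignment, e.g. after deserialization
    // volArgs.setObjectID(43L); // would throw UnsupportedOperationException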
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java
deleted file mode 100644
index 11ee622..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-/**
- * This class represents an open key "session". A session here means a key
- * is opened by a specific client; the client sends the handle to the
- * server so that the server can recognize this client and thus knows how
- * to close the key.
- */
-public class OpenKeySession {
-  private final long id;
-  private final OmKeyInfo keyInfo;
-  // The version of the key when it is opened in this session.
-  // A block whose create version equals the open version will be committed
-  // only when this open session is closed.
-  private long openVersion;
-
-  public OpenKeySession(long id, OmKeyInfo info, long version) {
-    this.id = id;
-    this.keyInfo = info;
-    this.openVersion = version;
-  }
-
-  public long getOpenVersion() {
-    return this.openVersion;
-  }
-
-  public OmKeyInfo getKeyInfo() {
-    return keyInfo;
-  }
-
-  public long getId() {
-    return id;
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java
deleted file mode 100644
index fd42fea..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java
+++ /dev/null
@@ -1,286 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.hadoop.ozone.security.acl.RequestContext;
-
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE;
-
-/**
- * Helper class for ozone acls operations.
- */
-public final class OzoneAclUtil {
-
-  private OzoneAclUtil(){
-  }
-
-  /**
-   * Helper function to get the access acl list for the current user.
-   *
-   * @param userName
-   * @param userGroups
-   * @return list of OzoneAcls
-   * */
-  public static List<OzoneAcl> getAclList(String userName,
-      List<String> userGroups, ACLType userRights, ACLType groupRights) {
-
-    List<OzoneAcl> listOfAcls = new ArrayList<>();
-
-    // User ACL.
-    listOfAcls.add(new OzoneAcl(USER, userName, userRights, ACCESS));
-    if(userGroups != null) {
-      // Group ACLs of the User.
-      userGroups.forEach((group) -> listOfAcls.add(
-          new OzoneAcl(GROUP, group, groupRights, ACCESS)));
-    }
-    return listOfAcls;
-  }
-
-  /**
-   * Check if the acl right requested for the given RequestContext exists
-   * in the provided acl list.
-   * Acl validation rules:
-   * 1. If the user/group has the ALL bit set, then the user has all rights.
-   * 2. If the user/group has the NONE bit set, then the user/group has no
-   * rights.
-   * 3. For all other individual rights, the corresponding bits must be set.
-   *
-   * @param acls
-   * @param context
-   * @return true if the acl list contains the right requested in the context.
-   */
-  public static boolean checkAclRight(List<OzoneAcl> acls,
-      RequestContext context) throws OMException {
-    String[] userGroups = context.getClientUgi().getGroupNames();
-    String userName = context.getClientUgi().getUserName();
-    ACLType aclToCheck = context.getAclRights();
-    for (OzoneAcl a : acls) {
-      if(checkAccessInAcl(a, userGroups, userName, aclToCheck)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  private static boolean checkAccessInAcl(OzoneAcl a, String[] groups,
-      String username, ACLType aclToCheck) {
-    BitSet rights = a.getAclBitSet();
-    switch (a.getType()) {
-    case USER:
-      if (a.getName().equals(username)) {
-        return checkIfAclBitIsSet(aclToCheck, rights);
-      }
-      break;
-    case GROUP:
-      for (String grp : groups) {
-        if (a.getName().equals(grp)) {
-          return checkIfAclBitIsSet(aclToCheck, rights);
-        }
-      }
-      break;
-
-    default:
-      return checkIfAclBitIsSet(aclToCheck, rights);
-    }
-    return false;
-  }
-
-  /**
-   * Check if the acl right requested for the given RequestContext exists
-   * in the provided acl list.
-   * Acl validation rules:
-   * 1. If the user/group has the ALL bit set, then the user has all rights.
-   * 2. If the user/group has the NONE bit set, then the user/group has no
-   * rights.
-   * 3. For all other individual rights, the corresponding bits must be set.
-   *
-   * @param acls
-   * @param context
-   * @return true if the acl list contains the right requested in the context.
-   */
-  public static boolean checkAclRights(List<OzoneAcl> acls,
-      RequestContext context) throws OMException {
-    String[] userGroups = context.getClientUgi().getGroupNames();
-    String userName = context.getClientUgi().getUserName();
-    ACLType aclToCheck = context.getAclRights();
-    for (OzoneAcl acl : acls) {
-      if (checkAccessInAcl(acl, userGroups, userName, aclToCheck)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Helper function to check if the bit for the given acl is set.
-   * @param acl
-   * @param bitset
-   * @return true if the acl bit is set, else false.
-   */
-  public static boolean checkIfAclBitIsSet(IAccessAuthorizer.ACLType acl,
-      BitSet bitset) {
-    if (bitset == null) {
-      return false;
-    }
-
-    return ((bitset.get(acl.ordinal())
-        || bitset.get(ALL.ordinal()))
-        && !bitset.get(NONE.ordinal()));
-  }
-
-  /**
-   * Helper function to inherit default ACL as access ACL for child object.
-   * 1. deep copy of OzoneAcl to avoid unexpected parent default ACL change
-   * 2. merge inherited access ACL with existing access ACL via
-   * OzoneAclUtil.addAcl().
-   * @param acls
-   * @param parentAcls
-   * @return true if acls inherited DEFAULT acls from parentAcls successfully,
-   * false otherwise.
-   */
-  public static boolean inheritDefaultAcls(List<OzoneAcl> acls,
-      List<OzoneAcl> parentAcls) {
-    List<OzoneAcl> inheritedAcls = null;
-    if (parentAcls != null && !parentAcls.isEmpty()) {
-      inheritedAcls = parentAcls.stream()
-          .filter(a -> a.getAclScope() == DEFAULT)
-          .map(acl -> new OzoneAcl(acl.getType(), acl.getName(),
-              acl.getAclBitSet(), OzoneAcl.AclScope.ACCESS))
-          .collect(Collectors.toList());
-    }
-    if (inheritedAcls != null && !inheritedAcls.isEmpty()) {
-      inheritedAcls.stream().forEach(acl -> addAcl(acls, acl));
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * Convert a list of OzoneAclInfo (protobuf) to a list of OzoneAcl (java).
-   * @param protoAcls
-   * @return list of OzoneAcl.
-   */
-  public static List<OzoneAcl> fromProtobuf(List<OzoneAclInfo> protoAcls) {
-    return protoAcls.stream().map(acl->OzoneAcl.fromProtobuf(acl))
-        .collect(Collectors.toList());
-  }
-
-  /**
-   * Convert a list of OzoneAcl (java) to a list of OzoneAclInfo (protobuf).
-   * @param protoAcls
-   * @return list of OzoneAclInfo.
-   */
-  public static List<OzoneAclInfo> toProtobuf(List<OzoneAcl> protoAcls) {
-    return protoAcls.stream().map(acl->OzoneAcl.toProtobuf(acl))
-        .collect(Collectors.toList());
-  }
-
-  /**
-   * Add an OzoneAcl to existing list of OzoneAcls.
-   * @param existingAcls
-   * @param acl
-   * @return true if current OzoneAcls are changed, false otherwise.
-   */
-  public static boolean addAcl(List<OzoneAcl> existingAcls, OzoneAcl acl) {
-    if (existingAcls == null || acl == null) {
-      return false;
-    }
-
-    for (OzoneAcl a: existingAcls) {
-      if (a.getName().equals(acl.getName()) &&
-          a.getType().equals(acl.getType()) &&
-          a.getAclScope().equals(acl.getAclScope())) {
-        BitSet current = a.getAclBitSet();
-        BitSet original = (BitSet) current.clone();
-        current.or(acl.getAclBitSet());
-        if (current.equals(original)) {
-          return false;
-        }
-        return true;
-      }
-    }
-
-    existingAcls.add(acl);
-    return true;
-  }
-
-  /**
-   * Remove an OzoneAcl from the existing list of OzoneAcls.
-   * @param existingAcls
-   * @param acl
-   * @return true if current OzoneAcls are changed, false otherwise.
-   */
-  public static boolean removeAcl(List<OzoneAcl> existingAcls, OzoneAcl acl) {
-    if (existingAcls == null || existingAcls.isEmpty() || acl == null) {
-      return false;
-    }
-
-    for (OzoneAcl a: existingAcls) {
-      if (a.getName().equals(acl.getName()) &&
-          a.getType().equals(acl.getType()) &&
-          a.getAclScope().equals(acl.getAclScope())) {
-        BitSet current = a.getAclBitSet();
-        BitSet original = (BitSet) current.clone();
-        current.andNot(acl.getAclBitSet());
-
-        if (current.equals(original)) {
-          return false;
-        }
-
-        if (current.isEmpty()) {
-          existingAcls.remove(a);
-        }
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Set existingAcls to newAcls.
-   * @param existingAcls
-   * @param newAcls
-   * @return true if newAcls are set successfully, false otherwise.
-   */
-  public static boolean setAcl(List<OzoneAcl> existingAcls,
-      List<OzoneAcl> newAcls) {
-    if (existingAcls == null) {
-      return false;
-    } else {
-      existingAcls.clear();
-      if (newAcls != null) {
-        existingAcls.addAll(newAcls);
-      }
-    }
-    return true;
-  }
-}
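The add/remove paths above are plain BitSet merges: addAcl() ORs the new
rights into the stored set and reports a change only when new bits appear;
removeAcl() ANDs them out and drops the entry once no bits remain. A
self-contained sketch with stand-in ordinals:

    import java.util.BitSet;

    public class AclMergeSketch {
      public static void main(String[] args) {
        final int READ = 0, WRITE = 1;
        BitSet stored = new BitSet();
        stored.set(READ);

        BitSet grantWrite = new BitSet();
        grantWrite.set(WRITE);

        BitSet before = (BitSet) stored.clone();
        stored.or(grantWrite);                       // addAcl() path
        System.out.println(!stored.equals(before));  // true: rights grew

        stored.andNot(grantWrite);                   // removeAcl() path
        System.out.println(stored);                  // {0}: READ remains
      }
    }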
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
deleted file mode 100644
index 07f3194..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.fs.Path;
-
-import java.nio.file.Paths;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
-
-/**
- * Utility class for OzoneFileSystem.
- */
-public final class OzoneFSUtils {
-
-  private OzoneFSUtils() {}
-
-  /**
-   * Returns string representation of path after removing the leading slash.
-   */
-  public static String pathToKey(Path path) {
-    return path.toString().substring(1);
-  }
-
-  /**
-   * Returns string representation of the input path parent. The function adds
-   * a trailing slash if it does not exist and returns an empty string if the
-   * parent is root.
-   */
-  public static String getParent(String keyName) {
-    java.nio.file.Path parentDir = Paths.get(keyName).getParent();
-    if (parentDir == null) {
-      return "";
-    }
-    return addTrailingSlashIfNeeded(parentDir.toString());
-  }
-
-  /**
-   * Returns the immediate child of the given ancestor on the path to the
-   * given descendant. For example, if the ancestor is /a/b and the descendant
-   * is /a/b/c/d/e, the function returns /a/b/c/. If the descendant itself is
-   * the immediate child, it is returned as is, without a trailing slash;
-   * this distinguishes files from directories, since in Ozone file keys do
-   * not carry a trailing slash.
-   */
-  public static String getImmediateChild(String descendant, String ancestor) {
-    ancestor =
-        !ancestor.isEmpty() ? addTrailingSlashIfNeeded(ancestor) : ancestor;
-    if (!descendant.startsWith(ancestor)) {
-      return null;
-    }
-    java.nio.file.Path descendantPath = Paths.get(descendant);
-    java.nio.file.Path ancestorPath = Paths.get(ancestor);
-    int ancestorPathNameCount =
-        ancestor.isEmpty() ? 0 : ancestorPath.getNameCount();
-    if (descendantPath.getNameCount() - ancestorPathNameCount > 1) {
-      return addTrailingSlashIfNeeded(
-          ancestor + descendantPath.getName(ancestorPathNameCount));
-    }
-    return descendant;
-  }
-
-  public static String addTrailingSlashIfNeeded(String key) {
-    if (!key.endsWith(OZONE_URI_DELIMITER)) {
-      return key + OZONE_URI_DELIMITER;
-    } else {
-      return key;
-    }
-  }
-
-  public static boolean isFile(String keyName) {
-    return !keyName.endsWith(OZONE_URI_DELIMITER);
-  }
-}
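[Editor's note] For reference, the path helpers above behave as follows on sample keys (a sketch; the key names are made up and "/" is OZONE_URI_DELIMITER in this codebase):

    OzoneFSUtils.getParent("a/b/c");                    // "a/b/" (trailing slash added)
    OzoneFSUtils.getParent("a");                        // ""     (parent is root)
    OzoneFSUtils.getImmediateChild("a/b/c/d/e", "a/b"); // "a/b/c/" (directory form)
    OzoneFSUtils.getImmediateChild("a/b/c", "a/b");     // "a/b/c"  (file: no slash)
    OzoneFSUtils.isFile("a/b/c");                       // true
    OzoneFSUtils.isFile("a/b/");                        // false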
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java
deleted file mode 100644
index 8717946..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.protocolPB.PBHelper;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto;
-
-import java.io.IOException;
-import java.net.URI;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
-
-/**
- * File Status of the Ozone Key.
- */
-public class OzoneFileStatus extends FileStatus {
-
-  private static final long serialVersionUID = 1L;
-
-  private transient OmKeyInfo keyInfo;
-
-  public OzoneFileStatus(OmKeyInfo key, long blockSize, boolean isDirectory) {
-    super(key.getDataSize(), isDirectory, key.getFactor().getNumber(),
-        blockSize, key.getModificationTime(), getPath(key.getKeyName()));
-    keyInfo = key;
-  }
-
-  public OzoneFileStatus(FileStatus status) throws IOException {
-    super(status);
-  }
-
-  // Use this constructor only for directories
-  public OzoneFileStatus(String keyName) {
-    super(0, true, 0, 0, 0, getPath(keyName));
-  }
-
-  public OzoneFileStatusProto getProtobuf() throws IOException {
-    return OzoneFileStatusProto.newBuilder().setStatus(PBHelper.convert(this))
-        .build();
-  }
-
-  public static OzoneFileStatus getFromProtobuf(OzoneFileStatusProto response)
-      throws IOException {
-    return new OzoneFileStatus(PBHelper.convert(response.getStatus()));
-  }
-
-  public static Path getPath(String keyName) {
-    return new Path(OZONE_URI_DELIMITER + keyName);
-  }
-
-  public FileStatus makeQualified(URI defaultUri, Path parent,
-                                  String owner, String group) {
-    // fully-qualify path
-    setPath(parent.makeQualified(defaultUri, null));
-    setGroup(group);
-    setOwner(owner);
-    if (isDirectory()) {
-      setPermission(FsPermission.getDirDefault());
-    } else {
-      setPermission(FsPermission.getFileDefault());
-    }
-    return this; // API compatibility
-  }
-
-  /** Get the modification time of the file/directory.
-   *
-   * o3fs uses objects as "fake" directories, which are not updated to
-   * reflect the accurate modification time. We choose to report the
-   * current time because some parts of the ecosystem (e.g. the
-   * HistoryServer) use modification time to ignore "old" directories.
-   *
-   * @return for files the modification time in milliseconds since January 1,
-   *         1970 UTC or for directories the current time.
-   */
-  @Override
-  public long getModificationTime(){
-    if (isDirectory()) {
-      return System.currentTimeMillis();
-    } else {
-      return super.getModificationTime();
-    }
-  }
-
-  public OmKeyInfo getKeyInfo() {
-    return keyInfo;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    return super.equals(o);
-  }
-
-  @Override
-  public int hashCode() {
-    return super.hashCode();
-  }
-}
\ No newline at end of file
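[Editor's note] A small usage sketch of the directory behavior documented above, assuming the Hadoop FileStatus API is on the classpath:

    OzoneFileStatus dir = new OzoneFileStatus("dir1/"); // directory-only constructor
    boolean isDir = dir.isDirectory();                  // true
    // Fake directories report "now", so consumers such as the HistoryServer
    // do not treat them as stale:
    long mtime = dir.getModificationTime();             // ~System.currentTimeMillis()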
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
deleted file mode 100644
index c28c2c8..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import java.util.ArrayList;
-import java.util.List;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .RepeatedKeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyInfo;
-
-/**
- * Args for deleted keys. This is written to the om metadata deletedTable.
- * Once a key is deleted, it is moved to the om metadata deletedTable. Having
- * a {label: List<OMKeyInfo>} mapping ensures that if users create and delete
- * keys with the exact same uri multiple times, all the delete instances are
- * bundled under the same key name. This is useful as part of GDPR compliance,
- * where an admin wants to confirm whether a given key has been deleted from
- * the deletedTable metadata.
- */
-public class RepeatedOmKeyInfo {
-  private List<OmKeyInfo> omKeyInfoList;
-
-  public RepeatedOmKeyInfo(List<OmKeyInfo> omKeyInfos) {
-    this.omKeyInfoList = omKeyInfos;
-  }
-
-  public RepeatedOmKeyInfo(OmKeyInfo omKeyInfos) {
-    this.omKeyInfoList = new ArrayList<>();
-    this.omKeyInfoList.add(omKeyInfos);
-  }
-
-  public void addOmKeyInfo(OmKeyInfo info) {
-    this.omKeyInfoList.add(info);
-  }
-
-  public List<OmKeyInfo> getOmKeyInfoList() {
-    return omKeyInfoList;
-  }
-
-  public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo
-      repeatedKeyInfo) {
-    List<OmKeyInfo> list = new ArrayList<>();
-    for(KeyInfo k : repeatedKeyInfo.getKeyInfoList()) {
-      list.add(OmKeyInfo.getFromProtobuf(k));
-    }
-    return new RepeatedOmKeyInfo.Builder().setOmKeyInfos(list).build();
-  }
-
-  public RepeatedKeyInfo getProto() {
-    List<KeyInfo> list = new ArrayList<>();
-    for(OmKeyInfo k : omKeyInfoList) {
-      list.add(k.getProtobuf());
-    }
-
-    RepeatedKeyInfo.Builder builder = RepeatedKeyInfo.newBuilder()
-        .addAllKeyInfo(list);
-    return builder.build();
-  }
-
-  /**
-   * Builder of RepeatedOmKeyInfo.
-   */
-  public static class Builder {
-    private List<OmKeyInfo> omKeyInfos;
-
-    public Builder(){}
-
-    public Builder setOmKeyInfos(List<OmKeyInfo> infoList) {
-      this.omKeyInfos = infoList;
-      return this;
-    }
-
-    public RepeatedOmKeyInfo build() {
-      return new RepeatedOmKeyInfo(omKeyInfos);
-    }
-  }
-}
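[Editor's note] A sketch of the bundling described in the class javadoc; keyInfo1 and keyInfo2 are placeholders for OmKeyInfo instances recorded for two deletes of the same key uri:

    RepeatedOmKeyInfo bundle = new RepeatedOmKeyInfo(keyInfo1);
    bundle.addOmKeyInfo(keyInfo2);             // second delete of the same uri
    int n = bundle.getOmKeyInfoList().size();  // 2: both delete instances live
                                               // under one deletedTable entry
    RepeatedKeyInfo proto = bundle.getProto();               // serialize
    RepeatedOmKeyInfo back = RepeatedOmKeyInfo.getFromProto(proto);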
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
deleted file mode 100644
index 5f65114..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-
-import java.util.Objects;
-
-/**
- * S3Secret to be saved in database.
- */
-public class S3SecretValue {
-  private String kerberosID;
-  private String awsSecret;
-
-  public S3SecretValue(String kerberosID, String awsSecret) {
-    this.kerberosID = kerberosID;
-    this.awsSecret = awsSecret;
-  }
-
-  public String getKerberosID() {
-    return kerberosID;
-  }
-
-  public void setKerberosID(String kerberosID) {
-    this.kerberosID = kerberosID;
-  }
-
-  public String getAwsSecret() {
-    return awsSecret;
-  }
-
-  public void setAwsSecret(String awsSecret) {
-    this.awsSecret = awsSecret;
-  }
-
-  public String getAwsAccessKey() {
-    return kerberosID;
-  }
-
-  public static S3SecretValue fromProtobuf(
-      OzoneManagerProtocolProtos.S3Secret s3Secret) {
-    return new S3SecretValue(s3Secret.getKerberosID(), s3Secret.getAwsSecret());
-  }
-
-  public OzoneManagerProtocolProtos.S3Secret getProtobuf() {
-    return OzoneManagerProtocolProtos.S3Secret.newBuilder()
-        .setAwsSecret(this.awsSecret)
-        .setKerberosID(this.kerberosID)
-        .build();
-  }
-
-  @Override
-  public String toString() {
-    return "awsAccessKey=" + kerberosID + "\nawsSecret=" + awsSecret;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    S3SecretValue that = (S3SecretValue) o;
-    return kerberosID.equals(that.kerberosID) &&
-        awsSecret.equals(that.awsSecret);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(kerberosID, awsSecret);
-  }
-}
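[Editor's note] A round-trip sketch using only the methods above, with sample credential values:

    S3SecretValue secret = new S3SecretValue("testuser@EXAMPLE.COM", "secret123");
    OzoneManagerProtocolProtos.S3Secret proto = secret.getProtobuf();
    S3SecretValue back = S3SecretValue.fromProtobuf(proto);
    boolean same = secret.equals(back);    // true
    String access = secret.getAwsAccessKey(); // the kerberos id doubles as
                                              // the S3 access key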
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
deleted file mode 100644
index dce4f8e..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
+++ /dev/null
@@ -1,224 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .ServicePort;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-/**
- * ServiceInfo holds the config details of Ozone services.
- */
-public final class ServiceInfo {
-
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(ServiceInfo.class);
-  private static final ObjectWriter WRITER =
-      new ObjectMapper().writerWithDefaultPrettyPrinter();
-
-  /**
-   * Type of node/service.
-   */
-  private NodeType nodeType;
-  /**
-   * Hostname of the node in which the service is running.
-   */
-  private String hostname;
-
-  /**
-   * List of ports the service listens to.
-   */
-  private Map<ServicePort.Type, Integer> ports;
-
-  /**
-   * Default constructor for JSON deserialization.
-   */
-  public ServiceInfo() {}
-
-  /**
-   * Constructs the ServiceInfo for the {@code nodeType}.
-   * @param nodeType type of node/service
-   * @param hostname hostname of the service
-   * @param portList list of ports the service listens to
-   */
-  private ServiceInfo(
-      NodeType nodeType, String hostname, List<ServicePort> portList) {
-    Preconditions.checkNotNull(nodeType);
-    Preconditions.checkNotNull(hostname);
-    this.nodeType = nodeType;
-    this.hostname = hostname;
-    this.ports = new HashMap<>();
-    for (ServicePort port : portList) {
-      ports.put(port.getType(), port.getValue());
-    }
-  }
-
-  /**
-   * Returns the type of node/service.
-   * @return node type
-   */
-  public NodeType getNodeType() {
-    return nodeType;
-  }
-
-  /**
-   * Returns the hostname of the service.
-   * @return hostname
-   */
-  public String getHostname() {
-    return hostname;
-  }
-
-  /**
-   * Returns ServicePort.Type to port mappings.
-   * @return ports
-   */
-  public Map<ServicePort.Type, Integer> getPorts() {
-    return ports;
-  }
-
-  /**
-   * Returns the port for given type.
-   *
-   * @param type the type of port,
-   *             e.g. RPC, HTTP, HTTPS, etc.
-   * @return the port number for the given type.
-   * @throws NullPointerException if the service doesn't support the given type
-   */
-  @JsonIgnore
-  public int getPort(ServicePort.Type type) {
-    return ports.get(type);
-  }
-
-  /**
-   * Returns the address of the service (hostname with port of the given type).
-   * @param portType the type of port, eg. RPC, HTTP, etc.
-   * @return service address (hostname with port of the given type)
-   */
-  @JsonIgnore
-  public String getServiceAddress(ServicePort.Type portType) {
-    return hostname + ":" + getPort(portType);
-  }
-
-  /**
-   * Converts {@link ServiceInfo} to OzoneManagerProtocolProtos.ServiceInfo.
-   *
-   * @return OzoneManagerProtocolProtos.ServiceInfo
-   */
-  @JsonIgnore
-  public OzoneManagerProtocolProtos.ServiceInfo getProtobuf() {
-    OzoneManagerProtocolProtos.ServiceInfo.Builder builder =
-        OzoneManagerProtocolProtos.ServiceInfo.newBuilder();
-    builder.setNodeType(nodeType)
-        .setHostname(hostname)
-        .addAllServicePorts(
-            ports.entrySet().stream()
-                .map(
-                    entry ->
-                        ServicePort.newBuilder()
-                            .setType(entry.getKey())
-                            .setValue(entry.getValue()).build())
-                .collect(Collectors.toList()));
-    return builder.build();
-  }
-
-  /**
-   * Converts OzoneManagerProtocolProtos.ServiceInfo to {@link ServiceInfo}.
-   *
-   * @return {@link ServiceInfo}
-   */
-  @JsonIgnore
-  public static ServiceInfo getFromProtobuf(
-      OzoneManagerProtocolProtos.ServiceInfo serviceInfo) {
-    return new ServiceInfo(serviceInfo.getNodeType(),
-        serviceInfo.getHostname(),
-        serviceInfo.getServicePortsList());
-  }
-
-  /**
-   * Creates a new builder to build {@link ServiceInfo}.
-   * @return {@link ServiceInfo.Builder}
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder used to build/construct {@link ServiceInfo}.
-   */
-  public static class Builder {
-
-    private NodeType node;
-    private String host;
-    private List<ServicePort> portList = new ArrayList<>();
-
-
-    /**
-     * Sets the node/service type.
-     * @param nodeType type of node
-     * @return the builder
-     */
-    public Builder setNodeType(NodeType nodeType) {
-      node = nodeType;
-      return this;
-    }
-
-    /**
-     * Sets the hostname of the service.
-     * @param hostname service hostname
-     * @return the builder
-     */
-    public Builder setHostname(String hostname) {
-      host = hostname;
-      return this;
-    }
-
-    /**
-     * Adds the service port to the service port list.
-     * @param servicePort service port to add.
-     * @return the builder
-     */
-    public Builder addServicePort(ServicePort servicePort) {
-      portList.add(servicePort);
-      return this;
-    }
-
-
-    /**
-     * Builds and returns {@link ServiceInfo} with the set values.
-     * @return {@link ServiceInfo}
-     */
-    public ServiceInfo build() {
-      return new ServiceInfo(node, host, portList);
-    }
-  }
-
-}
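[Editor's note] A builder-usage sketch; NodeType.OM and ServicePort.Type.RPC are assumed values from the protobuf imports above, and the hostname and port are sample values:

    ServiceInfo info = ServiceInfo.newBuilder()
        .setNodeType(NodeType.OM)
        .setHostname("om1.example.com")
        .addServicePort(ServicePort.newBuilder()
            .setType(ServicePort.Type.RPC)
            .setValue(9862)
            .build())
        .build();
    String addr = info.getServiceAddress(ServicePort.Type.RPC); // "om1.example.com:9862"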
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfoEx.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfoEx.java
deleted file mode 100644
index a90be63..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfoEx.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.helpers;
-
-import java.util.List;
-
-/**
- * Wrapper class for service discovery, designed for broader usage such as
- * security, etc.
- */
-public class ServiceInfoEx {
-
-  private List<ServiceInfo> infoList;
-
-  // PEM encoded string of SCM CA certificate.
-  private String caCertificate;
-
-  public ServiceInfoEx(List<ServiceInfo> infoList,
-      String caCertificate) {
-    this.infoList = infoList;
-    this.caCertificate = caCertificate;
-  }
-
-  public List<ServiceInfo> getServiceInfoList() {
-    return infoList;
-  }
-
-  public String getCaCertificate() {
-    return caCertificate;
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java
deleted file mode 100644
index 6fc7c8f..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import com.google.common.base.Preconditions;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * A class that encapsulates the createVolume Args.
- */
-public final class VolumeArgs {
-  private final String adminName;
-  private final String ownerName;
-  private final String volume;
-  private final long quotaInBytes;
-  private final Map<String, String> extendedAttributes;
-
-  /**
-   * Private constructor, constructed via builder.
-   *
-   * @param adminName - Administrator name.
-   * @param ownerName - Volume owner's name
-   * @param volume - volume name
-   * @param quotaInBytes - Volume Quota in bytes.
-   * @param keyValueMap - keyValue map.
-   */
-  private VolumeArgs(String adminName, String ownerName, String volume,
-      long quotaInBytes, Map<String, String> keyValueMap) {
-    this.adminName = adminName;
-    this.ownerName = ownerName;
-    this.volume = volume;
-    this.quotaInBytes = quotaInBytes;
-    this.extendedAttributes = keyValueMap;
-  }
-
-  /**
-   * Returns the Admin Name.
-   *
-   * @return String.
-   */
-  public String getAdminName() {
-    return adminName;
-  }
-
-  /**
-   * Returns the owner Name.
-   *
-   * @return String
-   */
-  public String getOwnerName() {
-    return ownerName;
-  }
-
-  /**
-   * Returns the volume Name.
-   *
-   * @return String
-   */
-  public String getVolume() {
-    return volume;
-  }
-
-  /**
-   * Returns Quota in Bytes.
-   *
-   * @return long, Quota in bytes.
-   */
-  public long getQuotaInBytes() {
-    return quotaInBytes;
-  }
-
-  public Map<String, String> getExtendedAttributes() {
-    return extendedAttributes;
-  }
-
-  static class Builder {
-    private String adminName;
-    private String ownerName;
-    private String volume;
-    private long quotaInBytes;
-    private Map<String, String> extendedAttributes;
-
-    /**
-     * Constructs a builder.
-     */
-    Builder() {
-      extendedAttributes = new HashMap<>();
-    }
-
-    public void setAdminName(String adminName) {
-      this.adminName = adminName;
-    }
-
-    public void setOwnerName(String ownerName) {
-      this.ownerName = ownerName;
-    }
-
-    public void setVolume(String volume) {
-      this.volume = volume;
-    }
-
-    public void setQuotaInBytes(long quotaInBytes) {
-      this.quotaInBytes = quotaInBytes;
-    }
-
-    public void addMetadata(String key, String value) {
-      extendedAttributes.put(key, value); // overwrite if present.
-    }
-
-    /**
-     * Constructs a VolumeArgs instance.
-     *
-     * @return VolumeArgs.
-     */
-    public VolumeArgs build() {
-      Preconditions.checkNotNull(adminName);
-      Preconditions.checkNotNull(ownerName);
-      Preconditions.checkNotNull(volume);
-      return new VolumeArgs(adminName, ownerName, volume, quotaInBytes,
-          extendedAttributes);
-    }
-  }
-}
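[Editor's note] Note that the Builder above is package-private and its setters are not fluent, so usage (from the same package) looks like this sketch with sample names:

    VolumeArgs.Builder builder = new VolumeArgs.Builder();
    builder.setAdminName("hdfs");
    builder.setOwnerName("bilbo");
    builder.setVolume("vol1");
    builder.setQuotaInBytes(1024L * 1024 * 1024);   // 1 GiB
    builder.addMetadata("project", "shire");        // overwrites if present
    VolumeArgs args = builder.build();              // null-checks admin/owner/volume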
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java
deleted file mode 100644
index 5c49a15a..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Mixin class to handle custom metadata.
- */
-public class WithMetadata {
-
-  @SuppressWarnings("visibilitymodifier")
-  protected Map<String, String> metadata = new HashMap<>();
-
-  /**
-   * Custom key value metadata.
-   */
-  public Map<String, String> getMetadata() {
-    return metadata;
-  }
-
-  /**
-   * Set custom key value metadata.
-   */
-  public void setMetadata(Map<String, String> metadata) {
-    this.metadata = metadata;
-  }
-
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
deleted file mode 100644
index b1211d8..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java
deleted file mode 100644
index 31f0924..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java
+++ /dev/null
@@ -1,477 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.lock;
-
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.function.Consumer;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.lock.LockManager;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK;
-
-/**
- * Provides different locks to handle concurrency in OzoneManager.
- * We also maintain lock hierarchy, based on the weight.
- *
- * <table>
- *   <caption></caption>
- *   <tr>
- *     <td><b> WEIGHT </b></td> <td><b> LOCK </b></td>
- *   </tr>
- *   <tr>
- *     <td> 0 </td> <td> S3 Bucket Lock </td>
- *   </tr>
- *   <tr>
- *     <td> 1 </td> <td> Volume Lock </td>
- *   </tr>
- *   <tr>
- *     <td> 2 </td> <td> Bucket Lock </td>
- *   </tr>
- *   <tr>
- *     <td> 3 </td> <td> User Lock </td>
- *   </tr>
- *   <tr>
- *     <td> 4 </td> <td> S3 Secret Lock</td>
- *   </tr>
- *   <tr>
- *     <td> 5 </td> <td> Prefix Lock </td>
- *   </tr>
- * </table>
- *
- * One cannot obtain a lower weight lock while holding a lock with higher
- * weight. The other way around is possible. <br>
- * <br>
- * <p>
- * For example:
- * <br>
- * {@literal ->} acquire volume lock (will work)<br>
- *   {@literal +->} acquire bucket lock (will work)<br>
- *     {@literal +-->} acquire s3 bucket lock (will throw Exception)<br>
- * </p>
- * <br>
- */
-
-public class OzoneManagerLock {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneManagerLock.class);
-
-  private static final String READ_LOCK = "read";
-  private static final String WRITE_LOCK = "write";
-
-  private final LockManager<String> manager;
-  private final ThreadLocal<Short> lockSet = ThreadLocal.withInitial(
-      () -> Short.valueOf((short)0));
-
-
-  /**
-   * Creates new OzoneManagerLock instance.
-   * @param conf Configuration object
-   */
-  public OzoneManagerLock(Configuration conf) {
-    boolean fair = conf.getBoolean(OZONE_MANAGER_FAIR_LOCK,
-        OZONE_MANAGER_FAIR_LOCK_DEFAULT);
-    manager = new LockManager<>(conf, fair);
-  }
-
-  /**
-   * Acquire lock on resource.
-   *
-   * For S3_BUCKET_LOCK, VOLUME_LOCK and BUCKET_LOCK type resources, the same
-   * thread may acquire the lock again.
-   *
-   * For USER_LOCK, PREFIX_LOCK and S3_SECRET_LOCK type resources, the same
-   * thread may not acquire the lock again.
-   *
-   * Special note for USER_LOCK: a single thread can acquire either a single
-   * user lock or a multi user lock, but not both at the same time.
-   * @param resource - Type of the resource.
-   * @param resources - Resource names on which the user wants to acquire the
-   * lock. For resource type BUCKET_LOCK, the first param should be the volume
-   * name and the second the bucket name. For all remaining resource types
-   * only one param should be passed.
-   */
-  @Deprecated
-  public boolean acquireLock(Resource resource, String... resources) {
-    String resourceName = generateResourceName(resource, resources);
-    return lock(resource, resourceName, manager::writeLock, WRITE_LOCK);
-  }
-
-  /**
-   * Acquire read lock on resource.
-   *
-   * For S3_BUCKET_LOCK, VOLUME_LOCK and BUCKET_LOCK type resources, the same
-   * thread may acquire the lock again.
-   *
-   * For USER_LOCK, PREFIX_LOCK and S3_SECRET_LOCK type resources, the same
-   * thread may not acquire the lock again.
-   *
-   * Special note for USER_LOCK: a single thread can acquire either a single
-   * user lock or a multi user lock, but not both at the same time.
-   * @param resource - Type of the resource.
-   * @param resources - Resource names on which the user wants to acquire the
-   * lock. For resource type BUCKET_LOCK, the first param should be the volume
-   * name and the second the bucket name. For all remaining resource types
-   * only one param should be passed.
-   */
-  public boolean acquireReadLock(Resource resource, String... resources) {
-    String resourceName = generateResourceName(resource, resources);
-    return lock(resource, resourceName, manager::readLock, READ_LOCK);
-  }
-
-
-  /**
-   * Acquire write lock on resource.
-   *
-   * For S3_BUCKET_LOCK, VOLUME_LOCK and BUCKET_LOCK type resources, the same
-   * thread may acquire the lock again.
-   *
-   * For USER_LOCK, PREFIX_LOCK and S3_SECRET_LOCK type resources, the same
-   * thread may not acquire the lock again.
-   *
-   * Special note for USER_LOCK: a single thread can acquire either a single
-   * user lock or a multi user lock, but not both at the same time.
-   * @param resource - Type of the resource.
-   * @param resources - Resource names on which the user wants to acquire the
-   * lock. For resource type BUCKET_LOCK, the first param should be the volume
-   * name and the second the bucket name. For all remaining resource types
-   * only one param should be passed.
-   */
-  public boolean acquireWriteLock(Resource resource, String... resources) {
-    String resourceName = generateResourceName(resource, resources);
-    return lock(resource, resourceName, manager::writeLock, WRITE_LOCK);
-  }
-
-  private boolean lock(Resource resource, String resourceName,
-      Consumer<String> lockFn, String lockType) {
-    if (!resource.canLock(lockSet.get())) {
-      String errorMessage = getErrorMessage(resource);
-      LOG.error(errorMessage);
-      throw new RuntimeException(errorMessage);
-    } else {
-      lockFn.accept(resourceName);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Acquired {} {} lock on resource {}", lockType, resource.name,
-            resourceName);
-      }
-      lockSet.set(resource.setLock(lockSet.get()));
-      return true;
-    }
-  }
-
-  /**
-   * Generate resource name to be locked.
-   * @param resource lock resource type.
-   * @param resources resource names from which the lock name is generated.
-   */
-  private String generateResourceName(Resource resource, String... resources) {
-    if (resources.length == 1 && resource != Resource.BUCKET_LOCK) {
-      return OzoneManagerLockUtil.generateResourceLockName(resource,
-          resources[0]);
-    } else if (resources.length == 2 && resource == Resource.BUCKET_LOCK) {
-      return OzoneManagerLockUtil.generateBucketLockName(resources[0],
-          resources[1]);
-    } else {
-      throw new IllegalArgumentException("acquire lock is supported on single" +
-          " resource for all locks except for resource bucket");
-    }
-  }
-
-  private String getErrorMessage(Resource resource) {
-    return "Thread '" + Thread.currentThread().getName() + "' cannot " +
-        "acquire " + resource.name + " lock while holding " +
-        getCurrentLocks().toString() + " lock(s).";
-
-  }
-
-  private List<String> getCurrentLocks() {
-    List<String> currentLocks = new ArrayList<>();
-    short lockSetVal = lockSet.get();
-    for (Resource value : Resource.values()) {
-      if (value.isLevelLocked(lockSetVal)) {
-        currentLocks.add(value.getName());
-      }
-    }
-    return currentLocks;
-  }
-
-  /**
-   * Acquire lock on multiple users.
-   * @param firstUser first user name.
-   * @param secondUser second user name.
-   */
-  public boolean acquireMultiUserLock(String firstUser, String secondUser) {
-    Resource resource = Resource.USER_LOCK;
-    firstUser = generateResourceName(resource, firstUser);
-    secondUser = generateResourceName(resource, secondUser);
-
-    if (!resource.canLock(lockSet.get())) {
-      String errorMessage = getErrorMessage(resource);
-      LOG.error(errorMessage);
-      throw new RuntimeException(errorMessage);
-    } else {
-      // When acquiring multiple user locks, we compare the names in lexical
-      // order to avoid a deadlock scenario.
-
-      // Example: the 1st thread acquires lock(ozone, hdfs) and the
-      // 2nd thread acquires lock(hdfs, ozone).
-      // If we don't acquire user locks in a fixed order, there can be a
-      // deadlock: the 1st thread holds the lock on ozone and waits for the
-      // lock on hdfs, while the 2nd thread holds the lock on hdfs and waits
-      // for the lock on ozone.
-      // To avoid this, when we acquire locks on multiple users we acquire
-      // them in lexical order. Now the first thread acquires the lock on
-      // hdfs, the 2nd thread waits for it, and the first thread acquires the
-      // lock on ozone. Once the first thread releases the user locks, the
-      // 2nd thread acquires them.
-
-      int compare = firstUser.compareTo(secondUser);
-      String temp;
-
-      // Put the user names in lexical order; swap if needed.
-      if (compare > 0) {
-        temp = secondUser;
-        secondUser = firstUser;
-        firstUser = temp;
-      }
-
-      if (compare == 0) {
-        // both users are equal.
-        manager.writeLock(firstUser);
-      } else {
-        manager.writeLock(firstUser);
-        try {
-          manager.writeLock(secondUser);
-        } catch (Exception ex) {
-          // We got an exception acquiring 2nd user lock. Release already
-          // acquired user lock, and throw exception to the user.
-          manager.writeUnlock(firstUser);
-          throw ex;
-        }
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Acquired Write {} lock on resource {} and {}", resource.name,
-            firstUser, secondUser);
-      }
-      lockSet.set(resource.setLock(lockSet.get()));
-      return true;
-    }
-  }
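[Editor's note] The ordering rule above can be seen in isolation in this sketch: every thread sorts the two (sample) user names the same way, so no two threads ever wait on each other's first lock:

    String first = "ozone";
    String second = "hdfs";
    if (first.compareTo(second) > 0) {   // same comparison as above
      String temp = second;
      second = first;
      first = temp;
    }
    // first == "hdfs", second == "ozone" for every caller, regardless of
    // argument order, so locks are always taken hdfs -> ozone.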
-
-
-
-  /**
-   * Release lock on multiple users.
-   * @param firstUser first user name.
-   * @param secondUser second user name.
-   */
-  public void releaseMultiUserLock(String firstUser, String secondUser) {
-    Resource resource = Resource.USER_LOCK;
-    firstUser = generateResourceName(resource, firstUser);
-    secondUser = generateResourceName(resource, secondUser);
-
-    int compare = firstUser.compareTo(secondUser);
-
-    String temp;
-    // Put the user names in lexical order; swap if needed.
-    if (compare > 0) {
-      temp = secondUser;
-      secondUser = firstUser;
-      firstUser = temp;
-    }
-
-    if (compare == 0) {
-      // both users are equal.
-      manager.writeUnlock(firstUser);
-    } else {
-      manager.writeUnlock(firstUser);
-      manager.writeUnlock(secondUser);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Release Write {} lock on resource {} and {}", resource.name,
-          firstUser, secondUser);
-    }
-    lockSet.set(resource.clearLock(lockSet.get()));
-  }
-
-  /**
-   * Release write lock on resource.
-   * @param resource - Type of the resource.
-   * @param resources - Resource names on which the lock was acquired.
-   * For resource type BUCKET_LOCK, the first param should be the volume name
-   * and the second the bucket name. For all remaining resource types only
-   * one param should be passed.
-   */
-  public void releaseWriteLock(Resource resource, String... resources) {
-    String resourceName = generateResourceName(resource, resources);
-    unlock(resource, resourceName, manager::writeUnlock, WRITE_LOCK);
-  }
-
-  /**
-   * Release read lock on resource.
-   * @param resource - Type of the resource.
-   * @param resources - Resource names on which the lock was acquired.
-   * For resource type BUCKET_LOCK, the first param should be the volume name
-   * and the second the bucket name. For all remaining resource types only
-   * one param should be passed.
-   */
-  public void releaseReadLock(Resource resource, String... resources) {
-    String resourceName = generateResourceName(resource, resources);
-    unlock(resource, resourceName, manager::readUnlock, READ_LOCK);
-  }
-
-  /**
-   * Release write lock on resource.
-   * @param resource - Type of the resource.
-   * @param resources - Resource names on which the lock was acquired.
-   * For resource type BUCKET_LOCK, the first param should be the volume name
-   * and the second the bucket name. For all remaining resource types only
-   * one param should be passed.
-   */
-  @Deprecated
-  public void releaseLock(Resource resource, String... resources) {
-    String resourceName = generateResourceName(resource, resources);
-    unlock(resource, resourceName, manager::writeUnlock, WRITE_LOCK);
-  }
-
-  private void unlock(Resource resource, String resourceName,
-      Consumer<String> lockFn, String lockType) {
-    // TODO: We do not check whether a higher-level lock is still held while
-    // releasing a lower-level lock. For that we would need a counter per
-    // lock, since some locks support re-acquisition.
-    lockFn.accept(resourceName);
-    // clear lock
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Release {} {}, lock on resource {}", lockType, resource.name,
-          resourceName);
-    }
-    lockSet.set(resource.clearLock(lockSet.get()));
-  }
-
-  /**
-   * Resource defined in Ozone.
-   */
-  public enum Resource {
-    // For S3 bucket we need to allow only the S3 bucket bit, which means
-    // the mask is 1.
-    S3_BUCKET_LOCK((byte) 0, "S3_BUCKET_LOCK"), // = 1
-
-    // For volume we need to allow both s3 bucket and volume: 01 + 10 = 11 (3).
-    VOLUME_LOCK((byte) 1, "VOLUME_LOCK"), // = 2
-
-    // For bucket we need to allow s3 bucket, volume and bucket, which is
-    // equal to 100 + 010 + 001 = 111 = 4 + 2 + 1 = 7.
-    BUCKET_LOCK((byte) 2, "BUCKET_LOCK"), // = 4
-
-    // For user we need to allow s3 bucket, volume, bucket and user lock,
-    // which is 8 + 4 + 2 + 1 = 15.
-    USER_LOCK((byte) 3, "USER_LOCK"), // 15
-
-    S3_SECRET_LOCK((byte) 4, "S3_SECRET_LOCK"), // 31
-    PREFIX_LOCK((byte) 5, "PREFIX_LOCK"); //63
-
-    // level of the resource
-    private byte lockLevel;
-
-    // The largest lockSet value up to which locking is still allowed.
-    private short mask;
-
-    // This value is used by setLock, and also tells whether we can
-    // re-acquire the lock or not.
-    private short setMask;
-
-    // Name of the resource.
-    private String name;
-
-    Resource(byte pos, String name) {
-      this.lockLevel = pos;
-      this.mask = (short) (Math.pow(2, lockLevel + 1) - 1);
-      this.setMask = (short) Math.pow(2, lockLevel);
-      this.name = name;
-    }
-
-    boolean canLock(short lockSetVal) {
-
-      // For USER_LOCK, S3_SECRET_LOCK and PREFIX_LOCK we do not allow a
-      // single thread to re-acquire the lock. The second condition covers the
-      // case where one of these locks is already held and the thread then
-      // tries to acquire a lock of less than or equal lockLevel, which we
-      // must disallow.
-      if (((USER_LOCK.setMask & lockSetVal) == USER_LOCK.setMask ||
-          (S3_SECRET_LOCK.setMask & lockSetVal) == S3_SECRET_LOCK.setMask ||
-          (PREFIX_LOCK.setMask & lockSetVal) == PREFIX_LOCK.setMask)
-          && setMask <= lockSetVal) {
-        return false;
-      }
-
-
-      // Our mask is the sum of the bits of all previous possible locks; in
-      // other words it is the largest possible value for that bit position.
-
-      // For example, for the volume lock the bit position is 1 and the mask
-      // is 3, which is the largest value that can be represented with 2 bits.
-      // Therefore if lockSet is larger than the mask we have to return false,
-      // i.e. some other higher order lock has been acquired.
-
-      return lockSetVal <= mask;
-    }
-
-    /**
-     * Set Lock bits in lockSetVal.
-     *
-     * @param lockSetVal current lock set value.
-     * @return Updated value which has set lock bits.
-     */
-    short setLock(short lockSetVal) {
-      return (short) (lockSetVal | setMask);
-    }
-
-    /**
-     * Clear lock from lockSetVal.
-     *
-     * @param lockSetVal current lock set value.
-     * @return Updated value which has cleared lock bits.
-     */
-    short clearLock(short lockSetVal) {
-      return (short) (lockSetVal & ~setMask);
-    }
-
-    /**
-     * Returns true if this level is locked, else false.
-     * @param lockSetVal current lock set value.
-     */
-    boolean isLevelLocked(short lockSetVal) {
-      return (lockSetVal & setMask) == setMask;
-    }
-
-    String getName() {
-      return name;
-    }
-
-    short getMask() {
-      return mask;
-    }
-  }
-
-}
-
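[Editor's note] The hierarchy check reduces to plain bit arithmetic. This sketch mirrors the mask/setMask formulas above for a thread that holds the volume lock (level 1), ignoring the USER/S3_SECRET/PREFIX re-entrancy clause, which does not apply here:

    short lockSet = (short) (1 << 1);              // VOLUME_LOCK held: setMask = 2
    short bucketMask = (short) ((1 << 3) - 1);     // BUCKET_LOCK (level 2): mask = 7
    boolean canTakeBucket = lockSet <= bucketMask; // true: higher weight is allowed
    short s3Mask = (short) ((1 << 1) - 1);         // S3_BUCKET_LOCK (level 0): mask = 1
    boolean canTakeS3 = lockSet <= s3Mask;         // false: cannot go back down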
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLockUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLockUtil.java
deleted file mode 100644
index 78a42aa..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLockUtil.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.lock;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_S3_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_S3_SECRET;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_USER_PREFIX;
-
-/**
- * Utility class containing helper functions required for the OM lock.
- */
-final class OzoneManagerLockUtil {
-
-
-  private OzoneManagerLockUtil() {
-  }
-
-  /**
-   * Generate resource lock name for the given resource name.
-   *
-   * @param resource lock resource type.
-   * @param resourceName resource name.
-   */
-  public static String generateResourceLockName(
-      OzoneManagerLock.Resource resource, String resourceName) {
-
-    if (resource == OzoneManagerLock.Resource.S3_BUCKET_LOCK) {
-      return OM_S3_PREFIX + resourceName;
-    } else if (resource == OzoneManagerLock.Resource.VOLUME_LOCK) {
-      return OM_KEY_PREFIX + resourceName;
-    } else if (resource == OzoneManagerLock.Resource.USER_LOCK) {
-      return OM_USER_PREFIX + resourceName;
-    } else if (resource == OzoneManagerLock.Resource.S3_SECRET_LOCK) {
-      return OM_S3_SECRET + resourceName;
-    } else if (resource == OzoneManagerLock.Resource.PREFIX_LOCK) {
-      return OM_PREFIX + resourceName;
-    } else {
-      // This is for developers who mistakenly call this method with the
-      // bucket resource type; for buckets we need both volume and bucket name.
-      throw new IllegalArgumentException("Bucket resource type is passed, " +
-          "to get BucketResourceLockName, use generateBucketLockName method");
-    }
-
-  }
-
-  /**
-   * Generate bucket lock name.
-   * @param volumeName volume name.
-   * @param bucketName bucket name.
-   */
-  public static String generateBucketLockName(String volumeName,
-      String bucketName) {
-    return OM_KEY_PREFIX + volumeName + OM_KEY_PREFIX + bucketName;
-
-  }
-
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/package-info.java
deleted file mode 100644
index 5feac5f..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.lock;
-
-/**
- * Classes related to ozone manager lock.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
deleted file mode 100644
index 1744cff..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om;
-/**
- This package contains the client-side protocol library used to communicate
- with OM.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java
deleted file mode 100644
index 1434dca..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.protocol;
-
-import java.io.IOException;
-
-/**
- * Protocol to talk to OM HA. These methods only need to be called from
- * OmRequestHandler.
- */
-public interface OzoneManagerHAProtocol {
-
-  /**
-   * Store the snapshot index, i.e. the raft log index corresponding to the
-   * last transaction applied to the OM RocksDB, in the OM metadata dir on
-   * disk.
-   * @return the snapshot index
-   * @throws IOException
-   */
-  long saveRatisSnapshot() throws IOException;
-
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
deleted file mode 100644
index a236695..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
+++ /dev/null
@@ -1,530 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.protocol;
-
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
-
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.ozone.security.OzoneDelegationTokenSelector;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.security.token.TokenInfo;
-import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
-import org.apache.hadoop.hdds.utils.db.SequenceNumberNotFoundException;
-
-/**
- * Protocol to talk to OM.
- */
-@KerberosInfo(
-    serverPrincipal = OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY)
-@TokenInfo(OzoneDelegationTokenSelector.class)
-public interface OzoneManagerProtocol
-    extends OzoneManagerSecurityProtocol, Closeable {
-
-  @SuppressWarnings("checkstyle:ConstantName")
-  /**
-   * Version 1: Initial version.
-   */
-  long versionID = 1L;
-
-  /**
-   * Creates a volume.
-   * @param args - Arguments to create Volume.
-   * @throws IOException
-   */
-  void createVolume(OmVolumeArgs args) throws IOException;
-
-  /**
-   * Changes the owner of a volume.
-   * @param volume  - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  void setOwner(String volume, String owner) throws IOException;
-
-  /**
-   * Changes the Quota on a volume.
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  void setQuota(String volume, long quota) throws IOException;
-
-  /**
-   * Checks if the specified user can access this volume.
-   * @param volume - volume
-   * @param userAcl - user acl which needs to be checked for access
-   * @return true if the user has required access for the volume,
-   *         false otherwise
-   * @throws IOException
-   */
-  boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
-      throws IOException;
-
-  /**
-   * Gets the volume information.
-   * @param volume - Volume name.
-   * @return OmVolumeArgs, or an exception is thrown.
-   * @throws IOException
-   */
-  OmVolumeArgs getVolumeInfo(String volume) throws IOException;
-
-  /**
-   * Deletes an existing empty volume.
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  void deleteVolume(String volume) throws IOException;
-
-  /**
-   * Lists volumes owned by a specific user.
-   * @param userName - user name
-   * @param prefix  - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- the list starts from the key after prevKey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  List<OmVolumeArgs> listVolumeByUser(String userName, String prefix, String
-      prevKey, int maxKeys) throws IOException;
-
-  /**
-   * Lists all volumes in the cluster.
-   * @param prefix  - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- the list starts from the key after prevKey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  List<OmVolumeArgs> listAllVolumes(String prefix, String
-      prevKey, int maxKeys) throws IOException;
-
-  /**
-   * Creates a bucket.
-   * @param bucketInfo - BucketInfo to create Bucket.
-   * @throws IOException
-   */
-  void createBucket(OmBucketInfo bucketInfo) throws IOException;
-
-  /**
-   * Gets the bucket information.
-   * @param volumeName - Volume name.
-   * @param bucketName - Bucket name.
-   * @return OmBucketInfo or exception is thrown.
-   * @throws IOException
-   */
-  OmBucketInfo getBucketInfo(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  void setBucketProperty(OmBucketArgs args) throws IOException;
-
-  /**
-   * Open the given key and return an open key session.
-   *
-   * @param args the args of the key.
-   * @return OpenKeySession instance that client uses to talk to container.
-   * @throws IOException
-   */
-  OpenKeySession openKey(OmKeyArgs args) throws IOException;
-
-  /**
-   * Commit a key. This will make the change from the client visible. The client
-   * is identified by the clientID.
-   *
-   * @param args the key to commit
-   * @param clientID the client identification
-   * @throws IOException
-   */
-  void commitKey(OmKeyArgs args, long clientID) throws IOException;
-
-  /**
-   * Allocate a new block. It is assumed that the client has an open key
-   * session in progress. This block will be appended to that open key session.
-   *
-   * @param args the key to append
-   * @param clientID the client identification
-   * @param excludeList List of datanodes/containers to exclude during block
-   *                    allocation
-   * @return an allocated block
-   * @throws IOException
-   */
-  OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID,
-      ExcludeList excludeList) throws IOException;
-
-
-  /**
-   * Look up the container of an existing key.
-   *
-   * @param args the args of the key.
-   * @return OmKeyInfo instance that client uses to talk to container.
-   * @throws IOException
-   */
-  OmKeyInfo lookupKey(OmKeyArgs args) throws IOException;
-
-  /**
-   * Rename an existing key within a bucket.
-   * @param args the args of the key.
-   * @param toKeyName New name to be used for the Key
-   * @throws IOException
-   */
-  void renameKey(OmKeyArgs args, String toKeyName) throws IOException;
-
-  /**
-   * Deletes an existing key.
-   *
-   * @param args the args of the key.
-   * @throws IOException
-   */
-  void deleteKey(OmKeyArgs args) throws IOException;
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volume - Name of the volume.
-   * @param bucket - Name of the bucket.
-   * @throws IOException
-   */
-  void deleteBucket(String volume, String bucket) throws IOException;
-
-  /**
-   * Returns a list of buckets represented by {@link OmBucketInfo}
-   * in the given volume. Argument volumeName is required, others
-   * are optional.
-   *
-   * @param volumeName
-   *   the name of the volume.
-   * @param startBucketName
-   *   the start bucket name, only the buckets whose name is
-   *   after this value will be included in the result.
-   * @param bucketPrefix
-   *   bucket name prefix, only the buckets whose name has
-   *   this prefix will be included in the result.
-   * @param maxNumOfBuckets
-   *   the maximum number of buckets to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of buckets.
-   * @throws IOException
-   */
-  List<OmBucketInfo> listBuckets(String volumeName,
-      String startBucketName, String bucketPrefix, int maxNumOfBuckets)
-      throws IOException;
-
-  /**
-   * Returns a list of keys represented by {@link OmKeyInfo}
-   * in the given bucket. Arguments volumeName and bucketName are required,
-   * others are optional.
-   *
-   * @param volumeName
-   *   the name of the volume.
-   * @param bucketName
-   *   the name of the bucket.
-   * @param startKeyName
-   *   the start key name, only the keys whose name is
-   *   after this value will be included in the result.
-   * @param keyPrefix
-   *   key name prefix, only the keys whose name has
-   *   this prefix will be included in the result.
-   * @param maxKeys
-   *   the maximum number of keys to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of keys.
-   * @throws IOException
-   */
-  List<OmKeyInfo> listKeys(String volumeName,
-      String bucketName, String startKeyName, String keyPrefix, int maxKeys)
-      throws IOException;
-
-  /**
-   * Returns list of Ozone services with its configuration details.
-   *
-   * @return list of Ozone services
-   * @throws IOException
-   */
-  List<ServiceInfo> getServiceList() throws IOException;
-
-  ServiceInfoEx getServiceInfo() throws IOException;
-
-  /*
-   * S3 Specific functionality that is supported by Ozone Manager.
-   */
-
-  /**
-   * Creates an S3 bucket inside Ozone manager and creates the mapping needed
-   * to access via both S3 and Ozone.
-   * @param userName - S3 user name.
-   * @param s3BucketName - S3 bucket Name.
-   * @throws IOException - On failure, throws an exception like Bucket exists.
-   */
-  void createS3Bucket(String userName, String s3BucketName) throws IOException;
-
-  /**
-   * Deletes an S3 bucket inside Ozone manager and deletes the mapping.
-   * @param s3BucketName - S3 bucket Name.
-   * @throws IOException in case the bucket cannot be deleted.
-   */
-  void deleteS3Bucket(String s3BucketName) throws IOException;
-
-  /**
-   * Returns the Ozone Namespace for the S3Bucket. It will return the
-   * OzoneVolume/OzoneBucketName.
-   * @param s3BucketName  - S3 Bucket Name.
-   * @return String - The Ozone canonical name for this s3 bucket. This
-   * string is useful for mounting an OzoneFS.
-   * @throws IOException - An error is thrown if the s3bucket does not exist.
-   */
-  String getOzoneBucketMapping(String s3BucketName) throws IOException;
-
-  /**
-   * Returns a list of buckets represented by {@link OmBucketInfo}
-   * for the given user. Argument username is required, others
-   * are optional.
-   *
-   * @param userName
-   *   user Name.
-   * @param startBucketName
-   *   the start bucket name, only the buckets whose name is
-   *   after this value will be included in the result.
-   * @param bucketPrefix
-   *   bucket name prefix, only the buckets whose name has
-   *   this prefix will be included in the result.
-   * @param maxNumOfBuckets
-   *   the maximum number of buckets to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of buckets.
-   * @throws IOException
-   */
-  List<OmBucketInfo> listS3Buckets(String userName, String startBucketName,
-                                   String bucketPrefix, int maxNumOfBuckets)
-      throws IOException;
-
-  /**
-   * Initiate multipart upload for the specified key.
-   * @param keyArgs
-   * @return MultipartInfo
-   * @throws IOException
-   */
-  OmMultipartInfo initiateMultipartUpload(OmKeyArgs keyArgs) throws IOException;
-
-
-  /**
-   * Commit Multipart upload part file.
-   * @param omKeyArgs
-   * @param clientID
-   * @return OmMultipartCommitUploadPartInfo
-   * @throws IOException
-   */
-  OmMultipartCommitUploadPartInfo commitMultipartUploadPart(
-      OmKeyArgs omKeyArgs, long clientID) throws IOException;
-
-  /**
-   * Complete Multipart upload Request.
-   * @param omKeyArgs
-   * @param multipartUploadList
-   * @return OmMultipartUploadCompleteInfo
-   * @throws IOException
-   */
-  OmMultipartUploadCompleteInfo completeMultipartUpload(
-      OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList)
-      throws IOException;
-
-  /**
-   * Abort multipart upload.
-   * @param omKeyArgs
-   * @throws IOException
-   */
-  void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException;
-
-  /**
-   * Returns list of parts of a multipart upload key.
-   * @param volumeName
-   * @param bucketName
-   * @param keyName
-   * @param uploadID
-   * @param partNumberMarker
-   * @param maxParts
-   * @return OmMultipartUploadListParts
-   */
-  OmMultipartUploadListParts listParts(String volumeName, String bucketName,
-      String keyName, String uploadID, int partNumberMarker,
-      int maxParts)  throws IOException;
-
-  /**
-   * List in-flight uploads.
-   */
-  OmMultipartUploadList listMultipartUploads(String volumeName,
-      String bucketName, String prefix) throws IOException;
-  /**
-   * Gets s3Secret for given kerberos user.
-   * @param kerberosID
-   * @return S3SecretValue
-   * @throws IOException
-   */
-  S3SecretValue getS3Secret(String kerberosID) throws IOException;
-
-  /**
-   * Get the OM Client's Retry and Failover Proxy provider.
-   * @return OMFailoverProxyProvider
-   */
-  OMFailoverProxyProvider getOMFailoverProxyProvider();
-
-  /**
-   * OzoneFS api to get file status for an entry.
-   *
-   * @param keyArgs Key args
-   * @throws OMException if the file does not exist
-   *                     or the bucket does not exist
-   * @throws IOException if there is an error in the db or the
-   *                     arguments are invalid
-   */
-  OzoneFileStatus getFileStatus(OmKeyArgs keyArgs) throws IOException;
-
-  /**
-   * Ozone FS api to create a directory. Parent directories are created
-   * for the input directory if they do not exist.
-   *
-   * @param args Key args
-   * @throws OMException if any entry in the path exists as a file
-   *                     or the bucket does not exist
-   * @throws IOException if there is an error in the db or the
-   *                     arguments are invalid
-   */
-  void createDirectory(OmKeyArgs args) throws IOException;
-
-  /**
-   * OzoneFS api to create an output stream for a file.
-   *
-   * @param keyArgs   Key args
-   * @param overWrite if true, an existing file at the location will be
-   *                  overwritten
-   * @param recursive if true, the file is created even if parent
-   *                  directories do not exist
-   * @throws OMException if the given key is a directory,
-   *                     if the file exists and the overWrite flag is false,
-   *                     if an ancestor exists as a file,
-   *                     or if the bucket does not exist
-   * @throws IOException if there is an error in the db or the
-   *                     arguments are invalid
-   */
-  OpenKeySession createFile(OmKeyArgs keyArgs, boolean overWrite,
-      boolean recursive) throws IOException;
-
-  /**
-   * OzoneFS api to look up a file.
-   *
-   * @param keyArgs Key args
-   * @throws OMException if the given key is not found, is not a file,
-   *                     or the bucket does not exist
-   * @throws IOException if there is an error in the db or the
-   *                     arguments are invalid
-   */
-  OmKeyInfo lookupFile(OmKeyArgs keyArgs) throws IOException;
-
-  /**
-   * List the status for a file or a directory and its contents.
-   *
-   * @param keyArgs    Key args
-   * @param recursive  For a directory if true all the descendants of a
-   *                   particular directory are listed
-   * @param startKey   Key from which listing needs to start. If startKey exists
-   *                   its status is included in the final list.
-   * @param numEntries Number of entries to list from the start key
-   * @return list of file status
-   */
-  List<OzoneFileStatus> listStatus(OmKeyArgs keyArgs, boolean recursive,
-      String startKey, long numEntries) throws IOException;
-
-  /**
-   * Add acl for Ozone object. Return true if the acl is added
-   * successfully, false otherwise.
-   * @param obj Ozone object for which the acl should be added.
-   * @param acl ozone acl to be added.
-   *
-   * @throws IOException if there is an error.
-   */
-  boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException;
-
-  /**
-   * Remove acl for Ozone object. Return true if the acl is removed
-   * successfully, false otherwise.
-   * @param obj Ozone object.
-   * @param acl Ozone acl to be removed.
-   *
-   * @throws IOException if there is an error.
-   */
-  boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException;
-
-  /**
-   * Acls to be set for the given Ozone object. This operation resets the
-   * ACL of the given object to the list of ACLs provided in the argument.
-   * @param obj Ozone object.
-   * @param acls List of acls.
-   *
-   * @throws IOException if there is an error.
-   */
-  boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException;
-
-  /**
-   * Returns list of ACLs for given Ozone object.
-   * @param obj Ozone object.
-   *
-   * @throws IOException if there is an error.
-   */
-  List<OzoneAcl> getAcl(OzoneObj obj) throws IOException;
-
-  /**
-   * Get DB updates since a specific sequence number.
-   * @param dbUpdatesRequest request that encapsulates a sequence number.
-   * @return Wrapper containing the updates.
-   * @throws SequenceNumberNotFoundException if db is unable to read the data.
-   */
-  DBUpdatesWrapper getDBUpdates(
-      OzoneManagerProtocolProtos.DBUpdatesRequest dbUpdatesRequest)
-      throws IOException;
-
-}
\ No newline at end of file
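
For context on the interface removed above: a typical write went createVolume, createBucket, openKey, then commitKey. The minimal sketch below exercises exactly those methods from the deleted file; the builder calls on OmVolumeArgs, OmBucketInfo and OmKeyArgs, and the OpenKeySession.getId() accessor, are assumptions about the helper classes rather than verbatim API.

    import java.io.IOException;

    import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
    import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
    import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
    import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
    import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;

    public final class OmWriteFlowSketch {

      // Happy path of the deleted protocol: the volume and bucket must
      // exist before a key can be opened, written, and committed.
      static void writeKey(OzoneManagerProtocol om) throws IOException {
        om.createVolume(OmVolumeArgs.newBuilder()      // assumed builder
            .setVolume("vol1")
            .setOwnerName("hadoop")
            .build());

        om.createBucket(OmBucketInfo.newBuilder()      // assumed builder
            .setVolumeName("vol1")
            .setBucketName("bucket1")
            .build());

        OmKeyArgs keyArgs = new OmKeyArgs.Builder()    // assumed builder
            .setVolumeName("vol1")
            .setBucketName("bucket1")
            .setKeyName("key1")
            .setDataSize(1024)
            .build();

        // openKey hands back a session id plus block locations; the client
        // writes to the datanodes, attaches the written block locations to
        // keyArgs, and then commits under the session id to publish the key.
        OpenKeySession session = om.openKey(keyArgs);
        om.commitKey(keyArgs, session.getId());        // assumed accessor
      }

      private OmWriteFlowSketch() { }
    }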
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerSecurityProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerSecurityProtocol.java
deleted file mode 100644
index 3e90899..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerSecurityProtocol.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.protocol;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.retry.Idempotent;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-
-/**
- * Security protocol for a secure OzoneManager.
- */
-@KerberosInfo(
-    serverPrincipal = OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY)
-public interface OzoneManagerSecurityProtocol {
-
-  /**
-   * Get a valid Delegation Token.
-   *
-   * @param renewer the designated renewer for the token
-   * @return Token<OzoneTokenIdentifier>
-   * @throws OMException
-   */
-  @Idempotent
-  Token<OzoneTokenIdentifier> getDelegationToken(Text renewer)
-      throws OMException;
-
-  /**
-   * Renew an existing delegation token.
-   *
-   * @param token delegation token obtained earlier
-   * @return the new expiration time
-   * @throws OMException
-   */
-  @Idempotent
-  long renewDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws OMException;
-
-  /**
-   * Cancel an existing delegation token.
-   *
-   * @param token delegation token
-   * @throws OMException
-   */
-  @Idempotent
-  void cancelDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws OMException;
-
-}
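
The three methods above form the standard Hadoop delegation-token lifecycle. A minimal sketch, assuming a resolved protocol instance; the renewer name "yarn" is illustrative:

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.ozone.om.exceptions.OMException;
    import org.apache.hadoop.ozone.om.protocol.OzoneManagerSecurityProtocol;
    import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
    import org.apache.hadoop.security.token.Token;

    final class TokenLifecycleSketch {

      static void run(OzoneManagerSecurityProtocol om) throws OMException {
        // Obtain a token naming "yarn" as the designated renewer.
        Token<OzoneTokenIdentifier> token =
            om.getDelegationToken(new Text("yarn"));

        // Renewal pushes out the expiry; all three calls are @Idempotent,
        // so a retry proxy may safely replay them across OM failover.
        long newExpiry = om.renewDelegationToken(token);
        System.out.println("token now valid until " + newExpiry);

        // Invalidate the token once the workflow no longer needs it.
        om.cancelDelegationToken(token);
      }
    }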
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerServerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerServerProtocol.java
deleted file mode 100644
index 6f58e2d..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerServerProtocol.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.protocol;
-
-/**
- * This will be used in the OzoneManager Server, as a few of the methods in
- * OzoneManagerHAProtocol need not be exposed to OM clients. This interface
- * extends both OzoneManagerHAProtocol and OzoneManagerProtocol.
- */
-public interface OzoneManagerServerProtocol extends OzoneManagerProtocol,
-    OzoneManagerHAProtocol {
-}
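
The split above is plain interface segregation: the server implements the combined protocol while clients are only ever handed the client-facing half. A self-contained toy of the shape, with every name invented for illustration:

    // Toy illustration only; these are invented names, not the real
    // Ozone interfaces.
    interface ClientFacing {
      String lookup(String key);
    }

    interface HaInternal {
      void applyLogEntry(long index);
    }

    // Server-side composite: HA-only methods never leak onto ClientFacing.
    interface ServerFacing extends ClientFacing, HaInternal {
    }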
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
deleted file mode 100644
index 9c7f388..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.protocol;
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
deleted file mode 100644
index c9dc8ec..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ /dev/null
@@ -1,1569 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.protocolPB;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.om.exceptions.NotLeaderException;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
-import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
-import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListMultipartUploadsRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListMultipartUploadsResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelDelegationTokenResponseProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CheckVolumeAccessRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetDelegationTokenResponseProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadAbortRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadListPartsRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadListPartsResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenewDelegationTokenResponseProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3BucketInfoRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3BucketInfoResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3CreateBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3DeleteBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3ListBucketsRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3ListBucketsResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
-import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
-import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.ACCESS_DENIED;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
-
-/**
- *  The client side implementation of OzoneManagerProtocol.
- */
-@InterfaceAudience.Private
-public final class OzoneManagerProtocolClientSideTranslatorPB
-    implements OzoneManagerProtocol, ProtocolTranslator {
-
-  /**
-   * RpcController is not used and hence is set to null.
-   */
-  private static final RpcController NULL_RPC_CONTROLLER = null;
-
-  private final OMFailoverProxyProvider omFailoverProxyProvider;
-  private final OzoneManagerProtocolPB rpcProxy;
-  private final String clientID;
-  private static final Logger FAILOVER_PROXY_PROVIDER_LOG =
-      LoggerFactory.getLogger(OMFailoverProxyProvider.class);
-
-  public OzoneManagerProtocolClientSideTranslatorPB(
-      OzoneManagerProtocolPB proxy, String clientId) {
-    this.rpcProxy = proxy;
-    this.clientID = clientId;
-    this.omFailoverProxyProvider = null;
-  }
-
-  /**
-   * Constructor for OM Protocol Client. This creates a {@link RetryProxy}
-   * over {@link OMFailoverProxyProvider} proxy. OMFailoverProxyProvider has
-   * one {@link OzoneManagerProtocolPB} proxy pointing to each OM node in the
-   * cluster.
-   */
-  public OzoneManagerProtocolClientSideTranslatorPB(OzoneConfiguration conf,
-      String clientId, String omServiceId, UserGroupInformation ugi)
-      throws IOException {
-    this.omFailoverProxyProvider = new OMFailoverProxyProvider(conf, ugi,
-        omServiceId);
-
-    int maxRetries = conf.getInt(
-        OzoneConfigKeys.OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
-        OzoneConfigKeys.OZONE_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
-    int maxFailovers = conf.getInt(
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
-    int sleepBase = conf.getInt(
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY,
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT);
-    int sleepMax = conf.getInt(
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY,
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT);
-
-    this.rpcProxy =
-        createRetryProxy(omFailoverProxyProvider, maxRetries, maxFailovers,
-            sleepBase, sleepMax);
-    this.clientID = clientId;
-  }
-
-  /**
-   * Creates a {@link RetryProxy} encapsulating the
-   * {@link OMFailoverProxyProvider}. The retry proxy fails over on network
-   * exception or if the current proxy is not the leader OM.
-   */
-  private OzoneManagerProtocolPB createRetryProxy(
-      OMFailoverProxyProvider failoverProxyProvider,
-      int maxRetries, int maxFailovers, int delayMillis, int maxDelayBase) {
-
-    RetryPolicy retryPolicyOnNetworkException = RetryPolicies
-        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
-            maxFailovers, maxRetries, delayMillis, maxDelayBase);
-
-    RetryPolicy retryPolicy = new RetryPolicy() {
-      @Override
-      public RetryAction shouldRetry(Exception exception, int retries,
-          int failovers, boolean isIdempotentOrAtMostOnce)
-          throws Exception {
-
-        if (exception instanceof ServiceException) {
-          Throwable cause = exception.getCause();
-          if (cause instanceof NotLeaderException) {
-            NotLeaderException notLeaderException = (NotLeaderException) cause;
-            omFailoverProxyProvider.performFailoverIfRequired(
-                notLeaderException.getSuggestedLeaderNodeId());
-            return getRetryAction(RetryAction.RETRY, retries, failovers);
-          } else {
-            return getRetryAction(RetryAction.FAILOVER_AND_RETRY, retries,
-                failovers);
-          }
-        } else if (exception instanceof EOFException) {
-          return getRetryAction(RetryAction.FAILOVER_AND_RETRY, retries,
-              failovers);
-        } else {
-          return retryPolicyOnNetworkException.shouldRetry(
-              exception, retries, failovers, isIdempotentOrAtMostOnce);
-        }
-      }
-
-      private RetryAction getRetryAction(RetryAction fallbackAction,
-          int retries, int failovers) {
-        if (retries < maxRetries && failovers < maxFailovers) {
-          return fallbackAction;
-        } else {
-          FAILOVER_PROXY_PROVIDER_LOG.error("Failed to connect to OM. " +
-              "Attempted {} retries and {} failovers", retries, failovers);
-          return RetryAction.FAIL;
-        }
-      }
-    };
-
-    OzoneManagerProtocolPB proxy = (OzoneManagerProtocolPB) RetryProxy.create(
-        OzoneManagerProtocolPB.class, failoverProxyProvider, retryPolicy);
-    return proxy;
-  }
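
The custom policy above only adds leader-redirect handling on top of the stock failover policy; everything else delegates to RetryPolicies.failoverOnNetworkException. A compressed sketch of that baseline wiring, with illustrative limits standing in for the configuration keys read in the constructor:

    import org.apache.hadoop.io.retry.FailoverProxyProvider;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;

    final class RetryWiringSketch {

      // Illustrative limits: at most 15 failovers and 10 retries, with
      // exponential backoff between a 500 ms base and a 15 s cap.
      @SuppressWarnings("unchecked")
      static <T> T wrap(Class<T> iface, FailoverProxyProvider<T> provider) {
        RetryPolicy policy = RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, 15, 10, 500, 15000);
        return (T) RetryProxy.create(iface, provider, policy);
      }
    }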
-
-  @VisibleForTesting
-  public OMFailoverProxyProvider getOMFailoverProxyProvider() {
-    return omFailoverProxyProvider;
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated
-   * with it. If the stream is already closed then invoking this
-   * method has no effect.
-   * <p>
-   * <p> As noted in {@link AutoCloseable#close()}, cases where the
-   * close may fail require careful attention. It is strongly advised
-   * to relinquish the underlying resources and to internally
-   * <em>mark</em> the {@code Closeable} as closed, prior to throwing
-   * the {@code IOException}.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-
-  }
-
-  /**
-   * Return the proxy object underlying this protocol translator.
-   *
-   * @return the proxy object underlying this protocol translator.
-   */
-  @Override
-  public Object getUnderlyingProxyObject() {
-    return rpcProxy;
-  }
-
-  /**
-   * Returns an OMRequest builder with the specified type.
-   * @param cmdType type of the request
-   */
-  private OMRequest.Builder createOMRequest(Type cmdType) {
-
-    return OMRequest.newBuilder()
-        .setCmdType(cmdType)
-        .setClientId(clientID);
-  }
-
-  /**
-   * Submits client request to OM server.
-   * @param omRequest client request
-   * @return response from OM
-   * @throws IOException thrown if any Protobuf service exception occurs
-   */
-  private OMResponse submitRequest(OMRequest omRequest)
-      throws IOException {
-    try {
-      OMRequest payload = OMRequest.newBuilder(omRequest)
-          .setTraceID(TracingUtil.exportCurrentSpan())
-          .build();
-
-      OMResponse omResponse =
-          rpcProxy.submitRequest(NULL_RPC_CONTROLLER, payload);
-
-      if (omResponse.hasLeaderOMNodeId() && omFailoverProxyProvider != null) {
-        String leaderOmId = omResponse.getLeaderOMNodeId();
-
-      // Failover to the OM node returned by OMResponse leaderOMNodeId if
-        // current proxy is not pointing to that node.
-        omFailoverProxyProvider.performFailoverIfRequired(leaderOmId);
-      }
-
-      return omResponse;
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
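
Two things happen on every round trip above: the active tracing span is stamped onto the outgoing request, and any leaderOMNodeId hint on the response proactively moves the proxy to the current leader. The same contract restated as a standalone helper, with the translator's fields simplified to parameters:

    import com.google.protobuf.ServiceException;

    import org.apache.hadoop.hdds.tracing.TracingUtil;
    import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
    import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;

    final class SubmitSketch {

      static OMResponse roundTrip(OzoneManagerProtocolPB proxy,
          OMFailoverProxyProvider provider, OMRequest request)
          throws ServiceException {
        // Stamp the current tracing span so the OM joins the client trace.
        OMRequest traced = OMRequest.newBuilder(request)
            .setTraceID(TracingUtil.exportCurrentSpan())
            .build();

        OMResponse response = proxy.submitRequest(null, traced);

        // Follow the leader hint so the next call lands on the leader OM.
        if (response.hasLeaderOMNodeId() && provider != null) {
          provider.performFailoverIfRequired(response.getLeaderOMNodeId());
        }
        return response;
      }
    }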
-
-  /**
-   * Creates a volume.
-   *
-   * @param args - Arguments to create Volume.
-   * @throws IOException
-   */
-  @Override
-  public void createVolume(OmVolumeArgs args) throws IOException {
-    CreateVolumeRequest.Builder req =
-        CreateVolumeRequest.newBuilder();
-    VolumeInfo volumeInfo = args.getProtobuf();
-    req.setVolumeInfo(volumeInfo);
-
-    OMRequest omRequest = createOMRequest(Type.CreateVolume)
-        .setCreateVolumeRequest(req)
-        .build();
-
-    OMResponse omResponse = submitRequest(omRequest);
-    handleError(omResponse);
-  }
-
-  /**
-   * Changes the owner of a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  @Override
-  public void setOwner(String volume, String owner) throws IOException {
-    SetVolumePropertyRequest.Builder req =
-        SetVolumePropertyRequest.newBuilder();
-    req.setVolumeName(volume).setOwnerName(owner);
-
-    OMRequest omRequest = createOMRequest(Type.SetVolumeProperty)
-        .setSetVolumePropertyRequest(req)
-        .build();
-
-    OMResponse omResponse = submitRequest(omRequest);
-    handleError(omResponse);
-  }
-
-  /**
-   * Changes the Quota on a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  @Override
-  public void setQuota(String volume, long quota) throws IOException {
-    SetVolumePropertyRequest.Builder req =
-        SetVolumePropertyRequest.newBuilder();
-    req.setVolumeName(volume).setQuotaInBytes(quota);
-
-    OMRequest omRequest = createOMRequest(Type.SetVolumeProperty)
-        .setSetVolumePropertyRequest(req)
-        .build();
-
-    OMResponse omResponse = submitRequest(omRequest);
-    handleError(omResponse);
-  }
-
-  /**
-   * Checks if the specified user can access this volume.
-   *
-   * @param volume - volume
-   * @param userAcl - user acl which needs to be checked for access
-   * @return true if the user has required access for the volume,
-   *         false otherwise
-   * @throws IOException
-   */
-  @Override
-  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) throws
-      IOException {
-    CheckVolumeAccessRequest.Builder req =
-        CheckVolumeAccessRequest.newBuilder();
-    req.setVolumeName(volume).setUserAcl(userAcl);
-
-    OMRequest omRequest = createOMRequest(Type.CheckVolumeAccess)
-        .setCheckVolumeAccessRequest(req)
-        .build();
-
-    OMResponse omResponse = submitRequest(omRequest);
-
-    if (omResponse.getStatus() == ACCESS_DENIED) {
-      return false;
-    } else if (omResponse.getStatus() == OK) {
-      return true;
-    } else {
-      handleError(omResponse);
-      return false;
-    }
-  }
-
-  /**
-   * Gets the volume information.
-   *
-   * @param volume - Volume name.
-   * @return OmVolumeArgs or exception is thrown.
-   * @throws IOException
-   */
-  @Override
-  public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
-    InfoVolumeRequest.Builder req = InfoVolumeRequest.newBuilder();
-    req.setVolumeName(volume);
-
-    OMRequest omRequest = createOMRequest(Type.InfoVolume)
-        .setInfoVolumeRequest(req)
-        .build();
-
-    InfoVolumeResponse resp =
-        handleError(submitRequest(omRequest)).getInfoVolumeResponse();
-
-
-    return OmVolumeArgs.getFromProtobuf(resp.getVolumeInfo());
-  }
-
-  /**
-   * Deletes an existing empty volume.
-   *
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  @Override
-  public void deleteVolume(String volume) throws IOException {
-    DeleteVolumeRequest.Builder req = DeleteVolumeRequest.newBuilder();
-    req.setVolumeName(volume);
-
-    OMRequest omRequest = createOMRequest(Type.DeleteVolume)
-        .setDeleteVolumeRequest(req)
-        .build();
-
-    handleError(submitRequest(omRequest));
-
-  }
-
-  /**
-   * Lists volumes owned by a specific user.
-   *
-   * @param userName - user name
-   * @param prefix - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- the list starts from the key after
-   * prevKey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  @Override
-  public List<OmVolumeArgs> listVolumeByUser(String userName, String prefix,
-                                             String prevKey, int maxKeys)
-      throws IOException {
-    ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder();
-    if (!Strings.isNullOrEmpty(prefix)) {
-      builder.setPrefix(prefix);
-    }
-    if (!Strings.isNullOrEmpty(prevKey)) {
-      builder.setPrevKey(prevKey);
-    }
-    builder.setMaxKeys(maxKeys);
-    builder.setUserName(userName);
-    builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_USER);
-    return listVolume(builder.build());
-  }
-
-  /**
-   * Lists all volumes in the cluster.
-   *
-   * @param prefix - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- the list starts from the key after
-   * prevKey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  @Override
-  public List<OmVolumeArgs> listAllVolumes(String prefix, String prevKey,
-                                           int maxKeys) throws IOException {
-    ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder();
-    if (!Strings.isNullOrEmpty(prefix)) {
-      builder.setPrefix(prefix);
-    }
-    if (!Strings.isNullOrEmpty(prevKey)) {
-      builder.setPrevKey(prevKey);
-    }
-    builder.setMaxKeys(maxKeys);
-    builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER);
-    return listVolume(builder.build());
-  }
-
-  private List<OmVolumeArgs> listVolume(ListVolumeRequest request)
-      throws IOException {
-
-    OMRequest omRequest = createOMRequest(Type.ListVolume)
-        .setListVolumeRequest(request)
-        .build();
-
-    ListVolumeResponse resp =
-        handleError(submitRequest(omRequest)).getListVolumeResponse();
-    List<OmVolumeArgs> list = new ArrayList<>(resp.getVolumeInfoList().size());
-    for (VolumeInfo info : resp.getVolumeInfoList()) {
-      list.add(OmVolumeArgs.getFromProtobuf(info));
-    }
-    return list;
-  }
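
The prefix/prevKey/maxKeys trio implements simple cursor pagination. A sketch of draining every volume page by page; the page size and the OmVolumeArgs.getVolume() accessor are assumptions:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
    import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;

    final class VolumePagingSketch {

      static void printAllVolumes(OzoneManagerProtocol om) throws IOException {
        String prevKey = null;  // null/empty means start from the beginning
        while (true) {
          // Each page resumes from the key after prevKey.
          List<OmVolumeArgs> page = om.listAllVolumes("", prevKey, 100);
          if (page.isEmpty()) {
            break;
          }
          for (OmVolumeArgs vol : page) {
            System.out.println(vol.getVolume());           // assumed accessor
          }
          prevKey = page.get(page.size() - 1).getVolume(); // assumed accessor
        }
      }
    }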
-
-  /**
-   * Creates a bucket.
-   *
-   * @param bucketInfo - BucketInfo to create bucket.
-   * @throws IOException
-   */
-  @Override
-  public void createBucket(OmBucketInfo bucketInfo) throws IOException {
-    CreateBucketRequest.Builder req =
-        CreateBucketRequest.newBuilder();
-    BucketInfo bucketInfoProtobuf = bucketInfo.getProtobuf();
-    req.setBucketInfo(bucketInfoProtobuf);
-
-    OMRequest omRequest = createOMRequest(Type.CreateBucket)
-        .setCreateBucketRequest(req)
-        .build();
-
-    handleError(submitRequest(omRequest));
-
-  }
-
-  /**
-   * Gets the bucket information.
-   *
-   * @param volume - Volume name.
-   * @param bucket - Bucket name.
-   * @return OmBucketInfo or exception is thrown.
-   * @throws IOException
-   */
-  @Override
-  public OmBucketInfo getBucketInfo(String volume, String bucket)
-      throws IOException {
-    InfoBucketRequest.Builder req =
-        InfoBucketRequest.newBuilder();
-    req.setVolumeName(volume);
-    req.setBucketName(bucket);
-
-    OMRequest omRequest = createOMRequest(Type.InfoBucket)
-        .setInfoBucketRequest(req)
-        .build();
-
-    InfoBucketResponse resp =
-        handleError(submitRequest(omRequest)).getInfoBucketResponse();
-
-    return OmBucketInfo.getFromProtobuf(resp.getBucketInfo());
-  }
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  @Override
-  public void setBucketProperty(OmBucketArgs args)
-      throws IOException {
-    SetBucketPropertyRequest.Builder req =
-        SetBucketPropertyRequest.newBuilder();
-    BucketArgs bucketArgs = args.getProtobuf();
-    req.setBucketArgs(bucketArgs);
-
-    OMRequest omRequest = createOMRequest(Type.SetBucketProperty)
-        .setSetBucketPropertyRequest(req)
-        .build();
-
-    handleError(submitRequest(omRequest));
-
-  }
-
-  /**
-   * List buckets in a volume.
-   *
-   * @param volumeName the name of the volume.
-   * @param startKey only buckets whose name comes after this value are
-   *                 included in the result.
-   * @param prefix only buckets whose name has this prefix are included.
-   * @param count the maximum number of buckets to return.
-   * @return a list of buckets.
-   * @throws IOException
-   */
-  @Override
-  public List<OmBucketInfo> listBuckets(String volumeName,
-      String startKey, String prefix, int count) throws IOException {
-    List<OmBucketInfo> buckets = new ArrayList<>();
-    ListBucketsRequest.Builder reqBuilder = ListBucketsRequest.newBuilder();
-    reqBuilder.setVolumeName(volumeName);
-    reqBuilder.setCount(count);
-    if (startKey != null) {
-      reqBuilder.setStartKey(startKey);
-    }
-    if (prefix != null) {
-      reqBuilder.setPrefix(prefix);
-    }
-    ListBucketsRequest request = reqBuilder.build();
-
-    OMRequest omRequest = createOMRequest(Type.ListBuckets)
-        .setListBucketsRequest(request)
-        .build();
-
-    ListBucketsResponse resp = handleError(submitRequest(omRequest))
-        .getListBucketsResponse();
-
-    buckets.addAll(
-          resp.getBucketInfoList().stream()
-              .map(OmBucketInfo::getFromProtobuf)
-              .collect(Collectors.toList()));
-    return buckets;
-
-  }
-
-  /**
-   * Create a new open session for the key, then use the returned meta info
-   * to talk to the data nodes to actually write the key.
-   * @param args the args for the key to be allocated
-   * @return an open key session handle that is returned to the client
-   * @throws IOException
-   */
-  @Override
-  public OpenKeySession openKey(OmKeyArgs args) throws IOException {
-    CreateKeyRequest.Builder req = CreateKeyRequest.newBuilder();
-    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName());
-
-    if (args.getAcls() != null) {
-      keyArgs.addAllAcls(args.getAcls().stream().distinct().map(a ->
-          OzoneAcl.toProtobuf(a)).collect(Collectors.toList()));
-    }
-
-    if (args.getFactor() != null) {
-      keyArgs.setFactor(args.getFactor());
-    }
-
-    if (args.getType() != null) {
-      keyArgs.setType(args.getType());
-    }
-
-    if (args.getDataSize() > 0) {
-      keyArgs.setDataSize(args.getDataSize());
-    }
-
-    if (args.getMetadata() != null && args.getMetadata().size() > 0) {
-      keyArgs.addAllMetadata(KeyValueUtil.toProtobuf(args.getMetadata()));
-    }
-
-    if (args.getMultipartUploadID() != null) {
-      keyArgs.setMultipartUploadID(args.getMultipartUploadID());
-    }
-
-    if (args.getMultipartUploadPartNumber() > 0) {
-      keyArgs.setMultipartNumber(args.getMultipartUploadPartNumber());
-    }
-
-    keyArgs.setIsMultipartKey(args.getIsMultipartKey());
-
-
-    req.setKeyArgs(keyArgs.build());
-
-    OMRequest omRequest = createOMRequest(Type.CreateKey)
-        .setCreateKeyRequest(req)
-        .build();
-
-    CreateKeyResponse keyResponse =
-        handleError(submitRequest(omRequest)).getCreateKeyResponse();
-    return new OpenKeySession(keyResponse.getID(),
-        OmKeyInfo.getFromProtobuf(keyResponse.getKeyInfo()),
-        keyResponse.getOpenVersion());
-  }
-
-  private OMResponse handleError(OMResponse resp) throws OMException {
-    if (resp.getStatus() != OK) {
-      throw new OMException(resp.getMessage(),
-          ResultCodes.values()[resp.getStatus().ordinal()]);
-    }
-    return resp;
-  }
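
handleError above maps the protobuf status onto OMException.ResultCodes by ordinal, so callers can branch on a typed code instead of parsing messages. A sketch of the consuming side; the getResult() accessor and the VOLUME_NOT_FOUND code are assumptions about OMException:

    import java.io.IOException;

    import org.apache.hadoop.ozone.om.exceptions.OMException;
    import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;

    final class ErrorHandlingSketch {

      static boolean volumeExists(OzoneManagerProtocol om, String volume)
          throws IOException {
        try {
          om.getVolumeInfo(volume);
          return true;
        } catch (OMException e) {
          // Branch on the typed result code (assumed accessor and code).
          if (e.getResult() == OMException.ResultCodes.VOLUME_NOT_FOUND) {
            return false;
          }
          throw e;
        }
      }
    }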
-
-  @Override
-  public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientId,
-      ExcludeList excludeList) throws IOException {
-    AllocateBlockRequest.Builder req = AllocateBlockRequest.newBuilder();
-    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize());
-
-    if (args.getFactor() != null) {
-      keyArgs.setFactor(args.getFactor());
-    }
-
-    if (args.getType() != null) {
-      keyArgs.setType(args.getType());
-    }
-
-    req.setKeyArgs(keyArgs);
-    req.setClientID(clientId);
-    req.setExcludeList(excludeList.getProtoBuf());
-
-
-    OMRequest omRequest = createOMRequest(Type.AllocateBlock)
-        .setAllocateBlockRequest(req)
-        .build();
-
-    AllocateBlockResponse resp = handleError(submitRequest(omRequest))
-        .getAllocateBlockResponse();
-    return OmKeyLocationInfo.getFromProtobuf(resp.getKeyLocation());
-  }
-  @Override
-  public void commitKey(OmKeyArgs args, long clientId)
-      throws IOException {
-    CommitKeyRequest.Builder req = CommitKeyRequest.newBuilder();
-    List<OmKeyLocationInfo> locationInfoList = args.getLocationInfoList();
-    Preconditions.checkNotNull(locationInfoList);
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize())
-        .addAllKeyLocations(
-            locationInfoList.stream().map(OmKeyLocationInfo::getProtobuf)
-                .collect(Collectors.toList())).build();
-    req.setKeyArgs(keyArgs);
-    req.setClientID(clientId);
-
-    OMRequest omRequest = createOMRequest(Type.CommitKey)
-        .setCommitKeyRequest(req)
-        .build();
-
-    handleError(submitRequest(omRequest));
-
-
-  }
-
-
-  @Override
-  public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException {
-    LookupKeyRequest.Builder req = LookupKeyRequest.newBuilder();
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize())
-        .setSortDatanodes(args.getSortDatanodes())
-        .build();
-    req.setKeyArgs(keyArgs);
-
-    OMRequest omRequest = createOMRequest(Type.LookupKey)
-        .setLookupKeyRequest(req)
-        .build();
-
-    LookupKeyResponse resp =
-        handleError(submitRequest(omRequest)).getLookupKeyResponse();
-
-    return OmKeyInfo.getFromProtobuf(resp.getKeyInfo());
-  }
-
-  @Override
-  public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
-    RenameKeyRequest.Builder req = RenameKeyRequest.newBuilder();
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize()).build();
-    req.setKeyArgs(keyArgs);
-    req.setToKeyName(toKeyName);
-
-    OMRequest omRequest = createOMRequest(Type.RenameKey)
-        .setRenameKeyRequest(req)
-        .build();
-
-    handleError(submitRequest(omRequest));
-  }
-
-  /**
-   * Deletes an existing key.
-   *
-   * @param args the args of the key.
-   * @throws IOException
-   */
-  @Override
-  public void deleteKey(OmKeyArgs args) throws IOException {
-    DeleteKeyRequest.Builder req = DeleteKeyRequest.newBuilder();
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName()).build();
-    req.setKeyArgs(keyArgs);
-
-    OMRequest omRequest = createOMRequest(Type.DeleteKey)
-        .setDeleteKeyRequest(req)
-        .build();
-
-    handleError(submitRequest(omRequest));
-
-  }
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volume - Name of the volume.
-   * @param bucket - Name of the bucket.
-   * @throws IOException
-   */
-  public void deleteBucket(String volume, String bucket) throws IOException {
-    DeleteBucketRequest.Builder req = DeleteBucketRequest.newBuilder();
-    req.setVolumeName(volume);
-    req.setBucketName(bucket);
-
-    OMRequest omRequest = createOMRequest(Type.DeleteBucket)
-        .setDeleteBucketRequest(req)
-        .build();
-
-    handleError(submitRequest(omRequest));
-
-  }
-
-  /**
-   * List keys in a bucket.
-   */
-  @Override
-  public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
-      String startKey, String prefix, int maxKeys) throws IOException {
-    List<OmKeyInfo> keys = new ArrayList<>();
-    ListKeysRequest.Builder reqBuilder = ListKeysRequest.newBuilder();
-    reqBuilder.setVolumeName(volumeName);
-    reqBuilder.setBucketName(bucketName);
-    reqBuilder.setCount(maxKeys);
-
-    if (startKey != null) {
-      reqBuilder.setStartKey(startKey);
-    }
-
-    if (prefix != null) {
-      reqBuilder.setPrefix(prefix);
-    }
-
-    ListKeysRequest req = reqBuilder.build();
-
-    OMRequest omRequest = createOMRequest(Type.ListKeys)
-        .setListKeysRequest(req)
-        .build();
-
-    ListKeysResponse resp =
-        handleError(submitRequest(omRequest)).getListKeysResponse();
-    keys.addAll(
-        resp.getKeyInfoList().stream()
-            .map(OmKeyInfo::getFromProtobuf)
-            .collect(Collectors.toList()));
-    return keys;
-  }
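-
-  // A minimal paging sketch built on listKeys (illustration only, not part
-  // of the original client). It assumes the server excludes startKey from
-  // the returned batch, which is how the Ozone client iterators use it; the
-  // page size of 1000 is an arbitrary choice for the sketch.
-  private List<OmKeyInfo> listAllKeys(String volumeName, String bucketName,
-      String prefix) throws IOException {
-    final int batchSize = 1000;
-    List<OmKeyInfo> all = new ArrayList<>();
-    String startKey = null;
-    List<OmKeyInfo> batch;
-    do {
-      batch = listKeys(volumeName, bucketName, startKey, prefix, batchSize);
-      all.addAll(batch);
-      if (!batch.isEmpty()) {
-        // Resume the next page from the last key returned.
-        startKey = batch.get(batch.size() - 1).getKeyName();
-      }
-    } while (batch.size() == batchSize);
-    return all;
-  }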
-
-  @Override
-  public void createS3Bucket(String userName, String s3BucketName)
-      throws IOException {
-    S3CreateBucketRequest req = S3CreateBucketRequest.newBuilder()
-        .setUserName(userName)
-        .setS3Bucketname(s3BucketName)
-        .build();
-
-    OMRequest omRequest = createOMRequest(Type.CreateS3Bucket)
-        .setCreateS3BucketRequest(req)
-        .build();
-
-    handleError(submitRequest(omRequest));
-  }
-
-  @Override
-  public void deleteS3Bucket(String s3BucketName) throws IOException {
-    S3DeleteBucketRequest request  = S3DeleteBucketRequest.newBuilder()
-        .setS3BucketName(s3BucketName)
-        .build();
-
-    OMRequest omRequest = createOMRequest(Type.DeleteS3Bucket)
-        .setDeleteS3BucketRequest(request)
-        .build();
-
-    handleError(submitRequest(omRequest));
-  }
-
-  @Override
-  public String getOzoneBucketMapping(String s3BucketName)
-      throws IOException {
-    S3BucketInfoRequest request  = S3BucketInfoRequest.newBuilder()
-        .setS3BucketName(s3BucketName)
-        .build();
-
-    OMRequest omRequest = createOMRequest(Type.InfoS3Bucket)
-        .setInfoS3BucketRequest(request)
-        .build();
-
-    S3BucketInfoResponse resp = handleError(submitRequest(omRequest))
-        .getInfoS3BucketResponse();
-    return resp.getOzoneMapping();
-  }
-
-  @Override
-  public List<OmBucketInfo> listS3Buckets(String userName, String startKey,
-                                          String prefix, int count)
-      throws IOException {
-    List<OmBucketInfo> buckets = new ArrayList<>();
-    S3ListBucketsRequest.Builder reqBuilder = S3ListBucketsRequest.newBuilder();
-    reqBuilder.setUserName(userName);
-    reqBuilder.setCount(count);
-    if (startKey != null) {
-      reqBuilder.setStartKey(startKey);
-    }
-    if (prefix != null) {
-      reqBuilder.setPrefix(prefix);
-    }
-    S3ListBucketsRequest request = reqBuilder.build();
-
-    OMRequest omRequest = createOMRequest(Type.ListS3Buckets)
-        .setListS3BucketsRequest(request)
-        .build();
-
-    S3ListBucketsResponse resp = handleError(submitRequest(omRequest))
-        .getListS3BucketsResponse();
-
-    buckets.addAll(
-        resp.getBucketInfoList().stream()
-            .map(OmBucketInfo::getFromProtobuf)
-            .collect(Collectors.toList()));
-    return buckets;
-  }
-
-  @Override
-  public S3SecretValue getS3Secret(String kerberosID) throws IOException {
-    GetS3SecretRequest request = GetS3SecretRequest.newBuilder()
-        .setKerberosID(kerberosID)
-        .build();
-    OMRequest omRequest = createOMRequest(Type.GetS3Secret)
-        .setGetS3SecretRequest(request)
-        .build();
-    final GetS3SecretResponse resp = handleError(submitRequest(omRequest))
-        .getGetS3SecretResponse();
-
-    return S3SecretValue.fromProtobuf(resp.getS3Secret());
-  }
-
-  /**
-   * Initiate a multipart upload for the given key.
-   *
-   * @return the multipart info, including the new multipart upload ID.
-   */
-  @Override
-  public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws
-      IOException {
-
-    MultipartInfoInitiateRequest.Builder multipartInfoInitiateRequest =
-        MultipartInfoInitiateRequest.newBuilder();
-
-    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(omKeyArgs.getVolumeName())
-        .setBucketName(omKeyArgs.getBucketName())
-        .setKeyName(omKeyArgs.getKeyName())
-        .setFactor(omKeyArgs.getFactor())
-        .addAllAcls(omKeyArgs.getAcls().stream().map(a ->
-            OzoneAcl.toProtobuf(a)).collect(Collectors.toList()))
-        .setType(omKeyArgs.getType());
-    multipartInfoInitiateRequest.setKeyArgs(keyArgs.build());
-
-    OMRequest omRequest = createOMRequest(
-        Type.InitiateMultiPartUpload)
-        .setInitiateMultiPartUploadRequest(multipartInfoInitiateRequest.build())
-        .build();
-
-    MultipartInfoInitiateResponse resp = handleError(submitRequest(omRequest))
-        .getInitiateMultiPartUploadResponse();
-
-    return new OmMultipartInfo(resp.getVolumeName(), resp.getBucketName(), resp
-        .getKeyName(), resp.getMultipartUploadID());
-  }
-
-  @Override
-  public OmMultipartCommitUploadPartInfo commitMultipartUploadPart(
-      OmKeyArgs omKeyArgs, long clientId) throws IOException {
-
-    List<OmKeyLocationInfo> locationInfoList = omKeyArgs.getLocationInfoList();
-    Preconditions.checkNotNull(locationInfoList);
-
-    MultipartCommitUploadPartRequest.Builder multipartCommitUploadPartRequest
-        = MultipartCommitUploadPartRequest.newBuilder();
-
-    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(omKeyArgs.getVolumeName())
-        .setBucketName(omKeyArgs.getBucketName())
-        .setKeyName(omKeyArgs.getKeyName())
-        .setMultipartUploadID(omKeyArgs.getMultipartUploadID())
-        .setIsMultipartKey(omKeyArgs.getIsMultipartKey())
-        .setMultipartNumber(omKeyArgs.getMultipartUploadPartNumber())
-        .setDataSize(omKeyArgs.getDataSize())
-        .addAllKeyLocations(
-            locationInfoList.stream().map(OmKeyLocationInfo::getProtobuf)
-                .collect(Collectors.toList()));
-    multipartCommitUploadPartRequest.setClientID(clientId);
-    multipartCommitUploadPartRequest.setKeyArgs(keyArgs.build());
-
-    OMRequest omRequest = createOMRequest(
-        Type.CommitMultiPartUpload)
-        .setCommitMultiPartUploadRequest(multipartCommitUploadPartRequest
-            .build())
-        .build();
-
-    MultipartCommitUploadPartResponse response =
-        handleError(submitRequest(omRequest))
-        .getCommitMultiPartUploadResponse();
-
-    return new OmMultipartCommitUploadPartInfo(response.getPartName());
-  }
-
-  @Override
-  public OmMultipartUploadCompleteInfo completeMultipartUpload(
-      OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList)
-      throws IOException {
-    MultipartUploadCompleteRequest.Builder multipartUploadCompleteRequest =
-        MultipartUploadCompleteRequest.newBuilder();
-
-    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(omKeyArgs.getVolumeName())
-        .setBucketName(omKeyArgs.getBucketName())
-        .setKeyName(omKeyArgs.getKeyName())
-        .addAllAcls(omKeyArgs.getAcls().stream().map(a ->
-            OzoneAcl.toProtobuf(a)).collect(Collectors.toList()))
-        .setMultipartUploadID(omKeyArgs.getMultipartUploadID());
-
-    multipartUploadCompleteRequest.setKeyArgs(keyArgs.build());
-    multipartUploadCompleteRequest.addAllPartsList(multipartUploadList
-        .getPartsList());
-
-    OMRequest omRequest = createOMRequest(
-        Type.CompleteMultiPartUpload)
-        .setCompleteMultiPartUploadRequest(
-            multipartUploadCompleteRequest.build()).build();
-
-    MultipartUploadCompleteResponse response =
-        handleError(submitRequest(omRequest))
-        .getCompleteMultiPartUploadResponse();
-
-    return new OmMultipartUploadCompleteInfo(response.getVolume(),
-        response.getBucket(), response.getKey(), response.getHash());
-  }
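-
-  // End-to-end sketch of the multipart protocol above (illustration only):
-  // initiate the upload, commit each part against the returned upload ID,
-  // then complete with the accumulated part list. Construction of the
-  // OmKeyArgs and of the part list is assumed, and writing the part data
-  // itself (via the normal key-write path) is elided.
-  //
-  //   OmMultipartInfo info = initiateMultipartUpload(keyArgs);
-  //   // per part: set multipartUploadID/partNumber on the part's key args
-  //   OmMultipartCommitUploadPartInfo part =
-  //       commitMultipartUploadPart(partArgs, clientId);
-  //   // collect partNumber -> part.getPartName() into completeList
-  //   completeMultipartUpload(keyArgs, completeList);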
-
-  @Override
-  public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException {
-    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(omKeyArgs.getVolumeName())
-        .setBucketName(omKeyArgs.getBucketName())
-        .setKeyName(omKeyArgs.getKeyName())
-        .setMultipartUploadID(omKeyArgs.getMultipartUploadID());
-
-    MultipartUploadAbortRequest.Builder multipartUploadAbortRequest =
-        MultipartUploadAbortRequest.newBuilder();
-    multipartUploadAbortRequest.setKeyArgs(keyArgs);
-
-    OMRequest omRequest = createOMRequest(
-        Type.AbortMultiPartUpload)
-        .setAbortMultiPartUploadRequest(multipartUploadAbortRequest.build())
-        .build();
-
-    handleError(submitRequest(omRequest));
-  }
-
-  @Override
-  public OmMultipartUploadListParts listParts(String volumeName,
-      String bucketName, String keyName, String uploadID,
-      int partNumberMarker, int maxParts) throws IOException {
-    MultipartUploadListPartsRequest.Builder multipartUploadListPartsRequest =
-        MultipartUploadListPartsRequest.newBuilder();
-    multipartUploadListPartsRequest.setVolume(volumeName)
-        .setBucket(bucketName).setKey(keyName).setUploadID(uploadID)
-        .setPartNumbermarker(partNumberMarker).setMaxParts(maxParts);
-
-    OMRequest omRequest = createOMRequest(Type.ListMultiPartUploadParts)
-        .setListMultipartUploadPartsRequest(
-            multipartUploadListPartsRequest.build()).build();
-
-    MultipartUploadListPartsResponse response =
-        handleError(submitRequest(omRequest))
-            .getListMultipartUploadPartsResponse();
-
-    OmMultipartUploadListParts omMultipartUploadListParts =
-        new OmMultipartUploadListParts(response.getType(), response.getFactor(),
-            response.getNextPartNumberMarker(), response.getIsTruncated());
-    omMultipartUploadListParts.addProtoPartList(response.getPartsListList());
-
-    return omMultipartUploadListParts;
-  }
-
-  @Override
-  public OmMultipartUploadList listMultipartUploads(String volumeName,
-      String bucketName,
-      String prefix) throws IOException {
-    ListMultipartUploadsRequest request = ListMultipartUploadsRequest
-        .newBuilder()
-        .setVolume(volumeName)
-        .setBucket(bucketName)
-        .setPrefix(prefix == null ? "" : prefix)
-        .build();
-
-    OMRequest omRequest = createOMRequest(Type.ListMultipartUploads)
-        .setListMultipartUploadsRequest(request)
-        .build();
-
-    ListMultipartUploadsResponse listMultipartUploadsResponse =
-        handleError(submitRequest(omRequest)).getListMultipartUploadsResponse();
-
-    List<OmMultipartUpload> uploadList =
-        listMultipartUploadsResponse.getUploadsListList()
-            .stream()
-            .map(proto -> new OmMultipartUpload(
-                proto.getVolumeName(),
-                proto.getBucketName(),
-                proto.getKeyName(),
-                proto.getUploadId(),
-                Instant.ofEpochMilli(proto.getCreationTime()),
-                proto.getType(),
-                proto.getFactor()
-            ))
-            .collect(Collectors.toList());
-
-    return new OmMultipartUploadList(uploadList);
-  }
-
-  public List<ServiceInfo> getServiceList() throws IOException {
-    ServiceListRequest req = ServiceListRequest.newBuilder().build();
-
-    OMRequest omRequest = createOMRequest(Type.ServiceList)
-        .setServiceListRequest(req)
-        .build();
-
-    final ServiceListResponse resp = handleError(submitRequest(omRequest))
-        .getServiceListResponse();
-
-    return resp.getServiceInfoList().stream()
-        .map(ServiceInfo::getFromProtobuf)
-        .collect(Collectors.toList());
-  }
-
-  @Override
-  public ServiceInfoEx getServiceInfo() throws IOException {
-    ServiceListRequest req = ServiceListRequest.newBuilder().build();
-
-    OMRequest omRequest = createOMRequest(Type.ServiceList)
-        .setServiceListRequest(req)
-        .build();
-
-    final ServiceListResponse resp = handleError(submitRequest(omRequest))
-        .getServiceListResponse();
-
-    return new ServiceInfoEx(
-        resp.getServiceInfoList().stream()
-            .map(ServiceInfo::getFromProtobuf)
-            .collect(Collectors.toList()),
-        resp.getCaCertificate());
-  }
-
-  /**
-   * Get a valid Delegation Token.
-   *
-   * @param renewer the designated renewer for the token
-   * @return the delegation token as a Token<OzoneTokenIdentifier>
-   * @throws OMException if the token cannot be obtained
-   */
-  @Override
-  public Token<OzoneTokenIdentifier> getDelegationToken(Text renewer)
-      throws OMException {
-    GetDelegationTokenRequestProto req = GetDelegationTokenRequestProto
-        .newBuilder()
-        .setRenewer(renewer == null ? "" : renewer.toString())
-        .build();
-
-    OMRequest omRequest = createOMRequest(Type.GetDelegationToken)
-        .setGetDelegationTokenRequest(req)
-        .build();
-
-    final GetDelegationTokenResponseProto resp;
-    try {
-      resp =
-          handleError(submitRequest(omRequest)).getGetDelegationTokenResponse();
-      return resp.getResponse().hasToken() ?
-          OMPBHelper.convertToDelegationToken(resp.getResponse().getToken())
-          : null;
-    } catch (IOException e) {
-      if (e instanceof OMException) {
-        throw (OMException) e;
-      }
-      throw new OMException("Get delegation token failed.", e,
-          TOKEN_ERROR_OTHER);
-    }
-  }
-
-  /**
-   * Renew an existing delegation token.
-   *
-   * @param token delegation token obtained earlier
-   * @return the new expiration time
-   */
-  @Override
-  public long renewDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws OMException {
-    RenewDelegationTokenRequestProto req =
-        RenewDelegationTokenRequestProto.newBuilder()
-            .setToken(OMPBHelper.convertToTokenProto(token))
-            .build();
-
-    OMRequest omRequest = createOMRequest(Type.RenewDelegationToken)
-        .setRenewDelegationTokenRequest(req)
-        .build();
-
-    final RenewDelegationTokenResponseProto resp;
-    try {
-      resp = handleError(submitRequest(omRequest))
-          .getRenewDelegationTokenResponse();
-      return resp.getResponse().getNewExpiryTime();
-    } catch (IOException e) {
-      if (e instanceof OMException) {
-        throw (OMException) e;
-      }
-      throw new OMException("Renew delegation token failed.", e,
-          TOKEN_ERROR_OTHER);
-    }
-  }
-
-  /**
-   * Cancel an existing delegation token.
-   *
-   * @param token delegation token
-   */
-  @Override
-  public void cancelDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws OMException {
-    CancelDelegationTokenRequestProto req = CancelDelegationTokenRequestProto
-        .newBuilder()
-        .setToken(OMPBHelper.convertToTokenProto(token))
-        .build();
-
-    OMRequest omRequest = createOMRequest(Type.CancelDelegationToken)
-        .setCancelDelegationTokenRequest(req)
-        .build();
-
-    try {
-      handleError(submitRequest(omRequest));
-    } catch (IOException e) {
-      if (e instanceof OMException) {
-        throw (OMException) e;
-      }
-      throw new OMException("Cancel delegation token failed.", e,
-          TOKEN_ERROR_OTHER);
-    }
-  }
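-
-  // Typical lifecycle against the three calls above (sketch only):
-  //   Token<OzoneTokenIdentifier> t = getDelegationToken(new Text(renewer));
-  //   long newExpiry = renewDelegationToken(t);  // before the expiry time
-  //   cancelDelegationToken(t);                  // when no longer needed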
-
-  /**
-   * Get File Status for an Ozone key.
-   *
-   * @param args arguments identifying the volume, bucket and key.
-   * @return OzoneFileStatus for the key.
-   * @throws IOException if the request fails.
-   */
-  public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException {
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .build();
-    GetFileStatusRequest req =
-        GetFileStatusRequest.newBuilder()
-            .setKeyArgs(keyArgs)
-            .build();
-
-    OMRequest omRequest = createOMRequest(Type.GetFileStatus)
-        .setGetFileStatusRequest(req)
-        .build();
-
-    final GetFileStatusResponse resp =
-        handleError(submitRequest(omRequest)).getGetFileStatusResponse();
-    return OzoneFileStatus.getFromProtobuf(resp.getStatus());
-  }
-
-  @Override
-  public void createDirectory(OmKeyArgs args) throws IOException {
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .addAllAcls(args.getAcls().stream().map(a ->
-            OzoneAcl.toProtobuf(a)).collect(Collectors.toList()))
-        .build();
-    CreateDirectoryRequest request = CreateDirectoryRequest.newBuilder()
-        .setKeyArgs(keyArgs)
-        .build();
-
-    OMRequest omRequest = createOMRequest(Type.CreateDirectory)
-        .setCreateDirectoryRequest(request)
-        .build();
-
-    handleError(submitRequest(omRequest));
-  }
-
-  @Override
-  public OmKeyInfo lookupFile(OmKeyArgs args)
-      throws IOException {
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setSortDatanodes(args.getSortDatanodes())
-        .build();
-    LookupFileRequest lookupFileRequest = LookupFileRequest.newBuilder()
-            .setKeyArgs(keyArgs)
-            .build();
-    OMRequest omRequest = createOMRequest(Type.LookupFile)
-        .setLookupFileRequest(lookupFileRequest)
-        .build();
-    LookupFileResponse resp =
-        handleError(submitRequest(omRequest)).getLookupFileResponse();
-    return OmKeyInfo.getFromProtobuf(resp.getKeyInfo());
-  }
-
-  /**
-   * Add acl for Ozone object. Return true if acl is added successfully else
-   * false.
-   *
-   * @param obj Ozone object for which acl should be added.
-   * @param acl ozone acl to be added.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    AddAclRequest req = AddAclRequest.newBuilder()
-        .setObj(OzoneObj.toProtobuf(obj))
-        .setAcl(OzoneAcl.toProtobuf(acl))
-        .build();
-
-    OMRequest omRequest = createOMRequest(Type.AddAcl)
-        .setAddAclRequest(req)
-        .build();
-    AddAclResponse addAclResponse =
-        handleError(submitRequest(omRequest)).getAddAclResponse();
-
-    return addAclResponse.getResponse();
-  }
-
-  /**
-   * Remove acl for Ozone object. Return true if acl is removed successfully
-   * else false.
-   *
-   * @param obj Ozone object.
-   * @param acl Ozone acl to be removed.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    RemoveAclRequest req = RemoveAclRequest.newBuilder()
-        .setObj(OzoneObj.toProtobuf(obj))
-        .setAcl(OzoneAcl.toProtobuf(acl))
-        .build();
-
-    OMRequest omRequest = createOMRequest(Type.RemoveAcl)
-        .setRemoveAclRequest(req)
-        .build();
-    RemoveAclResponse response =
-        handleError(submitRequest(omRequest)).getRemoveAclResponse();
-
-    return response.getResponse();
-  }
-
-  /**
-   * Set acls for a given Ozone object. This operation resets the ACLs of the
-   * object to the list of ACLs provided in the argument.
-   *
-   * @param obj Ozone object.
-   * @param acls List of acls.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException {
-    SetAclRequest.Builder builder = SetAclRequest.newBuilder()
-        .setObj(OzoneObj.toProtobuf(obj));
-
-    acls.forEach(a -> builder.addAcl(OzoneAcl.toProtobuf(a)));
-
-    OMRequest omRequest = createOMRequest(Type.SetAcl)
-        .setSetAclRequest(builder.build())
-        .build();
-    SetAclResponse response =
-        handleError(submitRequest(omRequest)).getSetAclResponse();
-
-    return response.getResponse();
-  }
-
-  /**
-   * Returns list of ACLs for given Ozone object.
-   *
-   * @param obj Ozone object.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public List<OzoneAcl> getAcl(OzoneObj obj) throws IOException {
-    GetAclRequest req = GetAclRequest.newBuilder()
-        .setObj(OzoneObj.toProtobuf(obj))
-        .build();
-
-    OMRequest omRequest = createOMRequest(Type.GetAcl)
-        .setGetAclRequest(req)
-        .build();
-    GetAclResponse response =
-        handleError(submitRequest(omRequest)).getGetAclResponse();
-    List<OzoneAcl> acls = new ArrayList<>();
-    response.getAclsList().forEach(a ->
-        acls.add(OzoneAcl.fromProtobuf(a)));
-    return acls;
-  }
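-
-  // ACL round-trip sketch (illustration only; construction of the OzoneObj
-  // and OzoneAcl instances is assumed): addAcl appends a single entry,
-  // setAcl replaces the whole list, and getAcl reads the effective list.
-  //   addAcl(obj, acl);
-  //   setAcl(obj, Collections.singletonList(acl));
-  //   List<OzoneAcl> current = getAcl(obj);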
-
-  @Override
-  public DBUpdatesWrapper getDBUpdates(DBUpdatesRequest dbUpdatesRequest)
-      throws IOException {
-    OMRequest omRequest = createOMRequest(Type.DBUpdates)
-        .setDbUpdatesRequest(dbUpdatesRequest)
-        .build();
-
-    DBUpdatesResponse dbUpdatesResponse =
-        handleError(submitRequest(omRequest)).getDbUpdatesResponse();
-
-    DBUpdatesWrapper dbUpdatesWrapper = new DBUpdatesWrapper();
-    for (ByteString byteString : dbUpdatesResponse.getDataList()) {
-      dbUpdatesWrapper.addWriteBatch(byteString.toByteArray(), 0L);
-    }
-    dbUpdatesWrapper.setCurrentSequenceNumber(
-        dbUpdatesResponse.getSequenceNumber());
-    return dbUpdatesWrapper;
-  }
-
-  @Override
-  public OpenKeySession createFile(OmKeyArgs args,
-      boolean overWrite, boolean recursive) throws IOException {
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize())
-        .setType(args.getType())
-        .setFactor(args.getFactor())
-        .addAllAcls(args.getAcls().stream().map(a ->
-            OzoneAcl.toProtobuf(a)).collect(Collectors.toList()))
-        .build();
-    CreateFileRequest createFileRequest = CreateFileRequest.newBuilder()
-            .setKeyArgs(keyArgs)
-            .setIsOverwrite(overWrite)
-            .setIsRecursive(recursive)
-            .build();
-    OMRequest omRequest = createOMRequest(Type.CreateFile)
-        .setCreateFileRequest(createFileRequest)
-        .build();
-    CreateFileResponse resp =
-        handleError(submitRequest(omRequest)).getCreateFileResponse();
-    return new OpenKeySession(resp.getID(),
-        OmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion());
-  }
-
-  @Override
-  public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive,
-      String startKey, long numEntries) throws IOException {
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .build();
-    ListStatusRequest listStatusRequest =
-        ListStatusRequest.newBuilder()
-            .setKeyArgs(keyArgs)
-            .setRecursive(recursive)
-            .setStartKey(startKey)
-            .setNumEntries(numEntries)
-            .build();
-    OMRequest omRequest = createOMRequest(Type.ListStatus)
-        .setListStatusRequest(listStatusRequest)
-        .build();
-    ListStatusResponse listStatusResponse =
-        handleError(submitRequest(omRequest)).getListStatusResponse();
-    List<OzoneFileStatus> statusList =
-        new ArrayList<>(listStatusResponse.getStatusesCount());
-    for (OzoneFileStatusProto fileStatus : listStatusResponse
-        .getStatusesList()) {
-      statusList.add(OzoneFileStatus.getFromProtobuf(fileStatus));
-    }
-    return statusList;
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
deleted file mode 100644
index 69083dc..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.protocolPB;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.OzoneManagerService;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.security.token.TokenInfo;
-import org.apache.hadoop.ozone.security.OzoneDelegationTokenSelector;
-
-/**
- * Protocol used to communicate with OM.
- */
-@ProtocolInfo(protocolName =
-    "org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol",
-    protocolVersion = 1)
-@KerberosInfo(
-    serverPrincipal = OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY)
-@TokenInfo(OzoneDelegationTokenSelector.class)
-@InterfaceAudience.Private
-public interface OzoneManagerProtocolPB
-    extends OzoneManagerService.BlockingInterface {
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
deleted file mode 100644
index d595edf..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.protocolPB;
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java
deleted file mode 100644
index 69d94b6..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-/**
- * Classes related to ozone REST interface.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
deleted file mode 100644
index 4ff5f6a..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import com.google.protobuf.ByteString;
-import org.apache.hadoop.crypto.CipherSuite;
-import org.apache.hadoop.crypto.CryptoProtocolVersion;
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .BucketEncryptionInfoProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CipherSuiteProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CryptoProtocolVersionProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .FileEncryptionInfoProto;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
-import org.apache.hadoop.security.token.Token;
-
-/**
- * Utilities for converting protobuf classes.
- */
-public final class OMPBHelper {
-
-  private OMPBHelper() {
-    // Hidden constructor.
-  }
-
-  /**
-   * Converts an Ozone delegation token to a {@link TokenProto}.
-   * @return token proto representation of the given token
-   */
-  public static TokenProto convertToTokenProto(Token<?> tok) {
-    if (tok == null) {
-      throw new IllegalArgumentException("Invalid argument: token is null");
-    }
-
-    return TokenProto.newBuilder()
-        .setIdentifier(getByteString(tok.getIdentifier()))
-        .setPassword(getByteString(tok.getPassword()))
-        .setKind(tok.getKind().toString())
-        .setService(tok.getService().toString())
-        .build();
-  }
-
-  public static ByteString getByteString(byte[] bytes) {
-    // return singleton to reduce object allocation
-    return (bytes.length == 0) ? ByteString.EMPTY : ByteString.copyFrom(bytes);
-  }
-
-  /**
-   * Converts a {@link TokenProto} to an Ozone delegation token.
-   *
-   * @return Ozone delegation token
-   */
-  public static Token<OzoneTokenIdentifier> convertToDelegationToken(
-      TokenProto tokenProto) {
-    return new Token<>(tokenProto.getIdentifier()
-        .toByteArray(), tokenProto.getPassword().toByteArray(), new Text(
-        tokenProto.getKind()), new Text(tokenProto.getService()));
-  }
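-
-  // Round-trip sketch: convertToDelegationToken(convertToTokenProto(tok))
-  // reconstructs an equivalent token (same identifier, password, kind and
-  // service), since both directions copy the same four fields.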
-
-  public static BucketEncryptionKeyInfo convert(
-      BucketEncryptionInfoProto beInfo) {
-    if (beInfo == null) {
-      throw new IllegalArgumentException("Invalid argument: bucket encryption" +
-          " info is null");
-    }
-
-    return new BucketEncryptionKeyInfo(
-        beInfo.hasCryptoProtocolVersion() ?
-            convert(beInfo.getCryptoProtocolVersion()) : null,
-        beInfo.hasSuite() ? convert(beInfo.getSuite()) : null,
-        beInfo.getKeyName());
-  }
-
-  public static BucketEncryptionInfoProto convert(
-      BucketEncryptionKeyInfo beInfo) {
-    if (beInfo == null || beInfo.getKeyName() == null) {
-      throw new IllegalArgumentException("Invalid argument: bucket encryption" +
-          " info is null");
-    }
-
-    BucketEncryptionInfoProto.Builder bb = BucketEncryptionInfoProto
-        .newBuilder().setKeyName(beInfo.getKeyName());
-
-    if (beInfo.getSuite() != null) {
-      bb.setSuite(convert(beInfo.getSuite()));
-    }
-    if (beInfo.getVersion() != null) {
-      bb.setCryptoProtocolVersion(convert(beInfo.getVersion()));
-    }
-    return bb.build();
-  }
-
-  public static FileEncryptionInfoProto convert(
-      FileEncryptionInfo info) {
-    if (info == null) {
-      return null;
-    }
-    return OzoneManagerProtocolProtos.FileEncryptionInfoProto.newBuilder()
-        .setSuite(convert(info.getCipherSuite()))
-        .setCryptoProtocolVersion(convert(info.getCryptoProtocolVersion()))
-        .setKey(getByteString(info.getEncryptedDataEncryptionKey()))
-        .setIv(getByteString(info.getIV()))
-        .setEzKeyVersionName(info.getEzKeyVersionName())
-        .setKeyName(info.getKeyName())
-        .build();
-  }
-
-  public static FileEncryptionInfo convert(FileEncryptionInfoProto proto) {
-    if (proto == null) {
-      return null;
-    }
-    CipherSuite suite = convert(proto.getSuite());
-    CryptoProtocolVersion version = convert(proto.getCryptoProtocolVersion());
-    byte[] key = proto.getKey().toByteArray();
-    byte[] iv = proto.getIv().toByteArray();
-    String ezKeyVersionName = proto.getEzKeyVersionName();
-    String keyName = proto.getKeyName();
-    return new FileEncryptionInfo(suite, version, key, iv, keyName,
-        ezKeyVersionName);
-  }
-
-  public static CipherSuite convert(CipherSuiteProto proto) {
-    switch(proto) {
-    case AES_CTR_NOPADDING:
-      return CipherSuite.AES_CTR_NOPADDING;
-    default:
-      // Set to UNKNOWN and stash the unknown enum value
-      CipherSuite suite = CipherSuite.UNKNOWN;
-      suite.setUnknownValue(proto.getNumber());
-      return suite;
-    }
-  }
-
-  public static CipherSuiteProto convert(CipherSuite suite) {
-    switch (suite) {
-    case UNKNOWN:
-      return CipherSuiteProto.UNKNOWN;
-    case AES_CTR_NOPADDING:
-      return CipherSuiteProto.AES_CTR_NOPADDING;
-    default:
-      return null;
-    }
-  }
-
-  public static CryptoProtocolVersionProto convert(
-      CryptoProtocolVersion version) {
-    switch(version) {
-    case UNKNOWN:
-      return OzoneManagerProtocolProtos.CryptoProtocolVersionProto
-          .UNKNOWN_PROTOCOL_VERSION;
-    case ENCRYPTION_ZONES:
-      return OzoneManagerProtocolProtos.CryptoProtocolVersionProto
-          .ENCRYPTION_ZONES;
-    default:
-      return null;
-    }
-  }
-
-  public static CryptoProtocolVersion convert(
-      CryptoProtocolVersionProto proto) {
-    switch(proto) {
-    case ENCRYPTION_ZONES:
-      return CryptoProtocolVersion.ENCRYPTION_ZONES;
-    default:
-      // Set to UNKNOWN and stash the unknown enum value
-      CryptoProtocolVersion version = CryptoProtocolVersion.UNKNOWN;
-      version.setUnknownValue(proto.getNumber());
-      return version;
-    }
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java
deleted file mode 100644
index 8361bac..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-/**
- * Helper class for converting protobuf objects.
- */
-public final class OzonePBHelper {
-
-  private OzonePBHelper() {
-    // Hidden constructor.
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
deleted file mode 100644
index 860386d..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.protocolPB;
-
-/**
- * This package contains classes for the Protocol Buffers binding of Ozone
- * protocols.
- */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/AWSV4AuthValidator.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/AWSV4AuthValidator.java
deleted file mode 100644
index 575c9ea..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/AWSV4AuthValidator.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.security;
-
-import org.apache.hadoop.util.StringUtils;
-import org.apache.kerby.util.Hex;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.crypto.Mac;
-import javax.crypto.spec.SecretKeySpec;
-import java.io.UnsupportedEncodingException;
-import java.net.URLDecoder;
-import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
-import java.security.GeneralSecurityException;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-
-/**
- * AWS v4 authentication payload validator. For more details refer to AWS
- * documentation https://docs.aws.amazon.com/general/latest/gr/
- * sigv4-create-canonical-request.html.
- **/
-final class AWSV4AuthValidator {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(AWSV4AuthValidator.class);
-  private static final String HMAC_SHA256_ALGORITHM = "HmacSHA256";
-  private static final Charset UTF_8 = Charset.forName("utf-8");
-
-  private AWSV4AuthValidator() {
-  }
-
-  private static String urlDecode(String str) {
-    try {
-      return URLDecoder.decode(str, UTF_8.name());
-    } catch (UnsupportedEncodingException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  public static String hash(String payload) throws NoSuchAlgorithmException {
-    MessageDigest md = MessageDigest.getInstance("SHA-256");
-    md.update(payload.getBytes(UTF_8));
-    return String.format("%064x", new java.math.BigInteger(1, md.digest()));
-  }
-
-  private static byte[] sign(byte[] key, String msg) {
-    try {
-      SecretKeySpec signingKey = new SecretKeySpec(key, HMAC_SHA256_ALGORITHM);
-      Mac mac = Mac.getInstance(HMAC_SHA256_ALGORITHM);
-      mac.init(signingKey);
-      return mac.doFinal(msg.getBytes(StandardCharsets.UTF_8));
-    } catch (GeneralSecurityException gse) {
-      throw new RuntimeException(gse);
-    }
-  }
-
-  /**
-   * Returns the AWS v4 signing key, derived by chaining HMAC-SHA256 over the
-   * date, region and service parsed from the credential scope line of the
-   * string to sign:
-   *
-   * SignatureKey = HMAC-SHA256(HMAC-SHA256(HMAC-SHA256(HMAC-SHA256("AWS4" +
-   * "<YourSecretAccessKey>","20130524"),"us-east-1"),"s3"),"aws4_request")
-   *
-   * For more details refer to AWS documentation: https://docs.aws.amazon
-   * .com/AmazonS3/latest/API/sig-v4-header-based-auth.html
-   *
-   * @param key user's secret access key.
-   * @param strToSign string to sign, containing the credential scope.
-   */
-  private static byte[] getSigningKey(String key, String strToSign) {
-    String[] signData = StringUtils.split(StringUtils.split(strToSign,
-        '\n')[2], '/');
-    String dateStamp = signData[0];
-    String regionName = signData[1];
-    String serviceName = signData[2];
-    byte[] kDate = sign(("AWS4" + key).getBytes(UTF_8), dateStamp);
-    byte[] kRegion = sign(kDate, regionName);
-    byte[] kService = sign(kRegion, serviceName);
-    byte[] kSigning = sign(kService, "aws4_request");
-    // Avoid logging the derived signing key at INFO level; it is secret
-    // material and is only useful for low-level debugging.
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Signing key: {}", Hex.encode(kSigning));
-    }
-    return kSigning;
-  }
-
-  /**
-   * Validates the request by recomputing the signature and comparing it with
-   * the signature from the request. Returns true if the AWS request is
-   * legitimate, else false.
-   * Signature = HEX(HMAC_SHA256(key, String to Sign))
-   *
-   * For more details refer to AWS documentation: https://docs.aws.amazon.com
-   * /AmazonS3/latest/API/sigv4-streaming.html
-   */
-  public static boolean validateRequest(String strToSign, String signature,
-      String userKey) {
-    String expectedSignature = Hex.encode(sign(getSigningKey(userKey,
-        strToSign), strToSign));
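-    // Note: String.equals is not constant-time; comparing the raw HMAC bytes
-    // with java.security.MessageDigest.isEqual would avoid a timing side
-    // channel here.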
-    return expectedSignature.equals(signature);
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java
deleted file mode 100644
index 0fd6b08..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.security;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import java.security.SecureRandom;
-import java.util.HashMap;
-import java.util.Map;
-
-import javax.crypto.Cipher;
-import javax.crypto.spec.SecretKeySpec;
-
-/**
- * Symmetric Key structure for GDPR.
- */
-public class GDPRSymmetricKey {
-
-  private SecretKeySpec secretKey;
-  private Cipher cipher;
-  private String algorithm;
-  private String secret;
-
-  public SecretKeySpec getSecretKey() {
-    return secretKey;
-  }
-
-  public Cipher getCipher() {
-    return cipher;
-  }
-
-  /**
-   * Creates a key with the default algorithm and a randomly generated secret.
-   * @throws Exception if the cipher cannot be instantiated.
-   */
-  public GDPRSymmetricKey(SecureRandom secureRandom) throws Exception {
-    algorithm = OzoneConsts.GDPR_ALGORITHM_NAME;
-    secret = RandomStringUtils.random(
-        OzoneConsts.GDPR_DEFAULT_RANDOM_SECRET_LENGTH,
-        0, 0, true, true, null, secureRandom);
-    this.secretKey = new SecretKeySpec(
-        secret.getBytes(OzoneConsts.GDPR_CHARSET), algorithm);
-    this.cipher = Cipher.getInstance(algorithm);
-  }
-
-  /**
-   * Overloaded constructor creates a key with the specified values.
-   * @throws Exception if the cipher cannot be instantiated.
-   */
-  public GDPRSymmetricKey(String secret, String algorithm) throws Exception {
-    Preconditions.checkNotNull(secret, "Secret cannot be null");
-    //TODO: When we add feature to allow users to customize the secret length,
-    // we need to update this length check Precondition
-    Preconditions.checkArgument(secret.length() == 16,
-        "Secret must be exactly 16 characters");
-    Preconditions.checkNotNull(algorithm, "Algorithm cannot be null");
-    this.secret = secret;
-    this.algorithm = algorithm;
-    this.secretKey = new SecretKeySpec(
-        secret.getBytes(OzoneConsts.GDPR_CHARSET), algorithm);
-    this.cipher = Cipher.getInstance(algorithm);
-  }
-
-  public Map<String, String> getKeyDetails() {
-    Map<String, String> keyDetail = new HashMap<>();
-    keyDetail.put(OzoneConsts.GDPR_SECRET, this.secret);
-    keyDetail.put(OzoneConsts.GDPR_ALGORITHM, this.algorithm);
-    return keyDetail;
-  }
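-
-  // Usage sketch (illustration only; the algorithm string and any cipher
-  // mode/IV handling come from OzoneConsts and the calling code, and are
-  // assumed here):
-  //   GDPRSymmetricKey gKey = new GDPRSymmetricKey(new SecureRandom());
-  //   Cipher c = gKey.getCipher();
-  //   c.init(Cipher.ENCRYPT_MODE, gKey.getSecretKey());
-  //   byte[] encrypted = c.doFinal(plainBytes);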
-
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java
deleted file mode 100644
index 5cc7823..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.security;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.EnumSet;
-
-/**
- * SecretManager for Ozone Master block tokens.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class OzoneBlockTokenSecretManager extends
-    OzoneSecretManager<OzoneBlockTokenIdentifier> {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(OzoneBlockTokenSecretManager.class);
-  // Will be set by grpc clients for individual datanodes.
-  static final Text SERVICE = new Text("HDDS_SERVICE");
-  private final String omCertSerialId;
-
-  /**
-   * Create a secret manager.
-   *
-   * @param conf security configuration.
-   * @param blockTokenExpirytime block token expiry time in milliseconds.
-   * @param omCertSerialId serial id of the OM certificate.
-   */
-  public OzoneBlockTokenSecretManager(SecurityConfig conf,
-      long blockTokenExpirytime, String omCertSerialId) {
-    super(conf, blockTokenExpirytime, blockTokenExpirytime, SERVICE, LOG);
-    this.omCertSerialId = omCertSerialId;
-  }
-
-  @Override
-  public OzoneBlockTokenIdentifier createIdentifier() {
-    throw new SecurityException("Ozone block token can't be created "
-        + "without owner and access mode information.");
-  }
-
-  public OzoneBlockTokenIdentifier createIdentifier(String owner,
-      String blockId, EnumSet<AccessModeProto> modes, long maxLength) {
-    return new OzoneBlockTokenIdentifier(owner, blockId, modes,
-        getTokenExpiryTime(), omCertSerialId, maxLength);
-  }
-
-  /**
-   * Generate a block token for the specified user and blockId. The service
-   * field of the token is set to the blockId.
-   *
-   * @param user owner of the token.
-   * @param blockId id of the block the token grants access to.
-   * @param modes access modes granted by the token.
-   * @param maxLength maximum data length the token allows.
-   * @return token
-   */
-  public Token<OzoneBlockTokenIdentifier> generateToken(String user,
-      String blockId, EnumSet<AccessModeProto> modes, long maxLength) {
-    OzoneBlockTokenIdentifier tokenIdentifier = createIdentifier(user,
-        blockId, modes, maxLength);
-    if (LOG.isTraceEnabled()) {
-      long expiryTime = tokenIdentifier.getExpiryDate();
-      String tokenId = tokenIdentifier.toString();
-      LOG.trace("Issued delegation token -> expiryTime:{}, tokenId:{}",
-          expiryTime, tokenId);
-    }
-    // Pass blockId as service.
-    return new Token<>(tokenIdentifier.getBytes(),
-        createPassword(tokenIdentifier), tokenIdentifier.getKind(),
-        new Text(blockId));
-  }
-
-  /**
-   * Generate a block token for the current user.
-   *
-   * @param blockId id of the block the token grants access to.
-   * @param modes access modes granted by the token.
-   * @return token
-   */
-  public Token<OzoneBlockTokenIdentifier> generateToken(String blockId,
-      EnumSet<AccessModeProto> modes, long maxLength) throws IOException {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    String userID = (ugi == null ? null : ugi.getShortUserName());
-    return generateToken(userID, blockId, modes, maxLength);
-  }
-
-  @Override
-  public byte[] retrievePassword(OzoneBlockTokenIdentifier identifier)
-      throws InvalidToken {
-    validateToken(identifier);
-    return createPassword(identifier);
-  }
-
-  @Override
-  public long renewToken(Token<OzoneBlockTokenIdentifier> token,
-      String renewer) throws IOException {
-    throw new UnsupportedOperationException("Renew token operation is not " +
-        "supported for ozone block tokens.");
-  }
-
-  @Override
-  public OzoneBlockTokenIdentifier cancelToken(Token<OzoneBlockTokenIdentifier>
-      token, String canceller) throws IOException {
-    throw new UnsupportedOperationException("Cancel token operation is not " +
-        "supported for ozone block tokens.");
-  }
-
-  /**
-   * Find the OzoneBlockTokenInfo for the given token id, and verify that the
-   * token is not expired.
-   */
-  public boolean validateToken(OzoneBlockTokenIdentifier identifier)
-      throws InvalidToken {
-    long now = Time.now();
-    if (identifier.getExpiryDate() < now) {
-      throw new InvalidToken("token " + formatTokenId(identifier) + " is " +
-          "expired, current time: " + Time.formatTime(now) +
-          " expiry time: " + identifier.getExpiryDate());
-    }
-
-    if (!verifySignature(identifier, createPassword(identifier))) {
-      throw new InvalidToken("Tampered/Invalid token.");
-    }
-    return true;
-  }
-
-  /**
-   * Validates whether the given password matches the identifier. Not
-   * supported for block tokens.
-   *
-   * @param identifier token identifier.
-   * @param password password (hash) to validate.
-   */
-  public boolean verifySignature(OzoneBlockTokenIdentifier identifier,
-      byte[] password) {
-    throw new UnsupportedOperationException("This operation is not " +
-        "supported for block tokens.");
-  }
-
-  /**
-   * Should be called before this object is used.
-   * @param client certificate client used for signing and verification.
-   */
-  @Override
-  public synchronized void start(CertificateClient client) throws IOException {
-    super.start(client);
-  }
-
-  /**
-   * Returns expiry time by adding configured expiry time with current time.
-   *
-   * @return Expiry time.
-   */
-  private long getTokenExpiryTime() {
-    return Time.now() + getTokenRenewInterval();
-  }
-
-  /**
-   * Should be called when this object is no longer used.
-   */
-  @Override
-  public synchronized void stop() throws IOException {
-    super.stop();
-  }
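-
-  // Lifecycle sketch (illustration only): start(certClient) must run before
-  // any token operation, generateToken signs a new block token, and a
-  // receiving datanode checks it with validateToken, which throws
-  // InvalidToken on expiry; stop() releases the signing material.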
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
deleted file mode 100644
index 0de8ac6..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
+++ /dev/null
@@ -1,560 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.security;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.S3SecretManager;
-import org.apache.hadoop.ozone.om.S3SecretManagerImpl;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.security.OzoneSecretStore.OzoneManagerSecretState;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier.TokenInfo;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.HadoopKerberosName;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_EXPIRED;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type.S3TOKEN;
-
-/**
- * SecretManager for Ozone Master. Responsible for signing identifiers with
- * private key.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class OzoneDelegationTokenSecretManager
-    extends OzoneSecretManager<OzoneTokenIdentifier> {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(OzoneDelegationTokenSecretManager.class);
-  private final Map<OzoneTokenIdentifier, TokenInfo> currentTokens;
-  private final OzoneSecretStore store;
-  private final S3SecretManagerImpl s3SecretManager;
-  private Thread tokenRemoverThread;
-  private final long tokenRemoverScanInterval;
-  private String omCertificateSerialId;
-  /**
-   * If the delegation token update thread holds this lock, it will not get
-   * interrupted.
-   */
-  private final Object noInterruptsLock = new Object();
-
-  private boolean isRatisEnabled;
-
-  /**
-   * Create a secret manager.
-   *
-   * @param conf configuration.
-   * @param tokenMaxLifetime the maximum lifetime of the delegation tokens in
-   * milliseconds
-   * @param tokenRenewInterval how often the tokens must be renewed in
-   * milliseconds
-   * @param dtRemoverScanInterval how often the tokens are scanned for expired
-   * tokens in milliseconds
-   * @param service name of the service the tokens are issued for
-   * @param s3SecretManager secret manager used to validate S3 tokens
-   * @param certClient certificate client for the SCM CA
-   */
-  public OzoneDelegationTokenSecretManager(OzoneConfiguration conf,
-      long tokenMaxLifetime, long tokenRenewInterval,
-      long dtRemoverScanInterval, Text service,
-      S3SecretManager s3SecretManager, CertificateClient certClient)
-      throws IOException {
-    super(new SecurityConfig(conf), tokenMaxLifetime, tokenRenewInterval,
-        service, LOG);
-    setCertClient(certClient);
-    currentTokens = new ConcurrentHashMap<>();
-    this.tokenRemoverScanInterval = dtRemoverScanInterval;
-    this.s3SecretManager = (S3SecretManagerImpl) s3SecretManager;
-    this.store = new OzoneSecretStore(conf,
-        this.s3SecretManager.getOmMetadataManager());
-    isRatisEnabled = conf.getBoolean(
-        OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT);
-    loadTokenSecretState(store.loadState());
-  }
-
-  @Override
-  public OzoneTokenIdentifier createIdentifier() {
-    return OzoneTokenIdentifier.newInstance();
-  }
-
-  /**
-   * Create a new identifier with the given owner, renewer and real user.
-   *
-   * @return a new {@link OzoneTokenIdentifier}
-   */
-  public OzoneTokenIdentifier createIdentifier(Text owner, Text renewer,
-      Text realUser) {
-    return OzoneTokenIdentifier.newInstance(owner, renewer, realUser);
-  }
-
-  /**
-   * Returns {@link Token} for given identifier.
-   *
-   * @param owner the effective username of the token owner
-   * @param renewer the username of the renewer
-   * @param realUser the real username of the token owner
-   * @return Token
-   * @throws IOException to allow future exceptions to be added without breaking
-   * compatibility
-   */
-  public Token<OzoneTokenIdentifier> createToken(Text owner, Text renewer,
-      Text realUser)
-      throws IOException {
-    OzoneTokenIdentifier identifier = createIdentifier(owner, renewer,
-        realUser);
-    updateIdentifierDetails(identifier);
-
-    byte[] password = createPassword(identifier.getBytes(),
-        getCurrentKey().getPrivateKey());
-    long expiryTime = identifier.getIssueDate() + getTokenRenewInterval();
-
-    // For HA, Ratis will take care of updating.
-    // This will be removed when the HA/Non-HA code is merged.
-    if (!isRatisEnabled) {
-      addToTokenStore(identifier, password, expiryTime);
-    }
-
-    Token<OzoneTokenIdentifier> token = new Token<>(identifier.getBytes(),
-        password, identifier.getKind(), getService());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Created delegation token: {}", token);
-    }
-    return token;
-  }
-
-  /**
-   * Adds a delegation token to the in-memory token map.
-   * @param token the delegation token
-   * @param ozoneTokenIdentifier identifier of the token
-   * @return the renew time recorded for the token
-   */
-  public long updateToken(Token<OzoneTokenIdentifier> token,
-      OzoneTokenIdentifier ozoneTokenIdentifier) {
-    long renewTime =
-        ozoneTokenIdentifier.getIssueDate() + getTokenRenewInterval();
-    TokenInfo tokenInfo = new TokenInfo(renewTime, token.getPassword(),
-        ozoneTokenIdentifier.getTrackingId());
-    currentTokens.put(ozoneTokenIdentifier, tokenInfo);
-    return renewTime;
-  }
-
-  /**
-   * Stores given identifier in token store.
-   *
-   * @param identifier the token identifier to store
-   * @param password the password (signature) for the token
-   * @param renewTime the renew time to record
-   * @throws IOException
-   */
-  private void addToTokenStore(OzoneTokenIdentifier identifier,
-      byte[] password, long renewTime)
-      throws IOException {
-    TokenInfo tokenInfo = new TokenInfo(renewTime, password,
-        identifier.getTrackingId());
-    currentTokens.put(identifier, tokenInfo);
-    store.storeToken(identifier, tokenInfo.getRenewDate());
-  }
-
-  /**
-   * Updates issue date, master key id and sequence number for identifier.
-   *
-   * @param identifier the identifier to validate
-   */
-  private void updateIdentifierDetails(OzoneTokenIdentifier identifier) {
-    int sequenceNum;
-    long now = Time.now();
-    sequenceNum = incrementDelegationTokenSeqNum();
-    identifier.setIssueDate(now);
-    identifier.setMasterKeyId(getCurrentKey().getKeyId());
-    identifier.setSequenceNumber(sequenceNum);
-    identifier.setMaxDate(now + getTokenMaxLifetime());
-    identifier.setOmCertSerialId(getOmCertificateSerialId());
-  }
-
-  /**
-   * Returns the serial id of the OM certificate, caching it on first use.
-   */
-  private String getOmCertificateSerialId() {
-    if (omCertificateSerialId == null) {
-      omCertificateSerialId =
-          getCertClient().getCertificate().getSerialNumber().toString();
-    }
-    return omCertificateSerialId;
-  }
-
-  /**
-   * Renew a delegation token.
-   *
-   * @param token the token to renew
-   * @param renewer the full principal name of the user doing the renewal
-   * @return the new expiration time
-   * @throws InvalidToken if the token is invalid
-   * @throws AccessControlException if the user can't renew token
-   */
-  @Override
-  public synchronized long renewToken(Token<OzoneTokenIdentifier> token,
-      String renewer) throws IOException {
-    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
-    DataInputStream in = new DataInputStream(buf);
-    OzoneTokenIdentifier id = OzoneTokenIdentifier.readProtoBuf(in);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Token renewal for identifier: {}, total currentTokens: {}",
-          formatTokenId(id), currentTokens.size());
-    }
-
-    long now = Time.now();
-    if (id.getMaxDate() < now) {
-      throw new OMException(renewer + " tried to renew an expired token "
-          + formatTokenId(id) + " max expiration date: "
-          + Time.formatTime(id.getMaxDate())
-          + " currentTime: " + Time.formatTime(now), TOKEN_EXPIRED);
-    }
-    validateToken(id);
-    if ((id.getRenewer() == null) || (id.getRenewer().toString().isEmpty())) {
-      throw new AccessControlException(renewer +
-          " tried to renew a token " + formatTokenId(id)
-          + " without a renewer");
-    }
-    if (!id.getRenewer().toString().equals(renewer)) {
-      throw new AccessControlException(renewer
-          + " tries to renew a token " + formatTokenId(id)
-          + " with non-matching renewer " + id.getRenewer());
-    }
-
-    long renewTime = Math.min(id.getMaxDate(), now + getTokenRenewInterval());
-
-    // For HA, Ratis will take care of updating.
-    // This will be removed when the HA/Non-HA code is merged.
-    if (!isRatisEnabled) {
-      try {
-        addToTokenStore(id, token.getPassword(), renewTime);
-      } catch (IOException e) {
-        LOG.error("Unable to update token " + id.getSequenceNumber(), e);
-      }
-    }
-    return renewTime;
-  }
-
-  public void updateRenewToken(Token<OzoneTokenIdentifier> token,
-      OzoneTokenIdentifier ozoneTokenIdentifier, long expiryTime) {
-    //TODO: Instead of having in-memory map inside this class, we can use
-    // cache from table and make this table cache clean up policy NEVER. In
-    // this way, we don't need to maintain a separate in-memory map. To do this
-    // work we need to merge HA/Non-HA code.
-    TokenInfo tokenInfo = new TokenInfo(expiryTime, token.getPassword(),
-        ozoneTokenIdentifier.getTrackingId());
-    currentTokens.put(ozoneTokenIdentifier, tokenInfo);
-  }
-
-  /**
-   * Cancel a token by removing it from store and cache.
-   *
-   * @return Identifier of the canceled token
-   * @throws InvalidToken for invalid token
-   * @throws AccessControlException if the user isn't allowed to cancel
-   */
-  public OzoneTokenIdentifier cancelToken(Token<OzoneTokenIdentifier> token,
-      String canceller) throws IOException {
-    OzoneTokenIdentifier id = OzoneTokenIdentifier.readProtoBuf(
-        token.getIdentifier());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Token cancellation requested for identifier: {}",
-          formatTokenId(id));
-    }
-
-    if (id.getUser() == null) {
-      throw new InvalidToken("Token with no owner " + formatTokenId(id));
-    }
-    String owner = id.getUser().getUserName();
-    Text renewer = id.getRenewer();
-    HadoopKerberosName cancelerKrbName = new HadoopKerberosName(canceller);
-    String cancelerShortName = cancelerKrbName.getShortName();
-    if (!canceller.equals(owner)
-        && (renewer == null || renewer.toString().isEmpty()
-        || !cancelerShortName
-        .equals(renewer.toString()))) {
-      throw new AccessControlException(canceller
-          + " is not authorized to cancel the token " + formatTokenId(id));
-    }
-
-    // For HA, Ratis will take care of removal.
-    // This check will be removed when the HA/Non-HA code is merged.
-    if (!isRatisEnabled) {
-      try {
-        store.removeToken(id);
-      } catch (IOException e) {
-        LOG.error("Unable to remove token " + id.getSequenceNumber(), e);
-      }
-      TokenInfo info = currentTokens.remove(id);
-      if (info == null) {
-        throw new InvalidToken("Token not found " + formatTokenId(id));
-      }
-    } else {
-      // Check whether the token is present in the in-memory token map on
-      // the OM leader.
-      TokenInfo info = currentTokens.get(id);
-      if (info == null) {
-        throw new InvalidToken("Token not found in-memory map of tokens" +
-            formatTokenId(id));
-      }
-    }
-    return id;
-  }
-
-  /**
-   * Removes the expired token from the in-memory map.
-   * @param ozoneTokenIdentifier identifier of the token to remove
-   */
-  public void removeToken(OzoneTokenIdentifier ozoneTokenIdentifier) {
-    currentTokens.remove(ozoneTokenIdentifier);
-  }
-
-  @Override
-  public byte[] retrievePassword(OzoneTokenIdentifier identifier)
-      throws InvalidToken {
-    if (identifier.getTokenType().equals(S3TOKEN)) {
-      return validateS3Token(identifier);
-    }
-    return validateToken(identifier).getPassword();
-  }
-
-  /**
-   * Checks that a TokenInfo exists in the in-memory cache for the given
-   * identifier, that the token has not expired, and that its signature
-   * verifies.
-   */
-  private TokenInfo validateToken(OzoneTokenIdentifier identifier)
-      throws InvalidToken {
-    TokenInfo info = currentTokens.get(identifier);
-    if (info == null) {
-      throw new InvalidToken("token " + formatTokenId(identifier)
-          + " can't be found in cache");
-    }
-    long now = Time.now();
-    if (info.getRenewDate() < now) {
-      throw new InvalidToken("token " + formatTokenId(identifier) + " is " +
-          "expired, current time: " + Time.formatTime(now) +
-          " expected renewal time: " + Time.formatTime(info.getRenewDate()));
-    }
-    if (!verifySignature(identifier, info.getPassword())) {
-      throw new InvalidToken("Tampered/Invalid token.");
-    }
-    return info;
-  }
-
-  /**
-   * Verifies that the given password is a valid signature over the
-   * identifier, using the OM certificate recorded in the identifier.
-   *
-   * @param identifier the token identifier
-   * @param password the signature to verify
-   */
-  public boolean verifySignature(OzoneTokenIdentifier identifier,
-      byte[] password) {
-    try {
-      return getCertClient().verifySignature(identifier.getBytes(), password,
-          getCertClient().getCertificate(identifier.getOmCertSerialId()));
-    } catch (CertificateException e) {
-      return false;
-    }
-  }
-
-  /**
-   * Validates an S3 identifier against the stored AWS secret.
-   */
-  private byte[] validateS3Token(OzoneTokenIdentifier identifier)
-      throws InvalidToken {
-    LOG.trace("Validating S3Token for identifier:{}", identifier);
-    String awsSecret;
-    try {
-      awsSecret = s3SecretManager.getS3UserSecretString(identifier
-          .getAwsAccessId());
-    } catch (IOException e) {
-      LOG.error("Error while validating S3 identifier:{}",
-          identifier, e);
-      throw new InvalidToken("No S3 secret found for S3 identifier:"
-          + identifier);
-    }
-
-    if (awsSecret == null) {
-      throw new InvalidToken("No S3 secret found for S3 identifier:"
-          + identifier);
-    }
-
-    if (AWSV4AuthValidator.validateRequest(identifier.getStrToSign(),
-        identifier.getSignature(), awsSecret)) {
-      return identifier.getSignature().getBytes(UTF_8);
-    }
-    throw new InvalidToken("Invalid S3 identifier:"
-        + identifier);
-
-  }
-
-  private void loadTokenSecretState(
-      OzoneManagerSecretState<OzoneTokenIdentifier> state) throws IOException {
-    LOG.info("Loading token state into token manager.");
-    for (Map.Entry<OzoneTokenIdentifier, Long> entry :
-        state.getTokenState().entrySet()) {
-      addPersistedDelegationToken(entry.getKey(), entry.getValue());
-    }
-  }
-
-  private void addPersistedDelegationToken(OzoneTokenIdentifier identifier,
-      long renewDate) throws IOException {
-    if (isRunning()) {
-      // a safety check
-      throw new IOException(
-          "Can't add persisted delegation token to a running SecretManager.");
-    }
-
-    byte[] password = createPassword(identifier.getBytes(),
-        getCertClient().getPrivateKey());
-    if (identifier.getSequenceNumber() > getDelegationTokenSeqNum()) {
-      setDelegationTokenSeqNum(identifier.getSequenceNumber());
-    }
-    if (currentTokens.get(identifier) == null) {
-      currentTokens.put(identifier, new TokenInfo(renewDate,
-          password, identifier.getTrackingId()));
-    } else {
-      throw new IOException("Same delegation token being added twice: "
-          + formatTokenId(identifier));
-    }
-  }
-
-  /**
-   * Should be called before this object is used.
-   */
-  @Override
-  public synchronized void start(CertificateClient certClient)
-      throws IOException {
-    super.start(certClient);
-    tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
-    tokenRemoverThread.start();
-  }
-
-  public void stopThreads() {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Stopping expired delegation token remover thread");
-    }
-    setIsRunning(false);
-
-    if (tokenRemoverThread != null) {
-      synchronized (noInterruptsLock) {
-        tokenRemoverThread.interrupt();
-      }
-      try {
-        tokenRemoverThread.join();
-      } catch (InterruptedException e) {
-        throw new RuntimeException(
-            "Unable to join on token removal thread", e);
-      }
-    }
-  }
-
-  /**
-   * Stops the OzoneDelegationTokenSecretManager.
-   *
-   * @throws IOException
-   */
-  @Override
-  public void stop() throws IOException {
-    super.stop();
-    stopThreads();
-    if (this.store != null) {
-      this.store.close();
-    }
-  }
-
-  /**
-   * Remove expired delegation tokens from cache and persisted store.
-   */
-  private void removeExpiredToken() {
-    long now = Time.now();
-    synchronized (this) {
-      Iterator<Map.Entry<OzoneTokenIdentifier,
-          TokenInfo>> i = currentTokens.entrySet().iterator();
-      while (i.hasNext()) {
-        Map.Entry<OzoneTokenIdentifier,
-            TokenInfo> entry = i.next();
-        long renewDate = entry.getValue().getRenewDate();
-        if (renewDate < now) {
-          i.remove();
-          try {
-            store.removeToken(entry.getKey());
-          } catch (IOException e) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Failed to remove expired token {}", entry.getValue());
-            }
-          }
-        }
-      }
-    }
-  }
-
-  private class ExpiredTokenRemover extends Thread {
-
-    private long lastTokenCacheCleanup;
-
-    @Override
-    public void run() {
-      LOG.info("Starting expired delegation token remover thread, "
-          + "tokenRemoverScanInterval=" + getTokenRemoverScanInterval()
-          / (60 * 1000) + " min(s)");
-      try {
-        while (isRunning()) {
-          long now = Time.now();
-          if (lastTokenCacheCleanup + getTokenRemoverScanInterval()
-              < now) {
-            removeExpiredToken();
-            lastTokenCacheCleanup = now;
-          }
-          try {
-            Thread.sleep(Math.min(5000,
-                getTokenRemoverScanInterval())); // sleep at most 5 seconds
-          } catch (InterruptedException ie) {
-            LOG.error("ExpiredTokenRemover received " + ie);
-          }
-        }
-      } catch (Throwable t) {
-        LOG.error("ExpiredTokenRemover thread received unexpected exception",
-            t);
-        Runtime.getRuntime().exit(-1);
-      }
-    }
-  }
-
-  public long getTokenRemoverScanInterval() {
-    return tokenRemoverScanInterval;
-  }
-}
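For reference, the renewal and expiry arithmetic that renewToken and
removeExpiredToken implement above reduces to two comparisons. A minimal,
self-contained sketch (the class below is illustrative only, not part of the
deleted sources):

// Renewal may extend a token by one renew interval at a time, but never
// past its hard max lifetime; a token is expired once "now" passes the
// recorded renew date.
final class RenewMath {
  static long nextRenewTime(long maxDate, long now, long renewIntervalMs) {
    return Math.min(maxDate, now + renewIntervalMs);
  }

  static boolean isExpired(long renewDate, long now) {
    return renewDate < now;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    long maxDate = now + 7L * 24 * 3600 * 1000;           // 7-day max lifetime
    long renewTime = nextRenewTime(maxDate, now, 24L * 3600 * 1000); // 1 day
    System.out.println("renews to " + renewTime + ", expired now? "
        + isExpired(renewTime, now));                     // false
  }
}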
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java
deleted file mode 100644
index 68afaaf..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.security;
-
-import java.util.Collection;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A delegation token selector that is specialized for Ozone.
- */
-@InterfaceAudience.Private
-public class OzoneDelegationTokenSelector
-    extends AbstractDelegationTokenSelector<OzoneTokenIdentifier> {
-
-  public OzoneDelegationTokenSelector() {
-    super(OzoneTokenIdentifier.KIND_NAME);
-  }
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(OzoneDelegationTokenSelector.class);
-
-  @Override
-  public Token<OzoneTokenIdentifier> selectToken(Text service,
-      Collection<Token<? extends TokenIdentifier>> tokens) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Getting token for service {}", service);
-    }
-    Token<OzoneTokenIdentifier> token = getSelectedTokens(service, tokens);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Got token: {} for service {}", token, service);
-    }
-    return token;
-  }
-
-  private Token<OzoneTokenIdentifier> getSelectedTokens(Text service,
-      Collection<Token<? extends TokenIdentifier>> tokens) {
-    if (service == null) {
-      return null;
-    }
-    for (Token<? extends TokenIdentifier> token : tokens) {
-      if (OzoneTokenIdentifier.KIND_NAME.equals(token.getKind())
-          && token.getService().toString().contains(service.toString())) {
-        return (Token<OzoneTokenIdentifier>) token;
-      }
-    }
-    return null;
-  }
-
-}
-
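A hedged usage sketch for the selector above, assuming the deleted classes
were still on the classpath; "om-service" is a hypothetical service name
(real clients derive it from the OM address):

// getTokens() returns every token held by the current user; the selector
// picks the Ozone one by kind and service substring match, as implemented
// above.
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
Token<OzoneTokenIdentifier> selected = new OzoneDelegationTokenSelector()
    .selectToken(new Text("om-service"), ugi.getTokens());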
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretKey.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretKey.java
deleted file mode 100644
index 39260fe..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretKey.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.security;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
-import java.io.ByteArrayInputStream;
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.security.KeyPair;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SecretKeyProto;
-
-/**
- * Wrapper class for Ozone/Hdds secret keys. Used in delegation tokens and block
- * tokens.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class OzoneSecretKey implements Writable {
-
-  private int keyId;
-  private long expiryDate;
-  private PrivateKey privateKey;
-  private PublicKey publicKey;
-  private SecurityConfig securityConfig;
-
-  public OzoneSecretKey(int keyId, long expiryDate, KeyPair keyPair) {
-    Preconditions.checkNotNull(keyId);
-    this.keyId = keyId;
-    this.expiryDate = expiryDate;
-    this.privateKey = keyPair.getPrivate();
-    this.publicKey = keyPair.getPublic();
-  }
-
-  /**
-   * Create a new instance using the default signature algorithm and
-   * provider.
-   */
-  public OzoneSecretKey(int keyId, long expiryDate, byte[] pvtKey,
-      byte[] publicKey) {
-    Preconditions.checkNotNull(pvtKey);
-    Preconditions.checkNotNull(publicKey);
-
-    this.securityConfig = new SecurityConfig(new OzoneConfiguration());
-    this.keyId = keyId;
-    this.expiryDate = expiryDate;
-    this.privateKey = SecurityUtil.getPrivateKey(pvtKey, securityConfig);
-    this.publicKey = SecurityUtil.getPublicKey(publicKey, securityConfig);
-  }
-
-  public int getKeyId() {
-    return keyId;
-  }
-
-  public long getExpiryDate() {
-    return expiryDate;
-  }
-
-  public PrivateKey getPrivateKey() {
-    return privateKey;
-  }
-
-  public PublicKey getPublicKey() {
-    return publicKey;
-  }
-
-  public byte[] getEncodedPrivateKey() {
-    return privateKey.getEncoded();
-  }
-
-  public byte[] getEncodedPubliceKey() {
-    return publicKey.getEncoded();
-  }
-
-  public void setExpiryDate(long expiryDate) {
-    this.expiryDate = expiryDate;
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    SecretKeyProto token = SecretKeyProto.newBuilder()
-        .setKeyId(getKeyId())
-        .setExpiryDate(getExpiryDate())
-        .setPrivateKeyBytes(ByteString.copyFrom(getEncodedPrivateKey()))
-        .setPublicKeyBytes(ByteString.copyFrom(getEncodedPubliceKey()))
-        .build();
-    out.write(token.toByteArray());
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    SecretKeyProto secretKey = SecretKeyProto.parseFrom((DataInputStream) in);
-    expiryDate = secretKey.getExpiryDate();
-    keyId = secretKey.getKeyId();
-    privateKey = SecurityUtil.getPrivateKey(secretKey.getPrivateKeyBytes()
-        .toByteArray(), securityConfig);
-    publicKey = SecurityUtil.getPublicKey(secretKey.getPublicKeyBytes()
-        .toByteArray(), securityConfig);
-  }
-
-  @Override
-  public int hashCode() {
-    HashCodeBuilder hashCodeBuilder = new HashCodeBuilder(537, 963);
-    hashCodeBuilder.append(getExpiryDate())
-        .append(getKeyId())
-        .append(getEncodedPrivateKey())
-        .append(getEncodedPubliceKey());
-
-    return hashCodeBuilder.build();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == this) {
-      return true;
-    }
-
-    if (obj instanceof OzoneSecretKey) {
-      OzoneSecretKey that = (OzoneSecretKey) obj;
-      return new EqualsBuilder()
-          .append(this.keyId, that.keyId)
-          .append(this.expiryDate, that.expiryDate)
-          .append(this.privateKey, that.privateKey)
-          .append(this.publicKey, that.publicKey)
-          .build();
-    }
-    return false;
-  }
-
-  /**
-   * Reads protobuf encoded input stream to construct {@link OzoneSecretKey}.
-   */
-  static OzoneSecretKey readProtoBuf(DataInput in) throws IOException {
-    Preconditions.checkNotNull(in);
-    SecretKeyProto key = SecretKeyProto.parseFrom((DataInputStream) in);
-    return new OzoneSecretKey(key.getKeyId(), key.getExpiryDate(),
-        key.getPrivateKeyBytes().toByteArray(),
-        key.getPublicKeyBytes().toByteArray());
-  }
-
-  /**
-   * Reads protobuf encoded input stream to construct {@link OzoneSecretKey}.
-   */
-  static OzoneSecretKey readProtoBuf(byte[] identifier) throws IOException {
-    Preconditions.checkNotNull(identifier);
-    DataInputStream in = new DataInputStream(new ByteArrayInputStream(
-        identifier));
-    return readProtoBuf(in);
-  }
-
-}
\ No newline at end of file
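A minimal, self-contained sketch of what encoding and decoding the stored key
bytes involves, assuming RSA keys in their default Java encodings (PKCS#8 for
private keys, X.509/SubjectPublicKeyInfo for public keys); in the deleted code
the concrete algorithm comes from SecurityConfig via SecurityUtil:

import java.security.KeyFactory;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.PrivateKey;
import java.security.PublicKey;
import java.security.spec.PKCS8EncodedKeySpec;
import java.security.spec.X509EncodedKeySpec;

// Round-trips a key pair through its encoded byte form, the same shape of
// data that SecretKeyProto persists.
public final class KeyCodecDemo {
  public static void main(String[] args) throws Exception {
    KeyPair pair = KeyPairGenerator.getInstance("RSA").generateKeyPair();
    byte[] privBytes = pair.getPrivate().getEncoded(); // PKCS#8
    byte[] pubBytes = pair.getPublic().getEncoded();   // X.509/SPKI

    KeyFactory factory = KeyFactory.getInstance("RSA");
    PrivateKey priv =
        factory.generatePrivate(new PKCS8EncodedKeySpec(privBytes));
    PublicKey pub = factory.generatePublic(new X509EncodedKeySpec(pubBytes));
    System.out.println(priv.getAlgorithm() + " / " + pub.getAlgorithm());
  }
}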
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java
deleted file mode 100644
index 06fc071..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java
+++ /dev/null
@@ -1,258 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.security;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.token.SecretManager;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.slf4j.Logger;
-
-import java.io.IOException;
-import java.security.InvalidKeyException;
-import java.security.KeyPair;
-import java.security.NoSuchAlgorithmException;
-import java.security.PrivateKey;
-import java.security.Signature;
-import java.security.SignatureException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * Base SecretManager for the Ozone Manager. Responsible for signing
- * identifiers with its private key.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public abstract class OzoneSecretManager<T extends TokenIdentifier>
-    extends SecretManager<T> {
-
-  private final Logger logger;
-  /**
-   * Security configuration, including the signature algorithm used to
-   * sign token identifiers.
-   */
-  private final SecurityConfig securityConfig;
-  private final long tokenMaxLifetime;
-  private final long tokenRenewInterval;
-  private final Text service;
-  private CertificateClient certClient;
-  private volatile boolean running;
-  private OzoneSecretKey currentKey;
-  private AtomicInteger currentKeyId;
-  private AtomicInteger tokenSequenceNumber;
-
-  /**
-   * Create a secret manager.
-   *
-   * @param secureConf configuration.
-   * @param tokenMaxLifetime the maximum lifetime of the delegation tokens in
-   * milliseconds
-   * @param tokenRenewInterval how often the tokens must be renewed in
-   * milliseconds
-   * @param service name of service
-   * @param logger logger for the secret manager
-   */
-  public OzoneSecretManager(SecurityConfig secureConf, long tokenMaxLifetime,
-      long tokenRenewInterval, Text service, Logger logger) {
-    this.securityConfig = secureConf;
-    this.tokenMaxLifetime = tokenMaxLifetime;
-    this.tokenRenewInterval = tokenRenewInterval;
-    currentKeyId = new AtomicInteger();
-    tokenSequenceNumber = new AtomicInteger();
-    this.service = service;
-    this.logger = logger;
-  }
-
-
-  /**
-   * Sign the identifier with the private key and return the signature as
-   * the password. Note that this is an asymmetric signature rather than an
-   * HMAC, despite the naming of the associated error code.
-   *
-   * @param identifier bytes to sign
-   * @param privateKey signing key
-   * @return the signature bytes
-   */
-  public byte[] createPassword(byte[] identifier, PrivateKey privateKey)
-      throws OzoneSecurityException {
-    try {
-      Signature rsaSignature = Signature.getInstance(
-          getDefaultSignatureAlgorithm());
-      rsaSignature.initSign(privateKey);
-      rsaSignature.update(identifier);
-      return rsaSignature.sign();
-    } catch (InvalidKeyException | NoSuchAlgorithmException |
-        SignatureException ex) {
-      throw new OzoneSecurityException("Error while creating HMAC hash for " +
-          "token.", ex, OzoneSecurityException.ResultCodes
-          .SECRET_MANAGER_HMAC_ERROR);
-    }
-  }
-
-  @Override
-  public byte[] createPassword(T identifier) {
-    if (logger.isDebugEnabled()) {
-      logger.debug("Creating password for identifier: {}, currentKey: {}",
-          formatTokenId(identifier), currentKey.getKeyId());
-    }
-    byte[] password = null;
-    try {
-      password = createPassword(identifier.getBytes(),
-          currentKey.getPrivateKey());
-    } catch (IOException ioe) {
-      logger.error("Could not store token {}!!", formatTokenId(identifier),
-          ioe);
-    }
-    return password;
-  }
-
-  /**
-   * Renew a delegation token.
-   *
-   * @param token the token to renew
-   * @param renewer the full principal name of the user doing the renewal
-   * @return the new expiration time
-   * @throws InvalidToken           if the token is invalid
-   * @throws AccessControlException if the user can't renew token
-   */
-  public abstract long renewToken(Token<T> token, String renewer)
-      throws IOException;
-
-  /**
-   * Cancel a token by removing it from store and cache.
-   *
-   * @return Identifier of the canceled token
-   * @throws InvalidToken           for invalid token
-   * @throws AccessControlException if the user isn't allowed to cancel
-   */
-  public abstract T cancelToken(Token<T> token, String canceller)
-      throws IOException;
-
-  public int incrementCurrentKeyId() {
-    return currentKeyId.incrementAndGet();
-  }
-
-  public int getDelegationTokenSeqNum() {
-    return tokenSequenceNumber.get();
-  }
-
-  public void setDelegationTokenSeqNum(int seqNum) {
-    tokenSequenceNumber.set(seqNum);
-  }
-
-  public int incrementDelegationTokenSeqNum() {
-    return tokenSequenceNumber.incrementAndGet();
-  }
-
-  /**
-   * Update the current master key. This is called once by the start method
-   * before the tokenRemoverThread is created.
-   */
-  private OzoneSecretKey updateCurrentKey(KeyPair keyPair) throws IOException {
-    logger.info("Updating the current master key for generating tokens");
-
-    // TODO: fix me based on the certificate expire time to set the key
-    // expire time.
-    int newCurrentId = incrementCurrentKeyId();
-    OzoneSecretKey newKey = new OzoneSecretKey(newCurrentId, -1,
-        keyPair);
-    currentKey = newKey;
-    return currentKey;
-  }
-
-  public String formatTokenId(T id) {
-    return "(" + id + ")";
-  }
-
-  /**
-   * Should be called before this object is used.
-   *
-   * @param client certificate client providing the signing key pair
-   * @throws IOException
-   */
-  public synchronized void start(CertificateClient client)
-      throws IOException {
-    Preconditions.checkState(!isRunning());
-    setCertClient(client);
-    updateCurrentKey(new KeyPair(certClient.getPublicKey(),
-        certClient.getPrivateKey()));
-    setIsRunning(true);
-  }
-
-  /**
-   * Stops the secret manager.
-   *
-   * @throws IOException
-   */
-  public synchronized void stop() throws IOException {
-    setIsRunning(false);
-  }
-
-  public String getDefaultSignatureAlgorithm() {
-    return securityConfig.getSignatureAlgo();
-  }
-
-  public long getTokenMaxLifetime() {
-    return tokenMaxLifetime;
-  }
-
-  public long getTokenRenewInterval() {
-    return tokenRenewInterval;
-  }
-
-  public Text getService() {
-    return service;
-  }
-
-  /**
-   * Checks whether the secret manager is running.
-   *
-   * @return true if the secret manager is running
-   */
-  public synchronized boolean isRunning() {
-    return running;
-  }
-
-  public void setIsRunning(boolean val) {
-    running = val;
-  }
-
-  public OzoneSecretKey getCurrentKey() {
-    return currentKey;
-  }
-
-  public AtomicInteger getCurrentKeyId() {
-    return currentKeyId;
-  }
-
-  public AtomicInteger getTokenSequenceNumber() {
-    return tokenSequenceNumber;
-  }
-
-  public CertificateClient getCertClient() {
-    return certClient;
-  }
-
-  public void setCertClient(CertificateClient client) {
-    this.certClient = client;
-  }
-}
-
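A self-contained sketch of the sign-then-verify flow that createPassword above
relies on, assuming an RSA key pair and SHA256withRSA (the concrete algorithm
in the deleted code comes from SecurityConfig#getSignatureAlgo):

import java.nio.charset.StandardCharsets;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.Signature;

// Sign-then-verify round trip, mirroring the createPassword pattern.
public final class SignDemo {
  public static void main(String[] args) throws Exception {
    KeyPair pair = KeyPairGenerator.getInstance("RSA").generateKeyPair();
    byte[] id = "token-identifier".getBytes(StandardCharsets.UTF_8);

    Signature signer = Signature.getInstance("SHA256withRSA");
    signer.initSign(pair.getPrivate());
    signer.update(id);
    byte[] password = signer.sign();   // handed out as the token "password"

    Signature verifier = Signature.getInstance("SHA256withRSA");
    verifier.initVerify(pair.getPublic());
    verifier.update(id);
    System.out.println("valid: " + verifier.verify(password)); // true
  }
}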
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java
deleted file mode 100644
index 23c28d8..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.security;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * SecretStore for Ozone Master.
- */
-public class OzoneSecretStore implements Closeable {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(OzoneSecretStore.class);
-  private OMMetadataManager omMetadataManager;
-
-  @Override
-  public void close() throws IOException {
-    if (omMetadataManager != null) {
-      try {
-        omMetadataManager.getDelegationTokenTable().close();
-      } catch (Exception e) {
-        throw new IOException("Error while closing OzoneSecretStore.", e);
-      }
-    }
-  }
-
-
-  /**
-   * Support class to maintain state of OzoneSecretStore.
-   */
-  public static class OzoneManagerSecretState<T> {
-    private Map<T, Long> tokenState = new HashMap<>();
-    public Map<T, Long> getTokenState() {
-      return tokenState;
-    }
-  }
-
-  public OzoneSecretStore(OzoneConfiguration conf,
-      OMMetadataManager omMetadataManager) {
-    this.omMetadataManager = omMetadataManager;
-  }
-
-  public OzoneManagerSecretState<OzoneTokenIdentifier> loadState()
-      throws IOException {
-    OzoneManagerSecretState<OzoneTokenIdentifier> state =
-        new OzoneManagerSecretState<>();
-    int numTokens = loadTokens(state);
-    LOG.info("Loaded {} tokens", numTokens);
-    return state;
-  }
-
-  public void storeToken(OzoneTokenIdentifier tokenId, long renewDate)
-      throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Storing token {}", tokenId.getSequenceNumber());
-    }
-
-    try {
-      omMetadataManager.getDelegationTokenTable().put(tokenId, renewDate);
-    } catch (IOException e) {
-      LOG.error("Unable to store token " + tokenId.toString(), e);
-      throw e;
-    }
-  }
-
-  public void updateToken(OzoneTokenIdentifier tokenId, long renewDate)
-      throws IOException {
-    storeToken(tokenId, renewDate);
-  }
-
-  public void removeToken(OzoneTokenIdentifier tokenId) throws IOException {
-    try {
-      omMetadataManager.getDelegationTokenTable().delete(tokenId);
-    } catch (IOException e) {
-      LOG.error("Unable to remove token {}", tokenId.toString(), e);
-      throw e;
-    }
-  }
-
-  public int loadTokens(OzoneManagerSecretState<OzoneTokenIdentifier> state)
-      throws IOException {
-    int loadedTokens = 0;
-    try (TableIterator<OzoneTokenIdentifier, ? extends
-        KeyValue<OzoneTokenIdentifier, Long>> iterator =
-             omMetadataManager.getDelegationTokenTable().iterator()) {
-      iterator.seekToFirst();
-      while (iterator.hasNext()) {
-        KeyValue<OzoneTokenIdentifier, Long> kv = iterator.next();
-        state.tokenState.put(kv.getKey(), kv.getValue());
-        loadedTokens++;
-      }
-    }
-    return loadedTokens;
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecurityException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecurityException.java
deleted file mode 100644
index d8a014b..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecurityException.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.security;
-
-import java.io.IOException;
-
-/**
- * Security exceptions thrown by the Ozone layer.
- */
-public class OzoneSecurityException extends IOException {
-  private final OzoneSecurityException.ResultCodes result;
-
-  /**
-   * Constructs an {@code OzoneSecurityException} with {@code null}
-   * as its error detail message.
-   */
-  public OzoneSecurityException(OzoneSecurityException.ResultCodes result) {
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code OzoneSecurityException} with the specified detail
-   * message.
-   *
-   * @param message The detail message (which is saved for later retrieval
-   * by the {@link #getMessage()} method)
-   */
-  public OzoneSecurityException(String message,
-      OzoneSecurityException.ResultCodes result) {
-    super(message);
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code OzoneSecurityException} with the specified detail
-   * message and cause.
-   * <p>
-   * <p> Note that the detail message associated with {@code cause} is
-   * <i>not</i> automatically incorporated into this exception's detail
-   * message.
-   *
-   * @param message The detail message (which is saved for later retrieval
-   * by the {@link #getMessage()} method)
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   */
-  public OzoneSecurityException(String message, Throwable cause,
-                     OzoneSecurityException.ResultCodes result) {
-    super(message, cause);
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code OzoneSecurityException} with the specified cause
-   * and a detail message of {@code (cause==null ? null : cause.toString())}
-   * (which typically contains the class and detail message of {@code cause}).
-   * This constructor is useful for IO exceptions that are little more
-   * than wrappers for other throwables.
-   *
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   */
-  public OzoneSecurityException(Throwable cause,
-      OzoneSecurityException.ResultCodes result) {
-    super(cause);
-    this.result = result;
-  }
-
-  /**
-   * Returns resultCode.
-   * @return ResultCode
-   */
-  public OzoneSecurityException.ResultCodes getResult() {
-    return result;
-  }
-
-  /**
-   * Error codes to make it easy to decode these exceptions.
-   */
-  public enum ResultCodes {
-    OM_PUBLIC_PRIVATE_KEY_FILE_NOT_EXIST,
-    S3_SECRET_NOT_FOUND,
-    SECRET_MANAGER_HMAC_ERROR
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java
deleted file mode 100644
index f5e114a..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java
+++ /dev/null
@@ -1,315 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.security;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.Arrays;
-
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
-
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type.S3TOKEN;
-
-/**
- * The token identifier for Ozone Master.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class OzoneTokenIdentifier extends
-    AbstractDelegationTokenIdentifier {
-
-  public static final Text KIND_NAME = new Text("OzoneToken");
-  private String omCertSerialId;
-  private Type tokenType;
-  private String awsAccessId;
-  private String signature;
-  private String strToSign;
-
-  /**
-   * Create an empty delegation token identifier.
-   */
-  public OzoneTokenIdentifier() {
-    super();
-    this.tokenType = Type.DELEGATION_TOKEN;
-  }
-
-  /**
-   * Create a new ozone master delegation token identifier.
-   *
-   * @param owner the effective username of the token owner
-   * @param renewer the username of the renewer
-   * @param realUser the real username of the token owner
-   */
-  public OzoneTokenIdentifier(Text owner, Text renewer, Text realUser) {
-    super(owner, renewer, realUser);
-    this.tokenType = Type.DELEGATION_TOKEN;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public Text getKind() {
-    return KIND_NAME;
-  }
-
-  /**
-   * Overrides default implementation to write using Protobuf.
-   *
-   * @param out output stream
-   * @throws IOException
-   */
-  @Override
-  public void write(DataOutput out) throws IOException {
-    OMTokenProto.Builder builder = OMTokenProto.newBuilder()
-        .setType(getTokenType())
-        .setOwner(getOwner().toString())
-        .setRealUser(getRealUser().toString())
-        .setRenewer(getRenewer().toString())
-        .setIssueDate(getIssueDate())
-        .setMaxDate(getMaxDate())
-        .setSequenceNumber(getSequenceNumber())
-        .setMasterKeyId(getMasterKeyId());
-
-    // Set s3 specific fields.
-    if (getTokenType().equals(S3TOKEN)) {
-      builder.setAccessKeyId(getAwsAccessId())
-          .setSignature(getSignature())
-          .setStrToSign(getStrToSign());
-    } else {
-      builder.setOmCertSerialId(getOmCertSerialId());
-    }
-    OMTokenProto token = builder.build();
-    out.write(token.toByteArray());
-  }
-
-  /**
-   * Overrides default implementation to read using Protobuf.
-   *
-   * @param in input stream
-   * @throws IOException
-   */
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    OMTokenProto token = OMTokenProto.parseFrom((DataInputStream) in);
-    setTokenType(token.getType());
-    setMaxDate(token.getMaxDate());
-    setOwner(new Text(token.getOwner()));
-    setRealUser(new Text(token.getRealUser()));
-    setRenewer(new Text(token.getRenewer()));
-    setIssueDate(token.getIssueDate());
-    setSequenceNumber(token.getSequenceNumber());
-    setMasterKeyId(token.getMasterKeyId());
-    setOmCertSerialId(token.getOmCertSerialId());
-
-    // Set s3 specific fields.
-    if (getTokenType().equals(S3TOKEN)) {
-      setAwsAccessId(token.getAccessKeyId());
-      setSignature(token.getSignature());
-      setStrToSign(token.getStrToSign());
-    }
-  }
-
-  /**
-   * Reads protobuf encoded input stream to construct {@link
-   * OzoneTokenIdentifier}.
-   */
-  public static OzoneTokenIdentifier readProtoBuf(DataInput in)
-      throws IOException {
-    OMTokenProto token = OMTokenProto.parseFrom((DataInputStream) in);
-    OzoneTokenIdentifier identifier = new OzoneTokenIdentifier();
-    identifier.setTokenType(token.getType());
-    identifier.setMaxDate(token.getMaxDate());
-
-    // Set type specific fields.
-    if (token.getType().equals(S3TOKEN)) {
-      identifier.setSignature(token.getSignature());
-      identifier.setStrToSign(token.getStrToSign());
-      identifier.setAwsAccessId(token.getAccessKeyId());
-    } else {
-      identifier.setRenewer(new Text(token.getRenewer()));
-      identifier.setOwner(new Text(token.getOwner()));
-      identifier.setRealUser(new Text(token.getRealUser()));
-      identifier.setIssueDate(token.getIssueDate());
-      identifier.setSequenceNumber(token.getSequenceNumber());
-      identifier.setMasterKeyId(token.getMasterKeyId());
-    }
-    identifier.setOmCertSerialId(token.getOmCertSerialId());
-    return identifier;
-  }
-
-  /**
-   * Reads protobuf encoded input stream to construct {@link
-   * OzoneTokenIdentifier}.
-   */
-  public static OzoneTokenIdentifier readProtoBuf(byte[] identifier)
-      throws IOException {
-    DataInputStream in = new DataInputStream(new ByteArrayInputStream(
-        identifier));
-    return readProtoBuf(in);
-  }
-
-  /**
-   * Creates new instance.
-   */
-  public static OzoneTokenIdentifier newInstance() {
-    return new OzoneTokenIdentifier();
-  }
-
-  /**
-   * Creates new instance.
-   */
-  public static OzoneTokenIdentifier newInstance(Text owner, Text renewer,
-      Text realUser) {
-    return new OzoneTokenIdentifier(owner, renewer, realUser);
-  }
-
-  @Override
-  public int hashCode() {
-    return super.hashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!(obj instanceof OzoneTokenIdentifier)) {
-      return false;
-    }
-    OzoneTokenIdentifier that = (OzoneTokenIdentifier) obj;
-    return new EqualsBuilder()
-        .append(getOmCertSerialId(), that.getOmCertSerialId())
-        .append(getMaxDate(), that.getMaxDate())
-        .append(getIssueDate(), that.getIssueDate())
-        .append(getMasterKeyId(), that.getMasterKeyId())
-        .append(getOwner(), that.getOwner())
-        .append(getRealUser(), that.getRealUser())
-        .append(getRenewer(), that.getRenewer())
-        .append(getKind(), that.getKind())
-        .append(getSequenceNumber(), that.getSequenceNumber())
-        .build();
-  }
-
-  /**
-   * Class to encapsulate a token's renew date and password.
-   */
-  @InterfaceStability.Evolving
-  public static class TokenInfo {
-
-    private long renewDate;
-    private byte[] password;
-    private String trackingId;
-
-    public TokenInfo(long renewDate, byte[] password) {
-      this(renewDate, password, null);
-    }
-
-    public TokenInfo(long renewDate, byte[] password,
-        String trackingId) {
-      this.renewDate = renewDate;
-      this.password = Arrays.copyOf(password, password.length);
-      this.trackingId = trackingId;
-    }
-
-    /**
-     * Returns the renew date.
-     */
-    public long getRenewDate() {
-      return renewDate;
-    }
-
-    /**
-     * Returns the password.
-     */
-    byte[] getPassword() {
-      return password;
-    }
-
-    /**
-     * Returns the tracking id.
-     */
-    public String getTrackingId() {
-      return trackingId;
-    }
-  }
-
-  public String getOmCertSerialId() {
-    return omCertSerialId;
-  }
-
-  public void setOmCertSerialId(String omCertSerialId) {
-    this.omCertSerialId = omCertSerialId;
-  }
-
-  public Type getTokenType() {
-    return tokenType;
-  }
-
-  public void setTokenType(Type tokenType) {
-    this.tokenType = tokenType;
-  }
-
-  public String getAwsAccessId() {
-    return awsAccessId;
-  }
-
-  public void setAwsAccessId(String awsAccessId) {
-    this.awsAccessId = awsAccessId;
-  }
-
-  public String getSignature() {
-    return signature;
-  }
-
-  public void setSignature(String signature) {
-    this.signature = signature;
-  }
-
-  public String getStrToSign() {
-    return strToSign;
-  }
-
-  public void setStrToSign(String strToSign) {
-    this.strToSign = strToSign;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder buffer = new StringBuilder();
-    buffer.append(getKind())
-        .append(" owner=").append(getOwner())
-        .append(", renewer=").append(getRenewer())
-        .append(", realUser=").append(getRealUser())
-        .append(", issueDate=").append(getIssueDate())
-        .append(", maxDate=").append(getMaxDate())
-        .append(", sequenceNumber=").append(getSequenceNumber())
-        .append(", masterKeyId=").append(getMasterKeyId())
-        .append(", strToSign=").append(getStrToSign())
-        .append(", signature=").append(getSignature())
-        .append(", awsAccessKeyId=").append(getAwsAccessId());
-    return buffer.toString();
-  }
-}
\ No newline at end of file
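A hedged sketch of how this identifier travels inside a Hadoop Token, grounded
in the createToken and cancelToken paths above; "id" and "password" stand in
for an identifier and the signature produced by the secret manager, and
"om-service" is a hypothetical service name:

Token<OzoneTokenIdentifier> token = new Token<>(
    id.getBytes(),                  // protobuf-serialized identifier
    password,                       // signature over those bytes
    OzoneTokenIdentifier.KIND_NAME,
    new Text("om-service"));
// Server side: reconstruct the identifier from the raw bytes.
OzoneTokenIdentifier decoded =
    OzoneTokenIdentifier.readProtoBuf(token.getIdentifier());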
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java
deleted file mode 100644
index d8a2660..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.security.acl;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-
-import java.util.BitSet;
-
-/**
- * Public API for Ozone ACLs. Security providers that support Ozone ACLs
- * should implement this interface.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "Yarn", "Ranger", "Hive", "HBase"})
-@InterfaceStability.Evolving
-public interface IAccessAuthorizer {
-
-  /**
-   * Check access for the given Ozone object.
-   *
-   * @param ozoneObject object for which access needs to be checked.
-   * @param context Context object encapsulating all user related information.
-   * @return true if the user has access, false otherwise.
-   * @throws OMException if access cannot be checked.
-   */
-  boolean checkAccess(IOzoneObj ozoneObject, RequestContext context)
-      throws OMException;
-
-  /**
-   * ACL rights.
-   */
-  enum ACLType {
-    READ,
-    WRITE,
-    CREATE,
-    LIST,
-    DELETE,
-    READ_ACL,
-    WRITE_ACL,
-    ALL,
-    NONE;
-    private static final int length = ACLType.values().length;
-    private static final ACLType[] vals = ACLType.values();
-
-    public static int getNoOfAcls() {
-      return length;
-    }
-
-    public static ACLType getAclTypeFromOrdinal(int ordinal) {
-      if (ordinal > length - 1 || ordinal < 0) {
-        throw new IllegalArgumentException("Ordinal out of range of ACL " +
-            "types. ordinal:" + ordinal);
-      }
-      return vals[ordinal];
-    }
-
-    /**
-     * Returns the ACL rights based on passed in String.
-     *
-     * @param type ACL right string
-     * @return ACLType
-     */
-    public static ACLType getACLRight(String type) {
-      if (type == null || type.isEmpty()) {
-        throw new IllegalArgumentException("ACL right cannot be empty");
-      }
-
-      switch (type) {
-      case OzoneConsts.OZONE_ACL_READ:
-        return ACLType.READ;
-      case OzoneConsts.OZONE_ACL_WRITE:
-        return ACLType.WRITE;
-      case OzoneConsts.OZONE_ACL_CREATE:
-        return ACLType.CREATE;
-      case OzoneConsts.OZONE_ACL_DELETE:
-        return ACLType.DELETE;
-      case OzoneConsts.OZONE_ACL_LIST:
-        return ACLType.LIST;
-      case OzoneConsts.OZONE_ACL_READ_ACL:
-        return ACLType.READ_ACL;
-      case OzoneConsts.OZONE_ACL_WRITE_ACL:
-        return ACLType.WRITE_ACL;
-      case OzoneConsts.OZONE_ACL_ALL:
-        return ACLType.ALL;
-      case OzoneConsts.OZONE_ACL_NONE:
-        return ACLType.NONE;
-      default:
-        throw new IllegalArgumentException("[" + type + "] ACL right is not " +
-            "recognized");
-      }
-
-    }
-
-    /**
-     * Returns String representation of ACL rights.
-     *
-     * @param acls ACLType
-     * @return String representation of acl
-     */
-    public static String getACLString(BitSet acls) {
-      StringBuilder sb = new StringBuilder();
-      acls.stream().forEach(acl -> {
-        sb.append(getAclString(ACLType.values()[acl]));
-      });
-      return sb.toString();
-    }
-
-    public static String getAclString(ACLType acl) {
-      switch (acl) {
-      case READ:
-        return OzoneConsts.OZONE_ACL_READ;
-      case WRITE:
-        return OzoneConsts.OZONE_ACL_WRITE;
-      case CREATE:
-        return OzoneConsts.OZONE_ACL_CREATE;
-      case DELETE:
-        return OzoneConsts.OZONE_ACL_DELETE;
-      case LIST:
-        return OzoneConsts.OZONE_ACL_LIST;
-      case READ_ACL:
-        return OzoneConsts.OZONE_ACL_READ_ACL;
-      case WRITE_ACL:
-        return OzoneConsts.OZONE_ACL_WRITE_ACL;
-      case ALL:
-        return OzoneConsts.OZONE_ACL_ALL;
-      case NONE:
-        return OzoneConsts.OZONE_ACL_NONE;
-      default:
-        throw new IllegalArgumentException("ACL right is not recognized");
-      }
-    }
-
-  }
-
-  /**
-   * Type of acl identity.
-   */
-  enum ACLIdentityType {
-    USER(OzoneConsts.OZONE_ACL_USER_TYPE),
-    GROUP(OzoneConsts.OZONE_ACL_GROUP_TYPE),
-    WORLD(OzoneConsts.OZONE_ACL_WORLD_TYPE),
-    ANONYMOUS(OzoneConsts.OZONE_ACL_ANONYMOUS_TYPE),
-    CLIENT_IP(OzoneConsts.OZONE_ACL_IP_TYPE);
-
-    // TODO: Add support for acl checks based on CLIENT_IP.
-
-    @Override
-    public String toString() {
-      return value;
-    }
-    /**
-     * String value for this Enum.
-     */
-    private final String value;
-
-    /**
-     * Initialize the ACLIdentityType enum.
-     *
-     * @param val String type for this enum.
-     */
-    ACLIdentityType(String val) {
-      value = val;
-    }
-  }
-
-}
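
A minimal sketch of how a security provider could plug into the interface
deleted above; the class name and deny rule are hypothetical, for
illustration only:

    package org.apache.hadoop.ozone.security.acl;

    import org.apache.hadoop.ozone.om.exceptions.OMException;

    /** Hypothetical authorizer: denies WRITE on volumes, allows the rest. */
    public class ReadMostlyAuthorizer implements IAccessAuthorizer {
      @Override
      public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context)
          throws OMException {
        boolean volumeWrite = ozoneObject instanceof OzoneObj
            && ((OzoneObj) ozoneObject).getResourceType()
                == OzoneObj.ResourceType.VOLUME
            && context.getAclRights() == ACLType.WRITE;
        return !volumeWrite;
      }
    }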
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IOzoneObj.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IOzoneObj.java
deleted file mode 100644
index b300fcd..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IOzoneObj.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.security.acl;
-
-/**
- * Marker interface for objects supported by Ozone.
- * */
-public interface IOzoneObj {
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java
deleted file mode 100644
index ae37bc8..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.security.acl;
-
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-
-/**
- * Default implementation for {@link IAccessAuthorizer}.
- * */
-public class OzoneAccessAuthorizer implements IAccessAuthorizer {
-
-  @Override
-  public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context)
-      throws OMException {
-    return true;
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java
deleted file mode 100644
index b51af56..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.security.acl;
-
-import org.apache.hadoop.hdds.conf.Config;
-import org.apache.hadoop.hdds.conf.ConfigGroup;
-import org.apache.hadoop.hdds.conf.ConfigTag;
-import org.apache.hadoop.hdds.conf.ConfigType;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-
-/**
- * Ozone ACL config pojo.
- * */
-@ConfigGroup(prefix = "ozone.om")
-public class OzoneAclConfig {
-  // OM Default user/group permissions
-  private ACLType userDefaultRights = ACLType.ALL;
-  private ACLType groupDefaultRights = ACLType.ALL;
-
-  @Config(key = "user.rights",
-      defaultValue = "ALL",
-      type = ConfigType.STRING,
-      tags = {ConfigTag.OM, ConfigTag.SECURITY},
-      description = "Default user permissions set for an object in " +
-          "OzoneManager."
-  )
-  public void setUserDefaultRights(String userRights) {
-    if (userRights == null) {
-      userRights = "ALL";
-    }
-    this.userDefaultRights = ACLType.valueOf(userRights);
-  }
-
-  @Config(key = "group.rights",
-      defaultValue = "ALL",
-      type = ConfigType.STRING,
-      tags = {ConfigTag.OM, ConfigTag.SECURITY},
-      description = "Default group permissions set for an object in " +
-          "OzoneManager."
-  )
-  public void setGroupDefaultRights(String groupRights) {
-    if (groupRights == null) {
-      groupRights = "ALL";
-    }
-    this.groupDefaultRights = ACLType.valueOf(groupRights);
-  }
-
-  public ACLType getUserDefaultRights() {
-    return userDefaultRights;
-  }
-
-  public ACLType getGroupDefaultRights() {
-    return groupDefaultRights;
-  }
-
-}
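
The @Config-annotated setters above are normally driven by the HDDS
configuration injection framework, but the POJO can also be used directly;
a minimal sketch (the null fallback to ALL mirrors the setter logic above):

    OzoneAclConfig aclConfig = new OzoneAclConfig();
    aclConfig.setUserDefaultRights(null);    // falls back to ALL
    aclConfig.setGroupDefaultRights("READ");

    assert aclConfig.getUserDefaultRights() == IAccessAuthorizer.ACLType.ALL;
    assert aclConfig.getGroupDefaultRights() == IAccessAuthorizer.ACLType.READ;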
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
deleted file mode 100644
index 4a95e55..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.security.acl;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
-
-import java.util.LinkedHashMap;
-import java.util.Map;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.StoreType.*;
-
-/**
- * Class representing a unique Ozone object.
- * */
-public abstract class OzoneObj implements IOzoneObj {
-
-  private final ResourceType resType;
-
-  private final StoreType storeType;
-
-  OzoneObj(ResourceType resType, StoreType storeType) {
-
-    Preconditions.checkNotNull(resType);
-    Preconditions.checkNotNull(storeType);
-    this.resType = resType;
-    this.storeType = storeType;
-  }
-
-  public static OzoneManagerProtocolProtos.OzoneObj toProtobuf(OzoneObj obj) {
-    return OzoneManagerProtocolProtos.OzoneObj.newBuilder()
-        .setResType(ObjectType.valueOf(obj.getResourceType().name()))
-        .setStoreType(valueOf(obj.getStoreType().name()))
-        .setPath(obj.getPath()).build();
-  }
-
-  public ResourceType getResourceType() {
-    return resType;
-  }
-
-  @Override
-  public String toString() {
-    return "OzoneObj{" +
-        "resType=" + resType +
-        ", storeType=" + storeType +
-        ", path='" + getPath() + '\'' +
-        '}';
-  }
-
-  public StoreType getStoreType() {
-    return storeType;
-  }
-
-  public abstract String getVolumeName();
-
-  public abstract String getBucketName();
-
-  public abstract String getKeyName();
-
-  /**
-   * Get PrefixName.
-   * A prefix name is like a key name under the bucket, but it is mainly
-   * used for ACLs for now and is persisted into a separate prefix table.
-   *
-   * @return prefix name.
-   */
-  public abstract String getPrefixName();
-
-  /**
-   * Get full path of a key or prefix including volume and bucket.
-   * @return full path of a key or prefix.
-   */
-  public abstract String getPath();
-
-  /**
-   * Ozone Objects supported for ACL.
-   */
-  public enum ResourceType {
-    VOLUME(OzoneConsts.VOLUME),
-    BUCKET(OzoneConsts.BUCKET),
-    KEY(OzoneConsts.KEY),
-    PREFIX(OzoneConsts.PREFIX);
-
-    /**
-     * String value for this Enum.
-     */
-    private final String value;
-
-    @Override
-    public String toString() {
-      return value;
-    }
-
-    ResourceType(String resType) {
-      value = resType;
-    }
-  }
-
-  /**
-   * Ozone Objects supported for ACL.
-   */
-  public enum StoreType {
-    OZONE(OzoneConsts.OZONE),
-    S3(OzoneConsts.S3);
-
-    /**
-     * String value for this Enum.
-     */
-    private final String value;
-
-    @Override
-    public String toString() {
-      return value;
-    }
-
-    StoreType(String objType) {
-      value = objType;
-    }
-  }
-
-  public Map<String, String> toAuditMap() {
-    Map<String, String> auditMap = new LinkedHashMap<>();
-    auditMap.put(OzoneConsts.RESOURCE_TYPE, this.getResourceType().value);
-    auditMap.put(OzoneConsts.STORAGE_TYPE, this.getStoreType().value);
-    auditMap.put(OzoneConsts.VOLUME, this.getVolumeName());
-    auditMap.put(OzoneConsts.BUCKET, this.getBucketName());
-    auditMap.put(OzoneConsts.KEY, this.getKeyName());
-    return auditMap;
-  }
-
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
deleted file mode 100644
index cbae18c..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.security.acl;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
-
-/**
- * Class representing an ozone object.
- * It can be a volume with non-null volumeName (bucketName=null & name=null)
- * or a bucket with non-null volumeName and bucketName (name=null)
- * or a key with non-null volumeName, bucketName and key name
- * (via getKeyName)
- * or a prefix with non-null volumeName, bucketName and prefix name
- * (via getPrefixName)
- */
-public final class OzoneObjInfo extends OzoneObj {
-
-  private final String volumeName;
-  private final String bucketName;
-  private final String name;
-
-  /**
-   * Construct an OzoneObjInfo.
-   * @param resType resource type of the object.
-   * @param storeType store type (OZONE or S3).
-   * @param volumeName volume name.
-   * @param bucketName bucket name.
-   * @param name key name or prefix name.
-   */
-  private OzoneObjInfo(ResourceType resType, StoreType storeType,
-      String volumeName, String bucketName, String name) {
-    super(resType, storeType);
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.name = name;
-  }
-
-  @Override
-  public String getPath() {
-    switch (getResourceType()) {
-    case VOLUME:
-      return OZONE_URI_DELIMITER + getVolumeName();
-    case BUCKET:
-      return OZONE_URI_DELIMITER + getVolumeName()
-          + OZONE_URI_DELIMITER + getBucketName();
-    case KEY:
-      return OZONE_URI_DELIMITER + getVolumeName()
-          + OZONE_URI_DELIMITER + getBucketName()
-          + OZONE_URI_DELIMITER + getKeyName();
-    case PREFIX:
-      return OZONE_URI_DELIMITER + getVolumeName()
-          + OZONE_URI_DELIMITER + getBucketName()
-          + OZONE_URI_DELIMITER + getPrefixName();
-    default:
-      throw new IllegalArgumentException("Unknown resource type: "
-          + getResourceType());
-    }
-  }
-
-  @Override
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  @Override
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  @Override
-  public String getKeyName() {
-    return name;
-  }
-
-  @Override
-  public String getPrefixName() {
-    return name;
-  }
-
-
-  public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj
-      proto) {
-    Builder builder = new Builder()
-        .setResType(ResourceType.valueOf(proto.getResType().name()))
-        .setStoreType(StoreType.valueOf(proto.getStoreType().name()));
-    String[] tokens = StringUtils.split(proto.getPath(),
-        OZONE_URI_DELIMITER, 3);
-    if (tokens == null) {
-      throw new IllegalArgumentException("Unexpected path:" + proto.getPath());
-    }
-    // Set volume name.
-    switch (proto.getResType()) {
-    case VOLUME:
-      builder.setVolumeName(tokens[0]);
-      break;
-    case BUCKET:
-      if (tokens.length < 2) {
-        throw new IllegalArgumentException("Unexpected argument for " +
-            "Ozone bucket. Path:" + proto.getPath());
-      }
-      builder.setVolumeName(tokens[0]);
-      builder.setBucketName(tokens[1]);
-      break;
-    case KEY:
-      if (tokens.length < 3) {
-        throw new IllegalArgumentException("Unexpected argument for " +
-            "Ozone key. Path:" + proto.getPath());
-      }
-      builder.setVolumeName(tokens[0]);
-      builder.setBucketName(tokens[1]);
-      builder.setKeyName(tokens[2]);
-      break;
-    case PREFIX:
-      if (tokens.length < 3) {
-        throw new IllegalArgumentException("Unexpected argument for " +
-            "Ozone Prefix. Path:" + proto.getPath());
-      }
-      builder.setVolumeName(tokens[0]);
-      builder.setBucketName(tokens[1]);
-      builder.setPrefixName(tokens[2]);
-      break;
-    default:
-      throw new IllegalArgumentException("Unexpected type for " +
-          "Ozone key. Type:" + proto.getResType());
-    }
-    return builder.build();
-  }
-
-  /**
-   * Inner builder class.
-   */
-  public static class Builder {
-
-    private OzoneObj.ResourceType resType;
-    private OzoneObj.StoreType storeType;
-    private String volumeName;
-    private String bucketName;
-    private String name;
-
-    public static Builder newBuilder() {
-      return new Builder();
-    }
-
-    public static Builder fromKeyArgs(OmKeyArgs args) {
-      return new Builder()
-          .setVolumeName(args.getVolumeName())
-          .setBucketName(args.getBucketName())
-          .setKeyName(args.getKeyName())
-          .setResType(ResourceType.KEY);
-    }
-
-    public Builder setResType(OzoneObj.ResourceType res) {
-      this.resType = res;
-      return this;
-    }
-
-    public Builder setStoreType(OzoneObj.StoreType store) {
-      this.storeType = store;
-      return this;
-    }
-
-    public Builder setVolumeName(String volume) {
-      this.volumeName = volume;
-      return this;
-    }
-
-    public Builder setBucketName(String bucket) {
-      this.bucketName = bucket;
-      return this;
-    }
-
-    public Builder setKeyName(String key) {
-      this.name = key;
-      return this;
-    }
-
-    public Builder setPrefixName(String prefix) {
-      this.name = prefix;
-      return this;
-    }
-
-    public OzoneObjInfo build() {
-      return new OzoneObjInfo(resType, storeType, volumeName, bucketName, name);
-    }
-  }
-}
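
A usage sketch of the builder and protobuf round trip deleted above
(volume, bucket, and key names are illustrative):

    OzoneObjInfo keyObj = OzoneObjInfo.Builder.newBuilder()
        .setResType(OzoneObj.ResourceType.KEY)
        .setStoreType(OzoneObj.StoreType.OZONE)
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setKeyName("dir1/file.txt")
        .build();

    // keyObj.getPath() assembles "/vol1/bucket1/dir1/file.txt".
    OzoneManagerProtocolProtos.OzoneObj proto = OzoneObj.toProtobuf(keyObj);
    OzoneObjInfo roundTrip = OzoneObjInfo.fromProtobuf(proto);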
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/RequestContext.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/RequestContext.java
deleted file mode 100644
index 3295827..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/RequestContext.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.security.acl;
-
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-
-import java.net.InetAddress;
-
-/**
- * This class encapsulates information required for Ozone ACLs.
- * */
-public class RequestContext {
-  private final String host;
-  private final InetAddress ip;
-  private final UserGroupInformation clientUgi;
-  private final String serviceId;
-  private final ACLIdentityType aclType;
-  private final ACLType aclRights;
-
-  public RequestContext(String host, InetAddress ip,
-      UserGroupInformation clientUgi, String serviceId,
-      ACLIdentityType aclType, ACLType aclRights) {
-    this.host = host;
-    this.ip = ip;
-    this.clientUgi = clientUgi;
-    this.serviceId = serviceId;
-    this.aclType = aclType;
-    this.aclRights = aclRights;
-  }
-
-  /**
-   * Builder class for {@link RequestContext}.
-   */
-  public static class Builder {
-    private String host;
-    private InetAddress ip;
-    private UserGroupInformation clientUgi;
-    private String serviceId;
-    private IAccessAuthorizer.ACLIdentityType aclType;
-    private IAccessAuthorizer.ACLType aclRights;
-
-    public Builder setHost(String bHost) {
-      this.host = bHost;
-      return this;
-    }
-
-    public Builder setIp(InetAddress cIp) {
-      this.ip = cIp;
-      return this;
-    }
-
-    public Builder setClientUgi(UserGroupInformation cUgi) {
-      this.clientUgi = cUgi;
-      return this;
-    }
-
-    public Builder setServiceId(String sId) {
-      this.serviceId = sId;
-      return this;
-    }
-
-    public Builder setAclType(ACLIdentityType acl) {
-      this.aclType = acl;
-      return this;
-    }
-
-    public Builder setAclRights(ACLType aclRight) {
-      this.aclRights = aclRight;
-      return this;
-    }
-
-    public RequestContext build() {
-      return new RequestContext(host, ip, clientUgi, serviceId, aclType,
-          aclRights);
-    }
-  }
-
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  public String getHost() {
-    return host;
-  }
-
-  public InetAddress getIp() {
-    return ip;
-  }
-
-  public UserGroupInformation getClientUgi() {
-    return clientUgi;
-  }
-
-  public String getServiceId() {
-    return serviceId;
-  }
-
-  public ACLIdentityType getAclType() {
-    return aclType;
-  }
-
-  public ACLType getAclRights() {
-    return aclRights;
-  }
-
-}
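
A sketch tying the pieces together: build a RequestContext and run it
through the no-op OzoneAccessAuthorizer deleted earlier (keyObj is the
OzoneObjInfo from the previous sketch):

    RequestContext ctx = RequestContext.newBuilder()
        .setClientUgi(UserGroupInformation.getCurrentUser()) // throws IOException
        .setAclType(IAccessAuthorizer.ACLIdentityType.USER)
        .setAclRights(IAccessAuthorizer.ACLType.READ)
        .build();

    IAccessAuthorizer authorizer = new OzoneAccessAuthorizer();
    boolean allowed = authorizer.checkAccess(keyObj, ctx); // always true here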
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java
deleted file mode 100644
index 5c572ef..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.security.acl;
-
-/**
- * Classes related to Ozone ACLs.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/package-info.java
deleted file mode 100644
index 457f891..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.security;
-/**
- * Ozone security related classes.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/BooleanBiFunction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/BooleanBiFunction.java
deleted file mode 100644
index 82398b7..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/BooleanBiFunction.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.util;
-
-/**
- * Defines a functional interface that takes two inputs and returns a
- * boolean.
- */
-@FunctionalInterface
-public interface BooleanBiFunction<LEFT, RIGHT> {
-  boolean apply(LEFT left, RIGHT right);
-}
-
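
Being a @FunctionalInterface, it can be implemented with a lambda, e.g.:

    BooleanBiFunction<String, Integer> longerThan = (s, n) -> s.length() > n;
    boolean b = longerThan.apply("ozone", 3); // true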
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
deleted file mode 100644
index 69c5791..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.util;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.util.ClassUtil;
-import org.apache.hadoop.hdds.utils.HddsVersionInfo;
-import org.apache.hadoop.hdds.utils.VersionInfo;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class returns build information about Hadoop Ozone components.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public final class OzoneVersionInfo {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneVersionInfo.class);
-
-  public static final VersionInfo OZONE_VERSION_INFO =
-      new VersionInfo("ozone");
-
-  private OzoneVersionInfo() {}
-
-  public static void main(String[] args) {
-    System.out.println(
-        "                  //////////////                 \n" +
-        "               ////////////////////              \n" +
-        "            ////////     ////////////////        \n" +
-        "           //////      ////////////////          \n" +
-        "          /////      ////////////////  /         \n" +
-        "         /////            ////////   ///         \n" +
-        "         ////           ////////    /////        \n" +
-        "        /////         ////////////////           \n" +
-        "        /////       ////////////////   //        \n" +
-        "         ////     ///////////////   /////        \n" +
-        "         /////  ///////////////     ////         \n" +
-        "          /////       //////      /////          \n" +
-        "           //////   //////       /////           \n" +
-        "             ///////////     ////////            \n" +
-        "               //////  ////////////              \n" +
-        "               ///   //////////                  \n" +
-            "              /    " + OZONE_VERSION_INFO.getVersion() + "("
-            + OZONE_VERSION_INFO.getRelease() + ")\n");
-    System.out.println(
-        "Source code repository " + OZONE_VERSION_INFO.getUrl() + " -r " +
-            OZONE_VERSION_INFO.getRevision());
-    System.out.println("Compiled by " + OZONE_VERSION_INFO.getUser() + " on "
-        + OZONE_VERSION_INFO.getDate());
-    System.out.println(
-        "Compiled with protoc " + OZONE_VERSION_INFO.getProtocVersion());
-    System.out.println(
-        "From source with checksum " + OZONE_VERSION_INFO.getSrcChecksum()
-            + "\n");
-    LOG.debug("This command was run using " +
-        ClassUtil.findContainingJar(OzoneVersionInfo.class));
-    HddsVersionInfo.main(args);
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixNode.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixNode.java
deleted file mode 100644
index 3009c9a..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixNode.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.util;
-
-import java.util.HashMap;
-
-/**
- * Radix tree node representing one segment of an Ozone prefix path
- * (segments are separated by "/").
- */
-public class RadixNode<T> {
-
-  public RadixNode(String name) {
-    this.name = name;
-    this.children = new HashMap<>();
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  // Note: despite the name, this returns true when the node has NO
-  // children; RadixTree relies on this inverted semantic.
-  public boolean hasChildren() {
-    return children.isEmpty();
-  }
-
-  public HashMap<String, RadixNode> getChildren() {
-    return children;
-  }
-
-  public void setValue(T v) {
-    this.value = v;
-  }
-
-  public T getValue() {
-    return value;
-  }
-
-  private HashMap<String, RadixNode> children;
-
-  private String name;
-
-  // TODO: k/v pairs for more metadata as needed
-  private T value;
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java
deleted file mode 100644
index 597f58d..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.util;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.List;
-
-/**
- * Radix tree wrapper used by the Ozone ACL APIs to look up
- * prefix paths.
- */
-public class RadixTree<T> {
-
-  /**
-   * Create an empty radix tree containing only the root.
-   */
-  public RadixTree() {
-    root = new RadixNode<T>(PATH_DELIMITER);
-  }
-
-  /**
-   * If the Radix tree contains root only.
-   * @return true if the radix tree contains root only.
-   */
-  public boolean isEmpty() {
-    return root.hasChildren();
-  }
-
-  /**
-   * Insert a prefix tree node without a value; the value can be ACL or
-   * other metadata of the prefix path.
-   * @param path prefix path to insert.
-   */
-  public void insert(String path) {
-    insert(path, null);
-  }
-
-  /**
-   * Insert a prefix tree node with a value; the value can be ACL or
-   * other metadata of the prefix path.
-   * @param path prefix path to insert.
-   * @param val value to store at the node.
-   */
-  public void insert(String path, T val) {
-    // all prefix path inserted should end with "/"
-    RadixNode<T> n = root;
-    Path p = Paths.get(path);
-    for (int level = 0; level < p.getNameCount(); level++) {
-      HashMap<String, RadixNode> child = n.getChildren();
-      String component = p.getName(level).toString();
-      if (child.containsKey(component)) {
-        n = child.get(component);
-      } else {
-        RadixNode tmp = new RadixNode(component);
-        child.put(component, tmp);
-        n = tmp;
-      }
-    }
-    if (val != null) {
-      n.setValue(val);
-    }
-  }
-
-  /**
-   * Get the last node in the exact prefix path that matches in the tree.
-   * @param path - prefix path
-   * @return last node in the prefix tree, or null if the path is not an
-   *         exact prefix match.
-   */
-  public RadixNode<T> getLastNodeInPrefixPath(String path) {
-    List<RadixNode<T>> lpp = getLongestPrefixPath(path);
-    Path p = Paths.get(path);
-    if (lpp.size() != p.getNameCount() + 1) {
-      return null;
-    } else {
-      return lpp.get(p.getNameCount());
-    }
-  }
-
-  /**
-   * Remove prefix path.
-   * @param path prefix path to remove.
-   */
-  public void removePrefixPath(String path) {
-    Path p = Paths.get(path);
-    removePrefixPathInternal(root, p, 0);
-  }
-
-  /**
-   * Recursively remove non-overlapped part of the prefix path from radix tree.
-   * @param current current radix tree node.
-   * @param path prefix path to be removed.
-   * @param level current recursive level.
-   * @return true if current radix node can be removed.
-   *             (not overlapped with other path),
-   *         false otherwise.
-   */
-  private boolean removePrefixPathInternal(RadixNode<T> current,
-      Path path, int level) {
-    // last component is processed
-    if (level == path.getNameCount()) {
-      return current.hasChildren();
-    }
-
-    // not last component, recur for next component
-    String name = path.getName(level).toString();
-    RadixNode<T> node = current.getChildren().get(name);
-    if (node == null)  {
-      return false;
-    }
-
-    if (removePrefixPathInternal(node, path, level + 1)) {
-      current.getChildren().remove(name);
-      return current.hasChildren();
-    }
-    return false;
-  }
-
-  /**
-   * Get the longest prefix path.
-   * @param path - prefix path.
-   * @return longest prefix path as list of RadixNode.
-   */
-  public List<RadixNode<T>> getLongestPrefixPath(String path) {
-    RadixNode n = root;
-    Path p = Paths.get(path);
-    int level = 0;
-    List<RadixNode<T>> result = new ArrayList<>();
-    result.add(root);
-    while (level < p.getNameCount()) {
-      HashMap<String, RadixNode> children = n.getChildren();
-      if (children.isEmpty()) {
-        break;
-      }
-      String component = p.getName(level).toString();
-      if (children.containsKey(component)) {
-        n = children.get(component);
-        result.add(n);
-        level++;
-      } else {
-        break;
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Convert radix path to string format for output.
-   * @param path - radix path represented by list of radix nodes.
-   * @return radix path as string separated by "/".
-   * Note: the path will always be normalized with an ending "/".
-   */
-  @VisibleForTesting
-  public static String radixPathToString(List<RadixNode<Integer>> path) {
-    StringBuilder sb = new StringBuilder();
-    for (RadixNode n : path) {
-      sb.append(n.getName());
-      sb.append(n.getName().equals(PATH_DELIMITER) ? "" : PATH_DELIMITER);
-    }
-    return sb.toString();
-  }
-
-  /**
-   * Get the longest prefix path.
-   * @param path - prefix path.
-   * @return longest prefix path as String separated by "/".
-   */
-  public String getLongestPrefix(String path) {
-    RadixNode<T> n = root;
-    Path p = Paths.get(path);
-    int level = 0;
-    while (level < p.getNameCount()) {
-      HashMap<String, RadixNode> children = n.getChildren();
-      if (children.isEmpty()) {
-        break;
-      }
-      String component = p.getName(level).toString();
-      if (children.containsKey(component)) {
-        n = children.get(component);
-        level++;
-      } else {
-        break;
-      }
-    }
-
-    if (level >= 1) {
-      Path longestMatch =
-          Paths.get(root.getName()).resolve(p.subpath(0, level));
-      String ret = longestMatch.toString();
-      return path.endsWith("/") ?  ret + "/" : ret;
-    } else {
-      return root.getName();
-    }
-  }
-
-  // The root of a radix tree has the name "/" and may optionally have a value.
-  private RadixNode root;
-
-  private static final String PATH_DELIMITER = OzoneConsts.OZONE_URI_DELIMITER;
-}
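
A small sketch of the prefix bookkeeping deleted above (the Integer values
are arbitrary stand-ins for ACL metadata):

    RadixTree<Integer> tree = new RadixTree<>();
    tree.insert("/vol1/bucket1/dir1/", 1);
    tree.insert("/vol1/bucket1/dir1/dir2/", 2);

    // Longest stored prefix of a deeper path; no trailing "/" is appended
    // because the query itself has none.
    String longest = tree.getLongestPrefix("/vol1/bucket1/dir1/dir2/file");
    // -> "/vol1/bucket1/dir1/dir2"

    // An exact prefix lookup returns the node, exposing the stored value.
    RadixNode<Integer> node = tree.getLastNodeInPrefixPath("/vol1/bucket1/dir1/");
    // node.getValue() -> 1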
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java
deleted file mode 100644
index 7bc89c1..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.util;
-
-/**
- * Ozone utilities.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
deleted file mode 100644
index e146d31..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.utils;
-
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.nio.charset.Charset;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Locale;
-import java.util.TimeZone;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import com.google.common.base.Preconditions;
-import org.apache.ratis.util.TimeDuration;
-
-/**
- * Set of Utility functions used in ozone.
- */
-@InterfaceAudience.Private
-public final class OzoneUtils {
-
-  public static final String ENCODING_NAME = "UTF-8";
-  public static final Charset ENCODING = Charset.forName(ENCODING_NAME);
-
-  private OzoneUtils() {
-    // Never constructed
-  }
-
-  /**
-   * Date format used in Ozone. The ThreadLocal makes it thread-safe.
-   */
-  private static final ThreadLocal<SimpleDateFormat> DATE_FORMAT =
-      new ThreadLocal<SimpleDateFormat>() {
-    @Override
-    protected SimpleDateFormat initialValue() {
-      SimpleDateFormat format = new SimpleDateFormat(
-          OzoneConsts.OZONE_DATE_FORMAT, Locale.US);
-      format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE));
-
-      return format;
-    }
-  };
-
-  /**
-   * Verifies that max key length is a valid value.
-   *
-   * @param length
-   *          The max key length to be validated
-   *
-   * @throws IllegalArgumentException
-   */
-  public static void verifyMaxKeyLength(String length)
-      throws IllegalArgumentException {
-    int maxKey = 0;
-    try {
-      maxKey = Integer.parseInt(length);
-    } catch (NumberFormatException nfe) {
-      throw new IllegalArgumentException(
-          "Invalid max key length, the value should be numeric.");
-    }
-
-    if (maxKey <= 0) {
-      throw new IllegalArgumentException(
-          "Invalid max key length, the value should be a positive number.");
-    }
-  }
-
-  /**
-   * Returns a random Request ID.
-   *
-   * Request ID is returned to the client as well as flows through the system
-   * facilitating debugging on why a certain request failed.
-   *
-   * @return String random request ID
-   */
-  public static String getRequestID() {
-    return UUID.randomUUID().toString();
-  }
-
-  /**
-   * Return host name if possible.
-   *
-   * @return Host Name or localhost
-   */
-  public static String getHostName() {
-    String host = "localhost";
-    try {
-      host = InetAddress.getLocalHost().getHostName();
-    } catch (UnknownHostException e) {
-      // Ignore the error
-    }
-    return host;
-  }
-
-  /**
-   * Convert time in milliseconds to the human-readable format used in Ozone.
-   * @return a human readable string for the input time
-   */
-  public static String formatTime(long millis) {
-    return DATE_FORMAT.get().format(millis);
-  }
-
-  /**
-   * Convert time in ozone date format to millisecond.
-   * @return time in milliseconds
-   */
-  public static long formatDate(String date) throws ParseException {
-    Preconditions.checkNotNull(date, "Date string should not be null.");
-    return DATE_FORMAT.get().parse(date).getTime();
-  }
-
-  public static boolean isOzoneEnabled(Configuration conf) {
-    return HddsUtils.isHddsEnabled(conf);
-  }
-
-
-  /**
-   * Verifies that a bucket or volume name is a valid DNS name.
-   *
-   * @param resName Bucket or volume Name to be validated
-   *
-   * @throws IllegalArgumentException
-   */
-  public static void verifyResourceName(String resName)
-      throws IllegalArgumentException {
-    HddsClientUtils.verifyResourceName(resName);
-  }
-
-  /**
-   * Return the TimeDuration configured for the given key. If not configured,
-   * return the default value.
-   */
-  public static TimeDuration getTimeDuration(Configuration conf, String key,
-      TimeDuration defaultValue) {
-    TimeUnit defaultTimeUnit = defaultValue.getUnit();
-    long timeDurationInDefaultUnit = conf.getTimeDuration(key,
-        defaultValue.getDuration(), defaultTimeUnit);
-    return TimeDuration.valueOf(timeDurationInDefaultUnit, defaultTimeUnit);
-  }
-
-  /**
-   * Return the time configured for the given key in milliseconds.
-   */
-  public static long getTimeDurationInMS(Configuration conf, String key,
-      TimeDuration defaultValue) {
-    return getTimeDuration(conf, key, defaultValue)
-        .toLong(TimeUnit.MILLISECONDS);
-  }
-
-}
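
For example, the date helpers above round-trip through the Ozone date
format; assuming its resolution is one second, the parsed value comes back
truncated to the second:

    long now = System.currentTimeMillis();
    String formatted = OzoneUtils.formatTime(now);
    long parsed = OzoneUtils.formatDate(formatted); // throws ParseException
    // parsed == now - (now % 1000)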
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java
deleted file mode 100644
index 178157f..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.web.utils;
diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
deleted file mode 100644
index d82fdf2..0000000
--- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
+++ /dev/null
@@ -1,1107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for an *unstable* .proto interface.
- */
-
-syntax = "proto2";
-option java_package = "org.apache.hadoop.ozone.protocol.proto";
-option java_outer_classname = "OzoneManagerProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.ozone;
-
-/**
-This file contains the protocol used to communicate with the
-Ozone Manager, which manages the namespace for Ozone,
-similar to the NameNode in HDFS.
-*/
-
-import "hdds.proto";
-import "Security.proto";
-import "FSProtos.proto";
-
-enum Type {
-  CreateVolume = 11;
-  SetVolumeProperty = 12;
-  CheckVolumeAccess = 13;
-  InfoVolume = 14;
-  DeleteVolume = 15;
-  ListVolume = 16;
-
-  CreateBucket = 21;
-  InfoBucket = 22;
-  SetBucketProperty = 23;
-  DeleteBucket = 24;
-  ListBuckets = 25;
-
-  CreateKey = 31;
-  LookupKey = 32;
-  RenameKey = 33;
-  DeleteKey = 34;
-  ListKeys = 35;
-  CommitKey = 36;
-  AllocateBlock = 37;
-
-  CreateS3Bucket = 41;
-  DeleteS3Bucket = 42;
-  InfoS3Bucket = 43;
-  ListS3Buckets = 44;
-  InitiateMultiPartUpload = 45;
-  CommitMultiPartUpload = 46;
-  CompleteMultiPartUpload = 47;
-  AbortMultiPartUpload = 48;
-  GetS3Secret = 49;
-  ListMultiPartUploadParts = 50;
-
-  ServiceList = 51;
-  DBUpdates = 53;
-
-  GetDelegationToken = 61;
-  RenewDelegationToken = 62;
-  CancelDelegationToken = 63;
-
-  GetFileStatus = 70;
-  CreateDirectory = 71;
-  CreateFile = 72;
-  LookupFile = 73;
-  ListStatus = 74;
-  AddAcl = 75;
-  RemoveAcl = 76;
-  SetAcl = 77;
-  GetAcl = 78;
-
-  PurgeKeys = 81;
-
-  ListMultipartUploads = 82;
-}
-
-message OMRequest {
-  required Type cmdType = 1; // Type of the command
-
-  // A string that identifies this command. The trace ID is generated at the
-  // Ozone frontend and allows the command to be traced across Ozone.
-  optional string traceID = 2;
-
-  required string clientId = 3;
-
-  optional UserInfo userInfo = 4;
-
-
-  optional CreateVolumeRequest              createVolumeRequest            = 11;
-  optional SetVolumePropertyRequest         setVolumePropertyRequest       = 12;
-  optional CheckVolumeAccessRequest         checkVolumeAccessRequest       = 13;
-  optional InfoVolumeRequest                infoVolumeRequest              = 14;
-  optional DeleteVolumeRequest              deleteVolumeRequest            = 15;
-  optional ListVolumeRequest                listVolumeRequest              = 16;
-
-  optional CreateBucketRequest              createBucketRequest            = 21;
-  optional InfoBucketRequest                infoBucketRequest              = 22;
-  optional SetBucketPropertyRequest         setBucketPropertyRequest       = 23;
-  optional DeleteBucketRequest              deleteBucketRequest            = 24;
-  optional ListBucketsRequest               listBucketsRequest             = 25;
-
-  optional CreateKeyRequest                 createKeyRequest               = 31;
-  optional LookupKeyRequest                 lookupKeyRequest               = 32;
-  optional RenameKeyRequest                 renameKeyRequest               = 33;
-  optional DeleteKeyRequest                 deleteKeyRequest               = 34;
-  optional ListKeysRequest                  listKeysRequest                = 35;
-  optional CommitKeyRequest                 commitKeyRequest               = 36;
-  optional AllocateBlockRequest             allocateBlockRequest           = 37;
-
-  optional S3CreateBucketRequest            createS3BucketRequest          = 41;
-  optional S3DeleteBucketRequest            deleteS3BucketRequest          = 42;
-  optional S3BucketInfoRequest              infoS3BucketRequest            = 43;
-  optional S3ListBucketsRequest             listS3BucketsRequest           = 44;
-  optional MultipartInfoInitiateRequest     initiateMultiPartUploadRequest = 45;
-  optional MultipartCommitUploadPartRequest commitMultiPartUploadRequest   = 46;
-  optional MultipartUploadCompleteRequest   completeMultiPartUploadRequest = 47;
-  optional MultipartUploadAbortRequest      abortMultiPartUploadRequest    = 48;
-  optional GetS3SecretRequest               getS3SecretRequest             = 49;
-  optional MultipartUploadListPartsRequest  listMultipartUploadPartsRequest = 50;
-
-  optional ServiceListRequest               serviceListRequest             = 51;
-  optional DBUpdatesRequest                  dbUpdatesRequest              = 53;
-
-  optional hadoop.common.GetDelegationTokenRequestProto getDelegationTokenRequest = 61;
-  optional hadoop.common.RenewDelegationTokenRequestProto renewDelegationTokenRequest= 62;
-  optional hadoop.common.CancelDelegationTokenRequestProto cancelDelegationTokenRequest = 63;
-  optional UpdateGetDelegationTokenRequest updateGetDelegationTokenRequest = 64;
-  optional UpdateRenewDelegationTokenRequest updatedRenewDelegationTokenRequest = 65;
-
-  optional GetFileStatusRequest             getFileStatusRequest           = 70;
-  optional CreateDirectoryRequest           createDirectoryRequest         = 71;
-  optional CreateFileRequest                createFileRequest              = 72;
-  optional LookupFileRequest                lookupFileRequest              = 73;
-  optional ListStatusRequest                listStatusRequest              = 74;
-  optional AddAclRequest                    addAclRequest                  = 75;
-  optional RemoveAclRequest                 removeAclRequest               = 76;
-  optional SetAclRequest                    setAclRequest                  = 77;
-  optional GetAclRequest                    getAclRequest                  = 78;
-
-  optional PurgeKeysRequest                 purgeKeysRequest               = 81;
-
-  optional UpdateGetS3SecretRequest         updateGetS3SecretRequest       = 82;
-  optional ListMultipartUploadsRequest      listMultipartUploadsRequest    = 83;
-}
-
-message OMResponse {
-  required Type cmdType = 1; // Type of the command
-
-  // A string that identifies this command. The trace ID is generated at the
-  // Ozone frontend and allows the command to be traced across Ozone.
-  optional string traceID = 2;
-
-  optional bool success = 3 [default=true];
-
-  optional string message = 4;
-
-  required Status status = 5;
-
-  optional string leaderOMNodeId = 6;
-
-  optional CreateVolumeResponse              createVolumeResponse          = 11;
-  optional SetVolumePropertyResponse         setVolumePropertyResponse     = 12;
-  optional CheckVolumeAccessResponse         checkVolumeAccessResponse     = 13;
-  optional InfoVolumeResponse                infoVolumeResponse            = 14;
-  optional DeleteVolumeResponse              deleteVolumeResponse          = 15;
-  optional ListVolumeResponse                listVolumeResponse            = 16;
-
-  optional CreateBucketResponse              createBucketResponse          = 21;
-  optional InfoBucketResponse                infoBucketResponse            = 22;
-  optional SetBucketPropertyResponse         setBucketPropertyResponse     = 23;
-  optional DeleteBucketResponse              deleteBucketResponse          = 24;
-  optional ListBucketsResponse               listBucketsResponse           = 25;
-
-  optional CreateKeyResponse                 createKeyResponse             = 31;
-  optional LookupKeyResponse                 lookupKeyResponse             = 32;
-  optional RenameKeyResponse                 renameKeyResponse             = 33;
-  optional DeleteKeyResponse                 deleteKeyResponse             = 34;
-  optional ListKeysResponse                  listKeysResponse              = 35;
-  optional CommitKeyResponse                 commitKeyResponse             = 36;
-  optional AllocateBlockResponse             allocateBlockResponse         = 37;
-
-  optional S3CreateBucketResponse            createS3BucketResponse        = 41;
-  optional S3DeleteBucketResponse            deleteS3BucketResponse        = 42;
-  optional S3BucketInfoResponse              infoS3BucketResponse          = 43;
-  optional S3ListBucketsResponse             listS3BucketsResponse         = 44;
-  optional MultipartInfoInitiateResponse   initiateMultiPartUploadResponse = 45;
-  optional MultipartCommitUploadPartResponse commitMultiPartUploadResponse = 46;
-  optional MultipartUploadCompleteResponse completeMultiPartUploadResponse = 47;
-  optional MultipartUploadAbortResponse    abortMultiPartUploadResponse    = 48;
-  optional GetS3SecretResponse               getS3SecretResponse           = 49;
-  optional MultipartUploadListPartsResponse listMultipartUploadPartsResponse = 50;
-
-  optional ServiceListResponse               ServiceListResponse           = 51;
-  optional DBUpdatesResponse                 dbUpdatesResponse             = 52;
-
-  optional GetDelegationTokenResponseProto getDelegationTokenResponse = 61;
-  optional RenewDelegationTokenResponseProto renewDelegationTokenResponse = 62;
-  optional CancelDelegationTokenResponseProto cancelDelegationTokenResponse = 63;
-
-  optional GetFileStatusResponse              getFileStatusResponse        = 70;
-  optional CreateDirectoryResponse            createDirectoryResponse      = 71;
-  optional CreateFileResponse                 createFileResponse           = 72;
-  optional LookupFileResponse                 lookupFileResponse           = 73;
-  optional ListStatusResponse                 listStatusResponse           = 74;
-  optional AddAclResponse                    addAclResponse                = 75;
-  optional RemoveAclResponse                 removeAclResponse             = 76;
-  optional SetAclResponse                   setAclResponse                 = 77;
-  optional GetAclResponse                    getAclResponse                = 78;
-
-  optional PurgeKeysResponse                  purgeKeysResponse            = 81;
-
-  optional ListMultipartUploadsResponse listMultipartUploadsResponse = 82;
-}
-
-enum Status {
-    OK = 1;
-    VOLUME_NOT_UNIQUE = 2;
-    VOLUME_NOT_FOUND = 3;
-    VOLUME_NOT_EMPTY = 4;
-    VOLUME_ALREADY_EXISTS = 5;
-    USER_NOT_FOUND = 6;
-    USER_TOO_MANY_VOLUMES = 7;
-    BUCKET_NOT_FOUND = 8;
-    BUCKET_NOT_EMPTY = 9;
-    BUCKET_ALREADY_EXISTS = 10;
-    KEY_ALREADY_EXISTS = 11;
-    KEY_NOT_FOUND = 12;
-    INVALID_KEY_NAME = 13;
-    ACCESS_DENIED = 14;
-    INTERNAL_ERROR = 15;
-    KEY_ALLOCATION_ERROR = 16;
-    KEY_DELETION_ERROR = 17;
-    KEY_RENAME_ERROR = 18;
-    METADATA_ERROR = 19;
-    OM_NOT_INITIALIZED = 20;
-    SCM_VERSION_MISMATCH_ERROR = 21;
-    S3_BUCKET_NOT_FOUND = 22;
-    S3_BUCKET_ALREADY_EXISTS = 23;
-
-    INITIATE_MULTIPART_UPLOAD_ERROR = 24;
-    MULTIPART_UPLOAD_PARTFILE_ERROR = 25;
-    NO_SUCH_MULTIPART_UPLOAD_ERROR = 26;
-    MISMATCH_MULTIPART_LIST = 27;
-    MISSING_UPLOAD_PARTS = 28;
-    COMPLETE_MULTIPART_UPLOAD_ERROR = 29;
-    ENTITY_TOO_SMALL = 30;
-    ABORT_MULTIPART_UPLOAD_FAILED = 31;
-
-    S3_SECRET_NOT_FOUND = 32;
-
-    INVALID_AUTH_METHOD = 33;
-    INVALID_TOKEN = 34;
-    TOKEN_EXPIRED = 35;
-    TOKEN_ERROR_OTHER = 36;
-    LIST_MULTIPART_UPLOAD_PARTS_FAILED = 37;
-    SCM_IN_SAFE_MODE = 38;
-    INVALID_REQUEST = 39;
-
-    BUCKET_ENCRYPTION_KEY_NOT_FOUND = 40;
-    UNKNOWN_CIPHER_SUITE = 41;
-    INVALID_KMS_PROVIDER = 42;
-    TOKEN_CREATION_ERROR = 43;
-
-    FILE_NOT_FOUND = 44;
-    DIRECTORY_NOT_FOUND = 45;
-    FILE_ALREADY_EXISTS = 46;
-    NOT_A_FILE = 47;
-    PERMISSION_DENIED = 48;
-    TIMEOUT = 49;
-    PREFIX_NOT_FOUND=50;
-
-    S3_BUCKET_INVALID_LENGTH = 51; // s3 bucket invalid length.
-
-    RATIS_ERROR = 52;
-
-    INVALID_PATH_IN_ACL_REQUEST = 53; // Invalid path name in acl request.
-
-    USER_MISMATCH = 54; // Error code when the requested user name
-    // differs from the remote user.
-}
-
-
-message VolumeInfo {
-    required string adminName = 1;
-    required string ownerName = 2;
-    required string volume = 3;
-    optional uint64 quotaInBytes = 4;
-    repeated hadoop.hdds.KeyValue metadata = 5;
-    repeated OzoneAclInfo volumeAcls = 6;
-    optional uint64 creationTime = 7;
-    optional uint64 objectID = 8;
-    optional uint64 updateID = 9;
-}
-
-/**
-    User information extracted from the RPC context and used when
-    validating ACLs.
-*/
-message UserInfo {
-    optional string userName = 1;
-    optional string remoteAddress = 3;
-}
-
-/**
-  Used during OM HA: once the leader generates a token, it sends this
-  request via Ratis to persist it to the OM DB. OM uses this request
-  internally to replicate the token across a quorum of OMs.
-*/
-message UpdateGetDelegationTokenRequest {
-    required GetDelegationTokenResponseProto getDelegationTokenResponse = 1;
-}
-
-/**
-  Used during OM HA: once the leader renews a token, it sends this request
-  via Ratis to persist it to the OM DB. OM uses this request internally to
-  replicate the renewed token information across a quorum of OMs.
-*/
-message UpdateRenewDelegationTokenRequest {
-    required hadoop.common.RenewDelegationTokenRequestProto
-    renewDelegationTokenRequest = 1;
-    required RenewDelegationTokenResponseProto renewDelegationTokenResponse = 2;
-}
-
-/**
-    Creates a volume
-*/
-message CreateVolumeRequest {
-    required VolumeInfo volumeInfo = 1;
-}
-
-message CreateVolumeResponse {
-
-}
-
-message UserVolumeInfo {
-    repeated string volumeNames = 1;
-    optional uint64 objectID = 2;
-    optional uint64 updateID = 3;
-}
-
-/**
-    Changes volume properties, such as the ownership and quota of a volume.
-*/
-message SetVolumePropertyRequest {
-    required string volumeName = 1;
-    optional string ownerName = 2;
-    optional uint64 quotaInBytes = 3;
-}
-
-message SetVolumePropertyResponse {
-
-}
-
-/**
- * Checks whether the user has the specified permissions for the volume.
- */
-message CheckVolumeAccessRequest {
-    required string volumeName = 1;
-    required OzoneAclInfo userAcl = 2;
-}
-
-message CheckVolumeAccessResponse {
-
-}
-
-
-/**
-    Returns information about a volume.
-*/
-
-message InfoVolumeRequest {
-    required string volumeName = 1;
-}
-
-message InfoVolumeResponse {
-    optional VolumeInfo volumeInfo = 2;
-}
-
-/**
-    Deletes an existing volume.
-*/
-message DeleteVolumeRequest {
-    required string volumeName = 1;
-}
-
-message DeleteVolumeResponse {
-
-}
-
-
-/**
-    List Volumes -- List all volumes in the cluster or by user.
-*/
-
-message ListVolumeRequest {
-    enum Scope {
-        USER_VOLUMES = 1;   // User volumes -- called by user
-        VOLUMES_BY_USER = 2; // User volumes - called by Admin
-        VOLUMES_BY_CLUSTER = 3; // All volumes in the cluster
-    }
-    required Scope scope = 1;
-    optional string userName = 2;
-    optional string prefix = 3;
-    optional string prevKey = 4;
-    optional uint32 maxKeys = 5;
-}
-
-message ListVolumeResponse {
-    repeated VolumeInfo volumeInfo = 2;
-}
-
-message BucketInfo {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    repeated OzoneAclInfo acls = 3;
-    required bool isVersionEnabled = 4 [default = false];
-    required StorageTypeProto storageType = 5 [default = DISK];
-    optional uint64 creationTime = 6;
-    repeated hadoop.hdds.KeyValue metadata = 7;
-    optional BucketEncryptionInfoProto beinfo = 8;
-}
-
-enum StorageTypeProto {
-    DISK = 1;
-    SSD = 2;
-    ARCHIVE = 3;
-    RAM_DISK = 4;
-}
-
-/**
- * Cipher suite.
- */
-enum CipherSuiteProto {
-    UNKNOWN = 1;
-    AES_CTR_NOPADDING = 2;
-}
-
-/**
- * Crypto protocol version used to access encrypted files.
- */
-enum CryptoProtocolVersionProto {
-    UNKNOWN_PROTOCOL_VERSION = 1;
-    ENCRYPTION_ZONES = 2;
-}
-/**
- * Encryption information for bucket (bucket key)
- */
-message BucketEncryptionInfoProto {
-    required string keyName = 1;
-    optional CipherSuiteProto suite = 2;
-    optional CryptoProtocolVersionProto cryptoProtocolVersion = 3;
-}
-
-/**
- * Encryption information for a file.
- */
-message FileEncryptionInfoProto {
-    required CipherSuiteProto suite = 1;
-    required CryptoProtocolVersionProto cryptoProtocolVersion = 2;
-    required bytes key = 3;
-    required bytes iv = 4;
-    required string keyName = 5;
-    required string ezKeyVersionName = 6;
-}
-
-/**
- * Encryption information for an individual
- * file within an encryption zone
- */
-message PerFileEncryptionInfoProto {
-    required bytes key = 1;
-    required bytes iv = 2;
-    required string ezKeyVersionName = 3;
-}
-
-message DataEncryptionKeyProto {
-    required uint32 keyId = 1;
-    required bytes nonce = 3;
-    required bytes encryptionKey = 4;
-    required uint64 expiryDate = 5;
-    optional string encryptionAlgorithm = 6;
-}
-
-message BucketArgs {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    optional bool isVersionEnabled = 5;
-    optional StorageTypeProto storageType = 6;
-    repeated hadoop.hdds.KeyValue metadata = 7;
-}
-
-message PrefixInfo {
-    required string name = 1;
-    repeated OzoneAclInfo acls = 2;
-    repeated hadoop.hdds.KeyValue metadata = 3;
-}
-
-message OzoneObj {
-  enum ObjectType {
-    VOLUME = 1;
-    BUCKET = 2;
-    KEY = 3;
-    PREFIX = 4;
-  }
-
-  enum StoreType {
-    OZONE = 1;
-    S3 = 2;
-  }
-  required ObjectType resType = 1;
-  required StoreType storeType = 2  [default = S3];
-  required string path = 3;
-}
-
-message OzoneAclInfo {
-    enum OzoneAclType {
-        USER = 1;
-        GROUP = 2;
-        WORLD = 3;
-        ANONYMOUS = 4;
-        CLIENT_IP = 5;
-    }
-
-    enum OzoneAclScope {
-      ACCESS = 0;
-      DEFAULT = 1;
-    }
-
-    required OzoneAclType type = 1;
-    required string name = 2;
-    required bytes rights = 3;
-    required OzoneAclScope aclScope = 4 [default = ACCESS];
-}
-
-message GetAclRequest {
-  required OzoneObj obj = 1;
-}
-
-message GetAclResponse {
-  repeated OzoneAclInfo acls = 1;
-}
-
-message AddAclRequest {
-  required OzoneObj obj = 1;
-  required OzoneAclInfo acl = 2;
-}
-
-message AddAclResponse {
-  required bool response = 1;
-}
-
-message RemoveAclRequest {
-  required OzoneObj obj = 1;
-  required OzoneAclInfo acl = 2;
-}
-
-message RemoveAclResponse {
-  required bool response = 1;
-}
-
-message SetAclRequest {
-  required OzoneObj obj = 1;
-  repeated OzoneAclInfo acl = 2;
-}
-
-message SetAclResponse {
-  required bool response = 1;
-}
-
-message CreateBucketRequest {
-    required BucketInfo bucketInfo = 1;
-}
-
-message CreateBucketResponse {
-}
-
-message InfoBucketRequest {
-    required string volumeName = 1;
-    required string bucketName = 2;
-}
-
-message InfoBucketResponse {
-    optional BucketInfo bucketInfo = 2;
-}
-
-message SetBucketPropertyRequest {
-    optional BucketArgs bucketArgs = 1;
-}
-
-message SetBucketPropertyResponse {
-
-}
-
-message DeleteBucketRequest {
-    required string volumeName = 1;
-    required string bucketName = 2;
-}
-
-message DeleteBucketResponse {
-
-}
-
-message ListBucketsRequest {
-    required string volumeName = 1;
-    optional string startKey = 2;
-    optional string prefix = 3;
-    optional int32 count = 4;
-}
-
-message ListBucketsResponse {
-
-    repeated BucketInfo bucketInfo = 2;
-}
-
-message KeyArgs {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    required string keyName = 3;
-    optional uint64 dataSize = 4;
-    optional hadoop.hdds.ReplicationType type = 5;
-    optional hadoop.hdds.ReplicationFactor factor = 6;
-    repeated KeyLocation keyLocations = 7;
-    optional bool isMultipartKey = 8;
-    optional string multipartUploadID = 9;
-    optional uint32 multipartNumber = 10;
-    repeated hadoop.hdds.KeyValue metadata = 11;
-    repeated OzoneAclInfo acls = 12;
-    // This will be set when the request is received in preExecute. This
-    // value is used to set the creation/modification time, depending on
-    // the request type.
-    optional uint64 modificationTime = 13;
-    optional bool sortDatanodes = 14;
-}
-
-message KeyLocation {
-    required hadoop.hdds.BlockID blockID = 1;
-    required uint64 offset = 3;
-    required uint64 length = 4;
-    // indicates the version at which this block was created.
-    optional uint64 createVersion = 5;
-    optional hadoop.common.TokenProto token = 6;
-    // Workaround to include pipeline info for client read/write
-    // without talking to SCM.
-    // NOTE: the pipeline info may change after the pipeline closes.
-    // So eventually we will have to go back to calling SCM to get
-    // up-to-date pipeline information. This will need o3fs to provide
-    // not only an OM delegation token but also an SCM delegation token.
-    optional hadoop.hdds.Pipeline pipeline = 7;
-}
-
-message KeyLocationList {
-    optional uint64 version = 1;
-    repeated KeyLocation keyLocations = 2;
-    optional FileEncryptionInfoProto fileEncryptionInfo = 3;
-}
-
-message KeyInfo {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    required string keyName = 3;
-    required uint64 dataSize = 4;
-    required hadoop.hdds.ReplicationType type = 5;
-    required hadoop.hdds.ReplicationFactor factor = 6;
-    repeated KeyLocationList keyLocationList = 7;
-    required uint64 creationTime = 8;
-    required uint64 modificationTime = 9;
-    optional uint64 latestVersion = 10;
-    repeated hadoop.hdds.KeyValue metadata = 11;
-    optional FileEncryptionInfoProto fileEncryptionInfo = 12;
-    repeated OzoneAclInfo acls = 13;
-}
-
-message RepeatedKeyInfo {
-    repeated KeyInfo keyInfo = 1;
-}
-
-message OzoneFileStatusProto {
-    required hadoop.fs.FileStatusProto status = 1;
-}
-
-message GetFileStatusRequest {
-    required KeyArgs keyArgs = 1;
-}
-
-message GetFileStatusResponse {
-    required OzoneFileStatusProto status = 1;
-}
-
-message CreateDirectoryRequest {
-    required KeyArgs keyArgs = 1;
-}
-
-message CreateDirectoryResponse {
-}
-
-message CreateFileRequest {
-    required KeyArgs keyArgs = 1;
-    required bool isRecursive = 2;
-    required bool isOverwrite = 3;
-    // Set in OM HA during the preExecute step, so that all OMs use the
-    // same ID.
-    optional uint64 clientID = 4;
-}
-
-message CreateFileResponse {
-
-    optional KeyInfo keyInfo = 1;
-    // A client's follow-up request may carry this ID for stateful
-    // operations (similar to a cookie).
-    optional uint64 ID = 2;
-    optional uint64 openVersion = 3;
-}
-
-message LookupFileRequest {
-    required KeyArgs keyArgs = 1;
-}
-
-message LookupFileResponse {
-    optional KeyInfo keyInfo = 1;
-}
-
-message ListStatusRequest {
-    required KeyArgs keyArgs = 1;
-    required bool recursive = 2;
-    required string startKey = 3;
-    required uint64 numEntries = 4;
-}
-
-message ListStatusResponse {
-    repeated OzoneFileStatusProto statuses = 1;
-}
-
-message CreateKeyRequest {
-    required KeyArgs keyArgs = 1;
-    // Set in OM HA during the preExecute step, so that all OMs use the
-    // same ID.
-    optional uint64 clientID = 2;
-}
-
-message CreateKeyResponse {
-    optional KeyInfo keyInfo = 2;
-    // A client's follow-up request may carry this ID for stateful
-    // operations (similar to a cookie).
-    optional uint64 ID = 3;
-    optional uint64 openVersion = 4;
-}
-
-message LookupKeyRequest {
-    required KeyArgs keyArgs = 1;
-}
-
-message LookupKeyResponse {
-    optional KeyInfo keyInfo = 2;
-    // A client's follow-up request may carry this ID for stateful
-    // operations (similar to a cookie).
-    optional uint64 ID = 3;
-    // TODO: allow specifying a particular version to read.
-    optional uint64 openVersion = 4;
-}
-
-message RenameKeyRequest{
-    required KeyArgs keyArgs = 1;
-    required string toKeyName = 2;
-}
-
-message RenameKeyResponse{
-
-}
-
-message DeleteKeyRequest {
-    required KeyArgs keyArgs = 1;
-}
-
-message DeleteKeyResponse {
-
-    optional KeyInfo keyInfo = 2;
-    // A client's follow-up request may carry this ID for stateful
-    // operations (similar to a cookie).
-    optional uint64 ID = 3;
-    optional uint64 openVersion = 4;
-}
-
-message PurgeKeysRequest {
-    repeated string keys = 1;
-}
-
-message PurgeKeysResponse {
-
-}
-
-message OMTokenProto {
-    enum Type {
-      DELEGATION_TOKEN = 1;
-      S3TOKEN = 2;
-    };
-    required Type   type           = 1;
-    optional uint32 version        = 2;
-    optional string owner          = 3;
-    optional string renewer        = 4;
-    optional string realUser       = 5;
-    optional uint64 issueDate      = 6;
-    optional uint64 maxDate        = 7;
-    optional uint32 sequenceNumber = 8;
-    optional uint32 masterKeyId    = 9;
-    optional uint64 expiryDate     = 10;
-    optional string omCertSerialId = 11;
-    optional string accessKeyId    = 12;
-    optional string signature      = 13;
-    optional string strToSign      = 14;
-}
-
-message SecretKeyProto {
-    required uint32 keyId = 1;
-    required uint64 expiryDate = 2;
-    required bytes privateKeyBytes = 3;
-    required bytes publicKeyBytes = 4;
-}
-
-message ListKeysRequest {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    optional string startKey = 3;
-    optional string prefix = 4;
-    optional int32 count = 5;
-}
-
-message ListKeysResponse {
-    repeated KeyInfo keyInfo = 2;
-}
-
-message CommitKeyRequest {
-    required KeyArgs keyArgs = 1;
-    required uint64 clientID = 2;
-}
-
-message CommitKeyResponse {
-
-}
-
-message AllocateBlockRequest {
-    required KeyArgs keyArgs = 1;
-    required uint64 clientID = 2;
-    optional hadoop.hdds.ExcludeListProto excludeList = 3;
-    // During HA, one of the OM nodes allocates the block and sends the
-    // AllocateBlockRequest with keyLocation set. If set, there is no need to
-    // call SCM again in OM Ratis applyTransaction; just append it to the DB.
-    optional KeyLocation keyLocation = 4;
-}
-
-message AllocateBlockResponse {
-
-    optional KeyLocation keyLocation = 2;
-}
-
-message ServiceListRequest {
-}
-
-message DBUpdatesRequest {
-    required uint64 sequenceNumber = 1;
-}
-
-message ServiceListResponse {
-
-    repeated ServiceInfo serviceInfo = 2;
-    // When security is enabled, return the SCM CA certificate to the Ozone
-    // client to set up gRPC TLS so the client can authenticate the server (DN).
-    optional string caCertificate = 3;
-}
-
-message DBUpdatesResponse {
-    required uint64 sequenceNumber = 1;
-    repeated bytes data = 2;
-}
-
-message ServicePort {
-    enum Type {
-        RPC = 1;
-        HTTP = 2;
-        HTTPS = 3;
-        RATIS = 4;
-    };
-    required Type type = 1;
-    required uint32 value = 2;
-}
-
-message ServiceInfo {
-    required hadoop.hdds.NodeType nodeType = 1;
-    required string hostname = 2;
-    repeated ServicePort servicePorts = 3;
-}
-
-message S3CreateBucketRequest {
-    required string userName = 1;
-    required string s3bucketname = 2;
-    // This will be set during OM HA by one of the OM nodes. If more data
-    // fields are required to create the volume/bucket in the future, we can
-    // add them here; that is the reason for creating a new message type.
-    // S3CreateBucket means: create a volume from userName and create a
-    // bucket with s3BucketName.
-    optional S3CreateVolumeInfo s3CreateVolumeInfo = 3;
-}
-
-message S3CreateVolumeInfo {
-    // Creation time set in preExecute on one of the OM nodes.
-    required uint64 creationTime = 1;
-}
-
-message S3CreateBucketResponse {
-
-}
-
-message S3DeleteBucketRequest {
-    required string s3bucketName = 1;
-}
-
-message S3DeleteBucketResponse {
-
-}
-
-message S3BucketInfoRequest {
-    required string s3bucketName = 1;
-}
-message S3BucketInfoResponse {
-
-    optional string ozoneMapping = 2;
-}
-
-message S3ListBucketsRequest {
-    required string userName = 1;
-    optional string startKey = 2;
-    optional string prefix = 3;
-    optional int32 count = 4;
-}
-
-message S3ListBucketsResponse {
-   repeated BucketInfo bucketInfo = 2;
-}
-
-message MultipartInfoInitiateRequest {
-    required KeyArgs keyArgs = 1;
-
-}
-
-message MultipartInfoInitiateResponse {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    required string keyName = 3;
-    required string multipartUploadID = 4;
-}
-
-message MultipartKeyInfo {
-    required string uploadID = 4;
-    repeated PartKeyInfo partKeyInfoList = 5;
-}
-
-message PartKeyInfo {
-    required string partName = 1;
-    required uint32 partNumber = 2;
-    required KeyInfo partKeyInfo = 3;
-}
-
-message MultipartCommitUploadPartRequest {
-    required KeyArgs keyArgs = 1;
-    required uint64 clientID = 2;
-}
-
-message MultipartCommitUploadPartResponse {
-    // This is returned as the ETag for S3.
-    optional string partName = 1;
-}
-
-message MultipartUploadCompleteRequest {
-    required KeyArgs keyArgs = 1;
-    repeated Part partsList = 2;
-}
-
-message MultipartUploadCompleteResponse {
-    optional string volume = 1;
-    optional string bucket = 2;
-    optional string key = 3;
-    optional string hash = 4; // This will be used as the ETag for S3.
-}
-
-message Part {
-    required uint32 partNumber = 1;
-    required string partName = 2;
-}
-
-message MultipartUploadAbortRequest {
-    required KeyArgs keyArgs = 1;
-}
-
-message MultipartUploadAbortResponse {
-
-}
-message MultipartUploadListPartsRequest {
-    required string volume = 1;
-    required string bucket = 2;
-    required string key = 3;
-    required string uploadID = 4;
-    optional uint32 partNumbermarker = 5;
-    optional uint32 maxParts = 6;
-}
-
-message MultipartUploadListPartsResponse {
-    optional hadoop.hdds.ReplicationType type = 2;
-    optional hadoop.hdds.ReplicationFactor factor = 3;
-    optional uint32 nextPartNumberMarker = 4;
-    optional bool isTruncated = 5;
-    repeated PartInfo partsList = 6;
-
-}
-
-message ListMultipartUploadsRequest {
-    required string volume = 1;
-    required string bucket = 2;
-    required string prefix = 3;
-}
-
-message ListMultipartUploadsResponse {
-    optional bool isTruncated = 1;
-    repeated MultipartUploadInfo uploadsList = 2;
-}
-
-message MultipartUploadInfo {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    required string keyName = 3;
-    required string uploadId = 4;
-    required uint64 creationTime = 5;
-    required hadoop.hdds.ReplicationType type = 6;
-    required hadoop.hdds.ReplicationFactor factor = 7;
-
-}
-
-message PartInfo {
-    required uint32 partNumber = 1;
-    required string partName = 2;
-    required uint64 modificationTime = 3;
-    required uint64 size = 4;
-}
-
-message GetDelegationTokenResponseProto {
-
-    optional hadoop.common.GetDelegationTokenResponseProto response = 2;
-}
-
-message RenewDelegationTokenResponseProto {
-
-    optional hadoop.common.RenewDelegationTokenResponseProto response = 2;
-}
-
-message CancelDelegationTokenResponseProto {
-
-    optional hadoop.common.CancelDelegationTokenResponseProto response = 2;
-}
-
-message S3Secret {
-    required string kerberosID = 1;
-    required string awsSecret = 2;
-}
-
-message GetS3SecretRequest {
-    required string kerberosID = 1;
-}
-
-message GetS3SecretResponse {
-    required S3Secret s3Secret = 2;
-}
-
-/**
-  Used internally by OM to replicate the S3 secret across a quorum of
-  OMs.
-*/
-message UpdateGetS3SecretRequest {
-    required string kerberosID = 1;
-    required string awsSecret = 2;
-}
-
-/**
- The OM service that manages the Ozone namespace.
-*/
-service OzoneManagerService {
-    // A client-to-OM RPC to send client requests to OM Ratis server
-    rpc submitRequest(OMRequest)
-          returns(OMResponse);
-}
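For context on the protocol being removed, here is a minimal sketch of how a caller would populate one of the request messages above, assuming protoc's standard Java code generation into the OzoneManagerProtocolProtos outer class (the same class the deleted tests below import); the volume, bucket, and key names are hypothetical, for illustration only.

import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;

public final class GetFileStatusRequestSketch {
  public static GetFileStatusRequest build() {
    // KeyArgs carries the required volume/bucket/key triple declared above;
    // the names here are hypothetical.
    KeyArgs keyArgs = KeyArgs.newBuilder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setKeyName("key1")
        .build();
    // GetFileStatusRequest wraps the KeyArgs, matching field 1 of the message.
    return GetFileStatusRequest.newBuilder().setKeyArgs(keyArgs).build();
  }
}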
diff --git a/hadoop-ozone/common/src/main/resources/ozone-version-info.properties b/hadoop-ozone/common/src/main/resources/ozone-version-info.properties
deleted file mode 100644
index 599f14d..0000000
--- a/hadoop-ozone/common/src/main/resources/ozone-version-info.properties
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-version=${declared.ozone.version}
-release=${ozone.release}
-revision=${version-info.scm.commit}
-branch=${version-info.scm.branch}
-user=${user.name}
-date=${version-info.build.time}
-url=${version-info.scm.uri}
-srcChecksum=${version-info.source.md5}
-protocVersion=${protobuf.version}
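The ${...} placeholders above are filled in by Maven resource filtering at build time. Below is a minimal sketch of consuming such a filtered file from the classpath with java.util.Properties; this is an assumption about typical usage, not the actual Ozone version-info reader.

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public final class OzoneVersionInfoSketch {
  public static Properties load() throws IOException {
    Properties props = new Properties();
    // The file is packaged on the classpath after Maven resource filtering
    // has replaced the ${...} placeholders with build-time values.
    try (InputStream in = OzoneVersionInfoSketch.class.getClassLoader()
        .getResourceAsStream("ozone-version-info.properties")) {
      if (in != null) {
        props.load(in);
      }
    }
    return props;
  }
}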
diff --git a/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh b/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh
deleted file mode 100644
index 3fff7f5..0000000
--- a/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if [[ "${HADOOP_SHELL_EXECNAME}" = ozone ]]; then
-   hadoop_add_profile ozone
-fi
-
-
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java
deleted file mode 100644
index ce743fe..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.test.PathUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.TemporaryFolder;
-import org.junit.rules.Timeout;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Unit tests for {@link OmUtils}.
- */
-public class TestOmUtils {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Rule
-  public Timeout timeout = new Timeout(60_000);
-
-  @Rule
-  public ExpectedException thrown= ExpectedException.none();
-
-  /**
-   * Test {@link OmUtils#getOmDbDir}.
-   */
-  @Test
-  public void testGetOmDbDir() {
-    final File testDir = PathUtils.getTestDir(TestOmUtils.class);
-    final File dbDir = new File(testDir, "omDbDir");
-    final File metaDir = new File(testDir, "metaDir");   // should be ignored.
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(OMConfigKeys.OZONE_OM_DB_DIRS, dbDir.getPath());
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
-
-    try {
-      assertEquals(dbDir, OmUtils.getOmDbDir(conf));
-      assertTrue(dbDir.exists());          // should have been created.
-    } finally {
-      FileUtils.deleteQuietly(dbDir);
-    }
-  }
-
-  /**
-   * Test {@link OmUtils#getOmDbDir} with fallback to OZONE_METADATA_DIRS
-   * when OZONE_OM_DB_DIRS is undefined.
-   */
-  @Test
-  public void testGetOmDbDirWithFallback() {
-    final File testDir = PathUtils.getTestDir(TestOmUtils.class);
-    final File metaDir = new File(testDir, "metaDir");
-    final Configuration conf = new OzoneConfiguration();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
-
-    try {
-      assertEquals(metaDir, OmUtils.getOmDbDir(conf));
-      assertTrue(metaDir.exists());        // should have been created.
-    } finally {
-      FileUtils.deleteQuietly(metaDir);
-    }
-  }
-
-  @Test
-  public void testNoOmDbDirConfigured() {
-    thrown.expect(IllegalArgumentException.class);
-    OmUtils.getOmDbDir(new OzoneConfiguration());
-  }
-
-  @Test
-  public void testWriteCheckpointToOutputStream() throws Exception {
-
-    FileInputStream fis = null;
-    FileOutputStream fos = null;
-
-    try {
-      String testDirName = folder.newFolder().getAbsolutePath();
-      File file = new File(testDirName + "/temp1.txt");
-      FileWriter writer = new FileWriter(file);
-      writer.write("Test data 1");
-      writer.close();
-
-      file = new File(testDirName + "/temp2.txt");
-      writer = new FileWriter(file);
-      writer.write("Test data 2");
-      writer.close();
-
-      File outputFile =
-          new File(Paths.get(testDirName, "output_file.tgz").toString());
-      TestDBCheckpoint dbCheckpoint = new TestDBCheckpoint(
-          Paths.get(testDirName));
-      OmUtils.writeOmDBCheckpointToStream(dbCheckpoint,
-          new FileOutputStream(outputFile));
-      assertNotNull(outputFile);
-    } finally {
-      IOUtils.closeStream(fis);
-      IOUtils.closeStream(fos);
-    }
-  }
-
-}
-
-class TestDBCheckpoint implements DBCheckpoint {
-
-  private Path checkpointFile;
-
-  TestDBCheckpoint(Path checkpointFile) {
-    this.checkpointFile = checkpointFile;
-  }
-
-  @Override
-  public Path getCheckpointLocation() {
-    return checkpointFile;
-  }
-
-  @Override
-  public long getCheckpointTimestamp() {
-    return 0;
-  }
-
-  @Override
-  public long getLatestSequenceNumber() {
-    return 0;
-  }
-
-  @Override
-  public long checkpointCreationTimeTaken() {
-    return 0;
-  }
-
-  @Override
-  public void cleanupCheckpoint() throws IOException {
-    FileUtils.deleteDirectory(checkpointFile.toFile());
-  }
-
-  @Override
-  public void setRatisSnapshotIndex(long omRatisSnapshotIndex) {
-  }
-
-  @Override
-  public long getRatisSnapshotIndex() {
-    return 0;
-  }
-}
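The first two tests above pin down the resolution order: OZONE_OM_DB_DIRS wins when set, and OZONE_METADATA_DIRS is the fallback. A condensed sketch of that lookup, using the same configuration keys the tests set; the path is hypothetical.

import java.io.File;

import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OmUtils;

public final class OmDbDirSketch {
  public static File resolve() {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Only the fallback key is set here, so getOmDbDir resolves to it;
    // OMConfigKeys.OZONE_OM_DB_DIRS would take precedence if set.
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, "/tmp/ozone-meta"); // hypothetical path
    return OmUtils.getOmDbDir(conf);
  }
}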
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
deleted file mode 100644
index 17fc9b5..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
+++ /dev/null
@@ -1,329 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
-
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Set;
-
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * This class is to test acl storage and retrieval in ozone store.
- */
-public class TestOzoneAcls {
-
-  @Test
-  public void testAclParse() {
-    HashMap<String, Boolean> testMatrix;
-    testMatrix = new HashMap<>();
-
-    testMatrix.put("user:bilbo:r", Boolean.TRUE);
-    testMatrix.put("user:bilbo:w", Boolean.TRUE);
-    testMatrix.put("user:bilbo:rw", Boolean.TRUE);
-    testMatrix.put("user:bilbo:a", Boolean.TRUE);
-    testMatrix.put("    user:bilbo:a   ", Boolean.TRUE);
-
-
-    // ACLs make no judgement on the quality of
-    // user names; it is for the userAuth interface
-    // to determine if a user name is really a name.
-    testMatrix.put(" user:*:rw", Boolean.TRUE);
-    testMatrix.put(" user:~!:rw", Boolean.TRUE);
-
-
-    testMatrix.put("", Boolean.FALSE);
-    testMatrix.put(null, Boolean.FALSE);
-    testMatrix.put(" user:bilbo:", Boolean.FALSE);
-    testMatrix.put(" user:bilbo:rx", Boolean.TRUE);
-    testMatrix.put(" user:bilbo:rwdlncxy", Boolean.TRUE);
-    testMatrix.put(" group:bilbo:rwdlncxy", Boolean.TRUE);
-    testMatrix.put(" world::rwdlncxy", Boolean.TRUE);
-    testMatrix.put(" user:bilbo:rncxy", Boolean.TRUE);
-    testMatrix.put(" group:bilbo:ncxy", Boolean.TRUE);
-    testMatrix.put(" world::ncxy", Boolean.TRUE);
-    testMatrix.put(" user:bilbo:rwcxy", Boolean.TRUE);
-    testMatrix.put(" group:bilbo:rwcxy", Boolean.TRUE);
-    testMatrix.put(" world::rwcxy", Boolean.TRUE);
-    testMatrix.put(" user:bilbo:mk", Boolean.FALSE);
-    testMatrix.put(" user::rw", Boolean.FALSE);
-    testMatrix.put("user11:bilbo:rw", Boolean.FALSE);
-    testMatrix.put(" user:::rw", Boolean.FALSE);
-
-    testMatrix.put(" group:hobbit:r", Boolean.TRUE);
-    testMatrix.put(" group:hobbit:w", Boolean.TRUE);
-    testMatrix.put(" group:hobbit:rw", Boolean.TRUE);
-    testMatrix.put(" group:hobbit:a", Boolean.TRUE);
-    testMatrix.put(" group:*:rw", Boolean.TRUE);
-    testMatrix.put(" group:~!:rw", Boolean.TRUE);
-
-    testMatrix.put(" group:hobbit:", Boolean.FALSE);
-    testMatrix.put(" group:hobbit:rx", Boolean.TRUE);
-    testMatrix.put(" group:hobbit:mk", Boolean.FALSE);
-    testMatrix.put(" group::", Boolean.FALSE);
-    testMatrix.put(" group::rw", Boolean.FALSE);
-    testMatrix.put(" group22:hobbit:", Boolean.FALSE);
-    testMatrix.put(" group:::rw", Boolean.FALSE);
-
-    testMatrix.put("JUNK group:hobbit:r", Boolean.FALSE);
-    testMatrix.put("JUNK group:hobbit:w", Boolean.FALSE);
-    testMatrix.put("JUNK group:hobbit:rw", Boolean.FALSE);
-    testMatrix.put("JUNK group:hobbit:a", Boolean.FALSE);
-    testMatrix.put("JUNK group:*:rw", Boolean.FALSE);
-    testMatrix.put("JUNK group:~!:rw", Boolean.FALSE);
-
-    testMatrix.put(" world::r", Boolean.TRUE);
-    testMatrix.put(" world::w", Boolean.TRUE);
-    testMatrix.put(" world::rw", Boolean.TRUE);
-    testMatrix.put(" world::a", Boolean.TRUE);
-
-    testMatrix.put(" world:bilbo:w", Boolean.FALSE);
-    testMatrix.put(" world:bilbo:rw", Boolean.FALSE);
-    testMatrix.put(" anonymous:bilbo:w", Boolean.FALSE);
-    testMatrix.put(" anonymous:ANONYMOUS:w", Boolean.TRUE);
-    testMatrix.put(" anonymous::rw", Boolean.TRUE);
-    testMatrix.put(" world:WORLD:rw", Boolean.TRUE);
-
-    Set<String> keys = testMatrix.keySet();
-    for (String key : keys) {
-      if (testMatrix.get(key)) {
-        OzoneAcl.parseAcl(key);
-      } else {
-        try {
-          OzoneAcl.parseAcl(key);
-          // should never get here since parseAcl will throw
-          fail("An exception was expected but did not happen. Key: " + key);
-        } catch (IllegalArgumentException e) {
-          // nothing to do
-        }
-      }
-    }
-  }
-
-  @Test
-  public void testAclValues() throws Exception {
-    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw");
-    assertEquals(acl.getName(), "bilbo");
-    assertTrue(acl.getAclBitSet().get(READ.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE.ordinal()));
-    assertFalse(acl.getAclBitSet().get(ALL.ordinal()));
-    assertFalse(acl.getAclBitSet().get(READ_ACL.ordinal()));
-    assertEquals(ACLIdentityType.USER, acl.getType());
-
-    acl = OzoneAcl.parseAcl("user:bilbo:a");
-    assertEquals("bilbo", acl.getName());
-    assertTrue(acl.getAclBitSet().get(ALL.ordinal()));
-    assertFalse(acl.getAclBitSet().get(WRITE.ordinal()));
-    assertEquals(ACLIdentityType.USER, acl.getType());
-
-    acl = OzoneAcl.parseAcl("user:bilbo:r");
-    assertEquals("bilbo", acl.getName());
-    assertTrue(acl.getAclBitSet().get(READ.ordinal()));
-    assertFalse(acl.getAclBitSet().get(ALL.ordinal()));
-    assertEquals(ACLIdentityType.USER, acl.getType());
-
-    acl = OzoneAcl.parseAcl("user:bilbo:w");
-    assertEquals("bilbo", acl.getName());
-    assertTrue(acl.getAclBitSet().get(WRITE.ordinal()));
-    assertFalse(acl.getAclBitSet().get(ALL.ordinal()));
-    assertEquals(ACLIdentityType.USER, acl.getType());
-
-    acl = OzoneAcl.parseAcl("group:hobbit:a");
-    assertEquals(acl.getName(), "hobbit");
-    assertTrue(acl.getAclBitSet().get(ALL.ordinal()));
-    assertFalse(acl.getAclBitSet().get(READ.ordinal()));
-    assertEquals(ACLIdentityType.GROUP, acl.getType());
-
-    acl = OzoneAcl.parseAcl("world::a");
-    assertEquals(acl.getName(), "WORLD");
-    assertTrue(acl.getAclBitSet().get(ALL.ordinal()));
-    assertFalse(acl.getAclBitSet().get(WRITE.ordinal()));
-    assertEquals(ACLIdentityType.WORLD, acl.getType());
-
-    acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy");
-    assertEquals(acl.getName(), "bilbo");
-    assertTrue(acl.getAclBitSet().get(READ.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(DELETE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(LIST.ordinal()));
-    assertTrue(acl.getAclBitSet().get(NONE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(CREATE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal()));
-    assertFalse(acl.getAclBitSet().get(ALL.ordinal()));
-
-    acl = OzoneAcl.parseAcl("group:hadoop:rwdlncxy");
-    assertEquals(acl.getName(), "hadoop");
-    assertTrue(acl.getAclBitSet().get(READ.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(DELETE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(LIST.ordinal()));
-    assertTrue(acl.getAclBitSet().get(NONE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(CREATE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal()));
-    assertFalse(acl.getAclBitSet().get(ALL.ordinal()));
-    assertEquals(ACLIdentityType.GROUP, acl.getType());
-
-    acl = OzoneAcl.parseAcl("world::rwdlncxy");
-    assertEquals(acl.getName(), "WORLD");
-    assertTrue(acl.getAclBitSet().get(READ.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(DELETE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(LIST.ordinal()));
-    assertTrue(acl.getAclBitSet().get(NONE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(CREATE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal()));
-    assertFalse(acl.getAclBitSet().get(ALL.ordinal()));
-    assertEquals(ACLIdentityType.WORLD, acl.getType());
-
-    // Acls with scope info.
-    acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[DEFAULT]");
-    assertEquals(acl.getName(), "bilbo");
-    assertTrue(acl.getAclBitSet().get(READ.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(DELETE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(LIST.ordinal()));
-    assertTrue(acl.getAclBitSet().get(NONE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(CREATE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal()));
-    assertFalse(acl.getAclBitSet().get(ALL.ordinal()));
-    assertTrue(acl.getAclScope().equals(OzoneAcl.AclScope.DEFAULT));
-
-    acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]");
-    assertEquals(acl.getName(), "bilbo");
-    assertTrue(acl.getAclBitSet().get(READ.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(DELETE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(LIST.ordinal()));
-    assertTrue(acl.getAclBitSet().get(NONE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(CREATE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal()));
-    assertFalse(acl.getAclBitSet().get(ALL.ordinal()));
-    assertTrue(acl.getAclScope().equals(OzoneAcl.AclScope.ACCESS));
-
-    acl = OzoneAcl.parseAcl("group:hadoop:rwdlncxy[ACCESS]");
-    assertEquals(acl.getName(), "hadoop");
-    assertTrue(acl.getAclBitSet().get(READ.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(DELETE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(LIST.ordinal()));
-    assertTrue(acl.getAclBitSet().get(NONE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(CREATE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal()));
-    assertFalse(acl.getAclBitSet().get(ALL.ordinal()));
-    assertEquals(ACLIdentityType.GROUP, acl.getType());
-    assertTrue(acl.getAclScope().equals(OzoneAcl.AclScope.ACCESS));
-
-    acl = OzoneAcl.parseAcl("world::rwdlncxy[DEFAULT]");
-    assertEquals(acl.getName(), "WORLD");
-    assertTrue(acl.getAclBitSet().get(READ.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(DELETE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(LIST.ordinal()));
-    assertTrue(acl.getAclBitSet().get(NONE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(CREATE.ordinal()));
-    assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal()));
-    assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal()));
-    assertFalse(acl.getAclBitSet().get(ALL.ordinal()));
-    assertEquals(ACLIdentityType.WORLD, acl.getType());
-    assertTrue(acl.getAclScope().equals(OzoneAcl.AclScope.DEFAULT));
-
-
-
-    LambdaTestUtils.intercept(IllegalArgumentException.class, "ACL right" +
-            " is not", () -> OzoneAcl.parseAcl("world::rwdlncxncxdfsfgbny"
-    ));
-  }
-
-  @Test
-  public void testBitSetToListConversion() throws Exception {
-    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw");
-
-    List<ACLType> rights = acl.getAclList();
-    assertTrue(rights.size() == 2);
-    assertTrue(rights.contains(READ));
-    assertTrue(rights.contains(WRITE));
-    assertFalse(rights.contains(CREATE));
-
-    acl = OzoneAcl.parseAcl("user:bilbo:a");
-
-    rights = acl.getAclList();
-    assertTrue(rights.size() == 1);
-    assertTrue(rights.contains(ALL));
-    assertFalse(rights.contains(WRITE));
-    assertFalse(rights.contains(CREATE));
-
-    acl = OzoneAcl.parseAcl("user:bilbo:cxy");
-    rights = acl.getAclList();
-    assertTrue(rights.size() == 3);
-    assertTrue(rights.contains(CREATE));
-    assertTrue(rights.contains(READ_ACL));
-    assertTrue(rights.contains(WRITE_ACL));
-    assertFalse(rights.contains(WRITE));
-    assertFalse(rights.contains(READ));
-
-    List<OzoneAcl> acls = OzoneAcl.parseAcls("user:bilbo:cxy,group:hadoop:a");
-    assertTrue(acls.size() == 2);
-    rights = acls.get(0).getAclList();
-    assertTrue(rights.size() == 3);
-    assertTrue(rights.contains(CREATE));
-    assertTrue(rights.contains(READ_ACL));
-    assertTrue(rights.contains(WRITE_ACL));
-    assertFalse(rights.contains(WRITE));
-    assertFalse(rights.contains(READ));
-    rights = acls.get(1).getAclList();
-    assertTrue(rights.contains(ALL));
-
-    acls = OzoneAcl.parseAcls("user:bilbo:cxy[ACCESS]," +
-        "group:hadoop:a[DEFAULT],world::r[DEFAULT]");
-    assertTrue(acls.size() == 3);
-    rights = acls.get(0).getAclList();
-    assertTrue(rights.size() == 3);
-    assertTrue(rights.contains(CREATE));
-    assertTrue(rights.contains(READ_ACL));
-    assertTrue(rights.contains(WRITE_ACL));
-    assertFalse(rights.contains(WRITE));
-    assertFalse(rights.contains(READ));
-    rights = acls.get(1).getAclList();
-    assertTrue(rights.contains(ALL));
-
-    assertTrue(acls.get(0).getName().equals("bilbo"));
-    assertTrue(acls.get(1).getName().equals("hadoop"));
-    assertTrue(acls.get(2).getName().equals("WORLD"));
-    assertTrue(acls.get(0).getAclScope().equals(OzoneAcl.AclScope.ACCESS));
-    assertTrue(acls.get(1).getAclScope().equals(OzoneAcl.AclScope.DEFAULT));
-    assertTrue(acls.get(2).getAclScope().equals(OzoneAcl.AclScope.DEFAULT));
-  }
-
-}
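The test matrix above fixes the ACL string grammar as type:name:rights with an optional [scope] suffix and comma-separated entries. A minimal parsing sketch using the same OzoneAcl API the tests exercise:

import java.util.List;

import org.apache.hadoop.ozone.OzoneAcl;

public final class AclParseSketch {
  public static void main(String[] args) {
    // Grammar exercised by the tests above: type:name:rights[scope],
    // with multiple entries separated by commas.
    List<OzoneAcl> acls = OzoneAcl.parseAcls(
        "user:bilbo:cxy[ACCESS],group:hadoop:a[DEFAULT]");
    for (OzoneAcl acl : acls) {
      System.out.println(acl.getName() + " " + acl.getAclScope()
          + " " + acl.getAclList());
    }
  }
}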
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmMultipartKeyInfoCodec.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmMultipartKeyInfoCodec.java
deleted file mode 100644
index 7a537c0..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmMultipartKeyInfoCodec.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.codec;
-
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.UUID;
-
-/**
- * This class tests OmMultipartKeyInfoCodec.
- */
-public class TestOmMultipartKeyInfoCodec {
-
-  @Test
-  public void testOmMultipartKeyInfoCodec() {
-    OmMultipartKeyInfoCodec codec = new OmMultipartKeyInfoCodec();
-    OmMultipartKeyInfo omMultipartKeyInfo = new OmMultipartKeyInfo(UUID
-        .randomUUID().toString(), new HashMap<>());
-    byte[] data = new byte[0];
-    try {
-      data = codec.toPersistedFormat(omMultipartKeyInfo);
-    } catch (java.io.IOException e) {
-      e.printStackTrace();
-    }
-    Assert.assertNotNull(data);
-
-    OmMultipartKeyInfo multipartKeyInfo = null;
-    try {
-      multipartKeyInfo = codec.fromPersistedFormat(data);
-    } catch (java.io.IOException e) {
-      e.printStackTrace();
-    }
-    Assert.assertEquals(omMultipartKeyInfo, multipartKeyInfo);
-
-    // Passing random byte data should fail with an IllegalArgumentException.
-    try {
-      codec.fromPersistedFormat("random".getBytes());
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("Can't encode the the raw " +
-          "data from the byte array", ex);
-    } catch (java.io.IOException e) {
-      e.printStackTrace();
-    }
-
-  }
-}
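This codec test and the two that follow share one round-trip contract: toPersistedFormat followed by fromPersistedFormat must return an equal object. A minimal sketch of that contract, using the same constructor the test above uses:

import java.io.IOException;
import java.util.HashMap;
import java.util.UUID;

import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;

public final class CodecRoundTripSketch {
  public static void main(String[] args) throws IOException {
    OmMultipartKeyInfoCodec codec = new OmMultipartKeyInfoCodec();
    OmMultipartKeyInfo original = new OmMultipartKeyInfo(
        UUID.randomUUID().toString(), new HashMap<>());
    // Round-trip: serialize, deserialize, and expect an equal object back.
    byte[] persisted = codec.toPersistedFormat(original);
    OmMultipartKeyInfo restored = codec.fromPersistedFormat(persisted);
    if (!original.equals(restored)) {
      throw new AssertionError("codec round-trip lost information");
    }
  }
}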
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java
deleted file mode 100644
index f06bf38..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.codec;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.util.LinkedList;
-import java.util.List;
-
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * This class tests OmPrefixInfoCodec.
- */
-public class TestOmPrefixInfoCodec {
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-
-  private OmPrefixInfoCodec codec;
-
-  @Before
-  public void setUp() {
-    codec = new OmPrefixInfoCodec();
-  }
-
-  @Test
-  public void testCodecWithIncorrectValues() throws Exception {
-    try {
-      codec.fromPersistedFormat("random".getBytes(StandardCharsets.UTF_8));
-      fail("testCodecWithIncorrectValues failed");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("Can't encode the the raw " +
-          "data from the byte array", ex);
-    }
-  }
-
-  @Test
-  public void testCodecWithNullDataFromTable() throws Exception {
-    thrown.expect(NullPointerException.class);
-    codec.fromPersistedFormat(null);
-  }
-
-
-  @Test
-  public void testCodecWithNullDataFromUser() throws Exception {
-    thrown.expect(NullPointerException.class);
-    codec.toPersistedFormat(null);
-  }
-
-  @Test
-  public void testToAndFromPersistedFormat() throws IOException {
-
-    List<OzoneAcl> acls = new LinkedList<>();
-    OzoneAcl ozoneAcl = new OzoneAcl(ACLIdentityType.USER,
-        "hive", ACLType.ALL, ACCESS);
-    acls.add(ozoneAcl);
-    OmPrefixInfo opiSave = OmPrefixInfo.newBuilder()
-        .setName("/user/hive/warehouse")
-        .setAcls(acls)
-        .addMetadata("id", "100")
-        .build();
-
-    OmPrefixInfo opiLoad = codec.fromPersistedFormat(
-        codec.toPersistedFormat(opiSave));
-
-    assertTrue("Load saved prefix info should match",
-        opiLoad.equals(opiSave));
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestS3SecretValueCodec.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestS3SecretValueCodec.java
deleted file mode 100644
index 549c374..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestS3SecretValueCodec.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.codec;
-
-import java.nio.charset.StandardCharsets;
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import static org.junit.Assert.fail;
-
-/**
- * This class tests S3SecretValueCodec.
- */
-public class TestS3SecretValueCodec {
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  private S3SecretValueCodec codec;
-
-  @Before
-  public void initialize() {
-    codec = new S3SecretValueCodec();
-  }
-  @Test
-  public void testCodecWithCorrectData() throws Exception {
-
-    S3SecretValue s3SecretValue =
-        new S3SecretValue(UUID.randomUUID().toString(),
-            UUID.randomUUID().toString());
-
-    byte[] data = codec.toPersistedFormat(s3SecretValue);
-    Assert.assertNotNull(data);
-
-    S3SecretValue decodedS3Secret = codec.fromPersistedFormat(data);
-
-    Assert.assertEquals(s3SecretValue, decodedS3Secret);
-
-  }
-
-  @Test
-  public void testCodecWithIncorrectValues() throws Exception {
-    try {
-      codec.fromPersistedFormat("random".getBytes(StandardCharsets.UTF_8));
-      fail("testCodecWithIncorrectValues failed");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("Can't encode the the raw " +
-          "data from the byte array", ex);
-    }
-  }
-
-  @Test
-  public void testCodecWithNullDataFromTable() throws Exception {
-    thrown.expect(NullPointerException.class);
-    codec.fromPersistedFormat(null);
-  }
-
-
-  @Test
-  public void testCodecWithNullDataFromUser() throws Exception {
-    thrown.expect(NullPointerException.class);
-    codec.toPersistedFormat(null);
-  }
-}
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java
deleted file mode 100644
index 8b5690a..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * <p>
- * Utility classes to encode/decode DTO objects to/from byte array.
- */
-
-/**
- * Unit tests for codecs in OM.
- */
-package org.apache.hadoop.ozone.om.codec;
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/exceptions/TestResultCodes.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/exceptions/TestResultCodes.java
deleted file mode 100644
index 24b5307..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/exceptions/TestResultCodes.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.exceptions;
-
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test code mapping.
- */
-public class TestResultCodes {
-
-  @Test
-  public void codeMapping() {
-    Assert.assertEquals(ResultCodes.values().length, Status.values().length);
-    for (int i = 0; i < ResultCodes.values().length; i++) {
-      ResultCodes codeValue = ResultCodes.values()[i];
-      Status protoBufValue = Status.values()[i];
-      Assert.assertTrue(String
-          .format("Protobuf/Enum constant name mismatch %s %s", codeValue,
-              protoBufValue), sameName(codeValue.name(), protoBufValue.name()));
-      ResultCodes converted = ResultCodes.values()[protoBufValue.ordinal()];
-      Assert.assertEquals(codeValue, converted);
-
-    }
-  }
-
-  private boolean sameName(String codeValue, String protoBufValue) {
-    return codeValue.equals(protoBufValue);
-  }
-}
\ No newline at end of file
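
The check above, keeping a hand-written result enum and a generated protobuf enum aligned by length, ordinal, and name, generalizes to any enum pair. A self-contained sketch with two illustrative enums (the constant names here are hypothetical):

// Two enums that must stay in lockstep; the loop mirrors the removed test.
enum ResultCode { OK, VOLUME_NOT_FOUND, BUCKET_NOT_FOUND }
enum ProtoStatus { OK, VOLUME_NOT_FOUND, BUCKET_NOT_FOUND }

public class EnumSyncCheck {
  public static void main(String[] args) {
    if (ResultCode.values().length != ProtoStatus.values().length) {
      throw new AssertionError("enum sizes differ");
    }
    for (int i = 0; i < ResultCode.values().length; i++) {
      // Matching names at the same ordinal give a stable wire mapping.
      if (!ResultCode.values()[i].name()
          .equals(ProtoStatus.values()[i].name())) {
        throw new AssertionError("name mismatch at ordinal " + i);
      }
    }
    System.out.println("enums are in sync");
  }
}
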
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java
deleted file mode 100644
index a3bc8ad..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test BucketInfo.
- */
-public class TestOmBucketInfo {
-
-  @Test
-  public void protobufConversion() {
-    OmBucketInfo bucket = OmBucketInfo.newBuilder()
-        .setBucketName("bucket")
-        .setVolumeName("vol1")
-        .setCreationTime(1L)
-        .setIsVersionEnabled(false)
-        .setStorageType(StorageType.ARCHIVE)
-        .build();
-
-    OmBucketInfo afterSerialization =
-        OmBucketInfo.getFromProtobuf(bucket.getProtobuf());
-
-    Assert.assertEquals(bucket, afterSerialization);
-
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java
deleted file mode 100644
index a1fa324..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo.Builder;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test OmKeyInfo.
- */
-public class TestOmKeyInfo {
-
-  @Test
-  public void protobufConversion() {
-    OmKeyInfo key = new Builder()
-        .setKeyName("key1")
-        .setBucketName("bucket")
-        .setVolumeName("vol1")
-        .setCreationTime(123L)
-        .setModificationTime(123L)
-        .setDataSize(123L)
-        .setReplicationFactor(ReplicationFactor.THREE)
-        .setReplicationType(ReplicationType.RATIS)
-        .addMetadata("key1", "value1")
-        .addMetadata("key2", "value2")
-        .build();
-
-    OmKeyInfo keyAfterSerialization =
-        OmKeyInfo.getFromProtobuf(key.getProtobuf());
-
-    Assert.assertEquals(key, keyAfterSerialization);
-  }
-}
\ No newline at end of file
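
This test and TestOmBucketInfo above assert the same property: building a value, converting it to its protobuf form, and converting back yields an equal object. A self-contained sketch of that round-trip, with a hypothetical value class and a plain string standing in for the protobuf wire format:

// Hypothetical value object; toWire/fromWire stand in for the
// getProtobuf()/getFromProtobuf() pair used by the removed tests.
final class KeyMeta {
  final String name;
  final long size;

  KeyMeta(String name, long size) {
    this.name = name;
    this.size = size;
  }

  String toWire() {
    return name + ":" + size;
  }

  static KeyMeta fromWire(String wire) {
    int idx = wire.lastIndexOf(':');
    return new KeyMeta(wire.substring(0, idx),
        Long.parseLong(wire.substring(idx + 1)));
  }

  @Override
  public boolean equals(Object o) {
    return o instanceof KeyMeta
        && size == ((KeyMeta) o).size
        && name.equals(((KeyMeta) o).name);
  }

  @Override
  public int hashCode() {
    return name.hashCode() * 31 + Long.hashCode(size);
  }

  public static void main(String[] args) {
    KeyMeta key = new KeyMeta("key1", 123L);
    // Round-trip equality is exactly what the removed tests assert.
    System.out.println(key.equals(KeyMeta.fromWire(key.toWire())));
  }
}
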
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartUpload.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartUpload.java
deleted file mode 100644
index f321da2..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartUpload.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test utilities inside OmMultipartUpload.
- */
-public class TestOmMultipartUpload {
-
-  @Test
-  public void from() {
-    String key1 =
-        OmMultipartUpload.getDbKey("vol1", "bucket1", "dir1/key1", "uploadId");
-    OmMultipartUpload info = OmMultipartUpload.from(key1);
-
-    Assert.assertEquals("vol1", info.getVolumeName());
-    Assert.assertEquals("bucket1", info.getBucketName());
-    Assert.assertEquals("dir1/key1", info.getKeyName());
-    Assert.assertEquals("uploadId", info.getUploadId());
-  }
-}
\ No newline at end of file
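
The getDbKey/from pair tested above composes a delimiter-separated database key and parses it back, where only the object key itself may contain further delimiters. A rough sketch of that idea (the '/' delimiter and the field order are assumptions for illustration):

// Compose "vol/bucket/key/uploadId" and split it back; the key may
// contain '/', so parsing peels the fixed fields off both ends.
public class MultipartKeyCodec {

  static String toDbKey(String vol, String bucket, String key, String id) {
    return vol + "/" + bucket + "/" + key + "/" + id;
  }

  static String[] fromDbKey(String dbKey) {
    int first = dbKey.indexOf('/');
    int second = dbKey.indexOf('/', first + 1);
    int last = dbKey.lastIndexOf('/');
    return new String[] {
        dbKey.substring(0, first),           // volume
        dbKey.substring(first + 1, second),  // bucket
        dbKey.substring(second + 1, last),   // key (may contain '/')
        dbKey.substring(last + 1)            // upload id
    };
  }

  public static void main(String[] args) {
    String[] parts = fromDbKey(toDbKey("vol1", "bucket1", "dir1/key1", "u1"));
    System.out.println(String.join(" | ", parts));
  }
}
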
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java
deleted file mode 100644
index b1a4e45..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.BitSet;
-import java.util.List;
-
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for OzoneAcls utility class.
- */
-public class TestOzoneAclUtil {
-
-  private static final List<OzoneAcl> DEFAULT_ACLS =
-      getDefaultAcls(new OzoneConfiguration());
-
-  private static final OzoneAcl USER1 = new OzoneAcl(USER, "user1",
-      ACLType.READ_ACL, ACCESS);
-
-  private static final OzoneAcl USER2 = new OzoneAcl(USER, "user2",
-      ACLType.WRITE, ACCESS);
-
-  private static final OzoneAcl GROUP1 = new OzoneAcl(GROUP, "group1",
-      ACLType.ALL, ACCESS);
-
-  @Test
-  public void testAddAcl() throws IOException {
-    List<OzoneAcl> currentAcls = getDefaultAcls(new OzoneConfiguration());
-    assertTrue(currentAcls.size() > 0);
-
-    // Add new permission to existing acl entry.
-    OzoneAcl oldAcl = currentAcls.get(0);
-    OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(),
-        ACLType.READ_ACL, ACCESS);
-
-    addAndVerifyAcl(currentAcls, newAcl, true, DEFAULT_ACLS.size());
-    // Add same permission again and verify result
-    addAndVerifyAcl(currentAcls, newAcl, false, DEFAULT_ACLS.size());
-
-    // Add a new user acl entry.
-    addAndVerifyAcl(currentAcls, USER1, true, DEFAULT_ACLS.size() + 1);
-    // Add same acl entry again and verify result
-    addAndVerifyAcl(currentAcls, USER1, false, DEFAULT_ACLS.size() + 1);
-
-    // Add a new group acl entry.
-    addAndVerifyAcl(currentAcls, GROUP1, true, DEFAULT_ACLS.size() + 2);
-    // Add same acl entry again and verify result
-    addAndVerifyAcl(currentAcls, GROUP1, false, DEFAULT_ACLS.size() + 2);
-  }
-
-  @Test
-  public void testRemoveAcl() {
-    List<OzoneAcl> currentAcls = null;
-
-    // add/remove to/from null OzoneAcls
-    removeAndVerifyAcl(currentAcls, USER1, false, 0);
-    addAndVerifyAcl(currentAcls, USER1, false, 0);
-    removeAndVerifyAcl(currentAcls, USER1, false, 0);
-
-    currentAcls = getDefaultAcls(new OzoneConfiguration());
-    assertTrue(currentAcls.size() > 0);
-
-    // Add new permission to existing acl entry.
-    OzoneAcl oldAcl = currentAcls.get(0);
-    OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(),
-        ACLType.READ_ACL, ACCESS);
-
-    // Remove non existing acl entry
-    removeAndVerifyAcl(currentAcls, USER1, false, DEFAULT_ACLS.size());
-
-    // Remove non existing acl permission
-    removeAndVerifyAcl(currentAcls, newAcl, false, DEFAULT_ACLS.size());
-
-    // Add new permission to existing acl entry.
-    addAndVerifyAcl(currentAcls, newAcl, true, DEFAULT_ACLS.size());
-
-    // Remove the new permission added.
-    removeAndVerifyAcl(currentAcls, newAcl, true, DEFAULT_ACLS.size());
-
-    removeAndVerifyAcl(currentAcls, oldAcl, true, DEFAULT_ACLS.size() - 1);
-  }
-
-  private void addAndVerifyAcl(List<OzoneAcl> currentAcls, OzoneAcl addedAcl,
-      boolean expectedResult, int expectedSize) {
-    assertEquals(expectedResult, OzoneAclUtil.addAcl(currentAcls, addedAcl));
-    if (currentAcls != null) {
-      boolean verified = verifyAclAdded(currentAcls, addedAcl);
-      assertTrue("addedAcl: " + addedAcl + " should exist in the" +
-          " current acls: " + currentAcls, verified);
-      assertEquals(expectedSize, currentAcls.size());
-    }
-  }
-
-  private void removeAndVerifyAcl(List<OzoneAcl> currentAcls,
-      OzoneAcl removedAcl, boolean expectedResult, int expectedSize) {
-    assertEquals(expectedResult, OzoneAclUtil.removeAcl(currentAcls,
-        removedAcl));
-    if (currentAcls != null) {
-      boolean verified = verifyAclRemoved(currentAcls, removedAcl);
-      assertTrue("removedAcl: " + removedAcl + " should not exist in the" +
-          " current acls: " + currentAcls, verified);
-      assertEquals(expectedSize, currentAcls.size());
-    }
-  }
-
-  private boolean verifyAclRemoved(List<OzoneAcl> acls, OzoneAcl removedAcl) {
-    for (OzoneAcl acl : acls) {
-      if (acl.getName().equals(removedAcl.getName()) &&
-          acl.getType().equals(removedAcl.getType()) &&
-          acl.getAclScope().equals(removedAcl.getAclScope())) {
-        BitSet temp = (BitSet) acl.getAclBitSet().clone();
-        temp.and(removedAcl.getAclBitSet());
-        return !temp.equals(removedAcl.getAclBitSet());
-      }
-    }
-    return true;
-  }
-
-  private boolean verifyAclAdded(List<OzoneAcl> acls, OzoneAcl newAcl) {
-    for (OzoneAcl acl : acls) {
-      if (acl.getName().equals(newAcl.getName()) &&
-          acl.getType().equals(newAcl.getType()) &&
-          acl.getAclScope().equals(newAcl.getAclScope())) {
-        BitSet temp = (BitSet) acl.getAclBitSet().clone();
-        temp.and(newAcl.getAclBitSet());
-        return temp.equals(newAcl.getAclBitSet());
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Helper function to get the default acl list for the current user.
-   *
-   * @return list of ozoneAcls.
-   */
-  private static List<OzoneAcl> getDefaultAcls(OzoneConfiguration conf) {
-    List<OzoneAcl> ozoneAcls = new ArrayList<>();
-    //User ACL
-    UserGroupInformation ugi;
-    try {
-      ugi = UserGroupInformation.getCurrentUser();
-    } catch (IOException ioe) {
-      ugi = UserGroupInformation.createRemoteUser("user0");
-    }
-
-    OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class);
-    IAccessAuthorizer.ACLType userRights = aclConfig.getUserDefaultRights();
-    IAccessAuthorizer.ACLType groupRights = aclConfig.getGroupDefaultRights();
-
-    OzoneAclUtil.addAcl(ozoneAcls, new OzoneAcl(USER,
-        ugi.getUserName(), userRights, ACCESS));
-    //Group ACLs of the User
-    List<String> userGroups = Arrays.asList(ugi.getGroupNames());
-    userGroups.forEach(group -> OzoneAclUtil.addAcl(ozoneAcls,
-        new OzoneAcl(GROUP, group, groupRights, ACCESS)));
-    return ozoneAcls;
-  }
-}
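
The addAcl semantics exercised above merge permission bits when an entry for the same principal already exists, and report whether anything actually changed. A minimal BitSet sketch of that merge (the map-keyed-by-principal shape is an illustration, not the real OzoneAclUtil):

import java.util.BitSet;
import java.util.HashMap;
import java.util.Map;

// Merge-or-insert semantics: returns true only if the ACL set changed,
// mirroring what the removed test asserts for OzoneAclUtil.addAcl.
public class AclMerge {

  static boolean addAcl(Map<String, BitSet> acls, String who, int rightBit) {
    BitSet existing = acls.get(who);
    if (existing == null) {
      BitSet bits = new BitSet();
      bits.set(rightBit);
      acls.put(who, bits);
      return true;               // new entry added
    }
    if (existing.get(rightBit)) {
      return false;              // permission already present, no change
    }
    existing.set(rightBit);      // merge the new bit into the entry
    return true;
  }

  public static void main(String[] args) {
    Map<String, BitSet> acls = new HashMap<>();
    System.out.println(addAcl(acls, "user1", 0)); // true, new entry
    System.out.println(addAcl(acls, "user1", 0)); // false, idempotent
    System.out.println(addAcl(acls, "user1", 3)); // true, bit merged
  }
}
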
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/package-info.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/package-info.java
deleted file mode 100644
index e62423a..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.helpers;
-/**
- * Unit tests of helpers.
- */
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java
deleted file mode 100644
index 8438cbf..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java
+++ /dev/null
@@ -1,348 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.lock;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Stack;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import static org.junit.Assert.fail;
-
-/**
- * Class tests OzoneManagerLock.
- */
-public class TestOzoneManagerLock {
-  @Test
-  public void acquireResourceLock() {
-    String[] resourceName;
-    for (OzoneManagerLock.Resource resource :
-        OzoneManagerLock.Resource.values()) {
-      resourceName = generateResourceName(resource);
-      testResourceLock(resourceName, resource);
-    }
-  }
-
-  private void testResourceLock(String[] resourceName,
-      OzoneManagerLock.Resource resource) {
-    OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());
-    lock.acquireLock(resource, resourceName);
-    lock.releaseLock(resource, resourceName);
-    Assert.assertTrue(true);
-  }
-
-  @Test
-  public void reacquireResourceLock() {
-    String[] resourceName;
-    for (OzoneManagerLock.Resource resource :
-        OzoneManagerLock.Resource.values()) {
-      resourceName = generateResourceName(resource);
-      testResourceReacquireLock(resourceName, resource);
-    }
-  }
-
-  private void testResourceReacquireLock(String[] resourceName,
-      OzoneManagerLock.Resource resource) {
-    OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());
-
-    // Lock re-acquire not allowed by same thread.
-    if (resource == OzoneManagerLock.Resource.USER_LOCK ||
-        resource == OzoneManagerLock.Resource.S3_SECRET_LOCK ||
-        resource == OzoneManagerLock.Resource.PREFIX_LOCK){
-      lock.acquireLock(resource, resourceName);
-      try {
-        lock.acquireLock(resource, resourceName);
-        fail("reacquireResourceLock failed");
-      } catch (RuntimeException ex) {
-        String message = "cannot acquire " + resource.getName() + " lock " +
-            "while holding [" + resource.getName() + "] lock(s).";
-        Assert.assertTrue(ex.getMessage(), ex.getMessage().contains(message));
-      }
-      lock.releaseLock(resource, resourceName);
-      Assert.assertTrue(true);
-    } else {
-      lock.acquireLock(resource, resourceName);
-      lock.acquireLock(resource, resourceName);
-      lock.releaseLock(resource, resourceName);
-      lock.releaseLock(resource, resourceName);
-      Assert.assertTrue(true);
-    }
-  }
-
-  @Test
-  public void testLockingOrder() {
-    OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());
-    String[] resourceName;
-
-    // This test iterates over all resources. For each resource it
-    // acquires the lock, then in an inner loop acquires all locks with a
-    // higher lock level, and finally releases all the locks.
-    for (OzoneManagerLock.Resource resource :
-        OzoneManagerLock.Resource.values()) {
-      Stack<ResourceInfo> stack = new Stack<>();
-      resourceName = generateResourceName(resource);
-      lock.acquireLock(resource, resourceName);
-      stack.push(new ResourceInfo(resourceName, resource));
-      for (OzoneManagerLock.Resource higherResource :
-          OzoneManagerLock.Resource.values()) {
-        if (higherResource.getMask() > resource.getMask()) {
-          resourceName = generateResourceName(higherResource);
-          lock.acquireLock(higherResource, resourceName);
-          stack.push(new ResourceInfo(resourceName, higherResource));
-        }
-      }
-      // Now release locks
-      while (!stack.empty()) {
-        ResourceInfo resourceInfo = stack.pop();
-        lock.releaseLock(resourceInfo.getResource(),
-            resourceInfo.getLockName());
-      }
-    }
-    Assert.assertTrue(true);
-  }
-
-  @Test
-  public void testLockViolationsWithOneHigherLevelLock() {
-    OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());
-    for (OzoneManagerLock.Resource resource :
-        OzoneManagerLock.Resource.values()) {
-      for (OzoneManagerLock.Resource higherResource :
-          OzoneManagerLock.Resource.values()) {
-        if (higherResource.getMask() > resource.getMask()) {
-          String[] resourceName = generateResourceName(higherResource);
-          lock.acquireLock(higherResource, resourceName);
-          try {
-            lock.acquireLock(resource, generateResourceName(resource));
-            fail("testLockViolationsWithOneHigherLevelLock failed");
-          } catch (RuntimeException ex) {
-            String message = "cannot acquire " + resource.getName() + " lock " +
-                "while holding [" + higherResource.getName() + "] lock(s).";
-            Assert.assertTrue(ex.getMessage(),
-                ex.getMessage().contains(message));
-          }
-          lock.releaseLock(higherResource, resourceName);
-        }
-      }
-    }
-  }
-
-  @Test
-  public void testLockViolations() {
-    OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());
-    String[] resourceName;
-
-    // This test iterates over all resources. For each resource it first
-    // acquires a higher-level lock and then tries to take the lower-level
-    // lock, which should fail. This way it covers all error combinations.
-    for (OzoneManagerLock.Resource resource :
-        OzoneManagerLock.Resource.values()) {
-      Stack<ResourceInfo> stack = new Stack<>();
-      List<String> currentLocks = new ArrayList<>();
-      for (OzoneManagerLock.Resource higherResource :
-          OzoneManagerLock.Resource.values()) {
-        if (higherResource.getMask() > resource.getMask()) {
-          resourceName = generateResourceName(higherResource);
-          lock.acquireLock(higherResource, resourceName);
-          stack.push(new ResourceInfo(resourceName, higherResource));
-          currentLocks.add(higherResource.getName());
-          // try to acquire lower level lock
-          try {
-            resourceName = generateResourceName(resource);
-            lock.acquireLock(resource, resourceName);
-          } catch (RuntimeException ex) {
-            String message = "cannot acquire " + resource.getName() + " lock " +
-                "while holding " + currentLocks.toString() + " lock(s).";
-            Assert.assertTrue(ex.getMessage(),
-                ex.getMessage().contains(message));
-          }
-        }
-      }
-
-      // Now release locks
-      while (!stack.empty()) {
-        ResourceInfo resourceInfo = stack.pop();
-        lock.releaseLock(resourceInfo.getResource(),
-            resourceInfo.getLockName());
-      }
-    }
-  }
-
-  @Test
-  public void releaseLockWithOutAcquiringLock() {
-    OzoneManagerLock lock =
-        new OzoneManagerLock(new OzoneConfiguration());
-    try {
-      lock.releaseLock(OzoneManagerLock.Resource.USER_LOCK, "user3");
-      fail("releaseLockWithOutAcquiringLock failed");
-    } catch (IllegalMonitorStateException ex) {
-      String message = "Releasing lock on resource $user3 without acquiring " +
-          "lock";
-      Assert.assertTrue(ex.getMessage(), ex.getMessage().contains(message));
-    }
-  }
-
-
-  private String[] generateResourceName(OzoneManagerLock.Resource resource) {
-    if (resource == OzoneManagerLock.Resource.BUCKET_LOCK) {
-      return new String[]{UUID.randomUUID().toString(),
-          UUID.randomUUID().toString()};
-    } else {
-      return new String[]{UUID.randomUUID().toString()};
-    }
-  }
-
-
-  /**
-   * Class used to store locked resource info.
-   */
-  public class ResourceInfo {
-    private String[] lockName;
-    private OzoneManagerLock.Resource resource;
-
-    ResourceInfo(String[] resourceName, OzoneManagerLock.Resource resource) {
-      this.lockName = resourceName;
-      this.resource = resource;
-    }
-
-    public String[] getLockName() {
-      return lockName;
-    }
-
-    public OzoneManagerLock.Resource getResource() {
-      return resource;
-    }
-  }
-
-  @Test
-  public void acquireMultiUserLock() {
-    OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());
-    lock.acquireMultiUserLock("user1", "user2");
-    lock.releaseMultiUserLock("user1", "user2");
-    Assert.assertTrue(true);
-  }
-
-  @Test
-  public void reAcquireMultiUserLock() {
-    OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());
-    lock.acquireMultiUserLock("user1", "user2");
-    try {
-      lock.acquireMultiUserLock("user1", "user2");
-      fail("reAcquireMultiUserLock failed");
-    } catch (RuntimeException ex) {
-      String message = "cannot acquire USER_LOCK lock while holding " +
-          "[USER_LOCK] lock(s).";
-      Assert.assertTrue(ex.getMessage(), ex.getMessage().contains(message));
-    }
-    lock.releaseMultiUserLock("user1", "user2");
-  }
-
-  @Test
-  public void acquireMultiUserLockAfterUserLock() {
-    OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());
-    lock.acquireLock(OzoneManagerLock.Resource.USER_LOCK, "user3");
-    try {
-      lock.acquireMultiUserLock("user1", "user2");
-      fail("acquireMultiUserLockAfterUserLock failed");
-    } catch (RuntimeException ex) {
-      String message = "cannot acquire USER_LOCK lock while holding " +
-          "[USER_LOCK] lock(s).";
-      Assert.assertTrue(ex.getMessage(), ex.getMessage().contains(message));
-    }
-    lock.releaseLock(OzoneManagerLock.Resource.USER_LOCK, "user3");
-  }
-
-  @Test
-  public void acquireUserLockAfterMultiUserLock() {
-    OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());
-    lock.acquireMultiUserLock("user1", "user2");
-    try {
-      lock.acquireLock(OzoneManagerLock.Resource.USER_LOCK, "user3");
-      fail("acquireUserLockAfterMultiUserLock failed");
-    } catch (RuntimeException ex) {
-      String message = "cannot acquire USER_LOCK lock while holding " +
-          "[USER_LOCK] lock(s).";
-      Assert.assertTrue(ex.getMessage(), ex.getMessage().contains(message));
-    }
-    lock.releaseMultiUserLock("user1", "user2");
-  }
-
-  @Test
-  public void testLockResourceParallel() throws Exception {
-    OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());
-
-    for (OzoneManagerLock.Resource resource :
-        OzoneManagerLock.Resource.values()) {
-      final String[] resourceName = generateResourceName(resource);
-      lock.acquireLock(resource, resourceName);
-
-      AtomicBoolean gotLock = new AtomicBoolean(false);
-      new Thread(() -> {
-        lock.acquireLock(resource, resourceName);
-        gotLock.set(true);
-        lock.releaseLock(resource, resourceName);
-      }).start();
-      // Let's give some time for the new thread to run
-      Thread.sleep(100);
-      // Since the new thread is trying to get lock on same resource,
-      // it will wait.
-      Assert.assertFalse(gotLock.get());
-      lock.releaseLock(resource, resourceName);
-      // Since we have released the lock, the new thread should have the lock
-      // now.
-      // Let's give some time for the new thread to run
-      Thread.sleep(100);
-      Assert.assertTrue(gotLock.get());
-    }
-
-  }
-
-  @Test
-  public void testMultiLockResourceParallel() throws Exception {
-    OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());
-    lock.acquireMultiUserLock("user2", "user1");
-
-    AtomicBoolean gotLock = new AtomicBoolean(false);
-    new Thread(() -> {
-      lock.acquireMultiUserLock("user1", "user2");
-      gotLock.set(true);
-      lock.releaseMultiUserLock("user1", "user2");
-    }).start();
-    // Let's give some time for the new thread to run
-    Thread.sleep(100);
-    // Since the new thread is trying to get lock on same resource, it will
-    // wait.
-    Assert.assertFalse(gotLock.get());
-    lock.releaseMultiUserLock("user2", "user1");
-    // Since we have released the lock, the new thread should have the lock
-    // now.
-    // Let's give some time for the new thread to run
-    Thread.sleep(100);
-    Assert.assertTrue(gotLock.get());
-  }
-}
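
The ordering rule these tests probe from both directions, a thread may climb to higher-level locks but never take a lower-level lock while holding a higher one, can be enforced with bit masks. A simplified single-thread sketch (the mask values and error message are illustrative):

// Each resource level owns one bit; 'held' accumulates the held levels.
// Acquiring is legal only when no held bit sits above the requested one.
public class LockOrder {
  private int held = 0;

  void acquire(int levelMask) {
    // Any held bit strictly greater than levelMask is a higher-level lock.
    if (Integer.highestOneBit(held) > levelMask) {
      throw new RuntimeException(
          "cannot acquire lower-level lock while holding higher-level lock");
    }
    held |= levelMask;
  }

  void release(int levelMask) {
    held &= ~levelMask;
  }

  public static void main(String[] args) {
    int userLock = 1, bucketLock = 2, keyLock = 4;
    LockOrder lock = new LockOrder();
    lock.acquire(userLock);
    lock.acquire(keyLock);      // climbing to a higher level is allowed
    try {
      lock.acquire(bucketLock); // lower than the held keyLock: rejected
    } catch (RuntimeException ex) {
      System.out.println(ex.getMessage());
    }
  }
}
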
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/package-info.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/package-info.java
deleted file mode 100644
index 149794a..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.lock;
-/**
- * Unit tests of OzoneManager lock.
- */
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java
deleted file mode 100644
index 2784b6c..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.security;
-
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.util.Arrays;
-import java.util.Collection;
-
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for {@link AWSV4AuthValidator}.
- * */
-@RunWith(Parameterized.class)
-public class TestAWSV4AuthValidator {
-
-  private String strToSign;
-  private String signature;
-  private String awsAccessKey;
-
-  public TestAWSV4AuthValidator(String strToSign, String signature,
-      String awsAccessKey) {
-    this.strToSign = strToSign;
-    this.signature = signature;
-    this.awsAccessKey = awsAccessKey;
-  }
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {
-            "AWS4-HMAC-SHA256\n" +
-                "20190221T002037Z\n" +
-                "20190221/us-west-1/s3/aws4_request\n" +
-                "c297c080cce4e0927779823d3fd1f5cae71481a8f7dfc7e18d" +
-                "91851294efc47d",
-            "56ec73ba1974f8feda8365c3caef89c5d4a688d5f9baccf" +
-                "4765f46a14cd745ad",
-            "dbaksbzljandlkandlsd"
-        },
-        {
-            "AWS4-HMAC-SHA256\n" +
-                "20150830T123600Z\n" +
-                "20150830/us-east-1/iam/aws4_request\n" +
-                "f536975d06c0309214f805bb90ccff089219ecd68b2" +
-                "577efef23edd43b7e1a59",
-            "5d672d79c15b13162d9279b0855cfba" +
-                "6789a8edb4c82c400e06b5924a6f2b5d7",
-            "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"
-        }
-
-    });
-  }
-
-  @Test
-  public void testValidateRequest() {
-    assertTrue(AWSV4AuthValidator.validateRequest(strToSign, signature,
-        awsAccessKey));
-  }
-}
\ No newline at end of file
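
Validating an AWS V4 request, as parameterized above, comes down to recomputing an HMAC-SHA256 over the string-to-sign and comparing lowercase hex digests. A bare-bones sketch with the JDK crypto API; note the real validator first derives the signing key from the secret through several chained HMAC rounds, which is elided here:

import java.nio.charset.StandardCharsets;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

// Recompute HMAC-SHA256(strToSign) under the candidate key and compare
// the lowercase hex digest with the signature the client supplied.
public class SignatureCheck {

  static boolean matches(String strToSign, String key, String signature)
      throws Exception {
    Mac mac = Mac.getInstance("HmacSHA256");
    mac.init(new SecretKeySpec(
        key.getBytes(StandardCharsets.UTF_8), "HmacSHA256"));
    byte[] digest = mac.doFinal(strToSign.getBytes(StandardCharsets.UTF_8));
    StringBuilder hex = new StringBuilder();
    for (byte b : digest) {
      hex.append(String.format("%02x", b));
    }
    return hex.toString().equals(signature);
  }

  public static void main(String[] args) throws Exception {
    System.out.println(matches("string-to-sign", "secret", "deadbeef"));
  }
}
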
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
deleted file mode 100644
index 39c6220..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.security;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.security.SecureRandom;
-
-/**
- * Tests GDPRSymmetricKey structure.
- */
-public class TestGDPRSymmetricKey {
-
-  @Test
-  public void testKeyGenerationWithDefaults() throws Exception {
-    GDPRSymmetricKey gkey = new GDPRSymmetricKey(new SecureRandom());
-
-    Assert.assertTrue(gkey.getCipher().getAlgorithm()
-        .equalsIgnoreCase(OzoneConsts.GDPR_ALGORITHM_NAME));
-
-    gkey.getKeyDetails().forEach(
-        (k, v) -> Assert.assertTrue(v.length() > 0));
-  }
-
-  @Test
-  public void testKeyGenerationWithValidInput() throws Exception {
-    GDPRSymmetricKey gkey = new GDPRSymmetricKey(
-        RandomStringUtils.randomAlphabetic(16),
-        OzoneConsts.GDPR_ALGORITHM_NAME);
-
-    Assert.assertTrue(gkey.getCipher().getAlgorithm()
-        .equalsIgnoreCase(OzoneConsts.GDPR_ALGORITHM_NAME));
-
-    gkey.getKeyDetails().forEach(
-        (k, v) -> Assert.assertTrue(v.length() > 0));
-  }
-
-  @Test
-  public void testKeyGenerationWithInvalidInput() throws Exception {
-    GDPRSymmetricKey gkey = null;
-    try {
-      gkey = new GDPRSymmetricKey(RandomStringUtils.randomAlphabetic(5),
-          OzoneConsts.GDPR_ALGORITHM_NAME);
-      Assert.fail("expected IllegalArgumentException for short secret");
-    } catch (IllegalArgumentException ex) {
-      Assert.assertTrue(ex.getMessage()
-          .equalsIgnoreCase("Secret must be exactly 16 characters"));
-      Assert.assertTrue(gkey == null);
-    }
-  }
-
-
-}
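
The 16-character secret requirement asserted above follows from AES-128's 16-byte key size. A minimal sketch of building such a key with the JDK crypto API (the class shape is illustrative, not the real GDPRSymmetricKey; the message text is the one the removed test expects):

import java.nio.charset.StandardCharsets;
import javax.crypto.Cipher;
import javax.crypto.spec.SecretKeySpec;

// AES-128 needs exactly a 16-byte key, hence the length check the
// removed test expects to fire for a 5-character secret.
public class SymmetricKeySketch {
  private final SecretKeySpec secretKey; // retained for cipher init (not shown)
  private final Cipher cipher;

  SymmetricKeySketch(String secret) throws Exception {
    if (secret.length() != 16) {
      throw new IllegalArgumentException(
          "Secret must be exactly 16 characters");
    }
    this.secretKey = new SecretKeySpec(
        secret.getBytes(StandardCharsets.UTF_8), "AES");
    this.cipher = Cipher.getInstance("AES");
  }

  Cipher getCipher() {
    return cipher;
  }

  public static void main(String[] args) throws Exception {
    System.out.println(new SymmetricKeySketch("abcdefghij123456")
        .getCipher().getAlgorithm()); // prints AES
  }
}
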
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSelector.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSelector.java
deleted file mode 100644
index 85ea03e..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSelector.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.security;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.token.Token;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.nio.charset.StandardCharsets;
-import java.util.Collections;
-
-import static org.apache.hadoop.ozone.security.OzoneTokenIdentifier.KIND_NAME;
-
-/**
- * Class to test OzoneDelegationTokenSelector.
- */
-public class TestOzoneDelegationTokenSelector {
-
-
-  @Test
-  public void testTokenSelector() {
-
-    // set dummy details for identifier and password in token.
-    byte[] identifier =
-        RandomStringUtils.randomAlphabetic(10)
-            .getBytes(StandardCharsets.UTF_8);
-    byte[] password =
-        RandomStringUtils.randomAlphabetic(10)
-            .getBytes(StandardCharsets.UTF_8);
-
-    Token<OzoneTokenIdentifier> tokenIdentifierToken =
-        new Token<>(identifier, password, KIND_NAME, getService());
-
-    OzoneDelegationTokenSelector ozoneDelegationTokenSelector =
-        new OzoneDelegationTokenSelector();
-
-    Text service = new Text("om1:9862");
-
-    Token<OzoneTokenIdentifier> selectedToken =
-        ozoneDelegationTokenSelector.selectToken(service,
-            Collections.singletonList(tokenIdentifierToken));
-
-
-    Assert.assertNotNull(selectedToken);
-
-
-    tokenIdentifierToken.setService(new Text("om1:9863"));
-    selectedToken =
-        ozoneDelegationTokenSelector.selectToken(service,
-            Collections.singletonList(tokenIdentifierToken));
-
-    Assert.assertNull(selectedToken);
-
-    service = new Text("om1:9863");
-    selectedToken =
-        ozoneDelegationTokenSelector.selectToken(service,
-            Collections.singletonList(tokenIdentifierToken));
-
-    Assert.assertNotNull(selectedToken);
-
-  }
-
-
-  private Text getService() {
-    return new Text("om1:9862,om2:9862,om3:9862");
-  }
-
-
-}
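
The selection logic probed above matches a requested service address against tokens whose service field may carry a comma-separated HA address list. A string-level sketch of that matching (the real code works on Token<OzoneTokenIdentifier> and Text, reduced here to plain strings):

import java.util.Arrays;
import java.util.List;
import java.util.Optional;

// A token is selected when any address in its comma-separated service
// field equals the requested service, as the removed test exercises
// with "om1:9862,om2:9862,om3:9862".
public class TokenSelect {

  static Optional<String> select(String wanted, List<String> tokenServices) {
    return tokenServices.stream()
        .filter(s -> Arrays.asList(s.split(",")).contains(wanted))
        .findFirst();
  }

  public static void main(String[] args) {
    List<String> tokens = List.of("om1:9862,om2:9862,om3:9862");
    System.out.println(select("om1:9862", tokens)); // present
    System.out.println(select("om1:9863", tokens)); // Optional.empty
  }
}
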
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java
deleted file mode 100644
index ab24b1b..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.security.acl;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.junit.Test;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType.*;
-import static org.junit.Assert.*;
-import org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType;
-
-/**
- * Test class for {@link OzoneObjInfo}.
- * */
-public class TestOzoneObjInfo {
-
-  private OzoneObjInfo objInfo;
-  private OzoneObjInfo.Builder builder;
-  private String volume = "vol1";
-  private String bucket = "bucket1";
-  private String key = "key1";
-  private static final OzoneObj.StoreType STORE = OzoneObj.StoreType.OZONE;
-
-
-  @Test
-  public void testGetVolumeName() {
-
-    builder = getBuilder(volume, bucket, key);
-    objInfo = builder.build();
-    assertEquals(objInfo.getVolumeName(), volume);
-
-    objInfo = getBuilder(null, null, null).build();
-    assertEquals(objInfo.getVolumeName(), null);
-
-    objInfo = getBuilder(volume, null, null).build();
-    assertEquals(objInfo.getVolumeName(), volume);
-  }
-
-  private OzoneObjInfo.Builder getBuilder(String withVolume,
-      String withBucket,
-      String withKey) {
-    return OzoneObjInfo.Builder.newBuilder()
-        .setResType(ResourceType.VOLUME)
-        .setStoreType(STORE)
-        .setVolumeName(withVolume)
-        .setBucketName(withBucket)
-        .setKeyName(withKey);
-  }
-
-  @Test
-  public void testGetBucketName() {
-    objInfo = getBuilder(volume, bucket, key).build();
-    assertEquals(objInfo.getBucketName(), bucket);
-
-    objInfo = getBuilder(volume, null, null).build();
-    assertEquals(objInfo.getBucketName(), null);
-
-    objInfo = getBuilder(null, bucket, null).build();
-    assertEquals(objInfo.getBucketName(), bucket);
-  }
-
-  @Test
-  public void testGetKeyName() {
-    objInfo = getBuilder(volume, bucket, key).build();
-    assertEquals(objInfo.getKeyName(), key);
-
-    objInfo = getBuilder(volume, null, null).build();
-    assertEquals(objInfo.getKeyName(), null);
-
-    objInfo = getBuilder(null, bucket, null).build();
-    assertEquals(objInfo.getKeyName(), null);
-
-    objInfo = getBuilder(null, null, key).build();
-    assertEquals(objInfo.getKeyName(), key);
-  }
-
-  @Test
-  public void testFromProtobufOp() {
-    // Key with long path.
-    key = "dir1/dir2/dir3/dir4/dir5/abc.txt";
-    OzoneManagerProtocolProtos.OzoneObj protoObj = OzoneManagerProtocolProtos.
-        OzoneObj.newBuilder()
-        .setResType(KEY)
-        .setStoreType(OzoneManagerProtocolProtos.OzoneObj.StoreType.OZONE)
-        .setPath(volume + OZONE_URI_DELIMITER +
-            bucket + OZONE_URI_DELIMITER + key)
-        .build();
-
-    objInfo = OzoneObjInfo.fromProtobuf(protoObj);
-    assertEquals(objInfo.getKeyName(), key);
-    objInfo = getBuilder(volume, null, null).build();
-    assertEquals(objInfo.getKeyName(), null);
-    objInfo = getBuilder(null, bucket, null).build();
-    assertEquals(objInfo.getKeyName(), null);
-    objInfo = getBuilder(null, null, key).build();
-    assertEquals(objInfo.getKeyName(), key);
-
-    // Key with long path and a leading delimiter.
-    key = "dir1/dir2/dir3/dir4/dir5/abc.txt";
-    protoObj = OzoneManagerProtocolProtos.
-        OzoneObj.newBuilder()
-        .setResType(KEY)
-        .setStoreType(OzoneManagerProtocolProtos.OzoneObj.StoreType.OZONE)
-        .setPath(OZONE_URI_DELIMITER + volume + OZONE_URI_DELIMITER +
-            bucket + OZONE_URI_DELIMITER + key)
-        .build();
-
-    objInfo = OzoneObjInfo.fromProtobuf(protoObj);
-    assertEquals(objInfo.getKeyName(), key);
-    objInfo = getBuilder(volume, null, null).build();
-    assertEquals(objInfo.getKeyName(), null);
-    objInfo = getBuilder(null, bucket, null).build();
-    assertEquals(objInfo.getKeyName(), null);
-    objInfo = getBuilder(null, null, key).build();
-    assertEquals(objInfo.getKeyName(), key);
-
-    // Key path ending with a trailing delimiter (directory-style key).
-    key = "dir1/dir2/dir3/dir4/dir5/";
-    protoObj = OzoneManagerProtocolProtos.
-        OzoneObj.newBuilder()
-        .setResType(KEY)
-        .setStoreType(OzoneManagerProtocolProtos.OzoneObj.StoreType.OZONE)
-        .setPath(OZONE_URI_DELIMITER + volume + OZONE_URI_DELIMITER +
-            bucket + OZONE_URI_DELIMITER + key)
-        .build();
-
-    objInfo = OzoneObjInfo.fromProtobuf(protoObj);
-    assertEquals(objInfo.getKeyName(), key);
-    objInfo = getBuilder(volume, null, null).build();
-    assertEquals(objInfo.getKeyName(), null);
-    objInfo = getBuilder(null, bucket, null).build();
-    assertEquals(objInfo.getKeyName(), null);
-    objInfo = getBuilder(null, null, key).build();
-    assertEquals(objInfo.getKeyName(), key);
-  }
-}
\ No newline at end of file
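
fromProtobuf above has to split a path of the form volume/bucket/key where only the key may contain further '/' characters, with or without a leading delimiter. A sketch of that split (the limit-3 split is an assumed simplification of the real parsing):

// Split "vol/bucket/dir1/dir2/file" into exactly three parts so the
// key keeps its internal slashes; a leading '/' is tolerated, as in
// the removed test's second and third cases.
public class ObjPathParse {

  static String[] parse(String path) {
    if (path.startsWith("/")) {
      path = path.substring(1);
    }
    return path.split("/", 3); // volume, bucket, remainder-as-key
  }

  public static void main(String[] args) {
    String[] p = parse("/vol1/bucket1/dir1/dir2/abc.txt");
    System.out.println(p[0] + " | " + p[1] + " | " + p[2]);
  }
}
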
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java
deleted file mode 100644
index 57b0268..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.util;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test Ozone Radix tree operations.
- */
-public class TestRadixTree {
-
-  static final RadixTree<Integer> ROOT = new RadixTree<>();
-
-  @BeforeClass
-  public static void setupRadixTree() {
-    // Test prefix paths with an empty tree
-    assertEquals(true, ROOT.isEmpty());
-    assertEquals("/", ROOT.getLongestPrefix("/a/b/c"));
-    assertEquals("/", RadixTree.radixPathToString(
-        ROOT.getLongestPrefixPath("/a/g")));
-    // Build Radix tree below for testing.
-    //                a
-    //                |
-    //                b
-    //             /    \
-    //            c        e
-    //           / \    /  |    \
-    //          d   f  g  dir1  dir2(1000)
-    //          |
-    //          g
-    //          |
-    //          h
-    ROOT.insert("/a/b/c/d");
-    ROOT.insert("/a/b/c/d/g/h");
-    ROOT.insert("/a/b/c/f");
-    ROOT.insert("/a/b/e/g");
-    ROOT.insert("/a/b/e/dir1");
-    ROOT.insert("/a/b/e/dir2", 1000);
-  }
-
-  /**
-   * Tests if insert and build prefix tree is correct.
-   */
-  @Test
-  public void testGetLongestPrefix() {
-    assertEquals("/a/b/c", ROOT.getLongestPrefix("/a/b/c"));
-    assertEquals("/a/b", ROOT.getLongestPrefix("/a/b"));
-    assertEquals("/a", ROOT.getLongestPrefix("/a"));
-    assertEquals("/a/b/e/g", ROOT.getLongestPrefix("/a/b/e/g/h"));
-
-    assertEquals("/", ROOT.getLongestPrefix("/d/b/c"));
-    assertEquals("/a/b/e", ROOT.getLongestPrefix("/a/b/e/dir3"));
-    assertEquals("/a/b/c/d", ROOT.getLongestPrefix("/a/b/c/d/p"));
-
-    assertEquals("/a/b/c/f", ROOT.getLongestPrefix("/a/b/c/f/p"));
-  }
-
-  @Test
-  public void testGetLongestPrefixPath() {
-    List<RadixNode<Integer>> lpp =
-        ROOT.getLongestPrefixPath("/a/b/c/d/g/p");
-    RadixNode<Integer> lpn = lpp.get(lpp.size()-1);
-    assertEquals("g", lpn.getName());
-    lpn.setValue(100);
-
-    List<RadixNode<Integer>> lpq =
-        ROOT.getLongestPrefixPath("/a/b/c/d/g/q");
-    RadixNode<Integer> lqn = lpq.get(lpq.size() - 1);
-    System.out.print(RadixTree.radixPathToString(lpq));
-    assertEquals(lpn, lqn);
-    assertEquals("g", lqn.getName());
-    assertEquals(100, (int)lqn.getValue());
-
-    assertEquals("/a/", RadixTree.radixPathToString(
-        ROOT.getLongestPrefixPath("/a/g")));
-
-  }
-
-  @Test
-  public void testGetLastNodeInPrefixPath() {
-    assertEquals(null, ROOT.getLastNodeInPrefixPath("/a/g"));
-    RadixNode<Integer> ln = ROOT.getLastNodeInPrefixPath("/a/b/e/dir1");
-    assertEquals("dir1", ln.getName());
-  }
-
-  @Test
-  public void testRemovePrefixPath() {
-
-    // Remove, test and restore
-    // Remove partially overlapped path
-    ROOT.removePrefixPath("/a/b/c/d/g/h");
-    assertEquals("/a/b/c", ROOT.getLongestPrefix("a/b/c/d"));
-    ROOT.insert("/a/b/c/d/g/h");
-
-    // Remove fully overlapped path
-    ROOT.removePrefixPath("/a/b/c/d");
-    assertEquals("/a/b/c/d", ROOT.getLongestPrefix("a/b/c/d"));
-    ROOT.insert("/a/b/c/d");
-
-    // Remove non existing path
-    ROOT.removePrefixPath("/d/a");
-    assertEquals("/a/b/c/d", ROOT.getLongestPrefix("a/b/c/d"));
-  }
-
-
-}
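
A radix tree over path segments is what makes the getLongestPrefix queries above cheap. A compact segment-trie sketch of the lookup (simplified to one node per path segment, without the node values or compressed edges of the real RadixTree):

import java.util.HashMap;
import java.util.Map;

// Segment trie: insert creates one node per path segment; the longest
// prefix of a query is the deepest chain of nodes it can still follow.
public class SegmentTrie {
  private final Map<String, SegmentTrie> children = new HashMap<>();

  void insert(String path) {
    SegmentTrie node = this;
    for (String seg : path.split("/")) {
      if (!seg.isEmpty()) {
        node = node.children.computeIfAbsent(seg, k -> new SegmentTrie());
      }
    }
  }

  String getLongestPrefix(String path) {
    SegmentTrie node = this;
    StringBuilder current = new StringBuilder();
    String longest = "/";
    for (String seg : path.split("/")) {
      if (seg.isEmpty()) {
        continue;
      }
      node = node.children.get(seg);
      if (node == null) {
        break;
      }
      current.append('/').append(seg);
      longest = current.toString(); // deepest prefix reached so far
    }
    return longest;
  }

  public static void main(String[] args) {
    SegmentTrie root = new SegmentTrie();
    root.insert("/a/b/c/d");
    root.insert("/a/b/e/g");
    System.out.println(root.getLongestPrefix("/a/b/e/g/h")); // /a/b/e/g
    System.out.println(root.getLongestPrefix("/d/b/c"));     // /
  }
}
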
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/package-info.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/package-info.java
deleted file mode 100644
index a6acd30..0000000
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.util;
-/**
- * Unit tests of generic ozone utils.
- */
diff --git a/hadoop-ozone/csi/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/csi/dev-support/findbugsExcludeFile.xml
deleted file mode 100644
index 62d72d2..0000000
--- a/hadoop-ozone/csi/dev-support/findbugsExcludeFile.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<FindBugsFilter>
-  <Match>
-    <Package name="csi.v1"/>
-  </Match>
-</FindBugsFilter>
diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml
deleted file mode 100644
index 6e7b807..0000000
--- a/hadoop-ozone/csi/pom.xml
+++ /dev/null
@@ -1,188 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-csi</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone CSI service</description>
-  <name>Apache Hadoop Ozone CSI service</name>
-  <packaging>jar</packaging>
-
-  <properties>
-    <grpc.version>1.17.1</grpc.version>
-  </properties>
-  <dependencies>
-    <dependency>
-      <groupId>com.google.protobuf</groupId>
-      <artifactId>protobuf-java-util</artifactId>
-      <version>3.5.1</version>
-      <exclusions>
-        <exclusion>
-          <groupId>com.google.protobuf</groupId>
-          <artifactId>protobuf-java</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-config</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-common</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-hdfs</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-      <version>26.0-android</version>
-    </dependency>
-    <dependency>
-      <groupId>com.google.protobuf</groupId>
-      <artifactId>protobuf-java</artifactId>
-      <version>3.5.1</version>
-    </dependency>
-    <dependency>
-      <groupId>io.grpc</groupId>
-      <artifactId>grpc-netty</artifactId>
-      <version>${grpc.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-transport-native-epoll</artifactId>
-      <version>4.1.30.Final</version>
-    </dependency>
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-transport-native-unix-common</artifactId>
-      <version>4.1.30.Final</version>
-    </dependency>
-    <dependency>
-      <groupId>io.grpc</groupId>
-      <artifactId>grpc-protobuf</artifactId>
-      <version>${grpc.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>com.google.protobuf</groupId>
-          <artifactId>protobuf-java</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>io.grpc</groupId>
-      <artifactId>grpc-stub</artifactId>
-      <version>${grpc.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-client</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>com.google.guava</groupId>
-          <artifactId>guava</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.google.protobuf</groupId>
-          <artifactId>protobuf-java</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty-all</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-  </dependencies>
-
-
-  <build>
-    <extensions>
-      <extension>
-        <groupId>kr.motd.maven</groupId>
-        <artifactId>os-maven-plugin</artifactId>
-        <version>${os-maven-plugin.version}</version>
-      </extension>
-    </extensions>
-    <plugins>
-      <plugin>
-        <groupId>org.xolstice.maven.plugins</groupId>
-        <artifactId>protobuf-maven-plugin</artifactId>
-        <version>${protobuf-maven-plugin.version}</version>
-        <extensions>true</extensions>
-        <configuration>
-          <protocArtifact>
-            com.google.protobuf:protoc:${protobuf-compile.version}:exe:${os.detected.classifier}
-          </protocArtifact>
-          <protoSourceRoot>${basedir}/src/main/proto/</protoSourceRoot>
-          <includes>
-            <include>csi.proto</include>
-          </includes>
-          <outputDirectory>target/generated-sources/java</outputDirectory>
-          <clearOutputDirectory>false</clearOutputDirectory>
-        </configuration>
-        <executions>
-          <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>compile</goal>
-              <goal>test-compile</goal>
-              <goal>compile-custom</goal>
-              <goal>test-compile-custom</goal>
-            </goals>
-            <configuration>
-              <pluginId>grpc-java</pluginId>
-              <pluginArtifact>
-                io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
-              </pluginArtifact>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <artifactId>maven-enforcer-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>depcheck</id>
-            <phase></phase>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml
-          </excludeFilterFile>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
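The protobuf-maven-plugin execution above runs protoc together with the
protoc-gen-grpc-java plugin, so the single csi.proto source yields both the
message classes (csi.v1.Csi) and the gRPC service base classes
(csi.v1.ControllerGrpc, csi.v1.IdentityGrpc, csi.v1.NodeGrpc) under
target/generated-sources/java. As a minimal sketch, assuming the generated
sources are on the classpath (ProbeOnlyIdentityService is an illustrative
name, not part of this module), a service implementation only extends the
generated *ImplBase and completes the StreamObserver of each RPC:

    import csi.v1.Csi.ProbeRequest;
    import csi.v1.Csi.ProbeResponse;
    import csi.v1.IdentityGrpc.IdentityImplBase;
    import io.grpc.stub.StreamObserver;

    // Illustrative sketch: override one generated RPC method and
    // complete the observer to answer the call.
    public class ProbeOnlyIdentityService extends IdentityImplBase {
      @Override
      public void probe(ProbeRequest request,
          StreamObserver<ProbeResponse> responseObserver) {
        responseObserver.onNext(ProbeResponse.getDefaultInstance());
        responseObserver.onCompleted();
      }
    }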
diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/ControllerService.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/ControllerService.java
deleted file mode 100644
index 65b7250..0000000
--- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/ControllerService.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.csi;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ozone.client.OzoneClient;
-
-import csi.v1.ControllerGrpc.ControllerImplBase;
-import csi.v1.Csi.CapacityRange;
-import csi.v1.Csi.ControllerGetCapabilitiesRequest;
-import csi.v1.Csi.ControllerGetCapabilitiesResponse;
-import csi.v1.Csi.ControllerServiceCapability;
-import csi.v1.Csi.ControllerServiceCapability.RPC;
-import csi.v1.Csi.ControllerServiceCapability.RPC.Type;
-import csi.v1.Csi.CreateVolumeRequest;
-import csi.v1.Csi.CreateVolumeResponse;
-import csi.v1.Csi.DeleteVolumeRequest;
-import csi.v1.Csi.DeleteVolumeResponse;
-import csi.v1.Csi.Volume;
-import io.grpc.stub.StreamObserver;
-
-/**
- * CSI controller service.
- * <p>
- * This service usually runs only once and is responsible for the creation
- * of the volumes.
- */
-public class ControllerService extends ControllerImplBase {
-
-  private final String volumeOwner;
-
-  private long defaultVolumeSize;
-
-  private OzoneClient ozoneClient;
-
-  public ControllerService(OzoneClient ozoneClient, long volumeSize,
-      String volumeOwner) {
-    this.volumeOwner = volumeOwner;
-    this.defaultVolumeSize = volumeSize;
-    this.ozoneClient = ozoneClient;
-  }
-
-  @Override
-  public void createVolume(CreateVolumeRequest request,
-      StreamObserver<CreateVolumeResponse> responseObserver) {
-    try {
-      ozoneClient.getObjectStore()
-          .createS3Bucket(volumeOwner, request.getName());
-
-      long size = findSize(request.getCapacityRange());
-
-      CreateVolumeResponse response = CreateVolumeResponse.newBuilder()
-          .setVolume(Volume.newBuilder()
-              .setVolumeId(request.getName())
-              .setCapacityBytes(size))
-          .build();
-
-      responseObserver.onNext(response);
-      responseObserver.onCompleted();
-    } catch (IOException e) {
-      responseObserver.onError(e);
-    }
-  }
-
-  private long findSize(CapacityRange capacityRange) {
-    if (capacityRange.getRequiredBytes() != 0) {
-      return capacityRange.getRequiredBytes();
-    } else {
-      if (capacityRange.getLimitBytes() != 0) {
-        return Math.min(defaultVolumeSize, capacityRange.getLimitBytes());
-      } else {
-        // fall back to the configured default size (~1 GB by default)
-        return defaultVolumeSize;
-      }
-    }
-  }
-
-  @Override
-  public void deleteVolume(DeleteVolumeRequest request,
-      StreamObserver<DeleteVolumeResponse> responseObserver) {
-    try {
-      ozoneClient.getObjectStore().deleteS3Bucket(request.getVolumeId());
-
-      DeleteVolumeResponse response = DeleteVolumeResponse.newBuilder()
-          .build();
-
-      responseObserver.onNext(response);
-      responseObserver.onCompleted();
-    } catch (IOException e) {
-      responseObserver.onError(e);
-    }
-  }
-
-  @Override
-  public void controllerGetCapabilities(
-      ControllerGetCapabilitiesRequest request,
-      StreamObserver<ControllerGetCapabilitiesResponse> responseObserver) {
-    ControllerGetCapabilitiesResponse response =
-        ControllerGetCapabilitiesResponse.newBuilder()
-            .addCapabilities(
-                ControllerServiceCapability.newBuilder().setRpc(
-                    RPC.newBuilder().setType(Type.CREATE_DELETE_VOLUME)))
-            .build();
-    responseObserver.onNext(response);
-    responseObserver.onCompleted();
-  }
-}
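findSize above implements the CapacityRange contract defined later in
csi.proto: an explicit required_bytes always wins; otherwise the configured
default size applies, capped by limit_bytes when one is given. A minimal
standalone sketch of the same rule, assuming the generated csi.v1 classes
are on the classpath (FindSizeDemo and DEFAULT_SIZE are illustrative names):

    import csi.v1.Csi.CapacityRange;

    public final class FindSizeDemo {

      // Mirrors ControllerService#findSize; 1000000000 matches the
      // ozone.csi.default-volume-size default (~1 GB).
      private static final long DEFAULT_SIZE = 1_000_000_000L;

      static long findSize(CapacityRange range) {
        if (range.getRequiredBytes() != 0) {
          return range.getRequiredBytes();   // explicit request wins
        }
        if (range.getLimitBytes() != 0) {
          return Math.min(DEFAULT_SIZE, range.getLimitBytes());
        }
        return DEFAULT_SIZE;                 // nothing was specified
      }

      public static void main(String[] args) {
        System.out.println(findSize(CapacityRange.newBuilder()
            .setRequiredBytes(5L << 30).build()));   // 5368709120
        System.out.println(findSize(CapacityRange.newBuilder()
            .setLimitBytes(500_000_000L).build()));  // 500000000
        System.out.println(findSize(
            CapacityRange.getDefaultInstance()));    // 1000000000
      }
    }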
diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java
deleted file mode 100644
index df5127c..0000000
--- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.csi;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.Config;
-import org.apache.hadoop.hdds.conf.ConfigGroup;
-import org.apache.hadoop.hdds.conf.ConfigTag;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.util.StringUtils;
-
-import io.grpc.Server;
-import io.grpc.netty.NettyServerBuilder;
-import io.netty.channel.epoll.EpollEventLoopGroup;
-import io.netty.channel.epoll.EpollServerDomainSocketChannel;
-import io.netty.channel.unix.DomainSocketAddress;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-
-/**
- * CLI entrypoint of the CSI service daemon.
- */
-@Command(name = "ozone csi",
-    hidden = true, description = "CSI service daemon.",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true)
-public class CsiServer extends GenericCli implements Callable<Void> {
-
-  private static final Logger LOG = LoggerFactory.getLogger(CsiServer.class);
-
-  @Override
-  public Void call() throws Exception {
-    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
-    CsiConfig csiConfig = ozoneConfiguration.getObject(CsiConfig.class);
-
-    OzoneClient rpcClient = OzoneClientFactory.getRpcClient(ozoneConfiguration);
-
-    EpollEventLoopGroup group = new EpollEventLoopGroup();
-
-    if (csiConfig.getVolumeOwner().isEmpty()) {
-      throw new IllegalArgumentException(
-          "ozone.csi.owner is not set. You should set this configuration "
-              + "variable to define which user should own all the created "
-              + "buckets.");
-    }
-
-    Server server =
-        NettyServerBuilder
-            .forAddress(new DomainSocketAddress(csiConfig.getSocketPath()))
-            .channelType(EpollServerDomainSocketChannel.class)
-            .workerEventLoopGroup(group)
-            .bossEventLoopGroup(group)
-            .addService(new IdentitiyService())
-            .addService(new ControllerService(rpcClient,
-                csiConfig.getDefaultVolumeSize(), csiConfig.getVolumeOwner()))
-            .addService(new NodeService(csiConfig))
-            .build();
-
-    server.start();
-    server.awaitTermination();
-    rpcClient.close();
-    return null;
-  }
-
-  public static void main(String[] args) {
-
-    StringUtils.startupShutdownMessage(CsiServer.class, args, LOG);
-    new CsiServer().run(args);
-  }
-
-  /**
-   * Configuration settings specific to the CSI server.
-   */
-  @ConfigGroup(prefix = "ozone.csi")
-  public static class CsiConfig {
-    private String socketPath;
-    private long defaultVolumeSize;
-    private String s3gAddress;
-    private String volumeOwner;
-
-    public String getSocketPath() {
-      return socketPath;
-    }
-
-    public String getVolumeOwner() {
-      return volumeOwner;
-    }
-
-    @Config(key = "owner",
-        defaultValue = "",
-        description =
-            "This is the username which is used to create the requested "
-                + "storage. Used as a hadoop username and the generated ozone"
-                + " volume used to store all the buckets. WARNING: It can "
-                + "be a security hole to use CSI in a secure environments as "
-                + "ALL the users can request the mount of a specific bucket "
-                + "via the CSI interface.",
-        tags = ConfigTag.STORAGE)
-    public void setVolumeOwner(String volumeOwner) {
-      this.volumeOwner = volumeOwner;
-    }
-
-    @Config(key = "socket",
-        defaultValue = "/var/lib/csi.sock",
-        description =
-            "The socket where all the CSI services will listen (file name).",
-        tags = ConfigTag.STORAGE)
-    public void setSocketPath(String socketPath) {
-      this.socketPath = socketPath;
-    }
-
-    public long getDefaultVolumeSize() {
-      return defaultVolumeSize;
-    }
-
-    @Config(key = "default-volume-size",
-        defaultValue = "1000000000",
-        description =
-            "The default size of the create volumes (if not specified).",
-        tags = ConfigTag.STORAGE)
-    public void setDefaultVolumeSize(long defaultVolumeSize) {
-      this.defaultVolumeSize = defaultVolumeSize;
-    }
-
-    public String getS3gAddress() {
-      return s3gAddress;
-    }
-
-    @Config(key = "s3g.address",
-        defaultValue = "http://localhost:9878",
-        description =
-            "The default size of the created volumes (if not specified in the"
-                + " requests).",
-        tags = ConfigTag.STORAGE)
-    public void setS3gAddress(String s3gAddress) {
-      this.s3gAddress = s3gAddress;
-    }
-  }
-}
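The CsiConfig settings above are resolved through the hdds configuration
framework: OzoneConfiguration#getObject instantiates the @ConfigGroup class
and feeds each @Config setter with the value stored under prefix + "." + key
(for example ozone.csi.socket), falling back to the annotated defaultValue
when the property is unset. A short sketch of how the daemon's settings
resolve; the property values are only illustrative:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.csi.CsiServer.CsiConfig;

    public final class CsiConfigDemo {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // @ConfigGroup(prefix = "ozone.csi") + @Config(key = "socket")
        // together resolve to the "ozone.csi.socket" property.
        conf.set("ozone.csi.socket", "/tmp/csi.sock");
        conf.set("ozone.csi.owner", "hadoop");

        CsiConfig csi = conf.getObject(CsiConfig.class);
        System.out.println(csi.getSocketPath());        // /tmp/csi.sock
        System.out.println(csi.getVolumeOwner());       // hadoop
        System.out.println(csi.getDefaultVolumeSize()); // 1000000000
      }
    }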
diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java
deleted file mode 100644
index 5a0c4c8..0000000
--- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.csi;
-
-import org.apache.hadoop.ozone.util.OzoneVersionInfo;
-
-import com.google.protobuf.BoolValue;
-import csi.v1.Csi.GetPluginCapabilitiesResponse;
-import csi.v1.Csi.GetPluginInfoResponse;
-import csi.v1.Csi.PluginCapability;
-import csi.v1.Csi.PluginCapability.Service;
-import static csi.v1.Csi.PluginCapability.Service.Type.CONTROLLER_SERVICE;
-import csi.v1.Csi.ProbeResponse;
-import csi.v1.IdentityGrpc.IdentityImplBase;
-import io.grpc.stub.StreamObserver;
-
-/**
- * Implementation of the CSI identity service.
- */
-public class IdentitiyService extends IdentityImplBase {
-
-  @Override
-  public void getPluginInfo(csi.v1.Csi.GetPluginInfoRequest request,
-      StreamObserver<csi.v1.Csi.GetPluginInfoResponse> responseObserver) {
-    GetPluginInfoResponse response = GetPluginInfoResponse.newBuilder()
-        .setName("org.apache.hadoop.ozone")
-        .setVendorVersion(OzoneVersionInfo.OZONE_VERSION_INFO.getVersion())
-        .build();
-    responseObserver.onNext(response);
-    responseObserver.onCompleted();
-  }
-
-  @Override
-  public void getPluginCapabilities(
-      csi.v1.Csi.GetPluginCapabilitiesRequest request,
-      StreamObserver<GetPluginCapabilitiesResponse> responseObserver) {
-    GetPluginCapabilitiesResponse response =
-        GetPluginCapabilitiesResponse.newBuilder()
-            .addCapabilities(PluginCapability.newBuilder().setService(
-                Service.newBuilder().setType(CONTROLLER_SERVICE)))
-            .build();
-    responseObserver.onNext(response);
-    responseObserver.onCompleted();
-
-  }
-
-  @Override
-  public void probe(csi.v1.Csi.ProbeRequest request,
-      StreamObserver<csi.v1.Csi.ProbeResponse> responseObserver) {
-    ProbeResponse response = ProbeResponse.newBuilder()
-        .setReady(BoolValue.of(true))
-        .build();
-    responseObserver.onNext(response);
-    responseObserver.onCompleted();
-
-  }
-}
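Because CsiServer binds these services to a Unix domain socket rather than a
TCP port, a client needs the matching Netty epoll transport to reach them. A
hedged sketch of probing the identity service with the generated blocking
stub, assuming a Linux host where the native epoll transport is available
(the socket path mirrors the ozone.csi.socket default):

    import csi.v1.Csi.ProbeRequest;
    import csi.v1.IdentityGrpc;
    import io.grpc.ManagedChannel;
    import io.grpc.netty.NettyChannelBuilder;
    import io.netty.channel.epoll.EpollDomainSocketChannel;
    import io.netty.channel.epoll.EpollEventLoopGroup;
    import io.netty.channel.unix.DomainSocketAddress;

    public final class ProbeClient {
      public static void main(String[] args) throws Exception {
        ManagedChannel channel = NettyChannelBuilder
            .forAddress(new DomainSocketAddress("/var/lib/csi.sock"))
            .eventLoopGroup(new EpollEventLoopGroup())
            .channelType(EpollDomainSocketChannel.class)
            .usePlaintext()
            .build();
        boolean ready = IdentityGrpc.newBlockingStub(channel)
            .probe(ProbeRequest.getDefaultInstance())
            .getReady().getValue();
        System.out.println("CSI plugin ready: " + ready);
        channel.shutdownNow();
      }
    }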
diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/NodeService.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/NodeService.java
deleted file mode 100644
index 8edda59..0000000
--- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/NodeService.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.csi;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.ozone.csi.CsiServer.CsiConfig;
-
-import csi.v1.Csi.NodeGetCapabilitiesRequest;
-import csi.v1.Csi.NodeGetCapabilitiesResponse;
-import csi.v1.Csi.NodeGetInfoRequest;
-import csi.v1.Csi.NodeGetInfoResponse;
-import csi.v1.Csi.NodePublishVolumeRequest;
-import csi.v1.Csi.NodePublishVolumeResponse;
-import csi.v1.Csi.NodeUnpublishVolumeRequest;
-import csi.v1.Csi.NodeUnpublishVolumeResponse;
-import csi.v1.NodeGrpc.NodeImplBase;
-import io.grpc.stub.StreamObserver;
-import org.apache.commons.io.IOUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Implementation of the CSI node service.
- */
-public class NodeService extends NodeImplBase {
-
-  private static final Logger LOG = LoggerFactory.getLogger(NodeService.class);
-
-  private String s3Endpoint;
-
-  public NodeService(CsiConfig configuration) {
-    this.s3Endpoint = configuration.getS3gAddress();
-
-  }
-
-  @Override
-  public void nodePublishVolume(NodePublishVolumeRequest request,
-      StreamObserver<NodePublishVolumeResponse> responseObserver) {
-
-    try {
-      Files.createDirectories(Paths.get(request.getTargetPath()));
-      String mountCommand =
-          String.format("goofys --endpoint %s %s %s",
-              s3Endpoint,
-              request.getVolumeId(),
-              request.getTargetPath());
-      LOG.info("Executing {}", mountCommand);
-
-      executeCommand(mountCommand);
-
-      responseObserver.onNext(NodePublishVolumeResponse.newBuilder()
-          .build());
-      responseObserver.onCompleted();
-
-    } catch (Exception e) {
-      responseObserver.onError(e);
-    }
-
-  }
-
-  private void executeCommand(String mountCommand)
-      throws IOException, InterruptedException {
-    Process exec = Runtime.getRuntime().exec(mountCommand);
-    boolean finished = exec.waitFor(10, TimeUnit.SECONDS);
-
-    LOG.info("Command is executed with  stdout: {}, stderr: {}",
-        IOUtils.toString(exec.getInputStream(), "UTF-8"),
-        IOUtils.toString(exec.getErrorStream(), "UTF-8"));
-    if (!finished || exec.exitValue() != 0) {
-      throw new RuntimeException(String
-          .format("Command %s timed out or returned a non-zero exit code",
-              mountCommand));
-    }
-  }
-
-  @Override
-  public void nodeUnpublishVolume(NodeUnpublishVolumeRequest request,
-      StreamObserver<NodeUnpublishVolumeResponse> responseObserver) {
-    String umountCommand =
-        String.format("fusermount -u %s", request.getTargetPath());
-    LOG.info("Executing {}", umountCommand);
-
-    try {
-      executeCommand(umountCommand);
-
-      responseObserver.onNext(NodeUnpublishVolumeResponse.newBuilder()
-          .build());
-      responseObserver.onCompleted();
-
-    } catch (Exception e) {
-      responseObserver.onError(e);
-    }
-
-  }
-
-  @Override
-  public void nodeGetCapabilities(NodeGetCapabilitiesRequest request,
-      StreamObserver<NodeGetCapabilitiesResponse> responseObserver) {
-    NodeGetCapabilitiesResponse response =
-        NodeGetCapabilitiesResponse.newBuilder()
-            .build();
-    responseObserver.onNext(response);
-    responseObserver.onCompleted();
-  }
-
-  @Override
-  public void nodeGetInfo(NodeGetInfoRequest request,
-      StreamObserver<NodeGetInfoResponse> responseObserver) {
-    NodeGetInfoResponse response = null;
-    try {
-      response = NodeGetInfoResponse.newBuilder()
-          .setNodeId(InetAddress.getLocalHost().getHostName())
-          .build();
-      responseObserver.onNext(response);
-      responseObserver.onCompleted();
-    } catch (UnknownHostException e) {
-      responseObserver.onError(e);
-    }
-
-  }
-}
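One caveat around executeCommand: Process#waitFor(long, TimeUnit) returns
false when the timeout elapses, and calling exitValue() on a still-running
process throws IllegalThreadStateException, so the returned flag must be
checked before the exit code is inspected. A slightly more defensive sketch
using ProcessBuilder, which also merges stderr into stdout and kills a hung
mount process; CommandRunner and the 10 second timeout are illustrative:

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;

    public final class CommandRunner {

      // Run a command and fail on timeout or a non-zero exit code.
      static void run(String... command)
          throws IOException, InterruptedException {
        Process process = new ProcessBuilder(command)
            .redirectErrorStream(true)   // merge stderr into stdout
            .start();
        if (!process.waitFor(10, TimeUnit.SECONDS)) {
          process.destroyForcibly();     // do not leak a hung process
          throw new IOException(
              "Timed out: " + String.join(" ", command));
        }
        if (process.exitValue() != 0) {
          throw new IOException("Exit code " + process.exitValue()
              + " from: " + String.join(" ", command));
        }
      }

      public static void main(String[] args) throws Exception {
        run("ls", "/tmp");
      }
    }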
diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java
deleted file mode 100644
index 1b558dd..0000000
--- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.csi;
-
-/**
- * Container Storage Interface server implementation for Ozone.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/csi/src/main/proto/csi.proto b/hadoop-ozone/csi/src/main/proto/csi.proto
deleted file mode 100644
index 3bd53a0..0000000
--- a/hadoop-ozone/csi/src/main/proto/csi.proto
+++ /dev/null
@@ -1,1323 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Code generated by make; DO NOT EDIT.
-syntax = "proto3";
-package csi.v1;
-
-import "google/protobuf/descriptor.proto";
-import "google/protobuf/timestamp.proto";
-import "google/protobuf/wrappers.proto";
-
-option go_package = "csi";
-
-extend google.protobuf.FieldOptions {
-  // Indicates that a field MAY contain information that is sensitive
-  // and MUST be treated as such (e.g. not logged).
-  bool csi_secret = 1059;
-}
-service Identity {
-  rpc GetPluginInfo(GetPluginInfoRequest)
-    returns (GetPluginInfoResponse) {}
-
-  rpc GetPluginCapabilities(GetPluginCapabilitiesRequest)
-    returns (GetPluginCapabilitiesResponse) {}
-
-  rpc Probe (ProbeRequest)
-    returns (ProbeResponse) {}
-}
-
-service Controller {
-  rpc CreateVolume (CreateVolumeRequest)
-    returns (CreateVolumeResponse) {}
-
-  rpc DeleteVolume (DeleteVolumeRequest)
-    returns (DeleteVolumeResponse) {}
-
-  rpc ControllerPublishVolume (ControllerPublishVolumeRequest)
-    returns (ControllerPublishVolumeResponse) {}
-
-  rpc ControllerUnpublishVolume (ControllerUnpublishVolumeRequest)
-    returns (ControllerUnpublishVolumeResponse) {}
-
-  rpc ValidateVolumeCapabilities (ValidateVolumeCapabilitiesRequest)
-    returns (ValidateVolumeCapabilitiesResponse) {}
-
-  rpc ListVolumes (ListVolumesRequest)
-    returns (ListVolumesResponse) {}
-
-  rpc GetCapacity (GetCapacityRequest)
-    returns (GetCapacityResponse) {}
-
-  rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest)
-    returns (ControllerGetCapabilitiesResponse) {}
-
-  rpc CreateSnapshot (CreateSnapshotRequest)
-    returns (CreateSnapshotResponse) {}
-
-  rpc DeleteSnapshot (DeleteSnapshotRequest)
-    returns (DeleteSnapshotResponse) {}
-
-  rpc ListSnapshots (ListSnapshotsRequest)
-    returns (ListSnapshotsResponse) {}
-
-  rpc ControllerExpandVolume (ControllerExpandVolumeRequest)
-    returns (ControllerExpandVolumeResponse) {}
-}
-
-service Node {
-  rpc NodeStageVolume (NodeStageVolumeRequest)
-    returns (NodeStageVolumeResponse) {}
-
-  rpc NodeUnstageVolume (NodeUnstageVolumeRequest)
-    returns (NodeUnstageVolumeResponse) {}
-
-  rpc NodePublishVolume (NodePublishVolumeRequest)
-    returns (NodePublishVolumeResponse) {}
-
-  rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest)
-    returns (NodeUnpublishVolumeResponse) {}
-
-  rpc NodeGetVolumeStats (NodeGetVolumeStatsRequest)
-    returns (NodeGetVolumeStatsResponse) {}
-
-
-  rpc NodeExpandVolume(NodeExpandVolumeRequest)
-    returns (NodeExpandVolumeResponse) {}
-
-
-  rpc NodeGetCapabilities (NodeGetCapabilitiesRequest)
-    returns (NodeGetCapabilitiesResponse) {}
-
-  rpc NodeGetInfo (NodeGetInfoRequest)
-    returns (NodeGetInfoResponse) {}
-}
-message GetPluginInfoRequest {
-  // Intentionally empty.
-}
-
-message GetPluginInfoResponse {
-  // The name MUST follow domain name notation format
-  // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD
-  // include the plugin's host company name and the plugin name,
-  // to minimize the possibility of collisions. It MUST be 63
-  // characters or less, beginning and ending with an alphanumeric
-  // character ([a-z0-9A-Z]) with dashes (-), dots (.), and
-  // alphanumerics between. This field is REQUIRED.
-  string name = 1;
-
-  // This field is REQUIRED. Value of this field is opaque to the CO.
-  string vendor_version = 2;
-
-  // This field is OPTIONAL. Values are opaque to the CO.
-  map<string, string> manifest = 3;
-}
-message GetPluginCapabilitiesRequest {
-  // Intentionally empty.
-}
-
-message GetPluginCapabilitiesResponse {
-  // All the capabilities that the controller service supports. This
-  // field is OPTIONAL.
-  repeated PluginCapability capabilities = 1;
-}
-
-// Specifies a capability of the plugin.
-message PluginCapability {
-  message Service {
-    enum Type {
-      UNKNOWN = 0;
-      // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for
-      // the ControllerService. Plugins SHOULD provide this capability.
-      // In rare cases certain plugins MAY wish to omit the
-      // ControllerService entirely from their implementation, but such
-      // SHOULD NOT be the common case.
-      // The presence of this capability determines whether the CO will
-      // attempt to invoke the REQUIRED ControllerService RPCs, as well
-      // as specific RPCs as indicated by ControllerGetCapabilities.
-      CONTROLLER_SERVICE = 1;
-
-      // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for
-      // this plugin MAY NOT be equally accessible by all nodes in the
-      // cluster. The CO MUST use the topology information returned by
-      // CreateVolumeRequest along with the topology information
-      // returned by NodeGetInfo to ensure that a given volume is
-      // accessible from a given node when scheduling workloads.
-      VOLUME_ACCESSIBILITY_CONSTRAINTS = 2;
-    }
-    Type type = 1;
-  }
-
-  message VolumeExpansion {
-    enum Type {
-      UNKNOWN = 0;
-
-      // ONLINE indicates that volumes may be expanded when published to
-      // a node. When a Plugin implements this capability it MUST
-      // implement either the EXPAND_VOLUME controller capability or the
-      // EXPAND_VOLUME node capability or both. When a plugin supports
-      // ONLINE volume expansion and also has the EXPAND_VOLUME
-      // controller capability then the plugin MUST support expansion of
-      // volumes currently published and available on a node. When a
-      // plugin supports ONLINE volume expansion and also has the
-      // EXPAND_VOLUME node capability then the plugin MAY support
-      // expansion of node-published volume via NodeExpandVolume.
-      //
-      // Example 1: Given a shared filesystem volume (e.g. GlusterFs),
-      //   the Plugin may set the ONLINE volume expansion capability and
-      //   implement ControllerExpandVolume but not NodeExpandVolume.
-      //
-      // Example 2: Given a block storage volume type (e.g. EBS), the
-      //   Plugin may set the ONLINE volume expansion capability and
-      //   implement both ControllerExpandVolume and NodeExpandVolume.
-      //
-      // Example 3: Given a Plugin that supports volume expansion only
-      //   upon a node, the Plugin may set the ONLINE volume
-      //   expansion capability and implement NodeExpandVolume but not
-      //   ControllerExpandVolume.
-      ONLINE = 1;
-
-      // OFFLINE indicates that volumes currently published and
-      // available on a node SHALL NOT be expanded via
-      // ControllerExpandVolume. When a plugin supports OFFLINE volume
-      // expansion it MUST implement either the EXPAND_VOLUME controller
-      // capability or both the EXPAND_VOLUME controller capability and
-      // the EXPAND_VOLUME node capability.
-      //
-      // Example 1: Given a block storage volume type (e.g. Azure Disk)
-      //   that does not support expansion of "node-attached" (i.e.
-      //   controller-published) volumes, the Plugin may indicate
-      //   OFFLINE volume expansion support and implement both
-      //   ControllerExpandVolume and NodeExpandVolume.
-      OFFLINE = 2;
-    }
-  }
-
-  oneof type {
-    // Service that the plugin supports.
-    Service service = 1;
-    VolumeExpansion volume_expansion = 2;
-  }
-}
-message ProbeRequest {
-  // Intentionally empty.
-}
-
-message ProbeResponse {
-  // Readiness allows a plugin to report its initialization status back
-  // to the CO. Initialization for some plugins MAY be time consuming
-  // and it is important for a CO to distinguish between the following
-  // cases:
-  //
-  // 1) The plugin is in an unhealthy state and MAY need restarting. In
-  //    this case a gRPC error code SHALL be returned.
-  // 2) The plugin is still initializing, but is otherwise perfectly
-  //    healthy. In this case a successful response SHALL be returned
-  //    with a readiness value of `false`. Calls to the plugin's
-  //    Controller and/or Node services MAY fail due to an incomplete
-  //    initialization state.
-  // 3) The plugin has finished initializing and is ready to service
-  //    calls to its Controller and/or Node services. A successful
-  //    response is returned with a readiness value of `true`.
-  //
-  // This field is OPTIONAL. If not present, the caller SHALL assume
-  // that the plugin is in a ready state and is accepting calls to its
-  // Controller and/or Node services (according to the plugin's reported
-  // capabilities).
-  .google.protobuf.BoolValue ready = 1;
-}
-message CreateVolumeRequest {
-  // The suggested name for the storage space. This field is REQUIRED.
-  // It serves two purposes:
-  // 1) Idempotency - This name is generated by the CO to achieve
-  //    idempotency.  The Plugin SHOULD ensure that multiple
-  //    `CreateVolume` calls for the same name do not result in more
-  //    than one piece of storage provisioned corresponding to that
-  //    name. If a Plugin is unable to enforce idempotency, the CO's
-  //    error recovery logic could result in multiple (unused) volumes
-  //    being provisioned.
-  //    In the case of error, the CO MUST handle the gRPC error codes
-  //    per the recovery behavior defined in the "CreateVolume Errors"
-  //    section below.
-  //    The CO is responsible for cleaning up volumes it provisioned
-  //    that it no longer needs. If the CO is uncertain whether a volume
-  //    was provisioned or not when a `CreateVolume` call fails, the CO
-  //    MAY call `CreateVolume` again, with the same name, to ensure the
-  //    volume exists and to retrieve the volume's `volume_id` (unless
-  //    otherwise prohibited by "CreateVolume Errors").
-  // 2) Suggested name - Some storage systems allow callers to specify
-  //    an identifier by which to refer to the newly provisioned
-  //    storage. If a storage system supports this, it can optionally
-  //    use this name as the identifier for the new volume.
-  // Any Unicode string that conforms to the length limit is allowed
-  // except those containing the following banned characters:
-  // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
-  // (These are control characters other than commonly used whitespace.)
-  string name = 1;
-
-  // This field is OPTIONAL. This allows the CO to specify the capacity
-  // requirement of the volume to be provisioned. If not specified, the
-  // Plugin MAY choose an implementation-defined capacity range. If
-  // specified it MUST always be honored, even when creating volumes
-  // from a source; which MAY force some backends to internally extend
-  // the volume after creating it.
-  CapacityRange capacity_range = 2;
-
-  // The capabilities that the provisioned volume MUST have. SP MUST
-  // provision a volume that will satisfy ALL of the capabilities
-  // specified in this list. Otherwise SP MUST return the appropriate
-  // gRPC error code.
-  // The Plugin MUST assume that the CO MAY use the provisioned volume
-  // with ANY of the capabilities specified in this list.
-  // For example, a CO MAY specify two volume capabilities: one with
-  // access mode SINGLE_NODE_WRITER and another with access mode
-  // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the
-  // provisioned volume can be used in either mode.
-  // This also enables the CO to do early validation: If ANY of the
-  // specified volume capabilities are not supported by the SP, the call
-  // MUST return the appropriate gRPC error code.
-  // This field is REQUIRED.
-  repeated VolumeCapability volume_capabilities = 3;
-
-  // Plugin specific parameters passed in as opaque key-value pairs.
-  // This field is OPTIONAL. The Plugin is responsible for parsing and
-  // validating these parameters. COs will treat these as opaque.
-  map<string, string> parameters = 4;
-
-  // Secrets required by plugin to complete volume creation request.
-  // This field is OPTIONAL. Refer to the `Secrets Requirements`
-  // section on how to use this field.
-  map<string, string> secrets = 5 [(csi_secret) = true];
-
-  // If specified, the new volume will be pre-populated with data from
-  // this source. This field is OPTIONAL.
-  VolumeContentSource volume_content_source = 6;
-
-  // Specifies where (regions, zones, racks, etc.) the provisioned
-  // volume MUST be accessible from.
-  // An SP SHALL advertise the requirements for topological
-  // accessibility information in documentation. COs SHALL only specify
-  // topological accessibility information supported by the SP.
-  // This field is OPTIONAL.
-  // This field SHALL NOT be specified unless the SP has the
-  // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
-  // If this field is not specified and the SP has the
-  // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY
-  // choose where the provisioned volume is accessible from.
-  TopologyRequirement accessibility_requirements = 7;
-}
-
-// Specifies what source the volume will be created from. One of the
-// type fields MUST be specified.
-message VolumeContentSource {
-  message SnapshotSource {
-    // Contains identity information for the existing source snapshot.
-    // This field is REQUIRED. Plugin is REQUIRED to support creating
-    // volume from snapshot if it supports the capability
-    // CREATE_DELETE_SNAPSHOT.
-    string snapshot_id = 1;
-  }
-
-  message VolumeSource {
-    // Contains identity information for the existing source volume.
-    // This field is REQUIRED. Plugins reporting CLONE_VOLUME
-    // capability MUST support creating a volume from another volume.
-    string volume_id = 1;
-  }
-
-  oneof type {
-    SnapshotSource snapshot = 1;
-    VolumeSource volume = 2;
-  }
-}
-
-message CreateVolumeResponse {
-  // Contains all attributes of the newly created volume that are
-  // relevant to the CO along with information required by the Plugin
-  // to uniquely identify the volume. This field is REQUIRED.
-  Volume volume = 1;
-}
-
-// Specify a capability of a volume.
-message VolumeCapability {
-  // Indicate that the volume will be accessed via the block device API.
-  message BlockVolume {
-    // Intentionally empty, for now.
-  }
-
-  // Indicate that the volume will be accessed via the filesystem API.
-  message MountVolume {
-    // The filesystem type. This field is OPTIONAL.
-    // An empty string is equal to an unspecified field value.
-    string fs_type = 1;
-
-    // The mount options that can be used for the volume. This field is
-    // OPTIONAL. `mount_flags` MAY contain sensitive information.
-    // Therefore, the CO and the Plugin MUST NOT leak this information
-    // to untrusted entities. The total size of this repeated field
-    // SHALL NOT exceed 4 KiB.
-    repeated string mount_flags = 2;
-  }
-
-  // Specify how a volume can be accessed.
-  message AccessMode {
-    enum Mode {
-      UNKNOWN = 0;
-
-      // Can only be published once as read/write on a single node, at
-      // any given time.
-      SINGLE_NODE_WRITER = 1;
-
-      // Can only be published once as readonly on a single node, at
-      // any given time.
-      SINGLE_NODE_READER_ONLY = 2;
-
-      // Can be published as readonly at multiple nodes simultaneously.
-      MULTI_NODE_READER_ONLY = 3;
-
-      // Can be published at multiple nodes simultaneously. Only one of
-      // the node can be used as read/write. The rest will be readonly.
-      MULTI_NODE_SINGLE_WRITER = 4;
-
-      // Can be published as read/write at multiple nodes
-      // simultaneously.
-      MULTI_NODE_MULTI_WRITER = 5;
-    }
-
-    // This field is REQUIRED.
-    Mode mode = 1;
-  }
-
-  // Specifies what API the volume will be accessed using. One of the
-  // following fields MUST be specified.
-  oneof access_type {
-    BlockVolume block = 1;
-    MountVolume mount = 2;
-  }
-
-  // This is a REQUIRED field.
-  AccessMode access_mode = 3;
-}
-
-// The capacity of the storage space in bytes. To specify an exact size,
-// `required_bytes` and `limit_bytes` SHALL be set to the same value. At
-// least one of these fields MUST be specified.
-message CapacityRange {
-  // Volume MUST be at least this big. This field is OPTIONAL.
-  // A value of 0 is equal to an unspecified field value.
-  // The value of this field MUST NOT be negative.
-  int64 required_bytes = 1;
-
-  // Volume MUST not be bigger than this. This field is OPTIONAL.
-  // A value of 0 is equal to an unspecified field value.
-  // The value of this field MUST NOT be negative.
-  int64 limit_bytes = 2;
-}
-
-// Information about a specific volume.
-message Volume {
-  // The capacity of the volume in bytes. This field is OPTIONAL. If not
-  // set (value of 0), it indicates that the capacity of the volume is
-  // unknown (e.g., NFS share).
-  // The value of this field MUST NOT be negative.
-  int64 capacity_bytes = 1;
-
-  // The identifier for this volume, generated by the plugin.
-  // This field is REQUIRED.
-  // This field MUST contain enough information to uniquely identify
-  // this specific volume vs all other volumes supported by this plugin.
-  // This field SHALL be used by the CO in subsequent calls to refer to
-  // this volume.
-  // The SP is NOT responsible for global uniqueness of volume_id across
-  // multiple SPs.
-  string volume_id = 2;
-
-  // Opaque static properties of the volume. SP MAY use this field to
-  // ensure subsequent volume validation and publishing calls have
-  // contextual information.
-  // The contents of this field SHALL be opaque to a CO.
-  // The contents of this field SHALL NOT be mutable.
-  // The contents of this field SHALL be safe for the CO to cache.
-  // The contents of this field SHOULD NOT contain sensitive
-  // information.
-  // The contents of this field SHOULD NOT be used for uniquely
-  // identifying a volume. The `volume_id` alone SHOULD be sufficient to
-  // identify the volume.
-  // A volume uniquely identified by `volume_id` SHALL always report the
-  // same volume_context.
-  // This field is OPTIONAL and when present MUST be passed to volume
-  // validation and publishing calls.
-  map<string, string> volume_context = 3;
-
-  // If specified, indicates that the volume is not empty and is
-  // pre-populated with data from the specified source.
-  // This field is OPTIONAL.
-  VolumeContentSource content_source = 4;
-
-  // Specifies where (regions, zones, racks, etc.) the provisioned
-  // volume is accessible from.
-  // A plugin that returns this field MUST also set the
-  // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
-  // An SP MAY specify multiple topologies to indicate the volume is
-  // accessible from multiple locations.
-  // COs MAY use this information along with the topology information
-  // returned by NodeGetInfo to ensure that a given volume is accessible
-  // from a given node when scheduling workloads.
-  // This field is OPTIONAL. If it is not specified, the CO MAY assume
-  // the volume is equally accessible from all nodes in the cluster and
-  // MAY schedule workloads referencing the volume on any available
-  // node.
-  //
-  // Example 1:
-  //   accessible_topology = {"region": "R1", "zone": "Z2"}
-  // Indicates a volume accessible only from the "region" "R1" and the
-  // "zone" "Z2".
-  //
-  // Example 2:
-  //   accessible_topology =
-  //     {"region": "R1", "zone": "Z2"},
-  //     {"region": "R1", "zone": "Z3"}
-  // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3"
-  // in the "region" "R1".
-  repeated Topology accessible_topology = 5;
-}
-
-message TopologyRequirement {
-  // Specifies the list of topologies the provisioned volume MUST be
-  // accessible from.
-  // This field is OPTIONAL. If TopologyRequirement is specified either
-  // requisite or preferred or both MUST be specified.
-  //
-  // If requisite is specified, the provisioned volume MUST be
-  // accessible from at least one of the requisite topologies.
-  //
-  // Given
-  //   x = number of topologies provisioned volume is accessible from
-  //   n = number of requisite topologies
-  // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1.
-  // If x==n, then the SP MUST make the provisioned volume available to
-  // all topologies from the list of requisite topologies. If it is
-  // unable to do so, the SP MUST fail the CreateVolume call.
-  // For example, if a volume should be accessible from a single zone,
-  // and requisite =
-  //   {"region": "R1", "zone": "Z2"}
-  // then the provisioned volume MUST be accessible from the "region"
-  // "R1" and the "zone" "Z2".
-  // Similarly, if a volume should be accessible from two zones, and
-  // requisite =
-  //   {"region": "R1", "zone": "Z2"},
-  //   {"region": "R1", "zone": "Z3"}
-  // then the provisioned volume MUST be accessible from the "region"
-  // "R1" and both "zone" "Z2" and "zone" "Z3".
-  //
-  // If x<n, then the SP SHALL choose x unique topologies from the list
-  // of requisite topologies. If it is unable to do so, the SP MUST fail
-  // the CreateVolume call.
-  // For example, if a volume should be accessible from a single zone,
-  // and requisite =
-  //   {"region": "R1", "zone": "Z2"},
-  //   {"region": "R1", "zone": "Z3"}
-  // then the SP may choose to make the provisioned volume available in
-  // either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1".
-  // Similarly, if a volume should be accessible from two zones, and
-  // requisite =
-  //   {"region": "R1", "zone": "Z2"},
-  //   {"region": "R1", "zone": "Z3"},
-  //   {"region": "R1", "zone": "Z4"}
-  // then the provisioned volume MUST be accessible from any combination
-  // of two unique topologies: e.g. "R1/Z2" and "R1/Z3", or "R1/Z2" and
-  //  "R1/Z4", or "R1/Z3" and "R1/Z4".
-  //
-  // If x>n, then the SP MUST make the provisioned volume available from
-  // all topologies from the list of requisite topologies and MAY choose
-  // the remaining x-n unique topologies from the list of all possible
-  // topologies. If it is unable to do so, the SP MUST fail the
-  // CreateVolume call.
-  // For example, if a volume should be accessible from two zones, and
-  // requisite =
-  //   {"region": "R1", "zone": "Z2"}
-  // then the provisioned volume MUST be accessible from the "region"
-  // "R1" and the "zone" "Z2" and the SP may select the second zone
-  // independently, e.g. "R1/Z4".
-  repeated Topology requisite = 1;
-
-  // Specifies the list of topologies the CO would prefer the volume to
-  // be provisioned in.
-  //
-  // This field is OPTIONAL. If TopologyRequirement is specified either
-  // requisite or preferred or both MUST be specified.
-  //
-  // An SP MUST attempt to make the provisioned volume available using
-  // the preferred topologies in order from first to last.
-  //
-  // If requisite is specified, all topologies in preferred list MUST
-  // also be present in the list of requisite topologies.
-  //
-  // If the SP is unable to make the provisioned volume available
-  // from any of the preferred topologies, the SP MAY choose a topology
-  // from the list of requisite topologies.
-  // If the list of requisite topologies is not specified, then the SP
-  // MAY choose from the list of all possible topologies.
-  // If the list of requisite topologies is specified and the SP is
-  // unable to make the provisioned volume available from any of the
-  // requisite topologies it MUST fail the CreateVolume call.
-  //
-  // Example 1:
-  // Given a volume should be accessible from a single zone, and
-  // requisite =
-  //   {"region": "R1", "zone": "Z2"},
-  //   {"region": "R1", "zone": "Z3"}
-  // preferred =
-  //   {"region": "R1", "zone": "Z3"}
-  // then the SP SHOULD first attempt to make the provisioned volume
-  // available from "zone" "Z3" in the "region" "R1" and fall back to
-  // "zone" "Z2" in the "region" "R1" if that is not possible.
-  //
-  // Example 2:
-  // Given a volume should be accessible from a single zone, and
-  // requisite =
-  //   {"region": "R1", "zone": "Z2"},
-  //   {"region": "R1", "zone": "Z3"},
-  //   {"region": "R1", "zone": "Z4"},
-  //   {"region": "R1", "zone": "Z5"}
-  // preferred =
-  //   {"region": "R1", "zone": "Z4"},
-  //   {"region": "R1", "zone": "Z2"}
-  // then the SP SHOULD first attempt to make the provisioned volume
-  // accessible from "zone" "Z4" in the "region" "R1" and fall back to
-  // "zone" "Z2" in the "region" "R1" if that is not possible. If that
-  // is not possible, the SP may choose between either the "zone"
-  // "Z3" or "Z5" in the "region" "R1".
-  //
-  // Example 3:
-  // Given a volume should be accessible from TWO zones (because an
-  // opaque parameter in CreateVolumeRequest, for example, specifies
-  // the volume is accessible from two zones, aka synchronously
-  // replicated), and
-  // requisite =
-  //   {"region": "R1", "zone": "Z2"},
-  //   {"region": "R1", "zone": "Z3"},
-  //   {"region": "R1", "zone": "Z4"},
-  //   {"region": "R1", "zone": "Z5"}
-  // preferred =
-  //   {"region": "R1", "zone": "Z5"},
-  //   {"region": "R1", "zone": "Z3"}
-  // then the SP SHOULD first attempt to make the provisioned volume
-  // accessible from the combination of the two "zones" "Z5" and "Z3" in
-  // the "region" "R1". If that's not possible, it should fall back to
-  // a combination of "Z5" and other possibilities from the list of
-  // requisite. If that's not possible, it should fall back to a
-  // combination of "Z3" and other possibilities from the list of
-  // requisite. If that's not possible, it should fall back to a
-  // combination of other possibilities from the list of requisite.
-  repeated Topology preferred = 2;
-}
-
-// Topology is a map of topological domains to topological segments.
-// A topological domain is a sub-division of a cluster, like "region",
-// "zone", "rack", etc.
-// A topological segment is a specific instance of a topological domain,
-// like "zone3", "rack3", etc.
-// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
-// Valid keys have two segments: an OPTIONAL prefix and name, separated
-// by a slash (/), for example: "com.company.example/zone".
-// The key name segment is REQUIRED. The prefix is OPTIONAL.
-// The key name MUST be 63 characters or less, begin and end with an
-// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
-// underscores (_), dots (.), or alphanumerics in between, for example
-// "zone".
-// The key prefix MUST be 63 characters or less, begin and end with a
-// lower-case alphanumeric character ([a-z0-9]), contain only
-// dashes (-), dots (.), or lower-case alphanumerics in between, and
-// follow domain name notation format
-// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
-// The key prefix SHOULD include the plugin's host company name and/or
-// the plugin name, to minimize the possibility of collisions with keys
-// from other plugins.
-// If a key prefix is specified, it MUST be identical across all
-// topology keys returned by the SP (across all RPCs).
-// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
-// MUST not both exist.
-// Each value (topological segment) MUST contain 1 or more strings.
-// Each string MUST be 63 characters or less and begin and end with an
-// alphanumeric character with '-', '_', '.', or alphanumerics in
-// between.
-message Topology {
-  map<string, string> segments = 1;
-}
-message DeleteVolumeRequest {
-  // The ID of the volume to be deprovisioned.
-  // This field is REQUIRED.
-  string volume_id = 1;
-
-  // Secrets required by plugin to complete volume deletion request.
-  // This field is OPTIONAL. Refer to the `Secrets Requirements`
-  // section on how to use this field.
-  map<string, string> secrets = 2 [(csi_secret) = true];
-}
-
-message DeleteVolumeResponse {
-  // Intentionally empty.
-}
-message ControllerPublishVolumeRequest {
-  // The ID of the volume to be used on a node.
-  // This field is REQUIRED.
-  string volume_id = 1;
-
-  // The ID of the node. This field is REQUIRED. The CO SHALL set this
-  // field to match the node ID returned by `NodeGetInfo`.
-  string node_id = 2;
-
-  // Volume capability describing how the CO intends to use this volume.
-  // SP MUST ensure the CO can use the published volume as described.
-  // Otherwise SP MUST return the appropriate gRPC error code.
-  // This is a REQUIRED field.
-  VolumeCapability volume_capability = 3;
-
-  // Indicates SP MUST publish the volume in readonly mode.
-  // CO MUST set this field to false if SP does not have the
-  // PUBLISH_READONLY controller capability.
-  // This is a REQUIRED field.
-  bool readonly = 4;
-
-  // Secrets required by plugin to complete controller publish volume
-  // request. This field is OPTIONAL. Refer to the
-  // `Secrets Requirements` section on how to use this field.
-  map<string, string> secrets = 5 [(csi_secret) = true];
-
-  // Volume context as returned by CO in CreateVolumeRequest. This field
-  // is OPTIONAL and MUST match the volume_context of the volume
-  // identified by `volume_id`.
-  map<string, string> volume_context = 6;
-}
-
-message ControllerPublishVolumeResponse {
-  // Opaque static publish properties of the volume. SP MAY use this
-  // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume`
-  // calls have contextual information.
-  // The contents of this field SHALL be opaque to a CO.
-  // The contents of this field SHALL NOT be mutable.
-  // The contents of this field SHALL be safe for the CO to cache.
-  // The contents of this field SHOULD NOT contain sensitive
-  // information.
-  // The contents of this field SHOULD NOT be used for uniquely
-  // identifying a volume. The `volume_id` alone SHOULD be sufficient to
-  // identify the volume.
-  // This field is OPTIONAL and when present MUST be passed to
-  // subsequent `NodeStageVolume` or `NodePublishVolume` calls
-  map<string, string> publish_context = 1;
-}
-message ControllerUnpublishVolumeRequest {
-  // The ID of the volume. This field is REQUIRED.
-  string volume_id = 1;
-
-  // The ID of the node. This field is OPTIONAL. The CO SHOULD set this
-  // field to match the node ID returned by `NodeGetInfo` or leave it
-  // unset. If the value is set, the SP MUST unpublish the volume from
-  // the specified node. If the value is unset, the SP MUST unpublish
-  // the volume from all nodes it is published to.
-  string node_id = 2;
-
-  // Secrets required by plugin to complete controller unpublish volume
-  // request. This SHOULD be the same secrets passed to the
-  // ControllerPublishVolume call for the specified volume.
-  // This field is OPTIONAL. Refer to the `Secrets Requirements`
-  // section on how to use this field.
-  map<string, string> secrets = 3 [(csi_secret) = true];
-}
-
-message ControllerUnpublishVolumeResponse {
-  // Intentionally empty.
-}
-message ValidateVolumeCapabilitiesRequest {
-  // The ID of the volume to check. This field is REQUIRED.
-  string volume_id = 1;
-
-  // Volume context as returned by CO in CreateVolumeRequest. This field
-  // is OPTIONAL and MUST match the volume_context of the volume
-  // identified by `volume_id`.
-  map<string, string> volume_context = 2;
-
-  // The capabilities that the CO wants to check for the volume. This
-  // call SHALL return "confirmed" only if all the volume capabilities
-  // specified below are supported. This field is REQUIRED.
-  repeated VolumeCapability volume_capabilities = 3;
-
-  // See CreateVolumeRequest.parameters.
-  // This field is OPTIONAL.
-  map<string, string> parameters = 4;
-
-  // Secrets required by plugin to complete volume validation request.
-  // This field is OPTIONAL. Refer to the `Secrets Requirements`
-  // section on how to use this field.
-  map<string, string> secrets = 5 [(csi_secret) = true];
-}
-
-message ValidateVolumeCapabilitiesResponse {
-  message Confirmed {
-    // Volume context validated by the plugin.
-    // This field is OPTIONAL.
-    map<string, string> volume_context = 1;
-
-    // Volume capabilities supported by the plugin.
-    // This field is REQUIRED.
-    repeated VolumeCapability volume_capabilities = 2;
-
-    // The volume creation parameters validated by the plugin.
-    // This field is OPTIONAL.
-    map<string, string> parameters = 3;
-  }
-
-  // Confirmed indicates to the CO the set of capabilities that the
-  // plugin has validated. This field SHALL only be set to a non-empty
-  // value for successful validation responses.
-  // For successful validation responses, the CO SHALL compare the
-  // fields of this message to the originally requested capabilities in
-  // order to guard against an older plugin reporting "valid" for newer
-  // capability fields that it does not yet understand.
-  // This field is OPTIONAL.
-  Confirmed confirmed = 1;
-
-  // Message to the CO if `confirmed` above is empty. This field is
-  // OPTIONAL.
-  // An empty string is equal to an unspecified field value.
-  string message = 2;
-}
-message ListVolumesRequest {
-  // If specified (non-zero value), the Plugin MUST NOT return more
-  // entries than this number in the response. If the actual number of
-  // entries is more than this number, the Plugin MUST set `next_token`
-  // in the response which can be used to get the next page of entries
-  // in the subsequent `ListVolumes` call. This field is OPTIONAL. If
-  // not specified (zero value), it means there is no restriction on the
-  // number of entries that can be returned.
-  // The value of this field MUST NOT be negative.
-  int32 max_entries = 1;
-
-  // A token to specify where to start paginating. Set this field to
-  // `next_token` returned by a previous `ListVolumes` call to get the
-  // next page of entries. This field is OPTIONAL.
-  // An empty string is equal to an unspecified field value.
-  string starting_token = 2;
-}
-
-message ListVolumesResponse {
-  message Entry {
-    Volume volume = 1;
-  }
-
-  repeated Entry entries = 1;
-
-  // This token allows you to get the next page of entries for
-  // the `ListVolumes` request. If the number of entries is larger than
-  // `max_entries`, use the `next_token` as a value for the
-  // `starting_token` field in the next `ListVolumes` request. This
-  // field is OPTIONAL.
-  // An empty string is equal to an unspecified field value.
-  string next_token = 2;
-}
-message GetCapacityRequest {
-  // If specified, the Plugin SHALL report the capacity of the storage
-  // that can be used to provision volumes that satisfy ALL of the
-  // specified `volume_capabilities`. These are the same
-  // `volume_capabilities` the CO will use in `CreateVolumeRequest`.
-  // This field is OPTIONAL.
-  repeated VolumeCapability volume_capabilities = 1;
-
-  // If specified, the Plugin SHALL report the capacity of the storage
-  // that can be used to provision volumes with the given Plugin
-  // specific `parameters`. These are the same `parameters` the CO will
-  // use in `CreateVolumeRequest`. This field is OPTIONAL.
-  map<string, string> parameters = 2;
-
-  // If specified, the Plugin SHALL report the capacity of the storage
-  // that can be used to provision volumes in the specified
-  // `accessible_topology`. This is the same as the
-  // `accessible_topology` the CO returns in a `CreateVolumeResponse`.
-  // This field is OPTIONAL. This field SHALL NOT be set unless the
-  // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability.
-  Topology accessible_topology = 3;
-}
-
-message GetCapacityResponse {
-  // The available capacity, in bytes, of the storage that can be used
-  // to provision volumes. If `volume_capabilities` or `parameters` is
-  // specified in the request, the Plugin SHALL take those into
-  // consideration when calculating the available capacity of the
-  // storage. This field is REQUIRED.
-  // The value of this field MUST NOT be negative.
-  int64 available_capacity = 1;
-}
-message ControllerGetCapabilitiesRequest {
-  // Intentionally empty.
-}
-
-message ControllerGetCapabilitiesResponse {
-  // All the capabilities that the controller service supports. This
-  // field is OPTIONAL.
-  repeated ControllerServiceCapability capabilities = 1;
-}
-
-// Specifies a capability of the controller service.
-message ControllerServiceCapability {
-  message RPC {
-    enum Type {
-      UNKNOWN = 0;
-      CREATE_DELETE_VOLUME = 1;
-      PUBLISH_UNPUBLISH_VOLUME = 2;
-      LIST_VOLUMES = 3;
-      GET_CAPACITY = 4;
-      // Currently the only way to consume a snapshot is to create
-      // a volume from it. Therefore plugins supporting
-      // CREATE_DELETE_SNAPSHOT MUST support creating a volume from
-      // a snapshot.
-      CREATE_DELETE_SNAPSHOT = 5;
-      LIST_SNAPSHOTS = 6;
-
-      // Plugins supporting volume cloning at the storage level MAY
-      // report this capability. The source volume MUST be managed by
-      // the same plugin. Not all combinations of volume sources
-      // and parameters MAY work.
-      CLONE_VOLUME = 7;
-
-      // Indicates the SP supports ControllerPublishVolume.readonly
-      // field.
-      PUBLISH_READONLY = 8;
-
-      // See VolumeExpansion for details.
-      EXPAND_VOLUME = 9;
-    }
-
-    Type type = 1;
-  }
-
-  oneof type {
-    // RPC that the controller supports.
-    RPC rpc = 1;
-  }
-}
-message CreateSnapshotRequest {
-  // The ID of the source volume to be snapshotted.
-  // This field is REQUIRED.
-  string source_volume_id = 1;
-
-  // The suggested name for the snapshot. This field is REQUIRED for
-  // idempotency.
-  // Any Unicode string that conforms to the length limit is allowed
-  // except those containing the following banned characters:
-  // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
-  // (These are control characters other than commonly used whitespace.)
-  string name = 2;
-
-  // Secrets required by plugin to complete snapshot creation request.
-  // This field is OPTIONAL. Refer to the `Secrets Requirements`
-  // section on how to use this field.
-  map<string, string> secrets = 3 [(csi_secret) = true];
-
-  // Plugin specific parameters passed in as opaque key-value pairs.
-  // This field is OPTIONAL. The Plugin is responsible for parsing and
-  // validating these parameters. COs will treat these as opaque.
-  // Use cases for opaque parameters:
-  // - Specify a policy to automatically clean up the snapshot.
-  // - Specify an expiration date for the snapshot.
-  // - Specify whether the snapshot is readonly or read/write.
-  // - Specify if the snapshot should be replicated to some place.
-  // - Specify primary or secondary for replication systems that
-  //   support snapshotting only on primary.
-  map<string, string> parameters = 4;
-}
-
-message CreateSnapshotResponse {
-  // Contains all attributes of the newly created snapshot that are
-  // relevant to the CO along with information required by the Plugin
-  // to uniquely identify the snapshot. This field is REQUIRED.
-  Snapshot snapshot = 1;
-}
-
-// Information about a specific snapshot.
-message Snapshot {
-  // This is the complete size of the snapshot in bytes. The purpose of
-  // this field is to give CO guidance on how much space is needed to
-  // create a volume from this snapshot. The size of the volume MUST NOT
-  // be less than the size of the source snapshot. This field is
-  // OPTIONAL. If this field is not set, it indicates that this size is
-  // unknown. The value of this field MUST NOT be negative and a size of
-  // zero means it is unspecified.
-  int64 size_bytes = 1;
-
-  // The identifier for this snapshot, generated by the plugin.
-  // This field is REQUIRED.
-  // This field MUST contain enough information to uniquely identify
-  // this specific snapshot vs all other snapshots supported by this
-  // plugin.
-  // This field SHALL be used by the CO in subsequent calls to refer to
-  // this snapshot.
-  // The SP is NOT responsible for global uniqueness of snapshot_id
-  // across multiple SPs.
-  string snapshot_id = 2;
-
-  // Identity information for the source volume. Note that creating a
-  // snapshot from a snapshot is not supported here so the source has to
-  // be a volume. This field is REQUIRED.
-  string source_volume_id = 3;
-
-  // Timestamp when the point-in-time snapshot is taken on the storage
-  // system. This field is REQUIRED.
-  .google.protobuf.Timestamp creation_time = 4;
-
-  // Indicates if a snapshot is ready to use as a
-  // `volume_content_source` in a `CreateVolumeRequest`. The default
-  // value is false. This field is REQUIRED.
-  bool ready_to_use = 5;
-}
-message DeleteSnapshotRequest {
-  // The ID of the snapshot to be deleted.
-  // This field is REQUIRED.
-  string snapshot_id = 1;
-
-  // Secrets required by plugin to complete snapshot deletion request.
-  // This field is OPTIONAL. Refer to the `Secrets Requirements`
-  // section on how to use this field.
-  map<string, string> secrets = 2 [(csi_secret) = true];
-}
-
-message DeleteSnapshotResponse {}
-// List all snapshots on the storage system regardless of how they were
-// created.
-message ListSnapshotsRequest {
-  // If specified (non-zero value), the Plugin MUST NOT return more
-  // entries than this number in the response. If the actual number of
-  // entries is more than this number, the Plugin MUST set `next_token`
-  // in the response which can be used to get the next page of entries
-  // in the subsequent `ListSnapshots` call. This field is OPTIONAL. If
-  // not specified (zero value), it means there is no restriction on the
-  // number of entries that can be returned.
-  // The value of this field MUST NOT be negative.
-  int32 max_entries = 1;
-
-  // A token to specify where to start paginating. Set this field to
-  // `next_token` returned by a previous `ListSnapshots` call to get the
-  // next page of entries. This field is OPTIONAL.
-  // An empty string is equal to an unspecified field value.
-  string starting_token = 2;
-
-  // Identity information for the source volume. This field is OPTIONAL.
-  // It can be used to list snapshots by volume.
-  string source_volume_id = 3;
-
-  // Identity information for a specific snapshot. This field is
-  // OPTIONAL. It can be used to list only a specific snapshot.
-  // ListSnapshots will return with current snapshot information
-  // and will not block if the snapshot is being processed after
-  // it is cut.
-  string snapshot_id = 4;
-}
-
-message ListSnapshotsResponse {
-  message Entry {
-    Snapshot snapshot = 1;
-  }
-
-  repeated Entry entries = 1;
-
-  // This token allows you to get the next page of entries for
-  // the `ListSnapshots` request. If the number of entries is larger than
-  // `max_entries`, use the `next_token` as a value for the
-  // `starting_token` field in the next `ListSnapshots` request. This
-  // field is OPTIONAL.
-  // An empty string is equal to an unspecified field value.
-  string next_token = 2;
-}
-message ControllerExpandVolumeRequest {
-  // The ID of the volume to expand. This field is REQUIRED.
-  string volume_id = 1;
-
-  // This allows CO to specify the capacity requirements of the volume
-  // after expansion. This field is REQUIRED.
-  CapacityRange capacity_range = 2;
-
-  // Secrets required by the plugin for expanding the volume.
-  // This field is OPTIONAL.
-  map<string, string> secrets = 3 [(csi_secret) = true];
-}
-
-message ControllerExpandVolumeResponse {
-  // Capacity of volume after expansion. This field is REQUIRED.
-  int64 capacity_bytes = 1;
-
-  // Whether node expansion is required for the volume. When true
-  // the CO MUST make NodeExpandVolume RPC call on the node. This field
-  // is REQUIRED.
-  bool node_expansion_required = 2;
-}
-message NodeStageVolumeRequest {
-  // The ID of the volume to publish. This field is REQUIRED.
-  string volume_id = 1;
-
-  // The CO SHALL set this field to the value returned by
-  // `ControllerPublishVolume` if the corresponding Controller Plugin
-  // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
-  // left unset if the corresponding Controller Plugin does not have
-  // this capability. This is an OPTIONAL field.
-  map<string, string> publish_context = 2;
-
-  // The path to which the volume MAY be staged. It MUST be an
-  // absolute path in the root filesystem of the process serving this
-  // request, and MUST be a directory. The CO SHALL ensure that there
-  // is only one `staging_target_path` per volume. The CO SHALL ensure
-  // that the path is a directory and that the process serving the
-  // request has `read` and `write` permission to that directory. The
-  // CO SHALL be responsible for creating the directory if it does not
-  // exist.
-  // This is a REQUIRED field.
-  string staging_target_path = 3;
-
-  // Volume capability describing how the CO intends to use this volume.
-  // SP MUST ensure the CO can use the staged volume as described.
-  // Otherwise SP MUST return the appropriate gRPC error code.
-  // This is a REQUIRED field.
-  VolumeCapability volume_capability = 4;
-
-  // Secrets required by plugin to complete node stage volume request.
-  // This field is OPTIONAL. Refer to the `Secrets Requirements`
-  // section on how to use this field.
-  map<string, string> secrets = 5 [(csi_secret) = true];
-
-  // Volume context as returned by SP in CreateVolumeResponse. This field
-  // is OPTIONAL and MUST match the volume_context of the volume
-  // identified by `volume_id`.
-  map<string, string> volume_context = 6;
-}
-
-message NodeStageVolumeResponse {
-  // Intentionally empty.
-}
-message NodeUnstageVolumeRequest {
-  // The ID of the volume. This field is REQUIRED.
-  string volume_id = 1;
-
-  // The path at which the volume was staged. It MUST be an absolute
-  // path in the root filesystem of the process serving this request.
-  // This is a REQUIRED field.
-  string staging_target_path = 2;
-}
-
-message NodeUnstageVolumeResponse {
-  // Intentionally empty.
-}
-message NodePublishVolumeRequest {
-  // The ID of the volume to publish. This field is REQUIRED.
-  string volume_id = 1;
-
-  // The CO SHALL set this field to the value returned by
-  // `ControllerPublishVolume` if the corresponding Controller Plugin
-  // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
-  // left unset if the corresponding Controller Plugin does not have
-  // this capability. This is an OPTIONAL field.
-  map<string, string> publish_context = 2;
-
-  // The path to which the volume was staged by `NodeStageVolume`.
-  // It MUST be an absolute path in the root filesystem of the process
-  // serving this request.
-  // It MUST be set if the Node Plugin implements the
-  // `STAGE_UNSTAGE_VOLUME` node capability.
-  // This is an OPTIONAL field.
-  string staging_target_path = 3;
-
-  // The path to which the volume will be published. It MUST be an
-  // absolute path in the root filesystem of the process serving this
-  // request. The CO SHALL ensure uniqueness of target_path per volume.
-  // The CO SHALL ensure that the parent directory of this path exists
-  // and that the process serving the request has `read` and `write`
-  // permissions to that parent directory.
-  // For volumes with an access type of block, the SP SHALL place the
-  // block device at target_path.
-  // For volumes with an access type of mount, the SP SHALL place the
-  // mounted directory at target_path.
-  // Creation of target_path is the responsibility of the SP.
-  // This is a REQUIRED field.
-  string target_path = 4;
-
-  // Volume capability describing how the CO intends to use this volume.
-  // SP MUST ensure the CO can use the published volume as described.
-  // Otherwise SP MUST return the appropriate gRPC error code.
-  // This is a REQUIRED field.
-  VolumeCapability volume_capability = 5;
-
-  // Indicates SP MUST publish the volume in readonly mode.
-  // This field is REQUIRED.
-  bool readonly = 6;
-
-  // Secrets required by plugin to complete node publish volume request.
-  // This field is OPTIONAL. Refer to the `Secrets Requirements`
-  // section on how to use this field.
-  map<string, string> secrets = 7 [(csi_secret) = true];
-
-  // Volume context as returned by SP in CreateVolumeResponse. This field
-  // is OPTIONAL and MUST match the volume_context of the volume
-  // identified by `volume_id`.
-  map<string, string> volume_context = 8;
-}
-
-message NodePublishVolumeResponse {
-  // Intentionally empty.
-}
-message NodeUnpublishVolumeRequest {
-  // The ID of the volume. This field is REQUIRED.
-  string volume_id = 1;
-
-  // The path at which the volume was published. It MUST be an absolute
-  // path in the root filesystem of the process serving this request.
-  // The SP MUST delete the file or directory it created at this path.
-  // This is a REQUIRED field.
-  string target_path = 2;
-}
-
-message NodeUnpublishVolumeResponse {
-  // Intentionally empty.
-}
-message NodeGetVolumeStatsRequest {
-  // The ID of the volume. This field is REQUIRED.
-  string volume_id = 1;
-
-  // It can be any valid path where the volume was previously
-  // staged or published.
-  // It MUST be an absolute path in the root filesystem of
-  // the process serving this request.
-  // This is a REQUIRED field.
-  string volume_path = 2;
-}
-
-message NodeGetVolumeStatsResponse {
-  // This field is OPTIONAL.
-  repeated VolumeUsage usage = 1;
-}
-
-message VolumeUsage {
-  enum Unit {
-    UNKNOWN = 0;
-    BYTES = 1;
-    INODES = 2;
-  }
-  // The available capacity in the specified Unit. This field is OPTIONAL.
-  // The value of this field MUST NOT be negative.
-  int64 available = 1;
-
-  // The total capacity in the specified Unit. This field is REQUIRED.
-  // The value of this field MUST NOT be negative.
-  int64 total = 2;
-
-  // The used capacity in the specified Unit. This field is OPTIONAL.
-  // The value of this field MUST NOT be negative.
-  int64 used = 3;
-
-  // Units by which values are measured. This field is REQUIRED.
-  Unit unit = 4;
-}
-message NodeGetCapabilitiesRequest {
-  // Intentionally empty.
-}
-
-message NodeGetCapabilitiesResponse {
-  // All the capabilities that the node service supports. This field
-  // is OPTIONAL.
-  repeated NodeServiceCapability capabilities = 1;
-}
-
-// Specifies a capability of the node service.
-message NodeServiceCapability {
-  message RPC {
-    enum Type {
-      UNKNOWN = 0;
-      STAGE_UNSTAGE_VOLUME = 1;
-      // If the Plugin implements the GET_VOLUME_STATS capability
-      // then it MUST implement the NodeGetVolumeStats RPC
-      // call for fetching volume statistics.
-      GET_VOLUME_STATS = 2;
-      // See VolumeExpansion for details.
-      EXPAND_VOLUME = 3;
-    }
-
-    Type type = 1;
-  }
-
-  oneof type {
-    // RPC that the node service supports.
-    RPC rpc = 1;
-  }
-}
-message NodeGetInfoRequest {
-}
-
-message NodeGetInfoResponse {
-  // The identifier of the node as understood by the SP.
-  // This field is REQUIRED.
-  // This field MUST contain enough information to uniquely identify
-  // this specific node vs all other nodes supported by this plugin.
-  // This field SHALL be used by the CO in subsequent calls, including
-  // `ControllerPublishVolume`, to refer to this node.
-  // The SP is NOT responsible for global uniqueness of node_id across
-  // multiple SPs.
-  string node_id = 1;
-
-  // Maximum number of volumes that the controller can publish to the
-  // node. If the value is not set or is zero, the CO SHALL decide how
-  // many volumes of this type can be published by the controller to
-  // the node. The plugin MUST NOT set negative values here.
-  // This field is OPTIONAL.
-  int64 max_volumes_per_node = 2;
-
-  // Specifies where (regions, zones, racks, etc.) the node is
-  // accessible from.
-  // A plugin that returns this field MUST also set the
-  // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
-  // COs MAY use this information along with the topology information
-  // returned in CreateVolumeResponse to ensure that a given volume is
-  // accessible from a given node when scheduling workloads.
-  // This field is OPTIONAL. If it is not specified, the CO MAY assume
-  // the node is not subject to any topological constraint, and MAY
-  // schedule workloads that reference any volume V, such that there are
-  // no topological constraints declared for V.
-  //
-  // Example 1:
-  //   accessible_topology =
-  //     {"region": "R1", "zone": "R2"}
-  // Indicates the node exists within the "region" "R1" and the "zone"
-  // "Z2".
-  Topology accessible_topology = 3;
-}
-message NodeExpandVolumeRequest {
-  // The ID of the volume. This field is REQUIRED.
-  string volume_id = 1;
-
-  // The path on which the volume is available. This field is REQUIRED.
-  string volume_path = 2;
-
-  // This allows CO to specify the capacity requirements of the volume
-  // after expansion. If capacity_range is omitted then a plugin MAY
-  // inspect the file system of the volume to determine the maximum
-  // capacity to which the volume can be expanded. In such cases a
-  // plugin MAY expand the volume to its maximum capacity.
-  // This field is OPTIONAL.
-  CapacityRange capacity_range = 3;
-}
-
-message NodeExpandVolumeResponse {
-  // The capacity of the volume in bytes. This field is OPTIONAL.
-  int64 capacity_bytes = 1;
-}
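
The pagination contract defined above (`max_entries`, `starting_token`, `next_token`) can be exercised end-to-end against a running controller. A minimal sketch with grpcurl and jq against the CSI v1 Controller service; the unix socket path is hypothetical and server reflection support is assumed:

    CSI_SOCK=/var/lib/csi/controller.sock   # hypothetical socket path
    TOKEN=""
    while :; do
      RESP=$(grpcurl -unix -plaintext \
        -d "{\"max_entries\": 100, \"starting_token\": \"$TOKEN\"}" \
        "$CSI_SOCK" csi.v1.Controller/ListVolumes)
      echo "$RESP"
      TOKEN=$(echo "$RESP" | jq -r '.nextToken // empty')
      [ -z "$TOKEN" ] && break   # empty next_token: last page reached
    done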
diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml
deleted file mode 100644
index 0f5c3c4..0000000
--- a/hadoop-ozone/datanode/pom.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-datanode</artifactId>
-  <name>Apache Hadoop Ozone Datanode</name>
-  <packaging>jar</packaging>
-  <version>0.5.0-SNAPSHOT</version>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>compile</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>com.sun.xml.bind</groupId>
-          <artifactId>jaxb-impl</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-container-service</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.xml.bind</groupId>
-      <artifactId>jaxb-impl</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.xml.bind</groupId>
-      <artifactId>jaxb-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>javax.activation</groupId>
-      <artifactId>activation</artifactId>
-    </dependency>
-  </dependencies>
-</project>
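
The module above could also be built in isolation with standard Maven reactor flags; a sketch, assuming the pre-removal source tree with pom.ozone.xml at the repository root (the same root POM the check scripts below use):

    mvn -B -f pom.ozone.xml -DskipTests -pl :hadoop-ozone-datanode -am package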
diff --git a/hadoop-ozone/dev-support/checks/README.md b/hadoop-ozone/dev-support/checks/README.md
deleted file mode 100755
index ba7202c..0000000
--- a/hadoop-ozone/dev-support/checks/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-# Ozone checks
-
-This directory contains a collection of easy-to-use helper scripts to execute various types of tests on the ozone/hdds codebase.
-
-The contract of the scripts is very simple:
-
- 1. Executing the scripts without any parameter will check the hdds/ozone project
- 2. Shell exit code represents the result of the check (if failed, exits with non-zero code)
- 3. Detailed information may be saved to the $OUTPUT_DIR (if it's not set, the root-level ./target will be used).
- 4. The standard output should contain the full log of the build AND the results.
- 5. The content of the $OUTPUT_DIR can be:
-    * `summary.html`/`summary.md`/`summary.txt`: contains a human-readable overview of the failed tests (used by reporting)
-    * `failures`: contains the number of failed tests (used by reporting)
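
A usage sketch of that contract (any check script works the same way; the output directory here is an arbitrary choice):

    OUTPUT_DIR=/tmp/ozone-checks hadoop-ozone/dev-support/checks/checkstyle.sh
    echo "exit code: $?"               # non-zero means the check failed
    cat /tmp/ozone-checks/failures     # the failure counter
    cat /tmp/ozone-checks/summary.txt  # human-readable overview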
diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
deleted file mode 100755
index 81551d1..0000000
--- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-REPORT_DIR=${REPORT_DIR:-$PWD}
-
-_realpath() {
-  if realpath "$@" > /dev/null; then
-    realpath "$@"
-  else
-    local relative_to
-    relative_to=$(realpath "${1/--relative-to=/}") || return 1
-    realpath "$2" | sed -e "s@${relative_to}/@@"
-  fi
-}
-
-## generate summary txt file
-find "." -name 'TEST*.xml' -print0 \
-    | xargs -n1 -0 "grep" -l -E "<failure|<error" \
-    | awk -F/ '{sub("'"TEST-"'",""); sub(".xml",""); print $NF}' \
-    | tee "$REPORT_DIR/summary.txt"
-
-#Copy heap dumps and dump leftovers
-find "." -name "*.hprof" \
-    -or -name "*.dump" \
-    -or -name "*.dumpstream" \
-    -or -name "hs_err_*.log" \
-  -exec cp {} "$REPORT_DIR/" \;
-
-## Add the tests where the JVM is crashed
-grep -A1 'Crashed tests' "${REPORT_DIR}/output.log" \
-  | grep -v -e 'Crashed tests' -e '--' \
-  | cut -f2- -d' ' \
-  | sort -u >> "${REPORT_DIR}/summary.txt"
-
-## Check if Maven was killed
-if grep -q 'Killed.* mvn .* test ' "${REPORT_DIR}/output.log"; then
-  echo 'Maven test run was killed' >> "${REPORT_DIR}/summary.txt"
-fi
-
-#Collect all the report files of FAILED tests
-while IFS= read -r -d '' dir; do
-   while IFS=$'\n' read -r file; do
-      DIR_OF_TESTFILE=$(dirname "$file")
-      NAME_OF_TESTFILE=$(basename "$file")
-      NAME_OF_TEST="${NAME_OF_TESTFILE%.*}"
-      DESTDIRNAME=$(_realpath --relative-to="$PWD" "$DIR_OF_TESTFILE/../..") || continue
-      mkdir -p "$REPORT_DIR/$DESTDIRNAME"
-      #shellcheck disable=SC2086
-      cp -r "$DIR_OF_TESTFILE"/*$NAME_OF_TEST* "$REPORT_DIR/$DESTDIRNAME/"
-   done < <(grep -l -r FAILURE --include="*.txt" "$dir" | grep -v output.txt)
-done < <(find "." -name surefire-reports -print0)
-
-## generate summary markdown file
-export SUMMARY_FILE="$REPORT_DIR/summary.md"
-for TEST_RESULT_FILE in $(find "$REPORT_DIR" -name "*.txt" | grep -v output); do
-
-    FAILURES=$(grep FAILURE "$TEST_RESULT_FILE" | grep "Tests run" | awk '{print $18}' | sort | uniq)
-
-    for FAILURE in $FAILURES; do
-        TEST_RESULT_LOCATION="$(_realpath --relative-to="$REPORT_DIR" "$TEST_RESULT_FILE")"
-        TEST_OUTPUT_LOCATION="${TEST_RESULT_LOCATION//.txt/-output.txt}"
-        printf " * [%s](%s) ([output](%s))\n" "$FAILURE" "$TEST_RESULT_LOCATION" "$TEST_OUTPUT_LOCATION" >> "$SUMMARY_FILE"
-    done
-done
-
-if [ -s "$SUMMARY_FILE" ]; then
-   printf "# Failing tests: \n\n" | cat - "$SUMMARY_FILE" > temp && mv temp "$SUMMARY_FILE"
-fi
-
-## generate counter
-wc -l "$REPORT_DIR/summary.txt" | awk '{print $1}'> "$REPORT_DIR/failures"
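
This helper is meant to be sourced, not executed: unit.sh and integration.sh below set REPORT_DIR and source it after their Maven run. A sketch of the calling pattern; piping the Maven log into output.log is an assumption about the caller, since the script greps that file for crashed and killed runs:

    REPORT_DIR=/tmp/unit-report
    mkdir -p "$REPORT_DIR"
    mvn -B -fn test | tee "$REPORT_DIR/output.log"
    source hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
    [ "$(cat "$REPORT_DIR/failures")" = "0" ] || echo "some tests failed"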
diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh
deleted file mode 100755
index ee03c58..0000000
--- a/hadoop-ozone/dev-support/checks/acceptance.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd "$DIR/../../.." || exit 1
-
-REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/acceptance"}
-mkdir -p "$REPORT_DIR"
-
-OZONE_VERSION=$(grep "<ozone.version>" "$DIR/../../pom.xml" | sed 's/<[^>]*>//g'|  sed 's/^[ \t]*//')
-DIST_DIR="$DIR/../../dist/target/ozone-$OZONE_VERSION"
-
-if [ ! -d "$DIST_DIR" ]; then
-    echo "Distribution dir is missing. Doing a full build"
-    "$DIR/build.sh"
-fi
-
-cd "$DIST_DIR/compose" || exit 1
-./test-all.sh
-RES=$?
-cp result/* "$REPORT_DIR/"
-cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html"
-exit $RES
diff --git a/hadoop-ozone/dev-support/checks/author.sh b/hadoop-ozone/dev-support/checks/author.sh
deleted file mode 100755
index 92903f9..0000000
--- a/hadoop-ozone/dev-support/checks/author.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd "$DIR/../../.." || exit 1
-
-REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/author"}
-mkdir -p "$REPORT_DIR"
-REPORT_FILE="$REPORT_DIR/summary.txt"
-
-#hide this string to not confuse yetus
-AUTHOR="uthor"
-AUTHOR="@a${AUTHOR}"
-
-grep -r --include="*.java" "$AUTHOR" . | tee "$REPORT_FILE"
-
-wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures"
-
-if [[ -s "${REPORT_FILE}" ]]; then
-   exit 1
-fi
diff --git a/hadoop-ozone/dev-support/checks/blockade.sh b/hadoop-ozone/dev-support/checks/blockade.sh
deleted file mode 100755
index a48d2b5..0000000
--- a/hadoop-ozone/dev-support/checks/blockade.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd "$DIR/../../.." || exit 1
-
-OZONE_VERSION=$(grep "<ozone.version>" "$DIR/../../pom.xml" | sed 's/<[^>]*>//g'|  sed 's/^[ \t]*//')
-cd "$DIR/../../dist/target/ozone-$OZONE_VERSION/tests" || exit 1
-
-source "${DIR}/../../dist/target/ozone-${OZONE_VERSION}/compose/ozoneblockade/.env"
-export OZONE_RUNNER_VERSION
-export HDDS_VERSION
-
-python -m pytest -s blockade
-exit $?
diff --git a/hadoop-ozone/dev-support/checks/build.sh b/hadoop-ozone/dev-support/checks/build.sh
deleted file mode 100755
index 1197330..0000000
--- a/hadoop-ozone/dev-support/checks/build.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd "$DIR/../../.." || exit 1
-
-export MAVEN_OPTS="-Xmx4096m"
-mvn -B -f pom.ozone.xml -Dmaven.javadoc.skip=true -DskipTests clean install
-exit $?
diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh
deleted file mode 100755
index 685bf14..0000000
--- a/hadoop-ozone/dev-support/checks/checkstyle.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd "$DIR/../../.." || exit 1
-
-BASE_DIR="$(pwd -P)"
-REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/checkstyle"}
-mkdir -p "$REPORT_DIR"
-REPORT_FILE="$REPORT_DIR/summary.txt"
-
-mvn -B -fn checkstyle:check -f pom.ozone.xml
-
-#Print out the exact violations with parsing XML results with sed
-find "." -name checkstyle-errors.xml -print0 \
-  | xargs -0 sed '$!N; /<file.*\n<\/file/d;P;D' \
-  | sed \
-      -e '/<\?xml.*>/d' \
-      -e '/<checkstyle.*/d' \
-      -e '/<\/.*/d' \
-      -e 's/<file name="\([^"]*\)".*/\1/' \
-      -e 's/<error.*line="\([[:digit:]]*\)".*message="\([^"]*\)".*/ \1: \2/' \
-      -e "s!^${BASE_DIR}/!!" \
-  | tee "$REPORT_FILE"
-
-## generate counter
-grep -c ':' "$REPORT_FILE" > "$REPORT_DIR/failures"
-
-if [[ -s "${REPORT_FILE}" ]]; then
-   exit 1
-fi
diff --git a/hadoop-ozone/dev-support/checks/findbugs.sh b/hadoop-ozone/dev-support/checks/findbugs.sh
deleted file mode 100755
index ccbf2ed..0000000
--- a/hadoop-ozone/dev-support/checks/findbugs.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd "$DIR/../../.." || exit 1
-
-if ! type unionBugs >/dev/null 2>&1 || ! type convertXmlToText >/dev/null 2>&1; then
-  mvn -B -fae compile spotbugs:check -f pom.ozone.xml
-  exit $?
-fi
-
-mvn -B -fae compile spotbugs:spotbugs -f pom.ozone.xml
-
-REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/findbugs"}
-mkdir -p "$REPORT_DIR"
-REPORT_FILE="$REPORT_DIR/summary.txt"
-
-touch "$REPORT_FILE"
-
-find hadoop-hdds hadoop-ozone -name spotbugsXml.xml -print0 | xargs -0 unionBugs -output "${REPORT_DIR}"/summary.xml
-convertXmlToText "${REPORT_DIR}"/summary.xml | tee -a "${REPORT_FILE}"
-convertXmlToText -html:fancy-hist.xsl "${REPORT_DIR}"/summary.xml "${REPORT_DIR}"/summary.html
-
-wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures"
-
-if [[ -s "${REPORT_FILE}" ]]; then
-   exit 1
-fi
diff --git a/hadoop-ozone/dev-support/checks/integration.sh b/hadoop-ozone/dev-support/checks/integration.sh
deleted file mode 100755
index 52e3576..0000000
--- a/hadoop-ozone/dev-support/checks/integration.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd "$DIR/../../.." || exit 1
-
-export MAVEN_OPTS="-Xmx4096m"
-mvn -B install -f pom.ozone.xml -DskipTests
-mvn -B -fn test -f pom.ozone.xml -pl :hadoop-ozone-integration-test,:hadoop-ozone-filesystem,:hadoop-ozone-tools \
-  -Dtest=\!TestMiniChaosOzoneCluster "$@"
-
-REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/integration"}
-mkdir -p "$REPORT_DIR"
-
-# shellcheck source=hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
-source "$DIR/_mvn_unit_report.sh"
-
-if [[ -s "$REPORT_DIR/summary.txt" ]] ; then
-    exit 1
-fi
-exit 0
diff --git a/hadoop-ozone/dev-support/checks/isolation.sh b/hadoop-ozone/dev-support/checks/isolation.sh
deleted file mode 100755
index 1280235..0000000
--- a/hadoop-ozone/dev-support/checks/isolation.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd "$DIR/../../.." || exit 1
-
-hadooplines=$(git diff --name-only HEAD~1..HEAD | grep -v hadoop-ozone | grep -c -v hadoop-hdds  )
-if [ "$hadooplines" == "0" ]; then
-  echo "Only ozone/hdds subprojects are changed"
-  exit 0
-else
-  echo "Main hadoop projects are changed in an ozone patch."
-  echo "Please do it in a HADOOP/HDFS patch and test it with hadoop precommit tests"
-  exit 1
-fi
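
The same filter can be reproduced by hand to see which files would trip the check:

    # Any path printed here is outside hadoop-ozone/ and hadoop-hdds/,
    # so the patch belongs in a HADOOP/HDFS change instead.
    git diff --name-only HEAD~1..HEAD | grep -v hadoop-ozone | grep -v hadoop-hdds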
diff --git a/hadoop-ozone/dev-support/checks/rat.sh b/hadoop-ozone/dev-support/checks/rat.sh
deleted file mode 100755
index 464d636..0000000
--- a/hadoop-ozone/dev-support/checks/rat.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd "$DIR/../../.." || exit 1
-
-REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/rat"}
-mkdir -p "$REPORT_DIR"
-
-REPORT_FILE="$REPORT_DIR/summary.txt"
-
-cd hadoop-hdds || exit 1
-mvn -B -fn org.apache.rat:apache-rat-plugin:0.13:check
-cd ../hadoop-ozone || exit 1
-mvn -B -fn org.apache.rat:apache-rat-plugin:0.13:check
-
-cd "$DIR/../../.." || exit 1
-
-grep -r --include=rat.txt "!????" hadoop-hdds hadoop-ozone | tee "$REPORT_FILE"
-
-wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures"
-
-if [[ -s "${REPORT_FILE}" ]]; then
-   exit 1
-fi
-
diff --git a/hadoop-ozone/dev-support/checks/shellcheck.sh b/hadoop-ozone/dev-support/checks/shellcheck.sh
deleted file mode 100755
index 2b67118..0000000
--- a/hadoop-ozone/dev-support/checks/shellcheck.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd "$DIR/../../.." || exit 1
-
-REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/shellcheck"}
-mkdir -p "$REPORT_DIR"
-REPORT_FILE="$REPORT_DIR/summary.txt"
-
-echo "" > "$OUTPUT_FILE"
-if [[ "$(uname -s)" = "Darwin" ]]; then
-  find hadoop-hdds hadoop-ozone -type f -perm '-500'
-else
-  find hadoop-hdds hadoop-ozone -type f -executable
-fi \
-  | grep -v -e target/ -e node_modules/ -e '\.\(ico\|py\|yml\)$' \
-  | xargs -n1 shellcheck \
-  | tee "$REPORT_FILE"
-
-wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures"
-
-if [[ -s "${REPORT_FILE}" ]]; then
-   exit 1
-fi
diff --git a/hadoop-ozone/dev-support/checks/unit.sh b/hadoop-ozone/dev-support/checks/unit.sh
deleted file mode 100755
index 6a12412..0000000
--- a/hadoop-ozone/dev-support/checks/unit.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd "$DIR/../../.." || exit 1
-
-export MAVEN_OPTS="-Xmx4096m"
-mvn -B -fn test -f pom.ozone.xml -pl \!:hadoop-ozone-integration-test,\!:hadoop-ozone-filesystem,\!:hadoop-ozone-tools "$@"
-
-REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/unit"}
-mkdir -p "$REPORT_DIR"
-
-# shellcheck source=hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
-source "$DIR/_mvn_unit_report.sh"
-
-if [[ -s "$REPORT_DIR/summary.txt" ]] ; then
-    exit 1
-fi
-exit 0
diff --git a/hadoop-ozone/dev-support/docker/Dockerfile b/hadoop-ozone/dev-support/docker/Dockerfile
deleted file mode 100644
index 045e1f6..0000000
--- a/hadoop-ozone/dev-support/docker/Dockerfile
+++ /dev/null
@@ -1,69 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-FROM alpine
-RUN apk add --update --no-cache bash alpine-sdk maven grep openjdk8 py-pip rsync procps autoconf automake libtool findutils
-
-#Install real glibc
-RUN apk --no-cache add ca-certificates wget && \
-    wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub && \
-    wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.28-r0/glibc-2.28-r0.apk && \
-    apk add glibc-2.28-r0.apk
-
-#Install protobuf
-RUN mkdir -p /usr/local/src/ && \
-    cd /usr/local/src/ && \
-    wget https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz && \
-    tar xvf protobuf-2.5.0.tar.gz && \
-    cd protobuf-2.5.0 && \
-    ./autogen.sh && \
-    ./configure --prefix=/usr && \
-    make && \
-    make install && \
-    protoc --version
-
-#Findbug install
-RUN mkdir -p /opt && \
-    curl -sL https://sourceforge.net/projects/findbugs/files/findbugs/3.0.1/findbugs-3.0.1.tar.gz/download | tar -xz  && \
-     mv findbugs-* /opt/findbugs
-
-#Install apache-ant
-RUN mkdir -p /opt && \
-    curl -sL 'https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/ant/binaries/apache-ant-1.10.5-bin.tar.gz' | tar -xz  && \
-       mv apache-ant* /opt/ant
-
-#Install docker-compose
-RUN pip install docker-compose
-
-#Install pytest==2.8.7
-RUN pip install pytest==2.8.7
-
-ENV PATH=$PATH:/opt/findbugs/bin
-
-RUN addgroup -g 1000 default && \
-   for i in $(seq 1 2000); do adduser jenkins$i -u $i -G default -h /tmp/ -H -D; done
-
-#This is a very large local maven cache. Usually the mvn repository is not
-#safe to share between builds, as concurrent installs are not handled very
-#well. A simple workaround is to provide all the required 3rd-party libs in
-#the docker image: they will be cached by docker, and any additional
-#dependency can be downloaded and the artifacts installed.
-USER jenkins1000
-RUN cd /tmp && \
-   git clone --depth=1 https://gitbox.apache.org/repos/asf/hadoop.git -b trunk && \
-   cd /tmp/hadoop && \
-   mvn package dependency:go-offline -DskipTests -P hdds -pl :hadoop-ozone-dist -am && \
-   rm -rf /tmp/.m2/repository/org/apache/hadoop/*hdds* && \
-   rm -rf /tmp/.m2/repository/org/apache/hadoop/*ozone* && \
-   find /tmp/.m2/repository -exec chmod o+wx {} \;
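
A build-and-run sketch for this image; the tag and the mount point are illustrative only:

    docker build -t ozone-dev-env hadoop-ozone/dev-support/docker
    docker run --rm -it -v "$PWD:/workspace" -w /workspace ozone-dev-env bash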
diff --git a/hadoop-ozone/dev-support/intellij/install-runconfigs.sh b/hadoop-ozone/dev-support/intellij/install-runconfigs.sh
deleted file mode 100755
index fc877bd..0000000
--- a/hadoop-ozone/dev-support/intellij/install-runconfigs.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-SRC_DIR="$SCRIPT_DIR/runConfigurations"
-DEST_DIR="$SCRIPT_DIR/../../../.idea/runConfigurations/"
-mkdir -p "$DEST_DIR"
-#shellcheck disable=SC2010
-ls -1 "$SRC_DIR" | grep -v ozone-site.xml | xargs -n1 -I FILE cp "$SRC_DIR/FILE" "$DEST_DIR"
diff --git a/hadoop-ozone/dev-support/intellij/log4j.properties b/hadoop-ozone/dev-support/intellij/log4j.properties
deleted file mode 100644
index bc62e32..0000000
--- a/hadoop-ozone/dev-support/intellij/log4j.properties
+++ /dev/null
@@ -1,18 +0,0 @@
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-# log4j configuration used during build and unit tests
-log4j.rootLogger=INFO,stdout
-log4j.threshold=ALL
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-log4j.logger.io.jaegertracing=DEBUG
diff --git a/hadoop-ozone/dev-support/intellij/ozone-site.xml b/hadoop-ozone/dev-support/intellij/ozone-site.xml
deleted file mode 100644
index 2eb79aa..0000000
--- a/hadoop-ozone/dev-support/intellij/ozone-site.xml
+++ /dev/null
@@ -1,70 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-  <property>
-    <name>hdds.profiler.endpoint.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>ozone.scm.block.client.address</name>
-    <value>localhost</value>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.id</name>
-    <value>/tmp/datanode.id</value>
-  </property>
-  <property>
-    <name>ozone.scm.client.address</name>
-    <value>localhost</value>
-  </property>
-  <property>
-    <name>ozone.metadata.dirs</name>
-    <value>/tmp/metadata</value>
-  </property>
-  <property>
-    <name>ozone.scm.names</name>
-    <value>localhost</value>
-  </property>
-  <property>
-    <name>ozone.om.address</name>
-    <value>localhost</value>
-  </property>
-  <property>
-    <name>ozone.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>ozone.scm.container.size</name>
-    <value>10MB</value>
-  </property>
-  <property>
-    <name>ozone.scm.block.size</name>
-    <value>1MB</value>
-  </property>
-  <property>
-    <name>hdds.datanode.storage.utilization.critical.threshold</name>
-    <value>0.99</value>
-  </property>
-  <property>
-    <name>hdds.prometheus.endpoint.enabled</name>
-    <value>true</value>
-  </property>
-</configuration>
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode.xml
deleted file mode 100644
index 7f2a3e1..0000000
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="Datanode" type="Application" factoryName="Application">
-    <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.ozone.HddsDatanodeService" />
-    <module name="hadoop-ozone-datanode" />
-    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml" />
-    <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
-    <extension name="coverage">
-      <pattern>
-        <option name="PATTERN" value="org.apache.hadoop.ozone.*" />
-        <option name="ENABLED" value="true" />
-      </pattern>
-    </extension>
-    <method v="2">
-      <option name="Make" enabled="true" />
-    </method>
-  </configuration>
-</component>
\ No newline at end of file
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/FreonStandalone.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/FreonStandalone.xml
deleted file mode 100644
index 9d964d4..0000000
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/FreonStandalone.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="FreonStandalone" type="Application" factoryName="Application">
-    <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.ozone.freon.Freon" />
-    <module name="hadoop-ozone-tools" />
-    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml rk" />
-    <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
-    <extension name="coverage">
-      <pattern>
-        <option name="PATTERN" value="org.apache.hadoop.ozone.*" />
-        <option name="ENABLED" value="true" />
-      </pattern>
-    </extension>
-    <method v="2">
-      <option name="Make" enabled="true" />
-    </method>
-  </configuration>
-</component>
\ No newline at end of file
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManager.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManager.xml
deleted file mode 100644
index c2aaf1c..0000000
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManager.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="OzoneManager" type="Application" factoryName="Application">
-    <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.ozone.om.OzoneManagerStarter" />
-    <module name="hadoop-ozone-ozone-manager" />
-    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml" />
-    <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
-    <extension name="coverage">
-      <pattern>
-        <option name="PATTERN" value="org.apache.hadoop.ozone.*" />
-        <option name="ENABLED" value="true" />
-      </pattern>
-    </extension>
-    <method v="2">
-      <option name="Make" enabled="true" />
-    </method>
-  </configuration>
-</component>
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManagerInit.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManagerInit.xml
deleted file mode 100644
index 70fab5d..0000000
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManagerInit.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="OzoneManagerInit" type="Application" factoryName="Application">
-    <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.ozone.om.OzoneManagerStarter" />
-    <module name="hadoop-ozone-ozone-manager" />
-    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml --init" />
-    <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
-    <extension name="coverage">
-      <pattern>
-        <option name="PATTERN" value="org.apache.hadoop.ozone.*" />
-        <option name="ENABLED" value="true" />
-      </pattern>
-    </extension>
-    <method v="2">
-      <option name="Make" enabled="true" />
-    </method>
-  </configuration>
-</component>
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneShell.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneShell.xml
deleted file mode 100644
index 2d0bf80..0000000
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneShell.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="OzoneShell" type="Application" factoryName="Application">
-    <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.ozone.web.ozShell.OzoneShell" />
-    <module name="hadoop-ozone-ozone-manager" />
-    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml volume create /vol1" />
-    <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
-    <extension name="coverage">
-      <pattern>
-        <option name="PATTERN" value="org.apache.hadoop.ozone.*" />
-        <option name="ENABLED" value="true" />
-      </pattern>
-    </extension>
-    <method v="2">
-      <option name="Make" enabled="true" />
-    </method>
-  </configuration>
-</component>
\ No newline at end of file
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Recon.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Recon.xml
deleted file mode 100644
index 6c8e0ec..0000000
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/Recon.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="Recon" type="Application" factoryName="Application">
-    <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.ozone.recon.ReconServer" />
-    <module name="hadoop-ozone-recon" />
-    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml" />
-    <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
-    <extension name="coverage">
-      <pattern>
-        <option name="PATTERN" value="org.apache.hadoop.ozone.*" />
-        <option name="ENABLED" value="true" />
-      </pattern>
-    </extension>
-    <method v="2">
-      <option name="Make" enabled="true" />
-    </method>
-  </configuration>
-</component>
\ No newline at end of file
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/S3Gateway.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/S3Gateway.xml
deleted file mode 100644
index 93f4a9d..0000000
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/S3Gateway.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="S3Gateway" type="Application" factoryName="Application">
-    <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.ozone.s3.Gateway" />
-    <module name="hadoop-ozone-s3gateway" />
-    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml" />
-    <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
-    <extension name="coverage">
-      <pattern>
-        <option name="PATTERN" value="org.apache.hadoop.ozone.*" />
-        <option name="ENABLED" value="true" />
-      </pattern>
-    </extension>
-    <method v="2">
-      <option name="Make" enabled="true" />
-    </method>
-  </configuration>
-</component>
\ No newline at end of file
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManager.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManager.xml
deleted file mode 100644
index 46104d3..0000000
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManager.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="StorageContainerManager" type="Application" factoryName="Application" nameIsGenerated="falsee">
-    <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.hdds.scm.server.StorageContainerManagerStarter" />
-    <module name="hadoop-hdds-server-scm" />
-    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml" />
-    <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
-    <extension name="coverage">
-      <pattern>
-        <option name="PATTERN" value="org.apache.hadoop.ozone.*" />
-        <option name="ENABLED" value="true" />
-      </pattern>
-    </extension>
-    <method v="2">
-      <option name="Make" enabled="true" />
-    </method>
-  </configuration>
-</component>
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManagerInit.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManagerInit.xml
deleted file mode 100644
index f3ef26b..0000000
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManagerInit.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="StorageContainerManagerInit" type="Application" factoryName="Application" nameIsGenerated="false">
-    <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.hdds.scm.server.StorageContainerManagerStarter" />
-    <module name="hadoop-hdds-server-scm" />
-    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml --init" />
-    <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
-    <extension name="coverage">
-      <pattern>
-        <option name="PATTERN" value="org.apache.hadoop.ozone.*" />
-        <option name="ENABLED" value="true" />
-      </pattern>
-    </extension>
-    <method v="2">
-      <option name="Make" enabled="true" />
-    </method>
-  </configuration>
-</component>
diff --git a/hadoop-ozone/dist/README.md b/hadoop-ozone/dist/README.md
deleted file mode 100644
index 88132ec..0000000
--- a/hadoop-ozone/dist/README.md
+++ /dev/null
@@ -1,85 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-# Ozone Distribution
-
-This folder contains the project that creates the binary Ozone distribution and provides all the helper scripts and docker files to start it locally or in a cluster.
-
-## Testing with local docker based cluster
-
-After a full dist build you can find multiple docker-compose based cluster definitions in the `target/ozone-*/compose` folder.
-
-Please check the README files there.
-
-Usually you can start the cluster with:
-
-```
-cd compose/ozone
-docker-compose up -d
-```
-
-## Testing on Kubernetes
-
-You can also test the ozone cluster in kubernetes. If you have no active kubernetes cluster, you can start a local one with minikube:
-
-```
-minikube start
-```
-
-For testing in kubernetes you need to:
-
-1. Create a docker image with the new build
-2. Upload it to a docker registry
-3. Deploy the cluster by applying the kubernetes resources
-
-The easiest way to do all these steps is using the [skaffold](https://github.com/GoogleContainerTools/skaffold) tool. After the [installation of skaffold](https://github.com/GoogleContainerTools/skaffold#installation), you can execute
-
-```
-skaffold run
-```
-
-in this (`hadoop-ozone/dist`) folder.
-
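-If you would rather execute the three steps manually instead of using skaffold, a minimal sketch could look like this (the registry, image name and tag are placeholders, not something defined by the build):
-
-```
-docker build -t myregistry.example.com/ozone:dev target/ozone-*/
-docker push myregistry.example.com/ozone:dev
-kubectl apply -f src/main/k8s/
-```
-
-The `docker-build` and `docker-push` maven profiles of this project automate the image build and push steps.
-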
-The default kubernetes resource set (`src/main/k8s/`) contains NodePort-based service definitions for the Ozone Manager, Storage Container Manager and the S3 gateway.
-
-With minikube you can access the services with:
-
-```
-minikube service s3g-public
-minikube service om-public
-minikube service scm-public
-```
-
-### Monitoring
-
-Apache Hadoop Ozone supports Prometheus out of the box: it contains a prometheus-compatible exporter servlet. To start the monitoring you need a prometheus deployment in your kubernetes cluster:
-
-```
-cd src/main/k8s/prometheus
-kubectl apply -f .
-```
-
-The prometheus UI can also be accessed via a NodePort service:
-
-```
-minikube service prometheus-public
-```
-
-### Notes on the Kubernetes setup
-
-Please note that the provided kubernetes resources are not suitable for production:
-
-1. There is no security setup
-2. The datanode is started in a StatefulSet instead of a DaemonSet (to make it possible to scale it up on a one-node minikube cluster)
-3. All the UI pages are published with NodePort services
\ No newline at end of file
diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
deleted file mode 100755
index 72f6c3b..0000000
--- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# project.build.directory
-BASEDIR=$1
-
-#hdds.version
-HDDS_VERSION=$2
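-
-# Example invocation (normally performed by the exec-maven-plugin
-# execution defined in hadoop-ozone/dist/pom.xml):
-#   dist-layout-stitching <project.build.directory> <hdds.version>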
-
-## @audience     private
-## @stability    evolving
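-## Echo the given command, execute it, and exit with the command's
-## status code if it fails.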
-function run()
-{
-  declare res
-
-  echo "\$ ${*}"
-  "${@}"
-  res=$?
-  if [[ ${res} != 0 ]]; then
-    echo
-    echo "Failed!"
-    echo
-    exit "${res}"
-  fi
-}
-
-## @audience     private
-## @stability    evolving
-function findfileindir()
-{
-  declare file="$1"
-  declare dir="${2:-./share}"
-  declare count
-
-  count=$(find "${dir}" -iname "${file}" | wc -l)
-
-  #shellcheck disable=SC2086
-  echo ${count}
-}
-
-
-# shellcheck disable=SC2164
-ROOT=$(cd "${BASEDIR}"/../../..;pwd)
-echo
-echo "Current directory $(pwd)"
-echo
-
-run rm -rf "ozone-${HDDS_VERSION}"
-run mkdir "ozone-${HDDS_VERSION}"
-run cd "ozone-${HDDS_VERSION}"
-
-run cp -p "${ROOT}/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt" "NOTICE.txt"
-run cp -p "${ROOT}/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt" "LICENSE.txt"
-run cp -pr "${ROOT}/hadoop-ozone/dist/src/main/license/bin/licenses" "licenses"
-run cp -p "${ROOT}/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/LICENSE" "licenses/LICENSE-ozone-recon.txt"
-run cp -p "${ROOT}/README.txt" .
-
-run mkdir -p ./share/hadoop/mapreduce
-run mkdir -p ./share/hadoop/ozone
-run mkdir -p ./share/hadoop/hdds
-run mkdir -p ./share/hadoop/yarn
-run mkdir -p ./share/hadoop/hdfs
-run mkdir -p ./share/hadoop/common
-
-touch ./share/hadoop/mapreduce/.keep
-touch ./share/hadoop/yarn/.keep
-touch ./share/hadoop/hdfs/.keep
-touch ./share/hadoop/common/.keep
-
-
-run mkdir -p ./share/ozone/web
-run mkdir -p ./bin
-run mkdir -p ./sbin
-run mkdir -p ./etc
-run mkdir -p ./libexec
-run mkdir -p ./tests
-
-run cp -r "${ROOT}/hadoop-hdds/common/src/main/conf/" "etc/hadoop"
-run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties" "etc/hadoop"
-run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/dn-audit-log4j2.properties" "etc/hadoop"
-run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/scm-audit-log4j2.properties" "etc/hadoop"
-run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties" "etc/hadoop"
-run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/ozone-site.xml" "etc/hadoop"
-run cp -f "${ROOT}/hadoop-ozone/dist/src/main/conf/log4j.properties" "etc/hadoop"
-run cp "${ROOT}/hadoop-hdds/common/src/main/resources/network-topology-default.xml" "etc/hadoop"
-run cp "${ROOT}/hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml" "etc/hadoop"
-run cp "${ROOT}/hadoop-ozone/common/src/main/bin/ozone" "bin/"
-run cp -r "${ROOT}/hadoop-ozone/dist/src/main/dockerbin" "bin/docker"
-
-run cp "${ROOT}/hadoop-hdds/common/src/main/bin/hadoop-config.sh" "libexec/"
-run cp "${ROOT}/hadoop-hdds/common/src/main/bin/hadoop-config.cmd" "libexec/"
-run cp "${ROOT}/hadoop-hdds/common/src/main/bin/hadoop-functions.sh" "libexec/"
-run cp "${ROOT}/hadoop-ozone/common/src/main/bin/ozone-config.sh" "libexec/"
-run cp -r "${ROOT}/hadoop-ozone/common/src/main/shellprofile.d" "libexec/"
-
-
-run cp "${ROOT}/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh" "sbin/"
-run cp "${ROOT}/hadoop-hdds/common/src/main/bin/workers.sh" "sbin/"
-run cp "${ROOT}/hadoop-ozone/common/src/main/bin/start-ozone.sh" "sbin/"
-run cp "${ROOT}/hadoop-ozone/common/src/main/bin/stop-ozone.sh" "sbin/"
-
-# fault injection tests
-run cp  -r "${ROOT}/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade" tests
-
-# Optional documentation, could be missing
-cp -r "${ROOT}/hadoop-hdds/docs/target/classes/docs" ./
-
-#Copy docker compose files
-#compose files are preprocessed: properties (e.g. project.version) are replaced first by maven.
-run cp -p -R "${ROOT}/hadoop-ozone/dist/target/compose" .
-run cp -p -r "${ROOT}/hadoop-ozone/dist/src/main/smoketest" .
-run cp -p -r "${ROOT}/hadoop-ozone/dist/target/k8s" kubernetes
-run cp -p -r "${ROOT}/hadoop-ozone/dist/target/Dockerfile" .
-
-#workaround for https://issues.apache.org/jira/browse/MRESOURCES-236
-find ./compose -name "*.sh" -exec chmod 755 {} \;
diff --git a/hadoop-ozone/dist/dev-support/bin/dist-tar-stitching b/hadoop-ozone/dist/dev-support/bin/dist-tar-stitching
deleted file mode 100755
index 408233a..0000000
--- a/hadoop-ozone/dist/dev-support/bin/dist-tar-stitching
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# project.version
-VERSION=$1
-
-# project.build.directory
-BASEDIR=$2
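-
-# Example invocation (normally performed by the "tar-ozone" execution of
-# the exec-maven-plugin in hadoop-ozone/dist/pom.xml):
-#   dist-tar-stitching <project.version> <project.build.directory>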
-
-## @audience     private
-## @stability    evolving
-function run()
-{
-  declare res
-
-  echo "\$ ${*}"
-  "${@}"
-  res=$?
-  if [[ ${res} != 0 ]]; then
-    echo
-    echo "Failed!"
-    echo
-    exit "${res}"
-  fi
-}
-
-run tar -c -f "hadoop-ozone-${VERSION}.tar" "ozone-${VERSION}"
-run gzip -f "hadoop-ozone-${VERSION}.tar"
-echo
-echo "Ozone dist tar available at: ${BASEDIR}/hadoop-ozone-${VERSION}.tar.gz"
-echo
diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml
deleted file mode 100644
index 57b9a0c..0000000
--- a/hadoop-ozone/dist/pom.xml
+++ /dev/null
@@ -1,428 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-dist</artifactId>
-  <name>Apache Hadoop Ozone Distribution</name>
-  <packaging>pom</packaging>
-  <version>0.5.0-SNAPSHOT</version>
-  <properties>
-    <file.encoding>UTF-8</file.encoding>
-    <downloadSources>true</downloadSources>
-    <docker.ozone-runner.version>20190717-1</docker.ozone-runner.version>
-  </properties>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>copy-classpath-files</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>copy</goal>
-            </goals>
-            <configuration>
-              <outputDirectory>target/ozone-${ozone.version}/share/ozone/classpath
-              </outputDirectory>
-              <artifactItems>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-hdds-server-scm</artifactId>
-                  <version>${hdds.version}</version>
-                  <classifier>classpath</classifier>
-                  <type>cp</type>
-                  <destFileName>hadoop-hdds-server-scm.classpath</destFileName>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-hdds-tools</artifactId>
-                  <version>${hdds.version}</version>
-                  <classifier>classpath</classifier>
-                  <type>cp</type>
-                  <destFileName>hadoop-hdds-tools.classpath</destFileName>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-ozone-s3gateway</artifactId>
-                  <version>${ozone.version}</version>
-                  <classifier>classpath</classifier>
-                  <type>cp</type>
-                  <destFileName>hadoop-ozone-s3gateway.classpath</destFileName>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-ozone-csi</artifactId>
-                  <version>${ozone.version}</version>
-                  <classifier>classpath</classifier>
-                  <type>cp</type>
-                  <destFileName>hadoop-ozone-csi.classpath</destFileName>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-ozone-ozone-manager</artifactId>
-                  <version>${ozone.version}</version>
-                  <classifier>classpath</classifier>
-                  <type>cp</type>
-                  <destFileName>hadoop-ozone-ozone-manager.classpath
-                  </destFileName>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-ozone-tools</artifactId>
-                  <version>${ozone.version}</version>
-                  <classifier>classpath</classifier>
-                  <type>cp</type>
-                  <destFileName>hadoop-ozone-tools.classpath</destFileName>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-ozone-filesystem</artifactId>
-                  <version>${ozone.version}</version>
-                  <classifier>classpath</classifier>
-                  <type>cp</type>
-                  <destFileName>hadoop-ozone-filesystem.classpath</destFileName>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-ozone-common</artifactId>
-                  <version>${ozone.version}</version>
-                  <classifier>classpath</classifier>
-                  <type>cp</type>
-                  <destFileName>hadoop-ozone-common.classpath</destFileName>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-ozone-datanode</artifactId>
-                  <version>${ozone.version}</version>
-                  <classifier>classpath</classifier>
-                  <type>cp</type>
-                  <destFileName>hadoop-ozone-datanode.classpath</destFileName>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-ozone-recon</artifactId>
-                  <version>${ozone.version}</version>
-                  <classifier>classpath</classifier>
-                  <type>cp</type>
-                  <destFileName>hadoop-ozone-recon.classpath</destFileName>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-ozone-upgrade</artifactId>
-                  <version>${ozone.version}</version>
-                  <classifier>classpath</classifier>
-                  <type>cp</type>
-                  <destFileName>hadoop-ozone-upgrade.classpath</destFileName>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-ozone-insight</artifactId>
-                  <version>${ozone.version}</version>
-                  <classifier>classpath</classifier>
-                  <type>cp</type>
-                  <destFileName>hadoop-ozone-insight.classpath</destFileName>
-                </artifactItem>
-              </artifactItems>
-            </configuration>
-          </execution>
-          <execution>
-            <id>copy-jars</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>copy-dependencies</goal>
-            </goals>
-            <configuration>
-              <outputDirectory>target/ozone-${ozone.version}/share/ozone/lib
-              </outputDirectory>
-              <includeScope>runtime</includeScope>
-            </configuration>
-          </execution>
-          <execution>
-            <id>copy-omitted-jars</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>copy</goal>
-            </goals>
-            <configuration>
-              <outputDirectory>target/ozone-${ozone.version}/share/ozone/lib
-              </outputDirectory>
-              <artifactItems>
-                <artifactItem>
-                  <groupId>com.google.protobuf</groupId>
-                  <artifactId>protobuf-java</artifactId>
-                  <version>3.5.1</version>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>com.google.guava</groupId>
-                  <artifactId>guava</artifactId>
-                  <version>26.0-android</version>
-                </artifactItem>
-              </artifactItems>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <artifactId>maven-resources-plugin</artifactId>
-        <version>3.1.0</version>
-        <executions>
-          <execution>
-            <id>copy-compose-files</id>
-            <phase>compile</phase>
-            <goals>
-              <goal>copy-resources</goal>
-            </goals>
-            <configuration>
-              <outputDirectory>${basedir}/target/compose</outputDirectory>
-              <resources>
-                <resource>
-                  <directory>src/main/compose</directory>
-                  <filtering>true</filtering>
-                </resource>
-              </resources>
-            </configuration>
-          </execution>
-          <execution>
-            <id>copy-and-filter-dockerfile</id>
-            <phase>compile</phase>
-            <goals>
-              <goal>copy-resources</goal>
-            </goals>
-            <configuration>
-              <outputDirectory>${project.build.directory}</outputDirectory>
-              <resources>
-                <resource>
-                  <directory>src/main/docker</directory>
-                  <filtering>true</filtering>
-                </resource>
-              </resources>
-            </configuration>
-          </execution>
-          <execution>
-            <id>copy-k8s</id>
-            <phase>compile</phase>
-            <goals>
-              <goal>copy-resources</goal>
-            </goals>
-            <configuration>
-              <outputDirectory>${basedir}/target/k8s</outputDirectory>
-              <resources>
-                <resource>
-                  <directory>src/main/k8s</directory>
-                  <filtering>true</filtering>
-                </resource>
-              </resources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>dist</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <configuration>
-              <executable>${shell-executable}</executable>
-              <workingDirectory>${project.build.directory}</workingDirectory>
-              <arguments>
-                <argument>
-                  ${basedir}/dev-support/bin/dist-layout-stitching
-                </argument>
-                <argument>${project.build.directory}</argument>
-                <argument>${hdds.version}</argument>
-              </arguments>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <!-- There is no problem having multiple versions of the jar files
-      here, as the dependencies will be handled separately, with
-      separate classpath definitions. -->
-      <plugin>
-        <artifactId>maven-enforcer-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>depcheck</id>
-            <phase></phase>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-tools</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-scm</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-container-service</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-s3gateway</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-csi</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-ozone-manager</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-tools</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-filesystem-lib-current</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-filesystem-lib-legacy</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-datanode</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-recon</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-docs</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-upgrade</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-insight</artifactId>
-    </dependency>
-  </dependencies>
-  <profiles>
-    <profile>
-      <id>docker-build</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>io.fabric8</groupId>
-            <artifactId>docker-maven-plugin</artifactId>
-            <version>0.29.0</version>
-            <executions>
-              <execution>
-                <goals>
-                  <goal>build</goal>
-                </goals>
-                <phase>package</phase>
-              </execution>
-            </executions>
-            <configuration>
-              <images>
-                <image>
-                  <name>${docker.image}</name>
-                  <build>
-                    <dockerFileDir>
-                      ${project.build.directory}/ozone-${project.version}
-                    </dockerFileDir>
-                  </build>
-                </image>
-              </images>
-            </configuration>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-    <profile>
-      <id>docker-push</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>io.fabric8</groupId>
-            <artifactId>docker-maven-plugin</artifactId>
-            <version>0.29.0</version>
-            <executions>
-              <execution>
-                <goals>
-                  <goal>push</goal>
-                </goals>
-                <phase>package</phase>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-    <profile>
-      <id>dist</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.codehaus.mojo</groupId>
-            <artifactId>exec-maven-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>tar-ozone</id>
-                <phase>package</phase>
-                <goals>
-                  <goal>exec</goal>
-                </goals>
-                <configuration>
-                  <executable>${shell-executable}</executable>
-                  <workingDirectory>${project.build.directory}
-                  </workingDirectory>
-                  <arguments>
-                    <argument>${basedir}/dev-support/bin/dist-tar-stitching
-                    </argument>
-                    <argument>${hdds.version}</argument>
-                    <argument>${project.build.directory}</argument>
-                  </arguments>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
-
-</project>
diff --git a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml
deleted file mode 100644
index 25e35c8..0000000
--- a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml
+++ /dev/null
@@ -1,101 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<assembly
-        xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
-        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-        xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
-  <id>ozone-src</id>
-  <formats>
-    <format>tar.gz</format>
-  </formats>
-  <includeBaseDirectory>true</includeBaseDirectory>
-  <files>
-    <file>
-      <source>pom.ozone.xml</source>
-      <outputDirectory>/</outputDirectory>
-      <destName>pom.xml</destName>
-    </file>
-    <file>
-      <source>hadoop-ozone/dist/src/main/license/src/LICENSE.txt</source>
-      <outputDirectory>/</outputDirectory>
-    </file>
-    <file>
-      <source>hadoop-ozone/dist/src/main/license/src/NOTICE.txt</source>
-      <outputDirectory>/</outputDirectory>
-    </file>
-    <file>
-      <source>hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular-nvd3.txt</source>
-      <outputDirectory>/licenses</outputDirectory>
-    </file>
-    <file>
-      <source>hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular.txt</source>
-      <outputDirectory>/licenses</outputDirectory>
-    </file>
-    <file>
-      <source>hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-d3.txt</source>
-      <outputDirectory>/licenses</outputDirectory>
-    </file>
-    <file>
-      <source>hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-nvd3.txt</source>
-      <outputDirectory>/licenses</outputDirectory>
-    </file>
-    <file>
-      <source>hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-jquery.txt</source>
-      <outputDirectory>/licenses</outputDirectory>
-    </file>
-  </files>
-  <fileSets>
-    <fileSet>
-      <directory>.</directory>
-      <includes>
-        <include>pom.ozone.xml</include>
-        <include>README.txt</include>
-      </includes>
-    </fileSet>
-    <fileSet>
-      <directory>hadoop-hdds</directory>
-      <useDefaultExcludes>true</useDefaultExcludes>
-      <excludes>
-        <exclude>**/.classpath</exclude>
-        <exclude>**/.project</exclude>
-        <exclude>**/.settings</exclude>
-        <exclude>**/*.iml</exclude>
-        <exclude>**/target/**</exclude>
-      </excludes>
-    </fileSet>
-    <fileSet>
-      <directory>hadoop-ozone</directory>
-      <useDefaultExcludes>true</useDefaultExcludes>
-      <excludes>
-        <exclude>**/ozone-recon-web/build/**</exclude>
-        <exclude>**/ozone-recon-web/node_modules/**</exclude>
-        <exclude>**/.classpath</exclude>
-        <exclude>**/.project</exclude>
-        <exclude>**/.settings</exclude>
-        <exclude>**/*.iml</exclude>
-        <exclude>**/target/**</exclude>
-      </excludes>
-    </fileSet>
-    <fileSet>
-      <directory>hadoop-ozone/dist/src/main/license/src</directory>
-      <includes>
-        <include>**/*.txt</include>
-      </includes>
-
-    </fileSet>
-  </fileSets>
-</assembly>
diff --git a/hadoop-ozone/dist/src/main/compose/README.md b/hadoop-ozone/dist/src/main/compose/README.md
deleted file mode 100644
index 8189d2c..0000000
--- a/hadoop-ozone/dist/src/main/compose/README.md
+++ /dev/null
@@ -1,51 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-# Docker cluster definitions
-
-This directory contains multiple docker cluster definitions to start a local pseudo cluster with different configurations.
-
-It helps to start a local (multi-node-like) pseudo cluster with docker and docker-compose; obviously it's not for production.
-
-You can find more information in the specific subdirectories, but in general you can use the following commands:
-
-## Usage
-
-To start a cluster, go to a subdirectory and run:
-
-```
-docker-compose up -d
-```
-
-You can check the logs of all the components with:
-
-```
-docker-compose logs
-```
-
-In case of a problem you can destroy the cluster and delete all the local state with:
-
-```
-docker-compose down
-```
-
-(Note: a simple `docker-compose stop` may not delete all the local data.)
-
-You can scale the components up and down:
-
-```
-docker-compose scale datanode=5
-```
-
-Usually the key web UI ports are published on the docker host.
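-
-For example, you can check which host port docker-compose published for a service with `docker-compose port` (the service name and container port below are only illustrative):
-
-```
-docker-compose port scm 9876
-```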
diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/conf/grafana.ini b/hadoop-ozone/dist/src/main/compose/common/grafana/conf/grafana.ini
deleted file mode 100644
index a845146..0000000
--- a/hadoop-ozone/dist/src/main/compose/common/grafana/conf/grafana.ini
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-[auth.anonymous]
-enabled=true
-org_role=Editor
diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json
deleted file mode 100644
index 7644b12..0000000
--- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json
+++ /dev/null
@@ -1,1344 +0,0 @@
-{
-  "annotations": {
-    "list": [
-      {
-        "builtIn": 1,
-        "datasource": "-- Grafana --",
-        "enable": true,
-        "hide": true,
-        "iconColor": "rgba(0, 211, 255, 1)",
-        "name": "Annotations & Alerts",
-        "type": "dashboard"
-      }
-    ]
-  },
-  "editable": true,
-  "gnetId": null,
-  "graphTooltip": 0,
-  "iteration": 1544553994120,
-  "links": [],
-  "panels": [
-    {
-      "collapsed": false,
-      "gridPos": {
-        "h": 1,
-        "w": 24,
-        "x": 0,
-        "y": 0
-      },
-      "id": 22,
-      "panels": [],
-      "repeat": null,
-      "title": "Total Count",
-      "type": "row"
-    },
-    {
-      "cacheTimeout": null,
-      "colorBackground": false,
-      "colorPostfix": false,
-      "colorPrefix": false,
-      "colorValue": false,
-      "colors": [
-        "#7eb26d",
-        "rgba(237, 129, 40, 0.89)",
-        "#d44a3a"
-      ],
-      "format": "none",
-      "gauge": {
-        "maxValue": null,
-        "minValue": 0,
-        "show": false,
-        "thresholdLabels": false,
-        "thresholdMarkers": true
-      },
-      "gridPos": {
-        "h": 5,
-        "w": 8,
-        "x": 0,
-        "y": 1
-      },
-      "hideTimeOverride": false,
-      "id": 12,
-      "interval": null,
-      "links": [],
-      "mappingType": 1,
-      "mappingTypes": [
-        {
-          "name": "value to text",
-          "value": 1
-        },
-        {
-          "name": "range to text",
-          "value": 2
-        }
-      ],
-      "maxDataPoints": 100,
-      "nullPointMode": "connected",
-      "nullText": null,
-      "postfix": "",
-      "postfixFontSize": "50%",
-      "prefix": "",
-      "prefixFontSize": "50%",
-      "rangeMaps": [
-        {
-          "from": "null",
-          "text": "N/A",
-          "to": "null"
-        }
-      ],
-      "repeat": "entity",
-      "repeatDirection": "h",
-      "scopedVars": {
-        "entity": {
-          "selected": false,
-          "text": "volume",
-          "value": "volume"
-        }
-      },
-      "sparkline": {
-        "fillColor": "rgba(31, 118, 189, 0.18)",
-        "full": true,
-        "lineColor": "rgb(31, 120, 193)",
-        "show": false
-      },
-      "tableColumn": "__name__",
-      "targets": [
-        {
-          "expr": "om_metrics_num_[[entity]]s",
-          "format": "time_series",
-          "hide": false,
-          "instant": false,
-          "interval": "",
-          "intervalFactor": 1,
-          "legendFormat": "",
-          "refId": "A"
-        }
-      ],
-      "thresholds": "",
-      "timeShift": null,
-      "title": "[[entity]] created",
-      "type": "singlestat",
-      "valueFontSize": "80%",
-      "valueMaps": [
-        {
-          "op": "=",
-          "text": "N/A",
-          "value": "null"
-        }
-      ],
-      "valueName": "current"
-    },
-    {
-      "cacheTimeout": null,
-      "colorBackground": false,
-      "colorPostfix": false,
-      "colorPrefix": false,
-      "colorValue": false,
-      "colors": [
-        "#7eb26d",
-        "rgba(237, 129, 40, 0.89)",
-        "#d44a3a"
-      ],
-      "format": "none",
-      "gauge": {
-        "maxValue": null,
-        "minValue": 0,
-        "show": false,
-        "thresholdLabels": false,
-        "thresholdMarkers": true
-      },
-      "gridPos": {
-        "h": 5,
-        "w": 8,
-        "x": 8,
-        "y": 1
-      },
-      "hideTimeOverride": false,
-      "id": 28,
-      "interval": null,
-      "links": [],
-      "mappingType": 1,
-      "mappingTypes": [
-        {
-          "name": "value to text",
-          "value": 1
-        },
-        {
-          "name": "range to text",
-          "value": 2
-        }
-      ],
-      "maxDataPoints": 100,
-      "nullPointMode": "connected",
-      "nullText": null,
-      "postfix": "",
-      "postfixFontSize": "50%",
-      "prefix": "",
-      "prefixFontSize": "50%",
-      "rangeMaps": [
-        {
-          "from": "null",
-          "text": "N/A",
-          "to": "null"
-        }
-      ],
-      "repeat": null,
-      "repeatDirection": "h",
-      "repeatIteration": 1544553994120,
-      "repeatPanelId": 12,
-      "scopedVars": {
-        "entity": {
-          "selected": false,
-          "text": "bucket",
-          "value": "bucket"
-        }
-      },
-      "sparkline": {
-        "fillColor": "rgba(31, 118, 189, 0.18)",
-        "full": true,
-        "lineColor": "rgb(31, 120, 193)",
-        "show": false
-      },
-      "tableColumn": "__name__",
-      "targets": [
-        {
-          "expr": "om_metrics_num_[[entity]]s",
-          "format": "time_series",
-          "hide": false,
-          "instant": false,
-          "interval": "",
-          "intervalFactor": 1,
-          "legendFormat": "",
-          "refId": "A"
-        }
-      ],
-      "thresholds": "",
-      "timeShift": null,
-      "title": "[[entity]] created",
-      "type": "singlestat",
-      "valueFontSize": "80%",
-      "valueMaps": [
-        {
-          "op": "=",
-          "text": "N/A",
-          "value": "null"
-        }
-      ],
-      "valueName": "current"
-    },
-    {
-      "cacheTimeout": null,
-      "colorBackground": false,
-      "colorPostfix": false,
-      "colorPrefix": false,
-      "colorValue": false,
-      "colors": [
-        "#7eb26d",
-        "rgba(237, 129, 40, 0.89)",
-        "#d44a3a"
-      ],
-      "format": "none",
-      "gauge": {
-        "maxValue": null,
-        "minValue": 0,
-        "show": false,
-        "thresholdLabels": false,
-        "thresholdMarkers": true
-      },
-      "gridPos": {
-        "h": 5,
-        "w": 8,
-        "x": 16,
-        "y": 1
-      },
-      "hideTimeOverride": false,
-      "id": 29,
-      "interval": null,
-      "links": [],
-      "mappingType": 1,
-      "mappingTypes": [
-        {
-          "name": "value to text",
-          "value": 1
-        },
-        {
-          "name": "range to text",
-          "value": 2
-        }
-      ],
-      "maxDataPoints": 100,
-      "nullPointMode": "connected",
-      "nullText": null,
-      "postfix": "",
-      "postfixFontSize": "50%",
-      "prefix": "",
-      "prefixFontSize": "50%",
-      "rangeMaps": [
-        {
-          "from": "null",
-          "text": "N/A",
-          "to": "null"
-        }
-      ],
-      "repeat": null,
-      "repeatDirection": "h",
-      "repeatIteration": 1544553994120,
-      "repeatPanelId": 12,
-      "scopedVars": {
-        "entity": {
-          "selected": false,
-          "text": "key",
-          "value": "key"
-        }
-      },
-      "sparkline": {
-        "fillColor": "rgba(31, 118, 189, 0.18)",
-        "full": true,
-        "lineColor": "rgb(31, 120, 193)",
-        "show": false
-      },
-      "tableColumn": "__name__",
-      "targets": [
-        {
-          "expr": "om_metrics_num_[[entity]]s",
-          "format": "time_series",
-          "hide": false,
-          "instant": false,
-          "interval": "",
-          "intervalFactor": 1,
-          "legendFormat": "",
-          "refId": "A"
-        }
-      ],
-      "thresholds": "",
-      "timeShift": null,
-      "title": "[[entity]] created",
-      "type": "singlestat",
-      "valueFontSize": "80%",
-      "valueMaps": [
-        {
-          "op": "=",
-          "text": "N/A",
-          "value": "null"
-        }
-      ],
-      "valueName": "current"
-    },
-    {
-      "cacheTimeout": null,
-      "colorBackground": false,
-      "colorPostfix": false,
-      "colorPrefix": false,
-      "colorValue": false,
-      "colors": [
-        "#7eb26d",
-        "rgba(237, 129, 40, 0.89)",
-        "#d44a3a"
-      ],
-      "format": "none",
-      "gauge": {
-        "maxValue": 100,
-        "minValue": 0,
-        "show": false,
-        "thresholdLabels": false,
-        "thresholdMarkers": true
-      },
-      "gridPos": {
-        "h": 5,
-        "w": 8,
-        "x": 0,
-        "y": 6
-      },
-      "hideTimeOverride": false,
-      "id": 18,
-      "interval": null,
-      "links": [],
-      "mappingType": 1,
-      "mappingTypes": [
-        {
-          "name": "value to text",
-          "value": 1
-        },
-        {
-          "name": "range to text",
-          "value": 2
-        }
-      ],
-      "maxDataPoints": 100,
-      "nullPointMode": "connected",
-      "nullText": null,
-      "postfix": "",
-      "postfixFontSize": "50%",
-      "prefix": "",
-      "prefixFontSize": "50%",
-      "rangeMaps": [
-        {
-          "from": "null",
-          "text": "N/A",
-          "to": "null"
-        }
-      ],
-      "repeat": "entity",
-      "repeatDirection": "h",
-      "scopedVars": {
-        "entity": {
-          "selected": false,
-          "text": "volume",
-          "value": "volume"
-        }
-      },
-      "sparkline": {
-        "fillColor": "rgba(31, 118, 189, 0.18)",
-        "full": true,
-        "lineColor": "rgb(31, 120, 193)",
-        "show": false
-      },
-      "tableColumn": "__name__",
-      "targets": [
-        {
-          "expr": "om_metrics_num_[[entity]]_ops",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "refId": "A"
-        }
-      ],
-      "thresholds": "",
-      "timeShift": null,
-      "title": "$entity Ops",
-      "type": "singlestat",
-      "valueFontSize": "80%",
-      "valueMaps": [
-        {
-          "op": "=",
-          "text": "N/A",
-          "value": "null"
-        }
-      ],
-      "valueName": "total"
-    },
-    {
-      "cacheTimeout": null,
-      "colorBackground": false,
-      "colorPostfix": false,
-      "colorPrefix": false,
-      "colorValue": false,
-      "colors": [
-        "#7eb26d",
-        "rgba(237, 129, 40, 0.89)",
-        "#d44a3a"
-      ],
-      "format": "none",
-      "gauge": {
-        "maxValue": 100,
-        "minValue": 0,
-        "show": false,
-        "thresholdLabels": false,
-        "thresholdMarkers": true
-      },
-      "gridPos": {
-        "h": 5,
-        "w": 8,
-        "x": 8,
-        "y": 6
-      },
-      "hideTimeOverride": false,
-      "id": 30,
-      "interval": null,
-      "links": [],
-      "mappingType": 1,
-      "mappingTypes": [
-        {
-          "name": "value to text",
-          "value": 1
-        },
-        {
-          "name": "range to text",
-          "value": 2
-        }
-      ],
-      "maxDataPoints": 100,
-      "nullPointMode": "connected",
-      "nullText": null,
-      "postfix": "",
-      "postfixFontSize": "50%",
-      "prefix": "",
-      "prefixFontSize": "50%",
-      "rangeMaps": [
-        {
-          "from": "null",
-          "text": "N/A",
-          "to": "null"
-        }
-      ],
-      "repeat": null,
-      "repeatDirection": "h",
-      "repeatIteration": 1544553994120,
-      "repeatPanelId": 18,
-      "scopedVars": {
-        "entity": {
-          "selected": false,
-          "text": "bucket",
-          "value": "bucket"
-        }
-      },
-      "sparkline": {
-        "fillColor": "rgba(31, 118, 189, 0.18)",
-        "full": true,
-        "lineColor": "rgb(31, 120, 193)",
-        "show": false
-      },
-      "tableColumn": "__name__",
-      "targets": [
-        {
-          "expr": "om_metrics_num_[[entity]]_ops",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "refId": "A"
-        }
-      ],
-      "thresholds": "",
-      "timeShift": null,
-      "title": "$entity Ops",
-      "type": "singlestat",
-      "valueFontSize": "80%",
-      "valueMaps": [
-        {
-          "op": "=",
-          "text": "N/A",
-          "value": "null"
-        }
-      ],
-      "valueName": "total"
-    },
-    {
-      "cacheTimeout": null,
-      "colorBackground": false,
-      "colorPostfix": false,
-      "colorPrefix": false,
-      "colorValue": false,
-      "colors": [
-        "#7eb26d",
-        "rgba(237, 129, 40, 0.89)",
-        "#d44a3a"
-      ],
-      "format": "none",
-      "gauge": {
-        "maxValue": 100,
-        "minValue": 0,
-        "show": false,
-        "thresholdLabels": false,
-        "thresholdMarkers": true
-      },
-      "gridPos": {
-        "h": 5,
-        "w": 8,
-        "x": 16,
-        "y": 6
-      },
-      "hideTimeOverride": false,
-      "id": 31,
-      "interval": null,
-      "links": [],
-      "mappingType": 1,
-      "mappingTypes": [
-        {
-          "name": "value to text",
-          "value": 1
-        },
-        {
-          "name": "range to text",
-          "value": 2
-        }
-      ],
-      "maxDataPoints": 100,
-      "nullPointMode": "connected",
-      "nullText": null,
-      "postfix": "",
-      "postfixFontSize": "50%",
-      "prefix": "",
-      "prefixFontSize": "50%",
-      "rangeMaps": [
-        {
-          "from": "null",
-          "text": "N/A",
-          "to": "null"
-        }
-      ],
-      "repeat": null,
-      "repeatDirection": "h",
-      "repeatIteration": 1544553994120,
-      "repeatPanelId": 18,
-      "scopedVars": {
-        "entity": {
-          "selected": false,
-          "text": "key",
-          "value": "key"
-        }
-      },
-      "sparkline": {
-        "fillColor": "rgba(31, 118, 189, 0.18)",
-        "full": true,
-        "lineColor": "rgb(31, 120, 193)",
-        "show": false
-      },
-      "tableColumn": "__name__",
-      "targets": [
-        {
-          "expr": "om_metrics_num_[[entity]]_ops",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "refId": "A"
-        }
-      ],
-      "thresholds": "",
-      "timeShift": null,
-      "title": "$entity Ops",
-      "type": "singlestat",
-      "valueFontSize": "80%",
-      "valueMaps": [
-        {
-          "op": "=",
-          "text": "N/A",
-          "value": "null"
-        }
-      ],
-      "valueName": "total"
-    },
-    {
-      "collapsed": false,
-      "gridPos": {
-        "h": 1,
-        "w": 24,
-        "x": 0,
-        "y": 11
-      },
-      "id": 14,
-      "panels": [],
-      "repeat": null,
-      "title": "Object Creation Rate",
-      "type": "row"
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "Prometheus",
-      "decimals": null,
-      "fill": 1,
-      "gridPos": {
-        "h": 7,
-        "w": 8,
-        "x": 0,
-        "y": 12
-      },
-      "id": 24,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "hideEmpty": true,
-        "hideZero": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "repeat": "entity",
-      "repeatDirection": "h",
-      "scopedVars": {
-        "entity": {
-          "selected": false,
-          "text": "volume",
-          "value": "volume"
-        }
-      },
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "rate(om_metrics_num_[[entity]]_creates[1h])",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "refId": "A"
-        },
-        {
-          "expr": "rate(om_metrics_num_[[entity]]_commits[1h])",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "B"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": "1h",
-      "timeRegions": [],
-      "timeShift": "1h",
-      "title": "[[entity]] created - Hourly",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transparent": false,
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "none",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": false
-        }
-      ],
-      "yaxis": {
-        "align": true,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "Prometheus",
-      "decimals": null,
-      "fill": 1,
-      "gridPos": {
-        "h": 7,
-        "w": 8,
-        "x": 8,
-        "y": 12
-      },
-      "id": 32,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "hideEmpty": true,
-        "hideZero": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "repeat": null,
-      "repeatDirection": "h",
-      "repeatIteration": 1544553994120,
-      "repeatPanelId": 24,
-      "scopedVars": {
-        "entity": {
-          "selected": false,
-          "text": "bucket",
-          "value": "bucket"
-        }
-      },
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "rate(om_metrics_num_[[entity]]_creates[1h])",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "refId": "A"
-        },
-        {
-          "expr": "rate(om_metrics_num_[[entity]]_commits[1h])",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "B"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": "1h",
-      "timeRegions": [],
-      "timeShift": "1h",
-      "title": "[[entity]] created - Hourly",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transparent": false,
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "none",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": false
-        }
-      ],
-      "yaxis": {
-        "align": true,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "Prometheus",
-      "decimals": null,
-      "fill": 1,
-      "gridPos": {
-        "h": 7,
-        "w": 8,
-        "x": 16,
-        "y": 12
-      },
-      "id": 33,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "hideEmpty": true,
-        "hideZero": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "repeat": null,
-      "repeatDirection": "h",
-      "repeatIteration": 1544553994120,
-      "repeatPanelId": 24,
-      "scopedVars": {
-        "entity": {
-          "selected": false,
-          "text": "key",
-          "value": "key"
-        }
-      },
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "rate(om_metrics_num_[[entity]]_creates[1h])",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "refId": "A"
-        },
-        {
-          "expr": "rate(om_metrics_num_[[entity]]_commits[1h])",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "B"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": "1h",
-      "timeRegions": [],
-      "timeShift": "1h",
-      "title": "[[entity]] created - Hourly",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transparent": false,
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "none",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": false
-        }
-      ],
-      "yaxis": {
-        "align": true,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "Prometheus",
-      "fill": 1,
-      "gridPos": {
-        "h": 7,
-        "w": 8,
-        "x": 0,
-        "y": 19
-      },
-      "id": 27,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "hideEmpty": true,
-        "hideZero": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "repeat": "entity",
-      "repeatDirection": "h",
-      "scopedVars": {
-        "entity": {
-          "selected": false,
-          "text": "volume",
-          "value": "volume"
-        }
-      },
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "rate(om_metrics_num_[[entity]]_creates[1d])",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "refId": "A"
-        },
-        {
-          "expr": "rate(om_metrics_num_[[entity]]_commits[1d])",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "B"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": "24h",
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "[[entity]] created - Daily",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": false
-        }
-      ],
-      "yaxis": {
-        "align": true,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "Prometheus",
-      "fill": 1,
-      "gridPos": {
-        "h": 7,
-        "w": 8,
-        "x": 8,
-        "y": 19
-      },
-      "id": 34,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "hideEmpty": true,
-        "hideZero": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "repeat": null,
-      "repeatDirection": "h",
-      "repeatIteration": 1544553994120,
-      "repeatPanelId": 27,
-      "scopedVars": {
-        "entity": {
-          "selected": false,
-          "text": "bucket",
-          "value": "bucket"
-        }
-      },
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "rate(om_metrics_num_[[entity]]_creates[1d])",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "refId": "A"
-        },
-        {
-          "expr": "rate(om_metrics_num_[[entity]]_commits[1d])",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "B"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": "24h",
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "[[entity]] created - Daily",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": false
-        }
-      ],
-      "yaxis": {
-        "align": true,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "Prometheus",
-      "fill": 1,
-      "gridPos": {
-        "h": 7,
-        "w": 8,
-        "x": 16,
-        "y": 19
-      },
-      "id": 35,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "hideEmpty": true,
-        "hideZero": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "repeat": null,
-      "repeatDirection": "h",
-      "repeatIteration": 1544553994120,
-      "repeatPanelId": 27,
-      "scopedVars": {
-        "entity": {
-          "selected": false,
-          "text": "key",
-          "value": "key"
-        }
-      },
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "rate(om_metrics_num_[[entity]]_creates[1d])",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "refId": "A"
-        },
-        {
-          "expr": "rate(om_metrics_num_[[entity]]_commits[1d])",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "B"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": "24h",
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "[[entity]] created - Daily",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": false
-        }
-      ],
-      "yaxis": {
-        "align": true,
-        "alignLevel": null
-      }
-    }
-  ],
-  "refresh": "1m",
-  "schemaVersion": 16,
-  "style": "dark",
-  "tags": [],
-  "templating": {
-    "list": [
-      {
-        "allValue": "all",
-        "current": {
-          "tags": [],
-          "text": "All",
-          "value": [
-            "$__all"
-          ]
-        },
-        "hide": 0,
-        "includeAll": true,
-        "label": null,
-        "multi": true,
-        "name": "entity",
-        "options": [
-          {
-            "selected": true,
-            "text": "All",
-            "value": "$__all"
-          },
-          {
-            "selected": false,
-            "text": "volume",
-            "value": "volume"
-          },
-          {
-            "selected": false,
-            "text": "bucket",
-            "value": "bucket"
-          },
-          {
-            "selected": false,
-            "text": "key",
-            "value": "key"
-          }
-        ],
-        "query": "volume,bucket,key",
-        "skipUrlSync": false,
-        "type": "custom"
-      }
-    ]
-  },
-  "time": {
-    "from": "now/d",
-    "to": "now"
-  },
-  "timepicker": {
-    "refresh_intervals": [
-      "5s",
-      "10s",
-      "30s",
-      "1m",
-      "5m",
-      "15m",
-      "30m",
-      "1h",
-      "2h",
-      "1d"
-    ],
-    "time_options": [
-      "5m",
-      "15m",
-      "1h",
-      "6h",
-      "12h",
-      "24h",
-      "2d",
-      "7d",
-      "30d"
-    ]
-  },
-  "timezone": "",
-  "title": "Ozone  - Object Metrics",
-  "uid": "yakEh0Eik",
-  "version": 1
-}
diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - RPC Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - RPC Metrics.json
deleted file mode 100644
index a22e3d7..0000000
--- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - RPC Metrics.json
+++ /dev/null
@@ -1,875 +0,0 @@
-{
-  "annotations": {
-    "list": [
-      {
-        "builtIn": 1,
-        "datasource": "-- Grafana --",
-        "enable": true,
-        "hide": true,
-        "iconColor": "rgba(0, 211, 255, 1)",
-        "name": "Annotations & Alerts",
-        "type": "dashboard"
-      }
-    ]
-  },
-  "editable": true,
-  "gnetId": null,
-  "graphTooltip": 0,
-  "iteration": 1544554371864,
-  "links": [],
-  "panels": [
-    {
-      "collapsed": false,
-      "gridPos": {
-        "h": 1,
-        "w": 24,
-        "x": 0,
-        "y": 0
-      },
-      "id": 69,
-      "panels": [],
-      "repeat": "servername",
-      "scopedVars": {
-        "servername": {
-          "selected": true,
-          "text": "OzoneManagerService",
-          "value": "OzoneManagerService"
-        }
-      },
-      "title": "RPC Queue Time Summary - [[servername]]",
-      "type": "row"
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "fill": 1,
-      "gridPos": {
-        "h": 9,
-        "w": 12,
-        "x": 0,
-        "y": 1
-      },
-      "id": 47,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "scopedVars": {
-        "servername": {
-          "selected": true,
-          "text": "OzoneManagerService",
-          "value": "OzoneManagerService"
-        }
-      },
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": true,
-      "targets": [
-        {
-          "expr": "rpc_rpc_queue_time_avg_time{servername=~\"$servername\"}",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "legendFormat": "",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Avg Queue Time",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transparent": true,
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": false
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "fill": 1,
-      "gridPos": {
-        "h": 9,
-        "w": 12,
-        "x": 12,
-        "y": 1
-      },
-      "id": 48,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "scopedVars": {
-        "servername": {
-          "selected": true,
-          "text": "OzoneManagerService",
-          "value": "OzoneManagerService"
-        }
-      },
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": true,
-      "targets": [
-        {
-          "expr": "rpc_rpc_queue_time_num_ops{servername=~\"$servername\"}",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "legendFormat": "",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Number of Ops",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transparent": true,
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": false
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "collapsed": false,
-      "gridPos": {
-        "h": 1,
-        "w": 24,
-        "x": 0,
-        "y": 10
-      },
-      "id": 39,
-      "panels": [],
-      "repeat": null,
-      "title": "RPC Call Queue Length",
-      "type": "row"
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "fill": 1,
-      "gridPos": {
-        "h": 9,
-        "w": 24,
-        "x": 0,
-        "y": 11
-      },
-      "id": 29,
-      "legend": {
-        "avg": false,
-        "current": true,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": true,
-        "values": true
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "repeat": "servername",
-      "repeatDirection": "h",
-      "scopedVars": {
-        "servername": {
-          "selected": true,
-          "text": "OzoneManagerService",
-          "value": "OzoneManagerService"
-        }
-      },
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "rpc_call_queue_length{servername=~\"$servername\"}",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "legendFormat": "",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "$servername",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "collapsed": false,
-      "gridPos": {
-        "h": 1,
-        "w": 24,
-        "x": 0,
-        "y": 20
-      },
-      "id": 37,
-      "panels": [],
-      "repeat": "window",
-      "scopedVars": {
-        "window": {
-          "selected": true,
-          "text": "60",
-          "value": "60"
-        }
-      },
-      "title": "RPC Deferred Processing Time [[window]]s window",
-      "type": "row"
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "Prometheus",
-      "decimals": null,
-      "fill": 1,
-      "gridPos": {
-        "h": 8,
-        "w": 24,
-        "x": 0,
-        "y": 21
-      },
-      "id": 35,
-      "legend": {
-        "alignAsTable": false,
-        "avg": true,
-        "current": false,
-        "hideEmpty": true,
-        "hideZero": false,
-        "max": false,
-        "min": false,
-        "rightSide": true,
-        "show": true,
-        "total": true,
-        "values": true
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "repeat": "servername",
-      "repeatDirection": "v",
-      "scopedVars": {
-        "servername": {
-          "selected": true,
-          "text": "OzoneManagerService",
-          "value": "OzoneManagerService"
-        },
-        "window": {
-          "selected": true,
-          "text": "60",
-          "value": "60"
-        }
-      },
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "avg(rpc_deferred_rpc_processing_time[[window]]s50th_percentile_latency{servername=~\"$servername\"})",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "refId": "A"
-        },
-        {
-          "expr": "avg(rpc_deferred_rpc_processing_time[[window]]s75th_percentile_latency{servername=~\"$servername\"})",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "B"
-        },
-        {
-          "expr": "avg(rpc_deferred_rpc_processing_time[[window]]s90th_percentile_latency{servername=~\"$servername\"})",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "C"
-        },
-        {
-          "expr": "avg(rpc_deferred_rpc_processing_time[[window]]s99th_percentile_latency{servername=~\"$servername\"})",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "D"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": "1h",
-      "timeRegions": [],
-      "timeShift": "1h",
-      "title": "$servername",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transparent": true,
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "none",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": false
-        }
-      ],
-      "yaxis": {
-        "align": true,
-        "alignLevel": null
-      }
-    },
-    {
-      "collapsed": false,
-      "gridPos": {
-        "h": 1,
-        "w": 24,
-        "x": 0,
-        "y": 29
-      },
-      "id": 34,
-      "panels": [],
-      "repeat": "window",
-      "scopedVars": {
-        "window": {
-          "selected": true,
-          "text": "60",
-          "value": "60"
-        }
-      },
-      "title": "RPC Queue Time [[window]]s window",
-      "type": "row"
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "Prometheus",
-      "decimals": null,
-      "fill": 1,
-      "gridPos": {
-        "h": 8,
-        "w": 24,
-        "x": 0,
-        "y": 30
-      },
-      "id": 32,
-      "legend": {
-        "alignAsTable": false,
-        "avg": true,
-        "current": false,
-        "hideEmpty": true,
-        "hideZero": false,
-        "max": false,
-        "min": false,
-        "rightSide": true,
-        "show": true,
-        "total": true,
-        "values": true
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "repeat": "servername",
-      "repeatDirection": "v",
-      "scopedVars": {
-        "servername": {
-          "selected": true,
-          "text": "OzoneManagerService",
-          "value": "OzoneManagerService"
-        },
-        "window": {
-          "selected": true,
-          "text": "60",
-          "value": "60"
-        }
-      },
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "avg(rpc_rpc_queue_time[[window]]s50th_percentile_latency{servername=~\"$servername\"})",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "refId": "A"
-        },
-        {
-          "expr": "avg(rpc_rpc_queue_time[[window]]s75th_percentile_latency{servername=~\"$servername\"})",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "B"
-        },
-        {
-          "expr": "avg(rpc_rpc_queue_time[[window]]s90th_percentile_latency{servername=~\"$servername\"})",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "C"
-        },
-        {
-          "expr": "avg(rpc_rpc_queue_time[[window]]s99th_percentile_latency{servername=~\"$servername\"})",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "D"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": "1h",
-      "timeRegions": [],
-      "timeShift": "1h",
-      "title": "$servername",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transparent": false,
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "none",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": false
-        }
-      ],
-      "yaxis": {
-        "align": true,
-        "alignLevel": null
-      }
-    },
-    {
-      "collapsed": false,
-      "gridPos": {
-        "h": 1,
-        "w": 24,
-        "x": 0,
-        "y": 38
-      },
-      "id": 14,
-      "panels": [],
-      "repeat": "window",
-      "scopedVars": {
-        "window": {
-          "selected": true,
-          "text": "60",
-          "value": "60"
-        }
-      },
-      "title": "RPC Processing Time [[window]]s window",
-      "type": "row"
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "Prometheus",
-      "decimals": null,
-      "fill": 1,
-      "gridPos": {
-        "h": 9,
-        "w": 24,
-        "x": 0,
-        "y": 39
-      },
-      "id": 24,
-      "legend": {
-        "alignAsTable": false,
-        "avg": true,
-        "current": false,
-        "hideEmpty": true,
-        "hideZero": false,
-        "max": false,
-        "min": false,
-        "rightSide": true,
-        "show": true,
-        "total": true,
-        "values": true
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "repeat": "servername",
-      "repeatDirection": "v",
-      "scopedVars": {
-        "servername": {
-          "selected": true,
-          "text": "OzoneManagerService",
-          "value": "OzoneManagerService"
-        },
-        "window": {
-          "selected": true,
-          "text": "60",
-          "value": "60"
-        }
-      },
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "avg(rpc_rpc_processing_time[[window]]s50th_percentile_latency{servername=~\"$servername\"})",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "refId": "A"
-        },
-        {
-          "expr": "avg(rpc_rpc_processing_time[[window]]s75th_percentile_latency{servername=~\"$servername\"})",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "B"
-        },
-        {
-          "expr": "avg(rpc_rpc_processing_time[[window]]s90th_percentile_latency{servername=~\"$servername\"})",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "C"
-        },
-        {
-          "expr": "avg(rpc_rpc_processing_time[[window]]s99th_percentile_latency{servername=~\"$servername\"})",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "refId": "D"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": "1h",
-      "timeRegions": [],
-      "timeShift": "1h",
-      "title": "$servername",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transparent": true,
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "none",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": false
-        }
-      ],
-      "yaxis": {
-        "align": true,
-        "alignLevel": null
-      }
-    }
-  ],
-  "refresh": false,
-  "schemaVersion": 16,
-  "style": "dark",
-  "tags": [],
-  "templating": {
-    "list": [
-      {
-        "allValue": "All",
-        "current": {
-          "text": "OzoneManagerService",
-          "value": [
-            "OzoneManagerService"
-          ]
-        },
-        "datasource": "Prometheus",
-        "definition": "label_values(servername)",
-        "hide": 0,
-        "includeAll": true,
-        "label": "servername",
-        "multi": true,
-        "name": "servername",
-        "options": [],
-        "query": "label_values(servername)",
-        "refresh": 2,
-        "regex": "",
-        "skipUrlSync": false,
-        "sort": 1,
-        "tagValuesQuery": "",
-        "tags": [],
-        "tagsQuery": "",
-        "type": "query",
-        "useTags": false
-      },
-      {
-        "allValue": "All",
-        "current": {
-          "tags": [],
-          "text": "60",
-          "value": [
-            "60"
-          ]
-        },
-        "hide": 0,
-        "includeAll": true,
-        "label": null,
-        "multi": true,
-        "name": "window",
-        "options": [
-          {
-            "selected": false,
-            "text": "All",
-            "value": "$__all"
-          },
-          {
-            "selected": true,
-            "text": "60",
-            "value": "60"
-          },
-          {
-            "selected": false,
-            "text": "300",
-            "value": "300"
-          }
-        ],
-        "query": "60,300",
-        "skipUrlSync": false,
-        "type": "custom"
-      }
-    ]
-  },
-  "time": {
-    "from": "now/d",
-    "to": "now"
-  },
-  "timepicker": {
-    "refresh_intervals": [
-      "5s",
-      "10s",
-      "30s",
-      "1m",
-      "5m",
-      "15m",
-      "30m",
-      "1h",
-      "2h",
-      "1d"
-    ],
-    "time_options": [
-      "5m",
-      "15m",
-      "1h",
-      "6h",
-      "12h",
-      "24h",
-      "2d",
-      "7d",
-      "30d"
-    ]
-  },
-  "timezone": "",
-  "title": "Ozone  - RPC Metrics",
-  "uid": "yDSkL0Pmk",
-  "version": 1
-}
diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/dashboards/dashboards.yml b/hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/dashboards/dashboards.yml
deleted file mode 100755
index 1485f72..0000000
--- a/hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/dashboards/dashboards.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-- name: 'default'
-  org_id: 1
-  folder: ''
-  type: 'file'
-  options:
-    folder: '/var/lib/grafana/dashboards'
diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/datasources/datasources.yml b/hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/datasources/datasources.yml
deleted file mode 100755
index 4d33c23..0000000
--- a/hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/datasources/datasources.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-datasources:
-- name: 'Prometheus'
-  type: 'prometheus'
-  access: 'proxy'
-  org_id: 1
-  url: 'http://prometheus:9090'
-  is_default: true
-  version: 1
-  editable: true
diff --git a/hadoop-ozone/dist/src/main/compose/common/prometheus/prometheus.yml b/hadoop-ozone/dist/src/main/compose/common/prometheus/prometheus.yml
deleted file mode 100644
index af567d9..0000000
--- a/hadoop-ozone/dist/src/main/compose/common/prometheus/prometheus.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-global:
-  scrape_interval:     15s # By default, scrape targets every 15 seconds.
-
-scrape_configs:
-  - job_name: ozone
-    metrics_path: /prom
-    static_configs:
-     - targets:
-        - "scm:9876"
-        - "om:9874"
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env
deleted file mode 100644
index df9065c..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HADOOP_VERSION=3
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml
deleted file mode 100644
index 7d8295d..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-   namenode:
-      image: apache/hadoop:${HADOOP_VERSION}
-      ports:
-      - 9870:9870
-      env_file:
-      - ./docker-config
-      environment:
-         ENSURE_NAMENODE_DIR: "/tmp/hadoop-root/dfs/name"
-      command: ["hdfs", "namenode"]
-   datanode:
-      image: apache/hadoop:${HADOOP_VERSION}
-      ports:
-        - 9864
-      volumes:
-         - ../..:/opt/ozone
-      command: ["hdfs","datanode"]
-      environment:
-         HADOOP_CLASSPATH: /opt/ozone/share/hadoop/ozoneplugin/*.jar
-      env_file:
-        - ./docker-config
-   om:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9874:9874
-      environment:
-         ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-      env_file:
-          - ./docker-config
-      command: ["ozone","om"]
-   scm:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9876:9876
-      env_file:
-          - ./docker-config
-      environment:
-          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["ozone","scm"]
-   s3g:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9878:9878
-      env_file:
-          - ./docker-config
-      command: ["ozone","s3g"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
deleted file mode 100644
index 63bbbd8..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
+++ /dev/null
@@ -1,35 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=true
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
-HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-
-#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config
deleted file mode 100644
index 7936238..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config
+++ /dev/null
@@ -1,77 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=true
-OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_ozone.replication=1
-
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
-HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012
-
-CORE-SITE.xml_fs.defaultFS=o3fs://bucket1.vol1/
-
-MAPRED-SITE.XML_mapreduce.framework.name=yarn
-MAPRED-SITE.XML_yarn.app.mapreduce.am.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
-MAPRED-SITE.XML_mapreduce.map.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
-MAPRED-SITE.XML_mapreduce.reduce.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
-MAPRED-SITE.XML_mapreduce.map.memory.mb=4096
-MAPRED-SITE.XML_mapreduce.reduce.memory.mb=4096
-MAPRED-SITE.XML_mapred.child.java.opts=-Xmx2g
-
-YARN-SITE.XML_yarn.app.mapreduce.am.staging-dir=/user
-YARN-SITE.XML_yarn.timeline-service.enabled=true
-#YARN-SITE.XML_yarn.timeline-service.generic.application.history.enabled=true
-#YARN-SITE.XML_yarn.timeline-service.hostname=jhs
-#YARN-SITE.XML_yarn.log.server.url=http://jhs:8188/applicationhistory/logs/
-
-YARN-SITE.XML_yarn.nodemanager.pmem-check-enabled=false
-YARN-SITE.XML_yarn.nodemanager.delete.debug-delay-sec=6000
-YARN-SITE.XML_yarn.nodemanager.vmem-check-enabled=false
-YARN-SITE.XML_yarn.nodemanager.aux-services=mapreduce_shuffle
-YARN-SITE.XML_yarn.nodemanager.disk-health-checker.enable=false
-
-YARN-SITE.XML_yarn.resourcemanager.hostname=rm
-YARN-SITE.XML_yarn.resourcemanager.system.metrics.publisher.enabled=true
-
-#YARN-SITE.XML_yarn.log-aggregation-enable=true
-#YARN-SITE.XML_yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds=3600
-
-#YARN-SITE.XML_yarn.nodemanager.container-executor.class=org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor
-#YARN-SITE.XML_yarn.nodemanager.linux-container-executor.path=/opt/hadoop/bin/container-executor
-#YARN-SITE.XML_yarn.nodemanager.linux-container-executor.group=hadoop
-YARN-SITE.XML_yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage=99
-
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-applications=10000
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-am-resource-percent=0.1
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.resource-calculator=org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.queues=default
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.capacity=100
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.user-limit-factor=1
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.maximum-capacity=100
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.state=RUNNING
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_submit_applications=*
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_administer_queue=*
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.node-locality-delay=40
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings=
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings-override.enable=false
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env
deleted file mode 100644
index 27fc576..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=@hdds.version@
-#TODO: switch to apache/hadoop. Older versions are not yet supported by apache/hadoop.
-# See: HADOOP-16092 for more details.
-HADOOP_IMAGE=flokkr/hadoop
-HADOOP_VERSION=2.7.7
-OZONE_RUNNER_VERSION=@docker.ozone-runner.version@
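The @hdds.version@ and @docker.ozone-runner.version@ tokens are not literal values; they are Maven resource-filtering placeholders that the dist build substitutes when it assembles the compose directories. After filtering, the .env would read roughly as follows (the concrete runner tag is illustrative, not taken from this patch):

    HDDS_VERSION=0.5.0-SNAPSHOT
    HADOOP_IMAGE=flokkr/hadoop
    HADOOP_VERSION=2.7.7
    OZONE_RUNNER_VERSION=20191107-1  # illustrative tag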
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml
deleted file mode 100644
index 17f5ee5..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-  datanode:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    volumes:
-      - ../../..:/opt/hadoop
-    ports:
-      - 9864
-    command: ["/opt/hadoop/bin/ozone","datanode"]
-    env_file:
-      - docker-config
-      - ../common-config
-  om:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: om
-    volumes:
-      - ../../..:/opt/hadoop
-    ports:
-      - 9874:9874
-    environment:
-      WAITFOR: scm:9876
-      ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-    env_file:
-      - docker-config
-      - ../common-config
-    command: ["/opt/hadoop/bin/ozone","om"]
-  s3g:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: s3g
-    volumes:
-      - ../../..:/opt/hadoop
-    ports:
-      - 9878:9878
-    env_file:
-      - ./docker-config
-      - ../common-config
-    command: ["/opt/hadoop/bin/ozone","s3g"]
-  scm:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: scm
-    volumes:
-      - ../../..:/opt/hadoop
-    ports:
-      - 9876:9876
-    env_file:
-      - docker-config
-      - ../common-config
-    environment:
-      ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-    command: ["/opt/hadoop/bin/ozone","scm"]
-  rm:
-    image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
-    hostname: rm
-    volumes:
-      - ../../..:/opt/ozone
-    ports:
-      - 8088:8088
-    env_file:
-      - ./docker-config
-      - ../common-config
-    environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
-    command: ["yarn", "resourcemanager"]
-  nm:
-    image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
-    hostname: nm
-    volumes:
-      - ../../..:/opt/ozone
-    env_file:
-      - ./docker-config
-      - ../common-config
-    environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
-      WAIT_FOR: rm:8088
-    command: ["yarn","nodemanager"]
-# Optional section: uncomment this part to get DNS resolution for all the containers.
-#  dns:
-#    image: andyshinn/dnsmasq:2.76
-#    ports:
-#        - 53:53/udp
-#        - 53:53/tcp
-#    volumes:
-#      - "/var/run/docker.sock:/var/run/docker.sock"
-#    command:
-#      - "-k"
-#      - "-d"
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
deleted file mode 100644
index fccdace..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-CORE-SITE.XML_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzFs
-MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
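Note the pairing here: the Hadoop 2.7 flavour combines BasicOzFs with the hadoop-ozone-filesystem-lib-legacy jar, while the Hadoop 3.x flavours below combine OzFs with the -lib-current jar; the legacy artifact is the build intended for older Hadoop releases that lack APIs the current one relies on. Side by side:

    # Hadoop 2.7 (this file):
    CORE-SITE.XML_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzFs
    ...hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
    # Hadoop 3.1/3.2 (below):
    CORE-SITE.XML_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
    ...hadoop-ozone-filesystem-lib-current-@project.version@.jar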
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/test.sh
deleted file mode 100755
index a2ab5d8..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/test.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../../testlib.sh"
-
-start_docker_env
-
-execute_robot_test scm createmrenv.robot
-
-
-# rm is the container name (the YARN ResourceManager), not the rm command
-execute_command_in_container rm sudo apk add --update py-pip
-execute_command_in_container rm sudo pip install robotframework
-
-# reinitialize the directories so the remaining tests run against the Ozone tree at /opt/ozone
-export OZONE_DIR=/opt/ozone
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../../testlib.sh"
-
-execute_robot_test rm ozonefs/hadoopo3fs.robot
-
-execute_robot_test rm -v hadoop.version:2.7.7 mapreduce.robot
-
-stop_docker_env
-
-generate_report
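Each of these suites is self-contained; a typical invocation, run from a built Ozone distribution where the filtered compose files land, would be roughly:

    cd compose/ozone-mr/hadoop27
    ./test.sh

test.sh boots the cluster, runs the Robot Framework suites against the scm and rm containers, tears the cluster down, and leaves an aggregated report via generate_report.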
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env
deleted file mode 100644
index 4cb4271..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=@hdds.version@
-#TODO: switch to apache/hadoop. Older versions are not yet supported by apache/hadoop.
-# See: HADOOP-16092 for more details.
-HADOOP_IMAGE=flokkr/hadoop
-HADOOP_VERSION=3.1.2
-OZONE_RUNNER_VERSION=@docker.ozone-runner.version@
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml
deleted file mode 100644
index e3696fc..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-  datanode:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    volumes:
-      - ../../..:/opt/hadoop
-    ports:
-      - 9864
-    command: ["/opt/hadoop/bin/ozone","datanode"]
-    env_file:
-      - docker-config
-      - ../common-config
-  om:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: om
-    volumes:
-      - ../../..:/opt/hadoop
-    ports:
-      - 9874:9874
-    environment:
-      WAITFOR: scm:9876
-      ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-    env_file:
-      - docker-config
-      - ../common-config
-    command: ["/opt/hadoop/bin/ozone","om"]
-  s3g:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: s3g
-    volumes:
-      - ../../..:/opt/hadoop
-    ports:
-      - 9878:9878
-    env_file:
-      - ./docker-config
-      - ../common-config
-    command: ["/opt/hadoop/bin/ozone","s3g"]
-  scm:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: scm
-    volumes:
-      - ../../..:/opt/hadoop
-    ports:
-      - 9876:9876
-    env_file:
-      - docker-config
-      - ../common-config
-    environment:
-      ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-    command: ["/opt/hadoop/bin/ozone","scm"]
-  rm:
-    image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
-    hostname: rm
-    volumes:
-      - ../../..:/opt/ozone
-    ports:
-      - 8088:8088
-    env_file:
-      - ./docker-config
-      - ../common-config
-    environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
-    command: ["yarn", "resourcemanager"]
-  nm:
-    image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
-    hostname: nm
-    volumes:
-      - ../../..:/opt/ozone
-    env_file:
-      - ./docker-config
-      - ../common-config
-    environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
-      WAIT_FOR: rm:8088
-    command: ["yarn","nodemanager"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
deleted file mode 100644
index d7ead21..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-CORE-SITE.XML_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
-MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/test.sh
deleted file mode 100755
index 03caea3..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/test.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../../testlib.sh"
-
-start_docker_env
-
-execute_robot_test scm createmrenv.robot
-
-
-# rm is the container name (the YARN ResourceManager), not the rm command
-execute_command_in_container rm sudo apk add --update py-pip
-execute_command_in_container rm sudo pip install robotframework
-
-# reinitialize the directories so the remaining tests run against the Ozone tree at /opt/ozone
-export OZONE_DIR=/opt/ozone
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../../testlib.sh"
-
-execute_robot_test rm ozonefs/hadoopo3fs.robot
-
-execute_robot_test rm -v hadoop.version:3.1.2 mapreduce.robot
-
-
-stop_docker_env
-
-generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env
deleted file mode 100644
index 70ba4b6..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=@hdds.version@
-HADOOP_IMAGE=apache/hadoop
-HADOOP_VERSION=3
-OZONE_RUNNER_VERSION=@docker.ozone-runner.version@
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml
deleted file mode 100644
index c25d36c..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-  datanode:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    volumes:
-      - ../../..:/opt/hadoop
-    ports:
-      - 9864
-    command: ["/opt/hadoop/bin/ozone","datanode"]
-    env_file:
-      - docker-config
-      - ../common-config
-  om:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: om
-    volumes:
-      - ../../..:/opt/hadoop
-    ports:
-      - 9874:9874
-    environment:
-      WAITFOR: scm:9876
-      ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-    env_file:
-      - docker-config
-      - ../common-config
-    command: ["/opt/hadoop/bin/ozone","om"]
-  s3g:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: s3g
-    volumes:
-      - ../../..:/opt/hadoop
-    ports:
-      - 9878:9878
-    env_file:
-      - ./docker-config
-      - ../common-config
-    command: ["/opt/hadoop/bin/ozone","s3g"]
-  scm:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: scm
-    volumes:
-      - ../../..:/opt/hadoop
-    ports:
-      - 9876:9876
-    env_file:
-      - docker-config
-      - ../common-config
-    environment:
-      ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-    command: ["/opt/hadoop/bin/ozone","scm"]
-  rm:
-    image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
-    hostname: rm
-    volumes:
-      - ../../..:/opt/ozone
-    ports:
-      - 8088:8088
-    env_file:
-      - ./docker-config
-      - ../common-config
-    environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
-    command: ["yarn", "resourcemanager"]
-  nm:
-    image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
-    hostname: nm
-    volumes:
-      - ../../..:/opt/ozone
-    env_file:
-      - ./docker-config
-      - ../common-config
-    environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
-      WAIT_FOR: rm:8088
-    command: ["yarn","nodemanager"]
-# Optional section: uncomment this part to get DNS resolution for all the containers.
-#    Add 127.0.0.1 (or the IP of your Docker machine) to resolv.conf to get local DNS resolution
-#    for all the containers (including the ResourceManager and NodeManager UIs)
-#  dns:
-#    image: andyshinn/dnsmasq:2.76
-#    ports:
-#        - 53:53/udp
-#        - 53:53/tcp
-#    volumes:
-#      - "/var/run/docker.sock:/var/run/docker.sock"
-#    command:
-#      - "-k"
-#      - "-d"
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
deleted file mode 100644
index d7ead21..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-CORE-SITE.XML_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
-MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/test.sh
deleted file mode 100755
index b1910a5..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/test.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../../testlib.sh"
-
-start_docker_env
-
-execute_robot_test scm createmrenv.robot
-
-# reinitialize the directories so the remaining tests run against the Ozone tree at /opt/ozone
-export OZONE_DIR=/opt/ozone
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../../testlib.sh"
-
-execute_robot_test rm ozonefs/hadoopo3fs.robot
-
-execute_robot_test rm mapreduce.robot
-
-stop_docker_env
-
-generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env
deleted file mode 100644
index 96ab163..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=${hdds.version}
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml
deleted file mode 100644
index 2cd2ce8..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-   datanode:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-        - ../..:/opt/hadoop
-      ports:
-        - 9864
-      command: ["/opt/hadoop/bin/ozone","datanode"]
-      env_file:
-        - ./docker-config
-   om1:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9880:9874
-         - 9890:9872
-      environment:
-         ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-      env_file:
-          - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","om"]
-   om2:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9882:9874
-         - 9892:9872
-      environment:
-         ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-      env_file:
-         - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","om"]
-   om3:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9884:9874
-         - 9894:9872
-      environment:
-         ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-      env_file:
-         - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","om"]
-   scm:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9876:9876
-      env_file:
-          - ./docker-config
-      environment:
-          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["/opt/hadoop/bin/ozone","scm"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
deleted file mode 100644
index f3de99a..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
+++ /dev/null
@@ -1,40 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
-CORE-SITE.XML_fs.defaultFS=o3fs://bucket.volume.id1
-OZONE-SITE.XML_ozone.om.service.ids=id1
-OZONE-SITE.XML_ozone.om.nodes.id1=om1,om2,om3
-OZONE-SITE.XML_ozone.om.address.id1.om1=om1
-OZONE-SITE.XML_ozone.om.address.id1.om2=om2
-OZONE-SITE.XML_ozone.om.address.id1.om3=om3
-OZONE-SITE.XML_ozone.om.ratis.enable=true
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=True
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.handler.type=distributed
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-ASYNC_PROFILER_HOME=/opt/profiler
-
-#Uncomment this variable to print all Hadoop RPC traffic to stdout. See http://byteman.jboss.org/ to define your own instrumentation.
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
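With ozone.om.service.ids=id1 and three OM nodes registered under it, clients address the OM group by service id instead of a single host, which is what the fs.defaultFS=o3fs://bucket.volume.id1 value above encodes (bucket "bucket" in volume "volume", served by OM group "id1"). A sketch of preparing that default filesystem from a client whose ozone-site.xml carries the same id1 HA definitions:

    ozone sh volume create /volume
    ozone sh bucket create /volume/bucket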
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/.env b/hadoop-ozone/dist/src/main/compose/ozone-recon/.env
deleted file mode 100644
index 96ab163..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-recon/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=${hdds.version}
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml
deleted file mode 100644
index 38e2ef3..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-   datanode:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-        - ../..:/opt/hadoop
-      ports:
-        - 9864
-        - 9882
-      command: ["/opt/hadoop/bin/ozone","datanode"]
-      env_file:
-        - ./docker-config
-   om:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9874:9874
-      environment:
-         ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-      env_file:
-          - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","om"]
-   scm:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9876:9876
-      env_file:
-          - ./docker-config
-      environment:
-          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["/opt/hadoop/bin/ozone","scm"]
-   recon:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9888:9888
-      env_file:
-          - ./docker-config
-      environment:
-         WAITFOR: om:9874
-      command: ["/opt/hadoop/bin/ozone","recon"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config
deleted file mode 100644
index 61d1378..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=True
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon
-OZONE-SITE.XML_ozone.recon.om.db.dir=/data/metadata/recon
-OZONE-SITE.XML_ozone.handler.type=distributed
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-ASYNC_PROFILER_HOME=/opt/profiler
-
-#Uncomment this variable to print all Hadoop RPC traffic to stdout. See http://byteman.jboss.org/ to define your own instrumentation.
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-recon/test.sh
deleted file mode 100755
index f4bfcc3..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-recon/test.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
-
-start_docker_env
-
-execute_robot_test scm basic/basic.robot
-
-stop_docker_env
-
-generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/.env b/hadoop-ozone/dist/src/main/compose/ozone-topology/.env
deleted file mode 100644
index 249827b..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-topology/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=0.5.0-SNAPSHOT
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml
deleted file mode 100644
index a66eff6..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml
+++ /dev/null
@@ -1,110 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-   datanode_1:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-        - ../..:/opt/hadoop
-      ports:
-        - 9864
-        - 9882
-      command: ["/opt/hadoop/bin/ozone","datanode"]
-      env_file:
-        - ./docker-config
-      networks:
-         net:
-            ipv4_address: 10.5.0.4
-   datanode_2:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-        - ../..:/opt/hadoop
-      ports:
-        - 9864
-        - 9882
-      command: ["/opt/hadoop/bin/ozone","datanode"]
-      env_file:
-        - ./docker-config
-      networks:
-         net:
-            ipv4_address: 10.5.0.5
-   datanode_3:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-        - ../..:/opt/hadoop
-      ports:
-        - 9864
-        - 9882
-      command: ["/opt/hadoop/bin/ozone","datanode"]
-      env_file:
-        - ./docker-config
-      networks:
-         net:
-            ipv4_address: 10.5.0.6
-   datanode_4:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-        - ../..:/opt/hadoop
-      ports:
-        - 9864
-        - 9882
-      command: ["/opt/hadoop/bin/ozone","datanode"]
-      env_file:
-        - ./docker-config
-      networks:
-         net:
-            ipv4_address: 10.5.0.7
-   om:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9874:9874
-      environment:
-         ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-      env_file:
-          - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","om"]
-      networks:
-         net:
-            ipv4_address: 10.5.0.70
-   scm:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9876:9876
-      env_file:
-          - ./docker-config
-      environment:
-          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["/opt/hadoop/bin/ozone","scm"]
-      networks:
-         net:
-            ipv4_address: 10.5.0.71
-networks:
-   net:
-     driver: bridge
-     ipam:
-       config:
-         - subnet: 10.5.0.0/16
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config
deleted file mode 100644
index ac6a367..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=True
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.handler.type=distributed
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true
-OZONE-SITE.XML_ozone.scm.container.placement.impl=org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackAware
-OZONE-SITE.XML_net.topology.node.switch.mapping.impl=org.apache.hadoop.net.TableMapping
-OZONE-SITE.XML_net.topology.table.file.name=/opt/hadoop/compose/ozone-topology/network-config
-OZONE-SITE.XML_dfs.network.topology.aware.read.enable=true
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-ASYNC_PROFILER_HOME=/opt/profiler
-HDDS_DN_OPTS=-Dmodule.name=datanode
-HDFS_OM_OPTS=-Dmodule.name=om
-HDFS_STORAGECONTAINERMANAGER_OPTS=-Dmodule.name=scm
-HDFS_OM_SH_OPTS=-Dmodule.name=sh
-HDFS_SCM_CLI_OPTS=-Dmodule.name=scmcli
-
-#Uncomment this variable to print all Hadoop RPC traffic to stdout. See http://byteman.jboss.org/ to define your own instrumentation.
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/network-config b/hadoop-ozone/dist/src/main/compose/ozone-topology/network-config
deleted file mode 100644
index 5c6af82..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-topology/network-config
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-10.5.0.4	/rack1
-10.5.0.5	/rack1
-10.5.0.6	/rack1
-10.5.0.7	/rack2
-10.5.0.8	/rack2
-10.5.0.9	/rack2
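This table is what the docker-config above wires in through net.topology.node.switch.mapping.impl=org.apache.hadoop.net.TableMapping and net.topology.table.file.name: each line maps a node address to a rack path, which is why the compose file pins the datanodes to static addresses (10.5.0.4 through 10.5.0.7). The format is simply:

    # <node IP or hostname><whitespace><rack path>
    10.5.0.4	/rack1
    # nodes missing from the table typically fall back to /default-rack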
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh
deleted file mode 100755
index d7402df..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
-
-start_docker_env 4
-
-#Due to a limitation of the current auditparser test, it must be the
-#first test run on a clean cluster.
-
-#Disabled for now: the audit parser tool hits an exception while parsing.
-#execute_robot_test om auditparser
-
-execute_robot_test scm basic/basic.robot
-
-execute_robot_test scm topology/scmcli.robot
-
-stop_docker_env
-
-generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/.env b/hadoop-ozone/dist/src/main/compose/ozone/.env
deleted file mode 100644
index 96ab163..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=${hdds.version}
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
deleted file mode 100644
index 145ce3e..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-   datanode:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-        - ../..:/opt/hadoop
-      ports:
-        - 9864
-        - 9882
-      command: ["/opt/hadoop/bin/ozone","datanode"]
-      env_file:
-        - ./docker-config
-   om:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9874:9874
-      environment:
-         ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-      env_file:
-          - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","om"]
-   scm:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      privileged: true #required by the profiler
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9876:9876
-      env_file:
-          - ./docker-config
-      environment:
-          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["/opt/hadoop/bin/ozone","scm"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
deleted file mode 100644
index 380b529..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=True
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.handler.type=distributed
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-ASYNC_PROFILER_HOME=/opt/profiler
-
-#Uncomment this variable to print all Hadoop RPC traffic to stdout. See http://byteman.jboss.org/ to define your own instrumentation.
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh
deleted file mode 100755
index e06f817..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
-
-start_docker_env
-
-#Due to a limitation of the current auditparser test, it must be the
-#first test run on a clean cluster.
-
-#Disabled for now: the audit parser tool hits an exception while parsing.
-#execute_robot_test om auditparser
-
-execute_robot_test scm basic/basic.robot
-
-execute_robot_test scm gdpr/gdpr.robot
-
-stop_docker_env
-
-generate_report
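Each compose directory ships the same kind of `test.sh`: source the shared `testlib.sh`, start the cluster, run one or more robot suites, stop the cluster, and generate a report. A typical invocation, assuming an extracted dist build (paths illustrative):

```
cd hadoop-ozone/dist/target/ozone-*/compose/ozone
./test.sh
```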
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env b/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env
deleted file mode 100644
index 96ab163..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=${hdds.version}
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
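The `${hdds.version}` and `${docker.ozone-runner.version}` placeholders are Maven properties substituted when the distribution is assembled, so the checked-in `.env` is a template rather than a literal env file. After filtering it would look roughly like this (version values are illustrative only):

```
HDDS_VERSION=0.5.0-SNAPSHOT
OZONE_RUNNER_VERSION=20191107-1
```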
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml
deleted file mode 100644
index 703329f..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-   datanode:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-        - ../..:/opt/hadoop
-      ports:
-        - 9864
-      command: ["/opt/hadoop/bin/ozone","datanode"]
-      env_file:
-        - ./docker-config
-   om:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9874:9874
-      environment:
-         ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-      env_file:
-          - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","om"]
-   scm:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9876:9876
-      env_file:
-          - ./docker-config
-      environment:
-          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["/opt/hadoop/bin/ozone","scm"]
-   ozone_client:
-       image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-       volumes:
-         - ../..:/opt/hadoop
-       ports:
-         - 9869
-       command: ["tail", "-f","/etc/passwd"]
-       env_file:
-         - ./docker-config
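The `ozone_client` service is kept alive with a no-op `tail` command purely so the blockade tests always have a long-running container to exec CLI commands in, for example:

```
# Run any ozone CLI command from the idle client container.
docker-compose exec ozone_client ozone version
```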
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
deleted file mode 100644
index 4d5466c..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=True
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.handler.type=distributed
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_ozone.client.max.retries=10
-OZONE-SITE.XML_ozone.scm.stale.node.interval=2m
-OZONE-SITE.XML_ozone.scm.dead.node.interval=5m
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
-OZONE-SITE.XML_ozone.scm.pipeline.destroy.timeout=15s
-OZONE-SITE.XML_hdds.heartbeat.interval=2s
-OZONE-SITE.XML_hdds.scm.wait.time.after.safemode.exit=30s
-OZONE-SITE.XML_hdds.scm.replication.thread.interval=6s
-OZONE-SITE.XML_hdds.scm.replication.event.timeout=10s
-OZONE-SITE.XML_dfs.ratis.server.failure.duration=35s
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-
-#Enable this variable to print out all hadoop rpc traffic to stdout. See http://byteman.jboss.org/ to define your own instrumentation.
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
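Taken together, these shortened intervals define the failure-detection timeline the blockade tests exercise: with a 2s heartbeat, a partitioned datanode misses roughly 60 heartbeats before being marked stale at the 2m mark and is declared dead at 5m, after which the replication thread (running every 6s here) can begin re-replicating its containers.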
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/.env b/hadoop-ozone/dist/src/main/compose/ozoneperf/.env
deleted file mode 100644
index 96ab163..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozoneperf/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=${hdds.version}
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/README.md b/hadoop-ozone/dist/src/main/compose/ozoneperf/README.md
deleted file mode 100644
index 62328e0..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozoneperf/README.md
+++ /dev/null
@@ -1,56 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-# Compose files for local performance tests
-
-This directory contains a docker-compose definition for an Ozone cluster in
-which all the metrics are saved to a Prometheus instance, and profiling and
-Jaeger tracing are turned on and set up.
-
-Prometheus follows a pull-based approach, where the metrics are published
-on an HTTP endpoint.
-
-A Prometheus-compatible metrics endpoint can be enabled by setting the `hdds.prometheus.endpoint.enabled` property to `true`.
-
-## How to start
-
-Start the cluster with `docker-compose`
-
-```
-docker-compose up -d
-```
-
-Note: The freon test will be started after 30 seconds.
-
-## How to use
-
-You can check the Ozone web UIs:
-
-OzoneManager: http://localhost:9874
-SCM: http://localhost:9876
-
-You can check the Ozone metrics from the Prometheus web UI:
-
-http://localhost:9090/graph
-
-You can view Grafana dashboards at:
-
-http://localhost:3000
-
-The default dashboards available are:
-Ozone - Object Metrics
-Ozone - RPC Metrics
-
-You can access the Jaeger UI at:
-http://localhost:16686
\ No newline at end of file
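With this cluster up, the Prometheus HTTP API gives a quick way to confirm that scraping works; `up` is a built-in Prometheus metric, so no Ozone-specific metric name needs to be assumed:

```
curl 'http://localhost:9090/api/v1/query?query=up'
```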
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml
deleted file mode 100644
index fa20540..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml
+++ /dev/null
@@ -1,89 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-   datanode:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-        - ../..:/opt/hadoop
-      ports:
-        - 9864
-      command: ["ozone","datanode"]
-      env_file:
-        - ./docker-config
-   om:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-        - ../..:/opt/hadoop
-      ports:
-         - 9874:9874
-      environment:
-         ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-      env_file:
-          - ./docker-config
-      command: ["ozone","om"]
-   scm:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9876:9876
-      env_file:
-          - ./docker-config
-      environment:
-          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["ozone","scm"]
-   jaeger:
-     image: jaegertracing/all-in-one:latest
-     environment:
-       COLLECTOR_ZIPKIN_HTTP_PORT: 9411
-     ports:
-       - 16686:16686
-   prometheus:
-     image: prom/prometheus
-     volumes:
-       - "../common/prometheus/prometheus.yml:/etc/prometheus.yml"
-     command: ["--config.file","/etc/prometheus.yml"]
-     ports:
-        - 9090:9090
-   freon:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      environment:
-         SLEEP_SECONDS: 30
-      env_file:
-         - ./docker-config
-      command: ["ozone","freon","rk"]
-   grafana:
-      image: grafana/grafana
-      volumes:
-         - "../common/grafana/dashboards:/var/lib/grafana/dashboards"
-         - "../common/grafana/provisioning:/etc/grafana/provisioning"
-         - "../common/grafana/conf/grafana.ini:/etc/grafana/grafana.ini"
-      command: ["-config","/etc/grafana/grafana.ini"]
-      ports:
-         - 3000:3000
-   s3g:
-     image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-     volumes:
-       - ../..:/opt/hadoop
-     ports:
-       - 9878:9878
-     env_file:
-       - ./docker-config
-     command: ["ozone","s3g"]
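The `freon` service above fires a single `ozone freon rk` (random-key) load run 30 seconds after startup; assuming the cluster from this file is still up, the same load can be repeated by hand from any long-running container:

```
docker-compose exec scm ozone freon rk
```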
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config
deleted file mode 100644
index d2d3452..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config
+++ /dev/null
@@ -1,37 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=true
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.prometheus.endpoint.enabled=true
-OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true
-
-ASYNC_PROFILER_HOME=/opt/profiler
-
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-
-JAEGER_SAMPLER_PARAM=1
-JAEGER_SAMPLER_TYPE=const
-JAEGER_AGENT_HOST=jaeger
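The `JAEGER_SAMPLER_TYPE=const` / `JAEGER_SAMPLER_PARAM=1` pair tells the Jaeger client to sample every trace, which is appropriate for a local performance sandbox but would normally be dialed down in production.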
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/test.sh b/hadoop-ozone/dist/src/main/compose/ozoneperf/test.sh
deleted file mode 100755
index f4bfcc3..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozoneperf/test.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
-
-start_docker_env
-
-execute_robot_test scm basic/basic.robot
-
-stop_docker_env
-
-generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env
deleted file mode 100644
index 96ab163..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=${hdds.version}
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml
deleted file mode 100644
index 78fd996..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-   s3g:
-      image: haproxy:latest
-      volumes:
-         - ../..:/opt/hadoop
-         - ./haproxy-conf/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg
-      ports:
-         - 9878:9878
-   datanode:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-        - ../..:/opt/hadoop
-      ports:
-        - 9864
-      command: ["ozone","datanode"]
-      env_file:
-        - ./docker-config
-   om:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9874:9874
-      environment:
-         ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-      env_file:
-          - ./docker-config
-      command: ["ozone","om"]
-   scm:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9876:9876
-      env_file:
-          - ./docker-config
-      environment:
-          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["ozone","scm"]
-   s3g1:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9879:9878
-      env_file:
-          - ./docker-config
-      command: ["ozone","s3g"]
-   s3g2:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9880:9878
-      env_file:
-         - ./docker-config
-      command: ["ozone","s3g"]
-   s3g3:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9881:9878
-      env_file:
-         - ./docker-config
-      command: ["ozone","s3g"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config
deleted file mode 100644
index d3efa2e..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=true
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-
-#Enable this variable to print out all hadoop rpc traffic to stdout. See http://byteman.jboss.org/ to define your own instrumentation.
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/haproxy-conf/haproxy.cfg b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/haproxy-conf/haproxy.cfg
deleted file mode 100644
index 5af09fa..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/haproxy-conf/haproxy.cfg
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Simple configuration for an HTTP proxy listening on port 9878 on all
-# interfaces and forwarding requests to multiple S3 servers in a
-# round-robin fashion.
-global
-    daemon
-    maxconn 256
-
-defaults
-    mode http
-    timeout connect 5000ms
-    timeout client 50000ms
-    timeout server 50000ms
-
-frontend http-in
-    bind *:9878
-    default_backend servers
-
-backend servers
-    balance roundrobin
-    server server1 s3g1:9878 maxconn 32
-    server server2 s3g2:9878 maxconn 32
-    server server3 s3g3:9878 maxconn 32
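With the three gateways up, the round-robin balancing can be observed by pushing a few requests through the proxy and watching them spread across `s3g1`..`s3g3`; a plain status check against the S3 port is enough:

```
# Sends three requests through the proxy; haproxy rotates them
# across the backends in round-robin order.
for i in 1 2 3; do
  curl -s -o /dev/null -w '%{http_code}\n' http://localhost:9878
done
```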
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/test.sh b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/test.sh
deleted file mode 100755
index f4bfcc3..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/test.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
-
-start_docker_env
-
-execute_robot_test scm basic/basic.robot
-
-stop_docker_env
-
-generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/.env b/hadoop-ozone/dist/src/main/compose/ozones3/.env
deleted file mode 100644
index 96ab163..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozones3/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=${hdds.version}
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml
deleted file mode 100644
index cc4bfd2..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-   datanode:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-        - ../..:/opt/hadoop
-      ports:
-        - 9864
-      command: ["ozone","datanode"]
-      env_file:
-        - ./docker-config
-   om:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9874:9874
-      environment:
-         ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-      env_file:
-          - ./docker-config
-      command: ["ozone","om"]
-   scm:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9876:9876
-      env_file:
-          - ./docker-config
-      environment:
-          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["ozone","scm"]
-   s3g:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9878:9878
-      env_file:
-          - ./docker-config
-      command: ["ozone","s3g"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config b/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
deleted file mode 100644
index d3efa2e..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=true
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-
-#Enable this variable to print out all hadoop rpc traffic to stdout. See http://byteman.jboss.org/ to define your own instrumentation.
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/test.sh b/hadoop-ozone/dist/src/main/compose/ozones3/test.sh
deleted file mode 100755
index 0160da9..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozones3/test.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
-
-start_docker_env
-
-execute_robot_test scm basic/basic.robot
-
-execute_robot_test scm s3
-
-stop_docker_env
-
-generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.env b/hadoop-ozone/dist/src/main/compose/ozonescripts/.env
deleted file mode 100644
index 96ab163..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=${hdds.version}
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys
deleted file mode 100644
index ae39052..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDgEmLpYm4BrWtq1KG9hhZXCZgGrETntu0eNTo21U3VKc9nH9/ot7M6lAawsFcT9uXu4b58PTlnfvwH/TATlCFjC8n0Z7SOx+FU6L3Sn8URh9HaX4L0tF8u87oCAD4dBrUGhhB36eiuH9dBBWly6RKffYJvrjatbc7GxBO/e5OSUMtqk/DSVKksmBhZxutrKivCNjDish9ViGIf8b5yS/MlEGmaVKApik1fJ5iOlloM/GgpB60YV/hbqfCecbWgeiM1gK92gdOcA/Wx1C7fj8BSI5iDSE6eZeF80gM3421lvyPDWyVhFaGbka4rXBX/fb9QSRBA9RTqhRKAEmAIf49H hadoop@cdae967fa87a
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config
deleted file mode 100644
index 6506916..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-Host *
- UserKnownHostsFile /dev/null
- StrictHostKeyChecking no
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment
deleted file mode 100644
index cbde0f2..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-JAVA_HOME=/usr/lib/jvm/jre
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa
deleted file mode 100644
index 6632ce5..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEA4BJi6WJuAa1ratShvYYWVwmYBqxE57btHjU6NtVN1SnPZx/f
-6LezOpQGsLBXE/bl7uG+fD05Z378B/0wE5QhYwvJ9Ge0jsfhVOi90p/FEYfR2l+C
-9LRfLvO6AgA+HQa1BoYQd+norh/XQQVpcukSn32Cb642rW3OxsQTv3uTklDLapPw
-0lSpLJgYWcbrayorwjYw4rIfVYhiH/G+ckvzJRBpmlSgKYpNXyeYjpZaDPxoKQet
-GFf4W6nwnnG1oHojNYCvdoHTnAP1sdQu34/AUiOYg0hOnmXhfNIDN+NtZb8jw1sl
-YRWhm5GuK1wV/32/UEkQQPUU6oUSgBJgCH+PRwIDAQABAoIBAQDI1TH6ZNKchkck
-9XgSWsBjOqIcOQN5fCeDT8nho8WjLVpL3/Hcr+ngsxRcAXHK3xyvw33r9SQic1qJ
-/pC8u6RBFivo95qJ7vU0GXcp9TG4yLd6tui1U4WMm784U+dYNM7EDh1snSaECt3v
-1V3yNJ0QfnoOh2NShn0zAkOA+M4H8Nx2SudMCsjcbK9+fYxzW3hX+sJpMKdjG1HW
-DUz+I7cW7t0EGaVrgVSV+eR58LiXu+14YDNMrySiejB4nD2sKrx93XgiCBECCsBN
-GLQGJCztaXoAY+5Kf/aJ9EEf2wBF3GecRk+XIAd87PeDmeahLQAVkAJ/rD1vsKFs
-8kWi6CrhAoGBAP7leG/dMBhlfvROpBddIfWm2i8B+oZiAlSjdYGz+/ZhUaByXk18
-pupMGiiMQR1ziPnEg0gNgR2ZkH54qrXPn5WcQa4rlSEtUsZEp5v5WblhfX2QwKzY
-G/uhA+mB7wXpQkSmXo0LclfPF2teROQrG1OyfWkWbxFH4i3+em7sL95jAoGBAOEK
-v+wscqkMLW7Q8ONbWMCCBlmMHr6baB3VDCYZx25lr+GIF5zmJJFTmF2rq2VSAlts
-qx1AGmaUSo78kC5FuJvSNTL6a1Us5ucdthQZM3N8pAz+OAE+QEU+BsdA27yAh3tO
-yKDsMFNHKtXcgy5LeB5gzENLlNyw2jgkRv2Ef77NAoGAVH8DHqoHEH9Mx3XuRWR1
-JnaqKx0PzE5fEWmiQV3Fr5XxNivTgQJKXq7dmQVtbHLpPErdbhwz6fkHAjXD+UMb
-VsAWscL2y6m3n8wQd87/5EkiDWbXyDRXimGE53pQHviFJDa2bzEVNXCMBeaZFb4I
-cAViN1zdcrAOlUqfkXewIpsCgYB8wsXl/DpRB+RENGfn0+OfTjaQ/IKq72NIbq1+
-jfondQ6N/TICFQEe5HZrL9okoNOXteYjoD9CsWGoZdLVJGgVUvOVYImSvgMBDFK+
-T75bfzU/0sxfvBBLkviVDJsFpUf3D5VgybB86s6Po+HCD6r3RHjZshRESXOhflMx
-B3z+3QKBgE2Lwo0DuwUGkm6k8psyn3x8EiXNsiNw12cojicFTyKUYLHxMBeVbCLW
-3j3pxSggJgRuBLLzixUHbHp91r2ARTy28naK7R/la8yKVqK6ojcikN2mQsCHYtwB
-nuFwXr42ytn6G+9Wn4xT64tGjRCqyZn0/v0XsPjVCyrZ6G7EtNHP
------END RSA PRIVATE KEY-----
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub
deleted file mode 100644
index ae39052..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDgEmLpYm4BrWtq1KG9hhZXCZgGrETntu0eNTo21U3VKc9nH9/ot7M6lAawsFcT9uXu4b58PTlnfvwH/TATlCFjC8n0Z7SOx+FU6L3Sn8URh9HaX4L0tF8u87oCAD4dBrUGhhB36eiuH9dBBWly6RKffYJvrjatbc7GxBO/e5OSUMtqk/DSVKksmBhZxutrKivCNjDish9ViGIf8b5yS/MlEGmaVKApik1fJ5iOlloM/GgpB60YV/hbqfCecbWgeiM1gK92gdOcA/Wx1C7fj8BSI5iDSE6eZeF80gM3421lvyPDWyVhFaGbka4rXBX/fb9QSRBA9RTqhRKAEmAIf49H hadoop@cdae967fa87a
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile b/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile
deleted file mode 100644
index b078000..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-FROM apache/ozone-runner
-RUN sudo yum install -y openssh-clients openssh-server
-
-RUN sudo ssh-keygen -A
-RUN sudo mkdir -p /run/sshd
-RUN sudo sed -i "s/.*UsePrivilegeSeparation.*/UsePrivilegeSeparation no/g" /etc/ssh/sshd_config
-RUN sudo sed -i "s/.*PermitUserEnvironment.*/PermitUserEnvironment yes/g" /etc/ssh/sshd_config
-RUN sudo sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
-
-#/opt/hadoop is mounted, so we can't use it as the home directory
-RUN sudo usermod -d /opt hadoop
-ADD .ssh /opt/.ssh
-RUN sudo chown -R hadoop /opt/.ssh
-RUN sudo chown hadoop /opt
-RUN sudo chmod 600 /opt/.ssh/*
-RUN sudo chmod 700 /opt/.ssh
-
-RUN sudo sh -c 'echo "export JAVA_HOME=/usr/lib/jvm/jre/" >> /etc/profile'
-CMD ["sudo","/usr/sbin/sshd","-D"]
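The `PermitUserEnvironment yes` line is what makes the `.ssh/environment` file shown earlier take effect, so that the non-interactive ssh sessions opened by `start-ozone.sh`/`stop-ozone.sh` see `JAVA_HOME` without a login shell.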
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/README.md b/hadoop-ozone/dist/src/main/compose/ozonescripts/README.md
deleted file mode 100644
index 2531fa4..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-# start-ozone environment
-
-This is an example environment to use/test the `./sbin/start-ozone.sh` and `./sbin/stop-ozone.sh` scripts.
-
-There are ssh connections between the containers, and the start/stop scripts can handle the start/stop process
-similarly to a real cluster.
-
-To use it, first start the cluster:
-
-```
-docker-compose up -d
-```
-
-After a successful startup (which starts only the ssh daemons) you can start Ozone:
-
-```
-./start.sh
-```
-
-Check whether the Java processes are started:
-
-```
-./ps.sh
-```
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml
deleted file mode 100644
index 62f1163..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-   datanode:
-      build: .
-      volumes:
-        - ../..:/opt/hadoop
-      ports:
-        - 9864
-      env_file:
-        - ./docker-config
-   om:
-      build: .
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9874:9874
-      env_file:
-          - ./docker-config
-   scm:
-      build: .
-      volumes:
-         - ../..:/opt/hadoop
-      ports:
-         - 9876:9876
-      env_file:
-          - ./docker-config
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config
deleted file mode 100644
index fe713e0..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
-OZONE-SITE.XML_ozone.ksm.address=ksm
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=true
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
-HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
-HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
\ No newline at end of file
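The two plugin properties at the end are the legacy wiring that loaded the HDDS/Ozone datanode service inside the HDFS datanode process, which is why this compose config, unlike the others here, also points at an HDFS `namenode`.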
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh b/hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh
deleted file mode 100755
index d5e2c38..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-docker-compose ps -q | xargs -n1 -I CONTAINER docker exec CONTAINER ps xa
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh b/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh
deleted file mode 100755
index 49fc506..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-set -x
-docker-compose ps | grep datanode | awk '{print $1}' | xargs -n1  docker inspect --format '{{ .Config.Hostname }}' > ../../etc/hadoop/workers
-docker-compose ps | grep ozonescripts | awk '{print $1}' | xargs -I CONTAINER -n1 docker exec CONTAINER cp /opt/hadoop/etc/hadoop/workers /etc/hadoop/workers
-docker-compose exec scm /opt/hadoop/bin/ozone scm --init
-docker-compose exec scm /opt/hadoop/sbin/start-ozone.sh
-#We need a running SCM for OM object-store creation
-#TODO create a utility to wait for the startup
-sleep 10
-docker-compose exec om /opt/hadoop/bin/ozone om --init
-docker-compose exec scm /opt/hadoop/sbin/start-ozone.sh
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh b/hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh
deleted file mode 100755
index a3ce08a..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-docker-compose exec scm /opt/hadoop/sbin/stop-ozone.sh
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env
deleted file mode 100644
index 37227ac..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env
+++ /dev/null
@@ -1,19 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=${hdds.version}
-HADOOP_VERSION=3
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/README.md b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/README.md
deleted file mode 100644
index 1426270..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-# Secure Docker-compose with KMS, Yarn RM and NM
-This docker-compose environment allows testing sample MapReduce jobs with OzoneFileSystem.
-It is a superset of the ozonesecure docker-compose, which adds Yarn RM/NM in addition
-to Ozone OM/SCM/DN and the Kerberos KDC.
-
-## Basic setup
-
-```
-cd $(git rev-parse --show-toplevel)/hadoop-ozone/dist/target/ozone-@project.version@/compose/ozonesecure-mr
-
-docker-compose up -d
-```
-
-## Ozone Manager Setup
-
-```
-docker-compose exec om bash
-
-kinit -kt /etc/security/keytabs/testuser.keytab testuser/om@EXAMPLE.COM
-
-ozone sh volume create /vol1
-
-ozone sh bucket create /vol1/bucket1
-
-ozone sh key put /vol1/bucket1/key1 LICENSE.txt
-
-ozone fs -ls o3fs://bucket1.vol1/
-```
-
-## Yarn Resource Manager Setup
-```
-docker-compose exec rm bash
-
-kinit -kt /etc/security/keytabs/hadoop.keytab hadoop/rm@EXAMPLE.COM
-export HADOOP_MAPRED_HOME=/opt/hadoop/share/hadoop/mapreduce
-
-export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/opt/hadoop/share/hadoop/mapreduce/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
-
-hadoop fs -mkdir /user
-hadoop fs -mkdir /user/hadoop
-```
-
-## Run Examples
-
-### WordCount
-```
-yarn jar $HADOOP_MAPRED_HOME/hadoop-mapreduce-examples-*.jar wordcount o3fs://bucket1.vol1/key1 o3fs://bucket1.vol1/key1.count
-
-hadoop fs -cat /key1.count/part-r-00000
-```
-
-### Pi
-```
-yarn jar $HADOOP_MAPRED_HOME/hadoop-mapreduce-examples-*.jar pi 10 100
-```
-
-### RandomWrite
-```
-yarn jar $HADOOP_MAPRED_HOME/hadoop-mapreduce-examples-*.jar randomwriter -Dtest.randomwrite.total_bytes=10000000  o3fs://bucket1.vol1/randomwrite.out
-```
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml
deleted file mode 100644
index 53e0142..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml
+++ /dev/null
@@ -1,135 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3.5"
-services:
-  kdc:
-    build:
-      context: docker-image/docker-krb5
-      dockerfile: Dockerfile-krb5
-      args:
-        buildno: 1
-    hostname: kdc
-    networks:
-      - ozone
-    volumes:
-      - ../..:/opt/hadoop
-  kms:
-    image: apache/hadoop:${HADOOP_VERSION}
-    networks:
-      - ozone
-    ports:
-      - 9600:9600
-    env_file:
-      - ./docker-config
-    command: ["hadoop", "kms"]
-  datanode:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    networks:
-      - ozone
-    volumes:
-      - ../..:/opt/hadoop
-    ports:
-      - 9864
-    command: ["/opt/hadoop/bin/ozone","datanode"]
-    env_file:
-      - docker-config
-  om:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: om
-    networks:
-      - ozone
-    volumes:
-      - ../..:/opt/hadoop
-    ports:
-      - 9874:9874
-    environment:
-      ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-    env_file:
-      - docker-config
-    command: ["/opt/hadoop/bin/ozone","om"]
-  s3g:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: s3g
-    networks:
-      - ozone
-    volumes:
-      - ../..:/opt/hadoop
-    ports:
-      - 9878:9878
-    env_file:
-      - ./docker-config
-    command: ["/opt/hadoop/bin/ozone","s3g"]
-  scm:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: scm
-    networks:
-      - ozone
-    volumes:
-      - ../..:/opt/hadoop
-    ports:
-      - 9876:9876
-    env_file:
-      - docker-config
-    environment:
-      ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-    command: ["/opt/hadoop/bin/ozone","scm"]
-  rm:
-    image: apache/hadoop:${HADOOP_VERSION}
-    hostname: rm
-    networks:
-      - ozone
-    volumes:
-      - ../..:/opt/ozone
-    ports:
-      - 8088:8088
-    env_file:
-      - ./docker-config
-    environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
-    command: ["yarn", "resourcemanager"]
-  nm:
-    image: apache/hadoop:${HADOOP_VERSION}
-    hostname: nm
-    networks:
-      - ozone
-    volumes:
-      - ../..:/opt/ozone
-    env_file:
-      - ./docker-config
-    environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
-      WAIT_FOR: rm:8088
-    command: ["yarn","nodemanager"]
-  jhs:
-    image: apache/hadoop:${HADOOP_VERSION}
-    container_name: jhs
-    hostname: jhs
-    networks:
-      - ozone
-    volumes:
-      - ../..:/opt/ozone
-    ports:
-      - 8188:8188
-    env_file:
-      - ./docker-config
-    environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
-      WAIT_FOR: rm:8088
-    command: ["yarn","timelineserver"]
-networks:
-  ozone:
-    name: ozone
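
Note that, unlike om/scm/s3g above, the datanode service publishes port 9864 without a fixed host mapping, so it is the one service that can be scaled. A typical bring-up (an assumed workflow, mirroring what the smoketest library later in this patch does):

```
# Start the secure MR cluster with three datanodes
docker-compose up -d --scale datanode=3

# Watch a single service while the cluster settles
docker-compose logs -f om
```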
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config
deleted file mode 100644
index 646fd02..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config
+++ /dev/null
@@ -1,133 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=True
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.handler.type=distributed
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_hdds.block.token.enabled=true
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM
-OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab
-OZONE-SITE.XML_ozone.om.kerberos.principal=om/om@EXAMPLE.COM
-OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab
-OZONE-SITE.XML_ozone.s3g.keytab.file=/etc/security/keytabs/HTTP.keytab
-OZONE-SITE.XML_ozone.s3g.authentication.kerberos.principal=HTTP/s3g@EXAMPLE.COM
-OZONE-SITE.XML_ozone.administrators=*
-
-OZONE-SITE.XML_ozone.security.enabled=true
-OZONE-SITE.XML_hdds.scm.http.kerberos.principal=HTTP/scm@EXAMPLE.COM
-OZONE-SITE.XML_hdds.scm.http.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
-OZONE-SITE.XML_ozone.om.http.kerberos.principal=HTTP/om@EXAMPLE.COM
-OZONE-SITE.XML_ozone.om.http.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
-HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/_HOST@EXAMPLE.COM
-HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab
-HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.COM
-HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
-HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012
-CORE-SITE.XML_dfs.data.transfer.protection=authentication
-CORE-SITE.XML_hadoop.security.authentication=kerberos
-CORE-SITE.XML_hadoop.security.auth_to_local=RULE:[2:$1@$0](.*@EXAMPLE.COM)s/@.*///L
-CORE-SITE.XML_hadoop.security.key.provider.path=kms://http@kms:9600/kms
-
-#temporarily disable authorization, as org.apache.hadoop.yarn.server.api.ResourceTrackerPB is not properly annotated to support it
-CORE-SITE.XML_hadoop.security.authorization=false
-HADOOP-POLICY.XML_ozone.om.security.client.protocol.acl=*
-HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=*
-HADOOP-POLICY.XML_hdds.security.client.scm.container.protocol.acl=*
-HADOOP-POLICY.XML_hdds.security.client.scm.block.protocol.acl=*
-HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=*
-HADOOP-POLICY.XML_org.apache.hadoop.yarn.server.api.ResourceTracker.acl=*
-
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-
-CORE-SITE.XML_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
-CORE-SITE.XML_fs.defaultFS=o3fs://bucket1.vol1/
-
-MAPRED-SITE.XML_mapreduce.framework.name=yarn
-MAPRED-SITE.XML_yarn.app.mapreduce.am.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
-MAPRED-SITE.XML_mapreduce.map.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
-MAPRED-SITE.XML_mapreduce.reduce.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
-MAPRED-SITE.XML_mapreduce.map.memory.mb=2048
-MAPRED-SITE.XML_mapreduce.reduce.memory.mb=2048
-#MAPRED-SITE.XML_mapred.child.java.opts=-Xmx2048
-MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
-
-YARN-SITE.XML_yarn.app.mapreduce.am.staging-dir=/user
-YARN-SITE.XML_yarn.timeline-service.enabled=true
-YARN-SITE.XML_yarn.timeline-service.generic.application.history.enabled=true
-YARN-SITE.XML_yarn.timeline-service.hostname=jhs
-YARN-SITE.XML_yarn.timeline-service.principal=jhs/jhs@EXAMPLE.COM
-YARN-SITE.XML_yarn.timeline-service.keytab=/etc/security/keytabs/jhs.keytab
-YARN-SITE.XML_yarn.log.server.url=http://jhs:8188/applicationhistory/logs/
-
-YARN-SITE.XML_yarn.nodemanager.principal=nm/_HOST@EXAMPLE.COM
-YARN-SITE.XML_yarn.nodemanager.keytab=/etc/security/keytabs/nm.keytab
-YARN-SITE.XML_yarn.nodemanager.pmem-check-enabled=false
-YARN-SITE.XML_yarn.nodemanager.delete.debug-delay-sec=600
-YARN-SITE.XML_yarn.nodemanager.vmem-check-enabled=false
-YARN-SITE.XML_yarn.nodemanager.aux-services=mapreduce_shuffle
-YARN-SITE.XML_yarn.nodemanager.disk-health-checker.enable=false
-
-YARN-SITE.XML_yarn.resourcemanager.hostname=rm
-YARN-SITE.XML_yarn.resourcemanager.keytab=/etc/security/keytabs/rm.keytab
-YARN-SITE.XML_yarn.resourcemanager.principal=rm/rm@EXAMPLE.COM
-YARN-SITE.XML_yarn.resourcemanager.system.metrics.publisher.enabled=true
-
-YARN-SITE.XML_yarn.log-aggregation-enable=true
-YARN-SITE.XML_yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds=3600
-
-# YARN's LinuxContainerExecutor requires /opt/hadoop/etc/hadoop to be owned by root and not modifiable by other users,
-# which prevents start.sh from changing the configuration based on docker-config
-# YARN-SITE.XML_yarn.nodemanager.container-executor.class=org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor
-# YARN-SITE.XML_yarn.nodemanager.linux-container-executor.path=/opt/hadoop/bin/container-executor
-# YARN-SITE.XML_yarn.nodemanager.linux-container-executor.group=hadoop
-
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-applications=10000
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-am-resource-percent=0.1
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.resource-calculator=org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.queues=default
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.capacity=100
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.user-limit-factor=1
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.maximum-capacity=100
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.state=RUNNING
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_submit_applications=*
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_administer_queue=*
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.node-locality-delay=40
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings=
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings-override.enable=false
-
-#Enable this variable to print all Hadoop RPC traffic to stdout. See http://byteman.jboss.org/ to define your own instrumentation.
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
-
-OZONE_DATANODE_SECURE_USER=root
-KEYTAB_DIR=/etc/security/keytabs
-KERBEROS_KEYTABS=dn om scm HTTP testuser s3g rm nm yarn jhs hadoop spark
-KERBEROS_KEYSTORES=hadoop
-KERBEROS_SERVER=kdc
-JAVA_HOME=/usr/lib/jvm/jre
-JSVC_HOME=/usr/bin
-SLEEP_SECONDS=5
-KERBEROS_ENABLED=true
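
The `FILENAME.XML_property=value` entries above are translated into real Hadoop configuration files by the runner image's startup scripts. A minimal sketch of that transformation (illustration only, not the actual implementation; it does no XML escaping):

```
#!/usr/bin/env bash
# Render all OZONE-SITE.XML_* environment entries as ozone-site.xml.
prefix="OZONE-SITE.XML_"
out=/tmp/ozone-site.xml

{
  echo '<?xml version="1.0"?>'
  echo '<configuration>'
  env | grep "^OZONE-SITE\.XML_" | while IFS='=' read -r key value; do
    name="${key#"$prefix"}"
    printf '  <property><name>%s</name><value>%s</value></property>\n' \
      "$name" "$value"
  done
  echo '</configuration>'
} > "$out"
```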
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/Dockerfile-krb5 b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/Dockerfile-krb5
deleted file mode 100644
index 6c6c816..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/Dockerfile-krb5
+++ /dev/null
@@ -1,35 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-FROM openjdk:8u191-jdk-alpine3.9
-# hadolint ignore=DL3018
-RUN apk add --no-cache bash ca-certificates openssl krb5-server krb5 && rm -rf /var/cache/apk/* && update-ca-certificates
-RUN wget -O /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64
-RUN chmod +x /usr/local/bin/dumb-init
-RUN wget -O /root/issuer https://github.com/ajayydv/docker/raw/kdc/issuer
-RUN chmod +x /root/issuer
-WORKDIR /opt
-COPY krb5.conf /etc/
-COPY kadm5.acl /var/lib/krb5kdc/kadm5.acl
-RUN kdb5_util create -s -P Welcome1
-RUN kadmin.local -q "addprinc -randkey admin/admin@EXAMPLE.COM"
-RUN kadmin.local -q "ktadd -k /tmp/admin.keytab admin/admin@EXAMPLE.COM"
-COPY launcher.sh .
-RUN chmod +x /opt/launcher.sh
-RUN mkdir -p /data
-ENTRYPOINT ["/usr/local/bin/dumb-init", "--", "/opt/launcher.sh"]
-
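
The image can also be built standalone, outside of docker-compose (the `krb5` tag matches the README below):

```
cd docker-image/docker-krb5
docker build -f Dockerfile-krb5 -t krb5 .
```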
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/README.md b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/README.md
deleted file mode 100644
index 60b675c..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-# Experimental INSECURE krb5 Kerberos container.
-
-Only for development. Not for production.
-
-The docker image contains a REST service that provides keystore and keytab files without any authentication!
-
-Master password: Welcome1
-
-Principal: admin/admin@EXAMPLE.COM Password: Welcome1
-
-Test:
-
-```
-docker run --net=host krb5
-
-docker run --net=host -it --entrypoint=bash krb5
-kinit admin/admin
-#password: Welcome1
-klist
-```
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/kadm5.acl b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/kadm5.acl
deleted file mode 100644
index f0cd660..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/kadm5.acl
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-*/admin@EXAMPLE.COM x
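
The single ACL line above grants full privileges (`x`) to every `*/admin` principal. An example session against this KDC, assuming the `admin/admin` principal created in the Dockerfile (password from the README: Welcome1):

```
kadmin -p admin/admin@EXAMPLE.COM -w Welcome1 -q "listprincs"
kadmin -p admin/admin@EXAMPLE.COM -w Welcome1 \
  -q "addprinc -randkey testuser/om@EXAMPLE.COM"
```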
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/krb5.conf b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/krb5.conf
deleted file mode 100644
index 0c274d3..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/krb5.conf
+++ /dev/null
@@ -1,41 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-[logging]
-default = FILE:/var/log/krb5libs.log
-kdc = FILE:/var/log/krb5kdc.log
-admin_server = FILE:/var/log/kadmind.log
-
-[libdefaults]
- dns_canonicalize_hostname = false
- dns_lookup_realm = false
- ticket_lifetime = 24h
- renew_lifetime = 7d
- forwardable = true
- rdns = false
- default_realm = EXAMPLE.COM
-
-[realms]
- EXAMPLE.COM = {
-  kdc = localhost
-  admin_server = localhost
-  max_renewable_life = 7d
- }
-
-[domain_realm]
- .example.com = EXAMPLE.COM
- example.com = EXAMPLE.COM
-
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/launcher.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/launcher.sh
deleted file mode 100644
index 0824f7b..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/launcher.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -e
-/root/issuer &
-krb5kdc -n &
-sleep 4
-kadmind -nofork &
-sleep 2
-tail -f /var/log/krb5kdc.log &
-tail -f /var/log/kadmind.log
-
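
The fixed sleeps above order the startup: the keytab issuer and KDC first, then kadmind. A sketch of a readiness loop that could replace them, assuming the MIT KDC also listens on TCP port 88 (its default) and using bash's built-in /dev/tcp:

```
krb5kdc -n &
# Wait until the KDC accepts TCP connections before starting kadmind
until (exec 3<>/dev/tcp/localhost/88) 2>/dev/null; do sleep 1; done
kadmind -nofork &
```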
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh
deleted file mode 100755
index cc6ebf0..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
-
-export SECURITY_ENABLED=true
-
-start_docker_env
-
-execute_robot_test om kinit.robot
-
-execute_robot_test om createmrenv.robot
-
-# reinitialize the directories to use
-export OZONE_DIR=/opt/ozone
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
-
-execute_robot_test rm kinit-hadoop.robot
-
-execute_robot_test rm mapreduce.robot
-
-stop_docker_env
-
-generate_report
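
Typical usage, run from this environment's directory; setting KEEP_RUNNING (read by testlib.sh's stop_docker_env) leaves the cluster up for debugging after the run:

```
cd compose/ozonesecure-mr
KEEP_RUNNING=true ./test.sh
```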
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env
deleted file mode 100644
index 37227ac..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env
+++ /dev/null
@@ -1,19 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=${hdds.version}
-HADOOP_VERSION=3
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/README.md b/hadoop-ozone/dist/src/main/compose/ozonesecure/README.md
deleted file mode 100644
index 0ce9a0a..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-# Experimental INSECURE krb5 Kerberos container.
-
-Only for development. Not for production.
-
-#### Dockerfile for KDC:
-* ./docker-image/docker-krb5/Dockerfile-krb5
-
-#### Dockerfile for SCM, OM and DataNode:
-* ./docker-image/runner/Dockerfile
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
deleted file mode 100644
index de60a411..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-  kdc:
-    build:
-      context: docker-image/docker-krb5
-      dockerfile: Dockerfile-krb5
-      args:
-        buildno: 1
-    hostname: kdc
-    volumes:
-      - ../..:/opt/hadoop
-
-  kms:
-    image: apache/hadoop:${HADOOP_VERSION}
-    ports:
-      - 9600:9600
-    env_file:
-      - ./docker-config
-    command: ["hadoop", "kms"]
-
-  datanode:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    volumes:
-      - ../..:/opt/hadoop
-    ports:
-      - 9864
-    command: ["/opt/hadoop/bin/ozone","datanode"]
-    env_file:
-      - docker-config
-  om:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: om
-    volumes:
-      - ../..:/opt/hadoop
-    ports:
-      - 9874:9874
-    environment:
-      ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-    env_file:
-      - docker-config
-    command: ["/opt/hadoop/bin/ozone","om"]
-  s3g:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: s3g
-    volumes:
-      - ../..:/opt/hadoop
-    ports:
-      - 9878:9878
-    env_file:
-      - ./docker-config
-    command: ["/opt/hadoop/bin/ozone","s3g"]
-  recon:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: recon
-    volumes:
-      - ../..:/opt/hadoop
-    ports:
-      - 9888:9888
-    env_file:
-      - ./docker-config
-    environment:
-      WAITFOR: om:9874
-    command: ["/opt/hadoop/bin/ozone","recon"]
-  scm:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: scm
-    volumes:
-      - ../..:/opt/hadoop
-    ports:
-      - 9876:9876
-    env_file:
-      - docker-config
-    environment:
-      ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-    command: ["/opt/hadoop/bin/ozone","scm"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
deleted file mode 100644
index 44af35e..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
+++ /dev/null
@@ -1,90 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
-
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=True
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.handler.type=distributed
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_hdds.block.token.enabled=true
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM
-OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab
-OZONE-SITE.XML_ozone.om.kerberos.principal=om/om@EXAMPLE.COM
-OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab
-OZONE-SITE.XML_ozone.s3g.keytab.file=/etc/security/keytabs/HTTP.keytab
-OZONE-SITE.XML_ozone.s3g.authentication.kerberos.principal=HTTP/s3g@EXAMPLE.COM
-OZONE-SITE.XML_ozone.recon.authentication.kerberos.principal=HTTP/recon@EXAMPLE.COM
-OZONE-SITE.XML_ozone.recon.keytab.file=/etc/security/keytabs/HTTP.keytab
-OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon
-OZONE-SITE.XML_recon.om.snapshot.task.initial.delay=20s
-
-OZONE-SITE.XML_ozone.security.enabled=true
-OZONE-SITE.XML_ozone.acl.enabled=true
-OZONE-SITE.XML_ozone.acl.authorizer.class=org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer
-OZONE-SITE.XML_ozone.administrators=*
-OZONE-SITE.XML_hdds.scm.http.kerberos.principal=HTTP/scm@EXAMPLE.COM
-OZONE-SITE.XML_hdds.scm.http.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
-OZONE-SITE.XML_ozone.om.http.kerberos.principal=HTTP/om@EXAMPLE.COM
-OZONE-SITE.XML_ozone.om.http.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
-HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/_HOST@EXAMPLE.COM
-HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab
-HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.COM
-HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
-HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012
-CORE-SITE.XML_dfs.data.transfer.protection=authentication
-CORE-SITE.XML_hadoop.security.authentication=kerberos
-CORE-SITE.XML_hadoop.security.auth_to_local=RULE:[2:$1@$0](.*)s/.*/root/
-CORE-SITE.XML_hadoop.security.key.provider.path=kms://http@kms:9600/kms
-
-CORE-SITE.XML_hadoop.http.authentication.simple.anonymous.allowed=false
-CORE-SITE.XML_hadoop.http.authentication.signature.secret.file=/etc/security/http_secret
-CORE-SITE.XML_hadoop.http.authentication.type=kerberos
-CORE-SITE.XML_hadoop.http.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.COM
-CORE-SITE.XML_hadoop.http.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
-CORE-SITE.XML_hadoop.http.filter.initializers=org.apache.hadoop.security.AuthenticationFilterInitializer
-
-CORE-SITE.XML_hadoop.security.authorization=true
-HADOOP-POLICY.XML_ozone.om.security.client.protocol.acl=*
-HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=*
-HADOOP-POLICY.XML_hdds.security.client.scm.container.protocol.acl=*
-HADOOP-POLICY.XML_hdds.security.client.scm.block.protocol.acl=*
-HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=*
-
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-
-#Enable this variable to print all Hadoop RPC traffic to stdout. See http://byteman.jboss.org/ to define your own instrumentation.
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
-
-OZONE_DATANODE_SECURE_USER=root
-SECURITY_ENABLED=true
-KEYTAB_DIR=/etc/security/keytabs
-KERBEROS_KEYTABS=dn om scm HTTP testuser testuser2 s3g
-KERBEROS_KEYSTORES=hadoop
-KERBEROS_SERVER=kdc
-JAVA_HOME=/usr/lib/jvm/jre
-JSVC_HOME=/usr/bin
-SLEEP_SECONDS=5
-KERBEROS_ENABLED=true
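
Since anonymous HTTP access is disabled and SPNEGO is enabled above, the web endpoints must be queried with a Kerberos ticket. An example probe of the SCM JMX servlet, following the same pattern the smoketest library in this patch uses:

```
docker-compose exec scm bash -c '
  kinit -k HTTP/scm@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab
  curl --negotiate -u : -s "http://scm:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo"
'
```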
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/Dockerfile-krb5 b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/Dockerfile-krb5
deleted file mode 100644
index 1a6097e..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/Dockerfile-krb5
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-FROM openjdk:8u191-jdk-alpine3.9
-RUN apk add --update bash ca-certificates openssl krb5-server krb5 && rm -rf /var/cache/apk/* && update-ca-certificates
-RUN wget -O /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64
-RUN chmod +x /usr/local/bin/dumb-init
-RUN wget -O /root/issuer https://github.com/ajayydv/docker/raw/kdc/issuer
-RUN chmod +x /root/issuer
-WORKDIR /opt
-ADD krb5.conf /etc/
-ADD kadm5.acl /var/lib/krb5kdc/kadm5.acl
-RUN kdb5_util create -s -P Welcome1
-RUN kadmin.local -q "addprinc -randkey admin/admin@EXAMPLE.COM"
-RUN kadmin.local -q "ktadd -k /tmp/admin.keytab admin/admin@EXAMPLE.COM"
-ADD launcher.sh .
-RUN chmod +x /opt/launcher.sh
-RUN mkdir -p /data
-ENTRYPOINT ["/usr/local/bin/dumb-init", "--", "/opt/launcher.sh"]
-
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/README.md b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/README.md
deleted file mode 100644
index b864a5f..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-# Experimental INSECURE krb5 Kerberos container.
-
-Only for development. Not for production.
-
-The docker image contains a REST service that provides keystore and keytab files without any authentication!
-
-Master password: Welcome1
-
-Principal: admin/admin@EXAMPLE.COM Password: Welcome1
-
-Test:
-
-```
-docker run --net=host krb5
-
-docker run --net=host -it --entrypoint=bash krb5
-kinit admin/admin
-#password: Welcome1
-klist
-```
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/kadm5.acl b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/kadm5.acl
deleted file mode 100644
index f0cd660..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/kadm5.acl
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-*/admin@EXAMPLE.COM x
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/krb5.conf b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/krb5.conf
deleted file mode 100644
index 0c274d3..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/krb5.conf
+++ /dev/null
@@ -1,41 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-[logging]
-default = FILE:/var/log/krb5libs.log
-kdc = FILE:/var/log/krb5kdc.log
-admin_server = FILE:/var/log/kadmind.log
-
-[libdefaults]
- dns_canonicalize_hostname = false
- dns_lookup_realm = false
- ticket_lifetime = 24h
- renew_lifetime = 7d
- forwardable = true
- rdns = false
- default_realm = EXAMPLE.COM
-
-[realms]
- EXAMPLE.COM = {
-  kdc = localhost
-  admin_server = localhost
-  max_renewable_life = 7d
- }
-
-[domain_realm]
- .example.com = EXAMPLE.COM
- example.com = EXAMPLE.COM
-
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/launcher.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/launcher.sh
deleted file mode 100644
index 0824f7b..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/launcher.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -e
-/root/issuer &
-krb5kdc -n &
-sleep 4
-kadmind -nofork &
-sleep 2
-tail -f /var/log/krb5kdc.log &
-tail -f /var/log/kadmind.log
-
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
deleted file mode 100755
index f328463..0000000
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
-
-export SECURITY_ENABLED=true
-
-start_docker_env
-
-execute_robot_test scm kinit.robot
-
-execute_robot_test scm basic
-
-execute_robot_test scm security
-
-execute_robot_test scm ozonefs/ozonefs.robot
-
-execute_robot_test s3g s3
-
-execute_robot_test scm scmcli
-
-stop_docker_env
-
-generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh
deleted file mode 100755
index afa5d56..0000000
--- a/hadoop-ozone/dist/src/main/compose/test-all.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Test executor to test all the compose/*/test.sh test scripts.
-#
-
-SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )
-ALL_RESULT_DIR="$SCRIPT_DIR/result"
-
-mkdir -p "$ALL_RESULT_DIR"
-rm -f "$ALL_RESULT_DIR"/*
-
-RESULT=0
-IFS=$'\n'
-# shellcheck disable=SC2044
-for test in $(find "$SCRIPT_DIR" -name test.sh | sort); do
-  echo "Executing test in $(dirname "$test")"
-
-  #required to read the .env file from the right location
-  cd "$(dirname "$test")" || continue
-  ./test.sh
-  ret=$?
-  if [[ $ret -ne 0 ]]; then
-      RESULT=1
-      echo "ERROR: Test execution of $(dirname "$test") FAILED!"
-  fi
-  RESULT_DIR="$(dirname "$test")/result"
-  cp "$RESULT_DIR"/robot-*.xml "$RESULT_DIR"/docker-*.log "$ALL_RESULT_DIR"/
-done
-
-rebot -N "smoketests" -d "$SCRIPT_DIR/result" "$SCRIPT_DIR/result/robot-*.xml"
-exit $RESULT
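
Typical invocation from an extracted distribution (the version directory below is illustrative):

```
cd hadoop-ozone/dist/target/ozone-*/compose
./test-all.sh
# the combined rebot report is written to ./result/
```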
diff --git a/hadoop-ozone/dist/src/main/compose/test-single.sh b/hadoop-ozone/dist/src/main/compose/test-single.sh
deleted file mode 100755
index 629a9bc..0000000
--- a/hadoop-ozone/dist/src/main/compose/test-single.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Single test executor, can start a single robot test in any running container.
-#
-
-
-COMPOSE_DIR="$PWD"
-export COMPOSE_DIR
-
-if [[ ! -f "$COMPOSE_DIR/docker-compose.yaml" ]]; then
-    echo "docker-compose.yaml is missing from the current dir. Please run this command from a docker-compose environment."
-    exit 1
-fi
-if (( $# != 2 )); then
-cat << EOF
-   Single test executor
-
-   Usage:
-
-     ../test-single.sh <container> <robot_test>
-
-        container: Name of the running docker-compose container (docker-compose.yaml is required in the current directory)
-
-        robot_test: name of the robot test or directory relative to the smoketest dir.
-
-
-
-EOF
-
-    exit 1
-fi
-
-# shellcheck source=testlib.sh
-source "$COMPOSE_DIR/../testlib.sh"
-
-create_results_dir
-
-execute_robot_test "$1" "$2"
-
-generate_report
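
A concrete example, assuming a running compose environment and the `basic` smoketest suite referenced by the test.sh scripts above (the exact robot path is illustrative):

```
cd ozonesecure
docker-compose up -d
../test-single.sh scm basic/basic.robot
```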
diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh
deleted file mode 100755
index b20dca8..0000000
--- a/hadoop-ozone/dist/src/main/compose/testlib.sh
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-set -e
-
-COMPOSE_ENV_NAME=$(basename "$COMPOSE_DIR")
-COMPOSE_FILE=$COMPOSE_DIR/docker-compose.yaml
-RESULT_DIR=${RESULT_DIR:-"$COMPOSE_DIR/result"}
-RESULT_DIR_INSIDE="/tmp/smoketest/$(basename "$COMPOSE_ENV_NAME")/result"
-SMOKETEST_DIR_INSIDE="${OZONE_DIR:-/opt/hadoop}/smoketest"
-
-## @description create results directory, purging any prior data
-create_results_dir() {
-  #delete previous results
-  rm -rf "$RESULT_DIR"
-  mkdir -p "$RESULT_DIR"
-  #Should be writable from the docker containers, where the user is different.
-  chmod ogu+w "$RESULT_DIR"
-}
-
-## @description print the number of datanodes up
-## @param the docker-compose file
-count_datanodes() {
-  local compose_file=$1
-
-  local jmx_url='http://scm:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo'
-  if [[ "${SECURITY_ENABLED}" == 'true' ]]; then
-    docker-compose -f "${compose_file}" exec -T scm bash -c "kinit -k HTTP/scm@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab && curl --negotiate -u : -s '${jmx_url}'"
-  else
-    docker-compose -f "${compose_file}" exec -T scm curl -s "${jmx_url}"
-  fi \
-    | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value' || true
-}
-
-## @description wait until datanodes are up (or give up after 90 seconds)
-## @param the docker-compose file
-## @param number of datanodes to wait for (default: 3)
-wait_for_datanodes(){
-  local compose_file=$1
-  local -i datanode_count=${2:-3}
-
-  #Reset the timer
-  SECONDS=0
-
-  #Don't give up until 90 seconds have passed
-  while [[ $SECONDS -lt 90 ]]; do
-
-     #This line checks the number of HEALTHY datanodes registered in scm over the
-     # jmx HTTP servlet
-     datanodes=$(count_datanodes "${compose_file}")
-     if [[ "$datanodes" ]]; then
-       if [[ ${datanodes} -ge ${datanode_count} ]]; then
-
-         #It's up and running. Let's return from the function.
-         echo "$datanodes datanodes are up and registered to the scm"
-         return
-       else
-
-         #Print the count only if it is a number; it may not be one if scm has not started yet
-         echo "$datanodes datanodes are up and healthy so far"
-       fi
-     fi
-
-     sleep 2
-  done
-  echo "WARNING! Datanodes did not start successfully. Please check the docker-compose files"
-  return 1
-}
-
-## @description  Starts a docker-compose based test environment
-## @param number of datanodes to start and wait for (default: 3)
-start_docker_env(){
-  local -i datanode_count=${1:-3}
-
-  create_results_dir
-
-  docker-compose -f "$COMPOSE_FILE" --no-ansi down
-  docker-compose -f "$COMPOSE_FILE" --no-ansi up -d --scale datanode="${datanode_count}" \
-    && wait_for_datanodes "$COMPOSE_FILE" "${datanode_count}" \
-    && sleep 10
-
-  if [[ $? -gt 0 ]]; then
-    OUTPUT_NAME="$COMPOSE_ENV_NAME"
-    stop_docker_env
-    return 1
-  fi
-}
-
-## @description  Execute robot tests in a specific container.
-## @param        Name of the container in the docker-compose file
-## @param        robot test file or directory relative to the smoketest dir
-execute_robot_test(){
-  CONTAINER="$1"
-  shift 1 #Remove first argument which was the container name
-  # shellcheck disable=SC2206
-  ARGUMENTS=($@)
-  TEST="${ARGUMENTS[${#ARGUMENTS[@]}-1]}" #Use last element as the test name
-  unset 'ARGUMENTS[${#ARGUMENTS[@]}-1]' #Remove the last element; the remaining ones are the custom parameters
-  TEST_NAME=$(basename "$TEST")
-  TEST_NAME="$(basename "$COMPOSE_DIR")-${TEST_NAME%.*}"
-  set +e
-  OUTPUT_NAME="$COMPOSE_ENV_NAME-$TEST_NAME-$CONTAINER"
-  OUTPUT_PATH="$RESULT_DIR_INSIDE/robot-$OUTPUT_NAME.xml"
-  docker-compose -f "$COMPOSE_FILE" exec -T "$CONTAINER" mkdir -p "$RESULT_DIR_INSIDE"
-  # shellcheck disable=SC2068
-  docker-compose -f "$COMPOSE_FILE" exec -T -e  SECURITY_ENABLED="${SECURITY_ENABLED}" "$CONTAINER" python -m robot ${ARGUMENTS[@]} --log NONE -N "$TEST_NAME" --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "$OUTPUT_PATH" "$SMOKETEST_DIR_INSIDE/$TEST"
-
-  FULL_CONTAINER_NAME=$(docker-compose -f "$COMPOSE_FILE" ps | grep "_${CONTAINER}_" | head -n 1 | awk '{print $1}')
-  docker cp "$FULL_CONTAINER_NAME:$OUTPUT_PATH" "$RESULT_DIR/"
-  set -e
-
-}
-
-
-## @description  Execute specific command in docker container
-## @param        container name
-## @param        specific command to execute
-execute_command_in_container(){
-  set -e
-  # shellcheck disable=SC2068
-  docker-compose -f "$COMPOSE_FILE" exec -T $@
-  set +e
-}
-
-
-## @description  Stops a docker-compose based test environment (with saving the logs)
-stop_docker_env(){
-  docker-compose -f "$COMPOSE_FILE" --no-ansi logs > "$RESULT_DIR/docker-$OUTPUT_NAME.log"
-  if [ "${KEEP_RUNNING:-false}" = false ]; then
-     docker-compose -f "$COMPOSE_FILE" --no-ansi down
-  fi
-}
-
-## @description  Generate robot framework reports based on the saved results.
-generate_report(){
-
-  if command -v rebot > /dev/null 2>&1; then
-     #Generate the combined output and return with the right exit code (note: robot = execute test, rebot = generate output)
-     rebot -d "$RESULT_DIR" "$RESULT_DIR/robot-*.xml"
-  else
-     echo "Robot Framework is not installed, so the reports cannot be generated (sudo pip install robotframework)."
-     exit 1
-  fi
-}
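
A minimal template for a new compose environment's test.sh built on this library, following the pattern of the test scripts above (the robot test path is illustrative):

```
#!/usr/bin/env bash
COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export COMPOSE_DIR

# shellcheck source=/dev/null
source "$COMPOSE_DIR/../testlib.sh"

start_docker_env                       # waits for 3 datanodes by default
execute_robot_test scm basic/basic.robot
stop_docker_env
generate_report
```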
diff --git a/hadoop-ozone/dist/src/main/conf/dn-audit-log4j2.properties b/hadoop-ozone/dist/src/main/conf/dn-audit-log4j2.properties
deleted file mode 100644
index 3c4d045..0000000
--- a/hadoop-ozone/dist/src/main/conf/dn-audit-log4j2.properties
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with this
-# work for additional information regarding copyright ownership.  The ASF
-# licenses this file to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# <p>
-# http://www.apache.org/licenses/LICENSE-2.0
-# <p>
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-name=PropertiesConfig
-
-# Checks for config change periodically and reloads
-monitorInterval=30
-
-filter=read,write
-# filter.read.onMatch=DENY avoids logging all READ events
-# filter.read.onMatch=ACCEPT permits logging all READ events
-# The above two settings ignore the log levels in configuration
-# filter.read.onMatch=NEUTRAL permits logging only those READ events
-# which are attempted at a log level equal to or greater than the log level
-# specified in the configuration
-filter.read.type=MarkerFilter
-filter.read.marker=READ
-filter.read.onMatch=DENY
-filter.read.onMismatch=NEUTRAL
-
-# filter.write.onMatch=DENY avoids logging all WRITE events
-# filter.write.onMatch=ACCEPT permits logging all WRITE events
-# The above two settings ignore the log levels in configuration
-# filter.write.onMatch=NEUTRAL permits logging only those WRITE events
-# which are attempted at a log level equal to or greater than the log level
-# specified in the configuration
-filter.write.type=MarkerFilter
-filter.write.marker=WRITE
-filter.write.onMatch=NEUTRAL
-filter.write.onMismatch=NEUTRAL
-
-# Log Levels are organized from most specific to least:
-# OFF (most specific, no logging)
-# FATAL (most specific, little data)
-# ERROR
-# WARN
-# INFO
-# DEBUG
-# TRACE (least specific, a lot of data)
-# ALL (least specific, all data)
-
-# Uncomment following section to enable logging to console appender also
-#appenders=console, rolling
-#appender.console.type=Console
-#appender.console.name=STDOUT
-#appender.console.layout.type=PatternLayout
-#appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-
-# Comment this line when using both console and rolling appenders
-appenders=rolling
-
-#Rolling File Appender with size & time thresholds.
-#Rolling is triggered when either threshold is breached.
-#The rolled over file is compressed by default
-#Time interval is specified in seconds 86400s=1 day
-appender.rolling.type=RollingFile
-appender.rolling.name=RollingFile
-appender.rolling.fileName=${sys:hadoop.log.dir}/dn-audit-${hostName}.log
-appender.rolling.filePattern=${sys:hadoop.log.dir}/dn-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
-appender.rolling.layout.type=PatternLayout
-appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-appender.rolling.policies.type=Policies
-appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
-appender.rolling.policies.time.interval=86400
-appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
-appender.rolling.policies.size.size=64MB
-
-loggers=audit
-logger.audit.type=AsyncLogger
-logger.audit.name=DNAudit
-logger.audit.level=INFO
-logger.audit.appenderRefs=rolling
-logger.audit.appenderRef.file.ref=RollingFile
-
-rootLogger.level=INFO
-#rootLogger.appenderRefs=stdout
-#rootLogger.appenderRef.stdout.ref=STDOUT
diff --git a/hadoop-ozone/dist/src/main/conf/log4j.properties b/hadoop-ozone/dist/src/main/conf/log4j.properties
deleted file mode 100644
index ae42c61..0000000
--- a/hadoop-ozone/dist/src/main/conf/log4j.properties
+++ /dev/null
@@ -1,158 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-# Null Appender
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Rolling File Appender - cap space usage at 5gb.
-#
-hadoop.log.maxfilesize=256MB
-hadoop.log.maxbackupindex=20
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
-log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# HDFS block state change log from block manager
-#
-# Uncomment the following to log normal block state change
-# messages from BlockManager in NameNode.
-#log4j.logger.BlockStateChange=DEBUG
-
-#
-#Security appender
-#
-hadoop.security.logger=INFO,NullAppender
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth-${user.name}.audit
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# Daily Rolling Security appender
-#
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-
-# Custom Logging levels
-# AWS SDK & S3A FileSystem
-#log4j.logger.com.amazonaws=ERROR
-log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
-#log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-
-log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
-
-# Do not log into the datanode logs. Remove this line to have a single log.
-log4j.additivity.org.apache.hadoop.ozone=false
-
-# For development purposes, log both to console and log file.
-log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
-log4j.appender.OZONE.Threshold=info
-log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
-log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
- %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
-
-# Real ozone logger that writes to ozone.log
-log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
-log4j.appender.FILE.Threshold=debug
-log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
-(%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
-%m%n
-
-# Log levels of third-party libraries
-log4j.logger.org.apache.commons.beanutils=WARN
-
-log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
-log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
-log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
-log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN
diff --git a/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties b/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
deleted file mode 100644
index 57577e1..0000000
--- a/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with this
-# work for additional information regarding copyright ownership.  The ASF
-# licenses this file to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# <p>
-# http://www.apache.org/licenses/LICENSE-2.0
-# <p>
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-name=PropertiesConfig
-
-# Checks for config change periodically and reloads
-monitorInterval=30
-
-filter=read,write
-# filter.read.onMatch=DENY avoids logging all READ events
-# filter.read.onMatch=ACCEPT permits logging all READ events
-# The above two settings ignore the log levels in configuration
-# filter.read.onMatch=NEUTRAL permits logging of only those READ events
-# which are attempted at log level equal or greater than log level specified
-# in the configuration
-filter.read.type=MarkerFilter
-filter.read.marker=READ
-filter.read.onMatch=DENY
-filter.read.onMismatch=NEUTRAL
-
-# filter.write.onMatch=DENY avoids logging all WRITE events
-# filter.write.onMatch=ACCEPT permits logging all WRITE events
-# The above two settings ignore the log levels in configuration
-# filter.write.onMatch=NEUTRAL permits logging of only those WRITE events
-# which are attempted at log level equal or greater than log level specified
-# in the configuration
-filter.write.type=MarkerFilter
-filter.write.marker=WRITE
-filter.write.onMatch=NEUTRAL
-filter.write.onMismatch=NEUTRAL
-
-# Log Levels are organized from most specific to least:
-# OFF (most specific, no logging)
-# FATAL (most specific, little data)
-# ERROR
-# WARN
-# INFO
-# DEBUG
-# TRACE (least specific, a lot of data)
-# ALL (least specific, all data)
-
-# Uncomment following section to enable logging to console appender also
-#appenders=console, rolling
-#appender.console.type=Console
-#appender.console.name=STDOUT
-#appender.console.layout.type=PatternLayout
-#appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-
-# Comment this line when using both console and rolling appenders
-appenders=rolling
-
-# Rolling file appender with size & time thresholds.
-# Rolling is triggered when either threshold is breached.
-# The rolled-over file is compressed by default.
-# Time interval is specified in seconds: 86400s = 1 day.
-appender.rolling.type=RollingFile
-appender.rolling.name=RollingFile
-appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
-appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
-appender.rolling.layout.type=PatternLayout
-appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-appender.rolling.policies.type=Policies
-appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
-appender.rolling.policies.time.interval=86400
-appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
-appender.rolling.policies.size.size=64MB
-
-loggers=audit
-logger.audit.type=AsyncLogger
-logger.audit.name=OMAudit
-logger.audit.level=INFO
-logger.audit.appenderRefs=rolling
-logger.audit.appenderRef.file.ref=RollingFile
-
-rootLogger.level=INFO
-#rootLogger.appenderRefs=stdout
-#rootLogger.appenderRef.stdout.ref=STDOUT
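
The MarkerFilter block above encodes the audit policy of this file: READ events are denied outright (onMatch=DENY) and everything else passes through (NEUTRAL) to the usual level check, so only WRITE audit events reach om-audit.log. A toy Python model of those decisions, for illustration only (this is not the Log4j2 API, just its decision table):

DENY, NEUTRAL, ACCEPT = "DENY", "NEUTRAL", "ACCEPT"

def marker_filter(marker, on_match, on_mismatch, event_marker):
    # Return the configured decision for a matching / non-matching marker.
    return on_match if event_marker == marker else on_mismatch

def is_logged(event_marker, level_allows):
    # Decisions as configured above: READ -> DENY/NEUTRAL, WRITE -> NEUTRAL/NEUTRAL.
    for marker, on_match, on_mismatch in (("READ", DENY, NEUTRAL),
                                          ("WRITE", NEUTRAL, NEUTRAL)):
        decision = marker_filter(marker, on_match, on_mismatch, event_marker)
        if decision != NEUTRAL:
            return decision == ACCEPT
    return level_allows  # all filters NEUTRAL: defer to the log level

assert is_logged("READ", True) is False   # READ audit events are dropped
assert is_logged("WRITE", True) is True   # WRITE events obey the log level
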
diff --git a/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties b/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties
deleted file mode 100644
index e8f5f2d..0000000
--- a/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.log.dir=.
-hadoop.log.file=ozone-shell.log
-
-log4j.rootLogger=INFO,FILE
-
-log4j.threshold=ALL
-
-log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.FILE.file=${hadoop.log.dir}/${hadoop.log.file}
-log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{1}:%L - %m%n
-
-log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
-log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
-log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
-log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN
diff --git a/hadoop-ozone/dist/src/main/conf/ozone-site.xml b/hadoop-ozone/dist/src/main/conf/ozone-site.xml
deleted file mode 100644
index 77dd7ef..0000000
--- a/hadoop-ozone/dist/src/main/conf/ozone-site.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-</configuration>
diff --git a/hadoop-ozone/dist/src/main/conf/scm-audit-log4j2.properties b/hadoop-ozone/dist/src/main/conf/scm-audit-log4j2.properties
deleted file mode 100644
index 3f81561..0000000
--- a/hadoop-ozone/dist/src/main/conf/scm-audit-log4j2.properties
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with this
-# work for additional information regarding copyright ownership.  The ASF
-# licenses this file to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# <p>
-# http://www.apache.org/licenses/LICENSE-2.0
-# <p>
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-name=PropertiesConfig
-
-# Checks for config change periodically and reloads
-monitorInterval=30
-
-filter=read,write
-# filter.read.onMatch=DENY avoids logging all READ events
-# filter.read.onMatch=ACCEPT permits logging all READ events
-# The above two settings ignore the log levels in configuration
-# filter.read.onMatch=NEUTRAL permits logging of only those READ events
-# which are attempted at log level equal or greater than log level specified
-# in the configuration
-filter.read.type=MarkerFilter
-filter.read.marker=READ
-filter.read.onMatch=DENY
-filter.read.onMismatch=NEUTRAL
-
-# filter.write.onMatch=DENY avoids logging all WRITE events
-# filter.write.onMatch=ACCEPT permits logging all WRITE events
-# The above two settings ignore the log levels in configuration
-# filter.write.onMatch=NEUTRAL permits logging of only those WRITE events
-# which are attempted at log level equal or greater than log level specified
-# in the configuration
-filter.write.type=MarkerFilter
-filter.write.marker=WRITE
-filter.write.onMatch=NEUTRAL
-filter.write.onMismatch=NEUTRAL
-
-# Log Levels are organized from most specific to least:
-# OFF (most specific, no logging)
-# FATAL (most specific, little data)
-# ERROR
-# WARN
-# INFO
-# DEBUG
-# TRACE (least specific, a lot of data)
-# ALL (least specific, all data)
-
-# Uncomment following section to enable logging to console appender also
-#appenders=console, rolling
-#appender.console.type=Console
-#appender.console.name=STDOUT
-#appender.console.layout.type=PatternLayout
-#appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-
-# Comment this line when using both console and rolling appenders
-appenders=rolling
-
-# Rolling file appender with size & time thresholds.
-# Rolling is triggered when either threshold is breached.
-# The rolled-over file is compressed by default.
-# Time interval is specified in seconds: 86400s = 1 day.
-appender.rolling.type=RollingFile
-appender.rolling.name=RollingFile
-appender.rolling.fileName=${sys:hadoop.log.dir}/scm-audit-${hostName}.log
-appender.rolling.filePattern=${sys:hadoop.log.dir}/scm-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
-appender.rolling.layout.type=PatternLayout
-appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-appender.rolling.policies.type=Policies
-appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
-appender.rolling.policies.time.interval=86400
-appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
-appender.rolling.policies.size.size=64MB
-
-loggers=audit
-logger.audit.type=AsyncLogger
-logger.audit.name=SCMAudit
-logger.audit.level=INFO
-logger.audit.appenderRefs=rolling
-logger.audit.appenderRef.file.ref=RollingFile
-
-rootLogger.level=INFO
-#rootLogger.appenderRefs=stdout
-#rootLogger.appenderRef.stdout.ref=STDOUT
diff --git a/hadoop-ozone/dist/src/main/docker/Dockerfile b/hadoop-ozone/dist/src/main/docker/Dockerfile
deleted file mode 100644
index 3b0e8fe..0000000
--- a/hadoop-ozone/dist/src/main/docker/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM apache/ozone-runner:@docker.ozone-runner.version@
-
-ADD --chown=hadoop . /opt/hadoop
-
-WORKDIR /opt/hadoop
diff --git a/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh b/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh
deleted file mode 100755
index cb5f016..0000000
--- a/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/env bash
-##
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-set -e
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-
-if [ -n "$SLEEP_SECONDS" ]; then
-   echo "Sleeping for $SLEEP_SECONDS seconds"
-   sleep "$SLEEP_SECONDS"
-fi
-
-#
-# You can wait for another TCP port with these settings.
-#
-# Example:
-#
-# export WAITFOR=localhost:9878
-#
-# With an optional parameter you can also set the maximum
-# waiting time (in seconds) with WAITFOR_TIMEOUT.
-# (The default is 300 seconds / 5 minutes.)
-if [ -n "$WAITFOR" ]; then
-  echo "Waiting for the service $WAITFOR"
-  WAITFOR_HOST=$(printf "%s\n" "$WAITFOR"| cut -d : -f 1)
-  WAITFOR_PORT=$(printf "%s\n" "$WAITFOR"| cut -d : -f 2)
-  for i in $(seq "${WAITFOR_TIMEOUT:-300}" -1 0) ; do
-    set +e
-    nc -z "$WAITFOR_HOST" "$WAITFOR_PORT" > /dev/null 2>&1
-    result=$?
-    set -e
-    if [ $result -eq 0 ] ; then
-      break
-    fi
-    sleep 1
-  done
-  if [ "$i" -eq 0 ]; then
-     echo "Waiting for service $WAITFOR is timed out." >&2
-     exit 1
-  f
-  fi
-fi
-
-if [ -n "$KERBEROS_ENABLED" ]; then
-  echo "Setting up kerberos!!"
-  KERBEROS_SERVER=${KERBEROS_SERVER:-krb5}
-  ISSUER_SERVER=${ISSUER_SERVER:-$KERBEROS_SERVER\:8081}
-  echo "KDC ISSUER_SERVER => $ISSUER_SERVER"
-
-  if [ -n "$SLEEP_SECONDS" ]; then
-    echo "Sleeping for ${SLEEP_SECONDS} seconds"
-    sleep "$SLEEP_SECONDS"
-  fi
-
-  if [ -z "$KEYTAB_DIR" ]; then
-    KEYTAB_DIR='/etc/security/keytabs'
-  fi
-  while true
-    do
-      set +e
-      STATUS=$(curl -s -o /dev/null -w '%{http_code}' http://"$ISSUER_SERVER"/keytab/test/test)
-      set -e
-      if [ "$STATUS" -eq 200 ]; then
-        echo "Got 200, KDC service ready!!"
-        break
-      else
-        echo "Got $STATUS :( KDC service not ready yet..."
-      fi
-      sleep 5
-    done
-
-    HOST_NAME=$(hostname -f)
-    export HOST_NAME
-    for NAME in ${KERBEROS_KEYTABS}; do
-      echo "Download $NAME/$HOSTNAME@EXAMPLE.COM keytab file to $KEYTAB_DIR/$NAME.keytab"
-      wget "http://$ISSUER_SERVER/keytab/$HOST_NAME/$NAME" -O "$KEYTAB_DIR/$NAME.keytab"
-      klist -kt "$KEYTAB_DIR/$NAME.keytab"
-      KERBEROS_ENABLED=true
-    done
-
-    #Optional: let's try to adjust the krb5.conf
-    sudo sed -i "s/krb5/$KERBEROS_SERVER/g" "/etc/krb5.conf" || true
-fi
-
-CONF_DESTINATION_DIR="${HADOOP_CONF_DIR:-/opt/hadoop/etc/hadoop}"
-
-# Try to copy the default configuration files
-set +e
-if [[ -d "/opt/ozone/etc/hadoop" ]]; then
-   cp /opt/ozone/etc/hadoop/* "$CONF_DESTINATION_DIR/" > /dev/null 2>&1
-elif [[ -d "/opt/hadoop/etc/hadoop" ]]; then
-   cp /opt/hadoop/etc/hadoop/* "$CONF_DESTINATION_DIR/" > /dev/null 2>&1
-fi
-set -e
-
-"$DIR"/envtoconf.py --destination "$CONF_DESTINATION_DIR"
-
-if [ -n "$ENSURE_SCM_INITIALIZED" ]; then
-  if [ ! -f "$ENSURE_SCM_INITIALIZED" ]; then
-    # Improve om and scm start up options
-    /opt/hadoop/bin/ozone scm --init || /opt/hadoop/bin/ozone scm -init
-  fi
-fi
-
-if [ -n "$ENSURE_OM_INITIALIZED" ]; then
-  if [ ! -f "$ENSURE_OM_INITIALIZED" ]; then
-    # Improve om and scm start up options
-    /opt/hadoop/bin/ozone om --init ||  /opt/hadoop/bin/ozone om -createObjectStore
-  fi
-fi
-
-# Optionally instrument the hadoop process with a byteman script
-#
-if [ -n "$BYTEMAN_SCRIPT" ] || [ -n "$BYTEMAN_SCRIPT_URL" ]; then
-
-  export PATH=$PATH:$BYTEMAN_DIR/bin
-
-  if [ -n "$BYTEMAN_SCRIPT_URL" ]; then
-    wget "$BYTEMAN_SCRIPT_URL" -O /tmp/byteman.btm
-    export BYTEMAN_SCRIPT=/tmp/byteman.btm
-  fi
-
-  if [ ! -f "$BYTEMAN_SCRIPT" ]; then
-    echo "ERROR: The defined $BYTEMAN_SCRIPT does not exist!!!"
-    exit 255
-  fi
-
-  AGENT_STRING="-javaagent:/opt/byteman.jar=script:$BYTEMAN_SCRIPT"
-  export HADOOP_OPTS="$AGENT_STRING $HADOOP_OPTS"
-  echo "Process is instrumented with adding $AGENT_STRING to HADOOP_OPTS"
-fi
-
-exec "$@"
diff --git a/hadoop-ozone/dist/src/main/dockerbin/envtoconf.py b/hadoop-ozone/dist/src/main/dockerbin/envtoconf.py
deleted file mode 100755
index 0e2c368..0000000
--- a/hadoop-ozone/dist/src/main/dockerbin/envtoconf.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/python
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""convert environment variables to config"""
-
-import argparse
-import os
-import re
-import sys
-
-import transformation
-
-class Simple(object):
-  """Simple conversion"""
-  def __init__(self, args):
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--destination", help="Destination directory", required=True)
-    self.args = parser.parse_args(args=args)
-    # copy the default files to file.raw in destination directory
-
-    self.known_formats = ['xml', 'properties', 'yaml', 'yml', 'env', "sh", "cfg", 'conf']
-    self.output_dir = self.args.destination
-    self.excluded_envs = ['HADOOP_CONF_DIR']
-    self.configurables = {}
-
-  def destination_file_path(self, name, extension):
-    """destination file path"""
-    return os.path.join(self.output_dir, "{}.{}".format(name, extension))
-
-  def write_env_var(self, name, extension, key, value):
-    """Write environment variables"""
-    with open(self.destination_file_path(name, extension) + ".raw", "a") as myfile:
-      myfile.write("{}: {}\n".format(key, value))
-
-  def process_envs(self):
-    """Process environment variables"""
-    for key in os.environ.keys():
-      if key in self.excluded_envs:
-        continue
-      pattern = re.compile("[_\\.]")
-      parts = pattern.split(key)
-      extension = None
-      name = parts[0].lower()
-      if len(parts) > 1:
-        extension = parts[1].lower()
-        config_key = key[len(name) + len(extension) + 2:].strip()
-      if extension and "!" in extension:
-        splitted = extension.split("!")
-        extension = splitted[0]
-        fmt = splitted[1]
-        config_key = key[len(name) + len(extension) + len(fmt) + 3:].strip()
-      else:
-        fmt = extension
-
-      if extension and extension in self.known_formats:
-        if name not in self.configurables.keys():
-          with open(self.destination_file_path(name, extension) + ".raw", "w") as myfile:
-            myfile.write("")
-        self.configurables[name] = (extension, fmt)
-        self.write_env_var(name, extension, config_key, os.environ[key])
-      else:
-        for configurable_name in self.configurables:
-          if key.lower().startswith(configurable_name.lower()):
-            self.write_env_var(configurable_name,
-                               self.configurables[configurable_name][0],
-                               key[len(configurable_name) + 1:],
-                               os.environ[key])
-
-  def transform(self):
-    """transform"""
-    for configurable_name in self.configurables:
-      name = configurable_name
-      extension, fmt = self.configurables[name]
-
-      destination_path = self.destination_file_path(name, extension)
-
-      with open(destination_path + ".raw", "r") as myfile:
-        content = myfile.read()
-        transformer_func = getattr(transformation, "to_" + fmt)
-        content = transformer_func(content)
-        with open(destination_path, "w") as myfile:
-          myfile.write(content)
-
-  def main(self):
-    """main"""
-
-    # read the environment variables and write them into per-file .raw lists
-    self.process_envs()
-
-    # copy file.ext.raw to file.ext in the destination directory, and
-    # transform to the right format (e.g. key: value => XML)
-    self.transform()
-
-
-def main():
-  """main"""
-  Simple(sys.argv[1:]).main()
-
-
-if __name__ == '__main__':
-  Simple(sys.argv[1:]).main()
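
envtoconf.py derives the target file and configuration key from the variable name itself: the token before the first separator is the file name, the second token is the extension (and, after an optional "!", the output format), and the rest is the key. A minimal standalone sketch of that naming convention, using an assumed example variable (the same convention appears in the k8s ConfigMap further below):

import re

def split_env(key):
    # 'OZONE-SITE.XML_ozone.om.address' -> ('ozone-site', 'xml', 'ozone.om.address')
    parts = re.split(r"[_.]", key)
    name = parts[0].lower()
    extension = parts[1].lower()
    # +2 skips the '.' and '_' separators, as in process_envs() above
    config_key = key[len(name) + len(extension) + 2:].strip()
    return name, extension, config_key

assert split_env("OZONE-SITE.XML_ozone.om.address") == (
    "ozone-site", "xml", "ozone.om.address")
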
diff --git a/hadoop-ozone/dist/src/main/dockerbin/transformation.py b/hadoop-ozone/dist/src/main/dockerbin/transformation.py
deleted file mode 100755
index 5e708ce..0000000
--- a/hadoop-ozone/dist/src/main/dockerbin/transformation.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/python
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""This module transform properties into different format"""
-def render_yaml(yaml_root, prefix=""):
-  """render yaml"""
-  result = ""
-  if isinstance(yaml_root, dict):
-    if prefix:
-      result += "\n"
-      for key in yaml_root:
-        result += "{}{}: {}".format(prefix, key, render_yaml(
-            yaml_root[key], prefix + "   "))
-  elif isinstance(yaml_root, list):
-    result += "\n"
-    for item in yaml_root:
-      result += prefix + " - " + render_yaml(item, prefix + " ")
-  else:
-    result += "{}\n".format(yaml_root)
-  return result
-
-
-def to_yaml(content):
-  """transform to yaml"""
-  props = process_properties(content)
-
-  keys = props.keys()
-  yaml_props = {}
-  for key in keys:
-    parts = key.split(".")
-    node = yaml_props
-    prev_part = None
-    parent_node = {}
-    for part in parts[:-1]:
-      if part.isdigit():
-        if isinstance(node, dict):
-          parent_node[prev_part] = []
-          node = parent_node[prev_part]
-        while len(node) <= int(part):
-          node.append({})
-        parent_node = node
-        node = node[int(part)]
-      else:
-        if part not in node:
-          node[part] = {}
-        parent_node = node
-        node = node[part]
-      prev_part = part
-    if parts[-1].isdigit():
-      if isinstance(node, dict):
-        parent_node[prev_part] = []
-        node = parent_node[prev_part]
-      node.append(props[key])
-    else:
-      node[parts[-1]] = props[key]
-
-  return render_yaml(yaml_props)
-
-
-def to_yml(content):
-  """transform to yml"""
-  return to_yaml(content)
-
-
-def to_properties(content):
-  """transform to properties"""
-  result = ""
-  props = process_properties(content)
-  for key, val in props.items():
-    result += "{}: {}\n".format(key, val)
-  return result
-
-
-def to_env(content):
-  """transform to environment variables"""
-  result = ""
-  props = process_properties(content)
-  for key, val in props.items():
-    result += "{}={}\n".format(key, val)
-  return result
-
-
-def to_sh(content):
-  """transform to shell"""
-  result = ""
-  props = process_properties(content)
-  for key, val in props.items():
-    result += "export {}=\"{}\"\n".format(key, val)
-  return result
-
-
-def to_cfg(content):
-  """transform to config"""
-  result = ""
-  props = process_properties(content)
-  for key, val in props.items():
-    result += "{}={}\n".format(key, val)
-  return result
-
-
-def to_conf(content):
-  """transform to configuration"""
-  result = ""
-  props = process_properties(content)
-  for key, val in props.items():
-    result += "export {}={}\n".format(key, val)
-  return result
-
-
-def to_xml(content):
-  """transform to xml"""
-  result = "<configuration>\n"
-  props = process_properties(content)
-  for key in props:
-    result += "<property><name>{0}</name><value>{1}</value></property>\n". \
-      format(key, props[key])
-  result += "</configuration>"
-  return result
-
-
-def process_properties(content, sep=': ', comment_char='#'):
-  """
-  Parse the given content as a properties file and return a dict.
-  """
-  props = {}
-  for line in content.split("\n"):
-    sline = line.strip()
-    if sline and not sline.startswith(comment_char):
-      key_value = sline.split(sep)
-      key = key_value[0].strip()
-      value = sep.join(key_value[1:]).strip().strip('"')
-      props[key] = value
-
-  return props
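
A short usage sketch of the module above, assuming its functions are imported as envtoconf.py does (expected output shown in comments):

raw = "ozone.om.address: om-0.om\nozone.enabled: true\n"

print(process_properties(raw))
# {'ozone.om.address': 'om-0.om', 'ozone.enabled': 'true'}

print(to_xml(raw))
# <configuration>
# <property><name>ozone.om.address</name><value>om-0.om</value></property>
# <property><name>ozone.enabled</name><value>true</value></property>
# </configuration>
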
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/flekszible.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/flekszible.yaml
deleted file mode 100644
index 8fdc155..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/flekszible.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-description: Jaeger tracing server
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/jaeger.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/jaeger.yaml
deleted file mode 100644
index 4796092..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/jaeger.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: v1
-kind: Service
-metadata:
-  name: jaeger
-spec:
-  clusterIP: None
-  selector:
-    app: jaeger
-    component: jaeger
-  ports:
-    - name: ui
-      port: 16686
----
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: jaeger
-spec:
-  selector:
-    matchLabels:
-      app: jaeger
-      component: jaeger
-  replicas: 1
-  serviceName: jaeger
-  template:
-    metadata:
-      labels:
-        app: jaeger
-        component: jaeger
-    spec:
-      containers:
-        - name: jaeger
-          image: jaegertracing/all-in-one:latest
-          ports:
-            - containerPort: 16686
-              name: web
-          env:
-            - name: COLLECTOR_ZIPKIN_HTTP_PORT
-              value: "9411"
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml
deleted file mode 100644
index e7c2222..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-kind: Deployment
-apiVersion: apps/v1
-metadata:
-  name: csi-provisioner
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: csi-provisioner
-  template:
-    metadata:
-      labels:
-        app: csi-provisioner
-    spec:
-      serviceAccount: csi-ozone
-      containers:
-        - name: csi-provisioner
-          image: quay.io/k8scsi/csi-provisioner:v1.0.1
-          args:
-            - "--csi-address=/var/lib/csi/csi.sock"
-          volumeMounts:
-            - name: socket-dir
-              mountPath: /var/lib/csi/
-        - name: ozone-csi
-          image: "@docker.image@"
-          volumeMounts:
-            - name: socket-dir
-              mountPath: /var/lib/csi/
-          imagePullPolicy: Always
-          envFrom:
-            - configMapRef:
-                name: config
-          args:
-           - ozone
-           - csi
-      volumes:
-        - name: socket-dir
-          emptyDir:
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-crd.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-crd.yaml
deleted file mode 100644
index f0ca37c..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-crd.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: storage.k8s.io/v1beta1
-kind: CSIDriver
-metadata:
-  name: org.apache.hadoop.ozone
-spec:
-  attachRequired: false
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-node.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-node.yaml
deleted file mode 100644
index 6c3a1ac..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-node.yaml
+++ /dev/null
@@ -1,95 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-kind: DaemonSet
-apiVersion: apps/v1beta2
-metadata:
-  name: csi-node
-spec:
-  selector:
-    matchLabels:
-      app: csi-node
-  template:
-    metadata:
-      labels:
-        app: csi-node
-    spec:
-      serviceAccount: csi-ozone
-      containers:
-        - name: driver-registrar
-          image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2
-          args:
-            - "--v=4"
-            - "--csi-address=/var/lib/csi/csi.sock"
-            - "--kubelet-registration-path=/var/lib/kubelet/plugins/org.apache.hadoop.ozone/csi.sock"
-          env:
-            - name: KUBE_NODE_NAME
-              valueFrom:
-                fieldRef:
-                  fieldPath: spec.nodeName
-          volumeMounts:
-            - name: plugin-dir
-              mountPath: /var/lib/csi
-            - name: registration-dir
-              mountPath: /registration/
-        - name: csi-node
-          image: "@docker.image@"
-          securityContext:
-            runAsUser: 0
-            privileged: true
-            capabilities:
-              add: ["SYS_ADMIN"]
-            allowPrivilegeEscalation: true
-          args:
-            - ozone
-            - csi
-          envFrom:
-            - configMapRef:
-                name: config
-          imagePullPolicy: "Always"
-          volumeMounts:
-            - name: plugin-dir
-              mountPath: /var/lib/csi
-            - name: pods-mount-dir
-              mountPath: /var/lib/kubelet/pods
-              mountPropagation: "Bidirectional"
-            - name: fuse-device
-              mountPath: /dev/fuse
-            - name: dbus
-              mountPath: /var/run/dbus
-            - name: systemd
-              mountPath: /run/systemd
-      volumes:
-        - name: plugin-dir
-          hostPath:
-            path: /var/lib/kubelet/plugins/org.apache.hadoop.ozone
-            type: DirectoryOrCreate
-        - name: registration-dir
-          hostPath:
-            path: /var/lib/kubelet/plugins_registry/
-            type: DirectoryOrCreate
-        - name: pods-mount-dir
-          hostPath:
-            path: /var/lib/kubelet/pods
-            type: Directory
-        - name: fuse-device
-          hostPath:
-            path: /dev/fuse
-        - name: dbus
-          hostPath:
-            path: /var/run/dbus
-        - name: systemd
-          hostPath:
-            path: /run/systemd
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-rbac.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-rbac.yaml
deleted file mode 100644
index d83ffb3..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-rbac.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  namespace: default
-  name: csi-ozone
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: csi-ozone
-rules:
-  - apiGroups: [""]
-    resources: ["secrets"]
-    verbs: ["get", "list"]
-  - apiGroups: [""]
-    resources: ["events"]
-    verbs: ["list", "watch", "create", "update", "patch"]
-  - apiGroups: [""]
-    resources: ["nodes"]
-    verbs: ["get", "list", "update","watch"]
-  - apiGroups: [""]
-    resources: ["namespaces"]
-    verbs: ["get", "list"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["storageclasses"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: [""]
-    resources: ["persistentvolumeclaims"]
-    verbs: ["get", "list", "watch", "update"]
-  - apiGroups: [""]
-    resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch", "update", "create"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["volumeattachments"]
-    verbs: ["get", "list", "watch", "update"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["csinodes"]
-    verbs: ["get", "list", "watch"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: csi-ozone
-subjects:
-  - kind: ServiceAccount
-    name: csi-ozone
-    namespace: default
-roleRef:
-  kind: ClusterRole
-  name: csi-ozone
-  apiGroup: rbac.authorization.k8s.io
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-storageclass.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-storageclass.yaml
deleted file mode 100644
index 9780160..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-storageclass.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
-  name: ozone
-provisioner: org.apache.hadoop.ozone
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/definitions/csi.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/definitions/csi.yaml
deleted file mode 100644
index 14c2ea30..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/definitions/csi.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-name: ozone/csi
-description: Configuration for CSI interface
----
-- type: Add
-  trigger:
-    metadata:
-      name: config
-  path:
-    - data
-  value:
-    OZONE-SITE.XML_ozone.csi.s3g.address: http://s3g-0.s3g:9878
-    OZONE-SITE.XML_ozone.csi.socket: /var/lib/csi/csi.sock
-    OZONE-SITE.XML_ozone.csi.owner: hadoop
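
The flekszible definitions in this tree describe overlay transformations: an Add rule fires when a resource matches its trigger and merges value into the node found by walking path. A simplified Python sketch of that mechanism, illustrative only (the real tool also accepts regex path segments such as "(initContainers|containers)" and "*"):

def matches(trigger, resource):
    # True when every leaf of 'trigger' equals the corresponding leaf of 'resource'.
    if isinstance(trigger, dict):
        return all(k in resource and matches(v, resource[k])
                   for k, v in trigger.items())
    return trigger == resource

def add(resource, path, value):
    # Walk 'path' into the resource and merge 'value' at that node.
    node = resource
    for part in path:
        node = node.setdefault(part, {})
    node.update(value)

configmap = {"metadata": {"name": "config"}, "data": {}}
if matches({"metadata": {"name": "config"}}, configmap):
    add(configmap, ["data"],
        {"OZONE-SITE.XML_ozone.csi.socket": "/var/lib/csi/csi.sock"})
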
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml
deleted file mode 100644
index 5f5e70b5..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: config
-data:
-
-  OZONE-SITE.XML_hdds.datanode.dir: "/data/storage"
-  OZONE-SITE.XML_ozone.scm.datanode.id.dir: "/data"
-  OZONE-SITE.XML_ozone.metadata.dirs: "/data/metadata"
-  OZONE-SITE.XML_ozone.scm.block.client.address: "scm-0.scm"
-  OZONE-SITE.XML_ozone.om.address: "om-0.om"
-  OZONE-SITE.XML_ozone.scm.client.address: "scm-0.scm"
-  OZONE-SITE.XML_ozone.scm.names: "scm-0.scm"
-  OZONE-SITE.XML_ozone.enabled: "true"
-  LOG4J.PROPERTIES_log4j.rootLogger: "INFO, stdout"
-  LOG4J.PROPERTIES_log4j.appender.stdout: "org.apache.log4j.ConsoleAppender"
-  LOG4J.PROPERTIES_log4j.appender.stdout.layout: "org.apache.log4j.PatternLayout"
-  LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern: "%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n"
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss-service.yaml
deleted file mode 100644
index 7c221d9..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss-service.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: v1
-kind: Service
-metadata:
-  name: datanode
-spec:
-  ports:
-  - port: 9870
-    name: rpc
-  clusterIP: None
-  selector:
-     app: ozone
-     component: datanode
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml
deleted file mode 100644
index 88a4308..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in cdatanodepliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: datanode
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: datanode
-  serviceName: datanode
-  replicas: 3
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: datanode
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9882"
-        prometheus.io/path: "/prom"
-    spec:
-      affinity:
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-              - key: component
-                operator: In
-                values:
-                - datanode
-            topologyKey: "kubernetes.io/hostname"
-      securityContext:
-        fsGroup: 1000
-      containers:
-      - name: datanode
-        image: "@docker.image@"
-        args: ["ozone","datanode"]
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/emptydir.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/emptydir.yaml
deleted file mode 100644
index 7a717bf..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/emptydir.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-name: ozone/emptydir
-description: Add empty dir based ephemeral persistence
----
-- type: Add
-  trigger:
-    metadata:
-      labels:
-        app.kubernetes.io/component: ozone
-  path:
-    - spec
-    - template
-    - spec
-    - (initContainers|containers)
-    - "*"
-    - volumeMounts
-  value:
-    - name: data
-      mountPath: /data
-- type: Add
-  trigger:
-    metadata:
-      labels:
-        app.kubernetes.io/component: ozone
-  path:
-    - spec
-    - template
-    - spec
-    - volumes
-  value:
-    - name: data
-      emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/persistence.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/persistence.yaml
deleted file mode 100644
index 33a818d..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/persistence.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-name: ozone/persistence
-description: Add real PVC based persistence
----
-- type: Add
-  path:
-    - spec
-  trigger:
-    kind: StatefulSet
-  value:
-      volumeClaimTemplates:
-      - metadata:
-          name: data
-        spec:
-          accessModes: [ "ReadWriteOnce" ]
-          resources:
-            requests:
-              storage: 2Gi
-- type: Add
-  trigger:
-    metadata:
-      name: datanode
-  path:
-    - spec
-    - template
-    - spec
-  value:
-    affinity:
-      podAntiAffinity:
-        requiredDuringSchedulingIgnoredDuringExecution:
-        - labelSelector:
-            matchExpressions:
-            - key: component
-              operator: In
-              values:
-              - datanode
-          topologyKey: kubernetes.io/hostname
-- type: Add
-  trigger:
-    metadata:
-      labels:
-        app.kubernetes.io/component: ozone
-  path:
-    - spec
-    - template
-    - spec
-    - (initContainers|containers)
-    - "*"
-    - volumeMounts
-  value:
-    - name: data
-      mountPath: /data
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/profiler.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/profiler.yaml
deleted file mode 100644
index d76931a..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/profiler.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-name: ozone/profiler
-description: Enable profiler endpoint.
----
-- type: Add
-  trigger:
-    metadata:
-      name: config
-  path:
-    - data
-  value:
-    OZONE-SITE.XML_hdds.profiler.endpoint.enabled: "true"
-    ASYNC_PROFILER_HOME: /opt/profiler
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/prometheus.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/prometheus.yaml
deleted file mode 100644
index 604df1f..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/prometheus.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-name: ozone/prometheus
-description: Enable prometheus monitoring in Ozone
----
-- type: Add
-  trigger:
-    metadata:
-      name: config
-  path:
-    - data
-  value:
-    OZONE-SITE.XML_hdds.prometheus.endpoint.enabled: "true"
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/tracing.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/tracing.yaml
deleted file mode 100644
index 007b8d1..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/tracing.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-name: ozone/tracing
-description: Enable jaeger tracing
----
-- type: Add
-  path:
-    - spec
-    - template
-    - spec
-    - containers
-    - .*
-    - env
-  value:
-     - name: JAEGER_SAMPLER_TYPE
-       value: probabilistic
-     - name: JAEGER_SAMPLER_PARAM
-       value: "0.01"
-     - name: JAEGER_AGENT_HOST
-       value: jaeger-0.jaeger
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/flekszible.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/flekszible.yaml
deleted file mode 100644
index 2707d30..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/flekszible.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-description: Apache Hadoop Ozone
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/flekszible.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/flekszible.yaml
deleted file mode 100644
index c6e29f3..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/flekszible.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-description: Load test tool for Apache Hadoop Ozone
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/freon.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/freon.yaml
deleted file mode 100644
index 40ebc98..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/freon.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: freon
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: ozone
-      component: freon
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: freon
-    spec:
-      containers:
-        - name: freon
-          image: "@docker.image@"
-          args: ["ozone","freon", "rk", "--factor=THREE", "--replicationType=RATIS"]
-          envFrom:
-            - configMapRef:
-                name: config
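For reference, the freon arguments select the workload and its replication settings; a reading of the flags as used here (rk is freon's random-key generator subcommand):

args: ["ozone", "freon",
       "rk",                       # random-key workload: continuously write keys
       "--factor=THREE",           # three replicas per container
       "--replicationType=RATIS"]  # replicate through Ratis pipelines
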
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss-service.yaml
deleted file mode 100644
index a6462fe..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss-service.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: v1
-kind: Service
-metadata:
-  name: om
-spec:
-  ports:
-  - port: 9874
-    name: ui
-  clusterIP: None
-  selector:
-     app: ozone
-     component: om
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml
deleted file mode 100644
index befc21e..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: om
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: om
-  serviceName: om
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: om
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9874"
-        prometheus.io/path: "/prom"
-    spec:
-      securityContext:
-        fsGroup: 1000
-      containers:
-      - name: om
-        image: "@docker.image@"
-        args: ["ozone","om"]
-        env:
-        - name: WAITFOR
-          value: scm-0.scm:9876
-        - name: ENSURE_OM_INITIALIZED
-          value: /data/metadata/om/current/VERSION
-        livenessProbe:
-          tcpSocket:
-            port: 9862
-          initialDelaySeconds: 30
-      volumes: []
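Two environment variables drive start-up ordering in the OM pod; a sketch of the convention, assuming the stock hadoop-runner entrypoint scripts consume them:

env:
  - name: WAITFOR                        # block until SCM answers on its RPC port
    value: scm-0.scm:9876
  - name: ENSURE_OM_INITIALIZED          # if this marker file is missing, run
    value: /data/metadata/om/current/VERSION   # "ozone om --init" before "ozone om"
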
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss-service.yaml
deleted file mode 100644
index c99bbd2..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss-service.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: v1
-kind: Service
-metadata:
-  name: s3g
-spec:
-  ports:
-  - port: 9878
-    name: rest
-  clusterIP: None
-  selector:
-     app: ozone
-     component: s3g
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml
deleted file mode 100644
index fc8ff9a..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: s3g
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: s3g
-  serviceName: s3g
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: s3g
-    spec:
-      containers:
-      - name: s3g
-        image: "@docker.image@"
-        args: ["ozone","s3g"]
-        livenessProbe:
-          httpGet:
-            path: /
-            port: 9878
-          initialDelaySeconds: 30
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss-service.yaml
deleted file mode 100644
index f8a05ab..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss-service.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: v1
-kind: Service
-metadata:
-  name: scm
-spec:
-  ports:
-    - port: 9876
-      name: ui
-  clusterIP: None
-  selector:
-    app: ozone
-    component: scm
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml
deleted file mode 100644
index d386afc..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: scm
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: scm
-  serviceName: scm
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: scm
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9876"
-        prometheus.io/path: "/prom"
-    spec:
-      securityContext:
-        fsGroup: 1000
-      initContainers:
-      - name: init
-        image: "@docker.image@"
-        args: ["ozone","scm", "--init"]
-      containers:
-      - name: scm
-        image: "@docker.image@"
-        args: ["ozone","scm"]
-        livenessProbe:
-          tcpSocket:
-            port: 9861
-          initialDelaySeconds: 30
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/transformations/config.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/transformations/config.yaml
deleted file mode 100644
index c8ae632..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/transformations/config.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-- type: Add
-  path:
-    - spec
-    - template
-    - spec
-    - ".*"
-    - ".*"
-    - envFrom
-  value:
-    - configMapRef:
-        name: config
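The two ".*" segments match both the containers and initContainers lists and every element in them, so each rendered container gains the same block (visible in the generated StatefulSets later in this patch, including the SCM init container):

envFrom:
  - configMapRef:
      name: config    # the shared Ozone configuration ConfigMap
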
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/configmap.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/configmap.yaml
deleted file mode 100644
index f880987..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/configmap.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: prometheusconf
-data:
-  prometheus.yaml: |-
-    global:
-      scrape_interval: 15s
-    scrape_configs:
-    - job_name: jmxexporter
-      kubernetes_sd_configs:
-      - role: pod
-      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-      relabel_configs:
-      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
-        action: keep
-        regex: true
-      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
-        action: replace
-        target_label: __metrics_path__
-        regex: (.+)
-      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
-        action: replace
-        regex: ([^:]+)(?::\d+)?;(\d+)
-        replacement: $1:$2
-        target_label: __address__
-      - action: labelmap
-        regex: __meta_kubernetes_pod_label_(.+)
-      - source_labels: [__meta_kubernetes_namespace]
-        action: replace
-        target_label: kubernetes_namespace
-      - source_labels: [__meta_kubernetes_pod_name]
-        action: replace
-        target_label: kubernetes_pod_name
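The relabeling rules implement the usual prometheus.io pod-annotation contract. These are the annotations the scrape config reacts to, and the same trio the Ozone StatefulSets set on their pod templates:

metadata:
  annotations:
    prometheus.io/scrape: "true"   # keep rule: scrape only annotated pods
    prometheus.io/path: "/prom"    # becomes __metrics_path__
    prometheus.io/port: "9874"     # rewrites __address__ to <pod-ip>:9874
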
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/definitions/enable.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/definitions/enable.yaml
deleted file mode 100644
index 6825e91..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/definitions/enable.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-name: ozone/prometheus
-description: Enable prometheus monitoring in Ozone
----
-- type: Add
-  trigger:
-      metadata:
-          name: config
-  path:
-    - data
-  value:
-    OZONE-SITE.XML_hdds.prometheus.endpoint.enabled: "true"
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/deployment.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/deployment.yaml
deleted file mode 100644
index 6368403..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/deployment.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: apps/v1beta1
-kind: Deployment
-metadata:
-  name: prometheus
-  labels:
-    app: prometheus
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: prometheus
-  template:
-    metadata:
-      labels:
-        app: prometheus
-    spec:
-        serviceAccountName: prometheus-operator
-        containers:
-          - name: prometheus
-            image: prom/prometheus
-            args: ["--config.file=/conf/prometheus.yaml"]
-            ports:
-               - containerPort: 9090
-            volumeMounts:
-              - name: config
-                mountPath: "/conf"
-                readOnly: true
-        volumes:
-          - name: config
-            configMap:
-              name: prometheusconf
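Note that apps/v1beta1 Deployments were removed in Kubernetes 1.16. Since spec.selector is already set with matchLabels, a hypothetical port forward is a one-line change:

apiVersion: apps/v1   # apps/v1beta1 was removed in Kubernetes 1.16
kind: Deployment
metadata:
  name: prometheus
# spec unchanged: the matchLabels selector already satisfies apps/v1
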
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/flekszible.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/flekszible.yaml
deleted file mode 100644
index 20809e5..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/flekszible.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-description: Prometheus monitoring
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/role.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/role.yaml
deleted file mode 100644
index 194e9f4..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/role.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRole
-metadata:
-  name: prometheus
-rules:
-- apiGroups: [""]
-  resources:
-  - nodes
-  - services
-  - endpoints
-  - pods
-  verbs: ["get", "list", "watch"]
-- apiGroups: [""]
-  resources:
-  - configmaps
-  verbs: ["get"]
-- nonResourceURLs: ["/metrics"]
-  verbs: ["get"]
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/rolebinding.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/rolebinding.yaml
deleted file mode 100644
index ef5105d..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/rolebinding.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
-  name: prometheus-operator
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: prometheus
-subjects:
-- kind: ServiceAccount
-  name: prometheus-operator
-  namespace: default
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service-account.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service-account.yaml
deleted file mode 100644
index d5ba196..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service-account.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: prometheus-operator
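The ServiceAccount, ClusterRole and ClusterRoleBinding above form one unit: the prometheus Deployment runs as prometheus-operator, and the binding grants it the read-only discovery permissions. On clusters past Kubernetes 1.22 the v1beta1 RBAC group is gone; a hypothetical port of the binding (namespace "default" is taken from the original):

apiVersion: rbac.authorization.k8s.io/v1   # v1beta1 RBAC was removed in 1.22
kind: ClusterRoleBinding
metadata:
  name: prometheus-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus-operator
  namespace: default
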
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service.yaml
deleted file mode 100644
index e07aafc..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-kind: Service
-apiVersion: v1
-metadata:
-  name: prometheus
-spec:
-  selector:
-    app: prometheus
-  ports:
-  - protocol: TCP
-    port: 9090
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml
deleted file mode 100644
index 54203bd..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-description: Simple python based webserver with persistent volume claim.
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-deployment.yaml
deleted file mode 100644
index d8e7578..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-deployment.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: ozone-csi-test-webserver
-  labels:
-    app: ozone-csi-test-webserver
-  annotations: {}
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: ozone-csi-test-webserver
-  template:
-    metadata:
-      labels:
-        app: ozone-csi-test-webserver
-    spec:
-      containers:
-      - name: web
-        image: python:3.7.3-alpine3.8
-        args:
-           - python
-           - -m
-           - http.server
-           - --directory
-           - /www
-        volumeMounts:
-        - mountPath: /www
-          name: webroot
-      volumes:
-      - name: webroot
-        persistentVolumeClaim:
-          claimName: ozone-csi-test-webserver
-          readOnly: false
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-service.yaml
deleted file mode 100644
index 6a53a43..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-service.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: ozone-csi-test-webserver
-  labels: {}
-  annotations: {}
-spec:
-  type: NodePort
-  ports:
-  - port: 8000
-    name: web
-  selector:
-    app: ozone-csi-test-webserver
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-volume.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-volume.yaml
deleted file mode 100644
index 4b1e44b..0000000
--- a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-volume.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: ozone-csi-test-webserver
-  labels: {}
-  annotations: {}
-spec:
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: ozone
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/Flekszible
deleted file mode 100644
index e00d9ce..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/Flekszible
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-source:
-  - path: ../../definitions
-import:
-  - path: ozone
-    transformations:
-    - type: Image
-      image: "@docker.image@"
-    - type: PublishStatefulSet
-    - type: ozone/emptydir
-  - path: ozone/freon
-    destination: freon
-    transformations:
-    - type: Image
-      image: "@docker.image@"
-header: |-
-  # Licensed to the Apache Software Foundation (ASF) under one
-  # or more contributor license agreements.  See the NOTICE file
-  # distributed with this work for additional information
-  # regarding copyright ownership.  The ASF licenses this file
-  # to you under the Apache License, Version 2.0 (the
-  # "License"); you may not use this file except in compliance
-  # with the License.  You may obtain a copy of the License at
-  #
-  #     http://www.apache.org/licenses/LICENSE-2.0
-  #
-  # Unless required by applicable law or agreed to in writing, software
-  # distributed under the License is distributed on an "AS IS" BASIS,
-  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  # See the License for the specific language governing permissions and
-  # limitations under the License.
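This descriptor composes the generic ../../definitions tree into the concrete example rendered in the files that follow. An annotated sketch of the import section; what each transformation contributes is an inference from the generated output, not a statement of Flekszible internals:

import:
  - path: ozone                   # om/scm/s3g/datanode resources
    transformations:
    - type: Image
      image: "@docker.image@"     # placeholder substituted at build time
    - type: PublishStatefulSet    # appears to add the *-public NodePort services
    - type: ozone/emptydir        # appears to back /data with emptyDir volumes
  - path: ozone/freon
    destination: freon            # rendered into the freon/ subdirectory
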
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/LICENSE.header b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/LICENSE.header
deleted file mode 100644
index 635f0d9..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/LICENSE.header
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml
deleted file mode 100644
index 94d16d2..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: config
-data:
-  OZONE-SITE.XML_hdds.datanode.dir: /data/storage
-  OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data
-  OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata
-  OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm
-  OZONE-SITE.XML_ozone.om.address: om-0.om
-  OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm
-  OZONE-SITE.XML_ozone.scm.names: scm-0.scm
-  OZONE-SITE.XML_ozone.enabled: "true"
-  LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout
-  LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender
-  LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout
-  LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern: '%d{yyyy-MM-dd
-    HH:mm:ss} %-5p %c{1}:%L - %m%n'
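Every key here follows the <FILE>_<property> convention of the hadoop-runner base image: the entrypoint is expected to rewrite the pairs into real configuration files before the daemon starts. A sketch of the assumed mapping:

# OZONE-SITE.XML_ozone.om.address: om-0.om
#   -> ozone-site.xml:   <property>
#                          <name>ozone.om.address</name>
#                          <value>om-0.om</value>
#                        </property>
# LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout
#   -> log4j.properties: log4j.rootLogger=INFO, stdout
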
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-public-service.yaml
deleted file mode 100644
index 89b5914..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-public-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: datanode-public
-spec:
-  ports:
-  - port: 9870
-    name: rpc
-  selector:
-    app: ozone
-    component: datanode
-  type: NodePort
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-service.yaml
deleted file mode 100644
index 929e7a2..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: datanode
-spec:
-  ports:
-  - port: 9870
-    name: rpc
-  clusterIP: None
-  selector:
-    app: ozone
-    component: datanode
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml
deleted file mode 100644
index c393ead..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: datanode
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: datanode
-  serviceName: datanode
-  replicas: 3
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: datanode
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9882"
-        prometheus.io/path: /prom
-    spec:
-      affinity:
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-              - key: component
-                operator: In
-                values:
-                - datanode
-            topologyKey: kubernetes.io/hostname
-      securityContext:
-        fsGroup: 1000
-      containers:
-      - name: datanode
-        image: '@docker.image@'
-        args:
-        - ozone
-        - datanode
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      volumes:
-      - name: data
-        emptyDir: {}
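The datanode StatefulSet is the only component with a hard scheduling constraint; an annotated extract:

spec:
  replicas: 3                 # needs at least three schedulable nodes, because:
  template:
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:     # hard rule
          - labelSelector:
              matchExpressions:
              - key: component
                operator: In
                values: [datanode]
            topologyKey: kubernetes.io/hostname   # at most one datanode per node

The minikube variant later in this patch removes exactly this block so the three replicas can share a single node.
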
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/freon/freon-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/freon/freon-deployment.yaml
deleted file mode 100644
index 1662c4e..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/freon/freon-deployment.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: freon
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: ozone
-      component: freon
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: freon
-    spec:
-      containers:
-      - name: freon
-        image: '@docker.image@'
-        args:
-        - ozone
-        - freon
-        - rk
-        - --factor=THREE
-        - --replicationType=RATIS
-        envFrom:
-        - configMapRef:
-            name: config
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-public-service.yaml
deleted file mode 100644
index deb2c33..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-public-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: om-public
-spec:
-  ports:
-  - port: 9874
-    name: ui
-  selector:
-    app: ozone
-    component: om
-  type: NodePort
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-service.yaml
deleted file mode 100644
index 617277d..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: om
-spec:
-  ports:
-  - port: 9874
-    name: ui
-  clusterIP: None
-  selector:
-    app: ozone
-    component: om
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml
deleted file mode 100644
index 5de01f5..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: om
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: om
-  serviceName: om
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: om
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9874"
-        prometheus.io/path: /prom
-    spec:
-      securityContext:
-        fsGroup: 1000
-      containers:
-      - name: om
-        image: '@docker.image@'
-        args:
-        - ozone
-        - om
-        env:
-        - name: WAITFOR
-          value: scm-0.scm:9876
-        - name: ENSURE_OM_INITIALIZED
-          value: /data/metadata/om/current/VERSION
-        livenessProbe:
-          tcpSocket:
-            port: 9862
-          initialDelaySeconds: 30
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      volumes:
-      - name: data
-        emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-public-service.yaml
deleted file mode 100644
index d2b2420..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-public-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: s3g-public
-spec:
-  ports:
-  - port: 9878
-    name: rest
-  selector:
-    app: ozone
-    component: s3g
-  type: NodePort
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-service.yaml
deleted file mode 100644
index dd1ca83..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: s3g
-spec:
-  ports:
-  - port: 9878
-    name: rest
-  clusterIP: None
-  selector:
-    app: ozone
-    component: s3g
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml
deleted file mode 100644
index 2409583..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: s3g
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: s3g
-  serviceName: s3g
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: s3g
-    spec:
-      containers:
-      - name: s3g
-        image: '@docker.image@'
-        args:
-        - ozone
-        - s3g
-        livenessProbe:
-          httpGet:
-            path: /
-            port: 9878
-          initialDelaySeconds: 30
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      volumes:
-      - name: data
-        emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-public-service.yaml
deleted file mode 100644
index e3246fc..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-public-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: scm-public
-spec:
-  ports:
-  - port: 9876
-    name: ui
-  selector:
-    app: ozone
-    component: scm
-  type: NodePort
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-service.yaml
deleted file mode 100644
index 0df15d6..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: scm
-spec:
-  ports:
-  - port: 9876
-    name: ui
-  clusterIP: None
-  selector:
-    app: ozone
-    component: scm
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-statefulset.yaml
deleted file mode 100644
index 0f8173c..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-statefulset.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: scm
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: scm
-  serviceName: scm
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: scm
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9876"
-        prometheus.io/path: /prom
-    spec:
-      securityContext:
-        fsGroup: 1000
-      initContainers:
-      - name: init
-        image: '@docker.image@'
-        args:
-        - ozone
-        - scm
-        - --init
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      containers:
-      - name: scm
-        image: '@docker.image@'
-        args:
-        - ozone
-        - scm
-        livenessProbe:
-          tcpSocket:
-            port: 9861
-          initialDelaySeconds: 30
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      volumes:
-      - name: data
-        emptyDir: {}
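
The StatefulSet above separates one-time cluster initialization from normal startup: an initContainer runs `ozone scm --init` against the shared /data volume before the long-running `ozone scm` container starts, so a pod restart re-runs the init step (expected to be a no-op once the metadata exists). Note also that the liveness probe checks the RPC port 9861, while 9876 is the UI/metrics port scraped by Prometheus. A trimmed sketch of the idiom:

    # One-time init followed by the daemon, both sharing the data volume:
    initContainers:
    - name: init
      image: '@docker.image@'        # build-time placeholder, as in the manifest
      args: ["ozone", "scm", "--init"]
      volumeMounts:
      - name: data
        mountPath: /data
    containers:
    - name: scm
      image: '@docker.image@'
      args: ["ozone", "scm"]
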
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible
deleted file mode 100644
index 3390db0..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible
+++ /dev/null
@@ -1,54 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-source:
-  - path: ../../definitions
-import:
-  - path: ozone
-    transformations:
-    - type: Image
-      image: "@docker.image@"
-    - type: PublishStatefulSet
-    - type: ozone/emptydir
-    - type: Remove
-      trigger:
-         metadata:
-            name: datanode
-      path:
-        - spec
-        - template
-        - spec
-        - affinity
-  - path: ozone/freon
-    destination: freon
-    transformations:
-    - type: Image
-      image: "@docker.image@"
-header: |-
-  # Licensed to the Apache Software Foundation (ASF) under one
-  # or more contributor license agreements.  See the NOTICE file
-  # distributed with this work for additional information
-  # regarding copyright ownership.  The ASF licenses this file
-  # to you under the Apache License, Version 2.0 (the
-  # "License"); you may not use this file except in compliance
-  # with the License.  You may obtain a copy of the License at
-  #
-  #     http://www.apache.org/licenses/LICENSE-2.0
-  #
-  # Unless required by applicable law or agreed to in writing, software
-  # distributed under the License is distributed on an "AS IS" BASIS,
-  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  # See the License for the specific language governing permissions and
-  # limitations under the License.
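
This descriptor composes the shared k8s definitions for a single-node minikube cluster. The Remove transformation strips spec.template.spec.affinity from the datanode StatefulSet: the base definition (visible in the ozone-dev variant of datanode-statefulset.yaml later in this patch) pins one datanode per node via pod anti-affinity, which a one-node minikube could never satisfy with replicas: 3. The block being removed:

    # Pod anti-affinity dropped for minikube (from the base datanode definition):
    affinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
            - key: component
              operator: In
              values:
              - datanode
          topologyKey: kubernetes.io/hostname
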
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/LICENSE.header b/hadoop-ozone/dist/src/main/k8s/examples/minikube/LICENSE.header
deleted file mode 100644
index 635f0d9..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/LICENSE.header
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml
deleted file mode 100644
index 94d16d2..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: config
-data:
-  OZONE-SITE.XML_hdds.datanode.dir: /data/storage
-  OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data
-  OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata
-  OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm
-  OZONE-SITE.XML_ozone.om.address: om-0.om
-  OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm
-  OZONE-SITE.XML_ozone.scm.names: scm-0.scm
-  OZONE-SITE.XML_ozone.enabled: "true"
-  LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout
-  LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender
-  LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout
-  LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern: '%d{yyyy-MM-dd
-    HH:mm:ss} %-5p %c{1}:%L - %m%n'
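
The keys here follow the configuration-injection convention of the apache/hadoop-runner base image: the part before the first underscore names a configuration file, and the container entrypoint (an envtoconf-style step) writes the remainder into that file, so no ozone-site.xml has to be baked into the image. A sketch of the mapping, assuming that entrypoint behavior:

    #   OZONE-SITE.XML_ozone.scm.names: scm-0.scm
    # is rewritten by the entrypoint into ozone-site.xml as
    #   <property><name>ozone.scm.names</name><value>scm-0.scm</value></property>
    # and LOG4J.PROPERTIES_* keys become plain lines in log4j.properties, e.g.
    #   log4j.rootLogger=INFO, stdout
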
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-public-service.yaml
deleted file mode 100644
index 89b5914..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-public-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: datanode-public
-spec:
-  ports:
-  - port: 9870
-    name: rpc
-  selector:
-    app: ozone
-    component: datanode
-  type: NodePort
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-service.yaml
deleted file mode 100644
index 929e7a2..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: datanode
-spec:
-  ports:
-  - port: 9870
-    name: rpc
-  clusterIP: None
-  selector:
-    app: ozone
-    component: datanode
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml
deleted file mode 100644
index db91864..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: datanode
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: datanode
-  serviceName: datanode
-  replicas: 3
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: datanode
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9882"
-        prometheus.io/path: /prom
-    spec:
-      securityContext:
-        fsGroup: 1000
-      containers:
-      - name: datanode
-        image: '@docker.image@'
-        args:
-        - ozone
-        - datanode
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      volumes:
-      - name: data
-        emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/freon/freon-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/freon/freon-deployment.yaml
deleted file mode 100644
index 1662c4e..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/freon/freon-deployment.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: freon
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: ozone
-      component: freon
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: freon
-    spec:
-      containers:
-      - name: freon
-        image: '@docker.image@'
-        args:
-        - ozone
-        - freon
-        - rk
-        - --factor=THREE
-        - --replicationType=RATIS
-        envFrom:
-        - configMapRef:
-            name: config
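
freon is Ozone's load generator; `rk` (random keys) writes test keys with RATIS three-way replication, matching the three datanode replicas in this profile. It runs as a stateless Deployment rather than a StatefulSet, so pausing the load is just a matter of scaling it down:

    # Stop and restart the generator (hypothetical kubectl usage):
    #   kubectl scale deployment/freon --replicas=0
    #   kubectl scale deployment/freon --replicas=1
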
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-public-service.yaml
deleted file mode 100644
index deb2c33..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-public-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: om-public
-spec:
-  ports:
-  - port: 9874
-    name: ui
-  selector:
-    app: ozone
-    component: om
-  type: NodePort
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-service.yaml
deleted file mode 100644
index 617277d..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: om
-spec:
-  ports:
-  - port: 9874
-    name: ui
-  clusterIP: None
-  selector:
-    app: ozone
-    component: om
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml
deleted file mode 100644
index 5de01f5..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: om
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: om
-  serviceName: om
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: om
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9874"
-        prometheus.io/path: /prom
-    spec:
-      securityContext:
-        fsGroup: 1000
-      containers:
-      - name: om
-        image: '@docker.image@'
-        args:
-        - ozone
-        - om
-        env:
-        - name: WAITFOR
-          value: scm-0.scm:9876
-        - name: ENSURE_OM_INITIALIZED
-          value: /data/metadata/om/current/VERSION
-        livenessProbe:
-          tcpSocket:
-            port: 9862
-          initialDelaySeconds: 30
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      volumes:
-      - name: data
-        emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-public-service.yaml
deleted file mode 100644
index d2b2420..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-public-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: s3g-public
-spec:
-  ports:
-  - port: 9878
-    name: rest
-  selector:
-    app: ozone
-    component: s3g
-  type: NodePort
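
s3g is the Ozone S3 gateway, so the NodePort published here speaks the S3 REST protocol on 9878. A hypothetical smoke test from outside the cluster (the node IP and assigned port depend on the environment):

    # Hypothetical check against the published endpoint:
    #   NODEPORT=$(kubectl get svc s3g-public -o jsonpath='{.spec.ports[0].nodePort}')
    #   aws s3api list-buckets --endpoint-url http://<node-ip>:$NODEPORT
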
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-service.yaml
deleted file mode 100644
index dd1ca83..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: s3g
-spec:
-  ports:
-  - port: 9878
-    name: rest
-  clusterIP: None
-  selector:
-    app: ozone
-    component: s3g
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-statefulset.yaml
deleted file mode 100644
index 2409583..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-statefulset.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: s3g
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: s3g
-  serviceName: s3g
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: s3g
-    spec:
-      containers:
-      - name: s3g
-        image: '@docker.image@'
-        args:
-        - ozone
-        - s3g
-        livenessProbe:
-          httpGet:
-            path: /
-            port: 9878
-          initialDelaySeconds: 30
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      volumes:
-      - name: data
-        emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-public-service.yaml
deleted file mode 100644
index e3246fc..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-public-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: scm-public
-spec:
-  ports:
-  - port: 9876
-    name: ui
-  selector:
-    app: ozone
-    component: scm
-  type: NodePort
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-service.yaml
deleted file mode 100644
index 0df15d6..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: scm
-spec:
-  ports:
-  - port: 9876
-    name: ui
-  clusterIP: None
-  selector:
-    app: ozone
-    component: scm
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-statefulset.yaml
deleted file mode 100644
index 0f8173c..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-statefulset.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: scm
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: scm
-  serviceName: scm
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: scm
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9876"
-        prometheus.io/path: /prom
-    spec:
-      securityContext:
-        fsGroup: 1000
-      initContainers:
-      - name: init
-        image: '@docker.image@'
-        args:
-        - ozone
-        - scm
-        - --init
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      containers:
-      - name: scm
-        image: '@docker.image@'
-        args:
-        - ozone
-        - scm
-        livenessProbe:
-          tcpSocket:
-            port: 9861
-          initialDelaySeconds: 30
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      volumes:
-      - name: data
-        emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible
deleted file mode 100644
index 96e8c62..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible
+++ /dev/null
@@ -1,46 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-source:
-  - path: ../../definitions
-import:
-  - path: ozone
-    transformations:
-    - type: Image
-      image: "@docker.image@"
-    - type: ozone/prometheus
-    - type: PublishStatefulSet
-    - type: ozone/tracing
-    - type: ozone/profiler
-    - type: ozone/emptydir
-    - type: ozone/csi
-  - path: prometheus
-  - path: jaeger
-    transformations:
-    - type: PublishService
-  - path: ozone/freon
-    destination: freon
-    transformations:
-    - type: Image
-      image: "@docker.image@"
-    - type: ozone/tracing
-  - path: pv-test
-    destination: pv-test
-  - path: ozone-csi
-    destination: csi
-  - path: pv-test
-    destination: pv-test
-transformations:
-  - type: Namespace
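
The dev overlay layers observability (prometheus, jaeger with a published UI, the profiler) and CSI support on top of the base ozone definitions, then applies the Namespace transformation, which presumably stamps the target namespace onto every generated resource. Note that the pv-test import appears twice in the list above, which looks like an oversight in the original file; a deduplicated tail of the import list would read:

    # Deduplicated equivalent of the last three imports (assuming duplicate
    # imports are redundant rather than meaningful to Flekszible):
    - path: ozone-csi
      destination: csi
    - path: pv-test
      destination: pv-test
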
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/LICENSE.header b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/LICENSE.header
deleted file mode 100644
index 635f0d9..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/LICENSE.header
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml
deleted file mode 100644
index 43d11a4..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: config
-data:
-  OZONE-SITE.XML_hdds.datanode.dir: /data/storage
-  OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data
-  OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata
-  OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm
-  OZONE-SITE.XML_ozone.om.address: om-0.om
-  OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm
-  OZONE-SITE.XML_ozone.scm.names: scm-0.scm
-  OZONE-SITE.XML_ozone.enabled: "true"
-  LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout
-  LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender
-  LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout
-  LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern: '%d{yyyy-MM-dd
-    HH:mm:ss} %-5p %c{1}:%L - %m%n'
-  OZONE-SITE.XML_hdds.prometheus.endpoint.enabled: "true"
-  OZONE-SITE.XML_hdds.profiler.endpoint.enabled: "true"
-  ASYNC_PROFILER_HOME: /opt/profiler
-  OZONE-SITE.XML_ozone.csi.s3g.address: http://s3g-0.s3g:9878
-  OZONE-SITE.XML_ozone.csi.socket: /var/lib/csi/csi.sock
-  OZONE-SITE.XML_ozone.csi.owner: hadoop
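
Compared with the minikube ConfigMap, this one additionally enables the Prometheus and profiler endpoints and wires up CSI. The CSI settings must stay in sync with the manifests that follow: the socket path configured here is the same --csi-address the node plugin and registrar are started with, and ozone.csi.s3g.address points at the s3g pod through its headless service. Collected in one place:

    # Values that must agree across these manifests (as shipped in this patch):
    #   ozone.csi.socket          = /var/lib/csi/csi.sock   # also --csi-address
    #   ozone.csi.s3g.address     = http://s3g-0.s3g:9878   # headless-service DNS
    #   kubelet registration path = /var/lib/kubelet/plugins/org.apache.hadoop.ozone/csi.sock
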
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-node-daemonset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-node-daemonset.yaml
deleted file mode 100644
index fe44532..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-node-daemonset.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kind: DaemonSet
-apiVersion: apps/v1beta2
-metadata:
-  name: csi-node
-spec:
-  selector:
-    matchLabels:
-      app: csi-node
-  template:
-    metadata:
-      labels:
-        app: csi-node
-    spec:
-      serviceAccount: csi-ozone
-      containers:
-      - name: driver-registrar
-        image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2
-        args:
-        - --v=4
-        - --csi-address=/var/lib/csi/csi.sock
-        - --kubelet-registration-path=/var/lib/kubelet/plugins/org.apache.hadoop.ozone/csi.sock
-        env:
-        - name: KUBE_NODE_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: spec.nodeName
-        volumeMounts:
-        - name: plugin-dir
-          mountPath: /var/lib/csi
-        - name: registration-dir
-          mountPath: /registration/
-      - name: csi-node
-        image: '@docker.image@'
-        securityContext:
-          runAsUser: 0
-          privileged: true
-          capabilities:
-            add:
-            - SYS_ADMIN
-          allowPrivilegeEscalation: true
-        args:
-        - ozone
-        - csi
-        envFrom:
-        - configMapRef:
-            name: config
-        imagePullPolicy: Always
-        volumeMounts:
-        - name: plugin-dir
-          mountPath: /var/lib/csi
-        - name: pods-mount-dir
-          mountPath: /var/lib/kubelet/pods
-          mountPropagation: Bidirectional
-        - name: fuse-device
-          mountPath: /dev/fuse
-        - name: dbus
-          mountPath: /var/run/dbus
-        - name: systemd
-          mountPath: /run/systemd
-      volumes:
-      - name: plugin-dir
-        hostPath:
-          path: /var/lib/kubelet/plugins/org.apache.hadoop.ozone
-          type: DirectoryOrCreate
-      - name: registration-dir
-        hostPath:
-          path: /var/lib/kubelet/plugins_registry/
-          type: DirectoryOrCreate
-      - name: pods-mount-dir
-        hostPath:
-          path: /var/lib/kubelet/pods
-          type: Directory
-      - name: fuse-device
-        hostPath:
-          path: /dev/fuse
-      - name: dbus
-        hostPath:
-          path: /var/run/dbus
-      - name: systemd
-        hostPath:
-          path: /run/systemd
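
One portability caveat: this DaemonSet is declared with apps/v1beta2, an API version Kubernetes removed in 1.16, so the manifest as written only applies to older clusters (the rest of these examples already use apps/v1). A port would start roughly like this:

    # Same object on a current cluster (sketch; apps/v1 requires the selector,
    # which this manifest already carries):
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: csi-node
    spec:
      selector:
        matchLabels:
          app: csi-node
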
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrole.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrole.yaml
deleted file mode 100644
index 927ba6f..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrole.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: csi-ozone-default
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - secrets
-  verbs:
-  - get
-  - list
-- apiGroups:
-  - ""
-  resources:
-  - events
-  verbs:
-  - list
-  - watch
-  - create
-  - update
-  - patch
-- apiGroups:
-  - ""
-  resources:
-  - nodes
-  verbs:
-  - get
-  - list
-  - update
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - namespaces
-  verbs:
-  - get
-  - list
-- apiGroups:
-  - storage.k8s.io
-  resources:
-  - storageclasses
-  verbs:
-  - get
-  - list
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - persistentvolumeclaims
-  verbs:
-  - get
-  - list
-  - watch
-  - update
-- apiGroups:
-  - ""
-  resources:
-  - persistentvolumes
-  verbs:
-  - get
-  - list
-  - watch
-  - update
-  - create
-- apiGroups:
-  - storage.k8s.io
-  resources:
-  - volumeattachments
-  verbs:
-  - get
-  - list
-  - watch
-  - update
-- apiGroups:
-  - storage.k8s.io
-  resources:
-  - csinodes
-  verbs:
-  - get
-  - list
-  - watch
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrolebinding.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrolebinding.yaml
deleted file mode 100644
index 948e759..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrolebinding.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: csi-ozone-default
-subjects:
-- kind: ServiceAccount
-  name: csi-ozone
-  namespace: default
-roleRef:
-  kind: ClusterRole
-  name: csi-ozone-default
-  apiGroup: rbac.authorization.k8s.io
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-serviceaccount.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-serviceaccount.yaml
deleted file mode 100644
index 628d2a1..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-serviceaccount.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  namespace: default
-  name: csi-ozone
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-provisioner-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-provisioner-deployment.yaml
deleted file mode 100644
index 03478ff..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-provisioner-deployment.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kind: Deployment
-apiVersion: apps/v1
-metadata:
-  name: csi-provisioner
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: csi-provisioner
-  template:
-    metadata:
-      labels:
-        app: csi-provisioner
-    spec:
-      serviceAccount: csi-ozone
-      containers:
-      - name: csi-provisioner
-        image: quay.io/k8scsi/csi-provisioner:v1.0.1
-        args:
-        - --csi-address=/var/lib/csi/csi.sock
-        volumeMounts:
-        - name: socket-dir
-          mountPath: /var/lib/csi/
-      - name: ozone-csi
-        image: '@docker.image@'
-        volumeMounts:
-        - name: socket-dir
-          mountPath: /var/lib/csi/
-        imagePullPolicy: Always
-        envFrom:
-        - configMapRef:
-            name: config
-        args:
-        - ozone
-        - csi
-      volumes:
-      - name: socket-dir
-        emptyDir: null
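
A minor nit in the last volume: `emptyDir: null`, unlike the `emptyDir: {}` spelling used everywhere else in these examples, leaves the volume source formally unset in the parsed manifest, and whether it is accepted depends on client/server defaulting. The unambiguous form would be:

    volumes:
    - name: socket-dir
      emptyDir: {}   # explicit empty source, matching the other manifests
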
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/org.apache.hadoop.ozone-csidriver.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/org.apache.hadoop.ozone-csidriver.yaml
deleted file mode 100644
index e657c50..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/org.apache.hadoop.ozone-csidriver.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: storage.k8s.io/v1beta1
-kind: CSIDriver
-metadata:
-  name: org.apache.hadoop.ozone
-spec:
-  attachRequired: false
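
Like the DaemonSet above, this object is pinned to a beta API: CSIDriver graduated to storage.k8s.io/v1 in Kubernetes 1.18, and v1beta1 was removed in 1.22. The equivalent on a current cluster:

    apiVersion: storage.k8s.io/v1
    kind: CSIDriver
    metadata:
      name: org.apache.hadoop.ozone
    spec:
      attachRequired: false
      # v1 adds fields such as podInfoOnMount and volumeLifecycleModes; their
      # defaults are assumed sufficient for this driver.
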
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/ozone-storageclass.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/ozone-storageclass.yaml
deleted file mode 100644
index c6c1c6c..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/ozone-storageclass.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
-  name: ozone
-provisioner: org.apache.hadoop.ozone
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-public-service.yaml
deleted file mode 100644
index 89b5914..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-public-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: datanode-public
-spec:
-  ports:
-  - port: 9870
-    name: rpc
-  selector:
-    app: ozone
-    component: datanode
-  type: NodePort
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-service.yaml
deleted file mode 100644
index 929e7a2..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: datanode
-spec:
-  ports:
-  - port: 9870
-    name: rpc
-  clusterIP: None
-  selector:
-    app: ozone
-    component: datanode
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml
deleted file mode 100644
index 475ce69..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: datanode
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: datanode
-  serviceName: datanode
-  replicas: 3
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: datanode
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9882"
-        prometheus.io/path: /prom
-    spec:
-      affinity:
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-              - key: component
-                operator: In
-                values:
-                - datanode
-            topologyKey: kubernetes.io/hostname
-      securityContext:
-        fsGroup: 1000
-      containers:
-      - name: datanode
-        image: '@docker.image@'
-        args:
-        - ozone
-        - datanode
-        envFrom:
-        - configMapRef:
-            name: config
-        env:
-        - name: JAEGER_SAMPLER_TYPE
-          value: probabilistic
-        - name: JAEGER_SAMPLER_PARAM
-          value: "0.01"
-        - name: JAEGER_AGENT_HOST
-          value: jaeger-0.jaeger
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      volumes:
-      - name: data
-        emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/freon/freon-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/freon/freon-deployment.yaml
deleted file mode 100644
index 88c9045..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/freon/freon-deployment.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: freon
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: ozone
-      component: freon
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: freon
-    spec:
-      containers:
-      - name: freon
-        image: '@docker.image@'
-        args:
-        - ozone
-        - freon
-        - rk
-        - --factor=THREE
-        - --replicationType=RATIS
-        envFrom:
-        - configMapRef:
-            name: config
-        env:
-        - name: JAEGER_SAMPLER_TYPE
-          value: probabilistic
-        - name: JAEGER_SAMPLER_PARAM
-          value: "0.01"
-        - name: JAEGER_AGENT_HOST
-          value: jaeger-0.jaeger
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-public-service.yaml
deleted file mode 100644
index fb06569..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-public-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: jaeger-public
-spec:
-  selector:
-    app: jaeger
-    component: jaeger
-  ports:
-  - name: ui
-    port: 16686
-  type: NodePort
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-service.yaml
deleted file mode 100644
index 6e6125a..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: jaeger
-spec:
-  clusterIP: None
-  selector:
-    app: jaeger
-    component: jaeger
-  ports:
-  - name: ui
-    port: 16686
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-statefulset.yaml
deleted file mode 100644
index 5141014..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-statefulset.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: jaeger
-spec:
-  selector:
-    matchLabels:
-      app: jaeger
-      component: jaeger
-  replicas: 1
-  serviceName: jaeger
-  template:
-    metadata:
-      labels:
-        app: jaeger
-        component: jaeger
-    spec:
-      containers:
-      - name: jaeger
-        image: jaegertracing/all-in-one:latest
-        ports:
-        - containerPort: 16686
-          name: web
-        env:
-        - name: COLLECTOR_ZIPKIN_HTTP_PORT
-          value: "9411"
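
The Ozone pods in this profile report traces here via the JAEGER_* environment variables seen in the surrounding StatefulSets; jaegertracing/all-in-one keeps everything in memory, which is fine for a dev profile but loses traces on pod restart. The relevant wiring, collected in one place:

    #   JAEGER_AGENT_HOST:    jaeger-0.jaeger  # pod DNS via the headless service
    #   JAEGER_SAMPLER_TYPE:  probabilistic
    #   JAEGER_SAMPLER_PARAM: "0.01"           # sample roughly 1% of traces
    #   UI: service jaeger-public, port 16686; Zipkin-compatible HTTP on 9411
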
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-public-service.yaml
deleted file mode 100644
index deb2c33..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-public-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: om-public
-spec:
-  ports:
-  - port: 9874
-    name: ui
-  selector:
-    app: ozone
-    component: om
-  type: NodePort
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-service.yaml
deleted file mode 100644
index 617277d..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: om
-spec:
-  ports:
-  - port: 9874
-    name: ui
-  clusterIP: None
-  selector:
-    app: ozone
-    component: om
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-statefulset.yaml
deleted file mode 100644
index 36df22c..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-statefulset.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: om
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: om
-  serviceName: om
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: om
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9874"
-        prometheus.io/path: /prom
-    spec:
-      securityContext:
-        fsGroup: 1000
-      containers:
-      - name: om
-        image: '@docker.image@'
-        args:
-        - ozone
-        - om
-        env:
-        - name: WAITFOR
-          value: scm-0.scm:9876
-        - name: ENSURE_OM_INITIALIZED
-          value: /data/metadata/om/current/VERSION
-        - name: JAEGER_SAMPLER_TYPE
-          value: probabilistic
-        - name: JAEGER_SAMPLER_PARAM
-          value: "0.01"
-        - name: JAEGER_AGENT_HOST
-          value: jaeger-0.jaeger
-        livenessProbe:
-          tcpSocket:
-            port: 9862
-          initialDelaySeconds: 30
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      volumes:
-      - name: data
-        emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-clusterrole.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-clusterrole.yaml
deleted file mode 100644
index bf62be6..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-clusterrole.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRole
-metadata:
-  name: prometheus-default
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - nodes
-  - services
-  - endpoints
-  - pods
-  verbs:
-  - get
-  - list
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - configmaps
-  verbs:
-  - get
-- nonResourceURLs:
-  - /metrics
-  verbs:
-  - get
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-deployment.yaml
deleted file mode 100644
index 86a188a..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-deployment.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1beta1
-kind: Deployment
-metadata:
-  name: prometheus
-  labels:
-    app: prometheus
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: prometheus
-  template:
-    metadata:
-      labels:
-        app: prometheus
-    spec:
-      serviceAccountName: prometheus-operator
-      containers:
-      - name: prometheus
-        image: prom/prometheus
-        args:
-        - --config.file=/conf/prometheus.yaml
-        ports:
-        - containerPort: 9090
-        volumeMounts:
-        - name: config
-          mountPath: /conf
-          readOnly: true
-      volumes:
-      - name: config
-        configMap:
-          name: prometheusconf
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-clusterrolebinding.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-clusterrolebinding.yaml
deleted file mode 100644
index 13ac066..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-clusterrolebinding.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
-  name: prometheus-operator-default
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: prometheus-default
-subjects:
-- kind: ServiceAccount
-  name: prometheus-operator
-  namespace: default
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-serviceaccount.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-serviceaccount.yaml
deleted file mode 100644
index f816888..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-serviceaccount.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: prometheus-operator
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-service.yaml
deleted file mode 100644
index 312cf58..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-service.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kind: Service
-apiVersion: v1
-metadata:
-  name: prometheus
-spec:
-  selector:
-    app: prometheus
-  ports:
-  - protocol: TCP
-    port: 9090
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheusconf-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheusconf-configmap.yaml
deleted file mode 100644
index 6d5b123..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheusconf-configmap.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: prometheusconf
-data:
-  prometheus.yaml: |-
-    global:
-      scrape_interval: 15s
-    scrape_configs:
-    - job_name: jmxexporter
-      kubernetes_sd_configs:
-      - role: pod
-      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-      relabel_configs:
-      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
-        action: keep
-        regex: true
-      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
-        action: replace
-        target_label: __metrics_path__
-        regex: (.+)
-      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
-        action: replace
-        regex: ([^:]+)(?::\d+)?;(\d+)
-        replacement: $1:$2
-        target_label: __address__
-      - action: labelmap
-        regex: __meta_kubernetes_pod_label_(.+)
-      - source_labels: [__meta_kubernetes_namespace]
-        action: replace
-        target_label: kubernetes_namespace
-      - source_labels: [__meta_kubernetes_pod_name]
-        action: replace
-        target_label: kubernetes_pod_name
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-deployment.yaml
deleted file mode 100644
index 04edcec..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-deployment.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: ozone-csi-test-webserver
-  labels:
-    app: ozone-csi-test-webserver
-  annotations: {}
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: ozone-csi-test-webserver
-  template:
-    metadata:
-      labels:
-        app: ozone-csi-test-webserver
-    spec:
-      containers:
-      - name: web
-        image: python:3.7.3-alpine3.8
-        args:
-        - python
-        - -m
-        - http.server
-        - --directory
-        - /www
-        volumeMounts:
-        - mountPath: /www
-          name: webroot
-      volumes:
-      - name: webroot
-        persistentVolumeClaim:
-          claimName: ozone-csi-test-webserver
-          readOnly: false
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml
deleted file mode 100644
index 4b1e44b..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: ozone-csi-test-webserver
-  labels: {}
-  annotations: {}
-spec:
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: ozone
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-service.yaml
deleted file mode 100644
index 6a53a43..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-service.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: ozone-csi-test-webserver
-  labels: {}
-  annotations: {}
-spec:
-  type: NodePort
-  ports:
-  - port: 8000
-    name: web
-  selector:
-    app: ozone-csi-test-webserver
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-public-service.yaml
deleted file mode 100644
index d2b2420..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-public-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: s3g-public
-spec:
-  ports:
-  - port: 9878
-    name: rest
-  selector:
-    app: ozone
-    component: s3g
-  type: NodePort
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-service.yaml
deleted file mode 100644
index dd1ca83..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: s3g
-spec:
-  ports:
-  - port: 9878
-    name: rest
-  clusterIP: None
-  selector:
-    app: ozone
-    component: s3g
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-statefulset.yaml
deleted file mode 100644
index 0feb368..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-statefulset.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: s3g
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: s3g
-  serviceName: s3g
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: s3g
-    spec:
-      containers:
-      - name: s3g
-        image: '@docker.image@'
-        args:
-        - ozone
-        - s3g
-        livenessProbe:
-          httpGet:
-            path: /
-            port: 9878
-          initialDelaySeconds: 30
-        envFrom:
-        - configMapRef:
-            name: config
-        env:
-        - name: JAEGER_SAMPLER_TYPE
-          value: probabilistic
-        - name: JAEGER_SAMPLER_PARAM
-          value: "0.01"
-        - name: JAEGER_AGENT_HOST
-          value: jaeger-0.jaeger
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      volumes:
-      - name: data
-        emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-public-service.yaml
deleted file mode 100644
index e3246fc..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-public-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: scm-public
-spec:
-  ports:
-  - port: 9876
-    name: ui
-  selector:
-    app: ozone
-    component: scm
-  type: NodePort
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-service.yaml
deleted file mode 100644
index 0df15d6..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: scm
-spec:
-  ports:
-  - port: 9876
-    name: ui
-  clusterIP: None
-  selector:
-    app: ozone
-    component: scm
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-statefulset.yaml
deleted file mode 100644
index 246f8c4..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-statefulset.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: scm
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: scm
-  serviceName: scm
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: scm
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9876"
-        prometheus.io/path: /prom
-    spec:
-      securityContext:
-        fsGroup: 1000
-      initContainers:
-      - name: init
-        image: '@docker.image@'
-        args:
-        - ozone
-        - scm
-        - --init
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      containers:
-      - name: scm
-        image: '@docker.image@'
-        args:
-        - ozone
-        - scm
-        livenessProbe:
-          tcpSocket:
-            port: 9861
-          initialDelaySeconds: 30
-        envFrom:
-        - configMapRef:
-            name: config
-        env:
-        - name: JAEGER_SAMPLER_TYPE
-          value: probabilistic
-        - name: JAEGER_SAMPLER_PARAM
-          value: "0.01"
-        - name: JAEGER_AGENT_HOST
-          value: jaeger-0.jaeger
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      volumes:
-      - name: data
-        emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible
deleted file mode 100644
index 2fb527c..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible
+++ /dev/null
@@ -1,37 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-source:
-  - path: ../../definitions
-import:
-  - path: ozone
-    transformations:
-    - type: Image
-      image: "@docker.image@"
-    - type: ozone/persistence
-    - type: ozone/csi
-  - path: ozone/freon
-    destination: freon
-    transformations:
-    - type: Image
-      image: "@docker.image@"
-  - path: pv-test
-    destination: pv-test
-  - path: ozone-csi
-    destination: csi
-  - path: pv-test
-    destination: pv-test
-transformations:
-  - type: Namespace
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/LICENSE.header b/hadoop-ozone/dist/src/main/k8s/examples/ozone/LICENSE.header
deleted file mode 100644
index 635f0d9..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/LICENSE.header
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml
deleted file mode 100644
index e554145..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: config
-data:
-  OZONE-SITE.XML_hdds.datanode.dir: /data/storage
-  OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data
-  OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata
-  OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm
-  OZONE-SITE.XML_ozone.om.address: om-0.om
-  OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm
-  OZONE-SITE.XML_ozone.scm.names: scm-0.scm
-  OZONE-SITE.XML_ozone.enabled: "true"
-  LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout
-  LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender
-  LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout
-  LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern: '%d{yyyy-MM-dd
-    HH:mm:ss} %-5p %c{1}:%L - %m%n'
-  OZONE-SITE.XML_ozone.csi.s3g.address: http://s3g-0.s3g:9878
-  OZONE-SITE.XML_ozone.csi.socket: /var/lib/csi/csi.sock
-  OZONE-SITE.XML_ozone.csi.owner: hadoop
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-node-daemonset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-node-daemonset.yaml
deleted file mode 100644
index fe44532..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-node-daemonset.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kind: DaemonSet
-apiVersion: apps/v1beta2
-metadata:
-  name: csi-node
-spec:
-  selector:
-    matchLabels:
-      app: csi-node
-  template:
-    metadata:
-      labels:
-        app: csi-node
-    spec:
-      serviceAccount: csi-ozone
-      containers:
-      - name: driver-registrar
-        image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2
-        args:
-        - --v=4
-        - --csi-address=/var/lib/csi/csi.sock
-        - --kubelet-registration-path=/var/lib/kubelet/plugins/org.apache.hadoop.ozone/csi.sock
-        env:
-        - name: KUBE_NODE_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: spec.nodeName
-        volumeMounts:
-        - name: plugin-dir
-          mountPath: /var/lib/csi
-        - name: registration-dir
-          mountPath: /registration/
-      - name: csi-node
-        image: '@docker.image@'
-        securityContext:
-          runAsUser: 0
-          privileged: true
-          capabilities:
-            add:
-            - SYS_ADMIN
-          allowPrivilegeEscalation: true
-        args:
-        - ozone
-        - csi
-        envFrom:
-        - configMapRef:
-            name: config
-        imagePullPolicy: Always
-        volumeMounts:
-        - name: plugin-dir
-          mountPath: /var/lib/csi
-        - name: pods-mount-dir
-          mountPath: /var/lib/kubelet/pods
-          mountPropagation: Bidirectional
-        - name: fuse-device
-          mountPath: /dev/fuse
-        - name: dbus
-          mountPath: /var/run/dbus
-        - name: systemd
-          mountPath: /run/systemd
-      volumes:
-      - name: plugin-dir
-        hostPath:
-          path: /var/lib/kubelet/plugins/org.apache.hadoop.ozone
-          type: DirectoryOrCreate
-      - name: registration-dir
-        hostPath:
-          path: /var/lib/kubelet/plugins_registry/
-          type: DirectoryOrCreate
-      - name: pods-mount-dir
-        hostPath:
-          path: /var/lib/kubelet/pods
-          type: Directory
-      - name: fuse-device
-        hostPath:
-          path: /dev/fuse
-      - name: dbus
-        hostPath:
-          path: /var/run/dbus
-      - name: systemd
-        hostPath:
-          path: /run/systemd
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrole.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrole.yaml
deleted file mode 100644
index 927ba6f..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrole.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: csi-ozone-default
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - secrets
-  verbs:
-  - get
-  - list
-- apiGroups:
-  - ""
-  resources:
-  - events
-  verbs:
-  - list
-  - watch
-  - create
-  - update
-  - patch
-- apiGroups:
-  - ""
-  resources:
-  - nodes
-  verbs:
-  - get
-  - list
-  - update
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - namespaces
-  verbs:
-  - get
-  - list
-- apiGroups:
-  - storage.k8s.io
-  resources:
-  - storageclasses
-  verbs:
-  - get
-  - list
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - persistentvolumeclaims
-  verbs:
-  - get
-  - list
-  - watch
-  - update
-- apiGroups:
-  - ""
-  resources:
-  - persistentvolumes
-  verbs:
-  - get
-  - list
-  - watch
-  - update
-  - create
-- apiGroups:
-  - storage.k8s.io
-  resources:
-  - volumeattachments
-  verbs:
-  - get
-  - list
-  - watch
-  - update
-- apiGroups:
-  - storage.k8s.io
-  resources:
-  - csinodes
-  verbs:
-  - get
-  - list
-  - watch
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrolebinding.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrolebinding.yaml
deleted file mode 100644
index 948e759..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrolebinding.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: csi-ozone-default
-subjects:
-- kind: ServiceAccount
-  name: csi-ozone
-  namespace: default
-roleRef:
-  kind: ClusterRole
-  name: csi-ozone-default
-  apiGroup: rbac.authorization.k8s.io
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-serviceaccount.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-serviceaccount.yaml
deleted file mode 100644
index 628d2a1..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-serviceaccount.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  namespace: default
-  name: csi-ozone
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-provisioner-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-provisioner-deployment.yaml
deleted file mode 100644
index 03478ff..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-provisioner-deployment.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kind: Deployment
-apiVersion: apps/v1
-metadata:
-  name: csi-provisioner
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: csi-provisioner
-  template:
-    metadata:
-      labels:
-        app: csi-provisioner
-    spec:
-      serviceAccount: csi-ozone
-      containers:
-      - name: csi-provisioner
-        image: quay.io/k8scsi/csi-provisioner:v1.0.1
-        args:
-        - --csi-address=/var/lib/csi/csi.sock
-        volumeMounts:
-        - name: socket-dir
-          mountPath: /var/lib/csi/
-      - name: ozone-csi
-        image: '@docker.image@'
-        volumeMounts:
-        - name: socket-dir
-          mountPath: /var/lib/csi/
-        imagePullPolicy: Always
-        envFrom:
-        - configMapRef:
-            name: config
-        args:
-        - ozone
-        - csi
-      volumes:
-      - name: socket-dir
-        emptyDir: null
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/org.apache.hadoop.ozone-csidriver.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/org.apache.hadoop.ozone-csidriver.yaml
deleted file mode 100644
index e657c50..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/org.apache.hadoop.ozone-csidriver.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: storage.k8s.io/v1beta1
-kind: CSIDriver
-metadata:
-  name: org.apache.hadoop.ozone
-spec:
-  attachRequired: false
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/ozone-storageclass.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/ozone-storageclass.yaml
deleted file mode 100644
index c6c1c6c..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/ozone-storageclass.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
-  name: ozone
-provisioner: org.apache.hadoop.ozone
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-service.yaml
deleted file mode 100644
index 929e7a2..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: datanode
-spec:
-  ports:
-  - port: 9870
-    name: rpc
-  clusterIP: None
-  selector:
-    app: ozone
-    component: datanode
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-statefulset.yaml
deleted file mode 100644
index a3aa528..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-statefulset.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: datanode
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: datanode
-  serviceName: datanode
-  replicas: 3
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: datanode
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9882"
-        prometheus.io/path: /prom
-    spec:
-      affinity:
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-              - key: component
-                operator: In
-                values:
-                - datanode
-            topologyKey: kubernetes.io/hostname
-      securityContext:
-        fsGroup: 1000
-      containers:
-      - name: datanode
-        image: '@docker.image@'
-        args:
-        - ozone
-        - datanode
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-  volumeClaimTemplates:
-  - metadata:
-      name: data
-    spec:
-      accessModes:
-      - ReadWriteOnce
-      resources:
-        requests:
-          storage: 2Gi
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/freon/freon-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/freon/freon-deployment.yaml
deleted file mode 100644
index 1662c4e..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/freon/freon-deployment.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: freon
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: ozone
-      component: freon
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: freon
-    spec:
-      containers:
-      - name: freon
-        image: '@docker.image@'
-        args:
-        - ozone
-        - freon
-        - rk
-        - --factor=THREE
-        - --replicationType=RATIS
-        envFrom:
-        - configMapRef:
-            name: config
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/om-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/om-service.yaml
deleted file mode 100644
index 617277d..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/om-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: om
-spec:
-  ports:
-  - port: 9874
-    name: ui
-  clusterIP: None
-  selector:
-    app: ozone
-    component: om
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/om-statefulset.yaml
deleted file mode 100644
index ad0b16e..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/om-statefulset.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: om
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: om
-  serviceName: om
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: om
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9874"
-        prometheus.io/path: /prom
-    spec:
-      securityContext:
-        fsGroup: 1000
-      containers:
-      - name: om
-        image: '@docker.image@'
-        args:
-        - ozone
-        - om
-        env:
-        - name: WAITFOR
-          value: scm-0.scm:9876
-        - name: ENSURE_OM_INITIALIZED
-          value: /data/metadata/om/current/VERSION
-        livenessProbe:
-          tcpSocket:
-            port: 9862
-          initialDelaySeconds: 30
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      volumes: []
-  volumeClaimTemplates:
-  - metadata:
-      name: data
-    spec:
-      accessModes:
-      - ReadWriteOnce
-      resources:
-        requests:
-          storage: 2Gi
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-deployment.yaml
deleted file mode 100644
index 04edcec..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-deployment.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: ozone-csi-test-webserver
-  labels:
-    app: ozone-csi-test-webserver
-  annotations: {}
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: ozone-csi-test-webserver
-  template:
-    metadata:
-      labels:
-        app: ozone-csi-test-webserver
-    spec:
-      containers:
-      - name: web
-        image: python:3.7.3-alpine3.8
-        args:
-        - python
-        - -m
-        - http.server
-        - --directory
-        - /www
-        volumeMounts:
-        - mountPath: /www
-          name: webroot
-      volumes:
-      - name: webroot
-        persistentVolumeClaim:
-          claimName: ozone-csi-test-webserver
-          readOnly: false
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml
deleted file mode 100644
index 4b1e44b..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: ozone-csi-test-webserver
-  labels: {}
-  annotations: {}
-spec:
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: ozone
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-service.yaml
deleted file mode 100644
index 6a53a43..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-service.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: ozone-csi-test-webserver
-  labels: {}
-  annotations: {}
-spec:
-  type: NodePort
-  ports:
-  - port: 8000
-    name: web
-  selector:
-    app: ozone-csi-test-webserver
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-service.yaml
deleted file mode 100644
index dd1ca83..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: s3g
-spec:
-  ports:
-  - port: 9878
-    name: rest
-  clusterIP: None
-  selector:
-    app: ozone
-    component: s3g
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-statefulset.yaml
deleted file mode 100644
index 6e96fb7..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-statefulset.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: s3g
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: s3g
-  serviceName: s3g
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: s3g
-    spec:
-      containers:
-      - name: s3g
-        image: '@docker.image@'
-        args:
-        - ozone
-        - s3g
-        livenessProbe:
-          httpGet:
-            path: /
-            port: 9878
-          initialDelaySeconds: 30
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-  volumeClaimTemplates:
-  - metadata:
-      name: data
-    spec:
-      accessModes:
-      - ReadWriteOnce
-      resources:
-        requests:
-          storage: 2Gi
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-service.yaml
deleted file mode 100644
index 0df15d6..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-service.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: scm
-spec:
-  ports:
-  - port: 9876
-    name: ui
-  clusterIP: None
-  selector:
-    app: ozone
-    component: scm
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-statefulset.yaml
deleted file mode 100644
index d4d6513..0000000
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-statefulset.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: scm
-  labels:
-    app.kubernetes.io/component: ozone
-spec:
-  selector:
-    matchLabels:
-      app: ozone
-      component: scm
-  serviceName: scm
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: ozone
-        component: scm
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9876"
-        prometheus.io/path: /prom
-    spec:
-      securityContext:
-        fsGroup: 1000
-      initContainers:
-      - name: init
-        image: '@docker.image@'
-        args:
-        - ozone
-        - scm
-        - --init
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-      containers:
-      - name: scm
-        image: '@docker.image@'
-        args:
-        - ozone
-        - scm
-        livenessProbe:
-          tcpSocket:
-            port: 9861
-          initialDelaySeconds: 30
-        envFrom:
-        - configMapRef:
-            name: config
-        volumeMounts:
-        - name: data
-          mountPath: /data
-  volumeClaimTemplates:
-  - metadata:
-      name: data
-    spec:
-      accessModes:
-      - ReadWriteOnce
-      resources:
-        requests:
-          storage: 2Gi
diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
deleted file mode 100644
index 6e661af..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
+++ /dev/null
@@ -1,443 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
---------------------------------------------------------------------------------
-This product bundles various third-party components under other open source
-licenses. This section summarizes those components and their licenses.
-See licenses/ for text of these licenses.
-
-EPL
-=====================
-
-   org.eclipse.jetty:jetty-http
-   org.eclipse.jetty:jetty-io
-   org.eclipse.jetty:jetty-security
-   org.eclipse.jetty:jetty-server
-   org.eclipse.jetty:jetty-servlet
-   org.eclipse.jetty:jetty-util
-   org.eclipse.jetty:jetty-util-ajax
-   org.eclipse.jetty:jetty-webapp
-   org.eclipse.jetty:jetty-xml
-
-
-BSD
-=====================
-
-   org.codehaus.woodstox:stax2-api
-
-
-GPL with classpath exception
-=====================
-
-   org.openjdk.jmh:jmh-core
-   org.openjdk.jmh:jmh-generator-annprocess
-
-
-CDDL
-=====================
-
-   com.sun.jersey:jersey-core
-   com.sun.jersey:jersey-json
-   com.sun.jersey:jersey-server
-   com.sun.jersey:jersey-servlet
-   com.sun.xml.bind:jaxb-core
-   com.sun.xml.bind:jaxb-impl
-   javax.activation:activation
-   javax.servlet.jsp:jsp-api
-   javax.ws.rs:javax.ws.rs-api
-   javax.ws.rs:jsr311-api
-   javax.xml.bind:jaxb-api
-   org.glassfish.hk2.external:aopalliance-repackaged
-   org.glassfish.hk2.external:jakarta.inject
-   org.glassfish.hk2.external:javax.inject
-   org.glassfish.hk2:guice-bridge
-   org.glassfish.hk2:hk2-api
-   org.glassfish.hk2:hk2-locator
-   org.glassfish.hk2:hk2-utils
-   org.glassfish.hk2:osgi-resource-locator
-   org.glassfish.jersey.containers:jersey-container-servlet
-   org.glassfish.jersey.containers:jersey-container-servlet-core
-   org.glassfish.jersey.core:jersey-client
-   org.glassfish.jersey.core:jersey-common
-   org.glassfish.jersey.core:jersey-server
-   org.glassfish.jersey.ext.cdi:jersey-cdi1x
-   org.glassfish.jersey.ext:jersey-entity-filtering
-   org.glassfish.jersey.inject:jersey-hk2
-   org.glassfish.jersey.media:jersey-media-jaxb
-   org.glassfish.jersey.media:jersey-media-json-jackson
-
-
-Apache License
-=====================
-
-   com.fasterxml.jackson.core:jackson-annotations
-   com.fasterxml.jackson.core:jackson-core
-   com.fasterxml.jackson.core:jackson-databind
-   com.fasterxml.jackson.dataformat:jackson-dataformat-xml
-   com.fasterxml.jackson.module:jackson-module-jaxb-annotations
-   com.fasterxml.woodstox:woodstox-core
-   com.github.stephenc.jcip:jcip-annotations
-   com.google.api.grpc:proto-google-common-protos
-   com.google.code.gson:gson
-   com.google.errorprone:error_prone_annotations
-   com.google.guava:guava
-   com.google.inject.extensions:guice-assistedinject
-   com.google.inject.extensions:guice-multibindings
-   com.google.inject.extensions:guice-servlet
-   com.google.inject:guice
-   com.jolbox:bonecp
-   com.lmax:disruptor
-   com.nimbusds:nimbus-jose-jwt
-   com.squareup.okhttp3:okhttp
-   com.squareup.okio:okio
-   info.picocli:picocli
-   io.dropwizard.metrics:metrics-core
-   io.grpc:grpc-context
-   io.grpc:grpc-core
-   io.grpc:grpc-netty
-   io.grpc:grpc-protobuf
-   io.grpc:grpc-protobuf-lite
-   io.grpc:grpc-stub
-   io.jaegertracing:jaeger-client
-   io.jaegertracing:jaeger-core
-   io.jaegertracing:jaeger-thrift
-   io.jaegertracing:jaeger-tracerresolver
-   io.netty:netty
-   io.netty:netty-all
-   io.netty:netty-buffer
-   io.netty:netty-codec
-   io.netty:netty-codec-http
-   io.netty:netty-codec-http2
-   io.netty:netty-codec-socks
-   io.netty:netty-common
-   io.netty:netty-handler
-   io.netty:netty-handler-proxy
-   io.netty:netty-resolver
-   io.netty:netty-transport
-   io.netty:netty-transport-native-epoll
-   io.netty:netty-transport-native-unix-common
-   io.opencensus:opencensus-api
-   io.opencensus:opencensus-contrib-grpc-metrics
-   io.opentracing.contrib:opentracing-tracerresolver
-   io.opentracing:opentracing-api
-   io.opentracing:opentracing-noop
-   io.opentracing:opentracing-util
-   javax.enterprise:cdi-api
-   javax.inject:javax.inject
-   javax.validation:validation-api
-   log4j:log4j
-   net.minidev:accessors-smart
-   net.minidev:json-smart
-   org.bouncycastle:bcpkix-jdk15on
-   org.bouncycastle:bcprov-jdk15on
-   org.codehaus.jackson:jackson-core-asl
-   org.codehaus.jackson:jackson-jaxrs
-   org.codehaus.jackson:jackson-mapper-asl
-   org.codehaus.jackson:jackson-xc
-   org.codehaus.jettison:jettison
-   org.hamcrest:hamcrest-all
-   org.javassist:javassist
-   org.jboss.weld.servlet:weld-servlet
-   org.jooq:jooq
-   org.jooq:jooq-codegen
-   org.jooq:jooq-meta
-   org.rocksdb:rocksdbjni
-   org.springframework:spring-beans
-   org.springframework:spring-core
-   org.springframework:spring-jcl
-   org.springframework:spring-jdbc
-   org.springframework:spring-tx
-   org.xerial.snappy:snappy-java
-   org.xerial:sqlite-jdbc
-   org.yaml:snakeyaml
-
-
-MIT
-=====================
-
-   net.sf.jopt-simple:jopt-simple
-   org.codehaus.mojo:animal-sniffer-annotations
-   org.slf4j:slf4j-api
-   org.slf4j:slf4j-log4j12
-
-
-EPL 2.0
-=====================
-
-   jakarta.annotation:jakarta.annotation-api
-
-
-CDDL + GPLv2 with classpath exception
-=====================
-
-   javax.annotation:javax.annotation-api
-   javax.el:javax.el-api
-   javax.interceptor:javax.interceptor-api
-   javax.servlet:javax.servlet-api
-
-
-Public Domain
-=====================
-
-   aopalliance:aopalliance
-   org.tukaani:xz
-
-
-BSD 3-Clause
-=====================
-
-   com.google.code.findbugs:jsr305
-   com.google.protobuf:protobuf-java
-   com.google.protobuf:protobuf-java-util
-   com.google.re2j:re2j
-   com.jcraft:jsch
-   com.thoughtworks.paranamer:paranamer
-   org.fusesource.leveldbjni:leveldbjni-all
-   org.ow2.asm:asm
-
-
-BSD 2-Clause
-=====================
-
-   dnsjava:dnsjava
-
-
---------------------------------------------------------------------------------
-hadoop-hdds-server-scm, hadoop-ozone-ozone-manager, hadoop-ozone-s3gateway and hadoop-hdds-server-framework
-contain the source of the following javascript/css components (see licenses/ for the text of these licenses):
-
-Apache Software Foundation License 2.0
-=====================
-
-nvd3-1.8.5.min.js.map
-nvd3-1.8.5.min.css.map
-nvd3-1.8.5.min.js
-AbstractFuture.java
-TimeoutFuture.java
-
-
-BSD 3-Clause
-=====================
-
-d3-3.5.17.min.js
-glyphicons-*
-
-MIT License
-=====================
-
-bootstrap-3.4.1
-css/bootstrap-*
-bootstrap.min.js
-angular-route-1.6.4.min.js
-angular-nvd3-1.0.9.min.js
-angular-1.6.4.min.js
-jquery-3.4.1.min.js
-
---------------------------------------------------------------------------------
-The recon server uses a large number of javascript and css dependencies. See
-licenses/LICENSE-ozone-recon.txt for the detailed list of the dependencies and licenses.
-
---------------------------------------------------------------------------------
-ratis-thirdparty-misc is a shaded dependency which includes additional 3rd party dependencies in shaded form.
-For the detailed list of the dependencies and the associated licenses see licenses/LICENSE-ratis-thirdparty-misc.txt.
diff --git a/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt b/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt
deleted file mode 100644
index 674b74d..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt
+++ /dev/null
@@ -1,520 +0,0 @@
-Apache Hadoop
-Copyright 2006 and onwards The Apache Software Foundation.
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-Export Control Notice
----------------------
-
-This distribution includes cryptographic software.  The country in
-which you currently reside may have restrictions on the import,
-possession, use, and/or re-export to another country, of
-encryption software.  BEFORE using any encryption software, please
-check your country's laws, regulations and policies concerning the
-import, possession, or use, and re-export of encryption software, to
-see if this is permitted.  See <http://www.wassenaar.org/> for more
-information.
-
-The U.S. Government Department of Commerce, Bureau of Industry and
-Security (BIS), has classified this software as Export Commodity
-Control Number (ECCN) 5D002.C.1, which includes information security
-software using or performing cryptographic functions with asymmetric
-algorithms.  The form and manner of this Apache Software Foundation
-distribution makes it eligible for export under the License Exception
-ENC Technology Software Unrestricted (TSU) exception (see the BIS
-Export Administration Regulations, Section 740.13) for both object
-code and source code.
-
-This software uses the SSL libraries from the Jetty project written
-by mortbay.org and BouncyCastle Java cryptography APIs written by the
- Legion of the Bouncy Castle Inc.
-
-**********************
-THIRD PARTY COMPONENTS
-**********************
-This software includes third party software subject to the following copyrights:
-
-io.netty:netty-all
-====================
-
-
-                            The Netty Project
-                            =================
-
-Please visit the Netty web site for more information:
-
-  * https://netty.io/
-
-Copyright 2014 The Netty Project
-
-The Netty Project licenses this file to you under the Apache License,
-version 2.0 (the "License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at:
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-License for the specific language governing permissions and limitations
-under the License.
-
-Also, please refer to each LICENSE.<component>.txt file, which is located in
-the 'license' directory of the distribution file, for the license terms of the
-components that this product depends on.
-
--------------------------------------------------------------------------------
-This product contains the extensions to Java Collections Framework which have
-been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
-
-  * LICENSE:
-    * license/LICENSE.jsr166y.txt (Public Domain)
-  * HOMEPAGE:
-    * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
-    * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
-
-This product contains a modified version of Robert Harder's Public Domain
-Base64 Encoder and Decoder, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.base64.txt (Public Domain)
-  * HOMEPAGE:
-    * http://iharder.sourceforge.net/current/java/base64/
-
-This product contains a modified portion of 'Webbit', an event based
-WebSocket and HTTP server, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.webbit.txt (BSD License)
-  * HOMEPAGE:
-    * https://github.com/joewalnes/webbit
-
-This product contains a modified portion of 'SLF4J', a simple logging
-facade for Java, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.slf4j.txt (MIT License)
-  * HOMEPAGE:
-    * http://www.slf4j.org/
-
-This product contains a modified portion of 'Apache Harmony', an open source
-Java SE, which can be obtained at:
-
-  * NOTICE:
-    * license/NOTICE.harmony.txt
-  * LICENSE:
-    * license/LICENSE.harmony.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * http://archive.apache.org/dist/harmony/
-
-This product contains a modified portion of 'jbzip2', a Java bzip2 compression
-and decompression library written by Matthew J. Francis. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.jbzip2.txt (MIT License)
-  * HOMEPAGE:
-    * https://code.google.com/p/jbzip2/
-
-This product contains a modified portion of 'libdivsufsort', a C API library to construct
-the suffix array and the Burrows-Wheeler transformed string for any input string of
-a constant-size alphabet written by Yuta Mori. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.libdivsufsort.txt (MIT License)
-  * HOMEPAGE:
-    * https://github.com/y-256/libdivsufsort
-
-This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
- which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.jctools.txt (ASL2 License)
-  * HOMEPAGE:
-    * https://github.com/JCTools/JCTools
-
-This product optionally depends on 'JZlib', a re-implementation of zlib in
-pure Java, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.jzlib.txt (BSD style License)
-  * HOMEPAGE:
-    * http://www.jcraft.com/jzlib/
-
-This product optionally depends on 'Compress-LZF', a Java library for encoding and
-decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.compress-lzf.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/ning/compress
-
-This product optionally depends on 'lz4', a LZ4 Java compression
-and decompression library written by Adrien Grand. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.lz4.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/jpountz/lz4-java
-
-This product optionally depends on 'lzma-java', a LZMA Java compression
-and decompression library, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.lzma-java.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/jponge/lzma-java
-
-This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
-and decompression library written by William Kinney. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.jfastlz.txt (MIT License)
-  * HOMEPAGE:
-    * https://code.google.com/p/jfastlz/
-
-This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
-interchange format, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.protobuf.txt (New BSD License)
-  * HOMEPAGE:
-    * https://github.com/google/protobuf
-
-This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
-a temporary self-signed X.509 certificate when the JVM does not provide the
-equivalent functionality.  It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.bouncycastle.txt (MIT License)
-  * HOMEPAGE:
-    * http://www.bouncycastle.org/
-
-This product optionally depends on 'Snappy', a compression library produced
-by Google Inc, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.snappy.txt (New BSD License)
-  * HOMEPAGE:
-    * https://github.com/google/snappy
-
-This product optionally depends on 'JBoss Marshalling', an alternative Java
-serialization API, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.jboss-marshalling.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/jboss-remoting/jboss-marshalling
-
-This product optionally depends on 'Caliper', Google's micro-
-benchmarking framework, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.caliper.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/google/caliper
-
-This product optionally depends on 'Apache Commons Logging', a logging
-framework, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.commons-logging.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * http://commons.apache.org/logging/
-
-This product optionally depends on 'Apache Log4J', a logging framework, which
-can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.log4j.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * http://logging.apache.org/log4j/
-
-This product optionally depends on 'Aalto XML', an ultra-high performance
-non-blocking XML processor, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.aalto-xml.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * http://wiki.fasterxml.com/AaltoHome
-
-This product contains a modified version of 'HPACK', a Java implementation of
-the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.hpack.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/twitter/hpack
-
-This product contains a modified version of 'HPACK', a Python implementation of
-the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.hyper-hpack.txt (MIT License)
-  * HOMEPAGE:
-    * https://github.com/python-hyper/hpack/
-
-This product contains a modified version of 'HPACK', a C implementation of
-the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.nghttp2-hpack.txt (MIT License)
-  * HOMEPAGE:
-    * https://github.com/nghttp2/nghttp2/
-
-This product contains a modified portion of 'Apache Commons Lang', a Java library
-that provides utilities for the java.lang API, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.commons-lang.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://commons.apache.org/proper/commons-lang/
-
-
-This product contains the Maven wrapper scripts from 'Maven Wrapper', which provides an easy way to ensure a user has everything necessary to run the Maven build.
-
-  * LICENSE:
-    * license/LICENSE.mvn-wrapper.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/takari/maven-wrapper
-
-
-io.grpc:grpc-core
-====================
-
-Copyright 2014 The gRPC Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
------------------------------------------------------------------------
-
-This product contains a modified portion of 'OkHttp', an open source
-HTTP & SPDY client for Android and Java applications, which can be obtained
-at:
-
-  * LICENSE:
-    * okhttp/third_party/okhttp/LICENSE (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/square/okhttp
-  * LOCATION_IN_GRPC:
-    * okhttp/third_party/okhttp
-
-This product contains a modified portion of 'Envoy', an open source
-cloud-native high-performance edge/middle/service proxy, which can be
-obtained at:
-
-  * LICENSE:
-    * xds/third_party/envoy/LICENSE (Apache License 2.0)
-  * NOTICE:
-    * xds/third_party/envoy/NOTICE
-  * HOMEPAGE:
-    * https://www.envoyproxy.io
-  * LOCATION_IN_GRPC:
-    * xds/third_party/envoy
-
-This product contains a modified portion of 'gogoprotobuf',
-an open source Protocol Buffers support for Go with Gadgets,
-which can be obtained at:
-
-  * LICENSE:
-    * xds/third_party/gogoproto/LICENSE
-  * HOMEPAGE:
-    * https://github.com/gogo/protobuf
-  * LOCATION_IN_GRPC:
-    * xds/third_party/gogoproto
-
-This product contains a modified portion of 'protoc-gen-validate (PGV)',
-an open source protoc plugin to generate polyglot message validators,
-which can be obtained at:
-
-  * LICENSE:
-    * xds/third_party/protoc-gen-validate/LICENSE (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/lyft/protoc-gen-validate
-  * LOCATION_IN_GRPC:
-    * xds/third_party/protoc-gen-validate
-
-This product contains a modified portion of 'udpa',
-an open source universal data plane API, which can be obtained at:
-
-  * LICENSE:
-    * xds/third_party/udpa/LICENSE (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/cncf/udpa
-  * LOCATION_IN_GRPC:
-    * xds/third_party/udpa
-
-
-com.fasterxml.jackson.dataformat:jackson-dataformat-xml
-====================
-
-# Jackson JSON processor
-
-Jackson is a high-performance, Free/Open Source JSON processing library.
-It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has
-been in development since 2007.
-It is currently developed by a community of developers, as well as supported
-commercially by FasterXML.com.
-
-## Licensing
-
-Jackson core and extension components may be licensed under different licenses.
-To find the details that apply to this artifact see the accompanying LICENSE file.
-For more information, including possible other licensing options, contact
-FasterXML.com (http://fasterxml.com).
-
-## Credits
-
-A list of contributors may be found in the CREDITS file, which is included
-in some artifacts (usually source distributions), but is always available
-from the source code management (SCM) system the project uses.
-
-
-com.jolbox:bonecp
-====================
-
-   =========================================================================
-   ==  NOTICE file corresponding to the section 4 d of                    ==
-   ==  the Apache License, Version 2.0,                                   ==
-   ==  in this case for the BoneCP (Java connection pool).                ==
-   =========================================================================
-
-   BoneCP
-   Copyright 2010 Wallace Wadge
-
-   This product includes software developed by
-   Wallace Wadge (http://jolbox.com/).
-
-org.codehaus.jackson:jackson-mapper-asl
-====================
-
-This product currently only contains code developed by authors
-of specific components, as identified by the source code files;
-if such notes are missing, files have been created by
-Tatu Saloranta.
-
-For additional credits (generally to people who reported problems)
-see CREDITS file.
-
-
-com.google.inject:guice
-====================
-
-
-Google Guice - Core Library
-Copyright 2006-2015 Google, Inc.
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-
-
-
-com.google.inject.extensions:guice-assistedinject
-====================
-
-
-Google Guice - Extensions - AssistedInject
-Copyright 2006-2015 Google, Inc.
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-
-
-
-org.codehaus.jackson:jackson-xc
-====================
-
-This product currently only contains code developed by authors
-of specific components, as identified by the source code files;
-if such notes are missing, files have been created by
-Tatu Saloranta.
-
-For additional credits (generally to people who reported problems)
-see CREDITS file.
-
-
-org.codehaus.jackson:jackson-jaxrs
-====================
-
-This product currently only contains code developed by authors
-of specific components, as identified by the source code files;
-if such notes are missing, files have been created by
-Tatu Saloranta.
-
-For additional credits (generally to people who reported problems)
-see CREDITS file.
-
-
-com.google.inject.extensions:guice-servlet
-====================
-
-
-Google Guice - Extensions - Servlet
-Copyright 2006-2015 Google, Inc.
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-
-
-
-org.codehaus.jackson:jackson-core-asl
-====================
-
-This product currently only contains code developed by authors
-of specific components, as identified by the source code files;
-if such notes are missing, files have been created by
-Tatu Saloranta.
-
-For additional credits (generally to people who reported problems)
-see CREDITS file.
-
-
-org.bouncycastle:bcprov-jdk15on
-====================
-
-Copyright (c) 2000 - 2019 The Legion of the Bouncy Castle Inc. (https://www.bouncycastle.org)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-Source: https://bouncycastle.org/license.html
-
-log4j:log4j
-====================
-
-Apache log4j
-Copyright 2007 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-com.google.inject.extensions:guice-multibindings
-====================
-
-
-Google Guice - Extensions - MultiBindings
-Copyright 2006-2015 Google, Inc.
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-
-====================
-ratis-thirdparty-misc is a shaded dependency which includes additional 3rd party dependencies in shaded form.
-For the detailed list of the dependencies and the associated NOTICE file see licenses/NOTICE-ratis-thirdparty-misc.txt.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular-nvd3.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular-nvd3.txt
deleted file mode 100644
index d96c6fc..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular-nvd3.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-The MIT License (MIT)
-Copyright (c) 2014 Konstantin Skipor
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software
-and associated documentation files (the "Software"), to deal in the Software without restriction,
-including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
-and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
-LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
-OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular.txt
deleted file mode 100644
index 6f3880f..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License
-
-Copyright (c) 2010-2017 Google, Inc. http://angularjs.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.code.findbugs-jsr305.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.code.findbugs-jsr305.txt
deleted file mode 100644
index 8424760..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.code.findbugs-jsr305.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-The JSR-305 reference implementation (lib/jsr305.jar) is
-distributed under the terms of the New BSD license:
-
-  http://www.opensource.org/licenses/bsd-license.php
-
-See the JSR-305 home page for more information:
-
-  http://code.google.com/p/jsr-305/
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.re2j-re2j.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.re2j-re2j.txt
deleted file mode 100644
index b620ae6..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.re2j-re2j.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-This is a work derived from Russ Cox's RE2 in Go, whose license
-http://golang.org/LICENSE is as follows:
-
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
-
-   * Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in
-     the documentation and/or other materials provided with the
-     distribution.
-
-   * Neither the name of Google Inc. nor the names of its contributors
-     may be used to endorse or promote products derived from this
-     software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.jcraft-jsch.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.jcraft-jsch.txt
deleted file mode 100644
index edd491d..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.jcraft-jsch.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-JSch 0.0.* was released under the GNU LGPL license.  Later, we have switched
-over to a BSD-style license.
-
-------------------------------------------------------------------------------
-Copyright (c) 2002-2015 Atsuhiko Yamanaka, JCraft,Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-  1. Redistributions of source code must retain the above copyright notice,
-     this list of conditions and the following disclaimer.
-
-  2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in
-     the documentation and/or other materials provided with the distribution.
-
-  3. The names of the authors may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
-INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
-INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
-OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.jersey.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.jersey.txt
deleted file mode 100644
index c1eec74..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.jersey.txt
+++ /dev/null
@@ -1,274 +0,0 @@
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)Version 1.1
-
-1. Definitions.
-
-     1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications.
-
-     1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor.
-
-     1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof.
-
-     1.4. "Executable" means the Covered Software in any form other than Source Code.
-
-     1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License.
-
-     1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License.
-
-     1.7. "License" means this document.
-
-     1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein.
-
-     1.9. "Modifications" means the Source Code and Executable form of any of the following:
-
-     A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications;
-
-     B. Any new file that contains any part of the Original Software or previous Modification; or
-
-     C. Any new file that is contributed or otherwise made available under the terms of this License.
-
-     1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License.
-
-     1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor.
-
-     1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code.
-
-     1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
-
-2. License Grants.
-
-     2.1. The Initial Developer Grant.
-
-     Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-     (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and
-
-     (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof).
-
-     (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License.
-
-     (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices.
-
-     2.2. Contributor Grant.
-
-     Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-     (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and
-
-     (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination).
-
-     (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party.
-
-     (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
-     3.1. Availability of Source Code.
-
-     Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange.
-
-     3.2. Modifications.
-
-     The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License.
-
-     3.3. Required Notices.
-
-     You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer.
-
-     3.4. Application of Additional Terms.
-
-     You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer.
-
-     3.5. Distribution of Executable Versions.
-
-     You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer.
-
-     3.6. Larger Works.
-
-     You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software.
-
-4. Versions of the License.
-
-     4.1. New Versions.
-
-     Oracle is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License.
-
-     4.2. Effect of New Versions.
-
-     You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward.
-
-     4.3. Modified Versions.
-
-     When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License.
-
-5. DISCLAIMER OF WARRANTY.
-
-     COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
-     6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive.
-
-     6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant.
-
-     6.3. If You assert a patent infringement claim against Participant alleging that the Participant Software directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license.
-
-     6.4. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
-     UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
-     The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License.
-
-9. MISCELLANEOUS.
-
-     This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
-     As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
-
-----------
-NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)
-The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California.
-
-
-
-
-The GNU General Public License (GPL) Version 2, June 1991
-
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
-
-Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations.
-
-Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and modification follow.
-
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:
-
-   a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.
-
-   b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License.
-
-   c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following:
-
-   a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
-   b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
-   c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.
-
-If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.
-
-5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.
-
-This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.
-
-   One line to give the program's name and a brief idea of what it does.
-
-   Copyright (C) <year> <name of author>
-
-   This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this when it starts in an interactive mode:
-
-   Gnomovision version 69, Copyright (C) year name of author
-   Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names:
-
-   Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-   signature of Ty Coon, 1 April 1989
-   Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.
-
-
-"CLASSPATH" EXCEPTION TO THE GPL VERSION 2
-
-Certain source files distributed by Oracle are subject to the following clarification and special exception to the GPL Version 2, but only where Oracle has expressly included in the particular source file's header the words "Oracle designates this particular file as subject to the "Classpath" exception as provided by Oracle in the License file that accompanied this code."
-
-Linking this library statically or dynamically with other modules is making a combined work based on this library.  Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination.
-
-As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module.  An independent module is a module which is not derived from or based on this library.  If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so.  If you do not wish to do so, delete this exception statement from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.xml.bind.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.xml.bind.txt
deleted file mode 100644
index b1c74f9..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.xml.bind.txt
+++ /dev/null
@@ -1,759 +0,0 @@
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1
-
-1. Definitions.
-
-    1.1. "Contributor" means each individual or entity that creates or
-    contributes to the creation of Modifications.
-
-    1.2. "Contributor Version" means the combination of the Original
-    Software, prior Modifications used by a Contributor (if any), and
-    the Modifications made by that particular Contributor.
-
-    1.3. "Covered Software" means (a) the Original Software, or (b)
-    Modifications, or (c) the combination of files containing Original
-    Software with files containing Modifications, in each case including
-    portions thereof.
-
-    1.4. "Executable" means the Covered Software in any form other than
-    Source Code.
-
-    1.5. "Initial Developer" means the individual or entity that first
-    makes Original Software available under this License.
-
-    1.6. "Larger Work" means a work which combines Covered Software or
-    portions thereof with code not governed by the terms of this License.
-
-    1.7. "License" means this document.
-
-    1.8. "Licensable" means having the right to grant, to the maximum
-    extent possible, whether at the time of the initial grant or
-    subsequently acquired, any and all of the rights conveyed herein.
-
-    1.9. "Modifications" means the Source Code and Executable form of
-    any of the following:
-
-    A. Any file that results from an addition to, deletion from or
-    modification of the contents of a file containing Original Software
-    or previous Modifications;
-
-    B. Any new file that contains any part of the Original Software or
-    previous Modification; or
-
-    C. Any new file that is contributed or otherwise made available
-    under the terms of this License.
-
-    1.10. "Original Software" means the Source Code and Executable form
-    of computer software code that is originally released under this
-    License.
-
-    1.11. "Patent Claims" means any patent claim(s), now owned or
-    hereafter acquired, including without limitation, method, process,
-    and apparatus claims, in any patent Licensable by grantor.
-
-    1.12. "Source Code" means (a) the common form of computer software
-    code in which modifications are made and (b) associated
-    documentation included in or with such code.
-
-    1.13. "You" (or "Your") means an individual or a legal entity
-    exercising rights under, and complying with all of the terms of,
-    this License. For legal entities, "You" includes any entity which
-    controls, is controlled by, or is under common control with You. For
-    purposes of this definition, "control" means (a) the power, direct
-    or indirect, to cause the direction or management of such entity,
-    whether by contract or otherwise, or (b) ownership of more than
-    fifty percent (50%) of the outstanding shares or beneficial
-    ownership of such entity.
-
-2. License Grants.
-
-    2.1. The Initial Developer Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject
-    to third party intellectual property claims, the Initial Developer
-    hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-    (a) under intellectual property rights (other than patent or
-    trademark) Licensable by Initial Developer, to use, reproduce,
-    modify, display, perform, sublicense and distribute the Original
-    Software (or portions thereof), with or without Modifications,
-    and/or as part of a Larger Work; and
-
-    (b) under Patent Claims infringed by the making, using or selling of
-    Original Software, to make, have made, use, practice, sell, and
-    offer for sale, and/or otherwise dispose of the Original Software
-    (or portions thereof).
-
-    (c) The licenses granted in Sections 2.1(a) and (b) are effective on
-    the date Initial Developer first distributes or otherwise makes the
-    Original Software available to a third party under the terms of this
-    License.
-
-    (d) Notwithstanding Section 2.1(b) above, no patent license is
-    granted: (1) for code that You delete from the Original Software, or
-    (2) for infringements caused by: (i) the modification of the
-    Original Software, or (ii) the combination of the Original Software
-    with other software or devices.
-
-    2.2. Contributor Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject
-    to third party intellectual property claims, each Contributor hereby
-    grants You a world-wide, royalty-free, non-exclusive license:
-
-    (a) under intellectual property rights (other than patent or
-    trademark) Licensable by Contributor to use, reproduce, modify,
-    display, perform, sublicense and distribute the Modifications
-    created by such Contributor (or portions thereof), either on an
-    unmodified basis, with other Modifications, as Covered Software
-    and/or as part of a Larger Work; and
-
-    (b) under Patent Claims infringed by the making, using, or selling
-    of Modifications made by that Contributor either alone and/or in
-    combination with its Contributor Version (or portions of such
-    combination), to make, use, sell, offer for sale, have made, and/or
-    otherwise dispose of: (1) Modifications made by that Contributor (or
-    portions thereof); and (2) the combination of Modifications made by
-    that Contributor with its Contributor Version (or portions of such
-    combination).
-
-    (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective
-    on the date Contributor first distributes or otherwise makes the
-    Modifications available to a third party.
-
-    (d) Notwithstanding Section 2.2(b) above, no patent license is
-    granted: (1) for any code that Contributor has deleted from the
-    Contributor Version; (2) for infringements caused by: (i) third
-    party modifications of Contributor Version, or (ii) the combination
-    of Modifications made by that Contributor with other software
-    (except as part of the Contributor Version) or other devices; or (3)
-    under Patent Claims infringed by Covered Software in the absence of
-    Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
-    3.1. Availability of Source Code.
-
-    Any Covered Software that You distribute or otherwise make available
-    in Executable form must also be made available in Source Code form
-    and that Source Code form must be distributed only under the terms
-    of this License. You must include a copy of this License with every
-    copy of the Source Code form of the Covered Software You distribute
-    or otherwise make available. You must inform recipients of any such
-    Covered Software in Executable form as to how they can obtain such
-    Covered Software in Source Code form in a reasonable manner on or
-    through a medium customarily used for software exchange.
-
-    3.2. Modifications.
-
-    The Modifications that You create or to which You contribute are
-    governed by the terms of this License. You represent that You
-    believe Your Modifications are Your original creation(s) and/or You
-    have sufficient rights to grant the rights conveyed by this License.
-
-    3.3. Required Notices.
-
-    You must include a notice in each of Your Modifications that
-    identifies You as the Contributor of the Modification. You may not
-    remove or alter any copyright, patent or trademark notices contained
-    within the Covered Software, or any notices of licensing or any
-    descriptive text giving attribution to any Contributor or the
-    Initial Developer.
-
-    3.4. Application of Additional Terms.
-
-    You may not offer or impose any terms on any Covered Software in
-    Source Code form that alters or restricts the applicable version of
-    this License or the recipients' rights hereunder. You may choose to
-    offer, and to charge a fee for, warranty, support, indemnity or
-    liability obligations to one or more recipients of Covered Software.
-    However, you may do so only on Your own behalf, and not on behalf of
-    the Initial Developer or any Contributor. You must make it
-    absolutely clear that any such warranty, support, indemnity or
-    liability obligation is offered by You alone, and You hereby agree
-    to indemnify the Initial Developer and every Contributor for any
-    liability incurred by the Initial Developer or such Contributor as a
-    result of warranty, support, indemnity or liability terms You offer.
-
-    3.5. Distribution of Executable Versions.
-
-    You may distribute the Executable form of the Covered Software under
-    the terms of this License or under the terms of a license of Your
-    choice, which may contain terms different from this License,
-    provided that You are in compliance with the terms of this License
-    and that the license for the Executable form does not attempt to
-    limit or alter the recipient's rights in the Source Code form from
-    the rights set forth in this License. If You distribute the Covered
-    Software in Executable form under a different license, You must make
-    it absolutely clear that any terms which differ from this License
-    are offered by You alone, not by the Initial Developer or
-    Contributor. You hereby agree to indemnify the Initial Developer and
-    every Contributor for any liability incurred by the Initial
-    Developer or such Contributor as a result of any such terms You offer.
-
-    3.6. Larger Works.
-
-    You may create a Larger Work by combining Covered Software with
-    other code not governed by the terms of this License and distribute
-    the Larger Work as a single product. In such a case, You must make
-    sure the requirements of this License are fulfilled for the Covered
-    Software.
-
-4. Versions of the License.
-
-    4.1. New Versions.
-
-    Oracle is the initial license steward and may publish revised and/or
-    new versions of this License from time to time. Each version will be
-    given a distinguishing version number. Except as provided in Section
-    4.3, no one other than the license steward has the right to modify
-    this License.
-
-    4.2. Effect of New Versions.
-
-    You may always continue to use, distribute or otherwise make the
-    Covered Software available under the terms of the version of the
-    License under which You originally received the Covered Software. If
-    the Initial Developer includes a notice in the Original Software
-    prohibiting it from being distributed or otherwise made available
-    under any subsequent version of the License, You must distribute and
-    make the Covered Software available under the terms of the version
-    of the License under which You originally received the Covered
-    Software. Otherwise, You may also choose to use, distribute or
-    otherwise make the Covered Software available under the terms of any
-    subsequent version of the License published by the license steward.
-
-    4.3. Modified Versions.
-
-    When You are an Initial Developer and You want to create a new
-    license for Your Original Software, You may create and use a
-    modified version of this License if You: (a) rename the license and
-    remove any references to the name of the license steward (except to
-    note that the license differs from this License); and (b) otherwise
-    make it clear that the license contains terms which differ from this
-    License.
-
-5. DISCLAIMER OF WARRANTY.
-
-    COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
-    WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
-    INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE
-    IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR
-    NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF
-    THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE
-    DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY
-    OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING,
-    REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN
-    ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS
-    AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
-    6.1. This License and the rights granted hereunder will terminate
-    automatically if You fail to comply with terms herein and fail to
-    cure such breach within 30 days of becoming aware of the breach.
-    Provisions which, by their nature, must remain in effect beyond the
-    termination of this License shall survive.
-
-    6.2. If You assert a patent infringement claim (excluding
-    declaratory judgment actions) against Initial Developer or a
-    Contributor (the Initial Developer or Contributor against whom You
-    assert such claim is referred to as "Participant") alleging that the
-    Participant Software (meaning the Contributor Version where the
-    Participant is a Contributor or the Original Software where the
-    Participant is the Initial Developer) directly or indirectly
-    infringes any patent, then any and all rights granted directly or
-    indirectly to You by such Participant, the Initial Developer (if the
-    Initial Developer is not the Participant) and all Contributors under
-    Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice
-    from Participant terminate prospectively and automatically at the
-    expiration of such 60 day notice period, unless if within such 60
-    day period You withdraw Your claim with respect to the Participant
-    Software against such Participant either unilaterally or pursuant to
-    a written agreement with Participant.
-
-    6.3. If You assert a patent infringement claim against Participant
-    alleging that the Participant Software directly or indirectly
-    infringes any patent where such claim is resolved (such as by
-    license or settlement) prior to the initiation of patent
-    infringement litigation, then the reasonable value of the licenses
-    granted by such Participant under Sections 2.1 or 2.2 shall be taken
-    into account in determining the amount or value of any payment or
-    license.
-
-    6.4. In the event of termination under Sections 6.1 or 6.2 above,
-    all end user licenses that have been validly granted by You or any
-    distributor hereunder prior to termination (excluding licenses
-    granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
-    UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
-    (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE
-    INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF
-    COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE
-    TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR
-    CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT
-    LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER
-    FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR
-    LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE
-    POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT
-    APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH
-    PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH
-    LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR
-    LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION
-    AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
-    The Covered Software is a "commercial item," as that term is defined
-    in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
-    software" (as that term is defined at 48 C.F.R. §
-    252.227-7014(a)(1)) and "commercial computer software documentation"
-    as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent
-    with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4
-    (June 1995), all U.S. Government End Users acquire Covered Software
-    with only those rights set forth herein. This U.S. Government Rights
-    clause is in lieu of, and supersedes, any other FAR, DFAR, or other
-    clause or provision that addresses Government rights in computer
-    software under this License.
-
-9. MISCELLANEOUS.
-
-    This License represents the complete agreement concerning subject
-    matter hereof. If any provision of this License is held to be
-    unenforceable, such provision shall be reformed only to the extent
-    necessary to make it enforceable. This License shall be governed by
-    the law of the jurisdiction specified in a notice contained within
-    the Original Software (except to the extent applicable law, if any,
-    provides otherwise), excluding such jurisdiction's conflict-of-law
-    provisions. Any litigation relating to this License shall be subject
-    to the jurisdiction of the courts located in the jurisdiction and
-    venue specified in a notice contained within the Original Software,
-    with the losing party responsible for costs, including, without
-    limitation, court costs and reasonable attorneys' fees and expenses.
-    The application of the United Nations Convention on Contracts for
-    the International Sale of Goods is expressly excluded. Any law or
-    regulation which provides that the language of a contract shall be
-    construed against the drafter shall not apply to this License. You
-    agree that You alone are responsible for compliance with the United
-    States export administration regulations (and the export control
-    laws and regulation of any other countries) when You use, distribute
-    or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
-    As between Initial Developer and the Contributors, each party is
-    responsible for claims and damages arising, directly or indirectly,
-    out of its utilization of rights under this License and You agree to
-    work with Initial Developer and Contributors to distribute such
-    responsibility on an equitable basis. Nothing herein is intended or
-    shall be deemed to constitute any admission of liability.
-
-------------------------------------------------------------------------
-
-NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION
-LICENSE (CDDL)
-
-The code released under the CDDL shall be governed by the laws of the
-State of California (excluding conflict-of-law provisions). Any
-litigation relating to this License shall be subject to the jurisdiction
-of the Federal Courts of the Northern District of California and the
-state courts of the State of California, with venue lying in Santa Clara
-County, California.
-
-
-
-  The GNU General Public License (GPL) Version 2, June 1991
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-51 Franklin Street, Fifth Floor
-Boston, MA 02110-1335
-USA
-
-Everyone is permitted to copy and distribute verbatim copies
-of this license document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to
-share and change it. By contrast, the GNU General Public License is
-intended to guarantee your freedom to share and change free software--to
-make sure the software is free for all its users. This General Public
-License applies to most of the Free Software Foundation's software and
-to any other program whose authors commit to using it. (Some other Free
-Software Foundation software is covered by the GNU Library General
-Public License instead.) You can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price.
-Our General Public Licenses are designed to make sure that you have the
-freedom to distribute copies of free software (and charge for this
-service if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs; and that you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone
-to deny you these rights or to ask you to surrender the rights. These
-restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis
-or for a fee, you must give the recipients all the rights that you have.
-You must make sure that they, too, receive or can get the source code.
-And you must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software. If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-Finally, any free program is threatened constantly by software patents.
-We wish to avoid the danger that redistributors of a free program will
-individually obtain patent licenses, in effect making the program
-proprietary. To prevent this, we have made it clear that any patent must
-be licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and
-modification follow.
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a
-notice placed by the copyright holder saying it may be distributed under
-the terms of this General Public License. The "Program", below, refers
-to any such program or work, and a "work based on the Program" means
-either the Program or any derivative work under copyright law: that is
-to say, a work containing the Program or a portion of it, either
-verbatim or with modifications and/or translated into another language.
-(Hereinafter, translation is included without limitation in the term
-"modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope. The act of running
-the Program is not restricted, and the output from the Program is
-covered only if its contents constitute a work based on the Program
-(independent of having been made by running the Program). Whether that
-is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source
-code as you receive it, in any medium, provided that you conspicuously
-and appropriately publish on each copy an appropriate copyright notice
-and disclaimer of warranty; keep intact all the notices that refer to
-this License and to the absence of any warranty; and give any other
-recipients of the Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of
-it, thus forming a work based on the Program, and copy and distribute
-such modifications or work under the terms of Section 1 above, provided
-that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any part
-    thereof, to be licensed as a whole at no charge to all third parties
-    under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a notice
-    that there is no warranty (or else, saying that you provide a
-    warranty) and that users may redistribute the program under these
-    conditions, and telling the user how to view a copy of this License.
-    (Exception: if the Program itself is interactive but does not
-    normally print such an announcement, your work based on the Program
-    is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If
-identifiable sections of that work are not derived from the Program, and
-can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works. But when you
-distribute the same sections as part of a whole which is a work based on
-the Program, the distribution of the whole must be on the terms of this
-License, whose permissions for other licensees extend to the entire
-whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of a
-storage or distribution medium does not bring the other work under the
-scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections 1
-    and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your cost
-    of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer to
-    distribute corresponding source code. (This alternative is allowed
-    only for noncommercial distribution and only if you received the
-    program in object code or executable form with such an offer, in
-    accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it. For an executable work, complete source code
-means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to control
-compilation and installation of the executable. However, as a special
-exception, the source code distributed need not include anything that is
-normally distributed (in either source or binary form) with the major
-components (compiler, kernel, and so on) of the operating system on
-which the executable runs, unless that component itself accompanies the
-executable.
-
-If distribution of executable or object code is made by offering access
-to copy from a designated place, then offering equivalent access to copy
-the source code from the same place counts as distribution of the source
-code, even though third parties are not compelled to copy the source
-along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License. Any attempt otherwise
-to copy, modify, sublicense or distribute the Program is void, and will
-automatically terminate your rights under this License. However, parties
-who have received copies, or rights, from you under this License will
-not have their licenses terminated so long as such parties remain in
-full compliance.
-
-5. You are not required to accept this License, since you have not
-signed it. However, nothing else grants you permission to modify or
-distribute the Program or its derivative works. These actions are
-prohibited by law if you do not accept this License. Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and all
-its terms and conditions for copying, distributing or modifying the
-Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions. You may not impose any further restrictions
-on the recipients' exercise of the rights granted herein. You are not
-responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot distribute
-so as to satisfy simultaneously your obligations under this License and
-any other pertinent obligations, then as a consequence you may not
-distribute the Program at all. For example, if a patent license would
-not permit royalty-free redistribution of the Program by all those who
-receive copies directly or indirectly through you, then the only way you
-could satisfy both it and this License would be to refrain entirely from
-distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is implemented
-by public license practices. Many people have made generous
-contributions to the wide range of software distributed through that
-system in reliance on consistent application of that system; it is up to
-the author/donor to decide if he or she is willing to distribute
-software through any other system and a licensee cannot impose that choice.
-
-This section is intended to make thoroughly clear what is believed to be
-a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License may
-add an explicit geographical distribution limitation excluding those
-countries, so that distribution is permitted only in or among countries
-not thus excluded. In such case, this License incorporates the
-limitation as if written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new
-versions of the General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and
-conditions either of that version or of any later version published by
-the Free Software Foundation. If the Program does not specify a version
-number of this License, you may choose any version ever published by the
-Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the
-author to ask for permission. For software which is copyrighted by the
-Free Software Foundation, write to the Free Software Foundation; we
-sometimes make exceptions for this. Our decision will be guided by the
-two goals of preserving the free status of all derivatives of our free
-software and of promoting the sharing and reuse of software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO
-WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
-OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND,
-EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
-ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
-YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
-NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
-WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
-AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
-DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
-DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
-(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
-INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
-THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
-OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to
-attach them to the start of each source file to most effectively convey
-the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-    One line to give the program's name and a brief idea of what it does.
-    Copyright (C) <year> <name of author>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful, but
-    WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-    General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year name of author
-    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type
-    `show w'. This is free software, and you are welcome to redistribute
-    it under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the
-appropriate parts of the General Public License. Of course, the commands
-you use may be called something other than `show w' and `show c'; they
-could even be mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
-    Yoyodyne, Inc., hereby disclaims all copyright interest in the
-    program `Gnomovision' (which makes passes at compilers) written by
-    James Hacker.
-
-    signature of Ty Coon, 1 April 1989
-    Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications
-with the library. If this is what you want to do, use the GNU Library
-General Public License instead of this License.
-
-#
-
-Certain source files distributed by Oracle America, Inc. and/or its
-affiliates are subject to the following clarification and special
-exception to the GPLv2, based on the GNU Project exception for its
-Classpath libraries, known as the GNU Classpath Exception, but only
-where Oracle has expressly included in the particular source file's
-header the words "Oracle designates this particular file as subject to
-the "Classpath" exception as provided by Oracle in the LICENSE file
-that accompanied this code."
-
-You should also note that Oracle includes multiple, independent
-programs in this software package. Some of those programs are provided
-under licenses deemed incompatible with the GPLv2 by the Free Software
-Foundation and others.  For example, the package includes programs
-licensed under the Apache License, Version 2.0.  Such programs are
-licensed to you under their original licenses.
-
-Oracle facilitates your further distribution of this package by adding
-the Classpath Exception to the necessary parts of its GPLv2 code, which
-permits you to use that code in combination with other independent
-modules not licensed under the GPLv2.  However, note that this would
-not permit you to commingle code under an incompatible license with
-Oracle's GPLv2 licensed code by, for example, cutting and pasting such
-code into a file also containing Oracle's GPLv2 licensed code and then
-distributing the result.  Additionally, if you were to remove the
-Classpath Exception from any of the files to which it applies and
-distribute the result, you would likely be required to license some or
-all of the other code in that distribution under the GPLv2 as well, and
-since the GPLv2 is incompatible with the license terms of some items
-included in the distribution by Oracle, removing the Classpath
-Exception could therefore effectively compromise your ability to
-further distribute the package.
-
-Proceed with caution and we recommend that you obtain the advice of a
-lawyer skilled in open source matters before removing the Classpath
-Exception or making modifications to this package which may
-subsequently be redistributed and/or involve the use of third party
-software.
-
-CLASSPATH EXCEPTION
-Linking this library statically or dynamically with other modules is
-making a combined work based on this library.  Thus, the terms and
-conditions of the GNU General Public License version 2 cover the whole
-combination.
-
-As a special exception, the copyright holders of this library give you
-permission to link this library with independent modules to produce an
-executable, regardless of the license terms of these independent
-modules, and to copy and distribute the resulting executable under
-terms of your choice, provided that you also meet, for each linked
-independent module, the terms and conditions of the license of that
-module.  An independent module is a module which is not derived from or
-based on this library.  If you modify this library, you may extend this
-exception to your version of the library, but you are not obligated to
-do so.  If you do not wish to do so, delete this exception statement
-from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.thoughtworks.paranamer-paranamer.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.thoughtworks.paranamer-paranamer.txt
deleted file mode 100644
index 9eab879..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.thoughtworks.paranamer-paranamer.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-[ ParaNamer used to be 'Public Domain', but since it includes a small piece of ASM it is now the same license as that: BSD ]
-
- Portions copyright (c) 2006-2018 Paul Hammant & ThoughtWorks Inc
- Portions copyright (c) 2000-2007 INRIA, France Telecom
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. Neither the name of the copyright holders nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-d3.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-d3.txt
deleted file mode 100644
index c71e3f2..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-d3.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) 2010-2015, Michael Bostock
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-* The name Michael Bostock may not be used to endorse or promote products
-  derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT,
-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-dnsjava-dnsjava.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-dnsjava-dnsjava.txt
deleted file mode 100644
index 70bae6b9..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-dnsjava-dnsjava.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Copyright (c) 1998-2011, Brian Wellington.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-  * Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
-  * Redistributions in binary form must reproduce the above copyright notice,
-    this list of conditions and the following disclaimer in the documentation
-    and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jakarta.annotation-jakarta.annotation-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jakarta.annotation-jakarta.annotation-api.txt
deleted file mode 100644
index e55f344..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jakarta.annotation-jakarta.annotation-api.txt
+++ /dev/null
@@ -1,277 +0,0 @@
-Eclipse Public License - v 2.0
-
-    THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE
-    PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
-    OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
-
-1. DEFINITIONS
-
-"Contribution" means:
-
-  a) in the case of the initial Contributor, the initial content
-     Distributed under this Agreement, and
-
-  b) in the case of each subsequent Contributor:
-     i) changes to the Program, and
-     ii) additions to the Program;
-  where such changes and/or additions to the Program originate from
-  and are Distributed by that particular Contributor. A Contribution
-  "originates" from a Contributor if it was added to the Program by
-  such Contributor itself or anyone acting on such Contributor's behalf.
-  Contributions do not include changes or additions to the Program that
-  are not Modified Works.
-
-"Contributor" means any person or entity that Distributes the Program.
-
-"Licensed Patents" mean patent claims licensable by a Contributor which
-are necessarily infringed by the use or sale of its Contribution alone
-or when combined with the Program.
-
-"Program" means the Contributions Distributed in accordance with this
-Agreement.
-
-"Recipient" means anyone who receives the Program under this Agreement
-or any Secondary License (as applicable), including Contributors.
-
-"Derivative Works" shall mean any work, whether in Source Code or other
-form, that is based on (or derived from) the Program and for which the
-editorial revisions, annotations, elaborations, or other modifications
-represent, as a whole, an original work of authorship.
-
-"Modified Works" shall mean any work in Source Code or other form that
-results from an addition to, deletion from, or modification of the
-contents of the Program, including, for purposes of clarity any new file
-in Source Code form that contains any contents of the Program. Modified
-Works shall not include works that contain only declarations,
-interfaces, types, classes, structures, or files of the Program solely
-in each case in order to link to, bind by name, or subclass the Program
-or Modified Works thereof.
-
-"Distribute" means the acts of a) distributing or b) making available
-in any manner that enables the transfer of a copy.
-
-"Source Code" means the form of a Program preferred for making
-modifications, including but not limited to software source code,
-documentation source, and configuration files.
-
-"Secondary License" means either the GNU General Public License,
-Version 2.0, or any later versions of that license, including any
-exceptions or additional permissions as identified by the initial
-Contributor.
-
-2. GRANT OF RIGHTS
-
-  a) Subject to the terms of this Agreement, each Contributor hereby
-  grants Recipient a non-exclusive, worldwide, royalty-free copyright
-  license to reproduce, prepare Derivative Works of, publicly display,
-  publicly perform, Distribute and sublicense the Contribution of such
-  Contributor, if any, and such Derivative Works.
-
-  b) Subject to the terms of this Agreement, each Contributor hereby
-  grants Recipient a non-exclusive, worldwide, royalty-free patent
-  license under Licensed Patents to make, use, sell, offer to sell,
-  import and otherwise transfer the Contribution of such Contributor,
-  if any, in Source Code or other form. This patent license shall
-  apply to the combination of the Contribution and the Program if, at
-  the time the Contribution is added by the Contributor, such addition
-  of the Contribution causes such combination to be covered by the
-  Licensed Patents. The patent license shall not apply to any other
-  combinations which include the Contribution. No hardware per se is
-  licensed hereunder.
-
-  c) Recipient understands that although each Contributor grants the
-  licenses to its Contributions set forth herein, no assurances are
-  provided by any Contributor that the Program does not infringe the
-  patent or other intellectual property rights of any other entity.
-  Each Contributor disclaims any liability to Recipient for claims
-  brought by any other entity based on infringement of intellectual
-  property rights or otherwise. As a condition to exercising the
-  rights and licenses granted hereunder, each Recipient hereby
-  assumes sole responsibility to secure any other intellectual
-  property rights needed, if any. For example, if a third party
-  patent license is required to allow Recipient to Distribute the
-  Program, it is Recipient's responsibility to acquire that license
-  before distributing the Program.
-
-  d) Each Contributor represents that to its knowledge it has
-  sufficient copyright rights in its Contribution, if any, to grant
-  the copyright license set forth in this Agreement.
-
-  e) Notwithstanding the terms of any Secondary License, no
-  Contributor makes additional grants to any Recipient (other than
-  those set forth in this Agreement) as a result of such Recipient's
-  receipt of the Program under the terms of a Secondary License
-  (if permitted under the terms of Section 3).
-
-3. REQUIREMENTS
-
-3.1 If a Contributor Distributes the Program in any form, then:
-
-  a) the Program must also be made available as Source Code, in
-  accordance with section 3.2, and the Contributor must accompany
-  the Program with a statement that the Source Code for the Program
-  is available under this Agreement, and informs Recipients how to
-  obtain it in a reasonable manner on or through a medium customarily
-  used for software exchange; and
-
-  b) the Contributor may Distribute the Program under a license
-  different than this Agreement, provided that such license:
-     i) effectively disclaims on behalf of all other Contributors all
-     warranties and conditions, express and implied, including
-     warranties or conditions of title and non-infringement, and
-     implied warranties or conditions of merchantability and fitness
-     for a particular purpose;
-
-     ii) effectively excludes on behalf of all other Contributors all
-     liability for damages, including direct, indirect, special,
-     incidental and consequential damages, such as lost profits;
-
-     iii) does not attempt to limit or alter the recipients' rights
-     in the Source Code under section 3.2; and
-
-     iv) requires any subsequent distribution of the Program by any
-     party to be under a license that satisfies the requirements
-     of this section 3.
-
-3.2 When the Program is Distributed as Source Code:
-
-  a) it must be made available under this Agreement, or if the
-  Program (i) is combined with other material in a separate file or
-  files made available under a Secondary License, and (ii) the initial
-  Contributor attached to the Source Code the notice described in
-  Exhibit A of this Agreement, then the Program may be made available
-  under the terms of such Secondary Licenses, and
-
-  b) a copy of this Agreement must be included with each copy of
-  the Program.
-
-3.3 Contributors may not remove or alter any copyright, patent,
-trademark, attribution notices, disclaimers of warranty, or limitations
-of liability ("notices") contained within the Program from any copy of
-the Program which they Distribute, provided that Contributors may add
-their own appropriate notices.
-
-4. COMMERCIAL DISTRIBUTION
-
-Commercial distributors of software may accept certain responsibilities
-with respect to end users, business partners and the like. While this
-license is intended to facilitate the commercial use of the Program,
-the Contributor who includes the Program in a commercial product
-offering should do so in a manner which does not create potential
-liability for other Contributors. Therefore, if a Contributor includes
-the Program in a commercial product offering, such Contributor
-("Commercial Contributor") hereby agrees to defend and indemnify every
-other Contributor ("Indemnified Contributor") against any losses,
-damages and costs (collectively "Losses") arising from claims, lawsuits
-and other legal actions brought by a third party against the Indemnified
-Contributor to the extent caused by the acts or omissions of such
-Commercial Contributor in connection with its distribution of the Program
-in a commercial product offering. The obligations in this section do not
-apply to any claims or Losses relating to any actual or alleged
-intellectual property infringement. In order to qualify, an Indemnified
-Contributor must: a) promptly notify the Commercial Contributor in
-writing of such claim, and b) allow the Commercial Contributor to control,
-and cooperate with the Commercial Contributor in, the defense and any
-related settlement negotiations. The Indemnified Contributor may
-participate in any such claim at its own expense.
-
-For example, a Contributor might include the Program in a commercial
-product offering, Product X. That Contributor is then a Commercial
-Contributor. If that Commercial Contributor then makes performance
-claims, or offers warranties related to Product X, those performance
-claims and warranties are such Commercial Contributor's responsibility
-alone. Under this section, the Commercial Contributor would have to
-defend claims against the other Contributors related to those performance
-claims and warranties, and if a court requires any other Contributor to
-pay any damages as a result, the Commercial Contributor must pay
-those damages.
-
-5. NO WARRANTY
-
-EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT
-PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS"
-BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR
-IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF
-TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR
-PURPOSE. Each Recipient is solely responsible for determining the
-appropriateness of using and distributing the Program and assumes all
-risks associated with its exercise of rights under this Agreement,
-including but not limited to the risks and costs of program errors,
-compliance with applicable laws, damage to or loss of data, programs
-or equipment, and unavailability or interruption of operations.
-
-6. DISCLAIMER OF LIABILITY
-
-EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT
-PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS
-SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST
-PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE
-EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
-
-7. GENERAL
-
-If any provision of this Agreement is invalid or unenforceable under
-applicable law, it shall not affect the validity or enforceability of
-the remainder of the terms of this Agreement, and without further
-action by the parties hereto, such provision shall be reformed to the
-minimum extent necessary to make such provision valid and enforceable.
-
-If Recipient institutes patent litigation against any entity
-(including a cross-claim or counterclaim in a lawsuit) alleging that the
-Program itself (excluding combinations of the Program with other software
-or hardware) infringes such Recipient's patent(s), then such Recipient's
-rights granted under Section 2(b) shall terminate as of the date such
-litigation is filed.
-
-All Recipient's rights under this Agreement shall terminate if it
-fails to comply with any of the material terms or conditions of this
-Agreement and does not cure such failure in a reasonable period of
-time after becoming aware of such noncompliance. If all Recipient's
-rights under this Agreement terminate, Recipient agrees to cease use
-and distribution of the Program as soon as reasonably practicable.
-However, Recipient's obligations under this Agreement and any licenses
-granted by Recipient relating to the Program shall continue and survive.
-
-Everyone is permitted to copy and distribute copies of this Agreement,
-but in order to avoid inconsistency the Agreement is copyrighted and
-may only be modified in the following manner. The Agreement Steward
-reserves the right to publish new versions (including revisions) of
-this Agreement from time to time. No one other than the Agreement
-Steward has the right to modify this Agreement. The Eclipse Foundation
-is the initial Agreement Steward. The Eclipse Foundation may assign the
-responsibility to serve as the Agreement Steward to a suitable separate
-entity. Each new version of the Agreement will be given a distinguishing
-version number. The Program (including Contributions) may always be
-Distributed subject to the version of the Agreement under which it was
-received. In addition, after a new version of the Agreement is published,
-Contributor may elect to Distribute the Program (including its
-Contributions) under the new version.
-
-Except as expressly stated in Sections 2(a) and 2(b) above, Recipient
-receives no rights or licenses to the intellectual property of any
-Contributor under this Agreement, whether expressly, by implication,
-estoppel or otherwise. All rights in the Program not expressly granted
-under this Agreement are reserved. Nothing in this Agreement is intended
-to be enforceable by any entity that is not a Contributor or Recipient.
-No third-party beneficiary rights are created under this Agreement.
-
-Exhibit A - Form of Secondary Licenses Notice
-
-"This Source Code may also be made available under the following
-Secondary Licenses when the conditions for such availability set forth
-in the Eclipse Public License, v. 2.0 are satisfied: {name license(s),
-version(s), and exceptions or additional permissions here}."
-
-  Simply including a copy of this Agreement, including this Exhibit A
-  is not sufficient to license the Source Code under Secondary Licenses.
-
-  If it is not possible or desirable to put the notice in a particular
-  file, then You may include the notice in a location (such as a LICENSE
-  file in a relevant directory) where a recipient would be likely to
-  look for such a notice.
-
-  You may add additional accurate notices of copyright ownership.
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.activation-activation.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.activation-activation.txt
deleted file mode 100644
index 9be5078..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.activation-activation.txt
+++ /dev/null
@@ -1,134 +0,0 @@
-
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
-
-1. Definitions.
-
-1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications.
-
-1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor.
-
-1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof.
-
-1.4. "Executable" means the Covered Software in any form other than Source Code.
-
-1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License.
-
-1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License.
-
-1.7. "License" means this document.
-
-1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein.
-
-1.9. "Modifications" means the Source Code and Executable form of any of the following:
-
-A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications;
-
-B. Any new file that contains any part of the Original Software or previous Modification; or
-
-C. Any new file that is contributed or otherwise made available under the terms of this License.
-
-1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License.
-
-1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor.
-
-1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code.
-
-1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
-
-2. License Grants.
-
-2.1. The Initial Developer Grant.
-
-Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-(a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and
-
-(b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof).
-
-(c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License.
-
-(d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices.
-
-2.2. Contributor Grant.
-
-Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-(a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and
-
-(b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination).
-
-(c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party.
-
-(d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
-3.1. Availability of Source Code.
-
-Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange.
-
-3.2. Modifications.
-
-The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License.
-
-3.3. Required Notices.
-
-You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer.
-
-3.4. Application of Additional Terms.
-
-You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer.
-
-3.5. Distribution of Executable Versions.
-
-You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer.
-
-3.6. Larger Works.
-
-You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software.
-
-4. Versions of the License.
-
-4.1. New Versions.
-
-Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License.
-
-4.2. Effect of New Versions.
-
-You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward.
-
-4.3. Modified Versions.
-
-When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License.
-
-5. DISCLAIMER OF WARRANTY.
-
-COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
-6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive.
-
-6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant.
-
-6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
-UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
-The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. ¤ 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License.
-
-9. MISCELLANEOUS.
-
-This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
-As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.annotation-javax.annotation-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.annotation-javax.annotation-api.txt
deleted file mode 100644
index a0ccc93..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.annotation-javax.annotation-api.txt
+++ /dev/null
@@ -1,263 +0,0 @@
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
-
-1. Definitions.
-
-   1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications.
-
-   1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor.
-
-   1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof.
-
-   1.4. "Executable" means the Covered Software in any form other than Source Code.
-
-   1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License.
-
-   1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License.
-
-   1.7. "License" means this document.
-
-   1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein.
-
-   1.9. "Modifications" means the Source Code and Executable form of any of the following:
-
-        A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications;
-
-        B. Any new file that contains any part of the Original Software or previous Modification; or
-
-        C. Any new file that is contributed or otherwise made available under the terms of this License.
-
-   1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License.
-
-   1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor.
-
-   1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code.
-
-   1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
-
-2. License Grants.
-
-      2.1. The Initial Developer Grant.
-
-      Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-         (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and
-
-         (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof).
-
-        (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License.
-
-        (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices.
-
-    2.2. Contributor Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-        (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and
-
-        (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination).
-
-        (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party.
-
-        (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
-      3.1. Availability of Source Code.
-      Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange.
-
-      3.2. Modifications.
-      The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License.
-
-      3.3. Required Notices.
-      You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer.
-
-      3.4. Application of Additional Terms.
-      You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer.
-
-      3.5. Distribution of Executable Versions.
-      You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer.
-
-      3.6. Larger Works.
-      You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software.
-
-4. Versions of the License.
-
-      4.1. New Versions.
-      Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License.
-
-      4.2. Effect of New Versions.
-      You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward.
-
-      4.3. Modified Versions.
-      When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License.
-
-5. DISCLAIMER OF WARRANTY.
-
-   COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
-      6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive.
-
-      6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant.
-
-      6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
-   UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
-   The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License.
-
-9. MISCELLANEOUS.
-
-   This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
-   As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
-
-   NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)
-
-   The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California.
-
-
-The GNU General Public License (GPL) Version 2, June 1991
-
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations.
-
-Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and modification follow.
-
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:
-
-   a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.
-
-   b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License.
-
-   c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following:
-
-   a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
-   b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
-   c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.
-
-If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.
-
-5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.
-
-This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.
-
-   One line to give the program's name and a brief idea of what it does.
-
-   Copyright (C) year name of author
-
-   This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this when it starts in an interactive mode:
-
-   Gnomovision version 69, Copyright (C) year name of author
-   Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names:
-
-   Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-   signature of Ty Coon, 1 April 1989
-   Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.
-
-
-"CLASSPATH" EXCEPTION TO THE GPL VERSION 2
-
-Certain source files distributed by Sun Microsystems, Inc. are subject to the following clarification and special exception to the GPL Version 2, but only where Sun has expressly included in the particular source file's header the words
-
-"Sun designates this particular file as subject to the "Classpath" exception as provided by Sun in the License file that accompanied this code."
-
-Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination.
-
-As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.el-javax.el-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.el-javax.el-api.txt
deleted file mode 100644
index a0ccc93..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.el-javax.el-api.txt
+++ /dev/null
@@ -1,263 +0,0 @@
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
-
-1. Definitions.
-
-   1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications.
-
-   1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor.
-
-   1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof.
-
-   1.4. "Executable" means the Covered Software in any form other than Source Code.
-
-   1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License.
-
-   1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License.
-
-   1.7. "License" means this document.
-
-   1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein.
-
-   1.9. "Modifications" means the Source Code and Executable form of any of the following:
-
-        A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications;
-
-        B. Any new file that contains any part of the Original Software or previous Modification; or
-
-        C. Any new file that is contributed or otherwise made available under the terms of this License.
-
-   1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License.
-
-   1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor.
-
-   1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code.
-
-   1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
-
-2. License Grants.
-
-      2.1. The Initial Developer Grant.
-
-      Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-         (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and
-
-         (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof).
-
-        (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License.
-
-        (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices.
-
-    2.2. Contributor Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-        (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and
-
-        (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination).
-
-        (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party.
-
-        (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
-      3.1. Availability of Source Code.
-      Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange.
-
-      3.2. Modifications.
-      The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License.
-
-      3.3. Required Notices.
-      You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer.
-
-      3.4. Application of Additional Terms.
-      You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer.
-
-      3.5. Distribution of Executable Versions.
-      You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer.
-
-      3.6. Larger Works.
-      You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software.
-
-4. Versions of the License.
-
-      4.1. New Versions.
-      Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License.
-
-      4.2. Effect of New Versions.
-      You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward.
-
-      4.3. Modified Versions.
-      When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License.
-
-5. DISCLAIMER OF WARRANTY.
-
-   COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
-      6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive.
-
-      6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant.
-
-      6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
-   UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
-   The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License.
-
-9. MISCELLANEOUS.
-
-   This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
-   As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
-
-   NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)
-
-   The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California.
-
-
-The GNU General Public License (GPL) Version 2, June 1991
-
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations.
-
-Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and modification follow.
-
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:
-
-   a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.
-
-   b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License.
-
-   c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following:
-
-   a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
-   b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
-   c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.
-
-If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.
-
-5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.
-
-This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.
-
-   One line to give the program's name and a brief idea of what it does.
-
-   Copyright (C)
-
-   This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this when it starts in an interactive mode:
-
-   Gnomovision version 69, Copyright (C) year name of author
-   Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names:
-
-   Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-   signature of Ty Coon, 1 April 1989
-   Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.
-
-
-"CLASSPATH" EXCEPTION TO THE GPL VERSION 2
-
-Certain source files distributed by Sun Microsystems, Inc. are subject to the following clarification and special exception to the GPL Version 2, but only where Sun has expressly included in the particular source file's header the words
-
-"Sun designates this particular file as subject to the "Classpath" exception as provided by Sun in the License file that accompanied this code."
-
-Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination.
-
-As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.interceptor-javax.interceptor-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.interceptor-javax.interceptor-api.txt
deleted file mode 100644
index a0ccc93..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.interceptor-javax.interceptor-api.txt
+++ /dev/null
@@ -1,263 +0,0 @@
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
-
-1. Definitions.
-
-   1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications.
-
-   1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor.
-
-   1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof.
-
-   1.4. "Executable" means the Covered Software in any form other than Source Code.
-
-   1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License.
-
-   1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License.
-
-   1.7. "License" means this document.
-
-   1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein.
-
-   1.9. "Modifications" means the Source Code and Executable form of any of the following:
-
-        A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications;
-
-        B. Any new file that contains any part of the Original Software or previous Modification; or
-
-        C. Any new file that is contributed or otherwise made available under the terms of this License.
-
-   1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License.
-
-   1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor.
-
-   1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code.
-
-   1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
-
-2. License Grants.
-
-      2.1. The Initial Developer Grant.
-
-      Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-         (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and
-
-         (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof).
-
-        (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License.
-
-        (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices.
-
-    2.2. Contributor Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-        (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and
-
-        (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination).
-
-        (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party.
-
-        (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
-      3.1. Availability of Source Code.
-      Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange.
-
-      3.2. Modifications.
-      The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License.
-
-      3.3. Required Notices.
-      You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer.
-
-      3.4. Application of Additional Terms.
-      You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer.
-
-      3.5. Distribution of Executable Versions.
-      You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer.
-
-      3.6. Larger Works.
-      You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software.
-
-4. Versions of the License.
-
-      4.1. New Versions.
-      Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License.
-
-      4.2. Effect of New Versions.
-      You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward.
-
-      4.3. Modified Versions.
-      When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License.
-
-5. DISCLAIMER OF WARRANTY.
-
-   COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
-      6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive.
-
-      6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant.
-
-      6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
-   UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
-   The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License.
-
-9. MISCELLANEOUS.
-
-   This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
-   As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
-
-   NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)
-
-   The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California.
-
-
-The GNU General Public License (GPL) Version 2, June 1991
-
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations.
-
-Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and modification follow.
-
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:
-
-   a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.
-
-   b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License.
-
-   c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following:
-
-   a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
-   b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
-   c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.
-
-If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.
-
-5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.
-
-This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.
-
-   One line to give the program's name and a brief idea of what it does.
-
-   Copyright (C)
-
-   This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this when it starts in an interactive mode:
-
-   Gnomovision version 69, Copyright (C) year name of author
-   Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names:
-
-   Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-   signature of Ty Coon, 1 April 1989
-   Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.
-
-
-"CLASSPATH" EXCEPTION TO THE GPL VERSION 2
-
-Certain source files distributed by Sun Microsystems, Inc. are subject to the following clarification and special exception to the GPL Version 2, but only where Sun has expressly included in the particular source file's header the words
-
-"Sun designates this particular file as subject to the "Classpath" exception as provided by Sun in the License file that accompanied this code."
-
-Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination.
-
-As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet-javax.servlet-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet-javax.servlet-api.txt
deleted file mode 100644
index a0ccc93..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet-javax.servlet-api.txt
+++ /dev/null
@@ -1,263 +0,0 @@
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
-
-1. Definitions.
-
-   1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications.
-
-   1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor.
-
-   1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof.
-
-   1.4. "Executable" means the Covered Software in any form other than Source Code.
-
-   1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License.
-
-   1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License.
-
-   1.7. "License" means this document.
-
-   1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein.
-
-   1.9. "Modifications" means the Source Code and Executable form of any of the following:
-
-        A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications;
-
-        B. Any new file that contains any part of the Original Software or previous Modification; or
-
-        C. Any new file that is contributed or otherwise made available under the terms of this License.
-
-   1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License.
-
-   1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor.
-
-   1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code.
-
-   1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
-
-2. License Grants.
-
-      2.1. The Initial Developer Grant.
-
-      Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-         (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and
-
-         (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof).
-
-        (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License.
-
-        (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices.
-
-    2.2. Contributor Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-        (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and
-
-        (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination).
-
-        (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party.
-
-        (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
-      3.1. Availability of Source Code.
-      Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange.
-
-      3.2. Modifications.
-      The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License.
-
-      3.3. Required Notices.
-      You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer.
-
-      3.4. Application of Additional Terms.
-      You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer.
-
-      3.5. Distribution of Executable Versions.
-      You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer.
-
-      3.6. Larger Works.
-      You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software.
-
-4. Versions of the License.
-
-      4.1. New Versions.
-      Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License.
-
-      4.2. Effect of New Versions.
-      You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward.
-
-      4.3. Modified Versions.
-      When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License.
-
-5. DISCLAIMER OF WARRANTY.
-
-   COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
-      6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive.
-
-      6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant.
-
-      6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
-   UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
-   The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License.
-
-9. MISCELLANEOUS.
-
-   This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
-   As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
-
-   NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)
-
-   The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California.
-
-
-The GNU General Public License (GPL) Version 2, June 1991
-
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations.
-
-Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and modification follow.
-
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:
-
-   a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.
-
-   b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License.
-
-   c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following:
-
-   a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
-   b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
-   c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.
-
-If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.
-
-5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.
-
-This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.
-
-   One line to give the program's name and a brief idea of what it does.
-
-   Copyright (C)
-
-   This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this when it starts in an interactive mode:
-
-   Gnomovision version 69, Copyright (C) year name of author
-   Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names:
-
-   Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-   signature of Ty Coon, 1 April 1989
-   Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.
-
-
-"CLASSPATH" EXCEPTION TO THE GPL VERSION 2
-
-Certain source files distributed by Sun Microsystems, Inc. are subject to the following clarification and special exception to the GPL Version 2, but only where Sun has expressly included in the particular source file's header the words
-
-"Sun designates this particular file as subject to the "Classpath" exception as provided by Sun in the License file that accompanied this code."
-
-Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination.
-
-As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet.jsp-jsp-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet.jsp-jsp-api.txt
deleted file mode 100644
index b1c74f9..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet.jsp-jsp-api.txt
+++ /dev/null
@@ -1,759 +0,0 @@
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1
-
-1. Definitions.
-
-    1.1. "Contributor" means each individual or entity that creates or
-    contributes to the creation of Modifications.
-
-    1.2. "Contributor Version" means the combination of the Original
-    Software, prior Modifications used by a Contributor (if any), and
-    the Modifications made by that particular Contributor.
-
-    1.3. "Covered Software" means (a) the Original Software, or (b)
-    Modifications, or (c) the combination of files containing Original
-    Software with files containing Modifications, in each case including
-    portions thereof.
-
-    1.4. "Executable" means the Covered Software in any form other than
-    Source Code.
-
-    1.5. "Initial Developer" means the individual or entity that first
-    makes Original Software available under this License.
-
-    1.6. "Larger Work" means a work which combines Covered Software or
-    portions thereof with code not governed by the terms of this License.
-
-    1.7. "License" means this document.
-
-    1.8. "Licensable" means having the right to grant, to the maximum
-    extent possible, whether at the time of the initial grant or
-    subsequently acquired, any and all of the rights conveyed herein.
-
-    1.9. "Modifications" means the Source Code and Executable form of
-    any of the following:
-
-    A. Any file that results from an addition to, deletion from or
-    modification of the contents of a file containing Original Software
-    or previous Modifications;
-
-    B. Any new file that contains any part of the Original Software or
-    previous Modification; or
-
-    C. Any new file that is contributed or otherwise made available
-    under the terms of this License.
-
-    1.10. "Original Software" means the Source Code and Executable form
-    of computer software code that is originally released under this
-    License.
-
-    1.11. "Patent Claims" means any patent claim(s), now owned or
-    hereafter acquired, including without limitation, method, process,
-    and apparatus claims, in any patent Licensable by grantor.
-
-    1.12. "Source Code" means (a) the common form of computer software
-    code in which modifications are made and (b) associated
-    documentation included in or with such code.
-
-    1.13. "You" (or "Your") means an individual or a legal entity
-    exercising rights under, and complying with all of the terms of,
-    this License. For legal entities, "You" includes any entity which
-    controls, is controlled by, or is under common control with You. For
-    purposes of this definition, "control" means (a) the power, direct
-    or indirect, to cause the direction or management of such entity,
-    whether by contract or otherwise, or (b) ownership of more than
-    fifty percent (50%) of the outstanding shares or beneficial
-    ownership of such entity.
-
-2. License Grants.
-
-    2.1. The Initial Developer Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject
-    to third party intellectual property claims, the Initial Developer
-    hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-    (a) under intellectual property rights (other than patent or
-    trademark) Licensable by Initial Developer, to use, reproduce,
-    modify, display, perform, sublicense and distribute the Original
-    Software (or portions thereof), with or without Modifications,
-    and/or as part of a Larger Work; and
-
-    (b) under Patent Claims infringed by the making, using or selling of
-    Original Software, to make, have made, use, practice, sell, and
-    offer for sale, and/or otherwise dispose of the Original Software
-    (or portions thereof).
-
-    (c) The licenses granted in Sections 2.1(a) and (b) are effective on
-    the date Initial Developer first distributes or otherwise makes the
-    Original Software available to a third party under the terms of this
-    License.
-
-    (d) Notwithstanding Section 2.1(b) above, no patent license is
-    granted: (1) for code that You delete from the Original Software, or
-    (2) for infringements caused by: (i) the modification of the
-    Original Software, or (ii) the combination of the Original Software
-    with other software or devices.
-
-    2.2. Contributor Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject
-    to third party intellectual property claims, each Contributor hereby
-    grants You a world-wide, royalty-free, non-exclusive license:
-
-    (a) under intellectual property rights (other than patent or
-    trademark) Licensable by Contributor to use, reproduce, modify,
-    display, perform, sublicense and distribute the Modifications
-    created by such Contributor (or portions thereof), either on an
-    unmodified basis, with other Modifications, as Covered Software
-    and/or as part of a Larger Work; and
-
-    (b) under Patent Claims infringed by the making, using, or selling
-    of Modifications made by that Contributor either alone and/or in
-    combination with its Contributor Version (or portions of such
-    combination), to make, use, sell, offer for sale, have made, and/or
-    otherwise dispose of: (1) Modifications made by that Contributor (or
-    portions thereof); and (2) the combination of Modifications made by
-    that Contributor with its Contributor Version (or portions of such
-    combination).
-
-    (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective
-    on the date Contributor first distributes or otherwise makes the
-    Modifications available to a third party.
-
-    (d) Notwithstanding Section 2.2(b) above, no patent license is
-    granted: (1) for any code that Contributor has deleted from the
-    Contributor Version; (2) for infringements caused by: (i) third
-    party modifications of Contributor Version, or (ii) the combination
-    of Modifications made by that Contributor with other software
-    (except as part of the Contributor Version) or other devices; or (3)
-    under Patent Claims infringed by Covered Software in the absence of
-    Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
-    3.1. Availability of Source Code.
-
-    Any Covered Software that You distribute or otherwise make available
-    in Executable form must also be made available in Source Code form
-    and that Source Code form must be distributed only under the terms
-    of this License. You must include a copy of this License with every
-    copy of the Source Code form of the Covered Software You distribute
-    or otherwise make available. You must inform recipients of any such
-    Covered Software in Executable form as to how they can obtain such
-    Covered Software in Source Code form in a reasonable manner on or
-    through a medium customarily used for software exchange.
-
-    3.2. Modifications.
-
-    The Modifications that You create or to which You contribute are
-    governed by the terms of this License. You represent that You
-    believe Your Modifications are Your original creation(s) and/or You
-    have sufficient rights to grant the rights conveyed by this License.
-
-    3.3. Required Notices.
-
-    You must include a notice in each of Your Modifications that
-    identifies You as the Contributor of the Modification. You may not
-    remove or alter any copyright, patent or trademark notices contained
-    within the Covered Software, or any notices of licensing or any
-    descriptive text giving attribution to any Contributor or the
-    Initial Developer.
-
-    3.4. Application of Additional Terms.
-
-    You may not offer or impose any terms on any Covered Software in
-    Source Code form that alters or restricts the applicable version of
-    this License or the recipients' rights hereunder. You may choose to
-    offer, and to charge a fee for, warranty, support, indemnity or
-    liability obligations to one or more recipients of Covered Software.
-    However, you may do so only on Your own behalf, and not on behalf of
-    the Initial Developer or any Contributor. You must make it
-    absolutely clear that any such warranty, support, indemnity or
-    liability obligation is offered by You alone, and You hereby agree
-    to indemnify the Initial Developer and every Contributor for any
-    liability incurred by the Initial Developer or such Contributor as a
-    result of warranty, support, indemnity or liability terms You offer.
-
-    3.5. Distribution of Executable Versions.
-
-    You may distribute the Executable form of the Covered Software under
-    the terms of this License or under the terms of a license of Your
-    choice, which may contain terms different from this License,
-    provided that You are in compliance with the terms of this License
-    and that the license for the Executable form does not attempt to
-    limit or alter the recipient's rights in the Source Code form from
-    the rights set forth in this License. If You distribute the Covered
-    Software in Executable form under a different license, You must make
-    it absolutely clear that any terms which differ from this License
-    are offered by You alone, not by the Initial Developer or
-    Contributor. You hereby agree to indemnify the Initial Developer and
-    every Contributor for any liability incurred by the Initial
-    Developer or such Contributor as a result of any such terms You offer.
-
-    3.6. Larger Works.
-
-    You may create a Larger Work by combining Covered Software with
-    other code not governed by the terms of this License and distribute
-    the Larger Work as a single product. In such a case, You must make
-    sure the requirements of this License are fulfilled for the Covered
-    Software.
-
-4. Versions of the License.
-
-    4.1. New Versions.
-
-    Oracle is the initial license steward and may publish revised and/or
-    new versions of this License from time to time. Each version will be
-    given a distinguishing version number. Except as provided in Section
-    4.3, no one other than the license steward has the right to modify
-    this License.
-
-    4.2. Effect of New Versions.
-
-    You may always continue to use, distribute or otherwise make the
-    Covered Software available under the terms of the version of the
-    License under which You originally received the Covered Software. If
-    the Initial Developer includes a notice in the Original Software
-    prohibiting it from being distributed or otherwise made available
-    under any subsequent version of the License, You must distribute and
-    make the Covered Software available under the terms of the version
-    of the License under which You originally received the Covered
-    Software. Otherwise, You may also choose to use, distribute or
-    otherwise make the Covered Software available under the terms of any
-    subsequent version of the License published by the license steward.
-
-    4.3. Modified Versions.
-
-    When You are an Initial Developer and You want to create a new
-    license for Your Original Software, You may create and use a
-    modified version of this License if You: (a) rename the license and
-    remove any references to the name of the license steward (except to
-    note that the license differs from this License); and (b) otherwise
-    make it clear that the license contains terms which differ from this
-    License.
-
-5. DISCLAIMER OF WARRANTY.
-
-    COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
-    WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
-    INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE
-    IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR
-    NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF
-    THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE
-    DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY
-    OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING,
-    REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN
-    ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS
-    AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
-    6.1. This License and the rights granted hereunder will terminate
-    automatically if You fail to comply with terms herein and fail to
-    cure such breach within 30 days of becoming aware of the breach.
-    Provisions which, by their nature, must remain in effect beyond the
-    termination of this License shall survive.
-
-    6.2. If You assert a patent infringement claim (excluding
-    declaratory judgment actions) against Initial Developer or a
-    Contributor (the Initial Developer or Contributor against whom You
-    assert such claim is referred to as "Participant") alleging that the
-    Participant Software (meaning the Contributor Version where the
-    Participant is a Contributor or the Original Software where the
-    Participant is the Initial Developer) directly or indirectly
-    infringes any patent, then any and all rights granted directly or
-    indirectly to You by such Participant, the Initial Developer (if the
-    Initial Developer is not the Participant) and all Contributors under
-    Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice
-    from Participant terminate prospectively and automatically at the
-    expiration of such 60 day notice period, unless if within such 60
-    day period You withdraw Your claim with respect to the Participant
-    Software against such Participant either unilaterally or pursuant to
-    a written agreement with Participant.
-
-    6.3. If You assert a patent infringement claim against Participant
-    alleging that the Participant Software directly or indirectly
-    infringes any patent where such claim is resolved (such as by
-    license or settlement) prior to the initiation of patent
-    infringement litigation, then the reasonable value of the licenses
-    granted by such Participant under Sections 2.1 or 2.2 shall be taken
-    into account in determining the amount or value of any payment or
-    license.
-
-    6.4. In the event of termination under Sections 6.1 or 6.2 above,
-    all end user licenses that have been validly granted by You or any
-    distributor hereunder prior to termination (excluding licenses
-    granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
-    UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
-    (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE
-    INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF
-    COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE
-    TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR
-    CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT
-    LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER
-    FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR
-    LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE
-    POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT
-    APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH
-    PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH
-    LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR
-    LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION
-    AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
-    The Covered Software is a "commercial item," as that term is defined
-    in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
-    software" (as that term is defined at 48 C.F.R. §
-    252.227-7014(a)(1)) and "commercial computer software documentation"
-    as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent
-    with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4
-    (June 1995), all U.S. Government End Users acquire Covered Software
-    with only those rights set forth herein. This U.S. Government Rights
-    clause is in lieu of, and supersedes, any other FAR, DFAR, or other
-    clause or provision that addresses Government rights in computer
-    software under this License.
-
-9. MISCELLANEOUS.
-
-    This License represents the complete agreement concerning subject
-    matter hereof. If any provision of this License is held to be
-    unenforceable, such provision shall be reformed only to the extent
-    necessary to make it enforceable. This License shall be governed by
-    the law of the jurisdiction specified in a notice contained within
-    the Original Software (except to the extent applicable law, if any,
-    provides otherwise), excluding such jurisdiction's conflict-of-law
-    provisions. Any litigation relating to this License shall be subject
-    to the jurisdiction of the courts located in the jurisdiction and
-    venue specified in a notice contained within the Original Software,
-    with the losing party responsible for costs, including, without
-    limitation, court costs and reasonable attorneys' fees and expenses.
-    The application of the United Nations Convention on Contracts for
-    the International Sale of Goods is expressly excluded. Any law or
-    regulation which provides that the language of a contract shall be
-    construed against the drafter shall not apply to this License. You
-    agree that You alone are responsible for compliance with the United
-    States export administration regulations (and the export control
-    laws and regulation of any other countries) when You use, distribute
-    or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
-    As between Initial Developer and the Contributors, each party is
-    responsible for claims and damages arising, directly or indirectly,
-    out of its utilization of rights under this License and You agree to
-    work with Initial Developer and Contributors to distribute such
-    responsibility on an equitable basis. Nothing herein is intended or
-    shall be deemed to constitute any admission of liability.
-
-------------------------------------------------------------------------
-
-NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION
-LICENSE (CDDL)
-
-The code released under the CDDL shall be governed by the laws of the
-State of California (excluding conflict-of-law provisions). Any
-litigation relating to this License shall be subject to the jurisdiction
-of the Federal Courts of the Northern District of California and the
-state courts of the State of California, with venue lying in Santa Clara
-County, California.
-
-
-
-  The GNU General Public License (GPL) Version 2, June 1991
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-51 Franklin Street, Fifth Floor
-Boston, MA 02110-1335
-USA
-
-Everyone is permitted to copy and distribute verbatim copies
-of this license document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to
-share and change it. By contrast, the GNU General Public License is
-intended to guarantee your freedom to share and change free software--to
-make sure the software is free for all its users. This General Public
-License applies to most of the Free Software Foundation's software and
-to any other program whose authors commit to using it. (Some other Free
-Software Foundation software is covered by the GNU Library General
-Public License instead.) You can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price.
-Our General Public Licenses are designed to make sure that you have the
-freedom to distribute copies of free software (and charge for this
-service if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs; and that you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone
-to deny you these rights or to ask you to surrender the rights. These
-restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis
-or for a fee, you must give the recipients all the rights that you have.
-You must make sure that they, too, receive or can get the source code.
-And you must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software. If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-Finally, any free program is threatened constantly by software patents.
-We wish to avoid the danger that redistributors of a free program will
-individually obtain patent licenses, in effect making the program
-proprietary. To prevent this, we have made it clear that any patent must
-be licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and
-modification follow.
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a
-notice placed by the copyright holder saying it may be distributed under
-the terms of this General Public License. The "Program", below, refers
-to any such program or work, and a "work based on the Program" means
-either the Program or any derivative work under copyright law: that is
-to say, a work containing the Program or a portion of it, either
-verbatim or with modifications and/or translated into another language.
-(Hereinafter, translation is included without limitation in the term
-"modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope. The act of running
-the Program is not restricted, and the output from the Program is
-covered only if its contents constitute a work based on the Program
-(independent of having been made by running the Program). Whether that
-is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source
-code as you receive it, in any medium, provided that you conspicuously
-and appropriately publish on each copy an appropriate copyright notice
-and disclaimer of warranty; keep intact all the notices that refer to
-this License and to the absence of any warranty; and give any other
-recipients of the Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of
-it, thus forming a work based on the Program, and copy and distribute
-such modifications or work under the terms of Section 1 above, provided
-that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any part
-    thereof, to be licensed as a whole at no charge to all third parties
-    under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a notice
-    that there is no warranty (or else, saying that you provide a
-    warranty) and that users may redistribute the program under these
-    conditions, and telling the user how to view a copy of this License.
-    (Exception: if the Program itself is interactive but does not
-    normally print such an announcement, your work based on the Program
-    is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If
-identifiable sections of that work are not derived from the Program, and
-can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works. But when you
-distribute the same sections as part of a whole which is a work based on
-the Program, the distribution of the whole must be on the terms of this
-License, whose permissions for other licensees extend to the entire
-whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of a
-storage or distribution medium does not bring the other work under the
-scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections 1
-    and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your cost
-    of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer to
-    distribute corresponding source code. (This alternative is allowed
-    only for noncommercial distribution and only if you received the
-    program in object code or executable form with such an offer, in
-    accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it. For an executable work, complete source code
-means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to control
-compilation and installation of the executable. However, as a special
-exception, the source code distributed need not include anything that is
-normally distributed (in either source or binary form) with the major
-components (compiler, kernel, and so on) of the operating system on
-which the executable runs, unless that component itself accompanies the
-executable.
-
-If distribution of executable or object code is made by offering access
-to copy from a designated place, then offering equivalent access to copy
-the source code from the same place counts as distribution of the source
-code, even though third parties are not compelled to copy the source
-along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License. Any attempt otherwise
-to copy, modify, sublicense or distribute the Program is void, and will
-automatically terminate your rights under this License. However, parties
-who have received copies, or rights, from you under this License will
-not have their licenses terminated so long as such parties remain in
-full compliance.
-
-5. You are not required to accept this License, since you have not
-signed it. However, nothing else grants you permission to modify or
-distribute the Program or its derivative works. These actions are
-prohibited by law if you do not accept this License. Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and all
-its terms and conditions for copying, distributing or modifying the
-Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions. You may not impose any further restrictions
-on the recipients' exercise of the rights granted herein. You are not
-responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot distribute
-so as to satisfy simultaneously your obligations under this License and
-any other pertinent obligations, then as a consequence you may not
-distribute the Program at all. For example, if a patent license would
-not permit royalty-free redistribution of the Program by all those who
-receive copies directly or indirectly through you, then the only way you
-could satisfy both it and this License would be to refrain entirely from
-distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is implemented
-by public license practices. Many people have made generous
-contributions to the wide range of software distributed through that
-system in reliance on consistent application of that system; it is up to
-the author/donor to decide if he or she is willing to distribute
-software through any other system and a licensee cannot impose that choice.
-
-This section is intended to make thoroughly clear what is believed to be
-a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License may
-add an explicit geographical distribution limitation excluding those
-countries, so that distribution is permitted only in or among countries
-not thus excluded. In such case, this License incorporates the
-limitation as if written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new
-versions of the General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and
-conditions either of that version or of any later version published by
-the Free Software Foundation. If the Program does not specify a version
-number of this License, you may choose any version ever published by the
-Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the
-author to ask for permission. For software which is copyrighted by the
-Free Software Foundation, write to the Free Software Foundation; we
-sometimes make exceptions for this. Our decision will be guided by the
-two goals of preserving the free status of all derivatives of our free
-software and of promoting the sharing and reuse of software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO
-WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
-OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND,
-EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
-ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
-YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
-NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
-WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
-AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
-DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
-DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
-(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
-INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
-THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
-OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to
-attach them to the start of each source file to most effectively convey
-the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-    One line to give the program's name and a brief idea of what it does.
-    Copyright (C) <year> <name of author>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful, but
-    WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-    General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year name of author
-    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type
-    `show w'. This is free software, and you are welcome to redistribute
-    it under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the
-appropriate parts of the General Public License. Of course, the commands
-you use may be called something other than `show w' and `show c'; they
-could even be mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
-    Yoyodyne, Inc., hereby disclaims all copyright interest in the
-    program `Gnomovision' (which makes passes at compilers) written by
-    James Hacker.
-
-    signature of Ty Coon, 1 April 1989
-    Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications
-with the library. If this is what you want to do, use the GNU Library
-General Public License instead of this License.
-
-#
-
-Certain source files distributed by Oracle America, Inc. and/or its
-affiliates are subject to the following clarification and special
-exception to the GPLv2, based on the GNU Project exception for its
-Classpath libraries, known as the GNU Classpath Exception, but only
-where Oracle has expressly included in the particular source file's
-header the words "Oracle designates this particular file as subject to
-the "Classpath" exception as provided by Oracle in the LICENSE file
-that accompanied this code."
-
-You should also note that Oracle includes multiple, independent
-programs in this software package. Some of those programs are provided
-under licenses deemed incompatible with the GPLv2 by the Free Software
-Foundation and others.  For example, the package includes programs
-licensed under the Apache License, Version 2.0.  Such programs are
-licensed to you under their original licenses.
-
-Oracle facilitates your further distribution of this package by adding
-the Classpath Exception to the necessary parts of its GPLv2 code, which
-permits you to use that code in combination with other independent
-modules not licensed under the GPLv2.  However, note that this would
-not permit you to commingle code under an incompatible license with
-Oracle's GPLv2 licensed code by, for example, cutting and pasting such
-code into a file also containing Oracle's GPLv2 licensed code and then
-distributing the result.  Additionally, if you were to remove the
-Classpath Exception from any of the files to which it applies and
-distribute the result, you would likely be required to license some or
-all of the other code in that distribution under the GPLv2 as well, and
-since the GPLv2 is incompatible with the license terms of some items
-included in the distribution by Oracle, removing the Classpath
-Exception could therefore effectively compromise your ability to
-further distribute the package.
-
-Proceed with caution and we recommend that you obtain the advice of a
-lawyer skilled in open source matters before removing the Classpath
-Exception or making modifications to this package which may
-subsequently be redistributed and/or involve the use of third party
-software.
-
-CLASSPATH EXCEPTION
-Linking this library statically or dynamically with other modules is
-making a combined work based on this library.  Thus, the terms and
-conditions of the GNU General Public License version 2 cover the whole
-combination.
-
-As a special exception, the copyright holders of this library give you
-permission to link this library with independent modules to produce an
-executable, regardless of the license terms of these independent
-modules, and to copy and distribute the resulting executable under
-terms of your choice, provided that you also meet, for each linked
-independent module, the terms and conditions of the license of that
-module.  An independent module is a module which is not derived from or
-based on this library.  If you modify this library, you may extend this
-exception to your version of the library, but you are not obligated to
-do so.  If you do not wish to do so, delete this exception statement
-from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-javax.ws.rs-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-javax.ws.rs-api.txt
deleted file mode 100644
index b1c74f9..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-javax.ws.rs-api.txt
+++ /dev/null
@@ -1,759 +0,0 @@
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1
-
-1. Definitions.
-
-    1.1. "Contributor" means each individual or entity that creates or
-    contributes to the creation of Modifications.
-
-    1.2. "Contributor Version" means the combination of the Original
-    Software, prior Modifications used by a Contributor (if any), and
-    the Modifications made by that particular Contributor.
-
-    1.3. "Covered Software" means (a) the Original Software, or (b)
-    Modifications, or (c) the combination of files containing Original
-    Software with files containing Modifications, in each case including
-    portions thereof.
-
-    1.4. "Executable" means the Covered Software in any form other than
-    Source Code.
-
-    1.5. "Initial Developer" means the individual or entity that first
-    makes Original Software available under this License.
-
-    1.6. "Larger Work" means a work which combines Covered Software or
-    portions thereof with code not governed by the terms of this License.
-
-    1.7. "License" means this document.
-
-    1.8. "Licensable" means having the right to grant, to the maximum
-    extent possible, whether at the time of the initial grant or
-    subsequently acquired, any and all of the rights conveyed herein.
-
-    1.9. "Modifications" means the Source Code and Executable form of
-    any of the following:
-
-    A. Any file that results from an addition to, deletion from or
-    modification of the contents of a file containing Original Software
-    or previous Modifications;
-
-    B. Any new file that contains any part of the Original Software or
-    previous Modification; or
-
-    C. Any new file that is contributed or otherwise made available
-    under the terms of this License.
-
-    1.10. "Original Software" means the Source Code and Executable form
-    of computer software code that is originally released under this
-    License.
-
-    1.11. "Patent Claims" means any patent claim(s), now owned or
-    hereafter acquired, including without limitation, method, process,
-    and apparatus claims, in any patent Licensable by grantor.
-
-    1.12. "Source Code" means (a) the common form of computer software
-    code in which modifications are made and (b) associated
-    documentation included in or with such code.
-
-    1.13. "You" (or "Your") means an individual or a legal entity
-    exercising rights under, and complying with all of the terms of,
-    this License. For legal entities, "You" includes any entity which
-    controls, is controlled by, or is under common control with You. For
-    purposes of this definition, "control" means (a) the power, direct
-    or indirect, to cause the direction or management of such entity,
-    whether by contract or otherwise, or (b) ownership of more than
-    fifty percent (50%) of the outstanding shares or beneficial
-    ownership of such entity.
-
-2. License Grants.
-
-    2.1. The Initial Developer Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject
-    to third party intellectual property claims, the Initial Developer
-    hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-    (a) under intellectual property rights (other than patent or
-    trademark) Licensable by Initial Developer, to use, reproduce,
-    modify, display, perform, sublicense and distribute the Original
-    Software (or portions thereof), with or without Modifications,
-    and/or as part of a Larger Work; and
-
-    (b) under Patent Claims infringed by the making, using or selling of
-    Original Software, to make, have made, use, practice, sell, and
-    offer for sale, and/or otherwise dispose of the Original Software
-    (or portions thereof).
-
-    (c) The licenses granted in Sections 2.1(a) and (b) are effective on
-    the date Initial Developer first distributes or otherwise makes the
-    Original Software available to a third party under the terms of this
-    License.
-
-    (d) Notwithstanding Section 2.1(b) above, no patent license is
-    granted: (1) for code that You delete from the Original Software, or
-    (2) for infringements caused by: (i) the modification of the
-    Original Software, or (ii) the combination of the Original Software
-    with other software or devices.
-
-    2.2. Contributor Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject
-    to third party intellectual property claims, each Contributor hereby
-    grants You a world-wide, royalty-free, non-exclusive license:
-
-    (a) under intellectual property rights (other than patent or
-    trademark) Licensable by Contributor to use, reproduce, modify,
-    display, perform, sublicense and distribute the Modifications
-    created by such Contributor (or portions thereof), either on an
-    unmodified basis, with other Modifications, as Covered Software
-    and/or as part of a Larger Work; and
-
-    (b) under Patent Claims infringed by the making, using, or selling
-    of Modifications made by that Contributor either alone and/or in
-    combination with its Contributor Version (or portions of such
-    combination), to make, use, sell, offer for sale, have made, and/or
-    otherwise dispose of: (1) Modifications made by that Contributor (or
-    portions thereof); and (2) the combination of Modifications made by
-    that Contributor with its Contributor Version (or portions of such
-    combination).
-
-    (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective
-    on the date Contributor first distributes or otherwise makes the
-    Modifications available to a third party.
-
-    (d) Notwithstanding Section 2.2(b) above, no patent license is
-    granted: (1) for any code that Contributor has deleted from the
-    Contributor Version; (2) for infringements caused by: (i) third
-    party modifications of Contributor Version, or (ii) the combination
-    of Modifications made by that Contributor with other software
-    (except as part of the Contributor Version) or other devices; or (3)
-    under Patent Claims infringed by Covered Software in the absence of
-    Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
-    3.1. Availability of Source Code.
-
-    Any Covered Software that You distribute or otherwise make available
-    in Executable form must also be made available in Source Code form
-    and that Source Code form must be distributed only under the terms
-    of this License. You must include a copy of this License with every
-    copy of the Source Code form of the Covered Software You distribute
-    or otherwise make available. You must inform recipients of any such
-    Covered Software in Executable form as to how they can obtain such
-    Covered Software in Source Code form in a reasonable manner on or
-    through a medium customarily used for software exchange.
-
-    3.2. Modifications.
-
-    The Modifications that You create or to which You contribute are
-    governed by the terms of this License. You represent that You
-    believe Your Modifications are Your original creation(s) and/or You
-    have sufficient rights to grant the rights conveyed by this License.
-
-    3.3. Required Notices.
-
-    You must include a notice in each of Your Modifications that
-    identifies You as the Contributor of the Modification. You may not
-    remove or alter any copyright, patent or trademark notices contained
-    within the Covered Software, or any notices of licensing or any
-    descriptive text giving attribution to any Contributor or the
-    Initial Developer.
-
-    3.4. Application of Additional Terms.
-
-    You may not offer or impose any terms on any Covered Software in
-    Source Code form that alters or restricts the applicable version of
-    this License or the recipients' rights hereunder. You may choose to
-    offer, and to charge a fee for, warranty, support, indemnity or
-    liability obligations to one or more recipients of Covered Software.
-    However, you may do so only on Your own behalf, and not on behalf of
-    the Initial Developer or any Contributor. You must make it
-    absolutely clear that any such warranty, support, indemnity or
-    liability obligation is offered by You alone, and You hereby agree
-    to indemnify the Initial Developer and every Contributor for any
-    liability incurred by the Initial Developer or such Contributor as a
-    result of warranty, support, indemnity or liability terms You offer.
-
-    3.5. Distribution of Executable Versions.
-
-    You may distribute the Executable form of the Covered Software under
-    the terms of this License or under the terms of a license of Your
-    choice, which may contain terms different from this License,
-    provided that You are in compliance with the terms of this License
-    and that the license for the Executable form does not attempt to
-    limit or alter the recipient's rights in the Source Code form from
-    the rights set forth in this License. If You distribute the Covered
-    Software in Executable form under a different license, You must make
-    it absolutely clear that any terms which differ from this License
-    are offered by You alone, not by the Initial Developer or
-    Contributor. You hereby agree to indemnify the Initial Developer and
-    every Contributor for any liability incurred by the Initial
-    Developer or such Contributor as a result of any such terms You offer.
-
-    3.6. Larger Works.
-
-    You may create a Larger Work by combining Covered Software with
-    other code not governed by the terms of this License and distribute
-    the Larger Work as a single product. In such a case, You must make
-    sure the requirements of this License are fulfilled for the Covered
-    Software.
-
-4. Versions of the License.
-
-    4.1. New Versions.
-
-    Oracle is the initial license steward and may publish revised and/or
-    new versions of this License from time to time. Each version will be
-    given a distinguishing version number. Except as provided in Section
-    4.3, no one other than the license steward has the right to modify
-    this License.
-
-    4.2. Effect of New Versions.
-
-    You may always continue to use, distribute or otherwise make the
-    Covered Software available under the terms of the version of the
-    License under which You originally received the Covered Software. If
-    the Initial Developer includes a notice in the Original Software
-    prohibiting it from being distributed or otherwise made available
-    under any subsequent version of the License, You must distribute and
-    make the Covered Software available under the terms of the version
-    of the License under which You originally received the Covered
-    Software. Otherwise, You may also choose to use, distribute or
-    otherwise make the Covered Software available under the terms of any
-    subsequent version of the License published by the license steward.
-
-    4.3. Modified Versions.
-
-    When You are an Initial Developer and You want to create a new
-    license for Your Original Software, You may create and use a
-    modified version of this License if You: (a) rename the license and
-    remove any references to the name of the license steward (except to
-    note that the license differs from this License); and (b) otherwise
-    make it clear that the license contains terms which differ from this
-    License.
-
-5. DISCLAIMER OF WARRANTY.
-
-    COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
-    WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
-    INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE
-    IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR
-    NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF
-    THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE
-    DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY
-    OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING,
-    REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN
-    ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS
-    AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
-    6.1. This License and the rights granted hereunder will terminate
-    automatically if You fail to comply with terms herein and fail to
-    cure such breach within 30 days of becoming aware of the breach.
-    Provisions which, by their nature, must remain in effect beyond the
-    termination of this License shall survive.
-
-    6.2. If You assert a patent infringement claim (excluding
-    declaratory judgment actions) against Initial Developer or a
-    Contributor (the Initial Developer or Contributor against whom You
-    assert such claim is referred to as "Participant") alleging that the
-    Participant Software (meaning the Contributor Version where the
-    Participant is a Contributor or the Original Software where the
-    Participant is the Initial Developer) directly or indirectly
-    infringes any patent, then any and all rights granted directly or
-    indirectly to You by such Participant, the Initial Developer (if the
-    Initial Developer is not the Participant) and all Contributors under
-    Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice
-    from Participant terminate prospectively and automatically at the
-    expiration of such 60 day notice period, unless if within such 60
-    day period You withdraw Your claim with respect to the Participant
-    Software against such Participant either unilaterally or pursuant to
-    a written agreement with Participant.
-
-    6.3. If You assert a patent infringement claim against Participant
-    alleging that the Participant Software directly or indirectly
-    infringes any patent where such claim is resolved (such as by
-    license or settlement) prior to the initiation of patent
-    infringement litigation, then the reasonable value of the licenses
-    granted by such Participant under Sections 2.1 or 2.2 shall be taken
-    into account in determining the amount or value of any payment or
-    license.
-
-    6.4. In the event of termination under Sections 6.1 or 6.2 above,
-    all end user licenses that have been validly granted by You or any
-    distributor hereunder prior to termination (excluding licenses
-    granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
-    UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
-    (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE
-    INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF
-    COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE
-    TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR
-    CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT
-    LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER
-    FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR
-    LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE
-    POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT
-    APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH
-    PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH
-    LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR
-    LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION
-    AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
-    The Covered Software is a "commercial item," as that term is defined
-    in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
-    software" (as that term is defined at 48 C.F.R. §
-    252.227-7014(a)(1)) and "commercial computer software documentation"
-    as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent
-    with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4
-    (June 1995), all U.S. Government End Users acquire Covered Software
-    with only those rights set forth herein. This U.S. Government Rights
-    clause is in lieu of, and supersedes, any other FAR, DFAR, or other
-    clause or provision that addresses Government rights in computer
-    software under this License.
-
-9. MISCELLANEOUS.
-
-    This License represents the complete agreement concerning subject
-    matter hereof. If any provision of this License is held to be
-    unenforceable, such provision shall be reformed only to the extent
-    necessary to make it enforceable. This License shall be governed by
-    the law of the jurisdiction specified in a notice contained within
-    the Original Software (except to the extent applicable law, if any,
-    provides otherwise), excluding such jurisdiction's conflict-of-law
-    provisions. Any litigation relating to this License shall be subject
-    to the jurisdiction of the courts located in the jurisdiction and
-    venue specified in a notice contained within the Original Software,
-    with the losing party responsible for costs, including, without
-    limitation, court costs and reasonable attorneys' fees and expenses.
-    The application of the United Nations Convention on Contracts for
-    the International Sale of Goods is expressly excluded. Any law or
-    regulation which provides that the language of a contract shall be
-    construed against the drafter shall not apply to this License. You
-    agree that You alone are responsible for compliance with the United
-    States export administration regulations (and the export control
-    laws and regulation of any other countries) when You use, distribute
-    or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
-    As between Initial Developer and the Contributors, each party is
-    responsible for claims and damages arising, directly or indirectly,
-    out of its utilization of rights under this License and You agree to
-    work with Initial Developer and Contributors to distribute such
-    responsibility on an equitable basis. Nothing herein is intended or
-    shall be deemed to constitute any admission of liability.
-
-------------------------------------------------------------------------
-
-NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION
-LICENSE (CDDL)
-
-The code released under the CDDL shall be governed by the laws of the
-State of California (excluding conflict-of-law provisions). Any
-litigation relating to this License shall be subject to the jurisdiction
-of the Federal Courts of the Northern District of California and the
-state courts of the State of California, with venue lying in Santa Clara
-County, California.
-
-
-
-  The GNU General Public License (GPL) Version 2, June 1991
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-51 Franklin Street, Fifth Floor
-Boston, MA 02110-1335
-USA
-
-Everyone is permitted to copy and distribute verbatim copies
-of this license document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to
-share and change it. By contrast, the GNU General Public License is
-intended to guarantee your freedom to share and change free software--to
-make sure the software is free for all its users. This General Public
-License applies to most of the Free Software Foundation's software and
-to any other program whose authors commit to using it. (Some other Free
-Software Foundation software is covered by the GNU Library General
-Public License instead.) You can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price.
-Our General Public Licenses are designed to make sure that you have the
-freedom to distribute copies of free software (and charge for this
-service if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs; and that you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone
-to deny you these rights or to ask you to surrender the rights. These
-restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis
-or for a fee, you must give the recipients all the rights that you have.
-You must make sure that they, too, receive or can get the source code.
-And you must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software. If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-Finally, any free program is threatened constantly by software patents.
-We wish to avoid the danger that redistributors of a free program will
-individually obtain patent licenses, in effect making the program
-proprietary. To prevent this, we have made it clear that any patent must
-be licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and
-modification follow.
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a
-notice placed by the copyright holder saying it may be distributed under
-the terms of this General Public License. The "Program", below, refers
-to any such program or work, and a "work based on the Program" means
-either the Program or any derivative work under copyright law: that is
-to say, a work containing the Program or a portion of it, either
-verbatim or with modifications and/or translated into another language.
-(Hereinafter, translation is included without limitation in the term
-"modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope. The act of running
-the Program is not restricted, and the output from the Program is
-covered only if its contents constitute a work based on the Program
-(independent of having been made by running the Program). Whether that
-is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source
-code as you receive it, in any medium, provided that you conspicuously
-and appropriately publish on each copy an appropriate copyright notice
-and disclaimer of warranty; keep intact all the notices that refer to
-this License and to the absence of any warranty; and give any other
-recipients of the Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of
-it, thus forming a work based on the Program, and copy and distribute
-such modifications or work under the terms of Section 1 above, provided
-that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any part
-    thereof, to be licensed as a whole at no charge to all third parties
-    under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a notice
-    that there is no warranty (or else, saying that you provide a
-    warranty) and that users may redistribute the program under these
-    conditions, and telling the user how to view a copy of this License.
-    (Exception: if the Program itself is interactive but does not
-    normally print such an announcement, your work based on the Program
-    is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If
-identifiable sections of that work are not derived from the Program, and
-can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works. But when you
-distribute the same sections as part of a whole which is a work based on
-the Program, the distribution of the whole must be on the terms of this
-License, whose permissions for other licensees extend to the entire
-whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of a
-storage or distribution medium does not bring the other work under the
-scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections 1
-    and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your cost
-    of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer to
-    distribute corresponding source code. (This alternative is allowed
-    only for noncommercial distribution and only if you received the
-    program in object code or executable form with such an offer, in
-    accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it. For an executable work, complete source code
-means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to control
-compilation and installation of the executable. However, as a special
-exception, the source code distributed need not include anything that is
-normally distributed (in either source or binary form) with the major
-components (compiler, kernel, and so on) of the operating system on
-which the executable runs, unless that component itself accompanies the
-executable.
-
-If distribution of executable or object code is made by offering access
-to copy from a designated place, then offering equivalent access to copy
-the source code from the same place counts as distribution of the source
-code, even though third parties are not compelled to copy the source
-along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License. Any attempt otherwise
-to copy, modify, sublicense or distribute the Program is void, and will
-automatically terminate your rights under this License. However, parties
-who have received copies, or rights, from you under this License will
-not have their licenses terminated so long as such parties remain in
-full compliance.
-
-5. You are not required to accept this License, since you have not
-signed it. However, nothing else grants you permission to modify or
-distribute the Program or its derivative works. These actions are
-prohibited by law if you do not accept this License. Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and all
-its terms and conditions for copying, distributing or modifying the
-Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions. You may not impose any further restrictions
-on the recipients' exercise of the rights granted herein. You are not
-responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot distribute
-so as to satisfy simultaneously your obligations under this License and
-any other pertinent obligations, then as a consequence you may not
-distribute the Program at all. For example, if a patent license would
-not permit royalty-free redistribution of the Program by all those who
-receive copies directly or indirectly through you, then the only way you
-could satisfy both it and this License would be to refrain entirely from
-distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is implemented
-by public license practices. Many people have made generous
-contributions to the wide range of software distributed through that
-system in reliance on consistent application of that system; it is up to
-the author/donor to decide if he or she is willing to distribute
-software through any other system and a licensee cannot impose that choice.
-
-This section is intended to make thoroughly clear what is believed to be
-a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License may
-add an explicit geographical distribution limitation excluding those
-countries, so that distribution is permitted only in or among countries
-not thus excluded. In such case, this License incorporates the
-limitation as if written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new
-versions of the General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and
-conditions either of that version or of any later version published by
-the Free Software Foundation. If the Program does not specify a version
-number of this License, you may choose any version ever published by the
-Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the
-author to ask for permission. For software which is copyrighted by the
-Free Software Foundation, write to the Free Software Foundation; we
-sometimes make exceptions for this. Our decision will be guided by the
-two goals of preserving the free status of all derivatives of our free
-software and of promoting the sharing and reuse of software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO
-WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
-OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND,
-EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
-ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
-YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
-NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
-WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
-AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
-DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
-DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
-(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
-INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
-THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
-OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to
-attach them to the start of each source file to most effectively convey
-the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-    One line to give the program's name and a brief idea of what it does.
-    Copyright (C) <year> <name of author>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful, but
-    WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-    General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year name of author
-    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type
-    `show w'. This is free software, and you are welcome to redistribute
-    it under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the
-appropriate parts of the General Public License. Of course, the commands
-you use may be called something other than `show w' and `show c'; they
-could even be mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
-    Yoyodyne, Inc., hereby disclaims all copyright interest in the
-    program `Gnomovision' (which makes passes at compilers) written by
-    James Hacker.
-
-    signature of Ty Coon, 1 April 1989
-    Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications
-with the library. If this is what you want to do, use the GNU Library
-General Public License instead of this License.
-
-#
-
-Certain source files distributed by Oracle America, Inc. and/or its
-affiliates are subject to the following clarification and special
-exception to the GPLv2, based on the GNU Project exception for its
-Classpath libraries, known as the GNU Classpath Exception, but only
-where Oracle has expressly included in the particular source file's
-header the words "Oracle designates this particular file as subject to
-the "Classpath" exception as provided by Oracle in the LICENSE file
-that accompanied this code."
-
-You should also note that Oracle includes multiple, independent
-programs in this software package. Some of those programs are provided
-under licenses deemed incompatible with the GPLv2 by the Free Software
-Foundation and others.  For example, the package includes programs
-licensed under the Apache License, Version 2.0.  Such programs are
-licensed to you under their original licenses.
-
-Oracle facilitates your further distribution of this package by adding
-the Classpath Exception to the necessary parts of its GPLv2 code, which
-permits you to use that code in combination with other independent
-modules not licensed under the GPLv2.  However, note that this would
-not permit you to commingle code under an incompatible license with
-Oracle's GPLv2 licensed code by, for example, cutting and pasting such
-code into a file also containing Oracle's GPLv2 licensed code and then
-distributing the result.  Additionally, if you were to remove the
-Classpath Exception from any of the files to which it applies and
-distribute the result, you would likely be required to license some or
-all of the other code in that distribution under the GPLv2 as well, and
-since the GPLv2 is incompatible with the license terms of some items
-included in the distribution by Oracle, removing the Classpath
-Exception could therefore effectively compromise your ability to
-further distribute the package.
-
-Proceed with caution and we recommend that you obtain the advice of a
-lawyer skilled in open source matters before removing the Classpath
-Exception or making modifications to this package which may
-subsequently be redistributed and/or involve the use of third party
-software.
-
-CLASSPATH EXCEPTION
-Linking this library statically or dynamically with other modules is
-making a combined work based on this library.  Thus, the terms and
-conditions of the GNU General Public License version 2 cover the whole
-combination.
-
-As a special exception, the copyright holders of this library give you
-permission to link this library with independent modules to produce an
-executable, regardless of the license terms of these independent
-modules, and to copy and distribute the resulting executable under
-terms of your choice, provided that you also meet, for each linked
-independent module, the terms and conditions of the license of that
-module.  An independent module is a module which is not derived from or
-based on this library.  If you modify this library, you may extend this
-exception to your version of the library, but you are not obligated to
-do so.  If you do not wish to do so, delete this exception statement
-from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-jsr311-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-jsr311-api.txt
deleted file mode 100644
index b1c74f9..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-jsr311-api.txt
+++ /dev/null
@@ -1,759 +0,0 @@
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1
-
-1. Definitions.
-
-    1.1. "Contributor" means each individual or entity that creates or
-    contributes to the creation of Modifications.
-
-    1.2. "Contributor Version" means the combination of the Original
-    Software, prior Modifications used by a Contributor (if any), and
-    the Modifications made by that particular Contributor.
-
-    1.3. "Covered Software" means (a) the Original Software, or (b)
-    Modifications, or (c) the combination of files containing Original
-    Software with files containing Modifications, in each case including
-    portions thereof.
-
-    1.4. "Executable" means the Covered Software in any form other than
-    Source Code.
-
-    1.5. "Initial Developer" means the individual or entity that first
-    makes Original Software available under this License.
-
-    1.6. "Larger Work" means a work which combines Covered Software or
-    portions thereof with code not governed by the terms of this License.
-
-    1.7. "License" means this document.
-
-    1.8. "Licensable" means having the right to grant, to the maximum
-    extent possible, whether at the time of the initial grant or
-    subsequently acquired, any and all of the rights conveyed herein.
-
-    1.9. "Modifications" means the Source Code and Executable form of
-    any of the following:
-
-    A. Any file that results from an addition to, deletion from or
-    modification of the contents of a file containing Original Software
-    or previous Modifications;
-
-    B. Any new file that contains any part of the Original Software or
-    previous Modification; or
-
-    C. Any new file that is contributed or otherwise made available
-    under the terms of this License.
-
-    1.10. "Original Software" means the Source Code and Executable form
-    of computer software code that is originally released under this
-    License.
-
-    1.11. "Patent Claims" means any patent claim(s), now owned or
-    hereafter acquired, including without limitation, method, process,
-    and apparatus claims, in any patent Licensable by grantor.
-
-    1.12. "Source Code" means (a) the common form of computer software
-    code in which modifications are made and (b) associated
-    documentation included in or with such code.
-
-    1.13. "You" (or "Your") means an individual or a legal entity
-    exercising rights under, and complying with all of the terms of,
-    this License. For legal entities, "You" includes any entity which
-    controls, is controlled by, or is under common control with You. For
-    purposes of this definition, "control" means (a) the power, direct
-    or indirect, to cause the direction or management of such entity,
-    whether by contract or otherwise, or (b) ownership of more than
-    fifty percent (50%) of the outstanding shares or beneficial
-    ownership of such entity.
-
-2. License Grants.
-
-    2.1. The Initial Developer Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject
-    to third party intellectual property claims, the Initial Developer
-    hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-    (a) under intellectual property rights (other than patent or
-    trademark) Licensable by Initial Developer, to use, reproduce,
-    modify, display, perform, sublicense and distribute the Original
-    Software (or portions thereof), with or without Modifications,
-    and/or as part of a Larger Work; and
-
-    (b) under Patent Claims infringed by the making, using or selling of
-    Original Software, to make, have made, use, practice, sell, and
-    offer for sale, and/or otherwise dispose of the Original Software
-    (or portions thereof).
-
-    (c) The licenses granted in Sections 2.1(a) and (b) are effective on
-    the date Initial Developer first distributes or otherwise makes the
-    Original Software available to a third party under the terms of this
-    License.
-
-    (d) Notwithstanding Section 2.1(b) above, no patent license is
-    granted: (1) for code that You delete from the Original Software, or
-    (2) for infringements caused by: (i) the modification of the
-    Original Software, or (ii) the combination of the Original Software
-    with other software or devices.
-
-    2.2. Contributor Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject
-    to third party intellectual property claims, each Contributor hereby
-    grants You a world-wide, royalty-free, non-exclusive license:
-
-    (a) under intellectual property rights (other than patent or
-    trademark) Licensable by Contributor to use, reproduce, modify,
-    display, perform, sublicense and distribute the Modifications
-    created by such Contributor (or portions thereof), either on an
-    unmodified basis, with other Modifications, as Covered Software
-    and/or as part of a Larger Work; and
-
-    (b) under Patent Claims infringed by the making, using, or selling
-    of Modifications made by that Contributor either alone and/or in
-    combination with its Contributor Version (or portions of such
-    combination), to make, use, sell, offer for sale, have made, and/or
-    otherwise dispose of: (1) Modifications made by that Contributor (or
-    portions thereof); and (2) the combination of Modifications made by
-    that Contributor with its Contributor Version (or portions of such
-    combination).
-
-    (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective
-    on the date Contributor first distributes or otherwise makes the
-    Modifications available to a third party.
-
-    (d) Notwithstanding Section 2.2(b) above, no patent license is
-    granted: (1) for any code that Contributor has deleted from the
-    Contributor Version; (2) for infringements caused by: (i) third
-    party modifications of Contributor Version, or (ii) the combination
-    of Modifications made by that Contributor with other software
-    (except as part of the Contributor Version) or other devices; or (3)
-    under Patent Claims infringed by Covered Software in the absence of
-    Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
-    3.1. Availability of Source Code.
-
-    Any Covered Software that You distribute or otherwise make available
-    in Executable form must also be made available in Source Code form
-    and that Source Code form must be distributed only under the terms
-    of this License. You must include a copy of this License with every
-    copy of the Source Code form of the Covered Software You distribute
-    or otherwise make available. You must inform recipients of any such
-    Covered Software in Executable form as to how they can obtain such
-    Covered Software in Source Code form in a reasonable manner on or
-    through a medium customarily used for software exchange.
-
-    3.2. Modifications.
-
-    The Modifications that You create or to which You contribute are
-    governed by the terms of this License. You represent that You
-    believe Your Modifications are Your original creation(s) and/or You
-    have sufficient rights to grant the rights conveyed by this License.
-
-    3.3. Required Notices.
-
-    You must include a notice in each of Your Modifications that
-    identifies You as the Contributor of the Modification. You may not
-    remove or alter any copyright, patent or trademark notices contained
-    within the Covered Software, or any notices of licensing or any
-    descriptive text giving attribution to any Contributor or the
-    Initial Developer.
-
-    3.4. Application of Additional Terms.
-
-    You may not offer or impose any terms on any Covered Software in
-    Source Code form that alters or restricts the applicable version of
-    this License or the recipients' rights hereunder. You may choose to
-    offer, and to charge a fee for, warranty, support, indemnity or
-    liability obligations to one or more recipients of Covered Software.
-    However, you may do so only on Your own behalf, and not on behalf of
-    the Initial Developer or any Contributor. You must make it
-    absolutely clear that any such warranty, support, indemnity or
-    liability obligation is offered by You alone, and You hereby agree
-    to indemnify the Initial Developer and every Contributor for any
-    liability incurred by the Initial Developer or such Contributor as a
-    result of warranty, support, indemnity or liability terms You offer.
-
-    3.5. Distribution of Executable Versions.
-
-    You may distribute the Executable form of the Covered Software under
-    the terms of this License or under the terms of a license of Your
-    choice, which may contain terms different from this License,
-    provided that You are in compliance with the terms of this License
-    and that the license for the Executable form does not attempt to
-    limit or alter the recipient's rights in the Source Code form from
-    the rights set forth in this License. If You distribute the Covered
-    Software in Executable form under a different license, You must make
-    it absolutely clear that any terms which differ from this License
-    are offered by You alone, not by the Initial Developer or
-    Contributor. You hereby agree to indemnify the Initial Developer and
-    every Contributor for any liability incurred by the Initial
-    Developer or such Contributor as a result of any such terms You offer.
-
-    3.6. Larger Works.
-
-    You may create a Larger Work by combining Covered Software with
-    other code not governed by the terms of this License and distribute
-    the Larger Work as a single product. In such a case, You must make
-    sure the requirements of this License are fulfilled for the Covered
-    Software.
-
-4. Versions of the License.
-
-    4.1. New Versions.
-
-    Oracle is the initial license steward and may publish revised and/or
-    new versions of this License from time to time. Each version will be
-    given a distinguishing version number. Except as provided in Section
-    4.3, no one other than the license steward has the right to modify
-    this License.
-
-    4.2. Effect of New Versions.
-
-    You may always continue to use, distribute or otherwise make the
-    Covered Software available under the terms of the version of the
-    License under which You originally received the Covered Software. If
-    the Initial Developer includes a notice in the Original Software
-    prohibiting it from being distributed or otherwise made available
-    under any subsequent version of the License, You must distribute and
-    make the Covered Software available under the terms of the version
-    of the License under which You originally received the Covered
-    Software. Otherwise, You may also choose to use, distribute or
-    otherwise make the Covered Software available under the terms of any
-    subsequent version of the License published by the license steward.
-
-    4.3. Modified Versions.
-
-    When You are an Initial Developer and You want to create a new
-    license for Your Original Software, You may create and use a
-    modified version of this License if You: (a) rename the license and
-    remove any references to the name of the license steward (except to
-    note that the license differs from this License); and (b) otherwise
-    make it clear that the license contains terms which differ from this
-    License.
-
-5. DISCLAIMER OF WARRANTY.
-
-    COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
-    WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
-    INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE
-    IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR
-    NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF
-    THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE
-    DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY
-    OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING,
-    REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN
-    ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS
-    AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
-    6.1. This License and the rights granted hereunder will terminate
-    automatically if You fail to comply with terms herein and fail to
-    cure such breach within 30 days of becoming aware of the breach.
-    Provisions which, by their nature, must remain in effect beyond the
-    termination of this License shall survive.
-
-    6.2. If You assert a patent infringement claim (excluding
-    declaratory judgment actions) against Initial Developer or a
-    Contributor (the Initial Developer or Contributor against whom You
-    assert such claim is referred to as "Participant") alleging that the
-    Participant Software (meaning the Contributor Version where the
-    Participant is a Contributor or the Original Software where the
-    Participant is the Initial Developer) directly or indirectly
-    infringes any patent, then any and all rights granted directly or
-    indirectly to You by such Participant, the Initial Developer (if the
-    Initial Developer is not the Participant) and all Contributors under
-    Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice
-    from Participant terminate prospectively and automatically at the
-    expiration of such 60 day notice period, unless if within such 60
-    day period You withdraw Your claim with respect to the Participant
-    Software against such Participant either unilaterally or pursuant to
-    a written agreement with Participant.
-
-    6.3. If You assert a patent infringement claim against Participant
-    alleging that the Participant Software directly or indirectly
-    infringes any patent where such claim is resolved (such as by
-    license or settlement) prior to the initiation of patent
-    infringement litigation, then the reasonable value of the licenses
-    granted by such Participant under Sections 2.1 or 2.2 shall be taken
-    into account in determining the amount or value of any payment or
-    license.
-
-    6.4. In the event of termination under Sections 6.1 or 6.2 above,
-    all end user licenses that have been validly granted by You or any
-    distributor hereunder prior to termination (excluding licenses
-    granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
-    UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
-    (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE
-    INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF
-    COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE
-    TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR
-    CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT
-    LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER
-    FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR
-    LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE
-    POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT
-    APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH
-    PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH
-    LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR
-    LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION
-    AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
-    The Covered Software is a "commercial item," as that term is defined
-    in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
-    software" (as that term is defined at 48 C.F.R. §
-    252.227-7014(a)(1)) and "commercial computer software documentation"
-    as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent
-    with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4
-    (June 1995), all U.S. Government End Users acquire Covered Software
-    with only those rights set forth herein. This U.S. Government Rights
-    clause is in lieu of, and supersedes, any other FAR, DFAR, or other
-    clause or provision that addresses Government rights in computer
-    software under this License.
-
-9. MISCELLANEOUS.
-
-    This License represents the complete agreement concerning subject
-    matter hereof. If any provision of this License is held to be
-    unenforceable, such provision shall be reformed only to the extent
-    necessary to make it enforceable. This License shall be governed by
-    the law of the jurisdiction specified in a notice contained within
-    the Original Software (except to the extent applicable law, if any,
-    provides otherwise), excluding such jurisdiction's conflict-of-law
-    provisions. Any litigation relating to this License shall be subject
-    to the jurisdiction of the courts located in the jurisdiction and
-    venue specified in a notice contained within the Original Software,
-    with the losing party responsible for costs, including, without
-    limitation, court costs and reasonable attorneys' fees and expenses.
-    The application of the United Nations Convention on Contracts for
-    the International Sale of Goods is expressly excluded. Any law or
-    regulation which provides that the language of a contract shall be
-    construed against the drafter shall not apply to this License. You
-    agree that You alone are responsible for compliance with the United
-    States export administration regulations (and the export control
-    laws and regulation of any other countries) when You use, distribute
-    or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
-    As between Initial Developer and the Contributors, each party is
-    responsible for claims and damages arising, directly or indirectly,
-    out of its utilization of rights under this License and You agree to
-    work with Initial Developer and Contributors to distribute such
-    responsibility on an equitable basis. Nothing herein is intended or
-    shall be deemed to constitute any admission of liability.
-
-------------------------------------------------------------------------
-
-NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION
-LICENSE (CDDL)
-
-The code released under the CDDL shall be governed by the laws of the
-State of California (excluding conflict-of-law provisions). Any
-litigation relating to this License shall be subject to the jurisdiction
-of the Federal Courts of the Northern District of California and the
-state courts of the State of California, with venue lying in Santa Clara
-County, California.
-
-
-
-  The GNU General Public License (GPL) Version 2, June 1991
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-51 Franklin Street, Fifth Floor
-Boston, MA 02110-1335
-USA
-
-Everyone is permitted to copy and distribute verbatim copies
-of this license document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to
-share and change it. By contrast, the GNU General Public License is
-intended to guarantee your freedom to share and change free software--to
-make sure the software is free for all its users. This General Public
-License applies to most of the Free Software Foundation's software and
-to any other program whose authors commit to using it. (Some other Free
-Software Foundation software is covered by the GNU Library General
-Public License instead.) You can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price.
-Our General Public Licenses are designed to make sure that you have the
-freedom to distribute copies of free software (and charge for this
-service if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs; and that you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone
-to deny you these rights or to ask you to surrender the rights. These
-restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis
-or for a fee, you must give the recipients all the rights that you have.
-You must make sure that they, too, receive or can get the source code.
-And you must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software. If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-Finally, any free program is threatened constantly by software patents.
-We wish to avoid the danger that redistributors of a free program will
-individually obtain patent licenses, in effect making the program
-proprietary. To prevent this, we have made it clear that any patent must
-be licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and
-modification follow.
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a
-notice placed by the copyright holder saying it may be distributed under
-the terms of this General Public License. The "Program", below, refers
-to any such program or work, and a "work based on the Program" means
-either the Program or any derivative work under copyright law: that is
-to say, a work containing the Program or a portion of it, either
-verbatim or with modifications and/or translated into another language.
-(Hereinafter, translation is included without limitation in the term
-"modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope. The act of running
-the Program is not restricted, and the output from the Program is
-covered only if its contents constitute a work based on the Program
-(independent of having been made by running the Program). Whether that
-is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source
-code as you receive it, in any medium, provided that you conspicuously
-and appropriately publish on each copy an appropriate copyright notice
-and disclaimer of warranty; keep intact all the notices that refer to
-this License and to the absence of any warranty; and give any other
-recipients of the Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of
-it, thus forming a work based on the Program, and copy and distribute
-such modifications or work under the terms of Section 1 above, provided
-that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any part
-    thereof, to be licensed as a whole at no charge to all third parties
-    under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a notice
-    that there is no warranty (or else, saying that you provide a
-    warranty) and that users may redistribute the program under these
-    conditions, and telling the user how to view a copy of this License.
-    (Exception: if the Program itself is interactive but does not
-    normally print such an announcement, your work based on the Program
-    is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If
-identifiable sections of that work are not derived from the Program, and
-can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works. But when you
-distribute the same sections as part of a whole which is a work based on
-the Program, the distribution of the whole must be on the terms of this
-License, whose permissions for other licensees extend to the entire
-whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of a
-storage or distribution medium does not bring the other work under the
-scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections 1
-    and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your cost
-    of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer to
-    distribute corresponding source code. (This alternative is allowed
-    only for noncommercial distribution and only if you received the
-    program in object code or executable form with such an offer, in
-    accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it. For an executable work, complete source code
-means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to control
-compilation and installation of the executable. However, as a special
-exception, the source code distributed need not include anything that is
-normally distributed (in either source or binary form) with the major
-components (compiler, kernel, and so on) of the operating system on
-which the executable runs, unless that component itself accompanies the
-executable.
-
-If distribution of executable or object code is made by offering access
-to copy from a designated place, then offering equivalent access to copy
-the source code from the same place counts as distribution of the source
-code, even though third parties are not compelled to copy the source
-along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License. Any attempt otherwise
-to copy, modify, sublicense or distribute the Program is void, and will
-automatically terminate your rights under this License. However, parties
-who have received copies, or rights, from you under this License will
-not have their licenses terminated so long as such parties remain in
-full compliance.
-
-5. You are not required to accept this License, since you have not
-signed it. However, nothing else grants you permission to modify or
-distribute the Program or its derivative works. These actions are
-prohibited by law if you do not accept this License. Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and all
-its terms and conditions for copying, distributing or modifying the
-Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions. You may not impose any further restrictions
-on the recipients' exercise of the rights granted herein. You are not
-responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot distribute
-so as to satisfy simultaneously your obligations under this License and
-any other pertinent obligations, then as a consequence you may not
-distribute the Program at all. For example, if a patent license would
-not permit royalty-free redistribution of the Program by all those who
-receive copies directly or indirectly through you, then the only way you
-could satisfy both it and this License would be to refrain entirely from
-distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is implemented
-by public license practices. Many people have made generous
-contributions to the wide range of software distributed through that
-system in reliance on consistent application of that system; it is up to
-the author/donor to decide if he or she is willing to distribute
-software through any other system and a licensee cannot impose that choice.
-
-This section is intended to make thoroughly clear what is believed to be
-a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License may
-add an explicit geographical distribution limitation excluding those
-countries, so that distribution is permitted only in or among countries
-not thus excluded. In such case, this License incorporates the
-limitation as if written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new
-versions of the General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and
-conditions either of that version or of any later version published by
-the Free Software Foundation. If the Program does not specify a version
-number of this License, you may choose any version ever published by the
-Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the
-author to ask for permission. For software which is copyrighted by the
-Free Software Foundation, write to the Free Software Foundation; we
-sometimes make exceptions for this. Our decision will be guided by the
-two goals of preserving the free status of all derivatives of our free
-software and of promoting the sharing and reuse of software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO
-WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
-OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND,
-EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
-ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
-YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
-NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
-WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
-AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
-DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
-DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
-(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
-INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
-THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
-OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to
-attach them to the start of each source file to most effectively convey
-the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-    One line to give the program's name and a brief idea of what it does.
-    Copyright (C) <year> <name of author>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful, but
-    WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-    General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year name of author
-    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type
-    `show w'. This is free software, and you are welcome to redistribute
-    it under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the
-appropriate parts of the General Public License. Of course, the commands
-you use may be called something other than `show w' and `show c'; they
-could even be mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
-    Yoyodyne, Inc., hereby disclaims all copyright interest in the
-    program `Gnomovision' (which makes passes at compilers) written by
-    James Hacker.
-
-    signature of Ty Coon, 1 April 1989
-    Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications
-with the library. If this is what you want to do, use the GNU Library
-General Public License instead of this License.
-
-#
-
-Certain source files distributed by Oracle America, Inc. and/or its
-affiliates are subject to the following clarification and special
-exception to the GPLv2, based on the GNU Project exception for its
-Classpath libraries, known as the GNU Classpath Exception, but only
-where Oracle has expressly included in the particular source file's
-header the words "Oracle designates this particular file as subject to
-the "Classpath" exception as provided by Oracle in the LICENSE file
-that accompanied this code."
-
-You should also note that Oracle includes multiple, independent
-programs in this software package. Some of those programs are provided
-under licenses deemed incompatible with the GPLv2 by the Free Software
-Foundation and others.  For example, the package includes programs
-licensed under the Apache License, Version 2.0.  Such programs are
-licensed to you under their original licenses.
-
-Oracle facilitates your further distribution of this package by adding
-the Classpath Exception to the necessary parts of its GPLv2 code, which
-permits you to use that code in combination with other independent
-modules not licensed under the GPLv2.  However, note that this would
-not permit you to commingle code under an incompatible license with
-Oracle's GPLv2 licensed code by, for example, cutting and pasting such
-code into a file also containing Oracle's GPLv2 licensed code and then
-distributing the result.  Additionally, if you were to remove the
-Classpath Exception from any of the files to which it applies and
-distribute the result, you would likely be required to license some or
-all of the other code in that distribution under the GPLv2 as well, and
-since the GPLv2 is incompatible with the license terms of some items
-included in the distribution by Oracle, removing the Classpath
-Exception could therefore effectively compromise your ability to
-further distribute the package.
-
-Proceed with caution and we recommend that you obtain the advice of a
-lawyer skilled in open source matters before removing the Classpath
-Exception or making modifications to this package which may
-subsequently be redistributed and/or involve the use of third party
-software.
-
-CLASSPATH EXCEPTION
-Linking this library statically or dynamically with other modules is
-making a combined work based on this library.  Thus, the terms and
-conditions of the GNU General Public License version 2 cover the whole
-combination.
-
-As a special exception, the copyright holders of this library give you
-permission to link this library with independent modules to produce an
-executable, regardless of the license terms of these independent
-modules, and to copy and distribute the resulting executable under
-terms of your choice, provided that you also meet, for each linked
-independent module, the terms and conditions of the license of that
-module.  An independent module is a module which is not derived from or
-based on this library.  If you modify this library, you may extend this
-exception to your version of the library, but you are not obligated to
-do so.  If you do not wish to do so, delete this exception statement
-from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.xml.bind-jaxb-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.xml.bind-jaxb-api.txt
deleted file mode 100644
index 833a843..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.xml.bind-jaxb-api.txt
+++ /dev/null
@@ -1,274 +0,0 @@
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1
-
-1. Definitions.
-
-     1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications.
-
-     1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor.
-
-     1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof.
-
-     1.4. "Executable" means the Covered Software in any form other than Source Code.
-
-     1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License.
-
-     1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License.
-
-     1.7. "License" means this document.
-
-     1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein.
-
-     1.9. "Modifications" means the Source Code and Executable form of any of the following:
-
-     A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications;
-
-     B. Any new file that contains any part of the Original Software or previous Modification; or
-
-     C. Any new file that is contributed or otherwise made available under the terms of this License.
-
-     1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License.
-
-     1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor.
-
-     1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code.
-
-     1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
-
-2. License Grants.
-
-     2.1. The Initial Developer Grant.
-
-     Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-     (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and
-
-     (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof).
-
-     (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License.
-
-     (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices.
-
-     2.2. Contributor Grant.
-
-     Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-     (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and
-
-     (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination).
-
-     (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party.
-
-     (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
-     3.1. Availability of Source Code.
-
-     Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange.
-
-     3.2. Modifications.
-
-     The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License.
-
-     3.3. Required Notices.
-
-     You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer.
-
-     3.4. Application of Additional Terms.
-
-     You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer.
-
-     3.5. Distribution of Executable Versions.
-
-     You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer.
-
-     3.6. Larger Works.
-
-     You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software.
-
-4. Versions of the License.
-
-     4.1. New Versions.
-
-     Oracle is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License.
-
-     4.2. Effect of New Versions.
-
-     You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward.
-
-     4.3. Modified Versions.
-
-     When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License.
-
-5. DISCLAIMER OF WARRANTY.
-
-     COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
-     6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive.
-
-     6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant.
-
-     6.3. If You assert a patent infringement claim against Participant alleging that the Participant Software directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license.
-
-     6.4. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
-     UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
-     The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License.
-
-9. MISCELLANEOUS.
-
-     This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
-     As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
-
-----------
-NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)
-The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California.
-
-
-
-
-The GNU General Public License (GPL) Version 2, June 1991
-
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations.
-
-Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and modification follow.
-
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:
-
-   a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.
-
-   b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License.
-
-   c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following:
-
-   a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
-   b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
-   c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.
-
-If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.
-
-5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.
-
-This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.
-
-   One line to give the program's name and a brief idea of what it does.
-
-   Copyright (C) <year> <name of author>
-
-   This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this when it starts in an interactive mode:
-
-   Gnomovision version 69, Copyright (C) year name of author
-   Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names:
-
-   Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-   signature of Ty Coon, 1 April 1989
-   Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.
-
-
-"CLASSPATH" EXCEPTION TO THE GPL VERSION 2
-
-Certain source files distributed by Oracle are subject to the following clarification and special exception to the GPL Version 2, but only where Oracle has expressly included in the particular source file's header the words "Oracle designates this particular file as subject to the "Classpath" exception as provided by Oracle in the License file that accompanied this code."
-
-Linking this library statically or dynamically with other modules is making a combined work based on this library.  Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination.
-
-As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module.  An independent module is a module which is not derived from or based on this library.  If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so.  If you do not wish to do so, delete this exception statement from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jersey.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jersey.txt
deleted file mode 100644
index 80babca..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jersey.txt
+++ /dev/null
@@ -1,759 +0,0 @@
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1
-
-1. Definitions.
-
-    1.1. "Contributor" means each individual or entity that creates or
-    contributes to the creation of Modifications.
-
-    1.2. "Contributor Version" means the combination of the Original
-    Software, prior Modifications used by a Contributor (if any), and
-    the Modifications made by that particular Contributor.
-
-    1.3. "Covered Software" means (a) the Original Software, or (b)
-    Modifications, or (c) the combination of files containing Original
-    Software with files containing Modifications, in each case including
-    portions thereof.
-
-    1.4. "Executable" means the Covered Software in any form other than
-    Source Code.
-
-    1.5. "Initial Developer" means the individual or entity that first
-    makes Original Software available under this License.
-
-    1.6. "Larger Work" means a work which combines Covered Software or
-    portions thereof with code not governed by the terms of this License.
-
-    1.7. "License" means this document.
-
-    1.8. "Licensable" means having the right to grant, to the maximum
-    extent possible, whether at the time of the initial grant or
-    subsequently acquired, any and all of the rights conveyed herein.
-
-    1.9. "Modifications" means the Source Code and Executable form of
-    any of the following:
-
-    A. Any file that results from an addition to, deletion from or
-    modification of the contents of a file containing Original Software
-    or previous Modifications;
-
-    B. Any new file that contains any part of the Original Software or
-    previous Modification; or
-
-    C. Any new file that is contributed or otherwise made available
-    under the terms of this License.
-
-    1.10. "Original Software" means the Source Code and Executable form
-    of computer software code that is originally released under this
-    License.
-
-    1.11. "Patent Claims" means any patent claim(s), now owned or
-    hereafter acquired, including without limitation, method, process,
-    and apparatus claims, in any patent Licensable by grantor.
-
-    1.12. "Source Code" means (a) the common form of computer software
-    code in which modifications are made and (b) associated
-    documentation included in or with such code.
-
-    1.13. "You" (or "Your") means an individual or a legal entity
-    exercising rights under, and complying with all of the terms of,
-    this License. For legal entities, "You" includes any entity which
-    controls, is controlled by, or is under common control with You. For
-    purposes of this definition, "control" means (a) the power, direct
-    or indirect, to cause the direction or management of such entity,
-    whether by contract or otherwise, or (b) ownership of more than
-    fifty percent (50%) of the outstanding shares or beneficial
-    ownership of such entity.
-
-2. License Grants.
-
-    2.1. The Initial Developer Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject
-    to third party intellectual property claims, the Initial Developer
-    hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-    (a) under intellectual property rights (other than patent or
-    trademark) Licensable by Initial Developer, to use, reproduce,
-    modify, display, perform, sublicense and distribute the Original
-    Software (or portions thereof), with or without Modifications,
-    and/or as part of a Larger Work; and
-
-    (b) under Patent Claims infringed by the making, using or selling of
-    Original Software, to make, have made, use, practice, sell, and
-    offer for sale, and/or otherwise dispose of the Original Software
-    (or portions thereof).
-
-    (c) The licenses granted in Sections 2.1(a) and (b) are effective on
-    the date Initial Developer first distributes or otherwise makes the
-    Original Software available to a third party under the terms of this
-    License.
-
-    (d) Notwithstanding Section 2.1(b) above, no patent license is
-    granted: (1) for code that You delete from the Original Software, or
-    (2) for infringements caused by: (i) the modification of the
-    Original Software, or (ii) the combination of the Original Software
-    with other software or devices.
-
-    2.2. Contributor Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject
-    to third party intellectual property claims, each Contributor hereby
-    grants You a world-wide, royalty-free, non-exclusive license:
-
-    (a) under intellectual property rights (other than patent or
-    trademark) Licensable by Contributor to use, reproduce, modify,
-    display, perform, sublicense and distribute the Modifications
-    created by such Contributor (or portions thereof), either on an
-    unmodified basis, with other Modifications, as Covered Software
-    and/or as part of a Larger Work; and
-
-    (b) under Patent Claims infringed by the making, using, or selling
-    of Modifications made by that Contributor either alone and/or in
-    combination with its Contributor Version (or portions of such
-    combination), to make, use, sell, offer for sale, have made, and/or
-    otherwise dispose of: (1) Modifications made by that Contributor (or
-    portions thereof); and (2) the combination of Modifications made by
-    that Contributor with its Contributor Version (or portions of such
-    combination).
-
-    (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective
-    on the date Contributor first distributes or otherwise makes the
-    Modifications available to a third party.
-
-    (d) Notwithstanding Section 2.2(b) above, no patent license is
-    granted: (1) for any code that Contributor has deleted from the
-    Contributor Version; (2) for infringements caused by: (i) third
-    party modifications of Contributor Version, or (ii) the combination
-    of Modifications made by that Contributor with other software
-    (except as part of the Contributor Version) or other devices; or (3)
-    under Patent Claims infringed by Covered Software in the absence of
-    Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
-    3.1. Availability of Source Code.
-
-    Any Covered Software that You distribute or otherwise make available
-    in Executable form must also be made available in Source Code form
-    and that Source Code form must be distributed only under the terms
-    of this License. You must include a copy of this License with every
-    copy of the Source Code form of the Covered Software You distribute
-    or otherwise make available. You must inform recipients of any such
-    Covered Software in Executable form as to how they can obtain such
-    Covered Software in Source Code form in a reasonable manner on or
-    through a medium customarily used for software exchange.
-
-    3.2. Modifications.
-
-    The Modifications that You create or to which You contribute are
-    governed by the terms of this License. You represent that You
-    believe Your Modifications are Your original creation(s) and/or You
-    have sufficient rights to grant the rights conveyed by this License.
-
-    3.3. Required Notices.
-
-    You must include a notice in each of Your Modifications that
-    identifies You as the Contributor of the Modification. You may not
-    remove or alter any copyright, patent or trademark notices contained
-    within the Covered Software, or any notices of licensing or any
-    descriptive text giving attribution to any Contributor or the
-    Initial Developer.
-
-    3.4. Application of Additional Terms.
-
-    You may not offer or impose any terms on any Covered Software in
-    Source Code form that alters or restricts the applicable version of
-    this License or the recipients' rights hereunder. You may choose to
-    offer, and to charge a fee for, warranty, support, indemnity or
-    liability obligations to one or more recipients of Covered Software.
-    However, you may do so only on Your own behalf, and not on behalf of
-    the Initial Developer or any Contributor. You must make it
-    absolutely clear that any such warranty, support, indemnity or
-    liability obligation is offered by You alone, and You hereby agree
-    to indemnify the Initial Developer and every Contributor for any
-    liability incurred by the Initial Developer or such Contributor as a
-    result of warranty, support, indemnity or liability terms You offer.
-
-    3.5. Distribution of Executable Versions.
-
-    You may distribute the Executable form of the Covered Software under
-    the terms of this License or under the terms of a license of Your
-    choice, which may contain terms different from this License,
-    provided that You are in compliance with the terms of this License
-    and that the license for the Executable form does not attempt to
-    limit or alter the recipient's rights in the Source Code form from
-    the rights set forth in this License. If You distribute the Covered
-    Software in Executable form under a different license, You must make
-    it absolutely clear that any terms which differ from this License
-    are offered by You alone, not by the Initial Developer or
-    Contributor. You hereby agree to indemnify the Initial Developer and
-    every Contributor for any liability incurred by the Initial
-    Developer or such Contributor as a result of any such terms You offer.
-
-    3.6. Larger Works.
-
-    You may create a Larger Work by combining Covered Software with
-    other code not governed by the terms of this License and distribute
-    the Larger Work as a single product. In such a case, You must make
-    sure the requirements of this License are fulfilled for the Covered
-    Software.
-
-4. Versions of the License.
-
-    4.1. New Versions.
-
-    Oracle is the initial license steward and may publish revised and/or
-    new versions of this License from time to time. Each version will be
-    given a distinguishing version number. Except as provided in Section
-    4.3, no one other than the license steward has the right to modify
-    this License.
-
-    4.2. Effect of New Versions.
-
-    You may always continue to use, distribute or otherwise make the
-    Covered Software available under the terms of the version of the
-    License under which You originally received the Covered Software. If
-    the Initial Developer includes a notice in the Original Software
-    prohibiting it from being distributed or otherwise made available
-    under any subsequent version of the License, You must distribute and
-    make the Covered Software available under the terms of the version
-    of the License under which You originally received the Covered
-    Software. Otherwise, You may also choose to use, distribute or
-    otherwise make the Covered Software available under the terms of any
-    subsequent version of the License published by the license steward.
-
-    4.3. Modified Versions.
-
-    When You are an Initial Developer and You want to create a new
-    license for Your Original Software, You may create and use a
-    modified version of this License if You: (a) rename the license and
-    remove any references to the name of the license steward (except to
-    note that the license differs from this License); and (b) otherwise
-    make it clear that the license contains terms which differ from this
-    License.
-
-5. DISCLAIMER OF WARRANTY.
-
-    COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
-    WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
-    INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE
-    IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR
-    NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF
-    THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE
-    DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY
-    OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING,
-    REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN
-    ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS
-    AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
-    6.1. This License and the rights granted hereunder will terminate
-    automatically if You fail to comply with terms herein and fail to
-    cure such breach within 30 days of becoming aware of the breach.
-    Provisions which, by their nature, must remain in effect beyond the
-    termination of this License shall survive.
-
-    6.2. If You assert a patent infringement claim (excluding
-    declaratory judgment actions) against Initial Developer or a
-    Contributor (the Initial Developer or Contributor against whom You
-    assert such claim is referred to as "Participant") alleging that the
-    Participant Software (meaning the Contributor Version where the
-    Participant is a Contributor or the Original Software where the
-    Participant is the Initial Developer) directly or indirectly
-    infringes any patent, then any and all rights granted directly or
-    indirectly to You by such Participant, the Initial Developer (if the
-    Initial Developer is not the Participant) and all Contributors under
-    Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice
-    from Participant terminate prospectively and automatically at the
-    expiration of such 60 day notice period, unless if within such 60
-    day period You withdraw Your claim with respect to the Participant
-    Software against such Participant either unilaterally or pursuant to
-    a written agreement with Participant.
-
-    6.3. If You assert a patent infringement claim against Participant
-    alleging that the Participant Software directly or indirectly
-    infringes any patent where such claim is resolved (such as by
-    license or settlement) prior to the initiation of patent
-    infringement litigation, then the reasonable value of the licenses
-    granted by such Participant under Sections 2.1 or 2.2 shall be taken
-    into account in determining the amount or value of any payment or
-    license.
-
-    6.4. In the event of termination under Sections 6.1 or 6.2 above,
-    all end user licenses that have been validly granted by You or any
-    distributor hereunder prior to termination (excluding licenses
-    granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
-    UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
-    (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE
-    INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF
-    COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE
-    TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR
-    CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT
-    LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER
-    FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR
-    LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE
-    POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT
-    APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH
-    PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH
-    LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR
-    LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION
-    AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
-    The Covered Software is a "commercial item," as that term is defined
-    in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
-    software" (as that term is defined at 48 C.F.R. ß
-    252.227-7014(a)(1)) and "commercial computer software documentation"
-    as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent
-    with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4
-    (June 1995), all U.S. Government End Users acquire Covered Software
-    with only those rights set forth herein. This U.S. Government Rights
-    clause is in lieu of, and supersedes, any other FAR, DFAR, or other
-    clause or provision that addresses Government rights in computer
-    software under this License.
-
-9. MISCELLANEOUS.
-
-    This License represents the complete agreement concerning subject
-    matter hereof. If any provision of this License is held to be
-    unenforceable, such provision shall be reformed only to the extent
-    necessary to make it enforceable. This License shall be governed by
-    the law of the jurisdiction specified in a notice contained within
-    the Original Software (except to the extent applicable law, if any,
-    provides otherwise), excluding such jurisdiction's conflict-of-law
-    provisions. Any litigation relating to this License shall be subject
-    to the jurisdiction of the courts located in the jurisdiction and
-    venue specified in a notice contained within the Original Software,
-    with the losing party responsible for costs, including, without
-    limitation, court costs and reasonable attorneys' fees and expenses.
-    The application of the United Nations Convention on Contracts for
-    the International Sale of Goods is expressly excluded. Any law or
-    regulation which provides that the language of a contract shall be
-    construed against the drafter shall not apply to this License. You
-    agree that You alone are responsible for compliance with the United
-    States export administration regulations (and the export control
-    laws and regulation of any other countries) when You use, distribute
-    or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
-    As between Initial Developer and the Contributors, each party is
-    responsible for claims and damages arising, directly or indirectly,
-    out of its utilization of rights under this License and You agree to
-    work with Initial Developer and Contributors to distribute such
-    responsibility on an equitable basis. Nothing herein is intended or
-    shall be deemed to constitute any admission of liability.
-
-------------------------------------------------------------------------
-
-NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION
-LICENSE (CDDL)
-
-The code released under the CDDL shall be governed by the laws of the
-State of California (excluding conflict-of-law provisions). Any
-litigation relating to this License shall be subject to the jurisdiction
-of the Federal Courts of the Northern District of California and the
-state courts of the State of California, with venue lying in Santa Clara
-County, California.
-
-
-
-  The GNU General Public License (GPL) Version 2, June 1991
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-51 Franklin Street, Fifth Floor
-Boston, MA 02110-1335
-USA
-
-Everyone is permitted to copy and distribute verbatim copies
-of this license document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to
-share and change it. By contrast, the GNU General Public License is
-intended to guarantee your freedom to share and change free software--to
-make sure the software is free for all its users. This General Public
-License applies to most of the Free Software Foundation's software and
-to any other program whose authors commit to using it. (Some other Free
-Software Foundation software is covered by the GNU Library General
-Public License instead.) You can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price.
-Our General Public Licenses are designed to make sure that you have the
-freedom to distribute copies of free software (and charge for this
-service if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs; and that you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone
-to deny you these rights or to ask you to surrender the rights. These
-restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis
-or for a fee, you must give the recipients all the rights that you have.
-You must make sure that they, too, receive or can get the source code.
-And you must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software. If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-Finally, any free program is threatened constantly by software patents.
-We wish to avoid the danger that redistributors of a free program will
-individually obtain patent licenses, in effect making the program
-proprietary. To prevent this, we have made it clear that any patent must
-be licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and
-modification follow.
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a
-notice placed by the copyright holder saying it may be distributed under
-the terms of this General Public License. The "Program", below, refers
-to any such program or work, and a "work based on the Program" means
-either the Program or any derivative work under copyright law: that is
-to say, a work containing the Program or a portion of it, either
-verbatim or with modifications and/or translated into another language.
-(Hereinafter, translation is included without limitation in the term
-"modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope. The act of running
-the Program is not restricted, and the output from the Program is
-covered only if its contents constitute a work based on the Program
-(independent of having been made by running the Program). Whether that
-is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source
-code as you receive it, in any medium, provided that you conspicuously
-and appropriately publish on each copy an appropriate copyright notice
-and disclaimer of warranty; keep intact all the notices that refer to
-this License and to the absence of any warranty; and give any other
-recipients of the Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of
-it, thus forming a work based on the Program, and copy and distribute
-such modifications or work under the terms of Section 1 above, provided
-that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any part
-    thereof, to be licensed as a whole at no charge to all third parties
-    under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a notice
-    that there is no warranty (or else, saying that you provide a
-    warranty) and that users may redistribute the program under these
-    conditions, and telling the user how to view a copy of this License.
-    (Exception: if the Program itself is interactive but does not
-    normally print such an announcement, your work based on the Program
-    is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If
-identifiable sections of that work are not derived from the Program, and
-can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works. But when you
-distribute the same sections as part of a whole which is a work based on
-the Program, the distribution of the whole must be on the terms of this
-License, whose permissions for other licensees extend to the entire
-whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of a
-storage or distribution medium does not bring the other work under the
-scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections 1
-    and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your cost
-    of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer to
-    distribute corresponding source code. (This alternative is allowed
-    only for noncommercial distribution and only if you received the
-    program in object code or executable form with such an offer, in
-    accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it. For an executable work, complete source code
-means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to control
-compilation and installation of the executable. However, as a special
-exception, the source code distributed need not include anything that is
-normally distributed (in either source or binary form) with the major
-components (compiler, kernel, and so on) of the operating system on
-which the executable runs, unless that component itself accompanies the
-executable.
-
-If distribution of executable or object code is made by offering access
-to copy from a designated place, then offering equivalent access to copy
-the source code from the same place counts as distribution of the source
-code, even though third parties are not compelled to copy the source
-along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License. Any attempt otherwise
-to copy, modify, sublicense or distribute the Program is void, and will
-automatically terminate your rights under this License. However, parties
-who have received copies, or rights, from you under this License will
-not have their licenses terminated so long as such parties remain in
-full compliance.
-
-5. You are not required to accept this License, since you have not
-signed it. However, nothing else grants you permission to modify or
-distribute the Program or its derivative works. These actions are
-prohibited by law if you do not accept this License. Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and all
-its terms and conditions for copying, distributing or modifying the
-Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions. You may not impose any further restrictions
-on the recipients' exercise of the rights granted herein. You are not
-responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot distribute
-so as to satisfy simultaneously your obligations under this License and
-any other pertinent obligations, then as a consequence you may not
-distribute the Program at all. For example, if a patent license would
-not permit royalty-free redistribution of the Program by all those who
-receive copies directly or indirectly through you, then the only way you
-could satisfy both it and this License would be to refrain entirely from
-distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is implemented
-by public license practices. Many people have made generous
-contributions to the wide range of software distributed through that
-system in reliance on consistent application of that system; it is up to
-the author/donor to decide if he or she is willing to distribute
-software through any other system and a licensee cannot impose that choice.
-
-This section is intended to make thoroughly clear what is believed to be
-a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License may
-add an explicit geographical distribution limitation excluding those
-countries, so that distribution is permitted only in or among countries
-not thus excluded. In such case, this License incorporates the
-limitation as if written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new
-versions of the General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and
-conditions either of that version or of any later version published by
-the Free Software Foundation. If the Program does not specify a version
-number of this License, you may choose any version ever published by the
-Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the
-author to ask for permission. For software which is copyrighted by the
-Free Software Foundation, write to the Free Software Foundation; we
-sometimes make exceptions for this. Our decision will be guided by the
-two goals of preserving the free status of all derivatives of our free
-software and of promoting the sharing and reuse of software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO
-WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
-OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND,
-EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
-ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
-YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
-NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
-WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
-AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
-DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
-DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
-(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
-INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
-THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
-OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to
-attach them to the start of each source file to most effectively convey
-the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-    One line to give the program's name and a brief idea of what it does.
-    Copyright (C) <year> <name of author>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful, but
-    WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-    General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year name of author
-    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type
-    `show w'. This is free software, and you are welcome to redistribute
-    it under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the
-appropriate parts of the General Public License. Of course, the commands
-you use may be called something other than `show w' and `show c'; they
-could even be mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
-    Yoyodyne, Inc., hereby disclaims all copyright interest in the
-    program `Gnomovision' (which makes passes at compilers) written by
-    James Hacker.
-
-    signature of Ty Coon, 1 April 1989
-    Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications
-with the library. If this is what you want to do, use the GNU Library
-General Public License instead of this License.
-
-#
-
-Certain source files distributed by Oracle America, Inc. and/or its
-affiliates are subject to the following clarification and special
-exception to the GPLv2, based on the GNU Project exception for its
-Classpath libraries, known as the GNU Classpath Exception, but only
-where Oracle has expressly included in the particular source file's
-header the words "Oracle designates this particular file as subject to
-the "Classpath" exception as provided by Oracle in the LICENSE file
-that accompanied this code."
-
-You should also note that Oracle includes multiple, independent
-programs in this software package. Some of those programs are provided
-under licenses deemed incompatible with the GPLv2 by the Free Software
-Foundation and others.  For example, the package includes programs
-licensed under the Apache License, Version 2.0.  Such programs are
-licensed to you under their original licenses.
-
-Oracle facilitates your further distribution of this package by adding
-the Classpath Exception to the necessary parts of its GPLv2 code, which
-permits you to use that code in combination with other independent
-modules not licensed under the GPLv2.  However, note that this would
-not permit you to commingle code under an incompatible license with
-Oracle's GPLv2 licensed code by, for example, cutting and pasting such
-code into a file also containing Oracle's GPLv2 licensed code and then
-distributing the result.  Additionally, if you were to remove the
-Classpath Exception from any of the files to which it applies and
-distribute the result, you would likely be required to license some or
-all of the other code in that distribution under the GPLv2 as well, and
-since the GPLv2 is incompatible with the license terms of some items
-included in the distribution by Oracle, removing the Classpath
-Exception could therefore effectively compromise your ability to
-further distribute the package.
-
-Proceed with caution and we recommend that you obtain the advice of a
-lawyer skilled in open source matters before removing the Classpath
-Exception or making modifications to this package which may
-subsequently be redistributed and/or involve the use of third party
-software.
-
-CLASSPATH EXCEPTION
-Linking this library statically or dynamically with other modules is
-making a combined work based on this library.  Thus, the terms and
-conditions of the GNU General Public License version 2 cover the whole
-combination.
-
-As a special exception, the copyright holders of this library give you
-permission to link this library with independent modules to produce an
-executable, regardless of the license terms of these independent
-modules, and to copy and distribute the resulting executable under
-terms of your choice, provided that you also meet, for each linked
-independent module, the terms and conditions of the license of that
-module.  An independent module is a module which is not derived from or
-based on this library.  If you modify this library, you may extend this
-exception to your version of the library, but you are not obligated to
-do so.  If you do not wish to do so, delete this exception statement
-from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jetty.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jetty.txt
deleted file mode 100644
index 6acfaf4..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jetty.txt
+++ /dev/null
@@ -1,415 +0,0 @@
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License 1.0 which is available at
-https://www.eclipse.org/org/documents/epl-1.0/EPL-1.0.txt
-or the Apache Software License 2.0 which is available at
-https://www.apache.org/licenses/LICENSE-2.0
-
-
-
-Eclipse Public License - v 1.0
-
-THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC
-LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM
-CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
-
-1. DEFINITIONS
-
-"Contribution" means:
-
-a) in the case of the initial Contributor, the initial code and documentation
-   distributed under this Agreement, and
-b) in the case of each subsequent Contributor:
-    i) changes to the Program, and
-   ii) additions to the Program;
-
-   where such changes and/or additions to the Program originate from and are
-   distributed by that particular Contributor. A Contribution 'originates'
-   from a Contributor if it was added to the Program by such Contributor
-   itself or anyone acting on such Contributor's behalf. Contributions do not
-   include additions to the Program which: (i) are separate modules of
-   software distributed in conjunction with the Program under their own
-   license agreement, and (ii) are not derivative works of the Program.
-
-"Contributor" means any person or entity that distributes the Program.
-
-"Licensed Patents" mean patent claims licensable by a Contributor which are
-necessarily infringed by the use or sale of its Contribution alone or when
-combined with the Program.
-
-"Program" means the Contributions distributed in accordance with this
-Agreement.
-
-"Recipient" means anyone who receives the Program under this Agreement,
-including all Contributors.
-
-2. GRANT OF RIGHTS
-  a) Subject to the terms of this Agreement, each Contributor hereby grants
-     Recipient a non-exclusive, worldwide, royalty-free copyright license to
-     reproduce, prepare derivative works of, publicly display, publicly
-     perform, distribute and sublicense the Contribution of such Contributor,
-     if any, and such derivative works, in source code and object code form.
-  b) Subject to the terms of this Agreement, each Contributor hereby grants
-     Recipient a non-exclusive, worldwide, royalty-free patent license under
-     Licensed Patents to make, use, sell, offer to sell, import and otherwise
-     transfer the Contribution of such Contributor, if any, in source code and
-     object code form. This patent license shall apply to the combination of
-     the Contribution and the Program if, at the time the Contribution is
-     added by the Contributor, such addition of the Contribution causes such
-     combination to be covered by the Licensed Patents. The patent license
-     shall not apply to any other combinations which include the Contribution.
-     No hardware per se is licensed hereunder.
-  c) Recipient understands that although each Contributor grants the licenses
-     to its Contributions set forth herein, no assurances are provided by any
-     Contributor that the Program does not infringe the patent or other
-     intellectual property rights of any other entity. Each Contributor
-     disclaims any liability to Recipient for claims brought by any other
-     entity based on infringement of intellectual property rights or
-     otherwise. As a condition to exercising the rights and licenses granted
-     hereunder, each Recipient hereby assumes sole responsibility to secure
-     any other intellectual property rights needed, if any. For example, if a
-     third party patent license is required to allow Recipient to distribute
-     the Program, it is Recipient's responsibility to acquire that license
-     before distributing the Program.
-  d) Each Contributor represents that to its knowledge it has sufficient
-     copyright rights in its Contribution, if any, to grant the copyright
-     license set forth in this Agreement.
-
-3. REQUIREMENTS
-
-A Contributor may choose to distribute the Program in object code form under
-its own license agreement, provided that:
-
-  a) it complies with the terms and conditions of this Agreement; and
-  b) its license agreement:
-      i) effectively disclaims on behalf of all Contributors all warranties
-         and conditions, express and implied, including warranties or
-         conditions of title and non-infringement, and implied warranties or
-         conditions of merchantability and fitness for a particular purpose;
-     ii) effectively excludes on behalf of all Contributors all liability for
-         damages, including direct, indirect, special, incidental and
-         consequential damages, such as lost profits;
-    iii) states that any provisions which differ from this Agreement are
-         offered by that Contributor alone and not by any other party; and
-     iv) states that source code for the Program is available from such
-         Contributor, and informs licensees how to obtain it in a reasonable
-         manner on or through a medium customarily used for software exchange.
-
-When the Program is made available in source code form:
-
-  a) it must be made available under this Agreement; and
-  b) a copy of this Agreement must be included with each copy of the Program.
-     Contributors may not remove or alter any copyright notices contained
-     within the Program.
-
-Each Contributor must identify itself as the originator of its Contribution,
-if
-any, in a manner that reasonably allows subsequent Recipients to identify the
-originator of the Contribution.
-
-4. COMMERCIAL DISTRIBUTION
-
-Commercial distributors of software may accept certain responsibilities with
-respect to end users, business partners and the like. While this license is
-intended to facilitate the commercial use of the Program, the Contributor who
-includes the Program in a commercial product offering should do so in a manner
-which does not create potential liability for other Contributors. Therefore,
-if a Contributor includes the Program in a commercial product offering, such
-Contributor ("Commercial Contributor") hereby agrees to defend and indemnify
-every other Contributor ("Indemnified Contributor") against any losses,
-damages and costs (collectively "Losses") arising from claims, lawsuits and
-other legal actions brought by a third party against the Indemnified
-Contributor to the extent caused by the acts or omissions of such Commercial
-Contributor in connection with its distribution of the Program in a commercial
-product offering. The obligations in this section do not apply to any claims
-or Losses relating to any actual or alleged intellectual property
-infringement. In order to qualify, an Indemnified Contributor must:
-a) promptly notify the Commercial Contributor in writing of such claim, and
-b) allow the Commercial Contributor to control, and cooperate with the
-Commercial Contributor in, the defense and any related settlement
-negotiations. The Indemnified Contributor may participate in any such claim at
-its own expense.
-
-For example, a Contributor might include the Program in a commercial product
-offering, Product X. That Contributor is then a Commercial Contributor. If
-that Commercial Contributor then makes performance claims, or offers
-warranties related to Product X, those performance claims and warranties are
-such Commercial Contributor's responsibility alone. Under this section, the
-Commercial Contributor would have to defend claims against the other
-Contributors related to those performance claims and warranties, and if a
-court requires any other Contributor to pay any damages as a result, the
-Commercial Contributor must pay those damages.
-
-5. NO WARRANTY
-
-EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR
-IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each
-Recipient is solely responsible for determining the appropriateness of using
-and distributing the Program and assumes all risks associated with its
-exercise of rights under this Agreement , including but not limited to the
-risks and costs of program errors, compliance with applicable laws, damage to
-or loss of data, programs or equipment, and unavailability or interruption of
-operations.
-
-6. DISCLAIMER OF LIABILITY
-
-EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY
-CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION
-LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE
-EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY
-OF SUCH DAMAGES.
-
-7. GENERAL
-
-If any provision of this Agreement is invalid or unenforceable under
-applicable law, it shall not affect the validity or enforceability of the
-remainder of the terms of this Agreement, and without further action by the
-parties hereto, such provision shall be reformed to the minimum extent
-necessary to make such provision valid and enforceable.
-
-If Recipient institutes patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Program itself
-(excluding combinations of the Program with other software or hardware)
-infringes such Recipient's patent(s), then such Recipient's rights granted
-under Section 2(b) shall terminate as of the date such litigation is filed.
-
-All Recipient's rights under this Agreement shall terminate if it fails to
-comply with any of the material terms or conditions of this Agreement and does
-not cure such failure in a reasonable period of time after becoming aware of
-such noncompliance. If all Recipient's rights under this Agreement terminate,
-Recipient agrees to cease use and distribution of the Program as soon as
-reasonably practicable. However, Recipient's obligations under this Agreement
-and any licenses granted by Recipient relating to the Program shall continue
-and survive.
-
-Everyone is permitted to copy and distribute copies of this Agreement, but in
-order to avoid inconsistency the Agreement is copyrighted and may only be
-modified in the following manner. The Agreement Steward reserves the right to
-publish new versions (including revisions) of this Agreement from time to
-time. No one other than the Agreement Steward has the right to modify this
-Agreement. The Eclipse Foundation is the initial Agreement Steward. The
-Eclipse Foundation may assign the responsibility to serve as the Agreement
-Steward to a suitable separate entity. Each new version of the Agreement will
-be given a distinguishing version number. The Program (including
-Contributions) may always be distributed subject to the version of the
-Agreement under which it was received. In addition, after a new version of the
-Agreement is published, Contributor may elect to distribute the Program
-(including its Contributions) under the new version. Except as expressly
-stated in Sections 2(a) and 2(b) above, Recipient receives no rights or
-licenses to the intellectual property of any Contributor under this Agreement,
-whether expressly, by implication, estoppel or otherwise. All rights in the
-Program not expressly granted under this Agreement are reserved.
-
-This Agreement is governed by the laws of the State of New York and the
-intellectual property laws of the United States of America. No party to this
-Agreement will bring a legal action under this Agreement more than one year
-after the cause of action arose. Each party waives its rights to a jury trial in
-any resulting litigation.
-
-
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jquery.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jquery.txt
deleted file mode 100644
index 4593054..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jquery.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright JS Foundation and other contributors, https://js.foundation/
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-net.sf.jopt-simple-jopt-simple.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-net.sf.jopt-simple-jopt-simple.txt
deleted file mode 100644
index 54b2732..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-net.sf.jopt-simple-jopt-simple.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- The MIT License
-
- Copyright (c) 2004-2016 Paul R. Holser, Jr.
-
- Permission is hereby granted, free of charge, to any person obtaining
- a copy of this software and associated documentation files (the
- "Software"), to deal in the Software without restriction, including
- without limitation the rights to use, copy, modify, merge, publish,
- distribute, sublicense, and/or sell copies of the Software, and to
- permit persons to whom the Software is furnished to do so, subject to
- the following conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-nvd3.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-nvd3.txt
deleted file mode 100644
index 0955544..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-nvd3.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-Copyright (c) 2011-2014 Novus Partners, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
-file except in compliance with the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software distributed under the
- License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- express or implied. See the License for the specific language governing permissions and
-  limitations under the License.
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.mojo-animal-sniffer-annotations.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.mojo-animal-sniffer-annotations.txt
deleted file mode 100644
index f88186c..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.mojo-animal-sniffer-annotations.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-  The MIT License
-
-  Copyright (c) 2009 codehaus.org.
-
-  Permission is hereby granted, free of charge, to any person obtaining a copy
-  of this software and associated documentation files (the "Software"), to deal
-  in the Software without restriction, including without limitation the rights
-  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-  copies of the Software, and to permit persons to whom the Software is
-  furnished to do so, subject to the following conditions:
-
-  The above copyright notice and this permission notice shall be included in
-  all copies or substantial portions of the Software.
-
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-  THE SOFTWARE.
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.woodstox-stax2-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.woodstox-stax2-api.txt
deleted file mode 100644
index 9b53393..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.woodstox-stax2-api.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-This copy of Stax2 API is licensed under the
-Simplified BSD License (also known as "2-clause BSD", or "FreeBSD License")
-See the License for details about distribution rights, and the
-specific rights regarding derivate works.
-
-You may obtain a copy of the License at:
-
-http://www.opensource.org/licenses/bsd-license.php
-
-with details of:
-
-<COPYRIGHT HOLDER> = FasterXML.com
-<YEAR> = 2010-
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.fusesource.leveldbjni-leveldbjni-all.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.fusesource.leveldbjni-leveldbjni-all.txt
deleted file mode 100644
index 8edd375..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.fusesource.leveldbjni-leveldbjni-all.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2011 FuseSource Corp. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of FuseSource Corp. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.glassfish.hk2.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.glassfish.hk2.txt
deleted file mode 100644
index b1c74f9..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.glassfish.hk2.txt
+++ /dev/null
@@ -1,759 +0,0 @@
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1
-
-1. Definitions.
-
-    1.1. "Contributor" means each individual or entity that creates or
-    contributes to the creation of Modifications.
-
-    1.2. "Contributor Version" means the combination of the Original
-    Software, prior Modifications used by a Contributor (if any), and
-    the Modifications made by that particular Contributor.
-
-    1.3. "Covered Software" means (a) the Original Software, or (b)
-    Modifications, or (c) the combination of files containing Original
-    Software with files containing Modifications, in each case including
-    portions thereof.
-
-    1.4. "Executable" means the Covered Software in any form other than
-    Source Code.
-
-    1.5. "Initial Developer" means the individual or entity that first
-    makes Original Software available under this License.
-
-    1.6. "Larger Work" means a work which combines Covered Software or
-    portions thereof with code not governed by the terms of this License.
-
-    1.7. "License" means this document.
-
-    1.8. "Licensable" means having the right to grant, to the maximum
-    extent possible, whether at the time of the initial grant or
-    subsequently acquired, any and all of the rights conveyed herein.
-
-    1.9. "Modifications" means the Source Code and Executable form of
-    any of the following:
-
-    A. Any file that results from an addition to, deletion from or
-    modification of the contents of a file containing Original Software
-    or previous Modifications;
-
-    B. Any new file that contains any part of the Original Software or
-    previous Modification; or
-
-    C. Any new file that is contributed or otherwise made available
-    under the terms of this License.
-
-    1.10. "Original Software" means the Source Code and Executable form
-    of computer software code that is originally released under this
-    License.
-
-    1.11. "Patent Claims" means any patent claim(s), now owned or
-    hereafter acquired, including without limitation, method, process,
-    and apparatus claims, in any patent Licensable by grantor.
-
-    1.12. "Source Code" means (a) the common form of computer software
-    code in which modifications are made and (b) associated
-    documentation included in or with such code.
-
-    1.13. "You" (or "Your") means an individual or a legal entity
-    exercising rights under, and complying with all of the terms of,
-    this License. For legal entities, "You" includes any entity which
-    controls, is controlled by, or is under common control with You. For
-    purposes of this definition, "control" means (a) the power, direct
-    or indirect, to cause the direction or management of such entity,
-    whether by contract or otherwise, or (b) ownership of more than
-    fifty percent (50%) of the outstanding shares or beneficial
-    ownership of such entity.
-
-2. License Grants.
-
-    2.1. The Initial Developer Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject
-    to third party intellectual property claims, the Initial Developer
-    hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-    (a) under intellectual property rights (other than patent or
-    trademark) Licensable by Initial Developer, to use, reproduce,
-    modify, display, perform, sublicense and distribute the Original
-    Software (or portions thereof), with or without Modifications,
-    and/or as part of a Larger Work; and
-
-    (b) under Patent Claims infringed by the making, using or selling of
-    Original Software, to make, have made, use, practice, sell, and
-    offer for sale, and/or otherwise dispose of the Original Software
-    (or portions thereof).
-
-    (c) The licenses granted in Sections 2.1(a) and (b) are effective on
-    the date Initial Developer first distributes or otherwise makes the
-    Original Software available to a third party under the terms of this
-    License.
-
-    (d) Notwithstanding Section 2.1(b) above, no patent license is
-    granted: (1) for code that You delete from the Original Software, or
-    (2) for infringements caused by: (i) the modification of the
-    Original Software, or (ii) the combination of the Original Software
-    with other software or devices.
-
-    2.2. Contributor Grant.
-
-    Conditioned upon Your compliance with Section 3.1 below and subject
-    to third party intellectual property claims, each Contributor hereby
-    grants You a world-wide, royalty-free, non-exclusive license:
-
-    (a) under intellectual property rights (other than patent or
-    trademark) Licensable by Contributor to use, reproduce, modify,
-    display, perform, sublicense and distribute the Modifications
-    created by such Contributor (or portions thereof), either on an
-    unmodified basis, with other Modifications, as Covered Software
-    and/or as part of a Larger Work; and
-
-    (b) under Patent Claims infringed by the making, using, or selling
-    of Modifications made by that Contributor either alone and/or in
-    combination with its Contributor Version (or portions of such
-    combination), to make, use, sell, offer for sale, have made, and/or
-    otherwise dispose of: (1) Modifications made by that Contributor (or
-    portions thereof); and (2) the combination of Modifications made by
-    that Contributor with its Contributor Version (or portions of such
-    combination).
-
-    (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective
-    on the date Contributor first distributes or otherwise makes the
-    Modifications available to a third party.
-
-    (d) Notwithstanding Section 2.2(b) above, no patent license is
-    granted: (1) for any code that Contributor has deleted from the
-    Contributor Version; (2) for infringements caused by: (i) third
-    party modifications of Contributor Version, or (ii) the combination
-    of Modifications made by that Contributor with other software
-    (except as part of the Contributor Version) or other devices; or (3)
-    under Patent Claims infringed by Covered Software in the absence of
-    Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
-    3.1. Availability of Source Code.
-
-    Any Covered Software that You distribute or otherwise make available
-    in Executable form must also be made available in Source Code form
-    and that Source Code form must be distributed only under the terms
-    of this License. You must include a copy of this License with every
-    copy of the Source Code form of the Covered Software You distribute
-    or otherwise make available. You must inform recipients of any such
-    Covered Software in Executable form as to how they can obtain such
-    Covered Software in Source Code form in a reasonable manner on or
-    through a medium customarily used for software exchange.
-
-    3.2. Modifications.
-
-    The Modifications that You create or to which You contribute are
-    governed by the terms of this License. You represent that You
-    believe Your Modifications are Your original creation(s) and/or You
-    have sufficient rights to grant the rights conveyed by this License.
-
-    3.3. Required Notices.
-
-    You must include a notice in each of Your Modifications that
-    identifies You as the Contributor of the Modification. You may not
-    remove or alter any copyright, patent or trademark notices contained
-    within the Covered Software, or any notices of licensing or any
-    descriptive text giving attribution to any Contributor or the
-    Initial Developer.
-
-    3.4. Application of Additional Terms.
-
-    You may not offer or impose any terms on any Covered Software in
-    Source Code form that alters or restricts the applicable version of
-    this License or the recipients' rights hereunder. You may choose to
-    offer, and to charge a fee for, warranty, support, indemnity or
-    liability obligations to one or more recipients of Covered Software.
-    However, you may do so only on Your own behalf, and not on behalf of
-    the Initial Developer or any Contributor. You must make it
-    absolutely clear that any such warranty, support, indemnity or
-    liability obligation is offered by You alone, and You hereby agree
-    to indemnify the Initial Developer and every Contributor for any
-    liability incurred by the Initial Developer or such Contributor as a
-    result of warranty, support, indemnity or liability terms You offer.
-
-    3.5. Distribution of Executable Versions.
-
-    You may distribute the Executable form of the Covered Software under
-    the terms of this License or under the terms of a license of Your
-    choice, which may contain terms different from this License,
-    provided that You are in compliance with the terms of this License
-    and that the license for the Executable form does not attempt to
-    limit or alter the recipient's rights in the Source Code form from
-    the rights set forth in this License. If You distribute the Covered
-    Software in Executable form under a different license, You must make
-    it absolutely clear that any terms which differ from this License
-    are offered by You alone, not by the Initial Developer or
-    Contributor. You hereby agree to indemnify the Initial Developer and
-    every Contributor for any liability incurred by the Initial
-    Developer or such Contributor as a result of any such terms You offer.
-
-    3.6. Larger Works.
-
-    You may create a Larger Work by combining Covered Software with
-    other code not governed by the terms of this License and distribute
-    the Larger Work as a single product. In such a case, You must make
-    sure the requirements of this License are fulfilled for the Covered
-    Software.
-
-4. Versions of the License.
-
-    4.1. New Versions.
-
-    Oracle is the initial license steward and may publish revised and/or
-    new versions of this License from time to time. Each version will be
-    given a distinguishing version number. Except as provided in Section
-    4.3, no one other than the license steward has the right to modify
-    this License.
-
-    4.2. Effect of New Versions.
-
-    You may always continue to use, distribute or otherwise make the
-    Covered Software available under the terms of the version of the
-    License under which You originally received the Covered Software. If
-    the Initial Developer includes a notice in the Original Software
-    prohibiting it from being distributed or otherwise made available
-    under any subsequent version of the License, You must distribute and
-    make the Covered Software available under the terms of the version
-    of the License under which You originally received the Covered
-    Software. Otherwise, You may also choose to use, distribute or
-    otherwise make the Covered Software available under the terms of any
-    subsequent version of the License published by the license steward.
-
-    4.3. Modified Versions.
-
-    When You are an Initial Developer and You want to create a new
-    license for Your Original Software, You may create and use a
-    modified version of this License if You: (a) rename the license and
-    remove any references to the name of the license steward (except to
-    note that the license differs from this License); and (b) otherwise
-    make it clear that the license contains terms which differ from this
-    License.
-
-5. DISCLAIMER OF WARRANTY.
-
-    COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
-    WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
-    INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE
-    IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR
-    NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF
-    THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE
-    DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY
-    OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING,
-    REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN
-    ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS
-    AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
-    6.1. This License and the rights granted hereunder will terminate
-    automatically if You fail to comply with terms herein and fail to
-    cure such breach within 30 days of becoming aware of the breach.
-    Provisions which, by their nature, must remain in effect beyond the
-    termination of this License shall survive.
-
-    6.2. If You assert a patent infringement claim (excluding
-    declaratory judgment actions) against Initial Developer or a
-    Contributor (the Initial Developer or Contributor against whom You
-    assert such claim is referred to as "Participant") alleging that the
-    Participant Software (meaning the Contributor Version where the
-    Participant is a Contributor or the Original Software where the
-    Participant is the Initial Developer) directly or indirectly
-    infringes any patent, then any and all rights granted directly or
-    indirectly to You by such Participant, the Initial Developer (if the
-    Initial Developer is not the Participant) and all Contributors under
-    Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice
-    from Participant terminate prospectively and automatically at the
-    expiration of such 60 day notice period, unless if within such 60
-    day period You withdraw Your claim with respect to the Participant
-    Software against such Participant either unilaterally or pursuant to
-    a written agreement with Participant.
-
-    6.3. If You assert a patent infringement claim against Participant
-    alleging that the Participant Software directly or indirectly
-    infringes any patent where such claim is resolved (such as by
-    license or settlement) prior to the initiation of patent
-    infringement litigation, then the reasonable value of the licenses
-    granted by such Participant under Sections 2.1 or 2.2 shall be taken
-    into account in determining the amount or value of any payment or
-    license.
-
-    6.4. In the event of termination under Sections 6.1 or 6.2 above,
-    all end user licenses that have been validly granted by You or any
-    distributor hereunder prior to termination (excluding licenses
-    granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
-    UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
-    (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE
-    INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF
-    COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE
-    TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR
-    CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT
-    LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER
-    FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR
-    LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE
-    POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT
-    APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH
-    PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH
-    LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR
-    LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION
-    AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
-    The Covered Software is a "commercial item," as that term is defined
-    in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
-    software" (as that term is defined at 48 C.F.R. §
-    252.227-7014(a)(1)) and "commercial computer software documentation"
-    as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent
-    with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4
-    (June 1995), all U.S. Government End Users acquire Covered Software
-    with only those rights set forth herein. This U.S. Government Rights
-    clause is in lieu of, and supersedes, any other FAR, DFAR, or other
-    clause or provision that addresses Government rights in computer
-    software under this License.
-
-9. MISCELLANEOUS.
-
-    This License represents the complete agreement concerning subject
-    matter hereof. If any provision of this License is held to be
-    unenforceable, such provision shall be reformed only to the extent
-    necessary to make it enforceable. This License shall be governed by
-    the law of the jurisdiction specified in a notice contained within
-    the Original Software (except to the extent applicable law, if any,
-    provides otherwise), excluding such jurisdiction's conflict-of-law
-    provisions. Any litigation relating to this License shall be subject
-    to the jurisdiction of the courts located in the jurisdiction and
-    venue specified in a notice contained within the Original Software,
-    with the losing party responsible for costs, including, without
-    limitation, court costs and reasonable attorneys' fees and expenses.
-    The application of the United Nations Convention on Contracts for
-    the International Sale of Goods is expressly excluded. Any law or
-    regulation which provides that the language of a contract shall be
-    construed against the drafter shall not apply to this License. You
-    agree that You alone are responsible for compliance with the United
-    States export administration regulations (and the export control
-    laws and regulation of any other countries) when You use, distribute
-    or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
-    As between Initial Developer and the Contributors, each party is
-    responsible for claims and damages arising, directly or indirectly,
-    out of its utilization of rights under this License and You agree to
-    work with Initial Developer and Contributors to distribute such
-    responsibility on an equitable basis. Nothing herein is intended or
-    shall be deemed to constitute any admission of liability.
-
-------------------------------------------------------------------------
-
-NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION
-LICENSE (CDDL)
-
-The code released under the CDDL shall be governed by the laws of the
-State of California (excluding conflict-of-law provisions). Any
-litigation relating to this License shall be subject to the jurisdiction
-of the Federal Courts of the Northern District of California and the
-state courts of the State of California, with venue lying in Santa Clara
-County, California.
-
-
-
-  The GNU General Public License (GPL) Version 2, June 1991
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-51 Franklin Street, Fifth Floor
-Boston, MA 02110-1335
-USA
-
-Everyone is permitted to copy and distribute verbatim copies
-of this license document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to
-share and change it. By contrast, the GNU General Public License is
-intended to guarantee your freedom to share and change free software--to
-make sure the software is free for all its users. This General Public
-License applies to most of the Free Software Foundation's software and
-to any other program whose authors commit to using it. (Some other Free
-Software Foundation software is covered by the GNU Library General
-Public License instead.) You can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price.
-Our General Public Licenses are designed to make sure that you have the
-freedom to distribute copies of free software (and charge for this
-service if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs; and that you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone
-to deny you these rights or to ask you to surrender the rights. These
-restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis
-or for a fee, you must give the recipients all the rights that you have.
-You must make sure that they, too, receive or can get the source code.
-And you must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software. If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-Finally, any free program is threatened constantly by software patents.
-We wish to avoid the danger that redistributors of a free program will
-individually obtain patent licenses, in effect making the program
-proprietary. To prevent this, we have made it clear that any patent must
-be licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and
-modification follow.
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a
-notice placed by the copyright holder saying it may be distributed under
-the terms of this General Public License. The "Program", below, refers
-to any such program or work, and a "work based on the Program" means
-either the Program or any derivative work under copyright law: that is
-to say, a work containing the Program or a portion of it, either
-verbatim or with modifications and/or translated into another language.
-(Hereinafter, translation is included without limitation in the term
-"modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope. The act of running
-the Program is not restricted, and the output from the Program is
-covered only if its contents constitute a work based on the Program
-(independent of having been made by running the Program). Whether that
-is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source
-code as you receive it, in any medium, provided that you conspicuously
-and appropriately publish on each copy an appropriate copyright notice
-and disclaimer of warranty; keep intact all the notices that refer to
-this License and to the absence of any warranty; and give any other
-recipients of the Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of
-it, thus forming a work based on the Program, and copy and distribute
-such modifications or work under the terms of Section 1 above, provided
-that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any part
-    thereof, to be licensed as a whole at no charge to all third parties
-    under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a notice
-    that there is no warranty (or else, saying that you provide a
-    warranty) and that users may redistribute the program under these
-    conditions, and telling the user how to view a copy of this License.
-    (Exception: if the Program itself is interactive but does not
-    normally print such an announcement, your work based on the Program
-    is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If
-identifiable sections of that work are not derived from the Program, and
-can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works. But when you
-distribute the same sections as part of a whole which is a work based on
-the Program, the distribution of the whole must be on the terms of this
-License, whose permissions for other licensees extend to the entire
-whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of a
-storage or distribution medium does not bring the other work under the
-scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections 1
-    and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your cost
-    of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer to
-    distribute corresponding source code. (This alternative is allowed
-    only for noncommercial distribution and only if you received the
-    program in object code or executable form with such an offer, in
-    accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it. For an executable work, complete source code
-means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to control
-compilation and installation of the executable. However, as a special
-exception, the source code distributed need not include anything that is
-normally distributed (in either source or binary form) with the major
-components (compiler, kernel, and so on) of the operating system on
-which the executable runs, unless that component itself accompanies the
-executable.
-
-If distribution of executable or object code is made by offering access
-to copy from a designated place, then offering equivalent access to copy
-the source code from the same place counts as distribution of the source
-code, even though third parties are not compelled to copy the source
-along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License. Any attempt otherwise
-to copy, modify, sublicense or distribute the Program is void, and will
-automatically terminate your rights under this License. However, parties
-who have received copies, or rights, from you under this License will
-not have their licenses terminated so long as such parties remain in
-full compliance.
-
-5. You are not required to accept this License, since you have not
-signed it. However, nothing else grants you permission to modify or
-distribute the Program or its derivative works. These actions are
-prohibited by law if you do not accept this License. Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and all
-its terms and conditions for copying, distributing or modifying the
-Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions. You may not impose any further restrictions
-on the recipients' exercise of the rights granted herein. You are not
-responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot distribute
-so as to satisfy simultaneously your obligations under this License and
-any other pertinent obligations, then as a consequence you may not
-distribute the Program at all. For example, if a patent license would
-not permit royalty-free redistribution of the Program by all those who
-receive copies directly or indirectly through you, then the only way you
-could satisfy both it and this License would be to refrain entirely from
-distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is implemented
-by public license practices. Many people have made generous
-contributions to the wide range of software distributed through that
-system in reliance on consistent application of that system; it is up to
-the author/donor to decide if he or she is willing to distribute
-software through any other system and a licensee cannot impose that choice.
-
-This section is intended to make thoroughly clear what is believed to be
-a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License may
-add an explicit geographical distribution limitation excluding those
-countries, so that distribution is permitted only in or among countries
-not thus excluded. In such case, this License incorporates the
-limitation as if written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new
-versions of the General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and
-conditions either of that version or of any later version published by
-the Free Software Foundation. If the Program does not specify a version
-number of this License, you may choose any version ever published by the
-Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the
-author to ask for permission. For software which is copyrighted by the
-Free Software Foundation, write to the Free Software Foundation; we
-sometimes make exceptions for this. Our decision will be guided by the
-two goals of preserving the free status of all derivatives of our free
-software and of promoting the sharing and reuse of software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO
-WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
-OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND,
-EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
-ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
-YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
-NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
-WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
-AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
-DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
-DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
-(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
-INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
-THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
-OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to
-attach them to the start of each source file to most effectively convey
-the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-    One line to give the program's name and a brief idea of what it does.
-    Copyright (C) <year> <name of author>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful, but
-    WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-    General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year name of author
-    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type
-    `show w'. This is free software, and you are welcome to redistribute
-    it under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the
-appropriate parts of the General Public License. Of course, the commands
-you use may be called something other than `show w' and `show c'; they
-could even be mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
-    Yoyodyne, Inc., hereby disclaims all copyright interest in the
-    program `Gnomovision' (which makes passes at compilers) written by
-    James Hacker.
-
-    signature of Ty Coon, 1 April 1989
-    Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications
-with the library. If this is what you want to do, use the GNU Library
-General Public License instead of this License.
-
-#
-
-Certain source files distributed by Oracle America, Inc. and/or its
-affiliates are subject to the following clarification and special
-exception to the GPLv2, based on the GNU Project exception for its
-Classpath libraries, known as the GNU Classpath Exception, but only
-where Oracle has expressly included in the particular source file's
-header the words "Oracle designates this particular file as subject to
-the "Classpath" exception as provided by Oracle in the LICENSE file
-that accompanied this code."
-
-You should also note that Oracle includes multiple, independent
-programs in this software package. Some of those programs are provided
-under licenses deemed incompatible with the GPLv2 by the Free Software
-Foundation and others.  For example, the package includes programs
-licensed under the Apache License, Version 2.0.  Such programs are
-licensed to you under their original licenses.
-
-Oracle facilitates your further distribution of this package by adding
-the Classpath Exception to the necessary parts of its GPLv2 code, which
-permits you to use that code in combination with other independent
-modules not licensed under the GPLv2.  However, note that this would
-not permit you to commingle code under an incompatible license with
-Oracle's GPLv2 licensed code by, for example, cutting and pasting such
-code into a file also containing Oracle's GPLv2 licensed code and then
-distributing the result.  Additionally, if you were to remove the
-Classpath Exception from any of the files to which it applies and
-distribute the result, you would likely be required to license some or
-all of the other code in that distribution under the GPLv2 as well, and
-since the GPLv2 is incompatible with the license terms of some items
-included in the distribution by Oracle, removing the Classpath
-Exception could therefore effectively compromise your ability to
-further distribute the package.
-
-Proceed with caution and we recommend that you obtain the advice of a
-lawyer skilled in open source matters before removing the Classpath
-Exception or making modifications to this package which may
-subsequently be redistributed and/or involve the use of third party
-software.
-
-CLASSPATH EXCEPTION
-Linking this library statically or dynamically with other modules is
-making a combined work based on this library.  Thus, the terms and
-conditions of the GNU General Public License version 2 cover the whole
-combination.
-
-As a special exception, the copyright holders of this library give you
-permission to link this library with independent modules to produce an
-executable, regardless of the license terms of these independent
-modules, and to copy and distribute the resulting executable under
-terms of your choice, provided that you also meet, for each linked
-independent module, the terms and conditions of the license of that
-module.  An independent module is a module which is not derived from or
-based on this library.  If you modify this library, you may extend this
-exception to your version of the library, but you are not obligated to
-do so.  If you do not wish to do so, delete this exception statement
-from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-core.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-core.txt
deleted file mode 100644
index b40a0f4..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-core.txt
+++ /dev/null
@@ -1,347 +0,0 @@
-The GNU General Public License (GPL)
-
-Version 2, June 1991
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this license
-document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to share
-and change it.  By contrast, the GNU General Public License is intended to
-guarantee your freedom to share and change free software--to make sure the
-software is free for all its users.  This General Public License applies to
-most of the Free Software Foundation's software and to any other program whose
-authors commit to using it.  (Some other Free Software Foundation software is
-covered by the GNU Library General Public License instead.) You can apply it to
-your programs, too.
-
-When we speak of free software, we are referring to freedom, not price.  Our
-General Public Licenses are designed to make sure that you have the freedom to
-distribute copies of free software (and charge for this service if you wish),
-that you receive source code or can get it if you want it, that you can change
-the software or use pieces of it in new free programs; and that you know you
-can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone to deny
-you these rights or to ask you to surrender the rights.  These restrictions
-translate to certain responsibilities for you if you distribute copies of the
-software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis or for
-a fee, you must give the recipients all the rights that you have.  You must
-make sure that they, too, receive or can get the source code.  And you must
-show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and (2)
-offer you this license which gives you legal permission to copy, distribute
-and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain that
-everyone understands that there is no warranty for this free software.  If the
-software is modified by someone else and passed on, we want its recipients to
-know that what they have is not the original, so that any problems introduced
-by others will not reflect on the original authors' reputations.
-
-Finally, any free program is threatened constantly by software patents.  We
-wish to avoid the danger that redistributors of a free program will
-individually obtain patent licenses, in effect making the program proprietary.
-To prevent this, we have made it clear that any patent must be licensed for
-everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and modification
-follow.
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a notice
-placed by the copyright holder saying it may be distributed under the terms of
-this General Public License.  The "Program", below, refers to any such program
-or work, and a "work based on the Program" means either the Program or any
-derivative work under copyright law: that is to say, a work containing the
-Program or a portion of it, either verbatim or with modifications and/or
-translated into another language.  (Hereinafter, translation is included
-without limitation in the term "modification".) Each licensee is addressed as
-"you".
-
-Activities other than copying, distribution and modification are not covered by
-this License; they are outside its scope.  The act of running the Program is
-not restricted, and the output from the Program is covered only if its contents
-constitute a work based on the Program (independent of having been made by
-running the Program).  Whether that is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source code as
-you receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice and
-disclaimer of warranty; keep intact all the notices that refer to this License
-and to the absence of any warranty; and give any other recipients of the
-Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and you may
-at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of it, thus
-forming a work based on the Program, and copy and distribute such modifications
-or work under the terms of Section 1 above, provided that you also meet all of
-these conditions:
-
-    a) You must cause the modified files to carry prominent notices stating
-    that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in whole or
-    in part contains or is derived from the Program or any part thereof, to be
-    licensed as a whole at no charge to all third parties under the terms of
-    this License.
-
-    c) If the modified program normally reads commands interactively when run,
-    you must cause it, when started running for such interactive use in the
-    most ordinary way, to print or display an announcement including an
-    appropriate copyright notice and a notice that there is no warranty (or
-    else, saying that you provide a warranty) and that users may redistribute
-    the program under these conditions, and telling the user how to view a copy
-    of this License.  (Exception: if the Program itself is interactive but does
-    not normally print such an announcement, your work based on the Program is
-    not required to print an announcement.)
-
-These requirements apply to the modified work as a whole.  If identifiable
-sections of that work are not derived from the Program, and can be reasonably
-considered independent and separate works in themselves, then this License, and
-its terms, do not apply to those sections when you distribute them as separate
-works.  But when you distribute the same sections as part of a whole which is a
-work based on the Program, the distribution of the whole must be on the terms
-of this License, whose permissions for other licensees extend to the entire
-whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest your
-rights to work written entirely by you; rather, the intent is to exercise the
-right to control the distribution of derivative or collective works based on
-the Program.
-
-In addition, mere aggregation of another work not based on the Program with the
-Program (or with a work based on the Program) on a volume of a storage or
-distribution medium does not bring the other work under the scope of this
-License.
-
-3. You may copy and distribute the Program (or a work based on it, under
-Section 2) in object code or executable form under the terms of Sections 1 and
-2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable source
-    code, which must be distributed under the terms of Sections 1 and 2 above
-    on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three years, to
-    give any third party, for a charge no more than your cost of physically
-    performing source distribution, a complete machine-readable copy of the
-    corresponding source code, to be distributed under the terms of Sections 1
-    and 2 above on a medium customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer to
-    distribute corresponding source code.  (This alternative is allowed only
-    for noncommercial distribution and only if you received the program in
-    object code or executable form with such an offer, in accord with
-    Subsection b above.)
-
-The source code for a work means the preferred form of the work for making
-modifications to it.  For an executable work, complete source code means all
-the source code for all modules it contains, plus any associated interface
-definition files, plus the scripts used to control compilation and installation
-of the executable.  However, as a special exception, the source code
-distributed need not include anything that is normally distributed (in either
-source or binary form) with the major components (compiler, kernel, and so on)
-of the operating system on which the executable runs, unless that component
-itself accompanies the executable.
-
-If distribution of executable or object code is made by offering access to copy
-from a designated place, then offering equivalent access to copy the source
-code from the same place counts as distribution of the source code, even though
-third parties are not compelled to copy the source along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program except as
-expressly provided under this License.  Any attempt otherwise to copy, modify,
-sublicense or distribute the Program is void, and will automatically terminate
-your rights under this License.  However, parties who have received copies, or
-rights, from you under this License will not have their licenses terminated so
-long as such parties remain in full compliance.
-
-5. You are not required to accept this License, since you have not signed it.
-However, nothing else grants you permission to modify or distribute the Program
-or its derivative works.  These actions are prohibited by law if you do not
-accept this License.  Therefore, by modifying or distributing the Program (or
-any work based on the Program), you indicate your acceptance of this License to
-do so, and all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the Program),
-the recipient automatically receives a license from the original licensor to
-copy, distribute or modify the Program subject to these terms and conditions.
-You may not impose any further restrictions on the recipients' exercise of the
-rights granted herein.  You are not responsible for enforcing compliance by
-third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues), conditions
-are imposed on you (whether by court order, agreement or otherwise) that
-contradict the conditions of this License, they do not excuse you from the
-conditions of this License.  If you cannot distribute so as to satisfy
-simultaneously your obligations under this License and any other pertinent
-obligations, then as a consequence you may not distribute the Program at all.
-For example, if a patent license would not permit royalty-free redistribution
-of the Program by all those who receive copies directly or indirectly through
-you, then the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under any
-particular circumstance, the balance of the section is intended to apply and
-the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any patents or
-other property right claims or to contest validity of any such claims; this
-section has the sole purpose of protecting the integrity of the free software
-distribution system, which is implemented by public license practices.  Many
-people have made generous contributions to the wide range of software
-distributed through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing to
-distribute software through any other system and a licensee cannot impose that
-choice.
-
-This section is intended to make thoroughly clear what is believed to be a
-consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in certain
-countries either by patents or by copyrighted interfaces, the original
-copyright holder who places the Program under this License may add an explicit
-geographical distribution limitation excluding those countries, so that
-distribution is permitted only in or among countries not thus excluded.  In
-such case, this License incorporates the limitation as if written in the body
-of this License.
-
-9. The Free Software Foundation may publish revised and/or new versions of the
-General Public License from time to time.  Such new versions will be similar in
-spirit to the present version, but may differ in detail to address new problems
-or concerns.
-
-Each version is given a distinguishing version number.  If the Program
-specifies a version number of this License which applies to it and "any later
-version", you have the option of following the terms and conditions either of
-that version or of any later version published by the Free Software Foundation.
-If the Program does not specify a version number of this License, you may
-choose any version ever published by the Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free programs
-whose distribution conditions are different, write to the author to ask for
-permission.  For software which is copyrighted by the Free Software Foundation,
-write to the Free Software Foundation; we sometimes make exceptions for this.
-Our decision will be guided by the two goals of preserving the free status of
-all derivatives of our free software and of promoting the sharing and reuse of
-software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR
-THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN OTHERWISE
-STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE
-PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
-INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND
-PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE,
-YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL
-ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE
-PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR
-INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA
-BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
-FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER
-OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest possible
-use to the public, the best way to achieve this is to make it free software
-which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program.  It is safest to attach
-them to the start of each source file to most effectively convey the exclusion
-of warranty; and each file should have at least the "copyright" line and a
-pointer to where the full notice is found.
-
-    One line to give the program's name and a brief idea of what it does.
-
-    Copyright (C) <year> <name of author>
-
-    This program is free software; you can redistribute it and/or modify it
-    under the terms of the GNU General Public License as published by the Free
-    Software Foundation; either version 2 of the License, or (at your option)
-    any later version.
-
-    This program is distributed in the hope that it will be useful, but WITHOUT
-    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-    more details.
-
-    You should have received a copy of the GNU General Public License along
-    with this program; if not, write to the Free Software Foundation, Inc., 59
-    Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this when it
-starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year name of author Gnomovision comes
-    with ABSOLUTELY NO WARRANTY; for details type 'show w'.  This is free
-    software, and you are welcome to redistribute it under certain conditions;
-    type 'show c' for details.
-
-The hypothetical commands 'show w' and 'show c' should show the appropriate
-parts of the General Public License.  Of course, the commands you use may be
-called something other than 'show w' and 'show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.  Here
-is a sample; alter the names:
-
-    Yoyodyne, Inc., hereby disclaims all copyright interest in the program
-    'Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-    signature of Ty Coon, 1 April 1989
-
-    Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs.  If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library.  If this is what you want to do, use the GNU Library General Public
-License instead of this License.
-
-
-"CLASSPATH" EXCEPTION TO THE GPL
-
-Certain source files distributed by Oracle America and/or its affiliates are
-subject to the following clarification and special exception to the GPL, but
-only where Oracle has expressly included in the particular source file's header
-the words "Oracle designates this particular file as subject to the "Classpath"
-exception as provided by Oracle in the LICENSE file that accompanied this code."
-
-    Linking this library statically or dynamically with other modules is making
-    a combined work based on this library.  Thus, the terms and conditions of
-    the GNU General Public License cover the whole combination.
-
-    As a special exception, the copyright holders of this library give you
-    permission to link this library with independent modules to produce an
-    executable, regardless of the license terms of these independent modules,
-    and to copy and distribute the resulting executable under terms of your
-    choice, provided that you also meet, for each linked independent module,
-    the terms and conditions of the license of that module.  An independent
-    module is a module which is not derived from or based on this library.  If
-    you modify this library, you may extend this exception to your version of
-    the library, but you are not obligated to do so.  If you do not wish to do
-    so, delete this exception statement from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-generator-annprocess.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-generator-annprocess.txt
deleted file mode 100644
index b40a0f4..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-generator-annprocess.txt
+++ /dev/null
@@ -1,347 +0,0 @@
-The GNU General Public License (GPL)
-
-Version 2, June 1991
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this license
-document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to share
-and change it.  By contrast, the GNU General Public License is intended to
-guarantee your freedom to share and change free software--to make sure the
-software is free for all its users.  This General Public License applies to
-most of the Free Software Foundation's software and to any other program whose
-authors commit to using it.  (Some other Free Software Foundation software is
-covered by the GNU Library General Public License instead.) You can apply it to
-your programs, too.
-
-When we speak of free software, we are referring to freedom, not price.  Our
-General Public Licenses are designed to make sure that you have the freedom to
-distribute copies of free software (and charge for this service if you wish),
-that you receive source code or can get it if you want it, that you can change
-the software or use pieces of it in new free programs; and that you know you
-can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone to deny
-you these rights or to ask you to surrender the rights.  These restrictions
-translate to certain responsibilities for you if you distribute copies of the
-software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis or for
-a fee, you must give the recipients all the rights that you have.  You must
-make sure that they, too, receive or can get the source code.  And you must
-show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and (2)
-offer you this license which gives you legal permission to copy, distribute
-and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain that
-everyone understands that there is no warranty for this free software.  If the
-software is modified by someone else and passed on, we want its recipients to
-know that what they have is not the original, so that any problems introduced
-by others will not reflect on the original authors' reputations.
-
-Finally, any free program is threatened constantly by software patents.  We
-wish to avoid the danger that redistributors of a free program will
-individually obtain patent licenses, in effect making the program proprietary.
-To prevent this, we have made it clear that any patent must be licensed for
-everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and modification
-follow.
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a notice
-placed by the copyright holder saying it may be distributed under the terms of
-this General Public License.  The "Program", below, refers to any such program
-or work, and a "work based on the Program" means either the Program or any
-derivative work under copyright law: that is to say, a work containing the
-Program or a portion of it, either verbatim or with modifications and/or
-translated into another language.  (Hereinafter, translation is included
-without limitation in the term "modification".) Each licensee is addressed as
-"you".
-
-Activities other than copying, distribution and modification are not covered by
-this License; they are outside its scope.  The act of running the Program is
-not restricted, and the output from the Program is covered only if its contents
-constitute a work based on the Program (independent of having been made by
-running the Program).  Whether that is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source code as
-you receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice and
-disclaimer of warranty; keep intact all the notices that refer to this License
-and to the absence of any warranty; and give any other recipients of the
-Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and you may
-at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of it, thus
-forming a work based on the Program, and copy and distribute such modifications
-or work under the terms of Section 1 above, provided that you also meet all of
-these conditions:
-
-    a) You must cause the modified files to carry prominent notices stating
-    that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in whole or
-    in part contains or is derived from the Program or any part thereof, to be
-    licensed as a whole at no charge to all third parties under the terms of
-    this License.
-
-    c) If the modified program normally reads commands interactively when run,
-    you must cause it, when started running for such interactive use in the
-    most ordinary way, to print or display an announcement including an
-    appropriate copyright notice and a notice that there is no warranty (or
-    else, saying that you provide a warranty) and that users may redistribute
-    the program under these conditions, and telling the user how to view a copy
-    of this License.  (Exception: if the Program itself is interactive but does
-    not normally print such an announcement, your work based on the Program is
-    not required to print an announcement.)
-
-These requirements apply to the modified work as a whole.  If identifiable
-sections of that work are not derived from the Program, and can be reasonably
-considered independent and separate works in themselves, then this License, and
-its terms, do not apply to those sections when you distribute them as separate
-works.  But when you distribute the same sections as part of a whole which is a
-work based on the Program, the distribution of the whole must be on the terms
-of this License, whose permissions for other licensees extend to the entire
-whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest your
-rights to work written entirely by you; rather, the intent is to exercise the
-right to control the distribution of derivative or collective works based on
-the Program.
-
-In addition, mere aggregation of another work not based on the Program with the
-Program (or with a work based on the Program) on a volume of a storage or
-distribution medium does not bring the other work under the scope of this
-License.
-
-3. You may copy and distribute the Program (or a work based on it, under
-Section 2) in object code or executable form under the terms of Sections 1 and
-2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable source
-    code, which must be distributed under the terms of Sections 1 and 2 above
-    on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three years, to
-    give any third party, for a charge no more than your cost of physically
-    performing source distribution, a complete machine-readable copy of the
-    corresponding source code, to be distributed under the terms of Sections 1
-    and 2 above on a medium customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer to
-    distribute corresponding source code.  (This alternative is allowed only
-    for noncommercial distribution and only if you received the program in
-    object code or executable form with such an offer, in accord with
-    Subsection b above.)
-
-The source code for a work means the preferred form of the work for making
-modifications to it.  For an executable work, complete source code means all
-the source code for all modules it contains, plus any associated interface
-definition files, plus the scripts used to control compilation and installation
-of the executable.  However, as a special exception, the source code
-distributed need not include anything that is normally distributed (in either
-source or binary form) with the major components (compiler, kernel, and so on)
-of the operating system on which the executable runs, unless that component
-itself accompanies the executable.
-
-If distribution of executable or object code is made by offering access to copy
-from a designated place, then offering equivalent access to copy the source
-code from the same place counts as distribution of the source code, even though
-third parties are not compelled to copy the source along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program except as
-expressly provided under this License.  Any attempt otherwise to copy, modify,
-sublicense or distribute the Program is void, and will automatically terminate
-your rights under this License.  However, parties who have received copies, or
-rights, from you under this License will not have their licenses terminated so
-long as such parties remain in full compliance.
-
-5. You are not required to accept this License, since you have not signed it.
-However, nothing else grants you permission to modify or distribute the Program
-or its derivative works.  These actions are prohibited by law if you do not
-accept this License.  Therefore, by modifying or distributing the Program (or
-any work based on the Program), you indicate your acceptance of this License to
-do so, and all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the Program),
-the recipient automatically receives a license from the original licensor to
-copy, distribute or modify the Program subject to these terms and conditions.
-You may not impose any further restrictions on the recipients' exercise of the
-rights granted herein.  You are not responsible for enforcing compliance by
-third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues), conditions
-are imposed on you (whether by court order, agreement or otherwise) that
-contradict the conditions of this License, they do not excuse you from the
-conditions of this License.  If you cannot distribute so as to satisfy
-simultaneously your obligations under this License and any other pertinent
-obligations, then as a consequence you may not distribute the Program at all.
-For example, if a patent license would not permit royalty-free redistribution
-of the Program by all those who receive copies directly or indirectly through
-you, then the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under any
-particular circumstance, the balance of the section is intended to apply and
-the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any patents or
-other property right claims or to contest validity of any such claims; this
-section has the sole purpose of protecting the integrity of the free software
-distribution system, which is implemented by public license practices.  Many
-people have made generous contributions to the wide range of software
-distributed through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing to
-distribute software through any other system and a licensee cannot impose that
-choice.
-
-This section is intended to make thoroughly clear what is believed to be a
-consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in certain
-countries either by patents or by copyrighted interfaces, the original
-copyright holder who places the Program under this License may add an explicit
-geographical distribution limitation excluding those countries, so that
-distribution is permitted only in or among countries not thus excluded.  In
-such case, this License incorporates the limitation as if written in the body
-of this License.
-
-9. The Free Software Foundation may publish revised and/or new versions of the
-General Public License from time to time.  Such new versions will be similar in
-spirit to the present version, but may differ in detail to address new problems
-or concerns.
-
-Each version is given a distinguishing version number.  If the Program
-specifies a version number of this License which applies to it and "any later
-version", you have the option of following the terms and conditions either of
-that version or of any later version published by the Free Software Foundation.
-If the Program does not specify a version number of this License, you may
-choose any version ever published by the Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free programs
-whose distribution conditions are different, write to the author to ask for
-permission.  For software which is copyrighted by the Free Software Foundation,
-write to the Free Software Foundation; we sometimes make exceptions for this.
-Our decision will be guided by the two goals of preserving the free status of
-all derivatives of our free software and of promoting the sharing and reuse of
-software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR
-THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN OTHERWISE
-STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE
-PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
-INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND
-PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE,
-YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL
-ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE
-PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR
-INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA
-BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
-FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER
-OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest possible
-use to the public, the best way to achieve this is to make it free software
-which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program.  It is safest to attach
-them to the start of each source file to most effectively convey the exclusion
-of warranty; and each file should have at least the "copyright" line and a
-pointer to where the full notice is found.
-
-    One line to give the program's name and a brief idea of what it does.
-
-    Copyright (C) <year> <name of author>
-
-    This program is free software; you can redistribute it and/or modify it
-    under the terms of the GNU General Public License as published by the Free
-    Software Foundation; either version 2 of the License, or (at your option)
-    any later version.
-
-    This program is distributed in the hope that it will be useful, but WITHOUT
-    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-    more details.
-
-    You should have received a copy of the GNU General Public License along
-    with this program; if not, write to the Free Software Foundation, Inc., 59
-    Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this when it
-starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year name of author Gnomovision comes
-    with ABSOLUTELY NO WARRANTY; for details type 'show w'.  This is free
-    software, and you are welcome to redistribute it under certain conditions;
-    type 'show c' for details.
-
-The hypothetical commands 'show w' and 'show c' should show the appropriate
-parts of the General Public License.  Of course, the commands you use may be
-called something other than 'show w' and 'show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.  Here
-is a sample; alter the names:
-
-    Yoyodyne, Inc., hereby disclaims all copyright interest in the program
-    'Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-    signature of Ty Coon, 1 April 1989
-
-    Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs.  If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library.  If this is what you want to do, use the GNU Library General Public
-License instead of this License.
-
-
-"CLASSPATH" EXCEPTION TO THE GPL
-
-Certain source files distributed by Oracle America and/or its affiliates are
-subject to the following clarification and special exception to the GPL, but
-only where Oracle has expressly included in the particular source file's header
-the words "Oracle designates this particular file as subject to the "Classpath"
-exception as provided by Oracle in the LICENSE file that accompanied this code."
-
-    Linking this library statically or dynamically with other modules is making
-    a combined work based on this library.  Thus, the terms and conditions of
-    the GNU General Public License cover the whole combination.
-
-    As a special exception, the copyright holders of this library give you
-    permission to link this library with independent modules to produce an
-    executable, regardless of the license terms of these independent modules,
-    and to copy and distribute the resulting executable under terms of your
-    choice, provided that you also meet, for each linked independent module,
-    the terms and conditions of the license of that module.  An independent
-    module is a module which is not derived from or based on this library.  If
-    you modify this library, you may extend this exception to your version of
-    the library, but you are not obligated to do so.  If you do not wish to do
-    so, delete this exception statement from your version.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.ow2.asm-asm.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.ow2.asm-asm.txt
deleted file mode 100644
index 4d19185..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.ow2.asm-asm.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-
- ASM: a very small and fast Java bytecode manipulation framework
- Copyright (c) 2000-2011 INRIA, France Telecom
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. Neither the name of the copyright holders nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.slf4j.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.slf4j.txt
deleted file mode 100644
index 744377c..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.slf4j.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Copyright (c) 2004-2017 QOS.ch
-All rights reserved.
-
-Permission is hereby granted, free  of charge, to any person obtaining
-a  copy  of this  software  and  associated  documentation files  (the
-"Software"), to  deal in  the Software without  restriction, including
-without limitation  the rights to  use, copy, modify,  merge, publish,
-distribute,  sublicense, and/or sell  copies of  the Software,  and to
-permit persons to whom the Software  is furnished to do so, subject to
-the following conditions:
-
-The  above  copyright  notice  and  this permission  notice  shall  be
-included in all copies or substantial portions of the Software.
-
-THE  SOFTWARE IS  PROVIDED  "AS  IS", WITHOUT  WARRANTY  OF ANY  KIND,
-EXPRESS OR  IMPLIED, INCLUDING  BUT NOT LIMITED  TO THE  WARRANTIES OF
-MERCHANTABILITY,    FITNESS    FOR    A   PARTICULAR    PURPOSE    AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE,  ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-protobuf.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-protobuf.txt
deleted file mode 100644
index 19b305b..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-protobuf.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Copyright 2008 Google Inc.  All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Code generated by the Protocol Buffer compiler is owned by the owner
-of the input file used when generating it.  This code is not
-standalone and requires a support library to be linked with it.  This
-support library is itself covered by the above license.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-ratis-thirdparty-misc.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-ratis-thirdparty-misc.txt
deleted file mode 100644
index 31b5318..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-ratis-thirdparty-misc.txt
+++ /dev/null
@@ -1,353 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
-================================================================================================
-
-Apache Ratis subcomponents
-
-Apache Ratis includes a number of subcomponents with separate copyright notices and license terms.
-Your use of the source code for these subcomponents is subject to the terms and conditions of the
-following licenses.
-
-------------------------------------------------------------------------------------------------
-This product bundles SLF4J artifacts which are available under the following licence:
-
- Copyright (c) 2004-2017 QOS.ch
- All rights reserved.
-
- Permission is hereby granted, free  of charge, to any person obtaining
- a  copy  of this  software  and  associated  documentation files  (the
- "Software"), to  deal in  the Software without  restriction, including
- without limitation  the rights to  use, copy, modify,  merge, publish,
- distribute,  sublicense, and/or sell  copies of  the Software,  and to
- permit persons to whom the Software  is furnished to do so, subject to
- the following conditions:
-
- The  above  copyright  notice  and  this permission  notice  shall  be
- included in all copies or substantial portions of the Software.
-
- THE  SOFTWARE IS  PROVIDED  "AS  IS", WITHOUT  WARRANTY  OF ANY  KIND,
- EXPRESS OR  IMPLIED, INCLUDING  BUT NOT LIMITED  TO THE  WARRANTIES OF
- MERCHANTABILITY,    FITNESS    FOR    A   PARTICULAR    PURPOSE    AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- OF CONTRACT, TORT OR OTHERWISE,  ARISING FROM, OUT OF OR IN CONNECTION
- WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-------------------------------------------------------------------------------------------------
-This product bundles Google Protobuf which is available under the following licence:
-
-Copyright 2008 Google Inc.  All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Code generated by the Protocol Buffer compiler is owned by the owner
-of the input file used when generating it.  This code is not
-standalone and requires a support library to be linked with it.  This
-support library is itself covered by the above license.
-
-------------------------------------------------------------------------------------------------
-This product bundles artifacts from the following projects
-which are available under the Apache License 2.0.
-
-Google Guava
-Netty
-Opencensus
-Grpc
-JCTools
-JCommander
-Javapoet
-J2objc (annotations)
-Google Error prone annotations
-Google auto value annotations
-
-------------------------------------------------------------------------------------------------
-The annotations from typetools/checker-framework and animal-sniffer/animal-sniffer-annotations
-are licensed under the MIT License:
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-------------------------------------------------------------------------------------------------
-The product bundles a Crc32 implementation in ratis-common, which is licensed under
-Apache License 2.0 with the following comment:
-
-Some portions of this file Copyright (c) 2004-2006 Intel Corporation
-and licensed under the BSD license.
-
-BSD license:
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided
-that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation
-and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------------------------------------
-The product bundles annotations from checkerframework, which is licensed under MIT:
-
-"The annotations are licensed under the MIT License"
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/NOTICE-ratis-thirtparty-misc.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/NOTICE-ratis-thirtparty-misc.txt
deleted file mode 100644
index 7e3cbd6..0000000
--- a/hadoop-ozone/dist/src/main/license/bin/licenses/NOTICE-ratis-thirtparty-misc.txt
+++ /dev/null
@@ -1,340 +0,0 @@
-Apache Ratis
-Copyright 2017-2019 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-The binary distribution of this project bundles binaries of
-
---------------------------------------------------------------------------------
-Netty artifacts (io.netty:netty-*)
-
-Licensed under Apache License 2.0 with the following notice:
-
-The Netty Project
-
-Please visit the Netty web site for more information:
-
-  * http://netty.io/
-
-Copyright 2014 The Netty Project
-
-The Netty Project licenses this file to you under the Apache License,
-version 2.0 (the "License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at:
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-License for the specific language governing permissions and limitations
-under the License.
-
-Also, please refer to each LICENSE.<component>.txt file, which is located in
-the 'license' directory of the distribution file, for the license terms of the
-components that this product depends on.
-
-This product contains the extensions to the Java Collections Framework which have
-been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
-
-  * LICENSE:
-    * license/LICENSE.jsr166y.txt (Public Domain)
-  * HOMEPAGE:
-    * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
-    * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
-
-This product contains a modified version of Robert Harder's Public Domain
-Base64 Encoder and Decoder, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.base64.txt (Public Domain)
-  * HOMEPAGE:
-    * http://iharder.sourceforge.net/current/java/base64/
-
-This product contains a modified portion of 'Webbit', an event based
-WebSocket and HTTP server, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.webbit.txt (BSD License)
-  * HOMEPAGE:
-    * https://github.com/joewalnes/webbit
-
-This product contains a modified portion of 'SLF4J', a simple logging
-facade for Java, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.slf4j.txt (MIT License)
-  * HOMEPAGE:
-    * http://www.slf4j.org/
-
-This product contains a modified portion of 'Apache Harmony', an open source
-Java SE, which can be obtained at:
-
-  * NOTICE:
-    * license/NOTICE.harmony.txt
-  * LICENSE:
-    * license/LICENSE.harmony.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * http://archive.apache.org/dist/harmony/
-
-This product contains a modified portion of 'jbzip2', a Java bzip2 compression
-and decompression library written by Matthew J. Francis. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.jbzip2.txt (MIT License)
-  * HOMEPAGE:
-    * https://code.google.com/p/jbzip2/
-
-This product contains a modified portion of 'libdivsufsort', a C API library to construct
-the suffix array and the Burrows-Wheeler transformed string for any input string of
-a constant-size alphabet written by Yuta Mori. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.libdivsufsort.txt (MIT License)
-  * HOMEPAGE:
-    * https://github.com/y-256/libdivsufsort
-
-This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
- which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.jctools.txt (ASL2 License)
-  * HOMEPAGE:
-    * https://github.com/JCTools/JCTools
-
-This product optionally depends on 'JZlib', a re-implementation of zlib in
-pure Java, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.jzlib.txt (BSD style License)
-  * HOMEPAGE:
-    * http://www.jcraft.com/jzlib/
-
-This product optionally depends on 'Compress-LZF', a Java library for encoding and
-decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.compress-lzf.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/ning/compress
-
-This product optionally depends on 'lz4', a LZ4 Java compression
-and decompression library written by Adrien Grand. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.lz4.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/jpountz/lz4-java
-
-This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
-and decompression library written by William Kinney. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.jfastlz.txt (MIT License)
-  * HOMEPAGE:
-    * https://code.google.com/p/jfastlz/
-
-This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
-interchange format, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.protobuf.txt (New BSD License)
-  * HOMEPAGE:
-    * https://github.com/google/protobuf
-
-This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
-a temporary self-signed X.509 certificate when the JVM does not provide the
-equivalent functionality.  It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.bouncycastle.txt (MIT License)
-  * HOMEPAGE:
-    * http://www.bouncycastle.org/
-
-This product optionally depends on 'Snappy', a compression library produced
-by Google Inc, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.snappy.txt (New BSD License)
-  * HOMEPAGE:
-    * https://github.com/google/snappy
-
-This product optionally depends on 'JBoss Marshalling', an alternative Java
-serialization API, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1)
-  * HOMEPAGE:
-    * http://www.jboss.org/jbossmarshalling
-
-This product optionally depends on 'Caliper', Google's micro-
-benchmarking framework, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.caliper.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/google/caliper
-
-This product optionally depends on 'Apache Commons Logging', a logging
-framework, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.commons-logging.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * http://commons.apache.org/logging/
-
-This product optionally depends on 'Apache Log4J', a logging framework, which
-can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.log4j.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * http://logging.apache.org/log4j/
-
-This product optionally depends on 'Aalto XML', an ultra-high performance
-non-blocking XML processor, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.aalto-xml.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * http://wiki.fasterxml.com/AaltoHome
-
-This product contains a modified version of 'HPACK', a Java implementation of
-the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.hpack.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/twitter/hpack
-
-This product contains a modified portion of 'Apache Commons Lang', a Java library
-that provides utilities for the java.lang API, which can be obtained at:
-
-  * LICENSE:
-    * license/LICENSE.commons-lang.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://commons.apache.org/proper/commons-lang/
-
-
-This product contains the Maven wrapper scripts from 'Maven Wrapper', which provides an easy way to ensure a user has everything necessary to run the Maven build.
-
-  * LICENSE:
-    * license/LICENSE.mvn-wrapper.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/takari/maven-wrapper
-
------------------------------------------------------------------------
-Coda Hale / Dropwizard metrics (3.x)
-
-Licensed under Apache License 2.0 with the following notice:
-
-Metrics
-Copyright 2010-2013 Coda Hale and Yammer, Inc.
-
-This product includes software developed by Coda Hale and Yammer, Inc.
-
-This product includes code derived from the JSR-166 project (ThreadLocalRandom, Striped64,
-LongAdder), which was released with the following comments:
-
-    Written by Doug Lea with assistance from members of JCP JSR-166
-    Expert Group and released to the public domain, as explained at
-    http://creativecommons.org/publicdomain/zero/1.0/
-
------------------------------------------------------------------------
-JCommander
-
-Licensed under Apache License 2.0 with the following notice:
-
-JCommander Copyright Notices
-============================
-
-Copyright 2010 Cedric Beust <cedric@beust.com>
-
-
------------------------------------------------------------------------
-GRPC-java
-
-Licensed under Apache License 2.0 with the following notice:
-
-Copyright 2014 The gRPC Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-
-This product contains a modified portion of 'OkHttp', an open source
-HTTP & SPDY client for Android and Java applications, which can be obtained
-at:
-
-  * LICENSE:
-    * okhttp/third_party/okhttp/LICENSE (Apache License 2.0)
-  * HOMEPAGE:
-    * https://github.com/square/okhttp
-  * LOCATION_IN_GRPC:
-    * okhttp/third_party/okhttp
-
-This product contains a modified portion of 'Netty', an open source
-networking library, which can be obtained at:
-
-  * LICENSE:
-    * netty/third_party/netty/LICENSE.txt (Apache License 2.0)
-  * HOMEPAGE:
-    * https://netty.io
-  * LOCATION_IN_GRPC:
-    * netty/third_party/netty
------------------------------------------------------------------------
-The JSR-305 reference implementation (jsr305.jar) is distributed under the terms of the New BSD:
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice,
-this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice,
-this list of conditions and the following disclaimer in the documentation and/or
-other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
-OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
-AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------------------------------------------------------------------------
-This product uses the dropwizard-hadoop-metrics2.
-
-Copyright 2016 Josh Elser
-
-Licensed under the Apache License v2.0
-
------------------------------------------------------------------------
-This product uses https://github.com/mbocek/docker-ganglia/
-
-Contributed by Michal Bocek
-
-Licensed under the Apache License v2.0
-https://github.com/mbocek/docker-ganglia/blob/master/LICENSE
-
------------------------------------------------------------------------
-This product uses https://github.com/graphite-project/docker-graphite-statsd
-
-Copyright (c) 2013-2016 Nathan Hopkins
-
-Licensed under the MIT License
-
---
diff --git a/hadoop-ozone/dist/src/main/license/src/LICENSE.txt b/hadoop-ozone/dist/src/main/license/src/LICENSE.txt
deleted file mode 100644
index 4b1b8c0..0000000
--- a/hadoop-ozone/dist/src/main/license/src/LICENSE.txt
+++ /dev/null
@@ -1,239 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
---------------------------------------------------------------------------------
-This product bundles various third-party components under other open source
-licenses. This section summarizes those components and their licenses.
-See licenses/ for text of these licenses.
-
-
-Apache Software Foundation License 2.0
---------------------------------------
-
-hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map
-hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css.map
-hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js
-hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
-hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java
-
-
-BSD 3-Clause
-------------
-
-hadoop-hdds/framework/src/main/resources/webapps/static/d3-3.5.17.min.js
-hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/d3-3.5.17.min.js
-hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-*
-hadoop-hdds/docs/themes/ozonedoc/static/js/bootstrap.min.js
-
-MIT License
------------
-
-hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1
-hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap-*
-
-hadoop-hdds/framework/src/main/resources/webapps/static/angular-route-1.6.4.min.js
-hadoop-hdds/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js
-hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.6.4.min.js
-
-hadoop-hdds/framework/src/main/resources/webapps/static/jquery-3.4.1.min.js
-hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js
diff --git a/hadoop-ozone/dist/src/main/license/src/NOTICE.txt b/hadoop-ozone/dist/src/main/license/src/NOTICE.txt
deleted file mode 100644
index 2803728..0000000
--- a/hadoop-ozone/dist/src/main/license/src/NOTICE.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-Apache Hadoop
-Copyright 2006 and onwards The Apache Software Foundation.
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-Export Control Notice
----------------------
-
-This distribution includes cryptographic software.  The country in
-which you currently reside may have restrictions on the import,
-possession, use, and/or re-export to another country, of
-encryption software.  BEFORE using any encryption software, please
-check your country's laws, regulations and policies concerning the
-import, possession, or use, and re-export of encryption software, to
-see if this is permitted.  See <http://www.wassenaar.org/> for more
-information.
-
-The U.S. Government Department of Commerce, Bureau of Industry and
-Security (BIS), has classified this software as Export Commodity
-Control Number (ECCN) 5D002.C.1, which includes information security
-software using or performing cryptographic functions with asymmetric
-algorithms.  The form and manner of this Apache Software Foundation
-distribution makes it eligible for export under the License Exception
-ENC Technology Software Unrestricted (TSU) exception (see the BIS
-Export Administration Regulations, Section 740.13) for both object
-code and source code.
-
-The following provides more details on the included cryptographic software:
-
-This software uses the SSL libraries from the Jetty project written
-by mortbay.org and the BouncyCastle Java cryptography APIs written by the
-Legion of the Bouncy Castle Inc.
diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md b/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md
deleted file mode 100644
index 2581412..0000000
--- a/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md
+++ /dev/null
@@ -1,21 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-# Important
-
-The files in this directory are not copied automatically to the source distribution package.
-
-If you add any files here,
- * please also adjust the `hadoop-ozone/dist/src/main/assemblies/ozone-src.xml` file,
- * and copy the dependency to ../../bin/licenses (if it's included in the bin tar).
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular-nvd3.txt b/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular-nvd3.txt
deleted file mode 100644
index d96c6fc..0000000
--- a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular-nvd3.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-The MIT License (MIT)
-Copyright (c) 2014 Konstantin Skipor
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software
-and associated documentation files (the "Software"), to deal in the Software without restriction,
-including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
-and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
-LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
-OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular.txt b/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular.txt
deleted file mode 100644
index 6f3880f..0000000
--- a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License
-
-Copyright (c) 2010-2017 Google, Inc. http://angularjs.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-d3.txt b/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-d3.txt
deleted file mode 100644
index c71e3f2..0000000
--- a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-d3.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) 2010-2015, Michael Bostock
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-* The name Michael Bostock may not be used to endorse or promote products
-  derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT,
-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-jquery.txt b/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-jquery.txt
deleted file mode 100644
index 4593054..0000000
--- a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-jquery.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright JS Foundation and other contributors, https://js.foundation/
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-nvd3.txt b/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-nvd3.txt
deleted file mode 100644
index 0955544..0000000
--- a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-nvd3.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-Copyright (c) 2011-2014 Novus Partners, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
-file except in compliance with the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software distributed under the
-License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-express or implied. See the License for the specific language governing permissions and
-limitations under the License.
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/ozone/README.txt b/hadoop-ozone/dist/src/main/ozone/README.txt
deleted file mode 100644
index 6bbd83f..0000000
--- a/hadoop-ozone/dist/src/main/ozone/README.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-This is the distribution of Apache Hadoop Ozone.
-
-Ozone is a submodule of Hadoop with a separate release cycle. For more information, check
-
-   http://ozone.hadoop.apache.org
-
-   and
-
-   https://cwiki.apache.org/confluence/display/HADOOP/Ozone+Contributor+Guide
-
-For more information about Hadoop, check:
-
-   http://hadoop.apache.org
-
-This distribution includes cryptographic software.  The country in
-which you currently reside may have restrictions on the import,
-possession, use, and/or re-export to another country, of
-encryption software.  BEFORE using any encryption software, please
-check your country's laws, regulations and policies concerning the
-import, possession, or use, and re-export of encryption software, to
-see if this is permitted.  See <http://www.wassenaar.org/> for more
-information.
-
-The U.S. Government Department of Commerce, Bureau of Industry and
-Security (BIS), has classified this software as Export Commodity
-Control Number (ECCN) 5D002.C.1, which includes information security
-software using or performing cryptographic functions with asymmetric
-algorithms.  The form and manner of this Apache Software Foundation
-distribution makes it eligible for export under the License Exception
-ENC Technology Software Unrestricted (TSU) exception (see the BIS
-Export Administration Regulations, Section 740.13) for both object
-code and source code.
-
-The following provides more details on the included cryptographic
-software:
-  Hadoop Core uses the SSL libraries from the Jetty project written
-by mortbay.org.
diff --git a/hadoop-ozone/dist/src/main/smoketest/.env b/hadoop-ozone/dist/src/main/smoketest/.env
deleted file mode 100644
index 47a25e1..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/.env
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HADOOP_VERSION=3
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/smoketest/README.md b/hadoop-ozone/dist/src/main/smoketest/README.md
deleted file mode 100644
index d181b8a..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-## Ozone Acceptance Tests
-
-This directory contains a [robotframework](http://robotframework.org/)-based test suite for Ozone that makes it easier to check the current state of the package.
-
-You can run it in any environment after [installing robotframework](https://github.com/robotframework/robotframework/blob/master/INSTALL.rst):
-
-```
-cd $DIRECTORY_OF_OZONE
-robot smoketest/basic
-```
-
-The argument of the `robot` command can be any robot file or directory.
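-
-For example, to run a single suite file instead of a whole directory (this one ships in the package):
-
-```
-robot smoketest/basic/basic.robot
-```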
-
-The current configuration in the robot files (hostnames, ports) is adjusted for the docker-based setup, but you can easily modify it for any environment.
-
-## Run tests in docker environment
-
-The ./compose folder contains additional test scripts that make it easy to run all tests, or a specific test, in a docker-based environment.
-
-### Test one environment
-
-Go to the compose directory and execute `test.sh` directly from there:
-
-```
-cd compose/ozone
-./test.sh
-```
-
-The results will be saved to the `compose/ozone/results` directory.
-
-### Run all the tests
-
-```
-cd compose
-./test-all.sh
-```
-
-The results will be combined into the `compose/results` folder.
-
-### Run one specific test case
-
-Start the compose environment and execute the test:
-
-```
-cd compose/ozone
-docker-compose up -d
-# wait until the cluster is ready
-../test-single.sh scm basic/basic.robot
-```
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/smoketest/__init__.robot b/hadoop-ozone/dist/src/main/smoketest/__init__.robot
deleted file mode 100644
index f8835df..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/__init__.robot
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-*** Settings ***
-Documentation       Smoketest ozone secure cluster
-Resource            commonlib.robot
-Suite Setup         Run Keyword if    '${SECURITY_ENABLED}' == 'true'    Kinit test user     testuser     testuser.keytab
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/smoketest/auditparser/auditparser.robot b/hadoop-ozone/dist/src/main/smoketest/auditparser/auditparser.robot
deleted file mode 100644
index 1caae75..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/auditparser/auditparser.robot
+++ /dev/null
@@ -1,53 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Smoketest ozone audit parser
-Library             OperatingSystem
-Library             BuiltIn
-Resource            ../commonlib.robot
-
-*** Variables ***
-${user}              hadoop
-${count}             4
-${auditworkdir}      /tmp/
-
-*** Keywords ***
-Set username
-    ${hostname} =          Execute         hostname
-    Set Suite Variable     ${user}         testuser/${hostname}@EXAMPLE.COM
-    [return]               ${user}
-
-*** Test Cases ***
-Initiating freon to generate data
-    ${result} =        Execute              ozone freon randomkeys --numOfVolumes 5 --numOfBuckets 5 --numOfKeys 5 --numOfThreads 1
-                       Wait Until Keyword Succeeds      3min       10sec     Should contain   ${result}   Number of Keys added: 125
-                       Should Not Contain               ${result}  ERROR
-
-Testing audit parser
-    ${logdir} =        Get Environment Variable      HADOOP_LOG_DIR     /var/log/hadoop
-    ${logfile} =       Execute              ls -t "${logdir}" | grep om-audit | head -1
-                       Execute              ozone auditparser "${auditworkdir}/audit.db" load "${logdir}/${logfile}"
-    ${result} =        Execute              ozone auditparser "${auditworkdir}/audit.db" template top5cmds
-                       Should Contain       ${result}  ALLOCATE_KEY
-    ${result} =        Execute              ozone auditparser "${auditworkdir}/audit.db" template top5users
-    Run Keyword If     '${SECURITY_ENABLED}' == 'true'      Set username
-                       Should Contain       ${result}  ${user}
-    ${result} =        Execute              ozone auditparser "${auditworkdir}/audit.db" query "select count(*) from audit where op='CREATE_VOLUME' and RESULT='SUCCESS'"
-    ${result} =        Convert To Number     ${result}
-                       Should be true       ${result}>${count}
-    ${result} =        Execute              ozone auditparser "${auditworkdir}/audit.db" query "select count(*) from audit where op='CREATE_BUCKET' and RESULT='SUCCESS'"
-    ${result} =        Convert To Number     ${result}
-                       Should be true       ${result}>${count}
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
deleted file mode 100644
index edaee5e..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
+++ /dev/null
@@ -1,35 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Smoketest ozone cluster startup
-Library             OperatingSystem
-Resource            ../commonlib.robot
-
-*** Variables ***
-${DATANODE_HOST}        datanode
-
-
-*** Test Cases ***
-
-Check webui static resources
-    Run Keyword if    '${SECURITY_ENABLED}' == 'true'    Kinit HTTP user
-    ${result} =        Execute                curl --negotiate -u : -s -I http://scm:9876/static/bootstrap-3.4.1/js/bootstrap.min.js
-                       Should contain         ${result}    200
-
-Start freon testing
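-    # freon writes 5 volumes x 5 buckets x 5 keys = 125 keys, hence the expected count below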
-    ${result} =        Execute              ozone freon randomkeys --numOfVolumes 5 --numOfBuckets 5 --numOfKeys 5 --numOfThreads 1
-                       Wait Until Keyword Succeeds      3min       10sec     Should contain   ${result}   Number of Keys added: 125
-                       Should Not Contain               ${result}  ERROR
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
deleted file mode 100644
index 689e4af..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
+++ /dev/null
@@ -1,138 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Test ozone shell CLI usage
-Library             OperatingSystem
-Resource            ../commonlib.robot
-Test Setup          Run Keyword if    '${SECURITY_ENABLED}' == 'true'    Kinit test user     testuser     testuser.keytab
-Test Timeout        2 minute
-Suite Setup         Generate prefix
-
-*** Variables ***
-${prefix}    generated
-
-*** Keywords ***
-Generate prefix
-   ${random} =         Generate Random String  5  [NUMBERS]
-   Set Suite Variable  ${prefix}  ${random}
-
-*** Test Cases ***
-RpcClient with port
-   Test ozone shell       o3://            om:9862     ${prefix}-rpcwoport
-
-RpcClient volume acls
-   Test Volume Acls       o3://            om:9862     ${prefix}-rpcwoport2
-
-RpcClient bucket acls
-    Test Bucket Acls      o3://            om:9862     ${prefix}-rpcwoport2
-
-RpcClient key acls
-    Test Key Acls         o3://            om:9862     ${prefix}-rpcwoport2
-
-RpcClient without host
-    Test ozone shell      o3://            ${EMPTY}    ${prefix}-rpcwport
-
-RpcClient without scheme
-    Test ozone shell      ${EMPTY}         ${EMPTY}    ${prefix}-rpcwoscheme
-
-
-*** Keywords ***
-Test ozone shell
-    [arguments]     ${protocol}         ${server}       ${volume}
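-    # the grep -Ev filters below strip client log lines so that jq receives clean JSON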
-    ${result} =     Execute             ozone sh volume create ${protocol}${server}/${volume} --quota 100TB
-                    Should not contain  ${result}       Failed
-    ${result} =     Execute             ozone sh volume list ${protocol}${server}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="${volume}")'
-                    Should contain      ${result}       creationTime
-    ${result} =     Execute             ozone sh volume list | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '. | select(.name=="${volume}")'
-                    Should contain      ${result}       creationTime
-# TODO: Disable updating the owner; ACLs should be used to give access to other users.
-                    Execute             ozone sh volume update ${protocol}${server}/${volume} --quota 10TB
-#    ${result} =     Execute             ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name'
-#                    Should Be Equal     ${result}       bill
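-    # the 10TB quota set above is stored as 10 * 2^40 = 10995116277760 bytes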
-    ${result} =     Execute             ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="${volume}") | .quota'
-                    Should Be Equal     ${result}       10995116277760
-                    Execute             ozone sh bucket create ${protocol}${server}/${volume}/bb1
-    ${result} =     Execute             ozone sh bucket info ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="bb1") | .storageType'
-                    Should Be Equal     ${result}       DISK
-    ${result} =     Execute             ozone sh bucket list ${protocol}${server}/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="bb1") | .volumeName'
-                    Should Be Equal     ${result}       ${volume}
-                    Run Keyword         Test key handling       ${protocol}       ${server}       ${volume}
-                    Execute             ozone sh bucket delete ${protocol}${server}/${volume}/bb1
-                    Execute             ozone sh volume delete ${protocol}${server}/${volume}
-
-Test Volume Acls
-    [arguments]     ${protocol}         ${server}       ${volume}
-    Execute         ozone sh volume create ${protocol}${server}/${volume}
-    ${result} =     Execute             ozone sh volume getacl ${protocol}${server}/${volume}
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" .
-    ${result} =     Execute             ozone sh volume addacl ${protocol}${server}/${volume} -a user:superuser1:rwxy[DEFAULT]
-    ${result} =     Execute             ozone sh volume getacl ${protocol}${server}/${volume}
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" .
-    ${result} =     Execute             ozone sh volume removeacl ${protocol}${server}/${volume} -a user:superuser1:xy
-    ${result} =     Execute             ozone sh volume getacl ${protocol}${server}/${volume}
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" .
-    ${result} =     Execute             ozone sh volume setacl ${protocol}${server}/${volume} -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT]
-    ${result} =     Execute             ozone sh volume getacl ${protocol}${server}/${volume}
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" .
-    Should Match Regexp                 ${result}       \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"ALL\" .
-
-Test Bucket Acls
-    [arguments]     ${protocol}         ${server}       ${volume}
-    Execute             ozone sh bucket create ${protocol}${server}/${volume}/bb1
-    ${result} =     Execute             ozone sh bucket getacl ${protocol}${server}/${volume}/bb1
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" .
-    ${result} =     Execute             ozone sh bucket addacl ${protocol}${server}/${volume}/bb1 -a user:superuser1:rwxy
-    ${result} =     Execute             ozone sh bucket getacl ${protocol}${server}/${volume}/bb1
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
-    ${result} =     Execute             ozone sh bucket removeacl ${protocol}${server}/${volume}/bb1 -a user:superuser1:xy
-    ${result} =     Execute             ozone sh bucket getacl ${protocol}${server}/${volume}/bb1
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
-    ${result} =     Execute             ozone sh bucket setacl ${protocol}${server}/${volume}/bb1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT]
-    ${result} =     Execute             ozone sh bucket getacl ${protocol}${server}/${volume}/bb1
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
-    Should Match Regexp                 ${result}       \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"ALL\" .
-
-
-Test key handling
-    [arguments]     ${protocol}         ${server}       ${volume}
-                    Execute             ozone sh key put ${protocol}${server}/${volume}/bb1/key1 /opt/hadoop/NOTICE.txt
-                    Execute             rm -f NOTICE.txt.1
-                    Execute             ozone sh key get ${protocol}${server}/${volume}/bb1/key1 NOTICE.txt.1
-                    Execute             ls -l NOTICE.txt.1
-    ${result} =     Execute             ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="key1")'
-                    Should contain      ${result}       creationTime
-    ${result} =     Execute             ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="key1") | .name'
-                    Should Be Equal     ${result}       key1
-                    Execute             ozone sh key rename ${protocol}${server}/${volume}/bb1 key1 key2
-    ${result} =     Execute             ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.name'
-                    Should Be Equal     ${result}       key2
-                    Execute             ozone sh key delete ${protocol}${server}/${volume}/bb1/key2
-
-Test key Acls
-    [arguments]     ${protocol}         ${server}       ${volume}
-    Execute         ozone sh key put ${protocol}${server}/${volume}/bb1/key2 /opt/hadoop/NOTICE.txt
-    ${result} =     Execute             ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" .
-    ${result} =     Execute             ozone sh key addacl ${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:rwxy
-    ${result} =     Execute             ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
-    ${result} =     Execute             ozone sh key removeacl ${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:xy
-    ${result} =     Execute             ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
-    ${result} =     Execute             ozone sh key setacl ${protocol}${server}/${volume}/bb1/key2 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc
-    ${result} =     Execute             ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
-    Should Match Regexp                 ${result}       \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" .
diff --git a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
deleted file mode 100644
index 88f6c4a..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
+++ /dev/null
@@ -1,65 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Library             OperatingSystem
-Library             String
-Library             BuiltIn
-
-*** Variables ***
-${SECURITY_ENABLED}                 %{SECURITY_ENABLED}
-
-*** Keywords ***
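-# Execute runs a shell command, logs its output and fails the test unless the exit code is 0;
-# Execute And Ignore Error does the same without asserting on the exit code.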
-Execute
-    [arguments]                     ${command}
-    ${rc}                           ${output} =                 Run And Return Rc And Output           ${command}
-    Log                             ${output}
-    Should Be Equal As Integers     ${rc}                       0
-    [return]                        ${output}
-
-Execute And Ignore Error
-    [arguments]                     ${command}
-    ${rc}                           ${output} =                 Run And Return Rc And Output           ${command}
-    Log                             ${output}
-    [return]                        ${output}
-
-Execute and checkrc
-    [arguments]                     ${command}                  ${expected_error_code}
-    ${rc}                           ${output} =                 Run And Return Rc And Output           ${command}
-    Log                             ${output}
-    Should Be Equal As Integers     ${rc}                       ${expected_error_code}
-    [return]                        ${output}
-
-Compare files
-    [arguments]                 ${file1}                   ${file2}
-    ${checksumbefore} =         Execute                    md5sum ${file1} | awk '{print $1}'
-    ${checksumafter} =          Execute                    md5sum ${file2} | awk '{print $1}'
-                                Should Be Equal            ${checksumbefore}            ${checksumafter}
-
-Install aws cli
-    ${rc}              ${output} =                 Run And Return Rc And Output           which apt-get
-    Run Keyword if     '${rc}' == '0'              Install aws cli s3 debian
-    ${rc}              ${output} =                 Run And Return Rc And Output           yum --help
-    Run Keyword if     '${rc}' == '0'              Install aws cli s3 centos
-
-Kinit HTTP user
-    ${hostname} =       Execute                    hostname
-    Wait Until Keyword Succeeds      2min       10sec      Execute            kinit -k HTTP/${hostname}@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab
-
-Kinit test user
-    [arguments]                      ${user}       ${keytab}
-    ${hostname} =       Execute                    hostname
-    Set Suite Variable  ${TEST_USER}               ${user}/${hostname}@EXAMPLE.COM
-    Wait Until Keyword Succeeds      2min       10sec      Execute            kinit -k ${user}/${hostname}@EXAMPLE.COM -t /etc/security/keytabs/${keytab}
diff --git a/hadoop-ozone/dist/src/main/smoketest/createbucketenv.robot b/hadoop-ozone/dist/src/main/smoketest/createbucketenv.robot
deleted file mode 100644
index da97001..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/createbucketenv.robot
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Create bucket and volume for any other testings
-Library             OperatingSystem
-Resource            commonlib.robot
-Test Timeout        2 minute
-
-
-*** Variables ***
-${volume}       vol1
-${bucket}       bucket1
-
-
-*** Keywords ***
-Create volume
-    ${result} =     Execute             ozone sh volume create /${volume} --user hadoop --quota 100TB
-                    Should not contain  ${result}       Failed
-Create bucket
-                    Execute             ozone sh bucket create /${volume}/${bucket}
-
-*** Test Cases ***
-Test ozone shell
-    ${result} =     Execute And Ignore Error             ozone sh bucket info /${volume}/${bucket}
-                    Run Keyword if      "VOLUME_NOT_FOUND" in """${result}"""       Create volume
-                    Run Keyword if      "VOLUME_NOT_FOUND" in """${result}"""       Create bucket
-                    Run Keyword if      "BUCKET_NOT_FOUND" in """${result}"""       Create bucket
-    ${result} =     Execute             ozone sh bucket info /${volume}/${bucket}
-                    Should not contain  ${result}  NOT_FOUND
diff --git a/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot b/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot
deleted file mode 100644
index 2f93e6c..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot
+++ /dev/null
@@ -1,47 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Create directories required for MR test
-Library             OperatingSystem
-Resource            commonlib.robot
-Test Timeout        2 minute
-
-
-*** Variables ***
-${volume}       vol1
-${bucket}       bucket1
-
-
-*** Keywords ***
-Create volume
-    ${result} =     Execute             ozone sh volume create /${volume} --user hadoop --quota 100TB
-                    Should not contain  ${result}       Failed
-Create bucket
-                    Execute             ozone sh bucket create /${volume}/${bucket}
-
-*** Test Cases ***
-Create test volume, bucket and key
-    ${result} =     Execute And Ignore Error             ozone sh bucket info /${volume}/${bucket}
-                    Run Keyword if      "VOLUME_NOT_FOUND" in """${result}"""       Create volume
-                    Run Keyword if      "VOLUME_NOT_FOUND" in """${result}"""       Create bucket
-                    Run Keyword if      "BUCKET_NOT_FOUND" in """${result}"""       Create bucket
-    ${result} =     Execute             ozone sh bucket info /${volume}/${bucket}
-                    Should not contain  ${result}  NOT_FOUND
-                    Execute             ozone sh key put /vol1/bucket1/key1 LICENSE.txt
-
-Create user dir for hadoop
-         Execute        ozone fs -mkdir /user
-         Execute        ozone fs -mkdir /user/hadoop
diff --git a/hadoop-ozone/dist/src/main/smoketest/env-compose.robot b/hadoop-ozone/dist/src/main/smoketest/env-compose.robot
deleted file mode 100644
index d529d7f..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/env-compose.robot
+++ /dev/null
@@ -1,32 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       High level utilities to execute commands and tests in docker-compose based environments.
-Resource            commonlib.robot
-
-
-*** Keywords ***
-
-Run tests on host
-    [arguments]        ${host}       ${robotfile}
-    ${result} =        Execute       docker-compose exec ${host} robot smoketest/${robotfile}
-
-Execute on host
-    [arguments]                     ${host}     ${command}
-    ${rc}                           ${output} =                 Run And Return Rc And Output           docker-compose exec ${host} ${command}
-    Log                             ${output}
-    Should Be Equal As Integers     ${rc}                       0
-    [return]                        ${output}
diff --git a/hadoop-ozone/dist/src/main/smoketest/gdpr/gdpr.robot b/hadoop-ozone/dist/src/main/smoketest/gdpr/gdpr.robot
deleted file mode 100644
index f4705eb..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/gdpr/gdpr.robot
+++ /dev/null
@@ -1,89 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Smoketest Ozone GDPR Feature
-Library             OperatingSystem
-Library             BuiltIn
-Library             String
-Resource            ../commonlib.robot
-Suite Setup         Generate volume
-
-*** Variables ***
-${volume}    generated
-
-*** Keywords ***
-Generate volume
-   ${random} =         Generate Random String  5  [LOWER]
-   Set Suite Variable  ${volume}  ${random}
-
-*** Test Cases ***
-Test GDPR disabled
-  Test GDPR(disabled) without explicit options      ${volume}
-
-Test GDPR --enforcegdpr=true
-  Test GDPR with --enforcegdpr=true                 ${volume}
-
-Test GDPR -g=true
-  Test GDPR with -g=true                            ${volume}
-
-Test GDPR -g=false
-  Test GDPR with -g=false                            ${volume}
-
-*** Keywords ***
-Test GDPR(disabled) without explicit options
-    [arguments]     ${volume}
-                    Execute             ozone sh volume create /${volume} --quota 100TB
-                    Execute             ozone sh bucket create /${volume}/mybucket1
-    ${result} =     Execute             ozone sh bucket info /${volume}/mybucket1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mybucket1") | .metadata | .gdprEnabled'
-                    Should Be Equal     ${result}       null
-                    Execute             ozone sh key put /${volume}/mybucket1/mykey /opt/hadoop/NOTICE.txt
-                    Execute             rm -f NOTICE.txt.1
-    ${result} =     Execute             ozone sh key info /${volume}/mybucket1/mykey | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mykey") | .metadata | .gdprEnabled'
-                    Should Be Equal     ${result}       null
-                    Execute             ozone sh key delete /${volume}/mybucket1/mykey
-
-Test GDPR with --enforcegdpr=true
-    [arguments]     ${volume}
-                    Execute             ozone sh bucket create --enforcegdpr=true /${volume}/mybucket2
-    ${result} =     Execute             ozone sh bucket info /${volume}/mybucket2 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mybucket2") | .metadata | .gdprEnabled'
-                    Should Be Equal     ${result}       true
-                    Execute             ozone sh key put /${volume}/mybucket2/mykey /opt/hadoop/NOTICE.txt
-                    Execute             rm -f NOTICE.txt.1
-    ${result} =     Execute             ozone sh key info /${volume}/mybucket2/mykey | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mykey") | .metadata | .gdprEnabled'
-                    Should Be Equal     ${result}       true
-                    Execute             ozone sh key delete /${volume}/mybucket2/mykey
-
-Test GDPR with -g=true
-    [arguments]     ${volume}
-                    Execute             ozone sh bucket create -g=true /${volume}/mybucket3
-    ${result} =     Execute             ozone sh bucket info /${volume}/mybucket3 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mybucket3") | .metadata | .gdprEnabled'
-                    Should Be Equal     ${result}       true
-                    Execute             ozone sh key put /${volume}/mybucket3/mykey /opt/hadoop/NOTICE.txt
-                    Execute             rm -f NOTICE.txt.1
-    ${result} =     Execute             ozone sh key info /${volume}/mybucket3/mykey | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mykey") | .metadata | .gdprEnabled'
-                    Should Be Equal     ${result}       true
-                    Execute             ozone sh key delete /${volume}/mybucket3/mykey
-
-Test GDPR with -g=false
-    [arguments]     ${volume}
-                    Execute             ozone sh bucket create /${volume}/mybucket4
-    ${result} =     Execute             ozone sh bucket info /${volume}/mybucket4 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mybucket4") | .metadata | .gdprEnabled'
-                    Should Be Equal     ${result}       null
-                    Execute             ozone sh key put /${volume}/mybucket4/mykey /opt/hadoop/NOTICE.txt
-                    Execute             rm -f NOTICE.txt.1
-    ${result} =     Execute             ozone sh key info /${volume}/mybucket4/mykey | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mykey") | .metadata | .gdprEnabled'
-                    Should Be Equal     ${result}       null
-                    Execute             ozone sh key delete /${volume}/mybucket4/mykey
diff --git a/hadoop-ozone/dist/src/main/smoketest/kinit-hadoop.robot b/hadoop-ozone/dist/src/main/smoketest/kinit-hadoop.robot
deleted file mode 100644
index 5d85555..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/kinit-hadoop.robot
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Kinit test user
-Library             OperatingSystem
-Resource            commonlib.robot
-Test Timeout        2 minute
-
-
-*** Test Cases ***
-Kinit
-   Kinit test user     hadoop     hadoop.keytab
diff --git a/hadoop-ozone/dist/src/main/smoketest/kinit.robot b/hadoop-ozone/dist/src/main/smoketest/kinit.robot
deleted file mode 100644
index c9c1b75..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/kinit.robot
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Kinit test user
-Library             OperatingSystem
-Resource            commonlib.robot
-Test Timeout        2 minute
-
-
-*** Variables ***
-${testuser}          testuser
-
-*** Test Cases ***
-Kinit
-   Kinit test user     ${testuser}     ${testuser}.keytab
diff --git a/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot b/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot
deleted file mode 100644
index 789ec4f..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot
+++ /dev/null
@@ -1,37 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Execute MR jobs
-Library             OperatingSystem
-Resource            commonlib.robot
-Test Timeout        4 minute
-
-
-*** Variables ***
-${volume}          vol1
-${bucket}          bucket1
-${hadoop.version}  3.2.0
-
-
-*** Test Cases ***
-Execute PI calculation
-                    ${output} =      Execute                 yarn jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-${hadoop.version}.jar pi 3 3
-                    Should Contain   ${output}               completed successfully
-
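-# the o3fs://bucket1.vol1/key1 input used below is created in advance by createmrenv.robot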
-Execute WordCount
-                    ${random}        Generate Random String  2   [NUMBERS]
-                    ${output} =      Execute                 yarn jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-${hadoop.version}.jar wordcount o3fs://bucket1.vol1/key1 o3fs://bucket1.vol1/key1-${random}.count
-                    Should Contain   ${output}               completed successfully
diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/hadoopo3fs.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/hadoopo3fs.robot
deleted file mode 100644
index 8d12a52..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/hadoopo3fs.robot
+++ /dev/null
@@ -1,32 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Test ozone fs with hadoopfs
-Library             OperatingSystem
-Library             String
-Resource            ../commonlib.robot
-
-*** Variables ***
-${DATANODE_HOST}        datanode
-${PREFIX}               ozone
-
-*** Test Cases ***
-
-Test hadoop dfs
-    ${random} =        Generate Random String  5  [NUMBERS]
-    ${result} =        Execute                    hdfs dfs -put /opt/hadoop/NOTICE.txt o3fs://bucket1.vol1/${PREFIX}-${random}
-    ${result} =        Execute                    hdfs dfs -ls o3fs://bucket1.vol1/
-                       Should contain             ${result}   ${PREFIX}-${random}
diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot
deleted file mode 100644
index f728691..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot
+++ /dev/null
@@ -1,112 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Ozonefs test
-Library             OperatingSystem
-Resource            ../commonlib.robot
-
-*** Variables ***
-
-
-*** Test Cases ***
-Create volume and bucket
-    Execute             ozone sh volume create o3://om/fstest --quota 100TB
-    Execute             ozone sh volume create o3://om/fstest2 --quota 100TB
-    Execute             ozone sh bucket create o3://om/fstest/bucket1
-    Execute             ozone sh bucket create o3://om/fstest/bucket2
-    Execute             ozone sh bucket create o3://om/fstest2/bucket3
-
-Check volume from ozonefs
-    ${result} =         Execute               ozone fs -ls o3fs://bucket1.fstest/
-
-Run ozoneFS tests
-                        Execute               ozone fs -mkdir -p o3fs://bucket1.fstest/testdir/deep
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
-                        Should contain    ${result}         testdir/deep
-                        Execute               ozone fs -copyFromLocal NOTICE.txt o3fs://bucket1.fstest/testdir/deep/
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
-                        Should contain    ${result}         NOTICE.txt
-
-                        Execute               ozone fs -put NOTICE.txt o3fs://bucket1.fstest/testdir/deep/PUTFILE.txt
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
-                        Should contain    ${result}         PUTFILE.txt
-
-    ${result} =         Execute               ozone fs -ls o3fs://bucket1.fstest/testdir/deep/
-                        Should contain    ${result}         NOTICE.txt
-                        Should contain    ${result}         PUTFILE.txt
-
-                        Execute               ozone fs -mv o3fs://bucket1.fstest/testdir/deep/NOTICE.txt o3fs://bucket1.fstest/testdir/deep/MOVED.TXT
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
-                        Should contain    ${result}         MOVED.TXT
-                        Should not contain  ${result}       NOTICE.txt
-
-                        Execute               ozone fs -mkdir -p o3fs://bucket1.fstest/testdir/deep/subdir1
-                        Execute               ozone fs -cp o3fs://bucket1.fstest/testdir/deep/MOVED.TXT o3fs://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
-                        Should contain    ${result}         subdir1/NOTICE.txt
-
-    ${result} =         Execute               ozone fs -ls o3fs://bucket1.fstest/testdir/deep/subdir1/
-                        Should contain    ${result}         NOTICE.txt
-
-    ${result} =         Execute               ozone fs -cat o3fs://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt
-                        Should not contain  ${result}       Failed
-
-                        Execute               ozone fs -rm o3fs://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
-                        Should not contain  ${result}       NOTICE.txt
-
-    ${result} =         Execute               ozone fs -rmdir o3fs://bucket1.fstest/testdir/deep/subdir1/
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
-                        Should not contain  ${result}       subdir1
-
-                        Execute               ozone fs -touch o3fs://bucket1.fstest/testdir/TOUCHFILE.txt
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
-                        Should contain  ${result}       TOUCHFILE.txt
-
-                        Execute               ozone fs -rm -r o3fs://bucket1.fstest/testdir/
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
-                        Should not contain  ${result}       testdir
-
-                        Execute               rm -Rf localdir1
-                        Execute               mkdir localdir1
-                        Execute               cp NOTICE.txt localdir1/LOCAL.txt
-                        Execute               ozone fs -mkdir -p o3fs://bucket1.fstest/testdir1
-                        Execute               ozone fs -copyFromLocal localdir1 o3fs://bucket1.fstest/testdir1/
-                        Execute               ozone fs -put NOTICE.txt o3fs://bucket1.fstest/testdir1/NOTICE.txt
-
-    ${result} =         Execute               ozone fs -ls -R o3fs://bucket1.fstest/testdir1/
-                        Should contain    ${result}         localdir1/LOCAL.txt
-                        Should contain    ${result}         testdir1/NOTICE.txt
-
-                        Execute               ozone fs -mkdir -p o3fs://bucket2.fstest/testdir2
-                        Execute               ozone fs -mkdir -p o3fs://bucket3.fstest2/testdir3
-
-                        Execute               ozone fs -cp o3fs://bucket1.fstest/testdir1/localdir1 o3fs://bucket2.fstest/testdir2/
-
-                        Execute               ozone fs -cp o3fs://bucket1.fstest/testdir1/localdir1 o3fs://bucket3.fstest2/testdir3/
-
-                        Execute               ozone sh key put o3://om/fstest/bucket1/KEY.txt NOTICE.txt
-    ${result} =         Execute               ozone fs -ls o3fs://bucket1.fstest/KEY.txt
-                        Should contain    ${result}         KEY.txt
-    ${rc}  ${result} =  Run And Return Rc And Output        ozone fs -copyFromLocal NOTICE.txt o3fs://bucket1.fstest/KEY.txt
-                        Should Be Equal As Integers     ${rc}                1
-                        Should contain    ${result}         File exists
-                        Execute               rm -Rf GET.txt
-                        Execute               ozone fs -get o3fs://bucket1.fstest/KEY.txt GET.txt
-                        Execute               ls -l GET.txt
-    ${rc}  ${result} =  Run And Return Rc And Output        ozone fs -ls o3fs://abcde.pqrs/
-                        Should Be Equal As Integers     ${rc}                1
-                        Should Match Regexp    ${result}         (Check access operation failed)|(Volume pqrs is not found)
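
The checks above all drive the o3fs Hadoop-compatible file system, where paths take the form o3fs://&lt;bucket&gt;.&lt;volume&gt;/&lt;path&gt; and are manipulated with ordinary `ozone fs` shell commands. A minimal sketch of the same pattern from Python follows; the `ozone_fs` helper is hypothetical and assumes the `ozone` launcher is on the PATH.

```python
import subprocess

def ozone_fs(*args: str) -> str:
    """Run `ozone fs <args>` and return stdout; raises on a non-zero exit."""
    result = subprocess.run(["ozone", "fs", *args],
                            check=True, capture_output=True, text=True)
    return result.stdout

# List a directory through the o3fs://<bucket>.<volume>/<path> scheme.
listing = ozone_fs("-ls", "o3fs://bucket1.fstest/testdir1/")
```
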
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
deleted file mode 100644
index 40e7df1..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
+++ /dev/null
@@ -1,274 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       S3 gateway test with aws cli
-Library             OperatingSystem
-Library             String
-Resource            ../commonlib.robot
-Resource            commonawslib.robot
-Test Setup          Setup s3 tests
-
-*** Keywords ***
-Create Random file
-    [arguments]             ${size_in_megabytes}
-    Execute                 dd if=/dev/urandom of=/tmp/part1 bs=1048576 count=${size_in_megabytes}
-
-
-*** Variables ***
-${ENDPOINT_URL}       http://s3g:9878
-${BUCKET}             generated
-
-*** Test Cases ***
-
-Test Multipart Upload
-    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key multipartKey
-    ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
-                        Should contain          ${result}    ${BUCKET}
-                        Should contain          ${result}    multipartKey
-                        Should contain          ${result}    UploadId
-# initiate again
-    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key multipartKey
-    ${nextUploadID} =   Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
-                        Should contain          ${result}    ${BUCKET}
-                        Should contain          ${result}    multipartKey
-                        Should contain          ${result}    UploadId
-                        Should Not Be Equal     ${uploadID}  ${nextUploadID}
-
-# upload part
-# Each part must be at least 5 MB, otherwise the later complete-multipart-upload
-# call fails with EntityTooSmall. With that completion in mind, each part is
-# uploaded as a 5 MB file; only the last part may be smaller.
-
-    Run Keyword         Create Random file      5
-    ${result} =         Execute AWSS3APICli     upload-part --bucket ${BUCKET} --key multipartKey --part-number 1 --body /tmp/part1 --upload-id ${nextUploadID}
-                        Should contain          ${result}    ETag
-# override part
-    Run Keyword         Create Random file      5
-    ${result} =         Execute AWSS3APICli     upload-part --bucket ${BUCKET} --key multipartKey --part-number 1 --body /tmp/part1 --upload-id ${nextUploadID}
-                        Should contain          ${result}    ETag
-
-
-Test Multipart Upload Complete
-    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key multipartKey1
-    ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
-                        Should contain          ${result}    ${BUCKET}
-                        Should contain          ${result}    multipartKey
-                        Should contain          ${result}    UploadId
-
-#upload parts
-    Run Keyword         Create Random file      5
-    ${result} =         Execute AWSS3APICli     upload-part --bucket ${BUCKET} --key multipartKey1 --part-number 1 --body /tmp/part1 --upload-id ${uploadID}
-    ${eTag1} =          Execute and checkrc     echo '${result}' | jq -r '.ETag'   0
-                        Should contain          ${result}    ETag
-
-                        Execute                 echo "Part2" > /tmp/part2
-    ${result} =         Execute AWSS3APICli     upload-part --bucket ${BUCKET} --key multipartKey1 --part-number 2 --body /tmp/part2 --upload-id ${uploadID}
-    ${eTag2} =          Execute and checkrc     echo '${result}' | jq -r '.ETag'   0
-                        Should contain          ${result}    ETag
-
-#complete multipart upload
-    ${result} =         Execute AWSS3APICli     complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]'
-                        Should contain          ${result}    ${BUCKET}
-                        Should contain          ${result}    multipartKey1
-                        Should contain          ${result}    ETag
-
-#read file and check the key
-    ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key multipartKey1 /tmp/multipartKey1.result
-                                Execute                    cat /tmp/part1 /tmp/part2 > /tmp/multipartKey1
-    Compare files               /tmp/multipartKey1         /tmp/multipartKey1.result
-
-Test Multipart Upload Complete Entity too small
-    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key multipartKey2
-    ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
-                        Should contain          ${result}    ${BUCKET}
-                        Should contain          ${result}    multipartKey
-                        Should contain          ${result}    UploadId
-
-#upload parts
-                        Execute                 echo "Part1" > /tmp/part1
-    ${result} =         Execute AWSS3APICli     upload-part --bucket ${BUCKET} --key multipartKey2 --part-number 1 --body /tmp/part1 --upload-id ${uploadID}
-    ${eTag1} =          Execute and checkrc     echo '${result}' | jq -r '.ETag'   0
-                        Should contain          ${result}    ETag
-
-                        Execute                 echo "Part2" > /tmp/part2
-    ${result} =         Execute AWSS3APICli     upload-part --bucket ${BUCKET} --key multipartKey2 --part-number 2 --body /tmp/part2 --upload-id ${uploadID}
-    ${eTag2} =          Execute and checkrc     echo '${result}' | jq -r '.ETag'   0
-                        Should contain          ${result}    ETag
-
-#complete multipart upload
-    ${result} =         Execute AWSS3APICli and checkrc  complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key multipartKey2 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]'    255
-                        Should contain          ${result}    EntityTooSmall
-
-
-Test Multipart Upload Complete Invalid part
-    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key multipartKey3
-    ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
-                        Should contain          ${result}    ${BUCKET}
-                        Should contain          ${result}    multipartKey
-                        Should contain          ${result}    UploadId
-
-#upload parts
-                        Execute                 echo "Part1" > /tmp/part1
-    ${result} =         Execute AWSS3APICli     upload-part --bucket ${BUCKET} --key multipartKey3 --part-number 1 --body /tmp/part1 --upload-id ${uploadID}
-    ${eTag1} =          Execute and checkrc     echo '${result}' | jq -r '.ETag'   0
-                        Should contain          ${result}    ETag
-
-                        Execute                 echo "Part2" > /tmp/part2
-    ${result} =         Execute AWSS3APICli     upload-part --bucket ${BUCKET} --key multipartKey3 --part-number 2 --body /tmp/part2 --upload-id ${uploadID}
-    ${eTag2} =          Execute and checkrc     echo '${result}' | jq -r '.ETag'   0
-                        Should contain          ${result}    ETag
-
-#complete multipart upload
-    ${result} =         Execute AWSS3APICli and checkrc  complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key multipartKey3 --multipart-upload 'Parts=[{ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2}]'    255
-                        Should contain          ${result}    InvalidPart
-
-Test abort Multipart upload
-    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key multipartKey4 --storage-class REDUCED_REDUNDANCY
-    ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
-                        Should contain          ${result}    ${BUCKET}
-                        Should contain          ${result}    multipartKey
-                        Should contain          ${result}    UploadId
-
-    ${result} =         Execute AWSS3APICli and checkrc    abort-multipart-upload --bucket ${BUCKET} --key multipartKey4 --upload-id ${uploadID}    0
-
-Test abort Multipart upload with invalid uploadId
-    ${result} =         Execute AWSS3APICli and checkrc    abort-multipart-upload --bucket ${BUCKET} --key multipartKey5 --upload-id "random"    255
-
-Upload part with Incorrect uploadID
-                        Execute                 echo "Multipart upload" > /tmp/testfile
-        ${result} =     Execute AWSS3APICli and checkrc     upload-part --bucket ${BUCKET} --key multipartKey --part-number 1 --body /tmp/testfile --upload-id "random"  255
-                        Should contain          ${result}    NoSuchUpload
-
-Test list parts
-#initiate multipart upload
-    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key multipartKey5
-    ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
-                        Should contain          ${result}    ${BUCKET}
-                        Should contain          ${result}    multipartKey
-                        Should contain          ${result}    UploadId
-
-#upload parts
-    Run Keyword         Create Random file      5
-    ${result} =         Execute AWSS3APICli     upload-part --bucket ${BUCKET} --key multipartKey5 --part-number 1 --body /tmp/part1 --upload-id ${uploadID}
-    ${eTag1} =          Execute and checkrc     echo '${result}' | jq -r '.ETag'   0
-                        Should contain          ${result}    ETag
-
-                        Execute                 echo "Part2" > /tmp/part2
-    ${result} =         Execute AWSS3APICli     upload-part --bucket ${BUCKET} --key multipartKey5 --part-number 2 --body /tmp/part2 --upload-id ${uploadID}
-    ${eTag2} =          Execute and checkrc     echo '${result}' | jq -r '.ETag'   0
-                        Should contain          ${result}    ETag
-
-#list parts
-    ${result} =         Execute AWSS3APICli   list-parts --bucket ${BUCKET} --key multipartKey5 --upload-id ${uploadID}
-    ${part1} =          Execute and checkrc    echo '${result}' | jq -r '.Parts[0].ETag'  0
-    ${part2} =          Execute and checkrc    echo '${result}' | jq -r '.Parts[1].ETag'  0
-                        Should Be equal       ${part1}    ${eTag1}
-                        Should Be equal       ${part2}    ${eTag2}
-                        Should contain        ${result}    STANDARD
-
-#list parts with max-items and next token
-    ${result} =         Execute AWSS3APICli   list-parts --bucket ${BUCKET} --key multipartKey5 --upload-id ${uploadID} --max-items 1
-    ${part1} =          Execute and checkrc    echo '${result}' | jq -r '.Parts[0].ETag'  0
-    ${token} =          Execute and checkrc    echo '${result}' | jq -r '.NextToken'  0
-                        Should Be equal       ${part1}    ${eTag1}
-                        Should contain        ${result}   STANDARD
-
-    ${result} =         Execute AWSS3APICli   list-parts --bucket ${BUCKET} --key multipartKey5 --upload-id ${uploadID} --max-items 1 --starting-token ${token}
-    ${part2} =          Execute and checkrc    echo '${result}' | jq -r '.Parts[0].ETag'  0
-                        Should Be equal       ${part2}    ${eTag2}
-                        Should contain        ${result}   STANDARD
-
-#finally abort it
-    ${result} =         Execute AWSS3APICli and checkrc    abort-multipart-upload --bucket ${BUCKET} --key multipartKey5 --upload-id ${uploadID}    0
-
-Test Multipart Upload with the simplified aws s3 cp API
-                        Create Random file      22
-                        Execute AWSS3Cli        cp /tmp/part1 s3://${BUCKET}/mpyawscli
-                        Execute AWSS3Cli        cp s3://${BUCKET}/mpyawscli /tmp/part1.result
-                        Execute AWSS3Cli        rm s3://${BUCKET}/mpyawscli
-                        Compare files           /tmp/part1        /tmp/part1.result
-
-Test Multipart Upload Put With Copy
-    Run Keyword         Create Random file      5
-    ${result} =         Execute AWSS3APICli     put-object --bucket ${BUCKET} --key copytest/source --body /tmp/part1
-
-
-    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key copytest/destination
-
-    ${uploadID} =       Execute and checkrc      echo '${result}' | jq -r '.UploadId'    0
-                        Should contain           ${result}    ${BUCKET}
-                        Should contain           ${result}    UploadId
-
-    ${result} =         Execute AWSS3APICli      upload-part-copy --bucket ${BUCKET} --key copytest/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/copytest/source
-                        Should contain           ${result}    ${BUCKET}
-                        Should contain           ${result}    ETag
-                        Should contain           ${result}    LastModified
-    ${eTag1} =          Execute and checkrc      echo '${result}' | jq -r '.CopyPartResult.ETag'   0
-
-
-                        Execute AWSS3APICli     complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key copytest/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1}]'
-                        Execute AWSS3APICli     get-object --bucket ${BUCKET} --key copytest/destination /tmp/part-result
-
-                        Compare files           /tmp/part1        /tmp/part-result
-
-Test Multipart Upload Put With Copy and range
-    Run Keyword         Create Random file      10
-    ${result} =         Execute AWSS3APICli     put-object --bucket ${BUCKET} --key copyrange/source --body /tmp/part1
-
-
-    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key copyrange/destination
-
-    ${uploadID} =       Execute and checkrc      echo '${result}' | jq -r '.UploadId'    0
-                        Should contain           ${result}    ${BUCKET}
-                        Should contain           ${result}    UploadId
-
-    ${result} =         Execute AWSS3APICli      upload-part-copy --bucket ${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/copyrange/source --copy-source-range bytes=0-10485758
-                        Should contain           ${result}    ${BUCKET}
-                        Should contain           ${result}    ETag
-                        Should contain           ${result}    LastModified
-    ${eTag1} =          Execute and checkrc      echo '${result}' | jq -r '.CopyPartResult.ETag'   0
-
-    ${result} =         Execute AWSS3APICli      upload-part-copy --bucket ${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 2 --copy-source ${BUCKET}/copyrange/source --copy-source-range bytes=10485758-10485760
-                        Should contain           ${result}    ${BUCKET}
-                        Should contain           ${result}    ETag
-                        Should contain           ${result}    LastModified
-    ${eTag2} =          Execute and checkrc      echo '${result}' | jq -r '.CopyPartResult.ETag'   0
-
-
-                        Execute AWSS3APICli     complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key copyrange/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]'
-                        Execute AWSS3APICli     get-object --bucket ${BUCKET} --key copyrange/destination /tmp/part-result
-
-                        Compare files           /tmp/part1        /tmp/part-result
-
-Test Multipart Upload list
-    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key listtest/key1
-    ${uploadID1} =      Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
-                        Should contain          ${result}    ${BUCKET}
-                        Should contain          ${result}    listtest/key1
-                        Should contain          ${result}    UploadId
-
-    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key listtest/key2
-    ${uploadID2} =      Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
-                        Should contain          ${result}    ${BUCKET}
-                        Should contain          ${result}    listtest/key2
-                        Should contain          ${result}    UploadId
-
-    ${result} =         Execute AWSS3APICli     list-multipart-uploads --bucket ${BUCKET} --prefix listtest
-                        Should contain          ${result}    ${uploadID1}
-                        Should contain          ${result}    ${uploadID2}
-
-    ${count} =          Execute and checkrc      echo '${result}' | jq -r '.Uploads | length'  0
-                        Should Be Equal          ${count}     2
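
The suite above walks the whole S3 multipart life cycle through `aws s3api`. For reference, a minimal boto3 sketch of the same flow, including the rule that every part except the last must be at least 5 MB (violations surface as EntityTooSmall on completion, exactly what the "Entity too small" case asserts). The endpoint, bucket, and key names are illustrative assumptions.

```python
import boto3

# The smoketests point the aws cli at the Ozone S3 gateway; boto3 takes the
# same endpoint override. Credentials are assumed to be configured already.
s3 = boto3.client("s3", endpoint_url="http://s3g:9878")

bucket, key = "generated", "multipartKey1"
upload_id = s3.create_multipart_upload(Bucket=bucket, Key=key)["UploadId"]

# Every part except the last must be at least 5 MB.
part1 = s3.upload_part(Bucket=bucket, Key=key, PartNumber=1,
                       UploadId=upload_id, Body=b"a" * (5 * 1024 * 1024))
part2 = s3.upload_part(Bucket=bucket, Key=key, PartNumber=2,
                       UploadId=upload_id, Body=b"Part2\n")  # last part may be small

s3.complete_multipart_upload(
    Bucket=bucket, Key=key, UploadId=upload_id,
    MultipartUpload={"Parts": [
        {"ETag": part1["ETag"], "PartNumber": 1},
        {"ETag": part2["ETag"], "PartNumber": 2},
    ]})
```
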
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/README.md b/hadoop-ozone/dist/src/main/smoketest/s3/README.md
deleted file mode 100644
index 70ccda7..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/s3/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-## Ozone S3 Gateway Acceptance Tests
-
-Note: the aws cli based acceptance tests can be cross-checked against the real AWS S3 endpoint.
-
-You need to:
-
-  1. Create a bucket
-  2. Configure your local aws cli
-  3. Set the bucket/endpoint URL during the robot test execution
-
-```
-robot -v bucket:ozonetest -v OZONE_TEST:false -v OZONE_S3_SET_CREDENTIALS:false -v ENDPOINT_URL:https://s3.us-east-2.amazonaws.com smoketest/s3
-```
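
A minimal boto3 sketch of the same cross-check idea: identical client code can target either the Ozone S3 gateway or the regular AWS endpoint, assuming credentials are configured for whichever backend is selected.

```python
import boto3

def s3_client(cross_check_with_aws: bool = False):
    # endpoint_url=None lets boto3 resolve the regular AWS endpoint;
    # otherwise target the gateway the smoketests use.
    endpoint = None if cross_check_with_aws else "http://s3g:9878"
    return boto3.client("s3", endpoint_url=endpoint)

# Any call from the suite can be replayed against either backend, e.g.:
buckets = s3_client().list_buckets()["Buckets"]
```
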
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/__init__.robot b/hadoop-ozone/dist/src/main/smoketest/s3/__init__.robot
deleted file mode 100644
index f1bbea9..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/s3/__init__.robot
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Library             OperatingSystem
-Library             String
-Resource            ../commonlib.robot
-Resource            ./commonawslib.robot
-Test Setup          Setup s3 tests
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot b/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
deleted file mode 100644
index 8762d5d..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
+++ /dev/null
@@ -1,47 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       S3 gateway test with aws cli
-Library             OperatingSystem
-Library             String
-Resource            ../commonlib.robot
-Resource            ./commonawslib.robot
-Suite Setup         Setup s3 tests
-
-*** Variables ***
-${ENDPOINT_URL}       http://s3g:9878
-${BUCKET}             generated
-
-*** Test Cases ***
-
-File upload and directory list
-                        Execute                   date > /tmp/testfile
-    ${result} =         Execute AWSS3Cli          cp /tmp/testfile s3://${BUCKET}
-                        Should contain            ${result}         upload
-    ${result} =         Execute AWSS3Cli          cp /tmp/testfile s3://${BUCKET}/dir1/dir2/file
-                        Should contain            ${result}         upload
-    ${result} =         Execute AWSS3Cli          ls s3://${BUCKET}
-                        Should contain            ${result}         testfile
-                        Should contain            ${result}         dir1
-                        Should not contain        ${result}         dir2
-    ${result} =         Execute AWSS3Cli          ls s3://${BUCKET}/dir1/
-                        Should not contain        ${result}         testfile
-                        Should not contain        ${result}         dir1
-                        Should contain            ${result}         dir2
-    ${result} =         Execute AWSS3Cli          ls s3://${BUCKET}/dir1/dir2/file
-                        Should not contain        ${result}         testfile
-                        Should not contain        ${result}         dir1
-                        Should contain            ${result}         file
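
The listing assertions above pin down S3's pseudo-directory behaviour: a key such as dir1/dir2/file surfaces as the common prefix dir1/ at the bucket root rather than as three separate entries. A minimal boto3 sketch of the same delimiter-based listing, with illustrative names:

```python
import boto3

s3 = boto3.client("s3", endpoint_url="http://s3g:9878")
resp = s3.list_objects_v2(Bucket="generated", Delimiter="/")
# Plain objects at the root, e.g. ['testfile'].
print([o["Key"] for o in resp.get("Contents", [])])
# "Directories" reported as common prefixes, e.g. ['dir1/'].
print([p["Prefix"] for p in resp.get("CommonPrefixes", [])])
```
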
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot
deleted file mode 100644
index 4d85992..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       S3 gateway test with aws cli
-Library             OperatingSystem
-Library             String
-Resource            ../commonlib.robot
-Resource            commonawslib.robot
-Test Setup          Setup s3 tests
-
-*** Variables ***
-${ENDPOINT_URL}       http://s3g:9878
-${BUCKET}             generated
-
-*** Test Cases ***
-
-Create bucket which already exists
-# The bucket is already created in Test Setup.
-    ${result} =         Execute AWSS3APICli         create-bucket --bucket ${BUCKET}
-                        Should contain              ${result}         ${BUCKET}
-                        Should contain              ${result}         Location
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/buckethead.robot b/hadoop-ozone/dist/src/main/smoketest/s3/buckethead.robot
deleted file mode 100644
index 2ce5002..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/s3/buckethead.robot
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       S3 gateway test with aws cli
-Library             OperatingSystem
-Library             String
-Resource            ../commonlib.robot
-Resource            commonawslib.robot
-Test Setup          Setup s3 tests
-
-*** Variables ***
-${ENDPOINT_URL}       http://s3g:9878
-${BUCKET}             generated
-
-*** Test Cases ***
-
-Head Bucket non-existent
-    ${result} =         Execute AWSS3APICli     head-bucket --bucket ${BUCKET}
-    ${result} =         Execute AWSS3APICli and checkrc      head-bucket --bucket ozonenosuchbucketqqweqwe  255
-                        Should contain          ${result}    Bad Request
-                        Should contain          ${result}    400
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketlist.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketlist.robot
deleted file mode 100644
index 4fe9b65..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketlist.robot
+++ /dev/null
@@ -1,32 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       S3 gateway test with aws cli
-Library             OperatingSystem
-Library             String
-Resource            ../commonlib.robot
-Resource            commonawslib.robot
-Test Setup          Setup s3 tests
-
-*** Variables ***
-${ENDPOINT_URL}       http://s3g:9878
-${BUCKET}             generated
-
-*** Test Cases ***
-
-List buckets
-    ${result} =         Execute AWSS3APICli     list-buckets | jq -r '.Buckets[].Name'
-                        Should contain          ${result}    ${BUCKET}
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
deleted file mode 100644
index 1335635..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
+++ /dev/null
@@ -1,81 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Resource            ../commonlib.robot
-
-*** Variables ***
-${OZONE_S3_HEADER_VERSION}     v4
-${OZONE_S3_SET_CREDENTIALS}    true
-${BUCKET}                      bucket-999
-
-*** Keywords ***
-Execute AWSS3APICli
-    [Arguments]       ${command}
-    ${output} =       Execute                    aws s3api --endpoint-url ${ENDPOINT_URL} ${command}
-    [return]          ${output}
-
-Execute AWSS3APICli and checkrc
-    [Arguments]       ${command}                 ${expected_error_code}
-    ${output} =       Execute and checkrc        aws s3api --endpoint-url ${ENDPOINT_URL} ${command}  ${expected_error_code}
-    [return]          ${output}
-
-Execute AWSS3Cli
-    [Arguments]       ${command}
-    ${output} =       Execute                     aws s3 --endpoint-url ${ENDPOINT_URL} ${command}
-    [return]          ${output}
-
-Install aws cli s3 centos
-    Execute            sudo yum install -y awscli
-
-Install aws cli s3 debian
-    Execute            sudo apt-get install -y awscli
-
-Setup v2 headers
-                        Set Environment Variable   AWS_ACCESS_KEY_ID       ANYID
-                        Set Environment Variable   AWS_SECRET_ACCESS_KEY   ANYKEY
-
-Setup v4 headers
-    Run Keyword if      '${SECURITY_ENABLED}' == 'true'     Kinit test user    testuser    testuser.keytab
-    ${result} =         Execute                    ozone s3 getsecret
-    ${accessKey} =      Get Regexp Matches         ${result}     (?<=awsAccessKey=).*
-    ${accessKey} =      Get Variable Value         ${accessKey}  sdsdasaasdasd
-    ${secret} =         Get Regexp Matches         ${result}     (?<=awsSecret=).*
-
-    ${len}=             Get Length  ${accessKey}
-    ${accessKey}=       Set Variable If   ${len} > 0  ${accessKey[0]}    kljdfslff
-    ${len}=             Get Length  ${secret}
-    ${secret}=          Set Variable If    ${len} > 0  ${secret[0]}      dhafldhlf
-                        Execute                    aws configure set default.s3.signature_version s3v4
-                        Execute                    aws configure set aws_access_key_id ${accessKey}
-                        Execute                    aws configure set aws_secret_access_key ${secret}
-                        Execute                    aws configure set region us-west-1
-
-Setup incorrect credentials for S3
-                        Execute                    aws configure set default.s3.signature_version s3v4
-                        Execute                    aws configure set aws_access_key_id dlfknslnfslf
-                        Execute                    aws configure set aws_secret_access_key dlfknslnfslf
-                        Execute                    aws configure set region us-west-1
-
-Create bucket
-    ${postfix} =         Generate Random String  5  [NUMBERS]
-    Set Suite Variable   ${BUCKET}                  bucket-${postfix}
-    Execute AWSS3APICli  create-bucket --bucket ${BUCKET}
-
-Setup s3 tests
-    Run Keyword        Install aws cli
-    Run Keyword if    '${OZONE_S3_SET_CREDENTIALS}' == 'true'    Setup v4 headers
-    Run Keyword if    '${BUCKET}' == 'generated'                 Create bucket
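
The `Setup v4 headers` keyword above scrapes the access key and secret out of the `ozone s3 getsecret` output with regexes, falling back to dummy values when nothing matches. A minimal Python sketch of that parsing; the sample output line is an assumption inferred from the regexes, and the fallback values are placeholders.

```python
import re

def parse_s3_secret(output: str):
    """Extract awsAccessKey/awsSecret, with dummy fallbacks like the keyword."""
    access = re.search(r"awsAccessKey=(.*)", output)
    secret = re.search(r"awsSecret=(.*)", output)
    return (access.group(1) if access else "dummy-access-key",
            secret.group(1) if secret else "dummy-secret-key")

access_key, secret_key = parse_s3_secret(
    "awsAccessKey=testuser/scm@EXAMPLE.COM\nawsSecret=0123456789abcdef")
```
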
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot
deleted file mode 100644
index c6b568c..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot
+++ /dev/null
@@ -1,66 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       S3 gateway test with aws cli
-Library             OperatingSystem
-Library             String
-Resource            ../commonlib.robot
-Resource            commonawslib.robot
-Test Setup          Setup s3 tests
-
-*** Variables ***
-${ENDPOINT_URL}       http://s3g:9878
-${BUCKET}             generated
-${DESTBUCKET}         generated1
-
-
-*** Keywords ***
-Create Dest Bucket
-
-    ${postfix} =         Generate Random String  5  [NUMBERS]
-    Set Suite Variable   ${DESTBUCKET}             destbucket-${postfix}
-    Execute AWSS3APICli  create-bucket --bucket ${DESTBUCKET}
-
-
-*** Test Cases ***
-Copy Object Happy Scenario
-    Run Keyword if    '${DESTBUCKET}' == 'generated1'    Create Dest Bucket
-                        Execute                    date > /tmp/copyfile
-    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key copyobject/f1 --body /tmp/copyfile
-    ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${BUCKET} --prefix copyobject/
-                        Should contain             ${result}         f1
-
-    ${result} =         Execute AWSS3ApiCli        copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1
-    ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${DESTBUCKET} --prefix copyobject/
-                        Should contain             ${result}         f1
-    #copying again will not throw error
-    ${result} =         Execute AWSS3ApiCli        copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1
-    ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${DESTBUCKET} --prefix copyobject/
-                        Should contain             ${result}         f1
-
-Copy Object Where Bucket is not available
-    ${result} =         Execute AWSS3APICli and checkrc        copy-object --bucket dfdfdfdfdfnonexistent --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1      255
-                        Should contain             ${result}        NoSuchBucket
-    ${result} =         Execute AWSS3APICli and checkrc        copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source dfdfdfdfdfnonexistent/copyobject/f1  255
-                        Should contain             ${result}        NoSuchBucket
-
-Copy Object Where both source and dest are the same with a change of storage class
-    ${result} =         Execute AWSS3APICli        copy-object --storage-class REDUCED_REDUNDANCY --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${DESTBUCKET}/copyobject/f1
-                        Should contain             ${result}        ETag
-
-Copy Object Where Key not available
-    ${result} =         Execute AWSS3APICli and checkrc        copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/nonnonexistentkey       255
-                        Should contain             ${result}        NoSuchKey
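
Note what the happy-path case above establishes: copy-object is idempotent from the caller's point of view, so repeating the copy to the same destination succeeds. A minimal boto3 sketch of that behaviour, with placeholder bucket names:

```python
import boto3

s3 = boto3.client("s3", endpoint_url="http://s3g:9878")
for _ in range(2):  # the second, identical copy must also succeed
    s3.copy_object(Bucket="destbucket-12345",
                   Key="copyobject/f1",
                   CopySource="generated/copyobject/f1")
```
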
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectdelete.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectdelete.robot
deleted file mode 100644
index 9e57d50..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/s3/objectdelete.robot
+++ /dev/null
@@ -1,72 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       S3 gateway test with aws cli
-Library             OperatingSystem
-Library             String
-Resource            ../commonlib.robot
-Resource            commonawslib.robot
-Test Setup          Setup s3 tests
-
-*** Variables ***
-${ENDPOINT_URL}       http://s3g:9878
-${BUCKET}             generated
-
-*** Test Cases ***
-Delete file with s3api
-                        Execute                    date > /tmp/testfile
-    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key deletetestapi/f1 --body /tmp/testfile
-    ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${BUCKET} --prefix deletetestapi/
-                        Should contain             ${result}         f1
-    ${result} =         Execute AWSS3APICli        delete-object --bucket ${BUCKET} --key deletetestapi/f1
-    ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${BUCKET} --prefix deletetestapi/
-                        Should not contain         ${result}         f1
-#In case of HTTP 500, the error code is printed out to the console.
-                        Should not contain         ${result}         500
-
-Delete file with s3api, file doesn't exist
-    ${result} =         Execute AWSS3Cli           ls s3://${BUCKET}/
-                        Should not contain         ${result}         thereisnosuchfile
-    ${result} =         Execute AWSS3APICli        delete-object --bucket ${BUCKET} --key thereisnosuchfile
-    ${result} =         Execute AWSS3Cli           ls s3://${BUCKET}/
-                        Should not contain         ${result}         thereisnosuchfile
-
-Delete dir with s3api
-                        Execute                    date > /tmp/testfile
-    ${result} =         Execute AWSS3Cli           cp /tmp/testfile s3://${BUCKET}/deletetestapidir/f1
-    ${result} =         Execute AWSS3Cli           ls s3://${BUCKET}/deletetestapidir/
-                        Should contain             ${result}         f1
-    ${result} =         Execute AWSS3APICli        delete-object --bucket ${BUCKET} --key deletetestapidir/
-    ${result} =         Execute AWSS3Cli           ls s3://${BUCKET}/deletetestapidir/
-                        Should contain             ${result}         f1
-    ${result} =         Execute AWSS3APICli        delete-object --bucket ${BUCKET} --key deletetestapidir/f1
-
-
-Delete file with s3api, file doesn't exist, prefix of a real file
-                        Execute                    date > /tmp/testfile
-    ${result} =         Execute AWSS3Cli           cp /tmp/testfile s3://${BUCKET}/deletetestapiprefix/filefile
-    ${result} =         Execute AWSS3Cli           ls s3://${BUCKET}/deletetestapiprefix/
-                        Should contain             ${result}         filefile
-    ${result} =         Execute AWSS3APICli        delete-object --bucket ${BUCKET} --key deletetestapiprefix/file
-    ${result} =         Execute AWSS3Cli           ls s3://${BUCKET}/deletetestapiprefix/
-                        Should contain             ${result}         filefile
-    ${result} =         Execute AWSS3APICli        delete-object --bucket ${BUCKET} --key deletetestapiprefix/filefile
-
-
-
-Delete file with s3api, bucket doesn't exist
-    ${result} =         Execute AWSS3APICli and checkrc   delete-object --bucket ${BUCKET}-nosuchbucket --key f1      255
-                        Should contain                    ${result}         NoSuchBucket
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectmultidelete.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectmultidelete.robot
deleted file mode 100644
index 542ef99..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/s3/objectmultidelete.robot
+++ /dev/null
@@ -1,48 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       S3 gateway test with aws cli
-Library             OperatingSystem
-Library             String
-Resource            ../commonlib.robot
-Resource            commonawslib.robot
-Test Setup          Setup s3 tests
-
-*** Variables ***
-${ENDPOINT_URL}       http://s3g:9878
-${BUCKET}             generated
-
-*** Test Cases ***
-
-Delete file with multi delete
-                        Execute                    date > /tmp/testfile
-    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key multidelete/f1 --body /tmp/testfile
-    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key multidelete/f2 --body /tmp/testfile
-    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key multidelete/f3 --body /tmp/testfile
-    ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${BUCKET} --prefix multidelete/
-                        Should contain             ${result}         multidelete/f1
-                        Should contain             ${result}         multidelete/f2
-                        Should contain             ${result}         multidelete/f3
-                        Should contain             ${result}         STANDARD
-                        Should not contain         ${result}         REDUCED_REDUNDANCY
-    ${result} =         Execute AWSS3APICli        delete-objects --bucket ${BUCKET} --delete 'Objects=[{Key=multidelete/f1},{Key=multidelete/f2},{Key=multidelete/f4}]'
-                        Should not contain         ${result}         Error
-    ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${BUCKET} --prefix multidelete/
-                        Should not contain         ${result}         multidelete/f1
-                        Should not contain         ${result}         multidelete/f2
-                        Should contain             ${result}         multidelete/f3
-                        Should contain             ${result}         STANDARD
-                        Should not contain         ${result}         REDUCED_REDUNDANCY
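
The semantics pinned down here: multi-delete reports per-key outcomes, and a key that does not exist (multidelete/f4 above) does not fail the request as a whole. A minimal boto3 sketch under the same assumptions:

```python
import boto3

s3 = boto3.client("s3", endpoint_url="http://s3g:9878")
response = s3.delete_objects(
    Bucket="generated",
    Delete={"Objects": [
        {"Key": "multidelete/f1"},
        {"Key": "multidelete/f2"},
        {"Key": "multidelete/f4"},  # missing key: still no Error expected
    ]})
assert not response.get("Errors")
```
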
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
deleted file mode 100644
index 1b2a504..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
+++ /dev/null
@@ -1,154 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       S3 gateway test with aws cli
-Library             OperatingSystem
-Library             String
-Resource            ../commonlib.robot
-Resource            commonawslib.robot
-Test Setup          Setup s3 tests
-
-*** Variables ***
-${ENDPOINT_URL}       http://s3g:9878
-${OZONE_TEST}         true
-${BUCKET}             generated
-
-*** Test Cases ***
-
-Put object to s3
-                        Execute                    echo "Randomtext" > /tmp/testfile
-    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key putobject/f1 --body /tmp/testfile
-    ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${BUCKET} --prefix putobject/
-                        Should contain             ${result}         f1
-
-                        Execute                    touch -f /tmp/zerobyte
-    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key putobject/zerobyte --body /tmp/zerobyte
-    ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${BUCKET} --prefix putobject/
-                        Should contain             ${result}         zerobyte
-
-# This test depends on the previous test case and can't be executed alone.
-Get object from s3
-    ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key putobject/f1 /tmp/testfile.result
-    Compare files               /tmp/testfile              /tmp/testfile.result
-
-Get Partial object from s3 with both start and end offset
-    ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=0-4 /tmp/testfile1.result
-                                Should contain             ${result}        ContentRange
-                                Should contain             ${result}        bytes 0-4/11
-                                Should contain             ${result}        AcceptRanges
-    ${expectedData} =           Execute                    dd if=/tmp/testfile skip=0 bs=1 count=5 2>/dev/null
-    ${actualData} =             Execute                    cat /tmp/testfile1.result
-                                Should Be Equal            ${expectedData}            ${actualData}
-
-    ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=2-4 /tmp/testfile1.result1
-                                Should contain             ${result}        ContentRange
-                                Should contain             ${result}        bytes 2-4/11
-                                Should contain             ${result}        AcceptRanges
-    ${expectedData} =           Execute                    dd if=/tmp/testfile skip=2 bs=1 count=3 2>/dev/null
-    ${actualData} =             Execute                    cat /tmp/testfile1.result1
-                                Should Be Equal            ${expectedData}            ${actualData}
-
-# end offset greater than the file size, start offset within the file length
-    ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=2-1000 /tmp/testfile1.result2
-                                Should contain             ${result}        ContentRange
-                                Should contain             ${result}        bytes 2-10/11
-                                Should contain             ${result}        AcceptRanges
-    ${expectedData} =           Execute                    dd if=/tmp/testfile skip=2 bs=1 count=9 2>/dev/null
-    ${actualData} =             Execute                    cat /tmp/testfile1.result2
-                                Should Be Equal            ${expectedData}            ${actualData}
-
-Get Partial object from s3 with both start and end offset (start and end offset greater than file size)
-    ${result} =                 Execute AWSS3APICli and checkrc        get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=10000-10000 /tmp/testfile2.result   255
-                                Should contain             ${result}        InvalidRange
-
-
-Get Partial object from s3 with both start and end offset (end offset greater than file size)
-    ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=0-10000 /tmp/testfile2.result
-                                Should contain             ${result}        ContentRange
-                                Should contain             ${result}        bytes 0-10/11
-                                Should contain             ${result}        AcceptRanges
-    ${expectedData} =           Execute                    cat /tmp/testfile
-    ${actualData} =             Execute                    cat /tmp/testfile2.result
-                                Should Be Equal            ${expectedData}            ${actualData}
-
-Get Partial object from s3 with only start offset
-    ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=0- /tmp/testfile3.result
-                                Should contain             ${result}        ContentRange
-                                Should contain             ${result}        bytes 0-10/11
-                                Should contain             ${result}        AcceptRanges
-    ${expectedData} =           Execute                    cat /tmp/testfile
-    ${actualData} =             Execute                    cat /tmp/testfile3.result
-                                Should Be Equal            ${expectedData}            ${actualData}
-
-Get Partial object from s3 with equal start and end offset
-    ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=0-0 /tmp/testfile4.result
-                                Should contain             ${result}        ContentRange
-                                Should contain             ${result}        bytes 0-0/11
-                                Should contain             ${result}        AcceptRanges
-    ${expectedData} =           Execute                    dd if=/tmp/testfile skip=0 bs=1 count=1 2>/dev/null
-    ${actualData} =             Execute                    cat /tmp/testfile4.result
-                                Should Be Equal            ${expectedData}            ${actualData}
-
-    ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=4-4 /tmp/testfile5.result
-                                Should contain             ${result}        ContentRange
-                                Should contain             ${result}        bytes 4-4/11
-                                Should contain             ${result}        AcceptRanges
-    ${expectedData} =           Execute                    dd if=/tmp/testfile skip=4 bs=1 count=1 2>/dev/null
-    ${actualData} =             Execute                    cat /tmp/testfile5.result
-                                Should Be Equal            ${expectedData}            ${actualData}
-
-Get Partial object from s3 to get last n bytes
-    ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=-4 /tmp/testfile6.result
-                                Should contain             ${result}        ContentRange
-                                Should contain             ${result}        bytes 7-10/11
-                                Should contain             ${result}        AcceptRanges
-    ${expectedData} =           Execute                    dd if=/tmp/testfile skip=7 bs=1 count=4 2>/dev/null
-    ${actualData} =             Execute                    cat /tmp/testfile6.result
-                                Should Be Equal            ${expectedData}            ${actualData}
-
-# if end is greater than file length, returns whole file
-    ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=-10000 /tmp/testfile7.result
-                                Should contain             ${result}        ContentRange
-                                Should contain             ${result}        bytes 0-10/11
-                                Should contain             ${result}        AcceptRanges
-    ${expectedData} =           Execute                    cat /tmp/testfile
-    ${actualData} =             Execute                    cat /tmp/testfile7.result
-                                Should Be Equal            ${expectedData}            ${actualData}
-
-Incorrect values for end and start offset
-    ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=-11-10000 /tmp/testfile8.result
-                                Should not contain         ${result}        ContentRange
-                                Should contain             ${result}        AcceptRanges
-    ${expectedData} =           Execute                    cat /tmp/testfile
-    ${actualData} =             Execute                    cat /tmp/testfile8.result
-                                Should Be Equal            ${expectedData}            ${actualData}
-
-    ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=11-8 /tmp/testfile9.result
-                                Should not contain         ${result}        ContentRange
-                                Should contain             ${result}        AcceptRanges
-    ${expectedData} =           Execute                    cat /tmp/testfile
-    ${actualData} =             Execute                    cat /tmp/testfile9.result
-                                Should Be Equal            ${expectedData}            ${actualData}
-
-Zero byte file
-    ${result} =                 Execute AWSS3APICli and checkrc        get-object --bucket ${BUCKET} --key putobject/zerobyte --range bytes=0-0 /tmp/testfile2.result   255
-                                Should contain             ${result}        InvalidRange
-
-    ${result} =                 Execute AWSS3APICli and checkrc        get-object --bucket ${BUCKET} --key putobject/zerobyte --range bytes=0-1 /tmp/testfile2.result   255
-                                Should contain             ${result}        InvalidRange
-
-    ${result} =                 Execute AWSS3APICli and checkrc        get-object --bucket ${BUCKET} --key putobject/zerobyte --range bytes=0-10000 /tmp/testfile2.result   255
-                                Should contain             ${result}        InvalidRange
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot b/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot
deleted file mode 100644
index 74ba4e7..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot
+++ /dev/null
@@ -1,35 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       S3 gateway web ui test
-Library             OperatingSystem
-Library             String
-Resource            ../commonlib.robot
-Resource            ./commonawslib.robot
-Suite Setup         Setup s3 tests
-
-*** Variables ***
-${ENDPOINT_URL}       http://s3g:9878
-${BUCKET}             generated
-
-*** Test Cases ***
-
-S3 Gateway Web UI
-    Run Keyword if      '${SECURITY_ENABLED}' == 'true'     Kinit HTTP user
-    ${result} =         Execute                             curl --negotiate -u : -v ${ENDPOINT_URL}
-                        Should contain      ${result}       HTTP/1.1 307 Temporary Redirect
-    ${result} =         Execute                             curl --negotiate -u : -v ${ENDPOINT_URL}/static/index.html
-                        Should contain      ${result}       Apache Hadoop Ozone S3
diff --git a/hadoop-ozone/dist/src/main/smoketest/scmcli/pipeline.robot b/hadoop-ozone/dist/src/main/smoketest/scmcli/pipeline.robot
deleted file mode 100644
index 6a6f0b0..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/scmcli/pipeline.robot
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Smoketest ozone cluster startup
-Library             OperatingSystem
-Library             BuiltIn
-Resource            ../commonlib.robot
-
-*** Variables ***
-
-
-*** Test Cases ***
-Run list pipeline
-    ${output} =         Execute          ozone scmcli pipeline list
-                        Should contain   ${output}   Type:RATIS, Factor:ONE, State:OPEN
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
deleted file mode 100644
index ee4688c..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
+++ /dev/null
@@ -1,131 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Smoke test to start cluster with docker-compose environments.
-Library             OperatingSystem
-Library             String
-Library             BuiltIn
-Resource            ../commonlib.robot
-
-*** Variables ***
-${ENDPOINT_URL}    http://s3g:9878
-
-*** Keywords ***
-Setup volume names
-    ${random}            Generate Random String  2   [NUMBERS]
-    Set Suite Variable   ${volume1}            fstest${random}
-    Set Suite Variable   ${volume2}            fstest2${random}
-    Set Suite Variable   ${volume3}            fstest3${random}
-
-*** Test Cases ***
-Create volume bucket with wrong credentials
-    Execute             kdestroy
-    ${rc}               ${output} =          Run And Return Rc And Output       ozone sh volume create o3://om/fstest
-    Should contain      ${output}       Client cannot authenticate via
-
-Create volume bucket with credentials
-                        # Authenticate testuser
-    Run Keyword         Kinit test user     testuser     testuser.keytab
-    Run Keyword         Setup volume names
-    Execute             ozone sh volume create o3://om/${volume1} 
-    Execute             ozone sh volume create o3://om/${volume2}
-    Execute             ozone sh bucket create o3://om/${volume1}/bucket1
-    Execute             ozone sh bucket create o3://om/${volume1}/bucket2
-    Execute             ozone sh bucket create o3://om/${volume2}/bucket3
-
-Check volume from ozonefs
-    ${result} =         Execute          ozone fs -ls o3fs://bucket1.${volume1}/
-
-Test Volume Acls
-    ${result} =     Execute             ozone sh volume create ${volume3}
-                    Should not contain  ${result}       Failed
-    ${result} =     Execute             ozone sh volume getacl ${volume3}
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" .
-    ${result} =     Execute             ozone sh volume addacl ${volume3} -a user:superuser1:rwxy[DEFAULT]
-    ${result} =     Execute             ozone sh volume getacl ${volume3}
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
-    ${result} =     Execute             ozone sh volume removeacl ${volume3} -a user:superuser1:xy
-    ${result} =     Execute             ozone sh volume getacl ${volume3}
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
-    ${result} =     Execute             ozone sh volume setacl ${volume3} -al user:superuser1:rwxy,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT]
-    ${result} =     Execute             ozone sh volume getacl ${volume3}
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
-    Should Match Regexp                 ${result}       \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"ALL\"
-
-Test Bucket Acls
-    ${result} =     Execute             ozone sh bucket create ${volume3}/bk1
-                    Should not contain  ${result}       Failed
-    ${result} =     Execute             ozone sh bucket getacl ${volume3}/bk1
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" .
-    ${result} =     Execute             ozone sh bucket addacl ${volume3}/bk1 -a user:superuser1:rwxy
-    ${result} =     Execute             ozone sh bucket getacl ${volume3}/bk1
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
-    ${result} =     Execute             ozone sh bucket removeacl ${volume3}/bk1 -a user:superuser1:xy
-    ${result} =     Execute             ozone sh bucket getacl ${volume3}/bk1
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
-    ${result} =     Execute             ozone sh bucket setacl ${volume3}/bk1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT]
-    ${result} =     Execute             ozone sh bucket getacl ${volume3}/bk1
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
-    Should Match Regexp                 ${result}       \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\"
-
-Test key Acls
-    Execute            ozone sh key put ${volume3}/bk1/key1 /opt/hadoop/NOTICE.txt
-    ${result} =     Execute             ozone sh key getacl ${volume3}/bk1/key1
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" .
-    ${result} =     Execute             ozone sh key addacl ${volume3}/bk1/key1 -a user:superuser1:rwxy
-    ${result} =     Execute             ozone sh key getacl ${volume3}/bk1/key1
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
-    ${result} =     Execute             ozone sh key removeacl ${volume3}/bk1/key1 -a user:superuser1:xy
-    ${result} =     Execute             ozone sh key getacl ${volume3}/bk1/key1
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
-    ${result} =     Execute             ozone sh key setacl ${volume3}/bk1/key1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc
-    ${result} =     Execute             ozone sh key getacl ${volume3}/bk1/key1
-    Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
-    Should Match Regexp                 ${result}       \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\"
-
-Test native authorizer
-    Execute         ozone sh volume removeacl ${volume3} -a group:root:a
-    Execute         kdestroy
-    Run Keyword     Kinit test user     testuser2    testuser2.keytab
-    ${result} =     Execute And Ignore Error         ozone sh bucket list /${volume3}/    
-                    Should contain      ${result}    PERMISSION_DENIED
-    ${result} =     Execute And Ignore Error         ozone sh key list /${volume3}/bk1      
-                    Should contain      ${result}    PERMISSION_DENIED
-    ${result} =     Execute And Ignore Error         ozone sh volume addacl ${volume3} -a user:testuser2/scm@EXAMPLE.COM:xy
-                    Should contain      ${result}    PERMISSION_DENIED User testuser2/scm@EXAMPLE.COM doesn't have WRITE_ACL permission to access volume
-    Execute         kdestroy
-    Run Keyword     Kinit test user     testuser     testuser.keytab
-    Execute         ozone sh volume addacl ${volume3} -a user:testuser2/scm@EXAMPLE.COM:xyrw
-    Execute         kdestroy
-    Run Keyword     Kinit test user     testuser2    testuser2.keytab
-    ${result} =     Execute And Ignore Error         ozone sh bucket list /${volume3}/
-                    Should contain      ${result}    PERMISSION_DENIED org.apache.hadoop.ozone.om.exceptions.OMException: User testuser2/scm@EXAMPLE.COM doesn't have LIST permission to access volume
-    Execute         ozone sh volume addacl ${volume3} -a user:testuser2/scm@EXAMPLE.COM:l
-    Execute         ozone sh bucket list /${volume3}/
-    Execute         ozone sh volume getacl /${volume3}/
-    
-    ${result} =     Execute And Ignore Error         ozone sh key list /${volume3}/bk1  
-    Should contain      ${result}    PERMISSION_DENIED
-    Execute         kdestroy
-    Run Keyword     Kinit test user     testuser     testuser.keytab
-    Execute         ozone sh bucket addacl ${volume3}/bk1 -a user:testuser2/scm@EXAMPLE.COM:a
-    Execute         ozone sh bucket getacl /${volume3}/bk1
-    Execute         kdestroy
-    Run Keyword     Kinit test user     testuser2    testuser2.keytab
-    Execute         ozone sh bucket getacl /${volume3}/bk1
-    Execute         ozone sh key list /${volume3}/bk1
-    Execute         kdestroy
-    Run Keyword     Kinit test user     testuser    testuser.keytab
diff --git a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot
deleted file mode 100644
index 90166fe..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot
+++ /dev/null
@@ -1,44 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Smoke test to start cluster with docker-compose environments.
-Library             OperatingSystem
-Library             String
-Library             BuiltIn
-Resource            ../commonlib.robot
-Resource            ../s3/commonawslib.robot
-
-*** Variables ***
-${ENDPOINT_URL}     http://s3g:9878
-
-*** Keywords ***
-Setup volume names
-    ${random}            Generate Random String  2   [NUMBERS]
-    Set Suite Variable   ${volume1}            fstest${random}
-    Set Suite Variable   ${volume2}            fstest2${random}
-
-*** Test Cases ***
-Secure S3 test Success
-    Run Keyword         Setup s3 tests
-    ${output} =         Execute          aws s3api --endpoint-url ${ENDPOINT_URL} create-bucket --bucket bucket-test123
-    ${output} =         Execute          aws s3api --endpoint-url ${ENDPOINT_URL} list-buckets
-                        Should contain   ${output}         bucket-test123
-
-Secure S3 test Failure
-    Run Keyword         Setup incorrect credentials for S3
-    ${rc}  ${result} =  Run And Return Rc And Output  aws s3api --endpoint-url ${ENDPOINT_URL} create-bucket --bucket bucket-test123
-    Should Be True      ${rc} > 0
-
diff --git a/hadoop-ozone/dist/src/main/smoketest/test.sh b/hadoop-ozone/dist/src/main/smoketest/test.sh
deleted file mode 100755
index e0a26b0..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/test.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
-RESULT_DIR=result
-# Delete previous results.
-rm -rf "${DIR:?}/$RESULT_DIR"
-
-REPLACEMENT="$DIR/../compose/test-all.sh"
-echo "THIS SCRIPT IS DEPRECATED. Please use $REPLACEMENT instead."
-
-${REPLACEMENT}
-RESULT=$?
-cp -r "$DIR/../compose/result" "$DIR"
-exit $RESULT
diff --git a/hadoop-ozone/dist/src/main/smoketest/topology/scmcli.robot b/hadoop-ozone/dist/src/main/smoketest/topology/scmcli.robot
deleted file mode 100644
index 823981d..0000000
--- a/hadoop-ozone/dist/src/main/smoketest/topology/scmcli.robot
+++ /dev/null
@@ -1,32 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       Smoketest ozone cluster startup
-Library             OperatingSystem
-Library             BuiltIn
-Resource            ../commonlib.robot
-
-*** Variables ***
-
-
-*** Test Cases ***
-Run printTopology
-    ${output} =         Execute          ozone scmcli printTopology
-                        Should contain   ${output}         10.5.0.7(ozone-topology_datanode_4_1.ozone-topology_net)    /rack2
-Run printTopology -o
-    ${output} =         Execute          ozone scmcli printTopology -o
-                        Should contain   ${output}         Location: /rack2
-                        Should contain   ${output}         10.5.0.7(ozone-topology_datanode_4_1.ozone-topology_net)
diff --git a/hadoop-ozone/fault-injection-test/network-tests/pom.xml b/hadoop-ozone/fault-injection-test/network-tests/pom.xml
deleted file mode 100644
index 3b29480c..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/pom.xml
+++ /dev/null
@@ -1,103 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone-fault-injection-test</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-network-tests</artifactId>
-  <description>Apache Hadoop Ozone Network Tests</description>
-  <name>Apache Hadoop Ozone Network Tests</name>
-  <packaging>jar</packaging>
-
-  <build>
-    <plugins>
-      <plugin>
-        <artifactId>maven-resources-plugin</artifactId>
-        <version>3.1.0</version>
-        <executions>
-          <execution>
-            <id>copy-resources</id>
-            <phase>process-resources</phase>
-            <goals>
-              <goal>copy-resources</goal>
-            </goals>
-            <configuration>
-              <outputDirectory>${project.build.directory}</outputDirectory>
-              <resources>
-                <resource>
-                  <directory>src/test/compose</directory>
-                  <filtering>true</filtering>
-                  <includes>
-                    <include>docker-compose.yaml</include>
-                    <include>docker-config</include>
-                  </includes>
-                </resource>
-              </resources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-  <profiles>
-    <profile>
-      <id>it</id>
-      <properties>
-        <ozone.home>${basedir}/../../dist/target/ozone-${project.version}</ozone.home>
-      </properties>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.codehaus.mojo</groupId>
-            <artifactId>exec-maven-plugin</artifactId>
-            <executions>
-              <execution>
-                <phase>integration-test</phase>
-                <goals>
-                  <goal>exec</goal>
-                </goals>
-                <configuration>
-                  <executable>python</executable>
-                  <arguments>
-                    <argument>-m</argument>
-                    <argument>pytest</argument>
-                    <argument>-s</argument>
-                    <argument>${basedir}/src/test/blockade/</argument>
-                  </arguments>
-                  <environmentVariables>
-                    <OZONE_HOME>
-                      ${ozone.home}
-                    </OZONE_HOME>
-                    <MAVEN_TEST>
-                      ${project.build.directory}
-                    </MAVEN_TEST>
-                  </environmentVariables>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
-
-</project>
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/README.md b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/README.md
deleted file mode 100644
index 7fb62b3..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-## Blockade Tests
-The following Python packages need to be installed before running the tests:
-
-1. blockade
-2. pytest==3.2.0
-
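-For example, assuming pip is available on the machine running the tests,
-both packages can be installed with:
-
-```
-pip install blockade pytest==3.2.0
-```
-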
-Running the tests as part of the Maven build:
-
-```
-mvn clean verify -Pit
-```
-
-Running the tests from the released binary:
-
-You can execute all blockade tests with the following command:
-
-```
-cd $OZONE_HOME
-python -m pytest tests/blockade
-```
-
-You can also execute specific blockade tests with the following command:
-
-```
-cd $OZONE_HOME
-python -m pytest tests/blockade/< PATH TO PYTHON FILE >
-e.g.: python -m pytest tests/blockade/test_blockade_datanode_isolation.py
-```
\ No newline at end of file
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/conftest.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/conftest.py
deleted file mode 100644
index 582c4cc..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/conftest.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import os
-import time
-import subprocess
-import pytest
-
-EPOCH_TIME = int(time.time())
-
-
-def pytest_addoption(parser):
-  parser.addoption("--output-dir",
-                   action="store",
-                   default="/tmp/BlockadeTests",
-                   help="location of output directory where output log "
-                        "and plot files will be created")
-  parser.addoption("--log-format",
-                   action="store",
-                   default="%(asctime)s|%(levelname)s|%(threadName)s|"
-                           "%(filename)s:%(lineno)s -"
-                           " %(funcName)s()|%(message)s",
-                   help="specify log format")
-  parser.addoption("--log-level",
-                   action="store",
-                   default="info",
-                   help="specify log level")
-  parser.addoption("--containerStatusSleep",
-                   action="store",
-                   default="900",
-                   help="sleep time before checking container status")
-  parser.addoption("--runSecondPhase",
-                   action="store",
-                   default="false",
-                   help="run second phase of the tests")
-
-
-@pytest.fixture
-def run_second_phase(request):
-  """
-  :param request:
-  This function returns if the user has opted for running second phase
-  of the tests.
-  """
-  return request.config.getoption("--runSecondPhase")
-
-
-def pytest_configure(config):
-  global OUTPUT_DIR
-  os.environ["CONTAINER_STATUS_SLEEP"] = config.option.containerStatusSleep
-  OUTPUT_DIR = "%s/%s" % (config.option.output_dir, EPOCH_TIME)
-  try:
-    os.makedirs(OUTPUT_DIR)
-  except OSError as e:
-    raise Exception(e.strerror + ": " + e.filename)
-  log_file = os.path.join(OUTPUT_DIR, "output.log")
-
-  if config.option.log_level == "trace":
-    loglevel = eval("logging.DEBUG")
-  else:
-    loglevel = eval("logging." + config.option.log_level.upper())
-  logformatter = logging.Formatter(config.option.log_format)
-  logging.basicConfig(filename=log_file,
-                      filemode='w',
-                      level=loglevel,
-                      format=config.option.log_format)
-  console = logging.StreamHandler()
-  console.setLevel(loglevel)
-  console.setFormatter(logformatter)
-  logging.getLogger('').addHandler(console)
-
-
-def pytest_report_teststatus(report):
-  logger = logging.getLogger('main')
-  loc, line, name = report.location
-  if report.outcome == 'skipped':
-    pass
-  elif report.when == 'setup':
-    logger.info("RUNNING TEST \"%s\" at location \"%s\" at line number"
-                " \"%s\"" % (name, loc, str(line)))
-  elif report.when == 'call':
-    logger.info("TEST \"%s\" %s in %3.2f seconds" %
-                (name, report.outcome.upper(), report.duration))
-    log_file_path = "%s/%s_all_docker.log" % \
-                    (OUTPUT_DIR, name)
-    gather_docker_logs(log_file_path)
-
-
-def pytest_sessionfinish(session):
-  logger = logging.getLogger('main')
-  logger.info("ALL TESTS FINISHED")
-  logger.info("ALL logs present in following directory: %s", OUTPUT_DIR)
-
-
-def gather_docker_logs(log_file_path):
-  docker_compose_file = os.environ["DOCKER_COMPOSE_FILE"]
-  output = subprocess.check_output(["docker-compose", "-f",
-                                    docker_compose_file, "logs"])
-  with open(log_file_path, "w") as text_file:
-    text_file.write(output)
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/__init__.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/__init__.py
deleted file mode 100644
index 13878a1..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
\ No newline at end of file
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/blockade.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/blockade.py
deleted file mode 100644
index 7e32f09..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/blockade.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""This module has apis to create and remove a blockade cluster"""
-
-from subprocess import call
-import logging
-import util
-
-logger = logging.getLogger(__name__)
-
-
-class Blockade(object):
-
-    @classmethod
-    def blockade_destroy(cls):
-        logger.info("Running blockade destroy")
-        call(["blockade", "destroy"])
-
-    @classmethod
-    def blockade_up(cls):
-        logger.info("Running blockade up")
-        call(["blockade", "up"])
-
-    @classmethod
-    def blockade_status(cls):
-        logger.info("Running blockade status")
-        return call(["blockade", "status"])
-
-    @classmethod
-    def make_flaky(cls, flaky_node):
-        logger.info("flaky node: %s", flaky_node)
-        output = call(["blockade", "flaky", flaky_node])
-        assert output == 0, "flaky command failed with exit code=[%s]" % output
-
-    @classmethod
-    def blockade_fast_all(cls):
-        output = call(["blockade", "fast", "--all"])
-        assert output == 0, "fast command failed with exit code=[%s]" % output
-
-    @classmethod
-    def blockade_create_partition(cls, *args):
-        nodes = ""
-        for node_list in args:
-            nodes = nodes + ','.join(node_list) + " "
-        exit_code, output = \
-            util.run_command("blockade partition %s" % nodes)
-        assert exit_code == 0, \
-            "blockade partition command failed with exit code=[%s]" % output
-
-    @classmethod
-    def blockade_join(cls):
-        exit_code = call(["blockade", "join"])
-        assert exit_code == 0, "blockade join command failed with exit code=[%s]" \
-                               % exit_code
-
-    @classmethod
-    def blockade_stop(cls, node, all_nodes=False):
-        if all_nodes:
-            output = call(["blockade", "stop", "--all"])
-        else:
-            output = call(["blockade", "stop", node])
-        assert output == 0, "blockade stop command failed with exit code=[%s]" \
-                            % output
-
-    @classmethod
-    def blockade_start(cls, node, all_nodes=False):
-        if all_nodes:
-            output = call(["blockade", "start", "--all"])
-        else:
-            output = call(["blockade", "start", node])
-        assert output == 0, "blockade start command failed with " \
-                            "exit code=[%s]" % output
-
-    @classmethod
-    def blockade_add(cls, node):
-        output = call(["blockade", "add", node])
-        assert output == 0, "blockade add command failed"
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/client.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/client.py
deleted file mode 100644
index 9d40cf4..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/client.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-from ozone import util
-from ozone.cluster import Command
-
-
-class OzoneClient:
-
-    __logger__ = logging.getLogger(__name__)
-
-    def __init__(self, cluster):
-        self.cluster = cluster
-
-    def create_volume(self, volume_name):
-        OzoneClient.__logger__.info("Creating Volume %s" % volume_name)
-        command = [Command.ozone, "sh volume create /%s --user root" % volume_name]
-        util.run_docker_command(command, self.cluster.client)
-
-    def create_bucket(self, volume_name, bucket_name):
-        OzoneClient.__logger__.info("Creating Bucket %s in Volume %s" % (bucket_name, volume_name))
-        command = [Command.ozone, "sh bucket create /%s/%s" % (volume_name, bucket_name)]
-        util.run_docker_command(command, self.cluster.client)
-
-    def put_key(self, source_file, volume_name, bucket_name, key_name, replication_factor=None):
-        OzoneClient.__logger__.info("Creating Key %s in %s/%s" % (key_name, volume_name, bucket_name))
-        exit_code, output = util.run_docker_command(
-            "ls %s" % source_file, self.cluster.client)
-        assert exit_code == 0, "%s does not exist" % source_file
-        command = [Command.ozone, "sh key put /%s/%s/%s %s" %
-                   (volume_name, bucket_name, key_name, source_file)]
-        if replication_factor:
-            command.append("--replication=%s" % replication_factor)
-
-        exit_code, output = util.run_docker_command(command, self.cluster.client)
-        assert exit_code == 0, "Ozone put Key failed with output=[%s]" % output
-
-    def get_key(self, volume_name, bucket_name, key_name, file_path='.'):
-        OzoneClient.__logger__.info("Reading key %s from %s/%s" % (key_name, volume_name, bucket_name))
-        command = [Command.ozone, "sh key get /%s/%s/%s %s" %
-                   (volume_name, bucket_name, key_name, file_path)]
-        exit_code, output = util.run_docker_command(command, self.cluster.client)
-        assert exit_code == 0, "Ozone get Key failed with output=[%s]" % output
-
-    def run_freon(self, num_volumes, num_buckets, num_keys, key_size,
-                  replication_type="RATIS", replication_factor="THREE"):
-        """
-        Runs freon on the cluster.
-        """
-        command = [Command.freon,
-                   " rk",
-                   " --numOfVolumes " + str(num_volumes),
-                   " --numOfBuckets " + str(num_buckets),
-                   " --numOfKeys " + str(num_keys),
-                   " --keySize " + str(key_size),
-                   " --replicationType " + replication_type,
-                   " --factor " + replication_factor]
-        return util.run_docker_command(command, self.cluster.client)
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
deleted file mode 100644
index 1616083..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
+++ /dev/null
@@ -1,316 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import os
-import re
-import subprocess
-import sys
-import yaml
-import time
-
-
-from os import environ
-from subprocess import call
-from ozone import util
-from ozone.constants import Command
-from ozone.blockade import Blockade
-from ozone.client import OzoneClient
-from ozone.container import Container
-from ozone.exceptions import ContainerNotFoundError
-
-
-class Configuration:
-    """
-    Configuration used while starting an Ozone cluster.
-    The @property decorator is used to provide getter, setter and deleter
-    behaviour for the 'datanode_count' attribute:
-    @datanode_count.setter sets the value of the 'datanode_count' attribute.
-    @datanode_count.deleter deletes the current value of the 'datanode_count'
-    attribute.
-    """
-
-    def __init__(self):
-        if "MAVEN_TEST" in os.environ:
-            compose_dir = environ.get("MAVEN_TEST")
-            self.docker_compose_file = os.path.join(compose_dir, "docker-compose.yaml")
-        elif "OZONE_HOME" in os.environ:
-            compose_dir = os.path.join(environ.get("OZONE_HOME"), "compose", "ozoneblockade")
-            self.docker_compose_file = os.path.join(compose_dir, "docker-compose.yaml")
-        else:
-            __parent_dir__ = os.path.dirname(os.path.dirname(os.path.dirname(
-                os.path.dirname(os.path.realpath(__file__)))))
-            self.docker_compose_file = os.path.join(__parent_dir__,
-                                                    "compose", "ozoneblockade",
-                                                    "docker-compose.yaml")
-        self._datanode_count = 3
-        os.environ["DOCKER_COMPOSE_FILE"] = self.docker_compose_file
-
-    @property
-    def datanode_count(self):
-        return self._datanode_count
-
-    @datanode_count.setter
-    def datanode_count(self, datanode_count):
-        self._datanode_count = datanode_count
-
-    @datanode_count.deleter
-    def datanode_count(self):
-        del self._datanode_count
-
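-# A brief, hypothetical usage sketch of the property above (not part of the
-# original suite): the setter lets a test override the datanode count before
-# the cluster is started.
-#
-#   conf = Configuration()
-#   conf.datanode_count = 5        # invokes @datanode_count.setter
-#   cluster = OzoneCluster.create(conf)
-#   cluster.start()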
-
-class OzoneCluster(object):
-    """
-    This represents Ozone Cluster.
-    Here @property decorators is used to achieve getters, setters and delete
-    behaviour for 'om', 'scm', 'datanodes' and 'client' attributes.
-    """
-
-    __logger__ = logging.getLogger(__name__)
-
-    def __init__(self, conf):
-        self.conf = conf
-        self.docker_compose_file = conf.docker_compose_file
-        self._om = None
-        self._scm = None
-        self._datanodes = None
-        self._client = None
-        self.scm_uuid = None
-        self.datanode_dir = None
-
-    @property
-    def om(self):
-        return self._om
-
-    @om.setter
-    def om(self, om):
-        self._om = om
-
-    @om.deleter
-    def om(self):
-        del self._om
-
-    @property
-    def scm(self):
-        return self._scm
-
-    @scm.setter
-    def scm(self, scm):
-        self._scm = scm
-
-    @scm.deleter
-    def scm(self):
-        del self._scm
-
-    @property
-    def datanodes(self):
-        return self._datanodes
-
-    @datanodes.setter
-    def datanodes(self, datanodes):
-        self._datanodes = datanodes
-
-    @datanodes.deleter
-    def datanodes(self):
-        del self._datanodes
-
-    @property
-    def client(self):
-        return self._client
-
-    @client.setter
-    def client(self, client):
-        self._client = client
-
-    @client.deleter
-    def client(self):
-        del self._client
-
-    @classmethod
-    def create(cls, config=None):
-        # Avoid an evaluated-once default argument: build a fresh
-        # Configuration for each call unless one is supplied.
-        return OzoneCluster(config if config is not None else Configuration())
-
-    def start(self):
-        """
-        Start Ozone Cluster in docker containers.
-        """
-
-        # check if docker is up.
-
-        if "OZONE_RUNNER_VERSION" not in os.environ:
-            self.__logger__.error("OZONE_RUNNER_VERSION is not set.")
-            sys.exit(1)
-
-        if "HDDS_VERSION" not in os.environ:
-            self.__logger__.error("HDDS_VERSION is not set.")
-            sys.exit(1)
-
-        self.__logger__.info("Starting Ozone Cluster")
-        if Blockade.blockade_status() == 0:
-            Blockade.blockade_destroy()
-
-        Blockade.blockade_up()
-
-        call([Command.docker_compose, "-f", self.docker_compose_file,
-              "up", "-d", "--scale",
-              "datanode=" + str(self.conf.datanode_count)])
-        self.__logger__.info("Waiting 10s for cluster start up...")
-        # TODO: Remove the sleep and instead wait only until the cluster is out of safe mode.
-        time.sleep(10)
-        output = subprocess.check_output([Command.docker_compose, "-f",
-                                          self.docker_compose_file, "ps"])
-        node_list = []
-        for out in output.split("\n")[2:-1]:
-            node = out.split(" ")[0]
-            node_list.append(node)
-            Blockade.blockade_add(node)
-
-        # Fail fast before indexing into a possibly empty node list.
-        assert node_list, "no node found in the cluster!"
-
-        self.om = [x for x in node_list if 'om' in x][0]
-        self.scm = [x for x in node_list if 'scm' in x][0]
-        self.datanodes = sorted(x for x in node_list if 'datanode' in x)
-        self.client = [x for x in node_list if 'ozone_client' in x][0]
-        self.scm_uuid = self.__get_scm_uuid__()
-        self.datanode_dir = self.get_conf_value("hdds.datanode.dir")
-
-        self.__logger__.info("blockade created with nodes %s", ' '.join(node_list))
-
-    def get_conf_value(self, key):
-        """
-        Returns the value of given configuration key.
-        """
-        command = [Command.ozone, "getconf -confKey " + key]
-        exit_code, output = util.run_docker_command(command, self.om)
-        return str(output).strip()
-
-    def scale_datanode(self, datanode_count):
-        """
-        Commission new datanodes to the running cluster.
-        """
-        call([Command.docker_compose, "-f", self.docker_compose_file,
-              "up", "-d", "--scale", "datanode=" + datanode_count])
-
-    def partition_network(self, *args):
-        """
-        Partition the network which is used by the cluster.
-        """
-        Blockade.blockade_create_partition(*args)
-
-    def restore_network(self):
-        """
-        Restores the network partition.
-        """
-        Blockade.blockade_join()
-
-    def __get_scm_uuid__(self):
-        """
-        Returns SCM's UUID.
-        """
-        ozone_metadata_dir = self.get_conf_value("ozone.metadata.dirs")
-        command = "cat %s/scm/current/VERSION" % ozone_metadata_dir
-        exit_code, output = util.run_docker_command(command, self.scm)
-        output_list = output.split("\n")
-        key_value = [x for x in output_list if re.search(r"\w+=\w+", x)]
-        uuid = [token for token in key_value if 'scmUuid' in token]
-        return uuid.pop().split("=")[1].strip()
-
-    def get_client(self):
-        return OzoneClient(self)
-
-    def get_container(self, container_id):
-        command = [Command.ozone, "scmcli list -c=1 -s=%s | grep containerID", container_id - 1]
-        exit_code, output = util.run_docker_command(command, self.om)
-        if exit_code != 0:
-            raise ContainerNotFoundError(container_id)
-        return Container(container_id, self)
-
-    def is_container_replica_exist(self, container_id, datanode):
-        container_parent_path = "%s/hdds/%s/current/containerDir0" % \
-                                (self.datanode_dir, self.scm_uuid)
-        command = "find %s -type f -name '%s.container'" % (container_parent_path, container_id)
-        exit_code, output = util.run_docker_command(command, datanode)
-        container_path = output.strip()
-        if not container_path:
-            return False
-        return True
-
-    def get_containers_on_datanode(self, datanode):
-        """
-        Returns all the container on given datanode.
-        """
-        container_parent_path = "%s/hdds/%s/current/containerDir0" % \
-                                (self.datanode_dir, self.scm_uuid)
-        command = "find %s -type f -name '*.container'" % container_parent_path
-        exit_code, output = util.run_docker_command(command, datanode)
-        containers = []
-
-        container_list = map(str.strip, output.split("\n"))
-        for container_path in container_list:
-            # Reading the container file.
-            exit_code, output = util.run_docker_command(
-              "cat " + container_path, datanode)
-            if exit_code != 0:
-                continue
-            data = output.split("\n")
-            # Reading key value pairs from container file.
-            key_value = [x for x in data if re.search(r"\w+:\s\w+", x)]
-            content = "\n".join(key_value)
-            content_yaml = yaml.safe_load(content)
-            if content_yaml is None:
-                continue
-            containers.append(Container(content_yaml.get('containerID'), self))
-        return containers
-
-    def get_container_state(self, container_id, datanode):
-        container_parent_path = "%s/hdds/%s/current/containerDir0" % \
-                                (self.datanode_dir, self.scm_uuid)
-        command = "find %s -type f -name '%s.container'" % (container_parent_path, container_id)
-        exit_code, output = util.run_docker_command(command, datanode)
-        container_path = output.strip()
-        if not container_path:
-            raise ContainerNotFoundError("Container not found!")
-
-        # Reading the container file.
-        exit_code, output = util.run_docker_command("cat " + container_path, datanode)
-        if exit_code != 0:
-            raise ContainerNotFoundError("Container not found!")
-        data = output.split("\n")
-        # Reading key value pairs from container file.
-        key_value = [x for x in data if re.search(r"\w+:\s\w+", x)]
-        content = "\n".join(key_value)
-        content_yaml = yaml.safe_load(content)
-        return str(content_yaml.get('state')).lstrip()
-
-    def get_container_datanodes(self, container_id):
-        result = []
-        for datanode in self.datanodes:
-            container_parent_path = "%s/hdds/%s/current/containerDir0" % \
-                                    (self.datanode_dir, self.scm_uuid)
-            command = "find %s -type f -name '%s.container'" % (container_parent_path, container_id)
-            exit_code, output = util.run_docker_command(command, datanode)
-            if output.strip():
-                result.append(datanode)
-        return result
-
-    def stop(self):
-        """
-        Stops the Ozone Cluster.
-        """
-        self.__logger__.info("Stopping Ozone Cluster")
-        call([Command.docker_compose, "-f", self.docker_compose_file, "down"])
-        Blockade.blockade_destroy()
-
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/constants.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/constants.py
deleted file mode 100644
index a79d6b1..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/constants.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class Command(object):
-    docker = "docker"
-    docker_compose = "docker-compose"
-    ozone = "/opt/hadoop/bin/ozone"
-    freon = "/opt/hadoop/bin/ozone freon"
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py
deleted file mode 100644
index 65c6b2f..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import util
-from ozone.exceptions import ContainerNotFoundError
-
-
-class Container:
-
-    def __init__(self, container_id, cluster):
-        self.container_id = container_id
-        self.cluster = cluster
-
-    def is_on(self, datanode):
-        return self.cluster.is_container_replica_exist(self.container_id, datanode)
-
-    def get_datanode_states(self):
-        dns = self.cluster.get_container_datanodes(self.container_id)
-        states = []
-        for dn in dns:
-            states.append(self.get_state(dn))
-        return states
-
-    def get_state(self, datanode):
-        return self.cluster.get_container_state(self.container_id, datanode)
-
-    def wait_until_replica_is_quasi_closed(self, datanode):
-        def predicate():
-            try:
-                if self.cluster.get_container_state(self.container_id, datanode) == 'QUASI_CLOSED':
-                    return True
-                else:
-                    return False
-            except ContainerNotFoundError:
-                return False
-
-        util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10)
-        if not predicate():
-            raise Exception("Replica is not quasi closed!")
-
-    def wait_until_one_replica_is_quasi_closed(self):
-        def predicate():
-            dns = self.cluster.get_container_datanodes(self.container_id)
-            for dn in dns:
-                if self.cluster.get_container_state(self.container_id, dn) == 'QUASI_CLOSED':
-                    return True
-            # Only report failure once every datanode has been checked.
-            return False
-
-        util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10)
-        if not predicate():
-            raise Exception("None of the container replica is quasi closed!")
-
-    def wait_until_replica_is_closed(self, datanode):
-        def predicate():
-            try:
-                if self.cluster.get_container_state(self.container_id, datanode) == 'CLOSED':
-                    return True
-                else:
-                    return False
-            except ContainerNotFoundError:
-                return False
-
-        util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10)
-        if not predicate():
-            raise Exception("Replica is not closed!")
-
-    def wait_until_one_replica_is_closed(self):
-        def predicate():
-            dns = self.cluster.get_container_datanodes(self.container_id)
-            for dn in dns:
-                if self.cluster.get_container_state(self.container_id, dn) == 'CLOSED':
-                    return True
-            return False
-
-        util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10)
-        if not predicate():
-            raise Exception("None of the container replica is closed!")
-
-    def wait_until_two_replicas_are_closed(self):
-        def predicate():
-            dns = self.cluster.get_container_datanodes(self.container_id)
-            closed_count = 0
-            for dn in dns:
-                if self.cluster.get_container_state(self.container_id, dn) == 'CLOSED':
-                    closed_count = closed_count + 1
-            return closed_count >= 2
-
-        util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10)
-        if not predicate():
-            raise Exception("Fewer than two container replicas are closed!")
-
-    def wait_until_all_replicas_are_closed(self):
-        def predicate():
-            try:
-                dns = self.cluster.get_container_datanodes(self.container_id)
-                for dn in dns:
-                    if self.cluster.get_container_state(self.container_id, dn) != 'CLOSED':
-                        return False
-                return True
-            except ContainerNotFoundError:
-                return False
-
-        util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10)
-        if not predicate():
-            raise Exception("Not all the replicas are closed!")
-
-    def wait_until_replica_is_not_open_anymore(self, datanode):
-        def predicate():
-            try:
-                state = self.cluster.get_container_state(self.container_id, datanode)
-                return state != 'OPEN' and state != 'CLOSING'
-            except ContainerNotFoundError:
-                return False
-
-        util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10)
-        if not predicate():
-            raise Exception("Replica is still open!")
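-
-
-# Illustrative only: a minimal sketch of how the waiters above are driven
-# from the blockade tests (the cluster/dns names are assumed from those test
-# modules, and CONTAINER_STATUS_SLEEP must be set in the environment):
-#
-#     containers = cluster.get_containers_on_datanode(dns[0])
-#     for container in containers:
-#         container.wait_until_one_replica_is_closed()
-#         assert 'CLOSED' in container.get_datanode_states()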
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/exceptions.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/exceptions.py
deleted file mode 100644
index 9917eaa..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/exceptions.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class ContainerNotFoundError(RuntimeError):
-    """Raised when a container replica cannot be found on a datanode."""
-    pass
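-
-
-# Hypothetical raise site, for illustration only: the cluster helpers raise
-# this when a replica is missing, and the waiters in container.py catch it:
-#
-#     raise ContainerNotFoundError(
-#         "Container %s is not on %s" % (container_id, datanode))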
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/util.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/util.py
deleted file mode 100644
index 066b16f6..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/util.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import time
-import re
-import subprocess
-
-from ozone.constants import Command
-
-logger = logging.getLogger(__name__)
-
-
-def wait_until(predicate, timeout, check_frequency=1):
-    deadline = time.time() + timeout
-    while time.time() < deadline:
-        if predicate():
-            return
-        time.sleep(check_frequency)
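-
-# Note: wait_until returns as soon as the predicate holds, but on timeout it
-# returns silently, so callers re-check the predicate afterwards to decide
-# whether to fail. A hypothetical call with a 30-second budget:
-#
-#     wait_until(lambda: replica_count() == 3, 30, check_frequency=2)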
-
-
-def run_docker_command(command, run_on):
-    if isinstance(command, list):
-        command = ' '.join(command)
-    command = [Command.docker,
-               "exec " + run_on,
-               command]
-    return run_command(command)
-
-
-def run_command(cmd):
-    command = cmd
-    if isinstance(cmd, list):
-        command = ' '.join(cmd)
-    logger.info("RUNNING: %s", command)
-    all_output = ""
-    my_process = subprocess.Popen(command,  stdout=subprocess.PIPE,
-                                  stderr=subprocess.STDOUT, shell=True)
-    while my_process.poll() is None:
-        op = my_process.stdout.readline()
-        if op:
-            all_output += op
-            logger.info(op)
-    other_output = my_process.communicate()
-    other_output = other_output[0].strip()
-    if other_output != "":
-        all_output += other_output
-    reg = re.compile(r"(\r\n|\n)$")
-    logger.debug("Output: %s", all_output)
-    all_output = reg.sub("", all_output, 1)
-    return my_process.returncode, all_output
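-
-# run_command returns an (exit_code, output) tuple; an illustrative call:
-#
-#     exit_code, output = run_command(["docker", "ps", "-a"])
-#     assert exit_code == 0, output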
-
-
-def get_checksum(file_path, run_on):
-    command = "md5sum %s" % file_path
-    exit_code, output = run_docker_command(command, run_on)
-    assert exit_code == 0, "Can't find checksum"
-    output_split = output.split("\n")
-    result = ""
-    for line in output_split:
-        if line.find("Warning") >= 0 or line.find("is not a tty") >= 0:
-            logger.info("skip this line: %s", line)
-        else:
-            result = result + line
-    checksum = result.split(" ")
-    return checksum[0]
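-
-
-# get_checksum is what the client-failure tests use to compare a source file
-# with the key read back from Ozone, e.g. (sketch, paths as in those tests):
-#
-#     assert get_checksum("/etc/passwd", client) == \
-#         get_checksum("/tmp/key-1", client)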
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py
deleted file mode 100644
index 6420564..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import re
-import time
-import logging
-import ozone.util
-
-from ozone.cluster import OzoneCluster
-
-logger = logging.getLogger(__name__)
-
-
-def setup_function():
-    global cluster
-    cluster = OzoneCluster.create()
-    cluster.start()
-
-
-def teardown_function():
-    cluster.stop()
-
-
-def test_client_failure_isolate_two_datanodes():
-    """
-    In this test, all DNs are isolated from each other.
-    Two of the DNs cannot communicate with any other node in the cluster.
-    Expectation:
-    Writes should fail.
-    Keys written before the partition was created should still be readable.
-    """
-    om = cluster.om
-    scm = cluster.scm
-    dns = cluster.datanodes
-    client = cluster.client
-    oz_client = cluster.get_client()
-
-    epoch_time = int(time.time())
-    volume_name = "%s-%s" % ("volume", epoch_time)
-    bucket_name = "%s-%s" % ("bucket", epoch_time)
-    key_name = "key-1"
-
-    oz_client.create_volume(volume_name)
-    oz_client.create_bucket(volume_name, bucket_name)
-    oz_client.put_key("/etc/passwd", volume_name, bucket_name, key_name, "THREE")
-
-    first_set = [om, scm, dns[0], client]
-    second_set = [dns[1]]
-    third_set = [dns[2]]
-
-    logger.info("Partitioning the network")
-    cluster.partition_network(first_set, second_set, third_set)
-
-    exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
-    assert exit_code != 0, "freon run should have failed."
-
-    oz_client.get_key(volume_name, bucket_name, key_name, "/tmp/")
-
-    file_checksum = ozone.util.get_checksum("/etc/passwd", client)
-    key_checksum = ozone.util.get_checksum("/tmp/%s" % key_name, client)
-
-    assert file_checksum == key_checksum
-
-
-def test_client_failure_isolate_one_datanode():
-    """
-    In this test, one of the DNs is isolated from all other nodes.
-    Expectation:
-    Writes should pass.
-    Keys written before the partition was created can still be read.
-    """
-    om = cluster.om
-    scm = cluster.scm
-    dns = cluster.datanodes
-    client = cluster.client
-    oz_client = cluster.get_client()
-
-    epoch_time = int(time.time())
-    volume_name = "%s-%s" % ("volume", epoch_time)
-    bucket_name = "%s-%s" % ("bucket", epoch_time)
-    key_name = "key-1"
-
-    oz_client.create_volume(volume_name)
-    oz_client.create_bucket(volume_name, bucket_name)
-    oz_client.put_key("/etc/passwd", volume_name, bucket_name, key_name, "THREE")
-
-    first_set = [om, scm, dns[0], dns[1], client]
-    second_set = [dns[2]]
-
-    logger.info("Partitioning the network")
-    cluster.partition_network(first_set, second_set)
-
-    exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
-    assert re.search("3 way commit failed", output) is not None
-    assert exit_code == 0, "freon run failed with output=[%s]" % output
-
-    oz_client.get_key(volume_name, bucket_name, key_name, "/tmp/")
-
-    file_checksum = ozone.util.get_checksum("/etc/passwd", client)
-    key_checksum = ozone.util.get_checksum("/tmp/%s" % key_name, cluster.client)
-
-    assert file_checksum == key_checksum
-
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_datanode_isolation.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_datanode_isolation.py
deleted file mode 100644
index 7f1d34e..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_datanode_isolation.py
+++ /dev/null
@@ -1,156 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-from ozone.cluster import OzoneCluster
-from ozone.exceptions import ContainerNotFoundError
-
-logger = logging.getLogger(__name__)
-
-
-def setup_function():
-    global cluster
-    cluster = OzoneCluster.create()
-    cluster.start()
-
-
-def teardown_function():
-    cluster.stop()
-
-
-def test_isolate_single_datanode():
-    """
-    In this test case we create a network partition in such a way that
-    one of the DNs will not be able to communicate with the other datanodes
-    but will still be able to communicate with SCM.
-
-    Once the network partition happens, SCM detects it and closes the pipeline,
-    which in turn closes the containers.
-
-    The container replicas on the first two DNs will get CLOSED as they have
-    quorum. The container replica on the third node will be QUASI_CLOSED as it
-    is not able to connect with the other DNs and doesn't have the latest
-    BCSID.
-
-    Once we restore the network, the stale replica on the third DN will be
-    deleted and an up-to-date replica will be copied from one of the other
-    DNs.
-
-    """
-    om = cluster.om
-    scm = cluster.scm
-    dns = cluster.datanodes
-    client = cluster.client
-    oz_client = cluster.get_client()
-
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    # Partition the network
-    first_set = [om, scm, dns[0], dns[1], client]
-    second_set = [om, scm, dns[2], client]
-    logger.info("Partitioning the network")
-    cluster.partition_network(first_set, second_set)
-
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    containers = cluster.get_containers_on_datanode(dns[0])
-
-    # The same set of containers should also be in datanode[2]
-
-    for container in containers:
-        assert container.is_on(dns[2])
-
-    logger.info("Waiting for container to be CLOSED")
-    for container in containers:
-        container.wait_until_one_replica_is_closed()
-
-    for container in containers:
-        assert container.get_state(dns[0]) == 'CLOSED'
-        assert container.get_state(dns[1]) == 'CLOSED'
-        try:
-            assert container.get_state(dns[2]) == 'CLOSING' or \
-                   container.get_state(dns[2]) == 'QUASI_CLOSED'
-        except ContainerNotFoundError:
-            assert True
-
-    # Since the replica in datanode[2] doesn't have the latest BCSID,
-    # ReplicationManager will delete it and copy a closed replica.
-    # We will now restore the network and datanode[2] should get a
-    # closed replica of the container
-    logger.info("Restoring the network")
-    cluster.restore_network()
-
-    logger.info("Waiting for the replica to be CLOSED")
-    for container in containers:
-        container.wait_until_replica_is_closed(dns[2])
-
-    for container in containers:
-        assert container.get_state(dns[0]) == 'CLOSED'
-        assert container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'CLOSED'
-
-    exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
-    assert exit_code == 0, "freon run failed with output=[%s]" % output
-
-
-def test_datanode_isolation_all():
-    """
-    In this test case we create a network partition in such a way that
-    the DNs cannot communicate with each other.
-    All DNs will still be able to communicate with SCM.
-
-    Once the network partition happens, SCM detects it and closes the pipeline,
-    which in turn tries to close the containers.
-    At least one of the replicas should end up in the CLOSED state.
-
-    Once we restore the network, there will be three closed replicas.
-
-    """
-    om = cluster.om
-    scm = cluster.scm
-    dns = cluster.datanodes
-    client = cluster.client
-    oz_client = cluster.get_client()
-
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    logger.info("Partitioning the network")
-    first_set = [om, scm, dns[0], client]
-    second_set = [om, scm, dns[1], client]
-    third_set = [om, scm, dns[2], client]
-    cluster.partition_network(first_set, second_set, third_set)
-
-    containers = cluster.get_containers_on_datanode(dns[0])
-    container = containers.pop()
-
-    logger.info("Waiting for a replica to be CLOSED")
-    container.wait_until_one_replica_is_closed()
-
-    # At least one of the replicas should be in the CLOSED state
-    assert 'CLOSED' in container.get_datanode_states()
-
-    logger.info("Restoring the network")
-    cluster.restore_network()
-
-    logger.info("Waiting for the container to be replicated")
-    container.wait_until_all_replicas_are_closed()
-    # After restoring the network all the replicas should be in CLOSED state
-    for state in container.get_datanode_states():
-        assert state == 'CLOSED'
-
-    exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
-    assert exit_code == 0, "freon run failed with output=[%s]" % output
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_flaky.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_flaky.py
deleted file mode 100644
index 6b68d61..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_flaky.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import random
-import pytest
-
-from ozone.blockade import Blockade
-from ozone.cluster import OzoneCluster
-
-
-logger = logging.getLogger(__name__)
-
-
-def setup_function():
-    global cluster
-    cluster = OzoneCluster.create()
-    cluster.start()
-
-
-def teardown_function():
-    cluster.stop()
-
-
-@pytest.mark.parametrize("flaky_node", ["datanode"])
-def test_flaky(flaky_node):
-    """
-    In this test, we make a node's network flaky using Blockade.
-    One of the DNs is selected at random and its network is made flaky.
-
-    Once HA is in place, we can also make the OM and SCM networks flaky.
-
-    """
-    flaky_container_name = {
-        "scm": cluster.scm,
-        "om": cluster.om,
-        "datanode": random.choice(cluster.datanodes),
-        "all": "--all"
-    }[flaky_node]
-
-    Blockade.make_flaky(flaky_container_name)
-    exit_code, output = cluster.get_client().run_freon(1, 1, 1, 10240)
-    assert exit_code == 0, "freon run failed with output=[%s]" % output
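-
-
-# Sketch: once OM/SCM HA is available, widening the parametrize list above is
-# enough to exercise the other entries of flaky_container_name, e.g.
-#
-#     @pytest.mark.parametrize("flaky_node", ["datanode", "om", "scm", "all"])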
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure.py
deleted file mode 100644
index 10220b9..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-from ozone.cluster import OzoneCluster
-
-logger = logging.getLogger(__name__)
-
-
-def setup_function():
-    global cluster
-    cluster = OzoneCluster.create()
-    cluster.start()
-
-
-def teardown_function():
-    cluster.stop()
-
-
-def test_one_dn_isolate_scm_other_dn():
-    """
-    In this test, one of the DNs cannot communicate with SCM or the other DNs.
-    The other DNs can communicate with each other and with SCM.
-    Expectation: The container should eventually have two closed replicas.
-    """
-    om = cluster.om
-    scm = cluster.scm
-    dns = cluster.datanodes
-    client = cluster.client
-    oz_client = cluster.get_client()
-
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    # Partition the network
-    first_set = [dns[0], client]
-    second_set = [scm, om, dns[1], dns[2], client]
-    cluster.partition_network(first_set, second_set)
-    oz_client.run_freon(1, 1, 1, 10240)
-    containers = cluster.get_containers_on_datanode(dns[1])
-    for container in containers:
-        container.wait_until_one_replica_is_closed()
-
-    for container in containers:
-        assert container.get_state(dns[0]) == 'OPEN'
-        assert container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'CLOSED'
-
-    cluster.restore_network()
-    for container in containers:
-        container.wait_until_all_replicas_are_closed()
-    for container in containers:
-        assert container.get_state(dns[0]) == 'CLOSED'
-        assert container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'CLOSED'
-
-    exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
-    assert exit_code == 0, "freon run failed with output=[%s]" % output
-
-
-def test_one_dn_isolate_other_dn():
-    """
-    In this test, one of the DNs (the first DN) cannot communicate with the
-    other DNs but can communicate with SCM.
-    One of the other two DNs (the second DN) cannot communicate with SCM.
-    Expectation:
-    The container replica state in the first DN can be either closed or
-    quasi-closed.
-    The container replica state in the second DN can be either closed or open.
-    The container should eventually have at least one closed replica.
-    """
-    om = cluster.om
-    scm = cluster.scm
-    dns = cluster.datanodes
-    client = cluster.client
-    oz_client = cluster.get_client()
-
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    # Partition the network
-    first_set = [om, scm, dns[0], client]
-    second_set = [om, dns[1], dns[2], client]
-    third_set = [scm, dns[2], client]
-    cluster.partition_network(first_set, second_set, third_set)
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    containers = cluster.get_containers_on_datanode(dns[0])
-    for container in containers:
-        container.wait_until_replica_is_quasi_closed(dns[0])
-
-    for container in containers:
-        assert container.get_state(dns[0]) == 'QUASI_CLOSED'
-        assert container.get_state(dns[1]) == 'OPEN' or \
-            container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'QUASI_CLOSED' or \
-            container.get_state(dns[2]) == 'CLOSED'
-
-    cluster.restore_network()
-    for container in containers:
-        container.wait_until_all_replicas_are_closed()
-    for container in containers:
-        assert container.get_state(dns[0]) == 'CLOSED'
-        assert container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'CLOSED'
-
-    exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
-    assert exit_code == 0, "freon run failed with output=[%s]" % output
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_three_nodes_isolate.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_three_nodes_isolate.py
deleted file mode 100644
index 6f01c84..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_three_nodes_isolate.py
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import time
-import logging
-
-from ozone.cluster import OzoneCluster
-
-logger = logging.getLogger(__name__)
-
-
-def setup_function():
-    global cluster
-    cluster = OzoneCluster.create()
-    cluster.start()
-
-
-def teardown_function():
-    cluster.stop()
-
-
-def test_three_dns_isolate_one_scm_failure():
-    """
-    In this test, all DNs are isolated from each other.
-    One of the DNs (third DN) cannot communicate with SCM.
-    Expectation:
-    The container replica state in the first DN should be closed.
-    The container replica state in the second DN should be closed.
-    The container replica state in the third DN should be open.
-    """
-    om = cluster.om
-    scm = cluster.scm
-    dns = cluster.datanodes
-    client = cluster.client
-    oz_client = cluster.get_client()
-
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    first_set = [om, scm, dns[0], client]
-    second_set = [om, scm, dns[1], client]
-    third_set = [om, dns[2], client]
-
-    cluster.partition_network(first_set, second_set, third_set)
-    containers = cluster.get_containers_on_datanode(dns[0])
-    for container in containers:
-        container.wait_until_replica_is_closed(dns[0])
-
-    for container in containers:
-        assert container.get_state(dns[0]) == 'CLOSED'
-        assert container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'OPEN'
-
-    cluster.restore_network()
-    for container in containers:
-        container.wait_until_all_replicas_are_closed()
-    for container in containers:
-        assert container.get_state(dns[0]) == 'CLOSED'
-        assert container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'CLOSED'
-
-    exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
-    assert exit_code == 0, "freon run failed with output=[%s]" % output
-
-
-def test_three_dns_isolate_two_scm_failure():
-    """
-    In this test, all DNs are isolated from each other.
-    Two DNs (the second and the third DN) cannot communicate with SCM.
-    Expectation:
-    The container replica state in the first DN should be quasi-closed.
-    The container replica state in the second DN should be open.
-    The container replica state in the third DN should be open.
-    """
-    om = cluster.om
-    scm = cluster.scm
-    dns = cluster.datanodes
-    client = cluster.client
-    oz_client = cluster.get_client()
-
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    first_set = [om, scm, dns[0], client]
-    second_set = [om, dns[1], client]
-    third_set = [om, dns[2], client]
-
-    cluster.partition_network(first_set, second_set, third_set)
-    containers = cluster.get_containers_on_datanode(dns[0])
-    for container in containers:
-        container.wait_until_replica_is_quasi_closed(dns[0])
-
-    for container in containers:
-        assert container.get_state(dns[0]) == 'QUASI_CLOSED'
-        assert container.get_state(dns[1]) == 'OPEN'
-        assert container.get_state(dns[2]) == 'OPEN'
-
-    cluster.restore_network()
-    for container in containers:
-        container.wait_until_all_replicas_are_closed()
-    for container in containers:
-        assert container.get_state(dns[0]) == 'CLOSED'
-        assert container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'CLOSED'
-
-    exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
-    assert exit_code == 0, "freon run failed with output=[%s]" % output
-
-
-def test_three_dns_isolate_three_scm_failure():
-    """
-    In this test, all DNs are isolated from each other and also cannot
-    communicate with SCM.
-    Expectation:
-    The container replica state in the first DN should be open.
-    The container replica state in the second DN should be open.
-    The container replica state in the third DN should be open.
-    """
-    om = cluster.om
-    dns = cluster.datanodes
-    client = cluster.client
-    oz_client = cluster.get_client()
-
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    first_set = [om, dns[0], client]
-    second_set = [om, dns[1], client]
-    third_set = [om, dns[2], client]
-
-    cluster.partition_network(first_set, second_set, third_set)
-
-    # Wait till the datanodes are marked as stale by SCM
-    time.sleep(150)
-
-    containers = cluster.get_containers_on_datanode(dns[0])
-    for container in containers:
-        assert container.get_state(dns[0]) == 'OPEN'
-        assert container.get_state(dns[1]) == 'OPEN'
-        assert container.get_state(dns[2]) == 'OPEN'
-
-    cluster.restore_network()
-
-    for container in containers:
-        container.wait_until_all_replicas_are_closed()
-
-    for container in containers:
-        assert container.get_state(dns[0]) == 'CLOSED'
-        assert container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'CLOSED'
-
-    exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
-    assert exit_code == 0, "freon run failed with output=[%s]" % output
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_two_nodes.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_two_nodes.py
deleted file mode 100644
index 20b0cc3..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_two_nodes.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-from ozone.cluster import OzoneCluster
-
-logger = logging.getLogger(__name__)
-
-
-def setup_function():
-    global cluster
-    cluster = OzoneCluster.create()
-    cluster.start()
-
-
-def teardown_function():
-    cluster.stop()
-
-
-def test_two_dns_isolate_scm_same_partition():
-    """
-    In this test, there are three DNs:
-    DN1 is in one network partition, and
-    DN2 and DN3 are in a different network partition.
-    DN2 and DN3 cannot communicate with SCM.
-    Expectation:
-    The container replica state in DN1 should be quasi-closed.
-    The container replica state in DN2 should be open.
-    The container replica state in DN3 should be open.
-    """
-    om = cluster.om
-    scm = cluster.scm
-    dns = cluster.datanodes
-    client = cluster.client
-    oz_client = cluster.get_client()
-
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    first_set = [om, dns[1], dns[2], client]
-    second_set = [om, scm, dns[0], client]
-    cluster.partition_network(first_set, second_set)
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    containers = cluster.get_containers_on_datanode(dns[0])
-
-    for container in containers:
-        container.wait_until_one_replica_is_quasi_closed()
-
-    for container in containers:
-        assert container.get_state(dns[0]) == 'QUASI_CLOSED'
-        assert container.get_state(dns[1]) == 'OPEN'
-        assert container.get_state(dns[2]) == 'OPEN'
-
-    cluster.restore_network()
-
-    for container in containers:
-        container.wait_until_all_replicas_are_closed()
-
-    for container in containers:
-        assert container.get_state(dns[0]) == 'CLOSED'
-        assert container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'CLOSED'
-
-    exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
-    assert exit_code == 0, "freon run failed with output=[%s]" % output
-
-
-def test_two_dns_isolate_scm_different_partition():
-    """
-    In this test, there are three DNs:
-    DN1 is in one network partition, and
-    DN2 and DN3 are in a different network partition.
-    DN1 and DN2 cannot communicate with SCM.
-    Expectation:
-    The container replica state in DN1 should be open.
-    The container replica states can be either 'closed'
-    in DN2 and DN3 or 'open' in DN2 and 'quasi-closed' in DN3.
-    """
-
-    om = cluster.om
-    scm = cluster.scm
-    dns = cluster.datanodes
-    client = cluster.client
-    oz_client = cluster.get_client()
-
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    first_set = [om, dns[0], client]
-    second_set = [om, dns[1], dns[2], client]
-    third_set = [scm, dns[2], client]
-    cluster.partition_network(first_set, second_set, third_set)
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    containers = cluster.get_containers_on_datanode(dns[2])
-
-    for container in containers:
-        container.wait_until_replica_is_not_open_anymore(dns[2])
-
-    for container in containers:
-        assert container.get_state(dns[0]) == 'OPEN'
-        assert (container.get_state(dns[1]) == 'CLOSED' and
-                container.get_state(dns[2]) == 'CLOSED') or \
-               (container.get_state(dns[1]) == 'OPEN' and
-                container.get_state(dns[2]) == 'QUASI_CLOSED')
-
-    cluster.restore_network()
-
-    for container in containers:
-        container.wait_until_all_replicas_are_closed()
-
-    for container in containers:
-        assert container.get_state(dns[0]) == 'CLOSED'
-        assert container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'CLOSED'
-
-    exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
-    assert exit_code == 0, "freon run failed with output=[%s]" % output
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_scm_isolation.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_scm_isolation.py
deleted file mode 100644
index f48ddf3..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_scm_isolation.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-from ozone.cluster import OzoneCluster
-
-logger = logging.getLogger(__name__)
-
-
-def setup_function():
-    global cluster
-    cluster = OzoneCluster.create()
-    cluster.start()
-
-
-def teardown_function():
-    cluster.stop()
-
-
-def test_scm_isolation_one_node():
-    """
-    In this test, one of the DNs cannot communicate with SCM.
-    The other DNs can communicate with SCM.
-    Expectation: The container should eventually have at least two closed
-    replicas.
-    """
-    om = cluster.om
-    scm = cluster.scm
-    dns = cluster.datanodes
-    client = cluster.client
-    oz_client = cluster.get_client()
-
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    first_set = [om, dns[0], dns[1], dns[2], client]
-    second_set = [om, scm, dns[1], dns[2], client]
-    cluster.partition_network(first_set, second_set)
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    containers = cluster.get_containers_on_datanode(dns[1])
-
-    for container in containers:
-        container.wait_until_two_replicas_are_closed()
-
-    for container in containers:
-        assert container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'CLOSED'
-        assert container.get_state(dns[0]) == 'OPEN' or \
-            container.get_state(dns[0]) == 'CLOSED'
-
-    cluster.restore_network()
-
-    for container in containers:
-        container.wait_until_all_replicas_are_closed()
-
-    for container in containers:
-        assert container.get_state(dns[0]) == 'CLOSED'
-        assert container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'CLOSED'
-
-    exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
-    assert exit_code == 0, "freon run failed with output=[%s]" % output
-
-
-def test_scm_isolation_two_node():
-    """
-    In this test, two DNs cannot communicate with SCM.
-    Expectation: The container should eventually have either three closed
-    replicas or two open replicas and one quasi-closed replica.
-    """
-    om = cluster.om
-    scm = cluster.scm
-    dns = cluster.datanodes
-    client = cluster.client
-    oz_client = cluster.get_client()
-
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    first_set = [om, dns[0], dns[1], dns[2], client]
-    second_set = [om, scm, dns[1], client]
-    cluster.partition_network(first_set, second_set)
-    oz_client.run_freon(1, 1, 1, 10240)
-
-    containers = cluster.get_containers_on_datanode(dns[1])
-
-    for container in containers:
-        container.wait_until_replica_is_not_open_anymore(dns[1])
-
-    for container in containers:
-        state = container.get_state(dns[1])
-        assert state == 'QUASI_CLOSED' or state == 'CLOSED'
-
-        if state == 'QUASI_CLOSED':
-            assert container.get_state(dns[0]) == 'OPEN'
-            assert container.get_state(dns[2]) == 'OPEN'
-        else:
-            assert container.get_state(dns[0]) == 'CLOSED'
-            assert container.get_state(dns[2]) == 'CLOSED'
-
-    cluster.restore_network()
-
-    for container in containers:
-        container.wait_until_all_replicas_are_closed()
-
-    for container in containers:
-        assert container.get_state(dns[0]) == 'CLOSED'
-        assert container.get_state(dns[1]) == 'CLOSED'
-        assert container.get_state(dns[2]) == 'CLOSED'
-
-    exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
-    assert exit_code == 0, "freon run failed with output=[%s]" % output
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-compose.yaml b/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-compose.yaml
deleted file mode 100644
index 7175eb8..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-compose.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-   datanode:
-      image: ${docker.image}
-      ports:
-        - 9864
-      command: ["/opt/hadoop/bin/ozone","datanode"]
-      env_file:
-        - ./docker-config
-   om:
-      image: ${docker.image}
-      ports:
-         - 9874:9874
-      environment:
-         ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
-      env_file:
-          - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","om"]
-   scm:
-      image: ${docker.image}
-      ports:
-         - 9876:9876
-      env_file:
-          - ./docker-config
-      environment:
-          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["/opt/hadoop/bin/ozone","scm"]
-   ozone_client:
-       image: ${docker.image}
-       ports:
-         - 9869
-       command: ["tail", "-f","/etc/passwd"]
-       env_file:
-         - ./docker-config
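-
-# Note: ${docker.image} above is a build-time placeholder (it is expected to
-# be filled in by the Maven build, e.g. -Ddocker.image=<ozone image>); the
-# literal text is not usable for a standalone `docker-compose up`.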
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-config b/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-config
deleted file mode 100644
index 1db1a79..0000000
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-config
+++ /dev/null
@@ -1,77 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=True
-OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.handler.type=distributed
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_ozone.scm.dead.node.interval=5m
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
-LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
-LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
-LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
-LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
-
-#Enable this variable to print out all hadoop rpc traffic to stdout. See http://byteman.jboss.org/ to define your own instrumentation.
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
-
-#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
-LOG4J2.PROPERTIES_monitorInterval=30
-LOG4J2.PROPERTIES_filter=read,write
-LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
-LOG4J2.PROPERTIES_filter.read.marker=READ
-LOG4J2.PROPERTIES_filter.read.onMatch=DENY
-LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
-LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
-LOG4J2.PROPERTIES_filter.write.marker=WRITE
-LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
-LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
-LOG4J2.PROPERTIES_appenders=console, rolling
-LOG4J2.PROPERTIES_appender.console.type=Console
-LOG4J2.PROPERTIES_appender.console.name=STDOUT
-LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
-LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
-LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
-LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
-LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
-LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
-LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
-LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
-LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
-LOG4J2.PROPERTIES_loggers=audit
-LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
-LOG4J2.PROPERTIES_logger.audit.name=OMAudit
-LOG4J2.PROPERTIES_logger.audit.level=INFO
-LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
-LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
-LOG4J2.PROPERTIES_rootLogger.level=INFO
-LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
-LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT
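-
-# For reference: entries of the form FILE.XML_key=value follow the Ozone
-# docker image convention, where the container entrypoint is assumed to turn,
-# for example, OZONE-SITE.XML_ozone.scm.names=scm into the ozone.scm.names
-# property inside ozone-site.xml before the daemons start.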
diff --git a/hadoop-ozone/fault-injection-test/pom.xml b/hadoop-ozone/fault-injection-test/pom.xml
deleted file mode 100644
index 395c534..0000000
--- a/hadoop-ozone/fault-injection-test/pom.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-fault-injection-test</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Fault Injection Tests</description>
-  <name>Apache Hadoop Ozone Fault Injection Tests</name>
-  <packaging>pom</packaging>
-
-  <modules>
-    <module>network-tests</module>
-  </modules>
-
-</project>
diff --git a/hadoop-ozone/insight/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/insight/dev-support/findbugsExcludeFile.xml
deleted file mode 100644
index 55abc26..0000000
--- a/hadoop-ozone/insight/dev-support/findbugsExcludeFile.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<FindBugsFilter>
-</FindBugsFilter>
diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml
deleted file mode 100644
index 8287334..0000000
--- a/hadoop-ozone/insight/pom.xml
+++ /dev/null
@@ -1,131 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-insight</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Insight Tool</description>
-  <name>Apache Hadoop Ozone Insight Tool</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-ozone-manager</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-    </dependency>
-    <!-- Genesis requires server side components -->
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-scm</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-filesystem</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-framework</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.xml.bind</groupId>
-      <artifactId>jaxb-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>javax.activation</groupId>
-      <artifactId>activation</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>io.dropwizard.metrics</groupId>
-      <artifactId>metrics-core</artifactId>
-      <version>3.2.4</version>
-    </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-core</artifactId>
-      <version>1.19</version>
-    </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-generator-annprocess</artifactId>
-      <version>1.19</version>
-    </dependency>
-    <dependency>
-      <groupId>com.github.spotbugs</groupId>
-      <artifactId>spotbugs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-integration-test</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-  </dependencies>
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml
-          </excludeFilterFile>
-          <fork>true</fork>
-          <maxHeap>2048</maxHeap>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightPoint.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightPoint.java
deleted file mode 100644
index a23b876..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightPoint.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdds.server.PrometheusMetricsSink;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.insight.LoggerSource.Level;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import com.google.protobuf.ProtocolMessageEnum;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT;
-
-/**
- * Default implementation of Insight point logic.
- */
-public abstract class BaseInsightPoint implements InsightPoint {
-
-  /**
-   * List the related metrics.
-   */
-  @Override
-  public List<MetricGroupDisplay> getMetrics() {
-    return new ArrayList<>();
-  }
-
-  /**
-   * List the related configuration.
-   */
-  @Override
-  public List<Class> getConfigurationClasses() {
-    return new ArrayList<>();
-  }
-
-  /**
-   * List the related loggers.
-   *
-   * @param verbose true if verbose logging is requested.
-   */
-  @Override
-  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
-    List<LoggerSource> loggers = new ArrayList<>();
-    return loggers;
-  }
-
-  /**
-   * Creates an SCM client.
-   */
-  public ScmClient createScmClient(OzoneConfiguration ozoneConf)
-      throws IOException {
-
-    if (!HddsUtils.getHostNameFromConfigKeys(ozoneConf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY).isPresent()) {
-
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY
-              + " should be set in ozone-site.xml");
-    }
-
-    long version = RPC.getProtocolVersion(
-        StorageContainerLocationProtocolPB.class);
-    InetSocketAddress scmAddress =
-        getScmAddressForClients(ozoneConf);
-    int containerSizeGB = (int) ozoneConf.getStorageSize(
-        OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT,
-        StorageUnit.GB);
-    ContainerOperationClient
-        .setContainerSizeB(containerSizeGB * OzoneConsts.GB);
-
-    RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    StorageContainerLocationProtocol client =
-        TracingUtil.createProxy(
-            new StorageContainerLocationProtocolClientSideTranslatorPB(
-                RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
-                    scmAddress, UserGroupInformation.getCurrentUser(),
-                    ozoneConf,
-                    NetUtils.getDefaultSocketFactory(ozoneConf),
-                    Client.getRpcTimeout(ozoneConf))),
-            StorageContainerLocationProtocol.class, ozoneConf);
-    return new ContainerOperationClient(
-        client, new XceiverClientManager(ozoneConf));
-  }
-
-  /**
-   * Convenience method to define default log levels.
-   */
-  public Level defaultLevel(boolean verbose) {
-    return verbose ? Level.TRACE : Level.DEBUG;
-  }
-
-  /**
-   * Default metrics for any message-type-based RPC server-side translator.
-   */
-  public void addProtocolMessageMetrics(List<MetricGroupDisplay> metrics,
-      String prefix,
-      Component.Type component,
-      ProtocolMessageEnum[] types) {
-
-    MetricGroupDisplay messageTypeCounters =
-        new MetricGroupDisplay(component, "Message type counters");
-    for (ProtocolMessageEnum type : types) {
-      String typeName = type.toString();
-      MetricDisplay metricDisplay = new MetricDisplay("Number of " + typeName,
-          prefix + "_" + PrometheusMetricsSink
-              .normalizeName(typeName));
-      messageTypeCounters.addMetrics(metricDisplay);
-    }
-    metrics.add(messageTypeCounters);
-  }
-
-  /**
-   * RPC metrics for any Hadoop RPC endpoint.
-   */
-  public void addRpcMetrics(List<MetricGroupDisplay> metrics,
-      Component.Type component,
-      Map<String, String> filter) {
-    MetricGroupDisplay connection =
-        new MetricGroupDisplay(component, "RPC connections");
-    connection.addMetrics(new MetricDisplay("Open connections",
-        "rpc_num_open_connections", filter));
-    connection.addMetrics(
-        new MetricDisplay("Dropped connections", "rpc_num_dropped_connections",
-            filter));
-    connection.addMetrics(
-        new MetricDisplay("Received bytes", "rpc_received_bytes",
-            filter));
-    connection.addMetrics(
-        new MetricDisplay("Sent bytes", "rpc_sent_bytes",
-            filter));
-    metrics.add(connection);
-
-    MetricGroupDisplay queue = new MetricGroupDisplay(component, "RPC queue");
-    queue.addMetrics(new MetricDisplay("RPC average queue time",
-        "rpc_rpc_queue_time_avg_time", filter));
-    queue.addMetrics(
-        new MetricDisplay("RPC call queue length", "rpc_call_queue_length",
-            filter));
-    metrics.add(queue);
-
-    MetricGroupDisplay performance =
-        new MetricGroupDisplay(component, "RPC performance");
-    performance.addMetrics(new MetricDisplay("RPC processing time average",
-        "rpc_rpc_processing_time_avg_time", filter));
-    performance.addMetrics(
-        new MetricDisplay("Number of slow calls", "rpc_rpc_slow_calls",
-            filter));
-    metrics.add(performance);
-  }
-
-}
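
A minimal sketch of a concrete insight point built on the helpers above; the class name, component choice, and filter value are hypothetical.

package org.apache.hadoop.ozone.insight;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Hypothetical insight point combining the BaseInsightPoint helpers:
 * the standard RPC metric groups, filtered by a server-name tag.
 */
public class ExampleRpcInsight extends BaseInsightPoint {

  @Override
  public List<MetricGroupDisplay> getMetrics() {
    List<MetricGroupDisplay> metrics = new ArrayList<>();
    Map<String, String> filter = new HashMap<>();
    filter.put("servername", "ExampleService"); // hypothetical tag value
    addRpcMetrics(metrics, Component.Type.SCM, filter);
    return metrics;
  }

  @Override
  public String getDescription() {
    return "Example RPC endpoint";
  }
}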
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightSubCommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightSubCommand.java
deleted file mode 100644
index 4c3875c..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightSubCommand.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight;
-
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Optional;
-
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.insight.Component.Type;
-import org.apache.hadoop.ozone.insight.om.KeyManagerInsight;
-import org.apache.hadoop.ozone.insight.om.OmProtocolInsight;
-import org.apache.hadoop.ozone.insight.scm.EventQueueInsight;
-import org.apache.hadoop.ozone.insight.scm.NodeManagerInsight;
-import org.apache.hadoop.ozone.insight.scm.ReplicaManagerInsight;
-import org.apache.hadoop.ozone.insight.scm.ScmProtocolBlockLocationInsight;
-import org.apache.hadoop.ozone.insight.scm.ScmProtocolContainerLocationInsight;
-import org.apache.hadoop.ozone.insight.scm.ScmProtocolSecurityInsight;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-
-import picocli.CommandLine;
-
-/**
- * Parent class for all the insight subcommands.
- */
-public class BaseInsightSubCommand {
-
-  @CommandLine.ParentCommand
-  private Insight insightCommand;
-
-  public InsightPoint getInsight(OzoneConfiguration configuration,
-      String selection) {
-    Map<String, InsightPoint> insights = createInsightPoints(configuration);
-
-    if (!insights.containsKey(selection)) {
-      throw new RuntimeException(String
-          .format("No such component; %s. Available components: %s", selection,
-              insights.keySet()));
-    }
-    return insights.get(selection);
-  }
-
-  /**
-   * Utility to get the host based on a component.
-   */
-  public String getHost(OzoneConfiguration conf, Component component) {
-    if (component.getHostname() != null) {
-      return "http://" + component.getHostname() + ":" + component.getPort();
-    } else if (component.getName() == Type.SCM) {
-      Optional<String> scmHost =
-          HddsUtils.getHostNameFromConfigKeys(conf,
-              ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
-              ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-      return "http://" + scmHost.get() + ":9876";
-    } else if (component.getName() == Type.OM) {
-      Optional<String> omHost =
-          HddsUtils.getHostNameFromConfigKeys(conf,
-              OMConfigKeys.OZONE_OM_ADDRESS_KEY);
-      return "http://" + omHost.get() + ":9874";
-    } else {
-      throw new IllegalArgumentException(
-          "Component type is not supported: " + component.getName());
-    }
-
-  }
-
-  public Map<String, InsightPoint> createInsightPoints(
-      OzoneConfiguration configuration) {
-    Map<String, InsightPoint> insights = new LinkedHashMap<>();
-    insights.put("scm.node-manager", new NodeManagerInsight());
-    insights.put("scm.replica-manager", new ReplicaManagerInsight());
-    insights.put("scm.event-queue", new EventQueueInsight());
-    insights.put("scm.protocol.block-location",
-        new ScmProtocolBlockLocationInsight());
-    insights.put("scm.protocol.container-location",
-        new ScmProtocolContainerLocationInsight());
-    insights.put("scm.protocol.security",
-        new ScmProtocolSecurityInsight());
-    insights.put("om.key-manager", new KeyManagerInsight());
-    insights.put("om.protocol.client", new OmProtocolInsight());
-
-    return insights;
-  }
-
-  public Insight getInsightCommand() {
-    return insightCommand;
-  }
-}
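
A short usage sketch for the registry above; the wrapper class is illustrative only. getInsight() resolves one of the keys registered in createInsightPoints() and fails with the list of available components otherwise.

package org.apache.hadoop.ozone.insight;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class InsightLookupExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    BaseInsightSubCommand cmd = new BaseInsightSubCommand();

    // "scm.node-manager" is registered above; an unknown key raises a
    // RuntimeException listing the available components.
    InsightPoint point = cmd.getInsight(conf, "scm.node-manager");
    System.out.println(point.getDescription());
  }
}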
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Component.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Component.java
deleted file mode 100644
index 261ae49..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Component.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight;
-
-import java.util.Objects;
-
-/**
- * Identifies an ozone component.
- */
-public class Component {
-
-  /**
-   * The type of the component (e.g. scm, s3g, ...).
-   */
-  private Type name;
-
-  /**
-   * Unique identifier of the instance (uuid or index). Can be null for a
-   * non-HA server component.
-   */
-  private String id;
-
-  /**
-   * Hostname of the component. Optional; may help to find the right host
-   * name.
-   */
-  private String hostname;
-
-  /**
-   * HTTP service port. Optional.
-   */
-  private int port;
-
-  public Component(Type name) {
-    this.name = name;
-  }
-
-  public Component(Type name, String id) {
-    this.name = name;
-    this.id = id;
-  }
-
-  public Component(Type name, String id, String hostname) {
-    this.name = name;
-    this.id = id;
-    this.hostname = hostname;
-  }
-
-  public Component(Type name, String id, String hostname, int port) {
-    this.name = name;
-    this.id = id;
-    this.hostname = hostname;
-    this.port = port;
-  }
-
-  public Type getName() {
-    return name;
-  }
-
-  public String getId() {
-    return id;
-  }
-
-  public String getHostname() {
-    return hostname;
-  }
-
-  public int getPort() {
-    return port;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    Component that = (Component) o;
-    return Objects.equals(name, that.name) &&
-        Objects.equals(id, that.id);
-  }
-
-  public String prefix() {
-    return name + (id != null && id.length() > 0 ? "-" + id : "");
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(name, id);
-  }
-
-  /**
-   * Ozone component types.
-   */
-  public enum Type {
-    SCM, OM, DATANODE, S3G, RECON;
-  }
-
-}
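
Because equals() and hashCode() above consider only the type and the id, descriptors of the same instance coalesce when used as set or map keys regardless of hostname and port. A small sketch (values invented):

package org.apache.hadoop.ozone.insight;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class ComponentEqualityExample {
  public static void main(String[] args) {
    Component a =
        new Component(Component.Type.DATANODE, "uuid-1", "host-a", 9882);
    Component b =
        new Component(Component.Type.DATANODE, "uuid-1", "host-b", 9999);

    System.out.println(a.equals(b)); // true: hostname and port ignored
    System.out.println(a.prefix());  // DATANODE-uuid-1

    Set<Component> components = new HashSet<>(Arrays.asList(a, b));
    System.out.println(components.size()); // 1
  }
}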
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java
deleted file mode 100644
index e32ecd7..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.Config;
-import org.apache.hadoop.hdds.conf.ConfigGroup;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.insight.Component.Type;
-
-import picocli.CommandLine;
-
-import java.lang.reflect.Method;
-import java.util.concurrent.Callable;
-
-/**
- * Subcommand to show configuration values/documentation.
- */
-@CommandLine.Command(
-    name = "config",
-    description = "Show configuration for a specific subcomponents",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class ConfigurationSubCommand extends BaseInsightSubCommand
-    implements Callable<Void> {
-
-  @CommandLine.Parameters(description = "Name of the insight point (use list "
-      + "to check the available options)")
-  private String insightName;
-
-  @Override
-  public Void call() throws Exception {
-    InsightPoint insight =
-        getInsight(getInsightCommand().createOzoneConfiguration(), insightName);
-    System.out.println(
-        "Configuration for `" + insightName + "` (" + insight.getDescription()
-            + ")");
-    System.out.println();
-    for (Class clazz : insight.getConfigurationClasses()) {
-      showConfig(clazz);
-
-    }
-    return null;
-  }
-
-  private void showConfig(Class clazz) {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.addResource(getHost(conf, new Component(Type.SCM)) + "/conf");
-    ConfigGroup configGroup =
-        (ConfigGroup) clazz.getAnnotation(ConfigGroup.class);
-    if (configGroup == null) {
-      return;
-    }
-
-    String prefix = configGroup.prefix();
-
-    for (Method method : clazz.getMethods()) {
-      if (method.isAnnotationPresent(Config.class)) {
-        Config config = method.getAnnotation(Config.class);
-        String key = prefix + "." + config.key();
-        System.out.println(">>> " + key);
-        System.out.println("       default: " + config.defaultValue());
-        System.out.println("       current: " + conf.get(key));
-        System.out.println();
-        System.out.println(config.description());
-        System.out.println();
-        System.out.println();
-
-      }
-    }
-
-  }
-
-}
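
A hedged sketch of the kind of class showConfig() can render: a @ConfigGroup-annotated holder whose setters carry @Config metadata. Only the annotation attributes read above (prefix, key, defaultValue, description) are shown; the real annotations may require additional attributes, and the class name is hypothetical.

package org.apache.hadoop.ozone.insight;

import org.apache.hadoop.hdds.conf.Config;
import org.apache.hadoop.hdds.conf.ConfigGroup;

@ConfigGroup(prefix = "example.service")
public class ExampleServiceConfig {

  private int threadCount;

  // showConfig() would print "example.service.thread.count" together
  // with this default, the current value and the description.
  @Config(key = "thread.count", defaultValue = "10",
      description = "Number of worker threads of the example service.")
  public void setThreadCount(int threadCount) {
    this.threadCount = threadCount;
  }
}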
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java
deleted file mode 100644
index 690783ee..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight;
-
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-
-import picocli.CommandLine;
-
-/**
- * Command line utility to check logs/metrics of internal ozone components.
- */
-@CommandLine.Command(name = "ozone insight",
-    hidden = true, description = "Show debug information about a selected "
-    + "Ozone component",
-    versionProvider = HddsVersionProvider.class,
-    subcommands = {ListSubCommand.class, LogSubcommand.class,
-        MetricsSubCommand.class, ConfigurationSubCommand.class},
-    mixinStandardHelpOptions = true)
-public class Insight extends GenericCli {
-
-  public static void main(String[] args) throws Exception {
-    new Insight().run(args);
-  }
-
-}
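
A usage sketch, assuming the standard ozone launcher wires this class in: "ozone insight list" enumerates the insight points, while config, metrics and log/logs operate on one of them. Programmatically the same entry point can be driven directly:

package org.apache.hadoop.ozone.insight;

public class InsightLauncherExample {
  public static void main(String[] args) throws Exception {
    // Equivalent to "ozone insight list" from the shell.
    new Insight().run(new String[] {"list"});
  }
}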
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/InsightPoint.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/InsightPoint.java
deleted file mode 100644
index 1284cfa..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/InsightPoint.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight;
-
-import java.util.List;
-
-/**
- * Definition of a specific insight point.
- */
-public interface InsightPoint {
-
-  /**
-   * Human readable description.
-   */
-  String getDescription();
-
-  /**
-   * List of the related loggers.
-   */
-  List<LoggerSource> getRelatedLoggers(boolean verbose);
-
-  /**
-   * List of the related metrics.
-   */
-  List<MetricGroupDisplay> getMetrics();
-
-  /**
-   * List of the configuration classes.
-   */
-  List<Class> getConfigurationClasses();
-
-}
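
For completeness, a minimal direct implementation of the interface; the class name is hypothetical, and BaseInsightPoint above already provides empty defaults for everything except getDescription().

package org.apache.hadoop.ozone.insight;

import java.util.Collections;
import java.util.List;

public class NoopInsightPoint implements InsightPoint {

  @Override
  public String getDescription() {
    return "Insight point with no loggers, metrics or configuration";
  }

  @Override
  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
    return Collections.emptyList();
  }

  @Override
  public List<MetricGroupDisplay> getMetrics() {
    return Collections.emptyList();
  }

  @Override
  public List<Class> getConfigurationClasses() {
    return Collections.emptyList();
  }
}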
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ListSubCommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ListSubCommand.java
deleted file mode 100644
index 8f91398..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ListSubCommand.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import picocli.CommandLine;
-
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.Callable;
-
-/**
- * Subcommand to list the available insight points.
- */
-@CommandLine.Command(
-    name = "list",
-    description = "Show available insight points.",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class ListSubCommand extends BaseInsightSubCommand
-    implements Callable<Void> {
-
-  @CommandLine.Parameters(defaultValue = "")
-  private String insightPrefix;
-
-  @Override
-  public Void call() throws Exception {
-
-    System.out.println("Available insight points:\n\n");
-
-    Map<String, InsightPoint> insightPoints =
-        createInsightPoints(new OzoneConfiguration());
-    for (Entry<String, InsightPoint> entry : insightPoints.entrySet()) {
-      if (insightPrefix == null || entry.getKey().startsWith(insightPrefix)) {
-        System.out.println(String.format("  %-33s    %s", entry.getKey(),
-            entry.getValue().getDescription()));
-      }
-    }
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LogSubcommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LogSubcommand.java
deleted file mode 100644
index 2e8787f..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LogSubcommand.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.insight.LoggerSource.Level;
-
-import org.apache.http.HttpResponse;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.HttpClientBuilder;
-import picocli.CommandLine;
-
-/**
- * Subcommand to display logs.
- */
-@CommandLine.Command(
-    name = "log",
-    aliases = "logs",
-    description = "Show log4j events related to the insight point",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class LogSubcommand extends BaseInsightSubCommand
-    implements Callable<Void> {
-
-  @CommandLine.Parameters(description = "Name of the insight point (use list "
-      + "to check the available options)")
-  private String insightName;
-
-  @CommandLine.Option(names = "-v", description = "Enable verbose mode to "
-      + "show more information / detailed message")
-  private boolean verbose;
-
-  @Override
-  public Void call() throws Exception {
-    OzoneConfiguration conf =
-        getInsightCommand().createOzoneConfiguration();
-    InsightPoint insight =
-        getInsight(conf, insightName);
-
-    List<LoggerSource> loggers = insight.getRelatedLoggers(verbose);
-
-    for (LoggerSource logger : loggers) {
-      setLogLevel(conf, logger.getLoggerName(), logger.getComponent(),
-          logger.getLevel());
-    }
-
-    Set<Component> sources = loggers.stream().map(LoggerSource::getComponent)
-        .collect(Collectors.toSet());
-    try {
-      streamLog(conf, sources, loggers);
-    } finally {
-      for (LoggerSource logger : loggers) {
-        setLogLevel(conf, logger.getLoggerName(), logger.getComponent(),
-            Level.INFO);
-      }
-    }
-    return null;
-  }
-
-  private void streamLog(OzoneConfiguration conf, Set<Component> sources,
-      List<LoggerSource> relatedLoggers) {
-    List<Thread> loggers = new ArrayList<>();
-    for (Component sourceComponent : sources) {
-      loggers.add(new Thread(
-          () -> streamLog(conf, sourceComponent, relatedLoggers)));
-    }
-    for (Thread thread : loggers) {
-      thread.start();
-    }
-    for (Thread thread : loggers) {
-      try {
-        thread.join();
-      } catch (InterruptedException e) {
-        e.printStackTrace();
-      }
-    }
-  }
-
-  private void streamLog(OzoneConfiguration conf, Component logComponent,
-      List<LoggerSource> loggers) {
-    HttpClient client = HttpClientBuilder.create().build();
-
-    HttpGet get = new HttpGet(getHost(conf, logComponent) + "/logstream");
-    try {
-      HttpResponse execute = client.execute(get);
-      try (BufferedReader bufferedReader = new BufferedReader(
-          new InputStreamReader(execute.getEntity().getContent(),
-              StandardCharsets.UTF_8))) {
-        bufferedReader.lines()
-            .filter(line -> {
-              for (LoggerSource logger : loggers) {
-                if (line.contains(logger.getLoggerName())) {
-                  return true;
-                }
-              }
-              return false;
-            })
-            .map(this::processLogLine)
-            .map(l -> "[" + logComponent.prefix() + "] " + l)
-            .forEach(System.out::println);
-      }
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  public String processLogLine(String line) {
-    Pattern p = Pattern.compile("<json>(.*)</json>");
-    Matcher m = p.matcher(line);
-    StringBuffer sb = new StringBuffer();
-    while (m.find()) {
-      m.appendReplacement(sb, "\n" + m.group(1).replaceAll("\\\\n", "\n"));
-    }
-    m.appendTail(sb);
-    return sb.toString();
-  }
-
-  private void setLogLevel(OzoneConfiguration conf, String name,
-      Component component, LoggerSource.Level level) {
-    HttpClient client = HttpClientBuilder.create().build();
-
-    String request = String
-        .format("/logLevel?log=%s&level=%s", name,
-            level);
-    String hostName = getHost(conf, component);
-    HttpGet get = new HttpGet(hostName + request);
-    try {
-      HttpResponse execute = client.execute(get);
-      if (execute.getStatusLine().getStatusCode() != 200) {
-        throw new RuntimeException(
-            "Can't set the log level: " + hostName + " -> HTTP " + execute
-                .getStatusLine().getStatusCode());
-      }
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-}
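
The <json>...</json> unwrapping in processLogLine() is easiest to see on a sample line; a small sketch with an invented input value:

package org.apache.hadoop.ozone.insight;

public class LogLineExample {
  public static void main(String[] args) {
    String line =
        "12:00:00 INFO <json>{\"op\":\"CreateKey\"}\\n{\"ok\":true}</json>";

    // The payload moves to its own line and escaped \n sequences become
    // real line breaks:
    //   12:00:00 INFO
    //   {"op":"CreateKey"}
    //   {"ok":true}
    System.out.println(new LogSubcommand().processLogLine(line));
  }
}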
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LoggerSource.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LoggerSource.java
deleted file mode 100644
index 180b3e8..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LoggerSource.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight;
-
-import org.apache.hadoop.ozone.insight.Component.Type;
-
-/**
- * Definition of a log source.
- */
-public class LoggerSource {
-
-  /**
-   * Id of the component where the log is generated.
-   */
-  private Component component;
-
-  /**
-   * Log4j/slf4j logger name.
-   */
-  private String loggerName;
-
-  /**
-   * Log level.
-   */
-  private Level level;
-
-  public LoggerSource(Component component, String loggerName, Level level) {
-    this.component = component;
-    this.loggerName = loggerName;
-    this.level = level;
-  }
-
-  public LoggerSource(Type componentType, Class<?> loggerClass,
-      Level level) {
-    this(new Component(componentType), loggerClass.getCanonicalName(), level);
-  }
-
-  public Component getComponent() {
-    return component;
-  }
-
-  public String getLoggerName() {
-    return loggerName;
-  }
-
-  public Level getLevel() {
-    return level;
-  }
-
-  /**
-   * Log level definition.
-   */
-  public enum Level {
-    TRACE, DEBUG, INFO, WARN, ERROR
-  }
-
-}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricDisplay.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricDisplay.java
deleted file mode 100644
index 395c14c..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricDisplay.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Definition of one displayable Hadoop metric.
- */
-public class MetricDisplay {
-
-  /**
-   * Prometheus metrics name.
-   */
-  private String id;
-
-  /**
-   * Human readable definition of the metric.
-   */
-  private String description;
-
-  /**
-   * Prometheus metrics tag to filter out the right metrics.
-   */
-  private Map<String, String> filter;
-
-  public MetricDisplay(String description, String id) {
-    this(description, id, new HashMap<>());
-  }
-
-  public MetricDisplay(String description, String id,
-      Map<String, String> filter) {
-    this.id = id;
-    this.description = description;
-    this.filter = filter;
-  }
-
-  public String getId() {
-    return id;
-  }
-
-  public String getDescription() {
-    return description;
-  }
-
-  public Map<String, String> getFilter() {
-    return filter;
-  }
-
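-  /** Hook for line-based filtering; this implementation always returns false. */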
-  public boolean checkLine(String line) {
-    return false;
-  }
-}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricGroupDisplay.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricGroupDisplay.java
deleted file mode 100644
index 08fd60c..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricGroupDisplay.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.ozone.insight.Component.Type;
-
-/**
- * Definition of a group of metrics which can be displayed.
- */
-public class MetricGroupDisplay {
-
-  /**
-   * List of the included metrics.
-   */
-  private List<MetricDisplay> metrics = new ArrayList<>();
-
-  /**
-   * Name of the component which includes the metrics (scm, om,...).
-   */
-  private Component component;
-
-  /**
-   * Human readable description.
-   */
-  private String description;
-
-  public MetricGroupDisplay(Component component, String description) {
-    this.component = component;
-    this.description = description;
-  }
-
-  public MetricGroupDisplay(Type componentType, String description) {
-    this(new Component(componentType), description);
-  }
-
-  public List<MetricDisplay> getMetrics() {
-    return metrics;
-  }
-
-  public void addMetrics(MetricDisplay item) {
-    this.metrics.add(item);
-  }
-
-  public String getDescription() {
-    return description;
-  }
-
-  public Component getComponent() {
-    return component;
-  }
-}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricsSubCommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricsSubCommand.java
deleted file mode 100644
index d320c82..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricsSubCommand.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import org.apache.http.HttpResponse;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.HttpClientBuilder;
-import picocli.CommandLine;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.nio.charset.StandardCharsets;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.stream.Collectors;
-
-/**
- * Command line interface to show metrics for a specific component.
- */
-@CommandLine.Command(
-    name = "metrics",
-    aliases = "metric",
-    description = "Show available metrics.",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class MetricsSubCommand extends BaseInsightSubCommand
-    implements Callable<Void> {
-
-  @CommandLine.Parameters(description = "Name of the insight point (use list "
-      + "to check the available options)")
-  private String insightName;
-
-  @Override
-  public Void call() throws Exception {
-    OzoneConfiguration conf =
-        getInsightCommand().createOzoneConfiguration();
-    InsightPoint insight =
-        getInsight(conf, insightName);
-    Set<Component> sources =
-        insight.getMetrics().stream().map(MetricGroupDisplay::getComponent)
-            .collect(Collectors.toSet());
-    Map<Component, List<String>> metrics = getMetrics(conf, sources);
-    System.out.println(
-        "Metrics for `" + insightName + "` (" + insight.getDescription() + ")");
-    System.out.println();
-    for (MetricGroupDisplay group : insight.getMetrics()) {
-      System.out.println(group.getDescription());
-      System.out.println();
-      for (MetricDisplay display : group.getMetrics()) {
-        System.out.println("  " + display.getDescription() + ": " + selectValue(
-            metrics.get(group.getComponent()), display));
-      }
-      System.out.println();
-      System.out.println();
-
-    }
-    return null;
-  }
-
-  private Map<Component, List<String>> getMetrics(OzoneConfiguration conf,
-      Collection<Component> sources) {
-    Map<Component, List<String>> result = new HashMap<>();
-    for (Component source : sources) {
-      result.put(source, getMetrics(conf, source));
-    }
-    return result;
-  }
-
-  private String selectValue(List<String> metrics,
-      MetricDisplay metricDisplay) {
-    for (String line : metrics) {
-      if (line.startsWith(metricDisplay.getId())) {
-        boolean filtered = false;
-        for (Entry<String, String> filter : metricDisplay.getFilter()
-            .entrySet()) {
-          if (!line
-              .contains(filter.getKey() + "=\"" + filter.getValue() + "\"")) {
-            filtered = true;
-          }
-        }
-        if (!filtered) {
-          return line.split(" ")[1];
-        }
-      }
-    }
-    return "???";
-  }
-
-  private List<String> getMetrics(OzoneConfiguration conf,
-      Component component) {
-    HttpClient client = HttpClientBuilder.create().build();
-    HttpGet get = new HttpGet(getHost(conf, component) + "/prom");
-    try {
-      HttpResponse execute = client.execute(get);
-      if (execute.getStatusLine().getStatusCode() != 200) {
-        throw new RuntimeException(
-            "Can't read prometheus metrics endpoint" + execute.getStatusLine()
-                .getStatusCode());
-      }
-      try (BufferedReader bufferedReader = new BufferedReader(
-          new InputStreamReader(execute.getEntity().getContent(),
-              StandardCharsets.UTF_8))) {
-        return bufferedReader.lines().collect(Collectors.toList());
-      }
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-}
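
A hedged sketch of the matching rule selectValue() applies to the Prometheus exposition lines fetched from the /prom endpoint: the line must start with the metric id, every filter tag must appear as key="value", and the value is the second whitespace-separated token. Sample values invented:

package org.apache.hadoop.ozone.insight;

public class PrometheusLineExample {
  public static void main(String[] args) {
    String line =
        "rpc_num_open_connections{servername=\"OzoneManagerService\"} 7";

    if (line.startsWith("rpc_num_open_connections")
        && line.contains("servername=\"OzoneManagerService\"")) {
      System.out.println(line.split(" ")[1]); // prints: 7
    }
  }
}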
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/RatisInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/RatisInsight.java
deleted file mode 100644
index b87955e..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/RatisInsight.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight.datanode;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.insight.BaseInsightPoint;
-import org.apache.hadoop.ozone.insight.Component;
-import org.apache.hadoop.ozone.insight.Component.Type;
-import org.apache.hadoop.ozone.insight.InsightPoint;
-import org.apache.hadoop.ozone.insight.LoggerSource;
-
-/**
- * Insight definition for datanode/pipeline metrics.
- */
-public class RatisInsight extends BaseInsightPoint implements InsightPoint {
-
-  private OzoneConfiguration conf;
-
-  public RatisInsight(OzoneConfiguration conf) {
-    this.conf = conf;
-  }
-
-  @Override
-  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
-    List<LoggerSource> result = new ArrayList<>();
-    try {
-      ScmClient scmClient = createScmClient(conf);
-      Pipeline pipeline = scmClient.listPipelines()
-          .stream()
-          .filter(d -> d.getNodes().size() > 1)
-          .findFirst()
-          .get();
-      for (DatanodeDetails datanode : pipeline.getNodes()) {
-        Component dn =
-            new Component(Type.DATANODE, datanode.getUuid().toString(),
-                datanode.getHostName(), 9882);
-        result
-            .add(new LoggerSource(dn, "org.apache.ratis.server.impl",
-                defaultLevel(verbose)));
-      }
-    } catch (IOException e) {
-      throw new RuntimeException("Can't enumerate required logs", e);
-    }
-
-    return result;
-  }
-
-  @Override
-  public String getDescription() {
-    return "More information about one ratis datanode ring.";
-  }
-
-}
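
One caveat: the pipeline lookup above ends in findFirst().get(), which throws an unchecked NoSuchElementException when no pipeline has more than one node. A hedged drop-in alternative for that expression inside getRelatedLoggers() (message text invented):

    Pipeline pipeline = scmClient.listPipelines().stream()
        .filter(p -> p.getNodes().size() > 1)
        .findFirst()
        .orElseThrow(() -> new IllegalStateException(
            "No multi-datanode Ratis pipeline is available"));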
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/package-info.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/package-info.java
deleted file mode 100644
index 97dd495..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.insight.datanode;
-
-/**
- * Insight points for the ozone datanodes.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/KeyManagerInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/KeyManagerInsight.java
deleted file mode 100644
index 515cf38..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/KeyManagerInsight.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight.om;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.ozone.insight.BaseInsightPoint;
-import org.apache.hadoop.ozone.insight.Component.Type;
-import org.apache.hadoop.ozone.insight.LoggerSource;
-import org.apache.hadoop.ozone.insight.MetricDisplay;
-import org.apache.hadoop.ozone.insight.MetricGroupDisplay;
-import org.apache.hadoop.ozone.om.KeyManagerImpl;
-
-/**
- * Insight implementation for the key management related operations.
- */
-public class KeyManagerInsight extends BaseInsightPoint {
-
-  @Override
-  public List<MetricGroupDisplay> getMetrics() {
-    List<MetricGroupDisplay> display = new ArrayList<>();
-
-    MetricGroupDisplay state =
-        new MetricGroupDisplay(Type.OM, "Key related metrics");
-    state
-        .addMetrics(new MetricDisplay("Number of keys", "om_metrics_num_keys"));
-    state.addMetrics(new MetricDisplay("Number of key operations",
-        "om_metrics_num_key_ops"));
-
-    display.add(state);
-
-    MetricGroupDisplay key =
-        new MetricGroupDisplay(Type.OM, "Key operation stats");
-    for (String operation : new String[] {"allocate", "commit", "lookup",
-        "list", "delete"}) {
-      key.addMetrics(new MetricDisplay(
-          "Number of key " + operation + "s (failure + success)",
-          "om_metrics_num_key_" + operation));
-      key.addMetrics(
-          new MetricDisplay("Number of failed key " + operation + "s",
-              "om_metrics_num_key_" + operation + "_fails"));
-    }
-    display.add(key);
-
-    return display;
-  }
-
-  @Override
-  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
-    List<LoggerSource> loggers = new ArrayList<>();
-    loggers.add(
-        new LoggerSource(Type.OM, KeyManagerImpl.class,
-            defaultLevel(verbose)));
-    return loggers;
-  }
-
-  @Override
-  public String getDescription() {
-    return "OM Key Manager";
-  }
-
-}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/OmProtocolInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/OmProtocolInsight.java
deleted file mode 100644
index 502ba60..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/OmProtocolInsight.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight.om;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.ozone.insight.BaseInsightPoint;
-import org.apache.hadoop.ozone.insight.Component.Type;
-import org.apache.hadoop.ozone.insight.LoggerSource;
-import org.apache.hadoop.ozone.insight.MetricGroupDisplay;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
-
-/**
- * Insight definition for the OM RPC server.
- */
-public class OmProtocolInsight extends BaseInsightPoint {
-
-  @Override
-  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
-    List<LoggerSource> loggers = new ArrayList<>();
-    loggers.add(
-        new LoggerSource(Type.OM,
-            OzoneManagerProtocolServerSideTranslatorPB.class,
-            defaultLevel(verbose)));
-    return loggers;
-  }
-
-  @Override
-  public List<MetricGroupDisplay> getMetrics() {
-    List<MetricGroupDisplay> metrics = new ArrayList<>();
-
-    Map<String, String> filter = new HashMap<>();
-    filter.put("servername", "OzoneManagerService");
-
-    addRpcMetrics(metrics, Type.OM, filter);
-
-    addProtocolMessageMetrics(metrics, "om_client_protocol", Type.OM,
-        OzoneManagerProtocolProtos.Type.values());
-
-    return metrics;
-  }
-
-  @Override
-  public String getDescription() {
-    return "Ozone Manager RPC endpoint";
-  }
-
-}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/package-info.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/package-info.java
deleted file mode 100644
index c0dfc4d..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.insight.om;
-
-/**
- * Insight points for the Ozone Manager.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/package-info.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/package-info.java
deleted file mode 100644
index a77524d7..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.insight;
-
-/**
- * Framework to collect logs/metrics and configuration for specified ozone
- * components.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/EventQueueInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/EventQueueInsight.java
deleted file mode 100644
index 5a88cd2..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/EventQueueInsight.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight.scm;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.insight.BaseInsightPoint;
-import org.apache.hadoop.ozone.insight.Component.Type;
-import org.apache.hadoop.ozone.insight.LoggerSource;
-
-/**
- * Insight definition to check internal events.
- */
-public class EventQueueInsight extends BaseInsightPoint {
-
-  @Override
-  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
-    List<LoggerSource> loggers = new ArrayList<>();
-    loggers
-        .add(new LoggerSource(Type.SCM, EventQueue.class,
-            defaultLevel(verbose)));
-    return loggers;
-  }
-
-  @Override
-  public String getDescription() {
-    return "Information about the internal async event delivery";
-  }
-
-}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/NodeManagerInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/NodeManagerInsight.java
deleted file mode 100644
index c4fb025..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/NodeManagerInsight.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight.scm;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
-import org.apache.hadoop.ozone.insight.BaseInsightPoint;
-import org.apache.hadoop.ozone.insight.Component.Type;
-import org.apache.hadoop.ozone.insight.LoggerSource;
-import org.apache.hadoop.ozone.insight.MetricDisplay;
-import org.apache.hadoop.ozone.insight.MetricGroupDisplay;
-
-/**
- * Insight definition to check node manager / node report events.
- */
-public class NodeManagerInsight extends BaseInsightPoint {
-
-  @Override
-  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
-    List<LoggerSource> loggers = new ArrayList<>();
-    loggers.add(
-        new LoggerSource(Type.SCM, SCMNodeManager.class,
-            defaultLevel(verbose)));
-    return loggers;
-  }
-
-  @Override
-  public List<MetricGroupDisplay> getMetrics() {
-    List<MetricGroupDisplay> display = new ArrayList<>();
-
-    MetricGroupDisplay nodes =
-        new MetricGroupDisplay(Type.SCM, "Node counters");
-
-    nodes.addMetrics(
-        new MetricDisplay("Healthy Nodes", "scm_node_manager_healthy_nodes"));
-    nodes.addMetrics(
-        new MetricDisplay("Dead Nodes", "scm_node_manager_dead_nodes"));
-
-    display.add(nodes);
-
-    MetricGroupDisplay hb =
-        new MetricGroupDisplay(Type.SCM, "HB processing stats");
-    hb.addMetrics(
-        new MetricDisplay("HB processed", "scm_node_manager_num_hb_processed"));
-    hb.addMetrics(new MetricDisplay("HB processing failed",
-        "scm_node_manager_num_hb_processing_failed"));
-    display.add(hb);
-
-    return display;
-  }
-
-  @Override
-  public String getDescription() {
-    return "SCM Datanode management related information.";
-  }
-
-}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ReplicaManagerInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ReplicaManagerInsight.java
deleted file mode 100644
index ec87f3f..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ReplicaManagerInsight.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight.scm;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hdds.scm.container.ReplicationManager;
-import org.apache.hadoop.ozone.insight.BaseInsightPoint;
-import org.apache.hadoop.ozone.insight.Component.Type;
-import org.apache.hadoop.ozone.insight.LoggerSource;
-import org.apache.hadoop.ozone.insight.MetricGroupDisplay;
-
-/**
- * Insight definition to check the replication manager internal state.
- */
-public class ReplicaManagerInsight extends BaseInsightPoint {
-
-  @Override
-  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
-    List<LoggerSource> loggers = new ArrayList<>();
-    loggers.add(new LoggerSource(Type.SCM, ReplicationManager.class,
-        defaultLevel(verbose)));
-    return loggers;
-  }
-
-  @Override
-  public List<MetricGroupDisplay> getMetrics() {
-    List<MetricGroupDisplay> display = new ArrayList<>();
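-    // No dedicated metrics are published for the replication manager;
-    // only the logger and configuration insight points apply here.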
-    return display;
-  }
-
-  @Override
-  public List<Class> getConfigurationClasses() {
-    List<Class> result = new ArrayList<>();
-    result.add(ReplicationManager.ReplicationManagerConfiguration.class);
-    return result;
-  }
-
-  @Override
-  public String getDescription() {
-    return "SCM closed container replication manager";
-  }
-
-}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolBlockLocationInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolBlockLocationInsight.java
deleted file mode 100644
index f67f641..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolBlockLocationInsight.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight.scm;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;
-import org.apache.hadoop.hdds.scm.server.SCMBlockProtocolServer;
-import org.apache.hadoop.ozone.insight.BaseInsightPoint;
-import org.apache.hadoop.ozone.insight.Component.Type;
-import org.apache.hadoop.ozone.insight.LoggerSource;
-import org.apache.hadoop.ozone.insight.MetricGroupDisplay;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB;
-
-/**
- * Insight metric to check the SCM block location protocol behaviour.
- */
-public class ScmProtocolBlockLocationInsight extends BaseInsightPoint {
-
-  @Override
-  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
-    List<LoggerSource> loggers = new ArrayList<>();
-    loggers.add(
-        new LoggerSource(Type.SCM,
-            ScmBlockLocationProtocolServerSideTranslatorPB.class,
-            defaultLevel(verbose)));
-    loggers.add(new LoggerSource(Type.SCM,
-        SCMBlockProtocolServer.class,
-        defaultLevel(verbose)));
-    return loggers;
-  }
-
-  @Override
-  public List<MetricGroupDisplay> getMetrics() {
-    List<MetricGroupDisplay> metrics = new ArrayList<>();
-
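-    // RPC metrics are tagged with the protobuf service name; filter on the
-    // block location service to isolate this endpoint's counters.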
-    Map<String, String> filter = new HashMap<>();
-    filter.put("servername", "StorageContainerLocationProtocolService");
-
-    addRpcMetrics(metrics, Type.SCM, filter);
-
-    addProtocolMessageMetrics(metrics, "scm_block_location_protocol",
-        Type.SCM, ScmBlockLocationProtocolProtos.Type.values());
-
-    return metrics;
-  }
-
-  @Override
-  public String getDescription() {
-    return "SCM Block location protocol endpoint";
-  }
-
-}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolContainerLocationInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolContainerLocationInsight.java
deleted file mode 100644
index d6db589..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolContainerLocationInsight.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight.scm;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StorageContainerLocationProtocolService;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocolServerSideTranslatorPB;
-import org.apache.hadoop.ozone.insight.BaseInsightPoint;
-import org.apache.hadoop.ozone.insight.Component.Type;
-import org.apache.hadoop.ozone.insight.LoggerSource;
-import org.apache.hadoop.ozone.insight.MetricGroupDisplay;
-
-/**
- * Insight metric to check the SCM container location protocol behaviour.
- */
-public class ScmProtocolContainerLocationInsight extends BaseInsightPoint {
-
-  @Override
-  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
-    List<LoggerSource> loggers = new ArrayList<>();
-    loggers.add(
-        new LoggerSource(Type.SCM,
-            StorageContainerLocationProtocolServerSideTranslatorPB.class,
-            defaultLevel(verbose)));
-    loggers.add(new LoggerSource(Type.SCM,
-        StorageContainerLocationProtocolService.class,
-        defaultLevel(verbose)));
-    return loggers;
-  }
-
-  @Override
-  public List<MetricGroupDisplay> getMetrics() {
-    List<MetricGroupDisplay> metrics = new ArrayList<>();
-
-    Map<String, String> filter = new HashMap<>();
-    filter.put("servername", "StorageContainerLocationProtocolService");
-
-    addRpcMetrics(metrics, Type.SCM, filter);
-
-    addProtocolMessageMetrics(metrics, "scm_container_location_protocol",
-        Type.SCM, StorageContainerLocationProtocolProtos.Type.values());
-
-    return metrics;
-  }
-
-  @Override
-  public String getDescription() {
-    return "SCM Container location protocol endpoint";
-  }
-
-}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolDatanodeInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolDatanodeInsight.java
deleted file mode 100644
index 289af89..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolDatanodeInsight.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight.scm;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer;
-import org.apache.hadoop.ozone.insight.BaseInsightPoint;
-import org.apache.hadoop.ozone.insight.Component.Type;
-import org.apache.hadoop.ozone.insight.LoggerSource;
-import org.apache.hadoop.ozone.insight.MetricGroupDisplay;
-import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB;
-
-/**
- * Insight metric to check the SCM datanode protocol behaviour.
- */
-public class ScmProtocolDatanodeInsight extends BaseInsightPoint {
-
-  @Override
-  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
-    List<LoggerSource> loggers = new ArrayList<>();
-    loggers.add(
-        new LoggerSource(Type.SCM,
-            SCMDatanodeProtocolServer.class,
-            defaultLevel(verbose)));
-    loggers.add(
-        new LoggerSource(Type.SCM,
-            StorageContainerDatanodeProtocolServerSideTranslatorPB.class,
-            defaultLevel(verbose)));
-    return loggers;
-  }
-
-  @Override
-  public List<MetricGroupDisplay> getMetrics() {
-    List<MetricGroupDisplay> metrics = new ArrayList<>();
-
-    Map<String, String> filter = new HashMap<>();
-    filter.put("servername", "StorageContainerDatanodeProtocolService");
-
-    addRpcMetrics(metrics, Type.SCM, filter);
-
-    addProtocolMessageMetrics(metrics, "scm_datanode_protocol",
-        Type.SCM, StorageContainerDatanodeProtocolProtos.Type.values());
-
-    return metrics;
-  }
-
-  @Override
-  public String getDescription() {
-    return "SCM Datanode protocol endpoint";
-  }
-
-}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolSecurityInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolSecurityInsight.java
deleted file mode 100644
index 734da34..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolSecurityInsight.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight.scm;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
-import org.apache.hadoop.hdds.scm.protocol.SCMSecurityProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.server.SCMSecurityProtocolServer;
-import org.apache.hadoop.ozone.insight.BaseInsightPoint;
-import org.apache.hadoop.ozone.insight.Component.Type;
-import org.apache.hadoop.ozone.insight.LoggerSource;
-import org.apache.hadoop.ozone.insight.MetricGroupDisplay;
-
-/**
- * Insight metric to check the SCM security protocol behaviour.
- */
-public class ScmProtocolSecurityInsight extends BaseInsightPoint {
-
-  @Override
-  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
-    List<LoggerSource> loggers = new ArrayList<>();
-    loggers.add(
-        new LoggerSource(Type.SCM,
-            SCMSecurityProtocolServerSideTranslatorPB.class,
-            defaultLevel(verbose)));
-    loggers.add(new LoggerSource(Type.SCM,
-        SCMSecurityProtocolServer.class,
-        defaultLevel(verbose)));
-    return loggers;
-  }
-
-  @Override
-  public List<MetricGroupDisplay> getMetrics() {
-    List<MetricGroupDisplay> metrics = new ArrayList<>();
-
-    Map<String, String> filter = new HashMap<>();
-    filter.put("servername", "SCMSecurityProtocolService");
-
-    addRpcMetrics(metrics, Type.SCM, filter);
-
-    addProtocolMessageMetrics(metrics, "scm_security_protocol",
-        Type.SCM, SCMSecurityProtocolProtos.Type.values());
-
-    return metrics;
-  }
-
-  @Override
-  public String getDescription() {
-    return "SCM Block location protocol endpoint";
-  }
-
-}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/package-info.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/package-info.java
deleted file mode 100644
index 0966fbd..0000000
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.insight.scm;
-
-/**
- * Insight points for the Storage Container Manager.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/LogSubcommandTest.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/LogSubcommandTest.java
deleted file mode 100644
index 67c2f70..0000000
--- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/LogSubcommandTest.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.insight;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Tests for the utility methods of the log subcommand.
- */
-public class LogSubcommandTest {
-
-  @Test
-  public void filterLog() {
-    LogSubcommand logSubcommand = new LogSubcommand();
-    String result = logSubcommand.processLogLine(
-        "2019-08-04 12:27:08,648 [TRACE|org.apache.hadoop.hdds.scm.node"
-            + ".SCMNodeManager|SCMNodeManager] HB is received from "
-            + "[datanode=localhost]: <json>storageReport {\\n  storageUuid: "
-            + "\"DS-29204db6-a615-4106-9dd4-ce294c2f4cf6\"\\n  "
-            + "storageLocation: \"/tmp/hadoop-elek/dfs/data\"\\n  capacity: "
-            + "8348086272\\n  scmUsed: 4096\\n  remaining: 8246956032n  "
-            + "storageType: DISK\\n  failed: falsen}\\n</json>\n");
-    Assert.assertEquals(3, result.split("\n").length);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml
deleted file mode 100644
index 5593f28..0000000
--- a/hadoop-ozone/integration-test/pom.xml
+++ /dev/null
@@ -1,136 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-integration-test</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Integration Tests</description>
-  <name>Apache Hadoop Ozone Integration Tests</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-scm</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-ozone-manager</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-minikdc</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-s3gateway</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-csi</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-recon</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-ozone-manager</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-generator-annprocess</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-kms</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-kms</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-scm</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-container-service</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-  </dependencies>
-
-
-</project>
diff --git a/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh b/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
deleted file mode 100755
index 002fe94..0000000
--- a/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-date=$(date +"%Y-%m-%d--%H-%M-%S-%Z")
-fileformat=".MiniOzoneChaosCluster.log"
-heapformat=".dump"
-current="/tmp/"
-filename="${current}${date}${fileformat}"
-heapdumpfile="${current}${date}${heapformat}"
-
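-# Capture a heap dump on OOM and disable Netty's per-thread allocator caches
-# to keep the long-running chaos cluster's memory footprint bounded.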
-export MAVEN_OPTS="-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${heapdumpfile} -Dorg.apache.ratis.thirdparty.io.netty.allocator.useCacheForAllThreads=false"
-
-echo "logging to ${filename}"
-echo "heapdump to ${heapdumpfile}"
-
-echo "Starting MiniOzoneChaosCluster"
-mvn clean install -DskipTests > "${filename}" 2>&1
-mvn exec:java \
-  -Dexec.mainClass="org.apache.hadoop.ozone.TestMiniChaosOzoneCluster" \
-  -Dexec.classpathScope=test \
-  -Dexec.args="$*" >> "${filename}" 2>&1
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
deleted file mode 100644
index e4f1a37..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ /dev/null
@@ -1,470 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.Set;
-import java.util.UUID;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeoutException;
-
-/**
- * Tests for ContainerStateManager.
- */
-public class TestContainerStateManagerIntegration {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestContainerStateManagerIntegration.class);
-
-  private OzoneConfiguration conf;
-  private MiniOzoneCluster cluster;
-  private XceiverClientManager xceiverClientManager;
-  private StorageContainerManager scm;
-  private ContainerManager containerManager;
-  private ContainerStateManager containerStateManager;
-  private String containerOwner = "OZONE";
-  private int numContainerPerOwnerInPipeline;
-
-
-  @Before
-  public void setup() throws Exception {
-    conf = new OzoneConfiguration();
-    numContainerPerOwnerInPipeline =
-        conf.getInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
-            ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT);
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
-    cluster.waitForClusterToBeReady();
-    cluster.waitTobeOutOfSafeMode();
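-    // Container allocation is rejected while SCM is still in safe mode,
-    // so wait for the cluster to leave it before running the tests.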
-    xceiverClientManager = new XceiverClientManager(conf);
-    scm = cluster.getStorageContainerManager();
-    containerManager = scm.getContainerManager();
-    containerStateManager = ((SCMContainerManager)containerManager)
-        .getContainerStateManager();
-  }
-
-  @After
-  public void cleanUp() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testAllocateContainer() throws IOException {
-    // Allocate a container and verify the container info
-    ContainerWithPipeline container1 = scm.getClientProtocolServer()
-        .allocateContainer(xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(), containerOwner);
-    ContainerInfo info = containerManager
-        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
-            container1.getPipeline());
-    Assert.assertNotEquals(container1.getContainerInfo().getContainerID(),
-        info.getContainerID());
-    Assert.assertEquals(containerOwner, info.getOwner());
-    Assert.assertEquals(xceiverClientManager.getType(),
-        info.getReplicationType());
-    Assert.assertEquals(xceiverClientManager.getFactor(),
-        info.getReplicationFactor());
-    Assert.assertEquals(HddsProtos.LifeCycleState.OPEN, info.getState());
-
-    // Allocate a second container; together with container1 and the extra
-    // container created by getMatchingContainer above, there should now be
-    // three containers in OPEN state.
-    ContainerWithPipeline container2 = scm.getClientProtocolServer()
-        .allocateContainer(
-            xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(), containerOwner);
-    int numContainers = containerStateManager
-        .getMatchingContainerIDs(containerOwner,
-            xceiverClientManager.getType(), xceiverClientManager.getFactor(),
-            HddsProtos.LifeCycleState.OPEN).size();
-    Assert.assertNotEquals(container1.getContainerInfo().getContainerID(),
-        container2.getContainerInfo().getContainerID());
-    Assert.assertEquals(3, numContainers);
-  }
-
-  @Test
-  public void testAllocateContainerWithDifferentOwner() throws IOException {
-
-    // Allocate a container and verify the container info
-    ContainerWithPipeline container1 = scm.getClientProtocolServer()
-        .allocateContainer(xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(), containerOwner);
-    ContainerInfo info = containerManager
-        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
-            container1.getPipeline());
-    Assert.assertNotNull(info);
-
-    String newContainerOwner = "OZONE_NEW";
-    ContainerWithPipeline container2 = scm.getClientProtocolServer()
-        .allocateContainer(xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(), newContainerOwner);
-    ContainerInfo info2 = containerManager
-        .getMatchingContainer(OzoneConsts.GB * 3, newContainerOwner,
-            container1.getPipeline());
-    Assert.assertNotNull(info2);
-
-    Assert.assertNotEquals(info.containerID(), info2.containerID());
-  }
-
-  @Test
-  public void testContainerStateManagerRestart() throws IOException,
-      TimeoutException, InterruptedException, AuthenticationException {
-    // Allocate 10 containers and FINALIZE the last 5, leaving 5 in OPEN
-    // state and 5 in CLOSING state after the restart.
-
-    for (int i = 0; i < 10; i++) {
-
-      ContainerWithPipeline container = scm.getClientProtocolServer()
-          .allocateContainer(
-              xceiverClientManager.getType(),
-              xceiverClientManager.getFactor(), containerOwner);
-      if (i >= 5) {
-        scm.getContainerManager().updateContainerState(container
-                .getContainerInfo().containerID(),
-            HddsProtos.LifeCycleEvent.FINALIZE);
-      }
-    }
-
-    cluster.restartStorageContainerManager(true);
-
-    List<ContainerInfo> result = cluster.getStorageContainerManager()
-        .getContainerManager().listContainer(null, 100);
-
-    long matchCount = result.stream()
-        .filter(info ->
-            info.getOwner().equals(containerOwner))
-        .filter(info ->
-            info.getReplicationType() == xceiverClientManager.getType())
-        .filter(info ->
-            info.getReplicationFactor() == xceiverClientManager.getFactor())
-        .filter(info ->
-            info.getState() == HddsProtos.LifeCycleState.OPEN)
-        .count();
-    Assert.assertEquals(5, matchCount);
-    matchCount = result.stream()
-        .filter(info ->
-            info.getOwner().equals(containerOwner))
-        .filter(info ->
-            info.getReplicationType() == xceiverClientManager.getType())
-        .filter(info ->
-            info.getReplicationFactor() == xceiverClientManager.getFactor())
-        .filter(info ->
-            info.getState() == HddsProtos.LifeCycleState.CLOSING)
-        .count();
-    Assert.assertEquals(5, matchCount);
-  }
-
-  @Test
-  public void testGetMatchingContainer() throws IOException {
-    long cid;
-    ContainerWithPipeline container1 = scm.getClientProtocolServer().
-        allocateContainer(xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(), containerOwner);
-    cid = container1.getContainerInfo().getContainerID();
-
-    // Each getMatchingContainer call allocates a new container in the
-    // pipeline until the pipeline holds numContainerPerOwnerInPipeline
-    // containers.
-    for (int i = 1; i < numContainerPerOwnerInPipeline; i++) {
-      ContainerInfo info = containerManager
-          .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
-              container1.getPipeline());
-      Assert.assertTrue(info.getContainerID() > cid);
-      cid = info.getContainerID();
-    }
-
-    // All numContainerPerOwnerInPipeline slots in the pipeline are now
-    // used, so the next matching container should wrap around to the first.
-    ContainerInfo info = containerManager
-        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
-            container1.getPipeline());
-    Assert.assertEquals(container1.getContainerInfo().getContainerID(),
-        info.getContainerID());
-  }
-
-  @Test
-  public void testGetMatchingContainerWithExcludedList() throws IOException {
-    long cid;
-    ContainerWithPipeline container1 = scm.getClientProtocolServer().
-        allocateContainer(xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(), containerOwner);
-    cid = container1.getContainerInfo().getContainerID();
-
-    // Each getMatchingContainer call allocates a new container in the
-    // pipeline until the pipeline holds numContainerPerOwnerInPipeline
-    // containers.
-    for (int i = 1; i < numContainerPerOwnerInPipeline; i++) {
-      ContainerInfo info = containerManager
-          .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
-              container1.getPipeline());
-      Assert.assertTrue(info.getContainerID() > cid);
-      cid = info.getContainerID();
-    }
-
-    // All slots in the pipeline are now used; since the first container is
-    // excluded below, getMatchingContainer must return a different one.
-    ContainerInfo info = containerManager
-        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
-            container1.getPipeline(), Collections.singletonList(new
-                ContainerID(1)));
-    Assert.assertNotEquals(container1.getContainerInfo().getContainerID(),
-        info.getContainerID());
-  }
-
-
-  @Test
-  public void testCreateContainerLogicWithExcludedList() throws IOException {
-    long cid;
-    ContainerWithPipeline container1 = scm.getClientProtocolServer().
-        allocateContainer(xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(), containerOwner);
-    cid = container1.getContainerInfo().getContainerID();
-
-    for (int i = 1; i < numContainerPerOwnerInPipeline; i++) {
-      ContainerInfo info = containerManager
-          .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
-              container1.getPipeline());
-      Assert.assertTrue(info.getContainerID() > cid);
-      cid = info.getContainerID();
-    }
-
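-    // With all existing containers excluded, getMatchingContainer has to
-    // allocate a brand new container (the fourth one).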
-    ContainerInfo info = containerManager
-        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
-            container1.getPipeline(), Arrays.asList(new ContainerID(1), new
-                ContainerID(2), new ContainerID(3)));
-    Assert.assertEquals(4, info.getContainerID());
-  }
-
-  @Test
-  @Ignore("TODO:HDDS-1159")
-  public void testGetMatchingContainerMultipleThreads()
-      throws IOException, InterruptedException {
-    ContainerWithPipeline container1 = scm.getClientProtocolServer().
-        allocateContainer(xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(), containerOwner);
-    Map<Long, Long> container2MatchedCount = new ConcurrentHashMap<>();
-
-    // allocate blocks using multiple threads
-    int numBlockAllocates = 100000;
-    for (int i = 0; i < numBlockAllocates; i++) {
-      CompletableFuture.supplyAsync(() -> {
-        ContainerInfo info = containerManager
-            .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
-                container1.getPipeline());
-        container2MatchedCount
-            .compute(info.getContainerID(), (k, v) -> v == null ? 1L : v + 1);
-        return null;
-      });
-    }
-
-    // Make sure the pipeline has numContainerPerOwnerInPipeline containers.
-    Assert.assertEquals(scm.getPipelineManager()
-            .getNumberOfContainers(container1.getPipeline().getId()),
-        numContainerPerOwnerInPipeline);
-    Thread.sleep(5000);
-    long threshold = 2000;
-    // check the way the block allocations are distributed in the different
-    // containers.
-    for (Long matchedCount : container2MatchedCount.values()) {
-      // TODO: #CLUTIL Look at the division of block allocations in different
-      // containers.
-      LOG.error("Total allocated block = " + matchedCount);
-      Assert.assertTrue(matchedCount <=
-          numBlockAllocates / container2MatchedCount.size() + threshold
-          && matchedCount >=
-          numBlockAllocates / container2MatchedCount.size() - threshold);
-    }
-  }
-
-  @Test
-  public void testUpdateContainerState() throws IOException {
-    NavigableSet<ContainerID> containerList = containerStateManager
-        .getMatchingContainerIDs(containerOwner,
-            xceiverClientManager.getType(), xceiverClientManager.getFactor(),
-            HddsProtos.LifeCycleState.OPEN);
-    int containers = containerList == null ? 0 : containerList.size();
-    Assert.assertEquals(0, containers);
-
-    // Allocate container1 and update its state from
-    // OPEN -> CLOSING -> CLOSED -> DELETING -> DELETED
-    ContainerWithPipeline container1 = scm.getClientProtocolServer()
-        .allocateContainer(
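-    // Metric names below are the Prometheus-style names published for the
-    // SCM node manager counters.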
-            xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(), containerOwner);
-    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
-        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
-        HddsProtos.LifeCycleState.OPEN).size();
-    Assert.assertEquals(1, containers);
-
-    containerManager
-        .updateContainerState(container1.getContainerInfo().containerID(),
-            HddsProtos.LifeCycleEvent.FINALIZE);
-    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
-        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
-        HddsProtos.LifeCycleState.CLOSING).size();
-    Assert.assertEquals(1, containers);
-
-    containerManager
-        .updateContainerState(container1.getContainerInfo().containerID(),
-            HddsProtos.LifeCycleEvent.CLOSE);
-    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
-        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
-        HddsProtos.LifeCycleState.CLOSED).size();
-    Assert.assertEquals(1, containers);
-
-    containerManager
-        .updateContainerState(container1.getContainerInfo().containerID(),
-            HddsProtos.LifeCycleEvent.DELETE);
-    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
-        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
-        HddsProtos.LifeCycleState.DELETING).size();
-    Assert.assertEquals(1, containers);
-
-    containerManager
-        .updateContainerState(container1.getContainerInfo().containerID(),
-            HddsProtos.LifeCycleEvent.CLEANUP);
-    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
-        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
-        HddsProtos.LifeCycleState.DELETED).size();
-    Assert.assertEquals(1, containers);
-
-    // Allocate container3 and update its state from
-    // OPEN -> CLOSING -> CLOSED
-    ContainerWithPipeline container3 = scm.getClientProtocolServer()
-        .allocateContainer(
-            xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(), containerOwner);
-    containerManager
-        .updateContainerState(container3.getContainerInfo().containerID(),
-            HddsProtos.LifeCycleEvent.FINALIZE);
-    containerManager
-        .updateContainerState(container3.getContainerInfo().containerID(),
-            HddsProtos.LifeCycleEvent.CLOSE);
-    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
-        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
-        HddsProtos.LifeCycleState.CLOSED).size();
-    Assert.assertEquals(1, containers);
-  }
-
-
-  @Test
-  public void testReplicaMap() throws Exception {
-    DatanodeDetails dn1 = DatanodeDetails.newBuilder().setHostName("host1")
-        .setIpAddress("1.1.1.1")
-        .setUuid(UUID.randomUUID().toString()).build();
-    DatanodeDetails dn2 = DatanodeDetails.newBuilder().setHostName("host2")
-        .setIpAddress("2.2.2.2")
-        .setUuid(UUID.randomUUID().toString()).build();
-
-    // Test 1: no replicas exist yet
-    ContainerID containerID = ContainerID.valueof(RandomUtils.nextLong());
-    Set<ContainerReplica> replicaSet;
-    try {
-      containerStateManager.getContainerReplicas(containerID);
-      Assert.fail();
-    } catch (ContainerNotFoundException ex) {
-      // expected.
-    }
-
-    ContainerWithPipeline container = scm.getClientProtocolServer()
-        .allocateContainer(
-            xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(), containerOwner);
-
-    ContainerID id = container.getContainerInfo().containerID();
-
-    // Test 2: Add replica nodes and then test
-    ContainerReplica replicaOne = ContainerReplica.newBuilder()
-        .setContainerID(id)
-        .setContainerState(ContainerReplicaProto.State.OPEN)
-        .setDatanodeDetails(dn1)
-        .build();
-    ContainerReplica replicaTwo = ContainerReplica.newBuilder()
-        .setContainerID(id)
-        .setContainerState(ContainerReplicaProto.State.OPEN)
-        .setDatanodeDetails(dn2)
-        .build();
-    containerStateManager.updateContainerReplica(id, replicaOne);
-    containerStateManager.updateContainerReplica(id, replicaTwo);
-    replicaSet = containerStateManager.getContainerReplicas(id);
-    Assert.assertEquals(2, replicaSet.size());
-    Assert.assertTrue(replicaSet.contains(replicaOne));
-    Assert.assertTrue(replicaSet.contains(replicaTwo));
-
-    // Test 3: Remove one replica node and then test
-    containerStateManager.removeContainerReplica(id, replicaOne);
-    replicaSet = containerStateManager.getContainerReplicas(id);
-    Assert.assertEquals(1, replicaSet.size());
-    Assert.assertFalse(replicaSet.contains(replicaOne));
-    Assert.assertTrue(replicaSet.contains(replicaTwo));
-
-    // Test 4: Remove the second replica and then test
-    containerStateManager.removeContainerReplica(id, replicaTwo);
-    replicaSet = containerStateManager.getContainerReplicas(id);
-    Assert.assertEquals(0, replicaSet.size());
-    Assert.assertFalse(replicaSet.contains(replicaOne));
-    Assert.assertFalse(replicaSet.contains(replicaTwo));
-
-    // Test 5: Re-insert dn1
-    containerStateManager.updateContainerReplica(id, replicaOne);
-    replicaSet = containerStateManager.getContainerReplicas(id);
-    Assert.assertEquals(1, replicaSet.size());
-    Assert.assertTrue(replicaSet.contains(replicaOne));
-    Assert.assertFalse(replicaSet.contains(replicaTwo));
-
-    // Re-insert dn2
-    containerStateManager.updateContainerReplica(id, replicaTwo);
-    replicaSet = containerStateManager.getContainerReplicas(id);
-    Assert.assertEquals(2, replicaSet.size());
-    Assert.assertTrue(replicaSet.contains(replicaOne));
-    Assert.assertTrue(replicaSet.contains(replicaTwo));
-
-    // Re-insert dn1
-    containerStateManager.updateContainerReplica(id, replicaOne);
-    replicaSet = containerStateManager.getContainerReplicas(id);
-    Assert.assertEquals(2, replicaSet.size());
-    Assert.assertTrue(replicaSet.contains(replicaOne));
-    Assert.assertTrue(replicaSet.contains(replicaTwo));
-  }
-
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
deleted file mode 100644
index 5643cb6..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.metrics;
-
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.HashMap;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.fail;
-
-/**
- * Class used to test {@link SCMContainerManagerMetrics}.
- */
-public class TestSCMContainerManagerMetrics {
-
-  private MiniOzoneCluster cluster;
-  private StorageContainerManager scm;
-  private String containerOwner = "OZONE";
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(HDDS_CONTAINER_REPORT_INTERVAL, "3000s");
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
-    cluster.waitForClusterToBeReady();
-    scm = cluster.getStorageContainerManager();
-  }
-
-
-  @After
-  public void teardown() {
-    cluster.shutdown();
-  }
-
-  @Test
-  public void testContainerOpsMetrics() throws IOException {
-    MetricsRecordBuilder metrics;
-    ContainerManager containerManager = scm.getContainerManager();
-    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-    long numSuccessfulCreateContainers = getLongCounter(
-        "NumSuccessfulCreateContainers", metrics);
-
-    ContainerInfo containerInfo = containerManager.allocateContainer(
-        HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, containerOwner);
-
-    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-    Assert.assertEquals(getLongCounter("NumSuccessfulCreateContainers",
-        metrics), ++numSuccessfulCreateContainers);
-
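-    // A THREE-way Ratis pipeline cannot be formed on a one-datanode
-    // cluster, so this allocation is expected to fail and bump the
-    // failure counter instead.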
-    try {
-      containerManager.allocateContainer(
-          HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE, containerOwner);
-      fail("testContainerOpsMetrics failed");
-    } catch (IOException ex) {
-      // Here it should fail, so it should have the old metric value.
-      metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-      Assert.assertEquals(getLongCounter("NumSuccessfulCreateContainers",
-          metrics), numSuccessfulCreateContainers);
-      Assert.assertEquals(getLongCounter("NumFailureCreateContainers",
-          metrics), 1);
-    }
-
-    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-    long numSuccessfulDeleteContainers = getLongCounter(
-        "NumSuccessfulDeleteContainers", metrics);
-
-    containerManager.deleteContainer(
-        new ContainerID(containerInfo.getContainerID()));
-
-    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-    Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers",
-        metrics), numSuccessfulDeleteContainers + 1);
-
-
-    try {
-      // Give random container to delete.
-      containerManager.deleteContainer(
-          new ContainerID(RandomUtils.nextLong(10000, 20000)));
-      fail("testContainerOpsMetrics failed");
-    } catch (IOException ex) {
-      // Here it should fail, so it should have the old metric value.
-      metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-      Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers",
-          metrics), numSuccessfulCreateContainers);
-      Assert.assertEquals(getLongCounter("NumFailureDeleteContainers",
-          metrics), 1);
-    }
-
-    containerManager.listContainer(
-        new ContainerID(containerInfo.getContainerID()), 1);
-    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-    Assert.assertEquals(getLongCounter("NumListContainerOps",
-        metrics), 1);
-
-  }
-
-  @Test
-  public void testReportProcessingMetrics() throws Exception {
-    String volumeName = "vol1";
-    String bucketName = "bucket1";
-    String key = "key1";
-
-    MetricsRecordBuilder metrics =
-        getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-    Assert.assertEquals(getLongCounter("NumContainerReportsProcessedSuccessful",
-        metrics), 1);
-
-    // Create key should create container on DN.
-    cluster.getRpcClient().getObjectStore().getClientProxy()
-        .createVolume(volumeName);
-    cluster.getRpcClient().getObjectStore().getClientProxy()
-        .createBucket(volumeName, bucketName);
-    OzoneOutputStream ozoneOutputStream = cluster.getRpcClient()
-        .getObjectStore().getClientProxy().createKey(volumeName, bucketName,
-            key, 0, ReplicationType.RATIS, ReplicationFactor.ONE,
-            new HashMap<>());
-
-    String data = "file data";
-    ozoneOutputStream.write(data.getBytes(), 0, data.length());
-    ozoneOutputStream.close();
-
-
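-    // Closing the key makes the datanode send an incremental container
-    // report (ICR); wait until SCM has processed one successfully.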
-    GenericTestUtils.waitFor(() -> {
-      final MetricsRecordBuilder scmMetrics =
-          getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-      return getLongCounter("NumICRReportsProcessedSuccessful",
-          scmMetrics) == 1;
-    }, 1000, 500000);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
deleted file mode 100644
index c7470a3..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Set;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
-    .ReplicationType.RATIS;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
-    .ReplicationFactor.THREE;
-
-/**
- * Test for the Node2Pipeline map.
- */
-public class TestNode2PipelineMap {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static StorageContainerManager scm;
-  private static ContainerWithPipeline ratisContainer;
-  private static ContainerManager containerManager;
-  private static PipelineManager pipelineManager;
-
-  /**
- * Create a MiniOzoneCluster for testing.
-   *
-   * @throws IOException
-   */
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
-    cluster.waitForClusterToBeReady();
-    scm = cluster.getStorageContainerManager();
-    containerManager = scm.getContainerManager();
-    pipelineManager = scm.getPipelineManager();
-    ContainerInfo containerInfo = containerManager.allocateContainer(
-        RATIS, THREE, "testOwner");
-    ratisContainer = new ContainerWithPipeline(containerInfo,
-        pipelineManager.getPipeline(containerInfo.getPipelineID()));
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testPipelineMap() throws IOException {
-
-    Set<ContainerID> set = pipelineManager
-        .getContainersInPipeline(ratisContainer.getPipeline().getId());
-
-    ContainerID cId = ratisContainer.getContainerInfo().containerID();
-    Assert.assertEquals(1, set.size());
-    set.forEach(containerID ->
-        Assert.assertEquals(containerID, cId));
-
-    List<DatanodeDetails> dns = ratisContainer.getPipeline().getNodes();
-    Assert.assertEquals(3, dns.size());
-
-    // get pipeline details by datanode id
-    Set<PipelineID> pipelines = scm.getScmNodeManager()
-        .getPipelines(dns.get(0));
-    Assert.assertTrue(pipelines.contains(ratisContainer.getPipeline().getId()));
-
-    // Now close the container; it should no longer show up when fetching
-    // containers by pipeline
-    containerManager
-        .updateContainerState(cId, HddsProtos.LifeCycleEvent.FINALIZE);
-    containerManager
-        .updateContainerState(cId, HddsProtos.LifeCycleEvent.CLOSE);
-    Set<ContainerID> set2 = pipelineManager.getContainersInPipeline(
-        ratisContainer.getPipeline().getId());
-    Assert.assertEquals(0, set2.size());
-
-    pipelineManager
-        .finalizeAndDestroyPipeline(ratisContainer.getPipeline(), false);
-    pipelines = scm.getScmNodeManager()
-        .getPipelines(dns.get(0));
-    Assert
-        .assertFalse(pipelines.contains(ratisContainer.getPipeline().getId()));
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
deleted file mode 100644
index 3207878..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
-    .ReplicationFactor.THREE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
-    .ReplicationType.RATIS;
-
-/**
- * Test Node failure detection and handling in Ratis.
- */
-public class TestNodeFailure {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static Pipeline ratisPipelineOne;
-  private static Pipeline ratisPipelineTwo;
-  private static ContainerManager containerManager;
-  private static PipelineManager pipelineManager;
-  private static long timeForFailure;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.setTimeDuration(OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY,
-        10, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT,
-        10, TimeUnit.SECONDS);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(6)
-        .setHbInterval(1000)
-        .setHbProcessorInterval(1000)
-        .build();
-    cluster.waitForClusterToBeReady();
-    StorageContainerManager scm = cluster.getStorageContainerManager();
-    containerManager = scm.getContainerManager();
-    pipelineManager = scm.getPipelineManager();
-    ratisPipelineOne = pipelineManager.getPipeline(
-        containerManager.allocateContainer(
-        RATIS, THREE, "testOwner").getPipelineID());
-    ratisPipelineTwo = pipelineManager.getPipeline(
-        containerManager.allocateContainer(
-        RATIS, THREE, "testOwner").getPipelineID());
-    // At this stage, there should be 2 pipelines, each with 1 open container.
-    // Try closing both the pipelines, one with a closed container and
-    // the other with an open container.
-    timeForFailure = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT
-            .getDuration(), TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Ignore
-  // Enable this once the pipeline teardown logic for a dead datanode is
-  // implemented.
-  @Test(timeout = 300_000L)
-  public void testPipelineFail() throws InterruptedException, IOException,
-      TimeoutException {
-    Assert.assertEquals(ratisPipelineOne.getPipelineState(),
-        Pipeline.PipelineState.OPEN);
-    Pipeline pipelineToFail = ratisPipelineOne;
-    DatanodeDetails dnToFail = pipelineToFail.getFirstNode();
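-    // shut down one datanode from the pipeline; the node failure should
-    // eventually close the pipeline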
-    cluster.shutdownHddsDatanode(dnToFail);
-
-    // wait for sufficient time for the callback to be triggered
-    Thread.sleep(3 * timeForFailure);
-
-    Assert.assertEquals(Pipeline.PipelineState.CLOSED,
-        pipelineManager.getPipeline(ratisPipelineOne.getId())
-            .getPipelineState());
-    Assert.assertEquals(Pipeline.PipelineState.OPEN,
-        pipelineManager.getPipeline(ratisPipelineTwo.getId())
-            .getPipelineState());
-    // Now restart the datanode and make sure that a new pipeline is created.
-    cluster.setWaitForClusterToBeReadyTimeout(300000);
-    cluster.restartHddsDatanode(dnToFail, true);
-    Pipeline ratisPipelineThree = pipelineManager.getPipeline(
-        containerManager.allocateContainer(
-            RATIS, THREE, "testOwner").getPipelineID());
-    // Assert that the new container is not created from the second Ratis
-    // pipeline.
-    Assert.assertNotEquals(ratisPipelineThree.getId(),
-        ratisPipelineTwo.getId());
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
deleted file mode 100644
index c583559..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
-
-/**
- * Tests for Pipeline Closing.
- */
-public class TestPipelineClose {
-
-  private MiniOzoneCluster cluster;
-  private OzoneConfiguration conf;
-  private StorageContainerManager scm;
-  private ContainerWithPipeline ratisContainer;
-  private ContainerManager containerManager;
-  private PipelineManager pipelineManager;
-
-  private long pipelineDestroyTimeoutInMillis;
-  /**
-   * Create a MiniOzoneCluster for testing.
-   *
-   * @throws IOException
-   */
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    // set the timing-related config before the cluster is built so that
-    // every component picks it up
-    conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1000,
-        TimeUnit.MILLISECONDS);
-    pipelineDestroyTimeoutInMillis = 5000;
-    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT,
-        pipelineDestroyTimeoutInMillis, TimeUnit.MILLISECONDS);
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
-    cluster.waitForClusterToBeReady();
-    scm = cluster.getStorageContainerManager();
-    containerManager = scm.getContainerManager();
-    pipelineManager = scm.getPipelineManager();
-    ContainerInfo containerInfo = containerManager
-        .allocateContainer(RATIS, THREE, "testOwner");
-    ratisContainer = new ContainerWithPipeline(containerInfo,
-        pipelineManager.getPipeline(containerInfo.getPipelineID()));
-    // At this stage, there should be 2 pipelines, each with 1 open container.
-    // Try closing both the pipelines, one with a closed container and
-    // the other with an open container.
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testPipelineCloseWithClosedContainer() throws IOException {
-    Set<ContainerID> set = pipelineManager
-        .getContainersInPipeline(ratisContainer.getPipeline().getId());
-
-    ContainerID cId = ratisContainer.getContainerInfo().containerID();
-    Assert.assertEquals(1, set.size());
-    set.forEach(containerID -> Assert.assertEquals(containerID, cId));
-
-    // Now close the container; it should no longer show up when fetching
-    // containers by pipeline
-    containerManager
-        .updateContainerState(cId, HddsProtos.LifeCycleEvent.FINALIZE);
-    containerManager
-        .updateContainerState(cId, HddsProtos.LifeCycleEvent.CLOSE);
-
-    Set<ContainerID> setClosed = pipelineManager
-        .getContainersInPipeline(ratisContainer.getPipeline().getId());
-    Assert.assertEquals(0, setClosed.size());
-
-    pipelineManager
-        .finalizeAndDestroyPipeline(ratisContainer.getPipeline(), false);
-    for (DatanodeDetails dn : ratisContainer.getPipeline().getNodes()) {
-      // Assert that the pipeline has been removed from Node2PipelineMap as well
-      Assert.assertFalse(scm.getScmNodeManager().getPipelines(dn)
-          .contains(ratisContainer.getPipeline().getId()));
-    }
-  }
-
-  @Test
-  public void testPipelineCloseWithOpenContainer()
-      throws IOException, TimeoutException, InterruptedException {
-    Set<ContainerID> setOpen = pipelineManager.getContainersInPipeline(
-        ratisContainer.getPipeline().getId());
-    Assert.assertEquals(1, setOpen.size());
-
-    pipelineManager
-        .finalizeAndDestroyPipeline(ratisContainer.getPipeline(), false);
-    GenericTestUtils.waitFor(() -> {
-      try {
-        return containerManager
-            .getContainer(ratisContainer.getContainerInfo().containerID())
-            .getState() == HddsProtos.LifeCycleState.CLOSING;
-      } catch (ContainerNotFoundException e) {
-        return false;
-      }
-    }, 100, 10000);
-  }
-
-  @Test
-  public void testPipelineCloseWithPipelineAction() throws Exception {
-    List<DatanodeDetails> dns = ratisContainer.getPipeline().getNodes();
-    PipelineActionsFromDatanode
-        pipelineActionsFromDatanode = TestUtils
-        .getPipelineActionFromDatanode(dns.get(0),
-            ratisContainer.getPipeline().getId());
-    // send closing action for pipeline
-    PipelineActionHandler pipelineActionHandler =
-        new PipelineActionHandler(pipelineManager, conf);
-    pipelineActionHandler
-        .onMessage(pipelineActionsFromDatanode, new EventQueue());
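-    // wait slightly longer than the destroy timeout so the pipeline is removed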
-    Thread.sleep((int) (pipelineDestroyTimeoutInMillis * 1.2));
-    OzoneContainer ozoneContainer =
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer();
-    List<PipelineReport> pipelineReports =
-        ozoneContainer.getPipelineReport().getPipelineReportList();
-    for (PipelineReport pipelineReport : pipelineReports) {
-      // ensure the pipeline is not reported by any dn
-      Assert.assertNotEquals(
-          PipelineID.getFromProtobuf(pipelineReport.getPipelineID()),
-          ratisContainer.getPipeline().getId());
-    }
-
-    try {
-      pipelineManager.getPipeline(ratisContainer.getPipeline().getId());
-      Assert.fail("Pipeline should not exist in SCM");
-    } catch (PipelineNotFoundException e) {
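-      // expected: the pipeline was destroyed and removed from SCM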
-    }
-  }
-
-  @Test
-  public void testPipelineCloseWithLogFailure() throws IOException {
-
-    EventQueue eventQ = (EventQueue) scm.getEventQueue();
-    PipelineActionHandler pipelineActionTest =
-        Mockito.mock(PipelineActionHandler.class);
-    eventQ.addHandler(SCMEvents.PIPELINE_ACTIONS, pipelineActionTest);
-    ArgumentCaptor<PipelineActionsFromDatanode> actionCaptor =
-        ArgumentCaptor.forClass(PipelineActionsFromDatanode.class);
-
-    ContainerInfo containerInfo = containerManager
-        .allocateContainer(RATIS, THREE, "testOwner");
-    ContainerWithPipeline containerWithPipeline =
-        new ContainerWithPipeline(containerInfo,
-            pipelineManager.getPipeline(containerInfo.getPipelineID()));
-    Pipeline openPipeline = containerWithPipeline.getPipeline();
-    RaftGroupId groupId = RaftGroupId.valueOf(openPipeline.getId().getId());
-
-    try {
-      pipelineManager.getPipeline(openPipeline.getId());
-    } catch (PipelineNotFoundException e) {
-      Assert.assertTrue("pipeline should exist", false);
-    }
-
-    DatanodeDetails datanodeDetails = openPipeline.getNodes().get(0);
-    int index = cluster.getHddsDatanodeIndex(datanodeDetails);
-
-    XceiverServerRatis xceiverRatis =
-        (XceiverServerRatis) cluster.getHddsDatanodes().get(index)
-        .getDatanodeStateMachine().getContainer().getWriteChannel();
-
-    /*
-     * Notify the datanode Ratis server endpoint of a Ratis log failure.
-     * This is expected to trigger an immediate pipeline actions report to SCM.
-     */
-    xceiverRatis.handleNodeLogFailure(groupId, null);
-
-    // verify SCM receives a pipeline action report "immediately"
-    Mockito.verify(pipelineActionTest, Mockito.timeout(100))
-        .onMessage(
-            actionCaptor.capture(),
-            Mockito.any(EventPublisher.class));
-
-    PipelineActionsFromDatanode actionsFromDatanode =
-        actionCaptor.getValue();
-
-    // match the pipeline id
-    verifyCloseForPipeline(openPipeline, actionsFromDatanode);
-  }
-
-  private boolean verifyCloseForPipeline(Pipeline pipeline,
-      PipelineActionsFromDatanode report) {
-    UUID uuidToFind = pipeline.getId().getId();
-
-    boolean found = false;
-    for (StorageContainerDatanodeProtocolProtos.PipelineAction action :
-        report.getReport().getPipelineActionsList()) {
-      if (action.getAction() ==
-          StorageContainerDatanodeProtocolProtos.PipelineAction.Action.CLOSE) {
-        PipelineID closedPipelineId = PipelineID
-            .getFromProtobuf(action.getClosePipeline().getPipelineID());
-
-        if (closedPipelineId.getId().equals(uuidToFind)) {
-          found = true;
-        }
-      }
-    }
-
-    Assert.assertTrue("SCM did not receive a Close action for the Pipeline",
-        found);
-    return found;
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
deleted file mode 100644
index 0bbfb53..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
+++ /dev/null
@@ -1,475 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Test for PipelineStateManager.
- */
-public class TestPipelineStateManager {
-
-  private PipelineStateManager stateManager;
-
-  @Before
-  public void init() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    stateManager = new PipelineStateManager(conf);
-  }
-
-  private Pipeline createDummyPipeline(int numNodes) {
-    return createDummyPipeline(HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, numNodes);
-  }
-
-  private Pipeline createDummyPipeline(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, int numNodes) {
-    List<DatanodeDetails> nodes = new ArrayList<>();
-    for (int i = 0; i < numNodes; i++) {
-      nodes.add(TestUtils.randomDatanodeDetails());
-    }
-    return Pipeline.newBuilder()
-        .setType(type)
-        .setFactor(factor)
-        .setNodes(nodes)
-        .setState(Pipeline.PipelineState.ALLOCATED)
-        .setId(PipelineID.randomId())
-        .build();
-  }
-
-  @Test
-  public void testAddAndGetPipeline() throws IOException {
-    Pipeline pipeline = createDummyPipeline(0);
-    try {
-      stateManager.addPipeline(pipeline);
-      Assert.fail("Pipeline should not have been added");
-    } catch (IllegalArgumentException e) {
-      // replication factor and number of nodes in the pipeline do not match
-      Assert.assertTrue(e.getMessage().contains("do not match"));
-    }
-
-    // add a pipeline
-    pipeline = createDummyPipeline(1);
-    stateManager.addPipeline(pipeline);
-
-    try {
-      stateManager.addPipeline(pipeline);
-      Assert.fail("Pipeline should not have been added");
-    } catch (IOException e) {
-      // Cannot add a pipeline twice
-      Assert.assertTrue(e.getMessage().contains("Duplicate pipeline ID"));
-    }
-
-    // verify that the same pipeline instance is returned
-    Pipeline pipeline1 = stateManager.getPipeline(pipeline.getId());
-    Assert.assertSame(pipeline, pipeline1);
-
-    // clean up
-    removePipeline(pipeline);
-  }
-
-  @Test
-  public void testGetPipelines() throws IOException {
-    // At the start, there should be no pipelines
-    Assert.assertTrue(stateManager.getPipelines().isEmpty());
-
-    Set<Pipeline> pipelines = new HashSet<>();
-    Pipeline pipeline = createDummyPipeline(1);
-    stateManager.addPipeline(pipeline);
-    stateManager.openPipeline(pipeline.getId());
-    pipelines.add(pipeline);
-    pipeline = createDummyPipeline(1);
-    stateManager.addPipeline(pipeline);
-    stateManager.openPipeline(pipeline.getId());
-    pipelines.add(pipeline);
-
-    Set<Pipeline> pipelines1 = new HashSet<>(stateManager.getPipelines(
-        HddsProtos.ReplicationType.RATIS));
-    Assert.assertEquals(pipelines1.size(), pipelines.size());
-
-    pipelines1 = new HashSet<>(stateManager.getPipelines());
-    Assert.assertEquals(pipelines1.size(), pipelines.size());
-
-    // clean up
-    for (Pipeline pipeline1 : pipelines) {
-      removePipeline(pipeline1);
-    }
-  }
-
-  @Test
-  public void testGetPipelinesByTypeAndFactor() throws IOException {
-    Set<Pipeline> pipelines = new HashSet<>();
-    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
-        .values()) {
-      for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
-          .values()) {
-        for (int i = 0; i < 5; i++) {
-          // 5 pipelines in allocated state for each type and factor
-          Pipeline pipeline =
-              createDummyPipeline(type, factor, factor.getNumber());
-          stateManager.addPipeline(pipeline);
-          pipelines.add(pipeline);
-
-          // 5 pipelines in open state for each type and factor
-          pipeline = createDummyPipeline(type, factor, factor.getNumber());
-          stateManager.addPipeline(pipeline);
-          stateManager.openPipeline(pipeline.getId());
-          pipelines.add(pipeline);
-
-          // 5 pipelines in closed state for each type and factor
-          pipeline = createDummyPipeline(type, factor, factor.getNumber());
-          stateManager.addPipeline(pipeline);
-          stateManager.finalizePipeline(pipeline.getId());
-          pipelines.add(pipeline);
-        }
-      }
-    }
-
-    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
-        .values()) {
-      for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
-          .values()) {
-        // verify pipelines received
-        List<Pipeline> pipelines1 =
-            stateManager.getPipelines(type, factor);
-        Assert.assertEquals(15, pipelines1.size());
-        pipelines1.stream().forEach(p -> {
-          Assert.assertEquals(type, p.getType());
-          Assert.assertEquals(factor, p.getFactor());
-        });
-      }
-    }
-
-    // clean up
-    for (Pipeline pipeline : pipelines) {
-      removePipeline(pipeline);
-    }
-  }
-
-  @Test
-  public void testGetPipelinesByTypeAndState() throws IOException {
-    Set<Pipeline> pipelines = new HashSet<>();
-    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
-        .values()) {
-      HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
-      for (int i = 0; i < 5; i++) {
-        // 5 pipelines in allocated state for each type and factor
-        Pipeline pipeline =
-            createDummyPipeline(type, factor, factor.getNumber());
-        stateManager.addPipeline(pipeline);
-        pipelines.add(pipeline);
-
-        // 5 pipelines in open state for each type and factor
-        pipeline = createDummyPipeline(type, factor, factor.getNumber());
-        stateManager.addPipeline(pipeline);
-        stateManager.openPipeline(pipeline.getId());
-        pipelines.add(pipeline);
-
-        // 5 pipelines in closed state for each type and factor
-        pipeline = createDummyPipeline(type, factor, factor.getNumber());
-        stateManager.addPipeline(pipeline);
-        stateManager.finalizePipeline(pipeline.getId());
-        pipelines.add(pipeline);
-      }
-    }
-
-    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
-        .values()) {
-      // verify pipelines received
-      List<Pipeline> pipelines1 = stateManager
-          .getPipelines(type, Pipeline.PipelineState.OPEN);
-      Assert.assertEquals(5, pipelines1.size());
-      pipelines1.forEach(p -> {
-        Assert.assertEquals(type, p.getType());
-        Assert.assertEquals(Pipeline.PipelineState.OPEN, p.getPipelineState());
-      });
-
-      pipelines1 = stateManager
-          .getPipelines(type, Pipeline.PipelineState.OPEN,
-              Pipeline.PipelineState.CLOSED, Pipeline.PipelineState.ALLOCATED);
-      Assert.assertEquals(15, pipelines1.size());
-    }
-
-    // clean up
-    for (Pipeline pipeline : pipelines) {
-      removePipeline(pipeline);
-    }
-  }
-
-  @Test
-  public void testGetPipelinesByTypeFactorAndState() throws IOException {
-    Set<Pipeline> pipelines = new HashSet<>();
-    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
-        .values()) {
-      for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
-          .values()) {
-        for (int i = 0; i < 5; i++) {
-          // 5 pipelines in allocated state for each type and factor
-          Pipeline pipeline =
-              createDummyPipeline(type, factor, factor.getNumber());
-          stateManager.addPipeline(pipeline);
-          pipelines.add(pipeline);
-
-          // 5 pipelines in open state for each type and factor
-          pipeline = createDummyPipeline(type, factor, factor.getNumber());
-          stateManager.addPipeline(pipeline);
-          stateManager.openPipeline(pipeline.getId());
-          pipelines.add(pipeline);
-
-          // 5 pipelines in dormant state for each type and factor
-          pipeline = createDummyPipeline(type, factor, factor.getNumber());
-          stateManager.addPipeline(pipeline);
-          stateManager.openPipeline(pipeline.getId());
-          stateManager.deactivatePipeline(pipeline.getId());
-          pipelines.add(pipeline);
-
-          // 5 pipelines in closed state for each type and factor
-          pipeline = createDummyPipeline(type, factor, factor.getNumber());
-          stateManager.addPipeline(pipeline);
-          stateManager.finalizePipeline(pipeline.getId());
-          pipelines.add(pipeline);
-        }
-      }
-    }
-
-    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
-        .values()) {
-      for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
-          .values()) {
-        for (Pipeline.PipelineState state : Pipeline.PipelineState.values()) {
-          // verify pipelines received
-          List<Pipeline> pipelines1 =
-              stateManager.getPipelines(type, factor, state);
-          Assert.assertEquals(5, pipelines1.size());
-          pipelines1.forEach(p -> {
-            Assert.assertEquals(type, p.getType());
-            Assert.assertEquals(factor, p.getFactor());
-            Assert.assertEquals(state, p.getPipelineState());
-          });
-        }
-      }
-    }
-
-    // clean up
-    for (Pipeline pipeline : pipelines) {
-      removePipeline(pipeline);
-    }
-  }
-
-  @Test
-  public void testAddAndGetContainer() throws IOException {
-    long containerID = 0;
-    Pipeline pipeline = createDummyPipeline(1);
-    stateManager.addPipeline(pipeline);
-    pipeline = stateManager.getPipeline(pipeline.getId());
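-    // a container can be added while the pipeline is still in ALLOCATED state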
-    stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
-
-    // move pipeline to open state
-    stateManager.openPipeline(pipeline.getId());
-    stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
-    stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
-
-    // verify the number of containers returned
-    Set<ContainerID> containerIDs =
-        stateManager.getContainers(pipeline.getId());
-    Assert.assertEquals(containerIDs.size(), containerID);
-
-    removePipeline(pipeline);
-    try {
-      stateManager.addContainerToPipeline(pipeline.getId(),
-          ContainerID.valueof(++containerID));
-      Assert.fail("Container should not have been added");
-    } catch (IOException e) {
-      // Cannot add a container to a removed pipeline
-      Assert.assertTrue(e.getMessage().contains("not found"));
-    }
-  }
-
-  @Test
-  public void testRemovePipeline() throws IOException {
-    Pipeline pipeline = createDummyPipeline(1);
-    stateManager.addPipeline(pipeline);
-    // open the pipeline
-    stateManager.openPipeline(pipeline.getId());
-    stateManager
-        .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1));
-
-    try {
-      stateManager.removePipeline(pipeline.getId());
-      Assert.fail("Pipeline should not have been removed");
-    } catch (IOException e) {
-      // cannot remove a pipeline that is not yet closed
-      Assert.assertTrue(e.getMessage().contains("not yet closed"));
-    }
-
-    // close the pipeline
-    stateManager.finalizePipeline(pipeline.getId());
-    // remove containers and then remove the pipeline
-    removePipeline(pipeline);
-  }
-
-  @Test
-  public void testRemoveContainer() throws IOException {
-    long containerID = 1;
-    Pipeline pipeline = createDummyPipeline(1);
-    // create an open pipeline in stateMap
-    stateManager.addPipeline(pipeline);
-    stateManager.openPipeline(pipeline.getId());
-
-    stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(containerID));
-    Assert.assertEquals(1, stateManager.getContainers(pipeline.getId()).size());
-    stateManager.removeContainerFromPipeline(pipeline.getId(),
-        ContainerID.valueof(containerID));
-    Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size());
-
-    // add two containers in the pipeline
-    stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
-    stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
-    Assert.assertEquals(2, stateManager.getContainers(pipeline.getId()).size());
-
-    // move pipeline to closing state
-    stateManager.finalizePipeline(pipeline.getId());
-
-    stateManager.removeContainerFromPipeline(pipeline.getId(),
-        ContainerID.valueof(containerID));
-    stateManager.removeContainerFromPipeline(pipeline.getId(),
-        ContainerID.valueof(--containerID));
-    Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size());
-
-    // clean up
-    stateManager.removePipeline(pipeline.getId());
-  }
-
-  @Test
-  public void testFinalizePipeline() throws IOException {
-    Pipeline pipeline = createDummyPipeline(1);
-    stateManager.addPipeline(pipeline);
-    // finalize on ALLOCATED pipeline
-    stateManager.finalizePipeline(pipeline.getId());
-    Assert.assertEquals(Pipeline.PipelineState.CLOSED,
-        stateManager.getPipeline(pipeline.getId()).getPipelineState());
-    // clean up
-    removePipeline(pipeline);
-
-    pipeline = createDummyPipeline(1);
-    stateManager.addPipeline(pipeline);
-    stateManager.openPipeline(pipeline.getId());
-    // finalize on OPEN pipeline
-    stateManager.finalizePipeline(pipeline.getId());
-    Assert.assertEquals(Pipeline.PipelineState.CLOSED,
-        stateManager.getPipeline(pipeline.getId()).getPipelineState());
-    // clean up
-    removePipeline(pipeline);
-
-    pipeline = createDummyPipeline(1);
-    stateManager.addPipeline(pipeline);
-    stateManager.openPipeline(pipeline.getId());
-    stateManager.finalizePipeline(pipeline.getId());
-    // finalize should work on already closed pipeline
-    stateManager.finalizePipeline(pipeline.getId());
-    Assert.assertEquals(Pipeline.PipelineState.CLOSED,
-        stateManager.getPipeline(pipeline.getId()).getPipelineState());
-    // clean up
-    removePipeline(pipeline);
-  }
-
-  @Test
-  public void testOpenPipeline() throws IOException {
-    Pipeline pipeline = createDummyPipeline(1);
-    stateManager.addPipeline(pipeline);
-    // open on ALLOCATED pipeline
-    stateManager.openPipeline(pipeline.getId());
-    Assert.assertEquals(Pipeline.PipelineState.OPEN,
-        stateManager.getPipeline(pipeline.getId()).getPipelineState());
-
-    stateManager.openPipeline(pipeline.getId());
-    // open should work on already open pipeline
-    Assert.assertEquals(Pipeline.PipelineState.OPEN,
-        stateManager.getPipeline(pipeline.getId()).getPipelineState());
-    // clean up
-    removePipeline(pipeline);
-  }
-
-  @Test
-  public void testQueryPipeline() throws IOException {
-    Pipeline pipeline = createDummyPipeline(HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.THREE, 3);
-    // pipeline in allocated state should not be reported
-    stateManager.addPipeline(pipeline);
-    Assert.assertEquals(0, stateManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN)
-        .size());
-
-    // pipeline in open state should be reported
-    stateManager.openPipeline(pipeline.getId());
-    Assert.assertEquals(1, stateManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN)
-        .size());
-
-    Pipeline pipeline2 = createDummyPipeline(HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.THREE, 3);
-    pipeline2 = Pipeline.newBuilder(pipeline2)
-        .setState(Pipeline.PipelineState.OPEN)
-        .build();
-    // pipeline in open state should be reported
-    stateManager.addPipeline(pipeline2);
-    Assert.assertEquals(2, stateManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN)
-        .size());
-
-    // pipeline in closed state should not be reported
-    stateManager.finalizePipeline(pipeline2.getId());
-    Assert.assertEquals(1, stateManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN)
-        .size());
-
-    // clean up
-    removePipeline(pipeline);
-    removePipeline(pipeline2);
-  }
-
-  private void removePipeline(Pipeline pipeline) throws IOException {
-    stateManager.finalizePipeline(pipeline.getId());
-    stateManager.removePipeline(pipeline.getId());
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
deleted file mode 100644
index 6ace90c..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * Tests for RatisPipelineUtils.
- */
-public class TestRatisPipelineCreateAndDestroy {
-
-  private static MiniOzoneCluster cluster;
-  private OzoneConfiguration conf = new OzoneConfiguration();
-  private static PipelineManager pipelineManager;
-
-  public void init(int numDatanodes) throws Exception {
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        GenericTestUtils.getRandomizedTempPath());
-    cluster = MiniOzoneCluster.newBuilder(conf)
-            .setNumDatanodes(numDatanodes)
-            .setHbInterval(1000)
-            .setHbProcessorInterval(1000)
-            .build();
-    cluster.waitForClusterToBeReady();
-    StorageContainerManager scm = cluster.getStorageContainerManager();
-    pipelineManager = scm.getPipelineManager();
-  }
-
-  @After
-  public void cleanup() {
-    cluster.shutdown();
-  }
-
-  @Test(timeout = 180000)
-  public void testAutomaticPipelineCreationOnPipelineDestroy()
-      throws Exception {
-    init(6);
-    // make sure two pipelines are created
-    waitForPipelines(2);
-    List<Pipeline> pipelines = pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN);
-    for (Pipeline pipeline : pipelines) {
-      pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
-    }
-    // make sure two pipelines are created
-    waitForPipelines(2);
-  }
-
-  @Test(timeout = 180000)
-  public void testPipelineCreationOnNodeRestart() throws Exception {
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL,
-        5, TimeUnit.SECONDS);
-    init(3);
-    // make sure a pipeline is created
-    waitForPipelines(1);
-    List<HddsDatanodeService> dns = new ArrayList<>(cluster.getHddsDatanodes());
-
-    List<Pipeline> pipelines =
-        pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
-    for (HddsDatanodeService dn : dns) {
-      cluster.shutdownHddsDatanode(dn.getDatanodeDetails());
-    }
-
-    // try creating another pipeline now
-    try {
-      pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE);
-      Assert.fail("pipeline creation should fail after shutting down pipeline");
-    } catch (IOException ioe) {
-      // As now all datanodes are shutdown, they move to stale state, there
-      // will be no sufficient datanodes to create the pipeline.
-      Assert.assertTrue(ioe instanceof InsufficientDatanodesException);
-    }
-
-    // make sure the pipelines are destroyed
-    waitForPipelines(0);
-    for (HddsDatanodeService dn : dns) {
-      cluster.restartHddsDatanode(dn.getDatanodeDetails(), false);
-    }
-
-    // destroy the existing pipelines
-    for (Pipeline pipeline : pipelines) {
-      pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
-    }
-    // make sure a pipeline is created after the nodes restart
-    pipelineManager.triggerPipelineCreation();
-    waitForPipelines(1);
-  }
-
-  private void waitForPipelines(int numPipelines)
-      throws TimeoutException, InterruptedException {
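-    // poll every 100 ms, for up to 40 s, until the expected number of open
-    // pipelines is reached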
-    GenericTestUtils.waitFor(() -> pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN)
-        .size() == numPipelines, 100, 40000);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
deleted file mode 100644
index 4b3d5d6..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Test for RatisPipelineProvider.
- */
-public class TestRatisPipelineProvider {
-
-  private NodeManager nodeManager;
-  private PipelineProvider provider;
-  private PipelineStateManager stateManager;
-
-  @Before
-  public void init() throws Exception {
-    nodeManager = new MockNodeManager(true, 10);
-    stateManager = new PipelineStateManager(new OzoneConfiguration());
-    provider = new MockRatisPipelineProvider(nodeManager,
-        stateManager, new OzoneConfiguration());
-  }
-
-  private void createPipelineAndAssertions(
-          HddsProtos.ReplicationFactor factor) throws IOException {
-    Pipeline pipeline = provider.create(factor);
-    stateManager.addPipeline(pipeline);
-    Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS);
-    Assert.assertEquals(pipeline.getFactor(), factor);
-    Assert.assertEquals(pipeline.getPipelineState(),
-            Pipeline.PipelineState.OPEN);
-    Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
-    Pipeline pipeline1 = provider.create(factor);
-    stateManager.addPipeline(pipeline1);
-    // New pipeline should not overlap with the previously created pipeline
-    Assert.assertTrue(
-        CollectionUtils.intersection(pipeline.getNodes(), pipeline1.getNodes())
-            .isEmpty());
-    Assert.assertEquals(pipeline1.getType(), HddsProtos.ReplicationType.RATIS);
-    Assert.assertEquals(pipeline1.getFactor(), factor);
-    Assert.assertEquals(pipeline1.getPipelineState(),
-            Pipeline.PipelineState.OPEN);
-    Assert.assertEquals(pipeline1.getNodes().size(), factor.getNumber());
-  }
-
-  @Test
-  public void testCreatePipelineWithFactor() throws IOException {
-    HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
-    Pipeline pipeline = provider.create(factor);
-    stateManager.addPipeline(pipeline);
-    Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS);
-    Assert.assertEquals(pipeline.getFactor(), factor);
-    Assert.assertEquals(pipeline.getPipelineState(),
-        Pipeline.PipelineState.OPEN);
-    Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
-
-    factor = HddsProtos.ReplicationFactor.ONE;
-    Pipeline pipeline1 = provider.create(factor);
-    stateManager.addPipeline(pipeline1);
-    // The new pipeline should overlap with the previously created pipeline,
-    // and one datanode should be common to the two pipelines.
-    Assert.assertEquals(
-        CollectionUtils.intersection(pipeline.getNodes(),
-            pipeline1.getNodes()).size(), 1);
-    Assert.assertEquals(pipeline1.getType(), HddsProtos.ReplicationType.RATIS);
-    Assert.assertEquals(pipeline1.getFactor(), factor);
-    Assert.assertEquals(pipeline1.getPipelineState(),
-        Pipeline.PipelineState.OPEN);
-    Assert.assertEquals(pipeline1.getNodes().size(), factor.getNumber());
-  }
-
-  @Test
-  public void testCreatePipelineWithFactorThree() throws IOException {
-    createPipelineAndAssertions(HddsProtos.ReplicationFactor.THREE);
-  }
-
-  @Test
-  public void testCreatePipelineWithFactorOne() throws IOException {
-    createPipelineAndAssertions(HddsProtos.ReplicationFactor.ONE);
-  }
-
-  private List<DatanodeDetails> createListOfNodes(int nodeCount) {
-    List<DatanodeDetails> nodes = new ArrayList<>();
-    for (int i = 0; i < nodeCount; i++) {
-      nodes.add(TestUtils.randomDatanodeDetails());
-    }
-    return nodes;
-  }
-
-  @Test
-  public void testCreatePipelineWithNodes() {
-    HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
-    Pipeline pipeline =
-        provider.create(factor, createListOfNodes(factor.getNumber()));
-    Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS);
-    Assert.assertEquals(pipeline.getFactor(), factor);
-    Assert.assertEquals(
-        pipeline.getPipelineState(), Pipeline.PipelineState.OPEN);
-    Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
-
-    factor = HddsProtos.ReplicationFactor.ONE;
-    pipeline = provider.create(factor, createListOfNodes(factor.getNumber()));
-    Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS);
-    Assert.assertEquals(pipeline.getFactor(), factor);
-    Assert.assertEquals(pipeline.getPipelineState(),
-        Pipeline.PipelineState.OPEN);
-    Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
-  }
-
-  @Test
-  public void testCreatePipelinesDnExclude() throws IOException {
-
-    // We need 9 Healthy DNs in MockNodeManager.
-    NodeManager mockNodeManager = new MockNodeManager(true, 12);
-    PipelineStateManager stateManagerMock =
-        new PipelineStateManager(new OzoneConfiguration());
-    PipelineProvider providerMock = new MockRatisPipelineProvider(
-        mockNodeManager, stateManagerMock, new OzoneConfiguration());
-
-    // Use up first 3 DNs for an open pipeline.
-    List<DatanodeDetails> openPiplineDns = mockNodeManager.getAllNodes()
-        .subList(0, 3);
-    HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
-
-    Pipeline openPipeline = Pipeline.newBuilder()
-        .setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(factor)
-        .setNodes(openPiplineDns)
-        .setState(Pipeline.PipelineState.OPEN)
-        .setId(PipelineID.randomId())
-        .build();
-
-    stateManagerMock.addPipeline(openPipeline);
-
-    // Use up next 3 DNs also for an open pipeline.
-    List<DatanodeDetails> moreOpenPiplineDns = mockNodeManager.getAllNodes()
-        .subList(3, 6);
-    Pipeline anotherOpenPipeline = Pipeline.newBuilder()
-        .setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(factor)
-        .setNodes(moreOpenPiplineDns)
-        .setState(Pipeline.PipelineState.OPEN)
-        .setId(PipelineID.randomId())
-        .build();
-    stateManagerMock.addPipeline(anotherOpenPipeline);
-
-    // Use up next 3 DNs also for a closed pipeline.
-    List<DatanodeDetails> closedPiplineDns = mockNodeManager.getAllNodes()
-        .subList(6, 9);
-    Pipeline anotherClosedPipeline = Pipeline.newBuilder()
-        .setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(factor)
-        .setNodes(closedPiplineDns)
-        .setState(Pipeline.PipelineState.CLOSED)
-        .setId(PipelineID.randomId())
-        .build();
-    stateManagerMock.addPipeline(anotherClosedPipeline);
-
-    Pipeline pipeline = providerMock.create(factor);
-    Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS);
-    Assert.assertEquals(pipeline.getFactor(), factor);
-    Assert.assertEquals(pipeline.getPipelineState(),
-        Pipeline.PipelineState.OPEN);
-    Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
-    List<DatanodeDetails> pipelineNodes = pipeline.getNodes();
-
-    // Pipeline nodes cannot be from open pipelines.
-    Assert.assertTrue(
-        pipelineNodes.parallelStream().noneMatch(dn ->
-            openPiplineDns.contains(dn) || moreOpenPiplineDns.contains(dn)));
-
-    // Since we have only 9 healthy DNs, at least 1 pipeline node should have
-    // been from the closed pipeline DN list.
-    Assert.assertTrue(pipelineNodes.parallelStream().anyMatch(
-        closedPiplineDns::contains));
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
deleted file mode 100644
index 2a486b1..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
+++ /dev/null
@@ -1,317 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Test cases to verify PipelineManager.
- */
-public class TestSCMPipelineManager {
-  private static MockNodeManager nodeManager;
-  private static File testDir;
-  private static Configuration conf;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = new OzoneConfiguration();
-    testDir = GenericTestUtils
-        .getTestDir(TestSCMPipelineManager.class.getSimpleName());
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    boolean folderExisted = testDir.exists() || testDir.mkdirs();
-    if (!folderExisted) {
-      throw new IOException("Unable to create test directory path");
-    }
-    nodeManager = new MockNodeManager(true, 20);
-  }
-
-  @After
-  public void cleanup() {
-    FileUtil.fullyDelete(testDir);
-  }
-
-  @Test
-  public void testPipelineReload() throws IOException {
-    SCMPipelineManager pipelineManager =
-        new SCMPipelineManager(conf, nodeManager, new EventQueue(), null);
-    PipelineProvider mockRatisProvider =
-        new MockRatisPipelineProvider(nodeManager,
-            pipelineManager.getStateManager(), conf);
-    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-        mockRatisProvider);
-    Set<Pipeline> pipelines = new HashSet<>();
-    for (int i = 0; i < 5; i++) {
-      Pipeline pipeline = pipelineManager
-          .createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
-      pipelines.add(pipeline);
-    }
-    pipelineManager.close();
-
-    // new pipeline manager should be able to load the pipelines from the db
-    pipelineManager =
-        new SCMPipelineManager(conf, nodeManager, new EventQueue(), null);
-    mockRatisProvider =
-        new MockRatisPipelineProvider(nodeManager,
-            pipelineManager.getStateManager(), conf);
-    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-        mockRatisProvider);
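-    // reopen the reloaded pipelines before comparing them with the originals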
-    for (Pipeline p : pipelines) {
-      pipelineManager.openPipeline(p.getId());
-    }
-    List<Pipeline> pipelineList =
-        pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS);
-    Assert.assertEquals(pipelines, new HashSet<>(pipelineList));
-
-    // clean up
-    for (Pipeline pipeline : pipelines) {
-      pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
-    }
-    pipelineManager.close();
-  }
-
-  @Test
-  public void testRemovePipeline() throws IOException {
-    SCMPipelineManager pipelineManager =
-        new SCMPipelineManager(conf, nodeManager, new EventQueue(), null);
-    PipelineProvider mockRatisProvider =
-        new MockRatisPipelineProvider(nodeManager,
-            pipelineManager.getStateManager(), conf);
-    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-        mockRatisProvider);
-
-    Pipeline pipeline = pipelineManager
-        .createPipeline(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
-    pipelineManager.openPipeline(pipeline.getId());
-    pipelineManager
-        .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1));
-    pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
-    pipelineManager.close();
-
-    // new pipeline manager should not be able to load removed pipelines
-    pipelineManager =
-        new SCMPipelineManager(conf, nodeManager, new EventQueue(), null);
-    try {
-      pipelineManager.getPipeline(pipeline.getId());
-      Assert.fail("Pipeline should not have been retrieved");
-    } catch (IOException e) {
-      Assert.assertTrue(e.getMessage().contains("not found"));
-    }
-
-    // clean up
-    pipelineManager.close();
-  }
-
-  @Test
-  public void testPipelineReport() throws IOException {
-    EventQueue eventQueue = new EventQueue();
-    SCMPipelineManager pipelineManager =
-        new SCMPipelineManager(conf, nodeManager, eventQueue, null);
-    PipelineProvider mockRatisProvider =
-        new MockRatisPipelineProvider(nodeManager,
-            pipelineManager.getStateManager(), conf);
-    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-        mockRatisProvider);
-
-    SCMSafeModeManager scmSafeModeManager =
-        new SCMSafeModeManager(new OzoneConfiguration(),
-            new ArrayList<>(), pipelineManager, eventQueue);
-
-    // create a pipeline in allocated state with no dns yet reported
-    Pipeline pipeline = pipelineManager
-        .createPipeline(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
-    Assert
-        .assertFalse(pipelineManager.getPipeline(pipeline.getId()).isHealthy());
-    Assert
-        .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isOpen());
-
-    // get pipeline report from each dn in the pipeline
-    PipelineReportHandler pipelineReportHandler =
-        new PipelineReportHandler(scmSafeModeManager, pipelineManager, conf);
-    for (DatanodeDetails dn: pipeline.getNodes()) {
-      PipelineReportFromDatanode pipelineReportFromDatanode =
-          TestUtils.getPipelineReportFromDatanode(dn, pipeline.getId());
-      // pipeline is not healthy until all dns report
-      Assert.assertFalse(
-          pipelineManager.getPipeline(pipeline.getId()).isHealthy());
-      pipelineReportHandler
-          .onMessage(pipelineReportFromDatanode, new EventQueue());
-    }
-
-    // pipeline is healthy when all dns report
-    Assert
-        .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isHealthy());
-    // pipeline should now move to open state
-    Assert
-        .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isOpen());
-
-    // close the pipeline
-    pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
-
-    for (DatanodeDetails dn: pipeline.getNodes()) {
-      PipelineReportFromDatanode pipelineReportFromDatanode =
-          TestUtils.getPipelineReportFromDatanode(dn, pipeline.getId());
-      // pipeline report for destroyed pipeline should be ignored
-      pipelineReportHandler
-          .onMessage(pipelineReportFromDatanode, new EventQueue());
-    }
-
-    try {
-      pipelineManager.getPipeline(pipeline.getId());
-      Assert.fail("Pipeline should not have been retrieved");
-    } catch (IOException e) {
-      Assert.assertTrue(e.getMessage().contains("not found"));
-    }
-
-    // clean up
-    pipelineManager.close();
-  }
-
-  @Test
-  public void testPipelineCreationFailedMetric() throws Exception {
-    MockNodeManager nodeManagerMock = new MockNodeManager(true,
-        20);
-    SCMPipelineManager pipelineManager =
-        new SCMPipelineManager(conf, nodeManagerMock, new EventQueue(), null);
-    PipelineProvider mockRatisProvider =
-        new MockRatisPipelineProvider(nodeManagerMock,
-            pipelineManager.getStateManager(), conf);
-    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-        mockRatisProvider);
-
-    MetricsRecordBuilder metrics = getMetrics(
-        SCMPipelineMetrics.class.getSimpleName());
-    long numPipelineCreated = getLongCounter("NumPipelineCreated",
-        metrics);
-    Assert.assertTrue(numPipelineCreated == 0);
-
-    // 3 DNs are unhealthy.
-    // Create 5 pipelines (Use up 15 Datanodes)
-    for (int i = 0; i < 5; i++) {
-      Pipeline pipeline = pipelineManager
-          .createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
-      Assert.assertNotNull(pipeline);
-    }
-
-    metrics = getMetrics(
-        SCMPipelineMetrics.class.getSimpleName());
-    numPipelineCreated = getLongCounter("NumPipelineCreated", metrics);
-    Assert.assertTrue(numPipelineCreated == 5);
-
-    long numPipelineCreateFailed = getLongCounter(
-        "NumPipelineCreationFailed", metrics);
-    Assert.assertTrue(numPipelineCreateFailed == 0);
-
-    // This should fail...
-    try {
-      pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE);
-      Assert.fail();
-    } catch (InsufficientDatanodesException idEx) {
-      Assert.assertEquals(
-          "Cannot create pipeline of factor 3 using 1 nodes.",
-          idEx.getMessage());
-    }
-
-    metrics = getMetrics(
-        SCMPipelineMetrics.class.getSimpleName());
-    numPipelineCreated = getLongCounter("NumPipelineCreated", metrics);
-    Assert.assertTrue(numPipelineCreated == 5);
-
-    numPipelineCreateFailed = getLongCounter(
-        "NumPipelineCreationFailed", metrics);
-    Assert.assertTrue(numPipelineCreateFailed == 0);
-  }
-
-  @Test
-  public void testActivateDeactivatePipeline() throws IOException {
-    final SCMPipelineManager pipelineManager =
-        new SCMPipelineManager(conf, nodeManager, new EventQueue(), null);
-    final PipelineProvider mockRatisProvider =
-        new MockRatisPipelineProvider(nodeManager,
-            pipelineManager.getStateManager(), conf);
-
-    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
-        mockRatisProvider);
-
-    final Pipeline pipeline = pipelineManager
-        .createPipeline(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
-    final PipelineID pid = pipeline.getId();
-
-    pipelineManager.openPipeline(pid);
-    pipelineManager.addContainerToPipeline(pid, ContainerID.valueof(1));
-
-    Assert.assertTrue(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
-            Pipeline.PipelineState.OPEN).contains(pipeline));
-
-    Assert.assertEquals(Pipeline.PipelineState.OPEN,
-        pipelineManager.getPipeline(pid).getPipelineState());
-
-    pipelineManager.deactivatePipeline(pid);
-    Assert.assertEquals(Pipeline.PipelineState.DORMANT,
-        pipelineManager.getPipeline(pid).getPipelineState());
-
-    Assert.assertFalse(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
-            Pipeline.PipelineState.OPEN).contains(pipeline));
-
-    pipelineManager.activatePipeline(pid);
-
-    Assert.assertTrue(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
-            Pipeline.PipelineState.OPEN).contains(pipeline));
-
-    pipelineManager.close();
-  }
-}
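
The reload test above is a plain persist/reopen round trip: create pipelines, close the manager (which flushes its state to the metadata db), build a fresh manager over the same conf, and compare. A minimal sketch of that cycle, reusing only the classes visible in the deleted test (MockNodeManager, MockRatisPipelineProvider and the SCMPipelineManager constructor are assumed available on the test classpath):

    // Persist pipelines, then verify a fresh manager reloads them from the db.
    SCMPipelineManager first =
        new SCMPipelineManager(conf, nodeManager, new EventQueue(), null);
    first.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
        new MockRatisPipelineProvider(nodeManager, first.getStateManager(), conf));
    Pipeline created = first.createPipeline(HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.THREE);
    first.close();  // flushes pipeline metadata

    SCMPipelineManager second =
        new SCMPipelineManager(conf, nodeManager, new EventQueue(), null);
    Assert.assertTrue(second.getPipelines(HddsProtos.ReplicationType.RATIS)
        .contains(created));  // reloaded, not recreated
    second.close();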
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
deleted file mode 100644
index 459a67a..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.protocol.proto
-        .HddsProtos.ReplicationFactor.THREE;
-import static org.apache.hadoop.hdds.protocol.proto
-        .HddsProtos.ReplicationType.RATIS;
-
-/**
- * Test SCM restart and recovery wrt pipelines.
- */
-public class TestSCMRestart {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static Pipeline ratisPipeline1;
-  private static Pipeline ratisPipeline2;
-  private static ContainerManager containerManager;
-  private static ContainerManager newContainerManager;
-  private static PipelineManager pipelineManager;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(4)
-        .setHbInterval(1000)
-        .setHbProcessorInterval(1000)
-        .build();
-    cluster.waitForClusterToBeReady();
-    StorageContainerManager scm = cluster.getStorageContainerManager();
-    containerManager = scm.getContainerManager();
-    pipelineManager = scm.getPipelineManager();
-    ratisPipeline1 = pipelineManager.getPipeline(
-        containerManager.allocateContainer(
-        RATIS, THREE, "Owner1").getPipelineID());
-    ratisPipeline2 = pipelineManager.getPipeline(
-        containerManager.allocateContainer(
-        RATIS, ONE, "Owner2").getPipelineID());
-    // At this stage, there should be 2 pipelines, each with 1 open
-    // container. Restart the SCM and verify that the pipelines are in the
-    // correct state.
-    cluster.restartStorageContainerManager(true);
-    newContainerManager = cluster.getStorageContainerManager()
-        .getContainerManager();
-    pipelineManager = cluster.getStorageContainerManager().getPipelineManager();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testPipelineWithScmRestart() throws IOException {
-    // After restart, make sure that the pipelines are still present.
-    Pipeline ratisPipeline1AfterRestart =
-        pipelineManager.getPipeline(ratisPipeline1.getId());
-    Pipeline ratisPipeline2AfterRestart =
-        pipelineManager.getPipeline(ratisPipeline2.getId());
-    Assert.assertNotSame(ratisPipeline1AfterRestart, ratisPipeline1);
-    Assert.assertNotSame(ratisPipeline2AfterRestart, ratisPipeline2);
-    Assert.assertEquals(ratisPipeline1AfterRestart, ratisPipeline1);
-    Assert.assertEquals(ratisPipeline2AfterRestart, ratisPipeline2);
-
-    // Try creating a new container; it should come from the same pipeline
-    // as before the restart.
-    ContainerInfo containerInfo = newContainerManager
-        .allocateContainer(RATIS, THREE, "Owner1");
-    Assert.assertEquals(containerInfo.getPipelineID(), ratisPipeline1.getId());
-  }
-}
\ No newline at end of file
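
The assertion pair in testPipelineWithScmRestart is worth noting: assertNotSame proves the reloaded pipeline is a genuinely new object, so the following assertEquals exercises real state comparison rather than reference identity. The same check in isolation (a hedged generic sketch; any type with a meaningful equals() works):

    import org.junit.Assert;

    // Round-trip check: 'reloaded' must be a distinct instance that still
    // compares equal, proving state survived persistence, not just identity.
    static <T> void assertRoundTrip(T original, T reloaded) {
      Assert.assertNotSame(original, reloaded);
      Assert.assertEquals(original, reloaded);
    }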
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
deleted file mode 100644
index 22fd95b..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Test for SimplePipelineProvider.
- */
-public class TestSimplePipelineProvider {
-
-  private NodeManager nodeManager;
-  private PipelineProvider provider;
-  private PipelineStateManager stateManager;
-
-  @Before
-  public void init() throws Exception {
-    nodeManager = new MockNodeManager(true, 10);
-    stateManager = new PipelineStateManager(new OzoneConfiguration());
-    provider = new SimplePipelineProvider(nodeManager);
-  }
-
-  @Test
-  public void testCreatePipelineWithFactor() throws IOException {
-    HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
-    Pipeline pipeline = provider.create(factor);
-    stateManager.addPipeline(pipeline);
-    Assert.assertEquals(pipeline.getType(),
-        HddsProtos.ReplicationType.STAND_ALONE);
-    Assert.assertEquals(pipeline.getFactor(), factor);
-    Assert.assertEquals(pipeline.getPipelineState(),
-        Pipeline.PipelineState.OPEN);
-    Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
-
-    factor = HddsProtos.ReplicationFactor.ONE;
-    Pipeline pipeline1 = provider.create(factor);
-    stateManager.addPipeline(pipeline1);
-    Assert.assertEquals(pipeline1.getType(),
-        HddsProtos.ReplicationType.STAND_ALONE);
-    Assert.assertEquals(pipeline1.getFactor(), factor);
-    Assert.assertEquals(pipeline1.getPipelineState(),
-        Pipeline.PipelineState.OPEN);
-    Assert.assertEquals(pipeline1.getNodes().size(), factor.getNumber());
-  }
-
-  private List<DatanodeDetails> createListOfNodes(int nodeCount) {
-    List<DatanodeDetails> nodes = new ArrayList<>();
-    for (int i = 0; i < nodeCount; i++) {
-      nodes.add(TestUtils.randomDatanodeDetails());
-    }
-    return nodes;
-  }
-
-  @Test
-  public void testCreatePipelineWithNodes() throws IOException {
-    HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
-    Pipeline pipeline =
-        provider.create(factor, createListOfNodes(factor.getNumber()));
-    Assert.assertEquals(pipeline.getType(),
-        HddsProtos.ReplicationType.STAND_ALONE);
-    Assert.assertEquals(pipeline.getFactor(), factor);
-    Assert.assertEquals(pipeline.getPipelineState(),
-        Pipeline.PipelineState.OPEN);
-    Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
-
-    factor = HddsProtos.ReplicationFactor.ONE;
-    pipeline = provider.create(factor, createListOfNodes(factor.getNumber()));
-    Assert.assertEquals(pipeline.getType(),
-        HddsProtos.ReplicationType.STAND_ALONE);
-    Assert.assertEquals(pipeline.getFactor(), factor);
-    Assert.assertEquals(pipeline.getPipelineState(),
-        Pipeline.PipelineState.OPEN);
-    Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
deleted file mode 100644
index f685b17..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- *     http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-/**
- * Package info tests.
- */
-package org.apache.hadoop.hdds.scm.pipeline;
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
deleted file mode 100644
index 7cfd555..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.safemode;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ReplicationManager;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.util.List;
-import java.util.concurrent.TimeoutException;
-
-import static org.junit.Assert.fail;
-
-/**
- * This class tests SCM Safe mode with pipeline rules.
- */
-
-public class TestSCMSafeModeWithPipelineRules {
-
-  private static MiniOzoneCluster cluster;
-  private OzoneConfiguration conf = new OzoneConfiguration();
-  private PipelineManager pipelineManager;
-  private MiniOzoneCluster.Builder clusterBuilder;
-
-  @Rule
-  public TemporaryFolder temporaryFolder = new TemporaryFolder();
-
-  public void setup(int numDatanodes) throws Exception {
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        temporaryFolder.newFolder().toString());
-    conf.setBoolean(
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK,
-        true);
-    conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "10s");
-    conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s");
-    clusterBuilder = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(numDatanodes)
-        .setHbInterval(1000)
-        .setHbProcessorInterval(1000);
-
-    cluster = clusterBuilder.build();
-    cluster.waitForClusterToBeReady();
-    StorageContainerManager scm = cluster.getStorageContainerManager();
-    pipelineManager = scm.getPipelineManager();
-  }
-
-
-  @Test
-  public void testScmSafeMode() throws Exception {
-
-    int datanodeCount = 6;
-    setup(datanodeCount);
-
-    waitForRatis3NodePipelines(datanodeCount/3);
-    waitForRatis1NodePipelines(datanodeCount);
-
-    int totalPipelineCount = datanodeCount + (datanodeCount/3);
-
-    //Cluster is started successfully
-    cluster.stop();
-
-    cluster.restartOzoneManager();
-    cluster.restartStorageContainerManager(false);
-
-    pipelineManager = cluster.getStorageContainerManager().getPipelineManager();
-    List<Pipeline> pipelineList =
-        pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
-
-
-    pipelineList.get(0).getNodes().forEach(datanodeDetails -> {
-      try {
-        cluster.restartHddsDatanode(datanodeDetails, false);
-      } catch (Exception ex) {
-        fail("Datanode restart failed");
-      }
-    });
-
-
-    SCMSafeModeManager scmSafeModeManager =
-        cluster.getStorageContainerManager().getScmSafeModeManager();
-
-
-    // Ceil(0.1 * 2) is 1; as one pipeline is healthy, the healthy pipeline
-    // rule is satisfied.
-
-    GenericTestUtils.waitFor(() ->
-        scmSafeModeManager.getHealthyPipelineSafeModeRule()
-            .validate(), 1000, 60000);
-
-    // As Ceil(0.9 * 2) is 2, and no datanodes from the second pipeline have
-    // reported yet, this rule is not met.
-    GenericTestUtils.waitFor(() ->
-        !scmSafeModeManager.getOneReplicaPipelineSafeModeRule()
-            .validate(), 1000, 60000);
-
-    Assert.assertTrue(cluster.getStorageContainerManager().isInSafeMode());
-
-    DatanodeDetails restartedDatanode = pipelineList.get(1).getFirstNode();
-    // Now restart one datanode from the 2nd pipeline
-    try {
-      cluster.restartHddsDatanode(restartedDatanode, false);
-    } catch (Exception ex) {
-      fail("Datanode restart failed");
-    }
-
-
-    GenericTestUtils.waitFor(() ->
-        scmSafeModeManager.getOneReplicaPipelineSafeModeRule()
-            .validate(), 1000, 60000);
-
-    GenericTestUtils.waitFor(() -> !scmSafeModeManager.getInSafeMode(), 1000,
-        60000);
-
-    // As the wait time after safemode exit has not elapsed yet, the total
-    // pipeline count should match the original: 6 (1-node) + 2 (3-node).
-    Assert.assertEquals(totalPipelineCount,
-        pipelineManager.getPipelines().size());
-
-    ReplicationManager replicationManager =
-        cluster.getStorageContainerManager().getReplicationManager();
-
-    GenericTestUtils.waitFor(() ->
-        replicationManager.isRunning(), 1000, 60000);
-
-
-    // As 4 datanodes have reported: 4 single-node pipelines and one
-    // 3-node pipeline.
-
-    waitForRatis1NodePipelines(4);
-    waitForRatis3NodePipelines(1);
-
-    // Restart other datanodes in the pipeline, and after some time we should
-    // have same count as original.
-    pipelineList.get(1).getNodes().forEach(datanodeDetails -> {
-      try {
-        if (!restartedDatanode.equals(datanodeDetails)) {
-          cluster.restartHddsDatanode(datanodeDetails, false);
-        }
-      } catch (Exception ex) {
-        fail("Datanode restart failed");
-      }
-    });
-
-    waitForRatis1NodePipelines(datanodeCount);
-    waitForRatis3NodePipelines(datanodeCount/3);
-
-  }
-
-  @After
-  public void tearDown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-
-  private void waitForRatis3NodePipelines(int numPipelines)
-      throws TimeoutException, InterruptedException {
-    GenericTestUtils.waitFor(() -> pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN)
-        .size() == numPipelines, 100, 60000);
-  }
-
-  private void waitForRatis1NodePipelines(int numPipelines)
-      throws TimeoutException, InterruptedException {
-    GenericTestUtils.waitFor(() -> pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.ONE, Pipeline.PipelineState.OPEN)
-        .size() == numPipelines, 100, 60000);
-  }
-}
\ No newline at end of file
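
Both safemode rules above are verified by polling rather than sleeping: GenericTestUtils.waitFor(condition, checkIntervalMs, timeoutMs) re-evaluates the condition until it holds or the timeout elapses. For readers without the Hadoop test jar, the pattern reduces to plain JDK code (a sketch; the exact waitFor semantics are inferred from the calls above):

    import java.util.concurrent.TimeoutException;
    import java.util.function.BooleanSupplier;

    // Poll 'condition' every 'intervalMs' until it is true or 'timeoutMs'
    // elapses, mirroring how the tests above use GenericTestUtils.waitFor.
    static void waitFor(BooleanSupplier condition, long intervalMs,
        long timeoutMs) throws TimeoutException, InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (!condition.getAsBoolean()) {
        if (System.currentTimeMillis() > deadline) {
          throw new TimeoutException("Condition not met in " + timeoutMs + "ms");
        }
        Thread.sleep(intervalMs);
      }
    }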
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
deleted file mode 100644
index 2eef206..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ /dev/null
@@ -1,281 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.log4j.Level;
-import org.apache.ratis.grpc.client.GrpcClientProtocolClient;
-import org.apache.ratis.util.LogUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.Executors;
-
-/**
- * This class causes random failures in the chaos cluster.
- */
-public class MiniOzoneChaosCluster extends MiniOzoneClusterImpl {
-
-  static final Logger LOG =
-      LoggerFactory.getLogger(MiniOzoneChaosCluster.class);
-
-  private final int numDatanodes;
-  private final ScheduledExecutorService executorService;
-
-  private ScheduledFuture scheduledFuture;
-
-  private enum FailureMode {
-    NODES_RESTART,
-    NODES_SHUTDOWN
-  }
-
-  public MiniOzoneChaosCluster(OzoneConfiguration conf,
-                               OzoneManager ozoneManager,
-                       StorageContainerManager scm,
-                       List<HddsDatanodeService> hddsDatanodes) {
-    super(conf, ozoneManager, scm, hddsDatanodes);
-
-    this.executorService =  Executors.newSingleThreadScheduledExecutor();
-    this.numDatanodes = getHddsDatanodes().size();
-    LOG.info("Starting MiniOzoneChaosCluster with {} datanodes", numDatanodes);
-    LogUtils.setLogLevel(GrpcClientProtocolClient.LOG, Level.WARN);
-  }
-
-  // Get the number of datanodes to fail in the cluster.
-  private int getNumberOfNodesToFail() {
-    return RandomUtils.nextBoolean() ? 1 : 2;
-  }
-
-  // Should the failed node wait for SCM to register the event before
-  // restart, i.e. fast restart or not.
-  private boolean isFastRestart() {
-    return RandomUtils.nextBoolean();
-  }
-
-  // Should the selected node be stopped or started.
-  private boolean shouldStop() {
-    return RandomUtils.nextBoolean();
-  }
-
-  // Get the datanode index of the datanode to fail.
-  private int getNodeToFail() {
-    return RandomUtils.nextInt() % numDatanodes;
-  }
-
-  private void restartNodes() {
-    final int numNodesToFail = getNumberOfNodesToFail();
-    LOG.info("Will restart {} nodes to simulate failure", numNodesToFail);
-    for (int i = 0; i < numNodesToFail; i++) {
-      boolean failureMode = isFastRestart();
-      int failedNodeIndex = getNodeToFail();
-      String failString = failureMode ? "Fast" : "Slow";
-      DatanodeDetails dn =
-          getHddsDatanodes().get(failedNodeIndex).getDatanodeDetails();
-      try {
-        LOG.info("{} Restarting DataNode: {}", failString, dn.getUuid());
-        restartHddsDatanode(failedNodeIndex, failureMode);
-        LOG.info("{} Completed restarting Datanode: {}", failString,
-            dn.getUuid());
-      } catch (Exception e) {
-        LOG.error("Failed to restartNodes Datanode {}", dn.getUuid(), e);
-      }
-    }
-  }
-
-  private void shutdownNodes() {
-    final int numNodesToFail = getNumberOfNodesToFail();
-    LOG.info("Will shutdown {} nodes to simulate failure", numNodesToFail);
-    for (int i = 0; i < numNodesToFail; i++) {
-      boolean shouldStop = shouldStop();
-      int failedNodeIndex = getNodeToFail();
-      String stopString = shouldStop ? "Stopping" : "Restarting";
-      DatanodeDetails dn =
-          getHddsDatanodes().get(failedNodeIndex).getDatanodeDetails();
-      try {
-        LOG.info("{} DataNode {}", stopString, dn.getUuid());
-
-        if (shouldStop) {
-          shutdownHddsDatanode(failedNodeIndex);
-        } else {
-          restartHddsDatanode(failedNodeIndex, true);
-        }
-        LOG.info("Completed {} DataNode {}", stopString, dn.getUuid());
-
-      } catch (Exception e) {
-        LOG.error("Failed {} Datanode {}", stopString, dn.getUuid(), e);
-      }
-    }
-  }
-
-  private FailureMode getFailureMode() {
-    return FailureMode.
-        values()[RandomUtils.nextInt() % FailureMode.values().length];
-  }
-
-  // Fail nodes randomly at configured timeout period.
-  private void fail() {
-    FailureMode mode = getFailureMode();
-    switch (mode) {
-    case NODES_RESTART:
-      restartNodes();
-      break;
-    case NODES_SHUTDOWN:
-      shutdownNodes();
-      break;
-
-    default:
-      LOG.error("invalid failure mode:{}", mode);
-      break;
-    }
-  }
-
-  void startChaos(long initialDelay, long period, TimeUnit timeUnit) {
-    LOG.info("Starting Chaos with failure period:{} unit:{} numDataNodes:{}",
-        period, timeUnit, numDatanodes);
-    scheduledFuture = executorService.scheduleAtFixedRate(this::fail,
-        initialDelay, period, timeUnit);
-  }
-
-  void stopChaos() throws Exception {
-    if (scheduledFuture != null) {
-      scheduledFuture.cancel(false);
-      scheduledFuture.get();
-    }
-  }
-
-  public void shutdown() {
-    try {
-      stopChaos();
-      executorService.shutdown();
-      executorService.awaitTermination(1, TimeUnit.DAYS);
-      // This should be called after stopChaos to be sure that the
-      // datanode collection is not modified during the shutdown.
-      super.shutdown();
-    } catch (Exception e) {
-      LOG.error("failed to shutdown MiniOzoneChaosCluster", e);
-    }
-  }
-
-  /**
-   * Builder for configuring the MiniOzoneChaosCluster to run.
-   */
-  public static class Builder extends MiniOzoneClusterImpl.Builder {
-
-    /**
-     * Creates a new Builder.
-     *
-     * @param conf configuration
-     */
-    public Builder(OzoneConfiguration conf) {
-      super(conf);
-    }
-
-    /**
-     * Sets the number of HddsDatanodes to be started as part of
-     * MiniOzoneChaosCluster.
-     *
-     * @param val number of datanodes
-     *
-     * @return MiniOzoneChaosCluster.Builder
-     */
-    public Builder setNumDatanodes(int val) {
-      super.setNumDatanodes(val);
-      return this;
-    }
-
-    @Override
-    void initializeConfiguration() throws IOException {
-      super.initializeConfiguration();
-      conf.setStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
-          2, StorageUnit.KB);
-      conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE,
-          16, StorageUnit.KB);
-      conf.setStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE,
-          4, StorageUnit.KB);
-      conf.setStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE,
-          8, StorageUnit.KB);
-      conf.setStorageSize(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
-          1, StorageUnit.MB);
-      conf.setTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT, 1000,
-          TimeUnit.MILLISECONDS);
-      conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, 10,
-          TimeUnit.SECONDS);
-      conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, 20,
-          TimeUnit.SECONDS);
-      conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1,
-          TimeUnit.SECONDS);
-      conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 1,
-          TimeUnit.SECONDS);
-      conf.setTimeDuration(
-          ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT, 5,
-          TimeUnit.SECONDS);
-      conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
-          1, TimeUnit.SECONDS);
-      conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1,
-          TimeUnit.SECONDS);
-      conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
-      conf.setInt("hdds.scm.replication.thread.interval", 10 * 1000);
-      conf.setInt("hdds.scm.replication.event.timeout", 20 * 1000);
-      conf.setInt(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100);
-      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, 100);
-    }
-
-    @Override
-    public MiniOzoneChaosCluster build() throws IOException {
-      DefaultMetricsSystem.setMiniClusterMode(true);
-      initializeConfiguration();
-      StorageContainerManager scm;
-      OzoneManager om;
-      try {
-        scm = createSCM();
-        scm.start();
-        om = createOM();
-        if(certClient != null) {
-          om.setCertClient(certClient);
-        }
-      } catch (AuthenticationException ex) {
-        throw new IOException("Unable to build MiniOzoneCluster. ", ex);
-      }
-
-      om.start();
-      final List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(scm);
-      MiniOzoneChaosCluster cluster =
-          new MiniOzoneChaosCluster(conf, om, scm, hddsDatanodes);
-      if (startDataNodes) {
-        cluster.startHddsDatanodes();
-      }
-      return cluster;
-    }
-  }
-}
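
The chaos scheduling in MiniOzoneChaosCluster is ordinary ScheduledExecutorService plumbing: one fixed-rate task that picks a random failure mode, cancelled and drained before the rest of the cluster shuts down. The skeleton with the Ozone specifics stripped out (a JDK-only sketch):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    // Minimal fault-injection loop: run 'fail' at a fixed rate; shutdown
    // cancels the task first so nothing is failed mid-teardown.
    class ChaosSchedule {
      private final ScheduledExecutorService executor =
          Executors.newSingleThreadScheduledExecutor();
      private ScheduledFuture<?> future;

      void start(Runnable fail, long initialDelay, long period, TimeUnit unit) {
        future = executor.scheduleAtFixedRate(fail, initialDelay, period, unit);
      }

      void stop() throws InterruptedException {
        if (future != null) {
          future.cancel(false);  // let any in-flight failure finish
        }
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
      }
    }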
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
deleted file mode 100644
index 0aba968..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ /dev/null
@@ -1,472 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Optional;
-import java.util.UUID;
-import java.util.concurrent.TimeoutException;
-
-/**
- * Interface used for MiniOzoneClusters.
- */
-public interface MiniOzoneCluster {
-
-  /**
-   * Returns the Builder to construct MiniOzoneCluster.
-   *
-   * @param conf OzoneConfiguration
-   *
-   * @return MiniOzoneCluster builder
-   */
-  static Builder newBuilder(OzoneConfiguration conf) {
-    return new MiniOzoneClusterImpl.Builder(conf);
-  }
-
-  /**
-   * Returns the Builder to construct MiniOzoneHACluster.
-   *
-   * @param conf OzoneConfiguration
-   *
-   * @return MiniOzoneCluster builder
-   */
-  static Builder newHABuilder(OzoneConfiguration conf) {
-    return new MiniOzoneHAClusterImpl.Builder(conf);
-  }
-
-  /**
-   * Returns the configuration object associated with the MiniOzoneCluster.
-   *
-   * @return Configuration
-   */
-  Configuration getConf();
-
-  /**
-   * Waits for the cluster to be ready; this call blocks till all the
-   * configured {@link HddsDatanodeService} instances register with
-   * {@link StorageContainerManager}.
-   *
-   * @throws TimeoutException In case of timeout
-   * @throws InterruptedException In case of interrupt while waiting
-   */
-  void waitForClusterToBeReady() throws TimeoutException, InterruptedException;
-
-  /**
-   * Sets the timeout value after which
-   * {@link MiniOzoneCluster#waitForClusterToBeReady} times out.
-   *
-   * @param timeoutInMs timeout value in milliseconds
-   */
-  void setWaitForClusterToBeReadyTimeout(int timeoutInMs);
-
-  /**
-   * Waits/blocks till the cluster is out of safe mode.
-   *
-   * @throws TimeoutException TimeoutException In case of timeout
-   * @throws InterruptedException In case of interrupt while waiting
-   */
-  void waitTobeOutOfSafeMode() throws TimeoutException, InterruptedException;
-
-  /**
-   * Returns OzoneManager Service ID.
-   *
-   * @return Service ID String
-   */
-  String getServiceId();
-
-  /**
-   * Returns {@link StorageContainerManager} associated with this
-   * {@link MiniOzoneCluster} instance.
-   *
-   * @return {@link StorageContainerManager} instance
-   */
-  StorageContainerManager getStorageContainerManager();
-
-  /**
-   * Returns {@link OzoneManager} associated with this
-   * {@link MiniOzoneCluster} instance.
-   *
-   * @return {@link OzoneManager} instance
-   */
-  OzoneManager getOzoneManager();
-
-  /**
-   * Returns the list of {@link HddsDatanodeService} which are part of this
-   * {@link MiniOzoneCluster} instance.
-   *
-   * @return List of {@link HddsDatanodeService}
-   */
-  List<HddsDatanodeService> getHddsDatanodes();
-
-  /**
-   * Returns an {@link OzoneClient} to access the {@link MiniOzoneCluster}.
-   *
-   * @return {@link OzoneClient}
-   * @throws IOException
-   */
-  OzoneClient getClient() throws IOException;
-
-  /**
-   * Returns an RPC based {@link OzoneClient} to access the
-   * {@link MiniOzoneCluster}.
-   *
-   * @return {@link OzoneClient}
-   * @throws IOException
-   */
-  OzoneClient getRpcClient() throws IOException;
-
-  /**
-   * Returns StorageContainerLocationClient to communicate with
-   * {@link StorageContainerManager} associated with the MiniOzoneCluster.
-   *
-   * @return StorageContainerLocation Client
-   * @throws IOException
-   */
-  StorageContainerLocationProtocolClientSideTranslatorPB
-      getStorageContainerLocationClient() throws IOException;
-
-  /**
-   * Restarts StorageContainerManager instance.
-   *
-   * @param waitForDatanode
-   * @throws IOException
-   * @throws TimeoutException
-   * @throws InterruptedException
-   */
-  void restartStorageContainerManager(boolean waitForDatanode)
-      throws InterruptedException, TimeoutException, IOException,
-      AuthenticationException;
-
-  /**
-   * Restarts OzoneManager instance.
-   *
-   * @throws IOException
-   */
-  void restartOzoneManager() throws IOException;
-
-  /**
-   * Restart a particular HddsDatanode.
-   *
-   * @param i index of HddsDatanode in the MiniOzoneCluster
-   */
-  void restartHddsDatanode(int i, boolean waitForDatanode)
-      throws InterruptedException, TimeoutException;
-
-  int getHddsDatanodeIndex(DatanodeDetails dn) throws IOException;
-
-  /**
-   * Restart a particular HddsDatanode.
-   *
-   * @param dn HddsDatanode in the MiniOzoneCluster
-   */
-  void restartHddsDatanode(DatanodeDetails dn, boolean waitForDatanode)
-      throws InterruptedException, TimeoutException, IOException;
-  /**
-   * Shutdown a particular HddsDatanode.
-   *
-   * @param i index of HddsDatanode in the MiniOzoneCluster
-   */
-  void shutdownHddsDatanode(int i);
-
-  /**
-   * Shutdown a particular HddsDatanode.
-   *
-   * @param dn HddsDatanode in the MiniOzoneCluster
-   */
-  void shutdownHddsDatanode(DatanodeDetails dn) throws IOException;
-
-  /**
-   * Shutdown the MiniOzoneCluster and delete the storage dirs.
-   */
-  void shutdown();
-
-  /**
-   * Stop the MiniOzoneCluster without any cleanup.
-   */
-  void stop();
-
-  /**
-   * Start Scm.
-   */
-  void startScm() throws IOException;
-
-  /**
-   * Start DataNodes.
-   */
-  void startHddsDatanodes();
-
-  /**
-   * Shuts down all the DataNodes.
-   */
-  void shutdownHddsDatanodes();
-
-  /**
-   * Builder class for MiniOzoneCluster.
-   */
-  @SuppressWarnings("visibilitymodifier")
-  abstract class Builder {
-
-    protected static final int DEFAULT_HB_INTERVAL_MS = 1000;
-    protected static final int DEFAULT_HB_PROCESSOR_INTERVAL_MS = 100;
-    protected static final int ACTIVE_OMS_NOT_SET = -1;
-
-    protected final OzoneConfiguration conf;
-    protected String path;
-
-    protected String clusterId;
-    protected String omServiceId;
-    protected int numOfOMs;
-    protected int numOfActiveOMs = ACTIVE_OMS_NOT_SET;
-
-    protected Optional<Boolean> enableTrace = Optional.of(false);
-    protected Optional<Integer> hbInterval = Optional.empty();
-    protected Optional<Integer> hbProcessorInterval = Optional.empty();
-    protected Optional<String> scmId = Optional.empty();
-    protected Optional<String> omId = Optional.empty();
-
-    protected Boolean ozoneEnabled = true;
-    protected Boolean randomContainerPort = true;
-    protected Optional<Integer> chunkSize = Optional.empty();
-    protected Optional<Long> streamBufferFlushSize = Optional.empty();
-    protected Optional<Long> streamBufferMaxSize = Optional.empty();
-    protected Optional<Long> blockSize = Optional.empty();
-    protected Optional<StorageUnit> streamBufferSizeUnit = Optional.empty();
-    // Use a relatively small number of handlers for testing.
-    protected int numOfOmHandlers = 20;
-    protected int numOfScmHandlers = 20;
-    protected int numOfDatanodes = 1;
-    protected boolean  startDataNodes = true;
-    protected CertificateClient certClient;
-
-    protected Builder(OzoneConfiguration conf) {
-      this.conf = conf;
-      setClusterId(UUID.randomUUID().toString());
-    }
-
-    /**
-     * Sets the cluster Id.
-     *
-     * @param id cluster Id
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder setClusterId(String id) {
-      clusterId = id;
-      path = GenericTestUtils.getTempPath(
-          MiniOzoneClusterImpl.class.getSimpleName() + "-" + clusterId);
-      return this;
-    }
-
-    public Builder setStartDataNodes(boolean nodes) {
-      this.startDataNodes = nodes;
-      return this;
-    }
-
-    /**
-     * Sets the certificate client.
-     *
-     * @param client
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder setCertificateClient(CertificateClient client) {
-      this.certClient = client;
-      return this;
-    }
-
-    /**
-     * Sets the SCM id.
-     *
-     * @param id SCM Id
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder setScmId(String id) {
-      scmId = Optional.of(id);
-      return this;
-    }
-
-    /**
-     * Sets the OM id.
-     *
-     * @param id OM Id
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder setOmId(String id) {
-      omId = Optional.of(id);
-      return this;
-    }
-
-    /**
-     * If set to true container service will be started in a random port.
-     *
-     * @param randomPort enable random port
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder setRandomContainerPort(boolean randomPort) {
-      randomContainerPort = randomPort;
-      return this;
-    }
-
-    /**
-     * Sets the number of HddsDatanodes to be started as part of
-     * MiniOzoneCluster.
-     *
-     * @param val number of datanodes
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder setNumDatanodes(int val) {
-      numOfDatanodes = val;
-      return this;
-    }
-
-    /**
-     * Sets the heartbeat interval of the datanodes; the value should be
-     * in milliseconds.
-     *
-     * @param val HeartBeat interval in milliseconds
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder setHbInterval(int val) {
-      hbInterval = Optional.of(val);
-      return this;
-    }
-
-    /**
-     * Sets the heartbeat processor interval of the datanodes;
-     * the value should be in milliseconds.
-     *
-     * @param val HeartBeat Processor interval in milliseconds
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder setHbProcessorInterval(int val) {
-      hbProcessorInterval = Optional.of(val);
-      return this;
-    }
-
-    /**
-     * When set to true, enables trace level logging.
-     *
-     * @param trace true or false
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder setTrace(Boolean trace) {
-      enableTrace = Optional.of(trace);
-      return this;
-    }
-
-    /**
-     * Modifies the configuration such that Ozone will be disabled.
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder disableOzone() {
-      ozoneEnabled = false;
-      return this;
-    }
-
-    /**
-     * Sets the chunk size.
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder setChunkSize(int size) {
-      chunkSize = Optional.of(size);
-      return this;
-    }
-
-    /**
-     * Sets the flush size for stream buffer.
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder setStreamBufferFlushSize(long size) {
-      streamBufferFlushSize = Optional.of(size);
-      return this;
-    }
-
-    /**
-     * Sets the max size for stream buffer.
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder setStreamBufferMaxSize(long size) {
-      streamBufferMaxSize = Optional.of(size);
-      return this;
-    }
-
-    /**
-     * Sets the block size.
-     *
-     * @return MiniOzoneCluster.Builder
-     */
-    public Builder setBlockSize(long size) {
-      blockSize = Optional.of(size);
-      return this;
-    }
-
-    public Builder setNumOfOzoneManagers(int numOMs) {
-      this.numOfOMs = numOMs;
-      return this;
-    }
-
-    public Builder setNumOfActiveOMs(int numActiveOMs) {
-      this.numOfActiveOMs = numActiveOMs;
-      return this;
-    }
-
-    public Builder setStreamBufferSizeUnit(StorageUnit unit) {
-      this.streamBufferSizeUnit = Optional.of(unit);
-      return this;
-    }
-
-    public Builder setOMServiceId(String serviceId) {
-      this.omServiceId = serviceId;
-      return this;
-    }
-
-    /**
-     * Constructs and returns MiniOzoneCluster.
-     *
-     * @return {@link MiniOzoneCluster}
-     *
-     * @throws IOException
-     */
-    public abstract MiniOzoneCluster build() throws IOException;
-  }
-}
\ No newline at end of file
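
Every integration test deleted in this patch drives the interface above through the same lifecycle: build via the fluent Builder, wait for datanode registration, exercise the cluster, shut down. A typical usage sketch using only methods declared above:

    // Typical MiniOzoneCluster lifecycle, as used throughout these tests.
    OzoneConfiguration conf = new OzoneConfiguration();
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3)
        .setHbInterval(1000)           // datanode heartbeat, in ms
        .setHbProcessorInterval(1000)  // SCM heartbeat processing, in ms
        .build();
    try {
      cluster.waitForClusterToBeReady();  // blocks until all DNs register
      // ... exercise the cluster, e.g. via cluster.getClient() ...
    } finally {
      cluster.shutdown();  // also deletes the storage dirs
    }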
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
deleted file mode 100644
index ac76482..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ /dev/null
@@ -1,663 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.common.Storage.StorageState;
-import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.OMStorage;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .DFS_CONTAINER_IPC_PORT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .DFS_CONTAINER_IPC_RANDOM_PORT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .DFS_CONTAINER_RATIS_IPC_PORT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;
-
-/**
- * MiniOzoneCluster creates a complete in-process Ozone cluster suitable for
- * running tests.  The cluster consists of an OzoneManager,
- * StorageContainerManager and multiple DataNodes.
- */
-@InterfaceAudience.Private
-public class MiniOzoneClusterImpl implements MiniOzoneCluster {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(MiniOzoneClusterImpl.class);
-
-  private final OzoneConfiguration conf;
-  private StorageContainerManager scm;
-  private OzoneManager ozoneManager;
-  private final List<HddsDatanodeService> hddsDatanodes;
-
-  // Timeout for the cluster to be ready
-  private int waitForClusterToBeReadyTimeout = 60000; // 1 min
-  private CertificateClient caClient;
-
-  /**
-   * Creates a new MiniOzoneCluster.
-   *
-   * @throws IOException if there is an I/O error
-   */
-  MiniOzoneClusterImpl(OzoneConfiguration conf,
-                       OzoneManager ozoneManager,
-                       StorageContainerManager scm,
-                       List<HddsDatanodeService> hddsDatanodes) {
-    this.conf = conf;
-    this.ozoneManager = ozoneManager;
-    this.scm = scm;
-    this.hddsDatanodes = hddsDatanodes;
-  }
-
-  /**
-   * Creates a new MiniOzoneCluster without the OzoneManager. This is used by
-   * {@link MiniOzoneHAClusterImpl} for starting multiple OzoneManagers.
-   * @param conf
-   * @param scm
-   * @param hddsDatanodes
-   */
-  MiniOzoneClusterImpl(OzoneConfiguration conf, StorageContainerManager scm,
-      List<HddsDatanodeService> hddsDatanodes) {
-    this.conf = conf;
-    this.scm = scm;
-    this.hddsDatanodes = hddsDatanodes;
-  }
-
-  public OzoneConfiguration getConf() {
-    return conf;
-  }
-
-  public String getServiceId() {
-    // Non-HA cluster doesn't have OM Service Id.
-    return null;
-  }
-
-  /**
-   * Waits for the Ozone cluster to be ready for processing requests.
-   */
-  @Override
-  public void waitForClusterToBeReady()
-      throws TimeoutException, InterruptedException {
-    GenericTestUtils.waitFor(() -> {
-      final int healthy = scm.getNodeCount(HEALTHY);
-      final boolean isReady = healthy == hddsDatanodes.size();
-      LOG.info("{}. Got {} of {} DN Heartbeats.",
-          isReady ? "Cluster is ready" : "Waiting for cluster to be ready",
-          healthy, hddsDatanodes.size());
-      return isReady;
-    }, 1000, waitForClusterToBeReadyTimeout);
-  }
-
-  /**
-   * Sets the timeout value after which
-   * {@link MiniOzoneClusterImpl#waitForClusterToBeReady} times out.
-   *
-   * @param timeoutInMs timeout value in milliseconds
-   */
-  @Override
-  public void setWaitForClusterToBeReadyTimeout(int timeoutInMs) {
-    waitForClusterToBeReadyTimeout = timeoutInMs;
-  }
-
-  /**
-   * Waits for SCM to be out of Safe Mode. Many tests can be run only when
-   * SCM is out of Safe Mode.
-   *
-   * @throws TimeoutException
-   * @throws InterruptedException
-   */
-  @Override
-  public void waitTobeOutOfSafeMode()
-      throws TimeoutException, InterruptedException {
-    GenericTestUtils.waitFor(() -> {
-      if (!scm.isInSafeMode()) {
-        return true;
-      }
-      LOG.info("Waiting for cluster to be ready. No datanodes found");
-      return false;
-    }, 100, 1000 * 45);
-  }
-
-  @Override
-  public StorageContainerManager getStorageContainerManager() {
-    return this.scm;
-  }
-
-  @Override
-  public OzoneManager getOzoneManager() {
-    return this.ozoneManager;
-  }
-
-  @Override
-  public List<HddsDatanodeService> getHddsDatanodes() {
-    return hddsDatanodes;
-  }
-
-  @Override
-  public int getHddsDatanodeIndex(DatanodeDetails dn) throws IOException {
-    for (HddsDatanodeService service : hddsDatanodes) {
-      if (service.getDatanodeDetails().equals(dn)) {
-        return hddsDatanodes.indexOf(service);
-      }
-    }
-    throw new IOException(
-        "Unable to find datanode with ID " + dn.getUuid());
-  }
-
-  @Override
-  public OzoneClient getClient() throws IOException {
-    return OzoneClientFactory.getClient(conf);
-  }
-
-  @Override
-  public OzoneClient getRpcClient() throws IOException {
-    return OzoneClientFactory.getRpcClient(conf);
-  }
-
-  /**
-   * Returns an RPC proxy connected to this cluster's StorageContainerManager
-   * for accessing container location information.  Callers take ownership of
-   * the proxy and must close it when done.
-   *
-   * @return RPC proxy for accessing container location information
-   * @throws IOException if there is an I/O error
-   */
-  @Override
-  public StorageContainerLocationProtocolClientSideTranslatorPB
-      getStorageContainerLocationClient() throws IOException {
-    long version = RPC.getProtocolVersion(
-        StorageContainerLocationProtocolPB.class);
-    InetSocketAddress address = scm.getClientRpcAddress();
-    LOG.info(
-        "Creating StorageContainerLocationProtocol RPC client with address {}",
-        address);
-    return new StorageContainerLocationProtocolClientSideTranslatorPB(
-        RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
-            address, UserGroupInformation.getCurrentUser(), conf,
-            NetUtils.getDefaultSocketFactory(conf),
-            Client.getRpcTimeout(conf)));
-  }
-
-  @Override
-  public void restartStorageContainerManager(boolean waitForDatanode)
-      throws TimeoutException, InterruptedException, IOException,
-      AuthenticationException {
-    scm.stop();
-    scm.join();
-    scm = StorageContainerManager.createSCM(conf);
-    scm.start();
-    if (waitForDatanode) {
-      waitForClusterToBeReady();
-    }
-  }
-
-  @Override
-  public void restartOzoneManager() throws IOException {
-    ozoneManager.stop();
-    ozoneManager.restart();
-  }
-
-  @Override
-  public void restartHddsDatanode(int i, boolean waitForDatanode)
-      throws InterruptedException, TimeoutException {
-    HddsDatanodeService datanodeService = hddsDatanodes.get(i);
-    datanodeService.stop();
-    datanodeService.join();
-    // ensure same ports are used across restarts.
-    OzoneConfiguration config = datanodeService.getConf();
-    int currentPort = datanodeService.getDatanodeDetails()
-        .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
-    config.setInt(DFS_CONTAINER_IPC_PORT, currentPort);
-    config.setBoolean(DFS_CONTAINER_IPC_RANDOM_PORT, false);
-    int ratisPort = datanodeService.getDatanodeDetails()
-        .getPort(DatanodeDetails.Port.Name.RATIS).getValue();
-    config.setInt(DFS_CONTAINER_RATIS_IPC_PORT, ratisPort);
-    config.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, false);
-    hddsDatanodes.remove(i);
-    if (waitForDatanode) {
-      // wait for node to be removed from SCM healthy node list.
-      waitForClusterToBeReady();
-    }
-    String[] args = new String[]{};
-    HddsDatanodeService service =
-        HddsDatanodeService.createHddsDatanodeService(args);
-    hddsDatanodes.add(i, service);
-    service.start(config);
-    if (waitForDatanode) {
-      // wait for the node to be identified as a healthy node again.
-      waitForClusterToBeReady();
-    }
-  }
-
-  @Override
-  public void restartHddsDatanode(DatanodeDetails dn, boolean waitForDatanode)
-      throws InterruptedException, TimeoutException, IOException {
-    restartHddsDatanode(getHddsDatanodeIndex(dn), waitForDatanode);
-  }
-
-  @Override
-  public void shutdownHddsDatanode(int i) {
-    hddsDatanodes.get(i).stop();
-  }
-
-  @Override
-  public void shutdownHddsDatanode(DatanodeDetails dn) throws IOException {
-    shutdownHddsDatanode(getHddsDatanodeIndex(dn));
-  }
-
-  @Override
-  public void shutdown() {
-    try {
-      LOG.info("Shutting down the Mini Ozone Cluster");
-
-      File baseDir = new File(GenericTestUtils.getTempPath(
-          MiniOzoneClusterImpl.class.getSimpleName() + "-" +
-              scm.getClientProtocolServer().getScmInfo().getClusterId()));
-      stop();
-      FileUtils.deleteDirectory(baseDir);
-      ContainerCache.getInstance(conf).shutdownCache();
-      DefaultMetricsSystem.shutdown();
-    } catch (IOException e) {
-      LOG.error("Exception while shutting down the cluster.", e);
-    }
-  }
-
-  @Override
-  public void stop() {
-    LOG.info("Stopping the Mini Ozone Cluster");
-    stopOM(ozoneManager);
-    stopDatanodes(hddsDatanodes);
-    stopSCM(scm);
-  }
-
-  /**
-   * Start Scm.
-   */
-  @Override
-  public void startScm() throws IOException {
-    scm.start();
-  }
-
-  /**
-   * Start DataNodes.
-   */
-  @Override
-  public void startHddsDatanodes() {
-    hddsDatanodes.forEach((datanode) -> {
-      datanode.setCertificateClient(getCAClient());
-      datanode.start();
-    });
-  }
-
-  @Override
-  public void shutdownHddsDatanodes() {
-    hddsDatanodes.forEach((datanode) -> {
-      try {
-        shutdownHddsDatanode(datanode.getDatanodeDetails());
-      } catch (IOException e) {
-        LOG.error("Exception while trying to shutdown datanodes:", e);
-      }
-    });
-  }
-
-  private CertificateClient getCAClient() {
-    return this.caClient;
-  }
-
-  private void setCAClient(CertificateClient client) {
-    this.caClient = client;
-  }
-
-  private static void stopDatanodes(
-      Collection<HddsDatanodeService> hddsDatanodes) {
-    if (!hddsDatanodes.isEmpty()) {
-      LOG.info("Stopping the HddsDatanodes");
-      hddsDatanodes.parallelStream()
-          .forEach(MiniOzoneClusterImpl::stopDatanode);
-    }
-  }
-
-  private static void stopDatanode(HddsDatanodeService dn) {
-    if (dn != null) {
-      dn.stop();
-      dn.join();
-    }
-  }
-
-  private static void stopSCM(StorageContainerManager scm) {
-    if (scm != null) {
-      LOG.info("Stopping the StorageContainerManager");
-      scm.stop();
-      scm.join();
-    }
-  }
-
-  private static void stopOM(OzoneManager om) {
-    if (om != null) {
-      LOG.info("Stopping the OzoneManager");
-      om.stop();
-      om.join();
-    }
-  }
-
-  /**
-   * Builder for configuring the MiniOzoneCluster to run.
-   */
-  public static class Builder extends MiniOzoneCluster.Builder {
-
-    /**
-     * Creates a new Builder.
-     *
-     * @param conf configuration
-     */
-    public Builder(OzoneConfiguration conf) {
-      super(conf);
-    }
-
-    @Override
-    public MiniOzoneCluster build() throws IOException {
-      DefaultMetricsSystem.setMiniClusterMode(true);
-      initializeConfiguration();
-      StorageContainerManager scm = null;
-      OzoneManager om = null;
-      List<HddsDatanodeService> hddsDatanodes = Collections.emptyList();
-      try {
-        scm = createSCM();
-        scm.start();
-        om = createOM();
-        if (certClient != null) {
-          om.setCertClient(certClient);
-        }
-        om.start();
-
-        hddsDatanodes = createHddsDatanodes(scm);
-        MiniOzoneClusterImpl cluster = new MiniOzoneClusterImpl(conf, om, scm,
-            hddsDatanodes);
-        cluster.setCAClient(certClient);
-        if (startDataNodes) {
-          cluster.startHddsDatanodes();
-        }
-        return cluster;
-      } catch (Exception ex) {
-        stopOM(om);
-        if (startDataNodes) {
-          stopDatanodes(hddsDatanodes);
-        }
-        stopSCM(scm);
-        removeConfiguration();
-
-        if (ex instanceof IOException) {
-          throw (IOException) ex;
-        }
-        if (ex instanceof RuntimeException) {
-          throw (RuntimeException) ex;
-        }
-        throw new IOException("Unable to build MiniOzoneCluster. ", ex);
-      }
-    }
-
-    /**
-     * Initializes the configuration required for starting MiniOzoneCluster.
-     *
-     * @throws IOException
-     */
-    void initializeConfiguration() throws IOException {
-      conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, ozoneEnabled);
-      Path metaDir = Paths.get(path, "ozone-meta");
-      Files.createDirectories(metaDir);
-      conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
-      if (!chunkSize.isPresent()) {
-        // set it to 1 MB by default in tests
-        chunkSize = Optional.of(1);
-      }
-      if (!streamBufferFlushSize.isPresent()) {
-        streamBufferFlushSize = Optional.of((long)chunkSize.get());
-      }
-      if (!streamBufferMaxSize.isPresent()) {
-        streamBufferMaxSize = Optional.of(2 * streamBufferFlushSize.get());
-      }
-      if (!blockSize.isPresent()) {
-        blockSize = Optional.of(2 * streamBufferMaxSize.get());
-      }
-
-      if (!streamBufferSizeUnit.isPresent()) {
-        streamBufferSizeUnit = Optional.of(StorageUnit.MB);
-      }
-      conf.setStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
-          chunkSize.get(), streamBufferSizeUnit.get());
-      conf.setStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE,
-          streamBufferFlushSize.get(), streamBufferSizeUnit.get());
-      conf.setStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE,
-          streamBufferMaxSize.get(), streamBufferSizeUnit.get());
-      conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, blockSize.get(),
-          streamBufferSizeUnit.get());
-      configureTrace();
-    }
-
-    void removeConfiguration() {
-      FileUtils.deleteQuietly(new File(path));
-    }
-
-    /**
-     * Creates a new StorageContainerManager instance.
-     *
-     * @return {@link StorageContainerManager}
-     *
-     * @throws IOException
-     */
-    StorageContainerManager createSCM()
-        throws IOException, AuthenticationException {
-      configureSCM();
-      SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-      initializeScmStorage(scmStore);
-      return StorageContainerManager.createSCM(conf);
-    }
-
-    private void initializeScmStorage(SCMStorageConfig scmStore)
-        throws IOException {
-      if (scmStore.getState() == StorageState.INITIALIZED) {
-        return;
-      }
-      scmStore.setClusterId(clusterId);
-      if (!scmId.isPresent()) {
-        scmId = Optional.of(UUID.randomUUID().toString());
-      }
-      scmStore.setScmId(scmId.get());
-      scmStore.initialize();
-    }
-
-    void initializeOmStorage(OMStorage omStorage) throws IOException {
-      if (omStorage.getState() == StorageState.INITIALIZED) {
-        return;
-      }
-      omStorage.setClusterId(clusterId);
-      omStorage.setScmId(scmId.get());
-      omStorage.setOmId(omId.orElse(UUID.randomUUID().toString()));
-      // Initialize ozone certificate client if security is enabled.
-      if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
-        OzoneManager.initializeSecurity(conf, omStorage);
-      }
-      omStorage.initialize();
-    }
-
-    /**
-     * Creates a new OzoneManager instance.
-     *
-     * @return {@link OzoneManager}
-     *
-     * @throws IOException
-     */
-    OzoneManager createOM()
-        throws IOException, AuthenticationException {
-      configureOM();
-      OMStorage omStore = new OMStorage(conf);
-      initializeOmStorage(omStore);
-      return OzoneManager.createOm(conf);
-    }
-
-    /**
-     * Creates HddsDatanodeService(s) instance.
-     *
-     * @return List of HddsDatanodeService
-     *
-     * @throws IOException
-     */
-    List<HddsDatanodeService> createHddsDatanodes(
-        StorageContainerManager scm) throws IOException {
-      configureHddsDatanodes();
-      String scmAddress = scm.getDatanodeRpcAddress().getHostString() +
-          ":" + scm.getDatanodeRpcAddress().getPort();
-      String[] args = new String[] {};
-      conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, scmAddress);
-      List<HddsDatanodeService> hddsDatanodes = new ArrayList<>();
-      for (int i = 0; i < numOfDatanodes; i++) {
-        OzoneConfiguration dnConf = new OzoneConfiguration(conf);
-        String datanodeBaseDir = path + "/datanode-" + i;
-        Path metaDir = Paths.get(datanodeBaseDir, "meta");
-        Path dataDir = Paths.get(datanodeBaseDir, "data", "containers");
-        Path ratisDir = Paths.get(datanodeBaseDir, "data", "ratis");
-        Path workDir = Paths.get(datanodeBaseDir, "data", "replication",
-            "work");
-        Files.createDirectories(metaDir);
-        Files.createDirectories(dataDir);
-        Files.createDirectories(ratisDir);
-        Files.createDirectories(workDir);
-        dnConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
-        dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.toString());
-        dnConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
-            ratisDir.toString());
-        dnConf.set(OzoneConfigKeys.OZONE_CONTAINER_COPY_WORKDIR,
-            workDir.toString());
-
-        HddsDatanodeService datanode
-            = HddsDatanodeService.createHddsDatanodeService(args);
-        datanode.setConfiguration(dnConf);
-        hddsDatanodes.add(datanode);
-      }
-      return hddsDatanodes;
-    }
-
-    private void configureSCM() {
-      conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-      conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-      conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
-      conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-      conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numOfScmHandlers);
-      conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
-          "3s");
-      configureSCMHeartbeat();
-    }
-
-    private void configureSCMHeartbeat() {
-      if (hbInterval.isPresent()) {
-        conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL,
-            hbInterval.get(), TimeUnit.MILLISECONDS);
-
-      } else {
-        conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL,
-            DEFAULT_HB_INTERVAL_MS,
-            TimeUnit.MILLISECONDS);
-      }
-
-      if (hbProcessorInterval.isPresent()) {
-        conf.setTimeDuration(
-            ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
-            hbProcessorInterval.get(),
-            TimeUnit.MILLISECONDS);
-      } else {
-        conf.setTimeDuration(
-            ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
-            DEFAULT_HB_PROCESSOR_INTERVAL_MS,
-            TimeUnit.MILLISECONDS);
-      }
-    }
-
-
-    private void configureOM() {
-      conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "127.0.0.1:0");
-      conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-      conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numOfOmHandlers);
-    }
-
-    private void configureHddsDatanodes() {
-      conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-      conf.set(HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-      conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
-          randomContainerPort);
-      conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
-          randomContainerPort);
-    }
-
-    private void configureTrace() {
-      if (enableTrace.isPresent()) {
-        conf.setBoolean(OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY,
-            enableTrace.get());
-        GenericTestUtils.setRootLogLevel(Level.TRACE);
-      } else {
-        GenericTestUtils.setRootLogLevel(Level.INFO);
-      }
-    }
-  }
-}
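
Editorial note: the class deleted above was the entry point for nearly every Ozone integration test. As a record of what the removal takes away, here is a minimal usage sketch assembled only from members visible in this patch (newBuilder, setNumDatanodes, build, waitForClusterToBeReady, getClient, shutdown); it compiles only against the pre-removal tree, and the client body is a placeholder.

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.MiniOzoneCluster;
    import org.apache.hadoop.ozone.client.OzoneClient;

    public class MiniOzoneClusterSketch {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        // build() starts SCM and OM; datanodes also start unless disabled.
        MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
            .setNumDatanodes(3)
            .build();
        try {
          // Blocks until every datanode reports a HEALTHY heartbeat to SCM.
          cluster.waitForClusterToBeReady();
          try (OzoneClient client = cluster.getClient()) {
            // exercise volumes, buckets and keys through the client here
          }
        } finally {
          // Stops OM, datanodes and SCM, and deletes the temp directories.
          cluster.shutdown();
        }
      }
    }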
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
deleted file mode 100644
index 006d854..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
+++ /dev/null
@@ -1,339 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMStorage;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.BindException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-
-/**
- * MiniOzoneHAClusterImpl creates a complete in-process Ozone cluster
- * with OM HA suitable for running tests.  The cluster consists of a set of
- * OzoneManagers, StorageContainerManager and multiple DataNodes.
- */
-public final class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(MiniOzoneHAClusterImpl.class);
-
-  private Map<String, OzoneManager> ozoneManagerMap;
-  private List<OzoneManager> ozoneManagers;
-  private String omServiceId;
-
-  // Active OMs denote OMs which are up and running
-  private List<OzoneManager> activeOMs;
-  private List<OzoneManager> inactiveOMs;
-
-  private static final Random RANDOM = new Random();
-  private static final int RATIS_LEADER_ELECTION_TIMEOUT = 1000; // 1 second
-
-  public static final int NODE_FAILURE_TIMEOUT = 2000; // 2 seconds
-
-  /**
-   * Creates a new MiniOzoneCluster with OM HA.
-   */
-  private MiniOzoneHAClusterImpl(
-      OzoneConfiguration conf,
-      Map<String, OzoneManager> omMap,
-      List<OzoneManager> activeOMList,
-      List<OzoneManager> inactiveOMList,
-      StorageContainerManager scm,
-      List<HddsDatanodeService> hddsDatanodes,
-      String omServiceId) {
-    super(conf, scm, hddsDatanodes);
-    this.ozoneManagerMap = omMap;
-    this.ozoneManagers = new ArrayList<>(omMap.values());
-    this.activeOMs = activeOMList;
-    this.inactiveOMs = inactiveOMList;
-    this.omServiceId = omServiceId;
-  }
-
-  @Override
-  public String getServiceId() {
-    return omServiceId;
-  }
-
-  /**
-   * Returns the first OzoneManager from the list.
-   * @return the first OzoneManager
-   */
-  @Override
-  public OzoneManager getOzoneManager() {
-    return this.ozoneManagers.get(0);
-  }
-
-  @Override
-  public OzoneClient getRpcClient() throws IOException {
-    return OzoneClientFactory.getRpcClient(getServiceId(), getConf());
-  }
-
-  public boolean isOMActive(String omNodeId) {
-    return activeOMs.contains(ozoneManagerMap.get(omNodeId));
-  }
-
-  public OzoneManager getOzoneManager(int index) {
-    return this.ozoneManagers.get(index);
-  }
-
-  public OzoneManager getOzoneManager(String omNodeId) {
-    return this.ozoneManagerMap.get(omNodeId);
-  }
-
-  /**
-   * Start a previously inactive OM.
-   */
-  public void startInactiveOM(String omNodeID) throws IOException {
-    OzoneManager ozoneManager = ozoneManagerMap.get(omNodeID);
-    if (!inactiveOMs.contains(ozoneManager)) {
-      throw new IOException("OM is already active.");
-    } else {
-      ozoneManager.start();
-      activeOMs.add(ozoneManager);
-      inactiveOMs.remove(ozoneManager);
-    }
-  }
-
-  @Override
-  public void restartOzoneManager() throws IOException {
-    for (OzoneManager ozoneManager : ozoneManagers) {
-      ozoneManager.stop();
-      ozoneManager.restart();
-    }
-  }
-
-  @Override
-  public void stop() {
-    for (OzoneManager ozoneManager : ozoneManagers) {
-      if (ozoneManager != null) {
-        LOG.info("Stopping the OzoneManager " + ozoneManager.getOMNodeId());
-        ozoneManager.stop();
-        ozoneManager.join();
-      }
-    }
-    super.stop();
-  }
-
-  public void stopOzoneManager(int index) {
-    ozoneManagers.get(index).stop();
-  }
-
-  public void stopOzoneManager(String omNodeId) {
-    ozoneManagerMap.get(omNodeId).stop();
-  }
-
-  /**
-   * Builder for configuring the MiniOzoneCluster to run.
-   */
-  public static class Builder extends MiniOzoneClusterImpl.Builder {
-
-    private final String nodeIdBaseStr = "omNode-";
-    private List<OzoneManager> activeOMs = new ArrayList<>();
-    private List<OzoneManager> inactiveOMs = new ArrayList<>();
-
-    /**
-     * Creates a new Builder.
-     *
-     * @param conf configuration
-     */
-    public Builder(OzoneConfiguration conf) {
-      super(conf);
-    }
-
-    @Override
-    public MiniOzoneCluster build() throws IOException {
-      if (numOfActiveOMs > numOfOMs) {
-        throw new IllegalArgumentException("Number of active OMs cannot be " +
-            "more than the total number of OMs");
-      }
-
-      // If num of ActiveOMs is not set, set it to numOfOMs.
-      if (numOfActiveOMs == ACTIVE_OMS_NOT_SET) {
-        numOfActiveOMs = numOfOMs;
-      }
-      DefaultMetricsSystem.setMiniClusterMode(true);
-      initializeConfiguration();
-      StorageContainerManager scm;
-      Map<String, OzoneManager> omMap;
-      try {
-        scm = createSCM();
-        scm.start();
-        omMap = createOMService();
-      } catch (AuthenticationException ex) {
-        throw new IOException("Unable to build MiniOzoneCluster. ", ex);
-      }
-
-      final List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(scm);
-      MiniOzoneHAClusterImpl cluster = new MiniOzoneHAClusterImpl(
-          conf, omMap, activeOMs, inactiveOMs, scm, hddsDatanodes, omServiceId);
-      if (startDataNodes) {
-        cluster.startHddsDatanodes();
-      }
-      return cluster;
-    }
-
-    /**
-     * Initialize OM configurations.
-     * @throws IOException
-     */
-    @Override
-    void initializeConfiguration() throws IOException {
-      super.initializeConfiguration();
-      conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true);
-      conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numOfOmHandlers);
-      conf.setTimeDuration(
-          OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
-          RATIS_LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS);
-      conf.setTimeDuration(
-          OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_KEY,
-          NODE_FAILURE_TIMEOUT, TimeUnit.MILLISECONDS);
-      conf.setInt(OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY,
-          10);
-    }
-
-    /**
-     * Start OM service with multiple OMs.
-     * @return map from OM node ID to OzoneManager
-     * @throws IOException
-     * @throws AuthenticationException
-     */
-    private Map<String, OzoneManager> createOMService() throws IOException,
-        AuthenticationException {
-
-      Map<String, OzoneManager> omMap = new HashMap<>();
-
-      int retryCount = 0;
-      int basePort = 10000;
-
-      while (true) {
-        try {
-          basePort = 10000 + RANDOM.nextInt(1000) * 4;
-          initHAConfig(basePort);
-
-          for (int i = 1; i <= numOfOMs; i++) {
-            // Set nodeId
-            String nodeId = nodeIdBaseStr + i;
-            OzoneConfiguration config = new OzoneConfiguration(conf);
-            config.set(OMConfigKeys.OZONE_OM_NODE_ID_KEY, nodeId);
-            // Clear the OM http(s) address so that the cluster picks
-            // up the address set with service ID and node ID in initHAConfig
-            config.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "");
-            config.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, "");
-
-            // Set metadata/DB dir base path
-            String metaDirPath = path + "/" + nodeId;
-            config.set(OZONE_METADATA_DIRS, metaDirPath);
-            OMStorage omStore = new OMStorage(config);
-            initializeOmStorage(omStore);
-
-            OzoneManager om = OzoneManager.createOm(config);
-            om.setCertClient(certClient);
-            omMap.put(nodeId, om);
-
-            if (i <= numOfActiveOMs) {
-              om.start();
-              activeOMs.add(om);
-              LOG.info("Started OzoneManager RPC server at " +
-                  om.getOmRpcServerAddr());
-            } else {
-              inactiveOMs.add(om);
-              LOG.info("Intialized OzoneManager at " + om.getOmRpcServerAddr()
-                  + ". This OM is currently inactive (not running).");
-            }
-          }
-
-          // Set default OM address to point to the first OM. Clients would
-          // try connecting to this address by default
-          conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
-              NetUtils.getHostPortString(omMap.get(nodeIdBaseStr + 1)
-                  .getOmRpcServerAddr()));
-
-          break;
-        } catch (BindException e) {
-          for (OzoneManager om : omMap.values()) {
-            om.stop();
-            om.join();
-            LOG.info("Stopping OzoneManager server at " +
-                om.getOmRpcServerAddr());
-          }
-          omMap.clear();
-          ++retryCount;
-          LOG.info("MiniOzoneHACluster port conflicts, retried " +
-              retryCount + " times");
-        }
-      }
-      return omMap;
-    }
-
-    /**
-     * Initialize HA related configurations.
-     */
-    private void initHAConfig(int basePort) throws IOException {
-      // Set configurations required for starting OM HA service
-      conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId);
-      String omNodesKey = OmUtils.addKeySuffixes(
-          OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId);
-      StringBuilder omNodesKeyValue = new StringBuilder();
-
-      int port = basePort;
-
-      for (int i = 1; i <= numOfOMs; i++, port += 6) {
-        String omNodeId = nodeIdBaseStr + i;
-        omNodesKeyValue.append(",").append(omNodeId);
-        String omAddrKey = OmUtils.addKeySuffixes(
-            OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodeId);
-        String omHttpAddrKey = OmUtils.addKeySuffixes(
-            OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId);
-        String omHttpsAddrKey = OmUtils.addKeySuffixes(
-            OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId);
-        String omRatisPortKey = OmUtils.addKeySuffixes(
-            OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, omServiceId, omNodeId);
-
-        conf.set(omAddrKey, "127.0.0.1:" + port);
-        conf.set(omHttpAddrKey, "127.0.0.1:" + (port + 2));
-        conf.set(omHttpsAddrKey, "127.0.0.1:" + (port + 3));
-        conf.setInt(omRatisPortKey, port + 4);
-      }
-
-      conf.set(omNodesKey, omNodesKeyValue.substring(1));
-    }
-  }
-}
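
Editorial note: the port arithmetic in initHAConfig above is easy to misread. Each OM gets a six-port block, with offsets +2, +3 and +4 carrying HTTP, HTTPS and Ratis, and +1/+5 left unused. A self-contained Java restatement of that layout, derived only from the loop shown:

    /** Restates the OM HA port layout used by initHAConfig above. */
    public final class OmHaPortLayout {
      private OmHaPortLayout() {
      }

      // 1-based OM index, matching the loop above (port += 6 per OM).
      static int blockStart(int basePort, int omIndex) {
        return basePort + (omIndex - 1) * 6;
      }

      static String rpcAddress(int basePort, int omIndex) {
        return "127.0.0.1:" + blockStart(basePort, omIndex);
      }

      static String httpAddress(int basePort, int omIndex) {
        return "127.0.0.1:" + (blockStart(basePort, omIndex) + 2);
      }

      static String httpsAddress(int basePort, int omIndex) {
        return "127.0.0.1:" + (blockStart(basePort, omIndex) + 3);
      }

      static int ratisPort(int basePort, int omIndex) {
        return blockStart(basePort, omIndex) + 4;
      }
    }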
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
deleted file mode 100644
index 6ced6d6..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
+++ /dev/null
@@ -1,268 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.ozone.chaos.TestProbability;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Optional;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * A Simple Load generator for testing.
- */
-public class MiniOzoneLoadGenerator {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(MiniOzoneLoadGenerator.class);
-
-  private static String keyNameDelimiter = "_";
-
-  private ThreadPoolExecutor writeExecutor;
-  private int numWriteThreads;
-  // number of buffers to allocate; buffer i is (1 << i) KB long and is
-  // populated with random data.
-  private int numBuffers;
-  private List<ByteBuffer> buffers;
-
-  private AtomicBoolean isWriteThreadRunning;
-
-  private final List<OzoneBucket> ozoneBuckets;
-
-  private final AtomicInteger agedFileWrittenIndex;
-  private final ExecutorService agedFileExecutor;
-  private final OzoneBucket agedLoadBucket;
-  private final TestProbability agedWriteProbability;
-
-  MiniOzoneLoadGenerator(List<OzoneBucket> bucket,
-                         OzoneBucket agedLoadBucket, int numThreads,
-      int numBuffers) {
-    this.ozoneBuckets = bucket;
-    this.numWriteThreads = numThreads;
-    this.numBuffers = numBuffers;
-    this.writeExecutor = new ThreadPoolExecutor(numThreads, numThreads, 100,
-        TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024),
-        new ThreadPoolExecutor.CallerRunsPolicy());
-    this.writeExecutor.prestartAllCoreThreads();
-
-    this.agedFileWrittenIndex = new AtomicInteger(0);
-    this.agedFileExecutor = Executors.newSingleThreadExecutor();
-    this.agedLoadBucket = agedLoadBucket;
-    this.agedWriteProbability = TestProbability.valueOf(10);
-
-    this.isWriteThreadRunning = new AtomicBoolean(false);
-
-    // allocate buffers and populate random data.
-    buffers = new ArrayList<>();
-    for (int i = 0; i < numBuffers; i++) {
-      int size = (int) StorageUnit.KB.toBytes(1 << i);
-      ByteBuffer buffer = ByteBuffer.allocate(size);
-      buffer.put(RandomUtils.nextBytes(size));
-      buffers.add(buffer);
-    }
-  }
-
-  // Start IO load on an Ozone bucket.
-  private void load(long runTimeMillis) {
-    long threadID = Thread.currentThread().getId();
-    LOG.info("Started IO Thread:{}.", threadID);
-    String threadName = Thread.currentThread().getName();
-    long startTime = Time.monotonicNow();
-
-    while (isWriteThreadRunning.get() &&
-        (Time.monotonicNow() < startTime + runTimeMillis)) {
-      OzoneBucket bucket =
-          ozoneBuckets.get((int) (Math.random() * ozoneBuckets.size()));
-      try {
-        int index = RandomUtils.nextInt();
-        String keyName = writeData(index, bucket, threadName);
-
-        readData(bucket, keyName, index);
-
-        deleteKey(bucket, keyName);
-      } catch (Exception e) {
-        LOG.error("LOADGEN: Exiting due to exception", e);
-        break;
-      }
-    }
-    // This will terminate other threads too.
-    isWriteThreadRunning.set(false);
-    LOG.info("Terminating IO thread:{}.", threadID);
-  }
-
-
-  private String writeData(int keyIndex, OzoneBucket bucket, String threadName)
-      throws Exception {
-    // choose a random buffer.
-    ByteBuffer buffer = buffers.get(keyIndex % numBuffers);
-    int bufferCapacity = buffer.capacity();
-
-    String keyName = getKeyName(keyIndex, threadName);
-    LOG.trace("LOADGEN: Writing key {}", keyName);
-    try (OzoneOutputStream stream = bucket.createKey(keyName,
-        bufferCapacity, ReplicationType.RATIS, ReplicationFactor.THREE,
-        new HashMap<>())) {
-      stream.write(buffer.array());
-      LOG.trace("LOADGEN: Written key {}", keyName);
-    } catch (Throwable t) {
-      LOG.error("LOADGEN: Create key:{} failed with exception, skipping",
-          keyName, t);
-      throw t;
-    }
-
-    return keyName;
-  }
-
-  private void readData(OzoneBucket bucket, String keyName, int index)
-      throws Exception {
-    LOG.trace("LOADGEN: Reading key {}", keyName);
-
-    ByteBuffer buffer = buffers.get(index % numBuffers);
-    int bufferCapacity = buffer.capacity();
-
-    try (OzoneInputStream stream = bucket.readKey(keyName)) {
-      byte[] readBuffer = new byte[bufferCapacity];
-      int readLen = stream.read(readBuffer);
-
-      if (readLen < bufferCapacity) {
-        throw new IOException("Read mismatch, key:" + keyName +
-            " read data length:" + readLen +
-            " is smaller than excepted:" + bufferCapacity);
-      }
-
-      if (!Arrays.equals(readBuffer, buffer.array())) {
-        throw new IOException("Read mismatch, key:" + keyName +
-            " read data does not match the written data");
-      }
-      LOG.trace("LOADGEN: Read key {}", keyName);
-    } catch (Throwable t) {
-      LOG.error("LOADGEN: Read key:{} failed with exception", keyName, t);
-      throw t;
-    }
-  }
-
-  private void deleteKey(OzoneBucket bucket, String keyName) throws Exception {
-    LOG.trace("LOADGEN: Deleting key {}", keyName);
-    try {
-      bucket.deleteKey(keyName);
-      LOG.trace("LOADGEN: Deleted key {}", keyName);
-    } catch (Throwable t) {
-      LOG.error("LOADGEN: Unable to delete key:{}", keyName, t);
-      throw t;
-    }
-  }
-
-  private Optional<Integer> randomKeyToRead() {
-    int currentIndex = agedFileWrittenIndex.get();
-    return currentIndex != 0
-      ? Optional.of(RandomUtils.nextInt(0, currentIndex))
-      : Optional.empty();
-  }
-
-  private void startAgedFilesLoad(long runTimeMillis) {
-    long threadID = Thread.currentThread().getId();
-    LOG.info("AGED LOADGEN: Started Aged IO Thread:{}.", threadID);
-    String threadName = Thread.currentThread().getName();
-    long startTime = Time.monotonicNow();
-
-    while (isWriteThreadRunning.get() &&
-        (Time.monotonicNow() < startTime + runTimeMillis)) {
-
-      String keyName = null;
-      try {
-        if (agedWriteProbability.isTrue()) {
-          keyName = writeData(agedFileWrittenIndex.getAndIncrement(),
-              agedLoadBucket, threadName);
-        } else {
-          Optional<Integer> index = randomKeyToRead();
-          if (index.isPresent()) {
-            keyName = getKeyName(index.get(), threadName);
-            readData(agedLoadBucket, keyName, index.get());
-          }
-        }
-      } catch (Throwable t) {
-        LOG.error("AGED LOADGEN: {} Exiting due to exception", keyName, t);
-        break;
-      }
-    }
-    // This will terminate other threads too.
-    isWriteThreadRunning.set(false);
-    LOG.info("Terminating IO thread:{}.", threadID);
-  }
-
-  void startIO(long time, TimeUnit timeUnit) {
-    List<CompletableFuture<Void>> writeFutures = new ArrayList<>();
-    LOG.info("Starting MiniOzoneLoadGenerator for time {}:{} with {} buffers " +
-            "and {} threads", time, timeUnit, numBuffers, numWriteThreads);
-    if (isWriteThreadRunning.compareAndSet(false, true)) {
-      // Start the IO thread
-      for (int i = 0; i < numWriteThreads; i++) {
-        writeFutures.add(
-            CompletableFuture.runAsync(() -> load(timeUnit.toMillis(time)),
-                writeExecutor));
-      }
-
-      writeFutures.add(CompletableFuture.runAsync(() ->
-              startAgedFilesLoad(timeUnit.toMillis(time)), agedFileExecutor));
-
-      // Wait for IO to complete
-      for (CompletableFuture<Void> f : writeFutures) {
-        try {
-          f.get();
-        } catch (Throwable t) {
-          LOG.error("startIO failed with exception", t);
-        }
-      }
-    }
-  }
-
-  public void shutdownLoadGenerator() {
-    try {
-      writeExecutor.shutdown();
-      writeExecutor.awaitTermination(1, TimeUnit.DAYS);
-    } catch (Exception e) {
-      LOG.error("error while closing ", e);
-    }
-  }
-
-  private static String getKeyName(int keyIndex, String threadName) {
-    return threadName + keyNameDelimiter + keyIndex;
-  }
-}
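
Editorial note: readData above performs a single InputStream.read and reports any short read as corruption, which holds only because the test streams happen to return whole chunks; a general InputStream may legally return fewer bytes per call. A sketch of the stricter loop a reusable version might use, in plain java.io with no Ozone types:

    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Arrays;

    final class ReadVerifier {
      private ReadVerifier() {
      }

      /** Reads exactly expected.length bytes and compares them to expected. */
      static void verify(InputStream in, byte[] expected) throws IOException {
        byte[] actual = new byte[expected.length];
        int off = 0;
        while (off < actual.length) {
          int n = in.read(actual, off, actual.length - off);
          if (n < 0) {
            throw new IOException(
                "EOF after " + off + " of " + expected.length + " bytes");
          }
          off += n;
        }
        if (!Arrays.equals(actual, expected)) {
          throw new IOException(
              "Read mismatch: data differs from what was written");
        }
      }
    }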
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
deleted file mode 100644
index 2023e0e4..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.test.LambdaTestUtils.VoidCallable;
-
-import org.apache.ratis.util.function.CheckedConsumer;
-import org.junit.Assert;
-
-/**
- * Helper class for Tests.
- */
-public final class OzoneTestUtils {
-  /**
-   * Never Constructed.
-   */
-  private OzoneTestUtils() {
-  }
-
-  /**
-   * Close containers which contain the blocks listed in
-   * omKeyLocationInfoGroups.
-   *
-   * @param omKeyLocationInfoGroups locationInfos for a key.
-   * @param scm StorageContainerManager instance.
-   * @throws Exception if a container cannot be finalized or closed.
-   */
-  public static void closeContainers(
-      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups,
-      StorageContainerManager scm) throws Exception {
-    performOperationOnKeyContainers((blockID) -> {
-      if (scm.getContainerManager()
-          .getContainer(ContainerID.valueof(blockID.getContainerID()))
-          .getState() == HddsProtos.LifeCycleState.OPEN) {
-        scm.getContainerManager()
-            .updateContainerState(ContainerID.valueof(blockID.getContainerID()),
-                HddsProtos.LifeCycleEvent.FINALIZE);
-      }
-      if (scm.getContainerManager()
-          .getContainer(ContainerID.valueof(blockID.getContainerID()))
-          .getState() == HddsProtos.LifeCycleState.CLOSING) {
-        scm.getContainerManager()
-            .updateContainerState(ContainerID.valueof(blockID.getContainerID()),
-                HddsProtos.LifeCycleEvent.CLOSE);
-      }
-      Assert.assertFalse(scm.getContainerManager()
-          .getContainer(ContainerID.valueof(blockID.getContainerID()))
-          .isOpen());
-    }, omKeyLocationInfoGroups);
-  }
-
-  /**
-   * Performs the provided consumer on containers which contain the blocks
-   * listed in omKeyLocationInfoGroups.
-   *
-   * @param consumer Consumer which accepts BlockID as argument.
-   * @param omKeyLocationInfoGroups locationInfos for a key.
-   * @throws Exception if the consumer fails for any block.
-   */
-  public static void performOperationOnKeyContainers(
-      CheckedConsumer<BlockID, Exception> consumer,
-      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups) throws Exception {
-
-    for (OmKeyLocationInfoGroup omKeyLocationInfoGroup :
-        omKeyLocationInfoGroups) {
-      List<OmKeyLocationInfo> omKeyLocationInfos =
-          omKeyLocationInfoGroup.getLocationList();
-      for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfos) {
-        BlockID blockID = omKeyLocationInfo.getBlockID();
-        consumer.accept(blockID);
-      }
-    }
-  }
-
-  public static <E extends Throwable> void expectOmException(
-      OMException.ResultCodes code,
-      VoidCallable eval)
-      throws Exception {
-    try {
-      eval.call();
-      Assert.fail("OMException is expected");
-    } catch (OMException ex) {
-      Assert.assertEquals(code, ex.getResult());
-    }
-  }
-}
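
Editorial note: expectOmException above pairs naturally with a lambda. A hedged usage sketch follows; the VOLUME_NOT_FOUND result code and the getObjectStore/getVolume calls are assumptions about the surrounding client API, not taken from this patch:

    import org.apache.hadoop.ozone.OzoneTestUtils;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.om.exceptions.OMException;

    final class ExpectOmExceptionSketch {
      private ExpectOmExceptionSketch() {
      }

      // Asserts that reading a missing volume surfaces the expected OM error.
      static void assertMissingVolume(OzoneClient client) throws Exception {
        ObjectStore store = client.getObjectStore();
        OzoneTestUtils.expectOmException(
            OMException.ResultCodes.VOLUME_NOT_FOUND,  // assumed result code
            () -> store.getVolume("no-such-volume"));
      }
    }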
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
deleted file mode 100644
index 4e127a3..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.ratis.RatisHelper.newRaftClient;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.protocol.RaftPeer;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.util.TimeDuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Helpers for Ratis tests.
- */
-public interface RatisTestHelper {
-  Logger LOG = LoggerFactory.getLogger(RatisTestHelper.class);
-
-  /** For testing Ozone with Ratis. */
-  class RatisTestSuite implements Closeable {
-    static final RpcType RPC = SupportedRpcType.GRPC;
-    static final int NUM_DATANODES = 3;
-
-    private final OzoneConfiguration conf;
-    private final MiniOzoneCluster cluster;
-
-    /**
-     * Create a {@link MiniOzoneCluster} for testing by setting:
-     *   OZONE_ENABLED = true
-     *   RATIS_ENABLED = true
-     */
-    public RatisTestSuite()
-        throws IOException, TimeoutException, InterruptedException {
-      conf = newOzoneConfiguration(RPC);
-
-      cluster = newMiniOzoneCluster(NUM_DATANODES, conf);
-    }
-
-    public OzoneConfiguration getConf() {
-      return conf;
-    }
-
-    public MiniOzoneCluster getCluster() {
-      return cluster;
-    }
-
-    public ClientProtocol newOzoneClient()
-        throws IOException {
-      return new RpcClient(conf, null);
-    }
-
-    @Override
-    public void close() {
-      cluster.shutdown();
-    }
-
-    public int getDatanodeOzoneRestPort() {
-      return cluster.getHddsDatanodes().get(0).getDatanodeDetails()
-          .getPort(DatanodeDetails.Port.Name.REST).getValue();
-    }
-  }
-
-  static OzoneConfiguration newOzoneConfiguration(RpcType rpc) {
-    final OzoneConfiguration conf = new OzoneConfiguration();
-    initRatisConf(rpc, conf);
-    return conf;
-  }
-
-  static void initRatisConf(RpcType rpc, Configuration conf) {
-    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true);
-    conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name());
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS);
-    LOG.info(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY
-        + " = " + rpc.name());
-  }
-
-  static MiniOzoneCluster newMiniOzoneCluster(
-      int numDatanodes, OzoneConfiguration conf)
-      throws IOException, TimeoutException, InterruptedException {
-    final MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
-        .setHbInterval(1000)
-        .setHbProcessorInterval(1000)
-        .setNumDatanodes(numDatanodes).build();
-    cluster.waitForClusterToBeReady();
-    return cluster;
-  }
-
-  static void initXceiverServerRatis(
-      RpcType rpc, DatanodeDetails dd, Pipeline pipeline) throws IOException {
-    final RaftPeer p = RatisHelper.toRaftPeer(dd);
-    final OzoneConfiguration conf = new OzoneConfiguration();
-    final int maxOutstandingRequests =
-        HddsClientUtils.getMaxOutstandingRequests(conf);
-    final TimeDuration requestTimeout =
-        RatisHelper.getClientRequestTimeout(conf);
-    final RaftClient client =
-        newRaftClient(rpc, p, RatisHelper.createRetryPolicy(conf),
-            maxOutstandingRequests, requestTimeout);
-    client.groupAdd(RatisHelper.newRaftGroup(pipeline), p.getId());
-  }
-}
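
Editorial note: RatisTestSuite implements Closeable, so the natural pattern in a test was try-with-resources. A minimal sketch using only members shown above (newMiniOzoneCluster already waits for the cluster to be ready, so no extra wait is needed):

    import org.apache.hadoop.ozone.MiniOzoneCluster;
    import org.apache.hadoop.ozone.RatisTestHelper;
    import org.apache.hadoop.ozone.client.protocol.ClientProtocol;

    final class RatisTestSuiteSketch {
      private RatisTestSuiteSketch() {
      }

      static void run() throws Exception {
        try (RatisTestHelper.RatisTestSuite suite =
                 new RatisTestHelper.RatisTestSuite()) {
          MiniOzoneCluster cluster = suite.getCluster();
          ClientProtocol client = suite.newOzoneClient();
          // drive Ratis-backed container IO through the client here
        }  // close() shuts the cluster down
      }
    }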
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
deleted file mode 100644
index 129cf04..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * This class tests container operations (TODO: currently only create is
- * covered) issued from cblock clients.
- */
-public class TestContainerOperations {
-
-  private static ScmClient storageClient;
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration ozoneConf;
-
-  @BeforeClass
-  public static void setup() throws Exception {
-    int containerSizeGB = 5;
-    ContainerOperationClient.setContainerSizeB(
-        containerSizeGB * OzoneConsts.GB);
-    ozoneConf = new OzoneConfiguration();
-    ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-        SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
-    cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(1).build();
-    StorageContainerLocationProtocolClientSideTranslatorPB client =
-        cluster.getStorageContainerLocationClient();
-    RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    storageClient = new ContainerOperationClient(
-        client, new XceiverClientManager(ozoneConf));
-    cluster.waitForClusterToBeReady();
-  }
-
-  @AfterClass
-  public static void cleanup() throws Exception {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  /**
-   * A simple test to create a container with {@link ContainerOperationClient}.
-   * @throws Exception
-   */
-  @Test
-  public void testCreate() throws Exception {
-    ContainerWithPipeline container = storageClient.createContainer(HddsProtos
-        .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor
-        .ONE, "OZONE");
-    assertEquals(container.getContainerInfo().getContainerID(), storageClient
-        .getContainer(container.getContainerInfo().getContainerID())
-        .getContainerID());
-  }
-
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
deleted file mode 100644
index 2d2d028..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.
-    ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.
-    SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.protocolPB.
-    StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-
-/**
- * Tests the idempotent operations in ContainerStateMachine.
- */
-public class TestContainerStateMachineIdempotency {
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration ozoneConfig;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
-
-  @BeforeClass
-  public static void init() throws Exception {
-    ozoneConfig = new OzoneConfiguration();
-    ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-        SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
-    cluster =
-        MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1).build();
-    cluster.waitForClusterToBeReady();
-    storageContainerLocationClient =
-        cluster.getStorageContainerLocationClient();
-    xceiverClientManager = new XceiverClientManager(ozoneConfig);
-  }
-
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-    IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
-  }
-
-  @Test
-  public void testContainerStateMachineIdempotency() throws Exception {
-    ContainerWithPipeline container = storageContainerLocationClient
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    long containerID = container.getContainerInfo().getContainerID();
-    Pipeline pipeline = container.getPipeline();
-    XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
-    try {
-      // create the container
-      ContainerProtocolCalls.createContainer(client, containerID, null);
-      // write a chunk, then re-send the identical request
-      BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
-      byte[] data =
-          RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes();
-      ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
-          ContainerTestHelper
-              .getWriteChunkRequest(container.getPipeline(), blockID,
-                  data.length);
-      client.sendCommand(writeChunkRequest);
-
-      // Make the same write chunk request again, without requesting overwrite
-      client.sendCommand(writeChunkRequest);
-      // Now, explicitly make a putKey request for the block.
-      ContainerProtos.ContainerCommandRequestProto putKeyRequest =
-          ContainerTestHelper
-              .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
-      client.sendCommand(putKeyRequest).getPutBlock();
-      // send the putBlock again
-      client.sendCommand(putKeyRequest);
-
-      // close the container, then close it again to verify idempotency
-      ContainerProtocolCalls.closeContainer(client, containerID, null);
-      ContainerProtocolCalls.closeContainer(client, containerID, null);
-    } catch (IOException ioe) {
-      Assert.fail("Container operation failed" + ioe);
-    }
-    xceiverClientManager.releaseClient(client, false);
-  }
-}
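[Editorial note — not part of the patch] The test deleted above exercises the idempotency contract of the container state machine: replaying the same container command (create container, write chunk, put block, close container) must succeed rather than fail. A compact sketch of that pattern, using only calls visible in the deleted test; the wrapper class and method names are illustrative, and, unlike the test above, the client is released in a finally block so a failed call cannot leak it:

    import org.apache.hadoop.hdds.scm.XceiverClientManager;
    import org.apache.hadoop.hdds.scm.XceiverClientSpi;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
    import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

    /** Illustrative sketch, not part of this patch. */
    final class IdempotencySketch {
      static void exercise(XceiverClientManager manager, Pipeline pipeline,
          long containerID) throws Exception {
        XceiverClientSpi client = manager.acquireClient(pipeline);
        try {
          // Replaying the same command must succeed: create twice, close twice.
          ContainerProtocolCalls.createContainer(client, containerID, null);
          ContainerProtocolCalls.createContainer(client, containerID, null);
          ContainerProtocolCalls.closeContainer(client, containerID, null);
          ContainerProtocolCalls.closeContainer(client, containerID, null);
        } finally {
          // Release in finally so an exception cannot leak the client.
          manager.releaseClient(client, false);
        }
      }
    }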
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
deleted file mode 100644
index e27aa85..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.HashMap;
-import java.util.Scanner;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.client.BucketArgs;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-
-import org.apache.commons.lang3.RandomStringUtils;
-
-/**
- * Utility to help generate test data.
- */
-public final class TestDataUtil {
-
-  private TestDataUtil() {
-  }
-
-  public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster,
-      String volumeName, String bucketName) throws IOException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-
-    OzoneClient client = cluster.getClient();
-
-    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-        .setAdmin(adminName)
-        .setOwner(userName)
-        .build();
-
-    ObjectStore objectStore = client.getObjectStore();
-
-    objectStore.createVolume(volumeName, volumeArgs);
-
-    OzoneVolume volume = objectStore.getVolume(volumeName);
-
-    BucketArgs omBucketArgs = BucketArgs.newBuilder()
-        .setStorageType(StorageType.DISK)
-        .build();
-
-    volume.createBucket(bucketName, omBucketArgs);
-    return volume.getBucket(bucketName);
-
-  }
-
-  public static void createKey(OzoneBucket bucket, String keyName,
-      String content) throws IOException {
-    try (OutputStream stream = bucket
-        .createKey(keyName, content.length(), ReplicationType.STAND_ALONE,
-            ReplicationFactor.ONE, new HashMap<>())) {
-      stream.write(content.getBytes());
-    }
-  }
-
-  public static String getKey(OzoneBucket bucket, String keyName)
-      throws IOException {
-    try (InputStream stream = bucket.readKey(keyName)) {
-      return new Scanner(stream).useDelimiter("\\A").next();
-    }
-  }
-
-  public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster)
-      throws IOException {
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    return createVolumeAndBucket(cluster, volumeName, bucketName);
-  }
-}
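[Editorial note — not part of the patch] TestDataUtil, deleted above, wrapped the volume/bucket/key boilerplate shared by many integration tests. For reference, a sketch of the same round trip written directly against the Ozone client API used in the deleted file; it assumes a running MiniOzoneCluster, and the class and literal names here are illustrative:

    import java.io.InputStream;
    import java.io.OutputStream;
    import java.util.HashMap;
    import java.util.Scanner;

    import org.apache.hadoop.hdds.client.ReplicationFactor;
    import org.apache.hadoop.hdds.client.ReplicationType;
    import org.apache.hadoop.ozone.MiniOzoneCluster;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.OzoneVolume;

    /** Illustrative sketch, not part of this patch. */
    final class DataRoundTripSketch {
      static String roundTrip(MiniOzoneCluster cluster) throws Exception {
        ObjectStore store = cluster.getClient().getObjectStore();
        store.createVolume("vol1");
        OzoneVolume volume = store.getVolume("vol1");
        volume.createBucket("bucket1");
        OzoneBucket bucket = volume.getBucket("bucket1");
        String content = "hello";
        try (OutputStream out = bucket.createKey("key1", content.length(),
            ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
            new HashMap<>())) {
          out.write(content.getBytes());
        }
        try (InputStream in = bucket.readKey("key1")) {
          // Scanner with the \A delimiter reads the whole stream as one token.
          return new Scanner(in).useDelimiter("\\A").next();
        }
      }
    }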
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java
deleted file mode 100644
index bb66474..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-import picocli.CommandLine;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Test Read Write with Mini Ozone Chaos Cluster.
- */
-@Command(description = "Starts IO with MiniOzoneChaosCluster",
-    name = "chaos", mixinStandardHelpOptions = true)
-public class TestMiniChaosOzoneCluster implements Runnable {
-
-  @Option(names = {"-d", "--numDatanodes"},
-      description = "num of datanodes")
-  private static int numDatanodes = 20;
-
-  @Option(names = {"-t", "--numThreads"},
-      description = "num of IO threads")
-  private static int numThreads = 10;
-
-  @Option(names = {"-b", "--numBuffers"},
-      description = "num of IO buffers")
-  private static int numBuffers = 16;
-
-  @Option(names = {"-m", "--numMinutes"},
-      description = "total run time")
-  private static int numMinutes = 1440; // 1 day by default
-
-  @Option(names = {"-n", "--numClients"},
-      description = "no of clients writing to OM")
-  private static int numClients = 3;
-
-  @Option(names = {"-i", "--failureInterval"},
-      description = "time between failure events in seconds")
-  private static int failureInterval = 300; // 5 minutes between failures.
-
-  private static MiniOzoneChaosCluster cluster;
-  private static MiniOzoneLoadGenerator loadGenerator;
-
-  @BeforeClass
-  public static void init() throws Exception {
-    cluster = new MiniOzoneChaosCluster.Builder(new OzoneConfiguration())
-        .setNumDatanodes(numDatanodes).build();
-    cluster.waitForClusterToBeReady();
-
-    String volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
-    String bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
-    ObjectStore store = cluster.getRpcClient().getObjectStore();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    List<OzoneBucket> ozoneBuckets = new ArrayList<>(numClients);
-    for (int i = 0; i < numClients; i++) {
-      ozoneBuckets.add(volume.getBucket(bucketName));
-    }
-
-    String agedBucketName =
-        RandomStringUtils.randomAlphabetic(10).toLowerCase();
-
-    volume.createBucket(agedBucketName);
-    OzoneBucket agedLoadBucket = volume.getBucket(agedBucketName);
-    loadGenerator =
-        new MiniOzoneLoadGenerator(ozoneBuckets, agedLoadBucket, numThreads,
-            numBuffers);
-  }
-
-  /**
-   * Shutdown the MiniOzoneChaosCluster and the load generator.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (loadGenerator != null) {
-      loadGenerator.shutdownLoadGenerator();
-    }
-
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  public void run() {
-    try {
-      init();
-      cluster.startChaos(failureInterval, failureInterval, TimeUnit.SECONDS);
-      loadGenerator.startIO(numMinutes, TimeUnit.MINUTES);
-    } catch (Exception e) { // ignored; shutdown still runs in finally
-    } finally {
-      shutdown();
-    }
-  }
-
-  public static void main(String... args) {
-    CommandLine.run(new TestMiniChaosOzoneCluster(), System.err, args);
-  }
-
-  @Test
-  public void testReadWriteWithChaosCluster() {
-    cluster.startChaos(5, 10, TimeUnit.SECONDS);
-    loadGenerator.startIO(1, TimeUnit.MINUTES);
-  }
-}
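[Editorial note — not part of the patch] Besides being a JUnit test, the deleted TestMiniChaosOzoneCluster was runnable as a picocli command through its main method. A sketch of launching it standalone with non-default options; the option values are illustrative, and it assumes the (now deleted) class is still on the classpath:

    /** Illustrative sketch, not part of this patch. */
    final class ChaosRunnerSketch {
      public static void main(String[] args) {
        // Same entry point as the deleted main(), with explicit options.
        picocli.CommandLine.run(new TestMiniChaosOzoneCluster(), System.err,
            "--numDatanodes", "5", "--numThreads", "4", "--numMinutes", "30");
      }
    }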
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
deleted file mode 100644
index efc2736..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++ /dev/null
@@ -1,326 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileReader;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
-import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer;
-import org.apache.hadoop.test.PathUtils;
-import org.apache.hadoop.test.TestGenericTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.yaml.snakeyaml.Yaml;
-
-/**
- * Test cases for mini ozone cluster.
- */
-public class TestMiniOzoneCluster {
-
-  private MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-
-  private final static File TEST_ROOT = TestGenericTestUtils.getTestDir();
-  private final static File WRITE_TMP = new File(TEST_ROOT, "write");
-  private final static File READ_TMP = new File(TEST_ROOT, "read");
-
-  @BeforeClass
-  public static void setup() {
-    conf = new OzoneConfiguration();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, TEST_ROOT.toString());
-    conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
-    WRITE_TMP.mkdirs();
-    READ_TMP.mkdirs();
-  }
-
-  @After
-  public void cleanup() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @AfterClass
-  public static void afterClass() {
-    FileUtils.deleteQuietly(WRITE_TMP);
-    FileUtils.deleteQuietly(READ_TMP);
-  }
-
-  @Test(timeout = 30000)
-  public void testStartMultipleDatanodes() throws Exception {
-    final int numberOfNodes = 3;
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(numberOfNodes)
-        .build();
-    cluster.waitForClusterToBeReady();
-    List<HddsDatanodeService> datanodes = cluster.getHddsDatanodes();
-    assertEquals(numberOfNodes, datanodes.size());
-    for (HddsDatanodeService dn : datanodes) {
-      // Create a single-member pipeline
-      List<DatanodeDetails> dns = new ArrayList<>();
-      dns.add(dn.getDatanodeDetails());
-      Pipeline pipeline = Pipeline.newBuilder()
-          .setState(Pipeline.PipelineState.OPEN)
-          .setId(PipelineID.randomId())
-          .setType(HddsProtos.ReplicationType.STAND_ALONE)
-          .setFactor(HddsProtos.ReplicationFactor.ONE)
-          .setNodes(dns)
-          .build();
-
-      // Verify client is able to connect to the container
-      try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf)) {
-        client.connect();
-        assertTrue(client.isConnected(pipeline.getFirstNode()));
-      }
-    }
-  }
-
-  @Test
-  public void testDatanodeIDPersistent() throws Exception {
-    // Generate IDs for testing
-    DatanodeDetails id1 = TestUtils.randomDatanodeDetails();
-    DatanodeDetails id2 = TestUtils.randomDatanodeDetails();
-    DatanodeDetails id3 = TestUtils.randomDatanodeDetails();
-    id1.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 1));
-    id2.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 2));
-    id3.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 3));
-
-    // Add a certificate serial id.
-    String certSerialId = "" + RandomUtils.nextLong();
-    id1.setCertSerialId(certSerialId);
-
-    // Write a single ID to the file and read it out
-    File validIdsFile = new File(WRITE_TMP, "valid-values.id");
-    validIdsFile.delete();
-    ContainerUtils.writeDatanodeDetailsTo(id1, validIdsFile);
-    // Validate using yaml parser
-    Yaml yaml = new Yaml();
-    try {
-      yaml.load(new FileReader(validIdsFile));
-    } catch (Exception e) {
-      Assert.fail("Failed parsing datanode id yaml.");
-    }
-    DatanodeDetails validId = ContainerUtils.readDatanodeDetailsFrom(
-        validIdsFile);
-
-    assertEquals(validId.getCertSerialId(), certSerialId);
-    assertEquals(id1, validId);
-    assertEquals(id1.getProtoBufMessage(), validId.getProtoBufMessage());
-
-    // Read should fail if the file doesn't exist
-    File nonExistFile = new File(READ_TMP, "non_exist.id");
-    nonExistFile.delete();
-    try {
-      ContainerUtils.readDatanodeDetailsFrom(nonExistFile);
-      Assert.fail();
-    } catch (Exception e) {
-      assertTrue(e instanceof IOException);
-    }
-
-    // Read should fail if the file is malformed
-    File malformedFile = new File(READ_TMP, "malformed.id");
-    createMalformedIDFile(malformedFile);
-    try {
-      ContainerUtils.readDatanodeDetailsFrom(malformedFile);
-      fail("Read a malformed ID file should fail");
-    } catch (Exception e) {
-      assertTrue(e instanceof IOException);
-    }
-
-    // Test upgrade scenario - protobuf file instead of yaml
-    File protoFile = new File(WRITE_TMP, "valid-proto.id");
-    try (FileOutputStream out = new FileOutputStream(protoFile)) {
-      HddsProtos.DatanodeDetailsProto proto = id1.getProtoBufMessage();
-      proto.writeTo(out);
-    }
-    validId = ContainerUtils.readDatanodeDetailsFrom(protoFile);
-    assertEquals(validId.getCertSerialId(), certSerialId);
-    assertEquals(id1, validId);
-    assertEquals(id1.getProtoBufMessage(), validId.getProtoBufMessage());
-  }
-
-  @Test
-  public void testContainerRandomPort() throws IOException {
-    Configuration ozoneConf = SCMTestUtils.getConf();
-    File testDir = PathUtils.getTestDir(TestOzoneContainer.class);
-    ozoneConf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath());
-    ozoneConf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        TEST_ROOT.toString());
-
-    // Each instance of the datanode state machine will create an ozone
-    // container that binds to a random port.
-    ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true);
-    ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
-        true);
-    List<DatanodeStateMachine> stateMachines = new ArrayList<>();
-    try {
-
-      for (int i = 0; i < 3; i++) {
-        stateMachines.add(new DatanodeStateMachine(
-            TestUtils.randomDatanodeDetails(), ozoneConf, null, null));
-      }
-
-      // we need to start all the servers to get the real ports assigned
-      for (DatanodeStateMachine dsm : stateMachines) {
-        dsm.getContainer().getReadChannel().start();
-        dsm.getContainer().getWriteChannel().start();
-
-      }
-
-      for (DatanodeStateMachine dsm : stateMachines) {
-        dsm.getContainer().getWriteChannel().stop();
-        dsm.getContainer().getReadChannel().stop();
-
-      }
-
-      // after the start, the real port numbers should be available and unique
-      HashSet<Integer> ports = new HashSet<Integer>();
-      for (DatanodeStateMachine dsm : stateMachines) {
-        int readPort = dsm.getContainer().getReadChannel().getIPCPort();
-
-        assertNotEquals("Port number of the service is not updated", 0,
-            readPort);
-
-        assertTrue("Port of datanode service is conflicted with other server.",
-            ports.add(readPort));
-
-        int writePort = dsm.getContainer().getWriteChannel().getIPCPort();
-
-        assertNotEquals("Port number of the service is not updated", 0,
-            writePort);
-        assertTrue("Port of datanode service is conflicted with other server.",
-            ports.add(writePort));
-      }
-
-    } finally {
-      for (DatanodeStateMachine dsm : stateMachines) {
-        dsm.close();
-      }
-    }
-
-    // Turn off the random port flag and test again
-    ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
-    try (
-        DatanodeStateMachine sm1 = new DatanodeStateMachine(
-            TestUtils.randomDatanodeDetails(), ozoneConf,  null, null);
-        DatanodeStateMachine sm2 = new DatanodeStateMachine(
-            TestUtils.randomDatanodeDetails(), ozoneConf,  null, null);
-        DatanodeStateMachine sm3 = new DatanodeStateMachine(
-            TestUtils.randomDatanodeDetails(), ozoneConf,  null, null);
-    ) {
-      HashSet<Integer> ports = new HashSet<Integer>();
-      assertTrue(ports.add(sm1.getContainer().getReadChannel().getIPCPort()));
-      assertFalse(ports.add(sm2.getContainer().getReadChannel().getIPCPort()));
-      assertFalse(ports.add(sm3.getContainer().getReadChannel().getIPCPort()));
-      assertEquals(ports.iterator().next().intValue(),
-          conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-              OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT));
-    }
-  }
-
-  private void createMalformedIDFile(File malformedFile)
-      throws IOException {
-    malformedFile.delete();
-    DatanodeDetails id = TestUtils.randomDatanodeDetails();
-    ContainerUtils.writeDatanodeDetailsTo(id, malformedFile);
-
-    FileOutputStream out = new FileOutputStream(malformedFile);
-    out.write("malformed".getBytes());
-    out.close();
-  }
-
-  /**
-   * Test that a DN can register with SCM even if it was started before the SCM.
-   * @throws Exception
-   */
-  @Test (timeout = 300_000)
-  public void testDNstartAfterSCM() throws Exception {
-    // Start a cluster with 1 DN
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(1)
-        .build();
-    cluster.waitForClusterToBeReady();
-
-    // Stop the SCM
-    StorageContainerManager scm = cluster.getStorageContainerManager();
-    scm.stop();
-
-    // Restart DN
-    cluster.restartHddsDatanode(0, false);
-
-    // DN should be in GETVERSION state until the SCM is restarted.
-    // Check DN endpoint state for 20 seconds
-    DatanodeStateMachine dnStateMachine = cluster.getHddsDatanodes().get(0)
-        .getDatanodeStateMachine();
-    for (int i = 0; i < 20; i++) {
-      for (EndpointStateMachine endpoint :
-          dnStateMachine.getConnectionManager().getValues()) {
-        Assert.assertEquals(
-            EndpointStateMachine.EndPointStates.GETVERSION,
-            endpoint.getState());
-      }
-      Thread.sleep(1000);
-    }
-
-    // DN should successfully register with the SCM after SCM is restarted.
-    // Restart the SCM
-    cluster.restartStorageContainerManager(true);
-    // Wait for DN to register
-    cluster.waitForClusterToBeReady();
-    // DN should be in HEARTBEAT state after registering with the SCM
-    for (EndpointStateMachine endpoint :
-        dnStateMachine.getConnectionManager().getValues()) {
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT,
-          endpoint.getState());
-    }
-  }
-}
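[Editorial note — not part of the patch] The core invariant of testDatanodeIDPersistent above is that a DatanodeDetails written as YAML (or, for the upgrade path, as protobuf) must survive a read round trip unchanged. A compact sketch of that round trip using the same helpers as the deleted test; the wrapper class is illustrative:

    import java.io.File;

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.scm.TestUtils;
    import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;

    /** Illustrative sketch, not part of this patch. */
    final class DatanodeIdRoundTripSketch {
      static void check(File idFile) throws Exception {
        DatanodeDetails written = TestUtils.randomDatanodeDetails();
        // Persist as YAML, then read back and compare.
        ContainerUtils.writeDatanodeDetailsTo(written, idFile);
        DatanodeDetails read = ContainerUtils.readDatanodeDetailsFrom(idFile);
        if (!written.equals(read)) {
          throw new AssertionError("Datanode ID changed across the round trip");
        }
      }
    }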
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
deleted file mode 100644
index fa0e73d..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.conf.TestConfigurationFieldsBase;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.recon.ReconServerConfigKeys;
-import org.apache.hadoop.ozone.s3.S3GatewayConfigKeys;
-
-import java.util.Arrays;
-
-/**
- * Tests that configuration constants are documented in ozone-default.xml.
- */
-public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
-
-  @Override
-  public void initializeMemberVariables() {
-    xmlFilename = "ozone-default.xml";
-    configurationClasses =
-        new Class[] {OzoneConfigKeys.class, ScmConfigKeys.class,
-            OMConfigKeys.class, HddsConfigKeys.class,
-            ReconServerConfigKeys.class,
-            S3GatewayConfigKeys.class
-        };
-    errorIfMissingConfigProps = true;
-    errorIfMissingXmlProps = true;
-    xmlPropsToSkipCompare.add("hadoop.tags.custom");
-    xmlPropsToSkipCompare.add("ozone.om.nodes.EXAMPLEOMSERVICEID");
-    addPropertiesNotInXml();
-  }
-
-  private void addPropertiesNotInXml() {
-    configurationPropsToSkipCompare.addAll(Arrays.asList(
-        HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA,
-        HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT,
-        HddsConfigKeys.HDDS_KEY_ALGORITHM,
-        HddsConfigKeys.HDDS_SECURITY_PROVIDER,
-        OMConfigKeys.OZONE_OM_NODES_KEY,
-        OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE,
-        OzoneConfigKeys.OZONE_S3_TOKEN_MAX_LIFETIME_KEY
-    ));
-  }
-}
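[Editorial note — not part of the patch] The deleted test relies on OzoneConfiguration picking up ozone-default.xml as a default resource on top of the base Hadoop configuration. A minimal sketch of probing a documented default; the key shown is ozone.om.address, and the class name is illustrative:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    /** Illustrative sketch, not part of this patch. */
    final class DefaultsProbe {
      public static void main(String[] args) {
        // OzoneConfiguration layers ozone-default.xml (and ozone-site.xml)
        // over the base Hadoop resources.
        OzoneConfiguration conf = new OzoneConfiguration();
        System.out.println(conf.get("ozone.om.address"));
      }
    }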
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
deleted file mode 100644
index ca1f179..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ /dev/null
@@ -1,896 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.security.KeyPair;
-import java.security.PrivilegedExceptionAction;
-import java.util.Properties;
-import java.util.UUID;
-import java.util.concurrent.Callable;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.scm.HddsTestUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
-import org.apache.hadoop.hdds.security.x509.keys.KeyCodec;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.minikdc.MiniKdc;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.ServerSocketUtil;
-import org.apache.hadoop.ozone.client.CertificateClientTestImpl;
-import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMStorage;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.security.KerberosAuthException;
-import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.bouncycastle.asn1.x500.RDN;
-import org.bouncycastle.asn1.x500.X500Name;
-import org.bouncycastle.asn1.x500.style.BCStyle;
-import org.bouncycastle.cert.jcajce.JcaX509CertificateHolder;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.security.cert.X509Certificate;
-import java.time.LocalDate;
-import java.time.LocalDateTime;
-import java.time.temporal.ChronoUnit;
-import java.util.Date;
-
-import static junit.framework.TestCase.assertNotNull;
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_AUTH_METHOD;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_EXPIRED;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
-import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.slf4j.event.Level.INFO;
-
-/**
- * Test class for a security-enabled Ozone cluster.
- */
-@InterfaceAudience.Private
-public final class TestSecureOzoneCluster {
-
-  private static final String TEST_USER = "testUgiUser@EXAMPLE.COM";
-  private static final String COMPONENT = "test";
-  private static final int CLIENT_TIMEOUT = 2 * 1000;
-  private Logger logger = LoggerFactory
-      .getLogger(TestSecureOzoneCluster.class);
-
-  @Rule
-  public Timeout timeout = new Timeout(80000);
-
-  private MiniKdc miniKdc;
-  private OzoneConfiguration conf;
-  private File workDir;
-  private static Properties securityProperties;
-  private File scmKeytab;
-  private File spnegoKeytab;
-  private File omKeyTab;
-  private File testUserKeytab;
-  private String curUser;
-  private String testUserPrincipal;
-  private UserGroupInformation testKerberosUgi;
-  private StorageContainerManager scm;
-  private OzoneManager om;
-  private String host;
-
-  private static String clusterId;
-  private static String scmId;
-  private static String omId;
-  private OzoneManagerProtocolClientSideTranslatorPB omClient;
-  private KeyPair keyPair;
-  private Path metaDirPath;
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-  private String omCertSerialId = "9879877970576";
-
-  @Before
-  public void init() {
-    try {
-      conf = new OzoneConfiguration();
-      conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost");
-
-      conf.setInt(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY, ServerSocketUtil
-              .getPort(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT, 100));
-      conf.setInt(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY, ServerSocketUtil
-              .getPort(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT, 100));
-      conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
-              ServerSocketUtil.getPort(ScmConfigKeys
-                      .OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100));
-      conf.setInt(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
-              ServerSocketUtil.getPort(ScmConfigKeys
-                      .OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100));
-
-      DefaultMetricsSystem.setMiniClusterMode(true);
-      final String path = folder.newFolder().toString();
-      metaDirPath = Paths.get(path, "om-meta");
-      conf.set(OZONE_METADATA_DIRS, metaDirPath.toString());
-      conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
-          KERBEROS.toString());
-
-      startMiniKdc();
-      setSecureConfig(conf);
-      createCredentialsInKDC(conf, miniKdc);
-      generateKeyPair(conf);
-//      OzoneManager.setTestSecureOmFlag(true);
-    } catch (IOException e) {
-      logger.error("Failed to initialize TestSecureOzoneCluster", e);
-    } catch (Exception e) {
-      logger.error("Failed to initialize TestSecureOzoneCluster", e);
-    }
-  }
-
-  @After
-  public void stop() {
-    try {
-      stopMiniKdc();
-      if (scm != null) {
-        scm.stop();
-      }
-      if (om != null) {
-        om.stop();
-      }
-      if (omClient != null) {
-        omClient.close();
-      }
-    } catch (Exception e) {
-      logger.error("Failed to stop TestSecureOzoneCluster", e);
-    }
-  }
-
-  private void createCredentialsInKDC(Configuration configuration,
-                                      MiniKdc kdc) throws Exception {
-    createPrincipal(scmKeytab,
-        configuration.get(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY));
-    createPrincipal(spnegoKeytab,
-        configuration.get(ScmConfigKeys
-            .HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY));
-    createPrincipal(testUserKeytab, testUserPrincipal);
-    createPrincipal(omKeyTab,
-        configuration.get(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY));
-  }
-
-  private void createPrincipal(File keytab, String... principal)
-      throws Exception {
-    miniKdc.createPrincipal(keytab, principal);
-  }
-
-  private void startMiniKdc() throws Exception {
-    workDir = GenericTestUtils
-        .getTestDir(TestSecureOzoneCluster.class.getSimpleName());
-    securityProperties = MiniKdc.createConf();
-    miniKdc = new MiniKdc(securityProperties, workDir);
-    miniKdc.start();
-  }
-
-  private void stopMiniKdc() {
-    miniKdc.stop();
-  }
-
-  private void setSecureConfig(Configuration configuration) throws IOException {
-    configuration.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-    configuration.setBoolean(OZONE_ENABLED, true);
-    host = InetAddress.getLocalHost().getCanonicalHostName()
-        .toLowerCase();
-    String realm = miniKdc.getRealm();
-    curUser = UserGroupInformation.getCurrentUser()
-        .getUserName();
-    configuration.set(
-        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
-        "kerberos");
-    configuration.set(OZONE_ADMINISTRATORS, curUser);
-
-    configuration.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
-        "scm/" + host + "@" + realm);
-    configuration.set(ScmConfigKeys.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY,
-        "HTTP_SCM/" + host + "@" + realm);
-
-    configuration.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
-        "om/" + host + "@" + realm);
-    configuration.set(OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY,
-        "HTTP_OM/" + host + "@" + realm);
-
-    scmKeytab = new File(workDir, "scm.keytab");
-    spnegoKeytab = new File(workDir, "http.keytab");
-    omKeyTab = new File(workDir, "om.keytab");
-    testUserKeytab = new File(workDir, "testuser.keytab");
-    testUserPrincipal = "test@" + realm;
-
-    configuration.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
-        scmKeytab.getAbsolutePath());
-    configuration.set(
-        ScmConfigKeys.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY,
-        spnegoKeytab.getAbsolutePath());
-    configuration.set(OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
-        omKeyTab.getAbsolutePath());
-    configuration.set(OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE,
-        spnegoKeytab.getAbsolutePath());
-  }
-
-  @Test
-  public void testSecureScmStartupSuccess() throws Exception {
-
-    initSCM();
-    scm = StorageContainerManager.createSCM(conf);
-    //Reads the SCM Info from SCM instance
-    ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
-    Assert.assertEquals(clusterId, scmInfo.getClusterId());
-    Assert.assertEquals(scmId, scmInfo.getScmId());
-  }
-
-  @Test
-  public void testSCMSecurityProtocol() throws Exception {
-
-    initSCM();
-    scm = HddsTestUtils.getScm(conf);
-    //Reads the SCM Info from SCM instance
-    try {
-      scm.start();
-
-      // Case 1: User with Kerberos credentials should succeed.
-      UserGroupInformation ugi =
-          UserGroupInformation.loginUserFromKeytabAndReturnUGI(
-              testUserPrincipal, testUserKeytab.getCanonicalPath());
-      ugi.setAuthenticationMethod(KERBEROS);
-      SCMSecurityProtocol scmSecurityProtocolClient =
-          HddsClientUtils.getScmSecurityClient(conf, ugi);
-      assertNotNull(scmSecurityProtocolClient);
-      String caCert = scmSecurityProtocolClient.getCACertificate();
-      LambdaTestUtils.intercept(RemoteException.class, "Certificate not found",
-          () -> scmSecurityProtocolClient.getCertificate("1"));
-      assertNotNull(caCert);
-
-      // Case 2: User without Kerberos credentials should fail.
-      ugi = UserGroupInformation.createRemoteUser("test");
-      ugi.setAuthenticationMethod(AuthMethod.TOKEN);
-      SCMSecurityProtocol finalScmSecurityProtocolClient =
-          HddsClientUtils.getScmSecurityClient(conf, ugi);
-
-      LambdaTestUtils.intercept(IOException.class, "Client cannot" +
-              " authenticate via:[KERBEROS]",
-          () -> finalScmSecurityProtocolClient.getCACertificate());
-      LambdaTestUtils.intercept(IOException.class, "Client cannot" +
-              " authenticate via:[KERBEROS]",
-          () -> finalScmSecurityProtocolClient.getCertificate("1"));
-    } finally {
-      if (scm != null) {
-        scm.stop();
-      }
-    }
-  }
-
-  private void initSCM()
-      throws IOException, AuthenticationException {
-
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    omId = UUID.randomUUID().toString();
-
-    final String path = folder.newFolder().toString();
-    Path scmPath = Paths.get(path, "scm-meta");
-    File temp = scmPath.toFile();
-    if(!temp.exists()) {
-      temp.mkdirs();
-    }
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
-    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
-    SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-    scmStore.setClusterId(clusterId);
-    scmStore.setScmId(scmId);
-    // writes the version file properties
-    scmStore.initialize();
-  }
-
-  @Test
-  public void testSecureScmStartupFailure() throws Exception {
-    initSCM();
-    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
-    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
-        "kerberos");
-
-    LambdaTestUtils.intercept(IOException.class,
-        "Running in secure mode, but config doesn't have a keytab",
-        () -> {
-          StorageContainerManager.createSCM(conf);
-        });
-
-    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
-        "scm/_HOST@EXAMPLE.com");
-    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
-        "/etc/security/keytabs/scm.keytab");
-
-    testCommonKerberosFailures(
-        () -> StorageContainerManager.createSCM(conf));
-
-  }
-
-  private void testCommonKerberosFailures(Callable callable) throws Exception {
-    LambdaTestUtils.intercept(KerberosAuthException.class, "failure "
-        + "to login: for principal:", callable);
-    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
-        "OAuth2");
-
-    LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid"
-            + " attribute value for hadoop.security.authentication of OAuth2",
-        callable);
-
-    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
-        "KERBEROS_SSL");
-    LambdaTestUtils.intercept(AuthenticationException.class,
-        "KERBEROS_SSL authentication method not",
-        callable);
-  }
-
-  /**
-   * Tests secure OM initialization failure.
-   *
-   * @throws Exception if setup or the assertions fail
-   */
-  @Test
-  public void testSecureOMInitializationFailure() throws Exception {
-    initSCM();
-    // Create a secure SCM instance as om client will connect to it
-    scm = StorageContainerManager.createSCM(conf);
-    setupOm(conf);
-    conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
-        "non-existent-user@EXAMPLE.com");
-    testCommonKerberosFailures(() -> OzoneManager.createOm(conf));
-  }
-
-  /**
-   * Tests secure OM initialization success.
-   *
-   * @throws Exception if setup or the assertions fail
-   */
-  @Test
-  public void testSecureOmInitializationSuccess() throws Exception {
-    initSCM();
-    // Create a secure SCM instance as om client will connect to it
-    scm = StorageContainerManager.createSCM(conf);
-    LogCapturer logs = LogCapturer.captureLogs(OzoneManager.LOG);
-    GenericTestUtils.setLogLevel(OzoneManager.LOG, INFO);
-
-    setupOm(conf);
-    try {
-      om.start();
-    } catch (Exception ex) {
-      // Expects timeout failure from scmClient in om but om user login via
-      // kerberos should succeed.
-      assertTrue(logs.getOutput().contains("Ozone Manager login"
-          + " successful"));
-    }
-  }
-
-  /**
-   * Performs the following delegation token tests:
-   * 1. Get a valid delegation token.
-   * 2. Test successful token renewal.
-   * 3. Client can authenticate using the token.
-   * 4. Delegation token renewal without Kerberos auth fails.
-   * 5. Test success of token cancellation.
-   * 6. Test failure of token cancellation.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testDelegationToken() throws Exception {
-
-    // Capture logs for assertions
-    LogCapturer logs = LogCapturer.captureLogs(Server.AUDITLOG);
-    LogCapturer omLogs = LogCapturer.captureLogs(OzoneManager.getLogger());
-    GenericTestUtils
-        .setLogLevel(LoggerFactory.getLogger(Server.class.getName()), INFO);
-
-    // Setup secure OM for start
-    setupOm(conf);
-    long omVersion =
-        RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
-    try {
-      // Start OM
-      om.setCertClient(new CertificateClientTestImpl(conf));
-      om.start();
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      String username = ugi.getUserName();
-
-      // Get first OM client which will authenticate via Kerberos
-      omClient = new OzoneManagerProtocolClientSideTranslatorPB(
-          RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
-              OmUtils.getOmAddress(conf), ugi, conf,
-              NetUtils.getDefaultSocketFactory(conf),
-              CLIENT_TIMEOUT), RandomStringUtils.randomAscii(5));
-
-      // Connection setup is lazy, so no successful Kerberos auth is logged yet
-      assertFalse(logs.getOutput().contains(
-          "Auth successful for " + username + " (auth:KERBEROS)"));
-
-      // Case 1: Test successful delegation token.
-      Token<OzoneTokenIdentifier> token = omClient
-          .getDelegationToken(new Text("om"));
-
-      // Case 2: Test successful token renewal.
-      long renewalTime = omClient.renewDelegationToken(token);
-      assertTrue(renewalTime > 0);
-
-      // Check if token is of right kind and renewer is running om instance
-      Assert.assertEquals(token.getKind().toString(), "OzoneToken");
-      Assert.assertEquals(token.getService().toString(),
-          OmUtils.getOmRpcAddress(conf));
-      omClient.close();
-
-      // Create a remote ugi and set its authentication method to Token
-      UserGroupInformation testUser = UserGroupInformation
-          .createRemoteUser(TEST_USER);
-      testUser.addToken(token);
-      testUser.setAuthenticationMethod(AuthMethod.TOKEN);
-      UserGroupInformation.setLoginUser(testUser);
-
-      // Get Om client, this time authentication should happen via Token
-      testUser.doAs(new PrivilegedExceptionAction<Void>() {
-        @Override
-        public Void run() throws Exception {
-          omClient = new OzoneManagerProtocolClientSideTranslatorPB(
-              RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
-                  OmUtils.getOmAddress(conf), testUser, conf,
-                  NetUtils.getDefaultSocketFactory(conf), CLIENT_TIMEOUT),
-              RandomStringUtils.randomAscii(5));
-          return null;
-        }
-      });
-
-      // Case 3: Test Client can authenticate using token.
-      assertFalse(logs.getOutput().contains(
-          "Auth successful for " + username + " (auth:TOKEN)"));
-      OzoneTestUtils.expectOmException(VOLUME_NOT_FOUND,
-          () -> omClient.deleteVolume("vol1"));
-      assertTrue(logs.getOutput().contains("Auth successful for "
-          + username + " (auth:TOKEN)"));
-
-      // Case 4: Test failure of token renewal.
-      // Call to renewDelegationToken will fail but it will confirm that
-      // initial connection via DT succeeded
-      omLogs.clearOutput();
-
-      LambdaTestUtils.intercept(OMException.class, "INVALID_AUTH_METHOD",
-          () -> {
-            try {
-              omClient.renewDelegationToken(token);
-            } catch (OMException ex) {
-              assertTrue(ex.getResult().equals(INVALID_AUTH_METHOD));
-              throw ex;
-            }
-          });
-      assertTrue(logs.getOutput().contains(
-          "Auth successful for " + username + " (auth:TOKEN)"));
-      omLogs.clearOutput();
-      //testUser.setAuthenticationMethod(AuthMethod.KERBEROS);
-      UserGroupInformation.setLoginUser(ugi);
-      omClient = new OzoneManagerProtocolClientSideTranslatorPB(
-          RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
-              OmUtils.getOmAddress(conf), ugi, conf,
-              NetUtils.getDefaultSocketFactory(conf),
-              Client.getRpcTimeout(conf)), RandomStringUtils.randomAscii(5));
-
-      // Case 5: Test success of token cancellation.
-      omClient.cancelDelegationToken(token);
-      omClient.close();
-
-      // Wait for client to timeout
-      Thread.sleep(CLIENT_TIMEOUT);
-
-      assertFalse(logs.getOutput().contains("Auth failed for"));
-
-      // Case 6: Test failure of token cancellation.
-      // Get Om client, this time authentication using Token will fail as
-      // token is not in cache anymore.
-      omClient = new OzoneManagerProtocolClientSideTranslatorPB(
-          RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
-              OmUtils.getOmAddress(conf), testUser, conf,
-              NetUtils.getDefaultSocketFactory(conf),
-              Client.getRpcTimeout(conf)), RandomStringUtils.randomAscii(5));
-      LambdaTestUtils.intercept(OMException.class, "Cancel delegation " +
-              "token failed",
-          () -> {
-            try {
-              omClient.cancelDelegationToken(token);
-            } catch (OMException ex) {
-              assertTrue(ex.getResult().equals(TOKEN_ERROR_OTHER));
-              throw ex;
-            }
-          });
-
-      assertTrue(logs.getOutput().contains("Auth failed for"));
-    } finally {
-      om.stop();
-      om.join();
-    }
-  }
-
-  private void generateKeyPair(OzoneConfiguration config) throws Exception {
-    HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(conf);
-    keyPair = keyGenerator.generateKey();
-    KeyCodec pemWriter = new KeyCodec(new SecurityConfig(config), COMPONENT);
-    pemWriter.writeKey(keyPair, true);
-  }
-
-  /**
-   * Tests delegation token renewal.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testDelegationTokenRenewal() throws Exception {
-    GenericTestUtils
-        .setLogLevel(LoggerFactory.getLogger(Server.class.getName()), INFO);
-    LogCapturer omLogs = LogCapturer.captureLogs(OzoneManager.getLogger());
-
-    // Setup secure OM for start.
-    OzoneConfiguration newConf = new OzoneConfiguration(conf);
-    newConf.setLong(OMConfigKeys.DELEGATION_TOKEN_MAX_LIFETIME_KEY, 500);
-    setupOm(newConf);
-    long omVersion =
-        RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
-    OzoneManager.setTestSecureOmFlag(true);
-    // Start OM
-
-    try {
-      om.setCertClient(new CertificateClientTestImpl(conf));
-      om.start();
-
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-
-      // Get first OM client which will authenticate via Kerberos
-      omClient = new OzoneManagerProtocolClientSideTranslatorPB(RPC.getProxy(
-          OzoneManagerProtocolPB.class, omVersion, OmUtils.getOmAddress(conf),
-          ugi, conf, NetUtils.getDefaultSocketFactory(conf),
-          CLIENT_TIMEOUT), RandomStringUtils.randomAscii(5));
-
-      // Since client is already connected get a delegation token
-      Token<OzoneTokenIdentifier> token = omClient.getDelegationToken(
-          new Text("om"));
-
-      // Check if token is of right kind and renewer is running om instance
-      Assert.assertEquals(token.getKind().toString(), "OzoneToken");
-      Assert.assertEquals(token.getService().toString(), OmUtils
-          .getOmRpcAddress(conf));
-
-      // Renew delegation token
-      long expiryTime = omClient.renewDelegationToken(token);
-      assertTrue(expiryTime > 0);
-      omLogs.clearOutput();
-
-      // Test failure of delegation renewal
-      // 1. When token maxExpiryTime exceeds
-      Thread.sleep(500);
-      LambdaTestUtils.intercept(OMException.class,
-          "TOKEN_EXPIRED",
-          () -> {
-            try {
-              omClient.renewDelegationToken(token);
-            } catch (OMException ex) {
-              assertTrue(ex.getResult().equals(TOKEN_EXPIRED));
-              throw ex;
-            }
-          });
-
-      omLogs.clearOutput();
-
-      // 2. When renewer doesn't match (implicitly covers when renewer is
-      // null or empty)
-      Token token2 = omClient.getDelegationToken(new Text("randomService"));
-      LambdaTestUtils.intercept(OMException.class,
-          "Delegation token renewal failed",
-          () -> omClient.renewDelegationToken(token2));
-      assertTrue(omLogs.getOutput().contains(" with non-matching " +
-          "renewer randomService"));
-      omLogs.clearOutput();
-
-      // 3. Test tampered token
-      OzoneTokenIdentifier tokenId = OzoneTokenIdentifier.readProtoBuf(
-          token.getIdentifier());
-      tokenId.setRenewer(new Text("om"));
-      tokenId.setMaxDate(System.currentTimeMillis() * 2);
-      Token<OzoneTokenIdentifier> tamperedToken = new Token<>(
-          tokenId.getBytes(), token2.getPassword(), token2.getKind(),
-          token2.getService());
-      LambdaTestUtils.intercept(OMException.class,
-          "Delegation token renewal failed",
-          () -> omClient.renewDelegationToken(tamperedToken));
-      assertTrue(omLogs.getOutput().contains("can't be found in " +
-          "cache"));
-      omLogs.clearOutput();
-
-    } finally {
-      om.stop();
-      om.join();
-    }
-  }
-
-  private void setupOm(OzoneConfiguration config) throws Exception {
-    OMStorage omStore = new OMStorage(config);
-    omStore.setClusterId("testClusterId");
-    omStore.setScmId("testScmId");
-    omStore.setOmCertSerialId(omCertSerialId);
-    // writes the version file properties
-    omStore.initialize();
-    OzoneManager.setTestSecureOmFlag(true);
-    om = OzoneManager.createOm(config);
-  }
-
-  @Test
-  public void testGetS3Secret() throws Exception {
-
-    // Setup secure OM for start
-    setupOm(conf);
-    long omVersion =
-        RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
-    try {
-      // Start OM
-      om.setCertClient(new CertificateClientTestImpl(conf));
-      om.start();
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      String username = ugi.getUserName();
-
-      // Get first OM client which will authenticate via Kerberos
-      omClient = new OzoneManagerProtocolClientSideTranslatorPB(
-          RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
-              OmUtils.getOmAddress(conf), ugi, conf,
-              NetUtils.getDefaultSocketFactory(conf),
-              CLIENT_TIMEOUT), RandomStringUtils.randomAscii(5));
-
-      //Creates a secret since it does not exist
-      S3SecretValue firstAttempt = omClient
-          .getS3Secret(UserGroupInformation.getCurrentUser().getUserName());
-
-      //Fetches the secret from db since it was created in previous step
-      S3SecretValue secondAttempt = omClient
-          .getS3Secret(UserGroupInformation.getCurrentUser().getUserName());
-
-      // the secret fetched on both attempts must be the same
-      assertTrue(firstAttempt.getAwsSecret()
-          .equals(secondAttempt.getAwsSecret()));
-
-      // the access key fetched on both attempts must be the same
-      assertTrue(firstAttempt.getAwsAccessKey()
-          .equals(secondAttempt.getAwsAccessKey()));
-
-
-      try {
-        omClient.getS3Secret("HADOOP/JOHNDOE");
-        fail("testGetS3Secret failed");
-      } catch (IOException ex) {
-        GenericTestUtils.assertExceptionContains("USER_MISMATCH", ex);
-      }
-    } finally {
-      if(om != null){
-        om.stop();
-      }
-    }
-  }
-
-  /**
-   * Tests functionality to init secure OM when it is already initialized.
-   */
-  @Test
-  public void testSecureOmReInit() throws Exception {
-    LogCapturer omLogs =
-        LogCapturer.captureLogs(OzoneManager.getLogger());
-    omLogs.clearOutput();
-
-    /*
-     * As all these processes run inside the same JVM, there are issues around
-     * the Hadoop UGI if different processes run with different principals.
-     * In this test, the OM has to contact the SCM to download certs. If the
-     * SCM runs as scm/host@REALM while the OM logs in as om/host@REALM, the
-     * OM cannot contact the SCM due to Kerberos login issues and the test
-     * fails. To work around that, have the OM run as the same principal as
-     * the SCM, and then the test passes.
-     *
-     * TODO: Need to look into this further to see if there is a better way to
-     *       address this problem.
-     */
-    String realm = miniKdc.getRealm();
-    conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
-        "scm/" + host + "@" + realm);
-    omKeyTab = new File(workDir, "scm.keytab");
-    conf.set(OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
-        omKeyTab.getAbsolutePath());
-
-    initSCM();
-    try {
-      scm = HddsTestUtils.getScm(conf);
-      scm.start();
-      conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, false);
-      OMStorage omStore = new OMStorage(conf);
-      initializeOmStorage(omStore);
-      OzoneManager.setTestSecureOmFlag(true);
-      om = OzoneManager.createOm(conf);
-
-      assertNull(om.getCertificateClient());
-      assertFalse(omLogs.getOutput().contains("Init response: GETCERT"));
-      assertFalse(omLogs.getOutput().contains("Successfully stored " +
-          "SCM signed certificate"));
-
-      conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-      OzoneManager.omInit(conf);
-      om.stop();
-      om = OzoneManager.createOm(conf);
-
-      Assert.assertNotNull(om.getCertificateClient());
-      Assert.assertNotNull(om.getCertificateClient().getPublicKey());
-      Assert.assertNotNull(om.getCertificateClient().getPrivateKey());
-      Assert.assertNotNull(om.getCertificateClient().getCertificate());
-      assertTrue(omLogs.getOutput().contains("Init response: GETCERT"));
-      assertTrue(omLogs.getOutput().contains("Successfully stored " +
-          "SCM signed certificate"));
-      X509Certificate certificate = om.getCertificateClient().getCertificate();
-      validateCertificate(certificate);
-
-    } finally {
-      if (scm != null) {
-        scm.stop();
-      }
-    }
-
-  }
-
-  /**
-   * Test functionality to get SCM signed certificate for OM.
-   */
-  @Test
-  public void testSecureOmInitSuccess() throws Exception {
-    LogCapturer omLogs =
-        LogCapturer.captureLogs(OzoneManager.getLogger());
-    omLogs.clearOutput();
-    initSCM();
-    try {
-      scm = HddsTestUtils.getScm(conf);
-      scm.start();
-
-      OMStorage omStore = new OMStorage(conf);
-      initializeOmStorage(omStore);
-      OzoneManager.setTestSecureOmFlag(true);
-      om = OzoneManager.createOm(conf);
-
-      Assert.assertNotNull(om.getCertificateClient());
-      Assert.assertNotNull(om.getCertificateClient().getPublicKey());
-      Assert.assertNotNull(om.getCertificateClient().getPrivateKey());
-      Assert.assertNotNull(om.getCertificateClient().getCertificate());
-      assertTrue(omLogs.getOutput().contains("Init response: GETCERT"));
-      assertTrue(omLogs.getOutput().contains("Successfully stored " +
-          "SCM signed certificate"));
-      X509Certificate certificate = om.getCertificateClient().getCertificate();
-      validateCertificate(certificate);
-      String pemEncodedCACert =
-          scm.getSecurityProtocolServer().getCACertificate();
-      X509Certificate caCert = CertificateCodec.getX509Cert(pemEncodedCACert);
-      X509Certificate caCertStored = om.getCertificateClient()
-          .getCertificate(caCert.getSerialNumber().toString());
-      assertEquals(caCert, caCertStored);
-    } finally {
-      if (scm != null) {
-        scm.stop();
-      }
-      if (om != null) {
-        om.stop();
-      }
-
-    }
-
-  }
-
-  public void validateCertificate(X509Certificate cert) throws Exception {
-
-    // Assert that we indeed have a self-signed certificate.
-    X500Name x500Issuer = new JcaX509CertificateHolder(cert).getIssuer();
-    RDN cn = x500Issuer.getRDNs(BCStyle.CN)[0];
-    String hostName = InetAddress.getLocalHost().getHostName();
-    String scmUser = "scm@" + hostName;
-    Assert.assertEquals(scmUser, cn.getFirst().getValue().toString());
-
-    // Subject name should be the om login user in the real world, but in
-    // this test the UGI has the scm user context.
-    X500Name x500Subject = new JcaX509CertificateHolder(cert).getSubject();
-    RDN subjectCn = x500Subject.getRDNs(BCStyle.CN)[0];
-    Assert.assertEquals(scmUser, subjectCn.getFirst().getValue().toString());
-
-    LocalDate today = LocalDateTime.now().toLocalDate();
-    Date invalidDate;
-
-    // Make sure the end date is honored.
-    invalidDate = java.sql.Date.valueOf(today.plus(1, ChronoUnit.DAYS));
-    assertTrue(cert.getNotAfter().after(invalidDate));
-
-    invalidDate = java.sql.Date.valueOf(today.plus(400, ChronoUnit.DAYS));
-    assertTrue(cert.getNotAfter().before(invalidDate));
-
-    assertTrue(cert.getSubjectDN().toString().contains(scmId));
-    assertTrue(cert.getSubjectDN().toString().contains(clusterId));
-
-    assertTrue(cert.getIssuerDN().toString().contains(scmUser));
-    assertTrue(cert.getIssuerDN().toString().contains(scmId));
-    assertTrue(cert.getIssuerDN().toString().contains(clusterId));
-
-    // Verify that certificate matches the public key.
-    String encodedKey1 = cert.getPublicKey().toString();
-    String encodedKey2 = om.getCertificateClient().getPublicKey().toString();
-    Assert.assertEquals(encodedKey1, encodedKey2);
-  }
-
-  private void initializeOmStorage(OMStorage omStorage) throws IOException {
-    if (omStorage.getState() == Storage.StorageState.INITIALIZED) {
-      return;
-    }
-    omStorage.setClusterId(clusterId);
-    omStorage.setScmId(scmId);
-    omStorage.setOmId(omId);
-    // Initialize ozone certificate client if security is enabled.
-    if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
-      OzoneManager.initializeSecurity(conf, omStorage);
-    }
-    omStorage.initialize();
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
deleted file mode 100644
index ba072f8..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ /dev/null
@@ -1,656 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
-    .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.argThat;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.lang.reflect.Modifier;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
-import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
-import org.apache.hadoop.hdds.scm.container.ReplicationManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.net.DNSToSwitchMapping;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.StaticMapping;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.HddsVersionInfo;
-import org.junit.Assert;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.TemporaryFolder;
-import org.junit.rules.Timeout;
-import org.mockito.ArgumentMatcher;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-/**
- * Test class that exercises the StorageContainerManager.
- */
-public class TestStorageContainerManager {
-  private static XceiverClientManager xceiverClientManager;
-  private static final Logger LOG = LoggerFactory.getLogger(
-            TestStorageContainerManager.class);
-
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @BeforeClass
-  public static void setup() throws IOException {
-    xceiverClientManager = new XceiverClientManager(new OzoneConfiguration());
-  }
-
-  @AfterClass
-  public static void cleanup() {
-    if (xceiverClientManager != null) {
-      xceiverClientManager.close();
-    }
-  }
-
-  @Test
-  public void testRpcPermission() throws Exception {
-    // Test with default configuration
-    OzoneConfiguration defaultConf = new OzoneConfiguration();
-    testRpcPermissionWithConf(defaultConf, "unknownUser", true);
-
-    // Test with ozone.administrators defined in configuration
-    OzoneConfiguration ozoneConf = new OzoneConfiguration();
-    ozoneConf.setStrings(OzoneConfigKeys.OZONE_ADMINISTRATORS,
-        "adminUser1, adminUser2");
-    // Non-admin user will get permission denied.
-    testRpcPermissionWithConf(ozoneConf, "unknownUser", true);
-    // Admin user will pass the permission check.
-    testRpcPermissionWithConf(ozoneConf, "adminUser2", false);
-  }
-
-  private void testRpcPermissionWithConf(
-      OzoneConfiguration ozoneConf, String fakeRemoteUsername,
-      boolean expectPermissionDenied) throws Exception {
-    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(ozoneConf).build();
-    cluster.waitForClusterToBeReady();
-    try {
-
-      SCMClientProtocolServer mockClientServer = Mockito.spy(
-          cluster.getStorageContainerManager().getClientProtocolServer());
-      when(mockClientServer.getRpcRemoteUsername())
-          .thenReturn(fakeRemoteUsername);
-
-      try {
-        mockClientServer.deleteContainer(
-            ContainerTestHelper.getTestContainerID());
-        fail("Operation should fail, expecting an IOException here.");
-      } catch (Exception e) {
-        if (expectPermissionDenied) {
-          verifyPermissionDeniedException(e, fakeRemoteUsername);
-        } else {
-          // If passes permission check, it should fail with
-          // container not exist exception.
-          Assert.assertTrue(e.getMessage()
-              .contains("container doesn't exist"));
-        }
-      }
-
-      try {
-        ContainerWithPipeline container2 = mockClientServer
-            .allocateContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE,  "OZONE");
-        if (expectPermissionDenied) {
-          fail("Operation should fail, expecting an IOException here.");
-        } else {
-          Assert.assertEquals(1, container2.getPipeline().getNodes().size());
-        }
-      } catch (Exception e) {
-        verifyPermissionDeniedException(e, fakeRemoteUsername);
-      }
-
-      try {
-        ContainerWithPipeline container3 = mockClientServer
-            .allocateContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, "OZONE");
-        if (expectPermissionDenied) {
-          fail("Operation should fail, expecting an IOException here.");
-        } else {
-          Assert.assertEquals(1, container3.getPipeline().getNodes().size());
-        }
-      } catch (Exception e) {
-        verifyPermissionDeniedException(e, fakeRemoteUsername);
-      }
-
-      try {
-        mockClientServer.getContainer(
-            ContainerTestHelper.getTestContainerID());
-        fail("Operation should fail, expecting an IOException here.");
-      } catch (Exception e) {
-        if (expectPermissionDenied) {
-          verifyPermissionDeniedException(e, fakeRemoteUsername);
-        } else {
-          // If passes permission check, it should fail with
-          // key not exist exception.
-          Assert.assertTrue(e instanceof ContainerNotFoundException);
-        }
-      }
-    } finally {
-      cluster.shutdown();
-    }
-  }
-
-  private void verifyPermissionDeniedException(Exception e, String userName) {
-    String expectedErrorMessage = "Access denied for user "
-        + userName + ". " + "Superuser privilege is required.";
-    Assert.assertTrue(e instanceof IOException);
-    Assert.assertEquals(expectedErrorMessage, e.getMessage());
-  }
-
-  @Test
-  public void testBlockDeletionTransactions() throws Exception {
-    int numKeys = 5;
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 100,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 100,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
-        3000,
-        TimeUnit.MILLISECONDS);
-    conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
-    conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-        1, TimeUnit.SECONDS);
-    // Reset container provision size, otherwise only one container
-    // is created by default.
-    conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
-        numKeys);
-
-    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
-        .setHbInterval(100)
-        .build();
-    cluster.waitForClusterToBeReady();
-
-    try {
-      DeletedBlockLog delLog = cluster.getStorageContainerManager()
-          .getScmBlockManager().getDeletedBlockLog();
-      Assert.assertEquals(0, delLog.getNumOfValidTransactions());
-
-      // Create {numKeys} randomly named keys.
-      TestStorageContainerManagerHelper helper =
-          new TestStorageContainerManagerHelper(cluster, conf);
-      Map<String, OmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
-      // Wait for container report
-      Thread.sleep(1000);
-      for (OmKeyInfo keyInfo : keyLocations.values()) {
-        OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(),
-            cluster.getStorageContainerManager());
-      }
-
-      Map<Long, List<Long>> containerBlocks = createDeleteTXLog(delLog,
-          keyLocations, helper);
-      Set<Long> containerIDs = containerBlocks.keySet();
-
-      // Verify a few TXs get created in the TX log.
-      Assert.assertTrue(delLog.getNumOfValidTransactions() > 0);
-
-      // Once TXs are written into the log, the SCM starts to fetch TX
-      // entries from the log and schedules block deletions at each HB
-      // interval. After some time all the TXs should be processed, and by
-      // then all known containers will hold no blocks again.
-      GenericTestUtils.waitFor(() -> {
-        try {
-          return delLog.getNumOfValidTransactions() == 0;
-        } catch (IOException e) {
-          return false;
-        }
-      }, 1000, 10000);
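-      // By now every scheduled deletion should have executed, so no blocks
-      // remain in any of the containers.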
-      Assert.assertTrue(helper.getAllBlocks(containerIDs).isEmpty());
-
-      // Continue the work: add some TXs with known container names
-      // but unknown block IDs.
-      for (Long containerID : containerBlocks.keySet()) {
-        // Add 2 TXs per container.
-        delLog.addTransaction(containerID,
-            Collections.singletonList(RandomUtils.nextLong()));
-        delLog.addTransaction(containerID,
-            Collections.singletonList(RandomUtils.nextLong()));
-      }
-
-      // Verify a few TXs get created in the TX log.
-      Assert.assertTrue(delLog.getNumOfValidTransactions() > 0);
-
-      // These blocks cannot be found in the containers, so deleting them
-      // is skipped; eventually these TXs will succeed.
-      GenericTestUtils.waitFor(() -> {
-        try {
-          return delLog.getFailedTransactions().size() == 0;
-        } catch (IOException e) {
-          return false;
-        }
-      }, 1000, 10000);
-    } finally {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testBlockDeletingThrottling() throws Exception {
-    int numKeys = 15;
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
-    conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
-    conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-        100, TimeUnit.MILLISECONDS);
-    conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
-        numKeys);
-
-    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
-        .setHbInterval(1000)
-        .setHbProcessorInterval(3000)
-        .build();
-    cluster.waitForClusterToBeReady();
-
-    try {
-      DeletedBlockLog delLog = cluster.getStorageContainerManager()
-          .getScmBlockManager().getDeletedBlockLog();
-      Assert.assertEquals(0, delLog.getNumOfValidTransactions());
-
-      int limitSize = 1;
-      // Reset the limit to 1, so that only one TX is handled per
-      // datanode.
-      SCMBlockDeletingService delService = cluster.getStorageContainerManager()
-          .getScmBlockManager().getSCMBlockDeletingService();
-      delService.setBlockDeleteTXNum(limitSize);
-
-      // Create {numKeys} randomly named keys.
-      TestStorageContainerManagerHelper helper =
-          new TestStorageContainerManagerHelper(cluster, conf);
-      Map<String, OmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
-      // Wait for container report
-      Thread.sleep(5000);
-      for (OmKeyInfo keyInfo : keyLocations.values()) {
-        OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(),
-            cluster.getStorageContainerManager());
-      }
-
-      createDeleteTXLog(delLog, keyLocations, helper);
-      // Verify a few TXs get created in the TX log.
-      Assert.assertTrue(delLog.getNumOfValidTransactions() > 0);
-
-      // Verify the number of TXs in each delete command matches the limit.
-      GenericTestUtils.waitFor(() -> {
-        NodeManager nodeManager = cluster.getStorageContainerManager()
-            .getScmNodeManager();
-        List<SCMCommand> commands = nodeManager.processHeartbeat(
-            nodeManager.getNodes(NodeState.HEALTHY).get(0));
-
-        if (commands != null) {
-          for (SCMCommand cmd : commands) {
-            if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) {
-              List<DeletedBlocksTransaction> deletedTXs =
-                  ((DeleteBlocksCommand) cmd).blocksTobeDeleted();
-              return deletedTXs != null && deletedTXs.size() == limitSize;
-            }
-          }
-        }
-        return false;
-      }, 500, 10000);
-    } finally {
-      cluster.shutdown();
-    }
-  }
-
-  private Map<Long, List<Long>> createDeleteTXLog(DeletedBlockLog delLog,
-      Map<String, OmKeyInfo> keyLocations,
-      TestStorageContainerManagerHelper helper) throws IOException {
-    // These keys are written into a set of containers; collect the
-    // container IDs and verify the blocks of those containers on the
-    // datanodes.
-    Set<Long> containerNames = new HashSet<>();
-    for (Map.Entry<String, OmKeyInfo> entry : keyLocations.entrySet()) {
-      entry.getValue().getLatestVersionLocations().getLocationList()
-          .forEach(loc -> containerNames.add(loc.getContainerID()));
-    }
-
-    // The total number of blocks in these containers should equal the
-    // total number of blocks created via the key-creation calls.
-    int totalCreatedBlocks = 0;
-    for (OmKeyInfo info : keyLocations.values()) {
-      totalCreatedBlocks += info.getKeyLocationVersions().size();
-    }
-    Assert.assertTrue(totalCreatedBlocks > 0);
-    Assert.assertEquals(totalCreatedBlocks,
-        helper.getAllBlocks(containerNames).size());
-
-    // Create a deletion TX for each key.
-    Map<Long, List<Long>> containerBlocks = Maps.newHashMap();
-    for (OmKeyInfo info : keyLocations.values()) {
-      List<OmKeyLocationInfo> list =
-          info.getLatestVersionLocations().getLocationList();
-      list.forEach(location -> {
-        if (containerBlocks.containsKey(location.getContainerID())) {
-          containerBlocks.get(location.getContainerID())
-              .add(location.getBlockID().getLocalID());
-        } else {
-          List<Long> blks = Lists.newArrayList();
-          blks.add(location.getBlockID().getLocalID());
-          containerBlocks.put(location.getContainerID(), blks);
-        }
-      });
-    }
-    for (Map.Entry<Long, List<Long>> tx : containerBlocks.entrySet()) {
-      delLog.addTransaction(tx.getKey(), tx.getValue());
-    }
-
-    return containerBlocks;
-  }
-
-  @Test
-  public void testSCMInitialization() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    final String path = GenericTestUtils.getTempPath(
-        UUID.randomUUID().toString());
-    Path scmPath = Paths.get(path, "scm-meta");
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
-
-    // This will initialize SCM
-    StorageContainerManager.scmInit(conf, "testClusterId");
-
-    SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-    Assert.assertEquals(NodeType.SCM, scmStore.getNodeType());
-    Assert.assertEquals("testClusterId", scmStore.getClusterID());
-    StorageContainerManager.scmInit(conf, "testClusterIdNew");
-    Assert.assertEquals(NodeType.SCM, scmStore.getNodeType());
-    Assert.assertEquals("testClusterId", scmStore.getClusterID());
-  }
-
-  @Test
-  public void testSCMReinitialization() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    final String path = GenericTestUtils.getTempPath(
-        UUID.randomUUID().toString());
-    Path scmPath = Paths.get(path, "scm-meta");
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
-    //This will set the cluster id in the version file
-    MiniOzoneCluster cluster =
-        MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
-    cluster.waitForClusterToBeReady();
-    try {
-      // This will initialize SCM
-      StorageContainerManager.scmInit(conf, "testClusterId");
-      SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-      Assert.assertEquals(NodeType.SCM, scmStore.getNodeType());
-      Assert.assertNotEquals("testClusterId", scmStore.getClusterID());
-    } finally {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testSCMInitializationFailure()
-      throws IOException, AuthenticationException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    final String path =
-        GenericTestUtils.getTempPath(UUID.randomUUID().toString());
-    Path scmPath = Paths.get(path, "scm-meta");
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
-    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
-    exception.expect(SCMException.class);
-    exception.expectMessage(
-        "SCM not initialized due to storage config failure");
-    StorageContainerManager.createSCM(conf);
-  }
-
-  @Test
-  public void testScmInfo() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    final String path =
-        GenericTestUtils.getTempPath(UUID.randomUUID().toString());
-    try {
-      Path scmPath = Paths.get(path, "scm-meta");
-      conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
-      conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
-      SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-      String clusterId = UUID.randomUUID().toString();
-      String scmId = UUID.randomUUID().toString();
-      scmStore.setClusterId(clusterId);
-      scmStore.setScmId(scmId);
-      // writes the version file properties
-      scmStore.initialize();
-      StorageContainerManager scm = StorageContainerManager.createSCM(conf);
-      //Reads the ScmInfo from the SCM instance
-      ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
-      Assert.assertEquals(clusterId, scmInfo.getClusterId());
-      Assert.assertEquals(scmId, scmInfo.getScmId());
-
-      String expectedVersion = HddsVersionInfo.HDDS_VERSION_INFO.getVersion();
-      String actualVersion = scm.getSoftwareVersion();
-      Assert.assertEquals(expectedVersion, actualVersion);
-    } finally {
-      FileUtils.deleteQuietly(new File(path));
-    }
-  }
-
-  /**
-   * Test that datanode heartbeats are processed correctly with a 4-layer
-   * network topology.
-   */
-  @Test(timeout = 60000)
-  public void testScmProcessDatanodeHeartbeat() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    String scmId = UUID.randomUUID().toString();
-    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
-        StaticMapping.class, DNSToSwitchMapping.class);
-    StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
-        Collections.singleton(HddsUtils.getHostName(conf))).get(0),
-        "/rack1");
-
-    final int datanodeNum = 3;
-    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(datanodeNum)
-        .setScmId(scmId)
-        .build();
-    cluster.waitForClusterToBeReady();
-    StorageContainerManager scm = cluster.getStorageContainerManager();
-
-    try {
-      // first sleep 10s
-      Thread.sleep(10000);
-      // verify datanode heartbeats are processed correctly
-      long heartbeatCheckerIntervalMs =
-          MiniOzoneCluster.Builder.DEFAULT_HB_INTERVAL_MS;
-      long start = Time.monotonicNow();
-      Thread.sleep(heartbeatCheckerIntervalMs * 2);
-
-      List<DatanodeDetails> allNodes = scm.getScmNodeManager().getAllNodes();
-      Assert.assertEquals(datanodeNum, allNodes.size());
-      for (DatanodeDetails node : allNodes) {
-        DatanodeInfo datanodeInfo = (DatanodeInfo) scm.getScmNodeManager()
-            .getNodeByUuid(node.getUuidString());
-        Assert.assertTrue(datanodeInfo.getLastHeartbeatTime() > start);
-        Assert.assertEquals(datanodeInfo.getUuidString(),
-            datanodeInfo.getNetworkName());
-        Assert.assertEquals("/rack1", datanodeInfo.getNetworkLocation());
-      }
-    } finally {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testCloseContainerCommandOnRestart() throws Exception {
-    int numKeys = 15;
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
-    conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
-    conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-        100, TimeUnit.MILLISECONDS);
-    conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
-        numKeys);
-
-    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
-        .setHbInterval(1000)
-        .setHbProcessorInterval(3000)
-        .setTrace(false)
-        .setNumDatanodes(1)
-        .build();
-    cluster.waitForClusterToBeReady();
-
-    try {
-      TestStorageContainerManagerHelper helper =
-          new TestStorageContainerManagerHelper(cluster, conf);
-
-      helper.createKeys(10, 4096);
-      Thread.sleep(5000);
-
-      StorageContainerManager scm = cluster.getStorageContainerManager();
-      List<ContainerInfo> containers = cluster.getStorageContainerManager()
-          .getContainerManager().getContainers();
-      Assert.assertNotNull(containers);
-      ContainerInfo selectedContainer = containers.iterator().next();
-
-      // Stop processing HB
-      scm.getDatanodeProtocolServer().stop();
-
-      scm.getContainerManager().updateContainerState(selectedContainer
-          .containerID(), HddsProtos.LifeCycleEvent.FINALIZE);
-      cluster.restartStorageContainerManager(true);
-      scm = cluster.getStorageContainerManager();
-      EventPublisher publisher = mock(EventPublisher.class);
-      ReplicationManager replicationManager = scm.getReplicationManager();
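-      // Swap the ReplicationManager's private final eventPublisher with a
-      // mock via reflection so the close-container command fired after the
-      // restart can be captured and verified.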
-      Field f = ReplicationManager.class.getDeclaredField("eventPublisher");
-      f.setAccessible(true);
-      Field modifiersField = Field.class.getDeclaredField("modifiers");
-      modifiersField.setAccessible(true);
-      modifiersField.setInt(f, f.getModifiers() & ~Modifier.FINAL);
-      f.set(replicationManager, publisher);
-      scm.getReplicationManager().start();
-      Thread.sleep(2000);
-
-      UUID dnUuid = cluster.getHddsDatanodes().iterator().next()
-          .getDatanodeDetails().getUuid();
-
-      CloseContainerCommand closeContainerCommand =
-          new CloseContainerCommand(selectedContainer.getContainerID(),
-              selectedContainer.getPipelineID(), false);
-
-      CommandForDatanode commandForDatanode = new CommandForDatanode(
-          dnUuid, closeContainerCommand);
-
-      verify(publisher).fireEvent(eq(SCMEvents.DATANODE_COMMAND), argThat(new
-          CloseContainerCommandMatcher(dnUuid, commandForDatanode)));
-    } finally {
-      cluster.shutdown();
-    }
-  }
-
-  @SuppressWarnings("visibilitymodifier")
-  static class CloseContainerCommandMatcher
-      extends ArgumentMatcher<CommandForDatanode> {
-
-    private final CommandForDatanode cmd;
-    private final UUID uuid;
-
-    CloseContainerCommandMatcher(UUID uuid, CommandForDatanode cmd) {
-      this.uuid = uuid;
-      this.cmd = cmd;
-    }
-
-    @Override
-    public boolean matches(Object argument) {
-      CommandForDatanode cmdRight = (CommandForDatanode) argument;
-      CloseContainerCommand left = (CloseContainerCommand) cmd.getCommand();
-      CloseContainerCommand right =
-          (CloseContainerCommand) cmdRight.getCommand();
-      return cmdRight.getDatanodeId().equals(uuid)
-          && left.getContainerID() == right.getContainerID()
-          && left.getPipelineID().equals(right.getPipelineID())
-          && left.getType() == right.getType()
-          && left.getProto().equals(right.getProto());
-    }
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
deleted file mode 100644
index 9beddd4..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.google.common.primitives.Longs;
-import org.apache.commons.lang3.RandomStringUtils;
-
-/**
- * A helper class used by {@link TestStorageContainerManager} to generate
- * keys and to verify container and block locations.
- */
-public class TestStorageContainerManagerHelper {
-
-  private final MiniOzoneCluster cluster;
-  private final Configuration conf;
-
-  public TestStorageContainerManagerHelper(MiniOzoneCluster cluster,
-      Configuration conf) throws IOException {
-    this.cluster = cluster;
-    this.conf = conf;
-  }
-
-  public Map<String, OmKeyInfo> createKeys(int numOfKeys, int keySize)
-      throws Exception {
-    Map<String, OmKeyInfo> keyLocationMap = Maps.newHashMap();
-
-    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
-    // Write numOfKeys keys into the bucket.
-    Set<String> keyNames = Sets.newHashSet();
-    for (int i = 0; i < numOfKeys; i++) {
-      String keyName = RandomStringUtils.randomAlphabetic(5) + i;
-      keyNames.add(keyName);
-
-      TestDataUtil
-          .createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5));
-    }
-
-    for (String key : keyNames) {
-      OmKeyArgs arg = new OmKeyArgs.Builder()
-          .setVolumeName(bucket.getVolumeName())
-          .setBucketName(bucket.getName())
-          .setKeyName(key)
-          .setRefreshPipeline(true)
-          .build();
-      OmKeyInfo location = cluster.getOzoneManager()
-          .lookupKey(arg);
-      keyLocationMap.put(key, location);
-    }
-    return keyLocationMap;
-  }
-
-  public List<String> getPendingDeletionBlocks(Long containerID)
-      throws IOException {
-    List<String> pendingDeletionBlocks = Lists.newArrayList();
-    ReferenceCountedDB meta = getContainerMetadata(containerID);
-    KeyPrefixFilter filter =
-        new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX);
-    List<Map.Entry<byte[], byte[]>> kvs = meta.getStore()
-        .getRangeKVs(null, Integer.MAX_VALUE, filter);
-    kvs.forEach(entry -> {
-      String key = DFSUtil.bytes2String(entry.getKey());
-      pendingDeletionBlocks
-          .add(key.replace(OzoneConsts.DELETING_KEY_PREFIX, ""));
-    });
-    meta.close();
-    return pendingDeletionBlocks;
-  }
-
-  public List<Long> getAllBlocks(Set<Long> containerIDs)
-      throws IOException {
-    List<Long> allBlocks = Lists.newArrayList();
-    for (Long containerID : containerIDs) {
-      allBlocks.addAll(getAllBlocks(containerID));
-    }
-    return allBlocks;
-  }
-
-  public List<Long> getAllBlocks(Long containerID) throws IOException {
-    List<Long> allBlocks = Lists.newArrayList();
-    ReferenceCountedDB meta = getContainerMetadata(containerID);
-    List<Map.Entry<byte[], byte[]>> kvs =
-        meta.getStore().getRangeKVs(null, Integer.MAX_VALUE,
-            MetadataKeyFilters.getNormalKeyFilter());
-    kvs.forEach(entry -> {
-      allBlocks.add(Longs.fromByteArray(entry.getKey()));
-    });
-    meta.close();
-    return allBlocks;
-  }
-
-  private ReferenceCountedDB getContainerMetadata(Long containerID)
-      throws IOException {
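-    // Look up the container's pipeline and read its metadata DB from the
-    // first datanode that hosts a replica.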
-    ContainerWithPipeline containerWithPipeline = cluster
-        .getStorageContainerManager().getClientProtocolServer()
-        .getContainerWithPipeline(containerID);
-
-    DatanodeDetails dn =
-        containerWithPipeline.getPipeline().getFirstNode();
-    OzoneContainer containerServer =
-        getContainerServerByDatanodeUuid(dn.getUuidString());
-    KeyValueContainerData containerData =
-        (KeyValueContainerData) containerServer.getContainerSet()
-        .getContainer(containerID).getContainerData();
-    return BlockUtils.getDB(containerData, conf);
-  }
-
-  private OzoneContainer getContainerServerByDatanodeUuid(String dnUUID)
-      throws IOException {
-    for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
-      if (dn.getDatanodeDetails().getUuidString().equals(dnUUID)) {
-        return dn.getDatanodeStateMachine().getContainer();
-      }
-    }
-    throw new IOException("Unable to get the ozone container "
-        + "for the given datanode ID " + dnUUID);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/chaos/TestProbability.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/chaos/TestProbability.java
deleted file mode 100644
index 41b8e56..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/chaos/TestProbability.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.chaos;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.RandomUtils;
-
-/**
- * This class is used to decide whether a certain event should occur.
- * Every event is assigned a probability, and the isTrue function returns
- * true when that probability is met.
- */
-public final class TestProbability {
-  private int pct;
-
-  private TestProbability(int pct) {
-    Preconditions.checkArgument(pct <= 100 && pct > 0);
-    this.pct = pct;
-  }
-
-  public boolean isTrue() {
-    // nextInt(0, 100) is uniform over [0, 100), so this returns true
-    // with probability pct/100.
-    return RandomUtils.nextInt(0, 100) < pct;
-  }
-
-  public static TestProbability valueOf(int pct) {
-    return new TestProbability(pct);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java
deleted file mode 100644
index d05093f..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.client;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
-import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate;
-import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
-import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
-
-import java.io.InputStream;
-import java.security.KeyPair;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.cert.CertStore;
-import java.security.cert.X509Certificate;
-import java.time.LocalDate;
-import java.time.temporal.ChronoUnit;
-import java.util.List;
-
-/**
- * Test implementation for CertificateClient. To be used only for test
- * purposes.
- */
-
-public class CertificateClientTestImpl implements CertificateClient {
-
-  private final SecurityConfig securityConfig;
-  private final KeyPair keyPair;
-  private final Configuration config;
-  private final X509Certificate x509Certificate;
-
-  public CertificateClientTestImpl(OzoneConfiguration conf) throws Exception {
-    securityConfig = new SecurityConfig(conf);
-    HDDSKeyGenerator keyGen =
-        new HDDSKeyGenerator(securityConfig.getConfiguration());
-    keyPair = keyGen.generateKey();
-    config = conf;
-    SelfSignedCertificate.Builder builder =
-        SelfSignedCertificate.newBuilder()
-            .setBeginDate(LocalDate.now())
-            .setEndDate(LocalDate.now().plus(365, ChronoUnit.DAYS))
-            .setClusterID("cluster1")
-            .setKey(keyPair)
-            .setSubject("localhost")
-            .setConfiguration(config)
-            .setScmID("TestScmId1")
-            .makeCA();
-    X509CertificateHolder certificateHolder = builder.build();
-    x509Certificate = new JcaX509CertificateConverter().getCertificate(
-        certificateHolder);
-  }
-
-  @Override
-  public PrivateKey getPrivateKey() {
-    return keyPair.getPrivate();
-  }
-
-  @Override
-  public PublicKey getPublicKey() {
-    return keyPair.getPublic();
-  }
-
-  /**
-   * Returns the certificate of the specified component if it exists on the
-   * local system.
-   *
-   * @return certificate or null if there is no data.
-   */
-  @Override
-  public X509Certificate getCertificate(String certSerialId)
-      throws CertificateException {
-    return x509Certificate;
-  }
-
-  @Override
-  public X509Certificate getCertificate() {
-    return x509Certificate;
-  }
-
-  @Override
-  public X509Certificate getCACertificate() {
-    return x509Certificate;
-  }
-
-  @Override
-  public boolean verifyCertificate(X509Certificate certificate) {
-    return true;
-  }
-
-  @Override
-  public byte[] signDataStream(InputStream stream)
-      throws CertificateException {
-    return new byte[0];
-  }
-
-  @Override
-  public byte[] signData(byte[] data) throws CertificateException {
-    return new byte[0];
-  }
-
-  @Override
-  public boolean verifySignature(InputStream stream, byte[] signature,
-      X509Certificate cert) throws CertificateException {
-    return true;
-  }
-
-  @Override
-  public boolean verifySignature(byte[] data, byte[] signature,
-      X509Certificate cert) throws CertificateException {
-    return true;
-  }
-
-  @Override
-  public CertificateSignRequest.Builder getCSRBuilder() {
-    return new CertificateSignRequest.Builder();
-  }
-
-  @Override
-  public X509Certificate queryCertificate(String query) {
-    return null;
-  }
-
-  @Override
-  public void storeCertificate(String cert, boolean force)
-      throws CertificateException {
-  }
-
-  @Override
-  public void storeCertificate(String cert, boolean force, boolean caCert)
-      throws CertificateException {
-  }
-
-  /**
-   * Stores the trusted chain of certificates for a specific component.
-   *
-   * @param keyStore - Cert Store.
-   * @throws CertificateException - on Error.
-   */
-  @Override
-  public void storeTrustChain(CertStore keyStore) throws CertificateException {
-
-  }
-
-  @Override
-  public void storeTrustChain(List<X509Certificate> certificates)
-      throws CertificateException {
-
-  }
-
-  @Override
-  public InitResponse init() throws CertificateException {
-    return null;
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/package-info.java
deleted file mode 100644
index b1023e8..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-/**
- * Ozone Client tests.
- */
-package org.apache.hadoop.ozone.client;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
deleted file mode 100644
index cf570d2..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientRatis;
-import org.apache.hadoop.hdds.scm.XceiverClientReply;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
-
-/**
- * This class tests the 2 way commit in Ratis.
- */
-public class Test2WayCommitInRatis {
-
-  private MiniOzoneCluster cluster;
-  private OzoneClient client;
-  private ObjectStore objectStore;
-  private String volumeName;
-  private String bucketName;
-  private int chunkSize;
-  private int flushSize;
-  private int maxFlushSize;
-  private int blockSize;
-  private StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private static String containerOwner = "OZONE";
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  private void startCluster(OzoneConfiguration conf) throws Exception {
-    chunkSize = 100;
-    flushSize = 2 * chunkSize;
-    maxFlushSize = 2 * flushSize;
-    blockSize = 2 * maxFlushSize;
-
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY,
-        1, TimeUnit.SECONDS);
-
-    conf.setQuietMode(false);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(7)
-        .setBlockSize(blockSize)
-        .setChunkSize(chunkSize)
-        .setStreamBufferFlushSize(flushSize)
-        .setStreamBufferMaxSize(maxFlushSize)
-        .setStreamBufferSizeUnit(StorageUnit.BYTES)
-        .build();
-    cluster.waitForClusterToBeReady();
-    //the easiest way to create an open container is to create a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    volumeName = "watchforcommithandlingtest";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-    storageContainerLocationClient = cluster
-        .getStorageContainerLocationClient();
-  }
-
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  private void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-
-  @Test
-  public void test2WayCommitForRetryfailure() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 20,
-        TimeUnit.SECONDS);
-    conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 20);
-    startCluster(conf);
-    GenericTestUtils.LogCapturer logCapturer =
-        GenericTestUtils.LogCapturer.captureLogs(XceiverClientRatis.LOG);
-    XceiverClientManager clientManager = new XceiverClientManager(conf);
-
-    ContainerWithPipeline container1 = storageContainerLocationClient
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
-    XceiverClientSpi xceiverClient = clientManager
-        .acquireClient(container1.getPipeline());
-    Assert.assertEquals(1, xceiverClient.getRefcount());
-    Assert.assertEquals(container1.getPipeline(),
-        xceiverClient.getPipeline());
-    Pipeline pipeline = xceiverClient.getPipeline();
-    XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
-    XceiverClientReply reply = xceiverClient.sendCommandAsync(
-        ContainerTestHelper.getCreateContainerRequest(
-            container1.getContainerInfo().getContainerID(),
-            xceiverClient.getPipeline()));
-    reply.getResponse().get();
-    Assert.assertEquals(3, ratisClient.getCommitInfoMap().size());
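-    // Shut down one replica so the next command can only be acknowledged by
-    // the remaining two nodes, forcing a 2-way (majority) commit.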
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
-    reply = xceiverClient.sendCommandAsync(ContainerTestHelper
-        .getCloseContainer(pipeline,
-            container1.getContainerInfo().getContainerID()));
-    reply.getResponse().get();
-    xceiverClient.watchForCommit(reply.getLogIndex(), 20000);
-
-    // the commitInfoMap will be reduced to 2 entries here
-    Assert.assertEquals(2, ratisClient.getCommitInfoMap().size());
-    clientManager.releaseClient(xceiverClient, false);
-    Assert.assertTrue(logCapturer.getOutput().contains("3 way commit failed"));
-    Assert
-        .assertTrue(logCapturer.getOutput().contains("Committed by majority"));
-    logCapturer.stopCapturing();
-    shutdown();
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
deleted file mode 100644
index 623b11d..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.
-    HDDS_COMMAND_STATUS_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys.
-    HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.
-    HDDS_SCM_WATCHER_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.
-    OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * Tests the validity of the BCSID of a container.
- */
-public class TestBCSID {
-
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-  private static MiniOzoneCluster cluster;
-  private static OzoneClient client;
-  private static ObjectStore objectStore;
-  private static String volumeName;
-  private static String bucketName;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    String path = GenericTestUtils
-        .getTempPath(TestBCSID.class.getSimpleName());
-    File baseDir = new File(path);
-    baseDir.mkdirs();
-
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
-    conf.setQuietMode(false);
-    cluster =
-        MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).setHbInterval(200)
-            .build();
-    cluster.waitForClusterToBeReady();
-    // the easiest way to create an open container is to create a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    volumeName = "bcsid";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-  }
-
-  /**
-   * Shutdown MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testBCSID() throws Exception {
-    OzoneOutputStream key =
-        objectStore.getVolume(volumeName).getBucket(bucketName)
-            .createKey("ratis", 1024, ReplicationType.RATIS,
-                ReplicationFactor.ONE, new HashMap<>());
-    key.write("ratis".getBytes());
-    key.close();
-
-    // get the name of a valid container.
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).
-        setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName("ratis")
-        .setRefreshPipeline(true)
-        .build();
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    List<OmKeyLocationInfo> keyLocationInfos =
-        keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
-    Assert.assertEquals(1, keyLocationInfos.size());
-    OmKeyLocationInfo omKeyLocationInfo = keyLocationInfos.get(0);
-
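-    // read the BCSID recorded in the datanode's container report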
-    long blockCommitSequenceId =
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet()
-            .getContainer(omKeyLocationInfo.getContainerID())
-            .getContainerReport().getBlockCommitSequenceId();
-    Assert.assertTrue(blockCommitSequenceId > 0);
-
-    // make sure the persisted block id in OM is the same as that seen in the
-    // container report to be reported to SCM.
-    Assert.assertEquals(blockCommitSequenceId,
-        omKeyLocationInfo.getBlockCommitSequenceId());
-
-    // verify that on restarting the datanode, it reloads the BCSID correctly.
-    cluster.restartHddsDatanode(0, true);
-    Assert.assertEquals(blockCommitSequenceId,
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet()
-            .getContainer(omKeyLocationInfo.getContainerID())
-            .getContainerReport().getBlockCommitSequenceId());
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java
deleted file mode 100644
index 399b977..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java
+++ /dev/null
@@ -1,696 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
-import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * Tests BlockOutputStream class.
- */
-public class TestBlockOutputStream {
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-  private static OzoneClient client;
-  private static ObjectStore objectStore;
-  private static int chunkSize;
-  private static int flushSize;
-  private static int maxFlushSize;
-  private static int blockSize;
-  private static String volumeName;
-  private static String bucketName;
-  private static String keyString;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    chunkSize = 100;
-    flushSize = 2 * chunkSize;
-    maxFlushSize = 2 * flushSize;
-    blockSize = 2 * maxFlushSize;
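-    // with chunkSize = 100, this yields flushSize = 200, maxFlushSize = 400
-    // and blockSize = 800 bytes (StorageUnit.BYTES is used below)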
-    conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms");
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
-    conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE");
-    conf.setQuietMode(false);
-    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
-        StorageUnit.MB);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(7)
-        .setBlockSize(blockSize)
-        .setChunkSize(chunkSize)
-        .setStreamBufferFlushSize(flushSize)
-        .setStreamBufferMaxSize(maxFlushSize)
-        .setStreamBufferSizeUnit(StorageUnit.BYTES)
-        .build();
-    cluster.waitForClusterToBeReady();
-    // the easiest way to create an open container is to create a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    keyString = UUID.randomUUID().toString();
-    volumeName = "testblockoutputstream";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-  }
-
-  private String getKeyName() {
-    return UUID.randomUUID().toString();
-  }
-
-  /**
-   * Shutdown MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testBufferCaching() throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long putBlockCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =  metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount = metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    int dataLength = 50;
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream();
-
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
-    OutputStream stream = keyOutputStream.getStreamEntries().get(0)
-        .getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-    // we have just written less than a chunk's worth of data, so it will just
-    // sit in the buffer, with only one buffer allocated in the buffer pool
-
-    Assert.assertEquals(1, blockOutputStream.getBufferPool().getSize());
-    // Just the writtenDataLength will be updated here
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    // no data has been flushed yet
-    Assert.assertEquals(0, blockOutputStream.getTotalDataFlushedLength());
-    Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength());
-    Assert.assertEquals(pendingWriteChunkCount,
-        XceiverClientManager.getXceiverClientMetrics()
-            .getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        XceiverClientManager.getXceiverClientMetrics()
-            .getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-
-    // the commitIndex2flushedDataMap will be empty here
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().isEmpty());
-
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-
-    // flush is a sync call; all pending operations will complete
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    // we have just written less than a chunk's worth of data, so it will just
-    // sit in the buffer, with only one buffer allocated in the buffer pool
-
-    Assert.assertEquals(1, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(0,
-        blockOutputStream.getBufferPool().getBuffer(0).position());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    Assert.assertEquals(0,
-        blockOutputStream.getCommitIndex2flushedDataMap().size());
-
-    // flush ensures watchForCommit updates the total length acknowledged
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-
-    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
-    // now close the stream; it will update the ack length after watchForCommit
-    key.close();
-
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 1,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 1,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
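-    // 1 writeChunk + 1 putBlock account for the +2 total ops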
-    Assert.assertEquals(totalOpCount + 2,
-        metrics.getTotalOpCount());
-
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
-    validateData(keyName, data1);
-  }
-
-  @Test
-  public void testFlushChunk() throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long putBlockCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =  metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount = metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    int dataLength = flushSize;
-    // write data equal to 2 chunks
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-    Assert.assertEquals(pendingWriteChunkCount + 2,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount + 1,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream();
-
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
-    OutputStream stream = keyOutputStream.getStreamEntries().get(0)
-        .getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-    // we have just written data equal to the flush size (2 chunks); at this
-    // time the buffer pool will have 2 buffers allocated, each of chunk size
-
-    Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
-    // writtenDataLength as well as flushedDataLength will be updated here
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength());
-
-    Assert.assertEquals(0,
-        blockOutputStream.getCommitIndex2flushedDataMap().size());
-
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
-    // flush is a sync call; all pending operations will complete
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-
-    // Since the data in the buffer is already flushed, flush here will have
-    // no impact on the counters and data structures
-
-    Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    Assert.assertEquals(0,
-        blockOutputStream.getCommitIndex2flushedDataMap().size());
-
-    // flush ensures watchForCommit updates the total length acknowledged
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    // now close the stream; it will update the ack length after watchForCommit
-    key.close();
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 1,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
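-    // 2 writeChunks + 1 putBlock account for the +3 total ops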
-    Assert.assertEquals(totalOpCount + 3,
-        metrics.getTotalOpCount());
-    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
-    validateData(keyName, data1);
-  }
-
-  @Test
-  public void testMultiChunkWrite() throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long putBlockCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =  metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount = metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    int dataLength = chunkSize + 50;
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-    Assert.assertEquals(pendingWriteChunkCount + 1,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream();
-
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
-    OutputStream stream = keyOutputStream.getStreamEntries().get(0)
-        .getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-    // we have just written more than 1 chunk but less than the flush size;
-    // the buffer pool will have 2 buffers allocated, each of chunk size
-
-    Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
-    // only the writtenDataLength will be updated here
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    // since the data written is still less than the flush size, the flushed
-    // data length will still be 0.
-    Assert.assertEquals(0,
-        blockOutputStream.getTotalDataFlushedLength());
-    Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength());
-
-    Assert.assertEquals(0,
-        blockOutputStream.getCommitIndex2flushedDataMap().size());
-
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-    Assert.assertEquals(writeChunkCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 1,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-
-    Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    Assert.assertEquals(0,
-        blockOutputStream.getCommitIndex2flushedDataMap().size());
-
-    // flush ensures watchForCommit updates the total length acknowledged
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-
-    // now close the stream; it will update the ack length after watchForCommit
-    key.close();
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 1,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 3,
-        metrics.getTotalOpCount());
-    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
-    validateData(keyName, data1);
-  }
-
-  @Test
-  public void testMultiChunkWrite2() throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long putBlockCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =  metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount = metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    int dataLength = flushSize + 50;
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-    Assert.assertEquals(pendingWriteChunkCount + 2,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount + 1,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream();
-
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
-    OutputStream stream = keyOutputStream.getStreamEntries().get(0)
-        .getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-    // we have just written more data than the flush size (2 chunks); at this
-    // time the buffer pool will have 3 buffers allocated, each of chunk size
-
-    Assert.assertEquals(3, blockOutputStream.getBufferPool().getSize());
-    // writtenDataLength as well as flushedDataLength will be updated here
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(flushSize,
-        blockOutputStream.getTotalDataFlushedLength());
-    Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength());
-
-    Assert.assertEquals(0,
-        blockOutputStream.getCommitIndex2flushedDataMap().size());
-
-    Assert.assertEquals(flushSize,
-        blockOutputStream.getTotalDataFlushedLength());
-    Assert.assertEquals(0,
-        blockOutputStream.getCommitIndex2flushedDataMap().size());
-
-    Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength());
-    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
-    key.close();
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 3,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
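-    // 3 writeChunks + 2 putBlocks account for the +5 total ops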
-    Assert.assertEquals(totalOpCount + 5,
-        metrics.getTotalOpCount());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
-    validateData(keyName, data1);
-  }
-
-  @Test
-  public void testFullBufferCondition() throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long putBlockCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =  metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount = metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    int dataLength = maxFlushSize;
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-
-    // since it's hitting the full buffer condition, it will call watchForCommit
-    // and complete at least the putBlock for the first flushSize worth of data
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)
-            <= pendingWriteChunkCount + 2);
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)
-            <= pendingPutBlockCount + 1);
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream();
-
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
-    OutputStream stream = keyOutputStream.getStreamEntries().get(0)
-        .getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    // writtenDataLength as well as flushedDataLength will be updated here
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(maxFlushSize,
-        blockOutputStream.getTotalDataFlushedLength());
-
-    // since data equal to the max buffer size has been written, this will be
-    // a blocking call and hence will wait for at least flushSize worth of
-    // data to get ack'd by all servers right here
-    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
-
-    // watchForCommit will clean up at least one entry from the map, where
-    // each entry corresponds to flushSize worth of data
-
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1);
-
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-
-    // Since the data in the buffer is already flushed, flush here will have
-    // no impact on the counters and data structures
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1);
-
-    // now close the stream; it will update the ack length after watchForCommit
-    key.close();
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 4,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 6,
-        metrics.getTotalOpCount());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
-    validateData(keyName, data1);
-  }
-
-  @Test
-  public void testWriteWithExceedingMaxBufferLimit() throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long putBlockCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =  metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount = metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    int dataLength = maxFlushSize + 50;
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream();
-
-    // since it's hitting the full buffer condition, it will call watchForCommit
-    // and complete at least the putBlock for the first flushSize worth of data
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)
-            <= pendingWriteChunkCount + 2);
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)
-            <= pendingPutBlockCount + 1);
-    Assert.assertEquals(writeChunkCount + 4,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 6,
-        metrics.getTotalOpCount());
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
-    OutputStream stream = keyOutputStream.getStreamEntries().get(0)
-        .getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    // writtenDataLength as well as flushedDataLength will be updated here
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(maxFlushSize,
-        blockOutputStream.getTotalDataFlushedLength());
-
-    // since data equal to the max buffer size has been written, this will be
-    // a blocking call and hence will wait for at least flushSize worth of
-    // data to get ack'd by all servers right here
-    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
-
-    // watchForCommit will clean up at least one entry from the map, where
-    // each entry corresponds to flushSize worth of data
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1);
-
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-
-    // Since the data in the buffer is already flushed, flush here will have
-    // no impact on the counters and data structures
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    // flush will make sure one more entry gets added to the map
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2);
-
-    // now close the stream; it will update the ack length after watchForCommit
-    key.close();
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 5,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 3,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 8,
-        metrics.getTotalOpCount());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
-    validateData(keyName, data1);
-  }
-
-  private OzoneOutputStream createKey(String keyName, ReplicationType type,
-      long size) throws Exception {
-    return ContainerTestHelper
-        .createKey(keyName, type, size, objectStore, volumeName, bucketName);
-  }
-  private void validateData(String keyName, byte[] data) throws Exception {
-    ContainerTestHelper
-        .validateData(keyName, data, objectStore, volumeName, bucketName);
-  }
-
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
deleted file mode 100644
index 8649837..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
+++ /dev/null
@@ -1,1218 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
-import org.apache.hadoop.hdds.scm.XceiverClientRatis;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.ratis.protocol.GroupMismatchException;
-import org.apache.ratis.protocol.RaftRetryFailureException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * Tests failure detection and handling in BlockOutputStream Class.
- */
-public class TestBlockOutputStreamWithFailures {
-
-  private static MiniOzoneCluster cluster;
-  private OzoneConfiguration conf = new OzoneConfiguration();
-  private OzoneClient client;
-  private ObjectStore objectStore;
-  private int chunkSize;
-  private int flushSize;
-  private int maxFlushSize;
-  private int blockSize;
-  private String volumeName;
-  private String bucketName;
-  private String keyString;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @Before
-  public void init() throws Exception {
-    chunkSize = 100;
-    flushSize = 2 * chunkSize;
-    maxFlushSize = 2 * flushSize;
-    blockSize = 2 * maxFlushSize;
-    conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "1s");
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 5, TimeUnit.SECONDS);
-    conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE");
-    conf.setQuietMode(false);
-    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
-        StorageUnit.MB);
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(7)
-        .setBlockSize(blockSize).setChunkSize(chunkSize)
-        .setStreamBufferFlushSize(flushSize)
-        .setStreamBufferMaxSize(maxFlushSize)
-        .setStreamBufferSizeUnit(StorageUnit.BYTES).build();
-    cluster.waitForClusterToBeReady();
-    // the easiest way to create an open container is to create a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    keyString = UUID.randomUUID().toString();
-    volumeName = "testblockoutputstream";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-  }
-
-  private String getKeyName() {
-    return UUID.randomUUID().toString();
-  }
-
-  /**
-   * Shutdown MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testWatchForCommitWithCloseContainerException()
-      throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
-    long putBlockCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    int dataLength = maxFlushSize + 50;
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-
-    // since it's hitting the full buffer condition, it will call watchForCommit
-    // and complete at least the putBlock for the first flushSize worth of data
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)
-            <= pendingWriteChunkCount + 2);
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)
-            <= pendingPutBlockCount + 1);
-    Assert.assertEquals(writeChunkCount + 4,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 6, metrics.getTotalOpCount());
-
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
-
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
-    OutputStream stream =
-        keyOutputStream.getStreamEntries().get(0).getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-    // we have just written more data than the max flush size (4 chunks); at
-    // this time the buffer pool will have 4 buffers allocated, each of chunk size
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    // writtenDataLength as well as flushedDataLength will be updated here
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(maxFlushSize,
-        blockOutputStream.getTotalDataFlushedLength());
-
-    // since data equal to the max buffer size has been written, this will be
-    // a blocking call and hence will wait for at least flushSize worth of
-    // data to get ack'd by all servers right here
-    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
-
-    // watchForCommit will clean up at least one entry from the map, where
-    // each entry corresponds to flushSize worth of data
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1);
-
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 5,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 3,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
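-    // 5 writeChunks + 3 putBlocks account for the +8 total ops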
-    Assert.assertEquals(totalOpCount + 8, metrics.getTotalOpCount());
-
-    // flush is a sync call; all pending operations will complete
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-
-    // Since the data in the buffer is already flushed, flush here will have
-    // no impact on the counters and data structures
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    // flush will make sure one more entry gets added to the map
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2);
-
-    XceiverClientRatis raftClient =
-        (XceiverClientRatis) blockOutputStream.getXceiverClient();
-    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
-    // Close the containers on the Datanode and write more data
-    ContainerTestHelper.waitForContainerClose(key, cluster);
-    // 4 writeChunks (maxFlushSize worth of data) and 2 putBlocks will be
-    // discarded here once the exception is hit
-    key.write(data1);
-
-    // As part of handling the exception, the 4 failed writeChunks will be
-    // rewritten, plus one partial chunk, two putBlocks for the flushSize
-    // worth of data and one putBlock for the partial chunk
-    key.flush();
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
-    Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream
-        .getIoException()) instanceof ContainerNotOpenException);
-
-    // Make sure the retryCount is reset after the exception is handled
-    Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
-    // commitInfoMap will remain intact as there is no server failure
-    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
-    // now close the stream; it will update the ack length after watchForCommit
-    key.close();
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 14,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 8,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
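-    // 14 writeChunks + 8 putBlocks account for the +22 total ops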
-    Assert.assertEquals(totalOpCount + 22, metrics.getTotalOpCount());
-    // Written the same data twice
-    String dataString = new String(data1, UTF_8);
-    validateData(keyName, dataString.concat(dataString).getBytes());
-  }
-
-  @Test
-  public void testWatchForCommitDatanodeFailure() throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
-    long putBlockCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    int dataLength = maxFlushSize + 50;
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-    // since it's hitting the full buffer condition, it will call watchForCommit
-    // and complete at least the putBlock for the first flushSize worth of data
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)
-            <= pendingWriteChunkCount + 2);
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)
-            <= pendingPutBlockCount + 1);
-    Assert.assertEquals(writeChunkCount + 4,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 6, metrics.getTotalOpCount());
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
-
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
-    OutputStream stream =
-        keyOutputStream.getStreamEntries().get(0).getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-    // we have just written more data than the max flush size (4 chunks); at
-    // this time the buffer pool will have 4 buffers allocated, each of chunk size
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    // writtenDataLength as well as flushedDataLength will be updated here
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    // data up to the max flush size has already been flushed out, so the
-    // flushed data length equals maxFlushSize here
-    Assert.assertEquals(maxFlushSize,
-        blockOutputStream.getTotalDataFlushedLength());
-
-    // since data equal to the max buffer size has been written, this will be
-    // a blocking call and hence will wait for at least flushSize worth of
-    // data to get ack'd by all servers right here
-    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
-
-    // watchForCommit will clean up at least one map entry, where each entry
-    // corresponds to flushSize worth of data
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2);
-
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 5,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 3,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 8, metrics.getTotalOpCount());
-
-    // Since the data in the buffer is already flushed, flush here will have
-    // no impact on the counters and data structures
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    // flush waits for all acks, so the map has been fully cleaned up by now
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() == 0);
-
-    XceiverClientRatis raftClient =
-        (XceiverClientRatis) blockOutputStream.getXceiverClient();
-    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
-    Pipeline pipeline = raftClient.getPipeline();
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
-
-    // again write data with more than max buffer limit. This will call
-    // watchForCommit again. Since the commit will happen 2-way, the
-    // commitInfoMap will get updated for servers which are alive
-    key.write(data1);
-
-    key.flush();
-    Assert.assertEquals(2, raftClient.getCommitInfoMap().size());
-
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
-    // now close the stream; it will update the ack length after watchForCommit
-    key.close();
-    Assert.assertEquals(blockSize, blockOutputStream.getTotalAckDataLength());
-    // Make sure the retryCount is reset after the exception is handled
-    Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-
-    // in total, there are 8 full write chunks + 2 partial chunks written
-    Assert.assertEquals(writeChunkCount + 10,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    // 4 flushes at flushSize boundaries + 2 flushes for partial chunks
-    Assert.assertEquals(putBlockCount + 6,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
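-    // 10 writeChunks + 6 putBlocks account for the +16 total ops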
-    Assert.assertEquals(totalOpCount + 16, metrics.getTotalOpCount());
-    // Written the same data twice
-    String dataString = new String(data1, UTF_8);
-    validateData(keyName, dataString.concat(dataString).getBytes());
-  }
-
-  @Test
-  public void test2DatanodesFailure() throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
-    long putBlockCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    int dataLength = maxFlushSize + 50;
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-    // since it's hitting the full buffer condition, it will call watchForCommit
-    // and complete at least the putBlock for the first flushSize worth of data
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)
-            <= pendingWriteChunkCount + 2);
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)
-            <= pendingPutBlockCount + 1);
-    Assert.assertEquals(writeChunkCount + 4,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 6, metrics.getTotalOpCount());
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
-
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
-    OutputStream stream =
-        keyOutputStream.getStreamEntries().get(0).getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-    // We have just written more data than flushSize (2 chunks); at this point
-    // the buffer pool will have 4 chunk-size buffers allocated.
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    // writtenDataLength as well as flushedDataLength will be updated here
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(maxFlushSize,
-        blockOutputStream.getTotalDataFlushedLength());
-
-    // Since data equal to maxBufferSize has been written, this is a blocking
-    // call and hence will wait for at least flushSize worth of data to get
-    // acked by all servers right here.
-    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
-
-    // watchForCommit will clean up at least one entry from the map, where
-    // each entry corresponds to flushSize worth of data.
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1);
-
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 5,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 3,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 8, metrics.getTotalOpCount());
-
-    // Since the data in the buffer is already flushed, flush here will have
-    // no impact on the counters and data structures
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    // flush will make sure one more entry gets updated in the map
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2);
-
-    XceiverClientRatis raftClient =
-        (XceiverClientRatis) blockOutputStream.getXceiverClient();
-    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
-    Pipeline pipeline = raftClient.getPipeline();
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
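-    // With 2 of the 3 pipeline nodes down, the Ratis ring has lost its
-    // majority, so pending data can no longer be committed on this pipeline.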
-    // Again write data exceeding the max buffer limit. This will trigger
-    // watchForCommit again.
-
-    // 4 writeChunks (maxFlushSize worth) plus 2 putBlocks will be discarded
-    // here once the exception is hit.
-    key.write(data1);
-
-    // As part of handling the exception, the 4 failed writeChunks will be
-    // rewritten, plus one partial chunk, two putBlocks for flushSize,
-    // and one flush for the partial chunk.
-    key.flush();
-
-    Throwable ioException = HddsClientUtils.checkForException(
-        blockOutputStream.getIoException());
-    // Since 2 datanodes went down, the write:
-    // a) hits GroupMismatchException if the pipeline gets destroyed
-    //    quickly;
-    // b) hits the close container exception if the container is closed
-    //    but the pipeline is not yet destroyed;
-    // c) fails with RaftRetryFailureException if the leader election
-    //    did not finish before the request retry count is exhausted.
-    Assert.assertTrue(ioException instanceof RaftRetryFailureException
-        || ioException instanceof GroupMismatchException
-        || ioException instanceof ContainerNotOpenException);
-    // Make sure the retryCount is reset after the exception is handled
-    Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
-    // Now close the stream. It will update the ack length after watchForCommit.
-
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
-    key.close();
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 14,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 8,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 22, metrics.getTotalOpCount());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(0, keyOutputStream.getLocationInfoList().size());
-    validateData(keyName, data1);
-  }
-
-  @Test
-  public void testFailureWithPrimeSizedData() throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
-    long putBlockCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    int dataLength = 167;
-    // write a prime-sized amount of data, not aligned to chunk boundaries
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)
-            == pendingWriteChunkCount + 1);
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)
-            == pendingPutBlockCount);
-    Assert.assertEquals(writeChunkCount + 1,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 1, metrics.getTotalOpCount());
-
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
-
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
-    OutputStream stream =
-        keyOutputStream.getStreamEntries().get(0).getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-    Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(0, blockOutputStream.getTotalDataFlushedLength());
-
-    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() == 0);
-
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() == 0);
-
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 1,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 3, metrics.getTotalOpCount());
-
-    // Since the data in the buffer is already flushed, flush here will have
-    // no impact on the counters and data structures
-
-    Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    // after the flush and its watchForCommit, the map is empty again
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() == 0);
-
-    XceiverClientRatis raftClient =
-        (XceiverClientRatis) blockOutputStream.getXceiverClient();
-    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
-    // Close the containers on the Datanode and write more data
-    ContainerTestHelper.waitForContainerClose(key, cluster);
-    key.write(data1);
-
-    // As part of handling the exception, the 2 failed writeChunks will be
-    // rewritten, plus 1 putBlock for the flush
-    // and one flush for the partial chunk.
-    key.flush();
-
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
-    Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream
-        .getIoException()) instanceof ContainerNotOpenException);
-    // Make sure the retryCount is reset after the exception is handled
-    Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
-
-    // commitInfoMap will remain intact as there is no server failure
-    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
-    // Now close the stream. It will update the ack length after watchForCommit.
-    key.close();
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 6,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 3,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 9, metrics.getTotalOpCount());
-    Assert.assertTrue(keyOutputStream.getLocationInfoList().size() == 0);
-    // Written the same data twice
-    String dataString = new String(data1, UTF_8);
-    validateData(keyName, dataString.concat(dataString).getBytes());
-  }
-
-  @Test
-  public void testExceptionDuringClose() throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
-    long putBlockCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    int dataLength = 167;
-    // write a prime-sized amount of data, not aligned to chunk boundaries
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)
-            == pendingWriteChunkCount + 1);
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)
-            == pendingPutBlockCount);
-    Assert.assertEquals(writeChunkCount + 1,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 1, metrics.getTotalOpCount());
-
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
-
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
-    OutputStream stream =
-        keyOutputStream.getStreamEntries().get(0).getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-    Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(0, blockOutputStream.getTotalDataFlushedLength());
-
-    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() == 0);
-
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() == 0);
-
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 1,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 3, metrics.getTotalOpCount());
-
-    // Since the data in the buffer is already flushed, flush here will have
-    // no impact on the counters and data structures
-
-    Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    // after the flush and its watchForCommit, the map is empty again
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() == 0);
-
-    XceiverClientRatis raftClient =
-        (XceiverClientRatis) blockOutputStream.getXceiverClient();
-    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
-    // Close the containers on the Datanode and write more data
-    ContainerTestHelper.waitForContainerClose(key, cluster);
-    key.write(data1);
-
-    // commitInfoMap will remain intact as there is no server failure
-    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
-    // Now close the stream. It will hit an exception.
-    key.close();
-
-    Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream
-        .getIoException()) instanceof ContainerNotOpenException);
-    // Make sure the retryCount is reset after the exception is handled
-    Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 6,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 3,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 9, metrics.getTotalOpCount());
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 0);
-    // Written the same data twice
-    String dataString = new String(data1, UTF_8);
-    validateData(keyName, dataString.concat(dataString).getBytes());
-  }
-
-  @Test
-  public void testWatchForCommitWithSingleNodeRatis() throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
-    long putBlockCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key =
-        createKey(keyName, ReplicationType.RATIS, 0, ReplicationFactor.ONE);
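-    // ReplicationFactor.ONE gives a single-datanode Ratis pipeline, which is
-    // why the commitInfoMap asserted below holds exactly one entry.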
-    int dataLength = maxFlushSize + 50;
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-
-    // Since it hits the full buffer condition, it will call watchForCommit
-    // and complete at least the putBlock for the first flushSize worth of data.
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)
-            <= pendingWriteChunkCount + 2);
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)
-            <= pendingPutBlockCount + 1);
-    Assert.assertEquals(writeChunkCount + 4,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 6, metrics.getTotalOpCount());
-
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
-
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
-    OutputStream stream =
-        keyOutputStream.getStreamEntries().get(0).getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-    // We have just written more data than flushSize (2 chunks); at this point
-    // the buffer pool will have 4 chunk-size buffers allocated.
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    // writtenDataLength as well as flushedDataLength will be updated here
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(maxFlushSize,
-        blockOutputStream.getTotalDataFlushedLength());
-
-    // Since data equal to maxBufferSize has been written, this is a blocking
-    // call and hence will wait for at least flushSize worth of data to get
-    // acked by all servers right here.
-    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
-
-    // watchForCommit will clean up at least one entry from the map, where
-    // each entry corresponds to flushSize worth of data.
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1);
-
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 5,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 3,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 8, metrics.getTotalOpCount());
-
-    // flush is a sync call; all pending operations will have completed
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-
-    // Since the data in the buffer is already flushed, flush here will have
-    // no impact on the counters and data structures
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    // flush will make sure one more entry gets updated in the map
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2);
-
-    XceiverClientRatis raftClient =
-        (XceiverClientRatis) blockOutputStream.getXceiverClient();
-    Assert.assertEquals(1, raftClient.getCommitInfoMap().size());
-    // Close the containers on the Datanode and write more data
-    ContainerTestHelper.waitForContainerClose(key, cluster);
-    // 4 writeChunks (maxFlushSize worth) plus 2 putBlocks will be discarded
-    // here once the exception is hit.
-    key.write(data1);
-
-    // As part of handling the exception, the 4 failed writeChunks will be
-    // rewritten, plus one partial chunk, two putBlocks for flushSize,
-    // and one flush for the partial chunk.
-    key.flush();
-
-    Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream
-        .getIoException()) instanceof ContainerNotOpenException);
-    // Make sure the retryCount is reset after the exception is handled
-    Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
-    // commitInfoMap will remain intact as there is no server failure
-    Assert.assertEquals(1, raftClient.getCommitInfoMap().size());
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
-    // Now close the stream. It will update the ack length after watchForCommit.
-    key.close();
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(0, keyOutputStream.getLocationInfoList().size());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 14,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 8,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 22, metrics.getTotalOpCount());
-    // Written the same data twice
-    String dataString = new String(data1, UTF_8);
-    validateData(keyName, dataString.concat(dataString).getBytes());
-  }
-
-  @Test
-  public void testDatanodeFailureWithSingleNodeRatis() throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
-    long putBlockCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key =
-        createKey(keyName, ReplicationType.RATIS, 0, ReplicationFactor.ONE);
-    int dataLength = maxFlushSize + 50;
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-    // Since it hits the full buffer condition, it will call watchForCommit
-    // and complete at least the putBlock for the first flushSize worth of data.
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)
-            <= pendingWriteChunkCount + 2);
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)
-            <= pendingPutBlockCount + 1);
-    Assert.assertEquals(writeChunkCount + 4,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 6, metrics.getTotalOpCount());
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
-
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
-    OutputStream stream =
-        keyOutputStream.getStreamEntries().get(0).getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-    // We have just written more data than flushSize (2 chunks); at this point
-    // the buffer pool will have 4 chunk-size buffers allocated.
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    // writtenDataLength as well as flushedDataLength will be updated here
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(maxFlushSize,
-        blockOutputStream.getTotalDataFlushedLength());
-
-    // Since data equal to maxBufferSize has been written, this is a blocking
-    // call and hence will wait for at least flushSize worth of data to get
-    // acked by all servers right here.
-    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
-
-    // watchForCommit will clean up at least flushSize worth of buffered data,
-    // where each entry corresponds to flushSize worth of data.
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2);
-
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 5,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 3,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 8, metrics.getTotalOpCount());
-
-    // Since the data in the buffer is already flushed, flush here will have
-    // no impact on the counters and data structures
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    // after the flush and its watchForCommit, the map is empty again
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() == 0);
-
-    XceiverClientRatis raftClient =
-        (XceiverClientRatis) blockOutputStream.getXceiverClient();
-    Assert.assertEquals(1, raftClient.getCommitInfoMap().size());
-    Pipeline pipeline = raftClient.getPipeline();
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
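-    // The pipeline's only datanode is now down, so no pending data can be
-    // committed on this block.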
-
-    // Again write data exceeding the max buffer limit. This will trigger
-    // watchForCommit again. No write will succeed in the current block, and
-    // the data will be rewritten to the next block.
-
-    key.write(data1);
-
-    key.flush();
-
-    Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream
-        .getIoException()) instanceof RaftRetryFailureException);
-    Assert.assertEquals(1, raftClient.getCommitInfoMap().size());
-    // Make sure the retryCount is reset after the exception is handled
-    Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
-    // Now close the stream. It will update the ack length after watchForCommit.
-    key.close();
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-
-    // In total, there are 14 full write chunks: 5 before the failure
-    // injection, 4 after which we detect the failure, and then 5 again on
-    // the next block.
-    Assert.assertEquals(writeChunkCount + 14,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    // 3 flushes at flushSize boundaries before the failure injection + 2
-    // failed flushes + 3 more flushes for the next block
-    Assert.assertEquals(putBlockCount + 8,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 22, metrics.getTotalOpCount());
-    Assert.assertEquals(0, keyOutputStream.getLocationInfoList().size());
-    // Written the same data twice
-    String dataString = new String(data1, UTF_8);
-    cluster.restartHddsDatanode(pipeline.getNodes().get(0), true);
-    validateData(keyName, dataString.concat(dataString).getBytes());
-  }
-
-  @Test
-  public void testDatanodeFailureWithPreAllocation() throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
-    long putBlockCount =
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount =
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key =
-        createKey(keyName, ReplicationType.RATIS, 3 * blockSize,
-            ReplicationFactor.ONE);
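-    // Requesting 3 * blockSize up front preallocates three block output
-    // streams, as asserted on the stream entries below.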
-    int dataLength = maxFlushSize + 50;
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-    // Since it hits the full buffer condition, it will call watchForCommit
-    // and complete at least the putBlock for the first flushSize worth of data.
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)
-            <= pendingWriteChunkCount + 2);
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)
-            <= pendingPutBlockCount + 1);
-    Assert.assertEquals(writeChunkCount + 4,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 6, metrics.getTotalOpCount());
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
-
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 3);
-    OutputStream stream =
-        keyOutputStream.getStreamEntries().get(0).getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-
-    // We have just written more data than flushSize (2 chunks); at this point
-    // the buffer pool will have 4 chunk-size buffers allocated.
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    // writtenDataLength as well as flushedDataLength will be updated here
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(maxFlushSize,
-        blockOutputStream.getTotalDataFlushedLength());
-
-    // Since data equal to maxBufferSize has been written, this is a blocking
-    // call and hence will wait for at least flushSize worth of data to get
-    // acked by all servers right here.
-    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
-
-    // watchForCommit will clean up at least flushSize worth of buffered data,
-    // where each entry corresponds to flushSize worth of data.
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2);
-
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 5,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 3,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 8, metrics.getTotalOpCount());
-
-    // Since the data in the buffer is already flushed, flush here will have
-    // no impact on the counters and data structures
-
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    // after the flush and its watchForCommit, the map is empty again
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() == 0);
-
-    XceiverClientRatis raftClient =
-        (XceiverClientRatis) blockOutputStream.getXceiverClient();
-    Assert.assertEquals(1, raftClient.getCommitInfoMap().size());
-    Pipeline pipeline = raftClient.getPipeline();
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
-
-    // Again write data exceeding the max buffer limit. This will trigger
-    // watchForCommit again. No write will succeed in the current block, and
-    // the data will be rewritten to the next preallocated block.
-
-    key.write(data1);
-
-    key.flush();
-
-    Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream
-        .getIoException()) instanceof RaftRetryFailureException);
-
-    // Make sure the retryCount is reset after the exception is handled
-    Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
-    Assert.assertEquals(1, raftClient.getCommitInfoMap().size());
-
-    // Now close the stream. It will update the ack length after watchForCommit.
-    key.close();
-    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(0, keyOutputStream.getLocationInfoList().size());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-
-    // In total, there are 14 full write chunks: 5 before the failure
-    // injection, 4 after which we detect the failure, and then 5 again on
-    // the next block.
-    Assert.assertEquals(writeChunkCount + 14,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-
-    // 3 flushes at flushSize boundaries before the failure injection + 2
-    // failed flushes + 3 more flushes for the next block
-    Assert.assertEquals(putBlockCount + 8,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 22, metrics.getTotalOpCount());
-    // Written the same data twice
-    String dataString = new String(data1, UTF_8);
-    cluster.restartHddsDatanode(pipeline.getNodes().get(0), true);
-    validateData(keyName, dataString.concat(dataString).getBytes());
-  }
-
-  private OzoneOutputStream createKey(String keyName, ReplicationType type,
-      long size) throws Exception {
-    return createKey(keyName, type, size, ReplicationFactor.THREE);
-  }
-
-  private OzoneOutputStream createKey(String keyName, ReplicationType type,
-      long size, ReplicationFactor factor) throws Exception {
-    return ContainerTestHelper
-        .createKey(keyName, type, factor, size, objectStore, volumeName,
-            bucketName);
-  }
-
-  private void validateData(String keyName, byte[] data) throws Exception {
-    ContainerTestHelper
-        .validateData(keyName, data, objectStore, volumeName, bucketName);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
deleted file mode 100644
index e551ab1..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ /dev/null
@@ -1,474 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-/**
- * Tests Close Container Exception handling by Ozone Client.
- */
-public class TestCloseContainerHandlingByClient {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-  private static OzoneClient client;
-  private static ObjectStore objectStore;
-  private static int chunkSize;
-  private static int blockSize;
-  private static String volumeName;
-  private static String bucketName;
-  private static String keyString;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    chunkSize = (int) OzoneConsts.MB;
-    blockSize = 4 * chunkSize;
-    conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms");
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
-    conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE");
-    conf.setQuietMode(false);
-    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
-        StorageUnit.MB);
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(7).build();
-    cluster.waitForClusterToBeReady();
-    // The easiest way to create an open container is to create a key.
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    keyString = UUID.randomUUID().toString();
-    volumeName = "closecontainerexceptionhandlingtest";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-  }
-
-  private String getKeyName() {
-    return UUID.randomUUID().toString();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testBlockWritesWithFlushAndClose() throws Exception {
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    // write data more than 1 chunk
-    byte[] data = ContainerTestHelper
-        .getFixedLengthString(keyString, chunkSize + chunkSize / 2)
-        .getBytes(UTF_8);
-    key.write(data);
-
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    //get the name of a valid container
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-
-    waitForContainerClose(key);
-    key.write(data);
-    key.flush();
-    key.close();
-    // Read the key from OM again and match the length. The length will
-    // equal the total data size written.
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
-
-    // Written the same data twice
-    String dataString = new String(data, UTF_8);
-    dataString = dataString.concat(dataString);
-    validateData(keyName, dataString.getBytes(UTF_8));
-  }
-
-  @Test
-  public void testBlockWritesCloseConsistency() throws Exception {
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    // write data more than 1 chunk
-    byte[] data = ContainerTestHelper
-        .getFixedLengthString(keyString, chunkSize + chunkSize / 2)
-        .getBytes(UTF_8);
-    key.write(data);
-
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    //get the name of a valid container
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-
-    waitForContainerClose(key);
-    key.close();
-    // Read the key from OM again and match the length. The length will
-    // equal the original data size.
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    Assert.assertEquals(data.length, keyInfo.getDataSize());
-    validateData(keyName, data);
-  }
-
-  @Test
-  public void testMultiBlockWrites() throws Exception {
-
-    String keyName = getKeyName();
-    OzoneOutputStream key =
-        createKey(keyName, ReplicationType.RATIS, (3 * blockSize));
-    KeyOutputStream keyOutputStream =
-        (KeyOutputStream) key.getOutputStream();
-    // With the initial size provided, it should have preallocated 3 blocks
-    Assert.assertEquals(3, keyOutputStream.getStreamEntries().size());
-    // write data more than 1 block
-    byte[] data =
-        ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize))
-            .getBytes(UTF_8);
-    Assert.assertEquals(data.length, 3 * blockSize);
-    key.write(data);
-
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    //get the name of a valid container
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-
-    waitForContainerClose(key);
-    // Write 1 more block worth of data. It will fail and a new block will
-    // be allocated.
-    key.write(ContainerTestHelper.getFixedLengthString(keyString, blockSize)
-        .getBytes(UTF_8));
-
-    key.close();
-    // Read the key from OM again and match the length. The length will
-    // equal the total data size written.
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    List<OmKeyLocationInfo> keyLocationInfos =
-        keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
-    // Though we initially wrote only into the preallocated blocks, the close
-    // will hit closeContainerException and the remaining data in the
-    // chunkOutputStream buffer will be copied into a newly allocated block
-    // and committed.
-    Assert.assertEquals(4, keyLocationInfos.size());
-    Assert.assertEquals(4 * blockSize, keyInfo.getDataSize());
-    for (OmKeyLocationInfo locationInfo : keyLocationInfos) {
-      Assert.assertEquals(blockSize, locationInfo.getLength());
-    }
-  }
-
-  @Test
-  public void testMultiBlockWrites2() throws Exception {
-    String keyName = getKeyName();
-    OzoneOutputStream key =
-        createKey(keyName, ReplicationType.RATIS, 2 * blockSize);
-    KeyOutputStream keyOutputStream =
-        (KeyOutputStream) key.getOutputStream();
-
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    // With the initial size provided, it should have preallocated 2 blocks
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
-    String dataString =
-        ContainerTestHelper.getFixedLengthString(keyString, (2 * blockSize));
-    byte[] data = dataString.getBytes(UTF_8);
-    key.write(data);
-    // 2 blocks are completely written to the DataNodes.
-    // Data of length half of chunkSize resides in the chunkOutputStream buffer
-    String dataString2 =
-        ContainerTestHelper.getFixedLengthString(keyString, chunkSize);
-    key.write(dataString2.getBytes(UTF_8));
-    key.flush();
-
-    String dataString3 =
-        ContainerTestHelper.getFixedLengthString(keyString, chunkSize);
-    key.write(dataString3.getBytes(UTF_8));
-    key.flush();
-
-    String dataString4 =
-        ContainerTestHelper.getFixedLengthString(keyString, chunkSize * 1 / 2);
-    key.write(dataString4.getBytes(UTF_8));
-    //get the name of a valid container
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-
-    waitForContainerClose(key);
-
-    key.close();
-    // Read the key from OM again and match the length. The length will
-    // equal the total data size written.
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    // Though we initially wrote only into the preallocated blocks, the close
-    // will hit closeContainerException and the remaining data in the
-    // chunkOutputStream buffer will be copied into a newly allocated block
-    // and committed.
-
-    String dataCommitted =
-        dataString.concat(dataString2).concat(dataString3).concat(dataString4);
-    Assert.assertEquals(dataCommitted.getBytes(UTF_8).length,
-        keyInfo.getDataSize());
-    validateData(keyName, dataCommitted.getBytes(UTF_8));
-  }
-
-  @Test
-  public void testMultiBlockWrites3() throws Exception {
-
-    String keyName = getKeyName();
-    int keyLen = 4 * blockSize;
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, keyLen);
-    KeyOutputStream keyOutputStream =
-        (KeyOutputStream) key.getOutputStream();
-    // With the initial size provided, it should have preallocated 4 blocks
-    Assert.assertEquals(4, keyOutputStream.getStreamEntries().size());
-    // generate 4 blocks worth of data
-    byte[] writtenData =
-        ContainerTestHelper.getFixedLengthString(keyString, keyLen)
-            .getBytes(UTF_8);
-    byte[] data = Arrays.copyOfRange(writtenData, 0, 3 * blockSize + chunkSize);
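-    // Write only the first 3 blocks plus one chunk for now; the remainder is
-    // written after the containers are closed.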
-    Assert.assertEquals(data.length, 3 * blockSize + chunkSize);
-    key.write(data);
-
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    //get the name of a valid container
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-
-    waitForContainerClose(key);
-    // Write 3 more chunks worth of data. It will fail and a new block will be
-    // allocated. This write completes the 4 blocks worth of data for the key.
-    data = Arrays.copyOfRange(writtenData, 3 * blockSize + chunkSize, keyLen);
-    key.write(data);
-
-    key.close();
-    // read the key from OM again and match the length and data.
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    List<OmKeyLocationInfo> keyLocationInfos =
-        keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
-    OzoneVolume volume = objectStore.getVolume(volumeName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    OzoneInputStream inputStream = bucket.readKey(keyName);
-    byte[] readData = new byte[keyLen];
-    inputStream.read(readData);
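-    // Note: this assumes a single read() call fills the whole buffer.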
-    Assert.assertArrayEquals(writtenData, readData);
-
-    // Though we initially wrote only into the preallocated blocks, the close
-    // will hit closeContainerException and the remaining data in the
-    // chunkOutputStream buffer will be copied into a newly allocated block
-    // and committed.
-    long length = 0;
-    for (OmKeyLocationInfo locationInfo : keyLocationInfos) {
-      length += locationInfo.getLength();
-    }
-    Assert.assertEquals(4 * blockSize, length);
-  }
-
-  private void waitForContainerClose(OzoneOutputStream outputStream)
-      throws Exception {
-    ContainerTestHelper
-        .waitForContainerClose(outputStream, cluster);
-  }
-
-  @Ignore // Test needs to be fixed after close container is handled for
-  // non-existent containers on the datanode. The test closes preallocated
-  // containers on the datanode.
-  @Test
-  public void testDiscardPreallocatedBlocks() throws Exception {
-    String keyName = getKeyName();
-    OzoneOutputStream key =
-        createKey(keyName, ReplicationType.RATIS, 2 * blockSize);
-    KeyOutputStream keyOutputStream =
-        (KeyOutputStream) key.getOutputStream();
-
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    // With the initial size provided, it should have preallocated 2 blocks
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
-    String dataString =
-        ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
-    byte[] data = dataString.getBytes(UTF_8);
-    key.write(data);
-    List<OmKeyLocationInfo> locationInfos =
-        new ArrayList<>(keyOutputStream.getLocationInfoList());
-    long containerID = locationInfos.get(0).getContainerID();
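-    // Resolve the pipeline hosting the first block so the test can confirm
-    // it spans a single datanode.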
-    ContainerInfo container =
-        cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerID));
-    Pipeline pipeline =
-        cluster.getStorageContainerManager().getPipelineManager()
-            .getPipeline(container.getPipelineID());
-    List<DatanodeDetails> datanodes = pipeline.getNodes();
-    Assert.assertEquals(1, datanodes.size());
-    waitForContainerClose(key);
-    dataString =
-        ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
-    data = dataString.getBytes(UTF_8);
-    key.write(data);
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
-
-    // The 1st block got written. Now all the containers are closed, so the
-    // 2nd preallocated block will be removed from the list and a new block
-    // should have been allocated.
-    Assert.assertTrue(
-        keyOutputStream.getLocationInfoList().get(0).getBlockID()
-            .equals(locationInfos.get(0).getBlockID()));
-    Assert.assertFalse(
-        keyOutputStream.getLocationInfoList().get(1).getBlockID()
-            .equals(locationInfos.get(1).getBlockID()));
-    key.close();
-  }
-
-  private OzoneOutputStream createKey(String keyName, ReplicationType type,
-      long size) throws Exception {
-    return ContainerTestHelper
-        .createKey(keyName, type, size, objectStore, volumeName, bucketName);
-  }
-
-  private void validateData(String keyName, byte[] data) throws Exception {
-    ContainerTestHelper
-        .validateData(keyName, data, objectStore, volumeName, bucketName);
-  }
-
-  @Test
-  public void testBlockWriteViaRatis() throws Exception {
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    byte[] data = ContainerTestHelper
-        .getFixedLengthString(keyString, chunkSize + chunkSize / 2)
-        .getBytes(UTF_8);
-    key.write(data);
-
-    // build the key args so the key can be looked up from OM later
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).
-        setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    waitForContainerClose(key);
-    // Write the data again. This will throw an exception, which will be
-    // handled, and new blocks will be allocated
-    key.write(data);
-    key.flush();
-    // The write will fail but the exception will be handled and the length
-    // will be updated correctly in OzoneManager once the stream is closed
-    key.close();
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    String dataString = new String(data, UTF_8);
-    dataString = dataString.concat(dataString);
-    Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
-    validateData(keyName, dataString.getBytes(UTF_8));
-  }
-
-  @Test
-  public void testBlockWrites() throws Exception {
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, 2 * chunkSize)
-            .getBytes(UTF_8);
-    key.write(data1);
-
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    // build the key args so the key can be looked up from OM later
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-
-    waitForContainerClose(key);
-    byte[] data2 =
-        ContainerTestHelper.getFixedLengthString(keyString, 3 * chunkSize)
-            .getBytes(UTF_8);
-    key.write(data2);
-    key.flush();
-    key.close();
-    // read the key from OM again and match the length. The length will
-    // be equal to the total data written across both writes.
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    Assert.assertEquals(5 * chunkSize, keyInfo.getDataSize());
-
-    // validate the concatenation of the two writes
-    String dataString = new String(data1, UTF_8);
-    String dataString2 = new String(data2, UTF_8);
-    dataString = dataString.concat(dataString2);
-    validateData(keyName, dataString.getBytes(UTF_8));
-  }
-
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
deleted file mode 100644
index ea51900..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
+++ /dev/null
@@ -1,296 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientRatis;
-import org.apache.hadoop.hdds.scm.XceiverClientReply;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.storage.BufferPool;
-import org.apache.hadoop.hdds.scm.storage.CommitWatcher;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * Class to test CommitWatcher functionality.
- */
-public class TestCommitWatcher {
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-  private static OzoneClient client;
-  private static ObjectStore objectStore;
-  private static int chunkSize;
-  private static long flushSize;
-  private static long maxFlushSize;
-  private static long blockSize;
-  private static String volumeName;
-  private static String bucketName;
-  private static String keyString;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private static String containerOwner = "OZONE";
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    chunkSize = (int)(1 * OzoneConsts.MB);
-    flushSize = 2 * chunkSize;
-    maxFlushSize = 2 * flushSize;
-    blockSize = 2 * maxFlushSize;
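-    // Derived sizes (a summary of the assignments above): chunk = 1 MB,
-    // flush = 2 MB, max flush = 4 MB, block = 8 MB, i.e. one block holds
-    // eight chunks and four flush-sized increments.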
-    conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms");
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
-    conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE");
-    conf.setQuietMode(false);
-    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
-        StorageUnit.MB);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(7)
-        .setBlockSize(blockSize)
-        .setChunkSize(chunkSize)
-        .setStreamBufferFlushSize(flushSize)
-        .setStreamBufferMaxSize(maxFlushSize)
-        .setStreamBufferSizeUnit(StorageUnit.BYTES)
-        .build();
-    cluster.waitForClusterToBeReady();
-    // the easiest way to create an open container is to create a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    keyString = UUID.randomUUID().toString();
-    volumeName = "testblockoutputstream";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-    storageContainerLocationClient = cluster
-        .getStorageContainerLocationClient();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testReleaseBuffers() throws Exception {
-    int capacity = 2;
-    BufferPool bufferPool = new BufferPool(chunkSize, capacity);
-    XceiverClientManager clientManager = new XceiverClientManager(conf);
-    ContainerWithPipeline container = storageContainerLocationClient
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
-    Pipeline pipeline = container.getPipeline();
-    long containerId = container.getContainerInfo().getContainerID();
-    XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline);
-    Assert.assertEquals(1, xceiverClient.getRefcount());
-    Assert.assertTrue(xceiverClient instanceof XceiverClientRatis);
-    XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
-    CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient, 10000);
-    BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
-    List<ByteBuffer> bufferList = new ArrayList<>();
-    List<XceiverClientReply> replies = new ArrayList<>();
-    long length = 0;
-    List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>>
-        futures = new ArrayList<>();
-    for (int i = 0; i < capacity; i++) {
-      bufferList.clear();
-      ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
-          ContainerTestHelper
-              .getWriteChunkRequest(pipeline, blockID, chunkSize);
-      // add the data to the buffer pool
-      ByteBuffer byteBuffer = bufferPool.allocateBufferIfNeeded().put(
-          writeChunkRequest.getWriteChunk().getData().asReadOnlyByteBuffer());
-      ratisClient.sendCommandAsync(writeChunkRequest);
-      ContainerProtos.ContainerCommandRequestProto putBlockRequest =
-          ContainerTestHelper
-              .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
-      XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
-      bufferList.add(byteBuffer);
-      length += byteBuffer.position();
-      CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
-          reply.getResponse().thenApply(v -> {
-            watcher.updateCommitInfoMap(reply.getLogIndex(), bufferList);
-            return v;
-          });
-      futures.add(future);
-      watcher.getFutureMap().put(length, future);
-      replies.add(reply);
-    }
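-    // At this point both chunk-sized buffers (capacity = 2) have been
-    // written; each putBlock future, on completion, records its buffer list
-    // in the watcher's commitIndex-to-flushed-data map keyed by the Ratis
-    // log index, while the future map is keyed by cumulative data length.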
-
-    Assert.assertTrue(replies.size() == 2);
-    // wait on the 1st putBlock to complete
-    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future1 =
-        futures.get(0);
-    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future2 =
-        futures.get(1);
-    future1.get();
-    Assert.assertNotNull(watcher.getFutureMap().get(new Long(chunkSize)));
-    Assert.assertTrue(
-        watcher.getFutureMap().get(new Long(chunkSize)).equals(future1));
-    // wait on 2nd putBlock to complete
-    future2.get();
-    Assert.assertNotNull(watcher.getFutureMap().get(new Long(2 * chunkSize)));
-    Assert.assertTrue(
-        watcher.getFutureMap().get(new Long(2 * chunkSize)).equals(future2));
-    Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().size() == 2);
-    watcher.watchOnFirstIndex();
-    Assert.assertFalse(watcher.getCommitIndex2flushedDataMap()
-        .containsKey(replies.get(0).getLogIndex()));
-    Assert.assertFalse(watcher.getFutureMap().containsKey(chunkSize));
-    Assert.assertTrue(watcher.getTotalAckDataLength() >= chunkSize);
-    watcher.watchOnLastIndex();
-    Assert.assertFalse(watcher.getCommitIndex2flushedDataMap()
-        .containsKey(replies.get(1).getLogIndex()));
-    Assert.assertFalse(watcher.getFutureMap().containsKey(2 * chunkSize));
-    Assert.assertTrue(watcher.getTotalAckDataLength() == 2 * chunkSize);
-    Assert.assertTrue(watcher.getFutureMap().isEmpty());
-    Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().isEmpty());
-  }
-
-  @Test
-  public void testReleaseBuffersOnException() throws Exception {
-    int capacity = 2;
-    BufferPool bufferPool = new BufferPool(chunkSize, capacity);
-    XceiverClientManager clientManager = new XceiverClientManager(conf);
-    ContainerWithPipeline container = storageContainerLocationClient
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
-    Pipeline pipeline = container.getPipeline();
-    long containerId = container.getContainerInfo().getContainerID();
-    XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline);
-    Assert.assertEquals(1, xceiverClient.getRefcount());
-    Assert.assertTrue(xceiverClient instanceof XceiverClientRatis);
-    XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
-    CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient, 10000);
-    BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
-    List<ByteBuffer> bufferList = new ArrayList<>();
-    List<XceiverClientReply> replies = new ArrayList<>();
-    long length = 0;
-    List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>>
-        futures = new ArrayList<>();
-    for (int i = 0; i < capacity; i++) {
-      bufferList.clear();
-      ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
-          ContainerTestHelper
-              .getWriteChunkRequest(pipeline, blockID, chunkSize);
-      // add the data to the buffer pool
-      ByteBuffer byteBuffer = bufferPool.allocateBufferIfNeeded().put(
-          writeChunkRequest.getWriteChunk().getData().asReadOnlyByteBuffer());
-      ratisClient.sendCommandAsync(writeChunkRequest);
-      ContainerProtos.ContainerCommandRequestProto putBlockRequest =
-          ContainerTestHelper
-              .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
-      XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
-      bufferList.add(byteBuffer);
-      length += byteBuffer.position();
-      CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
-          reply.getResponse().thenApply(v -> {
-            watcher.updateCommitInfoMap(reply.getLogIndex(), bufferList);
-            return v;
-          });
-      futures.add(future);
-      watcher.getFutureMap().put(length, future);
-      replies.add(reply);
-    }
-
-    Assert.assertTrue(replies.size() == 2);
-    // wait on the 1st putBlock to complete
-    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future1 =
-        futures.get(0);
-    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future2 =
-        futures.get(1);
-    future1.get();
-    Assert.assertNotNull(watcher.getFutureMap().get(new Long(chunkSize)));
-    Assert.assertTrue(
-        watcher.getFutureMap().get(new Long(chunkSize)).equals(future1));
-    // wait on 2nd putBlock to complete
-    future2.get();
-    Assert.assertNotNull(watcher.getFutureMap().get(new Long(2 * chunkSize)));
-    Assert.assertTrue(
-        watcher.getFutureMap().get(new Long(2 * chunkSize)).equals(future2));
-    Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().size() == 2);
-    watcher.watchOnFirstIndex();
-    Assert.assertFalse(watcher.getCommitIndex2flushedDataMap()
-        .containsKey(replies.get(0).getLogIndex()));
-    Assert.assertFalse(watcher.getFutureMap().containsKey(chunkSize));
-    Assert.assertTrue(watcher.getTotalAckDataLength() >= chunkSize);
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
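-    // With two of the three pipeline datanodes down, a watch on a commit
-    // index beyond the last replicated one cannot be satisfied, so the
-    // watchForCommit below is expected to time out.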
-    try {
-      // just watch for a higher index so as to ensure it does an actual
-      // call to Ratis. Otherwise, it may just return in case the commitInfoMap
-      // is updated to the latest index in the putBlock response.
-      watcher.watchForCommit(replies.get(1).getLogIndex() + 1);
-    } catch(IOException ioe) {
-      Assert.assertTrue(ioe.getCause() instanceof TimeoutException);
-    }
-    long lastIndex = replies.get(1).getLogIndex();
-    // Depending on the last successfully replicated commitIndex, we discard
-    // either only the 1st buffer or both buffers
-    Assert.assertTrue(ratisClient.getReplicatedMinCommitIndex() <= lastIndex);
-    if (ratisClient.getReplicatedMinCommitIndex() < replies.get(1)
-        .getLogIndex()) {
-      Assert.assertTrue(watcher.getTotalAckDataLength() == chunkSize);
-      Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().size() == 1);
-      Assert.assertTrue(watcher.getFutureMap().size() == 1);
-    } else {
-      Assert.assertTrue(watcher.getTotalAckDataLength() == 2 * chunkSize);
-      Assert.assertTrue(watcher.getFutureMap().isEmpty());
-      Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().isEmpty());
-    }
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
deleted file mode 100644
index 0886d26..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Predicate;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * Tests end-to-end container replication.
- */
-public class TestContainerReplicationEndToEnd {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static OzoneClient client;
-  private static ObjectStore objectStore;
-  private static String volumeName;
-  private static String bucketName;
-  private static String path;
-  private static XceiverClientManager xceiverClientManager;
-  private static long containerReportInterval;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    path = GenericTestUtils
-        .getTempPath(TestContainerStateMachineFailures.class.getSimpleName());
-    File baseDir = new File(path);
-    baseDir.mkdirs();
-    containerReportInterval = 2000;
-
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL,
-        containerReportInterval, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, containerReportInterval,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL,
-        2 * containerReportInterval, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000,
-        TimeUnit.SECONDS);
-    conf.setTimeDuration(OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY,
-        1000, TimeUnit.SECONDS);
-    conf.setLong("hdds.scm.replication.thread.interval",
-        containerReportInterval);
-
-    conf.setQuietMode(false);
-    cluster =
-        MiniOzoneCluster.newBuilder(conf).setNumDatanodes(4).setHbInterval(200)
-            .build();
-    cluster.waitForClusterToBeReady();
-    cluster.getStorageContainerManager().getReplicationManager().start();
-    // the easiest way to create an open container is to create a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    xceiverClientManager = new XceiverClientManager(conf);
-    volumeName = "testcontainerstatemachinefailures";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  /**
-   * The test simulates end-to-end container replication.
-   */
-  @Test
-  public void testContainerReplication() throws Exception {
-    String keyName = "testContainerReplication";
-    OzoneOutputStream key =
-        objectStore.getVolume(volumeName).getBucket(bucketName)
-            .createKey(keyName, 0, ReplicationType.RATIS,
-                ReplicationFactor.THREE, new HashMap<>());
-    byte[] testData = "ratis".getBytes();
-    // First write and flush creates a container in the datanode
-    key.write(testData);
-    key.flush();
-
-    KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
-    List<OmKeyLocationInfo> locationInfoList =
-        groupOutputStream.getLocationInfoList();
-    Assert.assertEquals(1, locationInfoList.size());
-    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
-    long containerID = omKeyLocationInfo.getContainerID();
-    PipelineID pipelineID =
-        cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(new ContainerID(containerID)).getPipelineID();
-    Pipeline pipeline =
-        cluster.getStorageContainerManager().getPipelineManager()
-            .getPipeline(pipelineID);
-    key.close();
-
-    if (cluster.getStorageContainerManager().getContainerManager()
-        .getContainer(new ContainerID(containerID)).getState() !=
-        HddsProtos.LifeCycleState.CLOSING) {
-      cluster.getStorageContainerManager().getContainerManager()
-          .updateContainerState(new ContainerID(containerID),
-              HddsProtos.LifeCycleEvent.FINALIZE);
-    }
-    // wait for container to move to CLOSING state in SCM
-    Thread.sleep(2 * containerReportInterval);
-    DatanodeDetails oldReplicaNode = pipeline.getFirstNode();
-    // now move the container to the CLOSED state on the datanode.
-    XceiverClientSpi xceiverClient =
-        xceiverClientManager.acquireClient(pipeline);
-    ContainerProtos.ContainerCommandRequestProto.Builder request =
-        ContainerProtos.ContainerCommandRequestProto.newBuilder();
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    request.setCmdType(ContainerProtos.Type.CloseContainer);
-    request.setContainerID(containerID);
-    request.setCloseContainer(
-        ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
-    xceiverClient.sendCommand(request.build());
-    // wait for container to move to closed state in SCM
-    Thread.sleep(2 * containerReportInterval);
-    Assert.assertTrue(
-        cluster.getStorageContainerManager().getContainerInfo(containerID)
-            .getState() == HddsProtos.LifeCycleState.CLOSED);
-    // shutdown the replica node
-    cluster.shutdownHddsDatanode(oldReplicaNode);
-    // now the container is under-replicated and will be replicated to a different dn
-    HddsDatanodeService dnService = null;
-
-    for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
-      Predicate<DatanodeDetails> p =
-          i -> i.getUuid().equals(dn.getDatanodeDetails().getUuid());
-      if (!pipeline.getNodes().stream().anyMatch(p)) {
-        dnService = dn;
-      }
-    }
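-    // dnService now points at a datanode outside the original pipeline; the
-    // closed, under-replicated container should get re-replicated to it.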
-
-    Assert.assertNotNull(dnService);
-    final HddsDatanodeService newReplicaNode = dnService;
-    // wait for the container to get replicated
-    GenericTestUtils.waitFor(() -> {
-      return newReplicaNode.getDatanodeStateMachine().getContainer()
-          .getContainerSet().getContainer(containerID) != null;
-    }, 500, 100000);
-    Assert.assertTrue(newReplicaNode.getDatanodeStateMachine().getContainer()
-        .getContainerSet().getContainer(containerID).getContainerData()
-        .getBlockCommitSequenceId() > 0);
-    // wait for SCM to update the replica Map
-    Thread.sleep(5 * containerReportInterval);
-    // now shutdown the other two dns of the original pipeline and try reading
-    // the key again
-    for (DatanodeDetails dn : pipeline.getNodes()) {
-      cluster.shutdownHddsDatanode(dn);
-    }
-    // This will try to read the data from the dn to which the container got
-    // replicated after the container got closed.
-    ContainerTestHelper
-        .validateData(keyName, testData, objectStore, volumeName, bucketName);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
deleted file mode 100644
index 19a1707..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.CertificateClientTestImpl;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.RatisServerConfiguration;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.util.HashMap;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * Tests the containerStateMachine failure handling and snapshot retention.
- */
-
-public class TestContainerStateMachine {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-  private static OzoneClient client;
-  private static ObjectStore objectStore;
-  private static String volumeName;
-  private static String bucketName;
-  private static String path;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    path = GenericTestUtils
-        .getTempPath(TestContainerStateMachine.class.getSimpleName());
-    File baseDir = new File(path);
-    baseDir.mkdirs();
-
-    conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true);
-  //  conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
-    conf.setQuietMode(false);
-    OzoneManager.setTestSecureOmFlag(true);
-    conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
-    //  conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString());
-    cluster =
-        MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1)
-            .setHbInterval(200)
-            .setCertificateClient(new CertificateClientTestImpl(conf))
-            .build();
-    cluster.waitForClusterToBeReady();
-    cluster.getOzoneManager().startSecretManager();
-    // the easiest way to create an open container is to create a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    volumeName = "testcontainerstatemachinefailures";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testContainerStateMachineFailures() throws Exception {
-    OzoneOutputStream key =
-        objectStore.getVolume(volumeName).getBucket(bucketName)
-            .createKey("ratis", 1024, ReplicationType.RATIS,
-                ReplicationFactor.ONE, new HashMap<>());
-    // First write and flush creates a container in the datanode
-    key.write("ratis".getBytes());
-    key.flush();
-    key.write("ratis".getBytes());
-
-    // get the key output stream to inspect the key's block locations
-    KeyOutputStream groupOutputStream =
-        (KeyOutputStream) key.getOutputStream();
-
-    List<OmKeyLocationInfo> locationInfoList =
-        groupOutputStream.getLocationInfoList();
-    Assert.assertEquals(1, locationInfoList.size());
-    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
-
-    // delete the container dir
-    FileUtil.fullyDelete(new File(
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet()
-            .getContainer(omKeyLocationInfo.getContainerID()).getContainerData()
-            .getContainerPath()));
-
-    key.close();
-    // Make sure the container is marked unhealthy
-    Assert.assertTrue(
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet()
-            .getContainer(omKeyLocationInfo.getContainerID())
-            .getContainerState()
-            == ContainerProtos.ContainerDataProto.State.UNHEALTHY);
-  }
-
-  @Test
-  public void testRatisSnapshotRetention() throws Exception {
-
-    ContainerStateMachine stateMachine =
-        (ContainerStateMachine) ContainerTestHelper.getStateMachine(cluster);
-    SimpleStateMachineStorage storage =
-        (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
-    Assert.assertNull(storage.findLatestSnapshot());
-
-    // Write 10 keys. Num snapshots should be equal to config value.
-    for (int i = 1; i <= 10; i++) {
-      OzoneOutputStream key =
-          objectStore.getVolume(volumeName).getBucket(bucketName)
-              .createKey(("ratis" + i), 1024, ReplicationType.RATIS,
-                  ReplicationFactor.ONE, new HashMap<>());
-      // First write and flush creates a container in the datanode
-      key.write(("ratis" + i).getBytes());
-      key.flush();
-      key.write(("ratis" + i).getBytes());
-    }
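-    // With DFS_RATIS_SNAPSHOT_THRESHOLD_KEY set to 1 in init(), every
-    // applied transaction triggers a snapshot, so only the configured
-    // number of retained snapshots should remain on disk.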
-
-    RatisServerConfiguration ratisServerConfiguration =
-        conf.getObject(RatisServerConfiguration.class);
-
-    stateMachine =
-        (ContainerStateMachine) ContainerTestHelper.getStateMachine(cluster);
-    storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
-    Path parentPath = storage.findLatestSnapshot().getFile().getPath();
-    int numSnapshots = parentPath.getParent().toFile().listFiles().length;
-    Assert.assertTrue(Math.abs(ratisServerConfiguration
-        .getNumSnapshotsRetained() - numSnapshots) <= 1);
-
-    // Write 10 more keys. Num Snapshots should remain the same.
-    for (int i = 11; i <= 20; i++) {
-      OzoneOutputStream key =
-          objectStore.getVolume(volumeName).getBucket(bucketName)
-              .createKey(("ratis" + i), 1024, ReplicationType.RATIS,
-                  ReplicationFactor.ONE, new HashMap<>());
-      // First write and flush creates a container in the datanode
-      key.write(("ratis" + i).getBytes());
-      key.flush();
-      key.write(("ratis" + i).getBytes());
-    }
-    stateMachine =
-        (ContainerStateMachine) ContainerTestHelper.getStateMachine(cluster);
-    storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
-    parentPath = storage.findLatestSnapshot().getFile().getPath();
-    numSnapshots = parentPath.getParent().toFile().listFiles().length;
-    Assert.assertTrue(Math.abs(ratisServerConfiguration
-        .getNumSnapshotsRetained() - numSnapshots) <= 1);
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
deleted file mode 100644
index 9ac45b8..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++ /dev/null
@@ -1,504 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.protocol.RaftRetryFailureException;
-import org.apache.ratis.protocol.StateMachineException;
-import org.apache.ratis.server.storage.FileInfo;
-import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.util.HashMap;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.
-    HDDS_COMMAND_STATUS_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys.
-    HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
-    ContainerDataProto.State.UNHEALTHY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.
-    HDDS_SCM_WATCHER_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.
-    OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.
-    OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-
-/**
- * Tests the containerStateMachine failure handling.
- */
-
-public class TestContainerStateMachineFailures {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static OzoneClient client;
-  private static ObjectStore objectStore;
-  private static String volumeName;
-  private static String bucketName;
-  private static String path;
-  private static XceiverClientManager xceiverClientManager;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    path = GenericTestUtils
-        .getTempPath(TestContainerStateMachineFailures.class.getSimpleName());
-    File baseDir = new File(path);
-    baseDir.mkdirs();
-
-
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
-    conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 10,
-        TimeUnit.SECONDS);
-    conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 10);
-    conf.setTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY,
-        1, TimeUnit.SECONDS);
-    conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
-    conf.setQuietMode(false);
-    cluster =
-        MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).setHbInterval(200)
-            .build();
-    cluster.waitForClusterToBeReady();
-    // the easiest way to create an open container is to create a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    xceiverClientManager = new XceiverClientManager(conf);
-    volumeName = "testcontainerstatemachinefailures";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testContainerStateMachineFailures() throws Exception {
-    OzoneOutputStream key =
-        objectStore.getVolume(volumeName).getBucket(bucketName)
-            .createKey("ratis", 1024, ReplicationType.RATIS,
-                ReplicationFactor.ONE, new HashMap<>());
-    byte[] testData = "ratis".getBytes();
-    // First write and flush creates a container in the datanode
-    key.write(testData);
-    key.flush();
-    key.write(testData);
-    KeyOutputStream groupOutputStream =
-        (KeyOutputStream) key.getOutputStream();
-    List<OmKeyLocationInfo> locationInfoList =
-        groupOutputStream.getLocationInfoList();
-    Assert.assertEquals(1, locationInfoList.size());
-    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
-    // delete the container dir
-    FileUtil.fullyDelete(new File(
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet()
-            .getContainer(omKeyLocationInfo.getContainerID()).getContainerData()
-            .getContainerPath()));
-    try {
-      // there is only 1 datanode in the pipeline; the pipeline will be closed
-      // and allocation of a new pipeline will fail as there is no other dn in
-      // the cluster
-      key.close();
-    } catch(IOException ioe) {
-      Assert.assertTrue(ioe instanceof OMException);
-    }
-    long containerID = omKeyLocationInfo.getContainerID();
-
-    // Make sure the container is marked unhealthy
-    Assert.assertTrue(
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet()
-            .getContainer(containerID)
-            .getContainerState()
-            == ContainerProtos.ContainerDataProto.State.UNHEALTHY);
-    OzoneContainer ozoneContainer = cluster.getHddsDatanodes().get(0)
-        .getDatanodeStateMachine().getContainer();
-    // make sure the missing containerSet is empty
-    HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer.getDispatcher();
-    Assert.assertTrue(dispatcher.getMissingContainerSet().isEmpty());
-
-    // restart the hdds datanode; the container should not be in the regular set
-    cluster.restartHddsDatanode(0, true);
-    ozoneContainer = cluster.getHddsDatanodes().get(0)
-        .getDatanodeStateMachine().getContainer();
-    Assert
-        .assertNull(ozoneContainer.getContainerSet().getContainer(containerID));
-  }
-
-  @Test
-  public void testUnhealthyContainer() throws Exception {
-    OzoneOutputStream key =
-        objectStore.getVolume(volumeName).getBucket(bucketName)
-            .createKey("ratis", 1024, ReplicationType.RATIS,
-                ReplicationFactor.ONE, new HashMap<>());
-    // First write and flush creates a container in the datanode
-    key.write("ratis".getBytes());
-    key.flush();
-    key.write("ratis".getBytes());
-    KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
-    List<OmKeyLocationInfo> locationInfoList =
-        groupOutputStream.getLocationInfoList();
-    Assert.assertEquals(1, locationInfoList.size());
-    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
-    ContainerData containerData =
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet()
-            .getContainer(omKeyLocationInfo.getContainerID())
-            .getContainerData();
-    Assert.assertTrue(containerData instanceof KeyValueContainerData);
-    KeyValueContainerData keyValueContainerData =
-        (KeyValueContainerData) containerData;
-    // delete the container chunks directory
-    FileUtil.fullyDelete(new File(keyValueContainerData.getChunksPath()));
-    try {
-      // there is only 1 datanode in the pipeline; the pipeline will be closed
-      // and allocation of a new pipeline will fail as there is no other dn in
-      // the cluster
-      key.close();
-    } catch(IOException ioe) {
-      Assert.assertTrue(ioe instanceof OMException);
-    }
-
-    long containerID = omKeyLocationInfo.getContainerID();
-
-    // Make sure the container is marked unhealthy
-    Assert.assertTrue(
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet().getContainer(containerID)
-            .getContainerState()
-            == ContainerProtos.ContainerDataProto.State.UNHEALTHY);
-    // Check metadata in the .container file
-    File containerFile = new File(keyValueContainerData.getMetadataPath(),
-        containerID + OzoneConsts.CONTAINER_EXTENSION);
-
-    keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
-    assertThat(keyValueContainerData.getState(), is(UNHEALTHY));
-
-    // restart the hdds datanode and see if the container is listed in the
-    // missing container set and not in the regular set
-    cluster.restartHddsDatanode(0, true);
-    // make sure the container state is still marked unhealthy after restart
-    keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
-    assertThat(keyValueContainerData.getState(), is(UNHEALTHY));
-
-    OzoneContainer ozoneContainer;
-    ozoneContainer = cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-        .getContainer();
-    HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer.getDispatcher();
-    ContainerProtos.ContainerCommandRequestProto.Builder request =
-        ContainerProtos.ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.CloseContainer);
-    request.setContainerID(containerID);
-    request.setCloseContainer(
-        ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
-    request.setDatanodeUuid(
-        cluster.getHddsDatanodes().get(0).getDatanodeDetails().getUuidString());
-    Assert.assertEquals(ContainerProtos.Result.CONTAINER_UNHEALTHY,
-        dispatcher.dispatch(request.build(), null).getResult());
-  }
-
-  @Test
-  public void testApplyTransactionFailure() throws Exception {
-    OzoneOutputStream key =
-        objectStore.getVolume(volumeName).getBucket(bucketName)
-            .createKey("ratis", 1024, ReplicationType.RATIS,
-                ReplicationFactor.ONE, new HashMap<>());
-    // First write and flush creates a container in the datanode
-    key.write("ratis".getBytes());
-    key.flush();
-    key.write("ratis".getBytes());
-    KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
-    List<OmKeyLocationInfo> locationInfoList =
-        groupOutputStream.getLocationInfoList();
-    Assert.assertEquals(1, locationInfoList.size());
-    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
-    ContainerData containerData =
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet()
-            .getContainer(omKeyLocationInfo.getContainerID())
-            .getContainerData();
-    Assert.assertTrue(containerData instanceof KeyValueContainerData);
-    KeyValueContainerData keyValueContainerData =
-        (KeyValueContainerData) containerData;
-    key.close();
-    ContainerStateMachine stateMachine =
-        (ContainerStateMachine) ContainerTestHelper.getStateMachine(cluster);
-    SimpleStateMachineStorage storage =
-        (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
-    Path parentPath = storage.findLatestSnapshot().getFile().getPath();
-    // Since the snapshot threshold is set to 1 and applyTransactions have
-    // run, we should see snapshots
-    Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
-    FileInfo snapshot = storage.findLatestSnapshot().getFile();
-    Assert.assertNotNull(snapshot);
-    long containerID = omKeyLocationInfo.getContainerID();
-    // delete the container directory
-    FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath()));
-    Pipeline pipeline = cluster.getStorageContainerLocationClient()
-        .getContainerWithPipeline(containerID).getPipeline();
-    XceiverClientSpi xceiverClient =
-        xceiverClientManager.acquireClient(pipeline);
-    ContainerProtos.ContainerCommandRequestProto.Builder request =
-        ContainerProtos.ContainerCommandRequestProto.newBuilder();
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    request.setCmdType(ContainerProtos.Type.CloseContainer);
-    request.setContainerID(containerID);
-    request.setCloseContainer(
-        ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
-    // The close container transaction will fail over Ratis and will
-    // initiate a pipeline close action.
-
-    // Since the applyTransaction failure is propagated to Ratis,
-    // stateMachineUpdater will hit an exception while taking the next
-    // snapshot and should shut down the RaftServerImpl. The client request
-    // will fail with RaftRetryFailureException.
-    try {
-      xceiverClient.sendCommand(request.build());
-      Assert.fail("Expected exception not thrown");
-    } catch (IOException e) {
-      Assert.assertTrue(HddsClientUtils
-          .checkForException(e) instanceof RaftRetryFailureException);
-    }
-    // Make sure the container is marked unhealthy
-    Assert.assertTrue(
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet().getContainer(containerID)
-            .getContainerState()
-            == ContainerProtos.ContainerDataProto.State.UNHEALTHY);
-    try {
-      // try to take a new snapshot, ideally it should just fail
-      stateMachine.takeSnapshot();
-    } catch (IOException ioe) {
-      Assert.assertTrue(ioe instanceof StateMachineException);
-    }
-    // Make sure the latest snapshot is the same as the previous one
-    FileInfo latestSnapshot = storage.findLatestSnapshot().getFile();
-    Assert.assertTrue(snapshot.getPath().equals(latestSnapshot.getPath()));
-  }
-
-  @Test
-  public void testApplyTransactionIdempotencyWithClosedContainer()
-      throws Exception {
-    OzoneOutputStream key =
-        objectStore.getVolume(volumeName).getBucket(bucketName)
-            .createKey("ratis", 1024, ReplicationType.RATIS,
-                ReplicationFactor.ONE, new HashMap<>());
-    // First write and flush creates a container in the datanode
-    key.write("ratis".getBytes());
-    key.flush();
-    key.write("ratis".getBytes());
-    KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
-    List<OmKeyLocationInfo> locationInfoList =
-        groupOutputStream.getLocationInfoList();
-    Assert.assertEquals(1, locationInfoList.size());
-    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
-    ContainerData containerData =
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet()
-            .getContainer(omKeyLocationInfo.getContainerID())
-            .getContainerData();
-    Assert.assertTrue(containerData instanceof KeyValueContainerData);
-    key.close();
-    ContainerStateMachine stateMachine =
-        (ContainerStateMachine) ContainerTestHelper.getStateMachine(cluster);
-    SimpleStateMachineStorage storage =
-        (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
-    Path parentPath = storage.findLatestSnapshot().getFile().getPath();
-    // Since the snapshot threshold is set to 1 and applyTransactions have
-    // run, we should see snapshots
-    Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
-    FileInfo snapshot = storage.findLatestSnapshot().getFile();
-    Assert.assertNotNull(snapshot);
-    long containerID = omKeyLocationInfo.getContainerID();
-    Pipeline pipeline = cluster.getStorageContainerLocationClient()
-        .getContainerWithPipeline(containerID).getPipeline();
-    XceiverClientSpi xceiverClient =
-        xceiverClientManager.acquireClient(pipeline);
-    ContainerProtos.ContainerCommandRequestProto.Builder request =
-        ContainerProtos.ContainerCommandRequestProto.newBuilder();
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    request.setCmdType(ContainerProtos.Type.CloseContainer);
-    request.setContainerID(containerID);
-    request.setCloseContainer(
-        ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
-    try {
-      xceiverClient.sendCommand(request.build());
-    } catch (IOException e) {
-      Assert.fail("Exception should not be thrown");
-    }
-    Assert.assertTrue(
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet().getContainer(containerID)
-            .getContainerState()
-            == ContainerProtos.ContainerDataProto.State.CLOSED);
-    Assert.assertTrue(stateMachine.isStateMachineHealthy());
-    try {
-      stateMachine.takeSnapshot();
-    } catch (IOException ioe) {
-      Assert.fail("Exception should not be thrown");
-    }
-    FileInfo latestSnapshot = storage.findLatestSnapshot().getFile();
-    Assert.assertFalse(snapshot.getPath().equals(latestSnapshot.getPath()));
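-    // The healthy state machine took a fresh snapshot above, so the latest
-    // snapshot path is expected to differ from the earlier one.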
-  }
-
-  @Test
-  public void testValidateBCSIDOnDnRestart() throws Exception {
-    OzoneOutputStream key =
-        objectStore.getVolume(volumeName).getBucket(bucketName)
-            .createKey("ratis", 1024, ReplicationType.RATIS,
-                ReplicationFactor.ONE, new HashMap<>());
-    // First write and flush creates a container in the datanode
-    key.write("ratis".getBytes());
-    key.flush();
-    key.write("ratis".getBytes());
-    KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
-    List<OmKeyLocationInfo> locationInfoList =
-        groupOutputStream.getLocationInfoList();
-    Assert.assertEquals(1, locationInfoList.size());
-    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
-    ContainerData containerData =
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet()
-            .getContainer(omKeyLocationInfo.getContainerID())
-            .getContainerData();
-    Assert.assertTrue(containerData instanceof KeyValueContainerData);
-    KeyValueContainerData keyValueContainerData =
-        (KeyValueContainerData) containerData;
-    key.close();
-
-    long containerID = omKeyLocationInfo.getContainerID();
-    cluster.shutdownHddsDatanode(
-        cluster.getHddsDatanodes().get(0).getDatanodeDetails());
-    // delete the container db file
-    FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath()));
-    cluster.restartHddsDatanode(
-        cluster.getHddsDatanodes().get(0).getDatanodeDetails(), true);
-    OzoneContainer ozoneContainer =
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer();
-    // make sure the missing containerSet is not empty
-    HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer.getDispatcher();
-    Assert.assertTrue(!dispatcher.getMissingContainerSet().isEmpty());
-    Assert
-        .assertTrue(dispatcher.getMissingContainerSet().contains(containerID));
-    // write a new key
-    key = objectStore.getVolume(volumeName).getBucket(bucketName)
-        .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE,
-            new HashMap<>());
-    // First write and flush creates a container in the datanode
-    key.write("ratis1".getBytes());
-    key.flush();
-    groupOutputStream = (KeyOutputStream) key.getOutputStream();
-    locationInfoList = groupOutputStream.getLocationInfoList();
-    Assert.assertEquals(1, locationInfoList.size());
-    omKeyLocationInfo = locationInfoList.get(0);
-    key.close();
-    containerID = omKeyLocationInfo.getContainerID();
-    containerData = cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-        .getContainer().getContainerSet()
-        .getContainer(omKeyLocationInfo.getContainerID()).getContainerData();
-    Assert.assertTrue(containerData instanceof KeyValueContainerData);
-    keyValueContainerData = (KeyValueContainerData) containerData;
-    ReferenceCountedDB db = BlockUtils.
-        getDB(keyValueContainerData, conf);
-    byte[] blockCommitSequenceIdKey =
-        DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
-
-    // modify the bcsid for the container in RocksDB, thereby inducing
-    // corruption
-    db.getStore().put(blockCommitSequenceIdKey, Longs.toByteArray(0));
-    db.decrementReference();
-    // shutdown of the dn will take a snapshot which will persist the valid
-    // BCSID recorded in the container2BCSIDMap in ContainerStateMachine
-    cluster.shutdownHddsDatanode(
-        cluster.getHddsDatanodes().get(0).getDatanodeDetails());
-    // after the restart, there will be a mismatch between the BCSID recorded
-    // in the container2BCSIDMap and what is in RocksDB, and hence the
-    // container will be marked unhealthy
-    cluster.restartHddsDatanode(
-        cluster.getHddsDatanodes().get(0).getDatanodeDetails(), true);
-    // Make sure the container is marked unhealthy
-    Assert.assertTrue(
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet().getContainer(containerID)
-            .getContainerState()
-            == ContainerProtos.ContainerDataProto.State.UNHEALTHY);
-  }
-}
\ No newline at end of file
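The BCSID corruption step in testValidateBCSIDOnDnRestart can be read as one small operation; a minimal sketch, reusing only the calls that appear in the deleted test (BlockUtils.getDB, DFSUtil.string2Bytes, Longs.toByteArray) with a hypothetical helper name:

    // Sketch: overwrite the persisted blockCommitSequenceId for a container,
    // inducing the BCSID mismatch the datanode detects after a restart and
    // answers by marking the container UNHEALTHY.
    private static void corruptBcsid(KeyValueContainerData containerData,
        OzoneConfiguration conf) throws IOException {
      ReferenceCountedDB db = BlockUtils.getDB(containerData, conf);
      try {
        byte[] key =
            DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
        // Zero can never match a genuinely committed sequence id.
        db.getStore().put(key, Longs.toByteArray(0));
      } finally {
        db.decrementReference();
      }
    }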
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java
deleted file mode 100644
index 30c2624..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java
+++ /dev/null
@@ -1,291 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * Tests delete key operation with a slow follower in the datanode
- * pipeline.
- */
-public class TestDeleteWithSlowFollower {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static OzoneClient client;
-  private static ObjectStore objectStore;
-  private static String volumeName;
-  private static String bucketName;
-  private static String path;
-  private static XceiverClientManager xceiverClientManager;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    path = GenericTestUtils
-        .getTempPath(TestContainerStateMachineFailures.class.getSimpleName());
-    File baseDir = new File(path);
-    baseDir.mkdirs();
-
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
-        TimeUnit.MILLISECONDS);
-    // Make the stale, dead and server failure timeouts high so that a dead
-    // node is not detected at SCM and the pipeline close action
-    // never gets initiated early at the Datanode in the test.
-    conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 1000, TimeUnit.SECONDS);
-    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, 2000,
-        TimeUnit.SECONDS);
-    conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000,
-        TimeUnit.SECONDS);
-    conf.setTimeDuration(OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY,
-        1000, TimeUnit.SECONDS);
-    conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-        1, TimeUnit.SECONDS);
-
-    conf.setQuietMode(false);
-    cluster =
-        MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).setHbInterval(100)
-            .build();
-    cluster.waitForClusterToBeReady();
-    //the easiest way to create an open container is creating a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    xceiverClientManager = new XceiverClientManager(conf);
-    volumeName = "testcontainerstatemachinefailures";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  /**
-   * The test simulates a slow follower by first writing a key, thereby
-   * creating a container on 3 dns of the cluster. Then, a dn is shut down
-   * and a close container cmd gets issued so that the container gets closed
-   * on the leader and the alive follower. Then, the key is deleted and the
-   * node is started up again so that it rejoins the ring and starts applying
-   * the transactions from where it left off by fetching the entries from the
-   * leader. Until this follower catches up and its replica gets closed,
-   * the data is not deleted from any of the nodes which have the
-   * closed replica.
-   */
-  @Test
-  public void testDeleteKeyWithSlowFollower() throws Exception {
-
-    OzoneOutputStream key =
-        objectStore.getVolume(volumeName).getBucket(bucketName)
-            .createKey("ratis", 0, ReplicationType.RATIS,
-                ReplicationFactor.THREE, new HashMap<>());
-    byte[] testData = "ratis".getBytes();
-    // First write and flush creates a container in the datanode
-    key.write(testData);
-    key.flush();
-
-    KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
-    List<OmKeyLocationInfo> locationInfoList =
-        groupOutputStream.getLocationInfoList();
-    Assert.assertEquals(1, locationInfoList.size());
-    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
-    long containerID = omKeyLocationInfo.getContainerID();
-    // A container is created on the datanode. Now figure out a follower node to
-    // kill/slow down.
-    HddsDatanodeService follower = null;
-    HddsDatanodeService leader = null;
-
-    List<Pipeline> pipelineList =
-        cluster.getStorageContainerManager().getPipelineManager()
-            .getPipelines(HddsProtos.ReplicationType.RATIS,
-                HddsProtos.ReplicationFactor.THREE);
-    Assert.assertTrue(pipelineList.size() == 1);
-    Pipeline pipeline = pipelineList.get(0);
-    for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
-      if (ContainerTestHelper.isRatisFollower(dn, pipeline)) {
-        follower = dn;
-      } else if (ContainerTestHelper.isRatisLeader(dn, pipeline)) {
-        leader = dn;
-      }
-    }
-    Assert.assertNotNull(follower);
-    Assert.assertNotNull(leader);
-    // shutdown the slow follower
-    cluster.shutdownHddsDatanode(follower.getDatanodeDetails());
-    key.write(testData);
-    key.close();
-
-    // now move the container to the closed state on the datanode.
-    XceiverClientSpi xceiverClient =
-        xceiverClientManager.acquireClient(pipeline);
-    ContainerProtos.ContainerCommandRequestProto.Builder request =
-        ContainerProtos.ContainerCommandRequestProto.newBuilder();
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    request.setCmdType(ContainerProtos.Type.CloseContainer);
-    request.setContainerID(containerID);
-    request.setCloseContainer(
-        ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
-    xceiverClient.sendCommand(request.build());
-
-    ContainerStateMachine stateMachine =
-        (ContainerStateMachine) ContainerTestHelper
-            .getStateMachine(leader, pipeline);
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).
-        setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName("ratis")
-        .build();
-    OmKeyInfo info = cluster.getOzoneManager().lookupKey(keyArgs);
-    BlockID blockID =
-        info.getKeyLocationVersions().get(0).getLocationList().get(0)
-            .getBlockID();
-    OzoneContainer ozoneContainer;
-    final DatanodeStateMachine dnStateMachine =
-        leader.getDatanodeStateMachine();
-    ozoneContainer = dnStateMachine.getContainer();
-    KeyValueHandler keyValueHandler =
-        (KeyValueHandler) ozoneContainer.getDispatcher()
-            .getHandler(ContainerProtos.ContainerType.KeyValueContainer);
-    Container container =
-        ozoneContainer.getContainerSet().getContainer(blockID.getContainerID());
-    KeyValueContainerData containerData =
-        ((KeyValueContainerData) container.getContainerData());
-    long delTrxId = containerData.getDeleteTransactionId();
-    long numPendingDeletionBlocks = containerData.getNumPendingDeletionBlocks();
-    BlockData blockData =
-        keyValueHandler.getBlockManager().getBlock(container, blockID);
-    cluster.getOzoneManager().deleteKey(keyArgs);
-    GenericTestUtils.waitFor(() -> {
-      return
-          dnStateMachine.getCommandDispatcher().getDeleteBlocksCommandHandler()
-              .getInvocationCount() >= 1;
-    }, 500, 100000);
-    Assert.assertTrue(containerData.getDeleteTransactionId() > delTrxId);
-    Assert.assertTrue(
-        containerData.getNumPendingDeletionBlocks() > numPendingDeletionBlocks);
-    // make sure the chunk was never deleted on the leader even though the
-    // deleteBlock handler was invoked
-    try {
-      for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
-        keyValueHandler.getChunkManager()
-            .readChunk(container, blockID, ChunkInfo.getFromProtoBuf(chunkInfo),
-                null);
-      }
-    } catch (IOException ioe) {
-      Assert.fail("Exception should not be thrown.");
-
-    }
-    long numReadStateMachineOps =
-        stateMachine.getMetrics().getNumReadStateMachineOps();
-    Assert.assertTrue(
-        stateMachine.getMetrics().getNumReadStateMachineFails() == 0);
-    stateMachine.evictStateMachineCache();
-    cluster.restartHddsDatanode(follower.getDatanodeDetails(), false);
-    // wait for the raft server to come up and join the ratis ring
-    Thread.sleep(10000);
-
-    // Make sure the readStateMachine call got triggered after the follower
-    // caught up
-    Assert.assertTrue(stateMachine.getMetrics().getNumReadStateMachineOps()
-        > numReadStateMachineOps);
-    Assert.assertTrue(
-        stateMachine.getMetrics().getNumReadStateMachineFails() == 0);
-    // wait for the chunk to get deleted now
-    Thread.sleep(10000);
-    for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
-      keyValueHandler =
-          (KeyValueHandler) dn.getDatanodeStateMachine().getContainer()
-              .getDispatcher()
-              .getHandler(ContainerProtos.ContainerType.KeyValueContainer);
-      // make sure the chunk is now deleted on all the dns
-      try {
-        for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
-          keyValueHandler.getChunkManager().readChunk(container, blockID,
-              ChunkInfo.getFromProtoBuf(chunkInfo), null);
-        }
-        Assert.fail("Expected exception is not thrown");
-      } catch (IOException ioe) {
-        Assert.assertTrue(ioe instanceof StorageContainerException);
-        Assert.assertTrue(((StorageContainerException) ioe).getResult()
-            == ContainerProtos.Result.UNABLE_TO_FIND_CHUNK);
-      }
-    }
-
-  }
-}
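The chunk-existence probe that closes testDeleteKeyWithSlowFollower is one reusable predicate; a hedged sketch against the same KeyValueHandler/ChunkManager calls used above (the helper name is hypothetical):

    // Sketch: true once readChunk fails with UNABLE_TO_FIND_CHUNK, i.e. the
    // block's chunks have been physically removed on this datanode.
    private static boolean chunksDeleted(KeyValueHandler handler,
        Container container, BlockID blockID, BlockData blockData) {
      try {
        for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
          handler.getChunkManager().readChunk(container, blockID,
              ChunkInfo.getFromProtoBuf(chunkInfo), null);
        }
        return false; // every chunk is still readable
      } catch (IOException ioe) {
        return ioe instanceof StorageContainerException
            && ((StorageContainerException) ioe).getResult()
            == ContainerProtos.Result.UNABLE_TO_FIND_CHUNK;
      }
    }

With such a predicate, the trailing Thread.sleep(10000) could be replaced by GenericTestUtils.waitFor(() -> chunksDeleted(...), 500, 100000), polling rather than sleeping a fixed interval.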
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
deleted file mode 100644
index edb796b..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ /dev/null
@@ -1,415 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.net.DNSToSwitchMapping;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.StaticMapping;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
-    .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * Tests Close Container Exception handling by Ozone Client.
- */
-public class TestFailureHandlingByClient {
-
-  private MiniOzoneCluster cluster;
-  private OzoneConfiguration conf;
-  private OzoneClient client;
-  private ObjectStore objectStore;
-  private int chunkSize;
-  private int blockSize;
-  private String volumeName;
-  private String bucketName;
-  private String keyString;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  private void init() throws Exception {
-    conf = new OzoneConfiguration();
-    chunkSize = (int) OzoneConsts.MB;
-    blockSize = 4 * chunkSize;
-    conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 5,
-        TimeUnit.SECONDS);
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS);
-    conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 10);
-    conf.setTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY,
-        1, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
-        1, TimeUnit.SECONDS);
-    conf.setBoolean(
-        OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true);
-
-    conf.setQuietMode(false);
-    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
-        StaticMapping.class, DNSToSwitchMapping.class);
-    StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
-        Collections.singleton(HddsUtils.getHostName(conf))).get(0),
-        "/rack1");
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(10).build();
-    cluster.waitForClusterToBeReady();
-    //the easiest way to create an open container is creating a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    keyString = UUID.randomUUID().toString();
-    volumeName = "datanodefailurehandlingtest";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-  }
-
-  private void startCluster() throws Exception {
-    init();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testBlockWritesWithDnFailures() throws Exception {
-    startCluster();
-    String keyName = UUID.randomUUID().toString();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    byte[] data =
-        ContainerTestHelper
-        .getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
-    key.write(data);
-
-    // get the name of a valid container
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream groupOutputStream =
-        (KeyOutputStream) key.getOutputStream();
-    List<OmKeyLocationInfo> locationInfoList =
-        groupOutputStream.getLocationInfoList();
-    Assert.assertTrue(locationInfoList.size() == 1);
-    long containerId = locationInfoList.get(0).getContainerID();
-    ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager()
-        .getContainer(ContainerID.valueof(containerId));
-    Pipeline pipeline =
-        cluster.getStorageContainerManager().getPipelineManager()
-            .getPipeline(container.getPipelineID());
-    List<DatanodeDetails> datanodes = pipeline.getNodes();
-    cluster.shutdownHddsDatanode(datanodes.get(0));
-    cluster.shutdownHddsDatanode(datanodes.get(1));
-    // The write will fail but the exception will be handled and the length
-    // will be updated correctly in OzoneManager once the stream is closed
-    key.close();
-    //get the name of a valid container
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    Assert.assertEquals(data.length, keyInfo.getDataSize());
-    validateData(keyName, data);
-  }
-
-  @Test
-  public void testWriteSmallFile() throws Exception {
-    startCluster();
-    String keyName = UUID.randomUUID().toString();
-    OzoneOutputStream key =
-        createKey(keyName, ReplicationType.RATIS, 0);
-    String data = ContainerTestHelper
-        .getFixedLengthString(keyString,  chunkSize/2);
-    key.write(data.getBytes());
-    // get the name of a valid container
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream =
-        (KeyOutputStream) key.getOutputStream();
-    List<OmKeyLocationInfo> locationInfoList =
-        keyOutputStream.getLocationInfoList();
-    long containerId = locationInfoList.get(0).getContainerID();
-    BlockID blockId = locationInfoList.get(0).getBlockID();
-    ContainerInfo container =
-        cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
-    Pipeline pipeline =
-        cluster.getStorageContainerManager().getPipelineManager()
-            .getPipeline(container.getPipelineID());
-    List<DatanodeDetails> datanodes = pipeline.getNodes();
-
-    cluster.shutdownHddsDatanode(datanodes.get(0));
-    cluster.shutdownHddsDatanode(datanodes.get(1));
-    key.close();
-    // the close will hit AlreadyClosedException; the current stream
-    // will be discarded and a new block written
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-
-    // Make sure a new block is written
-    Assert.assertNotEquals(
-        keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0)
-            .getBlockID(), blockId);
-    Assert.assertEquals(data.getBytes().length, keyInfo.getDataSize());
-    validateData(keyName, data.getBytes());
-  }
-
-
-  @Test
-  public void testContainerExclusionWithClosedContainerException()
-      throws Exception {
-    startCluster();
-    String keyName = UUID.randomUUID().toString();
-    OzoneOutputStream key =
-        createKey(keyName, ReplicationType.RATIS, blockSize);
-    String data = ContainerTestHelper
-        .getFixedLengthString(keyString,  chunkSize);
-
-    // get the name of a valid container
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream =
-        (KeyOutputStream) key.getOutputStream();
-    List<BlockOutputStreamEntry> streamEntryList =
-        keyOutputStream.getStreamEntries();
-
-    // Assert that 1 block will be preallocated
-    Assert.assertEquals(1, streamEntryList.size());
-    key.write(data.getBytes());
-    key.flush();
-    long containerId = streamEntryList.get(0).getBlockID().getContainerID();
-    BlockID blockId = streamEntryList.get(0).getBlockID();
-    List<Long> containerIdList = new ArrayList<>();
-    containerIdList.add(containerId);
-
-    // the check below will fail the test if the container does not get closed
-    ContainerTestHelper
-        .waitForContainerClose(cluster, containerIdList.toArray(new Long[0]));
-
-    // This write will hit ClosedContainerException and this container will
-    // be added to the exclude list
-    key.write(data.getBytes());
-    key.flush();
-
-    Assert.assertTrue(keyOutputStream.getExcludeList().getContainerIds()
-        .contains(ContainerID.valueof(containerId)));
-    Assert.assertTrue(
-        keyOutputStream.getExcludeList().getDatanodes().isEmpty());
-    Assert.assertTrue(
-        keyOutputStream.getExcludeList().getPipelineIds().isEmpty());
-
-    // The close will just write to the buffer
-    key.close();
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-
-    // Make sure a new block is written
-    Assert.assertNotEquals(
-        keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0)
-            .getBlockID(), blockId);
-    Assert.assertEquals(2 * data.getBytes().length, keyInfo.getDataSize());
-    validateData(keyName, data.concat(data).getBytes());
-  }
-
-  @Test
-  public void testDatanodeExclusionWithMajorityCommit() throws Exception {
-    startCluster();
-    String keyName = UUID.randomUUID().toString();
-    OzoneOutputStream key =
-        createKey(keyName, ReplicationType.RATIS, blockSize);
-    String data = ContainerTestHelper
-        .getFixedLengthString(keyString,  chunkSize);
-
-    // get the name of a valid container
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream =
-        (KeyOutputStream) key.getOutputStream();
-    List<BlockOutputStreamEntry> streamEntryList =
-        keyOutputStream.getStreamEntries();
-
-    // Assert that 1 block will be preallocated
-    Assert.assertEquals(1, streamEntryList.size());
-    key.write(data.getBytes());
-    key.flush();
-    long containerId = streamEntryList.get(0).getBlockID().getContainerID();
-    BlockID blockId = streamEntryList.get(0).getBlockID();
-    ContainerInfo container =
-        cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
-    Pipeline pipeline =
-        cluster.getStorageContainerManager().getPipelineManager()
-            .getPipeline(container.getPipelineID());
-    List<DatanodeDetails> datanodes = pipeline.getNodes();
-
-    // shutdown 1 datanode. This will make sure the 2-way commit happens for
-    // the next write ops.
-    cluster.shutdownHddsDatanode(datanodes.get(0));
-
-    key.write(data.getBytes());
-    key.write(data.getBytes());
-    key.flush();
-
-    Assert.assertTrue(keyOutputStream.getExcludeList().getDatanodes()
-        .contains(datanodes.get(0)));
-    Assert.assertTrue(
-        keyOutputStream.getExcludeList().getContainerIds().isEmpty());
-    Assert.assertTrue(
-        keyOutputStream.getExcludeList().getPipelineIds().isEmpty());
-    // The close will just write to the buffer
-    key.close();
-
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-
-    // Make sure a new block is written
-    Assert.assertNotEquals(
-        keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0)
-            .getBlockID(), blockId);
-    Assert.assertEquals(3 * data.getBytes().length, keyInfo.getDataSize());
-    validateData(keyName, data.concat(data).concat(data).getBytes());
-  }
-
-
-  @Test
-  public void testPipelineExclusionWithPipelineFailure() throws Exception {
-    startCluster();
-    String keyName = UUID.randomUUID().toString();
-    OzoneOutputStream key =
-        createKey(keyName, ReplicationType.RATIS, blockSize);
-    String data = ContainerTestHelper
-        .getFixedLengthString(keyString,  chunkSize);
-
-    // get the name of a valid container
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream =
-        (KeyOutputStream) key.getOutputStream();
-    List<BlockOutputStreamEntry> streamEntryList =
-        keyOutputStream.getStreamEntries();
-
-    // Assert that 1 block will be preallocated
-    Assert.assertEquals(1, streamEntryList.size());
-    key.write(data.getBytes());
-    key.flush();
-    long containerId = streamEntryList.get(0).getBlockID().getContainerID();
-    BlockID blockId = streamEntryList.get(0).getBlockID();
-    ContainerInfo container =
-        cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
-    Pipeline pipeline =
-        cluster.getStorageContainerManager().getPipelineManager()
-            .getPipeline(container.getPipelineID());
-    List<DatanodeDetails> datanodes = pipeline.getNodes();
-
-    // With two nodes down, the next write will hit AlreadyClosedException;
-    // the pipeline will be added to the exclude list
-    cluster.shutdownHddsDatanode(datanodes.get(0));
-    cluster.shutdownHddsDatanode(datanodes.get(1));
-
-    key.write(data.getBytes());
-    key.write(data.getBytes());
-    key.flush();
-    Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
-        .contains(pipeline.getId()));
-    Assert.assertTrue(
-        keyOutputStream.getExcludeList().getContainerIds().isEmpty());
-    Assert.assertTrue(
-        keyOutputStream.getExcludeList().getDatanodes().isEmpty());
-    // The close will just write to the buffer
-    key.close();
-
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-
-    // Make sure a new block is written
-    Assert.assertNotEquals(
-        keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0)
-            .getBlockID(), blockId);
-    Assert.assertEquals(3 * data.getBytes().length, keyInfo.getDataSize());
-    validateData(keyName, data.concat(data).concat(data).getBytes());
-  }
-
-  private OzoneOutputStream createKey(String keyName, ReplicationType type,
-      long size) throws Exception {
-    return ContainerTestHelper
-        .createKey(keyName, type, size, objectStore, volumeName, bucketName);
-  }
-
-  private void validateData(String keyName, byte[] data) throws Exception {
-    ContainerTestHelper
-        .validateData(keyName, data, objectStore, volumeName, bucketName);
-  }
-}
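The three exclusion tests above share one assertion shape: after a failure, exactly one of the exclude list's collections is populated (container for ClosedContainerException, datanode for a 2-way majority commit, pipeline for AlreadyClosedException). A compact sketch of that pattern against the same getExcludeList() accessors; the enum and helper are hypothetical:

    // Sketch: assert that only the expected dimension of the exclude list
    // was populated after a simulated failure.
    enum Excluded { CONTAINER, DATANODE, PIPELINE }

    private static void assertExcludedOnly(KeyOutputStream out, Excluded kind) {
      Assert.assertEquals(kind == Excluded.CONTAINER,
          !out.getExcludeList().getContainerIds().isEmpty());
      Assert.assertEquals(kind == Excluded.DATANODE,
          !out.getExcludeList().getDatanodes().isEmpty());
      Assert.assertEquals(kind == Excluded.PIPELINE,
          !out.getExcludeList().getPipelineIds().isEmpty());
    }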
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
deleted file mode 100644
index 47a716e..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneKeyDetails;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
-import java.util.UUID;
-import java.util.HashMap;
-
-/**
- * Tests Hybrid Pipeline Creation and IO on same set of Datanodes.
- */
-public class TestHybridPipelineOnDatanode {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static OzoneClient client;
-  private static ObjectStore objectStore;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
-    cluster.waitForClusterToBeReady();
-    //the easiest way to create an open container is creating a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  /**
-   * Tests that pipelines of different replication factors can coexist and
-   * serve reads and writes on the same set of datanodes.
-   * @throws IOException
-   */
-  @Test
-  public void testHybridPipelineOnDatanode() throws IOException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    String value = UUID.randomUUID().toString();
-    byte[] data = value.getBytes();
-    objectStore.createVolume(volumeName);
-    OzoneVolume volume = objectStore.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    String keyName1 = UUID.randomUUID().toString();
-
-    // Write data into a key
-    OzoneOutputStream out = bucket
-        .createKey(keyName1, data.length, ReplicationType.RATIS,
-            ReplicationFactor.ONE, new HashMap<>());
-    out.write(value.getBytes());
-    out.close();
-
-    String keyName2 = UUID.randomUUID().toString();
-
-    // Write data into a key
-    out = bucket
-        .createKey(keyName2, data.length, ReplicationType.RATIS,
-            ReplicationFactor.THREE, new HashMap<>());
-    out.write(value.getBytes());
-    out.close();
-
-    // We need to find the location of the chunk file corresponding to the
-    // data we just wrote.
-    OzoneKey key1 = bucket.getKey(keyName1);
-    long containerID1 =
-        ((OzoneKeyDetails) key1).getOzoneKeyLocations().get(0).getContainerID();
-
-    OzoneKey key2 = bucket.getKey(keyName2);
-    long containerID2 =
-        ((OzoneKeyDetails) key2).getOzoneKeyLocations().get(0).getContainerID();
-
-    PipelineID pipelineID1 =
-        cluster.getStorageContainerManager().getContainerInfo(containerID1)
-            .getPipelineID();
-    PipelineID pipelineID2 =
-        cluster.getStorageContainerManager().getContainerInfo(containerID2)
-            .getPipelineID();
-    Pipeline pipeline1 =
-        cluster.getStorageContainerManager().getPipelineManager()
-            .getPipeline(pipelineID1);
-    List<DatanodeDetails> dns = pipeline1.getNodes();
-    Assert.assertTrue(dns.size() == 1);
-
-    Pipeline pipeline2 =
-        cluster.getStorageContainerManager().getPipelineManager()
-            .getPipeline(pipelineID2);
-    Assert.assertFalse(pipeline1.getFactor().equals(pipeline2.getFactor()));
-    Assert.assertTrue(pipeline1.getType() == HddsProtos.ReplicationType.RATIS);
-    Assert.assertTrue(pipeline1.getType() == pipeline2.getType());
-    // assert that pipeline1 and pipeline2 run on the same node but with
-    // different replication factors
-    Assert.assertTrue(pipeline2.getNodes().contains(dns.get(0)));
-    byte[] b1 = new byte[data.length];
-    byte[] b2 = new byte[data.length];
-    // now try to read both the keys
-    OzoneInputStream is = bucket.readKey(keyName1);
-    is.read(b1);
-    is.close();
-
-    // now try to read both the keys
-    is = bucket.readKey(keyName2);
-    is.read(b2);
-    is.close();
-    Assert.assertTrue(Arrays.equals(b1, data));
-    Assert.assertTrue(Arrays.equals(b1, b2));
-  }
-}
-
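One subtlety in the read-back at the end of testHybridPipelineOnDatanode: a bare InputStream.read(byte[]) is not guaranteed to fill the buffer, and the assertions only hold because the UUID-sized value fits in a single chunk. A defensive sketch using plain java.io (no Ozone-specific API):

    // Sketch: read exactly buf.length bytes or fail, rather than assuming a
    // single read() call fills the buffer.
    private static void readFully(java.io.InputStream in, byte[] buf)
        throws java.io.IOException {
      int off = 0;
      while (off < buf.length) {
        int n = in.read(buf, off, buf.length - off);
        if (n < 0) {
          throw new java.io.EOFException("stream ended at " + off + " bytes");
        }
        off += n;
      }
    }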
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java
deleted file mode 100644
index fa8a289..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.KeyInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * Tests {@link KeyInputStream}.
- */
-public class TestKeyInputStream {
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-  private static OzoneClient client;
-  private static ObjectStore objectStore;
-  private static int chunkSize;
-  private static int flushSize;
-  private static int maxFlushSize;
-  private static int blockSize;
-  private static String volumeName;
-  private static String bucketName;
-  private static String keyString;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    chunkSize = 100;
-    flushSize = 4 * chunkSize;
-    maxFlushSize = 2 * flushSize;
-    blockSize = 2 * maxFlushSize;
-    conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms");
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
-    conf.setQuietMode(false);
-    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
-        StorageUnit.MB);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3)
-        .setBlockSize(blockSize)
-        .setChunkSize(chunkSize)
-        .setStreamBufferFlushSize(flushSize)
-        .setStreamBufferMaxSize(maxFlushSize)
-        .setStreamBufferSizeUnit(StorageUnit.BYTES)
-        .build();
-    cluster.waitForClusterToBeReady();
-    //the easiest way to create an open container is creating a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    keyString = UUID.randomUUID().toString();
-    volumeName = "test-key-input-stream-volume";
-    bucketName = "test-key-input-stream-bucket";
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  private String getKeyName() {
-    return UUID.randomUUID().toString();
-  }
-
-  private OzoneOutputStream createKey(String keyName, ReplicationType type,
-      long size) throws Exception {
-    return ContainerTestHelper
-        .createKey(keyName, type, size, objectStore, volumeName, bucketName);
-  }
-
-  @Test
-  public void testSeek() throws Exception {
-    XceiverClientMetrics metrics = XceiverClientManager
-        .getXceiverClientMetrics();
-    long writeChunkCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long readChunkCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.ReadChunk);
-
-    String keyName = getKeyName();
-    OzoneOutputStream key = ContainerTestHelper.createKey(keyName,
-        ReplicationType.RATIS, 0, objectStore, volumeName, bucketName);
-
-    // write data spanning 3 chunks
-    int dataLength = (2 * chunkSize) + (chunkSize / 2);
-    byte[] inputData = ContainerTestHelper.getFixedLengthString(
-        keyString, dataLength).getBytes(UTF_8);
-    key.write(inputData);
-    key.close();
-
-    Assert.assertEquals(writeChunkCount + 3,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-
-    KeyInputStream keyInputStream = (KeyInputStream) objectStore
-        .getVolume(volumeName).getBucket(bucketName).readKey(keyName)
-        .getInputStream();
-
-    // Seek to position 150
-    keyInputStream.seek(150);
-
-    Assert.assertEquals(150, keyInputStream.getPos());
-
-    // Seek operation should not result in any readChunk operation.
-    Assert.assertEquals(readChunkCount, metrics
-        .getContainerOpsMetrics(ContainerProtos.Type.ReadChunk));
-    Assert.assertEquals(readChunkCount, metrics
-        .getContainerOpCountMetrics(ContainerProtos.Type.ReadChunk));
-
-    byte[] readData = new byte[chunkSize];
-    keyInputStream.read(readData, 0, chunkSize);
-
-    // Since we are reading data from index 150 to 250 and the chunk boundary
-    // is at 100 bytes, we need to read 2 chunks.
-    Assert.assertEquals(readChunkCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.ReadChunk));
-
-    keyInputStream.close();
-
-    // Verify that the data read matches with the input data at corresponding
-    // indices.
-    for (int i = 0; i < chunkSize; i++) {
-      Assert.assertEquals(inputData[chunkSize + 50 + i], readData[i]);
-    }
-  }
-}
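The readChunk accounting in testSeek is plain chunk arithmetic: seeking to 150 lands in the second 100-byte chunk, and reading 100 bytes from there touches bytes 150..249, which straddle chunks 2 and 3. A minimal sketch of that calculation (hypothetical helper, not an Ozone API):

    // Sketch: chunks touched by a read of `length` bytes at offset `pos`
    // with fixed-size chunks. chunksSpanned(150, 100, 100) == 2, matching
    // the readChunkCount + 2 assertion above.
    private static int chunksSpanned(long pos, int length, int chunkSize) {
      long firstChunk = pos / chunkSize;
      long lastChunk = (pos + length - 1) / chunkSize;
      return (int) (lastChunk - firstChunk + 1);
    }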
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
deleted file mode 100644
index 9666247..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * Tests MultiBlock Writes with Dn failures by Ozone Client.
- */
-public class TestMultiBlockWritesWithDnFailures {
-
-  private MiniOzoneCluster cluster;
-  private OzoneConfiguration conf;
-  private OzoneClient client;
-  private ObjectStore objectStore;
-  private int chunkSize;
-  private int blockSize;
-  private String volumeName;
-  private String bucketName;
-  private String keyString;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  private void startCluster(int datanodes) throws Exception {
-    conf = new OzoneConfiguration();
-    chunkSize = (int) OzoneConsts.MB;
-    blockSize = 4 * chunkSize;
-    conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 5,
-        TimeUnit.SECONDS);
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS);
-    conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 10);
-    conf.setTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY,
-        1, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
-        1, TimeUnit.SECONDS);
-
-    conf.setQuietMode(false);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(datanodes).build();
-    cluster.waitForClusterToBeReady();
-    //the easiest way to create an open container is creating a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    keyString = UUID.randomUUID().toString();
-    volumeName = "datanodefailurehandlingtest";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testMultiBlockWritesWithDnFailures() throws Exception {
-    startCluster(6);
-    String keyName = "ratis3";
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    String data =
-        ContainerTestHelper
-            .getFixedLengthString(keyString, blockSize + chunkSize);
-    key.write(data.getBytes());
-
-    // get the name of a valid container
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream groupOutputStream =
-        (KeyOutputStream) key.getOutputStream();
-    List<OmKeyLocationInfo> locationInfoList =
-        groupOutputStream.getLocationInfoList();
-    Assert.assertTrue(locationInfoList.size() == 2);
-    long containerId = locationInfoList.get(1).getContainerID();
-    ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager()
-        .getContainer(ContainerID.valueof(containerId));
-    Pipeline pipeline =
-        cluster.getStorageContainerManager().getPipelineManager()
-            .getPipeline(container.getPipelineID());
-    List<DatanodeDetails> datanodes = pipeline.getNodes();
-    cluster.shutdownHddsDatanode(datanodes.get(0));
-    cluster.shutdownHddsDatanode(datanodes.get(1));
-
-    // The write will fail but the exception will be handled and the length
-    // will be updated correctly in OzoneManager once the stream is closed
-    key.write(data.getBytes());
-    key.close();
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    Assert.assertEquals(2 * data.getBytes().length, keyInfo.getDataSize());
-    validateData(keyName, data.concat(data).getBytes());
-  }
-
-  @Test
-  public void testMultiBlockWritesWithIntermittentDnFailures()
-      throws Exception {
-    startCluster(10);
-    String keyName = UUID.randomUUID().toString();
-    OzoneOutputStream key =
-        createKey(keyName, ReplicationType.RATIS, 6 * blockSize);
-    String data = ContainerTestHelper
-        .getFixedLengthString(keyString, blockSize + chunkSize);
-    key.write(data.getBytes());
-
-    // get the name of a valid container
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream =
-        (KeyOutputStream) key.getOutputStream();
-    List<BlockOutputStreamEntry> streamEntryList =
-        keyOutputStream.getStreamEntries();
-
-    // Assert that 6 blocks will be preallocated
-    Assert.assertEquals(6, streamEntryList.size());
-    key.write(data.getBytes());
-    key.flush();
-    long containerId = streamEntryList.get(0).getBlockID().getContainerID();
-    BlockID blockId = streamEntryList.get(0).getBlockID();
-    ContainerInfo container =
-        cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
-    Pipeline pipeline =
-        cluster.getStorageContainerManager().getPipelineManager()
-            .getPipeline(container.getPipelineID());
-    List<DatanodeDetails> datanodes = pipeline.getNodes();
-    cluster.shutdownHddsDatanode(datanodes.get(0));
-
-    // The write will fail, but the exception will be handled and the length
-    // will be updated correctly in OzoneManager once the stream is closed.
-    key.write(data.getBytes());
-
-    // shutdown the second datanode
-    cluster.shutdownHddsDatanode(datanodes.get(1));
-    key.write(data.getBytes());
-    key.close();
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    Assert.assertEquals(4 * data.getBytes().length, keyInfo.getDataSize());
-    validateData(keyName,
-        data.concat(data).concat(data).concat(data).getBytes());
-  }
-
-  private OzoneOutputStream createKey(String keyName, ReplicationType type,
-      long size) throws Exception {
-    return ContainerTestHelper
-        .createKey(keyName, type, size, objectStore, volumeName, bucketName);
-  }
-
-  private void validateData(String keyName, byte[] data) throws Exception {
-    ContainerTestHelper
-        .validateData(keyName, data, objectStore, volumeName, bucketName);
-  }
-
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
deleted file mode 100644
index 2d96b8d..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
-import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.BucketArgs;
-import org.apache.hadoop.ozone.client.CertificateClientTestImpl;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.security.NoSuchAlgorithmException;
-import java.util.HashMap;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-
-/**
- * Tests the public facing APIs of the Ozone client with at-rest encryption
- * enabled.
- */
-public class TestOzoneAtRestEncryption extends TestOzoneRpcClient {
-
-  private static MiniOzoneCluster cluster = null;
-  private static MiniKMS miniKMS;
-  private static OzoneClient ozClient = null;
-  private static ObjectStore store = null;
-  private static OzoneManager ozoneManager;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-
-  private static final String SCM_ID = UUID.randomUUID().toString();
-  private static File testDir;
-  private static OzoneConfiguration conf;
-  private static final String TEST_KEY = "key1";
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    testDir = GenericTestUtils.getTestDir(
-        TestSecureOzoneRpcClient.class.getSimpleName());
-
-    File kmsDir = new File(testDir, UUID.randomUUID().toString());
-    Assert.assertTrue(kmsDir.mkdirs());
-    MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder();
-    miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build();
-    miniKMS.start();
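-    // The embedded MiniKMS serves as the KMS backing the bucket encryption
-    // keys used by the tests below.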
-
-    OzoneManager.setTestSecureOmFlag(true);
-    conf = new OzoneConfiguration();
-    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
-        getKeyProviderURI(miniKMS));
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, true);
-    conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    CertificateClientTestImpl certificateClientTest =
-        new CertificateClientTestImpl(conf);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(10)
-        .setScmId(SCM_ID)
-        .setCertificateClient(certificateClientTest)
-        .build();
-    cluster.getOzoneManager().startSecretManager();
-    cluster.waitForClusterToBeReady();
-    ozClient = OzoneClientFactory.getRpcClient(conf);
-    store = ozClient.getObjectStore();
-    storageContainerLocationClient =
-        cluster.getStorageContainerLocationClient();
-    ozoneManager = cluster.getOzoneManager();
-    TestOzoneRpcClient.setCluster(cluster);
-    TestOzoneRpcClient.setOzClient(ozClient);
-    TestOzoneRpcClient.setOzoneManager(ozoneManager);
-    TestOzoneRpcClient.setStorageContainerLocationClient(
-        storageContainerLocationClient);
-    TestOzoneRpcClient.setStore(store);
-    TestOzoneRpcClient.setScmId(SCM_ID);
-
-    // create test key
-    createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf);
-  }
-
-
-  /**
-   * Close OzoneClient and shutdown MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() throws IOException {
-    if (ozClient != null) {
-      ozClient.close();
-    }
-
-    if (storageContainerLocationClient != null) {
-      storageContainerLocationClient.close();
-    }
-
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-
-    if (miniKMS != null) {
-      miniKMS.stop();
-    }
-  }
-
-  @Test
-  public void testPutKeyWithEncryption() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    long currentTime = Time.now();
-
-    String value = "sample value";
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    BucketArgs bucketArgs = BucketArgs.newBuilder()
-        .setBucketEncryptionKey(TEST_KEY).build();
-    volume.createBucket(bucketName, bucketArgs);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
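-    // The bucket carries TEST_KEY as its encryption key, so data written
-    // below is transparently encrypted on write and decrypted on read.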
-    for (int i = 0; i < 1; i++) {
-      String keyName = UUID.randomUUID().toString();
-
-      try (OzoneOutputStream out = bucket.createKey(keyName,
-          value.getBytes("UTF-8").length, ReplicationType.STAND_ALONE,
-          ReplicationFactor.ONE, new HashMap<>())) {
-        out.write(value.getBytes("UTF-8"));
-      }
-
-      OzoneKey key = bucket.getKey(keyName);
-      Assert.assertEquals(keyName, key.getName());
-      byte[] fileContent;
-      int len = 0;
-
-      try(OzoneInputStream is = bucket.readKey(keyName)) {
-        fileContent = new byte[value.getBytes("UTF-8").length];
-        len = is.read(fileContent);
-      }
-
-      Assert.assertEquals(value.length(), len);
-      Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-          keyName, ReplicationType.STAND_ALONE,
-          ReplicationFactor.ONE));
-      Assert.assertEquals(value, new String(fileContent, "UTF-8"));
-      Assert.assertTrue(key.getCreationTime() >= currentTime);
-      Assert.assertTrue(key.getModificationTime() >= currentTime);
-    }
-  }
-
-  private boolean verifyRatisReplication(String volumeName, String bucketName,
-      String keyName, ReplicationType type, ReplicationFactor factor)
-      throws IOException {
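-    // A key counts as correctly replicated only if every container holding
-    // its blocks matches the expected replication type and factor.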
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-    HddsProtos.ReplicationType replicationType =
-        HddsProtos.ReplicationType.valueOf(type.toString());
-    HddsProtos.ReplicationFactor replicationFactor =
-        HddsProtos.ReplicationFactor.valueOf(factor.getValue());
-    OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs);
-    for (OmKeyLocationInfo info:
-        keyInfo.getLatestVersionLocations().getLocationList()) {
-      ContainerInfo container =
-          storageContainerLocationClient.getContainer(info.getContainerID());
-      if (!container.getReplicationFactor().equals(replicationFactor) || (
-          container.getReplicationType() != replicationType)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  private static String getKeyProviderURI(MiniKMS kms) {
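-    // Turns e.g. http://localhost:9600/kms into kms://http@localhost:9600/kms,
-    // the provider URI form expected by KMSClientProvider.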
-    return KMSClientProvider.SCHEME_NAME + "://" +
-        kms.getKMSUrl().toExternalForm().replace("://", "@");
-  }
-
-  private static void createKey(String keyName, KeyProvider
-      provider, Configuration config)
-      throws NoSuchAlgorithmException, IOException {
-    final KeyProvider.Options options = KeyProvider.options(config);
-    options.setDescription(keyName);
-    options.setBitLength(128);
-    provider.createKey(keyName, options);
-    provider.flush();
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java
deleted file mode 100644
index 5f6d494..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.ratis.protocol.GroupMismatchException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
-
-/**
- * Tests failure detection and handling in BlockOutputStream Class.
- */
-public class TestOzoneClientRetriesOnException {
-
-  private static MiniOzoneCluster cluster;
-  private OzoneConfiguration conf = new OzoneConfiguration();
-  private OzoneClient client;
-  private ObjectStore objectStore;
-  private int chunkSize;
-  private int flushSize;
-  private int maxFlushSize;
-  private int blockSize;
-  private String volumeName;
-  private String bucketName;
-  private String keyString;
-  private XceiverClientManager xceiverClientManager;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @Before
-  public void init() throws Exception {
-    chunkSize = 100;
-    flushSize = 2 * chunkSize;
-    maxFlushSize = 2 * flushSize;
-    blockSize = 2 * maxFlushSize;
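-    // Sizes are chained: flush = 2 chunks (200 B), max flush = 2 flushes
-    // (400 B), block = 2 max flushes (800 B).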
-    conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms");
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    // conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS);
-    conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE");
-    conf.setInt(OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES, 3);
-    conf.setQuietMode(false);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(7)
-        .setBlockSize(blockSize)
-        .setChunkSize(chunkSize)
-        .setStreamBufferFlushSize(flushSize)
-        .setStreamBufferMaxSize(maxFlushSize)
-        .setStreamBufferSizeUnit(StorageUnit.BYTES)
-        .build();
-    cluster.waitForClusterToBeReady();
-    // The easiest way to create an open container is to create a key.
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    xceiverClientManager = new XceiverClientManager(conf);
-    keyString = UUID.randomUUID().toString();
-    volumeName = "testblockoutputstreamwithretries";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-  }
-
-  private String getKeyName() {
-    return UUID.randomUUID().toString();
-  }
-
-  /**
-   * Shutdown MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testGroupMismatchExceptionHandling() throws Exception {
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    int dataLength = maxFlushSize + 50;
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream();
-    long containerID =
-        keyOutputStream.getStreamEntries().get(0).getBlockID().getContainerID();
-    OutputStream stream = keyOutputStream.getStreamEntries().get(0)
-        .getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
-    ContainerInfo container =
-        cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerID));
-    Pipeline pipeline =
-        cluster.getStorageContainerManager().getPipelineManager()
-            .getPipeline(container.getPipelineID());
-    ContainerTestHelper.waitForPipelineClose(key, cluster, true);
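-    // The closed pipeline makes the flush fail with GroupMismatchException;
-    // the client excludes that pipeline and allocates a new block, which is
-    // why a second stream entry appears below.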
-    key.flush();
-    Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream
-        .getIoException()) instanceof GroupMismatchException);
-    Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
-        .contains(pipeline.getId()));
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
-    key.close();
-    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
-    validateData(keyName, data1);
-  }
-
-  @Test
-  public void testMaxRetriesByOzoneClient() throws Exception {
-    String keyName = getKeyName();
-    OzoneOutputStream key =
-        createKey(keyName, ReplicationType.RATIS, 4 * blockSize);
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
-    List<BlockOutputStreamEntry> entries = keyOutputStream.getStreamEntries();
-    Assert.assertEquals(4, keyOutputStream.getStreamEntries().size());
-    int dataLength = maxFlushSize + 50;
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    long containerID;
-    List<Long> containerList = new ArrayList<>();
-    for (BlockOutputStreamEntry entry : entries) {
-      containerID = entry.getBlockID().getContainerID();
-      ContainerInfo container =
-          cluster.getStorageContainerManager().getContainerManager()
-              .getContainer(ContainerID.valueof(containerID));
-      Pipeline pipeline =
-          cluster.getStorageContainerManager().getPipelineManager()
-              .getPipeline(container.getPipelineID());
-      XceiverClientSpi xceiverClient =
-          xceiverClientManager.acquireClient(pipeline);
-      if (!containerList.contains(containerID)) {
-        xceiverClient.sendCommand(ContainerTestHelper
-            .getCreateContainerRequest(containerID, pipeline));
-      }
-      xceiverClientManager.releaseClient(xceiverClient, false);
-    }
-    key.write(data1);
-    OutputStream stream = entries.get(0).getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-    ContainerTestHelper.waitForContainerClose(key, cluster);
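-    // With the containers closed, the next write keeps hitting
-    // ContainerNotOpenException until the configured retry limit (3) is
-    // exceeded.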
-    try {
-      key.write(data1);
-      Assert.fail("Expected exception not thrown");
-    } catch (IOException ioe) {
-      Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream
-          .getIoException()) instanceof ContainerNotOpenException);
-      Assert.assertTrue(ioe.getMessage().contains(
-          "Retry request failed. retries get failed due to exceeded maximum "
-              + "allowed retries number: 3"));
-    }
-    try {
-      key.flush();
-      Assert.fail("Expected exception not thrown");
-    } catch (IOException ioe) {
-      Assert.assertTrue(ioe.getMessage().contains("Stream is closed"));
-    }
-    try {
-      key.close();
-    } catch (IOException ioe) {
-      Assert.fail("Expected should not be thrown");
-    }
-  }
-
-  private OzoneOutputStream createKey(String keyName, ReplicationType type,
-      long size) throws Exception {
-    return ContainerTestHelper
-        .createKey(keyName, type, size, objectStore, volumeName, bucketName);
-  }
-
-  private void validateData(String keyName, byte[] data) throws Exception {
-    ContainerTestHelper
-        .validateData(keyName, data, objectStore, volumeName, bucketName);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
deleted file mode 100644
index 8ecddac..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-
-/**
- * Tests all the public facing APIs of the Ozone client by submitting requests
- * directly to OzoneManager.
- */
-public class TestOzoneRpcClient extends TestOzoneRpcClientAbstract {
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1);
-    startCluster(conf);
-  }
-
-  /**
-   * Close OzoneClient and shutdown MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() throws IOException {
-    shutdownCluster();
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
deleted file mode 100644
index 9189c2f..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ /dev/null
@@ -1,2810 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.BitSet;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.TreeMap;
-import java.util.UUID;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.client.OzoneQuota;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.OzoneTestUtils;
-import org.apache.hadoop.ozone.client.BucketArgs;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientException;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneKeyDetails;
-import org.apache.hadoop.ozone.client.OzoneKeyLocation;
-import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
-import org.apache.hadoop.ozone.om.ha.OMProxyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.apache.hadoop.ozone.s3.util.OzoneS3Util;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.util.Time;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.RandomUtils;
-
-import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ;
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.CoreMatchers.either;
-
-import org.junit.Assert;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.junit.Ignore;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This is an abstract class to test all the public facing APIs of the Ozone
- * client, both with and without an OM Ratis server.
- * {@link TestOzoneRpcClient} tests the Ozone Client by submitting the
- * requests directly to OzoneManager. {@link TestOzoneRpcClientWithRatis}
- * tests the Ozone Client by submitting requests to OM's Ratis server.
- */
-public abstract class TestOzoneRpcClientAbstract {
-
-  static final Logger LOG =
-      LoggerFactory.getLogger(TestOzoneRpcClientAbstract.class);
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneClient ozClient = null;
-  private static ObjectStore store = null;
-  private static OzoneManager ozoneManager;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private static String remoteUserName = "remoteUser";
-  private static String remoteGroupName = "remoteGroup";
-  private static OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName,
-      READ, DEFAULT);
-  private static OzoneAcl defaultGroupAcl = new OzoneAcl(GROUP, remoteGroupName,
-      READ, DEFAULT);
-  private static OzoneAcl inheritedUserAcl = new OzoneAcl(USER, remoteUserName,
-      READ, ACCESS);
-  private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP,
-      remoteGroupName, READ, ACCESS);
-
-  private static String scmId = UUID.randomUUID().toString();
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * @param conf Configurations to start the cluster.
-   * @throws Exception
-   */
-  static void startCluster(OzoneConfiguration conf) throws Exception {
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3)
-        .setScmId(scmId)
-        .build();
-    cluster.waitForClusterToBeReady();
-    ozClient = OzoneClientFactory.getRpcClient(conf);
-    store = ozClient.getObjectStore();
-    storageContainerLocationClient =
-        cluster.getStorageContainerLocationClient();
-    ozoneManager = cluster.getOzoneManager();
-  }
-
-  /**
-   * Close OzoneClient and shutdown MiniOzoneCluster.
-   */
-  static void shutdownCluster() throws IOException {
-    if (ozClient != null) {
-      ozClient.close();
-    }
-
-    if (storageContainerLocationClient != null) {
-      storageContainerLocationClient.close();
-    }
-
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  public static void setCluster(MiniOzoneCluster cluster) {
-    TestOzoneRpcClientAbstract.cluster = cluster;
-  }
-
-  public static void setOzClient(OzoneClient ozClient) {
-    TestOzoneRpcClientAbstract.ozClient = ozClient;
-  }
-
-  public static void setOzoneManager(OzoneManager ozoneManager) {
-    TestOzoneRpcClientAbstract.ozoneManager = ozoneManager;
-  }
-
-  public static void setStorageContainerLocationClient(
-      StorageContainerLocationProtocolClientSideTranslatorPB
-          storageContainerLocationClient) {
-    TestOzoneRpcClientAbstract.storageContainerLocationClient =
-        storageContainerLocationClient;
-  }
-
-  public static void setStore(ObjectStore store) {
-    TestOzoneRpcClientAbstract.store = store;
-  }
-
-  public static ObjectStore getStore() {
-    return TestOzoneRpcClientAbstract.store;
-  }
-
-  public static void setScmId(String scmId) {
-    TestOzoneRpcClientAbstract.scmId = scmId;
-  }
-
-  /**
-   * Test OM Proxy Provider.
-   */
-  @Test
-  public void testOMClientProxyProvider() {
-    OMFailoverProxyProvider omFailoverProxyProvider = store.getClientProxy()
-        .getOMProxyProvider();
-    List<OMProxyInfo> omProxies = omFailoverProxyProvider.getOMProxyInfos();
-
-    // For a non-HA OM service, there should be only one OM proxy.
-    Assert.assertEquals(1, omProxies.size());
-    // The address in OMProxyInfo object, which client will connect to,
-    // should match the OM's RPC address.
-    Assert.assertEquals(ozoneManager.getOmRpcServerAddr(),
-        omProxies.get(0).getAddress());
-  }
-
-  @Test
-  public void testSetVolumeQuota()
-      throws IOException {
-    String volumeName = UUID.randomUUID().toString();
-    store.createVolume(volumeName);
-    store.getVolume(volumeName).setQuota(
-        OzoneQuota.parseQuota("100000000 BYTES"));
-    OzoneVolume volume = store.getVolume(volumeName);
-    Assert.assertEquals(100000000L, volume.getQuota());
-  }
-
-  @Test
-  public void testDeleteVolume()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    Assert.assertNotNull(volume);
-    store.deleteVolume(volumeName);
-    OzoneTestUtils.expectOmException(ResultCodes.VOLUME_NOT_FOUND,
-        () -> store.getVolume(volumeName));
-
-  }
-
-  @Test
-  public void testCreateVolumeWithMetadata()
-      throws IOException, OzoneClientException {
-    String volumeName = UUID.randomUUID().toString();
-    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-        .addMetadata("key1", "val1")
-        .build();
-    store.createVolume(volumeName, volumeArgs);
-    OzoneVolume volume = store.getVolume(volumeName);
-
-    Assert.assertEquals("val1", volume.getMetadata().get("key1"));
-    Assert.assertEquals(volumeName, volume.getName());
-  }
-
-  @Test
-  public void testCreateBucketWithMetadata()
-      throws IOException, OzoneClientException {
-    long currentTime = Time.now();
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    BucketArgs args = BucketArgs.newBuilder()
-        .addMetadata("key1", "value1").build();
-    volume.createBucket(bucketName, args);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, bucket.getName());
-    Assert.assertNotNull(bucket.getMetadata());
-    Assert.assertEquals("value1", bucket.getMetadata().get("key1"));
-
-  }
-
-
-  @Test
-  public void testCreateBucket()
-      throws IOException, OzoneClientException {
-    long currentTime = Time.now();
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, bucket.getName());
-    Assert.assertTrue(bucket.getCreationTime() >= currentTime);
-    Assert.assertTrue(volume.getCreationTime() >= currentTime);
-  }
-
-  @Test
-  public void testCreateS3Bucket()
-      throws IOException, OzoneClientException {
-    long currentTime = Time.now();
-    String userName = UserGroupInformation.getCurrentUser().getUserName();
-    String bucketName = UUID.randomUUID().toString();
-    store.createS3Bucket(userName, bucketName);
-    String volumeName = store.getOzoneVolumeName(bucketName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, bucket.getName());
-    Assert.assertTrue(bucket.getCreationTime() >= currentTime);
-    Assert.assertTrue(volume.getCreationTime() >= currentTime);
-  }
-
-  @Test
-  public void testCreateSecureS3Bucket() throws IOException {
-    long currentTime = Time.now();
-    String userName = "ozone/localhost@EXAMPLE.COM";
-    String bucketName = UUID.randomUUID().toString();
-    String s3VolumeName = OzoneS3Util.getVolumeName(userName);
-    store.createS3Bucket(s3VolumeName, bucketName);
-    String volumeName = store.getOzoneVolumeName(bucketName);
-    assertEquals(volumeName, "s3" + s3VolumeName);
-
-    OzoneVolume volume = store.getVolume(volumeName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, bucket.getName());
-    Assert.assertTrue(bucket.getCreationTime() >= currentTime);
-    Assert.assertTrue(volume.getCreationTime() >= currentTime);
-  }
-
-
-  @Test
-  public void testListS3Buckets()
-      throws IOException, OzoneClientException {
-    String userName = "ozone100";
-    String bucketName1 = UUID.randomUUID().toString();
-    String bucketName2 = UUID.randomUUID().toString();
-    store.createS3Bucket(userName, bucketName1);
-    store.createS3Bucket(userName, bucketName2);
-    Iterator<? extends OzoneBucket> iterator = store.listS3Buckets(userName,
-        null);
-
-    while (iterator.hasNext()) {
-      assertThat(iterator.next().getName(), either(containsString(bucketName1))
-          .or(containsString(bucketName2)));
-    }
-
-  }
-
-  @Test
-  public void testListS3BucketsFail()
-      throws IOException, OzoneClientException {
-    String userName = "randomUser";
-    Iterator<? extends OzoneBucket> iterator = store.listS3Buckets(userName,
-        null);
-
-    Assert.assertFalse(iterator.hasNext());
-
-  }
-
-  @Test
-  public void testDeleteS3Bucket()
-      throws Exception {
-    long currentTime = Time.now();
-    String userName = "ozone1";
-    String bucketName = UUID.randomUUID().toString();
-    store.createS3Bucket(userName, bucketName);
-    String volumeName = store.getOzoneVolumeName(bucketName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, bucket.getName());
-    Assert.assertTrue(bucket.getCreationTime() >= currentTime);
-    Assert.assertTrue(volume.getCreationTime() >= currentTime);
-    store.deleteS3Bucket(bucketName);
-
-    OzoneTestUtils.expectOmException(ResultCodes.S3_BUCKET_NOT_FOUND,
-        () -> store.getOzoneVolumeName(bucketName));
-  }
-
-  @Test
-  public void testDeleteS3NonExistingBucket() {
-    try {
-      store.deleteS3Bucket(UUID.randomUUID().toString());
-    } catch (IOException ex) {
-      GenericTestUtils.assertExceptionContains("NOT_FOUND", ex);
-    }
-  }
-
-  @Test
-  public void testCreateS3BucketMapping()
-      throws IOException, OzoneClientException {
-    long currentTime = Time.now();
-    String userName = "ozone";
-    String bucketName = UUID.randomUUID().toString();
-    store.createS3Bucket(userName, bucketName);
-    String volumeName = store.getOzoneVolumeName(bucketName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, bucket.getName());
-
-    String mapping = store.getOzoneBucketMapping(bucketName);
-    Assert.assertEquals("s3"+userName+"/"+bucketName, mapping);
-    Assert.assertEquals(bucketName, store.getOzoneBucketName(bucketName));
-    Assert.assertEquals("s3"+userName, store.getOzoneVolumeName(bucketName));
-
-  }
-
-  @Test
-  public void testCreateBucketWithVersioning()
-      throws IOException, OzoneClientException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    BucketArgs.Builder builder = BucketArgs.newBuilder();
-    builder.setVersioning(true);
-    volume.createBucket(bucketName, builder.build());
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, bucket.getName());
-    Assert.assertTrue(bucket.getVersioning());
-  }
-
-  @Test
-  public void testCreateBucketWithStorageType()
-      throws IOException, OzoneClientException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    BucketArgs.Builder builder = BucketArgs.newBuilder();
-    builder.setStorageType(StorageType.SSD);
-    volume.createBucket(bucketName, builder.build());
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, bucket.getName());
-    Assert.assertEquals(StorageType.SSD, bucket.getStorageType());
-  }
-
-  @Test
-  public void testCreateBucketWithAcls()
-      throws IOException, OzoneClientException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    OzoneAcl userAcl = new OzoneAcl(USER, "test",
-        READ, ACCESS);
-    List<OzoneAcl> acls = new ArrayList<>();
-    acls.add(userAcl);
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    BucketArgs.Builder builder = BucketArgs.newBuilder();
-    builder.setAcls(acls);
-    volume.createBucket(bucketName, builder.build());
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, bucket.getName());
-    Assert.assertTrue(bucket.getAcls().contains(userAcl));
-  }
-
-  @Test
-  public void testCreateBucketWithAllArgument()
-      throws IOException, OzoneClientException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    OzoneAcl userAcl = new OzoneAcl(USER, "test",
-        ACLType.ALL, ACCESS);
-    List<OzoneAcl> acls = new ArrayList<>();
-    acls.add(userAcl);
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    BucketArgs.Builder builder = BucketArgs.newBuilder();
-    builder.setVersioning(true)
-        .setStorageType(StorageType.SSD)
-        .setAcls(acls);
-    volume.createBucket(bucketName, builder.build());
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, bucket.getName());
-    Assert.assertTrue(bucket.getVersioning());
-    Assert.assertEquals(StorageType.SSD, bucket.getStorageType());
-    Assert.assertTrue(bucket.getAcls().contains(userAcl));
-  }
-
-  @Test
-  public void testInvalidBucketCreation() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = "invalid#bucket";
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    LambdaTestUtils.intercept(IllegalArgumentException.class,
-        "Bucket or Volume name has an unsupported" +
-            " character : #",
-        () -> volume.createBucket(bucketName));
-
-  }
-
-  @Test
-  public void testAddBucketAcl()
-      throws IOException, OzoneClientException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    List<OzoneAcl> acls = new ArrayList<>();
-    acls.add(new OzoneAcl(USER, "test", ACLType.ALL, ACCESS));
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    for (OzoneAcl acl : acls) {
-      assertTrue(bucket.addAcls(acl));
-    }
-    OzoneBucket newBucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, newBucket.getName());
-    Assert.assertTrue(bucket.getAcls().contains(acls.get(0)));
-  }
-
-  @Test
-  public void testRemoveBucketAcl()
-      throws IOException, OzoneClientException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    OzoneAcl userAcl = new OzoneAcl(USER, "test",
-        ACLType.ALL, ACCESS);
-    List<OzoneAcl> acls = new ArrayList<>();
-    acls.add(userAcl);
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    BucketArgs.Builder builder = BucketArgs.newBuilder();
-    builder.setAcls(acls);
-    volume.createBucket(bucketName, builder.build());
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    for (OzoneAcl acl : acls) {
-      assertTrue(bucket.removeAcls(acl));
-    }
-    OzoneBucket newBucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, newBucket.getName());
-    Assert.assertFalse(bucket.getAcls().contains(acls.get(0)));
-  }
-
-  @Test
-  public void testRemoveBucketAclUsingRpcClientRemoveAcl()
-      throws IOException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    OzoneAcl userAcl = new OzoneAcl(USER, "test",
-        ACLType.ALL, ACCESS);
-    List<OzoneAcl> acls = new ArrayList<>();
-    acls.add(userAcl);
-    acls.add(new OzoneAcl(USER, "test1",
-        ACLType.ALL, ACCESS));
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    BucketArgs.Builder builder = BucketArgs.newBuilder();
-    builder.setAcls(acls);
-    volume.createBucket(bucketName, builder.build());
-    OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder()
-        .setBucketName(bucketName)
-        .setVolumeName(volumeName)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .setResType(OzoneObj.ResourceType.BUCKET).build();
-
-    // Remove the 2nd acl added to the list.
-    boolean remove = store.removeAcl(ozoneObj, acls.get(1));
-    Assert.assertTrue(remove);
-    Assert.assertFalse(store.getAcl(ozoneObj).contains(acls.get(1)));
-
-    remove = store.removeAcl(ozoneObj, acls.get(0));
-    Assert.assertTrue(remove);
-    Assert.assertFalse(store.getAcl(ozoneObj).contains(acls.get(0)));
-  }
-
-  @Test
-  public void testSetBucketVersioning()
-      throws IOException, OzoneClientException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    bucket.setVersioning(true);
-    OzoneBucket newBucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, newBucket.getName());
-    Assert.assertTrue(newBucket.getVersioning());
-  }
-
-  @Test
-  public void testAclsAfterCallingSetBucketProperty() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-
-    OzoneBucket ozoneBucket = volume.getBucket(bucketName);
-    List<OzoneAcl> currentAcls = ozoneBucket.getAcls();
-
-    ozoneBucket.setVersioning(true);
-
-    OzoneBucket newBucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, newBucket.getName());
-    Assert.assertTrue(newBucket.getVersioning());
-
-    List<OzoneAcl> aclsAfterSet = newBucket.getAcls();
-    Assert.assertEquals(currentAcls, aclsAfterSet);
-
-  }
-
-  @Test
-  public void testSetBucketStorageType()
-      throws IOException, OzoneClientException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    bucket.setStorageType(StorageType.SSD);
-    OzoneBucket newBucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, newBucket.getName());
-    Assert.assertEquals(StorageType.SSD, newBucket.getStorageType());
-  }
-
-
-  @Test
-  public void testDeleteBucket()
-      throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    Assert.assertNotNull(bucket);
-    volume.deleteBucket(bucketName);
-
-    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND,
-        () -> volume.getBucket(bucketName)
-    );
-  }
-
-  private boolean verifyRatisReplication(String volumeName, String bucketName,
-      String keyName, ReplicationType type, ReplicationFactor factor)
-      throws IOException {
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-    HddsProtos.ReplicationType replicationType =
-        HddsProtos.ReplicationType.valueOf(type.toString());
-    HddsProtos.ReplicationFactor replicationFactor =
-        HddsProtos.ReplicationFactor.valueOf(factor.getValue());
-    OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs);
-    for (OmKeyLocationInfo info:
-        keyInfo.getLatestVersionLocations().getLocationList()) {
-      ContainerInfo container =
-          storageContainerLocationClient.getContainer(info.getContainerID());
-      if (!container.getReplicationFactor().equals(replicationFactor) || (
-          container.getReplicationType() != replicationType)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  @Test
-  public void testPutKey()
-      throws IOException, OzoneClientException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    long currentTime = Time.now();
-
-    String value = "sample value";
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    for (int i = 0; i < 10; i++) {
-      String keyName = UUID.randomUUID().toString();
-
-      OzoneOutputStream out = bucket.createKey(keyName,
-          value.getBytes().length, STAND_ALONE,
-          ONE, new HashMap<>());
-      out.write(value.getBytes());
-      out.close();
-      OzoneKey key = bucket.getKey(keyName);
-      Assert.assertEquals(keyName, key.getName());
-      OzoneInputStream is = bucket.readKey(keyName);
-      byte[] fileContent = new byte[value.getBytes().length];
-      is.read(fileContent);
-      is.close();
-      Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-          keyName, STAND_ALONE,
-          ONE));
-      Assert.assertEquals(value, new String(fileContent));
-      Assert.assertTrue(key.getCreationTime() >= currentTime);
-      Assert.assertTrue(key.getModificationTime() >= currentTime);
-    }
-  }
-
-  @Test
-  public void testValidateBlockLengthWithCommitKey() throws IOException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    String value = RandomStringUtils.random(RandomUtils.nextInt(0, 1024));
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    String keyName = UUID.randomUUID().toString();
-
-    // create the initial key with size 0, write will allocate the first block.
-    OzoneOutputStream out = bucket.createKey(keyName, 0,
-        STAND_ALONE, ONE, new HashMap<>());
-    out.write(value.getBytes());
-    out.close();
-    OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
-    builder.setVolumeName(volumeName).setBucketName(bucketName)
-        .setKeyName(keyName).setRefreshPipeline(true);
-    OmKeyInfo keyInfo = ozoneManager.lookupKey(builder.build());
-
-    List<OmKeyLocationInfo> locationInfoList =
-        keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
-    // LocationList should have only 1 block
-    Assert.assertEquals(1, locationInfoList.size());
-    // make sure the data block size is updated
-    Assert.assertEquals(value.getBytes().length,
-        locationInfoList.get(0).getLength());
-    // make sure the total data size is set correctly
-    Assert.assertEquals(value.getBytes().length, keyInfo.getDataSize());
-  }
-
-  @Test
-  public void testPutKeyRatisOneNode()
-      throws IOException, OzoneClientException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    long currentTime = Time.now();
-
-    String value = "sample value";
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    for (int i = 0; i < 10; i++) {
-      String keyName = UUID.randomUUID().toString();
-
-      OzoneOutputStream out = bucket.createKey(keyName,
-          value.getBytes().length, ReplicationType.RATIS,
-          ONE, new HashMap<>());
-      out.write(value.getBytes());
-      out.close();
-      OzoneKey key = bucket.getKey(keyName);
-      Assert.assertEquals(keyName, key.getName());
-      OzoneInputStream is = bucket.readKey(keyName);
-      byte[] fileContent = new byte[value.getBytes().length];
-      is.read(fileContent);
-      is.close();
-      Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-          keyName, ReplicationType.RATIS, ONE));
-      Assert.assertEquals(value, new String(fileContent));
-      Assert.assertTrue(key.getCreationTime() >= currentTime);
-      Assert.assertTrue(key.getModificationTime() >= currentTime);
-    }
-  }
-
-  @Test
-  public void testPutKeyRatisThreeNodes()
-      throws IOException, OzoneClientException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    long currentTime = Time.now();
-
-    String value = "sample value";
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    for (int i = 0; i < 10; i++) {
-      String keyName = UUID.randomUUID().toString();
-
-      OzoneOutputStream out = bucket.createKey(keyName,
-          value.getBytes().length, ReplicationType.RATIS,
-          ReplicationFactor.THREE, new HashMap<>());
-      out.write(value.getBytes());
-      out.close();
-      OzoneKey key = bucket.getKey(keyName);
-      Assert.assertEquals(keyName, key.getName());
-      OzoneInputStream is = bucket.readKey(keyName);
-      byte[] fileContent = new byte[value.getBytes().length];
-      is.read(fileContent);
-      is.close();
-      Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-          keyName, ReplicationType.RATIS,
-          ReplicationFactor.THREE));
-      Assert.assertEquals(value, new String(fileContent));
-      Assert.assertTrue(key.getCreationTime() >= currentTime);
-      Assert.assertTrue(key.getModificationTime() >= currentTime);
-    }
-  }
-
-
-  @Ignore("Debug Jenkins Timeout")
-  @Test
-  public void testPutKeyRatisThreeNodesParallel() throws IOException,
-      InterruptedException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    long currentTime = Time.now();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
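-    // Two writer threads run the same workload; the latch releases the main
-    // thread once both finish, and failCount records IOException failures so
-    // the test can fail from the main thread.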
-    CountDownLatch latch = new CountDownLatch(2);
-    AtomicInteger failCount = new AtomicInteger(0);
-
-    Runnable r = () -> {
-      try {
-        for (int i = 0; i < 5; i++) {
-          String keyName = UUID.randomUUID().toString();
-          String data = generateData(5 * 1024 * 1024,
-              (byte) RandomUtils.nextLong()).toString();
-          OzoneOutputStream out = bucket.createKey(keyName,
-              data.getBytes().length, ReplicationType.RATIS,
-              ReplicationFactor.THREE, new HashMap<>());
-          out.write(data.getBytes());
-          out.close();
-          OzoneKey key = bucket.getKey(keyName);
-          Assert.assertEquals(keyName, key.getName());
-          OzoneInputStream is = bucket.readKey(keyName);
-          byte[] fileContent = new byte[data.getBytes().length];
-          is.read(fileContent);
-          is.close();
-          Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-              keyName, ReplicationType.RATIS,
-              ReplicationFactor.THREE));
-          Assert.assertEquals(data, new String(fileContent));
-          Assert.assertTrue(key.getCreationTime() >= currentTime);
-          Assert.assertTrue(key.getModificationTime() >= currentTime);
-        }
-        latch.countDown();
-      } catch (IOException ex) {
-        latch.countDown();
-        failCount.incrementAndGet();
-      }
-    };
-
-    Thread thread1 = new Thread(r);
-    Thread thread2 = new Thread(r);
-
-    thread1.start();
-    thread2.start();
-
-    Assert.assertTrue("Writer threads did not finish in time",
-        latch.await(600, TimeUnit.SECONDS));
-
-    if (failCount.get() > 0) {
-      fail("testPutKeyRatisThreeNodesParallel failed");
-    }
-
-  }
-
-
-  @Test
-  public void testReadKeyWithVerifyChecksumFlagEnable() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    // Create and corrupt key
-    createAndCorruptKey(volumeName, bucketName, keyName);
-
-    // Read the corrupted key with checksum verification enabled
-    readCorruptedKey(volumeName, bucketName, keyName, true);
-
-  }
-
-
-  @Test
-  public void testReadKeyWithVerifyChecksumFlagDisable() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    // Create and corrupt key
-    createAndCorruptKey(volumeName, bucketName, keyName);
-
-    // Read the corrupted key with checksum verification disabled
-    readCorruptedKey(volumeName, bucketName, keyName, false);
-
-  }
-
-  private void createAndCorruptKey(String volumeName, String bucketName,
-      String keyName) throws IOException {
-    String value = "sample value";
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    // Write data into a key
-    OzoneOutputStream out = bucket.createKey(keyName,
-        value.getBytes().length, ReplicationType.RATIS,
-        ONE, new HashMap<>());
-    out.write(value.getBytes());
-    out.close();
-
-    // We need to find the location of the chunk file corresponding to the
-    // data we just wrote.
-    OzoneKey key = bucket.getKey(keyName);
-    long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0)
-        .getContainerID();
-
-    // Get the container by traversing the datanodes. At least one of the
-    // datanodes must have this container.
-    Container container = null;
-    for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
-      container = hddsDatanode.getDatanodeStateMachine().getContainer()
-          .getContainerSet().getContainer(containerID);
-      if (container != null) {
-        break;
-      }
-    }
-    Assert.assertNotNull("Container not found", container);
-    corruptData(container, key);
-  }
-
-
-  private void readCorruptedKey(String volumeName, String bucketName,
-      String keyName, boolean verifyChecksum) throws IOException {
-    try {
-      Configuration configuration = cluster.getConf();
-      configuration.setBoolean(OzoneConfigKeys.OZONE_CLIENT_VERIFY_CHECKSUM,
-          verifyChecksum);
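-      // Build a fresh client so the updated checksum-verification flag takes
-      // effect for the read below.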
-      RpcClient client = new RpcClient(configuration, null);
-      OzoneInputStream is = client.getKey(volumeName, bucketName, keyName);
-      is.read(new byte[100]);
-      is.close();
-      if (verifyChecksum) {
-        fail("Reading corrupted data should fail, as verify checksum is " +
-            "enabled");
-      }
-    } catch (IOException e) {
-      if (!verifyChecksum) {
-        fail("Reading corrupted data should not fail, as verify checksum is " +
-            "disabled");
-      }
-    }
-  }
-
-
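-  /**
-   * Reads the given key back; the data argument is only used to size the
-   * read buffer.
-   */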
-  private void readKey(OzoneBucket bucket, String keyName, String data)
-      throws IOException {
-    OzoneKey key = bucket.getKey(keyName);
-    Assert.assertEquals(keyName, key.getName());
-    OzoneInputStream is = bucket.readKey(keyName);
-    byte[] fileContent = new byte[data.getBytes().length];
-    is.read(fileContent);
-    is.close();
-  }
-
-  @Test
-  public void testGetKeyDetails() throws IOException, OzoneClientException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    String keyName = UUID.randomUUID().toString();
-    String keyValue = RandomStringUtils.random(128);
-    // Create the key; the write below allocates the first block.
-    OzoneOutputStream out = bucket.createKey(keyName,
-        keyValue.getBytes().length, STAND_ALONE,
-        ONE, new HashMap<>());
-    out.write(keyValue.getBytes());
-    out.close();
-
-    OzoneInputStream is = bucket.readKey(keyName);
-    byte[] fileContent = new byte[32];
-    is.read(fileContent);
-
-    // First, confirm the key info from the client matches the info in OM.
-    OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
-    builder.setVolumeName(volumeName).setBucketName(bucketName)
-        .setKeyName(keyName).setRefreshPipeline(true);
-    OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()).
-        getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
-    long containerID = keyInfo.getContainerID();
-    long localID = keyInfo.getLocalID();
-    OzoneKeyDetails keyDetails = (OzoneKeyDetails)bucket.getKey(keyName);
-    Assert.assertEquals(keyName, keyDetails.getName());
-
-    List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
-    Assert.assertEquals(1, keyLocations.size());
-    Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
-    Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
-
-    // Make sure that the data size matches.
-    Assert.assertEquals(keyValue.getBytes().length,
-        keyLocations.get(0).getLength());
-
-    // Second, sum the chunk sizes in the container identified by containerID
-    // and localID, and make sure the total equals the size from keyDetails.
-    ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
-    Pipeline pipeline = cluster.getStorageContainerManager()
-        .getPipelineManager().getPipeline(container.getPipelineID());
-    List<DatanodeDetails> datanodes = pipeline.getNodes();
-    Assert.assertEquals(1, datanodes.size());
-
-    DatanodeDetails datanodeDetails = datanodes.get(0);
-    Assert.assertNotNull(datanodeDetails);
-    HddsDatanodeService datanodeService = null;
-    for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) {
-      if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) {
-        datanodeService = datanodeServiceItr;
-        break;
-      }
-    }
-    Assert.assertNotNull("Datanode service not found", datanodeService);
-    KeyValueContainerData containerData =
-        (KeyValueContainerData)(datanodeService.getDatanodeStateMachine()
-            .getContainer().getContainerSet().getContainer(containerID)
-            .getContainerData());
-    String containerPath = new File(containerData.getMetadataPath())
-        .getParent();
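-    // Walk the container's block metadata to find our block, then sum its
-    // chunk lengths and compare against the key size reported by the client.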
-    try (KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerID, new File(containerPath))) {
-      while (keyValueBlockIterator.hasNext()) {
-        BlockData blockData = keyValueBlockIterator.nextBlock();
-        if (blockData.getBlockID().getLocalID() == localID) {
-          long length = 0;
-          List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
-          for (ContainerProtos.ChunkInfo chunk : chunks) {
-            length += chunk.getLen();
-          }
-          Assert.assertEquals(keyValue.getBytes().length, length);
-          break;
-        }
-      }
-    }
-  }
-
-  /**
-   * Tests that reading a corrupted chunk file throws a checksum exception.
-   * @throws IOException
-   */
-  @Test
-  public void testReadKeyWithCorruptedData() throws IOException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    String value = "sample value";
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    String keyName = UUID.randomUUID().toString();
-
-    // Write data into a key
-    OzoneOutputStream out = bucket.createKey(keyName,
-        value.getBytes().length, ReplicationType.RATIS,
-        ONE, new HashMap<>());
-    out.write(value.getBytes());
-    out.close();
-
-    // We need to find the location of the chunk file corresponding to the
-    // data we just wrote.
-    OzoneKey key = bucket.getKey(keyName);
-    long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0)
-        .getContainerID();
-
-    // Get the container by traversing the datanodes. At least one of the
-    // datanodes must have this container.
-    Container container = null;
-    for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
-      container = hddsDatanode.getDatanodeStateMachine().getContainer()
-          .getContainerSet().getContainer(containerID);
-      if (container != null) {
-        break;
-      }
-    }
-    Assert.assertNotNull("Container not found", container);
-    corruptData(container, key);
-
-    // Try reading the key. Since the chunk file is corrupted, it should
-    // throw a checksum mismatch exception.
-    try {
-      OzoneInputStream is = bucket.readKey(keyName);
-      is.read(new byte[100]);
-      fail("Reading corrupted data should fail.");
-    } catch (IOException e) {
-      GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
-    }
-  }
-
-  /**
-   * Tests that reads fail over to healthy replicas when a chunk file is
-   * corrupted, and throw a checksum exception once all replicas are corrupted.
-   * @throws IOException
-   */
-  @Test
-  public void testReadKeyWithCorruptedDataWithMultiNodes() throws IOException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    String value = "sample value";
-    byte[] data = value.getBytes();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    String keyName = UUID.randomUUID().toString();
-
-    // Write data into a key
-    OzoneOutputStream out = bucket.createKey(keyName,
-        value.getBytes().length, ReplicationType.RATIS,
-        ReplicationFactor.THREE, new HashMap<>());
-    out.write(value.getBytes());
-    out.close();
-
-    // We need to find the location of the chunk file corresponding to the
-    // data we just wrote.
-    OzoneKey key = bucket.getKey(keyName);
-    List<OzoneKeyLocation> keyLocation =
-        ((OzoneKeyDetails) key).getOzoneKeyLocations();
-    Assert.assertTrue("Key location not found in OM", !keyLocation.isEmpty());
-    long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0)
-        .getContainerID();
-
-    // Get the container by traversing the datanodes.
-    List<Container> containerList = new ArrayList<>();
-    Container container;
-    for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
-      container = hddsDatanode.getDatanodeStateMachine().getContainer()
-          .getContainerSet().getContainer(containerID);
-      if (container != null) {
-        containerList.add(container);
-        if (containerList.size() == 3) {
-          break;
-        }
-      }
-    }
-    Assert.assertTrue("Container not found", !containerList.isEmpty());
-    corruptData(containerList.get(0), key);
-    // Try reading the key. The read will fail on the first node and will
-    // eventually fail over to the next replica.
-    try {
-      OzoneInputStream is = bucket.readKey(keyName);
-      byte[] b = new byte[data.length];
-      is.read(b);
-      Assert.assertTrue(Arrays.equals(b, data));
-    } catch (OzoneChecksumException e) {
-      fail("Reading corrupted data should not fail.");
-    }
-    corruptData(containerList.get(1), key);
-    // Try reading the key. The read will fail on the first node and will
-    // eventually fail over to the next replica.
-    try {
-      OzoneInputStream is = bucket.readKey(keyName);
-      byte[] b = new byte[data.length];
-      is.read(b);
-      Assert.assertTrue(Arrays.equals(b, data));
-    } catch (OzoneChecksumException e) {
-      fail("Reading corrupted data should not fail.");
-    }
-    corruptData(containerList.get(2), key);
-    // Try reading the key. The read will fail here as all the replicas are
-    // corrupted.
-    try {
-      OzoneInputStream is = bucket.readKey(keyName);
-      byte[] b = new byte[data.length];
-      is.read(b);
-      fail("Reading corrupted data should fail.");
-    } catch (IOException e) {
-      GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
-    }
-  }
-
-  private void corruptData(Container container, OzoneKey key)
-      throws IOException {
-    long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0)
-        .getContainerID();
-    long localID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0)
-        .getLocalID();
-    // From the containerData, get the block iterator for all the blocks in
-    // the container.
-    KeyValueContainerData containerData =
-        (KeyValueContainerData) container.getContainerData();
-    String containerPath =
-        new File(containerData.getMetadataPath()).getParent();
-    try (KeyValueBlockIterator keyValueBlockIterator =
-        new KeyValueBlockIterator(containerID, new File(containerPath))) {
-
-      // Find the block corresponding to the key we put. We use the localID of
-      // the BlockData to identify our key.
-      BlockData blockData = null;
-      while (keyValueBlockIterator.hasNext()) {
-        blockData = keyValueBlockIterator.nextBlock();
-        if (blockData.getBlockID().getLocalID() == localID) {
-          break;
-        }
-      }
-      Assert.assertNotNull("Block not found", blockData);
-
-      // Get the location of the chunk file
-      String chunkName = blockData.getChunks().get(0).getChunkName();
-      String containerBaseDir =
-          container.getContainerData().getVolume().getHddsRootDir().getPath();
-      File chunksLocationPath = KeyValueContainerLocationUtil
-          .getChunksLocationPath(containerBaseDir, scmId, containerID);
-      File chunkFile = new File(chunksLocationPath, chunkName);
-
-      // Corrupt the contents of the chunk file
-      String newData = "corrupted data";
-      FileUtils.writeByteArrayToFile(chunkFile, newData.getBytes());
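-      // The stored checksums no longer match the chunk contents, so reads
-      // that verify checksums should now fail for this replica.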
-    }
-  }
-
-  @Test
-  public void testDeleteKey()
-      throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-    String value = "sample value";
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    OzoneOutputStream out = bucket.createKey(keyName,
-        value.getBytes().length, STAND_ALONE,
-        ONE, new HashMap<>());
-    out.write(value.getBytes());
-    out.close();
-    OzoneKey key = bucket.getKey(keyName);
-    Assert.assertEquals(keyName, key.getName());
-    bucket.deleteKey(keyName);
-
-    OzoneTestUtils.expectOmException(ResultCodes.KEY_NOT_FOUND,
-        () -> bucket.getKey(keyName));
-  }
-
-  @Test
-  public void testRenameKey()
-      throws IOException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String fromKeyName = UUID.randomUUID().toString();
-    String value = "sample value";
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    OzoneOutputStream out = bucket.createKey(fromKeyName,
-        value.getBytes().length, STAND_ALONE,
-        ONE, new HashMap<>());
-    out.write(value.getBytes());
-    out.close();
-    OzoneKey key = bucket.getKey(fromKeyName);
-    Assert.assertEquals(fromKeyName, key.getName());
-
-    // Rename to empty string should fail.
-    OMException oe = null;
-    String toKeyName = "";
-    try {
-      bucket.renameKey(fromKeyName, toKeyName);
-    } catch (OMException e) {
-      oe = e;
-    }
-    Assert.assertEquals(ResultCodes.INVALID_KEY_NAME, oe.getResult());
-
-    toKeyName = UUID.randomUUID().toString();
-    bucket.renameKey(fromKeyName, toKeyName);
-
-    // Lookup for old key should fail.
-    try {
-      bucket.getKey(fromKeyName);
-    } catch (OMException e) {
-      oe = e;
-    }
-    Assert.assertEquals(ResultCodes.KEY_NOT_FOUND, oe.getResult());
-
-    key = bucket.getKey(toKeyName);
-    Assert.assertEquals(toKeyName, key.getName());
-  }
-
-  // Listing all volumes in the cluster needs to be fixed after HDDS-357.
-  // TODO: fix this
-  @Ignore
-  @Test
-  public void testListVolume() throws IOException {
-    String volBase = "vol-" + RandomStringUtils.randomNumeric(3);
-    //Create 10 volumes vol-<random>-a-0-<random> to vol-<random>-a-9-<random>
-    String volBaseNameA = volBase + "-a-";
-    for(int i = 0; i < 10; i++) {
-      store.createVolume(
-          volBaseNameA + i + "-" + RandomStringUtils.randomNumeric(5));
-    }
-    //Create 10 volumes vol-<random>-b-0-<random> to vol-<random>-b-9-<random>
-    String volBaseNameB = volBase + "-b-";
-    for(int i = 0; i < 10; i++) {
-      store.createVolume(
-          volBaseNameB + i + "-" + RandomStringUtils.randomNumeric(5));
-    }
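-    // listVolumes(prefix) should return only volumes whose names start with
-    // the given prefix.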
-    Iterator<? extends OzoneVolume> volIterator = store.listVolumes(volBase);
-    int totalVolumeCount = 0;
-    while(volIterator.hasNext()) {
-      volIterator.next();
-      totalVolumeCount++;
-    }
-    Assert.assertEquals(20, totalVolumeCount);
-    Iterator<? extends OzoneVolume> volAIterator = store.listVolumes(
-        volBaseNameA);
-    for(int i = 0; i < 10; i++) {
-      Assert.assertTrue(volAIterator.next().getName()
-          .startsWith(volBaseNameA + i + "-"));
-    }
-    Assert.assertFalse(volAIterator.hasNext());
-    Iterator<? extends OzoneVolume> volBIterator = store.listVolumes(
-        volBaseNameB);
-    for(int i = 0; i < 10; i++) {
-      Assert.assertTrue(volBIterator.next().getName()
-          .startsWith(volBaseNameB + i + "-"));
-    }
-    Assert.assertFalse(volBIterator.hasNext());
-    Iterator<? extends OzoneVolume> iter = store.listVolumes(volBaseNameA +
-        "1-");
-    Assert.assertTrue(iter.next().getName().startsWith(volBaseNameA + "1-"));
-    Assert.assertFalse(iter.hasNext());
-  }
-
-  @Test
-  public void testListBucket()
-      throws IOException {
-    String volumeA = "vol-a-" + RandomStringUtils.randomNumeric(5);
-    String volumeB = "vol-b-" + RandomStringUtils.randomNumeric(5);
-    store.createVolume(volumeA);
-    store.createVolume(volumeB);
-    OzoneVolume volA = store.getVolume(volumeA);
-    OzoneVolume volB = store.getVolume(volumeB);
-
-    //Create 10 buckets in vol-a-<random> and 10 in vol-b-<random>
-    String bucketBaseNameA = "bucket-a-";
-    for(int i = 0; i < 10; i++) {
-      volA.createBucket(
-          bucketBaseNameA + i + "-" + RandomStringUtils.randomNumeric(5));
-      volB.createBucket(
-          bucketBaseNameA + i + "-" + RandomStringUtils.randomNumeric(5));
-    }
-    //Create 10 buckets in vol-a-<random> and 10 in vol-b-<random>
-    String bucketBaseNameB = "bucket-b-";
-    for(int i = 0; i < 10; i++) {
-      volA.createBucket(
-          bucketBaseNameB + i + "-" + RandomStringUtils.randomNumeric(5));
-      volB.createBucket(
-          bucketBaseNameB + i + "-" + RandomStringUtils.randomNumeric(5));
-    }
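-    // listBuckets(prefix) filters by name prefix; "bucket-" matches all 20
-    // buckets in each volume.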
-    Iterator<? extends OzoneBucket> volABucketIter =
-        volA.listBuckets("bucket-");
-    int volABucketCount = 0;
-    while(volABucketIter.hasNext()) {
-      volABucketIter.next();
-      volABucketCount++;
-    }
-    Assert.assertEquals(20, volABucketCount);
-    Iterator<? extends OzoneBucket> volBBucketIter =
-        volB.listBuckets("bucket-");
-    int volBBucketCount = 0;
-    while(volBBucketIter.hasNext()) {
-      volBBucketIter.next();
-      volBBucketCount++;
-    }
-    Assert.assertEquals(20, volBBucketCount);
-
-    Iterator<? extends OzoneBucket> volABucketAIter =
-        volA.listBuckets("bucket-a-");
-    int volABucketACount = 0;
-    while(volABucketAIter.hasNext()) {
-      volABucketAIter.next();
-      volABucketACount++;
-    }
-    Assert.assertEquals(10, volABucketACount);
-    Iterator<? extends OzoneBucket> volBBucketBIter =
-        volB.listBuckets("bucket-b-");
-    int volBBucketBCount = 0;
-    while(volBBucketBIter.hasNext()) {
-      volBBucketBIter.next();
-      volBBucketBCount++;
-    }
-    Assert.assertEquals(10, volBBucketBCount);
-    Iterator<? extends OzoneBucket> volABucketBIter = volA.listBuckets(
-        "bucket-b-");
-    for(int i = 0; i < 10; i++) {
-      Assert.assertTrue(volABucketBIter.next().getName()
-          .startsWith(bucketBaseNameB + i + "-"));
-    }
-    Assert.assertFalse(volABucketBIter.hasNext());
-    Iterator<? extends OzoneBucket> volBBucketAIter = volB.listBuckets(
-        "bucket-a-");
-    for(int i = 0; i < 10; i++) {
-      Assert.assertTrue(volBBucketAIter.next().getName()
-          .startsWith(bucketBaseNameA + i + "-"));
-    }
-    Assert.assertFalse(volBBucketAIter.hasNext());
-
-  }
-
-  @Test
-  public void testListBucketsOnEmptyVolume()
-      throws IOException {
-    String volume = "vol-" + RandomStringUtils.randomNumeric(5);
-    store.createVolume(volume);
-    OzoneVolume vol = store.getVolume(volume);
-    Iterator<? extends OzoneBucket> buckets = vol.listBuckets("");
-    Assert.assertFalse(buckets.hasNext());
-  }
-
-  @Test
-  public void testListKey()
-      throws IOException {
-    String volumeA = "vol-a-" + RandomStringUtils.randomNumeric(5);
-    String volumeB = "vol-b-" + RandomStringUtils.randomNumeric(5);
-    String bucketA = "buc-a-" + RandomStringUtils.randomNumeric(5);
-    String bucketB = "buc-b-" + RandomStringUtils.randomNumeric(5);
-    store.createVolume(volumeA);
-    store.createVolume(volumeB);
-    OzoneVolume volA = store.getVolume(volumeA);
-    OzoneVolume volB = store.getVolume(volumeB);
-    volA.createBucket(bucketA);
-    volA.createBucket(bucketB);
-    volB.createBucket(bucketA);
-    volB.createBucket(bucketB);
-    OzoneBucket volAbucketA = volA.getBucket(bucketA);
-    OzoneBucket volAbucketB = volA.getBucket(bucketB);
-    OzoneBucket volBbucketA = volB.getBucket(bucketA);
-    OzoneBucket volBbucketB = volB.getBucket(bucketB);
-
-    /*
-    Create 10 keys in vol-a-<random>/buc-a-<random>,
-    vol-a-<random>/buc-b-<random>, vol-b-<random>/buc-a-<random> and
-    vol-b-<random>/buc-b-<random>
-     */
-    String keyBaseA = "key-a-";
-    for (int i = 0; i < 10; i++) {
-      byte[] value = RandomStringUtils.randomAscii(10240).getBytes();
-      OzoneOutputStream one = volAbucketA.createKey(
-          keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
-          new HashMap<>());
-      one.write(value);
-      one.close();
-      OzoneOutputStream two = volAbucketB.createKey(
-          keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
-          new HashMap<>());
-      two.write(value);
-      two.close();
-      OzoneOutputStream three = volBbucketA.createKey(
-          keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
-          new HashMap<>());
-      three.write(value);
-      three.close();
-      OzoneOutputStream four = volBbucketB.createKey(
-          keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
-          new HashMap<>());
-      four.write(value);
-      four.close();
-    }
-    /*
-    Create 10 keys in vol-a-<random>/buc-a-<random>,
-    vol-a-<random>/buc-b-<random>, vol-b-<random>/buc-a-<random> and
-    vol-b-<random>/buc-b-<random>
-     */
-    String keyBaseB = "key-b-";
-    for (int i = 0; i < 10; i++) {
-      byte[] value = RandomStringUtils.randomAscii(10240).getBytes();
-      OzoneOutputStream one = volAbucketA.createKey(
-          keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
-          new HashMap<>());
-      one.write(value);
-      one.close();
-      OzoneOutputStream two = volAbucketB.createKey(
-          keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
-          new HashMap<>());
-      two.write(value);
-      two.close();
-      OzoneOutputStream three = volBbucketA.createKey(
-          keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
-          new HashMap<>());
-      three.write(value);
-      three.close();
-      OzoneOutputStream four = volBbucketB.createKey(
-          keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
-          new HashMap<>());
-      four.write(value);
-      four.close();
-    }
-    Iterator<? extends OzoneKey> volABucketAIter =
-        volAbucketA.listKeys("key-");
-    int volABucketAKeyCount = 0;
-    while(volABucketAIter.hasNext()) {
-      volABucketAIter.next();
-      volABucketAKeyCount++;
-    }
-    Assert.assertEquals(20, volABucketAKeyCount);
-    Iterator<? extends OzoneKey> volABucketBIter =
-        volAbucketB.listKeys("key-");
-    int volABucketBKeyCount = 0;
-    while(volABucketBIter.hasNext()) {
-      volABucketBIter.next();
-      volABucketBKeyCount++;
-    }
-    Assert.assertEquals(20, volABucketBKeyCount);
-    Iterator<? extends OzoneKey> volBBucketAIter =
-        volBbucketA.listKeys("key-");
-    int volBBucketAKeyCount = 0;
-    while(volBBucketAIter.hasNext()) {
-      volBBucketAIter.next();
-      volBBucketAKeyCount++;
-    }
-    Assert.assertEquals(20, volBBucketAKeyCount);
-    Iterator<? extends OzoneKey> volBBucketBIter =
-        volBbucketB.listKeys("key-");
-    int volBBucketBKeyCount = 0;
-    while(volBBucketBIter.hasNext()) {
-      volBBucketBIter.next();
-      volBBucketBKeyCount++;
-    }
-    Assert.assertEquals(20, volBBucketBKeyCount);
-    Iterator<? extends OzoneKey> volABucketAKeyAIter =
-        volAbucketA.listKeys("key-a-");
-    int volABucketAKeyACount = 0;
-    while(volABucketAKeyAIter.hasNext()) {
-      volABucketAKeyAIter.next();
-      volABucketAKeyACount++;
-    }
-    Assert.assertEquals(10, volABucketAKeyACount);
-    Iterator<? extends OzoneKey> volABucketAKeyBIter =
-        volAbucketA.listKeys("key-b-");
-    for(int i = 0; i < 10; i++) {
-      Assert.assertTrue(volABucketAKeyBIter.next().getName()
-          .startsWith("key-b-" + i + "-"));
-    }
-    Assert.assertFalse(volABucketAKeyBIter.hasNext());
-  }
-
-  @Test
-  public void testListKeyOnEmptyBucket()
-      throws IOException {
-    String volume = "vol-" + RandomStringUtils.randomNumeric(5);
-    String bucket = "buc-" + RandomStringUtils.randomNumeric(5);
-    store.createVolume(volume);
-    OzoneVolume vol = store.getVolume(volume);
-    vol.createBucket(bucket);
-    OzoneBucket buc = vol.getBucket(bucket);
-    Iterator<? extends OzoneKey> keys = buc.listKeys("");
-    Assert.assertFalse(keys.hasNext());
-  }
-
-  @Test
-  public void testInitiateMultipartUploadWithReplicationInformationSet() throws
-      IOException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
-
-    assertNotNull(multipartInfo);
-    String uploadID = multipartInfo.getUploadID();
-    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
-    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
-    Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotNull(multipartInfo.getUploadID());
-
-    // Call initiate multipart upload for the same key again, this should
-    // generate a new uploadID.
-    multipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
-
-    assertNotNull(multipartInfo);
-    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
-    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
-    Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotEquals(multipartInfo.getUploadID(), uploadID);
-    assertNotNull(multipartInfo.getUploadID());
-  }
-
-
-  @Test
-  public void testInitiateMultipartUploadWithDefaultReplication() throws
-      IOException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName);
-
-    assertNotNull(multipartInfo);
-    String uploadID = multipartInfo.getUploadID();
-    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
-    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
-    Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotNull(multipartInfo.getUploadID());
-
-    // Call initiate multipart upload for the same key again, this should
-    // generate a new uploadID.
-    multipartInfo = bucket.initiateMultipartUpload(keyName);
-
-    assertNotNull(multipartInfo);
-    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
-    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
-    Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotEquals(multipartInfo.getUploadID(), uploadID);
-    assertNotNull(multipartInfo.getUploadID());
-  }
-
-
-  @Test
-  public void testUploadPartWithNoOverride() throws IOException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-    String sampleData = "sample Value";
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
-
-    assertNotNull(multipartInfo);
-    String uploadID = multipartInfo.getUploadID();
-    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
-    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
-    Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotNull(multipartInfo.getUploadID());
-
-    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
-        sampleData.length(), 1, uploadID);
-    ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0,
-        sampleData.length());
-    ozoneOutputStream.close();
-
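-    // The part is committed when the stream is closed; the commit info,
-    // including the generated part name, is available afterwards.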
-    OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
-        .getCommitUploadPartInfo();
-
-    assertNotNull(commitUploadPartInfo);
-    String partName = commitUploadPartInfo.getPartName();
-    assertNotNull(partName);
-
-  }
-
-  @Test
-  public void testUploadPartOverrideWithStandAlone() throws IOException {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-    String sampleData = "sample Value";
-    int partNumber = 1;
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
-
-    assertNotNull(multipartInfo);
-    String uploadID = multipartInfo.getUploadID();
-    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
-    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
-    Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotNull(multipartInfo.getUploadID());
-
-    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
-        sampleData.length(), partNumber, uploadID);
-    ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0,
-        sampleData.length());
-    ozoneOutputStream.close();
-
-    OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
-        .getCommitUploadPartInfo();
-
-    assertNotNull(commitUploadPartInfo);
-    String partName = commitUploadPartInfo.getPartName();
-    assertNotNull(commitUploadPartInfo.getPartName());
-
-    // Overwrite the part by creating a part key with the same part number.
-    sampleData = "sample Data Changed";
-    ozoneOutputStream = bucket.createMultipartKey(keyName,
-        sampleData.length(), partNumber, uploadID);
-    ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0, "name"
-        .length());
-    ozoneOutputStream.close();
-
-    commitUploadPartInfo = ozoneOutputStream
-        .getCommitUploadPartInfo();
-
-    assertNotNull(commitUploadPartInfo);
-    assertNotNull(commitUploadPartInfo.getPartName());
-
-    // The new part name should differ from the old part name.
-    assertNotEquals("Part names should be different", partName,
-        commitUploadPartInfo.getPartName());
-  }
-
-  @Test
-  public void testUploadPartOverrideWithRatis() throws IOException {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-    String sampleData = "sample Value";
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        ReplicationType.RATIS, ReplicationFactor.THREE);
-
-    assertNotNull(multipartInfo);
-    String uploadID = multipartInfo.getUploadID();
-    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
-    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
-    Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotNull(multipartInfo.getUploadID());
-
-    int partNumber = 1;
-
-    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
-        sampleData.length(), partNumber, uploadID);
-    ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0,
-        sampleData.length());
-    ozoneOutputStream.close();
-
-    OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
-        .getCommitUploadPartInfo();
-
-    assertNotNull(commitUploadPartInfo);
-    String partName = commitUploadPartInfo.getPartName();
-    assertNotNull(commitUploadPartInfo.getPartName());
-
-    // Overwrite the part by creating a part key with the same part number.
-    sampleData = "sample Data Changed";
-    ozoneOutputStream = bucket.createMultipartKey(keyName,
-        sampleData.length(), partNumber, uploadID);
-    ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0, "name"
-        .length());
-    ozoneOutputStream.close();
-
-    commitUploadPartInfo = ozoneOutputStream
-        .getCommitUploadPartInfo();
-
-    assertNotNull(commitUploadPartInfo);
-    assertNotNull(commitUploadPartInfo.getPartName());
-
-    // The new part name should differ from the old part name.
-    assertNotEquals("Part names should be different", partName,
-        commitUploadPartInfo.getPartName());
-  }
-
-  @Test
-  public void testNoSuchUploadError() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-    String sampleData = "sample Value";
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    String uploadID = "random";
-    OzoneTestUtils
-        .expectOmException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR, () ->
-            bucket
-                .createMultipartKey(keyName, sampleData.length(), 1, uploadID));
-  }
-
-  @Test
-  public void testMultipartUpload() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    doMultipartUpload(bucket, keyName, (byte)98);
-  }
-
-
-  @Test
-  public void testMultipartUploadOverride() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    doMultipartUpload(bucket, keyName, (byte)96);
-
-    // Initiate multipart upload again; we should read the latest version, as
-    // reads always return the latest blocks.
-    doMultipartUpload(bucket, keyName, (byte)97);
-
-  }
-
-
-  @Test
-  public void testMultipartUploadWithPartsLessThanMinSize() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    // Initiate multipart upload
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
-        ONE);
-
-    // Upload Parts
-    Map<Integer, String> partsMap = new TreeMap<>();
-    // Upload parts 1 and 2, each smaller than the minimum part size.
-    String partName = uploadPart(bucket, keyName, uploadID, 1,
-        "data".getBytes(UTF_8));
-    partsMap.put(1, partName);
-
-    partName = uploadPart(bucket, keyName, uploadID, 2,
-        "data".getBytes(UTF_8));
-    partsMap.put(2, partName);
-
-
-    // Complete multipart upload
-
-    OzoneTestUtils.expectOmException(ResultCodes.ENTITY_TOO_SMALL,
-        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
-
-  }
-  @Test
-  public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
-        ONE);
-
-    // We have not uploaded any parts; completing with a non-empty parts list
-    // should throw an error.
-    TreeMap<Integer, String> partsMap = new TreeMap<>();
-    partsMap.put(1, UUID.randomUUID().toString());
-
-    OzoneTestUtils.expectOmException(ResultCodes.MISMATCH_MULTIPART_LIST,
-        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
-
-  }
-
-  @Test
-  public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
-        ONE);
-
-    uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
-    // We uploaded part 1, but the parts map references a part name that was
-    // never returned, so completion should fail with a mismatch error.
-    TreeMap<Integer, String> partsMap = new TreeMap<>();
-    partsMap.put(1, UUID.randomUUID().toString());
-
-    OzoneTestUtils.expectOmException(ResultCodes.MISMATCH_MULTIPART_LIST,
-        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
-
-  }
-
-  @Test
-  public void testMultipartUploadWithMissingParts() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
-        ONE);
-
-    uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
-    // We uploaded part 1, but the parts map references part 3, which was
-    // never uploaded, so completion should fail with missing parts.
-    TreeMap<Integer, String> partsMap = new TreeMap<>();
-    partsMap.put(3, "random");
-
-    OzoneTestUtils.expectOmException(ResultCodes.MISSING_UPLOAD_PARTS,
-        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
-  }
-
-  @Test
-  public void testAbortUploadFail() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    OzoneTestUtils.expectOmException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR,
-        () -> bucket.abortMultipartUpload(keyName, "random"));
-  }
-
-
-  @Test
-  public void testAbortUploadSuccessWithOutAnyParts() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
-        ONE);
-    bucket.abortMultipartUpload(keyName, uploadID);
-  }
-
-  @Test
-  public void testAbortUploadSuccessWithParts() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
-        ONE);
-    uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
-    bucket.abortMultipartUpload(keyName, uploadID);
-  }
-
-  @Test
-  public void testListMultipartUploadParts() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    Map<Integer, String> partsMap = new TreeMap<>();
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
-        ONE);
-    String partName1 = uploadPart(bucket, keyName, uploadID, 1,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(1, partName1);
-
-    String partName2 = uploadPart(bucket, keyName, uploadID, 2,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(2, partName2);
-
-    String partName3 = uploadPart(bucket, keyName, uploadID, 3,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(3, partName3);
-
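-    // List all three parts in a single page: partNumberMarker 0, maxParts 3.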
-    OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
-        bucket.listParts(keyName, uploadID, 0, 3);
-
-    Assert.assertEquals(STAND_ALONE,
-        ozoneMultipartUploadPartListParts.getReplicationType());
-    Assert.assertEquals(3,
-        ozoneMultipartUploadPartListParts.getPartInfoList().size());
-
-    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
-            .getPartInfoList().get(0).getPartNumber()),
-        ozoneMultipartUploadPartListParts.getPartInfoList().get(0)
-            .getPartName());
-    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
-            .getPartInfoList().get(1).getPartNumber()),
-        ozoneMultipartUploadPartListParts.getPartInfoList().get(1)
-            .getPartName());
-    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
-            .getPartInfoList().get(2).getPartNumber()),
-        ozoneMultipartUploadPartListParts.getPartInfoList().get(2)
-            .getPartName());
-
-    Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
-  }
-
-  @Test
-  public void testListMultipartUploadPartsWithContinuation()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    Map<Integer, String> partsMap = new TreeMap<>();
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
-        ONE);
-    String partName1 = uploadPart(bucket, keyName, uploadID, 1,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(1, partName1);
-
-    String partName2 = uploadPart(bucket, keyName, uploadID, 2,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(2, partName2);
-
-    String partName3 = uploadPart(bucket, keyName, uploadID, 3,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(3, partName3);
-
-    OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
-        bucket.listParts(keyName, uploadID, 0, 2);
-
-    Assert.assertEquals(STAND_ALONE,
-        ozoneMultipartUploadPartListParts.getReplicationType());
-
-    Assert.assertEquals(2,
-        ozoneMultipartUploadPartListParts.getPartInfoList().size());
-
-    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
-            .getPartInfoList().get(0).getPartNumber()),
-        ozoneMultipartUploadPartListParts.getPartInfoList().get(0)
-            .getPartName());
-    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
-            .getPartInfoList().get(1).getPartNumber()),
-        ozoneMultipartUploadPartListParts.getPartInfoList().get(1)
-            .getPartName());
-
-    // Get the remaining part using the continuation marker from the first page.
-    Assert.assertTrue(ozoneMultipartUploadPartListParts.isTruncated());
-    ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID,
-        ozoneMultipartUploadPartListParts.getNextPartNumberMarker(), 2);
-
-    Assert.assertEquals(1,
-        ozoneMultipartUploadPartListParts.getPartInfoList().size());
-    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
-            .getPartInfoList().get(0).getPartNumber()),
-        ozoneMultipartUploadPartListParts.getPartInfoList().get(0)
-            .getPartName());
-
-
-    // As there are no more parts to list, isTruncated should be false.
-    Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
-
-  }
-
-  @Test
-  public void testListPartsInvalidPartMarker() throws Exception {
-    try {
-      String volumeName = UUID.randomUUID().toString();
-      String bucketName = UUID.randomUUID().toString();
-      String keyName = UUID.randomUUID().toString();
-
-      store.createVolume(volumeName);
-      OzoneVolume volume = store.getVolume(volumeName);
-      volume.createBucket(bucketName);
-      OzoneBucket bucket = volume.getBucket(bucketName);
-
-
-      bucket.listParts(keyName, "random", -1, 2);
-      fail("listParts should fail for a negative part number marker");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("Should be greater than or " +
-          "equal to zero", ex);
-    }
-  }
-
-  @Test
-  public void testListPartsInvalidMaxParts() throws Exception {
-    try {
-      String volumeName = UUID.randomUUID().toString();
-      String bucketName = UUID.randomUUID().toString();
-      String keyName = UUID.randomUUID().toString();
-
-      store.createVolume(volumeName);
-      OzoneVolume volume = store.getVolume(volumeName);
-      volume.createBucket(bucketName);
-      OzoneBucket bucket = volume.getBucket(bucketName);
-
-
-      bucket.listParts(keyName, "random", 1, -1);
-      fail("listParts should fail for non-positive max parts");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("Max Parts Should be greater " +
-          "than zero", ex);
-    }
-  }
-
-  @Test
-  public void testListPartsWithPartMarkerGreaterThanPartCount()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
-        ONE);
-    uploadPart(bucket, keyName, uploadID, 1,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-
-
-    OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
-        bucket.listParts(keyName, uploadID, 100, 2);
-
-    // Should return an empty part list.
-
-    Assert.assertEquals(0,
-        ozoneMultipartUploadPartListParts.getPartInfoList().size());
-    Assert.assertEquals(STAND_ALONE,
-        ozoneMultipartUploadPartListParts.getReplicationType());
-
-    // As there are no parts with a part number greater than partNumberMarker,
-    // the list is not truncated, so isTruncated should return false.
-    Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
-
-  }
-
-  @Test
-  public void testListPartsWithInvalidUploadID() throws Exception {
-    OzoneTestUtils
-        .expectOmException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> {
-          String volumeName = UUID.randomUUID().toString();
-          String bucketName = UUID.randomUUID().toString();
-          String keyName = UUID.randomUUID().toString();
-
-          store.createVolume(volumeName);
-          OzoneVolume volume = store.getVolume(volumeName);
-          volume.createBucket(bucketName);
-          OzoneBucket bucket = volume.getBucket(bucketName);
-          OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
-              bucket.listParts(keyName, "random", 100, 2);
-        });
-  }
-
-  @Test
-  public void testNativeAclsForVolume() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    store.createVolume(volumeName);
-
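-    // Build an OzoneObj handle identifying the volume for the ACL APIs.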
-    OzoneObj ozObj = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setResType(OzoneObj.ResourceType.VOLUME)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-
-    validateOzoneAccessAcl(ozObj);
-  }
-
-  @Test
-  public void testNativeAclsForBucket() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    assertNotNull("Bucket creation failed", bucket);
-
-    OzoneObj ozObj = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setResType(OzoneObj.ResourceType.BUCKET)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-
-    validateOzoneAccessAcl(ozObj);
-
-    OzoneObj volObj = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setResType(OzoneObj.ResourceType.VOLUME)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-    validateDefaultAcls(volObj, ozObj, volume, null);
-  }
-
-  private void validateDefaultAcls(OzoneObj parentObj, OzoneObj childObj,
-      OzoneVolume volume,  OzoneBucket bucket) throws Exception {
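-    // Attach DEFAULT acls to the parent, recreate the child so it re-inherits
-    // them as ACCESS acls, then verify the acls on both parent and child.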
-    assertTrue(store.addAcl(parentObj, defaultUserAcl));
-    assertTrue(store.addAcl(parentObj, defaultGroupAcl));
-    if (volume != null) {
-      volume.deleteBucket(childObj.getBucketName());
-      volume.createBucket(childObj.getBucketName());
-    } else {
-      if (childObj.getResourceType().equals(OzoneObj.ResourceType.KEY)) {
-        bucket.deleteKey(childObj.getKeyName());
-        writeKey(childObj.getKeyName(), bucket);
-      } else {
-        store.setAcl(childObj, getAclList(new OzoneConfiguration()));
-      }
-    }
-    List<OzoneAcl> acls = store.getAcl(parentObj);
-    assertTrue("Current acls: " + StringUtils.join(",", acls) +
-            " inheritedUserAcl: " + inheritedUserAcl,
-        acls.contains(defaultUserAcl));
-    assertTrue("Current acls: " + StringUtils.join(",", acls) +
-            " inheritedGroupAcl: " + inheritedGroupAcl,
-        acls.contains(defaultGroupAcl));
-
-    acls = store.getAcl(childObj);
-    assertTrue("Current acls:" + StringUtils.join(",", acls) +
-            " inheritedUserAcl:" + inheritedUserAcl,
-        acls.contains(inheritedUserAcl));
-    assertTrue("Current acls:" + StringUtils.join(",", acls) +
-            " inheritedGroupAcl:" + inheritedGroupAcl,
-        acls.contains(inheritedGroupAcl));
-  }
-
-  @Test
-  public void testNativeAclsForKey() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String key1 = "dir1/dir2" + UUID.randomUUID().toString();
-    String key2 = "dir1/dir2" + UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    assertNotNull("Bucket creation failed", bucket);
-
-    writeKey(key1, bucket);
-    writeKey(key2, bucket);
-
-    OzoneObj ozObj = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(key1)
-        .setResType(OzoneObj.ResourceType.KEY)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-
-    // Validates access acls.
-    validateOzoneAccessAcl(ozObj);
-
-    // Check default acls inherited from bucket.
-    OzoneObj buckObj = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(key1)
-        .setResType(OzoneObj.ResourceType.BUCKET)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-
-    validateDefaultAcls(buckObj, ozObj, null, bucket);
-
-    // Check default acls inherited from prefix.
-    OzoneObj prefixObj = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(key1)
-        .setPrefixName("dir1/")
-        .setResType(OzoneObj.ResourceType.PREFIX)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-    store.setAcl(prefixObj, getAclList(new OzoneConfiguration()));
-    // Prefix should inherit DEFAULT acl from bucket.
-
-    List<OzoneAcl> acls = store.getAcl(prefixObj);
-    assertTrue("Current acls:" + StringUtils.join(",", acls),
-        acls.contains(inheritedUserAcl));
-    assertTrue("Current acls:" + StringUtils.join(",", acls),
-        acls.contains(inheritedGroupAcl));
-    // Remove inherited acls from prefix.
-    assertTrue(store.removeAcl(prefixObj, inheritedUserAcl));
-    assertTrue(store.removeAcl(prefixObj, inheritedGroupAcl));
-
-    validateDefaultAcls(prefixObj, ozObj, null, bucket);
-  }
-
-  @Test
-  public void testNativeAclsForPrefix() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    String prefix1 = "PF" + UUID.randomUUID().toString() + "/";
-    String key1 = prefix1 + "KEY" + UUID.randomUUID().toString();
-
-    String prefix2 = "PF" + UUID.randomUUID().toString() + "/";
-    String key2 = prefix2 + "KEY" + UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    assertNotNull("Bucket creation failed", bucket);
-
-    writeKey(key1, bucket);
-    writeKey(key2, bucket);
-
-    OzoneObj prefixObj = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setPrefixName(prefix1)
-        .setResType(OzoneObj.ResourceType.PREFIX)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-
-    OzoneObj prefixObj2 = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setPrefixName(prefix2)
-        .setResType(OzoneObj.ResourceType.PREFIX)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-
-    // add acl
-    BitSet aclRights1 = new BitSet();
-    aclRights1.set(READ.ordinal());
-    OzoneAcl user1Acl = new OzoneAcl(USER,
-        "user1", aclRights1, ACCESS);
-    assertTrue(store.addAcl(prefixObj, user1Acl));
-
-    // get acl
-    List<OzoneAcl> aclsGet = store.getAcl(prefixObj);
-    Assert.assertEquals(1, aclsGet.size());
-    Assert.assertEquals(user1Acl, aclsGet.get(0));
-
-    // remove acl
-    Assert.assertTrue(store.removeAcl(prefixObj, user1Acl));
-    aclsGet = store.getAcl(prefixObj);
-    Assert.assertEquals(0, aclsGet.size());
-
-    // set acl
-    BitSet aclRights2 = new BitSet();
-    aclRights2.set(ACLType.ALL.ordinal());
-    OzoneAcl group1Acl = new OzoneAcl(GROUP,
-        "group1", aclRights2, ACCESS);
-    List<OzoneAcl> acls = new ArrayList<>();
-    acls.add(user1Acl);
-    acls.add(group1Acl);
-    Assert.assertTrue(store.setAcl(prefixObj, acls));
-
-    // get acl
-    aclsGet = store.getAcl(prefixObj);
-    Assert.assertEquals(2, aclsGet.size());
-
-    OzoneObj keyObj = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(key1)
-        .setResType(OzoneObj.ResourceType.KEY)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-
-    // Check default acls inherited from prefix.
-    validateDefaultAcls(prefixObj, keyObj, null, bucket);
-
-    // Check default acls inherited from bucket when prefix does not exist.
-    validateDefaultAcls(prefixObj2, keyObj, null, bucket);
-  }
-
-  /**
-   * Helper function to get the default acl list for the current user.
-   *
-   * @return list of default acls.
-   * @throws IOException
-   */
-  private List<OzoneAcl> getAclList(OzoneConfiguration conf)
-      throws IOException {
-    List<OzoneAcl> listOfAcls = new ArrayList<>();
-    //User ACL
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class);
-    ACLType userRights = aclConfig.getUserDefaultRights();
-    ACLType groupRights = aclConfig.getGroupDefaultRights();
-
-    listOfAcls.add(new OzoneAcl(USER,
-        ugi.getUserName(), userRights, ACCESS));
-    //Group ACLs of the User
-    List<String> userGroups = Arrays.asList(ugi.getGroupNames());
-    userGroups.forEach(group -> listOfAcls.add(
-        new OzoneAcl(GROUP, group, groupRights, ACCESS)));
-    return listOfAcls;
-  }
-
-  /**
-   * Helper function to validate ozone acls for a given object.
-   * @param ozObj ozone object whose access acls are validated.
-   */
-  private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException {
-    // Get acls for volume.
-    List<OzoneAcl> expectedAcls = getAclList(new OzoneConfiguration());
-
-    // Case 1: Add a new acl permission to an existing acl.
-    if (!expectedAcls.isEmpty()) {
-      OzoneAcl oldAcl = expectedAcls.get(0);
-      OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(),
-          ACLType.READ_ACL, ACCESS);
-      // Verify that the operation succeeded.
-      assertTrue(store.addAcl(ozObj, newAcl));
-
-      assertEquals(expectedAcls.size(), store.getAcl(ozObj).size());
-      final Optional<OzoneAcl> readAcl = store.getAcl(ozObj).stream()
-          .filter(acl -> acl.getName().equals(newAcl.getName())
-              && acl.getType().equals(newAcl.getType()))
-          .findFirst();
-      assertTrue("New acl expected but not found.", readAcl.isPresent());
-      assertTrue("READ_ACL should exist in current acls:"
-          + readAcl.get(),
-          readAcl.get().getAclList().contains(ACLType.READ_ACL));
-
-
-      // Case 2: Remove the newly added acl permission.
-      assertTrue(store.removeAcl(ozObj, newAcl));
-
-      assertEquals(expectedAcls.size(), store.getAcl(ozObj).size());
-      final Optional<OzoneAcl> nonReadAcl = store.getAcl(ozObj).stream()
-          .filter(acl -> acl.getName().equals(newAcl.getName())
-              && acl.getType().equals(newAcl.getType()))
-          .findFirst();
-      assertTrue("New acl expected but not found.", nonReadAcl.isPresent());
-      assertFalse("READ_ACL should not exist in current acls:"
-              + nonReadAcl.get(),
-          nonReadAcl.get().getAclList().contains(ACLType.READ_ACL));
-    } else {
-      fail("Default acl should not be empty.");
-    }
-
-    List<OzoneAcl> keyAcls = store.getAcl(ozObj);
-    expectedAcls.forEach(a -> assertTrue(keyAcls.contains(a)));
-
-    // Remove all acls.
-    for (OzoneAcl a : expectedAcls) {
-      store.removeAcl(ozObj, a);
-    }
-    List<OzoneAcl> newAcls = store.getAcl(ozObj);
-    assertEquals(0, newAcls.size());
-
-    // Add acls one by one and verify getAcl after each addition.
-    int aclCount = 0;
-    for (OzoneAcl a : expectedAcls) {
-      aclCount++;
-      assertTrue(store.addAcl(ozObj, a));
-      assertEquals(aclCount, store.getAcl(ozObj).size());
-    }
-    newAcls = store.getAcl(ozObj);
-    assertEquals(expectedAcls.size(), newAcls.size());
-    List<OzoneAcl> finalNewAcls = newAcls;
-    expectedAcls.forEach(a -> assertTrue(finalNewAcls.contains(a)));
-
-    // Reset acls via setAcl.
-    OzoneAcl ua = new OzoneAcl(USER, "userx",
-        ACLType.READ_ACL, ACCESS);
-    OzoneAcl ug = new OzoneAcl(GROUP, "userx",
-        ACLType.ALL, ACCESS);
-    store.setAcl(ozObj, Arrays.asList(ua, ug));
-    newAcls = store.getAcl(ozObj);
-    assertEquals(2, newAcls.size());
-    assertTrue(newAcls.contains(ua));
-    assertTrue(newAcls.contains(ug));
-  }
-
-  private void writeKey(String key1, OzoneBucket bucket) throws IOException {
-    OzoneOutputStream out = bucket.createKey(key1, 1024, STAND_ALONE,
-        ONE, new HashMap<>());
-    // Use ASCII so the byte length matches the declared key size of 1024.
-    out.write(RandomStringUtils.randomAscii(1024).getBytes(UTF_8));
-    out.close();
-  }
-
-  private byte[] generateData(int size, byte val) {
-    byte[] chars = new byte[size];
-    Arrays.fill(chars, val);
-    return chars;
-  }
-
-
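-  /**
-   * Uploads three parts (two at the 5MB minimum size plus a smaller final
-   * part), completes the multipart upload, and verifies that the key read
-   * back equals the concatenation of all parts.
-   */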
-  private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val)
-      throws Exception {
-    // Initiate Multipart upload request
-    String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType
-        .RATIS, ReplicationFactor.THREE);
-
-    // Upload parts
-    Map<Integer, String> partsMap = new TreeMap<>();
-
-    // Generate 5MB of data: each part must be at least 5MB; only the last
-    // part may be smaller.
-    int length = 0;
-    byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, val);
-    String partName = uploadPart(bucket, keyName, uploadID, 1, data);
-    partsMap.put(1, partName);
-    length += data.length;
-
-
-    partName = uploadPart(bucket, keyName, uploadID, 2, data);
-    partsMap.put(2, partName);
-    length += data.length;
-
-    String part3 = UUID.randomUUID().toString();
-    partName = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes(
-        UTF_8));
-    partsMap.put(3, partName);
-    length += part3.getBytes(UTF_8).length;
-
-
-    // Complete multipart upload request
-    completeMultipartUpload(bucket, keyName, uploadID, partsMap);
-
-
-    // Now read back the key whose multipart upload has completed.
-    byte[] fileContent = new byte[data.length + data.length + part3.getBytes(
-        UTF_8).length];
-    OzoneInputStream inputStream = bucket.readKey(keyName);
-    inputStream.read(fileContent);
-
-    Assert.assertTrue(verifyRatisReplication(bucket.getVolumeName(),
-        bucket.getName(), keyName, ReplicationType.RATIS,
-        ReplicationFactor.THREE));
-
-    StringBuilder sb = new StringBuilder(length);
-
-    // Combine the data of all parts and check that it matches the data read
-    // back for the key.
-    String part1 = new String(data);
-    String part2 = new String(data);
-    sb.append(part1);
-    sb.append(part2);
-    sb.append(part3);
-    Assert.assertEquals(sb.toString(), new String(fileContent));
-  }
-
-
-  private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
-      ReplicationType replicationType, ReplicationFactor replicationFactor)
-      throws Exception {
-    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        replicationType, replicationFactor);
-
-    String uploadID = multipartInfo.getUploadID();
-    Assert.assertNotNull(uploadID);
-    return uploadID;
-  }
-
-  private String uploadPart(OzoneBucket bucket, String keyName, String
-      uploadID, int partNumber, byte[] data) throws Exception {
-    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
-        data.length, partNumber, uploadID);
-    ozoneOutputStream.write(data, 0,
-        data.length);
-    ozoneOutputStream.close();
-
-    OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
-        ozoneOutputStream.getCommitUploadPartInfo();
-
-    Assert.assertNotNull(omMultipartCommitUploadPartInfo);
-    Assert.assertNotNull(omMultipartCommitUploadPartInfo.getPartName());
-    return omMultipartCommitUploadPartInfo.getPartName();
-
-  }
-
-  private void completeMultipartUpload(OzoneBucket bucket, String keyName,
-      String uploadID, Map<Integer, String> partsMap) throws Exception {
-    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket
-        .completeMultipartUpload(keyName, uploadID, partsMap);
-
-    Assert.assertNotNull(omMultipartUploadCompleteInfo);
-    Assert.assertEquals(bucket.getName(),
-        omMultipartUploadCompleteInfo.getBucket());
-    Assert.assertEquals(bucket.getVolumeName(),
-        omMultipartUploadCompleteInfo.getVolume());
-    Assert.assertEquals(keyName, omMultipartUploadCompleteInfo.getKey());
-    Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash());
-  }
-
-  /**
-   * Tests GDPR encryption/decryption.
-   * 1. Create GDPR Enabled bucket.
-   * 2. Create a Key in this bucket so it gets encrypted via GDPRSymmetricKey.
-   * 3. Read key and validate the content/metadata is as expected because the
-   * readKey will decrypt using the GDPR Symmetric Key with details from KeyInfo
-   * Metadata.
-   * 4. To check encryption, we forcibly update the KeyInfo Metadata and
-   * remove the gdprEnabled flag.
-   * 5. When we now read the key, {@link RpcClient} checks for the GDPR flag in
-   * method createInputStream. If the gdprEnabled flag in metadata is set to
-   * true, it decrypts using the GDPRSymmetricKey. Since we removed that flag
-   * from the metadata for this key, it will read the encrypted data as-is.
-   * 6. Thus, when we compare this content with the expected text, it should
-   * not match, as the decryption has not been performed.
-   * @throws Exception
-   */
-  @Test
-  public void testKeyReadWriteForGDPR() throws Exception {
-    //Step 1
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    BucketArgs args = BucketArgs.newBuilder()
-        .addMetadata(OzoneConsts.GDPR_FLAG, "true").build();
-    volume.createBucket(bucketName, args);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, bucket.getName());
-    Assert.assertNotNull(bucket.getMetadata());
-    Assert.assertEquals("true",
-        bucket.getMetadata().get(OzoneConsts.GDPR_FLAG));
-
-    //Step 2
-    String text = "hello world";
-    Map<String, String> keyMetadata = new HashMap<>();
-    keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
-    OzoneOutputStream out = bucket.createKey(keyName,
-        text.getBytes().length, STAND_ALONE, ONE, keyMetadata);
-    out.write(text.getBytes());
-    out.close();
-
-    //Step 3
-    OzoneKeyDetails key = bucket.getKey(keyName);
-
-    Assert.assertEquals(keyName, key.getName());
-    Assert.assertEquals("true", key.getMetadata().get(OzoneConsts.GDPR_FLAG));
-    Assert.assertEquals("AES",
-        key.getMetadata().get(OzoneConsts.GDPR_ALGORITHM));
-    Assert.assertNotNull(key.getMetadata().get(OzoneConsts.GDPR_SECRET));
-
-    OzoneInputStream is = bucket.readKey(keyName);
-    byte[] fileContent = new byte[text.getBytes().length];
-    is.read(fileContent);
-    Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-        keyName, STAND_ALONE,
-        ONE));
-    Assert.assertEquals(text, new String(fileContent));
-
-    //Step 4
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    OmKeyInfo omKeyInfo =
-        omMetadataManager.getKeyTable().get(omMetadataManager.getOzoneKey(
-            volumeName, bucketName, keyName));
-
-    omKeyInfo.getMetadata().remove(OzoneConsts.GDPR_FLAG);
-
-    omMetadataManager.getKeyTable().put(omMetadataManager.getOzoneKey(
-         volumeName, bucketName, keyName), omKeyInfo);
-
-    //Step 5
-    key = bucket.getKey(keyName);
-    Assert.assertEquals(keyName, key.getName());
-    Assert.assertNull(key.getMetadata().get(OzoneConsts.GDPR_FLAG));
-    is = bucket.readKey(keyName);
-    fileContent = new byte[text.getBytes().length];
-    is.read(fileContent);
-
-    //Step 6
-    Assert.assertNotEquals(text, new String(fileContent));
-
-  }
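-
-  // For illustration only: a minimal sketch (not the actual RpcClient code)
-  // of the client-side decryption that step 5 above depends on, assuming the
-  // key metadata carries the GDPR secret and algorithm as asserted in this
-  // test. Identifier names below are hypothetical.
-  //
-  //   Map<String, String> md = keyInfo.getMetadata();
-  //   if ("true".equals(md.get(OzoneConsts.GDPR_FLAG))) {
-  //     GDPRSymmetricKey gk = new GDPRSymmetricKey(
-  //         md.get(OzoneConsts.GDPR_SECRET),
-  //         md.get(OzoneConsts.GDPR_ALGORITHM));
-  //     // With the flag stripped from metadata, this branch is skipped and
-  //     // the raw (still encrypted) bytes are returned as-is.
-  //     in = new CipherInputStream(rawInputStream, gk.getCipher());
-  //   }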
-
-  /**
-   * Tests deletedKey for GDPR.
-   * 1. Create GDPR Enabled bucket.
-   * 2. Create a Key in this bucket so it gets encrypted via GDPRSymmetricKey.
-   * 3. Read key and validate the content/metadata is as expected because the
-   * readKey will decrypt using the GDPR Symmetric Key with details from KeyInfo
-   * Metadata.
-   * 4. Delete this key in GDPR enabled bucket
-   * 5. Confirm the deleted key metadata in deletedTable does not contain the
-   * GDPR encryption details (flag, secret, algorithm).
-   * @throws Exception
-   */
-  @Test
-  public void testDeletedKeyForGDPR() throws Exception {
-    //Step 1
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    BucketArgs args = BucketArgs.newBuilder()
-        .addMetadata(OzoneConsts.GDPR_FLAG, "true").build();
-    volume.createBucket(bucketName, args);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    Assert.assertEquals(bucketName, bucket.getName());
-    Assert.assertNotNull(bucket.getMetadata());
-    Assert.assertEquals("true",
-        bucket.getMetadata().get(OzoneConsts.GDPR_FLAG));
-
-    //Step 2
-    String text = "hello world";
-    Map<String, String> keyMetadata = new HashMap<>();
-    keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
-    OzoneOutputStream out = bucket.createKey(keyName,
-        text.getBytes().length, STAND_ALONE, ONE, keyMetadata);
-    out.write(text.getBytes());
-    out.close();
-
-    //Step 3
-    OzoneKeyDetails key = bucket.getKey(keyName);
-
-    Assert.assertEquals(keyName, key.getName());
-    Assert.assertEquals("true", key.getMetadata().get(OzoneConsts.GDPR_FLAG));
-    Assert.assertEquals("AES",
-        key.getMetadata().get(OzoneConsts.GDPR_ALGORITHM));
-    Assert.assertNotNull(key.getMetadata().get(OzoneConsts.GDPR_SECRET));
-
-    OzoneInputStream is = bucket.readKey(keyName);
-    byte[] fileContent = new byte[text.getBytes().length];
-    is.read(fileContent);
-    Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-        keyName, STAND_ALONE,
-        ONE));
-    Assert.assertEquals(text, new String(fileContent));
-
-    //Step 4
-    bucket.deleteKey(keyName);
-
-    //Step 5
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-    RepeatedOmKeyInfo deletedKeys =
-        omMetadataManager.getDeletedTable().get(objectKey);
-    Map<String, String> deletedKeyMetadata =
-        deletedKeys.getOmKeyInfoList().get(0).getMetadata();
-    Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_FLAG));
-    Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_SECRET));
-    Assert.assertFalse(
-        deletedKeyMetadata.containsKey(OzoneConsts.GDPR_ALGORITHM));
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
deleted file mode 100644
index 0b424b1..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import net.jcip.annotations.NotThreadSafe;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.audit.AuditEventStatus;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.FixMethodOrder;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.runners.MethodSorters;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME;
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-import static org.junit.Assert.assertTrue;
-
-/**
- * This class tests audit logs for the xxxACL APIs of the Ozone client.
- * It is intentionally annotated as NotThreadSafe since it reads the
- * generated audit logs to verify the operations. Because the maven test
- * plugin triggers parallel test execution, other audit events may be
- * logged concurrently and cause the assertion-based tests in this class
- * to fail.
- */
-@NotThreadSafe
-@FixMethodOrder(MethodSorters.NAME_ASCENDING)
-@Ignore("Fix this after adding audit support for HA Acl code. This will be " +
-    "fixed by HDDS-2038")
-public class TestOzoneRpcClientForAclAuditLog {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestOzoneRpcClientForAclAuditLog.class);
-  private static UserGroupInformation ugi;
-  private static final OzoneAcl USER_ACL =
-      new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER,
-      "johndoe", IAccessAuthorizer.ACLType.ALL, ACCESS);
-  private static final OzoneAcl USER_ACL_2 =
-      new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER,
-      "jane", IAccessAuthorizer.ACLType.ALL, ACCESS);
-  private static List<OzoneAcl> aclListToAdd = new ArrayList<>();
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneClient ozClient = null;
-  private static ObjectStore store = null;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private static String scmId = UUID.randomUUID().toString();
-
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   *
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    System.setProperty("log4j.configurationFile", "auditlog.properties");
-    ugi = UserGroupInformation.getCurrentUser();
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(OZONE_ACL_ENABLED, true);
-    conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD);
-    conf.set(OZONE_ACL_AUTHORIZER_CLASS,
-        OZONE_ACL_AUTHORIZER_CLASS_NATIVE);
-    startCluster(conf);
-    aclListToAdd.add(USER_ACL);
-    aclListToAdd.add(USER_ACL_2);
-    emptyAuditLog();
-  }
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * @param conf Configurations to start the cluster.
-   * @throws Exception
-   */
-  private static void startCluster(OzoneConfiguration conf) throws Exception {
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3)
-        .setScmId(scmId)
-        .build();
-    cluster.waitForClusterToBeReady();
-    ozClient = OzoneClientFactory.getRpcClient(conf);
-    store = ozClient.getObjectStore();
-    storageContainerLocationClient =
-        cluster.getStorageContainerLocationClient();
-  }
-
-  /**
-   * Close OzoneClient and shutdown MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void teardown() throws IOException {
-    shutdownCluster();
-    deleteAuditLog();
-  }
-
-  private static void deleteAuditLog() throws IOException {
-    File file = new File("audit.log");
-    if (FileUtils.deleteQuietly(file)) {
-      LOG.info("{} has been deleted.", file.getName());
-    } else {
-      LOG.info("audit.log could not be deleted.");
-    }
-  }
-
-  private static void emptyAuditLog() throws IOException {
-    File file = new File("audit.log");
-    FileUtils.writeLines(file, new ArrayList<>(), false);
-  }
-
-  /**
-   * Close OzoneClient and shutdown MiniOzoneCluster.
-   */
-  private static void shutdownCluster() throws IOException {
-    if (ozClient != null) {
-      ozClient.close();
-    }
-
-    if (storageContainerLocationClient != null) {
-      storageContainerLocationClient.close();
-    }
-
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testXXXAclSuccessAudits() throws Exception {
-
-    String userName = ugi.getUserName();
-    String adminName = ugi.getUserName();
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder()
-        .setAdmin(adminName)
-        .setOwner(userName)
-        .build();
-    store.createVolume(volumeName, createVolumeArgs);
-    verifyLog(OMAction.CREATE_VOLUME.name(), volumeName,
-        AuditEventStatus.SUCCESS.name());
-    OzoneVolume retVolumeinfo = store.getVolume(volumeName);
-    verifyLog(OMAction.READ_VOLUME.name(), volumeName,
-        AuditEventStatus.SUCCESS.name());
-    Assert.assertTrue(retVolumeinfo.getName().equalsIgnoreCase(volumeName));
-
-    OzoneObj volObj = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setResType(VOLUME)
-        .setStoreType(OZONE)
-        .build();
-
-    //Testing getAcl
-    List<OzoneAcl> acls = store.getAcl(volObj);
-    verifyLog(OMAction.GET_ACL.name(), volumeName,
-        AuditEventStatus.SUCCESS.name());
-    Assert.assertTrue(acls.size() > 0);
-
-    //Testing addAcl
-    store.addAcl(volObj, USER_ACL);
-    verifyLog(OMAction.ADD_ACL.name(), volumeName, "johndoe",
-        AuditEventStatus.SUCCESS.name());
-
-    //Testing removeAcl
-    store.removeAcl(volObj, USER_ACL);
-    verifyLog(OMAction.REMOVE_ACL.name(), volumeName, "johndoe",
-        AuditEventStatus.SUCCESS.name());
-
-    //Testing setAcl
-    store.setAcl(volObj, aclListToAdd);
-    verifyLog(OMAction.SET_ACL.name(), volumeName, "johndoe", "jane",
-        AuditEventStatus.SUCCESS.name());
-
-  }
-
-  @Test
-  public void testXXXAclFailureAudits() throws Exception {
-
-    String userName = "bilbo";
-    String adminName = "bilbo";
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder()
-        .setAdmin(adminName)
-        .setOwner(userName)
-        .build();
-    store.createVolume(volumeName, createVolumeArgs);
-    verifyLog(OMAction.CREATE_VOLUME.name(), volumeName,
-        AuditEventStatus.SUCCESS.name());
-
-    OzoneObj volObj = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setResType(VOLUME)
-        .setStoreType(OZONE)
-        .build();
-
-    // xxxAcl will fail as current ugi user doesn't have the required access
-    // for volume
-    try {
-      store.getAcl(volObj);
-    } catch (Exception ex) {
-      verifyLog(OMAction.GET_ACL.name(), volumeName,
-          AuditEventStatus.FAILURE.name());
-    }
-
-    try {
-      store.addAcl(volObj, USER_ACL);
-    } catch (Exception ex) {
-      verifyLog(OMAction.ADD_ACL.name(), volumeName,
-          AuditEventStatus.FAILURE.name());
-    }
-
-    try {
-      store.removeAcl(volObj, USER_ACL);
-    } catch (Exception ex) {
-      verifyLog(OMAction.REMOVE_ACL.name(), volumeName,
-          AuditEventStatus.FAILURE.name());
-    }
-
-    try {
-      store.setAcl(volObj, aclListToAdd);
-    } catch (Exception ex) {
-      verifyLog(OMAction.SET_ACL.name(), volumeName, "johndoe", "jane",
-          AuditEventStatus.FAILURE.name());
-    }
-
-  }
-
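-  /**
-   * Asserts that the first line of audit.log contains every expected token,
-   * then truncates the log so the next test starts from a clean file.
-   */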
-  private void verifyLog(String... expected) throws Exception {
-    File file = new File("audit.log");
-    final List<String> lines = FileUtils.readLines(file, (String) null);
-    GenericTestUtils.waitFor(() -> lines != null, 100, 60000);
-
-    try {
-      // When a log entry is expected, the log file will contain at least one
-      // line, and the first line must contain every expected token.
-      assertTrue(!lines.isEmpty());
-      for (String exp : expected) {
-        assertTrue(lines.get(0).contains(exp));
-      }
-    } catch (AssertionError ex) {
-      LOG.error("Error occurred in log verification", ex);
-      if (!lines.isEmpty()) {
-        LOG.error("Actual line ::: {}", lines.get(0));
-        LOG.error("Expected tokens ::: {}", Arrays.toString(expected));
-      }
-      throw ex;
-    } finally {
-      emptyAuditLog();
-    }
-  }
-
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
deleted file mode 100644
index 73a7de5..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
-import static org.junit.Assert.fail;
-
-/**
- * This class is to test all the public facing APIs of Ozone Client with an
- * active OM Ratis server.
- */
-public class TestOzoneRpcClientWithRatis extends TestOzoneRpcClientAbstract {
-  private static OzoneConfiguration conf;
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * Ozone is made active by setting OZONE_ENABLED = true.
-   * Ozone OM Ratis server is made active by setting
-   * OZONE_OM_RATIS_ENABLE = true;
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true);
-    conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY,
-        true);
-    startCluster(conf);
-  }
-
-  /**
-   * Close OzoneClient and shutdown MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() throws IOException {
-    shutdownCluster();
-  }
-
-  /**
-   * Tests reading a key and a file with network topology aware read enabled.
-   * @throws IOException
-   */
-  @Test
-  public void testGetKeyAndFileWithNetworkTopology() throws IOException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    String value = "sample value";
-    getStore().createVolume(volumeName);
-    OzoneVolume volume = getStore().getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    String keyName = UUID.randomUUID().toString();
-
-    // Write data into a key
-    try (OzoneOutputStream out = bucket.createKey(keyName,
-        value.getBytes().length, ReplicationType.RATIS,
-        THREE, new HashMap<>())) {
-      out.write(value.getBytes());
-    }
-
-    // Since the rpc client is outside the cluster, getFirstNode should be
-    // equal to getClosestNode.
-    OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
-    builder.setVolumeName(volumeName).setBucketName(bucketName)
-        .setKeyName(keyName).setRefreshPipeline(true);
-
-    // read key with topology aware read enabled
-    try (OzoneInputStream is = bucket.readKey(keyName)) {
-      byte[] b = new byte[value.getBytes().length];
-      is.read(b);
-      Assert.assertTrue(Arrays.equals(b, value.getBytes()));
-    } catch (OzoneChecksumException e) {
-      fail("Read key should succeed");
-    }
-
-    // read file with topology aware read enabled
-    try (OzoneInputStream is = bucket.readFile(keyName)) {
-      byte[] b = new byte[value.getBytes().length];
-      is.read(b);
-      Assert.assertTrue(Arrays.equals(b, value.getBytes()));
-    } catch (OzoneChecksumException e) {
-      fail("Read file should succeed");
-    }
-
-    // read key with topology aware read disabled
-    conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY,
-        false);
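-    // The setting is captured at client creation time, so a fresh client is
-    // needed for the changed configuration to take effect.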
-    try (OzoneClient newClient = OzoneClientFactory.getRpcClient(conf)) {
-      ObjectStore newStore = newClient.getObjectStore();
-      OzoneBucket newBucket =
-          newStore.getVolume(volumeName).getBucket(bucketName);
-      try (OzoneInputStream is = newBucket.readKey(keyName)) {
-        byte[] b = new byte[value.getBytes().length];
-        is.read(b);
-        Assert.assertTrue(Arrays.equals(b, value.getBytes()));
-      } catch (OzoneChecksumException e) {
-        fail("Read key should succeed");
-      }
-
-      // read file with topology aware read disabled
-      try (OzoneInputStream is = newBucket.readFile(keyName)) {
-        byte[] b = new byte[value.getBytes().length];
-        is.read(b);
-        Assert.assertTrue(Arrays.equals(b, value.getBytes()));
-      } catch (OzoneChecksumException e) {
-        fail("Read file should succeed");
-      }
-    }
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
deleted file mode 100644
index 1343a03..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientRatis;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneKeyDetails;
-import org.apache.hadoop.ozone.client.OzoneKeyLocation;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.junit.Rule;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.junit.rules.ExpectedException;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.UUID;
-
-import static org.junit.Assert.fail;
-
-/**
- * Test read retries from multiple nodes in the pipeline.
- */
-public class TestReadRetries {
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneClient ozClient = null;
-  private static ObjectStore store = null;
-  private static OzoneManager ozoneManager;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-
-  private static final String SCM_ID = UUID.randomUUID().toString();
-
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * @throws Exception
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(10)
-        .setScmId(SCM_ID)
-        .build();
-    cluster.waitForClusterToBeReady();
-    ozClient = OzoneClientFactory.getRpcClient(conf);
-    store = ozClient.getObjectStore();
-    storageContainerLocationClient =
-        cluster.getStorageContainerLocationClient();
-    ozoneManager = cluster.getOzoneManager();
-  }
-
-
-  /**
-   * Close OzoneClient and shutdown MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() throws IOException {
-    if (ozClient != null) {
-      ozClient.close();
-    }
-
-    if (storageContainerLocationClient != null) {
-      storageContainerLocationClient.close();
-    }
-
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-
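-  /**
-   * Writes a key with RATIS/THREE replication, then shuts down the pipeline
-   * datanodes one by one, verifying that reads keep succeeding until all
-   * three replicas are gone.
-   */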
-  @Test
-  public void testPutKeyAndGetKeyThreeNodes()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    String value = "sample value";
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    String keyName = UUID.randomUUID().toString();
-
-    OzoneOutputStream out = bucket
-        .createKey(keyName, value.getBytes().length, ReplicationType.RATIS,
-            ReplicationFactor.THREE, new HashMap<>());
-    KeyOutputStream groupOutputStream =
-        (KeyOutputStream) out.getOutputStream();
-    XceiverClientManager manager = groupOutputStream.getXceiverClientManager();
-    out.write(value.getBytes());
-    out.close();
-    // First, confirm the key info from the client matches the info in OM.
-    OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
-    builder.setVolumeName(volumeName).setBucketName(bucketName)
-        .setKeyName(keyName).setRefreshPipeline(true);
-    OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()).
-        getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
-    long containerID = keyInfo.getContainerID();
-    long localID = keyInfo.getLocalID();
-    OzoneKeyDetails keyDetails = bucket.getKey(keyName);
-    Assert.assertEquals(keyName, keyDetails.getName());
-
-    List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
-    Assert.assertEquals(1, keyLocations.size());
-    Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
-    Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
-
-    // Make sure that the data size matched.
-    Assert
-        .assertEquals(value.getBytes().length, keyLocations.get(0).getLength());
-
-    ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
-    Pipeline pipeline = cluster.getStorageContainerManager()
-        .getPipelineManager().getPipeline(container.getPipelineID());
-    List<DatanodeDetails> datanodes = pipeline.getNodes();
-
-    DatanodeDetails datanodeDetails = datanodes.get(0);
-    Assert.assertNotNull(datanodeDetails);
-
-    XceiverClientSpi clientSpi = manager.acquireClient(pipeline);
-    Assert.assertTrue(clientSpi instanceof XceiverClientRatis);
-    XceiverClientRatis ratisClient = (XceiverClientRatis)clientSpi;
-
-    ratisClient.watchForCommit(keyInfo.getBlockCommitSequenceId(), 5000);
-    // shutdown the datanode
-    cluster.shutdownHddsDatanode(datanodeDetails);
-
-    Assert.assertEquals(HddsProtos.LifeCycleState.OPEN, container.getState());
-    // try to read; this should be successful
-    readKey(bucket, keyName, value);
-
-    Assert.assertEquals(HddsProtos.LifeCycleState.OPEN, container.getState());
-    // shutdown the second datanode
-    datanodeDetails = datanodes.get(1);
-    cluster.shutdownHddsDatanode(datanodeDetails);
-    Assert.assertEquals(HddsProtos.LifeCycleState.OPEN, container.getState());
-
-    // The container is open; even after losing two nodes we should still be
-    // able to read via the standalone protocol.
-    // try to read
-    readKey(bucket, keyName, value);
-
-    // shutdown the 3rd datanode
-    datanodeDetails = datanodes.get(2);
-    cluster.shutdownHddsDatanode(datanodeDetails);
-    try {
-      // try to read
-      readKey(bucket, keyName, value);
-      fail("Expected exception not thrown");
-    } catch (IOException e) {
-      // an IOException is expected since none of the servers
-      // are available
-    }
-    manager.releaseClient(clientSpi, false);
-  }
-
-  private void readKey(OzoneBucket bucket, String keyName, String data)
-      throws IOException {
-    OzoneKey key = bucket.getKey(keyName);
-    Assert.assertEquals(keyName, key.getName());
-    OzoneInputStream is = bucket.readKey(keyName);
-    byte[] fileContent = new byte[data.getBytes().length];
-    is.read(fileContent);
-    is.close();
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
deleted file mode 100644
index 2ed24a2..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
+++ /dev/null
@@ -1,258 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.security.token.BlockTokenVerifier;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.CertificateClientTestImpl;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-
-/**
- * This class is to test all the public facing APIs of Ozone Client.
- */
-public class TestSecureOzoneRpcClient extends TestOzoneRpcClient {
-
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneClient ozClient = null;
-  private static ObjectStore store = null;
-  private static OzoneManager ozoneManager;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-
-  private static final String SCM_ID = UUID.randomUUID().toString();
-  private static File testDir;
-  private static OzoneConfiguration conf;
-  private static OzoneBlockTokenSecretManager secretManager;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    testDir = GenericTestUtils.getTestDir(
-        TestSecureOzoneRpcClient.class.getSimpleName());
-    OzoneManager.setTestSecureOmFlag(true);
-    conf = new OzoneConfiguration();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1);
-    conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, true);
-    conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    CertificateClientTestImpl certificateClientTest =
-        new CertificateClientTestImpl(conf);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(10)
-        .setScmId(SCM_ID)
-        .setCertificateClient(certificateClientTest)
-        .build();
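-    // Generate an Ozone block token for the current user and attach it to
-    // the UGI so that subsequent read/write RPCs are authenticated.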
-    String user = UserGroupInformation.getCurrentUser().getShortUserName();
-    secretManager = new OzoneBlockTokenSecretManager(new SecurityConfig(conf),
-        60 * 60, certificateClientTest.getCertificate().
-        getSerialNumber().toString());
-    secretManager.start(certificateClientTest);
-    Token<OzoneBlockTokenIdentifier> token = secretManager.generateToken(
-        user, EnumSet.allOf(AccessModeProto.class), 60 * 60);
-    UserGroupInformation.getCurrentUser().addToken(token);
-    cluster.getOzoneManager().startSecretManager();
-    cluster.waitForClusterToBeReady();
-    ozClient = OzoneClientFactory.getRpcClient(conf);
-    store = ozClient.getObjectStore();
-    storageContainerLocationClient =
-        cluster.getStorageContainerLocationClient();
-    ozoneManager = cluster.getOzoneManager();
-    TestOzoneRpcClient.setCluster(cluster);
-    TestOzoneRpcClient.setOzClient(ozClient);
-    TestOzoneRpcClient.setOzoneManager(ozoneManager);
-    TestOzoneRpcClient.setStorageContainerLocationClient(
-        storageContainerLocationClient);
-    TestOzoneRpcClient.setStore(store);
-    TestOzoneRpcClient.setScmId(SCM_ID);
-  }
-
-  /**
-   * Tests successful completion of the following operations when a gRPC
-   * block token is used:
-   * 1. getKey
-   * 2. writeChunk
-   */
-  @Test
-  public void testPutKeySuccessWithBlockToken() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    long currentTime = Time.now();
-
-    String value = "sample value";
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    for (int i = 0; i < 10; i++) {
-      String keyName = UUID.randomUUID().toString();
-
-      try (OzoneOutputStream out = bucket.createKey(keyName,
-          value.getBytes().length, ReplicationType.STAND_ALONE,
-          ReplicationFactor.ONE, new HashMap<>())) {
-        out.write(value.getBytes());
-      }
-
-      OzoneKey key = bucket.getKey(keyName);
-      Assert.assertEquals(keyName, key.getName());
-      byte[] fileContent;
-      try(OzoneInputStream is = bucket.readKey(keyName)) {
-        fileContent = new byte[value.getBytes().length];
-        is.read(fileContent);
-      }
-
-      Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-          keyName, ReplicationType.STAND_ALONE,
-          ReplicationFactor.ONE));
-      Assert.assertEquals(value, new String(fileContent));
-      Assert.assertTrue(key.getCreationTime() >= currentTime);
-      Assert.assertTrue(key.getModificationTime() >= currentTime);
-    }
-  }
-
-  /**
-   * Tests failure of the following operations when a gRPC block token is
-   * not present:
-   * 1. getKey
-   * 2. writeChunk
-   */
-  @Test
-  @Ignore("Needs to be moved out of this class as  client setup is static")
-  public void testKeyOpFailureWithoutBlockToken() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String value = "sample value";
-    BlockTokenVerifier.setTestStub(true);
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    for (int i = 0; i < 10; i++) {
-      String keyName = UUID.randomUUID().toString();
-
-      try (OzoneOutputStream out = bucket.createKey(keyName,
-          value.getBytes().length, ReplicationType.STAND_ALONE,
-          ReplicationFactor.ONE, new HashMap<>())) {
-        LambdaTestUtils.intercept(IOException.class, "UNAUTHENTICATED: Fail " +
-                "to find any token ",
-            () -> out.write(value.getBytes()));
-      }
-
-      OzoneKey key = bucket.getKey(keyName);
-      Assert.assertEquals(keyName, key.getName());
-      LambdaTestUtils.intercept(IOException.class, "Failed to authenticate" +
-              " with GRPC XceiverServer with Ozone block token.",
-          () -> bucket.readKey(keyName));
-    }
-    BlockTokenVerifier.setTestStub(false);
-  }
-
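-  /**
-   * Returns true only if every block location of the key lives in a
-   * container whose replication type and factor match the requested ones.
-   */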
-  private boolean verifyRatisReplication(String volumeName, String bucketName,
-      String keyName, ReplicationType type, ReplicationFactor factor)
-      throws IOException {
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-    HddsProtos.ReplicationType replicationType =
-        HddsProtos.ReplicationType.valueOf(type.toString());
-    HddsProtos.ReplicationFactor replicationFactor =
-        HddsProtos.ReplicationFactor.valueOf(factor.getValue());
-    OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs);
-    for (OmKeyLocationInfo info:
-        keyInfo.getLatestVersionLocations().getLocationList()) {
-      ContainerInfo container =
-          storageContainerLocationClient.getContainer(info.getContainerID());
-      if (!container.getReplicationFactor().equals(replicationFactor) || (
-          container.getReplicationType() != replicationType)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Close OzoneClient and shutdown MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() throws IOException {
-    if (ozClient != null) {
-      ozClient.close();
-    }
-
-    if (storageContainerLocationClient != null) {
-      storageContainerLocationClient.close();
-    }
-
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
deleted file mode 100644
index 9b59349..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
+++ /dev/null
@@ -1,463 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
-import org.apache.hadoop.hdds.scm.XceiverClientRatis;
-import org.apache.hadoop.hdds.scm.XceiverClientReply;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.protocol.GroupMismatchException;
-import org.apache.ratis.protocol.RaftRetryFailureException;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.UUID;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * This class verifies the watchForCommit Handling by xceiverClient.
- */
-public class TestWatchForCommit {
-
-  private MiniOzoneCluster cluster;
-  private OzoneClient client;
-  private ObjectStore objectStore;
-  private String volumeName;
-  private String bucketName;
-  private String keyString;
-  private int chunkSize;
-  private int flushSize;
-  private int maxFlushSize;
-  private int blockSize;
-  private StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private static String containerOwner = "OZONE";
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  private void startCluster(OzoneConfiguration conf) throws Exception {
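-    // Use small, deterministic buffer sizes so that chunk, flush and commit
-    // boundaries are easy to reason about in the assertions below.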
-    chunkSize = 100;
-    flushSize = 2 * chunkSize;
-    maxFlushSize = 2 * flushSize;
-    blockSize = 2 * maxFlushSize;
-
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY,
-        1, TimeUnit.SECONDS);
-
-    conf.setQuietMode(false);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(7)
-        .setBlockSize(blockSize)
-        .setChunkSize(chunkSize)
-        .setStreamBufferFlushSize(flushSize)
-        .setStreamBufferMaxSize(maxFlushSize)
-        .setStreamBufferSizeUnit(StorageUnit.BYTES)
-        .build();
-    cluster.waitForClusterToBeReady();
-    //the easiest way to create an open container is creating a key
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    keyString = UUID.randomUUID().toString();
-    volumeName = "watchforcommithandlingtest";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-    storageContainerLocationClient = cluster
-        .getStorageContainerLocationClient();
-  }
-
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  private void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  private String getKeyName() {
-    return UUID.randomUUID().toString();
-  }
-
-  @Test
-  public void testWatchForCommitWithKeyWrite() throws Exception {
-    // in this case, the watch request should fail with
-    // RaftRetryFailureException, which will be captured in keyOutputStream,
-    // and the write will fail over to a different block
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 20,
-        TimeUnit.SECONDS);
-    conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 20);
-    conf.setTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
-        1, TimeUnit.SECONDS);
-    startCluster(conf);
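-    // Snapshot the current op-count metrics so the deltas produced by this
-    // test can be asserted on below.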
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long writeChunkCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long putBlockCount = metrics.getContainerOpCountMetrics(
-        ContainerProtos.Type.PutBlock);
-    long pendingWriteChunkCount =  metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.WriteChunk);
-    long pendingPutBlockCount = metrics.getContainerOpsMetrics(
-        ContainerProtos.Type.PutBlock);
-    long totalOpCount = metrics.getTotalOpCount();
-    String keyName = getKeyName();
-    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    int dataLength = maxFlushSize + 50;
-    // write data more than 1 chunk
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
-    key.write(data1);
-    // since it is hitting the full-buffer condition, it will call
-    // watchForCommit and complete at least a putBlock for the first
-    // flushSize worth of data
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)
-            <= pendingWriteChunkCount + 2);
-    Assert.assertTrue(
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)
-            <= pendingPutBlockCount + 1);
-    Assert.assertEquals(writeChunkCount + 4,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 2,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 6,
-        metrics.getTotalOpCount());
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-    KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream();
-
-    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
-    OutputStream stream = keyOutputStream.getStreamEntries().get(0)
-        .getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-    // we have just written more data than the flush size (2 chunks); at this
-    // point the buffer pool will have 4 buffers allocated, each of chunk size
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    // writtenDataLength as well flushedDataLength will be updated here
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-    Assert.assertEquals(maxFlushSize,
-        blockOutputStream.getTotalDataFlushedLength());
-    // since data equal to maxBufferSize has been written, this will be a
-    // blocking call and hence will wait for at least flushSize worth of data
-    // to get acked by all servers right here
-    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
-    // watchForCommit will clean up at least one entry from the map, where
-    // each entry corresponds to flushSize worth of data
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1);
-    // Now do a flush. This will flush the data and update the flush length and
-    // the map.
-    key.flush();
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 5,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 3,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 8,
-        metrics.getTotalOpCount());
-    // Since the data in the buffer is already flushed, flush here will have
-    // no impact on the counters and data structures
-    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
-    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
-    Assert.assertEquals(dataLength,
-        blockOutputStream.getTotalDataFlushedLength());
-    // flush will make sure one more entry gets updated in the map
-    Assert.assertTrue(
-        blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2);
-    XceiverClientRatis raftClient =
-        (XceiverClientRatis) blockOutputStream.getXceiverClient();
-    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
-    Pipeline pipeline = raftClient.getPipeline();
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
-    // again write more data than the max buffer limit. This will call
-    // watchForCommit again. Since the commit will happen 2-way, the
-    // commitInfoMap will get updated for the servers which are alive.
-    // 4 writeChunks (maxFlushSize) + 2 putBlocks will be discarded here
-    // once the exception is hit
-    key.write(data1);
-    // As a part of handling the exception, the 4 failed writeChunks will be
-    // rewritten, plus one partial chunk, plus two putBlocks for flushSize
-    // and one flush for the partial chunk
-    key.flush();
-    Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream
-        .getIoException()) instanceof RaftRetryFailureException);
-    // Make sure the retryCount is reset after the exception is handled
-    Assert.assertEquals(0, keyOutputStream.getRetryCount());
-    // now close the stream, It will update the ack length after watchForCommit
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
-    key.close();
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert
-        .assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
-    Assert.assertEquals(pendingWriteChunkCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(pendingPutBlockCount,
-        metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(writeChunkCount + 14,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
-    Assert.assertEquals(putBlockCount + 8,
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
-    Assert.assertEquals(totalOpCount + 22,
-        metrics.getTotalOpCount());
-    Assert
-        .assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
-    // make sure the bufferPool is empty
-    Assert
-        .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
-    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    validateData(keyName, data1);
-    shutdown();
-  }
-
-  @Test
-  public void testWatchForCommitWithSmallerTimeoutValue() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 3,
-        TimeUnit.SECONDS);
-    conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 20);
-    startCluster(conf);
-    XceiverClientManager clientManager = new XceiverClientManager(conf);
-    ContainerWithPipeline container1 = storageContainerLocationClient
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
-    XceiverClientSpi xceiverClient = clientManager
-        .acquireClient(container1.getPipeline());
-    Assert.assertEquals(1, xceiverClient.getRefcount());
-    Assert.assertEquals(container1.getPipeline(),
-        xceiverClient.getPipeline());
-    Pipeline pipeline = xceiverClient.getPipeline();
-    XceiverClientReply reply = xceiverClient.sendCommandAsync(
-        ContainerTestHelper.getCreateContainerRequest(
-            container1.getContainerInfo().getContainerID(),
-            xceiverClient.getPipeline()));
-    reply.getResponse().get();
-    long index = reply.getLogIndex();
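-    // Take down two of the three datanodes so that a majority commit can no
-    // longer be reached for subsequent log indices.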
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
-    try {
-      // just watch for a log index which is not updated in the commitInfo
-      // map and for which no log index has been generated in Ratis.
-      // The basic idea here is just to test whether it throws an exception.
-      xceiverClient
-          .watchForCommit(index + new Random().nextInt(100) + 10, 3000);
-      Assert.fail("expected exception not thrown");
-    } catch (Exception e) {
-      Assert.assertTrue(
-          HddsClientUtils.checkForException(e) instanceof TimeoutException);
-    }
-    // After releasing the xceiverClient, this connection should be closed
-    // and any container operations should fail
-    clientManager.releaseClient(xceiverClient, false);
-    shutdown();
-  }
-
-  @Test
-  public void testWatchForCommitForRetryfailure() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT,
-        100, TimeUnit.SECONDS);
-    conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 20);
-    startCluster(conf);
-    XceiverClientManager clientManager = new XceiverClientManager(conf);
-    ContainerWithPipeline container1 = storageContainerLocationClient
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
-    XceiverClientSpi xceiverClient = clientManager
-        .acquireClient(container1.getPipeline());
-    Assert.assertEquals(1, xceiverClient.getRefcount());
-    Assert.assertEquals(container1.getPipeline(),
-        xceiverClient.getPipeline());
-    Pipeline pipeline = xceiverClient.getPipeline();
-    XceiverClientReply reply = xceiverClient.sendCommandAsync(
-        ContainerTestHelper.getCreateContainerRequest(
-            container1.getContainerInfo().getContainerID(),
-            xceiverClient.getPipeline()));
-    reply.getResponse().get();
-    long index = reply.getLogIndex();
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
-    try {
-      // just watch for a log index which is not updated in the commitInfo
-      // map and for which no log index has been generated in Ratis.
-      // The basic idea here is just to test whether it throws an exception.
-      xceiverClient
-          .watchForCommit(index + new Random().nextInt(100) + 10, 20000);
-      Assert.fail("expected exception not thrown");
-    } catch (Exception e) {
-      Assert.assertTrue(e instanceof ExecutionException);
-      // since the timeout value is quite long, the watch request will either
-      // fail with NotReplicatedException, RetryFailureException or
-      // RuntimeException
-      Assert.assertFalse(HddsClientUtils
-          .checkForException(e) instanceof TimeoutException);
-    }
-    clientManager.releaseClient(xceiverClient, false);
-    shutdown();
-  }
-
-  @Test
-  public void test2WayCommitForTimeoutException() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 3,
-        TimeUnit.SECONDS);
-    conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 20);
-    startCluster(conf);
-    GenericTestUtils.LogCapturer logCapturer =
-        GenericTestUtils.LogCapturer.captureLogs(XceiverClientRatis.LOG);
-    XceiverClientManager clientManager = new XceiverClientManager(conf);
-
-    ContainerWithPipeline container1 = storageContainerLocationClient
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
-    XceiverClientSpi xceiverClient = clientManager
-        .acquireClient(container1.getPipeline());
-    Assert.assertEquals(1, xceiverClient.getRefcount());
-    Assert.assertEquals(container1.getPipeline(),
-        xceiverClient.getPipeline());
-    Pipeline pipeline = xceiverClient.getPipeline();
-    XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
-    XceiverClientReply reply = xceiverClient.sendCommandAsync(
-        ContainerTestHelper.getCreateContainerRequest(
-            container1.getContainerInfo().getContainerID(),
-            xceiverClient.getPipeline()));
-    reply.getResponse().get();
-    Assert.assertEquals(3, ratisClient.getCommitInfoMap().size());
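-    // Stop one datanode so that the close-container command can only be
-    // committed by the two remaining replicas (a 2-way commit).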
-    cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
-    reply = xceiverClient.sendCommandAsync(ContainerTestHelper
-        .getCloseContainer(pipeline,
-            container1.getContainerInfo().getContainerID()));
-    reply.getResponse().get();
-    xceiverClient.watchForCommit(reply.getLogIndex(), 3000);
-
-    // commitInfo Map will be reduced to 2 here
-    Assert.assertEquals(2, ratisClient.getCommitInfoMap().size());
-    clientManager.releaseClient(xceiverClient, false);
-    Assert.assertTrue(logCapturer.getOutput().contains("3 way commit failed"));
-    Assert.assertTrue(logCapturer.getOutput().contains("TimeoutException"));
-    Assert
-        .assertTrue(logCapturer.getOutput().contains("Committed by majority"));
-    logCapturer.stopCapturing();
-    shutdown();
-  }
-
-  @Test
-  public void testWatchForCommitForGroupMismatchException() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 20,
-        TimeUnit.SECONDS);
-    conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 20);
-
-    // mark the node stale early so that the pipeline gets destroyed quickly
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
-    startCluster(conf);
-    GenericTestUtils.LogCapturer logCapturer =
-        GenericTestUtils.LogCapturer.captureLogs(XceiverClientRatis.LOG);
-    XceiverClientManager clientManager = new XceiverClientManager(conf);
-
-    ContainerWithPipeline container1 = storageContainerLocationClient
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
-    XceiverClientSpi xceiverClient = clientManager
-        .acquireClient(container1.getPipeline());
-    Assert.assertEquals(1, xceiverClient.getRefcount());
-    Assert.assertEquals(container1.getPipeline(),
-        xceiverClient.getPipeline());
-    Pipeline pipeline = xceiverClient.getPipeline();
-    XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
-    long containerId = container1.getContainerInfo().getContainerID();
-    XceiverClientReply reply = xceiverClient.sendCommandAsync(
-        ContainerTestHelper.getCreateContainerRequest(containerId,
-            xceiverClient.getPipeline()));
-    reply.getResponse().get();
-    Assert.assertEquals(3, ratisClient.getCommitInfoMap().size());
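-    // Tear down the pipeline; a subsequent watch on it is then expected to
-    // fail with GroupMismatchException.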
-    List<Pipeline> pipelineList = new ArrayList<>();
-    pipelineList.add(pipeline);
-    ContainerTestHelper.waitForPipelineClose(pipelineList, cluster);
-    try {
-      // just watch for a log index which is not updated in the commitInfo
-      // map and for which no log index has been generated in Ratis.
-      // The basic idea here is just to test whether it throws an exception.
-      xceiverClient
-          .watchForCommit(reply.getLogIndex() + new Random().nextInt(100) + 10,
-              20000);
-      Assert.fail("Expected exception not thrown");
-    } catch(Exception e) {
-      Assert.assertTrue(HddsClientUtils
-          .checkForException(e) instanceof GroupMismatchException);
-    }
-    clientManager.releaseClient(xceiverClient, false);
-    shutdown();
-  }
-
-  private OzoneOutputStream createKey(String keyName, ReplicationType type,
-      long size) throws Exception {
-    return ContainerTestHelper
-        .createKey(keyName, type, size, objectStore, volumeName, bucketName);
-  }
-
-  private void validateData(String keyName, byte[] data) throws Exception {
-    ContainerTestHelper
-        .validateData(keyName, data, objectStore, volumeName, bucketName);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/package-info.java
deleted file mode 100644
index 0f48495..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rpc;
-
-/**
- * This package contains test class for Ozone rpc client library.
- */
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
deleted file mode 100644
index 395bda0..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ /dev/null
@@ -1,907 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container;
-
-import java.io.IOException;
-import java.net.ServerSocket;
-import java.nio.ByteBuffer;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Random;
-import java.util.UUID;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto.Builder;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
-import org.apache.hadoop.security.token.Token;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.server.impl.RaftServerImpl;
-import org.apache.ratis.server.impl.RaftServerProxy;
-import org.apache.ratis.statemachine.StateMachine;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.junit.Assert;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Helpers for container tests.
- */
-public final class ContainerTestHelper {
-  public static final Logger LOG = LoggerFactory.getLogger(
-      ContainerTestHelper.class);
-  private static Random r = new Random();
-
-  public static final long CONTAINER_MAX_SIZE =
-      (long) StorageUnit.GB.toBytes(1);
-
-  /**
-   * Never constructed.
-   */
-  private ContainerTestHelper() {
-  }
-
-  // TODO: mock multi-node pipeline
-  /**
-   * Create a pipeline with single node replica.
-   *
-   * @return Pipeline with single node in it.
-   * @throws IOException
-   */
-  public static Pipeline createSingleNodePipeline() throws
-      IOException {
-    return createPipeline(1);
-  }
-
-  public static String createLocalAddress() throws IOException {
-    try(ServerSocket s = new ServerSocket(0)) {
-      return "127.0.0.1:" + s.getLocalPort();
-    }
-  }
-  public static DatanodeDetails createDatanodeDetails() throws IOException {
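-    // Bind an ephemeral local port and reuse it for the standalone, Ratis
-    // and REST ports of the synthetic datanode details below.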
-    ServerSocket socket = new ServerSocket(0);
-    int port = socket.getLocalPort();
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, port);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, port);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, port);
-    DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
-        .setUuid(UUID.randomUUID().toString())
-        .setIpAddress(socket.getInetAddress().getHostAddress())
-        .setHostName(socket.getInetAddress().getHostName())
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort)
-        .build();
-
-    socket.close();
-    return datanodeDetails;
-  }
-
-  /**
-   * Create a pipeline with the given number of nodes.
-   *
-   * @return Pipeline with the requested number of nodes in it.
-   * @throws IOException
-   */
-  public static Pipeline createPipeline(int numNodes)
-      throws IOException {
-    Preconditions.checkArgument(numNodes >= 1);
-    final List<DatanodeDetails> ids = new ArrayList<>(numNodes);
-    for(int i = 0; i < numNodes; i++) {
-      ids.add(createDatanodeDetails());
-    }
-    return createPipeline(ids);
-  }
-
-  public static Pipeline createPipeline(
-      Iterable<DatanodeDetails> ids) throws IOException {
-    Objects.requireNonNull(ids, "ids == null");
-    Preconditions.checkArgument(ids.iterator().hasNext());
-    List<DatanodeDetails> dns = new ArrayList<>();
-    ids.forEach(dns::add);
-    Pipeline pipeline = Pipeline.newBuilder()
-        .setState(Pipeline.PipelineState.OPEN)
-        .setId(PipelineID.randomId())
-        .setType(HddsProtos.ReplicationType.STAND_ALONE)
-        .setFactor(ReplicationFactor.ONE)
-        .setNodes(dns)
-        .build();
-    return pipeline;
-  }
-
-  /**
-   * Creates a ChunkInfo for testing.
-   *
-   * @param keyID - ID of the key
-   * @param seqNo - Chunk number.
-   * @return ChunkInfo
-   * @throws IOException
-   */
-  public static ChunkInfo getChunk(long keyID, int seqNo, long offset,
-      long len) throws IOException {
-
-    ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", keyID,
-        seqNo), offset, len);
-    return info;
-  }
-
-  /**
-   * Generates some data of the requested len.
-   *
-   * @param len - Number of bytes.
-   * @return byte array with valid data.
-   */
-  public static ByteBuffer getData(int len) {
-    byte[] data = new byte[len];
-    r.nextBytes(data);
-    return ByteBuffer.wrap(data);
-  }
-
-  /**
-   * Computes the hash and sets the value correctly.
-   *
-   * @param info - chunk info.
-   * @param data - data array
-   * @throws NoSuchAlgorithmException
-   */
-  public static void setDataChecksum(ChunkInfo info, ByteBuffer data)
-      throws OzoneChecksumException {
-    Checksum checksum = new Checksum();
-    info.setChecksumData(checksum.computeChecksum(data));
-  }
-
-  /**
-   * Returns a writeChunk Request.
-   *
-   * @param pipeline - A set of machines where this container lives.
-   * @param blockID - Block ID of the chunk.
-   * @param datalen - Length of data.
-   * @return ContainerCommandRequestProto
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  public static ContainerCommandRequestProto getWriteChunkRequest(
-      Pipeline pipeline, BlockID blockID, int datalen) throws IOException {
-    LOG.trace("writeChunk {} (blockID={}) to pipeline=",
-        datalen, blockID, pipeline);
-    ContainerProtos.WriteChunkRequestProto.Builder writeRequest =
-        ContainerProtos.WriteChunkRequestProto
-            .newBuilder();
-
-    writeRequest.setBlockID(blockID.getDatanodeBlockIDProtobuf());
-
-    ByteBuffer data = getData(datalen);
-    ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
-    setDataChecksum(info, data);
-
-    writeRequest.setChunkData(info.getProtoBufMessage());
-    writeRequest.setData(ByteString.copyFrom(data));
-
-    Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.WriteChunk);
-    request.setContainerID(blockID.getContainerID());
-    request.setWriteChunk(writeRequest);
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-
-    return request.build();
-  }
-
-  /**
-   * Returns PutSmallFile Request that we can send to the container.
-   *
-   * @param pipeline - Pipeline
-   * @param blockID - Block ID of the small file.
-   * @param dataLen - Number of bytes in the data
-   * @return ContainerCommandRequestProto
-   */
-  public static ContainerCommandRequestProto getWriteSmallFileRequest(
-      Pipeline pipeline, BlockID blockID, int dataLen)
-      throws Exception {
-    ContainerProtos.PutSmallFileRequestProto.Builder smallFileRequest =
-        ContainerProtos.PutSmallFileRequestProto.newBuilder();
-    ByteBuffer data = getData(dataLen);
-    ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, dataLen);
-    setDataChecksum(info, data);
-
-
-    ContainerProtos.PutBlockRequestProto.Builder putRequest =
-        ContainerProtos.PutBlockRequestProto.newBuilder();
-
-    BlockData blockData = new BlockData(blockID);
-    List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
-    newList.add(info.getProtoBufMessage());
-    blockData.setChunks(newList);
-    putRequest.setBlockData(blockData.getProtoBufMessage());
-
-    smallFileRequest.setChunkInfo(info.getProtoBufMessage());
-    smallFileRequest.setData(ByteString.copyFrom(data));
-    smallFileRequest.setBlock(putRequest);
-
-    Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.PutSmallFile);
-    request.setContainerID(blockID.getContainerID());
-    request.setPutSmallFile(smallFileRequest);
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    return request.build();
-  }
-
-
-  public static ContainerCommandRequestProto getReadSmallFileRequest(
-      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putKey)
-      throws Exception {
-    ContainerProtos.GetSmallFileRequestProto.Builder smallFileRequest =
-        ContainerProtos.GetSmallFileRequestProto.newBuilder();
-    ContainerCommandRequestProto getKey = getBlockRequest(pipeline, putKey);
-    smallFileRequest.setBlock(getKey.getGetBlock());
-
-    Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.GetSmallFile);
-    request.setContainerID(getKey.getGetBlock().getBlockID().getContainerID());
-    request.setGetSmallFile(smallFileRequest);
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    return request.build();
-  }
-
-  /**
-   * Returns a read Request.
-   *
-   * @param pipeline pipeline.
-   * @param request writeChunkRequest.
-   * @return Request.
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  public static ContainerCommandRequestProto getReadChunkRequest(
-      Pipeline pipeline, ContainerProtos.WriteChunkRequestProto request)
-      throws IOException, NoSuchAlgorithmException {
-    LOG.trace("readChunk blockID={} from pipeline={}",
-        request.getBlockID(), pipeline);
-
-    ContainerProtos.ReadChunkRequestProto.Builder readRequest =
-        ContainerProtos.ReadChunkRequestProto.newBuilder();
-    readRequest.setBlockID(request.getBlockID());
-    readRequest.setChunkData(request.getChunkData());
-
-    Builder newRequest =
-        ContainerCommandRequestProto.newBuilder();
-    newRequest.setCmdType(ContainerProtos.Type.ReadChunk);
-    newRequest.setContainerID(readRequest.getBlockID().getContainerID());
-    newRequest.setReadChunk(readRequest);
-    newRequest.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    return newRequest.build();
-  }
-
-  /**
-   * Returns a delete Request.
-   *
-   * @param pipeline pipeline.
-   * @param writeRequest - write request
-   * @return request
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  public static ContainerCommandRequestProto getDeleteChunkRequest(
-      Pipeline pipeline, ContainerProtos.WriteChunkRequestProto writeRequest)
-      throws
-      IOException, NoSuchAlgorithmException {
-    LOG.trace("deleteChunk blockID={} from pipeline={}",
-        writeRequest.getBlockID(), pipeline);
-
-    ContainerProtos.DeleteChunkRequestProto.Builder deleteRequest =
-        ContainerProtos.DeleteChunkRequestProto
-            .newBuilder();
-
-    deleteRequest.setChunkData(writeRequest.getChunkData());
-    deleteRequest.setBlockID(writeRequest.getBlockID());
-
-    Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.DeleteChunk);
-    request.setContainerID(writeRequest.getBlockID().getContainerID());
-    request.setDeleteChunk(deleteRequest);
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    return request.build();
-  }
-
-  /**
-   * Returns a create container command for test purposes. There are a bunch of
-   * tests where we need to just send a request and get a reply.
-   *
-   * @return ContainerCommandRequestProto.
-   */
-  public static ContainerCommandRequestProto getCreateContainerRequest(
-      long containerID, Pipeline pipeline) throws IOException {
-    LOG.trace("addContainer: {}", containerID);
-    return getContainerCommandRequestBuilder(containerID, pipeline).build();
-  }
-
-  /**
-   * Returns a create container command with token. There are a bunch of
-   * tests where we need to just send a request and get a reply.
-   *
-   * @return ContainerCommandRequestProto.
-   */
-  public static ContainerCommandRequestProto getCreateContainerRequest(
-      long containerID, Pipeline pipeline, Token token) throws IOException {
-    LOG.trace("addContainer: {}", containerID);
-    return getContainerCommandRequestBuilder(containerID, pipeline)
-        .setEncodedToken(token.encodeToUrlString())
-        .build();
-  }
-
-  private static Builder getContainerCommandRequestBuilder(long containerID,
-      Pipeline pipeline) throws IOException {
-    Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.CreateContainer);
-    request.setContainerID(containerID);
-    request.setCreateContainer(
-        ContainerProtos.CreateContainerRequestProto.getDefaultInstance());
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-
-    return request;
-  }
-
-  /**
-   * Returns a create container command, optionally carrying a block token,
-   * for test purposes. There are a bunch of tests where we need to just
-   * send a request and get a reply.
-   *
-   * @return ContainerCommandRequestProto.
-   */
-  public static ContainerCommandRequestProto getCreateContainerSecureRequest(
-      long containerID, Pipeline pipeline,
-      Token<OzoneBlockTokenIdentifier> token) throws IOException {
-    LOG.trace("addContainer: {}", containerID);
-
-    Builder request = getContainerCommandRequestBuilder(containerID, pipeline);
-    if(token != null){
-      request.setEncodedToken(token.encodeToUrlString());
-    }
-    return request.build();
-  }
-
-  /**
-   * Returns an update container command for test purposes.
-   * Creates container data based on the given metadata,
-   * and requests that an existing container be updated with it.
-   *
-   * @param containerID
-   * @param metaData
-   * @return
-   * @throws IOException
-   */
-  public static ContainerCommandRequestProto getUpdateContainerRequest(
-      long containerID, Map<String, String> metaData) throws IOException {
-    ContainerProtos.UpdateContainerRequestProto.Builder updateRequestBuilder =
-        ContainerProtos.UpdateContainerRequestProto.newBuilder();
-    String[] keys = metaData.keySet().toArray(new String[]{});
-    for(int i=0; i<keys.length; i++) {
-      KeyValue.Builder kvBuilder = KeyValue.newBuilder();
-      kvBuilder.setKey(keys[i]);
-      kvBuilder.setValue(metaData.get(keys[i]));
-      updateRequestBuilder.addMetadata(kvBuilder.build());
-    }
-    Pipeline pipeline =
-        ContainerTestHelper.createSingleNodePipeline();
-
-    Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.UpdateContainer);
-    request.setContainerID(containerID);
-    request.setUpdateContainer(updateRequestBuilder.build());
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    return request.build();
-  }
-  /**
-   * Returns a create container response for test purposes. There are a bunch of
-   * tests where we need to just send a request and get a reply.
-   *
-   * @return ContainerCommandRequestProto.
-   */
-  public static ContainerCommandResponseProto
-      getCreateContainerResponse(ContainerCommandRequestProto request) {
-
-    ContainerCommandResponseProto.Builder response =
-        ContainerCommandResponseProto.newBuilder();
-    response.setCmdType(ContainerProtos.Type.CreateContainer);
-    response.setTraceID(request.getTraceID());
-    response.setCreateContainer(
-        ContainerProtos.CreateContainerResponseProto.getDefaultInstance());
-    response.setResult(ContainerProtos.Result.SUCCESS);
-    return response.build();
-  }
-
-  /**
-   * Returns the PutBlockRequest for test purpose.
-   * @param pipeline - pipeline.
-   * @param writeRequest - Write Chunk Request.
-   * @return - Request
-   */
-  public static ContainerCommandRequestProto getPutBlockRequest(
-      Pipeline pipeline, ContainerProtos.WriteChunkRequestProto writeRequest)
-      throws IOException {
-    LOG.trace("putBlock: {} to pipeline={}",
-        writeRequest.getBlockID());
-
-    ContainerProtos.PutBlockRequestProto.Builder putRequest =
-        ContainerProtos.PutBlockRequestProto.newBuilder();
-
-    BlockData blockData = new BlockData(
-        BlockID.getFromProtobuf(writeRequest.getBlockID()));
-    List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
-    newList.add(writeRequest.getChunkData());
-    blockData.setChunks(newList);
-    blockData.setBlockCommitSequenceId(0);
-    putRequest.setBlockData(blockData.getProtoBufMessage());
-
-    Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.PutBlock);
-    request.setContainerID(blockData.getContainerID());
-    request.setPutBlock(putRequest);
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    return request.build();
-  }
-
-  /**
-   * Gets a GetBlockRequest for test purpose.
-   * @param  pipeline - pipeline
-   * @param putBlockRequest - putBlockRequest.
-   * @return - Request
-   */
-  public static ContainerCommandRequestProto getBlockRequest(
-      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest)
-      throws IOException {
-    ContainerProtos.DatanodeBlockID blockID =
-        putBlockRequest.getBlockData().getBlockID();
-    LOG.trace("getKey: blockID={}", blockID);
-
-    ContainerProtos.GetBlockRequestProto.Builder getRequest =
-        ContainerProtos.GetBlockRequestProto.newBuilder();
-    getRequest.setBlockID(blockID);
-
-    Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.GetBlock);
-    request.setContainerID(blockID.getContainerID());
-    request.setGetBlock(getRequest);
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    return request.build();
-  }
-
-  /**
-   * Verify the response against the request.
-   *
-   * @param request - Request
-   * @param response - Response
-   */
-  public static void verifyGetBlock(ContainerCommandRequestProto request,
-      ContainerCommandResponseProto response, int expectedChunksCount) {
-    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-    Assert.assertEquals(expectedChunksCount,
-        response.getGetBlock().getBlockData().getChunksCount());
-  }
-
-  /**
-   * @param pipeline - pipeline.
-   * @param putBlockRequest - putBlockRequest.
-   * @return - Request
-   */
-  public static ContainerCommandRequestProto getDeleteBlockRequest(
-      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest)
-      throws IOException {
-    ContainerProtos.DatanodeBlockID blockID = putBlockRequest.getBlockData()
-        .getBlockID();
-    LOG.trace("deleteBlock: name={}", blockID);
-    ContainerProtos.DeleteBlockRequestProto.Builder delRequest =
-        ContainerProtos.DeleteBlockRequestProto.newBuilder();
-    delRequest.setBlockID(blockID);
-    Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.DeleteBlock);
-    request.setContainerID(blockID.getContainerID());
-    request.setDeleteBlock(delRequest);
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    return request.build();
-  }
-
-  /**
-   * Returns a close container request.
-   * @param pipeline - pipeline
-   * @param containerID - ID of the container.
-   * @return ContainerCommandRequestProto.
-   */
-  public static ContainerCommandRequestProto getCloseContainer(
-      Pipeline pipeline, long containerID) throws IOException {
-    ContainerProtos.ContainerCommandRequestProto cmd =
-        ContainerCommandRequestProto.newBuilder()
-            .setCmdType(ContainerProtos.Type.CloseContainer)
-            .setContainerID(containerID)
-            .setCloseContainer(
-                ContainerProtos.CloseContainerRequestProto.getDefaultInstance())
-            .setDatanodeUuid(pipeline.getFirstNode().getUuidString())
-            .build();
-
-    return cmd;
-  }
-
-  /**
-   * Returns a simple request without traceId.
-   * @param pipeline - pipeline
-   * @param containerID - ID of the container.
-   * @return ContainerCommandRequestProto without traceId.
-   */
-  public static ContainerCommandRequestProto getRequestWithoutTraceId(
-      Pipeline pipeline, long containerID) throws IOException {
-    Preconditions.checkNotNull(pipeline);
-    ContainerProtos.ContainerCommandRequestProto cmd =
-        ContainerCommandRequestProto.newBuilder()
-            .setCmdType(ContainerProtos.Type.CloseContainer)
-            .setContainerID(containerID)
-            .setCloseContainer(
-                ContainerProtos.CloseContainerRequestProto.getDefaultInstance())
-            .setDatanodeUuid(pipeline.getFirstNode().getUuidString())
-            .build();
-    return cmd;
-  }
-
-  /**
-   * Returns a delete container request.
-   * @param pipeline - pipeline
-   * @return ContainerCommandRequestProto.
-   */
-  public static ContainerCommandRequestProto getDeleteContainer(
-      Pipeline pipeline, long containerID, boolean forceDelete)
-      throws IOException {
-    Preconditions.checkNotNull(pipeline);
-    ContainerProtos.DeleteContainerRequestProto deleteRequest =
-        ContainerProtos.DeleteContainerRequestProto.newBuilder().
-            setForceDelete(forceDelete).build();
-    return ContainerCommandRequestProto.newBuilder()
-        .setCmdType(ContainerProtos.Type.DeleteContainer)
-        .setContainerID(containerID)
-        .setDeleteContainer(deleteRequest)
-        .setDatanodeUuid(pipeline.getFirstNode().getUuidString())
-        .build();
-  }
-
-  private static void sleep(long milliseconds) {
-    try {
-      Thread.sleep(milliseconds);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-    }
-  }
-
-  public static BlockID getTestBlockID(long containerID) {
-    // Add 2ms delay so that localID based on UtcTime
-    // won't collide.
-    sleep(2);
-    return new BlockID(containerID, HddsUtils.getUtcTime());
-  }
-
-  public static long getTestContainerID() {
-    return HddsUtils.getUtcTime();
-  }
-
-  public static boolean isContainerClosed(MiniOzoneCluster cluster,
-      long containerID, DatanodeDetails datanode) {
-    ContainerData containerData;
-    for (HddsDatanodeService datanodeService : cluster.getHddsDatanodes()) {
-      if (datanode.equals(datanodeService.getDatanodeDetails())) {
-        Container container =
-            datanodeService.getDatanodeStateMachine().getContainer()
-                .getContainerSet().getContainer(containerID);
-        if (container != null) {
-          containerData = container.getContainerData();
-          return containerData.isClosed();
-        }
-      }
-    }
-    return false;
-  }
-
-  public static boolean isContainerPresent(MiniOzoneCluster cluster,
-      long containerID, DatanodeDetails datanode) {
-    for (HddsDatanodeService datanodeService : cluster.getHddsDatanodes()) {
-      if (datanode.equals(datanodeService.getDatanodeDetails())) {
-        Container container =
-            datanodeService.getDatanodeStateMachine().getContainer()
-                .getContainerSet().getContainer(containerID);
-        if (container != null) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  public static OzoneOutputStream createKey(String keyName,
-      ReplicationType type, long size, ObjectStore objectStore,
-      String volumeName, String bucketName) throws Exception {
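-    // STAND_ALONE keys are exercised with a single replica; all other types
-    // default to a replication factor of three.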
-    org.apache.hadoop.hdds.client.ReplicationFactor factor =
-        type == ReplicationType.STAND_ALONE ?
-            org.apache.hadoop.hdds.client.ReplicationFactor.ONE :
-            org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
-    return objectStore.getVolume(volumeName).getBucket(bucketName)
-        .createKey(keyName, size, type, factor, new HashMap<>());
-  }
-
-  public static OzoneOutputStream createKey(String keyName,
-      ReplicationType type,
-      org.apache.hadoop.hdds.client.ReplicationFactor factor, long size,
-      ObjectStore objectStore, String volumeName, String bucketName)
-      throws Exception {
-    return objectStore.getVolume(volumeName).getBucket(bucketName)
-        .createKey(keyName, size, type, factor, new HashMap<>());
-  }
-
-  public static void validateData(String keyName, byte[] data,
-      ObjectStore objectStore, String volumeName, String bucketName)
-      throws Exception {
-    byte[] readData = new byte[data.length];
-    OzoneInputStream is =
-        objectStore.getVolume(volumeName).getBucket(bucketName)
-            .readKey(keyName);
-    is.read(readData);
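-    // Compare digests of the expected and the read-back bytes rather than
-    // the raw arrays themselves.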
-    MessageDigest sha1 = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-    sha1.update(data);
-    MessageDigest sha2 = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-    sha2.update(readData);
-    Assert.assertArrayEquals(sha1.digest(), sha2.digest());
-    is.close();
-  }
-
-  public static String getFixedLengthString(String string, int length) {
-    return String.format("%1$" + length + "s", string);
-  }
-
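-  /**
-   * Requests closure of every container referenced by the given key's block
-   * output streams and waits until they are closed on the datanodes.
-   */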
-  public static void waitForContainerClose(OzoneOutputStream outputStream,
-      MiniOzoneCluster cluster) throws Exception {
-    KeyOutputStream keyOutputStream =
-        (KeyOutputStream) outputStream.getOutputStream();
-    List<BlockOutputStreamEntry> streamEntryList =
-        keyOutputStream.getStreamEntries();
-    List<Long> containerIdList = new ArrayList<>();
-    for (BlockOutputStreamEntry entry : streamEntryList) {
-      long id = entry.getBlockID().getContainerID();
-      if (!containerIdList.contains(id)) {
-        containerIdList.add(id);
-      }
-    }
-    Assert.assertFalse(containerIdList.isEmpty());
-    waitForContainerClose(cluster, containerIdList.toArray(new Long[0]));
-  }
-
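-  /**
-   * Destroys the pipelines used by the given key and waits until they are
-   * torn down on all participating datanodes.
-   */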
-  public static void waitForPipelineClose(OzoneOutputStream outputStream,
-      MiniOzoneCluster cluster, boolean waitForContainerCreation)
-      throws Exception {
-    KeyOutputStream keyOutputStream =
-        (KeyOutputStream) outputStream.getOutputStream();
-    List<BlockOutputStreamEntry> streamEntryList =
-        keyOutputStream.getStreamEntries();
-    List<Long> containerIdList = new ArrayList<>();
-    for (BlockOutputStreamEntry entry : streamEntryList) {
-      long id = entry.getBlockID().getContainerID();
-      if (!containerIdList.contains(id)) {
-        containerIdList.add(id);
-      }
-    }
-    Assert.assertFalse(containerIdList.isEmpty());
-    waitForPipelineClose(cluster, waitForContainerCreation,
-        containerIdList.toArray(new Long[0]));
-  }
-
-  public static void waitForPipelineClose(MiniOzoneCluster cluster,
-      boolean waitForContainerCreation, Long... containerIdList)
-      throws TimeoutException, InterruptedException, IOException {
-    List<Pipeline> pipelineList = new ArrayList<>();
-    for (long containerID : containerIdList) {
-      ContainerInfo container =
-          cluster.getStorageContainerManager().getContainerManager()
-              .getContainer(ContainerID.valueof(containerID));
-      Pipeline pipeline =
-          cluster.getStorageContainerManager().getPipelineManager()
-              .getPipeline(container.getPipelineID());
-      if (!pipelineList.contains(pipeline)) {
-        pipelineList.add(pipeline);
-      }
-      List<DatanodeDetails> datanodes = pipeline.getNodes();
-
-      if (waitForContainerCreation) {
-        for (DatanodeDetails details : datanodes) {
-          // Client will issue write chunk and it will create the container on
-          // datanodes.
-          // wait for the container to be created
-          GenericTestUtils
-              .waitFor(() -> isContainerPresent(cluster, containerID, details),
-                  500, 100 * 1000);
-          Assert.assertTrue(isContainerPresent(cluster, containerID, details));
-
-          // make sure the container gets created first
-          Assert.assertFalse(ContainerTestHelper
-              .isContainerClosed(cluster, containerID, details));
-        }
-      }
-    }
-    waitForPipelineClose(pipelineList, cluster);
-  }
-
-  public static void waitForPipelineClose(List<Pipeline> pipelineList,
-      MiniOzoneCluster cluster)
-      throws TimeoutException, InterruptedException, IOException {
-    for (Pipeline pipeline1 : pipelineList) {
-      // issue pipeline destroy command
-      cluster.getStorageContainerManager().getPipelineManager()
-          .finalizeAndDestroyPipeline(pipeline1, false);
-    }
-
-    // wait for the pipeline to get destroyed in the datanodes
-    for (Pipeline pipeline : pipelineList) {
-      for (DatanodeDetails dn : pipeline.getNodes()) {
-        XceiverServerSpi server =
-            cluster.getHddsDatanodes().get(cluster.getHddsDatanodeIndex(dn))
-                .getDatanodeStateMachine().getContainer().getWriteChannel();
-        Assert.assertTrue(server instanceof XceiverServerRatis);
-        XceiverServerRatis raftServer = (XceiverServerRatis) server;
-        GenericTestUtils.waitFor(
-            () -> (!raftServer.getPipelineIds().contains(pipeline.getId())),
-            500, 100 * 1000);
-      }
-    }
-  }
-
-  public static void waitForContainerClose(MiniOzoneCluster cluster,
-      Long... containerIdList)
-      throws ContainerNotFoundException, PipelineNotFoundException,
-      TimeoutException, InterruptedException {
-    List<Pipeline> pipelineList = new ArrayList<>();
-    for (long containerID : containerIdList) {
-      ContainerInfo container =
-          cluster.getStorageContainerManager().getContainerManager()
-              .getContainer(ContainerID.valueof(containerID));
-      Pipeline pipeline =
-          cluster.getStorageContainerManager().getPipelineManager()
-              .getPipeline(container.getPipelineID());
-      pipelineList.add(pipeline);
-      List<DatanodeDetails> datanodes = pipeline.getNodes();
-
-      for (DatanodeDetails details : datanodes) {
-        // The client's write-chunk request creates the container on the
-        // datanodes, so wait for the container to be created.
-        GenericTestUtils
-            .waitFor(() -> isContainerPresent(cluster, containerID, details),
-                500, 100 * 1000);
-        Assert.assertTrue(isContainerPresent(cluster, containerID, details));
-
-        // make sure the container gets created first
-        Assert.assertFalse(ContainerTestHelper
-            .isContainerClosed(cluster, containerID, details));
-        // send the order to close the container
-        cluster.getStorageContainerManager().getEventQueue()
-            .fireEvent(SCMEvents.CLOSE_CONTAINER,
-                ContainerID.valueof(containerID));
-      }
-    }
-    int index = 0;
-    for (long containerID : containerIdList) {
-      Pipeline pipeline = pipelineList.get(index);
-      List<DatanodeDetails> datanodes = pipeline.getNodes();
-      // The condition below avoids the case where the container has been
-      // allocated but not yet used by the client; in that case the container
-      // is never created.
-      for (DatanodeDetails datanodeDetails : datanodes) {
-        GenericTestUtils.waitFor(
-            () -> isContainerClosed(cluster, containerID, datanodeDetails), 500,
-            15 * 1000);
-        // Double-check that it is really closed
-        // (waitFor would also have thrown on timeout).
-        Assert.assertTrue(
-            isContainerClosed(cluster, containerID, datanodeDetails));
-      }
-      index++;
-    }
-  }
-
-  public static StateMachine getStateMachine(MiniOzoneCluster cluster)
-      throws Exception {
-    return getStateMachine(cluster.getHddsDatanodes().get(0), null);
-  }
-
-  private static RaftServerImpl getRaftServerImpl(HddsDatanodeService dn,
-      Pipeline pipeline) throws Exception {
-    XceiverServerSpi server = dn.getDatanodeStateMachine().
-        getContainer().getWriteChannel();
-    RaftServerProxy proxy =
-        (RaftServerProxy) (((XceiverServerRatis) server).getServer());
-    RaftGroupId groupId =
-        pipeline == null ? proxy.getGroupIds().iterator().next() :
-            RatisHelper.newRaftGroup(pipeline).getGroupId();
-    return proxy.getImpl(groupId);
-  }
-
-  public static StateMachine getStateMachine(HddsDatanodeService dn,
-      Pipeline pipeline) throws Exception {
-    return getRaftServerImpl(dn, pipeline).getStateMachine();
-  }
-
-  public static boolean isRatisLeader(HddsDatanodeService dn, Pipeline pipeline)
-      throws Exception {
-    return getRaftServerImpl(dn, pipeline).isLeader();
-  }
-
-  public static boolean isRatisFollower(HddsDatanodeService dn,
-      Pipeline pipeline) throws Exception {
-    return getRaftServerImpl(dn, pipeline).isFollower();
-  }
-}
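For orientation, the Ratis helpers above compose naturally: a test that needs the current leader of a pipeline can scan the cluster's datanodes with isRatisLeader. A minimal sketch, assuming the same MiniOzoneCluster and helper APIs used in the deleted class (the method name findLeader is illustrative, not from the original file):

    static HddsDatanodeService findLeader(MiniOzoneCluster cluster,
        Pipeline pipeline) throws Exception {
      // Scan every datanode and return the one whose Ratis server is
      // currently the leader of the given pipeline's Raft group.
      for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
        if (isRatisLeader(dn, pipeline)) {
          return dn;
        }
      }
      throw new AssertionError(
          "No leader found for pipeline " + pipeline.getId());
    }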
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
deleted file mode 100644
index 524c3bdb..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .DatanodeBlockID;
-import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer;
-import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import static org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer
-    .writeChunkForContainer;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-/**
- * Tests ozone containers replication.
- */
-public class TestContainerReplication {
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  private OzoneConfiguration conf;
-  private MiniOzoneCluster cluster;
-
-  @Before
-  public void setup() throws Exception {
-    conf = newOzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2)
-        .setRandomContainerPort(true).build();
-  }
-
-  @After
-  public void teardown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testContainerReplication() throws Exception {
-    //GIVEN
-    long containerId = 1L;
-
-    cluster.waitForClusterToBeReady();
-
-    HddsDatanodeService firstDatanode = cluster.getHddsDatanodes().get(0);
-
-    //copy from the first datanode
-    List<DatanodeDetails> sourceDatanodes = new ArrayList<>();
-    sourceDatanodes.add(firstDatanode.getDatanodeDetails());
-
-    Pipeline sourcePipelines =
-        ContainerTestHelper.createPipeline(sourceDatanodes);
-
-    //create a new client
-    XceiverClientSpi client = new XceiverClientGrpc(sourcePipelines, conf);
-    client.connect();
-
-    //New container for testing
-    TestOzoneContainer.createContainerForTesting(client, containerId);
-
-    ContainerCommandRequestProto requestProto =
-        writeChunkForContainer(client, containerId, 1024);
-
-    DatanodeBlockID blockID = requestProto.getWriteChunk().getBlockID();
-
-    // Put Block to the test container
-    ContainerCommandRequestProto putBlockRequest = ContainerTestHelper
-        .getPutBlockRequest(sourcePipelines, requestProto.getWriteChunk());
-
-    ContainerCommandResponseProto response =
-        client.sendCommand(putBlockRequest);
-
-    Assert.assertNotNull(response);
-    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-
-    HddsDatanodeService destinationDatanode =
-        chooseDatanodeWithoutContainer(sourcePipelines,
-            cluster.getHddsDatanodes());
-
-    // Close the container
-    ContainerCommandRequestProto closeContainerRequest = ContainerTestHelper
-        .getCloseContainer(sourcePipelines, containerId);
-    response = client.sendCommand(closeContainerRequest);
-    Assert.assertNotNull(response);
-    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-
-    //WHEN: send the order to replicate the container
-    cluster.getStorageContainerManager().getScmNodeManager()
-        .addDatanodeCommand(destinationDatanode.getDatanodeDetails().getUuid(),
-            new ReplicateContainerCommand(containerId,
-                sourcePipelines.getNodes()));
-
-    DatanodeStateMachine destinationDatanodeDatanodeStateMachine =
-        destinationDatanode.getDatanodeStateMachine();
-
-    //wait for the replication
-    GenericTestUtils.waitFor(()
-        -> destinationDatanodeDatanodeStateMachine.getSupervisor()
-        .getReplicationCounter() > 0, 1000, 20_000);
-
-    OzoneContainer ozoneContainer =
-        destinationDatanodeDatanodeStateMachine.getContainer();
-
-    Container container =
-        ozoneContainer
-            .getContainerSet().getContainer(containerId);
-
-    Assert.assertNotNull(
-        "Container is not replicated to the destination datanode",
-        container);
-
-    Assert.assertNotNull(
-        "ContainerData of the replicated container is null",
-        container.getContainerData());
-
-    KeyValueHandler handler = (KeyValueHandler) ozoneContainer.getDispatcher()
-        .getHandler(ContainerType.KeyValueContainer);
-
-    BlockData key = handler.getBlockManager()
-        .getBlock(container, BlockID.getFromProtobuf(blockID));
-
-    Assert.assertNotNull(key);
-    Assert.assertEquals(1, key.getChunks().size());
-    Assert.assertEquals(requestProto.getWriteChunk().getChunkData(),
-        key.getChunks().get(0));
-  }
-
-  private HddsDatanodeService chooseDatanodeWithoutContainer(Pipeline pipeline,
-      List<HddsDatanodeService> dataNodes) {
-    for (HddsDatanodeService datanode : dataNodes) {
-      if (!pipeline.getNodes().contains(datanode.getDatanodeDetails())) {
-        return datanode;
-      }
-    }
-    throw new AssertionError(
-        "No datanode outside of the pipeline");
-  }
-
-  private static OzoneConfiguration newOzoneConfiguration() {
-    return new OzoneConfiguration();
-  }
-
-}
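The replication wait in the test above is the pattern worth noting: the SCM command is delivered asynchronously via heartbeats, so the test polls the destination's supervisor counter instead of sleeping. A hedged sketch of that idiom, using only the APIs exercised by the deleted test (the method name waitForFirstReplication is illustrative):

    static void waitForFirstReplication(HddsDatanodeService destination)
        throws Exception {
      // ReplicateContainerCommand completion is asynchronous; poll the
      // supervisor's replication counter until at least one copy finishes.
      DatanodeStateMachine dsm = destination.getDatanodeStateMachine();
      GenericTestUtils.waitFor(
          () -> dsm.getSupervisor().getReplicationCounter() > 0, 1000, 20_000);
    }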
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
deleted file mode 100644
index e1d1a95..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ /dev/null
@@ -1,465 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common;
-
-import com.google.common.collect.Lists;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl;
-import org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy;
-import org.apache.hadoop.ozone.container.keyvalue.statemachine.background
-    .BlockDeletingService;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.apache.hadoop.hdds.utils.BackgroundService;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.BeforeClass;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.charset.Charset;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
-
-/**
- * Tests for the block deleting service.
- */
-public class TestBlockDeletingService {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestBlockDeletingService.class);
-
-  private static File testRoot;
-  private static String scmId;
-  private static String clusterID;
-
-  @BeforeClass
-  public static void init() throws IOException {
-    testRoot = GenericTestUtils
-        .getTestDir(TestBlockDeletingService.class.getSimpleName());
-    if (testRoot.exists()) {
-      FileUtils.cleanDirectory(testRoot);
-    }
-    scmId = UUID.randomUUID().toString();
-    clusterID = UUID.randomUUID().toString();
-  }
-
-  @AfterClass
-  public static void cleanup() throws IOException {
-    FileUtils.deleteDirectory(testRoot);
-  }
-
-  /**
-   * A helper method to create some blocks and put them in the deleting
-   * state. It directly updates container.db and creates some fake chunk
-   * files for testing.
-   */
-  private void createToDeleteBlocks(ContainerSet containerSet,
-      Configuration conf, int numOfContainers, int numOfBlocksPerContainer,
-      int numOfChunksPerBlock) throws IOException {
-    for (int x = 0; x < numOfContainers; x++) {
-      conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
-      long containerID = ContainerTestHelper.getTestContainerID();
-      KeyValueContainerData data = new KeyValueContainerData(containerID,
-          ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(),
-          UUID.randomUUID().toString());
-      data.closeContainer();
-      Container container = new KeyValueContainer(data, conf);
-      container.create(new VolumeSet(scmId, clusterID, conf),
-          new RoundRobinVolumeChoosingPolicy(), scmId);
-      containerSet.addContainer(container);
-      data = (KeyValueContainerData) containerSet.getContainer(
-          containerID).getContainerData();
-      try (ReferenceCountedDB metadata = BlockUtils.getDB(data, conf)) {
-        for (int j = 0; j < numOfBlocksPerContainer; j++) {
-          BlockID blockID =
-              ContainerTestHelper.getTestBlockID(containerID);
-          String deleteStateName = OzoneConsts.DELETING_KEY_PREFIX +
-              blockID.getLocalID();
-          BlockData kd = new BlockData(blockID);
-          List<ContainerProtos.ChunkInfo> chunks = Lists.newArrayList();
-          for (int k = 0; k < numOfChunksPerBlock; k++) {
-            // offset doesn't matter here
-            String chunkName = blockID.getLocalID() + "_chunk_" + k;
-            File chunk = new File(data.getChunksPath(), chunkName);
-            FileUtils.writeStringToFile(chunk, "a chunk",
-                Charset.defaultCharset());
-            LOG.info("Creating file {}", chunk.getAbsolutePath());
-            // make sure file exists
-            Assert.assertTrue(chunk.isFile() && chunk.exists());
-            ContainerProtos.ChunkInfo info =
-                ContainerProtos.ChunkInfo.newBuilder()
-                    .setChunkName(chunk.getAbsolutePath())
-                    .setLen(0)
-                    .setOffset(0)
-                    .setChecksumData(Checksum.getNoChecksumDataProto())
-                    .build();
-            chunks.add(info);
-          }
-          kd.setChunks(chunks);
-          metadata.getStore().put(DFSUtil.string2Bytes(deleteStateName),
-              kd.getProtoBufMessage().toByteArray());
-        }
-      }
-    }
-  }
-
-  /**
-   * Run the service's runDeletingTasks and wait until the given
-   * number of processing rounds has completed.
-   */
-  private void deleteAndWait(BlockDeletingServiceTestImpl service,
-      int timesOfProcessed) throws TimeoutException, InterruptedException {
-    service.runDeletingTasks();
-    GenericTestUtils.waitFor(()
-        -> service.getTimesOfProcessed() == timesOfProcessed, 100, 3000);
-  }
-
-  /**
-   * Get the count of blocks pending deletion from the DB;
-   * note this info is parsed from container.db.
-   */
-  private int getUnderDeletionBlocksCount(ReferenceCountedDB meta)
-      throws IOException {
-    List<Map.Entry<byte[], byte[]>> underDeletionBlocks =
-        meta.getStore().getRangeKVs(null, 100,
-            new MetadataKeyFilters.KeyPrefixFilter()
-            .addFilter(OzoneConsts.DELETING_KEY_PREFIX));
-    return underDeletionBlocks.size();
-  }
-
-  private int getDeletedBlocksCount(ReferenceCountedDB db) throws IOException {
-    List<Map.Entry<byte[], byte[]>> underDeletionBlocks =
-        db.getStore().getRangeKVs(null, 100,
-            new MetadataKeyFilters.KeyPrefixFilter()
-            .addFilter(OzoneConsts.DELETED_KEY_PREFIX));
-    return underDeletionBlocks.size();
-  }
-
-  @Test
-  public void testBlockDeletion() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    conf.set(
-        ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
-        RandomContainerDeletionChoosingPolicy.class.getName());
-    conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
-    conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
-    ContainerSet containerSet = new ContainerSet();
-    createToDeleteBlocks(containerSet, conf, 1, 3, 1);
-
-    BlockDeletingServiceTestImpl svc =
-        getBlockDeletinService(containerSet, conf, 1000);
-    svc.start();
-    GenericTestUtils.waitFor(svc::isStarted, 100, 3000);
-
-    // Ensure 1 container was created
-    List<ContainerData> containerData = Lists.newArrayList();
-    containerSet.listContainer(0L, 1, containerData);
-    Assert.assertEquals(1, containerData.size());
-
-    try (ReferenceCountedDB meta = BlockUtils.getDB(
-        (KeyValueContainerData) containerData.get(0), conf)) {
-      Map<Long, Container<?>> containerMap = containerSet.getContainerMapCopy();
-      // NOTE: this test assumes that every container is a KeyValueContainer
-      // and has a DeleteTransactionId in KeyValueContainerData. If other
-      // container types are added, this test should be revisited.
-      long transactionId = ((KeyValueContainerData) containerMap
-          .get(containerData.get(0).getContainerID()).getContainerData())
-          .getDeleteTransactionId();
-
-      // The delete transaction ID should still be 0 before any block
-      // deletion has happened.
-      Assert.assertEquals(0, transactionId);
-
-      // Ensure there are 3 blocks under deletion and 0 deleted blocks
-      Assert.assertEquals(3, getUnderDeletionBlocksCount(meta));
-      Assert.assertEquals(0, getDeletedBlocksCount(meta));
-
-      // An interval will delete 1 * 2 blocks
-      deleteAndWait(svc, 1);
-      Assert.assertEquals(1, getUnderDeletionBlocksCount(meta));
-      Assert.assertEquals(2, getDeletedBlocksCount(meta));
-
-      deleteAndWait(svc, 2);
-      Assert.assertEquals(0, getUnderDeletionBlocksCount(meta));
-      Assert.assertEquals(3, getDeletedBlocksCount(meta));
-
-      deleteAndWait(svc, 3);
-      Assert.assertEquals(0, getUnderDeletionBlocksCount(meta));
-      Assert.assertEquals(3, getDeletedBlocksCount(meta));
-    }
-
-    svc.shutdown();
-  }
-
-  @Test
-  public void testShutdownService() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    conf.set(
-        ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
-        RandomContainerDeletionChoosingPolicy.class.getName());
-    conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500,
-        TimeUnit.MILLISECONDS);
-    conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
-    conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 10);
-    ContainerSet containerSet = new ContainerSet();
-    // Create 1 container with 100 blocks
-    createToDeleteBlocks(containerSet, conf, 1, 100, 1);
-
-    BlockDeletingServiceTestImpl service =
-        getBlockDeletinService(containerSet, conf, 1000);
-    service.start();
-    GenericTestUtils.waitFor(service::isStarted, 100, 3000);
-
-    // Run some deleting tasks and verify there are threads running
-    service.runDeletingTasks();
-    GenericTestUtils.waitFor(() -> service.getThreadCount() > 0, 100, 1000);
-
-    // Wait for 1 or 2 intervals
-    Thread.sleep(1000);
-
-    // Shutdown service and verify all threads are stopped
-    service.shutdown();
-    GenericTestUtils.waitFor(() -> service.getThreadCount() == 0, 100, 1000);
-  }
-
-  @Test
-  public void testBlockDeletionTimeout() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    conf.set(
-        ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
-        RandomContainerDeletionChoosingPolicy.class.getName());
-    conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
-    conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
-    ContainerSet containerSet = new ContainerSet();
-    createToDeleteBlocks(containerSet, conf, 1, 3, 1);
-
-    // Set the timeout to 1 ns to reliably trigger the timeout behavior.
-    long timeout = 1;
-    OzoneContainer ozoneContainer = Mockito.mock(OzoneContainer.class);
-    Mockito.when(ozoneContainer.getContainerSet())
-        .thenReturn(containerSet);
-    Mockito.when(ozoneContainer.getWriteChannel())
-        .thenReturn(null);
-    BlockDeletingService svc = new BlockDeletingService(ozoneContainer,
-        TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS,
-        conf);
-    svc.start();
-
-    LogCapturer log = LogCapturer.captureLogs(BackgroundService.LOG);
-    GenericTestUtils.waitFor(() -> {
-      if (log.getOutput().contains(
-          "Background task executes timed out, retrying in next interval")) {
-        log.stopCapturing();
-        return true;
-      }
-
-      return false;
-    }, 1000, 100000);
-
-    log.stopCapturing();
-    svc.shutdown();
-
-    // Test the normal case, which has no timeout limitation.
-    timeout = 0;
-    createToDeleteBlocks(containerSet, conf, 1, 3, 1);
-    svc = new BlockDeletingService(ozoneContainer,
-        TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.MILLISECONDS,
-        conf);
-    svc.start();
-
-    // get container meta data
-    List<ContainerData> containerData = Lists.newArrayList();
-    containerSet.listContainer(0L, 1, containerData);
-    try (ReferenceCountedDB meta = BlockUtils.getDB(
-        (KeyValueContainerData) containerData.get(0), conf)) {
-
-      LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG);
-      GenericTestUtils.waitFor(() -> {
-        try {
-          if (getUnderDeletionBlocksCount(meta) == 0) {
-            return true;
-          }
-        } catch (IOException ignored) {
-        }
-        return false;
-      }, 1000, 100000);
-      newLog.stopCapturing();
-
-      // Block deletion should have succeeded, so the timed-out warning
-      // must not appear in the log.
-      Assert.assertFalse(newLog.getOutput().contains(
-          "Background task executes timed out, retrying in next interval"));
-    }
-    svc.shutdown();
-  }
-
-  private BlockDeletingServiceTestImpl getBlockDeletinService(
-      ContainerSet containerSet, Configuration conf, int timeout) {
-    OzoneContainer ozoneContainer = Mockito.mock(OzoneContainer.class);
-    Mockito.when(ozoneContainer.getContainerSet()).thenReturn(containerSet);
-    Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(null);
-    return new BlockDeletingServiceTestImpl(ozoneContainer, timeout, conf);
-  }
-
-  @Test(timeout = 30000)
-  public void testContainerThrottle() throws Exception {
-    // Properties :
-    //  - Number of containers : 2
-    //  - Number of blocks per container : 1
-    //  - Number of chunks per block : 10
-    //  - Container limit per interval : 1
-    //  - Block limit per container : 1
-    //
-    // Each time only 1 container can be processed, so each time
-    // 1 block from 1 container can be deleted.
-    Configuration conf = new OzoneConfiguration();
-    // Process 1 container per interval
-    conf.set(
-        ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
-        RandomContainerDeletionChoosingPolicy.class.getName());
-    conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 1);
-    conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 1);
-    ContainerSet containerSet = new ContainerSet();
-    createToDeleteBlocks(containerSet, conf, 2, 1, 10);
-
-    BlockDeletingServiceTestImpl service =
-        getBlockDeletinService(containerSet, conf, 1000);
-    service.start();
-
-    try {
-      GenericTestUtils.waitFor(service::isStarted, 100, 3000);
-      // The 1st interval processes 1 container (1 block with 10 chunks).
-      deleteAndWait(service, 1);
-      Assert.assertEquals(10, getNumberOfChunksInContainers(containerSet));
-
-      AtomicInteger timesToProcess = new AtomicInteger(1);
-      GenericTestUtils.waitFor(() -> {
-        try {
-          timesToProcess.incrementAndGet();
-          deleteAndWait(service, timesToProcess.get());
-          if (getNumberOfChunksInContainers(containerSet) == 0) {
-            return true;
-          }
-        } catch (Exception ignored) {}
-        return false;
-      }, 100, 100000);
-      Assert.assertEquals(0, getNumberOfChunksInContainers(containerSet));
-    } finally {
-      service.shutdown();
-    }
-  }
-
-  @Test(timeout = 30000)
-  public void testBlockThrottle() throws Exception {
-    // Properties :
-    //  - Number of containers : 5
-    //  - Number of blocks per container : 3
-    //  - Number of chunks per block : 1
-    //  - Container limit per interval : 10
-    //  - Block limit per container : 2
-    //
-    // Each interval can scan all containers, but only 2 blocks per
-    // container can actually be deleted, so two passes are required to
-    // clean up all blocks.
-    Configuration conf = new OzoneConfiguration();
-    conf.set(
-        ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
-        RandomContainerDeletionChoosingPolicy.class.getName());
-    conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
-    conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
-    ContainerSet containerSet = new ContainerSet();
-    createToDeleteBlocks(containerSet, conf, 5, 3, 1);
-
-    // Make sure chunks are created
-    Assert.assertEquals(15, getNumberOfChunksInContainers(containerSet));
-    OzoneContainer ozoneContainer = Mockito.mock(OzoneContainer.class);
-    Mockito.when(ozoneContainer.getContainerSet()).thenReturn(containerSet);
-    Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(null);
-    BlockDeletingServiceTestImpl service =
-        getBlockDeletinService(containerSet, conf, 1000);
-    service.start();
-
-    try {
-      GenericTestUtils.waitFor(service::isStarted, 100, 3000);
-      // Total blocks = 3 * 5 = 15
-      // block per task = 2
-      // number of containers = 5
-      // each interval deletes at most 5 * 2 = 10 blocks
-      deleteAndWait(service, 1);
-      Assert.assertEquals(5, getNumberOfChunksInContainers(containerSet));
-
-      // There are only 5 blocks left to delete
-      deleteAndWait(service, 2);
-      Assert.assertEquals(0, getNumberOfChunksInContainers(containerSet));
-    } finally {
-      service.shutdown();
-    }
-  }
-
-  private int getNumberOfChunksInContainers(ContainerSet containerSet) {
-    Iterator<Container<?>> iterator = containerSet.getContainerIterator();
-    int numChunks = 0;
-    while (iterator.hasNext()) {
-      Container container = iterator.next();
-      File chunkDir = FileUtils.getFile(
-          ((KeyValueContainerData) container.getContainerData())
-              .getChunksPath());
-      numChunks += chunkDir.listFiles().length;
-    }
-    return numChunks;
-  }
-}
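The throttling arithmetic in the tests above comes down to two knobs: how many containers are processed per interval and how many blocks are deleted per container. A short configuration sketch, assuming the same OzoneConfigKeys constants the deleted test imports; with these values one interval deletes at most 10 * 2 = 20 blocks:

    OzoneConfiguration conf = new OzoneConfiguration();
    // At most 10 containers are considered per deleting interval...
    conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
    // ...and at most 2 blocks are deleted from each chosen container.
    conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
    // The background service wakes up every 500 ms.
    conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500,
        TimeUnit.MILLISECONDS);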
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
deleted file mode 100644
index 2973a763..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ThreadLocalRandom;
-
-/**
- * Tests for BlockData chunk bookkeeping.
- */
-public class TestBlockData {
-  static final Logger LOG = LoggerFactory.getLogger(TestBlockData.class);
-  @Rule
-  public TestRule timeout = new Timeout(10000);
-
-  static ContainerProtos.ChunkInfo buildChunkInfo(String name, long offset,
-      long len) {
-    return ContainerProtos.ChunkInfo.newBuilder()
-        .setChunkName(name)
-        .setOffset(offset)
-        .setLen(len)
-        .setChecksumData(Checksum.getNoChecksumDataProto())
-        .build();
-  }
-
-  @Test
-  public void testAddAndRemove() {
-    final BlockData computed = new BlockData(null);
-    final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
-
-    assertChunks(expected, computed);
-    long offset = 0;
-    int n = 5;
-    for (int i = 0; i < n; i++) {
-      offset += assertAddChunk(expected, computed, offset);
-    }
-
-    while (!expected.isEmpty()) {
-      removeChunk(expected, computed);
-    }
-  }
-
-  private static int chunkCount = 0;
-  static ContainerProtos.ChunkInfo addChunk(
-      List<ContainerProtos.ChunkInfo> expected, long offset) {
-    final long length = ThreadLocalRandom.current().nextLong(1000);
-    final ContainerProtos.ChunkInfo info =
-        buildChunkInfo("c" + ++chunkCount, offset, length);
-    expected.add(info);
-    return info;
-  }
-
-  static long assertAddChunk(List<ContainerProtos.ChunkInfo> expected,
-      BlockData computed, long offset) {
-    final ContainerProtos.ChunkInfo info = addChunk(expected, offset);
-    LOG.info("addChunk: " + toString(info));
-    computed.addChunk(info);
-    assertChunks(expected, computed);
-    return info.getLen();
-  }
-
-  static void removeChunk(List<ContainerProtos.ChunkInfo> expected,
-      BlockData computed) {
-    final int i = ThreadLocalRandom.current().nextInt(expected.size());
-    final ContainerProtos.ChunkInfo info = expected.remove(i);
-    LOG.info("removeChunk: " + toString(info));
-    computed.removeChunk(info);
-    assertChunks(expected, computed);
-  }
-
-  static void assertChunks(List<ContainerProtos.ChunkInfo> expected,
-      BlockData computed) {
-    final List<ContainerProtos.ChunkInfo> computedChunks = computed.getChunks();
-    Assert.assertEquals("expected=" + expected + "\ncomputed=" +
-        computedChunks, expected, computedChunks);
-    Assert.assertEquals(expected.stream().mapToLong(i -> i.getLen()).sum(),
-        computed.getSize());
-  }
-
-  static String toString(ContainerProtos.ChunkInfo info) {
-    return info.getChunkName() + ":" + info.getOffset() + "," + info.getLen();
-  }
-
-  static String toString(List<ContainerProtos.ChunkInfo> infos) {
-    return infos.stream().map(TestBlockData::toString)
-        .reduce((left, right) -> left + ", " + right)
-        .orElse("");
-  }
-
-  @Test
-  public void testSetChunks() {
-    final BlockData computed = new BlockData(null);
-    final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
-
-    assertChunks(expected, computed);
-    long offset = 0;
-    int n = 5;
-    for (int i = 0; i < n; i++) {
-      offset += addChunk(expected, offset).getLen();
-      LOG.info("setChunk: " + toString(expected));
-      computed.setChunks(expected);
-      assertChunks(expected, computed);
-    }
-  }
-}
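The invariant TestBlockData exercises is simple: BlockData tracks its chunk list, and getSize() always equals the sum of the chunk lengths. A minimal sketch using the test's own buildChunkInfo helper (the concrete names, offsets, and lengths are arbitrary):

    BlockData block = new BlockData(null);
    // Two contiguous chunks: 100 bytes at offset 0, then 50 bytes at offset 100.
    block.addChunk(buildChunkInfo("c1", 0, 100));
    block.addChunk(buildChunkInfo("c2", 100, 50));
    // The block's size is the sum of its chunk lengths.
    Assert.assertEquals(150, block.getSize());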
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
deleted file mode 100644
index b872516..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.impl;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-/**
- * The class for testing container deletion choosing policy.
- */
-public class TestContainerDeletionChoosingPolicy {
-  private static String path;
-  private OzoneContainer ozoneContainer;
-  private ContainerSet containerSet;
-  private OzoneConfiguration conf;
-  private BlockDeletingService blockDeletingService;
-  // the service timeout
-  private static final int SERVICE_TIMEOUT_IN_MILLISECONDS = 0;
-  private static final int SERVICE_INTERVAL_IN_MILLISECONDS = 1000;
-
-  @Before
-  public void init() throws Throwable {
-    conf = new OzoneConfiguration();
-    path = GenericTestUtils
-        .getTempPath(TestContainerDeletionChoosingPolicy.class.getSimpleName());
-  }
-
-  @Test
-  public void testRandomChoosingPolicy() throws IOException {
-    File containerDir = new File(path);
-    if (containerDir.exists()) {
-      FileUtils.deleteDirectory(new File(path));
-    }
-    Assert.assertTrue(containerDir.mkdirs());
-
-    conf.set(
-        ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
-        RandomContainerDeletionChoosingPolicy.class.getName());
-    List<StorageLocation> pathLists = new LinkedList<>();
-    pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath()));
-    containerSet = new ContainerSet();
-
-    int numContainers = 10;
-    for (int i = 0; i < numContainers; i++) {
-      KeyValueContainerData data = new KeyValueContainerData(i,
-          ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(),
-          UUID.randomUUID().toString());
-      data.closeContainer();
-      KeyValueContainer container = new KeyValueContainer(data, conf);
-      containerSet.addContainer(container);
-      Assert.assertTrue(
-          containerSet.getContainerMapCopy()
-              .containsKey(data.getContainerID()));
-    }
-    blockDeletingService = getBlockDeletingService();
-
-    ContainerDeletionChoosingPolicy deletionPolicy =
-        new RandomContainerDeletionChoosingPolicy();
-    List<ContainerData> result0 =
-        blockDeletingService.chooseContainerForBlockDeletion(5, deletionPolicy);
-    Assert.assertEquals(5, result0.size());
-
-    // test random choosing
-    List<ContainerData> result1 = blockDeletingService
-        .chooseContainerForBlockDeletion(numContainers, deletionPolicy);
-    List<ContainerData> result2 = blockDeletingService
-        .chooseContainerForBlockDeletion(numContainers, deletionPolicy);
-
-    boolean hasShuffled = false;
-    for (int i = 0; i < numContainers; i++) {
-      if (result1.get(i).getContainerID()
-           != result2.get(i).getContainerID()) {
-        hasShuffled = true;
-        break;
-      }
-    }
-    Assert.assertTrue("Chosen container results were same", hasShuffled);
-  }
-
-  @Test
-  public void testTopNOrderedChoosingPolicy() throws IOException {
-    File containerDir = new File(path);
-    if (containerDir.exists()) {
-      FileUtils.deleteDirectory(new File(path));
-    }
-    Assert.assertTrue(containerDir.mkdirs());
-
-    conf.set(
-        ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
-        TopNOrderedContainerDeletionChoosingPolicy.class.getName());
-    List<StorageLocation> pathLists = new LinkedList<>();
-    pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath()));
-    containerSet = new ContainerSet();
-
-    int numContainers = 10;
-    Random random = new Random();
-    Map<Long, Integer> name2Count = new HashMap<>();
-    // create [numContainers + 1] containers
-    for (int i = 0; i <= numContainers; i++) {
-      long containerId = RandomUtils.nextLong();
-      KeyValueContainerData data =
-          new KeyValueContainerData(containerId,
-              ContainerTestHelper.CONTAINER_MAX_SIZE,
-              UUID.randomUUID().toString(),
-              UUID.randomUUID().toString());
-      if (i != numContainers) {
-        int deletionBlocks = random.nextInt(numContainers) + 1;
-        data.incrPendingDeletionBlocks(deletionBlocks);
-        name2Count.put(containerId, deletionBlocks);
-      }
-      KeyValueContainer container = new KeyValueContainer(data, conf);
-      data.closeContainer();
-      containerSet.addContainer(container);
-      Assert.assertTrue(
-          containerSet.getContainerMapCopy().containsKey(containerId));
-    }
-
-    blockDeletingService = getBlockDeletingService();
-    ContainerDeletionChoosingPolicy deletionPolicy =
-        new TopNOrderedContainerDeletionChoosingPolicy();
-    List<ContainerData> result0 =
-        blockDeletingService.chooseContainerForBlockDeletion(5, deletionPolicy);
-    Assert.assertEquals(5, result0.size());
-
-    List<ContainerData> result1 = blockDeletingService
-        .chooseContainerForBlockDeletion(numContainers + 1, deletionPolicy);
-    // The container with no pending deletion blocks should not be chosen.
-    Assert.assertEquals(numContainers, result1.size());
-
-    // Verify the ordering of the returned list.
-    int lastCount = Integer.MAX_VALUE;
-    for (ContainerData data : result1) {
-      int currentCount = name2Count.remove(data.getContainerID());
-      // Counts must be non-increasing: each is no greater than the previous.
-      Assert.assertTrue(currentCount > 0 && currentCount <= lastCount);
-      lastCount = currentCount;
-    }
-    // Ensure every container's data was checked.
-    Assert.assertEquals(0, name2Count.size());
-  }
-
-  private BlockDeletingService getBlockDeletingService() {
-    ozoneContainer = Mockito.mock(OzoneContainer.class);
-    Mockito.when(ozoneContainer.getContainerSet()).thenReturn(containerSet);
-    Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(null);
-    blockDeletingService = new BlockDeletingService(ozoneContainer,
-        SERVICE_INTERVAL_IN_MILLISECONDS, SERVICE_TIMEOUT_IN_MILLISECONDS,
-        TimeUnit.MILLISECONDS, conf);
-    return blockDeletingService;
-  }
-}
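Both tests above share the same plumbing: a policy class is named in the configuration, and the block deleting service asks a policy instance for up to N candidate containers per interval. A hedged sketch of that selection step, reusing the conf and blockDeletingService objects from the deleted test:

    conf.set(
        ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
        TopNOrderedContainerDeletionChoosingPolicy.class.getName());
    ContainerDeletionChoosingPolicy policy =
        new TopNOrderedContainerDeletionChoosingPolicy();
    // Ask for up to 5 candidates; containers with more pending deletion
    // blocks are returned first under the TopN policy.
    List<ContainerData> candidates =
        blockDeletingService.chooseContainerForBlockDeletion(5, policy);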
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
deleted file mode 100644
index ed48209..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ /dev/null
@@ -1,897 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import com.google.common.collect.Maps;
-import org.apache.commons.codec.binary.Hex;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.common.ChecksumData;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
-import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.file.DirectoryStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BCSID_MISMATCH;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNKNOWN_BCSID;
-import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
-import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData;
-import static org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Simple tests to verify that container persistence works as expected. Some of
- * these tests are specific to {@link KeyValueContainer}. If a new {@link
- * ContainerProtos.ContainerType} is added, the tests need to be modified.
- */
-public class TestContainerPersistence {
-  private static final String DATANODE_UUID = UUID.randomUUID().toString();
-  private static final String SCM_ID = UUID.randomUUID().toString();
-  private static Logger log =
-      LoggerFactory.getLogger(TestContainerPersistence.class);
-  private static String hddsPath;
-  private static OzoneConfiguration conf;
-  private static ContainerSet containerSet;
-  private static VolumeSet volumeSet;
-  private static VolumeChoosingPolicy volumeChoosingPolicy;
-  private static BlockManager blockManager;
-  private static ChunkManager chunkManager;
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-  private Long containerID = 8888L;
-
-  @BeforeClass
-  public static void init() throws Throwable {
-    conf = new OzoneConfiguration();
-    hddsPath = GenericTestUtils
-        .getTempPath(TestContainerPersistence.class.getSimpleName());
-    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, hddsPath);
-    volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
-  }
-
-  @AfterClass
-  public static void shutdown() throws IOException {
-    FileUtils.deleteDirectory(new File(hddsPath));
-  }
-
-  @Before
-  public void setupPaths() throws IOException {
-    containerSet = new ContainerSet();
-    volumeSet = new VolumeSet(DATANODE_UUID, conf);
-    blockManager = new BlockManagerImpl(conf);
-    chunkManager = new ChunkManagerImpl(true);
-
-    for (String dir : conf.getStrings(ScmConfigKeys.HDDS_DATANODE_DIR_KEY)) {
-      StorageLocation location = StorageLocation.parse(dir);
-      FileUtils.forceMkdir(new File(location.getNormalizedUri()));
-    }
-  }
-
-  @After
-  public void cleanupDir() throws IOException {
-    // Clean up SCM metadata
-    log.info("Deleting {}", hddsPath);
-    FileUtils.deleteDirectory(new File(hddsPath));
-
-    // Clean up SCM datanode container metadata/data
-    for (String dir : conf.getStrings(ScmConfigKeys.HDDS_DATANODE_DIR_KEY)) {
-      StorageLocation location = StorageLocation.parse(dir);
-      FileUtils.deleteDirectory(new File(location.getNormalizedUri()));
-    }
-  }
-
-  private long getTestContainerID() {
-    return ContainerTestHelper.getTestContainerID();
-  }
-
-  private DispatcherContext getDispatcherContext() {
-    return new DispatcherContext.Builder().build();
-  }
-
-  private Container addContainer(ContainerSet cSet, long cID)
-      throws IOException {
-    long commitBytesBefore = 0;
-    long commitBytesAfter = 0;
-    long commitIncrement = 0;
-    KeyValueContainerData data = new KeyValueContainerData(cID,
-        ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(),
-        UUID.randomUUID().toString());
-    data.addMetadata("VOLUME", "shire");
-    data.addMetadata("owner)", "bilbo");
-    KeyValueContainer container = new KeyValueContainer(data, conf);
-    container.create(volumeSet, volumeChoosingPolicy, SCM_ID);
-    commitBytesBefore = container.getContainerData()
-        .getVolume().getCommittedBytes();
-    cSet.addContainer(container);
-    commitBytesAfter = container.getContainerData()
-        .getVolume().getCommittedBytes();
-    commitIncrement = commitBytesAfter - commitBytesBefore;
-    // Did we commit space for the new container?
-    Assert.assertEquals(ContainerTestHelper.CONTAINER_MAX_SIZE,
-        commitIncrement);
-    return container;
-  }
-
-  @Test
-  public void testCreateContainer() throws Exception {
-    long testContainerID = getTestContainerID();
-    addContainer(containerSet, testContainerID);
-    Assert.assertTrue(containerSet.getContainerMapCopy()
-        .containsKey(testContainerID));
-    KeyValueContainerData kvData =
-        (KeyValueContainerData) containerSet.getContainer(testContainerID)
-            .getContainerData();
-
-    Assert.assertNotNull(kvData);
-    Assert.assertTrue(new File(kvData.getMetadataPath()).exists());
-    Assert.assertTrue(new File(kvData.getChunksPath()).exists());
-    Assert.assertTrue(kvData.getDbFile().exists());
-
-    Path meta = kvData.getDbFile().toPath().getParent();
-    Assert.assertTrue(meta != null && Files.exists(meta));
-
-    ReferenceCountedDB store = null;
-    try {
-      store = BlockUtils.getDB(kvData, conf);
-      Assert.assertNotNull(store);
-    } finally {
-      if (store != null) {
-        store.close();
-      }
-    }
-  }
-
-  @Test
-  public void testCreateDuplicateContainer() throws Exception {
-    long testContainerID = getTestContainerID();
-
-    Container container = addContainer(containerSet, testContainerID);
-    try {
-      containerSet.addContainer(container);
-      fail("Expected Exception not thrown.");
-    } catch (IOException ex) {
-      Assert.assertNotNull(ex);
-    }
-  }
-
-  @Test
-  public void testDeleteContainer() throws Exception {
-    long testContainerID1 = getTestContainerID();
-    Thread.sleep(100);
-    long testContainerID2 = getTestContainerID();
-
-    Container container1 = addContainer(containerSet, testContainerID1);
-    container1.close();
-
-    Container container2 = addContainer(containerSet, testContainerID2);
-
-    Assert.assertTrue(containerSet.getContainerMapCopy()
-        .containsKey(testContainerID1));
-    Assert.assertTrue(containerSet.getContainerMapCopy()
-        .containsKey(testContainerID2));
-
-    container1.delete();
-    containerSet.removeContainer(testContainerID1);
-    Assert.assertFalse(containerSet.getContainerMapCopy()
-        .containsKey(testContainerID1));
-
-    // Adding block to a deleted container should fail.
-    exception.expect(StorageContainerException.class);
-    exception.expectMessage("Error opening DB.");
-    BlockID blockID1 = ContainerTestHelper.getTestBlockID(testContainerID1);
-    BlockData someKey1 = new BlockData(blockID1);
-    someKey1.setChunks(new LinkedList<ContainerProtos.ChunkInfo>());
-    blockManager.putBlock(container1, someKey1);
-
-    // Deleting a non-empty container should fail.
-    BlockID blockID2 = ContainerTestHelper.getTestBlockID(testContainerID2);
-    BlockData someKey2 = new BlockData(blockID2);
-    someKey2.setChunks(new LinkedList<ContainerProtos.ChunkInfo>());
-    blockManager.putBlock(container2, someKey2);
-
-    exception.expect(StorageContainerException.class);
-    exception.expectMessage(
-        "Container cannot be deleted because it is not empty.");
-    container2.delete();
-    Assert.assertTrue(containerSet.getContainerMapCopy()
-        .containsKey(testContainerID2));
-  }
-
-  @Test
-  public void testGetContainerReports() throws Exception {
-    final int count = 10;
-    List<Long> containerIDs = new ArrayList<>();
-
-    for (int i = 0; i < count; i++) {
-      long testContainerID = getTestContainerID();
-      Container container = addContainer(containerSet, testContainerID);
-
-      // Close a bunch of containers.
-      if (i % 3 == 0) {
-        container.close();
-      }
-      containerIDs.add(testContainerID);
-    }
-
-    // ContainerSet#getContainerReport currently returns reports for all
-    // containers, open and closed.
-    List<StorageContainerDatanodeProtocolProtos.ContainerReplicaProto> reports =
-        containerSet.getContainerReport().getReportsList();
-    Assert.assertEquals(10, reports.size());
-    for (StorageContainerDatanodeProtocolProtos.ContainerReplicaProto report :
-        reports) {
-      long actualContainerID = report.getContainerID();
-      Assert.assertTrue(containerIDs.remove(actualContainerID));
-    }
-    Assert.assertTrue(containerIDs.isEmpty());
-  }
-
-  /**
-   * This test creates 10 containers, reads them back 5 at a time, and
-   * verifies that we got back all of them.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testListContainer() throws IOException {
-    final int count = 10;
-    final int step = 5;
-
-    Map<Long, ContainerData> testMap = new HashMap<>();
-    for (int x = 0; x < count; x++) {
-      long testContainerID = getTestContainerID();
-      Container container = addContainer(containerSet, testContainerID);
-      testMap.put(testContainerID, container.getContainerData());
-    }
-
-    int counter = 0;
-    long prevKey = 0;
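-    // Page through the container set: each listContainer call returns up to
-    // 'step' containers with IDs >= prevKey, in ascending ID order.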
-    List<ContainerData> results = new LinkedList<>();
-    while (counter < count) {
-      containerSet.listContainer(prevKey, step, results);
-      for (int y = 0; y < results.size(); y++) {
-        testMap.remove(results.get(y).getContainerID());
-      }
-      counter += step;
-      long nextKey = results.get(results.size() - 1).getContainerID();
-
-      // Assert that containers are returned in ascending ID order.
-      Assert.assertTrue(prevKey < nextKey);
-      prevKey = nextKey + 1;
-      results.clear();
-    }
-    // Assert that we listed every container that we had put into
-    // the set.
-    Assert.assertTrue(testMap.isEmpty());
-  }
-
-  private ChunkInfo writeChunkHelper(BlockID blockID) throws IOException {
-    final int datalen = 1024;
-    long commitBytesBefore = 0;
-    long commitBytesAfter = 0;
-    long commitDecrement = 0;
-    long testContainerID = blockID.getContainerID();
-    Container container = containerSet.getContainer(testContainerID);
-    if (container == null) {
-      container = addContainer(containerSet, testContainerID);
-    }
-    ChunkInfo info = getChunk(
-        blockID.getLocalID(), 0, 0, datalen);
-    ByteBuffer data = getData(datalen);
-    setDataChecksum(info, data);
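-    // HDDS volumes reserve (commit) space for open containers up front; a
-    // successful chunk write is expected to release exactly info.getLen()
-    // of that reservation, which the assertion below verifies.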
-    commitBytesBefore = container.getContainerData()
-        .getVolume().getCommittedBytes();
-    chunkManager.writeChunk(container, blockID, info, data,
-        getDispatcherContext());
-    commitBytesAfter = container.getContainerData()
-        .getVolume().getCommittedBytes();
-    commitDecrement = commitBytesBefore - commitBytesAfter;
-    // did we decrement commit bytes by the amount of data we wrote?
-    Assert.assertEquals(info.getLen(), commitDecrement);
-    return info;
-
-  }
-
-  /**
-   * Writes a single chunk.
-   *
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  @Test
-  public void testWriteChunk() throws IOException,
-      NoSuchAlgorithmException {
-    BlockID blockID = ContainerTestHelper.
-        getTestBlockID(getTestContainerID());
-    writeChunkHelper(blockID);
-  }
-
-  /**
-   * Writes many chunks of the same block into different chunk files and
-   * verifies that we have that data in many files.
-   *
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  @Test
-  public void testWriteReadManyChunks() throws IOException {
-    final int datalen = 1024;
-    final int chunkCount = 1024;
-
-    long testContainerID = getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
-
-    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    Map<String, ChunkInfo> fileHashMap = new HashMap<>();
-    for (int x = 0; x < chunkCount; x++) {
-      ChunkInfo info = getChunk(blockID.getLocalID(), x, 0, datalen);
-      ByteBuffer data = getData(datalen);
-      setDataChecksum(info, data);
-      chunkManager.writeChunk(container, blockID, info, data,
-          getDispatcherContext());
-      String fileName = String.format("%s.data.%d", blockID.getLocalID(), x);
-      fileHashMap.put(fileName, info);
-    }
-
-    KeyValueContainerData cNewData =
-        (KeyValueContainerData) container.getContainerData();
-    Assert.assertNotNull(cNewData);
-    Path dataDir = Paths.get(cNewData.getChunksPath());
-
-    String globFormat = String.format("%s.data.*", blockID.getLocalID());
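-    // Chunk files are named <blockLocalID>.data.<chunkIndex>, so this glob
-    // matches every chunk file written for the block above.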
-
-    // Read chunk via file system and verify.
-    int count = 0;
-    try (DirectoryStream<Path> stream =
-             Files.newDirectoryStream(dataDir, globFormat)) {
-      Checksum checksum = new Checksum();
-
-      for (Path fname : stream) {
-        ChecksumData checksumData = checksum
-            .computeChecksum(FileUtils.readFileToByteArray(fname.toFile()));
-        Assert.assertEquals(fileHashMap.get(fname.getFileName().toString())
-            .getChecksumData(), checksumData);
-        count++;
-      }
-      Assert.assertEquals(chunkCount, count);
-
-      // Read chunk via ReadChunk call.
-      for (int x = 0; x < chunkCount; x++) {
-        String fileName = String.format("%s.data.%d", blockID.getLocalID(), x);
-        ChunkInfo info = fileHashMap.get(fileName);
-        ByteBuffer data = chunkManager
-            .readChunk(container, blockID, info, getDispatcherContext());
-        ChecksumData checksumData = checksum.computeChecksum(data);
-        Assert.assertEquals(info.getChecksumData(), checksumData);
-      }
-    }
-  }
-
-  /**
-   * Tests a partial read within a single chunk.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testPartialRead() throws Exception {
-    final int datalen = 1024;
-    final int start = datalen / 4;
-    final int length = datalen / 2;
-
-    long testContainerID = getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
-
-    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    ChunkInfo info = getChunk(
-        blockID.getLocalID(), 0, 0, datalen);
-    ByteBuffer data = getData(datalen);
-    setDataChecksum(info, data);
-    chunkManager.writeChunk(container, blockID, info, data,
-        getDispatcherContext());
-
-    ByteBuffer readData = chunkManager
-        .readChunk(container, blockID, info, getDispatcherContext());
-    assertTrue(data.rewind().equals(readData.rewind()));
-
-    ChunkInfo info2 = getChunk(blockID.getLocalID(), 0, start, length);
-    ByteBuffer readData2 = chunkManager
-        .readChunk(container, blockID, info2, getDispatcherContext());
-    assertEquals(length, info2.getLen());
-    boolean equals =
-        data.position(start).limit(start+length).equals(readData2.rewind());
-    assertTrue(equals);
-  }
-
-  /**
-   * Writes a single chunk, tries to overwrite it without the overwrite
-   * flag, then retries with the overwrite flag set.
-   *
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  @Test
-  public void testOverWrite() throws IOException,
-      NoSuchAlgorithmException {
-    final int datalen = 1024;
-
-    long testContainerID = getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
-
-    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    ChunkInfo info = getChunk(
-        blockID.getLocalID(), 0, 0, datalen);
-    ByteBuffer data = getData(datalen);
-    setDataChecksum(info, data);
-    chunkManager.writeChunk(container, blockID, info, data,
-        getDispatcherContext());
-    data.rewind();
-    chunkManager.writeChunk(container, blockID, info, data,
-        getDispatcherContext());
-    data.rewind();
-    // With the overwrite flag it should work now.
-    info.addMetadata(OzoneConsts.CHUNK_OVERWRITE, "true");
-    chunkManager.writeChunk(container, blockID, info, data,
-        getDispatcherContext());
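-    // Overwrites replace data in place: bytesUsed stays at datalen, while
-    // writeBytes accounts for all three writes.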
-    long bytesUsed = container.getContainerData().getBytesUsed();
-    Assert.assertEquals(datalen, bytesUsed);
-
-    long bytesWrite = container.getContainerData().getWriteBytes();
-    Assert.assertEquals(datalen * 3, bytesWrite);
-  }
-
-  /**
-   * This test writes data as many small writes and reads it back in a
-   * single large read.
-   *
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  @Test
-  public void testMultipleWriteSingleRead() throws IOException,
-      NoSuchAlgorithmException {
-    final int datalen = 1024;
-    final int chunkCount = 1024;
-
-    long testContainerID = getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
-
-    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    MessageDigest oldSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-    for (int x = 0; x < chunkCount; x++) {
-      // we are writing to the same chunk file but at different offsets.
-      long offset = x * datalen;
-      ChunkInfo info = getChunk(
-          blockID.getLocalID(), 0, offset, datalen);
-      ByteBuffer data = getData(datalen);
-      oldSha.update(data);
-      data.rewind();
-      setDataChecksum(info, data);
-      chunkManager.writeChunk(container, blockID, info, data,
-          getDispatcherContext());
-    }
-
-    // Request to read the whole data in a single go.
-    ChunkInfo largeChunk = getChunk(blockID.getLocalID(), 0, 0,
-        datalen * chunkCount);
-    ByteBuffer newdata =
-        chunkManager.readChunk(container, blockID, largeChunk,
-            getDispatcherContext());
-    MessageDigest newSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-    newSha.update(newdata);
-    Assert.assertEquals(Hex.encodeHexString(oldSha.digest()),
-        Hex.encodeHexString(newSha.digest()));
-  }
-
-  /**
-   * Writes a chunk, deletes it, then re-reads to make sure it is gone.
-   *
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  @Test
-  public void testDeleteChunk() throws IOException,
-      NoSuchAlgorithmException {
-    final int datalen = 1024;
-    long testContainerID = getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
-
-    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    ChunkInfo info = getChunk(
-        blockID.getLocalID(), 0, 0, datalen);
-    ByteBuffer data = getData(datalen);
-    setDataChecksum(info, data);
-    chunkManager.writeChunk(container, blockID, info, data,
-        getDispatcherContext());
-    chunkManager.deleteChunk(container, blockID, info);
-    exception.expect(StorageContainerException.class);
-    exception.expectMessage("Unable to find the chunk file.");
-    chunkManager.readChunk(container, blockID, info, getDispatcherContext());
-  }
-
-  /**
-   * Tests a put block and read block.
-   *
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  @Test
-  public void testPutBlock() throws IOException, NoSuchAlgorithmException {
-    long testContainerID = getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
-
-    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    ChunkInfo info = writeChunkHelper(blockID);
-    BlockData blockData = new BlockData(blockID);
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-    chunkList.add(info.getProtoBufMessage());
-    blockData.setChunks(chunkList);
-    blockManager.putBlock(container, blockData);
-    BlockData readBlockData = blockManager.
-        getBlock(container, blockData.getBlockID());
-    ChunkInfo readChunk =
-        ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0));
-    Assert.assertEquals(info.getChecksumData(), readChunk.getChecksumData());
-  }
-
-  /**
-   * Tests a put block and read block with invalid bcsId.
-   *
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  @Test
-  public void testPutBlockWithInvalidBCSId()
-      throws IOException, NoSuchAlgorithmException {
-    long testContainerID = getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
-
-    BlockID blockID1 = ContainerTestHelper.getTestBlockID(testContainerID);
-    ChunkInfo info = writeChunkHelper(blockID1);
-    BlockData blockData = new BlockData(blockID1);
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-    chunkList.add(info.getProtoBufMessage());
-    blockData.setChunks(chunkList);
-    blockData.setBlockCommitSequenceId(3);
-    blockManager.putBlock(container, blockData);
-    chunkList.clear();
-
-    // write a 2nd block
-    BlockID blockID2 = ContainerTestHelper.getTestBlockID(testContainerID);
-    info = writeChunkHelper(blockID2);
-    blockData = new BlockData(blockID2);
-    chunkList.add(info.getProtoBufMessage());
-    blockData.setChunks(chunkList);
-    blockData.setBlockCommitSequenceId(4);
-    blockManager.putBlock(container, blockData);
-    BlockData readBlockData;
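-    // The container's bcsId is now 4. A read with a higher bcsId (5) must
-    // fail with UNKNOWN_BCSID; a read with bcsId 4, which is within the
-    // container's range but does not match blockID1's committed bcsId (3),
-    // must fail with BCSID_MISMATCH.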
-    try {
-      blockID1.setBlockCommitSequenceId(5);
-      // read with bcsId higher than container bcsId
-      blockManager.
-          getBlock(container, blockID1);
-      Assert.fail("Expected exception not thrown");
-    } catch (StorageContainerException sce) {
-      Assert.assertTrue(sce.getResult() == UNKNOWN_BCSID);
-    }
-
-    try {
-      blockID1.setBlockCommitSequenceId(4);
-      // read with bcsId lower than container bcsId but greater than committed
-      // bcsId.
-      blockManager.
-          getBlock(container, blockID1);
-      Assert.fail("Expected exception not thrown");
-    } catch (StorageContainerException sce) {
-      Assert.assertTrue(sce.getResult() == BCSID_MISMATCH);
-    }
-    readBlockData = blockManager.
-        getBlock(container, blockData.getBlockID());
-    ChunkInfo readChunk =
-        ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0));
-    Assert.assertEquals(info.getChecksumData(), readChunk.getChecksumData());
-  }
-
-  /**
-   * Tests put block and read block for a block composed of multiple chunks.
-   *
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  @Test
-  public void testPutBlockWithLotsOfChunks() throws IOException,
-      NoSuchAlgorithmException {
-    final int chunkCount = 2;
-    final int datalen = 1024;
-    long totalSize = 0L;
-    long testContainerID = getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
-    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    List<ChunkInfo> chunkList = new LinkedList<>();
-    ChunkInfo info = writeChunkHelper(blockID);
-    totalSize += datalen;
-    chunkList.add(info);
-    for (int x = 1; x < chunkCount; x++) {
-      // with holes in the front (before x * datalen)
-      info = getChunk(blockID.getLocalID(), x, x * datalen, datalen);
-      ByteBuffer data = getData(datalen);
-      setDataChecksum(info, data);
-      chunkManager.writeChunk(container, blockID, info, data,
-          getDispatcherContext());
-      totalSize += datalen;
-      chunkList.add(info);
-    }
-
-    long bytesUsed = container.getContainerData().getBytesUsed();
-    Assert.assertEquals(totalSize, bytesUsed);
-    long writeBytes = container.getContainerData().getWriteBytes();
-    Assert.assertEquals(chunkCount * datalen, writeBytes);
-    long readCount = container.getContainerData().getReadCount();
-    Assert.assertEquals(0, readCount);
-    long writeCount = container.getContainerData().getWriteCount();
-    Assert.assertEquals(chunkCount, writeCount);
-
-    BlockData blockData = new BlockData(blockID);
-    List<ContainerProtos.ChunkInfo> chunkProtoList = new LinkedList<>();
-    for (ChunkInfo i : chunkList) {
-      chunkProtoList.add(i.getProtoBufMessage());
-    }
-    blockData.setChunks(chunkProtoList);
-    blockManager.putBlock(container, blockData);
-    BlockData readBlockData = blockManager.
-        getBlock(container, blockData.getBlockID());
-    ChunkInfo lastChunk = chunkList.get(chunkList.size() - 1);
-    ChunkInfo readChunk =
-        ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(readBlockData
-            .getChunks().size() - 1));
-    Assert.assertEquals(
-        lastChunk.getChecksumData(), readChunk.getChecksumData());
-  }
-
-  /**
-   * Deletes a block and tries to read it back.
-   *
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  @Test
-  public void testDeleteBlock() throws IOException, NoSuchAlgorithmException {
-    long testContainerID = getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
-    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    ChunkInfo info = writeChunkHelper(blockID);
-    BlockData blockData = new BlockData(blockID);
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-    chunkList.add(info.getProtoBufMessage());
-    blockData.setChunks(chunkList);
-    blockManager.putBlock(container, blockData);
-    blockManager.deleteBlock(container, blockID);
-    exception.expect(StorageContainerException.class);
-    exception.expectMessage("Unable to find the block.");
-    blockManager.getBlock(container, blockData.getBlockID());
-  }
-
-  /**
-   * Tries to delete a block twice.
-   *
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  @Test
-  public void testDeleteBlockTwice() throws IOException,
-      NoSuchAlgorithmException {
-    long testContainerID = getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
-    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    ChunkInfo info = writeChunkHelper(blockID);
-    BlockData blockData = new BlockData(blockID);
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-    chunkList.add(info.getProtoBufMessage());
-    blockData.setChunks(chunkList);
-    blockManager.putBlock(container, blockData);
-    blockManager.deleteBlock(container, blockID);
-    exception.expect(StorageContainerException.class);
-    exception.expectMessage("Unable to find the block.");
-    blockManager.deleteBlock(container, blockID);
-  }
-
-  /**
-   * Tries to update an existing and a non-existing container. Verifies
-   * that both the container map and the persistent data are updated.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testUpdateContainer() throws IOException {
-    long testContainerID = ContainerTestHelper.getTestContainerID();
-    KeyValueContainer container =
-        (KeyValueContainer) addContainer(containerSet, testContainerID);
-
-    File orgContainerFile = container.getContainerFile();
-    Assert.assertTrue(orgContainerFile.exists());
-
-    Map<String, String> newMetadata = Maps.newHashMap();
-    newMetadata.put("VOLUME", "shire_new");
-    newMetadata.put("owner", "bilbo_new");
-
-    container.update(newMetadata, false);
-
-    Assert.assertEquals(1, containerSet.getContainerMapCopy().size());
-    Assert.assertTrue(containerSet.getContainerMapCopy()
-        .containsKey(testContainerID));
-
-    // Verify in-memory map
-    KeyValueContainerData actualNewData = (KeyValueContainerData)
-        containerSet.getContainer(testContainerID).getContainerData();
-    Assert.assertEquals("shire_new",
-        actualNewData.getMetadata().get("VOLUME"));
-    Assert.assertEquals("bilbo_new",
-        actualNewData.getMetadata().get("owner"));
-
-    // Verify container data on disk
-    File containerBaseDir = new File(actualNewData.getMetadataPath())
-        .getParentFile();
-    File newContainerFile = ContainerUtils.getContainerFile(containerBaseDir);
-    Assert.assertTrue("Container file should exist.",
-        newContainerFile.exists());
-    Assert.assertEquals("Container file should be in same location.",
-        orgContainerFile.getAbsolutePath(),
-        newContainerFile.getAbsolutePath());
-
-    ContainerData actualContainerData = ContainerDataYaml.readContainerFile(
-        newContainerFile);
-    Assert.assertEquals("shire_new",
-        actualContainerData.getMetadata().get("VOLUME"));
-    Assert.assertEquals("bilbo_new",
-        actualContainerData.getMetadata().get("owner"));
-
-    // Test force update flag.
-    // Close the container and then try to update without force update flag.
-    container.close();
-    try {
-      container.update(newMetadata, false);
-      Assert.fail("Expected exception not thrown.");
-    } catch (StorageContainerException ex) {
-      Assert.assertEquals("Updating a closed container without " +
-          "force option is not allowed. ContainerID: " +
-          testContainerID, ex.getMessage());
-    }
-
-    // Update with force flag, it should be success.
-    newMetadata.put("VOLUME", "shire_new_1");
-    newMetadata.put("owner", "bilbo_new_1");
-    container.update(newMetadata, true);
-
-    // Verify in-memory map
-    actualNewData = (KeyValueContainerData)
-        containerSet.getContainer(testContainerID).getContainerData();
-    Assert.assertEquals("shire_new_1",
-        actualNewData.getMetadata().get("VOLUME"));
-    Assert.assertEquals("bilbo_new_1",
-        actualNewData.getMetadata().get("owner"));
-  }
-
-  private BlockData writeBlockHelper(BlockID blockID, int i)
-      throws IOException, NoSuchAlgorithmException {
-    ChunkInfo info = writeChunkHelper(blockID);
-    BlockData blockData = new BlockData(blockID);
-    blockData.setBlockCommitSequenceId((long) i);
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-    chunkList.add(info.getProtoBufMessage());
-    blockData.setChunks(chunkList);
-    return blockData;
-  }
-
-  @Test
-  public void testListBlock() throws Exception {
-    long testContainerID = getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
-    List<BlockID> expectedBlocks = new ArrayList<>();
-    for (int i = 0; i < 10; i++) {
-      BlockID blockID = new BlockID(testContainerID, i);
-      expectedBlocks.add(blockID);
-      BlockData kd = writeBlockHelper(blockID, i);
-      blockManager.putBlock(container, kd);
-    }
-
-    // List all blocks
-    List<BlockData> result = blockManager.listBlock(
-        container, 0, 100);
-    Assert.assertEquals(10, result.size());
-
-    int index = 0;
-    for (int i = index; i < result.size(); i++) {
-      BlockData data = result.get(i);
-      Assert.assertEquals(testContainerID, data.getContainerID());
-      Assert.assertEquals(expectedBlocks.get(i).getLocalID(),
-          data.getLocalID());
-      index++;
-    }
-
-    // List block with startBlock filter
-    long k6 = expectedBlocks.get(6).getLocalID();
-    result = blockManager.listBlock(container, k6, 100);
-
-    Assert.assertEquals(4, result.size());
-    for (int i = 6; i < 10; i++) {
-      Assert.assertEquals(expectedBlocks.get(i).getLocalID(),
-          result.get(i - 6).getLocalID());
-    }
-
-    // Count must be >0
-    exception.expect(IllegalArgumentException.class);
-    exception.expectMessage("Count must be a positive number.");
-    blockManager.listBlock(container, 0, -1);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
deleted file mode 100644
index c7b7992..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ /dev/null
@@ -1,350 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import com.google.common.primitives.Longs;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl;
-import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.OzoneTestUtils;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.event.Level;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Set;
-import java.util.List;
-import java.util.HashSet;
-import java.util.ArrayList;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static java.lang.Math.max;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds
-    .HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-
-/**
- * Tests for Block deletion.
- */
-public class TestBlockDeletion {
-  private static OzoneConfiguration conf = null;
-  private static ObjectStore store;
-  private static MiniOzoneCluster cluster = null;
-  private static StorageContainerManager scm = null;
-  private static OzoneManager om = null;
-  private static Set<Long> containerIdsWithDeletedBlocks;
-  private static long maxTransactionId = 0;
-
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    GenericTestUtils.setLogLevel(DeletedBlockLogImpl.LOG, Level.DEBUG);
-    GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG);
-
-    String path =
-        GenericTestUtils.getTempPath(TestBlockDeletion.class.getSimpleName());
-    File baseDir = new File(path);
-    baseDir.mkdirs();
-
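-    // Use aggressively short intervals so block deletion and the datanode
-    // reports fire quickly enough for the test to observe them.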
-    conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL,
-        3, TimeUnit.SECONDS);
-    conf.setQuietMode(false);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(1)
-        .setHbInterval(200)
-        .build();
-    cluster.waitForClusterToBeReady();
-    store = OzoneClientFactory.getRpcClient(conf).getObjectStore();
-    om = cluster.getOzoneManager();
-    scm = cluster.getStorageContainerManager();
-    containerIdsWithDeletedBlocks = new HashSet<>();
-  }
-
-  @AfterClass
-  public static void cleanup() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testBlockDeletion() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    String value = RandomStringUtils.random(10000000);
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    String keyName = UUID.randomUUID().toString();
-
-    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes().length,
-        ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
-    for (int i = 0; i < 100; i++) {
-      out.write(value.getBytes());
-    }
-    out.close();
-
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setKeyName(keyName).setDataSize(0)
-        .setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.ONE)
-        .setRefreshPipeline(true)
-        .build();
-    List<OmKeyLocationInfoGroup> omKeyLocationInfoGroupList =
-        om.lookupKey(keyArgs).getKeyLocationVersions();
-
-    // verify key blocks were created in DN.
-    verifyBlocksCreated(omKeyLocationInfoGroupList);
-    // No containers with deleted blocks
-    Assert.assertTrue(containerIdsWithDeletedBlocks.isEmpty());
-    // Delete transactionIds for the containers should be 0.
-    // NOTE: this test assumes that every container is a KeyValueContainer.
-    // If other container types are added, this test should be revisited.
-    matchContainerTransactionIds();
-    om.deleteKey(keyArgs);
-    Thread.sleep(5000);
-    // The blocks should not be deleted in the DN as the container is open
-    try {
-      verifyBlocksDeleted(omKeyLocationInfoGroupList);
-      Assert.fail("Blocks should not have been deleted");
-    } catch (Throwable e) {
-      Assert.assertTrue(e.getMessage().contains("expected null, but was"));
-      Assert.assertEquals(e.getClass(), AssertionError.class);
-    }
-
-    // close the containers which hold the blocks for the key
-    OzoneTestUtils.closeContainers(omKeyLocationInfoGroupList, scm);
-
-    waitForDatanodeCommandRetry();
-
-    // make sure the containers are closed on the dn
-    omKeyLocationInfoGroupList.forEach((group) -> {
-      List<OmKeyLocationInfo> locationInfo = group.getLocationList();
-      locationInfo.forEach(
-          (info) -> cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-              .getContainer().getContainerSet()
-              .getContainer(info.getContainerID()).getContainerData()
-              .setState(ContainerProtos.ContainerDataProto.State.CLOSED));
-    });
-    waitForDatanodeBlockDeletionStart();
-    // The blocks should be deleted in the DN.
-    verifyBlocksDeleted(omKeyLocationInfoGroupList);
-
-    // A few containers should now have deleted blocks.
-    Assert.assertFalse(containerIdsWithDeletedBlocks.isEmpty());
-    // Containers in the DN and SCM should have same delete transactionIds
-    matchContainerTransactionIds();
-    // Containers in the DN and SCM should have same delete transactionIds
-    // after DN restart. The assertion is just to verify that the state of
-    // containerInfos in dn and scm is consistent after dn restart.
-    cluster.restartHddsDatanode(0, true);
-    matchContainerTransactionIds();
-
-    // verify PENDING_DELETE_STATUS event is fired
-    verifyPendingDeleteEvent();
-
-    // Verify transactions committed
-    verifyTransactionsCommitted();
-  }
-
-  private void waitForDatanodeBlockDeletionStart()
-      throws TimeoutException, InterruptedException {
-    LogCapturer logCapturer =
-        LogCapturer.captureLogs(DeleteBlocksCommandHandler.LOG);
-    logCapturer.clearOutput();
-    GenericTestUtils.waitFor(() -> logCapturer.getOutput()
-            .contains("Start to delete container block"),
-        500, 10000);
-    Thread.sleep(1000);
-  }
-
-  /**
-   * Waits for datanode command to be retried when datanode is dead.
-   */
-  private void waitForDatanodeCommandRetry()
-      throws TimeoutException, InterruptedException {
-    cluster.shutdownHddsDatanode(0);
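-    // With the lone datanode down, the deleteBlocks command cannot be
-    // delivered, so the event watcher should log a retry.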
-    LogCapturer logCapturer =
-        LogCapturer.captureLogs(RetriableDatanodeEventWatcher.LOG);
-    logCapturer.clearOutput();
-    GenericTestUtils.waitFor(() -> logCapturer.getOutput()
-            .contains("RetriableDatanodeCommand type=deleteBlocksCommand"),
-        500, 5000);
-    cluster.restartHddsDatanode(0, true);
-  }
-
-  private void verifyTransactionsCommitted() throws IOException {
-    DeletedBlockLogImpl deletedBlockLog =
-        (DeletedBlockLogImpl) scm.getScmBlockManager().getDeletedBlockLog();
-    for (long txnID = 1; txnID <= maxTransactionId; txnID++) {
-      Assert.assertNull(
-          scm.getScmMetadataStore().getDeletedBlocksTXTable().get(txnID));
-    }
-  }
-
-  private void verifyPendingDeleteEvent()
-      throws IOException, InterruptedException {
-    ContainerSet dnContainerSet =
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet();
-    LogCapturer logCapturer =
-        LogCapturer.captureLogs(SCMBlockDeletingService.LOG);
-    // Create dummy container reports with deleteTransactionId set as 0
-    ContainerReportsProto containerReport = dnContainerSet.getContainerReport();
-    ContainerReportsProto.Builder dummyReportsBuilder =
-        ContainerReportsProto.newBuilder();
-    for (ContainerReplicaProto containerInfo :
-        containerReport.getReportsList()) {
-      dummyReportsBuilder.addReports(
-          ContainerReplicaProto.newBuilder(containerInfo)
-              .setDeleteTransactionId(0)
-              .build());
-    }
-    ContainerReportsProto dummyReport = dummyReportsBuilder.build();
-
-    logCapturer.clearOutput();
-    cluster.getHddsDatanodes().get(0)
-        .getDatanodeStateMachine().getContext().addReport(dummyReport);
-    cluster.getHddsDatanodes().get(0)
-        .getDatanodeStateMachine().triggerHeartbeat();
-    // wait for event to be handled by event handler
-    Thread.sleep(1000);
-    String output = logCapturer.getOutput();
-    for (ContainerReplicaProto containerInfo : dummyReport.getReportsList()) {
-      long containerId = containerInfo.getContainerID();
-      // Event should be triggered only for containers which have deleted blocks
-      if (containerIdsWithDeletedBlocks.contains(containerId)) {
-        Assert.assertTrue(output.contains(
-            "for containerID " + containerId + ". Datanode delete txnID"));
-      } else {
-        Assert.assertFalse(output.contains(
-            "for containerID " + containerId + ". Datanode delete txnID"));
-      }
-    }
-    logCapturer.clearOutput();
-  }
-
-  private void matchContainerTransactionIds() throws IOException {
-    ContainerSet dnContainerSet =
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet();
-    List<ContainerData> containerDataList = new ArrayList<>();
-    dnContainerSet.listContainer(0, 10000, containerDataList);
-    for (ContainerData containerData : containerDataList) {
-      long containerId = containerData.getContainerID();
-      if (containerIdsWithDeletedBlocks.contains(containerId)) {
-        Assert.assertTrue(
-            scm.getContainerInfo(containerId).getDeleteTransactionId() > 0);
-        maxTransactionId = max(maxTransactionId,
-            scm.getContainerInfo(containerId).getDeleteTransactionId());
-      } else {
-        Assert.assertEquals(0,
-            scm.getContainerInfo(containerId).getDeleteTransactionId());
-      }
-      Assert.assertEquals(((KeyValueContainerData)dnContainerSet
-              .getContainer(containerId).getContainerData())
-              .getDeleteTransactionId(),
-          scm.getContainerInfo(containerId).getDeleteTransactionId());
-    }
-  }
-
-  private void verifyBlocksCreated(
-      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups) throws Exception {
-    ContainerSet dnContainerSet =
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet();
-    OzoneTestUtils.performOperationOnKeyContainers((blockID) -> {
-      try(ReferenceCountedDB db =
-          BlockUtils.getDB((KeyValueContainerData) dnContainerSet
-          .getContainer(blockID.getContainerID()).getContainerData(), conf)) {
-        Assert.assertNotNull(db.getStore().get(
-            Longs.toByteArray(blockID.getLocalID())));
-      }
-    }, omKeyLocationInfoGroups);
-  }
-
-  private void verifyBlocksDeleted(
-      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups) throws Exception {
-    ContainerSet dnContainerSet =
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
-            .getContainer().getContainerSet();
-    OzoneTestUtils.performOperationOnKeyContainers((blockID) -> {
-      try(ReferenceCountedDB db =
-          BlockUtils.getDB((KeyValueContainerData) dnContainerSet
-          .getContainer(blockID.getContainerID()).getContainerData(), conf)) {
-        Assert.assertNull(db.getStore().get(
-            Longs.toByteArray(blockID.getLocalID())));
-        Assert.assertNull(db.getStore().get(DFSUtil.string2Bytes(
-            OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID())));
-        Assert.assertNotNull(DFSUtil.string2Bytes(
-            OzoneConsts.DELETED_KEY_PREFIX + blockID.getLocalID()));
-      }
-      containerIdsWithDeletedBlocks.add(blockID.getContainerID());
-    }, omKeyLocationInfoGroups);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
deleted file mode 100644
index b676e1c..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.concurrent.TimeoutException;
-
-/**
- * Test container closing.
- */
-public class TestCloseContainerByPipeline {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static OzoneClient client;
-  private static ObjectStore objectStore;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, "1");
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(10)
-        .build();
-    cluster.waitForClusterToBeReady();
-    // The easiest way to create an open container is to create a key.
-    client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    objectStore.createVolume("test");
-    objectStore.getVolume("test").createBucket("test");
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception {
-    String keyName = "testIfCloseContainerCommandHandlerIsInvoked";
-    OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
-        .createKey(keyName, 1024, ReplicationType.RATIS, ReplicationFactor.ONE,
-            new HashMap<>());
-    key.write(keyName.getBytes());
-    key.close();
-
-    // Get the ID of a valid container.
-    OmKeyArgs keyArgs =
-        new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
-            .setType(HddsProtos.ReplicationType.RATIS)
-            .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
-            .setKeyName(keyName).setRefreshPipeline(true).build();
-    OmKeyLocationInfo omKeyLocationInfo =
-        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
-            .get(0).getBlocksLatestVersionOnly().get(0);
-
-    long containerID = omKeyLocationInfo.getContainerID();
-    ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
-    Pipeline pipeline = cluster.getStorageContainerManager()
-        .getPipelineManager().getPipeline(container.getPipelineID());
-    List<DatanodeDetails> datanodes = pipeline.getNodes();
-    Assert.assertEquals(1, datanodes.size());
-
-    DatanodeDetails datanodeDetails = datanodes.get(0);
-    HddsDatanodeService datanodeService = null;
-    Assert
-        .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails));
-    for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) {
-      if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) {
-        datanodeService = datanodeServiceItr;
-        break;
-      }
-    }
-    Assert.assertNotNull("Datanode hosting the container was not found.",
-        datanodeService);
-    CommandHandler closeContainerHandler =
-        datanodeService.getDatanodeStateMachine().getCommandDispatcher()
-            .getCloseContainerHandler();
-    int lastInvocationCount = closeContainerHandler.getInvocationCount();
-    //send the order to close the container
-    cluster.getStorageContainerManager().getScmNodeManager()
-        .addDatanodeCommand(datanodeDetails.getUuid(),
-            new CloseContainerCommand(containerID, pipeline.getId()));
-    GenericTestUtils
-        .waitFor(() -> isContainerClosed(cluster, containerID, datanodeDetails),
-            500, 5 * 1000);
-    // Make sure the closeContainerCommandHandler is Invoked
-    Assert.assertTrue(
-        closeContainerHandler.getInvocationCount() > lastInvocationCount);
-  }
-
-  @Test
-  public void testCloseContainerViaStandAlone()
-      throws IOException, TimeoutException, InterruptedException {
-
-    OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
-        .createKey("standalone", 1024, ReplicationType.RATIS,
-            ReplicationFactor.ONE, new HashMap<>());
-    key.write("standalone".getBytes());
-    key.close();
-
-    // Get the ID of a valid container.
-    OmKeyArgs keyArgs =
-        new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
-            .setType(HddsProtos.ReplicationType.RATIS)
-            .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
-            .setKeyName("standalone")
-            .setRefreshPipeline(true)
-            .build();
-
-    OmKeyLocationInfo omKeyLocationInfo =
-        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
-            .get(0).getBlocksLatestVersionOnly().get(0);
-
-    long containerID = omKeyLocationInfo.getContainerID();
-    ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
-    Pipeline pipeline = cluster.getStorageContainerManager()
-        .getPipelineManager().getPipeline(container.getPipelineID());
-    List<DatanodeDetails> datanodes = pipeline.getNodes();
-    Assert.assertEquals(1, datanodes.size());
-
-    DatanodeDetails datanodeDetails = datanodes.get(0);
-    Assert
-        .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails));
-
-    // Send the order to close the container. Note that the command carries
-    // the container's actual pipeline id.
-    cluster.getStorageContainerManager().getScmNodeManager()
-        .addDatanodeCommand(datanodeDetails.getUuid(),
-            new CloseContainerCommand(containerID, pipeline.getId()));
-
-    //double check if it's really closed (waitFor also throws an exception)
-    // TODO: change the below line after implementing QUASI_CLOSED to CLOSED
-    // logic. The container will be QUASI closed as of now
-    GenericTestUtils
-        .waitFor(() -> isContainerClosed(cluster, containerID, datanodeDetails),
-            500, 5 * 1000);
-    Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails));
-
-    cluster.getStorageContainerManager().getPipelineManager()
-        .finalizeAndDestroyPipeline(pipeline, false);
-    Thread.sleep(5000);
-    // Pipeline close should not affect a container in CLOSED state
-    Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails));
-  }
-
-  @Test
-  public void testCloseContainerViaRatis() throws IOException,
-      TimeoutException, InterruptedException {
-
-    OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
-        .createKey("ratis", 1024, ReplicationType.RATIS,
-            ReplicationFactor.THREE, new HashMap<>());
-    key.write("ratis".getBytes());
-    key.close();
-
-    // Get the ID of a valid container.
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName("test").
-        setBucketName("test").setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.THREE).setDataSize(1024)
-        .setKeyName("ratis").setRefreshPipeline(true).build();
-
-    OmKeyLocationInfo omKeyLocationInfo =
-        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
-            .get(0).getBlocksLatestVersionOnly().get(0);
-
-    long containerID = omKeyLocationInfo.getContainerID();
-    ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
-    Pipeline pipeline = cluster.getStorageContainerManager()
-        .getPipelineManager().getPipeline(container.getPipelineID());
-    List<DatanodeDetails> datanodes = pipeline.getNodes();
-    Assert.assertEquals(3, datanodes.size());
-
-    List<ReferenceCountedDB> metadataStores = new ArrayList<>(datanodes.size());
-    for (DatanodeDetails details : datanodes) {
-      Assert.assertFalse(isContainerClosed(cluster, containerID, details));
-      //send the order to close the container
-      cluster.getStorageContainerManager().getScmNodeManager()
-          .addDatanodeCommand(details.getUuid(),
-              new CloseContainerCommand(containerID, pipeline.getId()));
-      int index = cluster.getHddsDatanodeIndex(details);
-      Container dnContainer = cluster.getHddsDatanodes().get(index)
-          .getDatanodeStateMachine().getContainer().getContainerSet()
-          .getContainer(containerID);
-      try(ReferenceCountedDB store = BlockUtils.getDB(
-          (KeyValueContainerData) dnContainer.getContainerData(), conf)) {
-        metadataStores.add(store);
-      }
-    }
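-    // The try-with-resources above releases each DB handle immediately; only
-    // the object references are kept, which is all the distinct() check needs.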
-
-    // There should be as many RocksDB instances as datanodes in the pipeline.
-    Assert.assertEquals(datanodes.size(),
-        metadataStores.stream().distinct().count());
-
-    // Make sure that it is CLOSED
-    for (DatanodeDetails datanodeDetails : datanodes) {
-      GenericTestUtils.waitFor(
-          () -> isContainerClosed(cluster, containerID, datanodeDetails), 500,
-          15 * 1000);
-      //double check if it's really closed (waitFor also throws an exception)
-      Assert.assertTrue(isContainerClosed(cluster,
-          containerID, datanodeDetails));
-    }
-  }
-
-  @Test
-  public void testQuasiCloseTransitionViaRatis()
-      throws IOException, TimeoutException, InterruptedException {
-
-    String keyName = "testQuasiCloseTransitionViaRatis";
-    OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
-        .createKey(keyName, 1024, ReplicationType.RATIS,
-            ReplicationFactor.ONE, new HashMap<>());
-    key.write(keyName.getBytes());
-    key.close();
-
-    OmKeyArgs keyArgs =
-        new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
-            .setType(HddsProtos.ReplicationType.RATIS)
-            .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
-            .setKeyName(keyName)
-            .setRefreshPipeline(true)
-            .build();
-
-    OmKeyLocationInfo omKeyLocationInfo =
-        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
-            .get(0).getBlocksLatestVersionOnly().get(0);
-
-    long containerID = omKeyLocationInfo.getContainerID();
-    ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
-    Pipeline pipeline = cluster.getStorageContainerManager()
-        .getPipelineManager().getPipeline(container.getPipelineID());
-    List<DatanodeDetails> datanodes = pipeline.getNodes();
-    Assert.assertEquals(1, datanodes.size());
-
-    DatanodeDetails datanodeDetails = datanodes.get(0);
-    Assert
-        .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails));
-
-    // close the pipeline
-    cluster.getStorageContainerManager()
-        .getPipelineManager().finalizeAndDestroyPipeline(pipeline, false);
-
-    // All the containers in OPEN or CLOSING state should transition to
-    // QUASI-CLOSED after pipeline close
-    GenericTestUtils.waitFor(
-        () -> isContainerQuasiClosed(cluster, containerID, datanodeDetails),
-        500, 5 * 1000);
-    Assert.assertTrue(
-        isContainerQuasiClosed(cluster, containerID, datanodeDetails));
-
-    // Send close container command from SCM to datanode with forced flag as
-    // true
-    cluster.getStorageContainerManager().getScmNodeManager()
-        .addDatanodeCommand(datanodeDetails.getUuid(),
-            new CloseContainerCommand(containerID, pipeline.getId(), true));
-    GenericTestUtils
-        .waitFor(() -> isContainerClosed(
-            cluster, containerID, datanodeDetails), 500, 5 * 1000);
-    Assert.assertTrue(
-        isContainerClosed(cluster, containerID, datanodeDetails));
-  }
-
-  private boolean isContainerClosed(MiniOzoneCluster ozoneCluster,
-      long containerID,
-      DatanodeDetails datanode) {
-    ContainerData containerData;
-    for (HddsDatanodeService datanodeService : ozoneCluster
-        .getHddsDatanodes()) {
-      if (datanode.equals(datanodeService.getDatanodeDetails())) {
-        containerData =
-            datanodeService.getDatanodeStateMachine().getContainer()
-                .getContainerSet().getContainer(containerID).getContainerData();
-        return containerData.isClosed();
-      }
-    }
-    return false;
-  }
-
-  private boolean isContainerQuasiClosed(MiniOzoneCluster miniCluster,
-      long containerID, DatanodeDetails datanode) {
-    ContainerData containerData;
-    for (HddsDatanodeService datanodeService : miniCluster.getHddsDatanodes()) {
-      if (datanode.equals(datanodeService.getDatanodeDetails())) {
-        containerData =
-            datanodeService.getDatanodeStateMachine().getContainer()
-                .getContainerSet().getContainer(containerID).getContainerData();
-        return containerData.isQuasiClosed();
-      }
-    }
-    return false;
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
deleted file mode 100644
index 5c7f2c1..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import java.util.HashMap;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Tests the behaviour of the datanode when it receives a close container
- * command.
- */
-public class TestCloseContainerHandler {
-
-  private MiniOzoneCluster cluster;
-  private OzoneConfiguration conf;
-
-  @Before
-  public void setup() throws Exception {
-    //setup a cluster (1G free space is enough for a unit test)
-    conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(1).build();
-  }
-
-  @After
-  public void teardown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void test() throws Exception {
-    cluster.waitForClusterToBeReady();
-
-    //the easiest way to create an open container is creating a key
-    OzoneClient client = OzoneClientFactory.getClient(conf);
-    ObjectStore objectStore = client.getObjectStore();
-    objectStore.createVolume("test");
-    objectStore.getVolume("test").createBucket("test");
-    OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
-        .createKey("test", 1024, ReplicationType.STAND_ALONE,
-            ReplicationFactor.ONE, new HashMap<>());
-    key.write("test".getBytes());
-    key.close();
-
-    //get the name of a valid container
-    OmKeyArgs keyArgs =
-        new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
-            .setType(HddsProtos.ReplicationType.STAND_ALONE)
-            .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
-            .setKeyName("test")
-            .setRefreshPipeline(true)
-            .build();
-
-    OmKeyLocationInfo omKeyLocationInfo =
-        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
-            .get(0).getBlocksLatestVersionOnly().get(0);
-
-    ContainerID containerId = ContainerID.valueof(
-        omKeyLocationInfo.getContainerID());
-    ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(containerId);
-    Pipeline pipeline = cluster.getStorageContainerManager()
-        .getPipelineManager().getPipeline(container.getPipelineID());
-
-    Assert.assertFalse(isContainerClosed(cluster, containerId.getId()));
-
-    DatanodeDetails datanodeDetails =
-        cluster.getHddsDatanodes().get(0).getDatanodeDetails();
-    //send the order to close the container
-    cluster.getStorageContainerManager().getScmNodeManager()
-        .addDatanodeCommand(datanodeDetails.getUuid(),
-            new CloseContainerCommand(containerId.getId(), pipeline.getId()));
-
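-    // The datanode processes the close command asynchronously, so poll until
-    // the container actually transitions to the closed state.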
-    GenericTestUtils.waitFor(() ->
-            isContainerClosed(cluster, containerId.getId()),
-            500,
-            5 * 1000);
-
-    //double check if it's really closed (waitFor also throws an exception)
-    Assert.assertTrue(isContainerClosed(cluster, containerId.getId()));
-  }
-
-  private static Boolean isContainerClosed(MiniOzoneCluster cluster,
-      long containerID) {
-    ContainerData containerData;
-    containerData = cluster.getHddsDatanodes().get(0)
-        .getDatanodeStateMachine().getContainer().getContainerSet()
-        .getContainer(containerID).getContainerData();
-    return !containerData.isOpen();
-  }
-
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
deleted file mode 100644
index 1cbf69e..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
-
-/**
- * Tests DeleteContainerCommand Handler.
- */
-public class TestDeleteContainerHandler {
-
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static ObjectStore objectStore;
-  private static String volumeName = UUID.randomUUID().toString();
-  private static String bucketName = UUID.randomUUID().toString();
-
-  @BeforeClass
-  public static void setup() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(1).build();
-    cluster.waitForClusterToBeReady();
-
-    OzoneClient client = OzoneClientFactory.getClient(conf);
-    objectStore = client.getObjectStore();
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-  }
-
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      try {
-        cluster.shutdown();
-      } catch (Exception e) {
-        // do nothing.
-      }
-    }
-  }
-
-  @Test(timeout = 60000)
-  public void testDeleteContainerRequestHandlerOnClosedContainer()
-      throws Exception {
-
-    //the easiest way to create an open container is creating a key
-
-    String keyName = UUID.randomUUID().toString();
-
-    // create key
-    createKey(keyName);
-
-    // get containerID of the key
-    ContainerID containerId = getContainerID(keyName);
-
-    ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(containerId);
-
-    Pipeline pipeline = cluster.getStorageContainerManager()
-        .getPipelineManager().getPipeline(container.getPipelineID());
-
-    // We need to close the container first because, with the force flag set
-    // to false, delete container only succeeds on closed containers.
-
-    HddsDatanodeService hddsDatanodeService =
-        cluster.getHddsDatanodes().get(0);
-
-    Assert.assertFalse(isContainerClosed(hddsDatanodeService,
-        containerId.getId()));
-
-    DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails();
-
-    NodeManager nodeManager =
-        cluster.getStorageContainerManager().getScmNodeManager();
-
-    //send the order to close the container
-    nodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
-            new CloseContainerCommand(containerId.getId(), pipeline.getId()));
-
-    GenericTestUtils.waitFor(() ->
-            isContainerClosed(hddsDatanodeService, containerId.getId()),
-        500, 5 * 1000);
-
-    //double check if it's really closed (waitFor also throws an exception)
-    Assert.assertTrue(isContainerClosed(hddsDatanodeService,
-        containerId.getId()));
-
-    // Check container exists before sending delete container command
-    Assert.assertFalse(isContainerDeleted(hddsDatanodeService,
-        containerId.getId()));
-
-    // send delete container to the datanode
-    nodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
-            new DeleteContainerCommand(containerId.getId(), false));
-
-    GenericTestUtils.waitFor(() ->
-            isContainerDeleted(hddsDatanodeService, containerId.getId()),
-        500, 5 * 1000);
-
-    Assert.assertTrue(isContainerDeleted(hddsDatanodeService,
-        containerId.getId()));
-
-  }
-
-
-  @Test
-  public void testDeleteContainerRequestHandlerOnOpenContainer()
-      throws Exception {
-
-    //the easiest way to create an open container is creating a key
-    String keyName = UUID.randomUUID().toString();
-
-    // create key
-    createKey(keyName);
-
-    // get containerID of the key
-    ContainerID containerId = getContainerID(keyName);
-
-    HddsDatanodeService hddsDatanodeService =
-        cluster.getHddsDatanodes().get(0);
-    DatanodeDetails datanodeDetails =
-        hddsDatanodeService.getDatanodeDetails();
-
-    NodeManager nodeManager =
-        cluster.getStorageContainerManager().getScmNodeManager();
-
-    // Send delete container command with force flag set to false.
-    nodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
-        new DeleteContainerCommand(containerId.getId(), false));
-
-    // The delete should be rejected and the container should still exist in
-    // the container set.
-    int count = 1;
-    // Check for up to 5 seconds whether the container is still in the
-    // container set, giving the datanode time to process the command.
-    while (!isContainerDeleted(hddsDatanodeService, containerId.getId())) {
-      Thread.sleep(1000);
-      count++;
-      if (count == 5) {
-        break;
-      }
-    }
-
-    Assert.assertFalse(isContainerDeleted(hddsDatanodeService,
-        containerId.getId()));
-
-
-    // Now delete the container with the force flag set to true; this time
-    // the container should be deleted.
-
-    nodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
-        new DeleteContainerCommand(containerId.getId(), true));
-
-    GenericTestUtils.waitFor(() ->
-            isContainerDeleted(hddsDatanodeService, containerId.getId()),
-        500, 5 * 1000);
-
-    Assert.assertTrue(isContainerDeleted(hddsDatanodeService,
-        containerId.getId()));
-
-  }
-
-  /**
-   * Creates a key with the specified name.
-   * @param keyName name of the key to create
-   * @throws IOException if the key cannot be created
-   */
-  private void createKey(String keyName) throws IOException {
-    OzoneOutputStream key = objectStore.getVolume(volumeName)
-        .getBucket(bucketName)
-        .createKey(keyName, 1024, ReplicationType.STAND_ALONE,
-            ReplicationFactor.ONE, new HashMap<>());
-    key.write("test".getBytes());
-    key.close();
-  }
-
-  /**
-   * Returns the ContainerID of the container holding the key.
-   * @param keyName name of the key to look up
-   * @return ContainerID of the key's latest block
-   * @throws IOException if the key lookup fails
-   */
-  private ContainerID getContainerID(String keyName) throws IOException {
-    OmKeyArgs keyArgs =
-        new OmKeyArgs.Builder().setVolumeName(volumeName)
-            .setBucketName(bucketName)
-            .setType(HddsProtos.ReplicationType.STAND_ALONE)
-            .setFactor(HddsProtos.ReplicationFactor.ONE)
-            .setKeyName(keyName)
-            .setRefreshPipeline(true)
-            .build();
-
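-    // A key can have several location versions; use the first block of the
-    // latest version to find the container this test operates on.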
-    OmKeyLocationInfo omKeyLocationInfo =
-        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
-            .get(0).getBlocksLatestVersionOnly().get(0);
-
-    return ContainerID.valueof(
-        omKeyLocationInfo.getContainerID());
-  }
-
-  /**
-   * Checks whether the container is closed on the datanode.
-   * @param hddsDatanodeService datanode to inspect
-   * @param containerID ID of the container to check
-   * @return true if the container is closed, else false.
-   */
-  private Boolean isContainerClosed(HddsDatanodeService hddsDatanodeService,
-      long containerID) {
-    ContainerData containerData;
-    containerData = hddsDatanodeService
-        .getDatanodeStateMachine().getContainer().getContainerSet()
-        .getContainer(containerID).getContainerData();
-    return !containerData.isOpen();
-  }
-
-  /**
-   * Checks whether the container has been deleted from the datanode.
-   * @param hddsDatanodeService datanode to inspect
-   * @param containerID ID of the container to check
-   * @return true if the container is deleted, else false
-   */
-  private Boolean isContainerDeleted(HddsDatanodeService hddsDatanodeService,
-      long containerID) {
-    Container container;
-    // if container is not in container set, it means container got deleted.
-    container = hddsDatanodeService
-        .getDatanodeStateMachine().getContainer().getContainerSet()
-        .getContainer(containerID);
-    return container == null;
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
deleted file mode 100644
index 67bdc17..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Integration tests for the command handlers.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
deleted file mode 100644
index 3967c0c..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
+++ /dev/null
@@ -1,237 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server.ratis;
-
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getDoubleGauge;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import java.util.ArrayList;
-
-import com.google.common.collect.Maps;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-      .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-      .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.scm.*;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.RatisTestHelper;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.transport.server
-      .XceiverServerSpi;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.util.function.CheckedBiConsumer;
-
-import java.util.Map;
-import java.util.function.BiConsumer;
-
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * This class tests the metrics of ContainerStateMachine.
- */
-public class TestCSMMetrics {
-  static final String TEST_DIR =
-      GenericTestUtils.getTestDir("dfs").getAbsolutePath()
-          + File.separator;
-  @FunctionalInterface
-  interface CheckedBiFunction<LEFT, RIGHT, OUT, THROWABLE extends Throwable> {
-    OUT apply(LEFT left, RIGHT right) throws THROWABLE;
-  }
-
-  @Test
-  public void testContainerStateMachineMetrics() throws Exception {
-    runContainerStateMachineMetrics(1,
-        (pipeline, conf) -> RatisTestHelper.initRatisConf(GRPC, conf),
-        XceiverClientRatis::newXceiverClientRatis,
-        TestCSMMetrics::newXceiverServerRatis,
-        (dn, p) -> RatisTestHelper.initXceiverServerRatis(GRPC, dn, p));
-  }
-
-  static void runContainerStateMachineMetrics(
-      int numDatanodes,
-      BiConsumer<Pipeline, OzoneConfiguration> initConf,
-      TestCSMMetrics.CheckedBiFunction<Pipeline, OzoneConfiguration,
-          XceiverClientSpi, IOException> createClient,
-      TestCSMMetrics.CheckedBiFunction<DatanodeDetails, OzoneConfiguration,
-          XceiverServerSpi, IOException> createServer,
-      CheckedBiConsumer<DatanodeDetails, Pipeline, IOException> initServer)
-      throws Exception {
-    final List<XceiverServerSpi> servers = new ArrayList<>();
-    XceiverClientSpi client = null;
-    String containerName = OzoneUtils.getRequestID();
-    try {
-      final Pipeline pipeline = ContainerTestHelper.createPipeline(
-          numDatanodes);
-      final OzoneConfiguration conf = new OzoneConfiguration();
-      initConf.accept(pipeline, conf);
-
-      for (DatanodeDetails dn : pipeline.getNodes()) {
-        final XceiverServerSpi s = createServer.apply(dn, conf);
-        servers.add(s);
-        s.start();
-        initServer.accept(dn, pipeline);
-      }
-
-      client = createClient.apply(pipeline, conf);
-      client.connect();
-
-      // Before Read Chunk/Write Chunk
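-      // CSMMetrics registers one metrics source per Raft group, so the record
-      // name is the base source name suffixed with the pipeline's RaftGroupId.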
-      MetricsRecordBuilder metric = getMetrics(CSMMetrics.SOURCE_NAME +
-          RaftGroupId.valueOf(pipeline.getId().getId()).toString());
-      assertCounter("NumWriteStateMachineOps", 0L, metric);
-      assertCounter("NumReadStateMachineOps", 0L, metric);
-      assertCounter("NumApplyTransactionOps", 0L, metric);
-      assertCounter("NumBytesWrittenCount", 0L, metric);
-      assertCounter("NumBytesCommittedCount", 0L, metric);
-      assertCounter("NumStartTransactionVerifyFailures", 0L, metric);
-      assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric);
-      assertCounter("WriteChunkNumOps", 0L, metric);
-      double applyTransactionLatency = getDoubleGauge(
-          "ApplyTransactionAvgTime", metric);
-      assertTrue(applyTransactionLatency == 0.0);
-      double writeStateMachineLatency = getDoubleGauge(
-          "WriteStateMachineDataAvgTime", metric);
-      assertTrue(writeStateMachineLatency == 0.0);
-
-      // Write Chunk
-      BlockID blockID = ContainerTestHelper.getTestBlockID(ContainerTestHelper.
-          getTestContainerID());
-      ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
-          ContainerTestHelper.getWriteChunkRequest(
-              pipeline, blockID, 1024);
-      ContainerCommandResponseProto response =
-          client.sendCommand(writeChunkRequest);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
-          response.getResult());
-
-      metric = getMetrics(CSMMetrics.SOURCE_NAME +
-              RaftGroupId.valueOf(pipeline.getId().getId()).toString());
-      assertCounter("NumWriteStateMachineOps", 1L, metric);
-      assertCounter("NumBytesWrittenCount", 1024L, metric);
-      assertCounter("NumApplyTransactionOps", 1L, metric);
-      assertCounter("NumBytesCommittedCount", 1024L, metric);
-      assertCounter("NumStartTransactionVerifyFailures", 0L, metric);
-      assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric);
-      assertCounter("WriteChunkNumOps", 1L, metric);
-
-      //Read Chunk
-      ContainerProtos.ContainerCommandRequestProto readChunkRequest =
-          ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest
-              .getWriteChunk());
-      response = client.sendCommand(readChunkRequest);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
-          response.getResult());
-
-      metric = getMetrics(CSMMetrics.SOURCE_NAME +
-          RaftGroupId.valueOf(pipeline.getId().getId()).toString());
-      assertCounter("NumQueryStateMachineOps", 1L, metric);
-      assertCounter("NumApplyTransactionOps", 1L, metric);
-      applyTransactionLatency = getDoubleGauge(
-          "ApplyTransactionAvgTime", metric);
-      assertTrue(applyTransactionLatency > 0.0);
-      writeStateMachineLatency = getDoubleGauge(
-          "WriteStateMachineDataAvgTime", metric);
-      assertTrue(writeStateMachineLatency > 0.0);
-
-    } finally {
-      if (client != null) {
-        client.close();
-      }
-      servers.stream().forEach(XceiverServerSpi::stop);
-    }
-  }
-
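-  // Builds a Ratis-backed XceiverServer bound to the datanode's Ratis port,
-  // with storage under a per-datanode test directory and a stub dispatcher.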
-  static XceiverServerRatis newXceiverServerRatis(
-      DatanodeDetails dn, OzoneConfiguration conf) throws IOException {
-    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
-        dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue());
-    final String dir = TEST_DIR + dn.getUuid();
-    conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
-
-    final ContainerDispatcher dispatcher = new TestContainerDispatcher();
-    return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher,
-        new ContainerController(new ContainerSet(), Maps.newHashMap()),
-        null, null);
-  }
-
-  private static class TestContainerDispatcher implements ContainerDispatcher {
-    /**
-     * Dispatches commands to container layer.
-     *
-     * @param msg - Command Request
-     * @return Command Response
-     */
-    @Override
-    public ContainerCommandResponseProto dispatch(
-        ContainerCommandRequestProto msg,
-        DispatcherContext context) {
-      return ContainerTestHelper.getCreateContainerResponse(msg);
-    }
-
-    @Override
-    public void validateContainerCommand(
-        ContainerCommandRequestProto msg) throws StorageContainerException {
-    }
-
-    @Override
-    public void init() {
-    }
-
-    @Override
-    public void shutdown() {
-    }
-
-    @Override
-    public Handler getHandler(ContainerProtos.ContainerType containerType) {
-      return null;
-    }
-
-    @Override
-    public void setScmId(String scmId) {
-
-    }
-
-    @Override
-    public void buildMissingContainerSetAndValidate(
-        Map<Long, Long> container2BCSIDMap) {
-    }
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
deleted file mode 100644
index 43c354c..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.metrics;
-
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-
-import com.google.common.collect.Maps;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
-import org.apache.hadoop.ozone.container.replication.GrpcReplicationService;
-import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.File;
-import java.util.Map;
-import java.util.UUID;
-
-/**
- * Test for metrics published by storage containers.
- */
-public class TestContainerMetrics {
-
-  private GrpcReplicationService createReplicationService(
-      ContainerController controller) {
-    return new GrpcReplicationService(
-        new OnDemandContainerReplicationSource(controller));
-  }
-
-  @Test
-  public void testContainerMetrics() throws Exception {
-    XceiverServerGrpc server = null;
-    XceiverClientGrpc client = null;
-    long containerID = ContainerTestHelper.getTestContainerID();
-    String path = GenericTestUtils.getRandomizedTempPath();
-
-    try {
-      final int interval = 1;
-      Pipeline pipeline = ContainerTestHelper
-          .createSingleNodePipeline();
-      OzoneConfiguration conf = new OzoneConfiguration();
-      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          pipeline.getFirstNode()
-              .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
-      conf.setInt(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
-          interval);
-
-      DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-      conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, path);
-      VolumeSet volumeSet = new VolumeSet(
-          datanodeDetails.getUuidString(), conf);
-      ContainerSet containerSet = new ContainerSet();
-      DatanodeStateMachine stateMachine = Mockito.mock(
-          DatanodeStateMachine.class);
-      StateContext context = Mockito.mock(StateContext.class);
-      Mockito.when(stateMachine.getDatanodeDetails())
-          .thenReturn(datanodeDetails);
-      Mockito.when(context.getParent()).thenReturn(stateMachine);
-      ContainerMetrics metrics = ContainerMetrics.create(conf);
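-      // Register a handler for every container type so the dispatcher can
-      // route each incoming command to the matching implementation.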
-      Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
-      for (ContainerProtos.ContainerType containerType :
-          ContainerProtos.ContainerType.values()) {
-        handlers.put(containerType,
-            Handler.getHandlerForContainerType(containerType, conf, context,
-                containerSet, volumeSet, metrics));
-      }
-      HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet,
-          volumeSet, handlers, context, metrics);
-      dispatcher.setScmId(UUID.randomUUID().toString());
-
-      server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null,
-          createReplicationService(new ContainerController(
-              containerSet, handlers)));
-      client = new XceiverClientGrpc(pipeline, conf);
-
-      server.start();
-      client.connect();
-
-      // Create container
-      ContainerCommandRequestProto request = ContainerTestHelper
-          .getCreateContainerRequest(containerID, pipeline);
-      ContainerCommandResponseProto response = client.sendCommand(request);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
-          response.getResult());
-
-      // Write Chunk
-      BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
-      ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
-          ContainerTestHelper.getWriteChunkRequest(
-              pipeline, blockID, 1024);
-      response = client.sendCommand(writeChunkRequest);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
-          response.getResult());
-
-      //Read Chunk
-      ContainerProtos.ContainerCommandRequestProto readChunkRequest =
-          ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest
-              .getWriteChunk());
-      response = client.sendCommand(readChunkRequest);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-
-      MetricsRecordBuilder containerMetrics = getMetrics(
-          "StorageContainerMetrics");
-      assertCounter("NumOps", 3L, containerMetrics);
-      assertCounter("numCreateContainer", 1L, containerMetrics);
-      assertCounter("numWriteChunk", 1L, containerMetrics);
-      assertCounter("numReadChunk", 1L, containerMetrics);
-      assertCounter("bytesWriteChunk", 1024L, containerMetrics);
-      assertCounter("bytesReadChunk", 1024L, containerMetrics);
-
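-      // Quantile gauges are only published after a full percentile interval
-      // has elapsed, so wait slightly longer than the configured interval.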
-      String sec = interval + "s";
-      Thread.sleep((interval + 1) * 1000);
-      assertQuantileGauges("WriteChunkNanos" + sec, containerMetrics);
-    } finally {
-      if (client != null) {
-        client.close();
-      }
-      if (server != null) {
-        server.stop();
-      }
-      // clean up volume dir
-      File file = new File(path);
-      if (file.exists()) {
-        FileUtil.fullyDelete(file);
-      }
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
deleted file mode 100644
index 70a88af..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ /dev/null
@@ -1,574 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.junit.rules.Timeout;
-import org.mockito.Mockito;
-
-import java.util.*;
-import java.util.concurrent.CompletableFuture;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-
-/**
- * Tests ozone containers.
- */
-public class TestOzoneContainer {
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  @Rule
-  public TemporaryFolder tempFolder = new TemporaryFolder();
-
-  @Test
-  public void testCreateOzoneContainer() throws Exception {
-    long containerID = ContainerTestHelper.getTestContainerID();
-    OzoneConfiguration conf = newOzoneConfiguration();
-    OzoneContainer container = null;
-    MiniOzoneCluster cluster = null;
-    try {
-      cluster = MiniOzoneCluster.newBuilder(conf).build();
-      cluster.waitForClusterToBeReady();
-      // We don't start the Ozone Container via the datanode; we start it
-      // independently in this test path.
-      Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
-      conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath());
-      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          pipeline.getFirstNode()
-              .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
-      conf.setBoolean(
-          OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
-
-      DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-      StateContext context = Mockito.mock(StateContext.class);
-      DatanodeStateMachine dsm = Mockito.mock(DatanodeStateMachine.class);
-      Mockito.when(dsm.getDatanodeDetails()).thenReturn(datanodeDetails);
-      Mockito.when(context.getParent()).thenReturn(dsm);
-      container = new OzoneContainer(datanodeDetails, conf, context, null);
-      //Set scmId and manually start ozone container.
-      container.start(UUID.randomUUID().toString());
-
-      XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf);
-      client.connect();
-      createContainerForTesting(client, containerID);
-    } finally {
-      if (container != null) {
-        container.stop();
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-
-  @Test
-  public void testOzoneContainerStart() throws Exception {
-    OzoneConfiguration conf = newOzoneConfiguration();
-    MiniOzoneCluster cluster = null;
-    OzoneContainer container = null;
-
-    try {
-      cluster = MiniOzoneCluster.newBuilder(conf).build();
-      cluster.waitForClusterToBeReady();
-
-      Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
-      conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath());
-      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          pipeline.getFirstNode()
-              .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
-      conf.setBoolean(
-          OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
-
-
-      DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-      StateContext context = Mockito.mock(StateContext.class);
-      DatanodeStateMachine dsm = Mockito.mock(DatanodeStateMachine.class);
-      Mockito.when(dsm.getDatanodeDetails()).thenReturn(datanodeDetails);
-      Mockito.when(context.getParent()).thenReturn(dsm);
-      container = new OzoneContainer(datanodeDetails, conf,
-          context, null);
-
-      String scmId = UUID.randomUUID().toString();
-      container.start(scmId);
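-      // Starting an already-started container (and, below, stopping an
-      // already-stopped one) must be a no-op rather than an error.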
-      try {
-        container.start(scmId);
-      } catch (Exception e) {
-        Assert.fail();
-      }
-
-      container.stop();
-      try {
-        container.stop();
-      } catch (Exception e) {
-        Assert.fail();
-      }
-
-    } finally {
-      if (container != null) {
-        container.stop();
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-
-
-  static OzoneConfiguration newOzoneConfiguration() {
-    final OzoneConfiguration conf = new OzoneConfiguration();
-    return conf;
-  }
-
-  @Test
-  public void testOzoneContainerViaDataNode() throws Exception {
-    MiniOzoneCluster cluster = null;
-    try {
-      long containerID =
-          ContainerTestHelper.getTestContainerID();
-      OzoneConfiguration conf = newOzoneConfiguration();
-
-      // Start the Ozone container via datanode creation.
-
-      Pipeline pipeline =
-          ContainerTestHelper.createSingleNodePipeline();
-      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          pipeline.getFirstNode()
-              .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
-
-      cluster = MiniOzoneCluster.newBuilder(conf)
-          .setRandomContainerPort(false)
-          .build();
-      cluster.waitForClusterToBeReady();
-
-      // This client talks to ozone container via datanode.
-      XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf);
-
-      runTestOzoneContainerViaDataNode(containerID, client);
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-
-  static void runTestOzoneContainerViaDataNode(
-      long testContainerID, XceiverClientSpi client) throws Exception {
-    ContainerProtos.ContainerCommandRequestProto
-        request, writeChunkRequest, putBlockRequest,
-        updateRequest1, updateRequest2;
-    ContainerProtos.ContainerCommandResponseProto response,
-        updateResponse1, updateResponse2;
-    try {
-      client.connect();
-
-      Pipeline pipeline = client.getPipeline();
-      createContainerForTesting(client, testContainerID);
-      writeChunkRequest = writeChunkForContainer(client, testContainerID,
-          1024);
-
-      // Read Chunk
-      request = ContainerTestHelper.getReadChunkRequest(
-          pipeline, writeChunkRequest.getWriteChunk());
-
-      response = client.sendCommand(request);
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-
-      // Put Block
-      putBlockRequest = ContainerTestHelper.getPutBlockRequest(
-          pipeline, writeChunkRequest.getWriteChunk());
-
-
-      response = client.sendCommand(putBlockRequest);
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-
-      // Get Block
-      request = ContainerTestHelper.
-          getBlockRequest(pipeline, putBlockRequest.getPutBlock());
-      response = client.sendCommand(request);
-      int chunksCount = putBlockRequest.getPutBlock().getBlockData().
-          getChunksCount();
-      ContainerTestHelper.verifyGetBlock(request, response, chunksCount);
-
-
-      // Delete Block
-      request =
-          ContainerTestHelper.getDeleteBlockRequest(
-              pipeline, putBlockRequest.getPutBlock());
-      response = client.sendCommand(request);
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-
-      //Delete Chunk
-      request = ContainerTestHelper.getDeleteChunkRequest(
-          pipeline, writeChunkRequest.getWriteChunk());
-
-      response = client.sendCommand(request);
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-
-      //Update an existing container
-      Map<String, String> containerUpdate = new HashMap<String, String>();
-      containerUpdate.put("container_updated_key", "container_updated_value");
-      updateRequest1 = ContainerTestHelper.getUpdateContainerRequest(
-          testContainerID, containerUpdate);
-      updateResponse1 = client.sendCommand(updateRequest1);
-      Assert.assertNotNull(updateResponse1);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
-          updateResponse1.getResult());
-
-      //Update a non-existing container
-      long nonExistingContinerID =
-          ContainerTestHelper.getTestContainerID();
-      updateRequest2 = ContainerTestHelper.getUpdateContainerRequest(
-          nonExistingContinerID, containerUpdate);
-      updateResponse2 = client.sendCommand(updateRequest2);
-      Assert.assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND,
-          updateResponse2.getResult());
-    } finally {
-      if (client != null) {
-        client.close();
-      }
-    }
-  }
-
-  @Test
-  public void testBothGetandPutSmallFile() throws Exception {
-    MiniOzoneCluster cluster = null;
-    XceiverClientGrpc client = null;
-    try {
-      OzoneConfiguration conf = newOzoneConfiguration();
-      conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-          tempFolder.getRoot().getPath());
-      client = createClientForTesting(conf);
-      cluster = MiniOzoneCluster.newBuilder(conf)
-          .setRandomContainerPort(false)
-          .build();
-      cluster.waitForClusterToBeReady();
-      long containerID = ContainerTestHelper.getTestContainerID();
-      runTestBothGetandPutSmallFile(containerID, client);
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-
-  static void runTestBothGetandPutSmallFile(
-      long containerID, XceiverClientSpi client) throws Exception {
-    try {
-      client.connect();
-
-      createContainerForTesting(client, containerID);
-
-      BlockID blockId = ContainerTestHelper.getTestBlockID(containerID);
-      final ContainerProtos.ContainerCommandRequestProto smallFileRequest
-          = ContainerTestHelper.getWriteSmallFileRequest(
-          client.getPipeline(), blockId, 1024);
-      ContainerProtos.ContainerCommandResponseProto response
-          = client.sendCommand(smallFileRequest);
-      Assert.assertNotNull(response);
-
-      final ContainerProtos.ContainerCommandRequestProto getSmallFileRequest
-          = ContainerTestHelper.getReadSmallFileRequest(client.getPipeline(),
-          smallFileRequest.getPutSmallFile().getBlock());
-      response = client.sendCommand(getSmallFileRequest);
-      Assert.assertArrayEquals(
-          smallFileRequest.getPutSmallFile().getData().toByteArray(),
-          response.getGetSmallFile().getData().getData().toByteArray());
-    } finally {
-      if (client != null) {
-        client.close();
-      }
-    }
-  }
-
-
-
-  @Test
-  public void testCloseContainer() throws Exception {
-    MiniOzoneCluster cluster = null;
-    XceiverClientGrpc client = null;
-    ContainerProtos.ContainerCommandResponseProto response;
-    ContainerProtos.ContainerCommandRequestProto
-        writeChunkRequest, putBlockRequest, request;
-    try {
-
-      OzoneConfiguration conf = newOzoneConfiguration();
-      conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-          tempFolder.getRoot().getPath());
-      client = createClientForTesting(conf);
-      cluster = MiniOzoneCluster.newBuilder(conf)
-          .setRandomContainerPort(false)
-          .build();
-      cluster.waitForClusterToBeReady();
-      client.connect();
-
-      long containerID = ContainerTestHelper.getTestContainerID();
-      createContainerForTesting(client, containerID);
-      writeChunkRequest = writeChunkForContainer(client, containerID,
-          1024);
-
-
-      putBlockRequest = ContainerTestHelper.getPutBlockRequest(
-          client.getPipeline(), writeChunkRequest.getWriteChunk());
-      // Put block before closing.
-      response = client.sendCommand(putBlockRequest);
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
-          response.getResult());
-
-      // Close the container.
-      request = ContainerTestHelper.getCloseContainer(
-          client.getPipeline(), containerID);
-      response = client.sendCommand(request);
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-
-
-      // Assert that none of the write operations work after close.
-
-      // Write chunks should fail now.
-
-      response = client.sendCommand(writeChunkRequest);
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
-          response.getResult());
-
-      // Read chunk must work on a closed container.
-      request = ContainerTestHelper.getReadChunkRequest(client.getPipeline(),
-          writeChunkRequest.getWriteChunk());
-      response = client.sendCommand(request);
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-
-      // Put block will fail on a closed container.
-      response = client.sendCommand(putBlockRequest);
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
-          response.getResult());
-
-      // Get block must work on the closed container.
-      request = ContainerTestHelper.getBlockRequest(client.getPipeline(),
-          putBlockRequest.getPutBlock());
-      response = client.sendCommand(request);
-      int chunksCount = putBlockRequest.getPutBlock().getBlockData()
-          .getChunksCount();
-      ContainerTestHelper.verifyGetBlock(request, response, chunksCount);
-
-      // Delete block must fail on a closed container.
-      request =
-          ContainerTestHelper.getDeleteBlockRequest(client.getPipeline(),
-              putBlockRequest.getPutBlock());
-      response = client.sendCommand(request);
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
-          response.getResult());
-    } finally {
-      if (client != null) {
-        client.close();
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-
-  @Test
-  public void testDeleteContainer() throws Exception {
-    MiniOzoneCluster cluster = null;
-    XceiverClientGrpc client = null;
-    ContainerProtos.ContainerCommandResponseProto response;
-    ContainerProtos.ContainerCommandRequestProto request,
-        writeChunkRequest, putBlockRequest;
-    try {
-      OzoneConfiguration conf = newOzoneConfiguration();
-      conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-          tempFolder.getRoot().getPath());
-      client = createClientForTesting(conf);
-      cluster = MiniOzoneCluster.newBuilder(conf)
-          .setRandomContainerPort(false)
-          .build();
-      cluster.waitForClusterToBeReady();
-      client.connect();
-
-      long containerID = ContainerTestHelper.getTestContainerID();
-      createContainerForTesting(client, containerID);
-      writeChunkRequest = writeChunkForContainer(
-          client, containerID, 1024);
-
-      putBlockRequest = ContainerTestHelper.getPutBlockRequest(
-          client.getPipeline(), writeChunkRequest.getWriteChunk());
-      // Put block before deleting.
-      response = client.sendCommand(putBlockRequest);
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
-          response.getResult());
-
-      // The container cannot be deleted because the force flag is set to
-      // false and the container is still open.
-      request = ContainerTestHelper.getDeleteContainer(
-          client.getPipeline(), containerID, false);
-      response = client.sendCommand(request);
-
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.DELETE_ON_OPEN_CONTAINER,
-          response.getResult());
-
-      // The container can be deleted by setting the force flag, even
-      // without closing it first.
-      request = ContainerTestHelper.getDeleteContainer(
-          client.getPipeline(), containerID, true);
-      response = client.sendCommand(request);
-
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
-          response.getResult());
-
-    } finally {
-      if (client != null) {
-        client.close();
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-
-
-  // Runs a set of commands as async calls and verifies that the calls
-  // completed as expected.
-  static void runAsyncTests(
-      long containerID, XceiverClientSpi client) throws Exception {
-    try {
-      client.connect();
-
-      createContainerForTesting(client, containerID);
-      final List<CompletableFuture> computeResults = new LinkedList<>();
-      int requestCount = 1000;
-      // Create a bunch of Async calls from this test.
-      for (int x = 0; x < requestCount; x++) {
-        BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
-        final ContainerProtos.ContainerCommandRequestProto smallFileRequest
-            = ContainerTestHelper.getWriteSmallFileRequest(
-            client.getPipeline(), blockID, 1024);
-
-        CompletableFuture<ContainerProtos.ContainerCommandResponseProto>
-            response = client.sendCommandAsync(smallFileRequest).getResponse();
-        computeResults.add(response);
-      }
-
-      CompletableFuture<Void> combinedFuture =
-          CompletableFuture.allOf(computeResults.toArray(
-              new CompletableFuture[computeResults.size()]));
-      // Wait for all futures to complete.
-      combinedFuture.get();
-      // Assert that all futures are indeed done.
-      for (CompletableFuture future : computeResults) {
-        Assert.assertTrue(future.isDone());
-      }
-    } finally {
-      if (client != null) {
-        client.close();
-      }
-    }
-  }
-
-  @Test
-  public void testXcieverClientAsync() throws Exception {
-    MiniOzoneCluster cluster = null;
-    XceiverClientGrpc client = null;
-    try {
-      OzoneConfiguration conf = newOzoneConfiguration();
-      conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-          tempFolder.getRoot().getPath());
-      client = createClientForTesting(conf);
-      cluster = MiniOzoneCluster.newBuilder(conf)
-          .setRandomContainerPort(false)
-          .build();
-      cluster.waitForClusterToBeReady();
-      long containerID = ContainerTestHelper.getTestContainerID();
-      runAsyncTests(containerID, client);
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-
-  private static XceiverClientGrpc createClientForTesting(
-      OzoneConfiguration conf) throws Exception {
-    // Start the Ozone container via datanode creation.
-    Pipeline pipeline =
-        ContainerTestHelper.createSingleNodePipeline();
-    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-        pipeline.getFirstNode()
-            .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
-
-    // This client talks to ozone container via datanode.
-    return new XceiverClientGrpc(pipeline, conf);
-  }
-
-  public static void createContainerForTesting(XceiverClientSpi client,
-      long containerID) throws Exception {
-    // Create container
-    ContainerProtos.ContainerCommandRequestProto request =
-        ContainerTestHelper.getCreateContainerRequest(
-            containerID, client.getPipeline());
-    ContainerProtos.ContainerCommandResponseProto response =
-        client.sendCommand(request);
-    Assert.assertNotNull(response);
-  }
-
-  public static ContainerProtos.ContainerCommandRequestProto
-      writeChunkForContainer(XceiverClientSpi client,
-      long containerID, int dataLen) throws Exception {
-    // Write Chunk
-    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
-    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
-        ContainerTestHelper.getWriteChunkRequest(client.getPipeline(),
-            blockID, dataLen);
-    ContainerProtos.ContainerCommandResponseProto response =
-        client.sendCommand(writeChunkRequest);
-    Assert.assertNotNull(response);
-    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-    return writeChunkRequest;
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
deleted file mode 100644
index 8577156..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.RatisTestHelper;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.util.function.CheckedBiConsumer;
-import org.apache.ratis.util.CollectionUtils;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-
-/**
- * Tests ozone containers with Apache Ratis.
- */
-@Ignore("Disabling Ratis tests for pipeline work.")
-public class TestOzoneContainerRatis {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      TestOzoneContainerRatis.class);
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  static OzoneConfiguration newOzoneConfiguration() {
-    return new OzoneConfiguration();
-  }
-
-  private static void runTestOzoneContainerViaDataNodeRatis(
-      RpcType rpc, int numNodes) throws Exception {
-    runTest("runTestOzoneContainerViaDataNodeRatis", rpc, numNodes,
-        TestOzoneContainer::runTestOzoneContainerViaDataNode);
-  }
-
-  private static void runTest(
-      String testName, RpcType rpc, int numNodes,
-      CheckedBiConsumer<Long, XceiverClientSpi, Exception> test)
-      throws Exception {
-    LOG.info(testName + "(rpc=" + rpc + ", numNodes=" + numNodes + ")");
-
-    // create Ozone clusters
-    final OzoneConfiguration conf = newOzoneConfiguration();
-    RatisTestHelper.initRatisConf(rpc, conf);
-    final MiniOzoneCluster cluster =
-        MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(numNodes)
-        .build();
-    try {
-      cluster.waitForClusterToBeReady();
-
-      final String containerName = OzoneUtils.getRequestID();
-      final List<HddsDatanodeService> datanodes = cluster.getHddsDatanodes();
-      final Pipeline pipeline = ContainerTestHelper.createPipeline(
-          CollectionUtils.as(datanodes,
-              HddsDatanodeService::getDatanodeDetails));
-      LOG.info("pipeline=" + pipeline);
-
-      // Create Ratis cluster
-//      final String ratisId = "ratis1";
-//      final PipelineManager manager = RatisManagerImpl.newRatisManager(conf);
-//      manager.createPipeline(ratisId, pipeline.getNodes());
-//      LOG.info("Created RatisCluster " + ratisId);
-//
-//      // check Ratis cluster members
-//      final List<DatanodeDetails> dns = manager.getMembers(ratisId);
-//      Assert.assertEquals(pipeline.getNodes(), dns);
-//
-//      // run test
-//      final XceiverClientSpi client = XceiverClientRatis
-// .newXceiverClientRatis(
-//          pipeline, conf);
-//      test.accept(containerName, client);
-    } finally {
-      cluster.shutdown();
-    }
-  }
-
-  private static void runTestBothGetandPutSmallFileRatis(
-      RpcType rpc, int numNodes) throws Exception {
-    runTest("runTestBothGetandPutSmallFileRatis", rpc, numNodes,
-        TestOzoneContainer::runTestBothGetandPutSmallFile);
-  }
-
-  @Test
-  public void testOzoneContainerViaDataNodeRatisGrpc() throws Exception {
-    runTestOzoneContainerViaDataNodeRatis(SupportedRpcType.GRPC, 1);
-    runTestOzoneContainerViaDataNodeRatis(SupportedRpcType.GRPC, 3);
-  }
-
-  @Test
-  public void testOzoneContainerViaDataNodeRatisNetty() throws Exception {
-    runTestOzoneContainerViaDataNodeRatis(SupportedRpcType.NETTY, 1);
-    runTestOzoneContainerViaDataNodeRatis(SupportedRpcType.NETTY, 3);
-  }
-
-  @Test
-  public void testBothGetandPutSmallFileRatisNetty() throws Exception {
-    runTestBothGetandPutSmallFileRatis(SupportedRpcType.NETTY, 1);
-    runTestBothGetandPutSmallFileRatis(SupportedRpcType.NETTY, 3);
-  }
-
-  @Test
-  public void testBothGetandPutSmallFileRatisGrpc() throws Exception {
-    runTestBothGetandPutSmallFileRatis(SupportedRpcType.GRPC, 1);
-    runTestBothGetandPutSmallFileRatis(SupportedRpcType.GRPC, 3);
-  }
-
-}
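TestOzoneContainerRatis routes every scenario through runTest, receiving the test body as an org.apache.ratis.util.function.CheckedBiConsumer so that method references such as TestOzoneContainer::runTestOzoneContainerViaDataNode may throw checked exceptions. A self-contained sketch of that indirection (all names except CheckedBiConsumer are illustrative):

    import org.apache.ratis.util.function.CheckedBiConsumer;

    // Sketch only, not part of this patch: passing a test body that may
    // throw checked exceptions, as the removed Ratis tests did.
    public class CheckedBiConsumerSketch {
      static void runScenario(String name,
          CheckedBiConsumer<Long, String, Exception> body) throws Exception {
        // shared setup for every scenario would go here
        body.accept(42L, name);  // invoke the injected test body
        // shared teardown would go here
      }

      public static void main(String[] args) throws Exception {
        runScenario("demo", (id, label) -> {
          if (id < 0) {
            throw new Exception("bad id");  // checked exceptions propagate
          }
          System.out.println(label + " ran against container " + id);
        });
      }
    }

A plain java.util.function.BiConsumer would not compile here, since its accept method declares no checked exceptions.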
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
deleted file mode 100644
index 30a2593..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.CertificateClientTestImpl;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.*;
-import org.junit.rules.TemporaryFolder;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-
-/**
- * Tests ozone containers via secure grpc/netty.
- */
-@RunWith(Parameterized.class)
-@Ignore("TODO:HDDS-1157")
-public class TestOzoneContainerWithTLS {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      TestOzoneContainerWithTLS.class);
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  @Rule
-  public TemporaryFolder tempFolder = new TemporaryFolder();
-
-  private OzoneConfiguration conf;
-  private OzoneBlockTokenSecretManager secretManager;
-  private CertificateClientTestImpl caClient;
-  private boolean blockTokenEnabled;
-
-  public TestOzoneContainerWithTLS(boolean blockTokenEnabled) {
-    this.blockTokenEnabled = blockTokenEnabled;
-  }
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> enableBlockToken() {
-    return Arrays.asList(new Object[][] {
-        {false},
-        {true}
-    });
-  }
-
-  @Before
-  public void setup() throws Exception {
-    conf = new OzoneConfiguration();
-    String ozoneMetaPath =
-        GenericTestUtils.getTempPath("ozoneMeta");
-    File ozoneMetaFile = new File(ozoneMetaPath);
-    conf.set(OZONE_METADATA_DIRS, ozoneMetaPath);
-
-    FileUtil.fullyDelete(ozoneMetaFile);
-    String keyDirName = conf.get(HDDS_KEY_DIR_NAME,
-        HDDS_KEY_DIR_NAME_DEFAULT);
-
-    File ozoneKeyDir = new File(ozoneMetaFile, keyDirName);
-    ozoneKeyDir.mkdirs();
-    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-    conf.setBoolean(HddsConfigKeys.HDDS_GRPC_TLS_ENABLED, true);
-
-    conf.setBoolean(HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT, true);
-
-    long expiryTime = conf.getTimeDuration(
-        HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME,
-        HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME_DEFAULT,
-        TimeUnit.MILLISECONDS);
-
-    caClient = new CertificateClientTestImpl(conf);
-    secretManager = new OzoneBlockTokenSecretManager(new SecurityConfig(conf),
-        expiryTime, caClient.getCertificate().
-        getSerialNumber().toString());
-  }
-
-  @Test
-  public void testCreateOzoneContainer() throws Exception {
-    LOG.info("testCreateOzoneContainer with TLS and blockToken enabled: {}",
-        blockTokenEnabled);
-    conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED,
-        blockTokenEnabled);
-
-    long containerID = ContainerTestHelper.getTestContainerID();
-    OzoneContainer container = null;
-    System.out.println(System.getProperties().getProperty("java.library.path"));
-    DatanodeDetails dn = TestUtils.randomDatanodeDetails();
-    try {
-      Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
-      conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath());
-      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE)
-              .getValue());
-      conf.setBoolean(
-          OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
-
-      container = new OzoneContainer(dn, conf, getContext(dn), caClient);
-      //Set scmId and manually start ozone container.
-      container.start(UUID.randomUUID().toString());
-
-      XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf,
-          caClient.getCACertificate());
-
-      if (blockTokenEnabled) {
-        secretManager.start(caClient);
-        Token<OzoneBlockTokenIdentifier> token = secretManager.generateToken(
-            "123", EnumSet.allOf(
-                HddsProtos.BlockTokenSecretProto.AccessModeProto.class),
-            RandomUtils.nextLong());
-        client.connect(token.encodeToUrlString());
-        createSecureContainerForTesting(client, containerID, token);
-      } else {
-        client.connect();
-        createContainerForTesting(client, containerID);
-      }
-    } finally {
-      if (container != null) {
-        container.stop();
-      }
-    }
-  }
-
-  public static void createContainerForTesting(XceiverClientSpi client,
-      long containerID) throws Exception {
-    ContainerProtos.ContainerCommandRequestProto request =
-        ContainerTestHelper.getCreateContainerRequest(
-            containerID, client.getPipeline());
-    ContainerProtos.ContainerCommandResponseProto response =
-        client.sendCommand(request);
-    Assert.assertNotNull(response);
-  }
-
-  public static void createSecureContainerForTesting(XceiverClientSpi client,
-      long containerID, Token<OzoneBlockTokenIdentifier> token)
-      throws Exception {
-    ContainerProtos.ContainerCommandRequestProto request =
-        ContainerTestHelper.getCreateContainerSecureRequest(
-            containerID, client.getPipeline(), token);
-    ContainerProtos.ContainerCommandResponseProto response =
-        client.sendCommand(request);
-    Assert.assertNotNull(response);
-  }
-
-
-  private StateContext getContext(DatanodeDetails datanodeDetails) {
-    DatanodeStateMachine stateMachine = Mockito.mock(
-        DatanodeStateMachine.class);
-    StateContext context = Mockito.mock(StateContext.class);
-    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
-    Mockito.when(context.getParent()).thenReturn(stateMachine);
-    return context;
-  }
-}
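TestOzoneContainerWithTLS runs its entire body twice, toggling blockTokenEnabled through JUnit 4's Parameterized runner. Stripped to its skeleton (class and method names here are illustrative):

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    // Sketch only, not part of this patch: the JUnit 4 parameterization
    // idiom that ran the TLS test with block tokens off, then on.
    @RunWith(Parameterized.class)
    public class BlockTokenToggleSketch {
      private final boolean blockTokenEnabled;

      public BlockTokenToggleSketch(boolean blockTokenEnabled) {
        this.blockTokenEnabled = blockTokenEnabled;  // one instance per row
      }

      @Parameterized.Parameters
      public static Collection<Object[]> enableBlockToken() {
        return Arrays.asList(new Object[][] {{false}, {true}});
      }

      @Test
      public void runsOncePerParameter() {
        System.out.println("blockTokenEnabled=" + blockTokenEnabled);
      }
    }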
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java
deleted file mode 100644
index 1e78ec6..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.RatisTestHelper;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.stream.Collectors;
-
-/**
- * Tests ozone containers with Apache Ratis.
- */
-@Ignore("Disabling Ratis tests for pipeline work.")
-public class TestRatisManager {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      TestRatisManager.class);
-
-  static OzoneConfiguration newOzoneConfiguration() {
-    return new OzoneConfiguration();
-  }
-
-
-  /** Set the timeout for every test. */
-  @Rule
-  public Timeout testTimeout = new Timeout(200_000);
-
-  @Test
-  public void testTestRatisManagerGrpc() throws Exception {
-    runTestRatisManager(SupportedRpcType.GRPC);
-  }
-
-  @Test
-  public void testTestRatisManagerNetty() throws Exception {
-    runTestRatisManager(SupportedRpcType.NETTY);
-  }
-
-  private static void runTestRatisManager(RpcType rpc) throws Exception {
-    LOG.info("runTestRatisManager, rpc=" + rpc);
-
-    // create Ozone clusters
-    final OzoneConfiguration conf = newOzoneConfiguration();
-    RatisTestHelper.initRatisConf(rpc, conf);
-    final MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(5)
-        .build();
-    try {
-      cluster.waitForClusterToBeReady();
-
-      final List<HddsDatanodeService> datanodes = cluster.getHddsDatanodes();
-      final List<DatanodeDetails> datanodeDetailsSet = datanodes.stream()
-          .map(HddsDatanodeService::getDatanodeDetails).collect(
-              Collectors.toList());
-
-      //final RatisManager manager = RatisManager.newRatisManager(conf);
-
-      final int[] idIndex = {3, 4, 5};
-      for (int i = 0; i < idIndex.length; i++) {
-        final int previous = i == 0 ? 0 : idIndex[i - 1];
-        final List<DatanodeDetails> subIds = datanodeDetailsSet.subList(
-            previous, idIndex[i]);
-
-        // Create Ratis cluster
-        final String ratisId = "ratis" + i;
-        //manager.createRatisCluster(ratisId, subIds);
-        LOG.info("Created RatisCluster " + ratisId);
-
-        // check Ratis cluster members
-        //final List<DatanodeDetails> dns = manager.getMembers(ratisId);
-        //Assert.assertEquals(subIds, dns);
-      }
-
-      // randomly close two of the clusters
-      final int chosen = ThreadLocalRandom.current().nextInt(idIndex.length);
-      LOG.info("chosen = " + chosen);
-
-      for (int i = 0; i < idIndex.length; i++) {
-        if (i != chosen) {
-          final String ratisId = "ratis" + i;
-          //manager.closeRatisCluster(ratisId);
-        }
-      }
-
-      // update datanodes
-      final String ratisId = "ratis" + chosen;
-      //manager.updatePipeline(ratisId, allIds);
-
-      // check Ratis cluster members
-      //final List<DatanodeDetails> dns = manager.getMembers(ratisId);
-      //Assert.assertEquals(allIds, dns);
-    } finally {
-      cluster.shutdown();
-    }
-  }
-
-}
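The idIndex loop in runTestRatisManager partitions five datanodes by cumulative end offsets: previous starts at 0, and each subList(previous, idIndex[i]) takes the next slice, yielding groups of sizes 3, 1 and 1. A standalone sketch of the arithmetic (names are illustrative):

    import java.util.Arrays;
    import java.util.List;

    // Sketch only, not part of this patch: the cumulative-offset
    // partitioning used by the removed TestRatisManager.
    public class SubListPartitionSketch {
      public static void main(String[] args) {
        List<String> nodes = Arrays.asList("dn0", "dn1", "dn2", "dn3", "dn4");
        int[] idIndex = {3, 4, 5};  // cumulative end offsets into the list
        for (int i = 0; i < idIndex.length; i++) {
          int previous = i == 0 ? 0 : idIndex[i - 1];
          List<String> subIds = nodes.subList(previous, idIndex[i]);
          // prints ratis0 -> [dn0, dn1, dn2], ratis1 -> [dn3], ratis2 -> [dn4]
          System.out.println("ratis" + i + " -> " + subIds);
        }
      }
    }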
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
deleted file mode 100644
index fca449b..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.CertificateClientTestImpl;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.security.PrivilegedAction;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Tests ozone containers via secure grpc/netty.
- */
-@RunWith(Parameterized.class)
-public class TestSecureOzoneContainer {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      TestSecureOzoneContainer.class);
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  @Rule
-  public TemporaryFolder tempFolder = new TemporaryFolder();
-
-  private OzoneConfiguration conf;
-  private SecurityConfig secConfig;
-  private Boolean requireBlockToken;
-  private Boolean hasBlockToken;
-  private Boolean blockTokenExpired;
-  private CertificateClientTestImpl caClient;
-  private OzoneBlockTokenSecretManager secretManager;
-
-
-  public TestSecureOzoneContainer(Boolean requireBlockToken,
-      Boolean hasBlockToken, Boolean blockTokenExpired) {
-    this.requireBlockToken = requireBlockToken;
-    this.hasBlockToken = hasBlockToken;
-    this.blockTokenExpired = blockTokenExpired;
-  }
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> blockTokenOptions() {
-    return Arrays.asList(new Object[][] {
-        {true, true, false},
-        {true, true, true},
-        {true, false, false},
-        {false, true, false},
-        {false, false, false}});
-  }
-
-  @Before
-  public void setup() throws Exception {
-    DefaultMetricsSystem.setMiniClusterMode(true);
-    conf = new OzoneConfiguration();
-    String ozoneMetaPath =
-        GenericTestUtils.getTempPath("ozoneMeta");
-    conf.set(OZONE_METADATA_DIRS, ozoneMetaPath);
-    secConfig = new SecurityConfig(conf);
-    caClient = new CertificateClientTestImpl(conf);
-    secretManager = new OzoneBlockTokenSecretManager(new SecurityConfig(conf),
-        60 * 60 * 24, caClient.getCertificate().
-        getSerialNumber().toString());
-  }
-
-  @Test
-  public void testCreateOzoneContainer() throws Exception {
-    LOG.info("Test case: requireBlockToken: {} hasBlockToken: {} " +
-        "blockTokenExpired: {}.", requireBlockToken, hasBlockToken,
-        blockTokenExpired);
-    conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED,
-        requireBlockToken);
-
-    long containerID = ContainerTestHelper.getTestContainerID();
-    OzoneContainer container = null;
-    System.out.println(System.getProperties().getProperty("java.library.path"));
-    try {
-      Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
-      conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath());
-      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline
-          .getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE)
-          .getValue());
-      conf.setBoolean(
-          OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
-
-      DatanodeDetails dn = TestUtils.randomDatanodeDetails();
-      container = new OzoneContainer(dn, conf, getContext(dn), caClient);
-      //Set scmId and manually start ozone container.
-      container.start(UUID.randomUUID().toString());
-
-      UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
-          "user1",  new String[] {"usergroup"});
-      long expiryDate = (blockTokenExpired) ?
-          Time.now() - 60 * 60 * 2 : Time.now() + 60 * 60 * 24;
-
-      OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier(
-          "testUser", "cid:lud:bcsid",
-          EnumSet.allOf(AccessModeProto.class),
-          expiryDate, "1234", 128L);
-
-      int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
-      if (port == 0) {
-        port = secConfig.getConfiguration().getInt(OzoneConfigKeys
-                .DFS_CONTAINER_IPC_PORT, DFS_CONTAINER_IPC_PORT_DEFAULT);
-      }
-      secretManager.start(caClient);
-      Token<OzoneBlockTokenIdentifier> token = secretManager.generateToken(
-          "123", EnumSet.allOf(AccessModeProto.class), RandomUtils.nextLong());
-      if (hasBlockToken) {
-        ugi.addToken(token);
-      }
-
-      ugi.doAs((PrivilegedAction<Void>) () -> {
-        try {
-          XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf);
-          client.connect(token.encodeToUrlString());
-          if (hasBlockToken) {
-            createContainerForTesting(client, containerID, token);
-          } else {
-            createContainerForTesting(client, containerID, null);
-          }
-
-        } catch (Exception e) {
-          if (requireBlockToken && hasBlockToken && !blockTokenExpired) {
-            LOG.error("Unexpected error.", e);
-            fail("Client with BlockToken should succeed when block token is" +
-                " required.");
-          }
-          if (requireBlockToken && hasBlockToken && blockTokenExpired) {
-            assertTrue("Received expected exception",
-                e instanceof SCMSecurityException);
-          }
-          if (requireBlockToken && !hasBlockToken) {
-            assertTrue("Received expected exception", e instanceof
-                IOException);
-          }
-        }
-        return null;
-      });
-    } finally {
-      if (container != null) {
-        container.stop();
-      }
-    }
-  }
-
-  public static void createContainerForTesting(XceiverClientSpi client,
-      long containerID, Token token) throws Exception {
-    // Create container
-    ContainerProtos.ContainerCommandRequestProto request =
-        ContainerTestHelper.getCreateContainerSecureRequest(
-            containerID, client.getPipeline(), token);
-    ContainerProtos.ContainerCommandResponseProto response =
-        client.sendCommand(request);
-    Assert.assertNotNull(response);
-  }
-
-  private StateContext getContext(DatanodeDetails datanodeDetails) {
-    DatanodeStateMachine stateMachine = Mockito.mock(
-        DatanodeStateMachine.class);
-    StateContext context = Mockito.mock(StateContext.class);
-    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
-    Mockito.when(context.getParent()).thenReturn(stateMachine);
-    return context;
-  }
-}
\ No newline at end of file
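Both secure-container tests stub out the datanode state machine identically: OzoneContainer only asks the StateContext for its parent and the parent for its DatanodeDetails, so two Mockito stubs are enough. The helper, extracted as a compilable sketch (the wrapper class name is an illustrative assumption; the stubbing mirrors the removed getContext methods):

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
    import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
    import org.mockito.Mockito;

    // Sketch only, not part of this patch: the minimal StateContext mock
    // the removed tests handed to OzoneContainer.
    public final class StateContextMockSketch {
      private StateContextMockSketch() {
      }

      static StateContext getContext(DatanodeDetails datanodeDetails) {
        DatanodeStateMachine stateMachine =
            Mockito.mock(DatanodeStateMachine.class);
        StateContext context = Mockito.mock(StateContext.class);
        // OzoneContainer performs exactly these two lookups on the context.
        Mockito.when(stateMachine.getDatanodeDetails())
            .thenReturn(datanodeDetails);
        Mockito.when(context.getParent()).thenReturn(stateMachine);
        return context;
      }
    }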
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
deleted file mode 100644
index 59d741d..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.server;
-
-import com.google.common.collect.Maps;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
-import org.apache.hadoop.ozone.container.replication.GrpcReplicationService;
-import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.RatisTestHelper;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
-import org.apache.hadoop.hdds.scm.XceiverClientRatis;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.util.function.CheckedBiConsumer;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
-import static org.apache.ratis.rpc.SupportedRpcType.NETTY;
-import static org.mockito.Mockito.mock;
-
-/**
- * Test Containers.
- */
-@Ignore("Takes too long to run this test. Ignoring for time being.")
-public class TestContainerServer {
-  static final String TEST_DIR = GenericTestUtils.getTestDir("dfs")
-      .getAbsolutePath() + File.separator;
-  private static final OzoneConfiguration CONF = new OzoneConfiguration();
-  private static CertificateClient caClient;
-
-  private GrpcReplicationService createReplicationService(
-      ContainerController containerController) {
-    return new GrpcReplicationService(
-        new OnDemandContainerReplicationSource(containerController));
-  }
-
-  @BeforeClass
-  public static void setup() {
-    CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR);
-    caClient = new DNCertificateClient(new SecurityConfig(CONF));
-  }
-
-  @Test
-  public void testClientServer() throws Exception {
-    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-    ContainerSet containerSet = new ContainerSet();
-    ContainerController controller = new ContainerController(
-        containerSet, null);
-    runTestClientServer(1, (pipeline, conf) -> conf
-            .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-                pipeline.getFirstNode()
-                    .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()),
-        XceiverClientGrpc::new,
-        (dn, conf) -> new XceiverServerGrpc(datanodeDetails, conf,
-            new TestContainerDispatcher(), caClient,
-            createReplicationService(controller)), (dn, p) -> {
-        });
-  }
-
-  @FunctionalInterface
-  interface CheckedBiFunction<LEFT, RIGHT, OUT, THROWABLE extends Throwable> {
-    OUT apply(LEFT left, RIGHT right) throws THROWABLE;
-  }
-
-  @Test
-  public void testClientServerRatisNetty() throws Exception {
-    runTestClientServerRatis(NETTY, 1);
-    runTestClientServerRatis(NETTY, 3);
-  }
-
-  @Test
-  public void testClientServerRatisGrpc() throws Exception {
-    runTestClientServerRatis(GRPC, 1);
-    runTestClientServerRatis(GRPC, 3);
-  }
-
-  static XceiverServerRatis newXceiverServerRatis(
-      DatanodeDetails dn, OzoneConfiguration conf) throws IOException {
-    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
-        dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue());
-    final String dir = TEST_DIR + dn.getUuid();
-    conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
-
-    final ContainerDispatcher dispatcher = new TestContainerDispatcher();
-    return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher,
-        new ContainerController(new ContainerSet(), Maps.newHashMap()),
-        caClient, null);
-  }
-
-  static void runTestClientServerRatis(RpcType rpc, int numNodes)
-      throws Exception {
-    runTestClientServer(numNodes,
-        (pipeline, conf) -> RatisTestHelper.initRatisConf(rpc, conf),
-        XceiverClientRatis::newXceiverClientRatis,
-        TestContainerServer::newXceiverServerRatis,
-        (dn, p) -> RatisTestHelper.initXceiverServerRatis(rpc, dn, p));
-  }
-
-  static void runTestClientServer(
-      int numDatanodes,
-      CheckedBiConsumer<Pipeline, OzoneConfiguration, IOException> initConf,
-      CheckedBiFunction<Pipeline, OzoneConfiguration, XceiverClientSpi,
-          IOException> createClient,
-      CheckedBiFunction<DatanodeDetails, OzoneConfiguration, XceiverServerSpi,
-          IOException> createServer,
-      CheckedBiConsumer<DatanodeDetails, Pipeline, IOException> initServer)
-      throws Exception {
-    final List<XceiverServerSpi> servers = new ArrayList<>();
-    XceiverClientSpi client = null;
-    String containerName = OzoneUtils.getRequestID();
-    try {
-      final Pipeline pipeline =
-          ContainerTestHelper.createPipeline(numDatanodes);
-      initConf.accept(pipeline, CONF);
-
-      for (DatanodeDetails dn : pipeline.getNodes()) {
-        final XceiverServerSpi s = createServer.apply(dn, CONF);
-        servers.add(s);
-        s.start();
-        initServer.accept(dn, pipeline);
-      }
-
-      client = createClient.apply(pipeline, CONF);
-      client.connect();
-
-      final ContainerCommandRequestProto request =
-          ContainerTestHelper
-              .getCreateContainerRequest(
-                  ContainerTestHelper.getTestContainerID(), pipeline);
-      Assert.assertNotNull(request.getTraceID());
-
-      ContainerCommandResponseProto response = client.sendCommand(request);
-    } finally {
-      if (client != null) {
-        client.close();
-      }
-      servers.stream().forEach(XceiverServerSpi::stop);
-    }
-  }
-
-  @Test
-  public void testClientServerWithContainerDispatcher() throws Exception {
-    XceiverServerGrpc server = null;
-    XceiverClientGrpc client = null;
-    UUID scmId = UUID.randomUUID();
-    try {
-      Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
-      OzoneConfiguration conf = new OzoneConfiguration();
-      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          pipeline.getFirstNode()
-              .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
-
-      ContainerSet containerSet = new ContainerSet();
-      VolumeSet volumeSet = mock(VolumeSet.class);
-      ContainerMetrics metrics = ContainerMetrics.create(conf);
-      Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
-      DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-      DatanodeStateMachine stateMachine = Mockito.mock(
-          DatanodeStateMachine.class);
-      StateContext context = Mockito.mock(StateContext.class);
-      Mockito.when(stateMachine.getDatanodeDetails())
-          .thenReturn(datanodeDetails);
-      Mockito.when(context.getParent()).thenReturn(stateMachine);
-
-
-      for (ContainerProtos.ContainerType containerType :
-          ContainerProtos.ContainerType.values()) {
-        handlers.put(containerType,
-            Handler.getHandlerForContainerType(containerType, conf, context,
-                containerSet, volumeSet, metrics));
-      }
-      HddsDispatcher dispatcher = new HddsDispatcher(
-          conf, containerSet, volumeSet, handlers, context, metrics);
-      dispatcher.setScmId(scmId.toString());
-      dispatcher.init();
-
-      server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher,
-          caClient, createReplicationService(
-              new ContainerController(containerSet, null)));
-      client = new XceiverClientGrpc(pipeline, conf);
-
-      server.start();
-      client.connect();
-
-      ContainerCommandRequestProto request =
-          ContainerTestHelper.getCreateContainerRequest(
-              ContainerTestHelper.getTestContainerID(), pipeline);
-      ContainerCommandResponseProto response = client.sendCommand(request);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-    } finally {
-      if (client != null) {
-        client.close();
-      }
-      if (server != null) {
-        server.stop();
-      }
-    }
-  }
-
-  private static class TestContainerDispatcher implements ContainerDispatcher {
-    /**
-     * Dispatches commands to container layer.
-     *
-     * @param msg - Command Request
-     * @return Command Response
-     */
-    @Override
-    public ContainerCommandResponseProto dispatch(
-        ContainerCommandRequestProto msg,
-        DispatcherContext context) {
-      return ContainerTestHelper.getCreateContainerResponse(msg);
-    }
-
-    @Override
-    public void init() {
-    }
-
-    @Override
-    public void validateContainerCommand(
-        ContainerCommandRequestProto msg) throws StorageContainerException {
-    }
-
-    @Override
-    public void shutdown() {
-    }
-    @Override
-    public Handler getHandler(ContainerProtos.ContainerType containerType) {
-      return null;
-    }
-
-    @Override
-    public void setScmId(String scmId) {
-
-    }
-
-    @Override
-    public void buildMissingContainerSetAndValidate(
-        Map<Long, Long> container2BCSIDMap) {
-    }
-  }
-}
\ No newline at end of file
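TestContainerServer declares a local CheckedBiFunction because java.util.function.BiFunction cannot declare checked exceptions, and the client and server factories passed into runTestClientServer throw IOException. A compact sketch of the pattern (everything except the interface shape is illustrative):

    // Sketch only, not part of this patch: a BiFunction variant whose
    // apply method may throw, as declared in the removed server tests.
    public class CheckedBiFunctionSketch {
      @FunctionalInterface
      interface CheckedBiFunction<LEFT, RIGHT, OUT, THROWABLE extends Throwable> {
        OUT apply(LEFT left, RIGHT right) throws THROWABLE;
      }

      public static void main(String[] args) throws Exception {
        CheckedBiFunction<String, Integer, String, Exception> factory =
            (host, port) -> {
              if (port <= 0) {
                throw new Exception("invalid port " + port);
              }
              return host + ":" + port;  // stand-in for building a client
            };
        System.out.println(factory.apply("localhost", 9859));
      }
    }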
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
deleted file mode 100644
index cfee1a6..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.server;
-
-import com.google.common.collect.Maps;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.commons.lang3.exception.ExceptionUtils;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
-import org.apache.hadoop.hdds.scm.XceiverClientRatis;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.RatisTestHelper;
-import org.apache.hadoop.ozone.client.CertificateClientTestImpl;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
-import org.apache.hadoop.ozone.container.replication.GrpcReplicationService;
-import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource;
-import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
-
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.util.function.CheckedBiConsumer;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.SUCCESS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-import static org.apache.hadoop.ozone.container.ContainerTestHelper.getCreateContainerRequest;
-import static org.apache.hadoop.ozone.container.ContainerTestHelper.getTestContainerID;
-import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
-import static org.apache.ratis.rpc.SupportedRpcType.NETTY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test Container servers when security is enabled.
- */
-public class TestSecureContainerServer {
-  static final String TEST_DIR
-      = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator;
-  private static final OzoneConfiguration CONF = new OzoneConfiguration();
-  private static CertificateClientTestImpl caClient;
-
-  private GrpcReplicationService createReplicationService(
-      ContainerController containerController) {
-    return new GrpcReplicationService(
-        new OnDemandContainerReplicationSource(containerController));
-  }
-
-  @BeforeClass
-  public static void setup() throws Exception {
-    DefaultMetricsSystem.setMiniClusterMode(true);
-    CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR);
-    CONF.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-    CONF.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true);
-    caClient = new CertificateClientTestImpl(CONF);
-  }
-
-  @Test
-  public void testClientServer() throws Exception {
-    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-    ContainerSet containerSet = new ContainerSet();
-    ContainerController controller = new ContainerController(
-        containerSet, null);
-    runTestClientServer(1, (pipeline, conf) -> conf
-            .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-                pipeline.getFirstNode()
-                    .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()),
-        XceiverClientGrpc::new,
-        (dn, conf) -> new XceiverServerGrpc(datanodeDetails, conf,
-            new TestContainerDispatcher(), caClient,
-            createReplicationService(controller)), (dn, p) -> {
-        });
-  }
-
-  @FunctionalInterface
-  interface CheckedBiFunction<LEFT, RIGHT, OUT, THROWABLE extends Throwable> {
-    OUT apply(LEFT left, RIGHT right) throws THROWABLE;
-  }
-
-  @Test
-  public void testClientServerRatisGrpc() throws Exception {
-    runTestClientServerRatis(GRPC, 1);
-    runTestClientServerRatis(GRPC, 3);
-  }
-
-  @Test
-  @Ignore
-  public void testClientServerRatisNetty() throws Exception {
-    runTestClientServerRatis(NETTY, 1);
-    runTestClientServerRatis(NETTY, 3);
-  }
-
-  static XceiverServerRatis newXceiverServerRatis(
-      DatanodeDetails dn, OzoneConfiguration conf) throws IOException {
-    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
-        dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue());
-    final String dir = TEST_DIR + dn.getUuid();
-    conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
-
-    final ContainerDispatcher dispatcher = new TestContainerDispatcher();
-    return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher,
-        new ContainerController(new ContainerSet(), Maps.newHashMap()),
-        caClient, null);
-  }
-
-  static void runTestClientServerRatis(RpcType rpc, int numNodes)
-      throws Exception {
-    runTestClientServer(numNodes,
-        (pipeline, conf) -> RatisTestHelper.initRatisConf(rpc, conf),
-        XceiverClientRatis::newXceiverClientRatis,
-        TestSecureContainerServer::newXceiverServerRatis,
-        (dn, p) -> RatisTestHelper.initXceiverServerRatis(rpc, dn, p));
-  }
-
-  static void runTestClientServer(
-      int numDatanodes,
-      CheckedBiConsumer<Pipeline, OzoneConfiguration, IOException> initConf,
-      CheckedBiFunction<Pipeline, OzoneConfiguration, XceiverClientSpi,
-          IOException> createClient,
-      CheckedBiFunction<DatanodeDetails, OzoneConfiguration, XceiverServerSpi,
-          IOException> createServer,
-      CheckedBiConsumer<DatanodeDetails, Pipeline, IOException> initServer)
-      throws Exception {
-    final List<XceiverServerSpi> servers = new ArrayList<>();
-    XceiverClientSpi client = null;
-    String containerName = OzoneUtils.getRequestID();
-    try {
-      final Pipeline pipeline =
-          ContainerTestHelper.createPipeline(numDatanodes);
-
-      initConf.accept(pipeline, CONF);
-
-      for (DatanodeDetails dn : pipeline.getNodes()) {
-        final XceiverServerSpi s = createServer.apply(dn, CONF);
-        servers.add(s);
-        s.start();
-        initServer.accept(dn, pipeline);
-      }
-
-      client = createClient.apply(pipeline, CONF);
-      client.connect();
-
-      // Test 1: Test failure in request without block token.
-      final ContainerCommandRequestProto request =
-          getCreateContainerRequest(
-                  getTestContainerID(), pipeline);
-      Assert.assertNotNull(request.getTraceID());
-
-      XceiverClientSpi finalClient = client;
-      // Validation is different for grpc and ratis client.
-      if (client instanceof XceiverClientGrpc) {
-        LambdaTestUtils.intercept(SCMSecurityException.class, "Failed to" +
-                " authenticate with GRPC XceiverServer with Ozone block token",
-            () -> finalClient.sendCommand(request));
-      } else {
-        IOException e = LambdaTestUtils.intercept(IOException.class,
-            () -> finalClient.sendCommand(request));
-        Throwable rootCause = ExceptionUtils.getRootCause(e);
-        String msg = rootCause.getMessage();
-        assertTrue(msg, msg.contains("Block token verification failed"));
-      }
-
-      // Test 2: Test success in request with valid block token.
-      long expiryTime = Time.monotonicNow() + 60 * 60 * 24;
-
-      String omCertSerialId =
-          caClient.getCertificate().getSerialNumber().toString();
-      OzoneBlockTokenSecretManager secretManager =
-          new OzoneBlockTokenSecretManager(new SecurityConfig(CONF),
-          expiryTime, omCertSerialId);
-      secretManager.start(caClient);
-      Token<OzoneBlockTokenIdentifier> token = secretManager.generateToken("1",
-          EnumSet.allOf(AccessModeProto.class), RandomUtils.nextLong());
-      final ContainerCommandRequestProto request2 =
-          ContainerTestHelper
-              .getCreateContainerSecureRequest(
-                  getTestContainerID(), pipeline,
-                  token);
-      Assert.assertNotNull(request2.getTraceID());
-      XceiverClientSpi finalClient2 = createClient.apply(pipeline, CONF);
-      if (finalClient2 instanceof XceiverClientGrpc) {
-        finalClient2.connect(token.encodeToUrlString());
-      } else {
-        finalClient2.connect();
-      }
-
-      ContainerCommandRequestProto request3 = getCreateContainerRequest(
-          getTestContainerID(), pipeline, token);
-      ContainerCommandResponseProto resp = finalClient2.sendCommand(request3);
-      assertEquals(SUCCESS, resp.getResult());
-    } finally {
-      if (client != null) {
-        client.close();
-      }
-      servers.stream().forEach(XceiverServerSpi::stop);
-    }
-  }
-
-  private static class TestContainerDispatcher implements ContainerDispatcher {
-    /**
-     * Dispatches commands to container layer.
-     *
-     * @param msg - Command Request
-     * @return Command Response
-     */
-    @Override
-    public ContainerCommandResponseProto dispatch(
-        ContainerCommandRequestProto msg,
-        DispatcherContext context) {
-      return ContainerTestHelper.getCreateContainerResponse(msg);
-    }
-
-    @Override
-    public void init() {
-    }
-
-    @Override
-    public void validateContainerCommand(
-        ContainerCommandRequestProto msg) throws StorageContainerException {
-    }
-
-    @Override
-    public void shutdown() {
-    }
-    @Override
-    public Handler getHandler(ContainerProtos.ContainerType containerType) {
-      return null;
-    }
-
-    @Override
-    public void setScmId(String scmId) {
-    }
-
-    @Override
-    public void buildMissingContainerSetAndValidate(
-        Map<Long, Long> container2BCSIDMap) {
-    }
-  }
-}
\ No newline at end of file
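The secure-server test authenticates by calling client.connect(token.encodeToUrlString()), so the block token travels to the GRPC server as a URL-safe string and is decoded there for verification. A minimal sketch of that round trip using only hadoop-common types (the token contents are illustrative):

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    // Sketch only, not part of this patch: the URL-safe token round trip
    // underlying connect(token.encodeToUrlString()) in the removed test.
    public class TokenRoundTripSketch {
      public static void main(String[] args) throws Exception {
        Token<TokenIdentifier> token = new Token<>(
            "identifier".getBytes(), "password".getBytes(),
            new Text("OzoneBlockToken"), new Text("service"));
        String encoded = token.encodeToUrlString();  // Base64, safe in headers
        Token<TokenIdentifier> decoded = new Token<>();
        decoded.decodeFromUrlString(encoded);        // restores an identical copy
        System.out.println(token.equals(decoded));   // prints: true
      }
    }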
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
deleted file mode 100644
index 7fb9825..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.dn.scrubber;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerMetadataScanner;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Set;
-import java.util.UUID;
-import java.io.File;
-
-import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
-
-/**
- * This class tests the data scrubber functionality.
- */
-public class TestDataScrubber {
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration ozoneConfig;
-  private static OzoneClient ozClient = null;
-  private static ObjectStore store = null;
-  private static OzoneManager ozoneManager;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-
-  @BeforeClass
-  public static void init() throws Exception {
-    ozoneConfig = new OzoneConfiguration();
-    ozoneConfig.set(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, "1s");
-    ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-        SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
-    cluster = MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1)
-        .build();
-    cluster.waitForClusterToBeReady();
-    ozClient = OzoneClientFactory.getRpcClient(ozoneConfig);
-    store = ozClient.getObjectStore();
-    ozoneManager = cluster.getOzoneManager();
-    storageContainerLocationClient =
-        cluster.getStorageContainerLocationClient();
-  }
-
-  @AfterClass
-  public static void shutdown() throws IOException {
-    if (ozClient != null) {
-      ozClient.close();
-    }
-    if (storageContainerLocationClient != null) {
-      storageContainerLocationClient.close();
-    }
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testOpenContainerIntegrity() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    long currentTime = Time.now();
-
-    String value = "sample value";
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
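-    // Write 10 small keys and verify contents, replication, and timestamps.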
-    for (int i = 0; i < 10; i++) {
-      String keyName = UUID.randomUUID().toString();
-
-      OzoneOutputStream out = bucket.createKey(keyName,
-          value.getBytes().length, STAND_ALONE,
-          ONE, new HashMap<>());
-      out.write(value.getBytes());
-      out.close();
-      OzoneKey key = bucket.getKey(keyName);
-      Assert.assertEquals(keyName, key.getName());
-      OzoneInputStream is = bucket.readKey(keyName);
-      byte[] fileContent = new byte[value.getBytes().length];
-      Assert.assertEquals(fileContent.length, is.read(fileContent));
-      is.close();
-      Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-          keyName, STAND_ALONE,
-          ONE));
-      Assert.assertEquals(value, new String(fileContent));
-      Assert.assertTrue(key.getCreationTime() >= currentTime);
-      Assert.assertTrue(key.getModificationTime() >= currentTime);
-    }
-
-    // wait for the container report to propagate to SCM
-    Thread.sleep(5000);
-
-
-    Assert.assertEquals(1, cluster.getHddsDatanodes().size());
-
-    HddsDatanodeService dn = cluster.getHddsDatanodes().get(0);
-    OzoneContainer oc = dn.getDatanodeStateMachine().getContainer();
-    ContainerSet cs = oc.getContainerSet();
-    Container c = cs.getContainerIterator().next();
-
-    Assert.assertTrue(cs.containerCount() > 0);
-
-    // delete the chunks directory.
-    File chunksDir = new File(c.getContainerData().getContainerPath(),
-        "chunks");
-    deleteDirectory(chunksDir);
-    Assert.assertFalse(chunksDir.exists());
-
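-    // Run the metadata scanner; with the chunks directory gone it should
-    // mark the container replica as unhealthy.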
-    ContainerScrubberConfiguration conf = ozoneConfig.getObject(
-        ContainerScrubberConfiguration.class);
-    ContainerMetadataScanner sb = new ContainerMetadataScanner(conf,
-        oc.getController());
-    sb.scrub(c);
-
-    // wait for the incremental container report to propagate to SCM
-    Thread.sleep(5000);
-
-    ContainerManager cm = cluster.getStorageContainerManager()
-        .getContainerManager();
-    Set<ContainerReplica> replicas = cm.getContainerReplicas(
-        ContainerID.valueof(c.getContainerData().getContainerID()));
-    Assert.assertEquals(1, replicas.size());
-    ContainerReplica r = replicas.iterator().next();
-    Assert.assertEquals(StorageContainerDatanodeProtocolProtos.
-        ContainerReplicaProto.State.UNHEALTHY, r.getState());
-  }
-
-  boolean deleteDirectory(File directoryToBeDeleted) {
-    File[] allContents = directoryToBeDeleted.listFiles();
-    if (allContents != null) {
-      for (File file : allContents) {
-        deleteDirectory(file);
-      }
-    }
-    return directoryToBeDeleted.delete();
-  }
-
-  private boolean verifyRatisReplication(String volumeName, String bucketName,
-                                         String keyName, ReplicationType type,
-                                         ReplicationFactor factor)
-      throws IOException {
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setRefreshPipeline(true)
-        .build();
-    HddsProtos.ReplicationType replicationType =
-        HddsProtos.ReplicationType.valueOf(type.toString());
-    HddsProtos.ReplicationFactor replicationFactor =
-        HddsProtos.ReplicationFactor.valueOf(factor.getValue());
-    OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs);
-    for (OmKeyLocationInfo info :
-        keyInfo.getLatestVersionLocations().getLocationList()) {
-      ContainerInfo container =
-          storageContainerLocationClient.getContainer(info.getContainerID());
-      if (!container.getReplicationFactor().equals(replicationFactor) || (
-          container.getReplicationType() != replicationType)) {
-        return false;
-      }
-    }
-    return true;
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/package-info.java
deleted file mode 100644
index 13d86ab..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.freon;
-/**
- * Classes related to Ozone Freon (load generator) tests.
- */
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
deleted file mode 100644
index ef49931..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import org.apache.commons.lang3.RandomStringUtils;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.HashMap;
-
-/**
- * This class tests container reports with datanode container state info.
- */
-public class TestContainerReportWithKeys {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      TestContainerReportWithKeys.class);
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneConfiguration conf;
-  private static StorageContainerManager scm;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    scm = cluster.getStorageContainerManager();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testContainerReportKeyWrite() throws Exception {
-    final String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    final String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    final String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    final int keySize = 100;
-
-    OzoneClient client = OzoneClientFactory.getClient(conf);
-    ObjectStore objectStore = client.getObjectStore();
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-    OzoneOutputStream key =
-        objectStore.getVolume(volumeName).getBucket(bucketName)
-            .createKey(keyName, keySize, ReplicationType.STAND_ALONE,
-                ReplicationFactor.ONE, new HashMap<>());
-    String dataString = RandomStringUtils.randomAlphabetic(keySize);
-    key.write(dataString.getBytes());
-    key.close();
-
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setType(HddsProtos.ReplicationType.STAND_ALONE)
-        .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(keySize)
-        .setRefreshPipeline(true)
-        .build();
-
-
-    OmKeyLocationInfo keyInfo =
-        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
-            .get(0).getBlocksLatestVersionOnly().get(0);
-
-
-    ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());
-
-    LOG.info("SCM Container Info keyCount: {} usedBytes: {}",
-        cinfo.getNumberOfKeys(), cinfo.getUsedBytes());
-  }
-
-
-  private static ContainerData getContainerData(long containerID) {
-    ContainerData containerData;
-    ContainerSet containerManager = cluster.getHddsDatanodes().get(0)
-        .getDatanodeStateMachine().getContainer().getContainerSet();
-    containerData =
-        containerManager.getContainer(containerID).getContainerData();
-    return containerData;
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
deleted file mode 100644
index 44a386a..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++ /dev/null
@@ -1,975 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.UUID;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.RandomStringUtils;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
-import org.apache.hadoop.hdds.scm.net.NodeSchema;
-import org.apache.hadoop.hdds.scm.net.NodeSchemaManager;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneTestUtils;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.security.acl.RequestContext;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
-
-import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.mockito.Mockito;
-
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
-
-import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
-
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL;
-
-/**
- * Test class for @{@link KeyManagerImpl}.
- */
-public class TestKeyManagerImpl {
-
-  private static PrefixManager prefixManager;
-  private static KeyManagerImpl keyManager;
-  private static NodeManager nodeManager;
-  private static StorageContainerManager scm;
-  private static ScmBlockLocationProtocol mockScmBlockLocationProtocol;
-  private static OzoneConfiguration conf;
-  private static OMMetadataManager metadataManager;
-  private static File dir;
-  private static long scmBlockSize;
-  private static final String KEY_NAME = "key1";
-  private static final String BUCKET_NAME = "bucket1";
-  private static final String VOLUME_NAME = "vol1";
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    conf = new OzoneConfiguration();
-    dir = GenericTestUtils.getRandomizedTestDir();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
-    conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true");
-    mockScmBlockLocationProtocol = Mockito.mock(ScmBlockLocationProtocol.class);
-    metadataManager = new OmMetadataManagerImpl(conf);
-    nodeManager = new MockNodeManager(true, 10);
-    NodeSchema[] schemas = new NodeSchema[]
-        {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA};
-    NodeSchemaManager schemaManager = NodeSchemaManager.getInstance();
-    schemaManager.init(schemas, false);
-    NetworkTopology clusterMap = new NetworkTopologyImpl(schemaManager);
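-    // Register every mock datanode in the network topology.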
-    nodeManager.getAllNodes().forEach(node -> {
-      node.setNetworkName(node.getUuidString());
-      clusterMap.add(node);
-    });
-    ((MockNodeManager)nodeManager).setNetworkTopology(clusterMap);
-    SCMConfigurator configurator = new SCMConfigurator();
-    configurator.setScmNodeManager(nodeManager);
-    configurator.setNetworkTopology(clusterMap);
-    scm = TestUtils.getScm(conf, configurator);
-    scm.start();
-    scm.exitSafeMode();
-    scmBlockSize = (long) conf
-        .getStorageSize(OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT,
-            StorageUnit.BYTES);
-    conf.setLong(OZONE_KEY_PREALLOCATION_BLOCKS_MAX, 10);
-
-    keyManager =
-        new KeyManagerImpl(scm.getBlockProtocolServer(), metadataManager, conf,
-            "om1", null);
-    prefixManager = new PrefixManagerImpl(metadataManager, false);
-
-    Mockito.when(mockScmBlockLocationProtocol
-        .allocateBlock(Mockito.anyLong(), Mockito.anyInt(),
-            Mockito.any(ReplicationType.class),
-            Mockito.any(ReplicationFactor.class), Mockito.anyString(),
-            Mockito.any(ExcludeList.class))).thenThrow(
-        new SCMException("SafeModePrecheck failed for allocateBlock",
-            ResultCodes.SAFE_MODE_EXCEPTION));
-    createVolume(VOLUME_NAME);
-    createBucket(VOLUME_NAME, BUCKET_NAME);
-  }
-
-  @AfterClass
-  public static void cleanup() throws Exception {
-    scm.stop();
-    scm.join();
-    metadataManager.stop();
-    keyManager.stop();
-    FileUtils.deleteDirectory(dir);
-  }
-
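-  /**
-   * Deletes all keys created by a test so that test cases remain
-   * independent of each other.
-   */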
-  @After
-  public void cleanupTest() throws IOException {
-    List<OzoneFileStatus> fileStatuses = keyManager
-        .listStatus(createBuilder().setKeyName("").build(), true, "", 100000);
-    for (OzoneFileStatus fileStatus : fileStatuses) {
-      if (fileStatus.isFile()) {
-        keyManager.deleteKey(
-            createKeyArgs(fileStatus.getPath().toString().substring(1)));
-      } else {
-        keyManager.deleteKey(createKeyArgs(OzoneFSUtils
-            .addTrailingSlashIfNeeded(
-                fileStatus.getPath().toString().substring(1))));
-      }
-    }
-  }
-
-  private static void createBucket(String volumeName, String bucketName)
-      throws IOException {
-    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .build();
-
-    TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo);
-  }
-
-  private static void createVolume(String volumeName) throws IOException {
-    OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder()
-        .setVolume(volumeName)
-        .setAdminName("bilbo")
-        .setOwnerName("bilbo")
-        .build();
-    TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs);
-  }
-
-  @Test
-  public void allocateBlockFailureInSafeMode() throws Exception {
-    KeyManager keyManager1 = new KeyManagerImpl(mockScmBlockLocationProtocol,
-        metadataManager, conf, "om1", null);
-    OmKeyArgs keyArgs = createBuilder()
-        .setKeyName(KEY_NAME)
-        .build();
-
-    // openKey now allocates at least one block even if the requested size
-    // is 0, so add an entry to the openKeyTable manually to test
-    // allocateBlock failure.
-    OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
-        .setCreationTime(Time.now())
-        .setModificationTime(Time.now())
-        .setDataSize(0)
-        .setReplicationType(keyArgs.getType())
-        .setReplicationFactor(keyArgs.getFactor())
-        .setFileEncryptionInfo(null).build();
-    metadataManager.getOpenKeyTable().put(
-        metadataManager.getOpenKey(VOLUME_NAME, BUCKET_NAME, KEY_NAME, 1L),
-        omKeyInfo);
-    LambdaTestUtils.intercept(OMException.class,
-        "SafeModePrecheck failed for allocateBlock", () -> {
-          keyManager1
-              .allocateBlock(keyArgs, 1L, new ExcludeList());
-        });
-  }
-
-  @Test
-  public void openKeyFailureInSafeMode() throws Exception {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    KeyManager keyManager1 = new KeyManagerImpl(mockScmBlockLocationProtocol,
-        metadataManager, conf, "om1", null);
-    OmKeyArgs keyArgs = createBuilder()
-        .setKeyName(KEY_NAME)
-        .setDataSize(1000)
-        .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(),
-            ALL, ALL))
-        .build();
-    LambdaTestUtils.intercept(OMException.class,
-        "SafeModePrecheck failed for allocateBlock", () -> {
-          keyManager1.openKey(keyArgs);
-        });
-  }
-
-  @Test
-  public void openKeyWithMultipleBlocks() throws IOException {
-    OmKeyArgs keyArgs = createBuilder()
-        .setKeyName(UUID.randomUUID().toString())
-        .setDataSize(scmBlockSize * 10)
-        .build();
-    OpenKeySession keySession = keyManager.openKey(keyArgs);
-    OmKeyInfo keyInfo = keySession.getKeyInfo();
-    Assert.assertEquals(10,
-        keyInfo.getLatestVersionLocations().getLocationList().size());
-  }
-
-  @Test
-  public void testCreateDirectory() throws IOException {
-    // Create directory where the parent directory does not exist
-    String keyName = RandomStringUtils.randomAlphabetic(5);
-    OmKeyArgs keyArgs = createBuilder()
-        .setKeyName(keyName)
-        .build();
-    for (int i = 0; i < 5; i++) {
-      keyName += "/" + RandomStringUtils.randomAlphabetic(5);
-    }
-    keyManager.createDirectory(keyArgs);
-    Path path = Paths.get(keyName);
-    while (path != null) {
-      // verify parent directories are created
-      Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory());
-      path = path.getParent();
-    }
-
-    // make sure createDirectory fails when the parent is a file
-    keyName = RandomStringUtils.randomAlphabetic(5);
-    keyArgs = createBuilder()
-        .setKeyName(keyName)
-        .build();
-    OpenKeySession keySession = keyManager.openKey(keyArgs);
-    keyArgs.setLocationInfoList(
-        keySession.getKeyInfo().getLatestVersionLocations().getLocationList());
-    keyManager.commitKey(keyArgs, keySession.getId());
-    for (int i = 0; i < 5; i++) {
-      keyName += "/" + RandomStringUtils.randomAlphabetic(5);
-    }
-    try {
-      keyManager.createDirectory(keyArgs);
-      Assert.fail("Creation should fail for directory.");
-    } catch (OMException e) {
-      Assert.assertEquals(OMException.ResultCodes.FILE_ALREADY_EXISTS,
-          e.getResult());
-    }
-
-    // createDirectory on the root directory should succeed
-    keyName = "";
-    keyArgs = createBuilder()
-        .setKeyName(keyName)
-        .build();
-    keyManager.createDirectory(keyArgs);
-    Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory());
-
-    // create directory where parent is root
-    keyName = RandomStringUtils.randomAlphabetic(5);
-    keyArgs = createBuilder()
-        .setKeyName(keyName)
-        .build();
-    keyManager.createDirectory(keyArgs);
-    Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory());
-  }
-
-  @Test
-  public void testOpenFile() throws IOException {
-    // create key
-    String keyName = RandomStringUtils.randomAlphabetic(5);
-    OmKeyArgs keyArgs = createBuilder()
-        .setKeyName(keyName)
-        .build();
-    OpenKeySession keySession = keyManager.createFile(keyArgs, false, false);
-    keyArgs.setLocationInfoList(
-        keySession.getKeyInfo().getLatestVersionLocations().getLocationList());
-    keyManager.commitKey(keyArgs, keySession.getId());
-
-    // try to open created key with overWrite flag set to false
-    try {
-      keyManager.createFile(keyArgs, false, false);
-      Assert.fail("Open key should fail for non overwrite create");
-    } catch (OMException ex) {
-      if (ex.getResult() != OMException.ResultCodes.FILE_ALREADY_EXISTS) {
-        throw ex;
-      }
-    }
-
-    // create file should pass with overwrite flag set to true
-    keyManager.createFile(keyArgs, true, false);
-
-    // try to create a file where parent directories do not exist and
-    // recursive flag is set to false
-    keyName = RandomStringUtils.randomAlphabetic(5);
-    for (int i = 0; i < 5; i++) {
-      keyName += "/" + RandomStringUtils.randomAlphabetic(5);
-    }
-    keyArgs = createBuilder()
-        .setKeyName(keyName)
-        .build();
-    try {
-      keyManager.createFile(keyArgs, false, false);
-      Assert.fail("Open file should fail for non recursive write");
-    } catch (OMException ex) {
-      if (ex.getResult() != OMException.ResultCodes.DIRECTORY_NOT_FOUND) {
-        throw ex;
-      }
-    }
-
-    // file create should pass when recursive flag is set to true
-    keySession = keyManager.createFile(keyArgs, false, true);
-    keyArgs.setLocationInfoList(
-        keySession.getKeyInfo().getLatestVersionLocations().getLocationList());
-    keyManager.commitKey(keyArgs, keySession.getId());
-    Assert.assertTrue(keyManager
-        .getFileStatus(keyArgs).isFile());
-
-    // try creating a file over a directory
-    keyArgs = createBuilder()
-        .setKeyName("")
-        .build();
-    try {
-      keyManager.createFile(keyArgs, true, true);
-      Assert.fail("Open file should fail for non recursive write");
-    } catch (OMException ex) {
-      if (ex.getResult() != OMException.ResultCodes.NOT_A_FILE) {
-        throw ex;
-      }
-    }
-  }
-
-  @Test
-  public void testCheckAccessForFileKey() throws Exception {
-    OmKeyArgs keyArgs = createBuilder()
-        .setKeyName("testdir/deep/NOTICE.txt")
-        .build();
-    OpenKeySession keySession = keyManager.createFile(keyArgs, false, true);
-    keyArgs.setLocationInfoList(
-        keySession.getKeyInfo().getLatestVersionLocations().getLocationList());
-    keyManager.commitKey(keyArgs, keySession.getId());
-
-    OzoneObj fileKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-    RequestContext context = currentUserReads();
-    Assert.assertTrue(keyManager.checkAccess(fileKey, context));
-
-    OzoneObj parentDirKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .setKeyName("testdir")
-        .build();
-    Assert.assertTrue(keyManager.checkAccess(parentDirKey, context));
-  }
-
-  @Test
-  public void testCheckAccessForNonExistentKey() throws Exception {
-    OmKeyArgs keyArgs = createBuilder()
-        .setKeyName("testdir/deep/NO_SUCH_FILE.txt")
-        .build();
-    OzoneObj nonExistentKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-    OzoneTestUtils.expectOmException(OMException.ResultCodes.KEY_NOT_FOUND,
-        () -> keyManager.checkAccess(nonExistentKey, currentUserReads()));
-  }
-
-  @Test
-  public void testCheckAccessForDirectoryKey() throws Exception {
-    OmKeyArgs keyArgs = createBuilder()
-        .setKeyName("some/dir")
-        .build();
-    keyManager.createDirectory(keyArgs);
-
-    OzoneObj dirKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-    Assert.assertTrue(keyManager.checkAccess(dirKey, currentUserReads()));
-  }
-
-  @Test
-  public void testPrefixAclOps() throws IOException {
-    String volumeName = "vol1";
-    String bucketName = "bucket1";
-    String prefix1 = "pf1/";
-
-    OzoneObj ozPrefix1 = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setPrefixName(prefix1)
-        .setResType(OzoneObj.ResourceType.PREFIX)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-
-    OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1",
-        ACLType.READ, ACCESS);
-    prefixManager.addAcl(ozPrefix1, ozAcl1);
-
-    List<OzoneAcl> ozAclGet = prefixManager.getAcl(ozPrefix1);
-    Assert.assertEquals(1, ozAclGet.size());
-    Assert.assertEquals(ozAcl1, ozAclGet.get(0));
-
-    List<OzoneAcl> acls = new ArrayList<>();
-    OzoneAcl ozAcl2 = new OzoneAcl(ACLIdentityType.USER, "admin",
-        ACLType.ALL, ACCESS);
-
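-    // Build group ACLs with read+write, write-only and read-only bit sets
-    // to exercise how prefix ACL bits are combined and removed.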
-    BitSet rwRights = new BitSet();
-    rwRights.set(IAccessAuthorizer.ACLType.WRITE.ordinal());
-    rwRights.set(IAccessAuthorizer.ACLType.READ.ordinal());
-    OzoneAcl ozAcl3 = new OzoneAcl(ACLIdentityType.GROUP, "dev",
-        rwRights, ACCESS);
-
-    BitSet wRights = new BitSet();
-    wRights.set(IAccessAuthorizer.ACLType.WRITE.ordinal());
-    OzoneAcl ozAcl4 = new OzoneAcl(ACLIdentityType.GROUP, "dev",
-        wRights, ACCESS);
-
-    BitSet rRights = new BitSet();
-    rRights.set(IAccessAuthorizer.ACLType.READ.ordinal());
-    OzoneAcl ozAcl5 = new OzoneAcl(ACLIdentityType.GROUP, "dev",
-        rRights, ACCESS);
-
-    acls.add(ozAcl2);
-    acls.add(ozAcl3);
-
-    prefixManager.setAcl(ozPrefix1, acls);
-    ozAclGet = prefixManager.getAcl(ozPrefix1);
-    Assert.assertEquals(2, ozAclGet.size());
-
-    int matchEntries = 0;
-    for (OzoneAcl acl : ozAclGet) {
-      if (acl.getType() == ACLIdentityType.GROUP) {
-        Assert.assertEquals(ozAcl3, acl);
-        matchEntries++;
-      }
-      if (acl.getType() == ACLIdentityType.USER) {
-        Assert.assertEquals(ozAcl2, acl);
-        matchEntries++;
-      }
-    }
-    Assert.assertEquals(2, matchEntries);
-
-    boolean result = prefixManager.removeAcl(ozPrefix1, ozAcl4);
-    Assert.assertTrue(result);
-
-    ozAclGet = prefixManager.getAcl(ozPrefix1);
-    Assert.assertEquals(2, ozAclGet.size());
-
-    result = prefixManager.removeAcl(ozPrefix1, ozAcl3);
-    Assert.assertTrue(result);
-    ozAclGet = prefixManager.getAcl(ozPrefix1);
-    Assert.assertEquals(1, ozAclGet.size());
-
-    Assert.assertEquals(ozAcl2, ozAclGet.get(0));
-
-    // add dev:w
-    prefixManager.addAcl(ozPrefix1, ozAcl4);
-    ozAclGet = prefixManager.getAcl(ozPrefix1);
-    Assert.assertEquals(2, ozAclGet.size());
-
-    // add dev:r and validate the acl bitset combined
-    prefixManager.addAcl(ozPrefix1, ozAcl5);
-    ozAclGet = prefixManager.getAcl(ozPrefix1);
-    Assert.assertEquals(2, ozAclGet.size());
-
-    matchEntries = 0;
-    for (OzoneAcl acl : ozAclGet) {
-      if (acl.getType() == ACLIdentityType.GROUP) {
-        Assert.assertEquals(ozAcl3, acl);
-        matchEntries++;
-      }
-      if (acl.getType() == ACLIdentityType.USER) {
-        Assert.assertEquals(ozAcl2, acl);
-        matchEntries++;
-      }
-    }
-    Assert.assertEquals(2, matchEntries);
-  }
-
-  @Test
-  public void testInvalidPrefixAcl() throws IOException {
-    String volumeName = "vol1";
-    String bucketName = "bucket1";
-    String prefix1 = "pf1/";
-
-    // Invalid prefix not ending with "/"
-    String invalidPrefix = "invalid/pf";
-    OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1",
-        ACLType.READ, ACCESS);
-
-    OzoneObj ozInvalidPrefix = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setPrefixName(invalidPrefix)
-        .setResType(OzoneObj.ResourceType.PREFIX)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-
-    // add acl with invalid prefix name
-    exception.expect(OMException.class);
-    exception.expectMessage("Invalid prefix name");
-    prefixManager.addAcl(ozInvalidPrefix, ozAcl1);
-
-    OzoneObj ozPrefix1 = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setPrefixName(prefix1)
-        .setResType(OzoneObj.ResourceType.PREFIX)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-
-
-    List<OzoneAcl> ozAclGet = prefixManager.getAcl(ozPrefix1);
-    Assert.assertEquals(1, ozAclGet.size());
-    Assert.assertEquals(ozAcl1, ozAclGet.get(0));
-
-    // get acl with invalid prefix name
-    exception.expect(OMException.class);
-    exception.expectMessage("Invalid prefix name");
-    ozAclGet = prefixManager.getAcl(ozInvalidPrefix);
-    Assert.assertEquals(null, ozAcl1);
-
-    // set acl with invalid prefix name
-    List<OzoneAcl> ozoneAcls = new ArrayList<>();
-    ozoneAcls.add(ozAcl1);
-    exception.expect(OMException.class);
-    exception.expectMessage("Invalid prefix name");
-    prefixManager.setAcl(ozInvalidPrefix, ozoneAcls);
-
-    // remove acl with invalid prefix name
-    exception.expect(OMException.class);
-    exception.expectMessage("Invalid prefix name");
-    prefixManager.removeAcl(ozInvalidPrefix, ozAcl1);
-  }
-
-  @Test
-  public void testLongestPrefixPath() throws IOException {
-    String volumeName = "vol1";
-    String bucketName = "bucket1";
-    String prefix1 = "pf1/pf11/pf111/pf1111/";
-    String file1 = "pf1/pf11/file1";
-    String file2 = "pf1/pf11/pf111/pf1111/file2";
-
-    OzoneObj ozPrefix1 = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setPrefixName(prefix1)
-        .setResType(OzoneObj.ResourceType.PREFIX)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-
-    OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1",
-        ACLType.READ, ACCESS);
-    prefixManager.addAcl(ozPrefix1, ozAcl1);
-
-    OzoneObj ozFile1 = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(file1)
-        .setResType(OzoneObj.ResourceType.KEY)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-
-    List<OmPrefixInfo> prefixInfos =
-        prefixManager.getLongestPrefixPath(ozFile1.getPath());
-    Assert.assertEquals(5, prefixInfos.size());
-
-    OzoneObj ozFile2 = new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setPrefixName(file2)
-        .setResType(OzoneObj.ResourceType.KEY)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .build();
-
-    prefixInfos =
-        prefixManager.getLongestPrefixPath(ozFile2.getPath());
-    Assert.assertEquals(7, prefixInfos.size());
-    // Only the last node has an ACL on it
-    Assert.assertEquals(ozAcl1, prefixInfos.get(6).getAcls().get(0));
-    // None of the other nodes has an ACL associated with it
-    for (int i = 0; i < 6; i++) {
-      Assert.assertNull(prefixInfos.get(i));
-    }
-  }
-
-  @Test
-  public void testLookupFile() throws IOException {
-    String keyName = RandomStringUtils.randomAlphabetic(5);
-    OmKeyArgs keyArgs = createBuilder()
-        .setKeyName(keyName)
-        .build();
-
-    // lookup for a non-existent file
-    try {
-      keyManager.lookupFile(keyArgs, null);
-      Assert.fail("Lookup file should fail for non existent file");
-    } catch (OMException ex) {
-      if (ex.getResult() != OMException.ResultCodes.FILE_NOT_FOUND) {
-        throw ex;
-      }
-    }
-
-    // create a file
-    OpenKeySession keySession = keyManager.createFile(keyArgs, false, false);
-    keyArgs.setLocationInfoList(
-        keySession.getKeyInfo().getLatestVersionLocations().getLocationList());
-    keyManager.commitKey(keyArgs, keySession.getId());
-    Assert.assertEquals(keyName,
-        keyManager.lookupFile(keyArgs, null).getKeyName());
-
-    // lookup a directory; lookupFile should fail with NOT_A_FILE
-    keyArgs = createBuilder()
-        .setKeyName("")
-        .build();
-    try {
-      keyManager.lookupFile(keyArgs, null);
-      Assert.fail("Lookup file should fail for a directory");
-    } catch (OMException ex) {
-      if (ex.getResult() != OMException.ResultCodes.NOT_A_FILE) {
-        throw ex;
-      }
-    }
-  }
-
-  private OmKeyArgs createKeyArgs(String toKeyName) throws IOException {
-    return createBuilder().setKeyName(toKeyName).build();
-  }
-
-  @Test
-  public void testLookupKeyWithLocation() throws IOException {
-    String keyName = RandomStringUtils.randomAlphabetic(5);
-    OmKeyArgs keyArgs = createBuilder()
-        .setKeyName(keyName)
-        .setSortDatanodesInPipeline(true)
-        .build();
-
-    // lookup for a non-existent key
-    try {
-      keyManager.lookupKey(keyArgs, null);
-      Assert.fail("Lookup key should fail for non existent key");
-    } catch (OMException ex) {
-      if (ex.getResult() != OMException.ResultCodes.KEY_NOT_FOUND) {
-        throw ex;
-      }
-    }
-
-    // create a key
-    OpenKeySession keySession = keyManager.createFile(keyArgs, false, false);
-    // select 3 distinct datanodes from the cluster map
-    List<DatanodeDetails> nodeList = new ArrayList<>();
-    nodeList.add((DatanodeDetails)scm.getClusterMap().getNode(
-        0, null, null, null, null, 0));
-    nodeList.add((DatanodeDetails)scm.getClusterMap().getNode(
-        1, null, null, null, null, 0));
-    nodeList.add((DatanodeDetails)scm.getClusterMap().getNode(
-        2, null, null, null, null, 0));
-    Assume.assumeFalse(nodeList.get(0).equals(nodeList.get(1)));
-    Assume.assumeFalse(nodeList.get(0).equals(nodeList.get(2)));
-    // create a pipeline using 3 datanodes
-    Pipeline pipeline = scm.getPipelineManager().createPipeline(
-        ReplicationType.RATIS, ReplicationFactor.THREE, nodeList);
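-    // Commit the key with a single block located on the new pipeline.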
-    List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
-    locationInfoList.add(
-        new OmKeyLocationInfo.Builder().setPipeline(pipeline)
-            .setBlockID(new BlockID(1L, 1L)).build());
-    keyArgs.setLocationInfoList(locationInfoList);
-
-    keyManager.commitKey(keyArgs, keySession.getId());
-
-    OmKeyInfo key = keyManager.lookupKey(keyArgs, null);
-    Assert.assertEquals(keyName, key.getKeyName());
-    List<OmKeyLocationInfo> keyLocations =
-        key.getLatestVersionLocations().getLocationList();
-    DatanodeDetails leader =
-        keyLocations.get(0).getPipeline().getFirstNode();
-    DatanodeDetails follower1 =
-        keyLocations.get(0).getPipeline().getNodes().get(1);
-    DatanodeDetails follower2 =
-        keyLocations.get(0).getPipeline().getNodes().get(2);
-    Assert.assertNotEquals(leader, follower1);
-    Assert.assertNotEquals(follower1, follower2);
-
-    // lookup key, leader as client
-    OmKeyInfo key1 = keyManager.lookupKey(keyArgs, leader.getIpAddress());
-    Assert.assertEquals(leader, key1.getLatestVersionLocations()
-        .getLocationList().get(0).getPipeline().getClosestNode());
-
-    // lookup key, follower1 as client
-    OmKeyInfo key2 = keyManager.lookupKey(keyArgs, follower1.getIpAddress());
-    Assert.assertEquals(follower1, key2.getLatestVersionLocations()
-        .getLocationList().get(0).getPipeline().getClosestNode());
-
-    // lookup key, follower2 as client
-    OmKeyInfo key3 = keyManager.lookupKey(keyArgs, follower2.getIpAddress());
-    Assert.assertEquals(follower2, key3.getLatestVersionLocations()
-        .getLocationList().get(0).getPipeline().getClosestNode());
-
-    // lookup key, random node as client
-    OmKeyInfo key4 = keyManager.lookupKey(keyArgs,
-        "/d=default-drack/127.0.0.1");
-    Assert.assertEquals(leader, key4.getLatestVersionLocations()
-        .getLocationList().get(0).getPipeline().getClosestNode());
-  }
-
-  @Test
-  public void testListStatus() throws IOException {
-    String superDir = RandomStringUtils.randomAlphabetic(5);
-
-    int numDirectories = 5;
-    int numFiles = 5;
-    // set of directory descendants of root
-    Set<String> directorySet = new TreeSet<>();
-    // set of file descendants of root
-    Set<String> fileSet = new TreeSet<>();
-    createDepthTwoDirectory(superDir, numDirectories, numFiles, directorySet,
-        fileSet);
-    // set of all descendants of root
-    Set<String> children = new TreeSet<>(directorySet);
-    children.addAll(fileSet);
-    // number of entries in the filesystem
-    int numEntries = directorySet.size() + fileSet.size();
-
-    OmKeyArgs rootDirArgs = createKeyArgs("");
-    List<OzoneFileStatus> fileStatuses =
-        keyManager.listStatus(rootDirArgs, true, "", 100);
-    // verify the number of statuses returned matches the number of entries
-    Assert.assertEquals(numEntries, fileStatuses.size());
-
-    fileStatuses = keyManager.listStatus(rootDirArgs, false, "", 100);
-    // the number of immediate children of root is 1
-    Assert.assertEquals(1, fileStatuses.size());
-
-    // if startKey is the first descendant of the root then listStatus should
-    // return all the entries.
-    String startKey = children.iterator().next();
-    fileStatuses = keyManager.listStatus(rootDirArgs, true,
-        startKey.substring(0, startKey.length() - 1), 100);
-    Assert.assertEquals(numEntries, fileStatuses.size());
-
-    for (String directory : directorySet) {
-      // verify status list received for each directory with recursive flag set
-      // to false
-      OmKeyArgs dirArgs = createKeyArgs(directory);
-      fileStatuses = keyManager.listStatus(dirArgs, false, "", 100);
-      verifyFileStatus(directory, fileStatuses, directorySet, fileSet, false);
-
-      // verify status list received for each directory with recursive flag set
-      // to true
-      fileStatuses = keyManager.listStatus(dirArgs, true, "", 100);
-      verifyFileStatus(directory, fileStatuses, directorySet, fileSet, true);
-
-      // verify the listStatus call using the startKey parameter with the
-      // recursive flag set to false. After every call to listStatus, use the
-      // latest received file status as the startKey until no more entries are
-      // left to list.
-      List<OzoneFileStatus> tempFileStatus = null;
-      Set<OzoneFileStatus> tmpStatusSet = new HashSet<>();
-      do {
-        tempFileStatus = keyManager.listStatus(dirArgs, false,
-            tempFileStatus != null ? OzoneFSUtils.pathToKey(
-                tempFileStatus.get(tempFileStatus.size() - 1).getPath()) : null,
-            2);
-        tmpStatusSet.addAll(tempFileStatus);
-      } while (tempFileStatus.size() == 2);
-      verifyFileStatus(directory, new ArrayList<>(tmpStatusSet), directorySet,
-          fileSet, false);
-
-      // verify the listStatus call using the startKey parameter with the
-      // recursive flag set to true. After every call to listStatus, use the
-      // latest received file status as the startKey until no more entries are
-      // left to list.
-      tempFileStatus = null;
-      tmpStatusSet = new HashSet<>();
-      do {
-        tempFileStatus = keyManager.listStatus(dirArgs, true,
-            tempFileStatus != null ? OzoneFSUtils.pathToKey(
-                tempFileStatus.get(tempFileStatus.size() - 1).getPath()) : null,
-            2);
-        tmpStatusSet.addAll(tempFileStatus);
-      } while (tempFileStatus.size() == 2);
-      verifyFileStatus(directory, new ArrayList<>(tmpStatusSet), directorySet,
-          fileSet, true);
-    }
-  }
-
-  /**
-   * Creates a directory tree of depth two.
-   *
-   * @param superDir       Super directory to create
-   * @param numDirectories number of directory children
-   * @param numFiles       number of file children
-   * @param directorySet   set of descendant directories for the super directory
-   * @param fileSet        set of descendant files for the super directory
-   */
-  private void createDepthTwoDirectory(String superDir, int numDirectories,
-      int numFiles, Set<String> directorySet, Set<String> fileSet)
-      throws IOException {
-    // create super directory
-    OmKeyArgs superDirArgs = createKeyArgs(superDir);
-    keyManager.createDirectory(superDirArgs);
-    directorySet.add(superDir);
-
-    // add directory children to super directory
-    Set<String> childDirectories =
-        createDirectories(superDir, new HashMap<>(), numDirectories);
-    directorySet.addAll(childDirectories);
-    // add file to super directory
-    fileSet.addAll(createFiles(superDir, new HashMap<>(), numFiles));
-
-    // for each child directory create files and directories
-    for (String child : childDirectories) {
-      fileSet.addAll(createFiles(child, new HashMap<>(), numFiles));
-      directorySet
-          .addAll(createDirectories(child, new HashMap<>(), numDirectories));
-    }
-  }
-
-  private void verifyFileStatus(String directory,
-      List<OzoneFileStatus> fileStatuses, Set<String> directorySet,
-      Set<String> fileSet, boolean recursive) {
-
-    for (OzoneFileStatus fileStatus : fileStatuses) {
-      String keyName = OzoneFSUtils.pathToKey(fileStatus.getPath());
-      String parent = Paths.get(keyName).getParent().toString();
-      if (!recursive) {
-        // if recursive is false, verify all the statuses have the input
-        // directory as parent
-        Assert.assertEquals(directory, parent);
-      }
-      // verify filestatus is present in directory or file set accordingly
-      if (fileStatus.isDirectory()) {
-        Assert.assertTrue(directorySet.contains(keyName));
-      } else {
-        Assert.assertTrue(fileSet.contains(keyName));
-      }
-    }
-
-    // count the number of entries which should be present in the directory
-    int numEntries = 0;
-    Set<String> entrySet = new TreeSet<>(directorySet);
-    entrySet.addAll(fileSet);
-    for (String entry : entrySet) {
-      if (OzoneFSUtils.getParent(entry)
-          .startsWith(OzoneFSUtils.addTrailingSlashIfNeeded(directory))) {
-        if (recursive) {
-          numEntries++;
-        } else if (OzoneFSUtils.getParent(entry)
-            .equals(OzoneFSUtils.addTrailingSlashIfNeeded(directory))) {
-          numEntries++;
-        }
-      }
-    }
-    // verify the number of entries match the status list size
-    Assert.assertEquals(numEntries, fileStatuses.size());
-  }
-
-  private Set<String> createDirectories(String parent,
-      Map<String, List<String>> directoryMap, int numDirectories)
-      throws IOException {
-    Set<String> keyNames = new TreeSet<>();
-    for (int i = 0; i < numDirectories; i++) {
-      String keyName = parent + "/" + RandomStringUtils.randomAlphabetic(5);
-      OmKeyArgs keyArgs = createBuilder().setKeyName(keyName).build();
-      keyManager.createDirectory(keyArgs);
-      keyNames.add(keyName);
-    }
-    directoryMap.put(parent, new ArrayList<>(keyNames));
-    return keyNames;
-  }
-
-  private List<String> createFiles(String parent,
-      Map<String, List<String>> fileMap, int numFiles) throws IOException {
-    List<String> keyNames = new ArrayList<>();
-    for (int i = 0; i < numFiles; i++) {
-      String keyName = parent + "/" + RandomStringUtils.randomAlphabetic(5);
-      OmKeyArgs keyArgs = createBuilder().setKeyName(keyName).build();
-      OpenKeySession keySession = keyManager.createFile(keyArgs, false, false);
-      keyArgs.setLocationInfoList(
-          keySession.getKeyInfo().getLatestVersionLocations()
-              .getLocationList());
-      keyManager.commitKey(keyArgs, keySession.getId());
-      keyNames.add(keyName);
-    }
-    fileMap.put(parent, keyNames);
-    return keyNames;
-  }
-
-  private OmKeyArgs.Builder createBuilder() throws IOException {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    return new OmKeyArgs.Builder()
-        .setBucketName(BUCKET_NAME)
-        .setFactor(ReplicationFactor.ONE)
-        .setDataSize(0)
-        .setType(ReplicationType.STAND_ALONE)
-        .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(),
-            ALL, ALL))
-        .setVolumeName(VOLUME_NAME);
-  }
-
-  private RequestContext currentUserReads() throws IOException {
-    return RequestContext.newBuilder()
-        .setClientUgi(UserGroupInformation.getCurrentUser())
-        .setAclRights(ACLType.READ_ACL)
-        .setAclType(ACLIdentityType.USER)
-        .build();
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
deleted file mode 100644
index 732fb34..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-
-/**
- * Test OM's {@link KeyDeletingService}.
- */
-public class TestKeyPurging {
-
-  private static MiniOzoneCluster cluster;
-  private static ObjectStore store;
-  private static OzoneManager om;
-
-  private static final int NUM_KEYS = 10;
-  private static final int KEY_SIZE = 100;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
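-    // Shorten the deletion and report intervals so purging happens quickly.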
-    conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
-        TimeUnit.MILLISECONDS);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true);
-    conf.setQuietMode(false);
-
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(1)
-        .setHbInterval(200)
-        .build();
-    cluster.waitForClusterToBeReady();
-    store = OzoneClientFactory.getRpcClient(conf).getObjectStore();
-    om = cluster.getOzoneManager();
-  }
-
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test(timeout = 30000)
-  public void testKeysPurgingByKeyDeletingService() throws Exception {
-    // Create Volume and Bucket
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-
-    // Create some keys and write data into them
-    String keyBase = UUID.randomUUID().toString();
-    String keyString = UUID.randomUUID().toString();
-    byte[] data = ContainerTestHelper.getFixedLengthString(
-        keyString, KEY_SIZE).getBytes(UTF_8);
-    List<String> keys = new ArrayList<>(NUM_KEYS);
-    for (int i = 1; i <= NUM_KEYS; i++) {
-      String keyName = keyBase + "-" + i;
-      keys.add(keyName);
-      OzoneOutputStream keyStream = ContainerTestHelper.createKey(
-          keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
-          KEY_SIZE, store, volumeName, bucketName);
-      keyStream.write(data);
-      keyStream.close();
-    }
-
-    // Delete created keys
-    for (String key : keys) {
-      bucket.deleteKey(key);
-    }
-
-    // Verify that KeyDeletingService picks up deleted keys and purges them
-    // from DB.
-    KeyManager keyManager = om.getKeyManager();
-    KeyDeletingService keyDeletingService =
-        (KeyDeletingService) keyManager.getDeletingService();
-
-    GenericTestUtils.waitFor(
-        () -> keyDeletingService.getDeletedKeyCount().get() >= NUM_KEYS,
-        1000, 10000);
-
-    Assert.assertTrue(keyDeletingService.getRunCount().get() > 1);
-
-    GenericTestUtils.waitFor(
-        () -> {
-          try {
-            return keyManager.getPendingDeletionKeys(Integer.MAX_VALUE)
-                .size() == 0;
-          } catch (IOException e) {
-            return false;
-          }
-        }, 1000, 10000);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
deleted file mode 100644
index 3cba9b3..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.
-    OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.UUID;
-
-import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
-import javax.servlet.ServletOutputStream;
-import javax.servlet.WriteListener;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.mockito.Matchers;
-
-import static org.apache.hadoop.ozone.OzoneConsts.
-    OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
-import static org.mockito.Mockito.doCallRealMethod;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-/**
- * Class used for testing the OM DB Checkpoint provider servlet.
- */
-public class TestOMDbCheckpointServlet {
-  private MiniOzoneCluster cluster = null;
-  private OMMetrics omMetrics;
-  private OzoneConfiguration conf;
-  private String clusterId;
-  private String scmId;
-  private String omId;
-
-  @Rule
-  public Timeout timeout = new Timeout(60000);
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    omId = UUID.randomUUID().toString();
-    conf.setBoolean(OZONE_ACL_ENABLED, true);
-    conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setClusterId(clusterId)
-        .setScmId(scmId)
-        .setOmId(omId)
-        .build();
-    cluster.waitForClusterToBeReady();
-    omMetrics = cluster.getOzoneManager().getMetrics();
-  }
-
-  /**
-   * Shut down the MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testDoGet() throws ServletException, IOException {
-
-    File tempFile = null;
-    try {
-      OMDBCheckpointServlet omDbCheckpointServletMock =
-          mock(OMDBCheckpointServlet.class);
-
-      doCallRealMethod().when(omDbCheckpointServletMock).init();
-
-      HttpServletRequest requestMock = mock(HttpServletRequest.class);
-      HttpServletResponse responseMock = mock(HttpServletResponse.class);
-
-      ServletContext servletContextMock = mock(ServletContext.class);
-      when(omDbCheckpointServletMock.getServletContext())
-          .thenReturn(servletContextMock);
-
-      when(servletContextMock.getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE))
-          .thenReturn(cluster.getOzoneManager());
-      when(requestMock.getParameter(OZONE_DB_CHECKPOINT_REQUEST_FLUSH))
-          .thenReturn("true");
-      doNothing().when(responseMock).setContentType("application/x-tgz");
-      doNothing().when(responseMock).setHeader(Matchers.anyString(),
-          Matchers.anyString());
-
-      tempFile = File.createTempFile("testDoGet_" + System
-          .currentTimeMillis(), ".tar.gz");
-
-      FileOutputStream fileOutputStream = new FileOutputStream(tempFile);
-      when(responseMock.getOutputStream()).thenReturn(
-          new ServletOutputStream() {
-            @Override
-            public boolean isReady() {
-              return true;
-            }
-
-            @Override
-            public void setWriteListener(WriteListener writeListener) {
-            }
-
-            @Override
-            public void write(int b) throws IOException {
-              fileOutputStream.write(b);
-            }
-          });
-
-      doCallRealMethod().when(omDbCheckpointServletMock).doGet(requestMock,
-          responseMock);
-
-      omDbCheckpointServletMock.init();
-
-      Assert.assertEquals(0,
-          omMetrics.getLastCheckpointCreationTimeTaken());
-      Assert.assertEquals(0,
-          omMetrics.getLastCheckpointStreamingTimeTaken());
-
-      omDbCheckpointServletMock.doGet(requestMock, responseMock);
-
-      Assert.assertTrue(tempFile.length() > 0);
-      Assert.assertTrue(
-          omMetrics.getLastCheckpointCreationTimeTaken() > 0);
-      Assert.assertTrue(
-          omMetrics.getLastCheckpointStreamingTimeTaken() > 0);
-    } finally {
-      FileUtils.deleteQuietly(tempFile);
-    }
-
-  }
-}
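The anonymous ServletOutputStream in testDoGet adapts the mocked response into a local file so the streamed checkpoint can be asserted on. The same adapter as a named class, sketched against only the javax.servlet API (the class name is illustrative):

import java.io.FileOutputStream;
import java.io.IOException;
import javax.servlet.ServletOutputStream;
import javax.servlet.WriteListener;

final class FileBackedServletOutputStream extends ServletOutputStream {

  private final FileOutputStream out;

  FileBackedServletOutputStream(FileOutputStream out) {
    this.out = out;
  }

  @Override
  public boolean isReady() {
    return true; // writes are synchronous, so the stream is always ready
  }

  @Override
  public void setWriteListener(WriteListener writeListener) {
    // no-op: async write notifications are not needed in a test
  }

  @Override
  public void write(int b) throws IOException {
    out.write(b); // redirect every byte into the backing file
  }
}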
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
deleted file mode 100644
index 901dbe9..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.om.TestOzoneManagerHA.createKey;
-
-/**
- * Tests the Ratis snapshots feature in OM.
- */
-public class TestOMRatisSnapshots {
-
-  private MiniOzoneHAClusterImpl cluster = null;
-  private ObjectStore objectStore;
-  private OzoneConfiguration conf;
-  private String clusterId;
-  private String scmId;
-  private String omServiceId;
-  private int numOfOMs = 3;
-  private static final long SNAPSHOT_THRESHOLD = 50;
-  private static final int LOG_PURGE_GAP = 50;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @Rule
-  public Timeout timeout = new Timeout(500_000);
-
-  /**
-   * Create a MiniOzoneCluster for testing. The cluster initially has one
-   * inactive OM. So at the start of the cluster, there will be 2 active and 1
-   * inactive OM.
-   *
-   * @throws IOException
-   */
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    omServiceId = "om-service-test1";
-    conf.setLong(
-        OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY,
-        SNAPSHOT_THRESHOLD);
-    conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, LOG_PURGE_GAP);
-    cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
-        .setClusterId(clusterId)
-        .setScmId(scmId)
-        .setOMServiceId("om-service-test1")
-        .setNumOfOzoneManagers(numOfOMs)
-        .setNumOfActiveOMs(2)
-        .build();
-    cluster.waitForClusterToBeReady();
-    objectStore = OzoneClientFactory.getRpcClient(omServiceId, conf)
-        .getObjectStore();
-  }
-
-  /**
-   * Shut down the MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testInstallSnapshot() throws Exception {
-    // Get the leader OM
-    String leaderOMNodeId = objectStore.getClientProxy().getOMProxyProvider()
-        .getCurrentProxyOMNodeId();
-    OzoneManager leaderOM = cluster.getOzoneManager(leaderOMNodeId);
-    OzoneManagerRatisServer leaderRatisServer = leaderOM.getOmRatisServer();
-
-    // Find the inactive OM
-    String followerNodeId = leaderOM.getPeerNodes().get(0).getOMNodeId();
-    if (cluster.isOMActive(followerNodeId)) {
-      followerNodeId = leaderOM.getPeerNodes().get(1).getOMNodeId();
-    }
-    OzoneManager followerOM = cluster.getOzoneManager(followerNodeId);
-
-    // Do some transactions so that the log index increases
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder()
-        .setOwner(userName)
-        .setAdmin(adminName)
-        .build();
-
-    objectStore.createVolume(volumeName, createVolumeArgs);
-    OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName);
-
-    retVolumeinfo.createBucket(bucketName);
-    OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);
-
-    long leaderOMappliedLogIndex =
-        leaderRatisServer.getStateMachineLastAppliedIndex();
-
-    List<String> keys = new ArrayList<>();
-    while (leaderOMappliedLogIndex < 2000) {
-      keys.add(createKey(ozoneBucket));
-      leaderOMappliedLogIndex =
-          leaderRatisServer.getStateMachineLastAppliedIndex();
-    }
-
-    // Get the latest db checkpoint from the leader OM.
-    long leaderOMSnapshotIndex = leaderOM.saveRatisSnapshot();
-    DBCheckpoint leaderDbCheckpoint =
-        leaderOM.getMetadataManager().getStore().getCheckpoint(false);
-
-    // Start the inactive OM
-    cluster.startInactiveOM(followerNodeId);
-
-    // The recently started OM should be lagging behind the leader OM.
-    long followerOMLastAppliedIndex =
-        followerOM.getOmRatisServer().getStateMachineLastAppliedIndex();
-    Assert.assertTrue(
-        followerOMLastAppliedIndex < leaderOMSnapshotIndex);
-
-    // Install leader OM's db checkpoint on the lagging OM.
-    followerOM.getOmRatisServer().getOmStateMachine().pause();
-    followerOM.getMetadataManager().getStore().close();
-    followerOM.replaceOMDBWithCheckpoint(
-        leaderOMSnapshotIndex, leaderDbCheckpoint.getCheckpointLocation());
-
-    // Reload the follower OM with new DB checkpoint from the leader OM.
-    followerOM.reloadOMState(leaderOMSnapshotIndex);
-    followerOM.getOmRatisServer().getOmStateMachine().unpause(
-        leaderOMSnapshotIndex);
-
-    // After the new checkpoint is loaded and state machine is unpaused, the
-    // follower OM lastAppliedIndex must match the snapshot index of the
-    // checkpoint.
-    followerOMLastAppliedIndex = followerOM.getOmRatisServer()
-        .getStateMachineLastAppliedIndex();
-    Assert.assertEquals(leaderOMSnapshotIndex, followerOMLastAppliedIndex);
-
-    // Verify that the follower OM's DB contains the transactions which were
-    // made while it was inactive.
-    OMMetadataManager followerOMMetaMngr = followerOM.getMetadataManager();
-    Assert.assertNotNull(followerOMMetaMngr.getVolumeTable().get(
-        followerOMMetaMngr.getVolumeKey(volumeName)));
-    Assert.assertNotNull(followerOMMetaMngr.getBucketTable().get(
-        followerOMMetaMngr.getBucketKey(volumeName, bucketName)));
-    for (String key : keys) {
-      Assert.assertNotNull(followerOMMetaMngr.getKeyTable().get(
-          followerOMMetaMngr.getOzoneKey(volumeName, bucketName, key)));
-    }
-  }
-}
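testInstallSnapshot exercises a four-step install sequence on the lagging follower: pause the state machine, close the stale DB, adopt the leader's checkpoint, and resume at the snapshot index. The same sequence in schematic form, against hypothetical StateMachine and Store interfaces rather than the real Ratis and Ozone types:

import java.nio.file.Path;

interface StateMachine {
  void pause();
  void unpause(long snapshotIndex);
}

interface Store {
  void close();
  void replaceWith(Path checkpoint);
}

final class CheckpointInstaller {

  // After install returns, the follower's lastAppliedIndex equals the
  // leader's snapshot index, which is what the test asserts.
  static void install(StateMachine sm, Store store, Path checkpoint,
      long snapshotIndex) {
    sm.pause();                    // stop applying further log entries
    store.close();                 // release the stale local DB
    store.replaceWith(checkpoint); // swap in the leader's checkpoint
    sm.unpause(snapshotIndex);     // resume from the snapshot index
  }
}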
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
deleted file mode 100644
index c75e365..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneTestUtils;
-import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.IOzoneObj;
-import org.apache.hadoop.ozone.security.acl.RequestContext;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
-import org.junit.AfterClass;
-import static org.junit.Assert.assertTrue;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-/**
- * Test for Ozone Manager ACLs.
- */
-public class TestOmAcls {
-
-  private static boolean aclAllow = true;
-  private static MiniOzoneCluster cluster = null;
-  private static OMMetrics omMetrics;
-  private static OzoneConfiguration conf;
-  private static String clusterId;
-  private static String scmId;
-  private static String omId;
-  private static GenericTestUtils.LogCapturer logCapturer;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    omId = UUID.randomUUID().toString();
-    conf.setBoolean(OZONE_ACL_ENABLED, true);
-    conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
-    conf.setClass(OZONE_ACL_AUTHORIZER_CLASS, OzoneAccessAuthorizerTest.class,
-        IAccessAuthorizer.class);
-    conf.setStrings(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setClusterId(clusterId)
-        .setScmId(scmId)
-        .setOmId(omId)
-        .build();
-    cluster.waitForClusterToBeReady();
-    omMetrics = cluster.getOzoneManager().getMetrics();
-    logCapturer =
-        GenericTestUtils.LogCapturer.captureLogs(OzoneManager.getLogger());
-  }
-
-  /**
-   * Shut down the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  /**
-   * Tests that bucket creation is denied when the ACL authorizer rejects it.
-   */
-
-  @Test
-  public void testBucketCreationPermissionDenied() throws Exception {
-
-    TestOmAcls.aclAllow = true;
-
-    String volumeName = RandomStringUtils.randomAlphabetic(5).toLowerCase();
-    String bucketName = RandomStringUtils.randomAlphabetic(5).toLowerCase();
-    cluster.getClient().getObjectStore().createVolume(volumeName);
-    OzoneVolume volume =
-        cluster.getClient().getObjectStore().getVolume(volumeName);
-
-    TestOmAcls.aclAllow = false;
-    OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
-        () -> volume.createBucket(bucketName));
-
-    assertTrue(logCapturer.getOutput()
-        .contains("doesn't have CREATE permission to access volume"));
-  }
-
-  @Test
-  public void testFailureInKeyOp() throws Exception {
-    TestOmAcls.aclAllow = true;
-    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
-    logCapturer.clearOutput();
-
-    TestOmAcls.aclAllow = false;
-
-    OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
-        () -> TestDataUtil.createKey(bucket, "testKey", "testcontent"));
-    assertTrue(logCapturer.getOutput().contains("doesn't have WRITE " +
-        "permission to access bucket"));
-  }
-
-  /**
-   * Test authorizer implementation for the negative (deny) case.
-   */
-  static class OzoneAccessAuthorizerTest implements IAccessAuthorizer {
-
-    @Override
-    public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) {
-      return TestOmAcls.aclAllow;
-    }
-  }
-
-}
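TestOmAcls drives every denial through a static flag read by the injected authorizer. Stripped of the Ozone types, the pattern reduces to a toggleable authorizer; the interface and names below are illustrative, not Ozone APIs:

interface Authorizer {
  boolean checkAccess(String resource, String user);
}

final class ToggleAuthorizer implements Authorizer {

  // Flipped by the test between setup (allow) and the operation under
  // test (deny).
  static volatile boolean allow = true;

  @Override
  public boolean checkAccess(String resource, String user) {
    return allow;
  }
}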
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
deleted file mode 100644
index ff1cf03..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-/**
- * This class tests the versioning of blocks from OM side.
- */
-public class TestOmBlockVersioning {
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneConfiguration conf;
-  private static OzoneManager ozoneManager;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    ozoneManager = cluster.getOzoneManager();
-  }
-
-  /**
-   * Shut down the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testAllocateCommit() throws Exception {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    String userName = ugi.getUserName();
-    String adminName = ugi.getUserName();
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
-
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setDataSize(1000)
-        .setRefreshPipeline(true)
-        .setAcls(new ArrayList<>())
-        .build();
-
-    // 1st update, version 0
-    OpenKeySession openKey = ozoneManager.openKey(keyArgs);
-    // explicitly set the keyLocation list before committing the key.
-    keyArgs.setLocationInfoList(openKey.getKeyInfo().getLatestVersionLocations()
-        .getBlocksLatestVersionOnly());
-    ozoneManager.commitKey(keyArgs, openKey.getId());
-
-    OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs);
-    OmKeyLocationInfoGroup highestVersion =
-        checkVersions(keyInfo.getKeyLocationVersions());
-    assertEquals(0, highestVersion.getVersion());
-    assertEquals(1, highestVersion.getLocationList().size());
-
-    // 2nd update, version 1
-    openKey = ozoneManager.openKey(keyArgs);
-    //OmKeyLocationInfo locationInfo =
-    //    ozoneManager.allocateBlock(keyArgs, openKey.getId());
-    // explicitly set the keyLocation list before committing the key.
-    keyArgs.setLocationInfoList(openKey.getKeyInfo().getLatestVersionLocations()
-        .getBlocksLatestVersionOnly());
-    ozoneManager.commitKey(keyArgs, openKey.getId());
-
-    keyInfo = ozoneManager.lookupKey(keyArgs);
-    highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
-    assertEquals(1, highestVersion.getVersion());
-    assertEquals(2, highestVersion.getLocationList().size());
-
-    // 3rd update, version 2
-    openKey = ozoneManager.openKey(keyArgs);
-
-    // This block will be appended to the latest version, version 2.
-    OmKeyLocationInfo locationInfo =
-        ozoneManager.allocateBlock(keyArgs, openKey.getId(),
-            new ExcludeList());
-    List<OmKeyLocationInfo> locationInfoList =
-        openKey.getKeyInfo().getLatestVersionLocations()
-            .getBlocksLatestVersionOnly();
-    Assert.assertEquals(1, locationInfoList.size());
-    locationInfoList.add(locationInfo);
-    keyArgs.setLocationInfoList(locationInfoList);
-    ozoneManager.commitKey(keyArgs, openKey.getId());
-
-    keyInfo = ozoneManager.lookupKey(keyArgs);
-    highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
-    assertEquals(2, highestVersion.getVersion());
-    assertEquals(4, highestVersion.getLocationList().size());
-  }
-
-  private OmKeyLocationInfoGroup checkVersions(
-      List<OmKeyLocationInfoGroup> versions) {
-    OmKeyLocationInfoGroup currentVersion = null;
-    for (OmKeyLocationInfoGroup version : versions) {
-      if (currentVersion != null) {
-        assertEquals(currentVersion.getVersion() + 1, version.getVersion());
-        for (OmKeyLocationInfo info : currentVersion.getLocationList()) {
-          boolean found = false;
-          // All the blocks from the previous version must be present in the
-          // next version.
-          for (OmKeyLocationInfo info2 : version.getLocationList()) {
-            if (info.getLocalID() == info2.getLocalID()) {
-              found = true;
-              break;
-            }
-          }
-          assertTrue(found);
-        }
-      }
-      currentVersion = version;
-    }
-    return currentVersion;
-  }
-
-  @Test
-  public void testReadLatestVersion() throws Exception {
-
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    OzoneBucket bucket =
-        TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
-
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setDataSize(1000)
-        .setRefreshPipeline(true)
-        .build();
-
-    String dataString = RandomStringUtils.randomAlphabetic(100);
-
-    TestDataUtil.createKey(bucket, keyName, dataString);
-    assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
-    OmKeyInfo keyInfo = ozoneManager.lookupKey(omKeyArgs);
-    assertEquals(0, keyInfo.getLatestVersionLocations().getVersion());
-    assertEquals(1,
-        keyInfo.getLatestVersionLocations().getLocationList().size());
-
-    // this write will create 2nd version, 2nd version will contain block from
-    // version 1, and add a new block
-    TestDataUtil.createKey(bucket, keyName, dataString);
-
-
-    keyInfo = ozoneManager.lookupKey(omKeyArgs);
-    assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
-    assertEquals(1, keyInfo.getLatestVersionLocations().getVersion());
-    assertEquals(2,
-        keyInfo.getLatestVersionLocations().getLocationList().size());
-
-    dataString = RandomStringUtils.randomAlphabetic(200);
-    TestDataUtil.createKey(bucket, keyName, dataString);
-
-    keyInfo = ozoneManager.lookupKey(omKeyArgs);
-    assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
-    assertEquals(2, keyInfo.getLatestVersionLocations().getVersion());
-    assertEquals(3,
-        keyInfo.getLatestVersionLocations().getLocationList().size());
-  }
-}
\ No newline at end of file
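checkVersions encodes the block-versioning invariant: every block in version N must reappear in version N + 1, so each commit only ever adds blocks. A self-contained restatement of that check, with Block and Version as stand-ins for OmKeyLocationInfo and OmKeyLocationInfoGroup:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

final class VersionInvariant {

  static final class Block {
    final long localId;

    Block(long localId) {
      this.localId = localId;
    }
  }

  static final class Version {
    final List<Block> blocks;

    Version(List<Block> blocks) {
      this.blocks = blocks;
    }
  }

  // Returns true iff no block is dropped between consecutive versions.
  static boolean blocksCarryForward(List<Version> versions) {
    for (int i = 1; i < versions.size(); i++) {
      Set<Long> next = new HashSet<>();
      for (Block b : versions.get(i).blocks) {
        next.add(b.localId);
      }
      for (Block b : versions.get(i - 1).blocks) {
        if (!next.contains(b.localId)) {
          return false; // a block from the previous version disappeared
        }
      }
    }
    return true;
  }
}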
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java
deleted file mode 100644
index de42fdc..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-/**
- * Test Ozone Manager Init.
- */
-public class TestOmInit {
-  private static MiniOzoneCluster cluster = null;
-  private static OMMetrics omMetrics;
-  private static OzoneConfiguration conf;
-  private static String clusterId;
-  private static String scmId;
-  private static String omId;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    omId = UUID.randomUUID().toString();
-    conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
-    cluster =  MiniOzoneCluster.newBuilder(conf)
-        .setClusterId(clusterId)
-        .setScmId(scmId)
-        .setOmId(omId)
-        .build();
-    cluster.waitForClusterToBeReady();
-    omMetrics = cluster.getOzoneManager().getMetrics();
-  }
-
-  /**
-   * Shut down the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-
-  /**
-   * Tests the OM Initialization.
-   * @throws IOException
-   * @throws AuthenticationException
-   */
-  @Test
-  public void testOmInitAgain() throws IOException,
-      AuthenticationException {
-    // Stop the Ozone Manager
-    cluster.getOzoneManager().stop();
-    // Now try to init the OM again. It should succeed
-    Assert.assertTrue(OzoneManager.omInit(conf));
-  }
-
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
deleted file mode 100644
index e079974..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
+++ /dev/null
@@ -1,443 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyLong;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.client.ContainerBlockID;
-import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.test.MetricsAsserts;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-/**
- * Test for OM metrics.
- */
-@SuppressWarnings("deprecation")
-public class TestOmMetrics {
-  private MiniOzoneCluster cluster;
-  private OzoneManager ozoneManager;
-
-  /**
-   * The exception used for testing failure metrics.
-   */
-  private IOException exception = new IOException();
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   *
-   * @throws IOException
-   */
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL,
-        1000, TimeUnit.MILLISECONDS);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    ozoneManager = cluster.getOzoneManager();
-  }
-
-  /**
-   * Shut down the MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-
-
-  @Test
-  public void testVolumeOps() throws IOException {
-    VolumeManager volumeManager =
-        (VolumeManager) HddsWhiteboxTestUtils.getInternalState(
-            ozoneManager, "volumeManager");
-    VolumeManager mockVm = Mockito.spy(volumeManager);
-
-    Mockito.doNothing().when(mockVm).createVolume(null);
-    Mockito.doNothing().when(mockVm).deleteVolume(null);
-    Mockito.doReturn(null).when(mockVm).getVolumeInfo(null);
-    Mockito.doReturn(true).when(mockVm).checkVolumeAccess(null, null);
-    Mockito.doNothing().when(mockVm).setOwner(null, null);
-    Mockito.doReturn(null).when(mockVm).listVolumes(null, null, null, 0);
-
-    HddsWhiteboxTestUtils.setInternalState(
-        ozoneManager, "volumeManager", mockVm);
-    doVolumeOps();
-
-    MetricsRecordBuilder omMetrics = getMetrics("OMMetrics");
-    assertCounter("NumVolumeOps", 6L, omMetrics);
-    assertCounter("NumVolumeCreates", 1L, omMetrics);
-    assertCounter("NumVolumeUpdates", 1L, omMetrics);
-    assertCounter("NumVolumeInfos", 1L, omMetrics);
-    assertCounter("NumVolumeCheckAccesses", 1L, omMetrics);
-    assertCounter("NumVolumeDeletes", 1L, omMetrics);
-    assertCounter("NumVolumeLists", 1L, omMetrics);
-    assertCounter("NumVolumes", 0L, omMetrics);
-
-    ozoneManager.createVolume(null);
-    ozoneManager.createVolume(null);
-    ozoneManager.createVolume(null);
-    ozoneManager.deleteVolume(null);
-
-    omMetrics = getMetrics("OMMetrics");
-    assertCounter("NumVolumes", 2L, omMetrics);
-
-
-    // inject exception to test for Failure Metrics
-    Mockito.doThrow(exception).when(mockVm).createVolume(null);
-    Mockito.doThrow(exception).when(mockVm).deleteVolume(null);
-    Mockito.doThrow(exception).when(mockVm).getVolumeInfo(null);
-    Mockito.doThrow(exception).when(mockVm).checkVolumeAccess(null, null);
-    Mockito.doThrow(exception).when(mockVm).setOwner(null, null);
-    Mockito.doThrow(exception).when(mockVm).listVolumes(null, null, null, 0);
-
-    HddsWhiteboxTestUtils.setInternalState(ozoneManager,
-        "volumeManager", mockVm);
-    doVolumeOps();
-
-    omMetrics = getMetrics("OMMetrics");
-    assertCounter("NumVolumeOps", 16L, omMetrics);
-    assertCounter("NumVolumeCreates", 5L, omMetrics);
-    assertCounter("NumVolumeUpdates", 2L, omMetrics);
-    assertCounter("NumVolumeInfos", 2L, omMetrics);
-    assertCounter("NumVolumeCheckAccesses", 2L, omMetrics);
-    assertCounter("NumVolumeDeletes", 3L, omMetrics);
-    assertCounter("NumVolumeLists", 2L, omMetrics);
-
-    assertCounter("NumVolumeCreateFails", 1L, omMetrics);
-    assertCounter("NumVolumeUpdateFails", 1L, omMetrics);
-    assertCounter("NumVolumeInfoFails", 1L, omMetrics);
-    assertCounter("NumVolumeCheckAccessFails", 1L, omMetrics);
-    assertCounter("NumVolumeDeleteFails", 1L, omMetrics);
-    assertCounter("NumVolumeListFails", 1L, omMetrics);
-
-    // The last round of volume ops does not increment numVolumes because
-    // those calls failed.
-    assertCounter("NumVolumes", 2L, omMetrics);
-
-    cluster.restartOzoneManager();
-    assertCounter("NumVolumes", 2L, omMetrics);
-
-
-  }
-
-  @Test
-  @Ignore("Test failing because of table cache. Revisit later.")
-  public void testBucketOps() throws IOException {
-    BucketManager bucketManager =
-        (BucketManager) HddsWhiteboxTestUtils.getInternalState(
-            ozoneManager, "bucketManager");
-    BucketManager mockBm = Mockito.spy(bucketManager);
-
-    S3BucketManager s3BucketManager =
-        (S3BucketManager) HddsWhiteboxTestUtils.getInternalState(
-            ozoneManager, "s3BucketManager");
-    S3BucketManager mockS3Bm = Mockito.spy(s3BucketManager);
-
-    Mockito.doNothing().when(mockS3Bm).createS3Bucket("random", "random");
-    Mockito.doNothing().when(mockS3Bm).deleteS3Bucket("random");
-    Mockito.doReturn(true).when(mockS3Bm).createOzoneVolumeIfNeeded(null);
-
-    Mockito.doNothing().when(mockBm).createBucket(null);
-    Mockito.doNothing().when(mockBm).createBucket(null);
-    Mockito.doNothing().when(mockBm).deleteBucket(null, null);
-    Mockito.doReturn(null).when(mockBm).getBucketInfo(null, null);
-    Mockito.doNothing().when(mockBm).setBucketProperty(null);
-    Mockito.doReturn(null).when(mockBm).listBuckets(null, null, null, 0);
-
-    HddsWhiteboxTestUtils.setInternalState(
-        ozoneManager, "bucketManager", mockBm);
-    doBucketOps();
-
-    MetricsRecordBuilder omMetrics = getMetrics("OMMetrics");
-    assertCounter("NumBucketOps", 5L, omMetrics);
-    assertCounter("NumBucketCreates", 1L, omMetrics);
-    assertCounter("NumBucketUpdates", 1L, omMetrics);
-    assertCounter("NumBucketInfos", 1L, omMetrics);
-    assertCounter("NumBucketDeletes", 1L, omMetrics);
-    assertCounter("NumBucketLists", 1L, omMetrics);
-    assertCounter("NumBuckets", 0L, omMetrics);
-
-    ozoneManager.createBucket(null);
-    ozoneManager.createBucket(null);
-    ozoneManager.createBucket(null);
-    ozoneManager.deleteBucket(null, null);
-
-    // Take the already existing values, as the same metrics are shared
-    // across all the test cases.
-    long numVolumesOps = MetricsAsserts.getLongCounter("NumVolumeOps",
-        omMetrics);
-    long numVolumes = MetricsAsserts.getLongCounter("NumVolumes",
-        omMetrics);
-    long numVolumeCreates = MetricsAsserts.getLongCounter("NumVolumeCreates",
-        omMetrics);
-
-    ozoneManager.createS3Bucket("random", "random");
-    ozoneManager.createS3Bucket("random1", "random1");
-    ozoneManager.createS3Bucket("random2", "random2");
-    ozoneManager.deleteS3Bucket("random");
-
-    omMetrics = getMetrics("OMMetrics");
-    assertCounter("NumBuckets", 4L, omMetrics);
-
-    assertCounter("NumVolumeOps", numVolumesOps + 3, omMetrics);
-    assertCounter("NumVolumeCreates", numVolumeCreates + 3, omMetrics);
-    assertCounter("NumVolumes", numVolumes + 3, omMetrics);
-
-
-
-    // inject exception to test for Failure Metrics
-    Mockito.doThrow(exception).when(mockBm).createBucket(null);
-    Mockito.doThrow(exception).when(mockBm).deleteBucket(null, null);
-    Mockito.doThrow(exception).when(mockBm).getBucketInfo(null, null);
-    Mockito.doThrow(exception).when(mockBm).setBucketProperty(null);
-    Mockito.doThrow(exception).when(mockBm).listBuckets(null, null, null, 0);
-
-    HddsWhiteboxTestUtils.setInternalState(
-        ozoneManager, "bucketManager", mockBm);
-    doBucketOps();
-
-    omMetrics = getMetrics("OMMetrics");
-    assertCounter("NumBucketOps", 18L, omMetrics);
-    assertCounter("NumBucketCreates", 8L, omMetrics);
-    assertCounter("NumBucketUpdates", 2L, omMetrics);
-    assertCounter("NumBucketInfos", 2L, omMetrics);
-    assertCounter("NumBucketDeletes", 4L, omMetrics);
-    assertCounter("NumBucketLists", 2L, omMetrics);
-
-    assertCounter("NumBucketCreateFails", 1L, omMetrics);
-    assertCounter("NumBucketUpdateFails", 1L, omMetrics);
-    assertCounter("NumBucketInfoFails", 1L, omMetrics);
-    assertCounter("NumBucketDeleteFails", 1L, omMetrics);
-    assertCounter("NumBucketListFails", 1L, omMetrics);
-
-    assertCounter("NumBuckets", 4L, omMetrics);
-
-    cluster.restartOzoneManager();
-    assertCounter("NumBuckets", 4L, omMetrics);
-  }
-
-  @Test
-  public void testKeyOps() throws IOException {
-    KeyManager keyManager = (KeyManager) HddsWhiteboxTestUtils
-        .getInternalState(ozoneManager, "keyManager");
-    KeyManager mockKm = Mockito.spy(keyManager);
-
-    Mockito.doReturn(null).when(mockKm).openKey(null);
-    Mockito.doNothing().when(mockKm).deleteKey(null);
-    Mockito.doReturn(null).when(mockKm).lookupKey(null, "");
-    Mockito.doReturn(null).when(mockKm).listKeys(null, null, null, null, 0);
-    Mockito.doNothing().when(mockKm).commitKey(any(OmKeyArgs.class), anyLong());
-    Mockito.doReturn(null).when(mockKm).initiateMultipartUpload(
-        any(OmKeyArgs.class));
-
-    HddsWhiteboxTestUtils.setInternalState(
-        ozoneManager, "keyManager", mockKm);
-    doKeyOps();
-
-    MetricsRecordBuilder omMetrics = getMetrics("OMMetrics");
-    assertCounter("NumKeyOps", 6L, omMetrics);
-    assertCounter("NumKeyAllocate", 1L, omMetrics);
-    assertCounter("NumKeyLookup", 1L, omMetrics);
-    assertCounter("NumKeyDeletes", 1L, omMetrics);
-    assertCounter("NumKeyLists", 1L, omMetrics);
-    assertCounter("NumKeys", 0L, omMetrics);
-    assertCounter("NumInitiateMultipartUploads", 1L, omMetrics);
-
-
-    ozoneManager.openKey(null);
-    ozoneManager.commitKey(createKeyArgs(), 0);
-    ozoneManager.openKey(null);
-    ozoneManager.commitKey(createKeyArgs(), 0);
-    ozoneManager.openKey(null);
-    ozoneManager.commitKey(createKeyArgs(), 0);
-    ozoneManager.deleteKey(null);
-
-
-    omMetrics = getMetrics("OMMetrics");
-    assertCounter("NumKeys", 2L, omMetrics);
-
-    // inject exception to test for Failure Metrics
-    Mockito.doThrow(exception).when(mockKm).openKey(null);
-    Mockito.doThrow(exception).when(mockKm).deleteKey(null);
-    Mockito.doThrow(exception).when(mockKm).lookupKey(null, "");
-    Mockito.doThrow(exception).when(mockKm).listKeys(
-        null, null, null, null, 0);
-    Mockito.doThrow(exception).when(mockKm).commitKey(any(OmKeyArgs.class),
-        anyLong());
-    Mockito.doThrow(exception).when(mockKm).initiateMultipartUpload(
-        any(OmKeyArgs.class));
-
-    HddsWhiteboxTestUtils.setInternalState(
-        ozoneManager, "keyManager", mockKm);
-    doKeyOps();
-
-    omMetrics = getMetrics("OMMetrics");
-    assertCounter("NumKeyOps", 19L, omMetrics);
-    assertCounter("NumKeyAllocate", 5L, omMetrics);
-    assertCounter("NumKeyLookup", 2L, omMetrics);
-    assertCounter("NumKeyDeletes", 3L, omMetrics);
-    assertCounter("NumKeyLists", 2L, omMetrics);
-    assertCounter("NumInitiateMultipartUploads", 2L, omMetrics);
-
-    assertCounter("NumKeyAllocateFails", 1L, omMetrics);
-    assertCounter("NumKeyLookupFails", 1L, omMetrics);
-    assertCounter("NumKeyDeleteFails", 1L, omMetrics);
-    assertCounter("NumKeyListFails", 1L, omMetrics);
-    assertCounter("NumInitiateMultipartUploadFails", 1L, omMetrics);
-
-
-    assertCounter("NumKeys", 2L, omMetrics);
-
-    cluster.restartOzoneManager();
-    assertCounter("NumKeys", 2L, omMetrics);
-
-  }
-
-  /**
-   * Invoke volume operations, ignoring any thrown exceptions.
-   */
-  private void doVolumeOps() {
-    try {
-      ozoneManager.createVolume(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.deleteVolume(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.getVolumeInfo(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.checkVolumeAccess(null, null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.setOwner(null, null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.listAllVolumes(null, null, 0);
-    } catch (IOException ignored) {
-    }
-  }
-
-  /**
-   * Invoke bucket operations, ignoring any thrown exceptions.
-   */
-  private void doBucketOps() {
-    try {
-      ozoneManager.createBucket(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.deleteBucket(null, null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.getBucketInfo(null, null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.setBucketProperty(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.listBuckets(null, null, null, 0);
-    } catch (IOException ignored) {
-    }
-  }
-
-  /**
-   * Invoke key operations, ignoring any thrown exceptions.
-   */
-  private void doKeyOps() {
-    try {
-      ozoneManager.openKey(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.deleteKey(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.lookupKey(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.listKeys(null, null, null, null, 0);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.commitKey(createKeyArgs(), 0);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ozoneManager.initiateMultipartUpload(null);
-    } catch (IOException ignored) {
-    }
-
-  }
-
-  private OmKeyArgs createKeyArgs() {
-    OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder()
-        .setBlockID(new BlockID(new ContainerBlockID(1, 1))).build();
-    keyLocationInfo.setCreateVersion(0);
-    List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
-    omKeyLocationInfoList.add(keyLocationInfo);
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setLocationInfoList(
-        omKeyLocationInfoList).build();
-    return keyArgs;
-  }
-}
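The counter arithmetic asserted in TestOmMetrics follows one rule: every call increments its ops counter, and a call that throws additionally increments the matching fails counter before rethrowing. A minimal sketch of that accounting, with AtomicLong standing in for the real metrics library:

import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicLong;

final class OpMetrics {

  final AtomicLong ops = new AtomicLong();
  final AtomicLong fails = new AtomicLong();

  // Wrap an operation: count the attempt, count the failure if it
  // throws, and rethrow so callers still observe the exception.
  <T> T record(Callable<T> op) throws Exception {
    ops.incrementAndGet();
    try {
      return op.call();
    } catch (Exception e) {
      fails.incrementAndGet();
      throw e;
    }
  }
}

Under this rule, testVolumeOps's NumVolumeOps progression from 6 (mocked successes) through 10 (four direct calls) to 16 (six injected failures) falls out directly.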
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
deleted file mode 100644
index 2716d51..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
+++ /dev/null
@@ -1,346 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneIllegalArgumentException;
-import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.protocol.RaftPeer;
-import org.apache.ratis.util.LifeCycle;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.Collection;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Tests OM related configurations.
- */
-public class TestOzoneManagerConfiguration {
-
-  private OzoneConfiguration conf;
-  private MiniOzoneCluster cluster;
-  private String omId;
-  private String clusterId;
-  private String scmId;
-  private OzoneManager om;
-  private OzoneManagerRatisServer omRatisServer;
-
-  private static final long LEADER_ELECTION_TIMEOUT = 500L;
-
-  @Before
-  public void init() throws IOException {
-    conf = new OzoneConfiguration();
-    omId = UUID.randomUUID().toString();
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    final String path = GenericTestUtils.getTempPath(omId);
-    Path metaDirPath = Paths.get(path, "om-meta");
-    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true);
-    conf.setTimeDuration(
-        OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
-        LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS);
-
-    OMStorage omStore = new OMStorage(conf);
-    omStore.setClusterId("testClusterId");
-    omStore.setScmId("testScmId");
-    // writes the version file properties
-    omStore.initialize();
-  }
-
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  private void startCluster() throws Exception {
-    cluster =  MiniOzoneCluster.newBuilder(conf)
-      .setClusterId(clusterId)
-      .setScmId(scmId)
-      .setOmId(omId)
-      .build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  /**
-   * Test that if no OM address is specified, then the OM rpc server
-   * is started on localhost.
-   */
-  @Test
-  public void testNoConfiguredOMAddress() throws Exception {
-    startCluster();
-    om = cluster.getOzoneManager();
-
-    Assert.assertTrue(NetUtils.isLocalAddress(
-        om.getOmRpcServerAddr().getAddress()));
-  }
-
-  /**
-   * Test that if only the hostname is specified for the OM address, then
-   * the default port is used.
-   */
-  @Test
-  public void testDefaultPortIfNotSpecified() throws Exception {
-
-    String omNode1Id = "omNode1";
-    String omNode2Id = "omNode2";
-    String omNodesKeyValue = omNode1Id + "," + omNode2Id;
-    String serviceID = "service1";
-    conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, serviceID);
-    conf.set(OMConfigKeys.OZONE_OM_NODES_KEY + "." + serviceID,
-        omNodesKeyValue);
-
-    String omNode1RpcAddrKey = getOMAddrKeyWithSuffix(serviceID, omNode1Id);
-    String omNode2RpcAddrKey = getOMAddrKeyWithSuffix(serviceID, omNode2Id);
-
-    conf.set(omNode1RpcAddrKey, "0.0.0.0");
-    conf.set(omNode2RpcAddrKey, "122.0.0.122");
-
-    // Set omNode1 as the current node. omNode1 address does not have a port
-    // number specified. So the default port should be taken.
-    conf.set(OMConfigKeys.OZONE_OM_NODE_ID_KEY, omNode1Id);
-
-    startCluster();
-    om = cluster.getOzoneManager();
-    Assert.assertEquals("0.0.0.0",
-        om.getOmRpcServerAddr().getHostName());
-    Assert.assertEquals(OMConfigKeys.OZONE_OM_PORT_DEFAULT,
-        om.getOmRpcServerAddr().getPort());
-
-    // Verify that the 2nd OM's address stored in the current OM also has
-    // the default port, as no port was specified.
-    InetSocketAddress omNode2Addr = om.getPeerNodes().get(0).getRpcAddress();
-    Assert.assertEquals("122.0.0.122", omNode2Addr.getHostString());
-    Assert.assertEquals(OMConfigKeys.OZONE_OM_PORT_DEFAULT,
-        omNode2Addr.getPort());
-
-  }
-
-  /**
-   * Test a single node OM service (default setting for MiniOzoneCluster).
-   * @throws Exception
-   */
-  @Test
-  public void testSingleNodeOMservice() throws Exception {
-    // Default settings of MiniOzoneCluster start a single-node OM service.
-    startCluster();
-    om = cluster.getOzoneManager();
-    omRatisServer = om.getOmRatisServer();
-
-    Assert.assertEquals(LifeCycle.State.RUNNING, om.getOmRatisServerState());
-    // OM's Ratis server should have only 1 peer (itself) in its RaftGroup
-    Collection<RaftPeer> peers = omRatisServer.getRaftGroup().getPeers();
-    Assert.assertEquals(1, peers.size());
-
-    // The RaftPeer id should match the configured omId
-    RaftPeer raftPeer = peers.toArray(new RaftPeer[1])[0];
-    Assert.assertEquals(omId, raftPeer.getId().toString());
-  }
-
-  /**
- * Test configuring an OM service with three OM nodes.
-   * @throws Exception
-   */
-  @Test
-  public void testThreeNodeOMservice() throws Exception {
-    // Set the configuration for a 3-node OM service. Set one node's RPC
-    // address to localhost. OM will parse all configurations and find the
-    // nodeId representing the localhost.
-
-    final String omServiceId = "om-service-test1";
-    final String omNode1Id = "omNode1";
-    final String omNode2Id = "omNode2";
-    final String omNode3Id = "omNode3";
-
-    String omNodesKeyValue = omNode1Id + "," + omNode2Id + "," + omNode3Id;
-    String omNodesKey = OmUtils.addKeySuffixes(
-        OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId);
-
-    String omNode1RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode1Id);
-    String omNode2RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode2Id);
-    String omNode3RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode3Id);
-
-    String omNode3RatisPortKey = OmUtils.addKeySuffixes(
-        OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, omServiceId, omNode3Id);
-
-    conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId);
-    conf.set(omNodesKey, omNodesKeyValue);
-
-    // Set node2 to localhost and the other two nodes to dummy addresses
-    conf.set(omNode1RpcAddrKey, "123.0.0.123:9862");
-    conf.set(omNode2RpcAddrKey, "0.0.0.0:9862");
-    conf.set(omNode3RpcAddrKey, "124.0.0.124:9862");
-
-    conf.setInt(omNode3RatisPortKey, 9898);
-
-    startCluster();
-    om = cluster.getOzoneManager();
-    omRatisServer = om.getOmRatisServer();
-
-    Assert.assertEquals(LifeCycle.State.RUNNING, om.getOmRatisServerState());
-
-    // OM's Ratis server should have 3 peers in its RaftGroup
-    Collection<RaftPeer> peers = omRatisServer.getRaftGroup().getPeers();
-    Assert.assertEquals(3, peers.size());
-
-    // Ratis server RaftPeerId should match with omNode2 ID as node2 is the
-    // localhost
-    Assert.assertEquals(omNode2Id, omRatisServer.getRaftPeerId().toString());
-
-    // Verify peer details
-    for (RaftPeer peer : peers) {
-      String expectedPeerAddress = null;
-      switch (peer.getId().toString()) {
-      case omNode1Id :
-        // Ratis port is not set for node1. So it should take the default port
-        expectedPeerAddress = "123.0.0.123:" +
-            OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT;
-        break;
-      case omNode2Id :
-        expectedPeerAddress = "0.0.0.0:"+
-            OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT;
-        break;
-      case omNode3Id :
-        // Ratis port is explicitly set to 9898 for node3.
-        expectedPeerAddress = "124.0.0.124:9898";
-        break;
-      default : Assert.fail("Unrecognized RaftPeerId");
-      }
-      Assert.assertEquals(expectedPeerAddress, peer.getAddress());
-    }
-  }
-
-  /**
-   * Test a wrong configuration for OM HA. A configuration with none of the
-   * OM addresses matching the local address should throw an error.
-   * @throws Exception
-   */
-  @Test
-  public void testWrongConfiguration() throws Exception {
-    String omServiceId = "om-service-test1";
-
-    String omNode1Id = "omNode1";
-    String omNode2Id = "omNode2";
-    String omNode3Id = "omNode3";
-    String omNodesKeyValue = omNode1Id + "," + omNode2Id + "," + omNode3Id;
-    String omNodesKey = OmUtils.addKeySuffixes(
-        OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId);
-
-    String omNode1RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode1Id);
-    String omNode2RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode2Id);
-    String omNode3RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode3Id);
-
-    conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId);
-    conf.set(omNodesKey, omNodesKeyValue);
-
-    // Set all three nodes to dummy (non-local) addresses
-    conf.set(omNode1RpcAddrKey, "123.0.0.123:9862");
-    conf.set(omNode2RpcAddrKey, "125.0.0.2:9862");
-    conf.set(omNode3RpcAddrKey, "124.0.0.124:9862");
-
-    try {
-      startCluster();
-      Assert.fail("Wrong Configuration. OM initialization should have failed.");
-    } catch (OzoneIllegalArgumentException e) {
-      GenericTestUtils.assertExceptionContains("Configuration has no " +
-          OMConfigKeys.OZONE_OM_ADDRESS_KEY + " address that matches local " +
-          "node's address.", e);
-    }
-  }
-
-  /**
-   * Test multiple OM service configuration.
-   */
-  @Test
-  public void testMultipleOMServiceIds() throws Exception {
-    // Set up OZONE_OM_SERVICES_KEY with 2 service Ids.
-    String om1ServiceId = "om-service-test1";
-    String om2ServiceId = "om-service-test2";
-    String omServices = om1ServiceId + "," + om2ServiceId;
-    conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServices);
-
-    String omNode1Id = "omNode1";
-    String omNode2Id = "omNode2";
-    String omNode3Id = "omNode3";
-    String omNodesKeyValue = omNode1Id + "," + omNode2Id + "," + omNode3Id;
-
-    // Set the node Ids for the 2 services. The nodeIds need to be
-    // distinct within one service. The ids can overlap between
-    // different services.
-    String om1NodesKey = OmUtils.addKeySuffixes(
-        OMConfigKeys.OZONE_OM_NODES_KEY, om1ServiceId);
-    String om2NodesKey = OmUtils.addKeySuffixes(
-        OMConfigKeys.OZONE_OM_NODES_KEY, om2ServiceId);
-    conf.set(om1NodesKey, omNodesKeyValue);
-    conf.set(om2NodesKey, omNodesKeyValue);
-
-    // Set the RPC addresses for all 6 OMs (3 for each service). Exactly one
-    // of these nodes must have the local address.
-    conf.set(getOMAddrKeyWithSuffix(om1ServiceId, omNode1Id),
-        "122.0.0.123:9862");
-    conf.set(getOMAddrKeyWithSuffix(om1ServiceId, omNode2Id),
-        "123.0.0.124:9862");
-    conf.set(getOMAddrKeyWithSuffix(om1ServiceId, omNode3Id),
-        "124.0.0.125:9862");
-    conf.set(getOMAddrKeyWithSuffix(om2ServiceId, omNode1Id),
-        "125.0.0.126:9862");
-    conf.set(getOMAddrKeyWithSuffix(om2ServiceId, omNode2Id),
-        "0.0.0.0:9862");
-    conf.set(getOMAddrKeyWithSuffix(om2ServiceId, omNode3Id),
-        "126.0.0.127:9862");
-
-    startCluster();
-    om = cluster.getOzoneManager();
-    omRatisServer = om.getOmRatisServer();
-
-    Assert.assertEquals(LifeCycle.State.RUNNING, om.getOmRatisServerState());
-
-    // OM's Ratis server should have 3 peers in its RaftGroup
-    Collection<RaftPeer> peers = omRatisServer.getRaftGroup().getPeers();
-    Assert.assertEquals(3, peers.size());
-
-    // Verify that the serviceId and nodeId match the node with the localhost
-    // address - om-service-test2 and omNode2
-    Assert.assertEquals(om2ServiceId, om.getOMServiceId());
-    Assert.assertEquals(omNode2Id, omRatisServer.getRaftPeerId().toString());
-  }
-
-  private String getOMAddrKeyWithSuffix(String serviceId, String nodeId) {
-    return OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
-        serviceId, nodeId);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
deleted file mode 100644
index 62658dc..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
+++ /dev/null
@@ -1,1248 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.net.ConnectException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
-import org.apache.log4j.Logger;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdfs.LogVerificationAppender;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
-import org.apache.hadoop.ozone.OzoneTestUtils;
-import org.apache.hadoop.ozone.client.BucketArgs;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneKeyDetails;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
-import org.apache.hadoop.ozone.om.ha.OMProxyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.util.Time;
-
-
-import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl
-    .NODE_FAILURE_TIMEOUT;
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE;
-import static org.junit.Assert.fail;
-
-/**
- * Test Ozone Manager operations in an HA (multiple OM nodes) setup.
- */
-public class TestOzoneManagerHA {
-
-  private MiniOzoneHAClusterImpl cluster = null;
-  private ObjectStore objectStore;
-  private OzoneConfiguration conf;
-  private String clusterId;
-  private String scmId;
-  private String omServiceId;
-  private int numOfOMs = 3;
-  private static final long SNAPSHOT_THRESHOLD = 50;
-  private static final int LOG_PURGE_GAP = 50;
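-  // Low snapshot threshold and log purge gap so that Ratis snapshotting and
-  // log purging trigger quickly in the tests below.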
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @Rule
-  public Timeout timeout = new Timeout(300_000);
-
-  /**
-   * Create a MiniOzoneHACluster for testing.
-   *
-   * @throws Exception
-   */
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    omServiceId = "om-service-test1";
-    conf.setBoolean(OZONE_ACL_ENABLED, true);
-    conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS,
-        OZONE_ADMINISTRATORS_WILDCARD);
-    conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
-    conf.setInt(OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY, 10);
-    conf.setInt(OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 10);
-    conf.setLong(
-        OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY,
-        SNAPSHOT_THRESHOLD);
-    conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, LOG_PURGE_GAP);
-    cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
-        .setClusterId(clusterId)
-        .setScmId(scmId)
-        .setOMServiceId(omServiceId)
-        .setNumOfOzoneManagers(numOfOMs)
-        .build();
-    cluster.waitForClusterToBeReady();
-    objectStore = OzoneClientFactory.getRpcClient(omServiceId, conf)
-        .getObjectStore();
-  }
-
-  /**
-   * Shutdown MiniOzoneHACluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-
-  private OzoneVolume createAndCheckVolume(String volumeName)
-      throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder()
-        .setOwner(userName)
-        .setAdmin(adminName)
-        .build();
-
-    objectStore.createVolume(volumeName, createVolumeArgs);
-
-    OzoneVolume retVolume = objectStore.getVolume(volumeName);
-
-    Assert.assertEquals(volumeName, retVolume.getName());
-    Assert.assertEquals(userName, retVolume.getOwner());
-    Assert.assertEquals(adminName, retVolume.getAdmin());
-
-    return retVolume;
-  }
-  @Test
-  public void testAllVolumeOperations() throws Exception {
-
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    createAndCheckVolume(volumeName);
-
-    objectStore.deleteVolume(volumeName);
-
-    OzoneTestUtils.expectOmException(OMException.ResultCodes.VOLUME_NOT_FOUND,
-        () -> objectStore.getVolume(volumeName));
-
-    OzoneTestUtils.expectOmException(OMException.ResultCodes.VOLUME_NOT_FOUND,
-        () -> objectStore.deleteVolume(volumeName));
-  }
-
-
-  @Test
-  public void testAllBucketOperations() throws Exception {
-
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    OzoneVolume retVolume = createAndCheckVolume(volumeName);
-
-    BucketArgs bucketArgs =
-        BucketArgs.newBuilder().setStorageType(StorageType.DISK)
-            .setVersioning(true).build();
-
-
-    retVolume.createBucket(bucketName, bucketArgs);
-
-
-    OzoneBucket ozoneBucket = retVolume.getBucket(bucketName);
-
-    Assert.assertEquals(volumeName, ozoneBucket.getVolumeName());
-    Assert.assertEquals(bucketName, ozoneBucket.getName());
-    Assert.assertTrue(ozoneBucket.getVersioning());
-    Assert.assertEquals(StorageType.DISK, ozoneBucket.getStorageType());
-    Assert.assertTrue(ozoneBucket.getCreationTime() <= Time.now());
-
-
-    // Change versioning to false
-    ozoneBucket.setVersioning(false);
-
-    ozoneBucket = retVolume.getBucket(bucketName);
-    Assert.assertFalse(ozoneBucket.getVersioning());
-
-    retVolume.deleteBucket(bucketName);
-
-    OzoneTestUtils.expectOmException(OMException.ResultCodes.BUCKET_NOT_FOUND,
-        () -> retVolume.deleteBucket(bucketName));
-
-  }
-
-  /**
-   * Test a client request when all OM nodes are running. The request should
-   * succeed.
-   * @throws Exception
-   */
-  @Test
-  public void testAllOMNodesRunning() throws Exception {
-    createVolumeTest(true);
-    createKeyTest(true);
-  }
-
-  /**
-   * Test client request succeeds even if one OM is down.
-   */
-  @Test
-  public void testOneOMNodeDown() throws Exception {
-    cluster.stopOzoneManager(1);
-    Thread.sleep(NODE_FAILURE_TIMEOUT * 2);
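-    // Give the cluster time to detect the stopped OM before issuing
-    // client requests.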
-
-    createVolumeTest(true);
-
-    createKeyTest(true);
-  }
-
-  /**
-   * Test client request fails when 2 OMs are down.
-   */
-  @Test
-  public void testTwoOMNodesDown() throws Exception {
-    cluster.stopOzoneManager(1);
-    cluster.stopOzoneManager(2);
-    Thread.sleep(NODE_FAILURE_TIMEOUT * 2);
-
-    createVolumeTest(false);
-
-    createKeyTest(false);
-
-  }
-
-  private OzoneBucket setupBucket() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder()
-        .setOwner(userName)
-        .setAdmin(adminName)
-        .build();
-
-    objectStore.createVolume(volumeName, createVolumeArgs);
-    OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName);
-
-    Assert.assertEquals(volumeName, retVolumeinfo.getName());
-    Assert.assertEquals(userName, retVolumeinfo.getOwner());
-    Assert.assertEquals(adminName, retVolumeinfo.getAdmin());
-
-    String bucketName = UUID.randomUUID().toString();
-    retVolumeinfo.createBucket(bucketName);
-
-    OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);
-
-    Assert.assertEquals(bucketName, ozoneBucket.getName());
-    Assert.assertEquals(volumeName, ozoneBucket.getVolumeName());
-
-    return ozoneBucket;
-  }
-
-  @Test
-  public void testMultipartUpload() throws Exception {
-
-    // Happy scenario when all OMs are up.
-    OzoneBucket ozoneBucket = setupBucket();
-
-    String keyName = UUID.randomUUID().toString();
-    String uploadID = initiateMultipartUpload(ozoneBucket, keyName);
-
-    createMultipartKeyAndReadKey(ozoneBucket, keyName, uploadID);
-
-  }
-
-
-  @Test
-  public void testFileOperationsWithRecursive() throws Exception {
-    OzoneBucket ozoneBucket = setupBucket();
-
-    String data = "random data";
-
-    // one level key name
-    String keyName = UUID.randomUUID().toString();
-    testCreateFile(ozoneBucket, keyName, data, true, false);
-
-    // multi-level key name
-    keyName = "dir1/dir2/dir3/file1";
-    testCreateFile(ozoneBucket, keyName, data, true, false);
-
-
-    data = "random data random data";
-
-    // multi-level key name with overwrite set.
-    testCreateFile(ozoneBucket, keyName, data, true, true);
-
-
-    try {
-      testCreateFile(ozoneBucket, keyName, data, true, false);
-      fail("testFileOperationsWithRecursive");
-    } catch (OMException ex) {
-      Assert.assertEquals(FILE_ALREADY_EXISTS, ex.getResult());
-    }
-
-    // Try now with a file name which is the same as a directory.
-    try {
-      keyName = "folder/folder2";
-      ozoneBucket.createDirectory(keyName);
-      testCreateFile(ozoneBucket, keyName, data, true, false);
-      fail("testFileOperationsWithNonRecursive");
-    } catch (OMException ex) {
-      Assert.assertEquals(NOT_A_FILE, ex.getResult());
-    }
-
-  }
-
-
-  @Test
-  public void testFileOperationsWithNonRecursive() throws Exception {
-    OzoneBucket ozoneBucket = setupBucket();
-
-    String data = "random data";
-
-    // one level key name
-    String keyName = UUID.randomUUID().toString();
-    testCreateFile(ozoneBucket, keyName, data, false, false);
-
-    // multi-level key name
-    keyName = "dir1/dir2/dir3/file1";
-
-    // Should fail, as this is non-recursive and no parent directories exist
-    try {
-      testCreateFile(ozoneBucket, keyName, data, false, false);
-    } catch (OMException ex) {
-      Assert.assertEquals(NOT_A_FILE, ex.getResult());
-    }
-
-    // create directory, now this should pass.
-    ozoneBucket.createDirectory("dir1/dir2/dir3");
-    testCreateFile(ozoneBucket, keyName, data, false, false);
-    data = "random data random data";
-
-    // multi-level key name with overwrite set.
-    testCreateFile(ozoneBucket, keyName, data, false, true);
-
-    try {
-      testCreateFile(ozoneBucket, keyName, data, false, false);
-      fail("testFileOperationsWithRecursive");
-    } catch (OMException ex) {
-      Assert.assertEquals(FILE_ALREADY_EXISTS, ex.getResult());
-    }
-
-
-    // Try now with a file which already exists under the path
-    ozoneBucket.createDirectory("folder1/folder2/folder3/folder4");
-
-    keyName = "folder1/folder2/folder3/folder4/file1";
-    testCreateFile(ozoneBucket, keyName, data, false, false);
-
-    keyName = "folder1/folder2/folder3/file1";
-    testCreateFile(ozoneBucket, keyName, data, false, false);
-
-    // Try now with a key that already exists as a directory. This should fail.
-    try {
-      keyName = "folder/folder2";
-      ozoneBucket.createDirectory(keyName);
-      testCreateFile(ozoneBucket, keyName, data, false, false);
-      fail("testFileOperationsWithNonRecursive");
-    } catch (OMException ex) {
-      Assert.assertEquals(NOT_A_FILE, ex.getResult());
-    }
-
-  }
-
-  /**
-   * This method creates a file and verifies that it was successfully
-   * created.
-   * @param ozoneBucket
-   * @param keyName
-   * @param data
-   * @param recursive
-   * @param overwrite
-   * @throws Exception
-   */
-  public void testCreateFile(OzoneBucket ozoneBucket, String keyName,
-      String data, boolean recursive, boolean overwrite)
-      throws Exception {
-
-    OzoneOutputStream ozoneOutputStream = ozoneBucket.createFile(keyName,
-        data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
-        overwrite, recursive);
-
-    ozoneOutputStream.write(data.getBytes(), 0, data.length());
-    ozoneOutputStream.close();
-
-    OzoneKeyDetails ozoneKeyDetails = ozoneBucket.getKey(keyName);
-
-    Assert.assertEquals(keyName, ozoneKeyDetails.getName());
-    Assert.assertEquals(ozoneBucket.getName(), ozoneKeyDetails.getBucketName());
-    Assert.assertEquals(ozoneBucket.getVolumeName(),
-        ozoneKeyDetails.getVolumeName());
-    Assert.assertEquals(data.length(), ozoneKeyDetails.getDataSize());
-
-    OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName);
-
-    byte[] fileContent = new byte[data.getBytes().length];
-    ozoneInputStream.read(fileContent);
-    Assert.assertEquals(data, new String(fileContent));
-  }
-
-  @Test
-  public void testMultipartUploadWithOneOmNodeDown() throws Exception {
-
-    OzoneBucket ozoneBucket = setupBucket();
-
-    String keyName = UUID.randomUUID().toString();
-    String uploadID = initiateMultipartUpload(ozoneBucket, keyName);
-
-    // After initiating the multipart upload, stop the leader OM to verify
-    // that the upload still completes after the leader changes.
-
-    OMFailoverProxyProvider omFailoverProxyProvider =
-        objectStore.getClientProxy().getOMProxyProvider();
-
-    // The OMFailoverProxyProvider will point to the current leader OM node.
-    String leaderOMNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
-
-    // Stop the current leader OM.
-    cluster.stopOzoneManager(leaderOMNodeId);
-    Thread.sleep(NODE_FAILURE_TIMEOUT * 2);
-
-    createMultipartKeyAndReadKey(ozoneBucket, keyName, uploadID);
-
-    String newLeaderOMNodeId =
-        omFailoverProxyProvider.getCurrentProxyOMNodeId();
-
-    Assert.assertNotEquals(leaderOMNodeId, newLeaderOMNodeId);
-  }
-
-
-  private String initiateMultipartUpload(OzoneBucket ozoneBucket,
-      String keyName) throws Exception {
-
-    OmMultipartInfo omMultipartInfo =
-        ozoneBucket.initiateMultipartUpload(keyName,
-            ReplicationType.RATIS,
-            ReplicationFactor.ONE);
-
-    String uploadID = omMultipartInfo.getUploadID();
-    Assert.assertNotNull(uploadID);
-    return uploadID;
-  }
-
-  private void createMultipartKeyAndReadKey(OzoneBucket ozoneBucket,
-      String keyName, String uploadID) throws Exception {
-
-    String value = "random data";
-    OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey(
-        keyName, value.length(), 1, uploadID);
-    ozoneOutputStream.write(value.getBytes(), 0, value.length());
-    ozoneOutputStream.close();
-
-
-    Map<Integer, String> partsMap = new HashMap<>();
-    partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName());
-    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo =
-        ozoneBucket.completeMultipartUpload(keyName, uploadID, partsMap);
-
-    Assert.assertNotNull(omMultipartUploadCompleteInfo);
-    Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash());
-
-
-    OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName);
-
-    byte[] fileContent = new byte[value.getBytes().length];
-    ozoneInputStream.read(fileContent);
-    Assert.assertEquals(value, new String(fileContent));
-  }
-
-
-  private void createKeyTest(boolean checkSuccess) throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder()
-        .setOwner(userName)
-        .setAdmin(adminName)
-        .build();
-
-    try {
-      objectStore.createVolume(volumeName, createVolumeArgs);
-
-      OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName);
-
-      Assert.assertEquals(volumeName, retVolumeinfo.getName());
-      Assert.assertEquals(userName, retVolumeinfo.getOwner());
-      Assert.assertEquals(adminName, retVolumeinfo.getAdmin());
-
-      String bucketName = UUID.randomUUID().toString();
-      String keyName = UUID.randomUUID().toString();
-      retVolumeinfo.createBucket(bucketName);
-
-      OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);
-
-      Assert.assertEquals(bucketName, ozoneBucket.getName());
-      Assert.assertEquals(volumeName, ozoneBucket.getVolumeName());
-
-      String value = "random data";
-      OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName,
-          value.length(), ReplicationType.STAND_ALONE,
-          ReplicationFactor.ONE, new HashMap<>());
-      ozoneOutputStream.write(value.getBytes(), 0, value.length());
-      ozoneOutputStream.close();
-
-      OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName);
-
-      byte[] fileContent = new byte[value.getBytes().length];
-      ozoneInputStream.read(fileContent);
-      Assert.assertEquals(value, new String(fileContent));
-
-    } catch (ConnectException | RemoteException e) {
-      if (!checkSuccess) {
-        // If the last OM to be tried by the RetryProxy is down, we would get
-        // ConnectException. Otherwise, we would get a RemoteException from the
-        // last running OM as it would fail to get a quorum.
-        if (e instanceof RemoteException) {
-          GenericTestUtils.assertExceptionContains(
-              "NotLeaderException", e);
-        }
-      } else {
-        throw e;
-      }
-    }
-  }
-
-  /**
-   * Create a volume and test its attribute.
-   */
-  private void createVolumeTest(boolean checkSuccess) throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder()
-        .setOwner(userName)
-        .setAdmin(adminName)
-        .build();
-
-    try {
-      objectStore.createVolume(volumeName, createVolumeArgs);
-
-      OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName);
-
-      if (checkSuccess) {
-        Assert.assertEquals(volumeName, retVolumeinfo.getName());
-        Assert.assertEquals(userName, retVolumeinfo.getOwner());
-        Assert.assertEquals(adminName, retVolumeinfo.getAdmin());
-      } else {
-        // With no OM quorum, the request should have failed.
-        fail("There is no quorum. Request should have failed");
-      }
-    } catch (ConnectException | RemoteException e) {
-      if (!checkSuccess) {
-        // If the last OM to be tried by the RetryProxy is down, we would get
-        // ConnectException. Otherwise, we would get a RemoteException from the
-        // last running OM as it would fail to get a quorum.
-        if (e instanceof RemoteException) {
-          GenericTestUtils.assertExceptionContains(
-              "NotLeaderException", e);
-        }
-      } else {
-        throw e;
-      }
-    }
-  }
-
-  /**
-   * Test that OMFailoverProxyProvider creates an OM proxy for each OM in the
-   * cluster.
-   */
-  @Test
-  public void testOMProxyProviderInitialization() throws Exception {
-    OzoneClient rpcClient = cluster.getRpcClient();
-    OMFailoverProxyProvider omFailoverProxyProvider =
-        rpcClient.getObjectStore().getClientProxy().getOMProxyProvider();
-    List<OMProxyInfo> omProxies =
-        omFailoverProxyProvider.getOMProxyInfos();
-
-    Assert.assertEquals(numOfOMs, omProxies.size());
-
-    for (int i = 0; i < numOfOMs; i++) {
-      InetSocketAddress omRpcServerAddr =
-          cluster.getOzoneManager(i).getOmRpcServerAddr();
-      boolean omClientProxyExists = false;
-      for (OMProxyInfo omProxyInfo : omProxies) {
-        if (omProxyInfo.getAddress().equals(omRpcServerAddr)) {
-          omClientProxyExists = true;
-          break;
-        }
-      }
-      Assert.assertTrue("There is no OM Client Proxy corresponding to OM " +
-              "node" + cluster.getOzoneManager(i).getOMNodeId(),
-          omClientProxyExists);
-    }
-  }
-
-  /**
-   * Test OMFailoverProxyProvider failover when the connection to the
-   * current OM proxy fails.
-   */
-  @Test
-  public void testOMProxyProviderFailoverOnConnectionFailure()
-      throws Exception {
-    OMFailoverProxyProvider omFailoverProxyProvider =
-        objectStore.getClientProxy().getOMProxyProvider();
-    String firstProxyNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
-
-    createVolumeTest(true);
-
-    // On stopping the current OM proxy, the next connection attempt should
-    // fail over to another OM proxy.
-    cluster.stopOzoneManager(firstProxyNodeId);
-    Thread.sleep(OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT * 4);
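-    // Sleep long enough for any failover wait in the client retry policy
-    // to elapse before the next request.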
-
-    // Next request to the proxy provider should result in a failover
-    createVolumeTest(true);
-    Thread.sleep(OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT);
-
-    // Get the new OM Proxy NodeId
-    String newProxyNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
-
-    // Verify that a failover occurred. The new proxy nodeId should be
-    // different from the old proxy nodeId.
-    Assert.assertNotEquals("Failover did not occur as expected",
-        firstProxyNodeId, newProxyNodeId);
-  }
-
-  /**
-   * Test OMFailoverProxyProvider failover when current OM proxy is not
-   * the current OM Leader.
-   */
-  @Test
-  public void testOMProxyProviderFailoverToCurrentLeader() throws Exception {
-    OMFailoverProxyProvider omFailoverProxyProvider =
-        objectStore.getClientProxy().getOMProxyProvider();
-
-    // Run a couple of createVolume tests to discover the current leader OM
-    createVolumeTest(true);
-    createVolumeTest(true);
-
-    // The OMFailoverProxyProvider will point to the current leader OM node.
-    String leaderOMNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
-
-    // Perform a manual failover of the proxy provider to move the
-    // currentProxyIndex to a node other than the leader OM.
-    omFailoverProxyProvider.performFailover(
-        (OzoneManagerProtocolPB) omFailoverProxyProvider.getProxy().proxy);
-
-    String newProxyNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
-    Assert.assertNotEquals(leaderOMNodeId, newProxyNodeId);
-
-    // Once another request is sent to this new proxy node, the leader
-    // information must be returned via the response and a failover must
-    // happen to the leader proxy node.
-    createVolumeTest(true);
-    Thread.sleep(2000);
-
-    String newLeaderOMNodeId =
-        omFailoverProxyProvider.getCurrentProxyOMNodeId();
-
-    // The old and new Leader OM NodeId must match since there was no new
-    // election in the Ratis ring.
-    Assert.assertEquals(leaderOMNodeId, newLeaderOMNodeId);
-  }
-
-  @Test
-  public void testOMRetryProxy() throws Exception {
-    // Stop all the OMs. After exhausting the configured 10 retries and 10
-    // failovers, the RpcClient should give up.
-    for (int i = 0; i < numOfOMs; i++) {
-      cluster.stopOzoneManager(i);
-    }
-
-    final LogVerificationAppender appender = new LogVerificationAppender();
-    final org.apache.log4j.Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
-
-    try {
-      createVolumeTest(true);
-      fail("TestOMRetryProxy should fail when there are no OMs running");
-    } catch (ConnectException e) {
-      // Each retry attempt tries up to 10 times to connect. So there should
-      // be 10*10 "Retrying connect to server" messages
-      Assert.assertEquals(100,
-          appender.countLinesWithMessage("Retrying connect to server:"));
-
-      Assert.assertEquals(1,
-          appender.countLinesWithMessage("Failed to connect to OM. Attempted " +
-              "10 retries and 10 failovers"));
-    }
-  }
-
-  @Test
-  public void testReadRequest() throws Exception {
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    objectStore.createVolume(volumeName);
-
-    OMFailoverProxyProvider omFailoverProxyProvider =
-        objectStore.getClientProxy().getOMProxyProvider();
-    String currentLeaderNodeId = omFailoverProxyProvider
-        .getCurrentProxyOMNodeId();
-
-    // A read request from any proxy should fail over to the current leader OM
-    for (int i = 0; i < numOfOMs; i++) {
-      // Failover OMFailoverProxyProvider to OM at index i
-      OzoneManager ozoneManager = cluster.getOzoneManager(i);
-      String omHostName = ozoneManager.getOmRpcServerAddr().getHostName();
-      int rpcPort = ozoneManager.getOmRpcServerAddr().getPort();
-
-      // Get the ObjectStore and FailoverProxyProvider for OM at index i
-      final ObjectStore store = OzoneClientFactory.getRpcClient(
-          omHostName, rpcPort, omServiceId, conf).getObjectStore();
-      final OMFailoverProxyProvider proxyProvider =
-          store.getClientProxy().getOMProxyProvider();
-
-      // Failover to the OM node that the objectStore points to
-      omFailoverProxyProvider.performFailoverIfRequired(
-          ozoneManager.getOMNodeId());
-
-      // A read request should result in the proxyProvider failing over to
-      // leader node.
-      OzoneVolume volume = store.getVolume(volumeName);
-      Assert.assertEquals(volumeName, volume.getName());
-
-      Assert.assertEquals(currentLeaderNodeId,
-          proxyProvider.getCurrentProxyOMNodeId());
-    }
-  }
-
-  @Test
-  public void testAddBucketAcl() throws Exception {
-    OzoneBucket ozoneBucket = setupBucket();
-    String remoteUserName = "remoteUser";
-    OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName,
-        READ, DEFAULT);
-
-    OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder()
-        .setResType(OzoneObj.ResourceType.BUCKET)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .setVolumeName(ozoneBucket.getVolumeName())
-        .setBucketName(ozoneBucket.getName()).build();
-
-    testAddAcl(remoteUserName, ozoneObj, defaultUserAcl);
-  }
-  @Test
-  public void testRemoveBucketAcl() throws Exception {
-    OzoneBucket ozoneBucket = setupBucket();
-    String remoteUserName = "remoteUser";
-    OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName,
-        READ, DEFAULT);
-
-    OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder()
-        .setResType(OzoneObj.ResourceType.BUCKET)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .setVolumeName(ozoneBucket.getVolumeName())
-        .setBucketName(ozoneBucket.getName()).build();
-
-    testRemoveAcl(remoteUserName, ozoneObj, defaultUserAcl);
-
-  }
-
-  @Test
-  public void testSetBucketAcl() throws Exception {
-    OzoneBucket ozoneBucket = setupBucket();
-    String remoteUserName = "remoteUser";
-    OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName,
-        READ, DEFAULT);
-
-    OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder()
-        .setResType(OzoneObj.ResourceType.BUCKET)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .setVolumeName(ozoneBucket.getVolumeName())
-        .setBucketName(ozoneBucket.getName()).build();
-
-    testSetAcl(remoteUserName, ozoneObj, defaultUserAcl);
-  }
-
-  private boolean containsAcl(OzoneAcl ozoneAcl, List<OzoneAcl> ozoneAcls) {
-    for (OzoneAcl acl : ozoneAcls) {
-      boolean result = compareAcls(ozoneAcl, acl);
-      if (result) {
-        // We found a match, return.
-        return result;
-      }
-    }
-    return false;
-  }
-
-  private boolean compareAcls(OzoneAcl givenAcl, OzoneAcl existingAcl) {
-    if (givenAcl.getType().equals(existingAcl.getType())
-        && givenAcl.getName().equals(existingAcl.getName())
-        && givenAcl.getAclScope().equals(existingAcl.getAclScope())) {
-      BitSet bitSet = (BitSet) givenAcl.getAclBitSet().clone();
-      bitSet.and(existingAcl.getAclBitSet());
-      if (bitSet.equals(existingAcl.getAclBitSet())) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  @Test
-  public void testAddKeyAcl() throws Exception {
-    OzoneBucket ozoneBucket = setupBucket();
-    String remoteUserName = "remoteUser";
-    OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName,
-        READ, DEFAULT);
-
-    String key = createKey(ozoneBucket);
-
-    OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder()
-        .setResType(OzoneObj.ResourceType.KEY)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .setVolumeName(ozoneBucket.getVolumeName())
-        .setBucketName(ozoneBucket.getName())
-        .setKeyName(key).build();
-
-    testAddAcl(remoteUserName, ozoneObj, userAcl);
-  }
-
-  @Test
-  public void testRemoveKeyAcl() throws Exception {
-    OzoneBucket ozoneBucket = setupBucket();
-    String remoteUserName = "remoteUser";
-    OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName,
-        READ, DEFAULT);
-
-    String key = createKey(ozoneBucket);
-
-    OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder()
-        .setResType(OzoneObj.ResourceType.KEY)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .setVolumeName(ozoneBucket.getVolumeName())
-        .setBucketName(ozoneBucket.getName())
-        .setKeyName(key).build();
-
-    testRemoveAcl(remoteUserName, ozoneObj, userAcl);
-
-  }
-
-  @Test
-  public void testSetKeyAcl() throws Exception {
-    OzoneBucket ozoneBucket = setupBucket();
-    String remoteUserName = "remoteUser";
-    OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName,
-        READ, DEFAULT);
-
-    String key = createKey(ozoneBucket);
-
-    OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder()
-        .setResType(OzoneObj.ResourceType.KEY)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .setVolumeName(ozoneBucket.getVolumeName())
-        .setBucketName(ozoneBucket.getName())
-        .setKeyName(key).build();
-
-    testSetAcl(remoteUserName, ozoneObj, userAcl);
-
-  }
-
-  @Test
-  public void testAddPrefixAcl() throws Exception {
-    OzoneBucket ozoneBucket = setupBucket();
-    String remoteUserName = "remoteUser";
-    String prefixName = RandomStringUtils.randomAlphabetic(5) + "/";
-    OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName,
-        READ, DEFAULT);
-
-    OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder()
-        .setResType(OzoneObj.ResourceType.PREFIX)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .setVolumeName(ozoneBucket.getVolumeName())
-        .setBucketName(ozoneBucket.getName())
-        .setPrefixName(prefixName).build();
-
-    testAddAcl(remoteUserName, ozoneObj, defaultUserAcl);
-  }
-  @Test
-  public void testRemovePrefixAcl() throws Exception {
-    OzoneBucket ozoneBucket = setupBucket();
-    String remoteUserName = "remoteUser";
-    String prefixName = RandomStringUtils.randomAlphabetic(5) + "/";
-    OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName,
-        READ, ACCESS);
-    OzoneAcl userAcl1 = new OzoneAcl(USER, "remote",
-        READ, ACCESS);
-
-    OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder()
-        .setResType(OzoneObj.ResourceType.PREFIX)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .setVolumeName(ozoneBucket.getVolumeName())
-        .setBucketName(ozoneBucket.getName())
-        .setPrefixName(prefixName).build();
-
-    boolean result = objectStore.addAcl(ozoneObj, userAcl);
-    Assert.assertTrue(result);
-
-    result = objectStore.addAcl(ozoneObj, userAcl1);
-    Assert.assertTrue(result);
-
-    result = objectStore.removeAcl(ozoneObj, userAcl);
-    Assert.assertTrue(result);
-
-    // try removing already removed acl.
-    result = objectStore.removeAcl(ozoneObj, userAcl);
-    Assert.assertFalse(result);
-
-    result = objectStore.removeAcl(ozoneObj, userAcl1);
-    Assert.assertTrue(result);
-
-  }
-
-  @Test
-  public void testSetPrefixAcl() throws Exception {
-    OzoneBucket ozoneBucket = setupBucket();
-    String remoteUserName = "remoteUser";
-    String prefixName = RandomStringUtils.randomAlphabetic(5) + "/";
-    OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName,
-        READ, DEFAULT);
-
-    OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder()
-        .setResType(OzoneObj.ResourceType.PREFIX)
-        .setStoreType(OzoneObj.StoreType.OZONE)
-        .setVolumeName(ozoneBucket.getVolumeName())
-        .setBucketName(ozoneBucket.getName())
-        .setPrefixName(prefixName).build();
-
-    testSetAcl(remoteUserName, ozoneObj, defaultUserAcl);
-  }
-
-
-  private void testSetAcl(String remoteUserName, OzoneObj ozoneObj,
-      OzoneAcl userAcl) throws Exception {
-    // By default, create adds some default ACLs through the RpcClient.
-
-    if (!ozoneObj.getResourceType().name().equals(
-        OzoneObj.ResourceType.PREFIX.name())) {
-      List<OzoneAcl> acls = objectStore.getAcl(ozoneObj);
-
-      Assert.assertTrue(acls.size() > 0);
-    }
-
-    OzoneAcl modifiedUserAcl = new OzoneAcl(USER, remoteUserName,
-        WRITE, DEFAULT);
-
-    List<OzoneAcl> newAcls = Collections.singletonList(modifiedUserAcl);
-    boolean setAcl = objectStore.setAcl(ozoneObj, newAcls);
-    Assert.assertTrue(setAcl);
-
-    // Get the acls and check that they have been reset to the new list.
-    List<OzoneAcl> getAcls = objectStore.getAcl(ozoneObj);
-
-    Assert.assertEquals(newAcls.size(), getAcls.size());
-    int i = 0;
-    for (OzoneAcl ozoneAcl : newAcls) {
-      Assert.assertTrue(compareAcls(getAcls.get(i++), ozoneAcl));
-    }
-
-  }
-
-  private void testAddAcl(String remoteUserName, OzoneObj ozoneObj,
-      OzoneAcl userAcl) throws Exception {
-    boolean addAcl = objectStore.addAcl(ozoneObj, userAcl);
-    Assert.assertTrue(addAcl);
-
-    List<OzoneAcl> acls = objectStore.getAcl(ozoneObj);
-
-    Assert.assertTrue(containsAcl(userAcl, acls));
-
-    // Add an already existing acl.
-    addAcl = objectStore.addAcl(ozoneObj, userAcl);
-    Assert.assertFalse(addAcl);
-
-    // Add an acl with the same type, name and scope but a different right.
-    userAcl = new OzoneAcl(USER, remoteUserName,
-        WRITE, DEFAULT);
-    addAcl = objectStore.addAcl(ozoneObj, userAcl);
-    Assert.assertTrue(addAcl);
-  }
-
-  private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj,
-      OzoneAcl userAcl)
-      throws Exception{
-    // By default, create adds some default ACLs through the RpcClient.
-    List<OzoneAcl> acls = objectStore.getAcl(ozoneObj);
-
-    Assert.assertTrue(acls.size() > 0);
-
-    // Remove an existing acl.
-    boolean removeAcl = objectStore.removeAcl(ozoneObj, acls.get(0));
-    Assert.assertTrue(removeAcl);
-
-    // Trying to remove an already removed acl.
-    removeAcl = objectStore.removeAcl(ozoneObj, acls.get(0));
-    Assert.assertFalse(removeAcl);
-
-    boolean addAcl = objectStore.addAcl(ozoneObj, userAcl);
-    Assert.assertTrue(addAcl);
-
-    // Only the acl right is changed to WRITE; the rest is the same as userAcl.
-    OzoneAcl modifiedUserAcl = new OzoneAcl(USER, remoteUserName,
-        WRITE, DEFAULT);
-    addAcl = objectStore.addAcl(ozoneObj, modifiedUserAcl);
-    Assert.assertTrue(addAcl);
-
-    removeAcl = objectStore.removeAcl(ozoneObj, modifiedUserAcl);
-    Assert.assertTrue(removeAcl);
-
-    removeAcl = objectStore.removeAcl(ozoneObj, userAcl);
-    Assert.assertTrue(removeAcl);
-  }
-
-  @Test
-  public void testOMRatisSnapshot() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder()
-        .setOwner(userName)
-        .setAdmin(adminName)
-        .build();
-
-    objectStore.createVolume(volumeName, createVolumeArgs);
-    OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName);
-
-    retVolumeinfo.createBucket(bucketName);
-    OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);
-
-    String leaderOMNodeId = objectStore.getClientProxy().getOMProxyProvider()
-        .getCurrentProxyOMNodeId();
-    OzoneManager ozoneManager = cluster.getOzoneManager(leaderOMNodeId);
-
-    // Send commands to ratis to increase the log index so that ratis
-    // triggers a snapshot on the state machine.
-
-    long appliedLogIndex = 0;
-    while (appliedLogIndex <= SNAPSHOT_THRESHOLD) {
-      createKey(ozoneBucket);
-      appliedLogIndex = ozoneManager.getOmRatisServer()
-          .getStateMachineLastAppliedIndex();
-    }
-
-    GenericTestUtils.waitFor(() -> {
-      if (ozoneManager.getRatisSnapshotIndex() > 0) {
-        return true;
-      }
-      return false;
-    }, 1000, 100000);
-
-    // The current lastAppliedLogIndex on the state machine should be greater
-    // than or equal to the saved snapshot index.
-    long smLastAppliedIndex =
-        ozoneManager.getOmRatisServer().getStateMachineLastAppliedIndex();
-    long ratisSnapshotIndex = ozoneManager.getRatisSnapshotIndex();
-    Assert.assertTrue("LastAppliedIndex on OM State Machine ("
-            + smLastAppliedIndex + ") is less than the saved snapshot index("
-            + ratisSnapshotIndex + ").",
-        smLastAppliedIndex >= ratisSnapshotIndex);
-
-    // Add more transactions to Ratis to trigger another snapshot
-    while (appliedLogIndex <= (smLastAppliedIndex + SNAPSHOT_THRESHOLD)) {
-      createKey(ozoneBucket);
-      appliedLogIndex = ozoneManager.getOmRatisServer()
-          .getStateMachineLastAppliedIndex();
-    }
-
-    GenericTestUtils.waitFor(() -> {
-      if (ozoneManager.getRatisSnapshotIndex() > 0) {
-        return true;
-      }
-      return false;
-    }, 1000, 100000);
-
-    // The new snapshot index must be greater than the previous snapshot index
-    long ratisSnapshotIndexNew = ozoneManager.getRatisSnapshotIndex();
-    Assert.assertTrue("Latest snapshot index must be greater than previous " +
-        "snapshot indices", ratisSnapshotIndexNew > ratisSnapshotIndex);
-
-  }
-
-  /**
-   * Create a key in the bucket.
-   * @return the key name.
-   */
-  static String createKey(OzoneBucket ozoneBucket) throws IOException {
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    String data = "data" + RandomStringUtils.randomNumeric(5);
-    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName,
-        data.length(), ReplicationType.STAND_ALONE,
-        ReplicationFactor.ONE, new HashMap<>());
-    ozoneOutputStream.write(data.getBytes(), 0, data.length());
-    ozoneOutputStream.close();
-    return keyName;
-  }
-
-  @Test
-  public void testOMRestart() throws Exception {
-    // Get the leader OM
-    String leaderOMNodeId = objectStore.getClientProxy().getOMProxyProvider()
-        .getCurrentProxyOMNodeId();
-    OzoneManager leaderOM = cluster.getOzoneManager(leaderOMNodeId);
-
-    // Get follower OMs
-    OzoneManager followerOM1 = cluster.getOzoneManager(
-        leaderOM.getPeerNodes().get(0).getOMNodeId());
-    OzoneManager followerOM2 = cluster.getOzoneManager(
-        leaderOM.getPeerNodes().get(1).getOMNodeId());
-
-    // Do some transactions so that the log index increases
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder()
-        .setOwner(userName)
-        .setAdmin(adminName)
-        .build();
-
-    objectStore.createVolume(volumeName, createVolumeArgs);
-    OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName);
-
-    retVolumeinfo.createBucket(bucketName);
-    OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);
-
-    for (int i = 0; i < 10; i++) {
-      createKey(ozoneBucket);
-    }
-
-    long lastAppliedTxOnFollowerOM =
-        followerOM1.getOmRatisServer().getStateMachineLastAppliedIndex();
-
-    // Stop one follower OM
-    followerOM1.stop();
-
-    // Do more transactions. The stopped OM should miss these transactions,
-    // and the logs corresponding to at least some of the missed transactions
-    // should be purged. This will force the OM to install a snapshot when
-    // restarted.
-    long minNewTxIndex = lastAppliedTxOnFollowerOM + (LOG_PURGE_GAP * 10);
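-    // Pushing at least LOG_PURGE_GAP * 10 transactions past the follower's
-    // last applied index should be enough for Ratis to purge log segments
-    // that the stopped follower still needs.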
-    long leaderOMappliedLogIndex = leaderOM.getOmRatisServer()
-        .getStateMachineLastAppliedIndex();
-
-    List<String> missedKeys = new ArrayList<>();
-    while (leaderOMappliedLogIndex < minNewTxIndex) {
-      missedKeys.add(createKey(ozoneBucket));
-      leaderOMappliedLogIndex = leaderOM.getOmRatisServer()
-          .getStateMachineLastAppliedIndex();
-    }
-
-    // Restart the stopped OM.
-    followerOM1.restart();
-
-    // Trigger a snapshot on the leader OM and get its index.
-    long leaderOMSnapshotIndex = leaderOM.saveRatisSnapshot();
-
-    // The recently started OM should be lagging behind the leader OM.
-    long followerOMLastAppliedIndex =
-        followerOM1.getOmRatisServer().getStateMachineLastAppliedIndex();
-    Assert.assertTrue(
-        followerOMLastAppliedIndex < leaderOMSnapshotIndex);
-
-    // Wait for the follower OM to catch up
-    GenericTestUtils.waitFor(() -> {
-      long lastAppliedIndex =
-          followerOM1.getOmRatisServer().getStateMachineLastAppliedIndex();
-      if (lastAppliedIndex >= leaderOMSnaphsotIndex) {
-        return true;
-      }
-      return false;
-    }, 100, 200000);
-
-    // Do more transactions. The restarted OM should receive the new
-    // transactions. Its last applied tx index should increase past the
-    // last snapshot index after more transactions are applied.
-    for (int i = 0; i < 10; i++) {
-      createKey(ozoneBucket);
-    }
-    long followerOM1lastAppliedIndex = followerOM1.getOmRatisServer()
-        .getStateMachineLastAppliedIndex();
-    Assert.assertTrue(followerOM1lastAppliedIndex >
-        leaderOMSnapshotIndex);
-
-    // The follower OMs should be in sync. There can be a small lag between
-    // leader OM and follower OMs as txns are applied first on leader OM.
-    long followerOM2lastAppliedIndex = followerOM2.getOmRatisServer()
-        .getStateMachineLastAppliedIndex();
-    Assert.assertEquals(followerOM1lastAppliedIndex,
-        followerOM2lastAppliedIndex);
-
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java
deleted file mode 100644
index 8168d27a..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.core.type.TypeReference;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.ServicePort;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.HttpClients;
-import org.apache.http.util.EntityUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.net.InetSocketAddress;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.ozone.OmUtils.getOmAddressForClients;
-
-/**
- * Tests the REST interface exposed by the OzoneManager.
- */
-public class TestOzoneManagerRestInterface {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testGetServiceList() throws Exception {
-    OzoneManagerHttpServer server =
-        cluster.getOzoneManager().getHttpServer();
-    HttpClient client = HttpClients.createDefault();
-    String connectionUri = "http://" +
-        NetUtils.getHostPortString(server.getHttpAddress());
-    HttpGet httpGet = new HttpGet(connectionUri + "/serviceList");
-    HttpResponse response = client.execute(httpGet);
-    String serviceListJson = EntityUtils.toString(response.getEntity());
-
-    ObjectMapper objectMapper = new ObjectMapper();
-    TypeReference<List<ServiceInfo>> serviceInfoReference =
-        new TypeReference<List<ServiceInfo>>() {};
-    List<ServiceInfo> serviceInfos = objectMapper.readValue(
-        serviceListJson, serviceInfoReference);
-    Map<HddsProtos.NodeType, ServiceInfo> serviceMap = new HashMap<>();
-    for (ServiceInfo serviceInfo : serviceInfos) {
-      serviceMap.put(serviceInfo.getNodeType(), serviceInfo);
-    }
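-    // The service list is expected to contain one entry per node type
-    // (OM, SCM, DATANODE); the assertions below rely on this.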
-
-    InetSocketAddress omAddress =
-        getOmAddressForClients(conf);
-    ServiceInfo omInfo = serviceMap.get(HddsProtos.NodeType.OM);
-
-    Assert.assertEquals(omAddress.getHostName(), omInfo.getHostname());
-    Assert.assertEquals(omAddress.getPort(),
-        omInfo.getPort(ServicePort.Type.RPC));
-    Assert.assertEquals(server.getHttpAddress().getPort(),
-        omInfo.getPort(ServicePort.Type.HTTP));
-
-    InetSocketAddress scmAddress =
-        getScmAddressForClients(conf);
-    ServiceInfo scmInfo = serviceMap.get(HddsProtos.NodeType.SCM);
-
-    Assert.assertEquals(scmAddress.getHostName(), scmInfo.getHostname());
-    Assert.assertEquals(scmAddress.getPort(),
-        scmInfo.getPort(ServicePort.Type.RPC));
-
-    ServiceInfo datanodeInfo = serviceMap.get(HddsProtos.NodeType.DATANODE);
-    DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails();
-    Assert.assertEquals(datanodeDetails.getHostName(),
-        datanodeInfo.getHostname());
-
-    Map<ServicePort.Type, Integer> ports = datanodeInfo.getPorts();
-    for (ServicePort.Type type : ports.keySet()) {
-      switch (type) {
-      case HTTP:
-      case HTTPS:
-        Assert.assertEquals(
-            datanodeDetails.getPort(DatanodeDetails.Port.Name.REST).getValue(),
-            ports.get(type));
-        break;
-      default:
-        // OM only sends the Datanode's info port details (HTTP or HTTPS).
-        // Other ports are not expected as of now.
-        Assert.fail();
-        break;
-      }
-    }
-  }
-
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
deleted file mode 100644
index 443f305..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
-import org.junit.After;
-import org.junit.Assert;
-import static org.junit.Assert.fail;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-/**
- * Tests client operations after the cluster starts, restarts the OM and SCM,
- * then repeats the operations and verifies the expected behavior.
- */
-public class TestOzoneManagerRestart {
-  private MiniOzoneCluster cluster = null;
-  private OzoneConfiguration conf;
-  private String clusterId;
-  private String scmId;
-  private String omId;
-
-  @Rule
-  public Timeout timeout = new Timeout(60000);
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    omId = UUID.randomUUID().toString();
-    conf.setBoolean(OZONE_ACL_ENABLED, true);
-    conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
-    conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD);
-    cluster =  MiniOzoneCluster.newBuilder(conf)
-        .setClusterId(clusterId)
-        .setScmId(scmId)
-        .setOmId(omId)
-        .build();
-    cluster.waitForClusterToBeReady();
-
-  }
-
-  /**
-   * Shutdown MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testRestartOMWithVolumeOperation() throws Exception {
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    OzoneClient client = cluster.getClient();
-
-    ObjectStore objectStore = client.getObjectStore();
-
-    objectStore.createVolume(volumeName);
-
-    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
-    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
-
-    cluster.restartOzoneManager();
-    cluster.restartStorageContainerManager(true);
-
-    // After restart, creating the same volume again should fail.
-    try {
-      objectStore.createVolume(volumeName);
-      fail("testRestartOM failed");
-    } catch (IOException ex) {
-      GenericTestUtils.assertExceptionContains("VOLUME_ALREADY_EXISTS", ex);
-    }
-
-    // Get Volume.
-    ozoneVolume = objectStore.getVolume(volumeName);
-    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
-
-  }
-
-
-  @Test
-  public void testRestartOMWithBucketOperation() throws Exception {
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    OzoneClient client = cluster.getClient();
-
-    ObjectStore objectStore = client.getObjectStore();
-
-    objectStore.createVolume(volumeName);
-
-    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
-    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
-
-    ozoneVolume.createBucket(bucketName);
-
-    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
-    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
-
-    cluster.restartOzoneManager();
-    cluster.restartStorageContainerManager(true);
-
-    // After restart, creating the same bucket again should fail.
-    try {
-      ozoneVolume.createBucket(bucketName);
-      fail("testRestartOMWithBucketOperation failed");
-    } catch (IOException ex) {
-      GenericTestUtils.assertExceptionContains("BUCKET_ALREADY_EXISTS", ex);
-    }
-
-    // Get bucket.
-    ozoneBucket = ozoneVolume.getBucket(bucketName);
-    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
-
-  }
-
-
-  @Test
-  public void testRestartOMWithKeyOperation() throws Exception {
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String key = "key" + RandomStringUtils.randomNumeric(5);
-
-    OzoneClient client = cluster.getClient();
-
-    ObjectStore objectStore = client.getObjectStore();
-
-    objectStore.createVolume(volumeName);
-
-    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
-    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
-
-    ozoneVolume.createBucket(bucketName);
-
-    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
-    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
-
-    String data = "random data";
-    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
-        data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
-        new HashMap<>());
-
-    ozoneOutputStream.write(data.getBytes(), 0, data.length());
-    ozoneOutputStream.close();
-
-    cluster.restartOzoneManager();
-    cluster.restartStorageContainerManager(true);
-
-
-    // Keys may be overwritten, so re-creating the key is not tested here.
-    // Instead, verify that the key still exists after the restart.
-
-    // Get key.
-    OzoneKey ozoneKey = ozoneBucket.getKey(key);
-    Assert.assertTrue(ozoneKey.getName().equals(key));
-    Assert.assertTrue(ozoneKey.getReplicationType().equals(
-        ReplicationType.RATIS));
-  }
-
-
-}
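
The restart tests above all use the same JUnit 4 idiom for "this call must now fail": invoke the operation, fail() if it returns normally, and assert on the exception message in the catch block. A generic sketch with no Ozone types (createVolume is a hypothetical stand-in for objectStore.createVolume()):

    import static org.junit.Assert.assertTrue;
    import static org.junit.Assert.fail;

    import java.io.IOException;
    import org.junit.Test;

    public class ExpectedFailureIdiomTest {

      // Hypothetical stand-in: the volume always "already exists" here.
      private void createVolume(String name) throws IOException {
        throw new IOException("VOLUME_ALREADY_EXISTS");
      }

      @Test
      public void duplicateCreateMustFail() {
        try {
          createVolume("vol1");
          fail("Expected the duplicate create to fail");
        } catch (IOException ex) {
          // The Ozone tests delegate this check to assertExceptionContains.
          assertTrue(ex.getMessage().contains("VOLUME_ALREADY_EXISTS"));
        }
      }
    }
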
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java
deleted file mode 100644
index 5ca2eea..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om;
-
-import java.util.UUID;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-/**
- * Test RocksDB logging for Ozone Manager.
- */
-public class TestOzoneManagerRocksDBLogging {
-  private MiniOzoneCluster cluster = null;
-  private OzoneConfiguration conf;
-  private String clusterId;
-  private String scmId;
-  private String omId;
-
-  @Rule
-  public Timeout timeout = new Timeout(60000);
-
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set("hadoop.hdds.db.rocksdb.logging.enabled", "true");
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    omId = UUID.randomUUID().toString();
-    cluster =  MiniOzoneCluster.newBuilder(conf)
-        .setClusterId(clusterId)
-        .setScmId(scmId)
-        .setOmId(omId)
-        .build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  /**
-   * Shutdown MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testOMRocksDBLoggingEnabled() throws Exception {
-
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(DBStoreBuilder.ROCKS_DB_LOGGER);
-    cluster.restartOzoneManager();
-    GenericTestUtils.waitFor(() -> logCapturer.getOutput()
-            .contains("db_impl.cc"),
-        1000, 10000);
-
-    cluster.getConf().set("hadoop.hdds.db.rocksdb.logging.enabled", "false");
-    cluster.restartOzoneManager();
-    logCapturer.clearOutput();
-    try {
-      GenericTestUtils.waitFor(() -> logCapturer.getOutput()
-              .contains("db_impl.cc"),
-          1000, 10000);
-      Assert.fail();
-    } catch (TimeoutException ex) {
-      Assert.assertTrue(ex.getMessage().contains("Timed out"));
-    }
-  }
-
-}
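
GenericTestUtils.waitFor(condition, checkEveryMillis, waitForMillis), used above to watch for RocksDB log output, is a poll-until-true helper. A plain-Java sketch of the same contract (illustrative; not the Hadoop implementation):

    import java.util.concurrent.TimeoutException;
    import java.util.function.BooleanSupplier;

    public final class WaitFor {

      // Poll condition every checkIntervalMs until it is true, or throw once
      // timeoutMs elapses (the message matches the "Timed out" check above).
      public static void waitFor(BooleanSupplier condition,
          long checkIntervalMs, long timeoutMs)
          throws TimeoutException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.getAsBoolean()) {
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException("Timed out waiting for condition");
          }
          Thread.sleep(checkIntervalMs);
        }
      }

      public static void main(String[] args) throws Exception {
        long start = System.currentTimeMillis();
        waitFor(() -> System.currentTimeMillis() - start > 200, 50, 1000);
        System.out.println("condition met");
      }
    }
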
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java
deleted file mode 100644
index 3614a05..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
-import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.TestStorageContainerManagerHelper;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
-import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Test Ozone Manager operation in distributed handler scenario.
- */
-public class TestScmSafeMode {
-
-  private final static Logger LOG = LoggerFactory
-      .getLogger(TestScmSafeMode.class);
-  private static MiniOzoneCluster cluster = null;
-  private static MiniOzoneCluster.Builder builder = null;
-  private static OzoneConfiguration conf;
-  private static OzoneManager om;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-
-
-  @Rule
-  public Timeout timeout = new Timeout(1000 * 200);
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_STALENODE_INTERVAL, "10s");
-    conf.set(OZONE_SCM_DEADNODE_INTERVAL, "25s");
-    builder = MiniOzoneCluster.newBuilder(conf)
-        .setHbInterval(1000)
-        .setHbProcessorInterval(500)
-        .setStartDataNodes(false);
-    cluster = builder.build();
-    cluster.startHddsDatanodes();
-    cluster.waitForClusterToBeReady();
-    om = cluster.getOzoneManager();
-    storageContainerLocationClient = cluster
-        .getStorageContainerLocationClient();
-  }
-
-  /**
-   * Shutdown MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      try {
-        cluster.shutdown();
-      } catch (Exception e) {
-        // do nothing.
-      }
-    }
-  }
-
-  @Test(timeout = 300_000)
-  public void testSafeModeOperations() throws Exception {
-    // Create {numKeys} keys with random names.
-    TestStorageContainerManagerHelper helper =
-        new TestStorageContainerManagerHelper(cluster, conf);
-    Map<String, OmKeyInfo> keyLocations = helper.createKeys(100, 4096);
-    final List<ContainerInfo> containers = cluster
-        .getStorageContainerManager().getContainerManager().getContainers();
-    GenericTestUtils.waitFor(() -> containers.size() >= 3, 100, 1000);
-
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    ObjectStore store = cluster.getRpcClient().getObjectStore();
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    bucket.createKey(keyName, 1000, RATIS, ONE, new HashMap<>());
-
-    cluster.stop();
-
-    try {
-      cluster = builder.build();
-    } catch (IOException e) {
-      fail("failed");
-    }
-
-
-    StorageContainerManager scm;
-
-    scm = cluster.getStorageContainerManager();
-    Assert.assertTrue(scm.isInSafeMode());
-
-    om = cluster.getOzoneManager();
-
-
-    final OzoneBucket bucket1 =
-        cluster.getRpcClient().getObjectStore().getVolume(volumeName)
-            .getBucket(bucketName);
-
-    // The cluster was restarted without restarting the datanodes, so the
-    // safe-mode precheck for allocateBlock must fail.
-    LambdaTestUtils.intercept(IOException.class,
-        "SafeModePrecheck failed for allocateBlock",
-        () -> bucket1.createKey(keyName, 1000, RATIS, ONE,
-            new HashMap<>()));
-  }
-
-  /**
- * Tests the inSafeMode and forceExitSafeMode API calls.
-   */
-  @Test(timeout = 300_000)
-  public void testIsScmInSafeModeAndForceExit() throws Exception {
-    // Test 1: SCM should be out of safe mode.
-    Assert.assertFalse(storageContainerLocationClient.inSafeMode());
-    cluster.stop();
-    // Restart the cluster with the same metadata dir.
-
-    try {
-      cluster = builder.build();
-    } catch (IOException e) {
-      Assert.fail("Cluster startup failed.");
-    }
-
-    // Test 2: SCM should be in safe mode as datanodes are not started yet.
-    storageContainerLocationClient = cluster
-        .getStorageContainerLocationClient();
-    Assert.assertTrue(storageContainerLocationClient.inSafeMode());
-    // Force scm out of safe mode.
-    cluster.getStorageContainerManager().getClientProtocolServer()
-        .forceExitSafeMode();
-    // Test 3: SCM should be out of safe mode.
-    GenericTestUtils.waitFor(() -> {
-      try {
-        return !cluster.getStorageContainerManager().getClientProtocolServer()
-            .inSafeMode();
-      } catch (IOException e) {
-        Assert.fail("Cluster");
-        return false;
-      }
-    }, 10, 1000 * 5);
-
-  }
-
-  @Test(timeout = 300_000)
-  public void testSCMSafeMode() throws Exception {
-    // Test 1: Test safe mode when there are no containers in the system.
-    cluster.stop();
-
-    try {
-      cluster = builder.build();
-    } catch (IOException e) {
-      Assert.fail("Cluster startup failed.");
-    }
-    assertTrue(cluster.getStorageContainerManager().isInSafeMode());
-    cluster.startHddsDatanodes();
-    cluster.waitForClusterToBeReady();
-    cluster.waitTobeOutOfSafeMode();
-    assertFalse(cluster.getStorageContainerManager().isInSafeMode());
-
-    // Test 2: Test safe mode when containers exist in the system.
-    // Create {numKeys} keys with random names.
-    TestStorageContainerManagerHelper helper =
-        new TestStorageContainerManagerHelper(cluster, conf);
-    Map<String, OmKeyInfo> keyLocations = helper.createKeys(100 * 2, 4096);
-    final List<ContainerInfo> containers = cluster
-        .getStorageContainerManager().getContainerManager().getContainers();
-    GenericTestUtils.waitFor(() -> containers.size() >= 3, 100, 1000 * 30);
-
-    // Remove some containers so that they remain open.
-    containers.remove(0);
-    containers.remove(0);
-
-    // Close remaining containers
-    SCMContainerManager mapping = (SCMContainerManager) cluster
-        .getStorageContainerManager().getContainerManager();
-    containers.forEach(c -> {
-      try {
-        mapping.updateContainerState(c.containerID(),
-            HddsProtos.LifeCycleEvent.FINALIZE);
-        mapping.updateContainerState(c.containerID(),
-            LifeCycleEvent.CLOSE);
-      } catch (IOException e) {
-        LOG.info("Failed to change state of open containers.", e);
-      }
-    });
-    cluster.stop();
-
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(SCMSafeModeManager.getLogger());
-    logCapturer.clearOutput();
-
-    try {
-      cluster = builder.build();
-    } catch (IOException ex) {
-      fail("failed");
-    }
-
-    StorageContainerManager scm;
-
-    scm = cluster.getStorageContainerManager();
-    assertTrue(scm.isInSafeMode());
-    assertFalse(logCapturer.getOutput().contains("SCM exiting safe mode."));
-    assertTrue(scm.getCurrentContainerThreshold() == 0);
-    for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
-      dn.start();
-    }
-    GenericTestUtils
-        .waitFor(() -> scm.getCurrentContainerThreshold() == 1.0, 100, 20000);
-
-    EventQueue eventQueue =
-        (EventQueue) cluster.getStorageContainerManager().getEventQueue();
-    eventQueue.processAll(5000L);
-
-    double safeModeCutoff = conf
-        .getDouble(HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT,
-            HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT);
-    assertTrue(scm.getCurrentContainerThreshold() >= safeModeCutoff);
-    assertTrue(logCapturer.getOutput().contains("SCM exiting safe mode."));
-    assertFalse(scm.isInSafeMode());
-  }
-
-  @Test(timeout = 300_000)
-  public void testSCMSafeModeRestrictedOp() throws Exception {
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
-        OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB);
-    cluster.stop();
-    cluster = builder.build();
-    StorageContainerManager scm = cluster.getStorageContainerManager();
-    assertTrue(scm.isInSafeMode());
-
-    LambdaTestUtils.intercept(SCMException.class,
-        "SafeModePrecheck failed for allocateContainer", () -> {
-          scm.getClientProtocolServer()
-              .allocateContainer(ReplicationType.STAND_ALONE,
-                  ReplicationFactor.ONE, "");
-        });
-
-    cluster.startHddsDatanodes();
-    cluster.waitForClusterToBeReady();
-    cluster.waitTobeOutOfSafeMode();
-    assertFalse(scm.isInSafeMode());
-
-    TestStorageContainerManagerHelper helper =
-        new TestStorageContainerManagerHelper(cluster, conf);
-    helper.createKeys(10, 4096);
-    SCMClientProtocolServer clientProtocolServer = cluster
-        .getStorageContainerManager().getClientProtocolServer();
-    assertFalse((scm.getClientProtocolServer()).getSafeModeStatus());
-    final List<ContainerInfo> containers = scm.getContainerManager()
-        .getContainers();
-    scm.getEventQueue().fireEvent(SCMEvents.SAFE_MODE_STATUS,
-        new SCMSafeModeManager.SafeModeStatus(true));
-    GenericTestUtils.waitFor(() -> {
-      return clientProtocolServer.getSafeModeStatus();
-    }, 50, 1000 * 30);
-    assertTrue(clientProtocolServer.getSafeModeStatus());
-
-    cluster.shutdownHddsDatanodes();
-    Thread.sleep(30000);
-    LambdaTestUtils.intercept(SCMException.class,
-        "Open container " + containers.get(0).getContainerID() + " "
-            + "doesn't have enough replicas to service this operation in Safe"
-            + " mode.", () -> clientProtocolServer
-            .getContainerWithPipeline(containers.get(0).getContainerID()));
-  }
-
-  @Test(timeout = 300_000)
-  public void testSCMSafeModeDisabled() throws Exception {
-    cluster.shutdown();
-
-    // If safe mode is disabled, the cluster should not be in safe mode even
-    // if the minimum number of datanodes has not been started.
-    conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, false);
-    conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, 3);
-    builder = MiniOzoneCluster.newBuilder(conf)
-        .setHbInterval(1000)
-        .setHbProcessorInterval(500)
-        .setNumDatanodes(1);
-    cluster = builder.build();
-    StorageContainerManager scm = cluster.getStorageContainerManager();
-    assertFalse(scm.isInSafeMode());
-
-    // Even on SCM restart, cluster should be out of safe mode immediately.
-    cluster.restartStorageContainerManager(true);
-    assertFalse(scm.isInSafeMode());
-  }
-}
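
LambdaTestUtils.intercept(type, messageFragment, callable), used throughout the safe-mode tests above, asserts both the exception class and a message substring. A minimal sketch of that contract in plain Java (assumed semantics; the Hadoop utility has many more overloads):

    import java.util.concurrent.Callable;

    public final class Intercept {

      // Run op; succeed only if it throws an instance of type whose message
      // contains fragment. Anything else is surfaced as a test failure.
      public static <T extends Throwable> T intercept(Class<T> type,
          String fragment, Callable<?> op) throws Exception {
        try {
          op.call();
        } catch (Throwable t) {
          if (type.isInstance(t) && t.getMessage() != null
              && t.getMessage().contains(fragment)) {
            return type.cast(t);
          }
          if (t instanceof Exception) {
            throw (Exception) t;
          }
          throw new AssertionError("Unexpected throwable", t);
        }
        throw new AssertionError(
            "Expected " + type.getSimpleName() + " containing: " + fragment);
      }

      public static void main(String[] args) throws Exception {
        intercept(IllegalStateException.class, "safe mode",
            () -> { throw new IllegalStateException("still in safe mode"); });
        System.out.println("intercepted the expected exception");
      }
    }
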
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java
deleted file mode 100644
index 48a9c6a..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificate.client.OMCertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.hdds.security.x509.keys.KeyCodec;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.security.KeyPair;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.cert.X509Certificate;
-import java.util.UUID;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
-import static org.apache.hadoop.test.GenericTestUtils.*;
-
-/**
- * Test secure Ozone Manager operation in distributed handler scenario.
- */
-public class TestSecureOzoneManager {
-
-  private static final String COMPONENT = "om";
-  private MiniOzoneCluster cluster = null;
-  private OzoneConfiguration conf;
-  private String clusterId;
-  private String scmId;
-  private String omId;
-  private Path metaDir;
-
-  @Rule
-  public Timeout timeout = new Timeout(1000 * 25);
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   */
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    omId = UUID.randomUUID().toString();
-    conf.setBoolean(OZONE_ACL_ENABLED, true);
-    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-    conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
-    conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString());
-    conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 2);
-    conf.set(OZONE_SCM_NAMES, "localhost");
-    final String path = getTempPath(UUID.randomUUID().toString());
-    metaDir = Paths.get(path, "om-meta");
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
-    OzoneManager.setTestSecureOmFlag(true);
-
-  }
-
-  /**
-   * Shutdown MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-    FileUtils.deleteQuietly(metaDir.toFile());
-  }
-
-  /**
-   * Test failure cases for secure OM initialization.
-   */
-  @Test
-  public void testSecureOmInitFailures() throws Exception {
-    PrivateKey privateKey;
-    PublicKey publicKey;
-    LogCapturer omLogs =
-        LogCapturer.captureLogs(OzoneManager.getLogger());
-    OMStorage omStorage = new OMStorage(conf);
-    omStorage.setClusterId(clusterId);
-    omStorage.setScmId(scmId);
-    omStorage.setOmId(omId);
-    omLogs.clearOutput();
-
-    // Case 1: Keypair and certificate are both missing. Initial boot-up
-    // generates a keypair; fetching the certificate fails while SCM is down.
-    SecurityConfig securityConfig = new SecurityConfig(conf);
-    CertificateClient client = new OMCertificateClient(securityConfig,
-        omStorage.getOmCertSerialId());
-    Assert.assertEquals(CertificateClient.InitResponse.GETCERT, client.init());
-    privateKey = client.getPrivateKey();
-    publicKey = client.getPublicKey();
-    Assert.assertNotNull(client.getPrivateKey());
-    Assert.assertNotNull(client.getPublicKey());
-    Assert.assertNull(client.getCertificate());
-
-    // Case 2: If the key pair already exists, the response should be RECOVER.
-    client = new OMCertificateClient(securityConfig,
-        omStorage.getOmCertSerialId());
-    Assert.assertEquals(CertificateClient.InitResponse.RECOVER, client.init());
-    Assert.assertNotNull(client.getPrivateKey());
-    Assert.assertNotNull(client.getPublicKey());
-    Assert.assertNull(client.getCertificate());
-
-    // Case 3: Both the public key and the certificate are missing.
-    client = new OMCertificateClient(securityConfig);
-    FileUtils.deleteQuietly(Paths.get(securityConfig.getKeyLocation(COMPONENT)
-        .toString(), securityConfig.getPublicKeyFileName()).toFile());
-    Assert.assertEquals(CertificateClient.InitResponse.FAILURE, client.init());
-    Assert.assertNotNull(client.getPrivateKey());
-    Assert.assertNull(client.getPublicKey());
-    Assert.assertNull(client.getCertificate());
-
-    // Case 4: Both the private key and the certificate are missing.
-    client = new OMCertificateClient(securityConfig);
-    KeyCodec keyCodec = new KeyCodec(securityConfig, COMPONENT);
-    keyCodec.writePublicKey(publicKey);
-    FileUtils.deleteQuietly(Paths.get(securityConfig.getKeyLocation(COMPONENT)
-        .toString(), securityConfig.getPrivateKeyFileName()).toFile());
-    Assert.assertEquals(CertificateClient.InitResponse.FAILURE, client.init());
-    Assert.assertNull(client.getPrivateKey());
-    Assert.assertNotNull(client.getPublicKey());
-    Assert.assertNull(client.getCertificate());
-
-    // Case 5: Only the certificate is present.
-    FileUtils.deleteQuietly(Paths.get(securityConfig.getKeyLocation(COMPONENT)
-        .toString(), securityConfig.getPublicKeyFileName()).toFile());
-    CertificateCodec certCodec =
-        new CertificateCodec(securityConfig, COMPONENT);
-    X509Certificate x509Certificate = KeyStoreTestUtil.generateCertificate(
-        "CN=Test", new KeyPair(publicKey, privateKey), 10,
-        securityConfig.getSignatureAlgo());
-    certCodec.writeCertificate(new X509CertificateHolder(
-        x509Certificate.getEncoded()));
-    client = new OMCertificateClient(securityConfig,
-        x509Certificate.getSerialNumber().toString());
-    omStorage.setOmCertSerialId(x509Certificate.getSerialNumber().toString());
-    Assert.assertEquals(CertificateClient.InitResponse.FAILURE, client.init());
-    Assert.assertNull(client.getPrivateKey());
-    Assert.assertNull(client.getPublicKey());
-    Assert.assertNotNull(client.getCertificate());
-
-    // Case 6: Both the private key and the certificate are present.
-    client = new OMCertificateClient(securityConfig,
-        x509Certificate.getSerialNumber().toString());
-    FileUtils.deleteQuietly(Paths.get(securityConfig.getKeyLocation(COMPONENT)
-        .toString(), securityConfig.getPublicKeyFileName()).toFile());
-    keyCodec.writePrivateKey(privateKey);
-    Assert.assertEquals(CertificateClient.InitResponse.SUCCESS, client.init());
-    Assert.assertNotNull(client.getPrivateKey());
-    Assert.assertNotNull(client.getPublicKey());
-    Assert.assertNotNull(client.getCertificate());
-
-    // Case 7: Both the keypair and the certificate are present.
-    client = new OMCertificateClient(securityConfig,
-        x509Certificate.getSerialNumber().toString());
-    Assert.assertEquals(CertificateClient.InitResponse.SUCCESS, client.init());
-    Assert.assertNotNull(client.getPrivateKey());
-    Assert.assertNotNull(client.getPublicKey());
-    Assert.assertNotNull(client.getCertificate());
-  }
-
-  /**
-   * Test secure OM initialization failure with an unresolvable OM address.
-   */
-  @Test
-  public void testSecureOmInitFailure() throws Exception {
-    OzoneConfiguration config = new OzoneConfiguration(conf);
-    OMStorage omStorage = new OMStorage(config);
-    omStorage.setClusterId(clusterId);
-    omStorage.setScmId(scmId);
-    omStorage.setOmId(omId);
-    config.set(OZONE_OM_ADDRESS_KEY, "om-unknown");
-    LambdaTestUtils.intercept(RuntimeException.class, "Can't get SCM signed" +
-            " certificate",
-        () -> OzoneManager.initializeSecurity(config, omStorage));
-  }
-
-}
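
The seven cases above amount to a decision table over which of the private key, public key, and certificate exist on disk when the client initializes. A compact sketch of that table as inferred from the test's assertions (illustrative only; the real client also validates that the key pair and certificate match):

    public final class CertInitDecision {

      enum InitResponse { GETCERT, RECOVER, SUCCESS, FAILURE }

      // Decision table inferred from the assertions above.
      static InitResponse decide(boolean hasPrivateKey, boolean hasPublicKey,
          boolean hasCertificate) {
        if (!hasPrivateKey && !hasPublicKey && !hasCertificate) {
          return InitResponse.GETCERT;   // Case 1: fresh start, fetch from SCM.
        }
        if (hasPrivateKey && hasPublicKey && !hasCertificate) {
          return InitResponse.RECOVER;   // Case 2: keypair exists, cert missing.
        }
        if (hasPrivateKey && hasCertificate) {
          return InitResponse.SUCCESS;   // Cases 6/7: public key is derivable.
        }
        return InitResponse.FAILURE;     // Cases 3-5: unrecoverable partial state.
      }

      public static void main(String[] args) {
        System.out.println(decide(false, false, false)); // GETCERT
        System.out.println(decide(true, true, false));   // RECOVER
        System.out.println(decide(true, false, true));   // SUCCESS (case 6)
        System.out.println(decide(false, false, true));  // FAILURE (case 5)
      }
    }
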
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/package-info.java
deleted file mode 100644
index 5ad6770..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Ozone Manager Tests.
- */
-package org.apache.hadoop.ozone.om;
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOMRatisSnapshotInfo.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOMRatisSnapshotInfo.java
deleted file mode 100644
index 56fef1a..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOMRatisSnapshotInfo.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.snapshot;
-
-import org.apache.hadoop.ozone.om.ratis.OMRatisSnapshotInfo;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.util.Random;
-
-/**
- * Tests {@link org.apache.hadoop.ozone.om.ratis.OMRatisSnapshotInfo}.
- */
-public class TestOMRatisSnapshotInfo {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Test
-  public void testSaveAndLoadSnapshotInfo() throws Exception {
-    File rootDir = folder.newFolder();
-    OMRatisSnapshotInfo omRatisSnapshotInfo = new OMRatisSnapshotInfo(rootDir);
-
-    // Initially term and index should be 0 and -1
-    Assert.assertEquals(0, omRatisSnapshotInfo.getTerm());
-    Assert.assertEquals(-1, omRatisSnapshotInfo.getIndex());
-
-    Random random = new Random();
-    int snapshotIndex = random.nextInt(50);
-    int termIndex = random.nextInt(10);
-
-    // Save snapshotInfo to disk
-    omRatisSnapshotInfo.updateTerm(termIndex);
-    omRatisSnapshotInfo.saveRatisSnapshotToDisk(snapshotIndex);
-
-    Assert.assertEquals(termIndex, omRatisSnapshotInfo.getTerm());
-    Assert.assertEquals(snapshotIndex, omRatisSnapshotInfo.getIndex());
-
-    // Load the snapshot file into new SnapshotInfo
-    OMRatisSnapshotInfo newSnapshotInfo = new OMRatisSnapshotInfo(rootDir);
-
-    // Verify that the snapshot file was loaded properly
-    Assert.assertEquals(termIndex, newSnapshotInfo.getTerm());
-    Assert.assertEquals(snapshotIndex, newSnapshotInfo.getIndex());
-  }
-
-}
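
The snapshot-info test above is a save/load round trip of two longs, term and index, with defaults of 0 and -1 when no file exists yet. The actual on-disk format is not shown in this patch; the sketch below uses a hypothetical java.util.Properties layout purely to illustrate the round trip:

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.util.Properties;

    public final class SnapshotInfoFile {

      // Hypothetical on-disk format: a properties file with term/index keys.
      public static void save(File f, long term, long index)
          throws IOException {
        Properties p = new Properties();
        p.setProperty("term", Long.toString(term));
        p.setProperty("index", Long.toString(index));
        try (FileOutputStream out = new FileOutputStream(f)) {
          p.store(out, "ratis snapshot info");
        }
      }

      // A missing file yields the defaults the test asserts: term 0, index -1.
      public static long[] load(File f) throws IOException {
        if (!f.exists()) {
          return new long[] {0L, -1L};
        }
        Properties p = new Properties();
        try (FileInputStream in = new FileInputStream(f)) {
          p.load(in);
        }
        return new long[] {Long.parseLong(p.getProperty("term")),
            Long.parseLong(p.getProperty("index"))};
      }

      public static void main(String[] args) throws IOException {
        File f = File.createTempFile("snapshotInfo", ".properties");
        f.delete();                        // simulate "no snapshot taken yet"
        long[] initial = load(f);
        System.out.println(initial[0] + " " + initial[1]);   // 0 -1
        save(f, 7L, 42L);
        long[] loaded = load(f);
        System.out.println(loaded[0] + " " + loaded[1]);     // 7 42
      }
    }
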
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java
deleted file mode 100644
index 92a4a34..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.snapshot;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.util.UUID;
-
-/**
- * Test OM's snapshot provider service.
- */
-public class TestOzoneManagerSnapshotProvider {
-
-  private MiniOzoneHAClusterImpl cluster = null;
-  private ObjectStore objectStore;
-  private OzoneConfiguration conf;
-  private String clusterId;
-  private String scmId;
-  private String omServiceId;
-  private int numOfOMs = 3;
-
-  @Rule
-  public Timeout timeout = new Timeout(300_000);
-
-  /**
-   * Create an HA MiniOzoneCluster for testing.
-   */
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    omServiceId = "om-service-test1";
-    conf.setBoolean(OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY, true);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true);
-    cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
-        .setClusterId(clusterId)
-        .setScmId(scmId)
-        .setOMServiceId(omServiceId)
-        .setNumOfOzoneManagers(numOfOMs)
-        .build();
-    cluster.waitForClusterToBeReady();
-    objectStore = OzoneClientFactory.getRpcClient(omServiceId, conf)
-        .getObjectStore();
-  }
-
-  /**
-   * Shutdown MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testDownloadCheckpoint() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder()
-        .setOwner(userName)
-        .setAdmin(adminName)
-        .build();
-
-    objectStore.createVolume(volumeName, createVolumeArgs);
-    OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName);
-
-    retVolumeinfo.createBucket(bucketName);
-    OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);
-
-    String leaderOMNodeId = objectStore.getClientProxy().getOMProxyProvider()
-        .getCurrentProxyOMNodeId();
-    OzoneManager ozoneManager = cluster.getOzoneManager(leaderOMNodeId);
-
-    // Get a follower OM
-    String followerNodeId = ozoneManager.getPeerNodes().get(0).getOMNodeId();
-    OzoneManager followerOM = cluster.getOzoneManager(followerNodeId);
-
-    // Download latest checkpoint from leader OM to follower OM
-    DBCheckpoint omSnapshot = followerOM.getOmSnapshotProvider()
-        .getOzoneManagerDBSnapshot(leaderOMNodeId);
-
-    long leaderSnapshotIndex = ozoneManager.getRatisSnapshotIndex();
-    long downloadedSnapshotIndex = omSnapshot.getRatisSnapshotIndex();
-
-    // The snapshot index downloaded from leader OM should match the ratis
-    // snapshot index on the leader OM
-    Assert.assertEquals("The snapshot index downloaded from leader OM does " +
-        "not match its ratis snapshot index",
-        leaderSnapshotIndex, downloadedSnapshotIndex);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java
deleted file mode 100644
index 65bc275..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ozShell;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.PrintStream;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import com.google.common.base.Strings;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import static org.junit.Assert.fail;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine;
-import picocli.CommandLine.ExecutionException;
-import picocli.CommandLine.IExceptionHandler2;
-import picocli.CommandLine.ParameterException;
-import picocli.CommandLine.ParseResult;
-import picocli.CommandLine.RunLast;
-
-/**
- * Tests the Ozone datanode shell command.
- */
-public class TestOzoneDatanodeShell {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestOzoneDatanodeShell.class);
-
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  private static File baseDir;
-  private static OzoneConfiguration conf = null;
-  private static MiniOzoneCluster cluster = null;
-  private static HddsDatanodeService datanode = null;
-
-  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
-  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
-  private static final PrintStream OLD_OUT = System.out;
-  private static final PrintStream OLD_ERR = System.err;
-
-  /**
-   * Create a MiniOzoneCluster for testing, using the distributed Ozone
-   * handler type.
-   *
-   * @throws Exception
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-
-    String path = GenericTestUtils.getTempPath(
-        TestOzoneDatanodeShell.class.getSimpleName());
-    baseDir = new File(path);
-    baseDir.mkdirs();
-
-    datanode = HddsDatanodeService.createHddsDatanodeService(null);
-
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3)
-        .build();
-    conf.setInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue());
-    conf.setQuietMode(false);
-    cluster.waitForClusterToBeReady();
-  }
-
-  /**
-   * Shutdown MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-
-    if (baseDir != null) {
-      FileUtil.fullyDelete(baseDir, true);
-    }
-  }
-
-  @Before
-  public void setup() {
-    System.setOut(new PrintStream(out));
-    System.setErr(new PrintStream(err));
-  }
-
-  @After
-  public void reset() {
-    // reset stream after each unit test
-    out.reset();
-    err.reset();
-
-    // restore system streams
-    System.setOut(OLD_OUT);
-    System.setErr(OLD_ERR);
-  }
-
-
-  private void executeDatanode(HddsDatanodeService hdds, String[] args) {
-    LOG.info("Executing datanode command with args {}", Arrays.asList(args));
-    CommandLine cmd = hdds.getCmd();
-
-    IExceptionHandler2<List<Object>> exceptionHandler =
-        new IExceptionHandler2<List<Object>>() {
-          @Override
-          public List<Object> handleParseException(ParameterException ex,
-              String[] args) {
-            throw ex;
-          }
-
-          @Override
-          public List<Object> handleExecutionException(ExecutionException ex,
-              ParseResult parseResult) {
-            throw ex;
-          }
-        };
-    cmd.parseWithHandlers(new RunLast(),
-        exceptionHandler, args);
-  }
-
-  /**
-   * Execute the command; if expectedError is non-empty, assert that an
-   * exception whose message contains it is thrown.
-   */
-  private void executeDatanodeWithError(HddsDatanodeService hdds, String[] args,
-      String expectedError) {
-    if (Strings.isNullOrEmpty(expectedError)) {
-      executeDatanode(hdds, args);
-    } else {
-      try {
-        executeDatanode(hdds, args);
-        fail("Exception is expected from command execution " + Arrays
-            .asList(args));
-      } catch (Exception ex) {
-        if (!Strings.isNullOrEmpty(expectedError)) {
-          Throwable exceptionToCheck = ex;
-          if (exceptionToCheck.getCause() != null) {
-            exceptionToCheck = exceptionToCheck.getCause();
-          }
-          Assert.assertTrue(
-              String.format(
-                  "Error of shell code doesn't contain the " +
-                      "exception [%s] in [%s]",
-                  expectedError, exceptionToCheck.getMessage()),
-              exceptionToCheck.getMessage().contains(expectedError));
-        }
-      }
-    }
-  }
-
-  @Test
-  public void testDatanodeCommand() {
-    LOG.info("Running testDatanodeIncompleteCommand");
-    String[] args = new String[]{}; //executing 'ozone datanode'
-
-    //'ozone datanode' command should not result in error
-    executeDatanodeWithError(datanode, args, null);
-  }
-
-  @Test
-  public void testDatanodeInvalidParamCommand() {
-    LOG.info("Running testDatanodeIncompleteCommand");
-    String expectedError = "Unknown option: -invalidParam";
-    //executing 'ozone datanode -invalidParam'
-    String[] args = new String[]{"-invalidParam"};
-
-    executeDatanodeWithError(datanode, args, expectedError);
-  }
-}
\ No newline at end of file
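
The shell test above (and TestOzoneShellHA below) captures System.out and System.err into byte buffers in @Before and restores them in @After so each test can assert on CLI output. The idiom in isolation (plain JUnit 4, no Ozone dependencies; the usage string is a made-up example):

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;
    import org.junit.After;
    import org.junit.Assert;
    import org.junit.Before;
    import org.junit.Test;

    public class StreamCaptureTest {

      private final ByteArrayOutputStream out = new ByteArrayOutputStream();
      private static final PrintStream OLD_OUT = System.out;

      @Before
      public void setup() {
        // Redirect stdout into the buffer so the test can inspect it.
        System.setOut(new PrintStream(out));
      }

      @After
      public void reset() {
        out.reset();
        System.setOut(OLD_OUT);  // restore the real stdout for later tests
      }

      @Test
      public void capturesShellOutput() {
        System.out.println("usage: ozone datanode [options]");
        Assert.assertTrue(out.toString().contains("usage: ozone datanode"));
      }
    }
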
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShellHA.java
deleted file mode 100644
index 4e04b4c..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShellHA.java
+++ /dev/null
@@ -1,343 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ozShell;
-
-import com.google.common.base.Strings;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.web.ozShell.OzoneShell;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine;
-import picocli.CommandLine.ExecutionException;
-import picocli.CommandLine.IExceptionHandler2;
-import picocli.CommandLine.ParameterException;
-import picocli.CommandLine.ParseResult;
-import picocli.CommandLine.RunLast;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.PrintStream;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.UUID;
-
-import static org.junit.Assert.fail;
-
-/**
- * Tests the Ozone sh shell command.
- * Inspired by TestS3Shell.
- */
-public class TestOzoneShellHA {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestOzoneShellHA.class);
-
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  private static File baseDir;
-  private static OzoneConfiguration conf = null;
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneShell ozoneShell = null;
-
-  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
-  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
-  private static final PrintStream OLD_OUT = System.out;
-  private static final PrintStream OLD_ERR = System.err;
-
-  private static String omServiceId;
-  private static String clusterId;
-  private static String scmId;
-  private static int numOfOMs;
-
-  /**
-   * Create a MiniOzoneCluster for testing, using the distributed Ozone
-   * handler type.
-   *
-   * @throws Exception
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-
-    String path = GenericTestUtils.getTempPath(
-        TestOzoneShellHA.class.getSimpleName());
-    baseDir = new File(path);
-    baseDir.mkdirs();
-    ozoneShell = new OzoneShell();
-
-    // Init HA cluster
-    omServiceId = "om-service-test1";
-    numOfOMs = 3;
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    cluster = MiniOzoneCluster.newHABuilder(conf)
-        .setClusterId(clusterId)
-        .setScmId(scmId)
-        .setOMServiceId(omServiceId)
-        .setNumOfOzoneManagers(numOfOMs)
-        .build();
-    conf.setQuietMode(false);
-    cluster.waitForClusterToBeReady();
-  }
-
-  /**
-   * Shutdown MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-
-    if (baseDir != null) {
-      FileUtil.fullyDelete(baseDir, true);
-    }
-  }
-
-  @Before
-  public void setup() {
-    System.setOut(new PrintStream(out));
-    System.setErr(new PrintStream(err));
-  }
-
-  @After
-  public void reset() {
-    // reset stream after each unit test
-    out.reset();
-    err.reset();
-
-    // restore system streams
-    System.setOut(OLD_OUT);
-    System.setErr(OLD_ERR);
-  }
-
-  private void execute(OzoneShell shell, String[] args) {
-    LOG.info("Executing OzoneShell command with args {}", Arrays.asList(args));
-    CommandLine cmd = shell.getCmd();
-
-    IExceptionHandler2<List<Object>> exceptionHandler =
-        new IExceptionHandler2<List<Object>>() {
-          @Override
-          public List<Object> handleParseException(ParameterException ex,
-              String[] args) {
-            throw ex;
-          }
-
-          @Override
-          public List<Object> handleExecutionException(ExecutionException ex,
-              ParseResult parseRes) {
-            throw ex;
-          }
-        };
-
-    // Since there is no elegant way to pass Ozone config to the shell,
-    // the idea is to use '--set' to inject the OM HA configs.
-    String[] argsWithHAConf = getHASetConfStrings(args);
-
-    cmd.parseWithHandlers(new RunLast(), exceptionHandler, argsWithHAConf);
-  }
-
-  /**
-   * Execute the command; if expectedError is non-empty, assert that an
-   * exception whose message contains it is thrown.
-   */
-  private void executeWithError(OzoneShell shell, String[] args,
-      String expectedError) {
-    if (Strings.isNullOrEmpty(expectedError)) {
-      execute(shell, args);
-    } else {
-      try {
-        execute(shell, args);
-        fail("Exception is expected from command execution " + Arrays
-            .asList(args));
-      } catch (Exception ex) {
-        if (!Strings.isNullOrEmpty(expectedError)) {
-          Throwable exceptionToCheck = ex;
-          if (exceptionToCheck.getCause() != null) {
-            exceptionToCheck = exceptionToCheck.getCause();
-          }
-          Assert.assertTrue(
-              String.format(
-                  "Error of OzoneShell code doesn't contain the " +
-                      "exception [%s] in [%s]",
-                  expectedError, exceptionToCheck.getMessage()),
-              exceptionToCheck.getMessage().contains(expectedError));
-        }
-      }
-    }
-  }
-
-  /**
-   * @return the leader OM's Node ID in the MiniOzoneHACluster.
-   *
-   * TODO: This should be put into MiniOzoneHAClusterImpl in the future.
-   * This helper function is similar to the one in TestOzoneFsHAURLs.
-   */
-  private String getLeaderOMNodeId() {
-    Collection<String> omNodeIds = OmUtils.getOMNodeIds(conf, omServiceId);
-    assert(omNodeIds.size() == numOfOMs);
-    MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster;
-    // Note: this loop may be implemented inside MiniOzoneHAClusterImpl
-    for (String omNodeId : omNodeIds) {
-      // Find the leader OM
-      if (!haCluster.getOzoneManager(omNodeId).isLeader()) {
-        continue;
-      }
-      return omNodeId;
-    }
-    return null;
-  }
-
-  private String getSetConfStringFromConf(String key) {
-    return String.format("--set=%s=%s", key, conf.get(key));
-  }
-
-  private String generateSetConfString(String key, String value) {
-    return String.format("--set=%s=%s", key, value);
-  }
-
-  /**
-   * Helper function to get a String array to be fed into OzoneShell.
-   * @param numOfArgs Number of additional arguments after the HA conf
-   *                  strings; this many empty array elements are reserved
-   *                  at the end of the returned array.
-   * @return String array.
-   */
-  private String[] getHASetConfStrings(int numOfArgs) {
-    Assert.assertTrue(numOfArgs >= 0);
-    String[] res = new String[1 + 1 + numOfOMs + numOfArgs];
-    final int indexOmServiceIds = 0;
-    final int indexOmNodes = 1;
-    final int indexOmAddressStart = 2;
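-    // Layout: [service ids conf, om nodes conf, one address conf per OM,
-    // then numOfArgs empty slots for the caller's arguments].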
-
-    res[indexOmServiceIds] = getSetConfStringFromConf(
-        OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY);
-
-    String omNodesKey = OmUtils.addKeySuffixes(
-        OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId);
-    String omNodesVal = conf.get(omNodesKey);
-    res[indexOmNodes] = generateSetConfString(omNodesKey, omNodesVal);
-
-    String[] omNodesArr = omNodesVal.split(",");
-    // Sanity check
-    Assert.assertEquals(numOfOMs, omNodesArr.length);
-    for (int i = 0; i < numOfOMs; i++) {
-      res[indexOmAddressStart + i] =
-          getSetConfStringFromConf(OmUtils.addKeySuffixes(
-              OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodesArr[i]));
-    }
-
-    return res;
-  }
-
-  /**
-   * Helper function to create a new set of arguments that contains HA configs.
-   * @param existingArgs Existing arguments to be fed into OzoneShell command.
-   * @return String array.
-   */
-  private String[] getHASetConfStrings(String[] existingArgs) {
-    // Get a String array populated with HA configs first
-    String[] res = getHASetConfStrings(existingArgs.length);
-
-    int indexCopyStart = res.length - existingArgs.length;
-    // Then copy the existing args into the tail of the returned array
-    System.arraycopy(existingArgs, 0, res, indexCopyStart,
-        existingArgs.length);
-    return res;
-  }
-
-  /**
-   * Tests ozone sh command URI parsing with volume and bucket create commands.
-   */
-  @Test
-  public void testOzoneShCmdURIs() {
-    // Test case 1: ozone sh volume create /volume
-    // Expectation: Failure.
-    String[] args = new String[] {"volume", "create", "/volume"};
-    executeWithError(ozoneShell, args,
-        "Service ID or host name must not be omitted");
-
-    // Get leader OM node RPC address from ozone.om.address.omServiceId.omNode
-    String omLeaderNodeId = getLeaderOMNodeId();
-    String omLeaderNodeAddrKey = OmUtils.addKeySuffixes(
-        OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omLeaderNodeId);
-    String omLeaderNodeAddr = conf.get(omLeaderNodeAddrKey);
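-    // Keep only the host part of the "host:port" address.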
-    String omLeaderNodeAddrWithoutPort = omLeaderNodeAddr.split(":")[0];
-
-    // Test case 2: ozone sh volume create o3://om1/volume2
-    // Expectation: Success.
-    // Note: For now it seems OzoneShell is only trying the default port 9862
-    // instead of using the port defined in ozone.om.address (as ozone fs does).
-    // So this test would fail until that behavior is fixed.
-    // TODO: Fix this behavior, then uncomment the execute() below.
-    String setOmAddress = "--set=" + OMConfigKeys.OZONE_OM_ADDRESS_KEY + "="
-        + omLeaderNodeAddr;
-    args = new String[] {setOmAddress,
-        "volume", "create", "o3://" + omLeaderNodeAddrWithoutPort + "/volume2"};
-    //execute(ozoneShell, args);
-
-    // Test case 3: ozone sh volume create o3://om1:port/volume3
-    // Expectation: Success.
-    args = new String[] {
-        "volume", "create", "o3://" + omLeaderNodeAddr + "/volume3"};
-    execute(ozoneShell, args);
-
-    // Test case 4: ozone sh volume create o3://id1/volume
-    // Expectation: Success.
-    args = new String[] {"volume", "create", "o3://" + omServiceId + "/volume"};
-    execute(ozoneShell, args);
-
-    // Test case 5: ozone sh volume create o3://id1:port/volume
-    // Expectation: Failure.
-    args = new String[] {"volume", "create",
-        "o3://" + omServiceId + ":9862" + "/volume"};
-    executeWithError(ozoneShell, args, "does not use port information");
-
-    // Test case 6: ozone sh bucket create /volume/bucket
-    // Expectation: Failure.
-    args = new String[] {"bucket", "create", "/volume/bucket"};
-    executeWithError(ozoneShell, args,
-        "Service ID or host name must not be omitted");
-
-    // Test case 7: ozone sh bucket create o3://om1/volume/bucket
-    // Expectation: Success.
-    args = new String[] {
-        "bucket", "create", "o3://" + omServiceId + "/volume/bucket"};
-    execute(ozoneShell, args);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java
deleted file mode 100644
index c55de0b..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ozShell;
-
-import com.google.common.base.Strings;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.web.ozShell.s3.S3Shell;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine;
-import picocli.CommandLine.ExecutionException;
-import picocli.CommandLine.IExceptionHandler2;
-import picocli.CommandLine.ParameterException;
-import picocli.CommandLine.ParseResult;
-import picocli.CommandLine.RunLast;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;
-
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.Arrays;
-import java.util.List;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.S3_BUCKET_NOT_FOUND;
-import static org.apache.hadoop.ozone.web.ozShell.s3.GetS3SecretHandler.OZONE_GETS3SECRET_ERROR;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Test class for the Ozone s3Shell command.
- */
-public class TestS3Shell {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestS3Shell.class);
-
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  private static String url;
-  private static File baseDir;
-  private static OzoneConfiguration conf = null;
-  private static MiniOzoneCluster cluster = null;
-  private static ClientProtocol client = null;
-  private static S3Shell s3Shell = null;
-
-  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
-  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
-  private static final PrintStream OLD_OUT = System.out;
-  private static final PrintStream OLD_ERR = System.err;
-
-  /**
-   * Create a MiniOzoneCluster for testing, using the distributed Ozone
-   * handler type.
-   *
-   * @throws Exception if the cluster fails to start
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-
-    String path = GenericTestUtils.getTempPath(
-        TestS3Shell.class.getSimpleName());
-    baseDir = new File(path);
-    baseDir.mkdirs();
-
-    s3Shell = new S3Shell();
-
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3)
-        .build();
-    conf.setInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue());
-    conf.setQuietMode(false);
-    client = new RpcClient(conf, null);
-    cluster.waitForClusterToBeReady();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-
-    if (baseDir != null) {
-      FileUtil.fullyDelete(baseDir, true);
-    }
-  }
-
-  @Before
-  public void setup() {
-    System.setOut(new PrintStream(out));
-    System.setErr(new PrintStream(err));
-    url = "o3://" + getOmAddress();
-  }
-
-  @After
-  public void reset() {
-    // reset stream after each unit test
-    out.reset();
-    err.reset();
-
-    // restore system streams
-    System.setOut(OLD_OUT);
-    System.setErr(OLD_ERR);
-  }
-
-  @Test
-  public void testS3BucketMapping() throws IOException {
-    String setOmAddress =
-        "--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress();
-
-    String s3Bucket = "bucket1";
-    String commandOutput;
-    createS3Bucket("ozone", s3Bucket);
-
-    // WHEN
-    String[] args =
-        new String[] {setOmAddress, "path", s3Bucket};
-    execute(s3Shell, args);
-
-    // THEN
-    commandOutput = out.toString();
-    String volumeName = client.getOzoneVolumeName(s3Bucket);
-    assertTrue(commandOutput.contains("Volume name for S3Bucket is : " +
-        volumeName));
-    assertTrue(commandOutput.contains(OzoneConsts.OZONE_URI_SCHEME + "://" +
-        s3Bucket + "." + volumeName));
-    out.reset();
-
-    // Trying to get map for an unknown bucket
-    args = new String[] {setOmAddress, "path", "unknownbucket"};
-    executeWithError(s3Shell, args, S3_BUCKET_NOT_FOUND);
-
-    // No bucket name
-    args = new String[] {setOmAddress, "path"};
-    executeWithError(s3Shell, args, "Missing required parameter");
-
-    // Invalid bucket name
-    args = new String[] {setOmAddress, "path", "/asd/multipleslash"};
-    executeWithError(s3Shell, args, S3_BUCKET_NOT_FOUND);
-  }
-
-  @Test
-  public void testS3SecretUnsecuredCluster() throws Exception {
-    String setOmAddress =
-        "--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress();
-
-    String output;
-
-    String[] args = new String[] {setOmAddress, "getsecret"};
-    execute(s3Shell, args);
-    // Get the first line of output
-    output = out.toString().split("\n")[0];
-
-    Assert.assertEquals(OZONE_GETS3SECRET_ERROR, output);
-  }
-
-  private void createS3Bucket(String userName, String s3Bucket) {
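-    // Tolerate an already-existing bucket so the test can re-run against
-    // the same cluster.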
-    try {
-      client.createS3Bucket(userName, s3Bucket);
-    } catch (IOException ex) {
-      GenericTestUtils.assertExceptionContains("S3_BUCKET_ALREADY_EXISTS", ex);
-    }
-  }
-
-  private void execute(S3Shell shell, String[] args) {
-    LOG.info("Executing s3Shell command with args {}", Arrays.asList(args));
-    CommandLine cmd = shell.getCmd();
-
-    IExceptionHandler2<List<Object>> exceptionHandler =
-        new IExceptionHandler2<List<Object>>() {
-          @Override
-          public List<Object> handleParseException(ParameterException ex,
-                                                   String[] args) {
-            throw ex;
-          }
-
-          @Override
-          public List<Object> handleExecutionException(ExecutionException ex,
-                                                       ParseResult parseRes) {
-            throw ex;
-          }
-        };
-    cmd.parseWithHandlers(new RunLast(),
-        exceptionHandler, args);
-  }
-
-  /**
-   * Execute the command and assert that an OMException with the expected
-   * result code was thrown.
-   */
-  private void executeWithError(S3Shell shell, String[] args,
-                                OMException.ResultCodes code) {
-    try {
-      execute(shell, args);
-      fail("Exception is expected from command execution " + Arrays
-          .asList(args));
-    } catch (Exception ex) {
-      Assert.assertEquals(OMException.class, ex.getCause().getClass());
-      Assert.assertEquals(code, ((OMException) ex.getCause()).getResult());
-    }
-  }
-
-  /**
-   * Execute the command and assert that the thrown exception message
-   * contains the expected error.
-   */
-  private void executeWithError(S3Shell shell, String[] args,
-                                String expectedError) {
-    if (Strings.isNullOrEmpty(expectedError)) {
-      execute(shell, args);
-    } else {
-      try {
-        execute(shell, args);
-        fail("Exception is expected from command execution " + Arrays
-            .asList(args));
-      } catch (Exception ex) {
-        if (!Strings.isNullOrEmpty(expectedError)) {
-          Throwable exceptionToCheck = ex;
-          if (exceptionToCheck.getCause() != null) {
-            exceptionToCheck = exceptionToCheck.getCause();
-          }
-          Assert.assertTrue(
-              String.format(
-                  "Expected error [%s] not found in s3Shell " +
-                      "message [%s]",
-                  expectedError, exceptionToCheck.getMessage()),
-              exceptionToCheck.getMessage().contains(expectedError));
-        }
-      }
-    }
-  }
-
-  private String getOmAddress() {
-    List<ServiceInfo> services;
-    try {
-      services = cluster.getOzoneManager().getServiceList();
-    } catch (IOException e) {
-      fail("Could not get service list from OM");
-      return null;
-    }
-
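-    // Pick the RPC address of the first OM entry in the service list.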
-    return services.stream()
-        .filter(a -> HddsProtos.NodeType.OM.equals(a.getNodeType()))
-        .findFirst()
-        .map(s -> s.getServiceAddress(ServicePort.Type.RPC))
-        .orElseThrow(IllegalStateException::new);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java
deleted file mode 100644
index 84eb8dd..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- *     http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-/**
- * Test utils for Ozone.
- */
-package org.apache.hadoop.ozone;
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
deleted file mode 100644
index 88b7c04..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.scm;
-
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-/**
- * Test allocate container calls.
- */
-public class TestAllocateContainer {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
-    cluster.waitForClusterToBeReady();
-    storageContainerLocationClient =
-        cluster.getStorageContainerLocationClient();
-    xceiverClientManager = new XceiverClientManager(conf);
-  }
-
-  @AfterClass
-  public static void shutdown() throws InterruptedException {
-    if(cluster != null) {
-      cluster.shutdown();
-    }
-    IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
-  }
-
-  @Test
-  public void testAllocate() throws Exception {
-    ContainerWithPipeline container =
-        storageContainerLocationClient.allocateContainer(
-            xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(),
-            containerOwner);
-    Assert.assertNotNull(container);
-    Assert.assertNotNull(container.getPipeline().getFirstNode());
-
-  }
-
-  @Test
-  public void testAllocateNull() throws Exception {
-    thrown.expect(NullPointerException.class);
-    storageContainerLocationClient.allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), null);
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
deleted file mode 100644
index 4c62c70..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.scm;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-/**
- * Test Container calls.
- */
-public class TestContainerSmallFile {
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration ozoneConfig;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
-
-  @BeforeClass
-  public static void init() throws Exception {
-    ozoneConfig = new OzoneConfiguration();
-    ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-        SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
-    cluster = MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1)
-        .build();
-    cluster.waitForClusterToBeReady();
-    storageContainerLocationClient = cluster
-        .getStorageContainerLocationClient();
-    xceiverClientManager = new XceiverClientManager(ozoneConfig);
-  }
-
-  @AfterClass
-  public static void shutdown() throws InterruptedException {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-    IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
-  }
-
-  @Test
-  public void testAllocateWrite() throws Exception {
-    ContainerWithPipeline container =
-        storageContainerLocationClient.allocateContainer(
-            xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    XceiverClientSpi client = xceiverClientManager
-        .acquireClient(container.getPipeline());
-    ContainerProtocolCalls.createContainer(client,
-        container.getContainerInfo().getContainerID(), null);
-
-    BlockID blockID = ContainerTestHelper.getTestBlockID(
-        container.getContainerInfo().getContainerID());
-    ContainerProtocolCalls.writeSmallFile(client, blockID,
-        "data123".getBytes());
-    ContainerProtos.GetSmallFileResponseProto response =
-        ContainerProtocolCalls.readSmallFile(client, blockID);
-    String readData = response.getData().getData().toStringUtf8();
-    Assert.assertEquals("data123", readData);
-    xceiverClientManager.releaseClient(client, false);
-  }
-
-  @Test
-  public void testInvalidBlockRead() throws Exception {
-    ContainerWithPipeline container =
-        storageContainerLocationClient.allocateContainer(
-            xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    XceiverClientSpi client = xceiverClientManager
-        .acquireClient(container.getPipeline());
-    ContainerProtocolCalls.createContainer(client,
-        container.getContainerInfo().getContainerID(), null);
-
-    thrown.expect(StorageContainerException.class);
-    thrown.expectMessage("Unable to find the block");
-
-    BlockID blockID = ContainerTestHelper.getTestBlockID(
-        container.getContainerInfo().getContainerID());
-    // Try to read a block that was never written to the container
-    ContainerProtos.GetSmallFileResponseProto response =
-        ContainerProtocolCalls.readSmallFile(client, blockID);
-    xceiverClientManager.releaseClient(client, false);
-  }
-
-  @Test
-  public void testInvalidContainerRead() throws Exception {
-    long nonExistContainerID = 8888L;
-    ContainerWithPipeline container =
-        storageContainerLocationClient.allocateContainer(
-            xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    XceiverClientSpi client = xceiverClientManager
-        .acquireClient(container.getPipeline());
-    ContainerProtocolCalls.createContainer(client,
-        container.getContainerInfo().getContainerID(), null);
-    BlockID blockID = ContainerTestHelper.getTestBlockID(
-        container.getContainerInfo().getContainerID());
-    ContainerProtocolCalls.writeSmallFile(client, blockID,
-        "data123".getBytes());
-
-    thrown.expect(StorageContainerException.class);
-    thrown.expectMessage("ContainerID 8888 does not exist");
-
-    // Try to read a block from a container that does not exist
-    ContainerProtos.GetSmallFileResponseProto response =
-        ContainerProtocolCalls.readSmallFile(client,
-            ContainerTestHelper.getTestBlockID(
-                nonExistContainerID));
-    xceiverClientManager.releaseClient(client, false);
-  }
-
-  @Test
-  public void testReadWriteWithBCSId() throws Exception {
-    ContainerWithPipeline container =
-        storageContainerLocationClient.allocateContainer(
-            HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    XceiverClientSpi client = xceiverClientManager
-        .acquireClient(container.getPipeline());
-    ContainerProtocolCalls.createContainer(client,
-        container.getContainerInfo().getContainerID(), null);
-
-    BlockID blockID1 = ContainerTestHelper.getTestBlockID(
-        container.getContainerInfo().getContainerID());
-    ContainerProtos.PutSmallFileResponseProto responseProto =
-        ContainerProtocolCalls
-            .writeSmallFile(client, blockID1, "data123".getBytes());
-    long bcsId = responseProto.getCommittedBlockLength().getBlockID()
-        .getBlockCommitSequenceId();
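-    // bcsId is the commit sequence id the datanode assigned to this block;
-    // reads that request a higher bcsId than committed must fail.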
-    try {
-      blockID1.setBlockCommitSequenceId(bcsId + 1);
-      //read a file with higher bcsId than the container bcsId
-      ContainerProtocolCalls
-          .readSmallFile(client, blockID1);
-      Assert.fail("Expected exception not thrown");
-    } catch (StorageContainerException sce) {
-      Assert.assertEquals(ContainerProtos.Result.UNKNOWN_BCSID,
-          sce.getResult());
-    }
-
-    // write a new block again to bump up the container bcsId
-    BlockID blockID2 = ContainerTestHelper
-        .getTestBlockID(container.getContainerInfo().getContainerID());
-    ContainerProtocolCalls
-        .writeSmallFile(client, blockID2, "data123".getBytes());
-
-    try {
-      blockID1.setBlockCommitSequenceId(bcsId + 1);
-      //read a file with higher bcsId than the committed bcsId for the block
-      ContainerProtocolCalls.readSmallFile(client, blockID1);
-      Assert.fail("Expected exception not thrown");
-    } catch (StorageContainerException sce) {
-      Assert.assertEquals(ContainerProtos.Result.BCSID_MISMATCH,
-          sce.getResult());
-    }
-    blockID1.setBlockCommitSequenceId(bcsId);
-    ContainerProtos.GetSmallFileResponseProto response =
-        ContainerProtocolCalls.readSmallFile(client, blockID1);
-    String readData = response.getData().getData().toStringUtf8();
-    Assert.assertEquals("data123", readData);
-    xceiverClientManager.releaseClient(client, false);
-  }
-}
-
-
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
deleted file mode 100644
index 8e4645f..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.scm;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.
-    ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.
-    StorageContainerException;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.
-    ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.
-    SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.protocolPB.
-    StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Test Container calls.
- */
-public class TestGetCommittedBlockLengthAndPutKey {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration ozoneConfig;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
-
-  @BeforeClass
-  public static void init() throws Exception {
-    ozoneConfig = new OzoneConfiguration();
-    ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-        SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
-    cluster =
-        MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1).build();
-    cluster.waitForClusterToBeReady();
-    storageContainerLocationClient =
-        cluster.getStorageContainerLocationClient();
-    xceiverClientManager = new XceiverClientManager(ozoneConfig);
-  }
-
-  @AfterClass
-  public static void shutdown() throws InterruptedException {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-    IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
-  }
-
-  @Test
-  public void testGetCommittedBlockLength() throws Exception {
-    ContainerProtos.GetCommittedBlockLengthResponseProto response;
-    ContainerWithPipeline container = storageContainerLocationClient
-        .allocateContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    long containerID = container.getContainerInfo().getContainerID();
-    Pipeline pipeline = container.getPipeline();
-    XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
-    //create the container
-    ContainerProtocolCalls.createContainer(client, containerID, null);
-
-    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
-    byte[] data =
-        RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes();
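-    // Random payload; its byte length is used as the chunk length below.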
-    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
-        ContainerTestHelper
-            .getWriteChunkRequest(container.getPipeline(), blockID,
-                data.length);
-    client.sendCommand(writeChunkRequest);
-    // Now, explicitly make a putKey request for the block.
-    ContainerProtos.ContainerCommandRequestProto putKeyRequest =
-        ContainerTestHelper
-            .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
-    client.sendCommand(putKeyRequest);
-    response = ContainerProtocolCalls
-        .getCommittedBlockLength(client, blockID);
-    // make sure the block ids in the request and response are the same.
-    Assert.assertEquals(blockID,
-        BlockID.getFromProtobuf(response.getBlockID()));
-    Assert.assertEquals(data.length, response.getBlockLength());
-    xceiverClientManager.releaseClient(client, false);
-  }
-
-  @Test
-  public void testGetCommittedBlockLengthForInvalidBlock() throws Exception {
-    ContainerWithPipeline container = storageContainerLocationClient
-        .allocateContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    long containerID = container.getContainerInfo().getContainerID();
-    XceiverClientSpi client = xceiverClientManager
-        .acquireClient(container.getPipeline());
-    ContainerProtocolCalls.createContainer(client, containerID, null);
-
-    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
-    // move the container to closed state
-    ContainerProtocolCalls.closeContainer(client, containerID, null);
-    try {
-      // There is no block written inside the container. The request should
-      // fail.
-      ContainerProtocolCalls.getCommittedBlockLength(client, blockID);
-      Assert.fail("Expected exception not thrown");
-    } catch (StorageContainerException sce) {
-      Assert.assertTrue(sce.getMessage().contains("Unable to find the block"));
-    }
-    xceiverClientManager.releaseClient(client, false);
-  }
-
-  @Test
-  public void testPutKeyResponse() throws Exception {
-    ContainerProtos.PutBlockResponseProto response;
-    ContainerWithPipeline container = storageContainerLocationClient
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    long containerID = container.getContainerInfo().getContainerID();
-    Pipeline pipeline = container.getPipeline();
-    XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
-    //create the container
-    ContainerProtocolCalls.createContainer(client, containerID, null);
-
-    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
-    byte[] data =
-        RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes();
-    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
-        ContainerTestHelper
-            .getWriteChunkRequest(container.getPipeline(), blockID,
-                data.length);
-    client.sendCommand(writeChunkRequest);
-    // Now, explicitly make a putKey request for the block.
-    ContainerProtos.ContainerCommandRequestProto putKeyRequest =
-        ContainerTestHelper
-            .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
-    response = client.sendCommand(putKeyRequest).getPutBlock();
-    Assert.assertEquals(data.length,
-        response.getCommittedBlockLength().getBlockLength());
-    Assert.assertTrue(response.getCommittedBlockLength().getBlockID()
-        .getBlockCommitSequenceId() > 0);
-    BlockID responseBlockID = BlockID
-        .getFromProtobuf(response.getCommittedBlockLength().getBlockID());
-    blockID
-        .setBlockCommitSequenceId(responseBlockID.getBlockCommitSequenceId());
-    // make sure the block ids in the request and response are the same.
-    // This also ensures that the block was committed on the datanodes.
-    Assert.assertEquals(responseBlockID, blockID);
-    xceiverClientManager.releaseClient(client, false);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java
deleted file mode 100644
index 536d807..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.scm;
-
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .SCMContainerPlacementMetrics;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.net.DNSToSwitchMapping;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.StaticMapping;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.UUID;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
-    .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
-import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
-import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-
-/**
- * Test cases to verify the metrics exposed by SCMContainerPlacementMetrics.
- */
-public class TestSCMContainerPlacementPolicyMetrics {
-
-  private MiniOzoneCluster cluster;
-  private MetricsRecordBuilder metrics;
-  private static OzoneClient ozClient = null;
-  private static ObjectStore store = null;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-        "org.apache.hadoop.hdds.scm.container.placement.algorithms." +
-            "SCMContainerPlacementRackAware");
-    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true);
-    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
-        StaticMapping.class, DNSToSwitchMapping.class);
-    StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
-        Collections.singleton(HddsUtils.getHostName(conf))).get(0),
-        "/rack1");
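-    // Map the local host to /rack1 so rack-aware placement can resolve it.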
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(4)
-        .build();
-    cluster.waitForClusterToBeReady();
-    metrics = getMetrics(SCMContainerPlacementMetrics.class.getSimpleName());
-    ozClient = OzoneClientFactory.getRpcClient(conf);
-    store = ozClient.getObjectStore();
-  }
-
-  /**
-   * Verifies container placement metric.
-   */
-  @Test(timeout = 60000)
-  public void test() throws IOException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    String value = "sample value";
-    store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    String keyName = UUID.randomUUID().toString();
-
-    // Write data into a key
-    try (OzoneOutputStream out = bucket.createKey(keyName,
-        value.getBytes().length, ReplicationType.RATIS,
-        THREE, new HashMap<>())) {
-      out.write(value.getBytes());
-    }
-
-    // close container
-    PipelineManager manager =
-        cluster.getStorageContainerManager().getPipelineManager();
-    List<Pipeline> pipelines = manager.getPipelines().stream().filter(p ->
-        p.getType() == HddsProtos.ReplicationType.RATIS &&
-            p.getFactor() == HddsProtos.ReplicationFactor.THREE)
-        .collect(Collectors.toList());
-    Pipeline targetPipeline = pipelines.get(0);
-    List<DatanodeDetails> nodes = targetPipeline.getNodes();
-    manager.finalizeAndDestroyPipeline(pipelines.get(0), true);
-
-    // kill datanode to trigger under-replicated container replication
-    cluster.shutdownHddsDatanode(nodes.get(0));
-    try {
-      Thread.sleep(5 * 1000);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-    }
-    cluster.getStorageContainerManager().getReplicationManager()
-        .processContainersNow();
-    try {
-      Thread.sleep(30 * 1000);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-    }
-
-    long totalRequest = getLongCounter("DatanodeRequestCount", metrics);
-    long tryCount = getLongCounter("DatanodeChooseAttemptCount", metrics);
-    long successCount =
-        getLongCounter("DatanodeChooseSuccessCount", metrics);
-    long compromiseCount =
-        getLongCounter("DatanodeChooseFallbackCount", metrics);
-
-    // It seems no under-replicated closed containers get re-replicated
-    // here, so all placement metrics should stay at zero.
-    Assert.assertEquals(0, totalRequest);
-    Assert.assertEquals(0, tryCount);
-    Assert.assertEquals(0, successCount);
-    Assert.assertEquals(0, compromiseCount);
-  }
-
-  @After
-  public void teardown() {
-    cluster.shutdown();
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
deleted file mode 100644
index e700a0e..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.scm;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Iterator;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.TimeoutException;
-
-import javax.management.openmbean.CompositeData;
-import javax.management.openmbean.TabularData;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * This class tests the JMX management interface for SCM information.
- */
-public class TestSCMMXBean {
-
-  public static final Log LOG = LogFactory.getLog(TestSCMMXBean.class);
-  private static int numOfDatanodes = 1;
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static StorageContainerManager scm;
-  private static MBeanServer mbs;
-
-  @BeforeClass
-  public static void init() throws IOException, TimeoutException,
-      InterruptedException {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(numOfDatanodes)
-        .build();
-    cluster.waitForClusterToBeReady();
-    scm = cluster.getStorageContainerManager();
-    mbs = ManagementFactory.getPlatformMBeanServer();
-  }
-
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testSCMMXBean() throws Exception {
-    ObjectName bean = new ObjectName(
-        "Hadoop:service=StorageContainerManager,"
-            + "name=StorageContainerManagerInfo,"
-            + "component=ServerRuntime");
-
-    String dnRpcPort = (String)mbs.getAttribute(bean,
-        "DatanodeRpcPort");
-    assertEquals(scm.getDatanodeRpcPort(), dnRpcPort);
-
-
-    String clientRpcPort = (String)mbs.getAttribute(bean,
-        "ClientRpcPort");
-    assertEquals(scm.getClientRpcPort(), clientRpcPort);
-
-    ConcurrentMap<String, ContainerStat> map = scm.getContainerReportCache();
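-    // Inject a dummy per-node container stat so the JMX report has an entry.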
-    ContainerStat stat = new ContainerStat(1, 2, 3, 4, 5, 6, 7);
-    map.put("nodeID", stat);
-    TabularData data = (TabularData) mbs.getAttribute(
-        bean, "ContainerReport");
-
-    // verify report info
-    assertEquals(1, data.values().size());
-    for (Object obj : data.values()) {
-      assertTrue(obj instanceof CompositeData);
-      CompositeData d = (CompositeData) obj;
-      Iterator<?> it = d.values().iterator();
-      String key = it.next().toString();
-      String value = it.next().toString();
-      assertEquals("nodeID", key);
-      assertEquals(stat.toJsonString(), value);
-    }
-
-    boolean inSafeMode = (boolean) mbs.getAttribute(bean,
-        "InSafeMode");
-    assertEquals(scm.isInSafeMode(), inSafeMode);
-
-    double containerThreshold = (double) mbs.getAttribute(bean,
-        "SafeModeCurrentContainerThreshold");
-    assertEquals(scm.getCurrentContainerThreshold(), containerThreshold, 0);
-  }
-
-  @Test
-  public void testSCMContainerStateCount() throws Exception {
-
-    ObjectName bean = new ObjectName(
-        "Hadoop:service=StorageContainerManager,"
-            + "name=StorageContainerManagerInfo,"
-            + "component=ServerRuntime");
-    TabularData data = (TabularData) mbs.getAttribute(
-        bean, "ContainerStateCount");
-    Map<String, Integer> containerStateCount = scm.getContainerStateCount();
-    verifyEquals(data, containerStateCount);
-
-    // Do some changes like allocate containers and change the container states
-    ContainerManager scmContainerManager = scm.getContainerManager();
-
-    List<ContainerInfo> containerInfoList = new ArrayList<>();
-    for (int i=0; i < 10; i++) {
-      containerInfoList.add(scmContainerManager.allocateContainer(HddsProtos
-          .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor.ONE,
-          UUID.randomUUID().toString()));
-    }
-    long containerID;
-    for (int i=0; i < 10; i++) {
-      if (i % 2 == 0) {
-        containerID = containerInfoList.get(i).getContainerID();
-        scmContainerManager.updateContainerState(
-            new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE);
-        assertEquals(HddsProtos.LifeCycleState.CLOSING,
-            scmContainerManager.getContainer(
-                new ContainerID(containerID)).getState());
-      } else {
-        containerID = containerInfoList.get(i).getContainerID();
-        scmContainerManager.updateContainerState(
-            new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE);
-        scmContainerManager.updateContainerState(
-            new ContainerID(containerID), HddsProtos.LifeCycleEvent.CLOSE);
-        assertEquals(HddsProtos.LifeCycleState.CLOSED,
-            scmContainerManager.getContainer(
-                new ContainerID(containerID)).getState());
-      }
-
-    }
-
-    data = (TabularData) mbs.getAttribute(
-        bean, "ContainerStateCount");
-    containerStateCount = scm.getContainerStateCount();
-
-    containerStateCount.forEach((k, v) -> {
-      if (HddsProtos.LifeCycleState.CLOSING.toString().equals(k)) {
-        assertEquals(5, (int) v);
-      } else if (HddsProtos.LifeCycleState.CLOSED.toString().equals(k)) {
-        assertEquals(5, (int) v);
-      } else {
-        // All remaining container state counts should be zero.
-        assertEquals(0, (int) v);
-      }
-    });
-
-    verifyEquals(data, containerStateCount);
-
-  }
-
-
-  /**
-   * An internal function used to compare a TabularData returned
-   * by JMX with the expected data in a Map.
-   */
-  private void verifyEquals(TabularData actualData,
-      Map<String, Integer> expectedData) {
-    if (actualData == null || expectedData == null) {
-      fail("Data should not be null.");
-    }
-    for (Object obj : actualData.values()) {
-      // Each TabularData is a set of CompositeData
-      assertTrue(obj instanceof CompositeData);
-      CompositeData cds = (CompositeData) obj;
-      assertEquals(2, cds.values().size());
-      Iterator<?> it = cds.values().iterator();
-      String key = it.next().toString();
-      String value = it.next().toString();
-      int num = Integer.parseInt(value);
-      assertTrue(expectedData.containsKey(key));
-      assertEquals(expectedData.remove(key).intValue(), num);
-    }
-    assertTrue(expectedData.isEmpty());
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java
deleted file mode 100644
index 43b9bf0..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.scm;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-import javax.management.openmbean.CompositeData;
-import javax.management.openmbean.TabularData;
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Class which tests the SCMNodeManagerInfo Bean.
- */
-public class TestSCMNodeManagerMXBean {
-  public static final Log LOG =
-      LogFactory.getLog(TestSCMNodeManagerMXBean.class);
-  private static int numOfDatanodes = 3;
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static StorageContainerManager scm;
-  private static MBeanServer mbs;
-
-  @BeforeClass
-  public static void init() throws IOException, TimeoutException,
-      InterruptedException {
-    conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_STALENODE_INTERVAL, "60000ms");
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(numOfDatanodes)
-        .build();
-    cluster.waitForClusterToBeReady();
-    scm = cluster.getStorageContainerManager();
-    mbs = ManagementFactory.getPlatformMBeanServer();
-  }
-
-  @AfterClass
-  public static void cleanup() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testDiskUsage() throws Exception {
-    ObjectName bean = new ObjectName(
-        "Hadoop:service=SCMNodeManager,"
-            + "name=SCMNodeManagerInfo");
-
-    TabularData data = (TabularData) mbs.getAttribute(bean, "NodeInfo");
-    Map<String, Long> datanodeInfo = scm.getScmNodeManager().getNodeInfo();
-    verifyEquals(data, datanodeInfo);
-  }
-
-  @Test
-  public void testNodeCount() throws Exception {
-    ObjectName bean = new ObjectName(
-        "Hadoop:service=SCMNodeManager,"
-            + "name=SCMNodeManagerInfo");
-
-    TabularData data = (TabularData) mbs.getAttribute(bean, "NodeCount");
-    Map<String, Integer> nodeCount = scm.getScmNodeManager().getNodeCount();
-    Map<String, Long> nodeCountLong = new HashMap<>();
-    nodeCount.forEach((k, v) -> nodeCountLong.put(k, Long.valueOf(v)));
-    verifyEquals(data, nodeCountLong);
-  }
-
-  private void verifyEquals(TabularData actualData, Map<String, Long>
-      expectedData) {
-    if (actualData == null || expectedData == null) {
-      fail("Data should not be null.");
-    }
-    for (Object obj : actualData.values()) {
-      assertTrue(obj instanceof CompositeData);
-      CompositeData cds = (CompositeData) obj;
-      assertEquals(2, cds.values().size());
-      Iterator<?> it = cds.values().iterator();
-      String key = it.next().toString();
-      String value = it.next().toString();
-      long num = Long.parseLong(value);
-      assertTrue(expectedData.containsKey(key));
-      assertEquals(expectedData.remove(key).longValue(), num);
-    }
-    assertTrue(expectedData.isEmpty());
-  }
-
-}
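
Note: the MXBean tests removed in this patch (this one and TestPipelineManagerMXBean below) read SCM state straight off the platform MBean server via javax.management, so the same data is reachable from any code running in the SCM JVM. A minimal standalone sketch, assuming a process that actually registers the bean name used in the test above:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;
    import javax.management.openmbean.CompositeData;
    import javax.management.openmbean.TabularData;

    public final class ScmNodeInfoReader {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // Bean name copied from the test above; it only resolves in a JVM
        // that is running an SCM.
        ObjectName bean = new ObjectName(
            "Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo");
        TabularData nodeInfo = (TabularData) mbs.getAttribute(bean, "NodeInfo");
        for (Object row : nodeInfo.values()) {
          // Each row is a two-column (key, value) CompositeData, the same
          // shape verifyEquals() above iterates over.
          System.out.println(((CompositeData) row).values());
        }
      }
    }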
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
deleted file mode 100644
index 4c25b0c..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.scm;
-
-import com.google.common.cache.Cache;
-import org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import java.io.IOException;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
-
-/**
- * Test for XceiverClientManager caching and eviction.
- */
-public class TestXceiverClientManager {
-  private static OzoneConfiguration config;
-  private static MiniOzoneCluster cluster;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private static String containerOwner = "OZONE";
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @Before
-  public void init() throws Exception {
-    config = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(config)
-        .setNumDatanodes(3)
-        .build();
-    cluster.waitForClusterToBeReady();
-    storageContainerLocationClient = cluster
-        .getStorageContainerLocationClient();
-  }
-
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-    IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
-  }
-
-  @Test
-  public void testCaching() throws IOException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    String metaDir = GenericTestUtils.getTempPath(
-        TestXceiverClientManager.class.getName() + UUID.randomUUID());
-    conf.set(HDDS_METADATA_DIR_NAME, metaDir);
-
-    XceiverClientManager clientManager = new XceiverClientManager(conf);
-
-    ContainerWithPipeline container1 = storageContainerLocationClient
-        .allocateContainer(clientManager.getType(), clientManager.getFactor(),
-            containerOwner);
-    XceiverClientSpi client1 = clientManager
-        .acquireClient(container1.getPipeline());
-    Assert.assertEquals(1, client1.getRefcount());
-
-    ContainerWithPipeline container2 = storageContainerLocationClient
-        .allocateContainer(clientManager.getType(), clientManager.getFactor(),
-            containerOwner);
-    XceiverClientSpi client2 = clientManager
-        .acquireClient(container2.getPipeline());
-    Assert.assertEquals(1, client2.getRefcount());
-
-    XceiverClientSpi client3 = clientManager
-        .acquireClient(container1.getPipeline());
-    Assert.assertEquals(2, client3.getRefcount());
-    Assert.assertEquals(2, client1.getRefcount());
-    Assert.assertEquals(client1, client3);
-    clientManager.releaseClient(client1, false);
-    clientManager.releaseClient(client2, false);
-    clientManager.releaseClient(client3, false);
-  }
-
-  @Test
-  public void testFreeByReference() throws IOException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class);
-    clientConfig.setMaxSize(1);
-    String metaDir = GenericTestUtils.getTempPath(
-        TestXceiverClientManager.class.getName() + UUID.randomUUID());
-    conf.set(HDDS_METADATA_DIR_NAME, metaDir);
-    XceiverClientManager clientManager =
-        new XceiverClientManager(conf, clientConfig, null);
-    Cache<String, XceiverClientSpi> cache =
-        clientManager.getClientCache();
-
-    ContainerWithPipeline container1 =
-        storageContainerLocationClient.allocateContainer(
-            clientManager.getType(), HddsProtos.ReplicationFactor.ONE,
-            containerOwner);
-    XceiverClientSpi client1 = clientManager
-        .acquireClient(container1.getPipeline());
-    Assert.assertEquals(1, client1.getRefcount());
-    Assert.assertEquals(container1.getPipeline(),
-        client1.getPipeline());
-
-    ContainerWithPipeline container2 =
-        storageContainerLocationClient.allocateContainer(
-            clientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    XceiverClientSpi client2 = clientManager
-        .acquireClient(container2.getPipeline());
-    Assert.assertEquals(1, client2.getRefcount());
-    Assert.assertNotEquals(client1, client2);
-
-    // The least recently used client (the one for container1) is evicted.
-    XceiverClientSpi nonExistent1 = cache.getIfPresent(
-        container1.getContainerInfo().getPipelineID().getId().toString()
-            + container1.getContainerInfo().getReplicationType());
-    Assert.assertNull(nonExistent1);
-    // However, the container call should still succeed because the
-    // client still holds a nonzero refcount.
-    ContainerProtocolCalls.createContainer(client1,
-        container1.getContainerInfo().getContainerID(), null);
-
-    // After releasing the client, this connection should be closed
-    // and any container operations should fail
-    clientManager.releaseClient(client1, false);
-
-    String expectedMessage = "This channel is not connected.";
-    try {
-      ContainerProtocolCalls.createContainer(client1,
-          container1.getContainerInfo().getContainerID(), null);
-      Assert.fail("Create container should throw exception on closed"
-          + "client");
-    } catch (Exception e) {
-      Assert.assertEquals(IOException.class, e.getClass());
-      Assert.assertTrue(e.getMessage().contains(expectedMessage));
-    }
-    clientManager.releaseClient(client2, false);
-  }
-
-  @Test
-  public void testFreeByEviction() throws IOException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class);
-    clientConfig.setMaxSize(1);
-    String metaDir = GenericTestUtils.getTempPath(
-        TestXceiverClientManager.class.getName() + UUID.randomUUID());
-    conf.set(HDDS_METADATA_DIR_NAME, metaDir);
-    XceiverClientManager clientManager =
-        new XceiverClientManager(conf, clientConfig, null);
-    Cache<String, XceiverClientSpi> cache =
-        clientManager.getClientCache();
-
-    ContainerWithPipeline container1 =
-        storageContainerLocationClient.allocateContainer(
-            clientManager.getType(),
-            clientManager.getFactor(), containerOwner);
-    XceiverClientSpi client1 = clientManager
-        .acquireClient(container1.getPipeline());
-    Assert.assertEquals(1, client1.getRefcount());
-
-    clientManager.releaseClient(client1, false);
-    Assert.assertEquals(0, client1.getRefcount());
-
-    ContainerWithPipeline container2 = storageContainerLocationClient
-        .allocateContainer(clientManager.getType(), clientManager.getFactor(),
-            containerOwner);
-    XceiverClientSpi client2 = clientManager
-        .acquireClient(container2.getPipeline());
-    Assert.assertEquals(1, client2.getRefcount());
-    Assert.assertNotEquals(client1, client2);
-
-    // now client 1 should be evicted
-    XceiverClientSpi nonExistent = cache.getIfPresent(
-        container1.getContainerInfo().getPipelineID().getId().toString()
-            + container1.getContainerInfo().getReplicationType());
-    Assert.assertNull(nonExistent);
-
-    // Any container operation should now fail
-    String expectedMessage = "This channel is not connected.";
-    try {
-      ContainerProtocolCalls.createContainer(client1,
-          container1.getContainerInfo().getContainerID(), null);
-      Assert.fail("Create container should throw exception on closed"
-          + "client");
-    } catch (Exception e) {
-      Assert.assertEquals(IOException.class, e.getClass());
-      Assert.assertTrue(e.getMessage().contains(expectedMessage));
-    }
-    clientManager.releaseClient(client2, false);
-  }
-
-  @Test
-  public void testFreeByRetryFailure() throws IOException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class);
-    clientConfig.setMaxSize(1);
-    XceiverClientManager clientManager =
-        new XceiverClientManager(conf, clientConfig, null);
-    Cache<String, XceiverClientSpi> cache =
-        clientManager.getClientCache();
-
-    // The client is added to the cache.
-    ContainerWithPipeline container1 = storageContainerLocationClient
-        .allocateContainer(clientManager.getType(), clientManager.getFactor(),
-            containerOwner);
-    XceiverClientSpi client1 =
-        clientManager.acquireClient(container1.getPipeline());
-    clientManager.acquireClient(container1.getPipeline());
-    Assert.assertEquals(2, client1.getRefcount());
-
-    // client should be invalidated in the cache
-    clientManager.releaseClient(client1, true);
-    Assert.assertEquals(1, client1.getRefcount());
-    Assert.assertNull(cache.getIfPresent(
-        container1.getContainerInfo().getPipelineID().getId().toString()
-            + container1.getContainerInfo().getReplicationType()));
-
-    // A new client should be added to the cache.
-    XceiverClientSpi client2 =
-        clientManager.acquireClient(container1.getPipeline());
-    Assert.assertNotEquals(client1, client2);
-    Assert.assertEquals(1, client2.getRefcount());
-
-    // Releasing the old client must not invalidate the new cache entry.
-    clientManager.releaseClient(client1, true);
-    Assert.assertEquals(0, client1.getRefcount());
-    Assert.assertNotNull(cache.getIfPresent(
-        container1.getContainerInfo().getPipelineID().getId().toString()
-            + container1.getContainerInfo().getReplicationType()));
-  }
-}
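
The contract these cache tests pin down is worth spelling out: the client cache may evict an entry at any time (maximumSize is 1 in the eviction tests), but an evicted client must stay usable until every holder releases it, and releasing with invalidate=true must only drop the cache entry, not break later acquirers. A minimal sketch of that refcount-aware eviction pattern with a plain Guava cache; RefCountedClient is a hypothetical stand-in for XceiverClientSpi:

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.RemovalListener;
    import java.util.concurrent.atomic.AtomicInteger;

    final class ClientCacheSketch {

      // Hypothetical stand-in for XceiverClientSpi's refcounting.
      static final class RefCountedClient {
        final AtomicInteger refCount = new AtomicInteger();
        void close() { /* release the underlying channel */ }
      }

      // Entries may be evicted while still referenced; only close when no
      // caller holds the client, mirroring testFreeByReference above.
      final RemovalListener<String, RefCountedClient> onEvict = n -> {
        RefCountedClient c = n.getValue();
        if (c != null && c.refCount.get() == 0) {
          c.close();
        }
      };

      final Cache<String, RefCountedClient> cache = CacheBuilder.newBuilder()
          .maximumSize(1)           // same bound the tests set via setMaxSize(1)
          .removalListener(onEvict)
          .build();
    }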
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
deleted file mode 100644
index 5285fb3..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.scm;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-
-import java.util.List;
-import java.util.ArrayList;
-import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CountDownLatch;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * This class tests the metrics of XceiverClient.
- */
-public class TestXceiverClientMetrics {
-  // only for testing
-  private volatile boolean breakFlag;
-  private CountDownLatch latch;
-
-  private static OzoneConfiguration config;
-  private static MiniOzoneCluster cluster;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private static String containerOwner = "OZONE";
-
-  @BeforeClass
-  public static void init() throws Exception {
-    config = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(config).build();
-    cluster.waitForClusterToBeReady();
-    storageContainerLocationClient = cluster
-        .getStorageContainerLocationClient();
-  }
-
-  @AfterClass
-  public static void shutdown() {
-    cluster.shutdown();
-  }
-
-  @Test
-  public void testMetrics() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    String metaDir = GenericTestUtils.getTempPath(
-        TestXceiverClientManager.class.getName() + UUID.randomUUID());
-    conf.set(HDDS_METADATA_DIR_NAME, metaDir);
-
-    XceiverClientManager clientManager = new XceiverClientManager(conf);
-
-    ContainerWithPipeline container = storageContainerLocationClient
-        .allocateContainer(clientManager.getType(), clientManager.getFactor(),
-            containerOwner);
-    XceiverClientSpi client = clientManager
-        .acquireClient(container.getPipeline());
-
-    ContainerCommandRequestProto request = ContainerTestHelper
-        .getCreateContainerRequest(
-            container.getContainerInfo().getContainerID(),
-            container.getPipeline());
-    client.sendCommand(request);
-
-    MetricsRecordBuilder containerMetrics = getMetrics(
-        XceiverClientMetrics.SOURCE_NAME);
-    // The request above was sent synchronously, so there should be no
-    // pending requests.
-    assertCounter("PendingOps", 0L, containerMetrics);
-    assertCounter("numPendingCreateContainer", 0L, containerMetrics);
-    // the counter value of average latency metric should be increased
-    assertCounter("CreateContainerLatencyNumOps", 1L, containerMetrics);
-
-    breakFlag = false;
-    latch = new CountDownLatch(1);
-
-    int numRequest = 10;
-    List<CompletableFuture<ContainerCommandResponseProto>> computeResults
-        = new ArrayList<>();
-    // start new thread to send async requests
-    Thread sendThread = new Thread(() -> {
-      while (!breakFlag) {
-        try {
-          // use async interface for testing pending metrics
-          for (int i = 0; i < numRequest; i++) {
-            BlockID blockID = ContainerTestHelper.
-                getTestBlockID(container.getContainerInfo().getContainerID());
-            ContainerProtos.ContainerCommandRequestProto smallFileRequest;
-
-            smallFileRequest = ContainerTestHelper.getWriteSmallFileRequest(
-                client.getPipeline(), blockID, 1024);
-            CompletableFuture<ContainerProtos.ContainerCommandResponseProto>
-                response =
-                client.sendCommandAsync(smallFileRequest).getResponse();
-            computeResults.add(response);
-          }
-
-          Thread.sleep(1000);
-        } catch (Exception ignored) {
-        }
-      }
-
-      latch.countDown();
-    });
-    sendThread.start();
-
-    GenericTestUtils.waitFor(() -> {
-      // check if pending metric count is increased
-      MetricsRecordBuilder metric =
-          getMetrics(XceiverClientMetrics.SOURCE_NAME);
-      long pendingOps = getLongCounter("PendingOps", metric);
-      long pendingPutSmallFileOps =
-          getLongCounter("numPendingPutSmallFile", metric);
-
-      if (pendingOps > 0 && pendingPutSmallFileOps > 0) {
-        // Signal the sender thread to stop.
-        breakFlag = true;
-        return true;
-      } else {
-        return false;
-      }
-    }, 100, 60000);
-
-    // Block until the sender thread has stopped issuing async requests.
-    latch.await();
-    // Wait for all futures to complete.
-    GenericTestUtils.waitFor(() -> {
-      for (CompletableFuture future : computeResults) {
-        if (!future.isDone()) {
-          return false;
-        }
-      }
-
-      return true;
-    }, 100, 60000);
-
-    // the counter value of pending metrics should be decreased to 0
-    containerMetrics = getMetrics(XceiverClientMetrics.SOURCE_NAME);
-    assertCounter("PendingOps", 0L, containerMetrics);
-    assertCounter("numPendingPutSmallFile", 0L, containerMetrics);
-
-    clientManager.close();
-  }
-}
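
The final waitFor loop above polls CompletableFuture.isDone() across the whole list; with only the JDK, the same barrier can be written directly. A sketch, assuming the same list-of-futures shape and the test's 60-second budget:

    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeUnit;

    final class AwaitAll {
      // Equivalent of the waitFor(isDone) loop above, using only the JDK.
      static void await(List<? extends CompletableFuture<?>> futures)
          throws Exception {
        CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
            .get(60, TimeUnit.SECONDS);  // same 60s budget as the test
      }
    }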
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
deleted file mode 100644
index c9b8c89..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.scm.node;
-
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import static java.util.concurrent.TimeUnit.SECONDS;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_HEARTBEAT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_PIPELINE_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_COMMAND_STATUS_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_NODE_REPORT_INTERVAL;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DEADNODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_STALENODE_INTERVAL;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test Query Node Operation.
- */
-public class TestQueryNode {
-  private static int numOfDatanodes = 5;
-  private MiniOzoneCluster cluster;
-
-  private ContainerOperationClient scmClient;
-
-  @Before
-  public void setUp() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    final int interval = 100;
-
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
-        interval, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
-    conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1, SECONDS);
-    conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 1, SECONDS);
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, SECONDS);
-    conf.setTimeDuration(HDDS_NODE_REPORT_INTERVAL, 1, SECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
-    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
-
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(numOfDatanodes)
-        .build();
-    cluster.waitForClusterToBeReady();
-    scmClient = new ContainerOperationClient(cluster
-        .getStorageContainerLocationClient(),
-        new XceiverClientManager(conf));
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testHealthyNodesCount() throws Exception {
-    List<HddsProtos.Node> nodes = scmClient.queryNode(HEALTHY,
-        HddsProtos.QueryScope.CLUSTER, "");
-    assertEquals("Expected  live nodes", numOfDatanodes,
-        nodes.size());
-  }
-
-  @Test(timeout = 10 * 1000L)
-  public void testStaleNodesCount() throws Exception {
-    cluster.shutdownHddsDatanode(0);
-    cluster.shutdownHddsDatanode(1);
-
-    GenericTestUtils.waitFor(() ->
-            cluster.getStorageContainerManager().getNodeCount(STALE) == 2,
-        100, 4 * 1000);
-
-    int nodeCount = scmClient.queryNode(STALE,
-        HddsProtos.QueryScope.CLUSTER, "").size();
-    assertEquals("Mismatch of expected nodes count", 2, nodeCount);
-
-    GenericTestUtils.waitFor(() ->
-            cluster.getStorageContainerManager().getNodeCount(DEAD) == 2,
-        100, 4 * 1000);
-
-    // Assert that we don't find any stale nodes.
-    nodeCount = scmClient.queryNode(STALE,
-        HddsProtos.QueryScope.CLUSTER, "").size();
-    assertEquals("Mismatch of expected nodes count", 0, nodeCount);
-
-    // Assert that we find the expected number of dead nodes.
-    nodeCount = scmClient.queryNode(DEAD,
-        HddsProtos.QueryScope.CLUSTER, "").size();
-    assertEquals("Mismatch of expected nodes count", 2, nodeCount);
-  }
-}
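
Several of these tests rely on GenericTestUtils.waitFor(condition, checkEveryMillis, waitForMillis). For readers without the Hadoop test jars, a dependency-free equivalent with the semantics implied by the call sites above (the real helper throws TimeoutException rather than the unchecked exception used here):

    import java.util.function.BooleanSupplier;

    final class WaitFor {
      // Poll `check` every checkEveryMillis until it is true, failing after
      // waitForMillis, matching GenericTestUtils.waitFor's call sites above.
      static void waitFor(BooleanSupplier check, long checkEveryMillis,
          long waitForMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + waitForMillis;
        while (!check.getAsBoolean()) {
          if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("Timed out waiting for condition");
          }
          Thread.sleep(checkEveryMillis);
        }
      }
    }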
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
deleted file mode 100644
index 65a6357..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.scm.node;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.node.SCMNodeMetrics;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
-import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-
-/**
- * Test cases to verify the metrics exposed by SCMNodeManager.
- */
-public class TestSCMNodeMetrics {
-
-  private MiniOzoneCluster cluster;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  /**
-   * Verifies heartbeat processing count.
-   *
-   * @throws InterruptedException
-   */
-  @Test
-  public void testHBProcessing() throws InterruptedException {
-    MetricsRecordBuilder metrics = getMetrics(
-        SCMNodeMetrics.class.getSimpleName());
-    long hbProcessed = getLongCounter("NumHBProcessed", metrics);
-    cluster.getHddsDatanodes().get(0)
-        .getDatanodeStateMachine().triggerHeartbeat();
-    // Give some time so that SCM receives and processes the heartbeat.
-    Thread.sleep(100L);
-    assertCounter("NumHBProcessed", hbProcessed + 1,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-  }
-
-  /**
-   * Verifies heartbeat processing failure count.
-   */
-  @Test
-  public void testHBProcessingFailure() {
-    MetricsRecordBuilder metrics = getMetrics(
-        SCMNodeMetrics.class.getSimpleName());
-    long hbProcessedFailed = getLongCounter("NumHBProcessingFailed", metrics);
-    cluster.getStorageContainerManager().getScmNodeManager()
-        .processHeartbeat(TestUtils.randomDatanodeDetails());
-    assertCounter("NumHBProcessingFailed", hbProcessedFailed + 1,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-  }
-
-  /**
-   * Verifies node report processing count.
-   *
-   * @throws InterruptedException
-   */
-  @Test
-  public void testNodeReportProcessing() throws InterruptedException {
-    MetricsRecordBuilder metrics = getMetrics(
-        SCMNodeMetrics.class.getSimpleName());
-    long nrProcessed = getLongCounter("NumNodeReportProcessed", metrics);
-    HddsDatanodeService datanode = cluster.getHddsDatanodes().get(0);
-    StorageReportProto storageReport = TestUtils.createStorageReport(
-        datanode.getDatanodeDetails().getUuid(), "/tmp", 100, 10, 90, null);
-    NodeReportProto nodeReport = NodeReportProto.newBuilder()
-        .addStorageReport(storageReport).build();
-    datanode.getDatanodeStateMachine().getContext().addReport(nodeReport);
-    cluster.getStorageContainerManager().getScmNodeManager()
-        .processNodeReport(datanode.getDatanodeDetails(), nodeReport);
-
-    assertCounter("NumNodeReportProcessed", nrProcessed + 1,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-  }
-
-  /**
-   * Verifies node report processing failure count.
-   */
-  @Test
-  public void testNodeReportProcessingFailure() {
-    MetricsRecordBuilder metrics = getMetrics(
-        SCMNodeMetrics.class.getSimpleName());
-    long nrProcessed = getLongCounter("NumNodeReportProcessingFailed",
-        metrics);
-    DatanodeDetails datanode = TestUtils.randomDatanodeDetails();
-    StorageReportProto storageReport = TestUtils.createStorageReport(
-        datanode.getUuid(), "/tmp", 100, 10, 90, null);
-    NodeReportProto nodeReport = NodeReportProto.newBuilder()
-        .addStorageReport(storageReport).build();
-
-    cluster.getStorageContainerManager().getScmNodeManager()
-        .processNodeReport(datanode, nodeReport);
-    assertCounter("NumNodeReportProcessingFailed", nrProcessed + 1,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-  }
-
-  /**
-   * Verify that datanode aggregated state and capacity metrics are reported.
-   */
-  @Test
-  public void testNodeCountAndInfoMetricsReported() throws Exception {
-    HddsDatanodeService datanode = cluster.getHddsDatanodes().get(0);
-    StorageReportProto storageReport = TestUtils.createStorageReport(
-        datanode.getDatanodeDetails().getUuid(), "/tmp", 100, 10, 90, null);
-    NodeReportProto nodeReport = NodeReportProto.newBuilder()
-        .addStorageReport(storageReport).build();
-    datanode.getDatanodeStateMachine().getContext().addReport(nodeReport);
-    cluster.getStorageContainerManager().getScmNodeManager()
-        .processNodeReport(datanode.getDatanodeDetails(), nodeReport);
-
-    assertGauge("HealthyNodes", 1,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("StaleNodes", 0,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("DeadNodes", 0,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("DecommissioningNodes", 0,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("DecommissionedNodes", 0,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("DiskCapacity", 100L,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("DiskUsed", 10L,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("DiskRemaining", 90L,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("SSDCapacity", 0L,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("SSDUsed", 0L,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("SSDRemaining", 0L,
-        getMetrics(SCMNodeMetrics.class.getSimpleName()));
-  }
-
-  @After
-  public void teardown() {
-    cluster.shutdown();
-  }
-}
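
Reading the positional arguments of TestUtils.createStorageReport against the gauge assertions above, (100, 10, 90) line up with DiskCapacity, DiskUsed and DiskRemaining, which implies capacity = used + remaining. A trivial consistency check under that assumption:

    final class StorageReportSanity {
      public static void main(String[] args) {
        long capacity = 100L, used = 10L, remaining = 90L; // values from the test
        if (used + remaining != capacity) {
          throw new AssertionError("storage report is internally inconsistent");
        }
      }
    }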
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java
deleted file mode 100644
index 7ac6d18..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * <p>
- * Utility classes to encode/decode DTO objects to/from byte array.
- */
-
-/**
- * Unit tests for Node related functions in SCM.
- */
-package org.apache.hadoop.ozone.scm.node;
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java
deleted file mode 100644
index cdc9f0f..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.scm.pipeline;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-import javax.management.openmbean.CompositeData;
-import javax.management.openmbean.TabularData;
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.concurrent.TimeoutException;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Test cases to verify the metrics exposed by SCMPipelineManager via MXBean.
- */
-public class TestPipelineManagerMXBean {
-
-  private MiniOzoneCluster cluster;
-  private static MBeanServer mbs;
-
-  @Before
-  public void init()
-      throws IOException, TimeoutException, InterruptedException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    mbs = ManagementFactory.getPlatformMBeanServer();
-  }
-
-  /**
-   * Verifies SCMPipelineManagerInfo metrics.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testPipelineInfo() throws Exception {
-    ObjectName bean = new ObjectName(
-        "Hadoop:service=SCMPipelineManager,name=SCMPipelineManagerInfo");
-
-    TabularData data = (TabularData) mbs.getAttribute(bean, "PipelineInfo");
-    Map<String, Integer> datanodeInfo = cluster.getStorageContainerManager()
-        .getPipelineManager().getPipelineInfo();
-    verifyEquals(data, datanodeInfo);
-  }
-
-  private void verifyEquals(TabularData actualData, Map<String, Integer>
-      expectedData) {
-    if (actualData == null || expectedData == null) {
-      fail("Data should not be null.");
-    }
-    for (Object obj : actualData.values()) {
-      assertTrue(obj instanceof CompositeData);
-      CompositeData cds = (CompositeData) obj;
-      assertEquals(2, cds.values().size());
-      Iterator<?> it = cds.values().iterator();
-      String key = it.next().toString();
-      String value = it.next().toString();
-      long num = Long.parseLong(value);
-      assertTrue(expectedData.containsKey(key));
-      assertEquals(expectedData.remove(key).longValue(), num);
-    }
-    assertTrue(expectedData.isEmpty());
-  }
-
-  @After
-  public void teardown() {
-    cluster.shutdown();
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java
deleted file mode 100644
index 2f1ec66..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.scm.pipeline;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineMetrics;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.Optional;
-
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-
-/**
- * Test cases to verify the metrics exposed by SCMPipelineManager.
- */
-public class TestSCMPipelineMetrics {
-
-  private MiniOzoneCluster cluster;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3)
-        .build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  /**
-   * Verifies pipeline creation metric.
-   */
-  @Test
-  public void testPipelineCreation() {
-    MetricsRecordBuilder metrics = getMetrics(
-        SCMPipelineMetrics.class.getSimpleName());
-    long numPipelineCreated = getLongCounter("NumPipelineCreated", metrics);
-    // Pipelines are created in the background when the cluster starts.
-    Assert.assertTrue(numPipelineCreated > 0);
-  }
-
-  /**
-   * Verifies pipeline destroy metric.
-   */
-  @Test
-  public void testPipelineDestroy() {
-    PipelineManager pipelineManager = cluster
-        .getStorageContainerManager().getPipelineManager();
-    Optional<Pipeline> pipeline = pipelineManager
-        .getPipelines().stream().findFirst();
-    Assert.assertTrue(pipeline.isPresent());
-    pipeline.ifPresent(pipeline1 -> {
-      try {
-        cluster.getStorageContainerManager()
-            .getClientProtocolServer().closePipeline(
-                pipeline1.getId().getProtobuf());
-      } catch (IOException e) {
-        e.printStackTrace();
-        Assert.fail();
-      }
-    });
-    MetricsRecordBuilder metrics = getMetrics(
-        SCMPipelineMetrics.class.getSimpleName());
-    assertCounter("NumPipelineDestroyed", 1L, metrics);
-  }
-
-  @Test
-  public void testNumBlocksAllocated() throws IOException {
-    AllocatedBlock block =
-        cluster.getStorageContainerManager().getScmBlockManager()
-            .allocateBlock(5, HddsProtos.ReplicationType.RATIS,
-                HddsProtos.ReplicationFactor.ONE, "Test", new ExcludeList());
-    MetricsRecordBuilder metrics =
-        getMetrics(SCMPipelineMetrics.class.getSimpleName());
-    Pipeline pipeline = block.getPipeline();
-    long numBlocksAllocated = getLongCounter(
-        SCMPipelineMetrics.getBlockAllocationMetricName(pipeline), metrics);
-    Assert.assertEquals(1, numBlocksAllocated);
-
-    // destroy the pipeline
-    try {
-      cluster.getStorageContainerManager().getClientProtocolServer()
-          .closePipeline(pipeline.getId().getProtobuf());
-    } catch (IOException e) {
-      e.printStackTrace();
-      Assert.fail();
-    }
-    metrics = getMetrics(SCMPipelineMetrics.class.getSimpleName());
-    try {
-      getLongCounter(SCMPipelineMetrics.getBlockAllocationMetricName(pipeline),
-          metrics);
-      Assert.fail("Metric should not be present for closed pipeline.");
-    } catch (AssertionError e) {
-      Assert.assertTrue(e.getMessage().contains(
-          "Expected exactly one metric for name " + SCMPipelineMetrics
-              .getBlockAllocationMetricName(block.getPipeline())));
-    }
-  }
-
-  @After
-  public void teardown() {
-    cluster.shutdown();
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java
deleted file mode 100644
index ea6734a..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * <p>
- * Utility classes to encode/decode DTO objects to/from byte array.
- */
-
-/**
- * Unit tests for Pipeline related functions in SCM.
- */
-package org.apache.hadoop.ozone.scm.pipeline;
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
deleted file mode 100644
index 43ce679..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
+++ /dev/null
@@ -1,470 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.security.acl;
-
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.BucketManagerImpl;
-import org.apache.hadoop.ozone.om.IOzoneAcl;
-import org.apache.hadoop.ozone.om.KeyManagerImpl;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.PrefixManager;
-import org.apache.hadoop.ozone.om.PrefixManagerImpl;
-import org.apache.hadoop.ozone.om.VolumeManagerImpl;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.ANONYMOUS;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.WORLD;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE;
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.BUCKET;
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY;
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.PREFIX;
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME;
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-import static org.junit.Assert.*;
-
-/**
- * Test class for {@link OzoneNativeAuthorizer}.
- */
-@RunWith(Parameterized.class)
-public class TestOzoneNativeAuthorizer {
-
-  private static OzoneConfiguration ozConfig;
-  private String vol;
-  private String buck;
-  private String key;
-  private String prefix;
-  private ACLType parentDirUserAcl;
-  private ACLType parentDirGroupAcl;
-  private boolean expectedAclResult;
-
-  private static KeyManagerImpl keyManager;
-  private static VolumeManagerImpl volumeManager;
-  private static BucketManagerImpl bucketManager;
-  private static PrefixManager prefixManager;
-  private static OMMetadataManager metadataManager;
-  private static OzoneNativeAuthorizer nativeAuthorizer;
-
-  private static StorageContainerManager scm;
-  private static UserGroupInformation ugi;
-
-  private static OzoneObj volObj;
-  private static OzoneObj buckObj;
-  private static OzoneObj keyObj;
-  private static OzoneObj prefixObj;
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {"key", "dir1/", ALL, ALL, true},
-        {"file1", "2019/june/01/", ALL, ALL, true},
-        {"file2", "", ALL, ALL, true},
-        {"dir1/dir2/dir4/", "", ALL, ALL, true},
-        {"key", "dir1/", NONE, NONE, false},
-        {"file1", "2019/june/01/", NONE, NONE, false},
-        {"file2", "", NONE, NONE, false},
-        {"dir1/dir2/dir4/", "", NONE, NONE, false}
-    });
-  }
-
-  public TestOzoneNativeAuthorizer(String keyName, String prefixName,
-      ACLType userRight,
-      ACLType groupRight, boolean expectedResult) throws IOException {
-    int randomInt = RandomUtils.nextInt();
-    vol = "vol" + randomInt;
-    buck = "bucket" + randomInt;
-    key = keyName + randomInt;
-    prefix = prefixName + randomInt + OZONE_URI_DELIMITER;
-    parentDirUserAcl = userRight;
-    parentDirGroupAcl = groupRight;
-    expectedAclResult = expectedResult;
-
-    createVolume(vol);
-    createBucket(vol, buck);
-    createKey(vol, buck, key);
-  }
-
-  @BeforeClass
-  public static void setup() throws Exception {
-    ozConfig = new OzoneConfiguration();
-    ozConfig.set(OZONE_ACL_AUTHORIZER_CLASS,
-        OZONE_ACL_AUTHORIZER_CLASS_NATIVE);
-    File dir = GenericTestUtils.getRandomizedTestDir();
-    ozConfig.set(OZONE_METADATA_DIRS, dir.toString());
-    ozConfig.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD);
-
-    metadataManager = new OmMetadataManagerImpl(ozConfig);
-    volumeManager = new VolumeManagerImpl(metadataManager, ozConfig);
-    bucketManager = new BucketManagerImpl(metadataManager);
-    prefixManager = new PrefixManagerImpl(metadataManager, false);
-
-    NodeManager nodeManager = new MockNodeManager(true, 10);
-    SCMConfigurator configurator = new SCMConfigurator();
-    configurator.setScmNodeManager(nodeManager);
-    scm = TestUtils.getScm(ozConfig, configurator);
-    scm.start();
-    scm.exitSafeMode();
-    keyManager =
-        new KeyManagerImpl(scm.getBlockProtocolServer(), metadataManager,
-            ozConfig,
-            "om1", null);
-
-    nativeAuthorizer = new OzoneNativeAuthorizer(volumeManager, bucketManager,
-        keyManager, prefixManager);
-    ugi = UserGroupInformation.getCurrentUser();
-  }
-
-  private void createKey(String volume,
-      String bucket, String keyName) throws IOException {
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volume)
-        .setBucketName(bucket)
-        .setKeyName(keyName)
-        .setFactor(HddsProtos.ReplicationFactor.ONE)
-        .setDataSize(0)
-        .setType(HddsProtos.ReplicationType.STAND_ALONE)
-        .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(),
-            ALL, ALL))
-        .build();
-
-    if (keyName.split(OZONE_URI_DELIMITER).length > 1) {
-      keyManager.createDirectory(keyArgs);
-      key = key + OZONE_URI_DELIMITER;
-    } else {
-      OpenKeySession keySession = keyManager.createFile(keyArgs, true, false);
-      keyArgs.setLocationInfoList(
-          keySession.getKeyInfo().getLatestVersionLocations()
-              .getLocationList());
-      keyManager.commitKey(keyArgs, keySession.getId());
-    }
-
-    keyObj = new OzoneObjInfo.Builder()
-        .setVolumeName(vol)
-        .setBucketName(buck)
-        .setKeyName(key)
-        .setResType(KEY)
-        .setStoreType(OZONE)
-        .build();
-  }
-
-  private void createBucket(String volumeName, String bucketName)
-      throws IOException {
-    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .build();
-    TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo);
-    buckObj = new OzoneObjInfo.Builder()
-        .setVolumeName(vol)
-        .setBucketName(buck)
-        .setResType(BUCKET)
-        .setStoreType(OZONE)
-        .build();
-  }
-
-  private void createVolume(String volumeName) throws IOException {
-    OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder()
-        .setVolume(volumeName)
-        .setAdminName("bilbo")
-        .setOwnerName("bilbo")
-        .build();
-    TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs);
-    volObj = new OzoneObjInfo.Builder()
-        .setVolumeName(vol)
-        .setResType(VOLUME)
-        .setStoreType(OZONE)
-        .build();
-  }
-
-  @Test
-  public void testCheckAccessForVolume() throws Exception {
-    expectedAclResult = true;
-    resetAclsAndValidateAccess(volObj, USER, volumeManager);
-    resetAclsAndValidateAccess(volObj, GROUP, volumeManager);
-    resetAclsAndValidateAccess(volObj, WORLD, volumeManager);
-    resetAclsAndValidateAccess(volObj, ANONYMOUS, volumeManager);
-  }
-
-  @Test
-  public void testCheckAccessForBucket() throws Exception {
-
-    OzoneAcl userAcl = new OzoneAcl(USER, ugi.getUserName(), parentDirUserAcl,
-        ACCESS);
-    OzoneAcl groupAcl = new OzoneAcl(GROUP, ugi.getGroups().size() > 0 ?
-        ugi.getGroups().get(0) : "", parentDirGroupAcl, ACCESS);
-    // Set access for volume.
-    volumeManager.setAcl(volObj, Arrays.asList(userAcl, groupAcl));
-
-    resetAclsAndValidateAccess(buckObj, USER, bucketManager);
-    resetAclsAndValidateAccess(buckObj, GROUP, bucketManager);
-    resetAclsAndValidateAccess(buckObj, WORLD, bucketManager);
-    resetAclsAndValidateAccess(buckObj, ANONYMOUS, bucketManager);
-  }
-
-  @Test
-  public void testCheckAccessForKey() throws Exception {
-    OzoneAcl userAcl = new OzoneAcl(USER, ugi.getUserName(), parentDirUserAcl,
-        ACCESS);
-    OzoneAcl groupAcl = new OzoneAcl(GROUP, ugi.getGroups().size() > 0 ?
-        ugi.getGroups().get(0) : "", parentDirGroupAcl, ACCESS);
-    // Set access for volume, bucket & prefix.
-    volumeManager.setAcl(volObj, Arrays.asList(userAcl, groupAcl));
-    bucketManager.setAcl(buckObj, Arrays.asList(userAcl, groupAcl));
-    //prefixManager.setAcl(prefixObj, Arrays.asList(userAcl, groupAcl));
-
-    resetAclsAndValidateAccess(keyObj, USER, keyManager);
-    resetAclsAndValidateAccess(keyObj, GROUP, keyManager);
-    resetAclsAndValidateAccess(keyObj, WORLD, keyManager);
-    resetAclsAndValidateAccess(keyObj, ANONYMOUS, keyManager);
-  }
-
-  @Test
-  public void testCheckAccessForPrefix() throws Exception {
-    prefixObj = new OzoneObjInfo.Builder()
-        .setVolumeName(vol)
-        .setBucketName(buck)
-        .setPrefixName(prefix)
-        .setResType(PREFIX)
-        .setStoreType(OZONE)
-        .build();
-
-    OzoneAcl userAcl = new OzoneAcl(USER, ugi.getUserName(), parentDirUserAcl,
-        ACCESS);
-    OzoneAcl groupAcl = new OzoneAcl(GROUP, ugi.getGroups().size() > 0 ?
-        ugi.getGroups().get(0) : "", parentDirGroupAcl, ACCESS);
-    // Set access for volume & bucket.
-    volumeManager.setAcl(volObj, Arrays.asList(userAcl, groupAcl));
-    bucketManager.setAcl(buckObj, Arrays.asList(userAcl, groupAcl));
-
-    resetAclsAndValidateAccess(prefixObj, USER, prefixManager);
-    resetAclsAndValidateAccess(prefixObj, GROUP, prefixManager);
-    resetAclsAndValidateAccess(prefixObj, WORLD, prefixManager);
-    resetAclsAndValidateAccess(prefixObj, ANONYMOUS, prefixManager);
-  }
-
-  private void resetAclsAndValidateAccess(OzoneObj obj,
-      ACLIdentityType accessType, IOzoneAcl aclImplementor)
-      throws IOException {
-
-    List<OzoneAcl> acls;
-    String user = "";
-    String group = "";
-
-    user = ugi.getUserName();
-    if (ugi.getGroups().size() > 0) {
-      group = ugi.getGroups().get(0);
-    }
-
-    RequestContext.Builder builder = new RequestContext.Builder()
-        .setClientUgi(ugi)
-        .setAclType(accessType);
-
-    // Get all acls.
-    List<ACLType> allAcls =
-        Arrays.stream(ACLType.values()).collect(Collectors.toList());
-
-    /*
-     * 1. Reset the default acls to a single acl.
-     * 2. Test that the user/group has access only to it.
-     * 3. Add the remaining acls one by one and, after each addition,
-     *    test that the user/group has access to it.
-     */
-    for (ACLType a1 : allAcls) {
-      OzoneAcl newAcl = new OzoneAcl(accessType, getAclName(accessType), a1,
-          ACCESS);
-
-      // Reset acls to only one right.
-      aclImplementor.setAcl(obj, Arrays.asList(newAcl));
-
-      // Fetch current acls and validate.
-      acls = aclImplementor.getAcl(obj);
-      assertEquals(1, acls.size());
-      assertTrue(acls.contains(newAcl));
-
-      // Special handling for ALL.
-      if (a1.equals(ALL)) {
-        validateAll(obj, builder);
-        continue;
-      }
-
-      // Special handling for NONE.
-      if (a1.equals(NONE)) {
-        validateNone(obj, builder);
-        continue;
-      }
-      assertEquals("Acl to check:" + a1 + " accessType:" +
-              accessType + " path:" + obj.getPath(),
-          expectedAclResult, nativeAuthorizer.checkAccess(obj,
-              builder.setAclRights(a1).build()));
-
-      List<ACLType> aclsToBeValidated =
-          Arrays.stream(ACLType.values()).collect(Collectors.toList());
-      List<ACLType> aclsToBeAdded =
-          Arrays.stream(ACLType.values()).collect(Collectors.toList());
-      aclsToBeValidated.remove(NONE);
-      aclsToBeValidated.remove(a1);
-
-      aclsToBeAdded.remove(NONE);
-      aclsToBeAdded.remove(ALL);
-
-      // Fetch acls again.
-      for (ACLType a2 : aclsToBeAdded) {
-        if (!a2.equals(a1)) {
-
-          acls = aclImplementor.getAcl(obj);
-          List<List<ACLType>> rights = acls.stream()
-              .map(a -> a.getAclList()).collect(Collectors.toList());
-          assertFalse("Did not expect client to have " + a2 + " acl. " +
-                  "Current acls found:" + rights + ". Type:" + accessType + ","
-                  + " name:" + (accessType == USER ? user : group),
-              nativeAuthorizer.checkAccess(obj,
-                  builder.setAclRights(a2).build()));
-
-          // Randomize next type.
-          int type = RandomUtils.nextInt(0, 3);
-          ACLIdentityType identityType = ACLIdentityType.values()[type];
-          // Add remaining acls one by one and then check access.
-          OzoneAcl addAcl = new OzoneAcl(identityType, 
-              getAclName(identityType), a2, ACCESS);
-          aclImplementor.addAcl(obj, addAcl);
-
-          // Fetch acls again.
-          acls = aclImplementor.getAcl(obj);
-          boolean a2AclFound = false;
-          boolean a1AclFound = false;
-          for (OzoneAcl acl : acls) {
-            if (acl.getAclList().contains(a2)) {
-              a2AclFound = true;
-            }
-            if (acl.getAclList().contains(a1)) {
-              a1AclFound = true;
-            }
-          }
-
-          assertTrue("Current acls :" + acls + ". " +
-              "Type:" + accessType + ", name:" + (accessType == USER ? user
-              : group) + " acl:" + a2, a2AclFound);
-          assertTrue("Expected client to have " + a1 + " acl. Current acls " +
-              "found:" + acls + ". Type:" + accessType +
-              ", name:" + (accessType == USER ? user : group), a1AclFound);
-          assertEquals("Current acls " + acls + ". Expect acl:" + a2 +
-                  " to be set? " + expectedAclResult + " accessType:"
-                  + accessType, expectedAclResult,
-              nativeAuthorizer.checkAccess(obj,
-                  builder.setAclRights(a2).build()));
-          aclsToBeValidated.remove(a2);
-          for (ACLType a3 : aclsToBeValidated) {
-            if (!a3.equals(a1) && !a3.equals(a2)) {
-              assertFalse("User shouldn't have right " + a3 + ". " +
-                      "Current acl rights for user:" + a1 + "," + a2,
-                  nativeAuthorizer.checkAccess(obj,
-                      builder.setAclRights(a3).build()));
-            }
-          }
-        }
-      }
-    }
-
-  }
-
-  private String getAclName(ACLIdentityType identityType) {
-    switch (identityType) {
-    case USER:
-      return ugi.getUserName();
-    case GROUP:
-      if (ugi.getGroups().size() > 0) {
-        return ugi.getGroups().get(0);
-      }
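-      // falls through to default when the user belongs to no groups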
-    default:
-      return "";
-    }
-  }
-
-  /**
-   * Helper function to test acl rights when the user/group has the ALL
-   * acl bit set.
-   * @param obj ozone object whose access is being validated.
-   * @param builder request context builder used for the access checks.
-   */
-  private void validateAll(OzoneObj obj, RequestContext.Builder builder)
-      throws OMException {
-    List<ACLType> allAcls = new ArrayList<>(Arrays.asList(ACLType.values()));
-    allAcls.remove(ALL);
-    allAcls.remove(NONE);
-    for (ACLType a : allAcls) {
-      assertEquals("User should have right " + a + ".", expectedAclResult,
-          nativeAuthorizer.checkAccess(obj, builder.setAclRights(a).build()));
-    }
-  }
-
-  /**
-   * Helper function to test acl rights when the user/group has the NONE
-   * acl bit set.
-   * @param obj ozone object whose access is being validated.
-   * @param builder request context builder used for the access checks.
-   */
-  private void validateNone(OzoneObj obj, RequestContext.Builder builder)
-      throws OMException {
-    List<ACLType> allAcls = new ArrayList<>(Arrays.asList(ACLType.values()));
-    allAcls.remove(NONE);
-    for (ACLType a : allAcls) {
-      assertFalse("User shouldn't have right " + a + ".",
-          nativeAuthorizer.checkAccess(obj, builder.setAclRights(a).build()));
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/resources/auditlog.properties b/hadoop-ozone/integration-test/src/test/resources/auditlog.properties
deleted file mode 100644
index 19daa6f..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/auditlog.properties
+++ /dev/null
@@ -1,76 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with this
-# work for additional information regarding copyright ownership.  The ASF
-# licenses this file to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# <p>
-# http://www.apache.org/licenses/LICENSE-2.0
-# <p>
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-name=PropertiesConfig
-
-# Checks for configuration changes every 5 seconds and reloads
-monitorInterval=5
-
-filter=read, write
-# filter.read.onMatch = DENY suppresses logging of all READ events
-# filter.read.onMatch = ACCEPT permits logging of all READ events
-# (the two settings above ignore the log levels in this configuration)
-# filter.read.onMatch = NEUTRAL permits logging of only those READ events
-# which are attempted at a log level equal to or greater than the level
-# specified in this configuration
-filter.read.type = MarkerFilter
-filter.read.marker = READ
-filter.read.onMatch = NEUTRAL
-filter.read.onMismatch = NEUTRAL
-
-# filter.write.onMatch = DENY suppresses logging of all WRITE events
-# filter.write.onMatch = ACCEPT permits logging of all WRITE events
-# (the two settings above ignore the log levels in this configuration)
-# filter.write.onMatch = NEUTRAL permits logging of only those WRITE events
-# which are attempted at a log level equal to or greater than the level
-# specified in this configuration
-filter.write.type = MarkerFilter
-filter.write.marker = WRITE
-filter.write.onMatch = NEUTRAL
-filter.write.onMismatch = NEUTRAL
-
-# Log Levels are organized from most specific to least:
-# OFF (most specific, no logging)
-# FATAL (most specific, little data)
-# ERROR
-# WARN
-# INFO
-# DEBUG
-# TRACE (least specific, a lot of data)
-# ALL (least specific, all data)
-
-appenders = console, audit
-appender.console.type = Console
-appender.console.name = STDOUT
-appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = %d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-
-appender.audit.type = File
-appender.audit.name = AUDITLOG
-appender.audit.fileName=audit.log
-appender.audit.layout.type=PatternLayout
-appender.audit.layout.pattern= %d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-
-loggers=audit
-logger.audit.type=AsyncLogger
-logger.audit.name=OMAudit
-logger.audit.level = INFO
-logger.audit.appenderRefs = audit
-logger.audit.appenderRef.file.ref = AUDITLOG
-
-rootLogger.level = INFO
-rootLogger.appenderRefs = stdout
-rootLogger.appenderRef.stdout.ref = STDOUT
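The MarkerFilter pairing above only takes effect when audit events actually carry the READ/WRITE markers. A minimal Log4j2 sketch of what such an emitting call looks like, assuming the OMAudit logger name from this configuration (the class name and message fields are illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.Marker;
    import org.apache.logging.log4j.MarkerManager;

    public final class AuditMarkerSketch {
      // Logger name matches logger.audit.name=OMAudit above.
      private static final Logger AUDIT = LogManager.getLogger("OMAudit");
      private static final Marker READ = MarkerManager.getMarker("READ");
      private static final Marker WRITE = MarkerManager.getMarker("WRITE");

      public static void main(String[] args) {
        // With onMatch=NEUTRAL, both events pass through the filter and the
        // INFO level on logger.audit decides whether they are written.
        AUDIT.info(READ, "user=bilbo | op=READ_KEY | key=/vol/buck/key");
        AUDIT.info(WRITE, "user=bilbo | op=CREATE_BUCKET | bucket=/vol/buck");
      }
    }

Flipping filter.read.onMatch to DENY would then drop the first call's event while leaving WRITE events untouched.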
diff --git a/hadoop-ozone/integration-test/src/test/resources/core-site.xml b/hadoop-ozone/integration-test/src/test/resources/core-site.xml
deleted file mode 100644
index 77dd7ef..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/core-site.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-</configuration>
diff --git a/hadoop-ozone/integration-test/src/test/resources/hdfs-site.xml b/hadoop-ozone/integration-test/src/test/resources/hdfs-site.xml
deleted file mode 100644
index 77dd7ef..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/hdfs-site.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-</configuration>
diff --git a/hadoop-ozone/integration-test/src/test/resources/log4j.properties b/hadoop-ozone/integration-test/src/test/resources/log4j.properties
deleted file mode 100644
index b8ad21d..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,21 +0,0 @@
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-# log4j configuration used during build and unit tests
-
-log4j.rootLogger=info,stdout
-log4j.threshold=ALL
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
-
-log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
-log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/ca.crt b/hadoop-ozone/integration-test/src/test/resources/ssl/ca.crt
deleted file mode 100644
index 501be3c..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/ssl/ca.crt
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEpDCCAowCCQDu0m7J8pzvvjANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDDAls
-b2NhbGhvc3QwHhcNMTgxMjA2MDY1ODE1WhcNMTkxMjA2MDY1ODE1WjAUMRIwEAYD
-VQQDDAlsb2NhbGhvc3QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDU
-T3bUikPWsSLLYWpeg/S9Zko/X3aijj6INIC1pGD4IX40neKHqWLgkkhGQnM8nFa4
-soUz3Ewv/N+RPnUhLhXaMyD5Ca6h1+6g5839E9QLXcpd5OSwQ2iu/JLsrWb5Belb
-7PN60izZIbgRuA4lWmqeI9dEwQDIwbaPJW8HFlnUM5Ci5MM/CxmV/IdCSXocBKQ3
-whLP+vhLnhjOWMgFhhzdL1zObonbHVLONLBROnbFc+1zxI7gt7RD3OqTsnToXotZ
-Jy3CfN5TxzwuP6xRDj3xciwIsGwE9tSE4H3a8DbYKfEUzyS/OlWeHboFiumRxcm8
-qkA5tmOA4+AoPLigsrJpxgtQR/0YwjI8yn+Hh79g+rZCckoR0Fs/OYEpXw6xg92o
-YUzDA1SrRHd43r4xI0BDP/660fsbYtRk56WVmCQHNTKvJpeDlyg9qYwzWvZZSrPL
-vO9qJ0k1SUbnEd4StPUmF/UQfdVfkdcR86j7ZLXJ9ZLhcWJjVlXeXfwnEl2/ctSt
-RJROogM4ourc6sNNLOuFboLpnMEd5n8bijtoFG9vEJ0Cb//Zez942OEJa7db8fu1
-TEGPZzJTxnlgMIvaTrRdAE2VoZN2fzyIBF33wFgV4vgvllO61qeBH/SUFlpcOOo4
-LReY6bZxoKPlL9sG8ZHauQeq/uX+hhX50VP4cV1g+wIDAQABMA0GCSqGSIb3DQEB
-CwUAA4ICAQApJDDPq2cmn3JWEfabkc3YxX62Q0qNyXDv+hY/O3zrJBbvJ74lEu9k
-UPBk/oMIAZQGk/yvU5jBpJ1SndqB8ONnZcnOs7mDoqABcO9C8bB+kTmTXmxeZvcu
-ZnF/3wkzuecYndcZwfC4Yt76DDny3gEMKruEbr51aehLkqYQOI5EGrrtc3Q2HE4D
-z5H5CfzltaUajAkE8X+Iw6aVnEFrKbP5+VuQunMSi0lmmlBcpiVU6iyULt5LPNY5
-SbsEVgqUVekX0Qnn31ojabXOZJr4qK8/J+h5cGzaOQxGHopYqd34QjvlvZZnGCjd
-6MrlO9WF0KWBJyJxPuLI0j3qNyrRF253ZBTOzow9jl4EZ3nsNe0WDgxUsv1qRqlv
-CR4wKiCY9+Ti85k1KC1xQt6LEi0PRgE+rTpINGhWHQKOwkXwdZPdmPeXu9MFnDjt
-iEEudugRrxGscTWMIOThL7HQhdGHPg6eCgdxLZ+q/pW0t3NKa+oMBuXFrOlBOwwE
-iC9dpXPsd2S6wC5V33pj07WnIj+/+L/ViJvGimcudh/wj4KRhatsdFPjUBQI1b+E
-tJm8gbVRYrueHhlvSfD09BKkf4aQRJ7RW18SQrLbHhqO3g6jZa8HciQiVxF0YC3x
-qZh2A7b1BgOcqpFKEJp7k1U4qeH7H8hFm7vghGOnrd8bLE8viJRlxA==
------END CERTIFICATE-----
diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/ca.key b/hadoop-ozone/integration-test/src/test/resources/ssl/ca.key
deleted file mode 100644
index e53eef9..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/ssl/ca.key
+++ /dev/null
@@ -1,54 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-Proc-Type: 4,ENCRYPTED
-DEK-Info: DES-EDE3-CBC,39D6E9EAB7ABCD69
-
-NvhRc8RQDMtbbJXEilb8mKEcVCJHhew278cdoJZyep5G2VvifpZJk207+ZZo2YFt
-hPlX5xtlOd+zmdj4nVsRVpDA5Vd/pF+PmRIbfoIwbgkcPIuUtRGoJtIhPa0zlUYu
-uyc85h1/YOAkxzaTq4fVgUr4pOb+GLlkve6Q1nKFif5q58uMYBzyHRrf/VsWbip7
-dL1CEoY8Alv+mph++ckauY+5zAM6CBXBpFK3bbVw1bj3QNFi8ISsEQjrzpMnm8qa
-7ZTNWN2gJ8QykbUxkFM5WCwb6iSkLdnV2zE/hUPLfK/ec3FGPJ6Qpyi/nY3urNTU
-6ett8g+W+/8ZdxZt7YWlIVfiHE/dQyqlTvGKCjnODEfdwcMvMOsDTAwTH2AkFf6c
-9sNSVIYScmnc9mu4MqYJN4uNZz0qcBA+QFBw0WtYRg2J4A6CD0YOINCt9aBuY8hf
-p1FxMCiZs8pEju5ujS9J/NRTpWGbCIQxq/9j9TJc5N/Pb9Rp4LOOeGSt+7iRbSnt
-BYV54MzS2zP+qwSZSWeyyhw0yagg1GP9b7RhvaZzzRAX9Ey4IHQAyWnjj0H6qq/K
-dEes4GRcbSlQyxIfgoY7wH3P6IaZraykVLHHzUcn4iQlmxZehh0ppVuqSBn0Mmql
-pK0mtoSETxBJAoyjorIsA1rpr0T8yGmmH5Gkhwn4npJxfJwOOygd3rOhAncC26Bk
-f8O/0RNnF9frt2N0XLDg0/HlLdsyiEu/rEWIIrHY3m29Z4BT/i2rPGwtO9aweVHQ
-JoORgZIdPTf9qT/PCyJSbna4N3AChgAMLzM1nacqKTQ99jhOmA2Z1iXf8umaOrv0
-6sWHwx+W8Ax5JiG4bWknePm02LVmpGxt3o0aecjzOBqrx+VdtJ4wRboq/LskqRcP
-X9sIO2q1r01Lt4nZiwLNO/OFaVUzVW1IddTS9oE6gM9vFZsOjDgyu8jxwZjeYmHy
-fWim4Rvc2w86vCmqx6Ff5TCsEJIqQ9QvIT+CvqMk9f09ftiCeuf93LrRDtcBGrir
-LS8Dd7nFV7bsdjYAGeDY01NzwDzZ+LV46BeGwjDDuiTANeJCGHBnoZaIsnTYGCzc
-U7ZEdlga7zMTGaIDPEHe7e4pyZFP6bubFPy+rxxXQfA/w4YfjgRyR8uXW9Fq3oWX
-Utz65aMUV2owxlsWhIBwrKJoJAYxXaST5V7PDAZ+h661bILMhl+m0XrlWofA6dr8
-Yfr67aDbRjXRD8J5poq/+fP8D4NUdoa4GCo3TXl39af7vEXSkE7CIu/UidZlFr4o
-2tCCUC7P/ZHtl+6durlQ4gBwpFPB9s1aqAA/8l0wDDUki0e1Pft0AZ/00LLkzlU2
-fwxfyYsA2L4M/mjCviPibi7VoUJjWZd28L8ixyKOopUT82gQ6eCs91kusMbZPI2t
-0wxGHhm41ij28xkMI3iK9mF9Of3N5D1XlK5SDN1lJ43dYXSYDay52KL28n/Pu6F5
-UyAkJIbDhHmNTQ2bjYTl2xtxdlKF7SfbJ1LlySrpmnmm9f63vm4jj+xTHbO1pUTM
-PPrxnfgdX7+E9/ZuiKNJoY9XXpPvLxA2aCvGWMjsYnuJ1d1TGrhI0BqMOIgq7G8N
-chdS9TP/eGihJO2vUyqcQWwKhNTFpwDH9/VomTaLglMB8SjPQHMrV/WrCjGPj1ql
-Oc4eVm2oBOkIeae3eaKU8xMKDaUrOEWjT1E7o+mhpK0pfmMg/qpjD75ZuBCMRTNS
-Ihgo2KBFzygE+T2lnbPQGtUkwPFEzeZVTzL/fmOrQ72UGovS8e2NmYy2Lqrwwl8Q
-xouYMWO85xVJhocd/mstl41y+Xl2v1oULEYLoznJDd3IWm4zkUW78KmZ45unIBAy
-zkLoO2OssTsc4n6Qb1/d4KEahgBIE1NyiWl1eh3cZAeBbt4zuMZ/3wOSo2ErK15z
-oxjH/eEti6tP0Fe/FCiBnW3fCs7vN4CkAFISrEJo28J9e0UjBsfEacZv89Lf9ued
-wH/jkdk4q2o068Uf3piLaBgaugIlFcjS9h2Mzwwdbvcs5HT5pRztZDhm8CFMOjEd
-nkAdshTEkJ2UDQPIDWl2LcYfLWY0/dMToMEfefkurd73RaTdkWqDaulBpvzFILzJ
-Kh3is/AyOlnEKYmcafvH0S+dAIH+LVI7tkaJQDNS6uftSF30q9faNHNzbvjbPd5N
-YMOZDARDILRvHrVPAA1NzSnedJiM1iG4gqKC/sC7CyfxX8hW0tvk65KZ6jVEgthg
-nEcrUZxI7YajnNKJJi6LslO8dX4rULEGPMCtwgCA26EANe8uvk0GrH7PLjcaVtOE
-1O3WL3HDy6tdfnFCNL5W8IlFP7x7yVgf5xlwurV6AW1kMokuF20UCaQs15c+/+ob
-ge3Q9w5RsWs/2iyxZ6QyDcMKPpkeyRJUjGqcOmTcFDGwbiShckkTK1vGIHWMk/8I
-oLuAC/yAbNEL3ROmD554AuJDK0PAS2+zND+eB8steJxuBouVaroDzS1QJUg9TFkR
-VaFjYCOjMHsIPZ3WjwzofCQsL3waPOfYIeHtWOULqRWtb1GGQZTdxk2Q/rb4U8Eh
-x3zngQzIynGegWXi+1ZTJAoDNCEPBB65u3JVU8hLKlLCmjnAh5UW0dVSWkDIERU3
-9sPvpaJherJQeUzwnSdMCQrbyhXR63nlJQUILyr/pvKS6cHIC8U2rCO7NEn36qHD
-nYEL/1cFmf+3zb1KauoOHbbTbvIcw6xNGGcGJOhzQL4WF0M3vgvwCQscSmFUXP9i
-gCdstl0viQkjRGkMWoynfVC0MYypNdEsVvLpE/IsjWugtwrKK+4s+gx1C0+tlMf9
-XGo+gfz1haHtDoxckfFG1vDXjxOaxsjsS3xMqJqFMzlph1lMI5d0RK5JASZfrim/
-v4B5bhpBLmE/JViCZbm8wD2a5GsfuutpPZj9KF+A9hWGOm1vm9hC7OAJfBzQ8NgC
-agSDqGt74mtmHs59ueR/oq/JXlUNBZvxnAGaWd8/n3e4nijwiyQ6uhxRSIq/CJ12
-wguxZNCSDBd/Eec+7bBSlbHUsAHf3XdjQ5Qi8eAq7XNgjw3iATBGsDvwVZ6QXin2
-2WCSzSqecCEwYCA5E4WCfn72SkF6Ls+1VWOPQBLCYpB/bvgjAxgsfkVBqpRlTTs1
------END RSA PRIVATE KEY-----
diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/client.crt b/hadoop-ozone/integration-test/src/test/resources/ssl/client.crt
deleted file mode 100644
index f093a70..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/ssl/client.crt
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEnDCCAoQCAQEwDQYJKoZIhvcNAQEFBQAwFDESMBAGA1UEAwwJbG9jYWxob3N0
-MB4XDTE4MTIwNjA2NTkwM1oXDTE5MTIwNjA2NTkwM1owFDESMBAGA1UEAwwJbG9j
-YWxob3N0MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAmfmrFVX0mDZE
-VZ0cI3V0Kzy8PprNcMc8vl8K02v3yAuJaigv+FJxE7jAa31sTHco/SN1kCKf9fiG
-Gf7lhLqTutDCkjXxF7LhGBmC+W0KFJG2Ucd88KPLzQKG7tOV5+4eoxOvQcfO41w/
-daAlrKuao2/hj2tUV2T4Sh7e5co11lt2ndP/eegFMq6hnP7sunZKjX+kETx8H7rL
-exBB8NP7yZnrzEspxsoMQ/GLfguSgrQOcwz0MkujFTBGfOlzdJSm1qIl5Y+ZnnjG
-kHM8bKN3/gO5vov0iqpCNIwTt8L2vFqEy2vuW1yj/A4qP7RLXpmWv+XI0v3Bbyff
-BvYNBfCzJCivPKOKIcPYfDQ/Y8DUrWOOevoCB8LkqsLtVmtgNJB5wDDbtQYLPz6i
-TSm1M9oxtSypRa/GyYDwZQq1wkRguqj1y5VI24SJ/zYrLr0DzzpUx2CuWV/X9BMq
-V87MxLEjcqqT743lvSx/6gAX987EhMvZfQETyHU1qON+P1V1fYUuhVb0kS0itBim
-Aa5zKLCthcIMLchJlje2GpHOSd+/hnDdiDZEwWmGaX0OOV9kfd0gQl0kSSvuVZsG
-nHL2VIWcIUJEp/sW2IklZHS1W1B0M8WjFKCZGS84MyL7CllT759AS1pHF6HVVuH+
-7sNvTOnf5lVy1XmLigocFiB0sc8Uul0CAwEAATANBgkqhkiG9w0BAQUFAAOCAgEA
-uRurFJyKrNVQ/QLKaVDTORukuct7wfw/+FWDKdBEzD6styCVKrXHfSa3ZZS6Wv1f
-XR2PrLuW6oJoGonVvt7xj086Vu7Dt+dB8JZIOn1QgNCNlocsVvEptZ6fKPfqcF6J
-cZDcgXhFxB4dY/qV+TfcOKpF4sMJhqJXMh6xtJWskc6Saj0O7xQD/XnL0PeJbrk0
-l9ZiLWzxkXaYomM5YHolMdwpZSjpm7hHzr8cbNmWQLPl4NHvNrEvnDgLa7MTuLS/
-Zf3Yi/RtJIbA1ew1Kqs4zdA3jd/eTNCuVTxgj8VM1WR8i5li/kVv69wd20fO0nWq
-EWpRIMMTzKGfYSCM4SUTTQXfmvg6o/dzM/p5NCQPyQPnEVGzxxJQ8NetM1dCjidl
-F+ZzjW++DppwIIV8Ntah9tZIvATyCbIJSrX6ntsjnz7C1yZWqgkbbc3sTy9tQTJS
-7Oa1sub8PdTj8gIlGdrRGDoVJ6fy/XQJkf0LuvadL5h7um2iL093Y5W5MS43hI8i
-18qO4udxTXN+Xk+YZHBXvruLhE/QTm2KizPjA+EMU17zSQEybpwqCFshjyGjiJ2i
-UFx5Cllg/QSqxKmSc2vTGCOM5T7+SaD5byg2x+f49pt0tXsFFmTphFNvdlKW9NJ2
-GXACHF0k7kh+q0a5ajb8nupIxkbtyvBEY7/y+XCj9zw=
------END CERTIFICATE-----
diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/client.csr b/hadoop-ozone/integration-test/src/test/resources/ssl/client.csr
deleted file mode 100644
index 38ecdb1..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/ssl/client.csr
+++ /dev/null
@@ -1,26 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIIEWTCCAkECAQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MIICIjANBgkqhkiG9w0B
-AQEFAAOCAg8AMIICCgKCAgEAmfmrFVX0mDZEVZ0cI3V0Kzy8PprNcMc8vl8K02v3
-yAuJaigv+FJxE7jAa31sTHco/SN1kCKf9fiGGf7lhLqTutDCkjXxF7LhGBmC+W0K
-FJG2Ucd88KPLzQKG7tOV5+4eoxOvQcfO41w/daAlrKuao2/hj2tUV2T4Sh7e5co1
-1lt2ndP/eegFMq6hnP7sunZKjX+kETx8H7rLexBB8NP7yZnrzEspxsoMQ/GLfguS
-grQOcwz0MkujFTBGfOlzdJSm1qIl5Y+ZnnjGkHM8bKN3/gO5vov0iqpCNIwTt8L2
-vFqEy2vuW1yj/A4qP7RLXpmWv+XI0v3BbyffBvYNBfCzJCivPKOKIcPYfDQ/Y8DU
-rWOOevoCB8LkqsLtVmtgNJB5wDDbtQYLPz6iTSm1M9oxtSypRa/GyYDwZQq1wkRg
-uqj1y5VI24SJ/zYrLr0DzzpUx2CuWV/X9BMqV87MxLEjcqqT743lvSx/6gAX987E
-hMvZfQETyHU1qON+P1V1fYUuhVb0kS0itBimAa5zKLCthcIMLchJlje2GpHOSd+/
-hnDdiDZEwWmGaX0OOV9kfd0gQl0kSSvuVZsGnHL2VIWcIUJEp/sW2IklZHS1W1B0
-M8WjFKCZGS84MyL7CllT759AS1pHF6HVVuH+7sNvTOnf5lVy1XmLigocFiB0sc8U
-ul0CAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4ICAQCZCPz6ps4cqB0KPFk7aRtE0Ga8
-MvnEbreFJ7UyVknUDz6cqW9Jsx0OpvCPbh6C/iXqBMx5tD1ZQwVRmqhNTwGzg1zN
-27PDtx+7SEa+vc0IM3qNilff2TS0G4LMPpp1K3VOwAb9bQCM2CCqRtEnwmC8rQc3
-ZZYmo5+EEFgzgsZ43k2bOvytEcWhcnviUfYc7PHxiWLxrwEoqQCBT0YWLGqjqR0k
-Zm6O8f+y4U+f25e2h/Wjt+qMERoZq2v/chpcvav0l/zHFTClPg8E/BflQnllys8K
-Z1nOgb2qpB5FID7ighVLggL/iSVQU91XX6+TAATBtNCuAYBp/89UBmBkwgkHRzhb
-eFSSjZtIBpFzDpcx1dKE2RQuySEk9K7aC9BMeh5m2DFVZDUZJi0qXNfex/KuVA5q
-jgX88axjQDtn4BqkPTLR5/SLNk1MIZydiVQewTd2zmmHboJKiozjMWdd/+/79xuJ
-zxPFfx5yIkGvipk0Tn6AdtW/YgxqhocUl/cpq4gYBFxzqJiHTfODVHZhV+svrFy8
-fm/f4DxMa6Fl5hqnoJHM0KVw/OYoGujSV8ER73gxzYSAHpAW7dWJqD1MBy6OU2a2
-uICQutBInoITDDtyH/9Uqkw4PfWrdrcwPEkPG+LrvgRgc2Gd8cFv1bXyJWNRRpMc
-GsAeGqu8EGrQkRmfOQ==
------END CERTIFICATE REQUEST-----
diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/client.key b/hadoop-ozone/integration-test/src/test/resources/ssl/client.key
deleted file mode 100644
index 0286a3f..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/ssl/client.key
+++ /dev/null
@@ -1,51 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIJKAIBAAKCAgEAmfmrFVX0mDZEVZ0cI3V0Kzy8PprNcMc8vl8K02v3yAuJaigv
-+FJxE7jAa31sTHco/SN1kCKf9fiGGf7lhLqTutDCkjXxF7LhGBmC+W0KFJG2Ucd8
-8KPLzQKG7tOV5+4eoxOvQcfO41w/daAlrKuao2/hj2tUV2T4Sh7e5co11lt2ndP/
-eegFMq6hnP7sunZKjX+kETx8H7rLexBB8NP7yZnrzEspxsoMQ/GLfguSgrQOcwz0
-MkujFTBGfOlzdJSm1qIl5Y+ZnnjGkHM8bKN3/gO5vov0iqpCNIwTt8L2vFqEy2vu
-W1yj/A4qP7RLXpmWv+XI0v3BbyffBvYNBfCzJCivPKOKIcPYfDQ/Y8DUrWOOevoC
-B8LkqsLtVmtgNJB5wDDbtQYLPz6iTSm1M9oxtSypRa/GyYDwZQq1wkRguqj1y5VI
-24SJ/zYrLr0DzzpUx2CuWV/X9BMqV87MxLEjcqqT743lvSx/6gAX987EhMvZfQET
-yHU1qON+P1V1fYUuhVb0kS0itBimAa5zKLCthcIMLchJlje2GpHOSd+/hnDdiDZE
-wWmGaX0OOV9kfd0gQl0kSSvuVZsGnHL2VIWcIUJEp/sW2IklZHS1W1B0M8WjFKCZ
-GS84MyL7CllT759AS1pHF6HVVuH+7sNvTOnf5lVy1XmLigocFiB0sc8Uul0CAwEA
-AQKCAgADpg/wzH2kUbzizntJN9JN5/2J+j8eCgqddEEca3WOrv9NnbAtUT7OudUN
-dwZm9XfqL7nsdXWW7ZG38ftcXtN7XNEPh+mzpxCAcrJQ2M2hWSaZ34FNboQ40nOC
-G091FIZzVNcVVvfHGXuDfQ0Hf3WFo/QTYva3r3PWxc6AYX9PGhHAgbKPH/lnjw3T
-W5MehAkWO00W/3jtg46o1uTJISzZRSV6TNmrlUQfJA0rKnkJUdz5yvfKbVJrAR7a
-fOm4fIFLmsINI47/W1tRNvnalTEVut7e7hAYbRpuhlc9Roh0RCzbaS5XyeU05t0H
-b21Ny5Pv7jEJFuxLhwVY8+GxH1gPXOJUkpoDS4gE9MOE6lq3oBj1Q+KFX2lGjlxn
-fpOjFfuFTAgmsr7dzYP29T7X4rgixEQ6PKba8lhITq7Emaxer2bYlmyD9UlqVZHb
-GjW9o7GcD5YRnbvxy5XMTbNVQatlOTOGmB+XkyfuJCwiSKT51HuR+YOLgtZASahS
-0vDQduy26s9hWPMc6/+oy3eVBRMBrU+T7M5qkIFsPrDC4nRkZDhpxBpnAJm4yRwo
-Bl+SWMD5DIXEwVuQfB8xBsM2sOSlT8/kVoiTze0X/F8ZGLLAFzUOillvNKu//69C
-tQURH1RhA6AFlQOBVXkCDP7OSDmAYVXTynJL5FRMRPXz5sDHHQKCAQEAyZh/9Mjx
-cFgf4NCKT26iPaoeoPPl6tAjsSEiW+jG8XozbF2uXOLbAgZguLMcrZDxXD+CNE3m
-QnXr8Pi7SBMzhBfntVP9knEzCVwXYodk1fJdrZu41WL0/PspHzOw6+Q0MKSgySJ9
-aEbu67EgBkfZjAlAqDNXl5dwcOKX8KN9vKVe0Uj8OI6PWgyE0pbkelfUP6BWWq/R
-2Ws6MMcHMHfw1Jku4rR+ybjbR+tnqXzC+M36RqjD+igcbjLwzx8Ab5Zo4sbpcufc
-4KvY7S9nYb5h5IffnCsaGMBGeJwHNfzDaFuncwDiAtveryWPBUsdoABrT9MeX6L0
-uzQRlwQPQo1XZwKCAQEAw4c/vlFxWWJHbTwhf/cbqatuLT4PEC88yRvYk3vFDNkY
-dnKyR1AxZwMJa0V9SKXLftEzmE3uFzVL8IPx+lRkZmLrab9lJiAc8z9nIFKSDCcc
-MP2opo14fPK0ID7AMe96YWomHENhmcZdzaOYMMoRC9J312PQ5+ChodEl8vlUY/PU
-WB/vPAtYfJEdZPJwnIhXSdae0uwghyzqlJ963Dxmh4dxJLh24PYLBVdhsgP21z8H
-Du4KM4jtkHHlAz7GTdkt1H7fmFodsowFmc6SNu+22iiZ1XQrP4V++Umu24Gttw7I
-f2rvZHDsQE9Qk6K84g7a/LAtvhO3U9H+uYmMgjJZmwKCAQBDL/IlUPs2qAgn0xjl
-lEe6KYJ/vgm4kpnypMpgu1nijQmqaiZ8ipbXO+zsYbWDGzV1uyzX5caCC+8QprU0
-NkILGjR9OHrgXZ3W1rxseBdhPp9+BtI5O/vOfJ6d6Ypjc/D47UUxA6+sG0fxgVzc
-+wFELKlB5aqhuTUeSka9Sp/TSYIqWhrFdq3MIzP5Q5TuOWthsTxWiRZ1UclZDFwX
-CUJYeJ0prWI8NMHQXGJ2GECaz3tEJWb7bnbbO1sKjJiGmChovEZ9p0z0DBIGKrBX
-4S2bDrW1xJ+z9BEIjWfR1GYD19gc+gRZU5IJ6YibCQfclYcuWXxb/2F1KstZ+15i
-ndytAoIBAGvqWtEkzCWkK33roSWqcfccKcwIo3GwUKFCoC8OMbycmXbOaP0ZEpsj
-PvCYwsP01bKhrhNSd6URgl81w7kBGQS1de7Adwgq0y+h/74ENJ1GfLXBWnLKRATa
-Q3ZEi/lDjkzztCMHQXgI1r7nmtjavbvDpucXLTa9cRgJgiNvXxdnfPxCa9y8+lKO
-GSYc9PBAA8U6EiChuHZC4Rm0R7AEGiaVJ2o38UzKH10MVFxW+cbk/3VLBhBZc5y0
-b8xxuis/QZ81gxzoJ9nilDjGnUZ62XXg0L7RxgjiGilmdH6sPP96xkgk8gmClbIM
-1JEXUZ6GynCKoER3R0iY7zjh5M37Eh8CggEBAKIWY+cRumpBZAlWRIvp0DGToWbM
-2GhuFi3Pd83DiifBGsKDNbqqnPQzxy0uqmGp8ollHFXDDt9ZlhWE0jcIa4Pb8ymv
-toR36hGtGq1g0TggTy+OJneuHp27pzqSd/8VvIrxQEoag5pzLMPCoJniRTi78Nhg
-60OkMJz0ycnrP79LyCK0OjJetoDMZLSvEy9XE7oV3L45l1rbFlcha6RFaGKa9ApW
-Hl7E0pcbSWrUstTw8ywH3Dj3qgViDam+DuiDe3BaewCQlElVBHQyxbRVWID6m5jI
-eR4RgzIebd9g5Pa7Q/GAt2qAREWCYjLvUmNEIGbXtKY/w0WCCqlgOyUPPC8=
------END RSA PRIVATE KEY-----
diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/client.pem b/hadoop-ozone/integration-test/src/test/resources/ssl/client.pem
deleted file mode 100644
index 508e465..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/ssl/client.pem
+++ /dev/null
@@ -1,52 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCZ+asVVfSYNkRV
-nRwjdXQrPLw+ms1wxzy+XwrTa/fIC4lqKC/4UnETuMBrfWxMdyj9I3WQIp/1+IYZ
-/uWEupO60MKSNfEXsuEYGYL5bQoUkbZRx3zwo8vNAobu05Xn7h6jE69Bx87jXD91
-oCWsq5qjb+GPa1RXZPhKHt7lyjXWW3ad0/956AUyrqGc/uy6dkqNf6QRPHwfust7
-EEHw0/vJmevMSynGygxD8Yt+C5KCtA5zDPQyS6MVMEZ86XN0lKbWoiXlj5meeMaQ
-czxso3f+A7m+i/SKqkI0jBO3wva8WoTLa+5bXKP8Dio/tEtemZa/5cjS/cFvJ98G
-9g0F8LMkKK88o4ohw9h8ND9jwNStY456+gIHwuSqwu1Wa2A0kHnAMNu1Bgs/PqJN
-KbUz2jG1LKlFr8bJgPBlCrXCRGC6qPXLlUjbhIn/NisuvQPPOlTHYK5ZX9f0EypX
-zszEsSNyqpPvjeW9LH/qABf3zsSEy9l9ARPIdTWo434/VXV9hS6FVvSRLSK0GKYB
-rnMosK2FwgwtyEmWN7Yakc5J37+GcN2INkTBaYZpfQ45X2R93SBCXSRJK+5Vmwac
-cvZUhZwhQkSn+xbYiSVkdLVbUHQzxaMUoJkZLzgzIvsKWVPvn0BLWkcXodVW4f7u
-w29M6d/mVXLVeYuKChwWIHSxzxS6XQIDAQABAoICAAOmD/DMfaRRvOLOe0k30k3n
-/Yn6Px4KCp10QRxrdY6u/02dsC1RPs651Q13Bmb1d+ovuex1dZbtkbfx+1xe03tc
-0Q+H6bOnEIByslDYzaFZJpnfgU1uhDjSc4IbT3UUhnNU1xVW98cZe4N9DQd/dYWj
-9BNi9revc9bFzoBhf08aEcCBso8f+WePDdNbkx6ECRY7TRb/eO2DjqjW5MkhLNlF
-JXpM2auVRB8kDSsqeQlR3PnK98ptUmsBHtp86bh8gUuawg0jjv9bW1E2+dqVMRW6
-3t7uEBhtGm6GVz1GiHRELNtpLlfJ5TTm3QdvbU3Lk+/uMQkW7EuHBVjz4bEfWA9c
-4lSSmgNLiAT0w4TqWregGPVD4oVfaUaOXGd+k6MV+4VMCCayvt3Ng/b1PtfiuCLE
-RDo8ptryWEhOrsSZrF6vZtiWbIP1SWpVkdsaNb2jsZwPlhGdu/HLlcxNs1VBq2U5
-M4aYH5eTJ+4kLCJIpPnUe5H5g4uC1kBJqFLS8NB27Lbqz2FY8xzr/6jLd5UFEwGt
-T5PszmqQgWw+sMLidGRkOGnEGmcAmbjJHCgGX5JYwPkMhcTBW5B8HzEGwzaw5KVP
-z+RWiJPN7Rf8XxkYssAXNQ6KWW80q7//r0K1BREfVGEDoAWVA4FVeQIM/s5IOYBh
-VdPKckvkVExE9fPmwMcdAoIBAQDJmH/0yPFwWB/g0IpPbqI9qh6g8+Xq0COxISJb
-6MbxejNsXa5c4tsCBmC4sxytkPFcP4I0TeZCdevw+LtIEzOEF+e1U/2ScTMJXBdi
-h2TV8l2tm7jVYvT8+ykfM7Dr5DQwpKDJIn1oRu7rsSAGR9mMCUCoM1eXl3Bw4pfw
-o328pV7RSPw4jo9aDITSluR6V9Q/oFZar9HZazowxwcwd/DUmS7itH7JuNtH62ep
-fML4zfpGqMP6KBxuMvDPHwBvlmjixuly59zgq9jtL2dhvmHkh9+cKxoYwEZ4nAc1
-/MNoW6dzAOIC296vJY8FSx2gAGtP0x5fovS7NBGXBA9CjVdnAoIBAQDDhz++UXFZ
-YkdtPCF/9xupq24tPg8QLzzJG9iTe8UM2Rh2crJHUDFnAwlrRX1Ipct+0TOYTe4X
-NUvwg/H6VGRmYutpv2UmIBzzP2cgUpIMJxww/aimjXh88rQgPsAx73phaiYcQ2GZ
-xl3No5gwyhEL0nfXY9Dn4KGh0SXy+VRj89RYH+88C1h8kR1k8nCciFdJ1p7S7CCH
-LOqUn3rcPGaHh3EkuHbg9gsFV2GyA/bXPwcO7goziO2QceUDPsZN2S3Uft+YWh2y
-jAWZzpI277baKJnVdCs/hX75Sa7bga23Dsh/au9kcOxAT1CTorziDtr8sC2+E7dT
-0f65iYyCMlmbAoIBAEMv8iVQ+zaoCCfTGOWUR7opgn++CbiSmfKkymC7WeKNCapq
-JnyKltc77OxhtYMbNXW7LNflxoIL7xCmtTQ2QgsaNH04euBdndbWvGx4F2E+n34G
-0jk7+858np3pimNz8PjtRTEDr6wbR/GBXNz7AUQsqUHlqqG5NR5KRr1Kn9NJgipa
-GsV2rcwjM/lDlO45a2GxPFaJFnVRyVkMXBcJQlh4nSmtYjw0wdBcYnYYQJrPe0Ql
-Zvtudts7WwqMmIaYKGi8Rn2nTPQMEgYqsFfhLZsOtbXEn7P0EQiNZ9HUZgPX2Bz6
-BFlTkgnpiJsJB9yVhy5ZfFv/YXUqy1n7XmKd3K0CggEAa+pa0STMJaQrfeuhJapx
-9xwpzAijcbBQoUKgLw4xvJyZds5o/RkSmyM+8JjCw/TVsqGuE1J3pRGCXzXDuQEZ
-BLV17sB3CCrTL6H/vgQ0nUZ8tcFacspEBNpDdkSL+UOOTPO0IwdBeAjWvuea2Nq9
-u8Om5xctNr1xGAmCI29fF2d8/EJr3Lz6Uo4ZJhz08EADxToSIKG4dkLhGbRHsAQa
-JpUnajfxTMofXQxUXFb5xuT/dUsGEFlznLRvzHG6Kz9BnzWDHOgn2eKUOMadRnrZ
-deDQvtHGCOIaKWZ0fqw8/3rGSCTyCYKVsgzUkRdRnobKcIqgRHdHSJjvOOHkzfsS
-HwKCAQEAohZj5xG6akFkCVZEi+nQMZOhZszYaG4WLc93zcOKJ8EawoM1uqqc9DPH
-LS6qYanyiWUcVcMO31mWFYTSNwhrg9vzKa+2hHfqEa0arWDROCBPL44md64enbun
-OpJ3/xW8ivFAShqDmnMsw8KgmeJFOLvw2GDrQ6QwnPTJyes/v0vIIrQ6Ml62gMxk
-tK8TL1cTuhXcvjmXWtsWVyFrpEVoYpr0ClYeXsTSlxtJatSy1PDzLAfcOPeqBWIN
-qb4O6IN7cFp7AJCUSVUEdDLFtFVYgPqbmMh5HhGDMh5t32Dk9rtD8YC3aoBERYJi
-Mu9SY0QgZte0pj/DRYIKqWA7JQ88Lw==
------END PRIVATE KEY-----
diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/generate.sh b/hadoop-ozone/integration-test/src/test/resources/ssl/generate.sh
deleted file mode 100755
index 5eb5ff2..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/ssl/generate.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-# Change these CNs to match the hosts in your environment if needed.
-SERVER_CN=localhost
-# Used when doing mutual TLS
-CLIENT_CN=localhost
-
-echo Generate CA key:
-openssl genrsa -passout pass:1111 -des3 -out ca.key 4096
-echo Generate CA certificate:
-# Generates ca.crt which is the trustCertCollectionFile
-openssl req -passin pass:1111 -new -x509 -days 365 -key ca.key -out ca.crt -subj "/CN=${SERVER_CN}"
-echo Generate server key:
-openssl genrsa -passout pass:1111 -des3 -out server.key 4096
-echo Generate server signing request:
-openssl req -passin pass:1111 -new -key server.key -out server.csr -subj "/CN=${SERVER_CN}"
-echo Generate CA-signed server certificate:
-# Generates server.crt which is the certChainFile for the server
-openssl x509 -req -passin pass:1111 -days 365 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt
-echo Remove passphrase from server key:
-openssl rsa -passin pass:1111 -in server.key -out server.key
-echo Generate client key:
-openssl genrsa -passout pass:1111 -des3 -out client.key 4096
-echo Generate client signing request:
-openssl req -passin pass:1111 -new -key client.key -out client.csr -subj "/CN=${CLIENT_CN}"
-echo Generate CA-signed client certificate:
-# Generates client.crt which is the clientCertChainFile for the client (needed for mutual TLS only)
-openssl x509 -passin pass:1111 -req -days 365 -in client.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out client.crt
-echo Remove passphrase from client key:
-openssl rsa -passin pass:1111 -in client.key -out client.key
-echo Convert the private keys to PKCS#8:
-# Generates client.pem which is the clientPrivateKeyFile for the Client (needed for mutual TLS only)
-openssl pkcs8 -topk8 -nocrypt -in client.key -out client.pem
-# Generates server.pem which is the privateKeyFile for the Server
-openssl pkcs8 -topk8 -nocrypt -in server.key -out server.pem
-
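The comments in the script name the three files a gRPC client needs for mutual TLS: ca.crt (trustCertCollectionFile), client.crt (clientCertChainFile) and client.pem (clientPrivateKeyFile). A minimal sketch of wiring them into a Netty-based gRPC channel, assuming grpc-netty is on the classpath; the class name, host and port are placeholders:

    import java.io.File;
    import io.grpc.ManagedChannel;
    import io.grpc.netty.GrpcSslContexts;
    import io.grpc.netty.NettyChannelBuilder;
    import io.netty.handler.ssl.SslContext;

    public final class MutualTlsChannelSketch {
      public static void main(String[] args) throws Exception {
        SslContext sslContext = GrpcSslContexts.forClient()
            .trustManager(new File("ca.crt"))       // CA that signed server.crt
            .keyManager(new File("client.crt"),     // client certificate chain
                new File("client.pem"))             // PKCS#8 private key
            .build();
        ManagedChannel channel = NettyChannelBuilder
            .forAddress("localhost", 9999)          // placeholder endpoint
            .sslContext(sslContext)
            .build();
      }
    }

Netty's keyManager expects the private key in PKCS#8 form, which is why the script's last step converts client.key and server.key with openssl pkcs8 -topk8.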
diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/server.crt b/hadoop-ozone/integration-test/src/test/resources/ssl/server.crt
deleted file mode 100644
index 88757ac..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/ssl/server.crt
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEnDCCAoQCAQEwDQYJKoZIhvcNAQEFBQAwFDESMBAGA1UEAwwJbG9jYWxob3N0
-MB4XDTE4MTIwNjA2NTgzNVoXDTE5MTIwNjA2NTgzNVowFDESMBAGA1UEAwwJbG9j
-YWxob3N0MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxXU3AclIWZJm
-iqNfUaurfSrgUFbbCSmhce1R0lVIKqscCJUpE1XNTWVXVCY7FWt9S1bJwFIzOYqN
-zEqaGDIiemxfHyN87mz1pKZsIS8tL6+TBxcVsDQ5tMBXT8+jnOTvffCYeSDPL4tr
-iTqmXvT3Qhc1YbjK9MbMIf+sXRWKzg9OADAVvO7GQCEiLtlfBRUIwXHAyRMtclHR
-x8r6VrRZH3tHTr2ruixpFIH5/Ak+s8Wq+nNsqWGiMj15wlDG+pPiRzTcnJNoH4zs
-R1D2trv2Qd87dxf/j0e441XyT0PEuqMmrXUKWy3JvF849EO4yNrns1LTYgnHawgu
-9ahStUqaPYnE8dcR8GnDZoJHQ8BtQt3/X8F5LnoVxZPn89jdKPBY4foQ/XE7kwlO
-U7JE6FATwsdUPwq2WgmPDqlVe2cvvCxyp6ZBQrM3EpLSew57oJZzDU5T8jqLkwqV
-7pJyxYtz2sETsbeq7SJhS66pkP64/A4L03/gVh+OYlGJJqbX1GwYLbzexZAv166+
-eVK1IbFDhYCGdqinJfCfgCrAPnnRhuSAWvXLvaYJCHQIiu10umebLoaLjBjg/z2v
-tXyl+sX7Lx127JgDXJUsiWSKVKCbGVd+d5e9cxdngWhnq0/cYgUKSwKZcA4eZ4CX
-yA+8O4bUdhPZNfbGvuHCSdvMv6cgmT0CAwEAATANBgkqhkiG9w0BAQUFAAOCAgEA
-J0VSeWd8nScizyFr74hCFcwRtdtTaPtkHaHTPpBVrGl7Wygsajao5LS3dBZt7h4S
-uq4fVH2vPjjbPrdWbQZ0wmCzqaiGy75ZAglwIReosazXCBaaxYpWDZxOcgl/CCdr
-1A3Ls84QzDGYGsVNlEhvyEkjWOw1urAqC49aKZdSle4Z0pagfHn9Bg0zjLyHTvS2
-BxWDUCEJmaNf7NwO2PFL5lAaA62rQyWK7VQkOsFPKjb9lY2/+R6AZnB/dLyWgFaY
-wqbdzjjFkQRdjJPnf2azfh7Td+Z02H/b5h+B7KK1VDHv4R6INSlaci8SoUital8B
-UtAhKjzbI+4MCx12zPPf5sp/g9jxKopnpNsKBrTdwe/h6iJ9mpOhAMgpXAxMKftA
-EHoI1bnyRVUcbQPUFGQYecT7bRqANhZLB5ysUenk09jNQRFcWXl9MJhLjq8LvO/w
-DXvQKVLEDQs9idJpuf9wjAIow0QxLE7zsAY6ZKFXiYas60cKcH6BtLc0eGxvgF5a
-42b84B28nmjVoZUJzmeKPSpxMd9o/nTXFud3jbUBdXfaoNvqZIdNmKPvytbcKTil
-4QVjcNhQEo76YWEfkFx5ZmyvxGWwwPcOmeT87BhK7ma6s1AMi1m6/rTpsizbPiuK
-ZXnEuIZagK3AHUEEAWi3ZeGvAqGPZW/jUPL4xOO296c=
------END CERTIFICATE-----
diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/server.csr b/hadoop-ozone/integration-test/src/test/resources/ssl/server.csr
deleted file mode 100644
index 3c1c5d6..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/ssl/server.csr
+++ /dev/null
@@ -1,26 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIIEWTCCAkECAQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MIICIjANBgkqhkiG9w0B
-AQEFAAOCAg8AMIICCgKCAgEAxXU3AclIWZJmiqNfUaurfSrgUFbbCSmhce1R0lVI
-KqscCJUpE1XNTWVXVCY7FWt9S1bJwFIzOYqNzEqaGDIiemxfHyN87mz1pKZsIS8t
-L6+TBxcVsDQ5tMBXT8+jnOTvffCYeSDPL4triTqmXvT3Qhc1YbjK9MbMIf+sXRWK
-zg9OADAVvO7GQCEiLtlfBRUIwXHAyRMtclHRx8r6VrRZH3tHTr2ruixpFIH5/Ak+
-s8Wq+nNsqWGiMj15wlDG+pPiRzTcnJNoH4zsR1D2trv2Qd87dxf/j0e441XyT0PE
-uqMmrXUKWy3JvF849EO4yNrns1LTYgnHawgu9ahStUqaPYnE8dcR8GnDZoJHQ8Bt
-Qt3/X8F5LnoVxZPn89jdKPBY4foQ/XE7kwlOU7JE6FATwsdUPwq2WgmPDqlVe2cv
-vCxyp6ZBQrM3EpLSew57oJZzDU5T8jqLkwqV7pJyxYtz2sETsbeq7SJhS66pkP64
-/A4L03/gVh+OYlGJJqbX1GwYLbzexZAv166+eVK1IbFDhYCGdqinJfCfgCrAPnnR
-huSAWvXLvaYJCHQIiu10umebLoaLjBjg/z2vtXyl+sX7Lx127JgDXJUsiWSKVKCb
-GVd+d5e9cxdngWhnq0/cYgUKSwKZcA4eZ4CXyA+8O4bUdhPZNfbGvuHCSdvMv6cg
-mT0CAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4ICAQB5EuNt1A6Q+AO80t+08wEeV6/a
-sJlLZKkEww4yMajMFo/i8zr70jW/9Garc74pzhF054XpLrTwMTLlPFfMaf3wDtMy
-8v6Oh3jaroiYVLn14KbkxM2UCkwY0rh1eu0e9HVpM2763Ycc27Bgt5DQJ0h8tU/P
-S+knwmEACIjimQIrqpgB2lPYU68cvCmifLjyzJ93mGdgOllKoXshv1uhGFNACBMj
-xYt+bWSP+uZx/aFU0tPcXdo4b6QmlE43iLcDFduf8nSNcSldvXquXUjWvRVFMqUr
-7gzmvCV9uekJHSW8ftORB3O9Q8OmBMQ0WLHexE/zcXmXNILHBMIKvYe7K5CCOU7h
-6q5aBmsZkdPwVeY8FGtLShj3ljRKyxdCddN3zzouRmKWHId5QSDD4fZhyCtH4DvP
-E0GLyJkZnHvQ8/HCCLEltNSjL9tXRj5aO/RqCqNAHUmhc9LcItS+wUJPVZBEo6Np
-+4pSMI2Vm97wD9qV1soGz/KwpFpj69sn8klQWVAdTKJ6bCF5028sh+UT9sVynq33
-Cp7Zbg/soNAYWGVNffcz/3vCumMTRJGTDkAap0xcHlhqGo8t4OoDSRVTWoci35or
-aV17gDiE5Q0s0IP/lnkoPAp45CB+GIhjuPqXPpBUOZ+4YM/furhDUoYoXaRo15Ru
-75qeGYFfwO9cnTZT5Q==
------END CERTIFICATE REQUEST-----
diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/server.key b/hadoop-ozone/integration-test/src/test/resources/ssl/server.key
deleted file mode 100644
index de16c35..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/ssl/server.key
+++ /dev/null
@@ -1,51 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIJJwIBAAKCAgEAxXU3AclIWZJmiqNfUaurfSrgUFbbCSmhce1R0lVIKqscCJUp
-E1XNTWVXVCY7FWt9S1bJwFIzOYqNzEqaGDIiemxfHyN87mz1pKZsIS8tL6+TBxcV
-sDQ5tMBXT8+jnOTvffCYeSDPL4triTqmXvT3Qhc1YbjK9MbMIf+sXRWKzg9OADAV
-vO7GQCEiLtlfBRUIwXHAyRMtclHRx8r6VrRZH3tHTr2ruixpFIH5/Ak+s8Wq+nNs
-qWGiMj15wlDG+pPiRzTcnJNoH4zsR1D2trv2Qd87dxf/j0e441XyT0PEuqMmrXUK
-Wy3JvF849EO4yNrns1LTYgnHawgu9ahStUqaPYnE8dcR8GnDZoJHQ8BtQt3/X8F5
-LnoVxZPn89jdKPBY4foQ/XE7kwlOU7JE6FATwsdUPwq2WgmPDqlVe2cvvCxyp6ZB
-QrM3EpLSew57oJZzDU5T8jqLkwqV7pJyxYtz2sETsbeq7SJhS66pkP64/A4L03/g
-Vh+OYlGJJqbX1GwYLbzexZAv166+eVK1IbFDhYCGdqinJfCfgCrAPnnRhuSAWvXL
-vaYJCHQIiu10umebLoaLjBjg/z2vtXyl+sX7Lx127JgDXJUsiWSKVKCbGVd+d5e9
-cxdngWhnq0/cYgUKSwKZcA4eZ4CXyA+8O4bUdhPZNfbGvuHCSdvMv6cgmT0CAwEA
-AQKCAgBx2vWdzQ8vvs/rruo+cGtQoBF5oato7B1QUNQ2IMCdAc8HT+LAaGAZ+Y5S
-Uj0NS86SS3fHsl4hFrhOjNGvk/D3gFeU3+Sgoik+CEwfElHOxkFT/EagNGz1wVZX
-CdZAmG1TxBBW/8kXlB+soCngZQXRkQpRz7kPTTXVgNRFVC+WQ5LpXtCaAWBFCBXq
-x6IXjxpeWJYeGzXATldVCcAxkIo3MeFbENjdX9AzaALaBgamqBq/kSCdxlM8/t+f
-YO5q/CykfGGc0w5d6ucu9AteMKF9OBfUwvy0BFoik6NFe0ELkRmzOOKA0rUZLhrs
-FcSN5FNnviFuzU60c6KIOcd/C4ZFYvFS8TZu3DX3KnwVeyOhvXXNOnHfYL1ELwmj
-8K9BIkixBENkB64mq3lvf84SbflcgVsD1V+ALwF84YY+Zgq9pFrv4PxtS//1pxlD
-P7/V+oS+G5RvcvcwPjug0z/EXaeyIehvJs1C/829clMvGc7+WuzxIZX26yFGESOC
-z29aRUlNRDibQ0qkgS4+mCNp2xLj3PwgTBDxIxWyynOOCzQKp01vlXU5lxW1IMP9
-8JAZ75jNUJseMiCG9OnPrlRwHv9vomUihUZjwiJXLlH4DIibU3T0fGK4PAykESCM
-HyUK2bx93UtJl/uTIMtoekz9pyeM+3JqHPQKfhjCdr1kZoXZAQKCAQEA+QNSx8Y5
-wnbXZSD9vdWA4n5RC2DIHSENNedx7S1ubTg++cR6Fs10dKHrhIPEybh7zmyR1uwF
-/Yz8FixNXuzuCf0E3dwSxq242Ja9pKoB5rc0uKC8F+58uJl3EBeMX81KjA1K+7Xj
-L2GlJdeCZm0y56JHmufR1nWd7K0J1DVNVHU5MOQt1ZQb1qenXGI67MxwgUXnzYdS
-EtxOeinPUMsaZGM2ZwLfCVt+heOCcKJht9WAu+kstm9rD5ArEy8EB0yYBoEp1WSH
-KCN0K2sAc0brHrCtfXtfImqAZUzD1+FCuuoukvdsbL03e7PeTlSuSjlKgWjxY/Sc
-ND6zy/iRayAMwQKCAQEAyv+RWDhirgODp7ufmBPFlDzF5dPUek/kMGk754hJ9S06
-eLnFAsR23jddZGSzH2nOIaSfJ/5mWRqJMFIXXqlU/juJWhyeBr7DPvx4i0wxJdks
-2Z35LQkCeZqmaNwrxIy71lrZ7qlElOquj864mZiZp6WIrgQF68HIIzDqZOM2VGZJ
-RzZTmw21aNjzWBl7LdEFKMQhMJGmknH2YN7INk8pgyQz2u5MUoCYeMh7hVm11Kdz
-q7L8Ixc/ZRRFTgVD2wXF76tyD2OjZJt9iCHCIvyPh59upt/Ie/vYQHULHjKsPYy6
-ijHKDWyg/oaDBdY/JYYwU+ThLRnvVwsJpO3Jf68ffQKCAQAgTs0TvGVMFM03gsNJ
-OQVC3a64MjNkjCBBqSi/5BAavZx2HYbVpIyCgWukQtBqd7QggTee0fqo/fzLB652
-LXlo9FoISwBopKuB9nTeg2xBue1uMvSUik3GSasH/HYrC+CrMSJUbDHwuNOLiF2T
-2oErSoPN1lwEXjhCN+U5kjzZQ2hLLp+/wTqnbBMrylbo2FGUhDRiFzeP2OOZuAj8
-640eDz1Eujuj5CoTRwRqhrb0+g980fEKLoSOfV8JWyVDqS1kUqfR1vwuOgNdisGB
-M2dYEQZBbJtYRMcp3X7faIuW4sFuMgnwRdCIDTs/oH8IhExlY+9Fz7vgj24Wfcao
-Rn1BAoIBAD5kZJjX48SWQe3Y5gmI8i5Iq46jF+hsC7excH8OTaT0vMcEWgAqwFo2
-bBcCOGfMTlXa0iwpre1vEYFvic1HgF8Pj3zJ1Ow/z6TZVneB+I0offd47XAhF8im
-dsU9/pnPo6ATlm4bSn/2zaZXpDdZRsjXQPYzOFqo2cmvLCvMBhPUyGsB0JqUkRBj
-tg967Xg8iThpZ8YUzjyumEpXzvOaSykKhIGiwoSND8/31rc6xn9Q5GV+gq6KY6q+
-mzqKtbtov9iVOl5ugnbWr7OapJ+6PqcxooHZwDYTRvkwwDUM4BGe4mq9ONv9alIw
-p66wlgIDh3ERpQAGu6BmPRWbHFaJTcUCggEASrbK3C2se9fB8WGxfQJ6BLKrpuQF
-GQTWmNnj/Ie6Y2LZsM6cE9tNxVXJO2RZMmJSrSBOTmCf8CuUkZsI1aH1YncAvpWv
-C5aelEEGfX5cuRTVGHgMsyMxseghES+eKbUqgEbTYYv7363aSNsAkd/iPScGKVX1
-NQXe3yxXHuCiDfbwasZoCWn6fP9wPtQITVC3scYk5OMne6NwNXePlnBufg69UdTC
-2ygG93nOAgJ0AI2Q0Nx4bagIpEOGzOGEGwoYuSmq1LSx/Bno4vO2BlaLJVH6Zwhg
-m7aD2YwJSIotcF0zzfT7bbBIYxZflQYaYfE8b2sEwy3rYQLKD4wdZ0Qr3w==
------END RSA PRIVATE KEY-----
diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/server.pem b/hadoop-ozone/integration-test/src/test/resources/ssl/server.pem
deleted file mode 100644
index e3ca684..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/ssl/server.pem
+++ /dev/null
@@ -1,52 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQDFdTcByUhZkmaK
-o19Rq6t9KuBQVtsJKaFx7VHSVUgqqxwIlSkTVc1NZVdUJjsVa31LVsnAUjM5io3M
-SpoYMiJ6bF8fI3zubPWkpmwhLy0vr5MHFxWwNDm0wFdPz6Oc5O998Jh5IM8vi2uJ
-OqZe9PdCFzVhuMr0xswh/6xdFYrOD04AMBW87sZAISIu2V8FFQjBccDJEy1yUdHH
-yvpWtFkfe0dOvau6LGkUgfn8CT6zxar6c2ypYaIyPXnCUMb6k+JHNNyck2gfjOxH
-UPa2u/ZB3zt3F/+PR7jjVfJPQ8S6oyatdQpbLcm8Xzj0Q7jI2uezUtNiCcdrCC71
-qFK1Spo9icTx1xHwacNmgkdDwG1C3f9fwXkuehXFk+fz2N0o8Fjh+hD9cTuTCU5T
-skToUBPCx1Q/CrZaCY8OqVV7Zy+8LHKnpkFCszcSktJ7DnuglnMNTlPyOouTCpXu
-knLFi3PawROxt6rtImFLrqmQ/rj8DgvTf+BWH45iUYkmptfUbBgtvN7FkC/Xrr55
-UrUhsUOFgIZ2qKcl8J+AKsA+edGG5IBa9cu9pgkIdAiK7XS6Z5suhouMGOD/Pa+1
-fKX6xfsvHXbsmANclSyJZIpUoJsZV353l71zF2eBaGerT9xiBQpLAplwDh5ngJfI
-D7w7htR2E9k19sa+4cJJ28y/pyCZPQIDAQABAoICAHHa9Z3NDy++z+uu6j5wa1Cg
-EXmhq2jsHVBQ1DYgwJ0BzwdP4sBoYBn5jlJSPQ1LzpJLd8eyXiEWuE6M0a+T8PeA
-V5Tf5KCiKT4ITB8SUc7GQVP8RqA0bPXBVlcJ1kCYbVPEEFb/yReUH6ygKeBlBdGR
-ClHPuQ9NNdWA1EVUL5ZDkule0JoBYEUIFerHohePGl5Ylh4bNcBOV1UJwDGQijcx
-4VsQ2N1f0DNoAtoGBqaoGr+RIJ3GUzz+359g7mr8LKR8YZzTDl3q5y70C14woX04
-F9TC/LQEWiKTo0V7QQuRGbM44oDStRkuGuwVxI3kU2e+IW7NTrRzoog5x38LhkVi
-8VLxNm7cNfcqfBV7I6G9dc06cd9gvUQvCaPwr0EiSLEEQ2QHriareW9/zhJt+VyB
-WwPVX4AvAXzhhj5mCr2kWu/g/G1L//WnGUM/v9X6hL4blG9y9zA+O6DTP8Rdp7Ih
-6G8mzUL/zb1yUy8Zzv5a7PEhlfbrIUYRI4LPb1pFSU1EOJtDSqSBLj6YI2nbEuPc
-/CBMEPEjFbLKc44LNAqnTW+VdTmXFbUgw/3wkBnvmM1Qmx4yIIb06c+uVHAe/2+i
-ZSKFRmPCIlcuUfgMiJtTdPR8Yrg8DKQRIIwfJQrZvH3dS0mX+5Mgy2h6TP2nJ4z7
-cmoc9Ap+GMJ2vWRmhdkBAoIBAQD5A1LHxjnCdtdlIP291YDiflELYMgdIQ0153Ht
-LW5tOD75xHoWzXR0oeuEg8TJuHvObJHW7AX9jPwWLE1e7O4J/QTd3BLGrbjYlr2k
-qgHmtzS4oLwX7ny4mXcQF4xfzUqMDUr7teMvYaUl14JmbTLnokea59HWdZ3srQnU
-NU1UdTkw5C3VlBvWp6dcYjrszHCBRefNh1IS3E56Kc9QyxpkYzZnAt8JW36F44Jw
-omG31YC76Sy2b2sPkCsTLwQHTJgGgSnVZIcoI3QrawBzRusesK19e18iaoBlTMPX
-4UK66i6S92xsvTd7s95OVK5KOUqBaPFj9Jw0PrPL+JFrIAzBAoIBAQDK/5FYOGKu
-A4Onu5+YE8WUPMXl09R6T+QwaTvniEn1LTp4ucUCxHbeN11kZLMfac4hpJ8n/mZZ
-GokwUhdeqVT+O4laHJ4GvsM+/HiLTDEl2SzZnfktCQJ5mqZo3CvEjLvWWtnuqUSU
-6q6PzriZmJmnpYiuBAXrwcgjMOpk4zZUZklHNlObDbVo2PNYGXst0QUoxCEwkaaS
-cfZg3sg2TymDJDPa7kxSgJh4yHuFWbXUp3OrsvwjFz9lFEVOBUPbBcXvq3IPY6Nk
-m32IIcIi/I+Hn26m38h7+9hAdQseMqw9jLqKMcoNbKD+hoMF1j8lhjBT5OEtGe9X
-Cwmk7cl/rx99AoIBACBOzRO8ZUwUzTeCw0k5BULdrrgyM2SMIEGpKL/kEBq9nHYd
-htWkjIKBa6RC0Gp3tCCBN57R+qj9/MsHrnYteWj0WghLAGikq4H2dN6DbEG57W4y
-9JSKTcZJqwf8disL4KsxIlRsMfC404uIXZPagStKg83WXAReOEI35TmSPNlDaEsu
-n7/BOqdsEyvKVujYUZSENGIXN4/Y45m4CPzrjR4PPUS6O6PkKhNHBGqGtvT6D3zR
-8QouhI59XwlbJUOpLWRSp9HW/C46A12KwYEzZ1gRBkFsm1hExyndft9oi5biwW4y
-CfBF0IgNOz+gfwiETGVj70XPu+CPbhZ9xqhGfUECggEAPmRkmNfjxJZB7djmCYjy
-LkirjqMX6GwLt7Fwfw5NpPS8xwRaACrAWjZsFwI4Z8xOVdrSLCmt7W8RgW+JzUeA
-Xw+PfMnU7D/PpNlWd4H4jSh993jtcCEXyKZ2xT3+mc+joBOWbhtKf/bNplekN1lG
-yNdA9jM4WqjZya8sK8wGE9TIawHQmpSREGO2D3rteDyJOGlnxhTOPK6YSlfO85pL
-KQqEgaLChI0Pz/fWtzrGf1DkZX6Cropjqr6bOoq1u2i/2JU6Xm6Cdtavs5qkn7o+
-pzGigdnANhNG+TDANQzgEZ7iar042/1qUjCnrrCWAgOHcRGlAAa7oGY9FZscVolN
-xQKCAQBKtsrcLax718HxYbF9AnoEsqum5AUZBNaY2eP8h7pjYtmwzpwT203FVck7
-ZFkyYlKtIE5OYJ/wK5SRmwjVofVidwC+la8Llp6UQQZ9fly5FNUYeAyzIzGx6CER
-L54ptSqARtNhi/vfrdpI2wCR3+I9JwYpVfU1Bd7fLFce4KIN9vBqxmgJafp8/3A+
-1AhNULexxiTk4yd7o3A1d4+WcG5+Dr1R1MLbKAb3ec4CAnQAjZDQ3HhtqAikQ4bM
-4YQbChi5KarUtLH8Geji87YGVoslUfpnCGCbtoPZjAlIii1wXTPN9PttsEhjFl+V
-Bhph8TxvawTDLethAsoPjB1nRCvf
------END PRIVATE KEY-----
diff --git a/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep b/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep
deleted file mode 100644
index 09697dc..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
diff --git a/hadoop-ozone/integration-test/src/test/resources/webapps/scm/.gitkeep b/hadoop-ozone/integration-test/src/test/resources/webapps/scm/.gitkeep
deleted file mode 100644
index 09697dc..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/webapps/scm/.gitkeep
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml
deleted file mode 100644
index 653209b..0000000
--- a/hadoop-ozone/ozone-manager/pom.xml
+++ /dev/null
@@ -1,135 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-ozone-manager</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Manager Server</description>
-  <name>Apache Hadoop Ozone Manager Server</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-client</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-docs</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.bouncycastle</groupId>
-      <artifactId>bcprov-jdk15on</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <version>2.28.2</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.github.spotbugs</groupId>
-      <artifactId>spotbugs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-scm</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.jmockit</groupId>
-      <artifactId>jmockit</artifactId>
-      <version>1.24</version>
-      <scope>test</scope>
-    </dependency>
-
-  </dependencies>
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>copy-common-html</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>unpack</goal>
-            </goals>
-            <configuration>
-              <artifactItems>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-hdds-server-framework</artifactId>
-                  <outputDirectory>${project.build.outputDirectory}
-                  </outputDirectory>
-                  <includes>webapps/static/**/*.*</includes>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-hdds-docs</artifactId>
-                  <outputDirectory>${project.build.outputDirectory}/webapps/ozoneManager</outputDirectory>
-                  <includes>docs/**/*.*</includes>
-                </artifactItem>
-              </artifactItems>
-              <overWriteSnapshots>true</overWriteSnapshots>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-    <testResources>
-      <testResource>
-        <directory>${basedir}/../../hadoop-hdds/common/src/main/resources</directory>
-      </testResource>
-      <testResource>
-        <directory>${basedir}/src/test/resources</directory>
-      </testResource>
-    </testResources>
-  </build>
-</project>
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java
deleted file mode 100644
index 595ea43..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * BucketManager handles all the bucket level operations.
- */
-public interface BucketManager extends IOzoneAcl {
-  /**
-   * Creates a bucket.
-   * @param bucketInfo - OmBucketInfo for creating bucket.
-   */
-  void createBucket(OmBucketInfo bucketInfo) throws IOException;
-
-
-  /**
-   * Returns Bucket Information.
-   * @param volumeName - Name of the Volume.
-   * @param bucketName - Name of the Bucket.
-   */
-  OmBucketInfo getBucketInfo(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  void setBucketProperty(OmBucketArgs args) throws IOException;
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volumeName - Name of the volume.
-   * @param bucketName - Name of the bucket.
-   * @throws IOException
-   */
-  void deleteBucket(String volumeName, String bucketName) throws IOException;
-
-  /**
-   * Returns a list of buckets represented by {@link OmBucketInfo}
-   * in the given volume.
-   *
-   * @param volumeName
-   *   Required parameter. The volume whose buckets are returned.
-   * @param startBucket
-   *   Optional parameter. The bucket name to start the listing from;
-   *   this bucket itself is excluded from the result.
-   * @param bucketPrefix
-   *   Optional parameter. Restricts the response to buckets whose names
-   *   begin with the specified prefix.
-   * @param maxNumOfBuckets
-   *   The maximum number of buckets to return, bounding the size of
-   *   the result.
-   * @return a list of buckets.
-   * @throws IOException
-   */
-  List<OmBucketInfo> listBuckets(String volumeName,
-      String startBucket, String bucketPrefix, int maxNumOfBuckets)
-      throws IOException;
-
-}
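The startBucket/maxNumOfBuckets pair makes listBuckets a paginated call. A minimal sketch of how a caller might walk every bucket in a volume with this interface (the helper class and page size are illustrative):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.ozone.om.BucketManager;
    import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;

    public final class ListAllBucketsSketch {
      // Pages through a volume 100 buckets at a time. Because startBucket is
      // excluded from each result, passing the previous page's last bucket
      // name never returns a bucket twice.
      static void listAll(BucketManager buckets, String volume)
          throws IOException {
        String start = null;
        while (true) {
          List<OmBucketInfo> page =
              buckets.listBuckets(volume, start, null, 100);
          if (page.isEmpty()) {
            break;
          }
          page.forEach(b -> System.out.println(b.getBucketName()));
          start = page.get(page.size() - 1).getBucketName();
        }
      }
    }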
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
deleted file mode 100644
index d64eae4..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
+++ /dev/null
@@ -1,590 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-
-import org.apache.hadoop.crypto.CipherSuite;
-import org.apache.hadoop.crypto.CryptoProtocolVersion;
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.RequestContext;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-
-import com.google.common.base.Preconditions;
-import org.iq80.leveldb.DBException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
-
-/**
- * OM bucket manager.
- */
-public class BucketManagerImpl implements BucketManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BucketManagerImpl.class);
-
-  /**
-   * OMMetadataManager is used for accessing OM MetadataDB and ReadWriteLock.
-   */
-  private final OMMetadataManager metadataManager;
-  private final KeyProviderCryptoExtension kmsProvider;
-
-  /**
-   * Constructs BucketManager.
-   *
-   * @param metadataManager metadata manager used to access the OM DB.
-   */
-  public BucketManagerImpl(OMMetadataManager metadataManager) {
-    this(metadataManager, null, false);
-  }
-
-  public BucketManagerImpl(OMMetadataManager metadataManager,
-                           KeyProviderCryptoExtension kmsProvider) {
-    this(metadataManager, kmsProvider, false);
-  }
-
-  public BucketManagerImpl(OMMetadataManager metadataManager,
-      KeyProviderCryptoExtension kmsProvider, boolean isRatisEnabled) {
-    this.metadataManager = metadataManager;
-    this.kmsProvider = kmsProvider;
-  }
-
-  KeyProviderCryptoExtension getKMSProvider() {
-    return kmsProvider;
-  }
-
-  /**
-   * MetadataDB is maintained in MetadataManager and shared between
-   * BucketManager, VolumeManager, and BlockManager.
-   *
-   * BucketManager uses MetadataDB to store bucket level information.
-   *
-   * Keys used in BucketManager for storing data into MetadataDB
-   * for BucketInfo:
-   * {volume/bucket} -> bucketInfo
-   *
-   * Workflow of create bucket:
-   *
-   * -> Check if the Volume exists in metadataDB; if not, throw
-   * VolumeNotFoundException.
-   * -> Else check if the Bucket exists in metadataDB; if so, throw
-   * BucketExistException.
-   * -> Else update MetadataDB with the new BucketInfo.
-   */
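-
-  /*
-   * A hedged caller-side sketch of the workflow above (illustrative only;
-   * StorageType.DISK and the sample names are assumed values, not taken
-   * from this file):
-   *
-   *   OmBucketInfo info = OmBucketInfo.newBuilder()
-   *       .setVolumeName("vol1")
-   *       .setBucketName("bucket1")
-   *       .setStorageType(StorageType.DISK)
-   *       .setIsVersionEnabled(false)
-   *       .build();
-   *   // Throws VOLUME_NOT_FOUND or BUCKET_ALREADY_EXISTS as described.
-   *   bucketManager.createBucket(info);
-   */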
-
-  /**
-   * Creates a bucket.
-   *
-   * @param bucketInfo - OmBucketInfo.
-   */
-  @Override
-  public void createBucket(OmBucketInfo bucketInfo) throws IOException {
-    Preconditions.checkNotNull(bucketInfo);
-    String volumeName = bucketInfo.getVolumeName();
-    String bucketName = bucketInfo.getBucketName();
-    boolean acquiredBucketLock = false;
-    metadataManager.getLock().acquireLock(VOLUME_LOCK, volumeName);
-    try {
-      acquiredBucketLock = metadataManager.getLock().acquireLock(BUCKET_LOCK,
-          volumeName, bucketName);
-      String volumeKey = metadataManager.getVolumeKey(volumeName);
-      String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-      OmVolumeArgs volumeArgs = metadataManager.getVolumeTable().get(volumeKey);
-
-      //Check if the volume exists
-      if (volumeArgs == null) {
-        LOG.debug("volume: {} not found ", volumeName);
-        throw new OMException("Volume doesn't exist",
-            OMException.ResultCodes.VOLUME_NOT_FOUND);
-      }
-      //Check if bucket already exists
-      if (metadataManager.getBucketTable().get(bucketKey) != null) {
-        LOG.debug("bucket: {} already exists ", bucketName);
-        throw new OMException("Bucket already exist",
-            OMException.ResultCodes.BUCKET_ALREADY_EXISTS);
-      }
-      BucketEncryptionKeyInfo bek = bucketInfo.getEncryptionKeyInfo();
-      BucketEncryptionKeyInfo.Builder bekb = null;
-      if (bek != null) {
-        if (kmsProvider == null) {
-          throw new OMException("Invalid KMS provider, check configuration " +
-              CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH,
-              OMException.ResultCodes.INVALID_KMS_PROVIDER);
-        }
-        if (bek.getKeyName() == null) {
-          throw new OMException("Bucket encryption key needed.", OMException
-              .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND);
-        }
-        // Talk to KMS to retrieve the bucket encryption key info.
-        KeyProvider.Metadata metadata = getKMSProvider().getMetadata(
-            bek.getKeyName());
-        if (metadata == null) {
-          throw new OMException("Bucket encryption key " + bek.getKeyName()
-              + " doesn't exist.",
-              OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND);
-        }
-        // If the provider supports pool for EDEKs, this will fill in the pool
-        kmsProvider.warmUpEncryptedKeys(bek.getKeyName());
-        bekb = new BucketEncryptionKeyInfo.Builder()
-            .setKeyName(bek.getKeyName())
-            .setVersion(CryptoProtocolVersion.ENCRYPTION_ZONES)
-            .setSuite(CipherSuite.convert(metadata.getCipher()));
-      }
-      List<OzoneAcl> acls = new ArrayList<>(bucketInfo.getAcls());
-      volumeArgs.getAclMap().getDefaultAclList().forEach(
-          a -> acls.add(OzoneAcl.fromProtobufWithAccessType(a)));
-
-      OmBucketInfo.Builder omBucketInfoBuilder = OmBucketInfo.newBuilder()
-          .setVolumeName(bucketInfo.getVolumeName())
-          .setBucketName(bucketInfo.getBucketName())
-          .setAcls(acls)
-          .setStorageType(bucketInfo.getStorageType())
-          .setIsVersionEnabled(bucketInfo.getIsVersionEnabled())
-          .setCreationTime(Time.now())
-          .addAllMetadata(bucketInfo.getMetadata());
-
-      if (bekb != null) {
-        omBucketInfoBuilder.setBucketEncryptionKey(bekb.build());
-      }
-
-      OmBucketInfo omBucketInfo = omBucketInfoBuilder.build();
-      commitBucketInfoToDB(omBucketInfo);
-      LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
-    } catch (IOException | DBException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Bucket creation failed for bucket:{} in volume:{}",
-            bucketName, volumeName, ex);
-      }
-      throw ex;
-    } finally {
-      if (acquiredBucketLock) {
-        metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
-            bucketName);
-      }
-      metadataManager.getLock().releaseLock(VOLUME_LOCK, volumeName);
-    }
-  }
-
-  private void commitBucketInfoToDB(OmBucketInfo omBucketInfo)
-      throws IOException {
-    String dbBucketKey =
-        metadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-            omBucketInfo.getBucketName());
-    metadataManager.getBucketTable().put(dbBucketKey,
-        omBucketInfo);
-  }
-
-  /**
-   * Returns Bucket Information.
-   *
-   * @param volumeName - Name of the Volume.
-   * @param bucketName - Name of the Bucket.
-   */
-  @Override
-  public OmBucketInfo getBucketInfo(String volumeName, String bucketName)
-      throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
-        bucketName);
-    try {
-      String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-      OmBucketInfo value = metadataManager.getBucketTable().get(bucketKey);
-      if (value == null) {
-        LOG.debug("bucket: {} not found in volume: {}.", bucketName,
-            volumeName);
-        throw new OMException("Bucket not found",
-            BUCKET_NOT_FOUND);
-      }
-      return value;
-    } catch (IOException | DBException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Exception while getting bucket info for bucket: {}",
-            bucketName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-  }
-
-  /**
-   * Sets bucket property from args.
-   *
-   * @param args - BucketArgs.
-   * @throws IOException - On Failure.
-   */
-  @Override
-  public void setBucketProperty(OmBucketArgs args) throws IOException {
-    Preconditions.checkNotNull(args);
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName);
-    try {
-      String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-      OmBucketInfo oldBucketInfo =
-          metadataManager.getBucketTable().get(bucketKey);
-      //Check if bucket exists
-      if (oldBucketInfo == null) {
-        LOG.debug("bucket: {} not found ", bucketName);
-        throw new OMException("Bucket doesn't exist",
-            BUCKET_NOT_FOUND);
-      }
-      OmBucketInfo.Builder bucketInfoBuilder = OmBucketInfo.newBuilder();
-      bucketInfoBuilder.setVolumeName(oldBucketInfo.getVolumeName())
-          .setBucketName(oldBucketInfo.getBucketName());
-      bucketInfoBuilder.addAllMetadata(args.getMetadata());
-
-      //Check StorageType to update
-      StorageType storageType = args.getStorageType();
-      if (storageType != null) {
-        bucketInfoBuilder.setStorageType(storageType);
-        LOG.debug("Updating bucket storage type for bucket: {} in volume: {}",
-            bucketName, volumeName);
-      } else {
-        bucketInfoBuilder.setStorageType(oldBucketInfo.getStorageType());
-      }
-
-      //Check Versioning to update
-      Boolean versioning = args.getIsVersionEnabled();
-      if (versioning != null) {
-        bucketInfoBuilder.setIsVersionEnabled(versioning);
-        LOG.debug("Updating bucket versioning for bucket: {} in volume: {}",
-            bucketName, volumeName);
-      } else {
-        bucketInfoBuilder
-            .setIsVersionEnabled(oldBucketInfo.getIsVersionEnabled());
-      }
-      bucketInfoBuilder.setCreationTime(oldBucketInfo.getCreationTime());
-
-      // Set acls from oldBucketInfo if it has any.
-      if (oldBucketInfo.getAcls() != null) {
-        bucketInfoBuilder.setAcls(oldBucketInfo.getAcls());
-      }
-
-      OmBucketInfo omBucketInfo = bucketInfoBuilder.build();
-
-
-      commitBucketInfoToDB(omBucketInfo);
-    } catch (IOException | DBException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Setting bucket property failed for bucket:{} in volume:{}",
-            bucketName, volumeName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-  }
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   *
-   * @param volumeName - Name of the volume.
-   * @param bucketName - Name of the bucket.
-   * @throws IOException - on Failure.
-   */
-  @Override
-  public void deleteBucket(String volumeName, String bucketName)
-      throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName);
-    try {
-      //Check if bucket exists
-      String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-      if (metadataManager.getBucketTable().get(bucketKey) == null) {
-        LOG.debug("bucket: {} not found ", bucketName);
-        throw new OMException("Bucket doesn't exist",
-            BUCKET_NOT_FOUND);
-      }
-      //Check if bucket is empty
-      if (!metadataManager.isBucketEmpty(volumeName, bucketName)) {
-        LOG.debug("bucket: {} is not empty ", bucketName);
-        throw new OMException("Bucket is not empty",
-            OMException.ResultCodes.BUCKET_NOT_EMPTY);
-      }
-      commitDeleteBucketInfoToOMDB(bucketKey);
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName,
-            volumeName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-  }
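-
-  // Illustrative call (names assumed): deleting a non-empty bucket fails
-  // with BUCKET_NOT_EMPTY, so its keys must be deleted first.
-  //
-  //   bucketManager.deleteBucket("vol1", "bucket1");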
-
-  private void commitDeleteBucketInfoToOMDB(String dbBucketKey)
-      throws IOException {
-    metadataManager.getBucketTable().delete(dbBucketKey);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<OmBucketInfo> listBuckets(String volumeName,
-      String startBucket, String bucketPrefix, int maxNumOfBuckets)
-      throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    return metadataManager.listBuckets(
-        volumeName, startBucket, bucketPrefix, maxNumOfBuckets);
-
-  }
-
-  /**
-   * Add acl for Ozone object. Return true if acl is added successfully else
-   * false.
-   *
-   * @param obj Ozone object for which acl should be added.
-   * @param acl ozone acl to be added.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    Objects.requireNonNull(obj);
-    Objects.requireNonNull(acl);
-    if (!obj.getResourceType().equals(OzoneObj.ResourceType.BUCKET)) {
-      throw new IllegalArgumentException("Unexpected argument passed to " +
-          "BucketManager. OzoneObj type:" + obj.getResourceType());
-    }
-    String volume = obj.getVolumeName();
-    String bucket = obj.getBucketName();
-    boolean changed = false;
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket);
-    try {
-      String dbBucketKey = metadataManager.getBucketKey(volume, bucket);
-      OmBucketInfo bucketInfo =
-          metadataManager.getBucketTable().get(dbBucketKey);
-      if (bucketInfo == null) {
-        LOG.debug("Bucket:{}/{} does not exist", volume, bucket);
-        throw new OMException("Bucket " + bucket + " is not found",
-            BUCKET_NOT_FOUND);
-      }
-
-      changed = bucketInfo.addAcl(acl);
-      if (changed) {
-        metadataManager.getBucketTable().put(dbBucketKey, bucketInfo);
-      }
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Add acl operation failed for bucket:{}/{} acl:{}",
-            volume, bucket, acl, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket);
-    }
-
-    return changed;
-  }
-
-  /**
-   * Remove acl for Ozone object. Return true if acl is removed successfully
-   * else false.
-   *
-   * @param obj Ozone object.
-   * @param acl Ozone acl to be removed.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    Objects.requireNonNull(obj);
-    Objects.requireNonNull(acl);
-    if (!obj.getResourceType().equals(OzoneObj.ResourceType.BUCKET)) {
-      throw new IllegalArgumentException("Unexpected argument passed to " +
-          "BucketManager. OzoneObj type:" + obj.getResourceType());
-    }
-    String volume = obj.getVolumeName();
-    String bucket = obj.getBucketName();
-    boolean removed = false;
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket);
-    try {
-      String dbBucketKey = metadataManager.getBucketKey(volume, bucket);
-      OmBucketInfo bucketInfo =
-          metadataManager.getBucketTable().get(dbBucketKey);
-      if (bucketInfo == null) {
-        LOG.debug("Bucket:{}/{} does not exist", volume, bucket);
-        throw new OMException("Bucket " + bucket + " is not found",
-            BUCKET_NOT_FOUND);
-      }
-      removed = bucketInfo.removeAcl(acl);
-      if (removed) {
-        metadataManager.getBucketTable().put(dbBucketKey, bucketInfo);
-      }
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Remove acl operation failed for bucket:{}/{} acl:{}",
-            volume, bucket, acl, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket);
-    }
-    return removed;
-  }
-
-  /**
-   * Sets ACLs for the given Ozone object. This operation resets the ACL of
-   * the given object to the list of ACLs provided in the argument.
-   *
-   * @param obj Ozone object.
-   * @param acls List of acls.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException {
-    Objects.requireNonNull(obj);
-    Objects.requireNonNull(acls);
-    if (!obj.getResourceType().equals(OzoneObj.ResourceType.BUCKET)) {
-      throw new IllegalArgumentException("Unexpected argument passed to " +
-          "BucketManager. OzoneObj type:" + obj.getResourceType());
-    }
-    String volume = obj.getVolumeName();
-    String bucket = obj.getBucketName();
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket);
-    try {
-      String dbBucketKey = metadataManager.getBucketKey(volume, bucket);
-      OmBucketInfo bucketInfo =
-          metadataManager.getBucketTable().get(dbBucketKey);
-      if (bucketInfo == null) {
-        LOG.debug("Bucket:{}/{} does not exist", volume, bucket);
-        throw new OMException("Bucket " + bucket + " is not found",
-            BUCKET_NOT_FOUND);
-      }
-      bucketInfo.setAcls(acls);
-      metadataManager.getBucketTable().put(dbBucketKey, bucketInfo);
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Set acl operation failed for bucket:{}/{} acl:{}",
-            volume, bucket, StringUtils.join(",", acls), ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket);
-    }
-    return true;
-  }
-
-  /**
-   * Returns the list of ACLs for the given Ozone object.
-   *
-   * @param obj Ozone object.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public List<OzoneAcl> getAcl(OzoneObj obj) throws IOException {
-    Objects.requireNonNull(obj);
-
-    if (!obj.getResourceType().equals(OzoneObj.ResourceType.BUCKET)) {
-      throw new IllegalArgumentException("Unexpected argument passed to " +
-          "BucketManager. OzoneObj type:" + obj.getResourceType());
-    }
-    String volume = obj.getVolumeName();
-    String bucket = obj.getBucketName();
-    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket);
-    try {
-      String dbBucketKey = metadataManager.getBucketKey(volume, bucket);
-      OmBucketInfo bucketInfo =
-          metadataManager.getBucketTable().get(dbBucketKey);
-      if (bucketInfo == null) {
-        LOG.debug("Bucket:{}/{} does not exist", volume, bucket);
-        throw new OMException("Bucket " + bucket + " is not found",
-            BUCKET_NOT_FOUND);
-      }
-      return bucketInfo.getAcls();
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Get acl operation failed for bucket:{}/{} acl:{}",
-            volume, bucket, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volume, bucket);
-    }
-  }
-
-  /**
-   * Check access for given ozoneObject.
-   *
-   * @param ozObject object for which access needs to be checked.
-   * @param context Context object encapsulating all user related information.
-   * @return true if user has access else false.
-   */
-  @Override
-  public boolean checkAccess(OzoneObj ozObject, RequestContext context)
-      throws OMException {
-    Objects.requireNonNull(ozObject);
-    Objects.requireNonNull(context);
-
-    String volume = ozObject.getVolumeName();
-    String bucket = ozObject.getBucketName();
-    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket);
-    try {
-      String dbBucketKey = metadataManager.getBucketKey(volume, bucket);
-      OmBucketInfo bucketInfo =
-          metadataManager.getBucketTable().get(dbBucketKey);
-      if (bucketInfo == null) {
-        LOG.debug("Bucket:{}/{} does not exist", volume, bucket);
-        throw new OMException("Bucket " + bucket + " is not found",
-            BUCKET_NOT_FOUND);
-      }
-      boolean hasAccess = OzoneAclUtil.checkAclRights(bucketInfo.getAcls(),
-          context);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("user:{} has access rights for bucket:{} :{} ",
-            context.getClientUgi(), ozObject.getBucketName(), hasAccess);
-      }
-      return hasAccess;
-    } catch (IOException ex) {
-      if (ex instanceof OMException) {
-        throw (OMException) ex;
-      }
-      LOG.error("CheckAccess operation failed for bucket:{}/{}",
-          volume, bucket, ex);
-      throw new OMException("Check access operation failed for " +
-          "bucket:" + bucket, ex, INTERNAL_ERROR);
-    } finally {
-      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volume, bucket);
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/IOzoneAcl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/IOzoneAcl.java
deleted file mode 100644
index 6162ba2..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/IOzoneAcl.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.RequestContext;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Interface for Ozone Acl management.
- */
-public interface IOzoneAcl {
-
-  /**
-   * Add acl for Ozone object. Return true if acl is added successfully else
-   * false.
-   * @param obj Ozone object for which acl should be added.
-   * @param acl ozone acl to be added.
-   *
-   * @throws IOException if there is an error.
-   */
-  boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException;
-
-  /**
-   * Remove acl for Ozone object. Return true if acl is removed successfully
-   * else false.
-   * @param obj Ozone object.
-   * @param acl Ozone acl to be removed.
-   *
-   * @throws IOException if there is an error.
-   */
-  boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException;
-
-  /**
-   * Sets ACLs for the given Ozone object. This operation resets the ACL of
-   * the given object to the list of ACLs provided in the argument.
-   * @param obj Ozone object.
-   * @param acls List of acls.
-   *
-   * @throws IOException if there is an error.
-   */
-  boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException;
-
-  /**
-   * Returns list of ACLs for given Ozone object.
-   * @param obj Ozone object.
-   *
-   * @throws IOException if there is an error.
-   */
-  List<OzoneAcl> getAcl(OzoneObj obj) throws IOException;
-
-  /**
-   * Check access for given ozoneObject.
-   *
-   * @param ozObject object for which access needs to be checked.
-   * @param context Context object encapsulating all user related information.
-   * @throws org.apache.hadoop.ozone.om.exceptions.OMException
-   * @return true if user has access else false.
-   */
-  boolean checkAccess(OzoneObj ozObject, RequestContext context)
-      throws OMException;
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java
deleted file mode 100644
index ff12123..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java
+++ /dev/null
@@ -1,256 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgeKeysRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.BackgroundService;
-import org.apache.hadoop.hdds.utils.BackgroundTask;
-import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
-import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
-import org.apache.hadoop.hdds.utils.BackgroundTaskResult.EmptyTaskResult;
-
-import com.google.common.annotations.VisibleForTesting;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT;
-
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.ratis.protocol.ClientId;
-import org.rocksdb.RocksDBException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This is the background service to delete keys. It periodically scans the
- * OM metadata to collect keys from the DeletedTable and asks SCM to delete
- * the corresponding blocks; once SCM returns success for a key, that key is
- * cleaned up from the DB.
- */
-public class KeyDeletingService extends BackgroundService {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeyDeletingService.class);
-
-  // The thread pool size for key deleting service.
-  private static final int KEY_DELETING_CORE_POOL_SIZE = 2;
-
-  private final OzoneManager ozoneManager;
-  private final ScmBlockLocationProtocol scmClient;
-  private final KeyManager manager;
-  private ClientId clientId = ClientId.randomId();
-  private final int keyLimitPerTask;
-  private final AtomicLong deletedKeyCount;
-  private final AtomicLong runCount;
-
-  KeyDeletingService(OzoneManager ozoneManager,
-      ScmBlockLocationProtocol scmClient,
-      KeyManager manager, long serviceInterval,
-      long serviceTimeout, Configuration conf) {
-    super("KeyDeletingService", serviceInterval, TimeUnit.MILLISECONDS,
-        KEY_DELETING_CORE_POOL_SIZE, serviceTimeout);
-    this.ozoneManager = ozoneManager;
-    this.scmClient = scmClient;
-    this.manager = manager;
-    this.keyLimitPerTask = conf.getInt(OZONE_KEY_DELETING_LIMIT_PER_TASK,
-        OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT);
-    this.deletedKeyCount = new AtomicLong(0);
-    this.runCount = new AtomicLong(0);
-  }
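-
-  // A hedged tuning sketch (illustrative): the per-run key limit read in
-  // the constructor above can be raised through the same OMConfigKeys
-  // constant before the service is created.
-  //
-  //   OzoneConfiguration conf = new OzoneConfiguration();
-  //   conf.setInt(OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK, 20000);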
-
-  /**
-   * Returns the number of times this Background service has run.
-   *
-   * @return AtomicLong, run count.
-   */
-  @VisibleForTesting
-  public AtomicLong getRunCount() {
-    return runCount;
-  }
-
-  /**
-   * Returns the number of keys deleted by the background service.
-   *
-   * @return AtomicLong, deleted key count.
-   */
-  @VisibleForTesting
-  public AtomicLong getDeletedKeyCount() {
-    return deletedKeyCount;
-  }
-
-  @Override
-  public BackgroundTaskQueue getTasks() {
-    BackgroundTaskQueue queue = new BackgroundTaskQueue();
-    queue.add(new KeyDeletingTask());
-    return queue;
-  }
-
-  private boolean shouldRun() {
-    if (ozoneManager == null) {
-      // OzoneManager can be null for testing
-      return true;
-    }
-    return ozoneManager.isLeader();
-  }
-
-  private boolean isRatisEnabled() {
-    if (ozoneManager == null) {
-      return false;
-    }
-    return ozoneManager.isRatisEnabled();
-  }
-
-  /**
-   * A key deleting task scans the OM DB looking for a certain number of
-   * pending-deletion keys and sends these keys, along with their associated
-   * blocks, to SCM for deletion. Once SCM confirms the keys are deleted
-   * (i.e., SCM has persisted the block info in its deletedBlockLog), the
-   * task removes these keys from the DB.
-   */
-  private class KeyDeletingTask implements
-      BackgroundTask<BackgroundTaskResult> {
-
-    @Override
-    public int getPriority() {
-      return 0;
-    }
-
-    @Override
-    public BackgroundTaskResult call() throws Exception {
-      // Check if this is the Leader OM. If not leader, no need to execute this
-      // task.
-      if (shouldRun()) {
-        runCount.incrementAndGet();
-        try {
-          long startTime = Time.monotonicNow();
-          List<BlockGroup> keyBlocksList = manager
-              .getPendingDeletionKeys(keyLimitPerTask);
-          if (keyBlocksList != null && !keyBlocksList.isEmpty()) {
-            List<DeleteBlockGroupResult> results =
-                scmClient.deleteKeyBlocks(keyBlocksList);
-            if (results != null) {
-              int delCount;
-              if (isRatisEnabled()) {
-                delCount = submitPurgeKeysRequest(results);
-              } else {
-                // TODO: Once HA and non-HA paths are merged, we should have
-                //  only one code path here. Purge keys should go through an
-                //  OMRequest model.
-                delCount = deleteAllKeys(results);
-              }
-              LOG.debug("Number of keys deleted: {}, elapsed time: {}ms",
-                  delCount, Time.monotonicNow() - startTime);
-              deletedKeyCount.addAndGet(delCount);
-            }
-          }
-        } catch (IOException e) {
-          LOG.error("Error while running delete keys background task. Will " +
-              "retry at next run.", e);
-        }
-      }
-      // By design, no one cares about the results of this callback.
-      return EmptyTaskResult.newResult();
-    }
-
-    /**
-     * Deletes all the keys that SCM has acknowledged and queued for delete.
-     *
-     * @param results DeleteBlockGroups returned by SCM.
-     * @throws RocksDBException on Error.
-     * @throws IOException      on Error
-     */
-    private int deleteAllKeys(List<DeleteBlockGroupResult> results)
-        throws RocksDBException, IOException {
-      Table deletedTable = manager.getMetadataManager().getDeletedTable();
-
-      DBStore store = manager.getMetadataManager().getStore();
-
-      // Put all keys to delete in a single transaction and call for delete.
-      int deletedCount = 0;
-      try (BatchOperation writeBatch = store.initBatchOperation()) {
-        for (DeleteBlockGroupResult result : results) {
-          if (result.isSuccess()) {
-            // Purge key from OM DB.
-            deletedTable.deleteWithBatch(writeBatch,
-                result.getObjectKey());
-            LOG.debug("Key {} deleted from OM DB", result.getObjectKey());
-            deletedCount++;
-          }
-        }
-        // Write a single transaction for delete.
-        store.commitBatchOperation(writeBatch);
-      }
-      return deletedCount;
-    }
-
-    /**
-     * Submits PurgeKeys request for the keys whose blocks have been deleted
-     * by SCM.
-     *
-     * @param results DeleteBlockGroups returned by SCM.
-     * @return the number of keys purged, or 0 if the purge request failed.
-     */
-    public int submitPurgeKeysRequest(List<DeleteBlockGroupResult> results) {
-      List<String> purgeKeysList = new ArrayList<>();
-
-      // Put all keys to be purged in a list
-      int deletedCount = 0;
-      for (DeleteBlockGroupResult result : results) {
-        if (result.isSuccess()) {
-          // Add key to PurgeKeys list.
-          String deletedKey = result.getObjectKey();
-          purgeKeysList.add(deletedKey);
-          LOG.debug("Key {} set to be purged from OM DB", deletedKey);
-          deletedCount++;
-        }
-      }
-
-      PurgeKeysRequest purgeKeysRequest = PurgeKeysRequest.newBuilder()
-          .addAllKeys(purgeKeysList)
-          .build();
-
-      OMRequest omRequest = OMRequest.newBuilder()
-          .setCmdType(Type.PurgeKeys)
-          .setPurgeKeysRequest(purgeKeysRequest)
-          .setClientId(clientId.toString())
-          .build();
-
-      // Submit PurgeKeys request to OM
-      try {
-        ozoneManager.getOmServerProtocol().submitRequest(null, omRequest);
-      } catch (ServiceException e) {
-        LOG.error("PurgeKey request failed. Will retry at next run.");
-        return 0;
-      }
-
-      return deletedCount;
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
deleted file mode 100644
index c1aeaa9..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.fs.OzoneManagerFS;
-import org.apache.hadoop.hdds.utils.BackgroundService;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Handles key level commands.
- */
-public interface KeyManager extends OzoneManagerFS, IOzoneAcl {
-
-  /**
-   * Start key manager.
-   *
-   * @param configuration Ozone configuration used to set up background
-   *                      services.
-   */
-  void start(OzoneConfiguration configuration);
-
-  /**
-   * Stop key manager.
-   */
-  void stop() throws IOException;
-
-  /**
-   * After calling commit, the key will be made visible. There can be multiple
-   * open key writes in parallel (identified by client id). The most recently
-   * committed one will be the one visible.
-   *
-   * @param args the key to commit.
-   * @param clientID the client that is committing.
-   * @throws IOException
-   */
-  void commitKey(OmKeyArgs args, long clientID) throws IOException;
-
-  /**
-   * A client calls this on an open key to request allocation of a new block,
-   * which is appended to the tail of the open key's current block list.
-   *
-   * @param args the key to append
-   * @param clientID the client requesting block.
-   * @param excludeList List of datanodes/containers to exclude during block
-   *                    allocation.
-   * @return the reference to the new block.
-   * @throws IOException
-   */
-  OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID,
-      ExcludeList excludeList) throws IOException;
-
-  /**
-   * Given the args of a key to put, writes an open key entry to metadata.
-   *
-   * If container creation or the key write fails on
-   * DistributedStorageHandler, this key's metadata will still stay in OM.
-   * TODO garbage collect the open keys that never get closed
-   *
-   * @param args the args of the key provided by client.
-   * @return an OpenKeySession instance the client uses to talk to container.
-   * @throws IOException
-   */
-  OpenKeySession openKey(OmKeyArgs args) throws IOException;
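-
-  // A hedged sketch of the open/allocate/commit lifecycle (illustrative;
-  // assumes OpenKeySession exposes its client ID via getId(), which is not
-  // shown in this file):
-  //
-  //   OpenKeySession session = keyManager.openKey(args);
-  //   keyManager.allocateBlock(args, session.getId(), new ExcludeList());
-  //   keyManager.commitKey(args, session.getId());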
-
-  /**
-   * Look up an existing key. Return the info of the key to client side, which
-   * DistributedStorageHandler will use to access the data on datanode.
-   *
-   * @param args the args of the key provided by client.
-   * @param clientAddress a hint to the key manager to order the datanodes in
-   *                      the returned pipeline by distance from the client.
-   * @return an OmKeyInfo instance the client uses to talk to container.
-   * @throws IOException
-   */
-  OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) throws IOException;
-
-  /**
-   * Renames an existing key within a bucket.
-   *
-   * @param args the args of the key provided by client.
-   * @param toKeyName New name to be used for the key
-   * @throws IOException if specified key doesn't exist or
-   * some other I/O errors while renaming the key.
-   */
-  void renameKey(OmKeyArgs args, String toKeyName) throws IOException;
-
-  /**
-   * Deletes an object by an object key. The key will be immediately removed
-   * from the OM namespace and become invisible to clients. The object data
-   * is removed asynchronously and may be retained for some time.
-   *
-   * @param args the args of the key provided by client.
-   * @throws IOException if specified key doesn't exist or
-   * some other I/O errors while deleting an object.
-   */
-  void deleteKey(OmKeyArgs args) throws IOException;
-
-  /**
-   * Returns a list of keys represented by {@link OmKeyInfo}
-   * in the given bucket.
-   *
-   * @param volumeName
-   *   the name of the volume.
-   * @param bucketName
-   *   the name of the bucket.
-   * @param startKey
-   *   the start key name, only the keys whose name is
-   *   after this value will be included in the result.
-   *   This key is excluded from the result.
-   * @param keyPrefix
-   *   key name prefix, only the keys whose name has
-   *   this prefix will be included in the result.
-   * @param maxKeys
-   *   the maximum number of keys to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of keys.
-   * @throws IOException
-   */
-  List<OmKeyInfo> listKeys(String volumeName,
-      String bucketName, String startKey, String keyPrefix, int maxKeys)
-      throws IOException;
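-
-  // Paging sketch (illustrative; assumes an OmKeyInfo#getKeyName accessor
-  // not shown in this file): pass the name of the last key from the
-  // previous page as startKey, which is excluded from the next result.
-  //
-  //   List<OmKeyInfo> page = keyManager.listKeys(
-  //       "vol1", "bucket1", lastKeyName, "logs/", 1000);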
-
-  /**
-   * Returns a list of pending deletion key info, up to the given count.
-   * Each entry is a {@link BlockGroup}, which contains the info about the
-   * key name and all its associated block IDs. A pending deletion key is
-   * stored with #deleting# prefix in OM DB.
-   *
-   * @param count max number of keys to return.
-   * @return a list of {@link BlockGroup} representing keys and blocks.
-   * @throws IOException
-   */
-  List<BlockGroup> getPendingDeletionKeys(int count) throws IOException;
-
-  /**
-   * Returns a list of all still-open key info, which contains the key name
-   * and all its associated block IDs. A pending open key has the
-   * prefix #open# in the OM DB.
-   *
-   * @return a list of {@link BlockGroup} representing keys and blocks.
-   * @throws IOException
-   */
-  List<BlockGroup> getExpiredOpenKeys() throws IOException;
-
-  /**
-   * Deletes an expired open key by its name. Called when a hanging key has
-   * been lingering for too long. Once called, the open key entry gets removed
-   * from OM metadata.
-   *
-   * @param objectKeyName object key name with #open# prefix.
-   * @throws IOException if specified key doesn't exist or other I/O errors.
-   */
-  void deleteExpiredOpenKey(String objectKeyName) throws IOException;
-
-  /**
-   * Returns the metadataManager.
-   * @return OMMetadataManager.
-   */
-  OMMetadataManager getMetadataManager();
-
-  /**
-   * Returns the instance of Deleting Service.
-   * @return Background service.
-   */
-  BackgroundService getDeletingService();
-
-
-  /**
-   * Initiate multipart upload for the specified key.
-   * @param keyArgs
-   * @return OmMultipartInfo
-   * @throws IOException
-   */
-  OmMultipartInfo initiateMultipartUpload(OmKeyArgs keyArgs) throws IOException;
-
-  /**
-   * Commit Multipart upload part file.
-   * @param omKeyArgs
-   * @param clientID
-   * @return OmMultipartCommitUploadPartInfo
-   * @throws IOException
-   */
-  OmMultipartCommitUploadPartInfo commitMultipartUploadPart(
-      OmKeyArgs omKeyArgs, long clientID) throws IOException;
-
-  /**
-   * Complete Multipart upload Request.
-   * @param omKeyArgs
-   * @param multipartUploadList
-   * @return OmMultipartUploadCompleteInfo
-   * @throws IOException
-   */
-  OmMultipartUploadCompleteInfo completeMultipartUpload(OmKeyArgs omKeyArgs,
-      OmMultipartUploadCompleteList multipartUploadList) throws IOException;
-
-  /**
-   * Abort multipart upload request.
-   * @param omKeyArgs
-   * @throws IOException
-   */
-  void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException;
-
-  OmMultipartUploadList listMultipartUploads(String volumeName,
-      String bucketName, String prefix) throws OMException;
-
-  /**
-   * Returns list of parts of a multipart upload key.
-   * @param volumeName
-   * @param bucketName
-   * @param keyName
-   * @param uploadID
-   * @param partNumberMarker
-   * @param maxParts
-   * @return OmMultipartUploadListParts
-   */
-  OmMultipartUploadListParts listParts(String volumeName, String bucketName,
-      String keyName, String uploadID, int partNumberMarker,
-      int maxParts)  throws IOException;
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
deleted file mode 100644
index 20b7fdfe..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ /dev/null
@@ -1,2157 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.security.GeneralSecurityException;
-import java.security.PrivilegedExceptionAction;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.TreeMap;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.utils.BackgroundService;
-import org.apache.hadoop.hdds.utils.UniqueId;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.hdds.utils.db.CodecRegistry;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.hadoop.hdds.utils.db.RDBStore;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
-import org.apache.hadoop.ozone.om.helpers.OmPartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
-import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.RequestContext;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Time;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import org.apache.commons.codec.digest.DigestUtils;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KMS_PROVIDER;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY;
-import static org.apache.hadoop.util.Time.monotonicNow;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Implementation of KeyManager.
- */
-public class KeyManagerImpl implements KeyManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeyManagerImpl.class);
-
-  /**
-   * An SCM block client, used to talk to SCM to allocate blocks during putKey.
-   */
-  private final OzoneManager ozoneManager;
-  private final ScmClient scmClient;
-  private final OMMetadataManager metadataManager;
-  private final long scmBlockSize;
-  private final boolean useRatis;
-
-  private final int preallocateBlocksMax;
-  private final String omId;
-  private final OzoneBlockTokenSecretManager secretManager;
-  private final boolean grpcBlockTokenEnabled;
-
-  private BackgroundService keyDeletingService;
-
-  private final KeyProviderCryptoExtension kmsProvider;
-  private final PrefixManager prefixManager;
-
-
-  @VisibleForTesting
-  public KeyManagerImpl(ScmBlockLocationProtocol scmBlockClient,
-      OMMetadataManager metadataManager, OzoneConfiguration conf, String omId,
-      OzoneBlockTokenSecretManager secretManager) {
-    this(null, new ScmClient(scmBlockClient, null), metadataManager,
-        conf, omId, secretManager, null, null);
-  }
-
-  public KeyManagerImpl(OzoneManager om, ScmClient scmClient,
-      OzoneConfiguration conf, String omId) {
-    this(om, scmClient, om.getMetadataManager(), conf, omId,
-        om.getBlockTokenMgr(), om.getKmsProvider(), om.getPrefixManager());
-  }
-
-  @SuppressWarnings("parameternumber")
-  private KeyManagerImpl(OzoneManager om, ScmClient scmClient,
-      OMMetadataManager metadataManager, OzoneConfiguration conf, String omId,
-      OzoneBlockTokenSecretManager secretManager,
-      KeyProviderCryptoExtension kmsProvider, PrefixManager prefixManager) {
-    this.scmBlockSize = (long) conf
-        .getStorageSize(OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT,
-            StorageUnit.BYTES);
-    this.useRatis = conf.getBoolean(DFS_CONTAINER_RATIS_ENABLED_KEY,
-        DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
-    this.preallocateBlocksMax = conf.getInt(
-        OZONE_KEY_PREALLOCATION_BLOCKS_MAX,
-        OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT);
-    this.grpcBlockTokenEnabled = conf.getBoolean(
-        HDDS_BLOCK_TOKEN_ENABLED,
-        HDDS_BLOCK_TOKEN_ENABLED_DEFAULT);
-
-    this.ozoneManager = om;
-    this.omId = omId;
-    this.scmClient = scmClient;
-    this.metadataManager = metadataManager;
-    this.prefixManager = prefixManager;
-    this.secretManager = secretManager;
-    this.kmsProvider = kmsProvider;
-
-  }
-
-  @Override
-  public void start(OzoneConfiguration configuration) {
-    if (keyDeletingService == null) {
-      long blockDeleteInterval = configuration.getTimeDuration(
-          OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-          OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
-          TimeUnit.MILLISECONDS);
-      long serviceTimeout = configuration.getTimeDuration(
-          OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
-          OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
-          TimeUnit.MILLISECONDS);
-      keyDeletingService = new KeyDeletingService(ozoneManager,
-          scmClient.getBlockClient(), this, blockDeleteInterval,
-          serviceTimeout, configuration);
-      keyDeletingService.start();
-    }
-  }
-
-  KeyProviderCryptoExtension getKMSProvider() {
-    return kmsProvider;
-  }
-
-  @Override
-  public void stop() throws IOException {
-    if (keyDeletingService != null) {
-      keyDeletingService.shutdown();
-      keyDeletingService = null;
-    }
-  }
-
-  private OmBucketInfo getBucketInfo(String volumeName, String bucketName)
-      throws IOException {
-    String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-    return metadataManager.getBucketTable().get(bucketKey);
-  }
-
-  private void validateBucket(String volumeName, String bucketName)
-      throws IOException {
-    String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-    // Check if bucket exists
-    if (metadataManager.getBucketTable().get(bucketKey) == null) {
-      String volumeKey = metadataManager.getVolumeKey(volumeName);
-      // If the volume also does not exist, we should throw volume not found
-      // exception
-      if (metadataManager.getVolumeTable().get(volumeKey) == null) {
-        LOG.error("volume not found: {}", volumeName);
-        throw new OMException("Volume not found",
-            VOLUME_NOT_FOUND);
-      }
-
-      // if the volume exists but bucket does not exist, throw bucket not found
-      // exception
-      LOG.error("bucket not found: {}/{} ", volumeName, bucketName);
-      throw new OMException("Bucket not found",
-          BUCKET_NOT_FOUND);
-    }
-  }
-
-  /**
-   * Checks whether the S3 bucket exists.
-   * @param volumeName
-   * @param bucketName
-   * @return the bucket info if the bucket exists.
-   * @throws IOException
-   */
-  private OmBucketInfo validateS3Bucket(String volumeName, String bucketName)
-      throws IOException {
-
-    String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-    OmBucketInfo omBucketInfo = metadataManager.getBucketTable().
-        get(bucketKey);
-    //Check if the bucket exists
-    if (omBucketInfo == null) {
-      LOG.error("bucket not found: {}/{} ", volumeName, bucketName);
-      throw new OMException("Bucket not found",
-          BUCKET_NOT_FOUND);
-    }
-    return omBucketInfo;
-  }
-
-  @Override
-  public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID,
-      ExcludeList excludeList) throws IOException {
-    Preconditions.checkNotNull(args);
-
-
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    validateBucket(volumeName, bucketName);
-    String openKey = metadataManager.getOpenKey(
-        volumeName, bucketName, keyName, clientID);
-
-    OmKeyInfo keyInfo = metadataManager.getOpenKeyTable().get(openKey);
-    if (keyInfo == null) {
-      LOG.error("Allocate block for a key not in open status in meta store" +
-          " /{}/{}/{} with ID {}", volumeName, bucketName, keyName, clientID);
-      throw new OMException("Open Key not found",
-          KEY_NOT_FOUND);
-    }
-
-    // current version not committed, so new blocks coming now are added to
-    // the same version
-    List<OmKeyLocationInfo> locationInfos =
-        allocateBlock(keyInfo, excludeList, scmBlockSize);
-
-    keyInfo.appendNewBlocks(locationInfos, true);
-    keyInfo.updateModifcationTime();
-    metadataManager.getOpenKeyTable().put(openKey, keyInfo);
-
-    return locationInfos.get(0);
-
-  }
-
-  /**
-   * This method avoids multiple RPC calls to SCM by allocating multiple
-   * blocks in one RPC call.
-   * @param keyInfo key info of the key to allocate blocks for.
-   * @param excludeList exclude list to honor while allocating blocks.
-   * @param requestedSize requested length for allocation.
-   * @return location info of the newly allocated blocks.
-   * @throws IOException
-   */
-  private List<OmKeyLocationInfo> allocateBlock(OmKeyInfo keyInfo,
-      ExcludeList excludeList, long requestedSize) throws IOException {
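-    // Ceiling division: the number of scmBlockSize blocks needed to cover
-    // requestedSize, capped at preallocateBlocksMax. E.g. with a 256 MB
-    // block size, a 600 MB request needs ceil(600 / 256) = 3 blocks.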
-    int numBlocks = Math.min((int) ((requestedSize - 1) / scmBlockSize + 1),
-        preallocateBlocksMax);
-    List<OmKeyLocationInfo> locationInfos = new ArrayList<>(numBlocks);
-    String remoteUser = getRemoteUser().getShortUserName();
-    List<AllocatedBlock> allocatedBlocks;
-    try {
-      allocatedBlocks = scmClient.getBlockClient()
-          .allocateBlock(scmBlockSize, numBlocks, keyInfo.getType(),
-              keyInfo.getFactor(), omId, excludeList);
-    } catch (SCMException ex) {
-      if (ex.getResult()
-          .equals(SCMException.ResultCodes.SAFE_MODE_EXCEPTION)) {
-        throw new OMException(ex.getMessage(), ResultCodes.SCM_IN_SAFE_MODE);
-      }
-      throw ex;
-    }
-    for (AllocatedBlock allocatedBlock : allocatedBlocks) {
-      OmKeyLocationInfo.Builder builder = new OmKeyLocationInfo.Builder()
-          .setBlockID(new BlockID(allocatedBlock.getBlockID()))
-          .setLength(scmBlockSize)
-          .setOffset(0)
-          .setPipeline(allocatedBlock.getPipeline());
-      if (grpcBlockTokenEnabled) {
-        builder.setToken(secretManager
-            .generateToken(remoteUser, allocatedBlock.getBlockID().toString(),
-                getAclForUser(remoteUser), scmBlockSize));
-      }
-      locationInfos.add(builder.build());
-    }
-    return locationInfos;
-  }
-
-  /* Optimize ugi lookup for RPC operations to avoid a trip through
-   * UGI.getCurrentUser which is synch'ed.
-   */
-  public static UserGroupInformation getRemoteUser() throws IOException {
-    UserGroupInformation ugi = Server.getRemoteUser();
-    return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
-  }
-
-  /**
-   * Return the acl for the given user.
-   * @param user user whose acl is returned.
-   */
-  private EnumSet<AccessModeProto> getAclForUser(String user) {
-    // TODO: Return correct acl for user.
-    return EnumSet.allOf(AccessModeProto.class);
-  }
-
-  private EncryptedKeyVersion generateEDEK(
-      final String ezKeyName) throws IOException {
-    if (ezKeyName == null) {
-      return null;
-    }
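-    // Generate an encrypted data encryption key (EDEK) for the bucket's
-    // encryption key via the KMS, running as the OM login user rather than
-    // the RPC caller.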
-    long generateEDEKStartTime = monotonicNow();
-    EncryptedKeyVersion edek = SecurityUtil.doAsLoginUser(
-        new PrivilegedExceptionAction<EncryptedKeyVersion>() {
-          @Override
-          public EncryptedKeyVersion run() throws IOException {
-            try {
-              return getKMSProvider().generateEncryptedKey(ezKeyName);
-            } catch (GeneralSecurityException e) {
-              throw new IOException(e);
-            }
-          }
-        });
-    long generateEDEKTime = monotonicNow() - generateEDEKStartTime;
-    LOG.debug("generateEDEK takes {} ms", generateEDEKTime);
-    Preconditions.checkNotNull(edek);
-    return edek;
-  }
-
-  @Override
-  public OpenKeySession openKey(OmKeyArgs args) throws IOException {
-    Preconditions.checkNotNull(args);
-    Preconditions.checkNotNull(args.getAcls(), "Default acls " +
-        "should be set.");
-
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    validateBucket(volumeName, bucketName);
-
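-    // UniqueId.next() serves as the client/session id of this open-key
-    // session; it is assumed to be unique across concurrent opens, so that
-    // writers of the same key get distinct openKey table entries.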
-    long currentTime = UniqueId.next();
-    OmKeyInfo keyInfo;
-    long openVersion;
-    // NOTE: the size of a key is not a hard limit on anything; it is the
-    // size the client expects the key to have. If the client sets a value,
-    // that value is used; otherwise we allocate a single block, which is
-    // the current size seen by a reading client.
-    final long size = args.getDataSize() > 0 ?
-        args.getDataSize() : scmBlockSize;
-    final List<OmKeyLocationInfo> locations = new ArrayList<>();
-
-    ReplicationFactor factor = args.getFactor();
-    if (factor == null) {
-      factor = useRatis ? ReplicationFactor.THREE : ReplicationFactor.ONE;
-    }
-
-    ReplicationType type = args.getType();
-    if (type == null) {
-      type = useRatis ? ReplicationType.RATIS : ReplicationType.STAND_ALONE;
-    }
-
-    String dbKeyName = metadataManager.getOzoneKey(
-        args.getVolumeName(), args.getBucketName(), args.getKeyName());
-
-    FileEncryptionInfo encInfo;
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName);
-    OmBucketInfo bucketInfo;
-    try {
-      bucketInfo = getBucketInfo(volumeName, bucketName);
-      encInfo = getFileEncryptionInfo(bucketInfo);
-      keyInfo = prepareKeyInfo(args, dbKeyName, size, locations, encInfo);
-    } catch (OMException e) {
-      throw e;
-    } catch (IOException ex) {
-      LOG.error("Key open failed for volume:{} bucket:{} key:{}",
-          volumeName, bucketName, keyName, ex);
-      throw new OMException(ex.getMessage(), ResultCodes.KEY_ALLOCATION_ERROR);
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-    if (keyInfo == null) {
-      // the key does not exist, create a new object, the new blocks are the
-      // version 0
-      keyInfo = createKeyInfo(args, locations, factor, type, size,
-          encInfo, bucketInfo);
-    }
-    openVersion = keyInfo.getLatestVersionLocations().getVersion();
-    LOG.debug("Key {} allocated in volume {} bucket {}",
-        keyName, volumeName, bucketName);
-    allocateBlockInKey(keyInfo, size, currentTime);
-    return new OpenKeySession(currentTime, keyInfo, openVersion);
-  }
-
-  private void allocateBlockInKey(OmKeyInfo keyInfo, long size, long sessionId)
-      throws IOException {
-    String openKey = metadataManager
-        .getOpenKey(keyInfo.getVolumeName(), keyInfo.getBucketName(),
-            keyInfo.getKeyName(), sessionId);
-    // The requested size is not required; it is an optimization. If it is
-    // 0, no block is allocated at this point, and the client can always
-    // call allocateBlock later when it needs more blocks. If the requested
-    // size is not 0, OM preallocates some blocks and piggybacks them to
-    // the client, to save RPC calls.
-    if (size > 0) {
-      List<OmKeyLocationInfo> locationInfos =
-          allocateBlock(keyInfo, new ExcludeList(), size);
-      keyInfo.appendNewBlocks(locationInfos, true);
-    }
-
-    metadataManager.getOpenKeyTable().put(openKey, keyInfo);
-
-  }
-
-  private OmKeyInfo prepareKeyInfo(
-      OmKeyArgs keyArgs, String dbKeyName, long size,
-      List<OmKeyLocationInfo> locations, FileEncryptionInfo encInfo)
-      throws IOException {
-    OmKeyInfo keyInfo = null;
-    if (keyArgs.getIsMultipartKey()) {
-      keyInfo = prepareMultipartKeyInfo(keyArgs, size, locations, encInfo);
-    } else if (metadataManager.getKeyTable().isExist(dbKeyName)) {
-      keyInfo = metadataManager.getKeyTable().get(dbKeyName);
-      // the key already exists; the new blocks will be added as a new
-      // version. When locations.size() == 0, the new version will have the
-      // same blocks as its previous version
-      keyInfo.addNewVersion(locations, true);
-      keyInfo.setDataSize(size + keyInfo.getDataSize());
-    }
-    if(keyInfo != null) {
-      keyInfo.setMetadata(keyArgs.getMetadata());
-    }
-    return keyInfo;
-  }
-
-  private OmKeyInfo prepareMultipartKeyInfo(OmKeyArgs args, long size,
-      List<OmKeyLocationInfo> locations, FileEncryptionInfo encInfo)
-      throws IOException {
-    ReplicationFactor factor;
-    ReplicationType type;
-
-    Preconditions.checkArgument(args.getMultipartUploadPartNumber() > 0,
-        "PartNumber Should be greater than zero");
-    // When key is multipart upload part key, we should take replication
-    // type and replication factor from original key which has done
-    // initiate multipart upload. If we have not found any such, we throw
-    // error no such multipart upload.
-    String uploadID = args.getMultipartUploadID();
-    Preconditions.checkNotNull(uploadID);
-    String multipartKey = metadataManager
-        .getMultipartKey(args.getVolumeName(), args.getBucketName(),
-            args.getKeyName(), uploadID);
-    OmKeyInfo partKeyInfo = metadataManager.getOpenKeyTable().get(
-        multipartKey);
-    if (partKeyInfo == null) {
-      throw new OMException("No such Multipart upload is with specified " +
-          "uploadId " + uploadID,
-          ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-    } else {
-      factor = partKeyInfo.getFactor();
-      type = partKeyInfo.getType();
-    }
-    // For this upload part we don't need to check the KeyTable, as this
-    // is not an actual key but a part of one.
-    return createKeyInfo(args, locations, factor, type, size, encInfo,
-        getBucketInfo(args.getVolumeName(), args.getBucketName()));
-  }
-
-  /**
-   * Create an OmKeyInfo object for the given arguments.
-   * @param keyArgs key arguments from the client.
-   * @param locations block locations of the first key version.
-   * @param factor replication factor.
-   * @param type replication type.
-   * @param size data size of the key.
-   * @param encInfo file encryption info, may be null.
-   * @param omBucketInfo info of the bucket that holds the key.
-   * @return the newly built OmKeyInfo.
-   */
-  private OmKeyInfo createKeyInfo(OmKeyArgs keyArgs,
-      List<OmKeyLocationInfo> locations,
-      ReplicationFactor factor,
-      ReplicationType type, long size,
-      FileEncryptionInfo encInfo,
-      OmBucketInfo omBucketInfo) {
-    OmKeyInfo.Builder builder = new OmKeyInfo.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, locations)))
-        .setCreationTime(Time.now())
-        .setModificationTime(Time.now())
-        .setDataSize(size)
-        .setReplicationType(type)
-        .setReplicationFactor(factor)
-        .setFileEncryptionInfo(encInfo)
-        .addAllMetadata(keyArgs.getMetadata());
-    builder.setAcls(getAclsForKey(keyArgs, omBucketInfo));
-
-    if (Boolean.parseBoolean(
-        omBucketInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) {
-      builder.addMetadata(OzoneConsts.GDPR_FLAG, Boolean.TRUE.toString());
-    }
-    return builder.build();
-  }
-
-  @Override
-  public void commitKey(OmKeyArgs args, long clientID) throws IOException {
-    Preconditions.checkNotNull(args);
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    List<OmKeyLocationInfo> locationInfoList = args.getLocationInfoList();
-    String objectKey = metadataManager
-        .getOzoneKey(volumeName, bucketName, keyName);
-    String openKey = metadataManager
-        .getOpenKey(volumeName, bucketName, keyName, clientID);
-    Preconditions.checkNotNull(locationInfoList);
-    try {
-      metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName,
-          bucketName);
-      validateBucket(volumeName, bucketName);
-      OmKeyInfo keyInfo = metadataManager.getOpenKeyTable().get(openKey);
-      if (keyInfo == null) {
-        throw new OMException("Failed to commit key, as " + openKey + "entry " +
-            "is not found in the openKey table", KEY_NOT_FOUND);
-      }
-      keyInfo.setDataSize(args.getDataSize());
-      keyInfo.setModificationTime(Time.now());
-
-      // Update the block length for each block.
-      keyInfo.updateLocationInfoList(locationInfoList);
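-      // Move the entry from the openKey table to the key table; after this
-      // the committed key is visible to readers.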
-      metadataManager.getStore().move(
-          openKey,
-          objectKey,
-          keyInfo,
-          metadataManager.getOpenKeyTable(),
-          metadataManager.getKeyTable());
-    } catch (OMException e) {
-      throw e;
-    } catch (IOException ex) {
-      LOG.error("Key commit failed for volume:{} bucket:{} key:{}",
-          volumeName, bucketName, keyName, ex);
-      throw new OMException(ex.getMessage(),
-          ResultCodes.KEY_ALLOCATION_ERROR);
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-  }
-
-  @Override
-  public OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress)
-      throws IOException {
-    Preconditions.checkNotNull(args);
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
-        bucketName);
-    try {
-      String keyBytes = metadataManager.getOzoneKey(
-          volumeName, bucketName, keyName);
-      OmKeyInfo value = metadataManager.getKeyTable().get(keyBytes);
-      if (value == null) {
-        LOG.debug("volume:{} bucket:{} Key:{} not found",
-            volumeName, bucketName, keyName);
-        throw new OMException("Key not found",
-            KEY_NOT_FOUND);
-      }
-      if (grpcBlockTokenEnabled) {
-        String remoteUser = getRemoteUser().getShortUserName();
-        for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) {
-          key.getLocationList().forEach(k -> {
-            k.setToken(secretManager.generateToken(remoteUser,
-                k.getBlockID().getContainerBlockID().toString(),
-                getAclForUser(remoteUser),
-                k.getLength()));
-          });
-        }
-      }
-      // Refresh container pipeline info from SCM
-      // based on OmKeyArgs.refreshPipeline flag
-      // 1. Client send initial read request OmKeyArgs.refreshPipeline = false
-      // and uses the pipeline cached in OM to access datanode
-      // 2. If succeeded, done.
-      // 3. If failed due to pipeline does not exist or invalid pipeline state
-      //    exception, client should retry lookupKey with
-      //    OmKeyArgs.refreshPipeline = true
-      if (args.getRefreshPipeline()) {
-        for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) {
-          key.getLocationList().forEach(k -> {
-            // TODO: fix Some tests that may not initialize container client
-            // The production should always have containerClient initialized.
-            if (scmClient.getContainerClient() != null) {
-              try {
-                ContainerWithPipeline cp = scmClient.getContainerClient()
-                    .getContainerWithPipeline(k.getContainerID());
-                if (!cp.getPipeline().equals(k.getPipeline())) {
-                  k.setPipeline(cp.getPipeline());
-                }
-              } catch (IOException e) {
-                LOG.error("Unable to update pipeline for container:{}",
-                    k.getContainerID());
-              }
-            }
-          });
-        }
-      }
-      if (args.getSortDatanodes()) {
-        sortDatanodeInPipeline(value, clientAddress);
-      }
-      return value;
-    } catch (IOException ex) {
-      LOG.debug("Get key failed for volume:{} bucket:{} key:{}",
-          volumeName, bucketName, keyName, ex);
-      throw new OMException(ex.getMessage(),
-          KEY_NOT_FOUND);
-    } finally {
-      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-  }
-
-  @Override
-  public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
-    Preconditions.checkNotNull(args);
-    Preconditions.checkNotNull(toKeyName);
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String fromKeyName = args.getKeyName();
-    if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
-      LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}",
-          volumeName, bucketName, fromKeyName, toKeyName);
-      throw new OMException("Key name is empty",
-          ResultCodes.INVALID_KEY_NAME);
-    }
-
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName);
-    try {
-      // fromKeyName should exist
-      String fromKey = metadataManager.getOzoneKey(
-          volumeName, bucketName, fromKeyName);
-      OmKeyInfo fromKeyValue = metadataManager.getKeyTable().get(fromKey);
-      if (fromKeyValue == null) {
-        // TODO: Add support for renaming open key
-        LOG.error(
-            "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. "
-                + "Key: {} not found.", volumeName, bucketName, fromKeyName,
-            toKeyName, fromKeyName);
-        throw new OMException("Key not found",
-            KEY_NOT_FOUND);
-      }
-
-      // A rename is a no-op if the target and source names are the same.
-      // TODO: Discuss if we need to throw instead.
-      if (fromKeyName.equals(toKeyName)) {
-        return;
-      }
-
-      // toKeyName should not exist
-      String toKey =
-          metadataManager.getOzoneKey(volumeName, bucketName, toKeyName);
-      OmKeyInfo toKeyValue = metadataManager.getKeyTable().get(toKey);
-      if (toKeyValue != null) {
-        LOG.error(
-            "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. "
-                + "Key: {} already exists.", volumeName, bucketName,
-            fromKeyName, toKeyName, toKeyName);
-        throw new OMException("Key already exists",
-            OMException.ResultCodes.KEY_ALREADY_EXISTS);
-      }
-
-      fromKeyValue.setKeyName(toKeyName);
-      fromKeyValue.updateModifcationTime();
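-      // Delete the old entry and insert the renamed one in a single batch,
-      // so the rename is applied to the DB as one atomic update.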
-      DBStore store = metadataManager.getStore();
-      try (BatchOperation batch = store.initBatchOperation()) {
-        metadataManager.getKeyTable().deleteWithBatch(batch, fromKey);
-        metadataManager.getKeyTable().putWithBatch(batch, toKey,
-            fromKeyValue);
-        store.commitBatchOperation(batch);
-      }
-    } catch (IOException ex) {
-      if (ex instanceof OMException) {
-        throw ex;
-      }
-      LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}",
-          volumeName, bucketName, fromKeyName, toKeyName, ex);
-      throw new OMException(ex.getMessage(),
-          ResultCodes.KEY_RENAME_ERROR);
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-  }
-
-  @Override
-  public void deleteKey(OmKeyArgs args) throws IOException {
-    Preconditions.checkNotNull(args);
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName);
-    try {
-      String objectKey = metadataManager.getOzoneKey(
-          volumeName, bucketName, keyName);
-      OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
-      if (keyInfo == null) {
-        throw new OMException("Key not found",
-            KEY_NOT_FOUND);
-      } else {
-        // Directly delete a key with no blocks from the db; such a key
-        // need not be moved to the deleted table.
-        if (isKeyEmpty(keyInfo)) {
-          metadataManager.getKeyTable().delete(objectKey);
-          LOG.debug("Key {} deleted from OM DB", keyName);
-          return;
-        }
-      }
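-      // The key has blocks: record it in the deleted table so that the
-      // background KeyDeletingService can later reclaim its blocks from
-      // SCM, then remove it from the key table.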
-      RepeatedOmKeyInfo repeatedOmKeyInfo =
-          metadataManager.getDeletedTable().get(objectKey);
-      repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(keyInfo,
-          repeatedOmKeyInfo);
-      metadataManager.getKeyTable().delete(objectKey);
-      metadataManager.getDeletedTable().put(objectKey, repeatedOmKeyInfo);
-    } catch (OMException ex) {
-      throw ex;
-    } catch (IOException ex) {
-      LOG.error(String.format("Delete key failed for volume:%s "
-          + "bucket:%s key:%s", volumeName, bucketName, keyName), ex);
-      throw new OMException(ex.getMessage(), ex,
-          ResultCodes.KEY_DELETION_ERROR);
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-  }
-
-  private boolean isKeyEmpty(OmKeyInfo keyInfo) {
-    for (OmKeyLocationInfoGroup keyLocationList : keyInfo
-        .getKeyLocationVersions()) {
-      if (keyLocationList.getLocationList().size() != 0) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  @Override
-  public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
-      String startKey, String keyPrefix,
-      int maxKeys) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-
-    // We don't take a lock in this path, since we walk the
-    // underlying table using an iterator. That automatically creates a
-    // snapshot of the data, so we don't need these locks at a higher level
-    // when we iterate.
-    return metadataManager.listKeys(volumeName, bucketName,
-        startKey, keyPrefix, maxKeys);
-  }
-
-  @Override
-  public List<BlockGroup> getPendingDeletionKeys(final int count)
-      throws IOException {
-    return  metadataManager.getPendingDeletionKeys(count);
-  }
-
-  @Override
-  public List<BlockGroup> getExpiredOpenKeys() throws IOException {
-    return metadataManager.getExpiredOpenKeys();
-
-  }
-
-  @Override
-  public void deleteExpiredOpenKey(String objectKeyName) throws IOException {
-    Preconditions.checkNotNull(objectKeyName);
-    // TODO: Fix this in later patches.
-  }
-
-  @Override
-  public OMMetadataManager getMetadataManager() {
-    return metadataManager;
-  }
-
-  @Override
-  public BackgroundService getDeletingService() {
-    return keyDeletingService;
-  }
-
-  @Override
-  public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws
-      IOException {
-    Preconditions.checkNotNull(omKeyArgs);
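-    // Combining a random UUID with UniqueId.next() is assumed to make
-    // concurrent initiate requests on the same key yield distinct upload
-    // IDs.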
-    String uploadID = UUID.randomUUID().toString() + "-" + UniqueId.next();
-    return createMultipartInfo(omKeyArgs, uploadID);
-  }
-
-  private OmMultipartInfo createMultipartInfo(OmKeyArgs keyArgs,
-      String multipartUploadID) throws IOException {
-    String volumeName = keyArgs.getVolumeName();
-    String bucketName = keyArgs.getBucketName();
-    String keyName = keyArgs.getKeyName();
-
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName);
-    OmBucketInfo bucketInfo = validateS3Bucket(volumeName, bucketName);
-    try {
-
-      // We add the uploadId to the key because, if multiple users perform
-      // multipart upload on the same key, each uploads independently and
-      // whoever finally commits the key is what we see in Ozone. If we did
-      // not add the id and used the bare key /volume/bucket/key, parts from
-      // multiple users would be applied to the same key, and the resulting
-      // key could be a mix of parts from different users.
-
-      // So if multipart upload is initiated multiple times on the same key,
-      // we store multiple entries in the openKey table. AWS S3 behaves the
-      // same way: each multipart upload request returns a new uploadId.
-
-      String multipartKey = metadataManager.getMultipartKey(volumeName,
-          bucketName, keyName, multipartUploadID);
-
-      // Not checking whether there is already a key for this in the
-      // keyTable; the final complete multipart upload takes care of that.
-
-      Map<Integer, PartKeyInfo> partKeyInfoMap = new HashMap<>();
-      OmMultipartKeyInfo multipartKeyInfo = new OmMultipartKeyInfo(
-          multipartUploadID, partKeyInfoMap);
-      List<OmKeyLocationInfo> locations = new ArrayList<>();
-      OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .setCreationTime(Time.now())
-          .setModificationTime(Time.now())
-          .setReplicationType(keyArgs.getType())
-          .setReplicationFactor(keyArgs.getFactor())
-          .setOmKeyLocationInfos(Collections.singletonList(
-              new OmKeyLocationInfoGroup(0, locations)))
-          .setAcls(getAclsForKey(keyArgs, bucketInfo))
-          .build();
-      DBStore store = metadataManager.getStore();
-      try (BatchOperation batch = store.initBatchOperation()) {
-        // Create an entry in open key table and multipart info table for
-        // this key.
-        metadataManager.getMultipartInfoTable().putWithBatch(batch,
-            multipartKey, multipartKeyInfo);
-        metadataManager.getOpenKeyTable().putWithBatch(batch,
-            multipartKey, omKeyInfo);
-        store.commitBatchOperation(batch);
-        return new OmMultipartInfo(volumeName, bucketName, keyName,
-            multipartUploadID);
-      }
-    } catch (IOException ex) {
-      LOG.error("Initiate Multipart upload Failed for volume:{} bucket:{} " +
-          "key:{}", volumeName, bucketName, keyName, ex);
-      throw new OMException(ex.getMessage(),
-          ResultCodes.INITIATE_MULTIPART_UPLOAD_ERROR);
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-  }
-
-  private List<OzoneAcl> getAclsForKey(OmKeyArgs keyArgs,
-      OmBucketInfo bucketInfo) {
-    List<OzoneAcl> acls = new ArrayList<>();
-
-    if(keyArgs.getAcls() != null) {
-      acls.addAll(keyArgs.getAcls());
-    }
-
-    // Inherit DEFAULT acls from prefix.
-    if(prefixManager != null) {
-      List<OmPrefixInfo> prefixList = prefixManager.getLongestPrefixPath(
-          OZONE_URI_DELIMITER +
-              keyArgs.getVolumeName() + OZONE_URI_DELIMITER +
-              keyArgs.getBucketName() + OZONE_URI_DELIMITER +
-              keyArgs.getKeyName());
-
-      if(prefixList.size() > 0) {
-        // Add all acls from direct parent to key.
-        OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1);
-        if(prefixInfo  != null) {
-          if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls())) {
-            return acls;
-          }
-        }
-      }
-    }
-
-    // Inherit DEFAULT acls from bucket only if DEFAULT acls for
-    // prefix are not set.
-    if (bucketInfo != null) {
-      if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls())) {
-        return acls;
-      }
-    }
-
-    // TODO: do we need to further fallback to volume default ACL
-    return acls;
-  }
-
-  @Override
-  public OmMultipartCommitUploadPartInfo commitMultipartUploadPart(
-      OmKeyArgs omKeyArgs, long clientID) throws IOException {
-    Preconditions.checkNotNull(omKeyArgs);
-    String volumeName = omKeyArgs.getVolumeName();
-    String bucketName = omKeyArgs.getBucketName();
-    String keyName = omKeyArgs.getKeyName();
-    String uploadID = omKeyArgs.getMultipartUploadID();
-    int partNumber = omKeyArgs.getMultipartUploadPartNumber();
-
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName);
-    validateS3Bucket(volumeName, bucketName);
-    String partName;
-    try {
-      String multipartKey = metadataManager.getMultipartKey(volumeName,
-          bucketName, keyName, uploadID);
-      OmMultipartKeyInfo multipartKeyInfo = metadataManager
-          .getMultipartInfoTable().get(multipartKey);
-
-      String openKey = metadataManager.getOpenKey(
-          volumeName, bucketName, keyName, clientID);
-      OmKeyInfo keyInfo = metadataManager.getOpenKeyTable().get(
-          openKey);
-
-      // set the data size and location info list
-      keyInfo.setDataSize(omKeyArgs.getDataSize());
-      keyInfo.updateLocationInfoList(omKeyArgs.getLocationInfoList());
-
-      partName = metadataManager.getOzoneKey(volumeName, bucketName, keyName)
-          + clientID;
-      if (multipartKeyInfo == null) {
-        // This can occur when, between the time the user started uploading
-        // this part and the time its commit happens, the user requested an
-        // abort of the multipart upload. If we just threw an exception, the
-        // data would never be garbage collected, so move this part to the
-        // delete table and then throw the error.
-        RepeatedOmKeyInfo repeatedOmKeyInfo =
-            metadataManager.getDeletedTable().get(partName);
-        repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
-            keyInfo, repeatedOmKeyInfo);
-        metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo);
-        throw new OMException("No such Multipart upload is with specified " +
-            "uploadId " + uploadID, ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-      } else {
-        PartKeyInfo oldPartKeyInfo =
-            multipartKeyInfo.getPartKeyInfo(partNumber);
-        PartKeyInfo.Builder partKeyInfo = PartKeyInfo.newBuilder();
-        partKeyInfo.setPartName(partName);
-        partKeyInfo.setPartNumber(partNumber);
-        partKeyInfo.setPartKeyInfo(keyInfo.getProtobuf());
-        multipartKeyInfo.addPartKeyInfo(partNumber, partKeyInfo.build());
-        if (oldPartKeyInfo == null) {
-          // This is the first time part is being added.
-          DBStore store = metadataManager.getStore();
-          try (BatchOperation batch = store.initBatchOperation()) {
-            metadataManager.getOpenKeyTable().deleteWithBatch(batch, openKey);
-            metadataManager.getMultipartInfoTable().putWithBatch(batch,
-                multipartKey, multipartKeyInfo);
-            store.commitBatchOperation(batch);
-          }
-        } else {
-          // If we already have this part, we are overwriting it.
-          // We need three steps:
-          // 1. Add the old entry to the delete table.
-          // 2. Remove the new entry from the openKey table.
-          // 3. Add the new entry to the list of part keys.
-          DBStore store = metadataManager.getStore();
-          try (BatchOperation batch = store.initBatchOperation()) {
-            OmKeyInfo partKey = OmKeyInfo.getFromProtobuf(
-                oldPartKeyInfo.getPartKeyInfo());
-
-            RepeatedOmKeyInfo repeatedOmKeyInfo =
-                metadataManager.getDeletedTable()
-                    .get(oldPartKeyInfo.getPartName());
-
-            repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
-                partKey, repeatedOmKeyInfo);
-
-            metadataManager.getDeletedTable().putWithBatch(batch,
-                oldPartKeyInfo.getPartName(),
-                repeatedOmKeyInfo);
-            metadataManager.getOpenKeyTable().deleteWithBatch(batch, openKey);
-            metadataManager.getMultipartInfoTable().putWithBatch(batch,
-                multipartKey, multipartKeyInfo);
-            store.commitBatchOperation(batch);
-          }
-        }
-      }
-    } catch (IOException ex) {
-      LOG.error("Upload part Failed: volume:{} bucket:{} " +
-          "key:{} PartNumber: {}", volumeName, bucketName, keyName,
-          partNumber, ex);
-      throw new OMException(ex.getMessage(),
-          ResultCodes.MULTIPART_UPLOAD_PARTFILE_ERROR);
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-
-    return new OmMultipartCommitUploadPartInfo(partName);
-
-  }
-
-  @Override
-  @SuppressWarnings("methodlength")
-  public OmMultipartUploadCompleteInfo completeMultipartUpload(
-      OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList)
-      throws IOException {
-    Preconditions.checkNotNull(omKeyArgs);
-    Preconditions.checkNotNull(multipartUploadList);
-    String volumeName = omKeyArgs.getVolumeName();
-    String bucketName = omKeyArgs.getBucketName();
-    String keyName = omKeyArgs.getKeyName();
-    String uploadID = omKeyArgs.getMultipartUploadID();
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName);
-    validateS3Bucket(volumeName, bucketName);
-    try {
-      String multipartKey = metadataManager.getMultipartKey(volumeName,
-          bucketName, keyName, uploadID);
-      String ozoneKey = metadataManager.getOzoneKey(volumeName, bucketName,
-          keyName);
-      OmKeyInfo keyInfo = metadataManager.getKeyTable().get(ozoneKey);
-
-      OmMultipartKeyInfo multipartKeyInfo = metadataManager
-          .getMultipartInfoTable().get(multipartKey);
-      if (multipartKeyInfo == null) {
-        throw new OMException("Complete Multipart Upload Failed: volume: " +
-            volumeName + "bucket: " + bucketName + "key: " + keyName,
-            ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-      }
-      TreeMap<Integer, PartKeyInfo> partKeyInfoMap = multipartKeyInfo
-          .getPartKeyInfoMap();
-
-      TreeMap<Integer, String> multipartMap = multipartUploadList
-          .getMultipartMap();
-
-      // Both maps are sorted, so the last entry in each should have a part
-      // number equal to the size of the map. For example, with part entries
-      // 1, 2, 3, 4, the last entry should be 4 when the complete multipart
-      // upload request arrives; if the last part number is greater or less
-      // than the map size, something is wrong and we throw an error.
-
-      Map.Entry<Integer, String> multipartMapLastEntry = multipartMap
-          .lastEntry();
-      Map.Entry<Integer, PartKeyInfo> partKeyInfoLastEntry = partKeyInfoMap
-          .lastEntry();
-      if (partKeyInfoMap.size() != multipartMap.size()) {
-        throw new OMException("Complete Multipart Upload Failed: volume: " +
-            volumeName + "bucket: " + bucketName + "key: " + keyName,
-            ResultCodes.MISMATCH_MULTIPART_LIST);
-      }
-
-      // The last entry's part number should equal the size of the map;
-      // otherwise some parts are missing even though we got a complete
-      // request.
-      if (multipartMapLastEntry.getKey() != partKeyInfoMap.size() ||
-          partKeyInfoLastEntry.getKey() != partKeyInfoMap.size()) {
-        throw new OMException("Complete Multipart Upload Failed: volume: " +
-            volumeName + "bucket: " + bucketName + "key: " + keyName,
-            ResultCodes.MISSING_UPLOAD_PARTS);
-      }
-      ReplicationType type = partKeyInfoLastEntry.getValue().getPartKeyInfo()
-          .getType();
-      ReplicationFactor factor = partKeyInfoLastEntry.getValue()
-          .getPartKeyInfo().getFactor();
-      List<OmKeyLocationInfo> locations = new ArrayList<>();
-      long size = 0;
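-      // Walk the parts in order, validating each part against the
-      // client-provided list and accumulating the block locations and the
-      // total size of the final key.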
-      int partsCount = 1;
-      int partsMapSize = partKeyInfoMap.size();
-      for (Map.Entry<Integer, PartKeyInfo> partKeyInfoEntry : partKeyInfoMap
-          .entrySet()) {
-        int partNumber = partKeyInfoEntry.getKey();
-        PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
-        // Check that we have all parts to complete the multipart upload and
-        // also that the provided partNames match the actual part names.
-        String providedPartName = multipartMap.get(partNumber);
-        String actualPartName = partKeyInfo.getPartName();
-        if (partNumber == partsCount) {
-          if (!actualPartName.equals(providedPartName)) {
-            throw new OMException("Complete Multipart Upload Failed: volume: " +
-                volumeName + "bucket: " + bucketName + "key: " + keyName,
-                ResultCodes.MISMATCH_MULTIPART_LIST);
-          }
-          OmKeyInfo currentPartKeyInfo = OmKeyInfo
-              .getFromProtobuf(partKeyInfo.getPartKeyInfo());
-          // Check that no part is smaller than the minimum part size
-          // (5 MB); only the last part may be smaller.
-          if (partsCount != partsMapSize &&
-              currentPartKeyInfo.getDataSize() < OM_MULTIPART_MIN_SIZE) {
-            LOG.error("MultipartUpload: " + ozoneKey + "Part number: " +
-                partKeyInfo.getPartNumber() + "size " + currentPartKeyInfo
-                    .getDataSize() + " is less than minimum part size " +
-                OzoneConsts.OM_MULTIPART_MIN_SIZE);
-            throw new OMException("Complete Multipart Upload Failed: Entity " +
-                "too small: volume: " + volumeName + "bucket: " + bucketName
-                + "key: " + keyName, ResultCodes.ENTITY_TOO_SMALL);
-          }
-          // All part keys have only one version.
-          OmKeyLocationInfoGroup currentKeyInfoGroup = currentPartKeyInfo
-              .getKeyLocationVersions().get(0);
-          locations.addAll(currentKeyInfoGroup.getLocationList());
-          size += currentPartKeyInfo.getDataSize();
-        } else {
-          throw new OMException("Complete Multipart Upload Failed: volume: " +
-              volumeName + "bucket: " + bucketName + "key: " + keyName,
-              ResultCodes.MISSING_UPLOAD_PARTS);
-        }
-        partsCount++;
-      }
-      if (keyInfo == null) {
-        // This is a newly added key, it does not have any versions.
-        OmKeyLocationInfoGroup keyLocationInfoGroup = new
-            OmKeyLocationInfoGroup(0, locations);
-        // A newly created key, this is the first version.
-        keyInfo = new OmKeyInfo.Builder()
-            .setVolumeName(omKeyArgs.getVolumeName())
-            .setBucketName(omKeyArgs.getBucketName())
-            .setKeyName(omKeyArgs.getKeyName())
-            .setReplicationFactor(factor)
-            .setReplicationType(type)
-            .setCreationTime(Time.now())
-            .setModificationTime(Time.now())
-            .setDataSize(size)
-            .setOmKeyLocationInfos(
-                Collections.singletonList(keyLocationInfoGroup))
-            .setAcls(omKeyArgs.getAcls()).build();
-      } else {
-        // A version already exists, so we should add this as a new version.
-        // But since versioning is not yet supported, we just follow the
-        // commit key approach. When versioning support comes, we can switch
-        // to keyInfo.addNewVersion(locations);
-        keyInfo.updateLocationInfoList(locations);
-      }
-      DBStore store = metadataManager.getStore();
-      try (BatchOperation batch = store.initBatchOperation()) {
-        // Remove the entry from the multipart table and add an entry to
-        // the key table.
-        metadataManager.getMultipartInfoTable().deleteWithBatch(batch,
-            multipartKey);
-        metadataManager.getKeyTable().putWithBatch(batch,
-            ozoneKey, keyInfo);
-        metadataManager.getOpenKeyTable().deleteWithBatch(batch, multipartKey);
-        store.commitBatchOperation(batch);
-      }
-      return new OmMultipartUploadCompleteInfo(omKeyArgs.getVolumeName(),
-          omKeyArgs.getBucketName(), omKeyArgs.getKeyName(), DigestUtils
-              .sha256Hex(keyName));
-    } catch (OMException ex) {
-      throw ex;
-    } catch (IOException ex) {
-      LOG.error("Complete Multipart Upload Failed: volume: " + volumeName +
-          "bucket: " + bucketName + "key: " + keyName, ex);
-      throw new OMException(ex.getMessage(), ResultCodes
-          .COMPLETE_MULTIPART_UPLOAD_ERROR);
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-  }
-
-  @Override
-  public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException {
-
-    Preconditions.checkNotNull(omKeyArgs);
-    String volumeName = omKeyArgs.getVolumeName();
-    String bucketName = omKeyArgs.getBucketName();
-    String keyName = omKeyArgs.getKeyName();
-    String uploadID = omKeyArgs.getMultipartUploadID();
-    Preconditions.checkNotNull(uploadID, "uploadID cannot be null");
-    validateS3Bucket(volumeName, bucketName);
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName);
-    OmBucketInfo bucketInfo;
-    try {
-      String multipartKey = metadataManager.getMultipartKey(volumeName,
-          bucketName, keyName, uploadID);
-      OmMultipartKeyInfo multipartKeyInfo = metadataManager
-          .getMultipartInfoTable().get(multipartKey);
-      OmKeyInfo openKeyInfo = metadataManager.getOpenKeyTable().get(
-          multipartKey);
-
-      // If there is no entry in openKeyTable, then there is no multipart
-      // upload initiated for this key.
-      if (openKeyInfo == null) {
-        LOG.error("Abort Multipart Upload Failed: volume: " + volumeName +
-            "bucket: " + bucketName + "key: " + keyName + "with error no " +
-            "such uploadID:" + uploadID);
-        throw new OMException("Abort Multipart Upload Failed: volume: " +
-            volumeName + "bucket: " + bucketName + "key: " + keyName,
-            ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-      } else {
-        // Move all the parts to delete table
-        TreeMap<Integer, PartKeyInfo> partKeyInfoMap = multipartKeyInfo
-            .getPartKeyInfoMap();
-        DBStore store = metadataManager.getStore();
-        try (BatchOperation batch = store.initBatchOperation()) {
-          for (Map.Entry<Integer, PartKeyInfo> partKeyInfoEntry : partKeyInfoMap
-              .entrySet()) {
-            PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
-            OmKeyInfo currentKeyPartInfo = OmKeyInfo.getFromProtobuf(
-                partKeyInfo.getPartKeyInfo());
-
-            RepeatedOmKeyInfo repeatedOmKeyInfo =
-                metadataManager.getDeletedTable()
-                    .get(partKeyInfo.getPartName());
-
-            repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
-                currentKeyPartInfo, repeatedOmKeyInfo);
-
-            metadataManager.getDeletedTable().putWithBatch(batch,
-                partKeyInfo.getPartName(), repeatedOmKeyInfo);
-          }
-          // Finally delete the entry from the multipart info table and open
-          // key table
-          metadataManager.getMultipartInfoTable().deleteWithBatch(batch,
-              multipartKey);
-          metadataManager.getOpenKeyTable().deleteWithBatch(batch,
-              multipartKey);
-          store.commitBatchOperation(batch);
-        }
-      }
-    } catch (OMException ex) {
-      throw ex;
-    } catch (IOException ex) {
-      LOG.error("Abort Multipart Upload Failed: volume: " + volumeName +
-          "bucket: " + bucketName + "key: " + keyName, ex);
-      throw new OMException(ex.getMessage(), ResultCodes
-          .ABORT_MULTIPART_UPLOAD_FAILED);
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-
-  }
-
-  @Override
-  public OmMultipartUploadList listMultipartUploads(String volumeName,
-      String bucketName, String prefix) throws OMException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-
-    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
-        bucketName);
-    try {
-
-      List<String> multipartUploadKeys =
-          metadataManager
-              .getMultipartUploadKeys(volumeName, bucketName, prefix);
-
-      List<OmMultipartUpload> collect = multipartUploadKeys.stream()
-          .map(OmMultipartUpload::from)
-          .map(upload -> {
-            String dbKey = metadataManager
-                .getOzoneKey(upload.getVolumeName(),
-                    upload.getBucketName(),
-                    upload.getKeyName());
-            try {
-              Table<String, OmKeyInfo> openKeyTable =
-                  metadataManager.getOpenKeyTable();
-
-              OmKeyInfo omKeyInfo =
-                  openKeyTable.get(upload.getDbKey());
-
-              upload.setCreationTime(
-                  Instant.ofEpochMilli(omKeyInfo.getCreationTime()));
-
-              upload.setReplicationType(omKeyInfo.getType());
-              upload.setReplicationFactor(omKeyInfo.getFactor());
-            } catch (IOException e) {
-              LOG.warn(
-                  "Open key entry for multipart upload record can be read  {}",
-                  dbKey);
-            }
-            return upload;
-          })
-          .collect(Collectors.toList());
-
-      return new OmMultipartUploadList(collect);
-
-    } catch (IOException ex) {
-      LOG.error("List Multipart Uploads Failed: volume: " + volumeName +
-          "bucket: " + bucketName + "prefix: " + prefix, ex);
-      throw new OMException(ex.getMessage(), ResultCodes
-          .LIST_MULTIPART_UPLOAD_PARTS_FAILED);
-    } finally {
-      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-  }
-
-  @Override
-  public OmMultipartUploadListParts listParts(String volumeName,
-      String bucketName, String keyName, String uploadID,
-      int partNumberMarker, int maxParts)  throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-    Preconditions.checkNotNull(keyName);
-    Preconditions.checkNotNull(uploadID);
-    boolean isTruncated = false;
-    int nextPartNumberMarker = 0;
-
-    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
-        bucketName);
-    try {
-      String multipartKey = metadataManager.getMultipartKey(volumeName,
-          bucketName, keyName, uploadID);
-
-      OmMultipartKeyInfo multipartKeyInfo =
-          metadataManager.getMultipartInfoTable().get(multipartKey);
-
-      if (multipartKeyInfo == null) {
-        throw new OMException("No Such Multipart upload exists for this key.",
-            ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-      } else {
-        TreeMap<Integer, PartKeyInfo> partKeyInfoMap =
-            multipartKeyInfo.getPartKeyInfoMap();
-        Iterator<Map.Entry<Integer, PartKeyInfo>> partKeyInfoMapIterator =
-            partKeyInfoMap.entrySet().iterator();
-
-        HddsProtos.ReplicationType replicationType = null;
-        HddsProtos.ReplicationFactor replicationFactor = null;
-
-        int count = 0;
-        List<OmPartInfo> omPartInfoList = new ArrayList<>();
-
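-        // Collect at most maxParts parts whose part number is greater than
-        // partNumberMarker; nextPartNumberMarker tracks the last part
-        // number visited so the client can page through the remainder.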
-        while (count < maxParts && partKeyInfoMapIterator.hasNext()) {
-          Map.Entry<Integer, PartKeyInfo> partKeyInfoEntry =
-              partKeyInfoMapIterator.next();
-          nextPartNumberMarker = partKeyInfoEntry.getKey();
-          // We should return only parts with a part number greater than
-          // the part number marker.
-          if (partKeyInfoEntry.getKey() > partNumberMarker) {
-            PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
-            OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(),
-                partKeyInfo.getPartName(),
-                partKeyInfo.getPartKeyInfo().getModificationTime(),
-                partKeyInfo.getPartKeyInfo().getDataSize());
-            omPartInfoList.add(omPartInfo);
-
-            // If there are parts, use the replication type from one of them.
-            replicationType = partKeyInfo.getPartKeyInfo().getType();
-            replicationFactor = partKeyInfo.getPartKeyInfo().getFactor();
-            count++;
-          }
-        }
-
-        if (replicationType == null) {
-          // If there are no parts, use the replication type from the open
-          // key.
-          OmKeyInfo omKeyInfo =
-              metadataManager.getOpenKeyTable().get(multipartKey);
-
-          if (omKeyInfo == null) {
-            throw new IllegalStateException(
-                "Open key is missing for multipart upload " + multipartKey);
-          }
-
-          replicationType = omKeyInfo.getType();
-          replicationFactor = omKeyInfo.getFactor();
-        }
-        Preconditions.checkNotNull(replicationType,
-            "Replication type can't be identified");
-        Preconditions.checkNotNull(replicationFactor,
-            "Replication factor can't be identified");
-
-        if (partKeyInfoMapIterator.hasNext()) {
-          isTruncated = true;
-        } else {
-          isTruncated = false;
-          nextPartNumberMarker = 0;
-        }
-        OmMultipartUploadListParts omMultipartUploadListParts =
-            new OmMultipartUploadListParts(replicationType, replicationFactor,
-                nextPartNumberMarker, isTruncated);
-        omMultipartUploadListParts.addPartList(omPartInfoList);
-        return omMultipartUploadListParts;
-      }
-    } catch (OMException ex) {
-      throw ex;
-    } catch (IOException ex) {
-      LOG.error(
-          "List Multipart Upload Parts Failed: volume: {}, bucket: {}, "
-              + "key: {}",
-          volumeName, bucketName, keyName, ex);
-      throw new OMException(ex.getMessage(), ResultCodes
-              .LIST_MULTIPART_UPLOAD_PARTS_FAILED);
-    } finally {
-      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-  }
-
-  /**
-   * Add acl for Ozone object. Return true if acl is added successfully else
-   * false.
-   *
-   * @param obj Ozone object for which acl should be added.
-   * @param acl ozone acl to be added.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    validateOzoneObj(obj);
-    String volume = obj.getVolumeName();
-    String bucket = obj.getBucketName();
-    String keyName = obj.getKeyName();
-    boolean changed = false;
-
-
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket);
-    try {
-      validateBucket(volume, bucket);
-      String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
-      OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
-      if (keyInfo == null) {
-        throw new OMException("Key not found. Key:" + objectKey, KEY_NOT_FOUND);
-      }
-
-      if (keyInfo.getAcls() == null) {
-        keyInfo.setAcls(new ArrayList<>());
-      }
-      changed = keyInfo.addAcl(acl);
-      if (changed) {
-        metadataManager.getKeyTable().put(objectKey, keyInfo);
-      }
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Add acl operation failed for key:{}/{}/{}", volume,
-            bucket, keyName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket);
-    }
-    return changed;
-  }
-
-  /**
-   * Remove acl for Ozone object. Return true if acl is removed successfully
-   * else false.
-   *
-   * @param obj Ozone object.
-   * @param acl Ozone acl to be removed.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    validateOzoneObj(obj);
-    String volume = obj.getVolumeName();
-    String bucket = obj.getBucketName();
-    String keyName = obj.getKeyName();
-    boolean changed = false;
-
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket);
-    try {
-      validateBucket(volume, bucket);
-      String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
-      OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
-      if (keyInfo == null) {
-        throw new OMException("Key not found. Key:" + objectKey, KEY_NOT_FOUND);
-      }
-
-      changed = keyInfo.removeAcl(acl);
-      if (changed) {
-        metadataManager.getKeyTable().put(objectKey, keyInfo);
-      }
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Remove acl operation failed for key:{}/{}/{}", volume,
-            bucket, keyName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket);
-    }
-    return changed;
-  }
-
-  /**
-   * Set acls for the given Ozone object. This operation resets the ACL of
-   * the object to the list of ACLs provided in the argument.
-   *
-   * @param obj Ozone object.
-   * @param acls List of acls.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException {
-    validateOzoneObj(obj);
-    String volume = obj.getVolumeName();
-    String bucket = obj.getBucketName();
-    String keyName = obj.getKeyName();
-    boolean changed = false;
-
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket);
-    try {
-      validateBucket(volume, bucket);
-      String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
-      OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
-      if (keyInfo == null) {
-        throw new OMException("Key not found. Key:" + objectKey, KEY_NOT_FOUND);
-      }
-
-      changed = keyInfo.setAcls(acls);
-
-      if (changed) {
-        metadataManager.getKeyTable().put(objectKey, keyInfo);
-      }
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Set acl operation failed for key:{}/{}/{}", volume,
-            bucket, keyName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket);
-    }
-    return changed;
-  }
-
-  /**
-   * Returns list of ACLs for given Ozone object.
-   *
-   * @param obj Ozone object.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public List<OzoneAcl> getAcl(OzoneObj obj) throws IOException {
-    validateOzoneObj(obj);
-    String volume = obj.getVolumeName();
-    String bucket = obj.getBucketName();
-    String keyName = obj.getKeyName();
-
-    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket);
-    try {
-      validateBucket(volume, bucket);
-      String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
-      OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
-      if (keyInfo == null) {
-        throw new OMException("Key not found. Key:" + objectKey, KEY_NOT_FOUND);
-      }
-
-      return keyInfo.getAcls();
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Get acl operation failed for key:{}/{}/{}", volume,
-            bucket, keyName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volume, bucket);
-    }
-  }
-
-  /**
-   * Check access for given ozoneObject.
-   *
-   * @param ozObject object for which access needs to be checked.
-   * @param context Context object encapsulating all user related information.
-   * @return true if user has access else false.
-   */
-  @Override
-  public boolean checkAccess(OzoneObj ozObject, RequestContext context)
-      throws OMException {
-    Objects.requireNonNull(ozObject);
-    Objects.requireNonNull(context);
-    Objects.requireNonNull(context.getClientUgi());
-
-    String volume = ozObject.getVolumeName();
-    String bucket = ozObject.getBucketName();
-    String keyName = ozObject.getKeyName();
-    String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
-    OmKeyArgs args = new OmKeyArgs.Builder()
-        .setVolumeName(volume)
-        .setBucketName(bucket)
-        .setKeyName(keyName)
-        .build();
-
-    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket);
-    try {
-      validateBucket(volume, bucket);
-      OmKeyInfo keyInfo = null;
-      try {
-        OzoneFileStatus fileStatus = getFileStatus(args);
-        keyInfo = fileStatus.getKeyInfo();
-        if (keyInfo == null) {
-          // the key does not exist, but it is a parent "dir" of some key
-          // let access be determined based on volume/bucket/prefix ACL
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("key:{} is non-existent parent, permit access to user:{}",
-                keyName, context.getClientUgi());
-          }
-          return true;
-        }
-      } catch (OMException e) {
-        if (e.getResult() == FILE_NOT_FOUND) {
-          keyInfo = metadataManager.getOpenKeyTable().get(objectKey);
-        }
-      }
-
-      if (keyInfo == null) {
-        throw new OMException("Key not found, checkAccess failed. Key:" +
-            objectKey, KEY_NOT_FOUND);
-      }
-
-      boolean hasAccess = OzoneAclUtil.checkAclRight(
-          keyInfo.getAcls(), context);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("user:{} has access rights for key:{} :{} ",
-            context.getClientUgi(), ozObject.getKeyName(), hasAccess);
-      }
-      return hasAccess;
-    } catch (IOException ex) {
-      if (ex instanceof OMException) {
-        throw (OMException) ex;
-      }
-      LOG.error("CheckAccess operation failed for key:{}/{}/{}", volume,
-          bucket, keyName, ex);
-      throw new OMException("Check access operation failed for " +
-          "key:" + keyName, ex, INTERNAL_ERROR);
-    } finally {
-      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volume, bucket);
-    }
-  }
-
-  /**
-   * Helper method to validate ozone object.
-   * @param obj Ozone object to validate.
-   */
-  private void validateOzoneObj(OzoneObj obj) throws OMException {
-    Objects.requireNonNull(obj);
-
-    if (!obj.getResourceType().equals(KEY)) {
-      throw new IllegalArgumentException("Unexpected argument passed to " +
-          "KeyManager. OzoneObj type:" + obj.getResourceType());
-    }
-    String volume = obj.getVolumeName();
-    String bucket = obj.getBucketName();
-    String keyName = obj.getKeyName();
-
-    if (Strings.isNullOrEmpty(volume)) {
-      throw new OMException("Volume name is required.", VOLUME_NOT_FOUND);
-    }
-    if (Strings.isNullOrEmpty(bucket)) {
-      throw new OMException("Bucket name is required.", BUCKET_NOT_FOUND);
-    }
-    if (Strings.isNullOrEmpty(keyName)) {
-      throw new OMException("Key name is required.", KEY_NOT_FOUND);
-    }
-  }
-
-  /**
-   * OzoneFS api to get file status for an entry.
-   *
-   * @param args Key args
-   * @throws OMException if the file does not exist;
-   *                     if the bucket does not exist
-   * @throws IOException if there is an error in the db;
-   *                     if the arguments are invalid
-   */
-  public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException {
-    Preconditions.checkNotNull(args, "Key args can not be null");
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-
-    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
-        bucketName);
-    try {
-      // Check if this is the root of the filesystem.
-      if (keyName.length() == 0) {
-        validateBucket(volumeName, bucketName);
-        return new OzoneFileStatus(OZONE_URI_DELIMITER);
-      }
-
-      // Check if the key is a file.
-      String fileKey = metadataManager.getOzoneKey(
-          volumeName, bucketName, keyName);
-      OmKeyInfo fileKeyInfo = metadataManager.getKeyTable().get(fileKey);
-      if (fileKeyInfo != null) {
-        // this is a file
-        return new OzoneFileStatus(fileKeyInfo, scmBlockSize, false);
-      }
-
-      String dirKey = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
-      String dirDbKey = metadataManager.getOzoneKey(
-          volumeName, bucketName, dirKey);
-      OmKeyInfo dirKeyInfo = metadataManager.getKeyTable().get(dirDbKey);
-      if (dirKeyInfo != null) {
-        return new OzoneFileStatus(dirKeyInfo, scmBlockSize, true);
-      }
-
-      List<OmKeyInfo> keys = metadataManager.listKeys(volumeName, bucketName,
-          null, dirKey, 1);
-      if (!keys.isEmpty()) {
-        return new OzoneFileStatus(keyName);
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Unable to get file status for the key: volume: {}, bucket:" +
-                " {}, key: {}, with error: No such file exists.", volumeName,
-            bucketName, keyName);
-      }
-      throw new OMException("Unable to get file status: volume: " +
-          volumeName + " bucket: " + bucketName + " key: " + keyName,
-          FILE_NOT_FOUND);
-    } finally {
-      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-  }
-
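The three-step resolution above is how OzoneFS maps a flat key space onto a filesystem tree: an exact key match is a file, a match on the key plus a trailing slash is an explicit directory marker, and the existence of any key under that prefix makes the key an implicit directory. A minimal, self-contained sketch of the same resolution order, using a TreeMap in place of the RocksDB key table (all names here are illustrative, not Ozone's API):

```java
import java.util.TreeMap;

public class FileStatusLookupDemo {
  enum Kind { FILE, EXPLICIT_DIR, IMPLICIT_DIR, NOT_FOUND }

  static Kind resolve(TreeMap<String, String> keyTable, String key) {
    if (keyTable.containsKey(key)) {
      return Kind.FILE;                        // exact match: a file
    }
    String dirKey = key.endsWith("/") ? key : key + "/";
    if (keyTable.containsKey(dirKey)) {
      return Kind.EXPLICIT_DIR;                // trailing-slash marker exists
    }
    // Stand-in for "list at most one key with this prefix" in the code above.
    String next = keyTable.ceilingKey(dirKey);
    if (next != null && next.startsWith(dirKey)) {
      return Kind.IMPLICIT_DIR;                // some descendant exists
    }
    return Kind.NOT_FOUND;
  }

  public static void main(String[] args) {
    TreeMap<String, String> table = new TreeMap<>();
    table.put("a/b/c", "key-info");
    System.out.println(resolve(table, "a/b"));   // IMPLICIT_DIR
    System.out.println(resolve(table, "a/b/c")); // FILE
    System.out.println(resolve(table, "a/x"));   // NOT_FOUND
  }
}
```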
-  /**
-   * Ozone FS api to create a directory. Parent directories of the input
-   * directory are created if they do not exist.
-   *
-   * @param args Key args
-   * @throws OMException if any entry in the path exists as a file;
-   *                     if the bucket does not exist
-   * @throws IOException if there is an error in the db;
-   *                     if the arguments are invalid
-   */
-  public void createDirectory(OmKeyArgs args) throws IOException {
-    Preconditions.checkNotNull(args, "Key args can not be null");
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName);
-    try {
-
-      // Check if this is the root of the filesystem.
-      if (keyName.length() == 0) {
-        return;
-      }
-
-      Path keyPath = Paths.get(keyName);
-      OzoneFileStatus status =
-          verifyNoFilesInPath(volumeName, bucketName, keyPath, false);
-      if (status != null && OzoneFSUtils.pathToKey(status.getPath())
-          .equals(keyName)) {
-        // if directory already exists
-        return;
-      }
-      OmKeyInfo dirDbKeyInfo =
-          createDirectoryKey(volumeName, bucketName, keyName, args.getAcls());
-      String dirDbKey = metadataManager
-          .getOzoneKey(volumeName, bucketName, dirDbKeyInfo.getKeyName());
-      metadataManager.getKeyTable().put(dirDbKey, dirDbKeyInfo);
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-  }
-
-  private OmKeyInfo createDirectoryKey(String volumeName, String bucketName,
-      String keyName, List<OzoneAcl> acls) throws IOException {
-    // verify bucket exists
-    OmBucketInfo bucketInfo = getBucketInfo(volumeName, bucketName);
-
-    String dir = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
-    FileEncryptionInfo encInfo = getFileEncryptionInfo(bucketInfo);
-    return new OmKeyInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(dir)
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
-        .setCreationTime(Time.now())
-        .setModificationTime(Time.now())
-        .setDataSize(0)
-        .setReplicationType(ReplicationType.RATIS)
-        .setReplicationFactor(ReplicationFactor.ONE)
-        .setFileEncryptionInfo(encInfo)
-        .setAcls(acls)
-        .build();
-  }
-
-  /**
-   * OzoneFS api to create an output stream for a file.
-   *
-   * @param args        Key args
-   * @param isOverWrite if true, an existing file at the location will be
-   *                    overwritten
-   * @param isRecursive if true, the file is created even if the parent
-   *                    directories do not exist
-   * @throws OMException if the given key is a directory;
-   *                     if the file exists and the isOverWrite flag is false;
-   *                     if an ancestor exists as a file;
-   *                     if the bucket does not exist
-   * @throws IOException if there is an error in the db;
-   *                     if the arguments are invalid
-   */
-  @Override
-  public OpenKeySession createFile(OmKeyArgs args, boolean isOverWrite,
-      boolean isRecursive) throws IOException {
-    Preconditions.checkNotNull(args, "Key args can not be null");
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    OpenKeySession keySession;
-
-    metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName);
-    try {
-      OzoneFileStatus fileStatus;
-      try {
-        fileStatus = getFileStatus(args);
-        if (fileStatus.isDirectory()) {
-          throw new OMException("Can not write to directory: " + keyName,
-              ResultCodes.NOT_A_FILE);
-        } else if (fileStatus.isFile()) {
-          if (!isOverWrite) {
-            throw new OMException("File " + keyName + " already exists",
-                ResultCodes.FILE_ALREADY_EXISTS);
-          }
-        }
-      } catch (OMException ex) {
-        if (ex.getResult() != FILE_NOT_FOUND) {
-          throw ex;
-        }
-      }
-
-      verifyNoFilesInPath(volumeName, bucketName,
-          Paths.get(keyName).getParent(), !isRecursive);
-      // TODO: Optimize call to openKey as keyInfo is already available in the
-      // filestatus. We can avoid some operations in openKey call.
-      keySession = openKey(args);
-    } finally {
-      metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-
-    return keySession;
-  }
-
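The pre-checks in createFile() reduce to a small decision table: a directory at the target always fails, an existing file fails unless overwrite is permitted, and FILE_NOT_FOUND means creation can proceed (subject to the ancestor check). A hedged sketch of just that table, with hypothetical names standing in for the OM result codes:

```java
public class CreateFileRulesDemo {
  enum Existing { NONE, FILE, DIRECTORY }

  static boolean mayCreate(Existing target, boolean isOverWrite) {
    switch (target) {
      case DIRECTORY:
        throw new IllegalStateException("NOT_A_FILE");  // never writable
      case FILE:
        if (!isOverWrite) {
          throw new IllegalStateException("FILE_ALREADY_EXISTS");
        }
        return true;                                    // overwrite allowed
      default:
        return true;                                    // nothing in the way
    }
  }

  public static void main(String[] args) {
    System.out.println(mayCreate(Existing.NONE, false));  // true
    System.out.println(mayCreate(Existing.FILE, true));   // true
  }
}
```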
-  /**
-   * OzoneFS api to lookup for a file.
-   *
-   * @param args Key args
-   * @throws OMException if the given key is not found or is not a file;
-   *                     if the bucket does not exist
-   * @throws IOException if there is an error in the db;
-   *                     if the arguments are invalid
-   */
-  @Override
-  public OmKeyInfo lookupFile(OmKeyArgs args, String clientAddress)
-      throws IOException {
-    Preconditions.checkNotNull(args, "Key args can not be null");
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-
-    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
-        bucketName);
-    try {
-      OzoneFileStatus fileStatus = getFileStatus(args);
-      if (fileStatus.isFile()) {
-        if (args.getSortDatanodes()) {
-          sortDatanodeInPipeline(fileStatus.getKeyInfo(), clientAddress);
-        }
-        return fileStatus.getKeyInfo();
-      }
-      // if the key is not found or is not a file, we throw an exception
-    } finally {
-      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-
-    throw new OMException("Can not write to directory: " + keyName,
-        ResultCodes.NOT_A_FILE);
-  }
-
-  /**
-   * List the status for a file or a directory and its contents.
-   *
-   * @param args       Key args
-   * @param recursive  For a directory if true all the descendants of a
-   *                   particular directory are listed
-   * @param startKey   Key from which listing needs to start. If startKey exists
-   *                   its status is included in the final list.
-   * @param numEntries Number of entries to list from the start key
-   * @return list of file status
-   */
-  public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive,
-      String startKey, long numEntries) throws IOException {
-    Preconditions.checkNotNull(args, "Key args can not be null");
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-
-    List<OzoneFileStatus> fileStatusList = new ArrayList<>();
-    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
-        bucketName);
-    try {
-      if (Strings.isNullOrEmpty(startKey)) {
-        OzoneFileStatus fileStatus = getFileStatus(args);
-        if (fileStatus.isFile()) {
-          return Collections.singletonList(fileStatus);
-        }
-        startKey = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
-      }
-
-      String seekKeyInDb =
-          metadataManager.getOzoneKey(volumeName, bucketName, startKey);
-      String keyInDb = OzoneFSUtils.addTrailingSlashIfNeeded(
-          metadataManager.getOzoneKey(volumeName, bucketName, keyName));
-      TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
-          iterator = metadataManager.getKeyTable().iterator();
-      iterator.seek(seekKeyInDb);
-
-      if (!iterator.hasNext()) {
-        return Collections.emptyList();
-      }
-
-      if (iterator.key().equals(keyInDb)) {
-        // skip the key which needs to be listed
-        iterator.next();
-      }
-
-      while (iterator.hasNext() && numEntries - fileStatusList.size() > 0) {
-        String entryInDb = iterator.key();
-        OmKeyInfo value = iterator.value().getValue();
-        if (entryInDb.startsWith(keyInDb)) {
-          String entryKeyName = value.getKeyName();
-          if (recursive) {
-            // for recursive list all the entries
-            fileStatusList.add(new OzoneFileStatus(value, scmBlockSize,
-                !OzoneFSUtils.isFile(entryKeyName)));
-            iterator.next();
-          } else {
-            // get the child of the directory to list from the entry. For
-            // example, if the directory to list is /a and the entry is
-            // /a/b/c where c is a file, the immediate child is b, which is
-            // a directory; c should not be listed as a child of a.
-            String immediateChild = OzoneFSUtils
-                .getImmediateChild(entryKeyName, keyName);
-            boolean isFile = OzoneFSUtils.isFile(immediateChild);
-            if (isFile) {
-              fileStatusList
-                  .add(new OzoneFileStatus(value, scmBlockSize, !isFile));
-              iterator.next();
-            } else {
-              // if entry is a directory
-              fileStatusList.add(new OzoneFileStatus(immediateChild));
-              // skip the other descendants of this child directory.
-              iterator.seek(
-                  getNextGreaterString(volumeName, bucketName, immediateChild));
-            }
-          }
-        } else {
-          break;
-        }
-      }
-    } finally {
-      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-    return fileStatusList;
-  }
-
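In the non-recursive branch above, a deep descendant such as /a/b/c is collapsed to the listing root's immediate child /a/b/, and the iterator then seeks past the child's whole subtree. A standalone sketch of the collapse step (this getImmediateChild is illustrative; Ozone's OzoneFSUtils version may differ on edge cases):

```java
public class ImmediateChildDemo {
  // Assumes entry starts with the root prefix, as guaranteed by the caller.
  static String getImmediateChild(String entry, String root) {
    String prefix = root.isEmpty() || root.endsWith("/") ? root : root + "/";
    String rest = entry.substring(prefix.length());
    int slash = rest.indexOf('/');
    // A trailing slash marks the child as a directory, mirroring the key scheme.
    return slash < 0 ? prefix + rest : prefix + rest.substring(0, slash + 1);
  }

  public static void main(String[] args) {
    System.out.println(getImmediateChild("a/b/c", "a")); // a/b/  (a directory)
    System.out.println(getImmediateChild("a/d", "a"));   // a/d   (a file)
  }
}
```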
-  private String getNextGreaterString(String volumeName, String bucketName,
-      String keyPrefix) throws IOException {
-    // Increment the last character of the string and return the new ozone key.
-    Preconditions.checkArgument(!Strings.isNullOrEmpty(keyPrefix),
-        "Key prefix is null or empty");
-    CodecRegistry codecRegistry =
-        ((RDBStore) metadataManager.getStore()).getCodecRegistry();
-    byte[] keyPrefixInBytes = codecRegistry.asRawData(keyPrefix);
-    keyPrefixInBytes[keyPrefixInBytes.length - 1]++;
-    String nextPrefix = codecRegistry.asObject(keyPrefixInBytes, String.class);
-    return metadataManager.getOzoneKey(volumeName, bucketName, nextPrefix);
-  }
-
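Incrementing the last byte of the prefix yields the smallest key that sorts after everything sharing that prefix, which is what lets listStatus() skip an entire subtree with one seek. A standalone sketch of the trick (note it shares the real code's assumption that the last byte is neither 0xFF nor part of a multi-byte UTF-8 character):

```java
import java.nio.charset.StandardCharsets;

public class PrefixSuccessorDemo {
  static String nextGreaterPrefix(String prefix) {
    byte[] bytes = prefix.getBytes(StandardCharsets.UTF_8);
    bytes[bytes.length - 1]++;   // assumed not to overflow past 0xFF
    return new String(bytes, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    // "/a/b" sorts before "/a/b/c" and "/a/b/z", while "/a/c" sorts after
    // both, so seeking to it skips everything under the "/a/b" subtree.
    System.out.println(nextGreaterPrefix("/a/b"));  // prints /a/c
  }
}
```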
-  /**
-   * Verify that none of the parent path exists as file in the filesystem.
-   *
-   * @param volumeName         Volume name
-   * @param bucketName         Bucket name
-   * @param path               Directory path. This is the absolute path of the
-   *                           directory for the ozone filesystem.
-   * @param directoryMustExist throws exception if true and given path does not
-   *                           exist as directory
-   * @return OzoneFileStatus of the first directory found in path in reverse
-   * order
-   * @throws OMException if an ancestor exists as a file in the filesystem;
-   *                     if the directoryMustExist flag is true and the
-   *                     parent does not exist;
-   *                     if the bucket does not exist
-   * @throws IOException if there is an error in the db;
-   *                     if the arguments are invalid
-   */
-  private OzoneFileStatus verifyNoFilesInPath(String volumeName,
-      String bucketName, Path path, boolean directoryMustExist)
-      throws IOException {
-    OmKeyArgs.Builder argsBuilder = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName);
-    while (path != null) {
-      String keyName = path.toString();
-      try {
-        OzoneFileStatus fileStatus =
-            getFileStatus(argsBuilder.setKeyName(keyName).build());
-        if (fileStatus.isFile()) {
-          LOG.error("Unable to create directory (File already exists): volume: "
-              + volumeName + "bucket: " + bucketName + "key: " + keyName);
-          throw new OMException(
-              "Unable to create directory at : volume: " + volumeName
-                  + "bucket: " + bucketName + "key: " + keyName,
-              ResultCodes.FILE_ALREADY_EXISTS);
-        } else if (fileStatus.isDirectory()) {
-          return fileStatus;
-        }
-      } catch (OMException ex) {
-        if (ex.getResult() != FILE_NOT_FOUND) {
-          throw ex;
-        } else if (directoryMustExist) {
-          // getResult() is FILE_NOT_FOUND here: the parent does not exist
-          throw new OMException("Parent directory does not exist",
-              ex.getCause(), DIRECTORY_NOT_FOUND);
-        }
-      }
-      path = path.getParent();
-    }
-    return null;
-  }
-
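The loop above walks toward the root via Path.getParent(), probing each ancestor so that a file "squatting" on the directory path is detected before any keys are written. A minimal sketch of the same walk against an in-memory set of existing file keys (the names and unix-style separators are assumptions of this demo):

```java
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Set;

public class ParentWalkDemo {
  public static void main(String[] args) {
    Set<String> existingFiles = Set.of("a/b");     // "a/b" already exists as a file
    Path path = Paths.get("a/b/c/d").getParent();  // start at the parent, as above
    while (path != null) {
      if (existingFiles.contains(path.toString())) {
        // The real code throws FILE_ALREADY_EXISTS at this point.
        System.out.println("ancestor exists as a file: " + path);
        break;
      }
      path = path.getParent();                     // step one level up
    }
  }
}
```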
-  private FileEncryptionInfo getFileEncryptionInfo(OmBucketInfo bucketInfo)
-      throws IOException {
-    FileEncryptionInfo encInfo = null;
-    BucketEncryptionKeyInfo ezInfo = bucketInfo.getEncryptionKeyInfo();
-    if (ezInfo != null) {
-      if (getKMSProvider() == null) {
-        throw new OMException("Invalid KMS provider, check configuration " +
-            HADOOP_SECURITY_KEY_PROVIDER_PATH,
-            INVALID_KMS_PROVIDER);
-      }
-
-      final String ezKeyName = ezInfo.getKeyName();
-      EncryptedKeyVersion edek = generateEDEK(ezKeyName);
-      encInfo = new FileEncryptionInfo(ezInfo.getSuite(), ezInfo.getVersion(),
-          edek.getEncryptedKeyVersion().getMaterial(),
-          edek.getEncryptedKeyIv(),
-          ezKeyName, edek.getEncryptionKeyVersionName());
-    }
-    return encInfo;
-  }
-
-  private void sortDatanodeInPipeline(OmKeyInfo keyInfo, String clientMachine) {
-    if (keyInfo != null && clientMachine != null && !clientMachine.isEmpty()) {
-      for (OmKeyLocationInfoGroup key : keyInfo.getKeyLocationVersions()) {
-        key.getLocationList().forEach(k -> {
-          List<DatanodeDetails> nodes = k.getPipeline().getNodes();
-          if (nodes == null || nodes.isEmpty()) {
-            LOG.warn("Datanode list for pipeline {} is empty",
-                k.getPipeline().getId().toString());
-            return;
-          }
-          List<String> nodeList = new ArrayList<>();
-          nodes.forEach(node -> nodeList.add(node.getUuidString()));
-          try {
-            List<DatanodeDetails> sortedNodes = scmClient.getBlockClient()
-                .sortDatanodes(nodeList, clientMachine);
-            k.getPipeline().setNodesInOrder(sortedNodes);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Sort datanodes {} for client {}, return {}", nodes,
-                  clientMachine, sortedNodes);
-            }
-          } catch (IOException e) {
-            LOG.warn("Unable to sort datanodes based on distance to " +
-                "client, volume=" + keyInfo.getVolumeName() +
-                ", bucket=" + keyInfo.getBucketName() +
-                ", key=" + keyInfo.getKeyName() +
-                ", client=" + clientMachine +
-                ", datanodes=" + nodes.toString() +
-                ", exception=" + e.getMessage());
-          }
-        });
-      }
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java
deleted file mode 100644
index 8103183..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_INDEX;
-import static org.apache.hadoop.ozone.OzoneConsts.
-    OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
-
-import java.io.IOException;
-import java.nio.file.Path;
-import java.time.Duration;
-import java.time.Instant;
-
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.util.DataTransferThrottler;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Provides the current checkpoint snapshot of the OM DB as a tar.gz archive.
- */
-public class OMDBCheckpointServlet extends HttpServlet {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMDBCheckpointServlet.class);
-  private static final long serialVersionUID = 1L;
-
-  private transient OzoneManager om;
-  private transient DBStore omDbStore;
-  private transient OMMetrics omMetrics;
-  private transient DataTransferThrottler throttler = null;
-
-  @Override
-  public void init() throws ServletException {
-
-    om = (OzoneManager) getServletContext()
-        .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
-
-    if (om == null) {
-      LOG.error("Unable to initialize OMDBCheckpointServlet. OM is null");
-      return;
-    }
-
-    omDbStore = om.getMetadataManager().getStore();
-    omMetrics = om.getMetrics();
-
-    OzoneConfiguration configuration = om.getConfiguration();
-    long transferBandwidth = configuration.getLongBytes(
-        OMConfigKeys.OZONE_DB_CHECKPOINT_TRANSFER_RATE_KEY,
-        OMConfigKeys.OZONE_DB_CHECKPOINT_TRANSFER_RATE_DEFAULT);
-
-    if (transferBandwidth > 0) {
-      throttler = new DataTransferThrottler(transferBandwidth);
-    }
-  }
-
-  /**
-   * Process a GET request for the Ozone Manager DB checkpoint snapshot.
-   *
-   * @param request  The servlet request we are processing
-   * @param response The servlet response we are creating
-   */
-  @Override
-  public void doGet(HttpServletRequest request, HttpServletResponse response) {
-
-    LOG.info("Received request to obtain OM DB checkpoint snapshot");
-    if (omDbStore == null) {
-      LOG.error(
-          "Unable to process metadata snapshot request. DB Store is null");
-      response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
-      return;
-    }
-
-    DBCheckpoint checkpoint = null;
-    try {
-
-      boolean flush = false;
-      String flushParam =
-          request.getParameter(OZONE_DB_CHECKPOINT_REQUEST_FLUSH);
-      if (StringUtils.isNotEmpty(flushParam)) {
-        flush = Boolean.parseBoolean(flushParam);
-      }
-
-      boolean takeRatisSnapshot = false;
-      String snapshotBeforeCheckpointParam =
-          request.getParameter(OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT);
-      if (StringUtils.isNotEmpty(snapshotBeforeCheckpointParam)) {
-        takeRatisSnapshot = Boolean.parseBoolean(snapshotBeforeCheckpointParam);
-      }
-
-      long ratisSnapshotIndex;
-      if (takeRatisSnapshot) {
-        // If an OM follower is downloading the checkpoint, save a Ratis
-        // snapshot first. This step also includes flushing the OM DB, so
-        // flush can be set to false.
-        flush = false;
-        ratisSnapshotIndex = om.saveRatisSnapshot();
-      } else {
-        ratisSnapshotIndex = om.getRatisSnapshotIndex();
-      }
-
-      checkpoint = omDbStore.getCheckpoint(flush);
-      if (checkpoint == null || checkpoint.getCheckpointLocation() == null) {
-        LOG.error("Unable to process metadata snapshot request. " +
-            "Checkpoint request returned null.");
-        response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
-        return;
-      }
-      omMetrics.setLastCheckpointCreationTimeTaken(
-          checkpoint.checkpointCreationTimeTaken());
-
-      Path file = checkpoint.getCheckpointLocation().getFileName();
-      if (file == null) {
-        return;
-      }
-      response.setContentType("application/x-tgz");
-      response.setHeader("Content-Disposition",
-          "attachment; filename=\"" +
-               file.toString() + ".tgz\"");
-      // Ratis snapshot index used when downloading DB checkpoint to OM follower
-      response.setHeader(OM_RATIS_SNAPSHOT_INDEX,
-          String.valueOf(ratisSnapshotIndex));
-
-      Instant start = Instant.now();
-      OmUtils.writeOmDBCheckpointToStream(checkpoint,
-          response.getOutputStream());
-      Instant end = Instant.now();
-
-      long duration = Duration.between(start, end).toMillis();
-      LOG.info("Time taken to write the checkpoint to response output " +
-          "stream: " + duration + " milliseconds");
-      omMetrics.setLastCheckpointStreamingTimeTaken(duration);
-
-    } catch (Exception e) {
-      LOG.error(
-          "Unable to process metadata snapshot request. ", e);
-      response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
-    } finally {
-      if (checkpoint != null) {
-        try {
-          checkpoint.cleanupCheckpoint();
-        } catch (IOException e) {
-          LOG.error("Error trying to clean checkpoint at {} .",
-              checkpoint.getCheckpointLocation().toString());
-        }
-      }
-    }
-  }
-
-}
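The servlet caps the checkpoint download rate through HDFS's DataTransferThrottler whenever a transfer bandwidth is configured (OZONE_DB_CHECKPOINT_TRANSFER_RATE_KEY above). As a rough illustration of what such a throttler does (the real class is more careful about windows and concurrency; this is only a sketch of the idea), a minimal rate limiter might look like:

```java
public class SimpleThrottler {
  private final long bytesPerSecond;
  private long windowStartNanos = System.nanoTime();
  private long bytesThisWindow = 0;

  public SimpleThrottler(long bytesPerSecond) {
    this.bytesPerSecond = bytesPerSecond;
  }

  /** Blocks just long enough to keep the caller at or below the target rate. */
  public synchronized void throttle(long numBytes) throws InterruptedException {
    bytesThisWindow += numBytes;
    long elapsed = System.nanoTime() - windowStartNanos;
    long expected = bytesThisWindow * 1_000_000_000L / bytesPerSecond;
    if (expected > elapsed) {
      Thread.sleep((expected - elapsed) / 1_000_000L);  // we are ahead: wait
    }
    if (elapsed > 1_000_000_000L) {   // start a fresh one-second window
      windowStartNanos = System.nanoTime();
      bytesThisWindow = 0;
    }
  }
}
```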
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java
deleted file mode 100644
index 3ab9f47..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.server.ServiceRuntimeInfo;
-
-/**
- * This is the JMX management interface for OM information.
- */
-@InterfaceAudience.Private
-public interface OMMXBean extends ServiceRuntimeInfo {
-
-  String getRpcPort();
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
deleted file mode 100644
index 2d1ae30..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
+++ /dev/null
@@ -1,763 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
-
-/**
- * This class is for maintaining Ozone Manager statistics.
- */
-@InterfaceAudience.Private
-@Metrics(about="Ozone Manager Metrics", context="dfs")
-public class OMMetrics {
-  private static final String SOURCE_NAME =
-      OMMetrics.class.getSimpleName();
-
-  // OM request type op metrics
-  private @Metric MutableCounterLong numVolumeOps;
-  private @Metric MutableCounterLong numBucketOps;
-  private @Metric MutableCounterLong numKeyOps;
-  private @Metric MutableCounterLong numFSOps;
-
-  // OM op metrics
-  private @Metric MutableCounterLong numVolumeCreates;
-  private @Metric MutableCounterLong numVolumeUpdates;
-  private @Metric MutableCounterLong numVolumeInfos;
-  private @Metric MutableCounterLong numVolumeCheckAccesses;
-  private @Metric MutableCounterLong numBucketCreates;
-  private @Metric MutableCounterLong numVolumeDeletes;
-  private @Metric MutableCounterLong numBucketInfos;
-  private @Metric MutableCounterLong numBucketUpdates;
-  private @Metric MutableCounterLong numBucketDeletes;
-  private @Metric MutableCounterLong numKeyAllocate;
-  private @Metric MutableCounterLong numKeyLookup;
-  private @Metric MutableCounterLong numKeyRenames;
-  private @Metric MutableCounterLong numKeyDeletes;
-  private @Metric MutableCounterLong numBucketLists;
-  private @Metric MutableCounterLong numKeyLists;
-  private @Metric MutableCounterLong numVolumeLists;
-  private @Metric MutableCounterLong numKeyCommits;
-  private @Metric MutableCounterLong numBlockAllocations;
-  private @Metric MutableCounterLong numGetServiceLists;
-  private @Metric MutableCounterLong numBucketS3Lists;
-  private @Metric MutableCounterLong numInitiateMultipartUploads;
-  private @Metric MutableCounterLong numCompleteMultipartUploads;
-
-  private @Metric MutableCounterLong numGetFileStatus;
-  private @Metric MutableCounterLong numCreateDirectory;
-  private @Metric MutableCounterLong numCreateFile;
-  private @Metric MutableCounterLong numLookupFile;
-  private @Metric MutableCounterLong numListStatus;
-
-  // Failure Metrics
-  private @Metric MutableCounterLong numVolumeCreateFails;
-  private @Metric MutableCounterLong numVolumeUpdateFails;
-  private @Metric MutableCounterLong numVolumeInfoFails;
-  private @Metric MutableCounterLong numVolumeDeleteFails;
-  private @Metric MutableCounterLong numBucketCreateFails;
-  private @Metric MutableCounterLong numVolumeCheckAccessFails;
-  private @Metric MutableCounterLong numBucketInfoFails;
-  private @Metric MutableCounterLong numBucketUpdateFails;
-  private @Metric MutableCounterLong numBucketDeleteFails;
-  private @Metric MutableCounterLong numKeyAllocateFails;
-  private @Metric MutableCounterLong numKeyLookupFails;
-  private @Metric MutableCounterLong numKeyRenameFails;
-  private @Metric MutableCounterLong numKeyDeleteFails;
-  private @Metric MutableCounterLong numBucketListFails;
-  private @Metric MutableCounterLong numKeyListFails;
-  private @Metric MutableCounterLong numVolumeListFails;
-  private @Metric MutableCounterLong numKeyCommitFails;
-  private @Metric MutableCounterLong numBlockAllocationFails;
-  private @Metric MutableCounterLong numGetServiceListFails;
-  private @Metric MutableCounterLong numBucketS3ListFails;
-  private @Metric MutableCounterLong numInitiateMultipartUploadFails;
-  private @Metric MutableCounterLong numCommitMultipartUploadParts;
-  private @Metric MutableCounterLong numCommitMultipartUploadPartFails;
-  private @Metric MutableCounterLong numCompleteMultipartUploadFails;
-  private @Metric MutableCounterLong numAbortMultipartUploads;
-  private @Metric MutableCounterLong numAbortMultipartUploadFails;
-  private @Metric MutableCounterLong numListMultipartUploadParts;
-  private @Metric MutableCounterLong numListMultipartUploadPartFails;
-
-  private @Metric MutableCounterLong numGetFileStatusFails;
-  private @Metric MutableCounterLong numCreateDirectoryFails;
-  private @Metric MutableCounterLong numCreateFileFails;
-  private @Metric MutableCounterLong numLookupFileFails;
-  private @Metric MutableCounterLong numListStatusFails;
-
-  // Metrics for total number of volumes, buckets and keys
-
-  private @Metric MutableCounterLong numVolumes;
-  private @Metric MutableCounterLong numBuckets;
-  private @Metric MutableCounterLong numS3Buckets;
-
-  //TODO: This metric is an estimate and it may be inaccurate on restart if the
-  // OM process was not shutdown cleanly. Key creations/deletions in the last
-  // few minutes before restart may not be included in this count.
-  private @Metric MutableCounterLong numKeys;
-
-
-
-  // Metrics to track checkpointing statistics from last run.
-  private @Metric MutableGaugeLong lastCheckpointCreationTimeTaken;
-  private @Metric MutableGaugeLong lastCheckpointStreamingTimeTaken;
-
-  private @Metric MutableCounterLong numBucketS3Creates;
-  private @Metric MutableCounterLong numBucketS3CreateFails;
-  private @Metric MutableCounterLong numBucketS3Deletes;
-  private @Metric MutableCounterLong numBucketS3DeleteFails;
-
-  private @Metric MutableCounterLong numListMultipartUploadFails;
-  private @Metric MutableCounterLong numListMultipartUploads;
-
-  public OMMetrics() {
-  }
-
-  public static OMMetrics create() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME,
-        "Ozone Manager Metrics",
-        new OMMetrics());
-  }
-
-  public void incNumS3BucketCreates() {
-    numBucketOps.incr();
-    numBucketS3Creates.incr();
-  }
-
-  public void incNumS3BucketCreateFails() {
-    numBucketS3CreateFails.incr();
-  }
-
-  public void incNumS3BucketDeletes() {
-    numBucketOps.incr();
-    numBucketS3Deletes.incr();
-  }
-
-  public void incNumS3BucketDeleteFails() {
-    numBucketOps.incr();
-    numBucketS3DeleteFails.incr();
-  }
-
-
-  public void incNumS3Buckets() {
-    numS3Buckets.incr();
-  }
-
-  public void decNumS3Buckets() {
-    // decrement, not increment: this counter tracks the current bucket count
-    numS3Buckets.incr(-1);
-  }
-
-  public void incNumVolumes() {
-    numVolumes.incr();
-  }
-
-  public void decNumVolumes() {
-    numVolumes.incr(-1);
-  }
-
-  public void incNumBuckets() {
-    numBuckets.incr();
-  }
-
-  public void decNumBuckets() {
-    numBuckets.incr(-1);
-  }
-
-  public void incNumKeys() {
-    numKeys.incr();
-  }
-
-  public void decNumKeys() {
-    numKeys.incr(-1);
-  }
-
-  public void setNumVolumes(long val) {
-    long oldVal = this.numVolumes.value();
-    this.numVolumes.incr(val - oldVal);
-  }
-
-  public void setNumBuckets(long val) {
-    long oldVal = this.numBuckets.value();
-    this.numBuckets.incr(val - oldVal);
-  }
-
-  public void setNumKeys(long val) {
-    long oldVal = this.numKeys.value();
-    this.numKeys.incr(val - oldVal);
-  }
-
-  public long getNumVolumes() {
-    return numVolumes.value();
-  }
-
-  public long getNumBuckets() {
-    return numBuckets.value();
-  }
-
-  public long getNumKeys() {
-    return numKeys.value();
-  }
-
-
-  public void incNumVolumeCreates() {
-    numVolumeOps.incr();
-    numVolumeCreates.incr();
-  }
-
-  public void incNumVolumeUpdates() {
-    numVolumeOps.incr();
-    numVolumeUpdates.incr();
-  }
-
-  public void incNumVolumeInfos() {
-    numVolumeOps.incr();
-    numVolumeInfos.incr();
-  }
-
-  public void incNumVolumeDeletes() {
-    numVolumeOps.incr();
-    numVolumeDeletes.incr();
-  }
-
-  public void incNumVolumeCheckAccesses() {
-    numVolumeOps.incr();
-    numVolumeCheckAccesses.incr();
-  }
-
-  public void incNumBucketCreates() {
-    numBucketOps.incr();
-    numBucketCreates.incr();
-  }
-
-  public void incNumBucketInfos() {
-    numBucketOps.incr();
-    numBucketInfos.incr();
-  }
-
-  public void incNumBucketUpdates() {
-    numBucketOps.incr();
-    numBucketUpdates.incr();
-  }
-
-  public void incNumBucketDeletes() {
-    numBucketOps.incr();
-    numBucketDeletes.incr();
-  }
-
-  public void incNumBucketLists() {
-    numBucketOps.incr();
-    numBucketLists.incr();
-  }
-
-  public void incNumKeyLists() {
-    numKeyOps.incr();
-    numKeyLists.incr();
-  }
-
-  public void incNumVolumeLists() {
-    numVolumeOps.incr();
-    numVolumeLists.incr();
-  }
-
-  public void incNumListS3Buckets() {
-    numBucketOps.incr();
-    numBucketS3Lists.incr();
-  }
-
-  public void incNumListS3BucketsFails() {
-    numBucketOps.incr();
-    numBucketS3ListFails.incr();
-  }
-
-  public void incNumInitiateMultipartUploads() {
-    numKeyOps.incr();
-    numInitiateMultipartUploads.incr();
-  }
-
-  public void incNumInitiateMultipartUploadFails() {
-    numInitiateMultipartUploadFails.incr();
-  }
-
-  public void incNumCommitMultipartUploadParts() {
-    numKeyOps.incr();
-    numCommitMultipartUploadParts.incr();
-  }
-
-  public void incNumCommitMultipartUploadPartFails() {
-    numCommitMultipartUploadPartFails.incr();
-  }
-
-  public void incNumCompleteMultipartUploads() {
-    numKeyOps.incr();
-    numCompleteMultipartUploads.incr();
-  }
-
-  public void incNumCompleteMultipartUploadFails() {
-    numCompleteMultipartUploadFails.incr();
-  }
-
-  public void incNumAbortMultipartUploads() {
-    numKeyOps.incr();
-    numAbortMultipartUploads.incr();
-  }
-
-  public void incNumListMultipartUploadFails() {
-    numListMultipartUploadFails.incr();
-  }
-
-  public void incNumListMultipartUploads() {
-    numKeyOps.incr();
-    numListMultipartUploads.incr();
-  }
-
-  public void incNumAbortMultipartUploadFails() {
-    numAbortMultipartUploadFails.incr();
-  }
-  public void incNumListMultipartUploadParts() {
-    numKeyOps.incr();
-    numListMultipartUploadParts.incr();
-  }
-
-  public void incNumGetFileStatus() {
-    numKeyOps.incr();
-    numFSOps.incr();
-    numGetFileStatus.incr();
-  }
-
-  public void incNumGetFileStatusFails() {
-    numGetFileStatusFails.incr();
-  }
-
-  public void incNumCreateDirectory() {
-    numKeyOps.incr();
-    numFSOps.incr();
-    numCreateDirectory.incr();
-  }
-
-  public void incNumCreateDirectoryFails() {
-    numCreateDirectoryFails.incr();
-  }
-
-  public void incNumCreateFile() {
-    numKeyOps.incr();
-    numFSOps.incr();
-    numCreateFile.incr();
-  }
-
-  public void incNumCreateFileFails() {
-    numCreateFileFails.incr();
-  }
-
-  public void incNumLookupFile() {
-    numKeyOps.incr();
-    numFSOps.incr();
-    numLookupFile.incr();
-  }
-
-  public void incNumLookupFileFails() {
-    numLookupFileFails.incr();
-  }
-
-  public void incNumListStatus() {
-    numKeyOps.incr();
-    numFSOps.incr();
-    numListStatus.incr();
-  }
-
-  public void incNumListStatusFails() {
-    numListStatusFails.incr();
-  }
-
-  public void incNumListMultipartUploadPartFails() {
-    numListMultipartUploadPartFails.incr();
-  }
-
-  public void incNumGetServiceLists() {
-    numGetServiceLists.incr();
-  }
-
-  public void incNumVolumeCreateFails() {
-    numVolumeCreateFails.incr();
-  }
-
-  public void incNumVolumeUpdateFails() {
-    numVolumeUpdateFails.incr();
-  }
-
-  public void incNumVolumeInfoFails() {
-    numVolumeInfoFails.incr();
-  }
-
-  public void incNumVolumeDeleteFails() {
-    numVolumeDeleteFails.incr();
-  }
-
-  public void incNumVolumeCheckAccessFails() {
-    numVolumeCheckAccessFails.incr();
-  }
-
-  public void incNumBucketCreateFails() {
-    numBucketCreateFails.incr();
-  }
-
-  public void incNumBucketInfoFails() {
-    numBucketInfoFails.incr();
-  }
-
-  public void incNumBucketUpdateFails() {
-    numBucketUpdateFails.incr();
-  }
-
-  public void incNumBucketDeleteFails() {
-    numBucketDeleteFails.incr();
-  }
-
-  public void incNumKeyAllocates() {
-    numKeyOps.incr();
-    numKeyAllocate.incr();
-  }
-
-  public void incNumKeyAllocateFails() {
-    numKeyAllocateFails.incr();
-  }
-
-  public void incNumKeyLookups() {
-    numKeyOps.incr();
-    numKeyLookup.incr();
-  }
-
-  public void incNumKeyLookupFails() {
-    numKeyLookupFails.incr();
-  }
-
-  public void incNumKeyRenames() {
-    numKeyOps.incr();
-    numKeyRenames.incr();
-  }
-
-  public void incNumKeyRenameFails() {
-    numKeyOps.incr();
-    numKeyRenameFails.incr();
-  }
-
-  public void incNumKeyDeleteFails() {
-    numKeyDeleteFails.incr();
-  }
-
-  public void incNumKeyDeletes() {
-    numKeyOps.incr();
-    numKeyDeletes.incr();
-  }
-
-  public void incNumKeyCommits() {
-    numKeyOps.incr();
-    numKeyCommits.incr();
-  }
-
-  public void incNumKeyCommitFails() {
-    numKeyCommitFails.incr();
-  }
-
-  public void incNumBlockAllocateCalls() {
-    numBlockAllocations.incr();
-  }
-
-  public void incNumBlockAllocateCallFails() {
-    numBlockAllocationFails.incr();
-  }
-
-  public void incNumBucketListFails() {
-    numBucketListFails.incr();
-  }
-
-  public void incNumKeyListFails() {
-    numKeyListFails.incr();
-  }
-
-  public void incNumVolumeListFails() {
-    numVolumeListFails.incr();
-  }
-
-  public void incNumGetServiceListFails() {
-    numGetServiceListFails.incr();
-  }
-
-  public void setLastCheckpointCreationTimeTaken(long val) {
-    this.lastCheckpointCreationTimeTaken.set(val);
-  }
-
-  public void setLastCheckpointStreamingTimeTaken(long val) {
-    this.lastCheckpointStreamingTimeTaken.set(val);
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeCreates() {
-    return numVolumeCreates.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeUpdates() {
-    return numVolumeUpdates.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeInfos() {
-    return numVolumeInfos.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeDeletes() {
-    return numVolumeDeletes.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeCheckAccesses() {
-    return numVolumeCheckAccesses.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketCreates() {
-    return numBucketCreates.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketInfos() {
-    return numBucketInfos.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketUpdates() {
-    return numBucketUpdates.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketDeletes() {
-    return numBucketDeletes.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketLists() {
-    return numBucketLists.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeLists() {
-    return numVolumeLists.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyLists() {
-    return numKeyLists.value();
-  }
-
-  @VisibleForTesting
-  public long getNumGetServiceLists() {
-    return numGetServiceLists.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeCreateFails() {
-    return numVolumeCreateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeUpdateFails() {
-    return numVolumeUpdateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeInfoFails() {
-    return numVolumeInfoFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeDeleteFails() {
-    return numVolumeDeleteFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeCheckAccessFails() {
-    return numVolumeCheckAccessFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketCreateFails() {
-    return numBucketCreateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketInfoFails() {
-    return numBucketInfoFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketUpdateFails() {
-    return numBucketUpdateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketDeleteFails() {
-    return numBucketDeleteFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyAllocates() {
-    return numKeyAllocate.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyAllocateFails() {
-    return numKeyAllocateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyLookups() {
-    return numKeyLookup.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyLookupFails() {
-    return numKeyLookupFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyRenames() {
-    return numKeyRenames.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyRenameFails() {
-    return numKeyRenameFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyDeletes() {
-    return numKeyDeletes.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyDeletesFails() {
-    return numKeyDeleteFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketListFails() {
-    return numBucketListFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyListFails() {
-    return numKeyListFails.value();
-  }
-
-
-  @VisibleForTesting
-  public long getNumFSOps() {
-    return numFSOps.value();
-  }
-
-  @VisibleForTesting
-  public long getNumGetFileStatus() {
-    return numGetFileStatus.value();
-  }
-
-  @VisibleForTesting
-  public long getNumListStatus() {
-    return numListStatus.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeListFails() {
-    return numVolumeListFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyCommits() {
-    return numKeyCommits.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyCommitFails() {
-    return numKeyCommitFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBlockAllocates() {
-    return numBlockAllocations.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBlockAllocateFails() {
-    return numBlockAllocationFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumGetServiceListFails() {
-    return numGetServiceListFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumListS3Buckets() {
-    return numBucketS3Lists.value();
-  }
-
-  @VisibleForTesting
-  public long getNumListS3BucketsFails() {
-    return numBucketS3ListFails.value();
-  }
-
-  public long getNumInitiateMultipartUploads() {
-    return numInitiateMultipartUploads.value();
-  }
-
-  public long getNumInitiateMultipartUploadFails() {
-    return numInitiateMultipartUploadFails.value();
-  }
-
-  public long getNumAbortMultipartUploads() {
-    return numAbortMultipartUploads.value();
-  }
-
-  public long getNumAbortMultipartUploadFails() {
-    return numAbortMultipartUploadFails.value();
-  }
-
-  @VisibleForTesting
-  public long getLastCheckpointCreationTimeTaken() {
-    return lastCheckpointCreationTimeTaken.value();
-  }
-
-  @VisibleForTesting
-  public long getLastCheckpointStreamingTimeTaken() {
-    return lastCheckpointStreamingTimeTaken.value();
-  }
-
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE_NAME);
-  }
-}
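One design note worth calling out in the class above: MutableCounterLong exposes no set() method, since metrics2 counters are nominally monotonic, so setNumVolumes/setNumBuckets/setNumKeys "set" a counter by incrementing with the delta from its current value (which may be negative). A standalone sketch of the same trick with a plain long standing in for the metrics type:

```java
public class CounterSetDemo {
  private long counter = 0;                  // stands in for MutableCounterLong

  void incr(long delta) {
    counter += delta;                        // the only mutation the API offers
  }

  void set(long val) {
    incr(val - counter);                     // emulate set() via a delta
  }

  public static void main(String[] args) {
    CounterSetDemo c = new CounterSetDemo();
    c.incr(5);
    c.set(2);                                // applies a delta of -3
    System.out.println(c.counter);           // prints 2
  }
}
```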
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java
deleted file mode 100644
index 67c7eb8b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.apache.hadoop.security.authorize.Service;
-
-import java.util.concurrent.atomic.AtomicReference;
-
-import static org.apache.hadoop.ozone.om.OMConfigKeys
-    .OZONE_OM_SECURITY_CLIENT_PROTOCOL_ACL;
-
-/**
- * {@link PolicyProvider} for OM protocols.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public final class OMPolicyProvider extends PolicyProvider {
-
-  private static AtomicReference<OMPolicyProvider> atomicReference =
-      new AtomicReference<>();
-
-  private OMPolicyProvider() {
-  }
-
-  @Private
-  @Unstable
-  public static OMPolicyProvider getInstance() {
-    if (atomicReference.get() == null) {
-      atomicReference.compareAndSet(null, new OMPolicyProvider());
-    }
-    return atomicReference.get();
-  }
-
-  private static final Service[] OM_SERVICES =
-      new Service[]{
-          new Service(OZONE_OM_SECURITY_CLIENT_PROTOCOL_ACL,
-              OzoneManagerProtocol.class),
-      };
-
-  @SuppressFBWarnings("EI_EXPOSE_REP")
-  @Override
-  public Service[] getServices() {
-    return OM_SERVICES;
-  }
-
-}
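getInstance() above implements lock-free lazy initialization with AtomicReference.compareAndSet: under a race, two threads may both construct an instance, but only the CAS winner is published, and every caller subsequently observes that single instance. A minimal standalone equivalent of the pattern:

```java
import java.util.concurrent.atomic.AtomicReference;

public final class LazySingleton {
  private static final AtomicReference<LazySingleton> REF =
      new AtomicReference<>();

  private LazySingleton() {
  }

  public static LazySingleton getInstance() {
    if (REF.get() == null) {
      // Only one compareAndSet succeeds; a losing thread's instance is
      // simply dropped and garbage collected.
      REF.compareAndSet(null, new LazySingleton());
    }
    return REF.get();
  }
}
```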
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java
deleted file mode 100644
index f632ad1..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import java.io.IOException;
-
-/**
- * This interface is used by the OzoneManagerStarter class to allow the
- * dependencies to be injected into the CLI class.
- */
-public interface OMStarterInterface {
-  void start(OzoneConfiguration conf) throws IOException,
-      AuthenticationException;
-  boolean init(OzoneConfiguration conf) throws IOException,
-      AuthenticationException;
-}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
deleted file mode 100644
index b84cc5d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.util.Properties;
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.common.Storage;
-
-import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
-
-/**
- * OMStorage is responsible for management of the StorageDirectories used by
- * the Ozone Manager.
- */
-public class OMStorage extends Storage {
-
-  public static final String STORAGE_DIR = "om";
-  public static final String OM_ID = "omUuid";
-  public static final String OM_CERT_SERIAL_ID = "omCertSerialId";
-
-  /**
-   * Construct OMStorage.
-   * @throws IOException if any directories are inaccessible.
-   */
-  public OMStorage(OzoneConfiguration conf) throws IOException {
-    super(NodeType.OM, OmUtils.getOmDbDir(conf), STORAGE_DIR);
-  }
-
-  public void setScmId(String scmId) throws IOException {
-    if (getState() == StorageState.INITIALIZED) {
-      throw new IOException("OM is already initialized.");
-    } else {
-      getStorageInfo().setProperty(SCM_ID, scmId);
-    }
-  }
-
-  public void setOmCertSerialId(String certSerialId) throws IOException {
-    getStorageInfo().setProperty(OM_CERT_SERIAL_ID, certSerialId);
-  }
-
-  public void setOmId(String omId) throws IOException {
-    if (getState() == StorageState.INITIALIZED) {
-      throw new IOException("OM is already initialized.");
-    } else {
-      getStorageInfo().setProperty(OM_ID, omId);
-    }
-  }
-
-  /**
-   * Retrieves the SCM ID from the version file.
-   * @return SCM_ID
-   */
-  public String getScmId() {
-    return getStorageInfo().getProperty(SCM_ID);
-  }
-
-  /**
-   * Retrieves the OM ID from the version file.
-   * @return OM_ID
-   */
-  public String getOmId() {
-    return getStorageInfo().getProperty(OM_ID);
-  }
-
-  /**
-   * Retrieves the serial id of the certificate issued by SCM.
-   * @return OM_CERT_SERIAL_ID
-   */
-  public String getOmCertSerialId() {
-    return getStorageInfo().getProperty(OM_CERT_SERIAL_ID);
-  }
-
-  @Override
-  protected Properties getNodeProperties() {
-    String omId = getOmId();
-    if (omId == null) {
-      omId = UUID.randomUUID().toString();
-    }
-    Properties omProperties = new Properties();
-    omProperties.setProperty(OM_ID, omId);
-
-    if (getOmCertSerialId() != null) {
-      omProperties.setProperty(OM_CERT_SERIAL_ID, getOmCertSerialId());
-    }
-    return omProperties;
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
deleted file mode 100644
index 95f21ae..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ /dev/null
@@ -1,943 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.apache.hadoop.hdds.utils.db.TypedTable;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec;
-import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
-import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
-import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec;
-import org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec;
-import org.apache.hadoop.ozone.om.codec.RepeatedOmKeyInfoCodec;
-import org.apache.hadoop.ozone.om.codec.S3SecretValueCodec;
-import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec;
-import org.apache.hadoop.ozone.om.codec.UserVolumeInfoCodec;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.UserVolumeInfo;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import org.apache.commons.lang3.StringUtils;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-import org.eclipse.jetty.util.StringUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Implementation of the Ozone metadata manager, backed by RocksDB.
- */
-public class OmMetadataManagerImpl implements OMMetadataManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OmMetadataManagerImpl.class);
-
-  /**
-   * OM RocksDB Structure.
-   * <p>
-   * OM DB stores metadata as KV pairs in different column families.
-   * <p>
-   * OM DB Schema:
-   * |----------------------------------------------------------------------|
-   * |  Column Family     |        VALUE                                    |
-   * |----------------------------------------------------------------------|
-   * | userTable          |     /user->UserVolumeInfo                       |
-   * |----------------------------------------------------------------------|
-   * | volumeTable        |     /volume->VolumeInfo                         |
-   * |----------------------------------------------------------------------|
-   * | bucketTable        |     /volume/bucket-> BucketInfo                 |
-   * |----------------------------------------------------------------------|
-   * | keyTable           | /volumeName/bucketName/keyName->KeyInfo         |
-   * |----------------------------------------------------------------------|
-   * | deletedTable       | /volumeName/bucketName/keyName->RepeatedKeyInfo |
-   * |----------------------------------------------------------------------|
-   * | openKeyTable       | /volumeName/bucketName/keyName/id->KeyInfo      |
-   * |----------------------------------------------------------------------|
-   * | s3Table            | s3BucketName -> /volumeName/bucketName          |
-   * |----------------------------------------------------------------------|
-   * | s3SecretTable      | s3g_access_key_id -> s3Secret                   |
-   * |----------------------------------------------------------------------|
-   * | dTokenTable        | OzoneTokenIdentifier -> renewTime               |
-   * |----------------------------------------------------------------------|
-   * | prefixTable        | prefix -> PrefixInfo                            |
-   * |----------------------------------------------------------------------|
-   * | multipartInfoTable | /volumeName/bucketName/keyName/uploadId ->...   |
-   * |----------------------------------------------------------------------|
-   */
-
-  public static final String USER_TABLE = "userTable";
-  public static final String VOLUME_TABLE = "volumeTable";
-  public static final String BUCKET_TABLE = "bucketTable";
-  public static final String KEY_TABLE = "keyTable";
-  public static final String DELETED_TABLE = "deletedTable";
-  public static final String OPEN_KEY_TABLE = "openKeyTable";
-  public static final String S3_TABLE = "s3Table";
-  public static final String MULTIPARTINFO_TABLE = "multipartInfoTable";
-  public static final String S3_SECRET_TABLE = "s3SecretTable";
-  public static final String DELEGATION_TOKEN_TABLE = "dTokenTable";
-  public static final String PREFIX_TABLE = "prefixTable";
-
-  private DBStore store;
-
-  private final OzoneManagerLock lock;
-  private final long openKeyExpireThresholdMS;
-
-  private Table userTable;
-  private Table volumeTable;
-  private Table bucketTable;
-  private Table keyTable;
-  private Table deletedTable;
-  private Table openKeyTable;
-  private Table s3Table;
-  private Table<String, OmMultipartKeyInfo> multipartInfoTable;
-  private Table s3SecretTable;
-  private Table dTokenTable;
-  private Table prefixTable;
-  private boolean isRatisEnabled;
-
-  public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
-    this.lock = new OzoneManagerLock(conf);
-    this.openKeyExpireThresholdMS = 1000L * conf.getInt(
-        OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS,
-        OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT);
-    // TODO: This is a temporary check. Once fully implemented, all OM state
-    //  change should go through Ratis - be it standalone (for non-HA) or
-    //  replicated (for HA).
-    isRatisEnabled = conf.getBoolean(
-        OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT);
-    start(conf);
-  }
-
-  /**
-   * For subclass overriding.
-   */
-  protected OmMetadataManagerImpl() {
-    this.lock = new OzoneManagerLock(new OzoneConfiguration());
-    this.openKeyExpireThresholdMS =
-        OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
-  }
-
-  @Override
-  public Table<String, UserVolumeInfo> getUserTable() {
-    return userTable;
-  }
-
-  public Table<OzoneTokenIdentifier, Long> getDelegationTokenTable() {
-    return dTokenTable;
-  }
-
-  @Override
-  public Table<String, OmVolumeArgs> getVolumeTable() {
-    return volumeTable;
-  }
-
-  @Override
-  public Table<String, OmBucketInfo> getBucketTable() {
-    return bucketTable;
-  }
-
-  @Override
-  public Table<String, OmKeyInfo> getKeyTable() {
-    return keyTable;
-  }
-
-  @Override
-  public Table<String, RepeatedOmKeyInfo> getDeletedTable() {
-    return deletedTable;
-  }
-
-  @Override
-  public Table<String, OmKeyInfo> getOpenKeyTable() {
-    return openKeyTable;
-  }
-
-  @Override
-  public Table<String, String> getS3Table() {
-    return s3Table;
-  }
-
-  @Override
-  public Table<String, OmPrefixInfo> getPrefixTable() {
-    return prefixTable;
-  }
-
-  @Override
-  public Table<String, OmMultipartKeyInfo> getMultipartInfoTable() {
-    return multipartInfoTable;
-  }
-
-
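-  /**
-   * Verifies that a table reference was successfully obtained from the
-   * store; logs an error and throws an IOException if it is null.
-   */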
-  private void checkTableStatus(Table table, String name) throws IOException {
-    String logMessage = "Unable to get a reference to %s table. Cannot " +
-        "continue.";
-    String errMsg = "Inconsistent DB state, Table - %s. Please check the logs" +
-        "for more info.";
-    if (table == null) {
-      LOG.error(String.format(logMessage, name));
-      throw new IOException(String.format(errMsg, name));
-    }
-  }
-
-  /**
-   * Start metadata manager.
-   */
-  @Override
-  public void start(OzoneConfiguration configuration) throws IOException {
-    // We need to create the DB here: during a restart, stop() closes the
-    // DB, so we must recreate the store object and reinitialize the DB.
-    if (store == null) {
-      File metaDir = OmUtils.getOmDbDir(configuration);
-
-      DBStoreBuilder dbStoreBuilder = DBStoreBuilder.newBuilder(configuration)
-          .setName(OM_DB_NAME)
-          .setPath(Paths.get(metaDir.getPath()));
-      this.store = addOMTablesAndCodecs(dbStoreBuilder).build();
-      initializeOmTables();
-    }
-  }
-
-  protected DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) {
-
-    return builder.addTable(USER_TABLE)
-        .addTable(VOLUME_TABLE)
-        .addTable(BUCKET_TABLE)
-        .addTable(KEY_TABLE)
-        .addTable(DELETED_TABLE)
-        .addTable(OPEN_KEY_TABLE)
-        .addTable(S3_TABLE)
-        .addTable(MULTIPARTINFO_TABLE)
-        .addTable(DELEGATION_TOKEN_TABLE)
-        .addTable(S3_SECRET_TABLE)
-        .addTable(PREFIX_TABLE)
-        .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
-        .addCodec(OmKeyInfo.class, new OmKeyInfoCodec())
-        .addCodec(RepeatedOmKeyInfo.class, new RepeatedOmKeyInfoCodec())
-        .addCodec(OmBucketInfo.class, new OmBucketInfoCodec())
-        .addCodec(OmVolumeArgs.class, new OmVolumeArgsCodec())
-        .addCodec(UserVolumeInfo.class, new UserVolumeInfoCodec())
-        .addCodec(OmMultipartKeyInfo.class, new OmMultipartKeyInfoCodec())
-        .addCodec(S3SecretValue.class, new S3SecretValueCodec())
-        .addCodec(OmPrefixInfo.class, new OmPrefixInfoCodec());
-  }
-
-  /**
-   * Initialize OM Tables.
-   *
-   * @throws IOException if any table reference cannot be obtained.
-   */
-  protected void initializeOmTables() throws IOException {
-    userTable =
-        this.store.getTable(USER_TABLE, String.class, UserVolumeInfo.class);
-    checkTableStatus(userTable, USER_TABLE);
-
-    TableCacheImpl.CacheCleanupPolicy cleanupPolicy =
-        TableCacheImpl.CacheCleanupPolicy.NEVER;
-
-    volumeTable =
-        this.store.getTable(VOLUME_TABLE, String.class, OmVolumeArgs.class,
-            cleanupPolicy);
-    checkTableStatus(volumeTable, VOLUME_TABLE);
-
-    bucketTable =
-        this.store.getTable(BUCKET_TABLE, String.class, OmBucketInfo.class,
-            cleanupPolicy);
-
-    checkTableStatus(bucketTable, BUCKET_TABLE);
-
-    keyTable = this.store.getTable(KEY_TABLE, String.class, OmKeyInfo.class);
-    checkTableStatus(keyTable, KEY_TABLE);
-
-    deletedTable = this.store.getTable(DELETED_TABLE, String.class,
-        RepeatedOmKeyInfo.class);
-    checkTableStatus(deletedTable, DELETED_TABLE);
-
-    openKeyTable =
-        this.store.getTable(OPEN_KEY_TABLE, String.class, OmKeyInfo.class);
-    checkTableStatus(openKeyTable, OPEN_KEY_TABLE);
-
-    s3Table = this.store.getTable(S3_TABLE, String.class, String.class);
-    checkTableStatus(s3Table, S3_TABLE);
-
-    multipartInfoTable = this.store.getTable(MULTIPARTINFO_TABLE,
-        String.class, OmMultipartKeyInfo.class);
-    checkTableStatus(multipartInfoTable, MULTIPARTINFO_TABLE);
-
-    dTokenTable = this.store.getTable(DELEGATION_TOKEN_TABLE,
-        OzoneTokenIdentifier.class, Long.class);
-    checkTableStatus(dTokenTable, DELEGATION_TOKEN_TABLE);
-
-    s3SecretTable = this.store.getTable(S3_SECRET_TABLE, String.class,
-        S3SecretValue.class);
-    checkTableStatus(s3SecretTable, S3_SECRET_TABLE);
-
-    prefixTable = this.store.getTable(PREFIX_TABLE, String.class,
-        OmPrefixInfo.class);
-    checkTableStatus(prefixTable, PREFIX_TABLE);
-  }
-
-  /**
-   * Stop metadata manager.
-   */
-  @Override
-  public void stop() throws Exception {
-    if (store != null) {
-      store.close();
-      store = null;
-    }
-  }
-
-  /**
-   * Get metadata store.
-   *
-   * @return store - metadata store.
-   */
-  @VisibleForTesting
-  @Override
-  public DBStore getStore() {
-    return store;
-  }
-
-  /**
-   * Given a volume, return the corresponding DB key.
-   *
-   * @param volume - Volume name
-   */
-  @Override
-  public String getVolumeKey(String volume) {
-    return OzoneConsts.OM_KEY_PREFIX + volume;
-  }
-
-  /**
-   * Given a user, return the corresponding DB key.
-   *
-   * @param user - User name
-   */
-  @Override
-  public String getUserKey(String user) {
-    return user;
-  }
-
-  /**
-   * Given a volume and bucket, return the corresponding DB key.
-   *
-   * @param volume - Volume name
-   * @param bucket - Bucket name
-   */
-  @Override
-  public String getBucketKey(String volume, String bucket) {
-    StringBuilder builder =
-        new StringBuilder().append(OM_KEY_PREFIX).append(volume);
-
-    if (StringUtils.isNotBlank(bucket)) {
-      builder.append(OM_KEY_PREFIX).append(bucket);
-    }
-    return builder.toString();
-  }
-
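-  /**
-   * Builds the DB key for a key: /volumeName/bucketName/keyName. If the key
-   * name is blank, only the /volumeName/bucketName prefix is returned.
-   */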
-  @Override
-  public String getOzoneKey(String volume, String bucket, String key) {
-    StringBuilder builder = new StringBuilder()
-        .append(OM_KEY_PREFIX).append(volume);
-    // TODO : Throw if the Bucket is null?
-    builder.append(OM_KEY_PREFIX).append(bucket);
-    if (StringUtil.isNotBlank(key)) {
-      builder.append(OM_KEY_PREFIX);
-      if (!key.equals(OM_KEY_PREFIX)) {
-        builder.append(key);
-      }
-    }
-    return builder.toString();
-  }
-
-  @Override
-  public String getOzoneDirKey(String volume, String bucket, String key) {
-    key = OzoneFSUtils.addTrailingSlashIfNeeded(key);
-    return getOzoneKey(volume, bucket, key);
-  }
-
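-  /**
-   * Builds the open key table DB key:
-   * /volumeName/bucketName/keyName/id.
-   */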
-  @Override
-  public String getOpenKey(String volume, String bucket,
-                           String key, long id) {
-    String openKey = OM_KEY_PREFIX + volume + OM_KEY_PREFIX + bucket +
-        OM_KEY_PREFIX + key + OM_KEY_PREFIX + id;
-    return openKey;
-  }
-
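-  /**
-   * Builds the multipart upload DB key by delegating to
-   * OmMultipartUpload.getDbKey.
-   */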
-  @Override
-  public String getMultipartKey(String volume, String bucket, String key,
-                                String uploadId) {
-    return OmMultipartUpload.getDbKey(volume, bucket, key, uploadId);
-  }
-
-  /**
-   * Returns the OzoneManagerLock used on Metadata DB.
-   *
-   * @return OzoneManagerLock
-   */
-  @Override
-  public OzoneManagerLock getLock() {
-    return lock;
-  }
-
-  /**
-   * Returns true if firstArray starts with the bytes of secondArray.
-   *
-   * @param firstArray - Byte array
-   * @param secondArray - Byte array
-   * @return true if firstArray starts with the bytes of secondArray.
-   */
-  private boolean startsWith(byte[] firstArray, byte[] secondArray) {
-
-    if (firstArray == null) {
-      // if both are null, then the arrays match, else if first is null and
-      // second is not, then this function returns false.
-      return secondArray == null;
-    }
-
-
-    if (secondArray != null) {
-      // If the second array is longer, then the first array cannot start
-      // with the bytes of the second array.
-      if (secondArray.length > firstArray.length) {
-        return false;
-      }
-
-      for (int ndx = 0; ndx < secondArray.length; ndx++) {
-        if (firstArray[ndx] != secondArray[ndx]) {
-          return false;
-        }
-      }
-      return true; // All leading bytes match.
-    }
-    return false; // First is not null and second is null; by definition the
-    // first array does not start with the second.
-  }
-
-  /**
-   * Given a volume, check if it is empty, i.e. there are no buckets inside
-   * it. We iterate over the bucket table and see if there is any key that
-   * starts with the volume prefix. We actually look for /volume/, since
-   * without the trailing slash we might match some other volume.
-   * <p>
-   * For example, vol1 and vol122 might both match vol1; to avoid that we
-   * look for /vol1/.
-   *
-   * @param volume - Volume name
-   * @return true if the volume is empty
-   */
-  @Override
-  public boolean isVolumeEmpty(String volume) throws IOException {
-    String volumePrefix = getVolumeKey(volume + OM_KEY_PREFIX);
-
-    // First check in the bucket table cache.
-    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>>> iterator =
-        ((TypedTable<String, OmBucketInfo>) bucketTable).cacheIterator();
-    while (iterator.hasNext()) {
-      Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>> entry =
-          iterator.next();
-      String key = entry.getKey().getCacheKey();
-      OmBucketInfo omBucketInfo = entry.getValue().getCacheValue();
-      // A null cache value means the bucket is marked for deletion.
-      if (key.startsWith(volumePrefix) && omBucketInfo != null) {
-        return false;
-      }
-    }
-
-    try (TableIterator<String, ? extends KeyValue<String, OmBucketInfo>>
-        bucketIter = bucketTable.iterator()) {
-      KeyValue<String, OmBucketInfo> kv = bucketIter.seek(volumePrefix);
-
-      if (kv != null) {
-        // Check that the entry in the DB is not marked for delete. This can
-        // happen when the entry is marked for delete in the cache but the
-        // change has not yet been flushed to the DB.
-        CacheValue<OmBucketInfo> cacheValue =
-            bucketTable.getCacheValue(new CacheKey<>(kv.getKey()));
-        if (cacheValue != null) {
-          if (kv.getKey().startsWith(volumePrefix)
-              && cacheValue.getCacheValue() != null) {
-            return false; // we found at least one bucket with this volume
-            // prefix.
-          }
-        } else {
-          if (kv.getKey().startsWith(volumePrefix)) {
-            return false; // we found at least one bucket with this volume
-            // prefix.
-          }
-        }
-      }
-
-    }
-    return true;
-  }
-
-  /**
-   * Given a volume/bucket, check if it is empty, i.e. there are no keys
-   * inside it. The prefix is /volume/bucket/, and we look up the keyTable.
-   *
-   * @param volume - Volume name
-   * @param bucket - Bucket name
-   * @return true if the bucket is empty
-   */
-  @Override
-  public boolean isBucketEmpty(String volume, String bucket)
-      throws IOException {
-    String keyPrefix = getBucketKey(volume, bucket);
-
-    // First check in the key table cache.
-    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> iterator =
-        ((TypedTable<String, OmKeyInfo>) keyTable).cacheIterator();
-    while (iterator.hasNext()) {
-      Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry =
-          iterator.next();
-      String key = entry.getKey().getCacheKey();
-      OmKeyInfo omKeyInfo = entry.getValue().getCacheValue();
-      // A null cache value means the key is marked for deletion.
-      if (key.startsWith(keyPrefix) && omKeyInfo != null) {
-        return false;
-      }
-    }
-    try (TableIterator<String, ? extends KeyValue<String, OmKeyInfo>> keyIter =
-        keyTable.iterator()) {
-      KeyValue<String, OmKeyInfo> kv = keyIter.seek(keyPrefix);
-
-      if (kv != null) {
-        // Check that the entry in the DB is not marked for delete. This can
-        // happen when the entry is marked for delete in the cache but the
-        // change has not yet been flushed to the DB.
-        CacheValue<OmKeyInfo> cacheValue =
-            keyTable.getCacheValue(new CacheKey<>(kv.getKey()));
-        if (cacheValue != null) {
-          if (kv.getKey().startsWith(keyPrefix)
-              && cacheValue.getCacheValue() != null) {
-            return false; // we found at least one key with this vol/bucket
-            // prefix.
-          }
-        } else {
-          if (kv.getKey().startsWith(keyPrefix)) {
-            return false; // we found at least one key with this vol/bucket
-            // prefix.
-          }
-        }
-      }
-
-    }
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<OmBucketInfo> listBuckets(final String volumeName,
-      final String startBucket, final String bucketPrefix,
-      final int maxNumOfBuckets) throws IOException {
-    List<OmBucketInfo> result = new ArrayList<>();
-    if (Strings.isNullOrEmpty(volumeName)) {
-      throw new OMException("Volume name is required.",
-          ResultCodes.VOLUME_NOT_FOUND);
-    }
-
-    String volumeNameBytes = getVolumeKey(volumeName);
-    if (volumeTable.get(volumeNameBytes) == null) {
-      throw new OMException("Volume " + volumeName + " not found.",
-          ResultCodes.VOLUME_NOT_FOUND);
-    }
-
-    String startKey;
-    boolean skipStartKey = false;
-    if (StringUtil.isNotBlank(startBucket)) {
-      // If the user has specified a start key, we need to seek to that key
-      // and exclude it from the response set.
-      startKey = getBucketKey(volumeName, startBucket);
-      skipStartKey = true;
-    } else {
-      // If the user has specified a prefix key, we need to get to the first
-      // of the keys with the prefix match. We can leverage RocksDB to do
-      // that. However, if the user has specified only a prefix, we cannot
-      // skip the first prefix key we see; the boolean skipStartKey lets us
-      // decide whether to skip the start key, depending on what is specified.
-      startKey = getBucketKey(volumeName, bucketPrefix);
-    }
-
-    String seekPrefix;
-    if (StringUtil.isNotBlank(bucketPrefix)) {
-      seekPrefix = getBucketKey(volumeName, bucketPrefix);
-    } else {
-      seekPrefix = getVolumeKey(volumeName + OM_KEY_PREFIX);
-    }
-    int currentCount = 0;
-
-
-    // The bucket table is a full cache, so we can simply iterate over the
-    // in-memory table cache.
-    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>>> iterator =
-        bucketTable.cacheIterator();
-
-
-    while (currentCount < maxNumOfBuckets && iterator.hasNext()) {
-      Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>> entry =
-          iterator.next();
-
-      String key = entry.getKey().getCacheKey();
-      OmBucketInfo omBucketInfo = entry.getValue().getCacheValue();
-      // A null cache value means the bucket is marked for deletion.
-
-      if (omBucketInfo != null) {
-        if (key.equals(startKey) && skipStartKey) {
-          continue;
-        }
-
-        // Return only the keys that match the prefix and sort after
-        // the startBucket.
-        if (key.startsWith(seekPrefix) && key.compareTo(startKey) > 0) {
-          result.add(omBucketInfo);
-          currentCount++;
-        }
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
-      String startKey, String keyPrefix, int maxKeys) throws IOException {
-
-    List<OmKeyInfo> result = new ArrayList<>();
-    if (maxKeys <= 0) {
-      return result;
-    }
-
-    if (Strings.isNullOrEmpty(volumeName)) {
-      throw new OMException("Volume name is required.",
-          ResultCodes.VOLUME_NOT_FOUND);
-    }
-
-    if (Strings.isNullOrEmpty(bucketName)) {
-      throw new OMException("Bucket name is required.",
-          ResultCodes.BUCKET_NOT_FOUND);
-    }
-
-    String bucketNameBytes = getBucketKey(volumeName, bucketName);
-    if (getBucketTable().get(bucketNameBytes) == null) {
-      throw new OMException("Bucket " + bucketName + " not found.",
-          ResultCodes.BUCKET_NOT_FOUND);
-    }
-
-    String seekKey;
-    boolean skipStartKey = false;
-    if (StringUtil.isNotBlank(startKey)) {
-      // Seek to the specified key.
-      seekKey = getOzoneKey(volumeName, bucketName, startKey);
-      skipStartKey = true;
-    } else {
-      // This allows us to seek directly to the first key with the right prefix.
-      seekKey = getOzoneKey(volumeName, bucketName, keyPrefix);
-    }
-
-    String seekPrefix;
-    if (StringUtil.isNotBlank(keyPrefix)) {
-      seekPrefix = getOzoneKey(volumeName, bucketName, keyPrefix);
-    } else {
-      seekPrefix = getBucketKey(volumeName, bucketName + OM_KEY_PREFIX);
-    }
-    int currentCount = 0;
-
-
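-    // Merge strategy: collect matching cache entries into a sorted map and
-    // remember cache entries marked as deleted, then overlay DB entries
-    // that are not in the deleted set, and finally trim to maxKeys below.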
-    TreeMap<String, OmKeyInfo> cacheKeyMap = new TreeMap<>();
-    Set<String> deletedKeySet = new TreeSet<>();
-    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> iterator =
-        keyTable.cacheIterator();
-
-    //TODO: We can avoid this iteration if the table cache stored entries in
-    // a TreeMap. Currently a HashMap is used in the cache. HashMap get is a
-    // constant time operation, whereas for a TreeMap get is O(log n), so
-    // moving to a TreeMap would affect get, which is a frequent operation on
-    // the table. So, for now, in list we iterate the cache map and construct
-    // a TreeMap of entries that match keyPrefix and are greater than or
-    // equal to startKey. We can revisit this later if the list operation
-    // becomes slow.
-    while (iterator.hasNext()) {
-      Map.Entry< CacheKey<String>, CacheValue<OmKeyInfo>> entry =
-          iterator.next();
-
-      String key = entry.getKey().getCacheKey();
-      OmKeyInfo omKeyInfo = entry.getValue().getCacheValue();
-      // A null cache value means the key is marked for deletion.
-
-      if (omKeyInfo != null) {
-        if (key.startsWith(seekPrefix) && key.compareTo(seekKey) >= 0) {
-          cacheKeyMap.put(key, omKeyInfo);
-        }
-      } else {
-        deletedKeySet.add(key);
-      }
-    }
-
-    // Get up to maxKeys entries from the DB, if available.
-
-    try (TableIterator<String, ? extends KeyValue<String, OmKeyInfo>>
-             keyIter = getKeyTable().iterator()) {
-      KeyValue<String, OmKeyInfo> kv;
-      keyIter.seek(seekKey);
-      // We iterate up to maxKeys + 1 entries here because, if skipStartKey
-      // is true, the start key entry is skipped when building the result.
-      while (currentCount < maxKeys + 1 && keyIter.hasNext()) {
-        kv = keyIter.next();
-        if (kv != null && kv.getKey().startsWith(seekPrefix)) {
-
-          // Consider only entries that are not marked for delete.
-          if (!deletedKeySet.contains(kv.getKey())) {
-            cacheKeyMap.put(kv.getKey(), kv.getValue());
-            currentCount++;
-          }
-        } else {
-          // The seekPrefix no longer matches; we can break out of the loop.
-          break;
-        }
-      }
-    }
-
-    // Finally, the DB entries and cache entries have been merged; return up
-    // to maxKeys entries from the sorted map.
-    currentCount = 0;
-
-    for (Map.Entry<String, OmKeyInfo> cacheKey : cacheKeyMap.entrySet()) {
-      if (cacheKey.getKey().equals(seekKey) && skipStartKey) {
-        continue;
-      }
-
-      result.add(cacheKey.getValue());
-      currentCount++;
-
-      if (currentCount == maxKeys) {
-        break;
-      }
-    }
-
-    // Clear map and set.
-    cacheKeyMap.clear();
-    deletedKeySet.clear();
-
-    return result;
-  }
-
-  @Override
-  public List<OmVolumeArgs> listVolumes(String userName,
-      String prefix, String startKey, int maxKeys) throws IOException {
-    List<OmVolumeArgs> result = Lists.newArrayList();
-    UserVolumeInfo volumes;
-    if (StringUtil.isBlank(userName)) {
-      throw new OMException("User name is required to list Volumes.",
-          ResultCodes.USER_NOT_FOUND);
-    }
-    volumes = getVolumesByUser(userName);
-
-    if (volumes == null || volumes.getVolumeNamesCount() == 0) {
-      return result;
-    }
-
-    boolean startKeyFound = Strings.isNullOrEmpty(startKey);
-    for (String volumeName : volumes.getVolumeNamesList()) {
-      if (!Strings.isNullOrEmpty(prefix)) {
-        if (!volumeName.startsWith(prefix)) {
-          continue;
-        }
-      }
-
-      if (!startKeyFound && volumeName.equals(startKey)) {
-        startKeyFound = true;
-        continue;
-      }
-      if (startKeyFound && result.size() < maxKeys) {
-        OmVolumeArgs volumeArgs =
-            getVolumeTable().get(this.getVolumeKey(volumeName));
-        if (volumeArgs == null) {
-          // Could not get volume info for the given volume name. Since the
-          // volume name was loaded from the DB, this probably means the OM
-          // DB is corrupted or some entries were accidentally removed.
-          throw new OMException("Volume info not found for " + volumeName,
-              ResultCodes.VOLUME_NOT_FOUND);
-        }
-        result.add(volumeArgs);
-      }
-    }
-
-    return result;
-  }
-
-  private UserVolumeInfo getVolumesByUser(String userNameKey)
-      throws OMException {
-    try {
-      UserVolumeInfo userVolInfo = getUserTable().get(userNameKey);
-      if (userVolInfo == null) {
-        // No volume found for this user; return an empty list.
-        return UserVolumeInfo.newBuilder().build();
-      } else {
-        return userVolInfo;
-      }
-    } catch (IOException e) {
-      throw new OMException("Unable to get volumes info by the given user, "
-          + "metadata might be corrupted", e,
-          ResultCodes.METADATA_ERROR);
-    }
-  }
-
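-  /**
-   * Iterates the deleted table and collects BlockGroups (key name plus the
-   * block IDs of that key info's latest version) until at least keyCount
-   * groups have been gathered or the table is exhausted.
-   */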
-  @Override
-  public List<BlockGroup> getPendingDeletionKeys(final int keyCount)
-      throws IOException {
-    List<BlockGroup> keyBlocksList = Lists.newArrayList();
-    try (TableIterator<String, ? extends KeyValue<String, RepeatedOmKeyInfo>>
-             keyIter = getDeletedTable().iterator()) {
-      int currentCount = 0;
-      while (keyIter.hasNext() && currentCount < keyCount) {
-        KeyValue<String, RepeatedOmKeyInfo> kv = keyIter.next();
-        if (kv != null) {
-          RepeatedOmKeyInfo infoList = kv.getValue();
-          // Get block keys as a list.
-          for (OmKeyInfo info : infoList.getOmKeyInfoList()) {
-            OmKeyLocationInfoGroup latest = info.getLatestVersionLocations();
-            List<BlockID> item = latest.getLocationList().stream()
-                .map(b -> new BlockID(b.getContainerID(), b.getLocalID()))
-                .collect(Collectors.toList());
-            BlockGroup keyBlocks = BlockGroup.newBuilder()
-                .setKeyName(kv.getKey())
-                .addAllBlockIDs(item)
-                .build();
-            keyBlocksList.add(keyBlocks);
-            currentCount++;
-          }
-        }
-      }
-    }
-    return keyBlocksList;
-  }
-
-  @Override
-  public List<BlockGroup> getExpiredOpenKeys() throws IOException {
-    List<BlockGroup> keyBlocksList = Lists.newArrayList();
-    // TODO: Fix getExpiredOpenKeys; not part of this patch.
-    return keyBlocksList;
-  }
-
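-  /**
-   * Counts rows with a full scan of the table, so the cost is linear in
-   * the number of rows.
-   */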
-  @Override
-  public <KEY, VALUE> long countRowsInTable(Table<KEY, VALUE> table)
-      throws IOException {
-    long count = 0;
-    if (table != null) {
-      try (TableIterator<KEY, ? extends KeyValue<KEY, VALUE>>
-          keyValueTableIterator = table.iterator()) {
-        while (keyValueTableIterator.hasNext()) {
-          keyValueTableIterator.next();
-          count++;
-        }
-      }
-    }
-    return count;
-  }
-
-  @Override
-  public <KEY, VALUE> long countEstimatedRowsInTable(Table<KEY, VALUE> table)
-      throws IOException {
-    long count = 0;
-    if (table != null) {
-      count = table.getEstimatedKeyCount();
-    }
-    return count;
-  }
-
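-  /**
-   * Seeks to the multipart DB key prefix for the given volume, bucket and
-   * key prefix, and returns every multipart upload key under that prefix.
-   */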
-  @Override
-  public List<String> getMultipartUploadKeys(
-      String volumeName, String bucketName, String prefix) throws IOException {
-    List<String> response = new ArrayList<>();
-
-    String prefixKey =
-        OmMultipartUpload.getDbKey(volumeName, bucketName, prefix);
-
-    // Use try-with-resources, as elsewhere in this class, so the underlying
-    // iterator is closed once the scan completes.
-    try (TableIterator<String, ? extends KeyValue<String, OmMultipartKeyInfo>>
-        iterator = getMultipartInfoTable().iterator()) {
-      iterator.seek(prefixKey);
-
-      while (iterator.hasNext()) {
-        KeyValue<String, OmMultipartKeyInfo> entry = iterator.next();
-        if (entry.getKey().startsWith(prefixKey)) {
-          response.add(entry.getKey());
-        } else {
-          break;
-        }
-      }
-    }
-    return response;
-  }
-
-  @Override
-  public Table<String, S3SecretValue> getS3SecretTable() {
-    return s3SecretTable;
-  }
-
-  /**
-   * Update store used by subclass.
-   *
-   * @param store DB store.
-   */
-  protected void setStore(DBStore store) {
-    this.store = store;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetricsInfo.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetricsInfo.java
deleted file mode 100644
index e9b1f43..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetricsInfo.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-/**
- * OmMetricsInfo is persisted to a file and used during OM restart to
- * initialize the metrics. Currently it stores only numKeys.
- */
-public class OmMetricsInfo {
-
-  @JsonProperty
-  private long numKeys;
-
-  OmMetricsInfo() {
-    this.numKeys = 0;
-  }
-
-  public long getNumKeys() {
-    return numKeys;
-  }
-
-  public void setNumKeys(long numKeys) {
-    this.numKeys = numKeys;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java
deleted file mode 100644
index 79bc39f..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.hdds.utils.BackgroundService;
-import org.apache.hadoop.hdds.utils.BackgroundTask;
-import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
-import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Background service that deletes hanging open keys.
- * It periodically scans the OM metadata for keys with the prefix "#open#"
- * and asks SCM to delete the corresponding block metadata; if SCM returns
- * success for a key, that key is cleaned up.
- */
-public class OpenKeyCleanupService extends BackgroundService {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OpenKeyCleanupService.class);
-
-  private static final int OPEN_KEY_DELETING_CORE_POOL_SIZE = 2;
-
-  private final KeyManager keyManager;
-  private final ScmBlockLocationProtocol scmClient;
-
-  public OpenKeyCleanupService(ScmBlockLocationProtocol scmClient,
-      KeyManager keyManager, int serviceInterval,
-      long serviceTimeout) {
-    super("OpenKeyCleanupService", serviceInterval, TimeUnit.SECONDS,
-        OPEN_KEY_DELETING_CORE_POOL_SIZE, serviceTimeout);
-    this.keyManager = keyManager;
-    this.scmClient = scmClient;
-  }
-
-  @Override
-  public BackgroundTaskQueue getTasks() {
-    BackgroundTaskQueue queue = new BackgroundTaskQueue();
-    queue.add(new OpenKeyDeletingTask());
-    return queue;
-  }
-
-  private class OpenKeyDeletingTask
-      implements BackgroundTask<BackgroundTaskResult> {
-
-    @Override
-    public int getPriority() {
-      return 0;
-    }
-
-    @Override
-    public BackgroundTaskResult call() throws Exception {
-      try {
-        List<BlockGroup> keyBlocksList = keyManager.getExpiredOpenKeys();
-        if (keyBlocksList.size() > 0) {
-          int toDeleteSize = keyBlocksList.size();
-          LOG.debug("Found {} to-delete open keys in OM", toDeleteSize);
-          List<DeleteBlockGroupResult> results =
-              scmClient.deleteKeyBlocks(keyBlocksList);
-          int deletedSize = 0;
-          for (DeleteBlockGroupResult result : results) {
-            if (result.isSuccess()) {
-              try {
-                keyManager.deleteExpiredOpenKey(result.getObjectKey());
-                if (LOG.isDebugEnabled()) {
-                  LOG.debug("Key {} deleted from OM DB", result.getObjectKey());
-                }
-                deletedSize += 1;
-              } catch (IOException e) {
-                LOG.warn("Failed to delete hanging-open key {}",
-                    result.getObjectKey(), e);
-              }
-            } else {
-              LOG.warn("Deleting open Key {} failed because some of the blocks"
-                      + " were failed to delete, failed blocks: {}",
-                  result.getObjectKey(),
-                  StringUtils.join(",", result.getFailedBlocks()));
-            }
-          }
-          LOG.info("Found {} expired open key entries, successfully " +
-              "cleaned up {} entries", toDeleteSize, deletedSize);
-          return results::size;
-        } else {
-          LOG.debug("No hanging open key found in OM");
-        }
-      } catch (IOException e) {
-        LOG.error("Unable to get hanging open keys, retry in"
-            + " next interval", e);
-      }
-      return BackgroundTaskResult.EmptyTaskResult.newResult();
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
deleted file mode 100644
index 0cd087e..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ /dev/null
@@ -1,3295 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.protobuf.BlockingService;
-
-import java.net.InetAddress;
-import java.nio.file.Path;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.KeyPair;
-import java.security.cert.CertificateException;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Objects;
-
-import org.apache.commons.codec.digest.DigestUtils;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificate.client.OMCertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
-import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
-import org.apache.hadoop.ozone.om.ha.OMHANodeDetails;
-import org.apache.hadoop.ozone.om.ha.OMNodeDetails;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerServerProtocol;
-import org.apache.hadoop.ozone.om.ratis.OMRatisSnapshotInfo;
-import org.apache.hadoop.ozone.om.snapshot.OzoneManagerSnapshotProvider;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
-import org.apache.hadoop.ozone.security.OzoneSecurityException;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.AuditAction;
-import org.apache.hadoop.ozone.audit.AuditEventStatus;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.AuditLoggerType;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.audit.Auditor;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.common.Storage.StorageState;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisClient;
-import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;
-import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
-import org.apache.hadoop.ozone.security.acl.OzoneAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType;
-import org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.security.acl.RequestContext;
-import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
-import org.apache.hadoop.ozone.security.OzoneDelegationTokenSecretManager;
-import org.apache.hadoop.ozone.util.OzoneVersionInfo;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.JvmPauseMonitor;
-import org.apache.hadoop.util.KMSUtil;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.ShutdownHookManager;
-import org.apache.hadoop.hdds.utils.RetriableTask;
-import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
-import org.apache.hadoop.hdds.utils.db.SequenceNumberNotFoundException;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-
-import org.apache.ratis.server.protocol.TermIndex;
-import org.apache.ratis.util.FileUtils;
-import org.apache.ratis.util.LifeCycle;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.net.InetSocketAddress;
-import java.nio.file.Files;
-import java.nio.file.StandardCopyOption;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
-import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString;
-import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
-import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_FILE;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_TEMP_FILE;
-import static org.apache.hadoop.ozone.OzoneConsts.RPC_PORT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME_DEFAULT;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_AUTH_METHOD;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
-import static org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.OzoneManagerService
-    .newReflectiveBlockingService;
-
-/**
- * Ozone Manager is the metadata manager of Ozone.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
-public final class OzoneManager extends ServiceRuntimeInfoImpl
-    implements OzoneManagerServerProtocol, OMMXBean, Auditor {
-  public static final Logger LOG =
-      LoggerFactory.getLogger(OzoneManager.class);
-
-  private static final AuditLogger AUDIT = new AuditLogger(
-      AuditLoggerType.OMLOGGER);
-
-  private static final String OM_DAEMON = "om";
-  private static boolean securityEnabled = false;
-  private OzoneDelegationTokenSecretManager delegationTokenMgr;
-  private OzoneBlockTokenSecretManager blockTokenMgr;
-  private CertificateClient certClient;
-  private String caCertPem = null;
-  private static boolean testSecureOmFlag = false;
-  private final Text omRpcAddressTxt;
-  private final OzoneConfiguration configuration;
-  private RPC.Server omRpcServer;
-  private InetSocketAddress omRpcAddress;
-  private String omId;
-
-  private OMMetadataManager metadataManager;
-  private VolumeManager volumeManager;
-  private BucketManager bucketManager;
-  private KeyManager keyManager;
-  private PrefixManagerImpl prefixManager;
-  private S3BucketManager s3BucketManager;
-
-  private final OMMetrics metrics;
-  private final ProtocolMessageMetrics omClientProtocolMetrics;
-  private OzoneManagerHttpServer httpServer;
-  private final OMStorage omStorage;
-  private final ScmBlockLocationProtocol scmBlockClient;
-  private final StorageContainerLocationProtocol scmContainerClient;
-  private ObjectName omInfoBeanName;
-  private Timer metricsTimer;
-  private ScheduleOMMetricsWriteTask scheduleOMMetricsWriteTask;
-  private static final ObjectWriter WRITER =
-      new ObjectMapper().writerWithDefaultPrettyPrinter();
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(OmMetricsInfo.class);
-  private static final int SHUTDOWN_HOOK_PRIORITY = 30;
-  private final Runnable shutdownHook;
-  private final File omMetaDir;
-  private final boolean isAclEnabled;
-  private IAccessAuthorizer accessAuthorizer;
-  private JvmPauseMonitor jvmPauseMonitor;
-  private final SecurityConfig secConfig;
-  private S3SecretManager s3SecretManager;
-  private volatile boolean isOmRpcServerRunning = false;
-  private String omComponent;
-  private OzoneManagerProtocolServerSideTranslatorPB omServerProtocol;
-
-  private boolean isRatisEnabled;
-  private OzoneManagerRatisServer omRatisServer;
-  private OzoneManagerRatisClient omRatisClient;
-  private OzoneManagerSnapshotProvider omSnapshotProvider;
-  private OMNodeDetails omNodeDetails;
-  private List<OMNodeDetails> peerNodes;
-  private File omRatisSnapshotDir;
-  private final OMRatisSnapshotInfo omRatisSnapshotInfo;
-  private final Collection<String> ozAdmins;
-
-  private KeyProviderCryptoExtension kmsProvider = null;
-  private static String keyProviderUriKeyName =
-      CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
-
-  // Adding parameters needed for VolumeRequests here, so that during request
-  // execution we can get them from the OzoneManager.
-  private long maxUserVolumeCount;
-
-  private final ScmClient scmClient;
-  private final long scmBlockSize;
-  private final int preallocateBlocksMax;
-  private final boolean grpcBlockTokenEnabled;
-  private final boolean useRatisForReplication;
-
-  private OzoneManager(OzoneConfiguration conf) throws IOException,
-      AuthenticationException {
-    super(OzoneVersionInfo.OZONE_VERSION_INFO);
-    Preconditions.checkNotNull(conf);
-    configuration = conf;
-    // Load HA related configurations
-    OMHANodeDetails omhaNodeDetails =
-        OMHANodeDetails.loadOMHAConfig(configuration);
-
-    this.peerNodes = omhaNodeDetails.getPeerNodeDetails();
-    this.omNodeDetails = omhaNodeDetails.getLocalNodeDetails();
-
-    omStorage = new OMStorage(conf);
-    omId = omStorage.getOmId();
-
-    // In case of a single OM node service there will be no OM node ID
-    // specified; set it to the value from OM storage.
-    if (this.omNodeDetails.getOMNodeId() == null) {
-      this.omNodeDetails =
-          OMHANodeDetails.getOMNodeDetails(conf, omNodeDetails.getOMServiceId(),
-              omStorage.getOmId(), omNodeDetails.getRpcAddress(),
-              omNodeDetails.getRatisPort());
-    }
-
-    loginOMUserIfSecurityEnabled(conf);
-
-    this.maxUserVolumeCount = conf.getInt(OZONE_OM_USER_MAX_VOLUME,
-        OZONE_OM_USER_MAX_VOLUME_DEFAULT);
-    Preconditions.checkArgument(this.maxUserVolumeCount > 0,
-        OZONE_OM_USER_MAX_VOLUME + " value should be greater than zero");
-
-    if (omStorage.getState() != StorageState.INITIALIZED) {
-      throw new OMException("OM not initialized.",
-          ResultCodes.OM_NOT_INITIALIZED);
-    }
-
-    // Read configuration and set values.
-    ozAdmins = conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS);
-    omMetaDir = OmUtils.getOmDbDir(configuration);
-    this.isAclEnabled = conf.getBoolean(OZONE_ACL_ENABLED,
-        OZONE_ACL_ENABLED_DEFAULT);
-    this.scmBlockSize = (long) conf.getStorageSize(OZONE_SCM_BLOCK_SIZE,
-        OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES);
-    this.preallocateBlocksMax = conf.getInt(
-        OZONE_KEY_PREALLOCATION_BLOCKS_MAX,
-        OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT);
-    this.grpcBlockTokenEnabled = conf.getBoolean(HDDS_BLOCK_TOKEN_ENABLED,
-        HDDS_BLOCK_TOKEN_ENABLED_DEFAULT);
-    this.useRatisForReplication = conf.getBoolean(
-        DFS_CONTAINER_RATIS_ENABLED_KEY, DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
-    // TODO: This is a temporary check. Once fully implemented, all OM state
-    //  change should go through Ratis - be it standalone (for non-HA) or
-    //  replicated (for HA).
-    isRatisEnabled = configuration.getBoolean(
-        OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT);
-
-    InetSocketAddress omNodeRpcAddr = omNodeDetails.getRpcAddress();
-    omRpcAddressTxt = new Text(omNodeDetails.getRpcAddressString());
-
-    scmContainerClient = getScmContainerClient(configuration);
-    // verifies that the SCM info in the OM Version file is correct.
-    scmBlockClient = getScmBlockClient(configuration);
-    this.scmClient = new ScmClient(scmBlockClient, scmContainerClient);
-
-    // For testing purposes only: don't hit SCM from OM, as Hadoop UGI can't
-    // log in two principals in the same JVM.
-    if (!testSecureOmFlag) {
-      ScmInfo scmInfo = getScmInfo(configuration);
-      if (!(scmInfo.getClusterId().equals(omStorage.getClusterID()) && scmInfo
-          .getScmId().equals(omStorage.getScmId()))) {
-        throw new OMException("SCM version info mismatch.",
-            ResultCodes.SCM_VERSION_MISMATCH_ERROR);
-      }
-    }
-
-    RPC.setProtocolEngine(configuration, OzoneManagerProtocolPB.class,
-        ProtobufRpcEngine.class);
-
-    secConfig = new SecurityConfig(configuration);
-    // Create the KMS Key Provider
-    try {
-      kmsProvider = createKeyProviderExt(configuration);
-    } catch (IOException ioe) {
-      kmsProvider = null;
-      LOG.error("Fail to create Key Provider");
-    }
-    if (secConfig.isSecurityEnabled()) {
-      omComponent = OM_DAEMON + "-" + omId;
-      if(omStorage.getOmCertSerialId() == null) {
-        throw new RuntimeException("OzoneManager started in secure mode but " +
-            "doesn't have SCM signed certificate.");
-      }
-      certClient = new OMCertificateClient(new SecurityConfig(conf),
-          omStorage.getOmCertSerialId());
-    }
-    if (secConfig.isBlockTokenEnabled()) {
-      blockTokenMgr = createBlockTokenSecretManager(configuration);
-    }
-
-    instantiateServices();
-
-    this.omRatisSnapshotInfo = new OMRatisSnapshotInfo(
-        omStorage.getCurrentDir());
-
-    initializeRatisServer();
-    initializeRatisClient();
-
-    if (isRatisEnabled) {
-      // Create Ratis storage dir
-      String omRatisDirectory = OmUtils.getOMRatisDirectory(configuration);
-      if (omRatisDirectory == null || omRatisDirectory.isEmpty()) {
-        throw new IllegalArgumentException(HddsConfigKeys.OZONE_METADATA_DIRS +
-            " must be defined.");
-      }
-      OmUtils.createOMDir(omRatisDirectory);
-      // Create Ratis snapshot dir
-      omRatisSnapshotDir = OmUtils.createOMDir(
-          OmUtils.getOMRatisSnapshotDirectory(configuration));
-
-      if (peerNodes != null && !peerNodes.isEmpty()) {
-        this.omSnapshotProvider = new OzoneManagerSnapshotProvider(
-            configuration, omRatisSnapshotDir, peerNodes);
-      }
-    }
-
-    metrics = OMMetrics.create();
-
-    omClientProtocolMetrics = ProtocolMessageMetrics
-        .create("OmClientProtocol", "Ozone Manager RPC endpoint",
-            OzoneManagerProtocolProtos.Type.values());
-
-    // Start Om Rpc Server.
-    omRpcServer = getRpcServer(configuration);
-    omRpcAddress = updateRPCListenAddress(configuration,
-        OZONE_OM_ADDRESS_KEY, omNodeRpcAddr, omRpcServer);
-
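-    // Persist the latest key-count metrics on shutdown so that they can be
-    // restored from the metrics storage file on the next start.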
-    shutdownHook = () -> {
-      saveOmMetrics();
-    };
-    ShutdownHookManager.get().addShutdownHook(shutdownHook,
-        SHUTDOWN_HOOK_PRIORITY);
-  }
-
-  /**
-   * Instantiate services which are dependent on the OM DB state.
-   * When OM state is reloaded, these services are re-initialized with the
-   * new OM state.
-   */
-  private void instantiateServices() throws IOException {
-
-    metadataManager = new OmMetadataManagerImpl(configuration);
-    volumeManager = new VolumeManagerImpl(metadataManager, configuration);
-    bucketManager = new BucketManagerImpl(metadataManager, getKmsProvider(),
-        isRatisEnabled);
-    s3BucketManager = new S3BucketManagerImpl(configuration, metadataManager,
-        volumeManager, bucketManager);
-    if (secConfig.isSecurityEnabled()) {
-      s3SecretManager = new S3SecretManagerImpl(configuration, metadataManager);
-      delegationTokenMgr = createDelegationTokenSecretManager(configuration);
-    }
-
-    prefixManager = new PrefixManagerImpl(metadataManager, isRatisEnabled);
-    keyManager = new KeyManagerImpl(this, scmClient, configuration,
-        omStorage.getOmId());
-
-    if (isAclEnabled) {
-      accessAuthorizer = getACLAuthorizerInstance(configuration);
-      if (accessAuthorizer instanceof OzoneNativeAuthorizer) {
-        OzoneNativeAuthorizer authorizer =
-            (OzoneNativeAuthorizer) accessAuthorizer;
-        authorizer.setVolumeManager(volumeManager);
-        authorizer.setBucketManager(bucketManager);
-        authorizer.setKeyManager(keyManager);
-        authorizer.setPrefixManager(prefixManager);
-      }
-    } else {
-      accessAuthorizer = null;
-    }
-  }
-
-  /**
-   * Return configuration value of
-   * {@link OzoneConfigKeys#DFS_CONTAINER_RATIS_ENABLED_KEY}.
-   */
-  public boolean shouldUseRatis() {
-    return useRatisForReplication;
-  }
-
-  /**
-   * Return scmClient.
-   */
-  public ScmClient getScmClient() {
-    return scmClient;
-  }
-
-  /**
-   * Returns the block token SecretManager for OM.
-   */
-  public OzoneBlockTokenSecretManager getBlockTokenSecretManager() {
-    return blockTokenMgr;
-  }
-
-  /**
-   * Return config value of {@link OzoneConfigKeys#OZONE_SCM_BLOCK_SIZE}.
-   */
-  public long getScmBlockSize() {
-    return scmBlockSize;
-  }
-
-  /**
-   * Return config value of
-   * {@link OzoneConfigKeys#OZONE_KEY_PREALLOCATION_BLOCKS_MAX}.
-   */
-  public int getPreallocateBlocksMax() {
-    return preallocateBlocksMax;
-  }
-
-  /**
-   * Return config value of
-   * {@link HddsConfigKeys#HDDS_BLOCK_TOKEN_ENABLED}.
-   */
-  public boolean isGrpcBlockTokenEnabled() {
-    return grpcBlockTokenEnabled;
-  }
-
-  private KeyProviderCryptoExtension createKeyProviderExt(
-      OzoneConfiguration conf) throws IOException {
-    KeyProvider keyProvider = KMSUtil.createKeyProvider(conf,
-        keyProviderUriKeyName);
-    if (keyProvider == null) {
-      return null;
-    }
-    KeyProviderCryptoExtension cryptoProvider = KeyProviderCryptoExtension
-        .createKeyProviderCryptoExtension(keyProvider);
-    return cryptoProvider;
-  }
-
-  /**
-   * Returns an instance of {@link IAccessAuthorizer}.
-   * Looks up the configuration to see if a custom class is specified.
-   * Constructs the instance by passing the configuration directly to the
-   * constructor to achieve thread safety using final fields.
-   * @param conf
-   * @return IAccessAuthorizer
-   */
-  private IAccessAuthorizer getACLAuthorizerInstance(OzoneConfiguration conf) {
-    Class<? extends IAccessAuthorizer> clazz = conf.getClass(
-        OZONE_ACL_AUTHORIZER_CLASS, OzoneAccessAuthorizer.class,
-        IAccessAuthorizer.class);
-    return ReflectionUtils.newInstance(clazz, conf);
-  }
-
-  @Override
-  public void close() throws IOException {
-    stop();
-  }
-
-  /**
-   * Class which schedules saving metrics to a file.
-   */
-  private class ScheduleOMMetricsWriteTask extends TimerTask {
-    public void run() {
-      saveOmMetrics();
-    }
-  }
-
-  private void saveOmMetrics() {
-    try {
-      boolean success;
-      Files.createDirectories(
-          getTempMetricsStorageFile().getParentFile().toPath());
-      try (BufferedWriter writer = new BufferedWriter(
-          new OutputStreamWriter(new FileOutputStream(
-              getTempMetricsStorageFile()), "UTF-8"))) {
-        OmMetricsInfo metricsInfo = new OmMetricsInfo();
-        metricsInfo.setNumKeys(metrics.getNumKeys());
-        WRITER.writeValue(writer, metricsInfo);
-        success = true;
-      }
-
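-      // Atomically replace the metrics storage file so that a reader never
-      // observes a partially written file.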
-      if (success) {
-        Files.move(getTempMetricsStorageFile().toPath(),
-            getMetricsStorageFile().toPath(), StandardCopyOption
-                .ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
-      }
-    } catch (IOException ex) {
-      LOG.error("Unable to write the om Metrics file", ex);
-    }
-  }
-
-  /**
-   * Returns temporary metrics storage file.
-   * @return File
-   */
-  private File getTempMetricsStorageFile() {
-    return new File(omMetaDir, OM_METRICS_TEMP_FILE);
-  }
-
-  /**
-   * Returns metrics storage file.
-   * @return File
-   */
-  private File getMetricsStorageFile() {
-    return new File(omMetaDir, OM_METRICS_FILE);
-  }
-
-  private OzoneDelegationTokenSecretManager createDelegationTokenSecretManager(
-      OzoneConfiguration conf) throws IOException {
-    long tokenRemoverScanInterval =
-        conf.getTimeDuration(OMConfigKeys.DELEGATION_REMOVER_SCAN_INTERVAL_KEY,
-            OMConfigKeys.DELEGATION_REMOVER_SCAN_INTERVAL_DEFAULT,
-            TimeUnit.MILLISECONDS);
-    long tokenMaxLifetime =
-        conf.getTimeDuration(OMConfigKeys.DELEGATION_TOKEN_MAX_LIFETIME_KEY,
-            OMConfigKeys.DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
-            TimeUnit.MILLISECONDS);
-    long tokenRenewInterval =
-        conf.getTimeDuration(OMConfigKeys.DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
-            OMConfigKeys.DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT,
-            TimeUnit.MILLISECONDS);
-
-    return new OzoneDelegationTokenSecretManager(conf, tokenMaxLifetime,
-        tokenRenewInterval, tokenRemoverScanInterval, omRpcAddressTxt,
-        s3SecretManager, certClient);
-  }
-
-  private OzoneBlockTokenSecretManager createBlockTokenSecretManager(
-      OzoneConfiguration conf) {
-
-    long expiryTime = conf.getTimeDuration(
-        HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME,
-        HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    // TODO: Pass OM cert serial ID.
-    if (testSecureOmFlag) {
-      return new OzoneBlockTokenSecretManager(secConfig, expiryTime, "1");
-    }
-    Objects.requireNonNull(certClient);
-    return new OzoneBlockTokenSecretManager(secConfig, expiryTime,
-        certClient.getCertificate().getSerialNumber().toString());
-  }
-
-  private void stopSecretManager() {
-    if (blockTokenMgr != null) {
-      LOG.info("Stopping OM block token manager.");
-      try {
-        blockTokenMgr.stop();
-      } catch (IOException e) {
-        LOG.error("Failed to stop block token manager", e);
-      }
-    }
-
-    if (delegationTokenMgr != null) {
-      LOG.info("Stopping OM delegation token secret manager.");
-      try {
-        delegationTokenMgr.stop();
-      } catch (IOException e) {
-        LOG.error("Failed to stop delegation token manager", e);
-      }
-    }
-  }
-
-  @VisibleForTesting
-  public void startSecretManager() {
-    try {
-      readKeyPair();
-    } catch (OzoneSecurityException e) {
-      LOG.error("Unable to read key pair for OM.", e);
-      throw new RuntimeException(e);
-    }
-    if (secConfig.isBlockTokenEnabled() && blockTokenMgr != null) {
-      try {
-        LOG.info("Starting OM block token secret manager");
-        blockTokenMgr.start(certClient);
-      } catch (IOException e) {
-        // Unable to start secret manager.
-        LOG.error("Error starting block token secret manager.", e);
-        throw new RuntimeException(e);
-      }
-    }
-
-    if (delegationTokenMgr != null) {
-      try {
-        LOG.info("Starting OM delegation token secret manager");
-        delegationTokenMgr.start(certClient);
-      } catch (IOException e) {
-        // Unable to start secret manager.
-        LOG.error("Error starting delegation token secret manager.", e);
-        throw new RuntimeException(e);
-      }
-    }
-  }
-
-  /**
-   * For testing purposes only.
-   */
-  public void setCertClient(CertificateClient certClient) {
-    // TODO: Initialize it in constructor with implementation for certClient.
-    this.certClient = certClient;
-  }
-
-  /**
-   * Read the key pair and certificate from the file system.
-   */
-  private void readKeyPair() throws OzoneSecurityException {
-    try {
-      LOG.info("Reading keypair and certificate from file system.");
-      PublicKey pubKey = certClient.getPublicKey();
-      PrivateKey pvtKey = certClient.getPrivateKey();
-      Objects.requireNonNull(pubKey);
-      Objects.requireNonNull(pvtKey);
-      Objects.requireNonNull(certClient.getCertificate());
-    } catch (Exception e) {
-      throw new OzoneSecurityException("Error reading keypair & certificate "
-          + "OzoneManager.", e, OzoneSecurityException
-          .ResultCodes.OM_PUBLIC_PRIVATE_KEY_FILE_NOT_EXIST);
-    }
-  }
-
-  /**
-   * Login OM service user if security and Kerberos are enabled.
-   *
-   * @param  conf
-   * @throws IOException, AuthenticationException
-   */
-  private static void loginOMUser(OzoneConfiguration conf)
-      throws IOException, AuthenticationException {
-
-    if (SecurityUtil.getAuthenticationMethod(conf).equals(
-        AuthenticationMethod.KERBEROS)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Ozone security is enabled. Attempting login for OM user. "
-                + "Principal: {}, keytab: {}", conf.get(
-            OZONE_OM_KERBEROS_PRINCIPAL_KEY),
-            conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY));
-      }
-
-      UserGroupInformation.setConfiguration(conf);
-
-      InetSocketAddress socAddr = OmUtils.getOmAddress(conf);
-      SecurityUtil.login(conf, OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
-          OZONE_OM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
-    } else {
-      throw new AuthenticationException(SecurityUtil.getAuthenticationMethod(
-          conf) + " authentication method not supported. OM user login "
-          + "failed.");
-    }
-    LOG.info("Ozone Manager login successful.");
-  }
-
-  /**
-   * Create a scm block client, used by putKey() and getKey().
-   *
-   * @return {@link ScmBlockLocationProtocol}
-   * @throws IOException
-   */
-  private static ScmBlockLocationProtocol getScmBlockClient(
-      OzoneConfiguration conf) throws IOException {
-    RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
-    InetSocketAddress scmBlockAddress =
-        getScmAddressForBlockClients(conf);
-    ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient =
-        new ScmBlockLocationProtocolClientSideTranslatorPB(
-            RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
-                scmBlockAddress, UserGroupInformation.getCurrentUser(), conf,
-                NetUtils.getDefaultSocketFactory(conf),
-                Client.getRpcTimeout(conf)));
-    return TracingUtil
-        .createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class,
-            conf);
-  }
-
-  /**
-   * Returns a scm container client.
-   *
-   * @return {@link StorageContainerLocationProtocol}
-   * @throws IOException
-   */
-  private static StorageContainerLocationProtocol getScmContainerClient(
-      OzoneConfiguration conf) throws IOException {
-    RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
-    InetSocketAddress scmAddr = getScmAddressForClients(
-        conf);
-    StorageContainerLocationProtocol scmContainerClient =
-        TracingUtil.createProxy(
-            new StorageContainerLocationProtocolClientSideTranslatorPB(
-                RPC.getProxy(StorageContainerLocationProtocolPB.class,
-                    scmVersion,
-                    scmAddr, UserGroupInformation.getCurrentUser(), conf,
-                    NetUtils.getDefaultSocketFactory(conf),
-                    Client.getRpcTimeout(conf))),
-            StorageContainerLocationProtocol.class, conf);
-    return scmContainerClient;
-  }
-
-  /**
-   * Starts an RPC server, if configured.
-   *
-   * @param conf configuration
-   * @param addr configured address of RPC server
-   * @param protocol RPC protocol provided by RPC server
-   * @param instance RPC protocol implementation instance
-   * @param handlerCount RPC server handler count
-   * @return RPC server
-   * @throws IOException if there is an I/O error while creating RPC server
-   */
-  private RPC.Server startRpcServer(OzoneConfiguration conf,
-      InetSocketAddress addr, Class<?> protocol, BlockingService instance,
-      int handlerCount) throws IOException {
-    RPC.Server rpcServer = new RPC.Builder(conf)
-        .setProtocol(protocol)
-        .setInstance(instance)
-        .setBindAddress(addr.getHostString())
-        .setPort(addr.getPort())
-        .setNumHandlers(handlerCount)
-        .setVerbose(false)
-        .setSecretManager(delegationTokenMgr)
-        .build();
-
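-    // Register the protobuf protocol implementation with the RPC server.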
-    DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
-
-    if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
-        false)) {
-      rpcServer.refreshServiceAcl(conf, OMPolicyProvider.getInstance());
-    }
-    return rpcServer;
-  }
-
-  private static boolean isOzoneSecurityEnabled() {
-    return securityEnabled;
-  }
-
-  /**
-   * Constructs OM instance based on the configuration.
-   *
-   * @param conf OzoneConfiguration
-   * @return OM instance
-   * @throws IOException, AuthenticationException in case OM instance
-   *   creation fails.
-   */
-  public static OzoneManager createOm(OzoneConfiguration conf)
-      throws IOException, AuthenticationException {
-    return new OzoneManager(conf);
-  }
-
-  /**
-   * Logs in the OM user if security is enabled in the configuration.
-   *
-   * @param conf OzoneConfiguration
-   * @throws IOException, AuthenticationException in case login fails.
-   */
-  private static void loginOMUserIfSecurityEnabled(OzoneConfiguration  conf)
-      throws IOException, AuthenticationException {
-    securityEnabled = OzoneSecurityUtil.isSecurityEnabled(conf);
-    if (securityEnabled) {
-      loginOMUser(conf);
-    }
-  }
-
-  /**
-   * Initializes the OM instance.
-   *
-   * @param conf OzoneConfiguration
-   * @return true if OM initialization succeeds, false otherwise
-   * @throws IOException in case ozone metadata directory path is not
-   *                     accessible
-   */
-  @VisibleForTesting
-  public static boolean omInit(OzoneConfiguration conf) throws IOException,
-      AuthenticationException {
-    OMHANodeDetails.loadOMHAConfig(conf);
-    loginOMUserIfSecurityEnabled(conf);
-    OMStorage omStorage = new OMStorage(conf);
-    StorageState state = omStorage.getState();
-    if (state != StorageState.INITIALIZED) {
-      try {
-        ScmInfo scmInfo = getScmInfo(conf);
-        String clusterId = scmInfo.getClusterId();
-        String scmId = scmInfo.getScmId();
-        if (clusterId == null || clusterId.isEmpty()) {
-          throw new IOException("Invalid Cluster ID");
-        }
-        if (scmId == null || scmId.isEmpty()) {
-          throw new IOException("Invalid SCM ID");
-        }
-        omStorage.setClusterId(clusterId);
-        omStorage.setScmId(scmId);
-        if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
-          initializeSecurity(conf, omStorage);
-        }
-        omStorage.initialize();
-        System.out.println(
-            "OM initialization succeeded.Current cluster id for sd="
-                + omStorage.getStorageDir() + ";cid=" + omStorage
-                .getClusterID());
-
-        return true;
-      } catch (IOException ioe) {
-        LOG.error("Could not initialize OM version file", ioe);
-        return false;
-      }
-    } else {
-      if(OzoneSecurityUtil.isSecurityEnabled(conf) &&
-          omStorage.getOmCertSerialId() == null) {
-        LOG.info("OM storage is already initialized. Initializing security");
-        initializeSecurity(conf, omStorage);
-        omStorage.persistCurrentState();
-      }
-      System.out.println(
-          "OM already initialized.Reusing existing cluster id for sd="
-              + omStorage.getStorageDir() + ";cid=" + omStorage
-              .getClusterID());
-      return true;
-    }
-  }
-
-  /**
-   * Initializes secure OzoneManager.
-   * */
-  @VisibleForTesting
-  public static void initializeSecurity(OzoneConfiguration conf,
-      OMStorage omStore)
-      throws IOException {
-    LOG.info("Initializing secure OzoneManager.");
-
-    CertificateClient certClient =
-        new OMCertificateClient(new SecurityConfig(conf),
-            omStore.getOmCertSerialId());
-    CertificateClient.InitResponse response = certClient.init();
-    LOG.info("Init response: {}", response);
-    switch (response) {
-    case SUCCESS:
-      LOG.info("Initialization successful.");
-      break;
-    case GETCERT:
-      getSCMSignedCert(certClient, conf, omStore);
-      LOG.info("Successfully stored SCM signed certificate.");
-      break;
-    case FAILURE:
-      LOG.error("OM security initialization failed.");
-      throw new RuntimeException("OM security initialization failed.");
-    case RECOVER:
-      LOG.error("OM security initialization failed. OM certificate is " +
-          "missing.");
-      throw new RuntimeException("OM security initialization failed.");
-    default:
-      LOG.error("OM security initialization failed. Init response: {}",
-          response);
-      throw new RuntimeException("OM security initialization failed.");
-    }
-  }
-
-  private static ScmInfo getScmInfo(OzoneConfiguration conf)
-      throws IOException {
-    try {
-      RetryPolicy retryPolicy = retryUpToMaximumCountWithFixedSleep(
-          10, 5, TimeUnit.SECONDS);
-      RetriableTask<ScmInfo> retriable = new RetriableTask<>(
-          retryPolicy, "OM#getScmInfo",
-          () -> getScmBlockClient(conf).getScmInfo());
-      return retriable.call();
-    } catch (IOException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new IOException("Failed to get SCM info", e);
-    }
-  }
-
-  /**
-   * Builds a message for logging startup information about an RPC server.
-   *
-   * @param description RPC server description
-   * @param addr RPC server listening address
-   * @return server startup message
-   */
-  private static String buildRpcServerStartMessage(String description,
-      InetSocketAddress addr) {
-    return addr != null ? String.format("%s is listening at %s",
-        description, addr.toString()) :
-        String.format("%s not started", description);
-  }
-
-  @VisibleForTesting
-  public KeyManager getKeyManager() {
-    return keyManager;
-  }
-
-  @VisibleForTesting
-  public ScmInfo getScmInfo() throws IOException {
-    return scmBlockClient.getScmInfo();
-  }
-
-  @VisibleForTesting
-  public OMStorage getOmStorage() {
-    return omStorage;
-  }
-
-  @VisibleForTesting
-  public OzoneManagerRatisServer getOmRatisServer() {
-    return omRatisServer;
-  }
-
-  @VisibleForTesting
-  public OzoneManagerSnapshotProvider getOmSnapshotProvider() {
-    return omSnapshotProvider;
-  }
-
-  @VisibleForTesting
-  public InetSocketAddress getOmRpcServerAddr() {
-    return omRpcAddress;
-  }
-
-  @VisibleForTesting
-  public LifeCycle.State getOmRatisServerState() {
-    if (omRatisServer == null) {
-      return null;
-    } else {
-      return omRatisServer.getServerState();
-    }
-  }
-
-  @VisibleForTesting
-  public KeyProviderCryptoExtension getKmsProvider() {
-    return kmsProvider;
-  }
-
-  public PrefixManager getPrefixManager() {
-    return prefixManager;
-  }
-
-  /**
-   * Get metadata manager.
-   *
-   * @return metadata manager.
-   */
-  public OMMetadataManager getMetadataManager() {
-    return metadataManager;
-  }
-
-  public OzoneBlockTokenSecretManager getBlockTokenMgr() {
-    return blockTokenMgr;
-  }
-
-  public OzoneManagerProtocolServerSideTranslatorPB getOmServerProtocol() {
-    return omServerProtocol;
-  }
-
-  public OMMetrics getMetrics() {
-    return metrics;
-  }
-
-  /**
-   * Start service.
-   */
-  public void start() throws IOException {
-
-    omClientProtocolMetrics.register();
-
-    LOG.info(buildRpcServerStartMessage("OzoneManager RPC server",
-        omRpcAddress));
-
-    DefaultMetricsSystem.initialize("OzoneManager");
-
-    // Start Ratis services
-    if (omRatisServer != null) {
-      omRatisServer.start();
-    }
-    if (omRatisClient != null) {
-      omRatisClient.connect();
-    }
-
-    metadataManager.start(configuration);
-    startSecretManagerIfNecessary();
-
-    if (certClient != null) {
-      caCertPem = CertificateCodec.getPEMEncodedString(
-          certClient.getCACertificate());
-    }
-    // Set metrics and start the metrics background thread
-    metrics.setNumVolumes(metadataManager.countRowsInTable(metadataManager
-        .getVolumeTable()));
-    metrics.setNumBuckets(metadataManager.countRowsInTable(metadataManager
-        .getBucketTable()));
-
-    if (getMetricsStorageFile().exists()) {
-      OmMetricsInfo metricsInfo = READER.readValue(getMetricsStorageFile());
-      metrics.setNumKeys(metricsInfo.getNumKeys());
-    }
-
-    // Schedule save metrics
-    long period = configuration.getTimeDuration(OZONE_OM_METRICS_SAVE_INTERVAL,
-        OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
-    scheduleOMMetricsWriteTask = new ScheduleOMMetricsWriteTask();
-    metricsTimer = new Timer();
-    metricsTimer.schedule(scheduleOMMetricsWriteTask, 0, period);
-
-    keyManager.start(configuration);
-    omRpcServer.start();
-    isOmRpcServerRunning = true;
-    try {
-      httpServer = new OzoneManagerHttpServer(configuration, this);
-      httpServer.start();
-    } catch (Exception ex) {
-      // Allow OM to start, as HTTP server failure is not fatal.
-      LOG.error("OM HttpServer failed to start.", ex);
-    }
-    registerMXBean();
-    setStartTime();
-  }
-
-  /**
-   * Restarts the service. This method re-initializes the rpc server.
-   */
-  public void restart() throws IOException {
-    LOG.info(buildRpcServerStartMessage("OzoneManager RPC server",
-        omRpcAddress));
-
-    HddsUtils.initializeMetrics(configuration, "OzoneManager");
-
-    instantiateServices();
-
-    startSecretManagerIfNecessary();
-
-    // Set metrics and start the metrics background thread
-    metrics.setNumVolumes(metadataManager.countRowsInTable(metadataManager
-        .getVolumeTable()));
-    metrics.setNumBuckets(metadataManager.countRowsInTable(metadataManager
-        .getBucketTable()));
-
-    if (getMetricsStorageFile().exists()) {
-      OmMetricsInfo metricsInfo = READER.readValue(getMetricsStorageFile());
-      metrics.setNumKeys(metricsInfo.getNumKeys());
-    }
-
-    // Schedule save metrics
-    long period = configuration.getTimeDuration(OZONE_OM_METRICS_SAVE_INTERVAL,
-        OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
-    scheduleOMMetricsWriteTask = new ScheduleOMMetricsWriteTask();
-    metricsTimer = new Timer();
-    metricsTimer.schedule(scheduleOMMetricsWriteTask, 0, period);
-
-    omRpcServer = getRpcServer(configuration);
-    omRpcServer.start();
-    isOmRpcServerRunning = true;
-
-    initializeRatisServer();
-    if (omRatisServer != null) {
-      omRatisServer.start();
-    }
-    initializeRatisClient();
-    if (omRatisClient != null) {
-      omRatisClient.connect();
-    }
-
-    try {
-      httpServer = new OzoneManagerHttpServer(configuration, this);
-      httpServer.start();
-    } catch (Exception ex) {
-      // Allow OM to start, as HTTP server failure is not fatal.
-      LOG.error("OM HttpServer failed to start.", ex);
-    }
-    registerMXBean();
-
-    // Start jvm monitor
-    jvmPauseMonitor = new JvmPauseMonitor();
-    jvmPauseMonitor.init(configuration);
-    jvmPauseMonitor.start();
-    setStartTime();
-  }
-
-  /**
-   * Creates a new instance of the RPC server. If an instance is already
-   * running, returns it instead.
-   */
-  private RPC.Server getRpcServer(OzoneConfiguration conf) throws IOException {
-    if (isOmRpcServerRunning) {
-      return omRpcServer;
-    }
-
-    InetSocketAddress omNodeRpcAddr = OmUtils.getOmAddress(conf);
-
-    final int handlerCount = conf.getInt(OZONE_OM_HANDLER_COUNT_KEY,
-        OZONE_OM_HANDLER_COUNT_DEFAULT);
-    RPC.setProtocolEngine(configuration, OzoneManagerProtocolPB.class,
-        ProtobufRpcEngine.class);
-    this.omServerProtocol = new OzoneManagerProtocolServerSideTranslatorPB(
-        this, omRatisServer, omClientProtocolMetrics, isRatisEnabled);
-
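-    // Wrap the server-side translator in a protobuf BlockingService for the
-    // RPC server.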
-    BlockingService omService = newReflectiveBlockingService(omServerProtocol);
-
-    return startRpcServer(configuration, omNodeRpcAddr,
-        OzoneManagerProtocolPB.class, omService,
-        handlerCount);
-  }
-
-  /**
-   * Creates an instance of ratis server.
-   */
-  private void initializeRatisServer() throws IOException {
-    if (isRatisEnabled) {
-      if (omRatisServer == null) {
-        omRatisServer = OzoneManagerRatisServer.newOMRatisServer(
-            configuration, this, omNodeDetails, peerNodes);
-      }
-      LOG.info("OzoneManager Ratis server initialized at port {}",
-          omRatisServer.getServerPort());
-    } else {
-      omRatisServer = null;
-    }
-  }
-
-  /**
-   * Creates an instance of ratis client.
-   */
-  private void initializeRatisClient() throws IOException {
-    if (isRatisEnabled) {
-      if (omRatisClient == null) {
-        omRatisClient = OzoneManagerRatisClient.newOzoneManagerRatisClient(
-            omNodeDetails.getOMNodeId(), omRatisServer.getRaftGroup(),
-            configuration);
-      }
-    } else {
-      omRatisClient = null;
-    }
-  }
-
-  public OMRatisSnapshotInfo getSnapshotInfo() {
-    return omRatisSnapshotInfo;
-  }
-
-  @VisibleForTesting
-  public long getRatisSnapshotIndex() {
-    return omRatisSnapshotInfo.getIndex();
-  }
-
-  @Override
-  public long saveRatisSnapshot() throws IOException {
-    long snapshotIndex = omRatisServer.getStateMachineLastAppliedIndex();
-
-    // Flush the OM state to disk
-    metadataManager.getStore().flush();
-
-    omRatisSnapshotInfo.saveRatisSnapshotToDisk(snapshotIndex);
-
-    return snapshotIndex;
-  }
-
-  /**
-   * Stop service.
-   */
-  public void stop() {
-    try {
-      // Cancel the metrics timer and set to null.
-      if (metricsTimer != null) {
-        metricsTimer.cancel();
-        metricsTimer = null;
-        scheduleOMMetricsWriteTask = null;
-      }
-      omRpcServer.stop();
-      // When Ratis is not enabled, we need to call stop() to stop
-      // OzoneManagerDoubleBuffer in the OM server protocol.
-      if (!isRatisEnabled) {
-        omServerProtocol.stop();
-      }
-      if (omRatisServer != null) {
-        omRatisServer.stop();
-        omRatisServer = null;
-      }
-      if (omRatisClient != null) {
-        omRatisClient.close();
-        omRatisClient = null;
-      }
-      isOmRpcServerRunning = false;
-      keyManager.stop();
-      stopSecretManager();
-      if (httpServer != null) {
-        httpServer.stop();
-      }
-      metadataManager.stop();
-      metrics.unRegister();
-      omClientProtocolMetrics.unregister();
-      unregisterMXBean();
-      if (jvmPauseMonitor != null) {
-        jvmPauseMonitor.stop();
-      }
-    } catch (Exception e) {
-      LOG.error("OzoneManager stop failed.", e);
-    }
-  }
-
-  /**
-   * Wait until service has completed shutdown.
-   */
-  public void join() {
-    try {
-      omRpcServer.join();
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      LOG.info("Interrupted during OzoneManager join.", e);
-    }
-  }
-
-  private void startSecretManagerIfNecessary() {
-    boolean shouldRun = isOzoneSecurityEnabled();
-    if (shouldRun) {
-      boolean running = delegationTokenMgr.isRunning()
-          && blockTokenMgr.isRunning();
-      if (!running) {
-        startSecretManager();
-      }
-    }
-  }
-
-  /**
-   * Get SCM signed certificate and store it using certificate client.
-   * */
-  private static void getSCMSignedCert(CertificateClient client,
-      OzoneConfiguration config, OMStorage omStore) throws IOException {
-    CertificateSignRequest.Builder builder = client.getCSRBuilder();
-    KeyPair keyPair = new KeyPair(client.getPublicKey(),
-        client.getPrivateKey());
-    InetSocketAddress omRpcAdd;
-    omRpcAdd = OmUtils.getOmAddress(config);
-    if (omRpcAdd == null || omRpcAdd.getAddress() == null) {
-      LOG.error("Incorrect om rpc address. omRpcAdd:{}", omRpcAdd);
-      throw new RuntimeException("Can't get SCM signed certificate. " +
-          "omRpcAdd: " + omRpcAdd);
-    }
-    // Get host name.
-    String hostname = omRpcAdd.getAddress().getHostName();
-    String ip = omRpcAdd.getAddress().getHostAddress();
-
-    String subject = UserGroupInformation.getCurrentUser()
-        .getShortUserName() + "@" + hostname;
-
-    builder.setCA(false)
-        .setKey(keyPair)
-        .setConfiguration(config)
-        .setScmID(omStore.getScmId())
-        .setClusterID(omStore.getClusterID())
-        .setSubject(subject)
-        .addIpAddress(ip);
-
-    LOG.info("Creating csr for OM->dns:{},ip:{},scmId:{},clusterId:{}," +
-            "subject:{}", hostname, ip,
-        omStore.getScmId(), omStore.getClusterID(), subject);
-
-    HddsProtos.OzoneManagerDetailsProto.Builder omDetailsProtoBuilder =
-        HddsProtos.OzoneManagerDetailsProto.newBuilder()
-            .setHostName(omRpcAdd.getHostName())
-            .setIpAddress(ip)
-            .setUuid(omStore.getOmId())
-            .addPorts(HddsProtos.Port.newBuilder()
-                .setName(RPC_PORT)
-                .setValue(omRpcAdd.getPort())
-                .build());
-
-    PKCS10CertificationRequest csr = builder.build();
-    HddsProtos.OzoneManagerDetailsProto omDetailsProto =
-        omDetailsProtoBuilder.build();
-    LOG.info("OzoneManager ports added:{}", omDetailsProto.getPortsList());
-    SCMSecurityProtocolClientSideTranslatorPB secureScmClient =
-        HddsUtils.getScmSecurityClient(config);
-
-    SCMGetCertResponseProto response = secureScmClient.
-        getOMCertChain(omDetailsProto, getEncodedString(csr));
-    String pemEncodedCert = response.getX509Certificate();
-
-    try {
-      // Store SCM CA certificate.
-      if(response.hasX509CACertificate()) {
-        String pemEncodedRootCert = response.getX509CACertificate();
-        client.storeCertificate(pemEncodedRootCert, true, true);
-        client.storeCertificate(pemEncodedCert, true);
-        // Persist om cert serial id.
-        omStore.setOmCertSerialId(CertificateCodec.
-            getX509Certificate(pemEncodedCert).getSerialNumber().toString());
-      } else {
-        throw new RuntimeException("Unable to retrieve OM certificate " +
-            "chain");
-      }
-    } catch (IOException | CertificateException e) {
-      LOG.error("Error while storing SCM signed certificate.", e);
-      throw new RuntimeException(e);
-    }
-
-  }
-
-  /**
-   * Checks whether a delegation token operation is currently allowed.
-   *
-   * @return true if delegation token operation is allowed
-   */
-  private boolean isAllowedDelegationTokenOp() throws IOException {
-    AuthenticationMethod authMethod = getConnectionAuthenticationMethod();
-    if (UserGroupInformation.isSecurityEnabled()
-        && (authMethod != AuthenticationMethod.KERBEROS)
-        && (authMethod != AuthenticationMethod.KERBEROS_SSL)
-        && (authMethod != AuthenticationMethod.CERTIFICATE)) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * Returns authentication method used to establish the connection.
-   * @return AuthenticationMethod used to establish connection
-   * @throws IOException
-   */
-  private AuthenticationMethod getConnectionAuthenticationMethod()
-      throws IOException {
-    UserGroupInformation ugi = getRemoteUser();
-    AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
-    if (authMethod == AuthenticationMethod.PROXY) {
-      authMethod = ugi.getRealUser().getAuthenticationMethod();
-    }
-    return authMethod;
-  }
-
-  // Optimize UGI lookup for RPC operations to avoid a trip through
-  // UGI.getCurrentUser, which is synchronized.
-  private static UserGroupInformation getRemoteUser() throws IOException {
-    UserGroupInformation ugi = Server.getRemoteUser();
-    return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
-  }
-
-  /**
-   * Get delegation token from OzoneManager.
-   * @param renewer Renewer information
-   * @return delegationToken DelegationToken signed by OzoneManager
-   * @throws IOException on error
-   */
-  @Override
-  public Token<OzoneTokenIdentifier> getDelegationToken(Text renewer)
-      throws OMException {
-    Token<OzoneTokenIdentifier> token;
-    try {
-      if (!isAllowedDelegationTokenOp()) {
-        throw new OMException("Delegation Token can be issued only with "
-            + "kerberos or web authentication",
-            INVALID_AUTH_METHOD);
-      }
-      if (delegationTokenMgr == null || !delegationTokenMgr.isRunning()) {
-        LOG.warn("trying to get DT with no secret manager running in OM.");
-        return null;
-      }
-
-      UserGroupInformation ugi = getRemoteUser();
-      String user = ugi.getUserName();
-      Text owner = new Text(user);
-      Text realUser = null;
-      if (ugi.getRealUser() != null) {
-        realUser = new Text(ugi.getRealUser().getUserName());
-      }
-
-      return delegationTokenMgr.createToken(owner, renewer, realUser);
-    } catch (OMException oex) {
-      throw oex;
-    } catch (IOException ex) {
-      LOG.error("Get Delegation token failed, cause: {}", ex.getMessage());
-      throw new OMException("Get Delegation token failed.", ex,
-          TOKEN_ERROR_OTHER);
-    }
-  }
-
-  /**
-   * Method to renew a delegationToken issued by OzoneManager.
-   * @param token token to renew
-   * @return new expiryTime of the token
-   * @throws InvalidToken if {@code token} is invalid
-   * @throws IOException on other errors
-   */
-  @Override
-  public long renewDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws OMException {
-    long expiryTime;
-
-    try {
-
-      if (!isAllowedDelegationTokenOp()) {
-        throw new OMException("Delegation Token can be renewed only with "
-            + "kerberos or web authentication",
-            INVALID_AUTH_METHOD);
-      }
-      String renewer = getRemoteUser().getShortUserName();
-      expiryTime = delegationTokenMgr.renewToken(token, renewer);
-
-    } catch (OMException oex) {
-      throw oex;
-    } catch (IOException ex) {
-      OzoneTokenIdentifier id = null;
-      try {
-        id = OzoneTokenIdentifier.readProtoBuf(token.getIdentifier());
-      } catch (IOException exe) {
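-        // Ignore: the identifier is only used for the log message below and
-        // stays null if it cannot be parsed.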
-      }
-      LOG.error("Delegation token renewal failed for dt id: {}, cause: {}",
-          id, ex.getMessage());
-      throw new OMException("Delegation token renewal failed for dt: " + token,
-          ex, TOKEN_ERROR_OTHER);
-    }
-    return expiryTime;
-  }
-
-  /**
-   * Cancels a delegation token.
-   * @param token token to cancel
-   * @throws IOException on error
-   */
-  @Override
-  public void cancelDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws OMException {
-    OzoneTokenIdentifier id = null;
-    try {
-      String canceller = getRemoteUser().getUserName();
-      id = delegationTokenMgr.cancelToken(token, canceller);
-      LOG.trace("Delegation token cancelled for dt: {}", id);
-    } catch (OMException oex) {
-      throw oex;
-    } catch (IOException ex) {
-      LOG.error("Delegation token cancellation failed for dt id: {}, cause: {}",
-          id, ex.getMessage());
-      throw new OMException("Delegation token renewal failed for dt: " + token,
-          ex, TOKEN_ERROR_OTHER);
-    }
-  }
-
-  /**
-   * Creates a volume.
-   *
-   * @param args - Arguments to create Volume.
-   * @throws IOException
-   */
-  @Override
-  public void createVolume(OmVolumeArgs args) throws IOException {
-    try {
-      if(isAclEnabled) {
-        if (!ozAdmins.contains(OZONE_ADMINISTRATORS_WILDCARD) && 
-            !ozAdmins.contains(ProtobufRpcEngine.Server.getRemoteUser()
-                .getUserName())) {
-          LOG.error("Only admin users are authorized to create " +
-              "Ozone volumes. User :{} is not an admin.",
-              ProtobufRpcEngine.Server.getRemoteUser().getUserName());
-          throw new OMException("Only admin users are authorized to create " +
-              "Ozone volumes.", ResultCodes.PERMISSION_DENIED);
-        }
-      }
-      metrics.incNumVolumeCreates();
-      volumeManager.createVolume(args);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.CREATE_VOLUME,
-          (args == null) ? null : args.toAuditMap()));
-      metrics.incNumVolumes();
-    } catch (Exception ex) {
-      metrics.incNumVolumeCreateFails();
-      AUDIT.logWriteFailure(
-          buildAuditMessageForFailure(OMAction.CREATE_VOLUME,
-              (args == null) ? null : args.toAuditMap(), ex)
-      );
-      throw ex;
-    }
-  }
-
-  /**
-   * Checks if current caller has acl permissions.
-   *
-   * @param resType - Type of ozone resource. Ex volume, bucket.
-   * @param store   - Store type. i.e Ozone, S3.
-   * @param acl     - type of access to be checked.
-   * @param vol     - name of volume
-   * @param bucket  - bucket name
-   * @param key     - key
-   * @throws OMException
-   */
-  private void checkAcls(ResourceType resType, StoreType store,
-      ACLType acl, String vol, String bucket, String key)
-      throws OMException {
-    checkAcls(resType, store, acl, vol, bucket, key,
-        ProtobufRpcEngine.Server.getRemoteUser(),
-        ProtobufRpcEngine.Server.getRemoteIp());
-  }
-
-  /**
-   * CheckAcls for the ozone object.
-   * @param resType
-   * @param storeType
-   * @param aclType
-   * @param vol
-   * @param bucket
-   * @param key
-   * @param ugi
-   * @param remoteAddress
-   * @throws OMException
-   */
-  @SuppressWarnings("parameternumber")
-  public void checkAcls(ResourceType resType, StoreType storeType,
-      ACLType aclType, String vol, String bucket, String key,
-      UserGroupInformation ugi, InetAddress remoteAddress)
-      throws OMException {
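-    // Build the ozone object and request context, then delegate the access
-    // decision to the configured IAccessAuthorizer.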
-    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
-        .setResType(resType)
-        .setStoreType(storeType)
-        .setVolumeName(vol)
-        .setBucketName(bucket)
-        .setKeyName(key).build();
-    RequestContext context = RequestContext.newBuilder()
-        .setClientUgi(ugi)
-        .setIp(remoteAddress)
-        .setAclType(ACLIdentityType.USER)
-        .setAclRights(aclType)
-        .build();
-    if (!accessAuthorizer.checkAccess(obj, context)) {
-      LOG.warn("User {} doesn't have {} permission to access {}",
-          ugi.getUserName(), aclType, resType);
-      throw new OMException("User " + ugi.getUserName() + " doesn't " +
-          "have " + aclType + " permission to access " + resType,
-          ResultCodes.PERMISSION_DENIED);
-    }
-  }
-
-  /**
-   * Returns true if Ozone ACLs are enabled, false otherwise.
-   * @return boolean
-   */
-  public boolean getAclsEnabled() {
-    return isAclEnabled;
-  }
-
-  /**
-   * Changes the owner of a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  @Override
-  public void setOwner(String volume, String owner) throws IOException {
-    if(isAclEnabled) {
-      checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.WRITE_ACL, volume,
-          null, null);
-    }
-    Map<String, String> auditMap = buildAuditMap(volume);
-    auditMap.put(OzoneConsts.OWNER, owner);
-    try {
-      metrics.incNumVolumeUpdates();
-      volumeManager.setOwner(volume, owner);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.SET_OWNER,
-          auditMap));
-    } catch (Exception ex) {
-      metrics.incNumVolumeUpdateFails();
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.SET_OWNER,
-          auditMap, ex)
-      );
-      throw ex;
-    }
-  }
-
-  /**
-   * Changes the Quota on a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  @Override
-  public void setQuota(String volume, long quota) throws IOException {
-    if(isAclEnabled) {
-      checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.WRITE, volume,
-          null, null);
-    }
-
-    Map<String, String> auditMap = buildAuditMap(volume);
-    auditMap.put(OzoneConsts.QUOTA, String.valueOf(quota));
-    try {
-      metrics.incNumVolumeUpdates();
-      volumeManager.setQuota(volume, quota);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.SET_QUOTA,
-          auditMap));
-    } catch (Exception ex) {
-      metrics.incNumVolumeUpdateFails();
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.SET_QUOTA,
-          auditMap, ex));
-      throw ex;
-    }
-  }
-
-  /**
-   * Checks if the specified user can access this volume.
-   *
-   * @param volume - volume
-   * @param userAcl - user acls which needs to be checked for access
-   * @return true if the user has required access for the volume, false
-   * otherwise
-   * @throws IOException
-   */
-  @Override
-  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
-      throws IOException {
-    if(isAclEnabled) {
-      checkAcls(ResourceType.VOLUME, StoreType.OZONE,
-          ACLType.READ, volume, null, null);
-    }
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = buildAuditMap(volume);
-    auditMap.put(OzoneConsts.USER_ACL,
-        (userAcl == null) ? null : userAcl.getName());
-    try {
-      metrics.incNumVolumeCheckAccesses();
-      return volumeManager.checkVolumeAccess(volume, userAcl);
-    } catch (Exception ex) {
-      metrics.incNumVolumeCheckAccessFails();
-      auditSuccess = false;
-      AUDIT.logReadFailure(buildAuditMessageForFailure(
-          OMAction.CHECK_VOLUME_ACCESS, auditMap, ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logReadSuccess(buildAuditMessageForSuccess(
-            OMAction.CHECK_VOLUME_ACCESS, auditMap));
-      }
-    }
-  }
-
-  /**
-   * Gets the volume information.
-   *
-   * @param volume - Volume name.
-   * @return VolumeArgs, or an exception is thrown.
-   * @throws IOException
-   */
-  @Override
-  public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
-    if(isAclEnabled) {
-      checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.READ, volume,
-          null, null);
-    }
-
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = buildAuditMap(volume);
-    try {
-      metrics.incNumVolumeInfos();
-      return volumeManager.getVolumeInfo(volume);
-    } catch (Exception ex) {
-      metrics.incNumVolumeInfoFails();
-      auditSuccess = false;
-      AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.READ_VOLUME,
-          auditMap, ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_VOLUME,
-            auditMap));
-      }
-    }
-  }
-
-  /**
-   * Deletes an existing empty volume.
-   *
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  @Override
-  public void deleteVolume(String volume) throws IOException {
-    try {
-      if(isAclEnabled) {
-        checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.DELETE, volume,
-            null, null);
-      }
-      metrics.incNumVolumeDeletes();
-      volumeManager.deleteVolume(volume);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_VOLUME,
-          buildAuditMap(volume)));
-      metrics.decNumVolumes();
-    } catch (Exception ex) {
-      metrics.incNumVolumeDeleteFails();
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_VOLUME,
-          buildAuditMap(volume), ex));
-      throw ex;
-    }
-  }
-
-  /**
-   * Lists volumes owned by a specific user.
-   *
-   * @param userName - user name
-   * @param prefix - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- the list starts from the entry after
-   * prevKey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  @Override
-  public List<OmVolumeArgs> listVolumeByUser(String userName, String prefix,
-      String prevKey, int maxKeys) throws IOException {
-    if(isAclEnabled) {
-      UserGroupInformation remoteUserUgi = ProtobufRpcEngine.Server.
-          getRemoteUser();
-      if (remoteUserUgi == null) {
-        LOG.error("Rpc user UGI is null. Authorization failed.");
-        throw new OMException("Rpc user UGI is null. Authorization " +
-            "failed.", ResultCodes.PERMISSION_DENIED);
-      }
-    }
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = new LinkedHashMap<>();
-    auditMap.put(OzoneConsts.PREV_KEY, prevKey);
-    auditMap.put(OzoneConsts.PREFIX, prefix);
-    auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys));
-    auditMap.put(OzoneConsts.USERNAME, userName);
-    try {
-      metrics.incNumVolumeLists();
-      return volumeManager.listVolumes(userName, prefix, prevKey, maxKeys);
-    } catch (Exception ex) {
-      metrics.incNumVolumeListFails();
-      auditSuccess = false;
-      AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_VOLUMES,
-          auditMap, ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_VOLUMES,
-            auditMap));
-      }
-    }
-  }
-
-  /**
-   * Lists all volumes in the cluster.
-   *
-   * @param prefix - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- the list starts from the entry after
-   * prevKey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  @Override
-  public List<OmVolumeArgs> listAllVolumes(String prefix, String prevKey, int
-      maxKeys) throws IOException {
-    if(isAclEnabled) {
-      if (!ozAdmins.contains(ProtobufRpcEngine.Server.
-          getRemoteUser().getUserName())
-          && !ozAdmins.contains(OZONE_ADMINISTRATORS_WILDCARD)) {
-        LOG.error("Only admin users are authorized to create " +
-            "Ozone volumes.");
-        throw new OMException("Only admin users are authorized to create " +
-            "Ozone volumes.", ResultCodes.PERMISSION_DENIED);
-      }
-    }
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = new LinkedHashMap<>();
-    auditMap.put(OzoneConsts.PREV_KEY, prevKey);
-    auditMap.put(OzoneConsts.PREFIX, prefix);
-    auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys));
-    auditMap.put(OzoneConsts.USERNAME, null);
-    try {
-      metrics.incNumVolumeLists();
-      return volumeManager.listVolumes(null, prefix, prevKey, maxKeys);
-    } catch (Exception ex) {
-      metrics.incNumVolumeListFails();
-      auditSuccess = false;
-      AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_VOLUMES,
-          auditMap, ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_VOLUMES,
-            auditMap));
-      }
-    }
-  }
-
-  /**
-   * Creates a bucket.
-   *
-   * @param bucketInfo - BucketInfo to create bucket.
-   * @throws IOException
-   */
-  @Override
-  public void createBucket(OmBucketInfo bucketInfo) throws IOException {
-    try {
-      if(isAclEnabled) {
-        checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.CREATE,
-            bucketInfo.getVolumeName(), bucketInfo.getBucketName(), null);
-      }
-      metrics.incNumBucketCreates();
-      bucketManager.createBucket(bucketInfo);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.CREATE_BUCKET,
-          (bucketInfo == null) ? null : bucketInfo.toAuditMap()));
-      metrics.incNumBuckets();
-    } catch (Exception ex) {
-      metrics.incNumBucketCreateFails();
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.CREATE_BUCKET,
-          (bucketInfo == null) ? null : bucketInfo.toAuditMap(), ex));
-      throw ex;
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<OmBucketInfo> listBuckets(String volumeName,
-      String startKey, String prefix, int maxNumOfBuckets)
-      throws IOException {
-    if(isAclEnabled) {
-      checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.LIST, volumeName,
-          null, null);
-    }
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = buildAuditMap(volumeName);
-    auditMap.put(OzoneConsts.START_KEY, startKey);
-    auditMap.put(OzoneConsts.PREFIX, prefix);
-    auditMap.put(OzoneConsts.MAX_NUM_OF_BUCKETS,
-        String.valueOf(maxNumOfBuckets));
-    try {
-      metrics.incNumBucketLists();
-      return bucketManager.listBuckets(volumeName,
-          startKey, prefix, maxNumOfBuckets);
-    } catch (IOException ex) {
-      metrics.incNumBucketListFails();
-      auditSuccess = false;
-      AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_BUCKETS,
-          auditMap, ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_BUCKETS,
-            auditMap));
-      }
-    }
-  }
-
-  /**
-   * Gets the bucket information.
-   *
-   * @param volume - Volume name.
-   * @param bucket - Bucket name.
-   * @return OmBucketInfo, or an exception is thrown.
-   * @throws IOException
-   */
-  @Override
-  public OmBucketInfo getBucketInfo(String volume, String bucket)
-      throws IOException {
-    if(isAclEnabled) {
-      checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.READ, volume,
-          bucket, null);
-    }
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = buildAuditMap(volume);
-    auditMap.put(OzoneConsts.BUCKET, bucket);
-    try {
-      metrics.incNumBucketInfos();
-      return bucketManager.getBucketInfo(volume, bucket);
-    } catch (Exception ex) {
-      metrics.incNumBucketInfoFails();
-      auditSuccess = false;
-      AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.READ_BUCKET,
-          auditMap, ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_BUCKET,
-            auditMap));
-      }
-    }
-  }
-
-  /**
-   * Allocate a key.
-   *
-   * @param args - attributes of the key.
-   * @return OmKeyInfo - the info about the allocated key.
-   * @throws IOException
-   */
-  @Override
-  public OpenKeySession openKey(OmKeyArgs args) throws IOException {
-    if(isAclEnabled) {
-      try {
-        checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE,
-            args.getVolumeName(), args.getBucketName(), args.getKeyName());
-      } catch (OMException ex) {
-        // For new keys the key-level checkAccess call will fail because
-        // the key doesn't exist yet. Fall back to the user's bucket access.
-        if (ex.getResult().equals(KEY_NOT_FOUND)) {
-          checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE,
-              args.getVolumeName(), args.getBucketName(), args.getKeyName());
-        } else {
-          throw ex;
-        }
-      }
-    }
-    boolean auditSuccess = true;
-    try {
-      metrics.incNumKeyAllocates();
-      return keyManager.openKey(args);
-    } catch (Exception ex) {
-      metrics.incNumKeyAllocateFails();
-      auditSuccess = false;
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.ALLOCATE_KEY,
-          (args == null) ? null : args.toAuditMap(), ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
-            OMAction.ALLOCATE_KEY, (args == null) ? null : args.toAuditMap()));
-      }
-    }
-  }
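
openKey (and later commitKey and allocateBlock) authorize writes with a two-step ACL check: try the key-level ACL first and, if the key does not exist yet, fall back to the bucket ACL. A self-contained sketch of the fallback, with hypothetical checkKeyAcl/checkBucketAcl stand-ins for the OM's checkAcls(...):

```java
// Standalone sketch of the write-ACL fallback used by openKey, commitKey
// and allocateBlock. All names are stand-ins for OM internals.
public final class AclFallbackSketch {

  static class KeyNotFoundException extends Exception { }

  // Hypothetical stand-in for checkAcls(KEY, ...): new keys never exist yet.
  static void checkKeyAcl(String vol, String bucket, String key)
      throws KeyNotFoundException {
    throw new KeyNotFoundException();
  }

  // Hypothetical stand-in for checkAcls(BUCKET, ...): assume write is allowed.
  static void checkBucketAcl(String vol, String bucket) { }

  static void checkWriteAccess(String vol, String bucket, String key) {
    try {
      checkKeyAcl(vol, bucket, key);
    } catch (KeyNotFoundException e) {
      // Key-level check failed because the key does not exist yet;
      // authorize the write against the bucket instead.
      checkBucketAcl(vol, bucket);
    }
  }

  public static void main(String[] args) {
    checkWriteAccess("vol1", "bucket1", "new-key");
    System.out.println("write authorized via bucket ACL fallback");
  }
}
```
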
-
-  private Map<String, String> toAuditMap(KeyArgs omKeyArgs) {
-    Map<String, String> auditMap = new LinkedHashMap<>();
-    auditMap.put(OzoneConsts.VOLUME, omKeyArgs.getVolumeName());
-    auditMap.put(OzoneConsts.BUCKET, omKeyArgs.getBucketName());
-    auditMap.put(OzoneConsts.KEY, omKeyArgs.getKeyName());
-    auditMap.put(OzoneConsts.DATA_SIZE,
-        String.valueOf(omKeyArgs.getDataSize()));
-    auditMap.put(OzoneConsts.REPLICATION_TYPE,
-        omKeyArgs.hasType() ? omKeyArgs.getType().name() : null);
-    auditMap.put(OzoneConsts.REPLICATION_FACTOR,
-        omKeyArgs.hasFactor() ? omKeyArgs.getFactor().name() : null);
-    auditMap.put(OzoneConsts.KEY_LOCATION_INFO,
-        (omKeyArgs.getKeyLocationsList() != null) ?
-            omKeyArgs.getKeyLocationsList().toString() : null);
-    return auditMap;
-  }
-
-  @Override
-  public void commitKey(OmKeyArgs args, long clientID)
-      throws IOException {
-    if(isAclEnabled) {
-      try {
-        checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE,
-            args.getVolumeName(), args.getBucketName(), args.getKeyName());
-      } catch (OMException ex) {
-        // For new keys the key-level checkAccess call will fail because
-        // the key doesn't exist yet. Fall back to the user's bucket access.
-        if (ex.getResult().equals(KEY_NOT_FOUND)) {
-          checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE,
-              args.getVolumeName(), args.getBucketName(), args.getKeyName());
-        } else {
-          throw ex;
-        }
-      }
-    }
-    Map<String, String> auditMap = (args == null) ? new LinkedHashMap<>() :
-        args.toAuditMap();
-    auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID));
-    try {
-      metrics.incNumKeyCommits();
-      keyManager.commitKey(args, clientID);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.COMMIT_KEY,
-          auditMap));
-      // Once the key is committed it becomes visible, so the key count is
-      // incremented here. Since a key can have multiple versions, increment
-      // only for version 0. Key versioning is not fully supported yet, so
-      // this can be revisited later.
-      if (args != null && args.getLocationInfoList() != null &&
-          args.getLocationInfoList().size() > 0 &&
-          args.getLocationInfoList().get(0) != null &&
-          args.getLocationInfoList().get(0).getCreateVersion() == 0) {
-        metrics.incNumKeys();
-      }
-    } catch (Exception ex) {
-      metrics.incNumKeyCommitFails();
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.COMMIT_KEY,
-          auditMap, ex));
-      throw ex;
-    }
-  }
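
The key-count guard in commitKey only increments the counter for the first version of a key. The same null-safe chain, factored into a small helper for clarity; LocationInfo here is a hypothetical stand-in for OmKeyLocationInfo:

```java
// Sketch of the "count the key only on its first version" guard from
// commitKey, written as an explicit helper instead of the inline chain.
import java.util.List;

public final class FirstVersionGuard {
  static class LocationInfo {
    final long createVersion;
    LocationInfo(long v) { createVersion = v; }
    long getCreateVersion() { return createVersion; }
  }

  static boolean isFirstVersion(List<LocationInfo> locations) {
    return locations != null
        && !locations.isEmpty()
        && locations.get(0) != null
        && locations.get(0).getCreateVersion() == 0;
  }

  public static void main(String[] args) {
    System.out.println(isFirstVersion(List.of(new LocationInfo(0)))); // true
    System.out.println(isFirstVersion(List.of(new LocationInfo(1)))); // false
  }
}
```
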
-
-  @Override
-  public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID,
-      ExcludeList excludeList) throws IOException {
-    if(isAclEnabled) {
-      try {
-        checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE,
-            args.getVolumeName(), args.getBucketName(), args.getKeyName());
-      } catch (OMException ex) {
-        // For new keys the key-level checkAccess call will fail because
-        // the key doesn't exist yet. Fall back to the user's bucket access.
-        if (ex.getResult().equals(KEY_NOT_FOUND)) {
-          checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE,
-              args.getVolumeName(), args.getBucketName(), args.getKeyName());
-        } else {
-          throw ex;
-        }
-      }
-    }
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = (args == null) ? new LinkedHashMap<>() :
-        args.toAuditMap();
-    auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID));
-    try {
-      metrics.incNumBlockAllocateCalls();
-      return keyManager.allocateBlock(args, clientID, excludeList);
-    } catch (Exception ex) {
-      metrics.incNumBlockAllocateCallFails();
-      auditSuccess = false;
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.ALLOCATE_BLOCK,
-          auditMap, ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
-            OMAction.ALLOCATE_BLOCK, auditMap));
-      }
-    }
-  }
-
-  /**
-   * Lookup a key.
-   *
-   * @param args - attributes of the key.
-   * @return OmKeyInfo - the info about the requested key.
-   * @throws IOException
-   */
-  @Override
-  public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException {
-    if(isAclEnabled) {
-      checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ,
-          args.getVolumeName(), args.getBucketName(), args.getKeyName());
-    }
-    boolean auditSuccess = true;
-    try {
-      metrics.incNumKeyLookups();
-      return keyManager.lookupKey(args, getClientAddress());
-    } catch (Exception ex) {
-      metrics.incNumKeyLookupFails();
-      auditSuccess = false;
-      AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.READ_KEY,
-          (args == null) ? null : args.toAuditMap(), ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_KEY,
-            (args == null) ? null : args.toAuditMap()));
-      }
-    }
-  }
-
-  @Override
-  public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
-    if(isAclEnabled) {
-      checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE,
-          args.getVolumeName(), args.getBucketName(), args.getKeyName());
-    }
-    Map<String, String> auditMap = (args == null) ? new LinkedHashMap<>() :
-        args.toAuditMap();
-    auditMap.put(OzoneConsts.TO_KEY_NAME, toKeyName);
-    try {
-      metrics.incNumKeyRenames();
-      keyManager.renameKey(args, toKeyName);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.RENAME_KEY,
-          auditMap));
-    } catch (IOException e) {
-      metrics.incNumKeyRenameFails();
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.RENAME_KEY,
-          auditMap, e));
-      throw e;
-    }
-  }
-
-  /**
-   * Deletes an existing key.
-   *
-   * @param args - attributes of the key.
-   * @throws IOException
-   */
-  @Override
-  public void deleteKey(OmKeyArgs args) throws IOException {
-    try {
-      if(isAclEnabled) {
-        checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.DELETE,
-            args.getVolumeName(), args.getBucketName(), args.getKeyName());
-      }
-      metrics.incNumKeyDeletes();
-      keyManager.deleteKey(args);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_KEY,
-          (args == null) ? null : args.toAuditMap()));
-      metrics.decNumKeys();
-    } catch (Exception ex) {
-      metrics.incNumKeyDeleteFails();
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_KEY,
-          (args == null) ? null : args.toAuditMap(), ex));
-      throw ex;
-    }
-  }
-
-  @Override
-  public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
-      String startKey, String keyPrefix, int maxKeys) throws IOException {
-    if(isAclEnabled) {
-      checkAcls(ResourceType.BUCKET,
-          StoreType.OZONE, ACLType.LIST, volumeName, bucketName, keyPrefix);
-    }
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = buildAuditMap(volumeName);
-    auditMap.put(OzoneConsts.BUCKET, bucketName);
-    auditMap.put(OzoneConsts.START_KEY, startKey);
-    auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys));
-    auditMap.put(OzoneConsts.KEY_PREFIX, keyPrefix);
-    try {
-      metrics.incNumKeyLists();
-      return keyManager.listKeys(volumeName, bucketName,
-          startKey, keyPrefix, maxKeys);
-    } catch (IOException ex) {
-      metrics.incNumKeyListFails();
-      auditSuccess = false;
-      AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_KEYS,
-          auditMap, ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_KEYS,
-            auditMap));
-      }
-    }
-  }
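
listKeys pages through results via startKey/maxKeys. A sketch of how a client might drive such an API, treating a short page as the end-of-listing signal; listPage is a hypothetical stand-in for keyManager.listKeys:

```java
// Driving a paginated listing API: pass the last key of the previous
// page as startKey until a short page signals the end.
import java.util.ArrayList;
import java.util.List;

public final class PagedListingSketch {
  static final List<String> KEYS =
      List.of("a", "b", "c", "d", "e", "f", "g");

  // Returns up to maxKeys keys strictly after startKey ("" for first page).
  static List<String> listPage(String startKey, int maxKeys) {
    List<String> page = new ArrayList<>();
    for (String k : KEYS) {
      if (k.compareTo(startKey) > 0 && page.size() < maxKeys) {
        page.add(k);
      }
    }
    return page;
  }

  public static void main(String[] args) {
    String startKey = "";
    int pageSize = 3;
    while (true) {
      List<String> page = listPage(startKey, pageSize);
      System.out.println(page);
      if (page.size() < pageSize) {
        break;                       // short page: no more keys
      }
      startKey = page.get(page.size() - 1);
    }
  }
}
```
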
-
-  /**
-   * Sets bucket property from args.
-   *
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  @Override
-  public void setBucketProperty(OmBucketArgs args)
-      throws IOException {
-    if(isAclEnabled) {
-      checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE,
-          args.getVolumeName(), args.getBucketName(), null);
-    }
-    try {
-      metrics.incNumBucketUpdates();
-      bucketManager.setBucketProperty(args);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.UPDATE_BUCKET,
-          (args == null) ? null : args.toAuditMap()));
-    } catch (Exception ex) {
-      metrics.incNumBucketUpdateFails();
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.UPDATE_BUCKET,
-          (args == null) ? null : args.toAuditMap(), ex));
-      throw ex;
-    }
-  }
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   *
-   * @param volume - Name of the volume.
-   * @param bucket - Name of the bucket.
-   * @throws IOException
-   */
-  @Override
-  public void deleteBucket(String volume, String bucket) throws IOException {
-    if (isAclEnabled) {
-      checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, volume,
-          bucket, null);
-    }
-    Map<String, String> auditMap = buildAuditMap(volume);
-    auditMap.put(OzoneConsts.BUCKET, bucket);
-    try {
-      metrics.incNumBucketDeletes();
-      bucketManager.deleteBucket(volume, bucket);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_BUCKET,
-          auditMap));
-      metrics.decNumBuckets();
-    } catch (Exception ex) {
-      metrics.incNumBucketDeleteFails();
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_BUCKET,
-          auditMap, ex));
-      throw ex;
-    }
-  }
-
-  private Map<String, String> buildAuditMap(String volume){
-    Map<String, String> auditMap = new LinkedHashMap<>();
-    auditMap.put(OzoneConsts.VOLUME, volume);
-    return auditMap;
-  }
-
-  public AuditLogger getAuditLogger() {
-    return AUDIT;
-  }
-
-  @Override
-  public AuditMessage buildAuditMessageForSuccess(AuditAction op,
-      Map<String, String> auditMap) {
-    return new AuditMessage.Builder()
-        .setUser((Server.getRemoteUser() == null) ? null :
-            Server.getRemoteUser().getUserName())
-        .atIp((Server.getRemoteIp() == null) ? null :
-            Server.getRemoteIp().getHostAddress())
-        .forOperation(op.getAction())
-        .withParams(auditMap)
-        .withResult(AuditEventStatus.SUCCESS.toString())
-        .withException(null)
-        .build();
-  }
-
-  @Override
-  public AuditMessage buildAuditMessageForFailure(AuditAction op,
-      Map<String, String> auditMap, Throwable throwable) {
-    return new AuditMessage.Builder()
-        .setUser((Server.getRemoteUser() == null) ? null :
-            Server.getRemoteUser().getUserName())
-        .atIp((Server.getRemoteIp() == null) ? null :
-            Server.getRemoteIp().getHostAddress())
-        .forOperation(op.getAction())
-        .withParams(auditMap)
-        .withResult(AuditEventStatus.FAILURE.toString())
-        .withException(throwable)
-        .build();
-  }
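
Both audit-message factories rely on a fluent builder. A self-contained sketch of the same builder shape, with a plain Message class in place of the Ozone AuditMessage types:

```java
// Hedged sketch of the AuditMessage builder idiom: an immutable message
// assembled through a chained Builder.
import java.util.Map;

public final class AuditBuilderSketch {
  static final class Message {
    final String user, ip, op, result;
    final Map<String, String> params;
    private Message(Builder b) {
      user = b.user; ip = b.ip; op = b.op; result = b.result; params = b.params;
    }
    @Override public String toString() {
      return user + "@" + ip + " " + op + " " + params + " -> " + result;
    }
    static final class Builder {
      private String user, ip, op, result;
      private Map<String, String> params;
      Builder setUser(String u) { user = u; return this; }
      Builder atIp(String i) { ip = i; return this; }
      Builder forOperation(String o) { op = o; return this; }
      Builder withParams(Map<String, String> p) { params = p; return this; }
      Builder withResult(String r) { result = r; return this; }
      Message build() { return new Message(this); }
    }
  }

  public static void main(String[] args) {
    Message m = new Message.Builder()
        .setUser("alice").atIp("10.0.0.1")
        .forOperation("CREATE_BUCKET")
        .withParams(Map.of("volume", "vol1", "bucket", "b1"))
        .withResult("SUCCESS")
        .build();
    System.out.println(m);
  }
}
```
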
-
-  private void registerMXBean() {
-    Map<String, String> jmxProperties = new HashMap<>();
-    jmxProperties.put("component", "ServerRuntime");
-    this.omInfoBeanName = HddsUtils.registerWithJmxProperties(
-        "OzoneManager", "OzoneManagerInfo", jmxProperties, this);
-  }
-
-  private void unregisterMXBean() {
-    if (this.omInfoBeanName != null) {
-      MBeans.unregister(this.omInfoBeanName);
-      this.omInfoBeanName = null;
-    }
-  }
-
-  private static String getClientAddress() {
-    String clientMachine = Server.getRemoteAddress();
-    if (clientMachine == null) { //not a RPC client
-      clientMachine = "";
-    }
-    return clientMachine;
-  }
-
-  @Override
-  public String getRpcPort() {
-    return "" + omRpcAddress.getPort();
-  }
-
-  @VisibleForTesting
-  public OzoneManagerHttpServer getHttpServer() {
-    return httpServer;
-  }
-
-  @Override
-  public List<ServiceInfo> getServiceList() throws IOException {
-    // When we implement multi-home this call has to be handled properly.
-    List<ServiceInfo> services = new ArrayList<>();
-    ServiceInfo.Builder omServiceInfoBuilder = ServiceInfo.newBuilder()
-        .setNodeType(HddsProtos.NodeType.OM)
-        .setHostname(omRpcAddress.getHostName())
-        .addServicePort(ServicePort.newBuilder()
-            .setType(ServicePort.Type.RPC)
-            .setValue(omRpcAddress.getPort())
-            .build());
-    if (httpServer.getHttpAddress() != null) {
-      omServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
-          .setType(ServicePort.Type.HTTP)
-          .setValue(httpServer.getHttpAddress().getPort())
-          .build());
-    }
-    if (httpServer.getHttpsAddress() != null) {
-      omServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
-          .setType(ServicePort.Type.HTTPS)
-          .setValue(httpServer.getHttpsAddress().getPort())
-          .build());
-    }
-    services.add(omServiceInfoBuilder.build());
-
-    // For client we have to return SCM with container protocol port,
-    // not block protocol.
-    InetSocketAddress scmAddr = getScmAddressForClients(
-        configuration);
-    ServiceInfo.Builder scmServiceInfoBuilder = ServiceInfo.newBuilder()
-        .setNodeType(HddsProtos.NodeType.SCM)
-        .setHostname(scmAddr.getHostName())
-        .addServicePort(ServicePort.newBuilder()
-            .setType(ServicePort.Type.RPC)
-            .setValue(scmAddr.getPort()).build());
-    services.add(scmServiceInfoBuilder.build());
-
-    List<HddsProtos.Node> nodes = scmContainerClient.queryNode(HEALTHY,
-        HddsProtos.QueryScope.CLUSTER, "");
-
-    for (HddsProtos.Node node : nodes) {
-      HddsProtos.DatanodeDetailsProto datanode = node.getNodeID();
-
-      ServiceInfo.Builder dnServiceInfoBuilder = ServiceInfo.newBuilder()
-          .setNodeType(HddsProtos.NodeType.DATANODE)
-          .setHostname(datanode.getHostName());
-
-      if(DatanodeDetails.getFromProtoBuf(datanode)
-          .getPort(DatanodeDetails.Port.Name.REST) != null) {
-        dnServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
-            .setType(ServicePort.Type.HTTP)
-            .setValue(DatanodeDetails.getFromProtoBuf(datanode)
-                .getPort(DatanodeDetails.Port.Name.REST).getValue())
-            .build());
-      }
-
-      services.add(dnServiceInfoBuilder.build());
-    }
-
-    metrics.incNumGetServiceLists();
-    // For now there is no exception that can happen in this call, so
-    // failure metrics are not handled. In the future, if this method needs
-    // to handle an exception, metrics.incNumGetServiceListFails() should be
-    // incorporated.
-    return services;
-  }
-
-  @Override
-  public ServiceInfoEx getServiceInfo() throws IOException {
-    return new ServiceInfoEx(getServiceList(), caCertPem);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public void createS3Bucket(String userName, String s3BucketName)
-      throws IOException {
-
-    boolean acquiredS3Lock = false;
-    boolean acquiredVolumeLock = false;
-    try {
-      metrics.incNumBucketCreates();
-      acquiredS3Lock = metadataManager.getLock().acquireLock(S3_BUCKET_LOCK,
-          s3BucketName);
-      try {
-        acquiredVolumeLock = metadataManager.getLock().acquireLock(VOLUME_LOCK,
-            s3BucketManager.formatOzoneVolumeName(userName));
-        boolean newVolumeCreate = s3BucketManager.createOzoneVolumeIfNeeded(
-            userName);
-        if (newVolumeCreate) {
-          metrics.incNumVolumeCreates();
-          metrics.incNumVolumes();
-        }
-      } catch (IOException ex) {
-        // Increment the volume-create metrics here as well: this was the
-        // first attempt to create the volume and it failed, and the op
-        // counters are incremented whenever the operation is attempted.
-        metrics.incNumVolumeCreates();
-        metrics.incNumVolumeCreateFails();
-        throw ex;
-      }
-      s3BucketManager.createS3Bucket(userName, s3BucketName);
-      metrics.incNumBuckets();
-    } catch (IOException ex) {
-      metrics.incNumBucketCreateFails();
-      throw ex;
-    } finally {
-      if (acquiredVolumeLock) {
-        metadataManager.getLock().releaseLock(VOLUME_LOCK,
-            s3BucketManager.formatOzoneVolumeName(userName));
-      }
-      if (acquiredS3Lock) {
-        metadataManager.getLock().releaseLock(S3_BUCKET_LOCK, s3BucketName);
-      }
-    }
-  }
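
createS3Bucket tracks each lock acquisition in a boolean and releases in reverse order from finally, so a failure between the two acquires never leaks a lock. The same discipline in a standalone sketch, with ReentrantLock standing in for the OM's striped lock manager:

```java
// Nested-lock discipline: record each acquisition, release inner lock
// first, and only release what was actually acquired.
import java.util.concurrent.locks.ReentrantLock;

public final class NestedLockSketch {
  private static final ReentrantLock S3_LOCK = new ReentrantLock();
  private static final ReentrantLock VOLUME_LOCK = new ReentrantLock();

  static void createS3Bucket(String user, String bucket) {
    boolean s3Held = false;
    boolean volumeHeld = false;
    try {
      S3_LOCK.lock();
      s3Held = true;
      VOLUME_LOCK.lock();
      volumeHeld = true;
      // ... create the backing volume and the bucket mapping here ...
    } finally {
      if (volumeHeld) {
        VOLUME_LOCK.unlock();   // inner lock first
      }
      if (s3Held) {
        S3_LOCK.unlock();       // then the outer lock
      }
    }
  }

  public static void main(String[] args) {
    createS3Bucket("alice", "photos");
    System.out.println("locks acquired and released cleanly");
  }
}
```
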
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public void deleteS3Bucket(String s3BucketName) throws IOException {
-    try {
-      if(isAclEnabled) {
-        checkAcls(ResourceType.BUCKET, StoreType.S3, ACLType.DELETE, 
-            getS3VolumeName(), s3BucketName, null);
-      }
-      metrics.incNumBucketDeletes();
-      s3BucketManager.deleteS3Bucket(s3BucketName);
-      metrics.decNumBuckets();
-    } catch (IOException ex) {
-      metrics.incNumBucketDeleteFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public S3SecretValue getS3Secret(String kerberosID) throws IOException{
-    UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser();
-
-    // Check whether the user name passed in matches the current user.
-    if (!user.getUserName().equals(kerberosID)) {
-      throw new OMException("User mismatch. Requested user name " +
-          kerberosID + " does not match the current user " +
-          user.getUserName(), OMException.ResultCodes.USER_MISMATCH);
-    }
-    return s3SecretManager.getS3Secret(kerberosID);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getOzoneBucketMapping(String s3BucketName)
-      throws IOException {
-    if(isAclEnabled) {
-      checkAcls(ResourceType.BUCKET, StoreType.S3, ACLType.READ,
-          getS3VolumeName(), s3BucketName, null);
-    }
-    return s3BucketManager.getOzoneBucketMapping(s3BucketName);
-  }
-
-  /**
-   * Helper function to return the volume name for S3 users.
-   */
-  private String getS3VolumeName() {
-    return s3BucketManager.formatOzoneVolumeName(DigestUtils.md5Hex(
-        ProtobufRpcEngine.Server.getRemoteUser().getUserName().toLowerCase()));
-  }
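
The S3 volume name is derived from the MD5 hex digest of the lower-cased user name. A sketch of the mapping using the JDK's MessageDigest rather than commons-codec's DigestUtils; the "s3" prefix is an assumption standing in for formatOzoneVolumeName(...):

```java
// Mapping a user name to a deterministic volume name via MD5 hex.
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public final class S3VolumeNameSketch {
  static String s3VolumeName(String userName) throws Exception {
    MessageDigest md5 = MessageDigest.getInstance("MD5");
    byte[] digest = md5.digest(
        userName.toLowerCase().getBytes(StandardCharsets.UTF_8));
    StringBuilder hex = new StringBuilder();
    for (byte b : digest) {
      hex.append(String.format("%02x", b));
    }
    return "s3" + hex; // hypothetical prefix
  }

  public static void main(String[] args) throws Exception {
    System.out.println(s3VolumeName("Alice"));
  }
}
```
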
-
-  @Override
-  public List<OmBucketInfo> listS3Buckets(String userName, String startKey,
-                                          String prefix, int maxNumOfBuckets)
-      throws IOException {
-    if(isAclEnabled) {
-      checkAcls(ResourceType.VOLUME, StoreType.S3, ACLType.LIST,
-          s3BucketManager.getOzoneVolumeNameForUser(userName), null, null);
-    }
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = buildAuditMap(userName);
-    auditMap.put(OzoneConsts.START_KEY, startKey);
-    auditMap.put(OzoneConsts.PREFIX, prefix);
-    auditMap.put(OzoneConsts.MAX_NUM_OF_BUCKETS,
-        String.valueOf(maxNumOfBuckets));
-    try {
-      metrics.incNumListS3Buckets();
-      String volumeName = s3BucketManager.getOzoneVolumeNameForUser(userName);
-      return bucketManager.listBuckets(volumeName, startKey, prefix,
-          maxNumOfBuckets);
-    } catch (IOException ex) {
-      metrics.incNumListS3BucketsFails();
-      auditSuccess = false;
-      AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_S3BUCKETS,
-          auditMap, ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction
-                .LIST_S3BUCKETS, auditMap));
-      }
-    }
-  }
-
-  @Override
-  public OmMultipartInfo initiateMultipartUpload(OmKeyArgs keyArgs) throws
-      IOException {
-    OmMultipartInfo multipartInfo;
-    metrics.incNumInitiateMultipartUploads();
-    try {
-      multipartInfo = keyManager.initiateMultipartUpload(keyArgs);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
-          OMAction.INITIATE_MULTIPART_UPLOAD, (keyArgs == null) ? null :
-              keyArgs.toAuditMap()));
-    } catch (IOException ex) {
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(
-          OMAction.INITIATE_MULTIPART_UPLOAD,
-          (keyArgs == null) ? null : keyArgs.toAuditMap(), ex));
-      metrics.incNumInitiateMultipartUploadFails();
-      throw ex;
-    }
-    return multipartInfo;
-  }
-
-  @Override
-  public OmMultipartCommitUploadPartInfo commitMultipartUploadPart(
-      OmKeyArgs keyArgs, long clientID) throws IOException {
-    boolean auditSuccess = false;
-    OmMultipartCommitUploadPartInfo commitUploadPartInfo;
-    metrics.incNumCommitMultipartUploadParts();
-    try {
-      commitUploadPartInfo = keyManager.commitMultipartUploadPart(keyArgs,
-          clientID);
-      auditSuccess = true;
-    } catch (IOException ex) {
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(
-          OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY,
-          (keyArgs == null) ? null : keyArgs.toAuditMap(), ex));
-      metrics.incNumCommitMultipartUploadPartFails();
-      throw ex;
-    } finally {
-      if(auditSuccess) {
-        AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
-            OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY, (keyArgs == null) ? null :
-                keyArgs.toAuditMap()));
-      }
-    }
-    return commitUploadPartInfo;
-  }
-
-  @Override
-  public OmMultipartUploadCompleteInfo completeMultipartUpload(
-      OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList)
-      throws IOException {
-    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo;
-    metrics.incNumCompleteMultipartUploads();
-
-    Map<String, String> auditMap = (omKeyArgs == null) ? new LinkedHashMap<>() :
-        omKeyArgs.toAuditMap();
-    auditMap.put(OzoneConsts.MULTIPART_LIST, multipartUploadList
-        .getMultipartMap().toString());
-    try {
-      omMultipartUploadCompleteInfo = keyManager.completeMultipartUpload(
-          omKeyArgs, multipartUploadList);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction
-          .COMPLETE_MULTIPART_UPLOAD, auditMap));
-      return omMultipartUploadCompleteInfo;
-    } catch (IOException ex) {
-      metrics.incNumCompleteMultipartUploadFails();
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction
-          .COMPLETE_MULTIPART_UPLOAD, auditMap, ex));
-      throw ex;
-    }
-  }
-
-  @Override
-  public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException {
-
-    Map<String, String> auditMap = (omKeyArgs == null) ? new LinkedHashMap<>() :
-        omKeyArgs.toAuditMap();
-    metrics.incNumAbortMultipartUploads();
-    try {
-      keyManager.abortMultipartUpload(omKeyArgs);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction
-          .ABORT_MULTIPART_UPLOAD, auditMap));
-    } catch (IOException ex) {
-      metrics.incNumAbortMultipartUploadFails();
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction
-          .ABORT_MULTIPART_UPLOAD, auditMap, ex));
-      throw ex;
-    }
-  }
-
-  @Override
-  public OmMultipartUploadListParts listParts(String volumeName,
-      String bucketName, String keyName, String uploadID, int partNumberMarker,
-      int maxParts)  throws IOException {
-    Map<String, String> auditMap = new HashMap<>();
-    auditMap.put(OzoneConsts.VOLUME, volumeName);
-    auditMap.put(OzoneConsts.BUCKET, bucketName);
-    auditMap.put(OzoneConsts.KEY, keyName);
-    auditMap.put(OzoneConsts.UPLOAD_ID, uploadID);
-    auditMap.put(OzoneConsts.PART_NUMBER_MARKER,
-        Integer.toString(partNumberMarker));
-    auditMap.put(OzoneConsts.MAX_PARTS, Integer.toString(maxParts));
-    metrics.incNumListMultipartUploadParts();
-    try {
-      OmMultipartUploadListParts omMultipartUploadListParts =
-          keyManager.listParts(volumeName, bucketName, keyName, uploadID,
-              partNumberMarker, maxParts);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction
-          .LIST_MULTIPART_UPLOAD_PARTS, auditMap));
-      return omMultipartUploadListParts;
-    } catch (IOException ex) {
-      metrics.incNumListMultipartUploadPartFails();
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction
-          .LIST_MULTIPART_UPLOAD_PARTS, auditMap, ex));
-      throw ex;
-    }
-  }
-
-  @Override
-  public OmMultipartUploadList listMultipartUploads(String volumeName,
-      String bucketName, String prefix) throws IOException {
-
-    Map<String, String> auditMap = new HashMap<>();
-    auditMap.put(OzoneConsts.VOLUME, volumeName);
-    auditMap.put(OzoneConsts.BUCKET, bucketName);
-    auditMap.put(OzoneConsts.PREFIX, prefix);
-
-    metrics.incNumListMultipartUploads();
-    try {
-      OmMultipartUploadList omMultipartUploadList =
-          keyManager.listMultipartUploads(volumeName, bucketName, prefix);
-      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction
-          .LIST_MULTIPART_UPLOADS, auditMap));
-      return omMultipartUploadList;
-
-    } catch (IOException ex) {
-      metrics.incNumListMultipartUploadFails();
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction
-          .LIST_MULTIPART_UPLOADS, auditMap, ex));
-      throw ex;
-    }
-  }
-
-  @Override
-  public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException {
-    if (isAclEnabled) {
-      checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ,
-          args.getVolumeName(), args.getBucketName(), args.getKeyName());
-    }
-    boolean auditSuccess = true;
-    try {
-      metrics.incNumGetFileStatus();
-      return keyManager.getFileStatus(args);
-    } catch (IOException ex) {
-      metrics.incNumGetFileStatusFails();
-      auditSuccess = false;
-      AUDIT.logWriteFailure(
-          buildAuditMessageForFailure(OMAction.GET_FILE_STATUS,
-              (args == null) ? null : args.toAuditMap(), ex));
-      throw ex;
-    } finally {
-      if (auditSuccess) {
-        AUDIT.logWriteSuccess(
-            buildAuditMessageForSuccess(OMAction.GET_FILE_STATUS,
-                (args == null) ? null : args.toAuditMap()));
-      }
-    }
-  }
-
-  private ResourceType getResourceType(OmKeyArgs args) {
-    if (args.getKeyName() == null || args.getKeyName().length() == 0) {
-      return ResourceType.BUCKET;
-    }
-    return ResourceType.KEY;
-  }
-
-  @Override
-  public void createDirectory(OmKeyArgs args) throws IOException {
-    if (isAclEnabled) {
-      checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE,
-          args.getVolumeName(), args.getBucketName(), args.getKeyName());
-    }
-    boolean auditSuccess = true;
-    try {
-      metrics.incNumCreateDirectory();
-      keyManager.createDirectory(args);
-    } catch (IOException ex) {
-      metrics.incNumCreateDirectoryFails();
-      auditSuccess = false;
-      AUDIT.logWriteFailure(
-          buildAuditMessageForFailure(OMAction.CREATE_DIRECTORY,
-              (args == null) ? null : args.toAuditMap(), ex));
-      throw ex;
-    } finally {
-      if (auditSuccess) {
-        AUDIT.logWriteSuccess(
-            buildAuditMessageForSuccess(OMAction.CREATE_DIRECTORY,
-                (args == null) ? null : args.toAuditMap()));
-      }
-    }
-  }
-
-  @Override
-  public OpenKeySession createFile(OmKeyArgs args, boolean overWrite,
-      boolean recursive) throws IOException {
-    if (isAclEnabled) {
-      checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE,
-          args.getVolumeName(), args.getBucketName(), null);
-    }
-    boolean auditSuccess = true;
-    try {
-      metrics.incNumCreateFile();
-      return keyManager.createFile(args, overWrite, recursive);
-    } catch (Exception ex) {
-      metrics.incNumCreateFileFails();
-      auditSuccess = false;
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.CREATE_FILE,
-          (args == null) ? null : args.toAuditMap(), ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
-            OMAction.CREATE_FILE, (args == null) ? null : args.toAuditMap()));
-      }
-    }
-  }
-
-  @Override
-  public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException {
-    if(isAclEnabled) {
-      checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ,
-          args.getVolumeName(), args.getBucketName(), args.getKeyName());
-    }
-    boolean auditSuccess = true;
-    try {
-      metrics.incNumLookupFile();
-      return keyManager.lookupFile(args, getClientAddress());
-    } catch (Exception ex) {
-      metrics.incNumLookupFileFails();
-      auditSuccess = false;
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.LOOKUP_FILE,
-          (args == null) ? null : args.toAuditMap(), ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
-            OMAction.LOOKUP_FILE, (args == null) ? null : args.toAuditMap()));
-      }
-    }
-  }
-
-  @Override
-  public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive,
-      String startKey, long numEntries) throws IOException {
-    if(isAclEnabled) {
-      checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ,
-          args.getVolumeName(), args.getBucketName(), args.getKeyName());
-    }
-    boolean auditSuccess = true;
-    try {
-      metrics.incNumListStatus();
-      return keyManager.listStatus(args, recursive, startKey, numEntries);
-    } catch (Exception ex) {
-      metrics.incNumListStatusFails();
-      auditSuccess = false;
-      AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.LIST_STATUS,
-          (args == null) ? null : args.toAuditMap(), ex));
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
-            OMAction.LIST_STATUS, (args == null) ? null : args.toAuditMap()));
-      }
-    }
-  }
-
-  private void auditAcl(OzoneObj ozoneObj, List<OzoneAcl> ozoneAcl,
-      OMAction omAction, Exception ex) {
-    Map<String, String> auditMap = ozoneObj.toAuditMap();
-    if(ozoneAcl != null) {
-      auditMap.put(OzoneConsts.ACL, ozoneAcl.toString());
-    }
-
-    if(ex == null) {
-      AUDIT.logWriteSuccess(
-          buildAuditMessageForSuccess(omAction, auditMap));
-    } else {
-      AUDIT.logWriteFailure(
-          buildAuditMessageForFailure(omAction, auditMap, ex));
-    }
-  }
-
-  /**
-   * Add acl for Ozone object. Return true if the acl is added successfully,
-   * false otherwise.
-   *
-   * @param obj Ozone object for which the acl should be added.
-   * @param acl ozone acl to be added.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    boolean auditSuccess = true;
-
-    try{
-      if(isAclEnabled) {
-        checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL,
-            obj.getVolumeName(), obj.getBucketName(), obj.getKeyName());
-      }
-      switch (obj.getResourceType()) {
-      case VOLUME:
-        return volumeManager.addAcl(obj, acl);
-      case BUCKET:
-        return bucketManager.addAcl(obj, acl);
-      case KEY:
-        return keyManager.addAcl(obj, acl);
-      case PREFIX:
-        return prefixManager.addAcl(obj, acl);
-      default:
-        throw new OMException("Unexpected resource type: " +
-            obj.getResourceType(), INVALID_REQUEST);
-      }
-    } catch(Exception ex) {
-      auditSuccess = false;
-      auditAcl(obj, Arrays.asList(acl), OMAction.ADD_ACL, ex);
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        auditAcl(obj, Arrays.asList(acl), OMAction.ADD_ACL, null);
-      }
-    }
-  }
-
-  /**
-   * Remove acl for Ozone object. Return true if the acl is removed
-   * successfully, false otherwise.
-   *
-   * @param obj Ozone object.
-   * @param acl Ozone acl to be removed.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    boolean auditSuccess = true;
-
-    try{
-      if(isAclEnabled) {
-        checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL,
-            obj.getVolumeName(), obj.getBucketName(), obj.getKeyName());
-      }
-      switch (obj.getResourceType()) {
-      case VOLUME:
-        return volumeManager.removeAcl(obj, acl);
-      case BUCKET:
-        return bucketManager.removeAcl(obj, acl);
-      case KEY:
-        return keyManager.removeAcl(obj, acl);
-      case PREFIX:
-        return prefixManager.removeAcl(obj, acl);
-
-      default:
-        throw new OMException("Unexpected resource type: " +
-            obj.getResourceType(), INVALID_REQUEST);
-      }
-    } catch(Exception ex) {
-      auditSuccess = false;
-      auditAcl(obj, Arrays.asList(acl), OMAction.REMOVE_ACL, ex);
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        auditAcl(obj, Arrays.asList(acl), OMAction.REMOVE_ACL, null);
-      }
-    }
-  }
-
-  /**
-   * Set acls for the given Ozone object. This operation resets the ACL of
-   * the object to the list of ACLs provided in the argument.
-   *
-   * @param obj Ozone object.
-   * @param acls List of acls.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException {
-    boolean auditSuccess = true;
-
-    try{
-      if(isAclEnabled) {
-        checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL,
-            obj.getVolumeName(), obj.getBucketName(), obj.getKeyName());
-      }
-      switch (obj.getResourceType()) {
-      case VOLUME:
-        return volumeManager.setAcl(obj, acls);
-      case BUCKET:
-        return bucketManager.setAcl(obj, acls);
-      case KEY:
-        return keyManager.setAcl(obj, acls);
-      case PREFIX:
-        return prefixManager.setAcl(obj, acls);
-      default:
-        throw new OMException("Unexpected resource type: " +
-            obj.getResourceType(), INVALID_REQUEST);
-      }
-    } catch(Exception ex) {
-      auditSuccess = false;
-      auditAcl(obj, acls, OMAction.SET_ACL, ex);
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        auditAcl(obj, acls, OMAction.SET_ACL, null);
-      }
-    }
-  }
-
-  /**
-   * Returns list of ACLs for given Ozone object.
-   *
-   * @param obj Ozone object.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public List<OzoneAcl> getAcl(OzoneObj obj) throws IOException {
-    boolean auditSuccess = true;
-
-    try{
-      if(isAclEnabled) {
-        checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.READ_ACL,
-            obj.getVolumeName(), obj.getBucketName(), obj.getKeyName());
-      }
-      switch (obj.getResourceType()) {
-      case VOLUME:
-        return volumeManager.getAcl(obj);
-      case BUCKET:
-        return bucketManager.getAcl(obj);
-      case KEY:
-        return keyManager.getAcl(obj);
-      case PREFIX:
-        return prefixManager.getAcl(obj);
-
-      default:
-        throw new OMException("Unexpected resource type: " +
-            obj.getResourceType(), INVALID_REQUEST);
-      }
-    } catch(Exception ex) {
-      auditSuccess = false;
-      auditAcl(obj, null, OMAction.GET_ACL, ex);
-      throw ex;
-    } finally {
-      if(auditSuccess){
-        auditAcl(obj, null, OMAction.GET_ACL, null);
-      }
-    }
-  }
-
-  /**
-   * Download and install latest checkpoint from leader OM.
-   * If the downloaded checkpoint's snapshot index is greater than this OM's
-   * last applied transaction index, then re-initialize the OM state via this
-   * checkpoint. Before re-initializing OM state, the OM Ratis server should
-   * be stopped so that no new transactions can be applied.
-   * @param leaderId peerNodeID of the leader OM
-   * @return If checkpoint is installed, return the corresponding termIndex.
-   * Otherwise, return null.
-   */
-  public TermIndex installSnapshot(String leaderId) {
-    if (omSnapshotProvider == null) {
-      LOG.error("OM Snapshot Provider is not configured as there are no peer " +
-          "nodes.");
-      return null;
-    }
-
-    DBCheckpoint omDBcheckpoint = getDBCheckpointFromLeader(leaderId);
-    if (omDBcheckpoint == null) {
-      // Checkpoint download failed; nothing to install.
-      return null;
-    }
-    Path newDBlocation = omDBcheckpoint.getCheckpointLocation();
-
-    // Check if current ratis log index is smaller than the downloaded
-    // snapshot index. If yes, proceed by stopping the ratis server so that
-    // the OM state can be re-initialized. If no, then do not proceed with
-    // installSnapshot.
-    long lastAppliedIndex = omRatisServer.getStateMachineLastAppliedIndex();
-    long checkpointSnapshotIndex = omDBcheckpoint.getRatisSnapshotIndex();
-    if (checkpointSnapshotIndex <= lastAppliedIndex) {
-      LOG.error("Failed to install checkpoint from OM leader: {}. The last " +
-          "applied index: {} is greater than or equal to the checkpoint's " +
-          "snapshot index: {}. Deleting the downloaded checkpoint {}", leaderId,
-          lastAppliedIndex, checkpointSnapshotIndex,
-          newDBlocation);
-      try {
-        FileUtils.deleteFully(newDBlocation);
-      } catch (IOException e) {
-        LOG.error("Failed to fully delete the downloaded DB checkpoint {} " +
-            "from OM leader {}.", newDBlocation,
-            leaderId, e);
-      }
-      return null;
-    }
-
-    // Pause the State Machine so that no new transactions can be applied.
-    // This action also clears the OM Double Buffer so that if there are any
-    // pending transactions in the buffer, they are discarded.
-    // TODO: The Ratis server should also be paused here. This is required
-    //  because a leader election might happen while the snapshot
-    //  installation is in progress and the new leader might start sending
-    //  append log entries to the ratis server.
-    omRatisServer.getOmStateMachine().pause();
-
-    File dbBackup;
-    try {
-      dbBackup = replaceOMDBWithCheckpoint(lastAppliedIndex, newDBlocation);
-    } catch (Exception e) {
-      LOG.error("OM DB checkpoint replacement with new downloaded checkpoint " +
-          "failed.", e);
-      return null;
-    }
-
-    // Reload the OM DB store with the new checkpoint.
-    // Restart (unpause) the state machine and update its last applied index
-    // to the installed checkpoint's snapshot index.
-    try {
-      reloadOMState(checkpointSnapshotIndex);
-      omRatisServer.getOmStateMachine().unpause(checkpointSnapshotIndex);
-    } catch (IOException e) {
-      LOG.error("Failed to reload OM state with new DB checkpoint.", e);
-      return null;
-    }
-
-    // Delete the backup DB
-    try {
-      FileUtils.deleteFully(dbBackup);
-    } catch (IOException e) {
-      LOG.error("Failed to delete the backup of the original DB {}", dbBackup);
-    }
-
-    // TODO: We should only return the snapshotIndex to the leader.
-    //  Should be fixed after RATIS-586
-    TermIndex newTermIndex = TermIndex.newTermIndex(0,
-        checkpointSnapshotIndex);
-
-    return newTermIndex;
-  }
-
-  /**
-   * Download the latest OM DB checkpoint from the leader OM.
-   * @param leaderId OMNodeID of the leader OM node.
-   * @return latest DB checkpoint from leader OM.
-   */
-  private DBCheckpoint getDBCheckpointFromLeader(String leaderId) {
-    LOG.info("Downloading checkpoint from leader OM {} and reloading state " +
-        "from the checkpoint.", leaderId);
-
-    try {
-      return omSnapshotProvider.getOzoneManagerDBSnapshot(leaderId);
-    } catch (IOException e) {
-      LOG.error("Failed to download checkpoint from OM leader {}", leaderId, e);
-    }
-    return null;
-  }
-
-  /**
-   * Replace the current OM DB with the new DB checkpoint.
-   * @param lastAppliedIndex the last applied index in the current OM DB.
-   * @param checkpointPath path to the new DB checkpoint.
-   * @return location of the backup of the original DB.
-   * @throws Exception if the DB cannot be replaced with the checkpoint.
-   */
-  File replaceOMDBWithCheckpoint(long lastAppliedIndex, Path checkpointPath)
-      throws Exception {
-    // Stop the DB first
-    DBStore store = metadataManager.getStore();
-    store.close();
-
-    // Take a backup of the current DB
-    File db = store.getDbLocation();
-    String dbBackupName = OzoneConsts.OM_DB_BACKUP_PREFIX +
-        lastAppliedIndex + "_" + System.currentTimeMillis();
-    File dbBackup = new File(db.getParentFile(), dbBackupName);
-
-    try {
-      Files.move(db.toPath(), dbBackup.toPath());
-    } catch (IOException e) {
-      LOG.error("Failed to create a backup of the current DB. Aborting " +
-          "snapshot installation.");
-      throw e;
-    }
-
-    // Move the new DB checkpoint into the om metadata dir
-    try {
-      Files.move(checkpointPath, db.toPath());
-    } catch (IOException e) {
-      LOG.error("Failed to move downloaded DB checkpoint {} to metadata " +
-          "directory {}. Resetting to original DB.", checkpointPath,
-          db.toPath());
-      Files.move(dbBackup.toPath(), db.toPath());
-      throw e;
-    }
-    return dbBackup;
-  }
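
replaceOMDBWithCheckpoint is a backup-and-swap: move the live DB aside, promote the checkpoint, and roll the backup back if the promotion fails. The idiom in a self-contained, runnable form:

```java
// Backup-and-swap with rollback, using java.nio.file.Files.move.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public final class SwapWithBackupSketch {
  static Path swap(Path live, Path checkpoint) throws IOException {
    Path backup = live.resolveSibling(
        live.getFileName() + ".backup_" + System.currentTimeMillis());
    Files.move(live, backup);            // 1. back up the current state
    try {
      Files.move(checkpoint, live);      // 2. promote the checkpoint
    } catch (IOException e) {
      Files.move(backup, live);          // 3. roll back on failure
      throw e;
    }
    return backup;                       // caller deletes it once stable
  }

  public static void main(String[] args) throws IOException {
    Path dir = Files.createTempDirectory("swap-demo");
    Path live = Files.createDirectory(dir.resolve("om.db"));
    Path ckpt = Files.createDirectory(dir.resolve("checkpoint"));
    Path backup = swap(live, ckpt);
    System.out.println("backup kept at " + backup);
  }
}
```
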
-
-  /**
-   * Re-instantiate MetadataManager with new DB checkpoint.
-   * All the classes which use or store MetadataManager should also be
-   * updated with the new MetadataManager instance.
-   */
-  void reloadOMState(long newSnapshotIndex) throws IOException {
-
-    instantiateServices();
-
-    // Restart required services
-    metadataManager.start(configuration);
-    keyManager.start(configuration);
-
-    // Set metrics and start the metrics background thread.
-    metrics.setNumVolumes(metadataManager.countRowsInTable(metadataManager
-        .getVolumeTable()));
-    metrics.setNumBuckets(metadataManager.countRowsInTable(metadataManager
-        .getBucketTable()));
-    metrics.setNumKeys(metadataManager.countEstimatedRowsInTable(metadataManager
-        .getKeyTable()));
-
-    // Delete the omMetrics file if it exists and save a new metrics file
-    // with the new data.
-    Files.deleteIfExists(getMetricsStorageFile().toPath());
-    saveOmMetrics();
-
-    // Update OM snapshot index with the new snapshot index (from the new OM
-    // DB state) and save the snapshot index to disk
-    omRatisSnapshotInfo.saveRatisSnapshotToDisk(newSnapshotIndex);
-  }
-
-  public static Logger getLogger() {
-    return LOG;
-  }
-
-  public OzoneConfiguration getConfiguration() {
-    return configuration;
-  }
-
-  public static void setTestSecureOmFlag(boolean testSecureOmFlag) {
-    OzoneManager.testSecureOmFlag = testSecureOmFlag;
-  }
-
-  public String getOMNodeId() {
-    return omNodeDetails.getOMNodeId();
-  }
-
-  public String getOMServiceId() {
-    return omNodeDetails.getOMServiceId();
-  }
-
-  @VisibleForTesting
-  public List<OMNodeDetails> getPeerNodes() {
-    return peerNodes;
-  }
-
-  @VisibleForTesting
-  public CertificateClient getCertificateClient() {
-    return certClient;
-  }
-
-  public String getComponent() {
-    return omComponent;
-  }
-
-  @Override
-  public OMFailoverProxyProvider getOMFailoverProxyProvider() {
-    return null;
-  }
-
-  /**
-   * Return maximum volumes count per user.
-   * @return maxUserVolumeCount
-   */
-  public long getMaxUserVolumeCount() {
-    return maxUserVolumeCount;
-  }
-
-  /**
-   * Checks the leader status of the OM Ratis server.
-   * Note that this status has a small window of error; it should not be used
-   * to determine the absolute leader status.
-   * If this node is the leader, the role status is cached until the Ratis
-   * server notifies of a leader change. If it is not the leader, the role
-   * information is retrieved by submitting a GroupInfoRequest to the Ratis
-   * server.
-   *
-   * If Ratis is not enabled, this always returns true.
-   *
-   * @return true if this node is the leader, false otherwise.
-   */
-  public boolean isLeader() {
-    return !isRatisEnabled || omRatisServer.isLeader();
-  }
-
-  /**
-   * Return whether Ratis is enabled.
-   * @return true if Ratis is enabled, false otherwise.
-   */
-  public boolean isRatisEnabled() {
-    return isRatisEnabled;
-  }
-
-  /**
-   * Get DB updates since a specific sequence number.
-   * @param dbUpdatesRequest request that encapsulates a sequence number.
-   * @return Wrapper containing the updates.
-   * @throws SequenceNumberNotFoundException if the db is unable to read the
-   * data.
-   */
-  @Override
-  public DBUpdatesWrapper getDBUpdates(
-      DBUpdatesRequest dbUpdatesRequest)
-      throws SequenceNumberNotFoundException {
-    return metadataManager.getStore()
-        .getUpdatesSince(dbUpdatesRequest.getSequenceNumber());
-  }
-
-  public OzoneDelegationTokenSecretManager getDelegationTokenMgr() {
-    return delegationTokenMgr;
-  }
-
-  /**
-   * Return list of OzoneAdministrators.
-   */
-  public Collection<String> getOzoneAdmins() {
-    return ozAdmins;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
deleted file mode 100644
index b98d6d3..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.hdds.server.BaseHttpServer;
-
-import java.io.IOException;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT;
-
-/**
- * HttpServer wrapper for the OzoneManager.
- */
-public class OzoneManagerHttpServer extends BaseHttpServer {
-
-  public OzoneManagerHttpServer(Configuration conf, OzoneManager om)
-      throws IOException {
-    super(conf, "ozoneManager");
-    addServlet("serviceList", OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT,
-        ServiceListJSONServlet.class);
-    addServlet("dbCheckpoint", OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT,
-        OMDBCheckpointServlet.class);
-    getWebAppContext().setAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE, om);
-  }
-
-  @Override protected String getHttpAddressKey() {
-    return OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpBindHostKey() {
-    return OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY;
-  }
-
-  @Override protected String getHttpsAddressKey() {
-    return OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpsBindHostKey() {
-    return OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY;
-  }
-
-  @Override protected String getBindHostDefault() {
-    return OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_DEFAULT;
-  }
-
-  @Override protected int getHttpBindPortDefault() {
-    return OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected int getHttpsBindPortDefault() {
-    return OMConfigKeys.OZONE_OM_HTTPS_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected String getKeytabFile() {
-    return OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE;
-  }
-
-  @Override protected String getSpnegoPrincipal() {
-    return OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY;
-  }
-
-  @Override protected String getEnabledKey() {
-    return OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY;
-  }
-}
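
OzoneManagerHttpServer is a textbook template method: the base server reads its bind address and ports through abstract getters that each subclass pins to its own configuration keys. A minimal sketch of that shape, with illustrative names only:

```java
// Template-method sketch: the base class composes behavior from abstract
// getters that subclasses bind to their own configuration keys.
public class HttpServerTemplateSketch {

  abstract static class BaseServer {
    abstract String getHttpAddressKey();
    abstract String getBindHostDefault();
    abstract int getHttpBindPortDefault();

    final String describe() {
      return getHttpAddressKey() + " -> "
          + getBindHostDefault() + ":" + getHttpBindPortDefault();
    }
  }

  static final class DemoServer extends BaseServer {
    @Override String getHttpAddressKey() { return "demo.http-address"; }
    @Override String getBindHostDefault() { return "0.0.0.0"; }
    @Override int getHttpBindPortDefault() { return 9874; }
  }

  public static void main(String[] args) {
    System.out.println(new DemoServer().describe());
  }
}
```
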
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
deleted file mode 100644
index fa229aa..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-
-import java.io.IOException;
-
-/**
- * This class provides a command line interface to start the OM
- * using Picocli.
- */
-@Command(name = "ozone om",
-    hidden = true, description = "Start or initialize the Ozone Manager.",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true)
-public class OzoneManagerStarter extends GenericCli {
-
-  private OzoneConfiguration conf;
-  private OMStarterInterface receiver;
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneManagerStarter.class);
-
-  public static void main(String[] args) throws Exception {
-    TracingUtil.initTracing("OzoneManager");
-    new OzoneManagerStarter(
-        new OzoneManagerStarter.OMStarterHelper()).run(args);
-  }
-
-  public OzoneManagerStarter(OMStarterInterface receiverObj) {
-    super();
-    receiver = receiverObj;
-  }
-
-  @Override
-  public Void call() throws Exception {
-    /*
-     * This method is invoked only when no sub-command is given. Therefore,
-     * if someone runs "ozone om" with no parameters, this is the method
-     * that runs and starts the OM.
-     */
-    commonInit();
-    startOm();
-    return null;
-  }
-
-  /**
-   * This function is used by the command line to start the OM.
-   */
-  private void startOm() throws Exception {
-    receiver.start(conf);
-  }
-
-  /**
-   * This function implements a sub-command to allow the OM to be
-   * initialized from the command line.
-   */
-  @CommandLine.Command(name = "--init",
-      customSynopsis = "ozone om [global options] --init",
-      hidden = false,
-      description = "Initialize the Ozone Manager if not already initialized",
-      mixinStandardHelpOptions = true,
-      versionProvider = HddsVersionProvider.class)
-  public void initOm()
-      throws Exception {
-    commonInit();
-    boolean result = receiver.init(conf);
-    if (!result) {
-      throw new IOException("OM Init failed.");
-    }
-  }
-
-  /**
-   * This function should be called by each command to ensure the configuration
-   * is set and print the startup banner message.
-   */
-  private void commonInit() {
-    conf = createOzoneConfiguration();
-
-    String[] originalArgs = getCmd().getParseResult().originalArgs()
-        .toArray(new String[0]);
-    StringUtils.startupShutdownMessage(OzoneManager.class,
-        originalArgs, LOG);
-  }
-
-  /**
-   * This static class wraps the external dependencies needed for this command
-   * to execute its tasks. This allows the dependency to be injected for unit
-   * testing.
-   */
-  static class OMStarterHelper implements OMStarterInterface {
-
-    public void start(OzoneConfiguration conf) throws IOException,
-        AuthenticationException {
-      OzoneManager om = OzoneManager.createOm(conf);
-      om.start();
-      om.join();
-    }
-
-    public boolean init(OzoneConfiguration conf) throws IOException,
-        AuthenticationException {
-      return OzoneManager.omInit(conf);
-    }
-  }
-
-}
\ No newline at end of file
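
OzoneManagerStarter maps the bare command to starting the OM and an annotated method to the --init sub-command. A minimal picocli sketch of the same layout (assumes picocli 4.x on the classpath; all names are illustrative, not the OM's):

```java
// The default call() is the bare command; an annotated method becomes
// the "--init" sub-command, exactly as in the starter above.
import java.util.concurrent.Callable;
import picocli.CommandLine;
import picocli.CommandLine.Command;

@Command(name = "demo-service", mixinStandardHelpOptions = true,
    description = "Start or initialize the demo service.")
public class ServiceStarterSketch implements Callable<Integer> {

  @Override
  public Integer call() {
    System.out.println("starting service ...");     // "ozone om" equivalent
    return 0;
  }

  @Command(name = "--init",
      description = "Initialize the service if not already initialized")
  public int init() {
    System.out.println("initializing service ..."); // "ozone om --init"
    return 0;
  }

  public static void main(String[] args) {
    System.exit(new CommandLine(new ServiceStarterSketch()).execute(args));
  }
}
```
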
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManager.java
deleted file mode 100644
index a505b8d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManager.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-
-import java.util.List;
-
-/**
- * Handles prefix commands.
-   * TODO: support OzoneManagerFS for ozfs optimization using the prefix tree.
- */
-public interface PrefixManager extends IOzoneAcl {
-
-  /**
-   * Returns the metadataManager.
-   * @return OMMetadataManager.
-   */
-  OMMetadataManager getMetadataManager();
-
-  /**
-   * Get the list of path components along the longest prefix that matches
-   * the object's path.
-   * Note: the entries include the root "/", so for a longest prefix path
-   * /a/b/c/ the returned list will be ["/", "a", "b", "c"].
-   * @param path ozone object path
-   * @return list of path components of the longest prefix matching the path.
-   */
-  List<OmPrefixInfo> getLongestPrefixPath(String path);
-}
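
The decomposition described in getLongestPrefixPath's Javadoc, shown concretely: splitting /a/b/c/ into its components with the root included.

```java
// Path decomposition with the root "/" always present, matching the
// ["/", "a", "b", "c"] example from the interface documentation.
import java.util.ArrayList;
import java.util.List;

public final class PrefixComponentsSketch {
  static List<String> components(String path) {
    List<String> out = new ArrayList<>();
    out.add("/");                              // root is always included
    for (String part : path.split("/")) {
      if (!part.isEmpty()) {
        out.add(part);
      }
    }
    return out;
  }

  public static void main(String[] args) {
    System.out.println(components("/a/b/c/")); // [/, a, b, c]
  }
}
```
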
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java
deleted file mode 100644
index c89b32e..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java
+++ /dev/null
@@ -1,417 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import com.google.common.base.Strings;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.RequestContext;
-import org.apache.hadoop.ozone.util.RadixNode;
-import org.apache.hadoop.ozone.util.RadixTree;
-import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PREFIX_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.PREFIX_LOCK;
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.PREFIX;
-
-/**
- * Implementation of PrefixManager.
- */
-public class PrefixManagerImpl implements PrefixManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(PrefixManagerImpl.class);
-
-  private static final List<OzoneAcl> EMPTY_ACL_LIST = new ArrayList<>();
-  private final OMMetadataManager metadataManager;
-
-  // In-memory prefix tree to optimize ACL evaluation
-  private RadixTree<OmPrefixInfo> prefixTree;
-
-  // TODO: This isRatisEnabled check will be removed as part of HDDS-1909,
-  //  where we integrate both HA and Non-HA code.
-  private boolean isRatisEnabled;
-
-  public PrefixManagerImpl(OMMetadataManager metadataManager,
-      boolean isRatisEnabled) {
-    this.isRatisEnabled = isRatisEnabled;
-    this.metadataManager = metadataManager;
-    loadPrefixTree();
-  }
-
-  private void loadPrefixTree() {
-    prefixTree = new RadixTree<>();
-    try (TableIterator<String, ? extends
-        KeyValue<String, OmPrefixInfo>> iterator =
-             getMetadataManager().getPrefixTable().iterator()) {
-      iterator.seekToFirst();
-      while (iterator.hasNext()) {
-        KeyValue<String, OmPrefixInfo> kv = iterator.next();
-        prefixTree.insert(kv.getKey(), kv.getValue());
-      }
-    } catch (IOException ex) {
-      LOG.error("Fail to load prefix tree");
-    }
-  }
-
-
-  @Override
-  public OMMetadataManager getMetadataManager() {
-    return metadataManager;
-  }
-
-  /**
-   * Add acl for Ozone object. Return true if acl is added successfully else
-   * false.
-   *
-   * @param obj Ozone object for which acl should be added.
-   * @param acl ozone acl to be added.
-   * @throws IOException if there is error.
-   */
-  @Override
-  public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    validateOzoneObj(obj);
-
-    String prefixPath = obj.getPath();
-    metadataManager.getLock().acquireLock(PREFIX_LOCK, prefixPath);
-    try {
-      OmPrefixInfo prefixInfo =
-          metadataManager.getPrefixTable().get(prefixPath);
-
-      OMPrefixAclOpResult omPrefixAclOpResult = addAcl(obj, acl, prefixInfo);
-
-      return omPrefixAclOpResult.isOperationsResult();
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Add acl operation failed for prefix path:{} acl:{}",
-            prefixPath, acl, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(PREFIX_LOCK, prefixPath);
-    }
-  }
-
-  /**
-   * Remove acl for Ozone object. Return true if acl is removed successfully
-   * else false.
-   *
-   * @param obj Ozone object.
-   * @param acl Ozone acl to be removed.
-   * @throws IOException if there is error.
-   */
-  @Override
-  public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    validateOzoneObj(obj);
-    String prefixPath = obj.getPath();
-    metadataManager.getLock().acquireLock(PREFIX_LOCK, prefixPath);
-    try {
-      OmPrefixInfo prefixInfo =
-          metadataManager.getPrefixTable().get(prefixPath);
-      OMPrefixAclOpResult omPrefixAclOpResult = removeAcl(obj, acl, prefixInfo);
-
-      if (!omPrefixAclOpResult.isOperationsResult()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("acl {} does not exist for prefix path {} ",
-              acl, prefixPath);
-        }
-        return false;
-      }
-
-      return true;
-
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Remove prefix acl operation failed for prefix path:{}" +
-            " acl:{}", prefixPath, acl, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(PREFIX_LOCK, prefixPath);
-    }
-  }
-
-  /**
-   * Acls to be set for the given Ozone object. This operation resets the ACL
-   * for the given object to the list of ACLs provided in the argument.
-   *
-   * @param obj Ozone object.
-   * @param acls List of acls.
-   * @throws IOException if there is error.
-   */
-  @Override
-  public boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException {
-    validateOzoneObj(obj);
-    String prefixPath = obj.getPath();
-    metadataManager.getLock().acquireLock(PREFIX_LOCK, prefixPath);
-    try {
-      OmPrefixInfo prefixInfo =
-          metadataManager.getPrefixTable().get(prefixPath);
-
-      OMPrefixAclOpResult omPrefixAclOpResult = setAcl(obj, acls, prefixInfo);
-
-      return omPrefixAclOpResult.isOperationsResult();
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Set prefix acl operation failed for prefix path:{} acls:{}",
-            prefixPath, acls, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(PREFIX_LOCK, prefixPath);
-    }
-  }
-
-  /**
-   * Returns list of ACLs for given Ozone object.
-   *
-   * @param obj Ozone object.
-   * @throws IOException if there is error.
-   */
-  @Override
-  public List<OzoneAcl> getAcl(OzoneObj obj) throws IOException {
-    validateOzoneObj(obj);
-    String prefixPath = obj.getPath();
-    metadataManager.getLock().acquireLock(PREFIX_LOCK, prefixPath);
-    try {
-      String longestPrefix = prefixTree.getLongestPrefix(prefixPath);
-      if (prefixPath.equals(longestPrefix)) {
-        RadixNode<OmPrefixInfo> lastNode =
-            prefixTree.getLastNodeInPrefixPath(prefixPath);
-        if (lastNode != null && lastNode.getValue() != null) {
-          return lastNode.getValue().getAcls();
-        }
-      }
-    } finally {
-      metadataManager.getLock().releaseLock(PREFIX_LOCK, prefixPath);
-    }
-    return EMPTY_ACL_LIST;
-  }
-
-  /**
-   * Check access for given ozoneObject.
-   *
-   * @param ozObject object for which access needs to be checked.
-   * @param context Context object encapsulating all user related information.
-   * @return true if user has access else false.
-   */
-  @Override
-  public boolean checkAccess(OzoneObj ozObject, RequestContext context)
-      throws OMException {
-    Objects.requireNonNull(ozObject);
-    Objects.requireNonNull(context);
-
-    String prefixPath = ozObject.getPath();
-    metadataManager.getLock().acquireLock(PREFIX_LOCK, prefixPath);
-    try {
-      String longestPrefix = prefixTree.getLongestPrefix(prefixPath);
-      if (prefixPath.equals(longestPrefix)) {
-        RadixNode<OmPrefixInfo> lastNode =
-            prefixTree.getLastNodeInPrefixPath(prefixPath);
-        if (lastNode != null && lastNode.getValue() != null) {
-          boolean hasAccess = OzoneAclUtil.checkAclRights(lastNode.getValue().
-              getAcls(), context);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("user:{} has access rights for ozObj:{} ::{} ",
-                context.getClientUgi(), ozObject, hasAccess);
-          }
-          return hasAccess;
-        } else {
-          return true;
-        }
-      } else {
-        return true;
-      }
-    } finally {
-      metadataManager.getLock().releaseLock(PREFIX_LOCK, prefixPath);
-    }
-  }
-
-  @Override
-  public List<OmPrefixInfo> getLongestPrefixPath(String path) {
-    String prefixPath = prefixTree.getLongestPrefix(path);
-    metadataManager.getLock().acquireLock(PREFIX_LOCK, prefixPath);
-    try {
-      return getLongestPrefixPathHelper(prefixPath);
-    } finally {
-      metadataManager.getLock().releaseLock(PREFIX_LOCK, prefixPath);
-    }
-  }
-
-  /**
-   * Get the longest prefix path, assuming the caller holds the prefix lock.
-   * @param prefixPath prefix path to resolve.
-   * @return list of prefix info.
-   */
-  private List<OmPrefixInfo> getLongestPrefixPathHelper(String prefixPath) {
-    return prefixTree.getLongestPrefixPath(prefixPath).stream()
-          .map(c -> c.getValue()).collect(Collectors.toList());
-  }
-
-  /**
-   * Helper method to validate an ozone object.
-   * @param obj ozone object to validate.
-   */
-  public void validateOzoneObj(OzoneObj obj) throws OMException {
-    Objects.requireNonNull(obj);
-
-    if (!obj.getResourceType().equals(PREFIX)) {
-      throw new IllegalArgumentException("Unexpected argument passed to " +
-          "PrefixManager. OzoneObj type:" + obj.getResourceType());
-    }
-    String volume = obj.getVolumeName();
-    String bucket = obj.getBucketName();
-    String prefixName = obj.getPrefixName();
-
-    if (Strings.isNullOrEmpty(volume)) {
-      throw new OMException("Volume name is required.", VOLUME_NOT_FOUND);
-    }
-    if (Strings.isNullOrEmpty(bucket)) {
-      throw new OMException("Bucket name is required.", BUCKET_NOT_FOUND);
-    }
-    if (Strings.isNullOrEmpty(prefixName)) {
-      throw new OMException("Prefix name is required.", PREFIX_NOT_FOUND);
-    }
-    if (!prefixName.endsWith("/")) {
-      throw new OMException("Invalid prefix name: " + prefixName,
-          PREFIX_NOT_FOUND);
-    }
-  }
-
-  public OMPrefixAclOpResult addAcl(OzoneObj ozoneObj, OzoneAcl ozoneAcl,
-      OmPrefixInfo prefixInfo) throws IOException {
-
-    if (prefixInfo == null) {
-      prefixInfo = new OmPrefixInfo.Builder().setName(ozoneObj
-          .getPath()).build();
-    }
-    boolean changed = prefixInfo.addAcl(ozoneAcl);
-    if (changed) {
-      // update the in-memory prefix tree
-      prefixTree.insert(ozoneObj.getPath(), prefixInfo);
-
-      if (!isRatisEnabled) {
-        metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo);
-      }
-    }
-    return new OMPrefixAclOpResult(prefixInfo, changed);
-  }
-
-  public OMPrefixAclOpResult removeAcl(OzoneObj ozoneObj, OzoneAcl ozoneAcl,
-      OmPrefixInfo prefixInfo) throws IOException {
-    boolean removed = false;
-    if (prefixInfo != null) {
-      removed = prefixInfo.removeAcl(ozoneAcl);
-    }
-
-    // Only update the stores when an acl was actually removed.
-    if (removed) {
-      // Update in-memory prefix tree.
-      if (prefixInfo.getAcls().isEmpty()) {
-        prefixTree.removePrefixPath(ozoneObj.getPath());
-        if (!isRatisEnabled) {
-          metadataManager.getPrefixTable().delete(ozoneObj.getPath());
-        }
-      } else {
-        prefixTree.insert(ozoneObj.getPath(), prefixInfo);
-        if (!isRatisEnabled) {
-          metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo);
-        }
-      }
-    }
-    return new OMPrefixAclOpResult(prefixInfo, removed);
-  }
-
-  public OMPrefixAclOpResult setAcl(OzoneObj ozoneObj, List<OzoneAcl> ozoneAcls,
-      OmPrefixInfo prefixInfo) throws IOException {
-    if (prefixInfo == null) {
-      prefixInfo = new OmPrefixInfo.Builder().setName(ozoneObj
-          .getPath()).build();
-    }
-
-    boolean changed = prefixInfo.setAcls(ozoneAcls);
-    if (changed) {
-      List<OzoneAcl> aclsToBeSet = prefixInfo.getAcls();
-      // Inherit DEFAULT acls from prefix.
-      boolean prefixParentFound = false;
-      List<OmPrefixInfo> prefixList = getLongestPrefixPathHelper(
-          prefixTree.getLongestPrefix(ozoneObj.getPath()));
-
-      if (!prefixList.isEmpty()) {
-        // Add all acls from direct parent to key.
-        OmPrefixInfo parentPrefixInfo = prefixList.get(prefixList.size() - 1);
-        if (parentPrefixInfo != null) {
-          prefixParentFound = OzoneAclUtil.inheritDefaultAcls(aclsToBeSet,
-              parentPrefixInfo.getAcls());
-        }
-      }
-
-      // If no parent prefix is found inherit DEFAULT acls from bucket.
-      if (!prefixParentFound) {
-        String bucketKey = metadataManager.getBucketKey(ozoneObj
-            .getVolumeName(), ozoneObj.getBucketName());
-        OmBucketInfo bucketInfo = metadataManager.getBucketTable().
-            get(bucketKey);
-        if (bucketInfo != null) {
-          OzoneAclUtil.inheritDefaultAcls(aclsToBeSet, bucketInfo.getAcls());
-        }
-      }
-
-      prefixTree.insert(ozoneObj.getPath(), prefixInfo);
-      if (!isRatisEnabled) {
-        metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo);
-      }
-    }
-    return new OMPrefixAclOpResult(prefixInfo, changed);
-  }
-
-  /**
-   * Result of the prefix acl operation.
-   */
-  public static class OMPrefixAclOpResult {
-    private OmPrefixInfo omPrefixInfo;
-    private boolean operationsResult;
-
-    public OMPrefixAclOpResult(OmPrefixInfo omPrefixInfo,
-        boolean operationsResult) {
-      this.omPrefixInfo = omPrefixInfo;
-      this.operationsResult = operationsResult;
-    }
-
-    public OmPrefixInfo getOmPrefixInfo() {
-      return omPrefixInfo;
-    }
-
-    public boolean isOperationsResult() {
-      return operationsResult;
-    }
-  }
-}
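A minimal sketch of the acquire/try/finally-release idiom PrefixManagerImpl wraps around every ACL operation, with a plain ReentrantLock standing in for OzoneManagerLock and its PREFIX_LOCK resource.

    import java.util.concurrent.locks.ReentrantLock;

    public class PrefixLockIdiomDemo {
      private static final ReentrantLock PREFIX_LOCK = new ReentrantLock();

      // Acquire, operate, release in finally -- the lock is released on
      // both the success and the exception path.
      static boolean withPrefixLock(Runnable op) {
        PREFIX_LOCK.lock();
        try {
          op.run();
          return true;
        } finally {
          PREFIX_LOCK.unlock();
        }
      }

      public static void main(String[] args) {
        withPrefixLock(() -> System.out.println("acl update under lock"));
      }
    }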
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManager.java
deleted file mode 100644
index dfd0ac3..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManager.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-
-/**
- * An interface that maps S3 buckets to Ozone
- * volume/bucket.
- */
-public interface S3BucketManager {
-  /**
-   * Creates an s3 bucket and maps it to Ozone volume/bucket.
-   * @param  userName - Name of the user who owns the bucket.
-   * @param bucketName - S3 Bucket Name.
-   * @throws  IOException in case the bucket cannot be created.
-   */
-  void createS3Bucket(String userName, String bucketName) throws IOException;
-
-  /**
-   * Deletes an s3 bucket and removes mapping of Ozone volume/bucket.
-   * @param bucketName - S3 Bucket Name.
-   * @throws  IOException in case the bucket cannot be deleted.
-   */
-  void deleteS3Bucket(String bucketName) throws IOException;
-
-  /**
-   * Returns the Ozone volume/bucket where the S3 Bucket points to.
-   * @param s3BucketName - S3 Bucket Name
-   * @return String - Ozone volume/bucket
-   * @throws IOException in case of failure to retrieve mapping.
-   */
-  String getOzoneBucketMapping(String s3BucketName) throws IOException;
-
-  /**
-   * Returns Ozone volume name for a given S3Bucket.
-   * @param s3BucketName - S3 bucket name.
-   * @return String - Ozone volume name where the s3 bucket resides.
-   * @throws IOException - in case of failure to retrieve mapping.
-   */
-  String getOzoneVolumeName(String s3BucketName) throws IOException;
-
-  /**
-   * Returns Ozone bucket name for a given s3Bucket.
-   * @param s3BucketName  - S3 bucket Name.
-   * @return  Ozone bucket name for this given S3 bucket
-   * @throws IOException - in case of failure to retrieve mapping.
-   */
-  String getOzoneBucketName(String s3BucketName) throws IOException;
-
-  /**
-   * Returns volume Name for a user.
-   * @param userName
-   */
-  String getOzoneVolumeNameForUser(String userName) throws IOException;
-
-  /**
-   * Create the ozone volume if required; this is needed during createS3Bucket.
-   * @param userName name of the user who owns the volume.
-   * @return true if the volume is successfully created, false if the volume
-   * already exists.
-   * @throws IOException - in case of volume creation failure.
-   */
-  boolean createOzoneVolumeIfNeeded(String userName) throws IOException;
-
-  /**
-   * Return volume name from userName.
-   * @param userName
-   */
-  String formatOzoneVolumeName(String userName);
-}
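A minimal sketch of the "ozoneVolume/ozoneBucket" mapping string this interface describes; the value is illustrative, and the split mirrors how getOzoneVolumeName and getOzoneBucketName recover the two halves.

    public class S3MappingDemo {
      public static void main(String[] args) {
        // Hypothetical value as stored in the S3 table.
        String mapping = "s3alice/photos";
        String volume = mapping.split("/")[0]; // "s3alice"
        String bucket = mapping.split("/")[1]; // "photos"
        System.out.println(volume + " -> " + bucket);
      }
    }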
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManagerImpl.java
deleted file mode 100644
index 8a581bb..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManagerImpl.java
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.ozone.om;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_ALREADY_EXISTS;
-
-import org.apache.logging.log4j.util.Strings;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Objects;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OM_S3_VOLUME_PREFIX;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_BUCKET_LOCK;
-
-/**
- * S3 Bucket Manager, this class maintains a mapping between S3 Bucket and Ozone
- * Volume/bucket.
- */
-public class S3BucketManagerImpl implements S3BucketManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(S3BucketManagerImpl.class);
-
-  private static final String S3_ADMIN_NAME = "OzoneS3Manager";
-  private final OzoneConfiguration configuration;
-  private final OMMetadataManager omMetadataManager;
-  private final VolumeManager volumeManager;
-  private final BucketManager bucketManager;
-
-  /**
-   * Construct an S3 Bucket Manager Object.
-   *
-   * @param configuration - Ozone Configuration.
-   * @param omMetadataManager - Ozone Metadata Manager.
-   */
-  public S3BucketManagerImpl(
-      OzoneConfiguration configuration,
-      OMMetadataManager omMetadataManager,
-      VolumeManager volumeManager,
-      BucketManager bucketManager) {
-    this.configuration = configuration;
-    this.omMetadataManager = omMetadataManager;
-    this.volumeManager = volumeManager;
-    this.bucketManager = bucketManager;
-  }
-
-  @Override
-  public void createS3Bucket(String userName, String bucketName)
-      throws IOException {
-    Preconditions.checkArgument(Strings.isNotBlank(bucketName), "Bucket" +
-        " name cannot be null or empty.");
-    Preconditions.checkArgument(Strings.isNotBlank(userName), "User name " +
-        "cannot be null or empty.");
-
-    Preconditions.checkArgument(bucketName.length() >= 3 &&
-        bucketName.length() < 64,
-        "S3 bucket name length must be between 3 and 63 characters.");
-
-
-    // TODO: Decide if we want to enforce S3 Bucket Creation Rules in this
-    // code path?
-    // https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
-
-    // Generate an Ozone volume name. For the time being, we are going to use
-    // s3userName as the Ozone volume name. Since S3 advises at most 100
-    // buckets per user, and we have no limit on the number of Ozone buckets
-    // under a volume, we stick to a very simple model.
-    //
-    // s3Bucket -> ozoneVolume/OzoneBucket name
-    // s3BucketName ->s3userName/s3Bucketname
-    //
-    // You might wonder, if all names map to this pattern, why we need to
-    // store the S3 bucketName in a table at all. This is to support
-    // anonymous access to a bucket where the user name is absent.
-    String ozoneVolumeName = formatOzoneVolumeName(userName);
-
-    omMetadataManager.getLock().acquireLock(S3_BUCKET_LOCK, bucketName);
-    try {
-      String bucket = omMetadataManager.getS3Table().get(bucketName);
-
-      if (bucket != null) {
-        LOG.debug("Bucket already exists. {}", bucketName);
-        throw new OMException(
-            "Unable to create S3 bucket. " + bucketName + " already exists.",
-            OMException.ResultCodes.S3_BUCKET_ALREADY_EXISTS);
-      }
-      String ozoneBucketName = bucketName;
-      createOzoneBucket(ozoneVolumeName, ozoneBucketName);
-      String finalName = String.format("%s/%s", ozoneVolumeName,
-          ozoneBucketName);
-
-      omMetadataManager.getS3Table().put(bucketName, finalName);
-    } finally {
-      omMetadataManager.getLock().releaseLock(S3_BUCKET_LOCK, bucketName);
-    }
-
-  }
-
-  @Override
-  public void deleteS3Bucket(String bucketName) throws IOException {
-    Preconditions.checkArgument(
-        Strings.isNotBlank(bucketName), "Bucket name cannot be null or empty");
-
-    omMetadataManager.getLock().acquireLock(S3_BUCKET_LOCK, bucketName);
-    try {
-      String map = omMetadataManager.getS3Table().get(bucketName);
-
-      if (map == null) {
-        throw new OMException("No such S3 bucket. " + bucketName,
-            OMException.ResultCodes.S3_BUCKET_NOT_FOUND);
-      }
-
-      bucketManager.deleteBucket(getOzoneVolumeName(bucketName), bucketName);
-      omMetadataManager.getS3Table().delete(bucketName);
-    } finally {
-      omMetadataManager.getLock().releaseLock(S3_BUCKET_LOCK, bucketName);
-    }
-
-  }
-
-  @Override
-  public String formatOzoneVolumeName(String userName) {
-    return String.format(OM_S3_VOLUME_PREFIX + "%s", userName);
-  }
-
-  @Override
-  public boolean createOzoneVolumeIfNeeded(String userName)
-      throws IOException {
-    // We don't have a time-of-check/time-of-use problem here because
-    // this call is invoked while holding the s3Bucket lock.
-    boolean newVolumeCreated = true;
-    String ozoneVolumeName = formatOzoneVolumeName(userName);
-    try {
-      OmVolumeArgs.Builder builder =
-          OmVolumeArgs.newBuilder()
-              .setAdminName(S3_ADMIN_NAME)
-              .setOwnerName(userName)
-              .setVolume(ozoneVolumeName)
-              .setQuotaInBytes(OzoneConsts.MAX_QUOTA_IN_BYTES);
-      for (OzoneAcl acl : getDefaultAcls(userName)) {
-        builder.addOzoneAcls(OzoneAcl.toProtobuf(acl));
-      }
-
-      OmVolumeArgs args = builder.build();
-
-      volumeManager.createVolume(args);
-
-    } catch (OMException exp) {
-      newVolumeCreated = false;
-      if (exp.getResult().compareTo(VOLUME_ALREADY_EXISTS) == 0) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Volume already exists. {}", exp.getMessage());
-        }
-      } else {
-        throw exp;
-      }
-    }
-
-    return newVolumeCreated;
-  }
-
-  /**
-   * Get default acls.
-   */
-  private List<OzoneAcl> getDefaultAcls(String userName) {
-    UserGroupInformation ugi = ProtobufRpcEngine.Server.getRemoteUser();
-    return OzoneAcl.parseAcls("user:" + (ugi == null ? userName :
-        ugi.getUserName()) + ":a,user:" + S3_ADMIN_NAME + ":a");
-  }
-
-  private void createOzoneBucket(String volumeName, String bucketName)
-      throws IOException {
-    OmBucketInfo.Builder builder = OmBucketInfo.newBuilder();
-    OmBucketInfo bucketInfo =
-        builder
-            .setVolumeName(volumeName)
-            .setBucketName(bucketName)
-            .setIsVersionEnabled(Boolean.FALSE)
-            .setStorageType(StorageType.DEFAULT)
-            .setAcls(getDefaultAcls(null))
-            .build();
-    bucketManager.createBucket(bucketInfo);
-  }
-
-  @Override
-  public String getOzoneBucketMapping(String s3BucketName) throws IOException {
-    Preconditions.checkArgument(
-        Strings.isNotBlank(s3BucketName),
-        "Bucket name cannot be null or empty.");
-    Preconditions.checkArgument(s3BucketName.length() >= 3 &&
-        s3BucketName.length() < 64,
-        "S3 bucket name length must be between 3 and 63 characters.");
-    omMetadataManager.getLock().acquireLock(S3_BUCKET_LOCK, s3BucketName);
-    try {
-      String mapping = omMetadataManager.getS3Table().get(s3BucketName);
-      if (mapping != null) {
-        return mapping;
-      }
-      throw new OMException("No such S3 bucket.",
-          OMException.ResultCodes.S3_BUCKET_NOT_FOUND);
-    } finally {
-      omMetadataManager.getLock().releaseLock(S3_BUCKET_LOCK, s3BucketName);
-    }
-  }
-
-  @Override
-  public String getOzoneVolumeName(String s3BucketName) throws IOException {
-    String mapping = getOzoneBucketMapping(s3BucketName);
-    return mapping.split("/")[0];
-  }
-
-  @Override
-  public String getOzoneBucketName(String s3BucketName) throws IOException {
-    String mapping = getOzoneBucketMapping(s3BucketName);
-    return mapping.split("/")[1];
-  }
-
-  @Override
-  public String getOzoneVolumeNameForUser(String userName) throws IOException {
-    Objects.requireNonNull(userName, "UserName cannot be null");
-    return formatOzoneVolumeName(userName);
-  }
-
-}
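A minimal sketch of the idempotent-create pattern createOzoneVolumeIfNeeded uses above: attempt the create and treat an already-exists failure as "not newly created" rather than an error. The exception type and in-memory store are hypothetical stand-ins for OMException with VOLUME_ALREADY_EXISTS.

    import java.util.HashSet;
    import java.util.Set;

    public class CreateIfNeededDemo {
      static class AlreadyExistsException extends Exception { }

      private static final Set<String> VOLUMES = new HashSet<>();

      static void createVolume(String name) throws AlreadyExistsException {
        if (!VOLUMES.add(name)) {
          throw new AlreadyExistsException();
        }
      }

      // Returns true only when the volume was newly created.
      static boolean createIfNeeded(String name) {
        try {
          createVolume(name);
          return true;
        } catch (AlreadyExistsException e) {
          return false; // already present: swallowed, not rethrown
        }
      }

      public static void main(String[] args) {
        System.out.println(createIfNeeded("s3alice")); // true
        System.out.println(createIfNeeded("s3alice")); // false
      }
    }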
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ScmClient.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ScmClient.java
deleted file mode 100644
index 6a8ef37..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ScmClient.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-
-/**
- * Wrapper class for Scm protocol clients.
- */
-public class ScmClient {
-
-  private final ScmBlockLocationProtocol blockClient;
-  private final StorageContainerLocationProtocol containerClient;
-
-  ScmClient(ScmBlockLocationProtocol blockClient,
-            StorageContainerLocationProtocol containerClient) {
-    this.containerClient = containerClient;
-    this.blockClient = blockClient;
-  }
-
-  public ScmBlockLocationProtocol getBlockClient() {
-    return this.blockClient;
-  }
-
-  StorageContainerLocationProtocol getContainerClient() {
-    return this.containerClient;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
deleted file mode 100644
index 9aab823..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.SerializationFeature;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.io.PrintWriter;
-
-
-/**
- * Provides REST access to Ozone Service List.
- * <p>
- * This servlet generally will be placed under the /serviceList URL of
- * OzoneManager HttpServer.
- *
- * The response format is JSON, in the form
- * <p>
- *  <pre><code>
- *  {
- *    "services" : [
- *      {
- *        "NodeType":"OM",
- *        "Hostname" "$hostname",
- *        "ports" : {
- *          "$PortType" : "$port",
- *          ...
- *        }
- *      }
- *    ]
- *  }
- *  </code></pre>
- */
-public class ServiceListJSONServlet extends HttpServlet {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ServiceListJSONServlet.class);
-  private static final long serialVersionUID = 1L;
-
-  private transient OzoneManager om;
-
-  @Override
-  public void init() throws ServletException {
-    this.om = (OzoneManager) getServletContext()
-        .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
-  }
-
-  /**
-   * Process a GET request for the specified resource.
-   *
-   * @param request
-   *          The servlet request we are processing
-   * @param response
-   *          The servlet response we are creating
-   */
-  @Override
-  public void doGet(HttpServletRequest request, HttpServletResponse response) {
-    try {
-      ObjectMapper objectMapper = new ObjectMapper();
-      objectMapper.enable(SerializationFeature.INDENT_OUTPUT);
-      response.setContentType("application/json; charset=utf-8");
-      try (PrintWriter writer = response.getWriter()) {
-        writer.write(objectMapper.writeValueAsString(om.getServiceList()));
-      }
-    } catch (IOException e) {
-      LOG.error(
-          "Caught an exception while processing ServiceList request", e);
-      response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
-    }
-  }
-
-}
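A minimal sketch of the Jackson serialization the servlet performs: an ObjectMapper with INDENT_OUTPUT pretty-printing a service-list-shaped object. The map below is a hypothetical stand-in for om.getServiceList(); it only imitates the documented JSON shape.

    import com.fasterxml.jackson.core.JsonProcessingException;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.SerializationFeature;

    import java.util.List;
    import java.util.Map;

    public class ServiceListJsonDemo {
      public static void main(String[] args) throws JsonProcessingException {
        ObjectMapper mapper = new ObjectMapper();
        mapper.enable(SerializationFeature.INDENT_OUTPUT); // pretty-print
        // Hypothetical stand-in for one entry of om.getServiceList().
        Map<String, Object> om = Map.of(
            "NodeType", "OM",
            "Hostname", "om-host.example.com",
            "ports", Map.of("RPC", 9862));
        System.out.println(
            mapper.writeValueAsString(Map.of("services", List.of(om))));
      }
    }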
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java
deleted file mode 100644
index 01c277f..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.OzoneAclInfo;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * OM volume manager interface.
- */
-public interface VolumeManager extends IOzoneAcl {
-
-  /**
-   * Create a new volume.
-   * @param args - Volume args to create a volume
-   */
-  void createVolume(OmVolumeArgs args)
-      throws IOException;
-
-  /**
-   * Changes the owner of a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  void setOwner(String volume, String owner)
-      throws IOException;
-
-  /**
-   * Changes the Quota on a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  void setQuota(String volume, long quota) throws IOException;
-
-  /**
-   * Gets the volume information.
-   * @param volume - Volume name.
-   * @return OmVolumeArgs for the volume; an exception is thrown if it does
-   * not exist.
-   * @throws IOException
-   */
-  OmVolumeArgs getVolumeInfo(String volume) throws IOException;
-
-  /**
-   * Deletes an existing empty volume.
-   *
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  void deleteVolume(String volume) throws IOException;
-
-  /**
-   * Checks if the specified user with a role can access this volume.
-   *
-   * @param volume - volume
-   * @param userAcl - user acl which needs to be checked for access
-   * @return true if the user has access for the volume, false otherwise
-   * @throws IOException
-   */
-  boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
-      throws IOException;
-
-  /**
-   * Returns a list of volumes owned by a given user; if user is null,
-   * returns all volumes.
-   *
-   * @param userName
-   *   volume owner
-   * @param prefix
-   *   the volume prefix used to filter the listing result.
-   * @param startKey
-   *   the start volume name determines where to start listing from,
-   *   this key is excluded from the result.
-   * @param maxKeys
-   *   the maximum number of volumes to return.
-   * @return a list of {@link OmVolumeArgs}
-   * @throws IOException
-   */
-  List<OmVolumeArgs> listVolumes(String userName, String prefix,
-      String startKey, int maxKeys) throws IOException;
-
-}
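A minimal sketch of the listVolumes contract documented above: filter by prefix, start strictly after startKey (the start volume itself is excluded), and cap the result at maxKeys. Volume names are illustrative.

    import java.util.List;
    import java.util.stream.Collectors;

    public class ListVolumesDemo {
      static List<String> list(List<String> all, String prefix,
          String startKey, int maxKeys) {
        return all.stream()
            .filter(v -> v.startsWith(prefix))
            .filter(v -> startKey == null || v.compareTo(startKey) > 0)
            .sorted()
            .limit(maxKeys)
            .collect(Collectors.toList());
      }

      public static void main(String[] args) {
        List<String> vols = List.of("vol1", "vol2", "vol3", "data1");
        System.out.println(list(vols, "vol", "vol1", 2)); // [vol2, vol3]
      }
    }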
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
deleted file mode 100644
index 7375eb8..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
+++ /dev/null
@@ -1,705 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.UserVolumeInfo;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.RequestContext;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import com.google.common.base.Preconditions;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME_DEFAULT;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.USER_LOCK;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * OM volume management code.
- */
-public class VolumeManagerImpl implements VolumeManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(VolumeManagerImpl.class);
-
-  private final OMMetadataManager metadataManager;
-  private final int maxUserVolumeCount;
-  private final boolean aclEnabled;
-
-
-  /**
-   * Constructor.
-   * @param metadataManager - OM metadata manager.
-   * @param conf - Ozone configuration.
-   */
-  public VolumeManagerImpl(OMMetadataManager metadataManager,
-      OzoneConfiguration conf) {
-    this.metadataManager = metadataManager;
-    this.maxUserVolumeCount = conf.getInt(OZONE_OM_USER_MAX_VOLUME,
-        OZONE_OM_USER_MAX_VOLUME_DEFAULT);
-    aclEnabled = conf.getBoolean(OzoneConfigKeys.OZONE_ACL_ENABLED,
-        OzoneConfigKeys.OZONE_ACL_ENABLED_DEFAULT);
-  }
-
-  // Helpers to add and delete volume from user list
-  private UserVolumeInfo addVolumeToOwnerList(String volume, String owner)
-      throws IOException {
-    // Get the volume list
-    String dbUserKey = metadataManager.getUserKey(owner);
-    UserVolumeInfo volumeList = metadataManager.getUserTable().get(dbUserKey);
-    List<String> prevVolList = new ArrayList<>();
-    if (volumeList != null) {
-      prevVolList.addAll(volumeList.getVolumeNamesList());
-    }
-
-    // Check the volume count
-    if (prevVolList.size() >= maxUserVolumeCount) {
-      LOG.debug("Too many volumes for user:{}", owner);
-      throw new OMException("Too many volumes for user:" + owner,
-          ResultCodes.USER_TOO_MANY_VOLUMES);
-    }
-
-    // Add the new volume to the list
-    prevVolList.add(volume);
-    UserVolumeInfo newVolList = UserVolumeInfo.newBuilder()
-        .addAllVolumeNames(prevVolList).build();
-
-    return newVolList;
-  }
-
-  private UserVolumeInfo delVolumeFromOwnerList(String volume, String owner)
-      throws IOException {
-    // Get the volume list
-    UserVolumeInfo volumeList = metadataManager.getUserTable().get(owner);
-    List<String> prevVolList = new ArrayList<>();
-    if (volumeList != null) {
-      prevVolList.addAll(volumeList.getVolumeNamesList());
-    } else {
-      LOG.debug("volume:{} not found for user:{}", volume, owner);
-      throw new OMException(ResultCodes.USER_NOT_FOUND);
-    }
-
-    // Remove the volume from the list
-    prevVolList.remove(volume);
-    UserVolumeInfo newVolList = UserVolumeInfo.newBuilder()
-        .addAllVolumeNames(prevVolList).build();
-    return newVolList;
-  }
-
-  /**
-   * Creates a volume.
-   * @param omVolumeArgs - OmVolumeArgs.
-   */
-  @Override
-  public void createVolume(OmVolumeArgs omVolumeArgs) throws IOException {
-    Preconditions.checkNotNull(omVolumeArgs);
-
-    boolean acquiredUserLock = false;
-    metadataManager.getLock().acquireLock(VOLUME_LOCK,
-        omVolumeArgs.getVolume());
-    try {
-      acquiredUserLock = metadataManager.getLock().acquireLock(USER_LOCK,
-          omVolumeArgs.getOwnerName());
-      String dbVolumeKey = metadataManager.getVolumeKey(
-          omVolumeArgs.getVolume());
-      String dbUserKey = metadataManager.getUserKey(
-          omVolumeArgs.getOwnerName());
-      OmVolumeArgs volumeInfo =
-          metadataManager.getVolumeTable().get(dbVolumeKey);
-
-      // Check if the volume already exists
-      if (volumeInfo != null) {
-        LOG.debug("volume:{} already exists", omVolumeArgs.getVolume());
-        throw new OMException(ResultCodes.VOLUME_ALREADY_EXISTS);
-      }
-
-      UserVolumeInfo volumeList = addVolumeToOwnerList(omVolumeArgs.getVolume(),
-          omVolumeArgs.getOwnerName());
-
-      // Set creation time
-      omVolumeArgs.setCreationTime(System.currentTimeMillis());
-
-
-      createVolumeCommitToDB(omVolumeArgs, volumeList, dbVolumeKey,
-            dbUserKey);
-
-      LOG.debug("created volume:{} user:{}", omVolumeArgs.getVolume(),
-          omVolumeArgs.getOwnerName());
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Volume creation failed for user:{} volume:{}",
-            omVolumeArgs.getOwnerName(), omVolumeArgs.getVolume(), ex);
-      }
-      throw ex;
-    } finally {
-      if (acquiredUserLock) {
-        metadataManager.getLock().releaseLock(USER_LOCK,
-            omVolumeArgs.getOwnerName());
-      }
-      metadataManager.getLock().releaseLock(VOLUME_LOCK,
-          omVolumeArgs.getVolume());
-    }
-  }
-
-  private void createVolumeCommitToDB(OmVolumeArgs omVolumeArgs,
-      UserVolumeInfo volumeList, String dbVolumeKey, String dbUserKey)
-      throws IOException {
-    try (BatchOperation batch = metadataManager.getStore()
-        .initBatchOperation()) {
-      // Write the volume info.
-      metadataManager.getVolumeTable().putWithBatch(batch, dbVolumeKey,
-          omVolumeArgs);
-      // Add the volume to the owner's volume list.
-      metadataManager.getUserTable().putWithBatch(batch, dbUserKey,
-          volumeList);
-      metadataManager.getStore().commitBatchOperation(batch);
-    }
-  }
-
-  /**
-   * Changes the owner of a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  @Override
-  public void setOwner(String volume, String owner)
-      throws IOException {
-    Preconditions.checkNotNull(volume);
-    Preconditions.checkNotNull(owner);
-    boolean acquiredUsersLock = false;
-    String actualOwner = null;
-    metadataManager.getLock().acquireLock(VOLUME_LOCK, volume);
-    try {
-      String dbVolumeKey = metadataManager.getVolumeKey(volume);
-      OmVolumeArgs volumeArgs = metadataManager
-          .getVolumeTable().get(dbVolumeKey);
-      if (volumeArgs == null) {
-        LOG.debug("Changing volume ownership failed for user:{} volume:{}",
-            owner, volume);
-        throw new OMException("Volume " + volume + " is not found",
-            ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
-
-      actualOwner = volumeArgs.getOwnerName();
-      String originalOwner = metadataManager.getUserKey(actualOwner);
-
-      acquiredUsersLock = metadataManager.getLock().acquireMultiUserLock(owner,
-          originalOwner);
-      UserVolumeInfo oldOwnerVolumeList = delVolumeFromOwnerList(volume,
-          originalOwner);
-
-      String newOwner =  metadataManager.getUserKey(owner);
-      UserVolumeInfo newOwnerVolumeList = addVolumeToOwnerList(volume,
-          newOwner);
-
-      volumeArgs.setOwnerName(owner);
-      setOwnerCommitToDB(oldOwnerVolumeList, newOwnerVolumeList,
-          volumeArgs, owner);
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Changing volume ownership failed for user:{} volume:{}",
-            owner, volume, ex);
-      }
-      throw ex;
-    } finally {
-      if (acquiredUsersLock) {
-        metadataManager.getLock().releaseMultiUserLock(owner, actualOwner);
-      }
-      metadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
-    }
-  }
-
-
-  private void setOwnerCommitToDB(UserVolumeInfo oldOwnerVolumeList,
-      UserVolumeInfo newOwnerVolumeList, OmVolumeArgs newOwnerVolumeArgs,
-      String oldOwner) throws IOException {
-    try (BatchOperation batch = metadataManager.getStore()
-        .initBatchOperation()) {
-      if (oldOwnerVolumeList.getVolumeNamesList().size() == 0) {
-        metadataManager.getUserTable().deleteWithBatch(batch, oldOwner);
-      } else {
-        metadataManager.getUserTable().putWithBatch(batch, oldOwner,
-            oldOwnerVolumeList);
-      }
-      metadataManager.getUserTable().putWithBatch(batch,
-          newOwnerVolumeArgs.getOwnerName(),
-          newOwnerVolumeList);
-
-      String dbVolumeKey =
-          metadataManager.getVolumeKey(newOwnerVolumeArgs.getVolume());
-      metadataManager.getVolumeTable().putWithBatch(batch,
-          dbVolumeKey, newOwnerVolumeArgs);
-      metadataManager.getStore().commitBatchOperation(batch);
-    }
-  }
-
-
-  /**
-   * Changes the Quota on a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   *
-   * @throws IOException
-   */
-  @Override
-  public void setQuota(String volume, long quota) throws IOException {
-    Preconditions.checkNotNull(volume);
-    metadataManager.getLock().acquireLock(VOLUME_LOCK, volume);
-    try {
-      String dbVolumeKey = metadataManager.getVolumeKey(volume);
-      OmVolumeArgs volumeArgs =
-          metadataManager.getVolumeTable().get(dbVolumeKey);
-      if (volumeArgs == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new OMException(ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
-
-      volumeArgs.setQuotaInBytes(quota);
-
-      metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs);
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Changing volume quota failed for volume:{} quota:{}", volume,
-            quota, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
-    }
-  }
-
-  /**
-   * Gets the volume information.
-   * @param volume - Volume name.
-   * @return OmVolumeArgs for the volume; an exception is thrown if it does
-   * not exist.
-   * @throws IOException
-   */
-  @Override
-  public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
-    Preconditions.checkNotNull(volume);
-    metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volume);
-    try {
-      String dbVolumeKey = metadataManager.getVolumeKey(volume);
-      OmVolumeArgs volumeArgs =
-          metadataManager.getVolumeTable().get(dbVolumeKey);
-      if (volumeArgs == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new OMException("Volume " + volume + " is not found",
-            ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      return volumeArgs;
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.warn("Info volume failed for volume:{}", volume, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volume);
-    }
-  }
-
-  /**
-   * Deletes an existing empty volume.
-   *
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  @Override
-  public void deleteVolume(String volume) throws IOException {
-    Preconditions.checkNotNull(volume);
-    String owner = null;
-    boolean acquiredUserLock = false;
-    metadataManager.getLock().acquireLock(VOLUME_LOCK, volume);
-    try {
-      owner = getVolumeInfo(volume).getOwnerName();
-      acquiredUserLock = metadataManager.getLock().acquireLock(USER_LOCK,
-          owner);
-      String dbVolumeKey = metadataManager.getVolumeKey(volume);
-      OmVolumeArgs volumeArgs =
-          metadataManager.getVolumeTable().get(dbVolumeKey);
-      if (volumeArgs == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new OMException("Volume " + volume + " is not found",
-            ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      if (!metadataManager.isVolumeEmpty(volume)) {
-        LOG.debug("volume:{} is not empty", volume);
-        throw new OMException(ResultCodes.VOLUME_NOT_EMPTY);
-      }
-      Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
-      // delete the volume from the owner list
-      // as well as delete the volume entry
-      UserVolumeInfo newVolumeList = delVolumeFromOwnerList(volume,
-          volumeArgs.getOwnerName());
-
-
-      deleteVolumeCommitToDB(newVolumeList, volume, owner);
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Delete volume failed for volume:{}", volume, ex);
-      }
-      throw ex;
-    } finally {
-      if (acquiredUserLock) {
-        metadataManager.getLock().releaseLock(USER_LOCK, owner);
-      }
-      metadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
-
-    }
-  }
-
-
-  private void deleteVolumeCommitToDB(UserVolumeInfo newVolumeList,
-      String volume, String owner) throws IOException {
-    try (BatchOperation batch = metadataManager.getStore()
-        .initBatchOperation()) {
-      String dbUserKey = metadataManager.getUserKey(owner);
-      if (newVolumeList.getVolumeNamesList().size() == 0) {
-        metadataManager.getUserTable().deleteWithBatch(batch, dbUserKey);
-      } else {
-        metadataManager.getUserTable().putWithBatch(batch, dbUserKey,
-            newVolumeList);
-      }
-      metadataManager.getVolumeTable().deleteWithBatch(batch,
-          metadataManager.getVolumeKey(volume));
-      metadataManager.getStore().commitBatchOperation(batch);
-    }
-  }
-
-  /**
-   * Checks if the specified user with a role can access this volume.
-   *
-   * @param volume - volume
-   * @param userAcl - user acl which needs to be checked for access
-   * @return true if the user has access for the volume, false otherwise
-   * @throws IOException
-   */
-  @Override
-  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
-      throws IOException {
-    Preconditions.checkNotNull(volume);
-    Preconditions.checkNotNull(userAcl);
-    metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volume);
-    try {
-      String dbVolumeKey = metadataManager.getVolumeKey(volume);
-      OmVolumeArgs volumeArgs =
-          metadataManager.getVolumeTable().get(dbVolumeKey);
-      if (volumeArgs == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new OMException("Volume " + volume + " is not found",
-            ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
-      return volumeArgs.getAclMap().hasAccess(userAcl);
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Check volume access failed for volume:{} user:{} rights:{}",
-            volume, userAcl.getName(), userAcl.getRights().toString(), ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volume);
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<OmVolumeArgs> listVolumes(String userName,
-      String prefix, String startKey, int maxKeys) throws IOException {
-    metadataManager.getLock().acquireLock(USER_LOCK, userName);
-    try {
-      List<OmVolumeArgs> volumes = metadataManager.listVolumes(
-          userName, prefix, startKey, maxKeys);
-      UserGroupInformation userUgi = ProtobufRpcEngine.Server.
-          getRemoteUser();
-      if (userUgi == null || !aclEnabled) {
-        return volumes;
-      }
-
-      List<OmVolumeArgs> filteredVolumes = volumes.stream().
-          filter(v -> v.getAclMap().
-              hasAccess(IAccessAuthorizer.ACLType.LIST, userUgi))
-          .collect(Collectors.toList());
-      return filteredVolumes;
-    } finally {
-      metadataManager.getLock().releaseLock(USER_LOCK, userName);
-    }
-  }
-
-  /**
-   * Add acl for Ozone object. Return true if acl is added successfully else
-   * false.
-   *
-   * @param obj Ozone object for which acl should be added.
-   * @param acl ozone acl to be added.
-   * @throws IOException if there is error.
-   */
-  @Override
-  public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    Objects.requireNonNull(obj);
-    Objects.requireNonNull(acl);
-    if (!obj.getResourceType().equals(OzoneObj.ResourceType.VOLUME)) {
-      throw new IllegalArgumentException("Unexpected argument passed to " +
-          "VolumeManager. OzoneObj type:" + obj.getResourceType());
-    }
-    String volume = obj.getVolumeName();
-    metadataManager.getLock().acquireLock(VOLUME_LOCK, volume);
-    try {
-      String dbVolumeKey = metadataManager.getVolumeKey(volume);
-      OmVolumeArgs volumeArgs =
-          metadataManager.getVolumeTable().get(dbVolumeKey);
-      if (volumeArgs == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new OMException("Volume " + volume + " is not found",
-            ResultCodes.VOLUME_NOT_FOUND);
-      }
-      try {
-        volumeArgs.addAcl(acl);
-      } catch (OMException ex) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Add acl failed.", ex);
-        }
-        return false;
-      }
-      metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs);
-
-      Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Add acl operation failed for volume:{} acl:{}",
-            volume, acl, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
-    }
-
-    return true;
-  }
-
-  /**
-   * Remove acl for Ozone object. Return true if acl is removed successfully
-   * else false.
-   *
-   * @param obj Ozone object.
-   * @param acl Ozone acl to be removed.
-   * @throws IOException if there is error.
-   */
-  @Override
-  public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    Objects.requireNonNull(obj);
-    Objects.requireNonNull(acl);
-    if (!obj.getResourceType().equals(OzoneObj.ResourceType.VOLUME)) {
-      throw new IllegalArgumentException("Unexpected argument passed to " +
-          "VolumeManager. OzoneObj type:" + obj.getResourceType());
-    }
-    String volume = obj.getVolumeName();
-    metadataManager.getLock().acquireLock(VOLUME_LOCK, volume);
-    try {
-      String dbVolumeKey = metadataManager.getVolumeKey(volume);
-      OmVolumeArgs volumeArgs =
-          metadataManager.getVolumeTable().get(dbVolumeKey);
-      if (volumeArgs == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new OMException("Volume " + volume + " is not found",
-            ResultCodes.VOLUME_NOT_FOUND);
-      }
-      try {
-        volumeArgs.removeAcl(acl);
-      } catch (OMException ex) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Remove acl failed.", ex);
-        }
-        return false;
-      }
-      metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs);
-
-      Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Remove acl operation failed for volume:{} acl:{}",
-            volume, acl, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
-    }
-
-    return true;
-  }
-
-  /**
-   * Set acls for given Ozone object. This operation resets the ACL of the
-   * given object to the list of ACLs provided in the argument.
-   *
-   * @param obj Ozone object.
-   * @param acls List of acls.
-   * @throws IOException if there is error.
-   */
-  @Override
-  public boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException {
-    Objects.requireNonNull(obj);
-    Objects.requireNonNull(acls);
-
-    if (!obj.getResourceType().equals(OzoneObj.ResourceType.VOLUME)) {
-      throw new IllegalArgumentException("Unexpected argument passed to " +
-          "VolumeManager. OzoneObj type:" + obj.getResourceType());
-    }
-    String volume = obj.getVolumeName();
-    metadataManager.getLock().acquireLock(VOLUME_LOCK, volume);
-    try {
-      String dbVolumeKey = metadataManager.getVolumeKey(volume);
-      OmVolumeArgs volumeArgs =
-          metadataManager.getVolumeTable().get(dbVolumeKey);
-      if (volumeArgs == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new OMException("Volume " + volume + " is not found",
-            ResultCodes.VOLUME_NOT_FOUND);
-      }
-      volumeArgs.setAcls(acls);
-      metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs);
-
-      Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Set acl operation failed for volume:{} acls:{}",
-            volume, acls, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
-    }
-
-    return true;
-  }
-
-  /**
-   * Returns list of ACLs for given Ozone object.
-   *
-   * @param obj Ozone object.
-   * @throws IOException if there is error.
-   */
-  @Override
-  public List<OzoneAcl> getAcl(OzoneObj obj) throws IOException {
-    Objects.requireNonNull(obj);
-
-    if (!obj.getResourceType().equals(OzoneObj.ResourceType.VOLUME)) {
-      throw new IllegalArgumentException("Unexpected argument passed to " +
-          "VolumeManager. OzoneObj type:" + obj.getResourceType());
-    }
-    String volume = obj.getVolumeName();
-    metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volume);
-    try {
-      String dbVolumeKey = metadataManager.getVolumeKey(volume);
-      OmVolumeArgs volumeArgs =
-          metadataManager.getVolumeTable().get(dbVolumeKey);
-      if (volumeArgs == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new OMException("Volume " + volume + " is not found",
-            ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
-      return volumeArgs.getAclMap().getAcl();
-    } catch (IOException ex) {
-      if (!(ex instanceof OMException)) {
-        LOG.error("Get acl operation failed for volume:{}", volume, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volume);
-    }
-  }
-
-  /**
-   * Check access for given ozoneObject.
-   *
-   * @param ozObject object for which access needs to be checked.
-   * @param context Context object encapsulating all user related information.
-   * @return true if user has access else false.
-   */
-  @Override
-  public boolean checkAccess(OzoneObj ozObject, RequestContext context)
-      throws OMException {
-    Objects.requireNonNull(ozObject);
-    Objects.requireNonNull(context);
-
-    String volume = ozObject.getVolumeName();
-    metadataManager.getLock().acquireLock(VOLUME_LOCK, volume);
-    try {
-      String dbVolumeKey = metadataManager.getVolumeKey(volume);
-      OmVolumeArgs volumeArgs =
-          metadataManager.getVolumeTable().get(dbVolumeKey);
-      if (volumeArgs == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new OMException("Volume " + volume + " is not found",
-            ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
-      boolean hasAccess = volumeArgs.getAclMap().hasAccess(
-          context.getAclRights(), context.getClientUgi());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("user:{} has access rights for volume:{} :{} ",
-            context.getClientUgi(), ozObject.getVolumeName(), hasAccess);
-      }
-      return hasAccess;
-    } catch (IOException ex) {
-      LOG.error("Check access operation failed for volume:{}", volume, ex);
-      throw new OMException("Check access operation failed for " +
-          "volume:" + volume, ex, ResultCodes.INTERNAL_ERROR);
-    } finally {
-      metadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java
deleted file mode 100644
index 647931a..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.fs;
-
-import org.apache.hadoop.ozone.om.IOzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Ozone Manager FileSystem interface.
- */
-public interface OzoneManagerFS extends IOzoneAcl {
-  OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException;
-
-  void createDirectory(OmKeyArgs args) throws IOException;
-
-  OpenKeySession createFile(OmKeyArgs args, boolean isOverWrite,
-      boolean isRecursive) throws IOException;
-
-  /**
-   * Look up a file. Returns the info of the file to the client side.
-   *
-   * @param args the args of the key provided by the client.
-   * @param clientAddress a hint to the key manager to order the datanodes in
-   *                      the returned pipeline by their distance to the client.
-   * @return an OmKeyInfo instance the client uses to talk to the container.
-   * @throws IOException
-   */
-  OmKeyInfo lookupFile(OmKeyArgs args, String clientAddress) throws IOException;
-
-  List<OzoneFileStatus> listStatus(OmKeyArgs keyArgs, boolean recursive,
-      String startKey, long numEntries) throws IOException;
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/package-info.java
deleted file mode 100644
index 3255185..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.fs;
-/*
- This package contains the Ozone Manager FileSystem interface classes.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
deleted file mode 100644
index 8d9e709..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
+++ /dev/null
@@ -1,306 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.ha;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.OzoneIllegalArgumentException;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODE_ID_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
-
-/**
- * Class which maintains peer information and its own OM node information.
- */
-public class OMHANodeDetails {
-
-  private static String[] genericConfigKeys = new String[] {
-      OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY,
-      OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY,
-      OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY,
-      OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY,
-      OMConfigKeys.OZONE_OM_DB_DIRS,
-      OMConfigKeys.OZONE_OM_ADDRESS_KEY,
-  };
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(OMHANodeDetails.class);
-  private final OMNodeDetails localNodeDetails;
-  private final List<OMNodeDetails> peerNodeDetails;
-
-  public OMHANodeDetails(OMNodeDetails localNodeDetails,
-      List<OMNodeDetails> peerNodeDetails) {
-    this.localNodeDetails = localNodeDetails;
-    this.peerNodeDetails = peerNodeDetails;
-  }
-
-  public OMNodeDetails getLocalNodeDetails() {
-    return localNodeDetails;
-  }
-
-  public List<OMNodeDetails> getPeerNodeDetails() {
-    return peerNodeDetails;
-  }
-
-
-  /**
-   * Inspects and loads OM node configurations.
-   *
-   * If {@link OMConfigKeys#OZONE_OM_SERVICE_IDS_KEY} is configured with
-   * multiple ids and/or if {@link OMConfigKeys#OZONE_OM_NODE_ID_KEY} is not
-   * specifically configured, this method determines the omServiceId
-   * and omNodeId by matching the node's address with the configured
-   * addresses. When a match is found, it sets the omServiceId and omNodeId
-   * from the corresponding configuration key. This method also finds the OM
-   * peer nodes belonging to the same OM service.
-   *
-   * @param conf Ozone configuration to load the OM HA settings from.
-   */
-  public static OMHANodeDetails loadOMHAConfig(OzoneConfiguration conf) {
-    InetSocketAddress localRpcAddress = null;
-    String localOMServiceId = null;
-    String localOMNodeId = null;
-    int localRatisPort = 0;
-    Collection<String> omServiceIds = conf.getTrimmedStringCollection(
-        OZONE_OM_SERVICE_IDS_KEY);
-
-    String knownOMNodeId = conf.get(OZONE_OM_NODE_ID_KEY);
-    int found = 0;
-    boolean isOMAddressSet = false;
-
-    for (String serviceId : omServiceIds) {
-      Collection<String> omNodeIds = OmUtils.getOMNodeIds(conf, serviceId);
-
-      if (omNodeIds.isEmpty()) {
-        String msg = "Configuration does not have any value set for " +
-            OZONE_OM_NODES_KEY + " for service ID " + serviceId + ". List of " +
-            "OM node IDs should be specified for the service ID";
-        throw new OzoneIllegalArgumentException(msg);
-      }
-
-      List<OMNodeDetails> peerNodesList = new ArrayList<>();
-      for (String nodeId : omNodeIds) {
-        boolean isPeer =
-            knownOMNodeId != null && !knownOMNodeId.equals(nodeId);
-        String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
-            serviceId, nodeId);
-        String rpcAddrStr = OmUtils.getOmRpcAddress(conf, rpcAddrKey);
-        if (rpcAddrStr == null || rpcAddrStr.isEmpty()) {
-          String msg = "Configuration does not have any value set for " +
-              rpcAddrKey + "." + "OM Rpc Address should be set for all node " +
-              "IDs for a service ID.";
-          throw new OzoneIllegalArgumentException(msg);
-        }
-
-        // If OM address is set for any node id, we will not fallback to the
-        // default
-        isOMAddressSet = true;
-
-        String ratisPortKey = OmUtils.addKeySuffixes(OZONE_OM_RATIS_PORT_KEY,
-            serviceId, nodeId);
-        int ratisPort = conf.getInt(ratisPortKey, OZONE_OM_RATIS_PORT_DEFAULT);
-
-        InetSocketAddress addr = null;
-        try {
-          addr = NetUtils.createSocketAddr(rpcAddrStr);
-        } catch (Exception e) {
-          LOG.warn("Exception in creating socket address " + addr, e);
-          continue;
-        }
-        if (!addr.isUnresolved()) {
-          if (!isPeer && OmUtils.isAddressLocal(addr)) {
-            localRpcAddress = addr;
-            localOMServiceId = serviceId;
-            localOMNodeId = nodeId;
-            localRatisPort = ratisPort;
-            found++;
-          } else {
-            // This OMNode belongs to same OM service as the current OMNode.
-            // Add it to peerNodes list.
-            peerNodesList.add(getHAOMNodeDetails(conf, serviceId,
-                nodeId, addr, ratisPort));
-          }
-        }
-      }
-      if (found == 1) {
-        LOG.debug("Found one matching OM address with service ID: {} and node" +
-            " ID: {}", localOMServiceId, localOMNodeId);
-
-        LOG.info("Found matching OM address with OMServiceId: {}, " +
-                "OMNodeId: {}, RPC Address: {} and Ratis port: {}",
-            localOMServiceId, localOMNodeId,
-            NetUtils.getHostPortString(localRpcAddress), localRatisPort);
-
-
-        setOMNodeSpecificConfigs(conf, localOMServiceId, localOMNodeId);
-        return new OMHANodeDetails(getHAOMNodeDetails(conf, localOMServiceId,
-            localOMNodeId, localRpcAddress, localRatisPort), peerNodesList);
-
-      } else if (found > 1) {
-        String msg = "Configuration has multiple " + OZONE_OM_ADDRESS_KEY +
-            " addresses that match local node's address. Please configure the" +
-            " system with " + OZONE_OM_SERVICE_IDS_KEY + " and " +
-            OZONE_OM_ADDRESS_KEY;
-        throw new OzoneIllegalArgumentException(msg);
-      }
-    }
-
-    if (!isOMAddressSet) {
-      // No OM address is set. Fallback to default
-      InetSocketAddress omAddress = OmUtils.getOmAddress(conf);
-      int ratisPort = conf.getInt(OZONE_OM_RATIS_PORT_KEY,
-          OZONE_OM_RATIS_PORT_DEFAULT);
-
-      LOG.info("Configuration either no {} set. Falling back to the default " +
-          "OM address {}", OZONE_OM_ADDRESS_KEY, omAddress);
-
-      return new OMHANodeDetails(getOMNodeDetails(conf, null,
-          null, omAddress, ratisPort), new ArrayList<>());
-
-    } else {
-      String msg = "Configuration has no " + OZONE_OM_ADDRESS_KEY + " " +
-          "address that matches local node's address. Please configure the " +
-          "system with " + OZONE_OM_ADDRESS_KEY;
-      LOG.info(msg);
-      throw new OzoneIllegalArgumentException(msg);
-    }
-  }
-
-  /**
-   * Create Local OM Node Details.
-   * @param serviceId - Service ID this OM belongs to.
-   * @param nodeId - Node ID of this OM.
-   * @param rpcAddress - Rpc Address of the OM.
-   * @param ratisPort - Ratis port of the OM.
-   * @return OMNodeDetails
-   */
-  public static OMNodeDetails getOMNodeDetails(OzoneConfiguration conf,
-      String serviceId, String nodeId, InetSocketAddress rpcAddress,
-      int ratisPort) {
-
-    if (serviceId == null) {
-      // If no serviceId is set, take the default serviceID om-service
-      serviceId = OzoneConsts.OM_SERVICE_ID_DEFAULT;
-      LOG.info("OM Service ID is not set. Setting it to the default ID: {}",
-          serviceId);
-    }
-
-
-    // We need to pass null for serviceID and nodeID as this is a
-    // non-HA cluster, i.e. a single-node OM cluster.
-    String httpAddr = OmUtils.getHttpAddressForOMPeerNode(conf,
-        null, null, rpcAddress.getHostName());
-    String httpsAddr = OmUtils.getHttpsAddressForOMPeerNode(conf,
-        null, null, rpcAddress.getHostName());
-
-    return new OMNodeDetails.Builder()
-        .setOMServiceId(serviceId)
-        .setOMNodeId(nodeId)
-        .setRpcAddress(rpcAddress)
-        .setRatisPort(ratisPort)
-        .setHttpAddress(httpAddr)
-        .setHttpsAddress(httpsAddr)
-        .build();
-
-  }
-
-
-  /**
-   * Create HA OM Node Details.
-   * @param serviceId - Service ID this OM belongs to.
-   * @param nodeId - Node ID of this OM.
-   * @param rpcAddress - Rpc Address of the OM.
-   * @param ratisPort - Ratis port of the OM.
-   * @return OMNodeDetails
-   */
-  public static OMNodeDetails getHAOMNodeDetails(OzoneConfiguration conf,
-      String serviceId, String nodeId, InetSocketAddress rpcAddress,
-      int ratisPort) {
-    Preconditions.checkNotNull(serviceId);
-    Preconditions.checkNotNull(nodeId);
-
-    String httpAddr = OmUtils.getHttpAddressForOMPeerNode(conf,
-        serviceId, nodeId, rpcAddress.getHostName());
-    String httpsAddr = OmUtils.getHttpsAddressForOMPeerNode(conf,
-        serviceId, nodeId, rpcAddress.getHostName());
-
-    return new OMNodeDetails.Builder()
-        .setOMServiceId(serviceId)
-        .setOMNodeId(nodeId)
-        .setRpcAddress(rpcAddress)
-        .setRatisPort(ratisPort)
-        .setHttpAddress(httpAddr)
-        .setHttpsAddress(httpsAddr)
-        .build();
-
-  }
-
-
-  /**
-   * Check if any of the following configuration keys have been set using OM
-   * Node ID suffixed to the key. If yes, then set the base key with the
-   * configured value.
-   *    1. {@link OMConfigKeys#OZONE_OM_HTTP_ADDRESS_KEY}
-   *    2. {@link OMConfigKeys#OZONE_OM_HTTPS_ADDRESS_KEY}
-   *    3. {@link OMConfigKeys#OZONE_OM_HTTP_BIND_HOST_KEY}
-   *    4. {@link OMConfigKeys#OZONE_OM_HTTPS_BIND_HOST_KEY}
-   *    5. {@link OMConfigKeys#OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE}
-   *    6. {@link OMConfigKeys#OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY}
-   *    7. {@link OMConfigKeys#OZONE_OM_KERBEROS_KEYTAB_FILE_KEY}
-   *    8. {@link OMConfigKeys#OZONE_OM_KERBEROS_PRINCIPAL_KEY}
-   *    9. {@link OMConfigKeys#OZONE_OM_DB_DIRS}
-   *    10. {@link OMConfigKeys#OZONE_OM_ADDRESS_KEY}
-   */
-  private static void setOMNodeSpecificConfigs(
-      OzoneConfiguration ozoneConfiguration, String omServiceId,
-      String omNodeId) {
-
-    for (String confKey : genericConfigKeys) {
-      String confValue = OmUtils.getConfSuffixedWithOMNodeId(
-          ozoneConfiguration, confKey, omServiceId, omNodeId);
-      if (confValue != null) {
-        LOG.info("Setting configuration key {} with value of key {}: {}",
-            confKey, OmUtils.addKeySuffixes(confKey, omNodeId), confValue);
-        ozoneConfiguration.set(confKey, confValue);
-      }
-    }
-  }
-
-
-}
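For illustration, here is a minimal, self-contained sketch of the node-ID-suffixed configuration resolution that loadOMHAConfig and setOMNodeSpecificConfigs rely on. It uses a plain Map in place of OzoneConfiguration; the dot-joined suffix format mirrors how OmUtils.addKeySuffixes is used above, but the helper names here (SuffixedKeySketch, resolve) are hypothetical and not part of the deleted code.

import java.util.HashMap;
import java.util.Map;

public final class SuffixedKeySketch {

  // Mimics the dot-joined suffixing used with OmUtils.addKeySuffixes
  // (assumption: each non-empty suffix is appended with a '.').
  static String addKeySuffixes(String key, String... suffixes) {
    StringBuilder sb = new StringBuilder(key);
    for (String suffix : suffixes) {
      if (suffix != null && !suffix.isEmpty()) {
        sb.append('.').append(suffix);
      }
    }
    return sb.toString();
  }

  // Prefer the node-specific key; fall back to the base key, which is the
  // effect setOMNodeSpecificConfigs achieves by copying suffixed values
  // back onto the base keys.
  static String resolve(Map<String, String> conf, String key,
      String serviceId, String nodeId) {
    String value = conf.get(addKeySuffixes(key, serviceId, nodeId));
    return value != null ? value : conf.get(key);
  }

  public static void main(String[] args) {
    Map<String, String> conf = new HashMap<>();
    conf.put("ozone.om.address.omService1.omNode2", "host2:9862");
    // Prints host2:9862 -- the suffixed key wins over the (absent) base key.
    System.out.println(resolve(conf, "ozone.om.address",
        "omService1", "omNode2"));
  }
}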
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java
deleted file mode 100644
index 7d69b93..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.ha;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.net.NetUtils;
-
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT;
-
-/**
- * This class stores OM node details.
- */
-public final class OMNodeDetails {
-  private String omServiceId;
-  private String omNodeId;
-  private InetSocketAddress rpcAddress;
-  private int rpcPort;
-  private int ratisPort;
-  private String httpAddress;
-  private String httpsAddress;
-
-  /**
-   * Constructs OMNodeDetails object.
-   */
-  private OMNodeDetails(String serviceId, String nodeId,
-      InetSocketAddress rpcAddr, int rpcPort, int ratisPort,
-      String httpAddress, String httpsAddress) {
-    this.omServiceId = serviceId;
-    this.omNodeId = nodeId;
-    this.rpcAddress = rpcAddr;
-    this.rpcPort = rpcPort;
-    this.ratisPort = ratisPort;
-    this.httpAddress = httpAddress;
-    this.httpsAddress = httpsAddress;
-  }
-
-  @Override
-  public String toString() {
-    return "OMNodeDetails["
-        + "omServiceId=" + omServiceId +
-        ", omNodeId=" + omNodeId +
-        ", rpcAddress=" + rpcAddress +
-        ", rpcPort=" + rpcPort +
-        ", ratisPort=" + ratisPort +
-        ", httpAddress=" + httpAddress +
-        ", httpsAddress=" + httpsAddress +
-        "]";
-  }
-
-  /**
-   * Builder class for OMNodeDetails.
-   */
-  public static class Builder {
-    private String omServiceId;
-    private String omNodeId;
-    private InetSocketAddress rpcAddress;
-    private int rpcPort;
-    private int ratisPort;
-    private String httpAddr;
-    private String httpsAddr;
-
-    public Builder setRpcAddress(InetSocketAddress rpcAddr) {
-      this.rpcAddress = rpcAddr;
-      this.rpcPort = rpcAddress.getPort();
-      return this;
-    }
-
-    public Builder setRatisPort(int port) {
-      this.ratisPort = port;
-      return this;
-    }
-
-    public Builder setOMServiceId(String serviceId) {
-      this.omServiceId = serviceId;
-      return this;
-    }
-
-    public Builder setOMNodeId(String nodeId) {
-      this.omNodeId = nodeId;
-      return this;
-    }
-
-    public Builder setHttpAddress(String httpAddress) {
-      this.httpAddr = httpAddress;
-      return this;
-    }
-
-    public Builder setHttpsAddress(String httpsAddress) {
-      this.httpsAddr = httpsAddress;
-      return this;
-    }
-
-    public OMNodeDetails build() {
-      return new OMNodeDetails(omServiceId, omNodeId, rpcAddress, rpcPort,
-          ratisPort, httpAddr, httpsAddr);
-    }
-  }
-
-  public String getOMServiceId() {
-    return omServiceId;
-  }
-
-  public String getOMNodeId() {
-    return omNodeId;
-  }
-
-  public InetSocketAddress getRpcAddress() {
-    return rpcAddress;
-  }
-
-  public InetAddress getAddress() {
-    return rpcAddress.getAddress();
-  }
-
-  public int getRatisPort() {
-    return ratisPort;
-  }
-
-  public int getRpcPort() {
-    return rpcPort;
-  }
-
-  public String getRpcAddressString() {
-    return NetUtils.getHostPortString(rpcAddress);
-  }
-
-  public String getOMDBCheckpointEnpointUrl(HttpConfig.Policy httpPolicy) {
-    if (httpPolicy.isHttpEnabled()) {
-      if (StringUtils.isNotEmpty(httpAddress)) {
-        return "http://" + httpAddress + OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT
-            + "?" + OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT + "=true";
-      }
-    } else {
-      if (StringUtils.isNotEmpty(httpsAddress)) {
-        return "https://" + httpsAddress + OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT
-            + "?" + OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT + "=true";
-      }
-    }
-    return null;
-  }
-}
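A hedged usage sketch of the Builder above; all values are illustrative, and NetUtils.createSocketAddr is the same helper used by OMHANodeDetails:

OMNodeDetails node = new OMNodeDetails.Builder()
    .setOMServiceId("omService1")                       // illustrative
    .setOMNodeId("omNode1")                             // illustrative
    .setRpcAddress(NetUtils.createSocketAddr("om1.example.com:9862"))
    .setRatisPort(9872)
    .setHttpAddress("om1.example.com:9874")
    .setHttpsAddress("om1.example.com:9875")
    .build();
// Note: setRpcAddress also derives the RPC port from the socket address.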
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
deleted file mode 100644
index 3c40c88..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.ha;
-
-/**
- * This package contains classes related to OM HA.
- */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
deleted file mode 100644
index 7904d5d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om;
-/*
- This package contains the Ozone Manager classes.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisSnapshotInfo.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisSnapshotInfo.java
deleted file mode 100644
index 520c117..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisSnapshotInfo.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.ratis;
-
-import org.apache.ratis.server.protocol.TermIndex;
-import org.apache.ratis.server.storage.FileInfo;
-import org.apache.ratis.statemachine.SnapshotInfo;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.yaml.snakeyaml.DumperOptions;
-import org.yaml.snakeyaml.Yaml;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
-import java.util.List;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_INDEX;
-
-/**
- * This class captures the snapshotIndex and term of the latest snapshot in
- * the OM.
- * Ratis server loads the snapshotInfo during startup and updates the
- * lastApplied index to this snapshotIndex. OM SnapshotInfo does not contain
- * any files. It is used only to store/update the last applied index and term.
- */
-public class OMRatisSnapshotInfo implements SnapshotInfo {
-
-  static final Logger LOG = LoggerFactory.getLogger(OMRatisSnapshotInfo.class);
-
-  private volatile long term = 0;
-  private volatile long snapshotIndex = -1;
-
-  private final File ratisSnapshotFile;
-
-  public OMRatisSnapshotInfo(File ratisDir) throws IOException {
-    ratisSnapshotFile = new File(ratisDir, OM_RATIS_SNAPSHOT_INDEX);
-    loadRatisSnapshotIndex();
-  }
-
-  public void updateTerm(long newTerm) {
-    term = newTerm;
-  }
-
-  private void updateSnapshotIndex(long newSnapshotIndex) {
-    snapshotIndex = newSnapshotIndex;
-  }
-
-  private void updateTermIndex(long newTerm, long newIndex) {
-    this.term = newTerm;
-    this.snapshotIndex = newIndex;
-  }
-
-  /**
-   * Load the snapshot index and term from the snapshot file on disk,
-   * if it exists.
-   * @throws IOException
-   */
-  private void loadRatisSnapshotIndex() throws IOException {
-    if (ratisSnapshotFile.exists()) {
-      RatisSnapshotYaml ratisSnapshotYaml = readRatisSnapshotYaml();
-      updateTermIndex(ratisSnapshotYaml.term, ratisSnapshotYaml.snapshotIndex);
-    }
-  }
-
-  /**
-   * Read and parse the snapshot yaml file.
-   */
-  private RatisSnapshotYaml readRatisSnapshotYaml() throws IOException {
-    try (FileInputStream inputFileStream = new FileInputStream(
-        ratisSnapshotFile)) {
-      Yaml yaml = new Yaml();
-      try {
-        return yaml.loadAs(inputFileStream, RatisSnapshotYaml.class);
-      } catch (Exception e) {
-        throw new IOException("Unable to parse RatisSnapshot yaml file.", e);
-      }
-    }
-  }
-
-  /**
-   * Update and persist the snapshot index and term to disk.
-   * @param index new snapshot index to be persisted to disk.
-   * @throws IOException
-   */
-  public void saveRatisSnapshotToDisk(long index) throws IOException {
-    updateSnapshotIndex(index);
-    writeRatisSnapshotYaml();
-    LOG.info("Saved Ratis Snapshot on the OM with snapshotIndex {}", index);
-  }
-
-  /**
-   * Write snapshot details to disk in yaml format.
-   */
-  private void writeRatisSnapshotYaml() throws IOException {
-    DumperOptions options = new DumperOptions();
-    options.setPrettyFlow(true);
-    options.setDefaultFlowStyle(DumperOptions.FlowStyle.FLOW);
-    Yaml yaml = new Yaml(options);
-
-    RatisSnapshotYaml ratisSnapshotYaml = new RatisSnapshotYaml(term,
-        snapshotIndex);
-
-    try (Writer writer = new OutputStreamWriter(
-        new FileOutputStream(ratisSnapshotFile), "UTF-8")) {
-      yaml.dump(ratisSnapshotYaml, writer);
-    }
-  }
-
-  @Override
-  public TermIndex getTermIndex() {
-    return TermIndex.newTermIndex(term, snapshotIndex);
-  }
-
-  @Override
-  public long getTerm() {
-    return term;
-  }
-
-  @Override
-  public long getIndex() {
-    return snapshotIndex;
-  }
-
-  @Override
-  public List<FileInfo> getFiles() {
-    return null;
-  }
-
-  /**
-   * Ratis Snapshot details to be written to the yaml file.
-   */
-  public static class RatisSnapshotYaml {
-    private long term;
-    private long snapshotIndex;
-
-    public RatisSnapshotYaml() {
-      // Needed for snake-yaml introspection.
-    }
-
-    RatisSnapshotYaml(long term, long snapshotIndex) {
-      this.term = term;
-      this.snapshotIndex = snapshotIndex;
-    }
-
-    public void setTerm(long term) {
-      this.term = term;
-    }
-
-    public long getTerm() {
-      return this.term;
-    }
-
-    public void setSnapshotIndex(long index) {
-      this.snapshotIndex = index;
-    }
-
-    public long getSnapshotIndex() {
-      return this.snapshotIndex;
-    }
-  }
-}
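As a standalone illustration of the persistence pattern above, this sketch round-trips a small term/index POJO with SnakeYAML (the only dependency is org.yaml:snakeyaml, the same library imported by the class). The file name and class names here are hypothetical.

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;

import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.Yaml;

public class SnapshotYamlSketch {

  /** Simple POJO; a no-arg constructor and public accessors are required
   *  for snake-yaml introspection, as in RatisSnapshotYaml above. */
  public static class Snapshot {
    private long term;
    private long snapshotIndex;
    public Snapshot() { }
    public long getTerm() { return term; }
    public void setTerm(long t) { term = t; }
    public long getSnapshotIndex() { return snapshotIndex; }
    public void setSnapshotIndex(long i) { snapshotIndex = i; }
  }

  public static void main(String[] args) throws IOException {
    File file = new File("om-ratis-snapshot.yaml");  // hypothetical path

    // Write with the same flow-style options as the class above.
    DumperOptions options = new DumperOptions();
    options.setPrettyFlow(true);
    options.setDefaultFlowStyle(DumperOptions.FlowStyle.FLOW);
    Snapshot out = new Snapshot();
    out.setTerm(3);
    out.setSnapshotIndex(42);
    try (Writer writer = new OutputStreamWriter(
        new FileOutputStream(file), StandardCharsets.UTF_8)) {
      new Yaml(options).dump(out, writer);
    }

    // Read it back, as loadRatisSnapshotIndex does on startup.
    try (InputStream in = new FileInputStream(file)) {
      Snapshot loaded = new Yaml().loadAs(in, Snapshot.class);
      System.out.println(loaded.getTerm() + "/" + loaded.getSnapshotIndex());
    }
  }
}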
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
deleted file mode 100644
index e5cadff..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
+++ /dev/null
@@ -1,350 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.ratis;
-
-import java.io.IOException;
-import java.util.Queue;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.ratis.helpers.DoubleBufferEntry;
-import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.ratis.util.ExitUtils;
-
-/**
- * This class implements a double buffer of OMClientResponses. It maintains
- * two buffers: currentBuffer and readyBuffer. Incoming OM requests are
- * always added to currentBuffer. A flush thread runs in the background; it
- * checks whether currentBuffer has any entries, swaps the buffers, creates
- * a batch from the ready entries, and commits it to the DB.
- * Adding an OM request to the double buffer and swapping the buffers are
- * synchronized methods.
- *
- */
-public class OzoneManagerDoubleBuffer {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneManagerDoubleBuffer.class);
-
-  // We use an unbounded queue; if the sync thread takes too long, we might
-  // end up using a lot of memory to buffer entries.
-  // TODO: We could avoid the unbounded queue and use a bounded one instead;
-  // when the queue is full we would wait for the sync to complete before
-  // adding entries. But that might block rpc handlers, as we only clear
-  // entries after the sync. Or we can come up with a better approach to
-  // solve this.
-  private Queue<DoubleBufferEntry<OMClientResponse>> currentBuffer;
-  private Queue<DoubleBufferEntry<OMClientResponse>> readyBuffer;
-
-
-  // future objects which hold the future returned by add method.
-  private volatile Queue<CompletableFuture<Void>> currentFutureQueue;
-
-  // Once we have an entry in current buffer, we swap the currentFutureQueue
-  // with readyFutureQueue. After flush is completed in flushTransaction
-  // daemon thread, we complete the futures in readyFutureQueue and clear them.
-  private volatile Queue<CompletableFuture<Void>> readyFutureQueue;
-
-  private Daemon daemon;
-  private final OMMetadataManager omMetadataManager;
-  private final AtomicLong flushedTransactionCount = new AtomicLong(0);
-  private final AtomicLong flushIterations = new AtomicLong(0);
-  private final AtomicBoolean isRunning = new AtomicBoolean(false);
-  private OzoneManagerDoubleBufferMetrics ozoneManagerDoubleBufferMetrics;
-  private long maxFlushedTransactionsInOneIteration;
-
-  private final OzoneManagerRatisSnapshot ozoneManagerRatisSnapShot;
-
-  private final boolean isRatisEnabled;
-
-  public OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager,
-      OzoneManagerRatisSnapshot ozoneManagerRatisSnapShot) {
-    this(omMetadataManager, ozoneManagerRatisSnapShot, true);
-  }
-
-  public OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager,
-      OzoneManagerRatisSnapshot ozoneManagerRatisSnapShot,
-      boolean isRatisEnabled) {
-    this.currentBuffer = new ConcurrentLinkedQueue<>();
-    this.readyBuffer = new ConcurrentLinkedQueue<>();
-
-    this.isRatisEnabled = isRatisEnabled;
-
-    if (!isRatisEnabled) {
-      this.currentFutureQueue = new ConcurrentLinkedQueue<>();
-      this.readyFutureQueue = new ConcurrentLinkedQueue<>();
-    } else {
-      this.currentFutureQueue = null;
-      this.readyFutureQueue = null;
-    }
-
-    this.omMetadataManager = omMetadataManager;
-    this.ozoneManagerRatisSnapShot = ozoneManagerRatisSnapShot;
-    this.ozoneManagerDoubleBufferMetrics =
-        OzoneManagerDoubleBufferMetrics.create();
-
-    isRunning.set(true);
-    // Daemon thread which runs in background and flushes transactions to DB.
-    daemon = new Daemon(this::flushTransactions);
-    daemon.setName("OMDoubleBufferFlushThread");
-    daemon.start();
-
-  }
-
-  /**
-   * Runs in a background thread, batches the transactions in currentBuffer,
-   * and commits them to the DB.
-   */
-  private void flushTransactions() {
-    while (isRunning.get()) {
-      try {
-        if (canFlush()) {
-          setReadyBuffer();
-          final BatchOperation batchOperation = omMetadataManager.getStore()
-              .initBatchOperation();
-
-          readyBuffer.iterator().forEachRemaining((entry) -> {
-            try {
-              entry.getResponse().addToDBBatch(omMetadataManager,
-                  batchOperation);
-            } catch (IOException ex) {
-              // Got an exception while adding the entry to the RocksDB batch.
-              // We should terminate the OM.
-              terminate(ex);
-            }
-          });
-
-          omMetadataManager.getStore().commitBatchOperation(batchOperation);
-          int flushedTransactionsSize = readyBuffer.size();
-          flushedTransactionCount.addAndGet(flushedTransactionsSize);
-          flushIterations.incrementAndGet();
-
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Sync Iteration {} flushed transactions in this " +
-                    "iteration{}", flushIterations.get(),
-                flushedTransactionsSize);
-          }
-
-          long lastRatisTransactionIndex =
-              readyBuffer.stream().map(DoubleBufferEntry::getTrxLogIndex)
-              .max(Long::compareTo).get();
-
-          readyBuffer.clear();
-
-          // cleanup cache.
-          cleanupCache(lastRatisTransactionIndex);
-
-          // TODO: Need to revisit this logic, once we have multiple
-          //  executors for volume/bucket request handling. As for now
-          //  transactions are serialized this should be fine.
-          // update the last updated index in OzoneManagerStateMachine.
-          ozoneManagerRatisSnapShot.updateLastAppliedIndex(
-              lastRatisTransactionIndex);
-
-          // set metrics.
-          updateMetrics(flushedTransactionsSize);
-
-          if (!isRatisEnabled) {
-            // Once all entries are flushed, we can complete their future.
-            readyFutureQueue.iterator().forEachRemaining((entry) -> {
-              entry.complete(null);
-            });
-
-            readyFutureQueue.clear();
-          }
-        }
-      } catch (InterruptedException ex) {
-        Thread.currentThread().interrupt();
-        if (isRunning.get()) {
-          final String message = "OMDoubleBuffer flush thread " +
-              Thread.currentThread().getName() + " encountered Interrupted " +
-              "exception while running";
-          ExitUtils.terminate(1, message, ex, LOG);
-        } else {
-          LOG.info("OMDoubleBuffer flush thread " +
-              Thread.currentThread().getName() + " is interrupted and will " +
-              "exit. {}", Thread.currentThread().getName());
-        }
-      } catch (IOException ex) {
-        terminate(ex);
-      } catch (Throwable t) {
-        final String s = "OMDoubleBuffer flush thread" +
-            Thread.currentThread().getName() + "encountered Throwable error";
-        ExitUtils.terminate(2, s, t, LOG);
-      }
-    }
-  }
-
-  private void cleanupCache(long lastRatisTransactionIndex) {
-    // Clean up the caches of all tables that may have been touched by the
-    // flushed transactions.
-    // TODO: After supporting all write operations we need to call
-    //  cleanupCache on the tables only when buffer has entries for that table.
-    omMetadataManager.getBucketTable().cleanupCache(lastRatisTransactionIndex);
-    omMetadataManager.getVolumeTable().cleanupCache(lastRatisTransactionIndex);
-    omMetadataManager.getUserTable().cleanupCache(lastRatisTransactionIndex);
-
-    //TODO: Optimization we can do here is for key transactions we can only
-    // cleanup cache when it is key commit transaction. In this way all
-    // intermediate transactions for a key will be read from in-memory cache.
-    omMetadataManager.getOpenKeyTable().cleanupCache(lastRatisTransactionIndex);
-    omMetadataManager.getKeyTable().cleanupCache(lastRatisTransactionIndex);
-    omMetadataManager.getDeletedTable().cleanupCache(lastRatisTransactionIndex);
-    omMetadataManager.getS3Table().cleanupCache(lastRatisTransactionIndex);
-    omMetadataManager.getMultipartInfoTable().cleanupCache(
-        lastRatisTransactionIndex);
-
-  }
-
-  /**
-   * Update OzoneManagerDoubleBuffer metrics values.
-   * @param flushedTransactionsSize number of transactions flushed in this
-   *                                iteration.
-   */
-  private void updateMetrics(
-      long flushedTransactionsSize) {
-    ozoneManagerDoubleBufferMetrics.incrTotalNumOfFlushOperations();
-    ozoneManagerDoubleBufferMetrics.incrTotalSizeOfFlushedTransactions(
-        flushedTransactionsSize);
-    if (maxFlushedTransactionsInOneIteration < flushedTransactionsSize) {
-      maxFlushedTransactionsInOneIteration = flushedTransactionsSize;
-      ozoneManagerDoubleBufferMetrics
-          .setMaxNumberOfTransactionsFlushedInOneIteration(
-              flushedTransactionsSize);
-    }
-  }
-
-  /**
-   * Stop OM DoubleBuffer flush thread.
-   */
-  public void stop() {
-    if (isRunning.compareAndSet(true, false)) {
-      LOG.info("Stopping OMDoubleBuffer flush thread");
-      daemon.interrupt();
-      try {
-        // Wait for daemon thread to exit
-        daemon.join();
-      } catch (InterruptedException e) {
-        LOG.error("Interrupted while waiting for daemon to exit.");
-      }
-
-      // stop metrics.
-      ozoneManagerDoubleBufferMetrics.unRegister();
-    } else {
-      LOG.info("OMDoubleBuffer flush thread is not running.");
-    }
-
-  }
-
-  private void terminate(IOException ex) {
-    String message = "During flush to DB encountered error in " +
-        "OMDoubleBuffer flush thread " + Thread.currentThread().getName();
-    ExitUtils.terminate(1, message, ex, LOG);
-  }
-
-  /**
-   * Returns the number of transactions flushed to the OM DB.
-   * @return flushedTransactionCount
-   */
-  public long getFlushedTransactionCount() {
-    return flushedTransactionCount.get();
-  }
-
-  /**
-   * Returns total number of flush iterations run by sync thread.
-   * @return flushIterations
-   */
-  public long getFlushIterations() {
-    return flushIterations.get();
-  }
-
-  /**
-   * Add OmResponseBufferEntry to buffer.
-   * @param response
-   * @param transactionIndex
-   */
-  public synchronized CompletableFuture<Void> add(OMClientResponse response,
-      long transactionIndex) {
-    currentBuffer.add(new DoubleBufferEntry<>(transactionIndex, response));
-    notify();
-
-    if (!isRatisEnabled) {
-      CompletableFuture<Void> future = new CompletableFuture<>();
-      currentFutureQueue.add(future);
-      return future;
-    } else {
-      // When Ratis (HA) is enabled we don't need a future to be returned,
-      // and this return value is not used.
-      return null;
-    }
-  }
-
-  /**
-   * Check whether we can flush transactions. This method waits until
-   * currentBuffer has at least one entry; once an entry is added, add()
-   * sends a notify signal, and this method returns true,
-   * indicating that we are ready to flush.
-   *
-   * @return boolean
-   */
-  private synchronized boolean canFlush() throws InterruptedException {
-    // add() notifies when a transaction is added to the buffer; we then
-    // re-check the currentBuffer size and return from this method.
-    while (currentBuffer.size() == 0) {
-      wait(Long.MAX_VALUE);
-    }
-    return true;
-  }
-
-  /**
-   * Prepares the readyBuffer which is used by sync thread to flush
-   * transactions to OM DB. This method swaps the currentBuffer and readyBuffer.
-   */
-  private synchronized void setReadyBuffer() {
-    Queue<DoubleBufferEntry<OMClientResponse>> temp = currentBuffer;
-    currentBuffer = readyBuffer;
-    readyBuffer = temp;
-
-    if (!isRatisEnabled) {
-      // Swap future queue.
-      Queue<CompletableFuture<Void>> tempFuture = currentFutureQueue;
-      currentFutureQueue = readyFutureQueue;
-      readyFutureQueue = tempFuture;
-    }
-  }
-
-  @VisibleForTesting
-  public OzoneManagerDoubleBufferMetrics getOzoneManagerDoubleBufferMetrics() {
-    return ozoneManagerDoubleBufferMetrics;
-  }
-
-}
-
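A minimal, generic sketch of the double-buffer flush pattern described in the class javadoc above, with plain strings standing in for OMClientResponse entries and a println standing in for the DB batch commit; all names here are hypothetical.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;

public class DoubleBufferSketch {
  private Queue<String> currentBuffer = new ConcurrentLinkedQueue<>();
  private Queue<String> readyBuffer = new ConcurrentLinkedQueue<>();
  private final AtomicBoolean running = new AtomicBoolean(true);

  /** Producers add entries and wake the flush thread. */
  public synchronized void add(String entry) {
    currentBuffer.add(entry);
    notify();
  }

  /** Wait until there is something to flush. */
  private synchronized void awaitEntries() throws InterruptedException {
    while (currentBuffer.isEmpty()) {
      wait();
    }
  }

  /** Swap buffers so producers keep writing while we flush. */
  private synchronized void swapBuffers() {
    Queue<String> tmp = currentBuffer;
    currentBuffer = readyBuffer;
    readyBuffer = tmp;
  }

  /** Flush loop: batch the ready entries and "commit" them. */
  public void flushLoop() {
    while (running.get()) {
      try {
        awaitEntries();
        swapBuffers();
        readyBuffer.forEach(e -> System.out.println("commit " + e));
        readyBuffer.clear();
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  }

  public static void main(String[] args) throws Exception {
    DoubleBufferSketch buf = new DoubleBufferSketch();
    Thread flusher = new Thread(buf::flushLoop, "flush");
    flusher.setDaemon(true);
    flusher.start();
    buf.add("tx-1");
    buf.add("tx-2");
    Thread.sleep(100);  // give the flusher time to drain
  }
}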
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java
deleted file mode 100644
index 6f97f56..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.ratis;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.STATUS_CODE;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.protocol.Message;
-import org.apache.ratis.protocol.RaftClientReply;
-import org.apache.ratis.protocol.RaftException;
-import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.StateMachineException;
-import org.apache.ratis.retry.RetryPolicies;
-import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.util.TimeDuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-import com.google.protobuf.ServiceException;
-
-/**
- * OM Ratis client to interact with OM Ratis server endpoint.
- */
-public final class OzoneManagerRatisClient implements Closeable {
-  static final Logger LOG = LoggerFactory.getLogger(
-      OzoneManagerRatisClient.class);
-
-  private final RaftGroup raftGroup;
-  private final String omNodeID;
-  private final RpcType rpcType;
-  private RaftClient raftClient;
-  private final RetryPolicy retryPolicy;
-  private final Configuration conf;
-
-  private OzoneManagerRatisClient(String omNodeId, RaftGroup raftGroup,
-      RpcType rpcType, RetryPolicy retryPolicy,
-      Configuration config) {
-    this.raftGroup = raftGroup;
-    this.omNodeID = omNodeId;
-    this.rpcType = rpcType;
-    this.retryPolicy = retryPolicy;
-    this.conf = config;
-  }
-
-  public static OzoneManagerRatisClient newOzoneManagerRatisClient(
-      String omNodeId, RaftGroup raftGroup, Configuration conf) {
-    final String rpcType = conf.get(
-        OMConfigKeys.OZONE_OM_RATIS_RPC_TYPE_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_RPC_TYPE_DEFAULT);
-
-    final int maxRetryCount = conf.getInt(
-        OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT);
-    final long retryInterval = conf.getTimeDuration(
-        OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT
-            .toIntExact(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
-    final TimeDuration sleepDuration = TimeDuration.valueOf(
-        retryInterval, TimeUnit.MILLISECONDS);
-    final RetryPolicy retryPolicy = RetryPolicies
-        .retryUpToMaximumCountWithFixedSleep(maxRetryCount, sleepDuration);
-
-    return new OzoneManagerRatisClient(omNodeId, raftGroup,
-        SupportedRpcType.valueOfIgnoreCase(rpcType), retryPolicy, conf);
-  }
-
-  public void connect() {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Connecting to OM Ratis Server GroupId:{} OM:{}",
-          raftGroup.getGroupId().getUuid().toString(), omNodeID);
-    }
-
-    // TODO : XceiverClient ratis should pass the config value of
-    // maxOutstandingRequests so as to set the upper bound on max no of async
-    // requests to be handled by raft client
-
-    raftClient = OMRatisHelper.newRaftClient(rpcType, omNodeID, raftGroup,
-        retryPolicy, conf);
-  }
-
-  @Override
-  public void close() {
-    if (raftClient != null) {
-      try {
-        raftClient.close();
-      } catch (IOException e) {
-        throw new IllegalStateException(e);
-      }
-    }
-  }
-
-  /**
-   * Sends a given request to the server and gets the reply back.
-   * @param request Request
-   * @return Response to the command
-   */
-  public OMResponse sendCommand(OMRequest request) throws ServiceException {
-    try {
-      CompletableFuture<OMResponse> reply = sendCommandAsync(request);
-      return reply.get();
-    } catch (ExecutionException | InterruptedException e) {
-      if (e.getCause() instanceof StateMachineException) {
-        OMResponse.Builder omResponse = OMResponse.newBuilder();
-        omResponse.setCmdType(request.getCmdType());
-        omResponse.setSuccess(false);
-        omResponse.setMessage(e.getCause().getMessage());
-        omResponse.setStatus(parseErrorStatus(e.getCause().getMessage()));
-        return omResponse.build();
-      }
-      throw new ServiceException(e);
-    }
-  }
-
-  private OzoneManagerProtocolProtos.Status parseErrorStatus(String message) {
-    if (message.contains(STATUS_CODE)) {
-      String errorCode = message.substring(message.indexOf(STATUS_CODE) +
-          STATUS_CODE.length());
-      LOG.debug("Parsing error message for error code {}", errorCode);
-      return OzoneManagerProtocolProtos.Status.valueOf(errorCode.trim());
-    } else {
-      return OzoneManagerProtocolProtos.Status.INTERNAL_ERROR;
-    }
-
-  }
-
-  /**
-   * Sends a given command to the server and gets a waitable future back.
-   *
-   * @param request Request
-   * @return a future of the response to the command
-   */
-  private CompletableFuture<OMResponse> sendCommandAsync(OMRequest request) {
-    CompletableFuture<RaftClientReply> raftClientReply =
-        sendRequestAsync(request);
-
-    return raftClientReply.whenComplete((reply, e) -> {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("received reply {} for request: cmdType={} traceID={} " +
-                "exception: {}", reply, request.getCmdType(),
-            request.getTraceID(), e);
-      }
-    }).thenApply(reply -> {
-      try {
-        Preconditions.checkNotNull(reply);
-        if (!reply.isSuccess()) {
-          RaftException exception = reply.getException();
-          Preconditions.checkNotNull(exception, "Raft reply failure " +
-              "but no exception propagated.");
-          throw new CompletionException(exception);
-        }
-        return OMRatisHelper.getOMResponseFromRaftClientReply(reply);
-
-      } catch (InvalidProtocolBufferException e) {
-        throw new CompletionException(e);
-      }
-    });
-  }
-
-  /**
-   * Submits {@link RaftClient#sendReadOnlyAsync(Message)} request to Ratis
-   * server if the request is readOnly. Otherwise, submits
-   * {@link RaftClient#sendAsync(Message)} request.
-   * @param request OMRequest
-   * @return RaftClient response
-   */
-  private CompletableFuture<RaftClientReply> sendRequestAsync(
-      OMRequest request) {
-    boolean isReadOnlyRequest = OmUtils.isReadOnly(request);
-    ByteString byteString = OMRatisHelper.convertRequestToByteString(request);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("sendOMRequestAsync {} {}", isReadOnlyRequest, request);
-    }
-    return isReadOnlyRequest ? raftClient.sendReadOnlyAsync(() -> byteString) :
-        raftClient.sendAsync(() -> byteString);
-  }
-}
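For context on the client removed above: its retry policy is assembled from two OM config keys (a maximum retry count and a fixed sleep between attempts), and sendRequestAsync routes read-only requests through RaftClient#sendReadOnlyAsync while writes go through RaftClient#sendAsync. A minimal, self-contained sketch of the same Ratis retry idiom, assuming the org.apache.ratis.retry API used above and illustrative values rather than the OM defaults:

    import java.util.concurrent.TimeUnit;
    import org.apache.ratis.retry.RetryPolicies;
    import org.apache.ratis.retry.RetryPolicy;
    import org.apache.ratis.util.TimeDuration;

    public final class RetryPolicySketch {
      public static void main(String[] args) {
        // Illustrative values; the OM reads these from OMConfigKeys.
        final int maxRetryCount = 10;
        final TimeDuration sleepBetweenRetries =
            TimeDuration.valueOf(1000, TimeUnit.MILLISECONDS);

        // Same construction as newOzoneManagerRatisClient: retry up to a
        // fixed number of attempts with a fixed sleep between them.
        RetryPolicy policy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            maxRetryCount, sleepBetweenRetries);
        System.out.println(policy);
      }
    }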
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
deleted file mode 100644
index 7cab9d2..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
+++ /dev/null
@@ -1,648 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.ratis;
-
-import com.google.common.annotations.VisibleForTesting;
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
-import java.util.UUID;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import com.google.protobuf.InvalidProtocolBufferException;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.ha.OMNodeDetails;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.ratis.RaftConfigKeys;
-import org.apache.ratis.client.RaftClientConfigKeys;
-import org.apache.ratis.conf.RaftProperties;
-import org.apache.ratis.grpc.GrpcConfigKeys;
-import org.apache.ratis.netty.NettyConfigKeys;
-import org.apache.ratis.proto.RaftProtos.RoleInfoProto;
-import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
-import org.apache.ratis.protocol.ClientId;
-import org.apache.ratis.protocol.GroupInfoReply;
-import org.apache.ratis.protocol.GroupInfoRequest;
-import org.apache.ratis.protocol.Message;
-import org.apache.ratis.protocol.NotLeaderException;
-import org.apache.ratis.protocol.RaftClientReply;
-import org.apache.ratis.protocol.RaftClientRequest;
-import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.protocol.RaftPeer;
-import org.apache.ratis.protocol.RaftPeerId;
-import org.apache.ratis.protocol.StateMachineException;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.server.RaftServer;
-import org.apache.ratis.server.RaftServerConfigKeys;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.util.LifeCycle;
-import org.apache.ratis.util.SizeInBytes;
-import org.apache.ratis.util.TimeDuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.STATUS_CODE;
-
-/**
- * Creates a Ratis server endpoint for OM.
- */
-public final class OzoneManagerRatisServer {
-  private static final Logger LOG = LoggerFactory
-      .getLogger(OzoneManagerRatisServer.class);
-
-  private final int port;
-  private final InetSocketAddress omRatisAddress;
-  private final RaftServer server;
-  private final RaftGroupId raftGroupId;
-  private final RaftGroup raftGroup;
-  private final RaftPeerId raftPeerId;
-
-  private final OzoneManager ozoneManager;
-  private final OzoneManagerStateMachine omStateMachine;
-  private final ClientId clientId = ClientId.randomId();
-
-  private final ScheduledExecutorService scheduledRoleChecker;
-  private long roleCheckInitialDelayMs = 1000; // 1 second default
-  private long roleCheckIntervalMs;
-  private ReentrantReadWriteLock roleCheckLock = new ReentrantReadWriteLock();
-  private Optional<RaftPeerRole> cachedPeerRole = Optional.empty();
-  private Optional<RaftPeerId> cachedLeaderPeerId = Optional.empty();
-
-  private static final AtomicLong CALL_ID_COUNTER = new AtomicLong();
-
-  private static long nextCallId() {
-    return CALL_ID_COUNTER.getAndIncrement() & Long.MAX_VALUE;
-  }
-
-  /**
-   * Submit request to Ratis server.
-   * @param omRequest
-   * @return OMResponse - response returned to the client.
-   * @throws ServiceException
-   */
-  public OMResponse submitRequest(OMRequest omRequest) throws ServiceException {
-    RaftClientRequest raftClientRequest =
-        createWriteRaftClientRequest(omRequest);
-    RaftClientReply raftClientReply;
-    try {
-      raftClientReply = server.submitClientRequestAsync(raftClientRequest)
-          .get();
-    } catch (Exception ex) {
-      throw new ServiceException(ex.getMessage(), ex);
-    }
-
-    return processReply(omRequest, raftClientReply);
-  }
-
-  /**
-   * Create Write RaftClient request from OMRequest.
-   * @param omRequest
-   * @return RaftClientRequest - Raft Client request which is submitted to
-   * ratis server.
-   */
-  private RaftClientRequest createWriteRaftClientRequest(OMRequest omRequest) {
-    return new RaftClientRequest(clientId, server.getId(), raftGroupId,
-        nextCallId(),
-        Message.valueOf(OMRatisHelper.convertRequestToByteString(omRequest)),
-        RaftClientRequest.writeRequestType(), null);
-  }
-
-  /**
-   * Process the raftClientReply and return OMResponse.
-   * @param omRequest
-   * @param reply
-   * @return OMResponse - response which is returned to client.
-   * @throws ServiceException
-   */
-  private OMResponse processReply(OMRequest omRequest, RaftClientReply reply)
-      throws ServiceException {
-    // NotLeaderException is thrown only when the raft server to which the
-    // request is submitted is not the leader. This can happen the first
-    // time a client submits a request to the OM.
-    NotLeaderException notLeaderException = reply.getNotLeaderException();
-    if (notLeaderException != null) {
-      throw new ServiceException(notLeaderException);
-    }
-    StateMachineException stateMachineException =
-        reply.getStateMachineException();
-    if (stateMachineException != null) {
-      OMResponse.Builder omResponse = OMResponse.newBuilder();
-      omResponse.setCmdType(omRequest.getCmdType());
-      omResponse.setSuccess(false);
-      omResponse.setMessage(stateMachineException.getCause().getMessage());
-      omResponse.setStatus(parseErrorStatus(
-          stateMachineException.getCause().getMessage()));
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Error while executing ratis request. " +
-            "stateMachineException: ", stateMachineException);
-      }
-      return omResponse.build();
-    }
-
-    try {
-      return OMRatisHelper.getOMResponseFromRaftClientReply(reply);
-    } catch (InvalidProtocolBufferException ex) {
-      if (ex.getMessage() != null) {
-        throw new ServiceException(ex.getMessage(), ex);
-      } else {
-        throw new ServiceException(ex);
-      }
-    }
-
-    // TODO: Still need to handle RaftRetry failure exception and
-    //  NotReplicated exception.
-  }
-
-  /**
-   * Parse errorMessage received from the exception and convert to
-   * {@link OzoneManagerProtocolProtos.Status}.
-   * @param errorMessage
-   * @return OzoneManagerProtocolProtos.Status
-   */
-  private OzoneManagerProtocolProtos.Status parseErrorStatus(
-      String errorMessage) {
-    if (errorMessage.contains(STATUS_CODE)) {
-      String errorCode = errorMessage.substring(
-          errorMessage.indexOf(STATUS_CODE) + STATUS_CODE.length());
-      LOG.debug("Parsing error message for error code " +
-          errorCode);
-      return OzoneManagerProtocolProtos.Status.valueOf(errorCode.trim());
-    } else {
-      return OzoneManagerProtocolProtos.Status.INTERNAL_ERROR;
-    }
-
-  }
-
-
-  /**
-   * Returns an OM Ratis server.
-   * @param conf configuration
-   * @param om the OM instance starting the ratis server
-   * @param raftGroupIdStr raft group id string
-   * @param localRaftPeerId raft peer id of this Ratis server
-   * @param addr address of the ratis server
-   * @param raftPeers peer nodes in the raft ring
-   * @throws IOException
-   */
-  private OzoneManagerRatisServer(Configuration conf,
-      OzoneManager om,
-      String raftGroupIdStr, RaftPeerId localRaftPeerId,
-      InetSocketAddress addr, List<RaftPeer> raftPeers)
-      throws IOException {
-    this.ozoneManager = om;
-    this.omRatisAddress = addr;
-    this.port = addr.getPort();
-    RaftProperties serverProperties = newRaftProperties(conf);
-
-    this.raftPeerId = localRaftPeerId;
-    this.raftGroupId = RaftGroupId.valueOf(
-        getRaftGroupIdFromOmServiceId(raftGroupIdStr));
-    this.raftGroup = RaftGroup.valueOf(raftGroupId, raftPeers);
-
-    StringBuilder raftPeersStr = new StringBuilder();
-    for (RaftPeer peer : raftPeers) {
-      raftPeersStr.append(", ").append(peer.getAddress());
-    }
-    LOG.info("Instantiating OM Ratis server with GroupID: {} and " +
-        "Raft Peers: {}", raftGroupIdStr, raftPeersStr.toString().substring(2));
-
-    this.omStateMachine = getStateMachine();
-
-    this.server = RaftServer.newBuilder()
-        .setServerId(this.raftPeerId)
-        .setGroup(this.raftGroup)
-        .setProperties(serverProperties)
-        .setStateMachine(omStateMachine)
-        .build();
-
-    // Run a scheduler to check and update the server role on the leader
-    // periodically
-    this.scheduledRoleChecker = Executors.newSingleThreadScheduledExecutor();
-    this.scheduledRoleChecker.scheduleWithFixedDelay(new Runnable() {
-      @Override
-      public void run() {
-        // Run this check only on the leader OM
-        if (cachedPeerRole.isPresent() &&
-            cachedPeerRole.get() == RaftPeerRole.LEADER) {
-          updateServerRole();
-        }
-      }
-    }, roleCheckInitialDelayMs, roleCheckIntervalMs, TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Creates an instance of OzoneManagerRatisServer.
-   */
-  public static OzoneManagerRatisServer newOMRatisServer(
-      Configuration ozoneConf, OzoneManager omProtocol,
-      OMNodeDetails omNodeDetails, List<OMNodeDetails> peerNodes)
-      throws IOException {
-
-    // The RaftGroupId is derived from the omServiceId
-    String omServiceId = omNodeDetails.getOMServiceId();
-
-    String omNodeId = omNodeDetails.getOMNodeId();
-    RaftPeerId localRaftPeerId = RaftPeerId.getRaftPeerId(omNodeId);
-
-    InetSocketAddress ratisAddr = new InetSocketAddress(
-        omNodeDetails.getAddress(), omNodeDetails.getRatisPort());
-
-    RaftPeer localRaftPeer = new RaftPeer(localRaftPeerId, ratisAddr);
-
-    List<RaftPeer> raftPeers = new ArrayList<>();
-    // Add this Ratis server to the Ratis ring
-    raftPeers.add(localRaftPeer);
-
-    for (OMNodeDetails peerInfo : peerNodes) {
-      String peerNodeId = peerInfo.getOMNodeId();
-      InetSocketAddress peerRatisAddr = new InetSocketAddress(
-          peerInfo.getAddress(), peerInfo.getRatisPort());
-      RaftPeerId raftPeerId = RaftPeerId.valueOf(peerNodeId);
-      RaftPeer raftPeer = new RaftPeer(raftPeerId, peerRatisAddr);
-
-      // Add other OM nodes belonging to the same OM service to the Ratis ring
-      raftPeers.add(raftPeer);
-    }
-
-    return new OzoneManagerRatisServer(ozoneConf, omProtocol, omServiceId,
-        localRaftPeerId, ratisAddr, raftPeers);
-  }
-
-  public RaftGroup getRaftGroup() {
-    return this.raftGroup;
-  }
-
-  /**
-   * Initializes and returns OzoneManager StateMachine.
-   */
-  private OzoneManagerStateMachine getStateMachine() {
-    return new OzoneManagerStateMachine(this);
-  }
-
-  @VisibleForTesting
-  public OzoneManagerStateMachine getOmStateMachine() {
-    return omStateMachine;
-  }
-
-  public OzoneManager getOzoneManager() {
-    return ozoneManager;
-  }
-
-  /**
-   * Start the Ratis server.
-   * @throws IOException
-   */
-  public void start() throws IOException {
-    LOG.info("Starting {} {} at port {}", getClass().getSimpleName(),
-        server.getId(), port);
-    server.start();
-  }
-
-  public void stop() {
-    try {
-      server.close();
-      omStateMachine.stop();
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  // TODO: simplify this method to make it shorter
-  @SuppressWarnings("methodlength")
-  private RaftProperties newRaftProperties(Configuration conf) {
-    final RaftProperties properties = new RaftProperties();
-
-    // Set RPC type
-    final String rpcType = conf.get(
-        OMConfigKeys.OZONE_OM_RATIS_RPC_TYPE_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_RPC_TYPE_DEFAULT);
-    final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType);
-    RaftConfigKeys.Rpc.setType(properties, rpc);
-
-    // Set the ratis port number
-    if (rpc == SupportedRpcType.GRPC) {
-      GrpcConfigKeys.Server.setPort(properties, port);
-    } else if (rpc == SupportedRpcType.NETTY) {
-      NettyConfigKeys.Server.setPort(properties, port);
-    }
-
-    // Set Ratis storage directory
-    String storageDir = OmUtils.getOMRatisDirectory(conf);
-    RaftServerConfigKeys.setStorageDirs(properties,
-        Collections.singletonList(new File(storageDir)));
-
-    // Set RAFT segment size
-    final int raftSegmentSize = (int) conf.getStorageSize(
-        OMConfigKeys.OZONE_OM_RATIS_SEGMENT_SIZE_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_SEGMENT_SIZE_DEFAULT,
-        StorageUnit.BYTES);
-    RaftServerConfigKeys.Log.setSegmentSizeMax(properties,
-        SizeInBytes.valueOf(raftSegmentSize));
-
-    // Set RAFT segment pre-allocated size
-    final int raftSegmentPreallocatedSize = (int) conf.getStorageSize(
-        OMConfigKeys.OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT,
-        StorageUnit.BYTES);
-    int logAppenderQueueNumElements = conf.getInt(
-        OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS,
-        OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT);
-    final int logAppenderQueueByteLimit = (int) conf.getStorageSize(
-        OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT,
-        OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT,
-        StorageUnit.BYTES);
-    RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties,
-        logAppenderQueueNumElements);
-    RaftServerConfigKeys.Log.Appender.setBufferByteLimit(properties,
-        SizeInBytes.valueOf(logAppenderQueueByteLimit));
-    RaftServerConfigKeys.Log.setPreallocatedSize(properties,
-        SizeInBytes.valueOf(raftSegmentPreallocatedSize));
-    RaftServerConfigKeys.Log.Appender.setInstallSnapshotEnabled(properties,
-        false);
-    final int logPurgeGap = conf.getInt(
-        OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP,
-        OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP_DEFAULT);
-    RaftServerConfigKeys.Log.setPurgeGap(properties, logPurgeGap);
-
-    // For grpc set the maximum message size
-    // TODO: calculate the optimal max message size
-    GrpcConfigKeys.setMessageSizeMax(properties,
-        SizeInBytes.valueOf(logAppenderQueueByteLimit));
-
-    // Set the server request timeout
-    TimeUnit serverRequestTimeoutUnit =
-        OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT.getUnit();
-    long serverRequestTimeoutDuration = conf.getTimeDuration(
-        OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT
-            .getDuration(), serverRequestTimeoutUnit);
-    final TimeDuration serverRequestTimeout = TimeDuration.valueOf(
-        serverRequestTimeoutDuration, serverRequestTimeoutUnit);
-    RaftServerConfigKeys.Rpc.setRequestTimeout(properties,
-        serverRequestTimeout);
-
-    // Set timeout for server retry cache entry
-    TimeUnit retryCacheTimeoutUnit = OMConfigKeys
-        .OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT.getUnit();
-    long retryCacheTimeoutDuration = conf.getTimeDuration(
-        OMConfigKeys.OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT
-            .getDuration(), retryCacheTimeoutUnit);
-    final TimeDuration retryCacheTimeout = TimeDuration.valueOf(
-        retryCacheTimeoutDuration, retryCacheTimeoutUnit);
-    RaftServerConfigKeys.RetryCache.setExpiryTime(properties,
-        retryCacheTimeout);
-
-    // Set the server min and max timeout
-    TimeUnit serverMinTimeoutUnit =
-        OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_DEFAULT.getUnit();
-    long serverMinTimeoutDuration = conf.getTimeDuration(
-        OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_DEFAULT
-            .getDuration(), serverMinTimeoutUnit);
-    final TimeDuration serverMinTimeout = TimeDuration.valueOf(
-        serverMinTimeoutDuration, serverMinTimeoutUnit);
-    long serverMaxTimeoutDuration =
-        serverMinTimeout.toLong(TimeUnit.MILLISECONDS) + 200;
-    final TimeDuration serverMaxTimeout = TimeDuration.valueOf(
-        serverMaxTimeoutDuration, serverMinTimeoutUnit);
-    RaftServerConfigKeys.Rpc.setTimeoutMin(properties,
-        serverMinTimeout);
-    RaftServerConfigKeys.Rpc.setTimeoutMax(properties,
-        serverMaxTimeout);
-
-    // Set the number of maximum cached segments
-    RaftServerConfigKeys.Log.setMaxCachedSegmentNum(properties, 2);
-
-    // Set the client request timeout
-    TimeUnit clientRequestTimeoutUnit = OMConfigKeys
-        .OZONE_OM_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT.getUnit();
-    long clientRequestTimeoutDuration = conf.getTimeDuration(
-        OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
-            .getDuration(), clientRequestTimeoutUnit);
-    final TimeDuration clientRequestTimeout = TimeDuration.valueOf(
-        clientRequestTimeoutDuration, clientRequestTimeoutUnit);
-    RaftClientConfigKeys.Rpc.setRequestTimeout(properties,
-        clientRequestTimeout);
-
-    // TODO: set max write buffer size
-
-    // Set the ratis leader election timeout
-    TimeUnit leaderElectionMinTimeoutUnit =
-        OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
-            .getUnit();
-    long leaderElectionMinTimeoutDuration = conf.getTimeDuration(
-        OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
-        OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
-            .getDuration(), leaderElectionMinTimeoutUnit);
-    final TimeDuration leaderElectionMinTimeout = TimeDuration.valueOf(
-        leaderElectionMinTimeoutDuration, leaderElectionMinTimeoutUnit);
-    RaftServerConfigKeys.Rpc.setTimeoutMin(properties,
-        leaderElectionMinTimeout);
-    long leaderElectionMaxTimeout = leaderElectionMinTimeout.toLong(
-        TimeUnit.MILLISECONDS) + 200;
-    RaftServerConfigKeys.Rpc.setTimeoutMax(properties,
-        TimeDuration.valueOf(leaderElectionMaxTimeout, TimeUnit.MILLISECONDS));
-
-    TimeUnit nodeFailureTimeoutUnit =
-        OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT
-            .getUnit();
-    long nodeFailureTimeoutDuration = conf.getTimeDuration(
-        OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT
-            .getDuration(), nodeFailureTimeoutUnit);
-    final TimeDuration nodeFailureTimeout = TimeDuration.valueOf(
-        nodeFailureTimeoutDuration, nodeFailureTimeoutUnit);
-    RaftServerConfigKeys.Notification.setNoLeaderTimeout(properties,
-        nodeFailureTimeout);
-    RaftServerConfigKeys.Rpc.setSlownessTimeout(properties,
-        nodeFailureTimeout);
-
-    TimeUnit roleCheckIntervalUnit =
-        OMConfigKeys.OZONE_OM_RATIS_SERVER_ROLE_CHECK_INTERVAL_DEFAULT
-            .getUnit();
-    long roleCheckIntervalDuration = conf.getTimeDuration(
-        OMConfigKeys.OZONE_OM_RATIS_SERVER_ROLE_CHECK_INTERVAL_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_SERVER_ROLE_CHECK_INTERVAL_DEFAULT
-            .getDuration(), roleCheckIntervalUnit);
-    this.roleCheckIntervalMs = TimeDuration.valueOf(
-        roleCheckIntervalDuration, roleCheckIntervalUnit)
-        .toLong(TimeUnit.MILLISECONDS);
-    this.roleCheckInitialDelayMs = leaderElectionMinTimeout
-        .toLong(TimeUnit.MILLISECONDS);
-
-    long snapshotAutoTriggerThreshold = conf.getLong(
-        OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_DEFAULT);
-    RaftServerConfigKeys.Snapshot.setAutoTriggerEnabled(
-        properties, true);
-    RaftServerConfigKeys.Snapshot.setAutoTriggerThreshold(
-        properties, snapshotAutoTriggerThreshold);
-
-    return properties;
-  }
-
-  /**
-   * Check the cached leader status.
-   * @return true if cached role is Leader, false otherwise.
-   */
-  private boolean checkCachedPeerRoleIsLeader() {
-    this.roleCheckLock.readLock().lock();
-    try {
-      if (cachedPeerRole.isPresent() &&
-          cachedPeerRole.get() == RaftPeerRole.LEADER) {
-        return true;
-      }
-      return false;
-    } finally {
-      this.roleCheckLock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Check if the current OM node is the leader node.
-   * @return true if Leader, false otherwise.
-   */
-  public boolean isLeader() {
-    if (checkCachedPeerRoleIsLeader()) {
-      return true;
-    }
-
-    // Get the server role from ratis server and update the cached values.
-    updateServerRole();
-
-    // After updating the server role, check and return if leader or not.
-    return checkCachedPeerRoleIsLeader();
-  }
-
-  /**
-   * Get the suggested leader peer id.
-   * @return RaftPeerId of the suggested leader node.
-   */
-  public Optional<RaftPeerId> getCachedLeaderPeerId() {
-    this.roleCheckLock.readLock().lock();
-    try {
-      return cachedLeaderPeerId;
-    } finally {
-      this.roleCheckLock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Get the group info (peer role and leader peer id) from Ratis server and
-   * update the OM server role.
-   */
-  public void updateServerRole() {
-    try {
-      GroupInfoReply groupInfo = getGroupInfo();
-      RoleInfoProto roleInfoProto = groupInfo.getRoleInfoProto();
-      RaftPeerRole thisNodeRole = roleInfoProto.getRole();
-
-      if (thisNodeRole.equals(RaftPeerRole.LEADER)) {
-        setServerRole(thisNodeRole, raftPeerId);
-
-      } else if (thisNodeRole.equals(RaftPeerRole.FOLLOWER)) {
-        ByteString leaderNodeId = roleInfoProto.getFollowerInfo()
-            .getLeaderInfo().getId().getId();
-        // There is a chance that leaderNodeId is null here. For example,
-        // in a 3-node OM Ratis ring, if 2 OM nodes are down, there will
-        // be no leader.
-        RaftPeerId leaderPeerId = null;
-        if (leaderNodeId != null && !leaderNodeId.isEmpty()) {
-          leaderPeerId = RaftPeerId.valueOf(leaderNodeId);
-        }
-
-        setServerRole(thisNodeRole, leaderPeerId);
-
-      } else {
-        setServerRole(thisNodeRole, null);
-
-      }
-    } catch (IOException e) {
-      LOG.error("Failed to retrieve RaftPeerRole. Setting cached role to " +
-          "{} and resetting leader info.", RaftPeerRole.UNRECOGNIZED, e);
-      setServerRole(null, null);
-    }
-  }
-
-  /**
-   * Set the current server role and the leader peer id.
-   */
-  private void setServerRole(RaftPeerRole currentRole,
-      RaftPeerId leaderPeerId) {
-    this.roleCheckLock.writeLock().lock();
-    try {
-      this.cachedPeerRole = Optional.ofNullable(currentRole);
-      this.cachedLeaderPeerId = Optional.ofNullable(leaderPeerId);
-    } finally {
-      this.roleCheckLock.writeLock().unlock();
-    }
-  }
-
-  private GroupInfoReply getGroupInfo() throws IOException {
-    GroupInfoRequest groupInfoRequest = new GroupInfoRequest(clientId,
-        raftPeerId, raftGroupId, nextCallId());
-    GroupInfoReply groupInfo = server.getGroupInfo(groupInfoRequest);
-    return groupInfo;
-  }
-
-  public int getServerPort() {
-    return port;
-  }
-
-  @VisibleForTesting
-  public LifeCycle.State getServerState() {
-    return server.getLifeCycleState();
-  }
-
-  @VisibleForTesting
-  public RaftPeerId getRaftPeerId() {
-    return this.raftPeerId;
-  }
-
-  private UUID getRaftGroupIdFromOmServiceId(String omServiceId) {
-    return UUID.nameUUIDFromBytes(omServiceId.getBytes(StandardCharsets.UTF_8));
-  }
-
-  public long getStateMachineLastAppliedIndex() {
-    return omStateMachine.getLastAppliedIndex();
-  }
-}
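One detail worth noting in the server removed above: getRaftGroupIdFromOmServiceId derives the Raft group id deterministically from the shared omServiceId, so every OM node configured with the same service id computes the same group and the peers form one ring without exchanging ids. A self-contained sketch of that derivation (the service id string is illustrative):

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.ratis.protocol.RaftGroupId;

    public final class GroupIdSketch {
      public static void main(String[] args) {
        String omServiceId = "om-service-test1";  // illustrative value

        // UUID.nameUUIDFromBytes is a pure function of its input, so all
        // OM nodes sharing a service id compute an identical group id.
        UUID uuid = UUID.nameUUIDFromBytes(
            omServiceId.getBytes(StandardCharsets.UTF_8));
        RaftGroupId groupId = RaftGroupId.valueOf(uuid);
        System.out.println(groupId);
      }
    }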
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisSnapshot.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisSnapshot.java
deleted file mode 100644
index 5180261..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisSnapshot.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.ratis;
-
-/**
- * Functional interface for OM RatisSnapshot.
- */
-public interface OzoneManagerRatisSnapshot {
-
-  /**
-   * Update lastAppliedIndex with the specified value in OzoneManager
-   * StateMachine.
-   * @param lastAppliedIndex
-   */
-  void updateLastAppliedIndex(long lastAppliedIndex);
-}
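Since OzoneManagerRatisSnapshot declares a single abstract method, callers can supply it as a lambda or method reference; the state machine below passes this::updateLastAppliedIndex to its double buffer. A hedged usage sketch, with a local stand-in for the interface and an AtomicLong as illustrative state:

    import java.util.concurrent.atomic.AtomicLong;

    public final class SnapshotCallbackSketch {
      // Local stand-in mirroring the interface deleted above.
      interface OzoneManagerRatisSnapshot {
        void updateLastAppliedIndex(long lastAppliedIndex);
      }

      public static void main(String[] args) {
        AtomicLong lastApplied = new AtomicLong();  // illustrative state
        OzoneManagerRatisSnapshot callback = lastApplied::set;
        callback.updateLastAppliedIndex(42L);
        System.out.println(lastApplied.get());  // prints 42
      }
    }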
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
deleted file mode 100644
index e302956..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
+++ /dev/null
@@ -1,377 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.ratis;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.ServiceException;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocolPB.OzoneManagerHARequestHandler;
-import org.apache.hadoop.ozone.protocolPB.OzoneManagerHARequestHandlerImpl;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.apache.ratis.proto.RaftProtos;
-import org.apache.ratis.protocol.Message;
-import org.apache.ratis.protocol.RaftClientRequest;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.protocol.RaftPeerId;
-import org.apache.ratis.server.RaftServer;
-import org.apache.ratis.server.protocol.TermIndex;
-import org.apache.ratis.server.storage.RaftStorage;
-import org.apache.ratis.statemachine.SnapshotInfo;
-import org.apache.ratis.statemachine.TransactionContext;
-import org.apache.ratis.statemachine.impl.BaseStateMachine;
-import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.util.LifeCycle;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The OM StateMachine is the state machine for the OM Ratis server. It is
- * responsible for applying ratis committed transactions to
- * {@link OzoneManager}.
- */
-public class OzoneManagerStateMachine extends BaseStateMachine {
-
-  static final Logger LOG =
-      LoggerFactory.getLogger(OzoneManagerStateMachine.class);
-  private final SimpleStateMachineStorage storage =
-      new SimpleStateMachineStorage();
-  private final OzoneManagerRatisServer omRatisServer;
-  private final OzoneManager ozoneManager;
-  private OzoneManagerHARequestHandler handler;
-  private RaftGroupId raftGroupId;
-  private long lastAppliedIndex;
-  private OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer;
-  private final OMRatisSnapshotInfo snapshotInfo;
-  private final ExecutorService executorService;
-  private final ExecutorService installSnapshotExecutor;
-
-  public OzoneManagerStateMachine(OzoneManagerRatisServer ratisServer) {
-    this.omRatisServer = ratisServer;
-    this.ozoneManager = omRatisServer.getOzoneManager();
-
-    this.snapshotInfo = ozoneManager.getSnapshotInfo();
-    updateLastAppliedIndexWithSnapshotIndex();
-
-    this.ozoneManagerDoubleBuffer =
-        new OzoneManagerDoubleBuffer(ozoneManager.getMetadataManager(),
-            this::updateLastAppliedIndex);
-
-    this.handler = new OzoneManagerHARequestHandlerImpl(ozoneManager,
-        ozoneManagerDoubleBuffer);
-
-    ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true)
-        .setNameFormat("OM StateMachine ApplyTransaction Thread - %d").build();
-    this.executorService = HadoopExecutors.newSingleThreadExecutor(build);
-    this.installSnapshotExecutor = HadoopExecutors.newSingleThreadExecutor();
-  }
-
-  /**
-   * Initializes the State Machine with the given server, group and storage.
-   */
-  @Override
-  public void initialize(RaftServer server, RaftGroupId id,
-      RaftStorage raftStorage) throws IOException {
-    lifeCycle.startAndTransition(() -> {
-      super.initialize(server, id, raftStorage);
-      this.raftGroupId = id;
-      storage.init(raftStorage);
-    });
-  }
-
-  @Override
-  public SnapshotInfo getLatestSnapshot() {
-    return snapshotInfo;
-  }
-
-  /**
-   * Called to notify the state machine about indexes which are processed
-   * internally by the Raft server; currently this happens when
-   * configuration entries are processed by the Raft server. It lets the
-   * state machine keep track of index updates.
-   * @param term term of the current log entry
-   * @param index index which is being updated
-   */
-  @Override
-  public void notifyIndexUpdate(long term, long index) {
-    // SnapshotInfo should be updated when the term changes.
-    // The index here refers to the log entry index and the index in
-    // SnapshotInfo represents the snapshotIndex i.e. the index of the last
-    // transaction included in the snapshot. Hence, snapshotInfo#index is not
-    // updated here.
-    snapshotInfo.updateTerm(term);
-  }
-
-  /**
-   * Validate/pre-process the incoming update request in the state machine.
-   * @return the content to be written to the log entry. Null means the request
-   * should be rejected.
-   * @throws IOException thrown by the state machine while validating
-   */
-  @Override
-  public TransactionContext startTransaction(
-      RaftClientRequest raftClientRequest) throws IOException {
-    ByteString messageContent = raftClientRequest.getMessage().getContent();
-    OMRequest omRequest = OMRatisHelper.convertByteStringToOMRequest(
-        messageContent);
-
-    Preconditions.checkArgument(raftClientRequest.getRaftGroupId().equals(
-        raftGroupId));
-    try {
-      handler.validateRequest(omRequest);
-    } catch (IOException ioe) {
-      TransactionContext ctxt = TransactionContext.newBuilder()
-          .setClientRequest(raftClientRequest)
-          .setStateMachine(this)
-          .setServerRole(RaftProtos.RaftPeerRole.LEADER)
-          .build();
-      ctxt.setException(ioe);
-      return ctxt;
-    }
-    return handleStartTransactionRequests(raftClientRequest, omRequest);
-  }
-
-  /*
-   * Apply a committed log entry to the state machine.
-   */
-  @Override
-  public CompletableFuture<Message> applyTransaction(TransactionContext trx) {
-    try {
-      OMRequest request = OMRatisHelper.convertByteStringToOMRequest(
-          trx.getStateMachineLogEntry().getLogData());
-      long trxLogIndex = trx.getLogEntry().getIndex();
-      // In the current approach we have one global executor with a single
-      // thread. This is done for correctness: applyTransaction runs on
-      // multiple OMs and we want to execute the transactions in the same
-      // order on all of them, otherwise the OM replicas can get out of
-      // sync.
-      // TODO: In this way we are making all applyTransactions in
-      // OM serial order. Revisit this in future to use multiple executors for
-      // volume/bucket.
-
-      // The reason for not immediately implementing an executor per
-      // volume is that if one executor's operations are slow, we cannot
-      // advance the lastAppliedIndex in the OzoneManager StateMachine,
-      // even if another executor has completed transactions with higher
-      // ids.
-
-      // Example: suppose we have 300 transactions, 150 per volume.
-      // Volume1 owns transactions 0 - 149 and Volume2 owns transactions
-      // 150 - 299.
-      // Executor1 - Volume1 - 100 (currently completed transaction)
-      // Executor2 - Volume2 - 299 (currently completed transaction)
-
-      // Now transactions 0 - 100 and 150 - 299 have been applied, but we
-      // cannot update lastAppliedIndex to 299; it must stay at 100, since
-      // 101 - 149 are not yet applied. On restart the OM replays
-      // transactions from lastAppliedIndex, so it can only move from 100
-      // to 299 after 101 - 149 complete. In the initial stage we start
-      // with a single global executor. Will revisit this when needed.
-
-      CompletableFuture<Message> future = CompletableFuture.supplyAsync(
-          () -> runCommand(request, trxLogIndex), executorService);
-      return future;
-    } catch (IOException e) {
-      return completeExceptionally(e);
-    }
-  }
-
-  /**
-   * Query the state machine. The request must be read-only.
-   */
-  @Override
-  public CompletableFuture<Message> query(Message request) {
-    try {
-      OMRequest omRequest = OMRatisHelper.convertByteStringToOMRequest(
-          request.getContent());
-      return CompletableFuture.completedFuture(queryCommand(omRequest));
-    } catch (IOException e) {
-      return completeExceptionally(e);
-    }
-  }
-
-  @Override
-  public void pause() {
-    lifeCycle.transition(LifeCycle.State.PAUSING);
-    lifeCycle.transition(LifeCycle.State.PAUSED);
-    ozoneManagerDoubleBuffer.stop();
-  }
-
-  /**
-   * Unpause the StateMachine, re-initialize the DoubleBuffer and update the
-   * lastAppliedIndex. This should be done after uploading new state to the
-   * StateMachine.
-   */
-  public void unpause(long newLastAppliedSnapshotIndex) {
-    lifeCycle.startAndTransition(() -> {
-      this.ozoneManagerDoubleBuffer =
-          new OzoneManagerDoubleBuffer(ozoneManager.getMetadataManager(),
-              this::updateLastAppliedIndex);
-      this.updateLastAppliedIndex(newLastAppliedSnapshotIndex);
-    });
-  }
-
-  /**
-   * Take an OM Ratis snapshot and write the snapshot index to a file. The
-   * snapshot index is the log index corresponding to the last applied
-   * transaction on the OM State Machine.
-   *
-   * @return the last applied index on the state machine which has been
-   * stored in the snapshot file.
-   */
-  @Override
-  public long takeSnapshot() throws IOException {
-    LOG.info("Saving Ratis snapshot on the OM.");
-    if (ozoneManager != null) {
-      return ozoneManager.saveRatisSnapshot();
-    }
-    return 0;
-  }
-
-  /**
-   * Leader OM has purged entries from its log. To catch up, OM must download
-   * the latest checkpoint from the leader OM and install it.
-   * @param roleInfoProto the leader node information
-   * @param firstTermIndexInLog TermIndex of the first append entry available
-   *                           in the Leader's log.
-   * @return the last term index included in the installed snapshot.
-   */
-  @Override
-  public CompletableFuture<TermIndex> notifyInstallSnapshotFromLeader(
-      RaftProtos.RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) {
-
-    String leaderNodeId = RaftPeerId.valueOf(roleInfoProto.getSelf().getId())
-        .toString();
-
-    LOG.info("Received install snapshot notificaiton form OM leader: {} with " +
-            "term index: {}", leaderNodeId, firstTermIndexInLog);
-
-    if (!roleInfoProto.getRole().equals(RaftProtos.RaftPeerRole.LEADER)) {
-      // A non-leader Ratis server should not send this notification.
-      LOG.error("Received Install Snapshot notification from non-leader OM " +
-          "node: {}. Ignoring the notification.", leaderNodeId);
-      return completeExceptionally(new OMException("Received notification to " +
-          "install snaphost from non-leader OM node",
-          OMException.ResultCodes.RATIS_ERROR));
-    }
-
-    CompletableFuture<TermIndex> future = CompletableFuture.supplyAsync(
-        () -> ozoneManager.installSnapshot(leaderNodeId),
-        installSnapshotExecutor);
-    return future;
-  }
-
-  /**
-   * Notifies the state machine that the raft peer is no longer leader.
-   */
-  @Override
-  public void notifyNotLeader(Collection<TransactionContext> pendingEntries)
-      throws IOException {
-    omRatisServer.updateServerRole();
-  }
-
-  /**
-   * Handle the RaftClientRequest and return TransactionContext object.
-   * @param raftClientRequest
-   * @param omRequest
-   * @return TransactionContext
-   */
-  private TransactionContext handleStartTransactionRequests(
-      RaftClientRequest raftClientRequest, OMRequest omRequest) {
-
-    return TransactionContext.newBuilder()
-        .setClientRequest(raftClientRequest)
-        .setStateMachine(this)
-        .setServerRole(RaftProtos.RaftPeerRole.LEADER)
-        .setLogData(raftClientRequest.getMessage().getContent())
-        .build();
-  }
-
-  /**
-   * Submits a write request to OM and returns the response Message.
-   * @param request OMRequest
-   * @return response from OM
-   */
-  private Message runCommand(OMRequest request, long trxLogIndex) {
-    OMResponse response = handler.handleApplyTransaction(request, trxLogIndex);
-    lastAppliedIndex = trxLogIndex;
-    return OMRatisHelper.convertResponseToMessage(response);
-  }
-
-  @SuppressWarnings("HiddenField")
-  public void updateLastAppliedIndex(long lastAppliedIndex) {
-    this.lastAppliedIndex = lastAppliedIndex;
-  }
-
-  public void updateLastAppliedIndexWithSnapshotIndex() {
-    this.lastAppliedIndex = snapshotInfo.getIndex();
-  }
-
-  /**
-   * Submits a read request to OM and returns the response Message.
-   * @param request OMRequest
-   * @return response from OM
-   */
-  private Message queryCommand(OMRequest request) {
-    OMResponse response = handler.handle(request);
-    return OMRatisHelper.convertResponseToMessage(response);
-  }
-
-  public long getLastAppliedIndex() {
-    return lastAppliedIndex;
-  }
-
-  private static <T> CompletableFuture<T> completeExceptionally(Exception e) {
-    final CompletableFuture<T> future = new CompletableFuture<>();
-    future.completeExceptionally(e);
-    return future;
-  }
-
-  @VisibleForTesting
-  public void setHandler(OzoneManagerHARequestHandler handler) {
-    this.handler = handler;
-  }
-
-  @VisibleForTesting
-  public void setRaftGroupId(RaftGroupId raftGroupId) {
-    this.raftGroupId = raftGroupId;
-  }
-
-  public void stop() {
-    ozoneManagerDoubleBuffer.stop();
-    HadoopExecutors.shutdown(executorService, LOG, 5, TimeUnit.SECONDS);
-    HadoopExecutors.shutdown(installSnapshotExecutor, LOG, 5, TimeUnit.SECONDS);
-  }
-}
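The applyTransaction comment above argues that a single-threaded executor applies transactions in log order on every replica, which is what keeps lastAppliedIndex monotonic. A toy, self-contained sketch of that ordering property (the task bodies are illustrative, not OM code):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public final class OrderingSketch {
      public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // With one thread, task i always finishes before task i + 1 starts,
        // so indexes are applied strictly in submission (log) order.
        for (long index = 0; index < 5; index++) {
          final long trxLogIndex = index;
          CompletableFuture.supplyAsync(() -> {
            System.out.println("applying index " + trxLogIndex);
            return trxLogIndex;
          }, executor);
        }
        executor.shutdown();  // queued tasks still run to completion
      }
    }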
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/DoubleBufferEntry.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/DoubleBufferEntry.java
deleted file mode 100644
index cd4c5ae..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/DoubleBufferEntry.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.ratis.helpers;
-
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-
-/**
- * Entry in OzoneManagerDoubleBuffer.
- * @param <Response>
- */
-public class DoubleBufferEntry<Response extends OMClientResponse> {
-
-  private long trxLogIndex;
-  private Response response;
-
-  public DoubleBufferEntry(long trxLogIndex, Response response) {
-    this.trxLogIndex = trxLogIndex;
-    this.response = response;
-  }
-
-  public long getTrxLogIndex() {
-    return trxLogIndex;
-  }
-
-  public Response getResponse() {
-    return response;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/package-info.java
deleted file mode 100644
index b12a324..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-/**
- * Package which contains helper classes for OM request responses.
- */
-package org.apache.hadoop.ozone.om.ratis.helpers;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java
deleted file mode 100644
index e2d7f72..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.ratis.metrics;
-
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-
-/**
- * Class which maintains metrics related to OzoneManager DoubleBuffer.
- */
-public class OzoneManagerDoubleBufferMetrics {
-
-  private static final String SOURCE_NAME =
-      OzoneManagerDoubleBufferMetrics.class.getSimpleName();
-
-  @Metric(about = "Total Number of flush operations happened in " +
-      "OzoneManagerDoubleBuffer.")
-  private MutableCounterLong totalNumOfFlushOperations;
-
-  @Metric(about = "Total Number of flushed transactions happened in " +
-      "OzoneManagerDoubleBuffer.")
-  private MutableCounterLong totalNumOfFlushedTransactions;
-
-  @Metric(about = "Max Number of transactions flushed in a iteration in " +
-      "OzoneManagerDoubleBuffer. This will provide a value which is maximum " +
-      "number of transactions flushed in a single flush iteration till now.")
-  private MutableCounterLong maxNumberOfTransactionsFlushedInOneIteration;
-
-
-  public static OzoneManagerDoubleBufferMetrics create() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME,
-        "OzoneManager DoubleBuffer Metrics",
-        new OzoneManagerDoubleBufferMetrics());
-  }
-
-  public void incrTotalNumOfFlushOperations() {
-    this.totalNumOfFlushOperations.incr();
-  }
-
-  public void incrTotalSizeOfFlushedTransactions(
-      long flushedTransactions) {
-    this.totalNumOfFlushedTransactions.incr(flushedTransactions);
-  }
-
-  public void setMaxNumberOfTransactionsFlushedInOneIteration(
-      long maxTransactions) {
-    // Counters can only be incremented, so "set" the value by
-    // incrementing with the delta between maxTransactions and the
-    // current value.
-    this.maxNumberOfTransactionsFlushedInOneIteration.incr(
-        Math.negateExact(getMaxNumberOfTransactionsFlushedInOneIteration())
-            + maxTransactions);
-  }
-
-  public long getTotalNumOfFlushOperations() {
-    return totalNumOfFlushOperations.value();
-  }
-
-  public long getTotalNumOfFlushedTransactions() {
-    return totalNumOfFlushedTransactions.value();
-  }
-
-  public long getMaxNumberOfTransactionsFlushedInOneIteration() {
-    return maxNumberOfTransactionsFlushedInOneIteration.value();
-  }
-
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE_NAME);
-  }
-}
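MutableCounterLong is increment-only, so setMaxNumberOfTransactionsFlushedInOneIteration above "sets" a new maximum by incrementing with the delta between the new and the current value. A plain-Java sketch of the same arithmetic, using a local stand-in rather than the Hadoop metrics class:

    public final class CounterSetSketch {
      // Minimal stand-in for an increment-only counter.
      static final class IncrOnlyCounter {
        private long value;
        void incr(long delta) { value += delta; }
        long value() { return value; }
      }

      public static void main(String[] args) {
        IncrOnlyCounter max = new IncrOnlyCounter();
        max.incr(100);  // illustrative current maximum

        long newMax = 250;
        // Incrementing by (newMax - current) effectively sets the counter.
        max.incr(Math.negateExact(max.value()) + newMax);
        System.out.println(max.value());  // prints 250
      }
    }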
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java
deleted file mode 100644
index e41c645..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- * Package which contains metrics classes.
- */
-package org.apache.hadoop.ozone.om.ratis.metrics;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java
deleted file mode 100644
index ea25f13..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.ratis;
-
-/**
- * This package contains classes for the OM Ratis server implementation.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerDoubleBufferHelper.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerDoubleBufferHelper.java
deleted file mode 100644
index d893f52..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerDoubleBufferHelper.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.ratis.utils;
-
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-
-import java.util.concurrent.CompletableFuture;
-
-/**
- * Helper interface for OzoneManagerDoubleBuffer.
- *
- */
-public interface OzoneManagerDoubleBufferHelper {
-
-  CompletableFuture<Void> add(OMClientResponse response,
-      long transactionIndex);
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
deleted file mode 100644
index 4f01960..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.ratis.utils;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest;
-import org.apache.hadoop.ozone.om.request.bucket.OMBucketDeleteRequest;
-import org.apache.hadoop.ozone.om.request.bucket.OMBucketSetPropertyRequest;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketAddAclRequest;
-import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketRemoveAclRequest;
-import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketSetAclRequest;
-import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
-import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
-import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest;
-import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
-import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
-import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
-import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
-import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest;
-import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest;
-import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequest;
-import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequest;
-import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixAddAclRequest;
-import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixRemoveAclRequest;
-import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixSetAclRequest;
-import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest;
-import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketDeleteRequest;
-import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequest;
-import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest;
-import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequest;
-import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest;
-import org.apache.hadoop.ozone.om.request.s3.security.S3GetSecretRequest;
-import org.apache.hadoop.ozone.om.request.security.OMCancelDelegationTokenRequest;
-import org.apache.hadoop.ozone.om.request.security.OMGetDelegationTokenRequest;
-import org.apache.hadoop.ozone.om.request.security.OMRenewDelegationTokenRequest;
-import org.apache.hadoop.ozone.om.request.volume.OMVolumeCreateRequest;
-import org.apache.hadoop.ozone.om.request.volume.OMVolumeDeleteRequest;
-import org.apache.hadoop.ozone.om.request.volume.OMVolumeSetOwnerRequest;
-import org.apache.hadoop.ozone.om.request.volume.OMVolumeSetQuotaRequest;
-import org.apache.hadoop.ozone.om.request.volume.acl.OMVolumeAddAclRequest;
-import org.apache.hadoop.ozone.om.request.volume.acl.OMVolumeRemoveAclRequest;
-import org.apache.hadoop.ozone.om.request.volume.acl.OMVolumeSetAclRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
-
-import java.io.IOException;
-
-/**
- * Utility class used by OzoneManager HA.
- */
-public final class OzoneManagerRatisUtils {
-
-  private OzoneManagerRatisUtils() {
-  }
-  /**
-   * Create OMClientRequest which encapsulates the OMRequest.
-   * @param omRequest
-   * @return OMClientRequest
-   * @throws IOException
-   */
-  public static OMClientRequest createClientRequest(OMRequest omRequest) {
-    Type cmdType = omRequest.getCmdType();
-    switch (cmdType) {
-    case CreateVolume:
-      return new OMVolumeCreateRequest(omRequest);
-    case SetVolumeProperty:
-      boolean hasQuota = omRequest.getSetVolumePropertyRequest()
-          .hasQuotaInBytes();
-      boolean hasOwner = omRequest.getSetVolumePropertyRequest().hasOwnerName();
-      Preconditions.checkState(hasOwner || hasQuota, "Either Quota or owner " +
-          "should be set in the SetVolumeProperty request");
-      Preconditions.checkState(!(hasOwner && hasQuota), "Either Quota or " +
-          "owner should be set in the SetVolumeProperty request. Should not " +
-          "set both");
-      if (hasQuota) {
-        return new OMVolumeSetQuotaRequest(omRequest);
-      } else {
-        return new OMVolumeSetOwnerRequest(omRequest);
-      }
-    case DeleteVolume:
-      return new OMVolumeDeleteRequest(omRequest);
-    case CreateBucket:
-      return new OMBucketCreateRequest(omRequest);
-    case DeleteBucket:
-      return new OMBucketDeleteRequest(omRequest);
-    case SetBucketProperty:
-      return new OMBucketSetPropertyRequest(omRequest);
-    case AllocateBlock:
-      return new OMAllocateBlockRequest(omRequest);
-    case CreateKey:
-      return new OMKeyCreateRequest(omRequest);
-    case CommitKey:
-      return new OMKeyCommitRequest(omRequest);
-    case DeleteKey:
-      return new OMKeyDeleteRequest(omRequest);
-    case RenameKey:
-      return new OMKeyRenameRequest(omRequest);
-    case CreateDirectory:
-      return new OMDirectoryCreateRequest(omRequest);
-    case CreateFile:
-      return new OMFileCreateRequest(omRequest);
-    case PurgeKeys:
-      return new OMKeyPurgeRequest(omRequest);
-    case CreateS3Bucket:
-      return new S3BucketCreateRequest(omRequest);
-    case DeleteS3Bucket:
-      return new S3BucketDeleteRequest(omRequest);
-    case InitiateMultiPartUpload:
-      return new S3InitiateMultipartUploadRequest(omRequest);
-    case CommitMultiPartUpload:
-      return new S3MultipartUploadCommitPartRequest(omRequest);
-    case AbortMultiPartUpload:
-      return new S3MultipartUploadAbortRequest(omRequest);
-    case CompleteMultiPartUpload:
-      return new S3MultipartUploadCompleteRequest(omRequest);
-    case AddAcl:
-    case RemoveAcl:
-    case SetAcl:
-      return getOMAclRequest(omRequest);
-    case GetDelegationToken:
-      return new OMGetDelegationTokenRequest(omRequest);
-    case CancelDelegationToken:
-      return new OMCancelDelegationTokenRequest(omRequest);
-    case RenewDelegationToken:
-      return new OMRenewDelegationTokenRequest(omRequest);
-    case GetS3Secret:
-      return new S3GetSecretRequest(omRequest);
-    default:
-      return null;
-    }
-  }
-
-  private static OMClientRequest getOMAclRequest(OMRequest omRequest) {
-    Type cmdType = omRequest.getCmdType();
-    if (Type.AddAcl == cmdType) {
-      ObjectType type = omRequest.getAddAclRequest().getObj().getResType();
-      if (ObjectType.VOLUME == type) {
-        return new OMVolumeAddAclRequest(omRequest);
-      } else if (ObjectType.BUCKET == type) {
-        return new OMBucketAddAclRequest(omRequest);
-      } else if (ObjectType.KEY == type) {
-        return new OMKeyAddAclRequest(omRequest);
-      } else {
-        return new OMPrefixAddAclRequest(omRequest);
-      }
-    } else if (Type.RemoveAcl == cmdType) {
-      ObjectType type = omRequest.getRemoveAclRequest().getObj().getResType();
-      if (ObjectType.VOLUME == type) {
-        return new OMVolumeRemoveAclRequest(omRequest);
-      } else if (ObjectType.BUCKET == type) {
-        return new OMBucketRemoveAclRequest(omRequest);
-      } else if (ObjectType.KEY == type) {
-        return new OMKeyRemoveAclRequest(omRequest);
-      } else {
-        return new OMPrefixRemoveAclRequest(omRequest);
-      }
-    } else {
-      ObjectType type = omRequest.getSetAclRequest().getObj().getResType();
-      if (ObjectType.VOLUME == type) {
-        return new OMVolumeSetAclRequest(omRequest);
-      } else if (ObjectType.BUCKET == type) {
-        return new OMBucketSetAclRequest(omRequest);
-      } else if (ObjectType.KEY == type) {
-        return new OMKeySetAclRequest(omRequest);
-      } else {
-        return new OMPrefixSetAclRequest(omRequest);
-      }
-    }
-  }
-
-  /**
-   * Convert exception result to {@link OzoneManagerProtocolProtos.Status}.
-   * @param exception
-   * @return OzoneManagerProtocolProtos.Status
-   */
-  public static Status exceptionToResponseStatus(IOException exception) {
-    if (exception instanceof OMException) {
-      return Status.values()[((OMException) exception).getResult().ordinal()];
-    } else {
-      return Status.INTERNAL_ERROR;
-    }
-  }
-}
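
Note that exceptionToResponseStatus above maps a domain result code to the protobuf Status purely by ordinal, which is only safe while both enums declare their constants in the same order. A self-contained illustration of that positional mapping, with stand-in enums rather than the real Ozone types:

    import java.io.IOException;

    final class StatusMappingSketch {

      // Stand-in for OMException.ResultCodes.
      enum ResultCode { OK, VOLUME_NOT_FOUND, BUCKET_NOT_FOUND }
      // Stand-in for the protobuf Status enum; same order plus a fallback.
      enum ProtoStatus { OK, VOLUME_NOT_FOUND, BUCKET_NOT_FOUND, INTERNAL_ERROR }

      static class DomainException extends IOException {
        final ResultCode result;
        DomainException(ResultCode result) { this.result = result; }
      }

      static ProtoStatus toStatus(IOException exception) {
        if (exception instanceof DomainException) {
          // Positional mapping: only valid while both enums stay in lock-step.
          return ProtoStatus.values()[
              ((DomainException) exception).result.ordinal()];
        }
        return ProtoStatus.INTERNAL_ERROR;
      }

      public static void main(String[] args) {
        System.out.println(
            toStatus(new DomainException(ResultCode.BUCKET_NOT_FOUND)));
        System.out.println(toStatus(new IOException("disk error")));
      }
    }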
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/package-info.java
deleted file mode 100644
index 94fd0c8..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om.ratis.utils;
-
-/**
- * Utility class used by OzoneManager HA.
- */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
deleted file mode 100644
index 306527f..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.AuditAction;
-import org.apache.hadoop.ozone.audit.AuditEventStatus;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import javax.annotation.Nonnull;
-
-/**
- * OMClientRequest provides methods which every write OM request should
- * implement.
- */
-public abstract class OMClientRequest implements RequestAuditor {
-
-  private OMRequest omRequest;
-
-  public OMClientRequest(OMRequest omRequest) {
-    Preconditions.checkNotNull(omRequest);
-    this.omRequest = omRequest;
-  }
-  /**
-   * Perform pre-execute steps on an OMRequest.
-   *
-   * Called from the RPC context; generates an OMRequest object that has
-   * all the information that will be either persisted
-   * in RocksDB or returned to the caller once this operation
-   * is executed.
-   *
-   * @return OMRequest that will be serialized and handed off to Ratis for
-   *         consensus.
-   */
-  public OMRequest preExecute(OzoneManager ozoneManager)
-      throws IOException {
-    omRequest = getOmRequest().toBuilder().setUserInfo(getUserInfo()).build();
-    return omRequest;
-  }
-
-  /**
-   * Validate the OMRequest and update the cache.
-   * This step should verify that the request can be executed, perform
-   * any authorization steps and update the in-memory cache.
-   *
-   * This step does not persist the changes to the database.
-   *
-   * @return the response that will be returned to the client.
-   */
-  public abstract OMClientResponse validateAndUpdateCache(
-      OzoneManager ozoneManager, long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper);
-
-  @VisibleForTesting
-  public OMRequest getOmRequest() {
-    return omRequest;
-  }
-
-  /**
-   * Get User information which needs to be set in the OMRequest object.
-   * @return User Info.
-   */
-  public OzoneManagerProtocolProtos.UserInfo getUserInfo() {
-    UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser();
-    InetAddress remoteAddress = ProtobufRpcEngine.Server.getRemoteIp();
-    OzoneManagerProtocolProtos.UserInfo.Builder userInfo =
-        OzoneManagerProtocolProtos.UserInfo.newBuilder();
-
-    // Not-null checks added, as these values might be null in unit tests.
-    if (user != null) {
-      userInfo.setUserName(user.getUserName());
-    }
-
-    if (remoteAddress != null) {
-      userInfo.setRemoteAddress(remoteAddress.getHostAddress()).build();
-    }
-
-    return userInfo.build();
-  }
-
-  /**
-   * Check Acls of ozone object.
-   * @param ozoneManager
-   * @param resType
-   * @param storeType
-   * @param aclType
-   * @param vol
-   * @param bucket
-   * @param key
-   * @throws IOException
-   */
-  public void checkAcls(OzoneManager ozoneManager,
-      OzoneObj.ResourceType resType,
-      OzoneObj.StoreType storeType, IAccessAuthorizer.ACLType aclType,
-      String vol, String bucket, String key) throws IOException {
-    ozoneManager.checkAcls(resType, storeType, aclType, vol, bucket, key,
-        createUGI(), getRemoteAddress());
-  }
-
-  /**
-   * Return UGI object created from OMRequest userInfo. If userInfo is not
-   * set, returns null.
-   * @return UserGroupInformation.
-   */
-  @VisibleForTesting
-  public UserGroupInformation createUGI() {
-    if (omRequest.hasUserInfo() &&
-        !StringUtils.isBlank(omRequest.getUserInfo().getUserName())) {
-      return UserGroupInformation.createRemoteUser(
-          omRequest.getUserInfo().getUserName());
-    } else {
-      // This should never happen, as preExecute adds userInfo to every
-      // OM request.
-      return null;
-    }
-  }
-
-  /**
-   * Return InetAddress created from OMRequest userInfo. If userInfo is not
-   * set, returns null.
-   * @return InetAddress
-   * @throws IOException
-   */
-  @VisibleForTesting
-  public InetAddress getRemoteAddress() throws IOException {
-    if (omRequest.hasUserInfo()) {
-      return InetAddress.getByName(omRequest.getUserInfo()
-          .getRemoteAddress());
-    } else {
-      return null;
-    }
-  }
-
-  /**
-   * Set the parameters needed to return an error response to the client.
-   * @param omResponse
-   * @param ex - IOException
-   * @return the error OMResponse to be returned to the client.
-   */
-  protected OMResponse createErrorOMResponse(
-      @Nonnull OMResponse.Builder omResponse, @Nonnull IOException ex) {
-
-    omResponse.setSuccess(false);
-    if (ex.getMessage() != null) {
-      omResponse.setMessage(ex.getMessage());
-    }
-    omResponse.setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(ex));
-    return omResponse.build();
-  }
-
-  /**
-   * Log the auditMessage.
-   * @param auditLogger
-   * @param auditMessage
-   */
-  protected void auditLog(AuditLogger auditLogger, AuditMessage auditMessage) {
-    auditLogger.logWrite(auditMessage);
-  }
-
-  @Override
-  public AuditMessage buildAuditMessage(AuditAction op,
-      Map< String, String > auditMap, Throwable throwable,
-      OzoneManagerProtocolProtos.UserInfo userInfo) {
-    return new AuditMessage.Builder()
-        .setUser(userInfo != null ? userInfo.getUserName() : null)
-        .atIp(userInfo != null ? userInfo.getRemoteAddress() : null)
-        .forOperation(op.getAction())
-        .withParams(auditMap)
-        .withResult(throwable != null ? AuditEventStatus.FAILURE.toString() :
-            AuditEventStatus.SUCCESS.toString())
-        .withException(throwable)
-        .build();
-  }
-
-  @Override
-  public Map<String, String> buildVolumeAuditMap(String volume) {
-    Map<String, String> auditMap = new LinkedHashMap<>();
-    auditMap.put(OzoneConsts.VOLUME, volume);
-    return auditMap;
-  }
-}
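
OMClientRequest above fixes the OM's two-phase write path: preExecute runs once on the RPC thread and enriches the request, while validateAndUpdateCache runs after Ratis consensus, touches only the in-memory cache, and defers persistence to the double buffer. A simplified sketch of that shape, with stand-in generic types instead of the real OM classes:

    // Hedged sketch of the two-phase request lifecycle; not the real API.
    abstract class TwoPhaseRequestSketch<Req, Resp> {

      protected Req request;

      TwoPhaseRequestSketch(Req request) { this.request = request; }

      // Phase 1: runs before consensus; may rewrite the request (user info,
      // timestamps, resolved encryption info, ...).
      Req preExecute() { return request; }

      // Phase 2: runs on every replica after the log entry commits; must be
      // deterministic and must not touch the database directly.
      abstract Resp validateAndUpdateCache(long transactionLogIndex);
    }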
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java
deleted file mode 100644
index 9aa8fc4..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request;
-
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.AuditAction;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .UserInfo;
-
-/**
- * Interface for OM Requests to convert to audit objects.
- */
-public interface RequestAuditor {
-
-  /**
-   * Build AuditMessage.
-   * @param op
-   * @param auditMap
-   * @param throwable
-   * @param userInfo
-   * @return
-   */
-  AuditMessage buildAuditMessage(AuditAction op,
-      Map<String, String> auditMap, Throwable throwable, UserInfo userInfo);
-
-  /**
-   * Build auditMap with specified volume.
-   * @param volume
-   * @return auditMap.
-   */
-  Map<String, String> buildVolumeAuditMap(String volume);
-
-  /**
-   * Build auditMap for KeyArgs.
-   * @param keyArgs
-   */
-  default Map<String, String> buildKeyArgsAuditMap(KeyArgs keyArgs) {
-
-    if (keyArgs == null) {
-      return new HashMap<>(0);
-    } else {
-      Map< String, String > auditMap = new LinkedHashMap<>();
-      auditMap.put(OzoneConsts.VOLUME, keyArgs.getVolumeName());
-      auditMap.put(OzoneConsts.BUCKET, keyArgs.getBucketName());
-      auditMap.put(OzoneConsts.KEY, keyArgs.getKeyName());
-      auditMap.put(OzoneConsts.DATA_SIZE,
-          String.valueOf(keyArgs.getDataSize()));
-      auditMap.put(OzoneConsts.REPLICATION_TYPE,
-          (keyArgs.getType() != null) ? keyArgs.getType().name() : null);
-      auditMap.put(OzoneConsts.REPLICATION_FACTOR,
-          (keyArgs.getFactor() != null) ? keyArgs.getFactor().name() : null);
-      auditMap.put(OzoneConsts.KEY_LOCATION_INFO,
-          (keyArgs.getKeyLocationsList() != null) ?
-              keyArgs.getKeyLocationsList().toString() : null);
-      return auditMap;
-    }
-  }
-}
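
The default methods above deliberately build a LinkedHashMap so that audit parameters appear in the log in the order they were inserted (volume, bucket, key, ...). A tiny runnable illustration; the string keys here are stand-ins for the OzoneConsts constants:

    import java.util.LinkedHashMap;
    import java.util.Map;

    final class AuditMapSketch {
      public static void main(String[] args) {
        Map<String, String> auditMap = new LinkedHashMap<>();
        auditMap.put("volume", "vol1");
        auditMap.put("bucket", "bucket1");
        auditMap.put("key", "key1");
        // Prints entries in insertion order, unlike a plain HashMap.
        auditMap.forEach((k, v) -> System.out.println(k + "=" + v));
      }
    }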
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
deleted file mode 100644
index 2b2448d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ /dev/null
@@ -1,280 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.bucket;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.crypto.CipherSuite;
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .BucketEncryptionInfoProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .BucketInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CryptoProtocolVersionProto.ENCRYPTION_ZONES;
-
-/**
- * Handles CreateBucket Request.
- */
-public class OMBucketCreateRequest extends OMClientRequest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMBucketCreateRequest.class);
-
-  public OMBucketCreateRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-
-    // Get original request.
-    CreateBucketRequest createBucketRequest =
-        getOmRequest().getCreateBucketRequest();
-    BucketInfo bucketInfo = createBucketRequest.getBucketInfo();
-
-    // Get KMS provider.
-    KeyProviderCryptoExtension kmsProvider =
-        ozoneManager.getKmsProvider();
-
-    // Create new Bucket request with new bucket info.
-    CreateBucketRequest.Builder newCreateBucketRequest =
-        createBucketRequest.toBuilder();
-
-    BucketInfo.Builder newBucketInfo = bucketInfo.toBuilder();
-
-    // Set creation time.
-    newBucketInfo.setCreationTime(Time.now());
-
-    if (bucketInfo.hasBeinfo()) {
-      newBucketInfo.setBeinfo(getBeinfo(kmsProvider, bucketInfo));
-    }
-
-    newCreateBucketRequest.setBucketInfo(newBucketInfo.build());
-
-    return getOmRequest().toBuilder().setUserInfo(getUserInfo())
-       .setCreateBucketRequest(newCreateBucketRequest.build()).build();
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumBucketCreates();
-
-    OMMetadataManager metadataManager = ozoneManager.getMetadataManager();
-
-    BucketInfo bucketInfo = getBucketInfoFromRequest();
-
-    String volumeName = bucketInfo.getVolumeName();
-    String bucketName = bucketInfo.getBucketName();
-
-    OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.CreateBucket).setStatus(
-        OzoneManagerProtocolProtos.Status.OK);
-    OmBucketInfo omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
-
-    AuditLogger auditLogger = ozoneManager.getAuditLogger();
-    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
-
-    String volumeKey = metadataManager.getVolumeKey(volumeName);
-    String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-    IOException exception = null;
-    boolean acquiredBucketLock = false;
-    boolean acquiredVolumeLock = false;
-    OMClientResponse omClientResponse = null;
-
-    try {
-      // check Acl
-      if (ozoneManager.getAclsEnabled()) {
-        checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME,
-            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.CREATE,
-            volumeName, bucketName, null);
-      }
-
-      acquiredVolumeLock =
-          metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volumeName);
-      acquiredBucketLock = metadataManager.getLock().acquireWriteLock(
-          BUCKET_LOCK, volumeName, bucketName);
-
-      OmVolumeArgs omVolumeArgs =
-          metadataManager.getVolumeTable().get(volumeKey);
-      //Check if the volume exists
-      if (omVolumeArgs == null) {
-        LOG.debug("volume: {} not found ", volumeName);
-        throw new OMException("Volume doesn't exist",
-            OMException.ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      //Check if bucket already exists
-      if (metadataManager.getBucketTable().get(bucketKey) != null) {
-        LOG.debug("bucket: {} already exists ", bucketName);
-        throw new OMException("Bucket already exist",
-            OMException.ResultCodes.BUCKET_ALREADY_EXISTS);
-      }
-
-      // Add default acls from volume.
-      addDefaultAcls(omBucketInfo, omVolumeArgs);
-
-      // Update table cache.
-      metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey),
-          new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
-
-      omResponse.setCreateBucketResponse(
-          CreateBucketResponse.newBuilder().build());
-      omClientResponse = new OMBucketCreateResponse(omBucketInfo,
-          omResponse.build());
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new OMBucketCreateResponse(omBucketInfo,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredBucketLock) {
-        metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
-      }
-      if (acquiredVolumeLock) {
-        metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volumeName);
-      }
-    }
-
-    // Performing audit logging outside of the lock.
-    auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_BUCKET,
-        omBucketInfo.toAuditMap(), exception, userInfo));
-
-    // return response.
-    if (exception == null) {
-      LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
-      omMetrics.incNumBuckets();
-      return omClientResponse;
-    } else {
-      omMetrics.incNumBucketCreateFails();
-      LOG.error("Bucket creation failed for bucket:{} in volume:{}",
-          bucketName, volumeName, exception);
-      return omClientResponse;
-    }
-  }
-
-
-  /**
-   * Add default ACLs for the bucket. These ACLs are inherited from the
-   * volume's default ACL list.
-   * @param omBucketInfo
-   * @param omVolumeArgs
-   */
-  private void addDefaultAcls(OmBucketInfo omBucketInfo,
-      OmVolumeArgs omVolumeArgs) {
-    // Add default acls from volume.
-    List<OzoneAcl> acls = new ArrayList<>();
-    if (omBucketInfo.getAcls() != null) {
-      acls.addAll(omBucketInfo.getAcls());
-    }
-
-    List<OzoneAcl> defaultVolumeAclList = omVolumeArgs.getAclMap()
-        .getDefaultAclList().stream().map(OzoneAcl::fromProtobuf)
-        .collect(Collectors.toList());
-
-    OzoneAclUtil.inheritDefaultAcls(acls, defaultVolumeAclList);
-    omBucketInfo.setAcls(acls);
-  }
-
-
-  private BucketInfo getBucketInfoFromRequest() {
-    CreateBucketRequest createBucketRequest =
-        getOmRequest().getCreateBucketRequest();
-    return createBucketRequest.getBucketInfo();
-  }
-
-  private BucketEncryptionInfoProto getBeinfo(
-      KeyProviderCryptoExtension kmsProvider, BucketInfo bucketInfo)
-      throws IOException {
-    BucketEncryptionInfoProto bek = bucketInfo.getBeinfo();
-    BucketEncryptionInfoProto.Builder bekb = null;
-    if (kmsProvider == null) {
-      throw new OMException("Invalid KMS provider, check configuration " +
-          CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH,
-          OMException.ResultCodes.INVALID_KMS_PROVIDER);
-    }
-    if (bek.getKeyName() == null) {
-      throw new OMException("Bucket encryption key needed.", OMException
-          .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND);
-    }
-    // Talk to KMS to retrieve the bucket encryption key info.
-    KeyProvider.Metadata metadata = kmsProvider.getMetadata(
-        bek.getKeyName());
-    if (metadata == null) {
-      throw new OMException("Bucket encryption key " + bek.getKeyName()
-          + " doesn't exist.",
-          OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND);
-    }
-    // If the provider supports a pool of EDEKs, this will fill in the pool.
-    kmsProvider.warmUpEncryptedKeys(bek.getKeyName());
-    bekb = BucketEncryptionInfoProto.newBuilder()
-        .setKeyName(bek.getKeyName())
-        .setCryptoProtocolVersion(ENCRYPTION_ZONES)
-        .setSuite(OMPBHelper.convert(
-            CipherSuite.convert(metadata.getCipher())));
-    return bekb.build();
-  }
-}
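
The create path above follows a strict lock discipline: take a volume-level read lock, then a bucket-level write lock, and release them in reverse order in the finally block. A minimal sketch of that pattern, with plain JDK locks standing in for Ozone's OzoneManagerLock:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    final class LockOrderingSketch {
      private final ReentrantReadWriteLock volumeLock =
          new ReentrantReadWriteLock();
      private final ReentrantReadWriteLock bucketLock =
          new ReentrantReadWriteLock();

      void createBucket(Runnable cacheUpdate) {
        boolean haveVolume = false;
        boolean haveBucket = false;
        try {
          volumeLock.readLock().lock();   // many bucket ops per volume proceed
          haveVolume = true;
          bucketLock.writeLock().lock();  // exclusive for this bucket
          haveBucket = true;
          cacheUpdate.run();              // validate + update table cache here
        } finally {
          // Release in reverse acquisition order, as in the real code.
          if (haveBucket) {
            bucketLock.writeLock().unlock();
          }
          if (haveVolume) {
            volumeLock.readLock().unlock();
          }
        }
      }
    }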
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
deleted file mode 100644
index 9469f887..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.bucket;
-
-import java.io.IOException;
-import java.util.Map;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .DeleteBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
-
-/**
- * Handles DeleteBucket Request.
- */
-public class OMBucketDeleteRequest extends OMClientRequest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMBucketDeleteRequest.class);
-
-  public OMBucketDeleteRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumBucketDeletes();
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    OMRequest omRequest = getOmRequest();
-    String volumeName = omRequest.getDeleteBucketRequest().getVolumeName();
-    String bucketName = omRequest.getDeleteBucketRequest().getBucketName();
-
-    // Generate end user response
-    OMResponse.Builder omResponse = OMResponse.newBuilder()
-        .setStatus(OzoneManagerProtocolProtos.Status.OK)
-        .setCmdType(omRequest.getCmdType());
-
-
-    AuditLogger auditLogger = ozoneManager.getAuditLogger();
-    Map<String, String> auditMap = buildVolumeAuditMap(volumeName);
-    auditMap.put(OzoneConsts.BUCKET, bucketName);
-
-    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
-    IOException exception = null;
-
-    boolean acquiredBucketLock = false;
-    boolean acquiredVolumeLock = false;
-    OMClientResponse omClientResponse = null;
-    try {
-      // check Acl
-      if (ozoneManager.getAclsEnabled()) {
-        checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET,
-            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
-            volumeName, bucketName, null);
-      }
-
-
-      // acquire lock
-      acquiredVolumeLock =
-          omMetadataManager.getLock().acquireReadLock(VOLUME_LOCK, volumeName);
-      acquiredBucketLock =
-          omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-          volumeName, bucketName);
-
-      // No need to check whether the volume exists here, as a bucket
-      // cannot be created without its volume.
-      // Check if bucket exists
-      String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
-      OmBucketInfo omBucketInfo =
-          omMetadataManager.getBucketTable().get(bucketKey);
-      if (omBucketInfo == null) {
-        LOG.debug("bucket: {} not found ", bucketName);
-        throw new OMException("Bucket doesn't exist",
-            OMException.ResultCodes.BUCKET_NOT_FOUND);
-      }
-      //Check if bucket is empty
-      if (!omMetadataManager.isBucketEmpty(volumeName, bucketName)) {
-        LOG.debug("bucket: {} is not empty ", bucketName);
-        throw new OMException("Bucket is not empty",
-            OMException.ResultCodes.BUCKET_NOT_EMPTY);
-      }
-      omMetrics.decNumBuckets();
-
-      // Update table cache.
-      omMetadataManager.getBucketTable().addCacheEntry(
-          new CacheKey<>(bucketKey),
-          new CacheValue<>(Optional.absent(), transactionLogIndex));
-
-      omResponse.setDeleteBucketResponse(
-          DeleteBucketResponse.newBuilder().build());
-
-      // Add to double buffer.
-      omClientResponse = new OMBucketDeleteResponse(volumeName, bucketName,
-          omResponse.build());
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new OMBucketDeleteResponse(volumeName, bucketName,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredBucketLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
-      }
-      if (acquiredVolumeLock) {
-        omMetadataManager.getLock().releaseReadLock(VOLUME_LOCK, volumeName);
-      }
-    }
-
-    // Performing audit logging outside of the lock.
-    auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_BUCKET,
-        auditMap, exception, userInfo));
-
-    // return response.
-    if (exception == null) {
-      LOG.debug("Deleted bucket:{} in volume:{}", bucketName, volumeName);
-      return omClientResponse;
-    } else {
-      omMetrics.incNumBucketDeleteFails();
-      LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName,
-          volumeName, exception);
-      return omClientResponse;
-    }
-  }
-}
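
Note how the delete above never removes the row from the table directly; it inserts Optional.absent() into the table cache as a tombstone at the transaction index, so readers observe the delete before it reaches RocksDB. A self-contained sketch of the pattern, with java.util.Optional standing in for the Guava Optional the real cache uses:

    import java.util.Optional;
    import java.util.concurrent.ConcurrentHashMap;

    final class TombstoneCacheSketch {
      private final ConcurrentHashMap<String, Optional<String>> cache =
          new ConcurrentHashMap<>();

      void delete(String bucketKey) {
        // Tombstone, like CacheValue<>(Optional.absent(), txIndex).
        cache.put(bucketKey, Optional.empty());
      }

      boolean exists(String bucketKey) {
        Optional<String> cached = cache.get(bucketKey);
        return cached != null && cached.isPresent();
      }

      public static void main(String[] args) {
        TombstoneCacheSketch c = new TombstoneCacheSketch();
        c.cache.put("/vol1/bucket1", Optional.of("bucketInfo"));
        c.delete("/vol1/bucket1");
        System.out.println(c.exists("/vol1/bucket1")); // false
      }
    }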
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
deleted file mode 100644
index 6c5f5fa..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.bucket;
-
-import java.io.IOException;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
-import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.response.bucket.OMBucketSetPropertyResponse;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .BucketArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .SetBucketPropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .SetBucketPropertyResponse;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-
-/**
- * Handles SetBucketProperty Request.
- */
-public class OMBucketSetPropertyRequest extends OMClientRequest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMBucketSetPropertyRequest.class);
-
-  public OMBucketSetPropertyRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-
-    SetBucketPropertyRequest setBucketPropertyRequest =
-        getOmRequest().getSetBucketPropertyRequest();
-
-    Preconditions.checkNotNull(setBucketPropertyRequest);
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumBucketUpdates();
-
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    BucketArgs bucketArgs = setBucketPropertyRequest.getBucketArgs();
-    OmBucketArgs omBucketArgs = OmBucketArgs.getFromProtobuf(bucketArgs);
-
-    String volumeName = bucketArgs.getVolumeName();
-    String bucketName = bucketArgs.getBucketName();
-
-
-    OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.CreateBucket).setStatus(
-        OzoneManagerProtocolProtos.Status.OK);
-    OmBucketInfo omBucketInfo = null;
-
-    AuditLogger auditLogger = ozoneManager.getAuditLogger();
-    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
-    IOException exception = null;
-    boolean acquiredBucketLock = false;
-    OMClientResponse omClientResponse = null;
-    try {
-      // check Acl
-      if (ozoneManager.getAclsEnabled()) {
-        checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET,
-            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
-            volumeName, bucketName, null);
-      }
-
-      // acquire lock.
-      acquiredBucketLock =  omMetadataManager.getLock().acquireWriteLock(
-          BUCKET_LOCK, volumeName, bucketName);
-
-      String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
-      OmBucketInfo oldBucketInfo =
-          omMetadataManager.getBucketTable().get(bucketKey);
-      // Check if bucket exists
-      if (oldBucketInfo == null) {
-        LOG.debug("bucket: {} not found ", bucketName);
-        throw new OMException("Bucket doesn't exist",
-            OMException.ResultCodes.BUCKET_NOT_FOUND);
-      }
-      OmBucketInfo.Builder bucketInfoBuilder = OmBucketInfo.newBuilder();
-      bucketInfoBuilder.setVolumeName(oldBucketInfo.getVolumeName())
-          .setBucketName(oldBucketInfo.getBucketName());
-      bucketInfoBuilder.addAllMetadata(KeyValueUtil
-          .getFromProtobuf(bucketArgs.getMetadataList()));
-
-      //Check StorageType to update
-      StorageType storageType = omBucketArgs.getStorageType();
-      if (storageType != null) {
-        bucketInfoBuilder.setStorageType(storageType);
-        LOG.debug("Updating bucket storage type for bucket: {} in volume: {}",
-            bucketName, volumeName);
-      } else {
-        bucketInfoBuilder.setStorageType(oldBucketInfo.getStorageType());
-      }
-
-      //Check Versioning to update
-      Boolean versioning = omBucketArgs.getIsVersionEnabled();
-      if (versioning != null) {
-        bucketInfoBuilder.setIsVersionEnabled(versioning);
-        LOG.debug("Updating bucket versioning for bucket: {} in volume: {}",
-            bucketName, volumeName);
-      } else {
-        bucketInfoBuilder
-            .setIsVersionEnabled(oldBucketInfo.getIsVersionEnabled());
-      }
-
-      bucketInfoBuilder.setCreationTime(oldBucketInfo.getCreationTime());
-
-      // Set acls from oldBucketInfo if it has any.
-      if (oldBucketInfo.getAcls() != null) {
-        bucketInfoBuilder.setAcls(oldBucketInfo.getAcls());
-      }
-
-      omBucketInfo = bucketInfoBuilder.build();
-
-      // Update table cache.
-      omMetadataManager.getBucketTable().addCacheEntry(
-          new CacheKey<>(bucketKey),
-          new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
-
-      omResponse.setSetBucketPropertyResponse(
-          SetBucketPropertyResponse.newBuilder().build());
-      omClientResponse = new OMBucketSetPropertyResponse(omBucketInfo,
-        omResponse.build());
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new OMBucketSetPropertyResponse(omBucketInfo,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredBucketLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
-      }
-    }
-
-    // Performing audit logging outside of the lock.
-    auditLog(auditLogger, buildAuditMessage(OMAction.UPDATE_BUCKET,
-        omBucketArgs.toAuditMap(), exception, userInfo));
-
-    // return response.
-    if (exception == null) {
-      LOG.debug("Setting bucket property for bucket:{} in volume:{}",
-          bucketName, volumeName);
-      return omClientResponse;
-    } else {
-      LOG.error("Setting bucket property failed for bucket:{} in volume:{}",
-          bucketName, volumeName, exception);
-      omMetrics.incNumBucketUpdateFails();
-      return omClientResponse;
-    }
-  }
-}
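
The property update above is copy-on-write: a new OmBucketInfo is built from the old one, and only the fields the caller actually supplied (a non-null storage type, a non-null versioning flag) are replaced. A stripped-down sketch of that merge logic, with stand-in fields rather than the real builder:

    final class BucketProps {
      final String storageType;
      final boolean versioningEnabled;

      BucketProps(String storageType, boolean versioningEnabled) {
        this.storageType = storageType;
        this.versioningEnabled = versioningEnabled;
      }

      // Null request fields mean "keep the old value", mirroring the
      // storageType/versioning checks in validateAndUpdateCache above.
      BucketProps merge(String newStorageType, Boolean newVersioning) {
        return new BucketProps(
            newStorageType != null ? newStorageType : this.storageType,
            newVersioning != null ? newVersioning : this.versioningEnabled);
      }

      public static void main(String[] args) {
        BucketProps old = new BucketProps("DISK", false);
        BucketProps updated = old.merge(null, true); // only versioning changes
        System.out.println(
            updated.storageType + " " + updated.versioningEnabled); // DISK true
      }
    }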
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java
deleted file mode 100644
index 87ad600..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.bucket.acl;
-
-import java.io.IOException;
-import java.util.List;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.util.BooleanBiFunction;
-import org.apache.hadoop.ozone.om.request.util.ObjectParser;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-
-/**
- * Base class for Bucket acl request.
- */
-public abstract class OMBucketAclRequest extends OMClientRequest {
-
-  private BooleanBiFunction<List<OzoneAcl>, OmBucketInfo> omBucketAclOp;
-
-  public OMBucketAclRequest(OMRequest omRequest,
-      BooleanBiFunction<List<OzoneAcl>, OmBucketInfo> aclOp) {
-    super(omRequest);
-    omBucketAclOp = aclOp;
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    // protobuf guarantees acls are non-null.
-    List<OzoneAcl> ozoneAcls = getAcls();
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumBucketUpdates();
-    OmBucketInfo omBucketInfo = null;
-
-    OMResponse.Builder omResponse = onInit();
-    OMClientResponse omClientResponse = null;
-    IOException exception = null;
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    boolean lockAcquired = false;
-    String volume = null;
-    String bucket = null;
-    boolean operationResult = false;
-    try {
-      ObjectParser objectParser = new ObjectParser(getPath(),
-          ObjectType.BUCKET);
-
-      volume = objectParser.getVolume();
-      bucket = objectParser.getBucket();
-
-      // check Acl
-      if (ozoneManager.getAclsEnabled()) {
-        checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME,
-            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL,
-            volume, null, null);
-      }
-      lockAcquired =
-          omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volume,
-              bucket);
-
-      String dbBucketKey = omMetadataManager.getBucketKey(volume, bucket);
-      omBucketInfo = omMetadataManager.getBucketTable().get(dbBucketKey);
-      if (omBucketInfo == null) {
-        throw new OMException(OMException.ResultCodes.BUCKET_NOT_FOUND);
-      }
-
-      operationResult = omBucketAclOp.apply(ozoneAcls, omBucketInfo);
-
-      if (operationResult) {
-        // update cache.
-        omMetadataManager.getBucketTable().addCacheEntry(
-            new CacheKey<>(dbBucketKey),
-            new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
-      }
-
-      omClientResponse = onSuccess(omResponse, omBucketInfo, operationResult);
-
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = onFailure(omResponse, ex);
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (lockAcquired) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volume,
-            bucket);
-      }
-    }
-
-
-    onComplete(operationResult, exception, ozoneManager.getMetrics());
-
-    return omClientResponse;
-  }
-
-  /**
-   * Get the ACLs from the request.
-   * @return List of OzoneAcls; for add/remove it is a single-element list,
-   * for set it can be a multi-element list.
-   */
-  abstract List<OzoneAcl> getAcls();
-
-  /**
-   * Get the path name from the request.
-   * @return path name
-   */
-  abstract String getPath();
-
-  // TODO: Finer-grained metrics can be moved to these callbacks. They can
-  // also be abstracted into separate interfaces in the future.
-  /**
-   * Get the initial om response builder with lock.
-   * @return om response builder.
-   */
-  abstract OMResponse.Builder onInit();
-
-  /**
-   * Get the om client response on success case with lock.
-   * @param omResponse
-   * @param omBucketInfo
-   * @param operationResult
-   * @return OMClientResponse
-   */
-  abstract OMClientResponse onSuccess(
-      OMResponse.Builder omResponse, OmBucketInfo omBucketInfo,
-      boolean operationResult);
-
-  /**
-   * Get the om client response on failure case with lock.
-   * @param omResponse
-   * @param exception
-   * @return OMClientResponse
-   */
-  abstract OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException exception);
-
-  /**
-   * Completion hook for final processing before returning, run without the
-   * lock. Usually used for logging and metric updates outside the lock.
-   * @param operationResult
-   * @param exception
-   * @param omMetrics
-   */
-  abstract void onComplete(boolean operationResult, IOException exception,
-      OMMetrics omMetrics);
-
-
-}
-
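
OMBucketAclRequest above is a template method: the base class owns locking, cache updates, and error handling, while the add/remove/set subclasses plug in the ACL operation and response wiring through the abstract hooks. A compact sketch of that control flow with simplified stand-in types:

    import java.io.IOException;

    // Hedged sketch of the hook structure; not the real OM classes.
    abstract class AclRequestTemplate {

      final String execute() {
        StringBuilder response = onInit();
        try {
          boolean changed = applyAcl();          // add/remove/set one ACL
          return onSuccess(response, changed);
        } catch (IOException ex) {
          return onFailure(response, ex);
        } finally {
          onComplete();                          // logging/metrics, no lock
        }
      }

      abstract boolean applyAcl() throws IOException;
      abstract StringBuilder onInit();
      abstract String onSuccess(StringBuilder response, boolean changed);
      abstract String onFailure(StringBuilder response, IOException ex);
      abstract void onComplete();
    }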
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java
deleted file mode 100644
index 41aef6d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.bucket.acl;
-
-import java.io.IOException;
-import java.util.List;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.util.BooleanBiFunction;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.bucket.acl.OMBucketAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .AddAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-
-/**
- * Handle add Acl request for bucket.
- */
-public class OMBucketAddAclRequest extends OMBucketAclRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMBucketAddAclRequest.class);
-
-  private static BooleanBiFunction<List<OzoneAcl>, OmBucketInfo> bucketAddAclOp;
-  private String path;
-  private List<OzoneAcl> ozoneAcls;
-
-  static {
-    bucketAddAclOp = (ozoneAcls, omBucketInfo) -> {
-      return omBucketInfo.addAcl(ozoneAcls.get(0));
-    };
-  }
-
-  public OMBucketAddAclRequest(OMRequest omRequest) {
-    super(omRequest, bucketAddAclOp);
-    OzoneManagerProtocolProtos.AddAclRequest addAclRequest =
-        getOmRequest().getAddAclRequest();
-    path = addAclRequest.getObj().getPath();
-    ozoneAcls = Lists.newArrayList(
-        OzoneAcl.fromProtobuf(addAclRequest.getAcl()));
-  }
-
-  @Override
-  List<OzoneAcl> getAcls() {
-    return ozoneAcls;
-  }
-
-  @Override
-  String getPath() {
-    return path;
-  }
-
-  @Override
-  OMResponse.Builder onInit() {
-    return OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.AddAcl).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-  }
-
-  @Override
-  OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmBucketInfo omBucketInfo, boolean operationResult) {
-    omResponse.setSuccess(operationResult);
-    omResponse.setAddAclResponse(AddAclResponse.newBuilder()
-         .setResponse(operationResult));
-    return new OMBucketAclResponse(omBucketInfo,
-        omResponse.build());
-  }
-
-  @Override
-  OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException exception) {
-    return new OMBucketAclResponse(null,
-        createErrorOMResponse(omResponse, exception));
-  }
-
-  @Override
-  void onComplete(boolean operationResult, IOException exception,
-      OMMetrics omMetrics) {
-    if (operationResult) {
-      LOG.debug("Add acl: {} to path: {} success!", getAcls(), getPath());
-    } else {
-      omMetrics.incNumBucketUpdateFails();
-      if (exception == null) {
-        LOG.error("Add acl {} to path {} failed, because acl already exist",
-            getAcls(), getPath());
-      } else {
-        LOG.error("Add acl {} to path {} failed!", getAcls(), getPath(),
-            exception);
-      }
-    }
-  }
-
-}
-
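The static bucketAddAclOp above shows the pattern all three bucket ACL requests follow: the operation is captured once as a BooleanBiFunction and handed to the base-class constructor. Judging from the call site in the base class (omBucketAclOp.apply(ozoneAcls, omBucketInfo) returning a boolean), the interface presumably has this single-method shape; this is an inference from usage, not a copy of the Ozone source:

// Shape of org.apache.hadoop.ozone.util.BooleanBiFunction as inferred
// from its usage in these request classes: a boolean-returning
// BiFunction that avoids boxing the result.
@FunctionalInterface
public interface BooleanBiFunction<LEFT, RIGHT> {
  boolean apply(LEFT left, RIGHT right);
}

Returning false (for example, when the ACL being added is already present) lets validateAndUpdateCache skip the cache update and report the failure without raising an exception.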
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java
deleted file mode 100644
index 1d62677..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.bucket.acl;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.util.BooleanBiFunction;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.bucket.acl.OMBucketAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-
-
-/**
- * Handle removeAcl request for bucket.
- */
-public class OMBucketRemoveAclRequest extends OMBucketAclRequest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMBucketRemoveAclRequest.class);
-
-  private static BooleanBiFunction<List<OzoneAcl>, OmBucketInfo> bucketRemoveAclOp;
-  private String path;
-  private List<OzoneAcl> ozoneAcls;
-
-  static {
-    bucketRemoveAclOp = (ozoneAcls, omBucketInfo) -> {
-      return omBucketInfo.removeAcl(ozoneAcls.get(0));
-    };
-  }
-
-  public OMBucketRemoveAclRequest(OMRequest omRequest) {
-    super(omRequest, bucketRemoveAclOp);
-    OzoneManagerProtocolProtos.RemoveAclRequest removeAclRequest =
-        getOmRequest().getRemoveAclRequest();
-    path = removeAclRequest.getObj().getPath();
-    ozoneAcls = Lists.newArrayList(
-        OzoneAcl.fromProtobuf(removeAclRequest.getAcl()));
-  }
-
-  @Override
-  List<OzoneAcl> getAcls() {
-    return ozoneAcls;
-  }
-
-  @Override
-  String getPath() {
-    return path;
-  }
-
-  @Override
-  OMResponse.Builder onInit() {
-    return OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.RemoveAcl).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-  }
-
-  @Override
-  OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmBucketInfo omBucketInfo, boolean operationResult) {
-    omResponse.setSuccess(operationResult);
-    omResponse.setRemoveAclResponse(RemoveAclResponse.newBuilder()
-        .setResponse(operationResult));
-    return new OMBucketAclResponse(omBucketInfo,
-        omResponse.build());
-  }
-
-  @Override
-  OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException exception) {
-    return new OMBucketAclResponse(null,
-        createErrorOMResponse(omResponse, exception));
-  }
-
-  @Override
-  void onComplete(boolean operationResult, IOException exception,
-      OMMetrics omMetrics) {
-    if (operationResult) {
-      LOG.debug("Remove acl: {} for path: {} success!", getAcls(), getPath());
-    } else {
-      omMetrics.incNumBucketUpdateFails();
-      if (exception == null) {
-        LOG.error("Remove acl {} for path {} failed, because acl does not " +
-                "exist",
-            getAcls(), getPath());
-      } else {
-        LOG.error("Remove acl {} for path {} failed!", getAcls(), getPath(),
-            exception);
-      }
-    }
-  }
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
deleted file mode 100644
index b97de95..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.bucket.acl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.util.BooleanBiFunction;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.bucket.acl.OMBucketAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse;
-
-/**
- * Handle setAcl request for bucket.
- */
-public class OMBucketSetAclRequest extends OMBucketAclRequest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMBucketSetAclRequest.class);
-
-  private static BooleanBiFunction<List<OzoneAcl>, OmBucketInfo>
-      bucketSetAclOp;
-  private String path;
-  private List<OzoneAcl> ozoneAcls;
-
-  static {
-    bucketSetAclOp = (ozoneAcls, omBucketInfo) -> {
-      return omBucketInfo.setAcls(ozoneAcls);
-    };
-  }
-
-  public OMBucketSetAclRequest(OMRequest omRequest) {
-    super(omRequest, bucketSetAclOp);
-    OzoneManagerProtocolProtos.SetAclRequest setAclRequest =
-        getOmRequest().getSetAclRequest();
-    path = setAclRequest.getObj().getPath();
-    ozoneAcls = new ArrayList<>();
-    setAclRequest.getAclList().forEach(aclInfo ->
-        ozoneAcls.add(OzoneAcl.fromProtobuf(aclInfo)));
-  }
-
-  @Override
-  List<OzoneAcl> getAcls() {
-    return ozoneAcls;
-  }
-
-  @Override
-  String getPath() {
-    return path;
-  }
-
-  @Override
-  OMResponse.Builder onInit() {
-    return OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.SetAcl).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-  }
-
-  @Override
-  OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmBucketInfo omBucketInfo, boolean operationResult) {
-    omResponse.setSuccess(operationResult);
-    omResponse.setSetAclResponse(SetAclResponse.newBuilder()
-        .setResponse(operationResult));
-    return new OMBucketAclResponse(omBucketInfo,
-        omResponse.build());
-  }
-
-  @Override
-  OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException exception) {
-    return new OMBucketAclResponse(null,
-        createErrorOMResponse(omResponse, exception));
-  }
-
-  @Override
-  void onComplete(boolean operationResult, IOException exception,
-      OMMetrics omMetrics) {
-    if (operationResult) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Set acl: {} for path: {} success!", getAcls(), getPath());
-      }
-    } else {
-      omMetrics.incNumBucketUpdateFails();
-      if (exception == null) {
-        LOG.error("Set acl {} for path {} failed", getAcls(), getPath());
-      } else {
-        LOG.error("Set acl {} for path {} failed!", getAcls(), getPath(),
-            exception);
-      }
-    }
-  }
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/package-info.java
deleted file mode 100644
index 7b3b43d5..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains classes for handling acl requests for bucket.
- */
-package org.apache.hadoop.ozone.om.request.bucket.acl;
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java
deleted file mode 100644
index f0ca3b4..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains classes related to bucket requests.
- */
-package org.apache.hadoop.ozone.om.request.bucket;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
deleted file mode 100644
index 4b591db..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.file;
-
-import java.io.IOException;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Map;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateDirectoryRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateDirectoryResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
-import static  org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.NONE;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS;
-/**
- * Handle create directory request.
- */
-public class OMDirectoryCreateRequest extends OMKeyRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMDirectoryCreateRequest.class);
-
-  public OMDirectoryCreateRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) {
-    CreateDirectoryRequest createDirectoryRequest =
-        getOmRequest().getCreateDirectoryRequest();
-    Preconditions.checkNotNull(createDirectoryRequest);
-
-    KeyArgs.Builder newKeyArgs = createDirectoryRequest.getKeyArgs()
-        .toBuilder().setModificationTime(Time.now());
-
-    CreateDirectoryRequest.Builder newCreateDirectoryRequest =
-        createDirectoryRequest.toBuilder().setKeyArgs(newKeyArgs);
-
-    return getOmRequest().toBuilder().setCreateDirectoryRequest(
-        newCreateDirectoryRequest).setUserInfo(getUserInfo()).build();
-
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    KeyArgs keyArgs = getOmRequest().getCreateDirectoryRequest().getKeyArgs();
-
-    String volumeName = keyArgs.getVolumeName();
-    String bucketName = keyArgs.getBucketName();
-    String keyName = keyArgs.getKeyName();
-
-    OMResponse.Builder omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder().setCmdType(
-            OzoneManagerProtocolProtos.Type.CreateDirectory).setStatus(
-            OzoneManagerProtocolProtos.Status.OK);
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumCreateDirectory();
-
-    AuditLogger auditLogger = ozoneManager.getAuditLogger();
-    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
-
-    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    boolean acquiredLock = false;
-    IOException exception = null;
-    OMClientResponse omClientResponse = null;
-    try {
-      // check Acl
-      checkBucketAcls(ozoneManager, volumeName, bucketName, keyName);
-
-      // Check if this is the root of the filesystem.
-      if (keyName.length() == 0) {
-        return new OMDirectoryCreateResponse(null,
-            omResponse.setCreateDirectoryResponse(
-                CreateDirectoryResponse.newBuilder()).build());
-      }
-      // acquire lock
-      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-          volumeName, bucketName);
-
-      // TODO: Not checking volume exist here, once we have full cache we can
-      //  add volume exist check also.
-
-      OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get(
-              omMetadataManager.getBucketKey(volumeName, bucketName));
-
-      if (omBucketInfo == null) {
-        throw new OMException("Bucket not found " + bucketName,
-            BUCKET_NOT_FOUND);
-      }
-
-      // Need to check if any files exist in the given path; if they do, we
-      // cannot create a directory with the given key.
-      OMFileRequest.OMDirectoryResult omDirectoryResult =
-          OMFileRequest.verifyFilesInPath(omMetadataManager,
-          volumeName, bucketName, keyName, Paths.get(keyName));
-
-      OmKeyInfo dirKeyInfo = null;
-      if (omDirectoryResult == FILE_EXISTS ||
-          omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
-        throw new OMException("Unable to create directory: " +keyName
-            + " in volume/bucket: " + volumeName + "/" + bucketName,
-            FILE_ALREADY_EXISTS);
-      } else if (omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH ||
-          omDirectoryResult == NONE) {
-        dirKeyInfo = createDirectoryKeyInfo(ozoneManager, omBucketInfo,
-            volumeName, bucketName, keyName, keyArgs);
-
-        omMetadataManager.getKeyTable().addCacheEntry(
-            new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName,
-                dirKeyInfo.getKeyName())),
-            new CacheValue<>(Optional.of(dirKeyInfo), transactionLogIndex));
-      }
-      // If the directory already exists we do nothing. Should we throw an
-      // exception instead? The current KeyManagerImpl code just returns, so
-      // we follow the same approach here.
-
-      omResponse.setCreateDirectoryResponse(
-          CreateDirectoryResponse.newBuilder());
-      omClientResponse = new OMDirectoryCreateResponse(dirKeyInfo,
-          omResponse.build());
-
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new OMDirectoryCreateResponse(null,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
-      }
-    }
-
-    auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY,
-        auditMap, exception, userInfo));
-
-    if (exception == null) {
-      LOG.debug("Directory is successfully created for Key: {} in " +
-              "volume/bucket:{}/{}", keyName, volumeName, bucketName);
-      return omClientResponse;
-    } else {
-      LOG.error("CreateDirectory failed for Key: {} in volume/bucket:{}/{}",
-          keyName, volumeName, bucketName, exception);
-      omMetrics.incNumCreateDirectoryFails();
-      return omClientResponse;
-    }
-  }
-
-  private OmKeyInfo createDirectoryKeyInfo(OzoneManager ozoneManager,
-      OmBucketInfo omBucketInfo, String volumeName, String bucketName,
-      String keyName, KeyArgs keyArgs)
-      throws IOException {
-    Optional<FileEncryptionInfo> encryptionInfo =
-        getFileEncryptionInfo(ozoneManager, omBucketInfo);
-    String dirName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
-
-    return new OmKeyInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(dirName)
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
-        .setCreationTime(keyArgs.getModificationTime())
-        .setModificationTime(keyArgs.getModificationTime())
-        .setDataSize(0)
-        .setReplicationType(HddsProtos.ReplicationType.RATIS)
-        .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-        .setFileEncryptionInfo(encryptionInfo.orNull())
-        .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()))
-        .build();
-  }
-
-}
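One detail of createDirectoryKeyInfo above is worth noting: a directory is simply a zero-size key whose name ends in a slash (via OzoneFSUtils.addTrailingSlashIfNeeded), so files and directories share one key table and differ only in naming. A minimal, self-contained illustration of that convention; the helper below is a stand-in written for this note, not the Ozone implementation:

// Stand-in for OzoneFSUtils.addTrailingSlashIfNeeded, illustrating how
// directory keys are distinguished from file keys by a trailing slash.
public final class DirKeyConvention {
  static String addTrailingSlashIfNeeded(String keyName) {
    return keyName.endsWith("/") ? keyName : keyName + "/";
  }

  public static void main(String[] args) {
    // The file "a/b/c" and the directory "a/b/c/" are distinct rows,
    // which is why verifyFilesInPath probes both getOzoneKey (file
    // form) and getOzoneDirKey (directory form) at every level.
    System.out.println(addTrailingSlashIfNeeded("a/b/c"));  // a/b/c/
    System.out.println(addTrailingSlashIfNeeded("a/b/c/")); // a/b/c/
  }
}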
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
deleted file mode 100644
index 20b5174..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ /dev/null
@@ -1,357 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.file;
-
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-import javax.annotation.Nonnull;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateFileRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.UniqueId;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.NONE;
-
-/**
- * Handles create file request.
- */
-public class OMFileCreateRequest extends OMKeyRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMFileCreateRequest.class);
-  public OMFileCreateRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest();
-    Preconditions.checkNotNull(createFileRequest);
-
-    KeyArgs keyArgs = createFileRequest.getKeyArgs();
-
-    if (keyArgs.getKeyName().length() == 0) {
-      // Check if this is the root of the filesystem.
-      // Not throwing an exception here, as it should be thrown only after
-      // checking that the volume/bucket exists.
-      return getOmRequest().toBuilder().setUserInfo(getUserInfo()).build();
-    }
-
-    long scmBlockSize = ozoneManager.getScmBlockSize();
-
-    // NOTE: the size of a key is not a hard limit on anything; it is the
-    // size the client should expect for the key. If the client sets a
-    // value, that value is used; otherwise we allocate a single block,
-    // which is the current size the client will see on read.
-    final long requestedSize = keyArgs.getDataSize() > 0 ?
-        keyArgs.getDataSize() : scmBlockSize;
-
-    boolean useRatis = ozoneManager.shouldUseRatis();
-
-    HddsProtos.ReplicationFactor factor = keyArgs.getFactor();
-    if (factor == null) {
-      factor = useRatis ? HddsProtos.ReplicationFactor.THREE :
-          HddsProtos.ReplicationFactor.ONE;
-    }
-
-    HddsProtos.ReplicationType type = keyArgs.getType();
-    if (type == null) {
-      type = useRatis ? HddsProtos.ReplicationType.RATIS :
-          HddsProtos.ReplicationType.STAND_ALONE;
-    }
-
-    // TODO: Here we are allocating blocks without any check that the
-    //  volume/bucket/key exists and without any authorization checks.
-
-    List<OmKeyLocationInfo> omKeyLocationInfoList =
-        allocateBlock(ozoneManager.getScmClient(),
-              ozoneManager.getBlockTokenSecretManager(), type, factor,
-              new ExcludeList(), requestedSize, scmBlockSize,
-              ozoneManager.getPreallocateBlocksMax(),
-              ozoneManager.isGrpcBlockTokenEnabled(),
-              ozoneManager.getOMNodeId());
-
-    KeyArgs.Builder newKeyArgs = keyArgs.toBuilder()
-        .setModificationTime(Time.now()).setType(type).setFactor(factor)
-        .setDataSize(requestedSize);
-
-    newKeyArgs.addAllKeyLocations(omKeyLocationInfoList.stream()
-        .map(OmKeyLocationInfo::getProtobuf).collect(Collectors.toList()));
-
-    CreateFileRequest.Builder newCreateFileRequest =
-        createFileRequest.toBuilder().setKeyArgs(newKeyArgs)
-            .setClientID(UniqueId.next());
-
-    return getOmRequest().toBuilder()
-        .setCreateFileRequest(newCreateFileRequest).setUserInfo(getUserInfo())
-        .build();
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest();
-    KeyArgs keyArgs = createFileRequest.getKeyArgs();
-
-    String volumeName = keyArgs.getVolumeName();
-    String bucketName = keyArgs.getBucketName();
-    String keyName = keyArgs.getKeyName();
-
-    // if isRecursive is true, the file is created even if its parent
-    // directories do not exist.
-    boolean isRecursive = createFileRequest.getIsRecursive();
-
-    // if isOverWrite is true, an existing file is overwritten.
-    boolean isOverWrite = createFileRequest.getIsOverwrite();
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumCreateFile();
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    boolean acquiredLock = false;
-    IOException exception = null;
-    Optional<FileEncryptionInfo> encryptionInfo = Optional.absent();
-    OmKeyInfo omKeyInfo = null;
-
-    final List<OmKeyLocationInfo> locations = new ArrayList<>();
-    OMClientResponse omClientResponse = null;
-    try {
-      // check Acl
-      checkBucketAcls(ozoneManager, volumeName, bucketName, keyName);
-
-      // acquire lock
-      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-          volumeName, bucketName);
-
-      OmBucketInfo bucketInfo =
-          omMetadataManager.getBucketTable().get(
-              omMetadataManager.getBucketKey(volumeName, bucketName));
-
-      if (bucketInfo == null) {
-        throw new OMException("Bucket " + bucketName + " not found",
-            OMException.ResultCodes.BUCKET_NOT_FOUND);
-      }
-
-      if (keyName.length() == 0) {
-        // Check if this is the root of the filesystem.
-        throw new OMException("Can not write to directory: " + keyName,
-            OMException.ResultCodes.NOT_A_FILE);
-      }
-
-      OMFileRequest.OMDirectoryResult omDirectoryResult =
-          OMFileRequest.verifyFilesInPath(omMetadataManager, volumeName,
-              bucketName, keyName, Paths.get(keyName));
-
-      // Check if a file or directory exists with same key name.
-      if (omDirectoryResult == FILE_EXISTS) {
-        if (!isOverWrite) {
-          throw new OMException("File " + keyName + " already exists",
-              OMException.ResultCodes.FILE_ALREADY_EXISTS);
-        }
-      } else if (omDirectoryResult == DIRECTORY_EXISTS) {
-        throw new OMException("Can not write to directory: " + keyName,
-            OMException.ResultCodes.NOT_A_FILE);
-      } else if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
-        throw new OMException("Can not create file: " + keyName + "as there " +
-            "is already file in the given path",
-            OMException.ResultCodes.NOT_A_FILE);
-      }
-
-      if (!isRecursive) {
-        // We cannot create a file unless all of its parent directories exist.
-
-        // verifyFilesInPath checks only the path and its parent directories,
-        // but there may be keys below the given path, so this method
-        // checks for them.
-
-        // Example:
-        // Existing keys in table
-        // a/b/c/d/e
-        // a/b/c/d/f
-        // a/b
-
-        // For example, suppose the key to be created with isRecursive set
-        // to false is "a/b/c/e".
-
-        // There is no key in the keyTable with the provided path, so check
-        // whether any keys exist under the given path. (This can happen if
-        // keys are created directly through key requests.)
-
-        // We only need this check in the non-recursive case, so the
-        // checkKeysUnderPath logic is not folded into verifyFilesInPath,
-        // which is shared by the directory and file create requests. This
-        // also avoids an unnecessary check for the cases that do not
-        // require it.
-        if (omDirectoryResult == NONE ||
-            omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH) {
-          boolean canBeCreated = checkKeysUnderPath(omMetadataManager,
-              volumeName, bucketName, keyName);
-          if (!canBeCreated) {
-            throw new OMException("Can not create file: " + keyName + "as one" +
-                " of parent directory is not created",
-                OMException.ResultCodes.NOT_A_FILE);
-          }
-        }
-      }
-
-      // do open key
-      encryptionInfo = getFileEncryptionInfo(ozoneManager, bucketInfo);
-      omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs,
-          omMetadataManager.getOzoneKey(volumeName, bucketName,
-              keyName), keyArgs.getDataSize(), locations,
-          encryptionInfo.orNull(), ozoneManager.getPrefixManager(), bucketInfo);
-
-      omClientResponse =  prepareCreateKeyResponse(keyArgs, omKeyInfo,
-          locations, encryptionInfo.orNull(), exception,
-          createFileRequest.getClientID(), transactionLogIndex, volumeName,
-          bucketName, keyName, ozoneManager,
-          OMAction.CREATE_FILE, ozoneManager.getPrefixManager(), bucketInfo);
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse =  prepareCreateKeyResponse(keyArgs, omKeyInfo,
-          locations, encryptionInfo.orNull(), exception,
-          createFileRequest.getClientID(), transactionLogIndex,
-          volumeName, bucketName, keyName, ozoneManager,
-          OMAction.CREATE_FILE, ozoneManager.getPrefixManager(), null);
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
-      }
-    }
-
-    return omClientResponse;
-  }
-
-  /**
-   * Check if any keys exist under given path.
-   * @param omMetadataManager
-   * @param volumeName
-   * @param bucketName
-   * @param keyName
-   * @return true if any keys exist under the parent of the given key name,
-   * else false. If the key name is a one-level path, returns true.
-   * @throws IOException
-   */
-  private boolean checkKeysUnderPath(OMMetadataManager omMetadataManager,
-      @Nonnull String volumeName, @Nonnull String bucketName,
-      @Nonnull String keyName) throws IOException {
-
-    Path parentPath =  Paths.get(keyName).getParent();
-
-    if (parentPath != null) {
-      String dbKeyPath = omMetadataManager.getOzoneDirKey(volumeName,
-          bucketName, parentPath.toString());
-
-      // First check in key table cache.
-      Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> iterator =
-          omMetadataManager.getKeyTable().cacheIterator();
-
-      while (iterator.hasNext()) {
-        Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry =
-            iterator.next();
-        String key = entry.getKey().getCacheKey();
-        OmKeyInfo omKeyInfo = entry.getValue().getCacheValue();
-        // Make sure the entry is not from a delete key request.
-        if (key.startsWith(dbKeyPath) && omKeyInfo != null) {
-          return true;
-        }
-      }
-      try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
-               keyIter = omMetadataManager.getKeyTable().iterator()) {
-        Table.KeyValue<String, OmKeyInfo> kv = keyIter.seek(dbKeyPath);
-
-
-        if (kv != null) {
-          // Check that the entry in the db is not marked for delete. This
-          // can happen when an entry is marked for delete but has not yet
-          // been flushed to the DB.
-          CacheValue<OmKeyInfo> cacheValue = omMetadataManager.getKeyTable()
-              .getCacheValue(new CacheKey<>(kv.getKey()));
-          if (cacheValue != null) {
-            if (kv.getKey().startsWith(dbKeyPath)
-                && cacheValue.getCacheValue() != null) {
-              return true; // we found at least one key with this db key path
-            }
-          } else {
-            if (kv.getKey().startsWith(dbKeyPath)) {
-              return true; // we found at least one key with this db key path
-            }
-          }
-        }
-      }
-    } else {
-      // One-level key path.
-      // We can safely return true, as this method is called after
-      // verifyFilesInPath, so no file or directory exists with this keyName.
-      return true;
-    }
-    return false;
-  }
-}
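The non-recursive branch above reduces to a prefix probe: compute the parent's directory key and ask whether any live entry in the key table starts with it, first in the write-through cache and then via an iterator seek in RocksDB. A self-contained model of the probe, using a sorted map in place of the table (key layout simplified; this is not the Ozone API):

import java.util.TreeMap;

// Sorted-map model of checkKeysUnderPath: seek to the parent prefix
// and test whether the first key at or after it shares the prefix.
public final class ParentPrefixProbe {
  public static void main(String[] args) {
    TreeMap<String, String> keyTable = new TreeMap<>();
    keyTable.put("/vol1/b1/a/b", "...");
    keyTable.put("/vol1/b1/a/b/c/d/e", "...");
    keyTable.put("/vol1/b1/a/b/c/d/f", "...");

    // Creating "a/b/c/e" with isRecursive == false: the parent "a/b/c"
    // has the directory-key prefix "/vol1/b1/a/b/c/".
    String parentPrefix = "/vol1/b1/a/b/c/";
    String seek = keyTable.ceilingKey(parentPrefix);

    // "a/b/c/d/e" sorts at or after the prefix and starts with it, so
    // keys exist under the parent and the file may be created.
    boolean canBeCreated = seek != null && seek.startsWith(parentPrefix);
    System.out.println(canBeCreated); // true
  }
}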
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
deleted file mode 100644
index dbe056c..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.file;
-
-import java.io.IOException;
-import java.nio.file.Path;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-
-import javax.annotation.Nonnull;
-
-/**
- * Utility class for file requests.
- */
-public final class OMFileRequest {
-
-  private OMFileRequest() {
-  }
-  /**
-   * Verify whether any files or directories exist along the given path in
-   * the specified volume/bucket.
-   * @param omMetadataManager
-   * @param volumeName
-   * @param bucketName
-   * @param keyName
-   * @param keyPath
-   * @return OMDirectoryResult describing what exists at or along the path.
-   * @throws IOException
-   */
-  public static OMDirectoryResult verifyFilesInPath(
-      @Nonnull OMMetadataManager omMetadataManager,
-      @Nonnull String volumeName,
-      @Nonnull String bucketName, @Nonnull String keyName,
-      @Nonnull Path keyPath) throws IOException {
-
-    String fileNameFromDetails = omMetadataManager.getOzoneKey(volumeName,
-        bucketName, keyName);
-    String dirNameFromDetails = omMetadataManager.getOzoneDirKey(volumeName,
-        bucketName, keyName);
-
-    while (keyPath != null) {
-      String pathName = keyPath.toString();
-
-      String dbKeyName = omMetadataManager.getOzoneKey(volumeName,
-          bucketName, pathName);
-      String dbDirKeyName = omMetadataManager.getOzoneDirKey(volumeName,
-          bucketName, pathName);
-
-      if (omMetadataManager.getKeyTable().get(dbKeyName) != null) {
-        // Found a file in the given path.
-        // Check if this is actual file or a file in the given path
-        if (dbKeyName.equals(fileNameFromDetails)) {
-          return OMDirectoryResult.FILE_EXISTS;
-        } else {
-          return OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
-        }
-      } else if (omMetadataManager.getKeyTable().get(dbDirKeyName) != null) {
-        // Found a directory in the given path.
-        // Check if this is actual directory or a directory in the given path
-        if (dbDirKeyName.equals(dirNameFromDetails)) {
-          return OMDirectoryResult.DIRECTORY_EXISTS;
-        } else {
-          return OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
-        }
-      }
-      keyPath = keyPath.getParent();
-    }
-
-    // Found no files or directories in the given path.
-    return OMDirectoryResult.NONE;
-  }
-
-  /**
-   * Return codes used by verifyFilesInPath method.
-   */
-  enum OMDirectoryResult {
-
-    // In the examples below, the path is assumed to be "a/b/c" in volume
-    // volume1 and bucket b1.
-
-    // When a directory exists in given path.
-    // If we have a directory with name "a/b" we return this enum value.
-    DIRECTORY_EXISTS_IN_GIVENPATH,
-
-    // When a file exists in given path.
-    // If we have a file with name "a/b" we return this enum value.
-    FILE_EXISTS_IN_GIVENPATH,
-
-    // When file already exists with the given path.
-    // If we have a file with name "a/b/c" we return this enum value.
-    FILE_EXISTS,
-
-    // When a directory exists with the given path.
-    // If we have a directory with name "a/b/c" we return this enum value.
-    DIRECTORY_EXISTS,
-
-    // When no file or directory exists with the given path.
-    // If there is no file/directory named "a/b/c" and none at any
-    // ancestor of the given path, we return this enum value.
-    NONE
-  }
-}
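The enum above is easiest to read next to the walk in verifyFilesInPath: starting from the requested key, each ancestor is probed first in file form and then in trailing-slash directory form, and the first hit decides the result. A runnable trace of just the path-walk portion, with the table lookups replaced by printouts:

import java.nio.file.Path;
import java.nio.file.Paths;

// Runnable trace of the ancestor walk in verifyFilesInPath for the
// requested key "a/b/c"; the real code probes the key table at each step.
public final class PathWalkTrace {
  public static void main(String[] args) {
    Path keyPath = Paths.get("a/b/c");
    while (keyPath != null) {
      // Probe order at each level: file key, then directory key.
      System.out.println("probe: " + keyPath + " then " + keyPath + "/");
      keyPath = keyPath.getParent();
    }
    // Prints probes for a/b/c, a/b, then a. A hit on "a/b/c" itself
    // yields FILE_EXISTS or DIRECTORY_EXISTS; a hit on an ancestor
    // yields the *_IN_GIVENPATH variant; no hit at all yields NONE.
  }
}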
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/package-info.java
deleted file mode 100644
index 3184500..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains classes related to file requests.
- */
-package org.apache.hadoop.ozone.om.request.file;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
deleted file mode 100644
index e800927..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
+++ /dev/null
@@ -1,227 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .AllocateBlockRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .AllocateBlockResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes
-    .KEY_NOT_FOUND;
-
-/**
- * Handles allocate block request.
- */
-public class OMAllocateBlockRequest extends OMKeyRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMAllocateBlockRequest.class);
-
-  public OMAllocateBlockRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-
-    AllocateBlockRequest allocateBlockRequest =
-        getOmRequest().getAllocateBlockRequest();
-
-    Preconditions.checkNotNull(allocateBlockRequest);
-
-    KeyArgs keyArgs = allocateBlockRequest.getKeyArgs();
-
-    ExcludeList excludeList = new ExcludeList();
-    if (allocateBlockRequest.hasExcludeList()) {
-      excludeList =
-          ExcludeList.getFromProtoBuf(allocateBlockRequest.getExcludeList());
-    }
-
-    // TODO: Here we are allocating blocks without checking whether the key
-    //  exists in the open key table and without any authorization checks.
-    //  The assumption is that allocateBlock calls without an open key will
-    //  be rare. There is a chance someone could misuse this API to flood
-    //  allocateBlock calls, but currently allocateBlock is only called
-    //  internally from BlockOutputStreamEntryPool, so we are fine for now.
-    //  If someone uses a direct omclient we might be in trouble.
-
-    // To allocate at least one block, pass the requested size and
-    // scmBlockSize as the same value: when allocating a block, the
-    // requested size equals scmBlockSize.
-    List<OmKeyLocationInfo> omKeyLocationInfoList =
-        allocateBlock(ozoneManager.getScmClient(),
-            ozoneManager.getBlockTokenSecretManager(), keyArgs.getType(),
-            keyArgs.getFactor(), excludeList, ozoneManager.getScmBlockSize(),
-            ozoneManager.getScmBlockSize(),
-            ozoneManager.getPreallocateBlocksMax(),
-            ozoneManager.isGrpcBlockTokenEnabled(), ozoneManager.getOMNodeId());
-
-    // Set modification time
-    KeyArgs.Builder newKeyArgs = keyArgs.toBuilder()
-        .setModificationTime(Time.now());
-
-    AllocateBlockRequest.Builder newAllocatedBlockRequest =
-        AllocateBlockRequest.newBuilder()
-            .setClientID(allocateBlockRequest.getClientID())
-            .setKeyArgs(newKeyArgs);
-
-    if (allocateBlockRequest.hasExcludeList()) {
-      newAllocatedBlockRequest.setExcludeList(
-          allocateBlockRequest.getExcludeList());
-    }
-
-    // Add allocated block info.
-    newAllocatedBlockRequest.setKeyLocation(
-        omKeyLocationInfoList.get(0).getProtobuf());
-
-    return getOmRequest().toBuilder().setUserInfo(getUserInfo())
-        .setAllocateBlockRequest(newAllocatedBlockRequest).build();
-
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    OzoneManagerProtocolProtos.AllocateBlockRequest allocateBlockRequest =
-        getOmRequest().getAllocateBlockRequest();
-
-    OzoneManagerProtocolProtos.KeyArgs keyArgs =
-        allocateBlockRequest.getKeyArgs();
-
-    OzoneManagerProtocolProtos.KeyLocation blockLocation =
-        allocateBlockRequest.getKeyLocation();
-    Preconditions.checkNotNull(blockLocation);
-
-    String volumeName = keyArgs.getVolumeName();
-    String bucketName = keyArgs.getBucketName();
-    String keyName = keyArgs.getKeyName();
-    long clientID = allocateBlockRequest.getClientID();
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumBlockAllocateCalls();
-
-    AuditLogger auditLogger = ozoneManager.getAuditLogger();
-
-    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
-    auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID));
-
-    OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.AllocateBlock).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-    IOException exception = null;
-    OmKeyInfo omKeyInfo = null;
-    try {
-      // check Acl
-      checkBucketAcls(ozoneManager, volumeName, bucketName, keyName);
-
-      OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-      validateBucketAndVolume(omMetadataManager, volumeName,
-          bucketName);
-
-      String openKey = omMetadataManager.getOpenKey(
-          volumeName, bucketName, keyName, clientID);
-
-      // Here we don't acquire bucket/volume lock because for a single client
-      // allocateBlock is called in serial fashion.
-
-      omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-      if (omKeyInfo == null) {
-        throw new OMException("Open Key not found " + openKey, KEY_NOT_FOUND);
-      }
-
-      // Append new block
-      omKeyInfo.appendNewBlocks(Collections.singletonList(
-          OmKeyLocationInfo.getFromProtobuf(blockLocation)), false);
-
-      // Set modification time.
-      omKeyInfo.setModificationTime(keyArgs.getModificationTime());
-
-      // Add to cache.
-      omMetadataManager.getOpenKeyTable().addCacheEntry(
-          new CacheKey<>(openKey), new CacheValue<>(Optional.of(omKeyInfo),
-              transactionLogIndex));
-
-    } catch (IOException ex) {
-      exception = ex;
-    }
-
-    auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap,
-        exception, getOmRequest().getUserInfo()));
-
-    OMClientResponse omClientResponse = null;
-    if (exception == null) {
-      omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder()
-          .setKeyLocation(blockLocation).build());
-      omClientResponse = new OMAllocateBlockResponse(omKeyInfo,
-          clientID, omResponse.build());
-    } else {
-      omMetrics.incNumBlockAllocateCallFails();
-      omClientResponse = new OMAllocateBlockResponse(null, -1L,
-          createErrorOMResponse(omResponse, exception));
-    }
-
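-    // Hand the response to the double buffer helper; the returned future
-    // tracks the batched flush of this transaction to the OM DB.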
-    omClientResponse.setFlushFuture(
-        ozoneManagerDoubleBufferHelper.add(omClientResponse,
-            transactionLogIndex));
-    return omClientResponse;
-
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
deleted file mode 100644
index 196d61c..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CommitKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CommitKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-
-/**
- * Handles CommitKey request.
- */
-public class OMKeyCommitRequest extends OMKeyRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMKeyCommitRequest.class);
-
-  public OMKeyCommitRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest();
-    Preconditions.checkNotNull(commitKeyRequest);
-
-    KeyArgs keyArgs = commitKeyRequest.getKeyArgs();
-
-    KeyArgs.Builder newKeyArgs =
-        keyArgs.toBuilder().setModificationTime(Time.now());
-
-    return getOmRequest().toBuilder()
-        .setCommitKeyRequest(commitKeyRequest.toBuilder()
-            .setKeyArgs(newKeyArgs)).setUserInfo(getUserInfo()).build();
-
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest();
-
-    KeyArgs commitKeyArgs = commitKeyRequest.getKeyArgs();
-
-    String volumeName = commitKeyArgs.getVolumeName();
-    String bucketName = commitKeyArgs.getBucketName();
-    String keyName = commitKeyArgs.getKeyName();
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumKeyCommits();
-
-    AuditLogger auditLogger = ozoneManager.getAuditLogger();
-
-    Map<String, String> auditMap = buildKeyArgsAuditMap(commitKeyArgs);
-
-    OzoneManagerProtocolProtos.OMResponse.Builder omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder().setCmdType(
-            OzoneManagerProtocolProtos.Type.CommitKey).setStatus(
-            OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-    IOException exception = null;
-    OmKeyInfo omKeyInfo = null;
-    OMClientResponse omClientResponse = null;
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    try {
-      // check Acl
-      checkBucketAcls(ozoneManager, volumeName, bucketName, keyName);
-
-      List<OmKeyLocationInfo> locationInfoList = commitKeyArgs
-          .getKeyLocationsList().stream()
-          .map(OmKeyLocationInfo::getFromProtobuf)
-          .collect(Collectors.toList());
-
-      String dbOzoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-          keyName);
-      String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-          keyName, commitKeyRequest.getClientID());
-
-      omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName,
-          bucketName);
-
-      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
-      omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey);
-      if (omKeyInfo == null) {
-        throw new OMException("Failed to commit key, as " + dbOpenKey +
-            "entry is not found in the openKey table", KEY_NOT_FOUND);
-      }
-      omKeyInfo.setDataSize(commitKeyArgs.getDataSize());
-
-      omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime());
-
-      //update the block length for each block
-      omKeyInfo.updateLocationInfoList(locationInfoList);
-
-      // Add to cache of open key table and key table.
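-      // A cache value holding an absent Optional marks the openKey entry as
-      // deleted, while the key table entry makes the committed key visible.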
-      omMetadataManager.getOpenKeyTable().addCacheEntry(
-          new CacheKey<>(dbOpenKey),
-          new CacheValue<>(Optional.absent(), transactionLogIndex));
-
-      omMetadataManager.getKeyTable().addCacheEntry(
-          new CacheKey<>(dbOzoneKey),
-          new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex));
-
-      omResponse.setCommitKeyResponse(CommitKeyResponse.newBuilder().build());
-      omClientResponse =
-          new OMKeyCommitResponse(omKeyInfo, commitKeyRequest.getClientID(),
-              omResponse.build());
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new OMKeyCommitResponse(null, -1L,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-          bucketName);
-    }
-
-    // Performing audit logging outside of the lock.
-    auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
-        exception, getOmRequest().getUserInfo()));
-
-    // return response after releasing lock.
-    if (exception == null) {
-      omResponse.setCommitKeyResponse(CommitKeyResponse.newBuilder().build());
-
-      // Once a key is committed it becomes visible in Ozone, so we should
-      // increment the key count here.
-      // A key can have multiple versions, so we increment the count only
-      // for the first version. Versioning of keys is not fully supported
-      // yet, so this can be revisited later.
-
-      if (omKeyInfo.getKeyLocationVersions().size() == 1) {
-        omMetrics.incNumKeys();
-      }
-      return omClientResponse;
-    } else {
-      LOG.error("CommitKey failed for Key: {} in volume/bucket:{}/{}",
-          keyName, bucketName, volumeName, exception);
-      omMetrics.incNumKeyCommitFails();
-      return omClientResponse;
-    }
-
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
deleted file mode 100644
index baa13ad..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.UniqueId;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-
-/**
- * Handles CreateKey request.
- */
-public class OMKeyCreateRequest extends OMKeyRequest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMKeyCreateRequest.class);
-
-  public OMKeyCreateRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    CreateKeyRequest createKeyRequest = getOmRequest().getCreateKeyRequest();
-    Preconditions.checkNotNull(createKeyRequest);
-
-    KeyArgs keyArgs = createKeyRequest.getKeyArgs();
-
-    // We cannot allocate a block for a multipart upload part when
-    // createMultipartKey is called, because the replication type and factor
-    // chosen by initiateMultipartUpload are not yet known. By the time
-    // allocateBlock happens they are known: validateAndUpdateCache reads
-    // them from the multipart table, sets them on the KeyInfo, and returns
-    // it to the client. TODO: See if we can fix this; we do not call
-    //  allocateBlock in openKey for multipart uploads.
-
-    CreateKeyRequest.Builder newCreateKeyRequest = null;
-    KeyArgs.Builder newKeyArgs = null;
-    if (!keyArgs.getIsMultipartKey()) {
-
-      long scmBlockSize = ozoneManager.getScmBlockSize();
-
-      // NOTE: the size of a key is not a hard limit on anything; it is only
-      // the size the client expects the key to be. If the client sets a
-      // value, that value is used; otherwise we allocate a single block of
-      // the default SCM block size.
-      final long requestedSize = keyArgs.getDataSize() > 0 ?
-          keyArgs.getDataSize() : scmBlockSize;
-
-      boolean useRatis = ozoneManager.shouldUseRatis();
-
-      HddsProtos.ReplicationFactor factor = keyArgs.getFactor();
-      if (factor == null) {
-        factor = useRatis ? HddsProtos.ReplicationFactor.THREE :
-            HddsProtos.ReplicationFactor.ONE;
-      }
-
-      HddsProtos.ReplicationType type = keyArgs.getType();
-      if (type == null) {
-        type = useRatis ? HddsProtos.ReplicationType.RATIS :
-            HddsProtos.ReplicationType.STAND_ALONE;
-      }
-
-      // TODO: Here we allocate blocks without checking whether the
-      //  bucket/key/volume exists and without any authorization checks,
-      //  because a client's first request can be executed on any OM until
-      //  the leader is identified.
-
-      List<OmKeyLocationInfo> omKeyLocationInfoList =
-          allocateBlock(ozoneManager.getScmClient(),
-              ozoneManager.getBlockTokenSecretManager(), type, factor,
-              new ExcludeList(), requestedSize, scmBlockSize,
-              ozoneManager.getPreallocateBlocksMax(),
-              ozoneManager.isGrpcBlockTokenEnabled(),
-              ozoneManager.getOMNodeId());
-
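-      // The blocks allocated above travel inside the rewritten request, so
-      // every OM replica applies the same block locations when this
-      // transaction is replayed.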
-      newKeyArgs = keyArgs.toBuilder().setModificationTime(Time.now())
-              .setType(type).setFactor(factor)
-              .setDataSize(requestedSize);
-
-      newKeyArgs.addAllKeyLocations(omKeyLocationInfoList.stream()
-          .map(OmKeyLocationInfo::getProtobuf).collect(Collectors.toList()));
-    } else {
-      newKeyArgs = keyArgs.toBuilder().setModificationTime(Time.now());
-    }
-
-    newCreateKeyRequest =
-        createKeyRequest.toBuilder().setKeyArgs(newKeyArgs)
-            .setClientID(UniqueId.next());
-
-    return getOmRequest().toBuilder()
-        .setCreateKeyRequest(newCreateKeyRequest).setUserInfo(getUserInfo())
-        .build();
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-    CreateKeyRequest createKeyRequest = getOmRequest().getCreateKeyRequest();
-
-    KeyArgs keyArgs = createKeyRequest.getKeyArgs();
-
-    String volumeName = keyArgs.getVolumeName();
-    String bucketName = keyArgs.getBucketName();
-    String keyName = keyArgs.getKeyName();
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumKeyAllocates();
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    OmKeyInfo omKeyInfo = null;
-    final List<OmKeyLocationInfo> locations = new ArrayList<>();
-    Optional<FileEncryptionInfo> encryptionInfo = Optional.absent();
-    IOException exception = null;
-    boolean acquireLock = false;
-    OMClientResponse omClientResponse = null;
-    try {
-      // check Acl
-      checkBucketAcls(ozoneManager, volumeName, bucketName, keyName);
-
-      acquireLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-          volumeName, bucketName);
-      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
-      // TODO: We can optimize this get: if getKmsProvider() is null, the
-      // bucket encryptionInfo will not be set. If this assumption holds
-      // true, we can avoid the get from the bucket table.
-
-      OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(
-              omMetadataManager.getBucketKey(volumeName, bucketName));
-
-      encryptionInfo = getFileEncryptionInfo(ozoneManager, bucketInfo);
-
-      omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs,
-          omMetadataManager.getOzoneKey(volumeName, bucketName, keyName),
-          keyArgs.getDataSize(), locations, encryptionInfo.orNull(),
-          ozoneManager.getPrefixManager(), bucketInfo);
-      omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo,
-          locations, encryptionInfo.orNull(), exception,
-          createKeyRequest.getClientID(), transactionLogIndex, volumeName,
-          bucketName, keyName, ozoneManager, OMAction.ALLOCATE_KEY,
-          ozoneManager.getPrefixManager(), bucketInfo);
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo, locations,
-          encryptionInfo.orNull(), exception, createKeyRequest.getClientID(),
-          transactionLogIndex, volumeName, bucketName, keyName, ozoneManager,
-          OMAction.ALLOCATE_KEY, ozoneManager.getPrefixManager(), null);
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquireLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
-      }
-    }
-
-    return omClientResponse;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
deleted file mode 100644
index ee4b9b2..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import java.io.IOException;
-import java.util.Map;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .DeleteKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .DeleteKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes
-    .KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-
-/**
- * Handles DeleteKey request.
- */
-public class OMKeyDeleteRequest extends OMKeyRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMKeyDeleteRequest.class);
-
-  public OMKeyDeleteRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest();
-    Preconditions.checkNotNull(deleteKeyRequest);
-
-    OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs();
-
-    OzoneManagerProtocolProtos.KeyArgs.Builder newKeyArgs =
-        keyArgs.toBuilder().setModificationTime(Time.now());
-
-    return getOmRequest().toBuilder()
-        .setDeleteKeyRequest(deleteKeyRequest.toBuilder()
-            .setKeyArgs(newKeyArgs)).setUserInfo(getUserInfo()).build();
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-    DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest();
-
-    OzoneManagerProtocolProtos.KeyArgs deleteKeyArgs =
-        deleteKeyRequest.getKeyArgs();
-
-    String volumeName = deleteKeyArgs.getVolumeName();
-    String bucketName = deleteKeyArgs.getBucketName();
-    String keyName = deleteKeyArgs.getKeyName();
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumKeyDeletes();
-
-    AuditLogger auditLogger = ozoneManager.getAuditLogger();
-    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
-
-    Map<String, String> auditMap = buildKeyArgsAuditMap(deleteKeyArgs);
-
-    OzoneManagerProtocolProtos.OMResponse.Builder omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder().setCmdType(
-            OzoneManagerProtocolProtos.Type.DeleteKey).setStatus(
-            OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    IOException exception = null;
-    boolean acquiredLock = false;
-    OMClientResponse omClientResponse = null;
-    try {
-      // check Acl
-      checkKeyAcls(ozoneManager, volumeName, bucketName, keyName);
-
-      String objectKey = omMetadataManager.getOzoneKey(
-          volumeName, bucketName, keyName);
-
-      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-          volumeName, bucketName);
-
-      // Not doing bucket/volume checks here; this avoids extra db lookups
-      // for them.
-      // TODO: Once we have a full volume/bucket cache, we can add them
-      // back, as these checks will be inexpensive at that point.
-      OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(objectKey);
-
-      if (omKeyInfo == null) {
-        throw new OMException("Key not found", KEY_NOT_FOUND);
-      }
-
-      // Update table cache.
-      omMetadataManager.getKeyTable().addCacheEntry(
-          new CacheKey<>(objectKey),
-          new CacheValue<>(Optional.absent(), transactionLogIndex));
-
-      // No need to add cache entries to the delete table: it is used only
-      // by DeleteKeyService and never for validating client responses, so
-      // nothing has to be cached for it.
-      // TODO: Revisit if we need it later.
-
-      omClientResponse = new OMKeyDeleteResponse(omKeyInfo,
-          omResponse.setDeleteKeyResponse(
-              DeleteKeyResponse.newBuilder()).build());
-
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new OMKeyDeleteResponse(null,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
-      }
-    }
-
-    // Performing audit logging outside of the lock.
-    auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap,
-        exception, userInfo));
-
-    // return response.
-    if (exception == null) {
-      omMetrics.decNumKeys();
-      return omClientResponse;
-    } else {
-      omMetrics.incNumKeyDeleteFails();
-      return omClientResponse;
-    }
-
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
deleted file mode 100644
index 0699b2a..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgeKeysRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-
-/**
- * Handles purging of keys from OM DB.
- */
-public class OMKeyPurgeRequest extends OMKeyRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMKeyPurgeRequest.class);
-
-  public OMKeyPurgeRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-    PurgeKeysRequest purgeKeysRequest = getOmRequest().getPurgeKeysRequest();
-    List<String> purgeKeysList = purgeKeysRequest.getKeysList();
-
-    LOG.debug("Processing Purge Keys for {} number of keys.",
-        purgeKeysList.size());
-
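-    // The actual removal of the purged keys from the DB happens when this
-    // response is flushed through the double buffer.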
-    OMResponse omResponse = OMResponse.newBuilder()
-        .setCmdType(Type.PurgeKeys)
-        .setPurgeKeysResponse(
-            OzoneManagerProtocolProtos.PurgeKeysResponse.newBuilder().build())
-        .setStatus(Status.OK)
-        .setSuccess(true)
-        .build();
-
-    OMClientResponse omClientResponse = new OMKeyPurgeResponse(purgeKeysList,
-        omResponse);
-    omClientResponse.setFlushFuture(
-        ozoneManagerDoubleBufferHelper.add(omClientResponse,
-            transactionLogIndex));
-    return omClientResponse;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
deleted file mode 100644
index 526473c..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import java.io.IOException;
-import java.util.Map;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .RenameKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .RenameKeyResponse;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-
-/**
- * Handles rename key request.
- */
-public class OMKeyRenameRequest extends OMKeyRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMKeyRenameRequest.class);
-
-  public OMKeyRenameRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-
-    RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest();
-    Preconditions.checkNotNull(renameKeyRequest);
-
-    // Set modification time.
-    KeyArgs.Builder newKeyArgs = renameKeyRequest.getKeyArgs().toBuilder()
-            .setModificationTime(Time.now());
-
-    return getOmRequest().toBuilder()
-        .setRenameKeyRequest(renameKeyRequest.toBuilder()
-            .setKeyArgs(newKeyArgs)).setUserInfo(getUserInfo()).build();
-
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest();
-
-    OzoneManagerProtocolProtos.KeyArgs renameKeyArgs =
-        renameKeyRequest.getKeyArgs();
-
-    String volumeName = renameKeyArgs.getVolumeName();
-    String bucketName = renameKeyArgs.getBucketName();
-    String fromKeyName = renameKeyArgs.getKeyName();
-    String toKeyName = renameKeyRequest.getToKeyName();
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumKeyRenames();
-
-    AuditLogger auditLogger = ozoneManager.getAuditLogger();
-
-    Map<String, String> auditMap = buildKeyArgsAuditMap(renameKeyArgs);
-
-    OzoneManagerProtocolProtos.OMResponse.Builder omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder().setCmdType(
-            OzoneManagerProtocolProtos.Type.RenameKey).setStatus(
-            OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    boolean acquiredLock = false;
-    OMClientResponse omClientResponse = null;
-    IOException exception = null;
-    OmKeyInfo fromKeyValue = null;
-    try {
-      if (toKeyName.isEmpty() || fromKeyName.isEmpty()) {
-        throw new OMException("Key name is empty",
-            OMException.ResultCodes.INVALID_KEY_NAME);
-      }
-      // check Acl
-      checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName);
-
-      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-          volumeName, bucketName);
-
-      // Not doing bucket/volume checks here; this avoids extra db lookups
-      // for them.
-      // TODO: Once we have a full volume/bucket cache, we can add them
-      // back, as these checks will be inexpensive at that point.
-
-      // fromKeyName should exist
-      String fromKey = omMetadataManager.getOzoneKey(
-          volumeName, bucketName, fromKeyName);
-      fromKeyValue = omMetadataManager.getKeyTable().get(fromKey);
-      if (fromKeyValue == null) {
-        // TODO: Add support for renaming open key
-        throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND);
-      }
-
-      // toKeyName should not exist
-      String toKey =
-          omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName);
-      OmKeyInfo toKeyValue = omMetadataManager.getKeyTable().get(toKey);
-      if (toKeyValue != null) {
-        throw new OMException("Key already exists " + toKeyName,
-            OMException.ResultCodes.KEY_ALREADY_EXISTS);
-      }
-
-      fromKeyValue.setKeyName(toKeyName);
-
-      //Set modification time
-      fromKeyValue.setModificationTime(renameKeyArgs.getModificationTime());
-
-      // Add to cache.
-      // fromKey should be deleted, toKey should be added with newly updated
-      // omKeyInfo.
-      Table<String, OmKeyInfo> keyTable = omMetadataManager.getKeyTable();
-
-      keyTable.addCacheEntry(new CacheKey<>(fromKey),
-          new CacheValue<>(Optional.absent(), transactionLogIndex));
-
-      keyTable.addCacheEntry(new CacheKey<>(toKey),
-          new CacheValue<>(Optional.of(fromKeyValue), transactionLogIndex));
-
-      omClientResponse = new OMKeyRenameResponse(fromKeyValue, toKeyName,
-          fromKeyName, omResponse.setRenameKeyResponse(
-              RenameKeyResponse.newBuilder()).build());
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new OMKeyRenameResponse(null, null, null,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
-      }
-    }
-
-    auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap,
-        exception, getOmRequest().getUserInfo()));
-
-    if (exception == null) {
-      LOG.debug("Rename Key is successfully completed for volume:{} bucket:{}" +
-          " fromKey:{} toKey:{}. ", volumeName, bucketName, fromKeyName,
-          toKeyName);
-      return omClientResponse;
-    } else {
-      ozoneManager.getMetrics().incNumKeyRenameFails();
-      LOG.error(
-          "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}.",
-          volumeName, bucketName, fromKeyName, toKeyName, exception);
-      return omClientResponse;
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
deleted file mode 100644
index 8e1e760..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ /dev/null
@@ -1,536 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.IOException;
-import java.security.GeneralSecurityException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.PrefixManager;
-import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension
-    .EncryptedKeyVersion;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ScmClient;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
-import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateFileResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes
-    .BUCKET_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes
-    .VOLUME_NOT_FOUND;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateFile;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateKey;
-import static org.apache.hadoop.util.Time.monotonicNow;
-
-/**
- * Base class for key write requests.
- */
-public abstract class OMKeyRequest extends OMClientRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMKeyRequest.class);
-
-  public OMKeyRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  /**
-   * This method avoids multiple RPC calls to SCM by allocating multiple
-   * blocks in one RPC call.
-   * @throws IOException
-   */
-  @SuppressWarnings("parameternumber")
-  protected List<OmKeyLocationInfo> allocateBlock(ScmClient scmClient,
-      OzoneBlockTokenSecretManager secretManager,
-      HddsProtos.ReplicationType replicationType,
-      HddsProtos.ReplicationFactor replicationFactor,
-      ExcludeList excludeList, long requestedSize, long scmBlockSize,
-      int preallocateBlocksMax, boolean grpcBlockTokenEnabled, String omID)
-      throws IOException {
-
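-    // Ceiling division: the number of scmBlockSize-sized blocks needed to
-    // cover requestedSize, capped at preallocateBlocksMax.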
-    int numBlocks = Math.min((int) ((requestedSize - 1) / scmBlockSize + 1),
-        preallocateBlocksMax);
-
-    List<OmKeyLocationInfo> locationInfos = new ArrayList<>(numBlocks);
-    String remoteUser = getRemoteUser().getShortUserName();
-    List<AllocatedBlock> allocatedBlocks;
-    try {
-      allocatedBlocks = scmClient.getBlockClient()
-          .allocateBlock(scmBlockSize, numBlocks, replicationType,
-              replicationFactor, omID, excludeList);
-    } catch (SCMException ex) {
-      if (ex.getResult()
-          .equals(SCMException.ResultCodes.SAFE_MODE_EXCEPTION)) {
-        throw new OMException(ex.getMessage(),
-            OMException.ResultCodes.SCM_IN_SAFE_MODE);
-      }
-      throw ex;
-    }
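-    // Wrap each allocated block with its pipeline and, if gRPC block tokens
-    // are enabled, a block token generated for the remote user.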
-    for (AllocatedBlock allocatedBlock : allocatedBlocks) {
-      OmKeyLocationInfo.Builder builder = new OmKeyLocationInfo.Builder()
-          .setBlockID(new BlockID(allocatedBlock.getBlockID()))
-          .setLength(scmBlockSize)
-          .setOffset(0)
-          .setPipeline(allocatedBlock.getPipeline());
-      if (grpcBlockTokenEnabled) {
-        builder.setToken(secretManager
-            .generateToken(remoteUser, allocatedBlock.getBlockID().toString(),
-                getAclForUser(remoteUser), scmBlockSize));
-      }
-      locationInfos.add(builder.build());
-    }
-    return locationInfos;
-  }
-
-  /* Optimize ugi lookup for RPC operations to avoid a trip through
-   * UGI.getCurrentUser, which is synchronized.
-   */
-  private UserGroupInformation getRemoteUser() throws IOException {
-    UserGroupInformation ugi = Server.getRemoteUser();
-    return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
-  }
-
-  /**
-   * Return the ACLs for the given user.
-   * @param user user to look up ACLs for
-   */
-  private EnumSet<HddsProtos.BlockTokenSecretProto.AccessModeProto>
-      getAclForUser(String user) {
-    // TODO: Return correct acl for user.
-    return EnumSet.allOf(
-        HddsProtos.BlockTokenSecretProto.AccessModeProto.class);
-  }
-
-  /**
-   * Validate that the bucket and volume exist.
-   * @param omMetadataManager
-   * @param volumeName
-   * @param bucketName
-   * @throws IOException
-   */
-  public void validateBucketAndVolume(OMMetadataManager omMetadataManager,
-      String volumeName, String bucketName)
-      throws IOException {
-    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
-    // Check if bucket exists
-    if (!omMetadataManager.getBucketTable().isExist(bucketKey)) {
-      String volumeKey = omMetadataManager.getVolumeKey(volumeName);
-      // If the volume also does not exist, we should throw volume not found
-      // exception
-      if (!omMetadataManager.getVolumeTable().isExist(volumeKey)) {
-        throw new OMException("Volume not found " + volumeName,
-            VOLUME_NOT_FOUND);
-      }
-
-      // if the volume exists but bucket does not exist, throw bucket not found
-      // exception
-      throw new OMException("Bucket not found " + bucketName, BUCKET_NOT_FOUND);
-    }
-  }
-
-  protected Optional<FileEncryptionInfo> getFileEncryptionInfo(
-      OzoneManager ozoneManager, OmBucketInfo bucketInfo) throws IOException {
-    Optional<FileEncryptionInfo> encInfo = Optional.absent();
-    BucketEncryptionKeyInfo ezInfo = bucketInfo.getEncryptionKeyInfo();
-    if (ezInfo != null) {
-      if (ozoneManager.getKmsProvider() == null) {
-        throw new OMException("Invalid KMS provider, check configuration " +
-            CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH,
-            OMException.ResultCodes.INVALID_KMS_PROVIDER);
-      }
-
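-      // Generate a per-key EDEK under the bucket's encryption-zone key and
-      // embed it in the FileEncryptionInfo for the new key.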
-      final String ezKeyName = ezInfo.getKeyName();
-      EncryptedKeyVersion edek = generateEDEK(ozoneManager, ezKeyName);
-      encInfo = Optional.of(new FileEncryptionInfo(ezInfo.getSuite(),
-          ezInfo.getVersion(),
-          edek.getEncryptedKeyVersion().getMaterial(),
-          edek.getEncryptedKeyIv(), ezKeyName,
-          edek.getEncryptionKeyVersionName()));
-    }
-    return encInfo;
-  }
-
-  private EncryptedKeyVersion generateEDEK(OzoneManager ozoneManager,
-      String ezKeyName) throws IOException {
-    if (ezKeyName == null) {
-      return null;
-    }
-    long generateEDEKStartTime = monotonicNow();
-    EncryptedKeyVersion edek = SecurityUtil.doAsLoginUser(
-        new PrivilegedExceptionAction<EncryptedKeyVersion>() {
-          @Override
-          public EncryptedKeyVersion run() throws IOException {
-            try {
-              return ozoneManager.getKmsProvider()
-                  .generateEncryptedKey(ezKeyName);
-            } catch (GeneralSecurityException e) {
-              throw new IOException(e);
-            }
-          }
-        });
-    long generateEDEKTime = monotonicNow() - generateEDEKStartTime;
-    LOG.debug("generateEDEK takes {} ms", generateEDEKTime);
-    Preconditions.checkNotNull(edek);
-    return edek;
-  }
-
-  /**
-   * Prepare the response returned to the client.
-   * @return OMClientResponse
-   */
-  @SuppressWarnings("parameternumber")
-  protected OMClientResponse prepareCreateKeyResponse(@Nonnull KeyArgs keyArgs,
-      OmKeyInfo omKeyInfo, @Nonnull List<OmKeyLocationInfo> locations,
-      FileEncryptionInfo encryptionInfo, @Nullable IOException exception,
-      long clientID, long transactionLogIndex, @Nonnull String volumeName,
-      @Nonnull String bucketName, @Nonnull String keyName,
-      @Nonnull OzoneManager ozoneManager, @Nonnull OMAction omAction,
-      @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo) {
-
-    OMResponse.Builder omResponse = OMResponse.newBuilder()
-        .setStatus(OzoneManagerProtocolProtos.Status.OK);
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
-
-    OMClientResponse omClientResponse = null;
-    if (exception == null) {
-      if (omKeyInfo == null) {
-        // The key does not exist; create a new object whose new blocks form
-        // version 0.
-        omKeyInfo = createKeyInfo(keyArgs, locations, keyArgs.getFactor(),
-            keyArgs.getType(), keyArgs.getDataSize(),
-            encryptionInfo, prefixManager, omBucketInfo);
-      }
-
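-      // Record the latest location-version number; it is returned to the
-      // client as the open version in the response built below.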
-      long openVersion = omKeyInfo.getLatestVersionLocations().getVersion();
-
-      // Append blocks
-      try {
-        omKeyInfo.appendNewBlocks(keyArgs.getKeyLocationsList().stream()
-            .map(OmKeyLocationInfo::getFromProtobuf)
-            .collect(Collectors.toList()), false);
-
-      } catch (IOException ex) {
-        exception = ex;
-      }
-
-      if (exception != null) {
-        LOG.error("{} failed for Key: {} in volume/bucket:{}/{}",
-            omAction.getAction(), keyName, bucketName, volumeName, exception);
-        omClientResponse = createKeyErrorResponse(ozoneManager.getMetrics(),
-            omAction, exception, omResponse);
-      } else {
-        String dbOpenKeyName = omMetadataManager.getOpenKey(volumeName,
-            bucketName, keyName, clientID);
-
-        // Adding the cache entry for this openKey can be done outside the
-        // lock. Even if the bucket gets deleted in the meantime, commitKey
-        // will detect that the bucket is gone.
-        omMetadataManager.getOpenKeyTable().addCacheEntry(
-            new CacheKey<>(dbOpenKeyName),
-            new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex));
-
-        LOG.debug("{} for Key: {} in volume/bucket: {}/{}",
-            omAction.getAction(), keyName, volumeName, bucketName);
-
-        if (omAction == OMAction.CREATE_FILE) {
-          omResponse.setCreateFileResponse(CreateFileResponse.newBuilder()
-                  .setKeyInfo(omKeyInfo.getProtobuf())
-                  .setID(clientID)
-                  .setOpenVersion(openVersion).build());
-          omResponse.setCmdType(CreateFile);
-          omClientResponse = new OMFileCreateResponse(omKeyInfo, clientID,
-              omResponse.build());
-        } else {
-          omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder()
-              .setKeyInfo(omKeyInfo.getProtobuf())
-              .setID(clientID).setOpenVersion(openVersion)
-              .build());
-          omResponse.setCmdType(CreateKey);
-          omClientResponse = new OMKeyCreateResponse(omKeyInfo, clientID,
-              omResponse.build());
-        }
-      }
-
-    } else {
-      LOG.error("{} failed for Key: {} in volume/bucket:{}/{}",
-          omAction.getAction(), keyName, volumeName, bucketName, exception);
-      omClientResponse = createKeyErrorResponse(ozoneManager.getMetrics(),
-          omAction, exception, omResponse);
-    }
-    // audit log
-    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(omAction,
-        auditMap, exception, getOmRequest().getUserInfo()));
-    return omClientResponse;
-  }
-
-  /**
-   * Create OmKeyInfo object.
-   * @return OmKeyInfo
-   */
-  @SuppressWarnings("parameterNumber")
-  protected OmKeyInfo createKeyInfo(@Nonnull KeyArgs keyArgs,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      @Nonnull HddsProtos.ReplicationFactor factor,
-      @Nonnull HddsProtos.ReplicationType type, long size,
-      @Nullable FileEncryptionInfo encInfo,
-      @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo) {
-    return new OmKeyInfo.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, locations)))
-        .setCreationTime(keyArgs.getModificationTime())
-        .setModificationTime(keyArgs.getModificationTime())
-        .setDataSize(size)
-        .setReplicationType(type)
-        .setReplicationFactor(factor)
-        .setFileEncryptionInfo(encInfo)
-        .setAcls(getAclsForKey(keyArgs, omBucketInfo, prefixManager))
-        .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList()))
-        .build();
-  }
-
-  private List<OzoneAcl> getAclsForKey(KeyArgs keyArgs,
-      OmBucketInfo bucketInfo, PrefixManager prefixManager) {
-    List<OzoneAcl> acls = new ArrayList<>();
-
-    if (keyArgs.getAclsList() != null) {
-      acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()));
-    }
-
-    // Inherit DEFAULT acls from prefix.
-    if (prefixManager != null) {
-      List<OmPrefixInfo> prefixList = prefixManager.getLongestPrefixPath(
-          OZONE_URI_DELIMITER +
-              keyArgs.getVolumeName() + OZONE_URI_DELIMITER +
-              keyArgs.getBucketName() + OZONE_URI_DELIMITER +
-              keyArgs.getKeyName());
-
-      if (!prefixList.isEmpty()) {
-        // Add all acls from the direct parent to the key.
-        OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1);
-        if (prefixInfo != null) {
-          if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls())) {
-            return acls;
-          }
-        }
-      }
-    }
-
-    // Inherit DEFAULT acls from bucket only if DEFAULT acls for
-    // prefix are not set.
-    if (bucketInfo != null) {
-      if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls())) {
-        return acls;
-      }
-    }
-
-    return acls;
-  }
-
-  /**
-   * Prepare OmKeyInfo which will be persisted to openKeyTable.
-   * @return OmKeyInfo
-   * @throws IOException
-   */
-  @SuppressWarnings("parameternumber")
-  protected OmKeyInfo prepareKeyInfo(
-      @Nonnull OMMetadataManager omMetadataManager,
-      @Nonnull KeyArgs keyArgs, @Nonnull String dbKeyName, long size,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      @Nullable FileEncryptionInfo encInfo,
-      @Nonnull PrefixManager prefixManager, @Nullable OmBucketInfo omBucketInfo)
-      throws IOException {
-    OmKeyInfo keyInfo = null;
-    if (keyArgs.getIsMultipartKey()) {
-      keyInfo = prepareMultipartKeyInfo(omMetadataManager, keyArgs, size,
-          locations, encInfo, prefixManager, omBucketInfo);
-      //TODO args.getMetadata
-    } else if (omMetadataManager.getKeyTable().isExist(dbKeyName)) {
-      // TODO: Needs to be fixed, as when the key already exists we are
-      //  appending new blocks to the existing key.
-      keyInfo = omMetadataManager.getKeyTable().get(dbKeyName);
-      // The key already exists; the new blocks are added as a new version.
-      // When locations.size() == 0, the new version has blocks identical to
-      // its previous version.
-      keyInfo.addNewVersion(locations, false);
-      keyInfo.setDataSize(size + keyInfo.getDataSize());
-      // The modification time is set in preExecute, use the same as
-      // modification time when key already exists.
-      keyInfo.setModificationTime(keyArgs.getModificationTime());
-    }
-    return keyInfo;
-  }
-
-  /**
-   * Prepare OmKeyInfo for multi-part upload part key which will be persisted
-   * to openKeyTable.
-   * @return OmKeyInfo
-   * @throws IOException
-   */
-  private OmKeyInfo prepareMultipartKeyInfo(
-      @Nonnull OMMetadataManager omMetadataManager,
-      @Nonnull KeyArgs args, long size,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      FileEncryptionInfo encInfo,  @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo) throws IOException {
-    HddsProtos.ReplicationFactor factor;
-    HddsProtos.ReplicationType type;
-
-    Preconditions.checkArgument(args.getMultipartNumber() > 0,
-        "PartNumber should be greater than zero");
-    // For a multipart upload part key, take the replication type and factor
-    // from the original key that initiated the multipart upload. If no such
-    // key is found, throw a no-such-multipart-upload error.
-    String uploadID = args.getMultipartUploadID();
-    Preconditions.checkNotNull(uploadID);
-    String multipartKey = omMetadataManager
-        .getMultipartKey(args.getVolumeName(), args.getBucketName(),
-            args.getKeyName(), uploadID);
-    OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
-        multipartKey);
-    if (partKeyInfo == null) {
-      throw new OMException("No such multipart upload with specified " +
-          "uploadId " + uploadID,
-          OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-    } else {
-      factor = partKeyInfo.getFactor();
-      type = partKeyInfo.getType();
-    }
-    // For an upload part we don't need to check the KeyTable, as a part
-    // is not an actual key in its own right.
-    return createKeyInfo(args, locations, factor, type, size, encInfo,
-        prefixManager, omBucketInfo);
-  }
-
-
-  private OMClientResponse createKeyErrorResponse(@Nonnull OMMetrics omMetrics,
-      @Nonnull OMAction omAction, @Nonnull IOException exception,
-      @Nonnull OMResponse.Builder omResponse) {
-    if (omAction == OMAction.CREATE_FILE) {
-      omMetrics.incNumCreateFileFails();
-      omResponse.setCmdType(CreateFile);
-      return new OMFileCreateResponse(null, -1L,
-          createErrorOMResponse(omResponse, exception));
-    } else {
-      omMetrics.incNumKeyAllocateFails();
-      omResponse.setCmdType(CreateKey);
-      return new OMKeyCreateResponse(null, -1L,
-          createErrorOMResponse(omResponse, exception));
-    }
-  }
-
-  /**
-   * Check Acls for the ozone bucket.
-   * @param ozoneManager
-   * @param volume
-   * @param bucket
-   * @param key
-   * @throws IOException
-   */
-  protected void checkBucketAcls(OzoneManager ozoneManager, String volume,
-      String bucket, String key) throws IOException {
-    if (ozoneManager.getAclsEnabled()) {
-      checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET,
-          OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
-          volume, bucket, key);
-    }
-  }
-
-
-  /**
-   * Check Acls for the ozone key.
-   * @param ozoneManager
-   * @param volume
-   * @param bucket
-   * @param key
-   * @throws IOException
-   */
-  protected void checkKeyAcls(OzoneManager ozoneManager, String volume,
-      String bucket, String key) throws IOException {
-    if (ozoneManager.getAclsEnabled()) {
-      checkAcls(ozoneManager, OzoneObj.ResourceType.KEY,
-          OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
-          volume, bucket, key);
-    }
-  }
-
-}
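
The ACL resolution in the deleted code above gives prefix DEFAULT acls precedence over bucket DEFAULT acls: the bucket's acls are consulted only when no prefix supplies any. A minimal standalone sketch of that precedence, assuming a hypothetical inheritDefaults helper in place of OzoneAclUtil.inheritDefaultAcls and plain strings in place of OzoneAcl:

    import java.util.ArrayList;
    import java.util.List;

    public final class AclInheritanceSketch {

      // Resolve a new key's acls: prefix DEFAULT acls win; bucket DEFAULT
      // acls are a fallback only when the prefix contributed nothing.
      static List<String> resolveAcls(List<String> keyAcls,
          List<String> prefixDefaultAcls, List<String> bucketDefaultAcls) {
        if (inheritDefaults(keyAcls, prefixDefaultAcls)) {
          return keyAcls;
        }
        inheritDefaults(keyAcls, bucketDefaultAcls);
        return keyAcls;
      }

      // Hypothetical analogue of OzoneAclUtil.inheritDefaultAcls: copies
      // missing defaults onto the target and reports whether any were copied.
      static boolean inheritDefaults(List<String> target, List<String> defaults) {
        boolean inherited = false;
        for (String acl : defaults) {
          if (!target.contains(acl)) {
            target.add(acl);
            inherited = true;
          }
        }
        return inherited;
      }

      public static void main(String[] args) {
        List<String> acls = new ArrayList<>();
        acls.add("user:alice:rw");
        // Prefix defaults are applied; bucket defaults are skipped.
        System.out.println(resolveAcls(acls,
            List.of("user:bob:r"), List.of("group:ops:rw")));
      }
    }
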
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
deleted file mode 100644
index d1fac4f..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key.acl;
-
-import java.io.IOException;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.request.util.ObjectParser;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-
-/**
- * Base class for key acl requests.
- */
-public abstract class OMKeyAclRequest extends OMClientRequest {
-
-
-  public OMKeyAclRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    OmKeyInfo omKeyInfo = null;
-
-    OMResponse.Builder omResponse = onInit();
-    OMClientResponse omClientResponse = null;
-    IOException exception = null;
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    boolean lockAcquired = false;
-    String volume = null;
-    String bucket = null;
-    String key = null;
-    boolean operationResult = false;
-    try {
-      ObjectParser objectParser = new ObjectParser(getPath(),
-          ObjectType.KEY);
-
-      volume = objectParser.getVolume();
-      bucket = objectParser.getBucket();
-      key = objectParser.getKey();
-
-      // check Acl
-      if (ozoneManager.getAclsEnabled()) {
-        checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME,
-            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL,
-            volume, bucket, key);
-      }
-      lockAcquired =
-          omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volume,
-              bucket);
-
-      String dbKey = omMetadataManager.getOzoneKey(volume, bucket, key);
-      omKeyInfo = omMetadataManager.getKeyTable().get(dbKey);
-
-      if (omKeyInfo == null) {
-        throw new OMException(OMException.ResultCodes.KEY_NOT_FOUND);
-      }
-
-      operationResult = apply(omKeyInfo);
-
-      if (operationResult) {
-        // update cache.
-        omMetadataManager.getKeyTable().addCacheEntry(
-            new CacheKey<>(dbKey),
-            new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex));
-      }
-
-      omClientResponse = onSuccess(omResponse, omKeyInfo, operationResult);
-
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = onFailure(omResponse, ex);
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (lockAcquired) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volume,
-            bucket);
-      }
-    }
-
-
-    onComplete(operationResult, exception);
-
-    return omClientResponse;
-  }
-
-  /**
-   * Get the path name from the request.
-   * @return path name
-   */
-  abstract String getPath();
-
-  // TODO: Finer grain metrics can be moved to these callbacks. They can also
-  // be abstracted into separate interfaces in future.
-  /**
-   * Get the initial om response builder with lock.
-   * @return om response builder.
-   */
-  abstract OMResponse.Builder onInit();
-
-  /**
-   * Get the om client response on success case with lock.
-   * @param omResponse
-   * @param omKeyInfo
-   * @param operationResult
-   * @return OMClientResponse
-   */
-  abstract OMClientResponse onSuccess(
-      OMResponse.Builder omResponse, OmKeyInfo omKeyInfo,
-      boolean operationResult);
-
-  /**
-   * Get the om client response on failure case with lock.
-   * @param omResponse
-   * @param exception
-   * @return OMClientResponse
-   */
-  abstract OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException exception);
-
-  /**
-   * Completion hook for final processing before return, run without the
-   * lock. Usually used for logging and metric updates outside the lock.
-   * @param operationResult
-   * @param exception
-   */
-  abstract void onComplete(boolean operationResult, IOException exception);
-
-  /**
-   * Apply the acl operation; returns true if it completed successfully,
-   * else false.
-   * @param omKeyInfo
-   */
-  abstract boolean apply(OmKeyInfo omKeyInfo);
-}
-
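
The deleted base class above is a template method: validateAndUpdateCache drives acl checking, locking, lookup, and cache update, while the concrete add/remove/set requests deleted below supply only the onInit/onSuccess/onFailure/onComplete/apply hooks. A minimal sketch of the same control flow, with hypothetical names rather than the Ozone API:

    import java.io.IOException;

    abstract class AclRequestTemplate {

      // Fixed workflow: run the operation, build a response, then run the
      // completion hook (logging/metrics) regardless of success or failure.
      final String execute() {
        boolean ok = false;
        IOException error = null;
        try {
          ok = apply();
          return onSuccess(ok);
        } catch (IOException ex) {
          error = ex;
          return onFailure(ex);
        } finally {
          onComplete(ok, error);
        }
      }

      abstract boolean apply() throws IOException;        // operation-specific

      abstract String onSuccess(boolean operationResult); // build success response

      abstract String onFailure(IOException exception);   // build error response

      abstract void onComplete(boolean operationResult, IOException exception);
    }

    class AddAclSketch extends AclRequestTemplate {
      @Override boolean apply() { return true; } // pretend the acl was new
      @Override String onSuccess(boolean r) { return "OK:" + r; }
      @Override String onFailure(IOException e) { return "ERR:" + e.getMessage(); }
      @Override void onComplete(boolean r, IOException e) {
        System.out.println("result=" + r + ", error=" + e);
      }
    }
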
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
deleted file mode 100644
index 8d69a24..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key.acl;
-
-import java.io.IOException;
-import java.util.List;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-
-/**
- * Handle add Acl request for key.
- */
-public class OMKeyAddAclRequest extends OMKeyAclRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMKeyAddAclRequest.class);
-
-  private String path;
-  private List<OzoneAcl> ozoneAcls;
-
-  public OMKeyAddAclRequest(OMRequest omRequest) {
-    super(omRequest);
-    OzoneManagerProtocolProtos.AddAclRequest addAclRequest =
-        getOmRequest().getAddAclRequest();
-    path = addAclRequest.getObj().getPath();
-    ozoneAcls = Lists.newArrayList(
-        OzoneAcl.fromProtobuf(addAclRequest.getAcl()));
-  }
-
-  @Override
-  String getPath() {
-    return path;
-  }
-
-  @Override
-  OMResponse.Builder onInit() {
-    return OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.AddAcl).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-  }
-
-  @Override
-  OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmKeyInfo omKeyInfo, boolean operationResult) {
-    omResponse.setSuccess(operationResult);
-    omResponse.setAddAclResponse(AddAclResponse.newBuilder()
-        .setResponse(operationResult));
-    return new OMKeyAclResponse(omKeyInfo,
-        omResponse.build());
-  }
-
-  @Override
-  OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException exception) {
-    return new OMKeyAclResponse(null,
-        createErrorOMResponse(omResponse, exception));
-  }
-
-  @Override
-  void onComplete(boolean operationResult, IOException exception) {
-    if (operationResult) {
-      LOG.debug("Add acl: {} to path: {} success!", ozoneAcls, path);
-    } else {
-      if (exception == null) {
-        LOG.debug("Add acl {} to path {} failed, because acl already exist",
-            ozoneAcls, path);
-      } else {
-        LOG.error("Add acl {} to path {} failed!", ozoneAcls, path, exception);
-      }
-    }
-  }
-
-  @Override
-  boolean apply(OmKeyInfo omKeyInfo) {
-    // No need for a null check here; this is never called with null.
-    return omKeyInfo.addAcl(ozoneAcls.get(0));
-  }
-
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
deleted file mode 100644
index 0bd81d3..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key.acl;
-
-import java.io.IOException;
-import java.util.List;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse;
-
-/**
- * Handle remove Acl request for key.
- */
-public class OMKeyRemoveAclRequest extends OMKeyAclRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMKeyRemoveAclRequest.class);
-
-  private String path;
-  private List<OzoneAcl> ozoneAcls;
-
-  public OMKeyRemoveAclRequest(OMRequest omRequest) {
-    super(omRequest);
-    OzoneManagerProtocolProtos.RemoveAclRequest removeAclRequest =
-        getOmRequest().getRemoveAclRequest();
-    path = removeAclRequest.getObj().getPath();
-    ozoneAcls = Lists.newArrayList(
-        OzoneAcl.fromProtobuf(removeAclRequest.getAcl()));
-  }
-
-  @Override
-  String getPath() {
-    return path;
-  }
-
-  @Override
-  OMResponse.Builder onInit() {
-    return OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.RemoveAcl).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-  }
-
-  @Override
-  OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmKeyInfo omKeyInfo, boolean operationResult) {
-    omResponse.setSuccess(operationResult);
-    omResponse.setRemoveAclResponse(RemoveAclResponse.newBuilder()
-        .setResponse(operationResult));
-    return new OMKeyAclResponse(omKeyInfo,
-        omResponse.build());
-  }
-
-  @Override
-  OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException exception) {
-    return new OMKeyAclResponse(null,
-        createErrorOMResponse(omResponse, exception));
-  }
-
-  @Override
-  void onComplete(boolean operationResult, IOException exception) {
-    if (operationResult) {
-      LOG.debug("Remove acl: {} to path: {} success!", ozoneAcls, path);
-    } else {
-      if (exception == null) {
-        LOG.debug("Remove acl {} to path {} failed, because acl already exist",
-            ozoneAcls, path);
-      } else {
-        LOG.error("Remove acl {} to path {} failed!", ozoneAcls, path,
-            exception);
-      }
-    }
-  }
-
-  @Override
-  boolean apply(OmKeyInfo omKeyInfo) {
-    // No need for a null check here; this is never called with null.
-    return omKeyInfo.removeAcl(ozoneAcls.get(0));
-  }
-
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
deleted file mode 100644
index 24d46f8..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key.acl;
-
-import java.io.IOException;
-import java.util.List;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse;
-
-/**
- * Handle set Acl request for key.
- */
-public class OMKeySetAclRequest extends OMKeyAclRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMKeySetAclRequest.class);
-
-  private String path;
-  private List<OzoneAcl> ozoneAcls;
-
-  public OMKeySetAclRequest(OMRequest omRequest) {
-    super(omRequest);
-    OzoneManagerProtocolProtos.SetAclRequest setAclRequest =
-        getOmRequest().getSetAclRequest();
-    path = setAclRequest.getObj().getPath();
-    ozoneAcls = Lists.newArrayList(
-        OzoneAclUtil.fromProtobuf(setAclRequest.getAclList()));
-  }
-
-  @Override
-  String getPath() {
-    return path;
-  }
-
-  @Override
-  OMResponse.Builder onInit() {
-    return OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.SetAcl).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-  }
-
-  @Override
-  OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmKeyInfo omKeyInfo, boolean operationResult) {
-    omResponse.setSuccess(operationResult);
-    omResponse.setSetAclResponse(SetAclResponse.newBuilder()
-        .setResponse(operationResult));
-    return new OMKeyAclResponse(omKeyInfo,
-        omResponse.build());
-  }
-
-  @Override
-  OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException exception) {
-    return new OMKeyAclResponse(null,
-        createErrorOMResponse(omResponse, exception));
-  }
-
-  @Override
-  void onComplete(boolean operationResult, IOException exception) {
-    if (operationResult) {
-      LOG.debug("Set acl: {} to path: {} success!", ozoneAcls, path);
-    } else {
-      if (exception == null) {
-        LOG.debug("Set acl {} to path {} failed!", ozoneAcls, path);
-      } else {
-        LOG.error("Set acl {} to path {} failed!", ozoneAcls, path, exception);
-      }
-    }
-  }
-
-  @Override
-  boolean apply(OmKeyInfo omKeyInfo) {
-    // No need to check not null here, this will be never called with null.
-    return omKeyInfo.setAcls(ozoneAcls);
-  }
-
-}
-
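
Across the three key acl requests deleted above, apply() delegates to OmKeyInfo and the boolean it returns drives the response: addAcl reports false when the acl is already present, removeAcl reports false when it is absent, and setAcls replaces the list wholesale. A sketch of that contract, with plain strings standing in for OzoneAcl:

    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    final class KeyAclsSketch {
      private final Set<String> acls = new LinkedHashSet<>();

      // false when the acl is already present (mirrors the "already exists"
      // debug log in the add request).
      boolean addAcl(String acl) {
        return acls.add(acl);
      }

      // false when the acl is not present.
      boolean removeAcl(String acl) {
        return acls.remove(acl);
      }

      // Replace the whole acl list; always succeeds.
      boolean setAcls(List<String> newAcls) {
        acls.clear();
        acls.addAll(newAcls);
        return true;
      }
    }
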
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/package-info.java
deleted file mode 100644
index c532519..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains classes related to acl requests for keys.
- */
-package org.apache.hadoop.ozone.om.request.key.acl;
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
deleted file mode 100644
index 3b30e4a..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key.acl.prefix;
-
-import java.io.IOException;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.PrefixManagerImpl;
-import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult;
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.PREFIX_LOCK;
-
-/**
- * Base class for prefix acl requests.
- */
-public abstract class OMPrefixAclRequest extends OMClientRequest {
-
-  public OMPrefixAclRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-
-    OmPrefixInfo omPrefixInfo = null;
-
-    OMResponse.Builder omResponse = onInit();
-    OMClientResponse omClientResponse = null;
-    IOException exception = null;
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    boolean lockAcquired = false;
-    String volume = null;
-    String bucket = null;
-    String key = null;
-    OMPrefixAclOpResult operationResult = null;
-    boolean result = false;
-
-    PrefixManagerImpl prefixManager =
-        (PrefixManagerImpl) ozoneManager.getPrefixManager();
-    try {
-      String prefixPath = getOzoneObj().getPath();
-
-      // check Acl
-      if (ozoneManager.getAclsEnabled()) {
-        checkAcls(ozoneManager, OzoneObj.ResourceType.PREFIX,
-            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL,
-            volume, bucket, key);
-      }
-
-      lockAcquired =
-          omMetadataManager.getLock().acquireWriteLock(PREFIX_LOCK, prefixPath);
-
-      omPrefixInfo = omMetadataManager.getPrefixTable().get(prefixPath);
-
-      try {
-        operationResult = apply(prefixManager, omPrefixInfo);
-      } catch (IOException ex) {
-        // In the HA case this will never happen. The add/remove/setAcl
-        // methods contain database-update logic that can throw, but in the
-        // HA case we do not update the DB here. That logic lives in
-        // PrefixManagerImpl because, for the non-HA code path, the in-memory
-        // update must happen after the DB update.
-        operationResult = new OMPrefixAclOpResult(null, false);
-      }
-
-      if (operationResult.isOperationsResult()) {
-        // For a remove acl request: if removing the acl leaves the prefix
-        // with an empty acl list, delete the prefix from the prefix table.
-        if (getOmRequest().hasRemoveAclRequest() &&
-            operationResult.getOmPrefixInfo().getAcls().size() == 0) {
-          omMetadataManager.getPrefixTable().addCacheEntry(
-              new CacheKey<>(prefixPath),
-              new CacheValue<>(Optional.absent(), transactionLogIndex));
-        } else {
-          // update cache.
-          omMetadataManager.getPrefixTable().addCacheEntry(
-              new CacheKey<>(prefixPath),
-              new CacheValue<>(Optional.of(operationResult.getOmPrefixInfo()),
-                  transactionLogIndex));
-        }
-      }
-
-      result = operationResult.isOperationsResult();
-      omClientResponse = onSuccess(omResponse,
-          operationResult.getOmPrefixInfo(), result);
-
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = onFailure(omResponse, ex);
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (lockAcquired) {
-        omMetadataManager.getLock().releaseWriteLock(PREFIX_LOCK,
-            getOzoneObj().getPath());
-      }
-    }
-
-    onComplete(result, exception, ozoneManager.getMetrics());
-
-    return omClientResponse;
-  }
-
-  /**
-   * Get the ozone object from the request.
-   * @return OzoneObj
-   */
-  abstract OzoneObj getOzoneObj();
-
-  // TODO: Finer grain metrics can be moved to these callbacks. They can also
-  // be abstracted into separate interfaces in future.
-  /**
-   * Get the initial om response builder with lock.
-   * @return om response builder.
-   */
-  abstract OMResponse.Builder onInit();
-
-  /**
-   * Get the om client response on success case with lock.
-   * @param omResponse
-   * @param omPrefixInfo
-   * @param operationResult
-   * @return OMClientResponse
-   */
-  abstract OMClientResponse onSuccess(
-      OMResponse.Builder omResponse, OmPrefixInfo omPrefixInfo,
-      boolean operationResult);
-
-  /**
-   * Get the om client response on failure case with lock.
-   * @param omResponse
-   * @param exception
-   * @return OMClientResponse
-   */
-  abstract OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException exception);
-
-  /**
-   * Completion hook for final processing before return, run without the
-   * lock. Usually used for logging and metric updates outside the lock.
-   * @param operationResult
-   * @param exception
-   * @param omMetrics
-   */
-  abstract void onComplete(boolean operationResult, IOException exception,
-      OMMetrics omMetrics);
-
-  /**
-   * Apply the acl operation and return the result of the operation.
-   * @param prefixManager
-   * @param omPrefixInfo
-   * @throws IOException
-   */
-  abstract OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager,
-      OmPrefixInfo omPrefixInfo) throws IOException;
-
-
-}
-
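
One detail worth noting in the prefix path above: when a remove-acl request empties a prefix's acl list, the cache entry is written with an absent value, which acts as a delete marker for the prefix table. A sketch of that pattern, using java.util.Optional in place of the Guava Optional the deleted code relies on:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;

    final class PrefixCacheSketch {
      // Cached prefix info, keyed by prefix path; an empty Optional is a
      // tombstone meaning the prefix was deleted at this transaction.
      private final Map<String, Optional<String>> cache = new HashMap<>();

      void update(String prefixPath, String prefixInfo, int remainingAcls) {
        if (remainingAcls == 0) {
          cache.put(prefixPath, Optional.empty()); // delete marker
        } else {
          cache.put(prefixPath, Optional.of(prefixInfo));
        }
      }

      Optional<String> lookup(String prefixPath) {
        return cache.getOrDefault(prefixPath, Optional.empty());
      }
    }
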
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java
deleted file mode 100644
index 086190a..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key.acl.prefix;
-
-import java.io.IOException;
-import java.util.List;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.PrefixManagerImpl;
-import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult;
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.om.response.key.acl.prefix.OMPrefixAclResponse;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .AddAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-
-/**
- * Handle add Acl request for prefix.
- */
-public class OMPrefixAddAclRequest extends OMPrefixAclRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMPrefixAddAclRequest.class);
-
-  private OzoneObj ozoneObj;
-  private List<OzoneAcl> ozoneAcls;
-
-  public OMPrefixAddAclRequest(OMRequest omRequest) {
-    super(omRequest);
-    OzoneManagerProtocolProtos.AddAclRequest addAclRequest =
-        getOmRequest().getAddAclRequest();
-    // TODO: conversion of OzoneObj to protobuf can be avoided when we have
-    //  single code path for HA and Non-HA
-    ozoneObj = OzoneObjInfo.fromProtobuf(addAclRequest.getObj());
-    ozoneAcls = Lists.newArrayList(
-        OzoneAcl.fromProtobuf(addAclRequest.getAcl()));
-  }
-
-  @Override
-  OzoneObj getOzoneObj() {
-    return ozoneObj;
-  }
-
-  @Override
-  OMResponse.Builder onInit() {
-    return OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.AddAcl).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-  }
-
-  @Override
-  OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmPrefixInfo omPrefixInfo, boolean operationResult) {
-    omResponse.setSuccess(operationResult);
-    omResponse.setAddAclResponse(AddAclResponse.newBuilder()
-        .setResponse(operationResult));
-    return new OMPrefixAclResponse(omPrefixInfo,
-        omResponse.build());
-  }
-
-  @Override
-  OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException exception) {
-    return new OMPrefixAclResponse(null,
-        createErrorOMResponse(omResponse, exception));
-  }
-
-  @Override
-  void onComplete(boolean operationResult, IOException exception,
-      OMMetrics omMetrics) {
-    if (operationResult) {
-      LOG.debug("Add acl: {} to path: {} success!", ozoneAcls,
-          ozoneObj.getPath());
-    } else {
-      omMetrics.incNumBucketUpdateFails();
-      if (exception == null) {
-        LOG.debug("Add acl {} to path {} failed, because acl already exist",
-            ozoneAcls, ozoneObj.getPath());
-      } else {
-        LOG.error("Add acl {} to path {} failed!", ozoneAcls,
-            ozoneObj.getPath(), exception);
-      }
-    }
-  }
-
-  @Override
-  OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager,
-      OmPrefixInfo omPrefixInfo) throws IOException {
-    return prefixManager.addAcl(ozoneObj, ozoneAcls.get(0), omPrefixInfo);
-  }
-
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java
deleted file mode 100644
index 32d9b22..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key.acl.prefix;
-
-import java.io.IOException;
-import java.util.List;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.PrefixManagerImpl;
-import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult;
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.om.response.key.acl.prefix.OMPrefixAclResponse;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse;
-
-/**
- * Handle remove Acl request for prefix.
- */
-public class OMPrefixRemoveAclRequest extends OMPrefixAclRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMPrefixRemoveAclRequest.class);
-
-  private OzoneObj ozoneObj;
-  private List<OzoneAcl> ozoneAcls;
-
-  public OMPrefixRemoveAclRequest(OMRequest omRequest) {
-    super(omRequest);
-    OzoneManagerProtocolProtos.RemoveAclRequest removeAclRequest =
-        getOmRequest().getRemoveAclRequest();
-    // TODO: conversion of OzoneObj to protobuf can be avoided when we have
-    //  single code path for HA and Non-HA
-    ozoneObj = OzoneObjInfo.fromProtobuf(removeAclRequest.getObj());
-    ozoneAcls = Lists.newArrayList(
-        OzoneAcl.fromProtobuf(removeAclRequest.getAcl()));
-  }
-
-  @Override
-  OzoneObj getOzoneObj() {
-    return ozoneObj;
-  }
-
-  @Override
-  OMResponse.Builder onInit() {
-    return OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.RemoveAcl).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-  }
-
-  @Override
-  OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmPrefixInfo omPrefixInfo, boolean operationResult) {
-    omResponse.setSuccess(operationResult);
-    omResponse.setRemoveAclResponse(RemoveAclResponse.newBuilder()
-        .setResponse(operationResult));
-    return new OMPrefixAclResponse(omPrefixInfo,
-        omResponse.build());
-  }
-
-  @Override
-  OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException exception) {
-    return new OMPrefixAclResponse(null,
-        createErrorOMResponse(omResponse, exception));
-  }
-
-  @Override
-  void onComplete(boolean operationResult, IOException exception,
-      OMMetrics omMetrics) {
-    if (operationResult) {
-      LOG.debug("Remove acl: {} to path: {} success!", ozoneAcls,
-          ozoneObj.getPath());
-    } else {
-      omMetrics.incNumBucketUpdateFails();
-      if (exception == null) {
-        LOG.debug("Remove acl {} to path {} failed, because acl does not exist",
-            ozoneAcls, ozoneObj.getPath());
-      } else {
-        LOG.error("Remove acl {} to path {} failed!", ozoneAcls,
-            ozoneObj.getPath(), exception);
-      }
-    }
-  }
-
-  @Override
-  OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager,
-      OmPrefixInfo omPrefixInfo) throws IOException {
-    return prefixManager.removeAcl(ozoneObj, ozoneAcls.get(0), omPrefixInfo);
-  }
-
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java
deleted file mode 100644
index 563d76e7..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key.acl.prefix;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.PrefixManagerImpl;
-import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult;
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.om.response.key.acl.prefix.OMPrefixAclResponse;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse;
-
-/**
- * Handle set Acl request for prefix.
- */
-public class OMPrefixSetAclRequest extends OMPrefixAclRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMPrefixSetAclRequest.class);
-
-  private OzoneObj ozoneObj;
-  private List<OzoneAcl> ozoneAcls;
-
-  public OMPrefixSetAclRequest(OMRequest omRequest) {
-    super(omRequest);
-    OzoneManagerProtocolProtos.SetAclRequest setAclRequest =
-        getOmRequest().getSetAclRequest();
-    // TODO: conversion of OzoneObj to protobuf can be avoided when we have
-    //  single code path for HA and Non-HA
-    ozoneObj = OzoneObjInfo.fromProtobuf(setAclRequest.getObj());
-    ozoneAcls = new ArrayList<>();
-    setAclRequest.getAclList().forEach(aclInfo ->
-        ozoneAcls.add(OzoneAcl.fromProtobuf(aclInfo)));
-  }
-
-  @Override
-  OzoneObj getOzoneObj() {
-    return ozoneObj;
-  }
-
-  @Override
-  OMResponse.Builder onInit() {
-    return OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.SetAcl).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-  }
-
-  @Override
-  OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmPrefixInfo omPrefixInfo, boolean operationResult) {
-    omResponse.setSuccess(operationResult);
-    omResponse.setSetAclResponse(SetAclResponse.newBuilder()
-        .setResponse(operationResult));
-    return new OMPrefixAclResponse(omPrefixInfo,
-        omResponse.build());
-  }
-
-  @Override
-  OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException exception) {
-    return new OMPrefixAclResponse(null,
-        createErrorOMResponse(omResponse, exception));
-  }
-
-  @Override
-  void onComplete(boolean operationResult, IOException exception,
-      OMMetrics omMetrics) {
-    if (operationResult) {
-      LOG.debug("Set acl: {} to path: {} success!", ozoneAcls,
-          ozoneObj.getPath());
-    } else {
-      omMetrics.incNumBucketUpdateFails();
-      if (exception == null) {
-        LOG.debug("Set acl {} to path {} failed", ozoneAcls,
-            ozoneObj.getPath());
-      } else {
-        LOG.error("Set acl {} to path {} failed!", ozoneAcls,
-            ozoneObj.getPath(), exception);
-      }
-    }
-  }
-
-  @Override
-  OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager,
-      OmPrefixInfo omPrefixInfo) throws IOException {
-    return prefixManager.setAcl(ozoneObj, ozoneAcls, omPrefixInfo);
-  }
-
-}
-
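
The set-acl variants convert the protobuf acl list differently: the key-level request maps the whole list through OzoneAclUtil.fromProtobuf, while the prefix request above converts element-by-element with forEach. Both amount to the same mapping; a sketch of the element-wise form, with hypothetical proto and domain types rather than the generated Ozone classes:

    import java.util.ArrayList;
    import java.util.List;

    final class AclConversionSketch {

      // Hypothetical stand-in for the generated OzoneAclInfo proto message.
      static final class ProtoAcl {
        final String text;
        ProtoAcl(String text) { this.text = text; }
      }

      // Hypothetical stand-in for the OzoneAcl domain type.
      static final class DomainAcl {
        final String text;
        DomainAcl(String text) { this.text = text; }
        static DomainAcl fromProtobuf(ProtoAcl p) { return new DomainAcl(p.text); }
      }

      // Element-wise conversion, mirroring the forEach in the deleted request.
      static List<DomainAcl> convert(List<ProtoAcl> protoAcls) {
        List<DomainAcl> acls = new ArrayList<>();
        protoAcls.forEach(p -> acls.add(DomainAcl.fromProtobuf(p)));
        return acls;
      }
    }
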
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/package-info.java
deleted file mode 100644
index 0a027cc..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package contains classes related to acl requests for prefix.
- */
-package org.apache.hadoop.ozone.om.request.key.acl.prefix;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/package-info.java
deleted file mode 100644
index af20fe1..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains classes related to key requests.
- */
-package org.apache.hadoop.ozone.om.request.key;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java
deleted file mode 100644
index ee324cf..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains classes for handling OMRequests.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java
deleted file mode 100644
index f3a352a..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java
+++ /dev/null
@@ -1,391 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.s3.bucket;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.request.volume.OMVolumeRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse;
-import org.apache.hadoop.ozone.om.response.s3.bucket.S3BucketCreateResponse;
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .S3CreateBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .S3CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .S3CreateVolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserVolumeInfo;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-
-import static org.apache.hadoop.ozone.OzoneConsts.OM_S3_VOLUME_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.S3_BUCKET_MAX_LENGTH;
-import static org.apache.hadoop.ozone.OzoneConsts.S3_BUCKET_MIN_LENGTH;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.USER_LOCK;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
-
-/**
- * Handles S3 Bucket create request.
- */
-public class S3BucketCreateRequest extends OMVolumeRequest {
-
-  private static final String S3_ADMIN_NAME = "OzoneS3Manager";
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(S3BucketCreateRequest.class);
-
-  public S3BucketCreateRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    S3CreateBucketRequest s3CreateBucketRequest =
-        getOmRequest().getCreateS3BucketRequest();
-    Preconditions.checkNotNull(s3CreateBucketRequest);
-
-    S3CreateBucketRequest.Builder newS3CreateBucketRequest =
-        s3CreateBucketRequest.toBuilder().setS3CreateVolumeInfo(
-            S3CreateVolumeInfo.newBuilder().setCreationTime(Time.now()));
-
-    // TODO: Do we need to enforce the bucket rules in this code path?
-    // https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
-
-    // For now we only check the length.
-    int bucketLength = s3CreateBucketRequest.getS3Bucketname().length();
-    if (bucketLength < S3_BUCKET_MIN_LENGTH ||
-        bucketLength >= S3_BUCKET_MAX_LENGTH) {
-      throw new OMException("S3BucketName must be at least 3 and not more " +
-          "than 63 characters long",
-          OMException.ResultCodes.S3_BUCKET_INVALID_LENGTH);
-    }
-
-    return getOmRequest().toBuilder()
-        .setCreateS3BucketRequest(newS3CreateBucketRequest)
-        .setUserInfo(getUserInfo()).build();
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    S3CreateBucketRequest s3CreateBucketRequest =
-        getOmRequest().getCreateS3BucketRequest();
-
-    String userName = s3CreateBucketRequest.getUserName();
-    String s3BucketName = s3CreateBucketRequest.getS3Bucketname();
-
-    OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.CreateS3Bucket).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumS3BucketCreates();
-
-    // When an s3 bucket is created, we internally create an ozone volume
-    // and an ozone bucket.
-
-    // The ozone volume name is generated from userName by calling
-    // formatOzoneVolumeName.
-
-    // The ozone bucket name is the same as the s3 bucket name.
-    // In S3, bucket names are unique, so we create a mapping s3BucketName ->
-    // ozoneVolume/ozoneBucket and add it to the s3 mapping table. If
-    // s3BucketName already exists in the mapping table, the bucket already
-    // exists; otherwise we go ahead and create it.
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    IOException exception = null;
-
-    boolean volumeCreated = false;
-    boolean acquiredVolumeLock = false;
-    boolean acquiredUserLock = false;
-    boolean acquiredS3Lock = false;
-    String volumeName = formatOzoneVolumeName(userName);
-    OMClientResponse omClientResponse = null;
-    try {
-
-      // TODO: support S3 ACLs later.
-      acquiredS3Lock = omMetadataManager.getLock().acquireWriteLock(
-          S3_BUCKET_LOCK, s3BucketName);
-
-      // First check if this s3Bucket exists
-      if (omMetadataManager.getS3Table().isExist(s3BucketName)) {
-        throw new OMException("S3Bucket " + s3BucketName + " already exists",
-            OMException.ResultCodes.S3_BUCKET_ALREADY_EXISTS);
-      }
-
-      OMVolumeCreateResponse omVolumeCreateResponse = null;
-      try {
-        acquiredVolumeLock =
-            omMetadataManager.getLock().acquireWriteLock(VOLUME_LOCK,
-                volumeName);
-        acquiredUserLock = omMetadataManager.getLock().acquireWriteLock(
-            USER_LOCK, userName);
-        // Check if the volume exists; if it does not, create the
-        // ozone volume.
-        String volumeKey = omMetadataManager.getVolumeKey(volumeName);
-        if (!omMetadataManager.getVolumeTable().isExist(volumeKey)) {
-          OmVolumeArgs omVolumeArgs = createOmVolumeArgs(volumeName, userName,
-              s3CreateBucketRequest.getS3CreateVolumeInfo()
-                  .getCreationTime());
-          UserVolumeInfo volumeList = omMetadataManager.getUserTable().get(
-              omMetadataManager.getUserKey(userName));
-          volumeList = addVolumeToOwnerList(volumeList,
-              volumeName, userName, ozoneManager.getMaxUserVolumeCount(),
-              transactionLogIndex);
-          createVolume(omMetadataManager, omVolumeArgs, volumeList, volumeKey,
-              omMetadataManager.getUserKey(userName), transactionLogIndex);
-          volumeCreated = true;
-          omVolumeCreateResponse = new OMVolumeCreateResponse(omVolumeArgs,
-              volumeList, omResponse.build());
-        }
-      } finally {
-        if (acquiredUserLock) {
-          omMetadataManager.getLock().releaseWriteLock(USER_LOCK, userName);
-        }
-        if (acquiredVolumeLock) {
-          omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volumeName);
-        }
-      }
-
-      // check if ozone bucket exists, if it does not exist create ozone
-      // bucket
-      OmBucketInfo omBucketInfo = createBucket(omMetadataManager, volumeName,
-          s3BucketName, userName,
-          s3CreateBucketRequest.getS3CreateVolumeInfo().getCreationTime(),
-          transactionLogIndex);
-
-      // Now finally add it to s3 table cache.
-      omMetadataManager.getS3Table().addCacheEntry(
-          new CacheKey<>(s3BucketName), new CacheValue<>(
-              Optional.of(formatS3MappingName(volumeName, s3BucketName)),
-              transactionLogIndex));
-
-      OMBucketCreateResponse omBucketCreateResponse =
-          new OMBucketCreateResponse(omBucketInfo, omResponse.build());
-
-      omClientResponse = new S3BucketCreateResponse(omVolumeCreateResponse,
-          omBucketCreateResponse, s3BucketName,
-          formatS3MappingName(volumeName, s3BucketName),
-          omResponse.setCreateS3BucketResponse(
-              S3CreateBucketResponse.newBuilder()).build());
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new S3BucketCreateResponse(null, null, null, null,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredS3Lock) {
-        omMetadataManager.getLock().releaseWriteLock(
-            S3_BUCKET_LOCK, s3BucketName);
-      }
-    }
-
-    // Performing audit logging outside of the lock.
-    auditLog(ozoneManager.getAuditLogger(),
-        buildAuditMessage(OMAction.CREATE_S3_BUCKET,
-            buildAuditMap(userName, s3BucketName), exception,
-            getOmRequest().getUserInfo()));
-
-    if (exception == null) {
-      LOG.debug("S3Bucket is successfully created for userName: {}, " +
-          "s3BucketName {}, volumeName {}", userName, s3BucketName, volumeName);
-      if (volumeCreated) {
-        omMetrics.incNumVolumes();
-      }
-      omMetrics.incNumBuckets();
-      omMetrics.incNumS3Buckets();
-
-      return omClientResponse;
-    } else {
-      LOG.error("S3Bucket Creation Failed for userName: {}, s3BucketName {}, " +
-          "VolumeName {}", userName, s3BucketName, volumeName);
-      omMetrics.incNumS3BucketCreateFails();
-      return omClientResponse;
-    }
-  }
-
-
-  private OmBucketInfo createBucket(OMMetadataManager omMetadataManager,
-      String volumeName, String s3BucketName, String userName,
-      long creationTime, long transactionLogIndex) throws IOException {
-    // check if ozone bucket exists, if it does not exist create ozone
-    // bucket
-    boolean acquireBucketLock = false;
-    OmBucketInfo omBucketInfo = null;
-    try {
-      acquireBucketLock =
-          omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName,
-              s3BucketName);
-      String bucketKey = omMetadataManager.getBucketKey(volumeName,
-          s3BucketName);
-      if (!omMetadataManager.getBucketTable().isExist(bucketKey)) {
-        omBucketInfo = createOmBucketInfo(volumeName, s3BucketName, userName,
-            creationTime);
-        // Add to bucket table cache.
-        omMetadataManager.getBucketTable().addCacheEntry(
-            new CacheKey<>(bucketKey),
-            new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
-      } else {
-        // This can happen when an ozone bucket already exists in the
-        // volume, but it is not an s3 bucket.
-        throw new OMException("Bucket " + s3BucketName + " already exists",
-            OMException.ResultCodes.BUCKET_ALREADY_EXISTS);
-      }
-    } finally {
-      if (acquireBucketLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            s3BucketName);
-      }
-    }
-    return omBucketInfo;
-  }
-
-  /**
-   * Generate the Ozone volume name from userName.
-   * @param userName user name used to derive the volume name
-   * @return volume name
-   */
-  @VisibleForTesting
-  public static String formatOzoneVolumeName(String userName) {
-    return String.format(OM_S3_VOLUME_PREFIX + "%s", userName);
-  }
-
-  /**
-   * Generate the S3 mapping for the provided volume and bucket. This
-   * information will be persisted in the s3 table in the OM DB.
-   * @param volumeName ozone volume name
-   * @param bucketName ozone bucket name
-   * @return s3Mapping
-   */
-  @VisibleForTesting
-  public static String formatS3MappingName(String volumeName,
-      String bucketName) {
-    return String.format("%s" + OzoneConsts.OM_KEY_PREFIX + "%s", volumeName,
-        bucketName);
-  }
-
-  /**
-   * Create the {@link OmVolumeArgs} which needs to be persisted in the
-   * volume table in the OM DB.
-   * @param volumeName ozone volume name
-   * @param userName owner of the volume
-   * @param creationTime creation time in milliseconds
-   * @return {@link OmVolumeArgs}
-   */
-  private OmVolumeArgs createOmVolumeArgs(String volumeName, String userName,
-      long creationTime) throws IOException {
-    OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder()
-        .setAdminName(S3_ADMIN_NAME).setVolume(volumeName)
-        .setQuotaInBytes(OzoneConsts.MAX_QUOTA_IN_BYTES)
-        .setOwnerName(userName)
-        .setCreationTime(creationTime);
-
-    // Set default acls.
-    for (OzoneAcl acl : getDefaultAcls(userName)) {
-      builder.addOzoneAcls(OzoneAcl.toProtobuf(acl));
-    }
-
-    return builder.build();
-  }
-
-  /**
-   * Create the {@link OmBucketInfo} which needs to be persisted into the
-   * bucket table in the OM DB.
-   * @param volumeName ozone volume name
-   * @param s3BucketName s3 bucket name
-   * @param userName owner of the bucket
-   * @param creationTime creation time in milliseconds
-   * @return {@link OmBucketInfo}
-   */
-  private OmBucketInfo createOmBucketInfo(String volumeName,
-      String s3BucketName, String userName, long creationTime) {
-    //TODO: Now S3Bucket API takes only bucketName as param. In future if we
-    // support some configurable options we need to fix this.
-    OmBucketInfo.Builder builder =
-        OmBucketInfo.newBuilder().setVolumeName(volumeName)
-            .setBucketName(s3BucketName).setIsVersionEnabled(Boolean.FALSE)
-            .setStorageType(StorageType.DEFAULT).setCreationTime(creationTime);
-
-    // Set default acls.
-    builder.setAcls(getDefaultAcls(userName));
-
-    return builder.build();
-  }
-
-  /**
-   * Build the audit map.
-   * @param userName user performing the request
-   * @param s3BucketName s3 bucket name
-   * @return auditMap
-   */
-  private Map<String, String> buildAuditMap(String userName,
-      String s3BucketName) {
-    Map<String, String> auditMap = new HashMap<>();
-    auditMap.put(OzoneConsts.USERNAME, userName);
-    auditMap.put(OzoneConsts.S3_BUCKET, s3BucketName);
-    return auditMap;
-  }
-
-  /**
-   * Get default acls.
-   */
-  private List<OzoneAcl> getDefaultAcls(String userName) {
-    UserGroupInformation ugi = createUGI();
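-    // Illustrative sketch (hedged): with userName "alice", no UGI, and a
-    // hypothetical S3_ADMIN_NAME of "ozoneS3Admin", this parses
-    // "user:alice:a,user:ozoneS3Admin:a", i.e. ALL ("a") rights for both
-    // the user and the S3 admin.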
-    return OzoneAcl.parseAcls("user:" + (ugi == null ? userName :
-        ugi.getUserName()) + ":a,user:" + S3_ADMIN_NAME + ":a");
-  }
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketDeleteRequest.java
deleted file mode 100644
index 5d5932f..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketDeleteRequest.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.s3.bucket;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.request.volume.OMVolumeRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.s3.bucket.S3BucketDeleteResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .S3DeleteBucketRequest;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.OzoneConsts.S3_BUCKET_MAX_LENGTH;
-import static org.apache.hadoop.ozone.OzoneConsts.S3_BUCKET_MIN_LENGTH;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_BUCKET_LOCK;
-
-/**
- * Handle Delete S3Bucket request.
- */
-public class S3BucketDeleteRequest extends OMVolumeRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(S3BucketDeleteRequest.class);
-
-  public S3BucketDeleteRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    S3DeleteBucketRequest s3DeleteBucketRequest =
-        getOmRequest().getDeleteS3BucketRequest();
-
-    // TODO: Do we need to enforce the bucket rules in this code path?
-    // https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
-
-    // For now, only the length is checked.
-    int bucketLength = s3DeleteBucketRequest.getS3BucketName().length();
-    if (bucketLength < S3_BUCKET_MIN_LENGTH ||
-        bucketLength >= S3_BUCKET_MAX_LENGTH) {
-      throw new OMException("S3BucketName must be at least 3 and not more " +
-          "than 63 characters long",
-          OMException.ResultCodes.S3_BUCKET_INVALID_LENGTH);
-    }
-
-    return getOmRequest().toBuilder().setUserInfo(getUserInfo()).build();
-
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-    S3DeleteBucketRequest s3DeleteBucketRequest =
-        getOmRequest().getDeleteS3BucketRequest();
-
-    String s3BucketName = s3DeleteBucketRequest.getS3BucketName();
-
-    OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.DeleteS3Bucket).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumS3BucketDeletes();
-    IOException exception = null;
-    boolean acquiredS3Lock = false;
-    boolean acquiredBucketLock = false;
-    String volumeName = null;
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    OMClientResponse omClientResponse = null;
-    try {
-      // TODO to support S3 ACL later.
-      acquiredS3Lock = omMetadataManager.getLock().acquireWriteLock(
-          S3_BUCKET_LOCK, s3BucketName);
-
-      String s3Mapping = omMetadataManager.getS3Table().get(s3BucketName);
-
-      if (s3Mapping == null) {
-        throw new OMException("S3Bucket " + s3BucketName + " not found",
-            OMException.ResultCodes.S3_BUCKET_NOT_FOUND);
-      } else {
-        volumeName = getOzoneVolumeName(s3Mapping);
-
-        acquiredBucketLock =
-            omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-                volumeName, s3BucketName);
-
-        String bucketKey = omMetadataManager.getBucketKey(volumeName,
-            s3BucketName);
-
-        // Update bucket table cache and s3 table cache.
-        omMetadataManager.getBucketTable().addCacheEntry(
-            new CacheKey<>(bucketKey),
-            new CacheValue<>(Optional.absent(), transactionLogIndex));
-        omMetadataManager.getS3Table().addCacheEntry(
-            new CacheKey<>(s3BucketName),
-            new CacheValue<>(Optional.absent(), transactionLogIndex));
-      }
-
-      omResponse.setDeleteS3BucketResponse(
-          OzoneManagerProtocolProtos.S3DeleteBucketResponse.newBuilder());
-
-      omClientResponse = new S3BucketDeleteResponse(s3BucketName, volumeName,
-          omResponse.build());
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new S3BucketDeleteResponse(null, null,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredBucketLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            s3BucketName);
-      }
-      if (acquiredS3Lock) {
-        omMetadataManager.getLock().releaseWriteLock(S3_BUCKET_LOCK,
-            s3BucketName);
-      }
-    }
-
-    // Performing audit logging outside of the lock.
-    auditLog(ozoneManager.getAuditLogger(),
-        buildAuditMessage(OMAction.DELETE_S3_BUCKET,
-            buildAuditMap(s3BucketName), exception,
-            getOmRequest().getUserInfo()));
-
-    if (exception == null) {
-      // Decrement the s3 bucket and ozone bucket counts, as an S3 bucket is
-      // mapped to an ozone volume/ozone bucket.
-      LOG.debug("S3Bucket {} successfully deleted", s3BucketName);
-      omMetrics.decNumS3Buckets();
-      omMetrics.decNumBuckets();
-
-      return omClientResponse;
-    } else {
-      LOG.error("S3Bucket Deletion failed for S3Bucket:{}", s3BucketName,
-          exception);
-      omMetrics.incNumS3BucketDeleteFails();
-      return omClientResponse;
-    }
-  }
-
-  /**
-   * Extract the volumeName from an s3Mapping of the form
-   * volumeName/bucketName.
-   * @param s3Mapping s3 table entry
-   * @return volumeName
-   * @throws IOException
-   */
-  private String getOzoneVolumeName(String s3Mapping) throws IOException {
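-    // e.g. (illustrative): "s3alice/photos" -> "s3alice"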
-    return s3Mapping.split("/")[0];
-  }
-
-  private Map<String, String> buildAuditMap(String s3BucketName) {
-    Map<String, String> auditMap = new HashMap<>();
-    auditMap.put(OzoneConsts.S3_BUCKET, s3BucketName);
-    return auditMap;
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java
deleted file mode 100644
index 7296585..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains classes related to s3 bucket requests.
- */
-package org.apache.hadoop.ozone.om.request.s3.bucket;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
deleted file mode 100644
index df0e168..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.s3.multipart;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.UniqueId;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-
-/**
- * Handles initiate multipart upload request.
- */
-public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
-
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(S3InitiateMultipartUploadRequest.class);
-
-  public S3InitiateMultipartUploadRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) {
-    MultipartInfoInitiateRequest multipartInfoInitiateRequest =
-        getOmRequest().getInitiateMultiPartUploadRequest();
-    Preconditions.checkNotNull(multipartInfoInitiateRequest);
-
-    OzoneManagerProtocolProtos.KeyArgs.Builder newKeyArgs =
-        multipartInfoInitiateRequest.getKeyArgs().toBuilder()
-            .setMultipartUploadID(UUID.randomUUID().toString() + "-" +
-                UniqueId.next()).setModificationTime(Time.now());
-
-    return getOmRequest().toBuilder()
-        .setUserInfo(getUserInfo())
-        .setInitiateMultiPartUploadRequest(
-            multipartInfoInitiateRequest.toBuilder().setKeyArgs(newKeyArgs))
-        .build();
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-    MultipartInfoInitiateRequest multipartInfoInitiateRequest =
-        getOmRequest().getInitiateMultiPartUploadRequest();
-
-    OzoneManagerProtocolProtos.KeyArgs keyArgs =
-        multipartInfoInitiateRequest.getKeyArgs();
-
-    Preconditions.checkNotNull(keyArgs.getMultipartUploadID());
-
-    String volumeName = keyArgs.getVolumeName();
-    String bucketName = keyArgs.getBucketName();
-    String keyName = keyArgs.getKeyName();
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    ozoneManager.getMetrics().incNumInitiateMultipartUploads();
-    boolean acquiredBucketLock = false;
-    IOException exception = null;
-    OmMultipartKeyInfo multipartKeyInfo = null;
-    OmKeyInfo omKeyInfo = null;
-
-    OMResponse.Builder omResponse = OMResponse.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload)
-        .setStatus(OzoneManagerProtocolProtos.Status.OK)
-        .setSuccess(true);
-    OMClientResponse omClientResponse = null;
-    try {
-      // TODO to support S3 ACL later.
-      acquiredBucketLock =
-          omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName,
-              bucketName);
-
-      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
-
-      // We add the uploadId to the key because, if multiple users perform
-      // multipart upload on the same key, each uploads independently, and
-      // whoever finally commits the key is the version we see in ozone. If
-      // we did not add the id and used the same key /volume/bucket/key,
-      // parts from multiple users would be applied to the same key, and the
-      // resulting key could be a mix of parts from different users.
-
-      // So if multipart upload is initiated multiple times on the same key,
-      // we store multiple entries in the openKey table.
-      // AWS S3 behaves the same way: each multipart upload returns a new
-      // uploadId, and even if the key already exists when an initiate
-      // multipart upload request is received, a multipart upload id is
-      // returned for the key.
-
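-      // Illustrative sketch (hedged; the exact layout comes from
-      // OMMetadataManager#getMultipartKey): two initiations on the same key
-      // yield distinct entries such as /vol/bucket/key/<uploadId-1> and
-      // /vol/bucket/key/<uploadId-2> in the openKey and multipartInfo tables.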
-      String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-          bucketName, keyName, keyArgs.getMultipartUploadID());
-
-      // Not checking if a key already exists for this in the keyTable, as
-      // the final complete multipart upload takes care of that. AWS S3
-      // behaves the same way: even when the key exists in a bucket, the
-      // user can still initiate an MPU.
-
-
-      multipartKeyInfo = new OmMultipartKeyInfo(
-          keyArgs.getMultipartUploadID(), new HashMap<>());
-
-      omKeyInfo = new OmKeyInfo.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .setCreationTime(keyArgs.getModificationTime())
-          .setModificationTime(keyArgs.getModificationTime())
-          .setReplicationType(keyArgs.getType())
-          .setReplicationFactor(keyArgs.getFactor())
-          .setOmKeyLocationInfos(Collections.singletonList(
-              new OmKeyLocationInfoGroup(0, new ArrayList<>())))
-          .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()))
-          .build();
-
-
-      // Add to cache
-      omMetadataManager.getOpenKeyTable().addCacheEntry(
-          new CacheKey<>(multipartKey),
-          new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex));
-      omMetadataManager.getMultipartInfoTable().addCacheEntry(
-          new CacheKey<>(multipartKey),
-          new CacheValue<>(Optional.of(multipartKeyInfo), transactionLogIndex));
-
-
-      omClientResponse =
-          new S3InitiateMultipartUploadResponse(multipartKeyInfo, omKeyInfo,
-          omResponse.setInitiateMultiPartUploadResponse(
-              MultipartInfoInitiateResponse.newBuilder()
-                  .setVolumeName(volumeName)
-                  .setBucketName(bucketName)
-                  .setKeyName(keyName)
-                  .setMultipartUploadID(keyArgs.getMultipartUploadID()))
-              .build());
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new S3InitiateMultipartUploadResponse(null, null,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredBucketLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
-      }
-    }
-
-
-
-    // audit log
-    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-        OMAction.INITIATE_MULTIPART_UPLOAD, buildKeyArgsAuditMap(keyArgs),
-        exception, getOmRequest().getUserInfo()));
-
-    if (exception == null) {
-      LOG.debug("S3 InitiateMultipart Upload request for Key {} in " +
-          "Volume/Bucket {}/{} is successfully completed", keyName,
-          volumeName, bucketName);
-
-      return omClientResponse;
-
-    } else {
-      ozoneManager.getMetrics().incNumInitiateMultipartUploadFails();
-      LOG.error("S3 InitiateMultipart Upload request for Key {} in " +
-              "Volume/Bucket {}/{} is failed", keyName, volumeName, bucketName,
-          exception);
-      return omClientResponse;
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
deleted file mode 100644
index b65328d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.s3.multipart;
-
-import java.io.IOException;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.s3.multipart
-    .S3MultipartUploadAbortResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .MultipartUploadAbortResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-
-/**
- * Handles Abort of multipart upload request.
- */
-public class S3MultipartUploadAbortRequest extends OMKeyRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(S3MultipartUploadAbortRequest.class);
-
-  public S3MultipartUploadAbortRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    KeyArgs keyArgs =
-        getOmRequest().getAbortMultiPartUploadRequest().getKeyArgs();
-
-    return getOmRequest().toBuilder().setAbortMultiPartUploadRequest(
-        getOmRequest().getAbortMultiPartUploadRequest().toBuilder()
-            .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now())))
-        .setUserInfo(getUserInfo()).build();
-
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-    OzoneManagerProtocolProtos.KeyArgs keyArgs =
-        getOmRequest().getAbortMultiPartUploadRequest().getKeyArgs();
-
-    String volumeName = keyArgs.getVolumeName();
-    String bucketName = keyArgs.getBucketName();
-    String keyName = keyArgs.getKeyName();
-
-    ozoneManager.getMetrics().incNumAbortMultipartUploads();
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    boolean acquiredLock = false;
-    IOException exception = null;
-    OmMultipartKeyInfo multipartKeyInfo = null;
-    String multipartKey = null;
-    OMResponse.Builder omResponse = OMResponse.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.AbortMultiPartUpload)
-        .setStatus(OzoneManagerProtocolProtos.Status.OK)
-        .setSuccess(true);
-    OMClientResponse omClientResponse = null;
-    try {
-      // TODO to support S3 ACL later.
-      acquiredLock =
-          omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName,
-              bucketName);
-
-      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
-
-      multipartKey = omMetadataManager.getMultipartKey(volumeName,
-          bucketName, keyName, keyArgs.getMultipartUploadID());
-
-      OmKeyInfo omKeyInfo =
-          omMetadataManager.getOpenKeyTable().get(multipartKey);
-
-      // If there is no entry in openKeyTable, then there is no multipart
-      // upload initiated for this key.
-      if (omKeyInfo == null) {
-        throw new OMException("Abort Multipart Upload Failed: volume: " +
-            volumeName + "bucket: " + bucketName + "key: " + keyName,
-            OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-      } else {
-        multipartKeyInfo = omMetadataManager
-            .getMultipartInfoTable().get(multipartKey);
-
-
-        // Update cache of openKeyTable and multipartInfo table.
-        // No need to add the cache entries to delete table, as the entries
-        // in delete table are not used by any read/write operations.
-        omMetadataManager.getOpenKeyTable().addCacheEntry(
-            new CacheKey<>(multipartKey),
-            new CacheValue<>(Optional.absent(), transactionLogIndex));
-        omMetadataManager.getMultipartInfoTable().addCacheEntry(
-            new CacheKey<>(multipartKey),
-            new CacheValue<>(Optional.absent(), transactionLogIndex));
-      }
-
-      omClientResponse = new S3MultipartUploadAbortResponse(multipartKey,
-          multipartKeyInfo,
-          omResponse.setAbortMultiPartUploadResponse(
-              MultipartUploadAbortResponse.newBuilder()).build());
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new S3MultipartUploadAbortResponse(multipartKey,
-          multipartKeyInfo, createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
-      }
-    }
-
-    // audit log
-    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-        OMAction.ABORT_MULTIPART_UPLOAD, buildKeyArgsAuditMap(keyArgs),
-        exception, getOmRequest().getUserInfo()));
-
-    if (exception == null) {
-      LOG.debug("Abort Multipart request is successfully completed for " +
-          "KeyName {} in VolumeName/Bucket {}/{}", keyName, volumeName,
-          bucketName);
-    } else {
-      ozoneManager.getMetrics().incNumAbortMultipartUploadFails();
-      LOG.error("Abort Multipart request is failed for " +
-          "KeyName {} in VolumeName/Bucket {}/{}", keyName, volumeName,
-          bucketName, exception);
-    }
-    return omClientResponse;
-
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
deleted file mode 100644
index cf7db65..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.s3.multipart;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.s3.multipart
-    .S3MultipartUploadCommitPartResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .MultipartCommitUploadPartRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .MultipartCommitUploadPartResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-
-/**
- * Handle Multipart upload commit part request.
- */
-public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
-
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(S3MultipartUploadCommitPartRequest.class);
-
-  public S3MultipartUploadCommitPartRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) {
-    MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
-        getOmRequest().getCommitMultiPartUploadRequest();
-
-    return getOmRequest().toBuilder().setCommitMultiPartUploadRequest(
-        multipartCommitUploadPartRequest.toBuilder()
-            .setKeyArgs(multipartCommitUploadPartRequest.getKeyArgs()
-                .toBuilder().setModificationTime(Time.now())))
-        .setUserInfo(getUserInfo()).build();
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-    MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
-        getOmRequest().getCommitMultiPartUploadRequest();
-
-    OzoneManagerProtocolProtos.KeyArgs keyArgs =
-        multipartCommitUploadPartRequest.getKeyArgs();
-
-    String volumeName = keyArgs.getVolumeName();
-    String bucketName = keyArgs.getBucketName();
-    String keyName = keyArgs.getKeyName();
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    ozoneManager.getMetrics().incNumCommitMultipartUploadParts();
-
-    boolean acquiredLock = false;
-
-    IOException exception = null;
-    String partName = null;
-    OMResponse.Builder omResponse = OMResponse.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload)
-        .setStatus(OzoneManagerProtocolProtos.Status.OK)
-        .setSuccess(true);
-    OMClientResponse omClientResponse = null;
-    OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo = null;
-    String openKey = null;
-    OmKeyInfo omKeyInfo = null;
-    String multipartKey = null;
-    OmMultipartKeyInfo multipartKeyInfo = null;
-    try {
-      // TODO to support S3 ACL later.
-      acquiredLock =
-          omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName,
-              bucketName);
-
-      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
-
-      String uploadID = keyArgs.getMultipartUploadID();
-      multipartKey = omMetadataManager.getMultipartKey(volumeName,
-          bucketName, keyName, uploadID);
-
-      multipartKeyInfo =
-          omMetadataManager.getMultipartInfoTable().get(multipartKey);
-
-      long clientID = multipartCommitUploadPartRequest.getClientID();
-
-      openKey = omMetadataManager.getOpenKey(
-          volumeName, bucketName, keyName, clientID);
-
-      omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-
-      if (omKeyInfo == null) {
-        throw new OMException("Failed to commit Multipart Upload key, as " +
-            openKey + "entry is not found in the openKey table", KEY_NOT_FOUND);
-      }
-
-      // set the data size and location info list
-      omKeyInfo.setDataSize(keyArgs.getDataSize());
-      omKeyInfo.updateLocationInfoList(keyArgs.getKeyLocationsList().stream()
-          .map(OmKeyLocationInfo::getFromProtobuf)
-          .collect(Collectors.toList()));
-      // Set Modification time
-      omKeyInfo.setModificationTime(keyArgs.getModificationTime());
-
-      partName = omMetadataManager.getOzoneKey(volumeName, bucketName,
-          keyName) + clientID;
-
-      if (multipartKeyInfo == null) {
-        // This can occur when a user has started uploading a part and, by
-        // the time the commit of that part happens, has in between requested
-        // abort of the multipart upload. If we just threw an exception, the
-        // data would not be garbage collected, so move this part to the
-        // delete table and then throw the error.
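-        // Illustrative race (hypothetical timeline): t1 initiate MPU with
-        // uploadId U; t2 client starts uploading part N; t3 abort MPU
-        // removes the multipartInfo entry for U; t4 the commit of part N
-        // arrives, finds multipartKeyInfo == null, and lands here.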
-        throw new OMException("No such Multipart upload is with specified " +
-            "uploadId " + uploadID,
-            OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-      } else {
-        int partNumber = keyArgs.getMultipartNumber();
-        oldPartKeyInfo = multipartKeyInfo.getPartKeyInfo(partNumber);
-
-        // Build this multipart upload part info.
-        OzoneManagerProtocolProtos.PartKeyInfo.Builder partKeyInfo =
-            OzoneManagerProtocolProtos.PartKeyInfo.newBuilder();
-        partKeyInfo.setPartName(partName);
-        partKeyInfo.setPartNumber(partNumber);
-        partKeyInfo.setPartKeyInfo(omKeyInfo.getProtobuf());
-
-        // Add this part information in to multipartKeyInfo.
-        multipartKeyInfo.addPartKeyInfo(partNumber, partKeyInfo.build());
-
-        // Add to cache.
-
-        // Delete from open key table and add it to multipart info table.
-        // No need to add cache entries to the delete table, as no
-        // read/write requests use that info for validation.
-        omMetadataManager.getMultipartInfoTable().addCacheEntry(
-            new CacheKey<>(multipartKey),
-            new CacheValue<>(Optional.of(multipartKeyInfo),
-                transactionLogIndex));
-
-        omMetadataManager.getOpenKeyTable().addCacheEntry(
-            new CacheKey<>(openKey),
-            new CacheValue<>(Optional.absent(), transactionLogIndex));
-      }
-
-      omResponse.setCommitMultiPartUploadResponse(
-          MultipartCommitUploadPartResponse.newBuilder().setPartName(partName));
-      omClientResponse = new S3MultipartUploadCommitPartResponse(multipartKey,
-        openKey, omKeyInfo, multipartKeyInfo,
-          oldPartKeyInfo, omResponse.build());
-
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new S3MultipartUploadCommitPartResponse(multipartKey,
-          openKey, omKeyInfo, multipartKeyInfo,
-          oldPartKeyInfo, createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
-      }
-    }
-
-    // audit log
-    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-        OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY, buildKeyArgsAuditMap(keyArgs),
-        exception, getOmRequest().getUserInfo()));
-
-    if (exception == null) {
-      LOG.debug("MultipartUpload Commit is successfully for Key:{} in " +
-          "Volume/Bucket {}/{}", keyName, volumeName, bucketName);
-
-    } else {
-      LOG.error("MultipartUpload Commit is failed for Key:{} in " +
-          "Volume/Bucket {}/{}", keyName, volumeName, bucketName, exception);
-      ozoneManager.getMetrics().incNumCommitMultipartUploadPartFails();
-    }
-    return omClientResponse;
-  }
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
deleted file mode 100644
index ace2dbc..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.s3.multipart;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import com.google.common.base.Optional;
-import org.apache.commons.codec.digest.DigestUtils;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Handle Multipart upload complete request.
- */
-public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(S3MultipartUploadCompleteRequest.class);
-
-  public S3MultipartUploadCompleteRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    MultipartUploadCompleteRequest multipartUploadCompleteRequest =
-        getOmRequest().getCompleteMultiPartUploadRequest();
-
-    KeyArgs keyArgs = multipartUploadCompleteRequest.getKeyArgs();
-
-    return getOmRequest().toBuilder()
-        .setCompleteMultiPartUploadRequest(multipartUploadCompleteRequest
-            .toBuilder().setKeyArgs(keyArgs.toBuilder()
-                .setModificationTime(Time.now())))
-        .setUserInfo(getUserInfo()).build();
-
-  }
-
-  @Override
-  @SuppressWarnings("methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-    MultipartUploadCompleteRequest multipartUploadCompleteRequest =
-        getOmRequest().getCompleteMultiPartUploadRequest();
-
-    KeyArgs keyArgs = multipartUploadCompleteRequest.getKeyArgs();
-
-    List<OzoneManagerProtocolProtos.Part> partsList =
-        multipartUploadCompleteRequest.getPartsListList();
-
-    String volumeName = keyArgs.getVolumeName();
-    String bucketName = keyArgs.getBucketName();
-    String keyName = keyArgs.getKeyName();
-    String uploadID = keyArgs.getMultipartUploadID();
-
-    ozoneManager.getMetrics().incNumCompleteMultipartUploads();
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    boolean acquiredLock = false;
-    OMResponse.Builder omResponse = OMResponse.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload)
-        .setStatus(OzoneManagerProtocolProtos.Status.OK)
-        .setSuccess(true);
-    OMClientResponse omClientResponse = null;
-    IOException exception = null;
-    OmMultipartUploadCompleteList multipartUploadList = null;
-    try {
-      // TODO to support S3 ACL later.
-      TreeMap<Integer, String> partsMap = new TreeMap<>();
-      for (OzoneManagerProtocolProtos.Part part : partsList) {
-        partsMap.put(part.getPartNumber(), part.getPartName());
-      }
-
-      multipartUploadList = new OmMultipartUploadCompleteList(partsMap);
-
-      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-          volumeName, bucketName);
-
-      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
-
-      String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-          bucketName, keyName, uploadID);
-      String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-          keyName);
-      OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-
-      OmMultipartKeyInfo multipartKeyInfo = omMetadataManager
-          .getMultipartInfoTable().get(multipartKey);
-
-      if (multipartKeyInfo == null) {
-        throw new OMException("Complete Multipart Upload Failed: volume: " +
-            volumeName + "bucket: " + bucketName + "key: " + keyName,
-            OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-      }
-      TreeMap<Integer, PartKeyInfo> partKeyInfoMap =
-          multipartKeyInfo.getPartKeyInfoMap();
-
-      TreeMap<Integer, String> multipartMap = multipartUploadList
-          .getMultipartMap();
-
-      // Since the maps are sorted, the last entry in both maps should have
-      // a partNumber equal to the size of the map. For example, with part
-      // entries 1, 2, 3, 4, the last entry should have key 4 when the
-      // complete multipart upload request arrives; if the last key is
-      // greater or less than the map size, something is wrong, so throw an
-      // error.
-
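-      // Worked example (illustrative): stored parts {1, 2, 3, 4} with a
-      // matching client list -> both sizes are 4 and both last keys are 4,
-      // so the checks below pass. Stored parts {1, 2, 4} -> the last key
-      // (4) exceeds the map size (3), so MISSING_UPLOAD_PARTS is thrown.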
-      Map.Entry<Integer, String> multipartMapLastEntry = multipartMap
-          .lastEntry();
-      Map.Entry<Integer, PartKeyInfo> partKeyInfoLastEntry =
-          partKeyInfoMap.lastEntry();
-      if (partKeyInfoMap.size() != multipartMap.size()) {
-        throw new OMException("Complete Multipart Upload Failed: volume: " +
-            volumeName + "bucket: " + bucketName + "key: " + keyName,
-            OMException.ResultCodes.MISMATCH_MULTIPART_LIST);
-      }
-
-      // The last entry's part number should equal the size of the map;
-      // otherwise we are missing some parts but got a complete request.
-      if (multipartMapLastEntry.getKey() != partKeyInfoMap.size() ||
-          partKeyInfoLastEntry.getKey() != partKeyInfoMap.size()) {
-        throw new OMException("Complete Multipart Upload Failed: volume: " +
-            volumeName + "bucket: " + bucketName + "key: " + keyName,
-            OMException.ResultCodes.MISSING_UPLOAD_PARTS);
-      }
-      HddsProtos.ReplicationType type = partKeyInfoLastEntry.getValue()
-          .getPartKeyInfo().getType();
-      HddsProtos.ReplicationFactor factor = partKeyInfoLastEntry.getValue()
-          .getPartKeyInfo().getFactor();
-      List< OmKeyLocationInfo > locations = new ArrayList<>();
-      long size = 0;
-      int partsCount =1;
-      int partsMapSize = partKeyInfoMap.size();
-      for(Map.Entry<Integer, PartKeyInfo > partKeyInfoEntry : partKeyInfoMap
-          .entrySet()) {
-        int partNumber = partKeyInfoEntry.getKey();
-        PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
-        // Check we have all parts to complete multipart upload and also
-        // check partNames provided match with actual part names
-        String providedPartName = multipartMap.get(partNumber);
-        String actualPartName = partKeyInfo.getPartName();
-        if (partNumber == partsCount) {
-          if (!actualPartName.equals(providedPartName)) {
-            throw new OMException("Complete Multipart Upload Failed: volume: " +
-                volumeName + "bucket: " + bucketName + "key: " + keyName,
-                OMException.ResultCodes.MISMATCH_MULTIPART_LIST);
-          }
-          OmKeyInfo currentPartKeyInfo = OmKeyInfo
-              .getFromProtobuf(partKeyInfo.getPartKeyInfo());
-          // Check that no part size is less than 5 MB; only the last part
-          // may be smaller than 5 MB.
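-          // Illustrative (assuming OM_MULTIPART_MIN_SIZE is 5 MB): parts of
-          // 6 MB, 6 MB, 1 MB pass, since only the last part is under the
-          // minimum; parts of 1 MB, 6 MB fail with ENTITY_TOO_SMALL on the
-          // first part.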
-          if (partsCount != partsMapSize &&
-              currentPartKeyInfo.getDataSize() < OM_MULTIPART_MIN_SIZE) {
-            LOG.error("MultipartUpload: " + ozoneKey + "Part number: " +
-                partKeyInfo.getPartNumber() + "size " + currentPartKeyInfo
-                .getDataSize() + " is less than minimum part size " +
-                OzoneConsts.OM_MULTIPART_MIN_SIZE);
-            throw new OMException("Complete Multipart Upload Failed: Entity " +
-                "too small: volume: " + volumeName + "bucket: " + bucketName
-                + "key: " + keyName, OMException.ResultCodes.ENTITY_TOO_SMALL);
-          }
-          // All part keys have only one version.
-          OmKeyLocationInfoGroup currentKeyInfoGroup = currentPartKeyInfo
-              .getKeyLocationVersions().get(0);
-          locations.addAll(currentKeyInfoGroup.getLocationList());
-          size += currentPartKeyInfo.getDataSize();
-        } else {
-          throw new OMException("Complete Multipart Upload Failed: volume: " +
-              volumeName + "bucket: " + bucketName + "key: " + keyName,
-              OMException.ResultCodes.MISSING_UPLOAD_PARTS);
-        }
-        partsCount++;
-      }
-      if (omKeyInfo == null) {
-        // This is a newly added key, it does not have any versions.
-        OmKeyLocationInfoGroup keyLocationInfoGroup = new
-            OmKeyLocationInfoGroup(0, locations);
-        // A newly created key, this is the first version.
-        omKeyInfo = new OmKeyInfo.Builder().setVolumeName(volumeName)
-            .setBucketName(bucketName).setKeyName(keyName)
-            .setReplicationFactor(factor).setReplicationType(type)
-            .setCreationTime(keyArgs.getModificationTime())
-            .setModificationTime(keyArgs.getModificationTime())
-            .setDataSize(size)
-            .setOmKeyLocationInfos(
-                Collections.singletonList(keyLocationInfoGroup))
-            .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()))
-            .build();
-      } else {
-        // A version already exists, so this should be added as a new
-        // version. But since versioning is not yet supported, follow the
-        // commit key approach instead. When versioning support arrives,
-        // this can be replaced with keyInfo.addNewVersion(locations);
-        omKeyInfo.updateLocationInfoList(locations);
-        omKeyInfo.setModificationTime(keyArgs.getModificationTime());
-      }
-
-      updateCache(omMetadataManager, ozoneKey, multipartKey, omKeyInfo,
-          transactionLogIndex);
-
-      omResponse.setCompleteMultiPartUploadResponse(
-          MultipartUploadCompleteResponse.newBuilder()
-              .setVolume(volumeName)
-              .setBucket(bucketName)
-              .setKey(keyName)
-              .setHash(DigestUtils.sha256Hex(keyName)));
-
-      omClientResponse = new S3MultipartUploadCompleteResponse(multipartKey,
-          omKeyInfo, omResponse.build());
-
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new S3MultipartUploadCompleteResponse(null, null,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
-      }
-    }
-
-    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
-    if (multipartUploadList != null) {
-      auditMap.put(OzoneConsts.MULTIPART_LIST, multipartUploadList
-          .getMultipartMap().toString());
-    }
-
-    // audit log
-    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-        OMAction.COMPLETE_MULTIPART_UPLOAD, auditMap, exception,
-        getOmRequest().getUserInfo()));
-
-    if (exception == null) {
-      LOG.debug("MultipartUpload Complete request is successfull for Key: {} " +
-          "in Volume/Bucket {}/{}", keyName, volumeName, bucketName);
-    } else {
-      LOG.error("MultipartUpload Complete request failed for Key: {} " +
-          "in Volume/Bucket {}/{}", keyName, volumeName, bucketName, exception);
-      ozoneManager.getMetrics().incNumCompleteMultipartUploadFails();
-    }
-
-    return omClientResponse;
-  }
-
-  private void updateCache(OMMetadataManager omMetadataManager,
-      String ozoneKey, String multipartKey, OmKeyInfo omKeyInfo,
-      long transactionLogIndex) {
-    // Update cache.
-    // 1. Add key entry to key table.
-    // 2. Delete multipartKey entry from openKeyTable and multipartInfo table.
-    omMetadataManager.getKeyTable().addCacheEntry(
-        new CacheKey<>(ozoneKey),
-        new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex));
-
-    omMetadataManager.getOpenKeyTable().addCacheEntry(
-        new CacheKey<>(multipartKey),
-        new CacheValue<>(Optional.absent(), transactionLogIndex));
-    omMetadataManager.getMultipartInfoTable().addCacheEntry(
-        new CacheKey<>(multipartKey),
-        new CacheValue<>(Optional.absent(), transactionLogIndex));
-  }
-}
-
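
The deleted handler above walks the client-supplied part list against the
parts the OM recorded for the upload. A minimal standalone sketch of the
same three checks (equal sizes, last key equal to the map size, sequential
walk comparing part names), with hypothetical names and TreeMap standing in
for the OM's tables:

    import java.util.Map;
    import java.util.TreeMap;

    final class PartListValidatorSketch {

      /** Hypothetical stand-in for the MISMATCH/MISSING result codes. */
      static class ValidationException extends Exception {
        ValidationException(String msg) { super(msg); }
      }

      // Parts must be numbered 1..N with no gaps, and every client-supplied
      // part name must match the name recorded for that part number.
      // Assumes at least one part, as a complete-upload request implies.
      static void validate(TreeMap<Integer, String> clientParts,
          TreeMap<Integer, String> recordedParts) throws ValidationException {
        int size = recordedParts.size();
        if (clientParts.size() != size) {
          throw new ValidationException("part count mismatch");
        }
        if (clientParts.lastKey() != size || recordedParts.lastKey() != size) {
          throw new ValidationException("parts are not contiguous 1.." + size);
        }
        int expected = 1;
        for (Map.Entry<Integer, String> e : recordedParts.entrySet()) {
          if (e.getKey() != expected
              || !e.getValue().equals(clientParts.get(e.getKey()))) {
            throw new ValidationException("bad or missing part " + expected);
          }
          expected++;
        }
      }
    }
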
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java
deleted file mode 100644
index 42b9920..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains classes related to S3 multipart upload requests.
- */
-package org.apache.hadoop.ozone.om.request.s3.multipart;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java
deleted file mode 100644
index d8f6478..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.s3.security;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import com.google.common.base.Optional;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.s3.security.S3GetSecretResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UpdateGetS3SecretRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Secret;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_SECRET_LOCK;
-
-/**
- * Handles GetS3Secret request.
- */
-public class S3GetSecretRequest extends OMClientRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(S3GetSecretRequest.class);
-
-  public S3GetSecretRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    GetS3SecretRequest s3GetSecretRequest =
-        getOmRequest().getGetS3SecretRequest();
-
-    // Generate S3 Secret to be used by OM quorum.
-    String kerberosID = s3GetSecretRequest.getKerberosID();
-
-    UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser();
-    if (!user.getUserName().equals(kerberosID)) {
-      throw new OMException("User mismatch. Requested user name is " +
-          "mismatched " + kerberosID +", with current user " +
-          user.getUserName(), OMException.ResultCodes.USER_MISMATCH);
-    }
-
-    String s3Secret = DigestUtils.sha256Hex(OmUtils.getSHADigest());
-
-    UpdateGetS3SecretRequest updateGetS3SecretRequest =
-        UpdateGetS3SecretRequest.newBuilder()
-            .setAwsSecret(s3Secret)
-            .setKerberosID(kerberosID).build();
-
-    // The client issues a GetS3Secret request; when the OM leader receives
-    // it, the leader generates the s3Secret. The original GetS3Secret
-    // request is converted to an UpdateGetS3Secret request carrying the
-    // generated secret, and this updated request is submitted to Ratis.
-    // This way the S3Secret created by the leader is replicated across all
-    // OMs, and the original GetS3Secret request from the client needs no
-    // proto changes.
-    OMRequest.Builder omRequest = OMRequest.newBuilder()
-        .setUserInfo(getUserInfo())
-        .setUpdateGetS3SecretRequest(updateGetS3SecretRequest)
-        .setCmdType(getOmRequest().getCmdType())
-        .setClientId(getOmRequest().getClientId());
-
-    if (getOmRequest().hasTraceID()) {
-      omRequest.setTraceID(getOmRequest().getTraceID());
-    }
-
-    return omRequest.build();
-
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-
-    OMClientResponse omClientResponse = null;
-    OMResponse.Builder omResponse = OMResponse.newBuilder()
-            .setCmdType(OzoneManagerProtocolProtos.Type.GetS3Secret)
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setSuccess(true);
-    boolean acquiredLock = false;
-    IOException exception = null;
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    UpdateGetS3SecretRequest updateGetS3SecretRequest =
-        getOmRequest().getUpdateGetS3SecretRequest();
-    String kerberosID = updateGetS3SecretRequest.getKerberosID();
-    try {
-      String awsSecret = updateGetS3SecretRequest.getAwsSecret();
-      acquiredLock =
-         omMetadataManager.getLock().acquireWriteLock(S3_SECRET_LOCK,
-             kerberosID);
-
-      S3SecretValue s3SecretValue =
-          omMetadataManager.getS3SecretTable().get(kerberosID);
-
-      // If the user's s3Secret is not in the S3SecretTable, add it to cache.
-      if (s3SecretValue == null) {
-        omMetadataManager.getS3SecretTable().addCacheEntry(
-            new CacheKey<>(kerberosID),
-            new CacheValue<>(Optional.of(new S3SecretValue(kerberosID,
-                awsSecret)), transactionLogIndex));
-      } else {
-        // If it already exists, use the existing one.
-        awsSecret = s3SecretValue.getAwsSecret();
-      }
-
-      GetS3SecretResponse.Builder getS3SecretResponse = GetS3SecretResponse
-          .newBuilder().setS3Secret(S3Secret.newBuilder()
-          .setAwsSecret(awsSecret).setKerberosID(kerberosID));
-
-      if (s3SecretValue == null) {
-        omClientResponse =
-            new S3GetSecretResponse(new S3SecretValue(kerberosID, awsSecret),
-            omResponse.setGetS3SecretResponse(getS3SecretResponse).build());
-      } else {
-        // Since it already exists, we don't need to add it to the DB again,
-        // so set the value to null.
-        omClientResponse = new S3GetSecretResponse(null,
-            omResponse.setGetS3SecretResponse(getS3SecretResponse).build());
-      }
-
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new S3GetSecretResponse(null,
-          createErrorOMResponse(omResponse, ex));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(ozoneManagerDoubleBufferHelper.add(
-            omClientResponse, transactionLogIndex));
-      }
-      if (acquiredLock) {
-        omMetadataManager.getLock().releaseWriteLock(S3_SECRET_LOCK,
-            kerberosID);
-      }
-    }
-
-
-    Map<String, String> auditMap = new HashMap<>();
-    auditMap.put(OzoneConsts.S3_GETSECRET_USER, kerberosID);
-
-    // audit log
-    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-        OMAction.GET_S3_SECRET, auditMap,
-        exception, getOmRequest().getUserInfo()));
-
-    if (exception == null) {
-      LOG.debug("Secret for accessKey:{} is generated Successfully",
-          kerberosID);
-    } else {
-      LOG.error("Secret for accessKey:{} is generation failed", kerberosID,
-          exception);
-    }
-    return omClientResponse;
-  }
-}
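
S3GetSecretRequest.preExecute above rewrites the incoming request on the
leader before it enters Ratis, so the non-deterministic step (secret
generation) runs exactly once and every replica applies identical bytes. A
minimal sketch of that pre-execute pattern, with hypothetical types:

    import java.security.SecureRandom;
    import java.util.Base64;

    final class PreExecuteSketch {

      /** Hypothetical replicated request carrying the generated secret. */
      static final class UpdateSecretRequest {
        final String user;
        final String secret;
        UpdateSecretRequest(String user, String secret) {
          this.user = user;
          this.secret = secret;
        }
      }

      private static final SecureRandom RNG = new SecureRandom();

      // Runs only on the leader, before consensus. The generated secret is
      // embedded in the request that is actually replicated, so followers
      // never run the random generator themselves.
      static UpdateSecretRequest preExecute(String user) {
        byte[] raw = new byte[32];
        RNG.nextBytes(raw);
        return new UpdateSecretRequest(user,
            Base64.getEncoder().encodeToString(raw));
      }
    }
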
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/package-info.java
deleted file mode 100644
index 94a6b11..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package contains classes related to S3 security requests.
- */
-package org.apache.hadoop.ozone.om.request.s3.security;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java
deleted file mode 100644
index 7bf7a0b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.security;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.security.OMCancelDelegationTokenResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelDelegationTokenResponseProto;
-import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.security.proto.SecurityProtos;
-import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Handle CancelDelegationToken Request.
- */
-public class OMCancelDelegationTokenRequest extends OMClientRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMCancelDelegationTokenRequest.class);
-
-  public OMCancelDelegationTokenRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-
-    // Call OM to cancel the token; this checks whether the token can be
-    // cancelled. It does not remove the token from the DB or the in-memory
-    // map.
-    ozoneManager.cancelDelegationToken(getToken());
-
-    return super.preExecute(ozoneManager);
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    OMClientResponse omClientResponse = null;
-    OMResponse.Builder omResponse =
-        OMResponse.newBuilder()
-            .setCmdType(OzoneManagerProtocolProtos.Type.CancelDelegationToken)
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setSuccess(true);
-    OzoneTokenIdentifier ozoneTokenIdentifier = null;
-    try {
-      ozoneTokenIdentifier =
-          OzoneTokenIdentifier.readProtoBuf(getToken().getIdentifier());
-
-      // Remove token from the in-memory map.
-      ozoneManager.getDelegationTokenMgr().removeToken(ozoneTokenIdentifier);
-
-      // Update Cache.
-      omMetadataManager.getDelegationTokenTable().addCacheEntry(
-          new CacheKey<>(ozoneTokenIdentifier),
-          new CacheValue<>(Optional.absent(), transactionLogIndex));
-
-      omClientResponse =
-          new OMCancelDelegationTokenResponse(ozoneTokenIdentifier,
-              omResponse.setCancelDelegationTokenResponse(
-                  CancelDelegationTokenResponseProto.newBuilder().setResponse(
-                      SecurityProtos.CancelDelegationTokenResponseProto
-                          .newBuilder())).build());
-    } catch (IOException ex) {
-      LOG.error("Error in cancel DelegationToken {}", ozoneTokenIdentifier, ex);
-      omClientResponse = new OMCancelDelegationTokenResponse(null,
-          createErrorOMResponse(omResponse, ex));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Cancelled delegation token: {}", ozoneTokenIdentifier);
-    }
-
-    return omClientResponse;
-  }
-
-
-  public Token<OzoneTokenIdentifier> getToken() {
-    CancelDelegationTokenRequestProto cancelDelegationTokenRequest =
-        getOmRequest().getCancelDelegationTokenRequest();
-
-    return OMPBHelper.convertToDelegationToken(
-        cancelDelegationTokenRequest.getToken());
-  }
-}
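
The cancel handler above records the removal in the table cache as a
CacheValue wrapping Optional.absent(): a tombstone that masks the committed
DB entry until the double buffer flushes the real delete. A toy illustration
of the lookup rule such a layered cache implies, using Guava's Optional as
the hunks above do (hypothetical class):

    import com.google.common.base.Optional;
    import java.util.HashMap;
    import java.util.Map;

    /** Toy table: pending edits layered over a committed store. */
    final class TombstoneTableSketch<K, V> {
      private final Map<K, V> committed = new HashMap<>();
      // Optional.absent() records a pending delete (a tombstone).
      private final Map<K, Optional<V>> cache = new HashMap<>();

      void addCacheEntry(K key, Optional<V> value) {
        cache.put(key, value);
      }

      // Cache wins: a tombstone hides the committed value until flush.
      V get(K key) {
        Optional<V> pending = cache.get(key);
        if (pending != null) {
          return pending.orNull();   // absent() -> null, i.e. "deleted"
        }
        return committed.get(key);
      }
    }
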
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java
deleted file mode 100644
index 9c667e8..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java
+++ /dev/null
@@ -1,187 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.security;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.security.OMGetDelegationTokenResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetDelegationTokenResponseProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UpdateGetDelegationTokenRequest;
-import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.security.proto.SecurityProtos;
-import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Handle GetDelegationToken Request.
- */
-public class OMGetDelegationTokenRequest extends OMClientRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMGetDelegationTokenRequest.class);
-
-  public OMGetDelegationTokenRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    GetDelegationTokenRequestProto getDelegationTokenRequest =
-        getOmRequest().getGetDelegationTokenRequest();
-
-    // Call OM to create token
-    Token<OzoneTokenIdentifier> token = ozoneManager
-        .getDelegationToken(new Text(getDelegationTokenRequest.getRenewer()));
-
-
-    // The client issues a GetDelegationToken request; when the OM leader
-    // receives it, the leader generates a token. The original
-    // GetDelegationToken request is converted to an
-    // UpdateGetDelegationToken request with the generated token
-    // information, and this updated request is submitted to Ratis. This way
-    // the delegation token created by the leader is replicated across all
-    // OMs, and the original request from the client needs no proto changes.
-
-    // Create UpdateGetDelegationTokenRequest with token response.
-
-    OMRequest.Builder omRequest;
-    if (token != null) {
-      omRequest = OMRequest.newBuilder().setUserInfo(getUserInfo())
-          .setUpdateGetDelegationTokenRequest(
-              UpdateGetDelegationTokenRequest.newBuilder()
-                  .setGetDelegationTokenResponse(
-                      GetDelegationTokenResponseProto.newBuilder()
-                          .setResponse(
-                              SecurityProtos.GetDelegationTokenResponseProto
-                              .newBuilder().setToken(OMPBHelper
-                                  .convertToTokenProto(token)).build())
-                          .build()))
-          .setCmdType(getOmRequest().getCmdType())
-          .setClientId(getOmRequest().getClientId());
-    } else {
-      // If token is null, do not set GetDelegationTokenResponse with response.
-      omRequest = OMRequest.newBuilder().setUserInfo(getUserInfo())
-          .setUpdateGetDelegationTokenRequest(
-              UpdateGetDelegationTokenRequest.newBuilder()
-                  .setGetDelegationTokenResponse(
-                      GetDelegationTokenResponseProto.newBuilder()))
-          .setCmdType(getOmRequest().getCmdType())
-          .setClientId(getOmRequest().getClientId());
-    }
-    if (getOmRequest().hasTraceID()) {
-      omRequest.setTraceID(getOmRequest().getTraceID());
-    }
-    return omRequest.build();
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    UpdateGetDelegationTokenRequest updateGetDelegationTokenRequest =
-        getOmRequest().getUpdateGetDelegationTokenRequest();
-
-    OMResponse.Builder omResponse =
-        OMResponse.newBuilder()
-            .setCmdType(OzoneManagerProtocolProtos.Type.GetDelegationToken)
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setSuccess(true);
-
-    OMClientResponse omClientResponse = null;
-
-
-    // If security is not enabled and a token request is received, the
-    // leader returns a null token. So check here whether the updated
-    // GetDelegationTokenResponse has its response set; if not, the token
-    // is null.
-    if (!updateGetDelegationTokenRequest.getGetDelegationTokenResponse()
-        .hasResponse()) {
-      omClientResponse = new OMGetDelegationTokenResponse(null, -1L,
-          omResponse.setGetDelegationTokenResponse(
-              GetDelegationTokenResponseProto.newBuilder()).build());
-      omClientResponse.setFlushFuture(
-          ozoneManagerDoubleBufferHelper.add(omClientResponse,
-              transactionLogIndex));
-      return omClientResponse;
-    }
-
-    SecurityProtos.TokenProto tokenProto = updateGetDelegationTokenRequest
-        .getGetDelegationTokenResponse().getResponse().getToken();
-
-    Token<OzoneTokenIdentifier> ozoneTokenIdentifierToken =
-        OMPBHelper.convertToDelegationToken(tokenProto);
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    try {
-      OzoneTokenIdentifier ozoneTokenIdentifier = OzoneTokenIdentifier.
-          readProtoBuf(ozoneTokenIdentifierToken.getIdentifier());
-
-      // Update the in-memory token map.
-      long renewTime = ozoneManager.getDelegationTokenMgr()
-          .updateToken(ozoneTokenIdentifierToken, ozoneTokenIdentifier);
-
-      // Update Cache.
-      omMetadataManager.getDelegationTokenTable().addCacheEntry(
-          new CacheKey<>(ozoneTokenIdentifier),
-          new CacheValue<>(Optional.of(renewTime), transactionLogIndex));
-
-      omClientResponse =
-          new OMGetDelegationTokenResponse(ozoneTokenIdentifier, renewTime,
-              omResponse.setGetDelegationTokenResponse(
-                  updateGetDelegationTokenRequest
-                      .getGetDelegationTokenResponse()).build());
-    } catch (IOException ex) {
-      LOG.error("Error in Updating DelegationToken {}",
-          ozoneTokenIdentifierToken, ex);
-      omClientResponse = new OMGetDelegationTokenResponse(null, -1L,
-          createErrorOMResponse(omResponse, ex));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Updated delegation token in-memory map: {}",
-          ozoneTokenIdentifierToken);
-    }
-
-    return omClientResponse;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java
deleted file mode 100644
index b2c03bb..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.security;
-
-import java.io.IOException;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.security.OMRenewDelegationTokenResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenewDelegationTokenResponseProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UpdateRenewDelegationTokenRequest;
-import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-/**
- * Handle RenewDelegationToken Request.
- */
-public class OMRenewDelegationTokenRequest extends OMClientRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMRenewDelegationTokenRequest.class);
-
-  public OMRenewDelegationTokenRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    RenewDelegationTokenRequestProto renewDelegationTokenRequest =
-        getOmRequest().getRenewDelegationTokenRequest();
-
-    // Call OM to renew token
-    long renewTime = ozoneManager.renewDelegationToken(
-        OMPBHelper.convertToDelegationToken(
-            renewDelegationTokenRequest.getToken()));
-
-    RenewDelegationTokenResponseProto.Builder renewResponse =
-        RenewDelegationTokenResponseProto.newBuilder();
-
-    renewResponse.setResponse(org.apache.hadoop.security.proto.SecurityProtos
-        .RenewDelegationTokenResponseProto.newBuilder()
-        .setNewExpiryTime(renewTime));
-
-
-    // The client issues a RenewDelegationToken request; when the OM leader
-    // receives it, the leader renews the token. The original
-    // RenewDelegationToken request is converted to an
-    // UpdateRenewDelegationToken request with the token and renewal
-    // information, and this updated request is submitted to Ratis. This way
-    // the delegation token renewed by the leader is replicated across all
-    // OMs, and the original request from the client needs no proto changes.
-
-    // Create UpdateRenewDelegationTokenRequest with original request and
-    // expiry time.
-    OMRequest.Builder omRequest = OMRequest.newBuilder()
-        .setUserInfo(getUserInfo())
-        .setUpdatedRenewDelegationTokenRequest(
-            UpdateRenewDelegationTokenRequest.newBuilder()
-                .setRenewDelegationTokenRequest(renewDelegationTokenRequest)
-                .setRenewDelegationTokenResponse(renewResponse))
-        .setCmdType(getOmRequest().getCmdType())
-        .setClientId(getOmRequest().getClientId());
-
-    if (getOmRequest().hasTraceID()) {
-      omRequest.setTraceID(getOmRequest().getTraceID());
-    }
-
-    return omRequest.build();
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    UpdateRenewDelegationTokenRequest updateRenewDelegationTokenRequest =
-        getOmRequest().getUpdatedRenewDelegationTokenRequest();
-
-    Token<OzoneTokenIdentifier> ozoneTokenIdentifierToken =
-        OMPBHelper.convertToDelegationToken(updateRenewDelegationTokenRequest
-            .getRenewDelegationTokenRequest().getToken());
-
-    long renewTime = updateRenewDelegationTokenRequest
-        .getRenewDelegationTokenResponse().getResponse().getNewExpiryTime();
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    OMClientResponse omClientResponse = null;
-    OMResponse.Builder omResponse =
-        OMResponse.newBuilder()
-            .setCmdType(OzoneManagerProtocolProtos.Type.RenewDelegationToken)
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setSuccess(true);
-    try {
-
-      OzoneTokenIdentifier ozoneTokenIdentifier = OzoneTokenIdentifier.
-          readProtoBuf(ozoneTokenIdentifierToken.getIdentifier());
-
-      // Update the in-memory token map.
-      ozoneManager.getDelegationTokenMgr()
-          .updateRenewToken(ozoneTokenIdentifierToken, ozoneTokenIdentifier,
-              renewTime);
-
-      // Update Cache.
-      omMetadataManager.getDelegationTokenTable().addCacheEntry(
-          new CacheKey<>(ozoneTokenIdentifier),
-          new CacheValue<>(Optional.of(renewTime), transactionLogIndex));
-
-      omClientResponse =
-          new OMRenewDelegationTokenResponse(ozoneTokenIdentifier, renewTime,
-              omResponse.setRenewDelegationTokenResponse(
-                  updateRenewDelegationTokenRequest
-                      .getRenewDelegationTokenResponse()).build());
-    } catch (IOException ex) {
-      LOG.error("Error in Updating Renew DelegationToken {}",
-          ozoneTokenIdentifierToken, ex);
-      omClientResponse = new OMRenewDelegationTokenResponse(null, -1L,
-          createErrorOMResponse(omResponse, ex));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Updated renew delegation token in-memory map: {} with expiry" +
-              " time {}", ozoneTokenIdentifierToken, renewTime);
-    }
-
-    return omClientResponse;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/package-info.java
deleted file mode 100644
index c7608e8..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains classes which handle security requests.
- */
-package org.apache.hadoop.ozone.om.request.security;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/ObjectParser.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/ObjectParser.java
deleted file mode 100644
index c12cdac..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/ObjectParser.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.util;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OzoneObj.ObjectType;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
-
-/**
- * Utility class to parse {@link OzoneObj#getPath()}.
- */
-public class ObjectParser {
-
-  private String volume;
-  private String bucket;
-  private String key;
-
-  /**
-   * Parse the path and extract volume, bucket and key names.
-   * @param path path of the form volume[/bucket[/key]]
-   * @param objectType expected type of object the path refers to
-   * @throws OMException if the path does not match the expected type
-   */
-  public ObjectParser(String path, ObjectType objectType) throws OMException {
-    Preconditions.checkNotNull(path);
-    String[] tokens = StringUtils.split(path, OZONE_URI_DELIMITER, 3);
-
-    if (objectType == ObjectType.VOLUME && tokens.length == 1) {
-      volume = tokens[0];
-    } else if (objectType == ObjectType.BUCKET && tokens.length == 2) {
-      volume = tokens[0];
-      bucket = tokens[1];
-    } else if (objectType == ObjectType.KEY && tokens.length == 3) {
-      volume = tokens[0];
-      bucket = tokens[1];
-      key = tokens[2];
-    } else {
-      throw new OMException("Illegal path " + path,
-          OMException.ResultCodes.INVALID_PATH_IN_ACL_REQUEST);
-    }
-  }
-
-  public String getVolume() {
-    return volume;
-  }
-
-  public String getBucket() {
-    return bucket;
-  }
-
-  public String getKey() {
-    return key;
-  }
-}
-
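
For reference, the parser above splits an ACL path into at most three
tokens and requires the token count to match the declared object type. A
hypothetical usage sketch:

    import org.apache.hadoop.ozone.om.exceptions.OMException;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
        .OzoneObj.ObjectType;

    public class ObjectParserExample {
      public static void main(String[] args) throws OMException {
        ObjectParser p = new ObjectParser("vol1/bucket1/key1", ObjectType.KEY);
        System.out.println(p.getVolume());  // vol1
        System.out.println(p.getBucket());  // bucket1
        System.out.println(p.getKey());     // key1
        // "vol1/bucket1" with ObjectType.KEY would throw OMException with
        // INVALID_PATH_IN_ACL_REQUEST.
      }
    }
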
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/package-info.java
deleted file mode 100644
index 72fc09a..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package contains helper/utility classes for requests.
- */
-package org.apache.hadoop.ozone.om.request.util;
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
deleted file mode 100644
index 69da19f..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.volume;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .VolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserVolumeInfo;
-import org.apache.hadoop.util.Time;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.USER_LOCK;
-
-/**
- * Handles volume create request.
- */
-public class OMVolumeCreateRequest extends OMVolumeRequest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMVolumeCreateRequest.class);
-
-  public OMVolumeCreateRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-
-    VolumeInfo volumeInfo =
-        getOmRequest().getCreateVolumeRequest().getVolumeInfo();
-
-    // Set creation time
-    VolumeInfo updatedVolumeInfo =
-        volumeInfo.toBuilder().setCreationTime(Time.now()).build();
-
-
-    return getOmRequest().toBuilder().setCreateVolumeRequest(
-        CreateVolumeRequest.newBuilder().setVolumeInfo(updatedVolumeInfo))
-        .setUserInfo(getUserInfo())
-        .build();
-
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    CreateVolumeRequest createVolumeRequest =
-        getOmRequest().getCreateVolumeRequest();
-    Preconditions.checkNotNull(createVolumeRequest);
-    VolumeInfo volumeInfo = createVolumeRequest.getVolumeInfo();
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumVolumeCreates();
-
-    String volume = volumeInfo.getVolume();
-    String owner = volumeInfo.getOwnerName();
-
-    OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.CreateVolume).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    // Do this here so the protobuf conversion happens outside of the lock.
-    boolean acquiredVolumeLock = false;
-    boolean acquiredUserLock = false;
-    IOException exception = null;
-    OMClientResponse omClientResponse = null;
-    OmVolumeArgs omVolumeArgs = null;
-    Map<String, String> auditMap = new HashMap<>();
-    Collection<String> ozAdmins = ozoneManager.getOzoneAdmins();
-    try {
-      omVolumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
-      // When a volume is created, both the object ID and the update ID are
-      // set to the same Ratis transaction ID. The object ID never changes,
-      // but the update ID is set to the transaction ID on every update.
-      omVolumeArgs.setUpdateID(transactionLogIndex);
-      omVolumeArgs.setObjectID(transactionLogIndex);
-      auditMap = omVolumeArgs.toAuditMap();
-
-      // check Acl
-      if (ozoneManager.getAclsEnabled()) {
-        if (!ozAdmins.contains(OZONE_ADMINISTRATORS_WILDCARD) &&
-            !ozAdmins.contains(getUserInfo().getUserName())) {
-          throw new OMException("Only admin users are authorized to create " +
-              "Ozone volumes. User: " + getUserInfo().getUserName(),
-              OMException.ResultCodes.PERMISSION_DENIED);
-        }
-      }
-
-      UserVolumeInfo volumeList = null;
-
-      // acquire lock.
-      acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock(
-          VOLUME_LOCK, volume);
-
-      acquiredUserLock = omMetadataManager.getLock().acquireWriteLock(USER_LOCK,
-          owner);
-
-      String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
-
-      OmVolumeArgs dbVolumeArgs =
-          omMetadataManager.getVolumeTable().get(dbVolumeKey);
-
-      if (dbVolumeArgs == null) {
-        String dbUserKey = omMetadataManager.getUserKey(owner);
-        volumeList = omMetadataManager.getUserTable().get(dbUserKey);
-        volumeList = addVolumeToOwnerList(volumeList, volume, owner,
-            ozoneManager.getMaxUserVolumeCount(), transactionLogIndex);
-        createVolume(omMetadataManager, omVolumeArgs, volumeList, dbVolumeKey,
-            dbUserKey, transactionLogIndex);
-
-        omResponse.setCreateVolumeResponse(CreateVolumeResponse.newBuilder()
-            .build());
-        omClientResponse = new OMVolumeCreateResponse(omVolumeArgs, volumeList,
-            omResponse.build());
-        LOG.debug("volume:{} successfully created", omVolumeArgs.getVolume());
-      } else {
-        LOG.debug("volume:{} already exists", omVolumeArgs.getVolume());
-        throw new OMException("Volume already exists",
-            OMException.ResultCodes.VOLUME_ALREADY_EXISTS);
-      }
-
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new OMVolumeCreateResponse(null, null,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredUserLock) {
-        omMetadataManager.getLock().releaseWriteLock(USER_LOCK, owner);
-      }
-      if (acquiredVolumeLock) {
-        omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume);
-      }
-    }
-
-    // Performing audit logging outside of the lock.
-    auditLog(ozoneManager.getAuditLogger(),
-        buildAuditMessage(OMAction.CREATE_VOLUME, auditMap, exception,
-            getOmRequest().getUserInfo()));
-
-    // return response after releasing lock.
-    if (exception == null) {
-      LOG.info("created volume:{} for user:{}", volume, owner);
-      omMetrics.incNumVolumes();
-    } else {
-      LOG.error("Volume creation failed for user:{} volume:{}", owner,
-          volume, exception);
-      omMetrics.incNumVolumeCreateFails();
-    }
-    return omClientResponse;
-  }
-}
-
-
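
The create handler above pins a set-once object ID and a monotonically
advancing update ID to the same Ratis transaction index. A minimal sketch of
that identity scheme (hypothetical class; the stale-update guard is an
assumption, not part of the deleted code):

    /** Hypothetical metadata object separating identity from freshness. */
    final class VersionedObjectSketch {
      private final long objectId;  // set once at creation, never changes
      private long updateId;        // advances to each update's txn index

      VersionedObjectSketch(long creationTxnIndex) {
        this.objectId = creationTxnIndex;
        this.updateId = creationTxnIndex;
      }

      void onUpdate(long txnIndex) {
        if (txnIndex <= updateId) {
          // Replayed or out-of-order transaction; reject stale updates.
          throw new IllegalStateException("stale transaction " + txnIndex);
        }
        updateId = txnIndex;
      }

      long getObjectId() { return objectId; }
      long getUpdateId() { return updateId; }
    }
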
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
deleted file mode 100644
index f91b02d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.volume;
-
-import java.io.IOException;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeDeleteResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .DeleteVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .DeleteVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.USER_LOCK;
-/**
- * Handles volume delete request.
- */
-public class OMVolumeDeleteRequest extends OMVolumeRequest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMVolumeDeleteRequest.class);
-
-  public OMVolumeDeleteRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    DeleteVolumeRequest deleteVolumeRequest =
-        getOmRequest().getDeleteVolumeRequest();
-    Preconditions.checkNotNull(deleteVolumeRequest);
-
-    String volume = deleteVolumeRequest.getVolumeName();
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumVolumeDeletes();
-
-    OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.DeleteVolume).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    boolean acquiredUserLock = false;
-    boolean acquiredVolumeLock = false;
-    IOException exception = null;
-    String owner = null;
-    OMClientResponse omClientResponse = null;
-    try {
-      // check Acl
-      if (ozoneManager.getAclsEnabled()) {
-        checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME,
-            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.DELETE, volume,
-            null, null);
-      }
-
-      OmVolumeArgs omVolumeArgs = null;
-      OzoneManagerProtocolProtos.UserVolumeInfo newVolumeList = null;
-
-      acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock(
-          VOLUME_LOCK, volume);
-      owner = getVolumeInfo(omMetadataManager, volume).getOwnerName();
-      acquiredUserLock = omMetadataManager.getLock().acquireWriteLock(USER_LOCK,
-          owner);
-
-      String dbUserKey = omMetadataManager.getUserKey(owner);
-      String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
-
-      if (!omMetadataManager.isVolumeEmpty(volume)) {
-        LOG.debug("volume:{} is not empty", volume);
-        throw new OMException(OMException.ResultCodes.VOLUME_NOT_EMPTY);
-      }
-
-      newVolumeList = omMetadataManager.getUserTable().get(owner);
-
-      // Delete the volume from the owner list as well as the volume entry.
-      newVolumeList = delVolumeFromOwnerList(newVolumeList, volume, owner,
-          transactionLogIndex);
-
-      omMetadataManager.getUserTable().addCacheEntry(new CacheKey<>(dbUserKey),
-          new CacheValue<>(Optional.of(newVolumeList), transactionLogIndex));
-
-      omMetadataManager.getVolumeTable().addCacheEntry(
-          new CacheKey<>(dbVolumeKey), new CacheValue<>(Optional.absent(),
-              transactionLogIndex));
-
-      omResponse.setDeleteVolumeResponse(
-          DeleteVolumeResponse.newBuilder().build());
-      omClientResponse = new OMVolumeDeleteResponse(volume, owner,
-          newVolumeList, omResponse.build());
-
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new OMVolumeDeleteResponse(null, null, null,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredUserLock) {
-        omMetadataManager.getLock().releaseWriteLock(USER_LOCK, owner);
-      }
-      if (acquiredVolumeLock) {
-        omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume);
-      }
-    }
-
-    // Performing audit logging outside of the lock.
-    auditLog(ozoneManager.getAuditLogger(),
-        buildAuditMessage(OMAction.DELETE_VOLUME, buildVolumeAuditMap(volume),
-            exception, getOmRequest().getUserInfo()));
-
-    // return response after releasing lock.
-    if (exception == null) {
-      LOG.debug("Volume deleted for user:{} volume:{}", owner, volume);
-      omMetrics.decNumVolumes();
-    } else {
-      LOG.error("Volume deletion failed for user:{} volume:{}",
-          owner, volume, exception);
-      omMetrics.incNumVolumeDeleteFails();
-    }
-    return omClientResponse;
-
-  }
-
-  /**
-   * Return volume info for the specified volume. This method should be
-   * called after acquiring volume lock.
-   * @param omMetadataManager - metadata manager used to read the volume table.
-   * @param volume - volume name to look up.
-   * @return OmVolumeArgs for the volume.
-   * @throws IOException if the volume is not found.
-   */
-  private OmVolumeArgs getVolumeInfo(OMMetadataManager omMetadataManager,
-      String volume) throws IOException {
-
-    String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
-    OmVolumeArgs volumeArgs =
-        omMetadataManager.getVolumeTable().get(dbVolumeKey);
-    if (volumeArgs == null) {
-      throw new OMException("Volume " + volume + " is not found",
-          OMException.ResultCodes.VOLUME_NOT_FOUND);
-    }
-    return volumeArgs;
-
-  }
-}
-
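The delete handler above acquires the volume lock before the user lock and releases both in reverse order from the finally block, so a failure at any point still unwinds cleanly. A minimal self-contained sketch of that acquire/release discipline, using plain java.util.concurrent locks rather than the OzoneManagerLock API:

    import java.util.concurrent.locks.ReentrantLock;

    public class LockOrderingSketch {
      private final ReentrantLock volumeLock = new ReentrantLock();
      private final ReentrantLock userLock = new ReentrantLock();

      public void deleteVolume(Runnable dbUpdate) {
        boolean acquiredVolume = false;
        boolean acquiredUser = false;
        try {
          volumeLock.lock();   // outer lock first
          acquiredVolume = true;
          userLock.lock();     // inner lock second
          acquiredUser = true;
          dbUpdate.run();      // mutate state only while both locks are held
        } finally {
          // release in reverse acquisition order
          if (acquiredUser) {
            userLock.unlock();
          }
          if (acquiredVolume) {
            volumeLock.unlock();
          }
        }
      }
    }

The booleans mirror the acquiredUserLock/acquiredVolumeLock flags above: the finally block must only release locks that were actually taken, since an ACL check can fail before any lock is acquired.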
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java
deleted file mode 100644
index 7c38c41..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.volume;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .UserVolumeInfo;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Defines common methods required for volume requests.
- */
-public abstract class OMVolumeRequest extends OMClientRequest {
-
-  public OMVolumeRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  /**
-   * Delete volume from user volume list. This method should be called after
-   * acquiring user lock.
-   * @param volumeList - current volume list owned by user.
-   * @param volume - volume which needs to be deleted from the volume list.
-   * @param owner - Name of the Owner.
-   * @param txID - The transaction ID that is updating this value.
-   * @return UserVolumeInfo - updated UserVolumeInfo.
-   * @throws IOException if the user is not found.
-   */
-  protected UserVolumeInfo delVolumeFromOwnerList(UserVolumeInfo volumeList,
-      String volume, String owner, long txID) throws IOException {
-
-    List<String> prevVolList = new ArrayList<>();
-
-    if (volumeList != null) {
-      prevVolList.addAll(volumeList.getVolumeNamesList());
-    } else {
-      // No Volumes for this user
-      throw new OMException("User not found: " + owner,
-          OMException.ResultCodes.USER_NOT_FOUND);
-    }
-
-    // Remove the volume from the list
-    prevVolList.remove(volume);
-    UserVolumeInfo newVolList = UserVolumeInfo.newBuilder()
-        .addAllVolumeNames(prevVolList)
-        .setObjectID(volumeList.getObjectID())
-        .setUpdateID(txID)
-        .build();
-    return newVolList;
-  }
-
-
-  /**
-   * Add volume to user volume list. This method should be called after
-   * acquiring user lock.
-   * @param volumeList - current volume list owned by user.
-   * @param volume - volume which needs to be added to this list.
-   * @param owner - name of the owner.
-   * @param maxUserVolumeCount - maximum number of volumes allowed per user.
-   * @param txID - the transaction ID that is updating this value.
-   * @return UserVolumeInfo - the updated volume list.
-   * @throws OMException - if the user already owns maxUserVolumeCount
-   * volumes.
-   */
-  protected UserVolumeInfo addVolumeToOwnerList(UserVolumeInfo volumeList,
-      String volume, String owner, long maxUserVolumeCount, long txID)
-      throws IOException {
-
-    // Check the volume count
-    if (volumeList != null &&
-        volumeList.getVolumeNamesList().size() >= maxUserVolumeCount) {
-      throw new OMException("Too many volumes for user:" + owner,
-          OMException.ResultCodes.USER_TOO_MANY_VOLUMES);
-    }
-
-    List<String> prevVolList = new ArrayList<>();
-    long objectID = txID;
-    if (volumeList != null) {
-      prevVolList.addAll(volumeList.getVolumeNamesList());
-      objectID = volumeList.getObjectID();
-    }
-
-
-    // Add the new volume to the list
-    prevVolList.add(volume);
-    UserVolumeInfo newVolList = UserVolumeInfo.newBuilder()
-        .setObjectID(objectID)
-        .setUpdateID(txID)
-        .addAllVolumeNames(prevVolList).build();
-
-    return newVolList;
-  }
-
-  /**
-   * Create Ozone Volume. This method should be called after acquiring user
-   * and volume Lock.
-   * @param omMetadataManager - metadata manager whose caches are updated.
-   * @param omVolumeArgs - arguments of the volume being created.
-   * @param volumeList - updated volume list of the owning user.
-   * @param dbVolumeKey - DB key of the volume entry.
-   * @param dbUserKey - DB key of the user entry.
-   * @param transactionLogIndex - transaction ID updating these values.
-   */
-  protected void createVolume(final OMMetadataManager omMetadataManager,
-      OmVolumeArgs omVolumeArgs, UserVolumeInfo volumeList, String dbVolumeKey,
-      String dbUserKey, long transactionLogIndex) {
-    // Update cache: Update user and volume cache.
-    omMetadataManager.getUserTable().addCacheEntry(new CacheKey<>(dbUserKey),
-        new CacheValue<>(Optional.of(volumeList), transactionLogIndex));
-
-    omMetadataManager.getVolumeTable().addCacheEntry(
-        new CacheKey<>(dbVolumeKey),
-        new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex));
-  }
-
-}
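delVolumeFromOwnerList and addVolumeToOwnerList never mutate the existing UserVolumeInfo message; they copy the name list, edit the copy, and build a fresh message stamped with the transaction ID. A self-contained sketch of the same copy-then-rebuild pattern, with a plain Java class standing in for the protobuf message:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    // Stand-in for the immutable UserVolumeInfo protobuf message.
    final class VolumeListSnapshot {
      final List<String> volumeNames;
      final long updateId;

      VolumeListSnapshot(List<String> volumeNames, long updateId) {
        this.volumeNames =
            Collections.unmodifiableList(new ArrayList<>(volumeNames));
        this.updateId = updateId;
      }

      // Copy, modify the copy, build a new snapshot tagged with the
      // transaction id -- the old snapshot stays valid for readers.
      VolumeListSnapshot without(String volume, long txId) {
        List<String> copy = new ArrayList<>(volumeNames);
        copy.remove(volume);
        return new VolumeListSnapshot(copy, txId);
      }
    }

Because the old snapshot is never modified, concurrent readers holding a reference to it see a consistent view while the writer installs the new one under the user lock.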
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
deleted file mode 100644
index d1f1e8b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.volume;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeSetOwnerResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .SetVolumePropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .SetVolumePropertyResponse;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
-
-/**
- * Handles volume set owner request.
- */
-public class OMVolumeSetOwnerRequest extends OMVolumeRequest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMVolumeSetOwnerRequest.class);
-
-  public OMVolumeSetOwnerRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    SetVolumePropertyRequest setVolumePropertyRequest =
-        getOmRequest().getSetVolumePropertyRequest();
-
-    Preconditions.checkNotNull(setVolumePropertyRequest);
-
-    OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.SetVolumeProperty).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-    // This should never happen in production; this request is invoked only
-    // when ownerName is set in setVolumePropertyRequest.
-    if (!setVolumePropertyRequest.hasOwnerName()) {
-      omResponse.setStatus(OzoneManagerProtocolProtos.Status.INVALID_REQUEST)
-          .setSuccess(false);
-      return new OMVolumeSetOwnerResponse(null, null, null, null,
-          omResponse.build());
-    }
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumVolumeUpdates();
-    String volume = setVolumePropertyRequest.getVolumeName();
-    String newOwner = setVolumePropertyRequest.getOwnerName();
-
-    AuditLogger auditLogger = ozoneManager.getAuditLogger();
-    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
-
-    Map<String, String> auditMap = buildVolumeAuditMap(volume);
-    auditMap.put(OzoneConsts.OWNER, newOwner);
-
-    boolean acquiredUserLocks = false;
-    boolean acquiredVolumeLock = false;
-    IOException exception = null;
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    String oldOwner = null;
-    OMClientResponse omClientResponse = null;
-    try {
-      // check Acl
-      if (ozoneManager.getAclsEnabled()) {
-        checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME,
-            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL,
-            volume, null, null);
-      }
-
-
-      long maxUserVolumeCount = ozoneManager.getMaxUserVolumeCount();
-
-      String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
-
-      OzoneManagerProtocolProtos.UserVolumeInfo oldOwnerVolumeList = null;
-      OzoneManagerProtocolProtos.UserVolumeInfo newOwnerVolumeList = null;
-      OmVolumeArgs omVolumeArgs = null;
-
-
-
-      acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock(
-          VOLUME_LOCK, volume);
-
-      omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey);
-
-      if (omVolumeArgs == null) {
-        LOG.debug("Changing volume ownership failed for user:{} volume:{}",
-            newOwner, volume);
-        throw new OMException("Volume " + volume + " is not found",
-            OMException.ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      oldOwner = omVolumeArgs.getOwnerName();
-
-      acquiredUserLocks =
-          omMetadataManager.getLock().acquireMultiUserLock(newOwner, oldOwner);
-
-      oldOwnerVolumeList =
-          omMetadataManager.getUserTable().get(oldOwner);
-
-      oldOwnerVolumeList = delVolumeFromOwnerList(
-          oldOwnerVolumeList, volume, oldOwner, transactionLogIndex);
-
-      newOwnerVolumeList = omMetadataManager.getUserTable().get(newOwner);
-      newOwnerVolumeList = addVolumeToOwnerList(
-          newOwnerVolumeList, volume, newOwner,
-          maxUserVolumeCount, transactionLogIndex);
-
-      // Set owner with new owner name.
-      omVolumeArgs.setOwnerName(newOwner);
-      omVolumeArgs.setUpdateID(transactionLogIndex);
-
-      // Update cache.
-      omMetadataManager.getUserTable().addCacheEntry(
-          new CacheKey<>(omMetadataManager.getUserKey(newOwner)),
-              new CacheValue<>(Optional.of(newOwnerVolumeList),
-                  transactionLogIndex));
-      omMetadataManager.getUserTable().addCacheEntry(
-          new CacheKey<>(omMetadataManager.getUserKey(oldOwner)),
-          new CacheValue<>(Optional.of(oldOwnerVolumeList),
-              transactionLogIndex));
-      omMetadataManager.getVolumeTable().addCacheEntry(
-          new CacheKey<>(dbVolumeKey),
-          new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex));
-
-      omResponse.setSetVolumePropertyResponse(
-          SetVolumePropertyResponse.newBuilder().build());
-      omClientResponse = new OMVolumeSetOwnerResponse(oldOwner,
-          oldOwnerVolumeList, newOwnerVolumeList, omVolumeArgs,
-          omResponse.build());
-
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new OMVolumeSetOwnerResponse(null, null, null, null,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredUserLocks) {
-        omMetadataManager.getLock().releaseMultiUserLock(newOwner, oldOwner);
-      }
-      if (acquiredVolumeLock) {
-        omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume);
-      }
-    }
-
-    // Performing audit logging outside of the lock.
-    auditLog(auditLogger, buildAuditMessage(OMAction.SET_OWNER, auditMap,
-        exception, userInfo));
-
-    // return response after releasing lock.
-    if (exception == null) {
-      LOG.debug("Successfully changed Owner of Volume {} from {} -> {}", volume,
-          oldOwner, newOwner);
-    } else {
-      LOG.error("Changing volume ownership failed for user:{} volume:{}",
-          newOwner, volume, exception);
-      omMetrics.incNumVolumeUpdateFails();
-    }
-    return omClientResponse;
-  }
-}
-
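acquireMultiUserLock above has to lock entries for two users at once. A standard way to make such a pairwise acquisition deadlock-free is to always take the locks in a canonical (here lexicographic) order; the sketch below illustrates that general technique and is not necessarily how OzoneManagerLock implements it:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.locks.ReentrantLock;

    public class MultiUserLockSketch {
      private final ConcurrentHashMap<String, ReentrantLock> locks =
          new ConcurrentHashMap<>();

      private ReentrantLock lockFor(String user) {
        return locks.computeIfAbsent(user, u -> new ReentrantLock());
      }

      // Always acquire in lexicographic order so two threads locking the
      // same pair of users cannot deadlock on each other.
      public void lockBoth(String a, String b) {
        String first = a.compareTo(b) <= 0 ? a : b;
        String second = a.compareTo(b) <= 0 ? b : a;
        lockFor(first).lock();
        if (!first.equals(second)) {
          lockFor(second).lock();
        }
      }

      public void unlockBoth(String a, String b) {
        String first = a.compareTo(b) <= 0 ? a : b;
        String second = a.compareTo(b) <= 0 ? b : a;
        if (!first.equals(second)) {
          lockFor(second).unlock();
        }
        lockFor(first).unlock();
      }
    }

The equality check matters: setting the owner to the current owner would otherwise try to lock the same user twice.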
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
deleted file mode 100644
index ef6d8ae..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.volume;
-
-import java.io.IOException;
-import java.util.Map;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeSetQuotaResponse;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .SetVolumePropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .SetVolumePropertyResponse;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
-
-
-/**
- * Handles volume set quota request.
- */
-public class OMVolumeSetQuotaRequest extends OMVolumeRequest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMVolumeSetQuotaRequest.class);
-
-  public OMVolumeSetQuotaRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-
-    SetVolumePropertyRequest setVolumePropertyRequest =
-        getOmRequest().getSetVolumePropertyRequest();
-
-    Preconditions.checkNotNull(setVolumePropertyRequest);
-
-    OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.SetVolumeProperty).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-
-
-
-    // This should never happen in production; this request is invoked only
-    // when quotaInBytes is set in setVolumePropertyRequest.
-    if (!setVolumePropertyRequest.hasQuotaInBytes()) {
-      omResponse.setStatus(OzoneManagerProtocolProtos.Status.INVALID_REQUEST)
-          .setSuccess(false);
-      return new OMVolumeSetQuotaResponse(null,
-          omResponse.build());
-    }
-
-    String volume = setVolumePropertyRequest.getVolumeName();
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumVolumeUpdates();
-
-    AuditLogger auditLogger = ozoneManager.getAuditLogger();
-    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
-    Map<String, String> auditMap = buildVolumeAuditMap(volume);
-    auditMap.put(OzoneConsts.QUOTA,
-        String.valueOf(setVolumePropertyRequest.getQuotaInBytes()));
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    IOException exception = null;
-    boolean acquiredVolumeLock = false;
-    OMClientResponse omClientResponse = null;
-    try {
-      // check Acl
-      if (ozoneManager.getAclsEnabled()) {
-        checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME,
-            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE, volume,
-            null, null);
-      }
-
-      OmVolumeArgs omVolumeArgs = null;
-
-      acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock(
-          VOLUME_LOCK, volume);
-      String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
-      omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey);
-
-      if (omVolumeArgs == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new OMException(OMException.ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      omVolumeArgs.setQuotaInBytes(setVolumePropertyRequest.getQuotaInBytes());
-
-      // update cache.
-      omMetadataManager.getVolumeTable().addCacheEntry(
-          new CacheKey<>(dbVolumeKey),
-          new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex));
-
-      omResponse.setSetVolumePropertyResponse(
-          SetVolumePropertyResponse.newBuilder().build());
-      omClientResponse = new OMVolumeSetQuotaResponse(omVolumeArgs,
-        omResponse.build());
-    } catch (IOException ex) {
-      exception = ex;
-      omClientResponse = new OMVolumeSetQuotaResponse(null,
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (acquiredVolumeLock) {
-        omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume);
-      }
-    }
-
-    // Performing audit logging outside of the lock.
-    auditLog(auditLogger, buildAuditMessage(OMAction.SET_QUOTA, auditMap,
-        exception, userInfo));
-
-    // return response after releasing lock.
-    if (exception == null) {
-      LOG.debug("Changing volume quota is successfully completed for volume: " +
-          "{} quota:{}", volume, setVolumePropertyRequest.getQuotaInBytes());
-    } else {
-      omMetrics.incNumVolumeUpdateFails();
-      LOG.error("Changing volume quota failed for volume:{} quota:{}", volume,
-          setVolumePropertyRequest.getQuotaInBytes(), exception);
-    }
-    return omClientResponse;
-  }
-
-
-}
-
-
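Every cache update above pairs the value with the transactionLogIndex, so later entries can be ordered against earlier ones when updates race. A generic sketch of such a versioned cache with hypothetical names; the real CacheKey/CacheValue classes live in hadoop-hdds and differ in detail:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Keeps only the entry with the highest transaction index per key.
    public class VersionedCacheSketch<K, V> {
      private static final class Entry<V> {
        final V value;      // null models a deletion (Optional.absent() above)
        final long txIndex;
        Entry(V value, long txIndex) {
          this.value = value;
          this.txIndex = txIndex;
        }
      }

      private final Map<K, Entry<V>> cache = new ConcurrentHashMap<>();

      public void put(K key, V value, long txIndex) {
        // merge resolves races: the entry with the newer index wins.
        cache.merge(key, new Entry<>(value, txIndex),
            (oldE, newE) -> newE.txIndex >= oldE.txIndex ? newE : oldE);
      }

      public V get(K key) {
        Entry<V> e = cache.get(key);
        return e == null ? null : e.value;
      }
    }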
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
deleted file mode 100644
index 6b4dc75..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.volume.acl;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import java.io.IOException;
-import java.util.List;
-
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
-
-/**
- * Base class for OMVolumeAcl Request.
- */
-public abstract class OMVolumeAclRequest extends OMClientRequest {
-
-  private CheckedBiFunction<List<OzoneAcl>, OmVolumeArgs, IOException>
-      omVolumeAclOp;
-
-  public OMVolumeAclRequest(OzoneManagerProtocolProtos.OMRequest omRequest,
-      CheckedBiFunction<List<OzoneAcl>, OmVolumeArgs, IOException> aclOp) {
-    super(omRequest);
-    omVolumeAclOp = aclOp;
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-    // protobuf guarantees volume and acls are non-null.
-    String volume = getVolumeName();
-    List<OzoneAcl> ozoneAcls = getAcls();
-
-    OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumVolumeUpdates();
-    OmVolumeArgs omVolumeArgs = null;
-
-    OMResponse.Builder omResponse = onInit();
-    OMClientResponse omClientResponse = null;
-    IOException exception = null;
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    boolean lockAcquired = false;
-    try {
-      // check Acl
-      if (ozoneManager.getAclsEnabled()) {
-        checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME,
-            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL,
-            volume, null, null);
-      }
-      lockAcquired =
-          omMetadataManager.getLock().acquireWriteLock(VOLUME_LOCK, volume);
-      String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
-      omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey);
-      if (omVolumeArgs == null) {
-        throw new OMException(OMException.ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      // result is false when adding an existing acl or removing a
-      // non-existing acl.
-      boolean result = true;
-      try {
-        omVolumeAclOp.apply(ozoneAcls, omVolumeArgs);
-      } catch (OMException ex) {
-        result = false;
-      }
-
-      if (result) {
-        // update cache.
-        omMetadataManager.getVolumeTable().addCacheEntry(
-            new CacheKey<>(dbVolumeKey),
-            new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex));
-      }
-
-      omClientResponse = onSuccess(omResponse, omVolumeArgs, result);
-    } catch (IOException ex) {
-      exception = ex;
-      omMetrics.incNumVolumeUpdateFails();
-      omClientResponse = onFailure(omResponse, ex);
-    } finally {
-      if (omClientResponse != null) {
-        omClientResponse.setFlushFuture(
-            ozoneManagerDoubleBufferHelper.add(omClientResponse,
-                transactionLogIndex));
-      }
-      if (lockAcquired) {
-        omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume);
-      }
-    }
-
-    onComplete(exception);
-
-    return omClientResponse;
-  }
-
-  /**
-   * Get the Acls from the request.
-   * @return List of OzoneAcls; for add/remove it is a single-element list,
-   * for set it may contain multiple elements.
-   */
-  abstract List<OzoneAcl> getAcls();
-
-  /**
-   * Get the volume name from the request.
-   * This is needed for the case where the volume does not exist and
-   * omVolumeArgs is null.
-   * @return volume name.
-   */
-  abstract String getVolumeName();
-
-  // TODO: Finer grain metrics can be moved to these callbacks. They can also
-  // be abstracted into separate interfaces in the future.
-  /**
-   * Get the initial om response builder; called before the lock is taken.
-   * @return om response builder.
-   */
-  abstract OMResponse.Builder onInit();
-
-  /**
-   * Get the om client response for the success case; called while the
-   * volume lock is held.
-   * @param omResponse - response builder prepared by onInit.
-   * @param omVolumeArgs - volume arguments after the acl operation.
-   * @param result - false if the acl operation was a no-op.
-   * @return OMClientResponse
-   */
-  abstract OMClientResponse onSuccess(
-      OMResponse.Builder omResponse, OmVolumeArgs omVolumeArgs, boolean result);
-
-  /**
-   * Get the om client response for the failure case; called while the
-   * volume lock may still be held.
-   * @param omResponse - response builder prepared by onInit.
-   * @param ex - the exception that caused the failure.
-   * @return OMClientResponse
-   */
-  abstract OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException ex);
-
-  /**
-   * Completion hook for final processing before return, after the lock is
-   * released. Usually used for logging.
-   * @param ex - the exception on failure, or null on success.
-   */
-  abstract void onComplete(IOException ex);
-}
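OMVolumeAclRequest is a template method: validateAndUpdateCache owns the locking, cache update, and error handling, while subclasses only supply the onInit/onSuccess/onFailure/onComplete hooks. A stripped-down, self-contained sketch of that control flow, with String standing in for the response types:

    import java.io.IOException;

    public abstract class TemplateRequestSketch {
      // The template owns the invariant structure; subclasses cannot
      // change the order of the steps, only their content.
      public final String run() {
        String base = onInit();        // build the initial response once
        String response;
        IOException failure = null;
        try {
          response = onSuccess(base);
        } catch (IOException ex) {
          failure = ex;
          response = onFailure(base, ex);
        }
        onComplete(failure);           // e.g. logging, outside any lock
        return response;
      }

      abstract String onInit();
      abstract String onSuccess(String base) throws IOException;
      abstract String onFailure(String base, IOException ex);
      abstract void onComplete(IOException ex);
    }

Marking run() final is what guarantees every acl request acquires and releases the volume lock the same way, no matter which subclass handles it.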
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
deleted file mode 100644
index 6bb8564..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.request.volume.acl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeAclOpResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Handles volume add acl request.
- */
-public class OMVolumeAddAclRequest extends OMVolumeAclRequest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMVolumeAddAclRequest.class);
-
-  private static CheckedBiFunction<List<OzoneAcl>,
-      OmVolumeArgs, IOException> volumeAddAclOp;
-
-  static {
-    volumeAddAclOp = (acls, volArgs) -> volArgs.addAcl(acls.get(0));
-  }
-
-  private List<OzoneAcl> ozoneAcls;
-  private String volumeName;
-
-  public OMVolumeAddAclRequest(OMRequest omRequest) {
-    super(omRequest, volumeAddAclOp);
-    OzoneManagerProtocolProtos.AddAclRequest addAclRequest =
-        getOmRequest().getAddAclRequest();
-    Preconditions.checkNotNull(addAclRequest);
-    ozoneAcls = Lists.newArrayList(
-        OzoneAcl.fromProtobuf(addAclRequest.getAcl()));
-    volumeName = addAclRequest.getObj().getPath().substring(1);
-  }
-
-  @Override
-  public List<OzoneAcl> getAcls() {
-    return ozoneAcls;
-  }
-
-  @Override
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  private OzoneAcl getAcl() {
-    return ozoneAcls.get(0);
-  }
-
-
-  @Override
-  OMResponse.Builder onInit() {
-    return OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.AddAcl)
-        .setStatus(OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-  }
-
-  @Override
-  OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmVolumeArgs omVolumeArgs, boolean result) {
-    omResponse.setAddAclResponse(OzoneManagerProtocolProtos.AddAclResponse
-        .newBuilder().setResponse(result).build());
-    return new OMVolumeAclOpResponse(omVolumeArgs, omResponse.build());
-  }
-
-  @Override
-  OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException ex) {
-    return new OMVolumeAclOpResponse(null,
-        createErrorOMResponse(omResponse, ex));
-  }
-
-  @Override
-  void onComplete(IOException ex) {
-    if (ex == null) {
-      LOG.debug("Add acl: {} to volume: {} success!",
-          getAcl(), getVolumeName());
-    } else {
-      LOG.error("Add acl {} to volume {} failed!",
-          getAcl(), getVolumeName(), ex);
-    }
-  }
-}
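Each concrete acl request hands its mutation to the base class as a CheckedBiFunction lambda. The general shape is a functional interface whose single method may throw a checked exception, roughly as below; this is an illustrative stand-in, not the actual org.apache.hadoop.hdds.scm.storage.CheckedBiFunction definition:

    import java.io.IOException;
    import java.util.List;

    // A BiConsumer-like interface whose apply may throw a checked exception,
    // which plain java.util.function interfaces cannot do.
    @FunctionalInterface
    interface CheckedBiFunctionSketch<L, R, E extends Exception> {
      void apply(L left, R right) throws E;
    }

    class AclOps {
      // The add-acl operation as a lambda, mirroring volumeAddAclOp above;
      // List<String> and StringBuilder are placeholder types.
      static final CheckedBiFunctionSketch<List<String>, StringBuilder,
          IOException> addFirstAcl =
              (acls, target) -> target.append(acls.get(0));
    }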
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
deleted file mode 100644
index 188e205..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.request.volume.acl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeAclOpResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Handles volume remove acl request.
- */
-public class OMVolumeRemoveAclRequest extends OMVolumeAclRequest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMVolumeRemoveAclRequest.class);
-
-  private static CheckedBiFunction<List<OzoneAcl>,
-      OmVolumeArgs, IOException> volumeRemoveAclOp;
-
-  static {
-    volumeRemoveAclOp = (acls, volArgs) -> volArgs.removeAcl(acls.get(0));
-  }
-
-  private List<OzoneAcl> ozoneAcls;
-  private String volumeName;
-
-  public OMVolumeRemoveAclRequest(OMRequest omRequest) {
-    super(omRequest, volumeRemoveAclOp);
-    OzoneManagerProtocolProtos.RemoveAclRequest removeAclRequest =
-        getOmRequest().getRemoveAclRequest();
-    Preconditions.checkNotNull(removeAclRequest);
-    ozoneAcls = Lists.newArrayList(
-        OzoneAcl.fromProtobuf(removeAclRequest.getAcl()));
-    volumeName = removeAclRequest.getObj().getPath().substring(1);
-  }
-
-  @Override
-  public List<OzoneAcl> getAcls() {
-    return ozoneAcls;
-  }
-
-  @Override
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  private OzoneAcl getAcl() {
-    return ozoneAcls.get(0);
-  }
-
-  @Override
-  OMResponse.Builder onInit() {
-    return OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.RemoveAcl)
-        .setStatus(OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-  }
-
-  @Override
-  OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmVolumeArgs omVolumeArgs, boolean result) {
-    omResponse.setRemoveAclResponse(OzoneManagerProtocolProtos.RemoveAclResponse
-        .newBuilder().setResponse(result).build());
-    return new OMVolumeAclOpResponse(omVolumeArgs, omResponse.build());
-  }
-
-  @Override
-  OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException ex) {
-    return new OMVolumeAclOpResponse(null,
-        createErrorOMResponse(omResponse, ex));
-  }
-
-  @Override
-  void onComplete(IOException ex) {
-    if (ex == null) {
-      LOG.debug("Remove acl: {} from volume: {} success!",
-          getAcl(), getVolumeName());
-    } else {
-      LOG.error("Remove acl {} from volume {} failed!",
-          getAcl(), getVolumeName(), ex);
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
deleted file mode 100644
index a5abbcc..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.request.volume.acl;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeAclOpResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Handles volume set acl request.
- */
-public class OMVolumeSetAclRequest extends OMVolumeAclRequest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMVolumeSetAclRequest.class);
-
-  private static CheckedBiFunction<List<OzoneAcl>,
-      OmVolumeArgs, IOException> volumeSetAclOp;
-
-  static {
-    volumeSetAclOp = (acls, volArgs) -> volArgs.setAcls(acls);
-  }
-
-  private List<OzoneAcl> ozoneAcls;
-  private String volumeName;
-
-  public OMVolumeSetAclRequest(OMRequest omRequest) {
-    super(omRequest, volumeSetAclOp);
-    OzoneManagerProtocolProtos.SetAclRequest setAclRequest =
-        getOmRequest().getSetAclRequest();
-    Preconditions.checkNotNull(setAclRequest);
-    ozoneAcls = new ArrayList<>();
-    setAclRequest.getAclList().forEach(oai ->
-        ozoneAcls.add(OzoneAcl.fromProtobuf(oai)));
-    volumeName = setAclRequest.getObj().getPath().substring(1);
-  }
-
-  @Override
-  public List<OzoneAcl> getAcls() {
-    return ozoneAcls;
-  }
-
-  @Override
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  @Override
-  OMResponse.Builder onInit() {
-    return OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.SetAcl)
-        .setStatus(OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
-  }
-
-  @Override
-  OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmVolumeArgs omVolumeArgs, boolean result) {
-    omResponse.setSetAclResponse(OzoneManagerProtocolProtos.SetAclResponse
-        .newBuilder().setResponse(result).build());
-    return new OMVolumeAclOpResponse(omVolumeArgs, omResponse.build());
-  }
-
-  @Override
-  OMClientResponse onFailure(OMResponse.Builder omResponse,
-      IOException ex) {
-    return new OMVolumeAclOpResponse(null,
-        createErrorOMResponse(omResponse, ex));
-  }
-
-  @Override
-  void onComplete(IOException ex) {
-    if (ex == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Set acls: {} to volume: {} success!",
-            getAcls(), getVolumeName());
-      }
-    } else {
-      LOG.error("Set acls {} to volume {} failed!",
-          getAcls(), getVolumeName(), ex);
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java
deleted file mode 100644
index 79c4afd..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package contains classes related to volume acl requests and responses.
- */
-package org.apache.hadoop.ozone.om.request.volume.acl;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java
deleted file mode 100644
index 708f708..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package contains classes related to volume requests.
- */
-package org.apache.hadoop.ozone.om.request.volume;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java
deleted file mode 100644
index 92d75eb..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response;
-
-import java.io.IOException;
-import java.util.concurrent.CompletableFuture;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-/**
- * Base class for OM responses; each OM response should extend this class.
- */
-public abstract class OMClientResponse {
-
-  private OMResponse omResponse;
-  private CompletableFuture<Void> flushFuture = null;
-
-  public OMClientResponse(OMResponse omResponse) {
-    Preconditions.checkNotNull(omResponse);
-    this.omResponse = omResponse;
-  }
-
-  /**
-   * Implement logic to add the response to the DB batch.
-   * @param omMetadataManager - metadata manager providing the tables.
-   * @param batchOperation - batch to which the DB updates are added.
-   * @throws IOException on failure to add to the batch.
-   */
-  public abstract void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException;
-
-  /**
-   * Return OMResponse.
-   * @return OMResponse
-   */
-  public OMResponse getOMResponse() {
-    return omResponse;
-  }
-
-  public void setFlushFuture(CompletableFuture<Void> flushFuture) {
-    this.flushFuture = flushFuture;
-  }
-
-  public CompletableFuture<Void> getFlushFuture() {
-    return flushFuture;
-  }
-
-}
-
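setFlushFuture ties each response to the double buffer's flush, so a caller can block until the batched write is durable by joining the future. A generic, self-contained sketch of that handshake using CompletableFuture; markFlushed is a hypothetical stand-in for the flusher's callback:

    import java.util.concurrent.CompletableFuture;

    public class FlushFutureSketch {
      // Completed by the flusher thread once the batched write hits the DB.
      private final CompletableFuture<Void> flushFuture =
          new CompletableFuture<>();

      public CompletableFuture<Void> getFlushFuture() {
        return flushFuture;
      }

      // Called by the double-buffer flusher after a successful batch commit.
      void markFlushed() {
        flushFuture.complete(null);
      }

      public static void main(String[] args) {
        FlushFutureSketch r = new FlushFutureSketch();
        new Thread(r::markFlushed).start();
        r.getFlushFuture().join();   // block until the write is durable
        System.out.println("flushed");
      }
    }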
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java
deleted file mode 100644
index 3f800d3..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.bucket;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import javax.annotation.Nullable;
-import javax.annotation.Nonnull;
-
-/**
- * Response for CreateBucket request.
- */
-public final class OMBucketCreateResponse extends OMClientResponse {
-
-  private final OmBucketInfo omBucketInfo;
-
-  public OMBucketCreateResponse(@Nullable OmBucketInfo omBucketInfo,
-      @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.omBucketInfo = omBucketInfo;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // For an OmResponse with failure status this does nothing; OM code does
-    // not call this method in failure scenarios.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      String dbBucketKey =
-          omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-              omBucketInfo.getBucketName());
-      omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-          dbBucketKey, omBucketInfo);
-    }
-  }
-
-  @Nullable
-  public OmBucketInfo getOmBucketInfo() {
-    return omBucketInfo;
-  }
-
-}
-
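Every addToDBBatch implementation in this package gates the batch write on the response status, so failed requests never touch the DB. A generic sketch of that status-gated staging, with hypothetical names:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class StatusGatedBatchSketch {
      enum Status { OK, FAILED }

      // Writes are staged here and applied atomically by the caller.
      private final Map<String, String> batch = new LinkedHashMap<>();

      // Mirror of addToDBBatch: stage the write only when the request
      // succeeded; on failure the batch (and the DB) stay unchanged.
      void addToBatch(Status status, String key, String value) {
        if (status == Status.OK) {
          batch.put(key, value);
        }
      }
    }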
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java
deleted file mode 100644
index 0e0b398..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.bucket;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import javax.annotation.Nonnull;
-
-/**
- * Response for DeleteBucket request.
- */
-public final class OMBucketDeleteResponse extends OMClientResponse {
-
-  private String volumeName;
-  private String bucketName;
-
-  public OMBucketDeleteResponse(
-      String volumeName, String bucketName,
-      @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) {
-    super(omResponse);
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // For an OmResponse with failure status this does nothing; OM code does
-    // not call this method in failure scenarios.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      String dbBucketKey =
-          omMetadataManager.getBucketKey(volumeName, bucketName);
-      omMetadataManager.getBucketTable().deleteWithBatch(batchOperation,
-          dbBucketKey);
-    }
-  }
-
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  public String getBucketName() {
-    return bucketName;
-  }
-
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java
deleted file mode 100644
index f9ce204..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.response.bucket;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import javax.annotation.Nullable;
-import javax.annotation.Nonnull;
-
-/**
- * Response for SetBucketProperty request.
- */
-public class OMBucketSetPropertyResponse extends OMClientResponse {
-  private OmBucketInfo omBucketInfo;
-
-  public OMBucketSetPropertyResponse(@Nullable OmBucketInfo omBucketInfo,
-      @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.omBucketInfo = omBucketInfo;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // For an OMResponse with failure status, this should do nothing. This
-    // method is not called in the failure scenario in OM code.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      String dbBucketKey =
-          omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-              omBucketInfo.getBucketName());
-      omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-          dbBucketKey, omBucketInfo);
-    }
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java
deleted file mode 100644
index b534a56..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.bucket.acl;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.IOException;
-
-/**
- * Response for Bucket acl request.
- */
-public class OMBucketAclResponse extends OMClientResponse {
-
-  private final OmBucketInfo omBucketInfo;
-
-  public OMBucketAclResponse(@Nullable OmBucketInfo omBucketInfo,
-      @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.omBucketInfo = omBucketInfo;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // If response status is OK and success is true, add to DB batch.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK &&
-        getOMResponse().getSuccess()) {
-      String dbBucketKey =
-          omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-              omBucketInfo.getBucketName());
-      omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-          dbBucketKey, omBucketInfo);
-    }
-  }
-
-}
-
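Unlike the plain bucket responses, the ACL responses above gate the DB write on the success flag as well as Status.OK, since an ACL call can complete without changing anything. A hedged sketch of an OMResponse that passes that gate; the builder calls assume the protobuf fields (cmdType, status, success) referenced throughout this patch:

    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
        .OMResponse;

    /** Sketch: an OMResponse that passes the OK-and-success gate above. */
    public final class AclResponseSketch {
      private AclResponseSketch() {
      }

      public static OMResponse successfulSetAcl() {
        // With success=false, OMBucketAclResponse#addToDBBatch is a no-op
        // even when the status is OK.
        return OMResponse.newBuilder()
            .setCmdType(OzoneManagerProtocolProtos.Type.SetAcl)
            .setStatus(OzoneManagerProtocolProtos.Status.OK)
            .setSuccess(true)
            .build();
      }
    }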
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/package-info.java
deleted file mode 100644
index dd26272..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains classes for handling bucket acl responses.
- */
-package org.apache.hadoop.ozone.om.response.bucket.acl;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java
deleted file mode 100644
index e70c1c3..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains classes related to bucket responses.
- */
-package org.apache.hadoop.ozone.om.response.bucket;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java
deleted file mode 100644
index 2690dda..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.file;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.annotation.Nullable;
-import javax.annotation.Nonnull;
-import java.io.IOException;
-
-/**
- * Response for create directory request.
- */
-public class OMDirectoryCreateResponse extends OMClientResponse {
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(OMDirectoryCreateResponse.class);
-  private OmKeyInfo dirKeyInfo;
-
-  public OMDirectoryCreateResponse(@Nullable OmKeyInfo dirKeyInfo,
-      @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.dirKeyInfo = dirKeyInfo;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      if (dirKeyInfo != null) {
-        String dirKey =
-            omMetadataManager.getOzoneKey(dirKeyInfo.getVolumeName(),
-                dirKeyInfo.getBucketName(), dirKeyInfo.getKeyName());
-        omMetadataManager.getKeyTable().putWithBatch(batchOperation, dirKey,
-            dirKeyInfo);
-      } else {
-        // When directory already exists, we don't add it to cache. And it is
-        // not an error, in this case dirKeyInfo will be null.
-        LOG.debug("Response Status is OK, dirKeyInfo is null in " +
-            "OMDirectoryCreateResponse");
-      }
-    }
-  }
-}
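OMDirectoryCreateResponse treats "directory already exists" as a success with a null dirKeyInfo, so no key-table write is staged. An illustrative construction of that case; Type.CreateDirectory is assumed to be the matching cmdType in the OMResponse proto:

    import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponse;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
        .OMResponse;

    /** Sketch: the "directory already exists" success case. */
    public final class DirectoryExistsSketch {
      private DirectoryExistsSketch() {
      }

      public static OMDirectoryCreateResponse alreadyExists() {
        OMResponse ok = OMResponse.newBuilder()
            .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
            .setStatus(OzoneManagerProtocolProtos.Status.OK)
            .build();
        // Null dirKeyInfo is the documented "already exists" case; the
        // response then only logs at debug level instead of writing a key.
        return new OMDirectoryCreateResponse(null, ok);
      }
    }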
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
deleted file mode 100644
index 8da7313..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.file;
-
-import javax.annotation.Nullable;
-import javax.annotation.Nonnull;
-
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-
-
-
-/**
- * Response for create file request.
- */
-public class OMFileCreateResponse extends OMKeyCreateResponse {
-
-  public OMFileCreateResponse(@Nullable OmKeyInfo omKeyInfo,
-      long openKeySessionID, @Nonnull OMResponse omResponse) {
-    super(omKeyInfo, openKeySessionID, omResponse);
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/package-info.java
deleted file mode 100644
index 135eca9..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains classes related to file responses.
- */
-package org.apache.hadoop.ozone.om.response.file;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
deleted file mode 100644
index c35fa6c..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import java.io.IOException;
-import javax.annotation.Nullable;
-import javax.annotation.Nonnull;
-
-/**
- * Response for AllocateBlock request.
- */
-public class OMAllocateBlockResponse extends OMClientResponse {
-
-  private final OmKeyInfo omKeyInfo;
-  private final long clientID;
-
-  public OMAllocateBlockResponse(@Nullable OmKeyInfo omKeyInfo,
-      long clientID, @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.omKeyInfo = omKeyInfo;
-    this.clientID = clientID;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // For an OMResponse with failure status, this should do nothing. This
-    // method is not called in the failure scenario in OM code.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      String openKey = omMetadataManager.getOpenKey(omKeyInfo.getVolumeName(),
-          omKeyInfo.getBucketName(), omKeyInfo.getKeyName(), clientID);
-      omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation, openKey,
-          omKeyInfo);
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
deleted file mode 100644
index 0eb97f3..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import java.io.IOException;
-import javax.annotation.Nullable;
-import javax.annotation.Nonnull;
-
-/**
- * Response for CommitKey request.
- */
-public class OMKeyCommitResponse extends OMClientResponse {
-
-  private OmKeyInfo omKeyInfo;
-  private long openKeySessionID;
-
-  public OMKeyCommitResponse(@Nullable OmKeyInfo omKeyInfo,
-      long openKeySessionID,
-      @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) {
-    super(omResponse);
-    this.omKeyInfo = omKeyInfo;
-    this.openKeySessionID = openKeySessionID;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // For an OMResponse with failure status, this should do nothing. This
-    // method is not called in the failure scenario in OM code.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      String volumeName = omKeyInfo.getVolumeName();
-      String bucketName = omKeyInfo.getBucketName();
-      String keyName = omKeyInfo.getKeyName();
-      String openKey = omMetadataManager.getOpenKey(volumeName,
-          bucketName, keyName, openKeySessionID);
-      String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-          keyName);
-
-      // Delete from open key table and add entry to key table.
-      omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-          openKey);
-      omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKey,
-          omKeyInfo);
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
deleted file mode 100644
index fde646c..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key;
-
-import java.io.IOException;
-import javax.annotation.Nullable;
-import javax.annotation.Nonnull;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-/**
- * Response for CreateKey request.
- */
-public class OMKeyCreateResponse extends OMClientResponse {
-
-  private OmKeyInfo omKeyInfo;
-  private long openKeySessionID;
-
-  public OMKeyCreateResponse(@Nullable OmKeyInfo omKeyInfo,
-      long openKeySessionID, @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.omKeyInfo = omKeyInfo;
-    this.openKeySessionID = openKeySessionID;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // For an OMResponse with failure status, this should do nothing. This
-    // method is not called in the failure scenario in OM code.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      String openKey = omMetadataManager.getOpenKey(omKeyInfo.getVolumeName(),
-          omKeyInfo.getBucketName(), omKeyInfo.getKeyName(),
-          openKeySessionID);
-      omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation,
-          openKey, omKeyInfo);
-    }
-  }
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
deleted file mode 100644
index 96aedd1..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key;
-
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import java.io.IOException;
-import javax.annotation.Nullable;
-import javax.annotation.Nonnull;
-
-/**
- * Response for DeleteKey request.
- */
-public class OMKeyDeleteResponse extends OMClientResponse {
-  private OmKeyInfo omKeyInfo;
-
-  public OMKeyDeleteResponse(@Nullable OmKeyInfo omKeyInfo,
-      @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.omKeyInfo = omKeyInfo;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // For an OMResponse with failure status, this should do nothing. This
-    // method is not called in the failure scenario in OM code.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
-          omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
-      omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
-          ozoneKey);
-
-      // If the key is not empty, add it to the deleted table.
-      if (!isKeyEmpty(omKeyInfo)) {
-        // If a deleted key is put into the table where a key with the same
-        // name already exists, the old deleted key information would be
-        // lost. To avoid this, first check whether a key with the same name
-        // exists. The deletedTable in OM metadata stores
-        // <KeyName, RepeatedOmKeyInfo>. RepeatedOmKeyInfo is the structure
-        // that allows a list of OmKeyInfo entries to be tied to the same
-        // key name. If the RepeatedOmKeyInfo for a keyName is null, a new
-        // instance is created; otherwise the key is appended to the list
-        // and the instance is stored back in deletedTable.
-        RepeatedOmKeyInfo repeatedOmKeyInfo =
-            omMetadataManager.getDeletedTable().get(ozoneKey);
-        repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
-            omKeyInfo, repeatedOmKeyInfo);
-        omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-            ozoneKey, repeatedOmKeyInfo);
-      }
-    }
-  }
-
-  /**
-   * Check whether the key is empty. A key is empty if it has no blocks.
-   *
-   * @param keyInfo the key to check, may be null.
-   * @return true if the key is empty, else false.
-   */
-  private boolean isKeyEmpty(@Nullable OmKeyInfo keyInfo) {
-    if (keyInfo == null) {
-      return true;
-    }
-    for (OmKeyLocationInfoGroup keyLocationList : keyInfo
-        .getKeyLocationVersions()) {
-      if (keyLocationList.getLocationList().size() != 0) {
-        return false;
-      }
-    }
-    return true;
-  }
-}
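The comment above describes the accumulation that keeps earlier tombstones alive when the same key name is deleted repeatedly. A small sketch of that semantics, relying only on the OmUtils.prepareKeyForDelete signature used above (a null accumulator starts a new list; a non-null one is appended to):

    import org.apache.hadoop.ozone.OmUtils;
    import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
    import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;

    /** Sketch of the deletedTable accumulation described above. */
    public final class DeletedKeySketch {
      private DeletedKeySketch() {
      }

      public static RepeatedOmKeyInfo accumulate(OmKeyInfo newlyDeleted,
          RepeatedOmKeyInfo existing) {
        // A null accumulator yields a fresh single-entry RepeatedOmKeyInfo;
        // a non-null one gains an entry, so earlier tombstones survive.
        return OmUtils.prepareKeyForDelete(newlyDeleted, existing);
      }
    }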
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java
deleted file mode 100644
index 513b94d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import java.io.IOException;
-import java.util.List;
-import javax.annotation.Nonnull;
-
-/**
- * Response for {@link OMKeyPurgeRequest}.
- */
-public class OMKeyPurgeResponse extends OMClientResponse {
-
-  private List<String> purgeKeyList;
-
-  public OMKeyPurgeResponse(List<String> keyList,
-      @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.purgeKeyList = keyList;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      for (String key : purgeKeyList) {
-        omMetadataManager.getDeletedTable().deleteWithBatch(batchOperation,
-            key);
-      }
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
deleted file mode 100644
index 0e9ae17..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import java.io.IOException;
-import javax.annotation.Nullable;
-import javax.annotation.Nonnull;
-
-/**
- * Response for RenameKey request.
- */
-public class OMKeyRenameResponse extends OMClientResponse {
-
-  private final OmKeyInfo renameKeyInfo;
-  private final String toKeyName;
-  private final String fromKeyName;
-
-  public OMKeyRenameResponse(@Nullable OmKeyInfo renameKeyInfo,
-      String toKeyName, String fromKeyName, @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.renameKeyInfo = renameKeyInfo;
-    this.toKeyName = toKeyName;
-    this.fromKeyName = fromKeyName;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-    // For an OMResponse with failure status, this should do nothing. This
-    // method is not called in the failure scenario in OM code.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-
-      // If fromKeyName and toKeyName are equal, do nothing.
-      if (!toKeyName.equals(fromKeyName)) {
-        String volumeName = renameKeyInfo.getVolumeName();
-        String bucketName = renameKeyInfo.getBucketName();
-        omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
-            omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName));
-        omMetadataManager.getKeyTable().putWithBatch(batchOperation,
-            omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName),
-            renameKeyInfo);
-      }
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java
deleted file mode 100644
index 8c8bc97..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key.acl;
-
-import java.io.IOException;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-/**
- * Response for Key acl request.
- */
-public class OMKeyAclResponse extends OMClientResponse {
-
-  private final OmKeyInfo omKeyInfo;
-
-  public OMKeyAclResponse(@Nullable OmKeyInfo omKeyInfo,
-      @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.omKeyInfo = omKeyInfo;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // If response status is OK and success is true, add to DB batch.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK &&
-        getOMResponse().getSuccess()) {
-      String dbKey =
-          omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
-              omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
-      omMetadataManager.getKeyTable().putWithBatch(batchOperation,
-          dbKey, omKeyInfo);
-    }
-  }
-
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/package-info.java
deleted file mode 100644
index 6a17231..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains classes related to key acl responses.
- */
-package org.apache.hadoop.ozone.om.response.key.acl;
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java
deleted file mode 100644
index 1fa02da..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key.acl.prefix;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.IOException;
-
-/**
- * Response for Prefix Acl request.
- */
-public class OMPrefixAclResponse extends OMClientResponse {
-  private final OmPrefixInfo prefixInfo;
-
-  public OMPrefixAclResponse(@Nullable OmPrefixInfo omPrefixInfo,
-      @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) {
-    super(omResponse);
-    this.prefixInfo = omPrefixInfo;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // If the success flag is set, add to DB batch.
-    if (getOMResponse().getSuccess()) {
-      if ((getOMResponse().hasAddAclResponse()
-          && getOMResponse().getAddAclResponse().getResponse()) ||
-          (getOMResponse().hasSetAclResponse()
-              && getOMResponse().getSetAclResponse().getResponse())) {
-        omMetadataManager.getPrefixTable().putWithBatch(batchOperation,
-            prefixInfo.getName(), prefixInfo);
-      } else if ((getOMResponse().hasRemoveAclResponse()
-          && getOMResponse().getRemoveAclResponse().getResponse())) {
-        if (prefixInfo.getAcls().size() == 0) {
-          // If the acl list is empty, delete the entry.
-          omMetadataManager.getPrefixTable().deleteWithBatch(batchOperation,
-              prefixInfo.getName());
-        } else {
-          omMetadataManager.getPrefixTable().putWithBatch(batchOperation,
-              prefixInfo.getName(), prefixInfo);
-        }
-      }
-    }
-  }
-
-}
-
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/package-info.java
deleted file mode 100644
index 4b53e96..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains classes related to prefix acl responses.
- */
-package org.apache.hadoop.ozone.om.response.key.acl.prefix;
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/package-info.java
deleted file mode 100644
index 2097d22..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains classes related to key responses.
- */
-package org.apache.hadoop.ozone.om.response.key;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java
deleted file mode 100644
index d66cac7..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response;
-
-
-/**
- * This package contains classes for the OM Responses.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketCreateResponse.java
deleted file mode 100644
index f91c205..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketCreateResponse.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.response.s3.bucket;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.IOException;
-
-import com.google.common.base.Preconditions;
-import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse;
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-/**
- * Response for S3Bucket create request.
- */
-public class S3BucketCreateResponse extends OMClientResponse {
-
-  private OMVolumeCreateResponse omVolumeCreateResponse;
-  private OMBucketCreateResponse omBucketCreateResponse;
-  private String s3Bucket;
-  private String s3Mapping;
-
-  public S3BucketCreateResponse(
-      @Nullable OMVolumeCreateResponse omVolumeCreateResponse,
-      @Nullable OMBucketCreateResponse omBucketCreateResponse,
-      @Nullable String s3BucketName,
-      @Nullable String s3Mapping, @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.omVolumeCreateResponse = omVolumeCreateResponse;
-    this.omBucketCreateResponse = omBucketCreateResponse;
-    this.s3Bucket = s3BucketName;
-    this.s3Mapping = s3Mapping;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      if (omVolumeCreateResponse != null) {
-        omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-      }
-
-      Preconditions.checkState(omBucketCreateResponse != null);
-      omBucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-      omMetadataManager.getS3Table().putWithBatch(batchOperation, s3Bucket,
-          s3Mapping);
-    }
-  }
-
-  @VisibleForTesting
-  public String getS3Mapping() {
-    return s3Mapping;
-  }
-}
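S3BucketCreateResponse is a composite: it replays its child volume and bucket responses inside the same batch before adding the S3 name mapping, so all three records become visible atomically. A generic sketch of that delegation pattern, assuming only the OMClientResponse API shown in this patch:

    import java.io.IOException;

    import org.apache.hadoop.hdds.utils.db.BatchOperation;
    import org.apache.hadoop.ozone.om.OMMetadataManager;
    import org.apache.hadoop.ozone.om.response.OMClientResponse;

    /** Sketch: apply several child responses under one batch operation. */
    public final class CompositeResponseSketch {
      private CompositeResponseSketch() {
      }

      public static void applyAll(OMMetadataManager metadataManager,
          BatchOperation batch, OMClientResponse... children)
          throws IOException {
        // Children stage into the same batch, so their writes either all
        // commit together or not at all.
        for (OMClientResponse child : children) {
          child.addToDBBatch(metadataManager, batch);
        }
      }
    }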
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketDeleteResponse.java
deleted file mode 100644
index 979318d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketDeleteResponse.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om.response.s3.bucket;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.IOException;
-
-/**
- * Response for S3Bucket Delete request.
- */
-public class S3BucketDeleteResponse extends OMClientResponse {
-
-  private String s3BucketName;
-  private String volumeName;
-  public S3BucketDeleteResponse(@Nullable String s3BucketName,
-      @Nullable String volumeName, @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.s3BucketName = s3BucketName;
-    this.volumeName = volumeName;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      omMetadataManager.getBucketTable().deleteWithBatch(batchOperation,
-          omMetadataManager.getBucketKey(volumeName, s3BucketName));
-      omMetadataManager.getS3Table().deleteWithBatch(batchOperation,
-          s3BucketName);
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java
deleted file mode 100644
index f484ecc..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains classes related to s3 bucket responses.
- */
-package org.apache.hadoop.ozone.om.response.s3.bucket;
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java
deleted file mode 100644
index a63edd8..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.response.s3.multipart;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.IOException;
-
-/**
- * Response for S3 Initiate Multipart Upload request.
- */
-public class S3InitiateMultipartUploadResponse extends OMClientResponse {
-
-  private OmMultipartKeyInfo omMultipartKeyInfo;
-  private OmKeyInfo omKeyInfo;
-
-  public S3InitiateMultipartUploadResponse(
-      @Nullable OmMultipartKeyInfo omMultipartKeyInfo,
-      @Nullable OmKeyInfo omKeyInfo,
-      @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) {
-    super(omResponse);
-    this.omMultipartKeyInfo = omMultipartKeyInfo;
-    this.omKeyInfo = omKeyInfo;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // For an OMResponse with failure status, this should do nothing. This
-    // method is not called in the failure scenario in OM code.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-
-      String multipartKey =
-          omMetadataManager.getMultipartKey(omKeyInfo.getVolumeName(),
-              omKeyInfo.getBucketName(), omKeyInfo.getKeyName(),
-              omMultipartKeyInfo.getUploadID());
-
-      omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation,
-          multipartKey, omKeyInfo);
-      omMetadataManager.getMultipartInfoTable().putWithBatch(batchOperation,
-          multipartKey, omMultipartKeyInfo);
-    }
-  }
-
-  @VisibleForTesting
-  public OmMultipartKeyInfo getOmMultipartKeyInfo() {
-    return omMultipartKeyInfo;
-  }
-
-  @VisibleForTesting
-  public OmKeyInfo getOmKeyInfo() {
-    return omKeyInfo;
-  }
-}
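Initiate-multipart-upload stages two records under one derived multipartKey, which is what later commit and abort requests use to find both the placeholder open key and the upload metadata. A sketch of deriving that key with the same four-argument helper used above; the volume, bucket, and key names are illustrative:

    import org.apache.hadoop.ozone.om.OMMetadataManager;

    /** Sketch: the single multipartKey that indexes both tables above. */
    public final class MultipartKeySketch {
      private MultipartKeySketch() {
      }

      public static String multipartKey(OMMetadataManager omMetadataManager,
          String uploadID) {
        // Indexes both the openKeyTable and multipartInfoTable entries.
        return omMetadataManager.getMultipartKey(
            "vol1", "bucket1", "key1", uploadID);
      }
    }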
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
deleted file mode 100644
index a9a4024..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.s3.multipart;
-
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .PartKeyInfo;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.TreeMap;
-import javax.annotation.Nullable;
-import javax.annotation.Nonnull;
-
-/**
- * Response for Multipart Abort Request.
- */
-public class S3MultipartUploadAbortResponse extends OMClientResponse {
-
-  private String multipartKey;
-  private OmMultipartKeyInfo omMultipartKeyInfo;
-
-  public S3MultipartUploadAbortResponse(String multipartKey,
-      @Nullable OmMultipartKeyInfo omMultipartKeyInfo,
-      @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.multipartKey = multipartKey;
-    this.omMultipartKeyInfo = omMultipartKeyInfo;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-
-      // Delete from openKey table and multipart info table.
-      omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-          multipartKey);
-      omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation,
-          multipartKey);
-
-      // Move all the parts to delete table
-      TreeMap<Integer, PartKeyInfo> partKeyInfoMap =
-          omMultipartKeyInfo.getPartKeyInfoMap();
-      for (Map.Entry<Integer, PartKeyInfo> partKeyInfoEntry :
-          partKeyInfoMap.entrySet()) {
-        PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
-        OmKeyInfo currentKeyPartInfo =
-            OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
-
-        RepeatedOmKeyInfo repeatedOmKeyInfo =
-            omMetadataManager.getDeletedTable().get(partKeyInfo.getPartName());
-
-        repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
-            currentKeyPartInfo, repeatedOmKeyInfo);
-
-        omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-            partKeyInfo.getPartName(),
-            repeatedOmKeyInfo);
-      }
-
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
deleted file mode 100644
index fef3698..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.s3.multipart;
-
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import java.io.IOException;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .Status.NO_SUCH_MULTIPART_UPLOAD_ERROR;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .Status.OK;
-
-import javax.annotation.Nullable;
-import javax.annotation.Nonnull;
-
-/**
- * Response for S3MultipartUploadCommitPart request.
- */
-public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
-
-  private String multipartKey;
-  private String openKey;
-  private OmKeyInfo deletePartKeyInfo;
-  private OmMultipartKeyInfo omMultipartKeyInfo;
-  private OzoneManagerProtocolProtos.PartKeyInfo oldMultipartKeyInfo;
-
-
-  public S3MultipartUploadCommitPartResponse(String multipartKey,
-      String openKey, @Nullable OmKeyInfo deletePartKeyInfo,
-      @Nullable OmMultipartKeyInfo omMultipartKeyInfo,
-      @Nullable OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
-      @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.multipartKey = multipartKey;
-    this.openKey = openKey;
-    this.deletePartKeyInfo = deletePartKeyInfo;
-    this.omMultipartKeyInfo = omMultipartKeyInfo;
-    this.oldMultipartKeyInfo = oldPartKeyInfo;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    if (getOMResponse().getStatus() == NO_SUCH_MULTIPART_UPLOAD_ERROR) {
-      // By the time we tried to commit this part, someone had already
-      // aborted the multipart upload. So, delete this part's information.
-      RepeatedOmKeyInfo repeatedOmKeyInfo =
-          omMetadataManager.getDeletedTable().get(openKey);
-
-      repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
-          deletePartKeyInfo, repeatedOmKeyInfo);
-
-
-      omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-          openKey,
-          repeatedOmKeyInfo);
-    }
-
-    if (getOMResponse().getStatus() == OK) {
-
-      // If we have old part info, we need to do four steps:
-      //   0. Strip GDPR-related metadata from the multipart info.
-      //   1. Add the old part to the delete table.
-      //   2. Commit the multipart info, which now records this new part.
-      //   3. Delete this new part's entry from the open key table.
-
-      // An entry already exists for this part number of the multipart
-      // upload, so move the old part to the delete table.
-      if (oldMultipartKeyInfo != null) {
-        OmKeyInfo partKey =
-            OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo());
-
-        RepeatedOmKeyInfo repeatedOmKeyInfo =
-            omMetadataManager.getDeletedTable()
-                .get(oldMultipartKeyInfo.getPartName());
-
-        repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(partKey,
-            repeatedOmKeyInfo);
-
-        omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-            oldMultipartKeyInfo.getPartName(),
-            repeatedOmKeyInfo);
-      }
-
-      omMetadataManager.getMultipartInfoTable().putWithBatch(batchOperation,
-          multipartKey, omMultipartKeyInfo);
-
-      // This part's information has been added to multipartKeyInfo, so we
-      // can safely delete the part key info from the open key table.
-      omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-          openKey);
-    }
-  }
-}
-
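Reviewer note: S3MultipartUploadCommitPartResponse is the one class in this family that also acts on a failure status, to resolve the commit/abort race. A hypothetical call site using only the constructor shown above (argument values are illustrative):

    // Sketch: handler building the response on the
    // NO_SUCH_MULTIPART_UPLOAD_ERROR path, where only the orphaned part
    // needs to be queued for deletion.
    OMClientResponse response = new S3MultipartUploadCommitPartResponse(
        multipartKey, openKey, deletePartKeyInfo,
        null /* upload already aborted */, null /* no old part */,
        omResponse);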
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
deleted file mode 100644
index b0cc8b5..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.s3.multipart;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import javax.annotation.Nullable;
-import javax.annotation.Nonnull;
-
-/**
- * Response for Multipart Upload Complete request.
- */
-public class S3MultipartUploadCompleteResponse extends OMClientResponse {
-  private String multipartKey;
-  private OmKeyInfo omKeyInfo;
-
-
-  public S3MultipartUploadCompleteResponse(@Nullable String multipartKey,
-      @Nullable OmKeyInfo omKeyInfo, @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.multipartKey = multipartKey;
-    this.omKeyInfo = omKeyInfo;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      omMetadataManager.getKeyTable().putWithBatch(batchOperation,
-          omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
-              omKeyInfo.getBucketName(), omKeyInfo.getKeyName()), omKeyInfo);
-      omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-          multipartKey);
-      omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation,
-          multipartKey);
-    }
-  }
-}
-
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java
deleted file mode 100644
index 2e1474d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Package contains classes related to S3 multipart upload responses.
- */
-package org.apache.hadoop.ozone.om.response.s3.multipart;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java
deleted file mode 100644
index 6467c72..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.s3.security;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.IOException;
-
-/**
- * Response for GetS3Secret request.
- */
-public class S3GetSecretResponse extends OMClientResponse {
-
-
-  private S3SecretValue s3SecretValue;
-
-  public S3GetSecretResponse(@Nullable S3SecretValue s3SecretValue,
-      @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.s3SecretValue = s3SecretValue;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    if (s3SecretValue != null &&
-        getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      omMetadataManager.getS3SecretTable().putWithBatch(batchOperation,
-          s3SecretValue.getKerberosID(), s3SecretValue);
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
deleted file mode 100644
index d9024d1..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package contains classes related to S3 security responses.
- */
-package org.apache.hadoop.ozone.om.response.s3.security;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMCancelDelegationTokenResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMCancelDelegationTokenResponse.java
deleted file mode 100644
index 8f2632d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMCancelDelegationTokenResponse.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.security;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.hdds.utils.db.Table;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.IOException;
-
-/**
- * Handle response for CancelDelegationToken request.
- */
-public class OMCancelDelegationTokenResponse extends OMClientResponse {
-
-  private OzoneTokenIdentifier ozoneTokenIdentifier;
-
-  public OMCancelDelegationTokenResponse(
-      @Nullable OzoneTokenIdentifier ozoneTokenIdentifier,
-      @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.ozoneTokenIdentifier = ozoneTokenIdentifier;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-    Table table = omMetadataManager.getDelegationTokenTable();
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      table.deleteWithBatch(batchOperation, ozoneTokenIdentifier);
-    }
-  }
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMGetDelegationTokenResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMGetDelegationTokenResponse.java
deleted file mode 100644
index 7f902d9..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMGetDelegationTokenResponse.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.security;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.hdds.utils.db.Table;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.IOException;
-
-/**
- * Handle response for GetDelegationToken request.
- */
-public class OMGetDelegationTokenResponse extends OMClientResponse {
-
-  private OzoneTokenIdentifier ozoneTokenIdentifier;
-  private long renewTime = -1L;
-
-  public OMGetDelegationTokenResponse(
-      @Nullable OzoneTokenIdentifier ozoneTokenIdentifier,
-      long renewTime, @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.ozoneTokenIdentifier = ozoneTokenIdentifier;
-    this.renewTime = renewTime;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-    Table table = omMetadataManager.getDelegationTokenTable();
-    if (ozoneTokenIdentifier != null &&
-        getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      table.putWithBatch(batchOperation, ozoneTokenIdentifier, renewTime);
-    }
-  }
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMRenewDelegationTokenResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMRenewDelegationTokenResponse.java
deleted file mode 100644
index f0f1cd3..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMRenewDelegationTokenResponse.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.security;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.hdds.utils.db.Table;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.IOException;
-
-/**
- * Handle response for RenewDelegationToken request.
- */
-public class OMRenewDelegationTokenResponse extends OMClientResponse {
-
-  private OzoneTokenIdentifier ozoneTokenIdentifier;
-  private long renewTime = -1L;
-
-  public OMRenewDelegationTokenResponse(
-      @Nullable OzoneTokenIdentifier ozoneTokenIdentifier,
-      long renewTime, @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.ozoneTokenIdentifier = ozoneTokenIdentifier;
-    this.renewTime = renewTime;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-    Table table = omMetadataManager.getDelegationTokenTable();
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      table.putWithBatch(batchOperation, ozoneTokenIdentifier, renewTime);
-    }
-  }
-}
-
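Reviewer note: taken together, the three token responses above map directly onto the delegation token table: get and renew write (identifier -> renew time), cancel deletes. The lifecycle they implement, with illustrative values:

    // get:    delegationTokenTable.put(tokenId, renewTime)
    // renew:  delegationTokenTable.put(tokenId, newRenewTime)
    // cancel: delegationTokenTable.delete(tokenId)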
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/package-info.java
deleted file mode 100644
index 014bc42..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains classes that handle responses to security requests.
- */
-package org.apache.hadoop.ozone.om.response.security;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java
deleted file mode 100644
index 2b797d9b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.volume;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import javax.annotation.Nonnull;
-import java.io.IOException;
-
-/**
- * Response for an OM volume ACL operation request.
- */
-public class OMVolumeAclOpResponse extends OMClientResponse {
-
-  private OmVolumeArgs omVolumeArgs;
-
-  public OMVolumeAclOpResponse(OmVolumeArgs omVolumeArgs,
-      @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.omVolumeArgs = omVolumeArgs;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // For an OMResponse with a failure status, this should do nothing; this
-    // method is not called on the failure path in OM code.
-    if (getOMResponse().getSuccess()) {
-      if ((getOMResponse().hasAddAclResponse() &&
-          getOMResponse().getAddAclResponse().getResponse()) ||
-          (getOMResponse().hasRemoveAclResponse() &&
-              getOMResponse().getRemoveAclResponse().getResponse()) ||
-          (getOMResponse().hasSetAclResponse() &&
-              getOMResponse().getSetAclResponse().getResponse())) {
-        omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
-            omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()),
-            omVolumeArgs);
-      }
-    }
-  }
-
-  @VisibleForTesting
-  public OmVolumeArgs getOmVolumeArgs() {
-    return omVolumeArgs;
-  }
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java
deleted file mode 100644
index 1bd3e4f..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.volume;
-
-import java.io.IOException;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserVolumeInfo;
-
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import javax.annotation.Nonnull;
-
-/**
- * Response for CreateVolume request.
- */
-public class OMVolumeCreateResponse extends OMClientResponse {
-
-  private UserVolumeInfo userVolumeInfo;
-  private OmVolumeArgs omVolumeArgs;
-
-  public OMVolumeCreateResponse(OmVolumeArgs omVolumeArgs,
-      UserVolumeInfo userVolumeInfo, @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.omVolumeArgs = omVolumeArgs;
-    this.userVolumeInfo = userVolumeInfo;
-  }
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // For an OMResponse with a failure status, this should do nothing; this
-    // method is not called on the failure path in OM code.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      String dbVolumeKey =
-          omMetadataManager.getVolumeKey(omVolumeArgs.getVolume());
-      String dbUserKey =
-          omMetadataManager.getUserKey(omVolumeArgs.getOwnerName());
-
-      omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
-          dbVolumeKey, omVolumeArgs);
-      omMetadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey,
-          userVolumeInfo);
-    }
-  }
-
-  @VisibleForTesting
-  public OmVolumeArgs getOmVolumeArgs() {
-    return omVolumeArgs;
-  }
-
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java
deleted file mode 100644
index 6718ce5..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.volume;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .UserVolumeInfo;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import javax.annotation.Nonnull;
-
-/**
- * Response for DeleteVolume request.
- */
-public class OMVolumeDeleteResponse extends OMClientResponse {
-  private String volume;
-  private String owner;
-  private UserVolumeInfo updatedVolumeList;
-
-  public OMVolumeDeleteResponse(String volume, String owner,
-      UserVolumeInfo updatedVolumeList, @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.volume = volume;
-    this.owner = owner;
-    this.updatedVolumeList = updatedVolumeList;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // For an OMResponse with a failure status, this should do nothing; this
-    // method is not called on the failure path in OM code.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      String dbUserKey = omMetadataManager.getUserKey(owner);
-      UserVolumeInfo volumeList = updatedVolumeList;
-      if (updatedVolumeList.getVolumeNamesList().size() == 0) {
-        omMetadataManager.getUserTable().deleteWithBatch(batchOperation,
-            dbUserKey);
-      } else {
-        omMetadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey,
-            volumeList);
-      }
-      omMetadataManager.getVolumeTable().deleteWithBatch(batchOperation,
-          omMetadataManager.getVolumeKey(volume));
-    }
-  }
-
-}
-
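Reviewer note: a worked example of the user-table bookkeeping above (names hypothetical): deleting an owner's last volume removes the owner's entry entirely; otherwise the pruned volume list is written back:

    // userTable["hive"] = [vol1]       -> delete vol1 -> "hive" entry removed
    // userTable["hive"] = [vol1, vol2] -> delete vol1 -> [vol2]
    // volumeTable["/vol1"] is deleted in the same batch either way.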
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java
deleted file mode 100644
index 8e02702..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.volume;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .UserVolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import javax.annotation.Nonnull;
-
-/**
- * Response for set owner request.
- */
-public class OMVolumeSetOwnerResponse extends OMClientResponse {
-
-  private String oldOwner;
-  private UserVolumeInfo oldOwnerVolumeList;
-  private UserVolumeInfo newOwnerVolumeList;
-  private OmVolumeArgs newOwnerVolumeArgs;
-
-  public OMVolumeSetOwnerResponse(String oldOwner,
-      UserVolumeInfo oldOwnerVolumeList, UserVolumeInfo newOwnerVolumeList,
-      OmVolumeArgs newOwnerVolumeArgs, @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.oldOwner = oldOwner;
-    this.oldOwnerVolumeList = oldOwnerVolumeList;
-    this.newOwnerVolumeList = newOwnerVolumeList;
-    this.newOwnerVolumeArgs = newOwnerVolumeArgs;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // For an OMResponse with a failure status, this should do nothing; this
-    // method is not called on the failure path in OM code.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      String oldOwnerKey = omMetadataManager.getUserKey(oldOwner);
-      String newOwnerKey =
-          omMetadataManager.getUserKey(newOwnerVolumeArgs.getOwnerName());
-      if (oldOwnerVolumeList.getVolumeNamesList().size() == 0) {
-        omMetadataManager.getUserTable().deleteWithBatch(batchOperation,
-            oldOwnerKey);
-      } else {
-        omMetadataManager.getUserTable().putWithBatch(batchOperation,
-            oldOwnerKey, oldOwnerVolumeList);
-      }
-      omMetadataManager.getUserTable().putWithBatch(batchOperation, newOwnerKey,
-          newOwnerVolumeList);
-
-      String dbVolumeKey =
-          omMetadataManager.getVolumeKey(newOwnerVolumeArgs.getVolume());
-      omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
-          dbVolumeKey, newOwnerVolumeArgs);
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java
deleted file mode 100644
index 13e05fa..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.volume;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import java.io.IOException;
-
-import javax.annotation.Nonnull;
-
-/**
- * Response for set quota request.
- */
-public class OMVolumeSetQuotaResponse extends OMClientResponse {
-  private OmVolumeArgs omVolumeArgs;
-
-  public OMVolumeSetQuotaResponse(OmVolumeArgs omVolumeArgs,
-      @Nonnull OMResponse omResponse) {
-    super(omResponse);
-    this.omVolumeArgs = omVolumeArgs;
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    // For an OMResponse with a failure status, this should do nothing; this
-    // method is not called on the failure path in OM code.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
-          omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()),
-          omVolumeArgs);
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java
deleted file mode 100644
index 478a19d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package contains classes related to volume responses.
- */
-package org.apache.hadoop.ozone.om.response.volume;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
deleted file mode 100644
index 5bca52d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.snapshot;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.ozone.om.ha.OMNodeDetails;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint;
-import org.apache.http.Header;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
-import org.apache.http.util.EntityUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import static java.net.HttpURLConnection.HTTP_CREATED;
-import static java.net.HttpURLConnection.HTTP_OK;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_INDEX;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_KEY;
-
-/**
- * OzoneManagerSnapshotProvider downloads the latest checkpoint from the
- * leader OM and loads it into the state machine.
- */
-public class OzoneManagerSnapshotProvider {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneManagerSnapshotProvider.class);
-
-  private final File omSnapshotDir;
-  private Map<String, OMNodeDetails> peerNodesMap;
-  private final HttpConfig.Policy httpPolicy;
-  private final RequestConfig httpRequestConfig;
-  private CloseableHttpClient httpClient;
-
-  private static final String OM_SNAPSHOT_DB = "om.snapshot.db";
-
-  public OzoneManagerSnapshotProvider(Configuration conf,
-      File omRatisSnapshotDir, List<OMNodeDetails> peerNodes) {
-
-    LOG.info("Initializing OM Snapshot Provider");
-    this.omSnapshotDir = omRatisSnapshotDir;
-
-    this.peerNodesMap = new HashMap<>();
-    for (OMNodeDetails peerNode : peerNodes) {
-      this.peerNodesMap.put(peerNode.getOMNodeId(), peerNode);
-    }
-
-    this.httpPolicy = DFSUtil.getHttpPolicy(conf);
-    this.httpRequestConfig = getHttpRequestConfig(conf);
-  }
-
-  private RequestConfig getHttpRequestConfig(Configuration conf) {
-    TimeUnit socketTimeoutUnit =
-        OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT.getUnit();
-    int socketTimeoutMS = (int) conf.getTimeDuration(
-        OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_KEY,
-        OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT.getDuration(),
-        socketTimeoutUnit);
-
-    TimeUnit connectionTimeoutUnit =
-        OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT.getUnit();
-    int connectionTimeoutMS = (int) conf.getTimeDuration(
-        OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_KEY,
-        OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT.getDuration(),
-        connectionTimeoutUnit);
-
-    TimeUnit requestTimeoutUnit =
-        OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT.getUnit();
-    int requestTimeoutMS = (int) conf.getTimeDuration(
-        OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_KEY,
-        OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT.getDuration(),
-        requestTimeoutUnit);
-
-    RequestConfig requestConfig = RequestConfig.custom()
-        .setSocketTimeout(socketTimeoutMS)
-        .setConnectTimeout(connectionTimeoutMS)
-        .setConnectionRequestTimeout(requestTimeoutMS)
-        .build();
-
-    return requestConfig;
-  }
-
-  /**
-   * Create and return http client object.
-   */
-  private HttpClient getHttpClient() {
-    if (httpClient == null) {
-      httpClient = HttpClientBuilder
-          .create()
-          .setDefaultRequestConfig(httpRequestConfig)
-          .build();
-    }
-    return httpClient;
-  }
-
-  /**
-   * Close http client object.
-   */
-  private void closeHttpClient() throws IOException {
-    if (httpClient != null) {
-      httpClient.close();
-      httpClient = null;
-    }
-  }
-
-  /**
-   * Download the latest checkpoint from OM Leader via HTTP.
-   * @param leaderOMNodeID leader OM Node ID.
-   * @return the DB checkpoint (including the ratis snapshot index)
-   */
-  public DBCheckpoint getOzoneManagerDBSnapshot(String leaderOMNodeID)
-      throws IOException {
-    String snapshotFileName = OM_SNAPSHOT_DB + "_" + System.currentTimeMillis();
-    File targetFile = new File(omSnapshotDir, snapshotFileName + ".tar.gz");
-
-    String omCheckpointUrl = peerNodesMap.get(leaderOMNodeID)
-        .getOMDBCheckpointEnpointUrl(httpPolicy);
-
-    LOG.info("Downloading latest checkpoint from Leader OM {}. Checkpoint " +
-        "URL: {}", leaderOMNodeID, omCheckpointUrl);
-
-    try {
-      HttpGet httpGet = new HttpGet(omCheckpointUrl);
-      HttpResponse response = getHttpClient().execute(httpGet);
-      int errorCode = response.getStatusLine().getStatusCode();
-      HttpEntity entity = response.getEntity();
-
-      if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) {
-
-        Header header = response.getFirstHeader(OM_RATIS_SNAPSHOT_INDEX);
-        if (header == null) {
-          throw new IOException("The HTTP response header " +
-              OM_RATIS_SNAPSHOT_INDEX + " is missing.");
-        }
-
-        long snapshotIndex = Long.parseLong(header.getValue());
-
-        try (InputStream inputStream = entity.getContent()) {
-          FileUtils.copyInputStreamToFile(inputStream, targetFile);
-        }
-
-        // Untar the checkpoint file.
-        Path untarredDbDir = Paths.get(omSnapshotDir.getAbsolutePath(),
-            snapshotFileName);
-        FileUtil.unTar(targetFile, untarredDbDir.toFile());
-        FileUtils.deleteQuietly(targetFile);
-
-        LOG.info("Sucessfully downloaded latest checkpoint with snapshot " +
-            "index {} from leader OM: {}",  snapshotIndex, leaderOMNodeID);
-
-        RocksDBCheckpoint omCheckpoint = new RocksDBCheckpoint(untarredDbDir);
-        omCheckpoint.setRatisSnapshotIndex(snapshotIndex);
-        return omCheckpoint;
-      }
-
-      if (entity != null) {
-        throw new IOException("Unexpected exception when trying to reach " +
-            "OM to download latest checkpoint. Checkpoint URL: " +
-            omCheckpointUrl + ". Entity: " + EntityUtils.toString(entity));
-      } else {
-        throw new IOException("Unexpected null in http payload, while " +
-            "processing request to OM to download latest checkpoint. " +
-            "Checkpoint Url: " + omCheckpointUrl);
-      }
-    } finally {
-      closeHttpClient();
-    }
-  }
-}
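Reviewer note: for reference, the Ratis state machine consumed this provider roughly as follows; a sketch that assumes DBCheckpoint exposes the snapshot index set above via a getRatisSnapshotIndex() getter, not the exact removed call site:

    OzoneManagerSnapshotProvider provider = new OzoneManagerSnapshotProvider(
        conf, omRatisSnapshotDir, peerNodes);
    // Downloads the leader's DB checkpoint over HTTP and untars it locally.
    DBCheckpoint checkpoint =
        provider.getOzoneManagerDBSnapshot(leaderOMNodeId);
    long snapshotIndex = checkpoint.getRatisSnapshotIndex(); // assumed getter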
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java
deleted file mode 100644
index 3c82a69..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains OM Ratis Snapshot related classes.
- */
-package org.apache.hadoop.ozone.om.snapshot;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandler.java
deleted file mode 100644
index f84e623..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandler.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.protocolPB;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-
-/**
- * Handler for OM requests in OM HA.
- */
-public interface OzoneManagerHARequestHandler extends RequestHandler {
-
-  /**
-   * Handle Apply Transaction Requests from OzoneManager StateMachine.
-   * @param omRequest - OM request to be applied
-   * @param transactionLogIndex - ratis transaction log index
-   * @return OMResponse
-   */
-  OMResponse handleApplyTransaction(OMRequest omRequest,
-      long transactionLogIndex);
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java
deleted file mode 100644
index 2d305d7..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.protocolPB;
-
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .Type;
-
-/**
- * Command handler for OM requests. The OM state machine calls this handler
- * to deserialize the client request and dispatch it to the OM.
- */
-public class OzoneManagerHARequestHandlerImpl
-    extends OzoneManagerRequestHandler implements OzoneManagerHARequestHandler {
-
-  private OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer;
-
-  public OzoneManagerHARequestHandlerImpl(OzoneManager om,
-      OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer) {
-    super(om);
-    this.ozoneManagerDoubleBuffer = ozoneManagerDoubleBuffer;
-  }
-
-
-  @Override
-  public OMResponse handleApplyTransaction(OMRequest omRequest,
-      long transactionLogIndex) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Received OMRequest: {}, ", omRequest);
-    }
-    Type cmdType = omRequest.getCmdType();
-    switch (cmdType) {
-    case CreateVolume:
-    case SetVolumeProperty:
-    case DeleteVolume:
-    case CreateBucket:
-    case DeleteBucket:
-    case SetBucketProperty:
-    case AllocateBlock:
-    case CreateKey:
-    case CommitKey:
-    case DeleteKey:
-    case RenameKey:
-    case CreateDirectory:
-    case CreateFile:
-    case PurgeKeys:
-    case CreateS3Bucket:
-    case DeleteS3Bucket:
-    case InitiateMultiPartUpload:
-    case CommitMultiPartUpload:
-    case AbortMultiPartUpload:
-    case CompleteMultiPartUpload:
-    case AddAcl:
-    case RemoveAcl:
-    case SetAcl:
-    case GetDelegationToken:
-    case CancelDelegationToken:
-    case RenewDelegationToken:
-      // TODO: We don't need to pass the transaction log index; this will be
-      // removed once all write requests are migrated to the new model. We
-      // could also return OMClientResponse, so that adding to the double
-      // buffer is handled by the state machine, and then integrate the HA
-      // and non-HA code paths.
-      OMClientRequest omClientRequest =
-          OzoneManagerRatisUtils.createClientRequest(omRequest);
-      if (omClientRequest != null) {
-        OMClientResponse omClientResponse =
-            omClientRequest.validateAndUpdateCache(getOzoneManager(),
-                transactionLogIndex, ozoneManagerDoubleBuffer::add);
-        return omClientResponse.getOMResponse();
-      } else {
-        // TODO: remove this once HA support exists for all write requests.
-        return handle(omRequest);
-      }
-
-    default:
-      // Not all request types have been migrated yet, so fall back to
-      // handle() here.
-      return handle(omRequest);
-    }
-  }
-}
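The switch above implements an incremental migration: write types already ported to the new HA model get an OMClientRequest and go through validateAndUpdateCache, while the default branch falls back to the legacy handle() path. A hedged sketch of that dispatch shape, using simplified stand-in types:

    // Sketch only: migrated command types take the new-model path; the
    // default branch keeps unmigrated requests working.
    final class DispatchSketch {
      enum Type { CreateVolume, CreateKey, NotYetMigrated }

      static String dispatch(Type cmdType, long transactionLogIndex) {
        switch (cmdType) {
        case CreateVolume:
        case CreateKey:
          // New model: validate, update cache, queue into the double buffer.
          return "new-model at index " + transactionLogIndex;
        default:
          // Legacy path until every write request is migrated.
          return "legacy handle()";
        }
      }

      public static void main(String[] args) {
        System.out.println(dispatch(Type.CreateKey, 7L));      // new-model
        System.out.println(dispatch(Type.NotYetMigrated, 8L)); // legacy
      }
    }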
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
deleted file mode 100644
index ff2c966..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import com.google.common.base.Preconditions;
-
-import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.NotLeaderException;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer;
-import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.ratis.protocol.RaftPeerId;
-import org.apache.ratis.util.ExitUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Optional;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * This class is the server-side translator that forwards requests received on
- * {@link OzoneManagerProtocolPB}
- * to the OzoneManagerService server implementation.
- */
-public class OzoneManagerProtocolServerSideTranslatorPB implements
-    OzoneManagerProtocolPB {
-  private static final Logger LOG = LoggerFactory
-      .getLogger(OzoneManagerProtocolServerSideTranslatorPB.class);
-  private final OzoneManagerRatisServer omRatisServer;
-  private final RequestHandler handler;
-  private final boolean isRatisEnabled;
-  private final OzoneManager ozoneManager;
-  private final OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer;
-  private final AtomicLong transactionIndex = new AtomicLong(0L);
-  private final OzoneProtocolMessageDispatcher<OMRequest, OMResponse>
-      dispatcher;
-
-  /**
-   * Constructs an instance of the server handler.
-   *
-   * @param impl the OzoneManager implementation
-   * @param ratisServer the OM Ratis server
-   * @param metrics protocol message metrics
-   * @param enableRatis whether Ratis is enabled for OM HA
-   */
-  public OzoneManagerProtocolServerSideTranslatorPB(
-      OzoneManager impl,
-      OzoneManagerRatisServer ratisServer,
-      ProtocolMessageMetrics metrics,
-      boolean enableRatis) {
-    this.ozoneManager = impl;
-    handler = new OzoneManagerRequestHandler(impl);
-    this.omRatisServer = ratisServer;
-    this.isRatisEnabled = enableRatis;
-    this.ozoneManagerDoubleBuffer =
-        new OzoneManagerDoubleBuffer(ozoneManager.getMetadataManager(), (i) -> {
-          // Do nothing.
-          // For non-HA OM there is no need to save the transaction index,
-          // since we wait until the double buffer flushes to the DB on disk.
-        }, isRatisEnabled);
-
-    dispatcher = new OzoneProtocolMessageDispatcher<>("OzoneProtocol",
-        metrics, LOG);
-
-  }
-
-  /**
-   * Submit requests to Ratis server for OM HA implementation.
-   * TODO: Once HA is implemented fully, we should have only one server side
-   * translator for OM protocol.
-   */
-  @Override
-  public OMResponse submitRequest(RpcController controller,
-      OMRequest request) throws ServiceException {
-
-    return dispatcher.processRequest(request, this::processRequest,
-        request.getCmdType(), request.getTraceID());
-  }
-
-  private OMResponse processRequest(OMRequest request) throws
-      ServiceException {
-
-    if (isRatisEnabled) {
-      // Check if the request is a read only request
-      if (OmUtils.isReadOnly(request)) {
-        return submitReadRequestToOM(request);
-      } else {
-        if (omRatisServer.isLeader()) {
-          try {
-            OMClientRequest omClientRequest =
-                OzoneManagerRatisUtils.createClientRequest(request);
-            Preconditions.checkState(omClientRequest != null,
-                "Unrecognized write command type request" + request.toString());
-            request = omClientRequest.preExecute(ozoneManager);
-          } catch (IOException ex) {
-            // Some preExecute implementations throw IOException;
-            // handle it here.
-            return createErrorResponse(request, ex);
-          }
-          return submitRequestToRatis(request);
-        } else {
-          // Throw a not-leader exception to avoid unnecessary execution
-          // of preExecute on follower OMs, e.g. to reduce the chance of
-          // allocating blocks on follower OMs. The cached leader status
-          // is refreshed every 1 second.
-          throw createNotLeaderException();
-        }
-      }
-    } else {
-      return submitRequestDirectlyToOM(request);
-    }
-  }
-
-  /**
-   * Create OMResponse from the specified OMRequest and exception.
-   *
-   * @param omRequest the failed OM request
-   * @param exception the exception raised while processing it
-   * @return OMResponse carrying the error status
-   */
-  private OMResponse createErrorResponse(
-      OMRequest omRequest, IOException exception) {
-    OzoneManagerProtocolProtos.Type cmdType = omRequest.getCmdType();
-    // All write command types are handled here so that, if any preExecute
-    // is later changed to throw an IOException, the error OMResponse can
-    // be returned to the client.
-    OMResponse.Builder omResponse = OMResponse.newBuilder()
-        .setStatus(
-            OzoneManagerRatisUtils.exceptionToResponseStatus(exception))
-        .setCmdType(cmdType)
-        .setSuccess(false);
-    if (exception.getMessage() != null) {
-      omResponse.setMessage(exception.getMessage());
-    }
-    return omResponse.build();
-  }
-
-  /**
-   * Submits request to OM's Ratis server.
-   */
-  private OMResponse submitRequestToRatis(OMRequest request)
-      throws ServiceException {
-    // TODO: Remove OzoneManagerRatisClient, since we now use the
-    // RatisServer APIs directly.
-    return omRatisServer.submitRequest(request);
-  }
-
-  private OMResponse submitReadRequestToOM(OMRequest request)
-      throws ServiceException {
-    // Check if this OM is the leader.
-    if (omRatisServer.isLeader()) {
-      return handler.handle(request);
-    } else {
-      throw createNotLeaderException();
-    }
-  }
-
-  private ServiceException createNotLeaderException() {
-    RaftPeerId raftPeerId = omRatisServer.getRaftPeerId();
-    Optional<RaftPeerId> leaderRaftPeerId = omRatisServer
-        .getCachedLeaderPeerId();
-
-    NotLeaderException notLeaderException;
-    if (leaderRaftPeerId.isPresent()) {
-      // Include the cached leader id so clients can fail over to it.
-      notLeaderException = new NotLeaderException(
-          raftPeerId.toString(), leaderRaftPeerId.get().toString());
-    } else {
-      notLeaderException = new NotLeaderException(raftPeerId.toString());
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(notLeaderException.getMessage());
-    }
-
-    return new ServiceException(notLeaderException);
-  }
-
-  /**
-   * Submits request directly to OM.
-   */
-  private OMResponse submitRequestDirectlyToOM(OMRequest request) {
-    OMClientResponse omClientResponse = null;
-    long index = 0L;
-    try {
-      if (OmUtils.isReadOnly(request)) {
-        return handler.handle(request);
-      } else {
-        OMClientRequest omClientRequest =
-            OzoneManagerRatisUtils.createClientRequest(request);
-        Preconditions.checkState(omClientRequest != null,
-            "Unrecognized write command type request" + request.toString());
-        request = omClientRequest.preExecute(ozoneManager);
-        index = transactionIndex.incrementAndGet();
-        omClientRequest = OzoneManagerRatisUtils.createClientRequest(request);
-        omClientResponse = omClientRequest.validateAndUpdateCache(
-            ozoneManager, index, ozoneManagerDoubleBuffer::add);
-      }
-    } catch (IOException ex) {
-      // Some preExecute implementations throw IOException; handle it here.
-      return createErrorResponse(request, ex);
-    }
-    try {
-      omClientResponse.getFlushFuture().get();
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Future for {} is completed", request);
-      }
-    } catch (ExecutionException | InterruptedException ex) {
-      // Terminate the OM: reaching this point means the flush future
-      // completed with an exception.
-      String errorMessage = "Got error while waiting for flush to be " +
-          "completed for request " + request.toString();
-      ExitUtils.terminate(1, errorMessage, ex, LOG);
-    }
-    return omClientResponse.getOMResponse();
-  }
-
-  public void stop() {
-    if (!isRatisEnabled) {
-      ozoneManagerDoubleBuffer.stop();
-    }
-  }
-}
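The processRequest method above routes each OMRequest along one of three paths: straight to the OM when Ratis is disabled, served locally for reads on the leader, or through preExecute and Ratis for writes, with followers rejecting everything via a not-leader error. A small sketch of that decision, assuming nothing beyond plain booleans:

    // Sketch only: the routing decision made by processRequest.
    final class RoutingSketch {
      static String route(boolean ratisEnabled, boolean readOnly,
          boolean isLeader) {
        if (!ratisEnabled) {
          return "submit directly to OM"; // non-HA path, local double buffer
        }
        if (!isLeader) {
          // Rejecting early avoids running preExecute on followers.
          return "NotLeaderException";
        }
        return readOnly ? "serve read locally" : "preExecute, then Ratis";
      }

      public static void main(String[] args) {
        System.out.println(route(false, false, false)); // submit directly to OM
        System.out.println(route(true, true, true));    // serve read locally
        System.out.println(route(true, false, true));   // preExecute, then Ratis
        System.out.println(route(true, true, false));   // NotLeaderException
      }
    }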
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
deleted file mode 100644
index ef96e0c..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
+++ /dev/null
@@ -1,1132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.protocolPB;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.TreeMap;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
-import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
-import org.apache.hadoop.ozone.om.helpers.OmPartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelDelegationTokenResponseProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CheckVolumeAccessRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CheckVolumeAccessResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetDelegationTokenResponseProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadAbortRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadAbortResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadListPartsRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadListPartsResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenewDelegationTokenResponseProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3BucketInfoRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3BucketInfoResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3CreateBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3DeleteBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3DeleteBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3ListBucketsRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3ListBucketsResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
-import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
-import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
-import org.apache.hadoop.security.token.Token;
-
-import com.google.common.collect.Lists;
-
-import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
-import org.apache.hadoop.hdds.utils.db.SequenceNumberNotFoundException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*;
-
-/**
- * Command handler for OM requests. The OM State Machine calls this handler
- * to deserialize the client request and send it to the OM.
- */
-public class OzoneManagerRequestHandler implements RequestHandler {
-  static final Logger LOG =
-      LoggerFactory.getLogger(OzoneManagerRequestHandler.class);
-  private final OzoneManager impl;
-
-  public OzoneManagerRequestHandler(OzoneManager om) {
-    this.impl = om;
-  }
-
-  // TODO: simplify this method to make it shorter.
-  @SuppressWarnings("methodlength")
-  @Override
-  public OMResponse handle(OMRequest request) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Received OMRequest: {}, ", request);
-    }
-    Type cmdType = request.getCmdType();
-    OMResponse.Builder responseBuilder = OMResponse.newBuilder()
-        .setCmdType(cmdType)
-        .setStatus(Status.OK);
-    try {
-      switch (cmdType) {
-      case CreateVolume:
-        CreateVolumeResponse createVolumeResponse = createVolume(
-            request.getCreateVolumeRequest());
-        responseBuilder.setCreateVolumeResponse(createVolumeResponse);
-        break;
-      case SetVolumeProperty:
-        SetVolumePropertyResponse setVolumePropertyResponse = setVolumeProperty(
-            request.getSetVolumePropertyRequest());
-        responseBuilder.setSetVolumePropertyResponse(setVolumePropertyResponse);
-        break;
-      case CheckVolumeAccess:
-        CheckVolumeAccessResponse checkVolumeAccessResponse = checkVolumeAccess(
-            request.getCheckVolumeAccessRequest());
-        responseBuilder.setCheckVolumeAccessResponse(checkVolumeAccessResponse);
-        break;
-      case InfoVolume:
-        InfoVolumeResponse infoVolumeResponse = infoVolume(
-            request.getInfoVolumeRequest());
-        responseBuilder.setInfoVolumeResponse(infoVolumeResponse);
-        break;
-      case DeleteVolume:
-        DeleteVolumeResponse deleteVolumeResponse = deleteVolume(
-            request.getDeleteVolumeRequest());
-        responseBuilder.setDeleteVolumeResponse(deleteVolumeResponse);
-        break;
-      case ListVolume:
-        ListVolumeResponse listVolumeResponse = listVolumes(
-            request.getListVolumeRequest());
-        responseBuilder.setListVolumeResponse(listVolumeResponse);
-        break;
-      case CreateBucket:
-        CreateBucketResponse createBucketResponse = createBucket(
-            request.getCreateBucketRequest());
-        responseBuilder.setCreateBucketResponse(createBucketResponse);
-        break;
-      case InfoBucket:
-        InfoBucketResponse infoBucketResponse = infoBucket(
-            request.getInfoBucketRequest());
-        responseBuilder.setInfoBucketResponse(infoBucketResponse);
-        break;
-      case SetBucketProperty:
-        SetBucketPropertyResponse setBucketPropertyResponse = setBucketProperty(
-            request.getSetBucketPropertyRequest());
-        responseBuilder.setSetBucketPropertyResponse(setBucketPropertyResponse);
-        break;
-      case DeleteBucket:
-        DeleteBucketResponse deleteBucketResponse = deleteBucket(
-            request.getDeleteBucketRequest());
-        responseBuilder.setDeleteBucketResponse(deleteBucketResponse);
-        break;
-      case ListBuckets:
-        ListBucketsResponse listBucketsResponse = listBuckets(
-            request.getListBucketsRequest());
-        responseBuilder.setListBucketsResponse(listBucketsResponse);
-        break;
-      case CreateKey:
-        CreateKeyResponse createKeyResponse = createKey(
-            request.getCreateKeyRequest());
-        responseBuilder.setCreateKeyResponse(createKeyResponse);
-        break;
-      case LookupKey:
-        LookupKeyResponse lookupKeyResponse = lookupKey(
-            request.getLookupKeyRequest());
-        responseBuilder.setLookupKeyResponse(lookupKeyResponse);
-        break;
-      case RenameKey:
-        RenameKeyResponse renameKeyResponse = renameKey(
-            request.getRenameKeyRequest());
-        responseBuilder.setRenameKeyResponse(renameKeyResponse);
-        break;
-      case DeleteKey:
-        DeleteKeyResponse deleteKeyResponse = deleteKey(
-            request.getDeleteKeyRequest());
-        responseBuilder.setDeleteKeyResponse(deleteKeyResponse);
-        break;
-      case ListKeys:
-        ListKeysResponse listKeysResponse = listKeys(
-            request.getListKeysRequest());
-        responseBuilder.setListKeysResponse(listKeysResponse);
-        break;
-      case CommitKey:
-        CommitKeyResponse commitKeyResponse = commitKey(
-            request.getCommitKeyRequest());
-        responseBuilder.setCommitKeyResponse(commitKeyResponse);
-        break;
-      case AllocateBlock:
-        AllocateBlockResponse allocateBlockResponse = allocateBlock(
-            request.getAllocateBlockRequest());
-        responseBuilder.setAllocateBlockResponse(allocateBlockResponse);
-        break;
-      case CreateS3Bucket:
-        S3CreateBucketResponse s3CreateBucketResponse = createS3Bucket(
-            request.getCreateS3BucketRequest());
-        responseBuilder.setCreateS3BucketResponse(s3CreateBucketResponse);
-        break;
-      case DeleteS3Bucket:
-        S3DeleteBucketResponse s3DeleteBucketResponse = deleteS3Bucket(
-            request.getDeleteS3BucketRequest());
-        responseBuilder.setDeleteS3BucketResponse(s3DeleteBucketResponse);
-        break;
-      case InfoS3Bucket:
-        S3BucketInfoResponse s3BucketInfoResponse = getS3Bucketinfo(
-            request.getInfoS3BucketRequest());
-        responseBuilder.setInfoS3BucketResponse(s3BucketInfoResponse);
-        break;
-      case ListS3Buckets:
-        S3ListBucketsResponse s3ListBucketsResponse = listS3Buckets(
-            request.getListS3BucketsRequest());
-        responseBuilder.setListS3BucketsResponse(s3ListBucketsResponse);
-        break;
-      case InitiateMultiPartUpload:
-        MultipartInfoInitiateResponse multipartInfoInitiateResponse =
-            initiateMultiPartUpload(
-                request.getInitiateMultiPartUploadRequest());
-        responseBuilder.setInitiateMultiPartUploadResponse(
-            multipartInfoInitiateResponse);
-        break;
-      case CommitMultiPartUpload:
-        MultipartCommitUploadPartResponse commitUploadPartResponse =
-            commitMultipartUploadPart(
-                request.getCommitMultiPartUploadRequest());
-        responseBuilder.setCommitMultiPartUploadResponse(
-            commitUploadPartResponse);
-        break;
-      case CompleteMultiPartUpload:
-        MultipartUploadCompleteResponse completeMultiPartUploadResponse =
-            completeMultipartUpload(
-                request.getCompleteMultiPartUploadRequest());
-        responseBuilder.setCompleteMultiPartUploadResponse(
-            completeMultiPartUploadResponse);
-        break;
-      case AbortMultiPartUpload:
-        MultipartUploadAbortResponse abortMultiPartAbortResponse =
-            abortMultipartUpload(request.getAbortMultiPartUploadRequest());
-        responseBuilder.setAbortMultiPartUploadResponse(
-            abortMultiPartAbortResponse);
-        break;
-      case ListMultiPartUploadParts:
-        MultipartUploadListPartsResponse listPartsResponse =
-            listParts(request.getListMultipartUploadPartsRequest());
-        responseBuilder.setListMultipartUploadPartsResponse(listPartsResponse);
-        break;
-      case ListMultipartUploads:
-        ListMultipartUploadsResponse response =
-            listMultipartUploads(request.getListMultipartUploadsRequest());
-        responseBuilder.setListMultipartUploadsResponse(response);
-        break;
-      case ServiceList:
-        ServiceListResponse serviceListResponse = getServiceList(
-            request.getServiceListRequest());
-        responseBuilder.setServiceListResponse(serviceListResponse);
-        break;
-      case DBUpdates:
-        DBUpdatesResponse dbUpdatesResponse = getOMDBUpdates(
-            request.getDbUpdatesRequest());
-        responseBuilder.setDbUpdatesResponse(dbUpdatesResponse);
-        break;
-      case GetDelegationToken:
-        GetDelegationTokenResponseProto getDtResp = getDelegationToken(
-            request.getGetDelegationTokenRequest());
-        responseBuilder.setGetDelegationTokenResponse(getDtResp);
-        break;
-      case RenewDelegationToken:
-        RenewDelegationTokenResponseProto renewDtResp = renewDelegationToken(
-            request.getRenewDelegationTokenRequest());
-        responseBuilder.setRenewDelegationTokenResponse(renewDtResp);
-        break;
-      case CancelDelegationToken:
-        CancelDelegationTokenResponseProto cancelDtResp = cancelDelegationToken(
-            request.getCancelDelegationTokenRequest());
-        responseBuilder.setCancelDelegationTokenResponse(cancelDtResp);
-        break;
-      case GetS3Secret:
-        GetS3SecretResponse getS3SecretResp = getS3Secret(request
-            .getGetS3SecretRequest());
-        responseBuilder.setGetS3SecretResponse(getS3SecretResp);
-        break;
-      case GetFileStatus:
-        GetFileStatusResponse getFileStatusResponse =
-            getOzoneFileStatus(request.getGetFileStatusRequest());
-        responseBuilder.setGetFileStatusResponse(getFileStatusResponse);
-        break;
-      case CreateDirectory:
-        createDirectory(request.getCreateDirectoryRequest());
-        break;
-      case CreateFile:
-        CreateFileResponse createFileResponse =
-            createFile(request.getCreateFileRequest());
-        responseBuilder.setCreateFileResponse(createFileResponse);
-        break;
-      case LookupFile:
-        LookupFileResponse lookupFileResponse =
-            lookupFile(request.getLookupFileRequest());
-        responseBuilder.setLookupFileResponse(lookupFileResponse);
-        break;
-      case ListStatus:
-        ListStatusResponse listStatusResponse =
-            listStatus(request.getListStatusRequest());
-        responseBuilder.setListStatusResponse(listStatusResponse);
-        break;
-      case AddAcl:
-        AddAclResponse addAclResponse =
-            addAcl(request.getAddAclRequest());
-        responseBuilder.setAddAclResponse(addAclResponse);
-        break;
-      case RemoveAcl:
-        RemoveAclResponse removeAclResponse =
-            removeAcl(request.getRemoveAclRequest());
-        responseBuilder.setRemoveAclResponse(removeAclResponse);
-        break;
-      case SetAcl:
-        SetAclResponse setAclResponse =
-            setAcl(request.getSetAclRequest());
-        responseBuilder.setSetAclResponse(setAclResponse);
-        break;
-      case GetAcl:
-        GetAclResponse getAclResponse =
-            getAcl(request.getGetAclRequest());
-        responseBuilder.setGetAclResponse(getAclResponse);
-        break;
-      default:
-        responseBuilder.setSuccess(false);
-        responseBuilder.setMessage("Unrecognized Command Type: " + cmdType);
-        // Return early so the setSuccess(true) below cannot overwrite the
-        // failure status of an unrecognized command.
-        return responseBuilder.build();
-      }
-      responseBuilder.setSuccess(true);
-    } catch (IOException ex) {
-      responseBuilder.setSuccess(false);
-      responseBuilder.setStatus(exceptionToResponseStatus(ex));
-      if (ex.getMessage() != null) {
-        responseBuilder.setMessage(ex.getMessage());
-      }
-    }
-    return responseBuilder.build();
-  }
-
-  private DBUpdatesResponse getOMDBUpdates(
-      DBUpdatesRequest dbUpdatesRequest)
-      throws SequenceNumberNotFoundException {
-
-    DBUpdatesResponse.Builder builder = DBUpdatesResponse
-        .newBuilder();
-    DBUpdatesWrapper dbUpdatesWrapper =
-        impl.getDBUpdates(dbUpdatesRequest);
-    for (int i = 0; i < dbUpdatesWrapper.getData().size(); i++) {
-      builder.addData(OMPBHelper.getByteString(
-          dbUpdatesWrapper.getData().get(i)));
-    }
-    builder.setSequenceNumber(dbUpdatesWrapper.getCurrentSequenceNumber());
-    return builder.build();
-  }
-
-  private GetAclResponse getAcl(GetAclRequest req) throws IOException {
-    List<OzoneAclInfo> acls = new ArrayList<>();
-    List<OzoneAcl> aclList =
-        impl.getAcl(OzoneObjInfo.fromProtobuf(req.getObj()));
-    if (aclList != null) {
-      aclList.forEach(a -> acls.add(OzoneAcl.toProtobuf(a)));
-    }
-    return GetAclResponse.newBuilder().addAllAcls(acls).build();
-  }
-
-  private RemoveAclResponse removeAcl(RemoveAclRequest req)
-      throws IOException {
-    boolean response = impl.removeAcl(OzoneObjInfo.fromProtobuf(req.getObj()),
-        OzoneAcl.fromProtobuf(req.getAcl()));
-    return RemoveAclResponse.newBuilder().setResponse(response).build();
-  }
-
-  private SetAclResponse setAcl(SetAclRequest req) throws IOException {
-    boolean response = impl.setAcl(OzoneObjInfo.fromProtobuf(req.getObj()),
-        req.getAclList().stream().map(a -> OzoneAcl.fromProtobuf(a)).
-            collect(Collectors.toList()));
-    return SetAclResponse.newBuilder().setResponse(response).build();
-  }
-
-  private AddAclResponse addAcl(AddAclRequest req) throws IOException {
-    boolean response = impl.addAcl(OzoneObjInfo.fromProtobuf(req.getObj()),
-        OzoneAcl.fromProtobuf(req.getAcl()));
-    return AddAclResponse.newBuilder().setResponse(response).build();
-  }
-
-  // Convert an exception to the corresponding status code.
-  protected Status exceptionToResponseStatus(IOException ex) {
-    if (ex instanceof OMException) {
-      return Status.values()[((OMException) ex).getResult().ordinal()];
-    } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Unknown error occurs", ex);
-      }
-      return Status.INTERNAL_ERROR;
-    }
-  }
-
-  /**
-   * Validates that the incoming OM request has required parameters.
-   * TODO: Add more validation checks before writing the request to Ratis log.
-   *
-   * @param omRequest client request to OM
-   * @throws OMException thrown if required parameters are set to null.
-   */
-  @Override
-  public void validateRequest(OMRequest omRequest) throws OMException {
-    Type cmdType = omRequest.getCmdType();
-    if (cmdType == null) {
-      throw new OMException("CmdType is null",
-          OMException.ResultCodes.INVALID_REQUEST);
-    }
-    if (omRequest.getClientId() == null) {
-      throw new OMException("ClientId is null",
-          OMException.ResultCodes.INVALID_REQUEST);
-    }
-  }
-
-  private CreateVolumeResponse createVolume(CreateVolumeRequest request)
-      throws IOException {
-    impl.createVolume(OmVolumeArgs.getFromProtobuf(request.getVolumeInfo()));
-    return CreateVolumeResponse.newBuilder().build();
-  }
-
-  private SetVolumePropertyResponse setVolumeProperty(
-      SetVolumePropertyRequest request) throws IOException {
-    SetVolumePropertyResponse.Builder resp =
-        SetVolumePropertyResponse.newBuilder();
-
-    String volume = request.getVolumeName();
-
-    if (request.hasQuotaInBytes()) {
-      long quota = request.getQuotaInBytes();
-      impl.setQuota(volume, quota);
-    } else {
-      String owner = request.getOwnerName();
-      impl.setOwner(volume, owner);
-    }
-
-    return resp.build();
-  }
-
-  private CheckVolumeAccessResponse checkVolumeAccess(
-      CheckVolumeAccessRequest request) throws IOException {
-    CheckVolumeAccessResponse.Builder resp =
-        CheckVolumeAccessResponse.newBuilder();
-    boolean access = impl.checkVolumeAccess(request.getVolumeName(),
-        request.getUserAcl());
-    // If access is denied, surface it as an ACCESS_DENIED exception.
-
-    if (!access) {
-      throw new OMException(OMException.ResultCodes.ACCESS_DENIED);
-    }
-
-    return resp.build();
-  }
-
-  private InfoVolumeResponse infoVolume(InfoVolumeRequest request)
-      throws IOException {
-    InfoVolumeResponse.Builder resp = InfoVolumeResponse.newBuilder();
-    String volume = request.getVolumeName();
-
-    OmVolumeArgs ret = impl.getVolumeInfo(volume);
-    resp.setVolumeInfo(ret.getProtobuf());
-
-    return resp.build();
-  }
-
-  private DeleteVolumeResponse deleteVolume(DeleteVolumeRequest request)
-      throws IOException {
-    DeleteVolumeResponse.Builder resp = DeleteVolumeResponse.newBuilder();
-
-    impl.deleteVolume(request.getVolumeName());
-
-    return resp.build();
-  }
-
-  private ListVolumeResponse listVolumes(ListVolumeRequest request)
-      throws IOException {
-    ListVolumeResponse.Builder resp = ListVolumeResponse.newBuilder();
-    List<OmVolumeArgs> result = Lists.newArrayList();
-
-    if (request.getScope()
-        == ListVolumeRequest.Scope.VOLUMES_BY_USER) {
-      result = impl.listVolumeByUser(request.getUserName(),
-          request.getPrefix(), request.getPrevKey(), request.getMaxKeys());
-    } else if (request.getScope()
-        == ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER) {
-      result =
-          impl.listAllVolumes(request.getPrefix(), request.getPrevKey(),
-              request.getMaxKeys());
-    }
-
-    result.forEach(item -> resp.addVolumeInfo(item.getProtobuf()));
-
-    return resp.build();
-  }
-
-  private CreateBucketResponse createBucket(CreateBucketRequest request)
-      throws IOException {
-    CreateBucketResponse.Builder resp =
-        CreateBucketResponse.newBuilder();
-    impl.createBucket(OmBucketInfo.getFromProtobuf(
-        request.getBucketInfo()));
-    return resp.build();
-  }
-
-  private InfoBucketResponse infoBucket(InfoBucketRequest request)
-      throws IOException {
-    InfoBucketResponse.Builder resp =
-        InfoBucketResponse.newBuilder();
-    OmBucketInfo omBucketInfo = impl.getBucketInfo(
-        request.getVolumeName(), request.getBucketName());
-    resp.setBucketInfo(omBucketInfo.getProtobuf());
-
-    return resp.build();
-  }
-
-  private CreateKeyResponse createKey(CreateKeyRequest request)
-      throws IOException {
-    CreateKeyResponse.Builder resp =
-        CreateKeyResponse.newBuilder();
-    KeyArgs keyArgs = request.getKeyArgs();
-    HddsProtos.ReplicationType type =
-        keyArgs.hasType() ? keyArgs.getType() : null;
-    HddsProtos.ReplicationFactor factor =
-        keyArgs.hasFactor() ? keyArgs.getFactor() : null;
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setDataSize(keyArgs.getDataSize())
-        .setType(type)
-        .setFactor(factor)
-        .setIsMultipartKey(keyArgs.getIsMultipartKey())
-        .setMultipartUploadID(keyArgs.getMultipartUploadID())
-        .setMultipartUploadPartNumber(keyArgs.getMultipartNumber())
-        .setAcls(keyArgs.getAclsList().stream().map(a ->
-            OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()))
-        .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList()))
-        .build();
-    if (keyArgs.hasDataSize()) {
-      omKeyArgs.setDataSize(keyArgs.getDataSize());
-    } else {
-      omKeyArgs.setDataSize(0);
-    }
-    OpenKeySession openKey = impl.openKey(omKeyArgs);
-    resp.setKeyInfo(openKey.getKeyInfo().getProtobuf());
-    resp.setID(openKey.getId());
-    resp.setOpenVersion(openKey.getOpenVersion());
-    return resp.build();
-  }
-
-  private LookupKeyResponse lookupKey(LookupKeyRequest request)
-      throws IOException {
-    LookupKeyResponse.Builder resp =
-        LookupKeyResponse.newBuilder();
-    KeyArgs keyArgs = request.getKeyArgs();
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setRefreshPipeline(true)
-        .setSortDatanodesInPipeline(keyArgs.getSortDatanodes())
-        .build();
-    OmKeyInfo keyInfo = impl.lookupKey(omKeyArgs);
-    resp.setKeyInfo(keyInfo.getProtobuf());
-
-    return resp.build();
-  }
-
-  private RenameKeyResponse renameKey(RenameKeyRequest request)
-      throws IOException {
-    RenameKeyResponse.Builder resp = RenameKeyResponse.newBuilder();
-
-    KeyArgs keyArgs = request.getKeyArgs();
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setRefreshPipeline(true)
-        .build();
-    impl.renameKey(omKeyArgs, request.getToKeyName());
-
-    return resp.build();
-  }
-
-  private SetBucketPropertyResponse setBucketProperty(
-      SetBucketPropertyRequest request) throws IOException {
-    SetBucketPropertyResponse.Builder resp =
-        SetBucketPropertyResponse.newBuilder();
-    impl.setBucketProperty(OmBucketArgs.getFromProtobuf(
-        request.getBucketArgs()));
-
-    return resp.build();
-  }
-
-  private DeleteKeyResponse deleteKey(DeleteKeyRequest request)
-      throws IOException {
-    DeleteKeyResponse.Builder resp =
-        DeleteKeyResponse.newBuilder();
-
-    KeyArgs keyArgs = request.getKeyArgs();
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .build();
-    impl.deleteKey(omKeyArgs);
-
-    return resp.build();
-  }
-
-  private DeleteBucketResponse deleteBucket(DeleteBucketRequest request)
-      throws IOException {
-    DeleteBucketResponse.Builder resp = DeleteBucketResponse.newBuilder();
-
-    impl.deleteBucket(request.getVolumeName(), request.getBucketName());
-
-    return resp.build();
-  }
-
-  private ListBucketsResponse listBuckets(ListBucketsRequest request)
-      throws IOException {
-    ListBucketsResponse.Builder resp =
-        ListBucketsResponse.newBuilder();
-
-    List<OmBucketInfo> buckets = impl.listBuckets(
-        request.getVolumeName(),
-        request.getStartKey(),
-        request.getPrefix(),
-        request.getCount());
-    for (OmBucketInfo bucket : buckets) {
-      resp.addBucketInfo(bucket.getProtobuf());
-    }
-
-    return resp.build();
-  }
-
-  private ListKeysResponse listKeys(ListKeysRequest request)
-      throws IOException {
-    ListKeysResponse.Builder resp =
-        ListKeysResponse.newBuilder();
-
-    List<OmKeyInfo> keys = impl.listKeys(
-        request.getVolumeName(),
-        request.getBucketName(),
-        request.getStartKey(),
-        request.getPrefix(),
-        request.getCount());
-    for (OmKeyInfo key : keys) {
-      resp.addKeyInfo(key.getProtobuf());
-    }
-
-    return resp.build();
-  }
-
-  private CommitKeyResponse commitKey(CommitKeyRequest request)
-      throws IOException {
-    CommitKeyResponse.Builder resp =
-        CommitKeyResponse.newBuilder();
-
-    KeyArgs keyArgs = request.getKeyArgs();
-    HddsProtos.ReplicationType type =
-        keyArgs.hasType() ? keyArgs.getType() : null;
-    HddsProtos.ReplicationFactor factor =
-        keyArgs.hasFactor() ? keyArgs.getFactor() : null;
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setLocationInfoList(keyArgs.getKeyLocationsList().stream()
-            .map(OmKeyLocationInfo::getFromProtobuf)
-            .collect(Collectors.toList()))
-        .setType(type)
-        .setFactor(factor)
-        .setDataSize(keyArgs.getDataSize())
-        .build();
-    impl.commitKey(omKeyArgs, request.getClientID());
-
-    return resp.build();
-  }
-
-  private AllocateBlockResponse allocateBlock(AllocateBlockRequest request)
-      throws IOException {
-    AllocateBlockResponse.Builder resp =
-        AllocateBlockResponse.newBuilder();
-
-    KeyArgs keyArgs = request.getKeyArgs();
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .build();
-
-    OmKeyLocationInfo newLocation = impl.allocateBlock(omKeyArgs,
-        request.getClientID(), ExcludeList.getFromProtoBuf(
-            request.getExcludeList()));
-
-    resp.setKeyLocation(newLocation.getProtobuf());
-
-    return resp.build();
-  }
-
-  private ServiceListResponse getServiceList(ServiceListRequest request)
-      throws IOException {
-    ServiceListResponse.Builder resp = ServiceListResponse.newBuilder();
-
-    resp.addAllServiceInfo(impl.getServiceInfo().getServiceInfoList().stream()
-        .map(ServiceInfo::getProtobuf)
-        .collect(Collectors.toList()));
-    if (impl.getServiceInfo().getCaCertificate() != null) {
-      resp.setCaCertificate(impl.getServiceInfo().getCaCertificate());
-    }
-    return resp.build();
-  }
-
-  private S3CreateBucketResponse createS3Bucket(S3CreateBucketRequest request)
-      throws IOException {
-    S3CreateBucketResponse.Builder resp = S3CreateBucketResponse.newBuilder();
-
-    impl.createS3Bucket(request.getUserName(), request.getS3Bucketname());
-
-    return resp.build();
-  }
-
-  private S3DeleteBucketResponse deleteS3Bucket(S3DeleteBucketRequest request)
-      throws IOException {
-    S3DeleteBucketResponse.Builder resp = S3DeleteBucketResponse.newBuilder();
-
-    impl.deleteS3Bucket(request.getS3BucketName());
-
-    return resp.build();
-  }
-
-  private S3BucketInfoResponse getS3Bucketinfo(S3BucketInfoRequest request)
-      throws IOException {
-    S3BucketInfoResponse.Builder resp = S3BucketInfoResponse.newBuilder();
-
-    resp.setOzoneMapping(
-        impl.getOzoneBucketMapping(request.getS3BucketName()));
-    return resp.build();
-  }
-
-  private S3ListBucketsResponse listS3Buckets(S3ListBucketsRequest request)
-      throws IOException {
-    S3ListBucketsResponse.Builder resp = S3ListBucketsResponse.newBuilder();
-
-    List<OmBucketInfo> buckets = impl.listS3Buckets(
-        request.getUserName(),
-        request.getStartKey(),
-        request.getPrefix(),
-        request.getCount());
-    for (OmBucketInfo bucket : buckets) {
-      resp.addBucketInfo(bucket.getProtobuf());
-    }
-
-    return resp.build();
-  }
-
-  private MultipartInfoInitiateResponse initiateMultiPartUpload(
-      MultipartInfoInitiateRequest request) throws IOException {
-    MultipartInfoInitiateResponse.Builder resp = MultipartInfoInitiateResponse
-        .newBuilder();
-
-    KeyArgs keyArgs = request.getKeyArgs();
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setType(keyArgs.getType())
-        .setFactor(keyArgs.getFactor())
-        .setAcls(keyArgs.getAclsList().stream().map(a ->
-            OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()))
-        .build();
-    OmMultipartInfo multipartInfo = impl.initiateMultipartUpload(omKeyArgs);
-    resp.setVolumeName(multipartInfo.getVolumeName());
-    resp.setBucketName(multipartInfo.getBucketName());
-    resp.setKeyName(multipartInfo.getKeyName());
-    resp.setMultipartUploadID(multipartInfo.getUploadID());
-
-    return resp.build();
-  }
-
-  private MultipartCommitUploadPartResponse commitMultipartUploadPart(
-      MultipartCommitUploadPartRequest request) throws IOException {
-    MultipartCommitUploadPartResponse.Builder resp =
-        MultipartCommitUploadPartResponse.newBuilder();
-
-    KeyArgs keyArgs = request.getKeyArgs();
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setMultipartUploadID(keyArgs.getMultipartUploadID())
-        .setIsMultipartKey(keyArgs.getIsMultipartKey())
-        .setMultipartUploadPartNumber(keyArgs.getMultipartNumber())
-        .setDataSize(keyArgs.getDataSize())
-        .setLocationInfoList(keyArgs.getKeyLocationsList().stream()
-            .map(OmKeyLocationInfo::getFromProtobuf)
-            .collect(Collectors.toList()))
-        .build();
-    OmMultipartCommitUploadPartInfo commitUploadPartInfo =
-        impl.commitMultipartUploadPart(omKeyArgs, request.getClientID());
-    resp.setPartName(commitUploadPartInfo.getPartName());
-
-    return resp.build();
-  }
-
-  private MultipartUploadCompleteResponse completeMultipartUpload(
-      MultipartUploadCompleteRequest request) throws IOException {
-    MultipartUploadCompleteResponse.Builder response =
-        MultipartUploadCompleteResponse.newBuilder();
-
-    KeyArgs keyArgs = request.getKeyArgs();
-    List<Part> partsList = request.getPartsListList();
-
-    TreeMap<Integer, String> partsMap = new TreeMap<>();
-    for (Part part : partsList) {
-      partsMap.put(part.getPartNumber(), part.getPartName());
-    }
-
-    OmMultipartUploadCompleteList omMultipartUploadCompleteList =
-        new OmMultipartUploadCompleteList(partsMap);
-
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setAcls(keyArgs.getAclsList().stream().map(a ->
-            OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()))
-        .setMultipartUploadID(keyArgs.getMultipartUploadID())
-        .build();
-    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = impl
-        .completeMultipartUpload(omKeyArgs, omMultipartUploadCompleteList);
-
-    response.setVolume(omMultipartUploadCompleteInfo.getVolume())
-        .setBucket(omMultipartUploadCompleteInfo.getBucket())
-        .setKey(omMultipartUploadCompleteInfo.getKey())
-        .setHash(omMultipartUploadCompleteInfo.getHash());
-
-    return response.build();
-  }
-
-  private MultipartUploadAbortResponse abortMultipartUpload(
-      MultipartUploadAbortRequest multipartUploadAbortRequest)
-      throws IOException {
-    MultipartUploadAbortResponse.Builder response =
-        MultipartUploadAbortResponse.newBuilder();
-
-    KeyArgs keyArgs = multipartUploadAbortRequest.getKeyArgs();
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setMultipartUploadID(keyArgs.getMultipartUploadID())
-        .build();
-    impl.abortMultipartUpload(omKeyArgs);
-
-    return response.build();
-  }
-
-  private MultipartUploadListPartsResponse listParts(
-      MultipartUploadListPartsRequest multipartUploadListPartsRequest)
-      throws IOException {
-
-    MultipartUploadListPartsResponse.Builder response =
-        MultipartUploadListPartsResponse.newBuilder();
-
-    OmMultipartUploadListParts omMultipartUploadListParts =
-        impl.listParts(multipartUploadListPartsRequest.getVolume(),
-            multipartUploadListPartsRequest.getBucket(),
-            multipartUploadListPartsRequest.getKey(),
-            multipartUploadListPartsRequest.getUploadID(),
-            multipartUploadListPartsRequest.getPartNumbermarker(),
-            multipartUploadListPartsRequest.getMaxParts());
-
-    List<OmPartInfo> omPartInfoList =
-        omMultipartUploadListParts.getPartInfoList();
-
-    List<PartInfo> partInfoList =
-        new ArrayList<>();
-
-    omPartInfoList.forEach(partInfo -> partInfoList.add(partInfo.getProto()));
-
-    response.setType(omMultipartUploadListParts.getReplicationType());
-    response.setFactor(omMultipartUploadListParts.getReplicationFactor());
-    response.setNextPartNumberMarker(
-        omMultipartUploadListParts.getNextPartNumberMarker());
-    response.setIsTruncated(omMultipartUploadListParts.isTruncated());
-
-    return response.addAllPartsList(partInfoList).build();
-  }
-
-  private ListMultipartUploadsResponse listMultipartUploads(
-      ListMultipartUploadsRequest request)
-      throws IOException {
-
-    OmMultipartUploadList omMultipartUploadList =
-        impl.listMultipartUploads(request.getVolume(), request.getBucket(),
-            request.getPrefix());
-
-    List<MultipartUploadInfo> info = omMultipartUploadList
-        .getUploads()
-        .stream()
-        .map(upload -> MultipartUploadInfo.newBuilder()
-            .setVolumeName(upload.getVolumeName())
-            .setBucketName(upload.getBucketName())
-            .setKeyName(upload.getKeyName())
-            .setUploadId(upload.getUploadId())
-            .setType(upload.getReplicationType())
-            .setFactor(upload.getReplicationFactor())
-            .setCreationTime(upload.getCreationTime().toEpochMilli())
-            .build())
-        .collect(Collectors.toList());
-
-    ListMultipartUploadsResponse response =
-        ListMultipartUploadsResponse.newBuilder()
-            .addAllUploadsList(info)
-            .build();
-
-    return response;
-  }
-
-  private GetDelegationTokenResponseProto getDelegationToken(
-      GetDelegationTokenRequestProto request) throws OMException {
-    GetDelegationTokenResponseProto.Builder rb =
-        GetDelegationTokenResponseProto.newBuilder();
-
-    Token<OzoneTokenIdentifier> token = impl
-        .getDelegationToken(new Text(request.getRenewer()));
-    if (token != null) {
-      rb.setResponse(org.apache.hadoop.security.proto.SecurityProtos
-          .GetDelegationTokenResponseProto.newBuilder().setToken(OMPBHelper
-              .convertToTokenProto(token)).build());
-    }
-
-    return rb.build();
-  }
-
-  private RenewDelegationTokenResponseProto renewDelegationToken(
-      RenewDelegationTokenRequestProto request) throws OMException {
-    RenewDelegationTokenResponseProto.Builder rb =
-        RenewDelegationTokenResponseProto.newBuilder();
-
-    if (request.hasToken()) {
-      long expiryTime = impl
-          .renewDelegationToken(
-              OMPBHelper.convertToDelegationToken(request.getToken()));
-      rb.setResponse(org.apache.hadoop.security.proto.SecurityProtos
-          .RenewDelegationTokenResponseProto.newBuilder()
-          .setNewExpiryTime(expiryTime).build());
-    }
-
-    return rb.build();
-  }
-
-  private CancelDelegationTokenResponseProto cancelDelegationToken(
-      CancelDelegationTokenRequestProto req) throws OMException {
-    CancelDelegationTokenResponseProto.Builder rb =
-        CancelDelegationTokenResponseProto.newBuilder();
-
-    if (req.hasToken()) {
-      impl.cancelDelegationToken(
-          OMPBHelper.convertToDelegationToken(req.getToken()));
-    }
-    rb.setResponse(org.apache.hadoop.security.proto.SecurityProtos
-        .CancelDelegationTokenResponseProto.getDefaultInstance());
-
-    return rb.build();
-  }
-
-  private GetS3SecretResponse getS3Secret(
-      GetS3SecretRequest request)
-      throws IOException {
-    GetS3SecretResponse.Builder rb =
-        GetS3SecretResponse.newBuilder();
-
-    rb.setS3Secret(impl.getS3Secret(request.getKerberosID()).getProtobuf());
-
-    return rb.build();
-  }
-
-  private GetFileStatusResponse getOzoneFileStatus(
-      GetFileStatusRequest request) throws IOException {
-    KeyArgs keyArgs = request.getKeyArgs();
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .build();
-
-    GetFileStatusResponse.Builder rb = GetFileStatusResponse.newBuilder();
-    rb.setStatus(impl.getFileStatus(omKeyArgs).getProtobuf());
-
-    return rb.build();
-  }
-
-  private void createDirectory(CreateDirectoryRequest request)
-      throws IOException {
-    KeyArgs keyArgs = request.getKeyArgs();
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setAcls(keyArgs.getAclsList().stream().map(a ->
-            OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()))
-        .build();
-    impl.createDirectory(omKeyArgs);
-  }
-
-  private CreateFileResponse createFile(
-      CreateFileRequest request) throws IOException {
-    KeyArgs keyArgs = request.getKeyArgs();
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setDataSize(keyArgs.getDataSize())
-        .setType(keyArgs.getType())
-        .setFactor(keyArgs.getFactor())
-        .setAcls(keyArgs.getAclsList().stream().map(a ->
-            OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()))
-        .build();
-    OpenKeySession keySession =
-        impl.createFile(omKeyArgs, request.getIsOverwrite(),
-            request.getIsRecursive());
-    return CreateFileResponse.newBuilder()
-        .setKeyInfo(keySession.getKeyInfo().getProtobuf())
-        .setID(keySession.getId())
-        .setOpenVersion(keySession.getOpenVersion())
-        .build();
-  }
-
-  private LookupFileResponse lookupFile(
-      LookupFileRequest request)
-      throws IOException {
-    KeyArgs keyArgs = request.getKeyArgs();
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setSortDatanodesInPipeline(keyArgs.getSortDatanodes())
-        .build();
-    return LookupFileResponse.newBuilder()
-        .setKeyInfo(impl.lookupFile(omKeyArgs).getProtobuf())
-        .build();
-  }
-
-  private ListStatusResponse listStatus(
-      ListStatusRequest request) throws IOException {
-    KeyArgs keyArgs = request.getKeyArgs();
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .build();
-    List<OzoneFileStatus> statuses =
-        impl.listStatus(omKeyArgs, request.getRecursive(),
-            request.getStartKey(), request.getNumEntries());
-    ListStatusResponse.Builder
-        listStatusResponseBuilder =
-        ListStatusResponse.newBuilder();
-    for (OzoneFileStatus status : statuses) {
-      listStatusResponseBuilder.addStatuses(status.getProtobuf());
-    }
-    return listStatusResponseBuilder.build();
-  }
-
-  protected OzoneManager getOzoneManager() {
-    return impl;
-  }
-}
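The file-level handlers deleted above (getOzoneFileStatus, createDirectory, createFile, lookupFile, listStatus) all repeat the same translation from the protobuf KeyArgs to the OmKeyArgs model. A minimal sketch of that shared pattern, extracted into a hypothetical helper that is not part of this patch:

    private static OmKeyArgs toOmKeyArgs(KeyArgs keyArgs) {
      // Only the three identifying fields are common to every handler;
      // callers add data size, type, factor or ACLs as needed.
      return new OmKeyArgs.Builder()
          .setVolumeName(keyArgs.getVolumeName())
          .setBucketName(keyArgs.getBucketName())
          .setKeyName(keyArgs.getKeyName())
          .build();
    }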
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java
deleted file mode 100644
index f19dc48..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.protocolPB;
-
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.
-    OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.
-    OMResponse;
-
-/**
- * Handler for OM requests.
- */
-public interface RequestHandler {
-
-  /**
-   * Handles the OMRequest and returns an OMResponse.
-   *
-   * @param request client request to OM
-   * @return OMResponse for the request
-   */
-  OMResponse handle(OMRequest request);
-
-  /**
-   * Validates that the incoming OM request has required parameters.
-   * TODO: Add more validation checks before writing the request to Ratis log.
-   *
-   * @param omRequest client request to OM
-   * @throws OMException thrown if required parameters are set to null.
-   */
-  void validateRequest(OMRequest omRequest) throws OMException;
-
-}
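For illustration only, a minimal RequestHandler implementation is sketched below. The validation rule and the use of OMResponse.getDefaultInstance() are assumptions made for the sketch, not behavior taken from the deleted code:

    /** Sketch: rejects requests without a command type. */
    public class MinimalRequestHandler implements RequestHandler {

      @Override
      public OMResponse handle(OMRequest request) {
        // A real handler would dispatch on request.getCmdType();
        // the default instance stands in for a constructed response here.
        return OMResponse.getDefaultInstance();
      }

      @Override
      public void validateRequest(OMRequest omRequest) throws OMException {
        if (omRequest == null || !omRequest.hasCmdType()) {
          throw new OMException("OMRequest is missing a command type.",
              OMException.ResultCodes.INVALID_REQUEST);
        }
      }
    }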
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
deleted file mode 100644
index 9bc393d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-/**
- * OM protocol buffer translators.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java
deleted file mode 100644
index 0b7c51a..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.security.acl;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.ozone.om.BucketManager;
-import org.apache.hadoop.ozone.om.KeyManager;
-import org.apache.hadoop.ozone.om.PrefixManager;
-import org.apache.hadoop.ozone.om.VolumeManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Objects;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST;
-
-/**
- * Native implementation of {@link IAccessAuthorizer}: checks Ozone ACLs
- * against the volume, bucket, key and prefix managers.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "Yarn", "Ranger", "Hive", "HBase"})
-@InterfaceStability.Evolving
-public class OzoneNativeAuthorizer implements IAccessAuthorizer {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneNativeAuthorizer.class);
-  private VolumeManager volumeManager;
-  private BucketManager bucketManager;
-  private KeyManager keyManager;
-  private PrefixManager prefixManager;
-
-  public OzoneNativeAuthorizer() {
-  }
-
-  public OzoneNativeAuthorizer(VolumeManager volumeManager,
-      BucketManager bucketManager, KeyManager keyManager,
-      PrefixManager prefixManager) {
-    this.volumeManager = volumeManager;
-    this.bucketManager = bucketManager;
-    this.keyManager = keyManager;
-    this.prefixManager = prefixManager;
-  }
-
-  /**
-   * Check access for given ozoneObject.
-   *
-   * @param ozObject object for which access needs to be checked.
-   * @param context Context object encapsulating all user related information.
-   * @return true if the user has access, false otherwise.
-   * @throws OMException if the object is not an OzoneObjInfo or has an
-   *     unexpected resource type.
-   */
-  public boolean checkAccess(IOzoneObj ozObject, RequestContext context)
-      throws OMException {
-    Objects.requireNonNull(ozObject);
-    Objects.requireNonNull(context);
-    OzoneObjInfo objInfo;
-
-    if (ozObject instanceof OzoneObjInfo) {
-      objInfo = (OzoneObjInfo) ozObject;
-    } else {
-      throw new OMException("Unexpected input received. OM native acls are " +
-          "configured to work with OzoneObjInfo type only.", INVALID_REQUEST);
-    }
-
-    switch (objInfo.getResourceType()) {
-    case VOLUME:
-      LOG.trace("Checking access for volume: {}", objInfo);
-      return volumeManager.checkAccess(objInfo, context);
-    case BUCKET:
-      LOG.trace("Checking access for bucket: {}", objInfo);
-      return (bucketManager.checkAccess(objInfo, context)
-          && volumeManager.checkAccess(objInfo, context));
-    case KEY:
-      LOG.trace("Checking access for Key: {}", objInfo);
-      return (keyManager.checkAccess(objInfo, context)
-          && prefixManager.checkAccess(objInfo, context)
-          && bucketManager.checkAccess(objInfo, context)
-          && volumeManager.checkAccess(objInfo, context));
-    case PREFIX:
-      LOG.trace("Checking access for Prefix: {]", objInfo);
-      return (prefixManager.checkAccess(objInfo, context)
-          && bucketManager.checkAccess(objInfo, context)
-          && volumeManager.checkAccess(objInfo, context));
-    default:
-      throw new OMException("Unexpected object type:" +
-          objInfo.getResourceType(), INVALID_REQUEST);
-    }
-  }
-
-  public void setVolumeManager(VolumeManager volumeManager) {
-    this.volumeManager = volumeManager;
-  }
-
-  public void setBucketManager(BucketManager bucketManager) {
-    this.bucketManager = bucketManager;
-  }
-
-  public void setKeyManager(KeyManager keyManager) {
-    this.keyManager = keyManager;
-  }
-
-  public void setPrefixManager(PrefixManager prefixManager) {
-    this.prefixManager = prefixManager;
-  }
-}
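As a counterpoint to the cascading volume/bucket/key/prefix checks above, the sketch below shows the smallest possible IAccessAuthorizer: a permit-all implementation, roughly what takes effect when ACL enforcement is disabled. It is illustrative only and not part of this patch:

    /** Sketch: an authorizer that grants every request. */
    public class AllowAllAuthorizer implements IAccessAuthorizer {
      @Override
      public boolean checkAccess(IOzoneObj ozObject, RequestContext context) {
        // No manager cascade; every access is permitted.
        return true;
      }
    }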
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java
deleted file mode 100644
index 20e747a..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.security.acl;
-
-/**
- * OM native acl implementation.
- */
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
deleted file mode 100644
index 6405eef..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.GenericParentCommand;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * Common interface for command handling.
- */
-@Command(mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public abstract class Handler implements Callable<Void> {
-
-  protected static final Logger LOG = LoggerFactory.getLogger(Handler.class);
-
-  @ParentCommand
-  private GenericParentCommand parent;
-
-  @Override
-  public Void call() throws Exception {
-    throw new UnsupportedOperationException();
-  }
-
-  public boolean isVerbose() {
-    return parent.isVerbose();
-  }
-
-  public OzoneConfiguration createOzoneConfiguration() {
-    return parent.createOzoneConfiguration();
-  }
-
-}
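A hypothetical concrete subclass, sketched here to show how Handler's call(), isVerbose() and createOzoneConfiguration() hooks are meant to be used by the shell commands that follow (the command name is an example):

    @Command(name = "ping", description = "Sketch: prints a config summary")
    public class PingHandler extends Handler {
      @Override
      public Void call() throws Exception {
        OzoneConfiguration conf = createOzoneConfiguration();
        if (isVerbose()) {
          // Configuration#size() counts the loaded properties.
          System.out.println("Loaded " + conf.size() + " configuration entries");
        }
        return null;
      }
    }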
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java
deleted file mode 100644
index 2a17275..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-/**
- * Utility to print out a response object in human-readable form.
- */
-public final class ObjectPrinter {
-  private ObjectPrinter() {
-  }
-
-  public static String getObjectAsJson(Object o) throws IOException {
-    return JsonUtils.toJsonStringWithDefaultPrettyPrinter(o);
-  }
-
-  public static void printObjectAsJson(Object o) throws IOException {
-    System.out.println(getObjectAsJson(o));
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java
deleted file mode 100644
index 4cb283e..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java
+++ /dev/null
@@ -1,263 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientException;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_HTTP_SCHEME;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RPC_SCHEME;
-import org.apache.http.client.utils.URIBuilder;
-
-/**
- * Address of an ozone object for ozone shell.
- */
-public class OzoneAddress {
-
-  private static final String EMPTY_HOST = "___DEFAULT___";
-
-  private URI ozoneURI;
-
-  private String volumeName = "";
-
-  private String bucketName = "";
-
-  private String keyName = "";
-
-  public OzoneAddress() throws OzoneClientException {
-    this("o3:///");
-  }
-
-  public OzoneAddress(String address)
-      throws OzoneClientException {
-    if (address == null || address.equals("")) {
-      address = OZONE_RPC_SCHEME + ":///";
-    }
-    this.ozoneURI = parseURI(address);
-    String path = this.ozoneURI.getPath();
-
-    path = path.replaceAll("^/+", "");
-
-    int sep1 = path.indexOf('/');
-    int sep2 = path.indexOf('/', sep1 + 1);
-
-    if (sep1 == -1) {
-      volumeName = path;
-    } else {
-      //we have vol/bucket
-      volumeName = path.substring(0, sep1);
-      if (sep2 == -1) {
-        bucketName = path.substring(sep1 + 1);
-      } else {
-        //we have vol/bucket/key/.../...
-        bucketName = path.substring(sep1 + 1, sep2);
-        keyName = path.substring(sep2 + 1);
-      }
-    }
-  }
-
-  public OzoneClient createClient(OzoneConfiguration conf)
-      throws IOException, OzoneClientException {
-    OzoneClient client;
-    String scheme = ozoneURI.getScheme();
-    if (ozoneURI.getScheme() == null || scheme.isEmpty()) {
-      scheme = OZONE_RPC_SCHEME;
-    }
-    if (scheme.equals(OZONE_HTTP_SCHEME)) {
-      throw new UnsupportedOperationException(
-          "REST schema is not supported any more. Please use AWS S3 protocol "
-              + "if you need REST interface.");
-    } else if (scheme.equals(OZONE_RPC_SCHEME)) {
-      if (ozoneURI.getHost() != null && !ozoneURI.getAuthority()
-          .equals(EMPTY_HOST)) {
-        if (OmUtils.isOmHAServiceId(conf, ozoneURI.getHost())) {
-          // When host is an HA service ID
-          if (ozoneURI.getPort() != -1) {
-            throw new OzoneClientException(
-                "Port " + ozoneURI.getPort() + " specified in URI but host '"
-                    + ozoneURI.getHost() + "' is a logical (HA) OzoneManager "
-                    + "and does not use port information.");
-          }
-          client = OzoneClientFactory.getRpcClient(ozoneURI.getHost(), conf);
-        } else if (ozoneURI.getPort() == -1) {
-          client = OzoneClientFactory.getRpcClient(ozoneURI.getHost());
-        } else {
-          client = OzoneClientFactory
-              .getRpcClient(ozoneURI.getHost(), ozoneURI.getPort(), conf);
-        }
-      } else {
-        // When host is not specified
-        if (OmUtils.isServiceIdsDefined(conf)) {
-          throw new OzoneClientException("Service ID or host name must not"
-              + " be omitted when ozone.om.service.ids is defined.");
-        }
-        client = OzoneClientFactory.getRpcClient(conf);
-      }
-    } else {
-      throw new OzoneClientException(
-          "Invalid URI, unknown protocol scheme: " + scheme);
-    }
-    return client;
-  }
-
-  /**
-   * Verifies the user-provided URI.
-   *
-   * @param uri URI string
-   * @return URI
-   * @throws OzoneClientException if the URI is missing or malformed
-   */
-  protected URI parseURI(String uri)
-      throws OzoneClientException {
-    if ((uri == null) || uri.isEmpty()) {
-      throw new OzoneClientException(
-          "Ozone URI is needed to execute this command.");
-    }
-    URIBuilder uriBuilder = new URIBuilder(stringToUri(uri));
-    if (uriBuilder.getPort() == 0) {
-      uriBuilder.setPort(Shell.DEFAULT_OZONE_PORT);
-    }
-
-    try {
-      return uriBuilder.build();
-    } catch (URISyntaxException e) {
-      throw new OzoneClientException("Invalid URI: " + ozoneURI, e);
-    }
-  }
-
-  /**
-   * Construct a URI from a String with unescaped special characters
-   * that have non-standard semantics, e.g. /, ?, #. Custom parsing
-   * is needed to prevent misbehavior.
-   *
-   * @param pathString The input path in string form
-   * @return URI
-   */
-  private static URI stringToUri(String pathString) {
-    // parse uri components
-    String scheme = null;
-    String authority = null;
-    int start = 0;
-
-    // parse uri scheme, if any
-    int colon = pathString.indexOf(':');
-    int slash = pathString.indexOf('/');
-    if (colon > 0 && (slash == colon + 1)) {
-      // has a non zero-length scheme
-      scheme = pathString.substring(0, colon);
-      start = colon + 1;
-    }
-
-    // parse uri authority, if any
-    if (pathString.startsWith("//", start) &&
-        (pathString.length() - start > 2)) {
-      start += 2;
-      int nextSlash = pathString.indexOf('/', start);
-      int authEnd = nextSlash > 0 ? nextSlash : pathString.length();
-      authority = pathString.substring(start, authEnd);
-      start = authEnd;
-    }
-    // uri path is the rest of the string. ? or # are not interpreted,
-    // but any occurrence of them will be quoted by the URI ctor.
-    String path = pathString.substring(start, pathString.length());
-
-    // add leading slash to the path, if it does not exist
-    int firstSlash = path.indexOf('/');
-    if (firstSlash != 0) {
-      path = "/" + path;
-    }
-
-    if (authority == null || authority.equals("")) {
-      authority = EMPTY_HOST;
-    }
-    // Construct the URI
-    try {
-      return new URI(scheme, authority, path, null, null);
-    } catch (URISyntaxException e) {
-      throw new IllegalArgumentException(e);
-    }
-  }
-
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  public void ensureBucketAddress() throws OzoneClientException {
-    if (keyName.length() > 0) {
-      throw new OzoneClientException(
-          "Invalid bucket name. Delimiters (/) not allowed in bucket name");
-    } else if (volumeName.length() == 0) {
-      throw new OzoneClientException(
-          "Volume name is required.");
-    } else if (bucketName.length() == 0) {
-      throw new OzoneClientException(
-          "Bucket name is required.");
-    }
-  }
-
-  public void ensureKeyAddress() throws OzoneClientException {
-    if (keyName.length() == 0) {
-      throw new OzoneClientException(
-          "Key name is missing.");
-    } else if (volumeName.length() == 0) {
-      throw new OzoneClientException(
-          "Volume name is missing");
-    } else if (bucketName.length() == 0) {
-      throw new OzoneClientException(
-          "Bucket name is missing");
-    }
-  }
-
-  public void ensureVolumeAddress() throws OzoneClientException {
-    if (keyName.length() != 0) {
-      throw new OzoneClientException(
-          "Invalid volume name. Delimiters (/) not allowed in volume name");
-    } else if (volumeName.length() == 0) {
-      throw new OzoneClientException(
-          "Volume name is required");
-    } else if (bucketName.length() != 0) {
-      throw new OzoneClientException(
-          "Invalid volume name. Delimiters (/) not allowed in volume name");
-    }
-  }
-
-  public void ensureRootAddress() throws OzoneClientException {
-    if (keyName.length() != 0 || bucketName.length() != 0
-        || volumeName.length() != 0) {
-      throw new OzoneClientException(
-          "Invalid URI. Volume/bucket/key elements should not been used");
-    }
-  }
-}
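A short usage sketch of the parsing above; the host, volume, bucket and key values are examples:

    OzoneAddress address = new OzoneAddress("o3://om-host/vol1/bucket1/key1");
    address.ensureKeyAddress();              // rejects URIs missing any part
    String volume = address.getVolumeName(); // "vol1"
    String bucket = address.getBucketName(); // "bucket1"
    String key = address.getKeyName();       // "key1"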
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneShell.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneShell.java
deleted file mode 100644
index 239cee9..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneShell.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ozone.web.ozShell.bucket.BucketCommands;
-import org.apache.hadoop.ozone.web.ozShell.keys.KeyCommands;
-import org.apache.hadoop.ozone.web.ozShell.token.TokenCommands;
-import org.apache.hadoop.ozone.web.ozShell.volume.VolumeCommands;
-
-import io.opentracing.Scope;
-import io.opentracing.util.GlobalTracer;
-import picocli.CommandLine.Command;
-
-/**
- * Shell commands for native rpc object manipulation.
- */
-@Command(name = "ozone sh",
-    description = "Shell for Ozone object store",
-    subcommands = {
-        VolumeCommands.class,
-        BucketCommands.class,
-        KeyCommands.class,
-        TokenCommands.class
-    },
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true)
-public class OzoneShell extends Shell {
-
-  /**
-   * Main entry point for ozone shell command handling.
-   *
-   * @param argv command line arguments
-   * @throws Exception on failure
-   */
-  public static void main(String[] argv) throws Exception {
-    new OzoneShell().run(argv);
-  }
-
-  @Override
-  public void execute(String[] argv) {
-    TracingUtil.initTracing("shell");
-    try (Scope scope = GlobalTracer.get().buildSpan("main").startActive(true)) {
-      super.execute(argv);
-    }
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
deleted file mode 100644
index 999eede..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell;
-
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Ozone user interface commands.
- *
- * This class uses a dispatch method to route calls
- * to the appropriate handlers that execute the ozone functions.
- */
-public abstract class Shell extends GenericCli {
-
-  private static final Logger LOG = LoggerFactory.getLogger(Shell.class);
-
-  public static final String OZONE_URI_DESCRIPTION = "Ozone URI may start "
-      + "with the o3:// prefix or have no prefix at all. The URI may "
-      + "contain the host and port of the OM server; both are optional. "
-      + "If they are not specified, they are taken from "
-      + "the config files.";
-
-  public static final String OZONE_VOLUME_URI_DESCRIPTION =
-      "URI of the volume.\n" + OZONE_URI_DESCRIPTION;
-
-  public static final String OZONE_BUCKET_URI_DESCRIPTION =
-      "URI of the volume/bucket.\n" + OZONE_URI_DESCRIPTION;
-
-  public static final String OZONE_KEY_URI_DESCRIPTION =
-      "URI of the volume/bucket/key.\n" + OZONE_URI_DESCRIPTION;
-
-  public static final String OZONE_S3BUCKET_URI_DESCRIPTION = "URI of the " +
-      "S3Bucket.\n" + OZONE_URI_DESCRIPTION;
-
-  // General options
-  public static final int DEFAULT_OZONE_PORT = 50070;
-
-  @Override
-  protected void printError(Throwable errorArg) {
-    if (errorArg instanceof OMException) {
-      if (isVerbose()) {
-        errorArg.printStackTrace(System.err);
-      } else {
-        OMException omException = (OMException) errorArg;
-        System.err.println(String
-            .format("%s %s", omException.getResult().name(),
-                omException.getMessage()));
-      }
-    } else {
-      super.printError(errorArg);
-    }
-  }
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java
deleted file mode 100644
index 112e8f3..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.bucket;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-import java.util.Objects;
-
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-
-/**
- * Add acl handler for bucket.
- */
-@Command(name = "addacl",
-    description = "Add a new Acl.")
-public class AddAclBucketHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @CommandLine.Option(names = {"--acl", "-a"},
-      required = true,
-      description = "new acl." +
-          "r = READ," +
-          "w = WRITE," +
-          "c = CREATE," +
-          "d = DELETE," +
-          "l = LIST," +
-          "a = ALL," +
-          "n = NONE," +
-          "x = READ_AC," +
-          "y = WRITE_AC" +
-          "Ex user:user1:rw or group:hadoop:rw")
-  private String acl;
-
-  @CommandLine.Option(names = {"--store", "-s"},
-      required = false,
-      description = "store type. i.e OZONE or S3")
-  private String storeType;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    Objects.requireNonNull(acl, "New acl to be added not specified.");
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureBucketAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-    }
-
-    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
-        .setBucketName(bucketName)
-        .setVolumeName(volumeName)
-        .setResType(OzoneObj.ResourceType.BUCKET)
-        .setStoreType(storeType == null ? OZONE :
-            OzoneObj.StoreType.valueOf(storeType))
-        .build();
-
-    boolean result = client.getObjectStore().addAcl(obj,
-        OzoneAcl.parseAcl(acl));
-
-    System.out.printf("%s%n", "Acl added successfully: " + result);
-
-    client.close();
-    return null;
-  }
-
-}
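From the shell this was invoked as, for example, ozone sh bucket addacl --acl user:user1:rw /vol1/bucket1. The equivalent client-side calls, condensed from the handler above (volume, bucket and ACL values are examples):

    OzoneObj bucketObj = OzoneObjInfo.Builder.newBuilder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setResType(OzoneObj.ResourceType.BUCKET)
        .setStoreType(OzoneObj.StoreType.OZONE)
        .build();
    boolean added = client.getObjectStore()
        .addAcl(bucketObj, OzoneAcl.parseAcl("user:user1:rw"));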
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java
deleted file mode 100644
index ba1ef8ce..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.bucket;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.GenericParentCommand;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.cli.MissingSubcommandException;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * Subcommands for the bucket related operations.
- */
-@Command(name = "bucket",
-    description = "Bucket specific operations",
-    subcommands = {
-        InfoBucketHandler.class,
-        ListBucketHandler.class,
-        CreateBucketHandler.class,
-        DeleteBucketHandler.class,
-        AddAclBucketHandler.class,
-        RemoveAclBucketHandler.class,
-        GetAclBucketHandler.class,
-        SetAclBucketHandler.class
-    },
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class BucketCommands implements GenericParentCommand, Callable<Void> {
-
-  @ParentCommand
-  private Shell shell;
-
-  @Override
-  public Void call() throws Exception {
-    throw new MissingSubcommandException(
-        this.shell.getCmd().getSubcommands().get("bucket"));
-  }
-
-  @Override
-  public boolean isVerbose() {
-    return shell.isVerbose();
-  }
-
-  @Override
-  public OzoneConfiguration createOzoneConfiguration() {
-    return shell.createOzoneConfiguration();
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java
deleted file mode 100644
index b4951e8..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.bucket;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.BucketArgs;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.Parameters;
-
-/**
- * Create bucket handler.
- */
-@Command(name = "create",
-    description = "creates a bucket in a given volume")
-public class CreateBucketHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @Option(names = {"--bucketkey", "-k"},
-      description = "bucket encryption key name")
-  private String bekName;
-
-  @Option(names = {"--enforcegdpr", "-g"},
-      description = "if true, indicates GDPR enforced bucket, " +
-          "false/unspecified indicates otherwise")
-  private Boolean isGdprEnforced;
-
-  /**
-   * Executes create bucket.
-   */
-  @Override
-  public Void call() throws Exception {
-
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureBucketAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-
-    BucketArgs.Builder bb = new BucketArgs.Builder()
-        .setStorageType(StorageType.DEFAULT)
-        .setVersioning(false);
-
-    if (isGdprEnforced != null) {
-      bb.addMetadata(OzoneConsts.GDPR_FLAG, String.valueOf(isGdprEnforced));
-    }
-
-    if (bekName != null) {
-      if (!bekName.isEmpty()) {
-        bb.setBucketEncryptionKey(bekName);
-      } else {
-        throw new IllegalArgumentException("Bucket encryption key name must " +
-            "be specified to enable bucket encryption!");
-      }
-    }
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-      if (bekName != null) {
-        System.out.printf("Bucket Encryption enabled with Key Name: %s%n",
-            bekName);
-      }
-    }
-
-    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
-    vol.createBucket(bucketName, bb.build());
-
-    if (isVerbose()) {
-      OzoneBucket bucket = vol.getBucket(bucketName);
-      ObjectPrinter.printObjectAsJson(bucket);
-    }
-    return null;
-  }
-}
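Condensed client-side sketch of what the handler above does when both options are supplied; the volume, bucket and encryption key names are examples:

    BucketArgs args = new BucketArgs.Builder()
        .setStorageType(StorageType.DEFAULT)
        .setVersioning(false)
        .addMetadata(OzoneConsts.GDPR_FLAG, "true")   // --enforcegdpr=true
        .setBucketEncryptionKey("bucket-enc-key")     // --bucketkey
        .build();
    client.getObjectStore().getVolume("vol1").createBucket("bucket1", args);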
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java
deleted file mode 100644
index 6ed6ddf..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.bucket;
-
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-/**
- * Delete bucket Handler.
- */
-@Command(name = "delete",
-    description = "deletes an empty bucket")
-public class DeleteBucketHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureBucketAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-    }
-
-    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
-    vol.deleteBucket(bucketName);
-    return null;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java
deleted file mode 100644
index ccb5d46..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.bucket;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-import java.util.List;
-
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-
-/**
- * Get acl handler for bucket.
- */
-@Command(name = "getacl",
-    description = "List all acls.")
-public class GetAclBucketHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @CommandLine.Option(names = {"--store", "-s"},
-      required = false,
-      description = "store type. i.e OZONE or S3")
-  private String storeType;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureBucketAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-    }
-
-    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
-        .setBucketName(bucketName)
-        .setVolumeName(volumeName)
-        .setResType(OzoneObj.ResourceType.BUCKET)
-        .setStoreType(storeType == null ? OZONE :
-            OzoneObj.StoreType.valueOf(storeType))
-        .build();
-
-    List<OzoneAcl> result = client.getObjectStore().getAcl(obj);
-
-    System.out.printf("%s%n",
-        JsonUtils.toJsonStringWithDefaultPrettyPrinter(result));
-    client.close();
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java
deleted file mode 100644
index e5677a4..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.bucket;
-
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-/**
- * Executes Info bucket.
- */
-@Command(name = "info",
-    description = "returns information about a bucket")
-public class InfoBucketHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureBucketAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-    }
-
-    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
-    OzoneBucket bucket = vol.getBucket(bucketName);
-
-    ObjectPrinter.printObjectAsJson(bucket);
-
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java
deleted file mode 100644
index 746c727..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.bucket;
-
-import java.util.Iterator;
-
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Help.Visibility;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.Parameters;
-
-/**
- * Executes List Bucket.
- */
-@Command(name = "list",
-    aliases = "ls",
-    description = "lists the buckets in a volume.")
-public class ListBucketHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_VOLUME_URI_DESCRIPTION)
-  private String uri;
-
-  @Option(names = {"--length", "-l"},
-      description = "Limit of the max results",
-      defaultValue = "100",
-      showDefaultValue = Visibility.ALWAYS)
-  private int maxBuckets;
-
-  @Option(names = {"--start", "-s"},
-      description = "The first bucket to start the listing")
-  private String startBucket;
-
-  @Option(names = {"--prefix", "-p"},
-      description = "Prefix to filter the buckets")
-  private String prefix;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureVolumeAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    if (maxBuckets < 1) {
-      throw new IllegalArgumentException(
-          "the length should be a positive number");
-    }
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-    }
-
-    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
-    Iterator<? extends OzoneBucket> bucketIterator =
-        vol.listBuckets(prefix, startBucket);
-
-    int counter = 0;
-    while (maxBuckets > 0 && bucketIterator.hasNext()) {
-      ObjectPrinter.printObjectAsJson(bucketIterator.next());
-
-      maxBuckets -= 1;
-      counter++;
-    }
-
-    if (isVerbose()) {
-      System.out.printf("Found : %d buckets for volume : %s ",
-          counter, volumeName);
-    }
-
-    return null;
-  }
-
-}
-
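The listing loop above reduces to the following pattern: listBuckets returns a lazy iterator filtered by a prefix and a start key, and --length caps how many entries are printed. A sketch with example values:

    Iterator<? extends OzoneBucket> buckets =
        vol.listBuckets("test-", null);  // prefix filter, start from beginning
    int remaining = 100;                 // --length
    while (remaining-- > 0 && buckets.hasNext()) {
      ObjectPrinter.printObjectAsJson(buckets.next());
    }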
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java
deleted file mode 100644
index 216f66c..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.bucket;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-import java.util.Objects;
-
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-
-/**
- * Remove acl handler for bucket.
- */
-@Command(name = "removeacl",
-    description = "Remove an acl.")
-public class RemoveAclBucketHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @CommandLine.Option(names = {"--acl", "-a"},
-      required = true,
-      description = "Remove acl." +
-          "r = READ," +
-          "w = WRITE," +
-          "c = CREATE," +
-          "d = DELETE," +
-          "l = LIST," +
-          "a = ALL," +
-          "n = NONE," +
-          "x = READ_AC," +
-          "y = WRITE_AC" +
-          "Ex user:user1:rw or group:hadoop:rw")
-  private String acl;
-
-  @CommandLine.Option(names = {"--store", "-s"},
-      required = false,
-      description = "store type. i.e OZONE or S3")
-  private String storeType;
-
-  /**
-   * Removes the given ACL from the bucket.
-   */
-  @Override
-  public Void call() throws Exception {
-    Objects.requireNonNull(acl, "ACL to be removed not specified.");
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureBucketAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-    }
-
-    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
-        .setBucketName(bucketName)
-        .setVolumeName(volumeName)
-        .setResType(OzoneObj.ResourceType.BUCKET)
-        .setStoreType(storeType == null ? OZONE :
-            OzoneObj.StoreType.valueOf(storeType))
-        .build();
-
-    boolean result = client.getObjectStore().removeAcl(obj,
-        OzoneAcl.parseAcl(acl));
-
-    System.out.printf("%s%n", "Acl removed successfully: " + result);
-
-    client.close();
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/S3BucketMapping.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/S3BucketMapping.java
deleted file mode 100644
index d147f9b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/S3BucketMapping.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.bucket;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-/**
- * S3Bucket mapping handler, which returns volume name and Ozone fs uri for
- * that bucket.
- */
-@Command(name = "path",
-    description = "Returns the ozone path for S3Bucket")
-public class S3BucketMapping extends Handler {
-
-  @Parameters(arity = "1..1", description = "Name of the s3 bucket.")
-  private String s3BucketName;
-
-  /**
-   * Executes the S3 bucket to Ozone path lookup.
-   */
-  @Override
-  public Void call() throws Exception {
-
-    OzoneAddress ozoneAddress = new OzoneAddress();
-    OzoneClient client = ozoneAddress.createClient(createOzoneConfiguration());
-
-    String mapping =
-        client.getObjectStore().getOzoneBucketMapping(s3BucketName);
-    String volumeName =
-        client.getObjectStore().getOzoneVolumeName(s3BucketName);
-
-    if (isVerbose()) {
-      System.out.printf("Mapping created for S3Bucket is : %s%n", mapping);
-    }
-
-    System.out.printf("Volume name for S3Bucket is : %s%n", volumeName);
-
-    String ozoneFsUri = String.format("%s://%s.%s", OzoneConsts
-        .OZONE_URI_SCHEME, s3BucketName, volumeName);
-
-    System.out.printf("Ozone FileSystem Uri is : %s%n", ozoneFsUri);
-
-    return null;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java
deleted file mode 100644
index e603068..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.bucket;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-import java.util.Objects;
-
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-
-/**
- * Set acl handler for bucket.
- */
-@Command(name = "setacl",
-    description = "Set acls.")
-public class SetAclBucketHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @CommandLine.Option(names = {"--acls", "-al"},
-      required = true,
-      description = "Comma seperated acls." +
-          "r = READ," +
-          "w = WRITE," +
-          "c = CREATE," +
-          "d = DELETE," +
-          "l = LIST," +
-          "a = ALL," +
-          "n = NONE," +
-          "x = READ_AC," +
-          "y = WRITE_AC" +
-          "Ex user:user1:rw,user:user2:a,group:hadoop:a")
-  private String acls;
-
-  @CommandLine.Option(names = {"--store", "-s"},
-      required = false,
-      description = "store type. i.e OZONE or S3")
-  private String storeType;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    Objects.requireNonNull(acls, "Acls to be set not specified.");
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureBucketAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-    }
-
-    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
-        .setBucketName(bucketName)
-        .setVolumeName(volumeName)
-        .setResType(OzoneObj.ResourceType.BUCKET)
-        .setStoreType(storeType == null ? OZONE :
-            OzoneObj.StoreType.valueOf(storeType))
-        .build();
-
-    boolean result = client.getObjectStore().setAcl(obj,
-        OzoneAcl.parseAcls(acls));
-
-    System.out.printf("%s%n", "Acl set successfully: " + result);
-
-    client.close();
-    return null;
-  }
-
-}
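Both bucket ACL handlers above follow the same round trip: build an OzoneObj describing the bucket, parse the ACL string, and invoke the matching ObjectStore call. A condensed sketch of that pattern follows; the volume/bucket names, ACL strings, and wrapper class are hypothetical, and `client` stands for an OzoneClient obtained as in the handlers.

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;

final class BucketAclSketch {
  static void roundTrip(OzoneClient client) throws Exception {
    // Describe the bucket the ACL operations target (names hypothetical).
    OzoneObj bucketObj = OzoneObjInfo.Builder.newBuilder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setResType(OzoneObj.ResourceType.BUCKET)
        .setStoreType(OzoneObj.StoreType.OZONE)
        .build();

    // Replace the bucket's ACL list, then revoke one entry again.
    client.getObjectStore()
        .setAcl(bucketObj, OzoneAcl.parseAcls("user:user1:rw,group:hadoop:a"));
    client.getObjectStore()
        .removeAcl(bucketObj, OzoneAcl.parseAcl("user:user1:rw"));
  }
}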
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/package-info.java
deleted file mode 100644
index c344c35..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/**
- * Bucket related commands for the Ozone shell.
- */
-package org.apache.hadoop.ozone.web.ozShell.bucket;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java
deleted file mode 100644
index b4e8134..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.keys;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-import java.util.Objects;
-
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-
-/**
- * Add acl handler for key.
- */
-@Command(name = "addacl",
-    description = "Add a new Acl.")
-public class AddAclKeyHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @CommandLine.Option(names = {"--acl", "-a"},
-      required = true,
-      description = "Add acl." +
-          "r = READ," +
-          "w = WRITE," +
-          "c = CREATE," +
-          "d = DELETE," +
-          "l = LIST," +
-          "a = ALL," +
-          "n = NONE," +
-          "x = READ_AC," +
-          "y = WRITE_AC" +
-          "Ex user:user1:rw or group:hadoop:rw")
-  private String acl;
-
-  @CommandLine.Option(names = {"--store", "-s"},
-      required = false,
-      description = "store type. i.e OZONE or S3")
-  private String storeType;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    Objects.requireNonNull(acl, "New acl to be added not specified.");
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureKeyAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-    String keyName = address.getKeyName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-      System.out.printf("Key Name : %s%n", keyName);
-    }
-
-    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
-        .setBucketName(bucketName)
-        .setVolumeName(volumeName)
-        .setKeyName(address.getKeyName())
-        .setResType(OzoneObj.ResourceType.KEY)
-        .setStoreType(storeType == null ? OZONE :
-            OzoneObj.StoreType.valueOf(storeType))
-        .build();
-
-    boolean result = client.getObjectStore().addAcl(obj,
-        OzoneAcl.parseAcl(acl));
-
-    System.out.printf("%s%n", "Acl added successfully: " + result);
-
-    client.close();
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java
deleted file mode 100644
index 6f8bdff..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.keys;
-
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-/**
- * Executes Delete Key.
- */
-@Command(name = "delete",
-    description = "deletes an existing key")
-public class DeleteKeyHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_KEY_URI_DESCRIPTION)
-  private String uri;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureKeyAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-    String keyName = address.getKeyName();
-
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-      System.out.printf("Key Name : %s%n", keyName);
-    }
-
-    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
-    OzoneBucket bucket = vol.getBucket(bucketName);
-    bucket.deleteKey(keyName);
-    return null;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java
deleted file mode 100644
index 6423dbb..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.keys;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-import java.util.List;
-
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-
-/**
- * Get acl handler for Key.
- */
-@Command(name = "getacl",
-    description = "List all acls.")
-public class GetAclKeyHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @CommandLine.Option(names = {"--store", "-s"},
-      required = false,
-      description = "store type. i.e OZONE or S3")
-  private String storeType;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureKeyAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-    String keyName = address.getKeyName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-      System.out.printf("Key Name : %s%n", keyName);
-    }
-
-    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
-        .setBucketName(bucketName)
-        .setVolumeName(volumeName)
-        .setKeyName(keyName)
-        .setResType(OzoneObj.ResourceType.KEY)
-        .setStoreType(storeType == null ? OZONE :
-            OzoneObj.StoreType.valueOf(storeType))
-        .build();
-
-    List<OzoneAcl> result = client.getObjectStore().getAcl(obj);
-
-    System.out.printf("%s%n",
-        JsonUtils.toJsonStringWithDefaultPrettyPrinter(result));
-    client.close();
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
deleted file mode 100644
index 4e86699..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.keys;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientException;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import org.apache.commons.codec.digest.DigestUtils;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-/**
- * Gets an existing key.
- */
-@Command(name = "get",
-    description = "Gets a specific key from ozone server")
-public class GetKeyHandler extends Handler {
-
-  @Parameters(index = "0", arity = "1..1", description =
-      Shell.OZONE_KEY_URI_DESCRIPTION)
-  private String uri;
-
-  @Parameters(index = "1", arity = "1..1",
-      description = "File path to download the key to")
-  private String fileName;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureKeyAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-    String keyName = address.getKeyName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-      System.out.printf("Key Name : %s%n", keyName);
-    }
-
-    Path dataFilePath = Paths.get(fileName);
-    File dataFile = new File(fileName);
-
-    if (dataFile.exists() && dataFile.isDirectory()) {
-      dataFile = new File(fileName, keyName);
-    }
-
-    if (dataFile.exists()) {
-      throw new OzoneClientException(
-          fileName + "exists. Download will overwrite an "
-              + "existing file. Aborting.");
-    }
-
-    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
-    OzoneBucket bucket = vol.getBucket(bucketName);
-    OzoneInputStream keyInputStream = bucket.readKey(keyName);
-    if (dataFilePath != null) {
-      FileOutputStream outputStream = new FileOutputStream(dataFile);
-      IOUtils.copyBytes(keyInputStream, outputStream,
-          (int) new OzoneConfiguration()
-              .getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
-                  OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES));
-      outputStream.close();
-    } else {
-      throw new OzoneClientException(
-          "Can not access the file \"" + fileName + "\"");
-    }
-    if (isVerbose()) {
-      FileInputStream stream = new FileInputStream(dataFile);
-      String hash = DigestUtils.md5Hex(stream);
-      System.out.printf("Downloaded file hash : %s%n", hash);
-      stream.close();
-    }
-    return null;
-  }
-}
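Stripped of CLI parsing and verbose output, the download above reduces to a chunk-sized stream copy from an OzoneInputStream to a local file. A minimal sketch under the same assumptions follows; the key coordinates, target path, and wrapper class are hypothetical.

import java.io.FileOutputStream;

import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;

import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;

final class GetKeySketch {
  static void download(OzoneClient client, OzoneConfiguration conf)
      throws Exception {
    OzoneBucket bucket =
        client.getObjectStore().getVolume("vol1").getBucket("bucket1");
    // Copy in blocks of the configured SCM chunk size, as the handler does.
    int chunkSize = (int) conf.getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
        OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES);
    try (OzoneInputStream in = bucket.readKey("key1");
         FileOutputStream out = new FileOutputStream("/tmp/key1")) {
      IOUtils.copyBytes(in, out, chunkSize);
    }
  }
}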
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java
deleted file mode 100644
index 7cb54f2..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.keys;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneKeyDetails;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-/**
- * Executes Info Object.
- */
-@Command(name = "info",
-    description = "returns information about an existing key")
-public class InfoKeyHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_KEY_URI_DESCRIPTION)
-  private String uri;
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureKeyAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-    String keyName = address.getKeyName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-      System.out.printf("Key Name : %s%n", keyName);
-    }
-
-    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
-    OzoneBucket bucket = vol.getBucket(bucketName);
-    OzoneKeyDetails key = bucket.getKey(keyName);
-    // For compliance/security, GDPR Secret & Algorithm details are removed
-    // from local copy of metadata before printing. This doesn't remove these
-    // from Ozone Manager's actual metadata.
-    key.getMetadata().remove(OzoneConsts.GDPR_SECRET);
-    key.getMetadata().remove(OzoneConsts.GDPR_ALGORITHM);
-
-    ObjectPrinter.printObjectAsJson(key);
-    return null;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java
deleted file mode 100644
index 4de97c5..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.keys;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.GenericParentCommand;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.cli.MissingSubcommandException;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * Subcommand to group key related operations.
- */
-@Command(name = "key",
-    description = "Key specific operations",
-    subcommands = {
-        InfoKeyHandler.class,
-        ListKeyHandler.class,
-        GetKeyHandler.class,
-        PutKeyHandler.class,
-        RenameKeyHandler.class,
-        DeleteKeyHandler.class,
-        AddAclKeyHandler.class,
-        RemoveAclKeyHandler.class,
-        SetAclKeyHandler.class,
-        GetAclKeyHandler.class
-    },
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class KeyCommands implements GenericParentCommand, Callable<Void> {
-
-  @ParentCommand
-  private Shell shell;
-
-  @Override
-  public Void call() throws Exception {
-    throw new MissingSubcommandException(
-        this.shell.getCmd().getSubcommands().get("key"));
-  }
-
-  @Override
-  public boolean isVerbose() {
-    return shell.isVerbose();
-  }
-
-  @Override
-  public OzoneConfiguration createOzoneConfiguration() {
-    return shell.createOzoneConfiguration();
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
deleted file mode 100644
index 9829eefed..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.keys;
-
-import java.util.Iterator;
-
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.Parameters;
-
-/**
- * Executes List Keys.
- */
-@Command(name = "list",
-    aliases = "ls",
-    description = "list all keys in a given bucket")
-public class ListKeyHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @Option(names = {"--length", "-l"},
-      description = "Limit of the max results",
-      defaultValue = "100")
-  private int maxKeys;
-
-  @Option(names = {"--start", "-s"},
-      description = "The first key to start the listing")
-  private String startKey;
-
-  @Option(names = {"--prefix", "-p"},
-      description = "Prefix to filter the key")
-  private String prefix;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureBucketAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-
-    if (maxKeys < 1) {
-      throw new IllegalArgumentException(
-          "the length should be a positive number");
-    }
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("bucket Name : %s%n", bucketName);
-    }
-
-    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
-    OzoneBucket bucket = vol.getBucket(bucketName);
-    Iterator<? extends OzoneKey> keyIterator = bucket.listKeys(prefix,
-        startKey);
-
-    int maxKeyLimit = maxKeys;
-
-    int counter = 0;
-    while (maxKeys > 0 && keyIterator.hasNext()) {
-      OzoneKey ozoneKey = keyIterator.next();
-      ObjectPrinter.printObjectAsJson(ozoneKey);
-      maxKeys -= 1;
-      counter++;
-    }
-
-    // More keys are available than requested; notify about the --length limit.
-    if (keyIterator.hasNext()) {
-      System.out.println("Listing first " + maxKeyLimit + " entries of the " +
-          "result. Use --length (-l) to override max returned keys.");
-    } else if (isVerbose()) {
-      System.out.printf("Found : %d keys for bucket %s in volume : %s ",
-          counter, bucketName, volumeName);
-    }
-
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
deleted file mode 100644
index d80f36b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.keys;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import org.apache.commons.codec.digest.DigestUtils;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.Parameters;
-
-/**
- * Puts a file into an ozone bucket.
- */
-@Command(name = "put",
-    description = "creates or overwrites an existing key")
-public class PutKeyHandler extends Handler {
-
-  @Parameters(index = "0", arity = "1..1", description =
-      Shell.OZONE_KEY_URI_DESCRIPTION)
-  private String uri;
-
-  @Parameters(index = "1", arity = "1..1", description = "File to upload")
-  private String fileName;
-
-  @Option(names = {"-r", "--replication"},
-      description = "Replication factor of the new key. (use ONE or THREE) "
-          + "Default is specified in the cluster-wide config.")
-  private ReplicationFactor replicationFactor;
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureKeyAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-    String keyName = address.getKeyName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-      System.out.printf("Key Name : %s%n", keyName);
-    }
-
-    File dataFile = new File(fileName);
-
-    if (isVerbose()) {
-      FileInputStream stream = new FileInputStream(dataFile);
-      String hash = DigestUtils.md5Hex(stream);
-      System.out.printf("File Hash : %s%n", hash);
-      stream.close();
-    }
-
-    Configuration conf = new OzoneConfiguration();
-    if (replicationFactor == null) {
-      replicationFactor = ReplicationFactor.valueOf(
-          conf.getInt(OZONE_REPLICATION, OZONE_REPLICATION_DEFAULT));
-    }
-
-    ReplicationType replicationType = ReplicationType.valueOf(
-        conf.get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT));
-    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
-    OzoneBucket bucket = vol.getBucket(bucketName);
-    Map<String, String> keyMetadata = new HashMap<>();
-    if (Boolean.valueOf(bucket.getMetadata().get(OzoneConsts.GDPR_FLAG))) {
-      keyMetadata.put(OzoneConsts.GDPR_FLAG, Boolean.TRUE.toString());
-    }
-    OzoneOutputStream outputStream = bucket
-        .createKey(keyName, dataFile.length(), replicationType,
-            replicationFactor, keyMetadata);
-    FileInputStream fileInputStream = new FileInputStream(dataFile);
-    IOUtils.copyBytes(fileInputStream, outputStream, (int) conf
-        .getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY, OZONE_SCM_CHUNK_SIZE_DEFAULT,
-            StorageUnit.BYTES));
-    outputStream.close();
-    fileInputStream.close();
-    return null;
-  }
-
-}
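The upload path is the mirror image: createKey hands back an OzoneOutputStream and the source file is copied into it, with replication settings resolved from the configuration exactly as the handler does. A compact sketch follows; the source path, key coordinates, and wrapper class are hypothetical.

import java.io.File;
import java.io.FileInputStream;
import java.util.HashMap;

import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT;

final class PutKeySketch {
  static void upload(OzoneClient client, OzoneConfiguration conf)
      throws Exception {
    File src = new File("/tmp/data.bin");  // hypothetical source file
    OzoneBucket bucket =
        client.getObjectStore().getVolume("vol1").getBucket("bucket1");
    // Resolve replication from cluster config, as the handler does.
    ReplicationFactor factor = ReplicationFactor.valueOf(
        conf.getInt(OZONE_REPLICATION, OZONE_REPLICATION_DEFAULT));
    ReplicationType type = ReplicationType.valueOf(
        conf.get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT));
    try (FileInputStream in = new FileInputStream(src);
         OzoneOutputStream out = bucket.createKey("key1", src.length(),
             type, factor, new HashMap<>())) {
      IOUtils.copyBytes(in, out, (int) conf.getStorageSize(
          OZONE_SCM_CHUNK_SIZE_KEY, OZONE_SCM_CHUNK_SIZE_DEFAULT,
          StorageUnit.BYTES));
    }
  }
}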
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java
deleted file mode 100644
index f561aa2..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.keys;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-import java.util.Objects;
-
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-
-/**
- * Remove acl handler for key.
- */
-@Command(name = "removeacl",
-    description = "Remove an acl.")
-public class RemoveAclKeyHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @CommandLine.Option(names = {"--acl", "-a"},
-      required = true,
-      description = "Remove acl." +
-          "r = READ," +
-          "w = WRITE," +
-          "c = CREATE," +
-          "d = DELETE," +
-          "l = LIST," +
-          "a = ALL," +
-          "n = NONE," +
-          "x = READ_AC," +
-          "y = WRITE_AC" +
-          "Ex user:user1:rw or group:hadoop:rw")
-  private String acl;
-
-  @CommandLine.Option(names = {"--store", "-s"},
-      required = false,
-      description = "store type. i.e OZONE or S3")
-  private String storeType;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    Objects.requireNonNull(acl, "ACL to be removed not specified.");
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureKeyAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-    String keyName = address.getKeyName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-      System.out.printf("Key Name : %s%n", keyName);
-    }
-
-    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
-        .setBucketName(bucketName)
-        .setVolumeName(volumeName)
-        .setKeyName(keyName)
-        .setResType(OzoneObj.ResourceType.KEY)
-        .setStoreType(storeType == null ? OZONE :
-            OzoneObj.StoreType.valueOf(storeType))
-        .build();
-
-    boolean result = client.getObjectStore().removeAcl(obj,
-        OzoneAcl.parseAcl(acl));
-
-    System.out.printf("%s%n", "Acl removed successfully: " + result);
-
-    client.close();
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RenameKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RenameKeyHandler.java
deleted file mode 100644
index b2ecbda..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RenameKeyHandler.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.keys;
-
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-/**
- * Renames an existing key.
- */
-@Command(name = "rename",
-    description = "renames an existing key")
-public class RenameKeyHandler extends Handler {
-
-  @Parameters(index = "0", arity = "1..1",
-      description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @Parameters(index = "1", arity = "1..1",
-      description = "The existing key to be renamed")
-  private String fromKey;
-
-  @Parameters(index = "2", arity = "1..1",
-      description = "The new desired name of the key")
-  private String toKey;
-
-  @Override
-  public Void call() throws Exception {
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureBucketAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-    }
-
-    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
-    OzoneBucket bucket = vol.getBucket(bucketName);
-    bucket.renameKey(fromKey, toKey);
-
-    if (isVerbose()) {
-      System.out.printf("Renamed Key : %s to %s%n", fromKey, toKey);
-    }
-
-    return null;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java
deleted file mode 100644
index a6a4872..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.keys;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-import java.util.Objects;
-
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-
-/**
- * Set acl handler for Key.
- */
-@Command(name = "setacl",
-    description = "Set acls.")
-public class SetAclKeyHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @CommandLine.Option(names = {"--acls", "-al"},
-      required = true,
-      description = "Comma separated acls." +
-          "r = READ," +
-          "w = WRITE," +
-          "c = CREATE," +
-          "d = DELETE," +
-          "l = LIST," +
-          "a = ALL," +
-          "n = NONE," +
-          "x = READ_AC," +
-          "y = WRITE_AC" +
-          "Ex user:user1:rw,user:user2:a,group:hadoop:a")
-  private String acls;
-
-  @CommandLine.Option(names = {"--store", "-s"},
-      required = false,
-      description = "store type. i.e OZONE or S3")
-  private String storeType;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    Objects.requireNonNull(acls, "New acls to be added not specified.");
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureKeyAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-    String bucketName = address.getBucketName();
-    String keyName = address.getKeyName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-      System.out.printf("Bucket Name : %s%n", bucketName);
-    }
-
-    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
-        .setBucketName(bucketName)
-        .setVolumeName(volumeName)
-        .setKeyName(keyName)
-        .setResType(OzoneObj.ResourceType.KEY)
-        .setStoreType(storeType == null ? OZONE :
-            OzoneObj.StoreType.valueOf(storeType))
-        .build();
-
-    boolean result = client.getObjectStore().setAcl(obj,
-        OzoneAcl.parseAcls(acls));
-
-    System.out.printf("%s%n", "Acl set successfully: " + result);
-
-    client.close();
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/package-info.java
deleted file mode 100644
index 1deb7ad..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/**
- * Key related commands for the Ozone shell.
- */
-package org.apache.hadoop.ozone.web.ozShell.keys;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/package-info.java
deleted file mode 100644
index e33b6e7..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/package-info.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/**
- *  ozShell Class acts as the command line interface to
- *  the ozone Rest Client.
- */
-package org.apache.hadoop.ozone.web.ozShell;
-
-/**
- A simple CLI to work against Ozone.
- **/
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/GetS3SecretHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/GetS3SecretHandler.java
deleted file mode 100644
index 1a359e2..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/GetS3SecretHandler.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.s3;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.security.UserGroupInformation;
-import picocli.CommandLine.Command;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-
-/**
- * Executes getsecret calls.
- */
-@Command(name = "getsecret",
-    description = "Returns s3 secret for current user")
-public class GetS3SecretHandler extends Handler {
-
-  public static final String OZONE_GETS3SECRET_ERROR = "This command is not" +
-      " supported in unsecure clusters.";
-  /**
-   * Executes getS3Secret.
-   */
-  @Override
-  public Void call() throws Exception {
-    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
-    OzoneClient client =
-        new OzoneAddress().createClient(ozoneConfiguration);
-
-    // getS3Secret works only with secured clusters
-    if (ozoneConfiguration.getBoolean(OZONE_SECURITY_ENABLED_KEY, false)) {
-      System.out.println(
-          client.getObjectStore().getS3Secret(
-              UserGroupInformation.getCurrentUser().getUserName()
-          ).toString()
-      );
-    } else {
-      // print an error message for unsecured clusters
-      System.out.println(OZONE_GETS3SECRET_ERROR);
-    }
-
-    return null;
-  }
-}
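
The removed handler gates on `ozone.security.enabled` before calling `getS3Secret`. Below is a minimal, self-contained sketch of that gate — not taken from the deleted sources; the configuration key is the one named in the handler's own error path:

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class SecurityGateDemo {
  public static void main(String[] args) {
    // OZONE_SECURITY_ENABLED_KEY resolves to "ozone.security.enabled".
    OzoneConfiguration conf = new OzoneConfiguration();
    boolean secure = conf.getBoolean("ozone.security.enabled", false);

    if (secure) {
      System.out.println("Secured cluster: getsecret would proceed.");
    } else {
      System.out.println("This command is not supported in insecure clusters.");
    }
  }
}
```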
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/S3Shell.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/S3Shell.java
deleted file mode 100644
index ebb9d6ec..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/S3Shell.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.s3;
-
-import io.opentracing.Scope;
-import io.opentracing.util.GlobalTracer;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import org.apache.hadoop.ozone.web.ozShell.bucket.S3BucketMapping;
-import picocli.CommandLine.Command;
-
-/**
- * Shell for s3 related operations.
- */
-@Command(name = "ozone s3",
-    description = "Shell for S3 specific operations",
-    subcommands = {
-        GetS3SecretHandler.class,
-        S3BucketMapping.class
-    })
-public class S3Shell extends Shell {
-
-  @Override
-  public void execute(String[] argv) {
-    TracingUtil.initTracing("s3shell");
-    try (Scope scope = GlobalTracer.get().buildSpan("main").startActive(true)) {
-      super.execute(argv);
-    }
-  }
-
-  /**
-   * Main entry point for S3Shell command handling.
-   *
-   * @param argv command line arguments
-   * @throws Exception if command execution fails
-   */
-  public static void main(String[] argv) throws Exception {
-    new S3Shell().run(argv);
-  }
-}
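
The deleted shells all follow the same picocli pattern: a parent `@Command` enumerates its subcommands and the parser dispatches to the matching handler's `call()`. A standalone sketch of that wiring, assuming picocli 4.x; the class and command names here are illustrative, not from the deleted sources:

```java
import java.util.concurrent.Callable;
import picocli.CommandLine;
import picocli.CommandLine.Command;

// Parent command: declares its subcommands, prints usage hint when run bare.
@Command(name = "demo-s3", description = "Toy parent command",
    subcommands = {GetSecret.class})
class DemoShell implements Callable<Integer> {
  @Override
  public Integer call() {
    System.out.println("Usage: demo-s3 getsecret");
    return 0;
  }

  public static void main(String[] args) {
    // Parsing "getsecret" dispatches to GetSecret.call().
    System.exit(new CommandLine(new DemoShell()).execute(args));
  }
}

@Command(name = "getsecret", description = "Toy subcommand")
class GetSecret implements Callable<Integer> {
  @Override
  public Integer call() {
    System.out.println("would fetch the S3 secret here");
    return 0;
  }
}
```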
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/package-info.java
deleted file mode 100644
index 079ef18..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-/**
- * S3 commands for Ozone.
- */
-package org.apache.hadoop.ozone.web.ozShell.s3;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/CancelTokenHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/CancelTokenHandler.java
deleted file mode 100644
index a025e24..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/CancelTokenHandler.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.token;
-
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.security.token.Token;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-
-/**
- * Executes cancelDelegationToken api.
- */
-@Command(name = "cancel",
-    description = "cancel a delegation token.")
-public class CancelTokenHandler extends Handler {
-
-  @CommandLine.Option(names = {"--token", "-t"},
-      description = "file containing encoded token",
-      defaultValue = "/tmp/token.txt",
-      showDefaultValue = CommandLine.Help.Visibility.ALWAYS)
-  private String tokenFile;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    OzoneAddress address = new OzoneAddress("");
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    if (!OzoneSecurityUtil.isSecurityEnabled(createOzoneConfiguration())) {
-      System.err.println("Error: Token operations work only when security is " +
-          "enabled. To enable security set ozone.security.enabled to true.");
-      return null;
-    }
-
-    if (Files.notExists(Paths.get(tokenFile))) {
-      System.err.println("Error:Cancel token operation failed as token file: "
-          + tokenFile + " containing encoded token doesn't exist.");
-      return null;
-    }
-    Token token = new Token();
-    token.decodeFromUrlString(
-        new String(Files.readAllBytes(Paths.get(tokenFile)),
-            StandardCharsets.UTF_8));
-    client.getObjectStore().cancelDelegationToken(token);
-    return null;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/GetTokenHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/GetTokenHandler.java
deleted file mode 100644
index 6d1777c..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/GetTokenHandler.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.token;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-
-import java.util.Objects;
-
-/**
- * Executes getDelegationToken api.
- */
-@Command(name = "get",
-    description = "get a delegation token.")
-public class GetTokenHandler extends Handler {
-
-  @CommandLine.Option(names = {"--renewer", "-r"},
-      description = "Token renewer",
-      showDefaultValue = CommandLine.Help.Visibility.ALWAYS)
-  private String renewer;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    OzoneAddress address = new OzoneAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    if (!OzoneSecurityUtil.isSecurityEnabled(createOzoneConfiguration())) {
-      System.err.println("Error: Token operations work only when security is " +
-          "enabled. To enable security set ozone.security.enabled to true.");
-      return null;
-    }
-
-    if (StringUtils.isEmpty(renewer)) {
-      renewer = UserGroupInformation.getCurrentUser().getShortUserName();
-    }
-    Token token = client.getObjectStore().getDelegationToken(new Text(renewer));
-    if (Objects.isNull(token)) {
-      System.err.println("Error: Get delegation token operation failed. Check" +
-          " OzoneManager logs for more details.");
-      return null;
-    }
-
-    System.out.printf("%s", JsonUtils.toJsonStringWithDefaultPrettyPrinter(
-        token.encodeToUrlString()));
-    return null;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java
deleted file mode 100644
index 24f9100..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.token;
-
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-import org.apache.hadoop.security.token.Token;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-
-/**
- * Prints a delegation token decoded from a token file.
- */
-@Command(name = "print",
-    description = "print a delegation token.")
-public class PrintTokenHandler extends Handler {
-
-  @CommandLine.Option(names = {"--token", "-t"},
-      description = "file containing encoded token",
-      defaultValue = "/tmp/token.txt",
-      showDefaultValue = CommandLine.Help.Visibility.ALWAYS)
-  private String tokenFile;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    if (!OzoneSecurityUtil.isSecurityEnabled(createOzoneConfiguration())) {
-      System.err.println("Error:Token operations work only when security is " +
-          "enabled. To enable security set ozone.security.enabled to true.");
-      return null;
-    }
-
-    if (Files.notExists(Paths.get(tokenFile))) {
-      System.err.println("Error: Print token operation failed as token file: "
-          + tokenFile + " containing encoded token doesn't exist.");
-      return null;
-    }
-
-    String encodedToken = new String(Files.readAllBytes(Paths.get(tokenFile)),
-        StandardCharsets.UTF_8);
-    Token token = new Token();
-    token.decodeFromUrlString(encodedToken);
-
-    System.out.printf("%s", JsonUtils.toJsonStringWithDefaultPrettyPrinter(
-        token.toString()));
-    return null;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/RenewTokenHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/RenewTokenHandler.java
deleted file mode 100644
index faf74ae..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/RenewTokenHandler.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.token;
-
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.security.token.Token;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-
-/**
- * Executes renewDelegationToken api.
- */
-@Command(name = "renew",
-    description = "renew a delegation token.")
-public class RenewTokenHandler extends Handler {
-
-  @CommandLine.Option(names = {"--token", "-t"},
-      description = "file containing encoded token",
-      defaultValue = "/tmp/token.txt",
-      showDefaultValue = CommandLine.Help.Visibility.ALWAYS)
-  private String tokenFile;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    OzoneAddress address = new OzoneAddress("");
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    if (!OzoneSecurityUtil.isSecurityEnabled(createOzoneConfiguration())) {
-      System.err.println("Error: Token operations work only when security is " +
-          "enabled. To enable security set ozone.security.enabled to true.");
-      return null;
-    }
-
-    if (Files.notExists(Paths.get(tokenFile))) {
-      System.err.println("Error:Renew token operation failed as token file: "
-          + tokenFile + " containing encoded token doesn't exist.");
-      return null;
-    }
-    Token token = new Token();
-    token.decodeFromUrlString(
-        new String(Files.readAllBytes(Paths.get(tokenFile)),
-            StandardCharsets.UTF_8));
-    long expiryTime = client.getObjectStore().renewDelegationToken(token);
-
-    System.out.printf("Token renewed successfully, expiry time: %s",
-        expiryTime);
-    return null;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/TokenCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/TokenCommands.java
deleted file mode 100644
index 2501ad9..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/TokenCommands.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.token;
-
-import org.apache.hadoop.hdds.cli.GenericParentCommand;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.cli.MissingSubcommandException;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-import java.util.concurrent.Callable;
-
-/**
- * Sub-command to group token related operations.
- */
-@Command(name = "token",
-    description = "Token specific operations",
-    subcommands = {
-        GetTokenHandler.class,
-        CancelTokenHandler.class,
-        RenewTokenHandler.class,
-        PrintTokenHandler.class
-    },
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class TokenCommands implements GenericParentCommand, Callable<Void> {
-
-  @ParentCommand
-  private Shell shell;
-
-  @Override
-  public Void call() throws Exception {
-    throw new MissingSubcommandException(
-        this.shell.getCmd().getSubcommands().get("token"));
-  }
-
-  @Override
-  public boolean isVerbose() {
-    return shell.isVerbose();
-  }
-
-  @Override
-  public OzoneConfiguration createOzoneConfiguration() {
-    return shell.createOzoneConfiguration();
-  }
-}
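
All four token subcommands above share one convention: the delegation token travels as a URL-safe string, by default in /tmp/token.txt, and is rebuilt with `Token.decodeFromUrlString`. A small round-trip sketch using only the Hadoop APIs the handlers themselves import (the path and the empty token are illustrative):

```java
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.hadoop.security.token.Token;

public final class TokenFileRoundTrip {
  public static void main(String[] args) throws Exception {
    Path tokenFile = Paths.get("/tmp/token.txt");  // default used by the handlers

    // Write side: what "token get" printed for the user to save.
    Token<?> token = new Token<>();
    Files.write(tokenFile,
        token.encodeToUrlString().getBytes(StandardCharsets.UTF_8));

    // Read side: what print/renew/cancel did before calling the ObjectStore.
    String encoded = new String(Files.readAllBytes(tokenFile),
        StandardCharsets.UTF_8);
    Token<?> decoded = new Token<>();
    decoded.decodeFromUrlString(encoded);
    System.out.println(decoded);
  }
}
```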
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/package-info.java
deleted file mode 100644
index 5e03895..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/package-info.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/**
- * Ozone delegation token commands for the Ozone shell.
- */
-package org.apache.hadoop.ozone.web.ozShell.token;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java
deleted file mode 100644
index b9d5743..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.volume;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-import java.util.Objects;
-
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-
-/**
- * Add acl handler for volume.
- */
-@Command(name = "addacl",
-    description = "Add a new Acl.")
-public class AddAclVolumeHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @CommandLine.Option(names = {"--acl", "-a"},
-      required = true,
-      description = "Add acl." +
-          "r = READ," +
-          "w = WRITE," +
-          "c = CREATE," +
-          "d = DELETE," +
-          "l = LIST," +
-          "a = ALL," +
-          "n = NONE," +
-          "x = READ_AC," +
-          "y = WRITE_AC" +
-          "Ex user:user1:rw or group:hadoop:rw")
-  private String acl;
-
-  @CommandLine.Option(names = {"--store", "-s"},
-      required = false,
-      description = "store type. i.e OZONE or S3")
-  private String storeType;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    Objects.requireNonNull(acl, "New acl to be added not specified.");
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureVolumeAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-    }
-
-    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
-        .setVolumeName(volumeName)
-        .setResType(OzoneObj.ResourceType.VOLUME)
-        .setStoreType(storeType == null ? OZONE :
-            OzoneObj.StoreType.valueOf(storeType))
-        .build();
-
-    boolean result = client.getObjectStore().addAcl(obj,
-        OzoneAcl.parseAcl(acl));
-
-    System.out.printf("%s%n", "Acl added successfully: " + result);
-
-    client.close();
-    return null;
-  }
-
-}
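
The `--acl` option accepts a spec of the form `type:name:rights` using the single-letter codes listed above. A hedged sketch of the parse-then-apply flow, restricted to types the handler imports — the volume name is illustrative, and the final `addAcl` call is left as a comment because it needs a live cluster:

```java
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;

public final class AclSpecDemo {
  public static void main(String[] args) {
    // "user:user1:rw" grants READ and WRITE to user1.
    OzoneAcl acl = OzoneAcl.parseAcl("user:user1:rw");

    OzoneObj volume = OzoneObjInfo.Builder.newBuilder()
        .setVolumeName("vol1")                      // illustrative name
        .setResType(OzoneObj.ResourceType.VOLUME)
        .setStoreType(OzoneObj.StoreType.OZONE)
        .build();

    // Against a live cluster the handler then calls:
    //   client.getObjectStore().addAcl(volume, acl);
    System.out.println(acl + " -> volume " + volume.getVolumeName());
  }
}
```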
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
deleted file mode 100644
index ddd8350..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.volume;
-
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.Parameters;
-
-/**
- * Executes the create volume call for the shell.
- */
-@Command(name = "create",
-    description = "Creates a volume for the specified user")
-public class CreateVolumeHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_VOLUME_URI_DESCRIPTION)
-  private String uri;
-
-  @Option(names = {"--user", "-u"},
-      description = "Owner of of the volume")
-  private String userName;
-
-  @Option(names = {"--quota", "-q"},
-      description =
-          "Quota of the newly created volume (eg. 1G)")
-  private String quota;
-
-  @Option(names = {"--root"},
-      description = "Development flag to execute the "
-          + "command as the admin (hdfs) user.")
-  private boolean root;
-
-  /**
-   * Executes the Create Volume.
-   */
-  @Override
-  public Void call() throws Exception {
-    if (userName == null) {
-      userName = UserGroupInformation.getCurrentUser().getUserName();
-    }
-
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureVolumeAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume name : %s%n", volumeName);
-    }
-
-    String rootName;
-    if (root) {
-      rootName = "hdfs";
-    } else {
-      rootName = UserGroupInformation.getCurrentUser().getShortUserName();
-    }
-
-    VolumeArgs.Builder volumeArgsBuilder = VolumeArgs.newBuilder()
-        .setAdmin(rootName)
-        .setOwner(userName);
-    if (quota != null) {
-      volumeArgsBuilder.setQuota(quota);
-    }
-    client.getObjectStore().createVolume(volumeName, volumeArgsBuilder.build());
-
-    if (isVerbose()) {
-      OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
-      ObjectPrinter.printObjectAsJson(vol);
-    }
-    return null;
-  }
-
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
deleted file mode 100644
index 87286d2..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.volume;
-
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-/**
- * Executes deleteVolume call for the shell.
- */
-@Command(name = "delete",
-    description = "deletes a volume if it is empty")
-public class DeleteVolumeHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_VOLUME_URI_DESCRIPTION)
-  private String uri;
-
-  /**
-   * Executes the delete volume call.
-   */
-  @Override
-  public Void call() throws Exception {
-
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureVolumeAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume name : %s%n", volumeName);
-    }
-
-    client.getObjectStore().deleteVolume(volumeName);
-    System.out.printf("Volume %s is deleted%n", volumeName);
-    return null;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java
deleted file mode 100644
index 6c0bb20..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.volume;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-import java.util.List;
-
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-
-/**
- * Get acl handler for volume.
- */
-@Command(name = "getacl",
-    description = "List all acls.")
-public class GetAclVolumeHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @CommandLine.Option(names = {"--store", "-s"},
-      required = false,
-      description = "store type. i.e OZONE or S3")
-  private String storeType;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureVolumeAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-    String volumeName = address.getVolumeName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-    }
-
-    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
-        .setVolumeName(volumeName)
-        .setResType(OzoneObj.ResourceType.VOLUME)
-        .setStoreType(storeType == null ? OZONE :
-            OzoneObj.StoreType.valueOf(storeType))
-        .build();
-    List<OzoneAcl> result = client.getObjectStore().getAcl(obj);
-    System.out.printf("%s%n",
-        JsonUtils.toJsonStringWithDefaultPrettyPrinter(result));
-    client.close();
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java
deleted file mode 100644
index 0d8723f..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.volume;
-
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-/**
- * Executes volume Info calls.
- */
-@Command(name = "info",
-    description = "returns information about a specific volume")
-public class InfoVolumeHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_VOLUME_URI_DESCRIPTION)
-  private String uri;
-
-  /**
-   * Executes volume Info.
-   */
-  @Override
-  public Void call() throws Exception {
-
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureVolumeAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-
-    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
-    ObjectPrinter.printObjectAsJson(vol);
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
deleted file mode 100644
index a486fb1..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.volume;
-
-import java.util.Iterator;
-
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.Parameters;
-
-/**
- * Executes List Volume call.
- */
-@Command(name = "list",
-    aliases = "ls",
-    description = "List the volumes of a given user")
-public class ListVolumeHandler extends Handler {
-
-  @Parameters(arity = "1..1",
-      description = Shell.OZONE_VOLUME_URI_DESCRIPTION,
-      defaultValue = "/")
-  private String uri;
-
-  @Option(names = {"--length", "-l"},
-      description = "Limit of the max results",
-      defaultValue = "100")
-  private int maxVolumes;
-
-  @Option(names = {"--start", "-s"},
-      description = "The first volume to start the listing")
-  private String startVolume;
-
-  @Option(names = {"--prefix", "-p"},
-      description = "Prefix to filter the volumes")
-  private String prefix;
-
-  @Option(names = {"--user", "-u"},
-      description = "Owner of the volumes to list.")
-  private String userName;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureRootAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    if (userName == null) {
-      userName = UserGroupInformation.getCurrentUser().getUserName();
-    }
-
-    if (maxVolumes < 1) {
-      throw new IllegalArgumentException(
-          "the length should be a positive number");
-    }
-
-    Iterator<? extends OzoneVolume> volumeIterator;
-    if(userName != null) {
-      volumeIterator = client.getObjectStore()
-          .listVolumesByUser(userName, prefix, startVolume);
-    } else {
-      volumeIterator = client.getObjectStore().listVolumes(prefix);
-    }
-
-    int counter = 0;
-    while (maxVolumes > 0 && volumeIterator.hasNext()) {
-      OzoneVolume next = volumeIterator.next();
-      ObjectPrinter.printObjectAsJson(next);
-      maxVolumes -= 1;
-      counter++;
-    }
-
-    if (isVerbose()) {
-      System.out.printf("Found : %d volumes for user : %s ", counter,
-          userName);
-    }
-
-    return null;
-  }
-}
-
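
The listing loop above caps output at `--length` entries while the server-side iterator pages lazily. The core pattern, reduced to plain Java — no Ozone APIs, and the data is illustrative:

```java
import java.util.Arrays;
import java.util.Iterator;

public final class BoundedListingDemo {
  public static void main(String[] args) {
    Iterator<String> volumes =
        Arrays.asList("vol1", "vol2", "vol3").iterator();
    int remaining = 2;  // corresponds to --length
    int found = 0;

    // Stop at the cap even if the iterator could yield more.
    while (remaining > 0 && volumes.hasNext()) {
      System.out.println(volumes.next());
      remaining--;
      found++;
    }
    System.out.printf("Found %d volumes%n", found);
  }
}
```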
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java
deleted file mode 100644
index d984f48..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.volume;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-import java.util.Objects;
-
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-
-/**
- * Remove acl handler for volume.
- */
-@Command(name = "removeacl",
-    description = "Remove an acl.")
-public class RemoveAclVolumeHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @CommandLine.Option(names = {"--acl", "-a"},
-      required = true,
-      description = "Remove acl." +
-          "r = READ," +
-          "w = WRITE," +
-          "c = CREATE," +
-          "d = DELETE," +
-          "l = LIST," +
-          "a = ALL," +
-          "n = NONE," +
-          "x = READ_AC," +
-          "y = WRITE_AC" +
-          "Ex user:user1:rw or group:hadoop:rw")
-  private String acl;
-
-  @CommandLine.Option(names = {"--store", "-s"},
-      required = false,
-      description = "store type. i.e OZONE or S3")
-  private String storeType;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    Objects.requireNonNull(acl, "ACL to be removed not specified.");
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureVolumeAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-    }
-
-    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
-        .setVolumeName(volumeName)
-        .setResType(OzoneObj.ResourceType.VOLUME)
-        .setStoreType(storeType == null ? OZONE :
-            OzoneObj.StoreType.valueOf(storeType))
-        .build();
-
-    boolean result = client.getObjectStore().removeAcl(obj,
-        OzoneAcl.parseAcl(acl));
-
-    System.out.printf("%s%n", "Acl removed successfully: " + result);
-
-    client.close();
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java
deleted file mode 100644
index 185f862..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.volume;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import picocli.CommandLine;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-import java.util.Objects;
-
-import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-
-/**
- * Set acl handler for volume.
- */
-@Command(name = "setacl",
-    description = "Set acls.")
-public class SetAclVolumeHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION)
-  private String uri;
-
-  @CommandLine.Option(names = {"--acls", "-al"},
-      required = true,
-      description = "Comma separated acls." +
-          "r = READ," +
-          "w = WRITE," +
-          "c = CREATE," +
-          "d = DELETE," +
-          "l = LIST," +
-          "a = ALL," +
-          "n = NONE," +
-          "x = READ_AC," +
-          "y = WRITE_AC" +
-          "Ex user:user1:rw,user:user2:a,group:hadoop:a")
-  private String acls;
-
-  @CommandLine.Option(names = {"--store", "-s"},
-      required = false,
-      description = "store type. i.e OZONE or S3")
-  private String storeType;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-    Objects.requireNonNull(acls, "New acls to be added not specified.");
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureVolumeAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-
-    if (isVerbose()) {
-      System.out.printf("Volume Name : %s%n", volumeName);
-    }
-
-    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
-        .setVolumeName(volumeName)
-        .setResType(OzoneObj.ResourceType.VOLUME)
-        .setStoreType(storeType == null ? OZONE :
-            OzoneObj.StoreType.valueOf(storeType))
-        .build();
-    System.out.printf(" acls" +acls.length() + " " + acls);
-    boolean result = client.getObjectStore().setAcl(obj,
-        OzoneAcl.parseAcls(acls));
-
-    System.out.printf("%s%n", "Acl set successfully: " + result);
-
-    client.close();
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java
deleted file mode 100644
index 7ddeae9..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.volume;
-
-import org.apache.hadoop.hdds.client.OzoneQuota;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter;
-import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.Parameters;
-
-/**
- * Executes update volume calls.
- */
-@Command(name = "update",
-    description = "Updates parameter of the volumes")
-public class UpdateVolumeHandler extends Handler {
-
-  @Parameters(arity = "1..1", description = Shell.OZONE_VOLUME_URI_DESCRIPTION)
-  private String uri;
-
-  @Option(names = {"--user"},
-      description = "Owner of the volume to set")
-  private String ownerName;
-
-  @Option(names = {"--quota"},
-      description = "Quota of the volume to set"
-          + "(eg. 1G)")
-  private String quota;
-
-  /**
-   * Executes the Client Calls.
-   */
-  @Override
-  public Void call() throws Exception {
-
-    OzoneAddress address = new OzoneAddress(uri);
-    address.ensureVolumeAddress();
-    OzoneClient client = address.createClient(createOzoneConfiguration());
-
-    String volumeName = address.getVolumeName();
-
-    OzoneVolume volume = client.getObjectStore().getVolume(volumeName);
-    if (quota != null && !quota.isEmpty()) {
-      volume.setQuota(OzoneQuota.parseQuota(quota));
-    }
-
-    if (ownerName != null && !ownerName.isEmpty()) {
-      volume.setOwner(ownerName);
-    }
-
-    ObjectPrinter.printObjectAsJson(volume);
-    return null;
-  }
-}
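
Quota strings such as "1G" are converted by `OzoneQuota.parseQuota` before being applied through `volume.setQuota`. A minimal sketch using only the type the handler imports; the value is illustrative:

```java
import org.apache.hadoop.hdds.client.OzoneQuota;

public final class QuotaDemo {
  public static void main(String[] args) {
    // Parses a human-readable size into an OzoneQuota instance.
    OzoneQuota quota = OzoneQuota.parseQuota("1G");
    System.out.println("Parsed quota: " + quota);
  }
}
```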
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java
deleted file mode 100644
index 833457b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell.volume;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.GenericParentCommand;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.cli.MissingSubcommandException;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * Subcommand to group volume related operations.
- */
-@Command(name = "volume",
-    aliases = "vol",
-    description = "Volume specific operations",
-    subcommands = {
-        InfoVolumeHandler.class,
-        ListVolumeHandler.class,
-        CreateVolumeHandler.class,
-        UpdateVolumeHandler.class,
-        DeleteVolumeHandler.class,
-        AddAclVolumeHandler.class,
-        RemoveAclVolumeHandler.class,
-        SetAclVolumeHandler.class,
-        GetAclVolumeHandler.class
-    },
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class VolumeCommands implements GenericParentCommand, Callable<Void> {
-
-  @ParentCommand
-  private Shell shell;
-
-  @Override
-  public Void call() throws Exception {
-    throw new MissingSubcommandException(
-        this.shell.getCmd().getSubcommands().get("volume"));
-  }
-
-  @Override
-  public boolean isVerbose() {
-    return shell.isVerbose();
-  }
-
-  @Override
-  public OzoneConfiguration createOzoneConfiguration() {
-    return shell.createOzoneConfiguration();
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/package-info.java
deleted file mode 100644
index fc19274..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/**
- * Volume related commands for the Ozone shell.
- */
-package org.apache.hadoop.ozone.web.ozShell.volume;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/package-info.java
deleted file mode 100644
index 1a7275c..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web;
-
-/**
- * This package contains generic classes for the internal HTTP server
- * and REST interfaces.
- */
diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html
deleted file mode 100644
index 1b5e693..0000000
--- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html
+++ /dev/null
@@ -1,70 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
-        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<html lang="en">
-<head>
-    <meta charset="utf-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
-    <meta name="description" content="Ozone Manager">
-
-    <title>Ozone Manager</title>
-
-    <link href="static/bootstrap-3.4.1/css/bootstrap.min.css" rel="stylesheet">
-    <link href="static/hadoop.css" rel="stylesheet">
-    <link href="static/nvd3-1.8.5.min.css" rel="stylesheet">
-
-    <link href="static/ozone.css" rel="stylesheet">
-
-</head>
-
-<body ng-app="ozoneManager">
-
-<header class="navbar navbar-inverse navbar-fixed-top bs-docs-nav">
-    <div class="container-fluid">
-        <div class="navbar-header">
-            <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar"
-                    aria-expanded="false" aria-controls="navbar">
-                <span class="sr-only">Toggle navigation</span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-            </button>
-            <a class="navbar-brand" href="#">Ozone Manager</a>
-        </div>
-        <navmenu
-                metrics="{ 'OM metrics' : '#!/metrics/ozoneManager', 'Rpc metrics' : '#!/metrics/rpc'}"></navmenu>
-    </div>
-</header>
-
-<div class="container-fluid">
-    <ng-view></ng-view>
-</div><!-- /.container -->
-
-<script src="static/jquery-3.4.1.min.js"></script>
-<script src="static/angular-1.6.4.min.js"></script>
-<script src="static/angular-route-1.6.4.min.js"></script>
-<script src="static/d3-3.5.17.min.js"></script>
-<script src="static/nvd3-1.8.5.min.js"></script>
-<script src="static/angular-nvd3-1.0.9.min.js"></script>
-<script src="static/ozone.js"></script>
-<script src="ozoneManager.js"></script>
-<script src="static/bootstrap-3.4.1/js/bootstrap.min.js"></script>
-</body>
-</html>
diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.css b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.css
deleted file mode 100644
index e442adc..0000000
--- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.css
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- *   Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
-*/
-body {
-  padding-top: 50px;
-}
-.starter-template {
-  padding: 40px 15px;
-  text-align: center;
-}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.html b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.html
deleted file mode 100644
index 0821899..0000000
--- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.html
+++ /dev/null
@@ -1,18 +0,0 @@
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-<overview>
-</overview>
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-metrics.html b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-metrics.html
deleted file mode 100644
index 839c64c..0000000
--- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-metrics.html
+++ /dev/null
@@ -1,44 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<h1>OzoneManager Metrics</h1>
-
-<div ng-repeat="(type,numbers) in $ctrl.metrics.nums">
-    <h2>{{type}}</h2>
-    <div class="container">
-        <div class="col-md-6">
-            <h3>Requests ({{numbers.ops || numbers.total}} ops)</h3>
-            <nvd3 options="$ctrl.graphOptions"
-                  data="numbers.all"></nvd3>
-        </div>
-        <div class="col-md-6">
-            <h3>Failures</h3>
-            <nvd3 options="$ctrl.graphOptions"
-                  data="numbers.failures"></nvd3>
-        </div>
-    </div>
-</div>
-
-<div ng-show="$ctrl.metrics.others.length > 0">
-    <h2>Other JMX properties</h2>
-
-    <table class="table">
-        <tr ng-repeat="metric in $ctrl.metrics.others">
-            <td>{{metric.key}}</td>
-            <td>{{metric.value}}</td>
-        </tr>
-    </table>
-</div>
diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js
deleted file mode 100644
index fda6d8f..0000000
--- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-(function () {
-    "use strict";
-
-    var isIgnoredJmxKeys = function (key) {
-        return key === 'name' || key === 'modelerType' || key.match(/tag.*/);
-    };
-
-    angular.module('ozoneManager', ['ozone', 'nvd3']);
-    angular.module('ozoneManager').config(function ($routeProvider) {
-        $routeProvider
-            .when("/metrics/ozoneManager", {
-                template: "<om-metrics></om-metrics>"
-            });
-    });
-    angular.module('ozoneManager').component('omMetrics', {
-        templateUrl: 'om-metrics.html',
-        controller: function ($http) {
-            var ctrl = this;
-
-            ctrl.graphOptions = {
-                chart: {
-                    type: 'pieChart',
-                    height: 500,
-                    x: function (d) {
-                        return d.key;
-                    },
-                    y: function (d) {
-                        return d.value;
-                    },
-                    showLabels: true,
-                    labelType: 'value',
-                    duration: 500,
-                    labelThreshold: 0.01,
-                    valueFormat: function(d) {
-                        return d3.format('d')(d);
-                    },
-                    legend: {
-                        margin: {
-                            top: 5,
-                            right: 35,
-                            bottom: 5,
-                            left: 0
-                        }
-                    }
-                }
-            };
-
-
-            $http.get("jmx?qry=Hadoop:service=OzoneManager,name=OMMetrics")
-                .then(function (result) {
-
-                    var groupedMetrics = {others: [], nums: {}};
-                    var metrics = result.data.beans[0];
-                    for (var key in metrics) {
-                        var numericalStatistic = key.match(/Num([A-Z][a-z]+)([A-Z].+?)(Fails)?$/);
-                        if (numericalStatistic) {
-                            var type = numericalStatistic[1];
-                            var name = numericalStatistic[2];
-                            var failed = numericalStatistic[3];
-                            groupedMetrics.nums[type] = groupedMetrics.nums[type] || {
-                                    failures: [],
-                                    all: [],
-                                    total: 0,
-                                };
-                            if (failed) {
-                                groupedMetrics.nums[type].failures.push({
-                                    key: name,
-                                    value: metrics[key]
-                                });
-                            } else {
-                                if (name === "Ops") {
-                                    groupedMetrics.nums[type].ops = metrics[key];
-                                } else {
-                                    groupedMetrics.nums[type].total += metrics[key];
-                                    groupedMetrics.nums[type].all.push({
-                                        key: name,
-                                        value: metrics[key]
-                                    });
-                                }
-                            }
-                        } else if (isIgnoredJmxKeys(key)) {
-                            //ignore
-                        } else {
-                            groupedMetrics.others.push({
-                                'key': key,
-                                'value': metrics[key]
-                            });
-                        }
-                    }
-                    ctrl.metrics = groupedMetrics;
-                });
-        }
-    });
-
-})();
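For reference, the metric grouping in the deleted ozoneManager.js above hinges on the regular expression Num([A-Z][a-z]+)([A-Z].+?)(Fails)?$: group 1 is the metric type, group 2 the operation name, and a trailing "Fails" marks a failure counter. A minimal Java sketch of the same grouping follows; the sample keys are hypothetical.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch only: mirrors the key-grouping regex from the deleted ozoneManager.js.
public class OmMetricKeyGrouping {
  private static final Pattern NUMERICAL_STATISTIC =
      Pattern.compile("Num([A-Z][a-z]+)([A-Z].+?)(Fails)?$");

  public static void main(String[] args) {
    // Sample JMX keys (hypothetical, for illustration only).
    for (String key : new String[] {
        "NumVolumeCreates", "NumVolumeCreateFails", "NumKeyOps"}) {
      Matcher m = NUMERICAL_STATISTIC.matcher(key);
      if (m.find()) {
        // group(1) = type, group(2) = operation, group(3) != null = failure counter
        System.out.printf("%s -> type=%s, name=%s, failed=%b%n",
            key, m.group(1), m.group(2), m.group(3) != null);
      }
    }
  }
}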
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java
deleted file mode 100644
index 982e87e..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.client.ContainerBlockID;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.protocol.proto
-    .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result;
-import static org.apache.hadoop.hdds.protocol.proto
-    .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result.success;
-import static org.apache.hadoop.hdds.protocol.proto
-    .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result.unknownFailure;
-
-/**
- * This is a testing client that allows us to intercept calls from OzoneManager
- * to SCM.
- * <p>
- * TODO: OzoneManager#getScmBlockClient -- so that we can load this class up via
- * config setting into OzoneManager. Right now, we just pass this to
- * KeyDeletingService only.
- * <p>
- * TODO: Move this class to a generic test utils so we can use this class in
- * other Ozone Manager tests.
- */
-public class ScmBlockLocationTestingClient implements ScmBlockLocationProtocol {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ScmBlockLocationTestingClient.class);
-  private final String clusterID;
-  private final String scmId;
-
-  // 0 means no calls will fail, 1 means all calls will fail, 2 means every
-  // second call will fail, 3 means every third, and so on.
-  private final int failCallsFrequency;
-  private int currentCall = 0;
-
-  /**
-   * If ClusterID or SCMID is blank, a per-instance ID is generated.
-   *
-   * @param clusterID - String or blank.
-   * @param scmId - String or blank.
-   * @param failCallsFrequency - Set to 0 for no failures, 1 to fail every
-   * call, or a larger value n to fail every n-th call.
-   */
-  public ScmBlockLocationTestingClient(String clusterID, String scmId,
-      int failCallsFrequency) {
-    this.clusterID = StringUtils.isNotBlank(clusterID) ? clusterID :
-        UUID.randomUUID().toString();
-    this.scmId = StringUtils.isNotBlank(scmId) ? scmId :
-        UUID.randomUUID().toString();
-    this.failCallsFrequency = Math.abs(failCallsFrequency);
-    switch (this.failCallsFrequency) {
-    case 0:
-      LOG.debug("Set to no failure mode, all delete block calls will " +
-          "succeed.");
-      break;
-    case 1:
-      LOG.debug("Set to all failure mode. All delete block calls to SCM" +
-          " will fail.");
-      break;
-    default:
-      LOG.debug("Set to Mix mode, every {} -th call will fail",
-          this.failCallsFrequency);
-    }
-
-  }
-
-  /**
-   * Returns fake blocks to the BlockManager so we get blocks in the database.
-   * @param size - size of the block.
-   * @param num - number of blocks to allocate.
-   * @param type - Replication type.
-   * @param factor - Replication factor.
-   * @param owner - String owner.
-   * @param excludeList - list of datanodes/pipelines to exclude.
-   * @return a singleton list containing one fake allocated block.
-   * @throws IOException on failure.
-   */
-  @Override
-  public List<AllocatedBlock> allocateBlock(long size, int num,
-      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
-      String owner, ExcludeList excludeList) throws IOException {
-    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-    Pipeline pipeline = createPipeline(datanodeDetails);
-    long containerID = Time.monotonicNow();
-    long localID = Time.monotonicNow();
-    AllocatedBlock.Builder abb =
-        new AllocatedBlock.Builder()
-            .setContainerBlockID(new ContainerBlockID(containerID, localID))
-            .setPipeline(pipeline);
-    return Collections.singletonList(abb.build());
-  }
-
-  private Pipeline createPipeline(DatanodeDetails datanode) {
-    List<DatanodeDetails> dns = new ArrayList<>();
-    dns.add(datanode);
-    Pipeline pipeline = Pipeline.newBuilder()
-        .setState(Pipeline.PipelineState.OPEN)
-        .setId(PipelineID.randomId())
-        .setType(HddsProtos.ReplicationType.STAND_ALONE)
-        .setFactor(HddsProtos.ReplicationFactor.ONE)
-        .setNodes(dns)
-        .build();
-    return pipeline;
-  }
-
-  @Override
-  public List<DeleteBlockGroupResult> deleteKeyBlocks(
-      List<BlockGroup> keyBlocksInfoList) throws IOException {
-    List<DeleteBlockGroupResult> results = new ArrayList<>();
-    Result result;
-    // Use a fresh result list per block group so earlier groups' results are
-    // not duplicated into later groups.
-    for (BlockGroup keyBlocks : keyBlocksInfoList) {
-      List<DeleteBlockResult> blockResultList = new ArrayList<>();
-      for (BlockID blockKey : keyBlocks.getBlockIDList()) {
-        currentCall++;
-        switch (this.failCallsFrequency) {
-        case 0:
-          result = success;
-          break;
-        case 1:
-          result = unknownFailure;
-          break;
-        default:
-          if (currentCall % this.failCallsFrequency == 0) {
-            result = unknownFailure;
-          } else {
-            result = success;
-          }
-        }
-        blockResultList.add(new DeleteBlockResult(blockKey, result));
-      }
-      results.add(new DeleteBlockGroupResult(keyBlocks.getGroupID(),
-          blockResultList));
-    }
-    return results;
-  }
-
-  @Override
-  public ScmInfo getScmInfo() throws IOException {
-    ScmInfo.Builder builder =
-        new ScmInfo.Builder()
-            .setClusterId(clusterID)
-            .setScmId(scmId);
-    return builder.build();
-  }
-
-  @Override
-  public List<DatanodeDetails> sortDatanodes(List<String> nodes,
-      String clientMachine) throws IOException {
-    return null;
-  }
-
-  @Override
-  public void close() throws IOException {
-
-  }
-}
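The heart of this testing client is the failCallsFrequency rule: 0 means no call ever fails, 1 means every call fails, and any larger value n makes every n-th call fail. A minimal standalone sketch of that rule; the class name is ours, not part of the deleted code.

// Sketch only: reproduces the failure-injection rule of ScmBlockLocationTestingClient.
public final class FailureFrequency {
  private final int failCallsFrequency;
  private int currentCall = 0;

  public FailureFrequency(int failCallsFrequency) {
    this.failCallsFrequency = Math.abs(failCallsFrequency);
  }

  public boolean nextCallFails() {
    currentCall++;
    switch (failCallsFrequency) {
    case 0:
      return false;                                   // no-failure mode
    case 1:
      return true;                                    // every call fails
    default:
      return currentCall % failCallsFrequency == 0;   // every n-th call fails
    }
  }
}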
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
deleted file mode 100644
index c151afa..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
+++ /dev/null
@@ -1,344 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.junit.Assert;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.TemporaryFolder;
-import org.junit.runner.RunWith;
-import org.mockito.Mockito;
-import org.mockito.runners.MockitoJUnitRunner;
-
-/**
- * Tests BucketManagerImpl, mocks OMMetadataManager for testing.
- */
-@RunWith(MockitoJUnitRunner.class)
-@Ignore("Bucket Manager does not use cache; disable it for now.")
-public class TestBucketManagerImpl {
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OzoneConfiguration createNewTestPath() throws IOException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
-      Assert.assertTrue(newFolder.mkdirs());
-    }
-    ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString());
-    return conf;
-  }
-
-  private OmMetadataManagerImpl createSampleVol() throws IOException {
-    OzoneConfiguration conf = createNewTestPath();
-    OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf);
-    String volumeKey = metaMgr.getVolumeKey("sampleVol");
-    // This is a simple hack for testing: we only check whether the volume
-    // exists via a null check and never parse the value part, so writing
-    // some dummy value is enough.
-    OmVolumeArgs args =
-        OmVolumeArgs.newBuilder()
-            .setVolume("sampleVol")
-            .setAdminName("bilbo")
-            .setOwnerName("bilbo")
-            .build();
-    metaMgr.getVolumeTable().put(volumeKey, args);
-    return metaMgr;
-  }
-
-  @Test
-  public void testCreateBucketWithoutVolume() throws Exception {
-    thrown.expectMessage("Volume doesn't exist");
-    OzoneConfiguration conf = createNewTestPath();
-    OmMetadataManagerImpl metaMgr =
-        new OmMetadataManagerImpl(conf);
-    try {
-      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-      OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-          .setVolumeName("sampleVol")
-          .setBucketName("bucketOne")
-          .build();
-      bucketManager.createBucket(bucketInfo);
-    } catch (OMException omEx) {
-      Assert.assertEquals(ResultCodes.VOLUME_NOT_FOUND,
-          omEx.getResult());
-      throw omEx;
-    } finally {
-      metaMgr.getStore().close();
-    }
-  }
-
-  @Test
-  public void testCreateEncryptedBucket() throws Exception {
-    OmMetadataManagerImpl metaMgr = createSampleVol();
-
-    KeyProviderCryptoExtension kmsProvider = Mockito.mock(
-        KeyProviderCryptoExtension.class);
-    String testBekName = "key1";
-    String testCipherName = "AES/CTR/NoPadding";
-
-    KeyProvider.Metadata mockMetadata = Mockito.mock(KeyProvider.Metadata
-        .class);
-    Mockito.when(kmsProvider.getMetadata(testBekName)).thenReturn(mockMetadata);
-    Mockito.when(mockMetadata.getCipher()).thenReturn(testCipherName);
-
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr,
-        kmsProvider);
-    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setBucketEncryptionKey(new
-            BucketEncryptionKeyInfo.Builder().setKeyName("key1").build())
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    Assert.assertNotNull(bucketManager.getBucketInfo("sampleVol", "bucketOne"));
-
-    OmBucketInfo bucketInfoRead =
-        bucketManager.getBucketInfo("sampleVol",  "bucketOne");
-
-    Assert.assertEquals(bucketInfo.getEncryptionKeyInfo().getKeyName(),
-        bucketInfoRead.getEncryptionKeyInfo().getKeyName());
-    metaMgr.getStore().close();
-  }
-
-
-  @Test
-  public void testCreateBucket() throws Exception {
-    OmMetadataManagerImpl metaMgr = createSampleVol();
-
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    Assert.assertNotNull(bucketManager.getBucketInfo("sampleVol",
-        "bucketOne"));
-    metaMgr.getStore().close();
-  }
-
-  @Test
-  public void testCreateAlreadyExistingBucket() throws Exception {
-    thrown.expectMessage("Bucket already exist");
-    OmMetadataManagerImpl metaMgr = createSampleVol();
-
-    try {
-      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-      OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-          .setVolumeName("sampleVol")
-          .setBucketName("bucketOne")
-          .build();
-      bucketManager.createBucket(bucketInfo);
-      bucketManager.createBucket(bucketInfo);
-    } catch (OMException omEx) {
-      Assert.assertEquals(ResultCodes.BUCKET_ALREADY_EXISTS,
-          omEx.getResult());
-      throw omEx;
-    } finally {
-      metaMgr.getStore().close();
-    }
-  }
-
-  @Test
-  public void testGetBucketInfoForInvalidBucket() throws Exception {
-    thrown.expectMessage("Bucket not found");
-    OmMetadataManagerImpl metaMgr = createSampleVol();
-    try {
-      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-      bucketManager.getBucketInfo("sampleVol", "bucketOne");
-    } catch (OMException omEx) {
-      Assert.assertEquals(ResultCodes.BUCKET_NOT_FOUND,
-          omEx.getResult());
-      throw omEx;
-    } finally {
-      metaMgr.getStore().close();
-    }
-  }
-
-  @Test
-  public void testGetBucketInfo() throws Exception {
-    OmMetadataManagerImpl metaMgr = createSampleVol();
-
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setStorageType(StorageType.DISK)
-        .setIsVersionEnabled(false)
-        .build();
-    createBucket(metaMgr, bucketInfo);
-    OmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals("sampleVol", result.getVolumeName());
-    Assert.assertEquals("bucketOne", result.getBucketName());
-    Assert.assertEquals(StorageType.DISK,
-        result.getStorageType());
-    Assert.assertFalse(result.getIsVersionEnabled());
-    metaMgr.getStore().close();
-  }
-
-  private void createBucket(OMMetadataManager metadataManager,
-      OmBucketInfo bucketInfo) throws IOException {
-    TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo);
-  }
-
-  @Test
-  public void testSetBucketPropertyChangeStorageType() throws Exception {
-    OmMetadataManagerImpl metaMgr = createSampleVol();
-
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setStorageType(StorageType.DISK)
-        .build();
-    createBucket(metaMgr, bucketInfo);
-    OmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(StorageType.DISK,
-        result.getStorageType());
-    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setStorageType(StorageType.SSD)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(StorageType.SSD,
-        updatedResult.getStorageType());
-    metaMgr.getStore().close();
-  }
-
-  @Test
-  public void testSetBucketPropertyChangeVersioning() throws Exception {
-    OmMetadataManagerImpl metaMgr = createSampleVol();
-
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setIsVersionEnabled(false)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    OmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertFalse(result.getIsVersionEnabled());
-    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setIsVersionEnabled(true)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertTrue(updatedResult.getIsVersionEnabled());
-    metaMgr.getStore().close();
-  }
-
-  @Test
-  public void testDeleteBucket() throws Exception {
-    thrown.expectMessage("Bucket not found");
-    OmMetadataManagerImpl metaMgr = createSampleVol();
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    for (int i = 0; i < 5; i++) {
-      OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-          .setVolumeName("sampleVol")
-          .setBucketName("bucket_" + i)
-          .build();
-      bucketManager.createBucket(bucketInfo);
-    }
-    for (int i = 0; i < 5; i++) {
-      Assert.assertEquals("bucket_" + i,
-          bucketManager.getBucketInfo(
-              "sampleVol", "bucket_" + i).getBucketName());
-    }
-    try {
-      bucketManager.deleteBucket("sampleVol", "bucket_1");
-      Assert.assertNotNull(bucketManager.getBucketInfo(
-          "sampleVol", "bucket_2"));
-    } catch (IOException ex) {
-      Assert.fail(ex.getMessage());
-    }
-    try {
-      bucketManager.getBucketInfo("sampleVol", "bucket_1");
-    } catch (OMException omEx) {
-      Assert.assertEquals(ResultCodes.BUCKET_NOT_FOUND,
-          omEx.getResult());
-      throw omEx;
-    }
-    metaMgr.getStore().close();
-  }
-
-  @Test
-  public void testDeleteNonEmptyBucket() throws Exception {
-    thrown.expectMessage("Bucket is not empty");
-    OmMetadataManagerImpl metaMgr = createSampleVol();
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    //Create keys in bucket
-    metaMgr.getKeyTable().put("/sampleVol/bucketOne/key_one",
-        new OmKeyInfo.Builder()
-            .setBucketName("bucketOne")
-            .setVolumeName("sampleVol")
-            .setKeyName("key_one")
-            .setReplicationFactor(ReplicationFactor.ONE)
-            .setReplicationType(ReplicationType.STAND_ALONE)
-            .build());
-    metaMgr.getKeyTable().put("/sampleVol/bucketOne/key_two",
-        new OmKeyInfo.Builder()
-            .setBucketName("bucketOne")
-            .setVolumeName("sampleVol")
-            .setKeyName("key_two")
-            .setReplicationFactor(ReplicationFactor.ONE)
-            .setReplicationType(ReplicationType.STAND_ALONE)
-            .build());
-    try {
-      bucketManager.deleteBucket("sampleVol", "bucketOne");
-    } catch (OMException omEx) {
-      Assert.assertEquals(ResultCodes.BUCKET_NOT_EMPTY,
-          omEx.getResult());
-      throw omEx;
-    }
-    metaMgr.getStore().close();
-  }
-}
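These tests lean on the JUnit 4 ExpectedException rule: the expected message is declared up front, detailed assertions run in a catch block, and the exception is re-thrown so the rule can verify it at the end. A minimal self-contained sketch of the pattern; the exception text here is illustrative only.

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

// Sketch only: the expect-assert-rethrow pattern used by TestBucketManagerImpl.
public class ExpectedExceptionPatternTest {
  @Rule
  public ExpectedException thrown = ExpectedException.none();

  @Test
  public void failsWithExpectedMessage() throws Exception {
    thrown.expectMessage("Volume doesn't exist");
    try {
      throw new Exception("Volume doesn't exist");  // stand-in for the failing OM call
    } catch (Exception ex) {
      // assert on details here, then re-throw so the rule can see the exception
      throw ex;
    }
  }
}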
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
deleted file mode 100644
index 78e1c44..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.scm.storage.BlockInputStream;
-import org.apache.hadoop.ozone.client.io.KeyInputStream;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-
-/**
- * This class tests reading through KeyInputStream across multiple chunk streams.
- */
-public class TestChunkStreams {
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @Test
-  public void testReadGroupInputStream() throws Exception {
-    try (KeyInputStream groupInputStream = new KeyInputStream()) {
-
-      String dataString = RandomStringUtils.randomAscii(500);
-      byte[] buf = dataString.getBytes(UTF_8);
-      int offset = 0;
-      for (int i = 0; i < 5; i++) {
-        int tempOffset = offset;
-        BlockInputStream in =
-            new BlockInputStream(null, 100, null, null, true, null) {
-              private long pos = 0;
-              private ByteArrayInputStream in =
-                  new ByteArrayInputStream(buf, tempOffset, 100);
-
-              @Override
-              public synchronized void seek(long pos) throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public synchronized long getPos() throws IOException {
-                return pos;
-              }
-
-              @Override
-              public boolean seekToNewSource(long targetPos)
-                  throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public synchronized int read() throws IOException {
-                return in.read();
-              }
-
-              @Override
-              public synchronized int read(byte[] b, int off, int len)
-                  throws IOException {
-                int readLen = in.read(b, off, len);
-                pos += readLen;
-                return readLen;
-              }
-            };
-        offset += 100;
-        groupInputStream.addStream(in);
-      }
-
-      byte[] resBuf = new byte[500];
-      int len = groupInputStream.read(resBuf, 0, 500);
-
-      assertEquals(500, len);
-      assertEquals(dataString, new String(resBuf, UTF_8));
-    }
-  }
-
-  @Test
-  public void testErrorReadGroupInputStream() throws Exception {
-    try (KeyInputStream groupInputStream = new KeyInputStream()) {
-
-      String dataString = RandomStringUtils.randomAscii(500);
-      byte[] buf = dataString.getBytes(UTF_8);
-      int offset = 0;
-      for (int i = 0; i < 5; i++) {
-        int tempOffset = offset;
-        BlockInputStream in =
-            new BlockInputStream(null, 100, null, null, true, null) {
-              private long pos = 0;
-              private ByteArrayInputStream in =
-                  new ByteArrayInputStream(buf, tempOffset, 100);
-
-              @Override
-              public synchronized void seek(long pos) throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public synchronized long getPos() throws IOException {
-                return pos;
-              }
-
-              @Override
-              public synchronized boolean seekToNewSource(long targetPos)
-                  throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public synchronized int read() throws IOException {
-                return in.read();
-              }
-
-              @Override
-              public synchronized int read(byte[] b, int off, int len)
-                  throws IOException {
-                int readLen = in.read(b, off, len);
-                pos += readLen;
-                return readLen;
-              }
-            };
-        offset += 100;
-        groupInputStream.addStream(in);
-      }
-
-      byte[] resBuf = new byte[600];
-      // read 340 bytes first
-      int len = groupInputStream.read(resBuf, 0, 340);
-      assertEquals(3, groupInputStream.getCurrentStreamIndex());
-      assertEquals(60, groupInputStream.getRemainingOfIndex(3));
-      assertEquals(340, len);
-      assertEquals(dataString.substring(0, 340),
-          new String(resBuf, UTF_8).substring(0, 340));
-
-      // request 260 more bytes, but only 160 are left
-      len = groupInputStream.read(resBuf, 340, 260);
-      assertEquals(4, groupInputStream.getCurrentStreamIndex());
-      assertEquals(0, groupInputStream.getRemainingOfIndex(4));
-      assertEquals(160, len);
-      assertEquals(dataString, new String(resBuf, UTF_8).substring(0, 500));
-
-      // we have reached EOF, so a further read should return -1
-      len = groupInputStream.read(resBuf, 0, 1);
-      assertEquals(-1, len);
-    }
-  }
-}
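The assertions in testErrorReadGroupInputStream follow from simple arithmetic over five 100-byte sub-streams: a 340-byte read fully drains streams 0 through 2 and takes 40 bytes from stream 3, so the current stream index is 3 and 60 bytes remain there. A small sketch of that arithmetic; the variable names are ours.

// Sketch only: the stream-index arithmetic behind the 340-byte read assertions.
public class GroupReadArithmetic {
  public static void main(String[] args) {
    int streamSize = 100;
    int totalRead = 340;
    int currentStreamIndex = totalRead / streamSize;            // 3
    int remainingOfIndex = streamSize - totalRead % streamSize; // 60
    System.out.println("index=" + currentStreamIndex
        + " remaining=" + remainingOfIndex);
  }
}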
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java
deleted file mode 100644
index 3c707ba..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.hdds.utils.db.DBConfigFromFile;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-/**
- * Test Key Deleting Service.
- * <p>
- * This test does the following things:
- * <p>
- * 1. Creates a bunch of keys.
- * 2. Executes key deletes directly using the Metadata Manager.
- * 3. Waits for a while for the KeyDeletingService to pick them up and call
- *    into SCM.
- * 4. Confirms that the calls have been successful.
- */
-public class TestKeyDeletingService {
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OzoneConfiguration createConfAndInitValues() throws IOException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
-      Assert.assertTrue(newFolder.mkdirs());
-    }
-    System.setProperty(DBConfigFromFile.CONFIG_DIR, "/");
-    ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString());
-    conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
-        TimeUnit.MILLISECONDS);
-    conf.setQuietMode(false);
-
-    return conf;
-  }
-
-  /**
-   * In this test, we create a bunch of keys and delete them. Then we start the
-   * KeyDeletingService and pass an SCM client which does not fail. We make sure
-   * that all the keys that we deleted are picked up and deleted by
-   * OzoneManager.
-   *
-   * @throws IOException - on Failure.
-   */
-  @Test(timeout = 30000)
-  public void checkIfDeleteServiceIsDeletingKeys()
-      throws IOException, TimeoutException, InterruptedException {
-    OzoneConfiguration conf = createConfAndInitValues();
-    OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf);
-    KeyManager keyManager =
-        new KeyManagerImpl(
-            new ScmBlockLocationTestingClient(null, null, 0),
-            metaMgr, conf, UUID.randomUUID().toString(), null);
-    keyManager.start(conf);
-    final int keyCount = 100;
-    createAndDeleteKeys(keyManager, keyCount, 1);
-    KeyDeletingService keyDeletingService =
-        (KeyDeletingService) keyManager.getDeletingService();
-    GenericTestUtils.waitFor(
-        () -> keyDeletingService.getDeletedKeyCount().get() >= keyCount,
-        1000, 10000);
-    Assert.assertTrue(keyDeletingService.getRunCount().get() > 1);
-    Assert.assertEquals(
-        0, keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size());
-  }
-
-  @Test(timeout = 30000)
-  public void checkIfDeleteServiceWithFailingSCM()
-      throws IOException, TimeoutException, InterruptedException {
-    OzoneConfiguration conf = createConfAndInitValues();
-    OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf);
-    // failCallsFrequency = 1 means all calls fail.
-    KeyManager keyManager =
-        new KeyManagerImpl(
-            new ScmBlockLocationTestingClient(null, null, 1),
-            metaMgr, conf, UUID.randomUUID().toString(), null);
-    keyManager.start(conf);
-    final int keyCount = 100;
-    createAndDeleteKeys(keyManager, keyCount, 1);
-    KeyDeletingService keyDeletingService =
-        (KeyDeletingService) keyManager.getDeletingService();
-    Assert.assertEquals(
-        keyCount, keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size());
-    // Make sure that we have run the background thread 5 times or more
-    GenericTestUtils.waitFor(
-        () -> keyDeletingService.getRunCount().get() >= 5,
-        100, 1000);
-    // Since SCM calls are failing, deletedKeyCount should be zero.
-    Assert.assertEquals(0, keyDeletingService.getDeletedKeyCount().get());
-    Assert.assertEquals(
-        keyCount, keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size());
-  }
-
-  @Test(timeout = 30000)
-  public void checkDeletionForEmptyKey()
-      throws IOException, TimeoutException, InterruptedException {
-    OzoneConfiguration conf = createConfAndInitValues();
-    OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf);
-    // failCallsFrequency = 1 means all calls fail.
-    KeyManager keyManager =
-        new KeyManagerImpl(
-            new ScmBlockLocationTestingClient(null, null, 1),
-            metaMgr, conf, UUID.randomUUID().toString(), null);
-    keyManager.start(conf);
-    final int keyCount = 100;
-    createAndDeleteKeys(keyManager, keyCount, 0);
-    KeyDeletingService keyDeletingService =
-        (KeyDeletingService) keyManager.getDeletingService();
-
-    // Since empty keys are directly deleted from db there should be no
-    // pending deletion keys. Also deletedKeyCount should be zero.
-    Assert.assertEquals(
-        0, keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size());
-    // Make sure that we have run the background thread 2 times or more
-    GenericTestUtils.waitFor(
-        () -> keyDeletingService.getRunCount().get() >= 2,
-        100, 1000);
-    Assert.assertEquals(0, keyDeletingService.getDeletedKeyCount().get());
-  }
-
-  private void createAndDeleteKeys(KeyManager keyManager, int keyCount,
-      int numBlocks) throws IOException {
-    for (int x = 0; x < keyCount; x++) {
-      String volumeName = String.format("volume%s",
-          RandomStringUtils.randomAlphanumeric(5));
-      String bucketName = String.format("bucket%s",
-          RandomStringUtils.randomAlphanumeric(5));
-      String keyName = String.format("key%s",
-          RandomStringUtils.randomAlphanumeric(5));
-      // Cheat here: just create volume and bucket entries so that we can
-      // create the keys. We put the same data for key and value since the
-      // system does not decode the object.
-      TestOMRequestUtils.addVolumeToOM(keyManager.getMetadataManager(),
-          OmVolumeArgs.newBuilder()
-              .setOwnerName("o")
-              .setAdminName("a")
-              .setVolume(volumeName)
-              .build());
-
-      TestOMRequestUtils.addBucketToOM(keyManager.getMetadataManager(),
-          OmBucketInfo.newBuilder().setVolumeName(volumeName)
-              .setBucketName(bucketName)
-              .build());
-
-      OmKeyArgs arg =
-          new OmKeyArgs.Builder()
-              .setVolumeName(volumeName)
-              .setBucketName(bucketName)
-              .setKeyName(keyName)
-              .setAcls(Collections.emptyList())
-              .setLocationInfoList(new ArrayList<>())
-              .build();
-      //Open, Commit and Delete the Keys in the Key Manager.
-      OpenKeySession session = keyManager.openKey(arg);
-      for (int i = 0; i < numBlocks; i++) {
-        arg.addLocationInfo(
-            keyManager.allocateBlock(arg, session.getId(), new ExcludeList()));
-      }
-      keyManager.commitKey(arg, session.getId());
-      keyManager.deleteKey(arg);
-    }
-  }
-}
\ No newline at end of file
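The waits above go through GenericTestUtils.waitFor, which polls a boolean condition at a fixed interval until it holds or a timeout expires. A minimal sketch of that polling pattern; this is our own rendering, not the actual Hadoop implementation.

import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

// Sketch only: poll a condition every checkEveryMillis until true or timeout.
public final class WaitFor {
  public static void waitFor(Supplier<Boolean> check, long checkEveryMillis,
      long waitForMillis) throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + waitForMillis;
    while (!check.get()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException(
            "Condition not met within " + waitForMillis + " ms");
      }
      Thread.sleep(checkEveryMillis);
    }
  }
}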
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
deleted file mode 100644
index b00bf44..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs.Builder;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-/**
- * Unit test key manager.
- */
-public class TestKeyManagerUnit {
-
-  private OmMetadataManagerImpl metadataManager;
-  private KeyManagerImpl keyManager;
-
-  private Instant startDate;
-
-  @Before
-  public void setup() throws IOException {
-    OzoneConfiguration configuration = new OzoneConfiguration();
-    configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        GenericTestUtils.getRandomizedTestDir().toString());
-    metadataManager = new OmMetadataManagerImpl(configuration);
-    keyManager = new KeyManagerImpl(
-        Mockito.mock(ScmBlockLocationProtocol.class),
-        metadataManager,
-        configuration,
-        "omtest",
-        Mockito.mock(OzoneBlockTokenSecretManager.class)
-    );
-
-    startDate = Instant.now();
-  }
-
-  @Test
-  public void listMultipartUploadPartsWithZeroUpload() throws IOException {
-    //GIVEN
-    createBucket(metadataManager, "vol1", "bucket1");
-
-    OmMultipartInfo omMultipartInfo =
-        initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1");
-
-    //WHEN
-    OmMultipartUploadListParts omMultipartUploadListParts = keyManager
-        .listParts("vol1", "bucket1", "dir/key1", omMultipartInfo.getUploadID(),
-            0, 10);
-
-    Assert.assertEquals(0,
-        omMultipartUploadListParts.getPartInfoList().size());
-  }
-
-  @Test
-  public void listMultipartUploads() throws IOException {
-
-    //GIVEN
-    createBucket(metadataManager, "vol1", "bucket1");
-    createBucket(metadataManager, "vol1", "bucket2");
-
-    OmMultipartInfo upload1 =
-        initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1");
-
-    OmMultipartInfo upload2 =
-        initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key2");
-
-    OmMultipartInfo upload3 =
-        initMultipartUpload(keyManager, "vol1", "bucket2", "dir/key1");
-
-    //WHEN
-    OmMultipartUploadList omMultipartUploadList =
-        keyManager.listMultipartUploads("vol1", "bucket1", "");
-
-    //THEN
-    List<OmMultipartUpload> uploads = omMultipartUploadList.getUploads();
-    Assert.assertEquals(2, uploads.size());
-    Assert.assertEquals("dir/key1", uploads.get(0).getKeyName());
-    Assert.assertEquals("dir/key2", uploads.get(1).getKeyName());
-
-    Assert.assertNotNull(uploads.get(1));
-    Assert.assertNotNull(uploads.get(1).getCreationTime());
-    Assert.assertTrue("Creation date is too old",
-        uploads.get(1).getCreationTime().compareTo(startDate) > 0);
-  }
-
-  @Test
-  public void listMultipartUploadsWithPrefix() throws IOException {
-
-    //GIVEN
-    createBucket(metadataManager, "vol1", "bucket1");
-    createBucket(metadataManager, "vol1", "bucket2");
-
-    OmMultipartInfo upload1 =
-        initMultipartUpload(keyManager, "vol1", "bucket1", "dip/key1");
-
-    initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1");
-    initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key2");
-    initMultipartUpload(keyManager, "vol1", "bucket1", "key3");
-
-    initMultipartUpload(keyManager, "vol1", "bucket2", "dir/key1");
-
-    //WHEN
-    OmMultipartUploadList omMultipartUploadList =
-        keyManager.listMultipartUploads("vol1", "bucket1", "dir");
-
-    //THEN
-    List<OmMultipartUpload> uploads = omMultipartUploadList.getUploads();
-    Assert.assertEquals(2, uploads.size());
-    Assert.assertEquals("dir/key1", uploads.get(0).getKeyName());
-    Assert.assertEquals("dir/key2", uploads.get(1).getKeyName());
-  }
-
-  private void createBucket(OmMetadataManagerImpl omMetadataManager,
-      String volume, String bucket)
-      throws IOException {
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volume)
-        .setBucketName(bucket)
-        .setStorageType(StorageType.DISK)
-        .setIsVersionEnabled(false)
-        .setAcls(new ArrayList<>())
-        .build();
-    TestOMRequestUtils.addBucketToOM(omMetadataManager, omBucketInfo);
-  }
-
-  private OmMultipartInfo initMultipartUpload(KeyManagerImpl omtest,
-      String volume, String bucket, String key)
-      throws IOException {
-    OmKeyArgs key1 = new Builder()
-        .setVolumeName(volume)
-        .setBucketName(bucket)
-        .setKeyName(key)
-        .setType(ReplicationType.RATIS)
-        .setFactor(ReplicationFactor.THREE)
-        .setAcls(new ArrayList<>())
-        .build();
-    return omtest.initiateMultipartUpload(key1);
-  }
-}
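listMultipartUploadsWithPrefix boils down to a prefix scan over sorted keys: start the iteration at the prefix and stop at the first key that no longer matches, which works because the key set is sorted. A minimal sketch under that assumption; the keys mirror the test data above and the class name is hypothetical.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.TreeSet;

// Sketch only: prefix scan over a sorted key set, as exercised by the test above.
public class PrefixScan {
  static List<String> listWithPrefix(TreeSet<String> keys, String prefix) {
    List<String> result = new ArrayList<>();
    for (String key : keys.tailSet(prefix)) {
      if (!key.startsWith(prefix)) {
        break;  // sorted order: nothing later can match the prefix
      }
      result.add(key);
    }
    return result;
  }

  public static void main(String[] args) {
    TreeSet<String> keys = new TreeSet<>(
        Arrays.asList("dip/key1", "dir/key1", "dir/key2", "key3"));
    System.out.println(listWithPrefix(keys, "dir"));  // [dir/key1, dir/key2]
  }
}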
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
deleted file mode 100644
index e0e4c61..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
+++ /dev/null
@@ -1,417 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om;
-import com.google.common.base.Optional;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.util.List;
-import java.util.TreeSet;
-
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
-
-/**
- * Tests OzoneManager MetadataManager.
- */
-public class TestOmMetadataManager {
-
-  private OMMetadataManager omMetadataManager;
-  private OzoneConfiguration ozoneConfiguration;
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-
-  @Before
-  public void setup() throws Exception {
-    ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OZONE_OM_DB_DIRS,
-        folder.getRoot().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-  }
-  @Test
-  public void testListBuckets() throws Exception {
-
-    String volumeName1 = "volumeA";
-    String prefixBucketNameWithOzoneOwner = "ozoneBucket";
-    String prefixBucketNameWithHadoopOwner = "hadoopBucket";
-
-    TestOMRequestUtils.addVolumeToDB(volumeName1, omMetadataManager);
-
-
-    TreeSet<String> volumeABucketsPrefixWithOzoneOwner = new TreeSet<>();
-    TreeSet<String> volumeABucketsPrefixWithHadoopOwner = new TreeSet<>();
-    for (int i=1; i<= 100; i++) {
-      if (i % 2 == 0) {
-        volumeABucketsPrefixWithOzoneOwner.add(
-            prefixBucketNameWithOzoneOwner + i);
-        addBucketsToCache(volumeName1, prefixBucketNameWithOzoneOwner + i);
-      } else {
-        volumeABucketsPrefixWithHadoopOwner.add(
-            prefixBucketNameWithHadoopOwner + i);
-        addBucketsToCache(volumeName1, prefixBucketNameWithHadoopOwner + i);
-      }
-    }
-
-    String volumeName2 = "volumeB";
-    TreeSet<String> volumeBBucketsPrefixWithOzoneOwner = new TreeSet<>();
-    TreeSet<String> volumeBBucketsPrefixWithHadoopOwner = new TreeSet<>();
-    TestOMRequestUtils.addVolumeToDB(volumeName2, omMetadataManager);
-    for (int i=1; i<= 100; i++) {
-      if (i % 2 == 0) {
-        volumeBBucketsPrefixWithOzoneOwner.add(
-            prefixBucketNameWithOzoneOwner + i);
-        addBucketsToCache(volumeName2, prefixBucketNameWithOzoneOwner + i);
-      } else {
-        volumeBBucketsPrefixWithHadoopOwner.add(
-            prefixBucketNameWithHadoopOwner + i);
-        addBucketsToCache(volumeName2, prefixBucketNameWithHadoopOwner + i);
-      }
-    }
-
-    // List all buckets which have prefix ozoneBucket
-    List<OmBucketInfo> omBucketInfoList =
-        omMetadataManager.listBuckets(volumeName1,
-            null, prefixBucketNameWithOzoneOwner, 100);
-
-    Assert.assertEquals(50, omBucketInfoList.size());
-
-    for (OmBucketInfo omBucketInfo : omBucketInfoList) {
-      Assert.assertTrue(omBucketInfo.getBucketName().startsWith(
-          prefixBucketNameWithOzoneOwner));
-    }
-
-
-    String startBucket = prefixBucketNameWithOzoneOwner + 10;
-    omBucketInfoList =
-        omMetadataManager.listBuckets(volumeName1,
-            startBucket, prefixBucketNameWithOzoneOwner,
-            100);
-
-    Assert.assertEquals(volumeABucketsPrefixWithOzoneOwner.tailSet(
-        startBucket).size() - 1, omBucketInfoList.size());
-
-    startBucket = prefixBucketNameWithOzoneOwner + 38;
-    omBucketInfoList =
-        omMetadataManager.listBuckets(volumeName1,
-            startBucket, prefixBucketNameWithOzoneOwner,
-            100);
-
-    Assert.assertEquals(volumeABucketsPrefixWithOzoneOwner.tailSet(
-        startBucket).size() - 1, omBucketInfoList.size());
-
-    for (OmBucketInfo omBucketInfo : omBucketInfoList) {
-      Assert.assertTrue(omBucketInfo.getBucketName().startsWith(
-          prefixBucketNameWithOzoneOwner));
-      Assert.assertFalse(omBucketInfo.getBucketName().equals(
-          prefixBucketNameWithOzoneOwner + 10));
-    }
-
-
-
-    omBucketInfoList = omMetadataManager.listBuckets(volumeName2,
-        null, prefixBucketNameWithHadoopOwner, 100);
-
-    Assert.assertEquals(50, omBucketInfoList.size());
-
-    for (OmBucketInfo omBucketInfo : omBucketInfoList) {
-      Assert.assertTrue(omBucketInfo.getBucketName().startsWith(
-          prefixBucketNameWithHadoopOwner));
-    }
-
-    // Fetch buckets in batches of 10 until all buckets in volumeB with
-    // prefix prefixBucketNameWithHadoopOwner have been listed.
-    startBucket = null;
-    TreeSet<String> expectedBuckets = new TreeSet<>();
-    for (int i=0; i<5; i++) {
-
-      omBucketInfoList = omMetadataManager.listBuckets(volumeName2,
-          startBucket, prefixBucketNameWithHadoopOwner, 10);
-
-      Assert.assertEquals(10, omBucketInfoList.size());
-
-      for (OmBucketInfo omBucketInfo : omBucketInfoList) {
-        expectedBuckets.add(omBucketInfo.getBucketName());
-        Assert.assertTrue(omBucketInfo.getBucketName().startsWith(
-            prefixBucketNameWithHadoopOwner));
-        startBucket =  omBucketInfo.getBucketName();
-      }
-    }
-
-
-    Assert.assertEquals(volumeBBucketsPrefixWithHadoopOwner, expectedBuckets);
-    // Now that all 50 buckets have been iterated, the next call should
-    // return an empty list.
-    omBucketInfoList = omMetadataManager.listBuckets(volumeName2,
-        startBucket, prefixBucketNameWithHadoopOwner, 10);
-
-    Assert.assertEquals(0, omBucketInfoList.size());
-
-  }
-
-
-  private void addBucketsToCache(String volumeName, String bucketName) {
-
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setStorageType(StorageType.DISK)
-        .setIsVersionEnabled(false)
-        .build();
-
-    omMetadataManager.getBucketTable().addCacheEntry(
-        new CacheKey<>(omMetadataManager.getBucketKey(volumeName, bucketName)),
-        new CacheValue<>(Optional.of(omBucketInfo), 1));
-  }
-
-  @Test
-  public void testListKeys() throws Exception {
-
-    String volumeNameA = "volumeA";
-    String volumeNameB = "volumeB";
-    String ozoneBucket = "ozoneBucket";
-    String hadoopBucket = "hadoopBucket";
-
-
-    // Create volumes and buckets.
-    TestOMRequestUtils.addVolumeToDB(volumeNameA, omMetadataManager);
-    TestOMRequestUtils.addVolumeToDB(volumeNameB, omMetadataManager);
-    addBucketsToCache(volumeNameA, ozoneBucket);
-    addBucketsToCache(volumeNameB, hadoopBucket);
-
-
-    String prefixKeyA = "key-a";
-    String prefixKeyB = "key-b";
-    TreeSet<String> keysASet = new TreeSet<>();
-    TreeSet<String> keysBSet = new TreeSet<>();
-    for (int i=1; i<= 100; i++) {
-      if (i % 2 == 0) {
-        keysASet.add(
-            prefixKeyA + i);
-        addKeysToOM(volumeNameA, ozoneBucket, prefixKeyA + i, i);
-      } else {
-        keysBSet.add(
-            prefixKeyB + i);
-        addKeysToOM(volumeNameA, hadoopBucket, prefixKeyB + i, i);
-      }
-    }
-
-
-    TreeSet<String> keysAVolumeBSet = new TreeSet<>();
-    TreeSet<String> keysBVolumeBSet = new TreeSet<>();
-    for (int i=1; i<= 100; i++) {
-      if (i % 2 == 0) {
-        keysAVolumeBSet.add(
-            prefixKeyA + i);
-        addKeysToOM(volumeNameB, ozoneBucket, prefixKeyA + i, i);
-      } else {
-        keysBVolumeBSet.add(
-            prefixKeyB + i);
-        addKeysToOM(volumeNameB, hadoopBucket, prefixKeyB + i, i);
-      }
-    }
-
-
-    // List all keys which have prefix "key-a"
-    List<OmKeyInfo> omKeyInfoList =
-        omMetadataManager.listKeys(volumeNameA, ozoneBucket,
-            null, prefixKeyA, 100);
-
-    Assert.assertEquals(50, omKeyInfoList.size());
-
-    for (OmKeyInfo omKeyInfo : omKeyInfoList) {
-      Assert.assertTrue(omKeyInfo.getKeyName().startsWith(
-          prefixKeyA));
-    }
-
-
-    String startKey = prefixKeyA + 10;
-    omKeyInfoList =
-        omMetadataManager.listKeys(volumeNameA, ozoneBucket,
-            startKey, prefixKeyA, 100);
-
-    Assert.assertEquals(keysASet.tailSet(
-        startKey).size() - 1, omKeyInfoList.size());
-
-    startKey = prefixKeyA + 38;
-    omKeyInfoList =
-        omMetadataManager.listKeys(volumeNameA, ozoneBucket,
-            startKey, prefixKeyA, 100);
-
-    Assert.assertEquals(keysASet.tailSet(
-        startKey).size() - 1, omKeyInfoList.size());
-
-    for (OmKeyInfo omKeyInfo : omKeyInfoList) {
-      Assert.assertTrue(omKeyInfo.getKeyName().startsWith(
-          prefixKeyA));
-      Assert.assertFalse(omKeyInfo.getKeyName().equals(
-          prefixKeyA + 38));
-    }
-
-
-
-    omKeyInfoList = omMetadataManager.listKeys(volumeNameB, hadoopBucket,
-        null, prefixKeyB, 100);
-
-    Assert.assertEquals(50, omKeyInfoList.size());
-
-    for (OmKeyInfo omKeyInfo : omKeyInfoList) {
-      Assert.assertTrue(omKeyInfo.getKeyName().startsWith(
-          prefixKeyB));
-    }
-
-    // Fetch keys in batches of 10 until all keys in volumeB/hadoopBucket
-    // with prefix "key-b" have been listed.
-    startKey = null;
-    TreeSet<String> expectedKeys = new TreeSet<>();
-    for (int i=0; i<5; i++) {
-
-      omKeyInfoList = omMetadataManager.listKeys(volumeNameB, hadoopBucket,
-          startKey, prefixKeyB, 10);
-
-      Assert.assertEquals(10, omKeyInfoList.size());
-
-      for (OmKeyInfo omKeyInfo : omKeyInfoList) {
-        expectedKeys.add(omKeyInfo.getKeyName());
-        Assert.assertTrue(omKeyInfo.getKeyName().startsWith(
-            prefixKeyB));
-        startKey =  omKeyInfo.getKeyName();
-      }
-    }
-
-    Assert.assertEquals(expectedKeys, keysBVolumeBSet);
-
-
-    // Now that all 50 keys have been iterated, the next call should
-    // return an empty list.
-    omKeyInfoList = omMetadataManager.listKeys(volumeNameB, hadoopBucket,
-        startKey, prefixKeyB, 10);
-
-    Assert.assertEquals(0, omKeyInfoList.size());
-
-  }
-
-  @Test
-  public void testListKeysWithFewDeleteEntriesInCache() throws Exception {
-    String volumeNameA = "volumeA";
-    String ozoneBucket = "ozoneBucket";
-
-    // Create volumes and bucket.
-    TestOMRequestUtils.addVolumeToDB(volumeNameA, omMetadataManager);
-
-    addBucketsToCache(volumeNameA, ozoneBucket);
-
-    String prefixKeyA = "key-a";
-    TreeSet<String> keysASet = new TreeSet<>();
-    TreeSet<String> deleteKeySet = new TreeSet<>();
-
-
-    for (int i=1; i<= 100; i++) {
-      if (i % 2 == 0) {
-        keysASet.add(
-            prefixKeyA + i);
-        addKeysToOM(volumeNameA, ozoneBucket, prefixKeyA + i, i);
-      } else {
-        addKeysToOM(volumeNameA, ozoneBucket, prefixKeyA + i, i);
-        String key = omMetadataManager.getOzoneKey(volumeNameA,
-            ozoneBucket, prefixKeyA + i);
-        // Mark as deleted in cache.
-        omMetadataManager.getKeyTable().addCacheEntry(
-            new CacheKey<>(key),
-            new CacheValue<>(Optional.absent(), 100L));
-        deleteKeySet.add(key);
-      }
-    }
-
-    // Now list the keys that match prefixKeyA.
-    List<OmKeyInfo> omKeyInfoList =
-        omMetadataManager.listKeys(volumeNameA, ozoneBucket,
-            null, prefixKeyA, 100);
-
-    // Of the 100 keys in total, 50 are marked for deletion, so listing
-    // should return only 50 keys.
-    Assert.assertEquals(50, omKeyInfoList.size());
-
-    TreeSet<String> expectedKeys = new TreeSet<>();
-
-    for (OmKeyInfo omKeyInfo : omKeyInfoList) {
-      expectedKeys.add(omKeyInfo.getKeyName());
-      Assert.assertTrue(omKeyInfo.getKeyName().startsWith(prefixKeyA));
-    }
-
-    Assert.assertEquals(expectedKeys, keysASet);
-
-
-    // Now get key count by 10.
-    String startKey = null;
-    expectedKeys = new TreeSet<>();
-    for (int i=0; i<5; i++) {
-
-      omKeyInfoList = omMetadataManager.listKeys(volumeNameA, ozoneBucket,
-          startKey, prefixKeyA, 10);
-
-      Assert.assertEquals(10, omKeyInfoList.size());
-
-      for (OmKeyInfo omKeyInfo : omKeyInfoList) {
-        expectedKeys.add(omKeyInfo.getKeyName());
-        Assert.assertTrue(omKeyInfo.getKeyName().startsWith(
-            prefixKeyA));
-        startKey =  omKeyInfo.getKeyName();
-      }
-    }
-
-    Assert.assertEquals(keysASet, expectedKeys);
-
-
-    // Now that all 50 keys have been iterated, the next call should
-    // return an empty list.
-    omKeyInfoList = omMetadataManager.listKeys(volumeNameA, ozoneBucket,
-        startKey, prefixKeyA, 10);
-
-    Assert.assertEquals(0, omKeyInfoList.size());
-
-
-
-  }
-
-  private void addKeysToOM(String volumeName, String bucketName,
-      String keyName, int i) throws Exception {
-
-    if (i % 2 == 0) {
-      TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-          1000L, HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-    } else {
-      TestOMRequestUtils.addKeyToTableCache(volumeName, bucketName, keyName,
-          HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
-          omMetadataManager);
-    }
-  }
-
-}
\ No newline at end of file
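The startBucket/startKey assertions above encode a pagination contract: the marker names the last entry already seen, and the next page begins strictly after it, which is why the test expects tailSet(startBucket).size() - 1 results. A hedged sketch of that contract over a TreeSet, with illustrative names rather than the OmMetadataManagerImpl code:

import java.util.ArrayList;
import java.util.List;
import java.util.TreeSet;

/** Sketch: page through sorted names with an exclusive start marker. */
final class PagedListingSketch {
  static List<String> page(TreeSet<String> names, String startAfter,
      String prefix, int maxCount) {
    // tailSet with inclusive=false excludes the marker itself, matching the
    // "size() - 1" expectation in the assertions above.
    Iterable<String> range = (startAfter == null)
        ? names : names.tailSet(startAfter, false);
    List<String> out = new ArrayList<>();
    for (String name : range) {
      if (!name.startsWith(prefix)) {
        continue; // ozone/hadoop-prefixed names are interleaved in the tests
      }
      out.add(name);
      if (out.size() == maxCount) {
        break;
      }
    }
    return out;
  }
}

Calling page(...) repeatedly with the last returned name as startAfter walks the whole prefix range and finally yields an empty page, which is exactly what the batch-of-10 loops above verify.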
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
deleted file mode 100644
index b071e27..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpConfig.Policy;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.net.URL;
-import java.net.URLConnection;
-import java.util.Arrays;
-import java.util.Collection;
-
-/**
- * Tests the OM HTTP server with various HTTP policy options.
- */
-@RunWith(value = Parameterized.class)
-public class TestOzoneManagerHttpServer {
-  private static final String BASEDIR = GenericTestUtils
-      .getTempPath(TestOzoneManagerHttpServer.class.getSimpleName());
-  private static String keystoresDir;
-  private static String sslConfDir;
-  private static Configuration conf;
-  private static URLConnectionFactory connectionFactory;
-
-  @Parameters public static Collection<Object[]> policy() {
-    Object[][] params = new Object[][] {
-        {HttpConfig.Policy.HTTP_ONLY},
-        {HttpConfig.Policy.HTTPS_ONLY},
-        {HttpConfig.Policy.HTTP_AND_HTTPS} };
-    return Arrays.asList(params);
-  }
-
-  private final HttpConfig.Policy policy;
-
-  public TestOzoneManagerHttpServer(Policy policy) {
-    super();
-    this.policy = policy;
-  }
-
-  @BeforeClass public static void setUp() throws Exception {
-    File base = new File(BASEDIR);
-    FileUtil.fullyDelete(base);
-    base.mkdirs();
-    conf = new Configuration();
-    keystoresDir = new File(BASEDIR).getAbsolutePath();
-    sslConfDir = KeyStoreTestUtil.getClasspathDir(
-        TestOzoneManagerHttpServer.class);
-    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
-    connectionFactory =
-        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
-    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getClientSSLConfigFileName());
-    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getServerSSLConfigFileName());
-  }
-
-  @AfterClass public static void tearDown() throws Exception {
-    FileUtil.fullyDelete(new File(BASEDIR));
-    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
-  }
-
-  @Test public void testHttpPolicy() throws Exception {
-    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
-    conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "localhost:0");
-    conf.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, "localhost:0");
-
-    OzoneManagerHttpServer server = null;
-    try {
-      server = new OzoneManagerHttpServer(conf, null);
-      server.start();
-
-      Assert.assertTrue(implies(policy.isHttpEnabled(),
-          canAccess("http", server.getHttpAddress())));
-      Assert.assertTrue(implies(policy.isHttpEnabled() &&
-              !policy.isHttpsEnabled(),
-          !canAccess("https", server.getHttpsAddress())));
-
-      Assert.assertTrue(implies(policy.isHttpsEnabled(),
-          canAccess("https", server.getHttpsAddress())));
-      Assert.assertTrue(implies(policy.isHttpsEnabled(),
-          !canAccess("http", server.getHttpsAddress())));
-
-    } finally {
-      if (server != null) {
-        server.stop();
-      }
-    }
-  }
-
-  private static boolean canAccess(String scheme, InetSocketAddress addr) {
-    if (addr == null) {
-      return false;
-    }
-    try {
-      URL url =
-          new URL(scheme + "://" + NetUtils.getHostPortString(addr) + "/jmx");
-      URLConnection conn = connectionFactory.openConnection(url);
-      conn.connect();
-      conn.getContent();
-    } catch (Exception e) {
-      return false;
-    }
-    return true;
-  }
-
-  private static boolean implies(boolean a, boolean b) {
-    return !a || b;
-  }
-}
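The four implies(...) assertions above amount to a small truth table over the three policies: an enabled scheme must answer, and a disabled one must not. Restated as a sketch; the enum mirrors HttpConfig.Policy, and the helper names are assumptions made here for readability:

/** Sketch: which endpoints each HTTP policy is expected to expose. */
final class ExpectedReachabilitySketch {
  enum Policy { HTTP_ONLY, HTTPS_ONLY, HTTP_AND_HTTPS }

  // "policy enables HTTP" must imply "HTTP endpoint is reachable".
  static boolean httpExpected(Policy p) {
    return p == Policy.HTTP_ONLY || p == Policy.HTTP_AND_HTTPS;
  }

  // "policy enables HTTPS" must imply "HTTPS endpoint is reachable".
  static boolean httpsExpected(Policy p) {
    return p == Policy.HTTPS_ONLY || p == Policy.HTTP_AND_HTTPS;
  }
}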
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
deleted file mode 100644
index 8028169..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import static org.junit.Assert.*;
-
-/**
- * This class tests the CLI provided by OzoneManagerStarter, which is used to
- * start and init the OzoneManager. The calls to the Ozone Manager are mocked,
- * so the tests only validate that the CLI invokes the correct methods.
- */
-public class TestOzoneManagerStarter {
-
-  private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
-  private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
-  private final PrintStream originalOut = System.out;
-  private final PrintStream originalErr = System.err;
-
-  private MockOMStarter mock;
-
-  @Before
-  public void setUpStreams() {
-    System.setOut(new PrintStream(outContent));
-    System.setErr(new PrintStream(errContent));
-    mock = new MockOMStarter();
-  }
-
-  @After
-  public void restoreStreams() {
-    System.setOut(originalOut);
-    System.setErr(originalErr);
-  }
-
-  @Test
-  public void testCallsStartWhenServerStarted() throws Exception {
-    executeCommand();
-    assertTrue(mock.startCalled);
-  }
-
-  @Test
-  public void testExceptionThrownWhenStartFails() throws Exception {
-    mock.throwOnStart = true;
-    try {
-      executeCommand();
-      fail("Exception should have been thrown");
-    } catch (Exception e) {
-      assertTrue(true);
-    }
-  }
-
-  @Test
-  public void testStartNotCalledWithInvalidParam() throws Exception {
-    executeCommand("--invalid");
-    assertFalse(mock.startCalled);
-  }
-
-  @Test
-  public void testPassingInitSwitchCallsInit() {
-    executeCommand("--init");
-    assertTrue(mock.initCalled);
-  }
-
-  @Test
-  public void testInitSwitchWithInvalidParamDoesNotRun() {
-    executeCommand("--init", "--invalid");
-    assertFalse(mock.initCalled);
-  }
-
-  @Test
-  public void testUnSuccessfulInitThrowsException() {
-    mock.throwOnInit = true;
-    try {
-      executeCommand("--init");
-      fail("Exception show have been thrown");
-    } catch (Exception e) {
-      assertTrue(true);
-    }
-  }
-
-  @Test
-  public void testInitThatReturnsFalseThrowsException() {
-    mock.initStatus = false;
-    try {
-      executeCommand("--init");
-      fail("Exception show have been thrown");
-    } catch (Exception e) {
-      assertTrue(true);
-    }
-  }
-
-  @Test
-  public void testUsagePrintedOnInvalidInput() {
-    executeCommand("--invalid");
-    Pattern p = Pattern.compile("^Unknown option:.*--invalid.*\nUsage");
-    Matcher m = p.matcher(errContent.toString());
-    assertTrue(m.find());
-  }
-
-  private void executeCommand(String... args) {
-    new OzoneManagerStarter(mock).execute(args);
-  }
-
-  static class MockOMStarter implements OMStarterInterface {
-
-    private boolean startCalled = false;
-    private boolean initCalled = false;
-    private boolean initStatus = true;
-    private boolean throwOnStart = false;
-    private boolean throwOnInit = false;
-
-    public void start(OzoneConfiguration conf) throws IOException,
-        AuthenticationException {
-      startCalled = true;
-      if (throwOnStart) {
-        throw new IOException("Simulated Exception");
-      }
-    }
-
-    public boolean init(OzoneConfiguration conf) throws IOException,
-        AuthenticationException {
-      initCalled = true;
-      if (throwOnInit) {
-        throw new IOException("Simulated Exception");
-      }
-      return initStatus;
-    }
-  }
-}
\ No newline at end of file
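MockOMStarter above is an instance of a general testing seam: the CLI depends on a small interface, so tests substitute a recording fake instead of booting a real OzoneManager. A minimal, hypothetical sketch of the same pattern (names here are illustrative, not the OMStarterInterface contract):

import java.io.IOException;

/** Sketch: a recording fake behind a small start/init interface. */
final class RecordingStarterSketch {
  interface Starter {
    void start() throws IOException;
    boolean init() throws IOException;
  }

  static class RecordingStarter implements Starter {
    boolean startCalled;
    boolean initCalled;
    boolean throwOnStart;

    @Override
    public void start() throws IOException {
      startCalled = true; // record the call before (possibly) failing
      if (throwOnStart) {
        throw new IOException("Simulated Exception");
      }
    }

    @Override
    public boolean init() {
      initCalled = true;
      return true;
    }
  }
}

A test then drives the CLI with the fake and asserts on startCalled/initCalled, which is exactly how the deleted tests distinguish "--init" from an invalid flag without any server startup.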
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestS3BucketManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestS3BucketManager.java
deleted file mode 100644
index ef35d4d..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestS3BucketManager.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.io.IOException;
-
-import static org.junit.Assert.*;
-
-/**
- * Tests for S3 Bucket Manager.
- */
-public class TestS3BucketManager {
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-  private OzoneConfiguration conf;
-  private OmMetadataManagerImpl metaMgr;
-  private BucketManager bucketManager;
-  private VolumeManager volumeManager;
-
-  @Before
-  public void init() throws IOException {
-    conf = new OzoneConfiguration();
-    File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
-      Assert.assertTrue(newFolder.mkdirs());
-    }
-    ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString());
-    metaMgr = new OmMetadataManagerImpl(conf);
-    volumeManager = new VolumeManagerImpl(metaMgr, conf);
-    bucketManager = new BucketManagerImpl(metaMgr);
-  }
-
-  @Test
-  public void testOzoneVolumeNameForUser() throws IOException {
-    S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr,
-        volumeManager, bucketManager);
-    String userName = "ozone";
-    String volumeName = s3BucketManager.getOzoneVolumeNameForUser(userName);
-    assertEquals(OzoneConsts.OM_S3_VOLUME_PREFIX + userName, volumeName);
-  }
-
-  @Test
-  public void testOzoneVolumeNameForUserFails() throws IOException {
-    S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr,
-        volumeManager, bucketManager);
-    String userName = null;
-    try {
-      s3BucketManager.getOzoneVolumeNameForUser(userName);
-      fail("Expected a NullPointerException for a null user name");
-    } catch (NullPointerException ex) {
-      GenericTestUtils.assertExceptionContains("UserName cannot be null", ex);
-    }
-
-  }
-
-  @Test
-  public void testGetS3BucketMapping() throws IOException {
-    S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr,
-        volumeManager, bucketManager);
-    String userName = "bilbo";
-    metaMgr.getS3Table().put("newBucket",
-        s3BucketManager.formatOzoneVolumeName(userName) + "/newBucket");
-    String mapping = s3BucketManager.getOzoneBucketMapping("newBucket");
-    Assert.assertTrue(mapping.startsWith("s3bilbo/"));
-    Assert.assertTrue(mapping.endsWith("/newBucket"));
-  }
-
-  @Test
-  public void testGetOzoneNames() throws IOException {
-    S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr,
-        volumeManager, bucketManager);
-    String userName = "batman";
-    String s3BucketName = "gotham";
-    metaMgr.getS3Table().put(s3BucketName,
-        s3BucketManager.formatOzoneVolumeName(userName) + "/" + s3BucketName);
-    String volumeName = s3BucketManager.getOzoneVolumeName(s3BucketName);
-    Assert.assertTrue(volumeName.equalsIgnoreCase("s3"+userName));
-    String bucketName =s3BucketManager.getOzoneBucketName(s3BucketName);
-    Assert.assertTrue(bucketName.equalsIgnoreCase(s3BucketName));
-    // try to get a bucket that does not exist.
-    thrown.expectMessage("No such S3 bucket.");
-    s3BucketManager.getOzoneBucketMapping("raven");
-
-  }
-}
\ No newline at end of file
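The assertions above rely on the S3 table storing a mapping of the form "<s3VolumePrefix><user>/<bucket>", e.g. "s3bilbo/newBucket". A hedged sketch of how such a mapping splits back into volume and bucket; the helpers are illustrative, not the S3BucketManagerImpl code:

/** Sketch: splitting an S3 table mapping of the form "volume/bucket". */
final class S3MappingSketch {
  static String volumeOf(String mapping) {
    // Everything before the first slash is the Ozone volume name.
    return mapping.substring(0, mapping.indexOf('/'));
  }

  static String bucketOf(String mapping) {
    // Everything after the first slash is the Ozone bucket name.
    return mapping.substring(mapping.indexOf('/') + 1);
  }

  public static void main(String[] args) {
    // "bilbo" with the assumed "s3" volume prefix yields "s3bilbo/newBucket".
    String mapping = "s3bilbo/newBucket";
    assert volumeOf(mapping).equals("s3bilbo");
    assert bucketOf(mapping).equals("newBucket");
  }
}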
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java
deleted file mode 100644
index 12fcf7c..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.om;
-/**
- * OM tests
- */
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java
deleted file mode 100644
index 56c806a..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.ratis;
-
-import java.io.IOException;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-
-/**
- * This class tests OzoneManagerDoubleBuffer implementation with
- * dummy response class.
- */
-public class TestOzoneManagerDoubleBufferWithDummyResponse {
-
-  private OMMetadataManager omMetadataManager;
-  private OzoneManagerDoubleBuffer doubleBuffer;
-  private AtomicLong trxId = new AtomicLong(0);
-  private OzoneManagerRatisSnapshot ozoneManagerRatisSnapshot;
-  private long lastAppliedIndex;
-
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Before
-  public void setup() throws IOException {
-    OzoneConfiguration configuration = new OzoneConfiguration();
-    configuration.set(OZONE_METADATA_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager =
-        new OmMetadataManagerImpl(configuration);
-    ozoneManagerRatisSnapshot = index -> {
-      lastAppliedIndex = index;
-    };
-    doubleBuffer = new OzoneManagerDoubleBuffer(omMetadataManager,
-        ozoneManagerRatisSnapshot);
-  }
-
-  @After
-  public void stop() {
-    doubleBuffer.stop();
-  }
-
-  /**
-   * This test adds 100 bucket creation responses to the doubleBuffer and
-   * checks that the OM DB bucket table ends up with 100 entries. It also
-   * verifies that the flushed transaction count matches the expected count.
-   * @throws Exception
-   */
-  @Test(timeout = 300_000)
-  public void testDoubleBufferWithDummyResponse() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    int bucketCount = 100;
-    OzoneManagerDoubleBufferMetrics ozoneManagerDoubleBufferMetrics =
-        doubleBuffer.getOzoneManagerDoubleBufferMetrics();
-
-    // As we have not flushed/added any transactions, all metrics should have
-    // value zero.
-    Assert.assertTrue(ozoneManagerDoubleBufferMetrics
-        .getTotalNumOfFlushOperations() == 0);
-    Assert.assertTrue(ozoneManagerDoubleBufferMetrics
-        .getTotalNumOfFlushedTransactions() == 0);
-    Assert.assertTrue(ozoneManagerDoubleBufferMetrics
-        .getMaxNumberOfTransactionsFlushedInOneIteration() == 0);
-
-    for (int i=0; i < bucketCount; i++) {
-      doubleBuffer.add(createDummyBucketResponse(volumeName,
-          UUID.randomUUID().toString()), trxId.incrementAndGet());
-    }
-    GenericTestUtils.waitFor(() ->
-            doubleBuffer.getFlushedTransactionCount() == bucketCount, 100,
-        60000);
-
-    Assert.assertTrue(ozoneManagerDoubleBufferMetrics
-        .getTotalNumOfFlushOperations() > 0);
-    Assert.assertTrue(ozoneManagerDoubleBufferMetrics
-        .getTotalNumOfFlushedTransactions() == bucketCount);
-    Assert.assertTrue(ozoneManagerDoubleBufferMetrics
-        .getMaxNumberOfTransactionsFlushedInOneIteration() > 0);
-    Assert.assertTrue(omMetadataManager.countRowsInTable(
-        omMetadataManager.getBucketTable()) == (bucketCount));
-    Assert.assertTrue(doubleBuffer.getFlushIterations() > 0);
-
-    // Check lastAppliedIndex is updated correctly or not.
-    Assert.assertEquals(bucketCount, lastAppliedIndex);
-  }
-
-  /**
-   * Creates a dummy CreateBucket response.
-   * @param volumeName
-   * @param bucketName
-   * @return OMDummyCreateBucketResponse
-   */
-  private OMDummyCreateBucketResponse createDummyBucketResponse(
-      String volumeName, String bucketName) {
-    OmBucketInfo omBucketInfo =
-        OmBucketInfo.newBuilder().setVolumeName(volumeName)
-            .setBucketName(bucketName).setCreationTime(Time.now()).build();
-    return new OMDummyCreateBucketResponse(omBucketInfo,
-        OMResponse.newBuilder()
-            .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setCreateBucketResponse(CreateBucketResponse.newBuilder().build())
-            .build());
-  }
-
-
-  /**
-   * Dummy CreateBucket response class used in testing.
-   */
-  public static class OMDummyCreateBucketResponse extends OMClientResponse {
-    private final OmBucketInfo omBucketInfo;
-
-    public OMDummyCreateBucketResponse(OmBucketInfo omBucketInfo,
-        OMResponse omResponse) {
-      super(omResponse);
-      this.omBucketInfo = omBucketInfo;
-    }
-
-    @Override
-    public void addToDBBatch(OMMetadataManager omMetadataManager,
-        BatchOperation batchOperation) throws IOException {
-      String dbBucketKey =
-          omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-              omBucketInfo.getBucketName());
-      omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-          dbBucketKey, omBucketInfo);
-    }
-
-  }
-}
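GenericTestUtils.waitFor(...) above is the standard answer to asserting against a background flusher: the double buffer flushes on its own thread, so the test cannot check counts immediately. A self-contained sketch of the same poll-until-deadline idiom, under the assumption that the condition eventually stabilizes:

import java.util.function.BooleanSupplier;

/** Sketch: poll a condition until it holds or a deadline passes. */
final class WaitForSketch {
  static void waitFor(BooleanSupplier check, long intervalMs, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!check.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("condition not met within timeout");
      }
      Thread.sleep(intervalMs); // back off between checks, as waitFor does
    }
  }
}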
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
deleted file mode 100644
index 441f1c1..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
+++ /dev/null
@@ -1,496 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.ratis;
-
-import java.io.IOException;
-import java.util.Queue;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest;
-import org.apache.hadoop.ozone.om.request.bucket.OMBucketDeleteRequest;
-import org.apache.hadoop.ozone.om.request.volume.OMVolumeCreateRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .DeleteBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse;
-import org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Daemon;
-import org.mockito.Mockito;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.when;
-
-/**
- * This class tests OzoneManagerDoubleBuffer with actual OMResponse classes.
- */
-public class TestOzoneManagerDoubleBufferWithOMResponse {
-
-  private OzoneManager ozoneManager;
-  private OMMetrics omMetrics;
-  private AuditLogger auditLogger;
-  private OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper;
-  private OMMetadataManager omMetadataManager;
-  private OzoneManagerDoubleBuffer doubleBuffer;
-  private final AtomicLong trxId = new AtomicLong(0);
-  private OzoneManagerRatisSnapshot ozoneManagerRatisSnapshot;
-  private volatile long lastAppliedIndex;
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Before
-  public void setup() throws IOException {
-    ozoneManager = Mockito.mock(OzoneManager.class,
-        Mockito.withSettings().stubOnly());
-    omMetrics = OMMetrics.create();
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    ozoneConfiguration.setInt(HDDS_LOCK_MAX_CONCURRENCY, 1000);
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
-    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
-    when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L);
-    auditLogger = Mockito.mock(AuditLogger.class);
-    when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
-    Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
-    ozoneManagerRatisSnapshot = index -> {
-      lastAppliedIndex = index;
-    };
-    doubleBuffer = new OzoneManagerDoubleBuffer(omMetadataManager,
-        ozoneManagerRatisSnapshot);
-    ozoneManagerDoubleBufferHelper = doubleBuffer::add;
-  }
-
-  @After
-  public void stop() {
-    doubleBuffer.stop();
-  }
-
-  /**
-   * This tests the OzoneManagerDoubleBuffer implementation. It calls
-   * testDoubleBuffer with the number of iterations to run and the number of
-   * buckets to create in each iteration, then verifies that the OM DB entry
-   * count matches the total number of transactions.
-   * @throws Exception
-   */
-  @Test(timeout = 500_000)
-  public void testDoubleBuffer() throws Exception {
-    // This test checks whether count in tables are correct or not.
-    testDoubleBuffer(1, 10);
-    testDoubleBuffer(10, 100);
-    testDoubleBuffer(100, 100);
-    testDoubleBuffer(1000, 500);
-  }
-
-  /**
-   * This test first creates a volume, then performs a mix of transactions
-   * such as bucket creates/deletes and adds them to the double buffer. It
-   * then verifies that the OM DB entries match the responses added to the
-   * double buffer.
-   * @throws Exception
-   */
-  @Test
-  public void testDoubleBufferWithMixOfTransactions() throws Exception {
-    // This test checks count, data in table is correct or not.
-    Queue< OMBucketCreateResponse > bucketQueue =
-        new ConcurrentLinkedQueue<>();
-    Queue< OMBucketDeleteResponse > deleteBucketQueue =
-        new ConcurrentLinkedQueue<>();
-
-    String volumeName = UUID.randomUUID().toString();
-    OMVolumeCreateResponse omVolumeCreateResponse =
-        (OMVolumeCreateResponse) createVolume(volumeName,
-            trxId.incrementAndGet());
-
-    int bucketCount = 10;
-
-    doMixTransactions(volumeName, bucketCount, deleteBucketQueue,
-        bucketQueue);
-
-    // For every two createBucket transactions, one deleteBucket is added.
-    final int deleteCount = 5;
-
-    // We are doing +1 for volume transaction.
-    GenericTestUtils.waitFor(() ->
-        doubleBuffer.getFlushedTransactionCount() ==
-            (bucketCount + deleteCount + 1), 100, 120000);
-
-    Assert.assertTrue(omMetadataManager.countRowsInTable(
-        omMetadataManager.getVolumeTable()) == 1);
-
-    Assert.assertTrue(omMetadataManager.countRowsInTable(
-        omMetadataManager.getBucketTable()) == 5);
-
-    // At this point the DB should contain 5 buckets and one volume.
-
-    checkVolume(volumeName, omVolumeCreateResponse);
-
-    checkCreateBuckets(bucketQueue);
-
-    checkDeletedBuckets(deleteBucketQueue);
-
-    // Check lastAppliedIndex is updated correctly or not.
-    Assert.assertEquals(bucketCount + deleteCount + 1, lastAppliedIndex);
-  }
-
-  /**
-   * This test first creates a volume, then performs a mix of transactions
-   * such as bucket creates/deletes in parallel and adds them to the double
-   * buffer. It then verifies that the OM DB entries match the responses
-   * added to the double buffer.
-   * @throws Exception
-   */
-  @Test
-  public void testDoubleBufferWithMixOfTransactionsParallel()
-      throws Exception {
-    // This test checks count, data in table is correct or not.
-
-    Queue< OMBucketCreateResponse > bucketQueue =
-        new ConcurrentLinkedQueue<>();
-    Queue< OMBucketDeleteResponse > deleteBucketQueue =
-        new ConcurrentLinkedQueue<>();
-
-    String volumeName1 = UUID.randomUUID().toString();
-
-    OMVolumeCreateResponse omVolumeCreateResponse1 =
-        (OMVolumeCreateResponse) createVolume(volumeName1,
-            trxId.incrementAndGet());
-
-    String volumeName2 = UUID.randomUUID().toString();
-    OMVolumeCreateResponse omVolumeCreateResponse2 =
-        (OMVolumeCreateResponse) createVolume(volumeName2,
-            trxId.incrementAndGet());
-
-
-    Daemon daemon1 = new Daemon(() -> doMixTransactions(volumeName1, 10,
-        deleteBucketQueue, bucketQueue));
-    Daemon daemon2 = new Daemon(() -> doMixTransactions(volumeName2, 10,
-        deleteBucketQueue, bucketQueue));
-
-    daemon1.start();
-    daemon2.start();
-
-    int bucketCount = 20;
-
-    // For every two createBucket transactions, one deleteBucket is added.
-    final int deleteCount = 10;
-
-    // The +2 accounts for the two volume transactions.
-    GenericTestUtils.waitFor(() -> doubleBuffer.getFlushedTransactionCount()
-            == (bucketCount + deleteCount + 2), 100, 120000);
-
-    Assert.assertTrue(omMetadataManager.countRowsInTable(
-        omMetadataManager.getVolumeTable()) == 2);
-
-    Assert.assertTrue(omMetadataManager.countRowsInTable(
-        omMetadataManager.getBucketTable()) == 10);
-
-    // At this point the DB should contain 10 buckets and two volumes.
-
-
-    checkVolume(volumeName1, omVolumeCreateResponse1);
-    checkVolume(volumeName2, omVolumeCreateResponse2);
-
-    checkCreateBuckets(bucketQueue);
-
-    checkDeletedBuckets(deleteBucketQueue);
-
-    // Check lastAppliedIndex is updated correctly or not.
-    Assert.assertEquals(bucketCount + deleteCount + 2, lastAppliedIndex);
-  }
-
-  /**
-   * This method adds a mix of createBucket/deleteBucket responses to the
-   * double buffer. bucketCount createBucket responses are added, and every
-   * second bucket is also deleted.
-   * @param volumeName
-   * @param bucketCount
-   * @param deleteBucketQueue
-   * @param bucketQueue
-   */
-  private void doMixTransactions(String volumeName, int bucketCount,
-      Queue<OMBucketDeleteResponse> deleteBucketQueue,
-      Queue<OMBucketCreateResponse> bucketQueue) {
-    for (int i=0; i < bucketCount; i++) {
-      String bucketName = UUID.randomUUID().toString();
-      long transactionID = trxId.incrementAndGet();
-      OMBucketCreateResponse omBucketCreateResponse = createBucket(volumeName,
-          bucketName, transactionID);
-      // Every second created bucket is also deleted.
-      if (i % 2 == 0) {
-        OMBucketDeleteResponse omBucketDeleteResponse =
-            (OMBucketDeleteResponse) deleteBucket(volumeName, bucketName,
-                trxId.incrementAndGet());
-        deleteBucketQueue.add(omBucketDeleteResponse);
-      } else {
-        bucketQueue.add(omBucketCreateResponse);
-      }
-    }
-  }
-
-  private OMClientResponse deleteBucket(String volumeName, String bucketName,
-      long transactionID) {
-    OzoneManagerProtocolProtos.OMRequest omRequest =
-        TestOMRequestUtils.createDeleteBucketRequest(volumeName, bucketName);
-
-    OMBucketDeleteRequest omBucketDeleteRequest =
-        new OMBucketDeleteRequest(omRequest);
-
-    return omBucketDeleteRequest.validateAndUpdateCache(ozoneManager,
-        transactionID, ozoneManagerDoubleBufferHelper);
-  }
-
-  /**
-   * Verifies that the volume table data matches the response added to the
-   * double buffer.
-   * @param volumeName
-   * @param omVolumeCreateResponse
-   * @throws Exception
-   */
-  private void checkVolume(String volumeName,
-      OMVolumeCreateResponse omVolumeCreateResponse) throws Exception {
-    OmVolumeArgs tableVolumeArgs = omMetadataManager.getVolumeTable().get(
-        omMetadataManager.getVolumeKey(volumeName));
-    Assert.assertTrue(tableVolumeArgs != null);
-
-    OmVolumeArgs omVolumeArgs = omVolumeCreateResponse.getOmVolumeArgs();
-
-    Assert.assertEquals(omVolumeArgs.getVolume(), tableVolumeArgs.getVolume());
-    Assert.assertEquals(omVolumeArgs.getAdminName(),
-        tableVolumeArgs.getAdminName());
-    Assert.assertEquals(omVolumeArgs.getOwnerName(),
-        tableVolumeArgs.getOwnerName());
-    Assert.assertEquals(omVolumeArgs.getCreationTime(),
-        tableVolumeArgs.getCreationTime());
-  }
-
-  /**
-   * Verifies that the bucket table data matches the responses added to the
-   * double buffer.
-   * @param bucketQueue
-   */
-  private void checkCreateBuckets(Queue<OMBucketCreateResponse> bucketQueue) {
-    bucketQueue.forEach((omBucketCreateResponse) -> {
-      OmBucketInfo omBucketInfo = omBucketCreateResponse.getOmBucketInfo();
-      String bucket = omBucketInfo.getBucketName();
-      OmBucketInfo tableBucketInfo = null;
-      try {
-        tableBucketInfo =
-            omMetadataManager.getBucketTable().get(
-                omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-                    bucket));
-      } catch (IOException ex) {
-        fail("testDoubleBufferWithMixOfTransactions failed");
-      }
-      Assert.assertNotNull(tableBucketInfo);
-
-      Assert.assertEquals(omBucketInfo.getVolumeName(),
-          tableBucketInfo.getVolumeName());
-      Assert.assertEquals(omBucketInfo.getBucketName(),
-          tableBucketInfo.getBucketName());
-      Assert.assertEquals(omBucketInfo.getCreationTime(),
-          tableBucketInfo.getCreationTime());
-    });
-  }
-
-  /**
-   * Verifies that buckets whose delete responses were added to the double
-   * buffer have actually been removed from the OM DB.
-   * @param deleteBucketQueue
-   */
-  private void checkDeletedBuckets(Queue<OMBucketDeleteResponse>
-      deleteBucketQueue) {
-    deleteBucketQueue.forEach((omBucketDeleteResponse -> {
-      try {
-        Assert.assertNull(omMetadataManager.getBucketTable().get(
-            omMetadataManager.getBucketKey(
-                omBucketDeleteResponse.getVolumeName(),
-                omBucketDeleteResponse.getBucketName())));
-      } catch (IOException ex) {
-        fail("testDoubleBufferWithMixOfTransactions failed");
-      }
-    }));
-  }
-
-  /**
-   * Creates bucketCount createBucket responses in each iteration, with all
-   * iterations running in parallel, then verifies that the OM DB holds the
-   * correct number of entries.
-   * @param iterations
-   * @param bucketCount
-   * @throws Exception
-   */
-  public void testDoubleBuffer(int iterations, int bucketCount)
-      throws Exception {
-    try {
-      // Reset transaction id.
-      trxId.set(0);
-      // Calling setup and stop here because this method is called from a
-      // single test multiple times.
-      setup();
-      for (int i = 0; i < iterations; i++) {
-        Daemon d1 = new Daemon(() ->
-            doTransactions(RandomStringUtils.randomAlphabetic(5),
-                bucketCount));
-        d1.start();
-      }
-
-      // The +1 per iteration accounts for the volume transaction.
-      long expectedTransactions = (bucketCount + 1) * iterations;
-      GenericTestUtils.waitFor(() -> lastAppliedIndex == expectedTransactions,
-          100, 500000);
-
-      Assert.assertEquals(expectedTransactions,
-          doubleBuffer.getFlushedTransactionCount()
-      );
-
-      GenericTestUtils.waitFor(() -> {
-        long count = 0L;
-        try {
-          count =
-              omMetadataManager.countRowsInTable(
-                  omMetadataManager.getVolumeTable());
-        } catch (IOException ex) {
-          fail("testDoubleBuffer failed");
-        }
-        return count == iterations;
-
-      }, 300, 300000);
-
-
-      GenericTestUtils.waitFor(() -> {
-        long count = 0L;
-        try {
-          count = omMetadataManager.countRowsInTable(
-              omMetadataManager.getBucketTable());
-        } catch (IOException ex) {
-          fail("testDoubleBuffer failed");
-        }
-        return count == bucketCount * iterations;
-      }, 300, 300000);
-
-      Assert.assertTrue(doubleBuffer.getFlushIterations() > 0);
-    } finally {
-      stop();
-    }
-  }
-
-  /**
-   * This method adds bucketCount createBucket responses to the double
-   * buffer.
-   * @param volumeName
-   * @param bucketCount
-   */
-  public void doTransactions(String volumeName, int bucketCount) {
-    createVolume(volumeName, trxId.incrementAndGet());
-    for (int i=0; i< bucketCount; i++) {
-      createBucket(volumeName, UUID.randomUUID().toString(),
-          trxId.incrementAndGet());
-    }
-  }
-
-  /**
-   * Create OMVolumeCreateResponse for specified volume.
-   * @param volumeName
-   * @return OMVolumeCreateResponse
-   */
-  private OMClientResponse createVolume(String volumeName,
-      long transactionId) {
-
-    String admin = "ozone";
-    String owner = UUID.randomUUID().toString();
-    OzoneManagerProtocolProtos.OMRequest omRequest =
-        TestOMRequestUtils.createVolumeRequest(volumeName, admin, owner);
-
-    OMVolumeCreateRequest omVolumeCreateRequest =
-        new OMVolumeCreateRequest(omRequest);
-
-    return omVolumeCreateRequest.validateAndUpdateCache(ozoneManager,
-        transactionId, ozoneManagerDoubleBufferHelper);
-  }
-
-  /**
-   * Create OMBucketCreateResponse for specified volume and bucket.
-   * @param volumeName
-   * @param bucketName
-   * @return OMBucketCreateResponse
-   */
-  private OMBucketCreateResponse createBucket(String volumeName,
-      String bucketName, long transactionID)  {
-
-    OzoneManagerProtocolProtos.OMRequest omRequest =
-        TestOMRequestUtils.createBucketRequest(bucketName, volumeName, false,
-            OzoneManagerProtocolProtos.StorageTypeProto.DISK);
-
-    OMBucketCreateRequest omBucketCreateRequest =
-        new OMBucketCreateRequest(omRequest);
-
-    return (OMBucketCreateResponse) omBucketCreateRequest
-        .validateAndUpdateCache(ozoneManager, transactionID,
-            ozoneManagerDoubleBufferHelper);
-
-  }
-
-  /**
-   * Create OMBucketDeleteResponse for specified volume and bucket.
-   * @param volumeName
-   * @param bucketName
-   * @return OMBucketDeleteResponse
-   */
-  private OMBucketDeleteResponse deleteBucket(String volumeName,
-      String bucketName) {
-    return new OMBucketDeleteResponse(volumeName, bucketName,
-        OMResponse.newBuilder()
-            .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket)
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setDeleteBucketResponse(DeleteBucketResponse.newBuilder().build())
-            .build());
-  }
-
-
-}
-
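The waitFor thresholds in the two mix-transaction tests above follow from a small piece of arithmetic: per volume, bucketCount creates, one delete for every second create, plus the createVolume transaction itself. A worked sketch (illustrative class and method names; the asserted values match the counts in the deleted tests):

/** Sketch: the expected-transaction arithmetic behind the waitFor checks. */
final class TxnCountSketch {
  // Per volume: bucketsPerVolume creates + bucketsPerVolume/2 deletes
  // + 1 createVolume transaction.
  static long expectedTxns(int bucketsPerVolume, int volumes) {
    return (long) volumes * (bucketsPerVolume + bucketsPerVolume / 2 + 1);
  }

  public static void main(String[] args) {
    assert expectedTxns(10, 1) == 16; // single-volume test: 10 + 5 + 1
    assert expectedTxns(10, 2) == 32; // parallel test: 20 + 10 + 2
  }
}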
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
deleted file mode 100644
index c04fba2..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.ratis;
-
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.Collections;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.ha.OMNodeDetails;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.util.LifeCycle;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-import org.slf4j.LoggerFactory;
-
-import static org.junit.Assert.assertFalse;
-import static org.mockito.Mockito.when;
-
-/**
- * Test OM Ratis server.
- */
-public class TestOzoneManagerRatisServer {
-
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OzoneConfiguration conf;
-  private OzoneManagerRatisServer omRatisServer;
-  private OzoneManagerRatisClient omRatisClient;
-  private String omID;
-  private String clientId = UUID.randomUUID().toString();
-  private static final long LEADER_ELECTION_TIMEOUT = 500L;
-  private OMMetadataManager omMetadataManager;
-  private OzoneManager ozoneManager;
-  private OMNodeDetails omNodeDetails;
-
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    omID = UUID.randomUUID().toString();
-    final String path = GenericTestUtils.getTempPath(omID);
-    Path metaDirPath = Paths.get(path, "om-meta");
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
-    conf.setTimeDuration(
-        OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
-        LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS);
-    int ratisPort = conf.getInt(
-        OMConfigKeys.OZONE_OM_RATIS_PORT_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT);
-    InetSocketAddress rpcAddress = new InetSocketAddress(
-        InetAddress.getLocalHost(), 0);
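-    // Use an ephemeral port (0) for the test RPC address.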
-    omNodeDetails = new OMNodeDetails.Builder()
-        .setRpcAddress(rpcAddress)
-        .setRatisPort(ratisPort)
-        .setOMNodeId(omID)
-        .setOMServiceId(OzoneConsts.OM_SERVICE_ID_DEFAULT)
-        .build();
-    // Starts a single node Ratis server
-    ozoneManager = Mockito.mock(OzoneManager.class);
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
-    OMRatisSnapshotInfo omRatisSnapshotInfo = new OMRatisSnapshotInfo(
-        folder.newFolder());
-    when(ozoneManager.getSnapshotInfo()).thenReturn(omRatisSnapshotInfo);
-    omRatisServer = OzoneManagerRatisServer.newOMRatisServer(conf, ozoneManager,
-        omNodeDetails, Collections.emptyList());
-    omRatisServer.start();
-    omRatisClient = OzoneManagerRatisClient.newOzoneManagerRatisClient(omID,
-        omRatisServer.getRaftGroup(), conf);
-    omRatisClient.connect();
-  }
-
-  @After
-  public void shutdown() {
-    if (omRatisServer != null) {
-      omRatisServer.stop();
-    }
-    if (omRatisClient != null) {
-      omRatisClient.close();
-    }
-  }
-
-  /**
-   * Starts an OM Ratis server and checks its state.
-   */
-  @Test
-  public void testStartOMRatisServer() throws Exception {
-    Assert.assertEquals("Ratis Server should be in running state",
-        LifeCycle.State.RUNNING, omRatisServer.getServerState());
-  }
-
-  @Test
-  public void testLoadSnapshotInfoOnStart() throws Exception {
-    // Stop the Ratis server and manually update the snapshotInfo.
-    long oldSnapshotIndex = ozoneManager.saveRatisSnapshot();
-    ozoneManager.getSnapshotInfo().saveRatisSnapshotToDisk(oldSnapshotIndex);
-    omRatisServer.stop();
-    long newSnapshotIndex = oldSnapshotIndex + 100;
-    ozoneManager.getSnapshotInfo().saveRatisSnapshotToDisk(newSnapshotIndex);
-
-    // Start new Ratis server. It should pick up and load the new SnapshotInfo
-    omRatisServer = OzoneManagerRatisServer.newOMRatisServer(conf, ozoneManager,
-        omNodeDetails, Collections.emptyList());
-    omRatisServer.start();
-    long lastAppliedIndex = omRatisServer.getStateMachineLastAppliedIndex();
-
-    Assert.assertEquals(newSnapshotIndex, lastAppliedIndex);
-  }
-
-  /**
-   * Test that all of {@link OzoneManagerProtocolProtos.Type} enum values are
-   * categorized in {@link OmUtils#isReadOnly(OMRequest)}.
-   */
-  @Test
-  public void testIsReadOnlyCapturesAllCmdTypeEnums() throws Exception {
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(LoggerFactory.getLogger(OmUtils.class));
-    OzoneManagerProtocolProtos.Type[] cmdTypes =
-        OzoneManagerProtocolProtos.Type.values();
-
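-    // Every cmdType must be categorized by OmUtils#isReadOnly; an
-    // uncategorized type logs a warning, which must not appear here.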
-    for (OzoneManagerProtocolProtos.Type cmdtype : cmdTypes) {
-      OMRequest request = OMRequest.newBuilder()
-          .setCmdType(cmdtype)
-          .setClientId(clientId)
-          .build();
-      OmUtils.isReadOnly(request);
-      assertFalse(cmdtype + " is not categorized in " +
-              "OmUtils#isReadOnly",
-          logCapturer.getOutput().contains("CmdType " + cmdtype + " is not " +
-              "categorized as readOnly or not."));
-      logCapturer.clearOutput();
-    }
-  }
-
-  @Test
-  public void verifyRaftGroupIdGenerationWithDefaultOmServiceId() throws
-      Exception {
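-    // The Raft group id is a name-based UUID derived from the OM service id,
-    // so it is deterministic for a given service id.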
-    UUID uuid = UUID.nameUUIDFromBytes(OzoneConsts.OM_SERVICE_ID_DEFAULT
-        .getBytes());
-    RaftGroupId raftGroupId = omRatisServer.getRaftGroup().getGroupId();
-    Assert.assertEquals(uuid, raftGroupId.getUuid());
-    Assert.assertEquals(16, raftGroupId.toByteString().size());
-  }
-
-  @Test
-  public void verifyRaftGroupIdGenerationWithCustomOmServiceId() throws
-      Exception {
-    String customOmServiceId = "omSIdCustom123";
-    OzoneConfiguration newConf = new OzoneConfiguration();
-    String newOmId = UUID.randomUUID().toString();
-    String path = GenericTestUtils.getTempPath(newOmId);
-    Path metaDirPath = Paths.get(path, "om-meta");
-    newConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
-    newConf.setTimeDuration(
-        OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
-        LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS);
-    int ratisPort = 9873;
-    InetSocketAddress rpcAddress = new InetSocketAddress(
-        InetAddress.getLocalHost(), 0);
-    OMNodeDetails nodeDetails = new OMNodeDetails.Builder()
-        .setRpcAddress(rpcAddress)
-        .setRatisPort(ratisPort)
-        .setOMNodeId(newOmId)
-        .setOMServiceId(customOmServiceId)
-        .build();
-    // Starts a single node Ratis server
-    omRatisServer.stop();
-    OzoneManagerRatisServer newOmRatisServer = OzoneManagerRatisServer
-        .newOMRatisServer(newConf, ozoneManager, nodeDetails,
-            Collections.emptyList());
-    newOmRatisServer.start();
-    OzoneManagerRatisClient newOmRatisClient = OzoneManagerRatisClient
-        .newOzoneManagerRatisClient(
-            newOmId,
-            newOmRatisServer.getRaftGroup(), newConf);
-    newOmRatisClient.connect();
-
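-    // A custom service id must yield its group id by the same derivation.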
-    UUID uuid = UUID.nameUUIDFromBytes(customOmServiceId.getBytes());
-    RaftGroupId raftGroupId = newOmRatisServer.getRaftGroup().getGroupId();
-    Assert.assertEquals(uuid, raftGroupId.getUuid());
-    Assert.assertEquals(16, raftGroupId.toByteString().size());
-    newOmRatisServer.stop();
-  }
-
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java
deleted file mode 100644
index bdaee6e..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.request;
-
-import java.net.InetAddress;
-import java.util.UUID;
-
-import mockit.Mock;
-import mockit.MockUp;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import static org.mockito.Mockito.when;
-
-/**
- * Test OMClient Request with user information.
- */
-public class TestOMClientRequestWithUserInfo {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OzoneManager ozoneManager;
-  private OMMetrics omMetrics;
-  private OMMetadataManager omMetadataManager;
-  private UserGroupInformation userGroupInformation =
-      UserGroupInformation.createRemoteUser("temp");
-  private InetAddress inetAddress;
-
-  @Before
-  public void setup() throws Exception {
-    ozoneManager = Mockito.mock(OzoneManager.class);
-    omMetrics = OMMetrics.create();
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
-    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
-    inetAddress = InetAddress.getByName("127.0.0.1");
-
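-    // Fake the RPC-layer caller identity so that preExecute resolves the
-    // test user instead of a real RPC context.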
-    new MockUp<ProtobufRpcEngine.Server>() {
-      @Mock
-      public UserGroupInformation getRemoteUser() {
-        return userGroupInformation;
-      }
-
-      public InetAddress getRemoteAddress() {
-        return inetAddress;
-      }
-    };
-  }
-
-  @Test
-  public void testUserInfo() throws Exception {
-
-    String bucketName = UUID.randomUUID().toString();
-    String volumeName = UUID.randomUUID().toString();
-    OzoneManagerProtocolProtos.OMRequest omRequest =
-        TestOMRequestUtils.createBucketRequest(bucketName, volumeName, true,
-            OzoneManagerProtocolProtos.StorageTypeProto.DISK);
-
-    OMBucketCreateRequest omBucketCreateRequest =
-        new OMBucketCreateRequest(omRequest);
-
-    Assert.assertFalse(omRequest.hasUserInfo());
-
-    OzoneManagerProtocolProtos.OMRequest modifiedRequest =
-        omBucketCreateRequest.preExecute(ozoneManager);
-
-    Assert.assertTrue(modifiedRequest.hasUserInfo());
-
-    // Now pass the modified request to OMBucketCreateRequest and check the
-    // ugi and remote address.
-    omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest);
-
-    InetAddress remoteAddress = omBucketCreateRequest.getRemoteAddress();
-    UserGroupInformation ugi = omBucketCreateRequest.createUGI();
-
-
-    // Check that the original user info and remote address are preserved
-    // once the OMRequest user info is converted back to a UGI and
-    // InetAddress.
-    Assert.assertEquals(inetAddress.getHostAddress(),
-        remoteAddress.getHostAddress());
-    Assert.assertEquals(userGroupInformation.getUserName(), ugi.getUserName());
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
deleted file mode 100644
index 472d46a..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ /dev/null
@@ -1,579 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.request;
-
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-
-import com.google.common.base.Optional;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .MultipartUploadAbortRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .MultipartCommitUploadPartRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .MultipartUploadCompleteRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .MultipartInfoInitiateRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .SetVolumePropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .AddAclRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .RemoveAclRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .SetAclRequest;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType;
-import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType;
-
-import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-/**
- * Helper class to test OMClientRequest classes.
- */
-public final class TestOMRequestUtils {
-
-  private TestOMRequestUtils() {
-    //Do nothing
-  }
-
-  /**
-   * Adds a volume entry to the OM DB and a bucket entry to the bucket
-   * table cache.
-   * @param volumeName volume to add
-   * @param bucketName bucket to add
-   * @param omMetadataManager metadata manager to update
-   * @throws Exception
-   */
-  public static void addVolumeAndBucketToDB(String volumeName,
-      String bucketName, OMMetadataManager omMetadataManager) throws Exception {
-
-    addVolumeToDB(volumeName, omMetadataManager);
-
-    OmBucketInfo omBucketInfo =
-        OmBucketInfo.newBuilder().setVolumeName(volumeName)
-            .setBucketName(bucketName).setCreationTime(Time.now()).build();
-
-    // Add to cache.
-    omMetadataManager.getBucketTable().addCacheEntry(
-        new CacheKey<>(omMetadataManager.getBucketKey(volumeName, bucketName)),
-        new CacheValue<>(Optional.of(omBucketInfo), 1L));
-  }
-
-  /**
-   * Adds a key entry to the key table. If the openKeyTable flag is true,
-   * the entry is added to the openKeyTable, otherwise to the keyTable.
-   * @param openKeyTable whether to add the entry to the openKeyTable
-   * @param volumeName volume the key belongs to
-   * @param bucketName bucket the key belongs to
-   * @param keyName key to add
-   * @param clientID client id used for the open key
-   * @param replicationType replication type of the key
-   * @param replicationFactor replication factor of the key
-   * @param omMetadataManager metadata manager to update
-   * @throws Exception
-   */
-  @SuppressWarnings("parameterNumber")
-  public static void addKeyToTable(boolean openKeyTable, String volumeName,
-      String bucketName,
-      String keyName, long clientID,
-      HddsProtos.ReplicationType replicationType,
-      HddsProtos.ReplicationFactor replicationFactor,
-      OMMetadataManager omMetadataManager) throws Exception {
-
-
-    OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName,
-        replicationType, replicationFactor);
-
-    if (openKeyTable) {
-      omMetadataManager.getOpenKeyTable().put(
-          omMetadataManager.getOpenKey(volumeName, bucketName, keyName,
-              clientID), omKeyInfo);
-    } else {
-      omMetadataManager.getKeyTable().put(omMetadataManager.getOzoneKey(
-          volumeName, bucketName, keyName), omKeyInfo);
-    }
-
-  }
-
-  /**
-   * Adds a key entry to the key table cache.
-   * @param volumeName volume the key belongs to
-   * @param bucketName bucket the key belongs to
-   * @param keyName key to add
-   * @param replicationType replication type of the key
-   * @param replicationFactor replication factor of the key
-   * @param omMetadataManager metadata manager to update
-   */
-  @SuppressWarnings("parameterNumber")
-  public static void addKeyToTableCache(String volumeName,
-      String bucketName,
-      String keyName,
-      HddsProtos.ReplicationType replicationType,
-      HddsProtos.ReplicationFactor replicationFactor,
-      OMMetadataManager omMetadataManager) {
-
-
-    OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName,
-        replicationType, replicationFactor);
-
-    omMetadataManager.getKeyTable().addCacheEntry(
-        new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName,
-            keyName)), new CacheValue<>(Optional.of(omKeyInfo),
-            1L));
-
-  }
-
-  private OmKeyInfo createKeyInfo(String volumeName, String bucketName,
-      String keyName, HddsProtos.ReplicationType replicationType,
-      HddsProtos.ReplicationFactor replicationFactor) {
-    return new OmKeyInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
-        .setCreationTime(Time.now())
-        .setModificationTime(Time.now())
-        .setDataSize(1000L)
-        .setReplicationType(replicationType)
-        .setReplicationFactor(replicationFactor).build();
-  }
-
-
-  /**
-   * Creates an OmKeyInfo with test defaults.
-   */
-  public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
-      String keyName, HddsProtos.ReplicationType replicationType,
-      HddsProtos.ReplicationFactor replicationFactor) {
-    return new OmKeyInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
-        .setCreationTime(Time.now())
-        .setModificationTime(Time.now())
-        .setDataSize(1000L)
-        .setReplicationType(replicationType)
-        .setReplicationFactor(replicationFactor).build();
-  }
-
-
-  /**
-   * Adds a volume creation entry to the OM DB.
-   * @param volumeName volume to add
-   * @param omMetadataManager metadata manager to update
-   * @throws Exception
-   */
-  public static void addVolumeToDB(String volumeName,
-      OMMetadataManager omMetadataManager) throws Exception {
-    addVolumeToDB(volumeName, UUID.randomUUID().toString(), omMetadataManager);
-  }
-
-  public static void addS3BucketToDB(String volumeName, String s3BucketName,
-      OMMetadataManager omMetadataManager) throws Exception {
-    omMetadataManager.getS3Table().put(s3BucketName,
-        S3BucketCreateRequest.formatS3MappingName(volumeName, s3BucketName));
-  }
-
-  /**
-   * Adds a volume creation entry to the OM DB and table cache.
-   * @param volumeName volume to add
-   * @param ownerName admin and owner of the volume
-   * @param omMetadataManager metadata manager to update
-   * @throws Exception
-   */
-  public static void addVolumeToDB(String volumeName, String ownerName,
-      OMMetadataManager omMetadataManager) throws Exception {
-    OmVolumeArgs omVolumeArgs =
-        OmVolumeArgs.newBuilder().setCreationTime(Time.now())
-            .setVolume(volumeName).setAdminName(ownerName)
-            .setOwnerName(ownerName).build();
-    omMetadataManager.getVolumeTable().put(
-        omMetadataManager.getVolumeKey(volumeName), omVolumeArgs);
-
-    // Add to cache.
-    omMetadataManager.getVolumeTable().addCacheEntry(
-        new CacheKey<>(omMetadataManager.getVolumeKey(volumeName)),
-            new CacheValue<>(Optional.of(omVolumeArgs), 1L));
-  }
-
-
-  public static OzoneManagerProtocolProtos.OMRequest createBucketRequest(
-      String bucketName, String volumeName, boolean isVersionEnabled,
-      OzoneManagerProtocolProtos.StorageTypeProto storageTypeProto) {
-    OzoneManagerProtocolProtos.BucketInfo bucketInfo =
-        OzoneManagerProtocolProtos.BucketInfo.newBuilder()
-            .setBucketName(bucketName)
-            .setVolumeName(volumeName)
-            .setIsVersionEnabled(isVersionEnabled)
-            .setStorageType(storageTypeProto)
-            .addAllMetadata(getMetadataList()).build();
-    OzoneManagerProtocolProtos.CreateBucketRequest.Builder req =
-        OzoneManagerProtocolProtos.CreateBucketRequest.newBuilder();
-    req.setBucketInfo(bucketInfo);
-    return OzoneManagerProtocolProtos.OMRequest.newBuilder()
-        .setCreateBucketRequest(req)
-        .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
-        .setClientId(UUID.randomUUID().toString()).build();
-  }
-
-  public static OzoneManagerProtocolProtos.OMRequest createS3BucketRequest(
-      String userName, String s3BucketName) {
-    OzoneManagerProtocolProtos.S3CreateBucketRequest request =
-        OzoneManagerProtocolProtos.S3CreateBucketRequest.newBuilder()
-            .setUserName(userName)
-            .setS3Bucketname(s3BucketName).build();
-
-    return OzoneManagerProtocolProtos.OMRequest.newBuilder()
-        .setCreateS3BucketRequest(request)
-        .setCmdType(OzoneManagerProtocolProtos.Type.CreateS3Bucket)
-        .setClientId(UUID.randomUUID().toString()).build();
-  }
-
-  public static OzoneManagerProtocolProtos.OMRequest deleteS3BucketRequest(
-      String s3BucketName) {
-    OzoneManagerProtocolProtos.S3DeleteBucketRequest request =
-        OzoneManagerProtocolProtos.S3DeleteBucketRequest.newBuilder()
-            .setS3BucketName(s3BucketName).build();
-    return OzoneManagerProtocolProtos.OMRequest.newBuilder()
-        .setDeleteS3BucketRequest(request)
-        .setCmdType(OzoneManagerProtocolProtos.Type.DeleteS3Bucket)
-        .setClientId(UUID.randomUUID().toString()).build();
-  }
-
-  public static List<HddsProtos.KeyValue> getMetadataList() {
-    List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
-    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue(
-        "value1").build());
-    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue(
-        "value2").build());
-    return metadataList;
-  }
-
-
-  /**
-   * Adds a user entry to the user table.
-   * @param volumeName volume owned by the user
-   * @param ownerName user to add
-   * @param omMetadataManager metadata manager to update
-   * @throws Exception
-   */
-  public static void addUserToDB(String volumeName, String ownerName,
-      OMMetadataManager omMetadataManager) throws Exception {
-    OzoneManagerProtocolProtos.UserVolumeInfo userVolumeInfo =
-        OzoneManagerProtocolProtos.UserVolumeInfo
-            .newBuilder()
-            .addVolumeNames(volumeName)
-            .setObjectID(1)
-            .setUpdateID(1)
-            .build();
-    omMetadataManager.getUserTable().put(
-        omMetadataManager.getUserKey(ownerName), userVolumeInfo);
-  }
-
-  /**
-   * Creates an OMRequest for a SetVolumeProperty request with the owner set.
-   * @param volumeName volume to update
-   * @param newOwner new owner of the volume
-   * @return OMRequest
-   */
-  public static OMRequest createSetVolumePropertyRequest(String volumeName,
-      String newOwner) {
-    SetVolumePropertyRequest setVolumePropertyRequest =
-        SetVolumePropertyRequest.newBuilder().setVolumeName(volumeName)
-            .setOwnerName(newOwner).build();
-
-    return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
-        .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty)
-        .setSetVolumePropertyRequest(setVolumePropertyRequest).build();
-  }
-
-
-  /**
-   * Creates an OMRequest for a SetVolumeProperty request with the quota set.
-   * @param volumeName volume to update
-   * @param quota quota in bytes
-   * @return OMRequest
-   */
-  public static OMRequest createSetVolumePropertyRequest(String volumeName,
-      long quota) {
-    SetVolumePropertyRequest setVolumePropertyRequest =
-        SetVolumePropertyRequest.newBuilder().setVolumeName(volumeName)
-            .setQuotaInBytes(quota).build();
-
-    return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
-        .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty)
-        .setSetVolumePropertyRequest(setVolumePropertyRequest).build();
-  }
-
-  public static OMRequest createVolumeAddAclRequest(String volumeName,
-      OzoneAcl acl) {
-    AddAclRequest.Builder addAclRequestBuilder = AddAclRequest.newBuilder();
-    addAclRequestBuilder.setObj(OzoneObj.toProtobuf(new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setResType(ResourceType.VOLUME)
-        .setStoreType(StoreType.OZONE)
-        .build()));
-    if (acl != null) {
-      addAclRequestBuilder.setAcl(OzoneAcl.toProtobuf(acl));
-    }
-    return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
-        .setCmdType(OzoneManagerProtocolProtos.Type.AddAcl)
-        .setAddAclRequest(addAclRequestBuilder.build()).build();
-  }
-
-  public static OMRequest createVolumeRemoveAclRequest(String volumeName,
-      OzoneAcl acl) {
-    RemoveAclRequest.Builder removeAclRequestBuilder =
-        RemoveAclRequest.newBuilder();
-    removeAclRequestBuilder.setObj(OzoneObj.toProtobuf(
-        new OzoneObjInfo.Builder()
-            .setVolumeName(volumeName)
-            .setResType(ResourceType.VOLUME)
-            .setStoreType(StoreType.OZONE)
-            .build()));
-    if (acl != null) {
-      removeAclRequestBuilder.setAcl(OzoneAcl.toProtobuf(acl));
-    }
-    return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
-        .setCmdType(OzoneManagerProtocolProtos.Type.RemoveAcl)
-        .setRemoveAclRequest(removeAclRequestBuilder.build()).build();
-  }
-
-  public static OMRequest createVolumeSetAclRequest(String volumeName,
-      List<OzoneAcl> acls) {
-    SetAclRequest.Builder setAclRequestBuilder = SetAclRequest.newBuilder();
-    setAclRequestBuilder.setObj(OzoneObj.toProtobuf(new OzoneObjInfo.Builder()
-        .setVolumeName(volumeName)
-        .setResType(ResourceType.VOLUME)
-        .setStoreType(StoreType.OZONE)
-        .build()));
-    if (acls != null) {
-      acls.forEach(
-          acl -> setAclRequestBuilder.addAcl(OzoneAcl.toProtobuf(acl)));
-    }
-
-    return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
-        .setCmdType(OzoneManagerProtocolProtos.Type.SetAcl)
-        .setSetAclRequest(setAclRequestBuilder.build()).build();
-  }
-
-  /**
-   * Deletes a key from the key table and adds it to the deleted table.
-   * @return the deleted key name
-   */
-  public static String deleteKey(String ozoneKey,
-      OMMetadataManager omMetadataManager) throws IOException {
-    // Retrieve the keyInfo
-    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-
-    // Delete key from KeyTable and put in DeletedKeyTable
-    omMetadataManager.getKeyTable().delete(ozoneKey);
-
-    RepeatedOmKeyInfo repeatedOmKeyInfo =
-        omMetadataManager.getDeletedTable().get(ozoneKey);
-
-    repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(omKeyInfo,
-        repeatedOmKeyInfo);
-
-    omMetadataManager.getDeletedTable().put(ozoneKey, repeatedOmKeyInfo);
-
-    return ozoneKey;
-  }
-
-  /**
-   * Creates an OMRequest which encapsulates an InitiateMultipartUpload
-   * request.
-   * @param volumeName volume the key belongs to
-   * @param bucketName bucket the key belongs to
-   * @param keyName key to upload
-   */
-  public static OMRequest createInitiateMPURequest(String volumeName,
-      String bucketName, String keyName) {
-    MultipartInfoInitiateRequest
-        multipartInfoInitiateRequest =
-        MultipartInfoInitiateRequest.newBuilder().setKeyArgs(
-            KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName)
-                .setBucketName(bucketName)).build();
-
-    return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
-        .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload)
-        .setInitiateMultiPartUploadRequest(multipartInfoInitiateRequest)
-        .build();
-  }
-
-  /**
-   * Creates an OMRequest which encapsulates a MultipartCommitUploadPart
-   * request.
-   * @param volumeName volume the key belongs to
-   * @param bucketName bucket the key belongs to
-   * @param keyName key being uploaded
-   */
-  public static OMRequest createCommitPartMPURequest(String volumeName,
-      String bucketName, String keyName, long clientID, long size,
-      String multipartUploadID, int partNumber) {
-
-    // The data size is a dummy value; unit tests do not need real sizes.
-    KeyArgs.Builder keyArgs =
-        KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName)
-            .setBucketName(bucketName)
-            .setDataSize(size)
-            .setMultipartNumber(partNumber)
-            .setMultipartUploadID(multipartUploadID)
-            .addAllKeyLocations(new ArrayList<>());
-    // An empty key-location list is sufficient for these unit tests.
-
-    MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
-        MultipartCommitUploadPartRequest.newBuilder()
-            .setKeyArgs(keyArgs).setClientID(clientID).build();
-
-    return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
-        .setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload)
-        .setCommitMultiPartUploadRequest(multipartCommitUploadPartRequest)
-        .build();
-  }
-
-  public static OMRequest createAbortMPURequest(String volumeName,
-      String bucketName, String keyName, String multipartUploadID) {
-    KeyArgs.Builder keyArgs =
-        KeyArgs.newBuilder().setVolumeName(volumeName)
-            .setKeyName(keyName)
-            .setBucketName(bucketName)
-            .setMultipartUploadID(multipartUploadID);
-
-    MultipartUploadAbortRequest multipartUploadAbortRequest =
-        MultipartUploadAbortRequest.newBuilder().setKeyArgs(keyArgs).build();
-
-    return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
-        .setCmdType(OzoneManagerProtocolProtos.Type.AbortMultiPartUpload)
-        .setAbortMultiPartUploadRequest(multipartUploadAbortRequest).build();
-  }
-
-  public static OMRequest createCompleteMPURequest(String volumeName,
-      String bucketName, String keyName, String multipartUploadID,
-      List<OzoneManagerProtocolProtos.Part> partList) {
-    KeyArgs.Builder keyArgs =
-        KeyArgs.newBuilder().setVolumeName(volumeName)
-            .setKeyName(keyName)
-            .setBucketName(bucketName)
-            .setMultipartUploadID(multipartUploadID);
-
-    MultipartUploadCompleteRequest multipartUploadCompleteRequest =
-        MultipartUploadCompleteRequest.newBuilder().setKeyArgs(keyArgs)
-            .addAllPartsList(partList).build();
-
-    return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
-        .setCmdType(OzoneManagerProtocolProtos.Type.CompleteMultiPartUpload)
-        .setCompleteMultiPartUploadRequest(multipartUploadCompleteRequest)
-        .build();
-
-  }
-
-  /**
-   * Creates an OMRequest for create volume.
-   * @param volumeName volume to create
-   * @param adminName admin of the volume
-   * @param ownerName owner of the volume
-   * @return OMRequest
-   */
-  public static OMRequest createVolumeRequest(String volumeName,
-      String adminName, String ownerName) {
-    OzoneManagerProtocolProtos.VolumeInfo volumeInfo =
-        OzoneManagerProtocolProtos.VolumeInfo.newBuilder().setVolume(volumeName)
-        .setAdminName(adminName).setOwnerName(ownerName).build();
-    OzoneManagerProtocolProtos.CreateVolumeRequest createVolumeRequest =
-        OzoneManagerProtocolProtos.CreateVolumeRequest.newBuilder()
-            .setVolumeInfo(volumeInfo).build();
-
-    return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
-        .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume)
-        .setCreateVolumeRequest(createVolumeRequest).build();
-  }
-
-  /**
-   * Creates an OMRequest for delete bucket.
-   * @param volumeName volume the bucket belongs to
-   * @param bucketName bucket to delete
-   */
-  public static OMRequest createDeleteBucketRequest(String volumeName,
-      String bucketName) {
-    return OMRequest.newBuilder().setDeleteBucketRequest(
-        OzoneManagerProtocolProtos.DeleteBucketRequest.newBuilder()
-            .setBucketName(bucketName).setVolumeName(volumeName))
-        .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket)
-        .setClientId(UUID.randomUUID().toString()).build();
-  }
-
-  /**
-   * Adds the bucket information to the OzoneManager DB and cache.
-   * @param omMetadataManager metadata manager to update
-   * @param omBucketInfo bucket information to add
-   * @throws IOException
-   */
-  public static void addBucketToOM(OMMetadataManager omMetadataManager,
-      OmBucketInfo omBucketInfo) throws IOException {
-    String dbBucketKey =
-        omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-            omBucketInfo.getBucketName());
-    omMetadataManager.getBucketTable().put(dbBucketKey, omBucketInfo);
-    omMetadataManager.getBucketTable().addCacheEntry(
-        new CacheKey<>(dbBucketKey),
-        new CacheValue<>(Optional.of(omBucketInfo), 1L));
-  }
-
-  /**
-   * Adds the volume information to the OzoneManager DB and cache.
-   * @param omMetadataManager metadata manager to update
-   * @param omVolumeArgs volume information to add
-   * @throws IOException
-   */
-  public static void addVolumeToOM(OMMetadataManager omMetadataManager,
-      OmVolumeArgs omVolumeArgs) throws IOException {
-    String dbVolumeKey =
-        omMetadataManager.getVolumeKey(omVolumeArgs.getVolume());
-    omMetadataManager.getVolumeTable().put(dbVolumeKey, omVolumeArgs);
-    omMetadataManager.getVolumeTable().addCacheEntry(
-        new CacheKey<>(dbVolumeKey),
-        new CacheValue<>(Optional.of(omVolumeArgs), 1L));
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
deleted file mode 100644
index 1ddd753..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.request.bucket;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.when;
-
-/**
- * Base test class for Bucket request.
- */
-@SuppressWarnings("visibilityModifier")
-public class TestBucketRequest {
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  protected OzoneManager ozoneManager;
-  protected OMMetrics omMetrics;
-  protected OMMetadataManager omMetadataManager;
-  protected AuditLogger auditLogger;
-
-  // A no-op double buffer helper; these tests do not flush responses.
-  protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
-      (response, transactionIndex) -> null;
-
-
-  @Before
-  public void setup() throws Exception {
-
-    ozoneManager = Mockito.mock(OzoneManager.class);
-    omMetrics = OMMetrics.create();
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
-    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
-    auditLogger = Mockito.mock(AuditLogger.class);
-    when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
-    Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
-  }
-
-  @After
-  public void stop() {
-    omMetrics.unRegister();
-    Mockito.framework().clearInlineMocks();
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
deleted file mode 100644
index 552aa15..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.request.bucket;
-
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .StorageTypeProto;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.util.Time;
-
-/**
- * Tests OMBucketCreateRequest class, which handles CreateBucket request.
- */
-public class TestOMBucketCreateRequest extends TestBucketRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    doPreExecute(volumeName, bucketName);
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    OMBucketCreateRequest omBucketCreateRequest = doPreExecute(volumeName,
-        bucketName);
-
-    doValidateAndUpdateCache(volumeName, bucketName,
-        omBucketCreateRequest.getOmRequest());
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithNoVolume() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    OMRequest originalRequest = TestOMRequestUtils.createBucketRequest(
-        bucketName, volumeName, false, StorageTypeProto.SSD);
-
-    OMBucketCreateRequest omBucketCreateRequest =
-        new OMBucketCreateRequest(originalRequest);
-
-    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
-
-    // As we have not yet called validateAndUpdateCache, get() should
-    // return null.
-
-    Assert.assertNull(omMetadataManager.getBucketTable().get(bucketKey));
-
-    OMClientResponse omClientResponse =
-        omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OMResponse omResponse = omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getCreateBucketResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omResponse.getStatus());
-
-    // As the request is invalid, the bucket table should not have an entry.
-    Assert.assertNull(omMetadataManager.getBucketTable().get(bucketKey));
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithBucketAlreadyExists()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    OMBucketCreateRequest omBucketCreateRequest =
-        doPreExecute(volumeName, bucketName);
-
-    doValidateAndUpdateCache(volumeName, bucketName,
-        omBucketCreateRequest.getOmRequest());
-
-    // Try create same bucket again
-    OMClientResponse omClientResponse =
-        omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 2,
-            ozoneManagerDoubleBufferHelper);
-
-    OMResponse omResponse = omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getCreateBucketResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_ALREADY_EXISTS,
-        omResponse.getStatus());
-  }
-
-
-  private OMBucketCreateRequest doPreExecute(String volumeName,
-      String bucketName) throws Exception {
-    addCreateVolumeToTable(volumeName, omMetadataManager);
-    OMRequest originalRequest =
-        TestOMRequestUtils.createBucketRequest(bucketName, volumeName, false,
-            StorageTypeProto.SSD);
-
-    OMBucketCreateRequest omBucketCreateRequest =
-        new OMBucketCreateRequest(originalRequest);
-
-    OMRequest modifiedRequest = omBucketCreateRequest.preExecute(ozoneManager);
-    verifyRequest(modifiedRequest, originalRequest);
-    return new OMBucketCreateRequest(modifiedRequest);
-  }
-
-  private void doValidateAndUpdateCache(String volumeName, String bucketName,
-      OMRequest modifiedRequest) throws Exception {
-    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
-
-    // As we have not yet called validateAndUpdateCache, get() should
-    // return null.
-
-    Assert.assertNull(omMetadataManager.getBucketTable().get(bucketKey));
-    OMBucketCreateRequest omBucketCreateRequest =
-        new OMBucketCreateRequest(modifiedRequest);
-
-
-    OMClientResponse omClientResponse =
-        omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    // After validateAndUpdateCache the entry should be in the cache, so
-    // get() should return a non-null value.
-    OmBucketInfo omBucketInfo =
-        omMetadataManager.getBucketTable().get(bucketKey);
-    Assert.assertNotNull(omMetadataManager.getBucketTable().get(bucketKey));
-
-    // Verify the table data against the request data.
-    Assert.assertEquals(OmBucketInfo.getFromProtobuf(
-        modifiedRequest.getCreateBucketRequest().getBucketInfo()),
-        omBucketInfo);
-
-    // verify OMResponse.
-    verifySuccessCreateBucketResponse(omClientResponse.getOMResponse());
-
-  }
-
-
-  private void verifyRequest(OMRequest modifiedOmRequest,
-      OMRequest originalRequest) {
-    OzoneManagerProtocolProtos.BucketInfo original =
-        originalRequest.getCreateBucketRequest().getBucketInfo();
-    OzoneManagerProtocolProtos.BucketInfo updated =
-        modifiedOmRequest.getCreateBucketRequest().getBucketInfo();
-
-    Assert.assertEquals(original.getBucketName(), updated.getBucketName());
-    Assert.assertEquals(original.getVolumeName(), updated.getVolumeName());
-    Assert.assertEquals(original.getIsVersionEnabled(),
-        updated.getIsVersionEnabled());
-    Assert.assertEquals(original.getStorageType(), updated.getStorageType());
-    Assert.assertEquals(original.getMetadataList(), updated.getMetadataList());
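-    // preExecute stamps a fresh creation time, so it must differ.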
-    Assert.assertNotEquals(original.getCreationTime(),
-        updated.getCreationTime());
-  }
-
-  public static void verifySuccessCreateBucketResponse(OMResponse omResponse) {
-    Assert.assertNotNull(omResponse.getCreateBucketResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Type.CreateBucket,
-        omResponse.getCmdType());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omResponse.getStatus());
-  }
-
-  public static void addCreateVolumeToTable(String volumeName,
-      OMMetadataManager omMetadataManager) throws Exception {
-    OmVolumeArgs omVolumeArgs =
-        OmVolumeArgs.newBuilder().setCreationTime(Time.now())
-            .setVolume(volumeName).setAdminName(UUID.randomUUID().toString())
-            .setOwnerName(UUID.randomUUID().toString()).build();
-    TestOMRequestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs);
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java
deleted file mode 100644
index 6b4bf7a..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.request.bucket;
-
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .DeleteBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-
-/**
- * Tests OMBucketDeleteRequest class which handles DeleteBucket request.
- */
-public class TestOMBucketDeleteRequest extends TestBucketRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    OMRequest omRequest =
-        createDeleteBucketRequest(UUID.randomUUID().toString(),
-            UUID.randomUUID().toString());
-
-    OMBucketDeleteRequest omBucketDeleteRequest =
-        new OMBucketDeleteRequest(omRequest);
-
-    // preExecute adds user info, so the returned request must differ.
-    Assert.assertNotEquals(omRequest,
-        omBucketDeleteRequest.preExecute(ozoneManager));
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    OMRequest omRequest =
-        createDeleteBucketRequest(volumeName, bucketName);
-
-    OMBucketDeleteRequest omBucketDeleteRequest =
-        new OMBucketDeleteRequest(omRequest);
-
-    // Create Volume and bucket entries in DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1,
-        ozoneManagerDoubleBufferHelper);
-
-    Assert.assertNull(omMetadataManager.getBucketTable().get(
-        omMetadataManager.getBucketKey(volumeName, bucketName)));
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheFailure() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    OMRequest omRequest =
-        createDeleteBucketRequest(volumeName, bucketName);
-
-    OMBucketDeleteRequest omBucketDeleteRequest =
-        new OMBucketDeleteRequest(omRequest);
-
-
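-    // No volume or bucket was added to the DB, so the delete must fail.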
-    OMClientResponse omClientResponse =
-        omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertNull(omMetadataManager.getBucketTable().get(
-        omMetadataManager.getBucketKey(volumeName, bucketName)));
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-  }
-
-
-
-
-  private OMRequest createDeleteBucketRequest(String volumeName,
-      String bucketName) {
-    return OMRequest.newBuilder().setDeleteBucketRequest(
-        DeleteBucketRequest.newBuilder()
-            .setBucketName(bucketName).setVolumeName(volumeName))
-        .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket)
-        .setClientId(UUID.randomUUID().toString()).build();
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java
deleted file mode 100644
index 7df0667..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.request.bucket;
-
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.
-    BucketArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .SetBucketPropertyRequest;
-
-/**
- * Tests OMBucketSetPropertyRequest class which handles OMSetBucketProperty
- * request.
- */
-public class TestOMBucketSetPropertyRequest extends TestBucketRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-
-    OMRequest omRequest = createSetBucketPropertyRequest(volumeName,
-        bucketName, true);
-
-    OMBucketSetPropertyRequest omBucketSetPropertyRequest =
-        new OMBucketSetPropertyRequest(omRequest);
-
-    // preExecute adds user info, so the returned request must differ.
-    Assert.assertNotEquals(omRequest,
-        omBucketSetPropertyRequest.preExecute(ozoneManager));
-  }
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-
-    OMRequest omRequest = createSetBucketPropertyRequest(volumeName,
-        bucketName, true);
-
-    // Create with default BucketInfo values
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    OMBucketSetPropertyRequest omBucketSetPropertyRequest =
-        new OMBucketSetPropertyRequest(omRequest);
-
-    OMClientResponse omClientResponse =
-        omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertTrue(
-        omMetadataManager.getBucketTable().get(
-            omMetadataManager.getBucketKey(volumeName, bucketName))
-            .getIsVersionEnabled());
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheFails() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-
-    OMRequest omRequest = createSetBucketPropertyRequest(volumeName,
-        bucketName, true);
-
-
-    OMBucketSetPropertyRequest omBucketSetPropertyRequest =
-        new OMBucketSetPropertyRequest(omRequest);
-
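-    // The bucket was never created, so setting properties must fail.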
-    OMClientResponse omClientResponse =
-        omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-    Assert.assertNull(omMetadataManager.getBucketTable().get(
-        omMetadataManager.getBucketKey(volumeName, bucketName)));
-
-  }
-
-  private OMRequest createSetBucketPropertyRequest(String volumeName,
-      String bucketName, boolean isVersionEnabled) {
-    return OMRequest.newBuilder().setSetBucketPropertyRequest(
-        SetBucketPropertyRequest.newBuilder().setBucketArgs(
-            BucketArgs.newBuilder().setBucketName(bucketName)
-                .setVolumeName(volumeName)
-                .setIsVersionEnabled(isVersionEnabled).build()))
-        .setCmdType(OzoneManagerProtocolProtos.Type.SetBucketProperty)
-        .setClientId(UUID.randomUUID().toString()).build();
-  }
-}
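Every write-request test above follows the same two-phase Ozone Manager flow: preExecute stamps server-side fields (user info, timestamps, allocated blocks) into a fresh OMRequest, and validateAndUpdateCache replays the modified request against the OM metadata cache. A minimal sketch of that flow, reusing only names that appear in the tests above (the transaction index 1L is an arbitrary test value):

    // Phase 1: preExecute returns a new request with user info stamped in,
    // which is why the tests assert it differs from the original.
    OMRequest modified =
        new OMBucketSetPropertyRequest(omRequest).preExecute(ozoneManager);
    // Phase 2: replay the modified request against the metadata cache.
    OMClientResponse response = new OMBucketSetPropertyRequest(modified)
        .validateAndUpdateCache(ozoneManager, 1L,
            ozoneManagerDoubleBufferHelper);
    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
        response.getOMResponse().getStatus());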
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java
deleted file mode 100644
index b89c651..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains test classes for bucket requests.
- */
-package org.apache.hadoop.ozone.om.request.bucket;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java
deleted file mode 100644
index 4e93b13..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java
+++ /dev/null
@@ -1,337 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.file;
-
-import java.util.UUID;
-
-import org.apache.commons.lang.RandomStringUtils;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateDirectoryRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.when;
-
-/**
- * Test OM directory create request.
- */
-public class TestOMDirectoryCreateRequest {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OzoneManager ozoneManager;
-  private OMMetrics omMetrics;
-  private OMMetadataManager omMetadataManager;
-  private AuditLogger auditLogger;
-  // A no-op double buffer helper; these tests do not flush through Ratis.
-  private OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
-      ((response, transactionIndex) -> {
-        return null;
-      });
-
-  @Before
-  public void setup() throws Exception {
-    ozoneManager = Mockito.mock(OzoneManager.class);
-    omMetrics = OMMetrics.create();
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
-    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
-    auditLogger = Mockito.mock(AuditLogger.class);
-    when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
-    Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
-  }
-
-  @After
-  public void stop() {
-    omMetrics.unRegister();
-    Mockito.framework().clearInlineMocks();
-  }
-
-  @Test
-  public void testPreExecute() throws Exception {
-
-    String volumeName = "vol1";
-    String bucketName = "bucket1";
-    String keyName = "a/b/c";
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
-        keyName);
-    OMDirectoryCreateRequest omDirectoryCreateRequest =
-        new OMDirectoryCreateRequest(omRequest);
-
-    OMRequest modifiedOmRequest =
-        omDirectoryCreateRequest.preExecute(ozoneManager);
-
-    // preExecute modifies the original request, so the two must differ.
-    Assert.assertNotEquals(omRequest, modifiedOmRequest);
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-    String volumeName = "vol1";
-    String bucketName = "bucket1";
-    String keyName = RandomStringUtils.randomAlphabetic(5);
-    for (int i = 0; i < 3; i++) {
-      keyName += "/" + RandomStringUtils.randomAlphabetic(5);
-    }
-
-    // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
-        keyName);
-    OMDirectoryCreateRequest omDirectoryCreateRequest =
-        new OMDirectoryCreateRequest(omRequest);
-
-    OMRequest modifiedOmRequest =
-        omDirectoryCreateRequest.preExecute(ozoneManager);
-
-    omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest);
-
-    OMClientResponse omClientResponse =
-        omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-    Assert.assertNotNull(omMetadataManager.getKeyTable().get(
-        omMetadataManager.getOzoneDirKey(
-            volumeName, bucketName, keyName)));
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
-    String volumeName = "vol1";
-    String bucketName = "bucket1";
-    String keyName = RandomStringUtils.randomAlphabetic(5);
-    for (int i = 0; i < 3; i++) {
-      keyName += "/" + RandomStringUtils.randomAlphabetic(5);
-    }
-
-    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
-        keyName);
-    OMDirectoryCreateRequest omDirectoryCreateRequest =
-        new OMDirectoryCreateRequest(omRequest);
-
-    OMRequest modifiedOmRequest =
-        omDirectoryCreateRequest.preExecute(ozoneManager);
-
-    omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest);
-
-    OMClientResponse omClientResponse =
-        omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-    // Key should not exist in DB
-    Assert.assertNull(omMetadataManager.getKeyTable().get(
-        omMetadataManager.getOzoneDirKey(
-            volumeName, bucketName, keyName)));
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithSubDirectoryInPath()
-      throws Exception {
-    String volumeName = "vol1";
-    String bucketName = "bucket1";
-    String keyName = RandomStringUtils.randomAlphabetic(5);
-    for (int i = 0; i < 3; i++) {
-      keyName += "/" + RandomStringUtils.randomAlphabetic(5);
-    }
-
-    // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
-        keyName.substring(0, 12), 1L, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
-        keyName);
-    OMDirectoryCreateRequest omDirectoryCreateRequest =
-        new OMDirectoryCreateRequest(omRequest);
-
-    OMRequest modifiedOmRequest =
-        omDirectoryCreateRequest.preExecute(ozoneManager);
-
-    omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest);
-
-    OMClientResponse omClientResponse =
-        omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-
-    // Key should exist in DB and cache.
-    Assert.assertNotNull(omMetadataManager.getKeyTable().get(
-        omMetadataManager.getOzoneDirKey(
-            volumeName, bucketName, keyName)));
-    Assert.assertNotNull(omMetadataManager.getKeyTable().getCacheValue(
-        new CacheKey<>(omMetadataManager.getOzoneDirKey(
-            volumeName, bucketName, keyName))));
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithDirectoryAlreadyExists()
-      throws Exception {
-    String volumeName = "vol1";
-    String bucketName = "bucket1";
-    String keyName = RandomStringUtils.randomAlphabetic(5);
-    for (int i = 0; i < 3; i++) {
-      keyName += "/" + RandomStringUtils.randomAlphabetic(5);
-    }
-
-    // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
-        OzoneFSUtils.addTrailingSlashIfNeeded(keyName), 1L,
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
-        omMetadataManager);
-    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
-        keyName);
-    OMDirectoryCreateRequest omDirectoryCreateRequest =
-        new OMDirectoryCreateRequest(omRequest);
-
-    OMRequest modifiedOmRequest =
-        omDirectoryCreateRequest.preExecute(ozoneManager);
-
-    omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest);
-
-    OMClientResponse omClientResponse =
-        omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-
-    // Key should exist in DB
-    Assert.assertNotNull(omMetadataManager.getKeyTable().get(
-        omMetadataManager.getOzoneDirKey(
-            volumeName, bucketName, keyName)));
-
-    // As it already exists, it should not be in cache.
-    Assert.assertNull(omMetadataManager.getKeyTable().getCacheValue(
-        new CacheKey<>(omMetadataManager.getOzoneDirKey(
-            volumeName, bucketName, keyName))));
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithFilesInPath() throws Exception {
-    String volumeName = "vol1";
-    String bucketName = "bucket1";
-    String keyName = RandomStringUtils.randomAlphabetic(5);
-    for (int i = 0; i < 3; i++) {
-      keyName += "/" + RandomStringUtils.randomAlphabetic(5);
-    }
-
-    // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-    // Add a key with first two levels.
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
-        keyName.substring(0, 11), 1L, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
-        keyName);
-    OMDirectoryCreateRequest omDirectoryCreateRequest =
-        new OMDirectoryCreateRequest(omRequest);
-
-    OMRequest modifiedOmRequest =
-        omDirectoryCreateRequest.preExecute(ozoneManager);
-
-    omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest);
-
-    OMClientResponse omClientResponse =
-        omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(
-        OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS,
-        omClientResponse.getOMResponse().getStatus());
-
-    // Key should not exist in DB
-    Assert.assertNull(omMetadataManager.getKeyTable().get(
-        omMetadataManager.getOzoneDirKey(
-            volumeName, bucketName, keyName)));
-
-  }
-
-  /**
-   * Create an OMRequest which encapsulates a CreateDirectory request.
-   * @param volumeName volume the directory belongs to
-   * @param bucketName bucket the directory belongs to
-   * @param keyName directory path to create
-   * @return OMRequest wrapping the CreateDirectoryRequest
-   */
-  private OMRequest createDirectoryRequest(String volumeName,
-      String bucketName, String keyName) {
-    return OMRequest.newBuilder().setCreateDirectoryRequest(
-        CreateDirectoryRequest.newBuilder().setKeyArgs(
-            KeyArgs.newBuilder().setVolumeName(volumeName)
-                .setBucketName(bucketName).setKeyName(keyName)))
-        .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
-        .setClientId(UUID.randomUUID().toString()).build();
-  }
-
-}
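The directory tests distinguish a key that has reached the backing key table from one that is only in the table cache: entries applied through validateAndUpdateCache land in the cache first, while entries seeded directly into the DB via TestOMRequestUtils.addKeyToTable bypass it. A sketch of the two lookups, using only helpers that appear above (dirKey is a hypothetical local variable; the table get is expected to consult the cache before the DB):

    // Canonical directory key for the (volume, bucket, key) triple.
    String dirKey = omMetadataManager.getOzoneDirKey(
        volumeName, bucketName, keyName);
    // Table lookup: resolves through the cache, then the DB.
    Assert.assertNotNull(omMetadataManager.getKeyTable().get(dirKey));
    // Cache-only lookup: null for entries written straight to the DB.
    Assert.assertNotNull(omMetadataManager.getKeyTable().getCacheValue(
        new CacheKey<>(dirKey)));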
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
deleted file mode 100644
index 9639af0..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
+++ /dev/null
@@ -1,374 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.file;
-
-import java.util.List;
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateFileRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_A_FILE;
-
-/**
- * Tests OMFileCreateRequest.
- */
-public class TestOMFileCreateRequest extends TestOMKeyRequest {
-
-
-  @Test
-  public void testPreExecute() throws Exception {
-    OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName,
-        HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
-        false, false);
-
-    OMFileCreateRequest omFileCreateRequest =
-        new OMFileCreateRequest(omRequest);
-
-    OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
-    Assert.assertNotEquals(omRequest, modifiedOmRequest);
-
-
-    // Check that clientID and modification time are set.
-    Assert.assertTrue(modifiedOmRequest.hasCreateFileRequest());
-    Assert.assertTrue(
-        modifiedOmRequest.getCreateFileRequest().getClientID() > 0);
-
-    KeyArgs keyArgs = modifiedOmRequest.getCreateFileRequest().getKeyArgs();
-    Assert.assertNotNull(keyArgs);
-    Assert.assertTrue(keyArgs.getModificationTime() > 0);
-
-    // The data size is 100 and scmBlockSize defaults to 1000, so there
-    // should be only one block.
-    List<OzoneManagerProtocolProtos.KeyLocation> keyLocations =
-        keyArgs.getKeyLocationsList();
-
-    // KeyLocation should be set.
-    Assert.assertEquals(1, keyLocations.size());
-    Assert.assertEquals(containerID,
-        keyLocations.get(0).getBlockID().getContainerBlockID()
-            .getContainerID());
-    Assert.assertEquals(localID,
-        keyLocations.get(0).getBlockID().getContainerBlockID()
-            .getLocalID());
-    Assert.assertTrue(keyLocations.get(0).hasPipeline());
-
-    Assert.assertEquals(0, keyLocations.get(0).getOffset());
-
-    Assert.assertEquals(scmBlockSize, keyLocations.get(0).getLength());
-  }
-
-  @Test
-  public void testPreExecuteWithBlankKey() throws Exception {
-    OMRequest omRequest = createFileRequest(volumeName, bucketName, "",
-        HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
-        false, false);
-
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
-
-    OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
-    Assert.assertNotEquals(omRequest, modifiedOmRequest);
-
-
-    // When the key name is blank (the root), nothing should be set.
-    Assert.assertTrue(modifiedOmRequest.hasCreateFileRequest());
-    Assert.assertFalse(
-        modifiedOmRequest.getCreateFileRequest().getClientID() > 0);
-
-    KeyArgs keyArgs = modifiedOmRequest.getCreateFileRequest().getKeyArgs();
-    Assert.assertNotNull(keyArgs);
-    Assert.assertEquals(0, keyArgs.getModificationTime());
-    Assert.assertEquals(0, keyArgs.getKeyLocationsList().size());
-  }
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-    OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName,
-        HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
-        false, true);
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
-
-    OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
-
-
-    long id = modifiedOmRequest.getCreateFileRequest().getClientID();
-
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
-
-    // Before calling validateAndUpdateCache, the open key should not exist.
-    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-    Assert.assertNull(omKeyInfo);
-
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
-
-    OMClientResponse omFileCreateResponse =
-        omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omFileCreateResponse.getOMResponse().getStatus());
-
-    // Check the open key table to verify the key was added.
-
-    omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-    Assert.assertNotNull(omKeyInfo);
-
-    List<OmKeyLocationInfo> omKeyLocationInfoList =
-        omKeyInfo.getLatestVersionLocations().getLocationList();
-    Assert.assertEquals(1, omKeyLocationInfoList.size());
-
-    OmKeyLocationInfo omKeyLocationInfo = omKeyLocationInfoList.get(0);
-
-    // Check modification time
-    Assert.assertEquals(modifiedOmRequest.getCreateFileRequest()
-        .getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime());
-
-    Assert.assertEquals(omKeyInfo.getModificationTime(),
-        omKeyInfo.getCreationTime());
-
-
-    // Check data of the block
-    OzoneManagerProtocolProtos.KeyLocation keyLocation =
-        modifiedOmRequest.getCreateFileRequest().getKeyArgs()
-            .getKeyLocations(0);
-
-    Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID()
-        .getContainerID(), omKeyLocationInfo.getContainerID());
-    Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID()
-        .getLocalID(), omKeyLocationInfo.getLocalID());
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
-    OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName,
-        HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
-        false, true);
-
-    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
-
-    OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
-
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
-
-
-    OMClientResponse omFileCreateResponse =
-        omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-    Assert.assertEquals(BUCKET_NOT_FOUND,
-        omFileCreateResponse.getOMResponse().getStatus());
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithNonRecursive() throws Exception {
-    testNonRecursivePath(UUID.randomUUID().toString(), false, false, false);
-    testNonRecursivePath("a/b", false, false, true);
-
-    // Create some child keys for the path
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
-        "a/b/c/d", 0L,  HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-    testNonRecursivePath("a/b/c", false, false, false);
-
-    // Delete the child key and add the path "a/b/" to the key table.
-    omMetadataManager.getKeyTable().delete(omMetadataManager.getOzoneKey(
-        volumeName, bucketName, "a/b/c/d"));
-
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
-        "a/b/", 0L,  HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-    testNonRecursivePath("a/b/e", false, false, false);
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithRecursive() throws Exception {
-    // Should be able to create the file even if parent directories do not
-    // exist and the key already exists, since these calls enable recursive
-    // and/or overwrite.
-    testNonRecursivePath(UUID.randomUUID().toString(), false, false, false);
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
-        "c/d/e/f", 0L,  HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-    testNonRecursivePath("c/d/e/f", true, true, false);
-    // Create some child keys for the path
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
-        "a/b/c/d", 0L,  HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-    testNonRecursivePath("a/b/c", false, true, false);
-
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithRecursiveAndOverWrite()
-      throws Exception {
-
-    String key = "c/d/e/f";
-    // Should be able to create the file even if parent directories do not
-    // exist.
-    testNonRecursivePath(key, false, true, false);
-
-    // Add the key to key table
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
-        key, 0L,  HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-
-    // Even if the key exists, the file should be created when overwrite is
-    // set to true (and the request should fail when it is not).
-    testNonRecursivePath(key, true, true, false);
-    testNonRecursivePath(key, false, true, true);
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite()
-      throws Exception {
-
-    String key = "c/d/e/f";
-    // Add a path starting with "c/d/e" to the key table: this is the
-    // non-recursive case, so the parent must already exist.
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
-        "c/d/e/h", 0L,  HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-    testNonRecursivePath(key, false, false, false);
-
-    // Add the key to key table
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
-        key, 0L,  HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-
-    // Even if the key exists, the file should be created when overwrite is
-    // set to true (and the request should fail when it is not).
-    testNonRecursivePath(key, true, false, false);
-    testNonRecursivePath(key, false, false, true);
-  }
-
-
-  private void testNonRecursivePath(String key,
-      boolean overWrite, boolean recursive, boolean fail) throws Exception {
-    OMRequest omRequest = createFileRequest(volumeName, bucketName, key,
-        HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
-        overWrite, recursive);
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
-
-    OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
-
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
-
-    OMClientResponse omFileCreateResponse =
-        omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    if (fail) {
-      Assert.assertTrue(omFileCreateResponse.getOMResponse()
-          .getStatus() == NOT_A_FILE || omFileCreateResponse.getOMResponse()
-          .getStatus() == FILE_ALREADY_EXISTS);
-    } else {
-      long id = modifiedOmRequest.getCreateFileRequest().getClientID();
-
-      String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-          key, id);
-      OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-      Assert.assertNotNull(omKeyInfo);
-
-      List<OmKeyLocationInfo> omKeyLocationInfoList =
-          omKeyInfo.getLatestVersionLocations().getLocationList();
-      Assert.assertEquals(1, omKeyLocationInfoList.size());
-
-      OmKeyLocationInfo omKeyLocationInfo = omKeyLocationInfoList.get(0);
-
-      // Check modification time
-      Assert.assertEquals(modifiedOmRequest.getCreateFileRequest()
-          .getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime());
-
-
-      // Check data of the block
-      OzoneManagerProtocolProtos.KeyLocation keyLocation =
-          modifiedOmRequest.getCreateFileRequest().getKeyArgs()
-              .getKeyLocations(0);
-
-      Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID()
-          .getContainerID(), omKeyLocationInfo.getContainerID());
-      Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID()
-          .getLocalID(), omKeyLocationInfo.getLocalID());
-    }
-  }
-
-
-  /**
-   * Create an OMRequest which encapsulates a CreateFileRequest.
-   * @param volumeName volume the file belongs to
-   * @param bucketName bucket the file belongs to
-   * @param keyName file path to create
-   * @param replicationFactor replication factor for the key
-   * @param replicationType replication type for the key
-   * @param overWrite whether an existing key may be overwritten
-   * @param recursive whether missing parent directories may be created
-   * @return OMRequest wrapping the CreateFileRequest
-   */
-  private OMRequest createFileRequest(
-      String volumeName, String bucketName, String keyName,
-      HddsProtos.ReplicationFactor replicationFactor,
-      HddsProtos.ReplicationType replicationType, boolean overWrite,
-      boolean recursive) {
-
-    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
-            .setVolumeName(volumeName).setBucketName(bucketName)
-            .setKeyName(keyName).setFactor(replicationFactor)
-            .setType(replicationType).setDataSize(dataSize);
-
-    CreateFileRequest createFileRequest = CreateFileRequest.newBuilder()
-        .setKeyArgs(keyArgs)
-        .setIsOverwrite(overWrite)
-        .setIsRecursive(recursive).build();
-
-    return OMRequest.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey)
-        .setClientId(UUID.randomUUID().toString())
-        .setCreateFileRequest(createFileRequest).build();
-
-  }
-}
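Taken together, the overwrite/recursive tests pin down a small decision matrix for OMFileCreateRequest. The summary below is a reading of the assertions above, not additional coverage; the inline flag comments are hypothetical labels for testNonRecursivePath's positional arguments:

    // parents missing + recursive=false -> NOT_A_FILE (request fails)
    // parents missing + recursive=true  -> OK (missing parents are implied)
    // key exists      + overwrite=true  -> OK (the key is replaced)
    // key exists      + overwrite=false -> FILE_ALREADY_EXISTS
    testNonRecursivePath("c/d/e/f", /* overWrite = */ false,
        /* recursive = */ true, /* fail = */ false);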
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/package-info.java
deleted file mode 100644
index ab81a7e..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains test classes for file requests.
- */
-package org.apache.hadoop.ozone.om.request.file;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
deleted file mode 100644
index be3e4a7..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
+++ /dev/null
@@ -1,245 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.hadoop.ozone.om.request.key;
-
-
-import java.util.List;
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .AllocateBlockRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-
-/**
- * Tests OMAllocateBlockRequest class.
- */
-public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-
-    doPreExecute(createAllocateBlockRequest());
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-
-    OMRequest modifiedOmRequest =
-        doPreExecute(createAllocateBlockRequest());
-
-    OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
-
-
-    // Add volume, bucket, key entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    // Check state before calling validateAndUpdateCache: the DB entry was
-    // added without any blocks, so the location list should be empty.
-
-    OmKeyInfo omKeyInfo =
-        omMetadataManager.getOpenKeyTable().get(omMetadataManager.getOpenKey(
-            volumeName, bucketName, keyName, clientID));
-
-    List<OmKeyLocationInfo> omKeyLocationInfo =
-        omKeyInfo.getLatestVersionLocations().getLocationList();
-
-    Assert.assertEquals(0, omKeyLocationInfo.size());
-
-    OMClientResponse omAllocateBlockResponse =
-        omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omAllocateBlockResponse.getOMResponse().getStatus());
-
-    // Check the open key table to verify the new block was added.
-
-    omKeyInfo =
-        omMetadataManager.getOpenKeyTable().get(omMetadataManager.getOpenKey(
-            volumeName, bucketName, keyName, clientID));
-
-
-    // Check modification time
-    Assert.assertEquals(modifiedOmRequest.getAllocateBlockRequest()
-        .getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime());
-    Assert.assertNotEquals(omKeyInfo.getCreationTime(),
-        omKeyInfo.getModificationTime());
-
-    // Check data of the block
-    OzoneManagerProtocolProtos.KeyLocation keyLocation =
-        modifiedOmRequest.getAllocateBlockRequest().getKeyLocation();
-
-    omKeyLocationInfo =
-        omKeyInfo.getLatestVersionLocations().getLocationList();
-
-    Assert.assertEquals(1, omKeyLocationInfo.size());
-
-    Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID()
-        .getContainerID(), omKeyLocationInfo.get(0).getContainerID());
-
-    Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID()
-            .getLocalID(), omKeyLocationInfo.get(0).getLocalID());
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
-
-    OMRequest modifiedOmRequest =
-        doPreExecute(createAllocateBlockRequest());
-
-    OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
-
-
-    OMClientResponse omAllocateBlockResponse =
-        omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omAllocateBlockResponse.getOMResponse().getStatus());
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
-
-    OMRequest modifiedOmRequest =
-        doPreExecute(createAllocateBlockRequest());
-
-    OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
-
-
-    // Added only volume to DB.
-    TestOMRequestUtils.addVolumeToDB(volumeName, "ozone", omMetadataManager);
-
-    OMClientResponse omAllocateBlockResponse =
-        omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
-        omAllocateBlockResponse.getOMResponse().getStatus());
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception {
-
-    OMRequest modifiedOmRequest =
-        doPreExecute(createAllocateBlockRequest());
-
-    OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
-
-    // Add volume, bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-
-    OMClientResponse omAllocateBlockResponse =
-        omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND,
-        omAllocateBlockResponse.getOMResponse().getStatus());
-
-  }
-
-  /**
-   * This method calls preExecute and verifies the modified request.
-   * @param originalOMRequest the request to run preExecute on
-   * @return OMRequest - modified request returned from preExecute.
-   * @throws Exception
-   */
-  private OMRequest doPreExecute(OMRequest originalOMRequest)
-      throws Exception {
-
-    OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(originalOMRequest);
-
-    OMRequest modifiedOmRequest =
-        omAllocateBlockRequest.preExecute(ozoneManager);
-
-
-    Assert.assertEquals(originalOMRequest.getCmdType(),
-        modifiedOmRequest.getCmdType());
-    Assert.assertEquals(originalOMRequest.getClientId(),
-        modifiedOmRequest.getClientId());
-
-    Assert.assertTrue(modifiedOmRequest.hasAllocateBlockRequest());
-    AllocateBlockRequest allocateBlockRequest =
-        modifiedOmRequest.getAllocateBlockRequest();
-    // Time should be set
-    Assert.assertTrue(allocateBlockRequest.getKeyArgs()
-        .getModificationTime() > 0);
-
-    // KeyLocation should be set.
-    Assert.assertTrue(allocateBlockRequest.hasKeyLocation());
-    Assert.assertEquals(containerID,
-        allocateBlockRequest.getKeyLocation().getBlockID()
-            .getContainerBlockID().getContainerID());
-    Assert.assertEquals(localID,
-        allocateBlockRequest.getKeyLocation().getBlockID()
-            .getContainerBlockID().getLocalID());
-    Assert.assertTrue(allocateBlockRequest.getKeyLocation().hasPipeline());
-
-    // The client ID from the original request should be preserved.
-    Assert.assertEquals(
-        originalOMRequest.getAllocateBlockRequest().getClientID(),
-        allocateBlockRequest.getClientID());
-
-    return modifiedOmRequest;
-  }
-
-
-  private OMRequest createAllocateBlockRequest() {
-
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(volumeName).setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setFactor(replicationFactor).setType(replicationType)
-        .build();
-
-    AllocateBlockRequest allocateBlockRequest =
-        AllocateBlockRequest.newBuilder().setClientID(clientID)
-            .setKeyArgs(keyArgs).build();
-
-    return OMRequest.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock)
-        .setClientId(UUID.randomUUID().toString())
-        .setAllocateBlockRequest(allocateBlockRequest).build();
-
-  }
-}
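The three *NotFound tests above also fix the order in which AllocateBlock validation fails: the volume is checked before the bucket, and the bucket before the open key. A compact sketch of that precedence, assembled from the same TestOMRequestUtils helpers (the "ozone" argument is the volume owner used above):

    // Empty DB                       -> VOLUME_NOT_FOUND
    // After adding only the volume   -> BUCKET_NOT_FOUND
    TestOMRequestUtils.addVolumeToDB(volumeName, "ozone", omMetadataManager);
    // After adding volume and bucket (but no open key) -> KEY_NOT_FOUND
    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
        omMetadataManager);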
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
deleted file mode 100644
index 9bfac6c..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
+++ /dev/null
@@ -1,300 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-import java.util.stream.Collectors;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CommitKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyLocation;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-
-
-
-/**
- * Class tests OMKeyCommitRequest class.
- */
-public class TestOMKeyCommitRequest extends TestOMKeyRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    doPreExecute(createCommitKeyRequest());
-  }
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-
-    OMRequest modifiedOmRequest =
-        doPreExecute(createCommitKeyRequest());
-
-    OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    // Key should not be in the key table yet, as validateAndUpdateCache
-    // has not been called.
-    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-
-    Assert.assertNull(omKeyInfo);
-
-    OMClientResponse omClientResponse =
-        omKeyCommitRequest.validateAndUpdateCache(ozoneManager,
-        100L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-
-    // Entry should be deleted from the open key table; look it up by the
-    // open key (which includes the client ID), not the plain ozone key.
-    omKeyInfo = omMetadataManager.getOpenKeyTable().get(
-        omMetadataManager.getOpenKey(volumeName, bucketName, keyName,
-            clientID));
-    Assert.assertNull(omKeyInfo);
-
-    // Now entry should be created in key Table.
-    omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-
-    Assert.assertNotNull(omKeyInfo);
-
-    // Check modification time
-
-    CommitKeyRequest commitKeyRequest =
-        modifiedOmRequest.getCommitKeyRequest();
-    Assert.assertEquals(commitKeyRequest.getKeyArgs().getModificationTime(),
-        omKeyInfo.getModificationTime());
-
-    // Check block location.
-    List<OmKeyLocationInfo> locationInfoListFromCommitKeyRequest =
-        commitKeyRequest.getKeyArgs()
-        .getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf)
-        .collect(Collectors.toList());
-
-    Assert.assertEquals(locationInfoListFromCommitKeyRequest,
-        omKeyInfo.getLatestVersionLocations().getLocationList());
-
-  }
-
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
-
-    OMRequest modifiedOmRequest =
-        doPreExecute(createCommitKeyRequest());
-
-    OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    // Key should not be in the key table yet, as validateAndUpdateCache
-    // has not been called.
-    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-
-    Assert.assertNull(omKeyInfo);
-
-    OMClientResponse omClientResponse =
-        omKeyCommitRequest.validateAndUpdateCache(ozoneManager,
-            100L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-    omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-
-    Assert.assertNull(omKeyInfo);
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
-
-    OMRequest modifiedOmRequest =
-        doPreExecute(createCommitKeyRequest());
-
-    OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
-
-
-    TestOMRequestUtils.addVolumeToDB(volumeName, "ozone", omMetadataManager);
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    // Key should not be in the key table yet, as validateAndUpdateCache
-    // has not been called.
-    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-
-    Assert.assertNull(omKeyInfo);
-
-    OMClientResponse omClientResponse =
-        omKeyCommitRequest.validateAndUpdateCache(ozoneManager,
-            100L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-    omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-
-    Assert.assertNull(omKeyInfo);
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception {
-
-    OMRequest modifiedOmRequest =
-        doPreExecute(createCommitKeyRequest());
-
-    OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
-
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    // Key should not be in the key table yet, as validateAndUpdateCache
-    // has not been called.
-    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-
-    Assert.assertNull(omKeyInfo);
-
-    OMClientResponse omClientResponse =
-        omKeyCommitRequest.validateAndUpdateCache(ozoneManager,
-            100L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-    omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-
-    Assert.assertNull(omKeyInfo);
-  }
-
-  /**
-   * This method calls preExecute and verifies the modified request.
-   * @param originalOMRequest the request to run preExecute on
-   * @return OMRequest - modified request returned from preExecute.
-   * @throws Exception
-   */
-  private OMRequest doPreExecute(OMRequest originalOMRequest)
-      throws Exception {
-
-    OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(originalOMRequest);
-
-    OMRequest modifiedOmRequest = omKeyCommitRequest.preExecute(ozoneManager);
-
-    Assert.assertTrue(modifiedOmRequest.hasCommitKeyRequest());
-    KeyArgs originalKeyArgs =
-        originalOMRequest.getCommitKeyRequest().getKeyArgs();
-    KeyArgs modifiedKeyArgs =
-        modifiedOmRequest.getCommitKeyRequest().getKeyArgs();
-    verifyKeyArgs(originalKeyArgs, modifiedKeyArgs);
-    return modifiedOmRequest;
-  }
-
-  /**
-   * Verify that the modified KeyArgs match the original, apart from the
-   * modification time stamped by preExecute.
-   * @param originalKeyArgs key args before preExecute
-   * @param modifiedKeyArgs key args after preExecute
-   */
-  private void verifyKeyArgs(KeyArgs originalKeyArgs, KeyArgs modifiedKeyArgs) {
-
-    // preExecute should stamp a modification time on the modified args only.
-    Assert.assertTrue(modifiedKeyArgs.getModificationTime() > 0);
-    Assert.assertEquals(0, originalKeyArgs.getModificationTime());
-
-    Assert.assertEquals(originalKeyArgs.getVolumeName(),
-        modifiedKeyArgs.getVolumeName());
-    Assert.assertEquals(originalKeyArgs.getBucketName(),
-        modifiedKeyArgs.getBucketName());
-    Assert.assertEquals(originalKeyArgs.getKeyName(),
-        modifiedKeyArgs.getKeyName());
-    Assert.assertEquals(originalKeyArgs.getDataSize(),
-        modifiedKeyArgs.getDataSize());
-    Assert.assertEquals(originalKeyArgs.getKeyLocationsList(),
-        modifiedKeyArgs.getKeyLocationsList());
-    Assert.assertEquals(originalKeyArgs.getType(),
-        modifiedKeyArgs.getType());
-    Assert.assertEquals(originalKeyArgs.getFactor(),
-        modifiedKeyArgs.getFactor());
-  }
-
-  /**
-   * Create OMRequest which encapsulates CommitKeyRequest.
-   */
-  private OMRequest createCommitKeyRequest() {
-    KeyArgs keyArgs =
-        KeyArgs.newBuilder().setDataSize(dataSize).setVolumeName(volumeName)
-            .setKeyName(keyName).setBucketName(bucketName)
-            .setType(replicationType).setFactor(replicationFactor)
-            .addAllKeyLocations(getKeyLocation()).build();
-
-    CommitKeyRequest commitKeyRequest =
-        CommitKeyRequest.newBuilder().setKeyArgs(keyArgs)
-            .setClientID(clientID).build();
-
-    return OMRequest.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.CommitKey)
-        .setCommitKeyRequest(commitKeyRequest)
-        .setClientId(UUID.randomUUID().toString()).build();
-  }
-
-  /**
-   * Create KeyLocation list.
-   */
-  private List<KeyLocation> getKeyLocation() {
-    List<KeyLocation> keyLocations = new ArrayList<>();
-
-    for (int i = 0; i < 5; i++) {
-      KeyLocation keyLocation =
-          KeyLocation.newBuilder()
-              .setBlockID(HddsProtos.BlockID.newBuilder()
-                  .setContainerBlockID(HddsProtos.ContainerBlockID.newBuilder()
-                      .setContainerID(i + 1000).setLocalID(i + 100).build()))
-              .setOffset(0).setLength(200).build();
-      keyLocations.add(keyLocation);
-    }
-    return keyLocations;
-  }
-
-}
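A successful key commit is effectively a move between two tables: the entry staged by key create in the open key table disappears, and a corresponding entry appears in the key table. A sketch of the before/after assertions, assuming openKey and ozoneKey are built with the getOpenKey and getOzoneKey helpers used above:

    // Before commit: staged in the open key table only.
    Assert.assertNotNull(omMetadataManager.getOpenKeyTable().get(openKey));
    Assert.assertNull(omMetadataManager.getKeyTable().get(ozoneKey));
    // After a successful validateAndUpdateCache (Status.OK):
    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(openKey));
    Assert.assertNotNull(omMetadataManager.getKeyTable().get(ozoneKey));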
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
deleted file mode 100644
index 340cc04..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
+++ /dev/null
@@ -1,329 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import java.util.List;
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-
-/**
- * Tests OMCreateKeyRequest class.
- */
-public class TestOMKeyCreateRequest extends TestOMKeyRequest {
-
-  @Test
-  public void testPreExecuteWithNormalKey() throws Exception {
-    doPreExecute(createKeyRequest(false, 0));
-  }
-
-  @Test
-  public void testPreExecuteWithMultipartKey() throws Exception {
-    doPreExecute(createKeyRequest(true, 1));
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-
-    OMRequest modifiedOmRequest =
-        doPreExecute(createKeyRequest(false, 0));
-
-    OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
-
-    // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    long id = modifiedOmRequest.getCreateKeyRequest().getClientID();
-
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
-
-    // Before calling validateAndUpdateCache, the open key should not exist.
-    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-
-    Assert.assertNull(omKeyInfo);
-
-    OMClientResponse omKeyCreateResponse =
-        omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omKeyCreateResponse.getOMResponse().getStatus());
-
-    // Check the open key table to verify the key was added.
-
-    omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-
-    Assert.assertNotNull(omKeyInfo);
-
-    List<OmKeyLocationInfo> omKeyLocationInfoList =
-        omKeyInfo.getLatestVersionLocations().getLocationList();
-    Assert.assertEquals(1, omKeyLocationInfoList.size());
-
-    OmKeyLocationInfo omKeyLocationInfo = omKeyLocationInfoList.get(0);
-
-    // Check modification time
-    Assert.assertEquals(modifiedOmRequest.getCreateKeyRequest()
-        .getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime());
-
-    Assert.assertEquals(omKeyInfo.getModificationTime(),
-        omKeyInfo.getCreationTime());
-
-
-    // Check data of the block
-    OzoneManagerProtocolProtos.KeyLocation keyLocation =
-        modifiedOmRequest.getCreateKeyRequest().getKeyArgs().getKeyLocations(0);
-
-    Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID()
-        .getContainerID(), omKeyLocationInfo.getContainerID());
-    Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID()
-        .getLocalID(), omKeyLocationInfo.getLocalID());
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithNoSuchMultipartUploadError()
-      throws Exception {
-
-
-    int partNumber = 1;
-    OMRequest modifiedOmRequest =
-        doPreExecute(createKeyRequest(true, partNumber));
-
-    OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
-
-    // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    long id = modifiedOmRequest.getCreateKeyRequest().getClientID();
-
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
-
-    // Before calling validateAndUpdateCache, the open key should not exist.
-    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-
-    Assert.assertNull(omKeyInfo);
-
-    OMClientResponse omKeyCreateResponse =
-        omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(
-        OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR,
-        omKeyCreateResponse.getOMResponse().getStatus());
-
-    // As we got an error, no entry should be created in the openKeyTable.
-
-    omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-
-    Assert.assertNull(omKeyInfo);
-  }
-
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
-
-    OMRequest modifiedOmRequest =
-        doPreExecute(createKeyRequest(false, 0));
-
-    OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
-
-
-    long id = modifiedOmRequest.getCreateKeyRequest().getClientID();
-
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
-
-
-    // Before calling validateAndUpdateCache, the open key should not exist.
-    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-
-    Assert.assertNull(omKeyInfo);
-
-    OMClientResponse omKeyCreateResponse =
-        omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omKeyCreateResponse.getOMResponse().getStatus());
-
-
-    // As we got an error, the openKeyTable should not have an entry.
-    omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-
-    Assert.assertNull(omKeyInfo);
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
-
-
-    OMRequest modifiedOmRequest =
-        doPreExecute(createKeyRequest(
-            false, 0));
-
-    OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
-
-
-    long id = modifiedOmRequest.getCreateKeyRequest().getClientID();
-
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
-
-    TestOMRequestUtils.addVolumeToDB(volumeName, "ozone", omMetadataManager);
-
-    // Before calling validateAndUpdateCache, the open key should not exist.
-    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-
-    Assert.assertNull(omKeyInfo);
-
-    OMClientResponse omKeyCreateResponse =
-        omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
-        omKeyCreateResponse.getOMResponse().getStatus());
-
-
-    // As we got an error, the openKeyTable should not have an entry.
-    omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-
-    Assert.assertNull(omKeyInfo);
-
-  }
-
-
-
-  /**
-   * This method calls preExecute and verifies the modified request.
-   * @param originalOMRequest
-   * @return OMRequest - modified request returned from preExecute.
-   * @throws Exception
-   */
-  private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception {
-
-    OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(originalOMRequest);
-
-    OMRequest modifiedOmRequest =
-        omKeyCreateRequest.preExecute(ozoneManager);
-
-    Assert.assertEquals(originalOMRequest.getCmdType(),
-        modifiedOmRequest.getCmdType());
-    Assert.assertEquals(originalOMRequest.getClientId(),
-        modifiedOmRequest.getClientId());
-
-    Assert.assertTrue(modifiedOmRequest.hasCreateKeyRequest());
-
-    CreateKeyRequest createKeyRequest =
-        modifiedOmRequest.getCreateKeyRequest();
-
-    KeyArgs keyArgs = createKeyRequest.getKeyArgs();
-    // Modification time should be set.
-    Assert.assertTrue(keyArgs.getModificationTime() > 0);
-
-
-    // Client ID should be set.
-    Assert.assertTrue(createKeyRequest.hasClientID());
-    Assert.assertTrue(createKeyRequest.getClientID() > 0);
-
-
-    if (!originalOMRequest.getCreateKeyRequest().getKeyArgs()
-        .getIsMultipartKey()) {
-
-      // As our data size is 100 and scmBlockSize defaults to 1000, we
-      // should have only one block.
-      List< OzoneManagerProtocolProtos.KeyLocation> keyLocations =
-          keyArgs.getKeyLocationsList();
-      // KeyLocation should be set.
-      Assert.assertEquals(1, keyLocations.size());
-      Assert.assertEquals(containerID,
-          keyLocations.get(0).getBlockID().getContainerBlockID()
-              .getContainerID());
-      Assert.assertEquals(localID,
-          keyLocations.get(0).getBlockID().getContainerBlockID()
-              .getLocalID());
-      Assert.assertTrue(keyLocations.get(0).hasPipeline());
-
-      Assert.assertEquals(0, keyLocations.get(0).getOffset());
-
-      Assert.assertEquals(scmBlockSize, keyLocations.get(0).getLength());
-    } else {
-      // Blocks are not allocated for multipart keys in createKey preExecute.
-      Assert.assertTrue(keyArgs.getKeyLocationsList().isEmpty());
-    }
-
-    return modifiedOmRequest;
-
-  }
-
-  /**
-   * Create OMRequest which encapsulates CreateKeyRequest.
-   * @param isMultipartKey
-   * @param partNumber
-   * @return OMRequest.
-   */
-  @SuppressWarnings("parameterNumber")
-  private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber) {
-
-    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(volumeName).setBucketName(bucketName)
-        .setKeyName(keyName).setIsMultipartKey(isMultipartKey)
-        .setFactor(replicationFactor).setType(replicationType);
-
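-    // Multipart keys additionally carry the data size and the part number.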
-    if (isMultipartKey) {
-      keyArgs.setDataSize(dataSize).setMultipartNumber(partNumber);
-    }
-
-    OzoneManagerProtocolProtos.CreateKeyRequest createKeyRequest =
-        CreateKeyRequest.newBuilder().setKeyArgs(keyArgs).build();
-
-    return OMRequest.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey)
-        .setClientId(UUID.randomUUID().toString())
-        .setCreateKeyRequest(createKeyRequest).build();
-
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
deleted file mode 100644
index e95ecd5..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .DeleteKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-
-/**
- * Tests OMKeyDeleteRequest class.
- */
-public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    doPreExecute(createDeleteKeyRequest());
-  }
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-    OMRequest modifiedOmRequest =
-        doPreExecute(createDeleteKeyRequest());
-
-    OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
-
-
-    // Add volume, bucket and key entries to OM DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-
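-    // The ozone key is the DB key under which the key info is stored in
-    // the key table.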
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-
-    // As we manually added the key to the key table, it should exist.
-    Assert.assertNotNull(omKeyInfo);
-
-    OMClientResponse omClientResponse =
-        omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
-        100L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-    // Now after calling validateAndUpdateCache, it should be deleted.
-
-    omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-
-    Assert.assertNull(omKeyInfo);
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception {
-    OMRequest modifiedOmRequest =
-        doPreExecute(createDeleteKeyRequest());
-
-    OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
-
-    // Add only volume and bucket entry to DB.
-    // The actual implementation does not check whether the bucket/volume
-    // exists during key deletion.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-
-    OMClientResponse omClientResponse =
-        omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
-            100L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithOutVolumeAndBucket()
-      throws Exception {
-    OMRequest modifiedOmRequest =
-        doPreExecute(createDeleteKeyRequest());
-
-    OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
-
-    // The actual implementation does not check whether the bucket/volume
-    // exists during key deletion, so it should still return KEY_NOT_FOUND.
-
-    OMClientResponse omClientResponse =
-        omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
-            100L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-  }
-
-
-  /**
-   * This method calls preExecute and verifies the modified request.
-   * @param originalOmRequest
-   * @return OMRequest - modified request returned from preExecute.
-   * @throws Exception
-   */
-  private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception {
-
-    OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(originalOmRequest);
-
-    OMRequest modifiedOmRequest = omKeyDeleteRequest.preExecute(ozoneManager);
-
-    // Will not be equal, as UserInfo will be set.
-    Assert.assertNotEquals(originalOmRequest, modifiedOmRequest);
-
-    return modifiedOmRequest;
-  }
-
-  /**
-   * Create OMRequest which encapsulates DeleteKeyRequest.
-   * @return OMRequest
-   */
-  private OMRequest createDeleteKeyRequest() {
-    KeyArgs keyArgs = KeyArgs.newBuilder().setBucketName(bucketName)
-        .setVolumeName(volumeName).setKeyName(keyName).build();
-
-    DeleteKeyRequest deleteKeyRequest =
-        DeleteKeyRequest.newBuilder().setKeyArgs(keyArgs).build();
-
-    return OMRequest.newBuilder().setDeleteKeyRequest(deleteKeyRequest)
-        .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
-        .setClientId(UUID.randomUUID().toString()).build();
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
deleted file mode 100644
index df6b177..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgeKeysRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgeKeysResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-/**
- * Tests {@link OMKeyPurgeRequest} and {@link OMKeyPurgeResponse}.
- */
-public class TestOMKeyPurgeRequestAndResponse extends TestOMKeyRequest {
-
-  private int numKeys = 10;
-
-  /**
-   * Creates volume, bucket and key entries, adds them to the OM DB, and then
-   * deletes the keys to move them to the deleted keys table.
-   */
-  private List<String> createAndDeleteKeys() throws Exception {
-    // Add volume, bucket and key entries to OM DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    List<String> ozoneKeyNames = new ArrayList<>(numKeys);
-    for (int i = 1; i <= numKeys; i++) {
-      String key = keyName + "-" + i;
-      TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, key,
-          clientID, replicationType, replicationFactor, omMetadataManager);
-      ozoneKeyNames.add(
-          omMetadataManager.getOzoneKey(volumeName, bucketName, key));
-    }
-
-    List<String> deletedKeyNames = new ArrayList<>(numKeys);
-    for (String ozoneKey : ozoneKeyNames) {
-      String deletedKeyName = TestOMRequestUtils.deleteKey(
-          ozoneKey, omMetadataManager);
-      deletedKeyNames.add(deletedKeyName);
-    }
-
-    return deletedKeyNames;
-  }
-
-  /**
-   * Create OMRequest which encapsulates PurgeKeysRequest.
-   * @return OMRequest
-   */
-  private OMRequest createPurgeKeysRequest(List<String> deletedKeys) {
-    PurgeKeysRequest purgeKeysRequest = PurgeKeysRequest.newBuilder()
-        .addAllKeys(deletedKeys)
-        .build();
-
-    return OMRequest.newBuilder()
-        .setPurgeKeysRequest(purgeKeysRequest)
-        .setCmdType(Type.PurgeKeys)
-        .setClientId(UUID.randomUUID().toString())
-        .build();
-  }
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-    // Create and delete keys; they should move to the deleted keys table.
-    List<String> deletedKeyNames = createAndDeleteKeys();
-
-    // The keys should be present in the DeletedKeys table before purging
-    for (String deletedKey : deletedKeyNames) {
-      Assert.assertTrue(omMetadataManager.getDeletedTable().isExist(
-          deletedKey));
-    }
-
-    // Create PurgeKeysRequest to purge the deleted keys
-    OMRequest omRequest = createPurgeKeysRequest(deletedKeyNames);
-
-    OMRequest preExecutedRequest = preExecute(omRequest);
-    OMKeyPurgeRequest omKeyPurgeRequest =
-        new OMKeyPurgeRequest(preExecutedRequest);
-
-    OMClientResponse omClientResponse =
-        omKeyPurgeRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
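-    // Build a successful OMResponse for the purge response to wrap when
-    // writing to the DB batch.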
-    OMResponse omResponse = OMResponse.newBuilder()
-        .setPurgeKeysResponse(PurgeKeysResponse.getDefaultInstance())
-        .setCmdType(Type.PurgeKeys)
-        .setStatus(Status.OK)
-        .build();
-
-    BatchOperation batchOperation =
-        omMetadataManager.getStore().initBatchOperation();
-
-    OMKeyPurgeResponse omKeyPurgeResponse =
-        new OMKeyPurgeResponse(deletedKeyNames, omResponse);
-    omKeyPurgeResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToDBBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    // After the purge, the keys should no longer be in the deleted table.
-    for (String deletedKey : deletedKeyNames) {
-      Assert.assertFalse(omMetadataManager.getDeletedTable().isExist(
-          deletedKey));
-    }
-  }
-
-  private OMRequest preExecute(OMRequest originalOmRequest) throws IOException {
-    OMKeyPurgeRequest omKeyPurgeRequest =
-        new OMKeyPurgeRequest(originalOmRequest);
-
-    OMRequest modifiedOmRequest = omKeyPurgeRequest.preExecute(ozoneManager);
-
-    // Will not be equal, as UserInfo will be set.
-    Assert.assertNotEquals(originalOmRequest, modifiedOmRequest);
-
-    return modifiedOmRequest;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
deleted file mode 100644
index 864ba06..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import java.util.UUID;
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .RenameKeyRequest;
-
-/**
- * Tests RenameKey request.
- */
-public class TestOMKeyRenameRequest extends TestOMKeyRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    doPreExecute(createRenameKeyRequest(UUID.randomUUID().toString()));
-  }
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-    String toKeyName = UUID.randomUUID().toString();
-    OMRequest modifiedOmRequest =
-        doPreExecute(createRenameKeyRequest(toKeyName));
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    OMKeyRenameRequest omKeyRenameRequest =
-        new OMKeyRenameRequest(modifiedOmRequest);
-
-    OMClientResponse omKeyRenameResponse =
-        omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omKeyRenameResponse.getOMResponse().getStatus());
-
-    String key = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
-    // Original key should be deleted, toKey should exist.
-    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(key);
-
-    Assert.assertNull(omKeyInfo);
-
-    omKeyInfo =
-        omMetadataManager.getKeyTable().get(omMetadataManager.getOzoneKey(
-            volumeName, bucketName, toKeyName));
-
-    Assert.assertNotNull(omKeyInfo);
-
-    // For the new key, the modification time should be updated.
-
-    KeyArgs keyArgs = modifiedOmRequest.getRenameKeyRequest().getKeyArgs();
-
-    Assert.assertEquals(keyArgs.getModificationTime(),
-        omKeyInfo.getModificationTime());
-
-    // KeyName should be updated in OmKeyInfo to toKeyName.
-    Assert.assertEquals(toKeyName, omKeyInfo.getKeyName());
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception {
-    String toKeyName = UUID.randomUUID().toString();
-    OMRequest modifiedOmRequest =
-        doPreExecute(createRenameKeyRequest(toKeyName));
-
-    // Add only volume and bucket entry to DB.
-
-    // The actual implementation does not check whether the bucket/volume
-    // exists during key rename.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    OMKeyRenameRequest omKeyRenameRequest =
-        new OMKeyRenameRequest(modifiedOmRequest);
-
-    OMClientResponse omKeyRenameResponse =
-        omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND,
-        omKeyRenameResponse.getOMResponse().getStatus());
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithOutVolumeAndBucket()
-      throws Exception {
-    String toKeyName = UUID.randomUUID().toString();
-    OMRequest modifiedOmRequest =
-        doPreExecute(createRenameKeyRequest(toKeyName));
-
-    // The actual implementation does not check whether the bucket/volume
-    // exists during key rename, so it should still return KEY_NOT_FOUND.
-
-    OMKeyRenameRequest omKeyRenameRequest =
-        new OMKeyRenameRequest(modifiedOmRequest);
-
-    OMClientResponse omKeyRenameResponse =
-        omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND,
-        omKeyRenameResponse.getOMResponse().getStatus());
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithToKeyInvalid() throws Exception {
-    String toKeyName = "";
-    OMRequest modifiedOmRequest =
-        doPreExecute(createRenameKeyRequest(toKeyName));
-
-    // Add only volume and bucket entry to DB.
-
-    // The actual implementation does not check whether the bucket/volume
-    // exists during key rename.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    OMKeyRenameRequest omKeyRenameRequest =
-        new OMKeyRenameRequest(modifiedOmRequest);
-
-    OMClientResponse omKeyRenameResponse =
-        omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_KEY_NAME,
-        omKeyRenameResponse.getOMResponse().getStatus());
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithFromKeyInvalid() throws Exception {
-    String toKeyName = UUID.randomUUID().toString();
-    keyName = "";
-    OMRequest modifiedOmRequest =
-        doPreExecute(createRenameKeyRequest(toKeyName));
-
-    // Add only volume and bucket entry to DB.
-
-    // The actual implementation does not check whether the bucket/volume
-    // exists during key rename.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    OMKeyRenameRequest omKeyRenameRequest =
-        new OMKeyRenameRequest(modifiedOmRequest);
-
-    OMClientResponse omKeyRenameResponse =
-        omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_KEY_NAME,
-        omKeyRenameResponse.getOMResponse().getStatus());
-
-  }
-
-
-  /**
-   * This method calls preExecute and verifies the modified request.
-   * @param originalOmRequest
-   * @return OMRequest - modified request returned from preExecute.
-   * @throws Exception
-   */
-  private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception {
-    OMKeyRenameRequest omKeyRenameRequest =
-        new OMKeyRenameRequest(originalOmRequest);
-
-    OMRequest modifiedOmRequest = omKeyRenameRequest.preExecute(ozoneManager);
-
-    // Will not be equal, as UserInfo will be set and modification time is
-    // set in KeyArgs.
-    Assert.assertNotEquals(originalOmRequest, modifiedOmRequest);
-
-    Assert.assertTrue(modifiedOmRequest.getRenameKeyRequest()
-        .getKeyArgs().getModificationTime() > 0);
-
-    return modifiedOmRequest;
-  }
-
-  /**
-   * Create OMRequest which encapsulates RenameKeyRequest.
-   * @return OMRequest
-   */
-  private OMRequest createRenameKeyRequest(String toKeyName) {
-    KeyArgs keyArgs = KeyArgs.newBuilder().setKeyName(keyName)
-        .setVolumeName(volumeName).setBucketName(bucketName).build();
-
-    RenameKeyRequest renameKeyRequest = RenameKeyRequest.newBuilder()
-            .setKeyArgs(keyArgs).setToKeyName(toKeyName).build();
-
-    return OMRequest.newBuilder()
-        .setClientId(UUID.randomUUID().toString())
-        .setRenameKeyRequest(renameKeyRequest)
-        .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey).build();
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
deleted file mode 100644
index 92d6cdb..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import org.apache.hadoop.hdds.client.ContainerBlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ScmClient;
-import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
-import org.apache.hadoop.util.Time;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.Mockito.when;
-
-/**
- * Base test class for key requests.
- */
-@SuppressWarnings("visibilitymodifier")
-public class TestOMKeyRequest {
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  protected OzoneManager ozoneManager;
-  protected OMMetrics omMetrics;
-  protected OMMetadataManager omMetadataManager;
-  protected AuditLogger auditLogger;
-
-  protected ScmClient scmClient;
-  protected OzoneBlockTokenSecretManager ozoneBlockTokenSecretManager;
-  protected ScmBlockLocationProtocol scmBlockLocationProtocol;
-
-  protected final long containerID = 1000L;
-  protected final long localID = 100L;
-
-  protected String volumeName;
-  protected String bucketName;
-  protected String keyName;
-  protected HddsProtos.ReplicationType replicationType;
-  protected HddsProtos.ReplicationFactor replicationFactor;
-  protected long clientID;
-  protected long scmBlockSize = 1000L;
-  protected long dataSize;
-
-  // A no-op double buffer helper; these tests do not exercise it.
-  protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
-      ((response, transactionIndex) -> {
-        return null;
-      });
-
-
-  @Before
-  public void setup() throws Exception {
-    ozoneManager = Mockito.mock(OzoneManager.class);
-    omMetrics = OMMetrics.create();
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
-    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
-    auditLogger = Mockito.mock(AuditLogger.class);
-    when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
-    Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
-
-    scmClient = Mockito.mock(ScmClient.class);
-    ozoneBlockTokenSecretManager =
-        Mockito.mock(OzoneBlockTokenSecretManager.class);
-    scmBlockLocationProtocol = Mockito.mock(ScmBlockLocationProtocol.class);
-    when(ozoneManager.getScmClient()).thenReturn(scmClient);
-    when(ozoneManager.getBlockTokenSecretManager())
-        .thenReturn(ozoneBlockTokenSecretManager);
-    when(ozoneManager.getScmBlockSize()).thenReturn(scmBlockSize);
-    when(ozoneManager.getPreallocateBlocksMax()).thenReturn(2);
-    when(ozoneManager.isGrpcBlockTokenEnabled()).thenReturn(false);
-    when(ozoneManager.getOMNodeId()).thenReturn(UUID.randomUUID().toString());
-    when(scmClient.getBlockClient()).thenReturn(scmBlockLocationProtocol);
-
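-    // A dummy open STAND_ALONE pipeline with no datanodes, used by the
-    // mocked block allocation below.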
-    Pipeline pipeline = Pipeline.newBuilder()
-        .setState(Pipeline.PipelineState.OPEN)
-        .setId(PipelineID.randomId())
-        .setType(HddsProtos.ReplicationType.STAND_ALONE)
-        .setFactor(HddsProtos.ReplicationFactor.ONE)
-        .setNodes(new ArrayList<>())
-        .build();
-
-    AllocatedBlock allocatedBlock =
-        new AllocatedBlock.Builder()
-            .setContainerBlockID(new ContainerBlockID(containerID, localID))
-            .setPipeline(pipeline).build();
-
-    List<AllocatedBlock> allocatedBlocks = new ArrayList<>();
-
-    allocatedBlocks.add(allocatedBlock);
-
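-    // The mocked SCM block client returns this single preallocated block
-    // for every allocation.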
-    when(scmBlockLocationProtocol.allocateBlock(anyLong(), anyInt(),
-        any(HddsProtos.ReplicationType.class),
-        any(HddsProtos.ReplicationFactor.class),
-        anyString(), any(ExcludeList.class))).thenReturn(allocatedBlocks);
-
-
-    volumeName = UUID.randomUUID().toString();
-    bucketName = UUID.randomUUID().toString();
-    keyName = UUID.randomUUID().toString();
-    replicationFactor = HddsProtos.ReplicationFactor.ONE;
-    replicationType = HddsProtos.ReplicationType.RATIS;
-    clientID = Time.now();
-    dataSize = 1000L;
-
-  }
-
-  @After
-  public void stop() {
-    omMetrics.unRegister();
-    Mockito.framework().clearInlineMocks();
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/package-info.java
deleted file mode 100644
index 2034670..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains test classes for key requests.
- */
-package org.apache.hadoop.ozone.om.request.key;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/package-info.java
deleted file mode 100644
index 0bdab7d..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Tests for OM request.
- */
-package org.apache.hadoop.ozone.om.request;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketCreateRequest.java
deleted file mode 100644
index cd42ec6..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketCreateRequest.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.request.s3.bucket;
-
-import java.util.UUID;
-
-import org.apache.commons.lang.RandomStringUtils;
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-
-import static org.junit.Assert.fail;
-
-/**
- * Tests S3BucketCreateRequest class, which handles S3 CreateBucket request.
- */
-public class TestS3BucketCreateRequest extends TestS3BucketRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    String userName = UUID.randomUUID().toString();
-    String s3BucketName = UUID.randomUUID().toString();
-    doPreExecute(userName, s3BucketName);
-  }
-
-  @Test
-  public void testPreExecuteInvalidBucketLength() throws Exception {
-    String userName = UUID.randomUUID().toString();
-
-    // Set a bucket name shorter than 3 characters.
-    String s3BucketName = RandomStringUtils.randomAlphabetic(2);
-
-    try {
-      doPreExecute(userName, s3BucketName);
-      fail("testPreExecuteInvalidBucketLength failed");
-    } catch (OMException ex) {
-      GenericTestUtils.assertExceptionContains("S3_BUCKET_INVALID_LENGTH", ex);
-    }
-
-    // Set a bucket name longer than 63 characters.
-    s3BucketName = RandomStringUtils.randomAlphabetic(64);
-
-    try {
-      doPreExecute(userName, s3BucketName);
-      fail("testPreExecuteInvalidBucketLength failed");
-    } catch (OMException ex) {
-      GenericTestUtils.assertExceptionContains("S3_BUCKET_INVALID_LENGTH", ex);
-    }
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-    String userName = UUID.randomUUID().toString();
-    String s3BucketName = UUID.randomUUID().toString();
-
-    S3BucketCreateRequest s3BucketCreateRequest = doPreExecute(userName,
-        s3BucketName);
-
-    doValidateAndUpdateCache(userName, s3BucketName,
-        s3BucketCreateRequest.getOmRequest());
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithS3BucketAlreadyExists()
-      throws Exception {
-    String userName = UUID.randomUUID().toString();
-    String s3BucketName = UUID.randomUUID().toString();
-
-    TestOMRequestUtils.addS3BucketToDB(
-        S3BucketCreateRequest.formatOzoneVolumeName(userName), s3BucketName,
-        omMetadataManager);
-
-    S3BucketCreateRequest s3BucketCreateRequest =
-        doPreExecute(userName, s3BucketName);
-
-
-    // Try create same bucket again
-    OMClientResponse omClientResponse =
-        s3BucketCreateRequest.validateAndUpdateCache(ozoneManager, 2,
-            ozoneManagerDoubleBufferHelper);
-
-    OMResponse omResponse = omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getCreateBucketResponse());
-    Assert.assertEquals(
-        OzoneManagerProtocolProtos.Status.S3_BUCKET_ALREADY_EXISTS,
-        omResponse.getStatus());
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithBucketAlreadyExists()
-      throws Exception {
-    String userName = UUID.randomUUID().toString();
-    String s3BucketName = UUID.randomUUID().toString();
-
-    S3BucketCreateRequest s3BucketCreateRequest =
-        doPreExecute(userName, s3BucketName);
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(
-        s3BucketCreateRequest.formatOzoneVolumeName(userName),
-        s3BucketName, omMetadataManager);
-
-
-    // Try create same bucket again
-    OMClientResponse omClientResponse =
-        s3BucketCreateRequest.validateAndUpdateCache(ozoneManager, 2,
-            ozoneManagerDoubleBufferHelper);
-
-    OMResponse omResponse = omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getCreateBucketResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_ALREADY_EXISTS,
-        omResponse.getStatus());
-  }
-
-
-
-  private S3BucketCreateRequest doPreExecute(String userName,
-      String s3BucketName) throws Exception {
-    OMRequest originalRequest =
-        TestOMRequestUtils.createS3BucketRequest(userName, s3BucketName);
-
-    S3BucketCreateRequest s3BucketCreateRequest =
-        new S3BucketCreateRequest(originalRequest);
-
-    OMRequest modifiedRequest = s3BucketCreateRequest.preExecute(ozoneManager);
-    // Modification time will be set, so requests should not be equal.
-    Assert.assertNotEquals(originalRequest, modifiedRequest);
-    return new S3BucketCreateRequest(modifiedRequest);
-  }
-
-  private void doValidateAndUpdateCache(String userName, String s3BucketName,
-      OMRequest modifiedRequest) throws Exception {
-
-    // As we have not yet called validateAndUpdateCache, get() should
-    // return null.
-
-    Assert.assertNull(omMetadataManager.getS3Table().get(s3BucketName));
-    S3BucketCreateRequest s3BucketCreateRequest =
-        new S3BucketCreateRequest(modifiedRequest);
-
-
-    OMClientResponse omClientResponse =
-        s3BucketCreateRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    // After validateAndUpdateCache has added the entry to the cache, get()
-    // should return a non-null value.
-
-    Assert.assertNotNull(omMetadataManager.getS3Table().get(s3BucketName));
-
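-    // Creating an S3 bucket also creates a backing Ozone volume and bucket.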
-    String bucketKey =
-        omMetadataManager.getBucketKey(
-            s3BucketCreateRequest.formatOzoneVolumeName(userName),
-            s3BucketName);
-
-    // Check that the Ozone bucket entry was created.
-    Assert.assertNotNull(omMetadataManager.getBucketTable().get(bucketKey));
-
-    String volumeKey = omMetadataManager.getVolumeKey(
-        s3BucketCreateRequest.formatOzoneVolumeName(userName));
-
-    // Check that the volume entry was created.
-    Assert.assertNotNull(omMetadataManager.getVolumeTable().get(volumeKey));
-
-    // Check the OM response.
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Type.CreateS3Bucket,
-        omClientResponse.getOMResponse().getCmdType());
-
-  }
-
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketDeleteRequest.java
deleted file mode 100644
index f542268..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketDeleteRequest.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.request.s3.bucket;
-
-import java.util.UUID;
-
-import org.apache.commons.lang.RandomStringUtils;
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import static org.junit.Assert.fail;
-
-/**
- * Tests S3BucketDeleteRequest class.
- */
-public class TestS3BucketDeleteRequest extends TestS3BucketRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    String s3BucketName = UUID.randomUUID().toString();
-    doPreExecute(s3BucketName);
-  }
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-    String s3BucketName = UUID.randomUUID().toString();
-    OMRequest omRequest = doPreExecute(s3BucketName);
-
-    // Add s3Bucket to s3Bucket table.
-    TestOMRequestUtils.addS3BucketToDB("ozone", s3BucketName,
-        omMetadataManager);
-
-    S3BucketDeleteRequest s3BucketDeleteRequest =
-        new S3BucketDeleteRequest(omRequest);
-
-    OMClientResponse s3BucketDeleteResponse =
-        s3BucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        s3BucketDeleteResponse.getOMResponse().getStatus());
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithS3BucketNotFound()
-      throws Exception {
-    String s3BucketName = UUID.randomUUID().toString();
-    OMRequest omRequest = doPreExecute(s3BucketName);
-
-    S3BucketDeleteRequest s3BucketDeleteRequest =
-        new S3BucketDeleteRequest(omRequest);
-
-    OMClientResponse s3BucketDeleteResponse =
-        s3BucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.S3_BUCKET_NOT_FOUND,
-        s3BucketDeleteResponse.getOMResponse().getStatus());
-  }
-
-  @Test
-  public void testPreExecuteInvalidBucketLength() throws Exception {
-    // Set a bucket name shorter than 3 characters.
-    String s3BucketName = RandomStringUtils.randomAlphabetic(2);
-
-    try {
-      doPreExecute(s3BucketName);
-      fail("testPreExecuteInvalidBucketLength failed");
-    } catch (OMException ex) {
-      GenericTestUtils.assertExceptionContains("S3_BUCKET_INVALID_LENGTH", ex);
-    }
-
-    // Set a bucket name longer than 63 characters.
-    s3BucketName = RandomStringUtils.randomAlphabetic(65);
-
-    try {
-      doPreExecute(s3BucketName);
-      fail("testPreExecuteInvalidBucketLength failed");
-    } catch (OMException ex) {
-      GenericTestUtils.assertExceptionContains("S3_BUCKET_INVALID_LENGTH", ex);
-    }
-  }
-
-  private OMRequest doPreExecute(String s3BucketName) throws Exception {
-    OMRequest omRequest =
-        TestOMRequestUtils.deleteS3BucketRequest(s3BucketName);
-
-    S3BucketDeleteRequest s3BucketDeleteRequest =
-        new S3BucketDeleteRequest(omRequest);
-
-    OMRequest modifiedOMRequest =
-        s3BucketDeleteRequest.preExecute(ozoneManager);
-
-    // As the user name will be set, the two requests should not be equal.
-    Assert.assertNotEquals(omRequest, modifiedOMRequest);
-
-    return modifiedOMRequest;
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketRequest.java
deleted file mode 100644
index 747efb0..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketRequest.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.request.s3.bucket;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.when;
-
-/**
- * Base test class for S3 bucket requests.
- */
-@SuppressWarnings("visibilityModifier")
-public class TestS3BucketRequest {
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  protected OzoneManager ozoneManager;
-  protected OMMetrics omMetrics;
-  protected OMMetadataManager omMetadataManager;
-  protected AuditLogger auditLogger;
-
-  // A no-op double buffer helper; these tests do not exercise it.
-  protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
-      ((response, transactionIndex) -> {
-        return null;
-      });
-
-
-  @Before
-  public void setup() throws Exception {
-
-    ozoneManager = Mockito.mock(OzoneManager.class);
-    omMetrics = OMMetrics.create();
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
-    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
-    auditLogger = Mockito.mock(AuditLogger.class);
-    when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
-    Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
-  }
-
-  @After
-  public void stop() {
-    omMetrics.unRegister();
-    Mockito.framework().clearInlineMocks();
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java
deleted file mode 100644
index 8b2e84b..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains test classes for s3 bucket requests.
- */
-package org.apache.hadoop.ozone.om.request.s3.bucket;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
deleted file mode 100644
index 1d78560..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.request.s3.multipart;
-
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-
-/**
- * Tests S3 Initiate Multipart Upload request.
- */
-public class TestS3InitiateMultipartUploadRequest
-    extends TestS3MultipartRequest {
-
-  @Test
-  public void testPreExecute() {
-    doPreExecuteInitiateMPU(UUID.randomUUID().toString(),
-        UUID.randomUUID().toString(), UUID.randomUUID().toString());
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    // Add volume and bucket to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName,
-        bucketName, keyName);
-
-    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(modifiedRequest);
-
-    OMClientResponse omClientResponse =
-        s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
-            100L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest()
-            .getKeyArgs().getMultipartUploadID());
-
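-    // A successful initiate creates entries in both the open key table
-    // and the multipart info table.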
-    Assert.assertNotNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
-    Assert.assertNotNull(omMetadataManager.getMultipartInfoTable()
-        .get(multipartKey));
-
-    Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
-            .getKeyArgs().getMultipartUploadID(),
-        omMetadataManager.getMultipartInfoTable().get(multipartKey)
-            .getUploadID());
-
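-    // Both the modification and creation time of the open key are taken
-    // from the modification time stamped on the request in preExecute.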
-    Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
-        .getKeyArgs().getModificationTime(),
-        omMetadataManager.getOpenKeyTable().get(multipartKey)
-        .getModificationTime());
-    Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
-            .getKeyArgs().getModificationTime(),
-        omMetadataManager.getOpenKeyTable().get(multipartKey)
-            .getCreationTime());
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
-
-    OMRequest modifiedRequest = doPreExecuteInitiateMPU(
-        volumeName, bucketName, keyName);
-
-    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(modifiedRequest);
-
-    OMClientResponse omClientResponse =
-        s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
-            100L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest()
-            .getKeyArgs().getMultipartUploadID());
-
-    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
-    Assert.assertNull(omMetadataManager.getMultipartInfoTable()
-        .get(multipartKey));
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-
-    OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, bucketName,
-        keyName);
-
-    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(modifiedRequest);
-
-    OMClientResponse omClientResponse =
-        s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
-            100L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest()
-            .getKeyArgs().getMultipartUploadID());
-
-    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
-    Assert.assertNull(omMetadataManager.getMultipartInfoTable()
-        .get(multipartKey));
-
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
deleted file mode 100644
index 9950027..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.request.s3.multipart;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.when;
-
-/**
- * Base test class for S3 Multipart upload request.
- */
-@SuppressWarnings("visibilitymodifier")
-public class TestS3MultipartRequest {
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  protected OzoneManager ozoneManager;
-  protected OMMetrics omMetrics;
-  protected OMMetadataManager omMetadataManager;
-  protected AuditLogger auditLogger;
-
-  // A no-op double-buffer helper; these tests do not exercise the
-  // double buffer.
-  protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
-      ((response, transactionIndex) -> {
-        return null;
-      });
-
-
-  @Before
-  public void setup() throws Exception {
-    ozoneManager = Mockito.mock(OzoneManager.class);
-    omMetrics = OMMetrics.create();
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
-    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
-    auditLogger = Mockito.mock(AuditLogger.class);
-    when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
-    Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
-  }
-
-
-  @After
-  public void stop() {
-    omMetrics.unRegister();
-    Mockito.framework().clearInlineMocks();
-  }
-
-  /**
-   * Performs preExecute of an initiate multipart upload request for the
-   * given volume, bucket and key name.
-   * @param volumeName volume name for the request
-   * @param bucketName bucket name for the request
-   * @param keyName key name for the request
-   * @return OMRequest - returned from preExecute.
-   */
-  protected OMRequest doPreExecuteInitiateMPU(
-      String volumeName, String bucketName, String keyName) {
-    OMRequest omRequest =
-        TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName,
-            keyName);
-
-    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(omRequest);
-
-    OMRequest modifiedRequest =
-        s3InitiateMultipartUploadRequest.preExecute(ozoneManager);
-
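-    // preExecute generates the multipart upload ID and sets the
-    // modification time, so the modified request must differ from the
-    // original.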
-    Assert.assertNotEquals(omRequest, modifiedRequest);
-    Assert.assertTrue(modifiedRequest.hasInitiateMultiPartUploadRequest());
-    Assert.assertNotNull(modifiedRequest.getInitiateMultiPartUploadRequest()
-        .getKeyArgs().getMultipartUploadID());
-    Assert.assertTrue(modifiedRequest.getInitiateMultiPartUploadRequest()
-        .getKeyArgs().getModificationTime() > 0);
-
-    return modifiedRequest;
-  }
-
-  /**
-   * Perform preExecute of Commit Multipart Upload request for given volume,
-   * bucket and keyName.
-   * @param volumeName volume name for the request
-   * @param bucketName bucket name for the request
-   * @param keyName key name for the request
-   * @param clientID client ID associated with the open key
-   * @param multipartUploadID ID of the multipart upload being committed to
-   * @param partNumber number of the part being committed
-   * @return OMRequest - returned from preExecute.
-   */
-  protected OMRequest doPreExecuteCommitMPU(
-      String volumeName, String bucketName, String keyName,
-      long clientID, String multipartUploadID, int partNumber) {
-
-    // Just set dummy size
-    long dataSize = 100L;
-    OMRequest omRequest =
-        TestOMRequestUtils.createCommitPartMPURequest(volumeName, bucketName,
-            keyName, clientID, dataSize, multipartUploadID, partNumber);
-    S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(omRequest);
-
-
-    OMRequest modifiedRequest =
-        s3MultipartUploadCommitPartRequest.preExecute(ozoneManager);
-
-    // UserInfo and the modification time are set by preExecute.
-    Assert.assertNotEquals(omRequest, modifiedRequest);
-
-    return modifiedRequest;
-  }
-
-  /**
-   * Perform preExecute of Abort Multipart Upload request for given volume,
-   * bucket and keyName.
-   * @param volumeName volume name for the request
-   * @param bucketName bucket name for the request
-   * @param keyName key name for the request
-   * @param multipartUploadID ID of the multipart upload to abort
-   * @return OMRequest - returned from preExecute.
-   * @throws IOException if the abort request cannot be created
-   */
-  protected OMRequest doPreExecuteAbortMPU(
-      String volumeName, String bucketName, String keyName,
-      String multipartUploadID) throws IOException {
-
-    OMRequest omRequest =
-        TestOMRequestUtils.createAbortMPURequest(volumeName, bucketName,
-            keyName, multipartUploadID);
-
-
-    S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest =
-        new S3MultipartUploadAbortRequest(omRequest);
-
-    OMRequest modifiedRequest =
-        s3MultipartUploadAbortRequest.preExecute(ozoneManager);
-
-    // UserInfo and the modification time are set by preExecute.
-    Assert.assertNotEquals(omRequest, modifiedRequest);
-
-    return modifiedRequest;
-
-  }
-
-  protected OMRequest doPreExecuteCompleteMPU(String volumeName,
-      String bucketName, String keyName, String multipartUploadID,
-      List<Part> partList) throws IOException {
-
-    OMRequest omRequest =
-        TestOMRequestUtils.createCompleteMPURequest(volumeName, bucketName,
-            keyName, multipartUploadID, partList);
-
-    S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(omRequest);
-
-    OMRequest modifiedRequest =
-        s3MultipartUploadCompleteRequest.preExecute(ozoneManager);
-
-    // UserInfo and the modification time are set by preExecute.
-    Assert.assertNotEquals(omRequest, modifiedRequest);
-
-    return modifiedRequest;
-
-  }
-
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java
deleted file mode 100644
index d0b61c7..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.s3.multipart;
-
-import java.io.IOException;
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-
-/**
- * Test Multipart upload abort request.
- */
-public class TestS3MultipartUploadAbortRequest extends TestS3MultipartRequest {
-
-
-  @Test
-  public void testPreExecute() throws IOException {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    doPreExecuteAbortMPU(volumeName, bucketName, keyName,
-        UUID.randomUUID().toString());
-  }
-
-  @Test
-  public void testValidateAndUpdateCache() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
-        bucketName, keyName);
-
-    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(initiateMPURequest);
-
-    OMClientResponse omClientResponse =
-        s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
-            1L, ozoneManagerDoubleBufferHelper);
-
-    String multipartUploadID = omClientResponse.getOMResponse()
-        .getInitiateMultiPartUploadResponse().getMultipartUploadID();
-
-    OMRequest abortMPURequest =
-        doPreExecuteAbortMPU(volumeName, bucketName, keyName,
-            multipartUploadID);
-
-    S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest =
-        new S3MultipartUploadAbortRequest(abortMPURequest);
-
-    omClientResponse =
-        s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L,
-            ozoneManagerDoubleBufferHelper);
-
-
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
-
-    // Check table and response.
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
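-    // A successful abort removes both the multipart info and the open
-    // key entries.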
-    Assert.assertNull(
-        omMetadataManager.getMultipartInfoTable().get(multipartKey));
-    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheMultipartNotFound() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
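-    // Use an upload ID for which no initiate request was ever processed.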
-    String multipartUploadID = "randomMPU";
-
-    OMRequest abortMPURequest =
-        doPreExecuteAbortMPU(volumeName, bucketName, keyName,
-            multipartUploadID);
-
-    S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest =
-        new S3MultipartUploadAbortRequest(abortMPURequest);
-
-    OMClientResponse omClientResponse =
-        s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L,
-            ozoneManagerDoubleBufferHelper);
-
-    // Check table and response.
-    Assert.assertEquals(
-        OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR,
-        omClientResponse.getOMResponse().getStatus());
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheVolumeNotFound() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-
-    String multipartUploadID = "randomMPU";
-
-    OMRequest abortMPURequest =
-        doPreExecuteAbortMPU(volumeName, bucketName, keyName,
-            multipartUploadID);
-
-    S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest =
-        new S3MultipartUploadAbortRequest(abortMPURequest);
-
-    OMClientResponse omClientResponse =
-        s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L,
-            ozoneManagerDoubleBufferHelper);
-
-    // Check table and response.
-    Assert.assertEquals(
-        OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheBucketNotFound() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-
-    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
-
-    String multipartUploadID = "randomMPU";
-
-    OMRequest abortMPURequest =
-        doPreExecuteAbortMPU(volumeName, bucketName, keyName,
-            multipartUploadID);
-
-    S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest =
-        new S3MultipartUploadAbortRequest(abortMPURequest);
-
-    OMClientResponse omClientResponse =
-        s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L,
-            ozoneManagerDoubleBufferHelper);
-
-    // Check table and response.
-    Assert.assertEquals(
-        OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
deleted file mode 100644
index 5b220bf..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.request.s3.multipart;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.UUID;
-
-/**
- * Tests S3 Multipart upload commit part request.
- */
-public class TestS3MultipartUploadCommitPartRequest
-    extends TestS3MultipartRequest {
-
-  @Test
-  public void testPreExecute() {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    doPreExecuteCommitMPU(volumeName, bucketName, keyName, Time.now(),
-        UUID.randomUUID().toString(), 1);
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheSuccess() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
-        bucketName, keyName);
-
-    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(initiateMPURequest);
-
-    OMClientResponse omClientResponse =
-        s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
-        1L, ozoneManagerDoubleBufferHelper);
-
-    long clientID = Time.now();
-    String multipartUploadID = omClientResponse.getOMResponse()
-        .getInitiateMultiPartUploadResponse().getMultipartUploadID();
-
-    OMRequest commitMultipartRequest = doPreExecuteCommitMPU(volumeName,
-        bucketName, keyName, clientID, multipartUploadID, 1);
-
-    S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
-
-    // Add key to open key table.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
-        keyName, clientID, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-
-    omClientResponse =
-        s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
-        2L, ozoneManagerDoubleBufferHelper);
-
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
-
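-    // The committed part is recorded in the multipart info table and its
-    // open key entry is removed.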
-    Assert.assertNotNull(
-        omMetadataManager.getMultipartInfoTable().get(multipartKey));
-    Assert.assertEquals(1, omMetadataManager.getMultipartInfoTable()
-        .get(multipartKey).getPartKeyInfoMap().size());
-    Assert.assertNull(omMetadataManager.getOpenKeyTable()
-        .get(omMetadataManager.getOpenKey(volumeName, bucketName, keyName,
-            clientID)));
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheMultipartNotFound() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-
-    long clientID = Time.now();
-    String multipartUploadID = UUID.randomUUID().toString();
-
-    OMRequest commitMultipartRequest = doPreExecuteCommitMPU(volumeName,
-        bucketName, keyName, clientID, multipartUploadID, 1);
-
-    S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
-
-    // Add key to open key table.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
-        keyName, clientID, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-
-    OMClientResponse omClientResponse =
-        s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
-            2L, ozoneManagerDoubleBufferHelper);
-
-
-    Assert.assertEquals(
-        OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR,
-        omClientResponse.getOMResponse().getStatus());
-
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
-
-    Assert.assertNull(
-        omMetadataManager.getMultipartInfoTable().get(multipartKey));
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheKeyNotFound() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-
-    long clientID = Time.now();
-    String multipartUploadID = UUID.randomUUID().toString();
-
-    OMRequest commitMultipartRequest = doPreExecuteCommitMPU(volumeName,
-        bucketName, keyName, clientID, multipartUploadID, 1);
-
-    // The key is intentionally not added to the open key table, so
-    // committing this MPU part will fail with KEY_NOT_FOUND.
-
-    S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
-
-
-    OMClientResponse omClientResponse =
-        s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
-            2L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheBucketNotFound() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
-
-
-    long clientID = Time.now();
-    String multipartUploadID = UUID.randomUUID().toString();
-
-    OMRequest commitMultipartRequest = doPreExecuteCommitMPU(volumeName,
-        bucketName, keyName, clientID, multipartUploadID, 1);
-
-    // The bucket was never created, so committing this MPU part will fail
-    // with BUCKET_NOT_FOUND.
-
-    S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
-
-
-    OMClientResponse omClientResponse =
-        s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
-            2L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
deleted file mode 100644
index a04f51f..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.s3.multipart;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part;
-import org.apache.hadoop.util.Time;
-
-
-/**
- * Tests S3 Multipart Upload Complete request.
- */
-public class TestS3MultipartUploadCompleteRequest
-    extends TestS3MultipartRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    doPreExecuteCompleteMPU(volumeName, bucketName, keyName,
-        UUID.randomUUID().toString(), new ArrayList<>());
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheSuccess() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
-        bucketName, keyName);
-
-    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(initiateMPURequest);
-
-    OMClientResponse omClientResponse =
-        s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
-            1L, ozoneManagerDoubleBufferHelper);
-
-    long clientID = Time.now();
-    String multipartUploadID = omClientResponse.getOMResponse()
-        .getInitiateMultiPartUploadResponse().getMultipartUploadID();
-
-    OMRequest commitMultipartRequest = doPreExecuteCommitMPU(volumeName,
-        bucketName, keyName, clientID, multipartUploadID, 1);
-
-    S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
-
-    // Add key to open key table.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
-        keyName, clientID, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-
-    s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
-        2L, ozoneManagerDoubleBufferHelper);
-
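-    // Build the part list referencing the part committed above.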
-    List<Part> partList = new ArrayList<>();
-
-    partList.add(Part.newBuilder().setPartName(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName) +
-            clientID).setPartNumber(1).build());
-
-    OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName,
-        bucketName, keyName, multipartUploadID, partList);
-
-    S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
-
-    omClientResponse =
-        s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
-            3L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
-
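-    // Complete cleans up the open key and multipart info entries and
-    // publishes the final key in the key table.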
-    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
-    Assert.assertNull(
-        omMetadataManager.getMultipartInfoTable().get(multipartKey));
-    Assert.assertNotNull(omMetadataManager.getKeyTable().get(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheVolumeNotFound() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    List<Part> partList = new ArrayList<>();
-
-    OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName,
-        bucketName, keyName, UUID.randomUUID().toString(), partList);
-
-    S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
-
-    OMClientResponse omClientResponse =
-        s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
-            3L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheBucketNotFound() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
-    List<Part> partList = new ArrayList<>();
-
-    OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName,
-        bucketName, keyName, UUID.randomUUID().toString(), partList);
-
-    S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
-
-    OMClientResponse omClientResponse =
-        s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
-            3L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheNoSuchMultipartUploadError()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-    List<Part> partList = new ArrayList<>();
-
-    OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName,
-        bucketName, keyName, UUID.randomUUID().toString(), partList);
-
-    // Perform the complete multipart upload request without a prior
-    // initiate request.
-    S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
-
-    OMClientResponse omClientResponse =
-        s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
-            3L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(
-        OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR,
-        omClientResponse.getOMResponse().getStatus());
-
-  }
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java
deleted file mode 100644
index 4455418..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains test classes for S3 MPU requests.
- */
-
-package org.apache.hadoop.ozone.om.request.s3.multipart;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
deleted file mode 100644
index b685711..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
+++ /dev/null
@@ -1,258 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.volume;
-
-import java.util.UUID;
-
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .VolumeInfo;
-
-import static org.mockito.Mockito.when;
-
-/**
- * Tests create volume request.
- */
-public class TestOMVolumeCreateRequest extends TestOMVolumeRequest {
-
-
-  @Test
-  public void testPreExecute() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String adminName = UUID.randomUUID().toString();
-    String ownerName = UUID.randomUUID().toString();
-    doPreExecute(volumeName, adminName, ownerName);
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithZeroMaxUserVolumeCount()
-      throws Exception {
-    when(ozoneManager.getMaxUserVolumeCount()).thenReturn(0L);
-    String volumeName = UUID.randomUUID().toString();
-    String adminName = "user1";
-    String ownerName = "user1";
-
-    OMRequest originalRequest = createVolumeRequest(volumeName, adminName,
-        ownerName);
-
-    OMVolumeCreateRequest omVolumeCreateRequest =
-        new OMVolumeCreateRequest(originalRequest);
-
-    omVolumeCreateRequest.preExecute(ozoneManager);
-
-    try {
-      OMClientResponse omClientResponse =
-          omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1,
-              ozoneManagerDoubleBufferHelper);
-      Assert.assertTrue(omClientResponse instanceof OMVolumeCreateResponse);
-      OMVolumeCreateResponse response =
-          (OMVolumeCreateResponse) omClientResponse;
-      Assert.assertEquals(1, response.getOmVolumeArgs().getObjectID());
-      Assert.assertEquals(1, response.getOmVolumeArgs().getUpdateID());
-    } catch (IllegalArgumentException ex){
-      GenericTestUtils.assertExceptionContains("should be greater than zero",
-          ex);
-    }
-
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheSuccess() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String adminName = "user1";
-    String ownerName = "user1";
-
-    OMRequest originalRequest = createVolumeRequest(volumeName, adminName,
-        ownerName);
-
-    OMVolumeCreateRequest omVolumeCreateRequest =
-        new OMVolumeCreateRequest(originalRequest);
-
-    OMRequest modifiedRequest = omVolumeCreateRequest.preExecute(ozoneManager);
-
-    String volumeKey = omMetadataManager.getVolumeKey(volumeName);
-    String ownerKey = omMetadataManager.getUserKey(ownerName);
-
-    // As validateAndUpdateCache has not yet been called, get() should
-    // return null.
-
-    Assert.assertNull(omMetadataManager.getVolumeTable().get(volumeKey));
-    Assert.assertNull(omMetadataManager.getUserTable().get(ownerKey));
-
-    omVolumeCreateRequest = new OMVolumeCreateRequest(modifiedRequest);
-
-    OMClientResponse omClientResponse =
-        omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 2,
-            ozoneManagerDoubleBufferHelper);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getCreateVolumeResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omResponse.getStatus());
-
-
-    // Get volumeInfo from request.
-    VolumeInfo volumeInfo = omVolumeCreateRequest.getOmRequest()
-        .getCreateVolumeRequest().getVolumeInfo();
-
-    OmVolumeArgs omVolumeArgs =
-        omMetadataManager.getVolumeTable().get(volumeKey);
-    // As the request is valid, the volume table should now have an entry.
-    Assert.assertNotNull(omVolumeArgs);
-    Assert.assertEquals(2, omVolumeArgs.getObjectID());
-    Assert.assertEquals(2, omVolumeArgs.getUpdateID());
-
-    // Check data from table and request.
-    Assert.assertEquals(volumeInfo.getVolume(), omVolumeArgs.getVolume());
-    Assert.assertEquals(volumeInfo.getOwnerName(), omVolumeArgs.getOwnerName());
-    Assert.assertEquals(volumeInfo.getAdminName(), omVolumeArgs.getAdminName());
-    Assert.assertEquals(volumeInfo.getCreationTime(),
-        omVolumeArgs.getCreationTime());
-
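-    // The owner's user table entry should now list the newly created
-    // volume.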
-    OzoneManagerProtocolProtos.UserVolumeInfo userVolumeInfo = omMetadataManager
-        .getUserTable().get(ownerKey);
-    Assert.assertNotNull(userVolumeInfo);
-    Assert.assertEquals(volumeName, userVolumeInfo.getVolumeNames(0));
-
-    // Create another volume for the user.
-    originalRequest = createVolumeRequest("vol1", adminName,
-        ownerName);
-
-    omVolumeCreateRequest =
-        new OMVolumeCreateRequest(originalRequest);
-
-    modifiedRequest = omVolumeCreateRequest.preExecute(ozoneManager);
-
-    omClientResponse =
-        omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 2L,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-
-    Assert.assertEquals(2, omMetadataManager
-        .getUserTable().get(ownerKey).getVolumeNamesList().size());
-
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithVolumeAlreadyExists()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String adminName = "user1";
-    String ownerName = "user1";
-
-    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
-
-    OMRequest originalRequest = createVolumeRequest(volumeName, adminName,
-        ownerName);
-
-    OMVolumeCreateRequest omVolumeCreateRequest =
-        new OMVolumeCreateRequest(originalRequest);
-
-    OMRequest modifiedRequest = omVolumeCreateRequest.preExecute(ozoneManager);
-
-    omVolumeCreateRequest = new OMVolumeCreateRequest(modifiedRequest);
-
-    OMClientResponse omClientResponse =
-        omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getCreateVolumeResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS,
-        omResponse.getStatus());
-
-    // Verify that a volume with the specified name actually exists.
-    Assert.assertNotNull(omMetadataManager.getVolumeTable().get(
-        omMetadataManager.getVolumeKey(volumeName)));
-
-  }
-
-
-  private void doPreExecute(String volumeName,
-      String adminName, String ownerName) throws Exception {
-
-    OMRequest originalRequest = createVolumeRequest(volumeName, adminName,
-        ownerName);
-
-    OMVolumeCreateRequest omVolumeCreateRequest =
-        new OMVolumeCreateRequest(originalRequest);
-
-    OMRequest modifiedRequest = omVolumeCreateRequest.preExecute(ozoneManager);
-    verifyRequest(modifiedRequest, originalRequest);
-  }
-
-  /**
-   * Verify the modified request against the original request.
-   * @param modifiedRequest request returned from preExecute
-   * @param originalRequest request passed to preExecute
-   */
-  private void verifyRequest(OMRequest modifiedRequest,
-      OMRequest originalRequest) {
-    VolumeInfo original = originalRequest.getCreateVolumeRequest()
-        .getVolumeInfo();
-    VolumeInfo updated = modifiedRequest.getCreateVolumeRequest()
-        .getVolumeInfo();
-
-    Assert.assertEquals(original.getAdminName(), updated.getAdminName());
-    Assert.assertEquals(original.getVolume(), updated.getVolume());
-    Assert.assertEquals(original.getOwnerName(),
-        updated.getOwnerName());
-    Assert.assertNotEquals(original.getCreationTime(),
-        updated.getCreationTime());
-  }
-
-  /**
-   * Create OMRequest for create volume.
-   * @param volumeName volume name for the request
-   * @param adminName admin of the new volume
-   * @param ownerName owner of the new volume
-   * @return OMRequest
-   */
-  private OMRequest createVolumeRequest(String volumeName, String adminName,
-      String ownerName) {
-    VolumeInfo volumeInfo = VolumeInfo.newBuilder().setVolume(volumeName)
-        .setAdminName(adminName).setOwnerName(ownerName).build();
-    CreateVolumeRequest createVolumeRequest =
-        CreateVolumeRequest.newBuilder().setVolumeInfo(volumeInfo).build();
-
-    return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
-        .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume)
-        .setCreateVolumeRequest(createVolumeRequest).build();
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java
deleted file mode 100644
index 8b30a23..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.volume;
-
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .DeleteVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-
-/**
- * Tests delete volume request.
- */
-public class TestOMVolumeDeleteRequest extends TestOMVolumeRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    OMRequest originalRequest = deleteVolumeRequest(volumeName);
-
-    OMVolumeDeleteRequest omVolumeDeleteRequest =
-        new OMVolumeDeleteRequest(originalRequest);
-
-    OMRequest modifiedRequest = omVolumeDeleteRequest.preExecute(ozoneManager);
-    Assert.assertNotEquals(originalRequest, modifiedRequest);
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheSuccess() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-
-    OMRequest originalRequest = deleteVolumeRequest(volumeName);
-
-    OMVolumeDeleteRequest omVolumeDeleteRequest =
-        new OMVolumeDeleteRequest(originalRequest);
-
-    omVolumeDeleteRequest.preExecute(ozoneManager);
-
-    // Add the volume and user to the DB.
-    TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-
-    String volumeKey = omMetadataManager.getVolumeKey(volumeName);
-    String ownerKey = omMetadataManager.getUserKey(ownerName);
-
-
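-    // Sanity check: the volume and user entries exist before the delete.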
-    Assert.assertNotNull(omMetadataManager.getVolumeTable().get(volumeKey));
-    Assert.assertNotNull(omMetadataManager.getUserTable().get(ownerKey));
-
-    OMClientResponse omClientResponse =
-        omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getCreateVolumeResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omResponse.getStatus());
-
-
-
-    Assert.assertEquals(0, omMetadataManager.getUserTable().get(ownerKey)
-        .getVolumeNamesList().size());
-    // Now that the volume is deleted, the volume table should have no entry.
-    Assert.assertNull(omMetadataManager.getVolumeTable().get(volumeKey));
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithVolumeNotFound()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    OMRequest originalRequest = deleteVolumeRequest(volumeName);
-
-    OMVolumeDeleteRequest omVolumeDeleteRequest =
-        new OMVolumeDeleteRequest(originalRequest);
-
-    omVolumeDeleteRequest.preExecute(ozoneManager);
-
-    OMClientResponse omClientResponse =
-        omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getCreateVolumeResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omResponse.getStatus());
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithVolumeNotEmpty() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-
-    OMRequest originalRequest = deleteVolumeRequest(volumeName);
-
-    OMVolumeDeleteRequest omVolumeDeleteRequest =
-        new OMVolumeDeleteRequest(originalRequest);
-
-    omVolumeDeleteRequest.preExecute(ozoneManager);
-
-    // Add a bucket to the bucket table cache so the volume is not empty.
-    String bucketName = UUID.randomUUID().toString();
-    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
-
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volumeName).setBucketName(bucketName).build();
-    TestOMRequestUtils.addBucketToOM(omMetadataManager, omBucketInfo);
-
-    // Add user and volume to DB.
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
-
-    OMClientResponse omClientResponse =
-        omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1L,
-            ozoneManagerDoubleBufferHelper);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getCreateVolumeResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_EMPTY,
-        omResponse.getStatus());
-  }
-
-  /**
-   * Create OMRequest for delete volume.
-   * @param volumeName name of the volume to delete
-   * @return OMRequest
-   */
-  private OMRequest deleteVolumeRequest(String volumeName) {
-    DeleteVolumeRequest deleteVolumeRequest =
-        DeleteVolumeRequest.newBuilder().setVolumeName(volumeName).build();
-
-    return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
-        .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume)
-        .setDeleteVolumeRequest(deleteVolumeRequest).build();
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java
deleted file mode 100644
index cfcdcb7..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.request.volume;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.audit.AuditLogger;
-import org.apache.hadoop.ozone.audit.AuditMessage;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.when;
-
-/**
- * Base test class for Volume request.
- */
-@SuppressWarnings("visibilitymodifier")
-public class TestOMVolumeRequest {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  protected OzoneManager ozoneManager;
-  protected OMMetrics omMetrics;
-  protected OMMetadataManager omMetadataManager;
-  protected AuditLogger auditLogger;
-  // A no-op double-buffer helper; these tests do not exercise the
-  // double buffer.
-  protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
-      ((response, transactionIndex) -> {
-        return null;
-      });
-
-  @Before
-  public void setup() throws Exception {
-    ozoneManager = Mockito.mock(OzoneManager.class);
-    omMetrics = OMMetrics.create();
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
-    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
-    when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L);
-    auditLogger = Mockito.mock(AuditLogger.class);
-    when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
-    Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
-  }
-
-  @After
-  public void stop() {
-    omMetrics.unRegister();
-    Mockito.framework().clearInlineMocks();
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java
deleted file mode 100644
index af38ba0..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.volume;
-
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-
-/**
- * Tests set volume owner request.
- */
-public class TestOMVolumeSetOwnerRequest extends TestOMVolumeRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String newOwner = "user1";
-    OMRequest originalRequest =
-        TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, newOwner);
-
-    OMVolumeSetOwnerRequest omVolumeSetOwnerRequest =
-        new OMVolumeSetOwnerRequest(originalRequest);
-
-    OMRequest modifiedRequest = omVolumeSetOwnerRequest.preExecute(
-        ozoneManager);
-    Assert.assertNotEquals(modifiedRequest, originalRequest);
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheSuccess() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
-
-    String newOwner = "user2";
-
-    OMRequest originalRequest =
-        TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, newOwner);
-
-    OMVolumeSetOwnerRequest omVolumeSetOwnerRequest =
-        new OMVolumeSetOwnerRequest(originalRequest);
-
-    omVolumeSetOwnerRequest.preExecute(ozoneManager);
-
-    String volumeKey = omMetadataManager.getVolumeKey(volumeName);
-    String ownerKey = omMetadataManager.getUserKey(ownerName);
-    String newOwnerKey = omMetadataManager.getUserKey(newOwner);
-
-
-
-    OMClientResponse omClientResponse =
-        omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getSetVolumePropertyResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omResponse.getStatus());
-
-
-    String fromDBOwner = omMetadataManager
-        .getVolumeTable().get(volumeKey).getOwnerName();
-    Assert.assertEquals(newOwner, fromDBOwner);
-
-
-    OzoneManagerProtocolProtos.UserVolumeInfo newOwnerVolumeList =
-        omMetadataManager.getUserTable().get(newOwnerKey);
-
-    Assert.assertNotNull(newOwnerVolumeList);
-    Assert.assertEquals(volumeName,
-        newOwnerVolumeList.getVolumeNamesList().get(0));
-
-    OzoneManagerProtocolProtos.UserVolumeInfo oldOwnerVolumeList =
-        omMetadataManager.getUserTable()
-            .get(ownerKey);
-
-    Assert.assertNotNull(oldOwnerVolumeList);
-    Assert.assertEquals(0, oldOwnerVolumeList.getVolumeNamesList().size());
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithVolumeNotFound()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-
-    OMRequest originalRequest =
-        TestOMRequestUtils.createSetVolumePropertyRequest(volumeName,
-            ownerName);
-
-    OMVolumeSetOwnerRequest omVolumeSetOwnerRequest =
-        new OMVolumeSetOwnerRequest(originalRequest);
-
-    omVolumeSetOwnerRequest.preExecute(ozoneManager);
-
-    OMClientResponse omClientResponse =
-        omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getSetVolumePropertyResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omResponse.getStatus());
-
-  }
-
-  @Test
-  public void testInvalidRequest() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-
-    // create request with quota set.
-    OMRequest originalRequest =
-        TestOMRequestUtils.createSetVolumePropertyRequest(volumeName,
-            100L);
-
-    OMVolumeSetOwnerRequest omVolumeSetOwnerRequest =
-        new OMVolumeSetOwnerRequest(originalRequest);
-
-    omVolumeSetOwnerRequest.preExecute(ozoneManager);
-
-    OMClientResponse omClientResponse =
-        omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getSetVolumePropertyResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_REQUEST,
-        omResponse.getStatus());
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java
deleted file mode 100644
index 963fc33..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.volume;
-
-import java.util.UUID;
-
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-
-/**
- * Tests set volume quota request.
- */
-public class TestOMVolumeSetQuotaRequest extends TestOMVolumeRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    long quota = 100L;
-    OMRequest originalRequest =
-        TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, quota);
-
-    OMVolumeSetQuotaRequest omVolumeSetQuotaRequest =
-        new OMVolumeSetQuotaRequest(originalRequest);
-
-    OMRequest modifiedRequest = omVolumeSetQuotaRequest.preExecute(
-        ozoneManager);
-    Assert.assertNotEquals(modifiedRequest, originalRequest);
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheSuccess() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-    long quotaSet = 100L;
-
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
-
-
-    OMRequest originalRequest =
-        TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, quotaSet);
-
-    OMVolumeSetQuotaRequest omVolumeSetQuotaRequest =
-        new OMVolumeSetQuotaRequest(originalRequest);
-
-    omVolumeSetQuotaRequest.preExecute(ozoneManager);
-
-    String volumeKey = omMetadataManager.getVolumeKey(volumeName);
-
-
-    // Get Quota before validateAndUpdateCache.
-    OmVolumeArgs omVolumeArgs =
-        omMetadataManager.getVolumeTable().get(volumeKey);
-    // As the request is valid, the volume table should have an entry.
-    Assert.assertNotNull(omVolumeArgs);
-    long quotaBeforeSet = omVolumeArgs.getQuotaInBytes();
-
-
-    OMClientResponse omClientResponse =
-        omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getSetVolumePropertyResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omResponse.getStatus());
-
-
-    long quotaAfterSet = omMetadataManager
-        .getVolumeTable().get(volumeKey).getQuotaInBytes();
-    Assert.assertEquals(quotaSet, quotaAfterSet);
-    Assert.assertNotEquals(quotaBeforeSet, quotaAfterSet);
-
-  }
-
-
-  @Test
-  public void testValidateAndUpdateCacheWithVolumeNotFound()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-    long quota = 100L;
-
-    OMRequest originalRequest =
-        TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, quota);
-
-    OMVolumeSetQuotaRequest omVolumeSetQuotaRequest =
-        new OMVolumeSetQuotaRequest(originalRequest);
-
-    omVolumeSetQuotaRequest.preExecute(ozoneManager);
-
-    OMClientResponse omClientResponse =
-        omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getSetVolumePropertyResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omResponse.getStatus());
-
-  }
-
-  @Test
-  public void testInvalidRequest() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-
-    // create request with owner set.
-    OMRequest originalRequest =
-        TestOMRequestUtils.createSetVolumePropertyRequest(volumeName,
-            "user1");
-
-    // Create an OMVolumeSetQuotaRequest from a SetProperty request that has owner set.
-    OMVolumeSetQuotaRequest omVolumeSetQuotaRequest =
-        new OMVolumeSetQuotaRequest(originalRequest);
-
-    omVolumeSetQuotaRequest.preExecute(ozoneManager);
-
-    OMClientResponse omClientResponse =
-        omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getSetVolumePropertyResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_REQUEST,
-        omResponse.getStatus());
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java
deleted file mode 100644
index 66a122f..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.volume.acl;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmOzoneAclMap;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.request.volume.TestOMVolumeRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.UUID;
-
-/**
- * Tests volume addAcl request.
- */
-public class TestOMVolumeAddAclRequest extends TestOMVolumeRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw");
-    OMRequest originalRequest =
-        TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl);
-
-    OMVolumeAddAclRequest omVolumeAddAclRequest =
-        new OMVolumeAddAclRequest(originalRequest);
-
-    OMRequest modifiedRequest = omVolumeAddAclRequest.preExecute(
-        ozoneManager);
-    Assert.assertNotEquals(modifiedRequest, originalRequest);
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheSuccess() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
-
-    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]");
-
-    OMRequest originalRequest =
-        TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl);
-
-    OMVolumeAddAclRequest omVolumeAddAclRequest =
-        new OMVolumeAddAclRequest(originalRequest);
-
-    omVolumeAddAclRequest.preExecute(ozoneManager);
-
-    String volumeKey = omMetadataManager.getVolumeKey(volumeName);
-
-    // Get Acl before validateAndUpdateCache.
-    OmVolumeArgs omVolumeArgs =
-        omMetadataManager.getVolumeTable().get(volumeKey);
-    // As the request is valid, the volume table should have an entry.
-    Assert.assertNotNull(omVolumeArgs);
-    OmOzoneAclMap aclMapBeforeSet = omVolumeArgs.getAclMap();
-
-    OMClientResponse omClientResponse =
-        omVolumeAddAclRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OMResponse omResponse = omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getAddAclResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omResponse.getStatus());
-
-    OmOzoneAclMap aclMapAfterSet = omMetadataManager
-        .getVolumeTable().get(volumeKey).getAclMap();
-
-    // acl is added to aclMapAfterSet
-    Assert.assertEquals(1, aclMapAfterSet.getAcl().size());
-    Assert.assertEquals(acl, aclMapAfterSet.getAcl().get(0));
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithVolumeNotFound()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw");
-    OMRequest originalRequest =
-        TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl);
-
-    OMVolumeAddAclRequest omVolumeAddAclRequest =
-        new OMVolumeAddAclRequest(originalRequest);
-
-    omVolumeAddAclRequest.preExecute(ozoneManager);
-
-    OMClientResponse omClientResponse =
-        omVolumeAddAclRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OMResponse omResponse = omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getAddAclResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omResponse.getStatus());
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java
deleted file mode 100644
index dfd0a23..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.volume.acl;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmOzoneAclMap;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.request.volume.TestOMVolumeRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.UUID;
-
-/**
- * Tests volume removeAcl request.
- */
-public class TestOMVolumeRemoveAclRequest extends TestOMVolumeRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw");
-    OMRequest originalRequest =
-        TestOMRequestUtils.createVolumeRemoveAclRequest(volumeName, acl);
-
-    OMVolumeRemoveAclRequest omVolumeRemoveAclRequest =
-        new OMVolumeRemoveAclRequest(originalRequest);
-
-    OMRequest modifiedRequest = omVolumeRemoveAclRequest.preExecute(
-        ozoneManager);
-    Assert.assertNotEquals(modifiedRequest, originalRequest);
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheSuccess() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
-
-    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]");
-    // add acl first
-    OMRequest addAclRequest =
-        TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl);
-    OMVolumeAddAclRequest omVolumeAddAclRequest =
-        new OMVolumeAddAclRequest(addAclRequest);
-    omVolumeAddAclRequest.preExecute(ozoneManager);
-    OMClientResponse omClientAddResponse =
-        omVolumeAddAclRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-    OMResponse omAddAclResponse = omClientAddResponse.getOMResponse();
-    Assert.assertNotNull(omAddAclResponse.getAddAclResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omAddAclResponse.getStatus());
-
-
-    // remove acl
-    OMRequest removeAclRequest =
-        TestOMRequestUtils.createVolumeRemoveAclRequest(volumeName, acl);
-    OMVolumeRemoveAclRequest omVolumeRemoveAclRequest =
-        new OMVolumeRemoveAclRequest(removeAclRequest);
-    omVolumeRemoveAclRequest.preExecute(ozoneManager);
-
-    String volumeKey = omMetadataManager.getVolumeKey(volumeName);
-
-    // Get Acl before Remove.
-    OmVolumeArgs omVolumeArgs =
-        omMetadataManager.getVolumeTable().get(volumeKey);
-    // As the request is valid, the volume table should have an entry.
-    Assert.assertNotNull(omVolumeArgs);
-    OmOzoneAclMap aclMapBeforeRemove = omVolumeArgs.getAclMap();
-    Assert.assertEquals(acl, aclMapBeforeRemove.getAcl().get(0));
-
-    OMClientResponse omClientRemoveResponse =
-        omVolumeRemoveAclRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OMResponse omRemoveAclResponse = omClientRemoveResponse.getOMResponse();
-    Assert.assertNotNull(omRemoveAclResponse.getRemoveAclResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omRemoveAclResponse.getStatus());
-
-    // acl is removed; aclMapAfterRemove should be empty
-    OmOzoneAclMap aclMapAfterRemove = omMetadataManager
-        .getVolumeTable().get(volumeKey).getAclMap();
-    Assert.assertEquals(0, aclMapAfterRemove.getAcl().size());
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithVolumeNotFound()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw");
-    OMRequest originalRequest =
-        TestOMRequestUtils.createVolumeRemoveAclRequest(volumeName, acl);
-
-    OMVolumeRemoveAclRequest omVolumeRemoveAclRequest =
-        new OMVolumeRemoveAclRequest(originalRequest);
-
-    omVolumeRemoveAclRequest.preExecute(ozoneManager);
-
-    OMClientResponse omClientResponse =
-        omVolumeRemoveAclRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OMResponse omResponse = omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getRemoveAclResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omResponse.getStatus());
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java
deleted file mode 100644
index 087ba71..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.volume.acl;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmOzoneAclMap;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.request.volume.TestOMVolumeRequest;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.List;
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope.ACCESS;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope.DEFAULT;
-
-/**
- * Tests volume setAcl request.
- */
-public class TestOMVolumeSetAclRequest extends TestOMVolumeRequest {
-
-  @Test
-  public void testPreExecute() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw");
-    OMRequest originalRequest =
-        TestOMRequestUtils.createVolumeSetAclRequest(volumeName,
-            Lists.newArrayList(acl));
-
-    OMVolumeSetAclRequest omVolumeSetAclRequest =
-        new OMVolumeSetAclRequest(originalRequest);
-
-    OMRequest modifiedRequest = omVolumeSetAclRequest.preExecute(
-        ozoneManager);
-    Assert.assertNotEquals(modifiedRequest, originalRequest);
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheSuccess() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
-
-    OzoneAcl userAccessAcl = OzoneAcl.parseAcl("user:bilbo:rw[ACCESS]");
-    OzoneAcl groupDefaultAcl =
-        OzoneAcl.parseAcl("group:admin:rwdlncxy[DEFAULT]");
-
-    List<OzoneAcl> acls = Lists.newArrayList(userAccessAcl, groupDefaultAcl);
-
-    OMRequest originalRequest =
-        TestOMRequestUtils.createVolumeSetAclRequest(volumeName, acls);
-
-    OMVolumeSetAclRequest omVolumeSetAclRequest =
-        new OMVolumeSetAclRequest(originalRequest);
-
-    omVolumeSetAclRequest.preExecute(ozoneManager);
-
-    String volumeKey = omMetadataManager.getVolumeKey(volumeName);
-
-    // Get Acl before validateAndUpdateCache.
-    OmVolumeArgs omVolumeArgs =
-        omMetadataManager.getVolumeTable().get(volumeKey);
-    // As the request is valid, the volume table should have an entry.
-    Assert.assertNotNull(omVolumeArgs);
-    OmOzoneAclMap aclMapBeforeSet = omVolumeArgs.getAclMap();
-
-    OMClientResponse omClientResponse =
-        omVolumeSetAclRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OMResponse omResponse = omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getSetAclResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omResponse.getStatus());
-
-    OmOzoneAclMap aclMapAfterSet = omMetadataManager
-        .getVolumeTable().get(volumeKey).getAclMap();
-
-    // Acl is added to aclMapAfterSet
-    Assert.assertEquals(2, aclMapAfterSet.getAcl().size());
-    Assert.assertTrue("Default Acl should be set.",
-        aclMapAfterSet.getAclsByScope(ACCESS).contains(userAccessAcl));
-    Assert.assertTrue("Default Acl should be set.",
-        aclMapAfterSet.getAclsByScope(DEFAULT).contains(groupDefaultAcl));
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithVolumeNotFound()
-      throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw");
-    OMRequest originalRequest =
-        TestOMRequestUtils.createVolumeSetAclRequest(volumeName,
-            Lists.newArrayList(acl));
-
-    OMVolumeSetAclRequest omVolumeSetAclRequest =
-        new OMVolumeSetAclRequest(originalRequest);
-
-    omVolumeSetAclRequest.preExecute(ozoneManager);
-
-    OMClientResponse omClientResponse =
-        omVolumeSetAclRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    OMResponse omResponse = omClientResponse.getOMResponse();
-    Assert.assertNotNull(omResponse.getSetAclResponse());
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omResponse.getStatus());
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java
deleted file mode 100644
index 1552af7..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Package contains test classes for volume acl requests.
- */
-package org.apache.hadoop.ozone.om.request.volume.acl;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java
deleted file mode 100644
index cbe3e2d..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Package contains test classes for volume requests.
- */
-package org.apache.hadoop.ozone.om.request.volume;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java
deleted file mode 100644
index 5e41d2d..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.response;
-
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest;
-import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse;
-import org.apache.hadoop.ozone.om.response.s3.bucket.S3BucketCreateResponse;
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.util.Time;
-
-/**
- * Helper class to test OMClientResponse classes.
- */
-public final class TestOMResponseUtils {
-
-  // Private constructor: this is a utility class with only static methods.
-  private TestOMResponseUtils() {
-  }
-
-  public static OmBucketInfo createBucket(String volume, String bucket) {
-    return OmBucketInfo.newBuilder().setVolumeName(volume).setBucketName(bucket)
-        .setCreationTime(Time.now()).setIsVersionEnabled(true).addMetadata(
-            "key1", "value1").build();
-
-  }
-
-  public static S3BucketCreateResponse createS3BucketResponse(String userName,
-      String volumeName, String s3BucketName) {
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder()
-            .setCmdType(OzoneManagerProtocolProtos.Type.CreateS3Bucket)
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setSuccess(true)
-            .setCreateS3BucketResponse(
-                OzoneManagerProtocolProtos.S3CreateBucketResponse
-                    .getDefaultInstance())
-            .build();
-
-    OzoneManagerProtocolProtos.UserVolumeInfo userVolumeInfo =
-        OzoneManagerProtocolProtos.UserVolumeInfo.newBuilder()
-            .setObjectID(1)
-            .setUpdateID(1)
-            .addVolumeNames(volumeName).build();
-
-    OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
-        .setOwnerName(userName).setAdminName(userName)
-        .setVolume(volumeName).setCreationTime(Time.now()).build();
-
-    OMVolumeCreateResponse omVolumeCreateResponse =
-        new OMVolumeCreateResponse(omVolumeArgs, userVolumeInfo, omResponse);
-
-
-    OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket(
-        volumeName, s3BucketName);
-    OMBucketCreateResponse omBucketCreateResponse =
-        new OMBucketCreateResponse(omBucketInfo, omResponse);
-
-    String s3Mapping = S3BucketCreateRequest.formatS3MappingName(volumeName,
-        s3BucketName);
-    return
-        new S3BucketCreateResponse(omVolumeCreateResponse,
-            omBucketCreateResponse, s3BucketName, s3Mapping, omResponse);
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketCreateResponse.java
deleted file mode 100644
index 20ac2f9..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketCreateResponse.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.response.bucket;
-
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.response.TestOMResponseUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-/**
- * This class tests OMBucketCreateResponse.
- */
-public class TestOMBucketCreateResponse {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OMMetadataManager omMetadataManager;
-  private BatchOperation batchOperation;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-  }
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket(
-        volumeName, bucketName);
-    Assert.assertEquals(0,
-        omMetadataManager.countRowsInTable(omMetadataManager.getBucketTable()));
-    OMBucketCreateResponse omBucketCreateResponse =
-        new OMBucketCreateResponse(omBucketInfo, OMResponse.newBuilder()
-            .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setCreateBucketResponse(
-                CreateBucketResponse.newBuilder().build()).build());
-
-    omBucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToDBBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertEquals(1,
-        omMetadataManager.countRowsInTable(omMetadataManager.getBucketTable()));
-
-    Table.KeyValue<String, OmBucketInfo> keyValue =
-        omMetadataManager.getBucketTable().iterator().next();
-
-    Assert.assertEquals(omMetadataManager.getBucketKey(volumeName,
-        bucketName), keyValue.getKey());
-    Assert.assertEquals(omBucketInfo, keyValue.getValue());
-  }
-
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketDeleteResponse.java
deleted file mode 100644
index e8843eb..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketDeleteResponse.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.response.bucket;
-
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.response.TestOMResponseUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .DeleteBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-/**
- * This class tests OMBucketDeleteResponse.
- */
-public class TestOMBucketDeleteResponse {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OMMetadataManager omMetadataManager;
-  private BatchOperation batchOperation;
-
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-  }
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket(
-        volumeName, bucketName);
-    OMBucketCreateResponse omBucketCreateResponse =
-        new OMBucketCreateResponse(omBucketInfo, OMResponse.newBuilder()
-            .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setCreateBucketResponse(
-                CreateBucketResponse.newBuilder().build()).build());
-
-    OMBucketDeleteResponse omBucketDeleteResponse =
-        new OMBucketDeleteResponse(volumeName, bucketName,
-            OMResponse.newBuilder()
-                .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket)
-                .setStatus(OzoneManagerProtocolProtos.Status.OK)
-                .setDeleteBucketResponse(
-                    DeleteBucketResponse.getDefaultInstance()).build());
-
-    omBucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-    omBucketDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToDBBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertNull(omMetadataManager.getBucketTable().get(
-            omMetadataManager.getBucketKey(volumeName, bucketName)));
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketSetPropertyResponse.java
deleted file mode 100644
index b0cafa6..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketSetPropertyResponse.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.response.bucket;
-
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.response.TestOMResponseUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-/**
- * This class tests OMBucketSetPropertyResponse.
- */
-public class TestOMBucketSetPropertyResponse {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OMMetadataManager omMetadataManager;
-  private BatchOperation batchOperation;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-  }
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket(
-        volumeName, bucketName);
-    OMBucketSetPropertyResponse omBucketSetPropertyResponse =
-        new OMBucketSetPropertyResponse(omBucketInfo, OMResponse.newBuilder()
-            .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setCreateBucketResponse(
-                CreateBucketResponse.newBuilder().build()).build());
-
-    omBucketSetPropertyResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToDBBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertEquals(1,
-        omMetadataManager.countRowsInTable(omMetadataManager.getBucketTable()));
-
-    Table.KeyValue<String, OmBucketInfo> keyValue =
-        omMetadataManager.getBucketTable().iterator().next();
-
-    Assert.assertEquals(omMetadataManager.getBucketKey(volumeName,
-        bucketName), keyValue.getKey());
-    Assert.assertEquals(omBucketInfo, keyValue.getValue());
-  }
-
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java
deleted file mode 100644
index 0980106..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains test classes for bucket responses.
- */
-package org.apache.hadoop.ozone.om.response.bucket;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java
deleted file mode 100644
index 1fc3661..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.file;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.slf4j.event.Level;
-
-import java.util.UUID;
-
-/**
- * Tests OMDirectoryCreateResponse.
- */
-public class TestOMDirectoryCreateResponse {
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OMMetadataManager omMetadataManager;
-  private BatchOperation batchOperation;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-  }
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName),
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE);
-
-    OMResponse omResponse = OMResponse.newBuilder().setCreateDirectoryResponse(
-        OzoneManagerProtocolProtos.CreateDirectoryResponse.getDefaultInstance())
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
-            .build();
-
-    OMDirectoryCreateResponse omDirectoryCreateResponse =
-        new OMDirectoryCreateResponse(omKeyInfo, omResponse);
-
-    omDirectoryCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToDBBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertNotNull(omMetadataManager.getKeyTable().get(
-        omMetadataManager.getOzoneDirKey(volumeName, bucketName, keyName)));
-  }
-
-  @Test
-  public void testAddToDBBatchWithNullOmKeyInfo() throws Exception {
-
-    GenericTestUtils.setLogLevel(OMDirectoryCreateResponse.LOG, Level.DEBUG);
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(OMDirectoryCreateResponse.LOG);
-
-
-    String volumeName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    OMResponse omResponse = OMResponse.newBuilder().setCreateDirectoryResponse(
-        OzoneManagerProtocolProtos.CreateDirectoryResponse.getDefaultInstance())
-        .setStatus(OzoneManagerProtocolProtos.Status.OK)
-        .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
-        .build();
-
-    OMDirectoryCreateResponse omDirectoryCreateResponse =
-        new OMDirectoryCreateResponse(null, omResponse);
-
-    omDirectoryCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToDBBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertNull(omMetadataManager.getKeyTable().get(
-        omMetadataManager.getOzoneDirKey(volumeName, bucketName, keyName)));
-
-    Assert.assertTrue(logCapturer.getOutput().contains("Response Status is " +
-        "OK, dirKeyInfo is null"));
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/package-info.java
deleted file mode 100644
index 4c6c005..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains test classes for file responses.
- */
-package org.apache.hadoop.ozone.om.response.file;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
deleted file mode 100644
index 5dfc48e..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .AllocateBlockResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-
-/**
- * Tests OMAllocateBlockResponse.
- */
-public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OMResponse omResponse = OMResponse.newBuilder()
-        .setAllocateBlockResponse(
-            AllocateBlockResponse.getDefaultInstance())
-        .setStatus(OzoneManagerProtocolProtos.Status.OK)
-        .setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock)
-        .build();
-    OMAllocateBlockResponse omAllocateBlockResponse =
-        new OMAllocateBlockResponse(omKeyInfo, clientID, omResponse);
-
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
-
-    // The key entry is deliberately not added beforehand, to verify that
-    // the commit creates it.
-    Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
-    omAllocateBlockResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
-  }
-
-  @Test
-  public void testAddToDBBatchWithErrorResponse() throws Exception {
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OMResponse omResponse = OMResponse.newBuilder()
-        .setAllocateBlockResponse(
-            AllocateBlockResponse.getDefaultInstance())
-        .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND)
-        .setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock)
-        .build();
-    OMAllocateBlockResponse omAllocateBlockResponse =
-        new OMAllocateBlockResponse(omKeyInfo, clientID, omResponse);
-
-    // Before calling addToDBBatch
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
-    Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
-
-    omAllocateBlockResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    // As omResponse carries an error status, addToDBBatch is a no-op.
-    Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
-
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
deleted file mode 100644
index 2b6e6d7..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-
-/**
- * Tests OMKeyCommitResponse.
- */
-public class TestOMKeyCommitResponse extends TestOMKeyResponse {
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse(
-            OzoneManagerProtocolProtos.CommitKeyResponse.getDefaultInstance())
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setCmdType(OzoneManagerProtocolProtos.Type.CommitKey)
-            .build();
-
-    // During key commit, the entry is expected to already exist in the
-    // openKeyTable, so add it here first.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
-    Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
-
-    OMKeyCommitResponse omKeyCommitResponse =
-        new OMKeyCommitResponse(omKeyInfo, clientID, omResponse);
-
-    omKeyCommitResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    // On key commit, the key is removed from the openKey table and added
-    // to the keyTable.
-    Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
-  }
-
-  @Test
-  public void testAddToDBBatchNoOp() throws Exception {
-
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse(
-            OzoneManagerProtocolProtos.CommitKeyResponse.getDefaultInstance())
-            .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND)
-            .setCmdType(OzoneManagerProtocolProtos.Type.CommitKey)
-            .build();
-
-
-    OMKeyCommitResponse omKeyCommitResponse =
-        new OMKeyCommitResponse(omKeyInfo, clientID, omResponse);
-
-    // During key commit, the entry is expected to already exist in the
-    // openKeyTable, so add it here first.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
-    Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
-
-
-    omKeyCommitResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-
-    // As omResponse carries an error status, addToDBBatch is a no-op, so
-    // the entry should still be in the openKey table.
-    Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
-    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
deleted file mode 100644
index 77692a7..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-
-
-/**
- * Tests OMKeyCreateResponse.
- */
-public class TestOMKeyCreateResponse extends TestOMKeyResponse {
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
-                CreateKeyResponse.getDefaultInstance())
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey)
-            .build();
-
-    OMKeyCreateResponse omKeyCreateResponse =
-        new OMKeyCreateResponse(omKeyInfo, clientID, omResponse);
-
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
-    Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
-    omKeyCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
-  }
-
-  @Test
-  public void testAddToDBBatchWithErrorResponse() throws Exception {
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
-        CreateKeyResponse.getDefaultInstance())
-        .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND)
-        .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey)
-        .build();
-
-    OMKeyCreateResponse omKeyCreateResponse =
-        new OMKeyCreateResponse(omKeyInfo, clientID, omResponse);
-
-    // Before calling addToDBBatch
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
-    Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
-
-    omKeyCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    // As omResponse carries an error status, addToDBBatch is a no-op.
-    Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
-
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
deleted file mode 100644
index ba2b738..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Tests OMKeyDeleteResponse.
- */
-public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
-            OzoneManagerProtocolProtos.DeleteKeyResponse.getDefaultInstance())
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
-            .build();
-
-    OMKeyDeleteResponse omKeyDeleteResponse =
-        new OMKeyDeleteResponse(omKeyInfo, omResponse);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
-    omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneKey));
-
-    // As the default key entry has no blocks, it should not be added to
-    // the deletedKeyTable.
-    Assert.assertFalse(omMetadataManager.getDeletedTable().isExist(
-        ozoneKey));
-  }
-
-  @Test
-  public void testAddToDBBatchWithNonEmptyBlocks() throws Exception {
-
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    // Add block to key.
-    List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
-
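-    // A minimal OPEN pipeline with an empty node list is enough here, since
-    // only OM metadata handling is exercised by this test.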
-    Pipeline pipeline = Pipeline.newBuilder()
-        .setState(Pipeline.PipelineState.OPEN)
-        .setId(PipelineID.randomId())
-        .setType(replicationType)
-        .setFactor(replicationFactor)
-        .setNodes(new ArrayList<>())
-        .build();
-
-    OmKeyLocationInfo omKeyLocationInfo =
-        new OmKeyLocationInfo.Builder().setBlockID(
-            new BlockID(100L, 1000L))
-            .setOffset(0).setLength(100L).setPipeline(pipeline).build();
-
-
-    omKeyLocationInfoList.add(omKeyLocationInfo);
-
-    omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
-            OzoneManagerProtocolProtos.DeleteKeyResponse.getDefaultInstance())
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
-            .build();
-
-    OMKeyDeleteResponse omKeyDeleteResponse =
-        new OMKeyDeleteResponse(omKeyInfo, omResponse);
-
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
-    omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneKey));
-
-    // As the key has blocks, it should be present in the deletedKeyTable.
-    Assert.assertTrue(omMetadataManager.getDeletedTable().isExist(
-        ozoneKey));
-  }
-
-
-  @Test
-  public void testAddToDBBatchWithErrorResponse() throws Exception {
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
-            OzoneManagerProtocolProtos.DeleteKeyResponse.getDefaultInstance())
-            .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND)
-            .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
-            .build();
-
-    OMKeyDeleteResponse omKeyDeleteResponse =
-        new OMKeyDeleteResponse(omKeyInfo, omResponse);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
-
-    omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    // As omResponse carries an error status, addToDBBatch is a no-op, so
-    // the entry should still be in the keyTable.
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
-
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java
deleted file mode 100644
index 92daa1d..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key;
-
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-
-/**
- * Tests OMKeyRenameResponse.
- */
-public class TestOMKeyRenameResponse extends TestOMKeyResponse {
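-  // A successful rename removes the fromKey entry and creates the toKey
-  // entry in the key table; an error response leaves both untouched.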
-  @Test
-  public void testAddToDBBatch() throws Exception {
-
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder().setRenameKeyResponse(
-            OzoneManagerProtocolProtos.RenameKeyResponse.getDefaultInstance())
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey)
-            .build();
-
-    String toKeyName = UUID.randomUUID().toString();
-
-    OMKeyRenameResponse omKeyRenameResponse =
-        new OMKeyRenameResponse(omKeyInfo, toKeyName, keyName, omResponse);
-
-    String ozoneFromKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    String ozoneToKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        toKeyName);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneFromKey));
-    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneToKey));
-
-    omKeyRenameResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneFromKey));
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneToKey));
-  }
-
-  @Test
-  public void testAddToDBBatchWithErrorResponse() throws Exception {
-
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder().setRenameKeyResponse(
-            OzoneManagerProtocolProtos.RenameKeyResponse.getDefaultInstance())
-            .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND)
-            .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey)
-            .build();
-
-    String toKeyName = UUID.randomUUID().toString();
-
-    OMKeyRenameResponse omKeyRenameResponse =
-        new OMKeyRenameResponse(omKeyInfo, toKeyName, keyName, omResponse);
-
-    String ozoneFromKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    String ozoneToKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        toKeyName);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneFromKey));
-    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneToKey));
-
-    omKeyRenameResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    // As omResponse carries an error status, addToDBBatch is a no-op, so
-    // no changes should happen.
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneFromKey));
-    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneToKey));
-
-  }
-
-  @Test
-  public void testAddToDBBatchWithSameKeyName() throws Exception {
-
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder().setRenameKeyResponse(
-            OzoneManagerProtocolProtos.RenameKeyResponse.getDefaultInstance())
-            .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND)
-            .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey)
-            .build();
-
-
-    // Pass a toKeyName that is the same as keyName.
-    OMKeyRenameResponse omKeyRenameResponse =
-        new OMKeyRenameResponse(omKeyInfo, keyName, keyName, omResponse);
-
-    String ozoneFromKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneFromKey));
-
-    omKeyRenameResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneFromKey));
-
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
deleted file mode 100644
index 626a3de..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.key;
-
-import java.util.UUID;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-/**
- * Base test class for key response.
- */
-@SuppressWarnings("visibilitymodifier")
-public class TestOMKeyResponse {
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  protected OMMetadataManager omMetadataManager;
-  protected BatchOperation batchOperation;
-
-  protected String volumeName;
-  protected String bucketName;
-  protected String keyName;
-  protected HddsProtos.ReplicationFactor replicationFactor;
-  protected HddsProtos.ReplicationType replicationType;
-  protected long clientID;
-
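-  // Shared fixture: a fresh metadata manager and batch operation per test,
-  // plus randomized volume/bucket/key names.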
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-
-    volumeName = UUID.randomUUID().toString();
-    bucketName = UUID.randomUUID().toString();
-    keyName = UUID.randomUUID().toString();
-    replicationFactor = HddsProtos.ReplicationFactor.ONE;
-    replicationType = HddsProtos.ReplicationType.RATIS;
-    clientID = 1000L;
-  }
-
-  @After
-  public void stop() {
-    Mockito.framework().clearInlineMocks();
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/package-info.java
deleted file mode 100644
index 1ebf4c2..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains test classes for key responses.
- */
-package org.apache.hadoop.ozone.om.response.key;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/package-info.java
deleted file mode 100644
index fd48e14..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Tests for OM Response.
- */
-package org.apache.hadoop.ozone.om.response;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketCreateResponse.java
deleted file mode 100644
index f4a76e3..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketCreateResponse.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.response.s3.bucket;
-
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest;
-import org.apache.hadoop.ozone.om.response.TestOMResponseUtils;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-/**
- * Class to test S3BucketCreateResponse.
- */
-public class TestS3BucketCreateResponse {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OMMetadataManager omMetadataManager;
-  private BatchOperation batchOperation;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-  }
-
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-    String userName = UUID.randomUUID().toString();
-    String s3BucketName = UUID.randomUUID().toString();
-    String volumeName = S3BucketCreateRequest.formatOzoneVolumeName(userName);
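-    // The S3 bucket is backed by an Ozone volume whose name is derived
-    // from the user name.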
-
-    S3BucketCreateResponse s3BucketCreateResponse =
-        TestOMResponseUtils.createS3BucketResponse(userName, volumeName,
-            s3BucketName);
-
-    s3BucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertNotNull(omMetadataManager.getS3Table().get(s3BucketName));
-    Assert.assertEquals(s3BucketCreateResponse.getS3Mapping(),
-        omMetadataManager.getS3Table().get(s3BucketName));
-
-    Assert.assertEquals(1,
-        omMetadataManager.countRowsInTable(omMetadataManager.getBucketTable()));
-    Assert.assertEquals(1,
-        omMetadataManager.countRowsInTable(omMetadataManager.getVolumeTable()));
-
-    Assert.assertEquals(omMetadataManager.getVolumeKey(volumeName),
-        omMetadataManager.getVolumeTable().iterator().next().getKey());
-    Assert.assertEquals(omMetadataManager.getBucketKey(volumeName,
-        s3BucketName), omMetadataManager.getBucketTable().iterator().next()
-        .getKey());
-
-  }
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketDeleteResponse.java
deleted file mode 100644
index 865f4c6..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketDeleteResponse.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.s3.bucket;
-
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest;
-import org.apache.hadoop.ozone.om.response.TestOMResponseUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .S3DeleteBucketResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-
-
-/**
- * Tests S3BucketDeleteResponse.
- */
-public class TestS3BucketDeleteResponse {
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OMMetadataManager omMetadataManager;
-  private BatchOperation batchOperation;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-  }
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-    String s3BucketName = UUID.randomUUID().toString();
-    String userName = "ozone";
-    String volumeName = S3BucketCreateRequest.formatOzoneVolumeName(userName);
-    S3BucketCreateResponse s3BucketCreateResponse =
-        TestOMResponseUtils.createS3BucketResponse(userName, volumeName,
-            s3BucketName);
-
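-    // Stage the bucket creation first so the delete below has entries to
-    // remove when the batch commits.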
-    s3BucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    OMResponse omResponse = OMResponse.newBuilder().setCmdType(
-        OzoneManagerProtocolProtos.Type.DeleteS3Bucket).setStatus(
-        OzoneManagerProtocolProtos.Status.OK).setSuccess(true)
-        .setDeleteS3BucketResponse(S3DeleteBucketResponse.newBuilder()).build();
-
-    S3BucketDeleteResponse s3BucketDeleteResponse =
-        new S3BucketDeleteResponse(s3BucketName, volumeName, omResponse);
-
-    s3BucketDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    // Verify that the s3 bucket no longer exists.
-    Assert.assertNull(omMetadataManager.getS3Table().get(s3BucketName));
-    Assert.assertNull(omMetadataManager.getBucketTable().get(
-        omMetadataManager.getBucketKey(volumeName, s3BucketName)));
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java
deleted file mode 100644
index 364396b..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains test classes for s3 bucket responses.
- */
-package org.apache.hadoop.ozone.om.response.s3.bucket;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java
deleted file mode 100644
index 4996bd0..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.response.s3.multipart;
-
-import java.util.UUID;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Tests the S3 initiate multipart upload response.
- */
-public class TestS3InitiateMultipartUploadResponse
-    extends TestS3MultipartResponse {
-
-  @Test
-  public void addDBToBatch() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-    String multipartUploadID = UUID.randomUUID().toString();
-
-    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse =
-        createS3InitiateMPUResponse(volumeName, bucketName, keyName,
-            multipartUploadID);
-
-
-    s3InitiateMultipartUploadResponse.addToDBBatch(omMetadataManager,
-        batchOperation);
-
-    // Commit the batch manually and verify that addToBatch succeeded.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
-
-    Assert.assertNotNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
-    Assert.assertNotNull(omMetadataManager.getMultipartInfoTable()
-        .get(multipartKey));
-
-    Assert.assertEquals(multipartUploadID,
-        omMetadataManager.getMultipartInfoTable().get(multipartKey)
-            .getUploadID());
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
deleted file mode 100644
index 09b028b..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om.response.s3.multipart;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.UUID;
-
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.rules.TemporaryFolder;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .MultipartUploadAbortResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .PartKeyInfo;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-
-/**
- * Base test class for S3 MPU response.
- */
-
-@SuppressWarnings("VisibilityModifier")
-public class TestS3MultipartResponse {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  protected OMMetadataManager omMetadataManager;
-  protected BatchOperation batchOperation;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-  }
-
-
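-  // Assembles an InitiateMultiPartUpload OMResponse together with the
-  // multipart key info and key info that the response object expects.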
-  public S3InitiateMultipartUploadResponse createS3InitiateMPUResponse(
-      String volumeName, String bucketName, String keyName,
-      String multipartUploadID) {
-    OmMultipartKeyInfo multipartKeyInfo = new OmMultipartKeyInfo(
-        multipartUploadID, new HashMap<>());
-
-    OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setCreationTime(Time.now())
-        .setModificationTime(Time.now())
-        .setReplicationType(HddsProtos.ReplicationType.RATIS)
-        .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
-        .build();
-
-    OMResponse omResponse = OMResponse.newBuilder()
-            .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload)
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setSuccess(true).setInitiateMultiPartUploadResponse(
-            OzoneManagerProtocolProtos.MultipartInfoInitiateResponse
-                .newBuilder().setVolumeName(volumeName)
-                .setBucketName(bucketName)
-                .setKeyName(keyName)
-                .setMultipartUploadID(multipartUploadID)).build();
-
-    return new S3InitiateMultipartUploadResponse(multipartKeyInfo, omKeyInfo,
-            omResponse);
-  }
-
-  public S3MultipartUploadAbortResponse createS3AbortMPUResponse(
-      String multipartKey, long timeStamp,
-      OmMultipartKeyInfo omMultipartKeyInfo) {
-    OMResponse omResponse = OMResponse.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.AbortMultiPartUpload)
-        .setStatus(OzoneManagerProtocolProtos.Status.OK)
-        .setSuccess(true)
-        .setAbortMultiPartUploadResponse(
-            MultipartUploadAbortResponse.newBuilder().build()).build();
-
-    return new S3MultipartUploadAbortResponse(multipartKey, omMultipartKeyInfo,
-            omResponse);
-  }
-
-
-  public void addPart(int partNumber, PartKeyInfo partKeyInfo,
-      OmMultipartKeyInfo omMultipartKeyInfo) {
-    omMultipartKeyInfo.addPartKeyInfo(partNumber, partKeyInfo);
-  }
-
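-  // Builds a PartKeyInfo whose part name follows the multipart key format,
-  // with placeholder size and timestamps for testing.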
-  public PartKeyInfo createPartKeyInfo(
-      String volumeName, String bucketName, String keyName, int partNumber) {
-    return PartKeyInfo.newBuilder()
-        .setPartNumber(partNumber)
-        .setPartName(omMetadataManager.getMultipartKey(volumeName,
-            bucketName, keyName, UUID.randomUUID().toString()))
-        .setPartKeyInfo(KeyInfo.newBuilder()
-            .setVolumeName(volumeName)
-            .setBucketName(bucketName)
-            .setKeyName(keyName)
-            .setDataSize(100L) // Just set dummy size for testing
-            .setCreationTime(Time.now())
-            .setModificationTime(Time.now())
-            .setType(HddsProtos.ReplicationType.RATIS)
-            .setFactor(HddsProtos.ReplicationFactor.ONE).build()).build();
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
deleted file mode 100644
index 60aacd5..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.s3.multipart;
-
-import java.util.UUID;
-
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .PartKeyInfo;
-import org.apache.hadoop.util.Time;
-
-/**
- * Test multipart upload abort response.
- */
-public class TestS3MultipartUploadAbortResponse
-    extends TestS3MultipartResponse {
-
-
-  @Test
-  public void testAddDBToBatch() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-    String multipartUploadID = UUID.randomUUID().toString();
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
-
-    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse =
-        createS3InitiateMPUResponse(volumeName, bucketName, keyName,
-            multipartUploadID);
-
-    s3InitiateMultipartUploadResponse.addToDBBatch(omMetadataManager,
-        batchOperation);
-
-    S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse =
-        createS3AbortMPUResponse(multipartKey, Time.now(),
-            s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo());
-
-    s3MultipartUploadAbortResponse.addToDBBatch(omMetadataManager,
-        batchOperation);
-
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
-    Assert.assertNull(
-        omMetadataManager.getMultipartInfoTable().get(multipartKey));
-
-    // As no parts were created, there should be no entries in the delete
-    // table.
-    Assert.assertTrue(omMetadataManager.countRowsInTable(
-        omMetadataManager.getDeletedTable()) == 0);
-  }
-
-  @Test
-  public void testAddDBToBatchWithParts() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-    String multipartUploadID = UUID.randomUUID().toString();
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
-
-    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse =
-        createS3InitiateMPUResponse(volumeName, bucketName, keyName,
-            multipartUploadID);
-
-    s3InitiateMultipartUploadResponse.addToDBBatch(omMetadataManager,
-        batchOperation);
-
-
-    // Add some dummy parts for testing.
-    // No key locations are added, as this test only checks whether entries
-    // are added to the delete table.
-
-    OmMultipartKeyInfo omMultipartKeyInfo =
-        s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo();
-
-    PartKeyInfo part1 = createPartKeyInfo(volumeName, bucketName,
-        keyName, 1);
-    PartKeyInfo part2 = createPartKeyInfo(volumeName, bucketName,
-        keyName, 2);
-
-    addPart(1, part1, omMultipartKeyInfo);
-    addPart(2, part2, omMultipartKeyInfo);
-
-
-    long timeStamp = Time.now();
-    S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse =
-        createS3AbortMPUResponse(multipartKey, timeStamp,
-            s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo());
-
-    s3MultipartUploadAbortResponse.addToDBBatch(omMetadataManager,
-        batchOperation);
-
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
-    Assert.assertNull(
-        omMetadataManager.getMultipartInfoTable().get(multipartKey));
-
-    // Two parts were created, so the delete table should contain two entries.
-    Assert.assertEquals(2, omMetadataManager.countRowsInTable(
-        omMetadataManager.getDeletedTable()));
-
-    String part1DeletedKeyName =
-        omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
-
-    String part2DeletedKeyName =
-        omMultipartKeyInfo.getPartKeyInfo(2).getPartName();
-
-    Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
-        part1DeletedKeyName));
-    Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
-        part2DeletedKeyName));
-
-    RepeatedOmKeyInfo ro =
-        omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
-    Assert.assertEquals(OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo()),
-        ro.getOmKeyInfoList().get(0));
-
-    ro = omMetadataManager.getDeletedTable().get(part2DeletedKeyName);
-    Assert.assertEquals(OmKeyInfo.getFromProtobuf(part2.getPartKeyInfo()),
-        ro.getOmKeyInfoList().get(0));
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java
deleted file mode 100644
index 1fc3a95..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Package contains test classes for S3 MPU responses.
- */
-
-package org.apache.hadoop.ozone.om.response.s3.multipart;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java
deleted file mode 100644
index b69d8b7..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.volume;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .UserVolumeInfo;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.IOException;
-import java.util.UUID;
-
-import static org.junit.Assert.fail;
-
-/**
- * This class tests OMVolumeCreateResponse.
- */
-public class TestOMVolumeCreateResponse {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OMMetadataManager omMetadataManager;
-  private BatchOperation batchOperation;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-  }
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String userName = "user1";
-    UserVolumeInfo volumeList = UserVolumeInfo.newBuilder()
-        .setObjectID(1).setUpdateID(1)
-        .addVolumeNames(volumeName).build();
-
-    OMResponse omResponse = OMResponse.newBuilder()
-            .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume)
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setSuccess(true)
-            .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance())
-        .build();
-
-    OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
-        .setOwnerName(userName).setAdminName(userName)
-        .setVolume(volumeName).setCreationTime(Time.now()).build();
-    OMVolumeCreateResponse omVolumeCreateResponse =
-        new OMVolumeCreateResponse(omVolumeArgs, volumeList, omResponse);
-
-    omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToDBBatch took effect.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-
-    Assert.assertEquals(1,
-        omMetadataManager.countRowsInTable(omMetadataManager.getVolumeTable()));
-    Assert.assertEquals(omVolumeArgs,
-        omMetadataManager.getVolumeTable().iterator().next().getValue());
-
-    Assert.assertEquals(volumeList,
-        omMetadataManager.getUserTable().get(
-            omMetadataManager.getUserKey(userName)));
-  }
-
-  @Test
-  public void testAddToDBBatchNoOp() throws Exception {
-
-    OMResponse omResponse = OMResponse.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume)
-        .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS)
-        .setSuccess(false)
-        .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance())
-        .build();
-
-    OMVolumeCreateResponse omVolumeCreateResponse =
-        new OMVolumeCreateResponse(null, null, omResponse);
-
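-    // With a non-OK status, addToDBBatch is expected to be a no-op that
-    // leaves the volume table untouched.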
-    try {
-      omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-      Assert.assertEquals(0, omMetadataManager.countRowsInTable(
-          omMetadataManager.getVolumeTable()));
-    } catch (IOException ex) {
-      fail("testAddToDBBatchNoOp failed");
-    }
-
-  }
-
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java
deleted file mode 100644
index 5d6b481..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.volume;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .UserVolumeInfo;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.IOException;
-import java.util.UUID;
-
-import static org.junit.Assert.fail;
-
-/**
- * This class tests OMVolumeDeleteResponse.
- */
-public class TestOMVolumeDeleteResponse {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OMMetadataManager omMetadataManager;
-  private BatchOperation batchOperation;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-  }
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String userName = "user1";
-    UserVolumeInfo volumeList = UserVolumeInfo.newBuilder()
-        .setObjectID(1)
-        .setUpdateID(1)
-        .addVolumeNames(volumeName).build();
-
-    OMResponse omResponse = OMResponse.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume)
-        .setStatus(OzoneManagerProtocolProtos.Status.OK)
-        .setSuccess(true)
-        .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance())
-        .build();
-
-    OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
-        .setOwnerName(userName).setAdminName(userName)
-        .setVolume(volumeName).setCreationTime(Time.now()).build();
-    OMVolumeCreateResponse omVolumeCreateResponse =
-        new OMVolumeCreateResponse(omVolumeArgs, volumeList, omResponse);
-
-    // Since the volume is being deleted, the updated volume list should be
-    // empty.
-    UserVolumeInfo updatedVolumeList = UserVolumeInfo.newBuilder()
-        .setObjectID(1).setUpdateID(1).build();
-    OMVolumeDeleteResponse omVolumeDeleteResponse =
-        new OMVolumeDeleteResponse(volumeName, userName, updatedVolumeList,
-            omResponse);
-
-    omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-    omVolumeDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToDBBatch took effect.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertNull(
-        omMetadataManager.getVolumeTable().get(
-            omMetadataManager.getVolumeKey(volumeName)));
-
-    Assert.assertNull(
-        omMetadataManager.getUserTable().get(
-            omMetadataManager.getUserKey(userName)));
-  }
-
-  @Test
-  public void testAddToDBBatchNoOp() throws Exception {
-
-    OMResponse omResponse = OMResponse.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume)
-        .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND)
-        .setSuccess(false)
-        .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance())
-        .build();
-
-    OMVolumeDeleteResponse omVolumeDeleteResponse =
-        new OMVolumeDeleteResponse(null, null, null, omResponse);
-
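-    // With a non-OK status, addToDBBatch should skip the delete entirely.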
-    try {
-      omVolumeDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
-    } catch (IOException ex) {
-      fail("testAddToDBBatchNoOp failed");
-    }
-
-  }
-
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java
deleted file mode 100644
index 0951c06..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.volume;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .UserVolumeInfo;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.IOException;
-import java.util.UUID;
-
-import static org.junit.Assert.fail;
-
-/**
- * This class tests OMVolumeSetOwnerResponse.
- */
-public class TestOMVolumeSetOwnerResponse {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OMMetadataManager omMetadataManager;
-  private BatchOperation batchOperation;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-  }
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String oldOwner = "user1";
-    UserVolumeInfo volumeList = UserVolumeInfo.newBuilder()
-        .setObjectID(1)
-        .setUpdateID(1)
-        .addVolumeNames(volumeName).build();
-
-    OMResponse omResponse = OMResponse.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty)
-        .setStatus(OzoneManagerProtocolProtos.Status.OK)
-        .setSuccess(true)
-        .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance())
-        .build();
-
-    OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
-        .setOwnerName(oldOwner).setAdminName(oldOwner)
-        .setVolume(volumeName).setCreationTime(Time.now()).build();
-    OMVolumeCreateResponse omVolumeCreateResponse =
-        new OMVolumeCreateResponse(omVolumeArgs, volumeList, omResponse);
-
-    String newOwner = "user2";
-    UserVolumeInfo newOwnerVolumeList = UserVolumeInfo.newBuilder()
-        .setObjectID(1)
-        .setUpdateID(1)
-        .addVolumeNames(volumeName).build();
-    UserVolumeInfo oldOwnerVolumeList = UserVolumeInfo.newBuilder()
-        .setObjectID(2)
-        .setUpdateID(2)
-        .build();
-    OmVolumeArgs newOwnerVolumeArgs = OmVolumeArgs.newBuilder()
-        .setOwnerName(newOwner).setAdminName(newOwner)
-        .setVolume(volumeName).setCreationTime(omVolumeArgs.getCreationTime())
-        .build();
-
-    OMVolumeSetOwnerResponse omVolumeSetOwnerResponse =
-        new OMVolumeSetOwnerResponse(oldOwner,  oldOwnerVolumeList,
-            newOwnerVolumeList, newOwnerVolumeArgs, omResponse);
-
-    omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
-    omVolumeSetOwnerResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToDBBatch took effect.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-
-    Assert.assertEquals(1,
-        omMetadataManager.countRowsInTable(omMetadataManager.getVolumeTable()));
-
-    Table.KeyValue<String, OmVolumeArgs> keyValue =
-        omMetadataManager.getVolumeTable().iterator().next();
-
-    Assert.assertEquals(omMetadataManager.getVolumeKey(volumeName),
-        keyValue.getKey());
-    Assert.assertEquals(newOwnerVolumeArgs, keyValue.getValue());
-
-    Assert.assertEquals(newOwnerVolumeList,
-        omMetadataManager.getUserTable().get(
-            omMetadataManager.getUserKey(newOwner)));
-  }
-
-  @Test
-  public void testAddToDBBatchNoOp() throws Exception {
-
-    OMResponse omResponse = OMResponse.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty)
-        .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND)
-        .setSuccess(false)
-        .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance())
-        .build();
-
-    OMVolumeSetOwnerResponse omVolumeSetOwnerResponse =
-        new OMVolumeSetOwnerResponse(null, null, null, null, omResponse);
-
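-    // With a non-OK status, addToDBBatch should not modify the volume table.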
-    try {
-      omVolumeSetOwnerResponse.addToDBBatch(omMetadataManager, batchOperation);
-      Assert.assertEquals(0, omMetadataManager.countRowsInTable(
-          omMetadataManager.getVolumeTable()));
-    } catch (IOException ex) {
-      fail("testAddToDBBatchNoOp failed");
-    }
-
-  }
-
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java
deleted file mode 100644
index 25250bd..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.response.volume;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.IOException;
-import java.util.UUID;
-
-import static org.junit.Assert.fail;
-
-/**
- * This class tests OMVolumeSetQuotaResponse.
- */
-public class TestOMVolumeSetQuotaResponse {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OMMetadataManager omMetadataManager;
-  private BatchOperation batchOperation;
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
-        folder.newFolder().getAbsolutePath());
-    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-  }
-
-  @Test
-  public void testAddToDBBatch() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String userName = "user1";
-
-    OMResponse omResponse = OMResponse.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty)
-        .setStatus(OzoneManagerProtocolProtos.Status.OK)
-        .setSuccess(true)
-        .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance())
-        .build();
-
-    OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
-        .setOwnerName(userName).setAdminName(userName)
-        .setVolume(volumeName).setCreationTime(Time.now()).build();
-    OMVolumeSetQuotaResponse omVolumeSetQuotaResponse =
-        new OMVolumeSetQuotaResponse(omVolumeArgs, omResponse);
-
-    omVolumeSetQuotaResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Commit the batch manually and verify that addToDBBatch took effect.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    Assert.assertEquals(1,
-        omMetadataManager.countRowsInTable(omMetadataManager.getVolumeTable()));
-
-    Table.KeyValue<String, OmVolumeArgs> keyValue =
-        omMetadataManager.getVolumeTable().iterator().next();
-
-    Assert.assertEquals(omMetadataManager.getVolumeKey(volumeName),
-        keyValue.getKey());
-    Assert.assertEquals(omVolumeArgs, keyValue.getValue());
-
-  }
-
-  @Test
-  public void testAddToDBBatchNoOp() throws Exception {
-
-    OMResponse omResponse = OMResponse.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty)
-        .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND)
-        .setSuccess(false)
-        .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance())
-        .build();
-
-    OMVolumeSetQuotaResponse omVolumeSetQuotaResponse =
-        new OMVolumeSetQuotaResponse(null, omResponse);
-
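-    // With a non-OK status, addToDBBatch should leave the volume table empty.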
-    try {
-      omVolumeSetQuotaResponse.addToDBBatch(omMetadataManager, batchOperation);
-      Assert.assertEquals(0, omMetadataManager.countRowsInTable(
-          omMetadataManager.getVolumeTable()));
-    } catch (IOException ex) {
-      fail("testAddToDBBatchNoOp failed");
-    }
-  }
-
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/package-info.java
deleted file mode 100644
index 98788cd..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Test Volume functions.
- */
-package org.apache.hadoop.ozone.om.response.volume;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneBlockTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneBlockTokenSecretManager.java
deleted file mode 100644
index ea2d46a..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneBlockTokenSecretManager.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.security;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificate.client.OMCertificateClient;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.security.KeyPair;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.Signature;
-import java.security.cert.X509Certificate;
-import java.util.EnumSet;
-
-/**
- * Test class for {@link OzoneBlockTokenSecretManager}.
- */
-public class TestOzoneBlockTokenSecretManager {
-
-  private OzoneBlockTokenSecretManager secretManager;
-  private KeyPair keyPair;
-  private X509Certificate x509Certificate;
-  private long expiryTime;
-  private String omCertSerialId;
-  private CertificateClient client;
-  private static final String BASEDIR = GenericTestUtils
-      .getTempPath(TestOzoneBlockTokenSecretManager.class.getSimpleName());
-
-
-  @Before
-  public void setUp() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, BASEDIR);
-    // Create Ozone Master key pair.
-    keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
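-    // Note: Time.monotonicNow() is in milliseconds, so this expiry is only
-    // about 86 seconds; long enough for the tests below.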
-    expiryTime = Time.monotonicNow() + 60 * 60 * 24;
-    // Create Ozone Master certificate (SCM CA issued cert) and key store.
-    SecurityConfig securityConfig = new SecurityConfig(conf);
-    x509Certificate = KeyStoreTestUtil
-        .generateCertificate("CN=OzoneMaster", keyPair, 30, "SHA256withRSA");
-    omCertSerialId = x509Certificate.getSerialNumber().toString();
-    secretManager = new OzoneBlockTokenSecretManager(securityConfig,
-        expiryTime, omCertSerialId);
-    client = getCertificateClient(securityConfig);
-    client.init();
-    secretManager.start(client);
-  }
-
-  private CertificateClient getCertificateClient(SecurityConfig secConf)
-      throws Exception {
-    return new OMCertificateClient(secConf){
-      @Override
-      public X509Certificate getCertificate() {
-        return x509Certificate;
-      }
-
-      @Override
-      public PrivateKey getPrivateKey() {
-        return keyPair.getPrivate();
-      }
-
-      @Override
-      public PublicKey getPublicKey() {
-        return keyPair.getPublic();
-      }
-    };
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    secretManager = null;
-  }
-
-  @Test
-  public void testGenerateToken() throws Exception {
-    Token<OzoneBlockTokenIdentifier> token = secretManager.generateToken(
-        "101", EnumSet.allOf(AccessModeProto.class), 100);
-    OzoneBlockTokenIdentifier identifier =
-        OzoneBlockTokenIdentifier.readFieldsProtobuf(new DataInputStream(
-            new ByteArrayInputStream(token.getIdentifier())));
-    // Check basic details.
-    Assert.assertEquals("101", identifier.getBlockId());
-    Assert.assertEquals(EnumSet.allOf(AccessModeProto.class),
-        identifier.getAccessModes());
-    Assert.assertEquals(omCertSerialId, identifier.getOmCertSerialId());
-
-    validateHash(token.getPassword(), token.getIdentifier());
-  }
-
-  @Test
-  public void testCreateIdentifierSuccess() throws Exception {
-    OzoneBlockTokenIdentifier btIdentifier = secretManager.createIdentifier(
-        "testUser", "101", EnumSet.allOf(AccessModeProto.class), 100);
-
-    // Check basic details.
-    Assert.assertEquals("testUser", btIdentifier.getOwnerId());
-    Assert.assertEquals("101", btIdentifier.getBlockId());
-    Assert.assertEquals(EnumSet.allOf(AccessModeProto.class),
-        btIdentifier.getAccessModes());
-    Assert.assertEquals(omCertSerialId, btIdentifier.getOmCertSerialId());
-
-    byte[] hash = secretManager.createPassword(btIdentifier);
-    validateHash(hash, btIdentifier.getBytes());
-  }
-
-  /**
-   * Validate hash using public key of KeyPair.
-   */
-  private void validateHash(byte[] hash, byte[] identifier) throws Exception {
-    Signature rsaSignature =
-        Signature.getInstance(secretManager.getDefaultSignatureAlgorithm());
-    rsaSignature.initVerify(client.getPublicKey());
-    rsaSignature.update(identifier);
-    Assert.assertTrue(rsaSignature.verify(hash));
-  }
-
-  @Test
-  public void testCreateIdentifierFailure() throws Exception {
-    LambdaTestUtils.intercept(SecurityException.class,
-        "Ozone block token can't be created without owner and access mode "
-            + "information.", () -> {
-          secretManager.createIdentifier();
-        });
-  }
-
-  @Test
-  public void testRenewToken() throws Exception {
-    LambdaTestUtils.intercept(UnsupportedOperationException.class,
-        "Renew token operation is not supported for ozone block" +
-            " tokens.", () -> {
-          secretManager.renewToken(null, null);
-        });
-  }
-
-  @Test
-  public void testCancelToken() throws Exception {
-    LambdaTestUtils.intercept(UnsupportedOperationException.class,
-        "Cancel token operation is not supported for ozone block" +
-            " tokens.", () -> {
-          secretManager.cancelToken(null, null);
-        });
-  }
-
-  @Test
-  public void testVerifySignatureFailure() throws Exception {
-    OzoneBlockTokenIdentifier id = new OzoneBlockTokenIdentifier(
-        "testUser", "4234", EnumSet.allOf(AccessModeProto.class),
-        Time.now() + 60 * 60 * 24, "123444", 1024);
-    LambdaTestUtils.intercept(UnsupportedOperationException.class, "operation" +
-            " is not supported for block tokens",
-        () -> secretManager.verifySignature(id,
-            client.signData(id.getBytes())));
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
deleted file mode 100644
index 874252d..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.security;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificate.client.OMCertificateClient;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.S3SecretManager;
-import org.apache.hadoop.ozone.om.S3SecretManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.security.token.SecretManager;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.io.IOException;
-import java.security.KeyPair;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.Signature;
-import java.security.cert.X509Certificate;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type.S3TOKEN;
-
-
-/**
- * Test class for {@link OzoneDelegationTokenSecretManager}.
- */
-public class TestOzoneDelegationTokenSecretManager {
-
-  private OzoneDelegationTokenSecretManager secretManager;
-  private SecurityConfig securityConfig;
-  private CertificateClient certificateClient;
-  private long expiryTime;
-  private Text serviceRpcAdd;
-  private OzoneConfiguration conf;
-  private final static Text TEST_USER = new Text("testUser");
-  private long tokenMaxLifetime = 1000 * 20;
-  private long tokenRemoverScanInterval = 1000 * 20;
-  private S3SecretManager s3SecretManager;
-  private String s3Secret = "dbaksbzljandlkandlsd";
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Before
-  public void setUp() throws Exception {
-    conf = createNewTestPath();
-    securityConfig = new SecurityConfig(conf);
-    certificateClient = setupCertificateClient();
-    certificateClient.init();
-    expiryTime = Time.monotonicNow() + 60 * 60 * 24;
-    serviceRpcAdd = new Text("localhost");
-    final Map<String, String> s3Secrets = new HashMap<>();
-    s3Secrets.put("testuser1", s3Secret);
-    s3Secrets.put("abc", "djakjahkd");
-    OMMetadataManager metadataManager = new OmMetadataManagerImpl(conf);
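-    // Stub S3SecretManager backed by the in-memory secrets map above.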
-    s3SecretManager = new S3SecretManagerImpl(conf, metadataManager) {
-      @Override
-      public S3SecretValue getS3Secret(String kerberosID) {
-        if (s3Secrets.containsKey(kerberosID)) {
-          return new S3SecretValue(kerberosID, s3Secrets.get(kerberosID));
-        }
-        return null;
-      }
-
-      @Override
-      public String getS3UserSecretString(String awsAccessKey) {
-        if (s3Secrets.containsKey(awsAccessKey)) {
-          return s3Secrets.get(awsAccessKey);
-        }
-        return null;
-      }
-    };
-  }
-
-  private OzoneConfiguration createNewTestPath() throws IOException {
-    OzoneConfiguration config = new OzoneConfiguration();
-    File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
-      Assert.assertTrue(newFolder.mkdirs());
-    }
-    ServerUtils.setOzoneMetaDirPath(config, newFolder.toString());
-    return config;
-  }
-
-  /**
-   * Helper function to create certificate client.
-   * */
-  private CertificateClient setupCertificateClient() throws Exception {
-    KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
-    X509Certificate cert = KeyStoreTestUtil
-        .generateCertificate("CN=OzoneMaster", keyPair, 30, "SHA256withRSA");
-
-    return new OMCertificateClient(securityConfig) {
-      @Override
-      public X509Certificate getCertificate() {
-        return cert;
-      }
-
-      @Override
-      public PrivateKey getPrivateKey() {
-        return keyPair.getPrivate();
-      }
-
-      @Override
-      public PublicKey getPublicKey() {
-        return keyPair.getPublic();
-      }
-
-      @Override
-      public X509Certificate getCertificate(String serialId) {
-        return cert;
-      }
-    };
-  }
-
-  @After
-  public void tearDown() throws IOException {
-    secretManager.stop();
-  }
-
-  @Test
-  public void testCreateToken() throws Exception {
-    secretManager = createSecretManager(conf, tokenMaxLifetime,
-        expiryTime, tokenRemoverScanInterval);
-    secretManager.start(certificateClient);
-    Token<OzoneTokenIdentifier> token = secretManager.createToken(TEST_USER,
-        TEST_USER, TEST_USER);
-    OzoneTokenIdentifier identifier =
-        OzoneTokenIdentifier.readProtoBuf(token.getIdentifier());
-    // Check basic details.
-    Assert.assertEquals(TEST_USER, identifier.getRealUser());
-    Assert.assertEquals(TEST_USER, identifier.getRenewer());
-    Assert.assertEquals(TEST_USER, identifier.getOwner());
-
-    validateHash(token.getPassword(), token.getIdentifier());
-  }
-
-  private void restartSecretManager() throws IOException {
-    secretManager.stop();
-    secretManager = null;
-    secretManager = createSecretManager(conf, tokenMaxLifetime,
-        expiryTime, tokenRemoverScanInterval);
-  }
-
-  private void testRenewTokenSuccessHelper(boolean restartSecretManager)
-      throws Exception {
-    secretManager = createSecretManager(conf, tokenMaxLifetime,
-        expiryTime, tokenRemoverScanInterval);
-    secretManager.start(certificateClient);
-    Token<OzoneTokenIdentifier> token = secretManager.createToken(TEST_USER,
-        TEST_USER,
-        TEST_USER);
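-    // Let a short interval elapse between token creation and renewal.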
-    Thread.sleep(10 * 5);
-
-    if (restartSecretManager) {
-      restartSecretManager();
-    }
-
-    long renewalTime = secretManager.renewToken(token, TEST_USER.toString());
-    Assert.assertTrue(renewalTime > 0);
-  }
-
-  @Test
-  public void testReloadAndRenewToken() throws Exception {
-    testRenewTokenSuccessHelper(true);
-  }
-
-  @Test
-  public void testRenewTokenSuccess() throws Exception {
-    testRenewTokenSuccessHelper(false);
-  }
-
-  /**
-   * Tests failure for mismatch in renewer.
-   */
-  @Test
-  public void testRenewTokenFailure() throws Exception {
-    secretManager = createSecretManager(conf, tokenMaxLifetime,
-        expiryTime, tokenRemoverScanInterval);
-    secretManager.start(certificateClient);
-    Token<OzoneTokenIdentifier> token = secretManager.createToken(TEST_USER,
-        TEST_USER, TEST_USER);
-    LambdaTestUtils.intercept(AccessControlException.class,
-        "rougeUser tries to renew a token", () -> {
-          secretManager.renewToken(token, "rougeUser");
-        });
-  }
-
-  /**
-   * Tests token renew failure due to max time.
-   */
-  @Test
-  public void testRenewTokenFailureMaxTime() throws Exception {
-    secretManager = createSecretManager(conf, 100,
-        100, tokenRemoverScanInterval);
-    secretManager.start(certificateClient);
-    Token<OzoneTokenIdentifier> token = secretManager.createToken(TEST_USER,
-        TEST_USER,
-        TEST_USER);
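-    // Sleep past the 100 ms max token lifetime so the renewal must fail.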
-    Thread.sleep(101);
-    LambdaTestUtils.intercept(IOException.class,
-        "testUser tried to renew an expired token", () -> {
-          secretManager.renewToken(token, TEST_USER.toString());
-        });
-  }
-
-  /**
-   * Tests token renew failure due to renewal time.
-   */
-  @Test
-  public void testRenewTokenFailureRenewalTime() throws Exception {
-    secretManager = createSecretManager(conf, 1000 * 10,
-        10, tokenRemoverScanInterval);
-    secretManager.start(certificateClient);
-    Token<OzoneTokenIdentifier> token = secretManager.createToken(TEST_USER,
-        TEST_USER,
-        TEST_USER);
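-    // Sleep past the 10 ms renewal window so the token is already expired.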
-    Thread.sleep(15);
-    LambdaTestUtils.intercept(IOException.class, "is expired", () -> {
-      secretManager.renewToken(token, TEST_USER.toString());
-    });
-  }
-
-  @Test
-  public void testCreateIdentifier() throws Exception {
-    secretManager = createSecretManager(conf, tokenMaxLifetime,
-        expiryTime, tokenRemoverScanInterval);
-    secretManager.start(certificateClient);
-    OzoneTokenIdentifier identifier = secretManager.createIdentifier();
-    // Check basic details.
-    Assert.assertEquals(new Text(""), identifier.getOwner());
-    Assert.assertEquals(new Text(""), identifier.getRealUser());
-    Assert.assertEquals(new Text(""), identifier.getRenewer());
-  }
-
-  @Test
-  public void testCancelTokenSuccess() throws Exception {
-    secretManager = createSecretManager(conf, tokenMaxLifetime,
-        expiryTime, tokenRemoverScanInterval);
-    secretManager.start(certificateClient);
-    Token<OzoneTokenIdentifier> token = secretManager.createToken(TEST_USER,
-        TEST_USER, TEST_USER);
-    secretManager.cancelToken(token, TEST_USER.toString());
-  }
-
-  @Test
-  public void testCancelTokenFailure() throws Exception {
-    secretManager = createSecretManager(conf, tokenMaxLifetime,
-        expiryTime, tokenRemoverScanInterval);
-    secretManager.start(certificateClient);
-    Token<OzoneTokenIdentifier> token = secretManager.createToken(TEST_USER,
-        TEST_USER,
-        TEST_USER);
-    LambdaTestUtils.intercept(AccessControlException.class,
-        "rougeUser is not authorized to cancel the token", () -> {
-          secretManager.cancelToken(token, "rougeUser");
-        });
-  }
-
-  @Test
-  public void testVerifySignatureSuccess() throws Exception {
-    secretManager = createSecretManager(conf, tokenMaxLifetime,
-        expiryTime, tokenRemoverScanInterval);
-    secretManager.start(certificateClient);
-    OzoneTokenIdentifier id = new OzoneTokenIdentifier();
-    id.setOmCertSerialId(certificateClient.getCertificate()
-        .getSerialNumber().toString());
-    id.setMaxDate(Time.now() + 60 * 60 * 24);
-    id.setOwner(new Text("test"));
-    Assert.assertTrue(secretManager.verifySignature(id,
-        certificateClient.signData(id.getBytes())));
-  }
-
-  @Test
-  public void testVerifySignatureFailure() throws Exception {
-    secretManager = createSecretManager(conf, tokenMaxLifetime,
-        expiryTime, tokenRemoverScanInterval);
-    secretManager.start(certificateClient);
-    OzoneTokenIdentifier id = new OzoneTokenIdentifier();
-    // Set an invalid OM cert serial id so signature verification fails.
-    id.setOmCertSerialId("1927393");
-    id.setMaxDate(Time.now() + 60 * 60 * 24);
-    id.setOwner(new Text("test"));
-    Assert.assertFalse(secretManager.verifySignature(id, id.getBytes()));
-  }
-
-  @Test
-  public void testValidateS3TOKENSuccess() throws Exception {
-    secretManager = createSecretManager(conf, tokenMaxLifetime,
-        expiryTime, tokenRemoverScanInterval);
-    secretManager.start(certificateClient);
-
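-    // testuser1 has an S3 secret registered in setUp, so signature
-    // validation should succeed without throwing.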
-    OzoneTokenIdentifier identifier = new OzoneTokenIdentifier();
-    identifier.setTokenType(S3TOKEN);
-    identifier.setSignature("56ec73ba1974f8feda8365c3caef89c5d4a688d" +
-        "5f9baccf4765f46a14cd745ad");
-    identifier.setStrToSign("AWS4-HMAC-SHA256\n" +
-        "20190221T002037Z\n" +
-        "20190221/us-west-1/s3/aws4_request\n" +
-        "c297c080cce4e0927779823d3fd1f5cae71481a8f7dfc7e18d91851294efc47d");
-    identifier.setAwsAccessId("testuser1");
-    secretManager.retrievePassword(identifier);
-  }
-
-  @Test
-  public void testValidateS3TOKENFailure() throws Exception {
-    secretManager = createSecretManager(conf, tokenMaxLifetime,
-        expiryTime, tokenRemoverScanInterval);
-    secretManager.start(certificateClient);
-
-    OzoneTokenIdentifier identifier = new OzoneTokenIdentifier();
-    identifier.setTokenType(S3TOKEN);
-    identifier.setSignature("56ec73ba1974f8feda8365c3caef89c5d4a688d" +
-        "5f9baccf4765f46a14cd745ad");
-    identifier.setStrToSign("AWS4-HMAC-SHA256\n" +
-        "20190221T002037Z\n" +
-        "20190221/us-west-1/s3/aws4_request\n" +
-        "c297c080cce4e0927779823d3fd1f5cae71481a8f7dfc7e18d91851294efc47d");
-    identifier.setAwsAccessId("testuser2");
-    // Case 1: User does not have an AWS secret set.
-    LambdaTestUtils.intercept(SecretManager.InvalidToken.class, " No S3 " +
-            "secret found for S3 identifier",
-        () -> secretManager.retrievePassword(identifier));
-
-    // Case 2: Invalid hash in string to sign.
-    identifier.setStrToSign("AWS4-HMAC-SHA256\n" +
-        "20190221T002037Z\n" +
-        "20190221/us-west-1/s3/aws4_request\n" +
-        "c297c080cce4e0927779823d3fd1f5cae71481a8f7dfc7e18d91851294efc47d" +
-        "+invalidhash");
-    LambdaTestUtils.intercept(SecretManager.InvalidToken.class, " No S3 " +
-            "secret found for S3 identifier",
-        () -> secretManager.retrievePassword(identifier));
-
-    // Case 3: Invalid hash in authorization hmac.
-    identifier.setSignature("56ec73ba1974f8feda8365c3caef89c5d4a688d" +
-        "+invalidhash" + "5f9baccf4765f46a14cd745ad");
-    identifier.setStrToSign("AWS4-HMAC-SHA256\n" +
-        "20190221T002037Z\n" +
-        "20190221/us-west-1/s3/aws4_request\n" +
-        "c297c080cce4e0927779823d3fd1f5cae71481a8f7dfc7e18d91851294efc47d");
-    LambdaTestUtils.intercept(SecretManager.InvalidToken.class, " No S3 " +
-            "secret found for S3 identifier",
-        () -> secretManager.retrievePassword(identifier));
-  }
-
-  /**
-   * Validate hash using public key of KeyPair.
-   */
-  private void validateHash(byte[] hash, byte[] identifier) throws Exception {
-    Signature rsaSignature =
-        Signature.getInstance(securityConfig.getSignatureAlgo(),
-            securityConfig.getProvider());
-    rsaSignature.initVerify(certificateClient.getPublicKey());
-    rsaSignature.update(identifier);
-    Assert.assertTrue(rsaSignature.verify(hash));
-  }
-
-  /**
-   * Create instance of {@link OzoneDelegationTokenSecretManager}.
-   */
-  private OzoneDelegationTokenSecretManager
-      createSecretManager(OzoneConfiguration config, long tokenMaxLife,
-      long expiry, long tokenRemoverScanTime) throws IOException {
-    return new OzoneDelegationTokenSecretManager(config, tokenMaxLife,
-        expiry, tokenRemoverScanTime, serviceRpcAdd, s3SecretManager,
-        certificateClient);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java
deleted file mode 100644
index cb7caf3..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.security;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.crypto.KeyGenerator;
-import javax.crypto.Mac;
-import javax.crypto.SecretKey;
-import java.io.File;
-import java.io.IOException;
-import java.security.GeneralSecurityException;
-import java.security.InvalidKeyException;
-import java.security.KeyPair;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-import java.security.PrivateKey;
-import java.security.Signature;
-import java.security.SignatureException;
-import java.security.cert.Certificate;
-import java.security.cert.CertificateEncodingException;
-import java.security.cert.X509Certificate;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Test class for Ozone Manager block tokens.
- */
-public class TestOzoneManagerBlockToken {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(TestOzoneManagerBlockToken.class);
-  private static final String BASEDIR = GenericTestUtils
-      .getTempPath(TestOzoneManagerBlockToken.class.getSimpleName());
-  private static final String KEYSTORES_DIR =
-      new File(BASEDIR).getAbsolutePath();
-  private static long expiryTime;
-  private static KeyPair keyPair;
-  private static X509Certificate cert;
-  private static final long MAX_LEN = 1000;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    File base = new File(BASEDIR);
-    FileUtil.fullyDelete(base);
-    base.mkdirs();
-    expiryTime = Time.monotonicNow() + 60 * 60 * 24;
-
-    // Create Ozone Master key pair.
-    keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
-    // Create Ozone Master certificate (SCM CA issued cert) and key store.
-    cert = KeyStoreTestUtil
-        .generateCertificate("CN=OzoneMaster", keyPair, 30, "SHA256withRSA");
-  }
-
-  @After
-  public void cleanUp() {
-  }
-
-  @Test
-  public void testSignToken() throws GeneralSecurityException, IOException {
-    String keystore = new File(KEYSTORES_DIR, "keystore.jks")
-        .getAbsolutePath();
-    String truststore = new File(KEYSTORES_DIR, "truststore.jks")
-        .getAbsolutePath();
-    String trustPassword = "trustPass";
-    String keyStorePassword = "keyStorePass";
-    String keyPassword = "keyPass";
-
-
-    KeyStoreTestUtil.createKeyStore(keystore, keyStorePassword, keyPassword,
-        "OzoneMaster", keyPair.getPrivate(), cert);
-
-    // Create trust store and put the certificate in the trust store
-    Map<String, X509Certificate> certs = Collections.singletonMap("server",
-        cert);
-    KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs);
-
-    // Sign the OzoneMaster Token with Ozone Master private key
-    PrivateKey privateKey = keyPair.getPrivate();
-    OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier(
-        "testUser", "84940",
-        EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class),
-        expiryTime, cert.getSerialNumber().toString(), MAX_LEN);
-    byte[] signedToken = signTokenAsymmetric(tokenId, privateKey);
-
-    // Verify a valid signed OzoneMaster Token with Ozone Master
-    // public key(certificate)
-    boolean isValidToken = verifyTokenAsymmetric(tokenId, signedToken, cert);
-    LOG.info("{} is {}", tokenId, isValidToken ? "valid." : "invalid.");
-
-    // Verify an invalid signed OzoneMaster Token with Ozone Master
-    // public key(certificate)
-    tokenId = new OzoneBlockTokenIdentifier("", "",
-        EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class),
-        expiryTime, cert.getSerialNumber().toString(), MAX_LEN);
-    LOG.info("Unsigned token {} is {}", tokenId,
-        verifyTokenAsymmetric(tokenId, RandomUtils.nextBytes(128), cert));
-
-  }
-
-  public byte[] signTokenAsymmetric(OzoneBlockTokenIdentifier tokenId,
-      PrivateKey privateKey) throws NoSuchAlgorithmException,
-      InvalidKeyException, SignatureException {
-    Signature rsaSignature = Signature.getInstance("SHA256withRSA");
-    rsaSignature.initSign(privateKey);
-    rsaSignature.update(tokenId.getBytes());
-    return rsaSignature.sign();
-  }
-
-  public boolean verifyTokenAsymmetric(OzoneBlockTokenIdentifier tokenId,
-      byte[] signature, Certificate certificate) throws InvalidKeyException,
-      NoSuchAlgorithmException, SignatureException {
-    Signature rsaSignature = Signature.getInstance("SHA256withRSA");
-    rsaSignature.initVerify(certificate);
-    rsaSignature.update(tokenId.getBytes());
-    return rsaSignature.verify(signature);
-  }
-
-  private byte[] signTokenSymmetric(OzoneBlockTokenIdentifier identifier,
-      Mac mac, SecretKey key) {
-    try {
-      mac.init(key);
-    } catch (InvalidKeyException ike) {
-      throw new IllegalArgumentException("Invalid key to HMAC computation",
-          ike);
-    }
-    return mac.doFinal(identifier.getBytes());
-  }
-
-  OzoneBlockTokenIdentifier generateTestToken() {
-    return new OzoneBlockTokenIdentifier(RandomStringUtils.randomAlphabetic(6),
-        RandomStringUtils.randomAlphabetic(5),
-        EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class),
-        expiryTime, cert.getSerialNumber().toString(), MAX_LEN);
-  }
-
-  @Test
-  public void testAsymmetricTokenPerf() throws NoSuchAlgorithmException,
-      CertificateEncodingException, NoSuchProviderException,
-      InvalidKeyException, SignatureException {
-    final int testTokenCount = 1000;
-    List<OzoneBlockTokenIdentifier> tokenIds = new ArrayList<>();
-    List<byte[]> tokenPasswordAsym = new ArrayList<>();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenIds.add(generateTestToken());
-    }
-
-    KeyPair kp = KeyStoreTestUtil.generateKeyPair("RSA");
-
-    // Create Ozone Master certificate (SCM CA issued cert) and key store
-    X509Certificate omCert;
-    omCert = KeyStoreTestUtil.generateCertificate("CN=OzoneMaster",
-        kp, 30, "SHA256withRSA");
-
-    long startTime = Time.monotonicNowNanos();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenPasswordAsym.add(
-          signTokenAsymmetric(tokenIds.get(i), kp.getPrivate()));
-    }
-    long duration = Time.monotonicNowNanos() - startTime;
-    LOG.info("Average token sign time with HmacSha256(RSA/1024 key) is {} ns",
-        duration / testTokenCount);
-
-    startTime = Time.monotonicNowNanos();
-    for (int i = 0; i < testTokenCount; i++) {
-      verifyTokenAsymmetric(tokenIds.get(i), tokenPasswordAsym.get(i), omCert);
-    }
-    duration = Time.monotonicNowNanos() - startTime;
-    LOG.info("Average token verify time with HmacSha256(RSA/1024 key) "
-        + "is {} ns", duration / testTokenCount);
-  }
-
-  @Test
-  public void testSymmetricTokenPerf() {
-    String hmacSHA1 = "HmacSHA1";
-    String hmacSHA256 = "HmacSHA256";
-
-    testSymmetricTokenPerfHelper(hmacSHA1, 64);
-    testSymmetricTokenPerfHelper(hmacSHA256, 1024);
-  }
-
-  public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) {
-    final int testTokenCount = 1000;
-    List<OzoneBlockTokenIdentifier> tokenIds = new ArrayList<>();
-    List<byte[]> tokenPasswordSym = new ArrayList<>();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenIds.add(generateTestToken());
-    }
-
-    KeyGenerator keyGen;
-    try {
-      keyGen = KeyGenerator.getInstance(hmacAlgorithm);
-      keyGen.init(keyLen);
-    } catch (NoSuchAlgorithmException nsa) {
-      throw new IllegalArgumentException("Can't find " + hmacAlgorithm +
-          " algorithm.");
-    }
-
-    Mac mac;
-    try {
-      mac = Mac.getInstance(hmacAlgorithm);
-    } catch (NoSuchAlgorithmException nsa) {
-      throw new IllegalArgumentException("Can't find " + hmacAlgorithm +
-          " algorithm.");
-    }
-
-    SecretKey secretKey = keyGen.generateKey();
-
-    long startTime = Time.monotonicNowNanos();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenPasswordSym.add(
-          signTokenSymmetric(tokenIds.get(i), mac, secretKey));
-    }
-    long duration = Time.monotonicNowNanos() - startTime;
-    LOG.info("Average token sign time with {}({} symmetric key) is {} ns",
-        hmacAlgorithm, keyLen, duration / testTokenCount);
-  }
-}
\ No newline at end of file
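
The sign/verify pattern exercised above is plain JCA: serialize the token
identifier, sign the bytes with the issuer's private key, and verify them
with the public key taken from the issuer's X.509 certificate. A minimal
stand-alone sketch of both the asymmetric and the HMAC variants (JDK only;
the payload is an arbitrary byte array standing in for the serialized
identifier):

    import java.nio.charset.StandardCharsets;
    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.Signature;
    import javax.crypto.KeyGenerator;
    import javax.crypto.Mac;

    public final class TokenSigningSketch {
      public static void main(String[] args) throws Exception {
        byte[] payload =
            "serialized-token-identifier".getBytes(StandardCharsets.UTF_8);

        // Asymmetric: sign with the private key...
        KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
        gen.initialize(2048);
        KeyPair pair = gen.generateKeyPair();
        Signature signer = Signature.getInstance("SHA256withRSA");
        signer.initSign(pair.getPrivate());
        signer.update(payload);
        byte[] sig = signer.sign();

        // ...and verify with the public key (from the certificate).
        Signature verifier = Signature.getInstance("SHA256withRSA");
        verifier.initVerify(pair.getPublic());
        verifier.update(payload);
        System.out.println("asymmetric valid = " + verifier.verify(sig));

        // Symmetric: a shared secret key produces and checks an HMAC tag.
        Mac mac = Mac.getInstance("HmacSHA256");
        mac.init(KeyGenerator.getInstance("HmacSHA256").generateKey());
        byte[] tag = mac.doFinal(payload);
        System.out.println("hmac tag bytes = " + tag.length);
      }
    }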
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
deleted file mode 100644
index f26869d..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.security;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.security.GeneralSecurityException;
-import java.security.InvalidKeyException;
-import java.security.KeyPair;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-import java.security.PrivateKey;
-import java.security.Signature;
-import java.security.SignatureException;
-import java.security.cert.Certificate;
-import java.security.cert.CertificateEncodingException;
-import java.security.cert.X509Certificate;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import javax.crypto.KeyGenerator;
-import javax.crypto.Mac;
-import javax.crypto.SecretKey;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.security.ssl.TestSSLFactory;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Test class for {@link OzoneTokenIdentifier}.
- */
-public class TestOzoneTokenIdentifier {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(TestOzoneTokenIdentifier.class);
-  private static final String BASEDIR = GenericTestUtils
-      .getTempPath(TestOzoneTokenIdentifier.class.getSimpleName());
-  private static final String KEYSTORES_DIR =
-      new File(BASEDIR).getAbsolutePath();
-  private static File base;
-  private static String sslConfsDir;
-  private static final String EXCLUDE_CIPHERS =
-      "TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
-          + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,  \n"
-          + "SSL_RSA_WITH_DES_CBC_SHA,"
-          + "SSL_DHE_RSA_WITH_DES_CBC_SHA,  "
-          + "SSL_RSA_EXPORT_WITH_RC4_40_MD5,\t \n"
-          + "SSL_RSA_EXPORT_WITH_DES40_CBC_SHA,"
-          + "SSL_RSA_WITH_RC4_128_MD5";
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    base = new File(BASEDIR);
-    FileUtil.fullyDelete(base);
-    base.mkdirs();
-  }
-
-  private Configuration createConfiguration(boolean clientCert,
-      boolean trustStore)
-      throws Exception {
-    Configuration conf = new Configuration();
-    // Resolve the SSL config dir before passing it to setupSSLConfig.
-    sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
-    KeyStoreTestUtil.setupSSLConfig(KEYSTORES_DIR, sslConfsDir, conf,
-        clientCert, trustStore, EXCLUDE_CIPHERS);
-    return conf;
-  }
-
-  @AfterClass
-  public static void cleanUp() throws Exception {
-    FileUtil.fullyDelete(base);
-    KeyStoreTestUtil.cleanupSSLConfig(KEYSTORES_DIR, sslConfsDir);
-  }
-
-  @Test
-  public void testSignToken() throws GeneralSecurityException, IOException {
-    String keystore = new File(KEYSTORES_DIR, "keystore.jks")
-        .getAbsolutePath();
-    String truststore = new File(KEYSTORES_DIR, "truststore.jks")
-        .getAbsolutePath();
-    String trustPassword = "trustPass";
-    String keyStorePassword = "keyStorePass";
-    String keyPassword = "keyPass";
-
-    // Create Ozone Master key pair
-    KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
-
-    // Create Ozone Master certificate (SCM CA issued cert) and key store
-    X509Certificate cert = KeyStoreTestUtil
-        .generateCertificate("CN=OzoneMaster", keyPair, 30, "SHA256withRSA");
-    KeyStoreTestUtil.createKeyStore(keystore, keyStorePassword, keyPassword,
-        "OzoneMaster", keyPair.getPrivate(), cert);
-
-    // Create trust store and put the certificate in the trust store
-    Map<String, X509Certificate> certs = Collections.singletonMap("server",
-        cert);
-    KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs);
-
-    // Sign the OzoneMaster Token with Ozone Master private key
-    PrivateKey privateKey = keyPair.getPrivate();
-    OzoneTokenIdentifier tokenId = new OzoneTokenIdentifier();
-    tokenId.setOmCertSerialId("123");
-    byte[] signedToken = signTokenAsymmetric(tokenId, privateKey);
-
-    // Verify a valid signed OzoneMaster Token with Ozone Master
-    // public key(certificate)
-    boolean isValidToken = verifyTokenAsymmetric(tokenId, signedToken, cert);
-    LOG.info("{} is {}", tokenId, isValidToken ? "valid." : "invalid.");
-
-    // Verify an invalid signed OzoneMaster Token with Ozone Master
-    // public key(certificate)
-    tokenId = new OzoneTokenIdentifier(new Text("oozie"),
-        new Text("rm"), new Text("client"));
-    tokenId.setOmCertSerialId("123");
-    LOG.info("Unsigned token {} is {}", tokenId,
-        verifyTokenAsymmetric(tokenId, RandomUtils.nextBytes(128), cert));
-
-  }
-
-  public byte[] signTokenAsymmetric(OzoneTokenIdentifier tokenId,
-      PrivateKey privateKey) throws NoSuchAlgorithmException,
-      InvalidKeyException, SignatureException {
-    Signature rsaSignature = Signature.getInstance("SHA256withRSA");
-    rsaSignature.initSign(privateKey);
-    rsaSignature.update(tokenId.getBytes());
-    byte[] signature = rsaSignature.sign();
-    return signature;
-  }
-
-  public boolean verifyTokenAsymmetric(OzoneTokenIdentifier tokenId,
-      byte[] signature, Certificate certificate) throws InvalidKeyException,
-      NoSuchAlgorithmException, SignatureException {
-    Signature rsaSignature = Signature.getInstance("SHA256withRSA");
-    rsaSignature.initVerify(certificate);
-    rsaSignature.update(tokenId.getBytes());
-    boolean isValid = rsaSignature.verify(signature);
-    return isValid;
-  }
-
-  private byte[] signTokenSymmetric(OzoneTokenIdentifier identifier,
-      Mac mac, SecretKey key) {
-    try {
-      mac.init(key);
-    } catch (InvalidKeyException ike) {
-      throw new IllegalArgumentException("Invalid key to HMAC computation",
-          ike);
-    }
-    return mac.doFinal(identifier.getBytes());
-  }
-
-  OzoneTokenIdentifier generateTestToken() {
-    OzoneTokenIdentifier tokenIdentifier = new OzoneTokenIdentifier(
-        new Text(RandomStringUtils.randomAlphabetic(6)),
-        new Text(RandomStringUtils.randomAlphabetic(5)),
-        new Text(RandomStringUtils.randomAlphabetic(4)));
-    tokenIdentifier.setOmCertSerialId("123");
-    return tokenIdentifier;
-  }
-
-  @Test
-  public void testAsymmetricTokenPerf() throws NoSuchAlgorithmException,
-      CertificateEncodingException, NoSuchProviderException,
-      InvalidKeyException, SignatureException {
-    final int testTokenCount = 1000;
-    List<OzoneTokenIdentifier> tokenIds = new ArrayList<>();
-    List<byte[]> tokenPasswordAsym = new ArrayList<>();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenIds.add(generateTestToken());
-    }
-
-    KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
-
-    // Create Ozone Master certificate (SCM CA issued cert) and key store
-    X509Certificate cert;
-    cert = KeyStoreTestUtil.generateCertificate("CN=OzoneMaster",
-        keyPair, 30, "SHA256withRSA");
-
-    long startTime = Time.monotonicNowNanos();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenPasswordAsym.add(
-          signTokenAsymmetric(tokenIds.get(i), keyPair.getPrivate()));
-    }
-    long duration = Time.monotonicNowNanos() - startTime;
-    LOG.info("Average token sign time with HmacSha256(RSA/1024 key) is {} ns",
-        duration/testTokenCount);
-
-    startTime = Time.monotonicNowNanos();
-    for (int i = 0; i < testTokenCount; i++) {
-      verifyTokenAsymmetric(tokenIds.get(i), tokenPasswordAsym.get(i), cert);
-    }
-    duration = Time.monotonicNowNanos() - startTime;
-    LOG.info("Average token verify time with HmacSha256(RSA/1024 key) "
-        + "is {} ns", duration/testTokenCount);
-  }
-
-  @Test
-  public void testSymmetricTokenPerf() {
-    String hmacSHA1 = "HmacSHA1";
-    String hmacSHA256 = "HmacSHA256";
-
-    testSymmetricTokenPerfHelper(hmacSHA1, 64);
-    testSymmetricTokenPerfHelper(hmacSHA256, 1024);
-  }
-
-  public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) {
-    final int testTokenCount = 1000;
-    List<OzoneTokenIdentifier> tokenIds = new ArrayList<>();
-    List<byte[]> tokenPasswordSym = new ArrayList<>();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenIds.add(generateTestToken());
-    }
-
-    KeyGenerator keyGen;
-    try {
-      keyGen = KeyGenerator.getInstance(hmacAlgorithm);
-      keyGen.init(keyLen);
-    } catch (NoSuchAlgorithmException nsa) {
-      throw new IllegalArgumentException("Can't find " + hmacAlgorithm +
-          " algorithm.");
-    }
-
-    Mac mac;
-    try {
-      mac = Mac.getInstance(hmacAlgorithm);
-    } catch (NoSuchAlgorithmException nsa) {
-      throw new IllegalArgumentException("Can't find " + hmacAlgorithm +
-          " algorithm.");
-    }
-
-    SecretKey secretKey = keyGen.generateKey();
-
-    long startTime = Time.monotonicNowNanos();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenPasswordSym.add(
-          signTokenSymmetric(tokenIds.get(i), mac, secretKey));
-    }
-    long duration = Time.monotonicNowNanos() - startTime;
-    LOG.info("Average token sign time with {}({} symmetric key) is {} ns",
-        hmacAlgorithm, keyLen, duration/testTokenCount);
-  }
-
-  /*
-   * Test serialization/deserialization of OzoneTokenIdentifier.
-   */
-  @Test
-  public void testReadWriteInProtobuf() throws IOException {
-    OzoneTokenIdentifier id = getIdentifierInst();
-    File idFile = new File(BASEDIR + "/tokenFile");
-
-    // try-with-resources closes both streams even if the assertion fails.
-    try (DataOutputStream dataOutputStream =
-        new DataOutputStream(new FileOutputStream(idFile))) {
-      id.write(dataOutputStream);
-    }
-
-    OzoneTokenIdentifier id2 = new OzoneTokenIdentifier();
-    try (DataInputStream dis =
-        new DataInputStream(new FileInputStream(idFile))) {
-      id2.readFields(dis);
-    }
-    Assert.assertEquals(id, id2);
-  }
-
-  public OzoneTokenIdentifier getIdentifierInst() {
-    OzoneTokenIdentifier id = new OzoneTokenIdentifier();
-    id.setOwner(new Text("User1"));
-    id.setRenewer(new Text("yarn"));
-    id.setIssueDate(Time.now());
-    id.setMaxDate(Time.now() + 5000);
-    id.setSequenceNumber(1);
-    id.setOmCertSerialId("123");
-    return id;
-  }
-}
\ No newline at end of file
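
The read/write round-trip in testReadWriteInProtobuf is the standard Hadoop
Writable pattern. An in-memory variant (no temp file) might look like the
sketch below; Text stands in for any Writable, including the deleted
OzoneTokenIdentifier:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.Text;

    static Text roundTrip(Text original) throws IOException {
      ByteArrayOutputStream buffer = new ByteArrayOutputStream();
      try (DataOutputStream out = new DataOutputStream(buffer)) {
        original.write(out);              // serialize
      }
      Text copy = new Text();
      try (DataInputStream in = new DataInputStream(
          new ByteArrayInputStream(buffer.toByteArray()))) {
        copy.readFields(in);              // deserialize
      }
      return copy;                        // copy.equals(original) should hold
    }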
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/package-info.java
deleted file mode 100644
index a36f325..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.security;
-/**
- * Ozone security tests.
- */
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestObjectPrinter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestObjectPrinter.java
deleted file mode 100644
index 82e755c..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestObjectPrinter.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell;
-
-import java.io.IOException;
-import java.util.ArrayList;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-/**
- * Test the json object printer.
- */
-public class TestObjectPrinter {
-
-  @Test
-  public void printObjectAsJson() throws IOException {
-
-    OzoneConfiguration conf = new OzoneConfiguration();
-    OzoneVolume volume =
-        new OzoneVolume(conf, Mockito.mock(ClientProtocol.class), "name",
-            "admin", "owner", 1L, 0L,
-            new ArrayList<>());
-
-    String result = ObjectPrinter.getObjectAsJson(volume);
-    Assert.assertTrue("Result is not a proper json",
-        result.contains("\"owner\""));
-  }
-}
\ No newline at end of file
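
ObjectPrinter is Ozone's own JSON helper; the same assertion style can be
reproduced with Jackson directly. A stand-in sketch (the Volume bean here is
hypothetical, not the real OzoneVolume):

    import com.fasterxml.jackson.databind.ObjectMapper;

    public class JsonPrintSketch {
      public static class Volume {          // hypothetical stand-in bean
        public String name = "vol1";
        public String owner = "owner";
      }

      public static void main(String[] args) throws Exception {
        String json = new ObjectMapper()
            .writerWithDefaultPrettyPrinter()
            .writeValueAsString(new Volume());
        // Same spirit as the deleted test: the owner field must be present.
        if (!json.contains("\"owner\"")) {
          throw new AssertionError("Result is not a proper json");
        }
        System.out.println(json);
      }
    }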
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java
deleted file mode 100644
index 7ae0520..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.ozShell;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collection;
-
-import org.apache.hadoop.ozone.client.OzoneClientException;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-
-/**
- * Test ozone URL parsing.
- */
-@RunWith(Parameterized.class)
-public class TestOzoneAddress {
-
-  @Parameters
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {
-        {"o3fs://localhost:9878/"},
-        {"o3fs://localhost/"},
-        {"o3fs:///"},
-        {"/"},
-        {""}
-    });
-  }
-
-  private String prefix;
-
-  public TestOzoneAddress(String prefix) {
-    this.prefix = prefix;
-  }
-
-  @Test
-  public void checkUrlTypes() throws OzoneClientException, IOException {
-    OzoneAddress address;
-
-    address = new OzoneAddress("");
-    address.ensureRootAddress();
-
-    address = new OzoneAddress(prefix + "");
-    address.ensureRootAddress();
-
-    address = new OzoneAddress(prefix + "vol1");
-    address.ensureVolumeAddress();
-    Assert.assertEquals("vol1", address.getVolumeName());
-
-    address = new OzoneAddress(prefix + "vol1/bucket");
-    address.ensureBucketAddress();
-    Assert.assertEquals("vol1", address.getVolumeName());
-    Assert.assertEquals("bucket", address.getBucketName());
-
-    address = new OzoneAddress(prefix + "vol1/bucket/");
-    address.ensureBucketAddress();
-    Assert.assertEquals("vol1", address.getVolumeName());
-    Assert.assertEquals("bucket", address.getBucketName());
-
-    address = new OzoneAddress(prefix + "vol1/bucket/key");
-    address.ensureKeyAddress();
-    Assert.assertEquals("vol1", address.getVolumeName());
-    Assert.assertEquals("bucket", address.getBucketName());
-    Assert.assertEquals("key", address.getKeyName());
-
-    address = new OzoneAddress(prefix + "vol1/bucket/key/");
-    address.ensureKeyAddress();
-    Assert.assertEquals("vol1", address.getVolumeName());
-    Assert.assertEquals("bucket", address.getBucketName());
-    Assert.assertEquals("key/", address.getKeyName());
-
-    address = new OzoneAddress(prefix + "vol1/bucket/key1/key3/key");
-    address.ensureKeyAddress();
-    Assert.assertEquals("vol1", address.getVolumeName());
-    Assert.assertEquals("bucket", address.getBucketName());
-    Assert.assertEquals("key1/key3/key", address.getKeyName());
-  }
-}
\ No newline at end of file
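
The parameterized cases above all reduce to stripping the scheme/authority
and splitting the remainder into volume, bucket, and key, where the key may
itself contain slashes. A simplified parser matching the expectations of the
deleted test (a hypothetical helper, not the real OzoneAddress):

    public final class OzoneAddressSketch {
      /** Returns {volume, bucket, key}; missing parts are null. */
      static String[] parse(String address) {
        // Strip e.g. "o3fs://localhost:9878" and any leading slashes.
        String rest = address.replaceFirst("^o3fs://[^/]*", "");
        while (rest.startsWith("/")) {
          rest = rest.substring(1);
        }
        if (rest.isEmpty()) {
          return new String[] {null, null, null};    // root address
        }
        String[] parts = rest.split("/", 3);         // key keeps its slashes
        return new String[] {
            parts[0],
            parts.length > 1 ? parts[1] : null,
            parts.length > 2 ? parts[2] : null};
      }

      public static void main(String[] args) {
        String[] vbk = parse("o3fs://localhost:9878/vol1/bucket/key1/key3/key");
        System.out.println(vbk[0] + " " + vbk[1] + " " + vbk[2]);
        // prints: vol1 bucket key1/key3/key
      }
    }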
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/package-info.java
deleted file mode 100644
index 80c1985..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell;
-/**
- * Tests for the Ozone shell.
- */
diff --git a/hadoop-ozone/ozone-manager/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker b/hadoop-ozone/ozone-manager/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker
deleted file mode 100644
index 3c9e1c8..0000000
--- a/hadoop-ozone/ozone-manager/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-mock-maker-inline
\ No newline at end of file
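
The mock-maker-inline switch tells Mockito 2.x+ to use its inline byte-code
mock maker, which can mock final classes and methods that the default
subclassing mock maker cannot. A sketch of what it enables (assuming
mockito-core 2.x on the test classpath):

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    final class Sealed {                     // final: only mockable inline
      String greet() {
        return "real";
      }
    }

    class SealedTest {
      void demo() {
        Sealed mocked = mock(Sealed.class);  // fails without mock-maker-inline
        when(mocked.greet()).thenReturn("mocked");
        assert "mocked".equals(mocked.greet());
      }
    }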
diff --git a/hadoop-ozone/ozonefs-lib-current/pom.xml b/hadoop-ozone/ozonefs-lib-current/pom.xml
deleted file mode 100644
index 1645ccc..0000000
--- a/hadoop-ozone/ozonefs-lib-current/pom.xml
+++ /dev/null
@@ -1,214 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-filesystem-lib-current</artifactId>
-  <name>Apache Hadoop Ozone FileSystem Single Jar Library</name>
-  <packaging>jar</packaging>
-  <description>This project creates an uber jar from ozonefs with all the
-    dependencies.
-  </description>
-  <version>0.5.0-SNAPSHOT</version>
-  <properties>
-    <file.encoding>UTF-8</file.encoding>
-    <downloadSources>true</downloadSources>
-    <shaded.prefix>org.apache.hadoop.ozone.shaded</shaded.prefix>
-  </properties>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>animal-sniffer-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>signature-check</id>
-            <phase></phase>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <skip>true</skip>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-shade-plugin</artifactId>
-        <executions>
-          <execution>
-            <phase>package</phase>
-            <goals>
-              <goal>shade</goal>
-            </goals>
-            <configuration>
-              <transformers>
-                <transformer
-                        implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
-                  <resources>
-                    <resource>META-INF/BC1024KE.DSA</resource>
-                    <resource>META-INF/BC2048KE.DSA</resource>
-                    <resource>META-INF/BC1024KE.SF</resource>
-                    <resource>META-INF/BC2048KE.SF</resource>
-                  </resources>
-                </transformer>
-                <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
-                <transformer implementation="org.apache.maven.plugins.shade.resource.XmlAppendingTransformer">
-                  <resource>ozone-default-generated.xml</resource>
-                </transformer>
-              </transformers>
-              <relocations>
-                <relocation>
-                  <pattern>org</pattern>
-                  <shadedPattern>
-                    ${shaded.prefix}.org
-                  </shadedPattern>
-                  <includes>
-                    <include>org.yaml.**.*</include>
-                    <include>org.sqlite.**.*</include>
-                    <include>org.tukaani.**.*</include>
-                    <include>org.bouncycastle.**.*</include>
-                    <include>org.fusesource.leveldbjni.**.*</include>
-                    <include>org.rocksdb.**.*</include>
-                    <include>org.apache.commons.cli.**.*</include>
-                    <include>org.apache.commons.compress.**.*</include>
-                    <include>org.apache.commons.codec.**.*</include>
-                    <include>org.apache.commons.beanutils.**.*</include>
-                    <include>org.apache.commons.collections.**.*</include>
-                    <include>org.apache.commons.digester.**.*</include>
-                    <include>org.apache.commons.logging.**.*</include>
-                    <include>org.apache.commons.pool2.**.*</include>
-                    <include>org.apache.commons.validator.**.*</include>
-                    <include>org.sqlite.**.*</include>
-                    <include>org.apache.thrift.**.*</include>
-                    <!-- level db -->
-                    <include>org.iq80.**.*</include>
-                    <include>org.fusesource.**.*</include>
-                    <!-- http client and core -->
-                    <include>org.apache.http.**.*</include>
-                  </includes>
-                </relocation>
-                <relocation>
-                  <pattern>com</pattern>
-                  <shadedPattern>
-                    ${shaded.prefix}.com
-                  </shadedPattern>
-                  <includes>
-                    <include>com.google.common.**.*</include>
-                    <include>com.google.gson.**.*</include>
-                    <include>com.codahale.**.*</include>
-                    <include>com.lmax.**.*</include>
-                    <include>com.github.joshelser.**.*</include>
-                    <include>com.twitter.**.*</include>
-                  </includes>
-                </relocation>
-                <relocation>
-                  <pattern>picocli</pattern>
-                  <shadedPattern>
-                    ${shaded.prefix}.picocli
-                  </shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>info</pattern>
-                  <shadedPattern>
-                    ${shaded.prefix}.info
-                  </shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>io</pattern>
-                  <shadedPattern>
-                    ${shaded.prefix}.io
-                  </shadedPattern>
-                </relocation>
-
-                <!-- handling some special packages with special names -->
-                <relocation>
-                  <pattern>okio</pattern>
-                  <shadedPattern>
-                    ${shaded.prefix}.okio
-                  </shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>okhttp3</pattern>
-                  <shadedPattern>
-                    ${shaded.prefix}.okhttp3
-                  </shadedPattern>
-                </relocation>
-              </relocations>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-filesystem</artifactId>
-      <scope>compile</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-common</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-hdfs</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-hdfs-client</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.logging.log4j</groupId>
-          <artifactId>log4j-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.logging.log4j</groupId>
-          <artifactId>log4j-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.google.code.findbugs</groupId>
-          <artifactId>jsr305</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-  </dependencies>
-</project>
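
The relocations above rewrite the package names of the bundled dependencies
(com.google.common becomes org.apache.hadoop.ozone.shaded.com.google.common,
and so on) so the uber jar can coexist with other versions of the same
libraries on the application classpath. One way to observe the effect is to
look a relocated class up by its new name; a sketch that only succeeds when
run against the shaded jar itself:

    public class RelocationCheck {
      public static void main(String[] args) throws Exception {
        // Present only inside the shaded jar; consumers that bring their
        // own Guava still see the original com.google.common names.
        Class<?> relocated = Class.forName(
            "org.apache.hadoop.ozone.shaded.com.google.common.base.Preconditions");
        System.out.println(relocated.getName());
      }
    }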
diff --git a/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 0368002..0000000
--- a/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.ozone.OzoneFileSystem
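
This META-INF/services entry is what lets Hadoop resolve the o3fs:// scheme
through java.util.ServiceLoader, so clients never have to name the
implementation class. A sketch using the standard hadoop-common API (the
bucket/volume/host names in the URI are placeholders):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    // No fs.o3fs.impl setting is needed: the scheme-to-class mapping comes
    // from the service file above once the jar is on the classpath.
    FileSystem fs = FileSystem.get(
        URI.create("o3fs://bucket.volume.om-host/"), new Configuration());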
diff --git a/hadoop-ozone/ozonefs-lib-legacy/pom.xml b/hadoop-ozone/ozonefs-lib-legacy/pom.xml
deleted file mode 100644
index c248308..0000000
--- a/hadoop-ozone/ozonefs-lib-legacy/pom.xml
+++ /dev/null
@@ -1,139 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-filesystem-lib-legacy</artifactId>
-  <name>Apache Hadoop Ozone FileSystem Legacy Jar Library</name>
-  <description>This project creates an uber jar from ozonefs with all the
-    dependencies, but the dependencies are located in an isolated subdir
-    and loaded by a custom class loader. It can be used together with
-    Hadoop 2.x.
-  </description>
-  <packaging>jar</packaging>
-  <version>0.5.0-SNAPSHOT</version>
-  <properties>
-    <file.encoding>UTF-8</file.encoding>
-    <downloadSources>true</downloadSources>
-  </properties>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>include-dependencies</id>
-            <goals>
-              <goal>unpack-dependencies</goal>
-            </goals>
-            <phase>prepare-package</phase>
-            <configuration>
-              <outputDirectory>target/classes/libs</outputDirectory>
-              <includeScope>compile</includeScope>
-              <excludes>META-INF/*.SF</excludes>
-              <excludeArtifactIds>
-                slf4j-api,slf4j-log4j12,log4j-api,log4j-core,log4j,hadoop-ozone-filesystem
-              </excludeArtifactIds>
-              <markersDirectory>
-                ${project.build.directory}/dependency-maven-plugin-markers-lib
-              </markersDirectory>
-            </configuration>
-          </execution>
-
-          <execution>
-            <id>include-ozonefs</id>
-            <goals>
-              <goal>unpack-dependencies</goal>
-            </goals>
-            <phase>prepare-package</phase>
-            <configuration>
-              <outputDirectory>target/classes</outputDirectory>
-              <includeArtifactIds>hadoop-ozone-filesystem,hadoop-ozone-common
-              </includeArtifactIds>
-              <includeScope>compile</includeScope>
-              <excludes>META-INF/*.SF</excludes>
-              <markersDirectory>
-                ${project.build.directory}/dependency-maven-plugin-markers-direct
-              </markersDirectory>
-            </configuration>
-          </execution>
-
-          <execution>
-            <id>include-token</id>
-            <goals>
-              <goal>unpack-dependencies</goal>
-            </goals>
-            <phase>prepare-package</phase>
-            <configuration>
-              <outputDirectory>target/classes</outputDirectory>
-              <includeArtifactIds>hadoop-ozone-common,hadoop-hdds-common</includeArtifactIds>
-              <includeScope>compile</includeScope>
-              <includes>
-                      org/apache/hadoop/ozone/security/OzoneTokenIdentifier.class,org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.class,org/apache/hadoop/ozone/protocol/proto/OzoneManagerProtocolProtos*,org/apache/hadoop/hdds/protocol/proto/HddsProtos*
-              </includes>
-              <excludes>META-INF/*.SF</excludes>
-              <markersDirectory>
-                ${project.build.directory}/dependency-maven-plugin-markers-token
-              </markersDirectory>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>animal-sniffer-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>signature-check</id>
-            <phase></phase>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <skip>true</skip>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-filesystem</artifactId>
-      <scope>compile</scope>
-    </dependency>
-  </dependencies>
-</project>
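
The legacy jar keeps its dependencies under a libs/ subdirectory inside the
jar and loads them through a dedicated class loader, so its bundled copies of
Guava, Netty, etc. never collide with a Hadoop 2.x classpath. Reduced to a
sketch, the idea is a URLClassLoader with no application parent (here over an
exploded libs/ directory; the real implementation reads the jar entries):

    import java.io.File;
    import java.net.URL;
    import java.net.URLClassLoader;
    import java.util.ArrayList;
    import java.util.List;

    static ClassLoader isolatedLoader(File libsDir) throws Exception {
      List<URL> urls = new ArrayList<>();
      for (File jar : libsDir.listFiles((dir, name) -> name.endsWith(".jar"))) {
        urls.add(jar.toURI().toURL());
      }
      // Null parent: classes under libs/ are resolved by this loader only
      // and stay invisible to the application class loader.
      return new URLClassLoader(urls.toArray(new URL[0]), null);
    }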
diff --git a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 39ca348..0000000
--- a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.ozone.BasicOzoneFileSystem
diff --git a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/ozonefs.txt b/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/ozonefs.txt
deleted file mode 100644
index 85c1307..0000000
--- a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/ozonefs.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Apache Hadoop Ozone placeholder file.
-
-The use of the legacy version of the uber jar can be detected based on
-the existence of this file.
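
Detecting which flavor of the uber jar is on the classpath therefore reduces
to probing for this marker resource, along the lines of:

    // True when the legacy (isolated-classloader) uber jar is present.
    boolean legacyJar = Thread.currentThread().getContextClassLoader()
        .getResource("ozonefs.txt") != null;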
diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml
deleted file mode 100644
index 4f85070..0000000
--- a/hadoop-ozone/ozonefs/pom.xml
+++ /dev/null
@@ -1,219 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-filesystem</artifactId>
-  <name>Apache Hadoop Ozone FileSystem</name>
-  <packaging>jar</packaging>
-  <version>0.5.0-SNAPSHOT</version>
-  <properties>
-    <file.encoding>UTF-8</file.encoding>
-    <downloadSources>true</downloadSources>
-  </properties>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>deplist</id>
-            <phase>compile</phase>
-            <goals>
-              <goal>list</goal>
-            </goals>
-            <configuration>
-              <!-- build a shellprofile -->
-              <outputFile>
-                ${project.basedir}/target/1hadoop-tools-deps/${project.artifactId}.tools-optional.txt
-              </outputFile>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-surefire-plugin</artifactId>
-        <configuration>
-          <includes>
-            <include>ITestOzoneContract*.java</include>
-            <include>**/Test*.java</include>
-            <include>**/*Test.java</include>
-            <include>**/*Tests.java</include>
-            <include>**/*TestCase.java</include>
-          </includes>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>animal-sniffer-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>signature-check</id>
-            <phase></phase>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-scm</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-framework</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-ozone-manager</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-container-service</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.httpcomponents</groupId>
-      <artifactId>httpclient</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.github.spotbugs</groupId>
-      <artifactId>spotbugs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-integration-test</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-      <version>1.10.19</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-distcp</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-distcp</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.powermock</groupId>
-      <artifactId>powermock-module-junit4</artifactId>
-      <version>1.6.5</version>
-      <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.javassist</groupId>
-          <artifactId>javassist</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.powermock</groupId>
-      <artifactId>powermock-api-mockito</artifactId>
-      <version>1.6.5</version>
-      <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.hamcrest</groupId>
-          <artifactId>hamcrest-core</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java
deleted file mode 100644
index 06ebc15..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.ozone;
-
-/**
- * Minimum set of Ozone key information attributes.
- * <p>
- * This class doesn't depend on any other Ozone class, only on primitive
- * Java types, so it can be used safely in the signature of
- * OzoneClientAdapter: even if it is loaded by a separate class loader,
- * it won't cause any dependency problem.
- */
-public class BasicKeyInfo {
-
-  private String name;
-
-  private long modificationTime;
-
-  private long dataSize;
-
-  public BasicKeyInfo(String name, long modificationTime, long size) {
-    this.name = name;
-    this.modificationTime = modificationTime;
-    this.dataSize = size;
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  public long getModificationTime() {
-    return modificationTime;
-  }
-
-  public long getDataSize() {
-    return dataSize;
-  }
-}
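
The restriction to primitive/JDK types matters because a class loaded by two
different class loaders yields two distinct runtime types, and passing such
an object across the boundary fails with a ClassCastException. A minimal
demonstration (assuming a compiled Foo.class in a hypothetical /tmp/demo
directory):

    import java.net.URL;
    import java.net.URLClassLoader;

    public class LoaderIdentity {
      public static void main(String[] args) throws Exception {
        URL[] cp = {new URL("file:/tmp/demo/")};   // hypothetical classpath
        Class<?> a = new URLClassLoader(cp, null).loadClass("Foo");
        Class<?> b = new URLClassLoader(cp, null).loadClass("Foo");
        // Same bytes, same name, but different runtime types:
        System.out.println(a == b);                 // prints false
      }
    }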
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java
deleted file mode 100644
index 52a8ede..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.DelegateToFileSystem;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-
-/**
- * Ozone implementation of AbstractFileSystem.
- * This implementation delegates to BasicOzoneFileSystem.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class BasicOzFs extends DelegateToFileSystem {
-
-  public BasicOzFs(URI theUri, Configuration conf)
-      throws IOException, URISyntaxException {
-    super(theUri, new BasicOzoneFileSystem(), conf,
-        OzoneConsts.OZONE_URI_SCHEME, false);
-  }
-
-}
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
deleted file mode 100644
index 9ea03b5..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ /dev/null
@@ -1,446 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.ozone;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenRenewer;
-
-import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Basic Implementation of the OzoneFileSystem calls.
- * <p>
- * This is the minimal version which doesn't include any statistics.
- * <p>
- * For full featured version use OzoneClientAdapterImpl.
- */
-public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
-
-  static final Logger LOG =
-      LoggerFactory.getLogger(BasicOzoneClientAdapterImpl.class);
-
-  private OzoneClient ozoneClient;
-  private ObjectStore objectStore;
-  private OzoneVolume volume;
-  private OzoneBucket bucket;
-  private ReplicationType replicationType;
-  private ReplicationFactor replicationFactor;
-  private boolean securityEnabled;
-
-  /**
-   * Create new OzoneClientAdapter implementation.
-   *
-   * @param volumeStr Name of the volume to use.
-   * @param bucketStr Name of the bucket to use
-   * @throws IOException In case of a problem.
-   */
-  public BasicOzoneClientAdapterImpl(String volumeStr, String bucketStr)
-      throws IOException {
-    this(createConf(), volumeStr, bucketStr);
-  }
-
-  private static OzoneConfiguration createConf() {
-    ClassLoader contextClassLoader =
-        Thread.currentThread().getContextClassLoader();
-    Thread.currentThread().setContextClassLoader(null);
-    try {
-      return new OzoneConfiguration();
-    } finally {
-      Thread.currentThread().setContextClassLoader(contextClassLoader);
-    }
-  }
-
-  public BasicOzoneClientAdapterImpl(OzoneConfiguration conf, String volumeStr,
-      String bucketStr)
-      throws IOException {
-    this(null, -1, conf, volumeStr, bucketStr);
-  }
-
-  public BasicOzoneClientAdapterImpl(String omHost, int omPort,
-      Configuration hadoopConf, String volumeStr, String bucketStr)
-      throws IOException {
-
-    ClassLoader contextClassLoader =
-        Thread.currentThread().getContextClassLoader();
-    Thread.currentThread().setContextClassLoader(null);
-
-    try {
-      OzoneConfiguration conf = OzoneConfiguration.of(hadoopConf);
-
-      if (omHost == null && OmUtils.isServiceIdsDefined(conf)) {
-        // When the host name or service id isn't given
-        // but ozone.om.service.ids is defined, declare failure.
-
-        // This is a safety precaution that prevents the client from
-        // accidentally failing over to an unintended OM.
-        throw new IllegalArgumentException("Service ID or host name must not"
-            + " be omitted when ozone.om.service.ids is defined.");
-      }
-
-      if (omPort != -1) {
-        // When the port number is specified, perform the following check
-        if (OmUtils.isOmHAServiceId(conf, omHost)) {
-          // If omHost is a service id, it shouldn't use a port
-          throw new IllegalArgumentException("Port " + omPort +
-              " specified in URI but host '" + omHost + "' is a "
-              + "logical (HA) OzoneManager and does not use port information.");
-        }
-      } else {
-        // When port number is not specified, read it from config
-        omPort = OmUtils.getOmRpcPort(conf);
-      }
-
-      SecurityConfig secConfig = new SecurityConfig(conf);
-
-      if (secConfig.isSecurityEnabled()) {
-        this.securityEnabled = true;
-      }
-
-      String replicationTypeConf =
-          conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE,
-              OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT);
-
-      int replicationCountConf = conf.getInt(OzoneConfigKeys.OZONE_REPLICATION,
-          OzoneConfigKeys.OZONE_REPLICATION_DEFAULT);
-
-      if (OmUtils.isOmHAServiceId(conf, omHost)) {
-        // omHost is listed as one of the service ids in the config,
-        // thus we should treat omHost as omServiceId
-        this.ozoneClient =
-            OzoneClientFactory.getRpcClient(omHost, conf);
-      } else if (StringUtils.isNotEmpty(omHost) && omPort != -1) {
-        this.ozoneClient =
-            OzoneClientFactory.getRpcClient(omHost, omPort, conf);
-      } else {
-        this.ozoneClient =
-            OzoneClientFactory.getRpcClient(conf);
-      }
-      objectStore = ozoneClient.getObjectStore();
-      this.volume = objectStore.getVolume(volumeStr);
-      this.bucket = volume.getBucket(bucketStr);
-      this.replicationType = ReplicationType.valueOf(replicationTypeConf);
-      this.replicationFactor = ReplicationFactor.valueOf(replicationCountConf);
-    } finally {
-      Thread.currentThread().setContextClassLoader(contextClassLoader);
-    }
-
-  }
-
-  @Override
-  public void close() throws IOException {
-    ozoneClient.close();
-  }
-
-  @Override
-  public InputStream readFile(String key) throws IOException {
-    incrementCounter(Statistic.OBJECTS_READ);
-    try {
-      return bucket.readFile(key).getInputStream();
-    } catch (OMException ex) {
-      if (ex.getResult() == OMException.ResultCodes.FILE_NOT_FOUND
-          || ex.getResult() == OMException.ResultCodes.NOT_A_FILE) {
-        throw new FileNotFoundException(
-            ex.getResult().name() + ": " + ex.getMessage());
-      } else {
-        throw ex;
-      }
-    }
-  }
-
-  protected void incrementCounter(Statistic objectsRead) {
-    //noop: Use OzoneClientAdapterImpl which supports statistics.
-  }
-
-  @Override
-  public OzoneFSOutputStream createFile(String key, boolean overWrite,
-      boolean recursive) throws IOException {
-    incrementCounter(Statistic.OBJECTS_CREATED);
-    try {
-      OzoneOutputStream ozoneOutputStream = bucket
-          .createFile(key, 0, replicationType, replicationFactor, overWrite,
-              recursive);
-      return new OzoneFSOutputStream(ozoneOutputStream.getOutputStream());
-    } catch (OMException ex) {
-      if (ex.getResult() == OMException.ResultCodes.FILE_ALREADY_EXISTS
-          || ex.getResult() == OMException.ResultCodes.NOT_A_FILE) {
-        throw new FileAlreadyExistsException(
-            ex.getResult().name() + ": " + ex.getMessage());
-      } else {
-        throw ex;
-      }
-    }
-  }
-
-  @Override
-  public void renameKey(String key, String newKeyName) throws IOException {
-    incrementCounter(Statistic.OBJECTS_RENAMED);
-    bucket.renameKey(key, newKeyName);
-  }
-
-  /**
-   * Helper method to create a directory specified by the key name in the bucket.
-   *
-   * @param keyName key name to be created as directory
-   * @return true if the key is created, false otherwise
-   */
-  @Override
-  public boolean createDirectory(String keyName) throws IOException {
-    LOG.trace("creating dir for key:{}", keyName);
-    incrementCounter(Statistic.OBJECTS_CREATED);
-    try {
-      bucket.createDirectory(keyName);
-    } catch (OMException e) {
-      if (e.getResult() == OMException.ResultCodes.FILE_ALREADY_EXISTS) {
-        throw new FileAlreadyExistsException(e.getMessage());
-      }
-      throw e;
-    }
-    return true;
-  }
-
-  /**
-   * Helper method to delete an object specified by key name in bucket.
-   *
-   * @param keyName key name to be deleted
-   * @return true if the key is deleted, false otherwise
-   */
-  @Override
-  public boolean deleteObject(String keyName) {
-    LOG.trace("issuing delete for key" + keyName);
-    try {
-      incrementCounter(Statistic.OBJECTS_DELETED);
-      bucket.deleteKey(keyName);
-      return true;
-    } catch (IOException ioe) {
-      LOG.error("delete key failed " + ioe.getMessage());
-      return false;
-    }
-  }
-
-  public FileStatusAdapter getFileStatus(String key, URI uri,
-      Path qualifiedPath, String userName)
-      throws IOException {
-    try {
-      incrementCounter(Statistic.OBJECTS_QUERY);
-      OzoneFileStatus status = bucket.getFileStatus(key);
-      makeQualified(status, uri, qualifiedPath, userName);
-      return toFileStatusAdapter(status);
-
-    } catch (OMException e) {
-      if (e.getResult() == OMException.ResultCodes.FILE_NOT_FOUND) {
-        throw new
-            FileNotFoundException(key + ": No such file or directory!");
-      }
-      throw e;
-    }
-  }
-
-  public void makeQualified(FileStatus status, URI uri, Path path,
-      String username) {
-    if (status instanceof OzoneFileStatus) {
-      ((OzoneFileStatus) status)
-          .makeQualified(uri, path,
-              username, username);
-    }
-
-  }
-
-  @Override
-  public Iterator<BasicKeyInfo> listKeys(String pathKey) {
-    incrementCounter(Statistic.OBJECTS_LIST);
-    return new IteratorAdapter(bucket.listKeys(pathKey));
-  }
-
-  public List<FileStatusAdapter> listStatus(String keyName, boolean recursive,
-      String startKey, long numEntries, URI uri,
-      Path workingDir, String username) throws IOException {
-    try {
-      incrementCounter(Statistic.OBJECTS_LIST);
-      List<OzoneFileStatus> statuses = bucket
-          .listStatus(keyName, recursive, startKey, numEntries);
-
-      List<FileStatusAdapter> result = new ArrayList<>();
-      for (OzoneFileStatus status : statuses) {
-        Path qualifiedPath = status.getPath().makeQualified(uri, workingDir);
-        makeQualified(status, uri, qualifiedPath, username);
-        result.add(toFileStatusAdapter(status));
-      }
-      return result;
-    } catch (OMException e) {
-      if (e.getResult() == OMException.ResultCodes.FILE_NOT_FOUND) {
-        throw new FileNotFoundException(e.getMessage());
-      }
-      throw e;
-    }
-  }
-
-  @Override
-  public Token<OzoneTokenIdentifier> getDelegationToken(String renewer)
-      throws IOException {
-    if (!securityEnabled) {
-      return null;
-    }
-    Token<OzoneTokenIdentifier> token = ozoneClient.getObjectStore()
-        .getDelegationToken(renewer == null ? null : new Text(renewer));
-    token.setKind(OzoneTokenIdentifier.KIND_NAME);
-    return token;
-
-  }
-
-  @Override
-  public KeyProvider getKeyProvider() throws IOException {
-    return objectStore.getKeyProvider();
-  }
-
-  @Override
-  public URI getKeyProviderUri() throws IOException {
-    return objectStore.getKeyProviderUri();
-  }
-
-  @Override
-  public String getCanonicalServiceName() {
-    return objectStore.getCanonicalServiceName();
-  }
-
-  /**
-   * Ozone Delegation Token Renewer.
-   */
-  @InterfaceAudience.Private
-  public static class Renewer extends TokenRenewer {
-
-    //Ensure that OzoneConfiguration files are loaded before trying to use
-    // the renewer.
-    static {
-      OzoneConfiguration.activate();
-    }
-
-    public Text getKind() {
-      return OzoneTokenIdentifier.KIND_NAME;
-    }
-
-    @Override
-    public boolean handleKind(Text kind) {
-      return getKind().equals(kind);
-    }
-
-    @Override
-    public boolean isManaged(Token<?> token) throws IOException {
-      return true;
-    }
-
-    @Override
-    public long renew(Token<?> token, Configuration conf)
-        throws IOException, InterruptedException {
-      Token<OzoneTokenIdentifier> ozoneDt =
-          (Token<OzoneTokenIdentifier>) token;
-      OzoneClient ozoneClient =
-          OzoneClientFactory.getRpcClient(conf);
-      return ozoneClient.getObjectStore().renewDelegationToken(ozoneDt);
-    }
-
-    @Override
-    public void cancel(Token<?> token, Configuration conf)
-        throws IOException, InterruptedException {
-      Token<OzoneTokenIdentifier> ozoneDt =
-          (Token<OzoneTokenIdentifier>) token;
-      OzoneClient ozoneClient =
-          OzoneClientFactory.getRpcClient(conf);
-      ozoneClient.getObjectStore().cancelDelegationToken(ozoneDt);
-    }
-  }
-
-  /**
-   * Adapter to convert OzoneKey to a safe and simple Key implementation.
-   */
-  public static class IteratorAdapter implements Iterator<BasicKeyInfo> {
-
-    private Iterator<? extends OzoneKey> original;
-
-    public IteratorAdapter(Iterator<? extends OzoneKey> listKeys) {
-      this.original = listKeys;
-    }
-
-    @Override
-    public boolean hasNext() {
-      return original.hasNext();
-    }
-
-    @Override
-    public BasicKeyInfo next() {
-      OzoneKey next = original.next();
-      if (next == null) {
-        return null;
-      } else {
-        return new BasicKeyInfo(
-            next.getName(),
-            next.getModificationTime(),
-            next.getDataSize()
-        );
-      }
-    }
-  }
-
-  private FileStatusAdapter toFileStatusAdapter(OzoneFileStatus status) {
-    return new FileStatusAdapter(
-        status.getLen(),
-        status.getPath(),
-        status.isDirectory(),
-        status.getReplication(),
-        status.getBlockSize(),
-        status.getModificationTime(),
-        status.getAccessTime(),
-        status.getPermission().toShort(),
-        status.getOwner(),
-        status.getGroup(),
-        status.getPath()
-    );
-  }
-}
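The constructor above selects one of three OzoneClientFactory entry points, depending on whether the authority resolved to an HA service id, an explicit host and port, or neither. A minimal, hedged usage sketch of the adapter's read path; the volume, bucket, host, port, and key names are placeholders.

```java
OzoneConfiguration conf = new OzoneConfiguration();
// Explicit host:port form, so the factory's host/port entry point is used.
BasicOzoneClientAdapterImpl adapter =
    new BasicOzoneClientAdapterImpl("om-host", 9862, conf, "vol1", "bucket1");
try {
  // Stream the content of a key through the adapter.
  try (java.io.InputStream in = adapter.readFile("dir1/key1")) {
    byte[] buf = new byte[4096];
    int n;
    while ((n = in.read(buf)) > 0) {
      System.out.write(buf, 0, n);
    }
  }
} finally {
  adapter.close();
}
```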
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
deleted file mode 100644
index 298fd2e..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ /dev/null
@@ -1,787 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.EnumSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Objects;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.Progressable;
-
-import com.google.common.base.Preconditions;
-import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
-import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
-import static org.apache.hadoop.fs.ozone.Constants.OZONE_USER_DIR;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
-
-import org.apache.http.client.utils.URIBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The minimal Ozone Filesystem implementation.
- * <p>
- * This is a basic version which doesn't extend
- * KeyProviderTokenIssuer and doesn't include statistics. It can be used
- * from older hadoop versions. For newer hadoop versions use the
- * full-featured OzoneFileSystem.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class BasicOzoneFileSystem extends FileSystem {
-  static final Logger LOG =
-      LoggerFactory.getLogger(BasicOzoneFileSystem.class);
-
-  /**
-   * The Ozone client for connecting to Ozone server.
-   */
-
-  private URI uri;
-  private String userName;
-  private Path workingDir;
-
-  private OzoneClientAdapter adapter;
-
-  private static final Pattern URL_SCHEMA_PATTERN =
-      Pattern.compile("([^\\.]+)\\.([^\\.]+)\\.{0,1}(.*)");
-
-  private static final String URI_EXCEPTION_TEXT = "Ozone file system URL " +
-      "should be one of the following formats: " +
-      "o3fs://bucket.volume/key  OR " +
-      "o3fs://bucket.volume.om-host.example.com/key  OR " +
-      "o3fs://bucket.volume.om-host.example.com:5678/key";
-
-  @Override
-  public void initialize(URI name, Configuration conf) throws IOException {
-    super.initialize(name, conf);
-    setConf(conf);
-    Objects.requireNonNull(name.getScheme(), "No scheme provided in " + name);
-    Preconditions.checkArgument(getScheme().equals(name.getScheme()),
-        "Invalid scheme provided in " + name);
-
-    String authority = name.getAuthority();
-    if (authority == null) {
-      // authority is null when fs.defaultFS is not a qualified o3fs URI and
-      // o3fs:/// is passed to the client. matcher will NPE if authority is null
-      throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
-    }
-
-    Matcher matcher = URL_SCHEMA_PATTERN.matcher(authority);
-
-    if (!matcher.matches()) {
-      throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
-    }
-    String bucketStr = matcher.group(1);
-    String volumeStr = matcher.group(2);
-    String remaining = matcher.groupCount() == 3 ? matcher.group(3) : null;
-
-    String omHost = null;
-    int omPort = -1;
-    if (!isEmpty(remaining)) {
-      String[] parts = remaining.split(":");
-      // Array length should be either 1(hostname or service id) or 2(host:port)
-      if (parts.length > 2) {
-        throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
-      }
-      omHost = parts[0];
-      if (parts.length == 2) {
-        try {
-          omPort = Integer.parseInt(parts[1]);
-        } catch (NumberFormatException e) {
-          throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
-        }
-      }
-    }
-
-    try {
-      uri = new URIBuilder().setScheme(OZONE_URI_SCHEME)
-          .setHost(authority)
-          .build();
-      LOG.trace("Ozone URI for ozfs initialization is " + uri);
-
-      //isolated classloading defaults to true for ozonefs-lib-legacy, which
-      //bundles /ozonefs.txt on the classpath; otherwise the default is
-      //false. It can be overridden with the configuration key below.
-      boolean defaultValue =
-          BasicOzoneFileSystem.class.getClassLoader()
-              .getResource("ozonefs.txt")
-              != null;
-
-      //Use a string here instead of the constant, as the constant may not be
-      //available on the classpath of a hadoop 2.7 deployment
-      boolean isolatedClassloader =
-          conf.getBoolean("ozone.fs.isolated-classloader", defaultValue);
-
-      this.adapter = createAdapter(conf, bucketStr, volumeStr, omHost, omPort,
-          isolatedClassloader);
-
-      try {
-        this.userName =
-            UserGroupInformation.getCurrentUser().getShortUserName();
-      } catch (IOException e) {
-        this.userName = OZONE_DEFAULT_USER;
-      }
-      this.workingDir = new Path(OZONE_USER_DIR, this.userName)
-          .makeQualified(this.uri, this.workingDir);
-    } catch (URISyntaxException ue) {
-      final String msg = "Invalid Ozone endpoint " + name;
-      LOG.error(msg, ue);
-      throw new IOException(msg, ue);
-    }
-  }
-
-  protected OzoneClientAdapter createAdapter(Configuration conf,
-      String bucketStr,
-      String volumeStr, String omHost, int omPort,
-      boolean isolatedClassloader) throws IOException {
-
-    if (isolatedClassloader) {
-
-      return OzoneClientAdapterFactory
-          .createAdapter(volumeStr, bucketStr);
-
-    } else {
-
-      return new BasicOzoneClientAdapterImpl(omHost, omPort, conf,
-          volumeStr, bucketStr);
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    try {
-      adapter.close();
-    } finally {
-      super.close();
-    }
-  }
-
-  @Override
-  public URI getUri() {
-    return uri;
-  }
-
-  @Override
-  public String getScheme() {
-    return OZONE_URI_SCHEME;
-  }
-
-  @Override
-  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
-    incrementCounter(Statistic.INVOCATION_OPEN);
-    statistics.incrementReadOps(1);
-    LOG.trace("open() path:{}", f);
-    final String key = pathToKey(f);
-    return new FSDataInputStream(new OzoneFSInputStream(adapter.readFile(key)));
-  }
-
-  protected void incrementCounter(Statistic statistic) {
-    //don't do anything in this default implementation.
-  }
-
-  @Override
-  public FSDataOutputStream create(Path f, FsPermission permission,
-      boolean overwrite, int bufferSize,
-      short replication, long blockSize,
-      Progressable progress) throws IOException {
-    LOG.trace("create() path:{}", f);
-    incrementCounter(Statistic.INVOCATION_CREATE);
-    statistics.incrementWriteOps(1);
-    final String key = pathToKey(f);
-    return createOutputStream(key, overwrite, true);
-  }
-
-  @Override
-  public FSDataOutputStream createNonRecursive(Path path,
-      FsPermission permission,
-      EnumSet<CreateFlag> flags,
-      int bufferSize,
-      short replication,
-      long blockSize,
-      Progressable progress) throws IOException {
-    incrementCounter(Statistic.INVOCATION_CREATE_NON_RECURSIVE);
-    statistics.incrementWriteOps(1);
-    final String key = pathToKey(path);
-    return createOutputStream(key, flags.contains(CreateFlag.OVERWRITE), false);
-  }
-
-  private FSDataOutputStream createOutputStream(String key, boolean overwrite,
-      boolean recursive) throws IOException {
-    return new FSDataOutputStream(adapter.createFile(key, overwrite, recursive),
-        statistics);
-  }
-
-  @Override
-  public FSDataOutputStream append(Path f, int bufferSize,
-      Progressable progress) throws IOException {
-    throw new UnsupportedOperationException("append() Not implemented by the "
-        + getClass().getSimpleName() + " FileSystem implementation");
-  }
-
-  private class RenameIterator extends OzoneListingIterator {
-    private final String srcKey;
-    private final String dstKey;
-
-    RenameIterator(Path srcPath, Path dstPath)
-        throws IOException {
-      super(srcPath);
-      srcKey = pathToKey(srcPath);
-      dstKey = pathToKey(dstPath);
-      LOG.trace("rename from:{} to:{}", srcKey, dstKey);
-    }
-
-    @Override
-    boolean processKey(String key) throws IOException {
-      String newKeyName = dstKey.concat(key.substring(srcKey.length()));
-      adapter.renameKey(key, newKeyName);
-      return true;
-    }
-  }
-
-  /**
-   * Check whether the source and destination path are valid and then perform
-   * rename from source path to destination path.
-   * <p>
-   * The rename operation is performed by renaming the keys with src as prefix.
-   * For such keys the prefix is changed from src to dst.
-   *
-   * @param src source path for rename
-   * @param dst destination path for rename
-   * @return true if rename operation succeeded or
-   * if the src and dst have the same path and are of the same type
-   * @throws IOException on I/O errors or if the src/dst paths are invalid.
-   */
-  @Override
-  public boolean rename(Path src, Path dst) throws IOException {
-    incrementCounter(Statistic.INVOCATION_RENAME);
-    statistics.incrementWriteOps(1);
-    if (src.equals(dst)) {
-      return true;
-    }
-
-    LOG.trace("rename() from:{} to:{}", src, dst);
-    if (src.isRoot()) {
-      // Cannot rename root of file system
-      LOG.trace("Cannot rename the root of a filesystem");
-      return false;
-    }
-
-    // Cannot rename a directory to its own subdirectory
-    Path dstParent = dst.getParent();
-    while (dstParent != null && !src.equals(dstParent)) {
-      dstParent = dstParent.getParent();
-    }
-    Preconditions.checkArgument(dstParent == null,
-        "Cannot rename a directory to its own subdirectory");
-    // Check if the source exists
-    FileStatus srcStatus;
-    try {
-      srcStatus = getFileStatus(src);
-    } catch (FileNotFoundException fnfe) {
-      // source doesn't exist, return
-      return false;
-    }
-
-    // Check if the destination exists
-    FileStatus dstStatus;
-    try {
-      dstStatus = getFileStatus(dst);
-    } catch (FileNotFoundException fnde) {
-      dstStatus = null;
-    }
-
-    if (dstStatus == null) {
-      // If dst doesn't exist, check whether dst parent dir exists or not
-      // if the parent exists, the source can still be renamed to dst path
-      dstStatus = getFileStatus(dst.getParent());
-      if (!dstStatus.isDirectory()) {
-        throw new IOException(String.format(
-            "Failed to rename %s to %s, %s is a file", src, dst,
-            dst.getParent()));
-      }
-    } else {
-      // if dst exists and the source and destination paths are the same,
-      // check that the src and dst are of the same type
-      if (srcStatus.getPath().equals(dstStatus.getPath())) {
-        return !srcStatus.isDirectory();
-      } else if (dstStatus.isDirectory()) {
-        // If dst is a directory, rename source as subpath of it.
-        // for example rename /source to /dst will lead to /dst/source
-        dst = new Path(dst, src.getName());
-        FileStatus[] statuses;
-        try {
-          statuses = listStatus(dst);
-        } catch (FileNotFoundException fnde) {
-          statuses = null;
-        }
-
-        if (statuses != null && statuses.length > 0) {
-          // If dst exists as a file or as a non-empty directory, fail
-          throw new FileAlreadyExistsException(String.format(
-              "Failed to rename %s to %s, file already exists or not empty!",
-              src, dst));
-        }
-      } else {
-        // If dst is not a directory
-        throw new FileAlreadyExistsException(String.format(
-            "Failed to rename %s to %s, file already exists!", src, dst));
-      }
-    }
-
-    if (srcStatus.isDirectory()) {
-      if (dst.toString().startsWith(src.toString() + OZONE_URI_DELIMITER)) {
-        LOG.trace("Cannot rename a directory to a subdirectory of self");
-        return false;
-      }
-    }
-    RenameIterator iterator = new RenameIterator(src, dst);
-    boolean result = iterator.iterate();
-    if (result) {
-      createFakeParentDirectory(src);
-    }
-    return result;
-  }
-
-  private class DeleteIterator extends OzoneListingIterator {
-    private boolean recursive;
-
-    DeleteIterator(Path f, boolean recursive)
-        throws IOException {
-      super(f);
-      this.recursive = recursive;
-      if (getStatus().isDirectory()
-          && !this.recursive
-          && listStatus(f).length != 0) {
-        throw new PathIsNotEmptyDirectoryException(f.toString());
-      }
-    }
-
-    @Override
-    boolean processKey(String key) throws IOException {
-      if (key.equals("")) {
-        LOG.trace("Skipping deleting root directory");
-        return true;
-      } else {
-        LOG.trace("deleting key:" + key);
-        boolean succeed = adapter.deleteObject(key);
-        // if recursive delete is requested ignore the return value of
-        // deleteObject and issue deletes for other keys.
-        return recursive || succeed;
-      }
-    }
-  }
-
-  /**
-   * Deletes the children of the input dir path by iterating through the
-   * DeleteIterator.
-   *
-   * @param f directory path to be deleted
-   * @return true if successfully deletes all required keys, false otherwise
-   * @throws IOException
-   */
-  private boolean innerDelete(Path f, boolean recursive) throws IOException {
-    LOG.trace("delete() path:{} recursive:{}", f, recursive);
-    try {
-      DeleteIterator iterator = new DeleteIterator(f, recursive);
-      return iterator.iterate();
-    } catch (FileNotFoundException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Couldn't delete {} - does not exist", f);
-      }
-      return false;
-    }
-  }
-
-  @Override
-  public boolean delete(Path f, boolean recursive) throws IOException {
-    incrementCounter(Statistic.INVOCATION_DELETE);
-    statistics.incrementWriteOps(1);
-    LOG.debug("Delete path {} - recursive {}", f, recursive);
-    FileStatus status;
-    try {
-      status = getFileStatus(f);
-    } catch (FileNotFoundException ex) {
-      LOG.warn("delete: Path does not exist: {}", f);
-      return false;
-    }
-
-    String key = pathToKey(f);
-    boolean result;
-
-    if (status.isDirectory()) {
-      LOG.debug("delete: Path is a directory: {}", f);
-      key = addTrailingSlashIfNeeded(key);
-
-      if (key.equals("/")) {
-        LOG.warn("Cannot delete root directory.");
-        return false;
-      }
-
-      result = innerDelete(f, recursive);
-    } else {
-      LOG.debug("delete: Path is a file: {}", f);
-      result = adapter.deleteObject(key);
-    }
-
-    if (result) {
-      // If this delete operation removes all files/directories from the
-      // parent directory, then an empty parent directory must be created.
-      createFakeParentDirectory(f);
-    }
-
-    return result;
-  }
-
-  /**
-   * Create a fake parent directory key if it does not already exist and no
-   * other child of this parent directory exists.
-   *
-   * @param f path to the fake parent directory
-   * @throws IOException
-   */
-  private void createFakeParentDirectory(Path f) throws IOException {
-    Path parent = f.getParent();
-    if (parent != null && !parent.isRoot()) {
-      createFakeDirectoryIfNecessary(parent);
-    }
-  }
-
-  /**
-   * Create a fake directory key if it does not already exist.
-   *
-   * @param f path to the fake directory
-   * @throws IOException
-   */
-  private void createFakeDirectoryIfNecessary(Path f) throws IOException {
-    String key = pathToKey(f);
-    if (!key.isEmpty() && !o3Exists(f)) {
-      LOG.debug("Creating new fake directory at {}", f);
-      String dirKey = addTrailingSlashIfNeeded(key);
-      adapter.createDirectory(dirKey);
-    }
-  }
-
-  /**
-   * Check if a file or directory exists corresponding to given path.
-   *
-   * @param f path to file/directory.
-   * @return true if it exists, false otherwise.
-   * @throws IOException
-   */
-  private boolean o3Exists(final Path f) throws IOException {
-    Path path = makeQualified(f);
-    try {
-      getFileStatus(path);
-      return true;
-    } catch (FileNotFoundException ex) {
-      return false;
-    }
-  }
-
-  @Override
-  public FileStatus[] listStatus(Path f) throws IOException {
-    incrementCounter(Statistic.INVOCATION_LIST_STATUS);
-    statistics.incrementReadOps(1);
-    LOG.trace("listStatus() path:{}", f);
-    int numEntries = LISTING_PAGE_SIZE;
-    LinkedList<FileStatus> statuses = new LinkedList<>();
-    List<FileStatus> tmpStatusList;
-    String startKey = "";
-
-    do {
-      tmpStatusList =
-          adapter.listStatus(pathToKey(f), false, startKey, numEntries, uri,
-              workingDir, getUsername())
-              .stream()
-              .map(this::convertFileStatus)
-              .collect(Collectors.toList());
-
-      if (!tmpStatusList.isEmpty()) {
-        if (startKey.isEmpty()) {
-          statuses.addAll(tmpStatusList);
-        } else {
-          statuses.addAll(tmpStatusList.subList(1, tmpStatusList.size()));
-        }
-        startKey = pathToKey(statuses.getLast().getPath());
-      }
-      // listStatus returns up to numEntries entries if available.
-      // Any smaller number of entries indicates that the required entries
-      // have been exhausted.
-    } while (tmpStatusList.size() == numEntries);
-
-
-    return statuses.toArray(new FileStatus[0]);
-  }
-
-  @Override
-  public void setWorkingDirectory(Path newDir) {
-    workingDir = newDir;
-  }
-
-  @Override
-  public Path getWorkingDirectory() {
-    return workingDir;
-  }
-
-  @Override
-  public Token<?> getDelegationToken(String renewer) throws IOException {
-    return adapter.getDelegationToken(renewer);
-  }
-
-  /**
-   * Get a canonical service name for this file system. If the URI is logical,
-   * the hostname part of the URI will be returned.
-   *
-   * @return a service string that uniquely identifies this file system.
-   */
-  @Override
-  public String getCanonicalServiceName() {
-    return adapter.getCanonicalServiceName();
-  }
-
-  /**
-   * Get the username of the FS.
-   *
-   * @return the short name of the user who instantiated the FS
-   */
-  public String getUsername() {
-    return userName;
-  }
-
-  /**
-   * Creates a directory. Directory is represented using a key with no value.
-   *
-   * @param path directory path to be created
-   * @return true if directory exists or created successfully.
-   * @throws IOException
-   */
-  private boolean mkdir(Path path) throws IOException {
-    return adapter.createDirectory(pathToKey(path));
-  }
-
-  @Override
-  public boolean mkdirs(Path f, FsPermission permission) throws IOException {
-    LOG.trace("mkdir() path:{} ", f);
-    String key = pathToKey(f);
-    if (isEmpty(key)) {
-      return false;
-    }
-    return mkdir(f);
-  }
-
-  @Override
-  public FileStatus getFileStatus(Path f) throws IOException {
-    incrementCounter(Statistic.INVOCATION_GET_FILE_STATUS);
-    statistics.incrementReadOps(1);
-    LOG.trace("getFileStatus() path:{}", f);
-    Path qualifiedPath = f.makeQualified(uri, workingDir);
-    String key = pathToKey(qualifiedPath);
-    FileStatus fileStatus = null;
-    try {
-      fileStatus = convertFileStatus(
-          adapter.getFileStatus(key, uri, qualifiedPath, getUsername()));
-    } catch (OMException ex) {
-      if (ex.getResult().equals(OMException.ResultCodes.KEY_NOT_FOUND)) {
-        throw new FileNotFoundException("File not found. path:" + f);
-      }
-    }
-    return fileStatus;
-  }
-
-  /**
-   * Turn a path (relative or otherwise) into an Ozone key.
-   *
-   * @param path the path of the file.
-   * @return the key of the object that represents the file.
-   */
-  public String pathToKey(Path path) {
-    Objects.requireNonNull(path, "Path canf not be null!");
-    if (!path.isAbsolute()) {
-      path = new Path(workingDir, path);
-    }
-    // removing leading '/' char
-    String key = path.toUri().getPath().substring(1);
-    LOG.trace("path for key:{} is:{}", key, path);
-    return key;
-  }
-
-  /**
-   * Add trailing delimiter to path if it is not already present.
-   *
-   * @param key the ozone Key which needs to be appended
-   * @return delimiter appended key
-   */
-  private String addTrailingSlashIfNeeded(String key) {
-    if (!isEmpty(key) && !key.endsWith(OZONE_URI_DELIMITER)) {
-      return key + OZONE_URI_DELIMITER;
-    } else {
-      return key;
-    }
-  }
-
-  @Override
-  public String toString() {
-    return "OzoneFileSystem{URI=" + uri + ", "
-        + "workingDir=" + workingDir + ", "
-        + "userName=" + userName + ", "
-        + "statistics=" + statistics
-        + "}";
-  }
-
-  /**
-   * This class provides an interface to iterate through all the keys in the
-   * bucket prefixed with the input path key and process them.
-   * <p>
-   * Each implementing class should define how the keys should be processed
-   * through the processKey() function.
-   */
-  private abstract class OzoneListingIterator {
-    private final Path path;
-    private final FileStatus status;
-    private String pathKey;
-    private Iterator<BasicKeyInfo> keyIterator;
-
-    OzoneListingIterator(Path path)
-        throws IOException {
-      this.path = path;
-      this.status = getFileStatus(path);
-      this.pathKey = pathToKey(path);
-      if (status.isDirectory()) {
-        this.pathKey = addTrailingSlashIfNeeded(pathKey);
-      }
-      keyIterator = adapter.listKeys(pathKey);
-    }
-
-    /**
-     * The output of processKey determines if further iteration through the
-     * keys should be done or not.
-     *
-     * @return true if we should continue iteration of keys, false otherwise.
-     * @throws IOException
-     */
-    abstract boolean processKey(String key) throws IOException;
-
-    /**
-     * Iterates through all the keys prefixed with the input path's key and
-     * processes each key through processKey().
-     * If for any key, the processKey() returns false, then the iteration is
-     * stopped and returned with false indicating that all the keys could not
-     * be processed successfully.
-     *
-     * @return true if all keys are processed successfully, false otherwise.
-     * @throws IOException
-     */
-    boolean iterate() throws IOException {
-      LOG.trace("Iterating path {}", path);
-      if (status.isDirectory()) {
-        LOG.trace("Iterating directory:{}", pathKey);
-        while (keyIterator.hasNext()) {
-          BasicKeyInfo key = keyIterator.next();
-          LOG.trace("iterating key:{}", key.getName());
-          if (!processKey(key.getName())) {
-            return false;
-          }
-        }
-        return true;
-      } else {
-        LOG.trace("iterating file:{}", path);
-        return processKey(pathKey);
-      }
-    }
-
-    String getPathKey() {
-      return pathKey;
-    }
-
-    boolean pathIsDirectory() {
-      return status.isDirectory();
-    }
-
-    FileStatus getStatus() {
-      return status;
-    }
-  }
-
-  public OzoneClientAdapter getAdapter() {
-    return adapter;
-  }
-
-  public boolean isEmpty(CharSequence cs) {
-    return cs == null || cs.length() == 0;
-  }
-
-  public boolean isNumber(String number) {
-    try {
-      Integer.parseInt(number);
-    } catch (NumberFormatException ex) {
-      return false;
-    }
-    return true;
-  }
-
-  private FileStatus convertFileStatus(
-      FileStatusAdapter fileStatusAdapter) {
-
-    Path symLink = null;
-    try {
-      symLink = fileStatusAdapter.getSymlink();
-    } catch (Exception ex) {
-      //NOOP: if there is no symlink, symLink remains null.
-    }
-
-    return new FileStatus(
-        fileStatusAdapter.getLength(),
-        fileStatusAdapter.isDir(),
-        fileStatusAdapter.getBlockReplication(),
-        fileStatusAdapter.getBlocksize(),
-        fileStatusAdapter.getModificationTime(),
-        fileStatusAdapter.getAccessTime(),
-        new FsPermission(fileStatusAdapter.getPermission()),
-        fileStatusAdapter.getOwner(),
-        fileStatusAdapter.getGroup(),
-        symLink,
-        fileStatusAdapter.getPath()
-    );
-
-  }
-}
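End to end, the deleted file system was consumed through the ordinary FileSystem API once the o3fs scheme was registered (for example via `fs.o3fs.impl` pointing at the ozonefs jar). A hedged sketch, assuming such registration; the URI follows the formats listed in URI_EXCEPTION_TEXT above, with placeholder bucket, volume, and host values.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class O3fsUsageExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // bucket.volume.om-host:port form, as documented in URI_EXCEPTION_TEXT.
    FileSystem fs = FileSystem.get(
        URI.create("o3fs://bucket.volume.om-host.example.com:9862/"), conf);
    fs.mkdirs(new Path("/user/alice"));
    try (FSDataOutputStream out =
             fs.create(new Path("/user/alice/hello.txt"))) {
      out.writeBytes("hello ozone\n");
    }
    for (FileStatus st : fs.listStatus(new Path("/user/alice"))) {
      System.out.println(st.getPath() + " " + st.getLen());
    }
    fs.close();
  }
}
```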
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
deleted file mode 100644
index 832a0cb..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-/**
- * Constants for Ozone FileSystem implementation.
- */
-public final class Constants {
-
-  public static final String OZONE_DEFAULT_USER = "hdfs";
-
-  public static final String OZONE_USER_DIR = "/user";
-
-  /** Local buffer directory. */
-  public static final String BUFFER_DIR_KEY = "fs.ozone.buffer.dir";
-
-  /** Temporary directory. */
-  public static final String BUFFER_TMP_KEY = "hadoop.tmp.dir";
-
-  /** Page size for Ozone listing operation. */
-  public static final int LISTING_PAGE_SIZE = 1024;
-
-  private Constants() {
-
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
deleted file mode 100644
index 9159783..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.ozone;
-
-import org.apache.hadoop.fs.Path;
-
-/**
- * Class to hold the internal information of a FileStatus.
- * <p>
- * As FileStatus class is not compatible between 3.x and 2.x hadoop we can
- * use this adapter to hold all the required information. Hadoop 3.x FileStatus
- * information can be converted to this class, and this class can be used to
- * create hadoop 2.x FileStatus.
- * <p>
- * FileStatus (Hadoop 3.x) --> FileStatusAdapter --> FileStatus (Hadoop 2.x)
- */
-public final class FileStatusAdapter {
-
-  private final long length;
-  private final Path path;
-  private final boolean isdir;
-  private final short blockReplication;
-  private final long blocksize;
-  private final long modificationTime;
-  private final long accessTime;
-  private final short permission;
-  private final String owner;
-  private final String group;
-  private final Path symlink;
-
-  @SuppressWarnings("checkstyle:ParameterNumber")
-  public FileStatusAdapter(long length, Path path, boolean isdir,
-      short blockReplication, long blocksize, long modificationTime,
-      long accessTime, short permission, String owner,
-      String group, Path symlink) {
-    this.length = length;
-    this.path = path;
-    this.isdir = isdir;
-    this.blockReplication = blockReplication;
-    this.blocksize = blocksize;
-    this.modificationTime = modificationTime;
-    this.accessTime = accessTime;
-    this.permission = permission;
-    this.owner = owner;
-    this.group = group;
-    this.symlink = symlink;
-  }
-
-  public Path getPath() {
-    return path;
-  }
-
-  public boolean isDir() {
-    return isdir;
-  }
-
-  public short getBlockReplication() {
-    return blockReplication;
-  }
-
-  public long getBlocksize() {
-    return blocksize;
-  }
-
-  public long getModificationTime() {
-    return modificationTime;
-  }
-
-  public long getAccessTime() {
-    return accessTime;
-  }
-
-  public short getPermission() {
-    return permission;
-  }
-
-  public String getOwner() {
-    return owner;
-  }
-
-  public String getGroup() {
-    return group;
-  }
-
-  public Path getSymlink() {
-    return symlink;
-  }
-
-  public long getLength() {
-    return length;
-  }
-
-}
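The javadoc above describes a one-way bridge: a Hadoop 3.x FileStatus is flattened into this adapter, and a 2.x-compatible FileStatus can be rebuilt from it. A sketch of that reconstruction, mirroring convertFileStatus in BasicOzoneFileSystem above:

```java
// Rebuild a Hadoop FileStatus from the adapter's primitive fields; this
// FileStatus constructor exists unchanged on a Hadoop 2.x classpath.
static FileStatus toFileStatus(FileStatusAdapter a) {
  return new FileStatus(a.getLength(), a.isDir(), a.getBlockReplication(),
      a.getBlocksize(), a.getModificationTime(), a.getAccessTime(),
      new FsPermission(a.getPermission()), a.getOwner(), a.getGroup(),
      a.getSymlink(), a.getPath());
}
```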
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java
deleted file mode 100644
index a90797e..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.ozone;
-
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * Class loader which delegates the loading only for the selected class.
- *
- * <p>
- * By default a java classloader first delegates all class loading to its
- * parent, and loads the class itself only if the parent cannot find it.
- * <p>
- * This simple class loader does the opposite. Everything is loaded with this
- * class loader without delegation _except_ the few classes which are defined
- * in the constructor.
- * <p>
- * With this method we can use two separate class loaders (the original main
- * classloader and an instance of this one, which loads the isolated classes),
- * while the few selected classes are shared between the two class loaders.
- * <p>
- * With this approach it's possible to use any older hadoop version
- * (main classloader) together with ozonefs (instance of this classloader), as
- * only the selected classes are shared between the class loaders.
- */
-public class FilteredClassLoader extends URLClassLoader {
-
-  private final ClassLoader systemClassLoader;
-
-  private final ClassLoader delegate;
-  private Set<String> delegatedClasses = new HashSet<>();
-
-  public FilteredClassLoader(URL[] urls, ClassLoader parent) {
-    super(urls, null);
-    delegatedClasses.add("org.apache.hadoop.crypto.key.KeyProvider");
-    delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneClientAdapter");
-    delegatedClasses.add("org.apache.hadoop.fs.ozone.FileStatusAdapter");
-    delegatedClasses.add("org.apache.hadoop.security.token.Token");
-    delegatedClasses.add("org.apache.hadoop.fs.ozone.BasicKeyInfo");
-    delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneFSOutputStream");
-    delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneFSStorageStatistics");
-    delegatedClasses.add("org.apache.hadoop.fs.ozone.Statistic");
-    delegatedClasses.add("org.apache.hadoop.fs.Seekable");
-    delegatedClasses.add("org.apache.hadoop.io.Text");
-    delegatedClasses.add("org.apache.hadoop.fs.Path");
-    delegatedClasses.addAll(StringUtils.getTrimmedStringCollection(
-        System.getenv("HADOOP_OZONE_DELEGATED_CLASSES")));
-    this.delegate = parent;
-    systemClassLoader = getSystemClassLoader();
-
-  }
-
-  @Override
-  public Class<?> loadClass(String name) throws ClassNotFoundException {
-    if (delegatedClasses.contains(name) ||
-        name.startsWith("org.apache.log4j") ||
-        name.startsWith("org.slf4j")) {
-      return delegate.loadClass(name);
-    }
-    return super.loadClass(name);
-  }
-
-  private Class<?> loadFromSystem(String name) {
-    if (systemClassLoader != null) {
-      try {
-        return systemClassLoader.loadClass(name);
-      } catch (ClassNotFoundException ex) {
-        //no problem
-        return null;
-      }
-    } else {
-      return null;
-    }
-  }
-}
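In other words, this is a parent-last classloader with an allow-list of shared bridge types. A hedged sketch of how it is wired up (the jar path is a placeholder; the real wiring lives in OzoneClientAdapterFactory below):

```java
import java.io.File;
import java.net.URL;

// Everything resolves from the isolated jar, except the delegated classes
// (adapter interfaces, Token, Path, ...) which come from the application
// classloader so instances can cross the boundary.
URL[] urls = {new File("/opt/ozone/ozonefs-legacy.jar").toURI().toURL()};
ClassLoader app = Thread.currentThread().getContextClassLoader();
FilteredClassLoader isolated = new FilteredClassLoader(urls, app);
Class<?> impl = isolated.loadClass(
    "org.apache.hadoop.fs.ozone.BasicOzoneClientAdapterImpl");
```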
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java
deleted file mode 100644
index a0ec01f..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.net.URI;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.DtFetcher;
-import org.apache.hadoop.security.token.Token;
-
-
-/**
- * A DT fetcher for OzoneFileSystem.
- * It is only needed for the `hadoop dtutil` command.
- */
-public class O3fsDtFetcher implements DtFetcher {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(O3fsDtFetcher.class);
-
-  private static final String SERVICE_NAME = OzoneConsts.OZONE_URI_SCHEME;
-
-  private static final String FETCH_FAILED =
-      "Fetch ozone delegation token failed";
-
-  /**
-   * Returns the service name for O3fs, which is also a valid URL prefix.
-   */
-  public Text getServiceName() {
-    return new Text(SERVICE_NAME);
-  }
-
-  public boolean isTokenRequired() {
-    return UserGroupInformation.isSecurityEnabled();
-  }
-
-  /**
-   *  Returns a Token object via FileSystem; fails if it cannot be fetched.
-   *  @param conf - a Configuration object used with FileSystem.get()
-   *  @param creds - a Credentials object to which token(s) will be added
-   *  @param renewer  - the renewer to send with the token request
-   *  @param url  - the URL to which the request is sent
-   *  @return a Token; an IOException is thrown if the fetch fails.
-   */
-  public Token<?> addDelegationTokens(Configuration conf, Credentials creds,
-      String renewer, String url) throws Exception {
-    if (!url.startsWith(getServiceName().toString())) {
-      url = getServiceName().toString() + "://" + url;
-    }
-    LOG.debug("addDelegationTokens from {} renewer {}.", url, renewer);
-    FileSystem fs = FileSystem.get(URI.create(url), conf);
-    Token<?> token = fs.getDelegationToken(renewer);
-    if (token == null) {
-      LOG.error(FETCH_FAILED);
-      throw new IOException(FETCH_FAILED);
-    }
-    creds.addToken(token.getService(), token);
-    return token;
-  }
-}
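`hadoop dtutil` discovers this fetcher through the ServiceLoader mechanism, but it can also be driven directly, which is convenient in tests. A hypothetical direct invocation; the renewer and URL are placeholders.

```java
Configuration conf = new Configuration();
Credentials creds = new Credentials();
O3fsDtFetcher fetcher = new O3fsDtFetcher();
// The fetcher itself prepends "o3fs://" when the scheme is missing.
Token<?> token = fetcher.addDelegationTokens(
    conf, creds, "yarn", "bucket.volume.om-host.example.com");
System.out.println("fetched token kind: " + token.getKind());
```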
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
deleted file mode 100644
index 4163c13..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.DelegateToFileSystem;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-
-/**
- * Ozone implementation of AbstractFileSystem.
- * This implementation delegates to the OzoneFileSystem.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class OzFs extends DelegateToFileSystem {
-
-  public OzFs(URI theUri, Configuration conf)
-      throws IOException, URISyntaxException {
-    super(theUri, new OzoneFileSystem(), conf,
-        OzoneConsts.OZONE_URI_SCHEME, false);
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
deleted file mode 100644
index 0ae8c8f..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URI;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.security.token.Token;
-
-/**
- * Lightweight adapter to separate hadoop/ozone classes.
- * <p>
- * This class contains only the bare minimum of Ozone classes in its
- * signatures. It can be loaded by a different classloader because only the
- * objects in the method signatures need to be shared between the
- * classloaders.
- */
-public interface OzoneClientAdapter {
-
-  void close() throws IOException;
-
-  InputStream readFile(String key) throws IOException;
-
-  OzoneFSOutputStream createFile(String key, boolean overWrite,
-      boolean recursive) throws IOException;
-
-  void renameKey(String key, String newKeyName) throws IOException;
-
-  boolean createDirectory(String keyName) throws IOException;
-
-  boolean deleteObject(String keyName);
-
-  Iterator<BasicKeyInfo> listKeys(String pathKey);
-
-  List<FileStatusAdapter> listStatus(String keyName, boolean recursive,
-      String startKey, long numEntries, URI uri,
-      Path workingDir, String username) throws IOException;
-
-  Token<OzoneTokenIdentifier> getDelegationToken(String renewer)
-      throws IOException;
-
-  KeyProvider getKeyProvider() throws IOException;
-
-  URI getKeyProviderUri() throws IOException;
-
-  String getCanonicalServiceName();
-
-  FileStatusAdapter getFileStatus(String key, URI uri,
-      Path qualifiedPath, String userName) throws IOException;
-
-}
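Because every type in these signatures is on the delegated (shared) list of FilteredClassLoader, an implementation loaded by the isolated classloader can still be cast to this interface in the application classloader. A hedged sketch of that cast, mirroring the factory below; `urls` and `appClassLoader` are placeholders.

```java
ClassLoader isolated = new FilteredClassLoader(urls, appClassLoader);
Class<?> impl = isolated.loadClass(
    "org.apache.hadoop.fs.ozone.BasicOzoneClientAdapterImpl");
// The cast succeeds because OzoneClientAdapter is on the delegated list
// and therefore comes from the shared parent classloader on both sides.
OzoneClientAdapter adapter = (OzoneClientAdapter) impl
    .getConstructor(String.class, String.class)
    .newInstance("vol1", "bucket1");
```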
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java
deleted file mode 100644
index fee4298..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.List;
-
-import org.apache.hadoop.fs.StorageStatistics;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Creates OzoneClientAdapter with classloader separation.
- */
-public final class OzoneClientAdapterFactory {
-
-  static final Logger LOG =
-      LoggerFactory.getLogger(OzoneClientAdapterFactory.class);
-
-  private OzoneClientAdapterFactory() {
-  }
-
-  @SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED")
-  public static OzoneClientAdapter createAdapter(
-      String volumeStr,
-      String bucketStr) throws IOException {
-    return createAdapter(volumeStr, bucketStr, true,
-        (aClass) -> (OzoneClientAdapter) aClass
-            .getConstructor(String.class, String.class)
-            .newInstance(
-                volumeStr,
-                bucketStr));
-  }
-
-  @SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED")
-  public static OzoneClientAdapter createAdapter(
-      String volumeStr,
-      String bucketStr,
-      StorageStatistics storageStatistics) throws IOException {
-    return createAdapter(volumeStr, bucketStr, false,
-        (aClass) -> (OzoneClientAdapter) aClass
-            .getConstructor(String.class, String.class,
-                OzoneFSStorageStatistics.class)
-            .newInstance(
-                volumeStr,
-                bucketStr,
-                storageStatistics));
-  }
-
-  @SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED")
-  public static OzoneClientAdapter createAdapter(
-      String volumeStr,
-      String bucketStr,
-      boolean basic,
-      OzoneClientAdapterCreator creator) throws IOException {
-
-    ClassLoader currentClassLoader =
-        OzoneClientAdapterFactory.class.getClassLoader();
-    List<URL> urls = new ArrayList<>();
-
-    findEmbeddedLibsUrl(urls, currentClassLoader);
-
-    findConfigDirUrl(urls, currentClassLoader);
-
-    ClassLoader classLoader =
-        new FilteredClassLoader(urls.toArray(new URL[0]), currentClassLoader);
-
-    try {
-
-      ClassLoader contextClassLoader =
-          Thread.currentThread().getContextClassLoader();
-      Thread.currentThread().setContextClassLoader(classLoader);
-
-      // This class caches the context classloader during the first load.
-      // Call it here, while the context classloader is set to the isolated
-      // loader, to make sure the grpc classes are loaded by the right
-      // loader.
-      Class<?> reflectionUtils =
-          classLoader.loadClass("org.apache.ratis.util.ReflectionUtils");
-      reflectionUtils.getMethod("getClassByName", String.class)
-          .invoke(null, "org.apache.ratis.grpc.GrpcFactory");
-
-      Class<?> adapterClass = null;
-      if (basic) {
-        adapterClass = classLoader
-            .loadClass(
-                "org.apache.hadoop.fs.ozone.BasicOzoneClientAdapterImpl");
-      } else {
-        adapterClass = classLoader
-            .loadClass(
-                "org.apache.hadoop.fs.ozone.OzoneClientAdapterImpl");
-      }
-      OzoneClientAdapter ozoneClientAdapter =
-          creator.createOzoneClientAdapter(adapterClass);
-
-      Thread.currentThread().setContextClassLoader(contextClassLoader);
-
-      return ozoneClientAdapter;
-    } catch (Exception e) {
-      LOG.error("Can't initialize the ozoneClientAdapter", e);
-      throw new IOException(
-          "Can't initialize the OzoneClientAdapter implementation", e);
-    }
-
-  }
-
-  private static void findConfigDirUrl(List<URL> urls,
-      ClassLoader currentClassLoader) throws IOException {
-    Enumeration<URL> conf =
-        currentClassLoader.getResources("ozone-site.xml");
-    while (conf.hasMoreElements()) {
-      urls.add(
-          new URL(
-              conf.nextElement().toString().replace("ozone-site.xml", "")));
-
-    }
-  }
-
-  private static void findEmbeddedLibsUrl(List<URL> urls,
-      ClassLoader currentClassLoader)
-      throws MalformedURLException {
-
-    // A marker file is added to the jar to make it easier to find the URL
-    // of the current jar.
-    String markerFile = "ozonefs.txt";
-
-    URL ozFs = currentClassLoader
-        .getResource(markerFile);
-    String rootPath = ozFs.toString().replace(markerFile, "");
-    urls.add(new URL(rootPath));
-
-    urls.add(new URL(rootPath + "libs/"));
-
-  }
-
-  /**
-   * Interface to create an OzoneClientAdapter implementation via reflection.
-   */
-  @FunctionalInterface
-  interface OzoneClientAdapterCreator {
-    OzoneClientAdapter createOzoneClientAdapter(Class<?> clientAdapter)
-        throws NoSuchMethodException, IllegalAccessException,
-        InvocationTargetException, InstantiationException;
-  }
-
-}
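
createAdapter above swaps the thread context classloader around the
reflective instantiation because some libraries (the comment cites Ratis'
ReflectionUtils/GrpcFactory) cache the TCCL on first load. A hedged, generic
sketch of that save/swap/restore idiom (TcclSwapSketch is an invented name;
unlike the code above, it also restores the loader on exception paths):

import java.util.concurrent.Callable;

public final class TcclSwapSketch {

  private TcclSwapSketch() {
  }

  public static <T> T withContextClassLoader(ClassLoader loader,
      Callable<T> work) throws Exception {
    Thread current = Thread.currentThread();
    ClassLoader previous = current.getContextClassLoader();
    current.setContextClassLoader(loader);
    try {
      // Anything in here that caches the context classloader now binds to
      // the isolated loader instead of the application loader.
      return work.call();
    } finally {
      // Restore on all paths, including errors.
      current.setContextClassLoader(previous);
    }
  }
}
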
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java
deleted file mode 100644
index 975bbf7..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-/**
- * Implementation of the OzoneFileSystem calls.
- */
-public class OzoneClientAdapterImpl extends BasicOzoneClientAdapterImpl {
-
-  private OzoneFSStorageStatistics storageStatistics;
-
-  public OzoneClientAdapterImpl(String volumeStr, String bucketStr,
-      OzoneFSStorageStatistics storageStatistics)
-      throws IOException {
-    super(volumeStr, bucketStr);
-    this.storageStatistics = storageStatistics;
-  }
-
-  public OzoneClientAdapterImpl(
-      OzoneConfiguration conf, String volumeStr, String bucketStr,
-      OzoneFSStorageStatistics storageStatistics)
-      throws IOException {
-    super(conf, volumeStr, bucketStr);
-    this.storageStatistics = storageStatistics;
-  }
-
-  public OzoneClientAdapterImpl(String omHost, int omPort,
-      Configuration hadoopConf, String volumeStr, String bucketStr,
-      OzoneFSStorageStatistics storageStatistics)
-      throws IOException {
-    super(omHost, omPort, hadoopConf, volumeStr, bucketStr);
-    this.storageStatistics = storageStatistics;
-  }
-
-  @Override
-  protected void incrementCounter(Statistic objectsRead) {
-    if (storageStatistics != null) {
-      storageStatistics.incrementCounter(objectsRead, 1);
-    }
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
deleted file mode 100644
index 909b2af..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FSInputStream;
-import org.apache.hadoop.fs.Seekable;
-
-/**
- * The input stream for Ozone file system.
- *
- * TODO: Make inputStream generic for both rest and rpc clients
- * This class is not thread safe.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class OzoneFSInputStream extends FSInputStream {
-
-  private final InputStream inputStream;
-
-  public OzoneFSInputStream(InputStream inputStream) {
-    this.inputStream = inputStream;
-  }
-
-  @Override
-  public int read() throws IOException {
-    return inputStream.read();
-  }
-
-  @Override
-  public int read(byte[] b, int off, int len) throws IOException {
-    return inputStream.read(b, off, len);
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    inputStream.close();
-  }
-
-  @Override
-  public void seek(long pos) throws IOException {
-    ((Seekable) inputStream).seek(pos);
-  }
-
-  @Override
-  public long getPos() throws IOException {
-    return ((Seekable) inputStream).getPos();
-  }
-
-  @Override
-  public boolean seekToNewSource(long targetPos) throws IOException {
-    return false;
-  }
-
-  @Override
-  public int available() throws IOException {
-    return inputStream.available();
-  }
-}
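
Note that seek() and getPos() above delegate by casting the wrapped stream
to Seekable, so wrapping a non-seekable stream only fails later with a
ClassCastException. A small defensive sketch (SeekableGuardSketch is an
invented name, not part of this code) that surfaces the contract at
construction time instead:

import java.io.InputStream;

import org.apache.hadoop.fs.Seekable;

final class SeekableGuardSketch {

  private SeekableGuardSketch() {
  }

  static InputStream requireSeekable(InputStream in) {
    // Fail fast with a clear message rather than a late ClassCastException.
    if (!(in instanceof Seekable)) {
      throw new IllegalArgumentException("Wrapper needs a Seekable stream, "
          + "got " + in.getClass().getName());
    }
    return in;
  }
}
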
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java
deleted file mode 100644
index efbf93b..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-
-/**
- * The output stream for Ozone file system.
- *
- * TODO: Make outputStream generic for both rest and rpc clients
- * This class is not thread safe.
- */
-public class OzoneFSOutputStream extends OutputStream {
-
-  private final OutputStream outputStream;
-
-  public OzoneFSOutputStream(OutputStream outputStream) {
-    this.outputStream = outputStream;
-  }
-
-  @Override
-  public void write(int b) throws IOException {
-    outputStream.write(b);
-  }
-
-  @Override
-  public void write(byte[] b, int off, int len) throws IOException {
-    outputStream.write(b, off, len);
-  }
-
-  @Override
-  public synchronized void flush() throws IOException {
-    outputStream.flush();
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    outputStream.close();
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSStorageStatistics.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSStorageStatistics.java
deleted file mode 100644
index 56c95df..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSStorageStatistics.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.StorageStatistics;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Iterator;
-import java.util.Map;
-import java.util.EnumMap;
-import java.util.Collections;
-import java.util.NoSuchElementException;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * Storage statistics for OzoneFileSystem.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class OzoneFSStorageStatistics extends StorageStatistics
-    implements Iterable<StorageStatistics.LongStatistic> {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneFSStorageStatistics.class);
-
-  public static final String NAME = "OzoneFSStorageStatistics";
-  private final Map<Statistic, AtomicLong> opsCount =
-      new EnumMap<>(Statistic.class);
-
-  public OzoneFSStorageStatistics() {
-    super(NAME);
-    for (Statistic opType : Statistic.values()) {
-      opsCount.put(opType, new AtomicLong(0));
-    }
-  }
-
-  /**
-   * Increment a specific counter.
-   * @param op operation
-   * @param count increment value
-   * @return the new value
-   */
-  public long incrementCounter(Statistic op, long count) {
-    long updated = opsCount.get(op).addAndGet(count);
-    LOG.debug("{} += {}  ->  {}", op, count, updated);
-    return updated;
-  }
-
-  private class LongIterator implements Iterator<LongStatistic> {
-    private Iterator<Map.Entry<Statistic, AtomicLong>> iterator =
-        Collections.unmodifiableSet(opsCount.entrySet()).iterator();
-
-    @Override
-    public boolean hasNext() {
-      return iterator.hasNext();
-    }
-
-    @Override
-    public LongStatistic next() {
-      if (!iterator.hasNext()) {
-        throw new NoSuchElementException();
-      }
-      final Map.Entry<Statistic, AtomicLong> entry = iterator.next();
-      return new LongStatistic(entry.getKey().getSymbol(),
-          entry.getValue().get());
-    }
-
-    @Override
-    public void remove() {
-      throw new UnsupportedOperationException();
-    }
-  }
-
-  @Override
-  public String getScheme() {
-    return OzoneConsts.OZONE_URI_SCHEME;
-  }
-
-  @Override
-  public Iterator<LongStatistic> getLongStatistics() {
-    return new LongIterator();
-  }
-
-  @Override
-  public Iterator<LongStatistic> iterator() {
-    return getLongStatistics();
-  }
-
-  @Override
-  public Long getLong(String key) {
-    final Statistic type = Statistic.fromSymbol(key);
-    return type == null ? null : opsCount.get(type).get();
-  }
-
-  @Override
-  public boolean isTracked(String key) {
-    return Statistic.fromSymbol(key) != null;
-  }
-
-  @Override
-  public void reset() {
-    for (AtomicLong value : opsCount.values()) {
-      value.set(0);
-    }
-  }
-
-}
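
The counter scheme above is an EnumMap fully populated in the constructor,
one AtomicLong per Statistic, so incrementCounter never meets a missing key
and needs no locking. The same pattern, reduced to a hedged, generic sketch
(CounterSketch is an invented name):

import java.util.EnumMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

final class CounterSketch<E extends Enum<E>> {

  private final Map<E, AtomicLong> counts;

  CounterSketch(Class<E> type) {
    counts = new EnumMap<>(type);
    for (E op : type.getEnumConstants()) {
      counts.put(op, new AtomicLong(0));  // eager init: no null checks later
    }
  }

  long add(E op, long delta) {
    return counts.get(op).addAndGet(delta);  // lock-free increment
  }
}
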
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
deleted file mode 100644
index 0514bd7..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.net.URI;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.GlobalStorageStatistics;
-import org.apache.hadoop.fs.StorageStatistics;
-import org.apache.hadoop.security.token.DelegationTokenIssuer;
-
-/**
- * The Ozone Filesystem implementation.
- * <p>
- * This subclass is marked as private as code should not be creating it
- * directly; use {@link FileSystem#get(Configuration)} and variants to create
- * one. If cast to {@link OzoneFileSystem}, extra methods and features may be
- * accessed. Consider those private and unstable.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class OzoneFileSystem extends BasicOzoneFileSystem
-    implements KeyProviderTokenIssuer {
-
-  private OzoneFSStorageStatistics storageStatistics;
-
-  @Override
-  public KeyProvider getKeyProvider() throws IOException {
-    return getAdapter().getKeyProvider();
-  }
-
-  @Override
-  public URI getKeyProviderUri() throws IOException {
-    return getAdapter().getKeyProviderUri();
-  }
-
-  @Override
-  public DelegationTokenIssuer[] getAdditionalTokenIssuers()
-      throws IOException {
-    KeyProvider keyProvider;
-    try {
-      keyProvider = getKeyProvider();
-    } catch (IOException ioe) {
-      LOG.debug("Error retrieving KeyProvider.", ioe);
-      return null;
-    }
-    if (keyProvider instanceof DelegationTokenIssuer) {
-      return new DelegationTokenIssuer[]{(DelegationTokenIssuer)keyProvider};
-    }
-    return null;
-  }
-
-  StorageStatistics getOzoneFSOpsCountStatistics() {
-    return storageStatistics;
-  }
-
-  @Override
-  protected void incrementCounter(Statistic statistic) {
-    if (storageStatistics != null) {
-      storageStatistics.incrementCounter(statistic, 1);
-    }
-  }
-
-  @Override
-  protected OzoneClientAdapter createAdapter(Configuration conf,
-      String bucketStr,
-      String volumeStr, String omHost, int omPort,
-      boolean isolatedClassloader) throws IOException {
-
-    this.storageStatistics =
-        (OzoneFSStorageStatistics) GlobalStorageStatistics.INSTANCE
-            .put(OzoneFSStorageStatistics.NAME,
-                OzoneFSStorageStatistics::new);
-
-    if (isolatedClassloader) {
-      return OzoneClientAdapterFactory.createAdapter(volumeStr, bucketStr,
-          storageStatistics);
-
-    } else {
-      return new OzoneClientAdapterImpl(omHost, omPort, conf,
-          volumeStr, bucketStr, storageStatistics);
-    }
-  }
-}
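
createAdapter above registers the statistics through
GlobalStorageStatistics.INSTANCE.put, which acts as get-or-create keyed by
NAME, so every OzoneFileSystem instance in a JVM shares one statistics
object. Reading those counters back elsewhere would look roughly like this
(a hedged sketch; GlobalStatsReadSketch is an invented name):

import org.apache.hadoop.fs.GlobalStorageStatistics;
import org.apache.hadoop.fs.StorageStatistics;

final class GlobalStatsReadSketch {

  private GlobalStatsReadSketch() {
  }

  static void dump(String name) {
    StorageStatistics stats = GlobalStorageStatistics.INSTANCE.get(name);
    if (stats == null) {
      return;  // no filesystem registered under that name yet
    }
    stats.getLongStatistics().forEachRemaining(s ->
        System.out.println(s.getName() + " = " + s.getValue()));
  }
}
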
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java
deleted file mode 100644
index e3d8742..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.shell.CommandFactory;
-import org.apache.hadoop.fs.shell.FsCommand;
-import org.apache.hadoop.util.ToolRunner;
-
-/** Provides command line access to an Ozone FileSystem. */
-@InterfaceAudience.Private
-public class OzoneFsShell extends FsShell {
-
-  private final String ozoneUsagePrefix = "Usage: ozone fs [generic options]";
-
-  /**
-   * Default ctor with no configuration.  Be sure to invoke
-   * {@link #setConf(Configuration)} with a valid configuration prior
-   * to running commands.
-   */
-  public OzoneFsShell() {
-    this(null);
-  }
-
-  /**
-   * Construct an OzoneFsShell with the given configuration.
-   *
-   * Commands can be executed via {@link #run(String[])}
-   * @param conf the hadoop configuration
-   */
-  public OzoneFsShell(Configuration conf) {
-    super(conf);
-  }
-
-  protected void registerCommands(CommandFactory factory) {
-    // TODO: DFSAdmin subclasses FsShell so need to protect the command
-    // registration.  This class should morph into a base class for
-    // commands, and then this method can be abstract
-    if (this.getClass().equals(OzoneFsShell.class)) {
-      factory.registerCommands(FsCommand.class);
-    }
-  }
-
-  @Override
-  protected String getUsagePrefix() {
-    return ozoneUsagePrefix;
-  }
-
-  /**
-   * Main entry point to execute fs commands.
-   *
-   * @param argv the command and its arguments
-   * @throws Exception upon error
-   */
-  public static void main(String[] argv) throws Exception {
-    OzoneFsShell shell = newShellInstance();
-    Configuration conf = new Configuration();
-    conf.setQuietMode(false);
-    shell.setConf(conf);
-    int res;
-    try {
-      res = ToolRunner.run(shell, argv);
-    } finally {
-      shell.close();
-    }
-    System.exit(res);
-  }
-
-  // TODO: this should be abstract in a base class
-  protected static OzoneFsShell newShellInstance() {
-    return new OzoneFsShell();
-  }
-}
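
main() above routes the shell through ToolRunner so Hadoop's generic options
(-D, -conf, -fs, ...) are stripped from argv and applied to the
Configuration before run() executes. A minimal, self-contained sketch of the
same wiring (ShellWiringSketch is an invented name):

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class ShellWiringSketch extends Configured implements Tool {

  @Override
  public int run(String[] args) {
    // Generic options were already consumed by ToolRunner at this point.
    System.out.println("remaining args: " + Arrays.toString(args));
    return 0;
  }

  public static void main(String[] argv) throws Exception {
    System.exit(
        ToolRunner.run(new Configuration(), new ShellWiringSketch(), argv));
  }
}
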
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java
deleted file mode 100644
index 136d999..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Statistics which are collected in OzoneFileSystem.
- * These statistics are available at a low level in
- * {@link OzoneFSStorageStatistics}.
- */
-public enum Statistic {
-  OBJECTS_RENAMED("objects_renamed",
-      "Total number of objects renamed within the object store."),
-  OBJECTS_CREATED("objects_created",
-      "Total number of objects created through the object store."),
-  OBJECTS_DELETED("objects_deleted",
-      "Total number of objects deleted from the object store."),
-  OBJECTS_READ("objects_read",
-      "Total number of objects read from the object store."),
-  OBJECTS_QUERY("objects_query",
-      "Total number of objects queried from the object store."),
-  OBJECTS_LIST("objects_list",
-      "Total number of object list query from the object store."),
-  INVOCATION_COPY_FROM_LOCAL_FILE(CommonStatisticNames.OP_COPY_FROM_LOCAL_FILE,
-      "Calls of copyFromLocalFile()"),
-  INVOCATION_CREATE(CommonStatisticNames.OP_CREATE,
-      "Calls of create()"),
-  INVOCATION_CREATE_NON_RECURSIVE(CommonStatisticNames.OP_CREATE_NON_RECURSIVE,
-      "Calls of createNonRecursive()"),
-  INVOCATION_DELETE(CommonStatisticNames.OP_DELETE,
-      "Calls of delete()"),
-  INVOCATION_EXISTS(CommonStatisticNames.OP_EXISTS,
-      "Calls of exists()"),
-  INVOCATION_GET_FILE_CHECKSUM(CommonStatisticNames.OP_GET_FILE_CHECKSUM,
-      "Calls of getFileChecksum()"),
-  INVOCATION_GET_FILE_STATUS(CommonStatisticNames.OP_GET_FILE_STATUS,
-      "Calls of getFileStatus()"),
-  INVOCATION_GLOB_STATUS(CommonStatisticNames.OP_GLOB_STATUS,
-      "Calls of globStatus()"),
-  INVOCATION_IS_DIRECTORY(CommonStatisticNames.OP_IS_DIRECTORY,
-      "Calls of isDirectory()"),
-  INVOCATION_IS_FILE(CommonStatisticNames.OP_IS_FILE,
-      "Calls of isFile()"),
-  INVOCATION_LIST_FILES(CommonStatisticNames.OP_LIST_FILES,
-      "Calls of listFiles()"),
-  INVOCATION_LIST_LOCATED_STATUS(CommonStatisticNames.OP_LIST_LOCATED_STATUS,
-      "Calls of listLocatedStatus()"),
-  INVOCATION_LIST_STATUS(CommonStatisticNames.OP_LIST_STATUS,
-      "Calls of listStatus()"),
-  INVOCATION_MKDIRS(CommonStatisticNames.OP_MKDIRS,
-      "Calls of mkdirs()"),
-  INVOCATION_OPEN(CommonStatisticNames.OP_OPEN,
-      "Calls of open()"),
-  INVOCATION_RENAME(CommonStatisticNames.OP_RENAME,
-      "Calls of rename()");
-
-  private static final Map<String, Statistic> SYMBOL_MAP =
-      new HashMap<>(Statistic.values().length);
-  static {
-    for (Statistic stat : values()) {
-      SYMBOL_MAP.put(stat.getSymbol(), stat);
-    }
-  }
-
-  Statistic(String symbol, String description) {
-    this.symbol = symbol;
-    this.description = description;
-  }
-
-  private final String symbol;
-  private final String description;
-
-  public String getSymbol() {
-    return symbol;
-  }
-
-  /**
-   * Get a statistic from a symbol.
-   * @param symbol statistic to look up
-   * @return the value or null.
-   */
-  public static Statistic fromSymbol(String symbol) {
-    return SYMBOL_MAP.get(symbol);
-  }
-
-  public String getDescription() {
-    return description;
-  }
-
-  /**
-   * The string value is simply the symbol.
-   * This makes this operation very low cost.
-   * @return the symbol of this statistic.
-   */
-  @Override
-  public String toString() {
-    return symbol;
-  }
-}
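
fromSymbol above is an O(1) lookup because SYMBOL_MAP is built once, in a
static initializer, from every enum constant. A hedged usage sketch
(SymbolLookupSketch is invented; it assumes the deleted enum is still on the
classpath):

import org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames;
import org.apache.hadoop.fs.ozone.Statistic;

final class SymbolLookupSketch {

  private SymbolLookupSketch() {
  }

  static void demo() {
    // Resolves the shared Hadoop name back to INVOCATION_RENAME.
    Statistic stat = Statistic.fromSymbol(CommonStatisticNames.OP_RENAME);
    System.out.println(stat);  // toString() prints the symbol
  }
}
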
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
deleted file mode 100644
index 93e82c3..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Ozone Filesystem.
- *
- * Apart from the public exceptions, everything here should be hidden as an
- * implementation detail.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-package org.apache.hadoop.fs.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file
diff --git a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.DtFetcher b/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.DtFetcher
deleted file mode 100644
index 6e86731..0000000
--- a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.DtFetcher
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-org.apache.hadoop.fs.ozone.O3fsDtFetcher
diff --git a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
deleted file mode 100644
index e0292bc..0000000
--- a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.ozone.security.OzoneTokenIdentifier
-org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier
\ No newline at end of file
diff --git a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
deleted file mode 100644
index bbb8221..0000000
--- a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-org.apache.hadoop.fs.ozone.BasicOzoneClientAdapterImpl$Renewer
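
The service entries above (and the DtFetcher/TokenIdentifier files before
them) are java.util.ServiceLoader registrations: Hadoop's security layer
scans resources named after the service type and instantiates every class
listed in them. A hedged sketch of the discovery side (ServiceDiscoverySketch
is an invented name):

import java.util.ServiceLoader;

import org.apache.hadoop.security.token.TokenRenewer;

final class ServiceDiscoverySketch {

  private ServiceDiscoverySketch() {
  }

  static void listRenewers() {
    // Instantiates each class named in META-INF/services/
    // org.apache.hadoop.security.token.TokenRenewer on the classpath.
    for (TokenRenewer renewer : ServiceLoader.load(TokenRenewer.class)) {
      System.out.println(renewer.getClass().getName());
    }
  }
}
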
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java
deleted file mode 100644
index 26a77eb..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.powermock.api.mockito.PowerMockito;
-import org.powermock.core.classloader.annotations.PrepareForTest;
-import org.powermock.modules.junit4.PowerMockRunner;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.when;
-
-/**
- * FilteredClassLoader test using mocks.
- */
-@RunWith(PowerMockRunner.class)
-@PrepareForTest({ FilteredClassLoader.class, OzoneFSInputStream.class})
-public class TestFilteredClassLoader {
-  @Test
-  public void testFilteredClassLoader() {
-    PowerMockito.mockStatic(System.class);
-    when(System.getenv("HADOOP_OZONE_DELEGATED_CLASSES"))
-        .thenReturn("org.apache.hadoop.fs.ozone.OzoneFSInputStream");
-
-    ClassLoader currentClassLoader =
-        TestFilteredClassLoader.class.getClassLoader();
-
-    List<URL> urls = new ArrayList<>();
-    ClassLoader classLoader = new FilteredClassLoader(
-        urls.toArray(new URL[0]), currentClassLoader);
-
-    try {
-      Class<?> loaded = classLoader.loadClass(
-          "org.apache.hadoop.fs.ozone.OzoneFSInputStream");
-      // Delegated classes must be served by the parent (current) loader,
-      // not re-defined inside the FilteredClassLoader.
-      assertEquals(currentClassLoader, loaded.getClassLoader());
-    } catch (ClassNotFoundException e) {
-      e.printStackTrace();
-    }
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
deleted file mode 100644
index 2e9e3a4..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.util.Arrays;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Test OzoneFSInputStream by reading through multiple interfaces.
- */
-public class TestOzoneFSInputStream {
-  private static MiniOzoneCluster cluster = null;
-  private static FileSystem fs;
-  private static Path filePath = null;
-  private static byte[] data = null;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * The cluster is started with ten datanodes and a 10 MB SCM block size.
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 10,
-        StorageUnit.MB);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(10)
-        .build();
-    cluster.waitForClusterToBeReady();
-
-    // create a volume and a bucket to be used by OzoneFileSystem
-    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
-
-    // Fetch the host and port for File System init
-    DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails();
-
-    // Set the fs.defaultFS and start the filesystem
-    String uri = String.format("%s://%s.%s/",
-        OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName());
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri);
-    fs =  FileSystem.get(conf);
-    int fileLen = 100 * 1024 * 1024;
-    data = DFSUtil.string2Bytes(RandomStringUtils.randomAlphanumeric(fileLen));
-    filePath = new Path("/" + RandomStringUtils.randomAlphanumeric(5));
-    try (FSDataOutputStream stream = fs.create(filePath)) {
-      stream.write(data);
-    }
-  }
-
-  /**
-   * Shut down the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() throws IOException {
-    fs.close();
-    cluster.shutdown();
-  }
-
-  @Test
-  public void testO3FSSingleByteRead() throws IOException {
-    FSDataInputStream inputStream = fs.open(filePath);
-    byte[] value = new byte[data.length];
-    int i = 0;
-    while (true) {
-      int val = inputStream.read();
-      if (val == -1) {
-        break;
-      }
-      value[i] = (byte)val;
-      Assert.assertEquals("value mismatch at:" + i, value[i], data[i]);
-      i++;
-    }
-    Assert.assertEquals(i, data.length);
-    Assert.assertTrue(Arrays.equals(value, data));
-    inputStream.close();
-  }
-
-  @Test
-  public void testO3FSMultiByteRead() throws IOException {
-    FSDataInputStream inputStream = fs.open(filePath);
-    byte[] value = new byte[data.length];
-    byte[] tmp = new byte[1024 * 1024];
-    int off = 0;
-    while (true) {
-      int val = inputStream.read(tmp);
-      if (val == -1) {
-        break;
-      }
-      // Copy only the bytes actually read; read() may return a short count.
-      System.arraycopy(tmp, 0, value, off, val);
-      off += val;
-    }
-    Assert.assertEquals(off, data.length);
-    Assert.assertTrue(Arrays.equals(value, data));
-    inputStream.close();
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
deleted file mode 100644
index 2a72101..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.GlobalStorageStatistics;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageStatistics;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Time;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
-import org.junit.After;
-import org.junit.Assert;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-
-/**
- * Test OzoneFileSystem Interfaces.
- *
- * This test exercises the various interfaces, i.e.
- * create, read, write, and getFileStatus.
- */
-@RunWith(Parameterized.class)
-public class TestOzoneFileInterfaces {
-
-  private String rootPath;
-  private String userName;
-
-  /**
-   * Parameter class to set absolute url/defaultFS handling.
-   * <p>
-   * Hadoop file systems can be used in multiple ways: using the defaultFS
-   * and a file path without the scheme, or using absolute URLs even with a
-   * different defaultFS. This parameter matrix tests both use cases.
-   */
-  @Parameters
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {{false, true}, {true, false}});
-  }
-
-  private boolean setDefaultFs;
-
-  private boolean useAbsolutePath;
-
-  private MiniOzoneCluster cluster = null;
-
-  private FileSystem fs;
-
-  private OzoneFileSystem o3fs;
-
-  private String volumeName;
-
-  private String bucketName;
-
-  private OzoneFSStorageStatistics statistics;
-
-  private OMMetrics omMetrics;
-
-  public TestOzoneFileInterfaces(boolean setDefaultFs,
-      boolean useAbsolutePath) {
-    this.setDefaultFs = setDefaultFs;
-    this.useAbsolutePath = useAbsolutePath;
-    GlobalStorageStatistics.INSTANCE.reset();
-  }
-
-  @Before
-  public void init() throws Exception {
-    volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
-    bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
-
-    OzoneConfiguration conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3)
-        .build();
-    cluster.waitForClusterToBeReady();
-
-    // create a volume and a bucket to be used by OzoneFileSystem
-    OzoneBucket bucket =
-        TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
-
-    rootPath = String
-        .format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucketName,
-            volumeName);
-    if (setDefaultFs) {
-      // Set the fs.defaultFS and start the filesystem
-      conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
-      fs = FileSystem.get(conf);
-    } else {
-      fs = FileSystem.get(new URI(rootPath + "/test.txt"), conf);
-    }
-    o3fs = (OzoneFileSystem) fs;
-    statistics = (OzoneFSStorageStatistics) o3fs.getOzoneFSOpsCountStatistics();
-    omMetrics = cluster.getOzoneManager().getMetrics();
-  }
-
-  @After
-  public void teardown() throws IOException {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-    IOUtils.closeQuietly(fs);
-  }
-
-  @Test
-  public void testFileSystemInit() throws IOException {
-    if (setDefaultFs) {
-      assertTrue(
-          "The initialized file system is not OzoneFileSystem but " +
-              fs.getClass(),
-          fs instanceof OzoneFileSystem);
-      assertEquals(OzoneConsts.OZONE_URI_SCHEME, fs.getUri().getScheme());
-      assertEquals(OzoneConsts.OZONE_URI_SCHEME, statistics.getScheme());
-    }
-  }
-
-  @Test
-  public void testOzFsReadWrite() throws IOException {
-    long currentTime = Time.now();
-    int stringLen = 20;
-    String data = RandomStringUtils.randomAlphanumeric(stringLen);
-    String filePath = RandomStringUtils.randomAlphanumeric(5);
-    Path path = createPath("/" + filePath);
-    try (FSDataOutputStream stream = fs.create(path)) {
-      stream.writeBytes(data);
-    }
-
-    assertEquals(statistics.getLong(
-        StorageStatistics.CommonStatisticNames.OP_CREATE).longValue(), 1);
-    assertEquals(statistics.getLong("objects_created").longValue(), 1);
-
-    FileStatus status = fs.getFileStatus(path);
-    assertEquals(statistics.getLong(
-        StorageStatistics.CommonStatisticNames.OP_GET_FILE_STATUS).longValue(),
-        1);
-    assertEquals(statistics.getLong("objects_query").longValue(), 1);
-    // The timestamp of the newly created file should always be greater than
-    // the time when the test was started
-    assertTrue("Modification time has not been recorded: " + status,
-        status.getModificationTime() > currentTime);
-
-    assertFalse(status.isDirectory());
-    assertEquals(FsPermission.getFileDefault(), status.getPermission());
-    verifyOwnerGroup(status);
-
-    try (FSDataInputStream inputStream = fs.open(path)) {
-      byte[] buffer = new byte[stringLen];
-      // This read will not change the offset inside the file
-      int readBytes = inputStream.read(0, buffer, 0, buffer.length);
-      String out = new String(buffer, 0, buffer.length, UTF_8);
-      assertEquals(data, out);
-      assertEquals(readBytes, buffer.length);
-      assertEquals(0, inputStream.getPos());
-
-      // The following read will change the internal offset
-      readBytes = inputStream.read(buffer, 0, buffer.length);
-      out = new String(buffer, 0, buffer.length, UTF_8);
-      assertEquals(data, out);
-      assertEquals(readBytes, buffer.length);
-      assertEquals(buffer.length, inputStream.getPos());
-    }
-    assertEquals(statistics.getLong(
-        StorageStatistics.CommonStatisticNames.OP_OPEN).longValue(), 1);
-    assertEquals(statistics.getLong("objects_read").longValue(), 1);
-  }
-
-  private void verifyOwnerGroup(FileStatus fileStatus) {
-    String owner = getCurrentUser();
-    assertEquals(owner, fileStatus.getOwner());
-    assertEquals(owner, fileStatus.getGroup());
-  }
-
-
-  @Test
-  public void testDirectory() throws IOException {
-    String dirPath = RandomStringUtils.randomAlphanumeric(5);
-    Path path = createPath("/" + dirPath);
-    assertTrue("Makedirs returned with false for the path " + path,
-        fs.mkdirs(path));
-
-    FileStatus status = fs.getFileStatus(path);
-    assertTrue("The created path is not directory.", status.isDirectory());
-
-    assertTrue(status.isDirectory());
-    assertEquals(FsPermission.getDirDefault(), status.getPermission());
-    verifyOwnerGroup(status);
-
-    assertEquals(0, status.getLen());
-
-    FileStatus[] statusList = fs.listStatus(createPath("/"));
-    assertEquals(1, statusList.length);
-    assertEquals(status, statusList[0]);
-
-    fs.getFileStatus(createPath("/"));
-    assertTrue("Root dir (/) is not a directory.", status.isDirectory());
-    assertEquals(0, status.getLen());
-  }
-
-  @Test
-  public void testListStatus() throws IOException {
-    List<Path> paths = new ArrayList<>();
-    String dirPath = RandomStringUtils.randomAlphanumeric(5);
-    Path path = createPath("/" + dirPath);
-    paths.add(path);
-    assertTrue("Makedirs returned with false for the path " + path,
-        fs.mkdirs(path));
-
-    long listObjects = statistics.getLong(Statistic.OBJECTS_LIST.getSymbol());
-    long omListStatus = omMetrics.getNumListStatus();
-    FileStatus[] statusList = fs.listStatus(createPath("/"));
-    assertEquals(1, statusList.length);
-    assertEquals(++listObjects,
-        statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()).longValue());
-    assertEquals(++omListStatus, omMetrics.getNumListStatus());
-    assertEquals(fs.getFileStatus(path), statusList[0]);
-
-    dirPath = RandomStringUtils.randomAlphanumeric(5);
-    path = createPath("/" + dirPath);
-    paths.add(path);
-    assertTrue("Makedirs returned with false for the path " + path,
-        fs.mkdirs(path));
-
-    statusList = fs.listStatus(createPath("/"));
-    assertEquals(2, statusList.length);
-    assertEquals(++listObjects,
-        statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()).longValue());
-    assertEquals(++omListStatus, omMetrics.getNumListStatus());
-    for (Path p : paths) {
-      assertTrue(Arrays.asList(statusList).contains(fs.getFileStatus(p)));
-    }
-  }
-
-  @Test
-  public void testOzoneManagerFileSystemInterface() throws IOException {
-    String dirPath = RandomStringUtils.randomAlphanumeric(5);
-
-    Path path = createPath("/" + dirPath);
-    assertTrue("Makedirs returned with false for the path " + path,
-        fs.mkdirs(path));
-
-    long numFileStatus =
-        cluster.getOzoneManager().getMetrics().getNumGetFileStatus();
-    FileStatus status = fs.getFileStatus(path);
-
-    Assert.assertEquals(numFileStatus + 1,
-        cluster.getOzoneManager().getMetrics().getNumGetFileStatus());
-    assertTrue(status.isDirectory());
-    assertEquals(FsPermission.getDirDefault(), status.getPermission());
-    verifyOwnerGroup(status);
-
-    long currentTime = System.currentTimeMillis();
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(o3fs.pathToKey(path))
-        .build();
-    OzoneFileStatus omStatus =
-        cluster.getOzoneManager().getFileStatus(keyArgs);
-    // Another getFileStatus call here increments the counter again.
-    Assert.assertEquals(numFileStatus + 2,
-        cluster.getOzoneManager().getMetrics().getNumGetFileStatus());
-
-    assertTrue("The created path is not directory.", omStatus.isDirectory());
-
-    // For directories, the time returned is the current time.
-    assertEquals(0, omStatus.getLen());
-    assertTrue(omStatus.getModificationTime() >= currentTime);
-    assertEquals(omStatus.getPath().getName(), o3fs.pathToKey(path));
-  }
-
-  @Test
-  public void testPathToKey() throws Exception {
-
-    assertEquals("a/b/1", o3fs.pathToKey(new Path("/a/b/1")));
-
-    assertEquals("user/" + getCurrentUser() + "/key1/key2",
-        o3fs.pathToKey(new Path("key1/key2")));
-
-    assertEquals("key1/key2",
-        o3fs.pathToKey(new Path("o3fs://test1/key1/key2")));
-  }
-
-  private String getCurrentUser() {
-    try {
-      return UserGroupInformation.getCurrentUser().getShortUserName();
-    } catch (IOException e) {
-      return OZONE_DEFAULT_USER;
-    }
-  }
-
-  private Path createPath(String relativePath) {
-    if (useAbsolutePath) {
-      return new Path(
-          rootPath + (relativePath.startsWith("/") ? "" : "/") + relativePath);
-    } else {
-      return new Path(relativePath);
-    }
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
deleted file mode 100644
index 0dc7c99..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientException;
-import org.apache.hadoop.ozone.client.OzoneKeyDetails;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import org.apache.commons.io.IOUtils;
-import org.junit.After;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-/**
- * Ozone file system tests that are not covered by contract tests.
- */
-public class TestOzoneFileSystem {
-
-  @Rule
-  public Timeout globalTimeout = new Timeout(300_000);
-
-  private static MiniOzoneCluster cluster = null;
-
-  private static FileSystem fs;
-  private static OzoneFileSystem o3fs;
-
-  private String volumeName;
-  private String bucketName;
-
-  private String rootPath;
-
-  @Before
-  public void init() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3)
-        .build();
-    cluster.waitForClusterToBeReady();
-
-    // create a volume and a bucket to be used by OzoneFileSystem
-    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
-    volumeName = bucket.getVolumeName();
-    bucketName = bucket.getName();
-
-    rootPath = String.format("%s://%s.%s/",
-        OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName());
-
-    // Set the fs.defaultFS and start the filesystem
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
-    fs = FileSystem.get(conf);
-    o3fs = (OzoneFileSystem) fs;
-  }
-
-  @After
-  public void teardown() throws IOException {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-    IOUtils.closeQuietly(fs);
-  }
-
-  @Test
-  public void testOzoneFsServiceLoader() throws IOException {
-    assertEquals(
-        FileSystem.getFileSystemClass(OzoneConsts.OZONE_URI_SCHEME, null),
-        OzoneFileSystem.class);
-  }
-
-  @Test
-  public void testCreateDoesNotAddParentDirKeys() throws Exception {
-    Path grandparent = new Path("/testCreateDoesNotAddParentDirKeys");
-    Path parent = new Path(grandparent, "parent");
-    Path child = new Path(parent, "child");
-    ContractTestUtils.touch(fs, child);
-
-    OzoneKeyDetails key = getKey(child, false);
-    assertEquals(key.getName(), o3fs.pathToKey(child));
-
-    // Creating a child should not add parent keys to the bucket
-    try {
-      getKey(parent, true);
-    } catch (IOException ex) {
-      assertKeyNotFoundException(ex);
-    }
-
-    // List status on the parent should show the child file
-    assertEquals("List status of parent should include the 1 child file", 1L,
-        (long)fs.listStatus(parent).length);
-    assertTrue("Parent directory does not appear to be a directory",
-        fs.getFileStatus(parent).isDirectory());
-  }
-
-  @Test
-  public void testDeleteCreatesFakeParentDir() throws Exception {
-    Path grandparent = new Path("/testDeleteCreatesFakeParentDir");
-    Path parent = new Path(grandparent, "parent");
-    Path child = new Path(parent, "child");
-    ContractTestUtils.touch(fs, child);
-
-    // Verify that parent dir key does not exist
-    // Creating a child should not add parent keys to the bucket
-    try {
-      getKey(parent, true);
-    } catch (IOException ex) {
-      assertKeyNotFoundException(ex);
-    }
-
-    // Delete the child key
-    fs.delete(child, false);
-
-    // Deleting the only child should create the parent dir key if it does
-    // not exist
-    String parentKey = o3fs.pathToKey(parent) + "/";
-    OzoneKeyDetails parentKeyInfo = getKey(parent, true);
-    assertEquals(parentKey, parentKeyInfo.getName());
-  }
-
-  @Test
-  public void testListStatus() throws Exception {
-    Path parent = new Path("/testListStatus");
-    Path file1 = new Path(parent, "key1");
-    Path file2 = new Path(parent, "key2");
-    ContractTestUtils.touch(fs, file1);
-    ContractTestUtils.touch(fs, file2);
-
-    // ListStatus on a directory should return all subdirs along with
-    // files, even if there exists a file and sub-dir with the same name.
-    FileStatus[] fileStatuses = o3fs.listStatus(parent);
-    assertEquals("FileStatus did not return all children of the directory",
-        2, fileStatuses.length);
-
-    // ListStatus should return only the immediate children of a directory.
-    Path file3 = new Path(parent, "dir1/key3");
-    Path file4 = new Path(parent, "dir1/key4");
-    ContractTestUtils.touch(fs, file3);
-    ContractTestUtils.touch(fs, file4);
-    fileStatuses = o3fs.listStatus(parent);
-    assertEquals("FileStatus did not return all children of the directory",
-        3, fileStatuses.length);
-  }
-
-  /**
-   * Tests listStatus operation on root directory.
-   */
-  @Test
-  public void testListStatusOnRoot() throws Exception {
-    Path root = new Path("/");
-    Path dir1 = new Path(root, "dir1");
-    Path dir12 = new Path(dir1, "dir12");
-    Path dir2 = new Path(root, "dir2");
-    fs.mkdirs(dir12);
-    fs.mkdirs(dir2);
-
-    // ListStatus on root should return dir1 (even though /dir1 key does not
-    // exist) and dir2 only. dir12 is not an immediate child of root and
-    // hence should not be listed.
-    FileStatus[] fileStatuses = o3fs.listStatus(root);
-    assertEquals("FileStatus should return only the immediate children", 2,
-        fileStatuses.length);
-
-    // Verify that dir12 is not included in the result of the listStatus on root
-    String fileStatus1 = fileStatuses[0].getPath().toUri().getPath();
-    String fileStatus2 = fileStatuses[1].getPath().toUri().getPath();
-    assertFalse(fileStatus1.equals(dir12.toString()));
-    assertFalse(fileStatus2.equals(dir12.toString()));
-  }
-
-  /**
-   * Tests listStatus operation on a large directory.
-   */
-  @Test
-  public void testListStatusOnLargeDirectory() throws Exception {
-    Path root = new Path("/");
-    Set<String> paths = new TreeSet<>();
-    int numDirs = 5111;
-    for(int i = 0; i < numDirs; i++) {
-      Path p = new Path(root, String.valueOf(i));
-      fs.mkdirs(p);
-      paths.add(p.getName());
-    }
-
-    FileStatus[] fileStatuses = o3fs.listStatus(root);
-    assertEquals(
-        "Total directories listed do not match the existing directories",
-        numDirs, fileStatuses.length);
-
-    for (int i=0; i < numDirs; i++) {
-      assertTrue(paths.contains(fileStatuses[i].getPath().getName()));
-    }
-  }
-
-  /**
-   * Tests listStatus on a path with subdirs.
-   */
-  @Test
-  public void testListStatusOnSubDirs() throws Exception {
-    // Create the following key structure
-    //      /dir1/dir11/dir111
-    //      /dir1/dir12
-    //      /dir1/dir12/file121
-    //      /dir2
-    // ListStatus on /dir1 should return only its immediate subdirs,
-    // /dir1/dir11 and /dir1/dir12. Deeper descendants
-    // (/dir1/dir12/file121 and /dir1/dir11/dir111) should not be
-    // returned by listStatus.
-    Path dir1 = new Path("/dir1");
-    Path dir11 = new Path(dir1, "dir11");
-    Path dir111 = new Path(dir11, "dir111");
-    Path dir12 = new Path(dir1, "dir12");
-    Path file121 = new Path(dir12, "file121");
-    Path dir2 = new Path("/dir2");
-    fs.mkdirs(dir111);
-    fs.mkdirs(dir12);
-    ContractTestUtils.touch(fs, file121);
-    fs.mkdirs(dir2);
-
-    FileStatus[] fileStatuses = o3fs.listStatus(dir1);
-    assertEquals("FileStatus should return only the immediate children", 2,
-        fileStatuses.length);
-
-    // Verify that the two children of /dir1 returned by listStatus operation
-    // are /dir1/dir11 and /dir1/dir12.
-    String fileStatus1 = fileStatuses[0].getPath().toUri().getPath();
-    String fileStatus2 = fileStatuses[1].getPath().toUri().getPath();
-    assertTrue(fileStatus1.equals(dir11.toString()) ||
-        fileStatus1.equals(dir12.toString()));
-    assertTrue(fileStatus2.equals(dir11.toString()) ||
-        fileStatus2.equals(dir12.toString()));
-  }
-
-  @Test
-  public void testNonExplicitlyCreatedPathExistsAfterItsLeafsWereRemoved()
-      throws Exception {
-    Path source = new Path("/source");
-    Path interimPath = new Path(source, "interimPath");
-    Path leafInsideInterimPath = new Path(interimPath, "leaf");
-    Path target = new Path("/target");
-    Path leafInTarget = new Path(target, "leaf");
-
-    fs.mkdirs(source);
-    fs.mkdirs(target);
-    fs.mkdirs(leafInsideInterimPath);
-    assertTrue(fs.rename(leafInsideInterimPath, leafInTarget));
-
-    // after rename listStatus for interimPath should succeed and
-    // interimPath should have no children
-    FileStatus[] statuses = fs.listStatus(interimPath);
-    assertNotNull("liststatus returns a null array", statuses);
-    assertEquals("Statuses array is not empty", 0, statuses.length);
-    FileStatus fileStatus = fs.getFileStatus(interimPath);
-    assertEquals("FileStatus does not point to interimPath",
-        interimPath.getName(), fileStatus.getPath().getName());
-  }
-
-  private OzoneKeyDetails getKey(Path keyPath, boolean isDirectory)
-      throws IOException, OzoneClientException {
-    String key = o3fs.pathToKey(keyPath);
-    if (isDirectory) {
-      key = key + "/";
-    }
-    return cluster.getClient().getObjectStore().getVolume(volumeName)
-        .getBucket(bucketName).getKey(key);
-  }
-
-  private void assertKeyNotFoundException(IOException ex) {
-    GenericTestUtils.assertExceptionContains("KEY_NOT_FOUND", ex);
-  }
-}
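TestOzoneFileSystem above revolves around one key-naming convention: a file maps to a key equal to its path, a directory maps to the same path with a trailing slash, and parent keys are materialized only when needed (for example, after deleting a directory's last child). A hedged sketch of that convention; the helper names are illustrative, not the deleted OzoneFileSystem API:

// Illustrative helpers for the convention the tests assert.
final class OzoneKeyConvention {
  static String fileKey(String path) {
    return path;            // e.g. "parent/child"
  }
  static String dirKey(String path) {
    return path + "/";      // e.g. "parent/" -- the "fake" parent dir key
  }
  static boolean isDirKey(String key) {
    return key.endsWith("/");
  }
}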
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java
deleted file mode 100644
index 51fd3c8..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.ozone;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.net.URI;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.powermock.api.mockito.PowerMockito;
-import org.powermock.core.classloader.annotations.PrepareForTest;
-import org.powermock.modules.junit4.PowerMockRunner;
-
-/**
- * Ozone File system tests that are light weight and use mocks.
- */
-@RunWith(PowerMockRunner.class)
-@PrepareForTest({ OzoneClientFactory.class, UserGroupInformation.class })
-public class TestOzoneFileSystemWithMocks {
-
-  @Test
-  public void testFSUriWithHostPortOverrides() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    OzoneClient ozoneClient = mock(OzoneClient.class);
-    ObjectStore objectStore = mock(ObjectStore.class);
-    OzoneVolume volume = mock(OzoneVolume.class);
-    OzoneBucket bucket = mock(OzoneBucket.class);
-
-    when(ozoneClient.getObjectStore()).thenReturn(objectStore);
-    when(objectStore.getVolume(eq("volume1"))).thenReturn(volume);
-    when(volume.getBucket("bucket1")).thenReturn(bucket);
-
-    PowerMockito.mockStatic(OzoneClientFactory.class);
-    PowerMockito.when(OzoneClientFactory.getRpcClient(eq("local.host"),
-        eq(5899), eq(conf))).thenReturn(ozoneClient);
-
-    UserGroupInformation ugi = mock(UserGroupInformation.class);
-    PowerMockito.mockStatic(UserGroupInformation.class);
-    PowerMockito.when(UserGroupInformation.getCurrentUser()).thenReturn(ugi);
-    when(ugi.getShortUserName()).thenReturn("user1");
-
-    URI uri = new URI("o3fs://bucket1.volume1.local.host:5899");
-
-    FileSystem fileSystem = FileSystem.get(uri, conf);
-    OzoneFileSystem ozfs = (OzoneFileSystem) fileSystem;
-
-    assertEquals(ozfs.getUri().getAuthority(),
-        "bucket1.volume1.local.host:5899");
-    PowerMockito.verifyStatic();
-    OzoneClientFactory.getRpcClient("local.host", 5899, conf);
-  }
-
-  @Test
-  public void testFSUriWithHostPortUnspecified() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    final int omPort = OmUtils.getOmRpcPort(conf);
-
-    OzoneClient ozoneClient = mock(OzoneClient.class);
-    ObjectStore objectStore = mock(ObjectStore.class);
-    OzoneVolume volume = mock(OzoneVolume.class);
-    OzoneBucket bucket = mock(OzoneBucket.class);
-
-    when(ozoneClient.getObjectStore()).thenReturn(objectStore);
-    when(objectStore.getVolume(eq("volume1"))).thenReturn(volume);
-    when(volume.getBucket("bucket1")).thenReturn(bucket);
-
-    PowerMockito.mockStatic(OzoneClientFactory.class);
-    PowerMockito.when(OzoneClientFactory.getRpcClient(eq("local.host"),
-        eq(omPort), eq(conf))).thenReturn(ozoneClient);
-
-    UserGroupInformation ugi = mock(UserGroupInformation.class);
-    PowerMockito.mockStatic(UserGroupInformation.class);
-    PowerMockito.when(UserGroupInformation.getCurrentUser()).thenReturn(ugi);
-    when(ugi.getShortUserName()).thenReturn("user1");
-
-    URI uri = new URI("o3fs://bucket1.volume1.local.host");
-
-    FileSystem fileSystem = FileSystem.get(uri, conf);
-    OzoneFileSystem ozfs = (OzoneFileSystem) fileSystem;
-
-    assertEquals(ozfs.getUri().getHost(), "bucket1.volume1.local.host");
-    // The URI doesn't contain a port number, so expect -1 from getPort().
-    assertEquals(ozfs.getUri().getPort(), -1);
-    PowerMockito.verifyStatic();
-    // Check the actual port number in use
-    OzoneClientFactory.getRpcClient("local.host", omPort, conf);
-  }
-
-  @Test
-  public void testFSUriHostVersionDefault() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    OzoneClient ozoneClient = mock(OzoneClient.class);
-    ObjectStore objectStore = mock(ObjectStore.class);
-    OzoneVolume volume = mock(OzoneVolume.class);
-    OzoneBucket bucket = mock(OzoneBucket.class);
-
-    when(ozoneClient.getObjectStore()).thenReturn(objectStore);
-    when(objectStore.getVolume(eq("volume1"))).thenReturn(volume);
-    when(volume.getBucket("bucket1")).thenReturn(bucket);
-
-    PowerMockito.mockStatic(OzoneClientFactory.class);
-    PowerMockito.when(OzoneClientFactory.getRpcClient(eq(conf)))
-        .thenReturn(ozoneClient);
-
-    UserGroupInformation ugi = mock(UserGroupInformation.class);
-    PowerMockito.mockStatic(UserGroupInformation.class);
-    PowerMockito.when(UserGroupInformation.getCurrentUser()).thenReturn(ugi);
-    when(ugi.getShortUserName()).thenReturn("user1");
-
-    URI uri = new URI("o3fs://bucket1.volume1/key");
-
-    FileSystem fileSystem = FileSystem.get(uri, conf);
-    OzoneFileSystem ozfs = (OzoneFileSystem) fileSystem;
-
-    assertEquals(ozfs.getUri().getAuthority(), "bucket1.volume1");
-    PowerMockito.verifyStatic();
-    OzoneClientFactory.getRpcClient(conf);
-  }
-}
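The three mock-based tests above fix the interpretation of an o3fs authority: the first two dot-separated labels are bucket and volume, an optional remainder names the OM host, and the port falls back to the configured OM RPC port when omitted. A rough sketch of that parsing under those assumptions (the class below is hypothetical, not the deleted implementation):

import java.net.URI;

// Hypothetical parse of an o3fs authority such as
// "bucket1.volume1.local.host:5899".
final class O3fsAuthority {
  final String bucket;
  final String volume;
  final String omHost;   // null when only bucket.volume is given
  final int omPort;      // -1 when the URI carries no port

  O3fsAuthority(URI uri) {
    // Assumes at least "bucket.volume"; a third segment, if any, is the host.
    String[] parts = uri.getHost().split("\\.", 3);
    bucket = parts[0];
    volume = parts[1];
    omHost = parts.length == 3 ? parts[2] : null;
    omPort = uri.getPort();
  }
}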
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
deleted file mode 100644
index ab35191..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
+++ /dev/null
@@ -1,348 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMStorage;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.ratis.util.LifeCycle;
-import org.hamcrest.core.StringContains;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collection;
-import java.util.Optional;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsUtils.getHostName;
-import static org.apache.hadoop.hdds.HddsUtils.getHostPort;
-
-/**
- * Test client-side URI handling with Ozone Manager HA.
- */
-public class TestOzoneFsHAURLs {
-  public static final Logger LOG = LoggerFactory.getLogger(
-      TestOzoneFsHAURLs.class);
-
-  private OzoneConfiguration conf;
-  private MiniOzoneCluster cluster;
-  private String omId;
-  private String omServiceId;
-  private String clusterId;
-  private String scmId;
-  private OzoneManager om;
-  private int numOfOMs;
-
-  private String volumeName;
-  private String bucketName;
-  private String rootPath;
-
-  private final String o3fsImplKey =
-      "fs." + OzoneConsts.OZONE_URI_SCHEME + ".impl";
-  private final String o3fsImplValue =
-      "org.apache.hadoop.fs.ozone.OzoneFileSystem";
-
-  private static final long LEADER_ELECTION_TIMEOUT = 500L;
-
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    omId = UUID.randomUUID().toString();
-    omServiceId = "om-service-test1";
-    numOfOMs = 3;
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    final String path = GenericTestUtils.getTempPath(omId);
-    java.nio.file.Path metaDirPath = java.nio.file.Paths.get(path, "om-meta");
-    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true);
-    conf.setTimeDuration(
-        OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
-        LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS);
-
-    OMStorage omStore = new OMStorage(conf);
-    omStore.setClusterId(clusterId);
-    omStore.setScmId(scmId);
-    // writes the version file properties
-    omStore.initialize();
-
-    // Start the cluster
-    cluster = MiniOzoneCluster.newHABuilder(conf)
-        .setClusterId(clusterId)
-        .setScmId(scmId)
-        .setOMServiceId(omServiceId)
-        .setNumOfOzoneManagers(numOfOMs)
-        .build();
-    cluster.waitForClusterToBeReady();
-
-    om = cluster.getOzoneManager();
-    Assert.assertEquals(LifeCycle.State.RUNNING, om.getOmRatisServerState());
-
-    volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    ObjectStore objectStore =
-        OzoneClientFactory.getRpcClient(omServiceId, conf).getObjectStore();
-    objectStore.createVolume(volumeName);
-
-    OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName);
-    bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    retVolumeinfo.createBucket(bucketName);
-
-    rootPath = String.format("%s://%s.%s.%s/", OzoneConsts.OZONE_URI_SCHEME,
-        bucketName, volumeName, omServiceId);
-    // Set fs.defaultFS
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
-    FileSystem fs = FileSystem.get(conf);
-    // Create some dirs
-    Path root = new Path("/");
-    Path dir1 = new Path(root, "dir1");
-    Path dir12 = new Path(dir1, "dir12");
-    Path dir2 = new Path(root, "dir2");
-    fs.mkdirs(dir12);
-    fs.mkdirs(dir2);
-  }
-
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  /**
-   * @return the leader OM's RPC address in the MiniOzoneHACluster
-   */
-  private String getLeaderOMNodeAddr() {
-    String leaderOMNodeAddr = null;
-    Collection<String> omNodeIds = OmUtils.getOMNodeIds(conf, omServiceId);
-    assert(omNodeIds.size() == numOfOMs);
-    MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster;
-    // Note: this loop could be moved into MiniOzoneHAClusterImpl.
-    for (String omNodeId : omNodeIds) {
-      // Find the leader OM
-      if (!haCluster.getOzoneManager(omNodeId).isLeader()) {
-        continue;
-      }
-      // ozone.om.address.omServiceId.omNode
-      String leaderOMNodeAddrKey = OmUtils.addKeySuffixes(
-          OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodeId);
-      leaderOMNodeAddr = conf.get(leaderOMNodeAddrKey);
-      LOG.info("Found leader OM: nodeId=" + omNodeId + ", " +
-          leaderOMNodeAddrKey + "=" + leaderOMNodeAddr);
-      // Leader found, no need to continue loop
-      break;
-    }
-    // There has to be a leader
-    assert(leaderOMNodeAddr != null);
-    return leaderOMNodeAddr;
-  }
-
-  /**
-   * Get host name from an address. This uses getHostName() internally.
-   * @param addr Address with port number
-   * @return Host name
-   */
-  private String getHostFromAddress(String addr) {
-    Optional<String> hostOptional = getHostName(addr);
-    assert(hostOptional.isPresent());
-    return hostOptional.get();
-  }
-
-  /**
-   * Get port number from an address. This uses getHostPort() internally.
-   * @param addr Address with port
-   * @return Port number
-   */
-  private int getPortFromAddress(String addr) {
-    Optional<Integer> portOptional = getHostPort(addr);
-    assert(portOptional.isPresent());
-    return portOptional.get();
-  }
-
-  /**
-   * Test OM HA URLs with qualified fs.defaultFS.
-   * @throws Exception
-   */
-  @Test
-  public void testWithQualifiedDefaultFS() throws Exception {
-    OzoneConfiguration clientConf = new OzoneConfiguration(conf);
-    clientConf.setQuietMode(false);
-    clientConf.set(o3fsImplKey, o3fsImplValue);
-    // fs.defaultFS = o3fs://bucketName.volumeName.omServiceId/
-    clientConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
-
-    // Pick leader OM's RPC address and assign it to ozone.om.address for
-    // the test case: ozone fs -ls o3fs://bucket.volume.om1/
-    String leaderOMNodeAddr = getLeaderOMNodeAddr();
-    // ozone.om.address was set to service id in MiniOzoneHAClusterImpl
-    clientConf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, leaderOMNodeAddr);
-
-    FsShell shell = new FsShell(clientConf);
-    int res;
-    try {
-      // Test case 1: ozone fs -ls /
-      // Expectation: Success.
-      res = ToolRunner.run(shell, new String[] {"-ls", "/"});
-      // Check return value, should be 0 (success)
-      Assert.assertEquals(res, 0);
-
-      // Test case 2: ozone fs -ls o3fs:///
-      // Expectation: Success. fs.defaultFS is a fully qualified path.
-      res = ToolRunner.run(shell, new String[] {"-ls", "o3fs:///"});
-      Assert.assertEquals(res, 0);
-
-      // Test case 3: ozone fs -ls o3fs://bucket.volume/
-      // Expectation: Fail. Must have service id or host name when HA is enabled
-      String unqualifiedPath1 = String.format("%s://%s.%s/",
-          OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
-      try (GenericTestUtils.SystemErrCapturer capture =
-          new GenericTestUtils.SystemErrCapturer()) {
-        res = ToolRunner.run(shell, new String[] {"-ls", unqualifiedPath1});
-        // Check stderr, inspired by testDFSWithInvalidCommmand
-        Assert.assertThat("Command did not print the error message " +
-                "correctly for test case: ozone fs -ls o3fs://bucket.volume/",
-            capture.getOutput(), StringContains.containsString(
-                "-ls: Service ID or host name must not"
-                    + " be omitted when ozone.om.service.ids is defined."));
-      }
-      // Check return value, should be -1 (failure)
-      Assert.assertEquals(res, -1);
-
-      // Test case 4: ozone fs -ls o3fs://bucket.volume.om1/
-      // Expectation: Success. The client should use the port number
-      // set in ozone.om.address.
-      String qualifiedPath1 = String.format("%s://%s.%s.%s/",
-          OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName,
-          getHostFromAddress(leaderOMNodeAddr));
-      res = ToolRunner.run(shell, new String[] {"-ls", qualifiedPath1});
-      // Note: this test case will fail if the port is not from the leader node
-      Assert.assertEquals(res, 0);
-
-      // Test case 5: ozone fs -ls o3fs://bucket.volume.om1:port/
-      // Expectation: Success.
-      String qualifiedPath2 = String.format("%s://%s.%s.%s/",
-          OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName,
-          leaderOMNodeAddr);
-      res = ToolRunner.run(shell, new String[] {"-ls", qualifiedPath2});
-      Assert.assertEquals(res, 0);
-
-      // Test case 6: ozone fs -ls o3fs://bucket.volume.id1/
-      // Expectation: Success.
-      String qualifiedPath3 = String.format("%s://%s.%s.%s/",
-          OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName, omServiceId);
-      res = ToolRunner.run(shell, new String[] {"-ls", qualifiedPath3});
-      Assert.assertEquals(res, 0);
-
-      // Test case 7: ozone fs -ls o3fs://bucket.volume.id1:port/
-      // Expectation: Fail. Service ID does not use port information.
-      // Use the port number from leader OM (doesn't really matter)
-      String unqualifiedPath2 = String.format("%s://%s.%s.%s:%d/",
-          OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName,
-          omServiceId, getPortFromAddress(leaderOMNodeAddr));
-      try (GenericTestUtils.SystemErrCapturer capture =
-          new GenericTestUtils.SystemErrCapturer()) {
-        res = ToolRunner.run(shell, new String[] {"-ls", unqualifiedPath2});
-        // Check stderr
-        Assert.assertThat("Command did not print the error message " +
-                "correctly for test case: "
-                + "ozone fs -ls o3fs://bucket.volume.id1:port/",
-            capture.getOutput(), StringContains.containsString(
-                "does not use port information"));
-      }
-      // Check return value, should be -1 (failure)
-      Assert.assertEquals(res, -1);
-    } finally {
-      shell.close();
-    }
-  }
-
-  /**
-   * Helper for testOtherDefaultFS(); runs fs -ls o3fs:/// against the
-   * given fs.defaultFS.
-   *
-   * @param defaultFS Desired fs.defaultFS to be used in the test
-   * @throws Exception
-   */
-  private void testWithDefaultFS(String defaultFS) throws Exception {
-    OzoneConfiguration clientConf = new OzoneConfiguration(conf);
-    clientConf.setQuietMode(false);
-    clientConf.set(o3fsImplKey, o3fsImplValue);
-    // fs.defaultFS = the passed-in defaultFS value
-    clientConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
-        defaultFS);
-
-    FsShell shell = new FsShell(clientConf);
-    try {
-      // Test case: ozone fs -ls o3fs:///
-      // Expectation: Fail. fs.defaultFS is not a qualified o3fs URI.
-      int res = ToolRunner.run(shell, new String[] {"-ls", "o3fs:///"});
-      Assert.assertEquals(res, -1);
-    } finally {
-      shell.close();
-    }
-  }
-
-  /**
-   * Test OM HA URLs with some unqualified fs.defaultFS.
-   * @throws Exception
-   */
-  @Test
-  public void testOtherDefaultFS() throws Exception {
-    // Test scenarios where fs.defaultFS isn't a fully qualified o3fs
-
-    // fs.defaultFS = file:///
-    testWithDefaultFS(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT);
-
-    // fs.defaultFS = hdfs://ns1/
-    testWithDefaultFS("hdfs://ns1/");
-
-    // fs.defaultFS = o3fs:///
-    String unqualifiedFs1 = String.format(
-        "%s:///", OzoneConsts.OZONE_URI_SCHEME);
-    testWithDefaultFS(unqualifiedFs1);
-
-    // fs.defaultFS = o3fs://bucketName.volumeName/
-    String unqualifiedFs2 = String.format("%s://%s.%s/",
-        OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
-    testWithDefaultFS(unqualifiedFs2);
-  }
-}
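TestOzoneFsHAURLs asserts two HA addressing rules: when ozone.om.service.ids is defined, the o3fs authority must carry a service ID or host name, and a service ID must never be combined with a port. A sketch of that validation; the method is hypothetical, and the messages mirror the strings the test expects:

// Sketch of the HA authority checks the shell tests assert above.
final class HaUrlCheck {
  static void validate(String omAddressPart, boolean isServiceId,
      boolean serviceIdsDefined, int port) {
    if (serviceIdsDefined && omAddressPart == null) {
      throw new IllegalArgumentException("Service ID or host name must not"
          + " be omitted when ozone.om.service.ids is defined.");
    }
    if (isServiceId && port != -1) {
      throw new IllegalArgumentException(
          "Service ID does not use port information");
    }
  }
}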
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java
deleted file mode 100644
index 1d584651..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-
-import org.junit.After;
-import static org.junit.Assert.assertTrue;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Unit Test for verifying directory rename operation through OzoneFS.
- */
-public class TestOzoneFsRenameDir {
-  public static final Logger LOG = LoggerFactory.getLogger(
-      TestOzoneFsRenameDir.class);
-
-  private MiniOzoneCluster cluster = null;
-  private OzoneConfiguration conf = null;
-  private static FileSystem fs;
-
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(1)
-        .build();
-    cluster.waitForClusterToBeReady();
-
-    // create a volume and a bucket to be used by OzoneFileSystem
-    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
-
-    // Fetch the host and port for File System init
-    DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails();
-
-    // Set the fs.defaultFS and start the filesystem
-    String uri = String.format("%s://%s.%s/",
-        OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName());
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri);
-    fs = FileSystem.get(conf);
-    LOG.info("fs.defaultFS={}", fs.getUri());
-  }
-
-  @After
-  public void teardown() {
-    if (cluster != null) {
-      cluster.shutdown();
-      cluster = null;
-    }
-  }
-
-  /**
-   * Tests directory rename operation through OzoneFS.
-   */
-  @Test(timeout=300_000)
-  public void testRenameDir() throws IOException {
-    final String dir = "/root_dir/dir1";
-    final Path source = new Path(fs.getUri().toString() + dir);
-    final Path dest = new Path(source.toString() + ".renamed");
-    // Add a sub-dir to the directory to be moved.
-    final Path subdir = new Path(source, "sub_dir1");
-    fs.mkdirs(subdir);
-    LOG.info("Created dir {}", subdir);
-    LOG.info("Will move {} to {}", source, dest);
-    fs.rename(source, dest);
-    assertTrue("Directory rename failed", fs.exists(dest));
-    // Verify that the subdir is also renamed i.e. keys corresponding to the
-    // sub-directories of the renamed directory have also been renamed.
-    assertTrue("Keys under the renamed direcotry not renamed",
-        fs.exists(new Path(dest, "sub_dir1")));
-  }
-}
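Since a directory is just a key prefix, the rename test above implies a prefix rewrite: every key under the source directory is rewritten under the destination. A minimal sketch of that rule over an in-memory sorted map (illustrative only; the real operation runs in the Ozone Manager):

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

// Illustrative prefix rename over a sorted key map; shows only the
// key-rewrite rule the test verifies ("root_dir/dir1/sub_dir1/" becomes
// "root_dir/dir1.renamed/sub_dir1/").
final class PrefixRenameSketch {
  static void renameDir(TreeMap<String, byte[]> keys,
      String srcPrefix, String dstPrefix) {
    // Snapshot the matching keys first to avoid mutating while iterating.
    List<String> toMove = new ArrayList<>(
        keys.subMap(srcPrefix, srcPrefix + Character.MAX_VALUE).keySet());
    for (String key : toMove) {
      keys.put(dstPrefix + key.substring(srcPrefix.length()),
          keys.remove(key));
    }
  }
}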
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
deleted file mode 100644
index dd54315..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-/**
- * Ozone contract tests creating files.
- */
-public class ITestOzoneContractCreate extends AbstractContractCreateTest {
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
deleted file mode 100644
index f0a3d8d..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-/**
- * Ozone contract tests covering deletes.
- */
-public class ITestOzoneContractDelete extends AbstractContractDeleteTest {
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java
deleted file mode 100644
index 134a9ad..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-
-/**
- * Contract test suite covering Ozone integration with DistCp,
- * inheriting the generic cases from AbstractContractDistCpTest.
- */
-public class ITestOzoneContractDistCp extends AbstractContractDistCpTest {
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected OzoneContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
deleted file mode 100644
index 362b22f..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Ozone contract tests covering getFileStatus.
- */
-public class ITestOzoneContractGetFileStatus
-    extends AbstractContractGetFileStatusTest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ITestOzoneContractGetFileStatus.class);
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-
-  @Override
-  public void teardown() throws Exception {
-    LOG.info("FS details {}", getFileSystem());
-    super.teardown();
-  }
-
-  @Override
-  protected Configuration createConfiguration() {
-    return super.createConfiguration();
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
deleted file mode 100644
index bc0de5d..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-/**
- * Test dir operations on Ozone.
- */
-public class ITestOzoneContractMkdir extends AbstractContractMkdirTest {
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
deleted file mode 100644
index 0bc57d4..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-/**
- * Ozone contract tests opening files.
- */
-public class ITestOzoneContractOpen extends AbstractContractOpenTest {
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
deleted file mode 100644
index 8ce1d1b..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-/**
- * Ozone contract tests covering rename.
- */
-public class ITestOzoneContractRename extends AbstractContractRenameTest {
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
deleted file mode 100644
index 3156eb2..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import java.io.IOException;
-
-/**
- * Ozone contract test for ROOT directory operations.
- */
-public class ITestOzoneContractRootDir extends
-    AbstractContractRootDirectoryTest {
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
deleted file mode 100644
index c4bc0ff..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-/**
- * Ozone contract tests covering file seek.
- */
-public class ITestOzoneContractSeek extends AbstractContractSeekTest {
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
deleted file mode 100644
index 56d63ac..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-
-import org.junit.Assert;
-
-/**
- * The contract of Ozone: runs against a MiniOzoneCluster; a volume and
- * bucket are created for each test filesystem.
- */
-class OzoneContract extends AbstractFSContract {
-
-  private static MiniOzoneCluster cluster;
-  private static final String CONTRACT_XML = "contract/ozone.xml";
-
-  OzoneContract(Configuration conf) {
-    super(conf);
-    //insert the base features
-    addConfResource(CONTRACT_XML);
-  }
-
-  @Override
-  public String getScheme() {
-    return OzoneConsts.OZONE_URI_SCHEME;
-  }
-
-  @Override
-  public Path getTestPath() {
-    Path path = new Path("/test");
-    return path;
-  }
-
-  public static void createCluster() throws IOException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.addResource(CONTRACT_XML);
-
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
-    try {
-      cluster.waitForClusterToBeReady();
-    } catch (Exception e) {
-      throw new IOException(e);
-    }
-  }
-
-  private void copyClusterConfigs(String configKey) {
-    getConf().set(configKey, cluster.getConf().get(configKey));
-  }
-
-  @Override
-  public FileSystem getTestFileSystem() throws IOException {
-    // assumes createCluster() has already been called
-    Assert.assertNotNull("cluster not created", cluster);
-
-    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
-
-    String uri = String.format("%s://%s.%s/",
-        OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName());
-    getConf().set("fs.defaultFS", uri);
-    copyClusterConfigs(OMConfigKeys.OZONE_OM_ADDRESS_KEY);
-    copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-    return FileSystem.get(getConf());
-  }
-
-  public static void destroyCluster() throws IOException {
-    if (cluster != null) {
-      cluster.shutdown();
-      cluster = null;
-    }
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/package-info.java
deleted file mode 100644
index 51284c2..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- *     http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-/**
- * Ozone FS Contract tests.
- */
-package org.apache.hadoop.fs.ozone;
\ No newline at end of file
diff --git a/hadoop-ozone/ozonefs/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 0368002..0000000
--- a/hadoop-ozone/ozonefs/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.ozone.OzoneFileSystem
diff --git a/hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml b/hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml
deleted file mode 100644
index fe2075c..0000000
--- a/hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml
+++ /dev/null
@@ -1,113 +0,0 @@
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~  or more contributor license agreements.  See the NOTICE file
-  ~  distributed with this work for additional information
-  ~  regarding copyright ownership.  The ASF licenses this file
-  ~  to you under the Apache License, Version 2.0 (the
-  ~  "License"); you may not use this file except in compliance
-  ~  with the License.  You may obtain a copy of the License at
-  ~
-  ~       http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~  Unless required by applicable law or agreed to in writing, software
-  ~  distributed under the License is distributed on an "AS IS" BASIS,
-  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~  See the License for the specific language governing permissions and
-  ~  limitations under the License.
-  -->
-
-<configuration>
-  <!--
-  Ozone is a blobstore, with very different behavior from a classic filesystem.
-  -->
-
-    <property>
-        <name>fs.contract.test.root-tests-enabled</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.test.random-seek-count</name>
-        <value>10</value>
-    </property>
-
-    <property>
-        <name>fs.contract.is-blobstore</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.create-visibility-delayed</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.is-case-sensitive</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.rename-returns-false-if-source-missing</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.rename-remove-dest-if-empty-dir</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-append</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-atomic-directory-delete</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-atomic-rename</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-block-locality</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-concat</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-getfilestatus</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-seek</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-seek-on-closed-file</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.rejects-seek-past-eof</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-strict-exceptions</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-unix-permissions</name>
-        <value>false</value>
-    </property>
-</configuration>
diff --git a/hadoop-ozone/ozonefs/src/test/resources/log4j.properties b/hadoop-ozone/ozonefs/src/test/resources/log4j.properties
deleted file mode 100644
index 8666dcf..0000000
--- a/hadoop-ozone/ozonefs/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,27 +0,0 @@
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-# log4j configuration used during build and unit tests
-
-log4j.rootLogger=INFO,stdout
-log4j.threshold=ALL
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
-
-log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
-log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
-
-# Suppress info messages on every put key from Ratis
-log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN
-
-# for debugging low level Ozone operations, uncomment this line
-# log4j.logger.org.apache.hadoop.ozone=DEBUG
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
deleted file mode 100644
index 825e65c..0000000
--- a/hadoop-ozone/pom.xml
+++ /dev/null
@@ -1,414 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
---><project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-main-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-    <relativePath>../pom.ozone.xml</relativePath>
-  </parent>
-  <artifactId>hadoop-ozone</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Project</description>
-  <name>Apache Hadoop Ozone</name>
-  <packaging>pom</packaging>
-
-  <properties>
-    <hdds.version>0.5.0-SNAPSHOT</hdds.version>
-    <ozone.version>0.5.0-SNAPSHOT</ozone.version>
-    <ratis.version>0.5.0-201fc85-SNAPSHOT</ratis.version>
-    <bouncycastle.version>1.60</bouncycastle.version>
-    <ozone.release>Crater Lake</ozone.release>
-    <declared.ozone.version>${ozone.version}</declared.ozone.version>
-    <maven-surefire-plugin.version>3.0.0-M1</maven-surefire-plugin.version>
-    <guice.version>4.0</guice.version>
-    <docker.image>apache/ozone:${project.version}</docker.image>
-  </properties>
-  <modules>
-    <module>common</module>
-    <module>client</module>
-    <module>ozone-manager</module>
-    <module>ozonefs</module>
-    <module>ozonefs-lib-current</module>
-    <module>ozonefs-lib-legacy</module>
-    <module>tools</module>
-    <module>integration-test</module>
-    <module>datanode</module>
-    <module>s3gateway</module>
-    <module>dist</module>
-    <module>recon</module>
-    <module>recon-codegen</module>
-    <module>upgrade</module>
-    <module>csi</module>
-    <module>fault-injection-test</module>
-    <module>insight</module>
-  </modules>
-
-  <repositories>
-    <repository>
-      <id>apache.snapshots.https</id>
-      <url>https://repository.apache.org/content/repositories/snapshots</url>
-    </repository>
-  </repositories>
-
-  <dependencyManagement>
-
-    <dependencies>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-common</artifactId>
-        <version>${ozone.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-client</artifactId>
-        <version>${ozone.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-ozone-manager</artifactId>
-        <version>${ozone.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-s3gateway</artifactId>
-        <version>${ozone.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-csi</artifactId>
-        <version>${ozone.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-datanode</artifactId>
-        <version>${ozone.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-tools</artifactId>
-        <version>${ozone.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-filesystem</artifactId>
-        <version>${ozone.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-filesystem-lib-current</artifactId>
-        <version>${ozone.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-filesystem-lib-legacy</artifactId>
-        <version>${ozone.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-config</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-integration-test</artifactId>
-        <version>${ozone.version}</version>
-        <type>test-jar</type>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-ozone-manager</artifactId>
-        <version>${ozone.version}</version>
-        <type>test-jar</type>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-common</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-server-framework</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-server-scm</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-docs</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-container-service</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-client</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-tools</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-insight</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-recon</artifactId>
-        <version>${ozone.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-upgrade</artifactId>
-        <version>${ozone.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-container-service</artifactId>
-        <version>${hdds.version}</version>
-        <type>test-jar</type>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-server-scm</artifactId>
-        <type>test-jar</type>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.xml.bind</groupId>
-        <artifactId>jaxb-impl</artifactId>
-        <version>2.3.0.1</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.xml.bind</groupId>
-        <artifactId>jaxb-core</artifactId>
-        <version>2.3.0.1</version>
-      </dependency>
-      <dependency>
-        <groupId>javax.xml.bind</groupId>
-        <artifactId>jaxb-api</artifactId>
-        <version>2.3.0</version>
-      </dependency>
-      <dependency>
-        <groupId>javax.activation</groupId>
-        <artifactId>activation</artifactId>
-        <version>1.1.1</version>
-      </dependency>
-      <dependency>
-        <groupId>org.bouncycastle</groupId>
-        <artifactId>bcprov-jdk15on</artifactId>
-        <version>${bouncycastle.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>commons-lang</groupId>
-        <artifactId>commons-lang</artifactId>
-        <version>2.6</version>
-      </dependency>
-    </dependencies>
-  </dependencyManagement>
-  <dependencies>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-enforcer-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>depcheck</id>
-            <configuration>
-              <rules>
-                <DependencyConvergence>
-                  <uniqueVersions>false</uniqueVersions>
-                </DependencyConvergence>
-              </rules>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>**/*.json</exclude>
-            <exclude>**/hs_err*.log</exclude>
-            <exclude>**/target/**</exclude>
-            <exclude>.gitattributes</exclude>
-            <exclude>.idea/**</exclude>
-            <exclude>dev-support/*tests</exclude>
-            <exclude>dev-support/checkstyle*</exclude>
-            <exclude>dev-support/jdiff/**</exclude>
-            <exclude>src/contrib/**</exclude>
-            <exclude>src/main/webapps/datanode/robots.txt</exclude>
-            <exclude>src/main/webapps/hdfs/robots.txt</exclude>
-            <exclude>src/main/webapps/journal/robots.txt</exclude>
-            <exclude>src/main/webapps/router/robots.txt</exclude>
-            <exclude>src/main/webapps/secondary/robots.txt</exclude>
-            <exclude>src/site/resources/images/*</exclude>
-            <exclude>src/test/all-tests</exclude>
-            <exclude>src/test/empty-file</exclude>
-            <exclude>src/test/resources/*.log</exclude>
-            <exclude>src/test/resources/*.tgz</exclude>
-            <exclude>src/test/resources/data*</exclude>
-            <exclude>src/test/resources/empty-file</exclude>
-            <exclude>src/test/resources/ssl/*</exclude>
-            <exclude>src/main/compose/ozonesecure/docker-image/runner/build/apache-rat-0.12/README-CLI.txt</exclude>
-            <exclude>src/main/compose/ozonesecure/docker-image/runner/build/apache-rat-0.12/README-ANT.txt</exclude>
-            <exclude>webapps/static/angular-1.6.4.min.js</exclude>
-            <exclude>webapps/static/angular-nvd3-1.0.9.min.js</exclude>
-            <exclude>webapps/static/angular-route-1.6.4.min.js</exclude>
-            <exclude>webapps/static/bootstrap-3.4.1/**</exclude>
-            <exclude>webapps/static/d3-3.5.17.min.js</exclude>
-            <exclude>webapps/static/jquery-3.4.1.min.js</exclude>
-            <exclude>webapps/static/jquery.dataTables.min.js</exclude>
-            <exclude>webapps/static/nvd3-1.8.5.min.css.map</exclude>
-            <exclude>webapps/static/nvd3-1.8.5.min.css</exclude>
-            <exclude>webapps/static/nvd3-1.8.5.min.js.map</exclude>
-            <exclude>webapps/static/nvd3-1.8.5.min.js</exclude>
-            <exclude>**/dependency-reduced-pom.xml</exclude>
-            <exclude>**/node_modules/**</exclude>
-            <exclude>**/yarn.lock</exclude>
-            <exclude>**/ozone-recon-web/build/**</exclude>
-            <exclude>src/main/license/**</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
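-      <!-- Writes the runtime classpath to target/classpath, with jar paths
-           prefixed by $HDDS_LIB_JARS_DIR. -->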
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-classpath-descriptor</id>
-            <phase>package</phase>
-            <goals>
-              <goal>build-classpath</goal>
-            </goals>
-            <configuration>
-              <outputFile>${project.build.directory}/classpath</outputFile>
-              <prefix>$HDDS_LIB_JARS_DIR</prefix>
-              <outputFilterFile>true</outputFilterFile>
-              <includeScope>runtime</includeScope>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
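-      <!-- Attaches the generated classpath file as an additional artifact
-           (type "cp", classifier "classpath"). -->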
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>attach-classpath-artifact</id>
-            <phase>package</phase>
-            <goals>
-              <goal>attach-artifact</goal>
-            </goals>
-            <configuration>
-              <artifacts>
-                <artifact>
-                  <file>${project.build.directory}/classpath</file>
-                  <type>cp</type>
-                  <classifier>classpath</classifier>
-                </artifact>
-              </artifacts>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>**/node_modules/*</exclude>
-            <exclude>**/ozone-recon-web/**</exclude>
-          </excludes>
-        </configuration>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-  <profiles>
-    <profile>
-      <id>docker-build</id>
-      <properties>
-        <docker.image>${user.name}/ozone:${project.version}</docker.image>
-      </properties>
-    </profile>
-    <profile>
-      <id>parallel-tests</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-maven-plugins</artifactId>
-            <executions>
-              <execution>
-                <id>parallel-tests-createdir</id>
-                <goals>
-                  <goal>parallel-tests-createdir</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-surefire-plugin</artifactId>
-            <configuration>
-              <forkCount>${testsThreadCount}</forkCount>
-              <reuseForks>false</reuseForks>
-              <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
-              <systemPropertyVariables>
-                <testsThreadCount>${testsThreadCount}</testsThreadCount>
-                <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
-                <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
-                <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
-
-                <!-- This is intentionally the same directory for all JUnit -->
-                <!-- forks, for use in the very rare situation that -->
-                <!-- concurrent tests need to coordinate, such as using lock -->
-                <!-- files. -->
-                <test.build.shared.data>${test.build.data}</test.build.shared.data>
-
-                <!-- Due to a Maven quirk, setting this to just -->
-                <!-- surefire.forkNumber won't do the parameter substitution. -->
-                <!-- Putting a prefix in front of it like "fork-" makes it -->
-                <!-- work. -->
-                <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
-              </systemPropertyVariables>
-            </configuration>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
-</project>
diff --git a/hadoop-ozone/recon-codegen/pom.xml b/hadoop-ozone/recon-codegen/pom.xml
deleted file mode 100644
index 6abc5ef..0000000
--- a/hadoop-ozone/recon-codegen/pom.xml
+++ /dev/null
@@ -1,70 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <parent>
-    <artifactId>hadoop-ozone</artifactId>
-    <groupId>org.apache.hadoop</groupId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <modelVersion>4.0.0</modelVersion>
-  <artifactId>hadoop-ozone-reconcodegen</artifactId>
-  <name>Apache Hadoop Ozone Recon CodeGen</name>
-  <properties>
-    <jooq.version>3.11.10</jooq.version>
-  </properties>
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.xerial</groupId>
-      <artifactId>sqlite-jdbc</artifactId>
-      <version>3.25.2</version>
-    </dependency>
-    <dependency>
-      <groupId>com.google.inject.extensions</groupId>
-      <artifactId>guice-multibindings</artifactId>
-      <version>${guice.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.springframework</groupId>
-      <artifactId>spring-jdbc</artifactId>
-      <version>5.1.3.RELEASE</version>
-    </dependency>
-    <dependency>
-      <groupId>org.jooq</groupId>
-      <artifactId>jooq-codegen</artifactId>
-      <version>${jooq.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.jooq</groupId>
-      <artifactId>jooq-meta</artifactId>
-      <version>${jooq.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.jooq</groupId>
-      <artifactId>jooq</artifactId>
-      <version>${jooq.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>com.google.inject</groupId>
-      <artifactId>guice</artifactId>
-      <version>${guice.version}</version>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/JooqCodeGenerator.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/JooqCodeGenerator.java
deleted file mode 100644
index fce4e0b..0000000
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/JooqCodeGenerator.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.hadoop.ozone.recon.codegen;
-
-import java.io.File;
-import java.sql.SQLException;
-import java.util.Set;
-
-import javax.sql.DataSource;
-
-import org.apache.commons.io.FileUtils;
-import org.hadoop.ozone.recon.schema.ReconSchemaDefinition;
-import org.jooq.codegen.GenerationTool;
-import org.jooq.meta.jaxb.Configuration;
-import org.jooq.meta.jaxb.Database;
-import org.jooq.meta.jaxb.Generate;
-import org.jooq.meta.jaxb.Generator;
-import org.jooq.meta.jaxb.Jdbc;
-import org.jooq.meta.jaxb.Strategy;
-import org.jooq.meta.jaxb.Target;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.sqlite.SQLiteDataSource;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.Provider;
-
-/**
- * Utility class that generates the DAOs and POJOs for the Recon schema. The
- * implementations of {@link ReconSchemaDefinition} are discovered through
- * Guice bindings (avoiding reflection-based scanning) and invoked to create
- * the schema in an embedded database. The jOOQ code generator then runs
- * against that database to generate the Recon classes.
- */
-public class JooqCodeGenerator {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(JooqCodeGenerator.class);
-
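-  // Embedded SQLite database file used as the target for jOOQ code generation.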
-  private static final String SQLITE_DB =
-      System.getProperty("java.io.tmpdir") + "/recon-generated-schema";
-  private static final String JDBC_URL = "jdbc:sqlite:" + SQLITE_DB;
-
-  private final Set<ReconSchemaDefinition> allDefinitions;
-
-  @Inject
-  public JooqCodeGenerator(Set<ReconSchemaDefinition> allDefinitions) {
-    this.allDefinitions = allDefinitions;
-  }
-
-  /**
-   * Create schema.
-   */
-  private void initializeSchema() throws SQLException {
-    for (ReconSchemaDefinition definition : allDefinitions) {
-      definition.initializeSchema();
-    }
-  }
-
-  /**
-   * Generate entity and DAO classes.
-   */
-  private void generateSourceCode(String outputDir) throws Exception {
-    Configuration configuration =
-        new Configuration()
-            .withJdbc(new Jdbc()
-                .withDriver("org.sqlite.JDBC")
-                .withUrl(JDBC_URL)
-                .withUser("sa")
-                .withPassword("sa"))
-            .withGenerator(new Generator()
-                .withDatabase(new Database()
-                    .withName("org.jooq.meta.sqlite.SQLiteDatabase")
-                    .withOutputSchemaToDefault(true)
-                    .withIncludeTables(true)
-                    .withIncludePrimaryKeys(true))
-                .withGenerate(new Generate()
-                    .withDaos(true)
-                    .withEmptyCatalogs(true)
-                    .withEmptySchemas(true))
-                .withStrategy(new Strategy().withName(
-                    "org.hadoop.ozone.recon.codegen.TableNamingStrategy"))
-                .withTarget(new Target()
-                    .withPackageName("org.hadoop.ozone.recon.schema")
-                    .withClean(true)
-                    .withDirectory(outputDir)));
-    GenerationTool.generate(configuration);
-  }
-
-  /**
-   * Provider for embedded datasource.
-   */
-  static class LocalDataSourceProvider implements Provider<DataSource> {
-    private static SQLiteDataSource db;
-
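-    // Shared SQLite datasource backed by the temporary on-disk schema file.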
-    static {
-      db = new SQLiteDataSource();
-      db.setUrl(JDBC_URL);
-    }
-
-    @Override
-    public DataSource get() {
-      return db;
-    }
-
-    static void cleanup() {
-      FileUtils.deleteQuietly(new File(SQLITE_DB));
-    }
-  }
-
-  public static void main(String[] args) {
-    if (args.length < 1) {
-      throw new IllegalArgumentException("Missing required argument: " +
-          "need an output directory for generated code.\nUsage: " +
-          "org.hadoop.ozone.recon.codegen.JooqCodeGenerator " +
-          "<outputDirectory>.");
-    }
-
-    String outputDir = args[0];
-    Injector injector = Guice.createInjector(
-        new ReconSchemaGenerationModule(),
-        new AbstractModule() {
-          @Override
-          protected void configure() {
-            bind(DataSource.class).toProvider(new LocalDataSourceProvider());
-            bind(JooqCodeGenerator.class);
-          }
-        });
-
-    JooqCodeGenerator codeGenerator =
-        injector.getInstance(JooqCodeGenerator.class);
-
-    // Create tables
-    try {
-      codeGenerator.initializeSchema();
-    } catch (SQLException e) {
-      LOG.error("Unable to initialize schema.", e);
-      throw new ExceptionInInitializerError(e);
-    }
-
-    // Generate Pojos and Daos
-    try {
-      codeGenerator.generateSourceCode(outputDir);
-    } catch (Exception e) {
-      LOG.error("Code generation failed. Aborting build.", e);
-      throw new ExceptionInInitializerError(e);
-    }
-
-    // Cleanup after
-    LocalDataSourceProvider.cleanup();
-  }
-}
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java
deleted file mode 100644
index c393cc2..0000000
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.hadoop.ozone.recon.codegen;
-
-import org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition;
-import org.hadoop.ozone.recon.schema.ReconSchemaDefinition;
-import org.hadoop.ozone.recon.schema.StatsSchemaDefinition;
-import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.multibindings.Multibinder;
-
-/**
- * Bindings for DDL generation, used by
- * {@link org.hadoop.ozone.recon.codegen.JooqCodeGenerator}.
- */
-public class ReconSchemaGenerationModule extends AbstractModule {
-  @Override
-  protected void configure() {
-    // SQL schema creation and related bindings
-    Multibinder<ReconSchemaDefinition> schemaBinder =
-        Multibinder.newSetBinder(binder(), ReconSchemaDefinition.class);
-    schemaBinder.addBinding().to(UtilizationSchemaDefinition.class);
-    schemaBinder.addBinding().to(ReconInternalSchemaDefinition.class);
-    schemaBinder.addBinding().to(StatsSchemaDefinition.class);
-  }
-}
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/TableNamingStrategy.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/TableNamingStrategy.java
deleted file mode 100644
index 93c23c4..0000000
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/TableNamingStrategy.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.hadoop.ozone.recon.codegen;
-
-import org.jooq.codegen.DefaultGeneratorStrategy;
-import org.jooq.meta.Definition;
-import org.jooq.meta.TableDefinition;
-import org.jooq.tools.StringUtils;
-
-/**
- * Generate Table classes with a name distinct from the POJOs to improve
- * readability; the strategy is loaded by name at code-generation time.
- */
-public class TableNamingStrategy extends DefaultGeneratorStrategy {
-  @Override
-  public String getJavaClassName(Definition definition, Mode mode) {
-    if (definition instanceof TableDefinition && mode == Mode.DEFAULT) {
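-      // Camel-case the table name, normalizing separators, then append "Table".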
-      StringBuilder result = new StringBuilder();
-
-      result.append(StringUtils.toCamelCase(
-          definition.getOutputName()
-              .replace(' ', '_')
-              .replace('-', '_')
-              .replace('.', '_')
-      ));
-
-      result.append("Table");
-      return result.toString();
-    } else {
-      return super.getJavaClassName(definition, mode);
-    }
-  }
-}
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/package-info.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/package-info.java
deleted file mode 100644
index 2e5cf0f..0000000
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Recon code generation support for entities and daos.
- */
-package org.hadoop.ozone.recon.codegen;
\ No newline at end of file
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconInternalSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconInternalSchemaDefinition.java
deleted file mode 100644
index 9ab9e38..0000000
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconInternalSchemaDefinition.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.hadoop.ozone.recon.schema;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-
-import javax.sql.DataSource;
-
-import org.jooq.impl.DSL;
-import org.jooq.impl.SQLDataType;
-
-import com.google.inject.Inject;
-
-/**
- * Class used to create tables that are required for Recon's internal
- * management.
- */
-public class ReconInternalSchemaDefinition implements ReconSchemaDefinition {
-
-  public static final String RECON_TASK_STATUS_TABLE_NAME =
-      "recon_task_status";
-  private final DataSource dataSource;
-
-  @Inject
-  ReconInternalSchemaDefinition(DataSource dataSource) {
-    this.dataSource = dataSource;
-  }
-
-  @Override
-  public void initializeSchema() throws SQLException {
-    Connection conn = dataSource.getConnection();
-    createReconTaskStatus(conn);
-  }
-
-  /**
-   * Create the Recon Task Status table.
-   * @param conn connection
-   */
-  private void createReconTaskStatus(Connection conn) {
-    DSL.using(conn).createTableIfNotExists(RECON_TASK_STATUS_TABLE_NAME)
-        .column("task_name", SQLDataType.VARCHAR(1024))
-        .column("last_updated_timestamp", SQLDataType.BIGINT)
-        .column("last_updated_seq_number", SQLDataType.BIGINT)
-        .constraint(DSL.constraint("pk_task_name")
-            .primaryKey("task_name"))
-        .execute();
-  }
-}
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconSchemaDefinition.java
deleted file mode 100644
index 72a105e..0000000
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconSchemaDefinition.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.hadoop.ozone.recon.schema;
-
-import java.sql.SQLException;
-
-/**
- * Interface for classes that initialize the SQL schema for Recon.
- * Implementations are used to create the SQL schema programmatically.
- * Note: Make sure to add a binding for your implementation to the Guice
- * module; otherwise the code generator will not pick up the schema changes.
- */
-public interface ReconSchemaDefinition {
-
-  /**
-   * Execute DDL that will create Recon schema.
-   */
-  void initializeSchema() throws SQLException;
-}
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java
deleted file mode 100644
index 6763bc8..0000000
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.hadoop.ozone.recon.schema;
-
-import com.google.inject.Inject;
-import org.jooq.impl.DSL;
-import org.jooq.impl.SQLDataType;
-
-import javax.sql.DataSource;
-import java.sql.Connection;
-import java.sql.SQLException;
-
-/**
- * Class used to create tables that are required for storing Ozone statistics.
- */
-public class StatsSchemaDefinition implements ReconSchemaDefinition {
-
-  public static final String GLOBAL_STATS_TABLE_NAME = "global_stats";
-  private final DataSource dataSource;
-
-  @Inject
-  StatsSchemaDefinition(DataSource dataSource) {
-    this.dataSource = dataSource;
-  }
-
-  @Override
-  public void initializeSchema() throws SQLException {
-    Connection conn = dataSource.getConnection();
-    createGlobalStatsTable(conn);
-  }
-
-  /**
-   * Create the Ozone Global Stats table.
-   * @param conn connection
-   */
-  private void createGlobalStatsTable(Connection conn) {
-    DSL.using(conn).createTableIfNotExists(GLOBAL_STATS_TABLE_NAME)
-        .column("key", SQLDataType.VARCHAR(255))
-        .column("value", SQLDataType.BIGINT)
-        .column("last_updated_timestamp", SQLDataType.TIMESTAMP)
-        .constraint(DSL.constraint("pk_key")
-            .primaryKey("key"))
-        .execute();
-  }
-}
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java
deleted file mode 100644
index b8e6560..0000000
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.hadoop.ozone.recon.schema;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-
-import javax.sql.DataSource;
-
-import org.jooq.impl.DSL;
-import org.jooq.impl.SQLDataType;
-import org.springframework.transaction.annotation.Transactional;
-
-import com.google.inject.Inject;
-
-/**
- * Programmatic definition of Recon DDL.
- */
-public class UtilizationSchemaDefinition implements ReconSchemaDefinition {
-
-  private final DataSource dataSource;
-
-  public static final String CLUSTER_GROWTH_DAILY_TABLE_NAME =
-      "cluster_growth_daily";
-
-  public static final String FILE_COUNT_BY_SIZE_TABLE_NAME =
-      "file_count_by_size";
-
-  @Inject
-  UtilizationSchemaDefinition(DataSource dataSource) {
-    this.dataSource = dataSource;
-  }
-
-  @Override
-  @Transactional
-  public void initializeSchema() throws SQLException {
-    Connection conn = dataSource.getConnection();
-    createClusterGrowthTable(conn);
-    createFileSizeCount(conn);
-  }
-
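-  /** Create the daily cluster growth table, keyed by (timestamp, datanode_id). */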
-  void createClusterGrowthTable(Connection conn) {
-    DSL.using(conn).createTableIfNotExists(CLUSTER_GROWTH_DAILY_TABLE_NAME)
-        .column("timestamp", SQLDataType.TIMESTAMP)
-        .column("datanode_id", SQLDataType.INTEGER)
-        .column("datanode_host", SQLDataType.VARCHAR(1024))
-        .column("rack_id", SQLDataType.VARCHAR(1024))
-        .column("available_size", SQLDataType.BIGINT)
-        .column("used_size", SQLDataType.BIGINT)
-        .column("container_count", SQLDataType.INTEGER)
-        .column("block_count", SQLDataType.INTEGER)
-        .constraint(DSL.constraint("pk_timestamp_datanode_id")
-            .primaryKey("timestamp", "datanode_id"))
-        .execute();
-  }
-
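-  /** Create the file count by size table, keyed by file_size. */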
-  void createFileSizeCount(Connection conn) {
-    DSL.using(conn).createTableIfNotExists(FILE_COUNT_BY_SIZE_TABLE_NAME)
-        .column("file_size", SQLDataType.BIGINT)
-        .column("count", SQLDataType.BIGINT)
-        .constraint(DSL.constraint("pk_file_size")
-            .primaryKey("file_size"))
-        .execute();
-  }
-}
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/package-info.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/package-info.java
deleted file mode 100644
index 3c701f9..0000000
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Classes in this package define the schema for the Recon SQL database.
- */
-package org.hadoop.ozone.recon.schema;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml
deleted file mode 100644
index 7c0ba4d..0000000
--- a/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml
+++ /dev/null
@@ -1,28 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<FindBugsFilter>
-  <Match>
-    <Package name="org.hadoop.ozone.recon.schema"/>
-  </Match>
-  <Match>
-    <Package name="org.hadoop.ozone.recon.schema.tables"/>
-  </Match>
-  <Match>
-    <Package name="org.hadoop.ozone.recon.schema.tables.pojos"/>
-  </Match>
-</FindBugsFilter>
diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml
deleted file mode 100644
index 55318a5..0000000
--- a/hadoop-ozone/recon/pom.xml
+++ /dev/null
@@ -1,311 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <parent>
-    <artifactId>hadoop-ozone</artifactId>
-    <groupId>org.apache.hadoop</groupId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <name>Apache Hadoop Ozone Recon</name>
-  <modelVersion>4.0.0</modelVersion>
-  <artifactId>hadoop-ozone-recon</artifactId>
-  <properties>
-    <jooq.version>3.11.10</jooq.version>
-    <spring.version>5.1.3.RELEASE</spring.version>
-  </properties>
-  <build>
-    <resources>
-      <resource>
-        <directory>src/main/resources</directory>
-        <excludes>
-          <exclude>**/node_modules/**</exclude>
-        </excludes>
-      </resource>
-    </resources>
-    <plugins>
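-      <!-- Runs the jOOQ code generator during generate-resources, emitting
-           sources into target/generated-sources/java. -->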
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <version>${exec-maven-plugin.version}</version>
-        <executions>
-          <execution>
-            <phase>generate-resources</phase>
-            <goals>
-              <goal>java</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <executable>java</executable>
-          <classpathScope>compile</classpathScope>
-          <mainClass>org.hadoop.ozone.recon.codegen.JooqCodeGenerator</mainClass>
-          <arguments>
-            <argument>${project.build.directory}/generated-sources/java</argument>
-          </arguments>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-source</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>add-source</goal>
-            </goals>
-            <configuration>
-              <sources>
-                <source>${project.build.directory}/generated-sources/java</source>
-              </sources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
-        </configuration>
-      </plugin>
-      <!--
-      1. used for local installation of node and yarn
-      2. to install dependencies with yarn install
-      3. building the frontend application
-      -->
-      <plugin>
-        <groupId>com.github.eirslett</groupId>
-        <artifactId>frontend-maven-plugin</artifactId>
-        <version>1.6</version>
-        <configuration>
-          <installDirectory>target</installDirectory>
-          <workingDirectory>${basedir}/src/main/resources/webapps/recon/ozone-recon-web</workingDirectory>
-        </configuration>
-        <executions>
-          <execution>
-            <id>Install node and yarn locally to the project</id>
-            <goals>
-              <goal>install-node-and-yarn</goal>
-            </goals>
-            <configuration>
-              <nodeVersion>v12.1.0</nodeVersion>
-              <yarnVersion>v1.9.2</yarnVersion>
-            </configuration>
-          </execution>
-          <execution>
-            <id>yarn install</id>
-            <goals>
-              <goal>yarn</goal>
-            </goals>
-            <configuration>
-              <arguments>install</arguments>
-            </configuration>
-          </execution>
-          <execution>
-            <id>Build frontend</id>
-            <goals>
-              <goal>yarn</goal>
-            </goals>
-            <configuration>
-              <arguments>run build</arguments>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-resources-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>Copy frontend build to target</id>
-            <phase>process-resources</phase>
-            <goals>
-              <goal>copy-resources</goal>
-            </goals>
-            <configuration>
-              <outputDirectory>${project.build.outputDirectory}/webapps/recon</outputDirectory>
-              <resources>
-                <resource>
-                  <directory>${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build</directory>
-                  <filtering>true</filtering>
-                </resource>
-              </resources>
-            </configuration>
-          </execution>
-          <execution>
-            <id>Copy frontend static files to target</id>
-            <phase>process-resources</phase>
-            <goals>
-              <goal>copy-resources</goal>
-            </goals>
-            <configuration>
-              <outputDirectory>${project.build.outputDirectory}/webapps/static</outputDirectory>
-              <resources>
-                <resource>
-                  <directory>${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build/static</directory>
-                  <filtering>true</filtering>
-                </resource>
-              </resources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-      <exclusions>
-        <exclusion>
-          <artifactId>jersey-server</artifactId>
-          <groupId>com.sun.jersey</groupId>
-        </exclusion>
-        <exclusion>
-          <artifactId>jersey-core</artifactId>
-          <groupId>com.sun.jersey</groupId>
-        </exclusion>
-        <exclusion>
-          <artifactId>jersey-servlet</artifactId>
-          <groupId>com.sun.jersey</groupId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-reconcodegen</artifactId>
-      <version>${ozone.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-ozone-manager</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.inject</groupId>
-      <artifactId>guice</artifactId>
-      <version>${guice.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>com.google.inject.extensions</groupId>
-      <artifactId>guice-servlet</artifactId>
-      <version>${guice.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.glassfish.jersey.containers</groupId>
-      <artifactId>jersey-container-servlet</artifactId>
-      <version>2.27</version>
-      <exclusions>
-        <exclusion>
-          <groupId>org.glassfish.hk2</groupId>
-          <artifactId>hk2-api</artifactId>
-        </exclusion>
-      </exclusions>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.glassfish.jersey.containers</groupId>
-      <artifactId>jersey-container-servlet-core</artifactId>
-      <version>2.27</version>
-    </dependency>
-    <dependency>
-      <groupId>org.glassfish.hk2</groupId>
-      <artifactId>guice-bridge</artifactId>
-      <version>2.5.0</version>
-    </dependency>
-    <dependency>
-      <groupId>org.glassfish.jersey.core</groupId>
-      <artifactId>jersey-server</artifactId>
-      <version>2.27</version>
-    </dependency>
-    <dependency>
-      <groupId>org.glassfish.jersey.media</groupId>
-      <artifactId>jersey-media-json-jackson</artifactId>
-      <version>2.27</version>
-    </dependency>
-    <dependency>
-      <groupId>com.google.inject.extensions</groupId>
-      <artifactId>guice-assistedinject</artifactId>
-      <version>${guice.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.glassfish.jersey.inject</groupId>
-      <artifactId>jersey-hk2</artifactId>
-      <version>2.27</version>
-      <exclusions>
-        <exclusion>
-          <artifactId>hk2-api</artifactId>
-          <groupId>org.glassfish.hk2</groupId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.glassfish.hk2.external</groupId>
-          <artifactId>aopalliance-repackaged</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.glassfish.hk2</groupId>
-          <artifactId>hk2-utils</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <version>2.8.9</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.jooq</groupId>
-      <artifactId>jooq</artifactId>
-      <version>${jooq.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.jooq</groupId>
-      <artifactId>jooq-meta</artifactId>
-      <version>${jooq.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.jooq</groupId>
-      <artifactId>jooq-codegen</artifactId>
-      <version>${jooq.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>com.jolbox</groupId>
-      <artifactId>bonecp</artifactId>
-      <version>0.8.0.RELEASE</version>
-    </dependency>
-    <dependency>
-      <groupId>org.xerial</groupId>
-      <artifactId>sqlite-jdbc</artifactId>
-      <version>3.25.2</version>
-    </dependency>
-    <dependency>
-      <groupId>org.springframework</groupId>
-      <artifactId>spring-jdbc</artifactId>
-      <version>${spring.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>javax.activation</groupId>
-      <artifactId>activation</artifactId>
-      <version>1.1.1</version>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java
deleted file mode 100644
index 5b01958..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon;
-
-import com.google.inject.Provider;
-import org.apache.hadoop.conf.Configuration;
-
-
-/**
- * Ozone Configuration Provider.
- * <p>
- * Because the OzoneConfiguration is created by the CLI application, it is
- * injected here via a singleton instance into the Jax-RS/CDI instances.
- */
-public class ConfigurationProvider implements
-    Provider<Configuration> {
-
-  private static Configuration configuration;
-
-  static void setConfiguration(Configuration conf) {
-    ConfigurationProvider.configuration = conf;
-  }
-
-  @Override
-  public Configuration get() {
-    return configuration;
-  }
-}
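For context on the pattern above: the static setter lets the CLI seed a Guice Provider before the injector exists. A minimal consumption sketch, assuming standard Guice APIs (the ConfiguredService class is hypothetical):

    package org.apache.hadoop.ozone.recon;

    import com.google.inject.Inject;
    import org.apache.hadoop.conf.Configuration;

    // Hypothetical consumer: Guice resolves Configuration through the
    // ConfigurationProvider binding installed by ReconControllerModule.
    public class ConfiguredService {
      private final Configuration conf;

      @Inject
      ConfiguredService(Configuration conf) {
        this.conf = conf;
      }

      public String metadataDirs() {
        return conf.get("ozone.metadata.dirs");
      }
    }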
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
deleted file mode 100644
index 86c8a32..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon;
-
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX;
-
-/**
- * Recon Server constants file.
- */
-public final class ReconConstants {
-
-  private ReconConstants() {
-    // Never Constructed
-  }
-
-  public static final String RECON_CONTAINER_DB = "recon-" +
-      CONTAINER_DB_SUFFIX;
-
-  public static final String CONTAINER_COUNT_KEY = "totalCount";
-
-  public static final String RECON_OM_SNAPSHOT_DB =
-      "om.snapshot.db";
-
-  public static final String CONTAINER_KEY_TABLE =
-      "containerKeyTable";
-
-  public static final String CONTAINER_KEY_COUNT_TABLE =
-      "containerKeyCountTable";
-
-  public static final String FETCH_ALL = "-1";
-  public static final String RECON_QUERY_PREVKEY = "prevKey";
-  public static final String PREV_CONTAINER_ID_DEFAULT_VALUE = "0";
-  public static final String RECON_QUERY_LIMIT = "limit";
-
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java
deleted file mode 100644
index c9e870e..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon;
-
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_AUTO_COMMIT;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_CONNECTION_TIMEOUT;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_DB_DRIVER;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_DB_JDBC_URL;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_DB_PASSWORD;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_DB_USER;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_IDLE_CONNECTION_TEST_PERIOD;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_MAX_ACTIVE_CONNECTIONS;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_MAX_CONNECTION_AGE;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_MAX_IDLE_CONNECTION_AGE;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_MAX_IDLE_CONNECTION_TEST_STMT;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.recon.persistence.DataSourceConfiguration;
-import org.apache.hadoop.ozone.recon.persistence.JooqPersistenceModule;
-import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import org.apache.hadoop.ozone.recon.recovery.ReconOmMetadataManagerImpl;
-import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
-import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider;
-import org.apache.hadoop.ozone.recon.spi.impl.ReconContainerDBProvider;
-import org.apache.hadoop.ozone.recon.spi.impl.ContainerDBServiceProviderImpl;
-import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
-import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask;
-import org.apache.hadoop.ozone.recon.tasks.FileSizeCountTask;
-import org.apache.hadoop.ozone.recon.tasks.ReconTaskController;
-import org.apache.hadoop.ozone.recon.tasks.ReconTaskControllerImpl;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.ratis.protocol.ClientId;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Provides;
-import com.google.inject.Singleton;
-
-/**
- * Guice controller that defines concrete bindings.
- */
-public class ReconControllerModule extends AbstractModule {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ReconControllerModule.class);
-
-  @Override
-  protected void configure() {
-    bind(Configuration.class).toProvider(ConfigurationProvider.class);
-    bind(ReconHttpServer.class).in(Singleton.class);
-    bind(DBStore.class)
-        .toProvider(ReconContainerDBProvider.class).in(Singleton.class);
-    bind(ReconOMMetadataManager.class)
-        .to(ReconOmMetadataManagerImpl.class).in(Singleton.class);
-    bind(OMMetadataManager.class).to(ReconOmMetadataManagerImpl.class)
-        .in(Singleton.class);
-    bind(ContainerDBServiceProvider.class)
-        .to(ContainerDBServiceProviderImpl.class).in(Singleton.class);
-    bind(OzoneManagerServiceProvider.class)
-        .to(OzoneManagerServiceProviderImpl.class).in(Singleton.class);
-    bind(ReconUtils.class).in(Singleton.class);
-    // Persistence - inject configuration provider
-    install(new JooqPersistenceModule(
-        getProvider(DataSourceConfiguration.class)));
-
-    bind(ReconTaskController.class)
-        .to(ReconTaskControllerImpl.class).in(Singleton.class);
-    bind(ContainerKeyMapperTask.class);
-    bind(FileSizeCountTask.class);
-  }
-
-  @Provides
-  OzoneManagerProtocol getOzoneManagerProtocol(
-      final OzoneConfiguration ozoneConfiguration) {
-    OzoneManagerProtocol ozoneManagerClient = null;
-    try {
-      ClientId clientId = ClientId.randomId();
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      ozoneManagerClient =
-          new OzoneManagerProtocolClientSideTranslatorPB(
-          ozoneConfiguration, clientId.toString(), null, ugi);
-    } catch (IOException ioEx) {
-      LOG.error("Error in provisioning OzoneManagerProtocol ", ioEx);
-    }
-    return ozoneManagerClient;
-  }
-
-  @Provides
-  DataSourceConfiguration getDataSourceConfiguration(
-      final OzoneConfiguration ozoneConfiguration) {
-
-    return new DataSourceConfiguration() {
-      @Override
-      public String getDriverClass() {
-        return ozoneConfiguration.get(OZONE_RECON_SQL_DB_DRIVER,
-            "org.sqlite.JDBC");
-      }
-
-      @Override
-      public String getJdbcUrl() {
-        return ozoneConfiguration.get(OZONE_RECON_SQL_DB_JDBC_URL);
-      }
-
-      @Override
-      public String getUserName() {
-        return ozoneConfiguration.get(OZONE_RECON_SQL_DB_USER);
-      }
-
-      @Override
-      public String getPassword() {
-        return ozoneConfiguration.get(OZONE_RECON_SQL_DB_PASSWORD);
-      }
-
-      @Override
-      public boolean setAutoCommit() {
-        return ozoneConfiguration.getBoolean(
-            OZONE_RECON_SQL_AUTO_COMMIT, false);
-      }
-
-      @Override
-      public long getConnectionTimeout() {
-        return ozoneConfiguration.getLong(
-            OZONE_RECON_SQL_CONNECTION_TIMEOUT, 30000);
-      }
-
-      @Override
-      public String getSqlDialect() {
-        return JooqPersistenceModule.DEFAULT_DIALECT.toString();
-      }
-
-      @Override
-      public Integer getMaxActiveConnections() {
-        return ozoneConfiguration.getInt(
-            OZONE_RECON_SQL_MAX_ACTIVE_CONNECTIONS, 10);
-      }
-
-      @Override
-      public Integer getMaxConnectionAge() {
-        return ozoneConfiguration.getInt(
-            OZONE_RECON_SQL_MAX_CONNECTION_AGE, 1800);
-      }
-
-      @Override
-      public Integer getMaxIdleConnectionAge() {
-        return ozoneConfiguration.getInt(
-            OZONE_RECON_SQL_MAX_IDLE_CONNECTION_AGE, 3600);
-      }
-
-      @Override
-      public String getConnectionTestStatement() {
-        return ozoneConfiguration.get(
-            OZONE_RECON_SQL_MAX_IDLE_CONNECTION_TEST_STMT, "SELECT 1");
-      }
-
-      @Override
-      public Integer getIdleConnectionTestPeriod() {
-        return ozoneConfiguration.getInt(
-            OZONE_RECON_SQL_IDLE_CONNECTION_TEST_PERIOD, 60);
-      }
-    };
-
-  }
-}
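A sketch of how the @Provides bindings above surface at runtime; the demo class is hypothetical and assumes it lives in the org.apache.hadoop.ozone.recon package so it can reach the package-private ConfigurationProvider.setConfiguration:

    package org.apache.hadoop.ozone.recon;

    import com.google.inject.Guice;
    import com.google.inject.Injector;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.recon.persistence.DataSourceConfiguration;

    // Hypothetical smoke test: seed the configuration singleton, build the
    // injector, and read the provider-backed DataSourceConfiguration.
    public final class DataSourceConfigDemo {
      public static void main(String[] args) {
        ConfigurationProvider.setConfiguration(new OzoneConfiguration());
        Injector injector = Guice.createInjector(new ReconControllerModule());
        DataSourceConfiguration dsc =
            injector.getInstance(DataSourceConfiguration.class);
        // Falls back to the org.sqlite.JDBC default when the key is unset.
        System.out.println(dsc.getDriverClass());
      }
    }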
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java
deleted file mode 100644
index ab11f0e..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon;
-
-import com.google.inject.Injector;
-import com.google.inject.servlet.GuiceServletContextListener;
-
-/**
- * Servlet Context Listener that provides the Guice injector.
- */
-public class ReconGuiceServletContextListener
-    extends GuiceServletContextListener {
-
-  private static Injector injector;
-
-  @Override
-  public Injector getInjector() {
-    return injector;
-  }
-
-  static void setInjector(Injector inj) {
-    injector = inj;
-  }
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconHttpServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconHttpServer.java
deleted file mode 100644
index e7dcb0c..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconHttpServer.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.server.BaseHttpServer;
-
-import com.google.inject.Inject;
-
-/**
- * Recon HTTP server with Recon-supplied config defaults.
- */
-public class ReconHttpServer extends BaseHttpServer {
-
-  @Inject
-  ReconHttpServer(Configuration conf) throws IOException {
-    super(conf, "recon");
-  }
-
-  @Override
-  protected String getHttpAddressKey() {
-    return ReconServerConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY;
-  }
-
-  @Override
-  protected String getHttpsAddressKey() {
-    return ReconServerConfigKeys.OZONE_RECON_HTTPS_ADDRESS_KEY;
-  }
-
-  @Override
-  protected String getHttpBindHostKey() {
-    return ReconServerConfigKeys.OZONE_RECON_HTTP_BIND_HOST_KEY;
-  }
-
-  @Override
-  protected String getHttpsBindHostKey() {
-    return ReconServerConfigKeys.OZONE_RECON_HTTPS_BIND_HOST_KEY;
-  }
-
-  @Override
-  protected String getBindHostDefault() {
-    return ReconServerConfigKeys.OZONE_RECON_HTTP_BIND_HOST_DEFAULT;
-  }
-
-  @Override
-  protected int getHttpBindPortDefault() {
-    return ReconServerConfigKeys.OZONE_RECON_HTTP_BIND_PORT_DEFAULT;
-  }
-
-  @Override
-  protected int getHttpsBindPortDefault() {
-    return ReconServerConfigKeys.OZONE_RECON_HTTPS_BIND_PORT_DEFAULT;
-  }
-
-  @Override
-  protected String getKeytabFile() {
-    return ReconServerConfigKeys.OZONE_RECON_KEYTAB_FILE;
-  }
-
-  @Override
-  protected String getSpnegoPrincipal() {
-    return ReconServerConfigKeys
-        .OZONE_RECON_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
-  }
-
-  @Override
-  protected String getEnabledKey() {
-    return ReconServerConfigKeys.OZONE_RECON_HTTP_ENABLED_KEY;
-  }
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconRestServletModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconRestServletModule.java
deleted file mode 100644
index 5a69e66..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconRestServletModule.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon;
-
-import java.net.URL;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.glassfish.hk2.api.ServiceLocator;
-import org.glassfish.jersey.internal.inject.InjectionManager;
-import org.glassfish.jersey.server.ResourceConfig;
-import org.glassfish.jersey.server.spi.Container;
-import org.glassfish.jersey.server.spi.ContainerLifecycleListener;
-import org.glassfish.jersey.servlet.ServletContainer;
-import org.jvnet.hk2.guice.bridge.api.GuiceBridge;
-import org.jvnet.hk2.guice.bridge.api.GuiceIntoHK2Bridge;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Injector;
-import com.google.inject.Scopes;
-import com.google.inject.servlet.ServletModule;
-
-/**
- * Class to scan API Service classes and bind them to the injector.
- */
-public abstract class ReconRestServletModule extends ServletModule {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ReconRestServletModule.class);
-
-  @Override
-  protected abstract void configureServlets();
-
-  /**
-   * Interface to provide packages for scanning.
-   */
-  public interface RestKeyBindingBuilder {
-    void packages(String... packages);
-  }
-
-  protected RestKeyBindingBuilder rest(String... urlPatterns) {
-    return new RestKeyBindingBuilderImpl(Arrays.asList(urlPatterns));
-  }
-
-  private class RestKeyBindingBuilderImpl implements RestKeyBindingBuilder {
-    private List<String> paths;
-
-    RestKeyBindingBuilderImpl(List<String> paths) {
-      this.paths = paths;
-    }
-
-    private void checkIfPackageExistsAndLog(String pkg) {
-      String resourcePath = pkg.replace(".", "/");
-      URL resource = getClass().getClassLoader().getResource(resourcePath);
-      if (resource != null) {
-        LOG.info("rest(" + paths + ").packages(" + pkg + ")");
-      } else {
-        LOG.info("No Beans in '" + pkg + "' found. Requests " + paths
-            + " will fail.");
-      }
-    }
-
-    @Override
-    public void packages(String... packages) {
-      StringBuilder sb = new StringBuilder();
-
-      for (String pkg : packages) {
-        if (sb.length() > 0) {
-          sb.append(',');
-        }
-        checkIfPackageExistsAndLog(pkg);
-        sb.append(pkg);
-      }
-      Map<String, String> params = new HashMap<>();
-      params.put("javax.ws.rs.Application",
-          GuiceResourceConfig.class.getCanonicalName());
-      if (sb.length() > 0) {
-        params.put("jersey.config.server.provider.packages", sb.toString());
-      }
-      bind(ServletContainer.class).in(Scopes.SINGLETON);
-      for (String path : paths) {
-        serve(path).with(ServletContainer.class, params);
-      }
-    }
-  }
-}
-
-/**
- * Class to bridge Guice bindings to Jersey hk2 bindings.
- */
-class GuiceResourceConfig extends ResourceConfig {
-  GuiceResourceConfig() {
-    register(new ContainerLifecycleListener() {
-      public void onStartup(Container container) {
-        ServletContainer servletContainer = (ServletContainer) container;
-        InjectionManager injectionManager = container.getApplicationHandler()
-            .getInjectionManager();
-        ServiceLocator serviceLocator = injectionManager
-            .getInstance(ServiceLocator.class);
-        GuiceBridge.getGuiceBridge().initializeGuiceBridge(serviceLocator);
-        GuiceIntoHK2Bridge guiceBridge = serviceLocator
-            .getService(GuiceIntoHK2Bridge.class);
-        Injector injector = (Injector) servletContainer.getServletContext()
-            .getAttribute(Injector.class.getName());
-        guiceBridge.bridgeGuiceInjector(injector);
-      }
-
-      public void onReload(Container container) {
-      }
-
-      public void onShutdown(Container container) {
-      }
-    });
-  }
-}
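A minimal sketch of the rest(...).packages(...) DSL this module exposes; ReconServer below uses the same anonymous-subclass form (the named module here is hypothetical):

    package org.apache.hadoop.ozone.recon;

    // Hypothetical module: serve JAX-RS resources discovered in the named
    // package under /api/*, with Guice dependencies bridged into HK2.
    public class ExampleRestModule extends ReconRestServletModule {
      @Override
      protected void configureServlets() {
        rest("/api/*").packages("org.apache.hadoop.ozone.recon.api");
      }
    }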
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
deleted file mode 100644
index 1aaf887..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon;
-
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider;
-import org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition;
-import org.hadoop.ozone.recon.schema.StatsSchemaDefinition;
-import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-
-/**
- * Recon server main class that stops and starts recon services.
- */
-public class ReconServer extends GenericCli {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ReconServer.class);
-  private final ScheduledExecutorService scheduler =
-      Executors.newScheduledThreadPool(1);
-  private Injector injector;
-
-  @Inject
-  private ReconHttpServer httpServer;
-
-  public static void main(String[] args) {
-    new ReconServer().run(args);
-  }
-
-  @Override
-  public Void call() throws Exception {
-    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
-    ConfigurationProvider.setConfiguration(ozoneConfiguration);
-
-    injector = Guice.createInjector(
-        new ReconControllerModule(),
-        new ReconRestServletModule() {
-          @Override
-          protected void configureServlets() {
-            rest("/api/*")
-              .packages("org.apache.hadoop.ozone.recon.api");
-          }
-        },
-        new ReconTaskBindingModule());
-
-    //Pass on injector to listener that does the Guice - Jersey HK2 bridging.
-    ReconGuiceServletContextListener.setInjector(injector);
-
-    LOG.info("Initializing Recon server...");
-    try {
-      StatsSchemaDefinition statsSchemaDefinition = injector.getInstance(
-          StatsSchemaDefinition.class);
-      statsSchemaDefinition.initializeSchema();
-
-      UtilizationSchemaDefinition utilizationSchemaDefinition =
-          injector.getInstance(UtilizationSchemaDefinition.class);
-      utilizationSchemaDefinition.initializeSchema();
-
-      ReconInternalSchemaDefinition reconInternalSchemaDefinition =
-          injector.getInstance(ReconInternalSchemaDefinition.class);
-      reconInternalSchemaDefinition.initializeSchema();
-
-      LOG.info("Recon server initialized successfully!");
-
-      httpServer = injector.getInstance(ReconHttpServer.class);
-      LOG.info("Starting Recon server");
-      httpServer.start();
-
-      //Start Ozone Manager Service that pulls data from OM.
-      OzoneManagerServiceProvider ozoneManagerServiceProvider = injector
-          .getInstance(OzoneManagerServiceProvider.class);
-      ozoneManagerServiceProvider.start();
-    } catch (Exception e) {
-      LOG.error("Error during initializing Recon server.", e);
-      stop();
-    }
-
-    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
-      try {
-        stop();
-      } catch (Exception e) {
-        LOG.error("Error during stop Recon server", e);
-      }
-    }));
-    return null;
-  }
-
-  void stop() throws Exception {
-    LOG.info("Stopping Recon server");
-    httpServer.stop();
-  }
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
deleted file mode 100644
index 034af4a..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * This class contains constants for Recon configuration keys.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class ReconServerConfigKeys {
-
-  public static final String OZONE_RECON_HTTP_ENABLED_KEY =
-      "ozone.recon.http.enabled";
-  public static final String OZONE_RECON_HTTP_BIND_HOST_KEY =
-      "ozone.recon.http-bind-host";
-  public static final String OZONE_RECON_HTTPS_BIND_HOST_KEY =
-      "ozone.recon.https-bind-host";
-  public static final String OZONE_RECON_HTTP_ADDRESS_KEY =
-      "ozone.recon.http-address";
-  public static final String OZONE_RECON_HTTPS_ADDRESS_KEY =
-      "ozone.recon.https-address";
-  public static final String OZONE_RECON_KEYTAB_FILE =
-      "ozone.recon.keytab.file";
-  public static final String OZONE_RECON_HTTP_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-  public static final int OZONE_RECON_HTTP_BIND_PORT_DEFAULT = 9888;
-  public static final int OZONE_RECON_HTTPS_BIND_PORT_DEFAULT = 9889;
-  public static final String OZONE_RECON_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
-      "ozone.recon.authentication.kerberos.principal";
-
-  public static final String OZONE_RECON_CONTAINER_DB_CACHE_SIZE_MB =
-      "ozone.recon.container.db.cache.size.mb";
-  public static final int OZONE_RECON_CONTAINER_DB_CACHE_SIZE_DEFAULT = 128;
-
-  public static final String OZONE_RECON_DB_DIR = "ozone.recon.db.dir";
-
-  public static final String OZONE_RECON_OM_SNAPSHOT_DB_DIR =
-      "ozone.recon.om.db.dir";
-
-  public static final String RECON_OM_SOCKET_TIMEOUT =
-      "recon.om.socket.timeout";
-  public static final String RECON_OM_SOCKET_TIMEOUT_DEFAULT = "5s";
-
-  public static final String RECON_OM_CONNECTION_TIMEOUT =
-      "recon.om.connection.timeout";
-  public static final String RECON_OM_CONNECTION_TIMEOUT_DEFAULT = "5s";
-
-  public static final String RECON_OM_CONNECTION_REQUEST_TIMEOUT =
-      "recon.om.connection.request.timeout";
-
-  public static final String RECON_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT = "5s";
-
-  public static final String RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY =
-      "recon.om.snapshot.task.initial.delay";
-  public static final String
-      RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT = "1m";
-
-  public static final String OZONE_RECON_CONTAINER_DB_STORE_IMPL =
-      "ozone.recon.container.db.impl";
-  public static final String OZONE_RECON_CONTAINER_DB_STORE_IMPL_DEFAULT =
-      OZONE_METADATA_STORE_IMPL_ROCKSDB;
-
-  public static final String RECON_OM_SNAPSHOT_TASK_INTERVAL =
-      "recon.om.snapshot.task.interval.delay";
-  public static final String RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT
-      = "10m";
-
-  public static final String RECON_OM_SNAPSHOT_TASK_FLUSH_PARAM =
-      "recon.om.snapshot.task.flush.param";
-
-  // Persistence properties
-  public static final String OZONE_RECON_SQL_DB_DRIVER =
-      "ozone.recon.sql.db.driver";
-  public static final String OZONE_RECON_SQL_DB_JDBC_URL =
-      "ozone.recon.sql.db.jdbc.url";
-  public static final String OZONE_RECON_SQL_DB_USER =
-      "ozone.recon.sql.db.username";
-  public static final String OZONE_RECON_SQL_DB_PASSWORD =
-      "ozone.recon.sql.db.password";
-  public static final String OZONE_RECON_SQL_AUTO_COMMIT =
-      "ozone.recon.sql.db.auto.commit";
-  public static final String OZONE_RECON_SQL_CONNECTION_TIMEOUT =
-      "ozone.recon.sql.db.conn.timeout";
-  public static final String OZONE_RECON_SQL_MAX_ACTIVE_CONNECTIONS =
-      "ozone.recon.sql.db.conn.max.active";
-  public static final String OZONE_RECON_SQL_MAX_CONNECTION_AGE =
-      "ozone.recon.sql.db.conn.max.age";
-  public static final String OZONE_RECON_SQL_MAX_IDLE_CONNECTION_AGE =
-      "ozone.recon.sql.db.conn.idle.max.age";
-  public static final String OZONE_RECON_SQL_IDLE_CONNECTION_TEST_PERIOD =
-      "ozone.recon.sql.db.conn.idle.test.period";
-  public static final String OZONE_RECON_SQL_MAX_IDLE_CONNECTION_TEST_STMT =
-      "ozone.recon.sql.db.conn.idle.test";
-
-  public static final String OZONE_RECON_TASK_THREAD_COUNT_KEY =
-      "ozone.recon.task.thread.count";
-  public static final int OZONE_RECON_TASK_THREAD_COUNT_DEFAULT = 5;
-
-  /**
-   * Private constructor for utility class.
-   */
-  private ReconServerConfigKeys() {
-  }
-}
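For illustration, a sketch of reading these keys with their documented defaults (the constants are from this file; the demo class itself is hypothetical):

    package org.apache.hadoop.ozone.recon;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    // Hypothetical lookup mirroring how ReconControllerModule and
    // ReconHttpServer consume these keys.
    public final class ReconConfigLookupDemo {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        int taskThreads = conf.getInt(
            ReconServerConfigKeys.OZONE_RECON_TASK_THREAD_COUNT_KEY,
            ReconServerConfigKeys.OZONE_RECON_TASK_THREAD_COUNT_DEFAULT);
        String dbDir = conf.get(ReconServerConfigKeys.OZONE_RECON_DB_DIR);
        System.out.println(taskThreads + " task threads, DB dir: " + dbDir);
      }
    }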
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconTaskBindingModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconTaskBindingModule.java
deleted file mode 100644
index 19cc0da..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconTaskBindingModule.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon;
-
-import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask;
-import org.apache.hadoop.ozone.recon.tasks.FileSizeCountTask;
-import org.apache.hadoop.ozone.recon.tasks.ReconDBUpdateTask;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.multibindings.Multibinder;
-
-/**
- * Binds the various Recon Tasks.
- */
-public class ReconTaskBindingModule extends AbstractModule {
-
-  @Override
-  protected void configure() {
-    Multibinder<ReconDBUpdateTask> taskBinder =
-        Multibinder.newSetBinder(binder(), ReconDBUpdateTask.class);
-    taskBinder.addBinding().to(ContainerKeyMapperTask.class);
-    taskBinder.addBinding().to(FileSizeCountTask.class);
-  }
-}
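A sketch of the consuming side of the multibinding above: Guice aggregates every addBinding() contribution into one injectable Set (the TaskRegistry class is hypothetical):

    package org.apache.hadoop.ozone.recon;

    import java.util.Set;
    import javax.inject.Inject;
    import org.apache.hadoop.ozone.recon.tasks.ReconDBUpdateTask;

    // Hypothetical consumer: the Multibinder above makes the full set of
    // registered tasks injectable as a single Set<ReconDBUpdateTask>.
    public class TaskRegistry {
      private final Set<ReconDBUpdateTask> tasks;

      @Inject
      TaskRegistry(Set<ReconDBUpdateTask> tasks) {
        this.tasks = tasks;
      }

      public int taskCount() {
        return tasks.size();
      }
    }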
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
deleted file mode 100644
index 2d29d3f..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ /dev/null
@@ -1,274 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon;
-
-import static java.net.HttpURLConnection.HTTP_CREATED;
-import static java.net.HttpURLConnection.HTTP_OK;
-import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig;
-import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.zip.GZIPOutputStream;
-
-import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
-import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
-import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.CloseableHttpClient;
-
-import org.apache.http.util.EntityUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Recon Utility class.
- */
-public class ReconUtils {
-
-  private static final int WRITE_BUFFER = 1048576; // 1 MB
-
-  public ReconUtils() {
-  }
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      ReconUtils.class);
-
-  /**
-   * Get the configured Recon DB directory from config. If not present,
-   * fall back to ozone.metadata.dirs.
-   *
-   * @param conf         configuration bag
-   * @param dirConfigKey key to check
-   * @return File based on the configured or fallback value.
-   */
-  public File getReconDbDir(Configuration conf, String dirConfigKey) {
-
-    File metadataDir = getDirectoryFromConfig(conf, dirConfigKey,
-        "Recon");
-    if (metadataDir != null) {
-      return metadataDir;
-    }
-
-    LOG.warn("{} is not configured. We recommend adding this setting. " +
-            "Falling back to {} instead.",
-        dirConfigKey, HddsConfigKeys.OZONE_METADATA_DIRS);
-    return getOzoneMetaDirPath(conf);
-  }
-
-  /**
-   * Given a source directory, create a tar.gz file from it.
-   *
-   * @param sourcePath the path to the directory to be archived.
-   * @return tar.gz file
-   * @throws IOException
-   */
-  public static File createTarFile(Path sourcePath) throws IOException {
-    TarArchiveOutputStream tarOs = null;
-    try {
-      String sourceDir = sourcePath.toString();
-      String fileName = sourceDir.concat(".tar.gz");
-      FileOutputStream fileOutputStream = new FileOutputStream(fileName);
-      GZIPOutputStream gzipOutputStream =
-          new GZIPOutputStream(new BufferedOutputStream(fileOutputStream));
-      tarOs = new TarArchiveOutputStream(gzipOutputStream);
-      File folder = new File(sourceDir);
-      File[] filesInDir = folder.listFiles();
-      if (filesInDir != null) {
-        for (File file : filesInDir) {
-          addFilesToArchive(file.getName(), file, tarOs);
-        }
-      }
-      return new File(fileName);
-    } finally {
-      try {
-        org.apache.hadoop.io.IOUtils.closeStream(tarOs);
-      } catch (Exception e) {
-        LOG.error("Exception encountered when closing " +
-            "TAR file output stream: " + e);
-      }
-    }
-  }
-
-  private static void addFilesToArchive(String source, File file,
-                                        TarArchiveOutputStream
-                                            tarFileOutputStream)
-      throws IOException {
-    tarFileOutputStream.putArchiveEntry(new TarArchiveEntry(file, source));
-    if (file.isFile()) {
-      FileInputStream fileInputStream = new FileInputStream(file);
-      BufferedInputStream bufferedInputStream =
-          new BufferedInputStream(fileInputStream);
-      org.apache.commons.compress.utils.IOUtils.copy(bufferedInputStream,
-          tarFileOutputStream);
-      tarFileOutputStream.closeArchiveEntry();
-      fileInputStream.close();
-    } else if (file.isDirectory()) {
-      tarFileOutputStream.closeArchiveEntry();
-      File[] filesInDir = file.listFiles();
-      if (filesInDir != null) {
-        for (File cFile : filesInDir) {
-          addFilesToArchive(cFile.getAbsolutePath(), cFile,
-              tarFileOutputStream);
-        }
-      }
-    }
-  }
-
-  /**
-   * Untar DB snapshot tar file to recon OM snapshot directory.
-   *
-   * @param tarFile  source tar file
-   * @param destPath destination path to untar to.
-   * @throws IOException ioException
-   */
-  public void untarCheckpointFile(File tarFile, Path destPath)
-      throws IOException {
-
-    FileInputStream fileInputStream = null;
-    BufferedInputStream buffIn = null;
-    GzipCompressorInputStream gzIn = null;
-    try {
-      fileInputStream = new FileInputStream(tarFile);
-      buffIn = new BufferedInputStream(fileInputStream);
-      gzIn = new GzipCompressorInputStream(buffIn);
-
-      //Create Destination directory if it does not exist.
-      if (!destPath.toFile().exists()) {
-        boolean success = destPath.toFile().mkdirs();
-        if (!success) {
-          throw new IOException("Unable to create Destination directory.");
-        }
-      }
-
-      try (TarArchiveInputStream tarInStream =
-               new TarArchiveInputStream(gzIn)) {
-        TarArchiveEntry entry = null;
-
-        while ((entry = (TarArchiveEntry) tarInStream.getNextEntry()) != null) {
-          //If directory, create a directory.
-          if (entry.isDirectory()) {
-            File f = new File(Paths.get(destPath.toString(),
-                entry.getName()).toString());
-            boolean success = f.mkdirs();
-            if (!success) {
-              LOG.error("Unable to create directory found in tar.");
-            }
-          } else {
-            //Write contents of file in archive to a new file.
-            int count;
-            byte[] data = new byte[WRITE_BUFFER];
-
-            FileOutputStream fos = new FileOutputStream(
-                Paths.get(destPath.toString(), entry.getName()).toString());
-            try (BufferedOutputStream dest =
-                     new BufferedOutputStream(fos, WRITE_BUFFER)) {
-              while ((count =
-                  tarInStream.read(data, 0, WRITE_BUFFER)) != -1) {
-                dest.write(data, 0, count);
-              }
-            }
-          }
-        }
-      }
-    } finally {
-      IOUtils.closeStream(gzIn);
-      IOUtils.closeStream(buffIn);
-      IOUtils.closeStream(fileInputStream);
-    }
-  }
-
-  /**
-   * Make an HTTP GET call on the URL and return an InputStream to the
-   * response.
-   * @param httpClient HttpClient to use.
-   * @param url URL to call.
-   * @return InputStream to the response of the HTTP call.
-   * @throws IOException While reading the response.
-   */
-  public InputStream makeHttpCall(CloseableHttpClient httpClient,
-                                         String url)
-      throws IOException {
-
-    HttpGet httpGet = new HttpGet(url);
-    HttpResponse response = httpClient.execute(httpGet);
-    int errorCode = response.getStatusLine().getStatusCode();
-    HttpEntity entity = response.getEntity();
-
-    if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) {
-      return entity.getContent();
-    }
-
-    if (entity != null) {
-      throw new IOException("Unexpected exception when trying to reach Ozone " +
-          "Manager, " + EntityUtils.toString(entity));
-    } else {
-      throw new IOException("Unexpected null in http payload," +
-          " while processing request");
-    }
-  }
-
-  /**
-   * Load the last known DB snapshot in Recon.
-   * @param reconDbDir Recon DB directory to scan.
-   * @param fileNamePrefix prefix that snapshot file names start with.
-   * @return File handle to the latest snapshot, or null if none is found.
-   */
-  public File getLastKnownDB(File reconDbDir, String fileNamePrefix) {
-    String lastKnownSnapshotFileName = null;
-    long lastKnownSnapshotTs = Long.MIN_VALUE;
-    if (reconDbDir != null) {
-      File[] snapshotFiles = reconDbDir.listFiles((dir, name) ->
-          name.startsWith(fileNamePrefix));
-      if (snapshotFiles != null) {
-        for (File snapshotFile : snapshotFiles) {
-          String fileName = snapshotFile.getName();
-          try {
-            String[] fileNameSplits = fileName.split("_");
-            if (fileNameSplits.length <= 1) {
-              continue;
-            }
-            long snapshotTimestamp = Long.parseLong(fileNameSplits[1]);
-            if (lastKnownSnapshotTs < snapshotTimestamp) {
-              lastKnownSnapshotTs = snapshotTimestamp;
-              lastKnownSnapshotFileName = fileName;
-            }
-          } catch (NumberFormatException nfEx) {
-            LOG.warn("Unknown file found in Recon DB dir : {}", fileName);
-          }
-        }
-      }
-    }
-    return lastKnownSnapshotFileName == null ? null :
-        new File(reconDbDir.getPath(), lastKnownSnapshotFileName);
-  }
-
-}
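A round-trip sketch of the archive helpers above; the paths are placeholders:

    import java.io.File;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    import org.apache.hadoop.ozone.recon.ReconUtils;

    // Hypothetical round trip: archive a snapshot directory with the static
    // helper, then restore it with the instance untar helper.
    public final class TarRoundTripDemo {
      public static void main(String[] args) throws Exception {
        Path source = Paths.get("/tmp/om.snapshot.db");        // placeholder
        File tarball = ReconUtils.createTarFile(source);       // <source>.tar.gz
        Path dest = Paths.get("/tmp/om.snapshot.db.restored"); // placeholder
        new ReconUtils().untarCheckpointFile(tarball, dest);
      }
    }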
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerKeyService.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerKeyService.java
deleted file mode 100644
index 4a7abc3..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerKeyService.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon.api;
-
-import java.io.IOException;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.stream.Collectors;
-
-import javax.ws.rs.DefaultValue;
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-
-import javax.inject.Inject;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
-import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
-import org.apache.hadoop.ozone.recon.api.types.ContainersResponse;
-import org.apache.hadoop.ozone.recon.api.types.KeyMetadata;
-import org.apache.hadoop.ozone.recon.api.types.KeyMetadata.ContainerBlockMetadata;
-import org.apache.hadoop.ozone.recon.api.types.KeysResponse;
-import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.recon.ReconConstants.FETCH_ALL;
-import static org.apache.hadoop.ozone.recon.ReconConstants.PREV_CONTAINER_ID_DEFAULT_VALUE;
-import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT;
-import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY;
-
-
-/**
- * Endpoint for querying keys that belong to a container.
- */
-@Path("/containers")
-@Produces(MediaType.APPLICATION_JSON)
-public class ContainerKeyService {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerKeyService.class);
-
-  @Inject
-  private ContainerDBServiceProvider containerDBServiceProvider;
-
-  @Inject
-  private ReconOMMetadataManager omMetadataManager;
-
-  /**
-   * Return {@link org.apache.hadoop.ozone.recon.api.types.ContainerMetadata}
-   * for the containers starting after the given "prevKey" query param, up to
-   * the given "limit". The given "prevKey" is excluded from the results.
-   *
-   * @param limit max no. of containers to get.
-   * @param prevKey the containerID after which results are returned.
-   * @return {@link Response}
-   */
-  @GET
-  public Response getContainers(
-      @DefaultValue(FETCH_ALL) @QueryParam(RECON_QUERY_LIMIT) int limit,
-      @DefaultValue(PREV_CONTAINER_ID_DEFAULT_VALUE)
-      @QueryParam(RECON_QUERY_PREVKEY) long prevKey) {
-    Map<Long, ContainerMetadata> containersMap;
-    long containersCount;
-    try {
-      containersMap = containerDBServiceProvider.getContainers(limit, prevKey);
-      containersCount = containerDBServiceProvider.getCountForContainers();
-    } catch (IOException ioEx) {
-      throw new WebApplicationException(ioEx,
-          Response.Status.INTERNAL_SERVER_ERROR);
-    }
-    ContainersResponse containersResponse =
-        new ContainersResponse(containersCount, containersMap.values());
-    return Response.ok(containersResponse).build();
-  }
-
-  /**
-   * Return {@link org.apache.hadoop.ozone.recon.api.types.KeyMetadata} for
-   * all keys that belong to the container identified by the id param,
-   * starting after the given "prevKey" query param, up to the given "limit".
-   * The given prevKeyPrefix is excluded from the results returned.
-   *
-   * @param containerID the given containerID.
-   * @param limit max no. of keys to get.
-   * @param prevKeyPrefix the key prefix after which results are returned.
-   * @return {@link Response}
-   */
-  @GET
-  @Path("/{id}/keys")
-  public Response getKeysForContainer(
-      @PathParam("id") Long containerID,
-      @DefaultValue(FETCH_ALL) @QueryParam(RECON_QUERY_LIMIT) int limit,
-      @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY)
-          String prevKeyPrefix) {
-    Map<String, KeyMetadata> keyMetadataMap = new LinkedHashMap<>();
-    long totalCount;
-    try {
-      Map<ContainerKeyPrefix, Integer> containerKeyPrefixMap =
-          containerDBServiceProvider.getKeyPrefixesForContainer(containerID,
-              prevKeyPrefix);
-
-      // Get set of Container-Key mappings for given containerId.
-      for (ContainerKeyPrefix containerKeyPrefix : containerKeyPrefixMap
-          .keySet()) {
-
-        // Directly calling get() on the Key table instead of iterating since
-        // only full keys are supported now. When we change to using a prefix
-        // of the key, this needs to change to prefix seek (TODO).
-        OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(
-            containerKeyPrefix.getKeyPrefix());
-        if (null == omKeyInfo) {
-          continue;
-        }
-
-        // Filter keys by version.
-        List<OmKeyLocationInfoGroup> matchedKeys = omKeyInfo
-            .getKeyLocationVersions()
-            .stream()
-            .filter(k -> (k.getVersion() == containerKeyPrefix.getKeyVersion()))
-            .collect(Collectors.toList());
-
-        List<ContainerBlockMetadata> blockIds = new ArrayList<>();
-        for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : matchedKeys) {
-          List<OmKeyLocationInfo> omKeyLocationInfos = omKeyLocationInfoGroup
-              .getLocationList()
-              .stream()
-              .filter(c -> c.getContainerID() == containerID)
-              .collect(Collectors.toList());
-          for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfos) {
-            blockIds.add(new ContainerBlockMetadata(omKeyLocationInfo
-                .getContainerID(), omKeyLocationInfo.getLocalID()));
-          }
-        }
-
-        String ozoneKey = omMetadataManager.getOzoneKey(
-            omKeyInfo.getVolumeName(),
-            omKeyInfo.getBucketName(),
-            omKeyInfo.getKeyName());
-        if (keyMetadataMap.containsKey(ozoneKey)) {
-          keyMetadataMap.get(ozoneKey).getVersions()
-              .add(containerKeyPrefix.getKeyVersion());
-
-          keyMetadataMap.get(ozoneKey).getBlockIds().putAll(
-              Collections.singletonMap(containerKeyPrefix.getKeyVersion(),
-                  blockIds));
-        } else {
-          // break the for loop if limit has been reached
-          if (keyMetadataMap.size() == limit) {
-            break;
-          }
-          KeyMetadata keyMetadata = new KeyMetadata();
-          keyMetadata.setBucket(omKeyInfo.getBucketName());
-          keyMetadata.setVolume(omKeyInfo.getVolumeName());
-          keyMetadata.setKey(omKeyInfo.getKeyName());
-          keyMetadata.setCreationTime(
-              Instant.ofEpochMilli(omKeyInfo.getCreationTime()));
-          keyMetadata.setModificationTime(
-              Instant.ofEpochMilli(omKeyInfo.getModificationTime()));
-          keyMetadata.setDataSize(omKeyInfo.getDataSize());
-          // Plain collections instead of double-brace initialization,
-          // which creates anonymous inner classes that retain a reference
-          // to the enclosing instance.
-          List<Long> versions = new ArrayList<>();
-          versions.add(containerKeyPrefix.getKeyVersion());
-          keyMetadata.setVersions(versions);
-          Map<Long, List<ContainerBlockMetadata>> blocks = new TreeMap<>();
-          blocks.put(containerKeyPrefix.getKeyVersion(), blockIds);
-          keyMetadata.setBlockIds(blocks);
-          keyMetadataMap.put(ozoneKey, keyMetadata);
-        }
-      }
-
-      totalCount =
-          containerDBServiceProvider.getKeyCountForContainer(containerID);
-    } catch (IOException ioEx) {
-      throw new WebApplicationException(ioEx,
-          Response.Status.INTERNAL_SERVER_ERROR);
-    }
-    KeysResponse keysResponse =
-        new KeysResponse(totalCount, keyMetadataMap.values());
-    return Response.ok(keysResponse).build();
-  }
-}
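
For orientation, a minimal sketch of paging through the container endpoint above from a plain Java client. The base URL, port, and "/api/containers" path are assumptions for illustration; only the "limit" and "prev-key" query params come from the code above.

    import java.io.IOException;
    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public final class ReconContainerClient {
      public static void main(String[] args) throws IOException {
        long prevKey = 0;  // containerID after which results are returned
        int limit = 100;   // page size
        URL url = new URL("http://localhost:9888/api/containers"
            + "?limit=" + limit + "&prev-key=" + prevKey);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (InputStream in = conn.getInputStream()) {
          // Body is the ContainersResponse JSON, roughly:
          // {"data":{"totalCount":N,"containers":[...]}}
          System.out.println(
              new String(in.readAllBytes(), StandardCharsets.UTF_8));
        } finally {
          conn.disconnect();
        }
      }
    }
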
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/UtilizationService.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/UtilizationService.java
deleted file mode 100644
index 0bc33f3..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/UtilizationService.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.api;
-
-import javax.inject.Inject;
-import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao;
-import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize;
-import org.jooq.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import java.util.List;
-
-/**
- * Endpoint for querying file counts bucketed by file size.
- */
-@Path("/utilization")
-@Produces(MediaType.APPLICATION_JSON)
-public class UtilizationService {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(UtilizationService.class);
-
-  private FileCountBySizeDao fileCountBySizeDao;
-
-  @Inject
-  private Configuration sqlConfiguration;
-
-  FileCountBySizeDao getDao() {
-    if (fileCountBySizeDao == null) {
-      fileCountBySizeDao = new FileCountBySizeDao(sqlConfiguration);
-    }
-    return fileCountBySizeDao;
-  }
-
-  /**
-   * Return the file counts from Recon DB.
-   * @return {@link Response}
-   */
-  @GET
-  @Path("/fileCount")
-  public Response getFileCounts() {
-    fileCountBySizeDao = getDao();
-    List<FileCountBySize> resultSet = fileCountBySizeDao.findAll();
-    return Response.ok(resultSet).build();
-  }
-}
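
A small sketch of the DAO pattern used above, exercised outside the endpoint. The jOOQ Configuration would normally be injected, and the pojo accessor names are assumed from the generated FILE_COUNT_BY_SIZE schema.

    import java.util.List;
    import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao;
    import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize;
    import org.jooq.Configuration;

    public final class FileCountReport {
      private FileCountReport() {
      }

      static void print(Configuration sqlConfiguration) {
        // Same lazy construction as UtilizationService#getDao().
        FileCountBySizeDao dao = new FileCountBySizeDao(sqlConfiguration);
        List<FileCountBySize> rows = dao.findAll();
        for (FileCountBySize row : rows) {
          // Each row maps a file-size bucket to the number of files in it
          // (accessor names assumed from the generated pojo).
          System.out.println(row.getFileSize() + " -> " + row.getCount());
        }
      }
    }
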
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/package-info.java
deleted file mode 100644
index 894e9d5..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * The classes in this package define api endpoints for Recon.
- */
-
-package org.apache.hadoop.ozone.recon.api;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java
deleted file mode 100644
index be9ecbd..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.api.types;
-
-import java.util.Objects;
-
-/**
- * Class to encapsulate the Key information needed for the Recon container DB.
- * Currently, it is the containerId and the whole key + key version.
- */
-public class ContainerKeyPrefix {
-
-  private long containerId;
-  private String keyPrefix;
-  private long keyVersion = -1;
-
-  public ContainerKeyPrefix(long containerId, String keyPrefix) {
-    this.containerId = containerId;
-    this.keyPrefix = keyPrefix;
-  }
-
-  public ContainerKeyPrefix(long containerId, String keyPrefix,
-                            long keyVersion) {
-    this.containerId = containerId;
-    this.keyPrefix = keyPrefix;
-    this.keyVersion = keyVersion;
-  }
-
-  public ContainerKeyPrefix(long containerId) {
-    this.containerId = containerId;
-  }
-
-  public long getContainerId() {
-    return containerId;
-  }
-
-  public void setContainerId(long containerId) {
-    this.containerId = containerId;
-  }
-
-  public String getKeyPrefix() {
-    return keyPrefix;
-  }
-
-  public void setKeyPrefix(String keyPrefix) {
-    this.keyPrefix = keyPrefix;
-  }
-
-  public long getKeyVersion() {
-    return keyVersion;
-  }
-
-  public void setKeyVersion(long keyVersion) {
-    this.keyVersion = keyVersion;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (!(o instanceof ContainerKeyPrefix)) {
-      return false;
-    }
-    ContainerKeyPrefix that = (ContainerKeyPrefix) o;
-    // Objects.equals guards against the null keyPrefix left by the
-    // single-argument constructor.
-    return this.containerId == that.containerId &&
-        Objects.equals(this.keyPrefix, that.keyPrefix) &&
-        this.keyVersion == that.keyVersion;
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(containerId, keyPrefix, keyVersion);
-  }
-
-}
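
Since ContainerKeyPrefix overrides equals and hashCode over all three fields, it can serve as a map key. A minimal sketch, assuming it runs in the same package:

    import java.util.HashMap;
    import java.util.Map;

    public final class ContainerKeyPrefixDemo {
      public static void main(String[] args) {
        Map<ContainerKeyPrefix, Integer> counts = new HashMap<>();
        counts.put(new ContainerKeyPrefix(1L, "/vol1/bucket1/key1", 0L), 2);
        // A logically equal instance addresses the same entry.
        System.out.println(
            counts.get(new ContainerKeyPrefix(1L, "/vol1/bucket1/key1", 0L)));
        // Prints: 2
      }
    }
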
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerMetadata.java
deleted file mode 100644
index 381f2ff..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerMetadata.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon.api.types;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-
-/**
- * Metadata object that represents a Container.
- */
-@XmlAccessorType(XmlAccessType.FIELD)
-public class ContainerMetadata {
-
-  @XmlElement(name = "ContainerID")
-  private long containerID;
-
-  @XmlElement(name = "NumberOfKeys")
-  private long numberOfKeys;
-
-  public ContainerMetadata(long containerID) {
-    this.containerID = containerID;
-  }
-
-  public long getContainerID() {
-    return containerID;
-  }
-
-  public void setContainerID(long containerID) {
-    this.containerID = containerID;
-  }
-
-  public long getNumberOfKeys() {
-    return numberOfKeys;
-  }
-
-  public void setNumberOfKeys(long numberOfKeys) {
-    this.numberOfKeys = numberOfKeys;
-  }
-
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainersResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainersResponse.java
deleted file mode 100644
index 2bad498..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainersResponse.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon.api.types;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-
-/**
- * Class that represents the API Response structure of Containers.
- */
-public class ContainersResponse {
-  /**
-   * Contains a map with total count of containers and list of containers.
-   */
-  @JsonProperty("data")
-  private ContainersResponseData containersResponseData;
-
-  public ContainersResponse() {
-    this(0, new ArrayList<>());
-  }
-
-  public ContainersResponse(long totalCount,
-                            Collection<ContainerMetadata> containers) {
-    this.containersResponseData =
-        new ContainersResponseData(totalCount, containers);
-  }
-
-  public String toJsonString() {
-    try {
-      return JsonUtils.toJsonString(this);
-    } catch (IOException ignored) {
-      return null;
-    }
-  }
-
-  public ContainersResponseData getContainersResponseData() {
-    return containersResponseData;
-  }
-
-  public void setContainersResponseData(ContainersResponseData
-                                            containersResponseData) {
-    this.containersResponseData = containersResponseData;
-  }
-
-  /**
-   * Class that encapsulates the data presented in Containers API Response.
-   */
-  public static class ContainersResponseData {
-    /**
-     * Total count of the containers.
-     */
-    @JsonProperty("totalCount")
-    private long totalCount;
-
-    /**
-     * An array of containers.
-     */
-    @JsonProperty("containers")
-    private Collection<ContainerMetadata> containers;
-
-    ContainersResponseData(long totalCount,
-                           Collection<ContainerMetadata> containers) {
-      this.totalCount = totalCount;
-      this.containers = containers;
-    }
-
-    public long getTotalCount() {
-      return totalCount;
-    }
-
-    public Collection<ContainerMetadata> getContainers() {
-      return containers;
-    }
-  }
-}
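
A sketch of the JSON shape this wrapper serializes to. The exact field names of each container element depend on how Jackson handles the JAXB annotations, so the output shown is indicative only:

    import java.util.Collections;

    public final class ContainersResponseDemo {
      public static void main(String[] args) {
        ContainerMetadata container = new ContainerMetadata(1L);
        container.setNumberOfKeys(42L);
        ContainersResponse response =
            new ContainersResponse(1, Collections.singletonList(container));
        // Roughly: {"data":{"totalCount":1,"containers":[{...}]}}
        System.out.println(response.toJsonString());
      }
    }
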
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/IsoDateAdapter.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/IsoDateAdapter.java
deleted file mode 100644
index 7bcdbe1..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/IsoDateAdapter.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon.api.types;
-
-import java.time.Instant;
-import java.time.ZoneOffset;
-import java.time.format.DateTimeFormatter;
-
-import javax.xml.bind.annotation.adapters.XmlAdapter;
-
-/**
- * An adapter that formats an {@link java.time.Instant} as an ISO-8601 date
- * string.
- */
-public class IsoDateAdapter extends XmlAdapter<String, Instant> {
-
-  private DateTimeFormatter iso8601Formatter;
-
-  public IsoDateAdapter() {
-    iso8601Formatter =
-        DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSX")
-            .withZone(ZoneOffset.UTC);
-  }
-
-  @Override
-  public Instant unmarshal(String v) throws Exception {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public String marshal(Instant v) throws Exception {
-    return iso8601Formatter.format(v);
-  }
-}
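
The adapter's output can be verified directly; a minimal sketch:

    import java.time.Instant;

    public final class IsoDateAdapterDemo {
      public static void main(String[] args) throws Exception {
        IsoDateAdapter adapter = new IsoDateAdapter();
        // UTC with millisecond precision; prints 1970-01-01T00:00:00.000Z
        System.out.println(adapter.marshal(Instant.ofEpochMilli(0)));
      }
    }
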
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java
deleted file mode 100644
index 3168263..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon.api.types;
-
-import java.time.Instant;
-import java.util.List;
-import java.util.Map;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
-
-/**
- * Metadata object that represents one key in the object store.
- */
-@XmlRootElement(name = "KeyMetadata")
-@XmlAccessorType(XmlAccessType.FIELD)
-public class KeyMetadata {
-
-  @XmlElement(name = "Volume")
-  private String volume;
-
-  @XmlElement(name = "Bucket")
-  private String bucket;
-
-  @XmlElement(name = "Key")
-  private String key;
-
-  @XmlElement(name = "DataSize")
-  private long dataSize;
-
-  @XmlElement(name = "Versions")
-  private List<Long> versions;
-
-  @XmlElement(name = "Blocks")
-  private Map<Long, List<ContainerBlockMetadata>> blockIds;
-
-  @XmlJavaTypeAdapter(IsoDateAdapter.class)
-  @XmlElement(name = "CreationTime")
-  private Instant creationTime;
-
-  @XmlJavaTypeAdapter(IsoDateAdapter.class)
-  @XmlElement(name = "ModificationTime")
-  private Instant modificationTime;
-
-  public String getVolume() {
-    return volume;
-  }
-
-  public void setVolume(String volume) {
-    this.volume = volume;
-  }
-
-  public String getBucket() {
-    return bucket;
-  }
-
-  public void setBucket(String bucket) {
-    this.bucket = bucket;
-  }
-
-  public String getKey() {
-    return key;
-  }
-
-  public void setKey(String key) {
-    this.key = key;
-  }
-
-  public long getDataSize() {
-    return dataSize;
-  }
-
-  public void setDataSize(long dataSize) {
-    this.dataSize = dataSize;
-  }
-
-  public Instant getCreationTime() {
-    return creationTime;
-  }
-
-  public void setCreationTime(Instant creationTime) {
-    this.creationTime = creationTime;
-  }
-
-  public Instant getModificationTime() {
-    return modificationTime;
-  }
-
-  public void setModificationTime(Instant modificationTime) {
-    this.modificationTime = modificationTime;
-  }
-
-  public List<Long> getVersions() {
-    return versions;
-  }
-
-  public void setVersions(List<Long> versions) {
-    this.versions = versions;
-  }
-
-  public Map<Long, List<ContainerBlockMetadata>> getBlockIds() {
-    return blockIds;
-  }
-
-  public void setBlockIds(Map<Long, List<ContainerBlockMetadata>> blockIds) {
-    this.blockIds = blockIds;
-  }
-
-  /**
-   * Class to hold ContainerID and BlockID.
-   */
-  public static class ContainerBlockMetadata {
-    private long containerID;
-    private long localID;
-
-    public ContainerBlockMetadata(long containerID, long localID) {
-      this.containerID = containerID;
-      this.localID = localID;
-    }
-
-    public long getContainerID() {
-      return containerID;
-    }
-
-    public long getLocalID() {
-      return localID;
-    }
-  }
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java
deleted file mode 100644
index f2704c5..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon.api.types;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-
-/**
- * Class that represents the API Response structure of Keys within a container.
- */
-public class KeysResponse {
-  /**
-   * Contains a map with total count of keys inside the given container and a
-   * list of keys with metadata.
-   */
-  @JsonProperty("data")
-  private KeysResponseData keysResponseData;
-
-  public KeysResponse() {
-    this(0, new ArrayList<>());
-  }
-
-  public KeysResponse(long totalCount,
-                      Collection<KeyMetadata> keys) {
-    this.keysResponseData =
-        new KeysResponseData(totalCount, keys);
-  }
-
-  public String toJsonString() {
-    try {
-      return JsonUtils.toJsonString(this);
-    } catch (IOException ignored) {
-      return null;
-    }
-  }
-
-  public KeysResponseData getKeysResponseData() {
-    return keysResponseData;
-  }
-
-  public void setKeysResponseData(KeysResponseData keysResponseData) {
-    this.keysResponseData = keysResponseData;
-  }
-
-  /**
-   * Class that encapsulates the data presented in Keys API Response.
-   */
-  public static class KeysResponseData {
-    /**
-     * Total count of the keys.
-     */
-    @JsonProperty("totalCount")
-    private long totalCount;
-
-    /**
-     * An array of keys.
-     */
-    @JsonProperty("keys")
-    private Collection<KeyMetadata> keys;
-
-    KeysResponseData(long totalCount, Collection<KeyMetadata> keys) {
-      this.totalCount = totalCount;
-      this.keys = keys;
-    }
-
-    public long getTotalCount() {
-      return totalCount;
-    }
-
-    public Collection<KeyMetadata> getKeys() {
-      return keys;
-    }
-  }
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/package-info.java
deleted file mode 100644
index cc2a7ab..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Common type definitions for Recon API.
- */
-package org.apache.hadoop.ozone.recon.api.types;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/package-info.java
deleted file mode 100644
index db27ffc..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains application entry point and related classes for Recon.
- */
-package org.apache.hadoop.ozone.recon;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DataSourceConfiguration.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DataSourceConfiguration.java
deleted file mode 100644
index ec6995a..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DataSourceConfiguration.java
+++ /dev/null
@@ -1,86 +0,0 @@
-package org.apache.hadoop.ozone.recon.persistence;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Common configuration needed to instantiate {@link javax.sql.DataSource}.
- */
-public interface DataSourceConfiguration {
-  /**
-   * Get database driver class name available on the classpath.
-   */
-  String getDriverClass();
-
-  /**
-   * Get Jdbc Url for the database server.
-   */
-  String getJdbcUrl();
-
-  /**
-   * Get username for the db.
-   */
-  String getUserName();
-
-  /**
-   * Get password for the db.
-   */
-  String getPassword();
-
-  /**
-   * Whether auto-commit should be enabled on the data source.
-   */
-  boolean setAutoCommit();
-
-  /**
-   * Get the maximum time (in milliseconds) to wait before a call to
-   * getConnection times out.
-   */
-  long getConnectionTimeout();
-
-  /**
-   * Get a string representation of {@link org.jooq.SQLDialect}.
-   */
-  String getSqlDialect();
-
-  /**
-   * Get the maximum number of active connections. In a production database
-   * this should be set to something like 10; SQLite does not allow multiple
-   * concurrent connections, hence this defaults to 1.
-   */
-  Integer getMaxActiveConnections();
-
-  /**
-   * Get the maximum connection age (in seconds).
-   */
-  Integer getMaxConnectionAge();
-
-  /**
-   * Get the maximum idle connection age (in seconds).
-   */
-  Integer getMaxIdleConnectionAge();
-
-  /**
-   * Get the connection test statement for the database, usually SELECT 1.
-   */
-  String getConnectionTestStatement();
-
-  /**
-   * Get the period (in seconds) at which idle connections are tested for
-   * liveness.
-   */
-  Integer getIdleConnectionTestPeriod();
-}
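
A minimal sketch of implementing this interface for the default SQLite database; every value below is illustrative, not a Recon default:

    public class SqliteDataSourceConfiguration
        implements DataSourceConfiguration {
      @Override public String getDriverClass() { return "org.sqlite.JDBC"; }
      @Override public String getJdbcUrl() {
        return "jdbc:sqlite:/tmp/recon.db";
      }
      @Override public String getUserName() { return ""; }
      @Override public String getPassword() { return ""; }
      @Override public boolean setAutoCommit() { return true; }
      @Override public long getConnectionTimeout() { return 30000; }
      @Override public String getSqlDialect() { return "SQLITE"; }
      @Override public Integer getMaxActiveConnections() { return 1; }
      @Override public Integer getMaxConnectionAge() { return 120; }
      @Override public Integer getMaxIdleConnectionAge() { return 120; }
      @Override public String getConnectionTestStatement() {
        return "SELECT 1";
      }
      @Override public Integer getIdleConnectionTestPeriod() { return 30; }
    }
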
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java
deleted file mode 100644
index 7b28d00..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java
+++ /dev/null
@@ -1,74 +0,0 @@
-package org.apache.hadoop.ozone.recon.persistence;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import javax.sql.DataSource;
-
-import org.apache.commons.lang3.StringUtils;
-import org.sqlite.SQLiteDataSource;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.jolbox.bonecp.BoneCPDataSource;
-
-/**
- * Provide a {@link javax.sql.DataSource} for the application.
- */
-public class DefaultDataSourceProvider implements Provider<DataSource> {
-
-  @Inject
-  private DataSourceConfiguration configuration;
-
-  /**
-   * Create a pooled datasource for the application.
-   *
-   * The default SQLite database does not work with a connection pool (most
-   * embedded databases do not), so the native SQLite implementation is
-   * returned for the default DB.
-   */
-  @Override
-  public DataSource get() {
-    if (StringUtils.contains(configuration.getJdbcUrl(), "sqlite")) {
-      SQLiteDataSource ds = new SQLiteDataSource();
-      ds.setUrl(configuration.getJdbcUrl());
-      return ds;
-    }
-
-    BoneCPDataSource cpDataSource = new BoneCPDataSource();
-
-    cpDataSource.setDriverClass(configuration.getDriverClass());
-    cpDataSource.setJdbcUrl(configuration.getJdbcUrl());
-    cpDataSource.setUsername(configuration.getUserName());
-    cpDataSource.setPassword(configuration.getPassword());
-    cpDataSource.setDefaultAutoCommit(configuration.setAutoCommit());
-    cpDataSource.setConnectionTimeoutInMs(configuration.getConnectionTimeout());
-    cpDataSource.setMaxConnectionsPerPartition(
-        configuration.getMaxActiveConnections());
-    cpDataSource.setMaxConnectionAgeInSeconds(
-        configuration.getMaxConnectionAge());
-    cpDataSource.setIdleMaxAgeInSeconds(
-        configuration.getMaxIdleConnectionAge());
-    cpDataSource.setIdleConnectionTestPeriodInSeconds(
-        configuration.getIdleConnectionTestPeriod());
-    cpDataSource.setConnectionTestStatement(
-        configuration.getConnectionTestStatement());
-
-    return cpDataSource;
-  }
-}
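
The provider branches purely on the JDBC URL, which a small Guice harness can demonstrate. This reuses the hypothetical SqliteDataSourceConfiguration sketched above:

    import javax.sql.DataSource;

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Injector;

    public final class DataSourceProviderDemo {
      public static void main(String[] args) {
        Injector injector = Guice.createInjector(new AbstractModule() {
          @Override
          protected void configure() {
            bind(DataSourceConfiguration.class)
                .toInstance(new SqliteDataSourceConfiguration());
          }
        });
        DataSource ds =
            injector.getInstance(DefaultDataSourceProvider.class).get();
        // With a "jdbc:sqlite:" URL this prints the SQLiteDataSource class,
        // not the BoneCP pool.
        System.out.println(ds.getClass().getName());
      }
    }
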
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/JooqPersistenceModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/JooqPersistenceModule.java
deleted file mode 100644
index 2ba4cf7..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/JooqPersistenceModule.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon.persistence;
-
-import static com.google.inject.matcher.Matchers.annotatedWith;
-import static com.google.inject.matcher.Matchers.any;
-
-import java.sql.Connection;
-import javax.sql.DataSource;
-
-import org.jooq.Configuration;
-import org.jooq.ConnectionProvider;
-import org.jooq.SQLDialect;
-import org.jooq.impl.DefaultConfiguration;
-import org.springframework.dao.DataAccessException;
-import org.springframework.jdbc.datasource.DataSourceTransactionManager;
-import org.springframework.jdbc.datasource.DataSourceUtils;
-import org.springframework.jdbc.datasource.TransactionAwareDataSourceProxy;
-import org.springframework.transaction.annotation.Transactional;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Provider;
-import com.google.inject.Provides;
-import com.google.inject.Singleton;
-
-/**
- * Persistence module that provides binding for {@link DataSource} and
- * a MethodInterceptor for nested transactions support.
- */
-public class JooqPersistenceModule extends AbstractModule {
-
-  private Provider<DataSourceConfiguration> configurationProvider;
-  public static final SQLDialect DEFAULT_DIALECT = SQLDialect.SQLITE;
-
-  public JooqPersistenceModule(
-      Provider<DataSourceConfiguration> configurationProvider) {
-    this.configurationProvider = configurationProvider;
-  }
-
-  @Override
-  protected void configure() {
-    bind(DataSource.class).toProvider(DefaultDataSourceProvider.class)
-        .in(Singleton.class);
-
-    TransactionalMethodInterceptor interceptor =
-        new TransactionalMethodInterceptor(
-            getProvider(DataSourceTransactionManager.class));
-
-    bindInterceptor(annotatedWith(Transactional.class), any(), interceptor);
-    bindInterceptor(any(), annotatedWith(Transactional.class), interceptor);
-  }
-
-  @Provides
-  @Singleton
-  Configuration getConfiguration(DefaultDataSourceProvider provider) {
-    DataSource dataSource = provider.get();
-
-    return new DefaultConfiguration()
-        .set(dataSource)
-        .set(new SpringConnectionProvider(dataSource))
-        .set(SQLDialect.valueOf(configurationProvider.get().getSqlDialect()));
-  }
-
-  @Provides
-  @Singleton
-  DataSourceTransactionManager provideDataSourceTransactionManager(
-      DataSource dataSource) {
-    return new DataSourceTransactionManager(
-        new TransactionAwareDataSourceProxy(dataSource));
-  }
-
-  /**
-   * This connection provider uses Spring to extract the
-   * {@link TransactionAwareDataSourceProxy} from our BoneCP pooled connection
-   * {@link DataSource}.
-   */
-  static class SpringConnectionProvider implements ConnectionProvider {
-
-    private final DataSource dataSource;
-
-    SpringConnectionProvider(DataSource dataSource) {
-      this.dataSource = dataSource;
-    }
-
-    @Override
-    public Connection acquire() throws DataAccessException {
-      return DataSourceUtils.getConnection(dataSource);
-    }
-
-    @Override
-    public void release(Connection connection) throws DataAccessException {
-      DataSourceUtils.releaseConnection(connection, dataSource);
-    }
-  }
-}
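
Wiring the module looks roughly like this; again, SqliteDataSourceConfiguration is the hypothetical implementation sketched earlier:

    import org.jooq.Configuration;
    import org.jooq.DSLContext;
    import org.jooq.impl.DSL;

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Injector;

    public final class JooqWiringDemo {
      public static void main(String[] args) {
        DataSourceConfiguration conf = new SqliteDataSourceConfiguration();
        Injector injector = Guice.createInjector(
            new AbstractModule() {
              @Override
              protected void configure() {
                bind(DataSourceConfiguration.class).toInstance(conf);
              }
            },
            new JooqPersistenceModule(() -> conf));
        Configuration jooqConfig = injector.getInstance(Configuration.class);
        DSLContext dsl = DSL.using(jooqConfig);
        System.out.println(dsl.dialect()); // SQLITE
      }
    }
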
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/TransactionalMethodInterceptor.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/TransactionalMethodInterceptor.java
deleted file mode 100644
index 4479ddd..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/TransactionalMethodInterceptor.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon.persistence;
-
-import org.aopalliance.intercept.MethodInterceptor;
-import org.aopalliance.intercept.MethodInvocation;
-import org.springframework.jdbc.datasource.DataSourceTransactionManager;
-import org.springframework.transaction.TransactionStatus;
-import org.springframework.transaction.UnexpectedRollbackException;
-import org.springframework.transaction.support.DefaultTransactionDefinition;
-
-import com.google.inject.Provider;
-
-/**
- * A {@link MethodInterceptor} that implements nested transactions.
- * <p>
- * Only the outermost transactional method will <code>commit()</code> or
- * <code>rollback()</code> the contextual transaction. This can be verified
- * through {@link TransactionStatus#isNewTransaction()}, which returns
- * <code>true</code> only for the outermost transactional method call.
- */
-public class TransactionalMethodInterceptor implements MethodInterceptor {
-
-  private Provider<DataSourceTransactionManager> transactionManagerProvider;
-
-  TransactionalMethodInterceptor(
-      Provider<DataSourceTransactionManager> transactionManagerProvider) {
-    this.transactionManagerProvider = transactionManagerProvider;
-  }
-
-  @Override
-  public Object invoke(MethodInvocation invocation) throws Throwable {
-    DataSourceTransactionManager transactionManager =
-        transactionManagerProvider.get();
-
-    DefaultTransactionDefinition transactionDefinition =
-        new DefaultTransactionDefinition();
-    TransactionStatus transaction = transactionManager.getTransaction(
-        transactionDefinition);
-
-    try {
-      Object result = invocation.proceed();
-
-      try {
-        if (transaction.isNewTransaction()) {
-          transactionManager.commit(transaction);
-        }
-      } catch (UnexpectedRollbackException ignore) {
-        // Commit can fail with UnexpectedRollbackException when an inner
-        // scope marked the transaction rollback-only; intentionally ignored.
-      }
-
-      return result;
-    } catch (Exception e) {
-      if (transaction.isNewTransaction()) {
-        transactionManager.rollback(transaction);
-      }
-
-      throw e;
-    }
-  }
-}
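
A sketch of the behavior this interceptor provides once bound by JooqPersistenceModule. The service class and methods are hypothetical, and the object must be created by Guice for interception to apply:

    import org.springframework.transaction.annotation.Transactional;

    public class FileCountWriter {
      @Transactional
      public void rebuild() {
        truncate();   // joins the transaction opened for rebuild()
        insertAll();  // same transaction
        // Commit happens here, because rebuild() is the outermost
        // transactional frame (isNewTransaction() == true for it).
      }

      @Transactional
      public void truncate() {
        // delete rows
      }

      @Transactional
      public void insertAll() {
        // insert rows
      }
    }
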
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/package-info.java
deleted file mode 100644
index 0ba0fa4..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package defines the persistence interfaces for Recon SQL DB.
- */
-package org.apache.hadoop.ozone.recon.persistence;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
deleted file mode 100644
index fcfcaa5..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.recovery;
-
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-
-/**
- * Interface for the OM Metadata Manager + DB store maintained by
- * Recon.
- */
-public interface ReconOMMetadataManager extends OMMetadataManager {
-
-  /**
-   * Refresh the DB instance to point to a new location, discarding the
-   * old DB instance.
-   * @param dbLocation New location of the OM Snapshot DB.
-   */
-  void updateOmDB(File dbLocation) throws IOException;
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
deleted file mode 100644
index 3d55c99..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.recovery;
-
-import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR;
-
-import java.io.File;
-import java.io.IOException;
-
-import javax.inject.Inject;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
-import org.apache.hadoop.ozone.recon.ReconUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Recon's implementation of the OM Metadata manager. By extending and
- * relying on the OmMetadataManagerImpl, we can make sure all changes made to
- * schema in OM will be automatically picked up by Recon.
- */
-public class ReconOmMetadataManagerImpl extends OmMetadataManagerImpl
-    implements ReconOMMetadataManager {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ReconOmMetadataManagerImpl.class);
-
-  private OzoneConfiguration ozoneConfiguration;
-  private ReconUtils reconUtils;
-
-  @Inject
-  public ReconOmMetadataManagerImpl(OzoneConfiguration configuration,
-                                    ReconUtils reconUtils) {
-    this.reconUtils = reconUtils;
-    this.ozoneConfiguration = configuration;
-  }
-
-  @Override
-  public void start(OzoneConfiguration configuration) throws IOException {
-    LOG.info("Starting ReconOMMetadataManagerImpl");
-    File reconDbDir =
-        reconUtils.getReconDbDir(configuration, OZONE_RECON_OM_SNAPSHOT_DB_DIR);
-    File lastKnownOMSnapshot =
-        reconUtils.getLastKnownDB(reconDbDir, RECON_OM_SNAPSHOT_DB);
-    if (lastKnownOMSnapshot != null) {
-      LOG.info("Last known snapshot for OM : {}",
-          lastKnownOMSnapshot.getAbsolutePath());
-      initializeNewRdbStore(lastKnownOMSnapshot);
-    }
-  }
-
-  /**
-   * Replace existing DB instance with new one.
-   *
-   * @param dbFile new DB file location.
-   */
-  private void initializeNewRdbStore(File dbFile) throws IOException {
-    try {
-      DBStoreBuilder dbStoreBuilder =
-          DBStoreBuilder.newBuilder(ozoneConfiguration)
-          .setName(dbFile.getName())
-          .setPath(dbFile.toPath().getParent());
-      addOMTablesAndCodecs(dbStoreBuilder);
-      DBStore newStore = dbStoreBuilder.build();
-      setStore(newStore);
-      LOG.info("Created OM DB snapshot at {}.",
-          dbFile.getAbsolutePath());
-    } catch (IOException ioEx) {
-      LOG.error("Unable to initialize Recon OM DB snapshot store.",
-          ioEx);
-    }
-    if (getStore() != null) {
-      initializeOmTables();
-    }
-  }
-
-  @Override
-  public void updateOmDB(File newDbLocation) throws IOException {
-    if (getStore() != null) {
-      File oldDBLocation = getStore().getDbLocation();
-      if (oldDBLocation.exists()) {
-        LOG.info("Cleaning up old OM snapshot db at {}.",
-            oldDBLocation.getAbsolutePath());
-        FileUtils.deleteDirectory(oldDBLocation);
-      }
-    }
-    initializeNewRdbStore(newDbLocation);
-  }
-
-}
\ No newline at end of file
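
A sketch of the snapshot-refresh flow this class supports. In Recon both constructor arguments are injected (a no-arg ReconUtils is assumed here), and newCheckpointDir stands in for an already-downloaded OM checkpoint:

    import java.io.File;
    import java.io.IOException;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.recon.ReconUtils;

    public final class OmSnapshotRefresh {
      static void refresh(File newCheckpointDir) throws IOException {
        OzoneConfiguration conf = new OzoneConfiguration();
        ReconOMMetadataManager omMetadataManager =
            new ReconOmMetadataManagerImpl(conf, new ReconUtils());
        omMetadataManager.start(conf);
        // Point Recon at the freshly downloaded OM DB; updateOmDB deletes
        // the previous snapshot directory before switching stores.
        omMetadataManager.updateOmDB(newCheckpointDir);
      }
    }
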
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/package-info.java
deleted file mode 100644
index 5c00ee9..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * The classes in this package handle OM snapshot recovery and checkpoints.
- */
-package org.apache.hadoop.ozone.recon.recovery;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java
deleted file mode 100644
index 3da35cc..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.spi;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
-import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-
-/**
- * The Recon Container DB Service interface.
- */
-@InterfaceStability.Unstable
-public interface ContainerDBServiceProvider {
-
-  /**
-   * Create a new container DB and bulk-store the container to key-prefix
-   * mappings.
-   * @param containerKeyPrefixCounts Map of containerId, key-prefix tuple to
-   *                                 key count.
-   */
-  void initNewContainerDB(Map<ContainerKeyPrefix, Integer>
-                                    containerKeyPrefixCounts)
-      throws IOException;
-
-  /**
-   * Store the container to Key prefix mapping into the Recon Container DB.
-   *
-   * @param containerKeyPrefix the containerId, key-prefix tuple.
-   * @param count              Count of Keys with that prefix.
-   */
-  void storeContainerKeyMapping(ContainerKeyPrefix containerKeyPrefix,
-                                Integer count) throws IOException;
-
-  /**
-   * Store the containerID to key count mapping in the container DB store.
-   *
-   * @param containerID the containerID.
-   * @param count count of the keys within the given containerID.
-   * @throws IOException
-   */
-  void storeContainerKeyCount(Long containerID, Long count) throws IOException;
-
-  /**
-   * Store the total count of containers into the container DB store.
-   *
-   * @param count count of the containers present in the system.
-   */
-  void storeContainerCount(Long count);
-
-  /**
-   * Get the stored key prefix count for the given containerID, key prefix.
-   *
-   * @param containerKeyPrefix the containerID, key-prefix tuple.
-   * @return count of keys with that prefix.
-   */
-  Integer getCountForContainerKeyPrefix(
-      ContainerKeyPrefix containerKeyPrefix) throws IOException;
-
-  /**
-   * Get the total count of keys within the given containerID.
-   *
-   * @param containerID the given containerId.
-   * @return count of keys within the given containerID.
-   * @throws IOException
-   */
-  long getKeyCountForContainer(Long containerID) throws IOException;
-
-  /**
-   * Check whether the given containerID exists.
-   *
-   * @param containerID the given containerID.
-   * @return true if the given containerID exists, false otherwise.
-   * @throws IOException
-   */
-  boolean doesContainerExists(Long containerID) throws IOException;
-
-  /**
-   * Get the stored key prefixes for the given containerId.
-   *
-   * @param containerId the given containerId.
-   * @return Map of Key prefix -> count.
-   */
-  Map<ContainerKeyPrefix, Integer> getKeyPrefixesForContainer(
-      long containerId) throws IOException;
-
-  /**
-   * Get the stored key prefixes for the given containerId starting
-   * after the given keyPrefix.
-   *
-   * @param containerId the given containerId.
-   * @param prevKeyPrefix the key prefix to seek to and start scanning.
-   * @return Map of Key prefix -> count.
-   */
-  Map<ContainerKeyPrefix, Integer> getKeyPrefixesForContainer(
-      long containerId, String prevKeyPrefix) throws IOException;
-
-  /**
-   * Get a map of containerID to ContainerMetadata for up to the given
-   * number of containers. If the limit is -1 or any negative integer,
-   * return all containers without any limit.
-   *
-   * @param limit the no. of containers to fetch.
-   * @param prevContainer containerID after which the results are returned.
-   * @return Map of containerID -> containerMetadata.
-   * @throws IOException
-   */
-  Map<Long, ContainerMetadata> getContainers(int limit, long prevContainer)
-      throws IOException;
-
-  /**
-   * Delete an entry in the container DB.
-   *
-   * @param containerKeyPrefix container key prefix to be deleted.
-   * @throws IOException exception.
-   */
-  void deleteContainerMapping(ContainerKeyPrefix containerKeyPrefix)
-      throws IOException;
-
-  /**
-   * Get iterator to the entire container DB.
-   * @return TableIterator
-   */
-  TableIterator getContainerTableIterator() throws IOException;
-
-  /**
-   * Get the total count of containers present in the system.
-   *
-   * @return total count of containers.
-   * @throws IOException
-   */
-  long getCountForContainers() throws IOException;
-
-  /**
-   * Increment the total count for containers in the system by the given count.
-   *
-   * @param count no. of new containers to add to containers total count.
-   */
-  void incrementContainerCountBy(long count);
-}
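
A sketch of the write/read cycle the interface describes; the provider argument would be the injected implementation (e.g. the ContainerDBServiceProviderImpl below):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;

    public final class ContainerDbUsage {
      static void populate(ContainerDBServiceProvider provider)
          throws IOException {
        Map<ContainerKeyPrefix, Integer> initial = new HashMap<>();
        initial.put(new ContainerKeyPrefix(1L, "/vol1/bucket1/key1", 0L), 1);
        provider.initNewContainerDB(initial);
        provider.storeContainerCount(1L);

        // Incrementally record a second key for container 1.
        provider.storeContainerKeyMapping(
            new ContainerKeyPrefix(1L, "/vol1/bucket1/key2", 0L), 1);
        provider.storeContainerKeyCount(1L, 2L);

        System.out.println(provider.getKeyCountForContainer(1L)); // 2
        System.out.println(provider.doesContainerExists(1L));     // true
      }
    }
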
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/HddsDatanodeServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/HddsDatanodeServiceProvider.java
deleted file mode 100644
index ce7d414..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/HddsDatanodeServiceProvider.java
+++ /dev/null
@@ -1,25 +0,0 @@
-package org.apache.hadoop.ozone.recon.spi;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Interface to access datanode endpoints.
- */
-public interface HddsDatanodeServiceProvider {
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java
deleted file mode 100644
index 3f57af6..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.spi;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-
-/**
- * Interface to access OM endpoints.
- */
-public interface OzoneManagerServiceProvider {
-
-  /**
-   * Start a task to sync data from OM.
-   */
-  void start();
-
-  /**
-   * Stop the OM sync data.
-   */
-  void stop();
-
-  /**
-   * Return instance of OM Metadata manager.
-   * @return OM metadata manager instance.
-   */
-  OMMetadataManager getOMMetadataManagerInstance();
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java
deleted file mode 100644
index db052a7..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.spi;
-
-/**
- * Interface to access SCM endpoints.
- */
-public interface StorageContainerServiceProvider {
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java
deleted file mode 100644
index 85edb7e..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java
+++ /dev/null
@@ -1,402 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.spi.impl;
-
-import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_COUNT_KEY;
-import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_COUNT_TABLE;
-import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_TABLE;
-import static org.jooq.impl.DSL.currentTimestamp;
-import static org.jooq.impl.DSL.select;
-import static org.jooq.impl.DSL.using;
-
-import java.io.File;
-import java.io.IOException;
-import java.sql.Timestamp;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import javax.inject.Inject;
-import javax.inject.Singleton;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.recon.ReconUtils;
-import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
-import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
-import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
-import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats;
-import org.jooq.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Implementation of the Recon Container DB Service.
- */
-@Singleton
-public class ContainerDBServiceProviderImpl
-    implements ContainerDBServiceProvider {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerDBServiceProviderImpl.class);
-
-  private Table<ContainerKeyPrefix, Integer> containerKeyTable;
-  private Table<Long, Long> containerKeyCountTable;
-  private GlobalStatsDao globalStatsDao;
-
-  @Inject
-  private OzoneConfiguration configuration;
-
-  @Inject
-  private DBStore containerDbStore;
-
-  @Inject
-  private Configuration sqlConfiguration;
-
-  @Inject
-  private ReconUtils reconUtils;
-
-  @Inject
-  public ContainerDBServiceProviderImpl(DBStore dbStore,
-                                        Configuration sqlConfiguration) {
-    globalStatsDao = new GlobalStatsDao(sqlConfiguration);
-    initializeTables(dbStore);
-  }
-
-  /**
-   * Initialize a new container DB instance, discard the old instance, and
-   * store the passed-in container prefix counts in the newly created DB
-   * instance. Also, truncate or reset the SQL tables as required.
-   * @param containerKeyPrefixCounts Map of container key-prefix to
-   *                                 number of keys with the prefix.
-   * @throws IOException
-   */
-  @Override
-  public void initNewContainerDB(Map<ContainerKeyPrefix, Integer>
-                                     containerKeyPrefixCounts)
-      throws IOException {
-
-    File oldDBLocation = containerDbStore.getDbLocation();
-    containerDbStore = ReconContainerDBProvider
-        .getNewDBStore(configuration, reconUtils);
-    LOG.info("Creating new Recon Container DB at {}",
-        containerDbStore.getDbLocation().getAbsolutePath());
-    initializeTables(containerDbStore);
-
-    if (oldDBLocation.exists()) {
-      LOG.info("Cleaning up old Recon Container DB at {}.",
-          oldDBLocation.getAbsolutePath());
-      FileUtils.deleteDirectory(oldDBLocation);
-    }
-
-    if (containerKeyPrefixCounts != null) {
-      for (Map.Entry<ContainerKeyPrefix, Integer> entry :
-          containerKeyPrefixCounts.entrySet()) {
-        containerKeyTable.put(entry.getKey(), entry.getValue());
-      }
-    }
-
-    // reset total count of containers to zero
-    storeContainerCount(0L);
-  }
-
-  /**
-   * Initialize the container DB tables.
-   * @param dbStore the DB store to fetch the tables from.
-   */
-  private void initializeTables(DBStore dbStore) {
-    try {
-      this.containerKeyTable = dbStore.getTable(CONTAINER_KEY_TABLE,
-          ContainerKeyPrefix.class, Integer.class);
-      this.containerKeyCountTable = dbStore.getTable(CONTAINER_KEY_COUNT_TABLE,
-          Long.class, Long.class);
-    } catch (IOException e) {
-      LOG.error("Unable to create Container Key tables." + e);
-    }
-  }
-
-  /**
-   * Concatenate the containerID and Key Prefix using a delimiter and store the
-   * count into the container DB store.
-   *
-   * @param containerKeyPrefix the containerID, key-prefix tuple.
-   * @param count Count of the keys matching that prefix.
-   * @throws IOException
-   */
-  @Override
-  public void storeContainerKeyMapping(ContainerKeyPrefix containerKeyPrefix,
-                                       Integer count)
-      throws IOException {
-    containerKeyTable.put(containerKeyPrefix, count);
-  }
-
-  /**
-   * Store the containerID -> no. of keys count into the container DB store.
-   *
-   * @param containerID the containerID.
-   * @param count count of the keys within the given containerID.
-   * @throws IOException
-   */
-  @Override
-  public void storeContainerKeyCount(Long containerID, Long count)
-      throws IOException {
-    containerKeyCountTable.put(containerID, count);
-  }
-
-  /**
-   * Get the total count of keys within the given containerID.
-   *
-   * @param containerID the given containerID.
-   * @return count of keys within the given containerID.
-   * @throws IOException
-   */
-  @Override
-  public long getKeyCountForContainer(Long containerID) throws IOException {
-    Long keyCount = containerKeyCountTable.get(containerID);
-    return keyCount == null ? 0L : keyCount;
-  }
-
-  /**
-   * Check whether the given containerID exists.
-   *
-   * @param containerID the given containerID.
-   * @return true if the given containerID exists, false otherwise.
-   * @throws IOException
-   */
-  @Override
-  public boolean doesContainerExists(Long containerID) throws IOException {
-    return containerKeyCountTable.get(containerID) != null;
-  }
-
-  /**
-   * Put together the key from the passed in object and get the count from
-   * the container DB store.
-   *
-   * @param containerKeyPrefix the containerID, key-prefix tuple.
-   * @return count of keys matching the containerID, key-prefix.
-   * @throws IOException
-   */
-  @Override
-  public Integer getCountForContainerKeyPrefix(
-      ContainerKeyPrefix containerKeyPrefix) throws IOException {
-    Integer count = containerKeyTable.get(containerKeyPrefix);
-    return count == null ? Integer.valueOf(0) : count;
-  }
-
-  /**
-   * Get key prefixes for the given container ID.
-   *
-   * @param containerId the given containerID.
-   * @return Map of (Key-Prefix,Count of Keys).
-   */
-  @Override
-  public Map<ContainerKeyPrefix, Integer> getKeyPrefixesForContainer(
-      long containerId) throws IOException {
-    // set the default startKeyPrefix to empty string
-    return getKeyPrefixesForContainer(containerId, StringUtils.EMPTY);
-  }
-
-  /**
-   * Use the DB's prefix seek iterator to start the scan from the given
-   * container ID and prev key prefix. The prev key prefix is skipped from
-   * the result.
-   *
-   * @param containerId the given containerId.
-   * @param prevKeyPrefix the given key prefix to start the scan from.
-   * @return Map of (Key-Prefix,Count of Keys).
-   */
-  @Override
-  public Map<ContainerKeyPrefix, Integer> getKeyPrefixesForContainer(
-      long containerId, String prevKeyPrefix) throws IOException {
-
-    Map<ContainerKeyPrefix, Integer> prefixes = new LinkedHashMap<>();
-    TableIterator<ContainerKeyPrefix, ? extends KeyValue<ContainerKeyPrefix,
-        Integer>> containerIterator = containerKeyTable.iterator();
-    ContainerKeyPrefix seekKey;
-    boolean skipPrevKey = false;
-    if (StringUtils.isNotBlank(prevKeyPrefix)) {
-      skipPrevKey = true;
-      seekKey = new ContainerKeyPrefix(containerId, prevKeyPrefix);
-    } else {
-      seekKey = new ContainerKeyPrefix(containerId);
-    }
-    KeyValue<ContainerKeyPrefix, Integer> seekKeyValue =
-        containerIterator.seek(seekKey);
-
-    // check if RocksDB was able to seek correctly to the given key prefix
-    // if not, then return empty result
-    // In case of an empty prevKeyPrefix, all the keys in the container are
-    // returned
-    if (seekKeyValue == null ||
-        (StringUtils.isNotBlank(prevKeyPrefix) &&
-            !seekKeyValue.getKey().getKeyPrefix().equals(prevKeyPrefix))) {
-      return prefixes;
-    }
-
-    while (containerIterator.hasNext()) {
-      KeyValue<ContainerKeyPrefix, Integer> keyValue = containerIterator.next();
-      ContainerKeyPrefix containerKeyPrefix = keyValue.getKey();
-
-      // skip the prev key if prev key is present
-      if (skipPrevKey &&
-          containerKeyPrefix.getKeyPrefix().equals(prevKeyPrefix)) {
-        continue;
-      }
-
-      // The prefix seek only guarantees that the iterator's head will be
-      // positioned at the first prefix match. We still have to check that
-      // each entry belongs to the requested containerId.
-      if (containerKeyPrefix.getContainerId() == containerId) {
-        if (StringUtils.isNotEmpty(containerKeyPrefix.getKeyPrefix())) {
-          prefixes.put(new ContainerKeyPrefix(containerId,
-              containerKeyPrefix.getKeyPrefix(),
-              containerKeyPrefix.getKeyVersion()),
-              keyValue.getValue());
-        } else {
-          LOG.warn("Null key prefix returned for containerId = " + containerId);
-        }
-      } else {
-        break; //Break when the first mismatch occurs.
-      }
-    }
-    return prefixes;
-  }
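-
-  // Illustrative sketch (editorial addition, not in the original source):
-  // resuming a scan after a known key prefix. 'provider' stands in for a
-  // ContainerDBServiceProvider instance and is hypothetical here.
-  //
-  //   // All key prefixes in container 1:
-  //   Map<ContainerKeyPrefix, Integer> all =
-  //       provider.getKeyPrefixesForContainer(1L);
-  //   // Prefixes that follow an existing prefix "vol/bucket/key1" (the
-  //   // prefix itself is skipped; an unknown prefix yields an empty map):
-  //   Map<ContainerKeyPrefix, Integer> rest =
-  //       provider.getKeyPrefixesForContainer(1L, "vol/bucket/key1");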
-
-  /**
-   * Iterate the DB to construct a Map of containerID -> containerMetadata
-   * only for the given limit from the given start key. The start containerID
-   * is skipped from the result.
-   *
-   * Return all the containers if limit < 0.
-   *
-   * @param limit No of containers to get.
-   * @param prevContainer containerID after which the
-   *                      list of containers are scanned.
-   * @return Map of containerID -> containerMetadata.
-   * @throws IOException
-   */
-  @Override
-  public Map<Long, ContainerMetadata> getContainers(int limit,
-                                                    long prevContainer)
-      throws IOException {
-    Map<Long, ContainerMetadata> containers = new LinkedHashMap<>();
-    TableIterator<ContainerKeyPrefix, ? extends KeyValue<ContainerKeyPrefix,
-        Integer>> containerIterator = containerKeyTable.iterator();
-    ContainerKeyPrefix seekKey;
-    if (prevContainer > 0L) {
-      seekKey = new ContainerKeyPrefix(prevContainer);
-      KeyValue<ContainerKeyPrefix,
-          Integer> seekKeyValue = containerIterator.seek(seekKey);
-      // Check if RocksDB was able to correctly seek to the given
-      // prevContainer containerId. If not, then return empty result
-      if (seekKeyValue != null &&
-          seekKeyValue.getKey().getContainerId() != prevContainer) {
-        return containers;
-      } else {
-        // seek to the prevContainer+1 containerID to start scan
-        seekKey = new ContainerKeyPrefix(prevContainer + 1);
-        containerIterator.seek(seekKey);
-      }
-    }
-    while (containerIterator.hasNext()) {
-      KeyValue<ContainerKeyPrefix, Integer> keyValue = containerIterator.next();
-      ContainerKeyPrefix containerKeyPrefix = keyValue.getKey();
-      Long containerID = containerKeyPrefix.getContainerId();
-      Integer numberOfKeys = keyValue.getValue();
-
-      // Break the loop once the map already holds 'limit' containers and
-      // the next entry would introduce a new containerID.
-      if (containers.size() == limit && !containers.containsKey(containerID)) {
-        break;
-      }
-
-      // initialize containerMetadata with 0 as number of keys.
-      containers.computeIfAbsent(containerID, ContainerMetadata::new);
-      // increment number of keys for the containerID
-      ContainerMetadata containerMetadata = containers.get(containerID);
-      containerMetadata.setNumberOfKeys(containerMetadata.getNumberOfKeys() +
-          numberOfKeys);
-      containers.put(containerID, containerMetadata);
-    }
-    return containers;
-  }
-
-  @Override
-  public void deleteContainerMapping(ContainerKeyPrefix containerKeyPrefix)
-      throws IOException {
-    containerKeyTable.delete(containerKeyPrefix);
-  }
-
-  /**
-   * Get total count of containers.
-   *
-   * @return total count of containers.
-   */
-  @Override
-  public long getCountForContainers() {
-    GlobalStats containerCountRecord =
-        globalStatsDao.fetchOneByKey(CONTAINER_COUNT_KEY);
-
-    return (containerCountRecord == null) ? 0L :
-        containerCountRecord.getValue();
-  }
-
-  @Override
-  public TableIterator getContainerTableIterator() {
-    return containerKeyTable.iterator();
-  }
-
-  /**
-   * Store the total count of containers into the container DB store.
-   *
-   * @param count count of the containers present in the system.
-   */
-  @Override
-  public void storeContainerCount(Long count) {
-    // Get the current timestamp
-    Timestamp now =
-        using(sqlConfiguration).fetchValue(select(currentTimestamp()));
-    GlobalStats containerCountRecord =
-        globalStatsDao.fetchOneByKey(CONTAINER_COUNT_KEY);
-    GlobalStats globalStatsRecord =
-        new GlobalStats(CONTAINER_COUNT_KEY, count, now);
-
-    // Insert a new record for CONTAINER_COUNT_KEY if it does not exist
-    if (containerCountRecord == null) {
-      globalStatsDao.insert(globalStatsRecord);
-    } else {
-      globalStatsDao.update(globalStatsRecord);
-    }
-  }
-
-  /**
-   * Increment the total count for containers in the system by the given count.
-   *
-   * @param count no. of new containers to add to containers total count.
-   */
-  @Override
-  public void incrementContainerCountBy(long count) {
-    long containersCount = getCountForContainers();
-    storeContainerCount(containersCount + count);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java
deleted file mode 100644
index e35f900..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.spi.impl;
-
-import static org.apache.commons.compress.utils.CharsetNames.UTF_8;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
-import org.apache.commons.lang3.ArrayUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
-import org.apache.hadoop.hdds.utils.db.Codec;
-
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-
-/**
- * Codec to encode ContainerKeyPrefix as byte array.
- */
-public class ContainerKeyPrefixCodec implements Codec<ContainerKeyPrefix> {
-
-  private static final String KEY_DELIMITER = "_";
-
-  @Override
-  public byte[] toPersistedFormat(ContainerKeyPrefix containerKeyPrefix)
-      throws IOException {
-    Preconditions.checkNotNull(containerKeyPrefix,
-            "Null object can't be converted to byte array.");
-    byte[] containerIdBytes = Longs.toByteArray(containerKeyPrefix
-        .getContainerId());
-
-    //Prefix seek can be done only with containerId. In that case, we can
-    // expect the key and version to be undefined.
-    if (StringUtils.isNotEmpty(containerKeyPrefix.getKeyPrefix())) {
-      byte[] keyPrefixBytes = (KEY_DELIMITER +
-          containerKeyPrefix.getKeyPrefix()).getBytes(UTF_8);
-      containerIdBytes = ArrayUtils.addAll(containerIdBytes, keyPrefixBytes);
-    }
-
-    if (containerKeyPrefix.getKeyVersion() != -1) {
-      containerIdBytes = ArrayUtils.addAll(containerIdBytes, KEY_DELIMITER
-          .getBytes(UTF_8));
-      containerIdBytes = ArrayUtils.addAll(containerIdBytes, Longs.toByteArray(
-          containerKeyPrefix.getKeyVersion()));
-    }
-    return containerIdBytes;
-  }
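-
-  // Layout sketch (editorial note, derived from the logic above): a fully
-  // specified prefix such as (containerId=7, keyPrefix="vol/b/k", version=2)
-  // persists as
-  //
-  //   [8-byte big-endian 7]["_" + UTF-8 "vol/b/k"]["_" + 8-byte big-endian 2]
-  //
-  // while a prefix-seek key built from a containerId alone persists only
-  // the leading 8 bytes.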
-
-  @Override
-  public ContainerKeyPrefix fromPersistedFormat(byte[] rawData)
-      throws IOException {
-
-    // First 8 bytes is the containerId.
-    long containerIdFromDB = ByteBuffer.wrap(ArrayUtils.subarray(
-        rawData, 0, Long.BYTES)).getLong();
-    // When reading from byte[], we can always expect to have the containerId,
-    // key and version parts in the byte array.
-    byte[] keyBytes = ArrayUtils.subarray(rawData,
-        Long.BYTES + 1,
-        rawData.length - Long.BYTES - 1);
-    String keyPrefix = new String(keyBytes, UTF_8);
-
-    // Last 8 bytes is the key version.
-    byte[] versionBytes = ArrayUtils.subarray(rawData,
-        rawData.length - Long.BYTES,
-        rawData.length);
-    long version = ByteBuffer.wrap(versionBytes).getLong();
-    return new ContainerKeyPrefix(containerIdFromDB, keyPrefix, version);
-  }
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
deleted file mode 100644
index 789b301..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
+++ /dev/null
@@ -1,362 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.spi.impl;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT;
-import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_CONNECTION_REQUEST_TIMEOUT;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_CONNECTION_TIMEOUT;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_CONNECTION_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_FLUSH_PARAM;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INTERVAL;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SOCKET_TIMEOUT;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SOCKET_TIMEOUT_DEFAULT;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import javax.inject.Inject;
-import javax.inject.Singleton;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest;
-import org.apache.hadoop.ozone.recon.ReconUtils;
-import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider;
-import org.apache.hadoop.ozone.recon.tasks.OMDBUpdatesHandler;
-import org.apache.hadoop.ozone.recon.tasks.OMUpdateEventBatch;
-import org.apache.hadoop.ozone.recon.tasks.ReconTaskController;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
-import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
-import org.apache.hadoop.hdds.utils.db.RDBStore;
-import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
-import org.apache.ratis.protocol.ClientId;
-import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
-import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
-import org.rocksdb.RocksDB;
-import org.rocksdb.RocksDBException;
-import org.rocksdb.WriteBatch;
-import org.rocksdb.WriteOptions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * Implementation of the OzoneManager Service provider.
- */
-@Singleton
-public class OzoneManagerServiceProviderImpl
-    implements OzoneManagerServiceProvider {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneManagerServiceProviderImpl.class);
-
-  private final CloseableHttpClient httpClient;
-  private File omSnapshotDBParentDir = null;
-  private String omDBSnapshotUrl;
-
-  private OzoneManagerProtocol ozoneManagerClient;
-  private final ClientId clientId = ClientId.randomId();
-  private final OzoneConfiguration configuration;
-  private final ScheduledExecutorService scheduler =
-      Executors.newScheduledThreadPool(1);
-
-  private ReconOMMetadataManager omMetadataManager;
-  private ReconTaskController reconTaskController;
-  private ReconTaskStatusDao reconTaskStatusDao;
-  private ReconUtils reconUtils;
-  private enum OmSnapshotTaskName {
-    OM_DB_FULL_SNAPSHOT,
-    OM_DB_DELTA_UPDATES
-  }
-
-  @Inject
-  public OzoneManagerServiceProviderImpl(
-      OzoneConfiguration configuration,
-      ReconOMMetadataManager omMetadataManager,
-      ReconTaskController reconTaskController,
-      ReconUtils reconUtils,
-      OzoneManagerProtocol ozoneManagerClient) throws IOException {
-
-    String ozoneManagerHttpAddress = configuration.get(OMConfigKeys
-        .OZONE_OM_HTTP_ADDRESS_KEY);
-
-    String ozoneManagerHttpsAddress = configuration.get(OMConfigKeys
-        .OZONE_OM_HTTPS_ADDRESS_KEY);
-
-    omSnapshotDBParentDir = reconUtils.getReconDbDir(configuration,
-        OZONE_RECON_OM_SNAPSHOT_DB_DIR);
-
-    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(configuration);
-
-    int socketTimeout = (int) configuration.getTimeDuration(
-        RECON_OM_SOCKET_TIMEOUT, RECON_OM_SOCKET_TIMEOUT_DEFAULT,
-            TimeUnit.MILLISECONDS);
-    int connectionTimeout = (int) configuration.getTimeDuration(
-        RECON_OM_CONNECTION_TIMEOUT,
-        RECON_OM_CONNECTION_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
-    int connectionRequestTimeout = (int)configuration.getTimeDuration(
-        RECON_OM_CONNECTION_REQUEST_TIMEOUT,
-        RECON_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
-
-    RequestConfig config = RequestConfig.custom()
-        .setConnectTimeout(connectionTimeout)
-        .setConnectionRequestTimeout(connectionRequestTimeout)
-        .setSocketTimeout(socketTimeout).build();
-
-    httpClient = HttpClientBuilder
-        .create()
-        .setDefaultRequestConfig(config)
-        .build();
-
-    omDBSnapshotUrl = "http://" + ozoneManagerHttpAddress +
-        OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT;
-
-    if (policy.isHttpsEnabled()) {
-      omDBSnapshotUrl = "https://" + ozoneManagerHttpsAddress +
-          OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT;
-    }
-
-    boolean flushParam = configuration.getBoolean(
-        RECON_OM_SNAPSHOT_TASK_FLUSH_PARAM, false);
-
-    if (flushParam) {
-      omDBSnapshotUrl += "?" + OZONE_DB_CHECKPOINT_REQUEST_FLUSH + "=true";
-    }
-
-    this.reconUtils = reconUtils;
-    this.omMetadataManager = omMetadataManager;
-    this.reconTaskController = reconTaskController;
-    this.reconTaskStatusDao = reconTaskController.getReconTaskStatusDao();
-    this.ozoneManagerClient = ozoneManagerClient;
-    this.configuration = configuration;
-  }
-
-  @Override
-  public OMMetadataManager getOMMetadataManagerInstance() {
-    return omMetadataManager;
-  }
-
-  @Override
-  public void start() {
-    try {
-      omMetadataManager.start(configuration);
-    } catch (IOException ioEx) {
-      LOG.error("Error staring Recon OM Metadata Manager.", ioEx);
-    }
-    long initialDelay = configuration.getTimeDuration(
-        RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY,
-        RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    long interval = configuration.getTimeDuration(
-        RECON_OM_SNAPSHOT_TASK_INTERVAL,
-        RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    scheduler.scheduleWithFixedDelay(this::syncDataFromOM,
-        initialDelay,
-        interval,
-        TimeUnit.MILLISECONDS);
-  }
-
-  @Override
-  public void stop() {
-    reconTaskController.stop();
-    scheduler.shutdownNow();
-  }
-
-  /**
-   * Method to obtain current OM DB Snapshot.
-   * @return DBCheckpoint instance.
-   */
-  @VisibleForTesting
-  DBCheckpoint getOzoneManagerDBSnapshot() {
-    String snapshotFileName = RECON_OM_SNAPSHOT_DB + "_" + System
-        .currentTimeMillis();
-    File targetFile = new File(omSnapshotDBParentDir, snapshotFileName +
-        ".tar.gz");
-    try {
-      try (InputStream inputStream = reconUtils.makeHttpCall(httpClient,
-          omDBSnapshotUrl)) {
-        FileUtils.copyInputStreamToFile(inputStream, targetFile);
-      }
-
-      // Untar the checkpoint file.
-      Path untarredDbDir = Paths.get(omSnapshotDBParentDir.getAbsolutePath(),
-          snapshotFileName);
-      reconUtils.untarCheckpointFile(targetFile, untarredDbDir);
-      FileUtils.deleteQuietly(targetFile);
-
-      // TODO Create Checkpoint based on OM DB type.
-      // Currently, OM DB type is not configurable. Hence, defaulting to
-      // RocksDB.
-      return new RocksDBCheckpoint(untarredDbDir);
-    } catch (IOException e) {
-      LOG.error("Unable to obtain Ozone Manager DB Snapshot. ", e);
-    }
-    return null;
-  }
-
-  /**
-   * Update Local OM DB with new OM DB snapshot.
-   * @throws IOException
-   */
-  @VisibleForTesting
-  boolean updateReconOmDBWithNewSnapshot() throws IOException {
-    // Obtain the current DB snapshot from OM and
-    // update the in house OM metadata managed DB instance.
-    DBCheckpoint dbSnapshot = getOzoneManagerDBSnapshot();
-    if (dbSnapshot != null && dbSnapshot.getCheckpointLocation() != null) {
-      LOG.info("Got new checkpoint from OM : " +
-          dbSnapshot.getCheckpointLocation());
-      try {
-        omMetadataManager.updateOmDB(dbSnapshot.getCheckpointLocation()
-            .toFile());
-        return true;
-      } catch (IOException e) {
-        LOG.error("Unable to refresh Recon OM DB Snapshot. ", e);
-      }
-    } else {
-      LOG.error("Null snapshot location got from OM.");
-    }
-    return false;
-  }
-
-  /**
-   * Get Delta updates from OM through RPC call and apply to local OM DB as
-   * well as accumulate in a buffer.
-   * @param fromSequenceNumber from sequence number to request from.
-   * @param omdbUpdatesHandler OM DB updates handler to buffer updates.
-   * @throws IOException when OM RPC request fails.
-   * @throws RocksDBException when writing to RocksDB fails.
-   */
-  @VisibleForTesting
-  void getAndApplyDeltaUpdatesFromOM(
-      long fromSequenceNumber, OMDBUpdatesHandler omdbUpdatesHandler)
-      throws IOException, RocksDBException {
-    DBUpdatesRequest dbUpdatesRequest = DBUpdatesRequest.newBuilder()
-        .setSequenceNumber(fromSequenceNumber).build();
-    DBUpdatesWrapper dbUpdates = ozoneManagerClient.getDBUpdates(
-        dbUpdatesRequest);
-    if (null != dbUpdates) {
-      RDBStore rocksDBStore = (RDBStore)omMetadataManager.getStore();
-      RocksDB rocksDB = rocksDBStore.getDb();
-      LOG.debug("Number of updates received from OM : " +
-          dbUpdates.getData().size());
-      for (byte[] data : dbUpdates.getData()) {
-        WriteBatch writeBatch = new WriteBatch(data);
-        writeBatch.iterate(omdbUpdatesHandler);
-        RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(writeBatch);
-        rdbBatchOperation.commit(rocksDB, new WriteOptions());
-      }
-    }
-  }
-
-  /**
-   * Based on current state of Recon's OM DB, we either get delta updates or
-   * full snapshot from Ozone Manager.
-   */
-  @VisibleForTesting
-  void syncDataFromOM() {
-    LOG.info("Syncing data from Ozone Manager.");
-    long currentSequenceNumber = getCurrentOMDBSequenceNumber();
-    boolean fullSnapshot = false;
-
-    if (currentSequenceNumber <= 0) {
-      fullSnapshot = true;
-    } else {
-      OMDBUpdatesHandler omdbUpdatesHandler =
-          new OMDBUpdatesHandler(omMetadataManager);
-      try {
-        LOG.info("Obtaining delta updates from Ozone Manager");
-        // Get updates from OM and apply to local Recon OM DB.
-        getAndApplyDeltaUpdatesFromOM(currentSequenceNumber,
-            omdbUpdatesHandler);
-        // Update timestamp of successful delta updates query.
-        ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus(
-            OmSnapshotTaskName.OM_DB_DELTA_UPDATES.name(),
-                System.currentTimeMillis(), getCurrentOMDBSequenceNumber());
-        reconTaskStatusDao.update(reconTaskStatusRecord);
-        // Pass on DB update events to tasks that are listening.
-        reconTaskController.consumeOMEvents(new OMUpdateEventBatch(
-            omdbUpdatesHandler.getEvents()), omMetadataManager);
-      } catch (IOException | InterruptedException | RocksDBException e) {
-        LOG.warn("Unable to get and apply delta updates from OM.", e);
-        fullSnapshot = true;
-      }
-    }
-
-    if (fullSnapshot) {
-      try {
-        LOG.info("Obtaining full snapshot from Ozone Manager");
-        // Update local Recon OM DB to new snapshot.
-        boolean success = updateReconOmDBWithNewSnapshot();
-        // Update timestamp of successful full snapshot request.
-        if (success) {
-          ReconTaskStatus reconTaskStatusRecord =
-              new ReconTaskStatus(
-                  OmSnapshotTaskName.OM_DB_FULL_SNAPSHOT.name(),
-                  System.currentTimeMillis(), getCurrentOMDBSequenceNumber());
-          reconTaskStatusDao.update(reconTaskStatusRecord);
-          // Reinitialize tasks that are listening.
-          LOG.info("Calling reprocess on Recon tasks.");
-          reconTaskController.reInitializeTasks(omMetadataManager);
-        }
-      } catch (IOException | InterruptedException e) {
-        LOG.error("Unable to update Recon's OM DB with new snapshot ", e);
-      }
-    }
-  }
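-
-  // Control-flow summary (editorial note, not in the original source):
-  //   - local sequence number <= 0: bootstrap from a full OM snapshot;
-  //   - otherwise: request delta updates from the local sequence number,
-  //     apply them, and feed the buffered events to the Recon tasks;
-  //   - any failure on the delta path falls back to a full snapshot,
-  //     after which the tasks are reinitialized via reprocess.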
-
-  /**
-   * Get OM RocksDB's latest sequence number.
-   * @return latest sequence number.
-   */
-  private long getCurrentOMDBSequenceNumber() {
-    RDBStore rocksDBStore = (RDBStore)omMetadataManager.getStore();
-    if (null == rocksDBStore) {
-      return 0;
-    } else {
-      return rocksDBStore.getDb().getLatestSequenceNumber();
-    }
-  }
-}
-
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java
deleted file mode 100644
index 9c3e987..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.spi.impl;
-
-import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_COUNT_TABLE;
-import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_CONTAINER_DB;
-import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_TABLE;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR;
-
-import java.io.File;
-import java.nio.file.Path;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.recon.ReconUtils;
-import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
-import org.apache.hadoop.hdds.utils.db.IntegerCodec;
-import org.apache.hadoop.hdds.utils.db.LongCodec;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.ProvisionException;
-
-/**
- * Provider for the Recon container DB (Metadata store).
- */
-public class ReconContainerDBProvider implements Provider<DBStore> {
-
-  @VisibleForTesting
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ReconContainerDBProvider.class);
-
-  private OzoneConfiguration configuration;
-  private ReconUtils reconUtils;
-
-  @Inject
-  public ReconContainerDBProvider(OzoneConfiguration configuration,
-                                  ReconUtils reconUtils) {
-    this.configuration = configuration;
-    this.reconUtils = reconUtils;
-  }
-
-  @Override
-  public DBStore get() {
-    DBStore dbStore;
-    File reconDbDir =
-        reconUtils.getReconDbDir(configuration, OZONE_RECON_DB_DIR);
-    File lastKnownOMSnapshot =
-        reconUtils.getLastKnownDB(reconDbDir, RECON_CONTAINER_DB);
-    if (lastKnownOMSnapshot != null) {
-      dbStore = getDBStore(configuration, reconUtils,
-          lastKnownOMSnapshot.getName());
-    } else {
-      dbStore = getNewDBStore(configuration, reconUtils);
-    }
-    if (dbStore == null) {
-      throw new ProvisionException("Unable to provide instance of DBStore.");
-    }
-    return dbStore;
-  }
-
-  private static DBStore getDBStore(OzoneConfiguration configuration,
-                            ReconUtils reconUtils, String dbName) {
-    DBStore dbStore = null;
-    try {
-      Path metaDir = reconUtils.getReconDbDir(
-          configuration, OZONE_RECON_DB_DIR).toPath();
-      dbStore = DBStoreBuilder.newBuilder(configuration)
-          .setPath(metaDir)
-          .setName(dbName)
-          .addTable(CONTAINER_KEY_TABLE)
-          .addTable(CONTAINER_KEY_COUNT_TABLE)
-          .addCodec(ContainerKeyPrefix.class, new ContainerKeyPrefixCodec())
-          .addCodec(Long.class, new LongCodec())
-          .addCodec(Integer.class, new IntegerCodec())
-          .build();
-    } catch (Exception ex) {
-      LOG.error("Unable to initialize Recon container metadata store.", ex);
-    }
-    return dbStore;
-  }
-
-  static DBStore getNewDBStore(OzoneConfiguration configuration,
-                               ReconUtils reconUtils) {
-    String dbName = RECON_CONTAINER_DB + "_" + System.currentTimeMillis();
-    return getDBStore(configuration, reconUtils, dbName);
-  }
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java
deleted file mode 100644
index 1ed4429..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * The classes in this package define the Service Provider implementations for
- * Recon. This provides connectivity to underlying Ozone subsystems.
- */
-package org.apache.hadoop.ozone.recon.spi.impl;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/package-info.java
deleted file mode 100644
index 24692fa..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * The classes in this package define the Service Provider interfaces for
- * Recon. The implementations of Spi interfaces provide connectivity to
- * underlying Ozone subsystems.
- */
-package org.apache.hadoop.ozone.recon.spi;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
deleted file mode 100644
index eae17bd..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
+++ /dev/null
@@ -1,235 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
-
-import java.io.IOException;
-import java.time.Duration;
-import java.time.Instant;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Set;
-
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
-import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-
-/**
- * Class to iterate over the OM DB and populate the Recon container DB with
- * the container -> Key reverse mapping.
- */
-public class ContainerKeyMapperTask implements ReconDBUpdateTask {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerKeyMapperTask.class);
-
-  private ContainerDBServiceProvider containerDBServiceProvider;
-
-  @Inject
-  public ContainerKeyMapperTask(ContainerDBServiceProvider
-                                    containerDBServiceProvider) {
-    this.containerDBServiceProvider = containerDBServiceProvider;
-  }
-
-  /**
-   * Read Key -> ContainerId data from OM snapshot DB and write reverse map
-   * (container, key) -> count to Recon Container DB.
-   */
-  @Override
-  public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
-    long omKeyCount = 0;
-    try {
-      LOG.info("Starting a 'reprocess' run of ContainerKeyMapperTask.");
-      Instant start = Instant.now();
-
-      // initialize new container DB
-      containerDBServiceProvider.initNewContainerDB(new HashMap<>());
-
-      Table<String, OmKeyInfo> omKeyInfoTable = omMetadataManager.getKeyTable();
-      try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
-               keyIter = omKeyInfoTable.iterator()) {
-        while (keyIter.hasNext()) {
-          Table.KeyValue<String, OmKeyInfo> kv = keyIter.next();
-          OmKeyInfo omKeyInfo = kv.getValue();
-          writeOMKeyToContainerDB(kv.getKey(), omKeyInfo);
-          omKeyCount++;
-        }
-      }
-      LOG.info("Completed 'reprocess' of ContainerKeyMapperTask.");
-      Instant end = Instant.now();
-      long duration = Duration.between(start, end).toMillis();
-      LOG.info("It took me " + (double) duration / 1000.0 + " seconds to " +
-          "process " + omKeyCount + " keys.");
-    } catch (IOException ioEx) {
-      LOG.error("Unable to populate Container Key Prefix data in Recon DB. ",
-          ioEx);
-      return new ImmutablePair<>(getTaskName(), false);
-    }
-    return new ImmutablePair<>(getTaskName(), true);
-  }
-
-  @Override
-  public String getTaskName() {
-    return "ContainerKeyMapperTask";
-  }
-
-  @Override
-  public Collection<String> getTaskTables() {
-    return Collections.singletonList(KEY_TABLE);
-  }
-
-  @Override
-  public Pair<String, Boolean> process(OMUpdateEventBatch events) {
-    Iterator<OMDBUpdateEvent> eventIterator = events.getIterator();
-    int eventCount = 0;
-    while (eventIterator.hasNext()) {
-      OMDBUpdateEvent<String, OmKeyInfo> omdbUpdateEvent = eventIterator.next();
-      String updatedKey = omdbUpdateEvent.getKey();
-      OmKeyInfo updatedKeyValue = omdbUpdateEvent.getValue();
-      try {
-        switch (omdbUpdateEvent.getAction()) {
-        case PUT:
-          writeOMKeyToContainerDB(updatedKey, updatedKeyValue);
-          break;
-
-        case DELETE:
-          deleteOMKeyFromContainerDB(updatedKey);
-          break;
-
-        default: LOG.debug("Skipping DB update event : " + omdbUpdateEvent
-            .getAction());
-        }
-        eventCount++;
-      } catch (IOException e) {
-        LOG.error("Unexpected exception while updating key data : {} ",
-            updatedKey, e);
-        return new ImmutablePair<>(getTaskName(), false);
-      }
-    }
-    LOG.info("{} successfully processed {} OM DB update event(s).",
-        getTaskName(), eventCount);
-    return new ImmutablePair<>(getTaskName(), true);
-  }
-
-  /**
-   * Delete an OM Key from Container DB and update containerID -> no. of keys
-   * count.
-   *
-   * @param key key String.
-   * @throws IOException If Unable to write to container DB.
-   */
-  private void deleteOMKeyFromContainerDB(String key)
-      throws IOException {
-
-    TableIterator<ContainerKeyPrefix, ? extends
-        Table.KeyValue<ContainerKeyPrefix, Integer>> containerIterator =
-        containerDBServiceProvider.getContainerTableIterator();
-
-    Set<ContainerKeyPrefix> keysToBeDeleted = new HashSet<>();
-
-    while (containerIterator.hasNext()) {
-      Table.KeyValue<ContainerKeyPrefix, Integer> keyValue =
-          containerIterator.next();
-      String keyPrefix = keyValue.getKey().getKeyPrefix();
-      if (keyPrefix.equals(key)) {
-        keysToBeDeleted.add(keyValue.getKey());
-      }
-    }
-
-    for (ContainerKeyPrefix containerKeyPrefix : keysToBeDeleted) {
-      containerDBServiceProvider.deleteContainerMapping(containerKeyPrefix);
-
-      // decrement count and update containerKeyCount.
-      Long containerID = containerKeyPrefix.getContainerId();
-      long keyCount =
-          containerDBServiceProvider.getKeyCountForContainer(containerID);
-      if (keyCount > 0) {
-        containerDBServiceProvider.storeContainerKeyCount(containerID,
-            --keyCount);
-      }
-    }
-  }
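-
-  // Design note (editorial, not in the original source): the container table
-  // is keyed by (containerId, keyPrefix, version), so deleting by OM key
-  // requires a full table scan; matching entries are collected into a set
-  // first so the table is not mutated while the iterator is open.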
-
-  /**
-   * Write an OM key to container DB and update containerID -> no. of keys
-   * count.
-   *
-   * @param key key String
-   * @param omKeyInfo omKeyInfo value
-   * @throws IOException if unable to write to recon DB.
-   */
-  private void writeOMKeyToContainerDB(String key, OmKeyInfo omKeyInfo)
-      throws IOException {
-    long containerCountToIncrement = 0;
-    for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : omKeyInfo
-        .getKeyLocationVersions()) {
-      long keyVersion = omKeyLocationInfoGroup.getVersion();
-      for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfoGroup
-          .getLocationList()) {
-        long containerId = omKeyLocationInfo.getContainerID();
-        ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix(
-            containerId, key, keyVersion);
-        if (containerDBServiceProvider.getCountForContainerKeyPrefix(
-            containerKeyPrefix) == 0) {
-          // Save on writes. No need to save same container-key prefix
-          // mapping again.
-          containerDBServiceProvider.storeContainerKeyMapping(
-              containerKeyPrefix, 1);
-
-          // check if container already exists and
-          // increment the count of containers if it does not exist
-          if (!containerDBServiceProvider.doesContainerExists(containerId)) {
-            containerCountToIncrement++;
-          }
-
-          // update the count of keys for the given containerID
-          long keyCount =
-              containerDBServiceProvider.getKeyCountForContainer(containerId);
-
-          // increment the count and update containerKeyCount.
-          // keyCount will be 0 if containerID is not found. So, there is no
-          // need to initialize keyCount for the first time.
-          containerDBServiceProvider.storeContainerKeyCount(containerId,
-              ++keyCount);
-        }
-      }
-    }
-
-    if (containerCountToIncrement > 0) {
-      containerDBServiceProvider
-          .incrementContainerCountBy(containerCountToIncrement);
-    }
-  }
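-
-  // Worked example (editorial note, not in the original source): a key "k1"
-  // with location version 0 in container 3 and version 1 in containers 3
-  // and 5 produces the entries (3, "k1", 0) -> 1, (3, "k1", 1) -> 1 and
-  // (5, "k1", 1) -> 1, leaves container 3 with a key count of 2 and
-  // container 5 with 1, and increments the total container count by 2 if
-  // neither container was known before.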
-
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java
deleted file mode 100644
index 3874dda..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java
+++ /dev/null
@@ -1,251 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import com.google.inject.Inject;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao;
-import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize;
-import org.jooq.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
-import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.DELETE;
-import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT;
-
-/**
- * Class to iterate over the OM DB and store the counts of existing/new
- * files, binned into size ranges (1KB, 2KB, ..., 4MB, ..., 1TB, ..., 1PB),
- * in the Recon fileSize DB.
- */
-public class FileSizeCountTask implements ReconDBUpdateTask {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(FileSizeCountTask.class);
-
-  private int maxBinSize = -1;
-  private long maxFileSizeUpperBound = 1125899906842624L; // 1 PB
-  private long[] upperBoundCount;
-  private long oneKb = 1024L;
-  private FileCountBySizeDao fileCountBySizeDao;
-
-  @Inject
-  public FileSizeCountTask(Configuration sqlConfiguration) {
-    fileCountBySizeDao = new FileCountBySizeDao(sqlConfiguration);
-    upperBoundCount = new long[getMaxBinSize()];
-  }
-
-  long getOneKB() {
-    return oneKb;
-  }
-
-  long getMaxFileSizeUpperBound() {
-    return maxFileSizeUpperBound;
-  }
-
-  int getMaxBinSize() {
-    if (maxBinSize == -1) {
-      // extra bin to add files > 1PB.
-      // 1 KB (2 ^ 10) is the smallest tracked file.
-      maxBinSize = nextClosestPowerIndexOfTwo(maxFileSizeUpperBound) - 10 + 1;
-    }
-    return maxBinSize;
-  }
-
-  /**
-   * Read the keys from the OM snapshot DB and calculate the file size
-   * upper bound each key belongs to.
-   *
-   * @param omMetadataManager OM Metadata instance.
-   * @return Pair of task name -> task success.
-   */
-  @Override
-  public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
-    Table<String, OmKeyInfo> omKeyInfoTable = omMetadataManager.getKeyTable();
-    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
-        keyIter = omKeyInfoTable.iterator()) {
-      while (keyIter.hasNext()) {
-        Table.KeyValue<String, OmKeyInfo> kv = keyIter.next();
-
-        // reprocess() is a PUT operation on the DB.
-        updateUpperBoundCount(kv.getValue(), PUT);
-      }
-    } catch (IOException ioEx) {
-      LOG.error("Unable to populate File Size Count in Recon DB. ", ioEx);
-      return new ImmutablePair<>(getTaskName(), false);
-    }
-    populateFileCountBySizeDB();
-
-    LOG.info("Completed a 'reprocess' run of FileSizeCountTask.");
-    return new ImmutablePair<>(getTaskName(), true);
-  }
-
-  @Override
-  public String getTaskName() {
-    return "FileSizeCountTask";
-  }
-
-  @Override
-  public Collection<String> getTaskTables() {
-    return Collections.singletonList(KEY_TABLE);
-  }
-
-  private void updateCountFromDB() {
-    // Read - Write operations to DB are in ascending order
-    // of file size upper bounds.
-    List<FileCountBySize> resultSet = fileCountBySizeDao.findAll();
-    int index = 0;
-    if (resultSet != null) {
-      for (FileCountBySize row : resultSet) {
-        upperBoundCount[index] = row.getCount();
-        index++;
-      }
-    }
-  }
-
-  /**
-   * Read the keys from update events and update the count of files
-   * pertaining to each upper bound.
-   *
-   * @param events Update events - PUT/DELETE.
-   * @return Pair of task name -> task success.
-   */
-  @Override
-  public Pair<String, Boolean> process(OMUpdateEventBatch events) {
-    Iterator<OMDBUpdateEvent> eventIterator = events.getIterator();
-
-    //update array with file size count from DB
-    updateCountFromDB();
-
-    while (eventIterator.hasNext()) {
-      OMDBUpdateEvent<String, OmKeyInfo> omdbUpdateEvent = eventIterator.next();
-      String updatedKey = omdbUpdateEvent.getKey();
-      OmKeyInfo omKeyInfo = omdbUpdateEvent.getValue();
-
-      try {
-        switch (omdbUpdateEvent.getAction()) {
-        case PUT:
-          updateUpperBoundCount(omKeyInfo, PUT);
-          break;
-
-        case DELETE:
-          updateUpperBoundCount(omKeyInfo, DELETE);
-          break;
-
-        default: LOG.trace("Skipping DB update event : " + omdbUpdateEvent
-                  .getAction());
-        }
-      } catch (IOException e) {
-        LOG.error("Unexpected exception while updating key data : {} {}",
-                updatedKey, e.getMessage());
-        return new ImmutablePair<>(getTaskName(), false);
-      }
-      populateFileCountBySizeDB();
-    }
-    LOG.info("Completed a 'process' run of FileSizeCountTask.");
-    return new ImmutablePair<>(getTaskName(), true);
-  }
-
-  /**
-   * Calculate the bin index based on size of the Key.
-   * index is calculated as the number of right shifts
-   * needed until dataSize becomes zero.
-   *
-   * @param dataSize Size of the key.
-   * @return int bin index in upperBoundCount
-   */
-  public int calculateBinIndex(long dataSize) {
-    if (dataSize >= getMaxFileSizeUpperBound()) {
-      return getMaxBinSize() - 1;
-    }
-    int index = nextClosestPowerIndexOfTwo(dataSize);
-    // The smallest file size being tracked for count
-    // is 1 KB i.e. 1024 = 2 ^ 10.
-    return index < 10 ? 0 : index - 10;
-  }
-
-  int nextClosestPowerIndexOfTwo(long dataSize) {
-    int index = 0;
-    while (dataSize != 0) {
-      dataSize >>= 1;
-      index += 1;
-    }
-    return index;
-  }
-
-  /**
-   * Populate the Recon DB with the file size counts tracked in
-   * upperBoundCount[], using the DAO.
-   */
-  void populateFileCountBySizeDB() {
-    for (int i = 0; i < upperBoundCount.length; i++) {
-      long fileSizeUpperBound = (i == upperBoundCount.length - 1) ?
-          Long.MAX_VALUE : (long) Math.pow(2, (10 + i));
-      FileCountBySize fileCountRecord =
-          fileCountBySizeDao.findById(fileSizeUpperBound);
-      FileCountBySize newRecord = new
-          FileCountBySize(fileSizeUpperBound, upperBoundCount[i]);
-      if (fileCountRecord == null) {
-        fileCountBySizeDao.insert(newRecord);
-      } else {
-        fileCountBySizeDao.update(newRecord);
-      }
-    }
-  }
-
-  /**
-   * Calculate and update the count of files being tracked by
-   * upperBoundCount[].
-   * Used by reprocess() and process().
-   *
-   * @param omKeyInfo OmKey being updated for count
-   * @param operation (PUT, DELETE)
-   */
-  void updateUpperBoundCount(OmKeyInfo omKeyInfo,
-      OMDBUpdateEvent.OMDBUpdateAction operation) throws IOException {
-    int binIndex = calculateBinIndex(omKeyInfo.getDataSize());
-    if (operation == PUT) {
-      upperBoundCount[binIndex]++;
-    } else if (operation == DELETE) {
-      if (upperBoundCount[binIndex] != 0) {
-        //decrement only if it had files before, default DB value is 0
-        upperBoundCount[binIndex]--;
-      } else {
-        LOG.warn("Unexpected error while updating bin count. Found 0 count " +
-            "for index : " + binIndex + " while processing DELETE event for "
-            + omKeyInfo.getKeyName());
-      }
-    }
-  }
-}
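
The binning arithmetic in the deleted task is compact enough to illustrate on its own. The following standalone sketch mirrors nextClosestPowerIndexOfTwo() and calculateBinIndex() as defined above; the class name and the main() driver are hypothetical additions for demonstration only.

public class FileSizeBinningSketch {
  // Mirrors FileSizeCountTask: 1 PB upper bound, 1 KB (2^10) smallest bin,
  // plus one extra bin for files > 1 PB.
  private static final long MAX_FILE_SIZE_UPPER_BOUND = 1125899906842624L;
  private static final int MAX_BIN_SIZE =
      nextClosestPowerIndexOfTwo(MAX_FILE_SIZE_UPPER_BOUND) - 10 + 1;

  // Number of right shifts until dataSize becomes zero.
  static int nextClosestPowerIndexOfTwo(long dataSize) {
    int index = 0;
    while (dataSize != 0) {
      dataSize >>= 1;
      index += 1;
    }
    return index;
  }

  static int calculateBinIndex(long dataSize) {
    if (dataSize >= MAX_FILE_SIZE_UPPER_BOUND) {
      return MAX_BIN_SIZE - 1;
    }
    int index = nextClosestPowerIndexOfTwo(dataSize);
    // Sizes below 1 KB (index < 10) collapse into bin 0.
    return index < 10 ? 0 : index - 10;
  }

  public static void main(String[] args) {
    // 500 B and 1023 B land in bin 0; 4096 B in bin 3; 1 MB - 1 in bin 10.
    for (long size : new long[]{500L, 1023L, 4096L, 1048575L}) {
      System.out.println(size + " bytes -> bin " + calculateBinIndex(size));
    }
  }
}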
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java
deleted file mode 100644
index 0fcabccb..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-/**
- * A class used to encapsulate a single OM DB update event.
- * Currently only PUT and DELETE are supported.
- * @param <KEY> Type of Key.
- * @param <VALUE> Type of Value.
- */
-public final class OMDBUpdateEvent<KEY, VALUE> {
-
-  private final OMDBUpdateAction action;
-  private final String table;
-  private final KEY updatedKey;
-  private final VALUE updatedValue;
-  private final long sequenceNumber;
-
-  private OMDBUpdateEvent(OMDBUpdateAction action,
-                          String table,
-                          KEY updatedKey,
-                          VALUE updatedValue,
-                          long sequenceNumber) {
-    this.action = action;
-    this.table = table;
-    this.updatedKey = updatedKey;
-    this.updatedValue = updatedValue;
-    this.sequenceNumber = sequenceNumber;
-  }
-
-  public OMDBUpdateAction getAction() {
-    return action;
-  }
-
-  public String getTable() {
-    return table;
-  }
-
-  public KEY getKey() {
-    return updatedKey;
-  }
-
-  public VALUE getValue() {
-    return updatedValue;
-  }
-
-  public long getSequenceNumber() {
-    return sequenceNumber;
-  }
-
-  /**
-   * Builder used to construct an OM DB Update event.
-   * @param <KEY> Key type.
-   * @param <VALUE> Value type.
-   */
-  public static class OMUpdateEventBuilder<KEY, VALUE> {
-
-    private OMDBUpdateAction action;
-    private String table;
-    private KEY updatedKey;
-    private VALUE updatedValue;
-    private long lastSequenceNumber;
-
-    OMUpdateEventBuilder setAction(OMDBUpdateAction omdbUpdateAction) {
-      this.action = omdbUpdateAction;
-      return this;
-    }
-
-    OMUpdateEventBuilder setTable(String tableName) {
-      this.table = tableName;
-      return this;
-    }
-
-    OMUpdateEventBuilder setKey(KEY key) {
-      this.updatedKey = key;
-      return this;
-    }
-
-    OMUpdateEventBuilder setValue(VALUE value) {
-      this.updatedValue = value;
-      return this;
-    }
-
-    OMUpdateEventBuilder setSequenceNumber(long sequenceNumber) {
-      this.lastSequenceNumber = sequenceNumber;
-      return this;
-    }
-
-    /**
-     * Build an OM update event.
-     * @return OMDBUpdateEvent
-     */
-    public OMDBUpdateEvent build() {
-      return new OMDBUpdateEvent<KEY, VALUE>(
-          action,
-          table,
-          updatedKey,
-          updatedValue,
-          lastSequenceNumber);
-    }
-  }
-
-  /**
-   * Supported Actions - PUT, DELETE.
-   */
-  public enum OMDBUpdateAction {
-    PUT, DELETE
-  }
-}
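
For orientation, constructing one of these events through the builder looks like the sketch below. It has to live in the org.apache.hadoop.ozone.recon.tasks package, since the setters are package-private; the table name, key, and sequence number are made-up values, and omKeyInfo stands for an OmKeyInfo instance obtained elsewhere.

// Hypothetical usage sketch, same package as OMDBUpdateEvent.
OMDBUpdateEvent.OMUpdateEventBuilder<String, OmKeyInfo> builder =
    new OMDBUpdateEvent.OMUpdateEventBuilder<>();
builder.setTable("keyTable");               // assumed OM key table name
builder.setKey("/volume1/bucket1/key1");    // made-up key
builder.setValue(omKeyInfo);                // OmKeyInfo obtained elsewhere
builder.setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT);
builder.setSequenceNumber(42L);             // made-up sequence number
OMDBUpdateEvent event = builder.build();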
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java
deleted file mode 100644
index 47d5900..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.hdds.utils.db.CodecRegistry;
-import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.rocksdb.RocksDBException;
-import org.rocksdb.WriteBatch;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class used to listen on OM RocksDB updates.
- */
-public class OMDBUpdatesHandler extends WriteBatch.Handler {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OMDBUpdatesHandler.class);
-
-  private Map<Integer, String> tablesNames;
-  private CodecRegistry codecRegistry;
-  private List<OMDBUpdateEvent> omdbUpdateEvents = new ArrayList<>();
-
-  public OMDBUpdatesHandler(OMMetadataManager omMetadataManager) {
-    tablesNames = omMetadataManager.getStore().getTableNames();
-    codecRegistry = omMetadataManager.getStore().getCodecRegistry();
-  }
-
-  @Override
-  public void put(int cfIndex, byte[] keyBytes, byte[] valueBytes) throws
-      RocksDBException {
-    try {
-      processEvent(cfIndex, keyBytes, valueBytes,
-          OMDBUpdateEvent.OMDBUpdateAction.PUT);
-    } catch (IOException ioEx) {
-      LOG.error("Exception when reading key : " + ioEx);
-    }
-  }
-
-  @Override
-  public void delete(int cfIndex, byte[] keyBytes) throws RocksDBException {
-    try {
-      processEvent(cfIndex, keyBytes, null,
-          OMDBUpdateEvent.OMDBUpdateAction.DELETE);
-    } catch (IOException ioEx) {
-      LOG.error("Exception when reading key : " + ioEx);
-    }
-  }
-
-  /**
-   * Convert a raw RocksDB write-batch record into an OMDBUpdateEvent
-   * and buffer it.
-   * @param cfIndex column family index that identifies the OM table.
-   * @param keyBytes serialized key.
-   * @param valueBytes serialized value (null for DELETE events).
-   * @param action PUT or DELETE.
-   * @throws IOException on deserialization failure.
-   */
-  private void processEvent(int cfIndex, byte[] keyBytes, byte[]
-      valueBytes, OMDBUpdateEvent.OMDBUpdateAction action)
-      throws IOException {
-    String tableName = tablesNames.get(cfIndex);
-    Class keyType = getKeyType(tableName);
-    Class valueType = getValueType(tableName);
-    if (valueType != null) {
-      OMDBUpdateEvent.OMUpdateEventBuilder builder =
-          new OMDBUpdateEvent.OMUpdateEventBuilder<>();
-      builder.setTable(tableName);
-
-      Object key = codecRegistry.asObject(keyBytes, keyType);
-      builder.setKey(key);
-
-      if (!action.equals(OMDBUpdateEvent.OMDBUpdateAction.DELETE)) {
-        Object value = codecRegistry.asObject(valueBytes, valueType);
-        builder.setValue(value);
-      }
-
-      builder.setAction(action);
-      OMDBUpdateEvent event = builder.build();
-      LOG.debug("Generated OM update Event for table : " + event.getTable()
-          + ", Key = " + event.getKey() + ", action = " + event.getAction());
-      // Temporarily adding to an event buffer for testing. In subsequent JIRAs,
-      // a Recon side class will be implemented that requests delta updates
-      // from OM and calls on this handler. In that case, we will fill up
-      // this buffer and pass it on to the ReconTaskController which has
-      // tasks waiting on OM events.
-      omdbUpdateEvents.add(event);
-    }
-  }
-
-  // There are no use cases yet for the remaining methods in Recon. These
-  // will be implemented as and when the need arises.
-
-  @Override
-  public void put(byte[] bytes, byte[] bytes1) {
-
-  }
-
-  @Override
-  public void merge(int i, byte[] bytes, byte[] bytes1)
-      throws RocksDBException {
-  }
-
-  @Override
-  public void merge(byte[] bytes, byte[] bytes1) {
-  }
-
-  @Override
-  public void delete(byte[] bytes) {
-  }
-
-  @Override
-  public void singleDelete(int i, byte[] bytes) throws RocksDBException {
-  }
-
-  @Override
-  public void singleDelete(byte[] bytes) {
-  }
-
-  @Override
-  public void deleteRange(int i, byte[] bytes, byte[] bytes1)
-      throws RocksDBException {
-  }
-
-  @Override
-  public void deleteRange(byte[] bytes, byte[] bytes1) {
-
-  }
-
-  @Override
-  public void logData(byte[] bytes) {
-
-  }
-
-  @Override
-  public void putBlobIndex(int i, byte[] bytes, byte[] bytes1)
-      throws RocksDBException {
-  }
-
-  @Override
-  public void markBeginPrepare() throws RocksDBException {
-
-  }
-
-  @Override
-  public void markEndPrepare(byte[] bytes) throws RocksDBException {
-
-  }
-
-  @Override
-  public void markNoop(boolean b) throws RocksDBException {
-
-  }
-
-  @Override
-  public void markRollback(byte[] bytes) throws RocksDBException {
-
-  }
-
-  @Override
-  public void markCommit(byte[] bytes) throws RocksDBException {
-
-  }
-
-  /**
-   * Return the key type class for a given table name. All OM tables
-   * currently use String keys.
-   * @param name table name.
-   * @return String.class for every table.
-   */
-  private Class getKeyType(String name) {
-    return String.class;
-  }
-
-  /**
-   * Return Value type class for a given table.
-   * @param name table name
-   * @return Value type based on table name.
-   */
-  @VisibleForTesting
-  protected Class getValueType(String name) {
-    switch (name) {
-    case KEY_TABLE : return OmKeyInfo.class;
-    case VOLUME_TABLE : return OmVolumeArgs.class;
-    case BUCKET_TABLE : return OmBucketInfo.class;
-    default: return null;
-    }
-  }
-
-  /**
-   * Get List of events. (Temporary API to unit test the class).
-   * @return List of events.
-   */
-  public List<OMDBUpdateEvent> getEvents() {
-    return omdbUpdateEvents;
-  }
-
-}
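
Recon drives this handler through RocksDB's WriteBatch.iterate(Handler), which replays each record in a serialized batch against the handler's put()/delete() callbacks. A hedged sketch of that hand-off, where the helper name and the source of the serialized bytes are assumptions:

// Sketch: turning a serialized OM write batch into an OMUpdateEventBatch.
// toEventBatch is a hypothetical helper; obtaining the bytes from OM is
// out of scope here and passed in by the caller.
OMUpdateEventBatch toEventBatch(byte[] serializedWriteBatch,
    OMMetadataManager omMetadataManager) throws RocksDBException {
  OMDBUpdatesHandler handler = new OMDBUpdatesHandler(omMetadataManager);
  try (WriteBatch writeBatch = new WriteBatch(serializedWriteBatch)) {
    writeBatch.iterate(handler);  // invokes put()/delete() per record
  }
  return new OMUpdateEventBatch(handler.getEvents());
}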
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java
deleted file mode 100644
index f137418..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * Wrapper class to hold multiple OM DB update events.
- */
-public class OMUpdateEventBatch {
-
-  private List<OMDBUpdateEvent> events;
-
-  public OMUpdateEventBatch(Collection<OMDBUpdateEvent> e) {
-    events = new ArrayList<>(e);
-  }
-
-  /**
-   * Get the sequence number of the last event in this batch.
-   * @return sequence number of the last event, or -1 if the batch is empty.
-   */
-  long getLastSequenceNumber() {
-    if (events.isEmpty()) {
-      return -1;
-    } else {
-      return events.get(events.size() - 1).getSequenceNumber();
-    }
-  }
-
-  /**
-   * Return iterator to Event batch.
-   * @return iterator
-   */
-  public Iterator<OMDBUpdateEvent> getIterator() {
-    return events.iterator();
-  }
-
-  /**
-   * Filter events based on Tables.
-   * @param tables set of tables to filter on.
-   * @return trimmed event batch.
-   */
-  public OMUpdateEventBatch filter(Collection<String> tables) {
-    return new OMUpdateEventBatch(events
-        .stream()
-        .filter(e -> tables.contains(e.getTable()))
-        .collect(Collectors.toList()));
-  }
-
-  /**
-   * Return if empty.
-   * @return true if empty, else false.
-   */
-  public boolean isEmpty() {
-    return !getIterator().hasNext();
-  }
-}
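
The filter() method is what lets the task controller hand each task only the tables it registered for. A short sketch of that hand-off (task and eventBatch assumed to exist):

// Narrow the batch to the task's tables before processing, mirroring
// how ReconTaskControllerImpl below calls filter().
OMUpdateEventBatch filtered = eventBatch.filter(task.getTaskTables());
if (!filtered.isEmpty()) {
  task.process(filtered);
}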
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconDBUpdateTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconDBUpdateTask.java
deleted file mode 100644
index 426e0ae..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconDBUpdateTask.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import java.util.Collection;
-
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-
-/**
- * Interface used to denote a Recon task that needs to act on OM DB events.
- */
-public interface ReconDBUpdateTask {
-
-  /**
-   * Return task name.
-   * @return task name
-   */
-  String getTaskName();
-
-  /**
-   * Return the list of tables that the task is listening on.
-   * Empty list means the task is NOT listening on any tables.
-   * @return Collection of Tables.
-   */
-  Collection<String> getTaskTables();
-
-  /**
-   * Process a set of OM events on tables that the task is listening on.
-   * @param events Set of events to be processed by the task.
-   * @return Pair of task name -> task success.
-   */
-  Pair<String, Boolean> process(OMUpdateEventBatch events);
-
-  /**
-   * Reprocess the tables the task listens on from a full OM DB snapshot.
-   * @param omMetadataManager OM Metadata manager instance.
-   * @return Pair of task name -> task success.
-   */
-  Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager);
-
-}
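
A minimal implementation of the interface, for orientation; the class and its bookkeeping are hypothetical, not a real Recon task.

import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;

// Hypothetical task that keeps a running count of keys in the OM key table.
public class KeyCountTask implements ReconDBUpdateTask {
  private long keyCount = 0;

  @Override
  public String getTaskName() {
    return "KeyCountTask";
  }

  @Override
  public Collection<String> getTaskTables() {
    return Collections.singletonList(KEY_TABLE);
  }

  @Override
  public Pair<String, Boolean> process(OMUpdateEventBatch events) {
    Iterator<OMDBUpdateEvent> it = events.getIterator();
    while (it.hasNext()) {
      OMDBUpdateEvent event = it.next();
      if (event.getAction() == OMDBUpdateEvent.OMDBUpdateAction.PUT) {
        keyCount++;
      } else if (event.getAction() == OMDBUpdateEvent.OMDBUpdateAction.DELETE) {
        keyCount--;
      }
    }
    return new ImmutablePair<>(getTaskName(), true);
  }

  @Override
  public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
    // A real task would re-count by iterating the key table snapshot here.
    keyCount = 0;
    return new ImmutablePair<>(getTaskName(), true);
  }
}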
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java
deleted file mode 100644
index 728a199..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import java.util.Map;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
-
-/**
- * Controller used by Recon to manage Tasks that are waiting on Recon events.
- */
-public interface ReconTaskController {
-
-  /**
-   * Register API used by tasks to register themselves.
-   * @param task task instance
-   */
-  void registerTask(ReconDBUpdateTask task);
-
-  /**
-   * Pass on a set of OM DB update events to the registered tasks.
-   * @param events set of events
-   * @throws InterruptedException InterruptedException
-   */
-  void consumeOMEvents(OMUpdateEventBatch events,
-                       OMMetadataManager omMetadataManager)
-      throws InterruptedException;
-
-  /**
-   * Pass on the handle to a new OM DB instance to the registered tasks.
-   * @param omMetadataManager OM Metadata Manager instance
-   */
-  void reInitializeTasks(OMMetadataManager omMetadataManager)
-      throws InterruptedException;
-
-  /**
-   * Get set of registered tasks.
-   * @return Map of Task name -> Task.
-   */
-  Map<String, ReconDBUpdateTask> getRegisteredTasks();
-
-  /**
-   * Get instance of ReconTaskStatusDao.
-   * @return instance of ReconTaskStatusDao
-   */
-  ReconTaskStatusDao getReconTaskStatusDao();
-
-  /**
-   * Stop the tasks. Start API is not needed since it is implicit.
-   */
-  void stop();
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java
deleted file mode 100644
index 9135705..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java
+++ /dev/null
@@ -1,245 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_TASK_THREAD_COUNT_DEFAULT;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_TASK_THREAD_COUNT_KEY;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
-import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
-import org.jooq.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-
-/**
- * Implementation of ReconTaskController.
- */
-public class ReconTaskControllerImpl implements ReconTaskController {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ReconTaskControllerImpl.class);
-
-  private Map<String, ReconDBUpdateTask> reconDBUpdateTasks;
-  private ExecutorService executorService;
-  private int threadCount = 1;
-  private final Semaphore taskSemaphore = new Semaphore(1);
-  private Map<String, AtomicInteger> taskFailureCounter = new HashMap<>();
-  private static final int TASK_FAILURE_THRESHOLD = 2;
-  private ReconTaskStatusDao reconTaskStatusDao;
-
-  @Inject
-  public ReconTaskControllerImpl(OzoneConfiguration configuration,
-                                 Configuration sqlConfiguration,
-                                 Set<ReconDBUpdateTask> tasks) {
-    reconDBUpdateTasks = new HashMap<>();
-    threadCount = configuration.getInt(OZONE_RECON_TASK_THREAD_COUNT_KEY,
-        OZONE_RECON_TASK_THREAD_COUNT_DEFAULT);
-    executorService = Executors.newFixedThreadPool(threadCount);
-    reconTaskStatusDao = new ReconTaskStatusDao(sqlConfiguration);
-    for (ReconDBUpdateTask task : tasks) {
-      registerTask(task);
-    }
-  }
-
-  @Override
-  public void registerTask(ReconDBUpdateTask task) {
-    String taskName = task.getTaskName();
-    LOG.info("Registered task " + taskName + " with controller.");
-
-    // Store task in Task Map.
-    reconDBUpdateTasks.put(taskName, task);
-    // Store Task in Task failure tracker.
-    taskFailureCounter.put(taskName, new AtomicInteger(0));
-    // Create DB record for the task.
-    ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus(taskName,
-        0L, 0L);
-    if (!reconTaskStatusDao.existsById(taskName)) {
-      reconTaskStatusDao.insert(reconTaskStatusRecord);
-    }
-  }
-
-  /**
-   * For every registered task, the process step is tried twice; if both
-   * attempts fail, reprocess is invoked once to absorb the events. If a
-   * task's reprocess call fails more than 2 times across event batches,
-   * the task is unregistered (blacklisted).
-   * @param events set of events
-   * @throws InterruptedException
-   */
-  @Override
-  public void consumeOMEvents(OMUpdateEventBatch events,
-                              OMMetadataManager omMetadataManager)
-      throws InterruptedException {
-    taskSemaphore.acquire();
-
-    try {
-      if (!events.isEmpty()) {
-        Collection<Callable<Pair>> tasks = new ArrayList<>();
-        for (Map.Entry<String, ReconDBUpdateTask> taskEntry :
-            reconDBUpdateTasks.entrySet()) {
-          ReconDBUpdateTask task = taskEntry.getValue();
-          Collection<String> tables = task.getTaskTables();
-          tasks.add(() -> task.process(events.filter(tables)));
-        }
-
-        List<Future<Pair>> results = executorService.invokeAll(tasks);
-        List<String> failedTasks = processTaskResults(results, events);
-
-        // Retry
-        List<String> retryFailedTasks = new ArrayList<>();
-        if (!failedTasks.isEmpty()) {
-          tasks.clear();
-          for (String taskName : failedTasks) {
-            ReconDBUpdateTask task = reconDBUpdateTasks.get(taskName);
-            Collection<String> tables = task.getTaskTables();
-            tasks.add(() -> task.process(events.filter(tables)));
-          }
-          results = executorService.invokeAll(tasks);
-          retryFailedTasks = processTaskResults(results, events);
-        }
-
-        // Reprocess the failed tasks.
-        // TODO Move to a separate task queue since reprocess may be a heavy
-        // operation for large OM DB instances
-        if (!retryFailedTasks.isEmpty()) {
-          tasks.clear();
-          for (String taskName : retryFailedTasks) {
-            ReconDBUpdateTask task = reconDBUpdateTasks.get(taskName);
-            tasks.add(() -> task.reprocess(omMetadataManager));
-          }
-          results = executorService.invokeAll(tasks);
-          List<String> reprocessFailedTasks =
-              processTaskResults(results, events);
-          for (String taskName : reprocessFailedTasks) {
-            LOG.info("Reprocess step failed for task : " + taskName);
-            if (taskFailureCounter.get(taskName).incrementAndGet() >
-                TASK_FAILURE_THRESHOLD) {
-              LOG.info("Blacklisting Task since it failed retry and " +
-                  "reprocess more than " + TASK_FAILURE_THRESHOLD + " times.");
-              reconDBUpdateTasks.remove(taskName);
-            }
-          }
-        }
-      }
-    } catch (ExecutionException e) {
-      LOG.error("Unexpected error : ", e);
-    } finally {
-      taskSemaphore.release();
-    }
-  }
-
-  @Override
-  public void reInitializeTasks(OMMetadataManager omMetadataManager)
-      throws InterruptedException {
-    taskSemaphore.acquire();
-
-    try {
-      Collection<Callable<Pair>> tasks = new ArrayList<>();
-      for (Map.Entry<String, ReconDBUpdateTask> taskEntry :
-          reconDBUpdateTasks.entrySet()) {
-        ReconDBUpdateTask task = taskEntry.getValue();
-        tasks.add(() -> task.reprocess(omMetadataManager));
-      }
-
-      List<Future<Pair>> results = executorService.invokeAll(tasks);
-      for (Future<Pair> f : results) {
-        Pair result = f.get();
-        String taskName = result.getLeft().toString();
-        if (!(Boolean) result.getRight()) {
-          LOG.info("Init failed for task : " + taskName);
-        }
-      }
-    } catch (ExecutionException e) {
-      LOG.error("Unexpected error : ", e);
-    } finally {
-      taskSemaphore.release();
-    }
-  }
-
-  /**
-   * Store the last completed event sequence number and timestamp to the DB
-   * for that task.
-   * @param taskName task name to be updated.
-   * @param lastSequenceNumber the new sequence number.
-   */
-  private void storeLastCompletedTransaction(
-      String taskName, long lastSequenceNumber) {
-    ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus(taskName,
-        System.currentTimeMillis(), lastSequenceNumber);
-    reconTaskStatusDao.update(reconTaskStatusRecord);
-  }
-
-  @Override
-  public Map<String, ReconDBUpdateTask> getRegisteredTasks() {
-    return reconDBUpdateTasks;
-  }
-
-  @Override
-  public ReconTaskStatusDao getReconTaskStatusDao() {
-    return reconTaskStatusDao;
-  }
-
-  @Override
-  public void stop() {
-    this.executorService.shutdownNow();
-  }
-
-  /**
-   * Wait on results of all tasks.
-   * @param results Set of Futures.
-   * @param events Events.
-   * @return List of failed task names
-   * @throws ExecutionException execution Exception
-   * @throws InterruptedException Interrupted Exception
-   */
-  private List<String> processTaskResults(List<Future<Pair>> results,
-                                          OMUpdateEventBatch events)
-      throws ExecutionException, InterruptedException {
-    List<String> failedTasks = new ArrayList<>();
-    for (Future<Pair> f : results) {
-      Pair result = f.get();
-      String taskName = result.getLeft().toString();
-      if (!(Boolean) result.getRight()) {
-        LOG.info("Failed task : " + taskName);
-        failedTasks.add(taskName);
-      } else {
-        taskFailureCounter.get(taskName).set(0);
-        storeLastCompletedTransaction(taskName, events.getLastSequenceNumber());
-      }
-    }
-    return failedTasks;
-  }
-}
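
Stripped of the executor and DAO plumbing, the failure policy above reduces to: try process twice, fall back to reprocess once, and blacklist a task whose reprocess failures cross the threshold. Below is a distilled single-task sketch under those simplifications; Attempt is a stand-in interface, not part of Recon.

import java.util.concurrent.atomic.AtomicInteger;

public class TaskFailurePolicySketch {
  interface Attempt { boolean run(); }

  private static final int TASK_FAILURE_THRESHOLD = 2;
  private final AtomicInteger failureCounter = new AtomicInteger(0);
  private boolean blacklisted = false;

  void consume(Attempt process, Attempt reprocess) {
    if (blacklisted) {
      return;
    }
    // First attempt, then one retry; success resets the failure counter.
    if (process.run() || process.run()) {
      failureCounter.set(0);
      return;
    }
    // Both process attempts failed: reprocess once.
    if (reprocess.run()) {
      failureCounter.set(0);
    } else if (failureCounter.incrementAndGet() > TASK_FAILURE_THRESHOLD) {
      blacklisted = true;  // mirrors removing the task from the registry
    }
  }
}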
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/package-info.java
deleted file mode 100644
index fe47f4d..0000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * The classes in this package contain the various scheduled tasks used by
- * Recon.
- */
-package org.apache.hadoop.ozone.recon.tasks;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/WEB-INF/web.xml b/hadoop-ozone/recon/src/main/resources/webapps/recon/WEB-INF/web.xml
deleted file mode 100644
index 972f3bb..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/WEB-INF/web.xml
+++ /dev/null
@@ -1,28 +0,0 @@
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<web-app version="3.0" xmlns="http://java.sun.com/xml/ns/javaee"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd">
-  <listener>
-    <listener-class>org.apache.hadoop.ozone.recon.ReconGuiceServletContextListener</listener-class>
-  </listener>
-  <filter>
-    <filter-name>guiceFilter</filter-name>
-    <filter-class>com.google.inject.servlet.GuiceFilter</filter-class>
-  </filter>
-  <filter-mapping>
-    <filter-name>guiceFilter</filter-name>
-    <url-pattern>/*</url-pattern>
-  </filter-mapping>
-</web-app>
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/.gitignore b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/.gitignore
deleted file mode 100644
index 4d29575..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/.gitignore
+++ /dev/null
@@ -1,23 +0,0 @@
-# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
-
-# dependencies
-/node_modules
-/.pnp
-.pnp.js
-
-# testing
-/coverage
-
-# production
-/build
-
-# misc
-.DS_Store
-.env.local
-.env.development.local
-.env.test.local
-.env.production.local
-
-npm-debug.log*
-yarn-debug.log*
-yarn-error.log*
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/LICENSE b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/LICENSE
deleted file mode 100644
index e6a896f..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/LICENSE
+++ /dev/null
@@ -1,17279 +0,0 @@
-
---------------------------------------------------------------------------------
-
-THE FOLLOWING SETS FORTH ATTRIBUTION NOTICES FOR THIRD PARTY SOFTWARE THAT MAY BE CONTAINED IN PORTIONS OF THE OZONE RECON PRODUCT.
-
------
-
-The following software may be included in this product: @ant-design/create-react-context, create-react-context. A copy of the source code may be downloaded from https://github.com/ant-design/create-react-context (@ant-design/create-react-context), https://github.com/thejameskyle/create-react-context (create-react-context). This software contains the following license and notice below:
-
-Copyright (c) 2017-present James Kyle <me@thejameskyle.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: @babel/code-frame, @babel/helper-annotate-as-pure, @babel/helper-get-function-arity, @babel/helper-member-expression-to-functions, @babel/helper-module-imports, @babel/helper-optimise-call-expression, @babel/helper-plugin-utils, @babel/highlight, @babel/preset-react. A copy of the source code may be downloaded from https://github.com/babel/babel/tree/master/packages/babel-code-frame (@babel/code-frame), https://github.com/babel/babel/tree/master/packages/babel-helper-annotate-as-pure (@babel/helper-annotate-as-pure), https://github.com/babel/babel/tree/master/packages/babel-helper-get-function-arity (@babel/helper-get-function-arity), https://github.com/babel/babel/tree/master/packages/babel-helper-member-expression-to-functions (@babel/helper-member-expression-to-functions), https://github.com/babel/babel/tree/master/packages/babel-helper-module-imports (@babel/helper-module-imports), https://github.com/babel/babel/tree/master/packages/babel-helper-optimise-call-expression (@babel/helper-optimise-call-expression), https://github.com/babel/babel/tree/master/packages/babel-helper-plugin-utils (@babel/helper-plugin-utils), https://github.com/babel/babel/tree/master/packages/babel-highlight (@babel/highlight), https://github.com/babel/babel/tree/master/packages/babel-preset-react (@babel/preset-react). This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2014-2018 Sebastian McKenzie <sebmck@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: @babel/core, @babel/generator, @babel/helper-builder-react-jsx, @babel/helper-call-delegate, @babel/helper-create-class-features-plugin, @babel/helper-define-map, @babel/helper-hoist-variables, @babel/helper-module-transforms, @babel/helper-regex, @babel/helper-replace-supers, @babel/helper-split-export-declaration, @babel/helpers, @babel/plugin-proposal-class-properties, @babel/plugin-proposal-decorators, @babel/plugin-proposal-object-rest-spread, @babel/plugin-proposal-unicode-property-regex, @babel/plugin-syntax-typescript, @babel/plugin-transform-async-to-generator, @babel/plugin-transform-block-scoping, @babel/plugin-transform-classes, @babel/plugin-transform-destructuring, @babel/plugin-transform-dotall-regex, @babel/plugin-transform-flow-strip-types, @babel/plugin-transform-for-of, @babel/plugin-transform-function-name, @babel/plugin-transform-modules-commonjs, @babel/plugin-transform-modules-systemjs, @babel/plugin-transform-named-capturing-groups-regex, @babel/plugin-transform-new-target, @babel/plugin-transform-parameters, @babel/plugin-transform-react-jsx, @babel/plugin-transform-regenerator, @babel/plugin-transform-runtime, @babel/plugin-transform-typescript, @babel/plugin-transform-unicode-regex, @babel/preset-env, @babel/preset-typescript, @babel/runtime, @babel/template, @babel/traverse, @babel/types. A copy of the source code may be downloaded from https://github.com/babel/babel/tree/master/packages/babel-core (@babel/core), https://github.com/babel/babel/tree/master/packages/babel-generator (@babel/generator), https://github.com/babel/babel/tree/master/packages/babel-helper-builder-react-jsx (@babel/helper-builder-react-jsx), https://github.com/babel/babel/tree/master/packages/babel-helper-call-delegate (@babel/helper-call-delegate), https://github.com/babel/babel/tree/master/packages/babel-helper-create-class-features-plugin (@babel/helper-create-class-features-plugin), https://github.com/babel/babel/tree/master/packages/babel-helper-define-map (@babel/helper-define-map), https://github.com/babel/babel/tree/master/packages/babel-helper-hoist-variables (@babel/helper-hoist-variables), https://github.com/babel/babel/tree/master/packages/babel-helper-module-transforms (@babel/helper-module-transforms), https://github.com/babel/babel/tree/master/packages/babel-helper-regex (@babel/helper-regex), https://github.com/babel/babel/tree/master/packages/babel-helper-replace-supers (@babel/helper-replace-supers), https://github.com/babel/babel/tree/master/packages/babel-helper-split-export-declaration (@babel/helper-split-export-declaration), https://github.com/babel/babel/tree/master/packages/babel-helpers (@babel/helpers), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-class-properties (@babel/plugin-proposal-class-properties), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-decorators (@babel/plugin-proposal-decorators), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-object-rest-spread (@babel/plugin-proposal-object-rest-spread), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-unicode-property-regex (@babel/plugin-proposal-unicode-property-regex), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-typescript (@babel/plugin-syntax-typescript), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-async-to-generator (@babel/plugin-transform-async-to-generator), 
https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-block-scoping (@babel/plugin-transform-block-scoping), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-classes (@babel/plugin-transform-classes), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-destructuring (@babel/plugin-transform-destructuring), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-dotall-regex (@babel/plugin-transform-dotall-regex), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-flow-strip-types (@babel/plugin-transform-flow-strip-types), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-for-of (@babel/plugin-transform-for-of), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-function-name (@babel/plugin-transform-function-name), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-modules-commonjs (@babel/plugin-transform-modules-commonjs), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-modules-systemjs (@babel/plugin-transform-modules-systemjs), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-named-capturing-groups-regex (@babel/plugin-transform-named-capturing-groups-regex), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-new-target (@babel/plugin-transform-new-target), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-parameters (@babel/plugin-transform-parameters), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-react-jsx (@babel/plugin-transform-react-jsx), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-regenerator (@babel/plugin-transform-regenerator), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-runtime (@babel/plugin-transform-runtime), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-typescript (@babel/plugin-transform-typescript), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-unicode-regex (@babel/plugin-transform-unicode-regex), https://github.com/babel/babel/tree/master/packages/babel-preset-env (@babel/preset-env), https://github.com/babel/babel/tree/master/packages/babel-preset-typescript (@babel/preset-typescript), https://github.com/babel/babel/tree/master/packages/babel-runtime (@babel/runtime), https://github.com/babel/babel/tree/master/packages/babel-template (@babel/template), https://github.com/babel/babel/tree/master/packages/babel-traverse (@babel/traverse), https://github.com/babel/babel/tree/master/packages/babel-types (@babel/types). This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2014-present Sebastian McKenzie and other contributors
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: @babel/helper-builder-binary-assignment-operator-visitor, @babel/helper-explode-assignable-expression, @babel/helper-function-name, @babel/helper-remap-async-to-generator, @babel/helper-simple-access, @babel/helper-wrap-function, @babel/plugin-proposal-async-generator-functions, @babel/plugin-proposal-json-strings, @babel/plugin-proposal-optional-catch-binding, @babel/plugin-syntax-async-generators, @babel/plugin-syntax-decorators, @babel/plugin-syntax-dynamic-import, @babel/plugin-syntax-flow, @babel/plugin-syntax-json-strings, @babel/plugin-syntax-jsx, @babel/plugin-syntax-object-rest-spread, @babel/plugin-syntax-optional-catch-binding, @babel/plugin-transform-arrow-functions, @babel/plugin-transform-block-scoped-functions, @babel/plugin-transform-computed-properties, @babel/plugin-transform-duplicate-keys, @babel/plugin-transform-exponentiation-operator, @babel/plugin-transform-literals, @babel/plugin-transform-member-expression-literals, @babel/plugin-transform-modules-amd, @babel/plugin-transform-modules-umd, @babel/plugin-transform-object-super, @babel/plugin-transform-property-literals, @babel/plugin-transform-react-constant-elements, @babel/plugin-transform-react-display-name, @babel/plugin-transform-react-jsx-self, @babel/plugin-transform-react-jsx-source, @babel/plugin-transform-reserved-words, @babel/plugin-transform-shorthand-properties, @babel/plugin-transform-spread, @babel/plugin-transform-sticky-regex, @babel/plugin-transform-template-literals, @babel/plugin-transform-typeof-symbol. A copy of the source code may be downloaded from https://github.com/babel/babel/tree/master/packages/babel-helper-builder-binary-assignment-operator-visitor (@babel/helper-builder-binary-assignment-operator-visitor), https://github.com/babel/babel/tree/master/packages/babel-helper-explode-assignable-expression (@babel/helper-explode-assignable-expression), https://github.com/babel/babel/tree/master/packages/babel-helper-function-name (@babel/helper-function-name), https://github.com/babel/babel/tree/master/packages/babel-helper-remap-async-to-generator (@babel/helper-remap-async-to-generator), https://github.com/babel/babel/tree/master/packages/babel-helper-simple-access (@babel/helper-simple-access), https://github.com/babel/babel/tree/master/packages/babel-helper-wrap-function (@babel/helper-wrap-function), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-async-generator-functions (@babel/plugin-proposal-async-generator-functions), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-json-strings (@babel/plugin-proposal-json-strings), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-optional-catch-binding (@babel/plugin-proposal-optional-catch-binding), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-async-generators (@babel/plugin-syntax-async-generators), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-decorators (@babel/plugin-syntax-decorators), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-dynamic-import (@babel/plugin-syntax-dynamic-import), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-flow (@babel/plugin-syntax-flow), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-json-strings (@babel/plugin-syntax-json-strings), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-jsx (@babel/plugin-syntax-jsx), 
https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-object-rest-spread (@babel/plugin-syntax-object-rest-spread), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-optional-catch-binding (@babel/plugin-syntax-optional-catch-binding), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-arrow-functions (@babel/plugin-transform-arrow-functions), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-block-scoped-functions (@babel/plugin-transform-block-scoped-functions), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-computed-properties (@babel/plugin-transform-computed-properties), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-duplicate-keys (@babel/plugin-transform-duplicate-keys), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-exponentiation-operator (@babel/plugin-transform-exponentiation-operator), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-literals (@babel/plugin-transform-literals), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-member-expression-literals (@babel/plugin-transform-member-expression-literals), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-modules-amd (@babel/plugin-transform-modules-amd), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-modules-umd (@babel/plugin-transform-modules-umd), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-object-super (@babel/plugin-transform-object-super), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-property-literals (@babel/plugin-transform-property-literals), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-react-constant-elements (@babel/plugin-transform-react-constant-elements), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-react-display-name (@babel/plugin-transform-react-display-name), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-react-jsx-self (@babel/plugin-transform-react-jsx-self), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-react-jsx-source (@babel/plugin-transform-react-jsx-source), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-reserved-words (@babel/plugin-transform-reserved-words), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-shorthand-properties (@babel/plugin-transform-shorthand-properties), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-spread (@babel/plugin-transform-spread), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-sticky-regex (@babel/plugin-transform-sticky-regex), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-template-literals (@babel/plugin-transform-template-literals), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-typeof-symbol (@babel/plugin-transform-typeof-symbol). This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2014-2018 Sebastian McKenzie and other contributors
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: @babel/parser, babylon. A copy of the source code may be downloaded from https://github.com/babel/babel/tree/master/packages/babel-parser (@babel/parser), https://github.com/babel/babylon (babylon). This software contains the following license and notice below:
-
-Copyright (C) 2012-2014 by various contributors (see AUTHORS)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: @cnakazawa/watch, aws-sign2, forever-agent, oauth-sign, request, tunnel-agent. A copy of the source code may be downloaded from git://github.com/mikeal/watch.git (@cnakazawa/watch), https://github.com/mikeal/aws-sign (aws-sign2), https://github.com/mikeal/forever-agent (forever-agent), https://github.com/mikeal/oauth-sign (oauth-sign), https://github.com/request/request.git (request), https://github.com/mikeal/tunnel-agent (tunnel-agent). This software contains the following license and notice below:
-
-Apache License
-
-Version 2.0, January 2004
-
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
-
-"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of this License; and
-
-You must cause any modified files to carry prominent notices stating that You changed the files; and
-
-You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
-
-If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
------
-
-The following software may be included in this product: @csstools/convert-colors, css-blank-pseudo, css-has-pseudo, css-prefers-color-scheme, postcss-browser-comments, postcss-color-functional-notation, postcss-color-mod-function, postcss-dir-pseudo-class, postcss-double-position-gradients, postcss-env-function, postcss-focus-visible, postcss-focus-within, postcss-gap-properties, postcss-image-set-function, postcss-lab-function, postcss-logical, postcss-nesting, postcss-normalize, postcss-overflow-shorthand, postcss-place, postcss-preset-env, postcss-pseudo-class-any-link. A copy of the source code may be downloaded from https://github.com/jonathantneal/convert-colors.git (@csstools/convert-colors), https://github.com/csstools/css-blank-pseudo.git (css-blank-pseudo), https://github.com/csstools/css-has-pseudo.git (css-has-pseudo), https://github.com/csstools/css-prefers-color-scheme.git (css-prefers-color-scheme), https://github.com/csstools/postcss-browser-comments.git (postcss-browser-comments), https://github.com/jonathantneal/postcss-color-functional-notation.git (postcss-color-functional-notation), https://github.com/jonathantneal/postcss-color-mod-function.git (postcss-color-mod-function), https://github.com/jonathantneal/postcss-dir-pseudo-class.git (postcss-dir-pseudo-class), https://github.com/jonathantneal/postcss-double-position-gradients.git (postcss-double-position-gradients), https://github.com/jonathantneal/postcss-env-function.git (postcss-env-function), https://github.com/jonathantneal/postcss-focus-visible.git (postcss-focus-visible), https://github.com/jonathantneal/postcss-focus-within.git (postcss-focus-within), https://github.com/jonathantneal/postcss-gap-properties.git (postcss-gap-properties), https://github.com/jonathantneal/postcss-image-set-function.git (postcss-image-set-function), https://github.com/jonathantneal/postcss-lab-function.git (postcss-lab-function), https://github.com/jonathantneal/postcss-logical.git (postcss-logical), https://github.com/jonathantneal/postcss-nesting.git (postcss-nesting), https://github.com/csstools/postcss-normalize.git (postcss-normalize), https://github.com/jonathantneal/postcss-overflow-shorthand.git (postcss-overflow-shorthand), https://github.com/jonathantneal/postcss-place.git (postcss-place), https://github.com/csstools/postcss-preset-env.git (postcss-preset-env), https://github.com/jonathantneal/postcss-pseudo-class-any-link.git (postcss-pseudo-class-any-link). This software contains the following license and notice below:
-
-# CC0 1.0 Universal
-
-## Statement of Purpose
-
-The laws of most jurisdictions throughout the world automatically confer
-exclusive Copyright and Related Rights (defined below) upon the creator and
-subsequent owner(s) (each and all, an “owner”) of an original work of
-authorship and/or a database (each, a “Work”).
-
-Certain owners wish to permanently relinquish those rights to a Work for the
-purpose of contributing to a commons of creative, cultural and scientific works
-(“Commons”) that the public can reliably and without fear of later claims of
-infringement build upon, modify, incorporate in other works, reuse and
-redistribute as freely as possible in any form whatsoever and for any purposes,
-including without limitation commercial purposes. These owners may contribute
-to the Commons to promote the ideal of a free culture and the further
-production of creative, cultural and scientific works, or to gain reputation or
-greater distribution for their Work in part through the use and efforts of
-others.
-
-For these and/or other purposes and motivations, and without any expectation of
-additional consideration or compensation, the person associating CC0 with a
-Work (the “Affirmer”), to the extent that he or she is an owner of Copyright
-and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and
-publicly distribute the Work under its terms, with knowledge of his or her
-Copyright and Related Rights in the Work and the meaning and intended legal
-effect of CC0 on those rights.
-
-1. Copyright and Related Rights. A Work made available under CC0 may be
-   protected by copyright and related or neighboring rights (“Copyright and
-   Related Rights”). Copyright and Related Rights include, but are not limited
-   to, the following:
-   1. the right to reproduce, adapt, distribute, perform, display, communicate,
-      and translate a Work;
-   2. moral rights retained by the original author(s) and/or performer(s);
-   3. publicity and privacy rights pertaining to a person’s image or likeness
-      depicted in a Work;
-   4. rights protecting against unfair competition in regards to a Work,
-      subject to the limitations in paragraph 4(i), below;
-   5. rights protecting the extraction, dissemination, use and reuse of data in
-      a Work;
-   6. database rights (such as those arising under Directive 96/9/EC of the
-      European Parliament and of the Council of 11 March 1996 on the legal
-      protection of databases, and under any national implementation thereof,
-      including any amended or successor version of such directive); and
-   7. other similar, equivalent or corresponding rights throughout the world
-      based on applicable law or treaty, and any national implementations
-      thereof.
-
-2. Waiver. To the greatest extent permitted by, but not in contravention of,
-   applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and
-   unconditionally waives, abandons, and surrenders all of Affirmer’s Copyright
-   and Related Rights and associated claims and causes of action, whether now
-   known or unknown (including existing as well as future claims and causes of
-   action), in the Work (i) in all territories worldwide, (ii) for the maximum
-   duration provided by applicable law or treaty (including future time
-   extensions), (iii) in any current or future medium and for any number of
-   copies, and (iv) for any purpose whatsoever, including without limitation
-   commercial, advertising or promotional purposes (the “Waiver”). Affirmer
-   makes the Waiver for the benefit of each member of the public at large and
-   to the detriment of Affirmer’s heirs and successors, fully intending that
-   such Waiver shall not be subject to revocation, rescission, cancellation,
-   termination, or any other legal or equitable action to disrupt the quiet
-   enjoyment of the Work by the public as contemplated by Affirmer’s express
-   Statement of Purpose.
-
-3. Public License Fallback. Should any part of the Waiver for any reason be
-   judged legally invalid or ineffective under applicable law, then the Waiver
-   shall be preserved to the maximum extent permitted taking into account
-   Affirmer’s express Statement of Purpose. In addition, to the extent the
-   Waiver is so judged Affirmer hereby grants to each affected person a
-   royalty-free, non transferable, non sublicensable, non exclusive,
-   irrevocable and unconditional license to exercise Affirmer’s Copyright and
-   Related Rights in the Work (i) in all territories worldwide, (ii) for the
-   maximum duration provided by applicable law or treaty (including future time
-   extensions), (iii) in any current or future medium and for any number of
-   copies, and (iv) for any purpose whatsoever, including without limitation
-   commercial, advertising or promotional purposes (the “License”). The License
-   shall be deemed effective as of the date CC0 was applied by Affirmer to the
-   Work. Should any part of the License for any reason be judged legally
-   invalid or ineffective under applicable law, such partial invalidity or
-   ineffectiveness shall not invalidate the remainder of the License, and in
-   such case Affirmer hereby affirms that he or she will not (i) exercise any
-   of his or her remaining Copyright and Related Rights in the Work or (ii)
-   assert any associated claims and causes of action with respect to the Work,
-   in either case contrary to Affirmer’s express Statement of Purpose.
-
-4. Limitations and Disclaimers.
-   1. No trademark or patent rights held by Affirmer are waived, abandoned,
-      surrendered, licensed or otherwise affected by this document.
-   2. Affirmer offers the Work as-is and makes no representations or warranties
-      of any kind concerning the Work, express, implied, statutory or
-      otherwise, including without limitation warranties of title,
-      merchantability, fitness for a particular purpose, non infringement, or
-      the absence of latent or other defects, accuracy, or the present or
-      absence of errors, whether or not discoverable, all to the greatest
-      extent permissible under applicable law.
-   3. Affirmer disclaims responsibility for clearing rights of other persons
-      that may apply to the Work or any use thereof, including without
-      limitation any person’s Copyright and Related Rights in the Work.
-      Further, Affirmer disclaims responsibility for obtaining any necessary
-      consents, permissions or other rights required for any use of the Work.
-   4. Affirmer understands and acknowledges that Creative Commons is not a
-      party to this document and has no duty or obligation with respect to this
-      CC0 or use of the Work.
-
-For more information, please see
-http://creativecommons.org/publicdomain/zero/1.0/.
-
------
-
-The following software may be included in this product: @csstools/normalize.css. A copy of the source code may be downloaded from https://github.com/csstools/normalize.css.git. This software contains the following license and notice below:
-
-# The MIT License (MIT)
-
-Copyright © Jonathan Neal and Nicolas Gallagher
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: @jest/console, @jest/core, @jest/environment, @jest/fake-timers, @jest/reporters, @jest/source-map, @jest/test-result, @jest/test-sequencer, @jest/transform, @jest/types, babel-jest, babel-plugin-jest-hoist, babel-preset-jest, diff-sequences, expect, jest, jest-changed-files, jest-cli, jest-config, jest-diff, jest-docblock, jest-each, jest-environment-jsdom, jest-environment-node, jest-get-type, jest-haste-map, jest-jasmine2, jest-leak-detector, jest-matcher-utils, jest-message-util, jest-mock, jest-regex-util, jest-resolve, jest-resolve-dependencies, jest-runner, jest-runtime, jest-serializer, jest-snapshot, jest-util, jest-validate, jest-watcher, jest-worker, pretty-format. A copy of the source code may be downloaded from https://github.com/facebook/jest.git (@jest/console), https://github.com/facebook/jest (@jest/core), https://github.com/facebook/jest.git (@jest/environment), https://github.com/facebook/jest.git (@jest/fake-timers), https://github.com/facebook/jest (@jest/reporters), https://github.com/facebook/jest.git (@jest/source-map), https://github.com/facebook/jest.git (@jest/test-result), https://github.com/facebook/jest.git (@jest/test-sequencer), https://github.com/facebook/jest.git (@jest/transform), https://github.com/facebook/jest.git (@jest/types), https://github.com/facebook/jest.git (babel-jest), https://github.com/facebook/jest.git (babel-plugin-jest-hoist), https://github.com/facebook/jest.git (babel-preset-jest), https://github.com/facebook/jest.git (diff-sequences), https://github.com/facebook/jest.git (expect), https://github.com/facebook/jest (jest), https://github.com/facebook/jest.git (jest-changed-files), https://github.com/facebook/jest (jest-cli), https://github.com/facebook/jest.git (jest-config), https://github.com/facebook/jest.git (jest-diff), https://github.com/facebook/jest.git (jest-docblock), https://github.com/facebook/jest.git (jest-each), https://github.com/facebook/jest.git (jest-environment-jsdom), https://github.com/facebook/jest.git (jest-environment-node), https://github.com/facebook/jest.git (jest-get-type), https://github.com/facebook/jest.git (jest-haste-map), https://github.com/facebook/jest.git (jest-jasmine2), https://github.com/facebook/jest.git (jest-leak-detector), https://github.com/facebook/jest.git (jest-matcher-utils), https://github.com/facebook/jest.git (jest-message-util), https://github.com/facebook/jest.git (jest-mock), https://github.com/facebook/jest.git (jest-regex-util), https://github.com/facebook/jest.git (jest-resolve), https://github.com/facebook/jest.git (jest-resolve-dependencies), https://github.com/facebook/jest.git (jest-runner), https://github.com/facebook/jest.git (jest-runtime), https://github.com/facebook/jest.git (jest-serializer), https://github.com/facebook/jest.git (jest-snapshot), https://github.com/facebook/jest.git (jest-util), https://github.com/facebook/jest.git (jest-validate), https://github.com/facebook/jest (jest-watcher), https://github.com/facebook/jest.git (jest-worker), https://github.com/facebook/jest.git (pretty-format). This software contains the following license and notice below:
-
-MIT License
-
-For Jest software
-
-Copyright (c) 2014-present, Facebook, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: @mrmlnc/readdir-enhanced. A copy of the source code may be downloaded from https://github.com/bigstickcarpet/readdir-enhanced.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 James Messinger
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-.
-
------
-
-The following software may be included in this product: @svgr/babel-plugin-add-jsx-attribute, @svgr/babel-plugin-remove-jsx-attribute, @svgr/babel-plugin-remove-jsx-empty-expression, @svgr/babel-plugin-replace-jsx-attribute-value, @svgr/babel-plugin-svg-dynamic-title, @svgr/babel-plugin-svg-em-dimensions, @svgr/babel-plugin-transform-react-native-svg, @svgr/babel-plugin-transform-svg-component, @svgr/babel-preset, @svgr/core, @svgr/hast-util-to-babel-ast, @svgr/plugin-jsx, @svgr/plugin-svgo, @svgr/webpack. A copy of the source code may be downloaded from https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-add-jsx-attribute (@svgr/babel-plugin-add-jsx-attribute), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-remove-jsx-attribute (@svgr/babel-plugin-remove-jsx-attribute), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-remove-jsx-empty-expression (@svgr/babel-plugin-remove-jsx-empty-expression), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-replace-jsx-attribute-value (@svgr/babel-plugin-replace-jsx-attribute-value), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-svg-dynamic-title (@svgr/babel-plugin-svg-dynamic-title), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-svg-em-dimensions (@svgr/babel-plugin-svg-em-dimensions), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-transform-react-native-svg (@svgr/babel-plugin-transform-react-native-svg), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-transform-svg-component (@svgr/babel-plugin-transform-svg-component), https://github.com/smooth-code/svgr/tree/master/packages/babel-preset (@svgr/babel-preset), https://github.com/smooth-code/svgr/tree/master/packages/core (@svgr/core), https://github.com/smooth-code/svgr/tree/master/packages/hast-util-to-babel-ast (@svgr/hast-util-to-babel-ast), https://github.com/smooth-code/svgr/tree/master/packages/plugin-jsx (@svgr/plugin-jsx), https://github.com/smooth-code/svgr/tree/master/packages/plugin-svgo (@svgr/plugin-svgo), git@github.com:smooth-code/svgr.git (@svgr/webpack). This software contains the following license and notice below:
-
-Copyright 2017 Smooth Code
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: @types/babel__core, @types/babel__generator, @types/babel__template, @types/babel__traverse, @types/history, @types/hoist-non-react-statics, @types/istanbul-lib-coverage, @types/jest, @types/jest-diff, @types/node, @types/prop-types, @types/q, @types/react, @types/react-dom, @types/react-router, @types/react-router-dom, @types/react-slick, @types/stack-utils, @types/unist, @types/vfile, @types/vfile-message, @types/yargs. A copy of the source code may be downloaded from https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/babel__core), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/babel__generator), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/babel__template), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/babel__traverse), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/history), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/hoist-non-react-statics), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/istanbul-lib-coverage), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/jest), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/jest-diff), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/node), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/prop-types), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/q), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/react), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/react-dom), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/react-router), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/react-router-dom), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/react-slick), https://www.github.com/DefinitelyTyped/DefinitelyTyped.git (@types/stack-utils), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/unist), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/vfile), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/vfile-message), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/yargs). This software contains the following license and notice below:
-
-MIT License
-
-    Copyright (c) Microsoft Corporation. All rights reserved.
-
-    Permission is hereby granted, free of charge, to any person obtaining a copy
-    of this software and associated documentation files (the "Software"), to deal
-    in the Software without restriction, including without limitation the rights
-    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-    copies of the Software, and to permit persons to whom the Software is
-    furnished to do so, subject to the following conditions:
-
-    The above copyright notice and this permission notice shall be included in all
-    copies or substantial portions of the Software.
-
-    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-    SOFTWARE
-
------
-
-The following software may be included in this product: @typescript-eslint/eslint-plugin. A copy of the source code may be downloaded from https://github.com/typescript-eslint/typescript-eslint.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2019 TypeScript ESLint and other contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: @typescript-eslint/parser. A copy of the source code may be downloaded from https://github.com/typescript-eslint/typescript-eslint.git. This software contains the following license and notice below:
-
-TypeScript ESLint Parser
-Copyright JS Foundation and other contributors, https://js.foundation
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-  * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-  * Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: @typescript-eslint/typescript-estree. A copy of the source code may be downloaded from https://github.com/typescript-eslint/typescript-eslint.git. This software contains the following license and notice below:
-
-TypeScript ESTree
-
-Originally extracted from:
-
-TypeScript ESLint Parser
-Copyright JS Foundation and other contributors, https://js.foundation
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-  * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-  * Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: @webassemblyjs/ast, @webassemblyjs/helper-api-error, @webassemblyjs/helper-buffer, @webassemblyjs/helper-code-frame, @webassemblyjs/helper-fsm, @webassemblyjs/helper-module-context, @webassemblyjs/helper-wasm-bytecode, @webassemblyjs/helper-wasm-section, @webassemblyjs/ieee754, @webassemblyjs/utf8, @webassemblyjs/wasm-edit, @webassemblyjs/wasm-gen, @webassemblyjs/wasm-opt, @webassemblyjs/wasm-parser, @webassemblyjs/wast-parser, @webassemblyjs/wast-printer. A copy of the source code may be downloaded from https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/ast), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/helper-buffer), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/helper-code-frame), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/helper-module-context), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/helper-wasm-bytecode), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/helper-wasm-section), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/utf8), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/wasm-edit), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/wasm-gen), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/wasm-opt), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/wasm-parser), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/wast-parser), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/wast-printer). This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2018 Sven Sauleau <sven@sauleau.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: @webassemblyjs/floating-point-hex-parser. A copy of the source code may be downloaded from https://github.com/xtuc/webassemblyjs.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 Mauro Bringolf
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: @webassemblyjs/leb128. This software contains the following license and notice below:
-
-Copyright 2012 The Obvious Corporation.
-http://obvious.com/
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
--------------------------------------------------------------------------
-                              Apache License
-                        Version 2.0, January 2004
-                     http://www.apache.org/licenses/
-
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-   "License" shall mean the terms and conditions for use, reproduction,
-   and distribution as defined by Sections 1 through 9 of this document.
-
-   "Licensor" shall mean the copyright owner or entity authorized by
-   the copyright owner that is granting the License.
-
-   "Legal Entity" shall mean the union of the acting entity and all
-   other entities that control, are controlled by, or are under common
-   control with that entity. For the purposes of this definition,
-   "control" means (i) the power, direct or indirect, to cause the
-   direction or management of such entity, whether by contract or
-   otherwise, or (ii) ownership of fifty percent (50%) or more of the
-   outstanding shares, or (iii) beneficial ownership of such entity.
-
-   "You" (or "Your") shall mean an individual or Legal Entity
-   exercising permissions granted by this License.
-
-   "Source" form shall mean the preferred form for making modifications,
-   including but not limited to software source code, documentation
-   source, and configuration files.
-
-   "Object" form shall mean any form resulting from mechanical
-   transformation or translation of a Source form, including but
-   not limited to compiled object code, generated documentation,
-   and conversions to other media types.
-
-   "Work" shall mean the work of authorship, whether in Source or
-   Object form, made available under the License, as indicated by a
-   copyright notice that is included in or attached to the work
-   (an example is provided in the Appendix below).
-
-   "Derivative Works" shall mean any work, whether in Source or Object
-   form, that is based on (or derived from) the Work and for which the
-   editorial revisions, annotations, elaborations, or other modifications
-   represent, as a whole, an original work of authorship. For the purposes
-   of this License, Derivative Works shall not include works that remain
-   separable from, or merely link (or bind by name) to the interfaces of,
-   the Work and Derivative Works thereof.
-
-   "Contribution" shall mean any work of authorship, including
-   the original version of the Work and any modifications or additions
-   to that Work or Derivative Works thereof, that is intentionally
-   submitted to Licensor for inclusion in the Work by the copyright owner
-   or by an individual or Legal Entity authorized to submit on behalf of
-   the copyright owner. For the purposes of this definition, "submitted"
-   means any form of electronic, verbal, or written communication sent
-   to the Licensor or its representatives, including but not limited to
-   communication on electronic mailing lists, source code control systems,
-   and issue tracking systems that are managed by, or on behalf of, the
-   Licensor for the purpose of discussing and improving the Work, but
-   excluding communication that is conspicuously marked or otherwise
-   designated in writing by the copyright owner as "Not a Contribution."
-
-   "Contributor" shall mean Licensor and any individual or Legal Entity
-   on behalf of whom a Contribution has been received by Licensor and
-   subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   copyright license to reproduce, prepare Derivative Works of,
-   publicly display, publicly perform, sublicense, and distribute the
-   Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   (except as stated in this section) patent license to make, have made,
-   use, offer to sell, sell, import, and otherwise transfer the Work,
-   where such license applies only to those patent claims licensable
-   by such Contributor that are necessarily infringed by their
-   Contribution(s) alone or by combination of their Contribution(s)
-   with the Work to which such Contribution(s) was submitted. If You
-   institute patent litigation against any entity (including a
-   cross-claim or counterclaim in a lawsuit) alleging that the Work
-   or a Contribution incorporated within the Work constitutes direct
-   or contributory patent infringement, then any patent licenses
-   granted to You under this License for that Work shall terminate
-   as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-   Work or Derivative Works thereof in any medium, with or without
-   modifications, and in Source or Object form, provided that You
-   meet the following conditions:
-
-   (a) You must give any other recipients of the Work or
-       Derivative Works a copy of this License; and
-
-   (b) You must cause any modified files to carry prominent notices
-       stating that You changed the files; and
-
-   (c) You must retain, in the Source form of any Derivative Works
-       that You distribute, all copyright, patent, trademark, and
-       attribution notices from the Source form of the Work,
-       excluding those notices that do not pertain to any part of
-       the Derivative Works; and
-
-   (d) If the Work includes a "NOTICE" text file as part of its
-       distribution, then any Derivative Works that You distribute must
-       include a readable copy of the attribution notices contained
-       within such NOTICE file, excluding those notices that do not
-       pertain to any part of the Derivative Works, in at least one
-       of the following places: within a NOTICE text file distributed
-       as part of the Derivative Works; within the Source form or
-       documentation, if provided along with the Derivative Works; or,
-       within a display generated by the Derivative Works, if and
-       wherever such third-party notices normally appear. The contents
-       of the NOTICE file are for informational purposes only and
-       do not modify the License. You may add Your own attribution
-       notices within Derivative Works that You distribute, alongside
-       or as an addendum to the NOTICE text from the Work, provided
-       that such additional attribution notices cannot be construed
-       as modifying the License.
-
-   You may add Your own copyright statement to Your modifications and
-   may provide additional or different license terms and conditions
-   for use, reproduction, or distribution of Your modifications, or
-   for any such Derivative Works as a whole, provided Your use,
-   reproduction, and distribution of the Work otherwise complies with
-   the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-   any Contribution intentionally submitted for inclusion in the Work
-   by You to the Licensor shall be under the terms and conditions of
-   this License, without any additional terms or conditions.
-   Notwithstanding the above, nothing herein shall supersede or modify
-   the terms of any separate license agreement you may have executed
-   with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-   names, trademarks, service marks, or product names of the Licensor,
-   except as required for reasonable and customary use in describing the
-   origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-   agreed to in writing, Licensor provides the Work (and each
-   Contributor provides its Contributions) on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-   implied, including, without limitation, any warranties or conditions
-   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-   PARTICULAR PURPOSE. You are solely responsible for determining the
-   appropriateness of using or redistributing the Work and assume any
-   risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-   whether in tort (including negligence), contract, or otherwise,
-   unless required by applicable law (such as deliberate and grossly
-   negligent acts) or agreed to in writing, shall any Contributor be
-   liable to You for damages, including any direct, indirect, special,
-   incidental, or consequential damages of any character arising as a
-   result of this License or out of the use or inability to use the
-   Work (including but not limited to damages for loss of goodwill,
-   work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses), even if such Contributor
-   has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-   the Work or Derivative Works thereof, You may choose to offer,
-   and charge a fee for, acceptance of support, warranty, indemnity,
-   or other liability obligations and/or rights consistent with this
-   License. However, in accepting such obligations, You may act only
-   on Your own behalf and on Your sole responsibility, not on behalf
-   of any other Contributor, and only if You agree to indemnify,
-   defend, and hold each Contributor harmless for any liability
-   incurred by, or claims asserted against, such Contributor by reason
-   of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
------
-
-The following software may be included in this product: @xtuc/ieee754. A copy of the source code may be downloaded from git://github.com/feross/ieee754.git. This software contains the following license and notice below:
-
-Copyright (c) 2008, Fair Oaks Labs, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice,
-   this list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
- * Neither the name of Fair Oaks Labs, Inc. nor the names of its contributors
-   may be used to endorse or promote products derived from this software
-   without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: @xtuc/long, spdx-correct, validate-npm-package-license. A copy of the source code may be downloaded from https://github.com/dcodeIO/long.js.git (@xtuc/long), https://github.com/jslicense/spdx-correct.js.git (spdx-correct), https://github.com/kemitchell/validate-npm-package-license.js.git (validate-npm-package-license). This software contains the following license and notice below:
-
-Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
------
-
-The following software may be included in this product: abab. A copy of the source code may be downloaded from git+https://github.com/jsdom/abab.git. This software contains the following license and notice below:
-
-Both the original source code and new contributions in this repository are released under the [W3C 3-clause BSD license](https://github.com/w3c/web-platform-tests/blob/master/LICENSE.md#w3c-3-clause-bsd-license).
-
-# W3C 3-clause BSD License
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-*    Redistributions of works must retain the original copyright notice, this list of conditions and the following disclaimer.
-*    Redistributions in binary form must reproduce the original copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-*    Neither the name of the W3C nor the names of its contributors may be used to endorse or promote products derived from this work without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: abbrev. A copy of the source code may be downloaded from http://github.com/isaacs/abbrev-js. This software contains the following license and notice below:
-
-This software is dual-licensed under the ISC and MIT licenses.
-You may use this software under EITHER of the following licenses.
-
-----------
-
-The ISC License
-
-Copyright (c) Isaac Z. Schlueter and Contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-----------
-
-Copyright Isaac Z. Schlueter and Contributors
-All rights reserved.
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: accepts, mime-types. A copy of the source code may be downloaded from https://github.com/jshttp/accepts.git (accepts), https://github.com/jshttp/mime-types.git (mime-types). This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2014 Jonathan Ong <me@jongleberry.com>
-Copyright (c) 2015 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: acorn, acorn-walk. A copy of the source code may be downloaded from https://github.com/acornjs/acorn.git (acorn), https://github.com/acornjs/acorn.git (acorn-walk). This software contains the following license and notice below:
-
-Copyright (C) 2012-2018 by various contributors (see AUTHORS)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: acorn-dynamic-import. A copy of the source code may be downloaded from https://github.com/kesne/acorn-dynamic-import. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2016 Jordan Gensler
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: acorn-globals, is-promise. A copy of the source code may be downloaded from https://github.com/ForbesLindesay/acorn-globals.git (acorn-globals), https://github.com/then/is-promise.git (is-promise). This software contains the following license and notice below:
-
-Copyright (c) 2014 Forbes Lindesay
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: acorn-jsx. A copy of the source code may be downloaded from https://github.com/RReverser/acorn-jsx. This software contains the following license and notice below:
-
-Copyright (C) 2012-2017 by Ingvar Stepanyan
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: add-dom-event-listener, async-validator, css-animation, dom-align, rc-align, rc-animate, rc-collapse, rc-dialog, rc-dropdown, rc-form, rc-input-number, rc-menu, rc-notification, rc-pagination, rc-progress, rc-rate, rc-steps, rc-switch, rc-tabs, rc-time-picker. A copy of the source code may be downloaded from git@github.com:yiminghe/add-dom-event-listener (add-dom-event-listener), git@github.com:yiminghe/async-validator.git (async-validator), git@github.com:yiminghe/css-animation.git (css-animation), git@github.com:yiminghe/dom-align.git (dom-align), git@github.com:react-component/align.git (rc-align), git@github.com:react-component/animate.git (rc-animate), git@github.com:react-component/collapse.git (rc-collapse), git@github.com:react-component/dialog.git (rc-dialog), git@github.com:react-component/dropdown.git (rc-dropdown), https://github.com/react-component/form.git (rc-form), git@github.com:react-component/input-number.git (rc-input-number), git@github.com:react-component/menu.git (rc-menu), git@github.com:react-component/notification.git (rc-notification), git@github.com:react-component/pagination.git (rc-pagination), git@github.com:react-component/progress.git (rc-progress), https://github.com/react-component/rate.git (rc-rate),  git+ssh://git@github.com/react-component/steps.git (rc-steps), git@github.com:react-component/switch.git (rc-switch), git@github.com:react-component/tabs.git (rc-tabs), git@github.com:react-component/time-picker.git (rc-time-picker). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-present yiminghe
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: address. A copy of the source code may be downloaded from git://github.com/node-modules/address.git. This software contains the following license and notice below:
-
-This software is licensed under the MIT License.
-
-Copyright (C) 2013 - 2014 fengmk2 <fengmk2@gmail.com>
-Copyright (C) 2015 - 2016 node-modules
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: ajv. A copy of the source code may be downloaded from https://github.com/epoberezkin/ajv.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015-2017 Evgeny Poberezkin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: ajv-errors, fast-deep-equal, json-schema-traverse. A copy of the source code may be downloaded from git+https://github.com/epoberezkin/ajv-errors.git (ajv-errors), git+https://github.com/epoberezkin/fast-deep-equal.git (fast-deep-equal), git+https://github.com/epoberezkin/json-schema-traverse.git (json-schema-traverse). This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 Evgeny Poberezkin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: ajv-keywords. A copy of the source code may be downloaded from git+https://github.com/epoberezkin/ajv-keywords.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Evgeny Poberezkin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: alphanum-sort, postcss-minify-font-values, postcss-value-parser. A copy of the source code may be downloaded from https://github.com/TrySound/alphanum-sort.git (alphanum-sort), https://github.com/cssnano/cssnano.git (postcss-minify-font-values), https://github.com/TrySound/postcss-value-parser.git (postcss-value-parser). This software contains the following license and notice below:
-
-Copyright (c) Bogdan Chadkin <trysound@yandex.ru>
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: ansi-colors. A copy of the source code may be downloaded from https://github.com/doowb/ansi-colors.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015-present, Brian Woodward.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: ansi-escapes, ansi-regex, ansi-styles, binary-extensions, callsites, camelcase, chalk, decamelize, del, execa, find-up, get-stream, globals, globby, gzip-size, has-flag, import-fresh, import-local, internal-ip, invert-kv, is-generator-fn, is-root, is-svg, lcid, locate-path, make-dir, mem, mimic-fn, normalize-url, opn, os-locale, p-is-promise, p-limit, p-locate, p-map, p-try, parent-module, parse-json, path-type, pify, pkg-dir, pretty-bytes, read-pkg, read-pkg-up, resolve-from, slash, string-length, string-width, strip-ansi, supports-color. A copy of the source code may be downloaded from https://github.com/sindresorhus/ansi-escapes.git (ansi-escapes), https://github.com/chalk/ansi-regex.git (ansi-regex), https://github.com/chalk/ansi-styles.git (ansi-styles), https://github.com/sindresorhus/binary-extensions.git (binary-extensions), https://github.com/sindresorhus/callsites.git (callsites), https://github.com/sindresorhus/camelcase.git (camelcase), https://github.com/chalk/chalk.git (chalk), https://github.com/sindresorhus/decamelize.git (decamelize), https://github.com/sindresorhus/del.git (del), https://github.com/sindresorhus/execa.git (execa), https://github.com/sindresorhus/find-up.git (find-up), https://github.com/sindresorhus/get-stream.git (get-stream), https://github.com/sindresorhus/globals.git (globals), https://github.com/sindresorhus/globby.git (globby), https://github.com/sindresorhus/gzip-size.git (gzip-size), https://github.com/sindresorhus/has-flag.git (has-flag), https://github.com/sindresorhus/import-fresh.git (import-fresh), https://github.com/sindresorhus/import-local.git (import-local), https://github.com/sindresorhus/internal-ip.git (internal-ip), https://github.com/sindresorhus/invert-kv.git (invert-kv), https://github.com/sindresorhus/is-generator-fn.git (is-generator-fn), https://github.com/sindresorhus/is-root.git (is-root), https://github.com/sindresorhus/is-svg.git (is-svg), https://github.com/sindresorhus/lcid.git (lcid), https://github.com/sindresorhus/locate-path.git (locate-path), https://github.com/sindresorhus/make-dir.git (make-dir), https://github.com/sindresorhus/mem.git (mem), https://github.com/sindresorhus/mimic-fn.git (mimic-fn), https://github.com/sindresorhus/normalize-url.git (normalize-url), https://github.com/sindresorhus/opn.git (opn), https://github.com/sindresorhus/os-locale.git (os-locale), https://github.com/sindresorhus/p-is-promise.git (p-is-promise), https://github.com/sindresorhus/p-limit.git (p-limit), https://github.com/sindresorhus/p-locate.git (p-locate), https://github.com/sindresorhus/p-map.git (p-map), https://github.com/sindresorhus/p-try.git (p-try), https://github.com/sindresorhus/parent-module.git (parent-module), https://github.com/sindresorhus/parse-json.git (parse-json), https://github.com/sindresorhus/path-type.git (path-type), https://github.com/sindresorhus/pify.git (pify), https://github.com/sindresorhus/pkg-dir.git (pkg-dir), https://github.com/sindresorhus/pretty-bytes.git (pretty-bytes), https://github.com/sindresorhus/read-pkg.git (read-pkg), https://github.com/sindresorhus/read-pkg-up.git (read-pkg-up), https://github.com/sindresorhus/resolve-from.git (resolve-from), https://github.com/sindresorhus/slash.git (slash), https://github.com/sindresorhus/string-length.git (string-length), https://github.com/sindresorhus/string-width.git (string-width), https://github.com/chalk/strip-ansi.git (strip-ansi), https://github.com/chalk/supports-color.git (supports-color). This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (sindresorhus.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: ansi-html. A copy of the source code may be downloaded from git://github.com/Tjatse/ansi-html.git. This software contains the following license and notice below:
-
-Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
------
-
-The following software may be included in this product: ansi-regex, ansi-styles, array-union, array-uniq, arrify, caller-callsite, caller-path, callsites, camelcase, chalk, cli-cursor, code-point-at, decamelize, detect-newline, dot-prop, escape-string-regexp, figures, find-up, globby, has-ansi, import-cwd, import-fresh, import-from, ip-regex, is-absolute-url, is-binary-path, is-fullwidth-code-point, is-obj, is-path-in-cwd, is-path-inside, is-plain-obj, is-stream, is-wsl, leven, load-json-file, locate-path, npm-run-path, number-is-nan, object-assign, onetime, os-homedir, os-tmpdir, p-defer, p-each-series, p-finally, p-locate, p-reduce, p-try, parse-json, path-exists, path-is-absolute, path-key, path-type, pify, pkg-dir, pkg-up, read-pkg, read-pkg-up, resolve-cwd, resolve-from, restore-cursor, shebang-regex, string-width, strip-ansi, strip-bom, strip-eof, strip-json-comments, supports-color, trim-right, wrap-ansi. A copy of the source code may be downloaded from https://github.com/chalk/ansi-regex.git (ansi-regex), https://github.com/chalk/ansi-styles.git (ansi-styles), https://github.com/sindresorhus/array-union.git (array-union), https://github.com/sindresorhus/array-uniq.git (array-uniq), https://github.com/sindresorhus/arrify.git (arrify), https://github.com/sindresorhus/caller-callsite.git (caller-callsite), https://github.com/sindresorhus/caller-path.git (caller-path), https://github.com/sindresorhus/callsites.git (callsites), https://github.com/sindresorhus/camelcase.git (camelcase), https://github.com/chalk/chalk.git (chalk), https://github.com/sindresorhus/cli-cursor.git (cli-cursor), https://github.com/sindresorhus/code-point-at.git (code-point-at), https://github.com/sindresorhus/decamelize.git (decamelize), https://github.com/sindresorhus/detect-newline.git (detect-newline), https://github.com/sindresorhus/dot-prop.git (dot-prop), https://github.com/sindresorhus/escape-string-regexp.git (escape-string-regexp), https://github.com/sindresorhus/figures.git (figures), https://github.com/sindresorhus/find-up.git (find-up), https://github.com/sindresorhus/globby.git (globby), https://github.com/sindresorhus/has-ansi.git (has-ansi), https://github.com/sindresorhus/import-cwd.git (import-cwd), https://github.com/sindresorhus/import-fresh.git (import-fresh), https://github.com/sindresorhus/import-from.git (import-from), https://github.com/sindresorhus/ip-regex.git (ip-regex), https://github.com/sindresorhus/is-absolute-url.git (is-absolute-url), https://github.com/sindresorhus/is-binary-path.git (is-binary-path), https://github.com/sindresorhus/is-fullwidth-code-point.git (is-fullwidth-code-point), https://github.com/sindresorhus/is-obj.git (is-obj), https://github.com/sindresorhus/is-path-in-cwd.git (is-path-in-cwd), https://github.com/sindresorhus/is-path-inside.git (is-path-inside), https://github.com/sindresorhus/is-plain-obj.git (is-plain-obj), https://github.com/sindresorhus/is-stream.git (is-stream), https://github.com/sindresorhus/is-wsl.git (is-wsl), https://github.com/sindresorhus/leven.git (leven), https://github.com/sindresorhus/load-json-file.git (load-json-file), https://github.com/sindresorhus/locate-path.git (locate-path), https://github.com/sindresorhus/npm-run-path.git (npm-run-path), https://github.com/sindresorhus/number-is-nan.git (number-is-nan), https://github.com/sindresorhus/object-assign.git (object-assign), https://github.com/sindresorhus/onetime.git (onetime), https://github.com/sindresorhus/os-homedir.git (os-homedir), https://github.com/sindresorhus/os-tmpdir.git (os-tmpdir), https://github.com/sindresorhus/p-defer.git (p-defer), https://github.com/sindresorhus/p-each-series.git (p-each-series), https://github.com/sindresorhus/p-finally.git (p-finally), https://github.com/sindresorhus/p-locate.git (p-locate), https://github.com/sindresorhus/p-reduce.git (p-reduce), https://github.com/sindresorhus/p-try.git (p-try), https://github.com/sindresorhus/parse-json.git (parse-json), https://github.com/sindresorhus/path-exists.git (path-exists), https://github.com/sindresorhus/path-is-absolute.git (path-is-absolute), https://github.com/sindresorhus/path-key.git (path-key), https://github.com/sindresorhus/path-type.git (path-type), https://github.com/sindresorhus/pify.git (pify), https://github.com/sindresorhus/pkg-dir.git (pkg-dir), https://github.com/sindresorhus/pkg-up.git (pkg-up), https://github.com/sindresorhus/read-pkg.git (read-pkg), https://github.com/sindresorhus/read-pkg-up.git (read-pkg-up), https://github.com/sindresorhus/resolve-cwd.git (resolve-cwd), https://github.com/sindresorhus/resolve-from.git (resolve-from), https://github.com/sindresorhus/restore-cursor.git (restore-cursor), https://github.com/sindresorhus/shebang-regex.git (shebang-regex), https://github.com/sindresorhus/string-width.git (string-width), https://github.com/chalk/strip-ansi.git (strip-ansi), https://github.com/sindresorhus/strip-bom.git (strip-bom), https://github.com/sindresorhus/strip-eof.git (strip-eof), https://github.com/sindresorhus/strip-json-comments.git (strip-json-comments), https://github.com/chalk/supports-color.git (supports-color), https://github.com/sindresorhus/trim-right.git (trim-right), https://github.com/chalk/wrap-ansi.git (wrap-ansi). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (sindresorhus.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: antd. A copy of the source code may be downloaded from https://github.com/ant-design/ant-design. This software contains the following license and notice below:
-
-MIT LICENSE
-
-Copyright (c) 2015-present Ant UED, https://xtech.antfin.com/
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: anymatch. A copy of the source code may be downloaded from https://github.com/micromatch/anymatch. This software contains the following license and notice below:
-
-The ISC License
-
-Copyright (c) 2014 Elan Shanker
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: append-transform, find-cache-dir, node-modules-regexp, normalize-range. A copy of the source code may be downloaded from https://github.com/istanbuljs/append-transform.git (append-transform), https://github.com/jamestalmage/find-cache-dir.git (find-cache-dir), https://github.com/jamestalmage/node-modules-regexp.git (node-modules-regexp), https://github.com/jamestalmage/normalize-range.git (normalize-range). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) James Talmage <james@talmage.io> (github.com/jamestalmage)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: aproba, wide-align. A copy of the source code may be downloaded from https://github.com/iarna/aproba (aproba), https://github.com/iarna/wide-align (wide-align). This software contains the following license and notice below:
-
-Copyright (c) 2015, Rebecca Turner <me@re-becca.org>
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: are-we-there-yet. A copy of the source code may be downloaded from https://github.com/iarna/are-we-there-yet.git. This software contains the following license and notice below:
-
-Copyright (c) 2015, Rebecca Turner
-
-Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: argparse. A copy of the source code may be downloaded from https://github.com/nodeca/argparse.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (C) 2012 by Vitaly Puzrin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: aria-query, axobject-query. A copy of the source code may be downloaded from git+https://github.com/A11yance/aria-query.git (aria-query), git+https://github.com/A11yance/axobject-query.git (axobject-query). This software contains the following license and notice below:
-
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction,
-and distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by
-the copyright owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all
-other entities that control, are controlled by, or are under common
-control with that entity. For the purposes of this definition,
-"control" means (i) the power, direct or indirect, to cause the
-direction or management of such entity, whether by contract or
-otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity
-exercising permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications,
-including but not limited to software source code, documentation
-source, and configuration files.
-
-"Object" form shall mean any form resulting from mechanical
-transformation or translation of a Source form, including but
-not limited to compiled object code, generated documentation,
-and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or
-Object form, made available under the License, as indicated by a
-copyright notice that is included in or attached to the work
-(an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object
-form, that is based on (or derived from) the Work and for which the
-editorial revisions, annotations, elaborations, or other modifications
-represent, as a whole, an original work of authorship. For the purposes
-of this License, Derivative Works shall not include works that remain
-separable from, or merely link (or bind by name) to the interfaces of,
-the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including
-the original version of the Work and any modifications or additions
-to that Work or Derivative Works thereof, that is intentionally
-submitted to Licensor for inclusion in the Work by the copyright owner
-or by an individual or Legal Entity authorized to submit on behalf of
-the copyright owner. For the purposes of this definition, "submitted"
-means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems,
-and issue tracking systems that are managed by, or on behalf of, the
-Licensor for the purpose of discussing and improving the Work, but
-excluding communication that is conspicuously marked or otherwise
-designated in writing by the copyright owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity
-on behalf of whom a Contribution has been received by Licensor and
-subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-this License, each Contributor hereby grants to You a perpetual,
-worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the
-Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-this License, each Contributor hereby grants to You a perpetual,
-worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-(except as stated in this section) patent license to make, have made,
-use, offer to sell, sell, import, and otherwise transfer the Work,
-where such license applies only to those patent claims licensable
-by such Contributor that are necessarily infringed by their
-Contribution(s) alone or by combination of their Contribution(s)
-with the Work to which such Contribution(s) was submitted. If You
-institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work
-or a Contribution incorporated within the Work constitutes direct
-or contributory patent infringement, then any patent licenses
-granted to You under this License for that Work shall terminate
-as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-Work or Derivative Works thereof in any medium, with or without
-modifications, and in Source or Object form, provided that You
-meet the following conditions:
-
-(a) You must give any other recipients of the Work or
-Derivative Works a copy of this License; and
-
-(b) You must cause any modified files to carry prominent notices
-stating that You changed the files; and
-
-(c) You must retain, in the Source form of any Derivative Works
-that You distribute, all copyright, patent, trademark, and
-attribution notices from the Source form of the Work,
-excluding those notices that do not pertain to any part of
-the Derivative Works; and
-
-(d) If the Work includes a "NOTICE" text file as part of its
-distribution, then any Derivative Works that You distribute must
-include a readable copy of the attribution notices contained
-within such NOTICE file, excluding those notices that do not
-pertain to any part of the Derivative Works, in at least one
-of the following places: within a NOTICE text file distributed
-as part of the Derivative Works; within the Source form or
-documentation, if provided along with the Derivative Works; or,
-within a display generated by the Derivative Works, if and
-wherever such third-party notices normally appear. The contents
-of the NOTICE file are for informational purposes only and
-do not modify the License. You may add Your own attribution
-notices within Derivative Works that You distribute, alongside
-or as an addendum to the NOTICE text from the Work, provided
-that such additional attribution notices cannot be construed
-as modifying the License.
-
-You may add Your own copyright statement to Your modifications and
-may provide additional or different license terms and conditions
-for use, reproduction, or distribution of Your modifications, or
-for any such Derivative Works as a whole, provided Your use,
-reproduction, and distribution of the Work otherwise complies with
-the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-any Contribution intentionally submitted for inclusion in the Work
-by You to the Licensor shall be under the terms and conditions of
-this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify
-the terms of any separate license agreement you may have executed
-with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-names, trademarks, service marks, or product names of the Licensor,
-except as required for reasonable and customary use in describing the
-origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-agreed to in writing, Licensor provides the Work (and each
-Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-implied, including, without limitation, any warranties or conditions
-of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-PARTICULAR PURPOSE. You are solely responsible for determining the
-appropriateness of using or redistributing the Work and assume any
-risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-whether in tort (including negligence), contract, or otherwise,
-unless required by applicable law (such as deliberate and grossly
-negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special,
-incidental, or consequential damages of any character arising as a
-result of this License or out of the use or inability to use the
-Work (including but not limited to damages for loss of goodwill,
-work stoppage, computer failure or malfunction, or any and all
-other commercial damages or losses), even if such Contributor
-has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-the Work or Derivative Works thereof, You may choose to offer,
-and charge a fee for, acceptance of support, warranty, indemnity,
-or other liability obligations and/or rights consistent with this
-License. However, in accepting such obligations, You may act only
-on Your own behalf and on Your sole responsibility, not on behalf
-of any other Contributor, and only if You agree to indemnify,
-defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason
-of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
-To apply the Apache License to your work, attach the following
-boilerplate notice, with the fields enclosed by brackets "{}"
-replaced with your own identifying information. (Don't include
-the brackets!)  The text should be enclosed in the appropriate
-comment syntax for the file format. We also recommend that a
-file or class name and description of purpose be included on the
-same "printed page" as the copyright notice for easier
-identification within third-party archives.
-
-Copyright {yyyy} {name of copyright owner}
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
------
-
-The following software may be included in this product: arr-diff, fill-range, for-in, has-value, has-values, kind-of, normalize-path, set-value. A copy of the source code may be downloaded from https://github.com/jonschlinkert/arr-diff.git (arr-diff), https://github.com/jonschlinkert/fill-range.git (fill-range), https://github.com/jonschlinkert/for-in.git (for-in), https://github.com/jonschlinkert/has-value.git (has-value), https://github.com/jonschlinkert/has-values.git (has-values), https://github.com/jonschlinkert/kind-of.git (kind-of), https://github.com/jonschlinkert/normalize-path.git (normalize-path), https://github.com/jonschlinkert/set-value.git (set-value). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2017, Jon Schlinkert
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: arr-flatten, clone-deep, is-glob, is-plain-object, kind-of. A copy of the source code may be downloaded from https://github.com/jonschlinkert/arr-flatten.git (arr-flatten), https://github.com/jonschlinkert/clone-deep.git (clone-deep), https://github.com/micromatch/is-glob.git (is-glob), https://github.com/jonschlinkert/is-plain-object.git (is-plain-object), https://github.com/jonschlinkert/kind-of.git (kind-of). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2017, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: arr-union, get-value, has-value, has-values, is-directory, is-glob, isobject, object.pick, repeat-string. A copy of the source code may be downloaded from https://github.com/jonschlinkert/arr-union.git (arr-union), https://github.com/jonschlinkert/get-value.git (get-value), https://github.com/jonschlinkert/has-value.git (has-value), https://github.com/jonschlinkert/has-values.git (has-values), https://github.com/jonschlinkert/is-directory.git (is-directory), https://github.com/jonschlinkert/is-glob.git (is-glob), https://github.com/jonschlinkert/isobject.git (isobject), https://github.com/jonschlinkert/object.pick.git (object.pick), https://github.com/jonschlinkert/repeat-string.git (repeat-string). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2016, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: array-equal, destroy, ee-first, mime-db. A copy of the source code may be downloaded from https://github.com/component/array-equal.git (array-equal), https://github.com/stream-utils/destroy.git (destroy), https://github.com/jonathanong/ee-first.git (ee-first), https://github.com/jshttp/mime-db.git (mime-db). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Jonathan Ong me@jongleberry.com
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: array-flatten, camel-case, lower-case, no-case, param-case, path-to-regexp, upper-case. A copy of the source code may be downloaded from git://github.com/blakeembrey/array-flatten.git (array-flatten), git://github.com/blakeembrey/camel-case.git (camel-case), git://github.com/blakeembrey/lower-case.git (lower-case), git://github.com/blakeembrey/no-case.git (no-case), git://github.com/blakeembrey/param-case.git (param-case), https://github.com/component/path-to-regexp.git (path-to-regexp), git://github.com/blakeembrey/upper-case.git (upper-case). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Blake Embrey (hello@blakeembrey.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: array-includes, define-properties, es-abstract. A copy of the source code may be downloaded from git://github.com/ljharb/array-includes.git (array-includes), git://github.com/ljharb/define-properties.git (define-properties), git://github.com/ljharb/es-abstract.git (es-abstract). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (C) 2015 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: array-map, array-reduce, concat-map, deep-equal, fast-json-stable-stringify, is-typedarray, json-stable-stringify, json-stable-stringify-without-jsonify, minimist, path-browserify, resolve, safe-regex, text-table, tty-browserify, vm-browserify, wordwrap. A copy of the source code may be downloaded from git://github.com/substack/array-map.git (array-map), git://github.com/substack/array-reduce.git (array-reduce), git://github.com/substack/node-concat-map.git (concat-map), http://github.com/substack/node-deep-equal.git (deep-equal), git://github.com/epoberezkin/fast-json-stable-stringify.git (fast-json-stable-stringify), git://github.com/hughsk/is-typedarray.git (is-typedarray), git://github.com/substack/json-stable-stringify.git (json-stable-stringify), git://github.com/samn/json-stable-stringify.git (json-stable-stringify-without-jsonify), git://github.com/substack/minimist.git (minimist), git://github.com/substack/path-browserify.git (path-browserify), git://github.com/substack/node-resolve.git (resolve), git://github.com/substack/safe-regex.git (safe-regex), git://github.com/substack/text-table.git (text-table), git://github.com/substack/tty-browserify.git (tty-browserify), http://github.com/substack/vm-browserify.git (vm-browserify), git://github.com/substack/node-wordwrap.git (wordwrap). This software contains the following license and notice below:
-
-This software is released under the MIT license:
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: array-unique, is-extglob, is-number. A copy of the source code may be downloaded from https://github.com/jonschlinkert/array-unique.git (array-unique), https://github.com/jonschlinkert/is-extglob.git (is-extglob), https://github.com/jonschlinkert/is-number.git (is-number). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2016, Jon Schlinkert
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: asap. A copy of the source code may be downloaded from https://github.com/kriskowal/asap.git. This software contains the following license and notice below:
-
-Copyright 2009–2014 Contributors. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: asn1. A copy of the source code may be downloaded from git://github.com/joyent/node-asn1.git. This software contains the following license and notice below:
-
-Copyright (c) 2011 Mark Cavage, All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE
-
------
-
-The following software may be included in this product: assert, util. A copy of the source code may be downloaded from git://github.com/defunctzombie/commonjs-assert.git (assert), git://github.com/defunctzombie/node-util (util). This software contains the following license and notice below:
-
-Copyright Joyent, Inc. and other Node contributors. All rights reserved.
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: assign-symbols, contains-path, define-property, is-accessor-descriptor, is-data-descriptor, is-extendable, lazy-cache, pascalcase, shallow-clone. A copy of the source code may be downloaded from https://github.com/jonschlinkert/assign-symbols.git (assign-symbols), https://github.com/jonschlinkert/contains-path.git (contains-path), https://github.com/jonschlinkert/define-property.git (define-property), https://github.com/jonschlinkert/is-accessor-descriptor.git (is-accessor-descriptor), https://github.com/jonschlinkert/is-data-descriptor.git (is-data-descriptor), https://github.com/jonschlinkert/is-extendable.git (is-extendable), https://github.com/jonschlinkert/lazy-cache.git (lazy-cache), https://github.com/jonschlinkert/pascalcase.git (pascalcase), https://github.com/jonschlinkert/shallow-clone.git (shallow-clone). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: astral-regex, dir-glob. A copy of the source code may be downloaded from https://github.com/kevva/astral-regex.git (astral-regex), https://github.com/kevva/dir-glob.git (dir-glob). This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) Kevin Mårtensson <kevinmartensson@gmail.com> (github.com/kevva)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: async. A copy of the source code may be downloaded from https://github.com/caolan/async.git. This software contains the following license and notice below:
-
-Copyright (c) 2010-2014 Caolan McMahon
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: async. A copy of the source code may be downloaded from https://github.com/caolan/async.git. This software contains the following license and notice below:
-
-Copyright (c) 2010-2018 Caolan McMahon
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: async-limiter. A copy of the source code may be downloaded from https://github.com/strml/async-limiter.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-Copyright (c) 2017 Samuel Reed <samuel.trace.reed@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: asynckit. A copy of the source code may be downloaded from git+https://github.com/alexindigo/asynckit.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Alex Indigo
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: atob. A copy of the source code may be downloaded from git://git.coolaj86.com/coolaj86/atob.js.git. This software contains the following license and notice below:
-
-At your option you may choose either of the following licenses:
-
-  * The MIT License (MIT)
-  * The Apache License 2.0 (Apache-2.0)
-
-
-The MIT License (MIT)
-
-Copyright (c) 2015 AJ ONeal
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright 2015 AJ ONeal
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
------
-
-The following software may be included in this product: autoprefixer, postcss, postcss-safe-parser. A copy of the source code may be downloaded from https://github.com/postcss/autoprefixer.git (autoprefixer), https://github.com/postcss/postcss.git (postcss), https://github.com/postcss/postcss-safe-parser.git (postcss-safe-parser). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright 2013 Andrey Sitnik <andrey@sitnik.ru>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: aws4. A copy of the source code may be downloaded from https://github.com/mhart/aws4.git. This software contains the following license and notice below:
-
-Copyright 2013 Michael Hart (michael.hart.au@gmail.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: babel-eslint. A copy of the source code may be downloaded from https://github.com/babel/babel-eslint.git. This software contains the following license and notice below:
-
-Copyright (c) 2014-2016 Sebastian McKenzie <sebmck@gmail.com>
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: babel-extract-comments. A copy of the source code may be downloaded from https://github.com/jonschlinkert/babel-extract-comments.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015, 2018, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: babel-loader. A copy of the source code may be downloaded from https://github.com/babel/babel-loader.git. This software contains the following license and notice below:
-
-Copyright (c) 2014-2016 Luís Couto <hello@luiscouto.pt>
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: babel-plugin-dynamic-import-node. A copy of the source code may be downloaded from git+https://github.com/airbnb/babel-plugin-dynamic-import-node.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2016 Airbnb
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: babel-plugin-istanbul. A copy of the source code may be downloaded from git+https://github.com/istanbuljs/babel-plugin-istanbul.git. This software contains the following license and notice below:
-
-Copyright (c) 2016, Istanbul Code Coverage
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-* Neither the name of babel-plugin-istanbul nor the names of its
-  contributors may be used to endorse or promote products derived from
-  this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: babel-plugin-macros. A copy of the source code may be downloaded from https://github.com/kentcdodds/babel-plugin-macros.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-Copyright (c) 2017 Kent C. Dodds
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: babel-plugin-named-asset-import, babel-preset-react-app, confusing-browser-globals, create-react-class, eslint-config-react-app, invariant, prop-types, react-app-polyfill, react-dev-utils, react-error-overlay, react-scripts, warning. A copy of the source code may be downloaded from https://github.com/facebook/create-react-app.git (babel-plugin-named-asset-import), https://github.com/facebook/create-react-app.git (babel-preset-react-app), https://github.com/facebook/create-react-app.git (confusing-browser-globals), https://github.com/facebook/react.git (create-react-class), https://github.com/facebook/create-react-app.git (eslint-config-react-app), https://github.com/zertosh/invariant (invariant), https://github.com/facebook/prop-types.git (prop-types), https://github.com/facebook/create-react-app.git (react-app-polyfill), https://github.com/facebook/create-react-app.git (react-dev-utils), https://github.com/facebook/create-react-app.git (react-error-overlay), https://github.com/facebook/create-react-app.git (react-scripts), https://github.com/BerkeleyTrue/warning.git (warning). This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2013-present, Facebook, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: babel-plugin-transform-react-remove-prop-types. A copy of the source code may be downloaded from https://github.com/oliviertassinari/babel-plugin-transform-react-remove-prop-types.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Nikita Gusakov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: bail, ccount. A copy of the source code may be downloaded from https://github.com/wooorm/bail.git (bail), https://github.com/wooorm/ccount.git (ccount). This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2015 Titus Wormer <tituswormer@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: balanced-match. A copy of the source code may be downloaded from git://github.com/juliangruber/balanced-match.git. This software contains the following license and notice below:
-
-(MIT)
-
-Copyright (c) 2013 Julian Gruber <julian@juliangruber.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: base, extglob, is-accessor-descriptor, is-data-descriptor, split-string. A copy of the source code may be downloaded from https://github.com/node-base/base.git (base), https://github.com/micromatch/extglob.git (extglob), https://github.com/jonschlinkert/is-accessor-descriptor.git (is-accessor-descriptor), https://github.com/jonschlinkert/is-data-descriptor.git (is-data-descriptor), https://github.com/jonschlinkert/split-string.git (split-string). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015-2017, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: base64-js. A copy of the source code may be downloaded from git://github.com/beatgammit/base64-js.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: batch. A copy of the source code may be downloaded from https://github.com/visionmedia/batch.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2013 TJ Holowaychuk <tj@vision-media.ca>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: bcrypt-pbkdf. A copy of the source code may be downloaded from git://github.com/joyent/node-bcrypt-pbkdf.git. This software contains the following license and notice below:
-
-The Blowfish portions are under the following license:
-
-Blowfish block cipher for OpenBSD
-Copyright 1997 Niels Provos <provos@physnet.uni-hamburg.de>
-All rights reserved.
-
-Implementation advice by David Mazieres <dm@lcs.mit.edu>.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-1. Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
-   notice, this list of conditions and the following disclaimer in the
-   documentation and/or other materials provided with the distribution.
-3. The name of the author may not be used to endorse or promote products
-   derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-The bcrypt_pbkdf portions are under the following license:
-
-Copyright (c) 2013 Ted Unangst <tedu@openbsd.org>
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-
-
-Performance improvements (Javascript-specific):
-
-Copyright 2016, Joyent Inc
-Author: Alex Wilson <alex.wilson@joyent.com>
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: bluebird. A copy of the source code may be downloaded from git://github.com/petkaantonov/bluebird.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2013-2018 Petka Antonov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: body-parser, compression, type-is. A copy of the source code may be downloaded from https://github.com/expressjs/body-parser.git (body-parser), https://github.com/expressjs/compression.git (compression), https://github.com/jshttp/type-is.git (type-is). This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2014 Jonathan Ong <me@jongleberry.com>
-Copyright (c) 2014-2015 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: bonjour. A copy of the source code may be downloaded from https://github.com/watson/bonjour.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015-2016 Thomas Watson Steen
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: brace-expansion. A copy of the source code may be downloaded from git://github.com/juliangruber/brace-expansion.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2013 Julian Gruber <julian@juliangruber.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: braces, micromatch, normalize-path. A copy of the source code may be downloaded from https://github.com/micromatch/braces.git (braces), https://github.com/micromatch/micromatch.git (micromatch), https://github.com/jonschlinkert/normalize-path.git (normalize-path). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2018, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: browser-process-hrtime. A copy of the source code may be downloaded from git://github.com/kumavis/browser-process-hrtime.git. This software contains the following license and notice below:
-
-Copyright 2014 kumavis
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: browser-resolve. A copy of the source code may be downloaded from git://github.com/shtylman/node-browser-resolve.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2013-2015 Roman Shtylman <shtylman@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: browserify-aes. A copy of the source code may be downloaded from git://github.com/crypto-browserify/browserify-aes.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2017 browserify-aes contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: browserify-cipher. A copy of the source code may be downloaded from git@github.com:crypto-browserify/browserify-cipher.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2017 Calvin Metcalf & contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: browserify-des. A copy of the source code may be downloaded from git+https://github.com/crypto-browserify/browserify-des.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2017 Calvin Metcalf, Fedor Indutny & contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: browserify-rsa. A copy of the source code may be downloaded from git@github.com:crypto-browserify/browserify-rsa.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2015 Calvin Metcalf & contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: browserify-sign. A copy of the source code may be downloaded from https://github.com/crypto-browserify/browserify-sign.git. This software contains the following license and notice below:
-
-Copyright (c) 2014-2015 Calvin Metcalf and browserify-sign contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: browserify-zlib. A copy of the source code may be downloaded from git+https://github.com/devongovett/browserify-zlib.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2015 Devon Govett <devongovett@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-This project contains parts of Node.js.
-Node.js is licensed for use as follows:
-
-"""
-Copyright Node.js contributors. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-"""
-
-This license applies to parts of Node.js originating from the
-https://github.com/joyent/node repository:
-
-"""
-Copyright Joyent, Inc. and other Node contributors. All rights reserved.
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-"""
-
------
-
-The following software may be included in this product: browserslist. A copy of the source code may be downloaded from https://github.com/browserslist/browserslist.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright 2014 Andrey Sitnik <andrey@sitnik.ru>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: buffer. A copy of the source code may be downloaded from git://github.com/feross/buffer.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Feross Aboukhadijeh, and other contributors.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: buffer-from. A copy of the source code may be downloaded from https://github.com/LinusU/buffer-from.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2016, 2018 Linus Unnebäck
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: buffer-indexof. A copy of the source code may be downloaded from git://github.com/soldair/node-buffer-indexof.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2013 Ryan Day
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: buffer-xor. A copy of the source code may be downloaded from https://github.com/crypto-browserify/buffer-xor.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Daniel Cousens
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: builtin-status-codes. A copy of the source code may be downloaded from https://github.com/bendrucker/builtin-status-codes.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Ben Drucker <bvdrucker@gmail.com> (bendrucker.me)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: bytes. A copy of the source code may be downloaded from https://github.com/visionmedia/bytes.js.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2012-2014 TJ Holowaychuk <tj@vision-media.ca>
-Copyright (c) 2015 Jed Watson <jed.watson@me.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: cacache, figgy-pudding, ssri. A copy of the source code may be downloaded from https://github.com/zkat/cacache (cacache), https://github.com/zkat/figgy-pudding (figgy-pudding), https://github.com/zkat/ssri (ssri). This software contains the following license and notice below:
-
-ISC License
-
-Copyright (c) npm, Inc.
-
-Permission to use, copy, modify, and/or distribute this software for
-any purpose with or without fee is hereby granted, provided that the
-above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS
-ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
-COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
-CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
-USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: cache-base, isobject, write. A copy of the source code may be downloaded from https://github.com/jonschlinkert/cache-base.git (cache-base), https://github.com/jonschlinkert/isobject.git (isobject), https://github.com/jonschlinkert/write.git (write). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2017, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: call-me-maybe. A copy of the source code may be downloaded from git+https://github.com/limulus/call-me-maybe.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Eric McCarthy
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: caniuse-api. A copy of the source code may be downloaded from https://github.com/nyalab/caniuse-api.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Sébastien Balayn
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: caniuse-lite. A copy of the source code may be downloaded from https://github.com/ben-eb/caniuse-lite.git. This software contains the following license and notice below:
-
-Attribution 4.0 International
-
-=======================================================================
-
-Creative Commons Corporation ("Creative Commons") is not a law firm and
-does not provide legal services or legal advice. Distribution of
-Creative Commons public licenses does not create a lawyer-client or
-other relationship. Creative Commons makes its licenses and related
-information available on an "as-is" basis. Creative Commons gives no
-warranties regarding its licenses, any material licensed under their
-terms and conditions, or any related information. Creative Commons
-disclaims all liability for damages resulting from their use to the
-fullest extent possible.
-
-Using Creative Commons Public Licenses
-
-Creative Commons public licenses provide a standard set of terms and
-conditions that creators and other rights holders may use to share
-original works of authorship and other material subject to copyright
-and certain other rights specified in the public license below. The
-following considerations are for informational purposes only, are not
-exhaustive, and do not form part of our licenses.
-
-     Considerations for licensors: Our public licenses are
-     intended for use by those authorized to give the public
-     permission to use material in ways otherwise restricted by
-     copyright and certain other rights. Our licenses are
-     irrevocable. Licensors should read and understand the terms
-     and conditions of the license they choose before applying it.
-     Licensors should also secure all rights necessary before
-     applying our licenses so that the public can reuse the
-     material as expected. Licensors should clearly mark any
-     material not subject to the license. This includes other CC-
-     licensed material, or material used under an exception or
-     limitation to copyright. More considerations for licensors:
-	wiki.creativecommons.org/Considerations_for_licensors
-
-     Considerations for the public: By using one of our public
-     licenses, a licensor grants the public permission to use the
-     licensed material under specified terms and conditions. If
-     the licensor's permission is not necessary for any reason--for
-     example, because of any applicable exception or limitation to
-     copyright--then that use is not regulated by the license. Our
-     licenses grant only permissions under copyright and certain
-     other rights that a licensor has authority to grant. Use of
-     the licensed material may still be restricted for other
-     reasons, including because others have copyright or other
-     rights in the material. A licensor may make special requests,
-     such as asking that all changes be marked or described.
-     Although not required by our licenses, you are encouraged to
-     respect those requests where reasonable. More_considerations
-     for the public:
-	wiki.creativecommons.org/Considerations_for_licensees
-
-=======================================================================
-
-Creative Commons Attribution 4.0 International Public License
-
-By exercising the Licensed Rights (defined below), You accept and agree
-to be bound by the terms and conditions of this Creative Commons
-Attribution 4.0 International Public License ("Public License"). To the
-extent this Public License may be interpreted as a contract, You are
-granted the Licensed Rights in consideration of Your acceptance of
-these terms and conditions, and the Licensor grants You such rights in
-consideration of benefits the Licensor receives from making the
-Licensed Material available under these terms and conditions.
-
-
-Section 1 -- Definitions.
-
-  a. Adapted Material means material subject to Copyright and Similar
-     Rights that is derived from or based upon the Licensed Material
-     and in which the Licensed Material is translated, altered,
-     arranged, transformed, or otherwise modified in a manner requiring
-     permission under the Copyright and Similar Rights held by the
-     Licensor. For purposes of this Public License, where the Licensed
-     Material is a musical work, performance, or sound recording,
-     Adapted Material is always produced where the Licensed Material is
-     synched in timed relation with a moving image.
-
-  b. Adapter's License means the license You apply to Your Copyright
-     and Similar Rights in Your contributions to Adapted Material in
-     accordance with the terms and conditions of this Public License.
-
-  c. Copyright and Similar Rights means copyright and/or similar rights
-     closely related to copyright including, without limitation,
-     performance, broadcast, sound recording, and Sui Generis Database
-     Rights, without regard to how the rights are labeled or
-     categorized. For purposes of this Public License, the rights
-     specified in Section 2(b)(1)-(2) are not Copyright and Similar
-     Rights.
-
-  d. Effective Technological Measures means those measures that, in the
-     absence of proper authority, may not be circumvented under laws
-     fulfilling obligations under Article 11 of the WIPO Copyright
-     Treaty adopted on December 20, 1996, and/or similar international
-     agreements.
-
-  e. Exceptions and Limitations means fair use, fair dealing, and/or
-     any other exception or limitation to Copyright and Similar Rights
-     that applies to Your use of the Licensed Material.
-
-  f. Licensed Material means the artistic or literary work, database,
-     or other material to which the Licensor applied this Public
-     License.
-
-  g. Licensed Rights means the rights granted to You subject to the
-     terms and conditions of this Public License, which are limited to
-     all Copyright and Similar Rights that apply to Your use of the
-     Licensed Material and that the Licensor has authority to license.
-
-  h. Licensor means the individual(s) or entity(ies) granting rights
-     under this Public License.
-
-  i. Share means to provide material to the public by any means or
-     process that requires permission under the Licensed Rights, such
-     as reproduction, public display, public performance, distribution,
-     dissemination, communication, or importation, and to make material
-     available to the public including in ways that members of the
-     public may access the material from a place and at a time
-     individually chosen by them.
-
-  j. Sui Generis Database Rights means rights other than copyright
-     resulting from Directive 96/9/EC of the European Parliament and of
-     the Council of 11 March 1996 on the legal protection of databases,
-     as amended and/or succeeded, as well as other essentially
-     equivalent rights anywhere in the world.
-
-  k. You means the individual or entity exercising the Licensed Rights
-     under this Public License. Your has a corresponding meaning.
-
-
-Section 2 -- Scope.
-
-  a. License grant.
-
-       1. Subject to the terms and conditions of this Public License,
-          the Licensor hereby grants You a worldwide, royalty-free,
-          non-sublicensable, non-exclusive, irrevocable license to
-          exercise the Licensed Rights in the Licensed Material to:
-
-            a. reproduce and Share the Licensed Material, in whole or
-               in part; and
-
-            b. produce, reproduce, and Share Adapted Material.
-
-       2. Exceptions and Limitations. For the avoidance of doubt, where
-          Exceptions and Limitations apply to Your use, this Public
-          License does not apply, and You do not need to comply with
-          its terms and conditions.
-
-       3. Term. The term of this Public License is specified in Section
-          6(a).
-
-       4. Media and formats; technical modifications allowed. The
-          Licensor authorizes You to exercise the Licensed Rights in
-          all media and formats whether now known or hereafter created,
-          and to make technical modifications necessary to do so. The
-          Licensor waives and/or agrees not to assert any right or
-          authority to forbid You from making technical modifications
-          necessary to exercise the Licensed Rights, including
-          technical modifications necessary to circumvent Effective
-          Technological Measures. For purposes of this Public License,
-          simply making modifications authorized by this Section 2(a)
-          (4) never produces Adapted Material.
-
-       5. Downstream recipients.
-
-            a. Offer from the Licensor -- Licensed Material. Every
-               recipient of the Licensed Material automatically
-               receives an offer from the Licensor to exercise the
-               Licensed Rights under the terms and conditions of this
-               Public License.
-
-            b. No downstream restrictions. You may not offer or impose
-               any additional or different terms or conditions on, or
-               apply any Effective Technological Measures to, the
-               Licensed Material if doing so restricts exercise of the
-               Licensed Rights by any recipient of the Licensed
-               Material.
-
-       6. No endorsement. Nothing in this Public License constitutes or
-          may be construed as permission to assert or imply that You
-          are, or that Your use of the Licensed Material is, connected
-          with, or sponsored, endorsed, or granted official status by,
-          the Licensor or others designated to receive attribution as
-          provided in Section 3(a)(1)(A)(i).
-
-  b. Other rights.
-
-       1. Moral rights, such as the right of integrity, are not
-          licensed under this Public License, nor are publicity,
-          privacy, and/or other similar personality rights; however, to
-          the extent possible, the Licensor waives and/or agrees not to
-          assert any such rights held by the Licensor to the limited
-          extent necessary to allow You to exercise the Licensed
-          Rights, but not otherwise.
-
-       2. Patent and trademark rights are not licensed under this
-          Public License.
-
-       3. To the extent possible, the Licensor waives any right to
-          collect royalties from You for the exercise of the Licensed
-          Rights, whether directly or through a collecting society
-          under any voluntary or waivable statutory or compulsory
-          licensing scheme. In all other cases the Licensor expressly
-          reserves any right to collect such royalties.
-
-
-Section 3 -- License Conditions.
-
-Your exercise of the Licensed Rights is expressly made subject to the
-following conditions.
-
-  a. Attribution.
-
-       1. If You Share the Licensed Material (including in modified
-          form), You must:
-
-            a. retain the following if it is supplied by the Licensor
-               with the Licensed Material:
-
-                 i. identification of the creator(s) of the Licensed
-                    Material and any others designated to receive
-                    attribution, in any reasonable manner requested by
-                    the Licensor (including by pseudonym if
-                    designated);
-
-                ii. a copyright notice;
-
-               iii. a notice that refers to this Public License;
-
-                iv. a notice that refers to the disclaimer of
-                    warranties;
-
-                 v. a URI or hyperlink to the Licensed Material to the
-                    extent reasonably practicable;
-
-            b. indicate if You modified the Licensed Material and
-               retain an indication of any previous modifications; and
-
-            c. indicate the Licensed Material is licensed under this
-               Public License, and include the text of, or the URI or
-               hyperlink to, this Public License.
-
-       2. You may satisfy the conditions in Section 3(a)(1) in any
-          reasonable manner based on the medium, means, and context in
-          which You Share the Licensed Material. For example, it may be
-          reasonable to satisfy the conditions by providing a URI or
-          hyperlink to a resource that includes the required
-          information.
-
-       3. If requested by the Licensor, You must remove any of the
-          information required by Section 3(a)(1)(A) to the extent
-          reasonably practicable.
-
-       4. If You Share Adapted Material You produce, the Adapter's
-          License You apply must not prevent recipients of the Adapted
-          Material from complying with this Public License.
-
-
-Section 4 -- Sui Generis Database Rights.
-
-Where the Licensed Rights include Sui Generis Database Rights that
-apply to Your use of the Licensed Material:
-
-  a. for the avoidance of doubt, Section 2(a)(1) grants You the right
-     to extract, reuse, reproduce, and Share all or a substantial
-     portion of the contents of the database;
-
-  b. if You include all or a substantial portion of the database
-     contents in a database in which You have Sui Generis Database
-     Rights, then the database in which You have Sui Generis Database
-     Rights (but not its individual contents) is Adapted Material; and
-
-  c. You must comply with the conditions in Section 3(a) if You Share
-     all or a substantial portion of the contents of the database.
-
-For the avoidance of doubt, this Section 4 supplements and does not
-replace Your obligations under this Public License where the Licensed
-Rights include other Copyright and Similar Rights.
-
-
-Section 5 -- Disclaimer of Warranties and Limitation of Liability.
-
-  a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
-     EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
-     AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
-     ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
-     IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
-     WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
-     PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
-     ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
-     KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
-     ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
-
-  b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
-     TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
-     NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
-     INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
-     COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
-     USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
-     ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
-     DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
-     IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
-
-  c. The disclaimer of warranties and limitation of liability provided
-     above shall be interpreted in a manner that, to the extent
-     possible, most closely approximates an absolute disclaimer and
-     waiver of all liability.
-
-
-Section 6 -- Term and Termination.
-
-  a. This Public License applies for the term of the Copyright and
-     Similar Rights licensed here. However, if You fail to comply with
-     this Public License, then Your rights under this Public License
-     terminate automatically.
-
-  b. Where Your right to use the Licensed Material has terminated under
-     Section 6(a), it reinstates:
-
-       1. automatically as of the date the violation is cured, provided
-          it is cured within 30 days of Your discovery of the
-          violation; or
-
-       2. upon express reinstatement by the Licensor.
-
-     For the avoidance of doubt, this Section 6(b) does not affect any
-     right the Licensor may have to seek remedies for Your violations
-     of this Public License.
-
-  c. For the avoidance of doubt, the Licensor may also offer the
-     Licensed Material under separate terms or conditions or stop
-     distributing the Licensed Material at any time; however, doing so
-     will not terminate this Public License.
-
-  d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
-     License.
-
-
-Section 7 -- Other Terms and Conditions.
-
-  a. The Licensor shall not be bound by any additional or different
-     terms or conditions communicated by You unless expressly agreed.
-
-  b. Any arrangements, understandings, or agreements regarding the
-     Licensed Material not stated herein are separate from and
-     independent of the terms and conditions of this Public License.
-
-
-Section 8 -- Interpretation.
-
-  a. For the avoidance of doubt, this Public License does not, and
-     shall not be interpreted to, reduce, limit, restrict, or impose
-     conditions on any use of the Licensed Material that could lawfully
-     be made without permission under this Public License.
-
-  b. To the extent possible, if any provision of this Public License is
-     deemed unenforceable, it shall be automatically reformed to the
-     minimum extent necessary to make it enforceable. If the provision
-     cannot be reformed, it shall be severed from this Public License
-     without affecting the enforceability of the remaining terms and
-     conditions.
-
-  c. No term or condition of this Public License will be waived and no
-     failure to comply consented to unless expressly agreed to by the
-     Licensor.
-
-  d. Nothing in this Public License constitutes or may be interpreted
-     as a limitation upon, or waiver of, any privileges and immunities
-     that apply to the Licensor or You, including from the legal
-     processes of any jurisdiction or authority.
-
-
-=======================================================================
-
-Creative Commons is not a party to its public
-licenses. Notwithstanding, Creative Commons may elect to apply one of
-its public licenses to material it publishes and in those instances
-will be considered the “Licensor.” The text of the Creative Commons
-public licenses is dedicated to the public domain under the CC0 Public
-Domain Dedication. Except for the limited purpose of indicating that
-material is shared under a Creative Commons public license or as
-otherwise permitted by the Creative Commons policies published at
-creativecommons.org/policies, Creative Commons does not authorize the
-use of the trademark "Creative Commons" or any other trademark or logo
-of Creative Commons without its prior written consent including,
-without limitation, in connection with any unauthorized modifications
-to any of its public licenses or any other arrangements,
-understandings, or agreements concerning use of licensed material. For
-the avoidance of doubt, this paragraph does not form part of the
-public licenses.
-
-Creative Commons may be contacted at creativecommons.org.
-
------
-
-The following software may be included in this product: case-sensitive-paths-webpack-plugin. A copy of the source code may be downloaded from git+https://github.com/Urthen/case-sensitive-paths-webpack-plugin.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2018 Michael Pratt
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: caseless. A copy of the source code may be downloaded from https://github.com/mikeal/caseless. This software contains the following license and notice below:
-
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-1. Definitions.
-"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
-"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
-"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
-"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
-"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
-"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
-"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
-"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
-"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
-2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
-3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
-4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
-You must give any other recipients of the Work or Derivative Works a copy of this License; and
-You must cause any modified files to carry prominent notices stating that You changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
-5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
-6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
-7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
-8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
-9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
-END OF TERMS AND CONDITIONS
-
------
-
-The following software may be included in this product: chardet. A copy of the source code may be downloaded from git@github.com:runk/node-chardet.git. This software contains the following license and notice below:
-
-Copyright (C) 2018 Dmitry Shirokov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: chownr, fs-minipass, fs-write-stream-atomic, glob, ignore-walk, ini, isexe, json-stringify-safe, lru-cache, minimatch, mute-stream, nopt, npm-packlist, npmlog, once, osenv, rimraf, semver, tar, which, wrappy, yallist. A copy of the source code may be downloaded from git://github.com/isaacs/chownr.git (chownr), git+https://github.com/npm/fs-minipass.git (fs-minipass), https://github.com/npm/fs-write-stream-atomic (fs-write-stream-atomic), git://github.com/isaacs/node-glob.git (glob), git+https://github.com/isaacs/ignore-walk.git (ignore-walk), git://github.com/isaacs/ini.git (ini), git+https://github.com/isaacs/isexe.git (isexe), git://github.com/isaacs/json-stringify-safe (json-stringify-safe), git://github.com/isaacs/node-lru-cache.git (lru-cache), git://github.com/isaacs/minimatch.git (minimatch), git://github.com/isaacs/mute-stream (mute-stream), https://github.com/npm/nopt.git (nopt), git+https://github.com/npm/npm-packlist.git (npm-packlist), https://github.com/npm/npmlog.git (npmlog), git://github.com/isaacs/once (once), https://github.com/npm/osenv (osenv), git://github.com/isaacs/rimraf.git (rimraf), https://github.com/npm/node-semver (semver), https://github.com/npm/node-tar.git (tar), git://github.com/isaacs/node-which.git (which), https://github.com/npm/wrappy (wrappy), git+https://github.com/isaacs/yallist.git (yallist). This software contains the following license and notice below:
-
-The ISC License
-
-Copyright (c) Isaac Z. Schlueter and Contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: chrome-trace-event. A copy of the source code may be downloaded from github.com:samccone/chrome-trace-event. This software contains the following license and notice below:
-
-# This is the MIT license
-
-Copyright (c) 2015 Joyent Inc. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: ci-info, is-ci. A copy of the source code may be downloaded from https://github.com/watson/ci-info.git (ci-info), https://github.com/watson/is-ci.git (is-ci). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016-2018 Thomas Watson Steen
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: cipher-base, create-hash, create-hmac, evp_bytestokey. A copy of the source code may be downloaded from git+https://github.com/crypto-browserify/cipher-base.git (cipher-base), git@github.com:crypto-browserify/createHash.git (create-hash), https://github.com/crypto-browserify/createHmac.git (create-hmac), https://github.com/crypto-browserify/EVP_BytesToKey.git (evp_bytestokey). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2017 crypto-browserify contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: class-utils. A copy of the source code may be downloaded from https://github.com/jonschlinkert/class-utils.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015, 2017-2018, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: classnames. A copy of the source code may be downloaded from https://github.com/JedWatson/classnames.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2017 Jed Watson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: clean-css. A copy of the source code may be downloaded from https://github.com/jakubpawlowicz/clean-css.git. This software contains the following license and notice below:
-
-Copyright (C) 2017 JakubPawlowicz.com
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is furnished
-to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: cli-width. A copy of the source code may be downloaded from git@github.com:knownasilya/cli-width.git. This software contains the following license and notice below:
-
-Copyright (c) 2015, Ilya Radchenko <ilya@burstcreations.com>
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: cliui. A copy of the source code may be downloaded from http://github.com/yargs/cliui.git. This software contains the following license and notice below:
-
-Copyright (c) 2015, Contributors
-
-Permission to use, copy, modify, and/or distribute this software
-for any purpose with or without fee is hereby granted, provided
-that the above copyright notice and this permission notice
-appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE
-LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES
-OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
-ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: clone. A copy of the source code may be downloaded from git://github.com/pvorb/node-clone.git. This software contains the following license and notice below:
-
-Copyright © 2011-2015 Paul Vorbach <paul@vorba.ch>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the “Software”), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: clone-deep, extend-shallow, mixin-object. A copy of the source code may be downloaded from https://github.com/jonschlinkert/clone-deep.git (clone-deep), https://github.com/jonschlinkert/extend-shallow.git (extend-shallow), https://github.com/jonschlinkert/mixin-object.git (mixin-object). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2015, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: co. A copy of the source code may be downloaded from https://github.com/tj/co.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2014 TJ Holowaychuk &lt;tj@vision-media.ca&gt;
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: coa. A copy of the source code may be downloaded from git://github.com/veged/coa.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015-present Sergey Berezhnoy <veged@ya.ru>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: collection-visit, define-property, object-visit, unset-value. A copy of the source code may be downloaded from https://github.com/jonschlinkert/collection-visit.git (collection-visit), https://github.com/jonschlinkert/define-property.git (define-property), https://github.com/jonschlinkert/object-visit.git (object-visit), https://github.com/jonschlinkert/unset-value.git (unset-value). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015, 2017, Jon Schlinkert
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: color. A copy of the source code may be downloaded from https://github.com/Qix-/color.git. This software contains the following license and notice below:
-
-Copyright (c) 2012 Heather Arthur
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: color-convert. A copy of the source code may be downloaded from https://github.com/Qix-/color-convert.git. This software contains the following license and notice below:
-
-Copyright (c) 2011-2016 Heather Arthur <fayearthur@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: color-name. A copy of the source code may be downloaded from git@github.com:colorjs/color-name.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-Copyright (c) 2015 Dmitry Ivanov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: color-string. A copy of the source code may be downloaded from https://github.com/Qix-/color-string.git. This software contains the following license and notice below:
-
-Copyright (c) 2011 Heather Arthur <fayearthur@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: combined-stream, delayed-stream. A copy of the source code may be downloaded from git://github.com/felixge/node-combined-stream.git (combined-stream), git://github.com/felixge/node-delayed-stream.git (delayed-stream). This software contains the following license and notice below:
-
-Copyright (c) 2011 Debuggable Limited <felix@debuggable.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: comma-separated-tokens, hast-util-from-parse5, hast-util-parse-selector, hastscript, space-separated-tokens, unist-util-stringify-position, vendors, web-namespaces. A copy of the source code may be downloaded from https://github.com/wooorm/comma-separated-tokens.git (comma-separated-tokens), https://github.com/syntax-tree/hast-util-from-parse5.git (hast-util-from-parse5), https://github.com/syntax-tree/hast-util-parse-selector.git (hast-util-parse-selector), https://github.com/syntax-tree/hastscript.git (hastscript), https://github.com/wooorm/space-separated-tokens.git (space-separated-tokens), https://github.com/syntax-tree/unist-util-stringify-position.git (unist-util-stringify-position), https://github.com/wooorm/vendors.git (vendors), https://github.com/wooorm/web-namespaces.git (web-namespaces). This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2016 Titus Wormer <tituswormer@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: commander. A copy of the source code may be downloaded from https://github.com/tj/commander.js.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2011 TJ Holowaychuk <tj@vision-media.ca>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: common-tags. A copy of the source code may be downloaded from https://github.com/declandewet/common-tags. This software contains the following license and notice below:
-
-License (MIT)
--------------
-
-Copyright © Declan de Wet
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: commondir, shell-quote. A copy of the source code may be downloaded from http://github.com/substack/node-commondir.git (commondir), http://github.com/substack/node-shell-quote.git (shell-quote). This software contains the following license and notice below:
-
-The MIT License
-
-Copyright (c) 2013 James Halliday (mail@substack.net)
-
-Permission is hereby granted, free of charge,
-to any person obtaining a copy of this software and
-associated documentation files (the "Software"), to
-deal in the Software without restriction, including
-without limitation the rights to use, copy, modify,
-merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom
-the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: compare-versions. A copy of the source code may be downloaded from git+https://github.com/omichelsen/compare-versions.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015-2017 Ole Michelsen
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: component-emitter. A copy of the source code may be downloaded from https://github.com/component/emitter.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2014 Component contributors <dev@component.io>
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: compressible. A copy of the source code may be downloaded from https://github.com/jshttp/compressible.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2013 Jonathan Ong <me@jongleberry.com>
-Copyright (c) 2014 Jeremiah Senkpiel <fishrock123@rocketmail.com>
-Copyright (c) 2015 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: concat-stream. A copy of the source code may be downloaded from http://github.com/maxogden/concat-stream.git. This software contains the following license and notice below:
-
-The MIT License
-
-Copyright (c) 2013 Max Ogden
-
-Permission is hereby granted, free of charge,
-to any person obtaining a copy of this software and
-associated documentation files (the "Software"), to
-deal in the Software without restriction, including
-without limitation the rights to use, copy, modify,
-merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom
-the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: connect-history-api-fallback. A copy of the source code may be downloaded from http://github.com/bripkens/connect-history-api-fallback.git. This software contains the following license and notice below:
-
-The MIT License
-
-Copyright (c) 2012 Ben Ripkens http://bripkens.de
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: console-control-strings, gauge. A copy of the source code may be downloaded from https://github.com/iarna/console-control-strings (console-control-strings), https://github.com/iarna/gauge (gauge). This software contains the following license and notice below:
-
-Copyright (c) 2014, Rebecca Turner <me@re-becca.org>
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: content-disposition, media-typer, on-headers. A copy of the source code may be downloaded from https://github.com/jshttp/content-disposition.git (content-disposition), https://github.com/jshttp/media-typer.git (media-typer), https://github.com/jshttp/on-headers.git (on-headers). This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2014 Douglas Christopher Wilson
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: content-type. A copy of the source code may be downloaded from https://github.com/jshttp/content-type.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2015 Douglas Christopher Wilson
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: convert-source-map. A copy of the source code may be downloaded from git://github.com/thlorenz/convert-source-map.git. This software contains the following license and notice below:
-
-Copyright 2013 Thorsten Lorenz.
-All rights reserved.
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: cookie. A copy of the source code may be downloaded from https://github.com/jshttp/cookie.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2012-2014 Roman Shtylman <shtylman@gmail.com>
-Copyright (c) 2015 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: copy-concurrently, move-concurrently, promise-inflight. A copy of the source code may be downloaded from git+https://github.com/npm/copy-concurrently.git (copy-concurrently), git+https://github.com/npm/move-concurrently.git (move-concurrently), git+https://github.com/iarna/promise-inflight.git (promise-inflight). This software contains the following license and notice below:
-
-Copyright (c) 2017, Rebecca Turner <me@re-becca.org>
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: copy-descriptor, expand-brackets. A copy of the source code may be downloaded from https://github.com/jonschlinkert/copy-descriptor.git (copy-descriptor), https://github.com/jonschlinkert/expand-brackets.git (expand-brackets). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015-2016, Jon Schlinkert
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: copy-to-clipboard. A copy of the source code may be downloaded from git+https://github.com/sudodoki/copy-to-clipboard. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 sudodoki <smd.deluzion@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: core-js, core-js-pure. A copy of the source code may be downloaded from https://github.com/zloirock/core-js.git (core-js), https://github.com/zloirock/core-js.git (core-js-pure). This software contains the following license and notice below:
-
-Copyright (c) 2014-2019 Denis Pushkarev
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: core-js. A copy of the source code may be downloaded from https://github.com/zloirock/core-js.git. This software contains the following license and notice below:
-
-Copyright (c) 2015 Denis Pushkarev
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: core-util-is. A copy of the source code may be downloaded from git://github.com/isaacs/core-util-is. This software contains the following license and notice below:
-
-Copyright Node.js contributors. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: cosmiconfig. A copy of the source code may be downloaded from git+https://github.com/davidtheclark/cosmiconfig.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 David Clark
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: create-ecdh. A copy of the source code may be downloaded from https://github.com/crypto-browserify/createECDH.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2017 createECDH contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: create-react-context. A copy of the source code may be downloaded from https://github.com/thejameskyle/create-react-context. This software contains the following license and notice below:
-
-Copyright (c) Jamie Kyle
-
-This license is granted to everyone except for the following entities and
-any of their subsidiaries:
-
-- "Microsoft Corporation" (for working with ICE)
-- "Palantir Technologies" (for working with ICE)
-- "Amazon.com, Inc." (for abusive treatment of workers and for working with ICE)
-- "Northeastern University" (for working with ICE)
-- "Ernst & Young" (for working with ICE)
-- "Thomson Reuters" (for working with ICE)
-- "Motorola Solutions" (for working with ICE)
-- "Deloitte Consulting LLP" (for working with ICE)
-- "John Hopkins University" (for working with ICE)
-- "Dell Inc" (for working with ICE)
-- "Xerox Corporation" (for working with ICE)
-- "Canon Inc" (for working with ICE)
-- "Vermont State Colleges" (for working with ICE)
-- "Charter Communications"/"Spectrum"/"Time Warner Cable" (for working with ICE)
-- "LinkedIn Corporation" (for working with ICE)
-- "United Parcel Service Co" (for working with ICE)
-- "Walmart Inc" (for abusive treatment of workers)
-- "Sears Holding Corporation" (for abusive treatment of workers)
-- "Apple Inc" (for abusive treatment of workers)
-- "Tyson Foods Inc" (for abusive treatment of workers)
-- "Target Corporation" (for union busting and anti-union propaganda)
-- "The H&M group" (for abusive treatment of workers)
-- "Tesla, Inc" (for abusive treatment of workers)
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: cross-spawn. A copy of the source code may be downloaded from git@github.com:moxystudio/node-cross-spawn.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2018 Made With MOXY Lda <hello@moxy.studio>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: crypto-browserify. A copy of the source code may be downloaded from git://github.com/crypto-browserify/crypto-browserify.git. This software contains the following license and notice below:
-
-The MIT License
-
-Copyright (c) 2013 Dominic Tarr
-
-Permission is hereby granted, free of charge,
-to any person obtaining a copy of this software and
-associated documentation files (the "Software"), to
-deal in the Software without restriction, including
-without limitation the rights to use, copy, modify,
-merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom
-the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: css-declaration-sorter. A copy of the source code may be downloaded from https://github.com/Siilwyn/css-declaration-sorter.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright 2016 Selwyn <talk@selwyn.cc>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: css-loader, enhanced-resolve, file-loader, html-webpack-plugin, less-loader, loader-utils, mini-css-extract-plugin, schema-utils, style-loader, terser-webpack-plugin, url-loader, watchpack, webpack, webpack-dev-middleware, webpack-dev-server. A copy of the source code may be downloaded from https://github.com/webpack-contrib/css-loader.git (css-loader), git://github.com/webpack/enhanced-resolve.git (enhanced-resolve), https://github.com/webpack-contrib/file-loader.git (file-loader), https://github.com/jantimon/html-webpack-plugin.git (html-webpack-plugin), https://github.com/webpack-contrib/less-loader.git (less-loader), https://github.com/webpack/loader-utils.git (loader-utils), https://github.com/webpack-contrib/mini-css-extract-plugin.git (mini-css-extract-plugin), https://github.com/webpack-contrib/schema-utils (schema-utils), https://github.com/webpack-contrib/style-loader.git (style-loader), https://github.com/webpack-contrib/terser-webpack-plugin.git (terser-webpack-plugin), https://github.com/webpack-contrib/url-loader.git (url-loader), https://github.com/webpack/watchpack.git (watchpack), https://github.com/webpack/webpack.git (webpack), https://github.com/webpack/webpack-dev-middleware.git (webpack-dev-middleware), https://github.com/webpack/webpack-dev-server.git (webpack-dev-server). This software contains the following license and notice below:
-
-Copyright JS Foundation and other contributors
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: css-select, css-what, domelementtype, domhandler, domutils, entities, nth-check. A copy of the source code may be downloaded from git://github.com/fb55/css-select.git (css-select), https://github.com/fb55/css-what (css-what), git://github.com/fb55/domelementtype.git (domelementtype), git://github.com/fb55/DomHandler.git (domhandler), git://github.com/FB55/domutils.git (domutils), git://github.com/fb55/entities.git (entities), https://github.com/fb55/nth-check (nth-check). This software contains the following license and notice below:
-
-Copyright (c) Felix Böhm
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-THIS IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS,
-EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: css-select-base-adapter. A copy of the source code may be downloaded from https://github.com/nrkn/css-select-base-adapter.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2018 Nik Coughlin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: css-tree. A copy of the source code may be downloaded from https://github.com/csstree/csstree.git. This software contains the following license and notice below:
-
-Copyright (C) 2016 by Roman Dvornov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: css-unit-converter. A copy of the source code may be downloaded from https://github.com/andyjansson/css-unit-converter.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright 2015 Andy Jansson <andyjansson@users.noreply.github.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: css-url-regex. A copy of the source code may be downloaded from https://github.com/johnotander/css-url-regex.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) John Otander <johnotander@gmail.com> (johnotander.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: cssdb. A copy of the source code may be downloaded from https://github.com/csstools/cssdb.git. This software contains the following license and notice below:
-
-# CC0 1.0 Universal
-
-## Statement of Purpose
-
-The laws of most jurisdictions throughout the world automatically confer
-exclusive Copyright and Related Rights (defined below) upon the creator and
-subsequent owner(s) (each and all, an “owner”) of an original work of
-authorship and/or a database (each, a “Work”).
-
-Certain owners wish to permanently relinquish those rights to a Work for the
-purpose of contributing to a commons of creative, cultural and scientific works
-(“Commons”) that the public can reliably and without fear of later claims of
-infringement build upon, modify, incorporate in other works, reuse and
-redistribute as freely as possible in any form whatsoever and for any purposes,
-including without limitation commercial purposes. These owners may contribute
-to the Commons to promote the ideal of a free culture and the further
-production of creative, cultural and scientific works, or to gain reputation or
-greater distribution for their Work in part through the use and efforts of
-others.
-
-For these and/or other purposes and motivations, and without any expectation of
-additional consideration or compensation, the person associating CC0 with a
-Work (the “Affirmer”), to the extent that he or she is an owner of Copyright
-and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and
-publicly distribute the Work under its terms, with knowledge of his or her
-Copyright and Related Rights in the Work and the meaning and intended legal
-effect of CC0 on those rights.
-
-1. Copyright and Related Rights. A Work made available under CC0 may be
-   protected by copyright and related or neighboring rights (“Copyright and
-   Related Rights”). Copyright and Related Rights include, but are not limited
-   to, the following:
-   1. the right to reproduce, adapt, distribute, perform, display,
-      communicate, and translate a Work;
-   2. moral rights retained by the original author(s) and/or performer(s);
-   3. publicity and privacy rights pertaining to a person’s image or likeness
-      depicted in a Work;
-   4. rights protecting against unfair competition in regards to a Work,
-      subject to the limitations in paragraph 4(i), below;
-   5. rights protecting the extraction, dissemination, use and reuse of data
-      in a Work;
-   6. database rights (such as those arising under Directive 96/9/EC of the
-      European Parliament and of the Council of 11 March 1996 on the legal
-      protection of databases, and under any national implementation thereof,
-      including any amended or successor version of such directive); and
-   7. other similar, equivalent or corresponding rights throughout the world
-      based on applicable law or treaty, and any national implementations
-      thereof.
-
-2. Waiver. To the greatest extent permitted by, but not in contravention of,
-applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and
-unconditionally waives, abandons, and surrenders all of Affirmer’s Copyright
-and Related Rights and associated claims and causes of action, whether now
-known or unknown (including existing as well as future claims and causes of
-action), in the Work (i) in all territories worldwide, (ii) for the maximum
-duration provided by applicable law or treaty (including future time
-extensions), (iii) in any current or future medium and for any number of
-copies, and (iv) for any purpose whatsoever, including without limitation
-commercial, advertising or promotional purposes (the “Waiver”). Affirmer makes
-the Waiver for the benefit of each member of the public at large and to the
-detriment of Affirmer’s heirs and successors, fully intending that such Waiver
-shall not be subject to revocation, rescission, cancellation, termination, or
-any other legal or equitable action to disrupt the quiet enjoyment of the Work
-by the public as contemplated by Affirmer’s express Statement of Purpose.
-
-3. Public License Fallback. Should any part of the Waiver for any reason be
-judged legally invalid or ineffective under applicable law, then the Waiver
-shall be preserved to the maximum extent permitted taking into account
-Affirmer’s express Statement of Purpose. In addition, to the extent the Waiver
-is so judged Affirmer hereby grants to each affected person a royalty-free, non
-transferable, non sublicensable, non exclusive, irrevocable and unconditional
-license to exercise Affirmer’s Copyright and Related Rights in the Work (i) in
-all territories worldwide, (ii) for the maximum duration provided by applicable
-law or treaty (including future time extensions), (iii) in any current or
-future medium and for any number of copies, and (iv) for any purpose
-whatsoever, including without limitation commercial, advertising or promotional
-purposes (the “License”). The License shall be deemed effective as of the date
-CC0 was applied by Affirmer to the Work. Should any part of the License for any
-reason be judged legally invalid or ineffective under applicable law, such
-partial invalidity or ineffectiveness shall not invalidate the remainder of the
-License, and in such case Affirmer hereby affirms that he or she will not (i)
-exercise any of his or her remaining Copyright and Related Rights in the Work
-or (ii) assert any associated claims and causes of action with respect to the
-Work, in either case contrary to Affirmer’s express Statement of Purpose.
-
-4. Limitations and Disclaimers.
-   1. No trademark or patent rights held by Affirmer are waived, abandoned,
-      surrendered, licensed or otherwise affected by this document.
-   2. Affirmer offers the Work as-is and makes no representations or
-      warranties of any kind concerning the Work, express, implied, statutory
-      or otherwise, including without limitation warranties of title,
-      merchantability, fitness for a particular purpose, non infringement, or
-      the absence of latent or other defects, accuracy, or the present or
-      absence of errors, whether or not discoverable, all to the greatest
-      extent permissible under applicable law.
-   3. Affirmer disclaims responsibility for clearing rights of other persons
-      that may apply to the Work or any use thereof, including without
-      limitation any person’s Copyright and Related Rights in the Work.
-      Further, Affirmer disclaims responsibility for obtaining any necessary
-      consents, permissions or other rights required for any use of the Work.
-   4. Affirmer understands and acknowledges that Creative Commons is not a
-      party to this document and has no duty or obligation with respect to
-      this CC0 or use of the Work.
-
-For more information, please see
-https://creativecommons.org/publicdomain/zero/1.0/.
-
------
-
-The following software may be included in this product: csso. A copy of the source code may be downloaded from https://github.com/css/csso.git. This software contains the following license and notice below:
-
-Copyright (C) 2011-2017 by Sergey Kryzhanovsky
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: cssom. A copy of the source code may be downloaded from https://github.com/NV/CSSOM.git. This software contains the following license and notice below:
-
-Copyright (c) Nikita Vasilyev
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: csstype. A copy of the source code may be downloaded from https://github.com/frenic/csstype. This software contains the following license and notice below:
-
-Copyright (c) 2017-2018 Fredrik Nicol
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: dashdash. A copy of the source code may be downloaded from git://github.com/trentm/node-dashdash.git. This software contains the following license and notice below:
-
-# This is the MIT license
-
-Copyright (c) 2013 Trent Mick. All rights reserved.
-Copyright (c) 2013 Joyent Inc. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: data-urls, whatwg-mimetype. A copy of the source code may be downloaded from https://github.com/jsdom/data-urls.git (data-urls), https://github.com/jsdom/whatwg-mimetype.git (whatwg-mimetype). This software contains the following license and notice below:
-
-Copyright © 2017–2018 Domenic Denicola <d@domenic.me>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: debug. A copy of the source code may be downloaded from git://github.com/visionmedia/debug.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2014 TJ Holowaychuk <tj@vision-media.ca>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software
-and associated documentation files (the 'Software'), to deal in the Software without restriction,
-including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
-and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
-LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: decode-uri-component. A copy of the source code may be downloaded from https://github.com/SamVerschueren/decode-uri-component.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Sam Verschueren <sam.verschueren@gmail.com> (github.com/SamVerschueren)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: deep-extend. A copy of the source code may be downloaded from git://github.com/unclechu/node-deep-extend.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2013-2018, Viacheslav Lotsmanov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: deep-is. A copy of the source code may be downloaded from http://github.com/thlorenz/deep-is.git. This software contains the following license and notice below:
-
-Copyright (c) 2012, 2013 Thorsten Lorenz <thlorenz@gmx.de>
-Copyright (c) 2012 James Halliday <mail@substack.net>
-Copyright (c) 2009 Thomas Robinson <280north.com>
-
-This software is released under the MIT license:
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: default-gateway. A copy of the source code may be downloaded from https://github.com/silverwind/default-gateway.git. This software contains the following license and notice below:
-
-Copyright (c) silverwind
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: default-require-extensions. A copy of the source code may be downloaded from https://github.com/avajs/default-require-extensions.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Node.js contributors, James Talmage <james@talmage.io> (github.com/jamestalmage)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: define-property, is-windows, strip-comments. A copy of the source code may be downloaded from https://github.com/jonschlinkert/define-property.git (define-property), https://github.com/jonschlinkert/is-windows.git (is-windows), https://github.com/jonschlinkert/strip-comments.git (strip-comments). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015-2018, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: delegates. A copy of the source code may be downloaded from https://github.com/visionmedia/node-delegates.git. This software contains the following license and notice below:
-
-Copyright (c) 2015 TJ Holowaychuk <tj@vision-media.ca>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: depd, forwarded, vary. A copy of the source code may be downloaded from https://github.com/dougwilson/nodejs-depd.git (depd), https://github.com/jshttp/forwarded.git (forwarded), https://github.com/jshttp/vary.git (vary). This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2014-2017 Douglas Christopher Wilson
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: detect-libc. A copy of the source code may be downloaded from git://github.com/lovell/detect-libc. This software contains the following license and notice below:
-
-Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
------
-
-The following software may be included in this product: detect-node. A copy of the source code may be downloaded from https://github.com/iliakan/detect-node. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 Ilya Kantor
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: detect-port-alt. A copy of the source code may be downloaded from git://github.com/node-modules/detect-port.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 xdf
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: diffie-hellman, public-encrypt. A copy of the source code may be downloaded from https://github.com/crypto-browserify/diffie-hellman.git (diffie-hellman), https://github.com/crypto-browserify/publicEncrypt.git (public-encrypt). This software contains the following license and notice below:
-
-Copyright (c) 2017 Calvin Metcalf
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: dns-equal. A copy of the source code may be downloaded from git+https://github.com/watson/dns-equal.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Thomas Watson Steen
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: dns-packet, stream-shift. A copy of the source code may be downloaded from https://github.com/mafintosh/dns-packet (dns-packet), https://github.com/mafintosh/stream-shift.git (stream-shift). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Mathias Buus
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: dns-txt. A copy of the source code may be downloaded from https://github.com/watson/dns-txt.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Thomas Watson Steen
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: doctrine. A copy of the source code may be downloaded from https://github.com/eslint/doctrine.git. This software contains the following license and notice below:
-
-Apache License
-                       Version 2.0, January 2004
-                    http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-  "License" shall mean the terms and conditions for use, reproduction,
-  and distribution as defined by Sections 1 through 9 of this document.
-
-  "Licensor" shall mean the copyright owner or entity authorized by
-  the copyright owner that is granting the License.
-
-  "Legal Entity" shall mean the union of the acting entity and all
-  other entities that control, are controlled by, or are under common
-  control with that entity. For the purposes of this definition,
-  "control" means (i) the power, direct or indirect, to cause the
-  direction or management of such entity, whether by contract or
-  otherwise, or (ii) ownership of fifty percent (50%) or more of the
-  outstanding shares, or (iii) beneficial ownership of such entity.
-
-  "You" (or "Your") shall mean an individual or Legal Entity
-  exercising permissions granted by this License.
-
-  "Source" form shall mean the preferred form for making modifications,
-  including but not limited to software source code, documentation
-  source, and configuration files.
-
-  "Object" form shall mean any form resulting from mechanical
-  transformation or translation of a Source form, including but
-  not limited to compiled object code, generated documentation,
-  and conversions to other media types.
-
-  "Work" shall mean the work of authorship, whether in Source or
-  Object form, made available under the License, as indicated by a
-  copyright notice that is included in or attached to the work
-  (an example is provided in the Appendix below).
-
-  "Derivative Works" shall mean any work, whether in Source or Object
-  form, that is based on (or derived from) the Work and for which the
-  editorial revisions, annotations, elaborations, or other modifications
-  represent, as a whole, an original work of authorship. For the purposes
-  of this License, Derivative Works shall not include works that remain
-  separable from, or merely link (or bind by name) to the interfaces of,
-  the Work and Derivative Works thereof.
-
-  "Contribution" shall mean any work of authorship, including
-  the original version of the Work and any modifications or additions
-  to that Work or Derivative Works thereof, that is intentionally
-  submitted to Licensor for inclusion in the Work by the copyright owner
-  or by an individual or Legal Entity authorized to submit on behalf of
-  the copyright owner. For the purposes of this definition, "submitted"
-  means any form of electronic, verbal, or written communication sent
-  to the Licensor or its representatives, including but not limited to
-  communication on electronic mailing lists, source code control systems,
-  and issue tracking systems that are managed by, or on behalf of, the
-  Licensor for the purpose of discussing and improving the Work, but
-  excluding communication that is conspicuously marked or otherwise
-  designated in writing by the copyright owner as "Not a Contribution."
-
-  "Contributor" shall mean Licensor and any individual or Legal Entity
-  on behalf of whom a Contribution has been received by Licensor and
-  subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-  this License, each Contributor hereby grants to You a perpetual,
-  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-  copyright license to reproduce, prepare Derivative Works of,
-  publicly display, publicly perform, sublicense, and distribute the
-  Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-  this License, each Contributor hereby grants to You a perpetual,
-  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-  (except as stated in this section) patent license to make, have made,
-  use, offer to sell, sell, import, and otherwise transfer the Work,
-  where such license applies only to those patent claims licensable
-  by such Contributor that are necessarily infringed by their
-  Contribution(s) alone or by combination of their Contribution(s)
-  with the Work to which such Contribution(s) was submitted. If You
-  institute patent litigation against any entity (including a
-  cross-claim or counterclaim in a lawsuit) alleging that the Work
-  or a Contribution incorporated within the Work constitutes direct
-  or contributory patent infringement, then any patent licenses
-  granted to You under this License for that Work shall terminate
-  as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-  Work or Derivative Works thereof in any medium, with or without
-  modifications, and in Source or Object form, provided that You
-  meet the following conditions:
-
-  (a) You must give any other recipients of the Work or
-      Derivative Works a copy of this License; and
-
-  (b) You must cause any modified files to carry prominent notices
-      stating that You changed the files; and
-
-  (c) You must retain, in the Source form of any Derivative Works
-      that You distribute, all copyright, patent, trademark, and
-      attribution notices from the Source form of the Work,
-      excluding those notices that do not pertain to any part of
-      the Derivative Works; and
-
-  (d) If the Work includes a "NOTICE" text file as part of its
-      distribution, then any Derivative Works that You distribute must
-      include a readable copy of the attribution notices contained
-      within such NOTICE file, excluding those notices that do not
-      pertain to any part of the Derivative Works, in at least one
-      of the following places: within a NOTICE text file distributed
-      as part of the Derivative Works; within the Source form or
-      documentation, if provided along with the Derivative Works; or,
-      within a display generated by the Derivative Works, if and
-      wherever such third-party notices normally appear. The contents
-      of the NOTICE file are for informational purposes only and
-      do not modify the License. You may add Your own attribution
-      notices within Derivative Works that You distribute, alongside
-      or as an addendum to the NOTICE text from the Work, provided
-      that such additional attribution notices cannot be construed
-      as modifying the License.
-
-  You may add Your own copyright statement to Your modifications and
-  may provide additional or different license terms and conditions
-  for use, reproduction, or distribution of Your modifications, or
-  for any such Derivative Works as a whole, provided Your use,
-  reproduction, and distribution of the Work otherwise complies with
-  the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-  any Contribution intentionally submitted for inclusion in the Work
-  by You to the Licensor shall be under the terms and conditions of
-  this License, without any additional terms or conditions.
-  Notwithstanding the above, nothing herein shall supersede or modify
-  the terms of any separate license agreement you may have executed
-  with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-  names, trademarks, service marks, or product names of the Licensor,
-  except as required for reasonable and customary use in describing the
-  origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-  agreed to in writing, Licensor provides the Work (and each
-  Contributor provides its Contributions) on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-  implied, including, without limitation, any warranties or conditions
-  of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-  PARTICULAR PURPOSE. You are solely responsible for determining the
-  appropriateness of using or redistributing the Work and assume any
-  risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-  whether in tort (including negligence), contract, or otherwise,
-  unless required by applicable law (such as deliberate and grossly
-  negligent acts) or agreed to in writing, shall any Contributor be
-  liable to You for damages, including any direct, indirect, special,
-  incidental, or consequential damages of any character arising as a
-  result of this License or out of the use or inability to use the
-  Work (including but not limited to damages for loss of goodwill,
-  work stoppage, computer failure or malfunction, or any and all
-  other commercial damages or losses), even if such Contributor
-  has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-  the Work or Derivative Works thereof, You may choose to offer,
-  and charge a fee for, acceptance of support, warranty, indemnity,
-  or other liability obligations and/or rights consistent with this
-  License. However, in accepting such obligations, You may act only
-  on Your own behalf and on Your sole responsibility, not on behalf
-  of any other Contributor, and only if You agree to indemnify,
-  defend, and hold each Contributor harmless for any liability
-  incurred by, or claims asserted against, such Contributor by reason
-  of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
------
-
-The following software may be included in this product: doctrine. A copy of the source code may be downloaded from https://github.com/eslint/doctrine.git. This software contains the following license and notice below:
-
-Doctrine
-Copyright jQuery Foundation and other contributors, https://jquery.org/
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-  * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-  * Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: dom-closest, dom-matches. A copy of the source code may be downloaded from https://github.com/necolas/dom-closest.git (dom-closest), https://github.com/necolas/dom-matches.git (dom-matches). This software contains the following license and notice below:
-
-Copyright (c) Nicolas Gallagher
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: dom-converter, pretty-error. A copy of the source code may be downloaded from https://github.com/AriaMinaei/dom-converter (dom-converter), https://github.com/AriaMinaei/pretty-error.git (pretty-error). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2013 Aria Minaei
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: dom-serializer. A copy of the source code may be downloaded from git://github.com/cheeriojs/dom-renderer.git. This software contains the following license and notice below:
-
-License
-
-(The MIT License)
-
-Copyright (c) 2014 The cheeriojs contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: domain-browser. A copy of the source code may be downloaded from https://github.com/bevry/domain-browser.git. This software contains the following license and notice below:
-
-<!-- LICENSEFILE/ -->
-
-<h1>License</h1>
-
-Unless stated otherwise all works are:
-
-<ul><li>Copyright &copy; 2013+ <a href="http://bevry.me">Bevry Pty Ltd</a></li></ul>
-
-and licensed under:
-
-<ul><li><a href="http://spdx.org/licenses/MIT.html">MIT License</a></li></ul>
-
-<h2>MIT License</h2>
-
-<pre>
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-</pre>
-
-<!-- /LICENSEFILE -->
-
------
-
-The following software may be included in this product: domexception. A copy of the source code may be downloaded from https://github.com/jsdom/domexception.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright © 2017 Domenic Denicola
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: dotenv. A copy of the source code may be downloaded from git://github.com/motdotla/dotenv.git. This software contains the following license and notice below:
-
-Copyright (c) 2015, Scott Motte
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: dotenv-expand. This software contains the following license and notice below:
-
-Copyright (c) 2016, Scott Motte
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: draft-js. A copy of the source code may be downloaded from https://github.com/facebook/draft-js.git. This software contains the following license and notice below:
-
-BSD License
-
-For Draft.js software
-
-Copyright (c) 2013-present, Facebook, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
- * Neither the name Facebook nor the names of its contributors may be used to
-   endorse or promote products derived from this software without specific
-   prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: duplexify, end-of-stream, pump, pumpify. A copy of the source code may be downloaded from git://github.com/mafintosh/duplexify (duplexify), git://github.com/mafintosh/end-of-stream.git (end-of-stream), git://github.com/mafintosh/pump.git (pump), git://github.com/mafintosh/pumpify (pumpify). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Mathias Buus
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: ecc-jsbn. A copy of the source code may be downloaded from https://github.com/quartzjer/ecc-jsbn.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Jeremie Miller
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: electron-to-chromium. A copy of the source code may be downloaded from https://github.com/kilian/electron-to-chromium/. This software contains the following license and notice below:
-
-Copyright 2018 Kilian Valkhof
-
-Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: emojis-list. A copy of the source code may be downloaded from git+https://github.com/kikobeats/emojis-list.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright © 2015 Kiko Beats
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: encodeurl. A copy of the source code may be downloaded from https://github.com/pillarjs/encodeurl.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2016 Douglas Christopher Wilson
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: encoding. A copy of the source code may be downloaded from https://github.com/andris9/encoding.git. This software contains the following license and notice below:
-
-Copyright (c) 2012-2014 Andris Reinman
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: enquire.js. A copy of the source code may be downloaded from git://github.com/WickyNilliams/enquire.js.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2012 Nick Williams
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: error-ex, is-arrayish. A copy of the source code may be downloaded from https://github.com/qix-/node-error-ex.git (error-ex), https://github.com/qix-/node-is-arrayish.git (is-arrayish). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 JD Ballard
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: es-to-primitive, is-callable, is-date-object, is-symbol, object.getownpropertydescriptors, object.values. A copy of the source code may be downloaded from git://github.com/ljharb/es-to-primitive.git (es-to-primitive), git://github.com/ljharb/is-callable.git (is-callable), git://github.com/ljharb/is-date-object.git (is-date-object), git://github.com/ljharb/is-symbol.git (is-symbol), git://github.com/ljharb/object.getownpropertydescriptors.git (object.getownpropertydescriptors), git://github.com/es-shims/Object.values.git (object.values). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: escape-html. A copy of the source code may be downloaded from https://github.com/component/escape-html.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2012-2013 TJ Holowaychuk
-Copyright (c) 2015 Andreas Lubbe
-Copyright (c) 2015 Tiancheng "Timothy" Gu
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: escodegen, estraverse, esutils. A copy of the source code may be downloaded from http://github.com/estools/escodegen.git (escodegen), http://github.com/estools/estraverse.git (estraverse), http://github.com/estools/esutils.git (esutils). This software contains the following license and notice below:
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-  * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-  * Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: eslint. A copy of the source code may be downloaded from https://github.com/eslint/eslint.git. This software contains the following license and notice below:
-
-Copyright JS Foundation and other contributors, https://js.foundation
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: eslint-loader. A copy of the source code may be downloaded from https://github.com/webpack-contrib/eslint-loader.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Maxime Thirouin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: eslint-plugin-flowtype. A copy of the source code may be downloaded from https://github.com/gajus/eslint-plugin-flowtype. This software contains the following license and notice below:
-
-Copyright (c) 2015, Gajus Kuizinas (http://gajus.com/)
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-    * Neither the name of the Gajus Kuizinas (http://gajus.com/) nor the
-      names of its contributors may be used to endorse or promote products
-      derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL ANUARY BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: eslint-plugin-import. A copy of the source code may be downloaded from https://github.com/benmosher/eslint-plugin-import. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Ben Mosher
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: eslint-plugin-jsx-a11y, jsx-ast-utils. A copy of the source code may be downloaded from https://github.com/evcohen/eslint-plugin-jsx-a11y (eslint-plugin-jsx-a11y), https://github.com/evcohen/jsx-ast-utils (jsx-ast-utils). This software contains the following license and notice below:
-
-The MIT License (MIT)
-Copyright (c) 2016 Ethan Cohen
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: eslint-plugin-react. A copy of the source code may be downloaded from https://github.com/yannickcr/eslint-plugin-react. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Yannick Croissant
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: eslint-plugin-react-hooks, react, react-dom, react-is, scheduler. A copy of the source code may be downloaded from https://github.com/facebook/react.git (eslint-plugin-react-hooks), https://github.com/facebook/react.git (react), https://github.com/facebook/react.git (react-dom), https://github.com/facebook/react.git (react-is), https://github.com/facebook/react.git (scheduler). This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) Facebook, Inc. and its affiliates.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-
-The following software may be included in this product: eslint-scope. A copy of the source code may be downloaded from https://github.com/eslint/eslint-scope.git. This software contains the following license and notice below:
-
-eslint-scope
-Copyright JS Foundation and other contributors, https://js.foundation
-Copyright (C) 2012-2013 Yusuke Suzuki (twitter: @Constellation) and other contributors.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-  * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-  * Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: eslint-utils, regexpp. A copy of the source code may be downloaded from git+https://github.com/mysticatea/eslint-utils.git (eslint-utils), git+https://github.com/mysticatea/regexpp.git (regexpp). This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2018 Toru Nagashima
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: eslint-visitor-keys. A copy of the source code may be downloaded from https://github.com/eslint/eslint-visitor-keys.git. This software contains the following license and notice below:
-
-Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright contributors
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
------
-
-The following software may be included in this product: espree. A copy of the source code may be downloaded from https://github.com/eslint/espree.git. This software contains the following license and notice below:
-
-Espree
-Copyright JS Foundation and other contributors, https://js.foundation
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-  * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-  * Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: esprima. A copy of the source code may be downloaded from https://github.com/jquery/esprima.git. This software contains the following license and notice below:
-
-Copyright JS Foundation and other contributors, https://js.foundation/
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-  * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-  * Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: esquery. A copy of the source code may be downloaded from https://github.com/jrfeenst/esquery.git. This software contains the following license and notice below:
-
-Copyright (c) 2013, Joel Feenstra
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-    * Neither the name of the ESQuery nor the names of its contributors may
-      be used to endorse or promote products derived from this software without
-      specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL JOEL FEENSTRA BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: etag, proxy-addr. A copy of the source code may be downloaded from https://github.com/jshttp/etag.git (etag), https://github.com/jshttp/proxy-addr.git (proxy-addr). This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2014-2016 Douglas Christopher Wilson
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: eventemitter3. A copy of the source code may be downloaded from git://github.com/primus/eventemitter3.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Arnout Kazemier
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: eventlistener. A copy of the source code may be downloaded from git@github.com:finn-no/eventlistener.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2013 FINN.no AS
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: events. A copy of the source code may be downloaded from git://github.com/Gozala/events.git. This software contains the following license and notice below:
-
-MIT
-
-Copyright Joyent, Inc. and other Node contributors.
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to permit
-persons to whom the Software is furnished to do so, subject to the
-following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
-NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: eventsource. A copy of the source code may be downloaded from git://github.com/EventSource/eventsource.git. This software contains the following license and notice below:
-
-The MIT License
-
-Copyright (c) EventSource GitHub organisation
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: exec-sh. A copy of the source code may be downloaded from git@github.com:tsertkov/exec-sh.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Aleksandr Tsertkov <tsertkov@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: express. A copy of the source code may be downloaded from https://github.com/expressjs/express.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2009-2014 TJ Holowaychuk <tj@vision-media.ca>
-Copyright (c) 2013-2014 Roman Shtylman <shtylman+expressjs@gmail.com>
-Copyright (c) 2014-2015 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: extend. A copy of the source code may be downloaded from https://github.com/justmoon/node-extend.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Stefan Thomas
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: extend-shallow, mixin-deep. A copy of the source code may be downloaded from https://github.com/jonschlinkert/extend-shallow.git (extend-shallow), https://github.com/jonschlinkert/mixin-deep.git (mixin-deep). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2015, 2017, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: external-editor. A copy of the source code may be downloaded from git+https://github.com/mrkmg/node-external-editor.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Kevin Gravier
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: extsprintf, jsprim. A copy of the source code may be downloaded from git://github.com/davepacheco/node-extsprintf.git (extsprintf), git://github.com/joyent/node-jsprim.git (jsprim). This software contains the following license and notice below:
-
-Copyright (c) 2012, Joyent, Inc. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE
-
------
-
-The following software may be included in this product: fast-glob. A copy of the source code may be downloaded from https://github.com/mrmlnc/fast-glob.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Denis Malinochkin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: fast-levenshtein. A copy of the source code may be downloaded from https://github.com/hiddentao/fast-levenshtein.git. This software contains the following license and notice below:
-
-(MIT License)
-
-Copyright (c) 2013 [Ramesh Nair](http://www.hiddentao.com/)
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: fbjs. A copy of the source code may be downloaded from https://github.com/facebook/fbjs.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2013-present, Facebook, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: file-entry-cache, flat-cache. A copy of the source code may be downloaded from https://github.com/royriojas/file-entry-cache.git (file-entry-cache), https://github.com/royriojas/flat-cache.git (flat-cache). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Roy Riojas
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: filesize. A copy of the source code may be downloaded from git://github.com/avoidwork/filesize.js.git. This software contains the following license and notice below:
-
-Copyright (c) 2018, Jason Mulligan
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-* Neither the name of filesize nor the names of its
-  contributors may be used to endorse or promote products derived from
-  this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: finalhandler. A copy of the source code may be downloaded from https://github.com/pillarjs/finalhandler.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2014-2017 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: find-cache-dir. A copy of the source code may be downloaded from https://github.com/avajs/find-cache-dir.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) James Talmage <james@talmage.io> (github.com/jamestalmage)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: flatted. A copy of the source code may be downloaded from git+https://github.com/WebReflection/flatted.git. This software contains the following license and notice below:
-
-ISC License
-
-Copyright (c) 2018, Andrea Giammarchi, @WebReflection
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: flatten. A copy of the source code may be downloaded from git://github.com/jesusabdullah/node-flatten.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Joshua Holbrook
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: flush-write-stream, multicast-dns, multicast-dns-service-types, stream-each. A copy of the source code may be downloaded from https://github.com/mafintosh/flush-write-stream.git (flush-write-stream), https://github.com/mafintosh/multicast-dns.git (multicast-dns), https://github.com/mafintosh/multicast-dns-service-types.git (multicast-dns-service-types), https://github.com/mafintosh/stream-each.git (stream-each). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Mathias Buus
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: follow-redirects. A copy of the source code may be downloaded from git@github.com:follow-redirects/follow-redirects.git. This software contains the following license and notice below:
-
-Copyright 2014–present Olivier Lalonde <olalonde@gmail.com>, James Talmage <james@talmage.io>, Ruben Verborgh
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: for-own. A copy of the source code may be downloaded from https://github.com/jonschlinkert/for-own.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2015, 2017, Jon Schlinkert
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: fork-ts-checker-webpack-plugin. A copy of the source code may be downloaded from https://github.com/Realytics/fork-ts-checker-webpack-plugin.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 Realytics
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: form-data. A copy of the source code may be downloaded from git://github.com/form-data/form-data.git. This software contains the following license and notice below:
-
-Copyright (c) 2012 Felix Geisendörfer (felix@debuggable.com) and contributors
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
-
------
-
-The following software may be included in this product: fragment-cache, posix-character-classes. A copy of the source code may be downloaded from https://github.com/jonschlinkert/fragment-cache.git (fragment-cache), https://github.com/jonschlinkert/posix-character-classes.git (posix-character-classes). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016-2017, Jon Schlinkert
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: fresh. A copy of the source code may be downloaded from https://github.com/jshttp/fresh.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2012 TJ Holowaychuk <tj@vision-media.ca>
-Copyright (c) 2016-2017 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: from2. A copy of the source code may be downloaded from git://github.com/hughsk/from2. This software contains the following license and notice below:
-
-## The MIT License (MIT) ##
-
-Copyright (c) 2014 Hugh Kennedy
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: fs-extra. A copy of the source code may be downloaded from https://github.com/jprichardson/node-fs-extra. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2011-2017 JP Richardson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
-(the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify,
- merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
-WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
-OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: fs.realpath. A copy of the source code may be downloaded from git+https://github.com/isaacs/fs.realpath.git. This software contains the following license and notice below:
-
-The ISC License
-
-Copyright (c) Isaac Z. Schlueter and Contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-----
-
-This library bundles a version of the `fs.realpath` and `fs.realpathSync`
-methods from Node.js v0.10 under the terms of the Node.js MIT license.
-
-Node's license follows, also included at the header of `old.js` which contains
-the licensed code:
-
-  Copyright Joyent, Inc. and other Node contributors.
-
-  Permission is hereby granted, free of charge, to any person obtaining a
-  copy of this software and associated documentation files (the "Software"),
-  to deal in the Software without restriction, including without limitation
-  the rights to use, copy, modify, merge, publish, distribute, sublicense,
-  and/or sell copies of the Software, and to permit persons to whom the
-  Software is furnished to do so, subject to the following conditions:
-
-  The above copyright notice and this permission notice shall be included in
-  all copies or substantial portions of the Software.
-
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-  DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: fsevents. A copy of the source code may be downloaded from https://github.com/fsevents/fsevents.git. This software contains the following license and notice below:
-
-MIT License
------------
-
-Copyright (C) 2010-2019 by Philipp Dunkel, Ben Noordhuis, Elan Shankar
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: fsevents. A copy of the source code may be downloaded from https://github.com/strongloop/fsevents.git. This software contains the following license and notice below:
-
-MIT License
------------
-
-Copyright (C) 2010-2014 Philipp Dunkel
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: function-bind. A copy of the source code may be downloaded from git://github.com/Raynos/function-bind.git. This software contains the following license and notice below:
-
-Copyright (c) 2013 Raynos.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: functional-red-black-tree, uniq. A copy of the source code may be downloaded from git://github.com/mikolalysenko/functional-red-black-tree.git (functional-red-black-tree), git://github.com/mikolalysenko/uniq.git (uniq). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2013 Mikola Lysenko
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: get-caller-file. A copy of the source code may be downloaded from git+https://github.com/stefanpenner/get-caller-file.git. This software contains the following license and notice below:
-
-ISC License (ISC)
-Copyright 2018 Stefan Penner
-
-Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: getpass, http-signature, sshpk. A copy of the source code may be downloaded from https://github.com/arekinath/node-getpass.git (getpass), git://github.com/joyent/node-http-signature.git (http-signature), git+https://github.com/joyent/node-sshpk.git (sshpk). This software contains the following license and notice below:
-
-Copyright Joyent, Inc. All rights reserved.
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: glob-parent. A copy of the source code may be downloaded from https://github.com/es128/glob-parent. This software contains the following license and notice below:
-
-The ISC License
-
-Copyright (c) 2015 Elan Shanker
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: global-modules, global-prefix, repeat-element, use. A copy of the source code may be downloaded from https://github.com/jonschlinkert/global-modules.git (global-modules), https://github.com/jonschlinkert/global-prefix.git (global-prefix), https://github.com/jonschlinkert/repeat-element.git (repeat-element), https://github.com/jonschlinkert/use.git (use). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015-present, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: graceful-fs. A copy of the source code may be downloaded from https://github.com/isaacs/node-graceful-fs. This software contains the following license and notice below:
-
-The ISC License
-
-Copyright (c) Isaac Z. Schlueter, Ben Noordhuis, and Contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: hammerjs. A copy of the source code may be downloaded from git://github.com/hammerjs/hammer.js.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (C) 2011-2014 by Jorik Tangelder (Eight Media)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: handlebars. A copy of the source code may be downloaded from https://github.com/wycats/handlebars.js.git. This software contains the following license and notice below:
-
-Copyright (C) 2011-2017 by Yehuda Katz
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: har-schema. A copy of the source code may be downloaded from https://github.com/ahmadnassri/har-schema.git. This software contains the following license and notice below:
-
-Copyright (c) 2015, Ahmad Nassri <ahmad@ahmadnassri.com>
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: har-validator. A copy of the source code may be downloaded from https://github.com/ahmadnassri/node-har-validator.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2018 Ahmad Nassri <ahmad@ahmadnassri.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: has-symbols. A copy of the source code may be downloaded from git://github.com/ljharb/has-symbols.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2016 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: has-unicode. A copy of the source code may be downloaded from https://github.com/iarna/has-unicode. This software contains the following license and notice below:
-
-Copyright (c) 2014, Rebecca Turner <me@re-becca.org>
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: hash-base, md5.js. A copy of the source code may be downloaded from https://github.com/crypto-browserify/hash-base.git (hash-base), https://github.com/crypto-browserify/md5.js.git (md5.js). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Kirill Fomichev
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: hex-color-regex. A copy of the source code may be downloaded from https://github.com/regexps/hex-color-regex.git. This software contains the following license and notice below:
-
-# The MIT License
-
-Copyright (c) 2015 [Charlike Make Reagent](http://j.mp/1stW47C)
-
-> Permission is hereby granted, free of charge, to any person obtaining a copy
-> of this software and associated documentation files (the "Software"), to deal
-> in the Software without restriction, including without limitation the rights
-> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-> copies of the Software, and to permit persons to whom the Software is
-> furnished to do so, subject to the following conditions:
->
-> The above copyright notice and this permission notice shall be included in
-> all copies or substantial portions of the Software.
->
-> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-> SOFTWARE.
-
------
-
-The following software may be included in this product: history, react-router, react-router-dom. A copy of the source code may be downloaded from https://github.com/ReactTraining/history.git (history), https://github.com/ReactTraining/react-router.git (react-router), https://github.com/ReactTraining/react-router.git (react-router-dom). This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) React Training 2016-2018
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: hoek. A copy of the source code may be downloaded from git://github.com/hapijs/hoek. This software contains the following license and notice below:
-
-Copyright (c) 2011-2018, Project contributors
-Copyright (c) 2011-2014, Walmart
-Copyright (c) 2011, Yahoo Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-    * The names of any contributors may not be used to endorse or promote
-      products derived from this software without specific prior written
-      permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: hoist-non-react-statics. A copy of the source code may be downloaded from git://github.com/mridgway/hoist-non-react-statics.git. This software contains the following license and notice below:
-
-Software License Agreement (BSD License)
-========================================
-
-Copyright (c) 2015, Yahoo! Inc. All rights reserved.
-----------------------------------------------------
-
-Redistribution and use of this software in source and binary forms, with or
-without modification, are permitted provided that the following conditions are
-met:
-
-  * Redistributions of source code must retain the above copyright notice, this
-    list of conditions and the following disclaimer.
-  * Redistributions in binary form must reproduce the above copyright notice,
-    this list of conditions and the following disclaimer in the documentation
-    and/or other materials provided with the distribution.
-  * Neither the name of Yahoo! Inc. nor the names of YUI's contributors may be
-    used to endorse or promote products derived from this software without
-    specific prior written permission of Yahoo! Inc.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: hosted-git-info. A copy of the source code may be downloaded from git+https://github.com/npm/hosted-git-info.git. This software contains the following license and notice below:
-
-Copyright (c) 2015, Rebecca Turner
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
-OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: hsl-regex, hsla-regex, rgb-regex, rgba-regex. A copy of the source code may be downloaded from https://github.com/regexps/hsl-regex.git (hsl-regex), https://github.com/regexps/hsla-regex.git (hsla-regex), https://github.com/regexps/rgb-regex.git (rgb-regex), https://github.com/johnotander/rgba-regex.git (rgba-regex). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 John Otander
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: html-comment-regex. A copy of the source code may be downloaded from https://github.com/stevemao/html-comment-regex.git. This software contains the following license and notice below:
-
-Copyright 2018 Steve Mao
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: html-encoding-sniffer. A copy of the source code may be downloaded from https://github.com/jsdom/html-encoding-sniffer.git. This software contains the following license and notice below:
-
-Copyright © 2016 Domenic Denicola <d@domenic.me>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: html-entities. A copy of the source code may be downloaded from https://github.com/mdevils/node-html-entities.git. This software contains the following license and notice below:
-
-Copyright (c) 2013 Dulin Marat
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: html-minifier. A copy of the source code may be downloaded from git+https://github.com/kangax/html-minifier.git. This software contains the following license and notice below:
-
-Copyright (c) 2010-2018 Juriy "kangax" Zaytsev
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: htmlparser2. A copy of the source code may be downloaded from git://github.com/fb55/htmlparser2.git. This software contains the following license and notice below:
-
-Copyright 2010, 2011, Chris Winberry <chris@winberry.net>. All rights reserved.
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: http-errors. A copy of the source code may be downloaded from https://github.com/jshttp/http-errors.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Jonathan Ong me@jongleberry.com
-Copyright (c) 2016 Douglas Christopher Wilson doug@somethingdoug.com
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: http-parser-js. A copy of the source code may be downloaded from git://github.com/creationix/http-parser-js.git. This software contains the following license and notice below:
-
-Copyright (c) 2015 Tim Caswell (https://github.com/creationix) and other
-contributors. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-Some files from the tests folder are from joyent/node and mscedex/io.js, a fork
-of nodejs/io.js:
-
-- tests/iojs/test-http-parser-durability.js
-
-  This file is from https://github.com/mscdex/io.js/blob/js-http-parser/test/pummel/test-http-parser-durability.js
-  with modifications by Jan Schär (jscissr).
-
-  """
-  Copyright io.js contributors. All rights reserved.
-
-  Permission is hereby granted, free of charge, to any person obtaining a copy
-  of this software and associated documentation files (the "Software"), to
-  deal in the Software without restriction, including without limitation the
-  rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-  sell copies of the Software, and to permit persons to whom the Software is
-  furnished to do so, subject to the following conditions:
-
-  The above copyright notice and this permission notice shall be included in
-  all copies or substantial portions of the Software.
-
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-  IN THE SOFTWARE.
-  """
-
-- tests/fixtures/*
-  tests/parallel/*
-  tests/testpy/*
-  tests/common.js
-  tests/test.py
-  tests/utils.py
-
-  These files are from https://github.com/nodejs/node with changes by
-  Jan Schär (jscissr).
-
-  Node.js is licensed for use as follows:
-
-  """
-  Copyright Node.js contributors. All rights reserved.
-
-  Permission is hereby granted, free of charge, to any person obtaining a copy
-  of this software and associated documentation files (the "Software"), to
-  deal in the Software without restriction, including without limitation the
-  rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-  sell copies of the Software, and to permit persons to whom the Software is
-  furnished to do so, subject to the following conditions:
-
-  The above copyright notice and this permission notice shall be included in
-  all copies or substantial portions of the Software.
-
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-  IN THE SOFTWARE.
-  """
-
-  This license applies to parts of Node.js originating from the
-  https://github.com/joyent/node repository:
-
-  """
-  Copyright Joyent, Inc. and other Node contributors. All rights reserved.
-  Permission is hereby granted, free of charge, to any person obtaining a copy
-  of this software and associated documentation files (the "Software"), to
-  deal in the Software without restriction, including without limitation the
-  rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-  sell copies of the Software, and to permit persons to whom the Software is
-  furnished to do so, subject to the following conditions:
-
-  The above copyright notice and this permission notice shall be included in
-  all copies or substantial portions of the Software.
-
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-  IN THE SOFTWARE.
-  """
-
------
-
-The following software may be included in this product: http-proxy. A copy of the source code may be downloaded from https://github.com/nodejitsu/node-http-proxy.git. This software contains the following license and notice below:
-
-node-http-proxy
-
-  Copyright (c) 2010-2016 Charlie Robbins, Jarrett Cruger & the Contributors.
-
-  Permission is hereby granted, free of charge, to any person obtaining
-  a copy of this software and associated documentation files (the
-  "Software"), to deal in the Software without restriction, including
-  without limitation the rights to use, copy, modify, merge, publish,
-  distribute, sublicense, and/or sell copies of the Software, and to
-  permit persons to whom the Software is furnished to do so, subject to
-  the following conditions:
-
-  The above copyright notice and this permission notice shall be
-  included in all copies or substantial portions of the Software.
-
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: http-proxy-middleware. A copy of the source code may be downloaded from https://github.com/chimurai/http-proxy-middleware.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Steven Chim
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: https-browserify, stream-browserify. A copy of the source code may be downloaded from git://github.com/substack/https-browserify.git (https-browserify), git://github.com/browserify/stream-browserify.git (stream-browserify). This software contains the following license and notice below:
-
-This software is released under the MIT license:
-
-Copyright (c) James Halliday
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: iconv-lite. A copy of the source code may be downloaded from git://github.com/ashtuchkin/iconv-lite.git. This software contains the following license and notice below:
-
-Copyright (c) 2011 Alexander Shtuchkin
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: icss-utils. A copy of the source code may be downloaded from git+https://github.com/css-modules/icss-utils.git. This software contains the following license and notice below:
-
-ISC License (ISC)
-Copyright 2018 Glen Maddern
-
-Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: identity-obj-proxy. A copy of the source code may be downloaded from git+https://github.com/keyanzhang/identity-obj-proxy.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Keyan Zhang
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: ieee754. A copy of the source code may be downloaded from git://github.com/feross/ieee754.git. This software contains the following license and notice below:
-
-Copyright 2008 Fair Oaks Labs, Inc.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: iferr. A copy of the source code may be downloaded from https://github.com/shesek/iferr. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Nadav Ivgi
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: image-size. A copy of the source code may be downloaded from https://github.com/image-size/image-size.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright © 2017 Aditya Yadav, http://netroy.in
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: immer. A copy of the source code may be downloaded from https://github.com/mweststrate/immer.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 Michel Weststrate
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: immutable. A copy of the source code may be downloaded from git://github.com/facebook/immutable-js.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2014-present, Facebook, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: immutable. A copy of the source code may be downloaded from git://github.com/facebook/immutable-js.git. This software contains the following license and notice below:
-
-BSD License
-
-For Immutable JS software
-
-Copyright (c) 2014-2015, Facebook, Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
- * Neither the name Facebook nor the names of its contributors may be used to
-   endorse or promote products derived from this software without specific
-   prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: indexes-of. A copy of the source code may be downloaded from git://github.com/dominictarr/indexes-of.git. This software contains the following license and notice below:
-
-Copyright (c) 2013 Dominic Tarr
-
-Permission is hereby granted, free of charge,
-to any person obtaining a copy of this software and
-associated documentation files (the "Software"), to
-deal in the Software without restriction, including
-without limitation the rights to use, copy, modify,
-merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom
-the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: inflight. A copy of the source code may be downloaded from https://github.com/npm/inflight.git. This software contains the following license and notice below:
-
-The ISC License
-
-Copyright (c) Isaac Z. Schlueter
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: inherits. A copy of the source code may be downloaded from git://github.com/isaacs/inherits. This software contains the following license and notice below:
-
-The ISC License
-
-Copyright (c) Isaac Z. Schlueter
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
-OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: inquirer. A copy of the source code may be downloaded from https://github.com/SBoudrias/Inquirer.js.git. This software contains the following license and notice below:
-
-Copyright (c) 2012 Simon Boudrias
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: ipaddr.js. A copy of the source code may be downloaded from git://github.com/whitequark/ipaddr.js. This software contains the following license and notice below:
-
-Copyright (C) 2011-2017 whitequark <whitequark@whitequark.org>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: is-buffer, safe-buffer. A copy of the source code may be downloaded from git://github.com/feross/is-buffer.git (is-buffer), git://github.com/feross/safe-buffer.git (safe-buffer). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Feross Aboukhadijeh
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: is-color-stop. A copy of the source code may be downloaded from git+https://github.com/pigcan/is-color-stop.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 pigcan
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: is-descriptor, is-extendable, shallow-clone. A copy of the source code may be downloaded from https://github.com/jonschlinkert/is-descriptor.git (is-descriptor), https://github.com/jonschlinkert/is-extendable.git (is-extendable), https://github.com/jonschlinkert/shallow-clone.git (shallow-clone). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015-2017, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: is-regex. A copy of the source code may be downloaded from git://github.com/ljharb/is-regex.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: is-resolvable. A copy of the source code may be downloaded from https://github.com/shinnn/is-resolvable.git. This software contains the following license and notice below:
-
-ISC License (ISC)
-Copyright 2018 Shinnosuke Watanabe
-
-Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: isemail. A copy of the source code may be downloaded from git://github.com/hapijs/isemail. This software contains the following license and notice below:
-
-Copyright (c) 2014-2015, Eli Skeggs and Project contributors
-Copyright (c) 2013-2014, GlobeSherpa
-Copyright (c) 2008-2011, Dominic Sayers
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-    * The names of any contributors may not be used to endorse or promote
-      products derived from this software without specific prior written
-      permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-                                  *   *   *
-
-The complete list of contributors can be found at: https://github.com/hapijs/isemail/graphs/contributors
-Previously published under the 2-Clause-BSD license published here: https://github.com/hapijs/isemail/blob/v1.2.0/LICENSE
-
------
-
-The following software may be included in this product: isomorphic-fetch. A copy of the source code may be downloaded from https://github.com/matthew-andrews/isomorphic-fetch.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Matt Andrews
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: isstream. A copy of the source code may be downloaded from https://github.com/rvagg/isstream.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-=====================
-
-Copyright (c) 2015 Rod Vagg
----------------------------
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: istanbul-api, istanbul-lib-source-maps. A copy of the source code may be downloaded from git+ssh://git@github.com/istanbuljs/istanbuljs.git (istanbul-api), git+ssh://git@github.com/istanbuljs/istanbuljs.git (istanbul-lib-source-maps). This software contains the following license and notice below:
-
-Copyright 2015 Yahoo! Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-    * Neither the name of the Yahoo! Inc. nor the
-      names of its contributors may be used to endorse or promote products
-      derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL YAHOO! INC. BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: istanbul-lib-coverage, istanbul-lib-hook, istanbul-lib-instrument, istanbul-lib-report, istanbul-reports. A copy of the source code may be downloaded from git@github.com:istanbuljs/istanbuljs.git (istanbul-lib-coverage), git+ssh://git@github.com/istanbuljs/istanbuljs.git (istanbul-lib-hook), git@github.com:istanbuljs/istanbuljs.git (istanbul-lib-instrument), git@github.com:istanbuljs/istanbuljs.git (istanbul-lib-report), git@github.com:istanbuljs/istanbuljs (istanbul-reports). This software contains the following license and notice below:
-
-Copyright 2012-2015 Yahoo! Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-    * Neither the name of the Yahoo! Inc. nor the
-      names of its contributors may be used to endorse or promote products
-      derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL YAHOO! INC. BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: jest-environment-jsdom-fourteen. A copy of the source code may be downloaded from https://github.com/ianschmitz/jest-environment-jsdom-fourteen. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2019 Ian Schmitz
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: jest-watch-typeahead. A copy of the source code may be downloaded from https://github.com/jest-community/jest-watch-typeahead.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2018 Rogelio Guzman
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: joi, topo. A copy of the source code may be downloaded from git://github.com/hapijs/joi (joi), git://github.com/hapijs/topo (topo). This software contains the following license and notice below:
-
-Copyright (c) 2012-2018, Project contributors
-Copyright (c) 2012-2014, Walmart
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-    * The names of any contributors may not be used to endorse or promote
-      products derived from this software without specific prior written
-      permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: js-levenshtein. A copy of the source code may be downloaded from https://github.com/gustf/js-levenshtein.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 Gustaf Andersson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: js-tokens. A copy of the source code may be downloaded from https://github.com/lydell/js-tokens.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014, 2015, 2016, 2017, 2018 Simon Lydell
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: js-tokens, source-map-resolve. A copy of the source code may be downloaded from https://github.com/lydell/js-tokens.git (js-tokens), https://github.com/lydell/source-map-resolve.git (source-map-resolve). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014, 2015, 2016, 2017 Simon Lydell
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: js-yaml. A copy of the source code may be downloaded from https://github.com/nodeca/js-yaml.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (C) 2011-2015 by Vitaly Puzrin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: jsbn. A copy of the source code may be downloaded from https://github.com/andyperlitch/jsbn.git. This software contains the following license and notice below:
-
-Licensing
----------
-
-This software is covered under the following copyright:
-
-/*
- * Copyright (c) 2003-2005  Tom Wu
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
- * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
- *
- * IN NO EVENT SHALL TOM WU BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
- * INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY DAMAGES WHATSOEVER
- * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER OR NOT ADVISED OF
- * THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF LIABILITY, ARISING OUT
- * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- * In addition, the following condition applies:
- *
- * All redistributions must retain an intact copy of this copyright notice
- * and disclaimer.
- */
-
-Address all questions regarding this license to:
-
-  Tom Wu
-  tjw@cs.Stanford.EDU
-
------
-
-The following software may be included in this product: jsdom. A copy of the source code may be downloaded from https://github.com/jsdom/jsdom.git. This software contains the following license and notice below:
-
-Copyright (c) 2010 Elijah Insua
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: json-parse-better-errors. A copy of the source code may be downloaded from https://github.com/zkat/json-parse-better-errors. This software contains the following license and notice below:
-
-Copyright 2017 Kat Marchán
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: json2mq, react-slick. A copy of the source code may be downloaded from https://github.com/akiran/json2mq (json2mq), https://github.com/akiran/react-slick (react-slick). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Kiran Abburi
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: json3. A copy of the source code may be downloaded from git://github.com/bestiejs/json3.git. This software contains the following license and notice below:
-
-Copyright (c) 2012-2014 Kit Cambridge.
-http://kitcambridge.be/
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: json5. A copy of the source code may be downloaded from git+https://github.com/json5/json5.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2012-2018 Aseem Kishore, and [others].
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-[others]: https://github.com/json5/json5/contributors
-
------
-
-The following software may be included in this product: jsonfile. A copy of the source code may be downloaded from git@github.com:jprichardson/node-jsonfile.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2012-2015, JP Richardson <jprichardson@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
-(the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify,
- merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
-WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
-OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: killable. A copy of the source code may be downloaded from https://github.com/marten-de-vries/killable.git. This software contains the following license and notice below:
-
-Copyright 2014 Marten de Vries
-
-Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: kind-of. A copy of the source code may be downloaded from https://github.com/jonschlinkert/kind-of.git. This software contains the following license and notice below:
-
-Copyright (c) 2014-2015, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: kleur. A copy of the source code may be downloaded from https://github.com/lukeed/kleur.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Luke Edwards <luke.edwards05@gmail.com> (lukeed.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: last-call-webpack-plugin, optimize-css-assets-webpack-plugin. A copy of the source code may be downloaded from http://github.com/NMFR/last-call-webpack-plugin.git (last-call-webpack-plugin), http://github.com/NMFR/optimize-css-assets-webpack-plugin.git (optimize-css-assets-webpack-plugin). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Nuno Rodrigues
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: lazy-cache, map-cache, snapdragon, to-object-path. A copy of the source code may be downloaded from https://github.com/jonschlinkert/lazy-cache.git (lazy-cache), https://github.com/jonschlinkert/map-cache.git (map-cache), https://github.com/jonschlinkert/snapdragon.git (snapdragon), https://github.com/jonschlinkert/to-object-path.git (to-object-path). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015-2016, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: less. A copy of the source code may be downloaded from https://github.com/less/less.js.git. This software contains the following license and notice below:
-
-Apache License
-                        Version 2.0, January 2004
-                     http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-   "License" shall mean the terms and conditions for use, reproduction,
-   and distribution as defined by Sections 1 through 9 of this document.
-
-   "Licensor" shall mean the copyright owner or entity authorized by
-   the copyright owner that is granting the License.
-
-   "Legal Entity" shall mean the union of the acting entity and all
-   other entities that control, are controlled by, or are under common
-   control with that entity. For the purposes of this definition,
-   "control" means (i) the power, direct or indirect, to cause the
-   direction or management of such entity, whether by contract or
-   otherwise, or (ii) ownership of fifty percent (50%) or more of the
-   outstanding shares, or (iii) beneficial ownership of such entity.
-
-   "You" (or "Your") shall mean an individual or Legal Entity
-   exercising permissions granted by this License.
-
-   "Source" form shall mean the preferred form for making modifications,
-   including but not limited to software source code, documentation
-   source, and configuration files.
-
-   "Object" form shall mean any form resulting from mechanical
-   transformation or translation of a Source form, including but
-   not limited to compiled object code, generated documentation,
-   and conversions to other media types.
-
-   "Work" shall mean the work of authorship, whether in Source or
-   Object form, made available under the License, as indicated by a
-   copyright notice that is included in or attached to the work
-   (an example is provided in the Appendix below).
-
-   "Derivative Works" shall mean any work, whether in Source or Object
-   form, that is based on (or derived from) the Work and for which the
-   editorial revisions, annotations, elaborations, or other modifications
-   represent, as a whole, an original work of authorship. For the purposes
-   of this License, Derivative Works shall not include works that remain
-   separable from, or merely link (or bind by name) to the interfaces of,
-   the Work and Derivative Works thereof.
-
-   "Contribution" shall mean any work of authorship, including
-   the original version of the Work and any modifications or additions
-   to that Work or Derivative Works thereof, that is intentionally
-   submitted to Licensor for inclusion in the Work by the copyright owner
-   or by an individual or Legal Entity authorized to submit on behalf of
-   the copyright owner. For the purposes of this definition, "submitted"
-   means any form of electronic, verbal, or written communication sent
-   to the Licensor or its representatives, including but not limited to
-   communication on electronic mailing lists, source code control systems,
-   and issue tracking systems that are managed by, or on behalf of, the
-   Licensor for the purpose of discussing and improving the Work, but
-   excluding communication that is conspicuously marked or otherwise
-   designated in writing by the copyright owner as "Not a Contribution."
-
-   "Contributor" shall mean Licensor and any individual or Legal Entity
-   on behalf of whom a Contribution has been received by Licensor and
-   subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   copyright license to reproduce, prepare Derivative Works of,
-   publicly display, publicly perform, sublicense, and distribute the
-   Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   (except as stated in this section) patent license to make, have made,
-   use, offer to sell, sell, import, and otherwise transfer the Work,
-   where such license applies only to those patent claims licensable
-   by such Contributor that are necessarily infringed by their
-   Contribution(s) alone or by combination of their Contribution(s)
-   with the Work to which such Contribution(s) was submitted. If You
-   institute patent litigation against any entity (including a
-   cross-claim or counterclaim in a lawsuit) alleging that the Work
-   or a Contribution incorporated within the Work constitutes direct
-   or contributory patent infringement, then any patent licenses
-   granted to You under this License for that Work shall terminate
-   as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-   Work or Derivative Works thereof in any medium, with or without
-   modifications, and in Source or Object form, provided that You
-   meet the following conditions:
-
-   (a) You must give any other recipients of the Work or
-       Derivative Works a copy of this License; and
-
-   (b) You must cause any modified files to carry prominent notices
-       stating that You changed the files; and
-
-   (c) You must retain, in the Source form of any Derivative Works
-       that You distribute, all copyright, patent, trademark, and
-       attribution notices from the Source form of the Work,
-       excluding those notices that do not pertain to any part of
-       the Derivative Works; and
-
-   (d) If the Work includes a "NOTICE" text file as part of its
-       distribution, then any Derivative Works that You distribute must
-       include a readable copy of the attribution notices contained
-       within such NOTICE file, excluding those notices that do not
-       pertain to any part of the Derivative Works, in at least one
-       of the following places: within a NOTICE text file distributed
-       as part of the Derivative Works; within the Source form or
-       documentation, if provided along with the Derivative Works; or,
-       within a display generated by the Derivative Works, if and
-       wherever such third-party notices normally appear. The contents
-       of the NOTICE file are for informational purposes only and
-       do not modify the License. You may add Your own attribution
-       notices within Derivative Works that You distribute, alongside
-       or as an addendum to the NOTICE text from the Work, provided
-       that such additional attribution notices cannot be construed
-       as modifying the License.
-
-   You may add Your own copyright statement to Your modifications and
-   may provide additional or different license terms and conditions
-   for use, reproduction, or distribution of Your modifications, or
-   for any such Derivative Works as a whole, provided Your use,
-   reproduction, and distribution of the Work otherwise complies with
-   the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-   any Contribution intentionally submitted for inclusion in the Work
-   by You to the Licensor shall be under the terms and conditions of
-   this License, without any additional terms or conditions.
-   Notwithstanding the above, nothing herein shall supersede or modify
-   the terms of any separate license agreement you may have executed
-   with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-   names, trademarks, service marks, or product names of the Licensor,
-   except as required for reasonable and customary use in describing the
-   origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-   agreed to in writing, Licensor provides the Work (and each
-   Contributor provides its Contributions) on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-   implied, including, without limitation, any warranties or conditions
-   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-   PARTICULAR PURPOSE. You are solely responsible for determining the
-   appropriateness of using or redistributing the Work and assume any
-   risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-   whether in tort (including negligence), contract, or otherwise,
-   unless required by applicable law (such as deliberate and grossly
-   negligent acts) or agreed to in writing, shall any Contributor be
-   liable to You for damages, including any direct, indirect, special,
-   incidental, or consequential damages of any character arising as a
-   result of this License or out of the use or inability to use the
-   Work (including but not limited to damages for loss of goodwill,
-   work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses), even if such Contributor
-   has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-   the Work or Derivative Works thereof, You may choose to offer,
-   and charge a fee for, acceptance of support, warranty, indemnity,
-   or other liability obligations and/or rights consistent with this
-   License. However, in accepting such obligations, You may act only
-   on Your own behalf and on Your sole responsibility, not on behalf
-   of any other Contributor, and only if You agree to indemnify,
-   defend, and hold each Contributor harmless for any liability
-   incurred by, or claims asserted against, such Contributor by reason
-   of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
------
-
-The following software may be included in this product: levn, optionator, prelude-ls, type-check. A copy of the source code may be downloaded from git://github.com/gkz/levn.git (levn), git://github.com/gkz/optionator.git (optionator), git://github.com/gkz/prelude-ls.git (prelude-ls), git://github.com/gkz/type-check.git (type-check). This software contains the following license and notice below:
-
-Copyright (c) George Zahariev
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: loader-fs-cache. This software contains the following license and notice below:
-
-Copyright (c) 2014-2016 Ade Viankakrisna Fadlil <viankakrisna@gmail.com>
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: loader-runner, tapable. A copy of the source code may be downloaded from git+https://github.com/webpack/loader-runner.git (loader-runner), http://github.com/webpack/tapable.git (tapable). This software contains the following license and notice below:
-
-The MIT License
-
-Copyright (c) Tobias Koppers @sokra
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: lodash. A copy of the source code may be downloaded from https://github.com/lodash/lodash.git. This software contains the following license and notice below:
-
-Copyright JS Foundation and other contributors <https://js.foundation/>
-
-Based on Underscore.js, copyright Jeremy Ashkenas,
-DocumentCloud and Investigative Reporters & Editors <http://underscorejs.org/>
-
-This software consists of voluntary contributions made by many
-individuals. For exact contribution history, see the revision history
-available at https://github.com/lodash/lodash
-
-The following license applies to all parts of this software except as
-documented below:
-
-====
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-====
-
-Copyright and related rights for sample code are waived via CC0. Sample
-code is defined as all source code displayed within the prose of the
-documentation.
-
-CC0: http://creativecommons.org/publicdomain/zero/1.0/
-
-====
-
-Files located in the node_modules and vendor directories are externally
-maintained libraries used by this software which have their own
-licenses; we recommend you read them, as their terms may differ from the
-terms above.
-
------
-
-The following software may be included in this product: lodash._getnative, lodash.isarray, lodash.keys. A copy of the source code may be downloaded from https://github.com/lodash/lodash.git (lodash._getnative), https://github.com/lodash/lodash.git (lodash.isarray), https://github.com/lodash/lodash.git (lodash.keys). This software contains the following license and notice below:
-
-Copyright 2012-2015 The Dojo Foundation <http://dojofoundation.org/>
-Based on Underscore.js, copyright 2009-2015 Jeremy Ashkenas,
-DocumentCloud and Investigative Reporters & Editors <http://underscorejs.org/>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: lodash._reinterpolate. A copy of the source code may be downloaded from https://github.com/lodash/lodash.git. This software contains the following license and notice below:
-
-Copyright 2012-2015 The Dojo Foundation <http://dojofoundation.org/>
-Based on Underscore.js 1.7.0, copyright 2009-2015 Jeremy Ashkenas,
-DocumentCloud and Investigative Reporters & Editors <http://underscorejs.org/>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: lodash.debounce, lodash.flow, lodash.isarguments, lodash.memoize, lodash.sortby, lodash.tail, lodash.template, lodash.templatesettings, lodash.throttle, lodash.unescape, lodash.uniq. A copy of the source code may be downloaded from https://github.com/lodash/lodash.git (lodash.debounce), https://github.com/lodash/lodash.git (lodash.flow), https://github.com/lodash/lodash.git (lodash.isarguments), https://github.com/lodash/lodash.git (lodash.memoize), https://github.com/lodash/lodash.git (lodash.sortby), https://github.com/lodash/lodash.git (lodash.tail), https://github.com/lodash/lodash.git (lodash.template), https://github.com/lodash/lodash.git (lodash.templatesettings), https://github.com/lodash/lodash.git (lodash.throttle), https://github.com/lodash/lodash.git (lodash.unescape), https://github.com/lodash/lodash.git (lodash.uniq). This software contains the following license and notice below:
-
-Copyright jQuery Foundation and other contributors <https://jquery.org/>
-
-Based on Underscore.js, copyright Jeremy Ashkenas,
-DocumentCloud and Investigative Reporters & Editors <http://underscorejs.org/>
-
-This software consists of voluntary contributions made by many
-individuals. For exact contribution history, see the revision history
-available at https://github.com/lodash/lodash
-
-The following license applies to all parts of this software except as
-documented below:
-
-====
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-====
-
-Copyright and related rights for sample code are waived via CC0. Sample
-code is defined as all source code displayed within the prose of the
-documentation.
-
-CC0: http://creativecommons.org/publicdomain/zero/1.0/
-
-====
-
-Files located in the node_modules and vendor directories are externally
-maintained libraries used by this software which have their own
-licenses; we recommend you read them, as their terms may differ from the
-terms above.
-
------
-
-The following software may be included in this product: loose-envify. A copy of the source code may be downloaded from git://github.com/zertosh/loose-envify.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Andres Suarez <zertosh@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: makeerror, tmpl. A copy of the source code may be downloaded from https://github.com/daaku/nodejs-makeerror (makeerror), https://github.com/daaku/nodejs-tmpl (tmpl). This software contains the following license and notice below:
-
-BSD License
-
-Copyright (c) 2014, Naitik Shah. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
- * Neither the name Naitik Shah nor the names of its contributors may be used to
-   endorse or promote products derived from this software without specific
-   prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: map-age-cleaner. A copy of the source code may be downloaded from https://github.com/SamVerschueren/map-age-cleaner.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) Sam Verschueren <sam.verschueren@gmail.com> (github.com/SamVerschueren)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: map-visit, to-regex-range, union-value. A copy of the source code may be downloaded from https://github.com/jonschlinkert/map-visit.git (map-visit), https://github.com/micromatch/to-regex-range.git (to-regex-range), https://github.com/jonschlinkert/union-value.git (union-value). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015-2017, Jon Schlinkert
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: mdn-data. A copy of the source code may be downloaded from https://github.com/mdn/data.git. This software contains the following license and notice below:
-
-Mozilla Public License Version 2.0
-==================================
-
-1. Definitions
---------------
-
-1.1. "Contributor"
-    means each individual or legal entity that creates, contributes to
-    the creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
-    means the combination of the Contributions of others (if any) used
-    by a Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
-    means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
-    means Source Code Form to which the initial Contributor has attached
-    the notice in Exhibit A, the Executable Form of such Source Code
-    Form, and Modifications of such Source Code Form, in each case
-    including portions thereof.
-
-1.5. "Incompatible With Secondary Licenses"
-    means
-
-    (a) that the initial Contributor has attached the notice described
-        in Exhibit B to the Covered Software; or
-
-    (b) that the Covered Software was made available under the terms of
-        version 1.1 or earlier of the License, but not also under the
-        terms of a Secondary License.
-
-1.6. "Executable Form"
-    means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
-    means a work that combines Covered Software with other material, in
-    a separate file or files, that is not Covered Software.
-
-1.8. "License"
-    means this document.
-
-1.9. "Licensable"
-    means having the right to grant, to the maximum extent possible,
-    whether at the time of the initial grant or subsequently, any and
-    all of the rights conveyed by this License.
-
-1.10. "Modifications"
-    means any of the following:
-
-    (a) any file in Source Code Form that results from an addition to,
-        deletion from, or modification of the contents of Covered
-        Software; or
-
-    (b) any new file in Source Code Form that contains any Covered
-        Software.
-
-1.11. "Patent Claims" of a Contributor
-    means any patent claim(s), including without limitation, method,
-    process, and apparatus claims, in any patent Licensable by such
-    Contributor that would be infringed, but for the grant of the
-    License, by the making, using, selling, offering for sale, having
-    made, import, or transfer of either its Contributions or its
-    Contributor Version.
-
-1.12. "Secondary License"
-    means either the GNU General Public License, Version 2.0, the GNU
-    Lesser General Public License, Version 2.1, the GNU Affero General
-    Public License, Version 3.0, or any later versions of those
-    licenses.
-
-1.13. "Source Code Form"
-    means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
-    means an individual or a legal entity exercising rights under this
-    License. For legal entities, "You" includes any entity that
-    controls, is controlled by, or is under common control with You. For
-    purposes of this definition, "control" means (a) the power, direct
-    or indirect, to cause the direction or management of such entity,
-    whether by contract or otherwise, or (b) ownership of more than
-    fifty percent (50%) of the outstanding shares or beneficial
-    ownership of such entity.
-
-2. License Grants and Conditions
---------------------------------
-
-2.1. Grants
-
-Each Contributor hereby grants You a world-wide, royalty-free,
-non-exclusive license:
-
-(a) under intellectual property rights (other than patent or trademark)
-    Licensable by such Contributor to use, reproduce, make available,
-    modify, display, perform, distribute, and otherwise exploit its
-    Contributions, either on an unmodified basis, with Modifications, or
-    as part of a Larger Work; and
-
-(b) under Patent Claims of such Contributor to make, use, sell, offer
-    for sale, have made, import, and otherwise transfer either its
-    Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-The licenses granted in Section 2.1 with respect to any Contribution
-become effective for each Contribution on the date the Contributor first
-distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-The licenses granted in this Section 2 are the only rights granted under
-this License. No additional rights or licenses will be implied from the
-distribution or licensing of Covered Software under this License.
-Notwithstanding Section 2.1(b) above, no patent license is granted by a
-Contributor:
-
-(a) for any code that a Contributor has removed from Covered Software;
-    or
-
-(b) for infringements caused by: (i) Your and any other third party's
-    modifications of Covered Software, or (ii) the combination of its
-    Contributions with other software (except as part of its Contributor
-    Version); or
-
-(c) under Patent Claims infringed by Covered Software in the absence of
-    its Contributions.
-
-This License does not grant any rights in the trademarks, service marks,
-or logos of any Contributor (except as may be necessary to comply with
-the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-No Contributor makes additional grants as a result of Your choice to
-distribute the Covered Software under a subsequent version of this
-License (see Section 10.2) or under the terms of a Secondary License (if
-permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-Each Contributor represents that the Contributor believes its
-Contributions are its original creation(s) or it has sufficient rights
-to grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-This License is not intended to limit any rights You have under
-applicable copyright doctrines of fair use, fair dealing, or other
-equivalents.
-
-2.7. Conditions
-
-Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
-in Section 2.1.
-
-3. Responsibilities
--------------------
-
-3.1. Distribution of Source Form
-
-All distribution of Covered Software in Source Code Form, including any
-Modifications that You create or to which You contribute, must be under
-the terms of this License. You must inform recipients that the Source
-Code Form of the Covered Software is governed by the terms of this
-License, and how they can obtain a copy of this License. You may not
-attempt to alter or restrict the recipients' rights in the Source Code
-Form.
-
-3.2. Distribution of Executable Form
-
-If You distribute Covered Software in Executable Form then:
-
-(a) such Covered Software must also be made available in Source Code
-    Form, as described in Section 3.1, and You must inform recipients of
-    the Executable Form how they can obtain a copy of such Source Code
-    Form by reasonable means in a timely manner, at a charge no more
-    than the cost of distribution to the recipient; and
-
-(b) You may distribute such Executable Form under the terms of this
-    License, or sublicense it under different terms, provided that the
-    license for the Executable Form does not attempt to limit or alter
-    the recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-You may create and distribute a Larger Work under terms of Your choice,
-provided that You also comply with the requirements of this License for
-the Covered Software. If the Larger Work is a combination of Covered
-Software with a work governed by one or more Secondary Licenses, and the
-Covered Software is not Incompatible With Secondary Licenses, this
-License permits You to additionally distribute such Covered Software
-under the terms of such Secondary License(s), so that the recipient of
-the Larger Work may, at their option, further distribute the Covered
-Software under the terms of either this License or such Secondary
-License(s).
-
-3.4. Notices
-
-You may not remove or alter the substance of any license notices
-(including copyright notices, patent notices, disclaimers of warranty,
-or limitations of liability) contained within the Source Code Form of
-the Covered Software, except that You may alter any license notices to
-the extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-You may choose to offer, and to charge a fee for, warranty, support,
-indemnity or liability obligations to one or more recipients of Covered
-Software. However, You may do so only on Your own behalf, and not on
-behalf of any Contributor. You must make it absolutely clear that any
-such warranty, support, indemnity, or liability obligation is offered by
-You alone, and You hereby agree to indemnify every Contributor for any
-liability incurred by such Contributor as a result of warranty, support,
-indemnity or liability terms You offer. You may include additional
-disclaimers of warranty and limitations of liability specific to any
-jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
----------------------------------------------------
-
-If it is impossible for You to comply with any of the terms of this
-License with respect to some or all of the Covered Software due to
-statute, judicial order, or regulation then You must: (a) comply with
-the terms of this License to the maximum extent possible; and (b)
-describe the limitations and the code they affect. Such description must
-be placed in a text file included with all distributions of the Covered
-Software under this License. Except to the extent prohibited by statute
-or regulation, such description must be sufficiently detailed for a
-recipient of ordinary skill to be able to understand it.
-
-5. Termination
---------------
-
-5.1. The rights granted under this License will terminate automatically
-if You fail to comply with any of its terms. However, if You become
-compliant, then the rights granted under this License from a particular
-Contributor are reinstated (a) provisionally, unless and until such
-Contributor explicitly and finally terminates Your grants, and (b) on an
-ongoing basis, if such Contributor fails to notify You of the
-non-compliance by some reasonable means prior to 60 days after You have
-come back into compliance. Moreover, Your grants from a particular
-Contributor are reinstated on an ongoing basis if such Contributor
-notifies You of the non-compliance by some reasonable means, this is the
-first time You have received notice of non-compliance with this License
-from such Contributor, and You become compliant prior to 30 days after
-Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-infringement claim (excluding declaratory judgment actions,
-counter-claims, and cross-claims) alleging that a Contributor Version
-directly or indirectly infringes any patent, then the rights granted to
-You by any and all Contributors for the Covered Software under Section
-2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all
-end user license agreements (excluding distributors and resellers) which
-have been validly granted by You or Your distributors under this License
-prior to termination shall survive termination.
-
-************************************************************************
-*                                                                      *
-*  6. Disclaimer of Warranty                                           *
-*  -------------------------                                           *
-*                                                                      *
-*  Covered Software is provided under this License on an "as is"       *
-*  basis, without warranty of any kind, either expressed, implied, or  *
-*  statutory, including, without limitation, warranties that the       *
-*  Covered Software is free of defects, merchantable, fit for a        *
-*  particular purpose or non-infringing. The entire risk as to the     *
-*  quality and performance of the Covered Software is with You.        *
-*  Should any Covered Software prove defective in any respect, You     *
-*  (not any Contributor) assume the cost of any necessary servicing,   *
-*  repair, or correction. This disclaimer of warranty constitutes an   *
-*  essential part of this License. No use of any Covered Software is   *
-*  authorized under this License except under this disclaimer.         *
-*                                                                      *
-************************************************************************
-
-************************************************************************
-*                                                                      *
-*  7. Limitation of Liability                                          *
-*  --------------------------                                          *
-*                                                                      *
-*  Under no circumstances and under no legal theory, whether tort      *
-*  (including negligence), contract, or otherwise, shall any           *
-*  Contributor, or anyone who distributes Covered Software as          *
-*  permitted above, be liable to You for any direct, indirect,         *
-*  special, incidental, or consequential damages of any character      *
-*  including, without limitation, damages for lost profits, loss of    *
-*  goodwill, work stoppage, computer failure or malfunction, or any    *
-*  and all other commercial damages or losses, even if such party      *
-*  shall have been informed of the possibility of such damages. This   *
-*  limitation of liability shall not apply to liability for death or   *
-*  personal injury resulting from such party's negligence to the       *
-*  extent applicable law prohibits such limitation. Some               *
-*  jurisdictions do not allow the exclusion or limitation of           *
-*  incidental or consequential damages, so this exclusion and          *
-*  limitation may not apply to You.                                    *
-*                                                                      *
-************************************************************************
-
-8. Litigation
--------------
-
-Any litigation relating to this License may be brought only in the
-courts of a jurisdiction where the defendant maintains its principal
-place of business and such litigation shall be governed by laws of that
-jurisdiction, without reference to its conflict-of-law provisions.
-Nothing in this Section shall prevent a party's ability to bring
-cross-claims or counter-claims.
-
-9. Miscellaneous
-----------------
-
-This License represents the complete agreement concerning the subject
-matter hereof. If any provision of this License is held to be
-unenforceable, such provision shall be reformed only to the extent
-necessary to make it enforceable. Any law or regulation which provides
-that the language of a contract shall be construed against the drafter
-shall not be used to construe this License against a Contributor.
-
-10. Versions of the License
----------------------------
-
-10.1. New Versions
-
-Mozilla Foundation is the license steward. Except as provided in Section
-10.3, no one other than the license steward has the right to modify or
-publish new versions of this License. Each version will be given a
-distinguishing version number.
-
-10.2. Effect of New Versions
-
-You may distribute the Covered Software under the terms of the version
-of the License under which You originally received the Covered Software,
-or under the terms of any subsequent version published by the license
-steward.
-
-10.3. Modified Versions
-
-If you create software not governed by this License, and you want to
-create a new license for such software, you may create and use a
-modified version of this License if you rename the license and remove
-any references to the name of the license steward (except to note that
-such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-Licenses
-
-If You choose to distribute Source Code Form that is Incompatible With
-Secondary Licenses under the terms of this version of the License, the
-notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
--------------------------------------------
-
-  This Source Code Form is subject to the terms of the Mozilla Public
-  License, v. 2.0. If a copy of the MPL was not distributed with this
-  file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular
-file, then You may include the notice in a location (such as a LICENSE
-file in a relevant directory) where a recipient would be likely to look
-for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
----------------------------------------------------------
-
-  This Source Code Form is "Incompatible With Secondary Licenses", as
-  defined by the Mozilla Public License, v. 2.0.
-
------
-
-The following software may be included in this product: merge-deep. A copy of the source code may be downloaded from https://github.com/jonschlinkert/merge-deep.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-present, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: merge-descriptors. A copy of the source code may be downloaded from https://github.com/component/merge-descriptors.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2013 Jonathan Ong <me@jongleberry.com>
-Copyright (c) 2015 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: merge-stream. A copy of the source code may be downloaded from https://github.com/grncdr/merge-stream.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Stephen Sugden <me@stephensugden.com> (stephensugden.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: merge2. A copy of the source code may be downloaded from git@github.com:teambition/merge2.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-2018 Teambition
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: methods. A copy of the source code may be downloaded from https://github.com/jshttp/methods.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2013-2014 TJ Holowaychuk <tj@vision-media.ca>
-Copyright (c) 2015-2016 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: mime. A copy of the source code may be downloaded from https://github.com/broofa/node-mime. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2010 Benjamin Thomas, Robert Kieffer
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: minimalistic-assert. A copy of the source code may be downloaded from https://github.com/calvinmetcalf/minimalistic-assert.git. This software contains the following license and notice below:
-
-Copyright 2015 Calvin Metcalf
-
-Permission to use, copy, modify, and/or distribute this software for any purpose
-with or without fee is hereby granted, provided that the above copyright notice
-and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: minipass, npm-bundled. A copy of the source code may be downloaded from git+https://github.com/isaacs/minipass.git (minipass), git+https://github.com/npm/npm-bundled.git (npm-bundled). This software contains the following license and notice below:
-
-The ISC License
-
-Copyright (c) npm, Inc. and Contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: minizlib. A copy of the source code may be downloaded from git+https://github.com/isaacs/minizlib.git. This software contains the following license and notice below:
-
-Minizlib was created by Isaac Z. Schlueter.
-It is a derivative work of the Node.js project.
-
-"""
-Copyright Isaac Z. Schlueter and Contributors
-Copyright Node.js contributors. All rights reserved.
-Copyright Joyent, Inc. and other Node contributors. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the "Software"),
-to deal in the Software without restriction, including without limitation
-the rights to use, copy, modify, merge, publish, distribute, sublicense,
-and/or sell copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-"""
-
------
-
-The following software may be included in this product: mississippi. A copy of the source code may be downloaded from git+https://github.com/maxogden/mississippi.git. This software contains the following license and notice below:
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: mkdirp, optimist. A copy of the source code may be downloaded from https://github.com/substack/node-mkdirp.git (mkdirp), http://github.com/substack/node-optimist.git (optimist). This software contains the following license and notice below:
-
-Copyright 2010 James Halliday (mail@substack.net)
-
-This project is free software released under the MIT/X11 license:
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: moment. A copy of the source code may be downloaded from https://github.com/moment/moment.git. This software contains the following license and notice below:
-
-Copyright (c) JS Foundation and other contributors
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: ms. A copy of the source code may be downloaded from https://github.com/zeit/ms.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Zeit, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: mutationobserver-shim. A copy of the source code may be downloaded from github.com/megawac/MutationObserver.js. This software contains the following license and notice below:
-
-Copyright © 2014 Graeme Yeates <yeatesgraeme@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: nan. A copy of the source code may be downloaded from git://github.com/nodejs/nan.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-=====================
-
-Copyright (c) 2018 NAN contributors
------------------------------------
-
-*NAN contributors listed at <https://github.com/nodejs/nan#contributors>*
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: nanomatch, to-regex. A copy of the source code may be downloaded from https://github.com/micromatch/nanomatch.git (nanomatch), https://github.com/jonschlinkert/to-regex.git (to-regex). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016-2018, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: needle. A copy of the source code may be downloaded from https://github.com/tomas/needle.git. This software contains the following license and notice below:
-
-Copyright (c) Fork, Ltd.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: negotiator. A copy of the source code may be downloaded from https://github.com/jshttp/negotiator.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2012-2014 Federico Romero
-Copyright (c) 2012-2014 Isaac Z. Schlueter
-Copyright (c) 2014-2015 Douglas Christopher Wilson
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: neo-async. A copy of the source code may be downloaded from git@github.com:suguru03/neo-async.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2014-2018 Suguru Motegi
-Based on Async.js, Copyright Caolan McMahon
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: nice-try. A copy of the source code may be downloaded from https://github.com/electerious/nice-try.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2018 Tobias Reich
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: node-fetch. A copy of the source code may be downloaded from https://github.com/bitinn/node-fetch.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 David Frank
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: node-forge. A copy of the source code may be downloaded from https://github.com/digitalbazaar/forge. This software contains the following license and notice below:
-
-You may use the Forge project under the terms of either the BSD License or the
-GNU General Public License (GPL) Version 2.
-
-The BSD License is recommended for most projects. It is simple and easy to
-understand and it places almost no restrictions on what you can do with the
-Forge project.
-
-If the GPL suits your project better you are also free to use Forge under
-that license.
-
-You don't have to do anything special to choose one license or the other and
-you don't have to notify anyone which license you are using. You are free to
-use this project in commercial projects as long as the copyright header is
-left intact.
-
-If you are a commercial entity and use this set of libraries in your
-commercial software then reasonable payment to Digital Bazaar, if you can
-afford it, is not required but is expected and would be appreciated. If this
-library saves you time, then it's saving you money. The cost of developing
-the Forge software was on the order of several hundred hours and tens of
-thousands of dollars. We are attempting to strike a balance between helping
-the development community while not being taken advantage of by lucrative
-commercial entities for our efforts.
-
--------------------------------------------------------------------------------
-New BSD License (3-clause)
-Copyright (c) 2010, Digital Bazaar, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-    * Neither the name of Digital Bazaar, Inc. nor the
-      names of its contributors may be used to endorse or promote products
-      derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL DIGITAL BAZAAR BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
--------------------------------------------------------------------------------
-        GNU GENERAL PUBLIC LICENSE
-           Version 2, June 1991
-
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.
- 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-          Preamble
-
-  The licenses for most software are designed to take away your
-freedom to share and change it.  By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users.  This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it.  (Some other Free Software Foundation software is covered by
-the GNU Lesser General Public License instead.)  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
-
-  To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have.  You must make sure that they, too, receive or can get the
-source code.  And you must show them these terms so they know their
-rights.
-
-  We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-  Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software.  If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-  Finally, any free program is threatened constantly by software
-patents.  We wish to avoid the danger that redistributors of a free
-program will individually obtain patent licenses, in effect making the
-program proprietary.  To prevent this, we have made it clear that any
-patent must be licensed for everyone's free use or not licensed at all.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-        GNU GENERAL PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-  0. This License applies to any program or other work which contains
-a notice placed by the copyright holder saying it may be distributed
-under the terms of this General Public License.  The "Program", below,
-refers to any such program or work, and a "work based on the Program"
-means either the Program or any derivative work under copyright law:
-that is to say, a work containing the Program or a portion of it,
-either verbatim or with modifications and/or translated into another
-language.  (Hereinafter, translation is included without limitation in
-the term "modification".)  Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope.  The act of
-running the Program is not restricted, and the output from the Program
-is covered only if its contents constitute a work based on the
-Program (independent of having been made by running the Program).
-Whether that is true depends on what the Program does.
-
-  1. You may copy and distribute verbatim copies of the Program's
-source code as you receive it, in any medium, provided that you
-conspicuously and appropriately publish on each copy an appropriate
-copyright notice and disclaimer of warranty; keep intact all the
-notices that refer to this License and to the absence of any warranty;
-and give any other recipients of the Program a copy of this License
-along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-  2. You may modify your copy or copies of the Program or any portion
-of it, thus forming a work based on the Program, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any
-    part thereof, to be licensed as a whole at no charge to all third
-    parties under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a
-    notice that there is no warranty (or else, saying that you provide
-    a warranty) and that users may redistribute the program under
-    these conditions, and telling the user how to view a copy of this
-    License.  (Exception: if the Program itself is interactive but
-    does not normally print such an announcement, your work based on
-    the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole.  If
-identifiable sections of that work are not derived from the Program,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works.  But when you
-distribute the same sections as part of a whole which is a work based
-on the Program, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
-  3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections
-    1 and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your
-    cost of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer
-    to distribute corresponding source code.  (This alternative is
-    allowed only for noncommercial distribution and only if you
-    received the program in object code or executable form with such
-    an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it.  For an executable work, complete source
-code means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to
-control compilation and installation of the executable.  However, as a
-special exception, the source code distributed need not include
-anything that is normally distributed (in either source or binary
-form) with the major components (compiler, kernel, and so on) of the
-operating system on which the executable runs, unless that component
-itself accompanies the executable.
-
-If distribution of executable or object code is made by offering
-access to copy from a designated place, then offering equivalent
-access to copy the source code from the same place counts as
-distribution of the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
-  4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License.  Any attempt
-otherwise to copy, modify, sublicense or distribute the Program is
-void, and will automatically terminate your rights under this License.
-However, parties who have received copies, or rights, from you under
-this License will not have their licenses terminated so long as such
-parties remain in full compliance.
-
-  5. You are not required to accept this License, since you have not
-signed it.  However, nothing else grants you permission to modify or
-distribute the Program or its derivative works.  These actions are
-prohibited by law if you do not accept this License.  Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
-
-  6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions.  You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties to
-this License.
-
-  7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Program at all.  For example, if a patent
-license would not permit royalty-free redistribution of the Program by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is
-implemented by public license practices.  Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
-  8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License
-may add an explicit geographical distribution limitation excluding
-those countries, so that distribution is permitted only in or among
-countries not thus excluded.  In such case, this License incorporates
-the limitation as if written in the body of this License.
-
-  9. The Free Software Foundation may publish revised and/or new versions
-of the General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-Each version is given a distinguishing version number.  If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and conditions
-either of that version or of any later version published by the Free
-Software Foundation.  If the Program does not specify a version number of
-this License, you may choose any version ever published by the Free Software
-Foundation.
-
-  10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the author
-to ask for permission.  For software which is copyrighted by the Free
-Software Foundation, write to the Free Software Foundation; we sometimes
-make exceptions for this.  Our decision will be guided by the two goals
-of preserving the free status of all derivatives of our free software and
-of promoting the sharing and reuse of software generally.
-
-          NO WARRANTY
-
-  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
-FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
-OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
-PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
-OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
-TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
-PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
-REPAIR OR CORRECTION.
-
-  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
-
------
-
-The following software may be included in this product: node-int64. A copy of the source code may be downloaded from https://github.com/broofa/node-int64. This software contains the following license and notice below:
-
-Copyright (c) 2014 Robert Kieffer
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: node-libs-browser. A copy of the source code may be downloaded from git+https://github.com/webpack/node-libs-browser.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2012 Tobias Koppers
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: node-notifier. A copy of the source code may be downloaded from git+ssh://git@github.com/mikaelbr/node-notifier.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 Mikael Brevik
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: node-pre-gyp. A copy of the source code may be downloaded from git://github.com/mapbox/node-pre-gyp.git. This software contains the following license and notice below:
-
-Copyright (c), Mapbox
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright notice,
-      this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright notice,
-      this list of conditions and the following disclaimer in the documentation
-      and/or other materials provided with the distribution.
-    * Neither the name of node-pre-gyp nor the names of its contributors
-      may be used to endorse or promote products derived from this software
-      without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: node-releases. A copy of the source code may be downloaded from git+https://github.com/chicoxyzzy/node-releases.git. This software contains the following license and notice below:
-
-The MIT License
-
-Copyright (c) 2017 Sergey Rubanov (https://github.com/chicoxyzzy)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: normalize-package-data. A copy of the source code may be downloaded from git://github.com/npm/normalize-package-data.git. This software contains the following license and notice below:
-
-This package contains code originally written by Isaac Z. Schlueter.
-Used with permission.
-
-Copyright (c) Meryn Stol ("Author")
-All rights reserved.
-
-The BSD License
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
-1. Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
-   notice, this list of conditions and the following disclaimer in the
-   documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: num2fraction, postcss-media-minmax. A copy of the source code may be downloaded from git@github.com:yisibl/num2fraction.git (num2fraction), https://github.com/postcss/postcss-media-minmax.git (postcss-media-minmax). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 PostCSS
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: nwsapi. A copy of the source code may be downloaded from git://github.com/dperini/nwsapi.git. This software contains the following license and notice below:
-
-Copyright (c) 2007-2019 Diego Perini (http://www.iport.it/)
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: object-copy, static-extend. A copy of the source code may be downloaded from https://github.com/jonschlinkert/object-copy.git (object-copy), https://github.com/jonschlinkert/static-extend.git (static-extend). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: object-hash. A copy of the source code may be downloaded from https://github.com/puleos/object-hash. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 object-hash contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: object-keys. A copy of the source code may be downloaded from git://github.com/ljharb/object-keys.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (C) 2013 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: object.assign. A copy of the source code may be downloaded from git://github.com/ljharb/object.assign.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: object.fromentries. A copy of the source code may be downloaded from git://github.com/es-shims/Object.fromEntries.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2018 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: obuf. A copy of the source code may be downloaded from git@github.com:indutny/offset-buffer. This software contains the following license and notice below:
-
-Copyright Fedor Indutny, 2015.
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to permit
-persons to whom the Software is furnished to do so, subject to the
-following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
-NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: omit.js. A copy of the source code may be downloaded from git+https://github.com/benjycui/omit.js.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2016 Benjy Cui
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: on-finished. A copy of the source code may be downloaded from https://github.com/jshttp/on-finished.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2013 Jonathan Ong <me@jongleberry.com>
-Copyright (c) 2014 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: original, querystringify, requires-port, url-parse. A copy of the source code may be downloaded from https://github.com/unshiftio/original (original), https://github.com/unshiftio/querystringify (querystringify), https://github.com/unshiftio/requires-port (requires-port), https://github.com/unshiftio/url-parse.git (url-parse). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Unshift.io, Arnout Kazemier,  the Contributors.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: os-browserify. A copy of the source code may be downloaded from http://github.com/CoderPuppy/os-browserify.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2017 CoderPuppy
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: pako. A copy of the source code may be downloaded from https://github.com/nodeca/pako.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (C) 2014-2017 by Vitaly Puzrin and Andrei Tuputcyn
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: parallel-transform. A copy of the source code may be downloaded from git://github.com/mafintosh/parallel-transform. This software contains the following license and notice below:
-
-Copyright 2013 Mathias Buus
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: parse-asn1. A copy of the source code may be downloaded from git://github.com/crypto-browserify/parse-asn1.git. This software contains the following license and notice below:
-
-Copyright (c) 2017, crypto-browserify contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: parse5. A copy of the source code may be downloaded from git://github.com/inikulin/parse5.git. This software contains the following license and notice below:
-
-Copyright (c) 2013-2016 Ivan Nikulin (ifaaan@gmail.com, https://github.com/inikulin)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: parseurl. A copy of the source code may be downloaded from https://github.com/pillarjs/parseurl.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2014 Jonathan Ong <me@jongleberry.com>
-Copyright (c) 2014-2017 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: path-dirname. A copy of the source code may be downloaded from https://github.com/es128/path-dirname.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Elan Shanker and Node.js contributors. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: path-is-inside. A copy of the source code may be downloaded from https://github.com/domenic/path-is-inside.git. This software contains the following license and notice below:
-
-Dual licensed under WTFPL and MIT:
-
----
-
-Copyright © 2013–2016 Domenic Denicola <d@domenic.me>
-
-This work is free. You can redistribute it and/or modify it under the
-terms of the Do What The Fuck You Want To Public License, Version 2,
-as published by Sam Hocevar. See below for more details.
-
-        DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
-                    Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
-            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-  0. You just DO WHAT THE FUCK YOU WANT TO.
-
----
-
-The MIT License (MIT)
-
-Copyright © 2013–2016 Domenic Denicola <d@domenic.me>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: path-parse. A copy of the source code may be downloaded from https://github.com/jbgutierrez/path-parse.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Javier Blanco
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: pbkdf2. A copy of the source code may be downloaded from https://github.com/crypto-browserify/pbkdf2.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Daniel Cousens
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: performance-now. A copy of the source code may be downloaded from git://github.com/braveg1rl/performance-now.git. This software contains the following license and notice below:
-
-Copyright (c) 2013 Braveg1rl
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: pinkie, pinkie-promise, require-from-string. A copy of the source code may be downloaded from https://github.com/floatdrop/pinkie.git (pinkie), https://github.com/floatdrop/pinkie-promise.git (pinkie-promise), https://github.com/floatdrop/require-from-string.git (require-from-string). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Vsevolod Strukchinsky <floatdrop@gmail.com> (github.com/floatdrop)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: pirates. A copy of the source code may be downloaded from https://github.com/ariporad/pirates.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2016-2018 Ari Porad
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: portfinder. A copy of the source code may be downloaded from git@github.com:indexzero/node-portfinder.git. This software contains the following license and notice below:
-
-node-portfinder
-
-Copyright (c) 2012 Charlie Robbins
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-attribute-case-insensitive. A copy of the source code may be downloaded from git+https://github.com/Semigradsky/postcss-attribute-case-insensitive.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright 2016 Dmitry Semigradsky <semigradskyd@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-calc, postcss-color-rebeccapurple. A copy of the source code may be downloaded from https://github.com/postcss/postcss-calc.git (postcss-calc), https://github.com/postcss/postcss-color-rebeccapurple.git (postcss-color-rebeccapurple). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Maxime Thirouin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-color-gray. A copy of the source code may be downloaded from https://github.com/postcss/postcss-color-gray.git. This software contains the following license and notice below:
-
-# ISC License (ISC)
-
-## Copyright 2018 Shinnosuke Watanabe
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
-OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THIS SOFTWARE.
-
-For more information, please see
-https://opensource.org/licenses/ISC.
-
------
-
-The following software may be included in this product: postcss-color-hex-alpha, postcss-custom-media, postcss-custom-properties, postcss-custom-selectors. A copy of the source code may be downloaded from https://github.com/postcss/postcss-color-hex-alpha.git (postcss-color-hex-alpha), https://github.com/postcss/postcss-custom-media.git (postcss-custom-media), https://github.com/postcss/postcss-custom-properties.git (postcss-custom-properties), https://github.com/postcss/postcss-custom-selectors.git (postcss-custom-selectors). This software contains the following license and notice below:
-
-# The MIT License (MIT)
-
-Copyright © PostCSS
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-discard-overridden. A copy of the source code may be downloaded from https://github.com/cssnano/cssnano.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright 2016 Justineo <justice360@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-flexbugs-fixes. A copy of the source code may be downloaded from https://github.com/luisrudge/postcss-flexbugs-fixes.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright 2015 Luis Rudge <luis@luisrudge.net>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-font-variant. A copy of the source code may be downloaded from https://github.com/postcss/postcss-font-variant.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Maxime Thirouin & Ian Storm Taylor
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-initial. A copy of the source code may be downloaded from https://github.com/maximkoretskiy/postcss-initial.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright 2015 Maksim Koretskiy <mr.green.tv@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-load-config. A copy of the source code may be downloaded from https://github.com/michael-ciniawsky/postcss-load-config.git. This software contains the following license and notice below:
-
-License (MIT)
-
-Copyright (c) Michael Ciniawsky <michael.ciniawsky@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-loader. A copy of the source code may be downloaded from https://github.com/postcss/postcss-loader.git. This software contains the following license and notice below:
-
-License (MIT)
-
-Copyright 2017 Andrey Sitnik <andrey@sitnik.ru>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-minify-params, postcss-normalize-charset. A copy of the source code may be downloaded from https://github.com/cssnano/cssnano.git (postcss-minify-params), https://github.com/cssnano/cssnano.git (postcss-normalize-charset). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright 2015 Bogdan Chadkin <trysound@yandex.ru>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-modules-extract-imports. A copy of the source code may be downloaded from https://github.com/css-modules/postcss-modules-extract-imports.git. This software contains the following license and notice below:
-
-Copyright 2015 Glen Maddern
-
-Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-modules-local-by-default. A copy of the source code may be downloaded from https://github.com/css-modules/postcss-modules-local-by-default.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright 2015 Mark Dalgleish <mark.john.dalgleish@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-modules-scope. A copy of the source code may be downloaded from https://github.com/css-modules/postcss-modules-scope.git. This software contains the following license and notice below:
-
-ISC License (ISC)
-
-Copyright (c) 2015, Glen Maddern
-
-Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-modules-values. A copy of the source code may be downloaded from git+https://github.com/css-modules/postcss-modules-values.git. This software contains the following license and notice below:
-
-ISC License (ISC)
-
-Copyright (c) 2015, Glen Maddern
-
-Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-page-break. A copy of the source code may be downloaded from https://github.com/shrpne/postcss-page-break.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright 2017 AUTHOR_NAME <AUTHOR_EMAIL>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-replace-overflow-wrap. A copy of the source code may be downloaded from https://github.com/MattDiMu/postcss-replace-overflow-wrap.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright 2016 Matthias Müller <MattDiMu@users.noreply.github.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-selector-matches, postcss-selector-not. A copy of the source code may be downloaded from https://github.com/postcss/postcss-selector-matches.git (postcss-selector-matches), https://github.com/postcss/postcss-selector-not.git (postcss-selector-not). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2017 Maxime Thirouin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: postcss-values-parser. A copy of the source code may be downloaded from https://github.com/lesshint/postcss-values-parser.git. This software contains the following license and notice below:
-
-Copyright (c) Andrew Powell <andrew@shellscape.org>
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: private. A copy of the source code may be downloaded from git://github.com/benjamn/private.git. This software contains the following license and notice below:
-
-Copyright (c) 2014 Ben Newman <bn@cs.stanford.edu>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: process. A copy of the source code may be downloaded from git://github.com/shtylman/node-process.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2013 Roman Shtylman <shtylman@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: process-nextick-args. A copy of the source code may be downloaded from https://github.com/calvinmetcalf/process-nextick-args.git. This software contains the following license and notice below:
-
-# Copyright (c) 2015 Calvin Metcalf
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.**
-
------
-
-The following software may be included in this product: progress. A copy of the source code may be downloaded from git://github.com/visionmedia/node-progress. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2017 TJ Holowaychuk <tj@vision-media.ca>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: promise. A copy of the source code may be downloaded from https://github.com/then/promise.git. This software contains the following license and notice below:
-
-Copyright (c) 2014 Forbes Lindesay
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: prompts, sisteransi. A copy of the source code may be downloaded from https://github.com/terkelg/prompts.git (prompts), https://github.com/terkelg/sisteransi (sisteransi). This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2018 Terkel Gjervig Nielsen
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: property-information. A copy of the source code may be downloaded from https://github.com/wooorm/property-information.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2015 Titus Wormer <tituswormer@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: prr. A copy of the source code may be downloaded from https://github.com/rvagg/prr.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-=====================
-
-Copyright (c) 2014 Rod Vagg
----------------------------
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: psl. A copy of the source code may be downloaded from git@github.com:wrangr/psl.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2017 Lupo Montero lupomontero@gmail.com
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: q. A copy of the source code may be downloaded from git://github.com/kriskowal/q.git. This software contains the following license and notice below:
-
-Copyright 2009–2017 Kristopher Michael Kowal. All rights reserved.
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: qs. A copy of the source code may be downloaded from https://github.com/ljharb/qs.git. This software contains the following license and notice below:
-
-Copyright (c) 2014 Nathan LaFreniere and other contributors.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-    * The names of any contributors may not be used to endorse or promote
-      products derived from this software without specific prior written
-      permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-                                  *   *   *
-
-The complete list of contributors can be found at: https://github.com/hapijs/qs/graphs/contributors
-
------
-
-The following software may be included in this product: querystring, querystring-es3. A copy of the source code may be downloaded from git://github.com/Gozala/querystring.git (querystring), git://github.com/mike-spainhower/querystring.git (querystring-es3). This software contains the following license and notice below:
-
-Copyright 2012 Irakli Gozalishvili. All rights reserved.
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: raf. A copy of the source code may be downloaded from git://github.com/chrisdickinson/raf.git. This software contains the following license and notice below:
-
-Copyright 2013 Chris Dickinson <chris@neversaw.us>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: randombytes, randomfill. A copy of the source code may be downloaded from git@github.com:crypto-browserify/randombytes.git (randombytes), https://github.com/crypto-browserify/randomfill.git (randomfill). This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 crypto-browserify
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: range-parser. A copy of the source code may be downloaded from https://github.com/jshttp/range-parser.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2012-2014 TJ Holowaychuk <tj@vision-media.ca>
-Copyright (c) 2015-2016 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: raw-body. A copy of the source code may be downloaded from https://github.com/stream-utils/raw-body.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2013-2014 Jonathan Ong <me@jongleberry.com>
-Copyright (c) 2014-2015 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: rc, through. A copy of the source code may be downloaded from https://github.com/dominictarr/rc.git (rc), https://github.com/dominictarr/through.git (through). This software contains the following license and notice below:
-
-Apache License, Version 2.0
-
-Copyright (c) 2011 Dominic Tarr
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
------
-
-The following software may be included in this product: rc-calendar. A copy of the source code may be downloaded from git@github.com:react-component/calendar.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-present yiminghe
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: rc-cascader, rc-editor-mention, rc-select. A copy of the source code may be downloaded from https://github.com/react-component/cascader.git (rc-cascader), https://github.com/react-component/mention.git (rc-editor-mention), git@github.com:react-component/select.git (rc-select). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014-present alipay.com
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: rc-checkbox. A copy of the source code may be downloaded from git@github.com:react-component/checkbox.git. This software contains the following license and notice below:
-
-The MIT License (MIT) Copyright (c) 2016 React Components
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: rc-drawer, rc-table, rc-tree, rc-tree-select. A copy of the source code may be downloaded from https://github.com/ant-motion/drawer.git (rc-drawer), git@github.com:react-component/table.git (rc-table), git@github.com:react-component/tree.git (rc-tree), https://github.com/react-component/tree-select.git (rc-tree-select). This software contains the following license and notice below:
-
-MIT LICENSE
-
-Copyright (c) 2015-present Alipay.com, https://www.alipay.com/
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: rc-editor-core. A copy of the source code may be downloaded from https://github.com/react-component/editor-core.git. This software contains the following license and notice below:
-
-BSD License
-
-For Draft.js software
-
-Copyright (c) 2013-present, Facebook, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
- * Neither the name Facebook nor the names of its contributors may be used to
-   endorse or promote products derived from this software without specific
-   prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: rc-hammerjs. A copy of the source code may be downloaded from https://github.com/react-component/react-hammerjs. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Jed Watson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: rc-slider, rc-tooltip, rc-trigger, rc-util. A copy of the source code may be downloaded from git@github.com:react-component/slider.git (rc-slider), git@github.com:react-component/tooltip.git (rc-tooltip), https://github.com/react-component/trigger.git (rc-trigger), git@github.com:react-component/util.git (rc-util). This software contains the following license and notice below:
-
-The MIT License (MIT)
-Copyright (c) 2015-present Alipay.com, https://www.alipay.com/
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: react-app-rewired. A copy of the source code may be downloaded from git+https://github.com/timarney/react-app-rewired. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2016 Tim Arney
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: react-lazy-load. A copy of the source code may be downloaded from https://github.com/loktar00/react-lazy-load.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Jason
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: react-lifecycles-compat. A copy of the source code may be downloaded from https://github.com/reactjs/react-lifecycles-compat.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2013-present, Facebook, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: readable-stream. A copy of the source code may be downloaded from git://github.com/nodejs/readable-stream. This software contains the following license and notice below:
-
-Node.js is licensed for use as follows:
-
-"""
-Copyright Node.js contributors. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-"""
-
-This license applies to parts of Node.js originating from the
-https://github.com/joyent/node repository:
-
-"""
-Copyright Joyent, Inc. and other Node contributors. All rights reserved.
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-"""
-
------
-
-The following software may be included in this product: readdirp. A copy of the source code may be downloaded from git://github.com/paulmillr/readdirp.git. This software contains the following license and notice below:
-
-This software is released under the MIT license:
-
-Copyright (c) 2012-2015 Thorsten Lorenz
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: realpath-native. A copy of the source code may be downloaded from https://github.com/SimenB/realpath-native.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 Simen Bekkhus
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: recursive-readdir. A copy of the source code may be downloaded from git://github.com/jergason/recursive-readdir.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) <year> <copyright holders>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: regex-not. A copy of the source code may be downloaded from https://github.com/jonschlinkert/regex-not.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016, 2018, Jon Schlinkert.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: regexp-tree. A copy of the source code may be downloaded from https://github.com/DmitrySoshnikov/regexp-tree.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 Dmitry Soshnikov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: regjsgen. A copy of the source code may be downloaded from https://github.com/bnjmnt4n/regjsgen.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright 2014-2018 Benjamin Tan <https://bnjmnt4n.now.sh/>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: regjsparser. A copy of the source code may be downloaded from git@github.com:jviereck/regjsparser.git. This software contains the following license and notice below:
-
-Copyright (c) Julian Viereck and Contributors, All Rights Reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-  * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-  * Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: relateurl. A copy of the source code may be downloaded from git://github.com/stevenvachon/relateurl.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Steven Vachon <contact@svachon.com> (svachon.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: remove-trailing-separator. A copy of the source code may be downloaded from git+https://github.com/darsain/remove-trailing-separator.git. This software contains the following license and notice below:
-
-Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: renderkid. A copy of the source code may be downloaded from https://github.com/AriaMinaei/RenderKid.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Aria Minaei
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: replace-ext. A copy of the source code may be downloaded from https://github.com/gulpjs/replace-ext.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Blaine Bublitz <blaine.bublitz@gmail.com>, Eric Schoffstall <yo@contra.io> and other contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: request-promise-core. A copy of the source code may be downloaded from git+https://github.com/request/promise-core.git. This software contains the following license and notice below:
-
-ISC License
-
-Copyright (c) 2016, Nicolai Kamenzky and contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: request-promise-native, stealthy-require. A copy of the source code may be downloaded from git+https://github.com/request/request-promise-native.git (request-promise-native), git+https://github.com/analog-nico/stealthy-require.git (stealthy-require). This software contains the following license and notice below:
-
-ISC License
-
-Copyright (c) 2017, Nicolai Kamenzky and contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: require-directory. A copy of the source code may be downloaded from git://github.com/troygoode/node-require-directory.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2011 Troy Goode <troygoode@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: require-main-filename, set-blocking, test-exclude, yargs-parser. A copy of the source code may be downloaded from git+ssh://git@github.com/yargs/require-main-filename.git (require-main-filename), git+https://github.com/yargs/set-blocking.git (set-blocking), git+https://github.com/istanbuljs/istanbuljs.git (test-exclude), git@github.com:yargs/yargs-parser.git (yargs-parser). This software contains the following license and notice below:
-
-Copyright (c) 2016, Contributors
-
-Permission to use, copy, modify, and/or distribute this software
-for any purpose with or without fee is hereby granted, provided
-that the above copyright notice and this permission notice
-appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE
-LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES
-OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
-ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: resize-observer-polyfill. A copy of the source code may be downloaded from https://github.com/que-etc/resize-observer-polyfill.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Denis Rul
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: resolve-url, urix. A copy of the source code may be downloaded from https://github.com/lydell/resolve-url.git (resolve-url), https://github.com/lydell/urix.git (urix). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2013 Simon Lydell
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: ret. A copy of the source code may be downloaded from git://github.com/fent/ret.js.git. This software contains the following license and notice below:
-
-Copyright (C) 2011 by Roly Fentanes
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: ripemd160. A copy of the source code may be downloaded from https://github.com/crypto-browserify/ripemd160. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 crypto-browserify
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: rsvp. A copy of the source code may be downloaded from https://github.com/tildeio/rsvp.js.git. This software contains the following license and notice below:
-
-Copyright (c) 2014 Yehuda Katz, Tom Dale, Stefan Penner and contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: run-async. A copy of the source code may be downloaded from https://github.com/SBoudrias/run-async.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Simon Boudrias
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: rxjs. A copy of the source code may be downloaded from https://github.com/reactivex/rxjs.git. This software contains the following license and notice below:
-
-Apache License
-                         Version 2.0, January 2004
-                      http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
-    "License" shall mean the terms and conditions for use, reproduction,
-    and distribution as defined by Sections 1 through 9 of this document.
-
-    "Licensor" shall mean the copyright owner or entity authorized by
-    the copyright owner that is granting the License.
-
-    "Legal Entity" shall mean the union of the acting entity and all
-    other entities that control, are controlled by, or are under common
-    control with that entity. For the purposes of this definition,
-    "control" means (i) the power, direct or indirect, to cause the
-    direction or management of such entity, whether by contract or
-    otherwise, or (ii) ownership of fifty percent (50%) or more of the
-    outstanding shares, or (iii) beneficial ownership of such entity.
-
-    "You" (or "Your") shall mean an individual or Legal Entity
-    exercising permissions granted by this License.
-
-    "Source" form shall mean the preferred form for making modifications,
-    including but not limited to software source code, documentation
-    source, and configuration files.
-
-    "Object" form shall mean any form resulting from mechanical
-    transformation or translation of a Source form, including but
-    not limited to compiled object code, generated documentation,
-    and conversions to other media types.
-
-    "Work" shall mean the work of authorship, whether in Source or
-    Object form, made available under the License, as indicated by a
-    copyright notice that is included in or attached to the work
-    (an example is provided in the Appendix below).
-
-    "Derivative Works" shall mean any work, whether in Source or Object
-    form, that is based on (or derived from) the Work and for which the
-    editorial revisions, annotations, elaborations, or other modifications
-    represent, as a whole, an original work of authorship. For the purposes
-    of this License, Derivative Works shall not include works that remain
-    separable from, or merely link (or bind by name) to the interfaces of,
-    the Work and Derivative Works thereof.
-
-    "Contribution" shall mean any work of authorship, including
-    the original version of the Work and any modifications or additions
-    to that Work or Derivative Works thereof, that is intentionally
-    submitted to Licensor for inclusion in the Work by the copyright owner
-    or by an individual or Legal Entity authorized to submit on behalf of
-    the copyright owner. For the purposes of this definition, "submitted"
-    means any form of electronic, verbal, or written communication sent
-    to the Licensor or its representatives, including but not limited to
-    communication on electronic mailing lists, source code control systems,
-    and issue tracking systems that are managed by, or on behalf of, the
-    Licensor for the purpose of discussing and improving the Work, but
-    excluding communication that is conspicuously marked or otherwise
-    designated in writing by the copyright owner as "Not a Contribution."
-
-    "Contributor" shall mean Licensor and any individual or Legal Entity
-    on behalf of whom a Contribution has been received by Licensor and
-    subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
-    this License, each Contributor hereby grants to You a perpetual,
-    worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-    copyright license to reproduce, prepare Derivative Works of,
-    publicly display, publicly perform, sublicense, and distribute the
-    Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
-    this License, each Contributor hereby grants to You a perpetual,
-    worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-    (except as stated in this section) patent license to make, have made,
-    use, offer to sell, sell, import, and otherwise transfer the Work,
-    where such license applies only to those patent claims licensable
-    by such Contributor that are necessarily infringed by their
-    Contribution(s) alone or by combination of their Contribution(s)
-    with the Work to which such Contribution(s) was submitted. If You
-    institute patent litigation against any entity (including a
-    cross-claim or counterclaim in a lawsuit) alleging that the Work
-    or a Contribution incorporated within the Work constitutes direct
-    or contributory patent infringement, then any patent licenses
-    granted to You under this License for that Work shall terminate
-    as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
-    Work or Derivative Works thereof in any medium, with or without
-    modifications, and in Source or Object form, provided that You
-    meet the following conditions:
-
-    (a) You must give any other recipients of the Work or
-        Derivative Works a copy of this License; and
-
-    (b) You must cause any modified files to carry prominent notices
-        stating that You changed the files; and
-
-    (c) You must retain, in the Source form of any Derivative Works
-        that You distribute, all copyright, patent, trademark, and
-        attribution notices from the Source form of the Work,
-        excluding those notices that do not pertain to any part of
-        the Derivative Works; and
-
-    (d) If the Work includes a "NOTICE" text file as part of its
-        distribution, then any Derivative Works that You distribute must
-        include a readable copy of the attribution notices contained
-        within such NOTICE file, excluding those notices that do not
-        pertain to any part of the Derivative Works, in at least one
-        of the following places: within a NOTICE text file distributed
-        as part of the Derivative Works; within the Source form or
-        documentation, if provided along with the Derivative Works; or,
-        within a display generated by the Derivative Works, if and
-        wherever such third-party notices normally appear. The contents
-        of the NOTICE file are for informational purposes only and
-        do not modify the License. You may add Your own attribution
-        notices within Derivative Works that You distribute, alongside
-        or as an addendum to the NOTICE text from the Work, provided
-        that such additional attribution notices cannot be construed
-        as modifying the License.
-
-    You may add Your own copyright statement to Your modifications and
-    may provide additional or different license terms and conditions
-    for use, reproduction, or distribution of Your modifications, or
-    for any such Derivative Works as a whole, provided Your use,
-    reproduction, and distribution of the Work otherwise complies with
-    the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
-    any Contribution intentionally submitted for inclusion in the Work
-    by You to the Licensor shall be under the terms and conditions of
-    this License, without any additional terms or conditions.
-    Notwithstanding the above, nothing herein shall supersede or modify
-    the terms of any separate license agreement you may have executed
-    with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
-    names, trademarks, service marks, or product names of the Licensor,
-    except as required for reasonable and customary use in describing the
-    origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
-    agreed to in writing, Licensor provides the Work (and each
-    Contributor provides its Contributions) on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-    implied, including, without limitation, any warranties or conditions
-    of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-    PARTICULAR PURPOSE. You are solely responsible for determining the
-    appropriateness of using or redistributing the Work and assume any
-    risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
-    whether in tort (including negligence), contract, or otherwise,
-    unless required by applicable law (such as deliberate and grossly
-    negligent acts) or agreed to in writing, shall any Contributor be
-    liable to You for damages, including any direct, indirect, special,
-    incidental, or consequential damages of any character arising as a
-    result of this License or out of the use or inability to use the
-    Work (including but not limited to damages for loss of goodwill,
-    work stoppage, computer failure or malfunction, or any and all
-    other commercial damages or losses), even if such Contributor
-    has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
-    the Work or Derivative Works thereof, You may choose to offer,
-    and charge a fee for, acceptance of support, warranty, indemnity,
-    or other liability obligations and/or rights consistent with this
-    License. However, in accepting such obligations, You may act only
-    on Your own behalf and on Your sole responsibility, not on behalf
-    of any other Contributor, and only if You agree to indemnify,
-    defend, and hold each Contributor harmless for any liability
-    incurred by, or claims asserted against, such Contributor by reason
-    of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
-    To apply the Apache License to your work, attach the following
-    boilerplate notice, with the fields enclosed by brackets "[]"
-    replaced with your own identifying information. (Don't include
-    the brackets!)  The text should be enclosed in the appropriate
-    comment syntax for the file format. We also recommend that a
-    file or class name and description of purpose be included on the
-    same "printed page" as the copyright notice for easier
-    identification within third-party archives.
-
- Copyright (c) 2015-2018 Google, Inc., Netflix, Inc., Microsoft Corp. and contributors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
------
-
-The following software may be included in this product: safer-buffer. A copy of the source code may be downloaded from git+https://github.com/ChALkeR/safer-buffer.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2018 Nikita Skovoroda <chalkerx@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: sass-loader. A copy of the source code may be downloaded from https://github.com/webpack-contrib/sass-loader.git. This software contains the following license and notice below:
-
-Copyright JS Foundation and other contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: sax. A copy of the source code may be downloaded from git://github.com/isaacs/sax-js.git. This software contains the following license and notice below:
-
-The ISC License
-
-Copyright (c) Isaac Z. Schlueter and Contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-====
-
-`String.fromCodePoint` by Mathias Bynens used according to terms of MIT
-License, as follows:
-
-    Copyright Mathias Bynens <https://mathiasbynens.be/>
-
-    Permission is hereby granted, free of charge, to any person obtaining
-    a copy of this software and associated documentation files (the
-    "Software"), to deal in the Software without restriction, including
-    without limitation the rights to use, copy, modify, merge, publish,
-    distribute, sublicense, and/or sell copies of the Software, and to
-    permit persons to whom the Software is furnished to do so, subject to
-    the following conditions:
-
-    The above copyright notice and this permission notice shall be
-    included in all copies or substantial portions of the Software.
-
-    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: saxes. A copy of the source code may be downloaded from https://github.com/lddubeau/saxes.git. This software contains the following license and notice below:
-
-The ISC License
-
-Copyright (c) Contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-====
-
-The following license is the one that governed sax, from which saxes
-was forked. Isaac Schlueter is not *directly* involved with saxes so
-don't go bugging him for saxes issues.
-
-The ISC License
-
-Copyright (c) Isaac Z. Schlueter and Contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-====
-
-`String.fromCodePoint` by Mathias Bynens is no longer used, but it can
-still be found in old commits. It was once used according to terms of
-MIT License, as follows:
-
-    Copyright Mathias Bynens <https://mathiasbynens.be/>
-
-    Permission is hereby granted, free of charge, to any person obtaining
-    a copy of this software and associated documentation files (the
-    "Software"), to deal in the Software without restriction, including
-    without limitation the rights to use, copy, modify, merge, publish,
-    distribute, sublicense, and/or sell copies of the Software, and to
-    permit persons to whom the Software is furnished to do so, subject to
-    the following conditions:
-
-    The above copyright notice and this permission notice shall be
-    included in all copies or substantial portions of the Software.
-
-    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: selfsigned. A copy of the source code may be downloaded from git://github.com/jfromaniello/selfsigned.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2013 José F. Romaniello
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: send. A copy of the source code may be downloaded from https://github.com/pillarjs/send.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2012 TJ Holowaychuk
-Copyright (c) 2014-2016 Douglas Christopher Wilson
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: serialize-javascript. A copy of the source code may be downloaded from git+https://github.com/yahoo/serialize-javascript.git. This software contains the following license and notice below:
-
-Copyright 2014 Yahoo! Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-
-    * Neither the name of the Yahoo! Inc. nor the
-      names of its contributors may be used to endorse or promote products
-      derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL YAHOO! INC. BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: serve-index. A copy of the source code may be downloaded from https://github.com/expressjs/serve-index.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2010 Sencha Inc.
-Copyright (c) 2011 LearnBoost
-Copyright (c) 2011 TJ Holowaychuk
-Copyright (c) 2014-2015 Douglas Christopher Wilson
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: serve-static. A copy of the source code may be downloaded from https://github.com/expressjs/serve-static.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2010 Sencha Inc.
-Copyright (c) 2011 LearnBoost
-Copyright (c) 2011 TJ Holowaychuk
-Copyright (c) 2014-2016 Douglas Christopher Wilson
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: setimmediate. A copy of the source code may be downloaded from https://github.com/YuzuJS/setImmediate.git. This software contains the following license and notice below:
-
-Copyright (c) 2012 Barnesandnoble.com, llc, Donavon West, and Domenic Denicola
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: setprototypeof. A copy of the source code may be downloaded from https://github.com/wesleytodd/setprototypeof.git. This software contains the following license and notice below:
-
-Copyright (c) 2015, Wes Todd
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
-OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: sha.js. A copy of the source code may be downloaded from git://github.com/crypto-browserify/sha.js.git. This software contains the following license and notice below:
-
-Copyright (c) 2013-2018 sha.js contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-Copyright (c) 1998 - 2009, Paul Johnston & Contributors
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-Redistributions of source code must retain the above copyright notice, this
-list of conditions and the following disclaimer.
-
-Redistributions in binary form must reproduce the above copyright notice, this
-list of conditions and the following disclaimer in the documentation and/or
-other materials provided with the distribution.
-
-Neither the name of the author nor the names of its contributors may be used to
-endorse or promote products derived from this software without specific prior
-written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: shallow-equal. A copy of the source code may be downloaded from https://github.com/moroshko/shallow-equal.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright © 2016 Misha Moroshko
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the “Software”), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: shallowequal. A copy of the source code may be downloaded from https://github.com/dashed/shallowequal.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 Alberto Leal <mailforalberto@gmail.com> (github.com/dashed)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: shallowequal. A copy of the source code may be downloaded from https://github.com/dashed/shallowequal.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Alberto Leal <mailforalberto@gmail.com> (github.com/dashed)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: shebang-command. A copy of the source code may be downloaded from https://github.com/kevva/shebang-command.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Kevin Martensson <kevinmartensson@gmail.com> (github.com/kevva)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: shellwords. A copy of the source code may be downloaded from git://github.com/jimmycuadra/shellwords.git. This software contains the following license and notice below:
-
-Copyright (C) 2011 by Jimmy Cuadra
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: signal-exit. A copy of the source code may be downloaded from https://github.com/tapjs/signal-exit.git. This software contains the following license and notice below:
-
-The ISC License
-
-Copyright (c) 2015, Contributors
-
-Permission to use, copy, modify, and/or distribute this software
-for any purpose with or without fee is hereby granted, provided
-that the above copyright notice and this permission notice
-appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE
-LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES
-OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
-ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: simple-swizzle. A copy of the source code may be downloaded from https://github.com/qix-/node-simple-swizzle.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Josh Junon
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: slice-ansi. A copy of the source code may be downloaded from https://github.com/chalk/slice-ansi.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) DC <threedeecee@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: snapdragon-node, snapdragon-util. A copy of the source code may be downloaded from https://github.com/jonschlinkert/snapdragon-node.git (snapdragon-node), https://github.com/jonschlinkert/snapdragon-util.git (snapdragon-util). This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2017, Jon Schlinkert
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: sockjs. A copy of the source code may be downloaded from https://github.com/sockjs/sockjs-node.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (C) 2011 VMware, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: sockjs-client. A copy of the source code may be downloaded from https://github.com/sockjs/sockjs-client.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2011-2012 VMware, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: source-list-map. A copy of the source code may be downloaded from https://github.com/webpack/source-list-map.git. This software contains the following license and notice below:
-
-Copyright 2017 JS Foundation
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: source-map. A copy of the source code may be downloaded from http://github.com/mozilla/source-map.git. This software contains the following license and notice below:
-
-Copyright (c) 2009-2011, Mozilla Foundation and contributors
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-* Neither the names of the Mozilla Foundation nor the names of project
-  contributors may be used to endorse or promote products derived from this
-  software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: source-map-support. A copy of the source code may be downloaded from https://github.com/evanw/node-source-map-support. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Evan Wallace
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: source-map-url. A copy of the source code may be downloaded from https://github.com/lydell/source-map-url.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Simon Lydell
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: spdx-expression-parse. A copy of the source code may be downloaded from https://github.com/jslicense/spdx-expression-parse.js.git. This software contains the following license and notice below:
-
-The MIT License
-
-Copyright (c) 2015 Kyle E. Mitchell & other authors listed in AUTHORS
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: sprintf-js. A copy of the source code may be downloaded from https://github.com/alexei/sprintf.js.git. This software contains the following license and notice below:
-
-Copyright (c) 2007-2014, Alexandru Marasteanu <hello [at) alexei (dot] ro>
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-* Redistributions of source code must retain the above copyright
-  notice, this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright
-  notice, this list of conditions and the following disclaimer in the
-  documentation and/or other materials provided with the distribution.
-* Neither the name of this software nor the names of its contributors may be
-  used to endorse or promote products derived from this software without
-  specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: stack-utils. A copy of the source code may be downloaded from https://github.com/tapjs/stack-utils.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Isaac Z. Schlueter <i@izs.me>, James Talmage <james@talmage.io> (github.com/jamestalmage), and Contributors
-
-Extracted from code in node-tap http://www.node-tap.org/
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: statuses. A copy of the source code may be downloaded from https://github.com/jshttp/statuses.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Jonathan Ong <me@jongleberry.com>
-Copyright (c) 2016 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: stream-http. A copy of the source code may be downloaded from git://github.com/jhiesey/stream-http.git. This software contains the following license and notice below:
-
-The MIT License
-
-Copyright (c) 2015 John Hiesey
-
-Permission is hereby granted, free of charge,
-to any person obtaining a copy of this software and
-associated documentation files (the "Software"), to
-deal in the Software without restriction, including
-without limitation the rights to use, copy, modify,
-merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom
-the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: string_decoder. A copy of the source code may be downloaded from git://github.com/nodejs/string_decoder.git. This software contains the following license and notice below:
-
-Node.js is licensed for use as follows:
-
-"""
-Copyright Node.js contributors. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-"""
-
-This license applies to parts of Node.js originating from the
-https://github.com/joyent/node repository:
-
-"""
-Copyright Joyent, Inc. and other Node contributors. All rights reserved.
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-"""
-
------
-
-The following software may be included in this product: string-convert. A copy of the source code may be downloaded from https://github.com/akiran/string-convert. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Kiran Abburi
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: stringify-object. A copy of the source code may be downloaded from https://github.com/yeoman/stringify-object.git. This software contains the following license and notice below:
-
-Copyright (c) 2015, Yeoman team
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: svgo. A copy of the source code may be downloaded from git://github.com/svg/svgo.git. This software contains the following license and notice below:
-
-The MIT License
-
-Copyright © 2012–2016 Kir Belevich
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-The MIT License
-
-Copyright © 2012–2016 Kir Belevich
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
------
-
-The following software may be included in this product: symbol-tree. A copy of the source code may be downloaded from https://github.com/jsdom/js-symbol-tree.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Joris van der Wel
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: table. A copy of the source code may be downloaded from https://github.com/gajus/table. This software contains the following license and notice below:
-
-Copyright (c) 2018, Gajus Kuizinas (http://gajus.com/)
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-    * Neither the name of the Gajus Kuizinas (http://gajus.com/) nor the
-      names of its contributors may be used to endorse or promote products
-      derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL ANUARY BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: terser, uglify-js. A copy of the source code may be downloaded from https://github.com/fabiosantoscode/terser.git (terser), https://github.com/mishoo/UglifyJS2.git (uglify-js). This software contains the following license and notice below:
-
-UglifyJS is released under the BSD license:
-
-Copyright 2012-2018 (c) Mihai Bazon <mihai.bazon@gmail.com>
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
-    * Redistributions of source code must retain the above
-      copyright notice, this list of conditions and the following
-      disclaimer.
-
-    * Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials
-      provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER “AS IS” AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
-OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
-THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGE.
-
------
-
-The following software may be included in this product: throat. A copy of the source code may be downloaded from https://github.com/ForbesLindesay/throat.git. This software contains the following license and notice below:
-
-Copyright (c) 2013 Forbes Lindesay
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: through2. A copy of the source code may be downloaded from https://github.com/rvagg/through2.git. This software contains the following license and notice below:
-
-# The MIT License (MIT)
-
-**Copyright (c) Rod Vagg (the "Original Author") and additional contributors**
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: thunky. A copy of the source code may be downloaded from git://github.com/mafintosh/thunky.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2018 Mathias Buus
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: timers-browserify. A copy of the source code may be downloaded from git://github.com/jryans/timers-browserify.git. This software contains the following license and notice below:
-
-# timers-browserify
-
-This project uses the [MIT](http://jryans.mit-license.org/) license:
-
-    Copyright © 2012 J. Ryan Stinnett <jryans@gmail.com>
-
-    Permission is hereby granted, free of charge, to any person obtaining a
-    copy of this software and associated documentation files (the “Software”),
-    to deal in the Software without restriction, including without limitation
-    the rights to use, copy, modify, merge, publish, distribute, sublicense,
-    and/or sell copies of the Software, and to permit persons to whom the
-    Software is furnished to do so, subject to the following conditions:
-
-    The above copyright notice and this permission notice shall be included in
-    all copies or substantial portions of the Software.
-
-    THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-    DEALINGS IN THE SOFTWARE.
-
-# lib/node
-
-The `lib/node` directory borrows files from joyent/node which uses the following license:
-
-    Copyright Joyent, Inc. and other Node contributors. All rights reserved.
-    Permission is hereby granted, free of charge, to any person obtaining a copy
-    of this software and associated documentation files (the "Software"), to
-    deal in the Software without restriction, including without limitation the
-    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-    sell copies of the Software, and to permit persons to whom the Software is
-    furnished to do so, subject to the following conditions:
-
-    The above copyright notice and this permission notice shall be included in
-    all copies or substantial portions of the Software.
-
-    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-    IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: timsort. A copy of the source code may be downloaded from https://github.com/mziccard/node-timsort.git. This software contains the following license and notice below:
-
-The MIT License
-
-Copyright (c) 2015 Marco Ziccardi
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: tiny-invariant. A copy of the source code may be downloaded from git+https://github.com/alexreardon/tiny-invariant.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2019 Alexander Reardon
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: tinycolor2. A copy of the source code may be downloaded from https://bgrins.github.com/TinyColor. This software contains the following license and notice below:
-
-Copyright (c), Brian Grinstead, http://briangrinstead.com
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: tmp. A copy of the source code may be downloaded from https://github.com/raszi/node-tmp.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 KARASZI István
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: to-arraybuffer. A copy of the source code may be downloaded from git://github.com/jhiesey/to-arraybuffer.git. This software contains the following license and notice below:
-
-The MIT License
-
-Copyright (c) 2016 John Hiesey
-
-Permission is hereby granted, free of charge,
-to any person obtaining a copy of this software and
-associated documentation files (the "Software"), to
-deal in the Software without restriction, including
-without limitation the rights to use, copy, modify,
-merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom
-the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: to-fast-properties. A copy of the source code may be downloaded from https://github.com/sindresorhus/to-fast-properties.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2014 Petka Antonov
-              2015 Sindre Sorhus
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: tough-cookie. A copy of the source code may be downloaded from git://github.com/salesforce/tough-cookie.git. This software contains the following license and notice below:
-
-Copyright (c) 2015, Salesforce.com, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-3. Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: tr46. A copy of the source code may be downloaded from https://github.com/Sebmaster/tr46.js.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Sebastian Mayr
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: trough. A copy of the source code may be downloaded from https://github.com/wooorm/trough.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2016 Titus Wormer <tituswormer@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: tslib, typescript. A copy of the source code may be downloaded from https://github.com/Microsoft/tslib.git (tslib), https://github.com/Microsoft/TypeScript.git (typescript). This software contains the following license and notice below:
-
-Apache License
-
-Version 2.0, January 2004
-
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
-
-"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of this License; and
-
-You must cause any modified files to carry prominent notices stating that You changed the files; and
-
-You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
-
-If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
------
-
-The following software may be included in this product: tsutils. A copy of the source code may be downloaded from https://github.com/ajafff/tsutils. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2017 Klaus Meinhardt
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: tweetnacl. A copy of the source code may be downloaded from https://github.com/dchest/tweetnacl-js.git. This software contains the following license and notice below:
-
-This is free and unencumbered software released into the public domain.
-
-Anyone is free to copy, modify, publish, use, compile, sell, or
-distribute this software, either in source code form or as a compiled
-binary, for any purpose, commercial or non-commercial, and by any
-means.
-
-In jurisdictions that recognize copyright laws, the author or authors
-of this software dedicate any and all copyright interest in the
-software to the public domain. We make this dedication for the benefit
-of the public at large and to the detriment of our heirs and
-successors. We intend this dedication to be an overt act of
-relinquishment in perpetuity of all present and future rights to this
-software under copyright law.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
-OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-For more information, please refer to <http://unlicense.org>
-
------
-
-The following software may be included in this product: typedarray. A copy of the source code may be downloaded from git://github.com/substack/typedarray.git. This software contains the following license and notice below:
-
-/*
- Copyright (c) 2010, Linden Research, Inc.
- Copyright (c) 2012, Joshua Bell
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
- $/LicenseInfo$
- */
-
-// Original can be found at:
-//   https://bitbucket.org/lindenlab/llsd
-// Modifications by Joshua Bell inexorabletash@gmail.com
-//   https://github.com/inexorabletash/polyfill
-
-// ES3/ES5 implementation of the Khronos Typed Array Specification
-//   Ref: http://www.khronos.org/registry/typedarray/specs/latest/
-//   Date: 2011-02-01
-//
-// Variations:
-//  * Allows typed_array.get/set() as alias for subscripts (typed_array[])
-
------
-
-The following software may be included in this product: ua-parser-js. A copy of the source code may be downloaded from https://github.com/faisalman/ua-parser-js.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2012-2018 Faisal Salman <<f@faisalman.com>>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: unified, vfile. A copy of the source code may be downloaded from https://github.com/unifiedjs/unified.git (unified), https://github.com/vfile/vfile.git (vfile). This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2015 Titus Wormer <tituswormer@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: unique-filename. A copy of the source code may be downloaded from https://github.com/iarna/unique-filename.git. This software contains the following license and notice below:
-
-Copyright npm, Inc
-
-Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: unique-slug. A copy of the source code may be downloaded from git://github.com/iarna/unique-slug.git. This software contains the following license and notice below:
-
-The ISC License
-
-Copyright npm, Inc
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: universalify. A copy of the source code may be downloaded from git+https://github.com/RyanZim/universalify.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2017, Ryan Zimmerman <opensrc@ryanzim.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the 'Software'), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: unpipe. A copy of the source code may be downloaded from https://github.com/stream-utils/unpipe.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2015 Douglas Christopher Wilson <doug@somethingdoug.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: unquote. A copy of the source code may be downloaded from https://github.com/lakenen/node-unquote.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2017 Cameron Lakenen
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sub-license, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: upath. A copy of the source code may be downloaded from git://github.com/anodynos/upath. This software contains the following license and notice below:
-
-Copyright(c) 2014-2017 Angelos Pikoulas (agelos.pikoulas@gmail.com)
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: url. A copy of the source code may be downloaded from https://github.com/defunctzombie/node-url.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright Joyent, Inc. and other Node contributors.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: util-deprecate. A copy of the source code may be downloaded from git://github.com/TooTallNate/util-deprecate.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2014 Nathan Rajlich <nathan@tootallnate.net>
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: util.promisify. A copy of the source code may be downloaded from git+https://github.com/ljharb/util.promisify.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: utila. A copy of the source code may be downloaded from https://github.com/AriaMinaei/utila.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Aria Minaei
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: utils-merge. A copy of the source code may be downloaded from git://github.com/jaredhanson/utils-merge.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2013-2017 Jared Hanson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: uuid. A copy of the source code may be downloaded from https://github.com/kelektiv/node-uuid.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2010-2016 Robert Kieffer and other contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: verror. A copy of the source code may be downloaded from git://github.com/davepacheco/node-verror.git. This software contains the following license and notice below:
-
-Copyright (c) 2016, Joyent, Inc. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: vfile-message. A copy of the source code may be downloaded from https://github.com/vfile/vfile-message.git. This software contains the following license and notice below:
-
-(The MIT License)
-
-Copyright (c) 2017 Titus Wormer <tituswormer@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: walker. A copy of the source code may be downloaded from https://github.com/daaku/nodejs-walker. This software contains the following license and notice below:
-
-Copyright 2013 Naitik Shah
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
------
-
-The following software may be included in this product: warning. A copy of the source code may be downloaded from https://github.com/BerkeleyTrue/warning.git. This software contains the following license and notice below:
-
-BSD License
-
-For React software
-
-Copyright (c) 2013-2015, Facebook, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
- * Neither the name Facebook nor the names of its contributors may be used to
-   endorse or promote products derived from this software without specific
-   prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: webidl-conversions. A copy of the source code may be downloaded from https://github.com/jsdom/webidl-conversions.git. This software contains the following license and notice below:
-
-# The BSD 2-Clause License
-
-Copyright (c) 2014, Domenic Denicola
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------
-
-The following software may be included in this product: webpack-log. A copy of the source code may be downloaded from https://github.com/webpack-contrib/webpack-log.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 webpack-contrib
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: webpack-manifest-plugin. A copy of the source code may be downloaded from https://github.com/danethurber/webpack-manifest-plugin.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) Dane Thurber <dane.thurber@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: webpack-sources. A copy of the source code may be downloaded from git+https://github.com/webpack/webpack-sources.git. This software contains the following license and notice below:
-
-MIT License
-
-Copyright (c) 2017 JS Foundation and other contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: websocket-driver. A copy of the source code may be downloaded from git://github.com/faye/websocket-driver-node.git. This software contains the following license and notice below:
-
-# The MIT License
-
-Copyright (c) 2010-2017 James Coglan
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the 'Software'), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: websocket-extensions. A copy of the source code may be downloaded from git://github.com/faye/websocket-extensions-node.git. This software contains the following license and notice below:
-
-# The MIT License
-
-Copyright (c) 2014-2017 James Coglan
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the 'Software'), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: whatwg-encoding. A copy of the source code may be downloaded from https://github.com/jsdom/whatwg-encoding.git. This software contains the following license and notice below:
-
-Copyright © 2016–2018 Domenic Denicola <d@domenic.me>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: whatwg-fetch. A copy of the source code may be downloaded from https://github.com/github/fetch.git. This software contains the following license and notice below:
-
-Copyright (c) 2014-2016 GitHub, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: whatwg-url. A copy of the source code may be downloaded from https://github.com/jsdom/whatwg-url.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2015–2016 Sebastian Mayr
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: which-module. A copy of the source code may be downloaded from git+https://github.com/nexdrew/which-module.git. This software contains the following license and notice below:
-
-Copyright (c) 2016, Contributors
-
-Permission to use, copy, modify, and/or distribute this software for any purpose
-with or without fee is hereby granted, provided that the above copyright notice
-and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
-THIS SOFTWARE.
-
------
-
-The following software may be included in this product: workbox-background-sync, workbox-broadcast-update, workbox-build, workbox-cacheable-response, workbox-core, workbox-expiration, workbox-google-analytics, workbox-navigation-preload, workbox-precaching, workbox-range-requests, workbox-routing, workbox-strategies, workbox-streams, workbox-sw, workbox-webpack-plugin, workbox-window. A copy of the source code may be downloaded from https://github.com/googlechrome/workbox.git (workbox-background-sync), https://github.com/googlechrome/workbox.git (workbox-broadcast-update), https://github.com/googlechrome/workbox.git (workbox-build), https://github.com/googlechrome/workbox.git (workbox-cacheable-response), https://github.com/googlechrome/workbox.git (workbox-core), https://github.com/googlechrome/workbox.git (workbox-expiration), https://github.com/googlechrome/workbox.git (workbox-google-analytics), https://github.com/googlechrome/workbox.git (workbox-navigation-preload), https://github.com/googlechrome/workbox.git (workbox-precaching), https://github.com/googlechrome/workbox.git (workbox-range-requests), https://github.com/googlechrome/workbox.git (workbox-routing), https://github.com/googlechrome/workbox.git (workbox-strategies), https://github.com/googlechrome/workbox.git (workbox-streams), https://github.com/googlechrome/workbox.git (workbox-sw), https://github.com/googlechrome/workbox.git (workbox-webpack-plugin), https://github.com/googlechrome/workbox.git (workbox-window). This software contains the following license and notice below:
-
-Copyright 2018 Google LLC
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: worker-farm. A copy of the source code may be downloaded from https://github.com/rvagg/node-worker-farm.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-=====================
-
-Copyright (c) 2014 LevelUP contributors
----------------------------------------
-
-*LevelUP contributors listed at <https://github.com/rvagg/node-levelup#contributors>*
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: write-file-atomic. A copy of the source code may be downloaded from git@github.com:iarna/write-file-atomic.git. This software contains the following license and notice below:
-
-Copyright (c) 2015, Rebecca Turner
-
-Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
------
-
-The following software may be included in this product: ws. A copy of the source code may be downloaded from https://github.com/websockets/ws.git. This software contains the following license and notice below:
-
-The MIT License (MIT)
-
-Copyright (c) 2011 Einar Otto Stangvik <einaros@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
------
-
-The following software may be included in this product: xml-name-validator. A copy of the source code may be downloaded from https://github.com/jsdom/xml-name-validator.git. This software contains the following license and notice below:
-
-Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
------
-
-The following software may be included in this product: xmlchars. A copy of the source code may be downloaded from https://github.com/lddubeau/xmlchars.git. This software contains the following license and notice below:
-
-Copyright Louis-Dominique Dubeau and contributors to xmlchars
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
------
-
-The following software may be included in this product: xregexp. A copy of the source code may be downloaded from https://github.com/slevithan/xregexp.git. This software contains the following license and notice below:
-
-The MIT License
-
-Copyright (c) 2007-2017 Steven Levithan <http://xregexp.com/>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
------
-
-The following software may be included in this product: y18n. A copy of the source code may be downloaded from git@github.com:yargs/y18n.git. This software contains the following license and notice below:
-
-Copyright (c) 2015, Contributors
-
-Permission to use, copy, modify, and/or distribute this software for any purpose
-with or without fee is hereby granted, provided that the above copyright notice
-and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
-THIS SOFTWARE.
-
------
-
-The following software may be included in this product: yargs. A copy of the source code may be downloaded from https://github.com/yargs/yargs.git. This software contains the following license and notice below:
-
-Copyright 2010 James Halliday (mail@substack.net)
-Modified work Copyright 2014 Contributors (ben@npmjs.com)
-
-This project is free software released under the MIT/X11 license:
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/NOTICE b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/NOTICE
deleted file mode 100644
index b1e9608..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Apache Hadoop Ozone Recon
-Copyright 2019 and onwards The Apache Software Foundation.
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/README.md b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/README.md
deleted file mode 100644
index d555ccd..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
---->
-
-This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).
-
-## Available Scripts
-
-In the project directory, you can run:
-
-### `yarn start`
-
-Runs the app in development mode.<br>
-Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
-
-The page will reload if you make edits.<br>
-You will also see any lint errors in the console.
-
-### `yarn test`
-
-Launches the test runner in interactive watch mode.<br>
-See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
-
-### `yarn run build`
-
-Builds the app for production to the `build` folder.<br>
-It correctly bundles React in production mode and optimizes the build for the best performance.
-
-The build is minified and the filenames include the hashes.<br>
-Your app is ready to be deployed!
-
-See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/config-overrides.js b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/config-overrides.js
deleted file mode 100644
index d29b530..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/config-overrides.js
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-const { override, fixBabelImports, addLessLoader} = require('customize-cra');
-
-module.exports = override(
-    fixBabelImports('import', {
-      libraryName: 'antd',
-      libraryDirectory: 'es',
-      style: true
-    }),
-    addLessLoader({
-      javascriptEnabled: true,
-      modifyVars: {
-        '@primary-color': '#1DA57A'
-      }
-    })
-);
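
For context on the hunk above: config-overrides.js hooked customize-cra into Create React App (via react-app-rewired; see the package.json that follows) to get antd's on-demand imports and LESS theming without ejecting. A minimal sketch of what the fixBabelImports option buys, based on babel-plugin-import's documented rewrite; the sketch is illustrative only, not a file from this diff:

    // Illustrative TypeScript sketch, not part of the deleted sources.
    import { Layout } from 'antd'; // what the deleted App.tsx actually writes
    // With fixBabelImports('import', { libraryName: 'antd',
    // libraryDirectory: 'es', style: true }), babel-plugin-import compiles
    // the line above roughly as if the source had been:
    //
    //   import Layout from 'antd/es/layout';
    //   import 'antd/es/layout/style';  // component .less, themed through
    //                                   // the modifyVars ('@primary-color') above
    //
    // so the bundle carries only the antd components actually in use.
    export default Layout;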
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json
deleted file mode 100644
index cd55957..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json
+++ /dev/null
@@ -1,47 +0,0 @@
-{
-  "name": "ozone-recon",
-  "version": "0.1.0",
-  "private": true,
-  "dependencies": {
-    "@babel/core": "^7.0.0",
-    "@types/jest": "24.0.12",
-    "@types/node": "11.13.9",
-    "@types/react": "16.8.15",
-    "@types/react-dom": "16.8.4",
-    "@types/react-router-dom": "^4.3.3",
-    "antd": "^3.16.6",
-    "babel-jest": "24.7.1",
-    "babel-plugin-import": "^1.11.0",
-    "classnames": "^2.2.6",
-    "customize-cra": "^0.2.12",
-    "less": "^3.9.0",
-    "less-loader": "^5.0.0",
-    "react": "^16.8.6",
-    "react-app-rewired": "^2.1.3",
-    "react-dom": "^16.8.6",
-    "react-router-dom": "^5.0.0",
-    "react-scripts": "3.0.0",
-    "typescript": "3.4.5"
-  },
-  "scripts": {
-    "start": "react-app-rewired start",
-    "build": "react-app-rewired build",
-    "test": "react-app-rewired test",
-    "eject": "react-scripts eject"
-  },
-  "eslintConfig": {
-    "extends": "react-app"
-  },
-  "browserslist": {
-    "production": [
-      ">0.2%",
-      "not dead",
-      "not op_mini all"
-    ],
-    "development": [
-      "last 1 chrome version",
-      "last 1 firefox version",
-      "last 1 safari version"
-    ]
-  }
-}
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/favicon.ico b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/favicon.ico
deleted file mode 100644
index df12210..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/favicon.ico
+++ /dev/null
Binary files differ
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/index.html b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/index.html
deleted file mode 100644
index 631f2ac..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/index.html
+++ /dev/null
@@ -1,56 +0,0 @@
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8" />
-    <link rel="shortcut icon" href="%PUBLIC_URL%/favicon.ico" />
-    <meta name="viewport" content="width=device-width, initial-scale=1" />
-    <meta name="theme-color" content="#000000" />
-    <!--
-      manifest.json provides metadata used when your web app is installed on a
-      user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/
-    -->
-    <link rel="manifest" href="%PUBLIC_URL%/manifest.json" />
-    <!--
-      Notice the use of %PUBLIC_URL% in the tags above.
-      It will be replaced with the URL of the `public` folder during the build.
-      Only files inside the `public` folder can be referenced from the HTML.
-
-      Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will
-      work correctly both with client-side routing and a non-root public URL.
-      Learn how to configure a non-root public URL by running `npm run build`.
-    -->
-
-    <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500" />
-    <title>React App</title>
-  </head>
-  <body>
-    <noscript>You need to enable JavaScript to run this app.</noscript>
-    <div id="root"></div>
-    <!--
-      This HTML file is a template.
-      If you open it directly in the browser, you will see an empty page.
-
-      You can add webfonts, meta tags, or analytics to this file.
-      The build step will place the bundled scripts into the <body> tag.
-
-      To begin the development, run `npm start` or `yarn start`.
-      To create a production bundle, use `npm run build` or `yarn build`.
-    -->
-  </body>
-</html>
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/manifest.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/manifest.json
deleted file mode 100644
index 1f2f141..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/manifest.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "short_name": "React App",
-  "name": "Create React App Sample",
-  "icons": [
-    {
-      "src": "favicon.ico",
-      "sizes": "64x64 32x32 24x24 16x16",
-      "type": "image/x-icon"
-    }
-  ],
-  "start_url": ".",
-  "display": "standalone",
-  "theme_color": "#000000",
-  "background_color": "#ffffff"
-}
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less
deleted file mode 100644
index 1d6ee7c..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-@import "./components/NavBar/NavBar.less";
-
-.ant-layout-header {
-  padding: 0 20px;
-  height: 50px;
-  line-height: 50px;
-  background: #FFF;
-}
-
-.content-layout {
-  margin-left: 200px;
-  &.sidebar-collapsed {
-    margin-left: @sidebar-collapsed-width;
-  }
-}
-
-.page-header {
-  padding: 10px 0;
-  font-size: 20px;
-  font-weight: 500;
-}
-
-.content-div {
-  padding: 24px;
-  background-color: #FFF;
-  min-height: 80vh;
-}
-
-body {
-  font-family: 'Roboto', sans-serif;
-}
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.test.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.test.tsx
deleted file mode 100644
index 0205e74..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.test.tsx
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import React from 'react';
-import ReactDOM from 'react-dom';
-import App from './App';
-
-it('renders without crashing', () => {
-  const div = document.createElement('div');
-  ReactDOM.render(<App />, div);
-  ReactDOM.unmountComponentAtNode(div);
-});
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.tsx
deleted file mode 100644
index 8c1e7c0..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.tsx
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import React from 'react';
-
-import { Layout } from 'antd';
-import './App.less';
-import NavBar from './components/NavBar/NavBar';
-import Breadcrumbs from './components/Breadcrumbs/Breadcrumbs';
-import { BrowserRouter as Router, Switch, Route, Redirect } from 'react-router-dom';
-import { routes } from './routes';
-import { MakeRouteWithSubRoutes } from './makeRouteWithSubRoutes';
-
-import classNames from 'classnames';
-const {
-  Header, Content, Footer
-} = Layout;
-
-interface Props {
-}
-
-interface State {
-  collapsed: boolean;
-}
-
-class App extends React.Component<Props, State>  {
-
-  constructor(props: Props) {
-    super(props);
-
-    this.state = {collapsed: false};
-  }
-
-  onCollapse = (collapsed: boolean) => {
-    this.setState({ collapsed });
-  };
-
-  render() {
-    const { collapsed } = this.state;
-    const layoutClass = classNames('content-layout', {'sidebar-collapsed': collapsed});
-
-    return (
-      <Router>
-        <Layout style={{ minHeight: '100vh' }}>
-          <NavBar collapsed={collapsed} onCollapse={this.onCollapse}/>
-          <Layout className={layoutClass}>
-            <Header>
-              <div style={{ margin: '16px 0' }}>
-                <Breadcrumbs/>
-              </div>
-            </Header>
-            <Content style={{ margin: '0 16px 0', overflow: 'initial' }}>
-              <Switch>
-                <Route exact path="/">
-                  <Redirect to="/Dashboard"/>
-                </Route>
-                {
-                  routes.map(
-                      (route, index) => <MakeRouteWithSubRoutes key={index} {...route} />
-                  )
-                }
-              </Switch>
-            </Content>
-            <Footer style={{ textAlign: 'center' }}>
-            </Footer>
-          </Layout>
-        </Layout>
-      </Router>
-    );
-  }
-}
-
-export default App;
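
App.tsx above maps a routes definition through MakeRouteWithSubRoutes, but neither ./routes nor ./makeRouteWithSubRoutes appears in this section of the diff. A minimal sketch of the react-router v5 pattern the usage implies; the IRoute shape and component typing are assumptions, not the deleted sources:

    import React from 'react';
    import { Route } from 'react-router-dom';

    // Assumed shape: App.tsx spreads each route entry as props,
    // <MakeRouteWithSubRoutes key={index} {...route} />.
    export interface IRoute {
      path: string;
      component: React.ComponentType<any>;
    }

    // Renders one <Route> and forwards the router props into the target component.
    export const MakeRouteWithSubRoutes = (route: IRoute) => (
      <Route path={route.path} render={props => <route.component {...props} />} />
    );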
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/Breadcrumbs/Breadcrumbs.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/Breadcrumbs/Breadcrumbs.tsx
deleted file mode 100644
index 3e8b13d..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/Breadcrumbs/Breadcrumbs.tsx
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import React from 'react';
-import { Breadcrumb } from 'antd';
-import { withRouter, Link } from 'react-router-dom';
-import { RouteComponentProps } from 'react-router';
-import { breadcrumbNameMap } from '../../constants/breadcrumbs.constants';
-
-interface Props extends RouteComponentProps<any> {
-  // No extra props: breadcrumb items are derived entirely from the current
-  // route, so only the router props injected by withRouter are needed.
-}
-
-class Breadcrumbs extends React.Component<Props> {
-
-  render() {
-    const { location } = this.props;
-    const pathSnippets = location.pathname.split('/').filter(i => i);
-    const extraBreadcrumbItems = pathSnippets.map((_, index) => {
-      const url = `/${pathSnippets.slice(0, index + 1).join('/')}`;
-      return (
-          <Breadcrumb.Item key={url}>
-            <Link to={url}>
-              {breadcrumbNameMap[url]}
-            </Link>
-          </Breadcrumb.Item>
-      );
-    });
-    const breadcrumbItems = [(
-        <Breadcrumb.Item key="home">
-          <Link to="/">Home</Link>
-        </Breadcrumb.Item>
-    )].concat(extraBreadcrumbItems);
-    return (
-        <Breadcrumb>
-          {breadcrumbItems}
-        </Breadcrumb>
-    );
-  }
-}
-
-export default withRouter(Breadcrumbs);
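
The URL accumulation in render() above is the core of the component. Extracted as a standalone function (an illustrative rewrite for clarity, not a file from this diff), the behavior is easier to see and to unit-test:

    // Illustrative extraction of the logic in Breadcrumbs.render() above.
    export const breadcrumbUrls = (pathname: string): string[] => {
      const snippets = pathname.split('/').filter(i => i);
      return snippets.map((_, index) => `/${snippets.slice(0, index + 1).join('/')}`);
    };

    // breadcrumbUrls('/ContainerBrowser/containers')  // hypothetical path
    //   -> ['/ContainerBrowser', '/ContainerBrowser/containers']
    // Each URL is then rendered as a <Breadcrumb.Item> labelled via
    // breadcrumbNameMap[url].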
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.less
deleted file mode 100644
index cd3ab1f..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.less
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-@sidebar-collapsed-width: 50px;
-
-.logo {
-  color: #FFF;
-  font-size: 20px;
-  font-weight: 500;
-  padding: 10px;
-  background-color: #002040;
-  .logo-text {
-    margin-left: 10px;
-  }
-}
-
-.ant-layout-sider-collapsed .logo-text {
-  display: none;
-}
-
-.ant-menu-inline-collapsed {
-  width: @sidebar-collapsed-width;
-  .ant-menu-item {
-    padding-left: 17px !important;
-  }
-}
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.tsx
deleted file mode 100644
index 69af969..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.tsx
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import React from 'react';
-import logo from '../../logo.png';
-import { Layout, Menu, Icon } from 'antd';
-import './NavBar.less';
-import { withRouter, Link } from 'react-router-dom';
-import { RouteComponentProps } from 'react-router';
-const { Sider } = Layout;
-
-interface NavBarProps extends RouteComponentProps<any> {
-  collapsed: boolean;
-  onCollapse: (arg: boolean) => void;
-}
-
-class NavBar extends React.Component<NavBarProps> {
-  render() {
-    const {location} = this.props;
-    return (
-        <Sider
-            collapsible
-            collapsed={this.props.collapsed}
-            collapsedWidth={50}
-            onCollapse={this.props.onCollapse}
-            style={{
-              overflow: 'auto', height: '100vh', position: 'fixed', left: 0,
-            }}
-        >
-          <div className="logo">
-            <img src={logo} alt="Ozone Recon Logo" width={32} height={32}/>
-            <span className="logo-text">Ozone Recon</span>
-          </div>
-          <Menu theme="dark" defaultSelectedKeys={['/Dashboard']}
-                mode="inline" selectedKeys={[location.pathname]}>
-            <Menu.Item key="/Dashboard">
-              <Icon type="dashboard"/>
-              <span>Dashboard</span>
-              <Link to="/Dashboard"/>
-            </Menu.Item>
-            <Menu.Item key="/ContainerBrowser">
-              <Icon type="file-search"/>
-              <span>Container Browser</span>
-              <Link to="/ContainerBrowser"/>
-            </Menu.Item>
-          </Menu>
-        </Sider>
-    );
-  }
-}
-
-export default withRouter(NavBar);
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx
deleted file mode 100644
index 5af6458..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-interface IBreadcrumbNameMap {
-  [path: string]: string;
-}
-
-export const breadcrumbNameMap: IBreadcrumbNameMap = {
-  '/Dashboard': 'Dashboard',
-  '/ContainerBrowser': 'Container Browser'
-};
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.less
deleted file mode 100644
index 1b94f4e..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.less
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-body {
-  margin: 0;
-  padding: 0;
-  font-family: -apple-system, BlinkMacSystemFont, 'Roboto', 'Segoe UI',
-    'Oxygen', 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans',
-    'Helvetica Neue', sans-serif;
-  -webkit-font-smoothing: antialiased;
-  -moz-osx-font-smoothing: grayscale;
-}
-
-code {
-  font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
-    monospace;
-}
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.tsx
deleted file mode 100644
index a3e450c..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.tsx
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import React from 'react';
-import ReactDOM from 'react-dom';
-import './index.less';
-import App from './App';
-import * as serviceWorker from './serviceWorker';
-
-ReactDOM.render(<App />, document.getElementById('root'));
-
-// If you want your app to work offline and load faster, you can change
-// unregister() to register() below. Note this comes with some pitfalls.
-// Learn more about service workers: https://bit.ly/CRA-PWA
-serviceWorker.unregister();
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/logo.png b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/logo.png
deleted file mode 100644
index 0438317..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/logo.png
+++ /dev/null
Binary files differ
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/makeRouteWithSubRoutes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/makeRouteWithSubRoutes.tsx
deleted file mode 100644
index 6107a8a..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/makeRouteWithSubRoutes.tsx
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import React from 'react';
-import { Route } from 'react-router-dom';
-import { IRoute } from './routes.types';
-
-export const MakeRouteWithSubRoutes = (route: IRoute) => {
-  return (
-      <Route
-          path={route.path}
-          render={props => (
-              <route.component { ...props} routes={route.routes} />
-          )}
-      />
-  );
-};
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/react-app-env.d.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/react-app-env.d.ts
deleted file mode 100644
index 15f01c4..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/react-app-env.d.ts
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/// <reference types="react-scripts" />
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.tsx
deleted file mode 100644
index 4ea0a39..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.tsx
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { Dashboard } from './views/Dashboard/Dashboard';
-import { ContainerBrowser } from './views/ContainerBrowser/ContainerBrowser';
-import { NotFound } from './views/NotFound/NotFound';
-import { IRoute } from "./routes.types";
-
-export const routes:IRoute[] = [
-  {
-    path: "/Dashboard",
-    component: Dashboard
-  },
-  {
-    path: "/ContainerBrowser",
-    component: ContainerBrowser
-  },
-  {
-    path: "/:NotFound",
-    component: NotFound,
-  }
-];
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.types.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.types.tsx
deleted file mode 100644
index 7e12d80..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.types.tsx
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export interface IRoute {
-  path: string;
-  component: any;
-  routes?: IRoute[];
-}
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/serviceWorker.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/serviceWorker.ts
deleted file mode 100644
index 47bb33b..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/serviceWorker.ts
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// This optional code is used to register a service worker.
-// register() is not called by default.
-
-// This lets the app load faster on subsequent visits in production, and gives
-// it offline capabilities. However, it also means that developers (and users)
-// will only see deployed updates on subsequent visits to a page, after all the
-// existing tabs open on the page have been closed, since previously cached
-// resources are updated in the background.
-
-// To learn more about the benefits of this model and instructions on how to
-// opt-in, read https://bit.ly/CRA-PWA
-
-const isLocalhost = Boolean(
-  window.location.hostname === 'localhost' ||
-    // [::1] is the IPv6 localhost address.
-    window.location.hostname === '[::1]' ||
-    // 127.0.0.1/8 is considered localhost for IPv4.
-    window.location.hostname.match(
-      /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/
-    )
-);
-
-type Config = {
-  onSuccess?: (registration: ServiceWorkerRegistration) => void;
-  onUpdate?: (registration: ServiceWorkerRegistration) => void;
-};
-
-export function register(config?: Config) {
-  if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) {
-    // The URL constructor is available in all browsers that support SW.
-    const publicUrl = new URL(
-      (process as { env: { [key: string]: string } }).env.PUBLIC_URL,
-      window.location.href
-    );
-    if (publicUrl.origin !== window.location.origin) {
-      // Our service worker won't work if PUBLIC_URL is on a different origin
-      // from what our page is served on. This might happen if a CDN is used to
-      // serve assets; see https://github.com/facebook/create-react-app/issues/2374
-      return;
-    }
-
-    window.addEventListener('load', () => {
-      const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`;
-
-      if (isLocalhost) {
-        // This is running on localhost. Let's check if a service worker still exists or not.
-        checkValidServiceWorker(swUrl, config);
-
-        // Add some additional logging to localhost, pointing developers to the
-        // service worker/PWA documentation.
-        navigator.serviceWorker.ready.then(() => {
-          console.log(
-            'This web app is being served cache-first by a service ' +
-              'worker. To learn more, visit https://bit.ly/CRA-PWA'
-          );
-        });
-      } else {
-        // Is not localhost. Just register service worker
-        registerValidSW(swUrl, config);
-      }
-    });
-  }
-}
-
-function registerValidSW(swUrl: string, config?: Config) {
-  navigator.serviceWorker
-    .register(swUrl)
-    .then(registration => {
-      registration.onupdatefound = () => {
-        const installingWorker = registration.installing;
-        if (installingWorker == null) {
-          return;
-        }
-        installingWorker.onstatechange = () => {
-          if (installingWorker.state === 'installed') {
-            if (navigator.serviceWorker.controller) {
-              // At this point, the updated precached content has been fetched,
-              // but the previous service worker will still serve the older
-              // content until all client tabs are closed.
-              console.log(
-                'New content is available and will be used when all ' +
-                  'tabs for this page are closed. See https://bit.ly/CRA-PWA.'
-              );
-
-              // Execute callback
-              if (config && config.onUpdate) {
-                config.onUpdate(registration);
-              }
-            } else {
-              // At this point, everything has been precached.
-              // It's the perfect time to display a
-              // "Content is cached for offline use." message.
-              console.log('Content is cached for offline use.');
-
-              // Execute callback
-              if (config && config.onSuccess) {
-                config.onSuccess(registration);
-              }
-            }
-          }
-        };
-      };
-    })
-    .catch(error => {
-      console.error('Error during service worker registration:', error);
-    });
-}
-
-function checkValidServiceWorker(swUrl: string, config?: Config) {
-  // Check if the service worker can be found. If it can't reload the page.
-  fetch(swUrl)
-    .then(response => {
-      // Ensure service worker exists, and that we really are getting a JS file.
-      const contentType = response.headers.get('content-type');
-      if (
-        response.status === 404 ||
-        (contentType != null && contentType.indexOf('javascript') === -1)
-      ) {
-        // No service worker found. Probably a different app. Reload the page.
-        navigator.serviceWorker.ready.then(registration => {
-          registration.unregister().then(() => {
-            window.location.reload();
-          });
-        });
-      } else {
-        // Service worker found. Proceed as normal.
-        registerValidSW(swUrl, config);
-      }
-    })
-    .catch(() => {
-      console.log(
-        'No internet connection found. App is running in offline mode.'
-      );
-    });
-}
-
-export function unregister() {
-  if ('serviceWorker' in navigator) {
-    navigator.serviceWorker.ready.then(registration => {
-      registration.unregister();
-    });
-  }
-}
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/ContainerBrowser/ContainerBrowser.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/ContainerBrowser/ContainerBrowser.tsx
deleted file mode 100644
index 981f76799..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/ContainerBrowser/ContainerBrowser.tsx
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import React from 'react';
-
-export const ContainerBrowser:React.FC= () => {
-  return (
-
-      <div>
-        <div className="page-header">
-          Container Browser
-        </div>
-        <div className="content-div">
-          Container Browser content
-        </div>
-      </div>
-  );
-};
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Dashboard/Dashboard.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Dashboard/Dashboard.tsx
deleted file mode 100644
index 682d599..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Dashboard/Dashboard.tsx
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import React from 'react';
-
-export const Dashboard:React.FC= () => {
-  return (
-    <div>
-      <div className="page-header">
-        Dashboard
-      </div>
-      <div className="content-div">
-        Dashboard content
-      </div>
-    </div>
-  );
-};
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/NotFound/NotFound.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/NotFound/NotFound.tsx
deleted file mode 100644
index 5bc27cb..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/NotFound/NotFound.tsx
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import React from 'react';
-
-export const NotFound:React.FC= () => {
-  return (
-      <div>
-        <div className="page-header">
-          404 Page Not Found :(
-        </div>
-      </div>
-  );
-};
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/tsconfig.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/tsconfig.json
deleted file mode 100644
index 96c8b91..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/tsconfig.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{
-  "compilerOptions": {
-    "target": "es5",
-    "lib": [
-      "dom",
-      "dom.iterable",
-      "esnext"
-    ],
-    "allowJs": true,
-    "skipLibCheck": true,
-    "esModuleInterop": true,
-    "allowSyntheticDefaultImports": true,
-    "strict": true,
-    "forceConsistentCasingInFileNames": true,
-    "module": "esnext",
-    "moduleResolution": "node",
-    "resolveJsonModule": true,
-    "isolatedModules": true,
-    "noEmit": true,
-    "jsx": "preserve",
-    "rootDir": "src",
-    "baseUrl": "src"
-  },
-  "include": [
-    "src"
-  ]
-}
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/yarn.lock b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/yarn.lock
deleted file mode 100644
index 1f3de1d..0000000
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/yarn.lock
+++ /dev/null
@@ -1,11114 +0,0 @@
-# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
-# yarn lockfile v1
-
-
-"@ant-design/colors@^3.1.0":
-  version "3.2.1"
-  resolved "https://registry.yarnpkg.com/@ant-design/colors/-/colors-3.2.1.tgz#ad86cbf2d1a0039d01586aa73a7ea8a7ce0455a2"
-  integrity sha512-ibJybOcR1+h2IEr0Yxx4y/Wcz8obEtKvl2EYvxh8ugMkYniGSItpLKGzKNyyqzOaum5jb6fVCyH1aR9VkdpFRA==
-  dependencies:
-    tinycolor2 "^1.4.1"
-
-"@ant-design/create-react-context@^0.2.4":
-  version "0.2.4"
-  resolved "https://registry.yarnpkg.com/@ant-design/create-react-context/-/create-react-context-0.2.4.tgz#0fe9adad030350c0c9bb296dd6dcf5a8a36bd425"
-  integrity sha512-8sw+/w6r+aEbd+OJ62ojoSE4zDt/3yfQydmbWFznoftjr8v/opOswGjM+/MU0rSaREbluqzOmZ6xdecHpSaS2w==
-  dependencies:
-    gud "^1.0.0"
-    warning "^4.0.3"
-
-"@ant-design/icons-react@~2.0.1":
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/@ant-design/icons-react/-/icons-react-2.0.1.tgz#17a2513571ab317aca2927e58cea25dd31e536fb"
-  integrity sha512-r1QfoltMuruJZqdiKcbPim3d8LNsVPB733U0gZEUSxBLuqilwsW28K2rCTWSMTjmFX7Mfpf+v/wdiFe/XCqThw==
-  dependencies:
-    "@ant-design/colors" "^3.1.0"
-    babel-runtime "^6.26.0"
-
-"@ant-design/icons@~2.1.1":
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/@ant-design/icons/-/icons-2.1.1.tgz#7b9c08dffd4f5d41db667d9dbe5e0107d0bd9a4a"
-  integrity sha512-jCH+k2Vjlno4YWl6g535nHR09PwCEmTBKAG6VqF+rhkrSPRLfgpU2maagwbZPLjaHuU5Jd1DFQ2KJpQuI6uG8w==
-
-"@babel/code-frame@7.5.5", "@babel/code-frame@^7.0.0", "@babel/code-frame@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.5.5.tgz#bc0782f6d69f7b7d49531219699b988f669a8f9d"
-  integrity sha512-27d4lZoomVyo51VegxI20xZPuSHusqbQag/ztrBC7wegWoQ1nLREPVSKSW8byhTlzTKyNE4ifaTA6lCp7JjpFw==
-  dependencies:
-    "@babel/highlight" "^7.0.0"
-
-"@babel/core@7.4.3":
-  version "7.4.3"
-  resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.4.3.tgz#198d6d3af4567be3989550d97e068de94503074f"
-  integrity sha512-oDpASqKFlbspQfzAE7yaeTmdljSH2ADIvBlb0RwbStltTuWa0+7CCI1fYVINNv9saHPa1W7oaKeuNuKj+RQCvA==
-  dependencies:
-    "@babel/code-frame" "^7.0.0"
-    "@babel/generator" "^7.4.0"
-    "@babel/helpers" "^7.4.3"
-    "@babel/parser" "^7.4.3"
-    "@babel/template" "^7.4.0"
-    "@babel/traverse" "^7.4.3"
-    "@babel/types" "^7.4.0"
-    convert-source-map "^1.1.0"
-    debug "^4.1.0"
-    json5 "^2.1.0"
-    lodash "^4.17.11"
-    resolve "^1.3.2"
-    semver "^5.4.1"
-    source-map "^0.5.0"
-
-"@babel/core@^7.0.0", "@babel/core@^7.1.0", "@babel/core@^7.1.6", "@babel/core@^7.4.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.5.5.tgz#17b2686ef0d6bc58f963dddd68ab669755582c30"
-  integrity sha512-i4qoSr2KTtce0DmkuuQBV4AuQgGPUcPXMr9L5MyYAtk06z068lQ10a4O009fe5OB/DfNV+h+qqT7ddNV8UnRjg==
-  dependencies:
-    "@babel/code-frame" "^7.5.5"
-    "@babel/generator" "^7.5.5"
-    "@babel/helpers" "^7.5.5"
-    "@babel/parser" "^7.5.5"
-    "@babel/template" "^7.4.4"
-    "@babel/traverse" "^7.5.5"
-    "@babel/types" "^7.5.5"
-    convert-source-map "^1.1.0"
-    debug "^4.1.0"
-    json5 "^2.1.0"
-    lodash "^4.17.13"
-    resolve "^1.3.2"
-    semver "^5.4.1"
-    source-map "^0.5.0"
-
-"@babel/generator@^7.4.0", "@babel/generator@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.5.5.tgz#873a7f936a3c89491b43536d12245b626664e3cf"
-  integrity sha512-ETI/4vyTSxTzGnU2c49XHv2zhExkv9JHLTwDAFz85kmcwuShvYG2H08FwgIguQf4JC75CBnXAUM5PqeF4fj0nQ==
-  dependencies:
-    "@babel/types" "^7.5.5"
-    jsesc "^2.5.1"
-    lodash "^4.17.13"
-    source-map "^0.5.0"
-    trim-right "^1.0.1"
-
-"@babel/helper-annotate-as-pure@^7.0.0":
-  version "7.0.0"
-  resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.0.0.tgz#323d39dd0b50e10c7c06ca7d7638e6864d8c5c32"
-  integrity sha512-3UYcJUj9kvSLbLbUIfQTqzcy5VX7GRZ/CCDrnOaZorFFM01aXp1+GJwuFGV4NDDoAS+mOUyHcO6UD/RfqOks3Q==
-  dependencies:
-    "@babel/types" "^7.0.0"
-
-"@babel/helper-builder-binary-assignment-operator-visitor@^7.1.0":
-  version "7.1.0"
-  resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.1.0.tgz#6b69628dfe4087798e0c4ed98e3d4a6b2fbd2f5f"
-  integrity sha512-qNSR4jrmJ8M1VMM9tibvyRAHXQs2PmaksQF7c1CGJNipfe3D8p+wgNwgso/P2A2r2mdgBWAXljNWR0QRZAMW8w==
-  dependencies:
-    "@babel/helper-explode-assignable-expression" "^7.1.0"
-    "@babel/types" "^7.0.0"
-
-"@babel/helper-builder-react-jsx@^7.3.0":
-  version "7.3.0"
-  resolved "https://registry.yarnpkg.com/@babel/helper-builder-react-jsx/-/helper-builder-react-jsx-7.3.0.tgz#a1ac95a5d2b3e88ae5e54846bf462eeb81b318a4"
-  integrity sha512-MjA9KgwCuPEkQd9ncSXvSyJ5y+j2sICHyrI0M3L+6fnS4wMSNDc1ARXsbTfbb2cXHn17VisSnU/sHFTCxVxSMw==
-  dependencies:
-    "@babel/types" "^7.3.0"
-    esutils "^2.0.0"
-
-"@babel/helper-call-delegate@^7.4.4":
-  version "7.4.4"
-  resolved "https://registry.yarnpkg.com/@babel/helper-call-delegate/-/helper-call-delegate-7.4.4.tgz#87c1f8ca19ad552a736a7a27b1c1fcf8b1ff1f43"
-  integrity sha512-l79boDFJ8S1c5hvQvG+rc+wHw6IuH7YldmRKsYtpbawsxURu/paVy57FZMomGK22/JckepaikOkY0MoAmdyOlQ==
-  dependencies:
-    "@babel/helper-hoist-variables" "^7.4.4"
-    "@babel/traverse" "^7.4.4"
-    "@babel/types" "^7.4.4"
-
-"@babel/helper-create-class-features-plugin@^7.4.0", "@babel/helper-create-class-features-plugin@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.5.5.tgz#401f302c8ddbc0edd36f7c6b2887d8fa1122e5a4"
-  integrity sha512-ZsxkyYiRA7Bg+ZTRpPvB6AbOFKTFFK4LrvTet8lInm0V468MWCaSYJE+I7v2z2r8KNLtYiV+K5kTCnR7dvyZjg==
-  dependencies:
-    "@babel/helper-function-name" "^7.1.0"
-    "@babel/helper-member-expression-to-functions" "^7.5.5"
-    "@babel/helper-optimise-call-expression" "^7.0.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/helper-replace-supers" "^7.5.5"
-    "@babel/helper-split-export-declaration" "^7.4.4"
-
-"@babel/helper-define-map@^7.4.0", "@babel/helper-define-map@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/helper-define-map/-/helper-define-map-7.5.5.tgz#3dec32c2046f37e09b28c93eb0b103fd2a25d369"
-  integrity sha512-fTfxx7i0B5NJqvUOBBGREnrqbTxRh7zinBANpZXAVDlsZxYdclDp467G1sQ8VZYMnAURY3RpBUAgOYT9GfzHBg==
-  dependencies:
-    "@babel/helper-function-name" "^7.1.0"
-    "@babel/types" "^7.5.5"
-    lodash "^4.17.13"
-
-"@babel/helper-explode-assignable-expression@^7.1.0":
-  version "7.1.0"
-  resolved "https://registry.yarnpkg.com/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.1.0.tgz#537fa13f6f1674df745b0c00ec8fe4e99681c8f6"
-  integrity sha512-NRQpfHrJ1msCHtKjbzs9YcMmJZOg6mQMmGRB+hbamEdG5PNpaSm95275VD92DvJKuyl0s2sFiDmMZ+EnnvufqA==
-  dependencies:
-    "@babel/traverse" "^7.1.0"
-    "@babel/types" "^7.0.0"
-
-"@babel/helper-function-name@^7.1.0":
-  version "7.1.0"
-  resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.1.0.tgz#a0ceb01685f73355d4360c1247f582bfafc8ff53"
-  integrity sha512-A95XEoCpb3TO+KZzJ4S/5uW5fNe26DjBGqf1o9ucyLyCmi1dXq/B3c8iaWTfBk3VvetUxl16e8tIrd5teOCfGw==
-  dependencies:
-    "@babel/helper-get-function-arity" "^7.0.0"
-    "@babel/template" "^7.1.0"
-    "@babel/types" "^7.0.0"
-
-"@babel/helper-get-function-arity@^7.0.0":
-  version "7.0.0"
-  resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.0.0.tgz#83572d4320e2a4657263734113c42868b64e49c3"
-  integrity sha512-r2DbJeg4svYvt3HOS74U4eWKsUAMRH01Z1ds1zx8KNTPtpTL5JAsdFv8BNyOpVqdFhHkkRDIg5B4AsxmkjAlmQ==
-  dependencies:
-    "@babel/types" "^7.0.0"
-
-"@babel/helper-hoist-variables@^7.4.4":
-  version "7.4.4"
-  resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.4.4.tgz#0298b5f25c8c09c53102d52ac4a98f773eb2850a"
-  integrity sha512-VYk2/H/BnYbZDDg39hr3t2kKyifAm1W6zHRfhx8jGjIHpQEBv9dry7oQ2f3+J703TLu69nYdxsovl0XYfcnK4w==
-  dependencies:
-    "@babel/types" "^7.4.4"
-
-"@babel/helper-member-expression-to-functions@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.5.5.tgz#1fb5b8ec4453a93c439ee9fe3aeea4a84b76b590"
-  integrity sha512-5qZ3D1uMclSNqYcXqiHoA0meVdv+xUEex9em2fqMnrk/scphGlGgg66zjMrPJESPwrFJ6sbfFQYUSa0Mz7FabA==
-  dependencies:
-    "@babel/types" "^7.5.5"
-
-"@babel/helper-module-imports@^7.0.0":
-  version "7.0.0"
-  resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.0.0.tgz#96081b7111e486da4d2cd971ad1a4fe216cc2e3d"
-  integrity sha512-aP/hlLq01DWNEiDg4Jn23i+CXxW/owM4WpDLFUbpjxe4NS3BhLVZQ5i7E0ZrxuQ/vwekIeciyamgB1UIYxxM6A==
-  dependencies:
-    "@babel/types" "^7.0.0"
-
-"@babel/helper-module-transforms@^7.1.0", "@babel/helper-module-transforms@^7.4.4":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.5.5.tgz#f84ff8a09038dcbca1fd4355661a500937165b4a"
-  integrity sha512-jBeCvETKuJqeiaCdyaheF40aXnnU1+wkSiUs/IQg3tB85up1LyL8x77ClY8qJpuRJUcXQo+ZtdNESmZl4j56Pw==
-  dependencies:
-    "@babel/helper-module-imports" "^7.0.0"
-    "@babel/helper-simple-access" "^7.1.0"
-    "@babel/helper-split-export-declaration" "^7.4.4"
-    "@babel/template" "^7.4.4"
-    "@babel/types" "^7.5.5"
-    lodash "^4.17.13"
-
-"@babel/helper-optimise-call-expression@^7.0.0":
-  version "7.0.0"
-  resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.0.0.tgz#a2920c5702b073c15de51106200aa8cad20497d5"
-  integrity sha512-u8nd9NQePYNQV8iPWu/pLLYBqZBa4ZaY1YWRFMuxrid94wKI1QNt67NEZ7GAe5Kc/0LLScbim05xZFWkAdrj9g==
-  dependencies:
-    "@babel/types" "^7.0.0"
-
-"@babel/helper-plugin-utils@^7.0.0":
-  version "7.0.0"
-  resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.0.0.tgz#bbb3fbee98661c569034237cc03967ba99b4f250"
-  integrity sha512-CYAOUCARwExnEixLdB6sDm2dIJ/YgEAKDM1MOeMeZu9Ld/bDgVo8aiWrXwcY7OBh+1Ea2uUcVRcxKk0GJvW7QA==
-
-"@babel/helper-regex@^7.0.0", "@babel/helper-regex@^7.4.4":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/helper-regex/-/helper-regex-7.5.5.tgz#0aa6824f7100a2e0e89c1527c23936c152cab351"
-  integrity sha512-CkCYQLkfkiugbRDO8eZn6lRuR8kzZoGXCg3149iTk5se7g6qykSpy3+hELSwquhu+TgHn8nkLiBwHvNX8Hofcw==
-  dependencies:
-    lodash "^4.17.13"
-
-"@babel/helper-remap-async-to-generator@^7.1.0":
-  version "7.1.0"
-  resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.1.0.tgz#361d80821b6f38da75bd3f0785ece20a88c5fe7f"
-  integrity sha512-3fOK0L+Fdlg8S5al8u/hWE6vhufGSn0bN09xm2LXMy//REAF8kDCrYoOBKYmA8m5Nom+sV9LyLCwrFynA8/slg==
-  dependencies:
-    "@babel/helper-annotate-as-pure" "^7.0.0"
-    "@babel/helper-wrap-function" "^7.1.0"
-    "@babel/template" "^7.1.0"
-    "@babel/traverse" "^7.1.0"
-    "@babel/types" "^7.0.0"
-
-"@babel/helper-replace-supers@^7.4.0", "@babel/helper-replace-supers@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.5.5.tgz#f84ce43df031222d2bad068d2626cb5799c34bc2"
-  integrity sha512-XvRFWrNnlsow2u7jXDuH4jDDctkxbS7gXssrP4q2nUD606ukXHRvydj346wmNg+zAgpFx4MWf4+usfC93bElJg==
-  dependencies:
-    "@babel/helper-member-expression-to-functions" "^7.5.5"
-    "@babel/helper-optimise-call-expression" "^7.0.0"
-    "@babel/traverse" "^7.5.5"
-    "@babel/types" "^7.5.5"
-
-"@babel/helper-simple-access@^7.1.0":
-  version "7.1.0"
-  resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.1.0.tgz#65eeb954c8c245beaa4e859da6188f39d71e585c"
-  integrity sha512-Vk+78hNjRbsiu49zAPALxTb+JUQCz1aolpd8osOF16BGnLtseD21nbHgLPGUwrXEurZgiCOUmvs3ExTu4F5x6w==
-  dependencies:
-    "@babel/template" "^7.1.0"
-    "@babel/types" "^7.0.0"
-
-"@babel/helper-split-export-declaration@^7.4.0", "@babel/helper-split-export-declaration@^7.4.4":
-  version "7.4.4"
-  resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.4.4.tgz#ff94894a340be78f53f06af038b205c49d993677"
-  integrity sha512-Ro/XkzLf3JFITkW6b+hNxzZ1n5OQ80NvIUdmHspih1XAhtN3vPTuUFT4eQnela+2MaZ5ulH+iyP513KJrxbN7Q==
-  dependencies:
-    "@babel/types" "^7.4.4"
-
-"@babel/helper-wrap-function@^7.1.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.2.0.tgz#c4e0012445769e2815b55296ead43a958549f6fa"
-  integrity sha512-o9fP1BZLLSrYlxYEYyl2aS+Flun5gtjTIG8iln+XuEzQTs0PLagAGSXUcqruJwD5fM48jzIEggCKpIfWTcR7pQ==
-  dependencies:
-    "@babel/helper-function-name" "^7.1.0"
-    "@babel/template" "^7.1.0"
-    "@babel/traverse" "^7.1.0"
-    "@babel/types" "^7.2.0"
-
-"@babel/helpers@^7.4.3", "@babel/helpers@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.5.5.tgz#63908d2a73942229d1e6685bc2a0e730dde3b75e"
-  integrity sha512-nRq2BUhxZFnfEn/ciJuhklHvFOqjJUD5wpx+1bxUF2axL9C+v4DE/dmp5sT2dKnpOs4orZWzpAZqlCy8QqE/7g==
-  dependencies:
-    "@babel/template" "^7.4.4"
-    "@babel/traverse" "^7.5.5"
-    "@babel/types" "^7.5.5"
-
-"@babel/highlight@^7.0.0":
-  version "7.5.0"
-  resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.5.0.tgz#56d11312bd9248fa619591d02472be6e8cb32540"
-  integrity sha512-7dV4eu9gBxoM0dAnj/BCFDW9LFU0zvTrkq0ugM7pnHEgguOEeOz1so2ZghEdzviYzQEED0r4EAgpsBChKy1TRQ==
-  dependencies:
-    chalk "^2.0.0"
-    esutils "^2.0.2"
-    js-tokens "^4.0.0"
-
-"@babel/parser@^7.0.0", "@babel/parser@^7.1.0", "@babel/parser@^7.4.3", "@babel/parser@^7.4.4", "@babel/parser@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.5.5.tgz#02f077ac8817d3df4a832ef59de67565e71cca4b"
-  integrity sha512-E5BN68cqR7dhKan1SfqgPGhQ178bkVKpXTPEXnFJBrEt8/DKRZlybmy+IgYLTeN7tp1R5Ccmbm2rBk17sHYU3g==
-
-"@babel/plugin-proposal-async-generator-functions@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.2.0.tgz#b289b306669dce4ad20b0252889a15768c9d417e"
-  integrity sha512-+Dfo/SCQqrwx48ptLVGLdE39YtWRuKc/Y9I5Fy0P1DDBB9lsAHpjcEJQt+4IifuSOSTLBKJObJqMvaO1pIE8LQ==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/helper-remap-async-to-generator" "^7.1.0"
-    "@babel/plugin-syntax-async-generators" "^7.2.0"
-
-"@babel/plugin-proposal-class-properties@7.4.0":
-  version "7.4.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.4.0.tgz#d70db61a2f1fd79de927eea91f6411c964e084b8"
-  integrity sha512-t2ECPNOXsIeK1JxJNKmgbzQtoG27KIlVE61vTqX0DKR9E9sZlVVxWUtEW9D5FlZ8b8j7SBNCHY47GgPKCKlpPg==
-  dependencies:
-    "@babel/helper-create-class-features-plugin" "^7.4.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-proposal-decorators@7.4.0":
-  version "7.4.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.4.0.tgz#8e1bfd83efa54a5f662033afcc2b8e701f4bb3a9"
-  integrity sha512-d08TLmXeK/XbgCo7ZeZ+JaeZDtDai/2ctapTRsWWkkmy7G/cqz8DQN/HlWG7RR4YmfXxmExsbU3SuCjlM7AtUg==
-  dependencies:
-    "@babel/helper-create-class-features-plugin" "^7.4.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-syntax-decorators" "^7.2.0"
-
-"@babel/plugin-proposal-dynamic-import@^7.5.0":
-  version "7.5.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.5.0.tgz#e532202db4838723691b10a67b8ce509e397c506"
-  integrity sha512-x/iMjggsKTFHYC6g11PL7Qy58IK8H5zqfm9e6hu4z1iH2IRyAp9u9dL80zA6R76yFovETFLKz2VJIC2iIPBuFw==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-syntax-dynamic-import" "^7.2.0"
-
-"@babel/plugin-proposal-json-strings@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.2.0.tgz#568ecc446c6148ae6b267f02551130891e29f317"
-  integrity sha512-MAFV1CA/YVmYwZG0fBQyXhmj0BHCB5egZHCKWIFVv/XCxAeVGIHfos3SwDck4LvCllENIAg7xMKOG5kH0dzyUg==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-syntax-json-strings" "^7.2.0"
-
-"@babel/plugin-proposal-object-rest-spread@7.4.3":
-  version "7.4.3"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.4.3.tgz#be27cd416eceeba84141305b93c282f5de23bbb4"
-  integrity sha512-xC//6DNSSHVjq8O2ge0dyYlhshsH4T7XdCVoxbi5HzLYWfsC5ooFlJjrXk8RcAT+hjHAK9UjBXdylzSoDK3t4g==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-syntax-object-rest-spread" "^7.2.0"
-
-"@babel/plugin-proposal-object-rest-spread@^7.4.3", "@babel/plugin-proposal-object-rest-spread@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.5.5.tgz#61939744f71ba76a3ae46b5eea18a54c16d22e58"
-  integrity sha512-F2DxJJSQ7f64FyTVl5cw/9MWn6naXGdk3Q3UhDbFEEHv+EilCPoeRD3Zh/Utx1CJz4uyKlQ4uH+bJPbEhMV7Zw==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-syntax-object-rest-spread" "^7.2.0"
-
-"@babel/plugin-proposal-optional-catch-binding@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.2.0.tgz#135d81edb68a081e55e56ec48541ece8065c38f5"
-  integrity sha512-mgYj3jCcxug6KUcX4OBoOJz3CMrwRfQELPQ5560F70YQUBZB7uac9fqaWamKR1iWUzGiK2t0ygzjTScZnVz75g==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-syntax-optional-catch-binding" "^7.2.0"
-
-"@babel/plugin-proposal-unicode-property-regex@^7.4.0", "@babel/plugin-proposal-unicode-property-regex@^7.4.4":
-  version "7.4.4"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.4.4.tgz#501ffd9826c0b91da22690720722ac7cb1ca9c78"
-  integrity sha512-j1NwnOqMG9mFUOH58JTFsA/+ZYzQLUZ/drqWUqxCYLGeu2JFZL8YrNC9hBxKmWtAuOCHPcRpgv7fhap09Fb4kA==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/helper-regex" "^7.4.4"
-    regexpu-core "^4.5.4"
-
-"@babel/plugin-syntax-async-generators@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.2.0.tgz#69e1f0db34c6f5a0cf7e2b3323bf159a76c8cb7f"
-  integrity sha512-1ZrIRBv2t0GSlcwVoQ6VgSLpLgiN/FVQUzt9znxo7v2Ov4jJrs8RY8tv0wvDmFN3qIdMKWrmMMW6yZ0G19MfGg==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-syntax-decorators@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.2.0.tgz#c50b1b957dcc69e4b1127b65e1c33eef61570c1b"
-  integrity sha512-38QdqVoXdHUQfTpZo3rQwqQdWtCn5tMv4uV6r2RMfTqNBuv4ZBhz79SfaQWKTVmxHjeFv/DnXVC/+agHCklYWA==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-syntax-dynamic-import@7.2.0", "@babel/plugin-syntax-dynamic-import@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.2.0.tgz#69c159ffaf4998122161ad8ebc5e6d1f55df8612"
-  integrity sha512-mVxuJ0YroI/h/tbFTPGZR8cv6ai+STMKNBq0f8hFxsxWjl94qqhsb+wXbpNMDPU3cfR1TIsVFzU3nXyZMqyK4w==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-syntax-flow@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.2.0.tgz#a765f061f803bc48f240c26f8747faf97c26bf7c"
-  integrity sha512-r6YMuZDWLtLlu0kqIim5o/3TNRAlWb073HwT3e2nKf9I8IIvOggPrnILYPsrrKilmn/mYEMCf/Z07w3yQJF6dg==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-syntax-json-strings@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.2.0.tgz#72bd13f6ffe1d25938129d2a186b11fd62951470"
-  integrity sha512-5UGYnMSLRE1dqqZwug+1LISpA403HzlSfsg6P9VXU6TBjcSHeNlw4DxDx7LgpF+iKZoOG/+uzqoRHTdcUpiZNg==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-syntax-jsx@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.2.0.tgz#0b85a3b4bc7cdf4cc4b8bf236335b907ca22e7c7"
-  integrity sha512-VyN4QANJkRW6lDBmENzRszvZf3/4AXaj9YR7GwrWeeN9tEBPuXbmDYVU9bYBN0D70zCWVwUy0HWq2553VCb6Hw==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-syntax-object-rest-spread@^7.0.0", "@babel/plugin-syntax-object-rest-spread@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.2.0.tgz#3b7a3e733510c57e820b9142a6579ac8b0dfad2e"
-  integrity sha512-t0JKGgqk2We+9may3t0xDdmneaXmyxq0xieYcKHxIsrJO64n1OiMWNUtc5gQK1PA0NpdCRrtZp4z+IUaKugrSA==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-syntax-optional-catch-binding@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.2.0.tgz#a94013d6eda8908dfe6a477e7f9eda85656ecf5c"
-  integrity sha512-bDe4xKNhb0LI7IvZHiA13kff0KEfaGX/Hv4lMA9+7TEc63hMNvfKo6ZFpXhKuEp+II/q35Gc4NoMeDZyaUbj9w==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-syntax-typescript@^7.2.0":
-  version "7.3.3"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.3.3.tgz#a7cc3f66119a9f7ebe2de5383cce193473d65991"
-  integrity sha512-dGwbSMA1YhVS8+31CnPR7LB4pcbrzcV99wQzby4uAfrkZPYZlQ7ImwdpzLqi6Z6IL02b8IAL379CaMwo0x5Lag==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-arrow-functions@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.2.0.tgz#9aeafbe4d6ffc6563bf8f8372091628f00779550"
-  integrity sha512-ER77Cax1+8/8jCB9fo4Ud161OZzWN5qawi4GusDuRLcDbDG+bIGYY20zb2dfAFdTRGzrfq2xZPvF0R64EHnimg==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-async-to-generator@^7.4.0", "@babel/plugin-transform-async-to-generator@^7.5.0":
-  version "7.5.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.5.0.tgz#89a3848a0166623b5bc481164b5936ab947e887e"
-  integrity sha512-mqvkzwIGkq0bEF1zLRRiTdjfomZJDV33AH3oQzHVGkI2VzEmXLpKKOBvEVaFZBJdN0XTyH38s9j/Kiqr68dggg==
-  dependencies:
-    "@babel/helper-module-imports" "^7.0.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/helper-remap-async-to-generator" "^7.1.0"
-
-"@babel/plugin-transform-block-scoped-functions@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.2.0.tgz#5d3cc11e8d5ddd752aa64c9148d0db6cb79fd190"
-  integrity sha512-ntQPR6q1/NKuphly49+QiQiTN0O63uOwjdD6dhIjSWBI5xlrbUFh720TIpzBhpnrLfv2tNH/BXvLIab1+BAI0w==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-block-scoping@^7.4.0", "@babel/plugin-transform-block-scoping@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.5.5.tgz#a35f395e5402822f10d2119f6f8e045e3639a2ce"
-  integrity sha512-82A3CLRRdYubkG85lKwhZB0WZoHxLGsJdux/cOVaJCJpvYFl1LVzAIFyRsa7CvXqW8rBM4Zf3Bfn8PHt5DP0Sg==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    lodash "^4.17.13"
-
-"@babel/plugin-transform-classes@7.4.3":
-  version "7.4.3"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.4.3.tgz#adc7a1137ab4287a555d429cc56ecde8f40c062c"
-  integrity sha512-PUaIKyFUDtG6jF5DUJOfkBdwAS/kFFV3XFk7Nn0a6vR7ZT8jYw5cGtIlat77wcnd0C6ViGqo/wyNf4ZHytF/nQ==
-  dependencies:
-    "@babel/helper-annotate-as-pure" "^7.0.0"
-    "@babel/helper-define-map" "^7.4.0"
-    "@babel/helper-function-name" "^7.1.0"
-    "@babel/helper-optimise-call-expression" "^7.0.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/helper-replace-supers" "^7.4.0"
-    "@babel/helper-split-export-declaration" "^7.4.0"
-    globals "^11.1.0"
-
-"@babel/plugin-transform-classes@^7.4.3", "@babel/plugin-transform-classes@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.5.5.tgz#d094299d9bd680a14a2a0edae38305ad60fb4de9"
-  integrity sha512-U2htCNK/6e9K7jGyJ++1p5XRU+LJjrwtoiVn9SzRlDT2KubcZ11OOwy3s24TjHxPgxNwonCYP7U2K51uVYCMDg==
-  dependencies:
-    "@babel/helper-annotate-as-pure" "^7.0.0"
-    "@babel/helper-define-map" "^7.5.5"
-    "@babel/helper-function-name" "^7.1.0"
-    "@babel/helper-optimise-call-expression" "^7.0.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/helper-replace-supers" "^7.5.5"
-    "@babel/helper-split-export-declaration" "^7.4.4"
-    globals "^11.1.0"
-
-"@babel/plugin-transform-computed-properties@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.2.0.tgz#83a7df6a658865b1c8f641d510c6f3af220216da"
-  integrity sha512-kP/drqTxY6Xt3NNpKiMomfgkNn4o7+vKxK2DDKcBG9sHj51vHqMBGy8wbDS/J4lMxnqs153/T3+DmCEAkC5cpA==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-destructuring@7.4.3":
-  version "7.4.3"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.4.3.tgz#1a95f5ca2bf2f91ef0648d5de38a8d472da4350f"
-  integrity sha512-rVTLLZpydDFDyN4qnXdzwoVpk1oaXHIvPEOkOLyr88o7oHxVc/LyrnDx+amuBWGOwUb7D1s/uLsKBNTx08htZg==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-destructuring@^7.4.3", "@babel/plugin-transform-destructuring@^7.5.0":
-  version "7.5.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.5.0.tgz#f6c09fdfe3f94516ff074fe877db7bc9ef05855a"
-  integrity sha512-YbYgbd3TryYYLGyC7ZR+Tq8H/+bCmwoaxHfJHupom5ECstzbRLTch6gOQbhEY9Z4hiCNHEURgq06ykFv9JZ/QQ==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-dotall-regex@^7.4.3", "@babel/plugin-transform-dotall-regex@^7.4.4":
-  version "7.4.4"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.4.4.tgz#361a148bc951444312c69446d76ed1ea8e4450c3"
-  integrity sha512-P05YEhRc2h53lZDjRPk/OektxCVevFzZs2Gfjd545Wde3k+yFDbXORgl2e0xpbq8mLcKJ7Idss4fAg0zORN/zg==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/helper-regex" "^7.4.4"
-    regexpu-core "^4.5.4"
-
-"@babel/plugin-transform-duplicate-keys@^7.2.0", "@babel/plugin-transform-duplicate-keys@^7.5.0":
-  version "7.5.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.5.0.tgz#c5dbf5106bf84cdf691222c0974c12b1df931853"
-  integrity sha512-igcziksHizyQPlX9gfSjHkE2wmoCH3evvD2qR5w29/Dk0SMKE/eOI7f1HhBdNhR/zxJDqrgpoDTq5YSLH/XMsQ==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-exponentiation-operator@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.2.0.tgz#a63868289e5b4007f7054d46491af51435766008"
-  integrity sha512-umh4hR6N7mu4Elq9GG8TOu9M0bakvlsREEC+ialrQN6ABS4oDQ69qJv1VtR3uxlKMCQMCvzk7vr17RHKcjx68A==
-  dependencies:
-    "@babel/helper-builder-binary-assignment-operator-visitor" "^7.1.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-flow-strip-types@7.4.0":
-  version "7.4.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-flow-strip-types/-/plugin-transform-flow-strip-types-7.4.0.tgz#f3c59eecff68c99b9c96eaafe4fe9d1fa8947138"
-  integrity sha512-C4ZVNejHnfB22vI2TYN4RUp2oCmq6cSEAg4RygSvYZUECRqUu9O4PMEMNJ4wsemaRGg27BbgYctG4BZh+AgIHw==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-syntax-flow" "^7.2.0"
-
-"@babel/plugin-transform-for-of@^7.4.3", "@babel/plugin-transform-for-of@^7.4.4":
-  version "7.4.4"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.4.4.tgz#0267fc735e24c808ba173866c6c4d1440fc3c556"
-  integrity sha512-9T/5Dlr14Z9TIEXLXkt8T1DU7F24cbhwhMNUziN3hB1AXoZcdzPcTiKGRn/6iOymDqtTKWnr/BtRKN9JwbKtdQ==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-function-name@^7.4.3", "@babel/plugin-transform-function-name@^7.4.4":
-  version "7.4.4"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.4.4.tgz#e1436116abb0610c2259094848754ac5230922ad"
-  integrity sha512-iU9pv7U+2jC9ANQkKeNF6DrPy4GBa4NWQtl6dHB4Pb3izX2JOEvDTFarlNsBj/63ZEzNNIAMs3Qw4fNCcSOXJA==
-  dependencies:
-    "@babel/helper-function-name" "^7.1.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-literals@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.2.0.tgz#690353e81f9267dad4fd8cfd77eafa86aba53ea1"
-  integrity sha512-2ThDhm4lI4oV7fVQ6pNNK+sx+c/GM5/SaML0w/r4ZB7sAneD/piDJtwdKlNckXeyGK7wlwg2E2w33C/Hh+VFCg==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-member-expression-literals@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.2.0.tgz#fa10aa5c58a2cb6afcf2c9ffa8cb4d8b3d489a2d"
-  integrity sha512-HiU3zKkSU6scTidmnFJ0bMX8hz5ixC93b4MHMiYebmk2lUVNGOboPsqQvx5LzooihijUoLR/v7Nc1rbBtnc7FA==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-modules-amd@^7.2.0", "@babel/plugin-transform-modules-amd@^7.5.0":
-  version "7.5.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.5.0.tgz#ef00435d46da0a5961aa728a1d2ecff063e4fb91"
-  integrity sha512-n20UsQMKnWrltocZZm24cRURxQnWIvsABPJlw/fvoy9c6AgHZzoelAIzajDHAQrDpuKFFPPcFGd7ChsYuIUMpg==
-  dependencies:
-    "@babel/helper-module-transforms" "^7.1.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-    babel-plugin-dynamic-import-node "^2.3.0"
-
-"@babel/plugin-transform-modules-commonjs@^7.4.3", "@babel/plugin-transform-modules-commonjs@^7.5.0":
-  version "7.5.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.5.0.tgz#425127e6045231360858eeaa47a71d75eded7a74"
-  integrity sha512-xmHq0B+ytyrWJvQTc5OWAC4ii6Dhr0s22STOoydokG51JjWhyYo5mRPXoi+ZmtHQhZZwuXNN+GG5jy5UZZJxIQ==
-  dependencies:
-    "@babel/helper-module-transforms" "^7.4.4"
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/helper-simple-access" "^7.1.0"
-    babel-plugin-dynamic-import-node "^2.3.0"
-
-"@babel/plugin-transform-modules-systemjs@^7.4.0", "@babel/plugin-transform-modules-systemjs@^7.5.0":
-  version "7.5.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.5.0.tgz#e75266a13ef94202db2a0620977756f51d52d249"
-  integrity sha512-Q2m56tyoQWmuNGxEtUyeEkm6qJYFqs4c+XyXH5RAuYxObRNz9Zgj/1g2GMnjYp2EUyEy7YTrxliGCXzecl/vJg==
-  dependencies:
-    "@babel/helper-hoist-variables" "^7.4.4"
-    "@babel/helper-plugin-utils" "^7.0.0"
-    babel-plugin-dynamic-import-node "^2.3.0"
-
-"@babel/plugin-transform-modules-umd@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.2.0.tgz#7678ce75169f0877b8eb2235538c074268dd01ae"
-  integrity sha512-BV3bw6MyUH1iIsGhXlOK6sXhmSarZjtJ/vMiD9dNmpY8QXFFQTj+6v92pcfy1iqa8DeAfJFwoxcrS/TUZda6sw==
-  dependencies:
-    "@babel/helper-module-transforms" "^7.1.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-named-capturing-groups-regex@^7.4.2", "@babel/plugin-transform-named-capturing-groups-regex@^7.4.5":
-  version "7.4.5"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.4.5.tgz#9d269fd28a370258199b4294736813a60bbdd106"
-  integrity sha512-z7+2IsWafTBbjNsOxU/Iv5CvTJlr5w4+HGu1HovKYTtgJ362f7kBcQglkfmlspKKZ3bgrbSGvLfNx++ZJgCWsg==
-  dependencies:
-    regexp-tree "^0.1.6"
-
-"@babel/plugin-transform-new-target@^7.4.0", "@babel/plugin-transform-new-target@^7.4.4":
-  version "7.4.4"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.4.4.tgz#18d120438b0cc9ee95a47f2c72bc9768fbed60a5"
-  integrity sha512-r1z3T2DNGQwwe2vPGZMBNjioT2scgWzK9BCnDEh+46z8EEwXBq24uRzd65I7pjtugzPSj921aM15RpESgzsSuA==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-object-super@^7.2.0", "@babel/plugin-transform-object-super@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.5.5.tgz#c70021df834073c65eb613b8679cc4a381d1a9f9"
-  integrity sha512-un1zJQAhSosGFBduPgN/YFNvWVpRuHKU7IHBglLoLZsGmruJPOo6pbInneflUdmq7YvSVqhpPs5zdBvLnteltQ==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/helper-replace-supers" "^7.5.5"
-
-"@babel/plugin-transform-parameters@^7.4.3", "@babel/plugin-transform-parameters@^7.4.4":
-  version "7.4.4"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.4.4.tgz#7556cf03f318bd2719fe4c922d2d808be5571e16"
-  integrity sha512-oMh5DUO1V63nZcu/ZVLQFqiihBGo4OpxJxR1otF50GMeCLiRx5nUdtokd+u9SuVJrvvuIh9OosRFPP4pIPnwmw==
-  dependencies:
-    "@babel/helper-call-delegate" "^7.4.4"
-    "@babel/helper-get-function-arity" "^7.0.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-property-literals@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.2.0.tgz#03e33f653f5b25c4eb572c98b9485055b389e905"
-  integrity sha512-9q7Dbk4RhgcLp8ebduOpCbtjh7C0itoLYHXd9ueASKAG/is5PQtMR5VJGka9NKqGhYEGn5ITahd4h9QeBMylWQ==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-react-constant-elements@7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.2.0.tgz#ed602dc2d8bff2f0cb1a5ce29263dbdec40779f7"
-  integrity sha512-YYQFg6giRFMsZPKUM9v+VcHOdfSQdz9jHCx3akAi3UYgyjndmdYGSXylQ/V+HswQt4fL8IklchD9HTsaOCrWQQ==
-  dependencies:
-    "@babel/helper-annotate-as-pure" "^7.0.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-react-constant-elements@^7.0.0":
-  version "7.5.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.5.0.tgz#4d6ae4033bc38f8a65dfca2b6235c44522a422fc"
-  integrity sha512-c5Ba8cpybZFp1Izkf2sWGuNjOxoQ32tFgBvvYvwGhi4+9f6vGiSK9Gex4uVuO/Va6YJFu41aAh1MzMjUWkp0IQ==
-  dependencies:
-    "@babel/helper-annotate-as-pure" "^7.0.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-react-display-name@7.2.0", "@babel/plugin-transform-react-display-name@^7.0.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.2.0.tgz#ebfaed87834ce8dc4279609a4f0c324c156e3eb0"
-  integrity sha512-Htf/tPa5haZvRMiNSQSFifK12gtr/8vwfr+A9y69uF0QcU77AVu4K7MiHEkTxF7lQoHOL0F9ErqgfNEAKgXj7A==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-react-jsx-self@^7.0.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.2.0.tgz#461e21ad9478f1031dd5e276108d027f1b5240ba"
-  integrity sha512-v6S5L/myicZEy+jr6ielB0OR8h+EH/1QFx/YJ7c7Ua+7lqsjj/vW6fD5FR9hB/6y7mGbfT4vAURn3xqBxsUcdg==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-syntax-jsx" "^7.2.0"
-
-"@babel/plugin-transform-react-jsx-source@^7.0.0":
-  version "7.5.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.5.0.tgz#583b10c49cf057e237085bcbd8cc960bd83bd96b"
-  integrity sha512-58Q+Jsy4IDCZx7kqEZuSDdam/1oW8OdDX8f+Loo6xyxdfg1yF0GE2XNJQSTZCaMol93+FBzpWiPEwtbMloAcPg==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-syntax-jsx" "^7.2.0"
-
-"@babel/plugin-transform-react-jsx@^7.0.0":
-  version "7.3.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.3.0.tgz#f2cab99026631c767e2745a5368b331cfe8f5290"
-  integrity sha512-a/+aRb7R06WcKvQLOu4/TpjKOdvVEKRLWFpKcNuHhiREPgGRB4TQJxq07+EZLS8LFVYpfq1a5lDUnuMdcCpBKg==
-  dependencies:
-    "@babel/helper-builder-react-jsx" "^7.3.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-syntax-jsx" "^7.2.0"
-
-"@babel/plugin-transform-regenerator@^7.4.3", "@babel/plugin-transform-regenerator@^7.4.5":
-  version "7.4.5"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.4.5.tgz#629dc82512c55cee01341fb27bdfcb210354680f"
-  integrity sha512-gBKRh5qAaCWntnd09S8QC7r3auLCqq5DI6O0DlfoyDjslSBVqBibrMdsqO+Uhmx3+BlOmE/Kw1HFxmGbv0N9dA==
-  dependencies:
-    regenerator-transform "^0.14.0"
-
-"@babel/plugin-transform-reserved-words@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.2.0.tgz#4792af87c998a49367597d07fedf02636d2e1634"
-  integrity sha512-fz43fqW8E1tAB3DKF19/vxbpib1fuyCwSPE418ge5ZxILnBhWyhtPgz8eh1RCGGJlwvksHkyxMxh0eenFi+kFw==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-runtime@7.4.3":
-  version "7.4.3"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.4.3.tgz#4d6691690ecdc9f5cb8c3ab170a1576c1f556371"
-  integrity sha512-7Q61bU+uEI7bCUFReT1NKn7/X6sDQsZ7wL1sJ9IYMAO7cI+eg6x9re1cEw2fCRMbbTVyoeUKWSV1M6azEfKCfg==
-  dependencies:
-    "@babel/helper-module-imports" "^7.0.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-    resolve "^1.8.1"
-    semver "^5.5.1"
-
-"@babel/plugin-transform-shorthand-properties@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.2.0.tgz#6333aee2f8d6ee7e28615457298934a3b46198f0"
-  integrity sha512-QP4eUM83ha9zmYtpbnyjTLAGKQritA5XW/iG9cjtuOI8s1RuL/3V6a3DeSHfKutJQ+ayUfeZJPcnCYEQzaPQqg==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-spread@^7.2.0":
-  version "7.2.2"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.2.2.tgz#3103a9abe22f742b6d406ecd3cd49b774919b406"
-  integrity sha512-KWfky/58vubwtS0hLqEnrWJjsMGaOeSBn90Ezn5Jeg9Z8KKHmELbP1yGylMlm5N6TPKeY9A2+UaSYLdxahg01w==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-sticky-regex@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.2.0.tgz#a1e454b5995560a9c1e0d537dfc15061fd2687e1"
-  integrity sha512-KKYCoGaRAf+ckH8gEL3JHUaFVyNHKe3ASNsZ+AlktgHevvxGigoIttrEJb8iKN03Q7Eazlv1s6cx2B2cQ3Jabw==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/helper-regex" "^7.0.0"
-
-"@babel/plugin-transform-template-literals@^7.2.0", "@babel/plugin-transform-template-literals@^7.4.4":
-  version "7.4.4"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.4.4.tgz#9d28fea7bbce637fb7612a0750989d8321d4bcb0"
-  integrity sha512-mQrEC4TWkhLN0z8ygIvEL9ZEToPhG5K7KDW3pzGqOfIGZ28Jb0POUkeWcoz8HnHvhFy6dwAT1j8OzqN8s804+g==
-  dependencies:
-    "@babel/helper-annotate-as-pure" "^7.0.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-typeof-symbol@^7.2.0":
-  version "7.2.0"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.2.0.tgz#117d2bcec2fbf64b4b59d1f9819894682d29f2b2"
-  integrity sha512-2LNhETWYxiYysBtrBTqL8+La0jIoQQnIScUJc74OYvUGRmkskNY4EzLCnjHBzdmb38wqtTaixpo1NctEcvMDZw==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-
-"@babel/plugin-transform-typescript@^7.3.2":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.5.5.tgz#6d862766f09b2da1cb1f7d505fe2aedab6b7d4b8"
-  integrity sha512-pehKf4m640myZu5B2ZviLaiBlxMCjSZ1qTEO459AXKX5GnPueyulJeCqZFs1nz/Ya2dDzXQ1NxZ/kKNWyD4h6w==
-  dependencies:
-    "@babel/helper-create-class-features-plugin" "^7.5.5"
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-syntax-typescript" "^7.2.0"
-
-"@babel/plugin-transform-unicode-regex@^7.4.3", "@babel/plugin-transform-unicode-regex@^7.4.4":
-  version "7.4.4"
-  resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.4.4.tgz#ab4634bb4f14d36728bf5978322b35587787970f"
-  integrity sha512-il+/XdNw01i93+M9J9u4T7/e/Ue/vWfNZE4IRUQjplu2Mqb/AFTDimkw2tdEdSH50wuQXZAbXSql0UphQke+vA==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/helper-regex" "^7.4.4"
-    regexpu-core "^4.5.4"
-
-"@babel/preset-env@7.4.3":
-  version "7.4.3"
-  resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.4.3.tgz#e71e16e123dc0fbf65a52cbcbcefd072fbd02880"
-  integrity sha512-FYbZdV12yHdJU5Z70cEg0f6lvtpZ8jFSDakTm7WXeJbLXh4R0ztGEu/SW7G1nJ2ZvKwDhz8YrbA84eYyprmGqw==
-  dependencies:
-    "@babel/helper-module-imports" "^7.0.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-proposal-async-generator-functions" "^7.2.0"
-    "@babel/plugin-proposal-json-strings" "^7.2.0"
-    "@babel/plugin-proposal-object-rest-spread" "^7.4.3"
-    "@babel/plugin-proposal-optional-catch-binding" "^7.2.0"
-    "@babel/plugin-proposal-unicode-property-regex" "^7.4.0"
-    "@babel/plugin-syntax-async-generators" "^7.2.0"
-    "@babel/plugin-syntax-json-strings" "^7.2.0"
-    "@babel/plugin-syntax-object-rest-spread" "^7.2.0"
-    "@babel/plugin-syntax-optional-catch-binding" "^7.2.0"
-    "@babel/plugin-transform-arrow-functions" "^7.2.0"
-    "@babel/plugin-transform-async-to-generator" "^7.4.0"
-    "@babel/plugin-transform-block-scoped-functions" "^7.2.0"
-    "@babel/plugin-transform-block-scoping" "^7.4.0"
-    "@babel/plugin-transform-classes" "^7.4.3"
-    "@babel/plugin-transform-computed-properties" "^7.2.0"
-    "@babel/plugin-transform-destructuring" "^7.4.3"
-    "@babel/plugin-transform-dotall-regex" "^7.4.3"
-    "@babel/plugin-transform-duplicate-keys" "^7.2.0"
-    "@babel/plugin-transform-exponentiation-operator" "^7.2.0"
-    "@babel/plugin-transform-for-of" "^7.4.3"
-    "@babel/plugin-transform-function-name" "^7.4.3"
-    "@babel/plugin-transform-literals" "^7.2.0"
-    "@babel/plugin-transform-member-expression-literals" "^7.2.0"
-    "@babel/plugin-transform-modules-amd" "^7.2.0"
-    "@babel/plugin-transform-modules-commonjs" "^7.4.3"
-    "@babel/plugin-transform-modules-systemjs" "^7.4.0"
-    "@babel/plugin-transform-modules-umd" "^7.2.0"
-    "@babel/plugin-transform-named-capturing-groups-regex" "^7.4.2"
-    "@babel/plugin-transform-new-target" "^7.4.0"
-    "@babel/plugin-transform-object-super" "^7.2.0"
-    "@babel/plugin-transform-parameters" "^7.4.3"
-    "@babel/plugin-transform-property-literals" "^7.2.0"
-    "@babel/plugin-transform-regenerator" "^7.4.3"
-    "@babel/plugin-transform-reserved-words" "^7.2.0"
-    "@babel/plugin-transform-shorthand-properties" "^7.2.0"
-    "@babel/plugin-transform-spread" "^7.2.0"
-    "@babel/plugin-transform-sticky-regex" "^7.2.0"
-    "@babel/plugin-transform-template-literals" "^7.2.0"
-    "@babel/plugin-transform-typeof-symbol" "^7.2.0"
-    "@babel/plugin-transform-unicode-regex" "^7.4.3"
-    "@babel/types" "^7.4.0"
-    browserslist "^4.5.2"
-    core-js-compat "^3.0.0"
-    invariant "^2.2.2"
-    js-levenshtein "^1.1.3"
-    semver "^5.5.0"
-
-"@babel/preset-env@^7.1.6":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.5.5.tgz#bc470b53acaa48df4b8db24a570d6da1fef53c9a"
-  integrity sha512-GMZQka/+INwsMz1A5UEql8tG015h5j/qjptpKY2gJ7giy8ohzU710YciJB5rcKsWGWHiW3RUnHib0E5/m3Tp3A==
-  dependencies:
-    "@babel/helper-module-imports" "^7.0.0"
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-proposal-async-generator-functions" "^7.2.0"
-    "@babel/plugin-proposal-dynamic-import" "^7.5.0"
-    "@babel/plugin-proposal-json-strings" "^7.2.0"
-    "@babel/plugin-proposal-object-rest-spread" "^7.5.5"
-    "@babel/plugin-proposal-optional-catch-binding" "^7.2.0"
-    "@babel/plugin-proposal-unicode-property-regex" "^7.4.4"
-    "@babel/plugin-syntax-async-generators" "^7.2.0"
-    "@babel/plugin-syntax-dynamic-import" "^7.2.0"
-    "@babel/plugin-syntax-json-strings" "^7.2.0"
-    "@babel/plugin-syntax-object-rest-spread" "^7.2.0"
-    "@babel/plugin-syntax-optional-catch-binding" "^7.2.0"
-    "@babel/plugin-transform-arrow-functions" "^7.2.0"
-    "@babel/plugin-transform-async-to-generator" "^7.5.0"
-    "@babel/plugin-transform-block-scoped-functions" "^7.2.0"
-    "@babel/plugin-transform-block-scoping" "^7.5.5"
-    "@babel/plugin-transform-classes" "^7.5.5"
-    "@babel/plugin-transform-computed-properties" "^7.2.0"
-    "@babel/plugin-transform-destructuring" "^7.5.0"
-    "@babel/plugin-transform-dotall-regex" "^7.4.4"
-    "@babel/plugin-transform-duplicate-keys" "^7.5.0"
-    "@babel/plugin-transform-exponentiation-operator" "^7.2.0"
-    "@babel/plugin-transform-for-of" "^7.4.4"
-    "@babel/plugin-transform-function-name" "^7.4.4"
-    "@babel/plugin-transform-literals" "^7.2.0"
-    "@babel/plugin-transform-member-expression-literals" "^7.2.0"
-    "@babel/plugin-transform-modules-amd" "^7.5.0"
-    "@babel/plugin-transform-modules-commonjs" "^7.5.0"
-    "@babel/plugin-transform-modules-systemjs" "^7.5.0"
-    "@babel/plugin-transform-modules-umd" "^7.2.0"
-    "@babel/plugin-transform-named-capturing-groups-regex" "^7.4.5"
-    "@babel/plugin-transform-new-target" "^7.4.4"
-    "@babel/plugin-transform-object-super" "^7.5.5"
-    "@babel/plugin-transform-parameters" "^7.4.4"
-    "@babel/plugin-transform-property-literals" "^7.2.0"
-    "@babel/plugin-transform-regenerator" "^7.4.5"
-    "@babel/plugin-transform-reserved-words" "^7.2.0"
-    "@babel/plugin-transform-shorthand-properties" "^7.2.0"
-    "@babel/plugin-transform-spread" "^7.2.0"
-    "@babel/plugin-transform-sticky-regex" "^7.2.0"
-    "@babel/plugin-transform-template-literals" "^7.4.4"
-    "@babel/plugin-transform-typeof-symbol" "^7.2.0"
-    "@babel/plugin-transform-unicode-regex" "^7.4.4"
-    "@babel/types" "^7.5.5"
-    browserslist "^4.6.0"
-    core-js-compat "^3.1.1"
-    invariant "^2.2.2"
-    js-levenshtein "^1.1.3"
-    semver "^5.5.0"
-
-"@babel/preset-react@7.0.0", "@babel/preset-react@^7.0.0":
-  version "7.0.0"
-  resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.0.0.tgz#e86b4b3d99433c7b3e9e91747e2653958bc6b3c0"
-  integrity sha512-oayxyPS4Zj+hF6Et11BwuBkmpgT/zMxyuZgFrMeZID6Hdh3dGlk4sHCAhdBCpuCKW2ppBfl2uCCetlrUIJRY3w==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-transform-react-display-name" "^7.0.0"
-    "@babel/plugin-transform-react-jsx" "^7.0.0"
-    "@babel/plugin-transform-react-jsx-self" "^7.0.0"
-    "@babel/plugin-transform-react-jsx-source" "^7.0.0"
-
-"@babel/preset-typescript@7.3.3":
-  version "7.3.3"
-  resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.3.3.tgz#88669911053fa16b2b276ea2ede2ca603b3f307a"
-  integrity sha512-mzMVuIP4lqtn4du2ynEfdO0+RYcslwrZiJHXu4MGaC1ctJiW2fyaeDrtjJGs7R/KebZ1sgowcIoWf4uRpEfKEg==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    "@babel/plugin-transform-typescript" "^7.3.2"
-
-"@babel/runtime@7.4.3":
-  version "7.4.3"
-  resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.4.3.tgz#79888e452034223ad9609187a0ad1fe0d2ad4bdc"
-  integrity sha512-9lsJwJLxDh/T3Q3SZszfWOTkk3pHbkmH+3KY+zwIDmsNlxsumuhS2TH3NIpktU4kNvfzy+k3eLT7aTJSPTo0OA==
-  dependencies:
-    regenerator-runtime "^0.13.2"
-
-"@babel/runtime@^7.0.0", "@babel/runtime@^7.1.2", "@babel/runtime@^7.3.4", "@babel/runtime@^7.4.0", "@babel/runtime@^7.4.2":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.5.5.tgz#74fba56d35efbeca444091c7850ccd494fd2f132"
-  integrity sha512-28QvEGyQyNkB0/m2B4FU7IEZGK2NUrcMtT6BZEFALTguLk+AUT6ofsHtPk5QyjAdUkpMJ+/Em+quwz4HOt30AQ==
-  dependencies:
-    regenerator-runtime "^0.13.2"
-
-"@babel/template@^7.1.0", "@babel/template@^7.4.0", "@babel/template@^7.4.4":
-  version "7.4.4"
-  resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.4.4.tgz#f4b88d1225689a08f5bc3a17483545be9e4ed237"
-  integrity sha512-CiGzLN9KgAvgZsnivND7rkA+AeJ9JB0ciPOD4U59GKbQP2iQl+olF1l76kJOupqidozfZ32ghwBEJDhnk9MEcw==
-  dependencies:
-    "@babel/code-frame" "^7.0.0"
-    "@babel/parser" "^7.4.4"
-    "@babel/types" "^7.4.4"
-
-"@babel/traverse@^7.0.0", "@babel/traverse@^7.1.0", "@babel/traverse@^7.4.3", "@babel/traverse@^7.4.4", "@babel/traverse@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.5.5.tgz#f664f8f368ed32988cd648da9f72d5ca70f165bb"
-  integrity sha512-MqB0782whsfffYfSjH4TM+LMjrJnhCNEDMDIjeTpl+ASaUvxcjoiVCo/sM1GhS1pHOXYfWVCYneLjMckuUxDaQ==
-  dependencies:
-    "@babel/code-frame" "^7.5.5"
-    "@babel/generator" "^7.5.5"
-    "@babel/helper-function-name" "^7.1.0"
-    "@babel/helper-split-export-declaration" "^7.4.4"
-    "@babel/parser" "^7.5.5"
-    "@babel/types" "^7.5.5"
-    debug "^4.1.0"
-    globals "^11.1.0"
-    lodash "^4.17.13"
-
-"@babel/types@^7.0.0", "@babel/types@^7.2.0", "@babel/types@^7.3.0", "@babel/types@^7.4.0", "@babel/types@^7.4.4", "@babel/types@^7.5.5":
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.5.5.tgz#97b9f728e182785909aa4ab56264f090a028d18a"
-  integrity sha512-s63F9nJioLqOlW3UkyMd+BYhXt44YuaFm/VV0VwuteqjYwRrObkU7ra9pY4wAJR3oXi8hJrMcrcJdO/HH33vtw==
-  dependencies:
-    esutils "^2.0.2"
-    lodash "^4.17.13"
-    to-fast-properties "^2.0.0"
-
-"@cnakazawa/watch@^1.0.3":
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/@cnakazawa/watch/-/watch-1.0.3.tgz#099139eaec7ebf07a27c1786a3ff64f39464d2ef"
-  integrity sha512-r5160ogAvGyHsal38Kux7YYtodEKOj89RGb28ht1jh3SJb08VwRwAKKJL0bGb04Zd/3r9FL3BFIc3bBidYffCA==
-  dependencies:
-    exec-sh "^0.3.2"
-    minimist "^1.2.0"
-
-"@csstools/convert-colors@^1.4.0":
-  version "1.4.0"
-  resolved "https://registry.yarnpkg.com/@csstools/convert-colors/-/convert-colors-1.4.0.tgz#ad495dc41b12e75d588c6db8b9834f08fa131eb7"
-  integrity sha512-5a6wqoJV/xEdbRNKVo6I4hO3VjyDq//8q2f9I6PBAvMesJHFauXDorcNCsr9RzvsZnaWi5NYCcfyqP1QeFHFbw==
-
-"@csstools/normalize.css@^9.0.1":
-  version "9.0.1"
-  resolved "https://registry.yarnpkg.com/@csstools/normalize.css/-/normalize.css-9.0.1.tgz#c27b391d8457d1e893f1eddeaf5e5412d12ffbb5"
-  integrity sha512-6It2EVfGskxZCQhuykrfnALg7oVeiI6KclWSmGDqB0AiInVrTGB9Jp9i4/Ad21u9Jde/voVQz6eFX/eSg/UsPA==
-
-"@hapi/address@2.x.x":
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/@hapi/address/-/address-2.0.0.tgz#9f05469c88cb2fd3dcd624776b54ee95c312126a"
-  integrity sha512-mV6T0IYqb0xL1UALPFplXYQmR0twnXG0M6jUswpquqT2sD12BOiCiLy3EvMp/Fy7s3DZElC4/aPjEjo2jeZpvw==
-
-"@hapi/bourne@1.x.x":
-  version "1.3.2"
-  resolved "https://registry.yarnpkg.com/@hapi/bourne/-/bourne-1.3.2.tgz#0a7095adea067243ce3283e1b56b8a8f453b242a"
-  integrity sha512-1dVNHT76Uu5N3eJNTYcvxee+jzX4Z9lfciqRRHCU27ihbUcYi+iSc2iml5Ke1LXe1SyJCLA0+14Jh4tXJgOppA==
-
-"@hapi/hoek@8.x.x":
-  version "8.2.1"
-  resolved "https://registry.yarnpkg.com/@hapi/hoek/-/hoek-8.2.1.tgz#924af04cbb22e17359c620d2a9c946e63f58eb77"
-  integrity sha512-JPiBy+oSmsq3St7XlipfN5pNA6bDJ1kpa73PrK/zR29CVClDVqy04AanM/M/qx5bSF+I61DdCfAvRrujau+zRg==
-
-"@hapi/joi@^15.0.0":
-  version "15.1.1"
-  resolved "https://registry.yarnpkg.com/@hapi/joi/-/joi-15.1.1.tgz#c675b8a71296f02833f8d6d243b34c57b8ce19d7"
-  integrity sha512-entf8ZMOK8sc+8YfeOlM8pCfg3b5+WZIKBfUaaJT8UsjAAPjartzxIYm3TIbjvA4u+u++KbcXD38k682nVHDAQ==
-  dependencies:
-    "@hapi/address" "2.x.x"
-    "@hapi/bourne" "1.x.x"
-    "@hapi/hoek" "8.x.x"
-    "@hapi/topo" "3.x.x"
-
-"@hapi/topo@3.x.x":
-  version "3.1.3"
-  resolved "https://registry.yarnpkg.com/@hapi/topo/-/topo-3.1.3.tgz#c7a02e0d936596d29f184e6d7fdc07e8b5efce11"
-  integrity sha512-JmS9/vQK6dcUYn7wc2YZTqzIKubAQcJKu2KCKAru6es482U5RT5fP1EXCPtlXpiK7PR0On/kpQKI4fRKkzpZBQ==
-  dependencies:
-    "@hapi/hoek" "8.x.x"
-
-"@jest/console@^24.7.1", "@jest/console@^24.9.0":
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/@jest/console/-/console-24.9.0.tgz#79b1bc06fb74a8cfb01cbdedf945584b1b9707f0"
-  integrity sha512-Zuj6b8TnKXi3q4ymac8EQfc3ea/uhLeCGThFqXeC8H9/raaH8ARPUTdId+XyGd03Z4In0/VjD2OYFcBF09fNLQ==
-  dependencies:
-    "@jest/source-map" "^24.9.0"
-    chalk "^2.0.1"
-    slash "^2.0.0"
-
-"@jest/core@^24.9.0":
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/@jest/core/-/core-24.9.0.tgz#2ceccd0b93181f9c4850e74f2a9ad43d351369c4"
-  integrity sha512-Fogg3s4wlAr1VX7q+rhV9RVnUv5tD7VuWfYy1+whMiWUrvl7U3QJSJyWcDio9Lq2prqYsZaeTv2Rz24pWGkJ2A==
-  dependencies:
-    "@jest/console" "^24.7.1"
-    "@jest/reporters" "^24.9.0"
-    "@jest/test-result" "^24.9.0"
-    "@jest/transform" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    ansi-escapes "^3.0.0"
-    chalk "^2.0.1"
-    exit "^0.1.2"
-    graceful-fs "^4.1.15"
-    jest-changed-files "^24.9.0"
-    jest-config "^24.9.0"
-    jest-haste-map "^24.9.0"
-    jest-message-util "^24.9.0"
-    jest-regex-util "^24.3.0"
-    jest-resolve "^24.9.0"
-    jest-resolve-dependencies "^24.9.0"
-    jest-runner "^24.9.0"
-    jest-runtime "^24.9.0"
-    jest-snapshot "^24.9.0"
-    jest-util "^24.9.0"
-    jest-validate "^24.9.0"
-    jest-watcher "^24.9.0"
-    micromatch "^3.1.10"
-    p-each-series "^1.0.0"
-    realpath-native "^1.1.0"
-    rimraf "^2.5.4"
-    slash "^2.0.0"
-    strip-ansi "^5.0.0"
-
-"@jest/environment@^24.9.0":
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-24.9.0.tgz#21e3afa2d65c0586cbd6cbefe208bafade44ab18"
-  integrity sha512-5A1QluTPhvdIPFYnO3sZC3smkNeXPVELz7ikPbhUj0bQjB07EoE9qtLrem14ZUYWdVayYbsjVwIiL4WBIMV4aQ==
-  dependencies:
-    "@jest/fake-timers" "^24.9.0"
-    "@jest/transform" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    jest-mock "^24.9.0"
-
-"@jest/fake-timers@^24.9.0":
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-24.9.0.tgz#ba3e6bf0eecd09a636049896434d306636540c93"
-  integrity sha512-eWQcNa2YSwzXWIMC5KufBh3oWRIijrQFROsIqt6v/NS9Io/gknw1jsAC9c+ih/RQX4A3O7SeWAhQeN0goKhT9A==
-  dependencies:
-    "@jest/types" "^24.9.0"
-    jest-message-util "^24.9.0"
-    jest-mock "^24.9.0"
-
-"@jest/reporters@^24.9.0":
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-24.9.0.tgz#86660eff8e2b9661d042a8e98a028b8d631a5b43"
-  integrity sha512-mu4X0yjaHrffOsWmVLzitKmmmWSQ3GGuefgNscUSWNiUNcEOSEQk9k3pERKEQVBb0Cnn88+UESIsZEMH3o88Gw==
-  dependencies:
-    "@jest/environment" "^24.9.0"
-    "@jest/test-result" "^24.9.0"
-    "@jest/transform" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    chalk "^2.0.1"
-    exit "^0.1.2"
-    glob "^7.1.2"
-    istanbul-lib-coverage "^2.0.2"
-    istanbul-lib-instrument "^3.0.1"
-    istanbul-lib-report "^2.0.4"
-    istanbul-lib-source-maps "^3.0.1"
-    istanbul-reports "^2.2.6"
-    jest-haste-map "^24.9.0"
-    jest-resolve "^24.9.0"
-    jest-runtime "^24.9.0"
-    jest-util "^24.9.0"
-    jest-worker "^24.6.0"
-    node-notifier "^5.4.2"
-    slash "^2.0.0"
-    source-map "^0.6.0"
-    string-length "^2.0.0"
-
-"@jest/source-map@^24.3.0", "@jest/source-map@^24.9.0":
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-24.9.0.tgz#0e263a94430be4b41da683ccc1e6bffe2a191714"
-  integrity sha512-/Xw7xGlsZb4MJzNDgB7PW5crou5JqWiBQaz6xyPd3ArOg2nfn/PunV8+olXbbEZzNl591o5rWKE9BRDaFAuIBg==
-  dependencies:
-    callsites "^3.0.0"
-    graceful-fs "^4.1.15"
-    source-map "^0.6.0"
-
-"@jest/test-result@^24.9.0":
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-24.9.0.tgz#11796e8aa9dbf88ea025757b3152595ad06ba0ca"
-  integrity sha512-XEFrHbBonBJ8dGp2JmF8kP/nQI/ImPpygKHwQ/SY+es59Z3L5PI4Qb9TQQMAEeYsThG1xF0k6tmG0tIKATNiiA==
-  dependencies:
-    "@jest/console" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    "@types/istanbul-lib-coverage" "^2.0.0"
-
-"@jest/test-sequencer@^24.9.0":
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-24.9.0.tgz#f8f334f35b625a4f2f355f2fe7e6036dad2e6b31"
-  integrity sha512-6qqsU4o0kW1dvA95qfNog8v8gkRN9ph6Lz7r96IvZpHdNipP2cBcb07J1Z45mz/VIS01OHJ3pY8T5fUY38tg4A==
-  dependencies:
-    "@jest/test-result" "^24.9.0"
-    jest-haste-map "^24.9.0"
-    jest-runner "^24.9.0"
-    jest-runtime "^24.9.0"
-
-"@jest/transform@^24.7.1", "@jest/transform@^24.9.0":
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/@jest/transform/-/transform-24.9.0.tgz#4ae2768b296553fadab09e9ec119543c90b16c56"
-  integrity sha512-TcQUmyNRxV94S0QpMOnZl0++6RMiqpbH/ZMccFB/amku6Uwvyb1cjYX7xkp5nGNkbX4QPH/FcB6q1HBTHynLmQ==
-  dependencies:
-    "@babel/core" "^7.1.0"
-    "@jest/types" "^24.9.0"
-    babel-plugin-istanbul "^5.1.0"
-    chalk "^2.0.1"
-    convert-source-map "^1.4.0"
-    fast-json-stable-stringify "^2.0.0"
-    graceful-fs "^4.1.15"
-    jest-haste-map "^24.9.0"
-    jest-regex-util "^24.9.0"
-    jest-util "^24.9.0"
-    micromatch "^3.1.10"
-    pirates "^4.0.1"
-    realpath-native "^1.1.0"
-    slash "^2.0.0"
-    source-map "^0.6.1"
-    write-file-atomic "2.4.1"
-
-"@jest/types@^24.7.0", "@jest/types@^24.9.0":
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/@jest/types/-/types-24.9.0.tgz#63cb26cb7500d069e5a389441a7c6ab5e909fc59"
-  integrity sha512-XKK7ze1apu5JWQ5eZjHITP66AX+QsLlbaJRBGYr8pNzwcAE2JVkwnf0yqjHTsDRcjR0mujy/NmZMXw5kl+kGBw==
-  dependencies:
-    "@types/istanbul-lib-coverage" "^2.0.0"
-    "@types/istanbul-reports" "^1.1.1"
-    "@types/yargs" "^13.0.0"
-
-"@mrmlnc/readdir-enhanced@^2.2.1":
-  version "2.2.1"
-  resolved "https://registry.yarnpkg.com/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz#524af240d1a360527b730475ecfa1344aa540dde"
-  integrity sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g==
-  dependencies:
-    call-me-maybe "^1.0.1"
-    glob-to-regexp "^0.3.0"
-
-"@nodelib/fs.stat@^1.1.2":
-  version "1.1.3"
-  resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz#2b5a3ab3f918cca48a8c754c08168e3f03eba61b"
-  integrity sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw==
-
-"@svgr/babel-plugin-add-jsx-attribute@^4.2.0":
-  version "4.2.0"
-  resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-4.2.0.tgz#dadcb6218503532d6884b210e7f3c502caaa44b1"
-  integrity sha512-j7KnilGyZzYr/jhcrSYS3FGWMZVaqyCG0vzMCwzvei0coIkczuYMcniK07nI0aHJINciujjH11T72ICW5eL5Ig==
-
-"@svgr/babel-plugin-remove-jsx-attribute@^4.2.0":
-  version "4.2.0"
-  resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-4.2.0.tgz#297550b9a8c0c7337bea12bdfc8a80bb66f85abc"
-  integrity sha512-3XHLtJ+HbRCH4n28S7y/yZoEQnRpl0tvTZQsHqvaeNXPra+6vE5tbRliH3ox1yZYPCxrlqaJT/Mg+75GpDKlvQ==
-
-"@svgr/babel-plugin-remove-jsx-empty-expression@^4.2.0":
-  version "4.2.0"
-  resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-4.2.0.tgz#c196302f3e68eab6a05e98af9ca8570bc13131c7"
-  integrity sha512-yTr2iLdf6oEuUE9MsRdvt0NmdpMBAkgK8Bjhl6epb+eQWk6abBaX3d65UZ3E3FWaOwePyUgNyNCMVG61gGCQ7w==
-
-"@svgr/babel-plugin-replace-jsx-attribute-value@^4.2.0":
-  version "4.2.0"
-  resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-4.2.0.tgz#310ec0775de808a6a2e4fd4268c245fd734c1165"
-  integrity sha512-U9m870Kqm0ko8beHawRXLGLvSi/ZMrl89gJ5BNcT452fAjtF2p4uRzXkdzvGJJJYBgx7BmqlDjBN/eCp5AAX2w==
-
-"@svgr/babel-plugin-svg-dynamic-title@^4.3.1":
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-4.3.1.tgz#646c2f5b5770c2fe318d6e51492344c3d62ddb63"
-  integrity sha512-p6z6JJroP989jHWcuraeWpzdejehTmLUpyC9smhTBWyPN0VVGe2phbYxpPTV7Vh8XzmFrcG55idrnfWn/2oQEw==
-
-"@svgr/babel-plugin-svg-em-dimensions@^4.2.0":
-  version "4.2.0"
-  resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-4.2.0.tgz#9a94791c9a288108d20a9d2cc64cac820f141391"
-  integrity sha512-C0Uy+BHolCHGOZ8Dnr1zXy/KgpBOkEUYY9kI/HseHVPeMbluaX3CijJr7D4C5uR8zrc1T64nnq/k63ydQuGt4w==
-
-"@svgr/babel-plugin-transform-react-native-svg@^4.2.0":
-  version "4.2.0"
-  resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-4.2.0.tgz#151487322843359a1ca86b21a3815fd21a88b717"
-  integrity sha512-7YvynOpZDpCOUoIVlaaOUU87J4Z6RdD6spYN4eUb5tfPoKGSF9OG2NuhgYnq4jSkAxcpMaXWPf1cePkzmqTPNw==
-
-"@svgr/babel-plugin-transform-svg-component@^4.2.0":
-  version "4.2.0"
-  resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-4.2.0.tgz#5f1e2f886b2c85c67e76da42f0f6be1b1767b697"
-  integrity sha512-hYfYuZhQPCBVotABsXKSCfel2slf/yvJY8heTVX1PCTaq/IgASq1IyxPPKJ0chWREEKewIU/JMSsIGBtK1KKxw==
-
-"@svgr/babel-preset@^4.3.1":
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/@svgr/babel-preset/-/babel-preset-4.3.1.tgz#62ffcb85d756580e8ce608e9d2ac3b9063be9e28"
-  integrity sha512-rPFKLmyhlh6oeBv3j2vEAj2nd2QbWqpoJLKzBLjwQVt+d9aeXajVaPNEqrES2spjXKR4OxfgSs7U0NtmAEkr0Q==
-  dependencies:
-    "@svgr/babel-plugin-add-jsx-attribute" "^4.2.0"
-    "@svgr/babel-plugin-remove-jsx-attribute" "^4.2.0"
-    "@svgr/babel-plugin-remove-jsx-empty-expression" "^4.2.0"
-    "@svgr/babel-plugin-replace-jsx-attribute-value" "^4.2.0"
-    "@svgr/babel-plugin-svg-dynamic-title" "^4.3.1"
-    "@svgr/babel-plugin-svg-em-dimensions" "^4.2.0"
-    "@svgr/babel-plugin-transform-react-native-svg" "^4.2.0"
-    "@svgr/babel-plugin-transform-svg-component" "^4.2.0"
-
-"@svgr/core@^4.1.0":
-  version "4.3.2"
-  resolved "https://registry.yarnpkg.com/@svgr/core/-/core-4.3.2.tgz#939c89be670ad79b762f4c063f213f0e02535f2e"
-  integrity sha512-N+tP5CLFd1hP9RpO83QJPZY3NL8AtrdqNbuhRgBkjE/49RnMrrRsFm1wY8pueUfAGvzn6tSXUq29o6ah8RuR5w==
-  dependencies:
-    "@svgr/plugin-jsx" "^4.3.2"
-    camelcase "^5.3.1"
-    cosmiconfig "^5.2.1"
-
-"@svgr/hast-util-to-babel-ast@^4.3.2":
-  version "4.3.2"
-  resolved "https://registry.yarnpkg.com/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-4.3.2.tgz#1d5a082f7b929ef8f1f578950238f630e14532b8"
-  integrity sha512-JioXclZGhFIDL3ddn4Kiq8qEqYM2PyDKV0aYno8+IXTLuYt6TOgHUbUAAFvqtb0Xn37NwP0BTHglejFoYr8RZg==
-  dependencies:
-    "@babel/types" "^7.4.4"
-
-"@svgr/plugin-jsx@^4.1.0", "@svgr/plugin-jsx@^4.3.2":
-  version "4.3.2"
-  resolved "https://registry.yarnpkg.com/@svgr/plugin-jsx/-/plugin-jsx-4.3.2.tgz#ce9ddafc8cdd74da884c9f7af014afcf37f93d3c"
-  integrity sha512-+1GW32RvmNmCsOkMoclA/TppNjHPLMnNZG3/Ecscxawp051XJ2MkO09Hn11VcotdC2EPrDfT8pELGRo+kbZ1Eg==
-  dependencies:
-    "@babel/core" "^7.4.5"
-    "@svgr/babel-preset" "^4.3.1"
-    "@svgr/hast-util-to-babel-ast" "^4.3.2"
-    svg-parser "^2.0.0"
-
-"@svgr/plugin-svgo@^4.0.3":
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/@svgr/plugin-svgo/-/plugin-svgo-4.3.1.tgz#daac0a3d872e3f55935c6588dd370336865e9e32"
-  integrity sha512-PrMtEDUWjX3Ea65JsVCwTIXuSqa3CG9px+DluF1/eo9mlDrgrtFE7NE/DjdhjJgSM9wenlVBzkzneSIUgfUI/w==
-  dependencies:
-    cosmiconfig "^5.2.1"
-    merge-deep "^3.0.2"
-    svgo "^1.2.2"
-
-"@svgr/webpack@4.1.0":
-  version "4.1.0"
-  resolved "https://registry.yarnpkg.com/@svgr/webpack/-/webpack-4.1.0.tgz#20c88f32f731c7b1d4711045b2b993887d731c28"
-  integrity sha512-d09ehQWqLMywP/PT/5JvXwPskPK9QCXUjiSkAHehreB381qExXf5JFCBWhfEyNonRbkIneCeYM99w+Ud48YIQQ==
-  dependencies:
-    "@babel/core" "^7.1.6"
-    "@babel/plugin-transform-react-constant-elements" "^7.0.0"
-    "@babel/preset-env" "^7.1.6"
-    "@babel/preset-react" "^7.0.0"
-    "@svgr/core" "^4.1.0"
-    "@svgr/plugin-jsx" "^4.1.0"
-    "@svgr/plugin-svgo" "^4.0.3"
-    loader-utils "^1.1.0"
-
-"@types/babel__core@^7.1.0":
-  version "7.1.2"
-  resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.1.2.tgz#608c74f55928033fce18b99b213c16be4b3d114f"
-  integrity sha512-cfCCrFmiGY/yq0NuKNxIQvZFy9kY/1immpSpTngOnyIbD4+eJOG5mxphhHDv3CHL9GltO4GcKr54kGBg3RNdbg==
-  dependencies:
-    "@babel/parser" "^7.1.0"
-    "@babel/types" "^7.0.0"
-    "@types/babel__generator" "*"
-    "@types/babel__template" "*"
-    "@types/babel__traverse" "*"
-
-"@types/babel__generator@*":
-  version "7.0.2"
-  resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.0.2.tgz#d2112a6b21fad600d7674274293c85dce0cb47fc"
-  integrity sha512-NHcOfab3Zw4q5sEE2COkpfXjoE7o+PmqD9DQW4koUT3roNxwziUdXGnRndMat/LJNUtePwn1TlP4do3uoe3KZQ==
-  dependencies:
-    "@babel/types" "^7.0.0"
-
-"@types/babel__template@*":
-  version "7.0.2"
-  resolved "https://registry.yarnpkg.com/@types/babel__template/-/babel__template-7.0.2.tgz#4ff63d6b52eddac1de7b975a5223ed32ecea9307"
-  integrity sha512-/K6zCpeW7Imzgab2bLkLEbz0+1JlFSrUMdw7KoIIu+IUdu51GWaBZpd3y1VXGVXzynvGa4DaIaxNZHiON3GXUg==
-  dependencies:
-    "@babel/parser" "^7.1.0"
-    "@babel/types" "^7.0.0"
-
-"@types/babel__traverse@*", "@types/babel__traverse@^7.0.6":
-  version "7.0.7"
-  resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.0.7.tgz#2496e9ff56196cc1429c72034e07eab6121b6f3f"
-  integrity sha512-CeBpmX1J8kWLcDEnI3Cl2Eo6RfbGvzUctA+CjZUhOKDFbLfcr7fc4usEqLNWetrlJd7RhAkyYe2czXop4fICpw==
-  dependencies:
-    "@babel/types" "^7.3.0"
-
-"@types/history@*":
-  version "4.7.3"
-  resolved "https://registry.yarnpkg.com/@types/history/-/history-4.7.3.tgz#856c99cdc1551d22c22b18b5402719affec9839a"
-  integrity sha512-cS5owqtwzLN5kY+l+KgKdRJ/Cee8tlmQoGQuIE9tWnSmS3JMKzmxo2HIAk2wODMifGwO20d62xZQLYz+RLfXmw==
-
-"@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0":
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.1.tgz#42995b446db9a48a11a07ec083499a860e9138ff"
-  integrity sha512-hRJD2ahnnpLgsj6KWMYSrmXkM3rm2Dl1qkx6IOFD5FnuNPXJIG5L0dhgKXCYTRMGzU4n0wImQ/xfmRc4POUFlg==
-
-"@types/istanbul-lib-report@*":
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/@types/istanbul-lib-report/-/istanbul-lib-report-1.1.1.tgz#e5471e7fa33c61358dd38426189c037a58433b8c"
-  integrity sha512-3BUTyMzbZa2DtDI2BkERNC6jJw2Mr2Y0oGI7mRxYNBPxppbtEK1F66u3bKwU2g+wxwWI7PAoRpJnOY1grJqzHg==
-  dependencies:
-    "@types/istanbul-lib-coverage" "*"
-
-"@types/istanbul-reports@^1.1.1":
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/@types/istanbul-reports/-/istanbul-reports-1.1.1.tgz#7a8cbf6a406f36c8add871625b278eaf0b0d255a"
-  integrity sha512-UpYjBi8xefVChsCoBpKShdxTllC9pwISirfoZsUa2AAdQg/Jd2KQGtSbw+ya7GPo7x/wAPlH6JBhKhAsXUEZNA==
-  dependencies:
-    "@types/istanbul-lib-coverage" "*"
-    "@types/istanbul-lib-report" "*"
-
-"@types/jest-diff@*":
-  version "20.0.1"
-  resolved "https://registry.yarnpkg.com/@types/jest-diff/-/jest-diff-20.0.1.tgz#35cc15b9c4f30a18ef21852e255fdb02f6d59b89"
-  integrity sha512-yALhelO3i0hqZwhjtcr6dYyaLoCHbAMshwtj6cGxTvHZAKXHsYGdff6E8EPw3xLKY0ELUTQ69Q1rQiJENnccMA==
-
-"@types/jest@24.0.12":
-  version "24.0.12"
-  resolved "https://registry.yarnpkg.com/@types/jest/-/jest-24.0.12.tgz#0553dd0a5ac744e7dc4e8700da6d3baedbde3e8f"
-  integrity sha512-60sjqMhat7i7XntZckcSGV8iREJyXXI6yFHZkSZvCPUeOnEJ/VP1rU/WpEWQ56mvoh8NhC+sfKAuJRTyGtCOow==
-  dependencies:
-    "@types/jest-diff" "*"
-
-"@types/node@11.13.9":
-  version "11.13.9"
-  resolved "https://registry.yarnpkg.com/@types/node/-/node-11.13.9.tgz#f80697caca7f7fb2526527a5c5a2743487f05ccc"
-  integrity sha512-NJ4yuEVw5podZbINp3tEqUIImMSAEHaCXRiWCf3KC32l6hIKf0iPJEh2uZdT0fELfRYk310yLmMXqy2leZQUbg==
-
-"@types/prop-types@*":
-  version "15.7.1"
-  resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.1.tgz#f1a11e7babb0c3cad68100be381d1e064c68f1f6"
-  integrity sha512-CFzn9idOEpHrgdw8JsoTkaDDyRWk1jrzIV8djzcgpq0y9tG4B4lFT+Nxh52DVpDXV+n4+NPNv7M1Dj5uMp6XFg==
-
-"@types/q@^1.5.1":
-  version "1.5.2"
-  resolved "https://registry.yarnpkg.com/@types/q/-/q-1.5.2.tgz#690a1475b84f2a884fd07cd797c00f5f31356ea8"
-  integrity sha512-ce5d3q03Ex0sy4R14722Rmt6MT07Ua+k4FwDfdcToYJcMKNtRVQvJ6JCAPdAmAnbRb6CsX6aYb9m96NGod9uTw==
-
-"@types/react-dom@16.8.4":
-  version "16.8.4"
-  resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-16.8.4.tgz#7fb7ba368857c7aa0f4e4511c4710ca2c5a12a88"
-  integrity sha512-eIRpEW73DCzPIMaNBDP5pPIpK1KXyZwNgfxiVagb5iGiz6da+9A5hslSX6GAQKdO7SayVCS/Fr2kjqprgAvkfA==
-  dependencies:
-    "@types/react" "*"
-
-"@types/react-router-dom@^4.3.3":
-  version "4.3.5"
-  resolved "https://registry.yarnpkg.com/@types/react-router-dom/-/react-router-dom-4.3.5.tgz#72f229967690c890d00f96e6b85e9ee5780db31f"
-  integrity sha512-eFajSUASYbPHg2BDM1G8Btx+YqGgvROPIg6sBhl3O4kbDdYXdFdfrgQFf/pcBuQVObjfT9AL/dd15jilR5DIEA==
-  dependencies:
-    "@types/history" "*"
-    "@types/react" "*"
-    "@types/react-router" "*"
-
-"@types/react-router@*":
-  version "5.0.3"
-  resolved "https://registry.yarnpkg.com/@types/react-router/-/react-router-5.0.3.tgz#855a1606e62de3f4d69ea34fb3c0e50e98e964d5"
-  integrity sha512-j2Gge5cvxca+5lK9wxovmGPgpVJMwjyu5lTA/Cd6fLGoPq7FXcUE1jFkEdxeyqGGz8VfHYSHCn5Lcn24BzaNKA==
-  dependencies:
-    "@types/history" "*"
-    "@types/react" "*"
-
-"@types/react-slick@^0.23.4":
-  version "0.23.4"
-  resolved "https://registry.yarnpkg.com/@types/react-slick/-/react-slick-0.23.4.tgz#c97e2a9e7e3d1933c68593b8e82752fab1e8ce53"
-  integrity sha512-vXoIy4GUfB7/YgqubR4H7RALo+pRdMYCeLgWwV3MPwl5pggTlEkFBTF19R7u+LJc85uMqC7RfsbkqPLMQ4ab+A==
-  dependencies:
-    "@types/react" "*"
-
-"@types/react@*":
-  version "16.9.2"
-  resolved "https://registry.yarnpkg.com/@types/react/-/react-16.9.2.tgz#6d1765431a1ad1877979013906731aae373de268"
-  integrity sha512-jYP2LWwlh+FTqGd9v7ynUKZzjj98T8x7Yclz479QdRhHfuW9yQ+0jjnD31eXSXutmBpppj5PYNLYLRfnZJvcfg==
-  dependencies:
-    "@types/prop-types" "*"
-    csstype "^2.2.0"
-
-"@types/react@16.8.15":
-  version "16.8.15"
-  resolved "https://registry.yarnpkg.com/@types/react/-/react-16.8.15.tgz#a76515fed5aa3e996603056f54427fec5f2a5122"
-  integrity sha512-dMhzw1rWK+wwJWvPp5Pk12ksSrm/z/C/+lOQbMZ7YfDQYnJ02bc0wtg4EJD9qrFhuxFrf/ywNgwTboucobJqQg==
-  dependencies:
-    "@types/prop-types" "*"
-    csstype "^2.2.0"
-
-"@types/stack-utils@^1.0.1":
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/@types/stack-utils/-/stack-utils-1.0.1.tgz#0a851d3bd96498fa25c33ab7278ed3bd65f06c3e"
-  integrity sha512-l42BggppR6zLmpfU6fq9HEa2oGPEI8yrSPL3GITjfRInppYFahObbIQOQK3UGxEnyQpltZLaPe75046NOZQikw==
-
-"@types/yargs-parser@*":
-  version "13.0.0"
-  resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-13.0.0.tgz#453743c5bbf9f1bed61d959baab5b06be029b2d0"
-  integrity sha512-wBlsw+8n21e6eTd4yVv8YD/E3xq0O6nNnJIquutAsFGE7EyMKz7W6RNT6BRu1SmdgmlCZ9tb0X+j+D6HGr8pZw==
-
-"@types/yargs@^13.0.0":
-  version "13.0.2"
-  resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-13.0.2.tgz#a64674fc0149574ecd90ba746e932b5a5f7b3653"
-  integrity sha512-lwwgizwk/bIIU+3ELORkyuOgDjCh7zuWDFqRtPPhhVgq9N1F7CvLNKg1TX4f2duwtKQ0p044Au9r1PLIXHrIzQ==
-  dependencies:
-    "@types/yargs-parser" "*"
-
-"@typescript-eslint/eslint-plugin@1.6.0":
-  version "1.6.0"
-  resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-1.6.0.tgz#a5ff3128c692393fb16efa403ec7c8a5593dab0f"
-  integrity sha512-U224c29E2lo861TQZs6GSmyC0OYeRNg6bE9UVIiFBxN2MlA0nq2dCrgIVyyRbC05UOcrgf2Wk/CF2gGOPQKUSQ==
-  dependencies:
-    "@typescript-eslint/parser" "1.6.0"
-    "@typescript-eslint/typescript-estree" "1.6.0"
-    requireindex "^1.2.0"
-    tsutils "^3.7.0"
-
-"@typescript-eslint/parser@1.6.0":
-  version "1.6.0"
-  resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-1.6.0.tgz#f01189c8b90848e3b8e45a6cdad27870529d1804"
-  integrity sha512-VB9xmSbfafI+/kI4gUK3PfrkGmrJQfh0N4EScT1gZXSZyUxpsBirPL99EWZg9MmPG0pzq/gMtgkk7/rAHj4aQw==
-  dependencies:
-    "@typescript-eslint/typescript-estree" "1.6.0"
-    eslint-scope "^4.0.0"
-    eslint-visitor-keys "^1.0.0"
-
-"@typescript-eslint/typescript-estree@1.6.0":
-  version "1.6.0"
-  resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-1.6.0.tgz#6cf43a07fee08b8eb52e4513b428c8cdc9751ef0"
-  integrity sha512-A4CanUwfaG4oXobD5y7EXbsOHjCwn8tj1RDd820etpPAjH+Icjc2K9e/DQM1Hac5zH2BSy+u6bjvvF2wwREvYA==
-  dependencies:
-    lodash.unescape "4.0.1"
-    semver "5.5.0"
-
-"@webassemblyjs/ast@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.8.5.tgz#51b1c5fe6576a34953bf4b253df9f0d490d9e359"
-  integrity sha512-aJMfngIZ65+t71C3y2nBBg5FFG0Okt9m0XEgWZ7Ywgn1oMAT8cNwx00Uv1cQyHtidq0Xn94R4TAywO+LCQ+ZAQ==
-  dependencies:
-    "@webassemblyjs/helper-module-context" "1.8.5"
-    "@webassemblyjs/helper-wasm-bytecode" "1.8.5"
-    "@webassemblyjs/wast-parser" "1.8.5"
-
-"@webassemblyjs/floating-point-hex-parser@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.8.5.tgz#1ba926a2923613edce496fd5b02e8ce8a5f49721"
-  integrity sha512-9p+79WHru1oqBh9ewP9zW95E3XAo+90oth7S5Re3eQnECGq59ly1Ri5tsIipKGpiStHsUYmY3zMLqtk3gTcOtQ==
-
-"@webassemblyjs/helper-api-error@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.8.5.tgz#c49dad22f645227c5edb610bdb9697f1aab721f7"
-  integrity sha512-Za/tnzsvnqdaSPOUXHyKJ2XI7PDX64kWtURyGiJJZKVEdFOsdKUCPTNEVFZq3zJ2R0G5wc2PZ5gvdTRFgm81zA==
-
-"@webassemblyjs/helper-buffer@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.8.5.tgz#fea93e429863dd5e4338555f42292385a653f204"
-  integrity sha512-Ri2R8nOS0U6G49Q86goFIPNgjyl6+oE1abW1pS84BuhP1Qcr5JqMwRFT3Ah3ADDDYGEgGs1iyb1DGX+kAi/c/Q==
-
-"@webassemblyjs/helper-code-frame@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.8.5.tgz#9a740ff48e3faa3022b1dff54423df9aa293c25e"
-  integrity sha512-VQAadSubZIhNpH46IR3yWO4kZZjMxN1opDrzePLdVKAZ+DFjkGD/rf4v1jap744uPVU6yjL/smZbRIIJTOUnKQ==
-  dependencies:
-    "@webassemblyjs/wast-printer" "1.8.5"
-
-"@webassemblyjs/helper-fsm@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-fsm/-/helper-fsm-1.8.5.tgz#ba0b7d3b3f7e4733da6059c9332275d860702452"
-  integrity sha512-kRuX/saORcg8se/ft6Q2UbRpZwP4y7YrWsLXPbbmtepKr22i8Z4O3V5QE9DbZK908dh5Xya4Un57SDIKwB9eow==
-
-"@webassemblyjs/helper-module-context@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-module-context/-/helper-module-context-1.8.5.tgz#def4b9927b0101dc8cbbd8d1edb5b7b9c82eb245"
-  integrity sha512-/O1B236mN7UNEU4t9X7Pj38i4VoU8CcMHyy3l2cV/kIF4U5KoHXDVqcDuOs1ltkac90IM4vZdHc52t1x8Yfs3g==
-  dependencies:
-    "@webassemblyjs/ast" "1.8.5"
-    mamacro "^0.0.3"
-
-"@webassemblyjs/helper-wasm-bytecode@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.8.5.tgz#537a750eddf5c1e932f3744206551c91c1b93e61"
-  integrity sha512-Cu4YMYG3Ddl72CbmpjU/wbP6SACcOPVbHN1dI4VJNJVgFwaKf1ppeFJrwydOG3NDHxVGuCfPlLZNyEdIYlQ6QQ==
-
-"@webassemblyjs/helper-wasm-section@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.8.5.tgz#74ca6a6bcbe19e50a3b6b462847e69503e6bfcbf"
-  integrity sha512-VV083zwR+VTrIWWtgIUpqfvVdK4ff38loRmrdDBgBT8ADXYsEZ5mPQ4Nde90N3UYatHdYoDIFb7oHzMncI02tA==
-  dependencies:
-    "@webassemblyjs/ast" "1.8.5"
-    "@webassemblyjs/helper-buffer" "1.8.5"
-    "@webassemblyjs/helper-wasm-bytecode" "1.8.5"
-    "@webassemblyjs/wasm-gen" "1.8.5"
-
-"@webassemblyjs/ieee754@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/ieee754/-/ieee754-1.8.5.tgz#712329dbef240f36bf57bd2f7b8fb9bf4154421e"
-  integrity sha512-aaCvQYrvKbY/n6wKHb/ylAJr27GglahUO89CcGXMItrOBqRarUMxWLJgxm9PJNuKULwN5n1csT9bYoMeZOGF3g==
-  dependencies:
-    "@xtuc/ieee754" "^1.2.0"
-
-"@webassemblyjs/leb128@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/leb128/-/leb128-1.8.5.tgz#044edeb34ea679f3e04cd4fd9824d5e35767ae10"
-  integrity sha512-plYUuUwleLIziknvlP8VpTgO4kqNaH57Y3JnNa6DLpu/sGcP6hbVdfdX5aHAV716pQBKrfuU26BJK29qY37J7A==
-  dependencies:
-    "@xtuc/long" "4.2.2"
-
-"@webassemblyjs/utf8@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/utf8/-/utf8-1.8.5.tgz#a8bf3b5d8ffe986c7c1e373ccbdc2a0915f0cedc"
-  integrity sha512-U7zgftmQriw37tfD934UNInokz6yTmn29inT2cAetAsaU9YeVCveWEwhKL1Mg4yS7q//NGdzy79nlXh3bT8Kjw==
-
-"@webassemblyjs/wasm-edit@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.8.5.tgz#962da12aa5acc1c131c81c4232991c82ce56e01a"
-  integrity sha512-A41EMy8MWw5yvqj7MQzkDjU29K7UJq1VrX2vWLzfpRHt3ISftOXqrtojn7nlPsZ9Ijhp5NwuODuycSvfAO/26Q==
-  dependencies:
-    "@webassemblyjs/ast" "1.8.5"
-    "@webassemblyjs/helper-buffer" "1.8.5"
-    "@webassemblyjs/helper-wasm-bytecode" "1.8.5"
-    "@webassemblyjs/helper-wasm-section" "1.8.5"
-    "@webassemblyjs/wasm-gen" "1.8.5"
-    "@webassemblyjs/wasm-opt" "1.8.5"
-    "@webassemblyjs/wasm-parser" "1.8.5"
-    "@webassemblyjs/wast-printer" "1.8.5"
-
-"@webassemblyjs/wasm-gen@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.8.5.tgz#54840766c2c1002eb64ed1abe720aded714f98bc"
-  integrity sha512-BCZBT0LURC0CXDzj5FXSc2FPTsxwp3nWcqXQdOZE4U7h7i8FqtFK5Egia6f9raQLpEKT1VL7zr4r3+QX6zArWg==
-  dependencies:
-    "@webassemblyjs/ast" "1.8.5"
-    "@webassemblyjs/helper-wasm-bytecode" "1.8.5"
-    "@webassemblyjs/ieee754" "1.8.5"
-    "@webassemblyjs/leb128" "1.8.5"
-    "@webassemblyjs/utf8" "1.8.5"
-
-"@webassemblyjs/wasm-opt@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.8.5.tgz#b24d9f6ba50394af1349f510afa8ffcb8a63d264"
-  integrity sha512-HKo2mO/Uh9A6ojzu7cjslGaHaUU14LdLbGEKqTR7PBKwT6LdPtLLh9fPY33rmr5wcOMrsWDbbdCHq4hQUdd37Q==
-  dependencies:
-    "@webassemblyjs/ast" "1.8.5"
-    "@webassemblyjs/helper-buffer" "1.8.5"
-    "@webassemblyjs/wasm-gen" "1.8.5"
-    "@webassemblyjs/wasm-parser" "1.8.5"
-
-"@webassemblyjs/wasm-parser@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.8.5.tgz#21576f0ec88b91427357b8536383668ef7c66b8d"
-  integrity sha512-pi0SYE9T6tfcMkthwcgCpL0cM9nRYr6/6fjgDtL6q/ZqKHdMWvxitRi5JcZ7RI4SNJJYnYNaWy5UUrHQy998lw==
-  dependencies:
-    "@webassemblyjs/ast" "1.8.5"
-    "@webassemblyjs/helper-api-error" "1.8.5"
-    "@webassemblyjs/helper-wasm-bytecode" "1.8.5"
-    "@webassemblyjs/ieee754" "1.8.5"
-    "@webassemblyjs/leb128" "1.8.5"
-    "@webassemblyjs/utf8" "1.8.5"
-
-"@webassemblyjs/wast-parser@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-parser/-/wast-parser-1.8.5.tgz#e10eecd542d0e7bd394f6827c49f3df6d4eefb8c"
-  integrity sha512-daXC1FyKWHF1i11obK086QRlsMsY4+tIOKgBqI1lxAnkp9xe9YMcgOxm9kLe+ttjs5aWV2KKE1TWJCN57/Btsg==
-  dependencies:
-    "@webassemblyjs/ast" "1.8.5"
-    "@webassemblyjs/floating-point-hex-parser" "1.8.5"
-    "@webassemblyjs/helper-api-error" "1.8.5"
-    "@webassemblyjs/helper-code-frame" "1.8.5"
-    "@webassemblyjs/helper-fsm" "1.8.5"
-    "@xtuc/long" "4.2.2"
-
-"@webassemblyjs/wast-printer@1.8.5":
-  version "1.8.5"
-  resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-printer/-/wast-printer-1.8.5.tgz#114bbc481fd10ca0e23b3560fa812748b0bae5bc"
-  integrity sha512-w0U0pD4EhlnvRyeJzBqaVSJAo9w/ce7/WPogeXLzGkO6hzhr4GnQIZ4W4uUt5b9ooAaXPtnXlj0gzsXEOUNYMg==
-  dependencies:
-    "@webassemblyjs/ast" "1.8.5"
-    "@webassemblyjs/wast-parser" "1.8.5"
-    "@xtuc/long" "4.2.2"
-
-"@xtuc/ieee754@^1.2.0":
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/@xtuc/ieee754/-/ieee754-1.2.0.tgz#eef014a3145ae477a1cbc00cd1e552336dceb790"
-  integrity sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==
-
-"@xtuc/long@4.2.2":
-  version "4.2.2"
-  resolved "https://registry.yarnpkg.com/@xtuc/long/-/long-4.2.2.tgz#d291c6a4e97989b5c61d9acf396ae4fe133a718d"
-  integrity sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==
-
-abab@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/abab/-/abab-2.0.0.tgz#aba0ab4c5eee2d4c79d3487d85450fb2376ebb0f"
-  integrity sha512-sY5AXXVZv4Y1VACTtR11UJCPHHudgY5i26Qj5TypE6DKlIApbwb5uqhXcJ5UUGbvZNRh7EeIoW+LrJumBsKp7w==
-
-abbrev@1:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8"
-  integrity sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==
-
-accepts@~1.3.4, accepts@~1.3.5, accepts@~1.3.7:
-  version "1.3.7"
-  resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.7.tgz#531bc726517a3b2b41f850021c6cc15eaab507cd"
-  integrity sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==
-  dependencies:
-    mime-types "~2.1.24"
-    negotiator "0.6.2"
-
-acorn-dynamic-import@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/acorn-dynamic-import/-/acorn-dynamic-import-4.0.0.tgz#482210140582a36b83c3e342e1cfebcaa9240948"
-  integrity sha512-d3OEjQV4ROpoflsnUA8HozoIR504TFxNivYEUi6uwz0IYhBkTDXGuWlNdMtybRt3nqVx/L6XqMt0FxkXuWKZhw==
-
-acorn-globals@^4.1.0, acorn-globals@^4.3.0:
-  version "4.3.3"
-  resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-4.3.3.tgz#a86f75b69680b8780d30edd21eee4e0ea170c05e"
-  integrity sha512-vkR40VwS2SYO98AIeFvzWWh+xyc2qi9s7OoXSFEGIP/rOJKzjnhykaZJNnHdoq4BL2gGxI5EZOU16z896EYnOQ==
-  dependencies:
-    acorn "^6.0.1"
-    acorn-walk "^6.0.1"
-
-acorn-jsx@^5.0.0:
-  version "5.0.2"
-  resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.0.2.tgz#84b68ea44b373c4f8686023a551f61a21b7c4a4f"
-  integrity sha512-tiNTrP1MP0QrChmD2DdupCr6HWSFeKVw5d/dHTu4Y7rkAkRhU/Dt7dphAfIUyxtHpl/eBVip5uTNSpQJHylpAw==
-
-acorn-walk@^6.0.1:
-  version "6.2.0"
-  resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-6.2.0.tgz#123cb8f3b84c2171f1f7fb252615b1c78a6b1a8c"
-  integrity sha512-7evsyfH1cLOCdAzZAd43Cic04yKydNx0cF+7tiA19p1XnLLPU4dpCQOqpjqwokFe//vS0QqfqqjCS2JkiIs0cA==
-
-acorn@^5.5.3:
-  version "5.7.3"
-  resolved "https://registry.yarnpkg.com/acorn/-/acorn-5.7.3.tgz#67aa231bf8812974b85235a96771eb6bd07ea279"
-  integrity sha512-T/zvzYRfbVojPWahDsE5evJdHb3oJoQfFbsrKM7w5Zcs++Tr257tia3BmMP8XYVjp1S9RZXQMh7gao96BlqZOw==
-
-acorn@^6.0.1, acorn@^6.0.4, acorn@^6.0.5, acorn@^6.0.7:
-  version "6.3.0"
-  resolved "https://registry.yarnpkg.com/acorn/-/acorn-6.3.0.tgz#0087509119ffa4fc0a0041d1e93a417e68cb856e"
-  integrity sha512-/czfa8BwS88b9gWQVhc8eknunSA2DoJpJyTQkhheIf5E48u1N0R4q/YxxsAeqRrmK9TQ/uYfgLDfZo91UlANIA==
-
-add-dom-event-listener@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/add-dom-event-listener/-/add-dom-event-listener-1.1.0.tgz#6a92db3a0dd0abc254e095c0f1dc14acbbaae310"
-  integrity sha512-WCxx1ixHT0GQU9hb0KI/mhgRQhnU+U3GvwY6ZvVjYq8rsihIGoaIOUbY0yMPBxLH5MDtr0kz3fisWGNcbWW7Jw==
-  dependencies:
-    object-assign "4.x"
-
-address@1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/address/-/address-1.1.0.tgz#ef8e047847fcd2c5b6f50c16965f924fd99fe709"
-  integrity sha512-4diPfzWbLEIElVG4AnqP+00SULlPzNuyJFNnmMrLgyaxG6tZXJ1sn7mjBu4fHrJE+Yp/jgylOweJn2xsLMFggQ==
-
-address@^1.0.1:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/address/-/address-1.1.2.tgz#bf1116c9c758c51b7a933d296b72c221ed9428b6"
-  integrity sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA==
-
-ajv-errors@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/ajv-errors/-/ajv-errors-1.0.1.tgz#f35986aceb91afadec4102fbd85014950cefa64d"
-  integrity sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==
-
-ajv-keywords@^3.1.0:
-  version "3.4.1"
-  resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-3.4.1.tgz#ef916e271c64ac12171fd8384eaae6b2345854da"
-  integrity sha512-RO1ibKvd27e6FEShVFfPALuHI3WjSVNeK5FIsmme/LYRNxjKuNj+Dt7bucLa6NdSv3JcVTyMlm9kGR84z1XpaQ==
-
-ajv@^6.1.0, ajv@^6.10.2, ajv@^6.5.5, ajv@^6.9.1:
-  version "6.10.2"
-  resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.10.2.tgz#d3cea04d6b017b2894ad69040fec8b623eb4bd52"
-  integrity sha512-TXtUUEYHuaTEbLZWIKUr5pmBuhDLy+8KYtPYdcV8qC+pOZL+NKqYwvWSRrVXHn+ZmRRAu8vJTAznH7Oag6RVRw==
-  dependencies:
-    fast-deep-equal "^2.0.1"
-    fast-json-stable-stringify "^2.0.0"
-    json-schema-traverse "^0.4.1"
-    uri-js "^4.2.2"
-
-alphanum-sort@^1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/alphanum-sort/-/alphanum-sort-1.0.2.tgz#97a1119649b211ad33691d9f9f486a8ec9fbe0a3"
-  integrity sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM=
-
-ansi-colors@^3.0.0:
-  version "3.2.4"
-  resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-3.2.4.tgz#e3a3da4bfbae6c86a9c285625de124a234026fbf"
-  integrity sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA==
-
-ansi-escapes@^3.0.0, ansi-escapes@^3.2.0:
-  version "3.2.0"
-  resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-3.2.0.tgz#8780b98ff9dbf5638152d1f1fe5c1d7b4442976b"
-  integrity sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ==
-
-ansi-html@0.0.7:
-  version "0.0.7"
-  resolved "https://registry.yarnpkg.com/ansi-html/-/ansi-html-0.0.7.tgz#813584021962a9e9e6fd039f940d12f56ca7859e"
-  integrity sha1-gTWEAhliqenm/QOflA0S9WynhZ4=
-
-ansi-regex@^2.0.0:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
-  integrity sha1-w7M6te42DYbg5ijwRorn7yfWVN8=
-
-ansi-regex@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998"
-  integrity sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=
-
-ansi-regex@^4.0.0, ansi-regex@^4.1.0:
-  version "4.1.0"
-  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-4.1.0.tgz#8b9f8f08cf1acb843756a839ca8c7e3168c51997"
-  integrity sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==
-
-ansi-styles@^2.2.1:
-  version "2.2.1"
-  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
-  integrity sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=
-
-ansi-styles@^3.2.0, ansi-styles@^3.2.1:
-  version "3.2.1"
-  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d"
-  integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==
-  dependencies:
-    color-convert "^1.9.0"
-
-antd@^3.16.6:
-  version "3.22.2"
-  resolved "https://registry.yarnpkg.com/antd/-/antd-3.22.2.tgz#ae8279d940decf062a0708880bc4340783b7b270"
-  integrity sha512-IZz8yMUfi3qvVj0Y/zFMc759yOxzR0ihuCdrCh+3f/MgPVBrKR+u02/eJrDJBiSczrm4Qzb46NaNGQUR0GkxbA==
-  dependencies:
-    "@ant-design/create-react-context" "^0.2.4"
-    "@ant-design/icons" "~2.1.1"
-    "@ant-design/icons-react" "~2.0.1"
-    "@types/react-slick" "^0.23.4"
-    array-tree-filter "^2.1.0"
-    babel-runtime "6.x"
-    classnames "~2.2.6"
-    copy-to-clipboard "^3.2.0"
-    css-animation "^1.5.0"
-    dom-closest "^0.2.0"
-    enquire.js "^2.1.6"
-    lodash "^4.17.13"
-    moment "^2.24.0"
-    omit.js "^1.0.2"
-    prop-types "^15.7.2"
-    raf "^3.4.1"
-    rc-animate "^2.8.3"
-    rc-calendar "~9.15.5"
-    rc-cascader "~0.17.4"
-    rc-checkbox "~2.1.6"
-    rc-collapse "~1.11.3"
-    rc-dialog "~7.5.2"
-    rc-drawer "~2.0.1"
-    rc-dropdown "~2.4.1"
-    rc-editor-mention "^1.1.13"
-    rc-form "^2.4.5"
-    rc-input-number "~4.4.5"
-    rc-mentions "~0.4.0"
-    rc-menu "~7.4.23"
-    rc-notification "~3.3.1"
-    rc-pagination "~1.20.5"
-    rc-progress "~2.5.0"
-    rc-rate "~2.5.0"
-    rc-select "~9.2.0"
-    rc-slider "~8.6.11"
-    rc-steps "~3.5.0"
-    rc-switch "~1.9.0"
-    rc-table "~6.7.0"
-    rc-tabs "~9.6.4"
-    rc-time-picker "~3.7.1"
-    rc-tooltip "~3.7.3"
-    rc-tree "~2.1.0"
-    rc-tree-select "~2.9.1"
-    rc-trigger "^2.6.2"
-    rc-upload "~2.7.0"
-    rc-util "^4.10.0"
-    react-lazy-load "^3.0.13"
-    react-lifecycles-compat "^3.0.4"
-    react-slick "~0.25.2"
-    resize-observer-polyfill "^1.5.1"
-    shallowequal "^1.1.0"
-    warning "~4.0.3"
-
-anymatch@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-2.0.0.tgz#bcb24b4f37934d9aa7ac17b4adaf89e7c76ef2eb"
-  integrity sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==
-  dependencies:
-    micromatch "^3.1.4"
-    normalize-path "^2.1.1"
-
-aproba@^1.0.3, aproba@^1.1.1:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a"
-  integrity sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==
-
-are-we-there-yet@~1.1.2:
-  version "1.1.5"
-  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz#4b35c2944f062a8bfcda66410760350fe9ddfc21"
-  integrity sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w==
-  dependencies:
-    delegates "^1.0.0"
-    readable-stream "^2.0.6"
-
-argparse@^1.0.7:
-  version "1.0.10"
-  resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911"
-  integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==
-  dependencies:
-    sprintf-js "~1.0.2"
-
-aria-query@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/aria-query/-/aria-query-3.0.0.tgz#65b3fcc1ca1155a8c9ae64d6eee297f15d5133cc"
-  integrity sha1-ZbP8wcoRVajJrmTW7uKX8V1RM8w=
-  dependencies:
-    ast-types-flow "0.0.7"
-    commander "^2.11.0"
-
-arr-diff@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-4.0.0.tgz#d6461074febfec71e7e15235761a329a5dc7c520"
-  integrity sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=
-
-arr-flatten@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.1.0.tgz#36048bbff4e7b47e136644316c99669ea5ae91f1"
-  integrity sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==
-
-arr-union@^3.1.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/arr-union/-/arr-union-3.1.0.tgz#e39b09aea9def866a8f206e288af63919bae39c4"
-  integrity sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=
-
-array-equal@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
-  integrity sha1-jCpe8kcv2ep0KwTHenUJO6J1fJM=
-
-array-filter@~0.0.0:
-  version "0.0.1"
-  resolved "https://registry.yarnpkg.com/array-filter/-/array-filter-0.0.1.tgz#7da8cf2e26628ed732803581fd21f67cacd2eeec"
-  integrity sha1-fajPLiZijtcygDWB/SH2fKzS7uw=
-
-array-flatten@1.1.1:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2"
-  integrity sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=
-
-array-flatten@^2.1.0:
-  version "2.1.2"
-  resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-2.1.2.tgz#24ef80a28c1a893617e2149b0c6d0d788293b099"
-  integrity sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==
-
-array-includes@^3.0.3:
-  version "3.0.3"
-  resolved "https://registry.yarnpkg.com/array-includes/-/array-includes-3.0.3.tgz#184b48f62d92d7452bb31b323165c7f8bd02266d"
-  integrity sha1-GEtI9i2S10UrsxsyMWXH+L0CJm0=
-  dependencies:
-    define-properties "^1.1.2"
-    es-abstract "^1.7.0"
-
-array-map@~0.0.0:
-  version "0.0.0"
-  resolved "https://registry.yarnpkg.com/array-map/-/array-map-0.0.0.tgz#88a2bab73d1cf7bcd5c1b118a003f66f665fa662"
-  integrity sha1-iKK6tz0c97zVwbEYoAP2b2ZfpmI=
-
-array-reduce@~0.0.0:
-  version "0.0.0"
-  resolved "https://registry.yarnpkg.com/array-reduce/-/array-reduce-0.0.0.tgz#173899d3ffd1c7d9383e4479525dbe278cab5f2b"
-  integrity sha1-FziZ0//Rx9k4PkR5Ul2+J4yrXys=
-
-array-tree-filter@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/array-tree-filter/-/array-tree-filter-2.1.0.tgz#873ac00fec83749f255ac8dd083814b4f6329190"
-  integrity sha512-4ROwICNlNw/Hqa9v+rk5h22KjmzB1JGTMVKP2AKJBOCgb0yL0ASf0+YvCcLNNwquOHNX48jkeZIJ3a+oOQqKcw==
-
-array-union@^1.0.1:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39"
-  integrity sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk=
-  dependencies:
-    array-uniq "^1.0.1"
-
-array-uniq@^1.0.1:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6"
-  integrity sha1-r2rId6Jcx/dOBYiUdThY39sk/bY=
-
-array-unique@^0.3.2:
-  version "0.3.2"
-  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.3.2.tgz#a894b75d4bc4f6cd679ef3244a9fd8f46ae2d428"
-  integrity sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=
-
-arrify@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
-  integrity sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=
-
-asap@~2.0.3, asap@~2.0.6:
-  version "2.0.6"
-  resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46"
-  integrity sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY=
-
-asn1.js@^4.0.0:
-  version "4.10.1"
-  resolved "https://registry.yarnpkg.com/asn1.js/-/asn1.js-4.10.1.tgz#b9c2bf5805f1e64aadeed6df3a2bfafb5a73f5a0"
-  integrity sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==
-  dependencies:
-    bn.js "^4.0.0"
-    inherits "^2.0.1"
-    minimalistic-assert "^1.0.0"
-
-asn1@~0.2.3:
-  version "0.2.4"
-  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.4.tgz#8d2475dfab553bb33e77b54e59e880bb8ce23136"
-  integrity sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==
-  dependencies:
-    safer-buffer "~2.1.0"
-
-assert-plus@1.0.0, assert-plus@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
-  integrity sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=
-
-assert@^1.1.1:
-  version "1.5.0"
-  resolved "https://registry.yarnpkg.com/assert/-/assert-1.5.0.tgz#55c109aaf6e0aefdb3dc4b71240c70bf574b18eb"
-  integrity sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA==
-  dependencies:
-    object-assign "^4.1.1"
-    util "0.10.3"
-
-assign-symbols@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/assign-symbols/-/assign-symbols-1.0.0.tgz#59667f41fadd4f20ccbc2bb96b8d4f7f78ec0367"
-  integrity sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=
-
-ast-types-flow@0.0.7, ast-types-flow@^0.0.7:
-  version "0.0.7"
-  resolved "https://registry.yarnpkg.com/ast-types-flow/-/ast-types-flow-0.0.7.tgz#f70b735c6bca1a5c9c22d982c3e39e7feba3bdad"
-  integrity sha1-9wtzXGvKGlycItmCw+Oef+ujva0=
-
-astral-regex@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/astral-regex/-/astral-regex-1.0.0.tgz#6c8c3fb827dd43ee3918f27b82782ab7658a6fd9"
-  integrity sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==
-
-async-each@^1.0.1:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/async-each/-/async-each-1.0.3.tgz#b727dbf87d7651602f06f4d4ac387f47d91b0cbf"
-  integrity sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==
-
-async-limiter@~1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.1.tgz#dd379e94f0db8310b08291f9d64c3209766617fd"
-  integrity sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==
-
-async-validator@~1.11.3:
-  version "1.11.5"
-  resolved "https://registry.yarnpkg.com/async-validator/-/async-validator-1.11.5.tgz#9d43cf49ef6bb76be5442388d19fb9a6e47597ea"
-  integrity sha512-XNtCsMAeAH1pdLMEg1z8/Bb3a8cdCbui9QbJATRFHHHW5kT6+NPI3zSVQUXgikTFITzsg+kYY5NTWhM2Orwt9w==
-
-async@^1.5.2:
-  version "1.5.2"
-  resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a"
-  integrity sha1-7GphrlZIDAw8skHJVhjiCJL5Zyo=
-
-asynckit@^0.4.0:
-  version "0.4.0"
-  resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
-  integrity sha1-x57Zf380y48robyXkLzDZkdLS3k=
-
-atob@^2.1.1:
-  version "2.1.2"
-  resolved "https://registry.yarnpkg.com/atob/-/atob-2.1.2.tgz#6d9517eb9e030d2436666651e86bd9f6f13533c9"
-  integrity sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==
-
-autoprefixer@^9.4.9:
-  version "9.6.1"
-  resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-9.6.1.tgz#51967a02d2d2300bb01866c1611ec8348d355a47"
-  integrity sha512-aVo5WxR3VyvyJxcJC3h4FKfwCQvQWb1tSI5VHNibddCVWrcD1NvlxEweg3TSgiPztMnWfjpy2FURKA2kvDE+Tw==
-  dependencies:
-    browserslist "^4.6.3"
-    caniuse-lite "^1.0.30000980"
-    chalk "^2.4.2"
-    normalize-range "^0.1.2"
-    num2fraction "^1.2.2"
-    postcss "^7.0.17"
-    postcss-value-parser "^4.0.0"
-
-aws-sign2@~0.7.0:
-  version "0.7.0"
-  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8"
-  integrity sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=
-
-aws4@^1.8.0:
-  version "1.8.0"
-  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.8.0.tgz#f0e003d9ca9e7f59c7a508945d7b2ef9a04a542f"
-  integrity sha512-ReZxvNHIOv88FlT7rxcXIIC0fPt4KZqZbOlivyWtXLt8ESx84zd3kMC6iK5jVeS2qt+g7ftS7ye4fi06X5rtRQ==
-
-axobject-query@^2.0.2:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/axobject-query/-/axobject-query-2.0.2.tgz#ea187abe5b9002b377f925d8bf7d1c561adf38f9"
-  integrity sha512-MCeek8ZH7hKyO1rWUbKNQBbl4l2eY0ntk7OGi+q0RlafrCnfPxC06WZA+uebCfmYp4mNU9jRBP1AhGyf8+W3ww==
-  dependencies:
-    ast-types-flow "0.0.7"
-
-babel-code-frame@^6.22.0:
-  version "6.26.0"
-  resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b"
-  integrity sha1-Y/1D99weO7fONZR9uP42mj9Yx0s=
-  dependencies:
-    chalk "^1.1.3"
-    esutils "^2.0.2"
-    js-tokens "^3.0.2"
-
-babel-eslint@10.0.1:
-  version "10.0.1"
-  resolved "https://registry.yarnpkg.com/babel-eslint/-/babel-eslint-10.0.1.tgz#919681dc099614cd7d31d45c8908695092a1faed"
-  integrity sha512-z7OT1iNV+TjOwHNLLyJk+HN+YVWX+CLE6fPD2SymJZOZQBs+QIexFjhm4keGTm8MW9xr4EC9Q0PbaLB24V5GoQ==
-  dependencies:
-    "@babel/code-frame" "^7.0.0"
-    "@babel/parser" "^7.0.0"
-    "@babel/traverse" "^7.0.0"
-    "@babel/types" "^7.0.0"
-    eslint-scope "3.7.1"
-    eslint-visitor-keys "^1.0.0"
-
-babel-extract-comments@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/babel-extract-comments/-/babel-extract-comments-1.0.0.tgz#0a2aedf81417ed391b85e18b4614e693a0351a21"
-  integrity sha512-qWWzi4TlddohA91bFwgt6zO/J0X+io7Qp184Fw0m2JYRSTZnJbFR8+07KmzudHCZgOiKRCrjhylwv9Xd8gfhVQ==
-  dependencies:
-    babylon "^6.18.0"
-
-babel-jest@24.7.1:
-  version "24.7.1"
-  resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-24.7.1.tgz#73902c9ff15a7dfbdc9994b0b17fcefd96042178"
-  integrity sha512-GPnLqfk8Mtt0i4OemjWkChi73A3ALs4w2/QbG64uAj8b5mmwzxc7jbJVRZt8NJkxi6FopVHog9S3xX6UJKb2qg==
-  dependencies:
-    "@jest/transform" "^24.7.1"
-    "@jest/types" "^24.7.0"
-    "@types/babel__core" "^7.1.0"
-    babel-plugin-istanbul "^5.1.0"
-    babel-preset-jest "^24.6.0"
-    chalk "^2.4.2"
-    slash "^2.0.0"
-
-babel-jest@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-24.9.0.tgz#3fc327cb8467b89d14d7bc70e315104a783ccd54"
-  integrity sha512-ntuddfyiN+EhMw58PTNL1ph4C9rECiQXjI4nMMBKBaNjXvqLdkXpPRcMSr4iyBrJg/+wz9brFUD6RhOAT6r4Iw==
-  dependencies:
-    "@jest/transform" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    "@types/babel__core" "^7.1.0"
-    babel-plugin-istanbul "^5.1.0"
-    babel-preset-jest "^24.9.0"
-    chalk "^2.4.2"
-    slash "^2.0.0"
-
-babel-loader@8.0.5:
-  version "8.0.5"
-  resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-8.0.5.tgz#225322d7509c2157655840bba52e46b6c2f2fe33"
-  integrity sha512-NTnHnVRd2JnRqPC0vW+iOQWU5pchDbYXsG2E6DMXEpMfUcQKclF9gmf3G3ZMhzG7IG9ji4coL0cm+FxeWxDpnw==
-  dependencies:
-    find-cache-dir "^2.0.0"
-    loader-utils "^1.0.2"
-    mkdirp "^0.5.1"
-    util.promisify "^1.0.0"
-
-babel-plugin-dynamic-import-node@2.2.0:
-  version "2.2.0"
-  resolved "https://registry.yarnpkg.com/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.2.0.tgz#c0adfb07d95f4a4495e9aaac6ec386c4d7c2524e"
-  integrity sha512-fP899ELUnTaBcIzmrW7nniyqqdYWrWuJUyPWHxFa/c7r7hS6KC8FscNfLlBNIoPSc55kYMGEEKjPjJGCLbE1qA==
-  dependencies:
-    object.assign "^4.1.0"
-
-babel-plugin-dynamic-import-node@^2.3.0:
-  version "2.3.0"
-  resolved "https://registry.yarnpkg.com/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.0.tgz#f00f507bdaa3c3e3ff6e7e5e98d90a7acab96f7f"
-  integrity sha512-o6qFkpeQEBxcqt0XYlWzAVxNCSCZdUgcR8IRlhD/8DylxjjO4foPcvTW0GGKa/cVt3rvxZ7o5ippJ+/0nvLhlQ==
-  dependencies:
-    object.assign "^4.1.0"
-
-babel-plugin-import@^1.11.0:
-  version "1.12.1"
-  resolved "https://registry.yarnpkg.com/babel-plugin-import/-/babel-plugin-import-1.12.1.tgz#a63b0a6f8f7484db660c59665185aa3b0c2f9f3f"
-  integrity sha512-3BwVJFEByTUyqZWOxizr/YwYcqqre2EebmgSUtXyToJbHzJv6rTxA0LApDntvwERlmIvcM6lUktUN0snMTjOsA==
-  dependencies:
-    "@babel/helper-module-imports" "^7.0.0"
-    "@babel/runtime" "^7.0.0"
-
-babel-plugin-istanbul@^5.1.0:
-  version "5.2.0"
-  resolved "https://registry.yarnpkg.com/babel-plugin-istanbul/-/babel-plugin-istanbul-5.2.0.tgz#df4ade83d897a92df069c4d9a25cf2671293c854"
-  integrity sha512-5LphC0USA8t4i1zCtjbbNb6jJj/9+X6P37Qfirc/70EQ34xKlMW+a1RHGwxGI+SwWpNwZ27HqvzAobeqaXwiZw==
-  dependencies:
-    "@babel/helper-plugin-utils" "^7.0.0"
-    find-up "^3.0.0"
-    istanbul-lib-instrument "^3.3.0"
-    test-exclude "^5.2.3"
-
-babel-plugin-jest-hoist@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-24.9.0.tgz#4f837091eb407e01447c8843cbec546d0002d756"
-  integrity sha512-2EMA2P8Vp7lG0RAzr4HXqtYwacfMErOuv1U3wrvxHX6rD1sV6xS3WXG3r8TRQ2r6w8OhvSdWt+z41hQNwNm3Xw==
-  dependencies:
-    "@types/babel__traverse" "^7.0.6"
-
-babel-plugin-macros@2.5.1:
-  version "2.5.1"
-  resolved "https://registry.yarnpkg.com/babel-plugin-macros/-/babel-plugin-macros-2.5.1.tgz#4a119ac2c2e19b458c259b9accd7ee34fd57ec6f"
-  integrity sha512-xN3KhAxPzsJ6OQTktCanNpIFnnMsCV+t8OloKxIL72D6+SUZYFn9qfklPgef5HyyDtzYZqqb+fs1S12+gQY82Q==
-  dependencies:
-    "@babel/runtime" "^7.4.2"
-    cosmiconfig "^5.2.0"
-    resolve "^1.10.0"
-
-babel-plugin-named-asset-import@^0.3.2:
-  version "0.3.3"
-  resolved "https://registry.yarnpkg.com/babel-plugin-named-asset-import/-/babel-plugin-named-asset-import-0.3.3.tgz#9ba2f3ac4dc78b042651654f07e847adfe50667c"
-  integrity sha512-1XDRysF4894BUdMChT+2HHbtJYiO7zx5Be7U6bT8dISy7OdyETMGIAQBMPQCsY1YRf0xcubwnKKaDr5bk15JTA==
-
-babel-plugin-syntax-object-rest-spread@^6.8.0:
-  version "6.13.0"
-  resolved "https://registry.yarnpkg.com/babel-plugin-syntax-object-rest-spread/-/babel-plugin-syntax-object-rest-spread-6.13.0.tgz#fd6536f2bce13836ffa3a5458c4903a597bb3bf5"
-  integrity sha1-/WU28rzhODb/o6VFjEkDpZe7O/U=
-
-babel-plugin-transform-object-rest-spread@^6.26.0:
-  version "6.26.0"
-  resolved "https://registry.yarnpkg.com/babel-plugin-transform-object-rest-spread/-/babel-plugin-transform-object-rest-spread-6.26.0.tgz#0f36692d50fef6b7e2d4b3ac1478137a963b7b06"
-  integrity sha1-DzZpLVD+9rfi1LOsFHgTepY7ewY=
-  dependencies:
-    babel-plugin-syntax-object-rest-spread "^6.8.0"
-    babel-runtime "^6.26.0"
-
-babel-plugin-transform-react-remove-prop-types@0.4.24:
-  version "0.4.24"
-  resolved "https://registry.yarnpkg.com/babel-plugin-transform-react-remove-prop-types/-/babel-plugin-transform-react-remove-prop-types-0.4.24.tgz#f2edaf9b4c6a5fbe5c1d678bfb531078c1555f3a"
-  integrity sha512-eqj0hVcJUR57/Ug2zE1Yswsw4LhuqqHhD+8v120T1cl3kjg76QwtyBrdIk4WVwK+lAhBJVYCd/v+4nc4y+8JsA==
-
-babel-preset-jest@^24.6.0, babel-preset-jest@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/babel-preset-jest/-/babel-preset-jest-24.9.0.tgz#192b521e2217fb1d1f67cf73f70c336650ad3cdc"
-  integrity sha512-izTUuhE4TMfTRPF92fFwD2QfdXaZW08qvWTFCI51V8rW5x00UuPgc3ajRoWofXOuxjfcOM5zzSYsQS3H8KGCAg==
-  dependencies:
-    "@babel/plugin-syntax-object-rest-spread" "^7.0.0"
-    babel-plugin-jest-hoist "^24.9.0"
-
-babel-preset-react-app@^8.0.0:
-  version "8.0.0"
-  resolved "https://registry.yarnpkg.com/babel-preset-react-app/-/babel-preset-react-app-8.0.0.tgz#930b6e28cdcfdff97ddb8bef9226d504f244d326"
-  integrity sha512-6Dmj7e8l7eWE+R6sKKLRrGEQXMfcBqBYlphaAgT1ml8qT1NEP+CyTZyfjmgKGqHZfwH3RQCUOuP6y4mpGc7tgg==
-  dependencies:
-    "@babel/core" "7.4.3"
-    "@babel/plugin-proposal-class-properties" "7.4.0"
-    "@babel/plugin-proposal-decorators" "7.4.0"
-    "@babel/plugin-proposal-object-rest-spread" "7.4.3"
-    "@babel/plugin-syntax-dynamic-import" "7.2.0"
-    "@babel/plugin-transform-classes" "7.4.3"
-    "@babel/plugin-transform-destructuring" "7.4.3"
-    "@babel/plugin-transform-flow-strip-types" "7.4.0"
-    "@babel/plugin-transform-react-constant-elements" "7.2.0"
-    "@babel/plugin-transform-react-display-name" "7.2.0"
-    "@babel/plugin-transform-runtime" "7.4.3"
-    "@babel/preset-env" "7.4.3"
-    "@babel/preset-react" "7.0.0"
-    "@babel/preset-typescript" "7.3.3"
-    "@babel/runtime" "7.4.3"
-    babel-plugin-dynamic-import-node "2.2.0"
-    babel-plugin-macros "2.5.1"
-    babel-plugin-transform-react-remove-prop-types "0.4.24"
-
-babel-runtime@6.x, babel-runtime@^6.23.0, babel-runtime@^6.26.0:
-  version "6.26.0"
-  resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.26.0.tgz#965c7058668e82b55d7bfe04ff2337bc8b5647fe"
-  integrity sha1-llxwWGaOgrVde/4E/yM3vItWR/4=
-  dependencies:
-    core-js "^2.4.0"
-    regenerator-runtime "^0.11.0"
-
-babylon@^6.18.0:
-  version "6.18.0"
-  resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.18.0.tgz#af2f3b88fa6f5c1e4c634d1a0f8eac4f55b395e3"
-  integrity sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==
-
-balanced-match@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767"
-  integrity sha1-ibTRmasr7kneFk6gK4nORi1xt2c=
-
-base64-js@^1.0.2:
-  version "1.3.1"
-  resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.3.1.tgz#58ece8cb75dd07e71ed08c736abc5fac4dbf8df1"
-  integrity sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g==
-
-base@^0.11.1:
-  version "0.11.2"
-  resolved "https://registry.yarnpkg.com/base/-/base-0.11.2.tgz#7bde5ced145b6d551a90db87f83c558b4eb48a8f"
-  integrity sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==
-  dependencies:
-    cache-base "^1.0.1"
-    class-utils "^0.3.5"
-    component-emitter "^1.2.1"
-    define-property "^1.0.0"
-    isobject "^3.0.1"
-    mixin-deep "^1.2.0"
-    pascalcase "^0.1.1"
-
-batch@0.6.1:
-  version "0.6.1"
-  resolved "https://registry.yarnpkg.com/batch/-/batch-0.6.1.tgz#dc34314f4e679318093fc760272525f94bf25c16"
-  integrity sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY=
-
-bcrypt-pbkdf@^1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e"
-  integrity sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=
-  dependencies:
-    tweetnacl "^0.14.3"
-
-big.js@^5.2.2:
-  version "5.2.2"
-  resolved "https://registry.yarnpkg.com/big.js/-/big.js-5.2.2.tgz#65f0af382f578bcdc742bd9c281e9cb2d7768328"
-  integrity sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==
-
-binary-extensions@^1.0.0:
-  version "1.13.1"
-  resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.13.1.tgz#598afe54755b2868a5330d2aff9d4ebb53209b65"
-  integrity sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==
-
-bluebird@^3.5.5:
-  version "3.5.5"
-  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.5.tgz#a8d0afd73251effbbd5fe384a77d73003c17a71f"
-  integrity sha512-5am6HnnfN+urzt4yfg7IgTbotDjIT/u8AJpEt0sIU9FtXfVeezXAPKswrG+xKUCOYAINpSdgZVDU6QFh+cuH3w==
-
-bn.js@^4.0.0, bn.js@^4.1.0, bn.js@^4.1.1, bn.js@^4.4.0:
-  version "4.11.8"
-  resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.11.8.tgz#2cde09eb5ee341f484746bb0309b3253b1b1442f"
-  integrity sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA==
-
-body-parser@1.19.0:
-  version "1.19.0"
-  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.19.0.tgz#96b2709e57c9c4e09a6fd66a8fd979844f69f08a"
-  integrity sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==
-  dependencies:
-    bytes "3.1.0"
-    content-type "~1.0.4"
-    debug "2.6.9"
-    depd "~1.1.2"
-    http-errors "1.7.2"
-    iconv-lite "0.4.24"
-    on-finished "~2.3.0"
-    qs "6.7.0"
-    raw-body "2.4.0"
-    type-is "~1.6.17"
-
-bonjour@^3.5.0:
-  version "3.5.0"
-  resolved "https://registry.yarnpkg.com/bonjour/-/bonjour-3.5.0.tgz#8e890a183d8ee9a2393b3844c691a42bcf7bc9f5"
-  integrity sha1-jokKGD2O6aI5OzhExpGkK897yfU=
-  dependencies:
-    array-flatten "^2.1.0"
-    deep-equal "^1.0.1"
-    dns-equal "^1.0.0"
-    dns-txt "^2.0.2"
-    multicast-dns "^6.0.1"
-    multicast-dns-service-types "^1.1.0"
-
-boolbase@^1.0.0, boolbase@~1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e"
-  integrity sha1-aN/1++YMUes3cl6p4+0xDcwed24=
-
-brace-expansion@^1.1.7:
-  version "1.1.11"
-  resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd"
-  integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==
-  dependencies:
-    balanced-match "^1.0.0"
-    concat-map "0.0.1"
-
-braces@^2.3.1, braces@^2.3.2:
-  version "2.3.2"
-  resolved "https://registry.yarnpkg.com/braces/-/braces-2.3.2.tgz#5979fd3f14cd531565e5fa2df1abfff1dfaee729"
-  integrity sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==
-  dependencies:
-    arr-flatten "^1.1.0"
-    array-unique "^0.3.2"
-    extend-shallow "^2.0.1"
-    fill-range "^4.0.0"
-    isobject "^3.0.1"
-    repeat-element "^1.1.2"
-    snapdragon "^0.8.1"
-    snapdragon-node "^2.0.1"
-    split-string "^3.0.2"
-    to-regex "^3.0.1"
-
-brorand@^1.0.1:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f"
-  integrity sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=
-
-browser-process-hrtime@^0.1.2:
-  version "0.1.3"
-  resolved "https://registry.yarnpkg.com/browser-process-hrtime/-/browser-process-hrtime-0.1.3.tgz#616f00faef1df7ec1b5bf9cfe2bdc3170f26c7b4"
-  integrity sha512-bRFnI4NnjO6cnyLmOV/7PVoDEMJChlcfN0z4s1YMBY989/SvlfMI1lgCnkFUs53e9gQF+w7qu7XdllSTiSl8Aw==
-
-browser-resolve@^1.11.3:
-  version "1.11.3"
-  resolved "https://registry.yarnpkg.com/browser-resolve/-/browser-resolve-1.11.3.tgz#9b7cbb3d0f510e4cb86bdbd796124d28b5890af6"
-  integrity sha512-exDi1BYWB/6raKHmDTCicQfTkqwN5fioMFV4j8BsfMU4R2DK/QfZfK7kOVkmWCNANf0snkBzqGqAJBao9gZMdQ==
-  dependencies:
-    resolve "1.1.7"
-
-browserify-aes@^1.0.0, browserify-aes@^1.0.4:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/browserify-aes/-/browserify-aes-1.2.0.tgz#326734642f403dabc3003209853bb70ad428ef48"
-  integrity sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==
-  dependencies:
-    buffer-xor "^1.0.3"
-    cipher-base "^1.0.0"
-    create-hash "^1.1.0"
-    evp_bytestokey "^1.0.3"
-    inherits "^2.0.1"
-    safe-buffer "^5.0.1"
-
-browserify-cipher@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/browserify-cipher/-/browserify-cipher-1.0.1.tgz#8d6474c1b870bfdabcd3bcfcc1934a10e94f15f0"
-  integrity sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==
-  dependencies:
-    browserify-aes "^1.0.4"
-    browserify-des "^1.0.0"
-    evp_bytestokey "^1.0.0"
-
-browserify-des@^1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/browserify-des/-/browserify-des-1.0.2.tgz#3af4f1f59839403572f1c66204375f7a7f703e9c"
-  integrity sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==
-  dependencies:
-    cipher-base "^1.0.1"
-    des.js "^1.0.0"
-    inherits "^2.0.1"
-    safe-buffer "^5.1.2"
-
-browserify-rsa@^4.0.0:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/browserify-rsa/-/browserify-rsa-4.0.1.tgz#21e0abfaf6f2029cf2fafb133567a701d4135524"
-  integrity sha1-IeCr+vbyApzy+vsTNWenAdQTVSQ=
-  dependencies:
-    bn.js "^4.1.0"
-    randombytes "^2.0.1"
-
-browserify-sign@^4.0.0:
-  version "4.0.4"
-  resolved "https://registry.yarnpkg.com/browserify-sign/-/browserify-sign-4.0.4.tgz#aa4eb68e5d7b658baa6bf6a57e630cbd7a93d298"
-  integrity sha1-qk62jl17ZYuqa/alfmMMvXqT0pg=
-  dependencies:
-    bn.js "^4.1.1"
-    browserify-rsa "^4.0.0"
-    create-hash "^1.1.0"
-    create-hmac "^1.1.2"
-    elliptic "^6.0.0"
-    inherits "^2.0.1"
-    parse-asn1 "^5.0.0"
-
-browserify-zlib@^0.2.0:
-  version "0.2.0"
-  resolved "https://registry.yarnpkg.com/browserify-zlib/-/browserify-zlib-0.2.0.tgz#2869459d9aa3be245fe8fe2ca1f46e2e7f54d73f"
-  integrity sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==
-  dependencies:
-    pako "~1.0.5"
-
-browserslist@4.6.6, browserslist@^4.0.0, browserslist@^4.1.1, browserslist@^4.4.2, browserslist@^4.5.2, browserslist@^4.6.0, browserslist@^4.6.3, browserslist@^4.6.6:
-  version "4.6.6"
-  resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.6.6.tgz#6e4bf467cde520bc9dbdf3747dafa03531cec453"
-  integrity sha512-D2Nk3W9JL9Fp/gIcWei8LrERCS+eXu9AM5cfXA8WEZ84lFks+ARnZ0q/R69m2SV3Wjma83QDDPxsNKXUwdIsyA==
-  dependencies:
-    caniuse-lite "^1.0.30000984"
-    electron-to-chromium "^1.3.191"
-    node-releases "^1.1.25"
-
-bser@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/bser/-/bser-2.1.0.tgz#65fc784bf7f87c009b973c12db6546902fa9c7b5"
-  integrity sha512-8zsjWrQkkBoLK6uxASk1nJ2SKv97ltiGDo6A3wA0/yRPz+CwmEyDo0hUrhIuukG2JHpAl3bvFIixw2/3Hi0DOg==
-  dependencies:
-    node-int64 "^0.4.0"
-
-buffer-from@^1.0.0:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef"
-  integrity sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==
-
-buffer-indexof@^1.0.0:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/buffer-indexof/-/buffer-indexof-1.1.1.tgz#52fabcc6a606d1a00302802648ef68f639da268c"
-  integrity sha512-4/rOEg86jivtPTeOUUT61jJO1Ya1TrR/OkqCSZDyq84WJh3LuuiphBYJN+fm5xufIk4XAFcEwte/8WzC8If/1g==
-
-buffer-xor@^1.0.3:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/buffer-xor/-/buffer-xor-1.0.3.tgz#26e61ed1422fb70dd42e6e36729ed51d855fe8d9"
-  integrity sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=
-
-buffer@^4.3.0:
-  version "4.9.1"
-  resolved "https://registry.yarnpkg.com/buffer/-/buffer-4.9.1.tgz#6d1bb601b07a4efced97094132093027c95bc298"
-  integrity sha1-bRu2AbB6TvztlwlBMgkwJ8lbwpg=
-  dependencies:
-    base64-js "^1.0.2"
-    ieee754 "^1.1.4"
-    isarray "^1.0.0"
-
-builtin-status-codes@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz#85982878e21b98e1c66425e03d0174788f569ee8"
-  integrity sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug=
-
-bytes@3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.0.0.tgz#d32815404d689699f85a4ea4fa8755dd13a96048"
-  integrity sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=
-
-bytes@3.1.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.0.tgz#f6cf7933a360e0588fa9fde85651cdc7f805d1f6"
-  integrity sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==
-
-cacache@^11.0.2:
-  version "11.3.3"
-  resolved "https://registry.yarnpkg.com/cacache/-/cacache-11.3.3.tgz#8bd29df8c6a718a6ebd2d010da4d7972ae3bbadc"
-  integrity sha512-p8WcneCytvzPxhDvYp31PD039vi77I12W+/KfR9S8AZbaiARFBCpsPJS+9uhWfeBfeAtW7o/4vt3MUqLkbY6nA==
-  dependencies:
-    bluebird "^3.5.5"
-    chownr "^1.1.1"
-    figgy-pudding "^3.5.1"
-    glob "^7.1.4"
-    graceful-fs "^4.1.15"
-    lru-cache "^5.1.1"
-    mississippi "^3.0.0"
-    mkdirp "^0.5.1"
-    move-concurrently "^1.0.1"
-    promise-inflight "^1.0.1"
-    rimraf "^2.6.3"
-    ssri "^6.0.1"
-    unique-filename "^1.1.1"
-    y18n "^4.0.0"
-
-cacache@^12.0.2:
-  version "12.0.3"
-  resolved "https://registry.yarnpkg.com/cacache/-/cacache-12.0.3.tgz#be99abba4e1bf5df461cd5a2c1071fc432573390"
-  integrity sha512-kqdmfXEGFepesTuROHMs3MpFLWrPkSSpRqOw80RCflZXy/khxaArvFrQ7uJxSUduzAufc6G0g1VUCOZXxWavPw==
-  dependencies:
-    bluebird "^3.5.5"
-    chownr "^1.1.1"
-    figgy-pudding "^3.5.1"
-    glob "^7.1.4"
-    graceful-fs "^4.1.15"
-    infer-owner "^1.0.3"
-    lru-cache "^5.1.1"
-    mississippi "^3.0.0"
-    mkdirp "^0.5.1"
-    move-concurrently "^1.0.1"
-    promise-inflight "^1.0.1"
-    rimraf "^2.6.3"
-    ssri "^6.0.1"
-    unique-filename "^1.1.1"
-    y18n "^4.0.0"
-
-cache-base@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/cache-base/-/cache-base-1.0.1.tgz#0a7f46416831c8b662ee36fe4e7c59d76f666ab2"
-  integrity sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==
-  dependencies:
-    collection-visit "^1.0.0"
-    component-emitter "^1.2.1"
-    get-value "^2.0.6"
-    has-value "^1.0.0"
-    isobject "^3.0.1"
-    set-value "^2.0.0"
-    to-object-path "^0.3.0"
-    union-value "^1.0.0"
-    unset-value "^1.0.0"
-
-call-me-maybe@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/call-me-maybe/-/call-me-maybe-1.0.1.tgz#26d208ea89e37b5cbde60250a15f031c16a4d66b"
-  integrity sha1-JtII6onje1y95gJQoV8DHBak1ms=
-
-caller-callsite@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/caller-callsite/-/caller-callsite-2.0.0.tgz#847e0fce0a223750a9a027c54b33731ad3154134"
-  integrity sha1-hH4PzgoiN1CpoCfFSzNzGtMVQTQ=
-  dependencies:
-    callsites "^2.0.0"
-
-caller-path@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/caller-path/-/caller-path-2.0.0.tgz#468f83044e369ab2010fac5f06ceee15bb2cb1f4"
-  integrity sha1-Ro+DBE42mrIBD6xfBs7uFbsssfQ=
-  dependencies:
-    caller-callsite "^2.0.0"
-
-callsites@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/callsites/-/callsites-2.0.0.tgz#06eb84f00eea413da86affefacbffb36093b3c50"
-  integrity sha1-BuuE8A7qQT2oav/vrL/7Ngk7PFA=
-
-callsites@^3.0.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73"
-  integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==
-
-camel-case@3.0.x:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/camel-case/-/camel-case-3.0.0.tgz#ca3c3688a4e9cf3a4cda777dc4dcbc713249cf73"
-  integrity sha1-yjw2iKTpzzpM2nd9xNy8cTJJz3M=
-  dependencies:
-    no-case "^2.2.0"
-    upper-case "^1.1.1"
-
-camelcase@^4.1.0:
-  version "4.1.0"
-  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-4.1.0.tgz#d545635be1e33c542649c69173e5de6acfae34dd"
-  integrity sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0=
-
-camelcase@^5.0.0, camelcase@^5.2.0, camelcase@^5.3.1:
-  version "5.3.1"
-  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320"
-  integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==
-
-caniuse-api@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/caniuse-api/-/caniuse-api-3.0.0.tgz#5e4d90e2274961d46291997df599e3ed008ee4c0"
-  integrity sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==
-  dependencies:
-    browserslist "^4.0.0"
-    caniuse-lite "^1.0.0"
-    lodash.memoize "^4.1.2"
-    lodash.uniq "^4.5.0"
-
-caniuse-lite@^1.0.0, caniuse-lite@^1.0.30000939, caniuse-lite@^1.0.30000980, caniuse-lite@^1.0.30000984:
-  version "1.0.30000989"
-  resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30000989.tgz#b9193e293ccf7e4426c5245134b8f2a56c0ac4b9"
-  integrity sha512-vrMcvSuMz16YY6GSVZ0dWDTJP8jqk3iFQ/Aq5iqblPwxSVVZI+zxDyTX0VPqtQsDnfdrBDcsmhgTEOh5R8Lbpw==
-
-capture-exit@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/capture-exit/-/capture-exit-2.0.0.tgz#fb953bfaebeb781f62898239dabb426d08a509a4"
-  integrity sha512-PiT/hQmTonHhl/HFGN+Lx3JJUznrVYJ3+AQsnthneZbvW7x+f08Tk7yLJTLEOUvBTbduLeeBkxEaYXUOUrRq6g==
-  dependencies:
-    rsvp "^4.8.4"
-
-case-sensitive-paths-webpack-plugin@2.2.0:
-  version "2.2.0"
-  resolved "https://registry.yarnpkg.com/case-sensitive-paths-webpack-plugin/-/case-sensitive-paths-webpack-plugin-2.2.0.tgz#3371ef6365ef9c25fa4b81c16ace0e9c7dc58c3e"
-  integrity sha512-u5ElzokS8A1pm9vM3/iDgTcI3xqHxuCao94Oz8etI3cf0Tio0p8izkDYbTIn09uP3yUUr6+veaE6IkjnTYS46g==
-
-caseless@~0.12.0:
-  version "0.12.0"
-  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc"
-  integrity sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=
-
-chalk@2.4.2, chalk@^2.0.0, chalk@^2.0.1, chalk@^2.1.0, chalk@^2.4.1, chalk@^2.4.2:
-  version "2.4.2"
-  resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424"
-  integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
-  dependencies:
-    ansi-styles "^3.2.1"
-    escape-string-regexp "^1.0.5"
-    supports-color "^5.3.0"
-
-chalk@^1.1.3:
-  version "1.1.3"
-  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
-  integrity sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=
-  dependencies:
-    ansi-styles "^2.2.1"
-    escape-string-regexp "^1.0.2"
-    has-ansi "^2.0.0"
-    strip-ansi "^3.0.0"
-    supports-color "^2.0.0"
-
-chardet@^0.7.0:
-  version "0.7.0"
-  resolved "https://registry.yarnpkg.com/chardet/-/chardet-0.7.0.tgz#90094849f0937f2eedc2425d0d28a9e5f0cbad9e"
-  integrity sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==
-
-chokidar@^2.0.0, chokidar@^2.0.2, chokidar@^2.0.4:
-  version "2.1.8"
-  resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-2.1.8.tgz#804b3a7b6a99358c3c5c61e71d8728f041cff917"
-  integrity sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==
-  dependencies:
-    anymatch "^2.0.0"
-    async-each "^1.0.1"
-    braces "^2.3.2"
-    glob-parent "^3.1.0"
-    inherits "^2.0.3"
-    is-binary-path "^1.0.0"
-    is-glob "^4.0.0"
-    normalize-path "^3.0.0"
-    path-is-absolute "^1.0.0"
-    readdirp "^2.2.1"
-    upath "^1.1.1"
-  optionalDependencies:
-    fsevents "^1.2.7"
-
-chownr@^1.1.1:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.2.tgz#a18f1e0b269c8a6a5d3c86eb298beb14c3dd7bf6"
-  integrity sha512-GkfeAQh+QNy3wquu9oIZr6SS5x7wGdSgNQvD10X3r+AZr1Oys22HW8kAmDMvNg2+Dm0TeGaEuO8gFwdBXxwO8A==
-
-chrome-trace-event@^1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/chrome-trace-event/-/chrome-trace-event-1.0.2.tgz#234090ee97c7d4ad1a2c4beae27505deffc608a4"
-  integrity sha512-9e/zx1jw7B4CO+c/RXoCsfg/x1AfUBioy4owYH0bJprEYAx5hRFLRhWBqHAG57D0ZM4H7vxbP7bPe0VwhQRYDQ==
-  dependencies:
-    tslib "^1.9.0"
-
-ci-info@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-2.0.0.tgz#67a9e964be31a51e15e5010d58e6f12834002f46"
-  integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==
-
-cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/cipher-base/-/cipher-base-1.0.4.tgz#8760e4ecc272f4c363532f926d874aae2c1397de"
-  integrity sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==
-  dependencies:
-    inherits "^2.0.1"
-    safe-buffer "^5.0.1"
-
-class-utils@^0.3.5:
-  version "0.3.6"
-  resolved "https://registry.yarnpkg.com/class-utils/-/class-utils-0.3.6.tgz#f93369ae8b9a7ce02fd41faad0ca83033190c463"
-  integrity sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==
-  dependencies:
-    arr-union "^3.1.0"
-    define-property "^0.2.5"
-    isobject "^3.0.0"
-    static-extend "^0.1.1"
-
-classnames@2.x, classnames@^2.2.0, classnames@^2.2.1, classnames@^2.2.3, classnames@^2.2.5, classnames@^2.2.6, classnames@~2.2.6:
-  version "2.2.6"
-  resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.2.6.tgz#43935bffdd291f326dad0a205309b38d00f650ce"
-  integrity sha512-JR/iSQOSt+LQIWwrwEzJ9uk0xfN3mTVYMwt1Ir5mUcSN6pU+V4zQFFaJsclJbPuAUQH+yfWef6tm7l1quW3C8Q==
-
-clean-css@4.2.x:
-  version "4.2.1"
-  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-4.2.1.tgz#2d411ef76b8569b6d0c84068dabe85b0aa5e5c17"
-  integrity sha512-4ZxI6dy4lrY6FHzfiy1aEOXgu4LIsW2MhwG0VBKdcoGoH/XLFgaHSdLTGr4O8Be6A8r3MOphEiI8Gc1n0ecf3g==
-  dependencies:
-    source-map "~0.6.0"
-
-cli-cursor@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-2.1.0.tgz#b35dac376479facc3e94747d41d0d0f5238ffcb5"
-  integrity sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU=
-  dependencies:
-    restore-cursor "^2.0.0"
-
-cli-width@^2.0.0:
-  version "2.2.0"
-  resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-2.2.0.tgz#ff19ede8a9a5e579324147b0c11f0fbcbabed639"
-  integrity sha1-/xnt6Kml5XkyQUewwR8PvLq+1jk=
-
-cliui@^4.0.0:
-  version "4.1.0"
-  resolved "https://registry.yarnpkg.com/cliui/-/cliui-4.1.0.tgz#348422dbe82d800b3022eef4f6ac10bf2e4d1b49"
-  integrity sha512-4FG+RSG9DL7uEwRUZXZn3SS34DiDPfzP0VOiEwtUWlE+AR2EIg+hSyvrIgUUfhdgR/UkAeW2QHgeP+hWrXs7jQ==
-  dependencies:
-    string-width "^2.1.1"
-    strip-ansi "^4.0.0"
-    wrap-ansi "^2.0.0"
-
-cliui@^5.0.0:
-  version "5.0.0"
-  resolved "https://registry.yarnpkg.com/cliui/-/cliui-5.0.0.tgz#deefcfdb2e800784aa34f46fa08e06851c7bbbc5"
-  integrity sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==
-  dependencies:
-    string-width "^3.1.0"
-    strip-ansi "^5.2.0"
-    wrap-ansi "^5.1.0"
-
-clone-deep@^0.2.4:
-  version "0.2.4"
-  resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-0.2.4.tgz#4e73dd09e9fb971cc38670c5dced9c1896481cc6"
-  integrity sha1-TnPdCen7lxzDhnDF3O2cGJZIHMY=
-  dependencies:
-    for-own "^0.1.3"
-    is-plain-object "^2.0.1"
-    kind-of "^3.0.2"
-    lazy-cache "^1.0.3"
-    shallow-clone "^0.1.2"
-
-clone-deep@^2.0.1:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-2.0.2.tgz#00db3a1e173656730d1188c3d6aced6d7ea97713"
-  integrity sha512-SZegPTKjCgpQH63E+eN6mVEEPdQBOUzjyJm5Pora4lrwWRFS8I0QAxV/KD6vV/i0WuijHZWQC1fMsPEdxfdVCQ==
-  dependencies:
-    for-own "^1.0.0"
-    is-plain-object "^2.0.4"
-    kind-of "^6.0.0"
-    shallow-clone "^1.0.0"
-
-clone@^2.1.1, clone@^2.1.2:
-  version "2.1.2"
-  resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.2.tgz#1b7f4b9f591f1e8f83670401600345a02887435f"
-  integrity sha1-G39Ln1kfHo+DZwQBYANFoCiHQ18=
-
-co@^4.6.0:
-  version "4.6.0"
-  resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184"
-  integrity sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=
-
-coa@^2.0.2:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/coa/-/coa-2.0.2.tgz#43f6c21151b4ef2bf57187db0d73de229e3e7ec3"
-  integrity sha512-q5/jG+YQnSy4nRTV4F7lPepBJZ8qBNJJDBuJdoejDyLXgmL7IEo+Le2JDZudFTFt7mrCqIRaSjws4ygRCTCAXA==
-  dependencies:
-    "@types/q" "^1.5.1"
-    chalk "^2.4.1"
-    q "^1.1.2"
-
-code-point-at@^1.0.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77"
-  integrity sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=
-
-collection-visit@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/collection-visit/-/collection-visit-1.0.0.tgz#4bc0373c164bc3291b4d368c829cf1a80a59dca0"
-  integrity sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=
-  dependencies:
-    map-visit "^1.0.0"
-    object-visit "^1.0.0"
-
-color-convert@^1.9.0, color-convert@^1.9.1:
-  version "1.9.3"
-  resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8"
-  integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==
-  dependencies:
-    color-name "1.1.3"
-
-color-name@1.1.3:
-  version "1.1.3"
-  resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25"
-  integrity sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=
-
-color-name@^1.0.0:
-  version "1.1.4"
-  resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2"
-  integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
-
-color-string@^1.5.2:
-  version "1.5.3"
-  resolved "https://registry.yarnpkg.com/color-string/-/color-string-1.5.3.tgz#c9bbc5f01b58b5492f3d6857459cb6590ce204cc"
-  integrity sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw==
-  dependencies:
-    color-name "^1.0.0"
-    simple-swizzle "^0.2.2"
-
-color@^3.0.0:
-  version "3.1.2"
-  resolved "https://registry.yarnpkg.com/color/-/color-3.1.2.tgz#68148e7f85d41ad7649c5fa8c8106f098d229e10"
-  integrity sha512-vXTJhHebByxZn3lDvDJYw4lR5+uB3vuoHsuYA5AKuxRVn5wzzIfQKGLBmgdVRHKTJYeK5rvJcHnrd0Li49CFpg==
-  dependencies:
-    color-convert "^1.9.1"
-    color-string "^1.5.2"
-
-combined-stream@^1.0.6, combined-stream@~1.0.6:
-  version "1.0.8"
-  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f"
-  integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==
-  dependencies:
-    delayed-stream "~1.0.0"
-
-commander@2.17.x:
-  version "2.17.1"
-  resolved "https://registry.yarnpkg.com/commander/-/commander-2.17.1.tgz#bd77ab7de6de94205ceacc72f1716d29f20a77bf"
-  integrity sha512-wPMUt6FnH2yzG95SA6mzjQOEKUU3aLaDEmzs1ti+1E9h+CsrZghRlqEM/EJ4KscsQVG8uNN4uVreUeT8+drlgg==
-
-commander@^2.11.0, commander@^2.19.0, commander@^2.20.0, commander@~2.20.0:
-  version "2.20.0"
-  resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.0.tgz#d58bb2b5c1ee8f87b0d340027e9e94e222c5a422"
-  integrity sha512-7j2y+40w61zy6YC2iRNpUe/NwhNyoXrYpHMrSunaMG64nRnaf96zO/KMQR4OyN/UnE5KLyEBnKHd4aG3rskjpQ==
-
-commander@~2.19.0:
-  version "2.19.0"
-  resolved "https://registry.yarnpkg.com/commander/-/commander-2.19.0.tgz#f6198aa84e5b83c46054b94ddedbfed5ee9ff12a"
-  integrity sha512-6tvAOO+D6OENvRAh524Dh9jcfKTYDQAqvqezbCW82xj5X0pSrcpxtvRKHLG0yBY6SD7PSDrJaj+0AiOcKVd1Xg==
-
-common-tags@^1.8.0:
-  version "1.8.0"
-  resolved "https://registry.yarnpkg.com/common-tags/-/common-tags-1.8.0.tgz#8e3153e542d4a39e9b10554434afaaf98956a937"
-  integrity sha512-6P6g0uetGpW/sdyUy/iQQCbFF0kWVMSIVSyYz7Zgjcgh8mgw8PQzDNZeyZ5DQ2gM7LBoZPHmnjz8rUthkBG5tw==
-
-commondir@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b"
-  integrity sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=
-
-component-classes@1.x, component-classes@^1.2.5, component-classes@^1.2.6:
-  version "1.2.6"
-  resolved "https://registry.yarnpkg.com/component-classes/-/component-classes-1.2.6.tgz#c642394c3618a4d8b0b8919efccbbd930e5cd691"
-  integrity sha1-xkI5TDYYpNiwuJGe/Mu9kw5c1pE=
-  dependencies:
-    component-indexof "0.0.3"
-
-component-emitter@^1.2.1:
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.3.0.tgz#16e4070fba8ae29b679f2215853ee181ab2eabc0"
-  integrity sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==
-
-component-indexof@0.0.3:
-  version "0.0.3"
-  resolved "https://registry.yarnpkg.com/component-indexof/-/component-indexof-0.0.3.tgz#11d091312239eb8f32c8f25ae9cb002ffe8d3c24"
-  integrity sha1-EdCRMSI5648yyPJa6csAL/6NPCQ=
-
-compressible@~2.0.16:
-  version "2.0.17"
-  resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.17.tgz#6e8c108a16ad58384a977f3a482ca20bff2f38c1"
-  integrity sha512-BGHeLCK1GV7j1bSmQQAi26X+GgWcTjLr/0tzSvMCl3LH1w1IJ4PFSPoV5316b30cneTziC+B1a+3OjoSUcQYmw==
-  dependencies:
-    mime-db ">= 1.40.0 < 2"
-
-compression@^1.5.2:
-  version "1.7.4"
-  resolved "https://registry.yarnpkg.com/compression/-/compression-1.7.4.tgz#95523eff170ca57c29a0ca41e6fe131f41e5bb8f"
-  integrity sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==
-  dependencies:
-    accepts "~1.3.5"
-    bytes "3.0.0"
-    compressible "~2.0.16"
-    debug "2.6.9"
-    on-headers "~1.0.2"
-    safe-buffer "5.1.2"
-    vary "~1.1.2"
-
-concat-map@0.0.1:
-  version "0.0.1"
-  resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
-  integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=
-
-concat-stream@^1.5.0:
-  version "1.6.2"
-  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.2.tgz#904bdf194cd3122fc675c77fc4ac3d4ff0fd1a34"
-  integrity sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==
-  dependencies:
-    buffer-from "^1.0.0"
-    inherits "^2.0.3"
-    readable-stream "^2.2.2"
-    typedarray "^0.0.6"
-
-confusing-browser-globals@^1.0.7:
-  version "1.0.8"
-  resolved "https://registry.yarnpkg.com/confusing-browser-globals/-/confusing-browser-globals-1.0.8.tgz#93ffec1f82a6e2bf2bc36769cc3a92fa20e502f3"
-  integrity sha512-lI7asCibVJ6Qd3FGU7mu4sfG4try4LX3+GVS+Gv8UlrEf2AeW57piecapnog2UHZSbcX/P/1UDWVaTsblowlZg==
-
-connect-history-api-fallback@^1.3.0:
-  version "1.6.0"
-  resolved "https://registry.yarnpkg.com/connect-history-api-fallback/-/connect-history-api-fallback-1.6.0.tgz#8b32089359308d111115d81cad3fceab888f97bc"
-  integrity sha512-e54B99q/OUoH64zYYRf3HBP5z24G38h5D3qXu23JGRoigpX5Ss4r9ZnDk3g0Z8uQC2x2lPaJ+UlWBc1ZWBWdLg==
-
-console-browserify@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10"
-  integrity sha1-8CQcRXMKn8YyOyBtvzjtx0HQuxA=
-  dependencies:
-    date-now "^0.1.4"
-
-console-control-strings@^1.0.0, console-control-strings@~1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e"
-  integrity sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=
-
-constants-browserify@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/constants-browserify/-/constants-browserify-1.0.0.tgz#c20b96d8c617748aaf1c16021760cd27fcb8cb75"
-  integrity sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U=
-
-contains-path@^0.1.0:
-  version "0.1.0"
-  resolved "https://registry.yarnpkg.com/contains-path/-/contains-path-0.1.0.tgz#fe8cf184ff6670b6baef01a9d4861a5cbec4120a"
-  integrity sha1-/ozxhP9mcLa67wGp1IYaXL7EEgo=
-
-content-disposition@0.5.3:
-  version "0.5.3"
-  resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.3.tgz#e130caf7e7279087c5616c2007d0485698984fbd"
-  integrity sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==
-  dependencies:
-    safe-buffer "5.1.2"
-
-content-type@~1.0.4:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b"
-  integrity sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==
-
-convert-source-map@^1.1.0, convert-source-map@^1.4.0:
-  version "1.6.0"
-  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.6.0.tgz#51b537a8c43e0f04dec1993bffcdd504e758ac20"
-  integrity sha512-eFu7XigvxdZ1ETfbgPBohgyQ/Z++C0eEhTor0qRwBw9unw+L0/6V8wkSuGgzdThkiS5lSpdptOQPD8Ak40a+7A==
-  dependencies:
-    safe-buffer "~5.1.1"
-
-cookie-signature@1.0.6:
-  version "1.0.6"
-  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c"
-  integrity sha1-4wOogrNCzD7oylE6eZmXNNqzriw=
-
-cookie@0.4.0:
-  version "0.4.0"
-  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.4.0.tgz#beb437e7022b3b6d49019d088665303ebe9c14ba"
-  integrity sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg==
-
-copy-concurrently@^1.0.0:
-  version "1.0.5"
-  resolved "https://registry.yarnpkg.com/copy-concurrently/-/copy-concurrently-1.0.5.tgz#92297398cae34937fcafd6ec8139c18051f0b5e0"
-  integrity sha512-f2domd9fsVDFtaFcbaRZuYXwtdmnzqbADSwhSWYxYB/Q8zsdUUFMXVRwXGDMWmbEzAn1kdRrtI1T/KTFOL4X2A==
-  dependencies:
-    aproba "^1.1.1"
-    fs-write-stream-atomic "^1.0.8"
-    iferr "^0.1.5"
-    mkdirp "^0.5.1"
-    rimraf "^2.5.4"
-    run-queue "^1.0.0"
-
-copy-descriptor@^0.1.0:
-  version "0.1.1"
-  resolved "https://registry.yarnpkg.com/copy-descriptor/-/copy-descriptor-0.1.1.tgz#676f6eb3c39997c2ee1ac3a924fd6124748f578d"
-  integrity sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=
-
-copy-to-clipboard@^3.2.0:
-  version "3.2.0"
-  resolved "https://registry.yarnpkg.com/copy-to-clipboard/-/copy-to-clipboard-3.2.0.tgz#d2724a3ccbfed89706fac8a894872c979ac74467"
-  integrity sha512-eOZERzvCmxS8HWzugj4Uxl8OJxa7T2k1Gi0X5qavwydHIfuSHq2dTD09LOg/XyGq4Zpb5IsR/2OJ5lbOegz78w==
-  dependencies:
-    toggle-selection "^1.0.6"
-
-core-js-compat@^3.0.0, core-js-compat@^3.1.1:
-  version "3.2.1"
-  resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.2.1.tgz#0cbdbc2e386e8e00d3b85dc81c848effec5b8150"
-  integrity sha512-MwPZle5CF9dEaMYdDeWm73ao/IflDH+FjeJCWEADcEgFSE9TLimFKwJsfmkwzI8eC0Aj0mgvMDjeQjrElkz4/A==
-  dependencies:
-    browserslist "^4.6.6"
-    semver "^6.3.0"
-
-core-js@3.1.4:
-  version "3.1.4"
-  resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.1.4.tgz#3a2837fc48e582e1ae25907afcd6cf03b0cc7a07"
-  integrity sha512-YNZN8lt82XIMLnLirj9MhKDFZHalwzzrL9YLt6eb0T5D0EDl4IQ90IGkua8mHbnxNrkj1d8hbdizMc0Qmg1WnQ==
-
-core-js@^1.0.0:
-  version "1.2.7"
-  resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636"
-  integrity sha1-ZSKUwUZR2yj6k70tX/KYOk8IxjY=
-
-core-js@^2.4.0:
-  version "2.6.9"
-  resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.6.9.tgz#6b4b214620c834152e179323727fc19741b084f2"
-  integrity sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A==
-
-core-util-is@1.0.2, core-util-is@~1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
-  integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=
-
-cosmiconfig@^5.0.0, cosmiconfig@^5.2.0, cosmiconfig@^5.2.1:
-  version "5.2.1"
-  resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-5.2.1.tgz#040f726809c591e77a17c0a3626ca45b4f168b1a"
-  integrity sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA==
-  dependencies:
-    import-fresh "^2.0.0"
-    is-directory "^0.3.1"
-    js-yaml "^3.13.1"
-    parse-json "^4.0.0"
-
-create-ecdh@^4.0.0:
-  version "4.0.3"
-  resolved "https://registry.yarnpkg.com/create-ecdh/-/create-ecdh-4.0.3.tgz#c9111b6f33045c4697f144787f9254cdc77c45ff"
-  integrity sha512-GbEHQPMOswGpKXM9kCWVrremUcBmjteUaQ01T9rkKCPDXfUHX0IoP9LpHYo2NPFampa4e+/pFDc3jQdxrxQLaw==
-  dependencies:
-    bn.js "^4.1.0"
-    elliptic "^6.0.0"
-
-create-hash@^1.1.0, create-hash@^1.1.2:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/create-hash/-/create-hash-1.2.0.tgz#889078af11a63756bcfb59bd221996be3a9ef196"
-  integrity sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==
-  dependencies:
-    cipher-base "^1.0.1"
-    inherits "^2.0.1"
-    md5.js "^1.3.4"
-    ripemd160 "^2.0.1"
-    sha.js "^2.4.0"
-
-create-hmac@^1.1.0, create-hmac@^1.1.2, create-hmac@^1.1.4:
-  version "1.1.7"
-  resolved "https://registry.yarnpkg.com/create-hmac/-/create-hmac-1.1.7.tgz#69170c78b3ab957147b2b8b04572e47ead2243ff"
-  integrity sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==
-  dependencies:
-    cipher-base "^1.0.3"
-    create-hash "^1.1.0"
-    inherits "^2.0.1"
-    ripemd160 "^2.0.0"
-    safe-buffer "^5.0.1"
-    sha.js "^2.4.8"
-
-create-react-class@^15.5.3:
-  version "15.6.3"
-  resolved "https://registry.yarnpkg.com/create-react-class/-/create-react-class-15.6.3.tgz#2d73237fb3f970ae6ebe011a9e66f46dbca80036"
-  integrity sha512-M+/3Q6E6DLO6Yx3OwrWjwHBnvfXXYA7W+dFjt/ZDBemHO1DDZhsalX/NUtnTYclN6GfnBDRh4qRHjcDHmlJBJg==
-  dependencies:
-    fbjs "^0.8.9"
-    loose-envify "^1.3.1"
-    object-assign "^4.1.1"
-
-cross-spawn@6.0.5, cross-spawn@^6.0.0, cross-spawn@^6.0.5:
-  version "6.0.5"
-  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-6.0.5.tgz#4a5ec7c64dfae22c3a14124dbacdee846d80cbc4"
-  integrity sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==
-  dependencies:
-    nice-try "^1.0.4"
-    path-key "^2.0.1"
-    semver "^5.5.0"
-    shebang-command "^1.2.0"
-    which "^1.2.9"
-
-crypto-browserify@^3.11.0:
-  version "3.12.0"
-  resolved "https://registry.yarnpkg.com/crypto-browserify/-/crypto-browserify-3.12.0.tgz#396cf9f3137f03e4b8e532c58f698254e00f80ec"
-  integrity sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==
-  dependencies:
-    browserify-cipher "^1.0.0"
-    browserify-sign "^4.0.0"
-    create-ecdh "^4.0.0"
-    create-hash "^1.1.0"
-    create-hmac "^1.1.0"
-    diffie-hellman "^5.0.0"
-    inherits "^2.0.1"
-    pbkdf2 "^3.0.3"
-    public-encrypt "^4.0.0"
-    randombytes "^2.0.0"
-    randomfill "^1.0.3"
-
-css-animation@1.x, css-animation@^1.3.2, css-animation@^1.5.0:
-  version "1.6.1"
-  resolved "https://registry.yarnpkg.com/css-animation/-/css-animation-1.6.1.tgz#162064a3b0d51f958b7ff37b3d6d4de18e17039e"
-  integrity sha512-/48+/BaEaHRY6kNQ2OIPzKf9A6g8WjZYjhiNDNuIVbsm5tXCGIAsHDjB4Xu1C4vXJtUWZo26O68OQkDpNBaPog==
-  dependencies:
-    babel-runtime "6.x"
-    component-classes "^1.2.5"
-
-css-blank-pseudo@^0.1.4:
-  version "0.1.4"
-  resolved "https://registry.yarnpkg.com/css-blank-pseudo/-/css-blank-pseudo-0.1.4.tgz#dfdefd3254bf8a82027993674ccf35483bfcb3c5"
-  integrity sha512-LHz35Hr83dnFeipc7oqFDmsjHdljj3TQtxGGiNWSOsTLIAubSm4TEz8qCaKFpk7idaQ1GfWscF4E6mgpBysA1w==
-  dependencies:
-    postcss "^7.0.5"
-
-css-color-names@0.0.4, css-color-names@^0.0.4:
-  version "0.0.4"
-  resolved "https://registry.yarnpkg.com/css-color-names/-/css-color-names-0.0.4.tgz#808adc2e79cf84738069b646cb20ec27beb629e0"
-  integrity sha1-gIrcLnnPhHOAabZGyyDsJ762KeA=
-
-css-declaration-sorter@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/css-declaration-sorter/-/css-declaration-sorter-4.0.1.tgz#c198940f63a76d7e36c1e71018b001721054cb22"
-  integrity sha512-BcxQSKTSEEQUftYpBVnsH4SF05NTuBokb19/sBt6asXGKZ/6VP7PLG1CBCkFDYOnhXhPh0jMhO6xZ71oYHXHBA==
-  dependencies:
-    postcss "^7.0.1"
-    timsort "^0.3.0"
-
-css-has-pseudo@^0.10.0:
-  version "0.10.0"
-  resolved "https://registry.yarnpkg.com/css-has-pseudo/-/css-has-pseudo-0.10.0.tgz#3c642ab34ca242c59c41a125df9105841f6966ee"
-  integrity sha512-Z8hnfsZu4o/kt+AuFzeGpLVhFOGO9mluyHBaA2bA8aCGTwah5sT3WV/fTHH8UNZUytOIImuGPrl/prlb4oX4qQ==
-  dependencies:
-    postcss "^7.0.6"
-    postcss-selector-parser "^5.0.0-rc.4"
-
-css-loader@2.1.1:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/css-loader/-/css-loader-2.1.1.tgz#d8254f72e412bb2238bb44dd674ffbef497333ea"
-  integrity sha512-OcKJU/lt232vl1P9EEDamhoO9iKY3tIjY5GU+XDLblAykTdgs6Ux9P1hTHve8nFKy5KPpOXOsVI/hIwi3841+w==
-  dependencies:
-    camelcase "^5.2.0"
-    icss-utils "^4.1.0"
-    loader-utils "^1.2.3"
-    normalize-path "^3.0.0"
-    postcss "^7.0.14"
-    postcss-modules-extract-imports "^2.0.0"
-    postcss-modules-local-by-default "^2.0.6"
-    postcss-modules-scope "^2.1.0"
-    postcss-modules-values "^2.0.0"
-    postcss-value-parser "^3.3.0"
-    schema-utils "^1.0.0"
-
-css-prefers-color-scheme@^3.1.1:
-  version "3.1.1"
-  resolved "https://registry.yarnpkg.com/css-prefers-color-scheme/-/css-prefers-color-scheme-3.1.1.tgz#6f830a2714199d4f0d0d0bb8a27916ed65cff1f4"
-  integrity sha512-MTu6+tMs9S3EUqzmqLXEcgNRbNkkD/TGFvowpeoWJn5Vfq7FMgsmRQs9X5NXAURiOBmOxm/lLjsDNXDE6k9bhg==
-  dependencies:
-    postcss "^7.0.5"
-
-css-select-base-adapter@^0.1.1:
-  version "0.1.1"
-  resolved "https://registry.yarnpkg.com/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz#3b2ff4972cc362ab88561507a95408a1432135d7"
-  integrity sha512-jQVeeRG70QI08vSTwf1jHxp74JoZsr2XSgETae8/xC8ovSnL2WF87GTLO86Sbwdt2lK4Umg4HnnwMO4YF3Ce7w==
-
-css-select@^1.1.0:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/css-select/-/css-select-1.2.0.tgz#2b3a110539c5355f1cd8d314623e870b121ec858"
-  integrity sha1-KzoRBTnFNV8c2NMUYj6HCxIeyFg=
-  dependencies:
-    boolbase "~1.0.0"
-    css-what "2.1"
-    domutils "1.5.1"
-    nth-check "~1.0.1"
-
-css-select@^2.0.0:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/css-select/-/css-select-2.0.2.tgz#ab4386cec9e1f668855564b17c3733b43b2a5ede"
-  integrity sha512-dSpYaDVoWaELjvZ3mS6IKZM/y2PMPa/XYoEfYNZePL4U/XgyxZNroHEHReDx/d+VgXh9VbCTtFqLkFbmeqeaRQ==
-  dependencies:
-    boolbase "^1.0.0"
-    css-what "^2.1.2"
-    domutils "^1.7.0"
-    nth-check "^1.0.2"
-
-css-tree@1.0.0-alpha.29:
-  version "1.0.0-alpha.29"
-  resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.0.0-alpha.29.tgz#3fa9d4ef3142cbd1c301e7664c1f352bd82f5a39"
-  integrity sha512-sRNb1XydwkW9IOci6iB2xmy8IGCj6r/fr+JWitvJ2JxQRPzN3T4AGGVWCMlVmVwM1gtgALJRmGIlWv5ppnGGkg==
-  dependencies:
-    mdn-data "~1.1.0"
-    source-map "^0.5.3"
-
-css-tree@1.0.0-alpha.33:
-  version "1.0.0-alpha.33"
-  resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.0.0-alpha.33.tgz#970e20e5a91f7a378ddd0fc58d0b6c8d4f3be93e"
-  integrity sha512-SPt57bh5nQnpsTBsx/IXbO14sRc9xXu5MtMAVuo0BaQQmyf0NupNPPSoMaqiAF5tDFafYsTkfeH4Q/HCKXkg4w==
-  dependencies:
-    mdn-data "2.0.4"
-    source-map "^0.5.3"
-
-css-unit-converter@^1.1.1:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/css-unit-converter/-/css-unit-converter-1.1.1.tgz#d9b9281adcfd8ced935bdbaba83786897f64e996"
-  integrity sha1-2bkoGtz9jO2TW9urqDeGiX9k6ZY=
-
-css-what@2.1, css-what@^2.1.2:
-  version "2.1.3"
-  resolved "https://registry.yarnpkg.com/css-what/-/css-what-2.1.3.tgz#a6d7604573365fe74686c3f311c56513d88285f2"
-  integrity sha512-a+EPoD+uZiNfh+5fxw2nO9QwFa6nJe2Or35fGY6Ipw1R3R4AGz1d1TEZrCegvw2YTmZ0jXirGYlzxxpYSHwpEg==
-
-cssdb@^4.3.0:
-  version "4.4.0"
-  resolved "https://registry.yarnpkg.com/cssdb/-/cssdb-4.4.0.tgz#3bf2f2a68c10f5c6a08abd92378331ee803cddb0"
-  integrity sha512-LsTAR1JPEM9TpGhl/0p3nQecC2LJ0kD8X5YARu1hk/9I1gril5vDtMZyNxcEpxxDj34YNck/ucjuoUd66K03oQ==
-
-cssesc@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-2.0.0.tgz#3b13bd1bb1cb36e1bcb5a4dcd27f54c5dcb35703"
-  integrity sha512-MsCAG1z9lPdoO/IUMLSBWBSVxVtJ1395VGIQ+Fc2gNdkQ1hNDnQdw3YhA71WJCBW1vdwA0cAnk/DnW6bqoEUYg==
-
-cssesc@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-3.0.0.tgz#37741919903b868565e1c09ea747445cd18983ee"
-  integrity sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==
-
-cssnano-preset-default@^4.0.7:
-  version "4.0.7"
-  resolved "https://registry.yarnpkg.com/cssnano-preset-default/-/cssnano-preset-default-4.0.7.tgz#51ec662ccfca0f88b396dcd9679cdb931be17f76"
-  integrity sha512-x0YHHx2h6p0fCl1zY9L9roD7rnlltugGu7zXSKQx6k2rYw0Hi3IqxcoAGF7u9Q5w1nt7vK0ulxV8Lo+EvllGsA==
-  dependencies:
-    css-declaration-sorter "^4.0.1"
-    cssnano-util-raw-cache "^4.0.1"
-    postcss "^7.0.0"
-    postcss-calc "^7.0.1"
-    postcss-colormin "^4.0.3"
-    postcss-convert-values "^4.0.1"
-    postcss-discard-comments "^4.0.2"
-    postcss-discard-duplicates "^4.0.2"
-    postcss-discard-empty "^4.0.1"
-    postcss-discard-overridden "^4.0.1"
-    postcss-merge-longhand "^4.0.11"
-    postcss-merge-rules "^4.0.3"
-    postcss-minify-font-values "^4.0.2"
-    postcss-minify-gradients "^4.0.2"
-    postcss-minify-params "^4.0.2"
-    postcss-minify-selectors "^4.0.2"
-    postcss-normalize-charset "^4.0.1"
-    postcss-normalize-display-values "^4.0.2"
-    postcss-normalize-positions "^4.0.2"
-    postcss-normalize-repeat-style "^4.0.2"
-    postcss-normalize-string "^4.0.2"
-    postcss-normalize-timing-functions "^4.0.2"
-    postcss-normalize-unicode "^4.0.1"
-    postcss-normalize-url "^4.0.1"
-    postcss-normalize-whitespace "^4.0.2"
-    postcss-ordered-values "^4.1.2"
-    postcss-reduce-initial "^4.0.3"
-    postcss-reduce-transforms "^4.0.2"
-    postcss-svgo "^4.0.2"
-    postcss-unique-selectors "^4.0.1"
-
-cssnano-util-get-arguments@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz#ed3a08299f21d75741b20f3b81f194ed49cc150f"
-  integrity sha1-7ToIKZ8h11dBsg87gfGU7UnMFQ8=
-
-cssnano-util-get-match@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz#c0e4ca07f5386bb17ec5e52250b4f5961365156d"
-  integrity sha1-wOTKB/U4a7F+xeUiULT1lhNlFW0=
-
-cssnano-util-raw-cache@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz#b26d5fd5f72a11dfe7a7846fb4c67260f96bf282"
-  integrity sha512-qLuYtWK2b2Dy55I8ZX3ky1Z16WYsx544Q0UWViebptpwn/xDBmog2TLg4f+DBMg1rJ6JDWtn96WHbOKDWt1WQA==
-  dependencies:
-    postcss "^7.0.0"
-
-cssnano-util-same-parent@^4.0.0:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz#574082fb2859d2db433855835d9a8456ea18bbf3"
-  integrity sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q==
-
-cssnano@^4.1.0:
-  version "4.1.10"
-  resolved "https://registry.yarnpkg.com/cssnano/-/cssnano-4.1.10.tgz#0ac41f0b13d13d465487e111b778d42da631b8b2"
-  integrity sha512-5wny+F6H4/8RgNlaqab4ktc3e0/blKutmq8yNlBFXA//nSFFAqAngjNVRzUvCgYROULmZZUoosL/KSoZo5aUaQ==
-  dependencies:
-    cosmiconfig "^5.0.0"
-    cssnano-preset-default "^4.0.7"
-    is-resolvable "^1.0.0"
-    postcss "^7.0.0"
-
-csso@^3.5.1:
-  version "3.5.1"
-  resolved "https://registry.yarnpkg.com/csso/-/csso-3.5.1.tgz#7b9eb8be61628973c1b261e169d2f024008e758b"
-  integrity sha512-vrqULLffYU1Q2tLdJvaCYbONStnfkfimRxXNaGjxMldI0C7JPBC4rB1RyjhfdZ4m1frm8pM9uRPKH3d2knZ8gg==
-  dependencies:
-    css-tree "1.0.0-alpha.29"
-
-cssom@0.3.x, "cssom@>= 0.3.2 < 0.4.0", cssom@^0.3.4:
-  version "0.3.8"
-  resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.3.8.tgz#9f1276f5b2b463f2114d3f2c75250af8c1a36f4a"
-  integrity sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==
-
-cssstyle@^1.0.0, cssstyle@^1.1.1:
-  version "1.4.0"
-  resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-1.4.0.tgz#9d31328229d3c565c61e586b02041a28fccdccf1"
-  integrity sha512-GBrLZYZ4X4x6/QEoBnIrqb8B/f5l4+8me2dkom/j1Gtbxy0kBv6OGzKuAsGM75bkGwGAFkt56Iwg28S3XTZgSA==
-  dependencies:
-    cssom "0.3.x"
-
-csstype@^2.2.0:
-  version "2.6.6"
-  resolved "https://registry.yarnpkg.com/csstype/-/csstype-2.6.6.tgz#c34f8226a94bbb10c32cc0d714afdf942291fc41"
-  integrity sha512-RpFbQGUE74iyPgvr46U9t1xoQBM8T4BL8SxrN66Le2xYAPSaDJJKeztV3awugusb3g3G9iL8StmkBBXhcbbXhg==
-
-customize-cra@^0.2.12:
-  version "0.2.14"
-  resolved "https://registry.yarnpkg.com/customize-cra/-/customize-cra-0.2.14.tgz#41f9b2d96d9a318bec760c4c9b3dc9c26d5a7594"
-  integrity sha512-LtEMXNzkhnnqGPc1dP5fnPlF1ic1dj34hDbRVJIzfMQgOaGByHhx51fTR7fv7sTPEbCPrOBP777MkCo0GPV57g==
-  dependencies:
-    lodash.flow "^3.5.0"
-
-cyclist@~0.2.2:
-  version "0.2.2"
-  resolved "https://registry.yarnpkg.com/cyclist/-/cyclist-0.2.2.tgz#1b33792e11e914a2fd6d6ed6447464444e5fa640"
-  integrity sha1-GzN5LhHpFKL9bW7WRHRkRE5fpkA=
-
-damerau-levenshtein@^1.0.4:
-  version "1.0.5"
-  resolved "https://registry.yarnpkg.com/damerau-levenshtein/-/damerau-levenshtein-1.0.5.tgz#780cf7144eb2e8dbd1c3bb83ae31100ccc31a414"
-  integrity sha512-CBCRqFnpu715iPmw1KrdOrzRqbdFwQTwAWyyyYS42+iAgHCuXZ+/TdMgQkUENPomxEz9z1BEzuQU2Xw0kUuAgA==
-
-dashdash@^1.12.0:
-  version "1.14.1"
-  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
-  integrity sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=
-  dependencies:
-    assert-plus "^1.0.0"
-
-data-urls@^1.0.0, data-urls@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/data-urls/-/data-urls-1.1.0.tgz#15ee0582baa5e22bb59c77140da8f9c76963bbfe"
-  integrity sha512-YTWYI9se1P55u58gL5GkQHW4P6VJBJ5iBT+B5a7i2Tjadhv52paJG0qHX4A0OR6/t52odI64KP2YvFpkDOi3eQ==
-  dependencies:
-    abab "^2.0.0"
-    whatwg-mimetype "^2.2.0"
-    whatwg-url "^7.0.0"
-
-date-now@^0.1.4:
-  version "0.1.4"
-  resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b"
-  integrity sha1-6vQ5/U1ISK105cx9vvIAZyueNFs=
-
-debug@2.6.9, debug@^2.2.0, debug@^2.3.3, debug@^2.6.0, debug@^2.6.8, debug@^2.6.9:
-  version "2.6.9"
-  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f"
-  integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
-  dependencies:
-    ms "2.0.0"
-
-debug@^3.0.0, debug@^3.2.5, debug@^3.2.6:
-  version "3.2.6"
-  resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.6.tgz#e83d17de16d8a7efb7717edbe5fb10135eee629b"
-  integrity sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==
-  dependencies:
-    ms "^2.1.1"
-
-debug@^4.0.1, debug@^4.1.0, debug@^4.1.1:
-  version "4.1.1"
-  resolved "https://registry.yarnpkg.com/debug/-/debug-4.1.1.tgz#3b72260255109c6b589cee050f1d516139664791"
-  integrity sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==
-  dependencies:
-    ms "^2.1.1"
-
-decamelize@^1.2.0:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
-  integrity sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=
-
-decamelize@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-2.0.0.tgz#656d7bbc8094c4c788ea53c5840908c9c7d063c7"
-  integrity sha512-Ikpp5scV3MSYxY39ymh45ZLEecsTdv/Xj2CaQfI8RLMuwi7XvjX9H/fhraiSuU+C5w5NTDu4ZU72xNiZnurBPg==
-  dependencies:
-    xregexp "4.0.0"
-
-decode-uri-component@^0.2.0:
-  version "0.2.0"
-  resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545"
-  integrity sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=
-
-deep-equal@^1.0.1:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-1.1.0.tgz#3103cdf8ab6d32cf4a8df7865458f2b8d33f3745"
-  integrity sha512-ZbfWJq/wN1Z273o7mUSjILYqehAktR2NVoSrOukDkU9kg2v/Uv89yU4Cvz8seJeAmtN5oqiefKq8FPuXOboqLw==
-  dependencies:
-    is-arguments "^1.0.4"
-    is-date-object "^1.0.1"
-    is-regex "^1.0.4"
-    object-is "^1.0.1"
-    object-keys "^1.1.1"
-    regexp.prototype.flags "^1.2.0"
-
-deep-extend@^0.6.0:
-  version "0.6.0"
-  resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac"
-  integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==
-
-deep-is@~0.1.3:
-  version "0.1.3"
-  resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34"
-  integrity sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=
-
-default-gateway@^4.2.0:
-  version "4.2.0"
-  resolved "https://registry.yarnpkg.com/default-gateway/-/default-gateway-4.2.0.tgz#167104c7500c2115f6dd69b0a536bb8ed720552b"
-  integrity sha512-h6sMrVB1VMWVrW13mSc6ia/DwYYw5MN6+exNu1OaJeFac5aSAvwM7lZ0NVfTABuSkQelr4h5oebg3KB1XPdjgA==
-  dependencies:
-    execa "^1.0.0"
-    ip-regex "^2.1.0"
-
-define-properties@^1.1.2, define-properties@^1.1.3:
-  version "1.1.3"
-  resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.3.tgz#cf88da6cbee26fe6db7094f61d870cbd84cee9f1"
-  integrity sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==
-  dependencies:
-    object-keys "^1.0.12"
-
-define-property@^0.2.5:
-  version "0.2.5"
-  resolved "https://registry.yarnpkg.com/define-property/-/define-property-0.2.5.tgz#c35b1ef918ec3c990f9a5bc57be04aacec5c8116"
-  integrity sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=
-  dependencies:
-    is-descriptor "^0.1.0"
-
-define-property@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/define-property/-/define-property-1.0.0.tgz#769ebaaf3f4a63aad3af9e8d304c9bbe79bfb0e6"
-  integrity sha1-dp66rz9KY6rTr56NMEybvnm/sOY=
-  dependencies:
-    is-descriptor "^1.0.0"
-
-define-property@^2.0.2:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/define-property/-/define-property-2.0.2.tgz#d459689e8d654ba77e02a817f8710d702cb16e9d"
-  integrity sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==
-  dependencies:
-    is-descriptor "^1.0.2"
-    isobject "^3.0.1"
-
-del@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/del/-/del-3.0.0.tgz#53ecf699ffcbcb39637691ab13baf160819766e5"
-  integrity sha1-U+z2mf/LyzljdpGrE7rxYIGXZuU=
-  dependencies:
-    globby "^6.1.0"
-    is-path-cwd "^1.0.0"
-    is-path-in-cwd "^1.0.0"
-    p-map "^1.1.1"
-    pify "^3.0.0"
-    rimraf "^2.2.8"
-
-delayed-stream@~1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
-  integrity sha1-3zrhmayt+31ECqrgsp4icrJOxhk=
-
-delegates@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a"
-  integrity sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=
-
-depd@~1.1.2:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9"
-  integrity sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=
-
-des.js@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/des.js/-/des.js-1.0.0.tgz#c074d2e2aa6a8a9a07dbd61f9a15c2cd83ec8ecc"
-  integrity sha1-wHTS4qpqipoH29YfmhXCzYPsjsw=
-  dependencies:
-    inherits "^2.0.1"
-    minimalistic-assert "^1.0.0"
-
-destroy@~1.0.4:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80"
-  integrity sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=
-
-detect-libc@^1.0.2:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b"
-  integrity sha1-+hN8S9aY7fVc1c0CrFWfkaTEups=
-
-detect-newline@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-2.1.0.tgz#f41f1c10be4b00e87b5f13da680759f2c5bfd3e2"
-  integrity sha1-9B8cEL5LAOh7XxPaaAdZ8sW/0+I=
-
-detect-node@^2.0.4:
-  version "2.0.4"
-  resolved "https://registry.yarnpkg.com/detect-node/-/detect-node-2.0.4.tgz#014ee8f8f669c5c58023da64b8179c083a28c46c"
-  integrity sha512-ZIzRpLJrOj7jjP2miAtgqIfmzbxa4ZOr5jJc601zklsfEx9oTzmmj2nVpIPRpNlRTIh8lc1kyViIY7BWSGNmKw==
-
-detect-port-alt@1.1.6:
-  version "1.1.6"
-  resolved "https://registry.yarnpkg.com/detect-port-alt/-/detect-port-alt-1.1.6.tgz#24707deabe932d4a3cf621302027c2b266568275"
-  integrity sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==
-  dependencies:
-    address "^1.0.1"
-    debug "^2.6.0"
-
-diff-sequences@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-24.9.0.tgz#5715d6244e2aa65f48bba0bc972db0b0b11e95b5"
-  integrity sha512-Dj6Wk3tWyTE+Fo1rW8v0Xhwk80um6yFYKbuAxc9c3EZxIHFDYwbi34Uk42u1CdnIiVorvt4RmlSDjIPyzGC2ew==
-
-diffie-hellman@^5.0.0:
-  version "5.0.3"
-  resolved "https://registry.yarnpkg.com/diffie-hellman/-/diffie-hellman-5.0.3.tgz#40e8ee98f55a2149607146921c63e1ae5f3d2875"
-  integrity sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==
-  dependencies:
-    bn.js "^4.1.0"
-    miller-rabin "^4.0.0"
-    randombytes "^2.0.0"
-
-dir-glob@2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-2.0.0.tgz#0b205d2b6aef98238ca286598a8204d29d0a0034"
-  integrity sha512-37qirFDz8cA5fimp9feo43fSuRo2gHwaIn6dXL8Ber1dGwUosDrGZeCCXq57WnIqE4aQ+u3eQZzsk1yOzhdwag==
-  dependencies:
-    arrify "^1.0.1"
-    path-type "^3.0.0"
-
-dns-equal@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/dns-equal/-/dns-equal-1.0.0.tgz#b39e7f1da6eb0a75ba9c17324b34753c47e0654d"
-  integrity sha1-s55/HabrCnW6nBcySzR1PEfgZU0=
-
-dns-packet@^1.3.1:
-  version "1.3.1"
-  resolved "https://registry.yarnpkg.com/dns-packet/-/dns-packet-1.3.1.tgz#12aa426981075be500b910eedcd0b47dd7deda5a"
-  integrity sha512-0UxfQkMhYAUaZI+xrNZOz/as5KgDU0M/fQ9b6SpkyLbk3GEswDi6PADJVaYJradtRVsRIlF1zLyOodbcTCDzUg==
-  dependencies:
-    ip "^1.1.0"
-    safe-buffer "^5.0.1"
-
-dns-txt@^2.0.2:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/dns-txt/-/dns-txt-2.0.2.tgz#b91d806f5d27188e4ab3e7d107d881a1cc4642b6"
-  integrity sha1-uR2Ab10nGI5Ks+fRB9iBocxGQrY=
-  dependencies:
-    buffer-indexof "^1.0.0"
-
-doctrine@1.5.0:
-  version "1.5.0"
-  resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-1.5.0.tgz#379dce730f6166f76cefa4e6707a159b02c5a6fa"
-  integrity sha1-N53Ocw9hZvds76TmcHoVmwLFpvo=
-  dependencies:
-    esutils "^2.0.2"
-    isarray "^1.0.0"
-
-doctrine@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-2.1.0.tgz#5cd01fc101621b42c4cd7f5d1a66243716d3f39d"
-  integrity sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==
-  dependencies:
-    esutils "^2.0.2"
-
-doctrine@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-3.0.0.tgz#addebead72a6574db783639dc87a121773973961"
-  integrity sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==
-  dependencies:
-    esutils "^2.0.2"
-
-dom-align@^1.7.0:
-  version "1.10.2"
-  resolved "https://registry.yarnpkg.com/dom-align/-/dom-align-1.10.2.tgz#540ea1c9e20462bd11b9fc28c561dc8351ece4c6"
-  integrity sha512-AYZUzLepy05E9bCY4ExoqHrrIlM49PEak9oF93JEFoibqKL0F7w5DLM70/rosLOawerWZ3MlepQcl+EmHskOyw==
-
-dom-closest@^0.2.0:
-  version "0.2.0"
-  resolved "https://registry.yarnpkg.com/dom-closest/-/dom-closest-0.2.0.tgz#ebd9f91d1bf22e8d6f477876bbcd3ec90216c0cf"
-  integrity sha1-69n5HRvyLo1vR3h2u80+yQIWwM8=
-  dependencies:
-    dom-matches ">=1.0.1"
-
-dom-converter@^0.2:
-  version "0.2.0"
-  resolved "https://registry.yarnpkg.com/dom-converter/-/dom-converter-0.2.0.tgz#6721a9daee2e293682955b6afe416771627bb768"
-  integrity sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==
-  dependencies:
-    utila "~0.4"
-
-dom-matches@>=1.0.1:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/dom-matches/-/dom-matches-2.0.0.tgz#d2728b416a87533980eb089b848d253cf23a758c"
-  integrity sha1-0nKLQWqHUzmA6wibhI0lPPI6dYw=
-
-dom-scroll-into-view@1.x, dom-scroll-into-view@^1.2.0, dom-scroll-into-view@^1.2.1:
-  version "1.2.1"
-  resolved "https://registry.yarnpkg.com/dom-scroll-into-view/-/dom-scroll-into-view-1.2.1.tgz#e8f36732dd089b0201a88d7815dc3f88e6d66c7e"
-  integrity sha1-6PNnMt0ImwIBqI14Fdw/iObWbH4=
-
-dom-serializer@0:
-  version "0.2.1"
-  resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.2.1.tgz#13650c850daffea35d8b626a4cfc4d3a17643fdb"
-  integrity sha512-sK3ujri04WyjwQXVoK4PU3y8ula1stq10GJZpqHIUgoGZdsGzAGu65BnU3d08aTVSvO7mGPZUc0wTEDL+qGE0Q==
-  dependencies:
-    domelementtype "^2.0.1"
-    entities "^2.0.0"
-
-domain-browser@^1.1.1:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/domain-browser/-/domain-browser-1.2.0.tgz#3d31f50191a6749dd1375a7f522e823d42e54eda"
-  integrity sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==
-
-domelementtype@1, domelementtype@^1.3.1:
-  version "1.3.1"
-  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.1.tgz#d048c44b37b0d10a7f2a3d5fee3f4333d790481f"
-  integrity sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==
-
-domelementtype@^2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-2.0.1.tgz#1f8bdfe91f5a78063274e803b4bdcedf6e94f94d"
-  integrity sha512-5HOHUDsYZWV8FGWN0Njbr/Rn7f/eWSQi1v7+HsUVwXgn8nWWlL64zKDkS0n8ZmQ3mlWOMuXOnR+7Nx/5tMO5AQ==
-
-domexception@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/domexception/-/domexception-1.0.1.tgz#937442644ca6a31261ef36e3ec677fe805582c90"
-  integrity sha512-raigMkn7CJNNo6Ihro1fzG7wr3fHuYVytzquZKX5n0yizGsTcYgzdIUwj1X9pK0VvjeihV+XiclP+DjwbsSKug==
-  dependencies:
-    webidl-conversions "^4.0.2"
-
-domhandler@^2.3.0:
-  version "2.4.2"
-  resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.4.2.tgz#8805097e933d65e85546f726d60f5eb88b44f803"
-  integrity sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==
-  dependencies:
-    domelementtype "1"
-
-domutils@1.5.1:
-  version "1.5.1"
-  resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf"
-  integrity sha1-3NhIiib1Y9YQeeSMn3t+Mjc2gs8=
-  dependencies:
-    dom-serializer "0"
-    domelementtype "1"
-
-domutils@^1.5.1, domutils@^1.7.0:
-  version "1.7.0"
-  resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.7.0.tgz#56ea341e834e06e6748af7a1cb25da67ea9f8c2a"
-  integrity sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==
-  dependencies:
-    dom-serializer "0"
-    domelementtype "1"
-
-dot-prop@^4.1.1:
-  version "4.2.0"
-  resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-4.2.0.tgz#1f19e0c2e1aa0e32797c49799f2837ac6af69c57"
-  integrity sha512-tUMXrxlExSW6U2EXiiKGSBVdYgtV8qlHL+C10TsW4PURY/ic+eaysnSkwB4kA/mBlCyy/IKDJ+Lc3wbWeaXtuQ==
-  dependencies:
-    is-obj "^1.0.0"
-
-dotenv-expand@4.2.0:
-  version "4.2.0"
-  resolved "https://registry.yarnpkg.com/dotenv-expand/-/dotenv-expand-4.2.0.tgz#def1f1ca5d6059d24a766e587942c21106ce1275"
-  integrity sha1-3vHxyl1gWdJKdm5YeULCEQbOEnU=
-
-dotenv@6.2.0, dotenv@^6.2.0:
-  version "6.2.0"
-  resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-6.2.0.tgz#941c0410535d942c8becf28d3f357dbd9d476064"
-  integrity sha512-HygQCKUBSFl8wKQZBSemMywRWcEDNidvNbjGVyZu3nbZ8qq9ubiPoGLMdRDpfSrpkkm9BXYFkpKxxFX38o/76w==
-
-draft-js@^0.10.0, draft-js@~0.10.0:
-  version "0.10.5"
-  resolved "https://registry.yarnpkg.com/draft-js/-/draft-js-0.10.5.tgz#bfa9beb018fe0533dbb08d6675c371a6b08fa742"
-  integrity sha512-LE6jSCV9nkPhfVX2ggcRLA4FKs6zWq9ceuO/88BpXdNCS7mjRTgs0NsV6piUCJX9YxMsB9An33wnkMmU2sD2Zg==
-  dependencies:
-    fbjs "^0.8.15"
-    immutable "~3.7.4"
-    object-assign "^4.1.0"
-
-duplexer@^0.1.1:
-  version "0.1.1"
-  resolved "https://registry.yarnpkg.com/duplexer/-/duplexer-0.1.1.tgz#ace6ff808c1ce66b57d1ebf97977acb02334cfc1"
-  integrity sha1-rOb/gIwc5mtX0ev5eXessCM0z8E=
-
-duplexify@^3.4.2, duplexify@^3.6.0:
-  version "3.7.1"
-  resolved "https://registry.yarnpkg.com/duplexify/-/duplexify-3.7.1.tgz#2a4df5317f6ccfd91f86d6fd25d8d8a103b88309"
-  integrity sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==
-  dependencies:
-    end-of-stream "^1.0.0"
-    inherits "^2.0.1"
-    readable-stream "^2.0.0"
-    stream-shift "^1.0.0"
-
-ecc-jsbn@~0.1.1:
-  version "0.1.2"
-  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9"
-  integrity sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=
-  dependencies:
-    jsbn "~0.1.0"
-    safer-buffer "^2.1.0"
-
-ee-first@1.1.1:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
-  integrity sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=
-
-electron-to-chromium@^1.3.191:
-  version "1.3.243"
-  resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.243.tgz#32f64f00fa121532d1d49f5c0a15fd77f52ae889"
-  integrity sha512-+edFdHGxLSmAKftXa5xZIg19rHkkJLiW+tRu0VMVG3RKztyeKX7d3pXf707lS6+BxB9uBun3RShbxCI1PtBAgQ==
-
-elliptic@^6.0.0:
-  version "6.5.0"
-  resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.0.tgz#2b8ed4c891b7de3200e14412a5b8248c7af505ca"
-  integrity sha512-eFOJTMyCYb7xtE/caJ6JJu+bhi67WCYNbkGSknu20pmM8Ke/bqOfdnZWxyoGN26JgfxTbXrsCkEw4KheCT/KGg==
-  dependencies:
-    bn.js "^4.4.0"
-    brorand "^1.0.1"
-    hash.js "^1.0.0"
-    hmac-drbg "^1.0.0"
-    inherits "^2.0.1"
-    minimalistic-assert "^1.0.0"
-    minimalistic-crypto-utils "^1.0.0"
-
-emoji-regex@^7.0.1, emoji-regex@^7.0.2:
-  version "7.0.3"
-  resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-7.0.3.tgz#933a04052860c85e83c122479c4748a8e4c72156"
-  integrity sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==
-
-emojis-list@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-2.1.0.tgz#4daa4d9db00f9819880c79fa457ae5b09a1fd389"
-  integrity sha1-TapNnbAPmBmIDHn6RXrlsJof04k=
-
-encodeurl@~1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59"
-  integrity sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=
-
-encoding@^0.1.11:
-  version "0.1.12"
-  resolved "https://registry.yarnpkg.com/encoding/-/encoding-0.1.12.tgz#538b66f3ee62cd1ab51ec323829d1f9480c74beb"
-  integrity sha1-U4tm8+5izRq1HsMjgp0flIDHS+s=
-  dependencies:
-    iconv-lite "~0.4.13"
-
-end-of-stream@^1.0.0, end-of-stream@^1.1.0:
-  version "1.4.1"
-  resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.1.tgz#ed29634d19baba463b6ce6b80a37213eab71ec43"
-  integrity sha512-1MkrZNvWTKCaigbn+W15elq2BB/L22nqrSY5DKlo3X6+vclJm8Bb5djXJBmEX6fS3+zCh/F4VBK5Z2KxJt4s2Q==
-  dependencies:
-    once "^1.4.0"
-
-enhanced-resolve@^4.1.0:
-  version "4.1.0"
-  resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-4.1.0.tgz#41c7e0bfdfe74ac1ffe1e57ad6a5c6c9f3742a7f"
-  integrity sha512-F/7vkyTtyc/llOIn8oWclcB25KdRaiPBpZYDgJHgh/UHtpgT2p2eldQgtQnLtUvfMKPKxbRaQM/hHkvLHt1Vng==
-  dependencies:
-    graceful-fs "^4.1.2"
-    memory-fs "^0.4.0"
-    tapable "^1.0.0"
-
-enquire.js@^2.1.6:
-  version "2.1.6"
-  resolved "https://registry.yarnpkg.com/enquire.js/-/enquire.js-2.1.6.tgz#3e8780c9b8b835084c3f60e166dbc3c2a3c89814"
-  integrity sha1-PoeAybi4NQhMP2DhZtvDwqPImBQ=
-
-entities@^1.1.1:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.2.tgz#bdfa735299664dfafd34529ed4f8522a275fea56"
-  integrity sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==
-
-entities@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/entities/-/entities-2.0.0.tgz#68d6084cab1b079767540d80e56a39b423e4abf4"
-  integrity sha512-D9f7V0JSRwIxlRI2mjMqufDrRDnx8p+eEOz7aUM9SuvF8gsBzra0/6tbjl1m8eQHrZlYj6PxqE00hZ1SAIKPLw==
-
-errno@^0.1.1, errno@^0.1.3, errno@~0.1.7:
-  version "0.1.7"
-  resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.7.tgz#4684d71779ad39af177e3f007996f7c67c852618"
-  integrity sha512-MfrRBDWzIWifgq6tJj60gkAwtLNb6sQPlcFrSOflcP1aFmmruKQ2wRnze/8V6kgyz7H3FF8Npzv78mZ7XLLflg==
-  dependencies:
-    prr "~1.0.1"
-
-error-ex@^1.2.0, error-ex@^1.3.1:
-  version "1.3.2"
-  resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf"
-  integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==
-  dependencies:
-    is-arrayish "^0.2.1"
-
-es-abstract@^1.11.0, es-abstract@^1.12.0, es-abstract@^1.5.1, es-abstract@^1.7.0:
-  version "1.13.0"
-  resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.13.0.tgz#ac86145fdd5099d8dd49558ccba2eaf9b88e24e9"
-  integrity sha512-vDZfg/ykNxQVwup/8E1BZhVzFfBxs9NqMzGcvIJrqg5k2/5Za2bWo40dK2J1pgLngZ7c+Shh8lwYtLGyrwPutg==
-  dependencies:
-    es-to-primitive "^1.2.0"
-    function-bind "^1.1.1"
-    has "^1.0.3"
-    is-callable "^1.1.4"
-    is-regex "^1.0.4"
-    object-keys "^1.0.12"
-
-es-to-primitive@^1.2.0:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.0.tgz#edf72478033456e8dda8ef09e00ad9650707f377"
-  integrity sha512-qZryBOJjV//LaxLTV6UC//WewneB3LcXOL9NP++ozKVXsIIIpm/2c13UDiD9Jp2eThsecw9m3jPqDwTyobcdbg==
-  dependencies:
-    is-callable "^1.1.4"
-    is-date-object "^1.0.1"
-    is-symbol "^1.0.2"
-
-escape-html@~1.0.3:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
-  integrity sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=
-
-escape-string-regexp@1.0.5, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5:
-  version "1.0.5"
-  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
-  integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=
-
-escodegen@^1.11.0, escodegen@^1.9.1:
-  version "1.12.0"
-  resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.12.0.tgz#f763daf840af172bb3a2b6dd7219c0e17f7ff541"
-  integrity sha512-TuA+EhsanGcme5T3R0L80u4t8CpbXQjegRmf7+FPTJrtCTErXFeelblRgHQa1FofEzqYYJmJ/OqjTwREp9qgmg==
-  dependencies:
-    esprima "^3.1.3"
-    estraverse "^4.2.0"
-    esutils "^2.0.2"
-    optionator "^0.8.1"
-  optionalDependencies:
-    source-map "~0.6.1"
-
-eslint-config-react-app@^4.0.0:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/eslint-config-react-app/-/eslint-config-react-app-4.0.1.tgz#23fd0fd7ea89442ef1e733f66a7207674b23c8db"
-  integrity sha512-ZsaoXUIGsK8FCi/x4lT2bZR5mMkL/Kgj+Lnw690rbvvUr/uiwgFiD8FcfAhkCycm7Xte6O5lYz4EqMx2vX7jgw==
-  dependencies:
-    confusing-browser-globals "^1.0.7"
-
-eslint-import-resolver-node@^0.3.2:
-  version "0.3.2"
-  resolved "https://registry.yarnpkg.com/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.2.tgz#58f15fb839b8d0576ca980413476aab2472db66a"
-  integrity sha512-sfmTqJfPSizWu4aymbPr4Iidp5yKm8yDkHp+Ir3YiTHiiDfxh69mOUsmiqW6RZ9zRXFaF64GtYmN7e+8GHBv6Q==
-  dependencies:
-    debug "^2.6.9"
-    resolve "^1.5.0"
-
-eslint-loader@2.1.2:
-  version "2.1.2"
-  resolved "https://registry.yarnpkg.com/eslint-loader/-/eslint-loader-2.1.2.tgz#453542a1230d6ffac90e4e7cb9cadba9d851be68"
-  integrity sha512-rA9XiXEOilLYPOIInvVH5S/hYfyTPyxag6DZhoQOduM+3TkghAEQ3VcFO8VnX4J4qg/UIBzp72aOf/xvYmpmsg==
-  dependencies:
-    loader-fs-cache "^1.0.0"
-    loader-utils "^1.0.2"
-    object-assign "^4.0.1"
-    object-hash "^1.1.4"
-    rimraf "^2.6.1"
-
-eslint-module-utils@^2.3.0:
-  version "2.4.1"
-  resolved "https://registry.yarnpkg.com/eslint-module-utils/-/eslint-module-utils-2.4.1.tgz#7b4675875bf96b0dbf1b21977456e5bb1f5e018c"
-  integrity sha512-H6DOj+ejw7Tesdgbfs4jeS4YMFrT8uI8xwd1gtQqXssaR0EQ26L+2O/w6wkYFy2MymON0fTwHmXBvvfLNZVZEw==
-  dependencies:
-    debug "^2.6.8"
-    pkg-dir "^2.0.0"
-
-eslint-plugin-flowtype@2.50.1:
-  version "2.50.1"
-  resolved "https://registry.yarnpkg.com/eslint-plugin-flowtype/-/eslint-plugin-flowtype-2.50.1.tgz#36d4c961ac8b9e9e1dc091d3fba0537dad34ae8a"
-  integrity sha512-9kRxF9hfM/O6WGZcZPszOVPd2W0TLHBtceulLTsGfwMPtiCCLnCW0ssRiOOiXyqrCA20pm1iXdXm7gQeN306zQ==
-  dependencies:
-    lodash "^4.17.10"
-
-eslint-plugin-import@2.16.0:
-  version "2.16.0"
-  resolved "https://registry.yarnpkg.com/eslint-plugin-import/-/eslint-plugin-import-2.16.0.tgz#97ac3e75d0791c4fac0e15ef388510217be7f66f"
-  integrity sha512-z6oqWlf1x5GkHIFgrSvtmudnqM6Q60KM4KvpWi5ubonMjycLjndvd5+8VAZIsTlHC03djdgJuyKG6XO577px6A==
-  dependencies:
-    contains-path "^0.1.0"
-    debug "^2.6.9"
-    doctrine "1.5.0"
-    eslint-import-resolver-node "^0.3.2"
-    eslint-module-utils "^2.3.0"
-    has "^1.0.3"
-    lodash "^4.17.11"
-    minimatch "^3.0.4"
-    read-pkg-up "^2.0.0"
-    resolve "^1.9.0"
-
-eslint-plugin-jsx-a11y@6.2.1:
-  version "6.2.1"
-  resolved "https://registry.yarnpkg.com/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.2.1.tgz#4ebba9f339b600ff415ae4166e3e2e008831cf0c"
-  integrity sha512-cjN2ObWrRz0TTw7vEcGQrx+YltMvZoOEx4hWU8eEERDnBIU00OTq7Vr+jA7DFKxiwLNv4tTh5Pq2GUNEa8b6+w==
-  dependencies:
-    aria-query "^3.0.0"
-    array-includes "^3.0.3"
-    ast-types-flow "^0.0.7"
-    axobject-query "^2.0.2"
-    damerau-levenshtein "^1.0.4"
-    emoji-regex "^7.0.2"
-    has "^1.0.3"
-    jsx-ast-utils "^2.0.1"
-
-eslint-plugin-react-hooks@^1.5.0:
-  version "1.7.0"
-  resolved "https://registry.yarnpkg.com/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-1.7.0.tgz#6210b6d5a37205f0b92858f895a4e827020a7d04"
-  integrity sha512-iXTCFcOmlWvw4+TOE8CLWj6yX1GwzT0Y6cUfHHZqWnSk144VmVIRcVGtUAzrLES7C798lmvnt02C7rxaOX1HNA==
-
-eslint-plugin-react@7.12.4:
-  version "7.12.4"
-  resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.12.4.tgz#b1ecf26479d61aee650da612e425c53a99f48c8c"
-  integrity sha512-1puHJkXJY+oS1t467MjbqjvX53uQ05HXwjqDgdbGBqf5j9eeydI54G3KwiJmWciQ0HTBacIKw2jgwSBSH3yfgQ==
-  dependencies:
-    array-includes "^3.0.3"
-    doctrine "^2.1.0"
-    has "^1.0.3"
-    jsx-ast-utils "^2.0.1"
-    object.fromentries "^2.0.0"
-    prop-types "^15.6.2"
-    resolve "^1.9.0"
-
-eslint-scope@3.7.1:
-  version "3.7.1"
-  resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-3.7.1.tgz#3d63c3edfda02e06e01a452ad88caacc7cdcb6e8"
-  integrity sha1-PWPD7f2gLgbgGkUq2IyqzHzctug=
-  dependencies:
-    esrecurse "^4.1.0"
-    estraverse "^4.1.1"
-
-eslint-scope@^4.0.0, eslint-scope@^4.0.3:
-  version "4.0.3"
-  resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-4.0.3.tgz#ca03833310f6889a3264781aa82e63eb9cfe7848"
-  integrity sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==
-  dependencies:
-    esrecurse "^4.1.0"
-    estraverse "^4.1.1"
-
-eslint-utils@^1.3.1:
-  version "1.4.2"
-  resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-1.4.2.tgz#166a5180ef6ab7eb462f162fd0e6f2463d7309ab"
-  integrity sha512-eAZS2sEUMlIeCjBeubdj45dmBHQwPHWyBcT1VSYB7o9x9WRRqKxyUoiXlRjyAwzN7YEzHJlYg0NmzDRWx6GP4Q==
-  dependencies:
-    eslint-visitor-keys "^1.0.0"
-
-eslint-visitor-keys@^1.0.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz#e2a82cea84ff246ad6fb57f9bde5b46621459ec2"
-  integrity sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==
-
-eslint@^5.16.0:
-  version "5.16.0"
-  resolved "https://registry.yarnpkg.com/eslint/-/eslint-5.16.0.tgz#a1e3ac1aae4a3fbd8296fcf8f7ab7314cbb6abea"
-  integrity sha512-S3Rz11i7c8AA5JPv7xAH+dOyq/Cu/VXHiHXBPOU1k/JAM5dXqQPt3qcrhpHSorXmrpu2g0gkIBVXAqCpzfoZIg==
-  dependencies:
-    "@babel/code-frame" "^7.0.0"
-    ajv "^6.9.1"
-    chalk "^2.1.0"
-    cross-spawn "^6.0.5"
-    debug "^4.0.1"
-    doctrine "^3.0.0"
-    eslint-scope "^4.0.3"
-    eslint-utils "^1.3.1"
-    eslint-visitor-keys "^1.0.0"
-    espree "^5.0.1"
-    esquery "^1.0.1"
-    esutils "^2.0.2"
-    file-entry-cache "^5.0.1"
-    functional-red-black-tree "^1.0.1"
-    glob "^7.1.2"
-    globals "^11.7.0"
-    ignore "^4.0.6"
-    import-fresh "^3.0.0"
-    imurmurhash "^0.1.4"
-    inquirer "^6.2.2"
-    js-yaml "^3.13.0"
-    json-stable-stringify-without-jsonify "^1.0.1"
-    levn "^0.3.0"
-    lodash "^4.17.11"
-    minimatch "^3.0.4"
-    mkdirp "^0.5.1"
-    natural-compare "^1.4.0"
-    optionator "^0.8.2"
-    path-is-inside "^1.0.2"
-    progress "^2.0.0"
-    regexpp "^2.0.1"
-    semver "^5.5.1"
-    strip-ansi "^4.0.0"
-    strip-json-comments "^2.0.1"
-    table "^5.2.3"
-    text-table "^0.2.0"
-
-espree@^5.0.1:
-  version "5.0.1"
-  resolved "https://registry.yarnpkg.com/espree/-/espree-5.0.1.tgz#5d6526fa4fc7f0788a5cf75b15f30323e2f81f7a"
-  integrity sha512-qWAZcWh4XE/RwzLJejfcofscgMc9CamR6Tn1+XRXNzrvUSSbiAjGOI/fggztjIi7y9VLPqnICMIPiGyr8JaZ0A==
-  dependencies:
-    acorn "^6.0.7"
-    acorn-jsx "^5.0.0"
-    eslint-visitor-keys "^1.0.0"
-
-esprima@^3.1.3:
-  version "3.1.3"
-  resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633"
-  integrity sha1-/cpRzuYTOJXjyI1TXOSdv/YqRjM=
-
-esprima@^4.0.0:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71"
-  integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==
-
-esquery@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.0.1.tgz#406c51658b1f5991a5f9b62b1dc25b00e3e5c708"
-  integrity sha512-SmiyZ5zIWH9VM+SRUReLS5Q8a7GxtRdxEBVZpm98rJM7Sb+A9DVCndXfkeFUd3byderg+EbDkfnevfCwynWaNA==
-  dependencies:
-    estraverse "^4.0.0"
-
-esrecurse@^4.1.0:
-  version "4.2.1"
-  resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.2.1.tgz#007a3b9fdbc2b3bb87e4879ea19c92fdbd3942cf"
-  integrity sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==
-  dependencies:
-    estraverse "^4.1.0"
-
-estraverse@^4.0.0, estraverse@^4.1.0, estraverse@^4.1.1, estraverse@^4.2.0:
-  version "4.3.0"
-  resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d"
-  integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==
-
-esutils@^2.0.0, esutils@^2.0.2:
-  version "2.0.3"
-  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64"
-  integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==
-
-etag@~1.8.1:
-  version "1.8.1"
-  resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887"
-  integrity sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=
-
-eventemitter3@^3.0.0:
-  version "3.1.2"
-  resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-3.1.2.tgz#2d3d48f9c346698fce83a85d7d664e98535df6e7"
-  integrity sha512-tvtQIeLVHjDkJYnzf2dgVMxfuSGJeM/7UCG17TT4EumTfNtF+0nebF/4zWOIkCreAbtNqhGEboB6BWrwqNaw4Q==
-
-eventlistener@0.0.1:
-  version "0.0.1"
-  resolved "https://registry.yarnpkg.com/eventlistener/-/eventlistener-0.0.1.tgz#ed2baabb852227af2bcf889152c72c63ca532eb8"
-  integrity sha1-7Suqu4UiJ68rz4iRUscsY8pTLrg=
-
-events@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/events/-/events-3.0.0.tgz#9a0a0dfaf62893d92b875b8f2698ca4114973e88"
-  integrity sha512-Dc381HFWJzEOhQ+d8pkNon++bk9h6cdAoAj4iE6Q4y6xgTzySWXlKn05/TVNpjnfRqi/X0EpJEJohPjNI3zpVA==
-
-eventsource@^1.0.7:
-  version "1.0.7"
-  resolved "https://registry.yarnpkg.com/eventsource/-/eventsource-1.0.7.tgz#8fbc72c93fcd34088090bc0a4e64f4b5cee6d8d0"
-  integrity sha512-4Ln17+vVT0k8aWq+t/bF5arcS3EpT9gYtW66EPacdj/mAFevznsnyoHLPy2BA8gbIQeIHoPsvwmfBftfcG//BQ==
-  dependencies:
-    original "^1.0.0"
-
-evp_bytestokey@^1.0.0, evp_bytestokey@^1.0.3:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz#7fcbdb198dc71959432efe13842684e0525acb02"
-  integrity sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==
-  dependencies:
-    md5.js "^1.3.4"
-    safe-buffer "^5.1.1"
-
-exec-sh@^0.3.2:
-  version "0.3.2"
-  resolved "https://registry.yarnpkg.com/exec-sh/-/exec-sh-0.3.2.tgz#6738de2eb7c8e671d0366aea0b0db8c6f7d7391b"
-  integrity sha512-9sLAvzhI5nc8TpuQUh4ahMdCrWT00wPWz7j47/emR5+2qEfoZP5zzUXvx+vdx+H6ohhnsYC31iX04QLYJK8zTg==
-
-execa@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/execa/-/execa-1.0.0.tgz#c6236a5bb4df6d6f15e88e7f017798216749ddd8"
-  integrity sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==
-  dependencies:
-    cross-spawn "^6.0.0"
-    get-stream "^4.0.0"
-    is-stream "^1.1.0"
-    npm-run-path "^2.0.0"
-    p-finally "^1.0.0"
-    signal-exit "^3.0.0"
-    strip-eof "^1.0.0"
-
-exit@^0.1.2:
-  version "0.1.2"
-  resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c"
-  integrity sha1-BjJjj42HfMghB9MKD/8aF8uhzQw=
-
-expand-brackets@^2.1.4:
-  version "2.1.4"
-  resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-2.1.4.tgz#b77735e315ce30f6b6eff0f83b04151a22449622"
-  integrity sha1-t3c14xXOMPa27/D4OwQVGiJEliI=
-  dependencies:
-    debug "^2.3.3"
-    define-property "^0.2.5"
-    extend-shallow "^2.0.1"
-    posix-character-classes "^0.1.0"
-    regex-not "^1.0.0"
-    snapdragon "^0.8.1"
-    to-regex "^3.0.1"
-
-expect@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/expect/-/expect-24.9.0.tgz#b75165b4817074fa4a157794f46fe9f1ba15b6ca"
-  integrity sha512-wvVAx8XIol3Z5m9zvZXiyZOQ+sRJqNTIm6sGjdWlaZIeupQGO3WbYI+15D/AmEwZywL6wtJkbAbJtzkOfBuR0Q==
-  dependencies:
-    "@jest/types" "^24.9.0"
-    ansi-styles "^3.2.0"
-    jest-get-type "^24.9.0"
-    jest-matcher-utils "^24.9.0"
-    jest-message-util "^24.9.0"
-    jest-regex-util "^24.9.0"
-
-express@^4.16.2:
-  version "4.17.1"
-  resolved "https://registry.yarnpkg.com/express/-/express-4.17.1.tgz#4491fc38605cf51f8629d39c2b5d026f98a4c134"
-  integrity sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==
-  dependencies:
-    accepts "~1.3.7"
-    array-flatten "1.1.1"
-    body-parser "1.19.0"
-    content-disposition "0.5.3"
-    content-type "~1.0.4"
-    cookie "0.4.0"
-    cookie-signature "1.0.6"
-    debug "2.6.9"
-    depd "~1.1.2"
-    encodeurl "~1.0.2"
-    escape-html "~1.0.3"
-    etag "~1.8.1"
-    finalhandler "~1.1.2"
-    fresh "0.5.2"
-    merge-descriptors "1.0.1"
-    methods "~1.1.2"
-    on-finished "~2.3.0"
-    parseurl "~1.3.3"
-    path-to-regexp "0.1.7"
-    proxy-addr "~2.0.5"
-    qs "6.7.0"
-    range-parser "~1.2.1"
-    safe-buffer "5.1.2"
-    send "0.17.1"
-    serve-static "1.14.1"
-    setprototypeof "1.1.1"
-    statuses "~1.5.0"
-    type-is "~1.6.18"
-    utils-merge "1.0.1"
-    vary "~1.1.2"
-
-extend-shallow@^2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-2.0.1.tgz#51af7d614ad9a9f610ea1bafbb989d6b1c56890f"
-  integrity sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=
-  dependencies:
-    is-extendable "^0.1.0"
-
-extend-shallow@^3.0.0, extend-shallow@^3.0.2:
-  version "3.0.2"
-  resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-3.0.2.tgz#26a71aaf073b39fb2127172746131c2704028db8"
-  integrity sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=
-  dependencies:
-    assign-symbols "^1.0.0"
-    is-extendable "^1.0.1"
-
-extend@~3.0.2:
-  version "3.0.2"
-  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa"
-  integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==
-
-external-editor@^3.0.3:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/external-editor/-/external-editor-3.1.0.tgz#cb03f740befae03ea4d283caed2741a83f335495"
-  integrity sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==
-  dependencies:
-    chardet "^0.7.0"
-    iconv-lite "^0.4.24"
-    tmp "^0.0.33"
-
-extglob@^2.0.4:
-  version "2.0.4"
-  resolved "https://registry.yarnpkg.com/extglob/-/extglob-2.0.4.tgz#ad00fe4dc612a9232e8718711dc5cb5ab0285543"
-  integrity sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==
-  dependencies:
-    array-unique "^0.3.2"
-    define-property "^1.0.0"
-    expand-brackets "^2.1.4"
-    extend-shallow "^2.0.1"
-    fragment-cache "^0.2.1"
-    regex-not "^1.0.0"
-    snapdragon "^0.8.1"
-    to-regex "^3.0.1"
-
-extsprintf@1.3.0:
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05"
-  integrity sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=
-
-extsprintf@^1.2.0:
-  version "1.4.0"
-  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.0.tgz#e2689f8f356fad62cca65a3a91c5df5f9551692f"
-  integrity sha1-4mifjzVvrWLMplo6kcXfX5VRaS8=
-
-fast-deep-equal@^2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz#7b05218ddf9667bf7f370bf7fdb2cb15fdd0aa49"
-  integrity sha1-ewUhjd+WZ79/Nwv3/bLLFf3Qqkk=
-
-fast-glob@^2.0.2:
-  version "2.2.7"
-  resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-2.2.7.tgz#6953857c3afa475fff92ee6015d52da70a4cd39d"
-  integrity sha512-g1KuQwHOZAmOZMuBtHdxDtju+T2RT8jgCC9aANsbpdiDDTSnjgfuVsIBNKbUeJI3oKMRExcfNDtJl4OhbffMsw==
-  dependencies:
-    "@mrmlnc/readdir-enhanced" "^2.2.1"
-    "@nodelib/fs.stat" "^1.1.2"
-    glob-parent "^3.1.0"
-    is-glob "^4.0.0"
-    merge2 "^1.2.3"
-    micromatch "^3.1.10"
-
-fast-json-stable-stringify@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz#d5142c0caee6b1189f87d3a76111064f86c8bbf2"
-  integrity sha1-1RQsDK7msRifh9OnYREGT4bIu/I=
-
-fast-levenshtein@~2.0.4:
-  version "2.0.6"
-  resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917"
-  integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=
-
-faye-websocket@^0.10.0:
-  version "0.10.0"
-  resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4"
-  integrity sha1-TkkvjQTftviQA1B/btvy1QHnxvQ=
-  dependencies:
-    websocket-driver ">=0.5.1"
-
-faye-websocket@~0.11.1:
-  version "0.11.3"
-  resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.11.3.tgz#5c0e9a8968e8912c286639fde977a8b209f2508e"
-  integrity sha512-D2y4bovYpzziGgbHYtGCMjlJM36vAl/y+xUyn1C+FVx8szd1E+86KwVw6XvYSzOP8iMpm1X0I4xJD+QtUb36OA==
-  dependencies:
-    websocket-driver ">=0.5.1"
-
-fb-watchman@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-2.0.0.tgz#54e9abf7dfa2f26cd9b1636c588c1afc05de5d58"
-  integrity sha1-VOmr99+i8mzZsWNsWIwa/AXeXVg=
-  dependencies:
-    bser "^2.0.0"
-
-fbjs@^0.8.15, fbjs@^0.8.16, fbjs@^0.8.9:
-  version "0.8.17"
-  resolved "https://registry.yarnpkg.com/fbjs/-/fbjs-0.8.17.tgz#c4d598ead6949112653d6588b01a5cdcd9f90fdd"
-  integrity sha1-xNWY6taUkRJlPWWIsBpc3Nn5D90=
-  dependencies:
-    core-js "^1.0.0"
-    isomorphic-fetch "^2.1.1"
-    loose-envify "^1.0.0"
-    object-assign "^4.1.0"
-    promise "^7.1.1"
-    setimmediate "^1.0.5"
-    ua-parser-js "^0.7.18"
-
-figgy-pudding@^3.5.1:
-  version "3.5.1"
-  resolved "https://registry.yarnpkg.com/figgy-pudding/-/figgy-pudding-3.5.1.tgz#862470112901c727a0e495a80744bd5baa1d6790"
-  integrity sha512-vNKxJHTEKNThjfrdJwHc7brvM6eVevuO5nTj6ez8ZQ1qbXTvGthucRF7S4vf2cr71QVnT70V34v0S1DyQsti0w==
-
-figures@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/figures/-/figures-2.0.0.tgz#3ab1a2d2a62c8bfb431a0c94cb797a2fce27c962"
-  integrity sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI=
-  dependencies:
-    escape-string-regexp "^1.0.5"
-
-file-entry-cache@^5.0.1:
-  version "5.0.1"
-  resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-5.0.1.tgz#ca0f6efa6dd3d561333fb14515065c2fafdf439c"
-  integrity sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==
-  dependencies:
-    flat-cache "^2.0.1"
-
-file-loader@3.0.1:
-  version "3.0.1"
-  resolved "https://registry.yarnpkg.com/file-loader/-/file-loader-3.0.1.tgz#f8e0ba0b599918b51adfe45d66d1e771ad560faa"
-  integrity sha512-4sNIOXgtH/9WZq4NvlfU3Opn5ynUsqBwSLyM+I7UOwdGigTBYfVVQEwe/msZNX/j4pCJTIM14Fsw66Svo1oVrw==
-  dependencies:
-    loader-utils "^1.0.2"
-    schema-utils "^1.0.0"
-
-filesize@3.6.1:
-  version "3.6.1"
-  resolved "https://registry.yarnpkg.com/filesize/-/filesize-3.6.1.tgz#090bb3ee01b6f801a8a8be99d31710b3422bb317"
-  integrity sha512-7KjR1vv6qnicaPMi1iiTcI85CyYwRO/PSFCu6SvqL8jN2Wjt/NIYQTFtFs7fSDCYOstUkEWIQGFUg5YZQfjlcg==
-
-fill-range@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-4.0.0.tgz#d544811d428f98eb06a63dc402d2403c328c38f7"
-  integrity sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=
-  dependencies:
-    extend-shallow "^2.0.1"
-    is-number "^3.0.0"
-    repeat-string "^1.6.1"
-    to-regex-range "^2.1.0"
-
-finalhandler@~1.1.2:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.1.2.tgz#b7e7d000ffd11938d0fdb053506f6ebabe9f587d"
-  integrity sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==
-  dependencies:
-    debug "2.6.9"
-    encodeurl "~1.0.2"
-    escape-html "~1.0.3"
-    on-finished "~2.3.0"
-    parseurl "~1.3.3"
-    statuses "~1.5.0"
-    unpipe "~1.0.0"
-
-find-cache-dir@^0.1.1:
-  version "0.1.1"
-  resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-0.1.1.tgz#c8defae57c8a52a8a784f9e31c57c742e993a0b9"
-  integrity sha1-yN765XyKUqinhPnjHFfHQumToLk=
-  dependencies:
-    commondir "^1.0.1"
-    mkdirp "^0.5.1"
-    pkg-dir "^1.0.0"
-
-find-cache-dir@^2.0.0, find-cache-dir@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-2.1.0.tgz#8d0f94cd13fe43c6c7c261a0d86115ca918c05f7"
-  integrity sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==
-  dependencies:
-    commondir "^1.0.1"
-    make-dir "^2.0.0"
-    pkg-dir "^3.0.0"
-
-find-up@3.0.0, find-up@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/find-up/-/find-up-3.0.0.tgz#49169f1d7993430646da61ecc5ae355c21c97b73"
-  integrity sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==
-  dependencies:
-    locate-path "^3.0.0"
-
-find-up@^1.0.0:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/find-up/-/find-up-1.1.2.tgz#6b2e9822b1a2ce0a60ab64d610eccad53cb24d0f"
-  integrity sha1-ay6YIrGizgpgq2TWEOzK1TyyTQ8=
-  dependencies:
-    path-exists "^2.0.0"
-    pinkie-promise "^2.0.0"
-
-find-up@^2.0.0, find-up@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7"
-  integrity sha1-RdG35QbHF93UgndaK3eSCjwMV6c=
-  dependencies:
-    locate-path "^2.0.0"
-
-flat-cache@^2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-2.0.1.tgz#5d296d6f04bda44a4630a301413bdbc2ec085ec0"
-  integrity sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==
-  dependencies:
-    flatted "^2.0.0"
-    rimraf "2.6.3"
-    write "1.0.3"
-
-flatted@^2.0.0:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/flatted/-/flatted-2.0.1.tgz#69e57caa8f0eacbc281d2e2cb458d46fdb449e08"
-  integrity sha512-a1hQMktqW9Nmqr5aktAux3JMNqaucxGcjtjWnZLHX7yyPCmlSV3M54nGYbqT8K+0GhF3NBgmJCc3ma+WOgX8Jg==
-
-flatten@^1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/flatten/-/flatten-1.0.2.tgz#dae46a9d78fbe25292258cc1e780a41d95c03782"
-  integrity sha1-2uRqnXj74lKSJYzB54CkHZXAN4I=
-
-flush-write-stream@^1.0.0:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/flush-write-stream/-/flush-write-stream-1.1.1.tgz#8dd7d873a1babc207d94ead0c2e0e44276ebf2e8"
-  integrity sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==
-  dependencies:
-    inherits "^2.0.3"
-    readable-stream "^2.3.6"
-
-follow-redirects@^1.0.0:
-  version "1.8.1"
-  resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.8.1.tgz#24804f9eaab67160b0e840c085885d606371a35b"
-  integrity sha512-micCIbldHioIegeKs41DoH0KS3AXfFzgS30qVkM6z/XOE/GJgvmsoc839NUqa1B9udYe9dQxgv7KFwng6+p/dw==
-  dependencies:
-    debug "^3.0.0"
-
-for-in@^0.1.3:
-  version "0.1.8"
-  resolved "https://registry.yarnpkg.com/for-in/-/for-in-0.1.8.tgz#d8773908e31256109952b1fdb9b3fa867d2775e1"
-  integrity sha1-2Hc5COMSVhCZUrH9ubP6hn0ndeE=
-
-for-in@^1.0.1, for-in@^1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
-  integrity sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=
-
-for-own@^0.1.3:
-  version "0.1.5"
-  resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce"
-  integrity sha1-UmXGgaTylNq78XyVCbZ2OqhFEM4=
-  dependencies:
-    for-in "^1.0.1"
-
-for-own@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/for-own/-/for-own-1.0.0.tgz#c63332f415cedc4b04dbfe70cf836494c53cb44b"
-  integrity sha1-xjMy9BXO3EsE2/5wz4NklMU8tEs=
-  dependencies:
-    for-in "^1.0.1"
-
-forever-agent@~0.6.1:
-  version "0.6.1"
-  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
-  integrity sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=
-
-fork-ts-checker-webpack-plugin@1.5.0:
-  version "1.5.0"
-  resolved "https://registry.yarnpkg.com/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-1.5.0.tgz#ce1d77190b44d81a761b10b6284a373795e41f0c"
-  integrity sha512-zEhg7Hz+KhZlBhILYpXy+Beu96gwvkROWJiTXOCyOOMMrdBIRPvsBpBqgTI4jfJGrJXcqGwJR8zsBGDmzY0jsA==
-  dependencies:
-    babel-code-frame "^6.22.0"
-    chalk "^2.4.1"
-    chokidar "^2.0.4"
-    micromatch "^3.1.10"
-    minimatch "^3.0.4"
-    semver "^5.6.0"
-    tapable "^1.0.0"
-    worker-rpc "^0.1.0"
-
-form-data@~2.3.2:
-  version "2.3.3"
-  resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.3.tgz#dcce52c05f644f298c6a7ab936bd724ceffbf3a6"
-  integrity sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==
-  dependencies:
-    asynckit "^0.4.0"
-    combined-stream "^1.0.6"
-    mime-types "^2.1.12"
-
-forwarded@~0.1.2:
-  version "0.1.2"
-  resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.2.tgz#98c23dab1175657b8c0573e8ceccd91b0ff18c84"
-  integrity sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=
-
-fragment-cache@^0.2.1:
-  version "0.2.1"
-  resolved "https://registry.yarnpkg.com/fragment-cache/-/fragment-cache-0.2.1.tgz#4290fad27f13e89be7f33799c6bc5a0abfff0d19"
-  integrity sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=
-  dependencies:
-    map-cache "^0.2.2"
-
-fresh@0.5.2:
-  version "0.5.2"
-  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7"
-  integrity sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=
-
-from2@^2.1.0:
-  version "2.3.0"
-  resolved "https://registry.yarnpkg.com/from2/-/from2-2.3.0.tgz#8bfb5502bde4a4d36cfdeea007fcca21d7e382af"
-  integrity sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=
-  dependencies:
-    inherits "^2.0.1"
-    readable-stream "^2.0.0"
-
-fs-extra@7.0.1, fs-extra@^7.0.0:
-  version "7.0.1"
-  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-7.0.1.tgz#4f189c44aa123b895f722804f55ea23eadc348e9"
-  integrity sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==
-  dependencies:
-    graceful-fs "^4.1.2"
-    jsonfile "^4.0.0"
-    universalify "^0.1.0"
-
-fs-extra@^4.0.2:
-  version "4.0.3"
-  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-4.0.3.tgz#0d852122e5bc5beb453fb028e9c0c9bf36340c94"
-  integrity sha512-q6rbdDd1o2mAnQreO7YADIxf/Whx4AHBiRf6d+/cVT8h44ss+lHgxf1FemcqDnQt9X3ct4McHr+JMGlYSsK7Cg==
-  dependencies:
-    graceful-fs "^4.1.2"
-    jsonfile "^4.0.0"
-    universalify "^0.1.0"
-
-fs-minipass@^1.2.5:
-  version "1.2.6"
-  resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-1.2.6.tgz#2c5cc30ded81282bfe8a0d7c7c1853ddeb102c07"
-  integrity sha512-crhvyXcMejjv3Z5d2Fa9sf5xLYVCF5O1c71QxbVnbLsmYMBEvDAftewesN/HhY03YRoA7zOMxjNGrF5svGaaeQ==
-  dependencies:
-    minipass "^2.2.1"
-
-fs-write-stream-atomic@^1.0.8:
-  version "1.0.10"
-  resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz#b47df53493ef911df75731e70a9ded0189db40c9"
-  integrity sha1-tH31NJPvkR33VzHnCp3tAYnbQMk=
-  dependencies:
-    graceful-fs "^4.1.2"
-    iferr "^0.1.5"
-    imurmurhash "^0.1.4"
-    readable-stream "1 || 2"
-
-fs.realpath@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
-  integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8=
-
-fsevents@2.0.6:
-  version "2.0.6"
-  resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.0.6.tgz#87b19df0bfb4a1a51d7ddb51b01b5f3bedb40c33"
-  integrity sha512-vfmKZp3XPM36DNF0qhW+Cdxk7xm7gTEHY1clv1Xq1arwRQuKZgAhw+NZNWbJBtuaNxzNXwhfdPYRrvIbjfS33A==
-
-fsevents@^1.2.7:
-  version "1.2.9"
-  resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.2.9.tgz#3f5ed66583ccd6f400b5a00db6f7e861363e388f"
-  integrity sha512-oeyj2H3EjjonWcFjD5NvZNE9Rqe4UW+nQBU2HNeKw0koVLEFIhtyETyAakeAM3de7Z/SW5kcA+fZUait9EApnw==
-  dependencies:
-    nan "^2.12.1"
-    node-pre-gyp "^0.12.0"
-
-function-bind@^1.1.1:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d"
-  integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==
-
-functional-red-black-tree@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz#1b0ab3bd553b2a0d6399d29c0e3ea0b252078327"
-  integrity sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=
-
-gauge@~2.7.3:
-  version "2.7.4"
-  resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7"
-  integrity sha1-LANAXHU4w51+s3sxcCLjJfsBi/c=
-  dependencies:
-    aproba "^1.0.3"
-    console-control-strings "^1.0.0"
-    has-unicode "^2.0.0"
-    object-assign "^4.1.0"
-    signal-exit "^3.0.0"
-    string-width "^1.0.1"
-    strip-ansi "^3.0.1"
-    wide-align "^1.1.0"
-
-get-caller-file@^1.0.1:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.3.tgz#f978fa4c90d1dfe7ff2d6beda2a515e713bdcf4a"
-  integrity sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w==
-
-get-caller-file@^2.0.1:
-  version "2.0.5"
-  resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e"
-  integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==
-
-get-own-enumerable-property-symbols@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.0.tgz#b877b49a5c16aefac3655f2ed2ea5b684df8d203"
-  integrity sha512-CIJYJC4GGF06TakLg8z4GQKvDsx9EMspVxOYih7LerEL/WosUnFIww45CGfxfeKHqlg3twgUrYRT1O3WQqjGCg==
-
-get-stream@^4.0.0:
-  version "4.1.0"
-  resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5"
-  integrity sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==
-  dependencies:
-    pump "^3.0.0"
-
-get-value@^2.0.3, get-value@^2.0.6:
-  version "2.0.6"
-  resolved "https://registry.yarnpkg.com/get-value/-/get-value-2.0.6.tgz#dc15ca1c672387ca76bd37ac0a395ba2042a2c28"
-  integrity sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=
-
-getpass@^0.1.1:
-  version "0.1.7"
-  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa"
-  integrity sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=
-  dependencies:
-    assert-plus "^1.0.0"
-
-glob-parent@^3.1.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-3.1.0.tgz#9e6af6299d8d3bd2bd40430832bd113df906c5ae"
-  integrity sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=
-  dependencies:
-    is-glob "^3.1.0"
-    path-dirname "^1.0.0"
-
-glob-to-regexp@^0.3.0:
-  version "0.3.0"
-  resolved "https://registry.yarnpkg.com/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz#8c5a1494d2066c570cc3bfe4496175acc4d502ab"
-  integrity sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs=
-
-glob@^7.0.3, glob@^7.1.1, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4:
-  version "7.1.4"
-  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.4.tgz#aa608a2f6c577ad357e1ae5a5c26d9a8d1969255"
-  integrity sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==
-  dependencies:
-    fs.realpath "^1.0.0"
-    inflight "^1.0.4"
-    inherits "2"
-    minimatch "^3.0.4"
-    once "^1.3.0"
-    path-is-absolute "^1.0.0"
-
-global-modules@2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/global-modules/-/global-modules-2.0.0.tgz#997605ad2345f27f51539bea26574421215c7780"
-  integrity sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==
-  dependencies:
-    global-prefix "^3.0.0"
-
-global-prefix@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/global-prefix/-/global-prefix-3.0.0.tgz#fc85f73064df69f50421f47f883fe5b913ba9b97"
-  integrity sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==
-  dependencies:
-    ini "^1.3.5"
-    kind-of "^6.0.2"
-    which "^1.3.1"
-
-globals@^11.1.0, globals@^11.7.0:
-  version "11.12.0"
-  resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e"
-  integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==
-
-globby@8.0.2:
-  version "8.0.2"
-  resolved "https://registry.yarnpkg.com/globby/-/globby-8.0.2.tgz#5697619ccd95c5275dbb2d6faa42087c1a941d8d"
-  integrity sha512-yTzMmKygLp8RUpG1Ymu2VXPSJQZjNAZPD4ywgYEaG7e4tBJeUQBO8OpXrf1RCNcEs5alsoJYPAMiIHP0cmeC7w==
-  dependencies:
-    array-union "^1.0.1"
-    dir-glob "2.0.0"
-    fast-glob "^2.0.2"
-    glob "^7.1.2"
-    ignore "^3.3.5"
-    pify "^3.0.0"
-    slash "^1.0.0"
-
-globby@^6.1.0:
-  version "6.1.0"
-  resolved "https://registry.yarnpkg.com/globby/-/globby-6.1.0.tgz#f5a6d70e8395e21c858fb0489d64df02424d506c"
-  integrity sha1-9abXDoOV4hyFj7BInWTfAkJNUGw=
-  dependencies:
-    array-union "^1.0.1"
-    glob "^7.0.3"
-    object-assign "^4.0.1"
-    pify "^2.0.0"
-    pinkie-promise "^2.0.0"
-
-graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.6:
-  version "4.2.2"
-  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.2.tgz#6f0952605d0140c1cfdb138ed005775b92d67b02"
-  integrity sha512-IItsdsea19BoLC7ELy13q1iJFNmd7ofZH5+X/pJr90/nRoPEX0DJo1dHDbgtYWOhJhcCgMDTOw84RZ72q6lB+Q==
-
-growly@^1.3.0:
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/growly/-/growly-1.3.0.tgz#f10748cbe76af964b7c96c93c6bcc28af120c081"
-  integrity sha1-8QdIy+dq+WS3yWyTxrzCivEgwIE=
-
-gud@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/gud/-/gud-1.0.0.tgz#a489581b17e6a70beca9abe3ae57de7a499852c0"
-  integrity sha512-zGEOVKFM5sVPPrYs7J5/hYEw2Pof8KCyOwyhG8sAF26mCAeUFAcYPu1mwB7hhpIP29zOIBaDqwuHdLp0jvZXjw==
-
-gzip-size@5.1.1:
-  version "5.1.1"
-  resolved "https://registry.yarnpkg.com/gzip-size/-/gzip-size-5.1.1.tgz#cb9bee692f87c0612b232840a873904e4c135274"
-  integrity sha512-FNHi6mmoHvs1mxZAds4PpdCS6QG8B4C1krxJsMutgxl5t3+GlRTzzI3NEkifXx2pVsOvJdOGSmIgDhQ55FwdPA==
-  dependencies:
-    duplexer "^0.1.1"
-    pify "^4.0.1"
-
-hammerjs@^2.0.8:
-  version "2.0.8"
-  resolved "https://registry.yarnpkg.com/hammerjs/-/hammerjs-2.0.8.tgz#04ef77862cff2bb79d30f7692095930222bf60f1"
-  integrity sha1-BO93hiz/K7edMPdpIJWTAiK/YPE=
-
-handle-thing@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/handle-thing/-/handle-thing-2.0.0.tgz#0e039695ff50c93fc288557d696f3c1dc6776754"
-  integrity sha512-d4sze1JNC454Wdo2fkuyzCr6aHcbL6PGGuFAz0Li/NcOm1tCHGnWDRmJP85dh9IhQErTc2svWFEX5xHIOo//kQ==
-
-handlebars@^4.1.2:
-  version "4.1.2"
-  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.1.2.tgz#b6b37c1ced0306b221e094fc7aca3ec23b131b67"
-  integrity sha512-nvfrjqvt9xQ8Z/w0ijewdD/vvWDTOweBUm96NTr66Wfvo1mJenBLwcYmPs3TIBP5ruzYGD7Hx/DaM9RmhroGPw==
-  dependencies:
-    neo-async "^2.6.0"
-    optimist "^0.6.1"
-    source-map "^0.6.1"
-  optionalDependencies:
-    uglify-js "^3.1.4"
-
-har-schema@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92"
-  integrity sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=
-
-har-validator@~5.1.0:
-  version "5.1.3"
-  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.3.tgz#1ef89ebd3e4996557675eed9893110dc350fa080"
-  integrity sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g==
-  dependencies:
-    ajv "^6.5.5"
-    har-schema "^2.0.0"
-
-harmony-reflect@^1.4.6:
-  version "1.6.1"
-  resolved "https://registry.yarnpkg.com/harmony-reflect/-/harmony-reflect-1.6.1.tgz#c108d4f2bb451efef7a37861fdbdae72c9bdefa9"
-  integrity sha512-WJTeyp0JzGtHcuMsi7rw2VwtkvLa+JyfEKJCFyfcS0+CDkjQ5lHPu7zEhFZP+PDSRrEgXa5Ah0l1MbgbE41XjA==
-
-has-ansi@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
-  integrity sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=
-  dependencies:
-    ansi-regex "^2.0.0"
-
-has-flag@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd"
-  integrity sha1-tdRU3CGZriJWmfNGfloH87lVuv0=
-
-has-symbols@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.0.tgz#ba1a8f1af2a0fc39650f5c850367704122063b44"
-  integrity sha1-uhqPGvKg/DllD1yFA2dwQSIGO0Q=
-
-has-unicode@^2.0.0:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
-  integrity sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk=
-
-has-value@^0.3.1:
-  version "0.3.1"
-  resolved "https://registry.yarnpkg.com/has-value/-/has-value-0.3.1.tgz#7b1f58bada62ca827ec0a2078025654845995e1f"
-  integrity sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=
-  dependencies:
-    get-value "^2.0.3"
-    has-values "^0.1.4"
-    isobject "^2.0.0"
-
-has-value@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/has-value/-/has-value-1.0.0.tgz#18b281da585b1c5c51def24c930ed29a0be6b177"
-  integrity sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=
-  dependencies:
-    get-value "^2.0.6"
-    has-values "^1.0.0"
-    isobject "^3.0.0"
-
-has-values@^0.1.4:
-  version "0.1.4"
-  resolved "https://registry.yarnpkg.com/has-values/-/has-values-0.1.4.tgz#6d61de95d91dfca9b9a02089ad384bff8f62b771"
-  integrity sha1-bWHeldkd/Km5oCCJrThL/49it3E=
-
-has-values@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/has-values/-/has-values-1.0.0.tgz#95b0b63fec2146619a6fe57fe75628d5a39efe4f"
-  integrity sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=
-  dependencies:
-    is-number "^3.0.0"
-    kind-of "^4.0.0"
-
-has@^1.0.0, has@^1.0.1, has@^1.0.3:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796"
-  integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==
-  dependencies:
-    function-bind "^1.1.1"
-
-hash-base@^3.0.0:
-  version "3.0.4"
-  resolved "https://registry.yarnpkg.com/hash-base/-/hash-base-3.0.4.tgz#5fc8686847ecd73499403319a6b0a3f3f6ae4918"
-  integrity sha1-X8hoaEfs1zSZQDMZprCj8/auSRg=
-  dependencies:
-    inherits "^2.0.1"
-    safe-buffer "^5.0.1"
-
-hash.js@^1.0.0, hash.js@^1.0.3:
-  version "1.1.7"
-  resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.7.tgz#0babca538e8d4ee4a0f8988d68866537a003cf42"
-  integrity sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==
-  dependencies:
-    inherits "^2.0.3"
-    minimalistic-assert "^1.0.1"
-
-he@1.2.x:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f"
-  integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==
-
-hex-color-regex@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/hex-color-regex/-/hex-color-regex-1.1.0.tgz#4c06fccb4602fe2602b3c93df82d7e7dbf1a8a8e"
-  integrity sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ==
-
-history@^4.9.0:
-  version "4.9.0"
-  resolved "https://registry.yarnpkg.com/history/-/history-4.9.0.tgz#84587c2068039ead8af769e9d6a6860a14fa1bca"
-  integrity sha512-H2DkjCjXf0Op9OAr6nJ56fcRkTSNrUiv41vNJ6IswJjif6wlpZK0BTfFbi7qK9dXLSYZxkq5lBsj3vUjlYBYZA==
-  dependencies:
-    "@babel/runtime" "^7.1.2"
-    loose-envify "^1.2.0"
-    resolve-pathname "^2.2.0"
-    tiny-invariant "^1.0.2"
-    tiny-warning "^1.0.0"
-    value-equal "^0.4.0"
-
-hmac-drbg@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz#d2745701025a6c775a6c545793ed502fc0c649a1"
-  integrity sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=
-  dependencies:
-    hash.js "^1.0.3"
-    minimalistic-assert "^1.0.0"
-    minimalistic-crypto-utils "^1.0.1"
-
-hoist-non-react-statics@^2.3.1:
-  version "2.5.5"
-  resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-2.5.5.tgz#c5903cf409c0dfd908f388e619d86b9c1174cb47"
-  integrity sha512-rqcy4pJo55FTTLWt+bU8ukscqHeE/e9KWvsOW2b/a3afxQZhwkQdT1rPPCJ0rYXdj4vNcasY8zHTH+jF/qStxw==
-
-hoist-non-react-statics@^3.1.0, hoist-non-react-statics@^3.3.0:
-  version "3.3.0"
-  resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-3.3.0.tgz#b09178f0122184fb95acf525daaecb4d8f45958b"
-  integrity sha512-0XsbTXxgiaCDYDIWFcwkmerZPSwywfUqYmwT4jzewKTQSWoE6FCMoUVOeBJWK3E/CrWbxRG3m5GzY4lnIwGRBA==
-  dependencies:
-    react-is "^16.7.0"
-
-hosted-git-info@^2.1.4:
-  version "2.8.4"
-  resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.8.4.tgz#44119abaf4bc64692a16ace34700fed9c03e2546"
-  integrity sha512-pzXIvANXEFrc5oFFXRMkbLPQ2rXRoDERwDLyrcUxGhaZhgP54BBSl9Oheh7Vv0T090cszWBxPjkQQ5Sq1PbBRQ==
-
-hpack.js@^2.1.6:
-  version "2.1.6"
-  resolved "https://registry.yarnpkg.com/hpack.js/-/hpack.js-2.1.6.tgz#87774c0949e513f42e84575b3c45681fade2a0b2"
-  integrity sha1-h3dMCUnlE/QuhFdbPEVoH63ioLI=
-  dependencies:
-    inherits "^2.0.1"
-    obuf "^1.0.0"
-    readable-stream "^2.0.1"
-    wbuf "^1.1.0"
-
-hsl-regex@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/hsl-regex/-/hsl-regex-1.0.0.tgz#d49330c789ed819e276a4c0d272dffa30b18fe6e"
-  integrity sha1-1JMwx4ntgZ4nakwNJy3/owsY/m4=
-
-hsla-regex@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/hsla-regex/-/hsla-regex-1.0.0.tgz#c1ce7a3168c8c6614033a4b5f7877f3b225f9c38"
-  integrity sha1-wc56MWjIxmFAM6S194d/OyJfnDg=
-
-html-comment-regex@^1.1.0:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/html-comment-regex/-/html-comment-regex-1.1.2.tgz#97d4688aeb5c81886a364faa0cad1dda14d433a7"
-  integrity sha512-P+M65QY2JQ5Y0G9KKdlDpo0zK+/OHptU5AaBwUfAIDJZk1MYf32Frm84EcOytfJE0t5JvkAnKlmjsXDnWzCJmQ==
-
-html-encoding-sniffer@^1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-1.0.2.tgz#e70d84b94da53aa375e11fe3a351be6642ca46f8"
-  integrity sha512-71lZziiDnsuabfdYiUeWdCVyKuqwWi23L8YeIgV9jSSZHCtb6wB1BKWooH7L3tn4/FuZJMVWyNaIDr4RGmaSYw==
-  dependencies:
-    whatwg-encoding "^1.0.1"
-
-html-entities@^1.2.0:
-  version "1.2.1"
-  resolved "https://registry.yarnpkg.com/html-entities/-/html-entities-1.2.1.tgz#0df29351f0721163515dfb9e5543e5f6eed5162f"
-  integrity sha1-DfKTUfByEWNRXfueVUPl9u7VFi8=
-
-html-minifier@^3.5.20:
-  version "3.5.21"
-  resolved "https://registry.yarnpkg.com/html-minifier/-/html-minifier-3.5.21.tgz#d0040e054730e354db008463593194015212d20c"
-  integrity sha512-LKUKwuJDhxNa3uf/LPR/KVjm/l3rBqtYeCOAekvG8F1vItxMUpueGd94i/asDDr8/1u7InxzFA5EeGjhhG5mMA==
-  dependencies:
-    camel-case "3.0.x"
-    clean-css "4.2.x"
-    commander "2.17.x"
-    he "1.2.x"
-    param-case "2.1.x"
-    relateurl "0.2.x"
-    uglify-js "3.4.x"
-
-html-webpack-plugin@4.0.0-beta.5:
-  version "4.0.0-beta.5"
-  resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-4.0.0-beta.5.tgz#2c53083c1151bfec20479b1f8aaf0039e77b5513"
-  integrity sha512-y5l4lGxOW3pz3xBTFdfB9rnnrWRPVxlAhX6nrBYIcW+2k2zC3mSp/3DxlWVCMBfnO6UAnoF8OcFn0IMy6kaKAQ==
-  dependencies:
-    html-minifier "^3.5.20"
-    loader-utils "^1.1.0"
-    lodash "^4.17.11"
-    pretty-error "^2.1.1"
-    tapable "^1.1.0"
-    util.promisify "1.0.0"
-
-htmlparser2@^3.3.0:
-  version "3.10.1"
-  resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.10.1.tgz#bd679dc3f59897b6a34bb10749c855bb53a9392f"
-  integrity sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ==
-  dependencies:
-    domelementtype "^1.3.1"
-    domhandler "^2.3.0"
-    domutils "^1.5.1"
-    entities "^1.1.1"
-    inherits "^2.0.1"
-    readable-stream "^3.1.1"
-
-http-deceiver@^1.2.7:
-  version "1.2.7"
-  resolved "https://registry.yarnpkg.com/http-deceiver/-/http-deceiver-1.2.7.tgz#fa7168944ab9a519d337cb0bec7284dc3e723d87"
-  integrity sha1-+nFolEq5pRnTN8sL7HKE3D5yPYc=
-
-http-errors@1.7.2:
-  version "1.7.2"
-  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.7.2.tgz#4f5029cf13239f31036e5b2e55292bcfbcc85c8f"
-  integrity sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==
-  dependencies:
-    depd "~1.1.2"
-    inherits "2.0.3"
-    setprototypeof "1.1.1"
-    statuses ">= 1.5.0 < 2"
-    toidentifier "1.0.0"
-
-http-errors@~1.6.2:
-  version "1.6.3"
-  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.3.tgz#8b55680bb4be283a0b5bf4ea2e38580be1d9320d"
-  integrity sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=
-  dependencies:
-    depd "~1.1.2"
-    inherits "2.0.3"
-    setprototypeof "1.1.0"
-    statuses ">= 1.4.0 < 2"
-
-http-errors@~1.7.2:
-  version "1.7.3"
-  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.7.3.tgz#6c619e4f9c60308c38519498c14fbb10aacebb06"
-  integrity sha512-ZTTX0MWrsQ2ZAhA1cejAwDLycFsd7I7nVtnkT3Ol0aqodaKW+0CTZDQ1uBv5whptCnc8e8HeRRJxRs0kmm/Qfw==
-  dependencies:
-    depd "~1.1.2"
-    inherits "2.0.4"
-    setprototypeof "1.1.1"
-    statuses ">= 1.5.0 < 2"
-    toidentifier "1.0.0"
-
-"http-parser-js@>=0.4.0 <0.4.11":
-  version "0.4.10"
-  resolved "https://registry.yarnpkg.com/http-parser-js/-/http-parser-js-0.4.10.tgz#92c9c1374c35085f75db359ec56cc257cbb93fa4"
-  integrity sha1-ksnBN0w1CF912zWexWzCV8u5P6Q=
-
-http-proxy-middleware@^0.19.1:
-  version "0.19.1"
-  resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-0.19.1.tgz#183c7dc4aa1479150306498c210cdaf96080a43a"
-  integrity sha512-yHYTgWMQO8VvwNS22eLLloAkvungsKdKTLO8AJlftYIKNfJr3GK3zK0ZCfzDDGUBttdGc8xFy1mCitvNKQtC3Q==
-  dependencies:
-    http-proxy "^1.17.0"
-    is-glob "^4.0.0"
-    lodash "^4.17.11"
-    micromatch "^3.1.10"
-
-http-proxy@^1.17.0:
-  version "1.17.0"
-  resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.17.0.tgz#7ad38494658f84605e2f6db4436df410f4e5be9a"
-  integrity sha512-Taqn+3nNvYRfJ3bGvKfBSRwy1v6eePlm3oc/aWVxZp57DQr5Eq3xhKJi7Z4hZpS8PC3H4qI+Yly5EmFacGuA/g==
-  dependencies:
-    eventemitter3 "^3.0.0"
-    follow-redirects "^1.0.0"
-    requires-port "^1.0.0"
-
-http-signature@~1.2.0:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1"
-  integrity sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=
-  dependencies:
-    assert-plus "^1.0.0"
-    jsprim "^1.2.2"
-    sshpk "^1.7.0"
-
-https-browserify@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/https-browserify/-/https-browserify-1.0.0.tgz#ec06c10e0a34c0f2faf199f7fd7fc78fffd03c73"
-  integrity sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM=
-
-iconv-lite@0.4.24, iconv-lite@^0.4.24, iconv-lite@^0.4.4, iconv-lite@~0.4.13:
-  version "0.4.24"
-  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b"
-  integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==
-  dependencies:
-    safer-buffer ">= 2.1.2 < 3"
-
-icss-replace-symbols@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/icss-replace-symbols/-/icss-replace-symbols-1.1.0.tgz#06ea6f83679a7749e386cfe1fe812ae5db223ded"
-  integrity sha1-Bupvg2ead0njhs/h/oEq5dsiPe0=
-
-icss-utils@^4.1.0:
-  version "4.1.1"
-  resolved "https://registry.yarnpkg.com/icss-utils/-/icss-utils-4.1.1.tgz#21170b53789ee27447c2f47dd683081403f9a467"
-  integrity sha512-4aFq7wvWyMHKgxsH8QQtGpvbASCf+eM3wPRLI6R+MgAnTCZ6STYsRvttLvRWK0Nfif5piF394St3HeJDaljGPA==
-  dependencies:
-    postcss "^7.0.14"
-
-identity-obj-proxy@3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/identity-obj-proxy/-/identity-obj-proxy-3.0.0.tgz#94d2bda96084453ef36fbc5aaec37e0f79f1fc14"
-  integrity sha1-lNK9qWCERT7zb7xarsN+D3nx/BQ=
-  dependencies:
-    harmony-reflect "^1.4.6"
-
-ieee754@^1.1.4:
-  version "1.1.13"
-  resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.1.13.tgz#ec168558e95aa181fd87d37f55c32bbcb6708b84"
-  integrity sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==
-
-iferr@^0.1.5:
-  version "0.1.5"
-  resolved "https://registry.yarnpkg.com/iferr/-/iferr-0.1.5.tgz#c60eed69e6d8fdb6b3104a1fcbca1c192dc5b501"
-  integrity sha1-xg7taebY/bazEEofy8ocGS3FtQE=
-
-ignore-walk@^3.0.1:
-  version "3.0.1"
-  resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.1.tgz#a83e62e7d272ac0e3b551aaa82831a19b69f82f8"
-  integrity sha512-DTVlMx3IYPe0/JJcYP7Gxg7ttZZu3IInhuEhbchuqneY9wWe5Ojy2mXLBaQFUQmo0AW2r3qG7m1mg86js+gnlQ==
-  dependencies:
-    minimatch "^3.0.4"
-
-ignore@^3.3.5:
-  version "3.3.10"
-  resolved "https://registry.yarnpkg.com/ignore/-/ignore-3.3.10.tgz#0a97fb876986e8081c631160f8f9f389157f0043"
-  integrity sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==
-
-ignore@^4.0.6:
-  version "4.0.6"
-  resolved "https://registry.yarnpkg.com/ignore/-/ignore-4.0.6.tgz#750e3db5862087b4737ebac8207ffd1ef27b25fc"
-  integrity sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==
-
-image-size@~0.5.0:
-  version "0.5.5"
-  resolved "https://registry.yarnpkg.com/image-size/-/image-size-0.5.5.tgz#09dfd4ab9d20e29eb1c3e80b8990378df9e3cb9c"
-  integrity sha1-Cd/Uq50g4p6xw+gLiZA3jfnjy5w=
-
-immer@1.10.0:
-  version "1.10.0"
-  resolved "https://registry.yarnpkg.com/immer/-/immer-1.10.0.tgz#bad67605ba9c810275d91e1c2a47d4582e98286d"
-  integrity sha512-O3sR1/opvCDGLEVcvrGTMtLac8GJ5IwZC4puPrLuRj3l7ICKvkmA0vGuU9OW8mV9WIBRnaxp5GJh9IEAaNOoYg==
-
-immutable@^3.7.4:
-  version "3.8.2"
-  resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.8.2.tgz#c2439951455bb39913daf281376f1530e104adf3"
-  integrity sha1-wkOZUUVbs5kT2vKBN28VMOEErfM=
-
-immutable@~3.7.4:
-  version "3.7.6"
-  resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.7.6.tgz#13b4d3cb12befa15482a26fe1b2ebae640071e4b"
-  integrity sha1-E7TTyxK++hVIKib+Gy665kAHHks=
-
-import-cwd@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/import-cwd/-/import-cwd-2.1.0.tgz#aa6cf36e722761285cb371ec6519f53e2435b0a9"
-  integrity sha1-qmzzbnInYShcs3HsZRn1PiQ1sKk=
-  dependencies:
-    import-from "^2.1.0"
-
-import-fresh@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-2.0.0.tgz#d81355c15612d386c61f9ddd3922d4304822a546"
-  integrity sha1-2BNVwVYS04bGH53dOSLUMEgipUY=
-  dependencies:
-    caller-path "^2.0.0"
-    resolve-from "^3.0.0"
-
-import-fresh@^3.0.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.1.0.tgz#6d33fa1dcef6df930fae003446f33415af905118"
-  integrity sha512-PpuksHKGt8rXfWEr9m9EHIpgyyaltBy8+eF6GJM0QCAxMgxCfucMF3mjecK2QsJr0amJW7gTqh5/wht0z2UhEQ==
-  dependencies:
-    parent-module "^1.0.0"
-    resolve-from "^4.0.0"
-
-import-from@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/import-from/-/import-from-2.1.0.tgz#335db7f2a7affd53aaa471d4b8021dee36b7f3b1"
-  integrity sha1-M1238qev/VOqpHHUuAId7ja387E=
-  dependencies:
-    resolve-from "^3.0.0"
-
-import-local@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/import-local/-/import-local-2.0.0.tgz#55070be38a5993cf18ef6db7e961f5bee5c5a09d"
-  integrity sha512-b6s04m3O+s3CGSbqDIyP4R6aAwAeYlVq9+WUWep6iHa8ETRf9yei1U48C5MmfJmV9AiLYYBKPMq/W+/WRpQmCQ==
-  dependencies:
-    pkg-dir "^3.0.0"
-    resolve-cwd "^2.0.0"
-
-imurmurhash@^0.1.4:
-  version "0.1.4"
-  resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea"
-  integrity sha1-khi5srkoojixPcT7a21XbyMUU+o=
-
-indexes-of@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/indexes-of/-/indexes-of-1.0.1.tgz#f30f716c8e2bd346c7b67d3df3915566a7c05607"
-  integrity sha1-8w9xbI4r00bHtn0985FVZqfAVgc=
-
-infer-owner@^1.0.3:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/infer-owner/-/infer-owner-1.0.4.tgz#c4cefcaa8e51051c2a40ba2ce8a3d27295af9467"
-  integrity sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==
-
-inflight@^1.0.4:
-  version "1.0.6"
-  resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
-  integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=
-  dependencies:
-    once "^1.3.0"
-    wrappy "1"
-
-inherits@2, inherits@2.0.4, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.1, inherits@~2.0.3:
-  version "2.0.4"
-  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c"
-  integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
-
-inherits@2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.1.tgz#b17d08d326b4423e568eff719f91b0b1cbdf69f1"
-  integrity sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE=
-
-inherits@2.0.3:
-  version "2.0.3"
-  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
-  integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=
-
-ini@^1.3.5, ini@~1.3.0:
-  version "1.3.5"
-  resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.5.tgz#eee25f56db1c9ec6085e0c22778083f596abf927"
-  integrity sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==
-
-inquirer@6.5.0:
-  version "6.5.0"
-  resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-6.5.0.tgz#2303317efc9a4ea7ec2e2df6f86569b734accf42"
-  integrity sha512-scfHejeG/lVZSpvCXpsB4j/wQNPM5JC8kiElOI0OUTwmc1RTpXr4H32/HOlQHcZiYl2z2VElwuCVDRG8vFmbnA==
-  dependencies:
-    ansi-escapes "^3.2.0"
-    chalk "^2.4.2"
-    cli-cursor "^2.1.0"
-    cli-width "^2.0.0"
-    external-editor "^3.0.3"
-    figures "^2.0.0"
-    lodash "^4.17.12"
-    mute-stream "0.0.7"
-    run-async "^2.2.0"
-    rxjs "^6.4.0"
-    string-width "^2.1.0"
-    strip-ansi "^5.1.0"
-    through "^2.3.6"
-
-inquirer@^6.2.2:
-  version "6.5.2"
-  resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-6.5.2.tgz#ad50942375d036d327ff528c08bd5fab089928ca"
-  integrity sha512-cntlB5ghuB0iuO65Ovoi8ogLHiWGs/5yNrtUcKjFhSSiVeAIVpD7koaSU9RM8mpXw5YDi9RdYXGQMaOURB7ycQ==
-  dependencies:
-    ansi-escapes "^3.2.0"
-    chalk "^2.4.2"
-    cli-cursor "^2.1.0"
-    cli-width "^2.0.0"
-    external-editor "^3.0.3"
-    figures "^2.0.0"
-    lodash "^4.17.12"
-    mute-stream "0.0.7"
-    run-async "^2.2.0"
-    rxjs "^6.4.0"
-    string-width "^2.1.0"
-    strip-ansi "^5.1.0"
-    through "^2.3.6"
-
-internal-ip@^4.2.0:
-  version "4.3.0"
-  resolved "https://registry.yarnpkg.com/internal-ip/-/internal-ip-4.3.0.tgz#845452baad9d2ca3b69c635a137acb9a0dad0907"
-  integrity sha512-S1zBo1D6zcsyuC6PMmY5+55YMILQ9av8lotMx447Bq6SAgo/sDK6y6uUKmuYhW7eacnIhFfsPmCNYdDzsnnDCg==
-  dependencies:
-    default-gateway "^4.2.0"
-    ipaddr.js "^1.9.0"
-
-invariant@^2.2.2, invariant@^2.2.4:
-  version "2.2.4"
-  resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6"
-  integrity sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==
-  dependencies:
-    loose-envify "^1.0.0"
-
-invert-kv@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-2.0.0.tgz#7393f5afa59ec9ff5f67a27620d11c226e3eec02"
-  integrity sha512-wPVv/y/QQ/Uiirj/vh3oP+1Ww+AWehmi1g5fFWGPF6IpCBCDVrhgHRMvrLfdYcwDh3QJbGXDW4JAuzxElLSqKA==
-
-ip-regex@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-2.1.0.tgz#fa78bf5d2e6913c911ce9f819ee5146bb6d844e9"
-  integrity sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk=
-
-ip@^1.1.0, ip@^1.1.5:
-  version "1.1.5"
-  resolved "https://registry.yarnpkg.com/ip/-/ip-1.1.5.tgz#bdded70114290828c0a039e72ef25f5aaec4354a"
-  integrity sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo=
-
-ipaddr.js@1.9.0:
-  version "1.9.0"
-  resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.0.tgz#37df74e430a0e47550fe54a2defe30d8acd95f65"
-  integrity sha512-M4Sjn6N/+O6/IXSJseKqHoFc+5FdGJ22sXqnjTpdZweHK64MzEPAyQZyEU3R/KRv2GLoa7nNtg/C2Ev6m7z+eA==
-
-ipaddr.js@^1.9.0:
-  version "1.9.1"
-  resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz#bff38543eeb8984825079ff3a2a8e6cbd46781b3"
-  integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==
-
-is-absolute-url@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/is-absolute-url/-/is-absolute-url-2.1.0.tgz#50530dfb84fcc9aa7dbe7852e83a37b93b9f2aa6"
-  integrity sha1-UFMN+4T8yap9vnhS6Do3uTufKqY=
-
-is-accessor-descriptor@^0.1.6:
-  version "0.1.6"
-  resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz#a9e12cb3ae8d876727eeef3843f8a0897b5c98d6"
-  integrity sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=
-  dependencies:
-    kind-of "^3.0.2"
-
-is-accessor-descriptor@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz#169c2f6d3df1f992618072365c9b0ea1f6878656"
-  integrity sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==
-  dependencies:
-    kind-of "^6.0.0"
-
-is-arguments@^1.0.4:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/is-arguments/-/is-arguments-1.0.4.tgz#3faf966c7cba0ff437fb31f6250082fcf0448cf3"
-  integrity sha512-xPh0Rmt8NE65sNzvyUmWgI1tz3mKq74lGA0mL8LYZcoIzKOzDh6HmrYm3d18k60nHerC8A9Km8kYu87zfSFnLA==
-
-is-arrayish@^0.2.1:
-  version "0.2.1"
-  resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
-  integrity sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=
-
-is-arrayish@^0.3.1:
-  version "0.3.2"
-  resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.3.2.tgz#4574a2ae56f7ab206896fb431eaeed066fdf8f03"
-  integrity sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==
-
-is-binary-path@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898"
-  integrity sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=
-  dependencies:
-    binary-extensions "^1.0.0"
-
-is-buffer@^1.0.2, is-buffer@^1.1.5:
-  version "1.1.6"
-  resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be"
-  integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==
-
-is-callable@^1.1.4:
-  version "1.1.4"
-  resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.1.4.tgz#1e1adf219e1eeb684d691f9d6a05ff0d30a24d75"
-  integrity sha512-r5p9sxJjYnArLjObpjA4xu5EKI3CuKHkJXMhT7kwbpUyIFD1n5PMAsoPvWnvtZiNz7LjkYDRZhd7FlI0eMijEA==
-
-is-ci@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/is-ci/-/is-ci-2.0.0.tgz#6bc6334181810e04b5c22b3d589fdca55026404c"
-  integrity sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==
-  dependencies:
-    ci-info "^2.0.0"
-
-is-color-stop@^1.0.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/is-color-stop/-/is-color-stop-1.1.0.tgz#cfff471aee4dd5c9e158598fbe12967b5cdad345"
-  integrity sha1-z/9HGu5N1cnhWFmPvhKWe1za00U=
-  dependencies:
-    css-color-names "^0.0.4"
-    hex-color-regex "^1.1.0"
-    hsl-regex "^1.0.0"
-    hsla-regex "^1.0.0"
-    rgb-regex "^1.0.1"
-    rgba-regex "^1.0.0"
-
-is-data-descriptor@^0.1.4:
-  version "0.1.4"
-  resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz#0b5ee648388e2c860282e793f1856fec3f301b56"
-  integrity sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=
-  dependencies:
-    kind-of "^3.0.2"
-
-is-data-descriptor@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz#d84876321d0e7add03990406abbbbd36ba9268c7"
-  integrity sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==
-  dependencies:
-    kind-of "^6.0.0"
-
-is-date-object@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.1.tgz#9aa20eb6aeebbff77fbd33e74ca01b33581d3a16"
-  integrity sha1-mqIOtq7rv/d/vTPnTKAbM1gdOhY=
-
-is-descriptor@^0.1.0:
-  version "0.1.6"
-  resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-0.1.6.tgz#366d8240dde487ca51823b1ab9f07a10a78251ca"
-  integrity sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==
-  dependencies:
-    is-accessor-descriptor "^0.1.6"
-    is-data-descriptor "^0.1.4"
-    kind-of "^5.0.0"
-
-is-descriptor@^1.0.0, is-descriptor@^1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-1.0.2.tgz#3b159746a66604b04f8c81524ba365c5f14d86ec"
-  integrity sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==
-  dependencies:
-    is-accessor-descriptor "^1.0.0"
-    is-data-descriptor "^1.0.0"
-    kind-of "^6.0.2"
-
-is-directory@^0.3.1:
-  version "0.3.1"
-  resolved "https://registry.yarnpkg.com/is-directory/-/is-directory-0.3.1.tgz#61339b6f2475fc772fd9c9d83f5c8575dc154ae1"
-  integrity sha1-YTObbyR1/Hcv2cnYP1yFddwVSuE=
-
-is-extendable@^0.1.0, is-extendable@^0.1.1:
-  version "0.1.1"
-  resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
-  integrity sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=
-
-is-extendable@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-1.0.1.tgz#a7470f9e426733d81bd81e1155264e3a3507cab4"
-  integrity sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==
-  dependencies:
-    is-plain-object "^2.0.4"
-
-is-extglob@^2.1.0, is-extglob@^2.1.1:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2"
-  integrity sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=
-
-is-fullwidth-code-point@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb"
-  integrity sha1-754xOG8DGn8NZDr4L95QxFfvAMs=
-  dependencies:
-    number-is-nan "^1.0.0"
-
-is-fullwidth-code-point@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f"
-  integrity sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=
-
-is-generator-fn@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/is-generator-fn/-/is-generator-fn-2.1.0.tgz#7d140adc389aaf3011a8f2a2a4cfa6faadffb118"
-  integrity sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==
-
-is-glob@^3.1.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-3.1.0.tgz#7ba5ae24217804ac70707b96922567486cc3e84a"
-  integrity sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=
-  dependencies:
-    is-extglob "^2.1.0"
-
-is-glob@^4.0.0:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.1.tgz#7567dbe9f2f5e2467bc77ab83c4a29482407a5dc"
-  integrity sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==
-  dependencies:
-    is-extglob "^2.1.1"
-
-is-number@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/is-number/-/is-number-3.0.0.tgz#24fd6201a4782cf50561c810276afc7d12d71195"
-  integrity sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=
-  dependencies:
-    kind-of "^3.0.2"
-
-is-obj@^1.0.0, is-obj@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-1.0.1.tgz#3e4729ac1f5fde025cd7d83a896dab9f4f67db0f"
-  integrity sha1-PkcprB9f3gJc19g6iW2rn09n2w8=
-
-is-path-cwd@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/is-path-cwd/-/is-path-cwd-1.0.0.tgz#d225ec23132e89edd38fda767472e62e65f1106d"
-  integrity sha1-0iXsIxMuie3Tj9p2dHLmLmXxEG0=
-
-is-path-in-cwd@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/is-path-in-cwd/-/is-path-in-cwd-1.0.1.tgz#5ac48b345ef675339bd6c7a48a912110b241cf52"
-  integrity sha512-FjV1RTW48E7CWM7eE/J2NJvAEEVektecDBVBE5Hh3nM1Jd0kvhHtX68Pr3xsDf857xt3Y4AkwVULK1Vku62aaQ==
-  dependencies:
-    is-path-inside "^1.0.0"
-
-is-path-inside@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-1.0.1.tgz#8ef5b7de50437a3fdca6b4e865ef7aa55cb48036"
-  integrity sha1-jvW33lBDej/cprToZe96pVy0gDY=
-  dependencies:
-    path-is-inside "^1.0.1"
-
-is-plain-object@^2.0.1, is-plain-object@^2.0.3, is-plain-object@^2.0.4:
-  version "2.0.4"
-  resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677"
-  integrity sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==
-  dependencies:
-    isobject "^3.0.1"
-
-is-promise@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/is-promise/-/is-promise-2.1.0.tgz#79a2a9ece7f096e80f36d2b2f3bc16c1ff4bf3fa"
-  integrity sha1-eaKp7OfwlugPNtKy87wWwf9L8/o=
-
-is-regex@^1.0.4:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.0.4.tgz#5517489b547091b0930e095654ced25ee97e9491"
-  integrity sha1-VRdIm1RwkbCTDglWVM7SXul+lJE=
-  dependencies:
-    has "^1.0.1"
-
-is-regexp@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/is-regexp/-/is-regexp-1.0.0.tgz#fd2d883545c46bac5a633e7b9a09e87fa2cb5069"
-  integrity sha1-/S2INUXEa6xaYz57mgnof6LLUGk=
-
-is-resolvable@^1.0.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/is-resolvable/-/is-resolvable-1.1.0.tgz#fb18f87ce1feb925169c9a407c19318a3206ed88"
-  integrity sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==
-
-is-root@2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/is-root/-/is-root-2.1.0.tgz#809e18129cf1129644302a4f8544035d51984a9c"
-  integrity sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==
-
-is-stream@^1.0.1, is-stream@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44"
-  integrity sha1-EtSj3U5o4Lec6428hBc66A2RykQ=
-
-is-svg@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/is-svg/-/is-svg-3.0.0.tgz#9321dbd29c212e5ca99c4fa9794c714bcafa2f75"
-  integrity sha512-gi4iHK53LR2ujhLVVj+37Ykh9GLqYHX6JOVXbLAucaG/Cqw9xwdFOjDM2qeifLs1sF1npXXFvDu0r5HNgCMrzQ==
-  dependencies:
-    html-comment-regex "^1.1.0"
-
-is-symbol@^1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.2.tgz#a055f6ae57192caee329e7a860118b497a950f38"
-  integrity sha512-HS8bZ9ox60yCJLH9snBpIwv9pYUAkcuLhSA1oero1UB5y9aiQpRA8y2ex945AOtCZL1lJDeIk3G5LthswI46Lw==
-  dependencies:
-    has-symbols "^1.0.0"
-
-is-typedarray@~1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
-  integrity sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=
-
-is-windows@^1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d"
-  integrity sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==
-
-is-wsl@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-1.1.0.tgz#1f16e4aa22b04d1336b66188a66af3c600c3a66d"
-  integrity sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=
-
-isarray@0.0.1:
-  version "0.0.1"
-  resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
-  integrity sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=
-
-isarray@1.0.0, isarray@^1.0.0, isarray@~1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
-  integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=
-
-isexe@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
-  integrity sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=
-
-ismobilejs@^0.5.1:
-  version "0.5.2"
-  resolved "https://registry.yarnpkg.com/ismobilejs/-/ismobilejs-0.5.2.tgz#e81bacf6187c532ad8348355f4fecd6e6adfdce1"
-  integrity sha512-ta9UdV60xVZk/ZafFtSFslQaE76SvNkcs1r73d2PVR21zVzx9xuYv9tNe4MxA1NN7WoeCc2RjGot3Bz1eHDx3Q==
-
-isobject@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89"
-  integrity sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=
-  dependencies:
-    isarray "1.0.0"
-
-isobject@^3.0.0, isobject@^3.0.1:
-  version "3.0.1"
-  resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df"
-  integrity sha1-TkMekrEalzFjaqH5yNHMvP2reN8=
-
-isomorphic-fetch@^2.1.1:
-  version "2.2.1"
-  resolved "https://registry.yarnpkg.com/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz#611ae1acf14f5e81f729507472819fe9733558a9"
-  integrity sha1-YRrhrPFPXoH3KVB0coGf6XM1WKk=
-  dependencies:
-    node-fetch "^1.0.1"
-    whatwg-fetch ">=0.10.0"
-
-isstream@~0.1.2:
-  version "0.1.2"
-  resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a"
-  integrity sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=
-
-istanbul-lib-coverage@^2.0.2, istanbul-lib-coverage@^2.0.5:
-  version "2.0.5"
-  resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.5.tgz#675f0ab69503fad4b1d849f736baaca803344f49"
-  integrity sha512-8aXznuEPCJvGnMSRft4udDRDtb1V3pkQkMMI5LI+6HuQz5oQ4J2UFn1H82raA3qJtyOLkkwVqICBQkjnGtn5mA==
-
-istanbul-lib-instrument@^3.0.1, istanbul-lib-instrument@^3.3.0:
-  version "3.3.0"
-  resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-3.3.0.tgz#a5f63d91f0bbc0c3e479ef4c5de027335ec6d630"
-  integrity sha512-5nnIN4vo5xQZHdXno/YDXJ0G+I3dAm4XgzfSVTPLQpj/zAV2dV6Juy0yaf10/zrJOJeHoN3fraFe+XRq2bFVZA==
-  dependencies:
-    "@babel/generator" "^7.4.0"
-    "@babel/parser" "^7.4.3"
-    "@babel/template" "^7.4.0"
-    "@babel/traverse" "^7.4.3"
-    "@babel/types" "^7.4.0"
-    istanbul-lib-coverage "^2.0.5"
-    semver "^6.0.0"
-
-istanbul-lib-report@^2.0.4:
-  version "2.0.8"
-  resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-2.0.8.tgz#5a8113cd746d43c4889eba36ab10e7d50c9b4f33"
-  integrity sha512-fHBeG573EIihhAblwgxrSenp0Dby6tJMFR/HvlerBsrCTD5bkUuoNtn3gVh29ZCS824cGGBPn7Sg7cNk+2xUsQ==
-  dependencies:
-    istanbul-lib-coverage "^2.0.5"
-    make-dir "^2.1.0"
-    supports-color "^6.1.0"
-
-istanbul-lib-source-maps@^3.0.1:
-  version "3.0.6"
-  resolved "https://registry.yarnpkg.com/istanbul-lib-source-maps/-/istanbul-lib-source-maps-3.0.6.tgz#284997c48211752ec486253da97e3879defba8c8"
-  integrity sha512-R47KzMtDJH6X4/YW9XTx+jrLnZnscW4VpNN+1PViSYTejLVPWv7oov+Duf8YQSPyVRUvueQqz1TcsC6mooZTXw==
-  dependencies:
-    debug "^4.1.1"
-    istanbul-lib-coverage "^2.0.5"
-    make-dir "^2.1.0"
-    rimraf "^2.6.3"
-    source-map "^0.6.1"
-
-istanbul-reports@^2.2.6:
-  version "2.2.6"
-  resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-2.2.6.tgz#7b4f2660d82b29303a8fe6091f8ca4bf058da1af"
-  integrity sha512-SKi4rnMyLBKe0Jy2uUdx28h8oG7ph2PPuQPvIAh31d+Ci+lSiEu4C+h3oBPuJ9+mPKhOyW0M8gY4U5NM1WLeXA==
-  dependencies:
-    handlebars "^4.1.2"
-
-jest-changed-files@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-24.9.0.tgz#08d8c15eb79a7fa3fc98269bc14b451ee82f8039"
-  integrity sha512-6aTWpe2mHF0DhL28WjdkO8LyGjs3zItPET4bMSeXU6T3ub4FPMw+mcOcbdGXQOAfmLcxofD23/5Bl9Z4AkFwqg==
-  dependencies:
-    "@jest/types" "^24.9.0"
-    execa "^1.0.0"
-    throat "^4.0.0"
-
-jest-cli@^24.7.1:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-24.9.0.tgz#ad2de62d07472d419c6abc301fc432b98b10d2af"
-  integrity sha512-+VLRKyitT3BWoMeSUIHRxV/2g8y9gw91Jh5z2UmXZzkZKpbC08CSehVxgHUwTpy+HwGcns/tqafQDJW7imYvGg==
-  dependencies:
-    "@jest/core" "^24.9.0"
-    "@jest/test-result" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    chalk "^2.0.1"
-    exit "^0.1.2"
-    import-local "^2.0.0"
-    is-ci "^2.0.0"
-    jest-config "^24.9.0"
-    jest-util "^24.9.0"
-    jest-validate "^24.9.0"
-    prompts "^2.0.1"
-    realpath-native "^1.1.0"
-    yargs "^13.3.0"
-
-jest-config@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-24.9.0.tgz#fb1bbc60c73a46af03590719efa4825e6e4dd1b5"
-  integrity sha512-RATtQJtVYQrp7fvWg6f5y3pEFj9I+H8sWw4aKxnDZ96mob5i5SD6ZEGWgMLXQ4LE8UurrjbdlLWdUeo+28QpfQ==
-  dependencies:
-    "@babel/core" "^7.1.0"
-    "@jest/test-sequencer" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    babel-jest "^24.9.0"
-    chalk "^2.0.1"
-    glob "^7.1.1"
-    jest-environment-jsdom "^24.9.0"
-    jest-environment-node "^24.9.0"
-    jest-get-type "^24.9.0"
-    jest-jasmine2 "^24.9.0"
-    jest-regex-util "^24.3.0"
-    jest-resolve "^24.9.0"
-    jest-util "^24.9.0"
-    jest-validate "^24.9.0"
-    micromatch "^3.1.10"
-    pretty-format "^24.9.0"
-    realpath-native "^1.1.0"
-
-jest-diff@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-24.9.0.tgz#931b7d0d5778a1baf7452cb816e325e3724055da"
-  integrity sha512-qMfrTs8AdJE2iqrTp0hzh7kTd2PQWrsFyj9tORoKmu32xjPjeE4NyjVRDz8ybYwqS2ik8N4hsIpiVTyFeo2lBQ==
-  dependencies:
-    chalk "^2.0.1"
-    diff-sequences "^24.9.0"
-    jest-get-type "^24.9.0"
-    pretty-format "^24.9.0"
-
-jest-docblock@^24.3.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-24.9.0.tgz#7970201802ba560e1c4092cc25cbedf5af5a8ce2"
-  integrity sha512-F1DjdpDMJMA1cN6He0FNYNZlo3yYmOtRUnktrT9Q37njYzC5WEaDdmbynIgy0L/IvXvvgsG8OsqhLPXTpfmZAA==
-  dependencies:
-    detect-newline "^2.1.0"
-
-jest-each@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-24.9.0.tgz#eb2da602e2a610898dbc5f1f6df3ba86b55f8b05"
-  integrity sha512-ONi0R4BvW45cw8s2Lrx8YgbeXL1oCQ/wIDwmsM3CqM/nlblNCPmnC3IPQlMbRFZu3wKdQ2U8BqM6lh3LJ5Bsog==
-  dependencies:
-    "@jest/types" "^24.9.0"
-    chalk "^2.0.1"
-    jest-get-type "^24.9.0"
-    jest-util "^24.9.0"
-    pretty-format "^24.9.0"
-
-jest-environment-jsdom-fourteen@0.1.0:
-  version "0.1.0"
-  resolved "https://registry.yarnpkg.com/jest-environment-jsdom-fourteen/-/jest-environment-jsdom-fourteen-0.1.0.tgz#aad6393a9d4b565b69a609109bf469f62bf18ccc"
-  integrity sha512-4vtoRMg7jAstitRzL4nbw83VmGH8Rs13wrND3Ud2o1fczDhMUF32iIrNKwYGgeOPUdfvZU4oy8Bbv+ni1fgVCA==
-  dependencies:
-    jest-mock "^24.5.0"
-    jest-util "^24.5.0"
-    jsdom "^14.0.0"
-
-jest-environment-jsdom@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-environment-jsdom/-/jest-environment-jsdom-24.9.0.tgz#4b0806c7fc94f95edb369a69cc2778eec2b7375b"
-  integrity sha512-Zv9FV9NBRzLuALXjvRijO2351DRQeLYXtpD4xNvfoVFw21IOKNhZAEUKcbiEtjTkm2GsJ3boMVgkaR7rN8qetA==
-  dependencies:
-    "@jest/environment" "^24.9.0"
-    "@jest/fake-timers" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    jest-mock "^24.9.0"
-    jest-util "^24.9.0"
-    jsdom "^11.5.1"
-
-jest-environment-node@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-24.9.0.tgz#333d2d2796f9687f2aeebf0742b519f33c1cbfd3"
-  integrity sha512-6d4V2f4nxzIzwendo27Tr0aFm+IXWa0XEUnaH6nU0FMaozxovt+sfRvh4J47wL1OvF83I3SSTu0XK+i4Bqe7uA==
-  dependencies:
-    "@jest/environment" "^24.9.0"
-    "@jest/fake-timers" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    jest-mock "^24.9.0"
-    jest-util "^24.9.0"
-
-jest-get-type@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-24.9.0.tgz#1684a0c8a50f2e4901b6644ae861f579eed2ef0e"
-  integrity sha512-lUseMzAley4LhIcpSP9Jf+fTrQ4a1yHQwLNeeVa2cEmbCGeoZAtYPOIv8JaxLD/sUpKxetKGP+gsHl8f8TSj8Q==
-
-jest-haste-map@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-24.9.0.tgz#b38a5d64274934e21fa417ae9a9fbeb77ceaac7d"
-  integrity sha512-kfVFmsuWui2Sj1Rp1AJ4D9HqJwE4uwTlS/vO+eRUaMmd54BFpli2XhMQnPC2k4cHFVbB2Q2C+jtI1AGLgEnCjQ==
-  dependencies:
-    "@jest/types" "^24.9.0"
-    anymatch "^2.0.0"
-    fb-watchman "^2.0.0"
-    graceful-fs "^4.1.15"
-    invariant "^2.2.4"
-    jest-serializer "^24.9.0"
-    jest-util "^24.9.0"
-    jest-worker "^24.9.0"
-    micromatch "^3.1.10"
-    sane "^4.0.3"
-    walker "^1.0.7"
-  optionalDependencies:
-    fsevents "^1.2.7"
-
-jest-jasmine2@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-jasmine2/-/jest-jasmine2-24.9.0.tgz#1f7b1bd3242c1774e62acabb3646d96afc3be6a0"
-  integrity sha512-Cq7vkAgaYKp+PsX+2/JbTarrk0DmNhsEtqBXNwUHkdlbrTBLtMJINADf2mf5FkowNsq8evbPc07/qFO0AdKTzw==
-  dependencies:
-    "@babel/traverse" "^7.1.0"
-    "@jest/environment" "^24.9.0"
-    "@jest/test-result" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    chalk "^2.0.1"
-    co "^4.6.0"
-    expect "^24.9.0"
-    is-generator-fn "^2.0.0"
-    jest-each "^24.9.0"
-    jest-matcher-utils "^24.9.0"
-    jest-message-util "^24.9.0"
-    jest-runtime "^24.9.0"
-    jest-snapshot "^24.9.0"
-    jest-util "^24.9.0"
-    pretty-format "^24.9.0"
-    throat "^4.0.0"
-
-jest-leak-detector@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-24.9.0.tgz#b665dea7c77100c5c4f7dfcb153b65cf07dcf96a"
-  integrity sha512-tYkFIDsiKTGwb2FG1w8hX9V0aUb2ot8zY/2nFg087dUageonw1zrLMP4W6zsRO59dPkTSKie+D4rhMuP9nRmrA==
-  dependencies:
-    jest-get-type "^24.9.0"
-    pretty-format "^24.9.0"
-
-jest-matcher-utils@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-24.9.0.tgz#f5b3661d5e628dffe6dd65251dfdae0e87c3a073"
-  integrity sha512-OZz2IXsu6eaiMAwe67c1T+5tUAtQyQx27/EMEkbFAGiw52tB9em+uGbzpcgYVpA8wl0hlxKPZxrly4CXU/GjHA==
-  dependencies:
-    chalk "^2.0.1"
-    jest-diff "^24.9.0"
-    jest-get-type "^24.9.0"
-    pretty-format "^24.9.0"
-
-jest-message-util@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-24.9.0.tgz#527f54a1e380f5e202a8d1149b0ec872f43119e3"
-  integrity sha512-oCj8FiZ3U0hTP4aSui87P4L4jC37BtQwUMqk+zk/b11FR19BJDeZsZAvIHutWnmtw7r85UmR3CEWZ0HWU2mAlw==
-  dependencies:
-    "@babel/code-frame" "^7.0.0"
-    "@jest/test-result" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    "@types/stack-utils" "^1.0.1"
-    chalk "^2.0.1"
-    micromatch "^3.1.10"
-    slash "^2.0.0"
-    stack-utils "^1.0.1"
-
-jest-mock@^24.5.0, jest-mock@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-24.9.0.tgz#c22835541ee379b908673ad51087a2185c13f1c6"
-  integrity sha512-3BEYN5WbSq9wd+SyLDES7AHnjH9A/ROBwmz7l2y+ol+NtSFO8DYiEBzoO1CeFc9a8DYy10EO4dDFVv/wN3zl1w==
-  dependencies:
-    "@jest/types" "^24.9.0"
-
-jest-pnp-resolver@^1.2.1:
-  version "1.2.1"
-  resolved "https://registry.yarnpkg.com/jest-pnp-resolver/-/jest-pnp-resolver-1.2.1.tgz#ecdae604c077a7fbc70defb6d517c3c1c898923a"
-  integrity sha512-pgFw2tm54fzgYvc/OHrnysABEObZCUNFnhjoRjaVOCN8NYc032/gVjPaHD4Aq6ApkSieWtfKAFQtmDKAmhupnQ==
-
-jest-regex-util@^24.3.0, jest-regex-util@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-24.9.0.tgz#c13fb3380bde22bf6575432c493ea8fe37965636"
-  integrity sha512-05Cmb6CuxaA+Ys6fjr3PhvV3bGQmO+2p2La4hFbU+W5uOc479f7FdLXUWXw4pYMAhhSZIuKHwSXSu6CsSBAXQA==
-
-jest-resolve-dependencies@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-24.9.0.tgz#ad055198959c4cfba8a4f066c673a3f0786507ab"
-  integrity sha512-Fm7b6AlWnYhT0BXy4hXpactHIqER7erNgIsIozDXWl5dVm+k8XdGVe1oTg1JyaFnOxarMEbax3wyRJqGP2Pq+g==
-  dependencies:
-    "@jest/types" "^24.9.0"
-    jest-regex-util "^24.3.0"
-    jest-snapshot "^24.9.0"
-
-jest-resolve@24.7.1:
-  version "24.7.1"
-  resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-24.7.1.tgz#e4150198299298380a75a9fd55043fa3b9b17fde"
-  integrity sha512-Bgrc+/UUZpGJ4323sQyj85hV9d+ANyPNu6XfRDUcyFNX1QrZpSoM0kE4Mb2vZMAYTJZsBFzYe8X1UaOkOELSbw==
-  dependencies:
-    "@jest/types" "^24.7.0"
-    browser-resolve "^1.11.3"
-    chalk "^2.0.1"
-    jest-pnp-resolver "^1.2.1"
-    realpath-native "^1.1.0"
-
-jest-resolve@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-24.9.0.tgz#dff04c7687af34c4dd7e524892d9cf77e5d17321"
-  integrity sha512-TaLeLVL1l08YFZAt3zaPtjiVvyy4oSA6CRe+0AFPPVX3Q/VI0giIWWoAvoS5L96vj9Dqxj4fB5p2qrHCmTU/MQ==
-  dependencies:
-    "@jest/types" "^24.9.0"
-    browser-resolve "^1.11.3"
-    chalk "^2.0.1"
-    jest-pnp-resolver "^1.2.1"
-    realpath-native "^1.1.0"
-
-jest-runner@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-24.9.0.tgz#574fafdbd54455c2b34b4bdf4365a23857fcdf42"
-  integrity sha512-KksJQyI3/0mhcfspnxxEOBueGrd5E4vV7ADQLT9ESaCzz02WnbdbKWIf5Mkaucoaj7obQckYPVX6JJhgUcoWWg==
-  dependencies:
-    "@jest/console" "^24.7.1"
-    "@jest/environment" "^24.9.0"
-    "@jest/test-result" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    chalk "^2.4.2"
-    exit "^0.1.2"
-    graceful-fs "^4.1.15"
-    jest-config "^24.9.0"
-    jest-docblock "^24.3.0"
-    jest-haste-map "^24.9.0"
-    jest-jasmine2 "^24.9.0"
-    jest-leak-detector "^24.9.0"
-    jest-message-util "^24.9.0"
-    jest-resolve "^24.9.0"
-    jest-runtime "^24.9.0"
-    jest-util "^24.9.0"
-    jest-worker "^24.6.0"
-    source-map-support "^0.5.6"
-    throat "^4.0.0"
-
-jest-runtime@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-24.9.0.tgz#9f14583af6a4f7314a6a9d9f0226e1a781c8e4ac"
-  integrity sha512-8oNqgnmF3v2J6PVRM2Jfuj8oX3syKmaynlDMMKQ4iyzbQzIG6th5ub/lM2bCMTmoTKM3ykcUYI2Pw9xwNtjMnw==
-  dependencies:
-    "@jest/console" "^24.7.1"
-    "@jest/environment" "^24.9.0"
-    "@jest/source-map" "^24.3.0"
-    "@jest/transform" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    "@types/yargs" "^13.0.0"
-    chalk "^2.0.1"
-    exit "^0.1.2"
-    glob "^7.1.3"
-    graceful-fs "^4.1.15"
-    jest-config "^24.9.0"
-    jest-haste-map "^24.9.0"
-    jest-message-util "^24.9.0"
-    jest-mock "^24.9.0"
-    jest-regex-util "^24.3.0"
-    jest-resolve "^24.9.0"
-    jest-snapshot "^24.9.0"
-    jest-util "^24.9.0"
-    jest-validate "^24.9.0"
-    realpath-native "^1.1.0"
-    slash "^2.0.0"
-    strip-bom "^3.0.0"
-    yargs "^13.3.0"
-
-jest-serializer@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-serializer/-/jest-serializer-24.9.0.tgz#e6d7d7ef96d31e8b9079a714754c5d5c58288e73"
-  integrity sha512-DxYipDr8OvfrKH3Kel6NdED3OXxjvxXZ1uIY2I9OFbGg+vUkkg7AGvi65qbhbWNPvDckXmzMPbK3u3HaDO49bQ==
-
-jest-snapshot@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-24.9.0.tgz#ec8e9ca4f2ec0c5c87ae8f925cf97497b0e951ba"
-  integrity sha512-uI/rszGSs73xCM0l+up7O7a40o90cnrk429LOiK3aeTvfC0HHmldbd81/B7Ix81KSFe1lwkbl7GnBGG4UfuDew==
-  dependencies:
-    "@babel/types" "^7.0.0"
-    "@jest/types" "^24.9.0"
-    chalk "^2.0.1"
-    expect "^24.9.0"
-    jest-diff "^24.9.0"
-    jest-get-type "^24.9.0"
-    jest-matcher-utils "^24.9.0"
-    jest-message-util "^24.9.0"
-    jest-resolve "^24.9.0"
-    mkdirp "^0.5.1"
-    natural-compare "^1.4.0"
-    pretty-format "^24.9.0"
-    semver "^6.2.0"
-
-jest-util@^24.5.0, jest-util@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-24.9.0.tgz#7396814e48536d2e85a37de3e4c431d7cb140162"
-  integrity sha512-x+cZU8VRmOJxbA1K5oDBdxQmdq0OIdADarLxk0Mq+3XS4jgvhG/oKGWcIDCtPG0HgjxOYvF+ilPJQsAyXfbNOg==
-  dependencies:
-    "@jest/console" "^24.9.0"
-    "@jest/fake-timers" "^24.9.0"
-    "@jest/source-map" "^24.9.0"
-    "@jest/test-result" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    callsites "^3.0.0"
-    chalk "^2.0.1"
-    graceful-fs "^4.1.15"
-    is-ci "^2.0.0"
-    mkdirp "^0.5.1"
-    slash "^2.0.0"
-    source-map "^0.6.0"
-
-jest-validate@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-24.9.0.tgz#0775c55360d173cd854e40180756d4ff52def8ab"
-  integrity sha512-HPIt6C5ACwiqSiwi+OfSSHbK8sG7akG8eATl+IPKaeIjtPOeBUd/g3J7DghugzxrGjI93qS/+RPKe1H6PqvhRQ==
-  dependencies:
-    "@jest/types" "^24.9.0"
-    camelcase "^5.3.1"
-    chalk "^2.0.1"
-    jest-get-type "^24.9.0"
-    leven "^3.1.0"
-    pretty-format "^24.9.0"
-
-jest-watch-typeahead@0.3.0:
-  version "0.3.0"
-  resolved "https://registry.yarnpkg.com/jest-watch-typeahead/-/jest-watch-typeahead-0.3.0.tgz#f56d9ee17ea71ecbf8253fed213df3185a1584c9"
-  integrity sha512-+uOtlppt9ysST6k6ZTqsPI0WNz2HLa8bowiZylZoQCQaAVn7XsVmHhZREkz73FhKelrFrpne4hQQjdq42nFEmA==
-  dependencies:
-    ansi-escapes "^3.0.0"
-    chalk "^2.4.1"
-    jest-watcher "^24.3.0"
-    slash "^2.0.0"
-    string-length "^2.0.0"
-    strip-ansi "^5.0.0"
-
-jest-watcher@^24.3.0, jest-watcher@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-24.9.0.tgz#4b56e5d1ceff005f5b88e528dc9afc8dd4ed2b3b"
-  integrity sha512-+/fLOfKPXXYJDYlks62/4R4GoT+GU1tYZed99JSCOsmzkkF7727RqKrjNAxtfO4YpGv11wybgRvCjR73lK2GZw==
-  dependencies:
-    "@jest/test-result" "^24.9.0"
-    "@jest/types" "^24.9.0"
-    "@types/yargs" "^13.0.0"
-    ansi-escapes "^3.0.0"
-    chalk "^2.0.1"
-    jest-util "^24.9.0"
-    string-length "^2.0.0"
-
-jest-worker@^24.6.0, jest-worker@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-24.9.0.tgz#5dbfdb5b2d322e98567898238a9697bcce67b3e5"
-  integrity sha512-51PE4haMSXcHohnSMdM42anbvZANYTqMrr52tVKPqqsPJMzoP6FYYDVqahX/HrAoKEKz3uUPzSvKs9A3qR4iVw==
-  dependencies:
-    merge-stream "^2.0.0"
-    supports-color "^6.1.0"
-
-jest@24.7.1:
-  version "24.7.1"
-  resolved "https://registry.yarnpkg.com/jest/-/jest-24.7.1.tgz#0d94331cf510c75893ee32f87d7321d5bf8f2501"
-  integrity sha512-AbvRar5r++izmqo5gdbAjTeA6uNRGoNRuj5vHB0OnDXo2DXWZJVuaObiGgtlvhKb+cWy2oYbQSfxv7Q7GjnAtA==
-  dependencies:
-    import-local "^2.0.0"
-    jest-cli "^24.7.1"
-
-js-levenshtein@^1.1.3:
-  version "1.1.6"
-  resolved "https://registry.yarnpkg.com/js-levenshtein/-/js-levenshtein-1.1.6.tgz#c6cee58eb3550372df8deb85fad5ce66ce01d59d"
-  integrity sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g==
-
-"js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499"
-  integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==
-
-js-tokens@^3.0.2:
-  version "3.0.2"
-  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b"
-  integrity sha1-mGbfOVECEw449/mWvOtlRDIJwls=
-
-js-yaml@^3.13.0, js-yaml@^3.13.1:
-  version "3.13.1"
-  resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.13.1.tgz#aff151b30bfdfa8e49e05da22e7415e9dfa37847"
-  integrity sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==
-  dependencies:
-    argparse "^1.0.7"
-    esprima "^4.0.0"
-
-jsbn@~0.1.0:
-  version "0.1.1"
-  resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
-  integrity sha1-peZUwuWi3rXyAdls77yoDA7y9RM=
-
-jsdom@^11.5.1:
-  version "11.12.0"
-  resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-11.12.0.tgz#1a80d40ddd378a1de59656e9e6dc5a3ba8657bc8"
-  integrity sha512-y8Px43oyiBM13Zc1z780FrfNLJCXTL40EWlty/LXUtcjykRBNgLlCjWXpfSPBl2iv+N7koQN+dvqszHZgT/Fjw==
-  dependencies:
-    abab "^2.0.0"
-    acorn "^5.5.3"
-    acorn-globals "^4.1.0"
-    array-equal "^1.0.0"
-    cssom ">= 0.3.2 < 0.4.0"
-    cssstyle "^1.0.0"
-    data-urls "^1.0.0"
-    domexception "^1.0.1"
-    escodegen "^1.9.1"
-    html-encoding-sniffer "^1.0.2"
-    left-pad "^1.3.0"
-    nwsapi "^2.0.7"
-    parse5 "4.0.0"
-    pn "^1.1.0"
-    request "^2.87.0"
-    request-promise-native "^1.0.5"
-    sax "^1.2.4"
-    symbol-tree "^3.2.2"
-    tough-cookie "^2.3.4"
-    w3c-hr-time "^1.0.1"
-    webidl-conversions "^4.0.2"
-    whatwg-encoding "^1.0.3"
-    whatwg-mimetype "^2.1.0"
-    whatwg-url "^6.4.1"
-    ws "^5.2.0"
-    xml-name-validator "^3.0.0"
-
-jsdom@^14.0.0:
-  version "14.1.0"
-  resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-14.1.0.tgz#916463b6094956b0a6c1782c94e380cd30e1981b"
-  integrity sha512-O901mfJSuTdwU2w3Sn+74T+RnDVP+FuV5fH8tcPWyqrseRAb0s5xOtPgCFiPOtLcyK7CLIJwPyD83ZqQWvA5ng==
-  dependencies:
-    abab "^2.0.0"
-    acorn "^6.0.4"
-    acorn-globals "^4.3.0"
-    array-equal "^1.0.0"
-    cssom "^0.3.4"
-    cssstyle "^1.1.1"
-    data-urls "^1.1.0"
-    domexception "^1.0.1"
-    escodegen "^1.11.0"
-    html-encoding-sniffer "^1.0.2"
-    nwsapi "^2.1.3"
-    parse5 "5.1.0"
-    pn "^1.1.0"
-    request "^2.88.0"
-    request-promise-native "^1.0.5"
-    saxes "^3.1.9"
-    symbol-tree "^3.2.2"
-    tough-cookie "^2.5.0"
-    w3c-hr-time "^1.0.1"
-    w3c-xmlserializer "^1.1.2"
-    webidl-conversions "^4.0.2"
-    whatwg-encoding "^1.0.5"
-    whatwg-mimetype "^2.3.0"
-    whatwg-url "^7.0.0"
-    ws "^6.1.2"
-    xml-name-validator "^3.0.0"
-
-jsesc@^2.5.1:
-  version "2.5.2"
-  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4"
-  integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==
-
-jsesc@~0.5.0:
-  version "0.5.0"
-  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d"
-  integrity sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=
-
-json-parse-better-errors@^1.0.1, json-parse-better-errors@^1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9"
-  integrity sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==
-
-json-schema-traverse@^0.4.1:
-  version "0.4.1"
-  resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660"
-  integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==
-
-json-schema@0.2.3:
-  version "0.2.3"
-  resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13"
-  integrity sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=
-
-json-stable-stringify-without-jsonify@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651"
-  integrity sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=
-
-json-stable-stringify@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af"
-  integrity sha1-mnWdOcXy/1A/1TAGRu1EX4jE+a8=
-  dependencies:
-    jsonify "~0.0.0"
-
-json-stringify-safe@~5.0.1:
-  version "5.0.1"
-  resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
-  integrity sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=
-
-json2mq@^0.2.0:
-  version "0.2.0"
-  resolved "https://registry.yarnpkg.com/json2mq/-/json2mq-0.2.0.tgz#b637bd3ba9eabe122c83e9720483aeb10d2c904a"
-  integrity sha1-tje9O6nqvhIsg+lyBIOusQ0skEo=
-  dependencies:
-    string-convert "^0.2.0"
-
-json3@^3.3.2:
-  version "3.3.3"
-  resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.3.tgz#7fc10e375fc5ae42c4705a5cc0aa6f62be305b81"
-  integrity sha512-c7/8mbUsKigAbLkD5B010BK4D9LZm7A1pNItkEwiUZRpIN66exu/e7YQWysGun+TRKaJp8MhemM+VkfWv42aCA==
-
-json5@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.1.tgz#779fb0018604fa854eacbf6252180d83543e3dbe"
-  integrity sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==
-  dependencies:
-    minimist "^1.2.0"
-
-json5@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/json5/-/json5-2.1.0.tgz#e7a0c62c48285c628d20a10b85c89bb807c32850"
-  integrity sha512-8Mh9h6xViijj36g7Dxi+Y4S6hNGV96vcJZr/SrlHh1LR/pEn/8j/+qIBbs44YKl69Lrfctp4QD+AdWLTMqEZAQ==
-  dependencies:
-    minimist "^1.2.0"
-
-jsonfile@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-4.0.0.tgz#8771aae0799b64076b76640fca058f9c10e33ecb"
-  integrity sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=
-  optionalDependencies:
-    graceful-fs "^4.1.6"
-
-jsonify@~0.0.0:
-  version "0.0.0"
-  resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73"
-  integrity sha1-LHS27kHZPKUbe1qu6PUDYx0lKnM=
-
-jsprim@^1.2.2:
-  version "1.4.1"
-  resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.1.tgz#313e66bc1e5cc06e438bc1b7499c2e5c56acb6a2"
-  integrity sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=
-  dependencies:
-    assert-plus "1.0.0"
-    extsprintf "1.3.0"
-    json-schema "0.2.3"
-    verror "1.10.0"
-
-jsx-ast-utils@^2.0.1:
-  version "2.2.1"
-  resolved "https://registry.yarnpkg.com/jsx-ast-utils/-/jsx-ast-utils-2.2.1.tgz#4d4973ebf8b9d2837ee91a8208cc66f3a2776cfb"
-  integrity sha512-v3FxCcAf20DayI+uxnCuw795+oOIkVu6EnJ1+kSzhqqTZHNkTZ7B66ZgLp4oLJ/gbA64cI0B7WRoHZMSRdyVRQ==
-  dependencies:
-    array-includes "^3.0.3"
-    object.assign "^4.1.0"
-
-killable@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/killable/-/killable-1.0.1.tgz#4c8ce441187a061c7474fb87ca08e2a638194892"
-  integrity sha512-LzqtLKlUwirEUyl/nicirVmNiPvYs7l5n8wOPP7fyJVpUPkvCnW/vuiXGpylGUlnPDnB7311rARzAt3Mhswpjg==
-
-kind-of@^2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-2.0.1.tgz#018ec7a4ce7e3a86cb9141be519d24c8faa981b5"
-  integrity sha1-AY7HpM5+OobLkUG+UZ0kyPqpgbU=
-  dependencies:
-    is-buffer "^1.0.2"
-
-kind-of@^3.0.2, kind-of@^3.0.3, kind-of@^3.2.0:
-  version "3.2.2"
-  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64"
-  integrity sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=
-  dependencies:
-    is-buffer "^1.1.5"
-
-kind-of@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-4.0.0.tgz#20813df3d712928b207378691a45066fae72dd57"
-  integrity sha1-IIE989cSkosgc3hpGkUGb65y3Vc=
-  dependencies:
-    is-buffer "^1.1.5"
-
-kind-of@^5.0.0:
-  version "5.1.0"
-  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-5.1.0.tgz#729c91e2d857b7a419a1f9aa65685c4c33f5845d"
-  integrity sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==
-
-kind-of@^6.0.0, kind-of@^6.0.2:
-  version "6.0.2"
-  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.2.tgz#01146b36a6218e64e58f3a8d66de5d7fc6f6d051"
-  integrity sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==
-
-kleur@^3.0.3:
-  version "3.0.3"
-  resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e"
-  integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==
-
-last-call-webpack-plugin@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/last-call-webpack-plugin/-/last-call-webpack-plugin-3.0.0.tgz#9742df0e10e3cf46e5c0381c2de90d3a7a2d7555"
-  integrity sha512-7KI2l2GIZa9p2spzPIVZBYyNKkN+e/SQPpnjlTiPhdbDW3F86tdKKELxKpzJ5sgU19wQWsACULZmpTPYHeWO5w==
-  dependencies:
-    lodash "^4.17.5"
-    webpack-sources "^1.1.0"
-
-lazy-cache@^0.2.3:
-  version "0.2.7"
-  resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-0.2.7.tgz#7feddf2dcb6edb77d11ef1d117ab5ffdf0ab1b65"
-  integrity sha1-f+3fLctu23fRHvHRF6tf/fCrG2U=
-
-lazy-cache@^1.0.3:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e"
-  integrity sha1-odePw6UEdMuAhF07O24dpJpEbo4=
-
-lcid@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/lcid/-/lcid-2.0.0.tgz#6ef5d2df60e52f82eb228a4c373e8d1f397253cf"
-  integrity sha512-avPEb8P8EGnwXKClwsNUgryVjllcRqtMYa49NTsbQagYuT1DcXnl1915oxWjoyGrXR6zH/Y0Zc96xWsPcoDKeA==
-  dependencies:
-    invert-kv "^2.0.0"
-
-left-pad@^1.3.0:
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/left-pad/-/left-pad-1.3.0.tgz#5b8a3a7765dfe001261dde915589e782f8c94d1e"
-  integrity sha512-XI5MPzVNApjAyhQzphX8BkmKsKUxD4LdyK24iZeQGinBN9yTQT3bFlCBy/aVx2HrNcqQGsdot8ghrjyrvMCoEA==
-
-less-loader@^5.0.0:
-  version "5.0.0"
-  resolved "https://registry.yarnpkg.com/less-loader/-/less-loader-5.0.0.tgz#498dde3a6c6c4f887458ee9ed3f086a12ad1b466"
-  integrity sha512-bquCU89mO/yWLaUq0Clk7qCsKhsF/TZpJUzETRvJa9KSVEL9SO3ovCvdEHISBhrC81OwC8QSVX7E0bzElZj9cg==
-  dependencies:
-    clone "^2.1.1"
-    loader-utils "^1.1.0"
-    pify "^4.0.1"
-
-less@^3.9.0:
-  version "3.10.3"
-  resolved "https://registry.yarnpkg.com/less/-/less-3.10.3.tgz#417a0975d5eeecc52cff4bcfa3c09d35781e6792"
-  integrity sha512-vz32vqfgmoxF1h3K4J+yKCtajH0PWmjkIFgbs5d78E/c/e+UQTnI+lWK+1eQRE95PXM2mC3rJlLSSP9VQHnaow==
-  dependencies:
-    clone "^2.1.2"
-  optionalDependencies:
-    errno "^0.1.1"
-    graceful-fs "^4.1.2"
-    image-size "~0.5.0"
-    mime "^1.4.1"
-    mkdirp "^0.5.0"
-    promise "^7.1.1"
-    request "^2.83.0"
-    source-map "~0.6.0"
-
-leven@^3.1.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/leven/-/leven-3.1.0.tgz#77891de834064cccba82ae7842bb6b14a13ed7f2"
-  integrity sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==
-
-levn@^0.3.0, levn@~0.3.0:
-  version "0.3.0"
-  resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee"
-  integrity sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=
-  dependencies:
-    prelude-ls "~1.1.2"
-    type-check "~0.3.2"
-
-load-json-file@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-2.0.0.tgz#7947e42149af80d696cbf797bcaabcfe1fe29ca8"
-  integrity sha1-eUfkIUmvgNaWy/eXvKq8/h/inKg=
-  dependencies:
-    graceful-fs "^4.1.2"
-    parse-json "^2.2.0"
-    pify "^2.0.0"
-    strip-bom "^3.0.0"
-
-load-json-file@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-4.0.0.tgz#2f5f45ab91e33216234fd53adab668eb4ec0993b"
-  integrity sha1-L19Fq5HjMhYjT9U62rZo607AmTs=
-  dependencies:
-    graceful-fs "^4.1.2"
-    parse-json "^4.0.0"
-    pify "^3.0.0"
-    strip-bom "^3.0.0"
-
-loader-fs-cache@^1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/loader-fs-cache/-/loader-fs-cache-1.0.2.tgz#54cedf6b727e1779fd8f01205f05f6e88706f086"
-  integrity sha512-70IzT/0/L+M20jUlEqZhZyArTU6VKLRTYRDAYN26g4jfzpJqjipLL3/hgYpySqI9PwsVRHHFja0LfEmsx9X2Cw==
-  dependencies:
-    find-cache-dir "^0.1.1"
-    mkdirp "0.5.1"
-
-loader-runner@^2.3.0:
-  version "2.4.0"
-  resolved "https://registry.yarnpkg.com/loader-runner/-/loader-runner-2.4.0.tgz#ed47066bfe534d7e84c4c7b9998c2a75607d9357"
-  integrity sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw==
-
-loader-utils@1.2.3, loader-utils@^1.0.1, loader-utils@^1.0.2, loader-utils@^1.1.0, loader-utils@^1.2.3:
-  version "1.2.3"
-  resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-1.2.3.tgz#1ff5dc6911c9f0a062531a4c04b609406108c2c7"
-  integrity sha512-fkpz8ejdnEMG3s37wGL07iSBDg99O9D5yflE9RGNH3hRdx9SOwYfnGYdZOUIZitN8E+E2vkq3MUMYMvPYl5ZZA==
-  dependencies:
-    big.js "^5.2.2"
-    emojis-list "^2.0.0"
-    json5 "^1.0.1"
-
-locate-path@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-2.0.0.tgz#2b568b265eec944c6d9c0de9c3dbbbca0354cd8e"
-  integrity sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=
-  dependencies:
-    p-locate "^2.0.0"
-    path-exists "^3.0.0"
-
-locate-path@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-3.0.0.tgz#dbec3b3ab759758071b58fe59fc41871af21400e"
-  integrity sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==
-  dependencies:
-    p-locate "^3.0.0"
-    path-exists "^3.0.0"
-
-lodash._getnative@^3.0.0:
-  version "3.9.1"
-  resolved "https://registry.yarnpkg.com/lodash._getnative/-/lodash._getnative-3.9.1.tgz#570bc7dede46d61cdcde687d65d3eecbaa3aaff5"
-  integrity sha1-VwvH3t5G1hzc3mh9ZdPuy6o6r/U=
-
-lodash._reinterpolate@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz#0ccf2d89166af03b3663c796538b75ac6e114d9d"
-  integrity sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0=
-
-lodash.debounce@^4.0.0, lodash.debounce@^4.0.8:
-  version "4.0.8"
-  resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af"
-  integrity sha1-gteb/zCmfEAF/9XiUVMArZyk168=
-
-lodash.flow@^3.5.0:
-  version "3.5.0"
-  resolved "https://registry.yarnpkg.com/lodash.flow/-/lodash.flow-3.5.0.tgz#87bf40292b8cf83e4e8ce1a3ae4209e20071675a"
-  integrity sha1-h79AKSuM+D5OjOGjrkIJ4gBxZ1o=
-
-lodash.isarguments@^3.0.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz#2f573d85c6a24289ff00663b491c1d338ff3458a"
-  integrity sha1-L1c9hcaiQon/AGY7SRwdM4/zRYo=
-
-lodash.isarray@^3.0.0:
-  version "3.0.4"
-  resolved "https://registry.yarnpkg.com/lodash.isarray/-/lodash.isarray-3.0.4.tgz#79e4eb88c36a8122af86f844aa9bcd851b5fbb55"
-  integrity sha1-eeTriMNqgSKvhvhEqpvNhRtfu1U=
-
-lodash.keys@^3.1.2:
-  version "3.1.2"
-  resolved "https://registry.yarnpkg.com/lodash.keys/-/lodash.keys-3.1.2.tgz#4dbc0472b156be50a0b286855d1bd0b0c656098a"
-  integrity sha1-TbwEcrFWvlCgsoaFXRvQsMZWCYo=
-  dependencies:
-    lodash._getnative "^3.0.0"
-    lodash.isarguments "^3.0.0"
-    lodash.isarray "^3.0.0"
-
-lodash.memoize@^4.1.2:
-  version "4.1.2"
-  resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe"
-  integrity sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4=
-
-lodash.sortby@^4.7.0:
-  version "4.7.0"
-  resolved "https://registry.yarnpkg.com/lodash.sortby/-/lodash.sortby-4.7.0.tgz#edd14c824e2cc9c1e0b0a1b42bb5210516a42438"
-  integrity sha1-7dFMgk4sycHgsKG0K7UhBRakJDg=
-
-lodash.tail@^4.1.1:
-  version "4.1.1"
-  resolved "https://registry.yarnpkg.com/lodash.tail/-/lodash.tail-4.1.1.tgz#d2333a36d9e7717c8ad2f7cacafec7c32b444664"
-  integrity sha1-0jM6NtnncXyK0vfKyv7HwytERmQ=
-
-lodash.template@^4.4.0, lodash.template@^4.5.0:
-  version "4.5.0"
-  resolved "https://registry.yarnpkg.com/lodash.template/-/lodash.template-4.5.0.tgz#f976195cf3f347d0d5f52483569fe8031ccce8ab"
-  integrity sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A==
-  dependencies:
-    lodash._reinterpolate "^3.0.0"
-    lodash.templatesettings "^4.0.0"
-
-lodash.templatesettings@^4.0.0:
-  version "4.2.0"
-  resolved "https://registry.yarnpkg.com/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz#e481310f049d3cf6d47e912ad09313b154f0fb33"
-  integrity sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ==
-  dependencies:
-    lodash._reinterpolate "^3.0.0"
-
-lodash.throttle@^4.0.0:
-  version "4.1.1"
-  resolved "https://registry.yarnpkg.com/lodash.throttle/-/lodash.throttle-4.1.1.tgz#c23e91b710242ac70c37f1e1cda9274cc39bf2f4"
-  integrity sha1-wj6RtxAkKscMN/HhzaknTMOb8vQ=
-
-lodash.unescape@4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/lodash.unescape/-/lodash.unescape-4.0.1.tgz#bf2249886ce514cda112fae9218cdc065211fc9c"
-  integrity sha1-vyJJiGzlFM2hEvrpIYzcBlIR/Jw=
-
-lodash.uniq@^4.5.0:
-  version "4.5.0"
-  resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773"
-  integrity sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=
-
-"lodash@>=3.5 <5", lodash@^4.16.5, lodash@^4.17.10, lodash@^4.17.11, lodash@^4.17.12, lodash@^4.17.13, lodash@^4.17.14, lodash@^4.17.4, lodash@^4.17.5:
-  version "4.17.15"
-  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.15.tgz#b447f6670a0455bbfeedd11392eff330ea097548"
-  integrity sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==
-
-loglevel@^1.4.1:
-  version "1.6.3"
-  resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.6.3.tgz#77f2eb64be55a404c9fd04ad16d57c1d6d6b1280"
-  integrity sha512-LoEDv5pgpvWgPF4kNYuIp0qqSJVWak/dML0RY74xlzMZiT9w77teNAwKYKWBTYjlokMirg+o3jBwp+vlLrcfAA==
-
-loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.2.0, loose-envify@^1.3.1, loose-envify@^1.4.0:
-  version "1.4.0"
-  resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf"
-  integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==
-  dependencies:
-    js-tokens "^3.0.0 || ^4.0.0"
-
-lower-case@^1.1.1:
-  version "1.1.4"
-  resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-1.1.4.tgz#9a2cabd1b9e8e0ae993a4bf7d5875c39c42e8eac"
-  integrity sha1-miyr0bno4K6ZOkv31YdcOcQujqw=
-
-lru-cache@^5.1.1:
-  version "5.1.1"
-  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920"
-  integrity sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==
-  dependencies:
-    yallist "^3.0.2"
-
-make-dir@^2.0.0, make-dir@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-2.1.0.tgz#5f0310e18b8be898cc07009295a30ae41e91e6f5"
-  integrity sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==
-  dependencies:
-    pify "^4.0.1"
-    semver "^5.6.0"
-
-makeerror@1.0.x:
-  version "1.0.11"
-  resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.11.tgz#e01a5c9109f2af79660e4e8b9587790184f5a96c"
-  integrity sha1-4BpckQnyr3lmDk6LlYd5AYT1qWw=
-  dependencies:
-    tmpl "1.0.x"
-
-mamacro@^0.0.3:
-  version "0.0.3"
-  resolved "https://registry.yarnpkg.com/mamacro/-/mamacro-0.0.3.tgz#ad2c9576197c9f1abf308d0787865bd975a3f3e4"
-  integrity sha512-qMEwh+UujcQ+kbz3T6V+wAmO2U8veoq2w+3wY8MquqwVA3jChfwY+Tk52GZKDfACEPjuZ7r2oJLejwpt8jtwTA==
-
-map-age-cleaner@^0.1.1:
-  version "0.1.3"
-  resolved "https://registry.yarnpkg.com/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz#7d583a7306434c055fe474b0f45078e6e1b4b92a"
-  integrity sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w==
-  dependencies:
-    p-defer "^1.0.0"
-
-map-cache@^0.2.2:
-  version "0.2.2"
-  resolved "https://registry.yarnpkg.com/map-cache/-/map-cache-0.2.2.tgz#c32abd0bd6525d9b051645bb4f26ac5dc98a0dbf"
-  integrity sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=
-
-map-visit@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/map-visit/-/map-visit-1.0.0.tgz#ecdca8f13144e660f1b5bd41f12f3479d98dfb8f"
-  integrity sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=
-  dependencies:
-    object-visit "^1.0.0"
-
-md5.js@^1.3.4:
-  version "1.3.5"
-  resolved "https://registry.yarnpkg.com/md5.js/-/md5.js-1.3.5.tgz#b5d07b8e3216e3e27cd728d72f70d1e6a342005f"
-  integrity sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==
-  dependencies:
-    hash-base "^3.0.0"
-    inherits "^2.0.1"
-    safe-buffer "^5.1.2"
-
-mdn-data@2.0.4:
-  version "2.0.4"
-  resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.4.tgz#699b3c38ac6f1d728091a64650b65d388502fd5b"
-  integrity sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA==
-
-mdn-data@~1.1.0:
-  version "1.1.4"
-  resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-1.1.4.tgz#50b5d4ffc4575276573c4eedb8780812a8419f01"
-  integrity sha512-FSYbp3lyKjyj3E7fMl6rYvUdX0FBXaluGqlFoYESWQlyUTq8R+wp0rkFxoYFqZlHCvsUXGjyJmLQSnXToYhOSA==
-
-media-typer@0.3.0:
-  version "0.3.0"
-  resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
-  integrity sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=
-
-mem@^4.0.0:
-  version "4.3.0"
-  resolved "https://registry.yarnpkg.com/mem/-/mem-4.3.0.tgz#461af497bc4ae09608cdb2e60eefb69bff744178"
-  integrity sha512-qX2bG48pTqYRVmDB37rn/6PT7LcR8T7oAX3bf99u1Tt1nzxYfxkgqDwUwolPlXweM0XzBOBFzSx4kfp7KP1s/w==
-  dependencies:
-    map-age-cleaner "^0.1.1"
-    mimic-fn "^2.0.0"
-    p-is-promise "^2.0.0"
-
-memory-fs@^0.4.0, memory-fs@^0.4.1, memory-fs@~0.4.1:
-  version "0.4.1"
-  resolved "https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.4.1.tgz#3a9a20b8462523e447cfbc7e8bb80ed667bfc552"
-  integrity sha1-OpoguEYlI+RHz7x+i7gO1me/xVI=
-  dependencies:
-    errno "^0.1.3"
-    readable-stream "^2.0.1"
-
-merge-deep@^3.0.2:
-  version "3.0.2"
-  resolved "https://registry.yarnpkg.com/merge-deep/-/merge-deep-3.0.2.tgz#f39fa100a4f1bd34ff29f7d2bf4508fbb8d83ad2"
-  integrity sha512-T7qC8kg4Zoti1cFd8Cr0M+qaZfOwjlPDEdZIIPPB2JZctjaPM4fX+i7HOId69tAti2fvO6X5ldfYUONDODsrkA==
-  dependencies:
-    arr-union "^3.1.0"
-    clone-deep "^0.2.4"
-    kind-of "^3.0.2"
-
-merge-descriptors@1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61"
-  integrity sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=
-
-merge-stream@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60"
-  integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==
-
-merge2@^1.2.3:
-  version "1.2.4"
-  resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.2.4.tgz#c9269589e6885a60cf80605d9522d4b67ca646e3"
-  integrity sha512-FYE8xI+6pjFOhokZu0We3S5NKCirLbCzSh2Usf3qEyr4X8U+0jNg9P8RZ4qz+V2UoECLVwSyzU3LxXBaLGtD3A==
-
-methods@~1.1.2:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee"
-  integrity sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=
-
-microevent.ts@~0.1.1:
-  version "0.1.1"
-  resolved "https://registry.yarnpkg.com/microevent.ts/-/microevent.ts-0.1.1.tgz#70b09b83f43df5172d0205a63025bce0f7357fa0"
-  integrity sha512-jo1OfR4TaEwd5HOrt5+tAZ9mqT4jmpNAusXtyfNzqVm9uiSYFZlKM1wYL4oU7azZW/PxQW53wM0S6OR1JHNa2g==
-
-micromatch@^3.1.10, micromatch@^3.1.4, micromatch@^3.1.8:
-  version "3.1.10"
-  resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-3.1.10.tgz#70859bc95c9840952f359a068a3fc49f9ecfac23"
-  integrity sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==
-  dependencies:
-    arr-diff "^4.0.0"
-    array-unique "^0.3.2"
-    braces "^2.3.1"
-    define-property "^2.0.2"
-    extend-shallow "^3.0.2"
-    extglob "^2.0.4"
-    fragment-cache "^0.2.1"
-    kind-of "^6.0.2"
-    nanomatch "^1.2.9"
-    object.pick "^1.3.0"
-    regex-not "^1.0.0"
-    snapdragon "^0.8.1"
-    to-regex "^3.0.2"
-
-miller-rabin@^4.0.0:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/miller-rabin/-/miller-rabin-4.0.1.tgz#f080351c865b0dc562a8462966daa53543c78a4d"
-  integrity sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==
-  dependencies:
-    bn.js "^4.0.0"
-    brorand "^1.0.1"
-
-mime-db@1.40.0, "mime-db@>= 1.40.0 < 2":
-  version "1.40.0"
-  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.40.0.tgz#a65057e998db090f732a68f6c276d387d4126c32"
-  integrity sha512-jYdeOMPy9vnxEqFRRo6ZvTZ8d9oPb+k18PKoYNYUe2stVEBPPwsln/qWzdbmaIvnhZ9v2P+CuecK+fpUfsV2mA==
-
-mime-types@^2.1.12, mime-types@~2.1.17, mime-types@~2.1.19, mime-types@~2.1.24:
-  version "2.1.24"
-  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.24.tgz#b6f8d0b3e951efb77dedeca194cff6d16f676f81"
-  integrity sha512-WaFHS3MCl5fapm3oLxU4eYDw77IQM2ACcxQ9RIxfaC3ooc6PFuBMGZZsYpvoXS5D5QTWPieo1jjLdAm3TBP3cQ==
-  dependencies:
-    mime-db "1.40.0"
-
-mime@1.6.0, mime@^1.4.1:
-  version "1.6.0"
-  resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1"
-  integrity sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==
-
-mime@^2.0.3, mime@^2.4.2:
-  version "2.4.4"
-  resolved "https://registry.yarnpkg.com/mime/-/mime-2.4.4.tgz#bd7b91135fc6b01cde3e9bae33d659b63d8857e5"
-  integrity sha512-LRxmNwziLPT828z+4YkNzloCFC2YM4wrB99k+AV5ZbEyfGNWfG8SO1FUXLmLDBSo89NrJZ4DIWeLjy1CHGhMGA==
-
-mimic-fn@^1.0.0:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-1.2.0.tgz#820c86a39334640e99516928bd03fca88057d022"
-  integrity sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==
-
-mimic-fn@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b"
-  integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==
-
-mini-create-react-context@^0.3.0:
-  version "0.3.2"
-  resolved "https://registry.yarnpkg.com/mini-create-react-context/-/mini-create-react-context-0.3.2.tgz#79fc598f283dd623da8e088b05db8cddab250189"
-  integrity sha512-2v+OeetEyliMt5VHMXsBhABoJ0/M4RCe7fatd/fBy6SMiKazUSEt3gxxypfnk2SHMkdBYvorHRoQxuGoiwbzAw==
-  dependencies:
-    "@babel/runtime" "^7.4.0"
-    gud "^1.0.0"
-    tiny-warning "^1.0.2"
-
-mini-css-extract-plugin@0.5.0:
-  version "0.5.0"
-  resolved "https://registry.yarnpkg.com/mini-css-extract-plugin/-/mini-css-extract-plugin-0.5.0.tgz#ac0059b02b9692515a637115b0cc9fed3a35c7b0"
-  integrity sha512-IuaLjruM0vMKhUUT51fQdQzBYTX49dLj8w68ALEAe2A4iYNpIC4eMac67mt3NzycvjOlf07/kYxJDc0RTl1Wqw==
-  dependencies:
-    loader-utils "^1.1.0"
-    schema-utils "^1.0.0"
-    webpack-sources "^1.1.0"
-
-mini-store@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/mini-store/-/mini-store-2.0.0.tgz#0843c048d6942ce55e3e78b1b67fc063022b5488"
-  integrity sha512-EG0CuwpQmX+XL4QVS0kxNwHW5ftSbhygu1qxQH0pipugjnPkbvkalCdQbEihMwtQY6d3MTN+MS0q+aurs+RfLQ==
-  dependencies:
-    hoist-non-react-statics "^2.3.1"
-    prop-types "^15.6.0"
-    react-lifecycles-compat "^3.0.4"
-    shallowequal "^1.0.2"
-
-minimalistic-assert@^1.0.0, minimalistic-assert@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7"
-  integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==
-
-minimalistic-crypto-utils@^1.0.0, minimalistic-crypto-utils@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz#f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a"
-  integrity sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=
-
-minimatch@3.0.4, minimatch@^3.0.4:
-  version "3.0.4"
-  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083"
-  integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==
-  dependencies:
-    brace-expansion "^1.1.7"
-
-minimist@0.0.8:
-  version "0.0.8"
-  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
-  integrity sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=
-
-minimist@^1.1.1, minimist@^1.2.0:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284"
-  integrity sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=
-
-minimist@~0.0.1:
-  version "0.0.10"
-  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.10.tgz#de3f98543dbf96082be48ad1a0c7cda836301dcf"
-  integrity sha1-3j+YVD2/lggr5IrRoMfNqDYwHc8=
-
-minipass@^2.2.1, minipass@^2.3.5:
-  version "2.4.0"
-  resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.4.0.tgz#38f0af94f42fb6f34d3d7d82a90e2c99cd3ff485"
-  integrity sha512-6PmOuSP4NnZXzs2z6rbwzLJu/c5gdzYg1mRI/WIYdx45iiX7T+a4esOzavD6V/KmBzAaopFSTZPZcUx73bqKWA==
-  dependencies:
-    safe-buffer "^5.1.2"
-    yallist "^3.0.0"
-
-minizlib@^1.2.1:
-  version "1.2.1"
-  resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.2.1.tgz#dd27ea6136243c7c880684e8672bb3a45fd9b614"
-  integrity sha512-7+4oTUOWKg7AuL3vloEWekXY2/D20cevzsrNT2kGWm+39J9hGTCBv8VI5Pm5lXZ/o3/mdR4f8rflAPhnQb8mPA==
-  dependencies:
-    minipass "^2.2.1"
-
-mississippi@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/mississippi/-/mississippi-3.0.0.tgz#ea0a3291f97e0b5e8776b363d5f0a12d94c67022"
-  integrity sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==
-  dependencies:
-    concat-stream "^1.5.0"
-    duplexify "^3.4.2"
-    end-of-stream "^1.1.0"
-    flush-write-stream "^1.0.0"
-    from2 "^2.1.0"
-    parallel-transform "^1.1.0"
-    pump "^3.0.0"
-    pumpify "^1.3.3"
-    stream-each "^1.1.0"
-    through2 "^2.0.0"
-
-mixin-deep@^1.2.0:
-  version "1.3.2"
-  resolved "https://registry.yarnpkg.com/mixin-deep/-/mixin-deep-1.3.2.tgz#1120b43dc359a785dce65b55b82e257ccf479566"
-  integrity sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==
-  dependencies:
-    for-in "^1.0.2"
-    is-extendable "^1.0.1"
-
-mixin-object@^2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/mixin-object/-/mixin-object-2.0.1.tgz#4fb949441dab182540f1fe035ba60e1947a5e57e"
-  integrity sha1-T7lJRB2rGCVA8f4DW6YOGUel5X4=
-  dependencies:
-    for-in "^0.1.3"
-    is-extendable "^0.1.1"
-
-mkdirp@0.5.1, mkdirp@0.5.x, mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1:
-  version "0.5.1"
-  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
-  integrity sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=
-  dependencies:
-    minimist "0.0.8"
-
-moment@2.x, moment@^2.24.0:
-  version "2.24.0"
-  resolved "https://registry.yarnpkg.com/moment/-/moment-2.24.0.tgz#0d055d53f5052aa653c9f6eb68bb5d12bf5c2b5b"
-  integrity sha512-bV7f+6l2QigeBBZSM/6yTNq4P2fNpSWj/0e7jQcy87A8e7o2nAfP/34/2ky5Vw4B9S446EtIhodAzkFCcR4dQg==
-
-move-concurrently@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/move-concurrently/-/move-concurrently-1.0.1.tgz#be2c005fda32e0b29af1f05d7c4b33214c701f92"
-  integrity sha1-viwAX9oy4LKa8fBdfEszIUxwH5I=
-  dependencies:
-    aproba "^1.1.1"
-    copy-concurrently "^1.0.0"
-    fs-write-stream-atomic "^1.0.8"
-    mkdirp "^0.5.1"
-    rimraf "^2.5.4"
-    run-queue "^1.0.3"
-
-ms@2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8"
-  integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=
-
-ms@2.1.1:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.1.tgz#30a5864eb3ebb0a66f2ebe6d727af06a09d86e0a"
-  integrity sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==
-
-ms@^2.1.1:
-  version "2.1.2"
-  resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009"
-  integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
-
-multicast-dns-service-types@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/multicast-dns-service-types/-/multicast-dns-service-types-1.1.0.tgz#899f11d9686e5e05cb91b35d5f0e63b773cfc901"
-  integrity sha1-iZ8R2WhuXgXLkbNdXw5jt3PPyQE=
-
-multicast-dns@^6.0.1:
-  version "6.2.3"
-  resolved "https://registry.yarnpkg.com/multicast-dns/-/multicast-dns-6.2.3.tgz#a0ec7bd9055c4282f790c3c82f4e28db3b31b229"
-  integrity sha512-ji6J5enbMyGRHIAkAOu3WdV8nggqviKCEKtXcOqfphZZtQrmHKycfynJ2V7eVPUA4NhJ6V7Wf4TmGbTwKE9B6g==
-  dependencies:
-    dns-packet "^1.3.1"
-    thunky "^1.0.2"
-
-mutationobserver-shim@^0.3.2:
-  version "0.3.3"
-  resolved "https://registry.yarnpkg.com/mutationobserver-shim/-/mutationobserver-shim-0.3.3.tgz#65869630bc89d7bf8c9cd9cb82188cd955aacd2b"
-  integrity sha512-gciOLNN8Vsf7YzcqRjKzlAJ6y7e+B86u7i3KXes0xfxx/nfLmozlW1Vn+Sc9x3tPIePFgc1AeIFhtRgkqTjzDQ==
-
-mute-stream@0.0.7:
-  version "0.0.7"
-  resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.7.tgz#3075ce93bc21b8fab43e1bc4da7e8115ed1e7bab"
-  integrity sha1-MHXOk7whuPq0PhvE2n6BFe0ee6s=
-
-nan@^2.12.1:
-  version "2.14.0"
-  resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.0.tgz#7818f722027b2459a86f0295d434d1fc2336c52c"
-  integrity sha512-INOFj37C7k3AfaNTtX8RhsTw7qRy7eLET14cROi9+5HAVbbHuIWUHEauBv5qT4Av2tWasiTY1Jw6puUNqRJXQg==
-
-nanomatch@^1.2.9:
-  version "1.2.13"
-  resolved "https://registry.yarnpkg.com/nanomatch/-/nanomatch-1.2.13.tgz#b87a8aa4fc0de8fe6be88895b38983ff265bd119"
-  integrity sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==
-  dependencies:
-    arr-diff "^4.0.0"
-    array-unique "^0.3.2"
-    define-property "^2.0.2"
-    extend-shallow "^3.0.2"
-    fragment-cache "^0.2.1"
-    is-windows "^1.0.2"
-    kind-of "^6.0.2"
-    object.pick "^1.3.0"
-    regex-not "^1.0.0"
-    snapdragon "^0.8.1"
-    to-regex "^3.0.1"
-
-natural-compare@^1.4.0:
-  version "1.4.0"
-  resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7"
-  integrity sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=
-
-needle@^2.2.1:
-  version "2.4.0"
-  resolved "https://registry.yarnpkg.com/needle/-/needle-2.4.0.tgz#6833e74975c444642590e15a750288c5f939b57c"
-  integrity sha512-4Hnwzr3mi5L97hMYeNl8wRW/Onhy4nUKR/lVemJ8gJedxxUyBLm9kkrDColJvoSfwi0jCNhD+xCdOtiGDQiRZg==
-  dependencies:
-    debug "^3.2.6"
-    iconv-lite "^0.4.4"
-    sax "^1.2.4"
-
-negotiator@0.6.2:
-  version "0.6.2"
-  resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.2.tgz#feacf7ccf525a77ae9634436a64883ffeca346fb"
-  integrity sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==
-
-neo-async@^2.5.0, neo-async@^2.6.0:
-  version "2.6.1"
-  resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.1.tgz#ac27ada66167fa8849a6addd837f6b189ad2081c"
-  integrity sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw==
-
-nice-try@^1.0.4:
-  version "1.0.5"
-  resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366"
-  integrity sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==
-
-no-case@^2.2.0:
-  version "2.3.2"
-  resolved "https://registry.yarnpkg.com/no-case/-/no-case-2.3.2.tgz#60b813396be39b3f1288a4c1ed5d1e7d28b464ac"
-  integrity sha512-rmTZ9kz+f3rCvK2TD1Ue/oZlns7OGoIWP4fc3llxxRXlOkHKoWPPWJOfFYpITabSow43QJbRIoHQXtt10VldyQ==
-  dependencies:
-    lower-case "^1.1.1"
-
-node-fetch@^1.0.1:
-  version "1.7.3"
-  resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-1.7.3.tgz#980f6f72d85211a5347c6b2bc18c5b84c3eb47ef"
-  integrity sha512-NhZ4CsKx7cYm2vSrBAr2PvFOe6sWDf0UYLRqA6svUYg7+/TSfVAu49jYC4BvQ4Sms9SZgdqGBgroqfDhJdTyKQ==
-  dependencies:
-    encoding "^0.1.11"
-    is-stream "^1.0.1"
-
-node-forge@0.7.5:
-  version "0.7.5"
-  resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.7.5.tgz#6c152c345ce11c52f465c2abd957e8639cd674df"
-  integrity sha512-MmbQJ2MTESTjt3Gi/3yG1wGpIMhUfcIypUCGtTizFR9IiccFwxSpfp0vtIZlkFclEqERemxfnSdZEMR9VqqEFQ==
-
-node-int64@^0.4.0:
-  version "0.4.0"
-  resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b"
-  integrity sha1-h6kGXNs1XTGC2PlM4RGIuCXGijs=
-
-node-libs-browser@^2.0.0:
-  version "2.2.1"
-  resolved "https://registry.yarnpkg.com/node-libs-browser/-/node-libs-browser-2.2.1.tgz#b64f513d18338625f90346d27b0d235e631f6425"
-  integrity sha512-h/zcD8H9kaDZ9ALUWwlBUDo6TKF8a7qBSCSEGfjTVIYeqsioSKaAX+BN7NgiMGp6iSIXZ3PxgCu8KS3b71YK5Q==
-  dependencies:
-    assert "^1.1.1"
-    browserify-zlib "^0.2.0"
-    buffer "^4.3.0"
-    console-browserify "^1.1.0"
-    constants-browserify "^1.0.0"
-    crypto-browserify "^3.11.0"
-    domain-browser "^1.1.1"
-    events "^3.0.0"
-    https-browserify "^1.0.0"
-    os-browserify "^0.3.0"
-    path-browserify "0.0.1"
-    process "^0.11.10"
-    punycode "^1.2.4"
-    querystring-es3 "^0.2.0"
-    readable-stream "^2.3.3"
-    stream-browserify "^2.0.1"
-    stream-http "^2.7.2"
-    string_decoder "^1.0.0"
-    timers-browserify "^2.0.4"
-    tty-browserify "0.0.0"
-    url "^0.11.0"
-    util "^0.11.0"
-    vm-browserify "^1.0.1"
-
-node-modules-regexp@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/node-modules-regexp/-/node-modules-regexp-1.0.0.tgz#8d9dbe28964a4ac5712e9131642107c71e90ec40"
-  integrity sha1-jZ2+KJZKSsVxLpExZCEHxx6Q7EA=
-
-node-notifier@^5.4.2:
-  version "5.4.3"
-  resolved "https://registry.yarnpkg.com/node-notifier/-/node-notifier-5.4.3.tgz#cb72daf94c93904098e28b9c590fd866e464bd50"
-  integrity sha512-M4UBGcs4jeOK9CjTsYwkvH6/MzuUmGCyTW+kCY7uO+1ZVr0+FHGdPdIf5CCLqAaxnRrWidyoQlNkMIIVwbKB8Q==
-  dependencies:
-    growly "^1.3.0"
-    is-wsl "^1.1.0"
-    semver "^5.5.0"
-    shellwords "^0.1.1"
-    which "^1.3.0"
-
-node-pre-gyp@^0.12.0:
-  version "0.12.0"
-  resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.12.0.tgz#39ba4bb1439da030295f899e3b520b7785766149"
-  integrity sha512-4KghwV8vH5k+g2ylT+sLTjy5wmUOb9vPhnM8NHvRf9dHmnW/CndrFXy2aRPaPST6dugXSdHXfeaHQm77PIz/1A==
-  dependencies:
-    detect-libc "^1.0.2"
-    mkdirp "^0.5.1"
-    needle "^2.2.1"
-    nopt "^4.0.1"
-    npm-packlist "^1.1.6"
-    npmlog "^4.0.2"
-    rc "^1.2.7"
-    rimraf "^2.6.1"
-    semver "^5.3.0"
-    tar "^4"
-
-node-releases@^1.1.25:
-  version "1.1.28"
-  resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.28.tgz#503c3c70d0e4732b84e7aaa2925fbdde10482d4a"
-  integrity sha512-AQw4emh6iSXnCpDiFe0phYcThiccmkNWMZnFZ+lDJjAP8J0m2fVd59duvUUyuTirQOhIAajTFkzG6FHCLBO59g==
-  dependencies:
-    semver "^5.3.0"
-
-nopt@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.1.tgz#d0d4685afd5415193c8c7505602d0d17cd64474d"
-  integrity sha1-0NRoWv1UFRk8jHUFYC0NF81kR00=
-  dependencies:
-    abbrev "1"
-    osenv "^0.1.4"
-
-normalize-package-data@^2.3.2:
-  version "2.5.0"
-  resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.5.0.tgz#e66db1838b200c1dfc233225d12cb36520e234a8"
-  integrity sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==
-  dependencies:
-    hosted-git-info "^2.1.4"
-    resolve "^1.10.0"
-    semver "2 || 3 || 4 || 5"
-    validate-npm-package-license "^3.0.1"
-
-normalize-path@^2.1.1:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9"
-  integrity sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=
-  dependencies:
-    remove-trailing-separator "^1.0.1"
-
-normalize-path@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65"
-  integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==
-
-normalize-range@^0.1.2:
-  version "0.1.2"
-  resolved "https://registry.yarnpkg.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942"
-  integrity sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=
-
-normalize-url@^3.0.0:
-  version "3.3.0"
-  resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-3.3.0.tgz#b2e1c4dc4f7c6d57743df733a4f5978d18650559"
-  integrity sha512-U+JJi7duF1o+u2pynbp2zXDW2/PADgC30f0GsHZtRh+HOcXHnw137TrNlyxxRvWW5fjKd3bcLHPxofWuCjaeZg==
-
-npm-bundled@^1.0.1:
-  version "1.0.6"
-  resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.0.6.tgz#e7ba9aadcef962bb61248f91721cd932b3fe6bdd"
-  integrity sha512-8/JCaftHwbd//k6y2rEWp6k1wxVfpFzB6t1p825+cUb7Ym2XQfhwIC5KwhrvzZRJu+LtDE585zVaS32+CGtf0g==
-
-npm-packlist@^1.1.6:
-  version "1.4.4"
-  resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.4.4.tgz#866224233850ac534b63d1a6e76050092b5d2f44"
-  integrity sha512-zTLo8UcVYtDU3gdeaFu2Xu0n0EvelfHDGuqtNIn5RO7yQj4H1TqNdBc/yZjxnWA0PVB8D3Woyp0i5B43JwQ6Vw==
-  dependencies:
-    ignore-walk "^3.0.1"
-    npm-bundled "^1.0.1"
-
-npm-run-path@^2.0.0:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f"
-  integrity sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=
-  dependencies:
-    path-key "^2.0.0"
-
-npmlog@^4.0.2:
-  version "4.1.2"
-  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.2.tgz#08a7f2a8bf734604779a9efa4ad5cc717abb954b"
-  integrity sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg==
-  dependencies:
-    are-we-there-yet "~1.1.2"
-    console-control-strings "~1.1.0"
-    gauge "~2.7.3"
-    set-blocking "~2.0.0"
-
-nth-check@^1.0.2, nth-check@~1.0.1:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-1.0.2.tgz#b2bd295c37e3dd58a3bf0700376663ba4d9cf05c"
-  integrity sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==
-  dependencies:
-    boolbase "~1.0.0"
-
-num2fraction@^1.2.2:
-  version "1.2.2"
-  resolved "https://registry.yarnpkg.com/num2fraction/-/num2fraction-1.2.2.tgz#6f682b6a027a4e9ddfa4564cd2589d1d4e669ede"
-  integrity sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4=
-
-number-is-nan@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d"
-  integrity sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=
-
-nwsapi@^2.0.7, nwsapi@^2.1.3:
-  version "2.1.4"
-  resolved "https://registry.yarnpkg.com/nwsapi/-/nwsapi-2.1.4.tgz#e006a878db23636f8e8a67d33ca0e4edf61a842f"
-  integrity sha512-iGfd9Y6SFdTNldEy2L0GUhcarIutFmk+MPWIn9dmj8NMIup03G08uUF2KGbbmv/Ux4RT0VZJoP/sVbWA6d/VIw==
-
-oauth-sign@~0.9.0:
-  version "0.9.0"
-  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455"
-  integrity sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==
-
-object-assign@4.1.1, object-assign@4.x, object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1:
-  version "4.1.1"
-  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
-  integrity sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=
-
-object-copy@^0.1.0:
-  version "0.1.0"
-  resolved "https://registry.yarnpkg.com/object-copy/-/object-copy-0.1.0.tgz#7e7d858b781bd7c991a41ba975ed3812754e998c"
-  integrity sha1-fn2Fi3gb18mRpBupde04EnVOmYw=
-  dependencies:
-    copy-descriptor "^0.1.0"
-    define-property "^0.2.5"
-    kind-of "^3.0.3"
-
-object-hash@^1.1.4:
-  version "1.3.1"
-  resolved "https://registry.yarnpkg.com/object-hash/-/object-hash-1.3.1.tgz#fde452098a951cb145f039bb7d455449ddc126df"
-  integrity sha512-OSuu/pU4ENM9kmREg0BdNrUDIl1heYa4mBZacJc+vVWz4GtAwu7jO8s4AIt2aGRUTqxykpWzI3Oqnsm13tTMDA==
-
-object-is@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/object-is/-/object-is-1.0.1.tgz#0aa60ec9989a0b3ed795cf4d06f62cf1ad6539b6"
-  integrity sha1-CqYOyZiaCz7Xlc9NBvYs8a1lObY=
-
-object-keys@^1.0.11, object-keys@^1.0.12, object-keys@^1.1.1:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e"
-  integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==
-
-object-visit@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/object-visit/-/object-visit-1.0.1.tgz#f79c4493af0c5377b59fe39d395e41042dd045bb"
-  integrity sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=
-  dependencies:
-    isobject "^3.0.0"
-
-object.assign@^4.1.0:
-  version "4.1.0"
-  resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da"
-  integrity sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==
-  dependencies:
-    define-properties "^1.1.2"
-    function-bind "^1.1.1"
-    has-symbols "^1.0.0"
-    object-keys "^1.0.11"
-
-object.fromentries@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/object.fromentries/-/object.fromentries-2.0.0.tgz#49a543d92151f8277b3ac9600f1e930b189d30ab"
-  integrity sha512-9iLiI6H083uiqUuvzyY6qrlmc/Gz8hLQFOcb/Ri/0xXFkSNS3ctV+CbE6yM2+AnkYfOB3dGjdzC0wrMLIhQICA==
-  dependencies:
-    define-properties "^1.1.2"
-    es-abstract "^1.11.0"
-    function-bind "^1.1.1"
-    has "^1.0.1"
-
-object.getownpropertydescriptors@^2.0.3:
-  version "2.0.3"
-  resolved "https://registry.yarnpkg.com/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.0.3.tgz#8758c846f5b407adab0f236e0986f14b051caa16"
-  integrity sha1-h1jIRvW0B62rDyNuCYbxSwUcqhY=
-  dependencies:
-    define-properties "^1.1.2"
-    es-abstract "^1.5.1"
-
-object.pick@^1.3.0:
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/object.pick/-/object.pick-1.3.0.tgz#87a10ac4c1694bd2e1cbf53591a66141fb5dd747"
-  integrity sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=
-  dependencies:
-    isobject "^3.0.1"
-
-object.values@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.1.0.tgz#bf6810ef5da3e5325790eaaa2be213ea84624da9"
-  integrity sha512-8mf0nKLAoFX6VlNVdhGj31SVYpaNFtUnuoOXWyFEstsWRgU837AK+JYM0iAxwkSzGRbwn8cbFmgbyxj1j4VbXg==
-  dependencies:
-    define-properties "^1.1.3"
-    es-abstract "^1.12.0"
-    function-bind "^1.1.1"
-    has "^1.0.3"
-
-obuf@^1.0.0, obuf@^1.1.2:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/obuf/-/obuf-1.1.2.tgz#09bea3343d41859ebd446292d11c9d4db619084e"
-  integrity sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==
-
-omit.js@^1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/omit.js/-/omit.js-1.0.2.tgz#91a14f0eba84066dfa015bf30e474c47f30bc858"
-  integrity sha512-/QPc6G2NS+8d4L/cQhbk6Yit1WTB6Us2g84A7A/1+w9d/eRGHyEqC5kkQtHVoHZ5NFWGG7tUGgrhVZwgZanKrQ==
-  dependencies:
-    babel-runtime "^6.23.0"
-
-on-finished@~2.3.0:
-  version "2.3.0"
-  resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947"
-  integrity sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=
-  dependencies:
-    ee-first "1.1.1"
-
-on-headers@~1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.2.tgz#772b0ae6aaa525c399e489adfad90c403eb3c28f"
-  integrity sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==
-
-once@^1.3.0, once@^1.3.1, once@^1.4.0:
-  version "1.4.0"
-  resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
-  integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E=
-  dependencies:
-    wrappy "1"
-
-onetime@^2.0.0:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/onetime/-/onetime-2.0.1.tgz#067428230fd67443b2794b22bba528b6867962d4"
-  integrity sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ=
-  dependencies:
-    mimic-fn "^1.0.0"
-
-open@^6.3.0:
-  version "6.4.0"
-  resolved "https://registry.yarnpkg.com/open/-/open-6.4.0.tgz#5c13e96d0dc894686164f18965ecfe889ecfc8a9"
-  integrity sha512-IFenVPgF70fSm1keSd2iDBIDIBZkroLeuffXq+wKTzTJlBpesFWojV9lb8mzOfaAzM1sr7HQHuO0vtV0zYekGg==
-  dependencies:
-    is-wsl "^1.1.0"
-
-opn@^5.1.0:
-  version "5.5.0"
-  resolved "https://registry.yarnpkg.com/opn/-/opn-5.5.0.tgz#fc7164fab56d235904c51c3b27da6758ca3b9bfc"
-  integrity sha512-PqHpggC9bLV0VeWcdKhkpxY+3JTzetLSqTCWL/z/tFIbI6G8JCjondXklT1JinczLz2Xib62sSp0T/gKT4KksA==
-  dependencies:
-    is-wsl "^1.1.0"
-
-optimist@^0.6.1:
-  version "0.6.1"
-  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686"
-  integrity sha1-2j6nRob6IaGaERwybpDrFaAZZoY=
-  dependencies:
-    minimist "~0.0.1"
-    wordwrap "~0.0.2"
-
-optimize-css-assets-webpack-plugin@5.0.1:
-  version "5.0.1"
-  resolved "https://registry.yarnpkg.com/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-5.0.1.tgz#9eb500711d35165b45e7fd60ba2df40cb3eb9159"
-  integrity sha512-Rqm6sSjWtx9FchdP0uzTQDc7GXDKnwVEGoSxjezPkzMewx7gEWE9IMUYKmigTRC4U3RaNSwYVnUDLuIdtTpm0A==
-  dependencies:
-    cssnano "^4.1.0"
-    last-call-webpack-plugin "^3.0.0"
-
-optionator@^0.8.1, optionator@^0.8.2:
-  version "0.8.2"
-  resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.2.tgz#364c5e409d3f4d6301d6c0b4c05bba50180aeb64"
-  integrity sha1-NkxeQJ0/TWMB1sC0wFu6UBgK62Q=
-  dependencies:
-    deep-is "~0.1.3"
-    fast-levenshtein "~2.0.4"
-    levn "~0.3.0"
-    prelude-ls "~1.1.2"
-    type-check "~0.3.2"
-    wordwrap "~1.0.0"
-
-original@^1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/original/-/original-1.0.2.tgz#e442a61cffe1c5fd20a65f3261c26663b303f25f"
-  integrity sha512-hyBVl6iqqUOJ8FqRe+l/gS8H+kKYjrEndd5Pm1MfBtsEKA038HkkdbAl/72EAXGyonD/PFsvmVG+EvcIpliMBg==
-  dependencies:
-    url-parse "^1.4.3"
-
-os-browserify@^0.3.0:
-  version "0.3.0"
-  resolved "https://registry.yarnpkg.com/os-browserify/-/os-browserify-0.3.0.tgz#854373c7f5c2315914fc9bfc6bd8238fdda1ec27"
-  integrity sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=
-
-os-homedir@^1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3"
-  integrity sha1-/7xJiDNuDoM94MFox+8VISGqf7M=
-
-os-locale@^3.0.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-3.1.0.tgz#a802a6ee17f24c10483ab9935719cef4ed16bf1a"
-  integrity sha512-Z8l3R4wYWM40/52Z+S265okfFj8Kt2cC2MKY+xNi3kFs+XGI7WXu/I309QQQYbRW4ijiZ+yxs9pqEhJh0DqW3Q==
-  dependencies:
-    execa "^1.0.0"
-    lcid "^2.0.0"
-    mem "^4.0.0"
-
-os-tmpdir@^1.0.0, os-tmpdir@~1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274"
-  integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=
-
-osenv@^0.1.4:
-  version "0.1.5"
-  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.5.tgz#85cdfafaeb28e8677f416e287592b5f3f49ea410"
-  integrity sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g==
-  dependencies:
-    os-homedir "^1.0.0"
-    os-tmpdir "^1.0.0"
-
-p-defer@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-1.0.0.tgz#9f6eb182f6c9aa8cd743004a7d4f96b196b0fb0c"
-  integrity sha1-n26xgvbJqozXQwBKfU+WsZaw+ww=
-
-p-each-series@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/p-each-series/-/p-each-series-1.0.0.tgz#930f3d12dd1f50e7434457a22cd6f04ac6ad7f71"
-  integrity sha1-kw89Et0fUOdDRFeiLNbwSsatf3E=
-  dependencies:
-    p-reduce "^1.0.0"
-
-p-finally@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae"
-  integrity sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=
-
-p-is-promise@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/p-is-promise/-/p-is-promise-2.1.0.tgz#918cebaea248a62cf7ffab8e3bca8c5f882fc42e"
-  integrity sha512-Y3W0wlRPK8ZMRbNq97l4M5otioeA5lm1z7bkNkxCka8HSPjR0xRWmpCmc9utiaLP9Jb1eD8BgeIxTW4AIF45Pg==
-
-p-limit@^1.1.0:
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.3.0.tgz#b86bd5f0c25690911c7590fcbfc2010d54b3ccb8"
-  integrity sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==
-  dependencies:
-    p-try "^1.0.0"
-
-p-limit@^2.0.0:
-  version "2.2.1"
-  resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.2.1.tgz#aa07a788cc3151c939b5131f63570f0dd2009537"
-  integrity sha512-85Tk+90UCVWvbDavCLKPOLC9vvY8OwEX/RtKF+/1OADJMVlFfEHOiMTPVyxg7mk/dKa+ipdHm0OUkTvCpMTuwg==
-  dependencies:
-    p-try "^2.0.0"
-
-p-locate@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43"
-  integrity sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=
-  dependencies:
-    p-limit "^1.1.0"
-
-p-locate@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4"
-  integrity sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==
-  dependencies:
-    p-limit "^2.0.0"
-
-p-map@^1.1.1:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/p-map/-/p-map-1.2.0.tgz#e4e94f311eabbc8633a1e79908165fca26241b6b"
-  integrity sha512-r6zKACMNhjPJMTl8KcFH4li//gkrXWfbD6feV8l6doRHlzljFWGJ2AP6iKaCJXyZmAUMOPtvbW7EXkbWO/pLEA==
-
-p-reduce@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/p-reduce/-/p-reduce-1.0.0.tgz#18c2b0dd936a4690a529f8231f58a0fdb6a47dfa"
-  integrity sha1-GMKw3ZNqRpClKfgjH1ig/bakffo=
-
-p-try@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/p-try/-/p-try-1.0.0.tgz#cbc79cdbaf8fd4228e13f621f2b1a237c1b207b3"
-  integrity sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=
-
-p-try@^2.0.0:
-  version "2.2.0"
-  resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6"
-  integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==
-
-pako@~1.0.5:
-  version "1.0.10"
-  resolved "https://registry.yarnpkg.com/pako/-/pako-1.0.10.tgz#4328badb5086a426aa90f541977d4955da5c9732"
-  integrity sha512-0DTvPVU3ed8+HNXOu5Bs+o//Mbdj9VNQMUOe9oKCwh8l0GNwpTDMKCWbRjgtD291AWnkAgkqA/LOnQS8AmS1tw==
-
-parallel-transform@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/parallel-transform/-/parallel-transform-1.1.0.tgz#d410f065b05da23081fcd10f28854c29bda33b06"
-  integrity sha1-1BDwZbBdojCB/NEPKIVMKb2jOwY=
-  dependencies:
-    cyclist "~0.2.2"
-    inherits "^2.0.3"
-    readable-stream "^2.1.5"
-
-param-case@2.1.x:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/param-case/-/param-case-2.1.1.tgz#df94fd8cf6531ecf75e6bef9a0858fbc72be2247"
-  integrity sha1-35T9jPZTHs915r75oIWPvHK+Ikc=
-  dependencies:
-    no-case "^2.2.0"
-
-parent-module@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2"
-  integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==
-  dependencies:
-    callsites "^3.0.0"
-
-parse-asn1@^5.0.0:
-  version "5.1.4"
-  resolved "https://registry.yarnpkg.com/parse-asn1/-/parse-asn1-5.1.4.tgz#37f6628f823fbdeb2273b4d540434a22f3ef1fcc"
-  integrity sha512-Qs5duJcuvNExRfFZ99HDD3z4mAi3r9Wl/FOjEOijlxwCZs7E7mW2vjTpgQ4J8LpTF8x5v+1Vn5UQFejmWT11aw==
-  dependencies:
-    asn1.js "^4.0.0"
-    browserify-aes "^1.0.0"
-    create-hash "^1.1.0"
-    evp_bytestokey "^1.0.0"
-    pbkdf2 "^3.0.3"
-    safe-buffer "^5.1.1"
-
-parse-json@^2.2.0:
-  version "2.2.0"
-  resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9"
-  integrity sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=
-  dependencies:
-    error-ex "^1.2.0"
-
-parse-json@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-4.0.0.tgz#be35f5425be1f7f6c747184f98a788cb99477ee0"
-  integrity sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=
-  dependencies:
-    error-ex "^1.3.1"
-    json-parse-better-errors "^1.0.1"
-
-parse5@4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/parse5/-/parse5-4.0.0.tgz#6d78656e3da8d78b4ec0b906f7c08ef1dfe3f608"
-  integrity sha512-VrZ7eOd3T1Fk4XWNXMgiGBK/z0MG48BWG2uQNU4I72fkQuKUTZpl+u9k+CxEG0twMVzSmXEEz12z5Fnw1jIQFA==
-
-parse5@5.1.0:
-  version "5.1.0"
-  resolved "https://registry.yarnpkg.com/parse5/-/parse5-5.1.0.tgz#c59341c9723f414c452975564c7c00a68d58acd2"
-  integrity sha512-fxNG2sQjHvlVAYmzBZS9YlDp6PTSSDwa98vkD4QgVDDCAo84z5X1t5XyJQ62ImdLXx5NdIIfihey6xpum9/gRQ==
-
-parseurl@~1.3.2, parseurl@~1.3.3:
-  version "1.3.3"
-  resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4"
-  integrity sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==
-
-pascalcase@^0.1.1:
-  version "0.1.1"
-  resolved "https://registry.yarnpkg.com/pascalcase/-/pascalcase-0.1.1.tgz#b363e55e8006ca6fe21784d2db22bd15d7917f14"
-  integrity sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=
-
-path-browserify@0.0.1:
-  version "0.0.1"
-  resolved "https://registry.yarnpkg.com/path-browserify/-/path-browserify-0.0.1.tgz#e6c4ddd7ed3aa27c68a20cc4e50e1a4ee83bbc4a"
-  integrity sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ==
-
-path-dirname@^1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/path-dirname/-/path-dirname-1.0.2.tgz#cc33d24d525e099a5388c0336c6e32b9160609e0"
-  integrity sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=
-
-path-exists@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b"
-  integrity sha1-D+tsZPD8UY2adU3V77YscCJ2H0s=
-  dependencies:
-    pinkie-promise "^2.0.0"
-
-path-exists@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515"
-  integrity sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=
-
-path-is-absolute@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
-  integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18=
-
-path-is-inside@^1.0.1, path-is-inside@^1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53"
-  integrity sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=
-
-path-key@^2.0.0, path-key@^2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40"
-  integrity sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=
-
-path-parse@^1.0.6:
-  version "1.0.6"
-  resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.6.tgz#d62dbb5679405d72c4737ec58600e9ddcf06d24c"
-  integrity sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==
-
-path-to-regexp@0.1.7:
-  version "0.1.7"
-  resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c"
-  integrity sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=
-
-path-to-regexp@^1.7.0:
-  version "1.7.0"
-  resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.7.0.tgz#59fde0f435badacba103a84e9d3bc64e96b9937d"
-  integrity sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=
-  dependencies:
-    isarray "0.0.1"
-
-path-type@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/path-type/-/path-type-2.0.0.tgz#f012ccb8415b7096fc2daa1054c3d72389594c73"
-  integrity sha1-8BLMuEFbcJb8LaoQVMPXI4lZTHM=
-  dependencies:
-    pify "^2.0.0"
-
-path-type@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/path-type/-/path-type-3.0.0.tgz#cef31dc8e0a1a3bb0d105c0cd97cf3bf47f4e36f"
-  integrity sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==
-  dependencies:
-    pify "^3.0.0"
-
-pbkdf2@^3.0.3:
-  version "3.0.17"
-  resolved "https://registry.yarnpkg.com/pbkdf2/-/pbkdf2-3.0.17.tgz#976c206530617b14ebb32114239f7b09336e93a6"
-  integrity sha512-U/il5MsrZp7mGg3mSQfn742na2T+1/vHDCG5/iTI3X9MKUuYUZVLQhyRsg06mCgDBTd57TxzgZt7P+fYfjRLtA==
-  dependencies:
-    create-hash "^1.1.2"
-    create-hmac "^1.1.4"
-    ripemd160 "^2.0.1"
-    safe-buffer "^5.0.1"
-    sha.js "^2.4.8"
-
-performance-now@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b"
-  integrity sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=
-
-pify@^2.0.0:
-  version "2.3.0"
-  resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c"
-  integrity sha1-7RQaasBDqEnqWISY59yosVMw6Qw=
-
-pify@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176"
-  integrity sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=
-
-pify@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/pify/-/pify-4.0.1.tgz#4b2cd25c50d598735c50292224fd8c6df41e3231"
-  integrity sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==
-
-pinkie-promise@^2.0.0:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa"
-  integrity sha1-ITXW36ejWMBprJsXh3YogihFD/o=
-  dependencies:
-    pinkie "^2.0.0"
-
-pinkie@^2.0.0:
-  version "2.0.4"
-  resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870"
-  integrity sha1-clVrgM+g1IqXToDnckjoDtT3+HA=
-
-pirates@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/pirates/-/pirates-4.0.1.tgz#643a92caf894566f91b2b986d2c66950a8e2fb87"
-  integrity sha512-WuNqLTbMI3tmfef2TKxlQmAiLHKtFhlsCZnPIpuv2Ow0RDVO8lfy1Opf4NUzlMXLjPl+Men7AuVdX6TA+s+uGA==
-  dependencies:
-    node-modules-regexp "^1.0.0"
-
-pkg-dir@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-1.0.0.tgz#7a4b508a8d5bb2d629d447056ff4e9c9314cf3d4"
-  integrity sha1-ektQio1bstYp1EcFb/TpyTFM89Q=
-  dependencies:
-    find-up "^1.0.0"
-
-pkg-dir@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-2.0.0.tgz#f6d5d1109e19d63edf428e0bd57e12777615334b"
-  integrity sha1-9tXREJ4Z1j7fQo4L1X4Sd3YVM0s=
-  dependencies:
-    find-up "^2.1.0"
-
-pkg-dir@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-3.0.0.tgz#2749020f239ed990881b1f71210d51eb6523bea3"
-  integrity sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==
-  dependencies:
-    find-up "^3.0.0"
-
-pkg-up@2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/pkg-up/-/pkg-up-2.0.0.tgz#c819ac728059a461cab1c3889a2be3c49a004d7f"
-  integrity sha1-yBmscoBZpGHKscOImivjxJoATX8=
-  dependencies:
-    find-up "^2.1.0"
-
-pn@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/pn/-/pn-1.1.0.tgz#e2f4cef0e219f463c179ab37463e4e1ecdccbafb"
-  integrity sha512-2qHaIQr2VLRFoxe2nASzsV6ef4yOOH+Fi9FBOVH6cqeSgUnoyySPZkxzLuzd+RYOQTRpROA0ztTMqxROKSb/nA==
-
-pnp-webpack-plugin@1.2.1:
-  version "1.2.1"
-  resolved "https://registry.yarnpkg.com/pnp-webpack-plugin/-/pnp-webpack-plugin-1.2.1.tgz#cd9d698df2a6fcf7255093c1c9511adf65b9421b"
-  integrity sha512-W6GctK7K2qQiVR+gYSv/Gyt6jwwIH4vwdviFqx+Y2jAtVf5eZyYIDf5Ac2NCDMBiX5yWscBLZElPTsyA1UtVVA==
-  dependencies:
-    ts-pnp "^1.0.0"
-
-portfinder@^1.0.9:
-  version "1.0.23"
-  resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-1.0.23.tgz#894db4bcc5daf02b6614517ce89cd21a38226b82"
-  integrity sha512-B729mL/uLklxtxuiJKfQ84WPxNw5a7Yhx3geQZdcA4GjNjZSTSSMMWyoennMVnTWSmAR0lMdzWYN0JLnHrg1KQ==
-  dependencies:
-    async "^1.5.2"
-    debug "^2.2.0"
-    mkdirp "0.5.x"
-
-posix-character-classes@^0.1.0:
-  version "0.1.1"
-  resolved "https://registry.yarnpkg.com/posix-character-classes/-/posix-character-classes-0.1.1.tgz#01eac0fe3b5af71a2a6c02feabb8c1fef7e00eab"
-  integrity sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=
-
-postcss-attribute-case-insensitive@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-attribute-case-insensitive/-/postcss-attribute-case-insensitive-4.0.1.tgz#b2a721a0d279c2f9103a36331c88981526428cc7"
-  integrity sha512-L2YKB3vF4PetdTIthQVeT+7YiSzMoNMLLYxPXXppOOP7NoazEAy45sh2LvJ8leCQjfBcfkYQs8TtCcQjeZTp8A==
-  dependencies:
-    postcss "^7.0.2"
-    postcss-selector-parser "^5.0.0"
-
-postcss-browser-comments@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-browser-comments/-/postcss-browser-comments-2.0.0.tgz#dc48d6a8ddbff188a80a000b7393436cb18aed88"
-  integrity sha512-xGG0UvoxwBc4Yx4JX3gc0RuDl1kc4bVihCzzk6UC72YPfq5fu3c717Nu8Un3nvnq1BJ31gBnFXIG/OaUTnpHgA==
-  dependencies:
-    postcss "^7.0.2"
-
-postcss-calc@^7.0.1:
-  version "7.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-calc/-/postcss-calc-7.0.1.tgz#36d77bab023b0ecbb9789d84dcb23c4941145436"
-  integrity sha512-oXqx0m6tb4N3JGdmeMSc/i91KppbYsFZKdH0xMOqK8V1rJlzrKlTdokz8ozUXLVejydRN6u2IddxpcijRj2FqQ==
-  dependencies:
-    css-unit-converter "^1.1.1"
-    postcss "^7.0.5"
-    postcss-selector-parser "^5.0.0-rc.4"
-    postcss-value-parser "^3.3.1"
-
-postcss-color-functional-notation@^2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-color-functional-notation/-/postcss-color-functional-notation-2.0.1.tgz#5efd37a88fbabeb00a2966d1e53d98ced93f74e0"
-  integrity sha512-ZBARCypjEDofW4P6IdPVTLhDNXPRn8T2s1zHbZidW6rPaaZvcnCS2soYFIQJrMZSxiePJ2XIYTlcb2ztr/eT2g==
-  dependencies:
-    postcss "^7.0.2"
-    postcss-values-parser "^2.0.0"
-
-postcss-color-gray@^5.0.0:
-  version "5.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-color-gray/-/postcss-color-gray-5.0.0.tgz#532a31eb909f8da898ceffe296fdc1f864be8547"
-  integrity sha512-q6BuRnAGKM/ZRpfDascZlIZPjvwsRye7UDNalqVz3s7GDxMtqPY6+Q871liNxsonUw8oC61OG+PSaysYpl1bnw==
-  dependencies:
-    "@csstools/convert-colors" "^1.4.0"
-    postcss "^7.0.5"
-    postcss-values-parser "^2.0.0"
-
-postcss-color-hex-alpha@^5.0.2:
-  version "5.0.3"
-  resolved "https://registry.yarnpkg.com/postcss-color-hex-alpha/-/postcss-color-hex-alpha-5.0.3.tgz#a8d9ca4c39d497c9661e374b9c51899ef0f87388"
-  integrity sha512-PF4GDel8q3kkreVXKLAGNpHKilXsZ6xuu+mOQMHWHLPNyjiUBOr75sp5ZKJfmv1MCus5/DWUGcK9hm6qHEnXYw==
-  dependencies:
-    postcss "^7.0.14"
-    postcss-values-parser "^2.0.1"
-
-postcss-color-mod-function@^3.0.3:
-  version "3.0.3"
-  resolved "https://registry.yarnpkg.com/postcss-color-mod-function/-/postcss-color-mod-function-3.0.3.tgz#816ba145ac11cc3cb6baa905a75a49f903e4d31d"
-  integrity sha512-YP4VG+xufxaVtzV6ZmhEtc+/aTXH3d0JLpnYfxqTvwZPbJhWqp8bSY3nfNzNRFLgB4XSaBA82OE4VjOOKpCdVQ==
-  dependencies:
-    "@csstools/convert-colors" "^1.4.0"
-    postcss "^7.0.2"
-    postcss-values-parser "^2.0.0"
-
-postcss-color-rebeccapurple@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-color-rebeccapurple/-/postcss-color-rebeccapurple-4.0.1.tgz#c7a89be872bb74e45b1e3022bfe5748823e6de77"
-  integrity sha512-aAe3OhkS6qJXBbqzvZth2Au4V3KieR5sRQ4ptb2b2O8wgvB3SJBsdG+jsn2BZbbwekDG8nTfcCNKcSfe/lEy8g==
-  dependencies:
-    postcss "^7.0.2"
-    postcss-values-parser "^2.0.0"
-
-postcss-colormin@^4.0.3:
-  version "4.0.3"
-  resolved "https://registry.yarnpkg.com/postcss-colormin/-/postcss-colormin-4.0.3.tgz#ae060bce93ed794ac71264f08132d550956bd381"
-  integrity sha512-WyQFAdDZpExQh32j0U0feWisZ0dmOtPl44qYmJKkq9xFWY3p+4qnRzCHeNrkeRhwPHz9bQ3mo0/yVkaply0MNw==
-  dependencies:
-    browserslist "^4.0.0"
-    color "^3.0.0"
-    has "^1.0.0"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-convert-values@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-convert-values/-/postcss-convert-values-4.0.1.tgz#ca3813ed4da0f812f9d43703584e449ebe189a7f"
-  integrity sha512-Kisdo1y77KUC0Jmn0OXU/COOJbzM8cImvw1ZFsBgBgMgb1iL23Zs/LXRe3r+EZqM3vGYKdQ2YJVQ5VkJI+zEJQ==
-  dependencies:
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-custom-media@^7.0.7:
-  version "7.0.8"
-  resolved "https://registry.yarnpkg.com/postcss-custom-media/-/postcss-custom-media-7.0.8.tgz#fffd13ffeffad73621be5f387076a28b00294e0c"
-  integrity sha512-c9s5iX0Ge15o00HKbuRuTqNndsJUbaXdiNsksnVH8H4gdc+zbLzr/UasOwNG6CTDpLFekVY4672eWdiiWu2GUg==
-  dependencies:
-    postcss "^7.0.14"
-
-postcss-custom-properties@^8.0.9:
-  version "8.0.11"
-  resolved "https://registry.yarnpkg.com/postcss-custom-properties/-/postcss-custom-properties-8.0.11.tgz#2d61772d6e92f22f5e0d52602df8fae46fa30d97"
-  integrity sha512-nm+o0eLdYqdnJ5abAJeXp4CEU1c1k+eB2yMCvhgzsds/e0umabFrN6HoTy/8Q4K5ilxERdl/JD1LO5ANoYBeMA==
-  dependencies:
-    postcss "^7.0.17"
-    postcss-values-parser "^2.0.1"
-
-postcss-custom-selectors@^5.1.2:
-  version "5.1.2"
-  resolved "https://registry.yarnpkg.com/postcss-custom-selectors/-/postcss-custom-selectors-5.1.2.tgz#64858c6eb2ecff2fb41d0b28c9dd7b3db4de7fba"
-  integrity sha512-DSGDhqinCqXqlS4R7KGxL1OSycd1lydugJ1ky4iRXPHdBRiozyMHrdu0H3o7qNOCiZwySZTUI5MV0T8QhCLu+w==
-  dependencies:
-    postcss "^7.0.2"
-    postcss-selector-parser "^5.0.0-rc.3"
-
-postcss-dir-pseudo-class@^5.0.0:
-  version "5.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-dir-pseudo-class/-/postcss-dir-pseudo-class-5.0.0.tgz#6e3a4177d0edb3abcc85fdb6fbb1c26dabaeaba2"
-  integrity sha512-3pm4oq8HYWMZePJY+5ANriPs3P07q+LW6FAdTlkFH2XqDdP4HeeJYMOzn0HYLhRSjBO3fhiqSwwU9xEULSrPgw==
-  dependencies:
-    postcss "^7.0.2"
-    postcss-selector-parser "^5.0.0-rc.3"
-
-postcss-discard-comments@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-discard-comments/-/postcss-discard-comments-4.0.2.tgz#1fbabd2c246bff6aaad7997b2b0918f4d7af4033"
-  integrity sha512-RJutN259iuRf3IW7GZyLM5Sw4GLTOH8FmsXBnv8Ab/Tc2k4SR4qbV4DNbyyY4+Sjo362SyDmW2DQ7lBSChrpkg==
-  dependencies:
-    postcss "^7.0.0"
-
-postcss-discard-duplicates@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-discard-duplicates/-/postcss-discard-duplicates-4.0.2.tgz#3fe133cd3c82282e550fc9b239176a9207b784eb"
-  integrity sha512-ZNQfR1gPNAiXZhgENFfEglF93pciw0WxMkJeVmw8eF+JZBbMD7jp6C67GqJAXVZP2BWbOztKfbsdmMp/k8c6oQ==
-  dependencies:
-    postcss "^7.0.0"
-
-postcss-discard-empty@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-discard-empty/-/postcss-discard-empty-4.0.1.tgz#c8c951e9f73ed9428019458444a02ad90bb9f765"
-  integrity sha512-B9miTzbznhDjTfjvipfHoqbWKwd0Mj+/fL5s1QOz06wufguil+Xheo4XpOnc4NqKYBCNqqEzgPv2aPBIJLox0w==
-  dependencies:
-    postcss "^7.0.0"
-
-postcss-discard-overridden@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-discard-overridden/-/postcss-discard-overridden-4.0.1.tgz#652aef8a96726f029f5e3e00146ee7a4e755ff57"
-  integrity sha512-IYY2bEDD7g1XM1IDEsUT4//iEYCxAmP5oDSFMVU/JVvT7gh+l4fmjciLqGgwjdWpQIdb0Che2VX00QObS5+cTg==
-  dependencies:
-    postcss "^7.0.0"
-
-postcss-double-position-gradients@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-double-position-gradients/-/postcss-double-position-gradients-1.0.0.tgz#fc927d52fddc896cb3a2812ebc5df147e110522e"
-  integrity sha512-G+nV8EnQq25fOI8CH/B6krEohGWnF5+3A6H/+JEpOncu5dCnkS1QQ6+ct3Jkaepw1NGVqqOZH6lqrm244mCftA==
-  dependencies:
-    postcss "^7.0.5"
-    postcss-values-parser "^2.0.0"
-
-postcss-env-function@^2.0.2:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-env-function/-/postcss-env-function-2.0.2.tgz#0f3e3d3c57f094a92c2baf4b6241f0b0da5365d7"
-  integrity sha512-rwac4BuZlITeUbiBq60h/xbLzXY43qOsIErngWa4l7Mt+RaSkT7QBjXVGTcBHupykkblHMDrBFh30zchYPaOUw==
-  dependencies:
-    postcss "^7.0.2"
-    postcss-values-parser "^2.0.0"
-
-postcss-flexbugs-fixes@4.1.0:
-  version "4.1.0"
-  resolved "https://registry.yarnpkg.com/postcss-flexbugs-fixes/-/postcss-flexbugs-fixes-4.1.0.tgz#e094a9df1783e2200b7b19f875dcad3b3aff8b20"
-  integrity sha512-jr1LHxQvStNNAHlgco6PzY308zvLklh7SJVYuWUwyUQncofaAlD2l+P/gxKHOdqWKe7xJSkVLFF/2Tp+JqMSZA==
-  dependencies:
-    postcss "^7.0.0"
-
-postcss-focus-visible@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-focus-visible/-/postcss-focus-visible-4.0.0.tgz#477d107113ade6024b14128317ade2bd1e17046e"
-  integrity sha512-Z5CkWBw0+idJHSV6+Bgf2peDOFf/x4o+vX/pwcNYrWpXFrSfTkQ3JQ1ojrq9yS+upnAlNRHeg8uEwFTgorjI8g==
-  dependencies:
-    postcss "^7.0.2"
-
-postcss-focus-within@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-focus-within/-/postcss-focus-within-3.0.0.tgz#763b8788596cee9b874c999201cdde80659ef680"
-  integrity sha512-W0APui8jQeBKbCGZudW37EeMCjDeVxKgiYfIIEo8Bdh5SpB9sxds/Iq8SEuzS0Q4YFOlG7EPFulbbxujpkrV2w==
-  dependencies:
-    postcss "^7.0.2"
-
-postcss-font-variant@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-font-variant/-/postcss-font-variant-4.0.0.tgz#71dd3c6c10a0d846c5eda07803439617bbbabacc"
-  integrity sha512-M8BFYKOvCrI2aITzDad7kWuXXTm0YhGdP9Q8HanmN4EF1Hmcgs1KK5rSHylt/lUJe8yLxiSwWAHdScoEiIxztg==
-  dependencies:
-    postcss "^7.0.2"
-
-postcss-gap-properties@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-gap-properties/-/postcss-gap-properties-2.0.0.tgz#431c192ab3ed96a3c3d09f2ff615960f902c1715"
-  integrity sha512-QZSqDaMgXCHuHTEzMsS2KfVDOq7ZFiknSpkrPJY6jmxbugUPTuSzs/vuE5I3zv0WAS+3vhrlqhijiprnuQfzmg==
-  dependencies:
-    postcss "^7.0.2"
-
-postcss-image-set-function@^3.0.1:
-  version "3.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-image-set-function/-/postcss-image-set-function-3.0.1.tgz#28920a2f29945bed4c3198d7df6496d410d3f288"
-  integrity sha512-oPTcFFip5LZy8Y/whto91L9xdRHCWEMs3e1MdJxhgt4jy2WYXfhkng59fH5qLXSCPN8k4n94p1Czrfe5IOkKUw==
-  dependencies:
-    postcss "^7.0.2"
-    postcss-values-parser "^2.0.0"
-
-postcss-initial@^3.0.0:
-  version "3.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-initial/-/postcss-initial-3.0.1.tgz#99d319669a13d6c06ef8e70d852f68cb1b399b61"
-  integrity sha512-I2Sz83ZSHybMNh02xQDK609lZ1/QOyYeuizCjzEhlMgeV/HcDJapQiH4yTqLjZss0X6/6VvKFXUeObaHpJoINw==
-  dependencies:
-    lodash.template "^4.5.0"
-    postcss "^7.0.2"
-
-postcss-lab-function@^2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-lab-function/-/postcss-lab-function-2.0.1.tgz#bb51a6856cd12289ab4ae20db1e3821ef13d7d2e"
-  integrity sha512-whLy1IeZKY+3fYdqQFuDBf8Auw+qFuVnChWjmxm/UhHWqNHZx+B99EwxTvGYmUBqe3Fjxs4L1BoZTJmPu6usVg==
-  dependencies:
-    "@csstools/convert-colors" "^1.4.0"
-    postcss "^7.0.2"
-    postcss-values-parser "^2.0.0"
-
-postcss-load-config@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/postcss-load-config/-/postcss-load-config-2.1.0.tgz#c84d692b7bb7b41ddced94ee62e8ab31b417b003"
-  integrity sha512-4pV3JJVPLd5+RueiVVB+gFOAa7GWc25XQcMp86Zexzke69mKf6Nx9LRcQywdz7yZI9n1udOxmLuAwTBypypF8Q==
-  dependencies:
-    cosmiconfig "^5.0.0"
-    import-cwd "^2.0.0"
-
-postcss-loader@3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-loader/-/postcss-loader-3.0.0.tgz#6b97943e47c72d845fa9e03f273773d4e8dd6c2d"
-  integrity sha512-cLWoDEY5OwHcAjDnkyRQzAXfs2jrKjXpO/HQFcc5b5u/r7aa471wdmChmwfnv7x2u840iat/wi0lQ5nbRgSkUA==
-  dependencies:
-    loader-utils "^1.1.0"
-    postcss "^7.0.0"
-    postcss-load-config "^2.0.0"
-    schema-utils "^1.0.0"
-
-postcss-logical@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-logical/-/postcss-logical-3.0.0.tgz#2495d0f8b82e9f262725f75f9401b34e7b45d5b5"
-  integrity sha512-1SUKdJc2vuMOmeItqGuNaC+N8MzBWFWEkAnRnLpFYj1tGGa7NqyVBujfRtgNa2gXR+6RkGUiB2O5Vmh7E2RmiA==
-  dependencies:
-    postcss "^7.0.2"
-
-postcss-media-minmax@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-media-minmax/-/postcss-media-minmax-4.0.0.tgz#b75bb6cbc217c8ac49433e12f22048814a4f5ed5"
-  integrity sha512-fo9moya6qyxsjbFAYl97qKO9gyre3qvbMnkOZeZwlsW6XYFsvs2DMGDlchVLfAd8LHPZDxivu/+qW2SMQeTHBw==
-  dependencies:
-    postcss "^7.0.2"
-
-postcss-merge-longhand@^4.0.11:
-  version "4.0.11"
-  resolved "https://registry.yarnpkg.com/postcss-merge-longhand/-/postcss-merge-longhand-4.0.11.tgz#62f49a13e4a0ee04e7b98f42bb16062ca2549e24"
-  integrity sha512-alx/zmoeXvJjp7L4mxEMjh8lxVlDFX1gqWHzaaQewwMZiVhLo42TEClKaeHbRf6J7j82ZOdTJ808RtN0ZOZwvw==
-  dependencies:
-    css-color-names "0.0.4"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-    stylehacks "^4.0.0"
-
-postcss-merge-rules@^4.0.3:
-  version "4.0.3"
-  resolved "https://registry.yarnpkg.com/postcss-merge-rules/-/postcss-merge-rules-4.0.3.tgz#362bea4ff5a1f98e4075a713c6cb25aefef9a650"
-  integrity sha512-U7e3r1SbvYzO0Jr3UT/zKBVgYYyhAz0aitvGIYOYK5CPmkNih+WDSsS5tvPrJ8YMQYlEMvsZIiqmn7HdFUaeEQ==
-  dependencies:
-    browserslist "^4.0.0"
-    caniuse-api "^3.0.0"
-    cssnano-util-same-parent "^4.0.0"
-    postcss "^7.0.0"
-    postcss-selector-parser "^3.0.0"
-    vendors "^1.0.0"
-
-postcss-minify-font-values@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-minify-font-values/-/postcss-minify-font-values-4.0.2.tgz#cd4c344cce474343fac5d82206ab2cbcb8afd5a6"
-  integrity sha512-j85oO6OnRU9zPf04+PZv1LYIYOprWm6IA6zkXkrJXyRveDEuQggG6tvoy8ir8ZwjLxLuGfNkCZEQG7zan+Hbtg==
-  dependencies:
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-minify-gradients@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-minify-gradients/-/postcss-minify-gradients-4.0.2.tgz#93b29c2ff5099c535eecda56c4aa6e665a663471"
-  integrity sha512-qKPfwlONdcf/AndP1U8SJ/uzIJtowHlMaSioKzebAXSG4iJthlWC9iSWznQcX4f66gIWX44RSA841HTHj3wK+Q==
-  dependencies:
-    cssnano-util-get-arguments "^4.0.0"
-    is-color-stop "^1.0.0"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-minify-params@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-minify-params/-/postcss-minify-params-4.0.2.tgz#6b9cef030c11e35261f95f618c90036d680db874"
-  integrity sha512-G7eWyzEx0xL4/wiBBJxJOz48zAKV2WG3iZOqVhPet/9geefm/Px5uo1fzlHu+DOjT+m0Mmiz3jkQzVHe6wxAWg==
-  dependencies:
-    alphanum-sort "^1.0.0"
-    browserslist "^4.0.0"
-    cssnano-util-get-arguments "^4.0.0"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-    uniqs "^2.0.0"
-
-postcss-minify-selectors@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-minify-selectors/-/postcss-minify-selectors-4.0.2.tgz#e2e5eb40bfee500d0cd9243500f5f8ea4262fbd8"
-  integrity sha512-D5S1iViljXBj9kflQo4YutWnJmwm8VvIsU1GeXJGiG9j8CIg9zs4voPMdQDUmIxetUOh60VilsNzCiAFTOqu3g==
-  dependencies:
-    alphanum-sort "^1.0.0"
-    has "^1.0.0"
-    postcss "^7.0.0"
-    postcss-selector-parser "^3.0.0"
-
-postcss-modules-extract-imports@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-2.0.0.tgz#818719a1ae1da325f9832446b01136eeb493cd7e"
-  integrity sha512-LaYLDNS4SG8Q5WAWqIJgdHPJrDDr/Lv775rMBFUbgjTz6j34lUznACHcdRWroPvXANP2Vj7yNK57vp9eFqzLWQ==
-  dependencies:
-    postcss "^7.0.5"
-
-postcss-modules-local-by-default@^2.0.6:
-  version "2.0.6"
-  resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-2.0.6.tgz#dd9953f6dd476b5fd1ef2d8830c8929760b56e63"
-  integrity sha512-oLUV5YNkeIBa0yQl7EYnxMgy4N6noxmiwZStaEJUSe2xPMcdNc8WmBQuQCx18H5psYbVxz8zoHk0RAAYZXP9gA==
-  dependencies:
-    postcss "^7.0.6"
-    postcss-selector-parser "^6.0.0"
-    postcss-value-parser "^3.3.1"
-
-postcss-modules-scope@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-2.1.0.tgz#ad3f5bf7856114f6fcab901b0502e2a2bc39d4eb"
-  integrity sha512-91Rjps0JnmtUB0cujlc8KIKCsJXWjzuxGeT/+Q2i2HXKZ7nBUeF9YQTZZTNvHVoNYj1AthsjnGLtqDUE0Op79A==
-  dependencies:
-    postcss "^7.0.6"
-    postcss-selector-parser "^6.0.0"
-
-postcss-modules-values@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-modules-values/-/postcss-modules-values-2.0.0.tgz#479b46dc0c5ca3dc7fa5270851836b9ec7152f64"
-  integrity sha512-Ki7JZa7ff1N3EIMlPnGTZfUMe69FFwiQPnVSXC9mnn3jozCRBYIxiZd44yJOV2AmabOo4qFf8s0dC/+lweG7+w==
-  dependencies:
-    icss-replace-symbols "^1.1.0"
-    postcss "^7.0.6"
-
-postcss-nesting@^7.0.0:
-  version "7.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-nesting/-/postcss-nesting-7.0.1.tgz#b50ad7b7f0173e5b5e3880c3501344703e04c052"
-  integrity sha512-FrorPb0H3nuVq0Sff7W2rnc3SmIcruVC6YwpcS+k687VxyxO33iE1amna7wHuRVzM8vfiYofXSBHNAZ3QhLvYg==
-  dependencies:
-    postcss "^7.0.2"
-
-postcss-normalize-charset@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-normalize-charset/-/postcss-normalize-charset-4.0.1.tgz#8b35add3aee83a136b0471e0d59be58a50285dd4"
-  integrity sha512-gMXCrrlWh6G27U0hF3vNvR3w8I1s2wOBILvA87iNXaPvSNo5uZAMYsZG7XjCUf1eVxuPfyL4TJ7++SGZLc9A3g==
-  dependencies:
-    postcss "^7.0.0"
-
-postcss-normalize-display-values@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.2.tgz#0dbe04a4ce9063d4667ed2be476bb830c825935a"
-  integrity sha512-3F2jcsaMW7+VtRMAqf/3m4cPFhPD3EFRgNs18u+k3lTJJlVe7d0YPO+bnwqo2xg8YiRpDXJI2u8A0wqJxMsQuQ==
-  dependencies:
-    cssnano-util-get-match "^4.0.0"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-normalize-positions@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-normalize-positions/-/postcss-normalize-positions-4.0.2.tgz#05f757f84f260437378368a91f8932d4b102917f"
-  integrity sha512-Dlf3/9AxpxE+NF1fJxYDeggi5WwV35MXGFnnoccP/9qDtFrTArZ0D0R+iKcg5WsUd8nUYMIl8yXDCtcrT8JrdA==
-  dependencies:
-    cssnano-util-get-arguments "^4.0.0"
-    has "^1.0.0"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-normalize-repeat-style@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-4.0.2.tgz#c4ebbc289f3991a028d44751cbdd11918b17910c"
-  integrity sha512-qvigdYYMpSuoFs3Is/f5nHdRLJN/ITA7huIoCyqqENJe9PvPmLhNLMu7QTjPdtnVf6OcYYO5SHonx4+fbJE1+Q==
-  dependencies:
-    cssnano-util-get-arguments "^4.0.0"
-    cssnano-util-get-match "^4.0.0"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-normalize-string@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-normalize-string/-/postcss-normalize-string-4.0.2.tgz#cd44c40ab07a0c7a36dc5e99aace1eca4ec2690c"
-  integrity sha512-RrERod97Dnwqq49WNz8qo66ps0swYZDSb6rM57kN2J+aoyEAJfZ6bMx0sx/F9TIEX0xthPGCmeyiam/jXif0eA==
-  dependencies:
-    has "^1.0.0"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-normalize-timing-functions@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-4.0.2.tgz#8e009ca2a3949cdaf8ad23e6b6ab99cb5e7d28d9"
-  integrity sha512-acwJY95edP762e++00Ehq9L4sZCEcOPyaHwoaFOhIwWCDfik6YvqsYNxckee65JHLKzuNSSmAdxwD2Cud1Z54A==
-  dependencies:
-    cssnano-util-get-match "^4.0.0"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-normalize-unicode@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-normalize-unicode/-/postcss-normalize-unicode-4.0.1.tgz#841bd48fdcf3019ad4baa7493a3d363b52ae1cfb"
-  integrity sha512-od18Uq2wCYn+vZ/qCOeutvHjB5jm57ToxRaMeNuf0nWVHaP9Hua56QyMF6fs/4FSUnVIw0CBPsU0K4LnBPwYwg==
-  dependencies:
-    browserslist "^4.0.0"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-normalize-url@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-normalize-url/-/postcss-normalize-url-4.0.1.tgz#10e437f86bc7c7e58f7b9652ed878daaa95faae1"
-  integrity sha512-p5oVaF4+IHwu7VpMan/SSpmpYxcJMtkGppYf0VbdH5B6hN8YNmVyJLuY9FmLQTzY3fag5ESUUHDqM+heid0UVA==
-  dependencies:
-    is-absolute-url "^2.0.0"
-    normalize-url "^3.0.0"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-normalize-whitespace@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-normalize-whitespace/-/postcss-normalize-whitespace-4.0.2.tgz#bf1d4070fe4fcea87d1348e825d8cc0c5faa7d82"
-  integrity sha512-tO8QIgrsI3p95r8fyqKV+ufKlSHh9hMJqACqbv2XknufqEDhDvbguXGBBqxw9nsQoXWf0qOqppziKJKHMD4GtA==
-  dependencies:
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-normalize@7.0.1:
-  version "7.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-normalize/-/postcss-normalize-7.0.1.tgz#eb51568d962b8aa61a8318383c8bb7e54332282e"
-  integrity sha512-NOp1fwrG+6kVXWo7P9SizCHX6QvioxFD/hZcI2MLxPmVnFJFC0j0DDpIuNw2tUDeCFMni59gCVgeJ1/hYhj2OQ==
-  dependencies:
-    "@csstools/normalize.css" "^9.0.1"
-    browserslist "^4.1.1"
-    postcss "^7.0.2"
-    postcss-browser-comments "^2.0.0"
-
-postcss-ordered-values@^4.1.2:
-  version "4.1.2"
-  resolved "https://registry.yarnpkg.com/postcss-ordered-values/-/postcss-ordered-values-4.1.2.tgz#0cf75c820ec7d5c4d280189559e0b571ebac0eee"
-  integrity sha512-2fCObh5UanxvSxeXrtLtlwVThBvHn6MQcu4ksNT2tsaV2Fg76R2CV98W7wNSlX+5/pFwEyaDwKLLoEV7uRybAw==
-  dependencies:
-    cssnano-util-get-arguments "^4.0.0"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-overflow-shorthand@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-overflow-shorthand/-/postcss-overflow-shorthand-2.0.0.tgz#31ecf350e9c6f6ddc250a78f0c3e111f32dd4c30"
-  integrity sha512-aK0fHc9CBNx8jbzMYhshZcEv8LtYnBIRYQD5i7w/K/wS9c2+0NSR6B3OVMu5y0hBHYLcMGjfU+dmWYNKH0I85g==
-  dependencies:
-    postcss "^7.0.2"
-
-postcss-page-break@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-page-break/-/postcss-page-break-2.0.0.tgz#add52d0e0a528cabe6afee8b46e2abb277df46bf"
-  integrity sha512-tkpTSrLpfLfD9HvgOlJuigLuk39wVTbbd8RKcy8/ugV2bNBUW3xU+AIqyxhDrQr1VUj1RmyJrBn1YWrqUm9zAQ==
-  dependencies:
-    postcss "^7.0.2"
-
-postcss-place@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-place/-/postcss-place-4.0.1.tgz#e9f39d33d2dc584e46ee1db45adb77ca9d1dcc62"
-  integrity sha512-Zb6byCSLkgRKLODj/5mQugyuj9bvAAw9LqJJjgwz5cYryGeXfFZfSXoP1UfveccFmeq0b/2xxwcTEVScnqGxBg==
-  dependencies:
-    postcss "^7.0.2"
-    postcss-values-parser "^2.0.0"
-
-postcss-preset-env@6.6.0:
-  version "6.6.0"
-  resolved "https://registry.yarnpkg.com/postcss-preset-env/-/postcss-preset-env-6.6.0.tgz#642e7d962e2bdc2e355db117c1eb63952690ed5b"
-  integrity sha512-I3zAiycfqXpPIFD6HXhLfWXIewAWO8emOKz+QSsxaUZb9Dp8HbF5kUf+4Wy/AxR33o+LRoO8blEWCHth0ZsCLA==
-  dependencies:
-    autoprefixer "^9.4.9"
-    browserslist "^4.4.2"
-    caniuse-lite "^1.0.30000939"
-    css-blank-pseudo "^0.1.4"
-    css-has-pseudo "^0.10.0"
-    css-prefers-color-scheme "^3.1.1"
-    cssdb "^4.3.0"
-    postcss "^7.0.14"
-    postcss-attribute-case-insensitive "^4.0.1"
-    postcss-color-functional-notation "^2.0.1"
-    postcss-color-gray "^5.0.0"
-    postcss-color-hex-alpha "^5.0.2"
-    postcss-color-mod-function "^3.0.3"
-    postcss-color-rebeccapurple "^4.0.1"
-    postcss-custom-media "^7.0.7"
-    postcss-custom-properties "^8.0.9"
-    postcss-custom-selectors "^5.1.2"
-    postcss-dir-pseudo-class "^5.0.0"
-    postcss-double-position-gradients "^1.0.0"
-    postcss-env-function "^2.0.2"
-    postcss-focus-visible "^4.0.0"
-    postcss-focus-within "^3.0.0"
-    postcss-font-variant "^4.0.0"
-    postcss-gap-properties "^2.0.0"
-    postcss-image-set-function "^3.0.1"
-    postcss-initial "^3.0.0"
-    postcss-lab-function "^2.0.1"
-    postcss-logical "^3.0.0"
-    postcss-media-minmax "^4.0.0"
-    postcss-nesting "^7.0.0"
-    postcss-overflow-shorthand "^2.0.0"
-    postcss-page-break "^2.0.0"
-    postcss-place "^4.0.1"
-    postcss-pseudo-class-any-link "^6.0.0"
-    postcss-replace-overflow-wrap "^3.0.0"
-    postcss-selector-matches "^4.0.0"
-    postcss-selector-not "^4.0.0"
-
-postcss-pseudo-class-any-link@^6.0.0:
-  version "6.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-pseudo-class-any-link/-/postcss-pseudo-class-any-link-6.0.0.tgz#2ed3eed393b3702879dec4a87032b210daeb04d1"
-  integrity sha512-lgXW9sYJdLqtmw23otOzrtbDXofUdfYzNm4PIpNE322/swES3VU9XlXHeJS46zT2onFO7V1QFdD4Q9LiZj8mew==
-  dependencies:
-    postcss "^7.0.2"
-    postcss-selector-parser "^5.0.0-rc.3"
-
-postcss-reduce-initial@^4.0.3:
-  version "4.0.3"
-  resolved "https://registry.yarnpkg.com/postcss-reduce-initial/-/postcss-reduce-initial-4.0.3.tgz#7fd42ebea5e9c814609639e2c2e84ae270ba48df"
-  integrity sha512-gKWmR5aUulSjbzOfD9AlJiHCGH6AEVLaM0AV+aSioxUDd16qXP1PCh8d1/BGVvpdWn8k/HiK7n6TjeoXN1F7DA==
-  dependencies:
-    browserslist "^4.0.0"
-    caniuse-api "^3.0.0"
-    has "^1.0.0"
-    postcss "^7.0.0"
-
-postcss-reduce-transforms@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-reduce-transforms/-/postcss-reduce-transforms-4.0.2.tgz#17efa405eacc6e07be3414a5ca2d1074681d4e29"
-  integrity sha512-EEVig1Q2QJ4ELpJXMZR8Vt5DQx8/mo+dGWSR7vWXqcob2gQLyQGsionYcGKATXvQzMPn6DSN1vTN7yFximdIAg==
-  dependencies:
-    cssnano-util-get-match "^4.0.0"
-    has "^1.0.0"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-
-postcss-replace-overflow-wrap@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-replace-overflow-wrap/-/postcss-replace-overflow-wrap-3.0.0.tgz#61b360ffdaedca84c7c918d2b0f0d0ea559ab01c"
-  integrity sha512-2T5hcEHArDT6X9+9dVSPQdo7QHzG4XKclFT8rU5TzJPDN7RIRTbO9c4drUISOVemLj03aezStHCR2AIcr8XLpw==
-  dependencies:
-    postcss "^7.0.2"
-
-postcss-safe-parser@4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-safe-parser/-/postcss-safe-parser-4.0.1.tgz#8756d9e4c36fdce2c72b091bbc8ca176ab1fcdea"
-  integrity sha512-xZsFA3uX8MO3yAda03QrG3/Eg1LN3EPfjjf07vke/46HERLZyHrTsQ9E1r1w1W//fWEhtYNndo2hQplN2cVpCQ==
-  dependencies:
-    postcss "^7.0.0"
-
-postcss-selector-matches@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-selector-matches/-/postcss-selector-matches-4.0.0.tgz#71c8248f917ba2cc93037c9637ee09c64436fcff"
-  integrity sha512-LgsHwQR/EsRYSqlwdGzeaPKVT0Ml7LAT6E75T8W8xLJY62CE4S/l03BWIt3jT8Taq22kXP08s2SfTSzaraoPww==
-  dependencies:
-    balanced-match "^1.0.0"
-    postcss "^7.0.2"
-
-postcss-selector-not@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-selector-not/-/postcss-selector-not-4.0.0.tgz#c68ff7ba96527499e832724a2674d65603b645c0"
-  integrity sha512-W+bkBZRhqJaYN8XAnbbZPLWMvZD1wKTu0UxtFKdhtGjWYmxhkUneoeOhRJKdAE5V7ZTlnbHfCR+6bNwK9e1dTQ==
-  dependencies:
-    balanced-match "^1.0.0"
-    postcss "^7.0.2"
-
-postcss-selector-parser@^3.0.0:
-  version "3.1.1"
-  resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz#4f875f4afb0c96573d5cf4d74011aee250a7e865"
-  integrity sha1-T4dfSvsMllc9XPTXQBGu4lCn6GU=
-  dependencies:
-    dot-prop "^4.1.1"
-    indexes-of "^1.0.1"
-    uniq "^1.0.1"
-
-postcss-selector-parser@^5.0.0, postcss-selector-parser@^5.0.0-rc.3, postcss-selector-parser@^5.0.0-rc.4:
-  version "5.0.0"
-  resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-5.0.0.tgz#249044356697b33b64f1a8f7c80922dddee7195c"
-  integrity sha512-w+zLE5Jhg6Liz8+rQOWEAwtwkyqpfnmsinXjXg6cY7YIONZZtgvE0v2O0uhQBs0peNomOJwWRKt6JBfTdTd3OQ==
-  dependencies:
-    cssesc "^2.0.0"
-    indexes-of "^1.0.1"
-    uniq "^1.0.1"
-
-postcss-selector-parser@^6.0.0:
-  version "6.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.0.2.tgz#934cf799d016c83411859e09dcecade01286ec5c"
-  integrity sha512-36P2QR59jDTOAiIkqEprfJDsoNrvwFei3eCqKd1Y0tUsBimsq39BLp7RD+JWny3WgB1zGhJX8XVePwm9k4wdBg==
-  dependencies:
-    cssesc "^3.0.0"
-    indexes-of "^1.0.1"
-    uniq "^1.0.1"
-
-postcss-svgo@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-svgo/-/postcss-svgo-4.0.2.tgz#17b997bc711b333bab143aaed3b8d3d6e3d38258"
-  integrity sha512-C6wyjo3VwFm0QgBy+Fu7gCYOkCmgmClghO+pjcxvrcBKtiKt0uCF+hvbMO1fyv5BMImRK90SMb+dwUnfbGd+jw==
-  dependencies:
-    is-svg "^3.0.0"
-    postcss "^7.0.0"
-    postcss-value-parser "^3.0.0"
-    svgo "^1.0.0"
-
-postcss-unique-selectors@^4.0.1:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-unique-selectors/-/postcss-unique-selectors-4.0.1.tgz#9446911f3289bfd64c6d680f073c03b1f9ee4bac"
-  integrity sha512-+JanVaryLo9QwZjKrmJgkI4Fn8SBgRO6WXQBJi7KiAVPlmxikB5Jzc4EvXMT2H0/m0RjrVVm9rGNhZddm/8Spg==
-  dependencies:
-    alphanum-sort "^1.0.0"
-    postcss "^7.0.0"
-    uniqs "^2.0.0"
-
-postcss-value-parser@^3.0.0, postcss-value-parser@^3.3.0, postcss-value-parser@^3.3.1:
-  version "3.3.1"
-  resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz#9ff822547e2893213cf1c30efa51ac5fd1ba8281"
-  integrity sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==
-
-postcss-value-parser@^4.0.0:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.0.2.tgz#482282c09a42706d1fc9a069b73f44ec08391dc9"
-  integrity sha512-LmeoohTpp/K4UiyQCwuGWlONxXamGzCMtFxLq4W1nZVGIQLYvMCJx3yAF9qyyuFpflABI9yVdtJAqbihOsCsJQ==
-
-postcss-values-parser@^2.0.0, postcss-values-parser@^2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/postcss-values-parser/-/postcss-values-parser-2.0.1.tgz#da8b472d901da1e205b47bdc98637b9e9e550e5f"
-  integrity sha512-2tLuBsA6P4rYTNKCXYG/71C7j1pU6pK503suYOmn4xYrQIzW+opD+7FAFNuGSdZC/3Qfy334QbeMu7MEb8gOxg==
-  dependencies:
-    flatten "^1.0.2"
-    indexes-of "^1.0.1"
-    uniq "^1.0.1"
-
-postcss@^7.0.0, postcss@^7.0.1, postcss@^7.0.14, postcss@^7.0.17, postcss@^7.0.2, postcss@^7.0.5, postcss@^7.0.6:
-  version "7.0.17"
-  resolved "https://registry.yarnpkg.com/postcss/-/postcss-7.0.17.tgz#4da1bdff5322d4a0acaab4d87f3e782436bad31f"
-  integrity sha512-546ZowA+KZ3OasvQZHsbuEpysvwTZNGJv9EfyCQdsIDltPSWHAeTQ5fQy/Npi2ZDtLI3zs7Ps/p6wThErhm9fQ==
-  dependencies:
-    chalk "^2.4.2"
-    source-map "^0.6.1"
-    supports-color "^6.1.0"
-
-prelude-ls@~1.1.2:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54"
-  integrity sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=
-
-pretty-bytes@^5.1.0:
-  version "5.3.0"
-  resolved "https://registry.yarnpkg.com/pretty-bytes/-/pretty-bytes-5.3.0.tgz#f2849e27db79fb4d6cfe24764fc4134f165989f2"
-  integrity sha512-hjGrh+P926p4R4WbaB6OckyRtO0F0/lQBiT+0gnxjV+5kjPBrfVBFCsCLbMqVQeydvIoouYTCmmEURiH3R1Bdg==
-
-pretty-error@^2.1.1:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/pretty-error/-/pretty-error-2.1.1.tgz#5f4f87c8f91e5ae3f3ba87ab4cf5e03b1a17f1a3"
-  integrity sha1-X0+HyPkeWuPzuoerTPXgOxoX8aM=
-  dependencies:
-    renderkid "^2.0.1"
-    utila "~0.4"
-
-pretty-format@^24.9.0:
-  version "24.9.0"
-  resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-24.9.0.tgz#12fac31b37019a4eea3c11aa9a959eb7628aa7c9"
-  integrity sha512-00ZMZUiHaJrNfk33guavqgvfJS30sLYf0f8+Srklv0AMPodGGHcoHgksZ3OThYnIvOd+8yMCn0YiEOogjlgsnA==
-  dependencies:
-    "@jest/types" "^24.9.0"
-    ansi-regex "^4.0.0"
-    ansi-styles "^3.2.0"
-    react-is "^16.8.4"
-
-private@^0.1.6:
-  version "0.1.8"
-  resolved "https://registry.yarnpkg.com/private/-/private-0.1.8.tgz#2381edb3689f7a53d653190060fcf822d2f368ff"
-  integrity sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==
-
-process-nextick-args@~2.0.0:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2"
-  integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==
-
-process@^0.11.10:
-  version "0.11.10"
-  resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182"
-  integrity sha1-czIwDoQBYb2j5podHZGn1LwW8YI=
-
-progress@^2.0.0:
-  version "2.0.3"
-  resolved "https://registry.yarnpkg.com/progress/-/progress-2.0.3.tgz#7e8cf8d8f5b8f239c1bc68beb4eb78567d572ef8"
-  integrity sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==
-
-promise-inflight@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/promise-inflight/-/promise-inflight-1.0.1.tgz#98472870bf228132fcbdd868129bad12c3c029e3"
-  integrity sha1-mEcocL8igTL8vdhoEputEsPAKeM=
-
-promise@8.0.3:
-  version "8.0.3"
-  resolved "https://registry.yarnpkg.com/promise/-/promise-8.0.3.tgz#f592e099c6cddc000d538ee7283bb190452b0bf6"
-  integrity sha512-HeRDUL1RJiLhyA0/grn+PTShlBAcLuh/1BJGtrvjwbvRDCTLLMEz9rOGCV+R3vHY4MixIuoMEd9Yq/XvsTPcjw==
-  dependencies:
-    asap "~2.0.6"
-
-promise@^7.1.1:
-  version "7.3.1"
-  resolved "https://registry.yarnpkg.com/promise/-/promise-7.3.1.tgz#064b72602b18f90f29192b8b1bc418ffd1ebd3bf"
-  integrity sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==
-  dependencies:
-    asap "~2.0.3"
-
-prompts@^2.0.1:
-  version "2.2.1"
-  resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.2.1.tgz#f901dd2a2dfee080359c0e20059b24188d75ad35"
-  integrity sha512-VObPvJiWPhpZI6C5m60XOzTfnYg/xc/an+r9VYymj9WJW3B/DIH+REzjpAACPf8brwPeP+7vz3bIim3S+AaMjw==
-  dependencies:
-    kleur "^3.0.3"
-    sisteransi "^1.0.3"
-
-prop-types@15.x, prop-types@^15.5.10, prop-types@^15.5.4, prop-types@^15.5.6, prop-types@^15.5.7, prop-types@^15.5.8, prop-types@^15.5.9, prop-types@^15.6.0, prop-types@^15.6.2, prop-types@^15.7.2:
-  version "15.7.2"
-  resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.7.2.tgz#52c41e75b8c87e72b9d9360e0206b99dcbffa6c5"
-  integrity sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ==
-  dependencies:
-    loose-envify "^1.4.0"
-    object-assign "^4.1.1"
-    react-is "^16.8.1"
-
-proxy-addr@~2.0.5:
-  version "2.0.5"
-  resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.5.tgz#34cbd64a2d81f4b1fd21e76f9f06c8a45299ee34"
-  integrity sha512-t/7RxHXPH6cJtP0pRG6smSr9QJidhB+3kXu0KgXnbGYMgzEnUxRQ4/LDdfOwZEMyIh3/xHb8PX3t+lfL9z+YVQ==
-  dependencies:
-    forwarded "~0.1.2"
-    ipaddr.js "1.9.0"
-
-prr@~1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/prr/-/prr-1.0.1.tgz#d3fc114ba06995a45ec6893f484ceb1d78f5f476"
-  integrity sha1-0/wRS6BplaRexok/SEzrHXj19HY=
-
-psl@^1.1.24, psl@^1.1.28:
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/psl/-/psl-1.3.0.tgz#e1ebf6a3b5564fa8376f3da2275da76d875ca1bd"
-  integrity sha512-avHdspHO+9rQTLbv1RO+MPYeP/SzsCoxofjVnHanETfQhTJrmB0HlDoW+EiN/R+C0BZ+gERab9NY0lPN2TxNag==
-
-public-encrypt@^4.0.0:
-  version "4.0.3"
-  resolved "https://registry.yarnpkg.com/public-encrypt/-/public-encrypt-4.0.3.tgz#4fcc9d77a07e48ba7527e7cbe0de33d0701331e0"
-  integrity sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==
-  dependencies:
-    bn.js "^4.1.0"
-    browserify-rsa "^4.0.0"
-    create-hash "^1.1.0"
-    parse-asn1 "^5.0.0"
-    randombytes "^2.0.1"
-    safe-buffer "^5.1.2"
-
-pump@^2.0.0:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/pump/-/pump-2.0.1.tgz#12399add6e4cf7526d973cbc8b5ce2e2908b3909"
-  integrity sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==
-  dependencies:
-    end-of-stream "^1.1.0"
-    once "^1.3.1"
-
-pump@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64"
-  integrity sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==
-  dependencies:
-    end-of-stream "^1.1.0"
-    once "^1.3.1"
-
-pumpify@^1.3.3:
-  version "1.5.1"
-  resolved "https://registry.yarnpkg.com/pumpify/-/pumpify-1.5.1.tgz#36513be246ab27570b1a374a5ce278bfd74370ce"
-  integrity sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==
-  dependencies:
-    duplexify "^3.6.0"
-    inherits "^2.0.3"
-    pump "^2.0.0"
-
-punycode@1.3.2:
-  version "1.3.2"
-  resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.3.2.tgz#9653a036fb7c1ee42342f2325cceefea3926c48d"
-  integrity sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=
-
-punycode@^1.2.4, punycode@^1.4.1:
-  version "1.4.1"
-  resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
-  integrity sha1-wNWmOycYgArY4esPpSachN1BhF4=
-
-punycode@^2.1.0, punycode@^2.1.1:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec"
-  integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==
-
-q@^1.1.2:
-  version "1.5.1"
-  resolved "https://registry.yarnpkg.com/q/-/q-1.5.1.tgz#7e32f75b41381291d04611f1bf14109ac00651d7"
-  integrity sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=
-
-qs@6.7.0:
-  version "6.7.0"
-  resolved "https://registry.yarnpkg.com/qs/-/qs-6.7.0.tgz#41dc1a015e3d581f1621776be31afb2876a9b1bc"
-  integrity sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==
-
-qs@~6.5.2:
-  version "6.5.2"
-  resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.2.tgz#cb3ae806e8740444584ef154ce8ee98d403f3e36"
-  integrity sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==
-
-querystring-es3@^0.2.0:
-  version "0.2.1"
-  resolved "https://registry.yarnpkg.com/querystring-es3/-/querystring-es3-0.2.1.tgz#9ec61f79049875707d69414596fd907a4d711e73"
-  integrity sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM=
-
-querystring@0.2.0:
-  version "0.2.0"
-  resolved "https://registry.yarnpkg.com/querystring/-/querystring-0.2.0.tgz#b209849203bb25df820da756e747005878521620"
-  integrity sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=
-
-querystringify@^2.1.1:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-2.1.1.tgz#60e5a5fd64a7f8bfa4d2ab2ed6fdf4c85bad154e"
-  integrity sha512-w7fLxIRCRT7U8Qu53jQnJyPkYZIaR4n5151KMfcJlO/A9397Wxb1amJvROTK6TOnp7PfoAmg/qXiNHI+08jRfA==
-
-raf@3.4.1, raf@^3.4.0, raf@^3.4.1:
-  version "3.4.1"
-  resolved "https://registry.yarnpkg.com/raf/-/raf-3.4.1.tgz#0742e99a4a6552f445d73e3ee0328af0ff1ede39"
-  integrity sha512-Sq4CW4QhwOHE8ucn6J34MqtZCeWFP2aQSmrlroYgqAV1PjStIhJXxYuTgUIfkEk7zTLjmIjLmU5q+fbD1NnOJA==
-  dependencies:
-    performance-now "^2.1.0"
-
-randombytes@^2.0.0, randombytes@^2.0.1, randombytes@^2.0.5:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.1.0.tgz#df6f84372f0270dc65cdf6291349ab7a473d4f2a"
-  integrity sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==
-  dependencies:
-    safe-buffer "^5.1.0"
-
-randomfill@^1.0.3:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/randomfill/-/randomfill-1.0.4.tgz#c92196fc86ab42be983f1bf31778224931d61458"
-  integrity sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==
-  dependencies:
-    randombytes "^2.0.5"
-    safe-buffer "^5.1.0"
-
-range-parser@^1.2.1, range-parser@~1.2.1:
-  version "1.2.1"
-  resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031"
-  integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==
-
-raw-body@2.4.0:
-  version "2.4.0"
-  resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.4.0.tgz#a1ce6fb9c9bc356ca52e89256ab59059e13d0332"
-  integrity sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==
-  dependencies:
-    bytes "3.1.0"
-    http-errors "1.7.2"
-    iconv-lite "0.4.24"
-    unpipe "1.0.0"
-
-rc-align@^2.4.0, rc-align@^2.4.1:
-  version "2.4.5"
-  resolved "https://registry.yarnpkg.com/rc-align/-/rc-align-2.4.5.tgz#c941a586f59d1017f23a428f0b468663fb7102ab"
-  integrity sha512-nv9wYUYdfyfK+qskThf4BQUSIadeI/dCsfaMZfNEoxm9HwOIioQ+LyqmMK6jWHAZQgOzMLaqawhuBXlF63vgjw==
-  dependencies:
-    babel-runtime "^6.26.0"
-    dom-align "^1.7.0"
-    prop-types "^15.5.8"
-    rc-util "^4.0.4"
-
-rc-animate@2.x, rc-animate@^2.3.0, rc-animate@^2.6.0, rc-animate@^2.8.2, rc-animate@^2.8.3:
-  version "2.10.0"
-  resolved "https://registry.yarnpkg.com/rc-animate/-/rc-animate-2.10.0.tgz#d2224cee4700cc9e9836700eb47af6b6e41a080c"
-  integrity sha512-gZM3WteZO0e3X8B71KP0bs95EY2tAPRuiZyKnlhdLpOjTX/64SrhDZM3pT2Z8mJjKWNiiB5q2SSSf+BD8ljwVw==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "^2.2.6"
-    css-animation "^1.3.2"
-    prop-types "15.x"
-    raf "^3.4.0"
-    rc-util "^4.8.0"
-    react-lifecycles-compat "^3.0.4"
-
-rc-animate@^3.0.0-rc.1:
-  version "3.0.0-rc.6"
-  resolved "https://registry.yarnpkg.com/rc-animate/-/rc-animate-3.0.0-rc.6.tgz#04288eefa118e0cae214536c8a903ffaac1bc3fb"
-  integrity sha512-oBLPpiT6Q4t6YvD/pkLcmofBP1p01TX0Otse8Q4+Mxt8J+VSDflLZGIgf62EwkvRwsQUkLPjZVFBsldnPKLzjg==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "^2.2.5"
-    component-classes "^1.2.6"
-    fbjs "^0.8.16"
-    prop-types "15.x"
-    raf "^3.4.0"
-    rc-util "^4.5.0"
-    react-lifecycles-compat "^3.0.4"
-
-rc-calendar@~9.15.5:
-  version "9.15.5"
-  resolved "https://registry.yarnpkg.com/rc-calendar/-/rc-calendar-9.15.5.tgz#21ab0cb2a5659d85fe6faead13d3ed764dd47c01"
-  integrity sha512-nvoEXk5P0DADt5b7FHlKiXKj+IhoWawQGSkb5soa6gXQIfoqQJ5+zB2Ogy7k1RxNbxQu4iIkEW/a3+HObVRDdA==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "2.x"
-    moment "2.x"
-    prop-types "^15.5.8"
-    rc-trigger "^2.2.0"
-    rc-util "^4.1.1"
-    react-lifecycles-compat "^3.0.4"
-
-rc-cascader@~0.17.4:
-  version "0.17.4"
-  resolved "https://registry.yarnpkg.com/rc-cascader/-/rc-cascader-0.17.4.tgz#bb38ba3ed0990bfaa5ee547467d85ecc0d152f96"
-  integrity sha512-CeFQJIMzY7x++uPqlx4Xl/cH8iTs8nRoW522+DLb21kdL5kWqKlK+3iHXExoxcAymjwo5ScIiXi+NY4m8Pgq9w==
-  dependencies:
-    array-tree-filter "^2.1.0"
-    prop-types "^15.5.8"
-    rc-trigger "^2.2.0"
-    rc-util "^4.0.4"
-    react-lifecycles-compat "^3.0.4"
-    shallow-equal "^1.0.0"
-    warning "^4.0.1"
-
-rc-checkbox@~2.1.6:
-  version "2.1.7"
-  resolved "https://registry.yarnpkg.com/rc-checkbox/-/rc-checkbox-2.1.7.tgz#ae6785525cf35fa4c62d706c4a1ff7b2b1fcb821"
-  integrity sha512-8L+0XuucUOMUM6F/7qH+hnQpEHPZfW1Um02lUHEVdpZNor5mC0Fj4x8GvTtwcM1pAl5tD3I6lHYD8cE1W8RZJw==
-  dependencies:
-    babel-runtime "^6.23.0"
-    classnames "2.x"
-    prop-types "15.x"
-    react-lifecycles-compat "^3.0.4"
-
-rc-collapse@~1.11.3:
-  version "1.11.6"
-  resolved "https://registry.yarnpkg.com/rc-collapse/-/rc-collapse-1.11.6.tgz#9a70ac2bc2715e1ef7bae8e308bc0e844618d119"
-  integrity sha512-qckXftNVD7fawl/yrQYoMcKF9e8TFP9lJGrAQ1K6xA1xhSq6T9I++lsRRF57D1flxALkjXESpJpiSpVjM7sblA==
-  dependencies:
-    classnames "2.x"
-    css-animation "1.x"
-    prop-types "^15.5.6"
-    rc-animate "2.x"
-    react-is "^16.7.0"
-    react-lifecycles-compat "^3.0.4"
-    shallowequal "^1.1.0"
-
-rc-dialog@~7.5.2:
-  version "7.5.5"
-  resolved "https://registry.yarnpkg.com/rc-dialog/-/rc-dialog-7.5.5.tgz#67bf2657a239d29fdd21e06c9b13017dbb110a75"
-  integrity sha512-WbGCPuibf4VDiKfx0+vJPQecJFiQtweJgvhXEXDQl8460bdME2TB9SJB7YVht8tzNS/5fDUbkDfYeO7VFLb5Wg==
-  dependencies:
-    babel-runtime "6.x"
-    rc-animate "2.x"
-    rc-util "^4.8.1"
-
-rc-drawer@~2.0.1:
-  version "2.0.8"
-  resolved "https://registry.yarnpkg.com/rc-drawer/-/rc-drawer-2.0.8.tgz#ba0500590804283308f77acc22fff35f395e979a"
-  integrity sha512-BRX+pvC3aqNA/uuKJCHku7X5NBNdpQdewhnY/lrf2XmdpYXNKdHbf0TKsmM3P3N+eyqd/2/wePLCru++vbbBpg==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "^2.2.5"
-    rc-util "^4.7.0"
-    react-lifecycles-compat "^3.0.4"
-
-rc-dropdown@~2.4.1:
-  version "2.4.1"
-  resolved "https://registry.yarnpkg.com/rc-dropdown/-/rc-dropdown-2.4.1.tgz#aaef6eb3a5152cdd9982895c2a78d9b5f046cdec"
-  integrity sha512-p0XYn0wrOpAZ2fUGE6YJ6U8JBNc5ASijznZ6dkojdaEfQJAeZtV9KMEewhxkVlxGSbbdXe10ptjBlTEW9vEwEg==
-  dependencies:
-    babel-runtime "^6.26.0"
-    classnames "^2.2.6"
-    prop-types "^15.5.8"
-    rc-trigger "^2.5.1"
-    react-lifecycles-compat "^3.0.2"
-
-rc-editor-core@~0.8.3:
-  version "0.8.10"
-  resolved "https://registry.yarnpkg.com/rc-editor-core/-/rc-editor-core-0.8.10.tgz#6f215bc5df9c33ffa9f6c5b30ca73a7dabe8ab7c"
-  integrity sha512-T3aHpeMCIYA1sdAI7ynHHjXy5fqp83uPlD68ovZ0oClTSc3tbHmyCxXlA+Ti4YgmcpCYv7avF6a+TIbAka53kw==
-  dependencies:
-    babel-runtime "^6.26.0"
-    classnames "^2.2.5"
-    draft-js "^0.10.0"
-    immutable "^3.7.4"
-    lodash "^4.16.5"
-    prop-types "^15.5.8"
-    setimmediate "^1.0.5"
-
-rc-editor-mention@^1.1.13:
-  version "1.1.13"
-  resolved "https://registry.yarnpkg.com/rc-editor-mention/-/rc-editor-mention-1.1.13.tgz#9f1cab1065f86b01523840321790c2ab12ac5e8b"
-  integrity sha512-3AOmGir91Fi2ogfRRaXLtqlNuIwQpvla7oUnGHS1+3eo7b+fUp5IlKcagqtwUBB5oDNofoySXkLBxzWvSYNp/Q==
-  dependencies:
-    babel-runtime "^6.23.0"
-    classnames "^2.2.5"
-    dom-scroll-into-view "^1.2.0"
-    draft-js "~0.10.0"
-    immutable "~3.7.4"
-    prop-types "^15.5.8"
-    rc-animate "^2.3.0"
-    rc-editor-core "~0.8.3"
-
-rc-form@^2.4.5:
-  version "2.4.8"
-  resolved "https://registry.yarnpkg.com/rc-form/-/rc-form-2.4.8.tgz#79a1f124d4fa81dff2af2992e94aa3e58cad683c"
-  integrity sha512-hlHajcYg51pFQf+B6neAbhy2ZA+8DmxnDxiOYZRAXCLhPN788ZnrtZq5/iADDWcZqjHFnXiThoZE/Fu8syciDQ==
-  dependencies:
-    async-validator "~1.11.3"
-    babel-runtime "6.x"
-    create-react-class "^15.5.3"
-    dom-scroll-into-view "1.x"
-    hoist-non-react-statics "^3.3.0"
-    lodash "^4.17.4"
-    warning "^4.0.3"
-
-rc-hammerjs@~0.6.0:
-  version "0.6.9"
-  resolved "https://registry.yarnpkg.com/rc-hammerjs/-/rc-hammerjs-0.6.9.tgz#9a4ddbda1b2ec8f9b9596091a6a989842a243907"
-  integrity sha512-4llgWO3RgLyVbEqUdGsDfzUDqklRlQW5VEhE3x35IvhV+w//VPRG34SBavK3D2mD/UaLKaohgU41V4agiftC8g==
-  dependencies:
-    babel-runtime "6.x"
-    hammerjs "^2.0.8"
-    prop-types "^15.5.9"
-
-rc-input-number@~4.4.5:
-  version "4.4.5"
-  resolved "https://registry.yarnpkg.com/rc-input-number/-/rc-input-number-4.4.5.tgz#81473d2885a6b312e486c4f2ba3f441c1ab88520"
-  integrity sha512-Dt20e8Ylc/N/6oXiPUlwDVdx3fz7W5umUOa4z5pBuWFG7NPlBVXRWkq7+nbnTyaK24UxN67PVpmD3+Omo+QRZQ==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "^2.2.0"
-    prop-types "^15.5.7"
-    rc-util "^4.5.1"
-    rmc-feedback "^2.0.0"
-
-rc-mentions@~0.4.0:
-  version "0.4.0"
-  resolved "https://registry.yarnpkg.com/rc-mentions/-/rc-mentions-0.4.0.tgz#483552c088290dbcddd059a0846e9f207ecc3539"
-  integrity sha512-xnkQBTUFp4llaJuDOLVFKX9ELrXFHk1FuUdIIC/ijQ6cLjDhCUu+jpHNcXWuQ/yIFzF376VlXkmT57iqxSnZzw==
-  dependencies:
-    "@ant-design/create-react-context" "^0.2.4"
-    classnames "^2.2.6"
-    rc-menu "^7.4.22"
-    rc-trigger "^2.6.2"
-    rc-util "^4.6.0"
-    react-lifecycles-compat "^3.0.4"
-
-rc-menu@^7.3.0, rc-menu@^7.4.22, rc-menu@~7.4.23:
-  version "7.4.23"
-  resolved "https://registry.yarnpkg.com/rc-menu/-/rc-menu-7.4.23.tgz#e07d497864274076299d7d8a84d14fc86b6bd30d"
-  integrity sha512-d0pUMN0Zr3GCFxNpas8p7AUTeX8viItUOQXku4AsyX82ZzUz79HgGul2Nk17BIFTtLzqdB7/NT6WVb5PAOOILw==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "2.x"
-    dom-scroll-into-view "1.x"
-    ismobilejs "^0.5.1"
-    mini-store "^2.0.0"
-    mutationobserver-shim "^0.3.2"
-    prop-types "^15.5.6"
-    rc-animate "2.x"
-    rc-trigger "^2.3.0"
-    rc-util "^4.1.0"
-    resize-observer-polyfill "^1.5.0"
-
-rc-notification@~3.3.1:
-  version "3.3.1"
-  resolved "https://registry.yarnpkg.com/rc-notification/-/rc-notification-3.3.1.tgz#0baa3e70f8d40ab015ce8fa78c260c490fc7beb4"
-  integrity sha512-U5+f4BmBVfMSf3OHSLyRagsJ74yKwlrQAtbbL5ijoA0F2C60BufwnOcHG18tVprd7iaIjzZt1TKMmQSYSvgrig==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "2.x"
-    prop-types "^15.5.8"
-    rc-animate "2.x"
-    rc-util "^4.0.4"
-
-rc-pagination@~1.20.5:
-  version "1.20.5"
-  resolved "https://registry.yarnpkg.com/rc-pagination/-/rc-pagination-1.20.5.tgz#b64395a702e9fc452e26d0e491ccf7d9345309f7"
-  integrity sha512-gnVAowVIbRilW6bXYWCEpTsrtmAWTpM3qO/bltYfqTVKxgb6/sDqjRvCksJGy/D81pYkEkKeA9foWsgUgbUsQw==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "^2.2.6"
-    prop-types "^15.5.7"
-    react-lifecycles-compat "^3.0.4"
-
-rc-progress@~2.5.0:
-  version "2.5.2"
-  resolved "https://registry.yarnpkg.com/rc-progress/-/rc-progress-2.5.2.tgz#ab01ba4e5d2fa36fc9f6f058b10b720e7315560c"
-  integrity sha512-ajI+MJkbBz9zYDuE9GQsY5gsyqPF7HFioZEDZ9Fmc+ebNZoiSeSJsTJImPFCg0dW/5WiRGUy2F69SX1aPtSJgA==
-  dependencies:
-    babel-runtime "6.x"
-    prop-types "^15.5.8"
-
-rc-rate@~2.5.0:
-  version "2.5.0"
-  resolved "https://registry.yarnpkg.com/rc-rate/-/rc-rate-2.5.0.tgz#72d4984a03d0a7a0e6779c7a79efcea27626abf6"
-  integrity sha512-aXX5klRqbVZxvLghcKnLqqo7LvLVCHswEDteWsm5Gb7NBIPa1YKTcAbvb5SZ4Z4i4EeRoZaPwygRAWsQgGtbKw==
-  dependencies:
-    classnames "^2.2.5"
-    prop-types "^15.5.8"
-    rc-util "^4.3.0"
-    react-lifecycles-compat "^3.0.4"
-
-rc-select@~9.2.0:
-  version "9.2.1"
-  resolved "https://registry.yarnpkg.com/rc-select/-/rc-select-9.2.1.tgz#0fbf47a933c36e1e6ef76c88fab32f58029e6a01"
-  integrity sha512-nW/Zr2OCgxN26OX8ff3xcO1wK0e1l5ixnEfyN15Rbdk7TNI/rIPJIjPCQAoihRpk9A2C/GH8pahjlvKV1Vj++g==
-  dependencies:
-    babel-runtime "^6.23.0"
-    classnames "2.x"
-    component-classes "1.x"
-    dom-scroll-into-view "1.x"
-    prop-types "^15.5.8"
-    raf "^3.4.0"
-    rc-animate "2.x"
-    rc-menu "^7.3.0"
-    rc-trigger "^2.5.4"
-    rc-util "^4.0.4"
-    react-lifecycles-compat "^3.0.2"
-    warning "^4.0.2"
-
-rc-slider@~8.6.11:
-  version "8.6.13"
-  resolved "https://registry.yarnpkg.com/rc-slider/-/rc-slider-8.6.13.tgz#88a8150c2dda6709f3f119135de11fba80af765b"
-  integrity sha512-fCUe8pPn8n9pq1ARX44nN2nzJoATtna4x/PdskUrxIvZXN8ja7HuceN/hq6kokZjo3FBD2B1yMZvZh6oi68l6Q==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "^2.2.5"
-    prop-types "^15.5.4"
-    rc-tooltip "^3.7.0"
-    rc-util "^4.0.4"
-    shallowequal "^1.0.1"
-    warning "^4.0.3"
-
-rc-steps@~3.5.0:
-  version "3.5.0"
-  resolved "https://registry.yarnpkg.com/rc-steps/-/rc-steps-3.5.0.tgz#36b2a7f1f49907b0d90363884b18623caf9fb600"
-  integrity sha512-2Vkkrpa7PZbg7qPsqTNzVDov4u78cmxofjjnIHiGB9+9rqKS8oTLPzbW2uiWDr3Lk+yGwh8rbpGO1E6VAgBCOg==
-  dependencies:
-    babel-runtime "^6.23.0"
-    classnames "^2.2.3"
-    lodash "^4.17.5"
-    prop-types "^15.5.7"
-
-rc-switch@~1.9.0:
-  version "1.9.0"
-  resolved "https://registry.yarnpkg.com/rc-switch/-/rc-switch-1.9.0.tgz#ab2b878f2713c681358a453391976c9b95b290f7"
-  integrity sha512-Isas+egaK6qSk64jaEw4GgPStY4umYDbT7ZY93bZF1Af+b/JEsKsJdNOU2qG3WI0Z6tXo2DDq0kJCv8Yhu0zww==
-  dependencies:
-    classnames "^2.2.1"
-    prop-types "^15.5.6"
-    react-lifecycles-compat "^3.0.4"
-
-rc-table@~6.7.0:
-  version "6.7.0"
-  resolved "https://registry.yarnpkg.com/rc-table/-/rc-table-6.7.0.tgz#8aca002f84a43a2d51a4fcda0f7a51694154286d"
-  integrity sha512-zzu7UtEHLTzZibB1EOoeKQejH21suoxRQx3evlGGLwz5NUh2HDUHobSr12z5Kd8EPr1+y/LPzXJdX1ctFPC+hA==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "^2.2.5"
-    component-classes "^1.2.6"
-    lodash "^4.17.5"
-    mini-store "^2.0.0"
-    prop-types "^15.5.8"
-    rc-util "^4.0.4"
-    react-lifecycles-compat "^3.0.2"
-    shallowequal "^1.0.2"
-    warning "^3.0.0"
-
-rc-tabs@~9.6.4:
-  version "9.6.6"
-  resolved "https://registry.yarnpkg.com/rc-tabs/-/rc-tabs-9.6.6.tgz#1378aae0e4a04d6c3f2bd61bfcb7f28a7ef3807f"
-  integrity sha512-8Vs4tLZKQODl72RetTNm+yVOuboAhtJlvf9fbxWJ4WiYuzMxU7Y8RZ8yVNDGt3+4WzCJUI53CtobptBWwcUkDA==
-  dependencies:
-    "@ant-design/create-react-context" "^0.2.4"
-    babel-runtime "6.x"
-    classnames "2.x"
-    lodash "^4.17.5"
-    prop-types "15.x"
-    raf "^3.4.1"
-    rc-hammerjs "~0.6.0"
-    rc-util "^4.0.4"
-    react-lifecycles-compat "^3.0.4"
-    resize-observer-polyfill "^1.5.1"
-    warning "^4.0.3"
-
-rc-time-picker@~3.7.1:
-  version "3.7.2"
-  resolved "https://registry.yarnpkg.com/rc-time-picker/-/rc-time-picker-3.7.2.tgz#fabe5501adf1374d31a2d3b47f1ba89fc2dc2467"
-  integrity sha512-UVWO9HXGyZoM4I2THlJsEAFcZQz+tYwdcpoHXCEFZsRLz9L2+7vV4EMp9Wa3UrtzMFEt83qSAX/90dCJeKl9sg==
-  dependencies:
-    classnames "2.x"
-    moment "2.x"
-    prop-types "^15.5.8"
-    raf "^3.4.1"
-    rc-trigger "^2.2.0"
-    react-lifecycles-compat "^3.0.4"
-
-rc-tooltip@^3.7.0, rc-tooltip@~3.7.3:
-  version "3.7.3"
-  resolved "https://registry.yarnpkg.com/rc-tooltip/-/rc-tooltip-3.7.3.tgz#280aec6afcaa44e8dff0480fbaff9e87fc00aecc"
-  integrity sha512-dE2ibukxxkrde7wH9W8ozHKUO4aQnPZ6qBHtrTH9LoO836PjDdiaWO73fgPB05VfJs9FbZdmGPVEbXCeOP99Ww==
-  dependencies:
-    babel-runtime "6.x"
-    prop-types "^15.5.8"
-    rc-trigger "^2.2.2"
-
-rc-tree-select@~2.9.1:
-  version "2.9.1"
-  resolved "https://registry.yarnpkg.com/rc-tree-select/-/rc-tree-select-2.9.1.tgz#d076b8ce5bf432df3fdd8a6a01cdd9c93c8e7399"
-  integrity sha512-AfJQC1ZzaeH+Onmx84TtVLUL2guBZe7exA8XSfj1RRB1doDbYGTtybzpP3CEw/tuSftSRnz+iPt+iaxRTrgXRw==
-  dependencies:
-    classnames "^2.2.1"
-    dom-scroll-into-view "^1.2.1"
-    prop-types "^15.5.8"
-    raf "^3.4.0"
-    rc-animate "^2.8.2"
-    rc-tree "~2.0.0"
-    rc-trigger "^3.0.0-rc.2"
-    rc-util "^4.5.0"
-    react-lifecycles-compat "^3.0.4"
-    shallowequal "^1.0.2"
-    warning "^4.0.1"
-
-rc-tree@~2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/rc-tree/-/rc-tree-2.0.0.tgz#68fc4c9ab696943b279a143619e2ecf05918fb53"
-  integrity sha512-DAT/jsbnFbHqG9Df9OaVG93CAVtTsJVnJiwKX+wqsG8TChpty3s6QX3zJZ+gBgjkq4ikLbu1kuFJtX63EKhSAA==
-  dependencies:
-    babel-runtime "^6.23.0"
-    classnames "2.x"
-    prop-types "^15.5.8"
-    rc-animate "^2.6.0"
-    rc-util "^4.5.1"
-    react-lifecycles-compat "^3.0.4"
-    warning "^3.0.0"
-
-rc-tree@~2.1.0:
-  version "2.1.2"
-  resolved "https://registry.yarnpkg.com/rc-tree/-/rc-tree-2.1.2.tgz#c70546cf1081e1c27bffa314a966e2e4d06b3f2b"
-  integrity sha512-IQG0bkY4bfK11oVIF44Y4V3IuIOAmIIc5j8b8XGkRjsnUOElRr/BNqKCvg9h2UsNJm1J2xv4OA0HfEIv70765Q==
-  dependencies:
-    "@ant-design/create-react-context" "^0.2.4"
-    classnames "2.x"
-    prop-types "^15.5.8"
-    rc-animate "^2.6.0"
-    rc-util "^4.5.1"
-    react-lifecycles-compat "^3.0.4"
-    warning "^4.0.3"
-
-rc-trigger@^2.2.0, rc-trigger@^2.2.2, rc-trigger@^2.3.0, rc-trigger@^2.5.1, rc-trigger@^2.5.4, rc-trigger@^2.6.2:
-  version "2.6.5"
-  resolved "https://registry.yarnpkg.com/rc-trigger/-/rc-trigger-2.6.5.tgz#140a857cf28bd0fa01b9aecb1e26a50a700e9885"
-  integrity sha512-m6Cts9hLeZWsTvWnuMm7oElhf+03GOjOLfTuU0QmdB9ZrW7jR2IpI5rpNM7i9MvAAlMAmTx5Zr7g3uu/aMvZAw==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "^2.2.6"
-    prop-types "15.x"
-    rc-align "^2.4.0"
-    rc-animate "2.x"
-    rc-util "^4.4.0"
-    react-lifecycles-compat "^3.0.4"
-
-rc-trigger@^3.0.0-rc.2:
-  version "3.0.0-rc.3"
-  resolved "https://registry.yarnpkg.com/rc-trigger/-/rc-trigger-3.0.0-rc.3.tgz#35842df1674d25315e1426a44882a4c97652258b"
-  integrity sha512-4vB6cpxcUdm2qO5VtB9q1TZz0MoWm9BzFLvGknulphGrl1qI6uxUsPDCvqnmujdpDdAKGGfjxntFpA7RtAwkFQ==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "^2.2.6"
-    prop-types "15.x"
-    raf "^3.4.0"
-    rc-align "^2.4.1"
-    rc-animate "^3.0.0-rc.1"
-    rc-util "^4.4.0"
-
-rc-upload@~2.7.0:
-  version "2.7.0"
-  resolved "https://registry.yarnpkg.com/rc-upload/-/rc-upload-2.7.0.tgz#f279b758655eb5f99ebf82a5a2648d80d88e0ff4"
-  integrity sha512-Oh9EJB4xE8MQUZ2D0OUST3UMIBjHjnO2IjPNW/cbPredxZz+lzbLPCZxcxRwUwu1gt0LA968UWXAgT1EvZdFfA==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "^2.2.5"
-    prop-types "^15.5.7"
-    warning "4.x"
-
-rc-util@^4.0.4, rc-util@^4.1.0, rc-util@^4.1.1, rc-util@^4.10.0, rc-util@^4.3.0, rc-util@^4.4.0, rc-util@^4.5.0, rc-util@^4.5.1, rc-util@^4.6.0, rc-util@^4.7.0, rc-util@^4.8.0, rc-util@^4.8.1:
-  version "4.11.0"
-  resolved "https://registry.yarnpkg.com/rc-util/-/rc-util-4.11.0.tgz#cf437dcff74ca08a8565ae14f0368acb3a650796"
-  integrity sha512-nB29kXOXsSVjBkWfH+Z1GVh6tRg7XGZtZ0Yfie+OI0stCDixGQ1cPrS6iYxlg+AV2St6COCK5MFrCmpTgghh0w==
-  dependencies:
-    add-dom-event-listener "^1.1.0"
-    babel-runtime "6.x"
-    prop-types "^15.5.10"
-    react-lifecycles-compat "^3.0.4"
-    shallowequal "^0.2.2"
-
-rc@^1.2.7:
-  version "1.2.8"
-  resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed"
-  integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==
-  dependencies:
-    deep-extend "^0.6.0"
-    ini "~1.3.0"
-    minimist "^1.2.0"
-    strip-json-comments "~2.0.1"
-
-react-app-polyfill@^1.0.0:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/react-app-polyfill/-/react-app-polyfill-1.0.2.tgz#2a51175885c88245a2a356dc46df29f38ec9f060"
-  integrity sha512-yZcpLnIr0FOIzrOOz9JC37NWAWEuCaQWmYn9EWjEzlCW4cOmA5MkT5L3iP8QuUeFnoqVCTJgjIWYbXEJgNXhGA==
-  dependencies:
-    core-js "3.1.4"
-    object-assign "4.1.1"
-    promise "8.0.3"
-    raf "3.4.1"
-    regenerator-runtime "0.13.3"
-    whatwg-fetch "3.0.0"
-
-react-app-rewired@^2.1.3:
-  version "2.1.3"
-  resolved "https://registry.yarnpkg.com/react-app-rewired/-/react-app-rewired-2.1.3.tgz#5ae8583ecc9f9f968d40b735d2abbe871378a52f"
-  integrity sha512-NXC2EsQrnEMV7xD70rHcBq0B4PSEzjY/K2m/e+GRgit2jZO/uZApnpCZSKvIX2leLRN69Sqf2id0VXZ1F62CDw==
-  dependencies:
-    cross-spawn "^6.0.5"
-    dotenv "^6.2.0"
-    semver "^5.6.0"
-
-react-dev-utils@^9.0.0:
-  version "9.0.3"
-  resolved "https://registry.yarnpkg.com/react-dev-utils/-/react-dev-utils-9.0.3.tgz#7607455587abb84599451460eb37cef0b684131a"
-  integrity sha512-OyInhcwsvycQ3Zr2pQN+HV4gtRXrky5mJXIy4HnqrWa+mI624xfYfqGuC9dYbxp4Qq3YZzP8GSGQjv0AgNU15w==
-  dependencies:
-    "@babel/code-frame" "7.5.5"
-    address "1.1.0"
-    browserslist "4.6.6"
-    chalk "2.4.2"
-    cross-spawn "6.0.5"
-    detect-port-alt "1.1.6"
-    escape-string-regexp "1.0.5"
-    filesize "3.6.1"
-    find-up "3.0.0"
-    fork-ts-checker-webpack-plugin "1.5.0"
-    global-modules "2.0.0"
-    globby "8.0.2"
-    gzip-size "5.1.1"
-    immer "1.10.0"
-    inquirer "6.5.0"
-    is-root "2.1.0"
-    loader-utils "1.2.3"
-    open "^6.3.0"
-    pkg-up "2.0.0"
-    react-error-overlay "^6.0.1"
-    recursive-readdir "2.2.2"
-    shell-quote "1.6.1"
-    sockjs-client "1.3.0"
-    strip-ansi "5.2.0"
-    text-table "0.2.0"
-
-react-dom@^16.8.6:
-  version "16.9.0"
-  resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-16.9.0.tgz#5e65527a5e26f22ae3701131bcccaee9fb0d3962"
-  integrity sha512-YFT2rxO9hM70ewk9jq0y6sQk8cL02xm4+IzYBz75CQGlClQQ1Bxq0nhHF6OtSbit+AIahujJgb/CPRibFkMNJQ==
-  dependencies:
-    loose-envify "^1.1.0"
-    object-assign "^4.1.1"
-    prop-types "^15.6.2"
-    scheduler "^0.15.0"
-
-react-error-overlay@^6.0.1:
-  version "6.0.1"
-  resolved "https://registry.yarnpkg.com/react-error-overlay/-/react-error-overlay-6.0.1.tgz#b8d3cf9bb991c02883225c48044cb3ee20413e0f"
-  integrity sha512-V9yoTr6MeZXPPd4nV/05eCBvGH9cGzc52FN8fs0O0TVQ3HYYf1n7EgZVtHbldRq5xU9zEzoXIITjYNIfxDDdUw==
-
-react-is@^16.6.0, react-is@^16.7.0, react-is@^16.8.1, react-is@^16.8.4:
-  version "16.9.0"
-  resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.9.0.tgz#21ca9561399aad0ff1a7701c01683e8ca981edcb"
-  integrity sha512-tJBzzzIgnnRfEm046qRcURvwQnZVXmuCbscxUO5RWrGTXpon2d4c8mI0D8WE6ydVIm29JiLB6+RslkIvym9Rjw==
-
-react-lazy-load@^3.0.13:
-  version "3.0.13"
-  resolved "https://registry.yarnpkg.com/react-lazy-load/-/react-lazy-load-3.0.13.tgz#3b0a92d336d43d3f0d73cbe6f35b17050b08b824"
-  integrity sha1-OwqS0zbUPT8Nc8vm81sXBQsIuCQ=
-  dependencies:
-    eventlistener "0.0.1"
-    lodash.debounce "^4.0.0"
-    lodash.throttle "^4.0.0"
-    prop-types "^15.5.8"
-
-react-lifecycles-compat@^3.0.2, react-lifecycles-compat@^3.0.4:
-  version "3.0.4"
-  resolved "https://registry.yarnpkg.com/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz#4f1a273afdfc8f3488a8c516bfda78f872352362"
-  integrity sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==
-
-react-router-dom@^5.0.0:
-  version "5.0.1"
-  resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-5.0.1.tgz#ee66f4a5d18b6089c361958e443489d6bab714be"
-  integrity sha512-zaVHSy7NN0G91/Bz9GD4owex5+eop+KvgbxXsP/O+iW1/Ln+BrJ8QiIR5a6xNPtrdTvLkxqlDClx13QO1uB8CA==
-  dependencies:
-    "@babel/runtime" "^7.1.2"
-    history "^4.9.0"
-    loose-envify "^1.3.1"
-    prop-types "^15.6.2"
-    react-router "5.0.1"
-    tiny-invariant "^1.0.2"
-    tiny-warning "^1.0.0"
-
-react-router@5.0.1:
-  version "5.0.1"
-  resolved "https://registry.yarnpkg.com/react-router/-/react-router-5.0.1.tgz#04ee77df1d1ab6cb8939f9f01ad5702dbadb8b0f"
-  integrity sha512-EM7suCPNKb1NxcTZ2LEOWFtQBQRQXecLxVpdsP4DW4PbbqYWeRiLyV/Tt1SdCrvT2jcyXAXmVTmzvSzrPR63Bg==
-  dependencies:
-    "@babel/runtime" "^7.1.2"
-    history "^4.9.0"
-    hoist-non-react-statics "^3.1.0"
-    loose-envify "^1.3.1"
-    mini-create-react-context "^0.3.0"
-    path-to-regexp "^1.7.0"
-    prop-types "^15.6.2"
-    react-is "^16.6.0"
-    tiny-invariant "^1.0.2"
-    tiny-warning "^1.0.0"
-
-react-scripts@3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/react-scripts/-/react-scripts-3.0.0.tgz#a715613ef3eace025907b409cec8505096e0233e"
-  integrity sha512-F4HegoBuUKZvEzXYksQu05Y6vJqallhHkQUEL6M7OQ5rYLBQC/4MTK6km9ZZvEK9TqMy1XA8SSEJGJgTEr6bSQ==
-  dependencies:
-    "@babel/core" "7.4.3"
-    "@svgr/webpack" "4.1.0"
-    "@typescript-eslint/eslint-plugin" "1.6.0"
-    "@typescript-eslint/parser" "1.6.0"
-    babel-eslint "10.0.1"
-    babel-jest "24.7.1"
-    babel-loader "8.0.5"
-    babel-plugin-named-asset-import "^0.3.2"
-    babel-preset-react-app "^8.0.0"
-    case-sensitive-paths-webpack-plugin "2.2.0"
-    css-loader "2.1.1"
-    dotenv "6.2.0"
-    dotenv-expand "4.2.0"
-    eslint "^5.16.0"
-    eslint-config-react-app "^4.0.0"
-    eslint-loader "2.1.2"
-    eslint-plugin-flowtype "2.50.1"
-    eslint-plugin-import "2.16.0"
-    eslint-plugin-jsx-a11y "6.2.1"
-    eslint-plugin-react "7.12.4"
-    eslint-plugin-react-hooks "^1.5.0"
-    file-loader "3.0.1"
-    fs-extra "7.0.1"
-    html-webpack-plugin "4.0.0-beta.5"
-    identity-obj-proxy "3.0.0"
-    is-wsl "^1.1.0"
-    jest "24.7.1"
-    jest-environment-jsdom-fourteen "0.1.0"
-    jest-resolve "24.7.1"
-    jest-watch-typeahead "0.3.0"
-    mini-css-extract-plugin "0.5.0"
-    optimize-css-assets-webpack-plugin "5.0.1"
-    pnp-webpack-plugin "1.2.1"
-    postcss-flexbugs-fixes "4.1.0"
-    postcss-loader "3.0.0"
-    postcss-normalize "7.0.1"
-    postcss-preset-env "6.6.0"
-    postcss-safe-parser "4.0.1"
-    react-app-polyfill "^1.0.0"
-    react-dev-utils "^9.0.0"
-    resolve "1.10.0"
-    sass-loader "7.1.0"
-    semver "6.0.0"
-    style-loader "0.23.1"
-    terser-webpack-plugin "1.2.3"
-    url-loader "1.1.2"
-    webpack "4.29.6"
-    webpack-dev-server "3.2.1"
-    webpack-manifest-plugin "2.0.4"
-    workbox-webpack-plugin "4.2.0"
-  optionalDependencies:
-    fsevents "2.0.6"
-
-react-slick@~0.25.2:
-  version "0.25.2"
-  resolved "https://registry.yarnpkg.com/react-slick/-/react-slick-0.25.2.tgz#56331b67d47d8bcfe2dceb6acab1c8fd5bd1f6bc"
-  integrity sha512-8MNH/NFX/R7zF6W/w+FS5VXNyDusF+XDW1OU0SzODEU7wqYB+ZTGAiNJ++zVNAVqCAHdyCybScaUB+FCZOmBBw==
-  dependencies:
-    classnames "^2.2.5"
-    enquire.js "^2.1.6"
-    json2mq "^0.2.0"
-    lodash.debounce "^4.0.8"
-    resize-observer-polyfill "^1.5.0"
-
-react@^16.8.6:
-  version "16.9.0"
-  resolved "https://registry.yarnpkg.com/react/-/react-16.9.0.tgz#40ba2f9af13bc1a38d75dbf2f4359a5185c4f7aa"
-  integrity sha512-+7LQnFBwkiw+BobzOF6N//BdoNw0ouwmSJTEm9cglOOmsg/TMiFHZLe2sEoN5M7LgJTj9oHH0gxklfnQe66S1w==
-  dependencies:
-    loose-envify "^1.1.0"
-    object-assign "^4.1.1"
-    prop-types "^15.6.2"
-
-read-pkg-up@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-2.0.0.tgz#6b72a8048984e0c41e79510fd5e9fa99b3b549be"
-  integrity sha1-a3KoBImE4MQeeVEP1en6mbO1Sb4=
-  dependencies:
-    find-up "^2.0.0"
-    read-pkg "^2.0.0"
-
-read-pkg-up@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-4.0.0.tgz#1b221c6088ba7799601c808f91161c66e58f8978"
-  integrity sha512-6etQSH7nJGsK0RbG/2TeDzZFa8shjQ1um+SwQQ5cwKy0dhSXdOncEhb1CPpvQG4h7FyOV6EB6YlV0yJvZQNAkA==
-  dependencies:
-    find-up "^3.0.0"
-    read-pkg "^3.0.0"
-
-read-pkg@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-2.0.0.tgz#8ef1c0623c6a6db0dc6713c4bfac46332b2368f8"
-  integrity sha1-jvHAYjxqbbDcZxPEv6xGMysjaPg=
-  dependencies:
-    load-json-file "^2.0.0"
-    normalize-package-data "^2.3.2"
-    path-type "^2.0.0"
-
-read-pkg@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-3.0.0.tgz#9cbc686978fee65d16c00e2b19c237fcf6e38389"
-  integrity sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k=
-  dependencies:
-    load-json-file "^4.0.0"
-    normalize-package-data "^2.3.2"
-    path-type "^3.0.0"
-
-"readable-stream@1 || 2", readable-stream@^2.0.0, readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.0.6, readable-stream@^2.1.5, readable-stream@^2.2.2, readable-stream@^2.3.3, readable-stream@^2.3.6, readable-stream@~2.3.6:
-  version "2.3.6"
-  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf"
-  integrity sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==
-  dependencies:
-    core-util-is "~1.0.0"
-    inherits "~2.0.3"
-    isarray "~1.0.0"
-    process-nextick-args "~2.0.0"
-    safe-buffer "~5.1.1"
-    string_decoder "~1.1.1"
-    util-deprecate "~1.0.1"
-
-readable-stream@^3.0.6, readable-stream@^3.1.1:
-  version "3.4.0"
-  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.4.0.tgz#a51c26754658e0a3c21dbf59163bd45ba6f447fc"
-  integrity sha512-jItXPLmrSR8jmTRmRWJXCnGJsfy85mB3Wd/uINMXA65yrnFo0cPClFIUWzo2najVNSl+mx7/4W8ttlLWJe99pQ==
-  dependencies:
-    inherits "^2.0.3"
-    string_decoder "^1.1.1"
-    util-deprecate "^1.0.1"
-
-readdirp@^2.2.1:
-  version "2.2.1"
-  resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-2.2.1.tgz#0e87622a3325aa33e892285caf8b4e846529a525"
-  integrity sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==
-  dependencies:
-    graceful-fs "^4.1.11"
-    micromatch "^3.1.10"
-    readable-stream "^2.0.2"
-
-realpath-native@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/realpath-native/-/realpath-native-1.1.0.tgz#2003294fea23fb0672f2476ebe22fcf498a2d65c"
-  integrity sha512-wlgPA6cCIIg9gKz0fgAPjnzh4yR/LnXovwuo9hvyGvx3h8nX4+/iLZplfUWasXpqD8BdnGnP5njOFjkUwPzvjA==
-  dependencies:
-    util.promisify "^1.0.0"
-
-recursive-readdir@2.2.2:
-  version "2.2.2"
-  resolved "https://registry.yarnpkg.com/recursive-readdir/-/recursive-readdir-2.2.2.tgz#9946fb3274e1628de6e36b2f6714953b4845094f"
-  integrity sha512-nRCcW9Sj7NuZwa2XvH9co8NPeXUBhZP7CRKJtU+cS6PW9FpCIFoI5ib0NT1ZrbNuPoRy0ylyCaUL8Gih4LSyFg==
-  dependencies:
-    minimatch "3.0.4"
-
-regenerate-unicode-properties@^8.1.0:
-  version "8.1.0"
-  resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-8.1.0.tgz#ef51e0f0ea4ad424b77bf7cb41f3e015c70a3f0e"
-  integrity sha512-LGZzkgtLY79GeXLm8Dp0BVLdQlWICzBnJz/ipWUgo59qBaZ+BHtq51P2q1uVZlppMuUAT37SDk39qUbjTWB7bA==
-  dependencies:
-    regenerate "^1.4.0"
-
-regenerate@^1.4.0:
-  version "1.4.0"
-  resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.0.tgz#4a856ec4b56e4077c557589cae85e7a4c8869a11"
-  integrity sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==
-
-regenerator-runtime@0.13.3, regenerator-runtime@^0.13.2:
-  version "0.13.3"
-  resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.3.tgz#7cf6a77d8f5c6f60eb73c5fc1955b2ceb01e6bf5"
-  integrity sha512-naKIZz2GQ8JWh///G7L3X6LaQUAMp2lvb1rvwwsURe/VXwD6VMfr+/1NuNw3ag8v2kY1aQ/go5SNn79O9JU7yw==
-
-regenerator-runtime@^0.11.0:
-  version "0.11.1"
-  resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz#be05ad7f9bf7d22e056f9726cee5017fbf19e2e9"
-  integrity sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==
-
-regenerator-transform@^0.14.0:
-  version "0.14.1"
-  resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.14.1.tgz#3b2fce4e1ab7732c08f665dfdb314749c7ddd2fb"
-  integrity sha512-flVuee02C3FKRISbxhXl9mGzdbWUVHubl1SMaknjxkFB1/iqpJhArQUvRxOOPEc/9tAiX0BaQ28FJH10E4isSQ==
-  dependencies:
-    private "^0.1.6"
-
-regex-not@^1.0.0, regex-not@^1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/regex-not/-/regex-not-1.0.2.tgz#1f4ece27e00b0b65e0247a6810e6a85d83a5752c"
-  integrity sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==
-  dependencies:
-    extend-shallow "^3.0.2"
-    safe-regex "^1.1.0"
-
-regexp-tree@^0.1.6:
-  version "0.1.12"
-  resolved "https://registry.yarnpkg.com/regexp-tree/-/regexp-tree-0.1.12.tgz#28eaaa6e66eeb3527c15108a3ff740d9e574e420"
-  integrity sha512-TsXZ8+cv2uxMEkLfgwO0E068gsNMLfuYwMMhiUxf0Kw2Vcgzq93vgl6wIlIYuPmfMqMjfQ9zAporiozqCnwLuQ==
-
-regexp.prototype.flags@^1.2.0:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.2.0.tgz#6b30724e306a27833eeb171b66ac8890ba37e41c"
-  integrity sha512-ztaw4M1VqgMwl9HlPpOuiYgItcHlunW0He2fE6eNfT6E/CF2FtYi9ofOYe4mKntstYk0Fyh/rDRBdS3AnxjlrA==
-  dependencies:
-    define-properties "^1.1.2"
-
-regexpp@^2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-2.0.1.tgz#8d19d31cf632482b589049f8281f93dbcba4d07f"
-  integrity sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==
-
-regexpu-core@^4.5.4:
-  version "4.5.5"
-  resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-4.5.5.tgz#aaffe61c2af58269b3e516b61a73790376326411"
-  integrity sha512-FpI67+ky9J+cDizQUJlIlNZFKual/lUkFr1AG6zOCpwZ9cLrg8UUVakyUQJD7fCDIe9Z2nwTQJNPyonatNmDFQ==
-  dependencies:
-    regenerate "^1.4.0"
-    regenerate-unicode-properties "^8.1.0"
-    regjsgen "^0.5.0"
-    regjsparser "^0.6.0"
-    unicode-match-property-ecmascript "^1.0.4"
-    unicode-match-property-value-ecmascript "^1.1.0"
-
-regjsgen@^0.5.0:
-  version "0.5.0"
-  resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.5.0.tgz#a7634dc08f89209c2049adda3525711fb97265dd"
-  integrity sha512-RnIrLhrXCX5ow/E5/Mh2O4e/oa1/jW0eaBKTSy3LaCj+M3Bqvm97GWDp2yUtzIs4LEn65zR2yiYGFqb2ApnzDA==
-
-regjsparser@^0.6.0:
-  version "0.6.0"
-  resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.6.0.tgz#f1e6ae8b7da2bae96c99399b868cd6c933a2ba9c"
-  integrity sha512-RQ7YyokLiQBomUJuUG8iGVvkgOLxwyZM8k6d3q5SAXpg4r5TZJZigKFvC6PpD+qQ98bCDC5YelPeA3EucDoNeQ==
-  dependencies:
-    jsesc "~0.5.0"
-
-relateurl@0.2.x:
-  version "0.2.7"
-  resolved "https://registry.yarnpkg.com/relateurl/-/relateurl-0.2.7.tgz#54dbf377e51440aca90a4cd274600d3ff2d888a9"
-  integrity sha1-VNvzd+UUQKypCkzSdGANP/LYiKk=
-
-remove-trailing-separator@^1.0.1:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz#c24bce2a283adad5bc3f58e0d48249b92379d8ef"
-  integrity sha1-wkvOKig62tW8P1jg1IJJuSN52O8=
-
-renderkid@^2.0.1:
-  version "2.0.3"
-  resolved "https://registry.yarnpkg.com/renderkid/-/renderkid-2.0.3.tgz#380179c2ff5ae1365c522bf2fcfcff01c5b74149"
-  integrity sha512-z8CLQp7EZBPCwCnncgf9C4XAi3WR0dv+uWu/PjIyhhAb5d6IJ/QZqlHFprHeKT+59//V6BNUsLbvN8+2LarxGA==
-  dependencies:
-    css-select "^1.1.0"
-    dom-converter "^0.2"
-    htmlparser2 "^3.3.0"
-    strip-ansi "^3.0.0"
-    utila "^0.4.0"
-
-repeat-element@^1.1.2:
-  version "1.1.3"
-  resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.3.tgz#782e0d825c0c5a3bb39731f84efee6b742e6b1ce"
-  integrity sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g==
-
-repeat-string@^1.6.1:
-  version "1.6.1"
-  resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637"
-  integrity sha1-jcrkcOHIirwtYA//Sndihtp15jc=
-
-request-promise-core@1.1.2:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/request-promise-core/-/request-promise-core-1.1.2.tgz#339f6aababcafdb31c799ff158700336301d3346"
-  integrity sha512-UHYyq1MO8GsefGEt7EprS8UrXsm1TxEvFUX1IMTuSLU2Rh7fTIdFtl8xD7JiEYiWU2dl+NYAjCTksTehQUxPag==
-  dependencies:
-    lodash "^4.17.11"
-
-request-promise-native@^1.0.5:
-  version "1.0.7"
-  resolved "https://registry.yarnpkg.com/request-promise-native/-/request-promise-native-1.0.7.tgz#a49868a624bdea5069f1251d0a836e0d89aa2c59"
-  integrity sha512-rIMnbBdgNViL37nZ1b3L/VfPOpSi0TqVDQPAvO6U14lMzOLrt5nilxCQqtDKhZeDiW0/hkCXGoQjhgJd/tCh6w==
-  dependencies:
-    request-promise-core "1.1.2"
-    stealthy-require "^1.1.1"
-    tough-cookie "^2.3.3"
-
-request@^2.83.0, request@^2.87.0, request@^2.88.0:
-  version "2.88.0"
-  resolved "https://registry.yarnpkg.com/request/-/request-2.88.0.tgz#9c2fca4f7d35b592efe57c7f0a55e81052124fef"
-  integrity sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg==
-  dependencies:
-    aws-sign2 "~0.7.0"
-    aws4 "^1.8.0"
-    caseless "~0.12.0"
-    combined-stream "~1.0.6"
-    extend "~3.0.2"
-    forever-agent "~0.6.1"
-    form-data "~2.3.2"
-    har-validator "~5.1.0"
-    http-signature "~1.2.0"
-    is-typedarray "~1.0.0"
-    isstream "~0.1.2"
-    json-stringify-safe "~5.0.1"
-    mime-types "~2.1.19"
-    oauth-sign "~0.9.0"
-    performance-now "^2.1.0"
-    qs "~6.5.2"
-    safe-buffer "^5.1.2"
-    tough-cookie "~2.4.3"
-    tunnel-agent "^0.6.0"
-    uuid "^3.3.2"
-
-require-directory@^2.1.1:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42"
-  integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I=
-
-require-main-filename@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1"
-  integrity sha1-l/cXtp1IeE9fUmpsWqj/3aBVpNE=
-
-require-main-filename@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b"
-  integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==
-
-requireindex@^1.2.0:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/requireindex/-/requireindex-1.2.0.tgz#3463cdb22ee151902635aa6c9535d4de9c2ef1ef"
-  integrity sha512-L9jEkOi3ASd9PYit2cwRfyppc9NoABujTP8/5gFcbERmo5jUoAKovIC3fsF17pkTnGsrByysqX+Kxd2OTNI1ww==
-
-requires-port@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff"
-  integrity sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=
-
-resize-observer-polyfill@^1.5.0, resize-observer-polyfill@^1.5.1:
-  version "1.5.1"
-  resolved "https://registry.yarnpkg.com/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz#0e9020dd3d21024458d4ebd27e23e40269810464"
-  integrity sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==
-
-resolve-cwd@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/resolve-cwd/-/resolve-cwd-2.0.0.tgz#00a9f7387556e27038eae232caa372a6a59b665a"
-  integrity sha1-AKn3OHVW4nA46uIyyqNypqWbZlo=
-  dependencies:
-    resolve-from "^3.0.0"
-
-resolve-from@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-3.0.0.tgz#b22c7af7d9d6881bc8b6e653335eebcb0a188748"
-  integrity sha1-six699nWiBvItuZTM17rywoYh0g=
-
-resolve-from@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6"
-  integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==
-
-resolve-pathname@^2.2.0:
-  version "2.2.0"
-  resolved "https://registry.yarnpkg.com/resolve-pathname/-/resolve-pathname-2.2.0.tgz#7e9ae21ed815fd63ab189adeee64dc831eefa879"
-  integrity sha512-bAFz9ld18RzJfddgrO2e/0S2O81710++chRMUxHjXOYKF6jTAMrUNZrEZ1PvV0zlhfjidm08iRPdTLPno1FuRg==
-
-resolve-url@^0.2.1:
-  version "0.2.1"
-  resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a"
-  integrity sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=
-
-resolve@1.1.7:
-  version "1.1.7"
-  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.1.7.tgz#203114d82ad2c5ed9e8e0411b3932875e889e97b"
-  integrity sha1-IDEU2CrSxe2ejgQRs5ModeiJ6Xs=
-
-resolve@1.10.0:
-  version "1.10.0"
-  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.10.0.tgz#3bdaaeaf45cc07f375656dfd2e54ed0810b101ba"
-  integrity sha512-3sUr9aq5OfSg2S9pNtPA9hL1FVEAjvfOC4leW0SNf/mpnaakz2a9femSd6LqAww2RaFctwyf1lCqnTHuF1rxDg==
-  dependencies:
-    path-parse "^1.0.6"
-
-resolve@^1.10.0, resolve@^1.3.2, resolve@^1.5.0, resolve@^1.8.1, resolve@^1.9.0:
-  version "1.12.0"
-  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.12.0.tgz#3fc644a35c84a48554609ff26ec52b66fa577df6"
-  integrity sha512-B/dOmuoAik5bKcD6s6nXDCjzUKnaDvdkRyAk6rsmsKLipWj4797iothd7jmmUhWTfinVMU+wc56rYKsit2Qy4w==
-  dependencies:
-    path-parse "^1.0.6"
-
-restore-cursor@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-2.0.0.tgz#9f7ee287f82fd326d4fd162923d62129eee0dfaf"
-  integrity sha1-n37ih/gv0ybU/RYpI9YhKe7g368=
-  dependencies:
-    onetime "^2.0.0"
-    signal-exit "^3.0.2"
-
-ret@~0.1.10:
-  version "0.1.15"
-  resolved "https://registry.yarnpkg.com/ret/-/ret-0.1.15.tgz#b8a4825d5bdb1fc3f6f53c2bc33f81388681c7bc"
-  integrity sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==
-
-rgb-regex@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/rgb-regex/-/rgb-regex-1.0.1.tgz#c0e0d6882df0e23be254a475e8edd41915feaeb1"
-  integrity sha1-wODWiC3w4jviVKR16O3UGRX+rrE=
-
-rgba-regex@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/rgba-regex/-/rgba-regex-1.0.0.tgz#43374e2e2ca0968b0ef1523460b7d730ff22eeb3"
-  integrity sha1-QzdOLiyglosO8VI0YLfXMP8i7rM=
-
-rimraf@2.6.3:
-  version "2.6.3"
-  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.3.tgz#b2d104fe0d8fb27cf9e0a1cda8262dd3833c6cab"
-  integrity sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==
-  dependencies:
-    glob "^7.1.3"
-
-rimraf@^2.2.8, rimraf@^2.5.4, rimraf@^2.6.1, rimraf@^2.6.3:
-  version "2.7.1"
-  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.7.1.tgz#35797f13a7fdadc566142c29d4f07ccad483e3ec"
-  integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==
-  dependencies:
-    glob "^7.1.3"
-
-ripemd160@^2.0.0, ripemd160@^2.0.1:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/ripemd160/-/ripemd160-2.0.2.tgz#a1c1a6f624751577ba5d07914cbc92850585890c"
-  integrity sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==
-  dependencies:
-    hash-base "^3.0.0"
-    inherits "^2.0.1"
-
-rmc-feedback@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/rmc-feedback/-/rmc-feedback-2.0.0.tgz#cbc6cb3ae63c7a635eef0e25e4fbaf5ac366eeaa"
-  integrity sha512-5PWOGOW7VXks/l3JzlOU9NIxRpuaSS8d9zA3UULUCuTKnpwBHNvv1jSJzxgbbCQeYzROWUpgKI4za3X4C/mKmQ==
-  dependencies:
-    babel-runtime "6.x"
-    classnames "^2.2.5"
-
-rsvp@^4.8.4:
-  version "4.8.5"
-  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-4.8.5.tgz#c8f155311d167f68f21e168df71ec5b083113734"
-  integrity sha512-nfMOlASu9OnRJo1mbEk2cz0D56a1MBNrJ7orjRZQG10XDyuvwksKbuXNp6qa+kbn839HwjwhBzhFmdsaEAfauA==
-
-run-async@^2.2.0:
-  version "2.3.0"
-  resolved "https://registry.yarnpkg.com/run-async/-/run-async-2.3.0.tgz#0371ab4ae0bdd720d4166d7dfda64ff7a445a6c0"
-  integrity sha1-A3GrSuC91yDUFm19/aZP96RFpsA=
-  dependencies:
-    is-promise "^2.1.0"
-
-run-queue@^1.0.0, run-queue@^1.0.3:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/run-queue/-/run-queue-1.0.3.tgz#e848396f057d223f24386924618e25694161ec47"
-  integrity sha1-6Eg5bwV9Ij8kOGkkYY4laUFh7Ec=
-  dependencies:
-    aproba "^1.1.1"
-
-rxjs@^6.4.0:
-  version "6.5.2"
-  resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-6.5.2.tgz#2e35ce815cd46d84d02a209fb4e5921e051dbec7"
-  integrity sha512-HUb7j3kvb7p7eCUHE3FqjoDsC1xfZQ4AHFWfTKSpZ+sAhhz5X1WX0ZuUqWbzB2QhSLp3DoLUG+hMdEDKqWo2Zg==
-  dependencies:
-    tslib "^1.9.0"
-
-safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1:
-  version "5.1.2"
-  resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d"
-  integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==
-
-safe-buffer@>=5.1.0, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@~5.2.0:
-  version "5.2.0"
-  resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.0.tgz#b74daec49b1148f88c64b68d49b1e815c1f2f519"
-  integrity sha512-fZEwUGbVl7kouZs1jCdMLdt95hdIv0ZeHg6L7qPeciMZhZ+/gdesW4wgTARkrFWEpspjEATAzUGPG8N2jJiwbg==
-
-safe-regex@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/safe-regex/-/safe-regex-1.1.0.tgz#40a3669f3b077d1e943d44629e157dd48023bf2e"
-  integrity sha1-QKNmnzsHfR6UPURinhV91IAjvy4=
-  dependencies:
-    ret "~0.1.10"
-
-"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0:
-  version "2.1.2"
-  resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a"
-  integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==
-
-sane@^4.0.3:
-  version "4.1.0"
-  resolved "https://registry.yarnpkg.com/sane/-/sane-4.1.0.tgz#ed881fd922733a6c461bc189dc2b6c006f3ffded"
-  integrity sha512-hhbzAgTIX8O7SHfp2c8/kREfEn4qO/9q8C9beyY6+tvZ87EpoZ3i1RIEvp27YBswnNbY9mWd6paKVmKbAgLfZA==
-  dependencies:
-    "@cnakazawa/watch" "^1.0.3"
-    anymatch "^2.0.0"
-    capture-exit "^2.0.0"
-    exec-sh "^0.3.2"
-    execa "^1.0.0"
-    fb-watchman "^2.0.0"
-    micromatch "^3.1.4"
-    minimist "^1.1.1"
-    walker "~1.0.5"
-
-sass-loader@7.1.0:
-  version "7.1.0"
-  resolved "https://registry.yarnpkg.com/sass-loader/-/sass-loader-7.1.0.tgz#16fd5138cb8b424bf8a759528a1972d72aad069d"
-  integrity sha512-+G+BKGglmZM2GUSfT9TLuEp6tzehHPjAMoRRItOojWIqIGPloVCMhNIQuG639eJ+y033PaGTSjLaTHts8Kw79w==
-  dependencies:
-    clone-deep "^2.0.1"
-    loader-utils "^1.0.1"
-    lodash.tail "^4.1.1"
-    neo-async "^2.5.0"
-    pify "^3.0.0"
-    semver "^5.5.0"
-
-sax@^1.2.4, sax@~1.2.4:
-  version "1.2.4"
-  resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9"
-  integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==
-
-saxes@^3.1.9:
-  version "3.1.11"
-  resolved "https://registry.yarnpkg.com/saxes/-/saxes-3.1.11.tgz#d59d1fd332ec92ad98a2e0b2ee644702384b1c5b"
-  integrity sha512-Ydydq3zC+WYDJK1+gRxRapLIED9PWeSuuS41wqyoRmzvhhh9nc+QQrVMKJYzJFULazeGhzSV0QleN2wD3boh2g==
-  dependencies:
-    xmlchars "^2.1.1"
-
-scheduler@^0.15.0:
-  version "0.15.0"
-  resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.15.0.tgz#6bfcf80ff850b280fed4aeecc6513bc0b4f17f8e"
-  integrity sha512-xAefmSfN6jqAa7Kuq7LIJY0bwAPG3xlCj0HMEBQk1lxYiDKZscY2xJ5U/61ZTrYbmNQbXa+gc7czPkVo11tnCg==
-  dependencies:
-    loose-envify "^1.1.0"
-    object-assign "^4.1.1"
-
-schema-utils@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-1.0.0.tgz#0b79a93204d7b600d4b2850d1f66c2a34951c770"
-  integrity sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==
-  dependencies:
-    ajv "^6.1.0"
-    ajv-errors "^1.0.0"
-    ajv-keywords "^3.1.0"
-
-select-hose@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/select-hose/-/select-hose-2.0.0.tgz#625d8658f865af43ec962bfc376a37359a4994ca"
-  integrity sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo=
-
-selfsigned@^1.9.1:
-  version "1.10.4"
-  resolved "https://registry.yarnpkg.com/selfsigned/-/selfsigned-1.10.4.tgz#cdd7eccfca4ed7635d47a08bf2d5d3074092e2cd"
-  integrity sha512-9AukTiDmHXGXWtWjembZ5NDmVvP2695EtpgbCsxCa68w3c88B+alqbmZ4O3hZ4VWGXeGWzEVdvqgAJD8DQPCDw==
-  dependencies:
-    node-forge "0.7.5"
-
-"semver@2 || 3 || 4 || 5", semver@^5.3.0, semver@^5.4.1, semver@^5.5.0, semver@^5.5.1, semver@^5.6.0:
-  version "5.7.1"
-  resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7"
-  integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==
-
-semver@5.5.0:
-  version "5.5.0"
-  resolved "https://registry.yarnpkg.com/semver/-/semver-5.5.0.tgz#dc4bbc7a6ca9d916dee5d43516f0092b58f7b8ab"
-  integrity sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==
-
-semver@6.0.0:
-  version "6.0.0"
-  resolved "https://registry.yarnpkg.com/semver/-/semver-6.0.0.tgz#05e359ee571e5ad7ed641a6eec1e547ba52dea65"
-  integrity sha512-0UewU+9rFapKFnlbirLi3byoOuhrSsli/z/ihNnvM24vgF+8sNBiI1LZPBSH9wJKUwaUbw+s3hToDLCXkrghrQ==
-
-semver@^6.0.0, semver@^6.2.0, semver@^6.3.0:
-  version "6.3.0"
-  resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d"
-  integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==
-
-send@0.17.1:
-  version "0.17.1"
-  resolved "https://registry.yarnpkg.com/send/-/send-0.17.1.tgz#c1d8b059f7900f7466dd4938bdc44e11ddb376c8"
-  integrity sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==
-  dependencies:
-    debug "2.6.9"
-    depd "~1.1.2"
-    destroy "~1.0.4"
-    encodeurl "~1.0.2"
-    escape-html "~1.0.3"
-    etag "~1.8.1"
-    fresh "0.5.2"
-    http-errors "~1.7.2"
-    mime "1.6.0"
-    ms "2.1.1"
-    on-finished "~2.3.0"
-    range-parser "~1.2.1"
-    statuses "~1.5.0"
-
-serialize-javascript@^1.4.0, serialize-javascript@^1.7.0:
-  version "1.8.0"
-  resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-1.8.0.tgz#9515fc687232e2321aea1ca7a529476eb34bb480"
-  integrity sha512-3tHgtF4OzDmeKYj6V9nSyceRS0UJ3C7VqyD2Yj28vC/z2j6jG5FmFGahOKMD9CrglxTm3tETr87jEypaYV8DUg==
-
-serve-index@^1.7.2:
-  version "1.9.1"
-  resolved "https://registry.yarnpkg.com/serve-index/-/serve-index-1.9.1.tgz#d3768d69b1e7d82e5ce050fff5b453bea12a9239"
-  integrity sha1-03aNabHn2C5c4FD/9bRTvqEqkjk=
-  dependencies:
-    accepts "~1.3.4"
-    batch "0.6.1"
-    debug "2.6.9"
-    escape-html "~1.0.3"
-    http-errors "~1.6.2"
-    mime-types "~2.1.17"
-    parseurl "~1.3.2"
-
-serve-static@1.14.1:
-  version "1.14.1"
-  resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.14.1.tgz#666e636dc4f010f7ef29970a88a674320898b2f9"
-  integrity sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==
-  dependencies:
-    encodeurl "~1.0.2"
-    escape-html "~1.0.3"
-    parseurl "~1.3.3"
-    send "0.17.1"
-
-set-blocking@^2.0.0, set-blocking@~2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7"
-  integrity sha1-BF+XgtARrppoA93TgrJDkrPYkPc=
-
-set-value@^2.0.0, set-value@^2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/set-value/-/set-value-2.0.1.tgz#a18d40530e6f07de4228c7defe4227af8cad005b"
-  integrity sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==
-  dependencies:
-    extend-shallow "^2.0.1"
-    is-extendable "^0.1.1"
-    is-plain-object "^2.0.3"
-    split-string "^3.0.1"
-
-setimmediate@^1.0.4, setimmediate@^1.0.5:
-  version "1.0.5"
-  resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285"
-  integrity sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU=
-
-setprototypeof@1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.0.tgz#d0bd85536887b6fe7c0d818cb962d9d91c54e656"
-  integrity sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==
-
-setprototypeof@1.1.1:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.1.tgz#7e95acb24aa92f5885e0abef5ba131330d4ae683"
-  integrity sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==
-
-sha.js@^2.4.0, sha.js@^2.4.8:
-  version "2.4.11"
-  resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.11.tgz#37a5cf0b81ecbc6943de109ba2960d1b26584ae7"
-  integrity sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==
-  dependencies:
-    inherits "^2.0.1"
-    safe-buffer "^5.0.1"
-
-shallow-clone@^0.1.2:
-  version "0.1.2"
-  resolved "https://registry.yarnpkg.com/shallow-clone/-/shallow-clone-0.1.2.tgz#5909e874ba77106d73ac414cfec1ffca87d97060"
-  integrity sha1-WQnodLp3EG1zrEFM/sH/yofZcGA=
-  dependencies:
-    is-extendable "^0.1.1"
-    kind-of "^2.0.1"
-    lazy-cache "^0.2.3"
-    mixin-object "^2.0.1"
-
-shallow-clone@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/shallow-clone/-/shallow-clone-1.0.0.tgz#4480cd06e882ef68b2ad88a3ea54832e2c48b571"
-  integrity sha512-oeXreoKR/SyNJtRJMAKPDSvd28OqEwG4eR/xc856cRGBII7gX9lvAqDxusPm0846z/w/hWYjI1NpKwJ00NHzRA==
-  dependencies:
-    is-extendable "^0.1.1"
-    kind-of "^5.0.0"
-    mixin-object "^2.0.1"
-
-shallow-equal@^1.0.0:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/shallow-equal/-/shallow-equal-1.2.0.tgz#fd828d2029ff4e19569db7e19e535e94e2d1f5cc"
-  integrity sha512-Z21pVxR4cXsfwpMKMhCEIO1PCi5sp7KEp+CmOpBQ+E8GpHwKOw2sEzk7sgblM3d/j4z4gakoWEoPcjK0VJQogA==
-
-shallowequal@^0.2.2:
-  version "0.2.2"
-  resolved "https://registry.yarnpkg.com/shallowequal/-/shallowequal-0.2.2.tgz#1e32fd5bcab6ad688a4812cb0cc04efc75c7014e"
-  integrity sha1-HjL9W8q2rWiKSBLLDMBO/HXHAU4=
-  dependencies:
-    lodash.keys "^3.1.2"
-
-shallowequal@^1.0.1, shallowequal@^1.0.2, shallowequal@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/shallowequal/-/shallowequal-1.1.0.tgz#188d521de95b9087404fd4dcb68b13df0ae4e7f8"
-  integrity sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==
-
-shebang-command@^1.2.0:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea"
-  integrity sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=
-  dependencies:
-    shebang-regex "^1.0.0"
-
-shebang-regex@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3"
-  integrity sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=
-
-shell-quote@1.6.1:
-  version "1.6.1"
-  resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.6.1.tgz#f4781949cce402697127430ea3b3c5476f481767"
-  integrity sha1-9HgZSczkAmlxJ0MOo7PFR29IF2c=
-  dependencies:
-    array-filter "~0.0.0"
-    array-map "~0.0.0"
-    array-reduce "~0.0.0"
-    jsonify "~0.0.0"
-
-shellwords@^0.1.1:
-  version "0.1.1"
-  resolved "https://registry.yarnpkg.com/shellwords/-/shellwords-0.1.1.tgz#d6b9181c1a48d397324c84871efbcfc73fc0654b"
-  integrity sha512-vFwSUfQvqybiICwZY5+DAWIPLKsWO31Q91JSKl3UYv+K5c2QRPzn0qzec6QPu1Qc9eHYItiP3NdJqNVqetYAww==
-
-signal-exit@^3.0.0, signal-exit@^3.0.2:
-  version "3.0.2"
-  resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d"
-  integrity sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=
-
-simple-swizzle@^0.2.2:
-  version "0.2.2"
-  resolved "https://registry.yarnpkg.com/simple-swizzle/-/simple-swizzle-0.2.2.tgz#a4da6b635ffcccca33f70d17cb92592de95e557a"
-  integrity sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=
-  dependencies:
-    is-arrayish "^0.3.1"
-
-sisteransi@^1.0.3:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.3.tgz#98168d62b79e3a5e758e27ae63c4a053d748f4eb"
-  integrity sha512-SbEG75TzH8G7eVXFSN5f9EExILKfly7SUvVY5DhhYLvfhKqhDFY0OzevWa/zwak0RLRfWS5AvfMWpd9gJvr5Yg==
-
-slash@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55"
-  integrity sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=
-
-slash@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/slash/-/slash-2.0.0.tgz#de552851a1759df3a8f206535442f5ec4ddeab44"
-  integrity sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==
-
-slice-ansi@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-2.1.0.tgz#cacd7693461a637a5788d92a7dd4fba068e81636"
-  integrity sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==
-  dependencies:
-    ansi-styles "^3.2.0"
-    astral-regex "^1.0.0"
-    is-fullwidth-code-point "^2.0.0"
-
-snapdragon-node@^2.0.1:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/snapdragon-node/-/snapdragon-node-2.1.1.tgz#6c175f86ff14bdb0724563e8f3c1b021a286853b"
-  integrity sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==
-  dependencies:
-    define-property "^1.0.0"
-    isobject "^3.0.0"
-    snapdragon-util "^3.0.1"
-
-snapdragon-util@^3.0.1:
-  version "3.0.1"
-  resolved "https://registry.yarnpkg.com/snapdragon-util/-/snapdragon-util-3.0.1.tgz#f956479486f2acd79700693f6f7b805e45ab56e2"
-  integrity sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==
-  dependencies:
-    kind-of "^3.2.0"
-
-snapdragon@^0.8.1:
-  version "0.8.2"
-  resolved "https://registry.yarnpkg.com/snapdragon/-/snapdragon-0.8.2.tgz#64922e7c565b0e14204ba1aa7d6964278d25182d"
-  integrity sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==
-  dependencies:
-    base "^0.11.1"
-    debug "^2.2.0"
-    define-property "^0.2.5"
-    extend-shallow "^2.0.1"
-    map-cache "^0.2.2"
-    source-map "^0.5.6"
-    source-map-resolve "^0.5.0"
-    use "^3.1.0"
-
-sockjs-client@1.3.0:
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/sockjs-client/-/sockjs-client-1.3.0.tgz#12fc9d6cb663da5739d3dc5fb6e8687da95cb177"
-  integrity sha512-R9jxEzhnnrdxLCNln0xg5uGHqMnkhPSTzUZH2eXcR03S/On9Yvoq2wyUZILRUhZCNVu2PmwWVoyuiPz8th8zbg==
-  dependencies:
-    debug "^3.2.5"
-    eventsource "^1.0.7"
-    faye-websocket "~0.11.1"
-    inherits "^2.0.3"
-    json3 "^3.3.2"
-    url-parse "^1.4.3"
-
-sockjs@0.3.19:
-  version "0.3.19"
-  resolved "https://registry.yarnpkg.com/sockjs/-/sockjs-0.3.19.tgz#d976bbe800af7bd20ae08598d582393508993c0d"
-  integrity sha512-V48klKZl8T6MzatbLlzzRNhMepEys9Y4oGFpypBFFn1gLI/QQ9HtLLyWJNbPlwGLelOVOEijUbTTJeLLI59jLw==
-  dependencies:
-    faye-websocket "^0.10.0"
-    uuid "^3.0.1"
-
-source-list-map@^2.0.0:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/source-list-map/-/source-list-map-2.0.1.tgz#3993bd873bfc48479cca9ea3a547835c7c154b34"
-  integrity sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw==
-
-source-map-resolve@^0.5.0:
-  version "0.5.2"
-  resolved "https://registry.yarnpkg.com/source-map-resolve/-/source-map-resolve-0.5.2.tgz#72e2cc34095543e43b2c62b2c4c10d4a9054f259"
-  integrity sha512-MjqsvNwyz1s0k81Goz/9vRBe9SZdB09Bdw+/zYyO+3CuPk6fouTaxscHkgtE8jKvf01kVfl8riHzERQ/kefaSA==
-  dependencies:
-    atob "^2.1.1"
-    decode-uri-component "^0.2.0"
-    resolve-url "^0.2.1"
-    source-map-url "^0.4.0"
-    urix "^0.1.0"
-
-source-map-support@^0.5.6, source-map-support@~0.5.10, source-map-support@~0.5.12:
-  version "0.5.13"
-  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.13.tgz#31b24a9c2e73c2de85066c0feb7d44767ed52932"
-  integrity sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==
-  dependencies:
-    buffer-from "^1.0.0"
-    source-map "^0.6.0"
-
-source-map-url@^0.4.0:
-  version "0.4.0"
-  resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.0.tgz#3e935d7ddd73631b97659956d55128e87b5084a3"
-  integrity sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=
-
-source-map@^0.5.0, source-map@^0.5.3, source-map@^0.5.6:
-  version "0.5.7"
-  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc"
-  integrity sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=
-
-source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0, source-map@~0.6.1:
-  version "0.6.1"
-  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263"
-  integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
-
-spdx-correct@^3.0.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.0.tgz#fb83e504445268f154b074e218c87c003cd31df4"
-  integrity sha512-lr2EZCctC2BNR7j7WzJ2FpDznxky1sjfxvvYEyzxNyb6lZXHODmEoJeFu4JupYlkfha1KZpJyoqiJ7pgA1qq8Q==
-  dependencies:
-    spdx-expression-parse "^3.0.0"
-    spdx-license-ids "^3.0.0"
-
-spdx-exceptions@^2.1.0:
-  version "2.2.0"
-  resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.2.0.tgz#2ea450aee74f2a89bfb94519c07fcd6f41322977"
-  integrity sha512-2XQACfElKi9SlVb1CYadKDXvoajPgBVPn/gOQLrTvHdElaVhr7ZEbqJaRnJLVNeaI4cMEAgVCeBMKF6MWRDCRA==
-
-spdx-expression-parse@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz#99e119b7a5da00e05491c9fa338b7904823b41d0"
-  integrity sha512-Yg6D3XpRD4kkOmTpdgbUiEJFKghJH03fiC1OPll5h/0sO6neh2jqRDVHOQ4o/LMea0tgCkbMgea5ip/e+MkWyg==
-  dependencies:
-    spdx-exceptions "^2.1.0"
-    spdx-license-ids "^3.0.0"
-
-spdx-license-ids@^3.0.0:
-  version "3.0.5"
-  resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz#3694b5804567a458d3c8045842a6358632f62654"
-  integrity sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q==
-
-spdy-transport@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/spdy-transport/-/spdy-transport-3.0.0.tgz#00d4863a6400ad75df93361a1608605e5dcdcf31"
-  integrity sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==
-  dependencies:
-    debug "^4.1.0"
-    detect-node "^2.0.4"
-    hpack.js "^2.1.6"
-    obuf "^1.1.2"
-    readable-stream "^3.0.6"
-    wbuf "^1.7.3"
-
-spdy@^4.0.0:
-  version "4.0.1"
-  resolved "https://registry.yarnpkg.com/spdy/-/spdy-4.0.1.tgz#6f12ed1c5db7ea4f24ebb8b89ba58c87c08257f2"
-  integrity sha512-HeZS3PBdMA+sZSu0qwpCxl3DeALD5ASx8pAX0jZdKXSpPWbQ6SYGnlg3BBmYLx5LtiZrmkAZfErCm2oECBcioA==
-  dependencies:
-    debug "^4.1.0"
-    handle-thing "^2.0.0"
-    http-deceiver "^1.2.7"
-    select-hose "^2.0.0"
-    spdy-transport "^3.0.0"
-
-split-string@^3.0.1, split-string@^3.0.2:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/split-string/-/split-string-3.1.0.tgz#7cb09dda3a86585705c64b39a6466038682e8fe2"
-  integrity sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==
-  dependencies:
-    extend-shallow "^3.0.0"
-
-sprintf-js@~1.0.2:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
-  integrity sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=
-
-sshpk@^1.7.0:
-  version "1.16.1"
-  resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.16.1.tgz#fb661c0bef29b39db40769ee39fa70093d6f6877"
-  integrity sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==
-  dependencies:
-    asn1 "~0.2.3"
-    assert-plus "^1.0.0"
-    bcrypt-pbkdf "^1.0.0"
-    dashdash "^1.12.0"
-    ecc-jsbn "~0.1.1"
-    getpass "^0.1.1"
-    jsbn "~0.1.0"
-    safer-buffer "^2.0.2"
-    tweetnacl "~0.14.0"
-
-ssri@^6.0.1:
-  version "6.0.1"
-  resolved "https://registry.yarnpkg.com/ssri/-/ssri-6.0.1.tgz#2a3c41b28dd45b62b63676ecb74001265ae9edd8"
-  integrity sha512-3Wge10hNcT1Kur4PDFwEieXSCMCJs/7WvSACcrMYrNp+b8kDL1/0wJch5Ni2WrtwEa2IO8OsVfeKIciKCDx/QA==
-  dependencies:
-    figgy-pudding "^3.5.1"
-
-stable@^0.1.8:
-  version "0.1.8"
-  resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.8.tgz#836eb3c8382fe2936feaf544631017ce7d47a3cf"
-  integrity sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==
-
-stack-utils@^1.0.1:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/stack-utils/-/stack-utils-1.0.2.tgz#33eba3897788558bebfc2db059dc158ec36cebb8"
-  integrity sha512-MTX+MeG5U994cazkjd/9KNAapsHnibjMLnfXodlkXw76JEea0UiNzrqidzo1emMwk7w5Qhc9jd4Bn9TBb1MFwA==
-
-static-extend@^0.1.1:
-  version "0.1.2"
-  resolved "https://registry.yarnpkg.com/static-extend/-/static-extend-0.1.2.tgz#60809c39cbff55337226fd5e0b520f341f1fb5c6"
-  integrity sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=
-  dependencies:
-    define-property "^0.2.5"
-    object-copy "^0.1.0"
-
-"statuses@>= 1.4.0 < 2", "statuses@>= 1.5.0 < 2", statuses@~1.5.0:
-  version "1.5.0"
-  resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c"
-  integrity sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=
-
-stealthy-require@^1.1.1:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/stealthy-require/-/stealthy-require-1.1.1.tgz#35b09875b4ff49f26a777e509b3090a3226bf24b"
-  integrity sha1-NbCYdbT/SfJqd35QmzCQoyJr8ks=
-
-stream-browserify@^2.0.1:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/stream-browserify/-/stream-browserify-2.0.2.tgz#87521d38a44aa7ee91ce1cd2a47df0cb49dd660b"
-  integrity sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==
-  dependencies:
-    inherits "~2.0.1"
-    readable-stream "^2.0.2"
-
-stream-each@^1.1.0:
-  version "1.2.3"
-  resolved "https://registry.yarnpkg.com/stream-each/-/stream-each-1.2.3.tgz#ebe27a0c389b04fbcc233642952e10731afa9bae"
-  integrity sha512-vlMC2f8I2u/bZGqkdfLQW/13Zihpej/7PmSiMQsbYddxuTsJp8vRe2x2FvVExZg7FaOds43ROAuFJwPR4MTZLw==
-  dependencies:
-    end-of-stream "^1.1.0"
-    stream-shift "^1.0.0"
-
-stream-http@^2.7.2:
-  version "2.8.3"
-  resolved "https://registry.yarnpkg.com/stream-http/-/stream-http-2.8.3.tgz#b2d242469288a5a27ec4fe8933acf623de6514fc"
-  integrity sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==
-  dependencies:
-    builtin-status-codes "^3.0.0"
-    inherits "^2.0.1"
-    readable-stream "^2.3.6"
-    to-arraybuffer "^1.0.0"
-    xtend "^4.0.0"
-
-stream-shift@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/stream-shift/-/stream-shift-1.0.0.tgz#d5c752825e5367e786f78e18e445ea223a155952"
-  integrity sha1-1cdSgl5TZ+eG944Y5EXqIjoVWVI=
-
-string-convert@^0.2.0:
-  version "0.2.1"
-  resolved "https://registry.yarnpkg.com/string-convert/-/string-convert-0.2.1.tgz#6982cc3049fbb4cd85f8b24568b9d9bf39eeff97"
-  integrity sha1-aYLMMEn7tM2F+LJFaLnZvznu/5c=
-
-string-length@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/string-length/-/string-length-2.0.0.tgz#d40dbb686a3ace960c1cffca562bf2c45f8363ed"
-  integrity sha1-1A27aGo6zpYMHP/KVivyxF+DY+0=
-  dependencies:
-    astral-regex "^1.0.0"
-    strip-ansi "^4.0.0"
-
-string-width@^1.0.1:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3"
-  integrity sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=
-  dependencies:
-    code-point-at "^1.0.0"
-    is-fullwidth-code-point "^1.0.0"
-    strip-ansi "^3.0.0"
-
-"string-width@^1.0.2 || 2", string-width@^2.0.0, string-width@^2.1.0, string-width@^2.1.1:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e"
-  integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==
-  dependencies:
-    is-fullwidth-code-point "^2.0.0"
-    strip-ansi "^4.0.0"
-
-string-width@^3.0.0, string-width@^3.1.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/string-width/-/string-width-3.1.0.tgz#22767be21b62af1081574306f69ac51b62203961"
-  integrity sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==
-  dependencies:
-    emoji-regex "^7.0.1"
-    is-fullwidth-code-point "^2.0.0"
-    strip-ansi "^5.1.0"
-
-string_decoder@^1.0.0, string_decoder@^1.1.1:
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e"
-  integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==
-  dependencies:
-    safe-buffer "~5.2.0"
-
-string_decoder@~1.1.1:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8"
-  integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==
-  dependencies:
-    safe-buffer "~5.1.0"
-
-stringify-object@^3.3.0:
-  version "3.3.0"
-  resolved "https://registry.yarnpkg.com/stringify-object/-/stringify-object-3.3.0.tgz#703065aefca19300d3ce88af4f5b3956d7556629"
-  integrity sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==
-  dependencies:
-    get-own-enumerable-property-symbols "^3.0.0"
-    is-obj "^1.0.1"
-    is-regexp "^1.0.0"
-
-strip-ansi@5.2.0, strip-ansi@^5.0.0, strip-ansi@^5.1.0, strip-ansi@^5.2.0:
-  version "5.2.0"
-  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae"
-  integrity sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==
-  dependencies:
-    ansi-regex "^4.1.0"
-
-strip-ansi@^3.0.0, strip-ansi@^3.0.1:
-  version "3.0.1"
-  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
-  integrity sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=
-  dependencies:
-    ansi-regex "^2.0.0"
-
-strip-ansi@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f"
-  integrity sha1-qEeQIusaw2iocTibY1JixQXuNo8=
-  dependencies:
-    ansi-regex "^3.0.0"
-
-strip-bom@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3"
-  integrity sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=
-
-strip-comments@^1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/strip-comments/-/strip-comments-1.0.2.tgz#82b9c45e7f05873bee53f37168af930aa368679d"
-  integrity sha512-kL97alc47hoyIQSV165tTt9rG5dn4w1dNnBhOQ3bOU1Nc1hel09jnXANaHJ7vzHLd4Ju8kseDGzlev96pghLFw==
-  dependencies:
-    babel-extract-comments "^1.0.0"
-    babel-plugin-transform-object-rest-spread "^6.26.0"
-
-strip-eof@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf"
-  integrity sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=
-
-strip-json-comments@^2.0.1, strip-json-comments@~2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a"
-  integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo=
-
-style-loader@0.23.1:
-  version "0.23.1"
-  resolved "https://registry.yarnpkg.com/style-loader/-/style-loader-0.23.1.tgz#cb9154606f3e771ab6c4ab637026a1049174d925"
-  integrity sha512-XK+uv9kWwhZMZ1y7mysB+zoihsEj4wneFWAS5qoiLwzW0WzSqMrrsIy+a3zkQJq0ipFtBpX5W3MqyRIBF/WFGg==
-  dependencies:
-    loader-utils "^1.1.0"
-    schema-utils "^1.0.0"
-
-stylehacks@^4.0.0:
-  version "4.0.3"
-  resolved "https://registry.yarnpkg.com/stylehacks/-/stylehacks-4.0.3.tgz#6718fcaf4d1e07d8a1318690881e8d96726a71d5"
-  integrity sha512-7GlLk9JwlElY4Y6a/rmbH2MhVlTyVmiJd1PfTCqFaIBEGMYNsrO/v3SeGTdhBThLg4Z+NbOk/qFMwCa+J+3p/g==
-  dependencies:
-    browserslist "^4.0.0"
-    postcss "^7.0.0"
-    postcss-selector-parser "^3.0.0"
-
-supports-color@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7"
-  integrity sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=
-
-supports-color@^5.3.0:
-  version "5.5.0"
-  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f"
-  integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==
-  dependencies:
-    has-flag "^3.0.0"
-
-supports-color@^6.1.0:
-  version "6.1.0"
-  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-6.1.0.tgz#0764abc69c63d5ac842dd4867e8d025e880df8f3"
-  integrity sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==
-  dependencies:
-    has-flag "^3.0.0"
-
-svg-parser@^2.0.0:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/svg-parser/-/svg-parser-2.0.2.tgz#d134cc396fa2681dc64f518330784e98bd801ec8"
-  integrity sha512-1gtApepKFweigFZj3sGO8KT8LvVZK8io146EzXrpVuWCDAbISz/yMucco3hWTkpZNoPabM+dnMOpy6Swue68Zg==
-
-svgo@^1.0.0, svgo@^1.2.2:
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/svgo/-/svgo-1.3.0.tgz#bae51ba95ded9a33a36b7c46ce9c359ae9154313"
-  integrity sha512-MLfUA6O+qauLDbym+mMZgtXCGRfIxyQoeH6IKVcFslyODEe/ElJNwr0FohQ3xG4C6HK6bk3KYPPXwHVJk3V5NQ==
-  dependencies:
-    chalk "^2.4.1"
-    coa "^2.0.2"
-    css-select "^2.0.0"
-    css-select-base-adapter "^0.1.1"
-    css-tree "1.0.0-alpha.33"
-    csso "^3.5.1"
-    js-yaml "^3.13.1"
-    mkdirp "~0.5.1"
-    object.values "^1.1.0"
-    sax "~1.2.4"
-    stable "^0.1.8"
-    unquote "~1.1.1"
-    util.promisify "~1.0.0"
-
-symbol-tree@^3.2.2:
-  version "3.2.4"
-  resolved "https://registry.yarnpkg.com/symbol-tree/-/symbol-tree-3.2.4.tgz#430637d248ba77e078883951fb9aa0eed7c63fa2"
-  integrity sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==
-
-table@^5.2.3:
-  version "5.4.6"
-  resolved "https://registry.yarnpkg.com/table/-/table-5.4.6.tgz#1292d19500ce3f86053b05f0e8e7e4a3bb21079e"
-  integrity sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==
-  dependencies:
-    ajv "^6.10.2"
-    lodash "^4.17.14"
-    slice-ansi "^2.1.0"
-    string-width "^3.0.0"
-
-tapable@^1.0.0, tapable@^1.1.0:
-  version "1.1.3"
-  resolved "https://registry.yarnpkg.com/tapable/-/tapable-1.1.3.tgz#a1fccc06b58db61fd7a45da2da44f5f3a3e67ba2"
-  integrity sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==
-
-tar@^4:
-  version "4.4.10"
-  resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.10.tgz#946b2810b9a5e0b26140cf78bea6b0b0d689eba1"
-  integrity sha512-g2SVs5QIxvo6OLp0GudTqEf05maawKUxXru104iaayWA09551tFCTI8f1Asb4lPfkBr91k07iL4c11XO3/b0tA==
-  dependencies:
-    chownr "^1.1.1"
-    fs-minipass "^1.2.5"
-    minipass "^2.3.5"
-    minizlib "^1.2.1"
-    mkdirp "^0.5.0"
-    safe-buffer "^5.1.2"
-    yallist "^3.0.3"
-
-terser-webpack-plugin@1.2.3:
-  version "1.2.3"
-  resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-1.2.3.tgz#3f98bc902fac3e5d0de730869f50668561262ec8"
-  integrity sha512-GOK7q85oAb/5kE12fMuLdn2btOS9OBZn4VsecpHDywoUC/jLhSAKOiYo0ezx7ss2EXPMzyEWFoE0s1WLE+4+oA==
-  dependencies:
-    cacache "^11.0.2"
-    find-cache-dir "^2.0.0"
-    schema-utils "^1.0.0"
-    serialize-javascript "^1.4.0"
-    source-map "^0.6.1"
-    terser "^3.16.1"
-    webpack-sources "^1.1.0"
-    worker-farm "^1.5.2"
-
-terser-webpack-plugin@^1.1.0:
-  version "1.4.1"
-  resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-1.4.1.tgz#61b18e40eaee5be97e771cdbb10ed1280888c2b4"
-  integrity sha512-ZXmmfiwtCLfz8WKZyYUuuHf3dMYEjg8NrjHMb0JqHVHVOSkzp3cW2/XG1fP3tRhqEqSzMwzzRQGtAPbs4Cncxg==
-  dependencies:
-    cacache "^12.0.2"
-    find-cache-dir "^2.1.0"
-    is-wsl "^1.1.0"
-    schema-utils "^1.0.0"
-    serialize-javascript "^1.7.0"
-    source-map "^0.6.1"
-    terser "^4.1.2"
-    webpack-sources "^1.4.0"
-    worker-farm "^1.7.0"
-
-terser@^3.16.1:
-  version "3.17.0"
-  resolved "https://registry.yarnpkg.com/terser/-/terser-3.17.0.tgz#f88ffbeda0deb5637f9d24b0da66f4e15ab10cb2"
-  integrity sha512-/FQzzPJmCpjAH9Xvk2paiWrFq+5M6aVOf+2KRbwhByISDX/EujxsK+BAvrhb6H+2rtrLCHK9N01wO014vrIwVQ==
-  dependencies:
-    commander "^2.19.0"
-    source-map "~0.6.1"
-    source-map-support "~0.5.10"
-
-terser@^4.1.2:
-  version "4.2.1"
-  resolved "https://registry.yarnpkg.com/terser/-/terser-4.2.1.tgz#1052cfe17576c66e7bc70fcc7119f22b155bdac1"
-  integrity sha512-cGbc5utAcX4a9+2GGVX4DsenG6v0x3glnDi5hx8816X1McEAwPlPgRtXPJzSBsbpILxZ8MQMT0KvArLuE0HP5A==
-  dependencies:
-    commander "^2.20.0"
-    source-map "~0.6.1"
-    source-map-support "~0.5.12"
-
-test-exclude@^5.2.3:
-  version "5.2.3"
-  resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-5.2.3.tgz#c3d3e1e311eb7ee405e092dac10aefd09091eac0"
-  integrity sha512-M+oxtseCFO3EDtAaGH7iiej3CBkzXqFMbzqYAACdzKui4eZA+pq3tZEwChvOdNfa7xxy8BfbmgJSIr43cC/+2g==
-  dependencies:
-    glob "^7.1.3"
-    minimatch "^3.0.4"
-    read-pkg-up "^4.0.0"
-    require-main-filename "^2.0.0"
-
-text-table@0.2.0, text-table@^0.2.0:
-  version "0.2.0"
-  resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
-  integrity sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=
-
-throat@^4.0.0:
-  version "4.1.0"
-  resolved "https://registry.yarnpkg.com/throat/-/throat-4.1.0.tgz#89037cbc92c56ab18926e6ba4cbb200e15672a6a"
-  integrity sha1-iQN8vJLFarGJJua6TLsgDhVnKmo=
-
-through2@^2.0.0:
-  version "2.0.5"
-  resolved "https://registry.yarnpkg.com/through2/-/through2-2.0.5.tgz#01c1e39eb31d07cb7d03a96a70823260b23132cd"
-  integrity sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==
-  dependencies:
-    readable-stream "~2.3.6"
-    xtend "~4.0.1"
-
-through@^2.3.6:
-  version "2.3.8"
-  resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5"
-  integrity sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=
-
-thunky@^1.0.2:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/thunky/-/thunky-1.0.3.tgz#f5df732453407b09191dae73e2a8cc73f381a826"
-  integrity sha512-YwT8pjmNcAXBZqrubu22P4FYsh2D4dxRmnWBOL8Jk8bUcRUtc5326kx32tuTmFDAZtLOGEVNl8POAR8j896Iow==
-
-timers-browserify@^2.0.4:
-  version "2.0.11"
-  resolved "https://registry.yarnpkg.com/timers-browserify/-/timers-browserify-2.0.11.tgz#800b1f3eee272e5bc53ee465a04d0e804c31211f"
-  integrity sha512-60aV6sgJ5YEbzUdn9c8kYGIqOubPoUdqQCul3SBAsRCZ40s6Y5cMcrW4dt3/k/EsbLVJNl9n6Vz3fTc+k2GeKQ==
-  dependencies:
-    setimmediate "^1.0.4"
-
-timsort@^0.3.0:
-  version "0.3.0"
-  resolved "https://registry.yarnpkg.com/timsort/-/timsort-0.3.0.tgz#405411a8e7e6339fe64db9a234de11dc31e02bd4"
-  integrity sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q=
-
-tiny-invariant@^1.0.2:
-  version "1.0.6"
-  resolved "https://registry.yarnpkg.com/tiny-invariant/-/tiny-invariant-1.0.6.tgz#b3f9b38835e36a41c843a3b0907a5a7b3755de73"
-  integrity sha512-FOyLWWVjG+aC0UqG76V53yAWdXfH8bO6FNmyZOuUrzDzK8DI3/JRY25UD7+g49JWM1LXwymsKERB+DzI0dTEQA==
-
-tiny-warning@^1.0.0, tiny-warning@^1.0.2:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/tiny-warning/-/tiny-warning-1.0.3.tgz#94a30db453df4c643d0fd566060d60a875d84754"
-  integrity sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==
-
-tinycolor2@^1.4.1:
-  version "1.4.1"
-  resolved "https://registry.yarnpkg.com/tinycolor2/-/tinycolor2-1.4.1.tgz#f4fad333447bc0b07d4dc8e9209d8f39a8ac77e8"
-  integrity sha1-9PrTM0R7wLB9TcjpIJ2POaisd+g=
-
-tmp@^0.0.33:
-  version "0.0.33"
-  resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9"
-  integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==
-  dependencies:
-    os-tmpdir "~1.0.2"
-
-tmpl@1.0.x:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.4.tgz#23640dd7b42d00433911140820e5cf440e521dd1"
-  integrity sha1-I2QN17QtAEM5ERQIIOXPRA5SHdE=
-
-to-arraybuffer@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz#7d229b1fcc637e466ca081180836a7aabff83f43"
-  integrity sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M=
-
-to-fast-properties@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e"
-  integrity sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=
-
-to-object-path@^0.3.0:
-  version "0.3.0"
-  resolved "https://registry.yarnpkg.com/to-object-path/-/to-object-path-0.3.0.tgz#297588b7b0e7e0ac08e04e672f85c1f4999e17af"
-  integrity sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=
-  dependencies:
-    kind-of "^3.0.2"
-
-to-regex-range@^2.1.0:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-2.1.1.tgz#7c80c17b9dfebe599e27367e0d4dd5590141db38"
-  integrity sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=
-  dependencies:
-    is-number "^3.0.0"
-    repeat-string "^1.6.1"
-
-to-regex@^3.0.1, to-regex@^3.0.2:
-  version "3.0.2"
-  resolved "https://registry.yarnpkg.com/to-regex/-/to-regex-3.0.2.tgz#13cfdd9b336552f30b51f33a8ae1b42a7a7599ce"
-  integrity sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==
-  dependencies:
-    define-property "^2.0.2"
-    extend-shallow "^3.0.2"
-    regex-not "^1.0.2"
-    safe-regex "^1.1.0"
-
-toggle-selection@^1.0.6:
-  version "1.0.6"
-  resolved "https://registry.yarnpkg.com/toggle-selection/-/toggle-selection-1.0.6.tgz#6e45b1263f2017fa0acc7d89d78b15b8bf77da32"
-  integrity sha1-bkWxJj8gF/oKzH2J14sVuL932jI=
-
-toidentifier@1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.0.tgz#7e1be3470f1e77948bc43d94a3c8f4d7752ba553"
-  integrity sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==
-
-tough-cookie@^2.3.3, tough-cookie@^2.3.4, tough-cookie@^2.5.0:
-  version "2.5.0"
-  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2"
-  integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==
-  dependencies:
-    psl "^1.1.28"
-    punycode "^2.1.1"
-
-tough-cookie@~2.4.3:
-  version "2.4.3"
-  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.4.3.tgz#53f36da3f47783b0925afa06ff9f3b165280f781"
-  integrity sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ==
-  dependencies:
-    psl "^1.1.24"
-    punycode "^1.4.1"
-
-tr46@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/tr46/-/tr46-1.0.1.tgz#a8b13fd6bfd2489519674ccde55ba3693b706d09"
-  integrity sha1-qLE/1r/SSJUZZ0zN5VujaTtwbQk=
-  dependencies:
-    punycode "^2.1.0"
-
-trim-right@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003"
-  integrity sha1-yy4SAwZ+DI3h9hQJS5/kVwTqYAM=
-
-ts-pnp@^1.0.0:
-  version "1.1.4"
-  resolved "https://registry.yarnpkg.com/ts-pnp/-/ts-pnp-1.1.4.tgz#ae27126960ebaefb874c6d7fa4729729ab200d90"
-  integrity sha512-1J/vefLC+BWSo+qe8OnJQfWTYRS6ingxjwqmHMqaMxXMj7kFtKLgAaYW3JeX3mktjgUL+etlU8/B4VUAUI9QGw==
-
-tslib@^1.8.1, tslib@^1.9.0:
-  version "1.10.0"
-  resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.10.0.tgz#c3c19f95973fb0a62973fb09d90d961ee43e5c8a"
-  integrity sha512-qOebF53frne81cf0S9B41ByenJ3/IuH8yJKngAX35CmiZySA0khhkovshKK+jGCaMnVomla7gVlIcc3EvKPbTQ==
-
-tsutils@^3.7.0:
-  version "3.17.1"
-  resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-3.17.1.tgz#ed719917f11ca0dee586272b2ac49e015a2dd759"
-  integrity sha512-kzeQ5B8H3w60nFY2g8cJIuH7JDpsALXySGtwGJ0p2LSjLgay3NdIpqq5SoOBe46bKDW2iq25irHCr8wjomUS2g==
-  dependencies:
-    tslib "^1.8.1"
-
-tty-browserify@0.0.0:
-  version "0.0.0"
-  resolved "https://registry.yarnpkg.com/tty-browserify/-/tty-browserify-0.0.0.tgz#a157ba402da24e9bf957f9aa69d524eed42901a6"
-  integrity sha1-oVe6QC2iTpv5V/mqadUk7tQpAaY=
-
-tunnel-agent@^0.6.0:
-  version "0.6.0"
-  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd"
-  integrity sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=
-  dependencies:
-    safe-buffer "^5.0.1"
-
-tweetnacl@^0.14.3, tweetnacl@~0.14.0:
-  version "0.14.5"
-  resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64"
-  integrity sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=
-
-type-check@~0.3.2:
-  version "0.3.2"
-  resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72"
-  integrity sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=
-  dependencies:
-    prelude-ls "~1.1.2"
-
-type-is@~1.6.17, type-is@~1.6.18:
-  version "1.6.18"
-  resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131"
-  integrity sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==
-  dependencies:
-    media-typer "0.3.0"
-    mime-types "~2.1.24"
-
-typedarray@^0.0.6:
-  version "0.0.6"
-  resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
-  integrity sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=
-
-typescript@3.4.5:
-  version "3.4.5"
-  resolved "https://registry.yarnpkg.com/typescript/-/typescript-3.4.5.tgz#2d2618d10bb566572b8d7aad5180d84257d70a99"
-  integrity sha512-YycBxUb49UUhdNMU5aJ7z5Ej2XGmaIBL0x34vZ82fn3hGvD+bgrMrVDpatgz2f7YxUMJxMkbWxJZeAvDxVe7Vw==
-
-ua-parser-js@^0.7.18:
-  version "0.7.20"
-  resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.20.tgz#7527178b82f6a62a0f243d1f94fd30e3e3c21098"
-  integrity sha512-8OaIKfzL5cpx8eCMAhhvTlft8GYF8b2eQr6JkCyVdrgjcytyOmPCXrqXFcUnhonRpLlh5yxEZVohm6mzaowUOw==
-
-uglify-js@3.4.x:
-  version "3.4.10"
-  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.4.10.tgz#9ad9563d8eb3acdfb8d38597d2af1d815f6a755f"
-  integrity sha512-Y2VsbPVs0FIshJztycsO2SfPk7/KAF/T72qzv9u5EpQ4kB2hQoHlhNQTsNyy6ul7lQtqJN/AoWeS23OzEiEFxw==
-  dependencies:
-    commander "~2.19.0"
-    source-map "~0.6.1"
-
-uglify-js@^3.1.4:
-  version "3.6.0"
-  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.6.0.tgz#704681345c53a8b2079fb6cec294b05ead242ff5"
-  integrity sha512-W+jrUHJr3DXKhrsS7NUVxn3zqMOFn0hL/Ei6v0anCIMoKC93TjcflTagwIHLW7SfMFfiQuktQyFVCFHGUE0+yg==
-  dependencies:
-    commander "~2.20.0"
-    source-map "~0.6.1"
-
-unicode-canonical-property-names-ecmascript@^1.0.4:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz#2619800c4c825800efdd8343af7dd9933cbe2818"
-  integrity sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ==
-
-unicode-match-property-ecmascript@^1.0.4:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz#8ed2a32569961bce9227d09cd3ffbb8fed5f020c"
-  integrity sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg==
-  dependencies:
-    unicode-canonical-property-names-ecmascript "^1.0.4"
-    unicode-property-aliases-ecmascript "^1.0.4"
-
-unicode-match-property-value-ecmascript@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.1.0.tgz#5b4b426e08d13a80365e0d657ac7a6c1ec46a277"
-  integrity sha512-hDTHvaBk3RmFzvSl0UVrUmC3PuW9wKVnpoUDYH0JDkSIovzw+J5viQmeYHxVSBptubnr7PbH2e0fnpDRQnQl5g==
-
-unicode-property-aliases-ecmascript@^1.0.4:
-  version "1.0.5"
-  resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.0.5.tgz#a9cc6cc7ce63a0a3023fc99e341b94431d405a57"
-  integrity sha512-L5RAqCfXqAwR3RriF8pM0lU0w4Ryf/GgzONwi6KnL1taJQa7x1TCxdJnILX59WIGOwR57IVxn7Nej0fz1Ny6fw==
-
-union-value@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/union-value/-/union-value-1.0.1.tgz#0b6fe7b835aecda61c6ea4d4f02c14221e109847"
-  integrity sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==
-  dependencies:
-    arr-union "^3.1.0"
-    get-value "^2.0.6"
-    is-extendable "^0.1.1"
-    set-value "^2.0.1"
-
-uniq@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/uniq/-/uniq-1.0.1.tgz#b31c5ae8254844a3a8281541ce2b04b865a734ff"
-  integrity sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8=
-
-uniqs@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/uniqs/-/uniqs-2.0.0.tgz#ffede4b36b25290696e6e165d4a59edb998e6b02"
-  integrity sha1-/+3ks2slKQaW5uFl1KWe25mOawI=
-
-unique-filename@^1.1.1:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-1.1.1.tgz#1d69769369ada0583103a1e6ae87681b56573230"
-  integrity sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==
-  dependencies:
-    unique-slug "^2.0.0"
-
-unique-slug@^2.0.0:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/unique-slug/-/unique-slug-2.0.2.tgz#baabce91083fc64e945b0f3ad613e264f7cd4e6c"
-  integrity sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==
-  dependencies:
-    imurmurhash "^0.1.4"
-
-universalify@^0.1.0:
-  version "0.1.2"
-  resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66"
-  integrity sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==
-
-unpipe@1.0.0, unpipe@~1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec"
-  integrity sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=
-
-unquote@~1.1.1:
-  version "1.1.1"
-  resolved "https://registry.yarnpkg.com/unquote/-/unquote-1.1.1.tgz#8fded7324ec6e88a0ff8b905e7c098cdc086d544"
-  integrity sha1-j97XMk7G6IoP+LkF58CYzcCG1UQ=
-
-unset-value@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/unset-value/-/unset-value-1.0.0.tgz#8376873f7d2335179ffb1e6fc3a8ed0dfc8ab559"
-  integrity sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=
-  dependencies:
-    has-value "^0.3.1"
-    isobject "^3.0.0"
-
-upath@^1.1.1:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/upath/-/upath-1.1.2.tgz#3db658600edaeeccbe6db5e684d67ee8c2acd068"
-  integrity sha512-kXpym8nmDmlCBr7nKdIx8P2jNBa+pBpIUFRnKJ4dr8htyYGJFokkr2ZvERRtUN+9SY+JqXouNgUPtv6JQva/2Q==
-
-upper-case@^1.1.1:
-  version "1.1.3"
-  resolved "https://registry.yarnpkg.com/upper-case/-/upper-case-1.1.3.tgz#f6b4501c2ec4cdd26ba78be7222961de77621598"
-  integrity sha1-9rRQHC7EzdJrp4vnIilh3ndiFZg=
-
-uri-js@^4.2.2:
-  version "4.2.2"
-  resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.2.2.tgz#94c540e1ff772956e2299507c010aea6c8838eb0"
-  integrity sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==
-  dependencies:
-    punycode "^2.1.0"
-
-urix@^0.1.0:
-  version "0.1.0"
-  resolved "https://registry.yarnpkg.com/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72"
-  integrity sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=
-
-url-loader@1.1.2:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/url-loader/-/url-loader-1.1.2.tgz#b971d191b83af693c5e3fea4064be9e1f2d7f8d8"
-  integrity sha512-dXHkKmw8FhPqu8asTc1puBfe3TehOCo2+RmOOev5suNCIYBcT626kxiWg1NBVkwc4rO8BGa7gP70W7VXuqHrjg==
-  dependencies:
-    loader-utils "^1.1.0"
-    mime "^2.0.3"
-    schema-utils "^1.0.0"
-
-url-parse@^1.4.3:
-  version "1.4.7"
-  resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.4.7.tgz#a8a83535e8c00a316e403a5db4ac1b9b853ae278"
-  integrity sha512-d3uaVyzDB9tQoSXFvuSUNFibTd9zxd2bkVrDRvF5TmvWWQwqE4lgYJ5m+x1DbecWkw+LK4RNl2CU1hHuOKPVlg==
-  dependencies:
-    querystringify "^2.1.1"
-    requires-port "^1.0.0"
-
-url@^0.11.0:
-  version "0.11.0"
-  resolved "https://registry.yarnpkg.com/url/-/url-0.11.0.tgz#3838e97cfc60521eb73c525a8e55bfdd9e2e28f1"
-  integrity sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=
-  dependencies:
-    punycode "1.3.2"
-    querystring "0.2.0"
-
-use@^3.1.0:
-  version "3.1.1"
-  resolved "https://registry.yarnpkg.com/use/-/use-3.1.1.tgz#d50c8cac79a19fbc20f2911f56eb973f4e10070f"
-  integrity sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==
-
-util-deprecate@^1.0.1, util-deprecate@~1.0.1:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
-  integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=
-
-util.promisify@1.0.0, util.promisify@^1.0.0, util.promisify@~1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/util.promisify/-/util.promisify-1.0.0.tgz#440f7165a459c9a16dc145eb8e72f35687097030"
-  integrity sha512-i+6qA2MPhvoKLuxnJNpXAGhg7HphQOSUq2LKMZD0m15EiskXUkMvKdF4Uui0WYeCUGea+o2cw/ZuwehtfsrNkA==
-  dependencies:
-    define-properties "^1.1.2"
-    object.getownpropertydescriptors "^2.0.3"
-
-util@0.10.3:
-  version "0.10.3"
-  resolved "https://registry.yarnpkg.com/util/-/util-0.10.3.tgz#7afb1afe50805246489e3db7fe0ed379336ac0f9"
-  integrity sha1-evsa/lCAUkZInj23/g7TeTNqwPk=
-  dependencies:
-    inherits "2.0.1"
-
-util@^0.11.0:
-  version "0.11.1"
-  resolved "https://registry.yarnpkg.com/util/-/util-0.11.1.tgz#3236733720ec64bb27f6e26f421aaa2e1b588d61"
-  integrity sha512-HShAsny+zS2TZfaXxD9tYj4HQGlBezXZMZuM/S5PKLLoZkShZiGk9o5CzukI1LVHZvjdvZ2Sj1aW/Ndn2NB/HQ==
-  dependencies:
-    inherits "2.0.3"
-
-utila@^0.4.0, utila@~0.4:
-  version "0.4.0"
-  resolved "https://registry.yarnpkg.com/utila/-/utila-0.4.0.tgz#8a16a05d445657a3aea5eecc5b12a4fa5379772c"
-  integrity sha1-ihagXURWV6Oupe7MWxKk+lN5dyw=
-
-utils-merge@1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713"
-  integrity sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=
-
-uuid@^3.0.1, uuid@^3.3.2:
-  version "3.3.3"
-  resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.3.tgz#4568f0216e78760ee1dbf3a4d2cf53e224112866"
-  integrity sha512-pW0No1RGHgzlpHJO1nsVrHKpOEIxkGg1xB+v0ZmdNH5OAeAwzAVrCnI2/6Mtx+Uys6iaylxa+D3g4j63IKKjSQ==
-
-validate-npm-package-license@^3.0.1:
-  version "3.0.4"
-  resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a"
-  integrity sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==
-  dependencies:
-    spdx-correct "^3.0.0"
-    spdx-expression-parse "^3.0.0"
-
-value-equal@^0.4.0:
-  version "0.4.0"
-  resolved "https://registry.yarnpkg.com/value-equal/-/value-equal-0.4.0.tgz#c5bdd2f54ee093c04839d71ce2e4758a6890abc7"
-  integrity sha512-x+cYdNnaA3CxvMaTX0INdTCN8m8aF2uY9BvEqmxuYp8bL09cs/kWVQPVGcA35fMktdOsP69IgU7wFj/61dJHEw==
-
-vary@~1.1.2:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc"
-  integrity sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=
-
-vendors@^1.0.0:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/vendors/-/vendors-1.0.3.tgz#a6467781abd366217c050f8202e7e50cc9eef8c0"
-  integrity sha512-fOi47nsJP5Wqefa43kyWSg80qF+Q3XA6MUkgi7Hp1HQaKDQW4cQrK2D0P7mmbFtsV1N89am55Yru/nyEwRubcw==
-
-verror@1.10.0:
-  version "1.10.0"
-  resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400"
-  integrity sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=
-  dependencies:
-    assert-plus "^1.0.0"
-    core-util-is "1.0.2"
-    extsprintf "^1.2.0"
-
-vm-browserify@^1.0.1:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/vm-browserify/-/vm-browserify-1.1.0.tgz#bd76d6a23323e2ca8ffa12028dc04559c75f9019"
-  integrity sha512-iq+S7vZJE60yejDYM0ek6zg308+UZsdtPExWP9VZoCFCz1zkJoXFnAX7aZfd/ZwrkidzdUZL0C/ryW+JwAiIGw==
-
-w3c-hr-time@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/w3c-hr-time/-/w3c-hr-time-1.0.1.tgz#82ac2bff63d950ea9e3189a58a65625fedf19045"
-  integrity sha1-gqwr/2PZUOqeMYmlimViX+3xkEU=
-  dependencies:
-    browser-process-hrtime "^0.1.2"
-
-w3c-xmlserializer@^1.1.2:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/w3c-xmlserializer/-/w3c-xmlserializer-1.1.2.tgz#30485ca7d70a6fd052420a3d12fd90e6339ce794"
-  integrity sha512-p10l/ayESzrBMYWRID6xbuCKh2Fp77+sA0doRuGn4tTIMrrZVeqfpKjXHY+oDh3K4nLdPgNwMTVP6Vp4pvqbNg==
-  dependencies:
-    domexception "^1.0.1"
-    webidl-conversions "^4.0.2"
-    xml-name-validator "^3.0.0"
-
-walker@^1.0.7, walker@~1.0.5:
-  version "1.0.7"
-  resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.7.tgz#2f7f9b8fd10d677262b18a884e28d19618e028fb"
-  integrity sha1-L3+bj9ENZ3JisYqITijRlhjgKPs=
-  dependencies:
-    makeerror "1.0.x"
-
-warning@4.x, warning@^4.0.1, warning@^4.0.2, warning@^4.0.3, warning@~4.0.3:
-  version "4.0.3"
-  resolved "https://registry.yarnpkg.com/warning/-/warning-4.0.3.tgz#16e9e077eb8a86d6af7d64aa1e05fd85b4678ca3"
-  integrity sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==
-  dependencies:
-    loose-envify "^1.0.0"
-
-warning@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/warning/-/warning-3.0.0.tgz#32e5377cb572de4ab04753bdf8821c01ed605b7c"
-  integrity sha1-MuU3fLVy3kqwR1O9+IIcAe1gW3w=
-  dependencies:
-    loose-envify "^1.0.0"
-
-watchpack@^1.5.0:
-  version "1.6.0"
-  resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-1.6.0.tgz#4bc12c2ebe8aa277a71f1d3f14d685c7b446cd00"
-  integrity sha512-i6dHe3EyLjMmDlU1/bGQpEw25XSjkJULPuAVKCbNRefQVq48yXKUpwg538F7AZTf9kyr57zj++pQFltUa5H7yA==
-  dependencies:
-    chokidar "^2.0.2"
-    graceful-fs "^4.1.2"
-    neo-async "^2.5.0"
-
-wbuf@^1.1.0, wbuf@^1.7.3:
-  version "1.7.3"
-  resolved "https://registry.yarnpkg.com/wbuf/-/wbuf-1.7.3.tgz#c1d8d149316d3ea852848895cb6a0bfe887b87df"
-  integrity sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==
-  dependencies:
-    minimalistic-assert "^1.0.0"
-
-webidl-conversions@^4.0.2:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-4.0.2.tgz#a855980b1f0b6b359ba1d5d9fb39ae941faa63ad"
-  integrity sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==
-
-webpack-dev-middleware@^3.5.1:
-  version "3.7.0"
-  resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-3.7.0.tgz#ef751d25f4e9a5c8a35da600c5fda3582b5c6cff"
-  integrity sha512-qvDesR1QZRIAZHOE3iQ4CXLZZSQ1lAUsSpnQmlB1PBfoN/xdRjmge3Dok0W4IdaVLJOGJy3sGI4sZHwjRU0PCA==
-  dependencies:
-    memory-fs "^0.4.1"
-    mime "^2.4.2"
-    range-parser "^1.2.1"
-    webpack-log "^2.0.0"
-
-webpack-dev-server@3.2.1:
-  version "3.2.1"
-  resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-3.2.1.tgz#1b45ce3ecfc55b6ebe5e36dab2777c02bc508c4e"
-  integrity sha512-sjuE4mnmx6JOh9kvSbPYw3u/6uxCLHNWfhWaIPwcXWsvWOPN+nc5baq4i9jui3oOBRXGonK9+OI0jVkaz6/rCw==
-  dependencies:
-    ansi-html "0.0.7"
-    bonjour "^3.5.0"
-    chokidar "^2.0.0"
-    compression "^1.5.2"
-    connect-history-api-fallback "^1.3.0"
-    debug "^4.1.1"
-    del "^3.0.0"
-    express "^4.16.2"
-    html-entities "^1.2.0"
-    http-proxy-middleware "^0.19.1"
-    import-local "^2.0.0"
-    internal-ip "^4.2.0"
-    ip "^1.1.5"
-    killable "^1.0.0"
-    loglevel "^1.4.1"
-    opn "^5.1.0"
-    portfinder "^1.0.9"
-    schema-utils "^1.0.0"
-    selfsigned "^1.9.1"
-    semver "^5.6.0"
-    serve-index "^1.7.2"
-    sockjs "0.3.19"
-    sockjs-client "1.3.0"
-    spdy "^4.0.0"
-    strip-ansi "^3.0.0"
-    supports-color "^6.1.0"
-    url "^0.11.0"
-    webpack-dev-middleware "^3.5.1"
-    webpack-log "^2.0.0"
-    yargs "12.0.2"
-
-webpack-log@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/webpack-log/-/webpack-log-2.0.0.tgz#5b7928e0637593f119d32f6227c1e0ac31e1b47f"
-  integrity sha512-cX8G2vR/85UYG59FgkoMamwHUIkSSlV3bBMRsbxVXVUk2j6NleCKjQ/WE9eYg9WY4w25O9w8wKP4rzNZFmUcUg==
-  dependencies:
-    ansi-colors "^3.0.0"
-    uuid "^3.3.2"
-
-webpack-manifest-plugin@2.0.4:
-  version "2.0.4"
-  resolved "https://registry.yarnpkg.com/webpack-manifest-plugin/-/webpack-manifest-plugin-2.0.4.tgz#e4ca2999b09557716b8ba4475fb79fab5986f0cd"
-  integrity sha512-nejhOHexXDBKQOj/5v5IZSfCeTO3x1Dt1RZEcGfBSul891X/eLIcIVH31gwxPDdsi2Z8LKKFGpM4w9+oTBOSCg==
-  dependencies:
-    fs-extra "^7.0.0"
-    lodash ">=3.5 <5"
-    tapable "^1.0.0"
-
-webpack-sources@^1.1.0, webpack-sources@^1.3.0, webpack-sources@^1.4.0:
-  version "1.4.3"
-  resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-1.4.3.tgz#eedd8ec0b928fbf1cbfe994e22d2d890f330a933"
-  integrity sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==
-  dependencies:
-    source-list-map "^2.0.0"
-    source-map "~0.6.1"
-
-webpack@4.29.6:
-  version "4.29.6"
-  resolved "https://registry.yarnpkg.com/webpack/-/webpack-4.29.6.tgz#66bf0ec8beee4d469f8b598d3988ff9d8d90e955"
-  integrity sha512-MwBwpiE1BQpMDkbnUUaW6K8RFZjljJHArC6tWQJoFm0oQtfoSebtg4Y7/QHnJ/SddtjYLHaKGX64CFjG5rehJw==
-  dependencies:
-    "@webassemblyjs/ast" "1.8.5"
-    "@webassemblyjs/helper-module-context" "1.8.5"
-    "@webassemblyjs/wasm-edit" "1.8.5"
-    "@webassemblyjs/wasm-parser" "1.8.5"
-    acorn "^6.0.5"
-    acorn-dynamic-import "^4.0.0"
-    ajv "^6.1.0"
-    ajv-keywords "^3.1.0"
-    chrome-trace-event "^1.0.0"
-    enhanced-resolve "^4.1.0"
-    eslint-scope "^4.0.0"
-    json-parse-better-errors "^1.0.2"
-    loader-runner "^2.3.0"
-    loader-utils "^1.1.0"
-    memory-fs "~0.4.1"
-    micromatch "^3.1.8"
-    mkdirp "~0.5.0"
-    neo-async "^2.5.0"
-    node-libs-browser "^2.0.0"
-    schema-utils "^1.0.0"
-    tapable "^1.1.0"
-    terser-webpack-plugin "^1.1.0"
-    watchpack "^1.5.0"
-    webpack-sources "^1.3.0"
-
-websocket-driver@>=0.5.1:
-  version "0.7.3"
-  resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.7.3.tgz#a2d4e0d4f4f116f1e6297eba58b05d430100e9f9"
-  integrity sha512-bpxWlvbbB459Mlipc5GBzzZwhoZgGEZLuqPaR0INBGnPAY1vdBX6hPnoFXiw+3yWxDuHyQjO2oXTMyS8A5haFg==
-  dependencies:
-    http-parser-js ">=0.4.0 <0.4.11"
-    safe-buffer ">=5.1.0"
-    websocket-extensions ">=0.1.1"
-
-websocket-extensions@>=0.1.1:
-  version "0.1.3"
-  resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.3.tgz#5d2ff22977003ec687a4b87073dfbbac146ccf29"
-  integrity sha512-nqHUnMXmBzT0w570r2JpJxfiSD1IzoI+HGVdd3aZ0yNi3ngvQ4jv1dtHt5VGxfI2yj5yqImPhOK4vmIh2xMbGg==
-
-whatwg-encoding@^1.0.1, whatwg-encoding@^1.0.3, whatwg-encoding@^1.0.5:
-  version "1.0.5"
-  resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-1.0.5.tgz#5abacf777c32166a51d085d6b4f3e7d27113ddb0"
-  integrity sha512-b5lim54JOPN9HtzvK9HFXvBma/rnfFeqsic0hSpjtDbVxR3dJKLc+KB4V6GgiGOvl7CY/KNh8rxSo9DKQrnUEw==
-  dependencies:
-    iconv-lite "0.4.24"
-
-whatwg-fetch@3.0.0, whatwg-fetch@>=0.10.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-3.0.0.tgz#fc804e458cc460009b1a2b966bc8817d2578aefb"
-  integrity sha512-9GSJUgz1D4MfyKU7KRqwOjXCXTqWdFNvEr7eUBYchQiVc744mqK/MzXPNR2WsPkmkOa4ywfg8C2n8h+13Bey1Q==
-
-whatwg-mimetype@^2.1.0, whatwg-mimetype@^2.2.0, whatwg-mimetype@^2.3.0:
-  version "2.3.0"
-  resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz#3d4b1e0312d2079879f826aff18dbeeca5960fbf"
-  integrity sha512-M4yMwr6mAnQz76TbJm914+gPpB/nCwvZbJU28cUD6dR004SAxDLOOSUaB1JDRqLtaOV/vi0IC5lEAGFgrjGv/g==
-
-whatwg-url@^6.4.1:
-  version "6.5.0"
-  resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-6.5.0.tgz#f2df02bff176fd65070df74ad5ccbb5a199965a8"
-  integrity sha512-rhRZRqx/TLJQWUpQ6bmrt2UV4f0HCQ463yQuONJqC6fO2VoEb1pTYddbe59SkYq87aoM5A3bdhMZiUiVws+fzQ==
-  dependencies:
-    lodash.sortby "^4.7.0"
-    tr46 "^1.0.1"
-    webidl-conversions "^4.0.2"
-
-whatwg-url@^7.0.0:
-  version "7.0.0"
-  resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-7.0.0.tgz#fde926fa54a599f3adf82dff25a9f7be02dc6edd"
-  integrity sha512-37GeVSIJ3kn1JgKyjiYNmSLP1yzbpb29jdmwBSgkD9h40/hyrR/OifpVUndji3tmwGgD8qpw7iQu3RSbCrBpsQ==
-  dependencies:
-    lodash.sortby "^4.7.0"
-    tr46 "^1.0.1"
-    webidl-conversions "^4.0.2"
-
-which-module@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a"
-  integrity sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=
-
-which@^1.2.9, which@^1.3.0, which@^1.3.1:
-  version "1.3.1"
-  resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a"
-  integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==
-  dependencies:
-    isexe "^2.0.0"
-
-wide-align@^1.1.0:
-  version "1.1.3"
-  resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457"
-  integrity sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==
-  dependencies:
-    string-width "^1.0.2 || 2"
-
-wordwrap@~0.0.2:
-  version "0.0.3"
-  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107"
-  integrity sha1-o9XabNXAvAAI03I0u68b7WMFkQc=
-
-wordwrap@~1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb"
-  integrity sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=
-
-workbox-background-sync@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-background-sync/-/workbox-background-sync-4.3.1.tgz#26821b9bf16e9e37fd1d640289edddc08afd1950"
-  integrity sha512-1uFkvU8JXi7L7fCHVBEEnc3asPpiAL33kO495UMcD5+arew9IbKW2rV5lpzhoWcm/qhGB89YfO4PmB/0hQwPRg==
-  dependencies:
-    workbox-core "^4.3.1"
-
-workbox-broadcast-update@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-broadcast-update/-/workbox-broadcast-update-4.3.1.tgz#e2c0280b149e3a504983b757606ad041f332c35b"
-  integrity sha512-MTSfgzIljpKLTBPROo4IpKjESD86pPFlZwlvVG32Kb70hW+aob4Jxpblud8EhNb1/L5m43DUM4q7C+W6eQMMbA==
-  dependencies:
-    workbox-core "^4.3.1"
-
-workbox-build@^4.2.0:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-build/-/workbox-build-4.3.1.tgz#414f70fb4d6de47f6538608b80ec52412d233e64"
-  integrity sha512-UHdwrN3FrDvicM3AqJS/J07X0KXj67R8Cg0waq1MKEOqzo89ap6zh6LmaLnRAjpB+bDIz+7OlPye9iii9KBnxw==
-  dependencies:
-    "@babel/runtime" "^7.3.4"
-    "@hapi/joi" "^15.0.0"
-    common-tags "^1.8.0"
-    fs-extra "^4.0.2"
-    glob "^7.1.3"
-    lodash.template "^4.4.0"
-    pretty-bytes "^5.1.0"
-    stringify-object "^3.3.0"
-    strip-comments "^1.0.2"
-    workbox-background-sync "^4.3.1"
-    workbox-broadcast-update "^4.3.1"
-    workbox-cacheable-response "^4.3.1"
-    workbox-core "^4.3.1"
-    workbox-expiration "^4.3.1"
-    workbox-google-analytics "^4.3.1"
-    workbox-navigation-preload "^4.3.1"
-    workbox-precaching "^4.3.1"
-    workbox-range-requests "^4.3.1"
-    workbox-routing "^4.3.1"
-    workbox-strategies "^4.3.1"
-    workbox-streams "^4.3.1"
-    workbox-sw "^4.3.1"
-    workbox-window "^4.3.1"
-
-workbox-cacheable-response@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-cacheable-response/-/workbox-cacheable-response-4.3.1.tgz#f53e079179c095a3f19e5313b284975c91428c91"
-  integrity sha512-Rp5qlzm6z8IOvnQNkCdO9qrDgDpoPNguovs0H8C+wswLuPgSzSp9p2afb5maUt9R1uTIwOXrVQMmPfPypv+npw==
-  dependencies:
-    workbox-core "^4.3.1"
-
-workbox-core@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-core/-/workbox-core-4.3.1.tgz#005d2c6a06a171437afd6ca2904a5727ecd73be6"
-  integrity sha512-I3C9jlLmMKPxAC1t0ExCq+QoAMd0vAAHULEgRZ7kieCdUd919n53WC0AfvokHNwqRhGn+tIIj7vcb5duCjs2Kg==
-
-workbox-expiration@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-expiration/-/workbox-expiration-4.3.1.tgz#d790433562029e56837f341d7f553c4a78ebe921"
-  integrity sha512-vsJLhgQsQouv9m0rpbXubT5jw0jMQdjpkum0uT+d9tTwhXcEZks7qLfQ9dGSaufTD2eimxbUOJfWLbNQpIDMPw==
-  dependencies:
-    workbox-core "^4.3.1"
-
-workbox-google-analytics@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-google-analytics/-/workbox-google-analytics-4.3.1.tgz#9eda0183b103890b5c256e6f4ea15a1f1548519a"
-  integrity sha512-xzCjAoKuOb55CBSwQrbyWBKqp35yg1vw9ohIlU2wTy06ZrYfJ8rKochb1MSGlnoBfXGWss3UPzxR5QL5guIFdg==
-  dependencies:
-    workbox-background-sync "^4.3.1"
-    workbox-core "^4.3.1"
-    workbox-routing "^4.3.1"
-    workbox-strategies "^4.3.1"
-
-workbox-navigation-preload@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-navigation-preload/-/workbox-navigation-preload-4.3.1.tgz#29c8e4db5843803b34cd96dc155f9ebd9afa453d"
-  integrity sha512-K076n3oFHYp16/C+F8CwrRqD25GitA6Rkd6+qAmLmMv1QHPI2jfDwYqrytOfKfYq42bYtW8Pr21ejZX7GvALOw==
-  dependencies:
-    workbox-core "^4.3.1"
-
-workbox-precaching@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-precaching/-/workbox-precaching-4.3.1.tgz#9fc45ed122d94bbe1f0ea9584ff5940960771cba"
-  integrity sha512-piSg/2csPoIi/vPpp48t1q5JLYjMkmg5gsXBQkh/QYapCdVwwmKlU9mHdmy52KsDGIjVaqEUMFvEzn2LRaigqQ==
-  dependencies:
-    workbox-core "^4.3.1"
-
-workbox-range-requests@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-range-requests/-/workbox-range-requests-4.3.1.tgz#f8a470188922145cbf0c09a9a2d5e35645244e74"
-  integrity sha512-S+HhL9+iTFypJZ/yQSl/x2Bf5pWnbXdd3j57xnb0V60FW1LVn9LRZkPtneODklzYuFZv7qK6riZ5BNyc0R0jZA==
-  dependencies:
-    workbox-core "^4.3.1"
-
-workbox-routing@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-routing/-/workbox-routing-4.3.1.tgz#a675841af623e0bb0c67ce4ed8e724ac0bed0cda"
-  integrity sha512-FkbtrODA4Imsi0p7TW9u9MXuQ5P4pVs1sWHK4dJMMChVROsbEltuE79fBoIk/BCztvOJ7yUpErMKa4z3uQLX+g==
-  dependencies:
-    workbox-core "^4.3.1"
-
-workbox-strategies@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-strategies/-/workbox-strategies-4.3.1.tgz#d2be03c4ef214c115e1ab29c9c759c9fe3e9e646"
-  integrity sha512-F/+E57BmVG8dX6dCCopBlkDvvhg/zj6VDs0PigYwSN23L8hseSRwljrceU2WzTvk/+BSYICsWmRq5qHS2UYzhw==
-  dependencies:
-    workbox-core "^4.3.1"
-
-workbox-streams@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-streams/-/workbox-streams-4.3.1.tgz#0b57da70e982572de09c8742dd0cb40a6b7c2cc3"
-  integrity sha512-4Kisis1f/y0ihf4l3u/+ndMkJkIT4/6UOacU3A4BwZSAC9pQ9vSvJpIi/WFGQRH/uPXvuVjF5c2RfIPQFSS2uA==
-  dependencies:
-    workbox-core "^4.3.1"
-
-workbox-sw@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-sw/-/workbox-sw-4.3.1.tgz#df69e395c479ef4d14499372bcd84c0f5e246164"
-  integrity sha512-0jXdusCL2uC5gM3yYFT6QMBzKfBr2XTk0g5TPAV4y8IZDyVNDyj1a8uSXy3/XrvkVTmQvLN4O5k3JawGReXr9w==
-
-workbox-webpack-plugin@4.2.0:
-  version "4.2.0"
-  resolved "https://registry.yarnpkg.com/workbox-webpack-plugin/-/workbox-webpack-plugin-4.2.0.tgz#c94c3f69ff39c8a5b0c7e6bebc382cb53410a63d"
-  integrity sha512-YZsiA+y/ns/GdWRaBsfYv8dln1ebWtGnJcTOg1ppO0pO1tScAHX0yGtHIjndxz3L/UUhE8b0NQE9KeLNwJwA5A==
-  dependencies:
-    "@babel/runtime" "^7.0.0"
-    json-stable-stringify "^1.0.1"
-    workbox-build "^4.2.0"
-
-workbox-window@^4.3.1:
-  version "4.3.1"
-  resolved "https://registry.yarnpkg.com/workbox-window/-/workbox-window-4.3.1.tgz#ee6051bf10f06afa5483c9b8dfa0531994ede0f3"
-  integrity sha512-C5gWKh6I58w3GeSc0wp2Ne+rqVw8qwcmZnQGpjiek8A2wpbxSJb1FdCoQVO+jDJs35bFgo/WETgl1fqgsxN0Hg==
-  dependencies:
-    workbox-core "^4.3.1"
-
-worker-farm@^1.5.2, worker-farm@^1.7.0:
-  version "1.7.0"
-  resolved "https://registry.yarnpkg.com/worker-farm/-/worker-farm-1.7.0.tgz#26a94c5391bbca926152002f69b84a4bf772e5a8"
-  integrity sha512-rvw3QTZc8lAxyVrqcSGVm5yP/IJ2UcB3U0graE3LCFoZ0Yn2x4EoVSqJKdB/T5M+FLcRPjz4TDacRf3OCfNUzw==
-  dependencies:
-    errno "~0.1.7"
-
-worker-rpc@^0.1.0:
-  version "0.1.1"
-  resolved "https://registry.yarnpkg.com/worker-rpc/-/worker-rpc-0.1.1.tgz#cb565bd6d7071a8f16660686051e969ad32f54d5"
-  integrity sha512-P1WjMrUB3qgJNI9jfmpZ/htmBEjFh//6l/5y8SD9hg1Ef5zTTVVoRjTrTEzPrNBQvmhMxkoTsjOXN10GWU7aCg==
-  dependencies:
-    microevent.ts "~0.1.1"
-
-wrap-ansi@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85"
-  integrity sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=
-  dependencies:
-    string-width "^1.0.1"
-    strip-ansi "^3.0.1"
-
-wrap-ansi@^5.1.0:
-  version "5.1.0"
-  resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-5.1.0.tgz#1fd1f67235d5b6d0fee781056001bfb694c03b09"
-  integrity sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==
-  dependencies:
-    ansi-styles "^3.2.0"
-    string-width "^3.0.0"
-    strip-ansi "^5.0.0"
-
-wrappy@1:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
-  integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=
-
-write-file-atomic@2.4.1:
-  version "2.4.1"
-  resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-2.4.1.tgz#d0b05463c188ae804396fd5ab2a370062af87529"
-  integrity sha512-TGHFeZEZMnv+gBFRfjAcxL5bPHrsGKtnb4qsFAws7/vlh+QfwAaySIw4AXP9ZskTTh5GWu3FLuJhsWVdiJPGvg==
-  dependencies:
-    graceful-fs "^4.1.11"
-    imurmurhash "^0.1.4"
-    signal-exit "^3.0.2"
-
-write@1.0.3:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/write/-/write-1.0.3.tgz#0800e14523b923a387e415123c865616aae0f5c3"
-  integrity sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==
-  dependencies:
-    mkdirp "^0.5.1"
-
-ws@^5.2.0:
-  version "5.2.2"
-  resolved "https://registry.yarnpkg.com/ws/-/ws-5.2.2.tgz#dffef14866b8e8dc9133582514d1befaf96e980f"
-  integrity sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==
-  dependencies:
-    async-limiter "~1.0.0"
-
-ws@^6.1.2:
-  version "6.2.1"
-  resolved "https://registry.yarnpkg.com/ws/-/ws-6.2.1.tgz#442fdf0a47ed64f59b6a5d8ff130f4748ed524fb"
-  integrity sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==
-  dependencies:
-    async-limiter "~1.0.0"
-
-xml-name-validator@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-3.0.0.tgz#6ae73e06de4d8c6e47f9fb181f78d648ad457c6a"
-  integrity sha512-A5CUptxDsvxKJEU3yO6DuWBSJz/qizqzJKOMIfUJHETbBw/sFaDxgd6fxm1ewUaM0jZ444Fc5vC5ROYurg/4Pw==
-
-xmlchars@^2.1.1:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/xmlchars/-/xmlchars-2.1.1.tgz#ef1a81c05bff629c2280007f12daca21bd6f6c93"
-  integrity sha512-7hew1RPJ1iIuje/Y01bGD/mXokXxegAgVS+e+E0wSi2ILHQkYAH1+JXARwTjZSM4Z4Z+c73aKspEcqj+zPPL/w==
-
-xregexp@4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/xregexp/-/xregexp-4.0.0.tgz#e698189de49dd2a18cc5687b05e17c8e43943020"
-  integrity sha512-PHyM+sQouu7xspQQwELlGwwd05mXUFqwFYfqPO0cC7x4fxyHnnuetmQr6CjJiafIDoH4MogHb9dOoJzR/Y4rFg==
-
-xtend@^4.0.0, xtend@~4.0.1:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54"
-  integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==
-
-"y18n@^3.2.1 || ^4.0.0", y18n@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.0.tgz#95ef94f85ecc81d007c264e190a120f0a3c8566b"
-  integrity sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==
-
-yallist@^3.0.0, yallist@^3.0.2, yallist@^3.0.3:
-  version "3.0.3"
-  resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.0.3.tgz#b4b049e314be545e3ce802236d6cd22cd91c3de9"
-  integrity sha512-S+Zk8DEWE6oKpV+vI3qWkaK+jSbIK86pCwe2IF/xwIpQ8jEuxpw9NyaGjmp9+BoJv5FV2piqCDcoCtStppiq2A==
-
-yargs-parser@^10.1.0:
-  version "10.1.0"
-  resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-10.1.0.tgz#7202265b89f7e9e9f2e5765e0fe735a905edbaa8"
-  integrity sha512-VCIyR1wJoEBZUqk5PA+oOBF6ypbwh5aNB3I50guxAL/quggdfs4TtNHQrSazFA3fYZ+tEqfs0zIGlv0c/rgjbQ==
-  dependencies:
-    camelcase "^4.1.0"
-
-yargs-parser@^13.1.1:
-  version "13.1.1"
-  resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-13.1.1.tgz#d26058532aa06d365fe091f6a1fc06b2f7e5eca0"
-  integrity sha512-oVAVsHz6uFrg3XQheFII8ESO2ssAf9luWuAd6Wexsu4F3OtIW0o8IribPXYrD4WC24LWtPrJlGy87y5udK+dxQ==
-  dependencies:
-    camelcase "^5.0.0"
-    decamelize "^1.2.0"
-
-yargs@12.0.2:
-  version "12.0.2"
-  resolved "https://registry.yarnpkg.com/yargs/-/yargs-12.0.2.tgz#fe58234369392af33ecbef53819171eff0f5aadc"
-  integrity sha512-e7SkEx6N6SIZ5c5H22RTZae61qtn3PYUE8JYbBFlK9sYmh3DMQ6E5ygtaG/2BW0JZi4WGgTR2IV5ChqlqrDGVQ==
-  dependencies:
-    cliui "^4.0.0"
-    decamelize "^2.0.0"
-    find-up "^3.0.0"
-    get-caller-file "^1.0.1"
-    os-locale "^3.0.0"
-    require-directory "^2.1.1"
-    require-main-filename "^1.0.1"
-    set-blocking "^2.0.0"
-    string-width "^2.0.0"
-    which-module "^2.0.0"
-    y18n "^3.2.1 || ^4.0.0"
-    yargs-parser "^10.1.0"
-
-yargs@^13.3.0:
-  version "13.3.0"
-  resolved "https://registry.yarnpkg.com/yargs/-/yargs-13.3.0.tgz#4c657a55e07e5f2cf947f8a366567c04a0dedc83"
-  integrity sha512-2eehun/8ALW8TLoIl7MVaRUrg+yCnenu8B4kBlRxj3GJGDKU1Og7sMXPNm1BYyM1DOJmTZ4YeN/Nwxv+8XJsUA==
-  dependencies:
-    cliui "^5.0.0"
-    find-up "^3.0.0"
-    get-caller-file "^2.0.1"
-    require-directory "^2.1.1"
-    require-main-filename "^2.0.0"
-    set-blocking "^2.0.0"
-    string-width "^3.0.0"
-    which-module "^2.0.0"
-    y18n "^4.0.0"
-    yargs-parser "^13.1.1"
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java
deleted file mode 100644
index ff3765a..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java
+++ /dev/null
@@ -1,232 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon;
-
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR;
-import static org.junit.Assert.assertNotNull;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import org.apache.hadoop.ozone.recon.recovery.ReconOmMetadataManagerImpl;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.junit.Rule;
-import org.junit.rules.TemporaryFolder;
-
-/**
- * Utility methods for test classes.
- */
-public abstract class AbstractOMMetadataManagerTest {
-
-  @Rule
-  public TemporaryFolder temporaryFolder = new TemporaryFolder();
-
-  /**
-   * Create a new OM metadata manager instance with a default volume and bucket.
-   * @throws IOException if the OM DB directory cannot be created.
-   */
-  protected OMMetadataManager initializeNewOmMetadataManager()
-      throws IOException {
-    File omDbDir = temporaryFolder.newFolder();
-    OzoneConfiguration omConfiguration = new OzoneConfiguration();
-    omConfiguration.set(OZONE_OM_DB_DIRS,
-        omDbDir.getAbsolutePath());
-    OMMetadataManager omMetadataManager = new OmMetadataManagerImpl(
-        omConfiguration);
-
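-    // Seed the new DB with a default volume ("sampleVol") and bucket ("bucketOne").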
-    String volumeKey = omMetadataManager.getVolumeKey("sampleVol");
-    OmVolumeArgs args =
-        OmVolumeArgs.newBuilder()
-            .setVolume("sampleVol")
-            .setAdminName("TestUser")
-            .setOwnerName("TestUser")
-            .build();
-    omMetadataManager.getVolumeTable().put(volumeKey, args);
-
-    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .build();
-
-    String bucketKey = omMetadataManager.getBucketKey(
-        bucketInfo.getVolumeName(), bucketInfo.getBucketName());
-
-    omMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
-
-    return omMetadataManager;
-  }
-
-  /**
-   * Create an empty OM Metadata manager instance.
-   * @throws IOException if the metadata manager cannot be created.
-   */
-  protected OMMetadataManager initializeEmptyOmMetadataManager()
-      throws IOException {
-    File omDbDir = temporaryFolder.newFolder();
-    OzoneConfiguration omConfiguration = new OzoneConfiguration();
-    omConfiguration.set(OZONE_OM_DB_DIRS,
-        omDbDir.getAbsolutePath());
-    return new OmMetadataManagerImpl(omConfiguration);
-  }
-
-  /**
-   * Get an instance of Recon OM Metadata manager.
-   * @return ReconOMMetadataManager
-   * @throws IOException if the RocksDB instance cannot be created.
-   */
-  protected ReconOMMetadataManager getTestMetadataManager(
-      OMMetadataManager omMetadataManager)
-      throws IOException {
-
-    DBCheckpoint checkpoint = omMetadataManager.getStore()
-        .getCheckpoint(true);
-    assertNotNull(checkpoint.getCheckpointLocation());
-
-    File reconOmDbDir = temporaryFolder.newFolder();
-    OzoneConfiguration configuration = new OzoneConfiguration();
-    configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir
-        .getAbsolutePath());
-
-    ReconOMMetadataManager reconOMMetaMgr =
-        new ReconOmMetadataManagerImpl(configuration, new ReconUtils());
-    reconOMMetaMgr.start(configuration);
-
-    reconOMMetaMgr.updateOmDB(
-        checkpoint.getCheckpointLocation().toFile());
-    return reconOMMetaMgr;
-  }
-
-  /**
-   * Write a key to OM instance.
-   * @throws IOException while writing.
-   */
-  public void writeDataToOm(OMMetadataManager omMetadataManager,
-                                   String key) throws IOException {
-
-    String omKey = omMetadataManager.getOzoneKey("sampleVol",
-        "bucketOne", key);
-
-    omMetadataManager.getKeyTable().put(omKey,
-        new OmKeyInfo.Builder()
-            .setBucketName("bucketOne")
-            .setVolumeName("sampleVol")
-            .setKeyName(key)
-            .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-            .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE)
-            .build());
-  }
-
-  /**
-   * Write a key to OM instance.
-   * @throws IOException while writing.
-   */
-  protected void writeDataToOm(OMMetadataManager omMetadataManager,
-                               String key,
-                               String bucket,
-                               String volume,
-                               List<OmKeyLocationInfoGroup>
-                                   omKeyLocationInfoGroupList)
-      throws IOException {
-
-    String omKey = omMetadataManager.getOzoneKey(volume,
-        bucket, key);
-
-    omMetadataManager.getKeyTable().put(omKey,
-        new OmKeyInfo.Builder()
-            .setBucketName(bucket)
-            .setVolumeName(volume)
-            .setKeyName(key)
-            .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-            .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE)
-            .setOmKeyLocationInfos(omKeyLocationInfoGroupList)
-            .build());
-  }
-
-  /**
-   * Write a key to OM instance.
-   * @throws IOException while writing.
-   */
-  protected void writeDataToOm(OMMetadataManager omMetadataManager,
-      String key,
-      String bucket,
-      String volume,
-      Long dataSize,
-      List<OmKeyLocationInfoGroup>
-          omKeyLocationInfoGroupList)
-      throws IOException {
-
-    String omKey = omMetadataManager.getOzoneKey(volume,
-        bucket, key);
-
-    omMetadataManager.getKeyTable().put(omKey,
-        new OmKeyInfo.Builder()
-            .setBucketName(bucket)
-            .setVolumeName(volume)
-            .setKeyName(key)
-            .setDataSize(dataSize)
-            .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-            .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE)
-            .setOmKeyLocationInfos(omKeyLocationInfoGroupList)
-            .build());
-  }
-
-  /**
-   * Return a random pipeline.
-   * @return pipeline
-   */
-  protected Pipeline getRandomPipeline() {
-    return Pipeline.newBuilder()
-        .setFactor(HddsProtos.ReplicationFactor.ONE)
-        .setId(PipelineID.randomId())
-        .setNodes(Collections.EMPTY_LIST)
-        .setState(Pipeline.PipelineState.OPEN)
-        .setType(HddsProtos.ReplicationType.STAND_ALONE)
-        .build();
-  }
-
-  /**
-   * Get new OmKeyLocationInfo for given BlockID and Pipeline.
-   * @param blockID blockId
-   * @param pipeline pipeline
-   * @return new instance of OmKeyLocationInfo
-   */
-  protected OmKeyLocationInfo getOmKeyLocationInfo(BlockID blockID,
-                                                   Pipeline pipeline) {
-    return new OmKeyLocationInfo.Builder()
-        .setBlockID(blockID)
-        .setPipeline(pipeline)
-        .build();
-  }
-}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/GuiceInjectorUtilsForTestsImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/GuiceInjectorUtilsForTestsImpl.java
deleted file mode 100644
index 6f16c1c..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/GuiceInjectorUtilsForTestsImpl.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon;
-
-import org.apache.hadoop.ozone.recon.types.GuiceInjectorUtilsForTests;
-
-/**
- * Implementation for GuiceInjectorUtilsForTests.
- */
-public class GuiceInjectorUtilsForTestsImpl implements
-    GuiceInjectorUtilsForTests {
-}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java
deleted file mode 100644
index 772c661..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
-import org.apache.hadoop.ozone.recon.spi.impl.ContainerKeyPrefixCodec;
-import org.apache.hadoop.hdds.utils.db.Codec;
-import org.apache.hadoop.hdds.utils.db.IntegerCodec;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Unit Tests for Codecs used in Recon.
- */
-public class TestReconCodecs {
-
-  @Test
-  public void testContainerKeyPrefixCodec() throws IOException {
-    ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix(
-        System.currentTimeMillis(), "TestKeyPrefix", 0);
-
-    Codec<ContainerKeyPrefix> codec = new ContainerKeyPrefixCodec();
-    byte[] persistedFormat = codec.toPersistedFormat(containerKeyPrefix);
-    Assert.assertNotNull(persistedFormat);
-    ContainerKeyPrefix fromPersistedFormat =
-        codec.fromPersistedFormat(persistedFormat);
-    Assert.assertEquals(containerKeyPrefix, fromPersistedFormat);
-  }
-
-  @Test
-  public void testIntegerCodec() throws IOException {
-    Integer i = 1000;
-    Codec<Integer> codec = new IntegerCodec();
-    byte[] persistedFormat = codec.toPersistedFormat(i);
-    Assert.assertNotNull(persistedFormat);
-    Integer fromPersistedFormat =
-        codec.fromPersistedFormat(persistedFormat);
-    Assert.assertEquals(i, fromPersistedFormat);
-  }
-}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
deleted file mode 100644
index 6bb8993..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon;
-
-import static org.apache.hadoop.ozone.recon.ReconUtils.createTarFile;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.charset.Charset;
-import java.nio.file.Paths;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.http.HttpEntity;
-import org.apache.http.StatusLine;
-import org.apache.http.client.methods.CloseableHttpResponse;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-/**
- * Tests for Recon utility methods.
- */
-public class TestReconUtils {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Test
-  public void testGetReconDbDir() throws Exception {
-
-    String filePath = folder.getRoot().getAbsolutePath();
-    OzoneConfiguration configuration = new OzoneConfiguration();
-    configuration.set("TEST_DB_DIR", filePath);
-
-    File file = new ReconUtils().getReconDbDir(configuration,
-        "TEST_DB_DIR");
-    Assert.assertEquals(filePath, file.getAbsolutePath());
-  }
-
-  @Test
-  public void testCreateTarFile() throws Exception {
-
-    File tempSnapshotDir = null;
-    FileInputStream fis = null;
-    FileOutputStream fos = null;
-    File tarFile = null;
-
-    try {
-      String testDirName = System.getProperty("java.io.tmpdir");
-      if (!testDirName.endsWith("/")) {
-        testDirName += "/";
-      }
-      testDirName += "TestCreateTarFile_Dir" + System.currentTimeMillis();
-      tempSnapshotDir = new File(testDirName);
-      tempSnapshotDir.mkdirs();
-
-      File file = new File(testDirName + "/temp1.txt");
-      FileWriter writer = new FileWriter(file);
-      writer.write("Test data 1");
-      writer.close();
-
-      file = new File(testDirName + "/temp2.txt");
-      writer = new FileWriter(file);
-      writer.write("Test data 2");
-      writer.close();
-
-      tarFile = createTarFile(Paths.get(testDirName));
-      Assert.assertNotNull(tarFile);
-
-    } finally {
-      org.apache.hadoop.io.IOUtils.closeStream(fis);
-      org.apache.hadoop.io.IOUtils.closeStream(fos);
-      FileUtils.deleteDirectory(tempSnapshotDir);
-      FileUtils.deleteQuietly(tarFile);
-    }
-  }
-
-  @Test
-  public void testUntarCheckpointFile() throws Exception {
-
-    File newDir = folder.newFolder();
-
-    File file1 = Paths.get(newDir.getAbsolutePath(), "file1")
-        .toFile();
-    String str = "File1 Contents";
-    BufferedWriter writer = new BufferedWriter(new FileWriter(
-        file1.getAbsolutePath()));
-    writer.write(str);
-    writer.close();
-
-    File file2 = Paths.get(newDir.getAbsolutePath(), "file2")
-        .toFile();
-    str = "File2 Contents";
-    writer = new BufferedWriter(new FileWriter(file2.getAbsolutePath()));
-    writer.write(str);
-    writer.close();
-
-    //Create test tar file.
-    File tarFile = createTarFile(newDir.toPath());
-    File outputDir = folder.newFolder();
-    new ReconUtils().untarCheckpointFile(tarFile, outputDir.toPath());
-
-    assertTrue(outputDir.isDirectory());
-    assertEquals(2, outputDir.listFiles().length);
-  }
-
-  @Test
-  public void testMakeHttpCall() throws Exception {
-
-    CloseableHttpClient httpClientMock = mock(CloseableHttpClient.class);
-    String url = "http://localhost:9874/dbCheckpoint";
-
-    CloseableHttpResponse httpResponseMock = mock(CloseableHttpResponse.class);
-    when(httpClientMock.execute(any(HttpGet.class)))
-        .thenReturn(httpResponseMock);
-
-    StatusLine statusLineMock = mock(StatusLine.class);
-    when(statusLineMock.getStatusCode()).thenReturn(200);
-    when(httpResponseMock.getStatusLine()).thenReturn(statusLineMock);
-
-    HttpEntity httpEntityMock = mock(HttpEntity.class);
-    when(httpResponseMock.getEntity()).thenReturn(httpEntityMock);
-    File file1 = Paths.get(folder.getRoot().getPath(), "file1")
-        .toFile();
-    BufferedWriter writer = new BufferedWriter(new FileWriter(
-        file1.getAbsolutePath()));
-    writer.write("File 1 Contents");
-    writer.close();
-    InputStream fileInputStream = new FileInputStream(file1);
-
-    when(httpEntityMock.getContent()).thenReturn(new InputStream() {
-      @Override
-      public int read() throws IOException {
-        return fileInputStream.read();
-      }
-    });
-
-    InputStream inputStream = new ReconUtils()
-        .makeHttpCall(httpClientMock, url);
-    String contents = IOUtils.toString(inputStream, Charset.defaultCharset());
-
-    assertEquals("File 1 Contents", contents);
-  }
-
-  @Test
-  public void testGetLastKnownDB() throws IOException {
-    File newDir = folder.newFolder();
-
-    File file1 = Paths.get(newDir.getAbsolutePath(), "valid_1")
-        .toFile();
-    String str = "File1 Contents";
-    BufferedWriter writer = new BufferedWriter(new FileWriter(
-        file1.getAbsolutePath()));
-    writer.write(str);
-    writer.close();
-
-    File file2 = Paths.get(newDir.getAbsolutePath(), "valid_2")
-        .toFile();
-    str = "File2 Contents";
-    writer = new BufferedWriter(new FileWriter(file2.getAbsolutePath()));
-    writer.write(str);
-    writer.close();
-
-
-    File file3 = Paths.get(newDir.getAbsolutePath(), "invalid_3")
-        .toFile();
-    str = "File3 Contents";
-    writer = new BufferedWriter(new FileWriter(file3.getAbsolutePath()));
-    writer.write(str);
-    writer.close();
-
-    ReconUtils reconUtils = new ReconUtils();
-    File latestValidFile = reconUtils.getLastKnownDB(newDir, "valid");
-    assertEquals("valid_2", latestValidFile.getName());
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java
deleted file mode 100644
index 9cca5a7..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java
+++ /dev/null
@@ -1,373 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.api;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-import javax.sql.DataSource;
-import javax.ws.rs.core.Response;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.recon.AbstractOMMetadataManagerTest;
-import org.apache.hadoop.ozone.recon.GuiceInjectorUtilsForTestsImpl;
-import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
-import org.apache.hadoop.ozone.recon.api.types.ContainersResponse;
-import org.apache.hadoop.ozone.recon.api.types.KeyMetadata;
-import org.apache.hadoop.ozone.recon.api.types.KeysResponse;
-import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
-import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
-import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.hadoop.ozone.recon.schema.StatsSchemaDefinition;
-import org.jooq.impl.DSL;
-import org.jooq.impl.DefaultConfiguration;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Injector;
-
-/**
- * Test for container key service.
- */
-public class TestContainerKeyService extends AbstractOMMetadataManagerTest {
-
-  private ContainerDBServiceProvider containerDbServiceProvider;
-  private Injector injector;
-  private OzoneManagerServiceProviderImpl ozoneManagerServiceProvider;
-  private ContainerKeyService containerKeyService;
-  private GuiceInjectorUtilsForTestsImpl guiceInjectorTest =
-      new GuiceInjectorUtilsForTestsImpl();
-  private boolean isSetupDone = false;
-  private ReconOMMetadataManager reconOMMetadataManager;
-  private void initializeInjector() throws Exception {
-    reconOMMetadataManager = getTestMetadataManager(
-        initializeNewOmMetadataManager());
-    ozoneManagerServiceProvider = getMockOzoneManagerServiceProvider();
-
-    Injector parentInjector = guiceInjectorTest.getInjector(
-        ozoneManagerServiceProvider, reconOMMetadataManager, temporaryFolder);
-
-    injector = parentInjector.createChildInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        containerKeyService = new ContainerKeyService();
-        bind(ContainerKeyService.class).toInstance(containerKeyService);
-      }
-    });
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    // The following setup runs only once
-    if (!isSetupDone) {
-      initializeInjector();
-
-      DSL.using(new DefaultConfiguration().set(
-          injector.getInstance(DataSource.class)));
-
-      containerDbServiceProvider = injector.getInstance(
-          ContainerDBServiceProvider.class);
-
-      StatsSchemaDefinition schemaDefinition = injector.getInstance(
-          StatsSchemaDefinition.class);
-      schemaDefinition.initializeSchema();
-
-      isSetupDone = true;
-    }
-
-    //Write Data to OM
-    Pipeline pipeline = getRandomPipeline();
-
-    List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
-    BlockID blockID1 = new BlockID(1, 101);
-    OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1,
-        pipeline);
-    omKeyLocationInfoList.add(omKeyLocationInfo1);
-
-    BlockID blockID2 = new BlockID(2, 102);
-    OmKeyLocationInfo omKeyLocationInfo2 = getOmKeyLocationInfo(blockID2,
-        pipeline);
-    omKeyLocationInfoList.add(omKeyLocationInfo2);
-
-    OmKeyLocationInfoGroup omKeyLocationInfoGroup = new
-        OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
-
-    //key = key_one, Blocks = [ {CID = 1, LID = 101}, {CID = 2, LID = 102} ]
-    writeDataToOm(reconOMMetadataManager,
-        "key_one", "bucketOne", "sampleVol",
-        Collections.singletonList(omKeyLocationInfoGroup));
-
-    List<OmKeyLocationInfoGroup> infoGroups = new ArrayList<>();
-    BlockID blockID3 = new BlockID(1, 103);
-    OmKeyLocationInfo omKeyLocationInfo3 = getOmKeyLocationInfo(blockID3,
-        pipeline);
-
-    List<OmKeyLocationInfo> omKeyLocationInfoListNew = new ArrayList<>();
-    omKeyLocationInfoListNew.add(omKeyLocationInfo3);
-    infoGroups.add(new OmKeyLocationInfoGroup(0,
-        omKeyLocationInfoListNew));
-
-    BlockID blockID4 = new BlockID(1, 104);
-    OmKeyLocationInfo omKeyLocationInfo4 = getOmKeyLocationInfo(blockID4,
-        pipeline);
-
-    omKeyLocationInfoListNew = new ArrayList<>();
-    omKeyLocationInfoListNew.add(omKeyLocationInfo4);
-    infoGroups.add(new OmKeyLocationInfoGroup(1,
-        omKeyLocationInfoListNew));
-
-    //key = key_two, Blocks = [ {CID = 1, LID = 103}, {CID = 1, LID = 104} ]
-    writeDataToOm(reconOMMetadataManager,
-        "key_two", "bucketOne", "sampleVol", infoGroups);
-
-    List<OmKeyLocationInfo> omKeyLocationInfoList2 = new ArrayList<>();
-    BlockID blockID5 = new BlockID(2, 2);
-    OmKeyLocationInfo omKeyLocationInfo5 = getOmKeyLocationInfo(blockID5,
-        pipeline);
-    omKeyLocationInfoList2.add(omKeyLocationInfo5);
-
-    BlockID blockID6 = new BlockID(2, 3);
-    OmKeyLocationInfo omKeyLocationInfo6 = getOmKeyLocationInfo(blockID6,
-        pipeline);
-    omKeyLocationInfoList2.add(omKeyLocationInfo6);
-
-    OmKeyLocationInfoGroup omKeyLocationInfoGroup2 = new
-        OmKeyLocationInfoGroup(0, omKeyLocationInfoList2);
-
-    //key = key_three, Blocks = [ {CID = 2, LID = 2}, {CID = 2, LID = 3} ]
-    writeDataToOm(reconOMMetadataManager,
-        "key_three", "bucketOne", "sampleVol",
-        Collections.singletonList(omKeyLocationInfoGroup2));
-
-    //Generate Recon container DB data.
-    OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class);
-    Table tableMock = mock(Table.class);
-    when(tableMock.getName()).thenReturn("KeyTable");
-    when(omMetadataManagerMock.getKeyTable()).thenReturn(tableMock);
-    ContainerKeyMapperTask containerKeyMapperTask =
-        new ContainerKeyMapperTask(containerDbServiceProvider);
-    containerKeyMapperTask.reprocess(reconOMMetadataManager);
-  }
-
-  @Test
-  public void testGetKeysForContainer() {
-
-    Response response = containerKeyService.getKeysForContainer(1L, -1, "");
-
-    KeysResponse responseObject = (KeysResponse) response.getEntity();
-    KeysResponse.KeysResponseData data = responseObject.getKeysResponseData();
-    Collection<KeyMetadata> keyMetadataList = data.getKeys();
-
-    assertEquals(3, data.getTotalCount());
-    assertEquals(2, keyMetadataList.size());
-
-    Iterator<KeyMetadata> iterator = keyMetadataList.iterator();
-
-    KeyMetadata keyMetadata = iterator.next();
-    assertEquals("key_one", keyMetadata.getKey());
-    assertEquals(1, keyMetadata.getVersions().size());
-    assertEquals(1, keyMetadata.getBlockIds().size());
-    Map<Long, List<KeyMetadata.ContainerBlockMetadata>> blockIds =
-        keyMetadata.getBlockIds();
-    assertEquals(101, blockIds.get(0L).iterator().next().getLocalID());
-
-    keyMetadata = iterator.next();
-    assertEquals("key_two", keyMetadata.getKey());
-    assertEquals(2, keyMetadata.getVersions().size());
-    assertTrue(keyMetadata.getVersions().contains(0L) && keyMetadata
-        .getVersions().contains(1L));
-    assertEquals(2, keyMetadata.getBlockIds().size());
-    blockIds = keyMetadata.getBlockIds();
-    assertEquals(103, blockIds.get(0L).iterator().next().getLocalID());
-    assertEquals(104, blockIds.get(1L).iterator().next().getLocalID());
-
-    response = containerKeyService.getKeysForContainer(3L, -1, "");
-    responseObject = (KeysResponse) response.getEntity();
-    data = responseObject.getKeysResponseData();
-    keyMetadataList = data.getKeys();
-    assertTrue(keyMetadataList.isEmpty());
-    assertEquals(0, data.getTotalCount());
-
-    // test if limit works as expected
-    response = containerKeyService.getKeysForContainer(1L, 1, "");
-    responseObject = (KeysResponse) response.getEntity();
-    data = responseObject.getKeysResponseData();
-    keyMetadataList = data.getKeys();
-    assertEquals(1, keyMetadataList.size());
-    assertEquals(3, data.getTotalCount());
-  }
-
-  @Test
-  public void testGetKeysForContainerWithPrevKey() {
-    // test if prev-key param works as expected
-    Response response = containerKeyService.getKeysForContainer(
-        1L, -1, "/sampleVol/bucketOne/key_one");
-
-    KeysResponse responseObject =
-        (KeysResponse) response.getEntity();
-
-    KeysResponse.KeysResponseData data =
-        responseObject.getKeysResponseData();
-    assertEquals(3, data.getTotalCount());
-
-    Collection<KeyMetadata> keyMetadataList = data.getKeys();
-    assertEquals(1, keyMetadataList.size());
-
-    Iterator<KeyMetadata> iterator = keyMetadataList.iterator();
-    KeyMetadata keyMetadata = iterator.next();
-
-    assertEquals("key_two", keyMetadata.getKey());
-    assertEquals(2, keyMetadata.getVersions().size());
-    assertEquals(2, keyMetadata.getBlockIds().size());
-
-    response = containerKeyService.getKeysForContainer(
-        1L, -1, StringUtils.EMPTY);
-    responseObject = (KeysResponse) response.getEntity();
-    data = responseObject.getKeysResponseData();
-    keyMetadataList = data.getKeys();
-
-    assertEquals(3, data.getTotalCount());
-    assertEquals(2, keyMetadataList.size());
-    iterator = keyMetadataList.iterator();
-    keyMetadata = iterator.next();
-    assertEquals("key_one", keyMetadata.getKey());
-
-    // test for negative cases
-    response = containerKeyService.getKeysForContainer(
-        1L, -1, "/sampleVol/bucketOne/invalid_key");
-    responseObject = (KeysResponse) response.getEntity();
-    data = responseObject.getKeysResponseData();
-    keyMetadataList = data.getKeys();
-    assertEquals(3, data.getTotalCount());
-    assertEquals(0, keyMetadataList.size());
-
-    response = containerKeyService.getKeysForContainer(
-        5L, -1, "");
-    responseObject = (KeysResponse) response.getEntity();
-    data = responseObject.getKeysResponseData();
-    keyMetadataList = data.getKeys();
-    assertEquals(0, keyMetadataList.size());
-    assertEquals(0, data.getTotalCount());
-  }
-
-  @Test
-  public void testGetContainers() {
-
-    Response response = containerKeyService.getContainers(-1, 0L);
-
-    ContainersResponse responseObject =
-        (ContainersResponse) response.getEntity();
-
-    ContainersResponse.ContainersResponseData data =
-        responseObject.getContainersResponseData();
-    assertEquals(2, data.getTotalCount());
-
-    List<ContainerMetadata> containers = new ArrayList<>(data.getContainers());
-
-    Iterator<ContainerMetadata> iterator = containers.iterator();
-
-    ContainerMetadata containerMetadata = iterator.next();
-    assertEquals(1L, containerMetadata.getContainerID());
-    // Number of keys for CID:1 should be 3 because of two different versions
-    // of key_two stored in CID:1
-    assertEquals(3L, containerMetadata.getNumberOfKeys());
-
-    containerMetadata = iterator.next();
-    assertEquals(2L, containerMetadata.getContainerID());
-    assertEquals(2L, containerMetadata.getNumberOfKeys());
-
-    // test if limit works as expected
-    response = containerKeyService.getContainers(1, 0L);
-    responseObject = (ContainersResponse) response.getEntity();
-    data = responseObject.getContainersResponseData();
-    containers = new ArrayList<>(data.getContainers());
-    assertEquals(1, containers.size());
-    assertEquals(2, data.getTotalCount());
-  }
-
-  @Test
-  public void testGetContainersWithPrevKey() {
-
-    Response response = containerKeyService.getContainers(1, 1L);
-
-    ContainersResponse responseObject =
-        (ContainersResponse) response.getEntity();
-
-    ContainersResponse.ContainersResponseData data =
-        responseObject.getContainersResponseData();
-    assertEquals(2, data.getTotalCount());
-
-    List<ContainerMetadata> containers = new ArrayList<>(data.getContainers());
-
-    Iterator<ContainerMetadata> iterator = containers.iterator();
-
-    ContainerMetadata containerMetadata = iterator.next();
-
-    assertEquals(1, containers.size());
-    assertEquals(2L, containerMetadata.getContainerID());
-
-    response = containerKeyService.getContainers(-1, 0L);
-    responseObject = (ContainersResponse) response.getEntity();
-    data = responseObject.getContainersResponseData();
-    containers = new ArrayList<>(data.getContainers());
-    assertEquals(2, containers.size());
-    assertEquals(2, data.getTotalCount());
-    iterator = containers.iterator();
-    containerMetadata = iterator.next();
-    assertEquals(1L, containerMetadata.getContainerID());
-
-    // test for negative cases
-    response = containerKeyService.getContainers(-1, 5L);
-    responseObject = (ContainersResponse) response.getEntity();
-    data = responseObject.getContainersResponseData();
-    containers = new ArrayList<>(data.getContainers());
-    assertEquals(0, containers.size());
-    assertEquals(2, data.getTotalCount());
-
-    response = containerKeyService.getContainers(-1, -1L);
-    responseObject = (ContainersResponse) response.getEntity();
-    data = responseObject.getContainersResponseData();
-    containers = new ArrayList<>(data.getContainers());
-    assertEquals(2, containers.size());
-    assertEquals(2, data.getTotalCount());
-  }
-
-  private OzoneManagerServiceProviderImpl getMockOzoneManagerServiceProvider() {
-    OzoneManagerServiceProviderImpl omServiceProviderMock =
-        mock(OzoneManagerServiceProviderImpl.class);
-    return omServiceProviderMock;
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestUtilizationService.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestUtilizationService.java
deleted file mode 100644
index a3265b8..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestUtilizationService.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.api;
-
-import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao;
-import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize;
-import org.junit.Test;
-
-import javax.ws.rs.core.Response;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-/**
- * Test for the file size count service.
- */
-public class TestUtilizationService {
-  private UtilizationService utilizationService;
-  private int maxBinSize = 42;
-
-  private List<FileCountBySize> setUpResultList() {
-    List<FileCountBySize> resultList = new ArrayList<>();
-    for (int i = 0; i < maxBinSize; i++) {
-      if (i == maxBinSize - 1) {
-        // for the last bin, the file count is 41.
-        resultList.add(new FileCountBySize(Long.MAX_VALUE, (long) i));
-      } else {
-        // the count of files for each upperBound is equal to its index.
-        resultList.add(new FileCountBySize((long) Math.pow(2, (10+i)),
-            (long) i));
-      }
-    }
-    return resultList;
-  }
-
-  @Test
-  public void testGetFileCounts() {
-    List<FileCountBySize> resultList = setUpResultList();
-
-    FileCountBySizeDao fileCountBySizeDao = mock(FileCountBySizeDao.class);
-    utilizationService = mock(UtilizationService.class);
-    when(utilizationService.getFileCounts()).thenCallRealMethod();
-    when(utilizationService.getDao()).thenReturn(fileCountBySizeDao);
-    when(fileCountBySizeDao.findAll()).thenReturn(resultList);
-
-    Response response = utilizationService.getFileCounts();
-    // get result list from Response entity
-    List<FileCountBySize> responseList =
-        (List<FileCountBySize>) response.getEntity();
-
-    verify(fileCountBySizeDao, times(1)).findAll();
-    assertEquals(maxBinSize, responseList.size());
-
-    assertEquals(resultList, responseList);
-  }
-}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/package-info.java
deleted file mode 100644
index faf2658..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * The classes in this package test the Rest API layer of Recon.
- */
-package org.apache.hadoop.ozone.recon.api;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/package-info.java
deleted file mode 100644
index d0066a3..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Package for recon server tests.
- */
-package org.apache.hadoop.ozone.recon;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractSqlDatabaseTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractSqlDatabaseTest.java
deleted file mode 100644
index 898dd19..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractSqlDatabaseTest.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon.persistence;
-
-import java.io.File;
-import java.io.IOException;
-
-import javax.sql.DataSource;
-
-import org.jooq.DSLContext;
-import org.jooq.SQLDialect;
-import org.jooq.impl.DSL;
-import org.jooq.impl.DefaultConfiguration;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.rules.TemporaryFolder;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Provider;
-
-/**
- * Create an injector for tests that need to access the SQL database.
- */
-public abstract class AbstractSqlDatabaseTest {
-
-  @ClassRule
-  public static TemporaryFolder temporaryFolder = new TemporaryFolder();
-
-  private static Injector injector;
-  private static DSLContext dslContext;
-
-  @BeforeClass
-  public static void setup() throws IOException {
-    File tempDir = temporaryFolder.newFolder();
-
-    DataSourceConfigurationProvider configurationProvider =
-        new DataSourceConfigurationProvider(tempDir);
-
-    JooqPersistenceModule persistenceModule =
-        new JooqPersistenceModule(configurationProvider);
-
-    injector = Guice.createInjector(persistenceModule, new AbstractModule() {
-      @Override
-      public void configure() {
-        bind(DataSourceConfiguration.class).toProvider(configurationProvider);
-      }
-    });
-    dslContext = DSL.using(new DefaultConfiguration().set(
-        injector.getInstance(DataSource.class)));
-  }
-
-  @AfterClass
-  public static void tearDown() {
-    temporaryFolder.delete();
-  }
-
-  protected Injector getInjector() {
-    return injector;
-  }
-
-  protected DSLContext getDslContext() {
-    return dslContext;
-  }
-
-  /**
-   * Local SQLite datasource provider.
-   */
-  public static class DataSourceConfigurationProvider implements
-      Provider<DataSourceConfiguration> {
-
-    private final File tempDir;
-
-    public DataSourceConfigurationProvider(File tempDir) {
-      this.tempDir = tempDir;
-    }
-
-    @Override
-    public DataSourceConfiguration get() {
-      return new DataSourceConfiguration() {
-        @Override
-        public String getDriverClass() {
-          return "org.sqlite.JDBC";
-        }
-
-        @Override
-        public String getJdbcUrl() {
-          return "jdbc:sqlite:" + tempDir.getAbsolutePath() +
-              File.separator + "sqlite_recon.db";
-        }
-
-        @Override
-        public String getUserName() {
-          return null;
-        }
-
-        @Override
-        public String getPassword() {
-          return null;
-        }
-
-        @Override
-        public boolean setAutoCommit() {
-          return true;
-        }
-
-        @Override
-        public long getConnectionTimeout() {
-          return 10000;
-        }
-
-        @Override
-        public String getSqlDialect() {
-          return SQLDialect.SQLITE.toString();
-        }
-
-        @Override
-        public Integer getMaxActiveConnections() {
-          return 2;
-        }
-
-        @Override
-        public Integer getMaxConnectionAge() {
-          return 120;
-        }
-
-        @Override
-        public Integer getMaxIdleConnectionAge() {
-          return 120;
-        }
-
-        @Override
-        public String getConnectionTestStatement() {
-          return "SELECT 1";
-        }
-
-        @Override
-        public Integer getIdleConnectionTestPeriod() {
-          return 30;
-        }
-      };
-    }
-  }
-}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java
deleted file mode 100644
index 150007e..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.persistence;
-
-import static org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition.RECON_TASK_STATUS_TABLE_NAME;
-
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.ResultSet;
-import java.sql.Types;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.sql.DataSource;
-
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.commons.lang3.tuple.Pair;
-import org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition;
-import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
-import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
-import org.jooq.Configuration;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Class used to test ReconInternalSchemaDefinition.
- */
-public class TestReconInternalSchemaDefinition extends AbstractSqlDatabaseTest {
-
-  @Test
-  public void testSchemaCreated() throws Exception {
-    ReconInternalSchemaDefinition schemaDefinition = getInjector().getInstance(
-        ReconInternalSchemaDefinition.class);
-
-    schemaDefinition.initializeSchema();
-
-    Connection connection =
-        getInjector().getInstance(DataSource.class).getConnection();
-    // Verify table definition
-    DatabaseMetaData metaData = connection.getMetaData();
-    ResultSet resultSet = metaData.getColumns(null, null,
-        RECON_TASK_STATUS_TABLE_NAME, null);
-
-    List<Pair<String, Integer>> expectedPairs = new ArrayList<>();
-
-    expectedPairs.add(new ImmutablePair<>("task_name", Types.VARCHAR));
-    expectedPairs.add(new ImmutablePair<>("last_updated_timestamp",
-        Types.INTEGER));
-    expectedPairs.add(new ImmutablePair<>("last_updated_seq_number",
-        Types.INTEGER));
-
-    List<Pair<String, Integer>> actualPairs = new ArrayList<>();
-
-    while (resultSet.next()) {
-      actualPairs.add(new ImmutablePair<>(
-          resultSet.getString("COLUMN_NAME"),
-          resultSet.getInt("DATA_TYPE")));
-    }
-
-    Assert.assertEquals(3, actualPairs.size());
-    Assert.assertEquals(expectedPairs, actualPairs);
-  }
-
-  @Test
-  public void testReconTaskStatusCRUDOperations() throws Exception {
-    // Verify table exists
-    ReconInternalSchemaDefinition schemaDefinition = getInjector().getInstance(
-        ReconInternalSchemaDefinition.class);
-
-    schemaDefinition.initializeSchema();
-
-    DataSource ds = getInjector().getInstance(DataSource.class);
-    Connection connection = ds.getConnection();
-
-    DatabaseMetaData metaData = connection.getMetaData();
-    ResultSet resultSet = metaData.getTables(null, null,
-        RECON_TASK_STATUS_TABLE_NAME, null);
-
-    while (resultSet.next()) {
-      Assert.assertEquals(RECON_TASK_STATUS_TABLE_NAME,
-          resultSet.getString("TABLE_NAME"));
-    }
-
-    ReconTaskStatusDao dao = new ReconTaskStatusDao(getInjector().getInstance(
-        Configuration.class));
-
-    long now = System.currentTimeMillis();
-    ReconTaskStatus newRecord = new ReconTaskStatus();
-    newRecord.setTaskName("HelloWorldTask");
-    newRecord.setLastUpdatedTimestamp(now);
-    newRecord.setLastUpdatedSeqNumber(100L);
-
-    // Create
-    dao.insert(newRecord);
-
-    ReconTaskStatus newRecord2 = new ReconTaskStatus();
-    newRecord2.setTaskName("GoodbyeWorldTask");
-    newRecord2.setLastUpdatedTimestamp(now);
-    newRecord2.setLastUpdatedSeqNumber(200L);
-    // Create
-    dao.insert(newRecord2);
-
-    // Read
-    ReconTaskStatus dbRecord = dao.findById("HelloWorldTask");
-
-    Assert.assertEquals("HelloWorldTask", dbRecord.getTaskName());
-    Assert.assertEquals(Long.valueOf(now), dbRecord.getLastUpdatedTimestamp());
-    Assert.assertEquals(Long.valueOf(100), dbRecord.getLastUpdatedSeqNumber());
-
-    // Update
-    dbRecord.setLastUpdatedSeqNumber(150L);
-    dao.update(dbRecord);
-
-    // Read updated
-    dbRecord = dao.findById("HelloWorldTask");
-    Assert.assertEquals(Long.valueOf(150), dbRecord.getLastUpdatedSeqNumber());
-
-    // Delete
-    dao.deleteById("GoodbyeWorldTask");
-
-    // Verify
-    dbRecord = dao.findById("GoodbyeWorldTask");
-
-    Assert.assertNull(dbRecord);
-  }
-
-}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java
deleted file mode 100644
index 864e59e..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon.persistence;
-
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.commons.lang3.tuple.Pair;
-import org.hadoop.ozone.recon.schema.StatsSchemaDefinition;
-import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
-import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats;
-import org.jooq.Configuration;
-import org.junit.Assert;
-import org.junit.Test;
-
-import javax.sql.DataSource;
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.ResultSet;
-import java.sql.Timestamp;
-import java.sql.Types;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.hadoop.ozone.recon.schema.StatsSchemaDefinition.GLOBAL_STATS_TABLE_NAME;
-
-/**
- * Class used to test StatsSchemaDefinition.
- */
-public class TestStatsSchemaDefinition extends AbstractSqlDatabaseTest {
-
-  @Test
-  public void testIfStatsSchemaCreated() throws Exception {
-    StatsSchemaDefinition schemaDefinition = getInjector().getInstance(
-        StatsSchemaDefinition.class);
-
-    schemaDefinition.initializeSchema();
-
-    Connection connection =
-        getInjector().getInstance(DataSource.class).getConnection();
-    // Verify table definition
-    DatabaseMetaData metaData = connection.getMetaData();
-    ResultSet resultSet = metaData.getColumns(null, null,
-        GLOBAL_STATS_TABLE_NAME, null);
-
-    List<Pair<String, Integer>> expectedPairs = new ArrayList<>();
-
-    expectedPairs.add(new ImmutablePair<>("key", Types.VARCHAR));
-    expectedPairs.add(new ImmutablePair<>("value", Types.INTEGER));
-    expectedPairs.add(new ImmutablePair<>("last_updated_timestamp",
-        Types.VARCHAR));
-
-    List<Pair<String, Integer>> actualPairs = new ArrayList<>();
-
-    while (resultSet.next()) {
-      actualPairs.add(new ImmutablePair<>(resultSet.getString("COLUMN_NAME"),
-          resultSet.getInt("DATA_TYPE")));
-    }
-
-    Assert.assertEquals(3, actualPairs.size());
-    Assert.assertEquals(expectedPairs, actualPairs);
-  }
-
-  @Test
-  public void testGlobalStatsCRUDOperations() throws Exception {
-    // Verify table exists
-    StatsSchemaDefinition schemaDefinition = getInjector().getInstance(
-        StatsSchemaDefinition.class);
-
-    schemaDefinition.initializeSchema();
-
-    DataSource ds = getInjector().getInstance(DataSource.class);
-    Connection connection = ds.getConnection();
-
-    DatabaseMetaData metaData = connection.getMetaData();
-    ResultSet resultSet = metaData.getTables(null, null,
-        GLOBAL_STATS_TABLE_NAME, null);
-
-    while (resultSet.next()) {
-      Assert.assertEquals(GLOBAL_STATS_TABLE_NAME,
-          resultSet.getString("TABLE_NAME"));
-    }
-
-    GlobalStatsDao dao = new GlobalStatsDao(
-        getInjector().getInstance(Configuration.class));
-
-    long now = System.currentTimeMillis();
-    GlobalStats newRecord = new GlobalStats();
-    newRecord.setLastUpdatedTimestamp(new Timestamp(now));
-    newRecord.setKey("key1");
-    newRecord.setValue(500L);
-
-    // Create
-    dao.insert(newRecord);
-    GlobalStats newRecord2 = new GlobalStats();
-    newRecord2.setLastUpdatedTimestamp(new Timestamp(now + 1000L));
-    newRecord2.setKey("key2");
-    newRecord2.setValue(10L);
-    dao.insert(newRecord2);
-
-    // Read
-    GlobalStats dbRecord = dao.findById("key1");
-
-    Assert.assertEquals("key1", dbRecord.getKey());
-    Assert.assertEquals(Long.valueOf(500), dbRecord.getValue());
-    Assert.assertEquals(new Timestamp(now), dbRecord.getLastUpdatedTimestamp());
-
-    dbRecord = dao.findById("key2");
-    Assert.assertEquals("key2", dbRecord.getKey());
-    Assert.assertEquals(Long.valueOf(10), dbRecord.getValue());
-    Assert.assertEquals(new Timestamp(now + 1000L),
-        dbRecord.getLastUpdatedTimestamp());
-
-    // Update
-    dbRecord.setValue(100L);
-    dbRecord.setLastUpdatedTimestamp(new Timestamp(now + 2000L));
-    dao.update(dbRecord);
-
-    // Read updated
-    dbRecord = dao.findById("key2");
-
-    Assert.assertEquals(new Timestamp(now + 2000L),
-        dbRecord.getLastUpdatedTimestamp());
-    Assert.assertEquals(Long.valueOf(100L), dbRecord.getValue());
-
-    // Delete
-    dao.deleteById("key1");
-
-    // Verify
-    dbRecord = dao.findById("key1");
-
-    Assert.assertNull(dbRecord);
-  }
-}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java
deleted file mode 100644
index 22cc55b..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon.persistence;
-
-import static org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition.CLUSTER_GROWTH_DAILY_TABLE_NAME;
-import static org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition.FILE_COUNT_BY_SIZE_TABLE_NAME;
-import static org.hadoop.ozone.recon.schema.tables.ClusterGrowthDailyTable.CLUSTER_GROWTH_DAILY;
-import static org.junit.Assert.assertEquals;
-
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Timestamp;
-import java.sql.Types;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.sql.DataSource;
-
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.commons.lang3.tuple.Pair;
-import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition;
-import org.hadoop.ozone.recon.schema.tables.daos.ClusterGrowthDailyDao;
-import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao;
-import org.hadoop.ozone.recon.schema.tables.pojos.ClusterGrowthDaily;
-import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize;
-import org.hadoop.ozone.recon.schema.tables.records.FileCountBySizeRecord;
-import org.jooq.Configuration;
-import org.jooq.Table;
-import org.jooq.UniqueKey;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test that the persistence module provides connection and transaction awareness.
- */
-public class TestUtilizationSchemaDefinition extends AbstractSqlDatabaseTest {
-
-  @Test
-  public void testReconSchemaCreated() throws Exception {
-    UtilizationSchemaDefinition schemaDefinition = getInjector().getInstance(
-        UtilizationSchemaDefinition.class);
-
-    schemaDefinition.initializeSchema();
-
-    Connection connection =
-        getInjector().getInstance(DataSource.class).getConnection();
-    // Verify table definition
-    DatabaseMetaData metaData = connection.getMetaData();
-    ResultSet resultSet = metaData.getColumns(null, null,
-        CLUSTER_GROWTH_DAILY_TABLE_NAME, null);
-
-    List<Pair<String, Integer>> expectedPairs = new ArrayList<>();
-
-    expectedPairs.add(new ImmutablePair<>("timestamp", Types.VARCHAR));
-    expectedPairs.add(new ImmutablePair<>("datanode_id", Types.INTEGER));
-    expectedPairs.add(new ImmutablePair<>("datanode_host", Types.VARCHAR));
-    expectedPairs.add(new ImmutablePair<>("rack_id", Types.VARCHAR));
-    expectedPairs.add(new ImmutablePair<>("available_size", Types.INTEGER));
-    expectedPairs.add(new ImmutablePair<>("used_size", Types.INTEGER));
-    expectedPairs.add(new ImmutablePair<>("container_count", Types.INTEGER));
-    expectedPairs.add(new ImmutablePair<>("block_count", Types.INTEGER));
-
-    List<Pair<String, Integer>> actualPairs = new ArrayList<>();
-
-    while (resultSet.next()) {
-      actualPairs.add(new ImmutablePair<>(resultSet.getString("COLUMN_NAME"),
-          resultSet.getInt("DATA_TYPE")));
-    }
-
-    Assert.assertEquals(8, actualPairs.size());
-    Assert.assertEquals(expectedPairs, actualPairs);
-
-    ResultSet resultSetFileCount = metaData.getColumns(null, null,
-        FILE_COUNT_BY_SIZE_TABLE_NAME, null);
-
-    List<Pair<String, Integer>> expectedPairsFileCount = new ArrayList<>();
-    expectedPairsFileCount.add(
-        new ImmutablePair<>("file_size", Types.INTEGER));
-    expectedPairsFileCount.add(
-        new ImmutablePair<>("count", Types.INTEGER));
-
-    List<Pair<String, Integer>> actualPairsFileCount = new ArrayList<>();
-    while (resultSetFileCount.next()) {
-      actualPairsFileCount.add(new ImmutablePair<>(resultSetFileCount.getString(
-          "COLUMN_NAME"), resultSetFileCount.getInt(
-              "DATA_TYPE")));
-    }
-    assertEquals("Unexpected number of columns",
-        2, actualPairsFileCount.size());
-    assertEquals("Columns Do not Match ",
-        expectedPairsFileCount, actualPairsFileCount);
-  }
-
-  @Test
-  public void testClusterGrowthDailyCRUDOperations() throws Exception {
-    // Verify table exists
-    UtilizationSchemaDefinition schemaDefinition = getInjector().getInstance(
-        UtilizationSchemaDefinition.class);
-    schemaDefinition.initializeSchema();
-
-    DataSource ds = getInjector().getInstance(DataSource.class);
-    Connection connection = ds.getConnection();
-
-    DatabaseMetaData metaData = connection.getMetaData();
-    ResultSet resultSet = metaData.getTables(null, null,
-        CLUSTER_GROWTH_DAILY_TABLE_NAME, null);
-
-    while (resultSet.next()) {
-      Assert.assertEquals(CLUSTER_GROWTH_DAILY_TABLE_NAME,
-          resultSet.getString("TABLE_NAME"));
-    }
-
-    ClusterGrowthDailyDao dao = new ClusterGrowthDailyDao(
-        getInjector().getInstance(Configuration.class));
-
-    long now = System.currentTimeMillis();
-    ClusterGrowthDaily newRecord = new ClusterGrowthDaily();
-    newRecord.setTimestamp(new Timestamp(now));
-    newRecord.setDatanodeId(10);
-    newRecord.setDatanodeHost("host1");
-    newRecord.setRackId("rack1");
-    newRecord.setAvailableSize(1024L);
-    newRecord.setUsedSize(512L);
-    newRecord.setContainerCount(10);
-    newRecord.setBlockCount(25);
-
-    // Create
-    dao.insert(newRecord);
-
-    // Read
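-    // ClusterGrowthDaily uses a composite primary key (timestamp, datanode_id),
-    // so findById takes a jOOQ Record2 built from both key columns.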
-    ClusterGrowthDaily dbRecord =
-        dao.findById(getDslContext().newRecord(CLUSTER_GROWTH_DAILY.TIMESTAMP,
-            CLUSTER_GROWTH_DAILY.DATANODE_ID)
-            .value1(new Timestamp(now)).value2(10));
-
-    Assert.assertEquals("host1", dbRecord.getDatanodeHost());
-    Assert.assertEquals("rack1", dbRecord.getRackId());
-    Assert.assertEquals(Long.valueOf(1024), dbRecord.getAvailableSize());
-    Assert.assertEquals(Long.valueOf(512), dbRecord.getUsedSize());
-    Assert.assertEquals(Integer.valueOf(10), dbRecord.getContainerCount());
-    Assert.assertEquals(Integer.valueOf(25), dbRecord.getBlockCount());
-
-    // Update
-    dbRecord.setUsedSize(700L);
-    dbRecord.setBlockCount(30);
-    dao.update(dbRecord);
-
-    // Read updated
-    dbRecord =
-        dao.findById(getDslContext().newRecord(CLUSTER_GROWTH_DAILY.TIMESTAMP,
-            CLUSTER_GROWTH_DAILY.DATANODE_ID)
-            .value1(new Timestamp(now)).value2(10));
-
-    Assert.assertEquals(Long.valueOf(700), dbRecord.getUsedSize());
-    Assert.assertEquals(Integer.valueOf(30), dbRecord.getBlockCount());
-
-    // Delete
-    dao.deleteById(getDslContext().newRecord(CLUSTER_GROWTH_DAILY.TIMESTAMP,
-        CLUSTER_GROWTH_DAILY.DATANODE_ID)
-        .value1(new Timestamp(now)).value2(10));
-
-    // Verify
-    dbRecord =
-        dao.findById(getDslContext().newRecord(CLUSTER_GROWTH_DAILY.TIMESTAMP,
-            CLUSTER_GROWTH_DAILY.DATANODE_ID)
-            .value1(new Timestamp(now)).value2(10));
-
-    Assert.assertNull(dbRecord);
-  }
-
-  @Test
-  public void testFileCountBySizeCRUDOperations() throws SQLException {
-    UtilizationSchemaDefinition schemaDefinition = getInjector().getInstance(
-        UtilizationSchemaDefinition.class);
-    schemaDefinition.initializeSchema();
-
-    DataSource ds = getInjector().getInstance(DataSource.class);
-    Connection connection = ds.getConnection();
-
-    DatabaseMetaData metaData = connection.getMetaData();
-    ResultSet resultSet = metaData.getTables(null, null,
-        FILE_COUNT_BY_SIZE_TABLE_NAME, null);
-
-    while (resultSet.next()) {
-      Assert.assertEquals(FILE_COUNT_BY_SIZE_TABLE_NAME,
-          resultSet.getString("TABLE_NAME"));
-    }
-
-    FileCountBySizeDao fileCountBySizeDao = new FileCountBySizeDao(
-        getInjector().getInstance(Configuration.class));
-
-    FileCountBySize newRecord = new FileCountBySize();
-    newRecord.setFileSize(1024L);
-    newRecord.setCount(1L);
-
-    fileCountBySizeDao.insert(newRecord);
-
-    FileCountBySize dbRecord = fileCountBySizeDao.findById(1024L);
-    assertEquals(Long.valueOf(1), dbRecord.getCount());
-
-    dbRecord.setCount(2L);
-    fileCountBySizeDao.update(dbRecord);
-
-    dbRecord = fileCountBySizeDao.findById(1024L);
-    assertEquals(Long.valueOf(2), dbRecord.getCount());
-
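-    // Exercise the jOOQ table metadata API: fetch the unique keys defined on
-    // the FILE_COUNT_BY_SIZE table; the loop below only reads the key names.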
-    Table<FileCountBySizeRecord> fileCountBySizeRecordTable =
-        fileCountBySizeDao.getTable();
-    List<UniqueKey<FileCountBySizeRecord>> tableKeys =
-        fileCountBySizeRecordTable.getKeys();
-    for (UniqueKey key : tableKeys) {
-      String name = key.getName();
-    }
-  }
-}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/package-info.java
deleted file mode 100644
index 63b8505..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * End to end tests for persistence classes.
- */
-package org.apache.hadoop.ozone.recon.persistence;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java
deleted file mode 100644
index a9e6aea..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java
+++ /dev/null
@@ -1,187 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.recovery;
-
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR;
-
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.apache.hadoop.ozone.recon.ReconUtils;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-/**
- * Test Recon OM Metadata Manager implementation.
- */
-public class TestReconOmMetadataManagerImpl {
-
-  @Rule
-  public TemporaryFolder temporaryFolder = new TemporaryFolder();
-
-  @Test
-  public void testStart() throws Exception {
-
-    OMMetadataManager omMetadataManager = getOMMetadataManager();
-
-    //Take checkpoint of the above OM DB.
-    DBCheckpoint checkpoint = omMetadataManager.getStore()
-        .getCheckpoint(true);
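-    //Rename the checkpoint directory to the om.snapshot.db_<timestamp> layout
-    //of a received OM snapshot before copying it into the Recon OM DB dir.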
-    File snapshotFile = new File(
-        checkpoint.getCheckpointLocation().getParent() + "/" +
-            "om.snapshot.db_" + System.currentTimeMillis());
-    checkpoint.getCheckpointLocation().toFile().renameTo(snapshotFile);
-
-    //Create new Recon OM Metadata manager instance.
-    File reconOmDbDir = temporaryFolder.newFolder();
-    OzoneConfiguration configuration = new OzoneConfiguration();
-    configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir
-        .getAbsolutePath());
-    FileUtils.copyDirectory(snapshotFile.getParentFile(), reconOmDbDir);
-
-    ReconOMMetadataManager reconOMMetadataManager =
-        new ReconOmMetadataManagerImpl(configuration, new ReconUtils());
-    reconOMMetadataManager.start(configuration);
-
-    Assert.assertNotNull(reconOMMetadataManager.getBucketTable());
-    Assert.assertNotNull(reconOMMetadataManager.getVolumeTable()
-        .get("/sampleVol"));
-    Assert.assertNotNull(reconOMMetadataManager.getBucketTable()
-        .get("/sampleVol/bucketOne"));
-    Assert.assertNotNull(reconOMMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_one"));
-    Assert.assertNotNull(reconOMMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_two"));
-  }
-
-  @Test
-  public void testUpdateOmDB() throws Exception {
-
-    OMMetadataManager omMetadataManager = getOMMetadataManager();
-    //Make sure OM Metadata reflects the keys that were inserted.
-    Assert.assertNotNull(omMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_one"));
-    Assert.assertNotNull(omMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_two"));
-
-    //Take checkpoint of OM DB.
-    DBCheckpoint checkpoint = omMetadataManager.getStore()
-        .getCheckpoint(true);
-    Assert.assertNotNull(checkpoint.getCheckpointLocation());
-
-    //Create new Recon OM Metadata manager instance.
-    File reconOmDbDir = temporaryFolder.newFolder();
-    OzoneConfiguration configuration = new OzoneConfiguration();
-    configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir
-        .getAbsolutePath());
-    ReconOMMetadataManager reconOMMetadataManager =
-        new ReconOmMetadataManagerImpl(configuration, new ReconUtils());
-    reconOMMetadataManager.start(configuration);
-
-    //Before accepting a snapshot, the metadata should have null tables.
-    Assert.assertNull(reconOMMetadataManager.getBucketTable());
-
-    //Update Recon OM DB with the OM DB checkpoint location.
-    reconOMMetadataManager.updateOmDB(
-        checkpoint.getCheckpointLocation().toFile());
-
-    //Now, the tables should have been initialized.
-    Assert.assertNotNull(reconOMMetadataManager.getBucketTable());
-
-    // Check volume and bucket entries.
-    Assert.assertNotNull(reconOMMetadataManager.getVolumeTable()
-        .get("/sampleVol"));
-    Assert.assertNotNull(reconOMMetadataManager.getBucketTable()
-        .get("/sampleVol/bucketOne"));
-
-    //Verify Keys inserted in OM DB are available in Recon OM DB.
-    Assert.assertNotNull(reconOMMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_one"));
-    Assert.assertNotNull(reconOMMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_two"));
-
-  }
-
-  /**
-   * Get test OM metadata manager.
-   * @return OMMetadataManager instance
-   * @throws IOException
-   */
-  private OMMetadataManager getOMMetadataManager() throws IOException {
-    //Create a new OM Metadata Manager instance + DB.
-    File omDbDir = temporaryFolder.newFolder();
-    OzoneConfiguration omConfiguration = new OzoneConfiguration();
-    omConfiguration.set(OZONE_OM_DB_DIRS,
-        omDbDir.getAbsolutePath());
-    OMMetadataManager omMetadataManager = new OmMetadataManagerImpl(
-        omConfiguration);
-
-    //Create a volume + bucket + 2 keys.
-    String volumeKey = omMetadataManager.getVolumeKey("sampleVol");
-    OmVolumeArgs args =
-        OmVolumeArgs.newBuilder()
-            .setVolume("sampleVol")
-            .setAdminName("TestUser")
-            .setOwnerName("TestUser")
-            .build();
-    omMetadataManager.getVolumeTable().put(volumeKey, args);
-
-    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .build();
-
-    String bucketKey =
-        omMetadataManager.getBucketKey(bucketInfo.getVolumeName(),
-            bucketInfo.getBucketName());
-    omMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
-
-    omMetadataManager.getKeyTable().put("/sampleVol/bucketOne/key_one",
-        new OmKeyInfo.Builder()
-            .setBucketName("bucketOne")
-            .setVolumeName("sampleVol")
-            .setKeyName("key_one")
-            .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-            .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE)
-            .build());
-    omMetadataManager.getKeyTable().put("/sampleVol/bucketOne/key_two",
-        new OmKeyInfo.Builder()
-            .setBucketName("bucketOne")
-            .setVolumeName("sampleVol")
-            .setKeyName("key_two")
-            .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-            .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE)
-            .build());
-
-    return omMetadataManager;
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/package-info.java
deleted file mode 100644
index c3b0b34..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Package for recon server - OM service specific tests.
- */
-package org.apache.hadoop.ozone.recon.recovery;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java
deleted file mode 100644
index 2392f8a..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java
+++ /dev/null
@@ -1,405 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.spi.impl;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.ozone.recon.GuiceInjectorUtilsForTestsImpl;
-import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
-import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
-import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
-import org.hadoop.ozone.recon.schema.StatsSchemaDefinition;
-import org.jooq.impl.DSL;
-import org.jooq.impl.DefaultConfiguration;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import com.google.inject.Injector;
-
-import javax.sql.DataSource;
-
-/**
- * Unit Tests for ContainerDBServiceProviderImpl.
- */
-public class TestContainerDBServiceProviderImpl {
-
-  @ClassRule
-  public static TemporaryFolder tempFolder = new TemporaryFolder();
-  private static ContainerDBServiceProvider containerDbServiceProvider;
-  private static Injector injector;
-  private static GuiceInjectorUtilsForTestsImpl guiceInjectorTest =
-      new GuiceInjectorUtilsForTestsImpl();
-
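-  // Key prefixes encode volume/bucket/key, e.g. "V3/B1/K1" is key K1 in
-  // bucket B1 of volume V3.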
-  private String keyPrefix1 = "V3/B1/K1";
-  private String keyPrefix2 = "V3/B1/K2";
-  private String keyPrefix3 = "V3/B2/K1";
-
-  private void populateKeysInContainers(long containerId1, long containerId2)
-      throws Exception {
-
-    ContainerKeyPrefix containerKeyPrefix1 = new
-        ContainerKeyPrefix(containerId1, keyPrefix1, 0);
-    containerDbServiceProvider.storeContainerKeyMapping(containerKeyPrefix1,
-        1);
-
-    ContainerKeyPrefix containerKeyPrefix2 = new ContainerKeyPrefix(
-        containerId1, keyPrefix2, 0);
-    containerDbServiceProvider.storeContainerKeyMapping(containerKeyPrefix2,
-        2);
-
-    ContainerKeyPrefix containerKeyPrefix3 = new ContainerKeyPrefix(
-        containerId2, keyPrefix3, 0);
-
-    containerDbServiceProvider.storeContainerKeyMapping(containerKeyPrefix3,
-        3);
-  }
-
-  private static void initializeInjector() throws Exception {
-    injector = guiceInjectorTest.getInjector(
-        null, null, tempFolder);
-  }
-
-  @BeforeClass
-  public static void setupOnce() throws Exception {
-
-    initializeInjector();
-
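-    // Initialize a jOOQ configuration backed by the injected DataSource (the
-    // returned DSLContext is not used further here).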
-    DSL.using(new DefaultConfiguration().set(
-        injector.getInstance(DataSource.class)));
-
-    containerDbServiceProvider = injector.getInstance(
-        ContainerDBServiceProvider.class);
-
-    StatsSchemaDefinition schemaDefinition = injector.getInstance(
-        StatsSchemaDefinition.class);
-    schemaDefinition.initializeSchema();
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    // Reset containerDB before running each test
-    containerDbServiceProvider.initNewContainerDB(null);
-  }
-
-  @Test
-  public void testInitNewContainerDB() throws Exception {
-    long containerId = System.currentTimeMillis();
-    Map<ContainerKeyPrefix, Integer> prefixCounts = new HashMap<>();
-
-    ContainerKeyPrefix ckp1 = new ContainerKeyPrefix(containerId,
-        "V1/B1/K1", 0);
-    prefixCounts.put(ckp1, 1);
-
-    ContainerKeyPrefix ckp2 = new ContainerKeyPrefix(containerId,
-        "V1/B1/K2", 0);
-    prefixCounts.put(ckp2, 2);
-
-    ContainerKeyPrefix ckp3 = new ContainerKeyPrefix(containerId,
-        "V1/B2/K3", 0);
-    prefixCounts.put(ckp3, 3);
-
-    for (ContainerKeyPrefix prefix : prefixCounts.keySet()) {
-      containerDbServiceProvider.storeContainerKeyMapping(
-          prefix, prefixCounts.get(prefix));
-    }
-
-    assertEquals(1, containerDbServiceProvider
-        .getCountForContainerKeyPrefix(ckp1).intValue());
-
-    prefixCounts.clear();
-    prefixCounts.put(ckp2, 12);
-    prefixCounts.put(ckp3, 13);
-    ContainerKeyPrefix ckp4 = new ContainerKeyPrefix(containerId,
-        "V1/B3/K1", 0);
-    prefixCounts.put(ckp4, 14);
-    ContainerKeyPrefix ckp5 = new ContainerKeyPrefix(containerId,
-        "V1/B3/K2", 0);
-    prefixCounts.put(ckp5, 15);
-
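-    // Re-initializing the container DB replaces all previous mappings with
-    // the supplied prefix counts, so ckp1 should no longer be present.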
-    containerDbServiceProvider.initNewContainerDB(prefixCounts);
-    Map<ContainerKeyPrefix, Integer> keyPrefixesForContainer =
-        containerDbServiceProvider.getKeyPrefixesForContainer(containerId);
-
-    assertEquals(4, keyPrefixesForContainer.size());
-    assertEquals(12, keyPrefixesForContainer.get(ckp2).intValue());
-    assertEquals(13, keyPrefixesForContainer.get(ckp3).intValue());
-    assertEquals(14, keyPrefixesForContainer.get(ckp4).intValue());
-    assertEquals(15, keyPrefixesForContainer.get(ckp5).intValue());
-
-    assertEquals(0, containerDbServiceProvider
-        .getCountForContainerKeyPrefix(ckp1).intValue());
-  }
-
-  @Test
-  public void testStoreContainerKeyMapping() throws Exception {
-
-    long containerId = System.currentTimeMillis();
-    Map<String, Integer> prefixCounts = new HashMap<>();
-    prefixCounts.put(keyPrefix1, 1);
-    prefixCounts.put(keyPrefix2, 2);
-    prefixCounts.put(keyPrefix3, 3);
-
-    for (String prefix : prefixCounts.keySet()) {
-      ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix(
-          containerId, prefix, 0);
-      containerDbServiceProvider.storeContainerKeyMapping(
-          containerKeyPrefix, prefixCounts.get(prefix));
-    }
-
-    Assert.assertEquals(1,
-        containerDbServiceProvider.getCountForContainerKeyPrefix(
-            new ContainerKeyPrefix(containerId, keyPrefix1,
-                0)).longValue());
-    Assert.assertEquals(2,
-        containerDbServiceProvider.getCountForContainerKeyPrefix(
-            new ContainerKeyPrefix(containerId, keyPrefix2,
-                0)).longValue());
-    Assert.assertEquals(3,
-        containerDbServiceProvider.getCountForContainerKeyPrefix(
-            new ContainerKeyPrefix(containerId, keyPrefix3,
-                0)).longValue());
-  }
-
-  @Test
-  public void testStoreContainerKeyCount() throws Exception {
-    long containerId = 1L;
-    long nextContainerId = 2L;
-    containerDbServiceProvider.storeContainerKeyCount(containerId, 2L);
-    containerDbServiceProvider.storeContainerKeyCount(nextContainerId, 3L);
-
-    assertEquals(2,
-        containerDbServiceProvider.getKeyCountForContainer(containerId));
-    assertEquals(3,
-        containerDbServiceProvider.getKeyCountForContainer(nextContainerId));
-
-    containerDbServiceProvider.storeContainerKeyCount(containerId, 20L);
-    assertEquals(20,
-        containerDbServiceProvider.getKeyCountForContainer(containerId));
-  }
-
-  @Test
-  public void testGetKeyCountForContainer() throws Exception {
-    long containerId = 1L;
-    long nextContainerId = 2L;
-    containerDbServiceProvider.storeContainerKeyCount(containerId, 2L);
-    containerDbServiceProvider.storeContainerKeyCount(nextContainerId, 3L);
-
-    assertEquals(2,
-        containerDbServiceProvider.getKeyCountForContainer(containerId));
-    assertEquals(3,
-        containerDbServiceProvider.getKeyCountForContainer(nextContainerId));
-
-    assertEquals(0,
-        containerDbServiceProvider.getKeyCountForContainer(5L));
-  }
-
-  @Test
-  public void testDoesContainerExists() throws Exception {
-    long containerId = 1L;
-    long nextContainerId = 2L;
-    containerDbServiceProvider.storeContainerKeyCount(containerId, 2L);
-    containerDbServiceProvider.storeContainerKeyCount(nextContainerId, 3L);
-
-    assertTrue(containerDbServiceProvider.doesContainerExists(containerId));
-    assertTrue(containerDbServiceProvider.doesContainerExists(nextContainerId));
-    assertFalse(containerDbServiceProvider.doesContainerExists(0L));
-    assertFalse(containerDbServiceProvider.doesContainerExists(3L));
-  }
-
-  @Test
-  public void testGetCountForContainerKeyPrefix() throws Exception {
-    long containerId = System.currentTimeMillis();
-
-    containerDbServiceProvider.storeContainerKeyMapping(new
-        ContainerKeyPrefix(containerId, keyPrefix1), 2);
-
-    Integer count = containerDbServiceProvider.
-        getCountForContainerKeyPrefix(new ContainerKeyPrefix(containerId,
-            keyPrefix1));
-    assertEquals(2L, count.longValue());
-
-    count = containerDbServiceProvider.
-        getCountForContainerKeyPrefix(new ContainerKeyPrefix(containerId,
-            "invalid"));
-    assertEquals(0L, count.longValue());
-  }
-
-  @Test
-  public void testGetKeyPrefixesForContainer() throws Exception {
-    long containerId = 1L;
-    long nextContainerId = 2L;
-    populateKeysInContainers(containerId, nextContainerId);
-
-    ContainerKeyPrefix containerKeyPrefix1 = new
-        ContainerKeyPrefix(containerId, keyPrefix1, 0);
-    ContainerKeyPrefix containerKeyPrefix2 = new ContainerKeyPrefix(
-        containerId, keyPrefix2, 0);
-    ContainerKeyPrefix containerKeyPrefix3 = new ContainerKeyPrefix(
-        nextContainerId, keyPrefix3, 0);
-
-    Map<ContainerKeyPrefix, Integer> keyPrefixMap =
-        containerDbServiceProvider.getKeyPrefixesForContainer(containerId);
-    assertEquals(2, keyPrefixMap.size());
-
-    assertEquals(1, keyPrefixMap.get(containerKeyPrefix1).longValue());
-    assertEquals(2, keyPrefixMap.get(containerKeyPrefix2).longValue());
-
-    keyPrefixMap = containerDbServiceProvider.getKeyPrefixesForContainer(
-        nextContainerId);
-    assertEquals(1, keyPrefixMap.size());
-    assertEquals(3, keyPrefixMap.get(containerKeyPrefix3).longValue());
-  }
-
-  @Test
-  public void testGetKeyPrefixesForContainerWithKeyPrefix() throws Exception {
-    long containerId = 1L;
-    long nextContainerId = 2L;
-    populateKeysInContainers(containerId, nextContainerId);
-
-    ContainerKeyPrefix containerKeyPrefix2 = new ContainerKeyPrefix(
-        containerId, keyPrefix2, 0);
-
-    Map<ContainerKeyPrefix, Integer> keyPrefixMap =
-        containerDbServiceProvider.getKeyPrefixesForContainer(containerId,
-            keyPrefix1);
-    assertEquals(1, keyPrefixMap.size());
-    assertEquals(2, keyPrefixMap.get(containerKeyPrefix2).longValue());
-
-    keyPrefixMap = containerDbServiceProvider.getKeyPrefixesForContainer(
-        nextContainerId, keyPrefix3);
-    assertEquals(0, keyPrefixMap.size());
-
-    // test for negative cases
-    keyPrefixMap = containerDbServiceProvider.getKeyPrefixesForContainer(
-        containerId, "V3/B1/invalid");
-    assertEquals(0, keyPrefixMap.size());
-
-    keyPrefixMap = containerDbServiceProvider.getKeyPrefixesForContainer(
-        containerId, keyPrefix3);
-    assertEquals(0, keyPrefixMap.size());
-
-    keyPrefixMap = containerDbServiceProvider.getKeyPrefixesForContainer(
-        10L, "");
-    assertEquals(0, keyPrefixMap.size());
-  }
-
-  @Test
-  public void testGetContainersWithPrevContainer() throws Exception {
-    long containerId = 1L;
-    long nextContainerId = 2L;
-    populateKeysInContainers(containerId, nextContainerId);
-
-    Map<Long, ContainerMetadata> containerMap =
-        containerDbServiceProvider.getContainers(-1, 0L);
-    assertEquals(2, containerMap.size());
-
-    assertEquals(3, containerMap.get(containerId).getNumberOfKeys());
-    assertEquals(3, containerMap.get(nextContainerId).getNumberOfKeys());
-
-    // test if limit works
-    containerMap = containerDbServiceProvider.getContainers(
-        1, 0L);
-    assertEquals(1, containerMap.size());
-    assertNull(containerMap.get(nextContainerId));
-
-    // test for prev key
-    containerMap = containerDbServiceProvider.getContainers(
-        -1, containerId);
-    assertEquals(1, containerMap.size());
-    // containerId must be skipped from containerMap result
-    assertNull(containerMap.get(containerId));
-
-    containerMap = containerDbServiceProvider.getContainers(
-        -1, nextContainerId);
-    assertEquals(0, containerMap.size());
-
-    // test for negative cases
-    containerMap = containerDbServiceProvider.getContainers(
-        -1, 10L);
-    assertEquals(0, containerMap.size());
-
-    containerMap = containerDbServiceProvider.getContainers(
-        0, containerId);
-    assertEquals(0, containerMap.size());
-  }
-
-  @Test
-  public void testDeleteContainerMapping() throws Exception {
-    long containerId = 1L;
-    long nextContainerId = 2L;
-    populateKeysInContainers(containerId, nextContainerId);
-
-    Map<ContainerKeyPrefix, Integer> keyPrefixMap =
-        containerDbServiceProvider.getKeyPrefixesForContainer(containerId);
-    assertEquals(2, keyPrefixMap.size());
-
-    containerDbServiceProvider.deleteContainerMapping(new ContainerKeyPrefix(
-        containerId, keyPrefix2, 0));
-    keyPrefixMap =
-        containerDbServiceProvider.getKeyPrefixesForContainer(containerId);
-    assertEquals(1, keyPrefixMap.size());
-  }
-
-  @Test
-  public void testGetCountForContainers() throws Exception {
-
-    assertEquals(0, containerDbServiceProvider.getCountForContainers());
-
-    containerDbServiceProvider.storeContainerCount(5L);
-
-    assertEquals(5L, containerDbServiceProvider.getCountForContainers());
-    containerDbServiceProvider.incrementContainerCountBy(1L);
-
-    assertEquals(6L, containerDbServiceProvider.getCountForContainers());
-
-    containerDbServiceProvider.storeContainerCount(10L);
-    assertEquals(10L, containerDbServiceProvider.getCountForContainers());
-  }
-
-  @Test
-  public void testStoreContainerCount() throws Exception {
-    containerDbServiceProvider.storeContainerCount(3L);
-    assertEquals(3L, containerDbServiceProvider.getCountForContainers());
-
-    containerDbServiceProvider.storeContainerCount(5L);
-    assertEquals(5L, containerDbServiceProvider.getCountForContainers());
-  }
-
-  @Test
-  public void testIncrementContainerCountBy() throws Exception {
-    assertEquals(0, containerDbServiceProvider.getCountForContainers());
-
-    containerDbServiceProvider.incrementContainerCountBy(1L);
-    assertEquals(1L, containerDbServiceProvider.getCountForContainers());
-
-    containerDbServiceProvider.incrementContainerCountBy(3L);
-    assertEquals(4L, containerDbServiceProvider.getCountForContainers());
-  }
-}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
deleted file mode 100644
index 63b4140..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
+++ /dev/null
@@ -1,338 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.spi.impl;
-
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR;
-import static org.apache.hadoop.ozone.recon.ReconUtils.createTarFile;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.Mockito.doCallRealMethod;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Paths;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.recon.AbstractOMMetadataManagerTest;
-import org.apache.hadoop.ozone.recon.ReconUtils;
-import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import org.apache.hadoop.ozone.recon.tasks.OMDBUpdatesHandler;
-import org.apache.hadoop.ozone.recon.tasks.OMUpdateEventBatch;
-import org.apache.hadoop.ozone.recon.tasks.ReconTaskController;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
-import org.apache.hadoop.hdds.utils.db.RDBStore;
-import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
-import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.rocksdb.RocksDB;
-import org.rocksdb.TransactionLogIterator;
-import org.rocksdb.WriteBatch;
-
-/**
- * Class to test Ozone Manager Service Provider Implementation.
- */
-public class TestOzoneManagerServiceProviderImpl extends
-    AbstractOMMetadataManagerTest {
-
-  private OzoneConfiguration configuration;
-  private OzoneManagerProtocol ozoneManagerProtocol;
-
-  @Before
-  public void setUp() throws Exception {
-    configuration = new OzoneConfiguration();
-    configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR,
-        temporaryFolder.newFolder().getAbsolutePath());
-    configuration.set(OZONE_RECON_DB_DIR,
-        temporaryFolder.newFolder().getAbsolutePath());
-    configuration.set("ozone.om.address", "localhost:9862");
-    ozoneManagerProtocol = getMockOzoneManagerClient(new DBUpdatesWrapper());
-  }
-
-  @Test
-  public void testUpdateReconOmDBWithNewSnapshot() throws Exception {
-
-    OMMetadataManager omMetadataManager = initializeNewOmMetadataManager();
-    ReconOMMetadataManager reconOMMetadataManager =
-        getTestMetadataManager(omMetadataManager);
-
-    writeDataToOm(omMetadataManager, "key_one");
-    writeDataToOm(omMetadataManager, "key_two");
-
-    DBCheckpoint checkpoint = omMetadataManager.getStore()
-        .getCheckpoint(true);
-    File tarFile = createTarFile(checkpoint.getCheckpointLocation());
-    InputStream inputStream = new FileInputStream(tarFile);
-    ReconUtils reconUtilsMock = getMockReconUtils();
-    when(reconUtilsMock.makeHttpCall(any(), anyString()))
-        .thenReturn(inputStream);
-
-    ReconTaskController reconTaskController = getMockTaskController();
-
-    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
-        new OzoneManagerServiceProviderImpl(configuration,
-            reconOMMetadataManager, reconTaskController, reconUtilsMock,
-            ozoneManagerProtocol);
-
-    Assert.assertNull(reconOMMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_one"));
-    Assert.assertNull(reconOMMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_two"));
-
-    assertTrue(ozoneManagerServiceProvider.updateReconOmDBWithNewSnapshot());
-
-    assertNotNull(reconOMMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_one"));
-    assertNotNull(reconOMMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_two"));
-  }
-
-  @Test
-  public void testGetOzoneManagerDBSnapshot() throws Exception {
-
-    File reconOmSnapshotDbDir = temporaryFolder.newFolder();
-
-    File checkpointDir = Paths.get(reconOmSnapshotDbDir.getAbsolutePath(),
-        "testGetOzoneManagerDBSnapshot").toFile();
-    checkpointDir.mkdir();
-
-    File file1 = Paths.get(checkpointDir.getAbsolutePath(), "file1")
-        .toFile();
-    String str = "File1 Contents";
-    BufferedWriter writer = new BufferedWriter(new FileWriter(
-        file1.getAbsolutePath()));
-    writer.write(str);
-    writer.close();
-
-    File file2 = Paths.get(checkpointDir.getAbsolutePath(), "file2")
-        .toFile();
-    str = "File2 Contents";
-    writer = new BufferedWriter(new FileWriter(file2.getAbsolutePath()));
-    writer.write(str);
-    writer.close();
-
-    //Create test tar file.
-    File tarFile = createTarFile(checkpointDir.toPath());
-    InputStream fileInputStream = new FileInputStream(tarFile);
-    ReconUtils reconUtilsMock = getMockReconUtils();
-    when(reconUtilsMock.makeHttpCall(any(), anyString()))
-        .thenReturn(fileInputStream);
-
-    ReconOMMetadataManager reconOMMetadataManager =
-        mock(ReconOMMetadataManager.class);
-    ReconTaskController reconTaskController = getMockTaskController();
-    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
-        new OzoneManagerServiceProviderImpl(configuration,
-            reconOMMetadataManager, reconTaskController, reconUtilsMock,
-            ozoneManagerProtocol);
-
-    DBCheckpoint checkpoint = ozoneManagerServiceProvider
-        .getOzoneManagerDBSnapshot();
-    assertNotNull(checkpoint);
-    assertTrue(checkpoint.getCheckpointLocation().toFile().isDirectory());
-    assertEquals(2, checkpoint.getCheckpointLocation().toFile()
-        .listFiles().length);
-  }
-
-  @Test
-  public void testGetAndApplyDeltaUpdatesFromOM() throws Exception {
-
-    // Writing 2 Keys into a source OM DB and collecting it in a
-    // DBUpdatesWrapper.
-    OMMetadataManager sourceOMMetadataMgr = initializeNewOmMetadataManager();
-    writeDataToOm(sourceOMMetadataMgr, "key_one");
-    writeDataToOm(sourceOMMetadataMgr, "key_two");
-
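-    // Read the OM RocksDB write-ahead log from sequence number 0 and collect
-    // each write batch, mimicking the delta updates the OM would return.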
-    RocksDB rocksDB = ((RDBStore)sourceOMMetadataMgr.getStore()).getDb();
-    TransactionLogIterator transactionLogIterator = rocksDB.getUpdatesSince(0L);
-    DBUpdatesWrapper dbUpdatesWrapper = new DBUpdatesWrapper();
-    while (transactionLogIterator.isValid()) {
-      TransactionLogIterator.BatchResult result =
-          transactionLogIterator.getBatch();
-      result.writeBatch().markWalTerminationPoint();
-      WriteBatch writeBatch = result.writeBatch();
-      dbUpdatesWrapper.addWriteBatch(writeBatch.data(),
-          result.sequenceNumber());
-      transactionLogIterator.next();
-    }
-
-    // OM Service Provider's Metadata Manager.
-    OMMetadataManager omMetadataManager = initializeNewOmMetadataManager();
-
-    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
-        new OzoneManagerServiceProviderImpl(configuration,
-            getTestMetadataManager(omMetadataManager),
-            getMockTaskController(), new ReconUtils(),
-            getMockOzoneManagerClient(dbUpdatesWrapper));
-
-    OMDBUpdatesHandler updatesHandler =
-        new OMDBUpdatesHandler(omMetadataManager);
-    ozoneManagerServiceProvider.getAndApplyDeltaUpdatesFromOM(
-        0L, updatesHandler);
-
-    // This method asserts both the "GET" path and the "APPLY" path.
-
-    // Assert GET path --> verify if the OMDBUpdatesHandler picked up the 4
-    // events ( 1 Vol PUT + 1 Bucket PUT + 2 Key PUTs).
-    assertEquals(4, updatesHandler.getEvents().size());
-
-    // Assert APPLY path --> Verify if the OM service provider's RocksDB got
-    // the changes.
-    String fullKey = omMetadataManager.getOzoneKey("sampleVol",
-        "bucketOne", "key_one");
-    assertTrue(ozoneManagerServiceProvider.getOMMetadataManagerInstance()
-        .getKeyTable().isExist(fullKey));
-    fullKey = omMetadataManager.getOzoneKey("sampleVol",
-        "bucketOne", "key_two");
-    assertTrue(ozoneManagerServiceProvider.getOMMetadataManagerInstance()
-        .getKeyTable().isExist(fullKey));
-  }
-
-  @Test
-  public void testSyncDataFromOMFullSnapshot() throws Exception {
-
-    // Empty OM DB to start with.
-    ReconOMMetadataManager omMetadataManager = getTestMetadataManager(
-        initializeEmptyOmMetadataManager());
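-    // An empty local OM DB has no sequence number to resume from, so the
-    // provider is expected to request a full snapshot (stubbed out by
-    // MockOzoneServiceProvider below).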
-    ReconTaskStatusDao reconTaskStatusDaoMock =
-        mock(ReconTaskStatusDao.class);
-    doNothing().when(reconTaskStatusDaoMock)
-        .update(any(ReconTaskStatus.class));
-
-    ReconTaskController reconTaskControllerMock = getMockTaskController();
-    when(reconTaskControllerMock.getReconTaskStatusDao())
-        .thenReturn(reconTaskStatusDaoMock);
-    doNothing().when(reconTaskControllerMock)
-        .reInitializeTasks(omMetadataManager);
-
-    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
-        new MockOzoneServiceProvider(configuration, omMetadataManager,
-            reconTaskControllerMock, new ReconUtils(), ozoneManagerProtocol);
-
-    // Should trigger full snapshot request.
-    ozoneManagerServiceProvider.syncDataFromOM();
-
-    ArgumentCaptor<ReconTaskStatus> captor =
-        ArgumentCaptor.forClass(ReconTaskStatus.class);
-    verify(reconTaskStatusDaoMock, times(1))
-        .update(captor.capture());
-    assertTrue(captor.getValue().getTaskName().equals("OM_DB_FULL_SNAPSHOT"));
-    verify(reconTaskControllerMock, times(1))
-        .reInitializeTasks(omMetadataManager);
-  }
-
-  @Test
-  public void testSyncDataFromOMDeltaUpdates() throws Exception {
-
-    // Non-Empty OM DB to start with.
-    ReconOMMetadataManager omMetadataManager = getTestMetadataManager(
-        initializeNewOmMetadataManager());
-    ReconTaskStatusDao reconTaskStatusDaoMock =
-        mock(ReconTaskStatusDao.class);
-    doNothing().when(reconTaskStatusDaoMock)
-        .update(any(ReconTaskStatus.class));
-
-    ReconTaskController reconTaskControllerMock = getMockTaskController();
-    when(reconTaskControllerMock.getReconTaskStatusDao())
-        .thenReturn(reconTaskStatusDaoMock);
-    doNothing().when(reconTaskControllerMock)
-        .consumeOMEvents(any(OMUpdateEventBatch.class),
-            any(OMMetadataManager.class));
-
-    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
-        new OzoneManagerServiceProviderImpl(configuration, omMetadataManager,
-            reconTaskControllerMock, new ReconUtils(), ozoneManagerProtocol);
-
-    // Should trigger delta updates.
-    ozoneManagerServiceProvider.syncDataFromOM();
-
-    ArgumentCaptor<ReconTaskStatus> captor =
-        ArgumentCaptor.forClass(ReconTaskStatus.class);
-    verify(reconTaskStatusDaoMock, times(1))
-        .update(captor.capture());
-    assertTrue(captor.getValue().getTaskName().equals("OM_DB_DELTA_UPDATES"));
-
-    verify(reconTaskControllerMock, times(1))
-        .consumeOMEvents(any(OMUpdateEventBatch.class),
-            any(OMMetadataManager.class));
-  }
-
-  private ReconTaskController getMockTaskController() {
-    ReconTaskController reconTaskControllerMock =
-        mock(ReconTaskController.class);
-    return reconTaskControllerMock;
-  }
-
-  private ReconUtils getMockReconUtils() throws IOException {
-    ReconUtils reconUtilsMock = mock(ReconUtils.class);
-    when(reconUtilsMock.getReconDbDir(any(), anyString())).thenCallRealMethod();
-    doCallRealMethod().when(reconUtilsMock).untarCheckpointFile(any(), any());
-    return reconUtilsMock;
-  }
-
-  private OzoneManagerProtocol getMockOzoneManagerClient(
-      DBUpdatesWrapper dbUpdatesWrapper) throws IOException {
-    OzoneManagerProtocol ozoneManagerProtocolMock =
-        mock(OzoneManagerProtocol.class);
-    when(ozoneManagerProtocolMock.getDBUpdates(any(OzoneManagerProtocolProtos
-        .DBUpdatesRequest.class))).thenReturn(dbUpdatesWrapper);
-    return ozoneManagerProtocolMock;
-  }
-}
-
-/**
- * Mock OzoneManagerServiceProviderImpl which overrides
- * updateReconOmDBWithNewSnapshot.
- */
-class MockOzoneServiceProvider extends OzoneManagerServiceProviderImpl {
-
-  MockOzoneServiceProvider(OzoneConfiguration configuration,
-                           ReconOMMetadataManager omMetadataManager,
-                           ReconTaskController reconTaskController,
-                           ReconUtils reconUtils,
-                           OzoneManagerProtocol ozoneManagerClient)
-      throws IOException {
-    super(configuration, omMetadataManager, reconTaskController, reconUtils,
-        ozoneManagerClient);
-  }
-
-  @Override
-  public boolean updateReconOmDBWithNewSnapshot() {
-    return true;
-  }
-}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerDBProvider.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerDBProvider.java
deleted file mode 100644
index ad1feeb..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerDBProvider.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.spi.impl;
-
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR;
-import static org.junit.Assert.assertNotNull;
-
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Singleton;
-
-/**
- * Tests the class that provides the instance of the DB Store used by Recon to
- * store its container-key data.
- */
-public class TestReconContainerDBProvider {
-
-  @Rule
-  public TemporaryFolder tempFolder = new TemporaryFolder();
-
-  private Injector injector;
-
-  @Before
-  public void setUp() throws IOException {
-    tempFolder.create();
-    injector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        File dbDir = tempFolder.getRoot();
-        OzoneConfiguration configuration = new OzoneConfiguration();
-        configuration.set(OZONE_RECON_DB_DIR, dbDir.getAbsolutePath());
-        bind(OzoneConfiguration.class).toInstance(configuration);
-        bind(DBStore.class).toProvider(ReconContainerDBProvider.class).in(
-            Singleton.class);
-      }
-    });
-  }
-
-  @Test
-  public void testGet() throws Exception {
-    ReconContainerDBProvider reconContainerDBProvider = injector.getInstance(
-        ReconContainerDBProvider.class);
-    DBStore dbStore = reconContainerDBProvider.get();
-    assertNotNull(dbStore);
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java
deleted file mode 100644
index 932c437..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Package for recon server impl tests.
- */
-package org.apache.hadoop.ozone.recon.spi.impl;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/DummyReconDBTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/DummyReconDBTask.java
deleted file mode 100644
index 66be41e..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/DummyReconDBTask.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import java.util.Collection;
-import java.util.Collections;
-
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-
-/**
- * Dummy Recon task that has three modes of operation:
- * ALWAYS_FAIL / FAIL_ONCE / ALWAYS_PASS.
- */
-public class DummyReconDBTask implements ReconDBUpdateTask {
-
-  private int numFailuresAllowed = Integer.MIN_VALUE;
-  private int callCtr = 0;
-  private String taskName;
-
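-  // FAIL_ONCE allows exactly one failure, ALWAYS_FAIL effectively always
-  // fails, and the ALWAYS_PASS default leaves numFailuresAllowed negative so
-  // process()/reprocess() never report failure.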
-  DummyReconDBTask(String taskName, TaskType taskType) {
-    this.taskName = taskName;
-    if (taskType.equals(TaskType.FAIL_ONCE)) {
-      numFailuresAllowed = 1;
-    } else if (taskType.equals(TaskType.ALWAYS_FAIL)) {
-      numFailuresAllowed = Integer.MAX_VALUE;
-    }
-  }
-
-  @Override
-  public String getTaskName() {
-    return taskName;
-  }
-
-  @Override
-  public Collection<String> getTaskTables() {
-    return Collections.singletonList("volumeTable");
-  }
-
-  @Override
-  public Pair<String, Boolean> process(OMUpdateEventBatch events) {
-    if (++callCtr <= numFailuresAllowed) {
-      return new ImmutablePair<>(getTaskName(), false);
-    } else {
-      return new ImmutablePair<>(getTaskName(), true);
-    }
-  }
-
-  @Override
-  public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
-    if (++callCtr <= numFailuresAllowed) {
-      return new ImmutablePair<>(getTaskName(), false);
-    } else {
-      return new ImmutablePair<>(getTaskName(), true);
-    }
-  }
-
-  /**
-   * Type of the task.
-   */
-  public enum TaskType {
-    ALWAYS_PASS,
-    FAIL_ONCE,
-    ALWAYS_FAIL
-  }
-}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
deleted file mode 100644
index 8634998..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
+++ /dev/null
@@ -1,314 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.recon.AbstractOMMetadataManagerTest;
-import org.apache.hadoop.ozone.recon.GuiceInjectorUtilsForTestsImpl;
-import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
-import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
-import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.hadoop.ozone.recon.schema.StatsSchemaDefinition;
-import org.jooq.impl.DSL;
-import org.jooq.impl.DefaultConfiguration;
-import org.junit.Before;
-import org.junit.Test;
-import com.google.inject.Injector;
-import javax.sql.DataSource;
-
-/**
- * Unit test for Container Key mapper task.
- */
-public class TestContainerKeyMapperTask extends AbstractOMMetadataManagerTest {
-
-  private ContainerDBServiceProvider containerDbServiceProvider;
-  private OMMetadataManager omMetadataManager;
-  private ReconOMMetadataManager reconOMMetadataManager;
-  private Injector injector;
-  private OzoneManagerServiceProviderImpl ozoneManagerServiceProvider;
-  private boolean setUpIsDone = false;
-  private GuiceInjectorUtilsForTestsImpl guiceInjectorTest =
-      new GuiceInjectorUtilsForTestsImpl();
-
-  private Injector getInjector() {
-    return injector;
-  }
-
-  private void initializeInjector() throws Exception {
-    omMetadataManager = initializeNewOmMetadataManager();
-    ozoneManagerServiceProvider = getMockOzoneManagerServiceProvider();
-    reconOMMetadataManager = getTestMetadataManager(omMetadataManager);
-
-    injector = guiceInjectorTest.getInjector(
-        ozoneManagerServiceProvider, reconOMMetadataManager, temporaryFolder);
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    // The following setup is run only once
-    if (!setUpIsDone) {
-      initializeInjector();
-
-      DSL.using(new DefaultConfiguration().set(
-          injector.getInstance(DataSource.class)));
-
-      StatsSchemaDefinition schemaDefinition = getInjector().getInstance(
-          StatsSchemaDefinition.class);
-      schemaDefinition.initializeSchema();
-
-      setUpIsDone = true;
-    }
-
-    containerDbServiceProvider = injector.getInstance(
-        ContainerDBServiceProvider.class);
-  }
-
-  @Test
-  public void testReprocessOMDB() throws Exception {
-
-    Map<ContainerKeyPrefix, Integer> keyPrefixesForContainer =
-        containerDbServiceProvider.getKeyPrefixesForContainer(1);
-    assertTrue(keyPrefixesForContainer.isEmpty());
-
-    keyPrefixesForContainer = containerDbServiceProvider
-        .getKeyPrefixesForContainer(2);
-    assertTrue(keyPrefixesForContainer.isEmpty());
-
-    Pipeline pipeline = getRandomPipeline();
-
-    List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
-    BlockID blockID1 = new BlockID(1, 1);
-    OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1,
-        pipeline);
-
-    BlockID blockID2 = new BlockID(2, 1);
-    OmKeyLocationInfo omKeyLocationInfo2
-        = getOmKeyLocationInfo(blockID2, pipeline);
-
-    omKeyLocationInfoList.add(omKeyLocationInfo1);
-    omKeyLocationInfoList.add(omKeyLocationInfo2);
-
-    OmKeyLocationInfoGroup omKeyLocationInfoGroup = new
-        OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
-
-    writeDataToOm(reconOMMetadataManager,
-        "key_one",
-        "bucketOne",
-        "sampleVol",
-        Collections.singletonList(omKeyLocationInfoGroup));
-
-    ContainerKeyMapperTask containerKeyMapperTask =
-        new ContainerKeyMapperTask(containerDbServiceProvider);
-    containerKeyMapperTask.reprocess(reconOMMetadataManager);
-
-    keyPrefixesForContainer =
-        containerDbServiceProvider.getKeyPrefixesForContainer(1);
-    assertEquals(1, keyPrefixesForContainer.size());
-    String omKey = omMetadataManager.getOzoneKey("sampleVol",
-        "bucketOne", "key_one");
-    ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix(1,
-        omKey, 0);
-    assertEquals(1,
-        keyPrefixesForContainer.get(containerKeyPrefix).intValue());
-
-    keyPrefixesForContainer =
-        containerDbServiceProvider.getKeyPrefixesForContainer(2);
-    assertEquals(1, keyPrefixesForContainer.size());
-    containerKeyPrefix = new ContainerKeyPrefix(2, omKey,
-        0);
-    assertEquals(1,
-        keyPrefixesForContainer.get(containerKeyPrefix).intValue());
-
-    // Test if container key counts are updated
-    assertEquals(1, containerDbServiceProvider.getKeyCountForContainer(1L));
-    assertEquals(1, containerDbServiceProvider.getKeyCountForContainer(2L));
-    assertEquals(0, containerDbServiceProvider.getKeyCountForContainer(3L));
-
-    // Test if container count is updated
-    assertEquals(2, containerDbServiceProvider.getCountForContainers());
-  }
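-
-  // Sketch of the state verified above (hypothetical rendering of the Recon
-  // container DB): reprocess() walks the full OM key table, so a key with
-  // blocks in containers 1 and 2 produces one ContainerKeyPrefix entry per
-  // container, e.g.
-  //   (containerId=1, keyPrefix="/sampleVol/bucketOne/key_one", version=0) -> 1
-  //   (containerId=2, keyPrefix="/sampleVol/bucketOne/key_one", version=0) -> 1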
-
-  @Test
-  public void testProcessOMEvents() throws IOException {
-    Map<ContainerKeyPrefix, Integer> keyPrefixesForContainer =
-        containerDbServiceProvider.getKeyPrefixesForContainer(1);
-    assertTrue(keyPrefixesForContainer.isEmpty());
-
-    keyPrefixesForContainer = containerDbServiceProvider
-        .getKeyPrefixesForContainer(2);
-    assertTrue(keyPrefixesForContainer.isEmpty());
-
-    Pipeline pipeline = getRandomPipeline();
-
-    List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
-    BlockID blockID1 = new BlockID(1, 1);
-    OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1,
-        pipeline);
-
-    BlockID blockID2 = new BlockID(2, 1);
-    OmKeyLocationInfo omKeyLocationInfo2
-        = getOmKeyLocationInfo(blockID2, pipeline);
-
-    omKeyLocationInfoList.add(omKeyLocationInfo1);
-    omKeyLocationInfoList.add(omKeyLocationInfo2);
-
-    OmKeyLocationInfoGroup omKeyLocationInfoGroup = new
-        OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
-
-    String bucket = "bucketOne";
-    String volume = "sampleVol";
-    String key = "key_one";
-    String omKey = omMetadataManager.getOzoneKey(volume, bucket, key);
-    OmKeyInfo omKeyInfo = buildOmKeyInfo(volume, bucket, key,
-        omKeyLocationInfoGroup);
-
-    OMDBUpdateEvent keyEvent1 = new OMDBUpdateEvent.
-        OMUpdateEventBuilder<String, OmKeyInfo>()
-        .setKey(omKey)
-        .setValue(omKeyInfo)
-        .setTable(omMetadataManager.getKeyTable().getName())
-        .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
-        .build();
-
-    BlockID blockID3 = new BlockID(1, 2);
-    OmKeyLocationInfo omKeyLocationInfo3 =
-        getOmKeyLocationInfo(blockID3, pipeline);
-
-    BlockID blockID4 = new BlockID(3, 1);
-    OmKeyLocationInfo omKeyLocationInfo4
-        = getOmKeyLocationInfo(blockID4, pipeline);
-
-    omKeyLocationInfoList = new ArrayList<>();
-    omKeyLocationInfoList.add(omKeyLocationInfo3);
-    omKeyLocationInfoList.add(omKeyLocationInfo4);
-    omKeyLocationInfoGroup = new OmKeyLocationInfoGroup(0,
-        omKeyLocationInfoList);
-
-    String key2 = "key_two";
-    writeDataToOm(reconOMMetadataManager, key2, bucket, volume, Collections
-        .singletonList(omKeyLocationInfoGroup));
-
-    omKey = omMetadataManager.getOzoneKey(volume, bucket, key2);
-    OMDBUpdateEvent keyEvent2 = new OMDBUpdateEvent.
-        OMUpdateEventBuilder<String, OmKeyInfo>()
-        .setKey(omKey)
-        .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE)
-        .setTable(omMetadataManager.getKeyTable().getName())
-        .build();
-
-    OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(new
-        ArrayList<OMDBUpdateEvent>() {{
-          add(keyEvent1);
-          add(keyEvent2);
-        }});
-
-    ContainerKeyMapperTask containerKeyMapperTask =
-        new ContainerKeyMapperTask(containerDbServiceProvider);
-    containerKeyMapperTask.reprocess(reconOMMetadataManager);
-
-    keyPrefixesForContainer = containerDbServiceProvider
-        .getKeyPrefixesForContainer(1);
-    assertEquals(1, keyPrefixesForContainer.size());
-
-    keyPrefixesForContainer = containerDbServiceProvider
-        .getKeyPrefixesForContainer(2);
-    assertTrue(keyPrefixesForContainer.isEmpty());
-
-    keyPrefixesForContainer = containerDbServiceProvider
-        .getKeyPrefixesForContainer(3);
-    assertEquals(1, keyPrefixesForContainer.size());
-
-    assertEquals(1, containerDbServiceProvider.getKeyCountForContainer(1L));
-    assertEquals(0, containerDbServiceProvider.getKeyCountForContainer(2L));
-    assertEquals(1, containerDbServiceProvider.getKeyCountForContainer(3L));
-
-    // Process PUT & DELETE event.
-    containerKeyMapperTask.process(omUpdateEventBatch);
-
-    keyPrefixesForContainer = containerDbServiceProvider
-        .getKeyPrefixesForContainer(1);
-    assertEquals(1, keyPrefixesForContainer.size());
-
-    keyPrefixesForContainer = containerDbServiceProvider
-        .getKeyPrefixesForContainer(2);
-    assertEquals(1, keyPrefixesForContainer.size());
-
-    keyPrefixesForContainer = containerDbServiceProvider
-        .getKeyPrefixesForContainer(3);
-    assertTrue(keyPrefixesForContainer.isEmpty());
-
-    assertEquals(1, containerDbServiceProvider.getKeyCountForContainer(1L));
-    assertEquals(1, containerDbServiceProvider.getKeyCountForContainer(2L));
-    assertEquals(0, containerDbServiceProvider.getKeyCountForContainer(3L));
-
-    // Test if container count is updated
-    assertEquals(3, containerDbServiceProvider.getCountForContainers());
-  }
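-
-  // Note the contrast with testReprocessOMDB: process() applies the event
-  // batch incrementally on top of the state built by reprocess(), so the
-  // PUT for key_one repopulates containers 1 and 2 while the DELETE for
-  // key_two clears container 3; reprocess() always rebuilds from a full
-  // OM DB snapshot.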
-
-  private OmKeyInfo buildOmKeyInfo(String volume,
-                                   String bucket,
-                                   String key,
-                                   OmKeyLocationInfoGroup
-                                       omKeyLocationInfoGroup) {
-    return new OmKeyInfo.Builder()
-        .setBucketName(bucket)
-        .setVolumeName(volume)
-        .setKeyName(key)
-        .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-        .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE)
-        .setOmKeyLocationInfos(Collections.singletonList(
-            omKeyLocationInfoGroup))
-        .build();
-  }
-
-  private OzoneManagerServiceProviderImpl getMockOzoneManagerServiceProvider()
-      throws IOException {
-    OzoneManagerServiceProviderImpl omServiceProviderMock =
-        mock(OzoneManagerServiceProviderImpl.class);
-    OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class);
-    Table tableMock = mock(Table.class);
-    when(tableMock.getName()).thenReturn("keyTable");
-    when(omMetadataManagerMock.getKeyTable()).thenReturn(tableMock);
-    when(omServiceProviderMock.getOMMetadataManagerInstance())
-      .thenReturn(omMetadataManagerMock);
-    return omServiceProviderMock;
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java
deleted file mode 100644
index b4b5467..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.hdds.utils.db.TypedTable;
-import org.junit.Test;
-
-import java.io.IOException;
-
-import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT;
-import static org.junit.Assert.assertEquals;
-
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.BDDMockito.given;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.when;
-
-/**
- * Unit test for File Size Count Task.
- */
-public class TestFileSizeCountTask {
-  @Test
-  public void testCalculateBinIndex() {
-    FileSizeCountTask fileSizeCountTask = mock(FileSizeCountTask.class);
-
-    when(fileSizeCountTask.getMaxFileSizeUpperBound()).
-        thenReturn(1125899906842624L);    // 1 PB
-    when(fileSizeCountTask.getOneKB()).thenReturn(1024L);
-    when(fileSizeCountTask.getMaxBinSize()).thenReturn(42);
-    when(fileSizeCountTask.calculateBinIndex(anyLong())).thenCallRealMethod();
-    when(fileSizeCountTask.nextClosestPowerIndexOfTwo(
-        anyLong())).thenCallRealMethod();
-
-    long fileSize = 1024L;            // 1 KB
-    int binIndex = fileSizeCountTask.calculateBinIndex(fileSize);
-    assertEquals(1, binIndex);
-
-    fileSize = 1023L;                // 1KB - 1B
-    binIndex = fileSizeCountTask.calculateBinIndex(fileSize);
-    assertEquals(0, binIndex);
-
-    fileSize = 562949953421312L;      // 512 TB
-    binIndex = fileSizeCountTask.calculateBinIndex(fileSize);
-    assertEquals(40, binIndex);
-
-    fileSize = 562949953421313L;      // (512 TB + 1B)
-    binIndex = fileSizeCountTask.calculateBinIndex(fileSize);
-    assertEquals(40, binIndex);
-
-    fileSize = 562949953421311L;      // (512 TB - 1B)
-    binIndex = fileSizeCountTask.calculateBinIndex(fileSize);
-    assertEquals(39, binIndex);
-
-    fileSize = 1125899906842624L;      // 1 PB - last (extra) bin
-    binIndex = fileSizeCountTask.calculateBinIndex(fileSize);
-    assertEquals(41, binIndex);
-
-    fileSize = 100000L;
-    binIndex = fileSizeCountTask.calculateBinIndex(fileSize);
-    assertEquals(7, binIndex);
-
-    fileSize = 1125899906842623L;      // (1 PB - 1B)
-    binIndex = fileSizeCountTask.calculateBinIndex(fileSize);
-    assertEquals(40, binIndex);
-
-    fileSize = 1125899906842624L * 4;      // 4 PB - last extra bin
-    binIndex = fileSizeCountTask.calculateBinIndex(fileSize);
-    assertEquals(41, binIndex);
-
-    fileSize = Long.MAX_VALUE;        // extra bin
-    binIndex = fileSizeCountTask.calculateBinIndex(fileSize);
-    assertEquals(41, binIndex);
-  }
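-
-  // From the assertions above, the binning appears to be: binIndex = i - 10,
-  // where 2^i is the smallest power of two strictly greater than the file
-  // size (1023 B -> bin 0, 1 KB -> bin 1, 512 TB -> bin 40), with the last
-  // bin (maxBinSize - 1 = 41) catching everything of 1 PB and above.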
-
-  @Test
-  public void testFileCountBySizeReprocess() throws IOException {
-    OmKeyInfo omKeyInfo1 = mock(OmKeyInfo.class);
-    given(omKeyInfo1.getKeyName()).willReturn("key1");
-    given(omKeyInfo1.getDataSize()).willReturn(1000L);
-
-    OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
-    TypedTable<String, OmKeyInfo> keyTable = mock(TypedTable.class);
-
-    TypedTable.TypedTableIterator mockKeyIter = mock(TypedTable
-        .TypedTableIterator.class);
-    TypedTable.TypedKeyValue mockKeyValue = mock(
-        TypedTable.TypedKeyValue.class);
-
-    when(keyTable.iterator()).thenReturn(mockKeyIter);
-    when(omMetadataManager.getKeyTable()).thenReturn(keyTable);
-    when(mockKeyIter.hasNext()).thenReturn(true).thenReturn(false);
-    when(mockKeyIter.next()).thenReturn(mockKeyValue);
-    when(mockKeyValue.getValue()).thenReturn(omKeyInfo1);
-
-    FileSizeCountTask fileSizeCountTask = mock(FileSizeCountTask.class);
-    when(fileSizeCountTask.getMaxFileSizeUpperBound()).
-        thenReturn(4096L);
-    when(fileSizeCountTask.getOneKB()).thenReturn(1024L);
-
-    when(fileSizeCountTask.reprocess(omMetadataManager)).thenCallRealMethod();
-    //call reprocess()
-    fileSizeCountTask.reprocess(omMetadataManager);
-    verify(fileSizeCountTask, times(1)).
-        updateUpperBoundCount(omKeyInfo1, PUT);
-    verify(fileSizeCountTask,
-        times(1)).populateFileCountBySizeDB();
-  }
-}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java
deleted file mode 100644
index 7056e7e..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.hdds.utils.db.RDBStore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.rocksdb.RocksDB;
-import org.rocksdb.TransactionLogIterator;
-import org.rocksdb.WriteBatch;
-
-/**
- * Class used to test OMDBUpdatesHandler.
- */
-public class TestOMDBUpdatesHandler {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private OzoneConfiguration createNewTestPath() throws IOException {
-    OzoneConfiguration configuration = new OzoneConfiguration();
-    File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
-      assertTrue(newFolder.mkdirs());
-    }
-    ServerUtils.setOzoneMetaDirPath(configuration, newFolder.toString());
-    return configuration;
-  }
-
-  @Test
-  public void testPut() throws Exception {
-    OzoneConfiguration configuration = createNewTestPath();
-    OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration);
-
-    String volumeKey = metaMgr.getVolumeKey("sampleVol");
-    OmVolumeArgs args =
-        OmVolumeArgs.newBuilder()
-            .setVolume("sampleVol")
-            .setAdminName("bilbo")
-            .setOwnerName("bilbo")
-            .build();
-    metaMgr.getVolumeTable().put(volumeKey, args);
-
-    OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
-        .setBucketName("bucketOne")
-        .setVolumeName("sampleVol")
-        .setKeyName("key_one")
-        .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-        .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE)
-        .build();
-
-    metaMgr.getKeyTable().put("/sampleVol/bucketOne/key_one", omKeyInfo);
-    RDBStore rdbStore = (RDBStore) metaMgr.getStore();
-
-    RocksDB rocksDB = rdbStore.getDb();
-    TransactionLogIterator transactionLogIterator =
-        rocksDB.getUpdatesSince(0);
-    List<byte[]> writeBatches = new ArrayList<>();
-
-    while (transactionLogIterator.isValid()) {
-      TransactionLogIterator.BatchResult result =
-          transactionLogIterator.getBatch();
-      result.writeBatch().markWalTerminationPoint();
-      WriteBatch writeBatch = result.writeBatch();
-      writeBatches.add(writeBatch.data());
-      transactionLogIterator.next();
-    }
-
-    OzoneConfiguration conf2 = createNewTestPath();
-    OmMetadataManagerImpl reconOmmetaMgr = new OmMetadataManagerImpl(conf2);
-    List<OMDBUpdateEvent> events = new ArrayList<>();
-    for (byte[] data : writeBatches) {
-      WriteBatch writeBatch = new WriteBatch(data);
-      OMDBUpdatesHandler omdbUpdatesHandler =
-          new OMDBUpdatesHandler(reconOmmetaMgr);
-      writeBatch.iterate(omdbUpdatesHandler);
-      events.addAll(omdbUpdatesHandler.getEvents());
-    }
-    assertNotNull(events);
-    assertEquals(2, events.size());
-
-    OMDBUpdateEvent volEvent = events.get(0);
-    assertEquals(OMDBUpdateEvent.OMDBUpdateAction.PUT, volEvent.getAction());
-    assertEquals(volumeKey, volEvent.getKey());
-    assertEquals(args.getVolume(), ((OmVolumeArgs)volEvent.getValue())
-        .getVolume());
-
-    OMDBUpdateEvent keyEvent = events.get(1);
-    assertEquals(OMDBUpdateEvent.OMDBUpdateAction.PUT, keyEvent.getAction());
-    assertEquals("/sampleVol/bucketOne/key_one", keyEvent.getKey());
-    assertEquals(omKeyInfo.getBucketName(),
-        ((OmKeyInfo)keyEvent.getValue()).getBucketName());
-  }
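-
-  // The pattern under test, in outline (a sketch, not the production code
-  // path): RocksDB exposes its write-ahead log via getUpdatesSince(seq),
-  // and each captured WriteBatch is replayed through OMDBUpdatesHandler,
-  // which turns the raw put/delete records back into typed OMDBUpdateEvents:
-  //   TransactionLogIterator it = rocksDB.getUpdatesSince(0);
-  //   while (it.isValid()) {
-  //     it.getBatch().writeBatch().iterate(new OMDBUpdatesHandler(omMgr));
-  //     it.next();
-  //   }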
-
-  @Test
-  public void testDelete() throws Exception {
-    OzoneConfiguration configuration = createNewTestPath();
-    OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration);
-
-    String volumeKey = metaMgr.getVolumeKey("sampleVol");
-    OmVolumeArgs args =
-        OmVolumeArgs.newBuilder()
-            .setVolume("sampleVol")
-            .setAdminName("bilbo")
-            .setOwnerName("bilbo")
-            .build();
-    metaMgr.getVolumeTable().put(volumeKey, args);
-
-    OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
-        .setBucketName("bucketOne")
-        .setVolumeName("sampleVol")
-        .setKeyName("key_one")
-        .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-        .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE)
-        .build();
-
-    metaMgr.getKeyTable().put("/sampleVol/bucketOne/key_one", omKeyInfo);
-
-    metaMgr.getKeyTable().delete("/sampleVol/bucketOne/key_one");
-    metaMgr.getVolumeTable().delete(volumeKey);
-
-    RDBStore rdbStore = (RDBStore) metaMgr.getStore();
-
-    RocksDB rocksDB = rdbStore.getDb();
-    TransactionLogIterator transactionLogIterator =
-        rocksDB.getUpdatesSince(0);
-    List<byte[]> writeBatches = new ArrayList<>();
-
-    while (transactionLogIterator.isValid()) {
-      TransactionLogIterator.BatchResult result =
-          transactionLogIterator.getBatch();
-      result.writeBatch().markWalTerminationPoint();
-      WriteBatch writeBatch = result.writeBatch();
-      writeBatches.add(writeBatch.data());
-      transactionLogIterator.next();
-    }
-
-    OzoneConfiguration conf2 = createNewTestPath();
-    OmMetadataManagerImpl reconOmmetaMgr = new OmMetadataManagerImpl(conf2);
-    List<OMDBUpdateEvent> events = new ArrayList<>();
-    for (byte[] data : writeBatches) {
-      WriteBatch writeBatch = new WriteBatch(data);
-      OMDBUpdatesHandler omdbUpdatesHandler =
-          new OMDBUpdatesHandler(reconOmmetaMgr);
-      writeBatch.iterate(omdbUpdatesHandler);
-      events.addAll(omdbUpdatesHandler.getEvents());
-    }
-    assertNotNull(events);
-    assertEquals(4, events.size());
-
-    OMDBUpdateEvent keyEvent = events.get(2);
-    assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, keyEvent.getAction());
-    assertEquals("/sampleVol/bucketOne/key_one", keyEvent.getKey());
-
-    OMDBUpdateEvent volEvent = events.get(3);
-    assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, volEvent.getAction());
-    assertEquals(volumeKey, volEvent.getKey());
-  }
-
-  @Test
-  public void testGetValueType() throws IOException {
-    OzoneConfiguration configuration = createNewTestPath();
-    OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration);
-    OMDBUpdatesHandler omdbUpdatesHandler =
-        new OMDBUpdatesHandler(metaMgr);
-
-    assertEquals(OmKeyInfo.class, omdbUpdatesHandler.getValueType(
-        metaMgr.getKeyTable().getName()));
-    assertEquals(OmVolumeArgs.class, omdbUpdatesHandler.getValueType(
-        metaMgr.getVolumeTable().getName()));
-    assertEquals(OmBucketInfo.class, omdbUpdatesHandler.getValueType(
-        metaMgr.getBucketTable().getName()));
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java
deleted file mode 100644
index 6760869..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.util.Collections;
-import java.util.HashSet;
-
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.recon.persistence.AbstractSqlDatabaseTest;
-import org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition;
-import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
-import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
-import org.jooq.Configuration;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Class used to test ReconTaskControllerImpl.
- */
-public class TestReconTaskControllerImpl extends AbstractSqlDatabaseTest {
-
-  private ReconTaskController reconTaskController;
-  private Configuration sqlConfiguration;
-
-  @Before
-  public void setUp() throws Exception {
-
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-
-    sqlConfiguration = getInjector()
-        .getInstance(Configuration.class);
-
-    ReconInternalSchemaDefinition schemaDefinition = getInjector().
-        getInstance(ReconInternalSchemaDefinition.class);
-    schemaDefinition.initializeSchema();
-
-    reconTaskController = new ReconTaskControllerImpl(ozoneConfiguration,
-        sqlConfiguration, new HashSet<>());
-  }
-
-  @Test
-  public void testRegisterTask() throws Exception {
-    String taskName = "Dummy_" + System.currentTimeMillis();
-    DummyReconDBTask dummyReconDBTask =
-        new DummyReconDBTask(taskName, DummyReconDBTask.TaskType.ALWAYS_PASS);
-    reconTaskController.registerTask(dummyReconDBTask);
-    assertEquals(1, reconTaskController.getRegisteredTasks().size());
-    assertSame(dummyReconDBTask, reconTaskController.getRegisteredTasks()
-        .get(dummyReconDBTask.getTaskName()));
-  }
-
-  @Test
-  public void testConsumeOMEvents() throws Exception {
-
-    ReconDBUpdateTask reconDBUpdateTaskMock = getMockTask("MockTask");
-    when(reconDBUpdateTaskMock.process(any(OMUpdateEventBatch.class)))
-        .thenReturn(new ImmutablePair<>("MockTask", true));
-    reconTaskController.registerTask(reconDBUpdateTaskMock);
-    OMUpdateEventBatch omUpdateEventBatchMock = mock(OMUpdateEventBatch.class);
-    when(omUpdateEventBatchMock.isEmpty()).thenReturn(false);
-    when(omUpdateEventBatchMock.filter(Collections.singleton("MockTable")))
-        .thenReturn(omUpdateEventBatchMock);
-    reconTaskController.consumeOMEvents(
-        omUpdateEventBatchMock,
-        mock(OMMetadataManager.class));
-
-    verify(reconDBUpdateTaskMock, times(1))
-        .process(any());
-  }
-
-  @Test
-  public void testFailedTaskRetryLogic() throws Exception {
-    String taskName = "Dummy_" + System.currentTimeMillis();
-    DummyReconDBTask dummyReconDBTask =
-        new DummyReconDBTask(taskName, DummyReconDBTask.TaskType.FAIL_ONCE);
-    reconTaskController.registerTask(dummyReconDBTask);
-
-    long currentTime = System.currentTimeMillis();
-    OMUpdateEventBatch omUpdateEventBatchMock = mock(OMUpdateEventBatch.class);
-    when(omUpdateEventBatchMock.isEmpty()).thenReturn(false);
-    when(omUpdateEventBatchMock.getLastSequenceNumber()).thenReturn(100L);
-
-    reconTaskController.consumeOMEvents(omUpdateEventBatchMock,
-        mock(OMMetadataManager.class));
-    assertFalse(reconTaskController.getRegisteredTasks().isEmpty());
-    assertEquals(dummyReconDBTask, reconTaskController.getRegisteredTasks()
-        .get(dummyReconDBTask.getTaskName()));
-
-    ReconTaskStatusDao dao = new ReconTaskStatusDao(sqlConfiguration);
-    ReconTaskStatus dbRecord = dao.findById(taskName);
-
-    Assert.assertEquals(taskName, dbRecord.getTaskName());
-    Assert.assertTrue(
-        dbRecord.getLastUpdatedTimestamp() > currentTime);
-    Assert.assertEquals(Long.valueOf(100L), dbRecord.getLastUpdatedSeqNumber());
-  }
-
-  @Test
-  public void testBadBehavedTaskBlacklisting() throws Exception {
-    String taskName = "Dummy_" + System.currentTimeMillis();
-    DummyReconDBTask dummyReconDBTask =
-        new DummyReconDBTask(taskName, DummyReconDBTask.TaskType.ALWAYS_FAIL);
-    reconTaskController.registerTask(dummyReconDBTask);
-
-    OMUpdateEventBatch omUpdateEventBatchMock = mock(OMUpdateEventBatch.class);
-    when(omUpdateEventBatchMock.isEmpty()).thenReturn(false);
-    when(omUpdateEventBatchMock.getLastSequenceNumber()).thenReturn(100L);
-
-    OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class);
-    for (int i = 0; i < 2; i++) {
-      reconTaskController.consumeOMEvents(omUpdateEventBatchMock,
-          omMetadataManagerMock);
-
-      assertFalse(reconTaskController.getRegisteredTasks().isEmpty());
-      assertEquals(dummyReconDBTask, reconTaskController.getRegisteredTasks()
-          .get(dummyReconDBTask.getTaskName()));
-    }
-
-    //Should be blacklisted now.
-    reconTaskController.consumeOMEvents(omUpdateEventBatchMock,
-        omMetadataManagerMock);
-    assertTrue(reconTaskController.getRegisteredTasks().isEmpty());
-
-    ReconTaskStatusDao dao = new ReconTaskStatusDao(sqlConfiguration);
-    ReconTaskStatus dbRecord = dao.findById(taskName);
-
-    Assert.assertEquals(taskName, dbRecord.getTaskName());
-    Assert.assertEquals(Long.valueOf(0L), dbRecord.getLastUpdatedTimestamp());
-    Assert.assertEquals(Long.valueOf(0L), dbRecord.getLastUpdatedSeqNumber());
-  }
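-
-  // As exercised above, the controller's policy appears to be: a task may
-  // fail once and be retried, but after repeated failures on the same batch
-  // it is unregistered (blacklisted) and its recorded status is left at the
-  // initial timestamp and sequence number of 0.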
-
-
-  @Test
-  public void testReInitializeTasks() throws Exception {
-
-    OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class);
-    ReconDBUpdateTask reconDBUpdateTaskMock =
-        getMockTask("MockTask2");
-    when(reconDBUpdateTaskMock.reprocess(omMetadataManagerMock))
-        .thenReturn(new ImmutablePair<>("MockTask2", true));
-
-    reconTaskController.registerTask(reconDBUpdateTaskMock);
-    reconTaskController.reInitializeTasks(omMetadataManagerMock);
-
-    verify(reconDBUpdateTaskMock, times(1))
-        .reprocess(omMetadataManagerMock);
-  }
-
-  /**
-   * Helper method for getting a mocked Task.
-   * @param taskName name of the task.
-   * @return instance of ReconDBUpdateTask.
-   */
-  private ReconDBUpdateTask getMockTask(String taskName) {
-    ReconDBUpdateTask reconDBUpdateTaskMock = mock(ReconDBUpdateTask.class);
-    when(reconDBUpdateTaskMock.getTaskName()).thenReturn(taskName);
-    when(reconDBUpdateTaskMock.getTaskTables())
-        .thenReturn(Collections.singleton("MockTable"));
-    return reconDBUpdateTaskMock;
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/package-info.java
deleted file mode 100644
index 9e1a31a..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * The classes in this package test the various scheduled tasks used by
- * Recon.
- */
-package org.apache.hadoop.ozone.recon.tasks;
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/types/GuiceInjectorUtilsForTests.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/types/GuiceInjectorUtilsForTests.java
deleted file mode 100644
index 77d9106..0000000
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/types/GuiceInjectorUtilsForTests.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.types;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Singleton;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.recon.persistence.AbstractSqlDatabaseTest;
-import org.apache.hadoop.ozone.recon.persistence.DataSourceConfiguration;
-import org.apache.hadoop.ozone.recon.persistence.JooqPersistenceModule;
-import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
-import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider;
-import org.apache.hadoop.ozone.recon.spi.impl.ContainerDBServiceProviderImpl;
-import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
-import org.apache.hadoop.ozone.recon.spi.impl.ReconContainerDBProvider;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.junit.Assert;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.io.IOException;
-
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR;
-
-/**
- * Utility methods to get a Guice injector and Ozone configuration.
- */
-public interface GuiceInjectorUtilsForTests {
-
-  /**
-   * Get Guice Injector with bindings.
-   * @param ozoneManagerServiceProvider OM service provider to bind (may be null).
-   * @param reconOMMetadataManager Recon OM metadata manager to bind (may be null).
-   * @param temporaryFolder temporary folder used for the test DB directories.
-   * @return Injector
-   * @throws IOException ioEx.
-   */
-  default Injector getInjector(
-      OzoneManagerServiceProviderImpl ozoneManagerServiceProvider,
-      ReconOMMetadataManager reconOMMetadataManager,
-      TemporaryFolder temporaryFolder
-  ) throws IOException {
-
-    File tempDir = temporaryFolder.newFolder();
-    AbstractSqlDatabaseTest.DataSourceConfigurationProvider
-        configurationProvider =
-        new AbstractSqlDatabaseTest.DataSourceConfigurationProvider(tempDir);
-
-    JooqPersistenceModule jooqPersistenceModule =
-        new JooqPersistenceModule(configurationProvider);
-
-    return Guice.createInjector(jooqPersistenceModule,
-        new AbstractModule() {
-          @Override
-          protected void configure() {
-            try {
-              bind(DataSourceConfiguration.class)
-                  .toProvider(configurationProvider);
-              bind(OzoneConfiguration.class).toInstance(
-                  getTestOzoneConfiguration(temporaryFolder));
-
-              if (reconOMMetadataManager != null) {
-                bind(ReconOMMetadataManager.class)
-                    .toInstance(reconOMMetadataManager);
-              }
-
-              if (ozoneManagerServiceProvider != null) {
-                bind(OzoneManagerServiceProvider.class)
-                    .toInstance(ozoneManagerServiceProvider);
-              }
-
-              bind(DBStore.class).toProvider(ReconContainerDBProvider.class).
-                  in(Singleton.class);
-              bind(ContainerDBServiceProvider.class).to(
-                  ContainerDBServiceProviderImpl.class).in(Singleton.class);
-            } catch (IOException e) {
-              Assert.fail();
-            }
-          }
-        });
-  }
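-
-  // Typical use from a test (a sketch; the names follow the callers in this
-  // module, e.g. TestContainerKeyMapperTask):
-  //   Injector injector = getInjector(mockOmServiceProvider,
-  //       reconOMMetadataManager, temporaryFolder);
-  //   ContainerDBServiceProvider provider =
-  //       injector.getInstance(ContainerDBServiceProvider.class);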
-
-  /**
-   * Get Test OzoneConfiguration instance.
-   * @return OzoneConfiguration
-   * @throws IOException ioEx.
-   */
-  default OzoneConfiguration getTestOzoneConfiguration(
-      TemporaryFolder temporaryFolder) throws IOException {
-    OzoneConfiguration configuration = new OzoneConfiguration();
-    configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR,
-        temporaryFolder.newFolder().getAbsolutePath());
-    configuration.set(OZONE_RECON_DB_DIR, temporaryFolder.newFolder()
-        .getAbsolutePath());
-    return configuration;
-  }
-}
diff --git a/hadoop-ozone/recon/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker b/hadoop-ozone/recon/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker
deleted file mode 100644
index 3c9e1c8..0000000
--- a/hadoop-ozone/recon/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
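-#
-# Enables Mockito's inline mock maker, which can also mock final classes
-# and final methods.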
-mock-maker-inline
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml
deleted file mode 100644
index 32c9587..0000000
--- a/hadoop-ozone/s3gateway/pom.xml
+++ /dev/null
@@ -1,256 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-s3gateway</artifactId>
-  <name>Apache Hadoop Ozone S3 Gateway</name>
-  <packaging>jar</packaging>
-  <version>0.5.0-SNAPSHOT</version>
-  <properties>
-    <file.encoding>UTF-8</file.encoding>
-    <downloadSources>true</downloadSources>
-  </properties>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.jboss.weld.servlet</groupId>
-      <artifactId>weld-servlet</artifactId>
-      <version>2.4.7.Final</version>
-    </dependency>
-    <dependency>
-      <groupId>org.glassfish.jersey.containers</groupId>
-      <artifactId>jersey-container-servlet-core</artifactId>
-      <version>2.27</version>
-    </dependency>
-    <dependency>
-      <groupId>org.glassfish.jersey.ext.cdi</groupId>
-      <artifactId>jersey-cdi1x</artifactId>
-      <version>2.27</version>
-    </dependency>
-    <dependency>
-      <groupId>org.glassfish.jersey.inject</groupId>
-      <artifactId>jersey-hk2</artifactId>
-      <version>2.27</version>
-      <exclusions>
-        <exclusion>
-          <artifactId>hk2-api</artifactId>
-          <groupId>org.glassfish.hk2</groupId>
-        </exclusion>
-        <exclusion>
-          <artifactId>hk2-utils</artifactId>
-          <groupId>org.glassfish.hk2</groupId>
-        </exclusion>
-        <exclusion>
-          <artifactId>aopalliance-repackaged</artifactId>
-          <groupId>org.glassfish.hk2.external</groupId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.glassfish.hk2</groupId>
-      <artifactId>hk2-api</artifactId>
-      <version>2.5.0</version>
-    </dependency>
-    <dependency>
-      <groupId>com.fasterxml.jackson.dataformat</groupId>
-      <artifactId>jackson-dataformat-xml</artifactId>
-      <version>2.9.0</version>
-    </dependency>
-    <dependency>
-      <groupId>javax.enterprise</groupId>
-      <artifactId>cdi-api</artifactId>
-      <version>1.2</version>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.xml.bind</groupId>
-      <artifactId>jaxb-impl</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.xml.bind</groupId>
-      <artifactId>jaxb-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>javax.activation</groupId>
-      <artifactId>activation</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-servlet</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-json</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-server</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-servlet</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-json</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-server</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-servlet</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-json</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-server</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-servlet</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-json</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-server</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <version>2.15.0</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.github.spotbugs</groupId>
-      <artifactId>spotbugs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>copy-common-html</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>unpack</goal>
-            </goals>
-            <configuration>
-              <artifactItems>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-hdds-server-framework</artifactId>
-                  <outputDirectory>${project.build.outputDirectory}
-                  </outputDirectory>
-                  <includes>webapps/static/**/*.*</includes>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-hdds-docs</artifactId>
-                  <outputDirectory>
-                    ${project.build.outputDirectory}/webapps/static
-                  </outputDirectory>
-                  <includes>docs/**/*.*</includes>
-                </artifactItem>
-              </artifactItems>
-              <overWriteSnapshots>true</overWriteSnapshots>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
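-      <!-- The copy-common-html execution above unpacks the static web
-           assets shipped inside the hadoop-hdds-server-framework and
-           hadoop-hdds-docs jars into this module's output directory, so
-           the S3 gateway can serve them without duplicating the files. -->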
-    </plugins>
-  </build>
-</project>
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSAuthParser.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSAuthParser.java
deleted file mode 100644
index 88def0b..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSAuthParser.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import java.nio.charset.Charset;
-import java.time.ZoneOffset;
-import java.time.format.DateTimeFormatter;
-
-/**
- * Parser for the authorization information of an HTTP request.
- */
-interface AWSAuthParser {
-
-  String UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD";
-  String NEWLINE = "\n";
-  String CONTENT_TYPE = "content-type";
-  String X_AMAZ_DATE = "X-Amz-Date";
-  String CONTENT_MD5 = "content-md5";
-  String AUTHORIZATION_HEADER = "Authorization";
-  Charset UTF_8 = Charset.forName("utf-8");
-  String X_AMZ_CONTENT_SHA256 = "X-Amz-Content-SHA256";
-  String HOST = "host";
-
-  String AWS4_TERMINATOR = "aws4_request";
-
-  String AWS4_SIGNING_ALGORITHM = "AWS4-HMAC-SHA256";
-
-  /**
-   * Seconds in a week, which is the max expiration time Sig-v4 accepts.
-   */
-  long PRESIGN_URL_MAX_EXPIRATION_SECONDS =
-      60 * 60 * 24 * 7;
-
-  String X_AMZ_SECURITY_TOKEN = "X-Amz-Security-Token";
-
-  String X_AMZ_CREDENTIAL = "X-Amz-Credential";
-
-  String X_AMZ_DATE = "X-Amz-Date";
-
-  String X_AMZ_EXPIRES = "X-Amz-Expires";
-
-  String X_AMZ_SIGNED_HEADER = "X-Amz-SignedHeaders";
-
-  String X_AMZ_SIGNATURE = "X-Amz-Signature";
-
-  String X_AMZ_ALGORITHM = "X-Amz-Algorithm";
-
-  String AUTHORIZATION = "Authorization";
-
-  String HOST_HEADER = "Host";
-
-  DateTimeFormatter DATE_FORMATTER =
-      DateTimeFormatter.ofPattern("yyyyMMdd");
-
-  DateTimeFormatter TIME_FORMATTER =
-      DateTimeFormatter.ofPattern("yyyyMMdd'T'HHmmss'Z'")
-          .withZone(ZoneOffset.UTC);
-
-  /**
-   * API to return string to sign.
-   */
-  String getStringToSign() throws Exception;
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java
deleted file mode 100644
index 82ffa0c..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV4;
-import org.apache.hadoop.ozone.s3.header.Credential;
-import org.apache.kerby.util.Hex;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.ws.rs.container.ContainerRequestContext;
-import javax.ws.rs.core.MultivaluedMap;
-import java.io.UnsupportedEncodingException;
-import java.net.InetAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URLEncoder;
-import java.net.UnknownHostException;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.time.LocalDate;
-import java.time.LocalDateTime;
-import java.time.ZoneOffset;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import static java.time.temporal.ChronoUnit.SECONDS;
-import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_TOKEN_CREATION_ERROR;
-
-/**
- * Parser to process AWS v4 auth request. Creates string to sign and auth
- * header. For more details refer to the AWS documentation:
- * https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
- */
-public class AWSV4AuthParser implements AWSAuthParser {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(AWSV4AuthParser.class);
-  private MultivaluedMap<String, String> headerMap;
-  private MultivaluedMap<String, String> queryMap;
-  private String uri;
-  private String method;
-  private AuthorizationHeaderV4 v4Header;
-  private String stringToSign;
-  private String amzContentPayload;
-
-  public AWSV4AuthParser(ContainerRequestContext context)
-      throws OS3Exception {
-    this.headerMap = context.getHeaders();
-    this.queryMap = context.getUriInfo().getQueryParameters();
-    try {
-      this.uri = new URI(context.getUriInfo().getRequestUri()
-          .getPath().replaceAll("\\/+",
-              "/")).normalize().getPath();
-    } catch (URISyntaxException e) {
-      throw S3_TOKEN_CREATION_ERROR;
-    }
-
-    this.method = context.getMethod();
-    v4Header = new AuthorizationHeaderV4(
-        headerMap.getFirst(AUTHORIZATION_HEADER));
-  }
-
-  public void parse() throws Exception {
-    StringBuilder strToSign = new StringBuilder();
-
-    // According to AWS sigv4 documentation, authorization header should be
-    // in following format.
-    // Authorization: algorithm Credential=access key ID/credential scope,
-    // SignedHeaders=SignedHeaders, Signature=signature
-
-    // Construct String to sign in below format.
-    // StringToSign =
-    //    Algorithm + \n +
-    //    RequestDateTime + \n +
-    //    CredentialScope + \n +
-    //    HashedCanonicalRequest
-    String algorithm, requestDateTime, credentialScope, canonicalRequest;
-    algorithm = v4Header.getAlgorithm();
-    requestDateTime = headerMap.getFirst(X_AMAZ_DATE);
-    Credential credential = v4Header.getCredentialObj();
-    credentialScope = String.format("%s/%s/%s/%s", credential.getDate(),
-        credential.getAwsRegion(), credential.getAwsService(),
-        credential.getAwsRequest());
-
-    // If the absolute path is empty, use a forward slash (/)
-    uri = (uri.trim().length() > 0) ? uri : "/";
-    // Assemble StringToSign line by line; URI encoding happens later in
-    // buildCanonicalRequest().
-    strToSign.append(algorithm + NEWLINE);
-    strToSign.append(requestDateTime + NEWLINE);
-    strToSign.append(credentialScope + NEWLINE);
-
-    canonicalRequest = buildCanonicalRequest();
-    strToSign.append(hash(canonicalRequest));
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("canonicalRequest:[{}]", canonicalRequest);
-    }
-
-    if (LOG.isTraceEnabled()) {
-      headerMap.keySet().forEach(k -> LOG.trace("Header:{},value:{}", k,
-          headerMap.get(k)));
-    }
-
-    LOG.debug("StringToSign:[{}]", strToSign);
-    stringToSign = strToSign.toString();
-  }
-
-  private String buildCanonicalRequest() throws OS3Exception {
-    Iterable<String> parts = split("/", uri);
-    List<String> encParts = new ArrayList<>();
-    for (String p : parts) {
-      encParts.add(urlEncode(p));
-    }
-    String canonicalUri = join("/", encParts);
-
-    String canonicalQueryStr = getQueryParamString();
-
-    StringBuilder canonicalHeaders = new StringBuilder();
-
-    for (String header : v4Header.getSignedHeaders()) {
-      List<String> headerValue = new ArrayList<>();
-      canonicalHeaders.append(header.toLowerCase());
-      canonicalHeaders.append(":");
-      for (String originalHeader : headerMap.keySet()) {
-        if (originalHeader.toLowerCase().equals(header)) {
-          headerValue.add(headerMap.getFirst(originalHeader).trim());
-        }
-      }
-
-      if (headerValue.size() == 0) {
-        throw new RuntimeException("Header " + header + " not present in " +
-            "request");
-      }
-      if (headerValue.size() > 1) {
-        Collections.sort(headerValue);
-      }
-
-      // validateSignedHeader is overridable in tests to skip the date and
-      // host validation.
-      validateSignedHeader(header, headerValue.get(0));
-
-      canonicalHeaders.append(join(",", headerValue));
-      canonicalHeaders.append(NEWLINE);
-    }
-
-    String payloadHash;
-    if (UNSIGNED_PAYLOAD.equals(
-        headerMap.getFirst(X_AMZ_CONTENT_SHA256))) {
-      payloadHash = UNSIGNED_PAYLOAD;
-    } else {
-      payloadHash = headerMap.getFirst(X_AMZ_CONTENT_SHA256);
-    }
-
-    String signedHeaderStr = v4Header.getSignedHeaderString();
-    String canonicalRequest = method + NEWLINE
-        + canonicalUri + NEWLINE
-        + canonicalQueryStr + NEWLINE
-        + canonicalHeaders + NEWLINE
-        + signedHeaderStr + NEWLINE
-        + payloadHash;
-
-    return canonicalRequest;
-  }
-
-  @VisibleForTesting
-  void validateSignedHeader(String header, String headerValue)
-      throws OS3Exception {
-    switch (header) {
-    case HOST:
-      try {
-        URI hostUri = new URI(headerValue);
-        InetAddress.getByName(hostUri.getHost());
-        // TODO: Validate if current request is coming from same host.
-      } catch (UnknownHostException|URISyntaxException e) {
-        LOG.error("Host value mentioned in signed header is not valid. " +
-            "Host:{}", headerValue);
-        throw S3_TOKEN_CREATION_ERROR;
-      }
-      break;
-    case X_AMAZ_DATE:
-      // LocalDate does not support seconds-based arithmetic, so parse the
-      // timestamp as a LocalDateTime before the range check.
-      LocalDateTime date = LocalDateTime.parse(headerValue, TIME_FORMATTER);
-      LocalDateTime now = LocalDateTime.now();
-      if (date.isBefore(now.minus(PRESIGN_URL_MAX_EXPIRATION_SECONDS, SECONDS))
-          || date.isAfter(now.plus(PRESIGN_URL_MAX_EXPIRATION_SECONDS,
-          SECONDS))) {
-        LOG.error("AWS date not in valid range. Request timestamp:{} should " +
-                "not be older than {} seconds.", headerValue,
-            PRESIGN_URL_MAX_EXPIRATION_SECONDS);
-        throw S3_TOKEN_CREATION_ERROR;
-      }
-      break;
-    case X_AMZ_CONTENT_SHA256:
-      // TODO: Construct request payload and match HEX(SHA256(requestPayload))
-      break;
-    default:
-      break;
-    }
-  }
-
-  /**
-   * String join that also works with empty strings.
-   *
-   * @return joined string
-   */
-  private static String join(String glue, List<String> parts) {
-    StringBuilder result = new StringBuilder();
-    boolean addSeparator = false;
-    for (String p : parts) {
-      if (addSeparator) {
-        result.append(glue);
-      }
-      result.append(p);
-      addSeparator = true;
-    }
-    return result.toString();
-  }
-
-  /**
-   * Splits the string on the given regex, keeping empty pieces (unlike
-   * String#split, which drops trailing empty strings).
-   *
-   * @param regex Regular expression to split by
-   * @param whole The string to split
-   * @return pieces
-   */
-  private static Iterable<String> split(String regex, String whole) {
-    Pattern p = Pattern.compile(regex);
-    Matcher m = p.matcher(whole);
-    List<String> result = new ArrayList<>();
-    int pos = 0;
-    while (m.find()) {
-      result.add(whole.substring(pos, m.start()));
-      pos = m.end();
-    }
-    result.add(whole.substring(pos));
-    return result;
-  }
-
-  private String urlEncode(String str) {
-    try {
-      return URLEncoder.encode(str, UTF_8.name())
-          .replaceAll("\\+", "%20")
-          .replaceAll("%7E", "~");
-    } catch (UnsupportedEncodingException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  private String getQueryParamString() {
-    List<String> params = new ArrayList<>(queryMap.keySet());
-
-    // Sort by name, then by value
-    Collections.sort(params, (o1, o2) -> o1.equals(o2) ?
-        queryMap.getFirst(o1).compareTo(queryMap.getFirst(o2)) :
-        o1.compareTo(o2));
-
-    StringBuilder result = new StringBuilder();
-    for (String p : params) {
-      if (result.length() > 0) {
-        result.append("&");
-      }
-      result.append(urlEncode(p));
-      result.append('=');
-
-      result.append(urlEncode(queryMap.getFirst(p)));
-    }
-    return result.toString();
-  }
-
-  public static String hash(String payload) throws NoSuchAlgorithmException {
-    MessageDigest md = MessageDigest.getInstance("SHA-256");
-    md.update(payload.getBytes(UTF_8));
-    return Hex.encode(md.digest()).toLowerCase();
-  }
-
-  public String getAwsAccessId() {
-    return v4Header.getAccessKeyID();
-  }
-
-  public String getSignature() {
-    return v4Header.getSignature();
-  }
-
-  public String getStringToSign() throws Exception {
-    return stringToSign;
-  }
-}
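
For reference, a minimal, self-contained sketch of the string-to-sign assembly
that parse() and buildCanonicalRequest() above perform. All request values
(method, path, host, timestamp, credential scope) are illustrative
placeholders, not output captured from the gateway:

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;

    public class StringToSignDemo {
      // Hex-encoded SHA-256, equivalent to the hash() helper above.
      static String sha256Hex(String payload) throws Exception {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        byte[] digest = md.digest(payload.getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
          hex.append(String.format("%02x", b));
        }
        return hex.toString();
      }

      public static void main(String[] args) throws Exception {
        String canonicalRequest = String.join("\n",
            "GET",                                    // HTTP method
            "/bucket/key",                            // canonical URI
            "",                                       // canonical query string
            "host:s3g.example.com\nx-amz-date:20191022T070136Z\n",
            "host;x-amz-date",                        // signed headers
            "UNSIGNED-PAYLOAD");                      // payload hash
        String stringToSign = String.join("\n",
            "AWS4-HMAC-SHA256",                       // algorithm
            "20191022T070136Z",                       // request timestamp
            "20191022/us-east-1/s3/aws4_request",     // credential scope
            sha256Hex(canonicalRequest));             // hashed canonical request
        System.out.println(stringToSign);
      }
    }

Note that the canonical-headers element already ends with "\n" and String.join
adds one more, reproducing the blank line that separates the header block from
the signed-headers line, just as `canonicalHeaders + NEWLINE` does above.
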
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java
deleted file mode 100644
index 27f792e..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import javax.inject.Inject;
-import javax.ws.rs.container.ContainerRequestContext;
-import javax.ws.rs.container.ContainerResponseContext;
-import javax.ws.rs.container.ContainerResponseFilter;
-import javax.ws.rs.ext.Provider;
-import java.io.IOException;
-
-/**
- * This class adds common header responses for all the requests.
- */
-@Provider
-public class CommonHeadersContainerResponseFilter implements
-    ContainerResponseFilter {
-
-  @Inject
-  private RequestIdentifier requestIdentifier;
-
-  @Override
-  public void filter(ContainerRequestContext containerRequestContext,
-      ContainerResponseContext containerResponseContext) throws IOException {
-
-    containerResponseContext.getHeaders().add("Server", "Ozone");
-    containerResponseContext.getHeaders()
-        .add("x-amz-id-2", requestIdentifier.getAmzId());
-    containerResponseContext.getHeaders()
-        .add("x-amz-request-id", requestIdentifier.getRequestId());
-
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java
deleted file mode 100644
index 061a2d7..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-
-/**
- * This class is used to start/stop S3 compatible rest server.
- */
-@Command(name = "ozone s3g",
-    hidden = true, description = "S3 compatible rest server.",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true)
-public class Gateway extends GenericCli {
-
-  private static final Logger LOG = LoggerFactory.getLogger(Gateway.class);
-
-  private S3GatewayHttpServer httpServer;
-
-  public static void main(String[] args) throws Exception {
-    new Gateway().run(args);
-  }
-
-  @Override
-  public Void call() throws Exception {
-    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
-    OzoneConfigurationHolder.setConfiguration(ozoneConfiguration);
-    httpServer = new S3GatewayHttpServer(ozoneConfiguration, "s3gateway");
-    start();
-    return null;
-  }
-
-  public void start() throws IOException {
-    LOG.info("Starting Ozone S3 gateway");
-    httpServer.start();
-  }
-
-  public void stop() throws Exception {
-    LOG.info("Stopping Ozone S3 gateway");
-    httpServer.stop();
-  }
-
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java
deleted file mode 100644
index c5a291b..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import org.glassfish.jersey.server.ResourceConfig;
-
-/**
- * JaxRS resource definition.
- */
-public class GatewayApplication extends ResourceConfig {
-  public GatewayApplication() {
-    packages("org.apache.hadoop.ozone.s3");
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java
deleted file mode 100644
index db94bbb..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import javax.annotation.Priority;
-import javax.ws.rs.container.ContainerRequestContext;
-import javax.ws.rs.container.ContainerRequestFilter;
-import javax.ws.rs.container.PreMatching;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.ext.Provider;
-import java.io.IOException;
-
-/**
- * Filter to adjust request headers for compatibility reasons.
- *
- * It should be executed AFTER the VirtualHostStyleFilter, as the original
- * Content-Type can be part of the signature base.
- */
-@Provider
-@PreMatching
-@Priority(VirtualHostStyleFilter.PRIORITY
-    + S3GatewayHttpServer.FILTER_PRIORITY_DO_AFTER)
-public class HeaderPreprocessor implements ContainerRequestFilter {
-
-  public static final String MULTIPART_UPLOAD_MARKER = "ozone/mpu";
-
-  @Override
-  public void filter(ContainerRequestContext requestContext) throws
-      IOException {
-    MultivaluedMap<String, String> queryParameters =
-        requestContext.getUriInfo().getQueryParameters();
-
-    if (queryParameters.containsKey("delete")) {
-      // The aws cli doesn't send a proper Content-Type, so by default POST
-      // requests are processed as form-url-encoded. Fix that up here.
-      requestContext.getHeaders()
-          .putSingle("Content-Type", MediaType.APPLICATION_XML);
-    }
-
-    if (queryParameters.containsKey("uploadId")) {
-      // The aws cli doesn't send a proper Content-Type, so by default POST
-      // requests are processed as form-url-encoded. Fix that up here.
-      requestContext.getHeaders()
-          .putSingle("Content-Type", MediaType.APPLICATION_XML);
-    } else if (queryParameters.containsKey("uploads")) {
-      // "uploads" is defined but "uploadId" is not --> this is the creation
-      // of a multipart upload request.
-      //
-      // The AWS SDK for Go uses application/octet-stream, which also has to
-      // be fixed up to route the request to the right JAX-RS method.
-      //
-      // The marker is used instead of XML because the body is empty and
-      // cannot be deserialized as a CompleteMultipartUploadRequest.
-      requestContext.getHeaders()
-          .putSingle("Content-Type", MULTIPART_UPLOAD_MARKER);
-    }
-
-  }
-
-}
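
The routing rules above reduce to a small decision table on the query
parameters. A hedged sketch, extracted as a pure function for clarity (the
"ozone/mpu" value is the MULTIPART_UPLOAD_MARKER constant defined above):

    // Returns the Content-Type to force, or null to keep the original one.
    static String rewrittenContentType(java.util.Set<String> queryParams) {
      if (queryParams.contains("delete")) {
        return "application/xml";   // multi-delete body is XML
      }
      if (queryParams.contains("uploadId")) {
        return "application/xml";   // complete-multipart-upload body is XML
      }
      if (queryParams.contains("uploads")) {
        return "ozone/mpu";         // initiate-multipart-upload: empty body
      }
      return null;
    }
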
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
deleted file mode 100644
index d42c005..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.enterprise.context.RequestScoped;
-import javax.enterprise.inject.Produces;
-import javax.inject.Inject;
-import javax.ws.rs.container.ContainerRequestContext;
-import javax.ws.rs.core.Context;
-import java.io.IOException;
-import java.net.URISyntaxException;
-
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type.S3TOKEN;
-import static org.apache.hadoop.ozone.s3.AWSAuthParser.AUTHORIZATION_HEADER;
-import static org.apache.hadoop.ozone.s3.AWSAuthParser.UTF_8;
-import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.AUTH_PROTOCOL_NOT_SUPPORTED;
-import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_TOKEN_CREATION_ERROR;
-
-/**
- * This class creates the OzoneClient for the Rest endpoints.
- */
-@RequestScoped
-public class OzoneClientProducer {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneClientProducer.class);
-
-  @Context
-  private ContainerRequestContext context;
-
-  @Inject
-  private OzoneConfiguration ozoneConfiguration;
-
-  @Inject
-  private Text omService;
-
-  @Inject
-  private String omServiceID;
-
-  @Produces
-  public OzoneClient createClient() throws IOException {
-    return getClient(ozoneConfiguration);
-  }
-
-  private OzoneClient getClient(OzoneConfiguration config) throws IOException {
-    try {
-      if (OzoneSecurityUtil.isSecurityEnabled(config)) {
-        LOG.debug("Creating s3 token for client.");
-        if (context.getHeaderString(AUTHORIZATION_HEADER).startsWith("AWS4")) {
-          try {
-            AWSV4AuthParser v4RequestParser = new AWSV4AuthParser(context);
-            v4RequestParser.parse();
-
-            OzoneTokenIdentifier identifier = new OzoneTokenIdentifier();
-            identifier.setTokenType(S3TOKEN);
-            identifier.setStrToSign(v4RequestParser.getStringToSign());
-            identifier.setSignature(v4RequestParser.getSignature());
-            identifier.setAwsAccessId(v4RequestParser.getAwsAccessId());
-            identifier.setOwner(new Text(v4RequestParser.getAwsAccessId()));
-            if (LOG.isTraceEnabled()) {
-              LOG.trace("Adding token for service:{}", omService);
-            }
-            Token<OzoneTokenIdentifier> token = new Token<>(identifier.getBytes(),
-                identifier.getSignature().getBytes(UTF_8),
-                identifier.getKind(),
-                omService);
-            UserGroupInformation remoteUser =
-                UserGroupInformation.createRemoteUser(
-                    v4RequestParser.getAwsAccessId());
-            remoteUser.addToken(token);
-            UserGroupInformation.setLoginUser(remoteUser);
-          } catch (OS3Exception | URISyntaxException ex) {
-            LOG.error("S3 token creation failed.");
-            throw S3_TOKEN_CREATION_ERROR;
-          }
-        } else {
-          throw AUTH_PROTOCOL_NOT_SUPPORTED;
-        }
-      }
-    } catch (Exception e) {
-      LOG.error("Error: ", e);
-    }
-
-    if (omServiceID == null) {
-      return OzoneClientFactory.getClient(ozoneConfiguration);
-    } else {
-      // In the HA case, we need to pass the OM service ID.
-      return OzoneClientFactory.getRpcClient(omServiceID, ozoneConfiguration);
-    }
-  }
-
-  @VisibleForTesting
-  public void setContext(ContainerRequestContext context) {
-    this.context = context;
-  }
-
-  @VisibleForTesting
-  public void setOzoneConfiguration(OzoneConfiguration config) {
-    this.ozoneConfiguration = config;
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java
deleted file mode 100644
index 4aeab1f..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import javax.enterprise.inject.Produces;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-/**
- * Ozone Configuration factory.
- * <p>
- * As the OzoneConfiguration is created by the CLI application here we inject
- * it via a singleton instance to the Jax-RS/CDI instances.
- */
-public class OzoneConfigurationHolder {
-
-  private static OzoneConfiguration configuration;
-
-  @Produces
-  public OzoneConfiguration configuration() {
-    return configuration;
-  }
-
-  public static void setConfiguration(
-      OzoneConfiguration conf) {
-    OzoneConfigurationHolder.configuration = conf;
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java
deleted file mode 100644
index b98426c..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.s3.util.OzoneS3Util;
-import org.apache.hadoop.security.SecurityUtil;
-
-import javax.annotation.PostConstruct;
-import javax.enterprise.context.ApplicationScoped;
-import javax.enterprise.inject.Produces;
-import javax.inject.Inject;
-
-import java.util.Arrays;
-import java.util.Collection;
-
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
-
-/**
- * Provides the OM service address (and, in the HA case, the OM service ID)
- * for injection.
- */
-@ApplicationScoped
-public class OzoneServiceProvider {
-
-  private Text omServiceAddr;
-
-  private String omServiceID;
-
-  @Inject
-  private OzoneConfiguration conf;
-
-  @PostConstruct
-  public void init() {
-    Collection<String> serviceIdList =
-        conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY);
-    if (serviceIdList.size() == 0) {
-      // Non-HA cluster
-      omServiceAddr = SecurityUtil.buildTokenService(OmUtils.
-          getOmAddressForClients(conf));
-    } else {
-      // HA cluster.
-      // For now, if multiple service IDs are configured, we throw an
-      // exception, because the S3 Gateway would not know which one to talk
-      // to. If OM federation is supported in the future, this can be
-      // resolved with another property such as
-      // ozone.om.internal.service.id.
-      // TODO: Revisit this later.
-      if (serviceIdList.size() > 1) {
-        throw new IllegalArgumentException("Multiple serviceIds are " +
-            "configured. " + Arrays.toString(serviceIdList.toArray()));
-      } else {
-        String serviceId = serviceIdList.iterator().next();
-        Collection<String> omNodeIds = OmUtils.getOMNodeIds(conf, serviceId);
-        if (omNodeIds.size() == 0) {
-          throw new IllegalArgumentException(OZONE_OM_NODES_KEY
-              + "." + serviceId + " is not defined");
-        }
-        omServiceAddr = new Text(OzoneS3Util.buildServiceNameForToken(conf,
-            serviceId, omNodeIds));
-        omServiceID = serviceId;
-      }
-    }
-  }
-
-  @Produces
-  public Text getService() {
-    return omServiceAddr;
-  }
-
-  @Produces
-  public String getOmServiceID() {
-    return omServiceID;
-  }
-
-}
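
The HA/non-HA branching above boils down to the following rule; a minimal
sketch using plain strings instead of OzoneConfiguration, with the service IDs
assumed to come from ozone.om.service.ids:

    static String resolveOmServiceId(java.util.List<String> serviceIds) {
      if (serviceIds.isEmpty()) {
        return null;               // non-HA: token service is the OM address
      }
      if (serviceIds.size() > 1) {
        throw new IllegalArgumentException(
            "Multiple serviceIds are configured: " + serviceIds);
      }
      return serviceIds.get(0);    // HA: clients connect via the service ID
    }
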
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RequestIdentifier.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RequestIdentifier.java
deleted file mode 100644
index 379393c..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RequestIdentifier.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import javax.enterprise.context.RequestScoped;
-
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-
-import org.apache.commons.lang3.RandomStringUtils;
-
-/**
- * Request specific identifiers.
- */
-@RequestScoped
-public class RequestIdentifier {
-
-  private final String requestId;
-
-  private final String amzId;
-
-  public RequestIdentifier() {
-    amzId = RandomStringUtils.randomAlphanumeric(8, 16);
-    requestId = OzoneUtils.getRequestID();
-  }
-
-  public String getRequestId() {
-    return requestId;
-  }
-
-  public String getAmzId() {
-    return amzId;
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java
deleted file mode 100644
index 4a5570a..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * This class contains constants for configuration keys used in S3G.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class S3GatewayConfigKeys {
-
-  public static final String OZONE_S3G_HTTP_ENABLED_KEY =
-      "ozone.s3g.http.enabled";
-  public static final String OZONE_S3G_HTTP_BIND_HOST_KEY =
-      "ozone.s3g.http-bind-host";
-  public static final String OZONE_S3G_HTTPS_BIND_HOST_KEY =
-      "ozone.s3g.https-bind-host";
-  public static final String OZONE_S3G_HTTP_ADDRESS_KEY =
-      "ozone.s3g.http-address";
-  public static final String OZONE_S3G_HTTPS_ADDRESS_KEY =
-      "ozone.s3g.https-address";
-  public static final String OZONE_S3G_KEYTAB_FILE =
-      "ozone.s3g.keytab.file";
-  public static final String OZONE_S3G_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
-  public static final int OZONE_S3G_HTTP_BIND_PORT_DEFAULT = 9878;
-  public static final int OZONE_S3G_HTTPS_BIND_PORT_DEFAULT = 9879;
-  public static final String OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
-      "ozone.s3g.authentication.kerberos.principal";
-  public static final String OZONE_S3G_DOMAIN_NAME = "ozone.s3g.domain.name";
-
-  /**
-   * Never constructed.
-   */
-  private S3GatewayConfigKeys() {
-  }
-}
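
A hedged usage sketch: the keys above are read like any other Hadoop
configuration property, and the endpoint shown is just the default
(0.0.0.0:9878), not a recommended production binding:

    OzoneConfiguration conf = new OzoneConfiguration();
    String bindHost = conf.get(
        S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_HOST_KEY,
        S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_HOST_DEFAULT);
    int bindPort = S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_PORT_DEFAULT;
    // S3 clients are then pointed at http://<bindHost>:<bindPort>
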
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
deleted file mode 100644
index f3d8341..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.server.BaseHttpServer;
-
-/**
- * HTTP server for the S3 gateway.
- */
-public class S3GatewayHttpServer extends BaseHttpServer {
-
-  /**
-   * Default offset between two filters.
-   */
-  public static final int FILTER_PRIORITY_DO_AFTER = 50;
-
-  public S3GatewayHttpServer(Configuration conf,
-      String name) throws IOException {
-    super(conf, name);
-  }
-
-  @Override
-  protected String getHttpAddressKey() {
-    return S3GatewayConfigKeys.OZONE_S3G_HTTP_ADDRESS_KEY;
-  }
-
-  @Override
-  protected String getHttpBindHostKey() {
-    return S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_HOST_KEY;
-  }
-
-  @Override
-  protected String getHttpsAddressKey() {
-    return S3GatewayConfigKeys.OZONE_S3G_HTTPS_ADDRESS_KEY;
-  }
-
-  @Override
-  protected String getHttpsBindHostKey() {
-    return S3GatewayConfigKeys.OZONE_S3G_HTTPS_BIND_HOST_KEY;
-  }
-
-  @Override
-  protected String getBindHostDefault() {
-    return S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_HOST_DEFAULT;
-  }
-
-  @Override
-  protected int getHttpBindPortDefault() {
-    return S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_PORT_DEFAULT;
-  }
-
-  @Override
-  protected int getHttpsBindPortDefault() {
-    return S3GatewayConfigKeys.OZONE_S3G_HTTPS_BIND_PORT_DEFAULT;
-  }
-
-  @Override
-  protected String getKeytabFile() {
-    return S3GatewayConfigKeys.OZONE_S3G_KEYTAB_FILE;
-  }
-
-  @Override
-  protected String getSpnegoPrincipal() {
-    return S3GatewayConfigKeys.OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
-  }
-
-  @Override
-  protected String getEnabledKey() {
-    return S3GatewayConfigKeys.OZONE_S3G_HTTP_ENABLED_KEY;
-  }
-
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java
deleted file mode 100644
index 1074ef2..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.s3;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Input stream implementation to read body with chunked signatures.
- * <p>
- * see: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
- */
-public class SignedChunksInputStream extends InputStream {
-
-  private Pattern signatureLinePattern =
-      Pattern.compile("([0-9A-Fa-f]+);chunk-signature=.*");
-
-  private InputStream originalStream;
-
-  /**
-   * Number of remaining data bytes in the current chunk. If zero, the next
-   * signature line should be parsed.
-   */
-  private int remainingData = 0;
-
-  public SignedChunksInputStream(InputStream inputStream) {
-    originalStream = inputStream;
-  }
-
-  @Override
-  public int read() throws IOException {
-    if (remainingData > 0) {
-      int curr = originalStream.read();
-      remainingData--;
-      if (remainingData == 0) {
-        //read the "\r\n" at the end of the data section
-        originalStream.read();
-        originalStream.read();
-      }
-      return curr;
-    } else {
-      remainingData = readHeader();
-      if (remainingData == -1) {
-        return -1;
-      }
-      return read();
-    }
-  }
-
-  @Override
-  public int read(byte[] b, int off, int len) throws IOException {
-    if (b == null) {
-      throw new NullPointerException();
-    } else if (off < 0 || len < 0 || len > b.length - off) {
-      throw new IndexOutOfBoundsException();
-    } else if (len == 0) {
-      return 0;
-    }
-    int currentOff = off;
-    int currentLen = len;
-    int totalReadBytes = 0;
-    int realReadLen = 0;
-    int maxReadLen = 0;
-    do {
-      if (remainingData > 0) {
-        maxReadLen = Math.min(remainingData, currentLen);
-        realReadLen = originalStream.read(b, currentOff, maxReadLen);
-        if (realReadLen == -1) {
-          break;
-        }
-        currentOff += realReadLen;
-        currentLen -= realReadLen;
-        totalReadBytes += realReadLen;
-        remainingData -= realReadLen;
-        if (remainingData == 0) {
-          //read the "\r\n" at the end of the data section
-          originalStream.read();
-          originalStream.read();
-        }
-      } else {
-        remainingData = readHeader();
-        if (remainingData == -1) {
-          break;
-        }
-      }
-    } while (currentLen > 0);
-    return totalReadBytes > 0 ? totalReadBytes : -1;
-  }
-
-  private int readHeader() throws IOException {
-    int prev = -1;
-    int curr = 0;
-    StringBuilder buf = new StringBuilder();
-
-    //read everything until the next \r\n
-    while (!eol(prev, curr) && curr != -1) {
-      int next = originalStream.read();
-      if (next != -1) {
-        buf.append((char) next);
-      }
-      prev = curr;
-      curr = next;
-    }
-    String signatureLine = buf.toString().trim();
-    if (signatureLine.length() == 0) {
-      return -1;
-    }
-
-    //parse the data length.
-    Matcher matcher = signatureLinePattern.matcher(signatureLine);
-    if (matcher.matches()) {
-      return Integer.parseInt(matcher.group(1), 16);
-    } else {
-      throw new IOException("Invalid signature line: " + signatureLine);
-    }
-  }
-
-  private boolean eol(int prev, int curr) {
-    return prev == 13 && curr == 10;
-  }
-}
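
A usage sketch decoding a two-chunk signed body with the stream above; the
chunk-signature values are placeholders, since only the hex length prefix
matters for decoding:

    String body =
        "3;chunk-signature=aaaa\r\n" + "abc" + "\r\n" +
        "2;chunk-signature=bbbb\r\n" + "de" + "\r\n" +
        "0;chunk-signature=cccc\r\n" + "\r\n";
    InputStream in = new SignedChunksInputStream(
        new java.io.ByteArrayInputStream(
            body.getBytes(java.nio.charset.StandardCharsets.UTF_8)));
    byte[] buf = new byte[16];
    int n = in.read(buf, 0, buf.length);
    // n == 5 and buf starts with "abcde": the signature lines and the
    // trailing CRLFs are stripped, only the data bytes are surfaced.
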
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
deleted file mode 100644
index 9ce98e1..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import javax.annotation.Priority;
-import javax.inject.Inject;
-import javax.ws.rs.container.ContainerRequestContext;
-import javax.ws.rs.container.ContainerRequestFilter;
-import javax.ws.rs.container.PreMatching;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.core.UriBuilder;
-import javax.ws.rs.ext.Provider;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.Arrays;
-
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.fs.InvalidRequestException;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.s3.header.AuthenticationHeaderParser;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_DOMAIN_NAME;
-
-/**
- * Filter used to convert virtual-host-style requests to path-style requests.
- */
-@Provider
-@PreMatching
-@Priority(VirtualHostStyleFilter.PRIORITY)
-public class VirtualHostStyleFilter implements ContainerRequestFilter {
-
-  public static final int PRIORITY = 100;
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      VirtualHostStyleFilter.class);
-
-  @Inject
-  private OzoneConfiguration conf;
-
-  @Inject
-  private AuthenticationHeaderParser authenticationHeaderParser;
-
-  private String[] domains;
-
-  @Override
-  public void filter(ContainerRequestContext requestContext) throws
-      IOException {
-
-    authenticationHeaderParser.setAuthHeader(requestContext.getHeaderString(
-        HttpHeaders.AUTHORIZATION));
-    domains = conf.getTrimmedStrings(OZONE_S3G_DOMAIN_NAME);
-
-    if (domains.length == 0) {
-      // No domain is configured, so the request is assumed to be path
-      // style; nothing to rewrite.
-      return;
-    }
-    // Get the value of the Host header.
-    String host = requestContext.getHeaderString(HttpHeaders.HOST);
-    host = checkHostWithoutPort(host);
-    String domain = getDomainName(host);
-
-    if (domain == null) {
-      throw getException("Invalid S3 Gateway request {" + requestContext
-          .getUriInfo().getRequestUri().toString() + " }: No matching domain " +
-          "{" + Arrays.toString(domains) + "} for the host {" + host  + "}");
-    }
-
-    LOG.debug("Http header host name is {}", host);
-    LOG.debug("Domain name matched is {}", domain);
-
-    // Check if we have a virtual-host-style request: if the host is longer
-    // than the matched domain, it carries a bucket prefix that has to be
-    // converted to path style.
-    if (host.length() > domain.length()) {
-      String bucketName = host.substring(0, host.length() - domain.length());
-
-      if(!bucketName.endsWith(".")) {
-        //Checking this as the virtual host style pattern is http://bucket.host/
-        throw getException("Invalid S3 Gateway request {" + requestContext
-            .getUriInfo().getRequestUri().toString() +"}:" +" Host: {" + host
-            + " is in invalid format");
-      } else {
-        bucketName = bucketName.substring(0, bucketName.length() - 1);
-      }
-      LOG.debug("Bucket name is {}", bucketName);
-
-      URI baseURI = requestContext.getUriInfo().getBaseUri();
-      String currentPath = requestContext.getUriInfo().getPath();
-      String newPath = bucketName;
-      if (currentPath != null) {
-        newPath += currentPath;
-      }
-      MultivaluedMap<String, String> queryParams = requestContext.getUriInfo()
-          .getQueryParameters();
-      UriBuilder requestAddrBuilder = UriBuilder.fromUri(baseURI).path(newPath);
-      queryParams.forEach((k, v) -> requestAddrBuilder.queryParam(k,
-          v.toArray()));
-      URI requestAddr = requestAddrBuilder.build();
-      requestContext.setRequestUri(baseURI, requestAddr);
-    }
-  }
-
-  private InvalidRequestException getException(String message) {
-    return new InvalidRequestException(message);
-  }
-
-  @VisibleForTesting
-  public void setConfiguration(OzoneConfiguration config) {
-    this.conf = config;
-  }
-
-  /**
-   * Finds the longest configured domain name matching the given host.
-   *
-   * @param host host name (without the port)
-   * @return the matched domain name, or null if none match
-   */
-  private String getDomainName(String host) {
-    String match = null;
-    int length = 0;
-    for (String domainVal : domains) {
-      if (host.endsWith(domainVal)) {
-        int len = domainVal.length();
-        if (len > length) {
-          length = len;
-          match = domainVal;
-        }
-      }
-    }
-    return match;
-  }
-
-  private String checkHostWithoutPort(String host) {
-    if (host.contains(":")){
-      return host.substring(0, host.lastIndexOf(":"));
-    } else {
-      return host;
-    }
-  }
-
-  @VisibleForTesting
-  public void setAuthenticationHeaderParser(AuthenticationHeaderParser parser) {
-    this.authenticationHeaderParser = parser;
-  }
-
-}
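
A worked example of the rewrite above, assuming ozone.s3g.domain.name is set
to s3g.internal (a hypothetical domain):

    String host = "mybucket.s3g.internal";   // Host header, port stripped
    String[] domains = {"s3g.internal", "internal"};
    String match = null;
    for (String d : domains) {               // longest-suffix match, as in
      if (host.endsWith(d)                   // getDomainName() above
          && (match == null || d.length() > match.length())) {
        match = d;
      }
    }
    String bucket = host.substring(0, host.length() - match.length() - 1);
    // match == "s3g.internal", bucket == "mybucket", so a virtual-host-style
    // GET /key1 is re-dispatched as the path-style GET /mybucket/key1.
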
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/BucketMetadata.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/BucketMetadata.java
deleted file mode 100644
index 04f8ffd..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/BucketMetadata.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.commontypes;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
-import java.time.Instant;
-
-/**
- * Metadata object representing one bucket.
- */
-@XmlAccessorType(XmlAccessType.FIELD)
-public class BucketMetadata {
-  @XmlElement(name = "Name")
-  private String name;
-
-  @XmlJavaTypeAdapter(IsoDateAdapter.class)
-  @XmlElement(name = "CreationDate")
-  private Instant creationDate;
-
-  public String getName() {
-    return name;
-  }
-
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  public Instant getCreationDate() {
-    return creationDate;
-  }
-
-  public void setCreationDate(Instant creationDate) {
-    this.creationDate = creationDate;
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/CommonPrefix.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/CommonPrefix.java
deleted file mode 100644
index 83e6047..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/CommonPrefix.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.commontypes;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-
-/**
- * Directory name ("key prefix") in case of listing.
- */
-@XmlAccessorType(XmlAccessType.FIELD)
-public class CommonPrefix {
-
-  @XmlElement(name = "Prefix")
-  private String prefix;
-
-  public CommonPrefix(String prefix) {
-    this.prefix = prefix;
-  }
-
-  public CommonPrefix() {
-  }
-
-  public String getPrefix() {
-    return prefix;
-  }
-
-  public void setPrefix(String prefix) {
-    this.prefix = prefix;
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/IsoDateAdapter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/IsoDateAdapter.java
deleted file mode 100644
index cb04870..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/IsoDateAdapter.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.commontypes;
-
-import javax.xml.bind.annotation.adapters.XmlAdapter;
-import java.time.Instant;
-import java.time.ZoneOffset;
-import java.time.format.DateTimeFormatter;
-
-/**
- * Converts an Instant into an ISO-8601 formatted date string.
- */
-public class IsoDateAdapter extends XmlAdapter<String, Instant> {
-
-  private DateTimeFormatter iso8601Formatter;
-
-  public IsoDateAdapter() {
-    iso8601Formatter =
-        DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSX")
-            .withZone(ZoneOffset.UTC);
-  }
-
-  @Override
-  public Instant unmarshal(String v) throws Exception {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public String marshal(Instant v) throws Exception {
-    return iso8601Formatter.format(v);
-  }
-}
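
An example of the timestamp shape marshal() above produces:

    java.time.Instant creation = java.time.Instant.parse("2006-03-01T12:00:00Z");
    String rendered = java.time.format.DateTimeFormatter
        .ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSX")
        .withZone(java.time.ZoneOffset.UTC)
        .format(creation);
    // rendered == "2006-03-01T12:00:00.000Z" (pattern "X" prints "Z" at UTC)
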
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java
deleted file mode 100644
index 34cea28..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.commontypes;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
-import java.time.Instant;
-
-/**
- * Metadata object representing one key in the object store.
- */
-@XmlAccessorType(XmlAccessType.FIELD)
-public class KeyMetadata {
-
-  @XmlElement(name = "Key")
-  private String key; // or the Object Name
-
-  @XmlJavaTypeAdapter(IsoDateAdapter.class)
-  @XmlElement(name = "LastModified")
-  private Instant lastModified;
-
-  @XmlElement(name = "ETag")
-  private String eTag;
-
-  @XmlElement(name = "Size")
-  private long size;
-
-  @XmlElement(name = "StorageClass")
-  private String storageClass;
-
-  public String getKey() {
-    return key;
-  }
-
-  public void setKey(String key) {
-    this.key = key;
-  }
-
-  public Instant getLastModified() {
-    return lastModified;
-  }
-
-  public void setLastModified(Instant lastModified) {
-    this.lastModified = lastModified;
-  }
-
-  public String getETag() {
-    return eTag;
-  }
-
-  public void setETag(String tag) {
-    this.eTag = tag;
-  }
-
-  public long getSize() {
-    return size;
-  }
-
-  public void setSize(long size) {
-    this.size = size;
-  }
-
-  public String getStorageClass() {
-    return storageClass;
-  }
-
-  public void setStorageClass(String storageClass) {
-    this.storageClass = storageClass;
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/package-info.java
deleted file mode 100644
index dd916e8..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/package-info.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Common classes required for the S3 REST APIs.
- */
-@javax.xml.bind.annotation.XmlSchema(
-    namespace = "http://s3.amazonaws"
-        + ".com/doc/2006-03-01/", elementFormDefault =
-    javax.xml.bind.annotation.XmlNsForm.QUALIFIED,
-    xmlns = {
-        @javax.xml.bind.annotation.XmlNs(namespaceURI = "http://s3.amazonaws"
-            + ".com/doc/2006-03-01/", prefix = "")})
-package org.apache.hadoop.ozone.s3.commontypes;
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
deleted file mode 100644
index e4db6cc..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ /dev/null
@@ -1,347 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.ws.rs.DELETE;
-import javax.ws.rs.DefaultValue;
-import javax.ws.rs.GET;
-import javax.ws.rs.HEAD;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
-import java.io.IOException;
-import java.io.InputStream;
-import java.time.Instant;
-import java.util.Iterator;
-
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneMultipartUploadList;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.s3.commontypes.KeyMetadata;
-import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteRequest.DeleteObject;
-import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteResponse.DeletedObject;
-import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteResponse.Error;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.apache.hadoop.ozone.s3.util.ContinueToken;
-import org.apache.hadoop.ozone.s3.util.S3StorageType;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.http.HttpStatus;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.s3.util.OzoneS3Util.getVolumeName;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE;
-
-/**
- * Bucket level rest endpoints.
- */
-@Path("/{bucket}")
-public class BucketEndpoint extends EndpointBase {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BucketEndpoint.class);
-
-  /**
-   * Rest endpoint to list objects in a specific bucket.
-   * <p>
-   * See: https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
-   * for more details.
-   */
-  @GET
-  @SuppressFBWarnings
-  @SuppressWarnings("parameternumber")
-  public Response list(
-      @PathParam("bucket") String bucketName,
-      @QueryParam("delimiter") String delimiter,
-      @QueryParam("encoding-type") String encodingType,
-      @QueryParam("marker") String marker,
-      @DefaultValue("1000") @QueryParam("max-keys") int maxKeys,
-      @QueryParam("prefix") String prefix,
-      @QueryParam("browser") String browser,
-      @QueryParam("continuation-token") String continueToken,
-      @QueryParam("start-after") String startAfter,
-      @QueryParam("uploads") String uploads,
-      @Context HttpHeaders hh) throws OS3Exception, IOException {
-
-    if (browser != null) {
-      InputStream browserPage = getClass()
-          .getResourceAsStream("/browser.html");
-      return Response.ok(browserPage,
-            MediaType.TEXT_HTML_TYPE)
-            .build();
-
-    }
-
-    if (uploads != null) {
-      return listMultipartUploads(bucketName, prefix);
-    }
-
-    if (prefix == null) {
-      prefix = "";
-    }
-
-    OzoneBucket bucket = getBucket(bucketName);
-
-    Iterator<? extends OzoneKey> ozoneKeyIterator;
-
-    ContinueToken decodedToken =
-        ContinueToken.decodeFromString(continueToken);
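-    // Note: decodedToken may be null when no continuation token was given;
-    // it is only dereferenced on branches where continueToken != null.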
-
-    if (startAfter != null && continueToken != null) {
-      // If both continuation-token and start-after are provided, the
-      // continuation token wins and start-after is ignored.
-      ozoneKeyIterator = bucket.listKeys(prefix, decodedToken.getLastKey());
-    } else if (startAfter != null && continueToken == null) {
-      ozoneKeyIterator = bucket.listKeys(prefix, startAfter);
-    } else if (startAfter == null && continueToken != null) {
-      ozoneKeyIterator = bucket.listKeys(prefix, decodedToken.getLastKey());
-    } else {
-      ozoneKeyIterator = bucket.listKeys(prefix);
-    }
-
-    ListObjectResponse response = new ListObjectResponse();
-    response.setDelimiter(delimiter);
-    response.setName(bucketName);
-    response.setPrefix(prefix);
-    response.setMarker("");
-    response.setMaxKeys(maxKeys);
-    response.setEncodingType(ENCODING_TYPE);
-    response.setTruncated(false);
-    response.setContinueToken(continueToken);
-
-    String prevDir = null;
-    if (continueToken != null) {
-      prevDir = decodedToken.getLastDir();
-    }
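-    // lastKey is set only when the page fills (count == maxKeys); it seeds
-    // the continuation token for the next page.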
-    String lastKey = null;
-    int count = 0;
-    while (ozoneKeyIterator.hasNext()) {
-      OzoneKey next = ozoneKeyIterator.next();
-      String relativeKeyName = next.getName().substring(prefix.length());
-
-      int depth = StringUtils.countMatches(relativeKeyName, delimiter);
-      if (delimiter != null) {
-        if (depth > 0) {
-          // The key contains at least one delimiter beyond the prefix.
-          // ex: dir/dir1/dir2, where delimiter is "/" and prefix is dir/
-          String dirName = relativeKeyName.substring(0, relativeKeyName
-              .indexOf(delimiter));
-          if (!dirName.equals(prevDir)) {
-            response.addPrefix(prefix + dirName + delimiter);
-            prevDir = dirName;
-            count++;
-          }
-        } else if (relativeKeyName.endsWith(delimiter)) {
-          // The key equals the prefix plus a trailing delimiter.
-          // ex: dir/, where prefix is dir and delimiter is /
-          response.addPrefix(relativeKeyName);
-          count++;
-        } else {
-          // The key matches the prefix (if one was given) and contains no
-          // further delimiter, so it is returned as a plain key entry.
-          addKey(response, next);
-          count++;
-        }
-      } else {
-        addKey(response, next);
-        count++;
-      }
-
-      if (count == maxKeys) {
-        lastKey = next.getName();
-        break;
-      }
-    }
-
-    response.setKeyCount(count);
-
-    if (count < maxKeys) {
-      response.setTruncated(false);
-    } else if (ozoneKeyIterator.hasNext()) {
-      response.setTruncated(true);
-      ContinueToken nextToken = new ContinueToken(lastKey, prevDir);
-      response.setNextToken(nextToken.encodeToString());
-    } else {
-      response.setTruncated(false);
-    }
-
-    response.setKeyCount(
-        response.getCommonPrefixes().size() + response.getContents().size());
-    return Response.ok(response).build();
-  }
-
-  @PUT
-  public Response put(@PathParam("bucket") String bucketName, @Context
-      HttpHeaders httpHeaders) throws IOException, OS3Exception {
-
-    String volumeName = getVolumeName(getAuthenticationHeaderParser().
-        getAccessKeyID());
-
-    String location = createS3Bucket(volumeName, bucketName);
-
-    LOG.info("Location is {}", location);
-    return Response.status(HttpStatus.SC_OK).header("Location", location)
-        .build();
-
-  }
-
-  public Response listMultipartUploads(
-      @PathParam("bucket") String bucketName,
-      @QueryParam("prefix") String prefix)
-      throws OS3Exception, IOException {
-
-    OzoneBucket bucket = getBucket(bucketName);
-
-    OzoneMultipartUploadList ozoneMultipartUploadList =
-        bucket.listMultipartUploads(prefix);
-
-    ListMultipartUploadsResult result = new ListMultipartUploadsResult();
-    result.setBucket(bucketName);
-
-    ozoneMultipartUploadList.getUploads().forEach(upload -> result.addUpload(
-        new ListMultipartUploadsResult.Upload(
-            upload.getKeyName(),
-            upload.getUploadId(),
-            upload.getCreationTime(),
-            S3StorageType.fromReplicationType(upload.getReplicationType(),
-                upload.getReplicationFactor())
-        )));
-    return Response.ok(result).build();
-  }
-
-  /**
-   * Rest endpoint to check the existence of a bucket.
-   * <p>
-   * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketHEAD.html
-   * for more details.
-   */
-  @HEAD
-  public Response head(@PathParam("bucket") String bucketName)
-      throws OS3Exception, IOException {
-    try {
-      getBucket(bucketName);
-    } catch (OS3Exception ex) {
-      LOG.error("Exception occurred in headBucket", ex);
-      //TODO: use a subclass of OS3Exception and catch it here.
-      if (ex.getCode().contains("NoSuchBucket")) {
-        return Response.status(Status.BAD_REQUEST).build();
-      } else {
-        throw ex;
-      }
-    }
-    return Response.ok().build();
-  }
-
-  /**
-   * Rest endpoint to delete specific bucket.
-   * <p>
-   * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETE.html
-   * for more details.
-   */
-  @DELETE
-  public Response delete(@PathParam("bucket") String bucketName)
-      throws IOException, OS3Exception {
-
-    try {
-      deleteS3Bucket(bucketName);
-    } catch (OMException ex) {
-      if (ex.getResult() == ResultCodes.BUCKET_NOT_EMPTY) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .BUCKET_NOT_EMPTY, bucketName);
-      } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .NO_SUCH_BUCKET, bucketName);
-      } else {
-        throw ex;
-      }
-    }
-
-    return Response
-        .status(HttpStatus.SC_NO_CONTENT)
-        .build();
-
-  }
-
-  /**
-   * Implement multi delete.
-   * <p>
-   * see: https://docs.aws.amazon
-   * .com/AmazonS3/latest/API/multiobjectdeleteapi.html
-   */
-  @POST
-  @Produces(MediaType.APPLICATION_XML)
-  public MultiDeleteResponse multiDelete(@PathParam("bucket") String bucketName,
-      @QueryParam("delete") String delete,
-      MultiDeleteRequest request) throws OS3Exception, IOException {
-    OzoneBucket bucket = getBucket(bucketName);
-    MultiDeleteResponse result = new MultiDeleteResponse();
-    if (request.getObjects() != null) {
-      for (DeleteObject keyToDelete : request.getObjects()) {
-        try {
-          bucket.deleteKey(keyToDelete.getKey());
-
-          if (!request.isQuiet()) {
-            result.addDeleted(new DeletedObject(keyToDelete.getKey()));
-          }
-        } catch (OMException ex) {
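-          // Per S3 semantics, deleting a nonexistent key is still reported
-          // as deleted, so KEY_NOT_FOUND falls through to addDeleted below.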
-          if (ex.getResult() != ResultCodes.KEY_NOT_FOUND) {
-            result.addError(
-                new Error(keyToDelete.getKey(), "InternalError",
-                    ex.getMessage()));
-          } else if (!request.isQuiet()) {
-            result.addDeleted(new DeletedObject(keyToDelete.getKey()));
-          }
-        } catch (Exception ex) {
-          result.addError(
-              new Error(keyToDelete.getKey(), "InternalError",
-                  ex.getMessage()));
-        }
-      }
-    }
-    return result;
-  }
-
-  private void addKey(ListObjectResponse response, OzoneKey next) {
-    KeyMetadata keyMetadata = new KeyMetadata();
-    keyMetadata.setKey(next.getName());
-    keyMetadata.setSize(next.getDataSize());
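-    // The key's modification time stands in for a real ETag value here.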
-    keyMetadata.setETag("" + next.getModificationTime());
-    if (next.getReplicationType().toString().equals(ReplicationType
-        .STAND_ALONE.toString())) {
-      keyMetadata.setStorageClass(S3StorageType.REDUCED_REDUNDANCY.toString());
-    } else {
-      keyMetadata.setStorageClass(S3StorageType.STANDARD.toString());
-    }
-    keyMetadata.setLastModified(Instant.ofEpochMilli(
-        next.getModificationTime()));
-    response.addKey(keyMetadata);
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java
deleted file mode 100644
index 6120ad6..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Request body of a Complete Multipart Upload call.
- */
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlRootElement(name = "CompleteMultipartUpload")
-public class CompleteMultipartUploadRequest {
-
-  @XmlElement(name = "Part")
-  private List<Part> partList = new ArrayList<>();
-
-  public List<Part> getPartList() {
-    return partList;
-  }
-
-  public void setPartList(List<Part> partList) {
-    this.partList = partList;
-  }
-
-  /**
-   * JAXB entity for child element.
-   */
-  @XmlAccessorType(XmlAccessType.FIELD)
-  @XmlRootElement(name = "Part")
-  public static class Part {
-
-    @XmlElement(name = "PartNumber")
-    private int partNumber;
-
-    @XmlElement(name = "ETag")
-    private String eTag;
-
-    public int getPartNumber() {
-      return partNumber;
-    }
-
-    public void setPartNumber(int partNumber) {
-      this.partNumber = partNumber;
-    }
-
-    public String geteTag() {
-      return eTag;
-    }
-
-    public void seteTag(String eTag) {
-      this.eTag = eTag;
-    }
-  }
-
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java
deleted file mode 100644
index c636f36..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-/**
- * Response for a Complete Multipart Upload request.
- */
-
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlRootElement(name = "CompleteMultipartUploadResult", namespace =
-    "http://s3.amazonaws.com/doc/2006-03-01/")
-public class CompleteMultipartUploadResponse {
-
-  @XmlElement(name = "Location")
-  private String location;
-
-  @XmlElement(name = "Bucket")
-  private String bucket;
-
-  @XmlElement(name = "Key")
-  private String key;
-
-  @XmlElement(name = "ETag")
-  private String eTag;
-
-  public String getLocation() {
-    return location;
-  }
-
-  public void setLocation(String location) {
-    this.location = location;
-  }
-
-  public String getBucket() {
-    return bucket;
-  }
-
-  public void setBucket(String bucket) {
-    this.bucket = bucket;
-  }
-
-  public String getKey() {
-    return key;
-  }
-
-  public void setKey(String key) {
-    this.key = key;
-  }
-
-  public String getETag() {
-    return eTag;
-  }
-
-  public void setETag(String tag) {
-    this.eTag = tag;
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java
deleted file mode 100644
index f090791..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
-import java.time.Instant;
-
-/**
- * Copy object Response.
- */
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlRootElement(name = "ListAllMyBucketsResult",
-    namespace = "http://s3.amazonaws.com/doc/2006-03-01/")
-public class CopyObjectResponse {
-
-  @XmlJavaTypeAdapter(IsoDateAdapter.class)
-  @XmlElement(name = "LastModified")
-  private Instant lastModified;
-
-  @XmlElement(name = "ETag")
-  private String eTag;
-
-  public Instant getLastModified() {
-    return lastModified;
-  }
-
-  public void setLastModified(Instant lastModified) {
-    this.lastModified = lastModified;
-  }
-
-  public String getETag() {
-    return eTag;
-  }
-
-  public void setETag(String tag) {
-    this.eTag = tag;
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java
deleted file mode 100644
index c4e65aa..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
-import java.time.Instant;
-
-import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter;
-
-/**
- * Copy part response.
- */
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlRootElement(name = "CopyPartResult",
-    namespace = "http://s3.amazonaws.com/doc/2006-03-01/")
-public class CopyPartResult {
-
-  @XmlJavaTypeAdapter(IsoDateAdapter.class)
-  @XmlElement(name = "LastModified")
-  private Instant lastModified;
-
-  @XmlElement(name = "ETag")
-  private String eTag;
-
-  public CopyPartResult() {
-  }
-
-  public CopyPartResult(String eTag) {
-    this.eTag = eTag;
-    this.lastModified = Instant.now();
-  }
-
-  public Instant getLastModified() {
-    return lastModified;
-  }
-
-  public void setLastModified(Instant lastModified) {
-    this.lastModified = lastModified;
-  }
-
-  public String getETag() {
-    return eTag;
-  }
-
-  public void setETag(String tag) {
-    this.eTag = tag;
-  }
-
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
deleted file mode 100644
index 19329a4..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ /dev/null
@@ -1,216 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.inject.Inject;
-import javax.ws.rs.NotFoundException;
-import java.io.IOException;
-import java.util.Iterator;
-
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.apache.hadoop.ozone.s3.header.AuthenticationHeaderParser;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Basic helpers for all the REST endpoints.
- */
-public class EndpointBase {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(EndpointBase.class);
-
-  @Inject
-  private OzoneClient client;
-
-  @Inject
-  private AuthenticationHeaderParser authenticationHeaderParser;
-
-  protected OzoneBucket getBucket(String volumeName, String bucketName)
-      throws IOException {
-    return getVolume(volumeName).getBucket(bucketName);
-  }
-
-  protected OzoneBucket getBucket(OzoneVolume volume, String bucketName)
-      throws OS3Exception, IOException {
-    OzoneBucket bucket;
-    try {
-      bucket = volume.getBucket(bucketName);
-    } catch (OMException ex) {
-      if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName);
-      } else {
-        throw ex;
-      }
-    }
-    return bucket;
-  }
-
-  protected OzoneBucket getBucket(String bucketName)
-      throws OS3Exception, IOException {
-    OzoneBucket bucket;
-    try {
-      OzoneVolume volume = getVolume(getOzoneVolumeName(bucketName));
-      bucket = volume.getBucket(bucketName);
-    } catch (OMException ex) {
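-      // Either the s3-bucket mapping lookup or the bucket lookup can report
-      // not-found; both surface to the S3 client as NoSuchBucket.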
-      if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND
-          || ex.getResult() == ResultCodes.S3_BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName);
-      } else {
-        throw ex;
-      }
-    }
-    return bucket;
-  }
-
-  protected OzoneVolume getVolume(String volumeName) throws IOException {
-    OzoneVolume volume = null;
-    try {
-      volume = client.getObjectStore().getVolume(volumeName);
-    } catch (OMException ex) {
-      if (ex.getResult() == ResultCodes.VOLUME_NOT_FOUND) {
-        throw new NotFoundException("Volume " + volumeName + " is not found");
-      } else {
-        throw ex;
-      }
-    }
-    return volume;
-  }
-
-  /**
-   * Creates an S3 bucket along with the mapping needed to access it via
-   * both Ozone and S3.
-   * @param userName name of the user the bucket belongs to
-   * @param bucketName name of the bucket to create
-   * @return location of the S3Bucket.
-   * @throws IOException in case the bucket cannot be created.
-   */
-  protected String createS3Bucket(String userName, String bucketName) throws
-      IOException {
-    try {
-      client.getObjectStore().createS3Bucket(userName, bucketName);
-    } catch (OMException ex) {
-      if (ex.getResult() != ResultCodes.S3_BUCKET_ALREADY_EXISTS) {
-        // S3 does not return error for bucket already exists, it just
-        // returns the location.
-        throw ex;
-      }
-    }
-
-    // Not strictly required since the bucket name is unchanged, but calling
-    // it now means we still return the right location if the mapping changes.
-    String location = client.getObjectStore().getOzoneBucketName(
-        bucketName);
-    return "/"+location;
-  }
-
-  /**
-   * Deletes an s3 bucket and removes mapping of Ozone volume/bucket.
-   * @param s3BucketName - S3 Bucket Name.
-   * @throws  IOException in case the bucket cannot be deleted.
-   */
-  public void deleteS3Bucket(String s3BucketName)
-      throws IOException {
-    client.getObjectStore().deleteS3Bucket(s3BucketName);
-  }
-
-  /**
-   * Returns the Ozone Namespace for the S3Bucket. It will return the
-   * OzoneVolume/OzoneBucketName.
-   * @param s3BucketName  - S3 Bucket Name.
-   * @return String - The Ozone canonical name for this s3 bucket. This
-   * string is useful for mounting an OzoneFS.
-   * @throws IOException - Error is thrown if the s3bucket does not exist.
-   */
-  public String getOzoneBucketMapping(String s3BucketName) throws IOException {
-    return client.getObjectStore().getOzoneBucketMapping(s3BucketName);
-  }
-
-  /**
-   * Returns the corresponding Ozone volume given an S3 Bucket.
-   * @param s3BucketName - S3Bucket Name.
-   * @return String - Ozone Volume name.
-   * @throws IOException - Thrown if the s3Bucket does not exist.
-   */
-  public String getOzoneVolumeName(String s3BucketName) throws IOException {
-    return client.getObjectStore().getOzoneVolumeName(s3BucketName);
-  }
-
-  /**
-   * Returns the corresponding Ozone bucket name for the given S3 bucket.
-   * @param s3BucketName - S3Bucket Name.
-   * @return String - Ozone bucket Name.
-   * @throws IOException - Thrown if the s3bucket does not exist.
-   */
-  public String getOzoneBucketName(String s3BucketName) throws IOException {
-    return client.getObjectStore().getOzoneBucketName(s3BucketName);
-  }
-
-  /**
-   * Returns an Iterator over all buckets of a specific user. The result
-   * can be restricted with a bucket prefix; all buckets are returned if
-   * the prefix is null.
-   *
-   * @param userName user whose buckets are listed
-   * @param prefix bucket prefix to match
-   * @return {@code Iterator<OzoneBucket>}
-   */
-  public Iterator<? extends OzoneBucket> listS3Buckets(String userName,
-                                                       String prefix)  {
-    return client.getObjectStore().listS3Buckets(userName, prefix);
-  }
-
-  /**
-   * Returns an Iterator over all buckets after previousBucket for a
-   * specific user. If previousBucket is null, iteration starts from the
-   * first bucket. The result can be restricted with a bucket prefix; all
-   * buckets are returned if the prefix is null.
-   *
-   * @param userName user whose buckets are listed
-   * @param prefix Bucket prefix to match
-   * @param previousBucket Buckets are listed after this bucket
-   * @return {@code Iterator<OzoneBucket>}
-   */
-  public Iterator<? extends OzoneBucket> listS3Buckets(String userName,
-                                                       String prefix,
-                                                       String previousBucket)  {
-    return client.getObjectStore().listS3Buckets(userName, prefix,
-        previousBucket);
-  }
-
-  public AuthenticationHeaderParser getAuthenticationHeaderParser() {
-    return authenticationHeaderParser;
-  }
-
-  @VisibleForTesting
-  public void setAuthenticationHeaderParser(AuthenticationHeaderParser parser) {
-    this.authenticationHeaderParser = parser;
-  }
-
-  @VisibleForTesting
-  public void setClient(OzoneClient ozoneClient) {
-    this.client = ozoneClient;
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListBucketResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListBucketResponse.java
deleted file mode 100644
index b9f8702..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListBucketResponse.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.ozone.s3.commontypes.BucketMetadata;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlElementWrapper;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Response from the ListBucket RPC Call.
- */
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlRootElement(name = "ListAllMyBucketsResult",
-    namespace = "http://s3.amazonaws.com/doc/2006-03-01/")
-public class ListBucketResponse {
-  @XmlElementWrapper(name = "Buckets")
-  @XmlElement(name = "Bucket")
-  private List<BucketMetadata> buckets = new ArrayList<>();
-
-  public List<BucketMetadata> getBuckets() {
-    return buckets;
-  }
-
-  @VisibleForTesting
-  public int getBucketsNum() {
-    return buckets.size();
-  }
-
-  public void setBuckets(List<BucketMetadata> buckets) {
-    this.buckets = buckets;
-  }
-
-  public void addBucket(BucketMetadata bucket) {
-    buckets.add(bucket);
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsResult.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsResult.java
deleted file mode 100644
index 20dc9cd..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsResult.java
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter;
-import org.apache.hadoop.ozone.s3.util.S3StorageType;
-
-/**
- * AWS compatible REST response for list multipart upload.
- */
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlRootElement(name = "ListMultipartUploadsResult", namespace =
-    "http://s3.amazonaws.com/doc/2006-03-01/")
-public class ListMultipartUploadsResult {
-
-  public static final Owner NOT_SUPPORTED_OWNER =
-      new Owner("NOT-SUPPORTED", "Not Supported");
-
-  @XmlElement(name = "Bucket")
-  private String bucket;
-
-  @XmlElement(name = "KeyMarker")
-  private String keyMarker;
-
-  @XmlElement(name = "UploadIdMarker")
-  private String uploadIdMarker;
-
-  @XmlElement(name = "NextKeyMarker")
-  private String nextKeyMarker;
-
-  @XmlElement(name = "NextUploadIdMarker")
-  private String nextUploadIdMarker;
-
-  @XmlElement(name = "MaxUploads")
-  private int maxUploads = 1000;
-
-  @XmlElement(name = "IsTruncated")
-  private boolean isTruncated = false;
-
-  @XmlElement(name = "Upload")
-  private List<Upload> uploads = new ArrayList<>();
-
-  public String getBucket() {
-    return bucket;
-  }
-
-  public void setBucket(String bucket) {
-    this.bucket = bucket;
-  }
-
-  public String getKeyMarker() {
-    return keyMarker;
-  }
-
-  public void setKeyMarker(String keyMarker) {
-    this.keyMarker = keyMarker;
-  }
-
-  public String getUploadIdMarker() {
-    return uploadIdMarker;
-  }
-
-  public void setUploadIdMarker(String uploadIdMarker) {
-    this.uploadIdMarker = uploadIdMarker;
-  }
-
-  public String getNextKeyMarker() {
-    return nextKeyMarker;
-  }
-
-  public void setNextKeyMarker(String nextKeyMarker) {
-    this.nextKeyMarker = nextKeyMarker;
-  }
-
-  public String getNextUploadIdMarker() {
-    return nextUploadIdMarker;
-  }
-
-  public void setNextUploadIdMarker(String nextUploadIdMarker) {
-    this.nextUploadIdMarker = nextUploadIdMarker;
-  }
-
-  public int getMaxUploads() {
-    return maxUploads;
-  }
-
-  public void setMaxUploads(int maxUploads) {
-    this.maxUploads = maxUploads;
-  }
-
-  public boolean isTruncated() {
-    return isTruncated;
-  }
-
-  public void setTruncated(boolean truncated) {
-    isTruncated = truncated;
-  }
-
-  public List<Upload> getUploads() {
-    return uploads;
-  }
-
-  public void setUploads(
-      List<Upload> uploads) {
-    this.uploads = uploads;
-  }
-
-  public void addUpload(Upload upload) {
-    this.uploads.add(upload);
-  }
-
-  /**
-   * Upload information.
-   */
-  @XmlAccessorType(XmlAccessType.FIELD)
-  @XmlRootElement(name = "Upload")
-  public static class Upload {
-
-    @XmlElement(name = "Key")
-    private String key;
-
-    @XmlElement(name = "UploadId")
-    private String uploadId;
-
-    @XmlElement(name = "Owner")
-    private Owner owner = NOT_SUPPORTED_OWNER;
-
-    @XmlElement(name = "Initiator")
-    private Owner initiator = NOT_SUPPORTED_OWNER;
-
-    @XmlElement(name = "StorageClass")
-    private String storageClass = "STANDARD";
-
-    @XmlJavaTypeAdapter(IsoDateAdapter.class)
-    @XmlElement(name = "Initiated")
-    private Instant initiated;
-
-    public Upload() {
-    }
-
-    public Upload(String key, String uploadId, Instant initiated) {
-      this.key = key;
-      this.uploadId = uploadId;
-      this.initiated = initiated;
-    }
-
-    public Upload(String key, String uploadId, Instant initiated,
-        S3StorageType storageClass) {
-      this.key = key;
-      this.uploadId = uploadId;
-      this.initiated = initiated;
-      this.storageClass = storageClass.toString();
-    }
-
-    public String getKey() {
-      return key;
-    }
-
-    public void setKey(String key) {
-      this.key = key;
-    }
-
-    public String getUploadId() {
-      return uploadId;
-    }
-
-    public void setUploadId(String uploadId) {
-      this.uploadId = uploadId;
-    }
-
-    public Owner getOwner() {
-      return owner;
-    }
-
-    public void setOwner(
-        Owner owner) {
-      this.owner = owner;
-    }
-
-    public Owner getInitiator() {
-      return initiator;
-    }
-
-    public void setInitiator(
-        Owner initiator) {
-      this.initiator = initiator;
-    }
-
-    public String getStorageClass() {
-      return storageClass;
-    }
-
-    public void setStorageClass(String storageClass) {
-      this.storageClass = storageClass;
-    }
-
-    public Instant getInitiated() {
-      return initiated;
-    }
-
-    public void setInitiated(Instant initiated) {
-      this.initiated = initiated;
-    }
-  }
-
-  /**
-   * Owner information.
-   */
-  @XmlAccessorType(XmlAccessType.FIELD)
-  @XmlRootElement(name = "Owner")
-  public static class Owner {
-
-    @XmlElement(name = "ID")
-    private String id;
-
-    @XmlElement(name = "DisplayName")
-    private String displayName;
-
-    public Owner() {
-    }
-
-    public Owner(String id, String displayName) {
-      this.id = id;
-      this.displayName = displayName;
-    }
-
-    public String getId() {
-      return id;
-    }
-
-    public void setId(String id) {
-      this.id = id;
-    }
-
-    public String getDisplayName() {
-      return displayName;
-    }
-
-    public void setDisplayName(String displayName) {
-      this.displayName = displayName;
-    }
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java
deleted file mode 100644
index adb5f20..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.ozone.s3.commontypes.CommonPrefix;
-import org.apache.hadoop.ozone.s3.commontypes.KeyMetadata;
-
-/**
- * Response from the ListObject RPC Call.
- */
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlRootElement(name = "ListBucketResult", namespace = "http://s3.amazonaws"
-    + ".com/doc/2006-03-01/")
-public class ListObjectResponse {
-
-  @XmlElement(name = "Name")
-  private String name;
-
-  @XmlElement(name = "Prefix")
-  private String prefix;
-
-  @XmlElement(name = "Marker")
-  private String marker;
-
-  @XmlElement(name = "MaxKeys")
-  private int maxKeys;
-
-  @XmlElement(name = "KeyCount")
-  private int keyCount;
-
-  @XmlElement(name = "Delimiter")
-  private String delimiter = "/";
-
-  @XmlElement(name = "EncodingType")
-  private String encodingType = "url";
-
-  @XmlElement(name = "IsTruncated")
-  private boolean isTruncated;
-
-  @XmlElement(name = "NextContinuationToken")
-  private String nextToken;
-
-  @XmlElement(name = "continueToken")
-  private String continueToken;
-
-  @XmlElement(name = "Contents")
-  private List<KeyMetadata> contents = new ArrayList<>();
-
-  @XmlElement(name = "CommonPrefixes")
-  private List<CommonPrefix> commonPrefixes = new ArrayList<>();
-
-  public String getName() {
-    return name;
-  }
-
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  public String getPrefix() {
-    return prefix;
-  }
-
-  public void setPrefix(String prefix) {
-    this.prefix = prefix;
-  }
-
-  public String getMarker() {
-    return marker;
-  }
-
-  public void setMarker(String marker) {
-    this.marker = marker;
-  }
-
-  public int getMaxKeys() {
-    return maxKeys;
-  }
-
-  public void setMaxKeys(int maxKeys) {
-    this.maxKeys = maxKeys;
-  }
-
-  public String getDelimiter() {
-    return delimiter;
-  }
-
-  public void setDelimiter(String delimiter) {
-    this.delimiter = delimiter;
-  }
-
-  public String getEncodingType() {
-    return encodingType;
-  }
-
-  public void setEncodingType(String encodingType) {
-    this.encodingType = encodingType;
-  }
-
-  public boolean isTruncated() {
-    return isTruncated;
-  }
-
-  public void setTruncated(boolean truncated) {
-    isTruncated = truncated;
-  }
-
-  public List<KeyMetadata> getContents() {
-    return contents;
-  }
-
-  public void setContents(
-      List<KeyMetadata> contents) {
-    this.contents = contents;
-  }
-
-  public List<CommonPrefix> getCommonPrefixes() {
-    return commonPrefixes;
-  }
-
-  public void setCommonPrefixes(
-      List<CommonPrefix> commonPrefixes) {
-    this.commonPrefixes = commonPrefixes;
-  }
-
-  public void addKey(KeyMetadata keyMetadata) {
-    contents.add(keyMetadata);
-  }
-
-  public void addPrefix(String relativeKeyName) {
-    commonPrefixes.add(new CommonPrefix(relativeKeyName));
-  }
-
-  public String getNextToken() {
-    return nextToken;
-  }
-
-  public void setNextToken(String nextToken) {
-    this.nextToken = nextToken;
-  }
-
-  public String getContinueToken() {
-    return continueToken;
-  }
-
-  public void setContinueToken(String continueToken) {
-    this.continueToken = continueToken;
-  }
-
-  public int getKeyCount() {
-    return keyCount;
-  }
-
-  public void setKeyCount(int keyCount) {
-    this.keyCount = keyCount;
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java
deleted file mode 100644
index fc9da14..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Response listing the parts of a multipart upload.
- */
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlRootElement(name = "ListPartsResult", namespace = "http://s3.amazonaws"
-    + ".com/doc/2006-03-01/")
-public class ListPartsResponse {
-
-  @XmlElement(name = "Bucket")
-  private String bucket;
-
-  @XmlElement(name = "Key")
-  private String key;
-
-  @XmlElement(name = "UploadId")
-  private String uploadID;
-
-  @XmlElement(name = "StorageClass")
-  private String storageClass;
-
-  @XmlElement(name = "PartNumberMarker")
-  private int partNumberMarker;
-
-  @XmlElement(name = "NextPartNumberMarker")
-  private int nextPartNumberMarker;
-
-  @XmlElement(name = "MaxParts")
-  private int maxParts;
-
-  @XmlElement(name = "IsTruncated")
-  private boolean truncated;
-
-  @XmlElement(name = "Part")
-  private List<Part> partList = new ArrayList<>();
-
-  public String getBucket() {
-    return bucket;
-  }
-
-  public void setBucket(String bucket) {
-    this.bucket = bucket;
-  }
-
-  public String getKey() {
-    return key;
-  }
-
-  public void setKey(String key) {
-    this.key = key;
-  }
-
-  public String getUploadID() {
-    return uploadID;
-  }
-
-  public void setUploadID(String uploadID) {
-    this.uploadID = uploadID;
-  }
-
-  public String getStorageClass() {
-    return storageClass;
-  }
-
-  public void setStorageClass(String storageClass) {
-    this.storageClass = storageClass;
-  }
-
-  public int getPartNumberMarker() {
-    return partNumberMarker;
-  }
-
-  public void setPartNumberMarker(int partNumberMarker) {
-    this.partNumberMarker = partNumberMarker;
-  }
-
-  public int getNextPartNumberMarker() {
-    return nextPartNumberMarker;
-  }
-
-  public void setNextPartNumberMarker(int nextPartNumberMarker) {
-    this.nextPartNumberMarker = nextPartNumberMarker;
-  }
-
-  public int getMaxParts() {
-    return maxParts;
-  }
-
-  public void setMaxParts(int maxParts) {
-    this.maxParts = maxParts;
-  }
-
-  public boolean getTruncated() {
-    return truncated;
-  }
-
-  public void setTruncated(boolean truncated) {
-    this.truncated = truncated;
-  }
-
-  public List<Part> getPartList() {
-    return partList;
-  }
-
-  public void setPartList(List<Part> partList) {
-    this.partList = partList;
-  }
-
-  public void addPart(Part part) {
-    this.partList.add(part);
-  }
-
-  /**
-   * Part information.
-   */
-  @XmlAccessorType(XmlAccessType.FIELD)
-  @XmlRootElement(name = "Part")
-  public static class Part {
-
-    @XmlElement(name = "PartNumber")
-    private int partNumber;
-
-    @XmlJavaTypeAdapter(IsoDateAdapter.class)
-    @XmlElement(name = "LastModified")
-    private Instant lastModified;
-
-    @XmlElement(name = "ETag")
-    private String eTag;
-
-    @XmlElement(name = "Size")
-    private long size;
-
-    public int getPartNumber() {
-      return partNumber;
-    }
-
-    public void setPartNumber(int partNumber) {
-      this.partNumber = partNumber;
-    }
-
-    public Instant getLastModified() {
-      return lastModified;
-    }
-
-    public void setLastModified(Instant lastModified) {
-      this.lastModified = lastModified;
-    }
-
-    public String getETag() {
-      return eTag;
-    }
-
-    public void setETag(String tag) {
-      this.eTag = tag;
-    }
-
-    public long getSize() {
-      return size;
-    }
-
-    public void setSize(long size) {
-      this.size = size;
-    }
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequest.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequest.java
deleted file mode 100644
index 45b8322..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequest.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Request body of a multi-object delete call.
- */
-
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlRootElement(name = "Delete", namespace = "http://s3.amazonaws"
-    + ".com/doc/2006-03-01/")
-public class MultiDeleteRequest {
-
-  @XmlElement(name = "Quiet")
-  private Boolean quiet = Boolean.FALSE;
-
-  @XmlElement(name = "Object")
-  private List<DeleteObject> objects = new ArrayList<>();
-
-  public boolean isQuiet() {
-    return quiet;
-  }
-
-  public void setQuiet(boolean quiet) {
-    this.quiet = quiet;
-  }
-
-  public List<DeleteObject> getObjects() {
-    return objects;
-  }
-
-  public void setObjects(
-      List<DeleteObject> objects) {
-    this.objects = objects;
-  }
-
-  /**
-   * JAXB entity for child element.
-   */
-  @XmlAccessorType(XmlAccessType.FIELD)
-  @XmlRootElement(name = "Object", namespace = "http://s3.amazonaws"
-      + ".com/doc/2006-03-01/")
-  public static class DeleteObject {
-
-    @XmlElement(name = "Key")
-    private String key;
-
-    @XmlElement(name = "VersionId")
-    private String versionId;
-
-    public DeleteObject() {
-    }
-
-    public DeleteObject(String key) {
-      this.key = key;
-    }
-
-    public String getKey() {
-      return key;
-    }
-
-    public void setKey(String key) {
-      this.key = key;
-    }
-
-    public String getVersionId() {
-      return versionId;
-    }
-
-    public void setVersionId(String versionId) {
-      this.versionId = versionId;
-    }
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java
deleted file mode 100644
index e8ed515..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.ext.MessageBodyReader;
-import javax.ws.rs.ext.Provider;
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.UnmarshallerHandler;
-import javax.xml.parsers.SAXParserFactory;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.annotation.Annotation;
-import java.lang.reflect.Type;
-
-import org.xml.sax.InputSource;
-import org.xml.sax.XMLReader;
-
-/**
- * Custom unmarshaller to read MultiDeleteRequest w/wo namespace.
- */
-@Provider
-@Produces(MediaType.APPLICATION_XML)
-public class MultiDeleteRequestUnmarshaller
-    implements MessageBodyReader<MultiDeleteRequest> {
-
-  private final JAXBContext context;
-  private final XMLReader xmlReader;
-
-  public MultiDeleteRequestUnmarshaller() {
-    try {
-      context = JAXBContext.newInstance(MultiDeleteRequest.class);
-      SAXParserFactory saxParserFactory = SAXParserFactory.newInstance();
-      xmlReader = saxParserFactory.newSAXParser().getXMLReader();
-    } catch (Exception ex) {
-      throw new AssertionError("Can't instantiate MultiDeleteRequest parser",
-          ex);
-    }
-  }
-
-  @Override
-  public boolean isReadable(Class<?> type, Type genericType,
-      Annotation[] annotations, MediaType mediaType) {
-    return type.equals(MultiDeleteRequest.class);
-  }
-
-  @Override
-  public MultiDeleteRequest readFrom(Class<MultiDeleteRequest> type,
-      Type genericType, Annotation[] annotations, MediaType mediaType,
-      MultivaluedMap<String, String> httpHeaders, InputStream entityStream)
-      throws IOException, WebApplicationException {
-    try {
-      UnmarshallerHandler unmarshallerHandler =
-          context.createUnmarshaller().getUnmarshallerHandler();
-
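-      // Force the expected AWS namespace onto the document so that requests
-      // sent with or without an xmlns attribute unmarshal identically.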
-      XmlNamespaceFilter filter =
-          new XmlNamespaceFilter("http://s3.amazonaws.com/doc/2006-03-01/");
-      filter.setContentHandler(unmarshallerHandler);
-      filter.setParent(xmlReader);
-      filter.parse(new InputSource(entityStream));
-      return (MultiDeleteRequest) unmarshallerHandler.getResult();
-    } catch (Exception e) {
-      throw new WebApplicationException("Can't parse request body to XML.", e);
-    }
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteResponse.java
deleted file mode 100644
index f2e21e6..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteResponse.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Response for multi object delete request.
- */
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlRootElement(name = "DeleteResult", namespace = "http://s3.amazonaws"
-    + ".com/doc/2006-03-01/")
-public class MultiDeleteResponse {
-
-  @XmlElement(name = "Deleted")
-  private List<DeletedObject> deletedObjects = new ArrayList<>();
-
-  @XmlElement(name = "Error")
-  private List<Error> errors = new ArrayList<>();
-
-  public void addDeleted(DeletedObject deletedObject) {
-    deletedObjects.add(deletedObject);
-  }
-
-  public void addError(Error error) {
-    errors.add(error);
-  }
-
-  public List<DeletedObject> getDeletedObjects() {
-    return deletedObjects;
-  }
-
-  public void setDeletedObjects(
-      List<DeletedObject> deletedObjects) {
-    this.deletedObjects = deletedObjects;
-  }
-
-  public List<Error> getErrors() {
-    return errors;
-  }
-
-  public void setErrors(
-      List<Error> errors) {
-    this.errors = errors;
-  }
-
-  /**
-   * JAXB entity for child element.
-   */
-  @XmlAccessorType(XmlAccessType.FIELD)
-  @XmlRootElement(name = "Deleted", namespace = "http://s3.amazonaws"
-      + ".com/doc/2006-03-01/")
-  public static class DeletedObject {
-
-    @XmlElement(name = "Key")
-    private String key;
-
-    private String versionId;
-
-    public DeletedObject() {
-    }
-
-    public DeletedObject(String key) {
-      this.key = key;
-    }
-
-    public String getKey() {
-      return key;
-    }
-
-    public void setKey(String key) {
-      this.key = key;
-    }
-
-    public String getVersionId() {
-      return versionId;
-    }
-
-    public void setVersionId(String versionId) {
-      this.versionId = versionId;
-    }
-  }
-
-  /**
-   * JAXB entity for child element.
-   */
-  @XmlAccessorType(XmlAccessType.FIELD)
-  @XmlRootElement(name = "Error", namespace = "http://s3.amazonaws"
-      + ".com/doc/2006-03-01/")
-  public static class Error {
-
-    @XmlElement(name = "Key")
-    private String key;
-
-    @XmlElement(name = "Code")
-    private String code;
-
-    @XmlElement(name = "Message")
-    private String message;
-
-    public Error() {
-    }
-
-    public Error(String key, String code, String message) {
-      this.key = key;
-      this.code = code;
-      this.message = message;
-    }
-
-    public String getKey() {
-      return key;
-    }
-
-    public void setKey(String key) {
-      this.key = key;
-    }
-
-    public String getCode() {
-      return code;
-    }
-
-    public void setCode(String code) {
-      this.code = code;
-    }
-
-    public String getMessage() {
-      return message;
-    }
-
-    public void setMessage(String message) {
-      this.message = message;
-    }
-  }
-}
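
The DeleteResult document this class produces comes entirely from the JAXB
annotations; no hand-written serializer is involved. A minimal sketch of
marshalling such a response to S3-style XML (a standalone illustration, not
part of the deleted sources):

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.Marshaller;
    import java.io.StringWriter;

    public class MultiDeleteResponseDemo {
      public static void main(String[] args) throws Exception {
        MultiDeleteResponse response = new MultiDeleteResponse();
        response.addDeleted(new MultiDeleteResponse.DeletedObject("key1"));
        response.addError(new MultiDeleteResponse.Error(
            "key2", "AccessDenied", "Access Denied"));

        // JAXB derives the element names and the S3 namespace from the
        // annotations on the response class above.
        Marshaller marshaller = JAXBContext
            .newInstance(MultiDeleteResponse.class).createMarshaller();
        marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);

        StringWriter out = new StringWriter();
        marshaller.marshal(response, out);
        // <DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">...
        System.out.println(out);
      }
    }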
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartUploadInitiateResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartUploadInitiateResponse.java
deleted file mode 100644
index c038820..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartUploadInitiateResponse.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-
-/**
- * Response for Initiate Multipart Upload request.
- */
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlRootElement(name = "InitiateMultipartUploadResult",
-    namespace = "http://s3.amazonaws.com/doc/2006-03-01/")
-public class MultipartUploadInitiateResponse {
-
-  @XmlElement(name = "Bucket")
-  private String bucket;
-
-  @XmlElement(name = "Key")
-  private String key;
-
-  @XmlElement(name = "UploadId")
-  private String uploadID;
-
-  public String getBucket() {
-    return bucket;
-  }
-
-  public void setBucket(String bucket) {
-    this.bucket = bucket;
-  }
-
-  public String getKey() {
-    return key;
-  }
-
-  public void setKey(String key) {
-    this.key = key;
-  }
-
-  public String getUploadID() {
-    return uploadID;
-  }
-
-  public void setUploadID(String uploadID) {
-    this.uploadID = uploadID;
-  }
-}
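
Since the class is namespace-qualified, JAXB can also read a standard S3
initiate response straight back into it. A short sketch (the sample XML is
illustrative, not taken from the deleted sources):

    import javax.xml.bind.JAXBContext;
    import java.io.StringReader;

    public class InitiateResponseParseDemo {
      public static void main(String[] args) throws Exception {
        String xml = "<InitiateMultipartUploadResult"
            + " xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">"
            + "<Bucket>bucket1</Bucket>"
            + "<Key>key1</Key>"
            + "<UploadId>abc-123</UploadId>"
            + "</InitiateMultipartUploadResult>";

        // The root element name and namespace on the class drive the binding.
        MultipartUploadInitiateResponse response =
            (MultipartUploadInitiateResponse) JAXBContext
                .newInstance(MultipartUploadInitiateResponse.class)
                .createUnmarshaller()
                .unmarshal(new StringReader(xml));

        System.out.println(response.getUploadID()); // abc-123
      }
    }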
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
deleted file mode 100644
index b947c9e..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ /dev/null
@@ -1,766 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.DELETE;
-import javax.ws.rs.DefaultValue;
-import javax.ws.rs.GET;
-import javax.ws.rs.HEAD;
-import javax.ws.rs.HeaderParam;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.ws.rs.core.Response.Status;
-import javax.ws.rs.core.StreamingOutput;
-import java.io.IOException;
-import java.io.InputStream;
-import java.time.Instant;
-import java.time.ZoneId;
-import java.time.ZonedDateTime;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneKeyDetails;
-import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.ozone.s3.HeaderPreprocessor;
-import org.apache.hadoop.ozone.s3.SignedChunksInputStream;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.apache.hadoop.ozone.s3.io.S3WrapperInputStream;
-import org.apache.hadoop.ozone.s3.util.RFC1123Util;
-import org.apache.hadoop.ozone.s3.util.RangeHeader;
-import org.apache.hadoop.ozone.s3.util.RangeHeaderParserUtil;
-import org.apache.hadoop.ozone.s3.util.S3StorageType;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.util.Time;
-
-import com.google.common.annotations.VisibleForTesting;
-import static javax.ws.rs.core.HttpHeaders.CONTENT_LENGTH;
-import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED;
-import org.apache.commons.io.IOUtils;
-
-import org.apache.commons.lang3.tuple.Pair;
-import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ENTITY_TOO_SMALL;
-import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_UPLOAD;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCEPT_RANGE_HEADER;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.CONTENT_RANGE_HEADER;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER_RANGE;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_SUPPORTED_UNIT;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
-import org.apache.http.HttpStatus;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Key level rest endpoints.
- */
-@Path("/{bucket}/{path:.+}")
-public class ObjectEndpoint extends EndpointBase {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ObjectEndpoint.class);
-
-  @Context
-  private HttpHeaders headers;
-
-  private List<String> customizableGetHeaders = new ArrayList<>();
-
-  public ObjectEndpoint() {
-    customizableGetHeaders.add("Content-Type");
-    customizableGetHeaders.add("Content-Language");
-    customizableGetHeaders.add("Expires");
-    customizableGetHeaders.add("Cache-Control");
-    customizableGetHeaders.add("Content-Disposition");
-    customizableGetHeaders.add("Content-Encoding");
-  }
-
-  /**
-   * Rest endpoint to upload an object to a bucket.
-   * <p>
-   * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
-   * more details.
-   */
-  @PUT
-  public Response put(
-      @PathParam("bucket") String bucketName,
-      @PathParam("path") String keyPath,
-      @HeaderParam("Content-Length") long length,
-      @QueryParam("partNumber")  int partNumber,
-      @QueryParam("uploadId") @DefaultValue("") String uploadID,
-      InputStream body) throws IOException, OS3Exception {
-
-    OzoneOutputStream output = null;
-
-    if (uploadID != null && !uploadID.equals("")) {
-      // If uploadID is specified, it is a request for upload part
-      return createMultipartKey(bucketName, keyPath, length,
-          partNumber, uploadID, body);
-    }
-
-    try {
-      String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
-      String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
-
-      ReplicationType replicationType;
-      ReplicationFactor replicationFactor;
-      boolean storageTypeDefault;
-      if (storageType == null || storageType.equals("")) {
-        replicationType = S3StorageType.getDefault().getType();
-        replicationFactor = S3StorageType.getDefault().getFactor();
-        storageTypeDefault = true;
-      } else {
-        try {
-          replicationType = S3StorageType.valueOf(storageType).getType();
-          replicationFactor = S3StorageType.valueOf(storageType).getFactor();
-        } catch (IllegalArgumentException ex) {
-          throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT,
-              storageType);
-        }
-        storageTypeDefault = false;
-      }
-
-      if (copyHeader != null) {
-        //Copy object, as copy source available.
-        CopyObjectResponse copyObjectResponse = copyObject(
-            copyHeader, bucketName, keyPath, replicationType,
-            replicationFactor, storageTypeDefault);
-        return Response.status(Status.OK).entity(copyObjectResponse).header(
-            "Connection", "close").build();
-      }
-
-      // Normal put object
-      OzoneBucket bucket = getBucket(bucketName);
-
-      output = bucket.createKey(keyPath, length, replicationType,
-          replicationFactor, new HashMap<>());
-
-      if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
-          .equals(headers.getHeaderString("x-amz-content-sha256"))) {
-        body = new SignedChunksInputStream(body);
-      }
-
-      IOUtils.copy(body, output);
-
-      return Response.ok().status(HttpStatus.SC_OK)
-          .build();
-    } catch (IOException ex) {
-      LOG.error("Exception occurred in PutObject", ex);
-      throw ex;
-    } finally {
-      if (output != null) {
-        output.close();
-      }
-    }
-  }
-
-  /**
-   * Rest endpoint to download an object from a bucket. If the query param
-   * uploadId is specified, the request lists the parts of the multipart
-   * upload with that uploadId.
-   * <p>
-   * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
-   * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html
-   * for more details.
-   */
-  @GET
-  public Response get(
-      @PathParam("bucket") String bucketName,
-      @PathParam("path") String keyPath,
-      @QueryParam("uploadId") String uploadId,
-      @QueryParam("max-parts") @DefaultValue("1000") int maxParts,
-      @QueryParam("part-number-marker") String partNumberMarker,
-      InputStream body) throws IOException, OS3Exception {
-    try {
-
-      if (uploadId != null) {
-        // When uploadId is present, this is a list-parts request.
-        int partMarker = 0;
-        if (partNumberMarker != null) {
-          partMarker = Integer.parseInt(partNumberMarker);
-        }
-        return listParts(bucketName, keyPath, uploadId,
-            partMarker, maxParts);
-      }
-
-      OzoneBucket bucket = getBucket(bucketName);
-
-      OzoneKeyDetails keyDetails = bucket.getKey(keyPath);
-
-      long length = keyDetails.getDataSize();
-
-      LOG.debug("Data length of the key {} is {}", keyPath, length);
-
-      String rangeHeaderVal = headers.getHeaderString(RANGE_HEADER);
-      RangeHeader rangeHeader = null;
-
-      LOG.debug("range Header provided value is {}", rangeHeaderVal);
-
-      if (rangeHeaderVal != null) {
-        rangeHeader = RangeHeaderParserUtil.parseRangeHeader(rangeHeaderVal,
-            length);
-        LOG.debug("range Header provided value is {}", rangeHeader);
-        if (rangeHeader.isInValidRange()) {
-          OS3Exception exception = S3ErrorTable.newError(S3ErrorTable
-              .INVALID_RANGE, rangeHeaderVal);
-          throw exception;
-        }
-      }
-      ResponseBuilder responseBuilder;
-
-      if (rangeHeaderVal == null || rangeHeader.isReadFull()) {
-        StreamingOutput output = dest -> {
-          try (OzoneInputStream key = bucket.readKey(keyPath)) {
-            IOUtils.copy(key, dest);
-          }
-        };
-        responseBuilder = Response
-            .ok(output)
-            .header(CONTENT_LENGTH, keyDetails.getDataSize());
-
-      } else {
-        LOG.debug("range Header provided value is {}", rangeHeader);
-        OzoneInputStream key = bucket.readKey(keyPath);
-
-        long startOffset = rangeHeader.getStartOffset();
-        long endOffset = rangeHeader.getEndOffset();
-        long copyLength;
-        if (startOffset == endOffset) {
-          // if range header is given as bytes=0-0, then we should return 1
-          // byte from start offset
-          copyLength = 1;
-        } else {
-          copyLength = rangeHeader.getEndOffset() - rangeHeader
-              .getStartOffset() + 1;
-        }
-        StreamingOutput output = dest -> {
-          try (S3WrapperInputStream s3WrapperInputStream =
-              new S3WrapperInputStream(
-                  key.getInputStream())) {
-            IOUtils.copyLarge(s3WrapperInputStream, dest, startOffset,
-                copyLength);
-          }
-        };
-        responseBuilder = Response
-            .ok(output)
-            .header(CONTENT_LENGTH, copyLength);
-
-        String contentRangeVal = RANGE_HEADER_SUPPORTED_UNIT + " " +
-            rangeHeader.getStartOffset() + "-" + rangeHeader.getEndOffset() +
-            "/" + length;
-
-        responseBuilder.header(CONTENT_RANGE_HEADER, contentRangeVal);
-      }
-      responseBuilder.header(ACCEPT_RANGE_HEADER,
-          RANGE_HEADER_SUPPORTED_UNIT);
-      for (String responseHeader : customizableGetHeaders) {
-        String headerValue = headers.getHeaderString(responseHeader);
-        if (headerValue != null) {
-          responseBuilder.header(responseHeader, headerValue);
-        }
-      }
-      addLastModifiedDate(responseBuilder, keyDetails);
-      return responseBuilder.build();
-    } catch (OMException ex) {
-      if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .NO_SUCH_KEY, keyPath);
-      } else {
-        throw ex;
-      }
-    }
-  }
-
-  private void addLastModifiedDate(
-      ResponseBuilder responseBuilder, OzoneKeyDetails key) {
-
-    ZonedDateTime lastModificationTime =
-        Instant.ofEpochMilli(key.getModificationTime())
-            .atZone(ZoneId.of("GMT"));
-
-    responseBuilder
-        .header(LAST_MODIFIED,
-            RFC1123Util.FORMAT.format(lastModificationTime));
-  }
-
-  /**
-   * Rest endpoint to check existence of an object in a bucket.
-   * <p>
-   * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
-   * for more details.
-   */
-  @HEAD
-  public Response head(
-      @PathParam("bucket") String bucketName,
-      @PathParam("path") String keyPath) throws Exception {
-    OzoneKeyDetails key;
-
-    try {
-      key = getBucket(bucketName).getKey(keyPath);
-      // TODO: return the specified range bytes of this object.
-    } catch (OMException ex) {
-      if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
-        // Just return 404 with no content
-        return Response.status(Status.NOT_FOUND).build();
-      } else {
-        throw ex;
-      }
-    }
-
-    ResponseBuilder response = Response.ok().status(HttpStatus.SC_OK)
-        .header("ETag", "" + key.getModificationTime())
-        .header("Content-Length", key.getDataSize())
-        .header("Content-Type", "binary/octet-stream");
-    addLastModifiedDate(response, key);
-    return response
-        .build();
-  }
-
-  /**
-   * Abort multipart upload request.
-   * @param bucket
-   * @param key
-   * @param uploadId
-   * @return Response
-   * @throws IOException
-   * @throws OS3Exception
-   */
-  private Response abortMultipartUpload(String bucket, String key, String
-      uploadId) throws IOException, OS3Exception {
-    try {
-      OzoneBucket ozoneBucket = getBucket(bucket);
-      ozoneBucket.abortMultipartUpload(key, uploadId);
-    } catch (OMException ex) {
-      if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_UPLOAD, uploadId);
-      }
-      throw ex;
-    }
-    return Response
-        .status(Status.NO_CONTENT)
-        .build();
-  }
-
-
-  /**
-   * Delete a specific object from a bucket. If the query param uploadId is
-   * specified, the request aborts that multipart upload.
-   * <p>
-   * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
-   * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadAbort.html
-   * for more details.
-   */
-  @DELETE
-  @SuppressWarnings("emptyblock")
-  public Response delete(
-      @PathParam("bucket") String bucketName,
-      @PathParam("path") String keyPath,
-      @QueryParam("uploadId") @DefaultValue("") String uploadId) throws
-      IOException, OS3Exception {
-
-    try {
-      if (uploadId != null && !uploadId.equals("")) {
-        return abortMultipartUpload(bucketName, keyPath, uploadId);
-      }
-      OzoneBucket bucket = getBucket(bucketName);
-      bucket.getKey(keyPath);
-      bucket.deleteKey(keyPath);
-    } catch (OMException ex) {
-      if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .NO_SUCH_BUCKET, bucketName);
-      } else if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
-        // KEY_NOT_FOUND is not a problem: AWS doesn't report an error for
-        // missing keys on delete. Just return 204.
-      } else {
-        throw ex;
-      }
-
-    }
-    return Response
-        .status(Status.NO_CONTENT)
-        .build();
-
-  }
-
-  /**
-   * Initialize MultiPartUpload request.
-   * <p>
-   * Note: the specific content type is set by the HeaderPreprocessor.
-   */
-  @POST
-  @Produces(MediaType.APPLICATION_XML)
-  @Consumes(HeaderPreprocessor.MULTIPART_UPLOAD_MARKER)
-  public Response initializeMultipartUpload(
-      @PathParam("bucket") String bucket,
-      @PathParam("path") String key
-  )
-      throws IOException, OS3Exception {
-    try {
-      OzoneBucket ozoneBucket = getBucket(bucket);
-      String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
-
-      ReplicationType replicationType;
-      ReplicationFactor replicationFactor;
-      if (storageType == null || storageType.equals("")) {
-        replicationType = S3StorageType.getDefault().getType();
-        replicationFactor = S3StorageType.getDefault().getFactor();
-      } else {
-        try {
-          replicationType = S3StorageType.valueOf(storageType).getType();
-          replicationFactor = S3StorageType.valueOf(storageType).getFactor();
-        } catch (IllegalArgumentException ex) {
-          throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT,
-              storageType);
-        }
-      }
-
-      OmMultipartInfo multipartInfo = ozoneBucket
-          .initiateMultipartUpload(key, replicationType, replicationFactor);
-
-      MultipartUploadInitiateResponse multipartUploadInitiateResponse = new
-          MultipartUploadInitiateResponse();
-
-      multipartUploadInitiateResponse.setBucket(bucket);
-      multipartUploadInitiateResponse.setKey(key);
-      multipartUploadInitiateResponse.setUploadID(multipartInfo.getUploadID());
-
-      return Response.status(Status.OK).entity(
-          multipartUploadInitiateResponse).build();
-    } catch (IOException ex) {
-      LOG.error("Error in Initiate Multipart Upload Request for bucket: " +
-          bucket + ", key: " + key, ex);
-      throw ex;
-    }
-  }
-
-  /**
-   * Complete a multipart upload.
-   */
-  @POST
-  @Produces(MediaType.APPLICATION_XML)
-  public Response completeMultipartUpload(@PathParam("bucket") String bucket,
-      @PathParam("path") String key,
-      @QueryParam("uploadId") @DefaultValue("") String uploadID,
-      CompleteMultipartUploadRequest multipartUploadRequest)
-      throws IOException, OS3Exception {
-    OzoneBucket ozoneBucket = getBucket(bucket);
-    Map<Integer, String> partsMap = new TreeMap<>();
-    List<CompleteMultipartUploadRequest.Part> partList =
-        multipartUploadRequest.getPartList();
-
-    for (CompleteMultipartUploadRequest.Part part : partList) {
-      partsMap.put(part.getPartNumber(), part.geteTag());
-    }
-
-    LOG.debug("Parts map {}", partsMap.toString());
-
-    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo;
-    try {
-      omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(
-          key, uploadID, partsMap);
-      CompleteMultipartUploadResponse completeMultipartUploadResponse =
-          new CompleteMultipartUploadResponse();
-      completeMultipartUploadResponse.setBucket(bucket);
-      completeMultipartUploadResponse.setKey(key);
-      completeMultipartUploadResponse.setETag(omMultipartUploadCompleteInfo
-          .getHash());
-      // Location is also set to the bucket name.
-      completeMultipartUploadResponse.setLocation(bucket);
-      return Response.status(Status.OK).entity(completeMultipartUploadResponse)
-          .build();
-    } catch (OMException ex) {
-      LOG.error("Error in Complete Multipart Upload Request for bucket: " +
-          bucket + ", key: " + key, ex);
-      if (ex.getResult() == ResultCodes.MISMATCH_MULTIPART_LIST) {
-        OS3Exception oex =
-            S3ErrorTable.newError(S3ErrorTable.INVALID_PART, key);
-        throw oex;
-      } else if (ex.getResult() == ResultCodes.MISSING_UPLOAD_PARTS) {
-        OS3Exception oex =
-            S3ErrorTable.newError(S3ErrorTable.INVALID_PART_ORDER, key);
-        throw oex;
-      } else if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
-        OS3Exception os3Exception = S3ErrorTable.newError(NO_SUCH_UPLOAD,
-            uploadID);
-        throw os3Exception;
-      } else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) {
-        OS3Exception os3Exception = S3ErrorTable.newError(ENTITY_TOO_SMALL,
-            key);
-        throw os3Exception;
-      }
-      throw ex;
-    }
-  }
-
-  private Response createMultipartKey(String bucket, String key, long length,
-                                      int partNumber, String uploadID,
-                                      InputStream body)
-      throws IOException, OS3Exception {
-    try {
-      OzoneBucket ozoneBucket = getBucket(bucket);
-      OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey(
-          key, length, partNumber, uploadID);
-
-      String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
-      if (copyHeader != null) {
-        Pair<String, String> result = parseSourceHeader(copyHeader);
-
-        String sourceBucket = result.getLeft();
-        String sourceKey = result.getRight();
-
-        try (OzoneInputStream sourceObject =
-            getBucket(sourceBucket).readKey(sourceKey)) {
-
-          String range =
-              headers.getHeaderString(COPY_SOURCE_HEADER_RANGE);
-          if (range != null) {
-            RangeHeader rangeHeader =
-                RangeHeaderParserUtil.parseRangeHeader(range, 0);
-            IOUtils.copyLarge(sourceObject, ozoneOutputStream,
-                rangeHeader.getStartOffset(),
-                rangeHeader.getEndOffset() - rangeHeader.getStartOffset());
-
-          } else {
-            IOUtils.copy(sourceObject, ozoneOutputStream);
-          }
-        }
-
-      } else {
-        IOUtils.copy(body, ozoneOutputStream);
-      }
-      ozoneOutputStream.close();
-      OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
-          ozoneOutputStream.getCommitUploadPartInfo();
-      String eTag = omMultipartCommitUploadPartInfo.getPartName();
-
-      if (copyHeader != null) {
-        return Response.ok(new CopyPartResult(eTag)).build();
-      } else {
-        return Response.ok().header("ETag",
-            eTag).build();
-      }
-
-    } catch (OMException ex) {
-      if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
-        throw S3ErrorTable.newError(NO_SUCH_UPLOAD,
-            uploadID);
-      }
-      throw ex;
-    }
-
-  }
-
-  /**
-   * Returns response for the listParts request.
-   * See: https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html
-   * @param bucket
-   * @param key
-   * @param uploadID
-   * @param partNumberMarker
-   * @param maxParts
-   * @return
-   * @throws IOException
-   * @throws OS3Exception
-   */
-  private Response listParts(String bucket, String key, String uploadID,
-      int partNumberMarker, int maxParts) throws IOException, OS3Exception {
-    ListPartsResponse listPartsResponse = new ListPartsResponse();
-    try {
-      OzoneBucket ozoneBucket = getBucket(bucket);
-      OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
-          ozoneBucket.listParts(key, uploadID, partNumberMarker, maxParts);
-      listPartsResponse.setBucket(bucket);
-      listPartsResponse.setKey(key);
-      listPartsResponse.setUploadID(uploadID);
-      listPartsResponse.setMaxParts(maxParts);
-      listPartsResponse.setPartNumberMarker(partNumberMarker);
-      listPartsResponse.setTruncated(false);
-
-      listPartsResponse.setStorageClass(S3StorageType.fromReplicationType(
-          ozoneMultipartUploadPartListParts.getReplicationType(),
-          ozoneMultipartUploadPartListParts.getReplicationFactor()).toString());
-
-      if (ozoneMultipartUploadPartListParts.isTruncated()) {
-        listPartsResponse.setTruncated(
-            ozoneMultipartUploadPartListParts.isTruncated());
-        listPartsResponse.setNextPartNumberMarker(
-            ozoneMultipartUploadPartListParts.getNextPartNumberMarker());
-      }
-
-      ozoneMultipartUploadPartListParts.getPartInfoList().forEach(partInfo -> {
-        ListPartsResponse.Part part = new ListPartsResponse.Part();
-        part.setPartNumber(partInfo.getPartNumber());
-        part.setETag(partInfo.getPartName());
-        part.setSize(partInfo.getSize());
-        part.setLastModified(Instant.ofEpochMilli(
-            partInfo.getModificationTime()));
-        listPartsResponse.addPart(part);
-      });
-
-    } catch (OMException ex) {
-      if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
-        throw S3ErrorTable.newError(NO_SUCH_UPLOAD,
-            uploadID);
-      }
-      throw ex;
-    }
-    return Response.status(Status.OK).entity(listPartsResponse).build();
-  }
-
-  @VisibleForTesting
-  public void setHeaders(HttpHeaders headers) {
-    this.headers = headers;
-  }
-
-  private CopyObjectResponse copyObject(String copyHeader,
-                                        String destBucket,
-                                        String destkey,
-                                        ReplicationType replicationType,
-                                        ReplicationFactor replicationFactor,
-                                        boolean storageTypeDefault)
-      throws OS3Exception, IOException {
-
-    Pair<String, String> result = parseSourceHeader(copyHeader);
-
-    String sourceBucket = result.getLeft();
-    String sourceKey = result.getRight();
-    OzoneInputStream sourceInputStream = null;
-    OzoneOutputStream destOutputStream = null;
-    boolean closed = false;
-    try {
-      // Check whether we are trying to copy the object to itself.
-
-      if (sourceBucket.equals(destBucket)) {
-        if (sourceKey.equals(destkey)) {
-          // When a storage type is explicitly provided for a same-source
-          // copy, we should not throw an exception: the aws cli issues such
-          // copies when options like the storage type need to change.
-          if (storageTypeDefault) {
-            OS3Exception ex = S3ErrorTable.newError(S3ErrorTable
-                .INVALID_REQUEST, copyHeader);
-            ex.setErrorMessage("This copy request is illegal because it is " +
-                "trying to copy an object to itself without changing the " +
-                "object's metadata, storage class, website redirect " +
-                "location or encryption attributes.");
-            throw ex;
-          } else {
-            // TODO: We should change the storage type here, but Ozone does
-            // not support that yet, so return a dummy response for now.
-            CopyObjectResponse copyObjectResponse = new CopyObjectResponse();
-            copyObjectResponse.setETag(OzoneUtils.getRequestID());
-            copyObjectResponse.setLastModified(Instant.ofEpochMilli(
-                Time.now()));
-            return copyObjectResponse;
-          }
-        }
-      }
-
-
-      OzoneBucket sourceOzoneBucket = getBucket(sourceBucket);
-      OzoneBucket destOzoneBucket = getBucket(destBucket);
-
-      OzoneKeyDetails sourceKeyDetails = sourceOzoneBucket.getKey(sourceKey);
-      long sourceKeyLen = sourceKeyDetails.getDataSize();
-
-      sourceInputStream = sourceOzoneBucket.readKey(sourceKey);
-
-      destOutputStream = destOzoneBucket.createKey(destkey, sourceKeyLen,
-          replicationType, replicationFactor, new HashMap<>());
-
-      IOUtils.copy(sourceInputStream, destOutputStream);
-
-      // Close explicitly: the key is committed in OM only on close, and
-      // getKey would fail otherwise.
-      sourceInputStream.close();
-      destOutputStream.close();
-      closed = true;
-
-      OzoneKeyDetails destKeyDetails = destOzoneBucket.getKey(destkey);
-
-      CopyObjectResponse copyObjectResponse = new CopyObjectResponse();
-      copyObjectResponse.setETag(OzoneUtils.getRequestID());
-      copyObjectResponse.setLastModified(Instant.ofEpochMilli(destKeyDetails
-          .getModificationTime()));
-      return copyObjectResponse;
-    } catch (OMException ex) {
-      if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, sourceKey);
-      } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, sourceBucket);
-      }
-      throw ex;
-    } finally {
-      if (!closed) {
-        if (sourceInputStream != null) {
-          sourceInputStream.close();
-        }
-        if (destOutputStream != null) {
-          destOutputStream.close();
-        }
-      }
-    }
-  }
-
-  /**
-   * Parse the key and bucket name from copy header.
-   */
-  @VisibleForTesting
-  public static Pair<String, String> parseSourceHeader(String copyHeader)
-      throws OS3Exception {
-    String header = copyHeader;
-    if (header.startsWith("/")) {
-      header = copyHeader.substring(1);
-    }
-    int pos = header.indexOf("/");
-    if (pos == -1) {
-      OS3Exception ex = S3ErrorTable.newError(S3ErrorTable
-          .INVALID_ARGUMENT, header);
-      ex.setErrorMessage("Copy Source must mention the source bucket and " +
-          "key: sourcebucket/sourcekey");
-      throw ex;
-    }
-
-    return Pair.of(header.substring(0, pos), header.substring(pos + 1));
-  }
-}
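
A detail that is easy to miss in the GET path above: HTTP byte ranges are
inclusive on both ends, so bytes=0-0 is a valid one-byte request and the copy
length is endOffset - startOffset + 1 (the start == end special case yields
the same value). A small standalone sketch of that arithmetic and of the
Content-Range value the endpoint builds:

    public final class RangeMath {
      private RangeMath() { }

      /** Bytes covered by an inclusive range such as bytes=0-0. */
      static long copyLength(long startOffset, long endOffset) {
        // Inclusive on both ends, hence the +1; start == end gives 1.
        return endOffset - startOffset + 1;
      }

      /** Content-Range value for a partial response. */
      static String contentRange(long start, long end, long totalLength) {
        return "bytes " + start + "-" + end + "/" + totalLength;
      }

      public static void main(String[] args) {
        System.out.println(copyLength(0, 0));          // 1
        System.out.println(contentRange(0, 0, 4096));  // bytes 0-0/4096
      }
    }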
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PlainTextMultipartUploadReader.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PlainTextMultipartUploadReader.java
deleted file mode 100644
index 599b473..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PlainTextMultipartUploadReader.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.ext.MessageBodyReader;
-import javax.ws.rs.ext.Provider;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.annotation.Annotation;
-import java.lang.reflect.Type;
-
-/**
- * Body reader to accept plain text MPU.
- * <p>
- * The AWS S3 API sends a multipart-upload request with the content type
- * 'text/plain' when 'aws s3 cp' is used (instead of 'aws s3api').
- * <p>
- * Our generic ObjectEndpoint.multipartUpload has a
- * CompleteMultipartUploadRequest parameter, which is required only for the
- * completion request.
- * <p>
- * But JAX-RS tries to parse that parameter from the body of every request,
- * and for text/plain requests this parsing fails. This simple BodyReader
- * accepts an empty text/plain message and returns an empty completion
- * request.
- */
-@Provider
-@Consumes("text/plain")
-public class PlainTextMultipartUploadReader
-    implements MessageBodyReader<CompleteMultipartUploadRequest> {
-
-  @Override
-  public boolean isReadable(Class<?> type, Type genericType,
-      Annotation[] annotations, MediaType mediaType) {
-    return type.equals(CompleteMultipartUploadRequest.class)
-        && mediaType.equals(MediaType.TEXT_PLAIN_TYPE);
-  }
-
-  @Override
-  public CompleteMultipartUploadRequest readFrom(
-      Class<CompleteMultipartUploadRequest> type, Type genericType,
-      Annotation[] annotations, MediaType mediaType,
-      MultivaluedMap<String, String> httpHeaders, InputStream entityStream)
-      throws IOException, WebApplicationException {
-    return new CompleteMultipartUploadRequest();
-  }
-}
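
The effect of the provider is easiest to see by exercising it directly: the
body is never inspected, and an empty request object comes back. A minimal
check (nulls stand in for the JAX-RS arguments the implementation ignores):

    import javax.ws.rs.core.MediaType;
    import java.io.ByteArrayInputStream;

    public class PlainTextReaderDemo {
      public static void main(String[] args) throws Exception {
        PlainTextMultipartUploadReader reader =
            new PlainTextMultipartUploadReader();

        // The provider only claims text/plain bodies bound to the
        // completion request type.
        System.out.println(reader.isReadable(
            CompleteMultipartUploadRequest.class, null, null,
            MediaType.TEXT_PLAIN_TYPE)); // true

        // The stream is ignored; an empty completion request is returned.
        CompleteMultipartUploadRequest request = reader.readFrom(
            CompleteMultipartUploadRequest.class, null, null,
            MediaType.TEXT_PLAIN_TYPE, null,
            new ByteArrayInputStream(new byte[0]));
        System.out.println(request != null); // true
      }
    }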
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java
deleted file mode 100644
index 23d02e9..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
-import java.io.IOException;
-import java.time.Instant;
-import java.util.Iterator;
-
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.s3.commontypes.BucketMetadata;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.header.AuthenticationHeaderParser;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.s3.util.OzoneS3Util.getVolumeName;
-
-/**
- * Top level rest endpoint.
- */
-@Path("/")
-public class RootEndpoint extends EndpointBase {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RootEndpoint.class);
-
-  /**
-   * Rest endpoint to list all the buckets of the current user.
-   *
-   * See https://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceGET.html
-   * for more details.
-   */
-  @GET
-  public Response get()
-      throws OS3Exception, IOException {
-    OzoneVolume volume;
-    ListBucketResponse response = new ListBucketResponse();
-
-    AuthenticationHeaderParser authenticationHeaderParser =
-        getAuthenticationHeaderParser();
-
-    if (!authenticationHeaderParser.doesAuthenticationInfoExists()) {
-      return Response.status(Status.TEMPORARY_REDIRECT)
-          .header("Location", "/static/")
-          .build();
-    }
-    String volumeName = getVolumeName(authenticationHeaderParser.
-        getAccessKeyID());
-    Iterator<? extends OzoneBucket> bucketIterator = listS3Buckets(volumeName,
-        null);
-
-    while (bucketIterator.hasNext()) {
-      OzoneBucket next = bucketIterator.next();
-      BucketMetadata bucketMetadata = new BucketMetadata();
-      bucketMetadata.setName(next.getName());
-      bucketMetadata.setCreationDate(Instant.ofEpochMilli(next
-          .getCreationTime()));
-      response.addBucket(bucketMetadata);
-    }
-
-    return Response.ok(response).build();
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/XmlNamespaceFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/XmlNamespaceFilter.java
deleted file mode 100644
index a49ecf6..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/XmlNamespaceFilter.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import org.xml.sax.Attributes;
-import org.xml.sax.SAXException;
-import org.xml.sax.helpers.XMLFilterImpl;
-
-/**
- * SAX filter to force namespace usage.
- * <p>
- * This filter reads the XML content as namespace-qualified content,
- * independent of the namespaces used in the input.
- */
-public class XmlNamespaceFilter extends XMLFilterImpl {
-
-  private String namespace;
-
-  /**
-   * Create the filter.
-   *
-   * @param namespace to add to every element.
-   */
-  public XmlNamespaceFilter(String namespace) {
-    this.namespace = namespace;
-  }
-
-  @Override
-  public void startElement(String uri, String localName, String qName,
-      Attributes atts) throws SAXException {
-    super.startElement(namespace, localName, qName, atts);
-  }
-
-  @Override
-  public void endElement(String uri, String localName, String qName)
-      throws SAXException {
-    super.endElement(namespace, localName, qName);
-  }
-}
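
This is the same wiring the MultiDeleteRequest body reader at the top of this
change uses: the filter sits between a namespace-aware XMLReader and a JAXB
UnmarshallerHandler, forcing the S3 namespace onto every element so that
unqualified client input still unmarshals. A sketch of the pipeline in
isolation:

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.UnmarshallerHandler;
    import javax.xml.parsers.SAXParserFactory;
    import java.io.StringReader;
    import org.xml.sax.InputSource;
    import org.xml.sax.XMLReader;

    public class NamespaceFilterDemo {
      public static void main(String[] args) throws Exception {
        // Input without any namespace declaration.
        String xml = "<InitiateMultipartUploadResult>"
            + "<UploadId>abc-123</UploadId>"
            + "</InitiateMultipartUploadResult>";

        SAXParserFactory factory = SAXParserFactory.newInstance();
        factory.setNamespaceAware(true);
        XMLReader parent = factory.newSAXParser().getXMLReader();

        UnmarshallerHandler handler = JAXBContext
            .newInstance(MultipartUploadInitiateResponse.class)
            .createUnmarshaller().getUnmarshallerHandler();

        // Rewrite every element into the S3 namespace before the SAX
        // events reach JAXB.
        XmlNamespaceFilter filter =
            new XmlNamespaceFilter("http://s3.amazonaws.com/doc/2006-03-01/");
        filter.setParent(parent);
        filter.setContentHandler(handler);
        filter.parse(new InputSource(new StringReader(xml)));

        MultipartUploadInitiateResponse result =
            (MultipartUploadInitiateResponse) handler.getResult();
        System.out.println(result.getUploadID()); // abc-123
      }
    }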
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java
deleted file mode 100644
index c55cdf4..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Rest endpoint implementation for the s3 gateway.
- */
-@javax.xml.bind.annotation.XmlSchema(
-    namespace = "http://s3.amazonaws"
-        + ".com/doc/2006-03-01/", elementFormDefault =
-    javax.xml.bind.annotation.XmlNsForm.QUALIFIED,
-    xmlns = {
-        @javax.xml.bind.annotation.XmlNs(namespaceURI = "http://s3.amazonaws"
-            + ".com/doc/2006-03-01/", prefix = "")})
-
-package org.apache.hadoop.ozone.s3.endpoint;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java
deleted file mode 100644
index 722a4a1..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.exception;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.SerializationFeature;
-import com.fasterxml.jackson.dataformat.xml.XmlMapper;
-import com.fasterxml.jackson.module.jaxb.JaxbAnnotationModule;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlTransient;
-import javax.xml.bind.annotation.XmlRootElement;
-
-
-/**
- * This class represents exceptions raised from Ozone S3 service.
- *
- * Ref: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
- */
-@XmlRootElement(name = "Error")
-@XmlAccessorType(XmlAccessType.NONE)
-public class OS3Exception extends Exception {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OS3Exception.class);
-  private static ObjectMapper mapper;
-
-  static {
-    mapper = new XmlMapper();
-    mapper.registerModule(new JaxbAnnotationModule());
-    mapper.enable(SerializationFeature.INDENT_OUTPUT);
-  }
-  @XmlElement(name = "Code")
-  private String code;
-
-  @XmlElement(name = "Message")
-  private String errorMessage;
-
-  @XmlElement(name = "Resource")
-  private String resource;
-
-  @XmlElement(name = "RequestId")
-  private String requestId;
-
-  @XmlTransient
-  private int httpCode;
-
-  public OS3Exception() {
-    //Added for JaxB.
-  }
-
-  /**
-   * Create an object OS3Exception.
-   * @param codeVal
-   * @param messageVal
-   * @param requestIdVal
-   * @param resourceVal
-   */
-  public OS3Exception(String codeVal, String messageVal, String requestIdVal,
-                      String resourceVal) {
-    this.code = codeVal;
-    this.errorMessage = messageVal;
-    this.requestId = requestIdVal;
-    this.resource = resourceVal;
-  }
-
-  /**
-   * Create an object OS3Exception.
-   * @param codeVal
-   * @param messageVal
-   * @param httpCode
-   */
-  public OS3Exception(String codeVal, String messageVal, int httpCode) {
-    this.code = codeVal;
-    this.errorMessage = messageVal;
-    this.httpCode = httpCode;
-  }
-
-  public String getCode() {
-    return code;
-  }
-
-  public void setCode(String code) {
-    this.code = code;
-  }
-
-  public String getErrorMessage() {
-    return errorMessage;
-  }
-
-  public void setErrorMessage(String errorMessage) {
-    this.errorMessage = errorMessage;
-  }
-
-  public String getRequestId() {
-    return requestId;
-  }
-
-  public void setRequestId(String requestId) {
-    this.requestId = requestId;
-  }
-
-  public String getResource() {
-    return resource;
-  }
-
-  public void setResource(String resource) {
-    this.resource = resource;
-  }
-
-  public int getHttpCode() {
-    return httpCode;
-  }
-
-  public void setHttpCode(int httpCode) {
-    this.httpCode = httpCode;
-  }
-
-  public String toXml() {
-    try {
-      String val = mapper.writeValueAsString(this);
-      LOG.debug("toXml val is {}", val);
-      String xmlLine = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
-          + val;
-      return xmlLine;
-    } catch (Exception ex) {
-      LOG.error("Exception occurred {}", ex);
-    }
-
-    // If serialization fails, log it and fall back to constructing the XML
-    // manually from the exception data.
-    String formatString = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" +
-        "<Error>" +
-        "<Code>%s</Code>" +
-        "<Message>%s</Message>" +
-        "<Resource>%s</Resource>" +
-        "<RequestId>%s</RequestId>" +
-        "</Error>";
-    return String.format(formatString, this.getCode(),
-        this.getErrorMessage(), this.getResource(),
-        this.getRequestId());
-  }
-}
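
The serialized form sent to clients can be previewed directly; Jackson's
XmlMapper honours the JAXB annotations, so the output is an S3-style <Error>
document. A short sketch:

    public class OS3ExceptionDemo {
      public static void main(String[] args) {
        // Build an error the way the S3ErrorTable templates do.
        OS3Exception error = new OS3Exception(
            "NoSuchKey", "The specified key does not exist", 404);
        error.setResource("/bucket1/key1");
        error.setRequestId("req-0001");

        // Prints the XML declaration followed by the <Error> element with
        // Code, Message, Resource and RequestId children.
        System.out.println(error.toXml());
      }
    }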
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java
deleted file mode 100644
index 588dafa..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.exception;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.inject.Inject;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.ext.ExceptionMapper;
-import javax.ws.rs.ext.Provider;
-
-import org.apache.hadoop.ozone.s3.RequestIdentifier;
-
-/**
- *  Exception mapper that converts errors from the Ozone S3 service
- *  into HTTP responses.
- */
-@Provider
-public class OS3ExceptionMapper implements ExceptionMapper<OS3Exception> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OS3ExceptionMapper.class);
-
-  @Inject
-  private RequestIdentifier requestIdentifier;
-
-  @Override
-  public Response toResponse(OS3Exception exception) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Returning exception. ex: {}", exception.toString());
-    }
-    exception.setRequestId(requestIdentifier.getRequestId());
-    return Response.status(exception.getHttpCode())
-        .entity(exception.toXml()).build();
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
deleted file mode 100644
index 1df0444..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.exception;
-
-
-import static java.net.HttpURLConnection.HTTP_BAD_REQUEST;
-import static java.net.HttpURLConnection.HTTP_CONFLICT;
-import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_NOT_SATISFIABLE;
-
-/**
- * This class represents errors from Ozone S3 service.
- * This class needs to be updated to add new errors when required.
- */
-public final class S3ErrorTable {
-
-  private S3ErrorTable() {
-    //No one should construct this object.
-  }
-
-  public static final OS3Exception INVALID_URI = new OS3Exception("InvalidURI",
-      "Couldn't parse the specified URI.", HTTP_BAD_REQUEST);
-
-  public static final OS3Exception NO_SUCH_VOLUME = new OS3Exception(
-      "NoSuchVolume", "The specified volume does not exist", HTTP_NOT_FOUND);
-
-  public static final OS3Exception NO_SUCH_BUCKET = new OS3Exception(
-      "NoSuchBucket", "The specified bucket does not exist", HTTP_NOT_FOUND);
-
-  public static final OS3Exception AUTH_PROTOCOL_NOT_SUPPORTED =
-      new OS3Exception("AuthProtocolNotSupported", "Auth protocol used for" +
-          " this request is not supported.", HTTP_BAD_REQUEST);
-
-  public static final OS3Exception S3_TOKEN_CREATION_ERROR =
-      new OS3Exception("InvalidRequest", "Error creating s3 token creation.",
-          HTTP_BAD_REQUEST);
-
-  public static final OS3Exception BUCKET_NOT_EMPTY = new OS3Exception(
-      "BucketNotEmpty", "The bucket you tried to delete is not empty.",
-      HTTP_CONFLICT);
-
-  public static final OS3Exception MALFORMED_HEADER = new OS3Exception(
-      "AuthorizationHeaderMalformed", "The authorization header you provided " +
-      "is invalid.", HTTP_NOT_FOUND);
-
-  public static final OS3Exception NO_SUCH_KEY = new OS3Exception(
-      "NoSuchKey", "The specified key does not exist", HTTP_NOT_FOUND);
-
-  public static final OS3Exception INVALID_ARGUMENT = new OS3Exception(
-      "InvalidArgument", "Invalid Argument", HTTP_BAD_REQUEST);
-
-  public static final OS3Exception INVALID_REQUEST = new OS3Exception(
-      "InvalidRequest", "Invalid Request", HTTP_BAD_REQUEST);
-
-  public static final OS3Exception INVALID_RANGE = new OS3Exception(
-      "InvalidRange", "The requested range is not satisfiable",
-      RANGE_NOT_SATISFIABLE);
-
-  public static final OS3Exception NO_SUCH_UPLOAD = new OS3Exception(
-      "NoSuchUpload", "The specified multipart upload does not exist. The " +
-      "upload ID might be invalid, or the multipart upload might have " +
-      "been aborted or completed.", HTTP_NOT_FOUND);
-
-  public static final OS3Exception INVALID_PART = new OS3Exception(
-      "InvalidPart", "One or more of the specified parts could not be found." +
-      " The part might not have been uploaded, or the specified entity " +
-      "tag might not have matched the part's entity tag.", HTTP_BAD_REQUEST);
-
-  public static final OS3Exception INVALID_PART_ORDER = new OS3Exception(
-      "InvalidPartOrder", "The list of parts was not in ascending order. The " +
-      "parts list must be specified in order by part number.",
-      HTTP_BAD_REQUEST);
-
-  public static final OS3Exception ENTITY_TOO_SMALL = new OS3Exception(
-      "EntityTooSmall", "Your proposed upload is smaller than the minimum " +
-      "allowed object size. Each part must be at least 5 MB in size, except " +
-      "the last part.", HTTP_BAD_REQUEST);
-
-
-  /**
-   * Create a new instance of Error.
-   * @param e Error Template
-   * @param resource Resource associated with this exception
-   * @return a new instance of the error based on the template
-   */
-  public static OS3Exception newError(OS3Exception e, String resource) {
-    OS3Exception err =  new OS3Exception(e.getCode(), e.getErrorMessage(),
-        e.getHttpCode());
-    err.setResource(resource);
-    return err;
-  }
-}
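
Each constant above is a template: newError copies the code, message and HTTP
status, then stamps the per-request resource onto the copy, so the shared
constants are never mutated. A small usage sketch:

    public class S3ErrorTableDemo {
      public static void main(String[] args) {
        // Clone the template and attach the offending resource.
        OS3Exception error =
            S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, "bucket1");

        System.out.println(error.getCode());     // NoSuchBucket
        System.out.println(error.getHttpCode()); // 404
        System.out.println(error.getResource()); // bucket1

        // The shared template itself is left untouched.
        System.out.println(S3ErrorTable.NO_SUCH_BUCKET.getResource()); // null
      }
    }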
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/package-info.java
deleted file mode 100644
index d295ae8..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains Ozone S3 exceptions.
- */
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthenticationHeaderParser.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthenticationHeaderParser.java
deleted file mode 100644
index 7f17c9d..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthenticationHeaderParser.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.header;
-
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.enterprise.context.RequestScoped;
-
-/**
- * Parser for the HTTP Authorization header used to authenticate requests.
- */
-@RequestScoped
-public class AuthenticationHeaderParser {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      AuthenticationHeaderParser.class);
-
-  private String authHeader;
-  private String accessKeyID;
-
-  public void parse() throws OS3Exception {
-    if (authHeader.startsWith("AWS4")) {
-      LOG.debug("V4 Header {}", authHeader);
-      AuthorizationHeaderV4 authorizationHeader = new AuthorizationHeaderV4(
-          authHeader);
-      accessKeyID = authorizationHeader.getAccessKeyID().toLowerCase();
-    } else {
-      LOG.debug("V2 Header {}", authHeader);
-      AuthorizationHeaderV2 authorizationHeader = new AuthorizationHeaderV2(
-          authHeader);
-      accessKeyID = authorizationHeader.getAccessKeyID().toLowerCase();
-    }
-  }
-
-  public boolean doesAuthenticationInfoExists() {
-    return authHeader != null;
-  }
-
-  public String getAccessKeyID() throws OS3Exception {
-    parse();
-    return accessKeyID;
-  }
-
-  public void setAuthHeader(String header) {
-    this.authHeader = header;
-  }
-}
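A sketch (not part of this patch) of how the parser above was driven: the gateway injects the raw Authorization value, then asks for the access key, which dispatches to the V4 or V2 parser based on the "AWS4" prefix.

    // Hypothetical helper in the same package; may throw OS3Exception.
    String resolveAccessKey(String rawHeader) throws OS3Exception {
      AuthenticationHeaderParser parser = new AuthenticationHeaderParser();
      parser.setAuthHeader(rawHeader);
      if (!parser.doesAuthenticationInfoExists()) {
        return null; // anonymous request, no Authorization header
      }
      // "AWS4..." headers take the V4 path, anything else the V2 path;
      // either way the access key is lower-cased.
      return parser.getAccessKeyID();
    }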
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java
deleted file mode 100644
index dfafc3a..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.header;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-
-import static org.apache.commons.lang3.StringUtils.isBlank;
-
-/**
- * Authorization Header v2.
- */
-public class AuthorizationHeaderV2 {
-
-  private static final String IDENTIFIER = "AWS";
-  private String authHeader;
-  private String identifier;
-  private String accessKeyID;
-  private String signature;
-
-  public AuthorizationHeaderV2(String auth) throws OS3Exception {
-    Preconditions.checkNotNull(auth);
-    this.authHeader = auth;
-    parseHeader();
-  }
-
-  /**
-   * This method parses the authorization header.
-   *
-   * Authorization header sample:
-   * AWS AKIAIOSFODNN7EXAMPLE:frJIUN8DYpKDtOLCwo//yllqDzg=
-   *
-   * @throws OS3Exception
-   */
-  @SuppressWarnings("StringSplitter")
-  public void parseHeader() throws OS3Exception {
-    String[] split = authHeader.split(" ");
-    if (split.length != 2) {
-      throw S3ErrorTable.newError(S3ErrorTable.MALFORMED_HEADER, authHeader);
-    }
-
-    identifier = split[0];
-    if (!IDENTIFIER.equals(identifier)) {
-      throw S3ErrorTable.newError(S3ErrorTable.MALFORMED_HEADER, authHeader);
-    }
-
-    String[] remainingSplit = split[1].split(":");
-
-    if (remainingSplit.length != 2) {
-      throw S3ErrorTable.newError(S3ErrorTable.MALFORMED_HEADER, authHeader);
-    }
-
-    accessKeyID = remainingSplit[0];
-    signature = remainingSplit[1];
-    if (isBlank(accessKeyID) || isBlank(signature)) {
-      throw S3ErrorTable.newError(S3ErrorTable.MALFORMED_HEADER, authHeader);
-    }
-  }
-
-  public String getAuthHeader() {
-    return authHeader;
-  }
-
-  public void setAuthHeader(String authHeader) {
-    this.authHeader = authHeader;
-  }
-
-  public String getIdentifier() {
-    return identifier;
-  }
-
-  public String getAccessKeyID() {
-    return accessKeyID;
-  }
-
-  public String getSignature() {
-    return signature;
-  }
-
-}
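Applied to the sample header in the javadoc above, the V2 parser yields the following (illustrative sketch, not part of this patch):

    AuthorizationHeaderV2 v2 = new AuthorizationHeaderV2(
        "AWS AKIAIOSFODNN7EXAMPLE:frJIUN8DYpKDtOLCwo//yllqDzg=");
    v2.getIdentifier();  // "AWS"
    v2.getAccessKeyID(); // "AKIAIOSFODNN7EXAMPLE"
    v2.getSignature();   // "frJIUN8DYpKDtOLCwo//yllqDzg="
    // A wrong prefix, a missing ':' or a blank part throws MALFORMED_HEADER.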
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java
deleted file mode 100644
index 2637522..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.s3.header;
-
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.codec.DecoderException;
-import org.apache.commons.codec.binary.Hex;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.time.LocalDate;
-import java.util.Collection;
-
-import static java.time.temporal.ChronoUnit.DAYS;
-import static org.apache.commons.lang3.StringUtils.isAllEmpty;
-import static org.apache.commons.lang3.StringUtils.isNoneEmpty;
-import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER;
-import static org.apache.hadoop.ozone.s3.AWSV4AuthParser.AWS4_SIGNING_ALGORITHM;
-import static org.apache.hadoop.ozone.s3.AWSV4AuthParser.DATE_FORMATTER;
-
-/**
- * S3 Authorization header.
- * Ref: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using
- * -authorization-header.html
- */
-public class AuthorizationHeaderV4 {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      AuthorizationHeaderV4.class);
-
-  private static final String CREDENTIAL = "Credential=";
-  private static final String SIGNEDHEADERS = "SignedHeaders=";
-  private static final String SIGNATURE = "Signature=";
-
-  private String authHeader;
-  private String algorithm;
-  private String credential;
-  private String signedHeadersStr;
-  private String signature;
-  private Credential credentialObj;
-  private Collection<String> signedHeaders;
-
-  /**
-   * Construct AuthorizationHeader object.
-   * @param header raw Authorization header value to parse
-   */
-  public AuthorizationHeaderV4(String header) throws OS3Exception {
-    Preconditions.checkNotNull(header);
-    this.authHeader = header;
-    parseAuthHeader();
-  }
-
-  /**
-   * This method parses authorization header.
-   *
-   *  Authorization Header sample:
-   *  AWS4-HMAC-SHA256 Credential=AKIAJWFJK62WUTKNFJJA/20181009/us-east-1/s3
-   *  /aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date,
-   * Signature=db81b057718d7c1b3b8dffa29933099551c51d787b3b13b9e0f9ebed45982bf2
-   * @throws OS3Exception
-   */
-  @SuppressWarnings("StringSplitter")
-  public void parseAuthHeader() throws OS3Exception {
-    int firstSep = authHeader.indexOf(' ');
-    if (firstSep < 0) {
-      throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-    }
-
-    //split the value parts of the authorization header
-    String[] split = authHeader.substring(firstSep + 1).trim().split(", *");
-
-    if (split.length != 3) {
-      throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-    }
-
-    algorithm = authHeader.substring(0, firstSep);
-    validateAlgorithm();
-    credential = split[0];
-    signedHeadersStr = split[1];
-    signature = split[2];
-    validateCredentials();
-    validateSignedHeaders();
-    validateSignature();
-
-  }
-
-  /**
-   * Validate Signed headers.
-   * */
-  private void validateSignedHeaders() throws OS3Exception {
-    if (isNoneEmpty(signedHeadersStr)
-        && signedHeadersStr.startsWith(SIGNEDHEADERS)) {
-      signedHeadersStr = signedHeadersStr.substring(SIGNEDHEADERS.length());
-      signedHeaders = StringUtils.getStringCollection(signedHeadersStr, ";");
-      if (signedHeaders.isEmpty()) {
-        LOG.error("No signed headers found. Authheader:{}", authHeader);
-        throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-      }
-    } else {
-      LOG.error("No signed headers found. Authheader:{}", authHeader);
-      throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-    }
-  }
-
-  /**
-   * Validate signature.
-   * */
-  private void validateSignature() throws OS3Exception {
-    if (signature.startsWith(SIGNATURE)) {
-      signature = signature.substring(SIGNATURE.length());
-      if (!isNoneEmpty(signature)) {
-        LOG.error("Signature can't be empty.", signature);
-        throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-      }
-      try {
-        Hex.decodeHex(signature);
-      } catch (DecoderException e) {
-        LOG.error("Signature:{} should be in hexa-decimal encoding.",
-            signature);
-        throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-      }
-    } else {
-      LOG.error("Signature can't be empty.", signature);
-      throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-    }
-  }
-
-  /**
-   * Validate credentials.
-   * */
-  private void validateCredentials() throws OS3Exception {
-    if (isNoneEmpty(credential) && credential.startsWith(CREDENTIAL)) {
-      credential = credential.substring(CREDENTIAL.length());
-      // Parse credential. Other parts of header are not validated yet. When
-      // security comes, it needs to be completed.
-      credentialObj = new Credential(credential);
-    } else {
-      throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-    }
-
-    if (credentialObj.getAccessKeyID().isEmpty()) {
-      LOG.error("AWS access id shouldn't be empty. credential:{}", credential);
-      throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-    }
-    if (credentialObj.getAwsRegion().isEmpty()) {
-      LOG.error("AWS region shouldn't be empty. credential:{}", credential);
-      throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-    }
-    if (credentialObj.getAwsRequest().isEmpty()) {
-      LOG.error("AWS request shouldn't be empty. credential:{}", credential);
-      throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-    }
-    if (credentialObj.getAwsService().isEmpty()) {
-      LOG.error("AWS service:{} shouldn't be empty. credential:{}",
-          credential);
-      throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-    }
-
-    // Date should not be empty and within valid range.
-    if (!credentialObj.getDate().isEmpty()) {
-      LocalDate date = LocalDate.parse(credentialObj.getDate(), DATE_FORMATTER);
-      LocalDate now = LocalDate.now();
-      if (date.isBefore(now.minus(1, DAYS)) ||
-          date.isAfter(now.plus(1, DAYS))) {
-        LOG.error("AWS date not in valid range. Date:{} should not be older " +
-                "than 1 day(i.e yesterday) and greater than 1 day(i.e " +
-                "tomorrow).",
-            getDate());
-        throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-      }
-    } else {
-      LOG.error("AWS date shouldn't be empty. credential:{}", credential);
-      throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-    }
-  }
-
-  /**
-   * Validate if algorithm is in expected format.
-   * */
-  private void validateAlgorithm() throws OS3Exception {
-    if (isAllEmpty(algorithm) || !algorithm.equals(AWS4_SIGNING_ALGORITHM)) {
-      LOG.error("Unexpected hash algorithm. Algo:{}", algorithm);
-      throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader);
-    }
-  }
-
-  public String getAuthHeader() {
-    return authHeader;
-  }
-
-  public String getAlgorithm() {
-    return algorithm;
-  }
-
-  public String getCredential() {
-    return credential;
-  }
-
-  public String getSignedHeaderString() {
-    return signedHeadersStr;
-  }
-
-  public String getSignature() {
-    return signature;
-  }
-
-  public String getAccessKeyID() {
-    return credentialObj.getAccessKeyID();
-  }
-
-  public String getDate() {
-    return credentialObj.getDate();
-  }
-
-  public String getAwsRegion() {
-    return credentialObj.getAwsRegion();
-  }
-
-  public String getAwsService() {
-    return credentialObj.getAwsService();
-  }
-
-  public String getAwsRequest() {
-    return credentialObj.getAwsRequest();
-  }
-
-  public Collection<String> getSignedHeaders() {
-    return signedHeaders;
-  }
-
-  public Credential getCredentialObj() {
-    return credentialObj;
-  }
-}
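For the sample header in parseAuthHeader's javadoc, the V4 parser yields the following (sketch, not part of this patch; note that validateCredentials() only accepts dates within one day of now, so a live call needs a current date in place of 20181009):

    AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(
        "AWS4-HMAC-SHA256 Credential=AKIAJWFJK62WUTKNFJJA/20181009"
            + "/us-east-1/s3/aws4_request, SignedHeaders=host;"
            + "x-amz-content-sha256;x-amz-date, Signature=db81b057718d7c1b"
            + "3b8dffa29933099551c51d787b3b13b9e0f9ebed45982bf2");
    v4.getAlgorithm();     // "AWS4-HMAC-SHA256"
    v4.getAccessKeyID();   // "AKIAJWFJK62WUTKNFJJA"
    v4.getAwsRegion();     // "us-east-1"
    v4.getSignedHeaders(); // [host, x-amz-content-sha256, x-amz-date]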
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/Credential.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/Credential.java
deleted file mode 100644
index 883980a..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/Credential.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.header;
-
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * Credential in the AWS authorization header.
- * Ref: https://docs.aws.amazon.com/AmazonS3/latest/API/
- * sigv4-auth-using-authorization-header.html
- *
- */
-public class Credential {
-  private static final Logger LOG = LoggerFactory.getLogger(Credential.class);
-
-  private String accessKeyID;
-  private String date;
-  private String awsRegion;
-  private String awsService;
-  private String awsRequest;
-  private String credential;
-
-  /**
-   * Construct Credential Object.
-   * @param cred
-   */
-  Credential(String cred) throws OS3Exception {
-    this.credential = cred;
-    parseCredential();
-  }
-
-  /**
-   * Parse credential value.
-   *
-   * Sample credential value:
-   * Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request
-   *
-   * @throws OS3Exception
-   */
-  @SuppressWarnings("StringSplitter")
-  public void parseCredential() throws OS3Exception {
-    String[] split = credential.split("/");
-    switch (split.length) {
-    case 5:
-      // Ex: dkjad922329ddnks/20190321/us-west-1/s3/aws4_request
-      accessKeyID = split[0].trim();
-      date = split[1].trim();
-      awsRegion = split[2].trim();
-      awsService = split[3].trim();
-      awsRequest = split[4].trim();
-      return;
-    case 6:
-      // Access id is kerberos principal.
-      // Ex: testuser/om@EXAMPLE.COM/20190321/us-west-1/s3/aws4_request
-      accessKeyID = split[0] + "/" +split[1];
-      date = split[2].trim();
-      awsRegion = split[3].trim();
-      awsService = split[4].trim();
-      awsRequest = split[5].trim();
-      return;
-    default:
-      LOG.error("Credentials not in expected format. credential:{}",
-          credential);
-      throw S3ErrorTable.newError(S3ErrorTable.MALFORMED_HEADER, credential);
-    }
-  }
-
-  public String getAccessKeyID() {
-    return accessKeyID;
-  }
-
-  public String getDate() {
-    return date;
-  }
-
-  public String getAwsRegion() {
-    return awsRegion;
-  }
-
-  public String getAwsService() {
-    return awsService;
-  }
-
-  public String getAwsRequest() {
-    return awsRequest;
-  }
-
-  public String getCredential() {
-    return credential;
-  }
-}
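The two credential shapes accepted by parseCredential() above, as a sketch (the constructor is package-private, so this assumes same-package access):

    // 5 parts: plain access key.
    Credential c1 = new Credential(
        "AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request");
    c1.getAccessKeyID(); // "AKIAIOSFODNN7EXAMPLE"
    c1.getDate();        // "20130524"

    // 6 parts: Kerberos principal as the access id.
    Credential c2 = new Credential(
        "testuser/om@EXAMPLE.COM/20190321/us-west-1/s3/aws4_request");
    c2.getAccessKeyID(); // "testuser/om@EXAMPLE.COM"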
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/package-info.java
deleted file mode 100644
index 40bc78bd..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains Ozone S3 Authorization header.
- */
-package org.apache.hadoop.ozone.s3.header;
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/S3WrapperInputStream.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/S3WrapperInputStream.java
deleted file mode 100644
index 9efcc87..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/S3WrapperInputStream.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.io;
-
-import org.apache.hadoop.fs.FSInputStream;
-import org.apache.hadoop.ozone.client.io.KeyInputStream;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * S3Wrapper Input Stream which encapsulates KeyInputStream from ozone.
- */
-public class S3WrapperInputStream extends FSInputStream {
-  private final KeyInputStream inputStream;
-
-  /**
-   * Constructs S3WrapperInputStream with KeyInputStream.
-   *
-   * @param inputStream
-   */
-  public S3WrapperInputStream(InputStream inputStream) {
-    this.inputStream = (KeyInputStream) inputStream;
-  }
-
-  @Override
-  public int read() throws IOException {
-    return inputStream.read();
-  }
-
-  @Override
-  public int read(byte[] b, int off, int len) throws IOException {
-    return inputStream.read(b, off, len);
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    inputStream.close();
-  }
-
-  @Override
-  public int available() throws IOException {
-    return inputStream.available();
-  }
-
-  public InputStream getInputStream() {
-    return inputStream;
-  }
-
-  @Override
-  public void seek(long pos) throws IOException {
-    inputStream.seek(pos);
-  }
-  @Override
-  public long getPos() throws IOException {
-    return inputStream.getPos();
-  }
-
-  @Override
-  public boolean seekToNewSource(long targetPos) throws IOException {
-    return false;
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/package-info.java
deleted file mode 100644
index 5167e60..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains Ozone S3 wrapper stream related classes.
- */
-
-package org.apache.hadoop.ozone.s3.io;
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/package-info.java
deleted file mode 100644
index 9d41551..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains the top level generic classes of s3 gateway.
- */
-package org.apache.hadoop.ozone.s3;
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java
deleted file mode 100644
index 92ae6d4..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.util;
-
-import java.nio.ByteBuffer;
-import java.nio.charset.StandardCharsets;
-import java.util.Objects;
-
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.codec.DecoderException;
-import org.apache.commons.codec.binary.Hex;
-import org.apache.commons.codec.digest.DigestUtils;
-
-/**
- * Token which holds enough information to continue the key iteration.
- */
-public class ContinueToken {
-
-  private String lastKey;
-
-  private String lastDir;
-
-  private static final String CONTINUE_TOKEN_SEPARATOR = "-";
-
-  public ContinueToken(String lastKey, String lastDir) {
-    Preconditions.checkNotNull(lastKey,
-        "The last key can't be null in the continue token.");
-    this.lastKey = lastKey;
-    if (lastDir != null && lastDir.length() > 0) {
-      this.lastDir = lastDir;
-    }
-  }
-
-  /**
-   * Generate a continuation token which is used in get Bucket.
-   *
-   * @return if key is not null return continuation token, else returns null.
-   */
-  public String encodeToString() {
-    if (this.lastKey != null) {
-
-      // Use encoded byte lengths, not char counts, so that multi-byte
-      // UTF-8 key names round-trip correctly through decodeFromString.
-      byte[] keyBytes = lastKey.getBytes(StandardCharsets.UTF_8);
-      byte[] dirBytes = lastDir == null
-          ? new byte[0] : lastDir.getBytes(StandardCharsets.UTF_8);
-      ByteBuffer buffer = ByteBuffer
-          .allocate(4 + keyBytes.length + dirBytes.length);
-      buffer.putInt(keyBytes.length);
-      buffer.put(keyBytes);
-      buffer.put(dirBytes);
-
-      String hex = Hex.encodeHexString(buffer.array());
-      String digest = DigestUtils.sha256Hex(hex);
-      return hex + CONTINUE_TOKEN_SEPARATOR + digest;
-    } else {
-      return null;
-    }
-  }
-
-  /**
-   * Decode a continuation token which is used in get Bucket.
-   *
-   * @param key
-   * @return if key is not null return decoded token, otherwise returns null.
-   * @throws OS3Exception
-   */
-  public static ContinueToken decodeFromString(String key) throws OS3Exception {
-    if (key != null) {
-      int indexSeparator = key.indexOf(CONTINUE_TOKEN_SEPARATOR);
-      if (indexSeparator == -1) {
-        throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, key);
-      }
-      String hex = key.substring(0, indexSeparator);
-      String digest = key.substring(indexSeparator + 1);
-      try {
-        checkHash(key, hex, digest);
-
-        ByteBuffer buffer = ByteBuffer.wrap(Hex.decodeHex(hex));
-        int keySize = buffer.getInt();
-
-        byte[] actualKeyBytes = new byte[keySize];
-        buffer.get(actualKeyBytes);
-
-        byte[] actualDirBytes = new byte[buffer.remaining()];
-        buffer.get(actualDirBytes);
-
-        return new ContinueToken(
-            new String(actualKeyBytes, StandardCharsets.UTF_8),
-            new String(actualDirBytes, StandardCharsets.UTF_8)
-        );
-
-      } catch (DecoderException ex) {
-        OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
-            .INVALID_ARGUMENT, key);
-        os3Exception.setErrorMessage("The continuation token provided is " +
-            "incorrect");
-        throw os3Exception;
-      }
-    } else {
-      return null;
-    }
-  }
-
-  private static void checkHash(String key, String hex, String digest)
-      throws OS3Exception {
-    String digestActualKey = DigestUtils.sha256Hex(hex);
-    if (!digest.equals(digestActualKey)) {
-      OS3Exception ex = S3ErrorTable.newError(S3ErrorTable
-          .INVALID_ARGUMENT, key);
-      ex.setErrorMessage("The continuation token provided is incorrect");
-      throw ex;
-    }
-  }
-
-  public String getLastKey() {
-    return lastKey;
-  }
-
-  public void setLastKey(String lastKey) {
-    this.lastKey = lastKey;
-  }
-
-  public String getLastDir() {
-    return lastDir;
-  }
-
-  public void setLastDir(String lastDir) {
-    this.lastDir = lastDir;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    ContinueToken that = (ContinueToken) o;
-    return lastKey.equals(that.lastKey) &&
-        Objects.equals(lastDir, that.lastDir);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(lastKey);
-  }
-
-  @Override
-  public String toString() {
-    return "ContinueToken{" +
-        "lastKey='" + lastKey + '\'' +
-        ", lastDir='" + lastDir + '\'' +
-        '}';
-  }
-}
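A round-trip sketch (not part of this patch): encodeToString() hex-encodes the key length, key, and directory, then appends a SHA-256 digest of the hex; decodeFromString() verifies the digest before reconstructing the token.

    ContinueToken token = new ContinueToken("dir1/file2", "dir1");
    String opaque = token.encodeToString(); // "<hex>-<sha256(hex)>"
    ContinueToken back = ContinueToken.decodeFromString(opaque);
    // back.getLastKey() -> "dir1/file2", back.getLastDir() -> "dir1";
    // a tampered token fails checkHash() with INVALID_ARGUMENT.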
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java
deleted file mode 100644
index ce7d4f2..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.util;
-
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.security.SecurityUtil;
-
-import javax.annotation.Nonnull;
-import java.util.Collection;
-import java.util.Objects;
-
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-
-/**
- * Ozone util for S3 related operations.
- */
-public final class OzoneS3Util {
-
-  private OzoneS3Util() {
-  }
-
-  public static String getVolumeName(String userName) {
-    Objects.requireNonNull(userName);
-    return DigestUtils.md5Hex(userName);
-  }
-
-  /**
-   * Generate service Name for token.
-   * @param configuration
-   * @param serviceId - ozone manager service ID
-   * @param omNodeIds - list of node ids for the given OM service.
-   * @return service Name.
-   */
-  public static String buildServiceNameForToken(
-      @Nonnull OzoneConfiguration configuration, @Nonnull String serviceId,
-      @Nonnull Collection<String> omNodeIds) {
-    StringBuilder rpcAddress = new StringBuilder();
-
-    int nodesLength = omNodeIds.size();
-    int counter = 0;
-    for (String nodeId : omNodeIds) {
-      counter++;
-      String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
-          serviceId, nodeId);
-      String rpcAddrStr = OmUtils.getOmRpcAddress(configuration, rpcAddrKey);
-      if (rpcAddrStr == null || rpcAddrStr.isEmpty()) {
-        throw new IllegalArgumentException("Could not find rpcAddress for " +
-            OZONE_OM_ADDRESS_KEY + "." + serviceId + "." + nodeId);
-      }
-
-      if (counter != nodesLength) {
-        rpcAddress.append(SecurityUtil.buildTokenService(
-            NetUtils.createSocketAddr(rpcAddrStr)) + ",");
-      } else {
-        rpcAddress.append(SecurityUtil.buildTokenService(
-            NetUtils.createSocketAddr(rpcAddrStr)));
-      }
-    }
-    return rpcAddress.toString();
-  }
-}
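A brief sketch of the volume mapping above: each S3 user resolves to a deterministic volume name, the MD5 hex digest of the user name.

    // Same user always yields the same 32-character hex volume name.
    String volume = OzoneS3Util.getVolumeName("testuser");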
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RFC1123Util.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RFC1123Util.java
deleted file mode 100644
index 15a09b4..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RFC1123Util.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.util;
-
-import java.time.format.DateTimeFormatter;
-import java.time.format.DateTimeFormatterBuilder;
-import java.time.format.SignStyle;
-import java.util.HashMap;
-import java.util.Map;
-
-import static java.time.temporal.ChronoField.DAY_OF_MONTH;
-import static java.time.temporal.ChronoField.DAY_OF_WEEK;
-import static java.time.temporal.ChronoField.HOUR_OF_DAY;
-import static java.time.temporal.ChronoField.MINUTE_OF_HOUR;
-import static java.time.temporal.ChronoField.MONTH_OF_YEAR;
-import static java.time.temporal.ChronoField.SECOND_OF_MINUTE;
-import static java.time.temporal.ChronoField.YEAR;
-
-/**
- * Stricter RFC1123 date format.
- * <p>
- * This format always uses two digits for the day to make it compatible with
- * golang clients.
- */
-public final class RFC1123Util {
-
-  private RFC1123Util() {
-  }
-
-  /**
-   * An RFC-1123 compatible date format which always uses two digits for
-   * the day.
-   */
-  public static final DateTimeFormatter FORMAT;
-
-  static {
-    Map<Long, String> dow = new HashMap<>();
-    dow.put(1L, "Mon");
-    dow.put(2L, "Tue");
-    dow.put(3L, "Wed");
-    dow.put(4L, "Thu");
-    dow.put(5L, "Fri");
-    dow.put(6L, "Sat");
-    dow.put(7L, "Sun");
-    Map<Long, String> moy = new HashMap<>();
-    moy.put(1L, "Jan");
-    moy.put(2L, "Feb");
-    moy.put(3L, "Mar");
-    moy.put(4L, "Apr");
-    moy.put(5L, "May");
-    moy.put(6L, "Jun");
-    moy.put(7L, "Jul");
-    moy.put(8L, "Aug");
-    moy.put(9L, "Sep");
-    moy.put(10L, "Oct");
-    moy.put(11L, "Nov");
-    moy.put(12L, "Dec");
-    FORMAT = new DateTimeFormatterBuilder()
-        .parseCaseInsensitive()
-        .parseLenient()
-        .optionalStart()
-        .appendText(DAY_OF_WEEK, dow)
-        .appendLiteral(", ")
-        .optionalEnd()
-        .appendValue(DAY_OF_MONTH, 2, 2, SignStyle.NOT_NEGATIVE)
-        .appendLiteral(' ')
-        .appendText(MONTH_OF_YEAR, moy)
-        .appendLiteral(' ')
-        .appendValue(YEAR, 4)
-        .appendLiteral(' ')
-        .appendValue(HOUR_OF_DAY, 2)
-        .appendLiteral(':')
-        .appendValue(MINUTE_OF_HOUR, 2)
-        .optionalStart()
-        .appendLiteral(':')
-        .appendValue(SECOND_OF_MINUTE, 2)
-        .optionalEnd()
-        .appendLiteral(' ')
-        .appendOffset("+HHMM", "GMT")
-        .toFormatter();
-  }
-}
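A formatting sketch (assuming the usual java.time imports): unlike the JDK's built-in RFC_1123_DATE_TIME, the day of month is always zero-padded.

    ZonedDateTime t =
        ZonedDateTime.of(2019, 10, 9, 6, 30, 0, 0, ZoneOffset.UTC);
    RFC1123Util.FORMAT.format(t); // "Wed, 09 Oct 2019 06:30:00 GMT"
    // DateTimeFormatter.RFC_1123_DATE_TIME would print "Wed, 9 Oct ...".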
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java
deleted file mode 100644
index 5f5c827..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.util;
-
-/**
- * Range header class which holds the start offset and end offset of the
- * Range header value provided as part of a get object request.
- *
- */
-public class RangeHeader {
-  private long startOffset;
-  private long endOffset;
-  private boolean readFull;
-  private boolean inValidRange;
-
-
-  /**
-   * Construct RangeHeader object.
-   * @param startOffset start offset of the range
-   * @param endOffset end offset of the range
-   * @param full whether the whole object should be read
-   * @param invalid whether the requested range is unsatisfiable
-   */
-  public RangeHeader(long startOffset, long endOffset, boolean full,
-                     boolean invalid) {
-    this.startOffset = startOffset;
-    this.endOffset = endOffset;
-    this.readFull = full;
-    this.inValidRange = invalid;
-  }
-
-  /**
-   * Return startOffset.
-   *
-   * @return startOffset
-   */
-  public long getStartOffset() {
-    return startOffset;
-  }
-
-  /**
-   * Return endoffset.
-   *
-   * @return endoffset
-   */
-  public long getEndOffset() {
-    return endOffset;
-  }
-
-  /**
-   * Return a flag indicating whether, after parsing the range header, the
-   * provided values require the whole object to be read.
-   *
-   * @return readFull
-   */
-  public boolean isReadFull() {
-    return readFull;
-  }
-
-  /**
-   * Return a flag indicating whether the range header values are invalid.
-   *
-   * @return isInValidRange
-   */
-  public boolean isInValidRange() {
-    return inValidRange;
-  }
-
-
-  public String toString() {
-    return "startOffset - [" + startOffset + "]" + "endOffset - [" +
-        endOffset + "]" + " readFull - [ " + readFull + "]" + " invalidRange " +
-        "- [ " + inValidRange + "]";
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java
deleted file mode 100644
index b1b61cc..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.util;
-
-import java.util.regex.Matcher;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_MATCH_PATTERN;
-/**
- * Utility class for S3.
- */
-@InterfaceAudience.Private
-public final class RangeHeaderParserUtil {
-
-  private RangeHeaderParserUtil() {
-  }
-
-  /**
-   * Parse the rangeHeader and set the start and end offset.
-   * @param rangeHeaderVal
-   * @param length
-   *
-   * @return RangeHeader
-   */
-  public static RangeHeader parseRangeHeader(String rangeHeaderVal, long
-      length) {
-    long start = 0;
-    long end = 0;
-    boolean noStart = false;
-    boolean readFull = false;
-    boolean inValidRange = false;
-    RangeHeader rangeHeader;
-    Matcher matcher = RANGE_HEADER_MATCH_PATTERN.matcher(rangeHeaderVal);
-    if (matcher.matches()) {
-      if (!matcher.group("start").equals("")) {
-        start = Integer.parseInt(matcher.group("start"));
-      } else {
-        noStart = true;
-      }
-      if (!matcher.group("end").equals("")) {
-        end = Integer.parseInt(matcher.group("end"));
-      } else {
-        end = length - 1;
-      }
-      if (noStart) {
-        if (end < length) {
-          start = length - end;
-        } else {
-          start = 0;
-        }
-        end = length - 1;
-      } else {
-        if (start >= length)  {
-          readFull = true;
-          if (end >= length) {
-            inValidRange = true;
-          } else {
-            start = 0;
-            end = length - 1;
-          }
-        } else {
-          if (end >= length) {
-            end = length - 1;
-          }
-        }
-      }
-    } else {
-      // Byte specification does not match, or the provided start and end
-      // offsets do not match the regex; fall back to a full read.
-      start = 0;
-      end = length - 1;
-      readFull = true;
-    }
-    rangeHeader = new RangeHeader(start, end, readFull, inValidRange);
-    return rangeHeader;
-
-  }
-}
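Worked examples for the branches above, on a 1000-byte object (sketch, not part of this patch):

    // Explicit range.
    RangeHeader h1 = RangeHeaderParserUtil.parseRangeHeader("bytes=0-499", 1000);
    // h1.getStartOffset() == 0, h1.getEndOffset() == 499

    // Suffix range: last 500 bytes.
    RangeHeader h2 = RangeHeaderParserUtil.parseRangeHeader("bytes=-500", 1000);
    // h2.getStartOffset() == 500, h2.getEndOffset() == 999

    // Unparseable value falls back to a full read.
    RangeHeader h3 = RangeHeaderParserUtil.parseRangeHeader("junk", 1000);
    // h3.isReadFull() == true, offsets 0..999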
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
deleted file mode 100644
index 9516823..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.util;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-import java.util.regex.Pattern;
-
-/**
- * Set of constants used for S3 implementation.
- */
-@InterfaceAudience.Private
-public final class S3Consts {
-
-  //Never Constructed
-  private S3Consts() {
-
-  }
-
-  public static final String COPY_SOURCE_HEADER = "x-amz-copy-source";
-  public static final String COPY_SOURCE_HEADER_RANGE =
-      "x-amz-copy-source-range";
-  public static final String STORAGE_CLASS_HEADER = "x-amz-storage-class";
-  public static final String ENCODING_TYPE = "url";
-
-  // Constants related to Range Header
-  public static final String RANGE_HEADER_SUPPORTED_UNIT = "bytes";
-  public static final String RANGE_HEADER = "Range";
-  public static final String ACCEPT_RANGE_HEADER = "Accept-Ranges";
-  public static final String CONTENT_RANGE_HEADER = "Content-Range";
-
-
-  public static final Pattern RANGE_HEADER_MATCH_PATTERN =
-      Pattern.compile("bytes=(?<start>[0-9]*)-(?<end>[0-9]*)");
-
-  //Error code 416 is Range Not Satisfiable
-  public static final int RANGE_NOT_SATISFIABLE = 416;
-
-}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
deleted file mode 100644
index 7c0773b..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.util;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-
-/**
- * Maps S3 storage class values to Ozone replication values.
- */
-
-public enum S3StorageType {
-
-  REDUCED_REDUNDANCY(ReplicationType.RATIS, ReplicationFactor.ONE),
-  STANDARD(ReplicationType.RATIS, ReplicationFactor.THREE);
-
-  private final ReplicationType type;
-  private final ReplicationFactor factor;
-
-  S3StorageType(
-      ReplicationType type,
-      ReplicationFactor factor) {
-    this.type = type;
-    this.factor = factor;
-  }
-
-  public ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  public ReplicationType getType() {
-    return type;
-  }
-
-  public static S3StorageType getDefault() {
-    return STANDARD;
-  }
-
-  public static S3StorageType fromReplicationType(
-      ReplicationType replicationType, ReplicationFactor factor) {
-    if ((replicationType == ReplicationType.STAND_ALONE) ||
-        (factor == ReplicationFactor.ONE)) {
-      return S3StorageType.REDUCED_REDUNDANCY;
-    } else {
-      return S3StorageType.STANDARD;
-    }
-  }
-}
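The mapping above in both directions, as a sketch:

    // S3 storage class -> Ozone replication.
    S3StorageType.STANDARD.getType();   // ReplicationType.RATIS
    S3StorageType.STANDARD.getFactor(); // ReplicationFactor.THREE

    // Ozone replication -> S3 storage class: factor ONE (or STAND_ALONE
    // replication) reports as REDUCED_REDUNDANCY, everything else STANDARD.
    S3StorageType.fromReplicationType(
        ReplicationType.RATIS, ReplicationFactor.ONE); // REDUCED_REDUNDANCY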
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java
deleted file mode 100644
index af93f08..0000000
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains Ozone S3 Util classes.
- */
-package org.apache.hadoop.ozone.s3.util;
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/main/resources/META-INF/beans.xml b/hadoop-ozone/s3gateway/src/main/resources/META-INF/beans.xml
deleted file mode 100644
index cf00d29..0000000
--- a/hadoop-ozone/s3gateway/src/main/resources/META-INF/beans.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<beans xmlns="http://java.sun.com/xml/ns/javaee"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xsi:schemaLocation="
-      http://java.sun.com/xml/ns/javaee
-      http://java.sun.com/xml/ns/javaee/beans_1_0.xsd">
-</beans>
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/main/resources/browser.html b/hadoop-ozone/s3gateway/src/main/resources/browser.html
deleted file mode 100644
index 0405b17e..0000000
--- a/hadoop-ozone/s3gateway/src/main/resources/browser.html
+++ /dev/null
@@ -1,617 +0,0 @@
-<!DOCTYPE html>
-
-<!--
-Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License").
-
-You may not use this file except in compliance with the License. A copy
-of the License is located at
-
-https://aws.amazon.com/apache2.0/
-
-or in the "license" file accompanying this file. This file is distributed
-on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
-either express or implied. See the License for the specific language governing
-permissions and limitations under the License.
--->
-
-<html lang="en">
-
-<head>
-    <title>Ozone S3 Explorer</title>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    <link rel="shortcut icon" href="static/images/ozone.ico">
-    <link rel="stylesheet"
-          href="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css">
-    <link rel="stylesheet"
-          href="https://use.fontawesome.com/releases/v5.2.0/css/all.css">
-    <link rel="stylesheet"
-          href="https://cdn.datatables.net/plug-ins/f2c75b7247b/integration/bootstrap/3/dataTables.bootstrap.css">
-    <style type="text/css">
-        #wrapper {
-            padding-left: 0;
-        }
-
-        #page-wrapper {
-            width: 100%;
-            padding: 5px 15px;
-        }
-
-        #tb-s3objects {
-            width: 100% !important;
-        }
-
-        body {
-            font: 14px "Lucida Grande", Helvetica, Arial, sans-serif;
-        }
-
-        td {
-            font: 12px "Lucida Grande", Helvetica, Arial, sans-serif;
-        }
-    </style>
-</head>
-
-<!-- DEBUG: Enable this for red outline on all elements -->
-<!-- <style media="screen" type="text/css"> * { outline: 1px red solid; } </style> -->
-
-<body>
-<div id="page-wrapper">
-    <div class="row">
-        <div class="col-lg-12">
-            <div class="panel panel-primary">
-
-                <!-- Panel including bucket/folder information and controls -->
-                <div class="panel-heading clearfix">
-                    <!-- Bucket selection and breadcrumbs -->
-                    <div class="btn-group pull-left">
-                        <div class="pull-left">
-                            Ozone S3 Explorer&nbsp;
-                        </div>
-                        <!-- Bucket breadcrumbs -->
-                        <div class="btn pull-right">
-                            <ul id="breadcrumb"
-                                class="btn breadcrumb pull-right">
-                                <li class="active dropdown">
-                                    <a href="#">&lt;bucket&gt;</a>
-                                </li>
-                            </ul>
-                        </div>
-                    </div>
-                    <!-- Folder/Bucket radio group and progress spinner -->
-                    <div class="btn-group pull-right">
-                        <div class="checkbox pull-left">
-                            <label>
-                                <input type="checkbox" id="hidefolders">&nbsp;Hide
-                                folders?
-                            </label>
-                            <!-- Folder/Bucket radio group -->
-                            <div class="btn-group" data-toggle="buttons">
-                                <label class="btn btn-primary active"
-                                       title="View all objects in folder">
-                                    <i class="fa fa-angle-double-up"></i>
-                                    <input type="radio" name="optionsdepth"
-                                           value="folder" id="optionfolder"
-                                           checked>&nbsp;Folder
-                                </label>
-                                <label class="btn btn-primary"
-                                       title="View all objects in bucket">
-                                    <i class="fa fa-angle-double-down"></i>
-                                    <input type="radio" name="optionsdepth"
-                                           value="bucket" id="optionbucket">&nbsp;Bucket
-                                </label>
-                            </div>
-                        </div>
-                        <!-- Dual purpose: progress spinner and refresh button -->
-                        <div class="btn-group pull-right" id="refresh">
-                            <span id="bucket-loader" style="cursor: pointer;"
-                                  class="btn fa fa-refresh fa-2x pull-left"
-                                  title="Refresh"></span>
-                            <span id="badgecount"
-                                  class="badge pull-right">42</span>
-                        </div>
-                    </div>
-                </div>
-
-                <!-- Panel including S3 object table -->
-                <div class="panel-body">
-                    <table class="table table-bordered table-hover table-striped"
-                           id="tb-s3objects">
-                        <thead>
-                        <tr>
-                            <th>Object</th>
-                            <th>Folder</th>
-                            <th>Last Modified</th>
-                            <th>Timestamp</th>
-                            <th>Size</th>
-                        </tr>
-                        </thead>
-                        <tbody id="tbody-s3objects"></tbody>
-                    </table>
-                </div>
-            </div>
-        </div>
-    </div>
-</div>
-</body>
-
-</html>
-
-<script src="https://code.jquery.com/jquery-3.4.1.min.js"></script>
-<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/js/bootstrap.min.js"></script>
-<script src="https://cdnjs.cloudflare.com/ajax/libs/bootbox.js/4.4.0/bootbox.min.js"></script>
-<script src="https://sdk.amazonaws.com/js/aws-sdk-2.207.0.min.js"></script>
-<script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.22.0/moment.min.js"></script>
-<script src="https://cdn.datatables.net/1.10.5/js/jquery.dataTables.min.js"></script>
-<script src="https://cdn.datatables.net/plug-ins/f2c75b7247b/integration/bootstrap/3/dataTables.bootstrap.js"></script>
-
-<script type="text/javascript">
-    var bucket;
-    var endpoint = document.location.protocol + '//' + document.location.host;
-    if (document.location.pathname.length > 0) {
-        bucket = document.location.pathname.substring(1);
-        endpoint += document.location.pathname;
-    } else {
-        bucket = document.location.host.split(".")[0];
-    }
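-    // For illustration (hypothetical URLs): a path-style address such as
-    // http://s3g.example.com:9878/mybucket yields bucket "mybucket" from the
-    // path; the else branch falls back to virtual-host style, taking the
-    // bucket name from the first label of the host.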
-    var s3exp_config = {
-        Region: '',
-        Bucket: bucket,
-        Prefix: '',
-        Delimiter: '/'
-    };
-    var s3exp_lister = null;
-    var s3exp_columns = {
-        key: 1,
-        folder: 2,
-        date: 3,
-        size: 4
-    };
-
-
-    // Initialize S3 SDK and the moment library (for time formatting utilities)
-    var s3 = new AWS.S3({endpoint: new AWS.Endpoint(endpoint)});
-    s3.config.s3BucketEndpoint = true;
-    moment().format();
-
-    function bytesToSize(bytes) {
-        var sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB'];
-        if (bytes === 0) return '0 Bytes';
-        var ii = Math.floor(Math.log(bytes) / Math.log(1024));
-        return Math.round(bytes / Math.pow(1024, ii)) + ' ' + sizes[ii];
-    }
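-    // For example: bytesToSize(0) -> "0 Bytes", bytesToSize(1536) -> "2 KB"
-    // (rounded), bytesToSize(1048576) -> "1 MB".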
-
-    // Custom startsWith function for String prototype
-    if (typeof String.prototype.startsWith != 'function') {
-        String.prototype.startsWith = function (str) {
-            return this.indexOf(str) == 0;
-        };
-    }
-
-    // Custom endsWith function for String prototype
-    if (typeof String.prototype.endsWith != 'function') {
-        String.prototype.endsWith = function (str) {
-            return this.slice(-str.length) == str;
-        };
-    }
-
-    function object2hrefvirt(bucket, key) {
-        var enckey = key.split('/').map(function (x) {
-            return encodeURIComponent(x);
-        }).join('/');
-        return endpoint + "/" + enckey;
-    }
-
-    function object2hrefpath(bucket, key) {
-        var enckey = key.split('/').map(function (x) {
-            return encodeURIComponent(x);
-        }).join('/');
-        return endpoint + "/" + enckey;
-    }
-
-    function isthisdocument(bucket, key) {
-        return key === "index.html";
-    }
-
-    function isfolder(path) {
-        return path.endsWith('/');
-    }
-
-    // Convert cars/vw/golf.png to golf.png
-    function fullpath2filename(path) {
-        return path.replace(/^.*[\\\/]/, '');
-    }
-
-    // Convert cars/vw/golf.png to cars/vw
-    function fullpath2pathname(path) {
-        return path.substring(0, path.lastIndexOf('/'));
-    }
-
-    // Convert cars/vw/ to vw/
-    function prefix2folder(prefix) {
-        var parts = prefix.split('/');
-        return parts[parts.length - 2] + '/';
-    }
-
-    // Remove hash from document URL
-    function removeHash() {
-        history.pushState("", document.title, window.location.pathname + window.location.search);
-    }
-
-    // We are going to generate bucket/folder breadcrumbs. The resulting HTML will
-    // look something like this:
-    //
-    // <li>Home</li>
-    // <li>Library</li>
-    // <li class="active">Samples</li>
-    //
-    // Note: this code is a little complex right now so it would be good to find
-    // a simpler way to create the breadcrumbs.
-    function folder2breadcrumbs(data) {
-        console.log('Bucket: ' + data.params.Bucket);
-        console.log('Prefix: ' + data.params.Prefix);
-
-        if (data.params.Prefix && data.params.Prefix.length > 0) {
-            console.log('Set hash: ' + data.params.Prefix);
-            window.location.hash = data.params.Prefix;
-        } else {
-            console.log('Remove hash');
-            removeHash();
-        }
-
-        // The parts array will contain the bucket name followed by all the
-        // segments of the prefix, exploded out as separate strings.
-        var parts = [data.params.Bucket];
-
-        if (data.params.Prefix) {
-            parts.push.apply(parts,
-                data.params.Prefix.endsWith('/') ?
-                    data.params.Prefix.slice(0, -1).split('/') :
-                    data.params.Prefix.split('/'));
-        }
-
-        console.log('Parts: ' + parts + ' (length=' + parts.length + ')');
-
-        // Empty the current breadcrumb list
-        $('#breadcrumb li').remove();
-
-        // Now build the new breadcrumb list
-        var buildprefix = '';
-        $.each(parts, function (ii, part) {
-            var ipart;
-
-            // Add the bucket (the bucket is always first)
-            if (ii === 0) {
-                var a1 = $('<a>').attr('href', '#').text(part);
-                ipart = $('<li>').append(a1);
-                a1.click(function (e) {
-                    e.preventDefault();
-                    console.log('Breadcrumb click bucket: ' + data.params.Bucket);
-                    s3exp_config = {
-                        Bucket: data.params.Bucket,
-                        Prefix: '',
-                        Delimiter: data.params.Delimiter
-                    };
-                    (s3exp_lister = s3list(s3exp_config, s3draw)).go();
-                });
-                // Else add the folders within the bucket
-            } else {
-                buildprefix += part + '/';
-
-                if (ii == parts.length - 1) {
-                    ipart = $('<li>').addClass('active').text(part);
-                } else {
-                    var a2 = $('<a>').attr('href', '#').append(part);
-                    ipart = $('<li>').append(a2);
-
-                    // Closure needed to enclose the saved S3 prefix
-                    (function () {
-                        var saveprefix = buildprefix;
-                        // console.log('Part: ' + part + ' has buildprefix: ' + saveprefix);
-                        a2.click(function (e) {
-                            e.preventDefault();
-                            console.log('Breadcrumb click object prefix: ' + saveprefix);
-                            s3exp_config = {
-                                Bucket: data.params.Bucket,
-                                Prefix: saveprefix,
-                                Delimiter: data.params.Delimiter
-                            };
-                            (s3exp_lister = s3list(s3exp_config, s3draw)).go();
-                        });
-                    })();
-                }
-            }
-            $('#breadcrumb').append(ipart);
-        });
-    }
-
-    function s3draw(data, complete) {
-        $('li.li-bucket').remove();
-        folder2breadcrumbs(data);
-
-        // Add the common prefixes (folders) returned by the listing to the table
-        $.each(data.CommonPrefixes, function (i, prefix) {
-            $('#tb-s3objects').DataTable().rows.add([{
-                Key: prefix.Prefix
-            }]);
-        });
-
-        // Add S3 objects to DataTable
-        $('#tb-s3objects').DataTable().rows.add(data.Contents).draw();
-    }
-
-    function s3list(config, completecb) {
-        console.log('s3list config: ' + JSON.stringify(config));
-        var params = {
-            Bucket: config.Bucket,
-            Prefix: config.Prefix,
-            Delimiter: config.Delimiter
-        };
-        var scope = {
-            Contents: [],
-            CommonPrefixes: [],
-            params: params,
-            stop: false,
-            completecb: completecb
-        };
-
-        return {
-            // This is the callback that the S3 API makes when an S3 listObjectsV2
-            // request completes (successfully or in error). Note that a single call
-            // to listObjectsV2 may not be enough to get all objects so we need to
-            // check if the returned data is truncated and, if so, make additional
-            // requests with the continuation token until we have all the objects.
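-            // Sketch of the same continuation-token pattern with the SDK's
-            // promise API (illustrative only, not used by this page):
-            //   var p = {Bucket: 'b'};
-            //   do {
-            //     data = await s3.listObjectsV2(p).promise();
-            //     p.ContinuationToken = data.NextContinuationToken;
-            //   } while (data.IsTruncated);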
-            cb: function (err, data) {
-                if (err) {
-                    console.log('Error: ' + JSON.stringify(err));
-                    console.log('Error: ' + err.stack);
-                    scope.stop = true;
-                    $('#bucket-loader').removeClass('fa-spin');
-                    bootbox.alert("Error accessing S3 bucket " + scope.params.Bucket + ". Error: " + err);
-                } else {
-                    // console.log('Data: ' + JSON.stringify(data));
-                    console.log("Options: " + $("input[name='optionsdepth']:checked").val());
-
-                    // Store marker before filtering data
-                    if (data.IsTruncated) {
-                        if (data.NextContinuationToken) {
-                            scope.params.ContinuationToken = data.NextContinuationToken;
-                        }
-                    }
-
-                    // Filter the folders out of the listed S3 objects
-                    // (could probably be done more efficiently)
-                    console.log("Filter: remove folders");
-                    data.Contents = data.Contents.filter(function (el) {
-                        return el.Key !== scope.params.Prefix;
-                    });
-
-                    // Accumulate the S3 objects and common prefixes
-                    scope.Contents.push.apply(scope.Contents, data.Contents);
-                    scope.CommonPrefixes.push.apply(scope.CommonPrefixes, data.CommonPrefixes);
-
-                    // Update badge count to show number of objects read
-                    $('#badgecount').text(scope.Contents.length + scope.CommonPrefixes.length);
-
-                    if (scope.stop) {
-                        console.log('Bucket ' + scope.params.Bucket + ' stopped');
-                    } else if (data.IsTruncated) {
-                        console.log('Bucket ' + scope.params.Bucket + ' truncated');
-                        s3.makeUnauthenticatedRequest('listObjectsV2', scope.params, scope.cb);
-                    } else {
-                        console.log('Bucket ' + scope.params.Bucket + ' has ' + scope.Contents.length + ' objects, including ' + scope.CommonPrefixes.length + ' prefixes');
-                        delete scope.params.ContinuationToken;
-                        if (scope.completecb) {
-                            scope.completecb(scope, true);
-                        }
-                        $('#bucket-loader').removeClass('fa-spin');
-                    }
-                }
-            },
-
-            // Start the spinner, clear the table, make an S3 listObjectsV2 request
-            go: function () {
-                scope.cb = this.cb;
-                $('#bucket-loader').addClass('fa-spin');
-                $('#tb-s3objects').DataTable().clear();
-                s3.makeUnauthenticatedRequest('listObjectsV2', scope.params, this.cb);
-            },
-
-            stop: function () {
-                scope.stop = true;
-                delete scope.params.ContinuationToken;
-                if (scope.completecb) {
-                    scope.completecb(scope, false);
-                }
-                $('#bucket-loader').removeClass('fa-spin');
-            }
-        };
-    }
-
-    function promptForBucketInput() {
-        bootbox.prompt("Please enter the S3 bucket name", function (result) {
-            if (result !== null) {
-                resetDepth();
-                s3exp_config = {
-                    Bucket: result,
-                    Delimiter: '/'
-                };
-                (s3exp_lister = s3list(s3exp_config, s3draw)).go();
-            }
-        });
-    }
-
-    function resetDepth() {
-        $('#tb-s3objects').DataTable().column(1).visible(false);
-        $('input[name="optionsdepth"]').val(['folder']);
-        $('input[name="optionsdepth"][value="bucket"]').parent().removeClass('active');
-        $('input[name="optionsdepth"][value="folder"]').parent().addClass('active');
-    }
-
-    $(document).ready(function () {
-        console.log('ready');
-
-        // Click handler for refresh button (to invoke manual refresh)
-        $('#bucket-loader').click(function (e) {
-            if ($('#bucket-loader').hasClass('fa-spin')) {
-                // Stop the S3 listing that is in flight
-                s3exp_lister.stop();
-            } else {
-                delete s3exp_config.ContinuationToken;
-                (s3exp_lister = s3list(s3exp_config, s3draw)).go();
-            }
-        });
-
-        // Click handler for bucket button (to allow user to change bucket)
-        $('#bucket-chooser').click(function (e) {
-            promptForBucketInput();
-        });
-
-        $('#hidefolders').click(function (e) {
-            $('#tb-s3objects').DataTable().draw();
-        });
-
-        // Folder/Bucket radio button handler
-        $("input:radio[name='optionsdepth']").change(function () {
-            console.log("Folder/Bucket option change to " + $(this).val());
-            console.log("Change options: " + $("input[name='optionsdepth']:checked").val());
-
-            // If the user selected bucket view then we need a full (non-delimited) list
-            if ($(this).val() == 'bucket') {
-                console.log("Switch to bucket");
-                $('#tb-s3objects').DataTable().column(1).visible(true);
-                delete s3exp_config.ContinuationToken;
-                delete s3exp_config.Prefix;
-                s3exp_config.Delimiter = '';
-                (s3exp_lister = s3list(s3exp_config, s3draw)).go();
-                // Else the user selected folder view, so a delimited list suffices
-            } else {
-                console.log("Switch to folder");
-                $('#tb-s3objects').DataTable().column(1).visible(false);
-                delete s3exp_config.ContinuationToken;
-                delete s3exp_config.Prefix;
-                s3exp_config.Delimiter = '/';
-                (s3exp_lister = s3list(s3exp_config, s3draw)).go();
-            }
-        });
-
-        function renderObject(data, type, full) {
-            if (isthisdocument(s3exp_config.Bucket, data)) {
-                console.log("is this document: " + data);
-                return fullpath2filename(data);
-            } else if (isfolder(data)) {
-                console.log("is folder: " + data);
-                return '<a data-s3="folder" data-prefix="' + data + '" href="' + object2hrefvirt(s3exp_config.Bucket, data) + '">' + prefix2folder(data) + '</a>';
-            } else {
-                console.log("not folder/this document: " + data);
-                return '<a data-s3="object" href="' + object2hrefvirt(s3exp_config.Bucket, data) + '"download="' + fullpath2filename(data) + '">' + fullpath2filename(data) + '</a>';
-            }
-        }
-
-        function renderFolder(data, type, full) {
-            return isfolder(data) ? "" : fullpath2pathname(data);
-        }
-
-        // Initial DataTable settings
-        $('#tb-s3objects').DataTable({
-            iDisplayLength: 50,
-            order: [
-                [1, 'asc'],
-                [0, 'asc']
-            ],
-            aoColumnDefs: [{
-                "aTargets": [0],
-                "mData": "Key",
-                "mRender": function (data, type, full) {
-                    return (type == 'display') ? renderObject(data, type, full) : data;
-                },
-                "sType": "key"
-            }, {
-                "aTargets": [1],
-                "mData": "Key",
-                "mRender": function (data, type, full) {
-                    return renderFolder(data, type, full);
-                }
-            }, {
-                "aTargets": [2],
-                "mData": "LastModified",
-                "mRender": function (data, type, full) {
-                    return data ? moment(data).fromNow() : "";
-                }
-            }, {
-                "aTargets": [3],
-                "mData": "LastModified",
-                "mRender": function (data, type, full) {
-                    return data ? moment(data).local().format('YYYY-MM-DD HH:mm:ss') : "";
-                }
-            }, {
-                "aTargets": [4],
-                "mData": function (source, type, val) {
-                    return source.Size ? ((type == 'display') ? bytesToSize(source.Size) : source.Size) : "";
-                }
-            }]
-        });
-
-        $('#tb-s3objects').DataTable().column(s3exp_columns.key).visible(false);
-        console.log("jQuery version=" + $.fn.jquery);
-
-        // Custom sort for the Key column so that folders appear before objects
-        $.fn.dataTableExt.oSort['key-asc'] = function (a, b) {
-            var x = (isfolder(a) ? "0-" + a : "1-" + a).toLowerCase();
-            var y = (isfolder(b) ? "0-" + b : "1-" + b).toLowerCase();
-            return ((x < y) ? -1 : ((x > y) ? 1 : 0));
-        };
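-        // Example: for keys "docs/" (folder) and "a.txt" (object) the asc sort
-        // compares "0-docs/" with "1-a.txt", so the folder sorts first even
-        // though "a.txt" < "docs/" alphabetically.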
-
-        $.fn.dataTableExt.oSort['key-desc'] = function (a, b) {
-            var x = (isfolder(a) ? "1-" + a : "0-" + a).toLowerCase();
-            var y = (isfolder(b) ? "1-" + b : "0-" + b).toLowerCase();
-            return ((x < y) ? 1 : ((x > y) ? -1 : 0));
-        };
-
-        // Allow user to hide folders
-        $.fn.dataTableExt.afnFiltering.push(function (oSettings, aData, iDataIndex) {
-            console.log("hide folders");
-            return $('#hidefolders').is(':checked') ? !isfolder(aData[0]) : true;
-        });
-
-        // Delegated event handler for S3 object/folder clicks. This is delegated
-        // because the object/folder rows are added dynamically and we do not want
-        // to have to assign click handlers to each and every row.
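-        // (A direct handler like $('#tb-s3objects a').click(...) would only
-        // bind to links present at bind time and miss rows DataTables adds
-        // later; the delegated form below covers them all.)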
-        $('#tb-s3objects').on('click', 'a', function (event) {
-            event.preventDefault();
-            var target = event.target;
-            console.log("target href=" + target.href);
-            console.log("target dataset=" + JSON.stringify(target.dataset));
-
-            // If the user has clicked on a folder then navigate into that folder
-            if (target.dataset.s3 === "folder") {
-                resetDepth();
-                delete s3exp_config.ContinuationToken;
-                s3exp_config.Prefix = target.dataset.prefix;
-                s3exp_config.Delimiter = $("input[name='optionsdepth']:checked").val() == "folder" ? "/" : "";
-                (s3exp_lister = s3list(s3exp_config, s3draw)).go();
-                // Else user has clicked on an object so download it in new window/tab
-            } else {
-                window.open(target.href, '_blank');
-            }
-            return false;
-        });
-
-        if (window.location.hash) {
-            console.log("Location hash=" + window.location.hash);
-            s3exp_config.Prefix = window.location.hash.substring(1);
-        }
-
-        // Do initial bucket list
-        (s3exp_lister = s3list(s3exp_config, s3draw)).go();
-    });
-</script>
diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/beans.xml b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/beans.xml
deleted file mode 100644
index cf00d29..0000000
--- a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/beans.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<beans xmlns="http://java.sun.com/xml/ns/javaee"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xsi:schemaLocation="
-      http://java.sun.com/xml/ns/javaee
-      http://java.sun.com/xml/ns/javaee/beans_1_0.xsd">
-</beans>
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml
deleted file mode 100644
index a3552f0..0000000
--- a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<web-app version="3.0" xmlns="http://java.sun.com/xml/ns/javaee"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd">
-  <servlet>
-    <servlet-name>jaxrs</servlet-name>
-    <servlet-class>org.glassfish.jersey.servlet.ServletContainer</servlet-class>
-    <init-param>
-      <param-name>javax.ws.rs.Application</param-name>
-      <param-value>org.apache.hadoop.ozone.s3.GatewayApplication</param-value>
-    </init-param>
-    <load-on-startup>1</load-on-startup>
-  </servlet>
-  <servlet-mapping>
-    <servlet-name>jaxrs</servlet-name>
-    <url-pattern>/*</url-pattern>
-  </servlet-mapping>
-
-  <listener>
-    <listener-class>org.jboss.weld.environment.servlet.Listener</listener-class>
-  </listener>
-
-
-</web-app>
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/images/ozone.ico b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/images/ozone.ico
deleted file mode 100755
index 72886ea..0000000
--- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/images/ozone.ico
+++ /dev/null
Binary files differ
diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html
deleted file mode 100644
index b20bf35..0000000
--- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html
+++ /dev/null
@@ -1,83 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
-        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<html lang="en">
-<head>
-    <meta charset="utf-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-    <meta http-equiv="Content-Security-Policy" content="script-src 'self';">
-    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
-    <meta name="description" content="Apache Hadoop Ozone S3 gateway">
-
-    <title>S3 gateway -- Apache Hadoop Ozone</title>
-
-    <link href="bootstrap-3.4.1/css/bootstrap.min.css" rel="stylesheet">
-    <link href="hadoop.css" rel="stylesheet">
-    <link href="ozone.css" rel="stylesheet">
-
-</head>
-
-<body>
-
-<header class="navbar navbar-inverse navbar-fixed-top bs-docs-nav">
-    <div class="container-fluid">
-        <div class="navbar-header">
-            <button type="button" class="navbar-toggle collapsed"
-                    data-toggle="collapse" data-target="#navbar"
-                    aria-expanded="false" aria-controls="navbar">
-                <span class="sr-only">Toggle navigation</span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-            </button>
-            <a class="navbar-brand" href="#">Ozone S3G</a>
-        </div>
-        <div id="navbar" class="collapse navbar-collapse">
-            <ul class="nav navbar-nav" id="ui-tabs">
-                <li><a href="docs">Documentation</a></li>
-
-            </ul>
-        </div><!--/.nav-collapse -->
-
-    </div>
-</header>
-
-
-<div class="container-fluid" style="margin: 12pt">
-
-    <h1>S3 gateway</h1>
-
-    <p>This is an endpoint of the Apache Hadoop Ozone S3 gateway. Use it
-        with any AWS S3 compatible tool
-        by setting this URL as the endpoint.</p>
-
-    <p>For example with aws-cli:</p>
-
-    <pre>aws s3api --endpoint-url <span id="s3gurl"></span> create-bucket --bucket=wordcount</pre>
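-
-    <p>Or, to list buckets against a local gateway (hypothetical endpoint
-        URL):</p>
-
-    <pre>aws s3api --endpoint-url http://localhost:9878 list-buckets</pre>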
-
-    <p>For more information, please check the <a href="docs">documentation</a>.
-    </p>
-</div><!-- /.container -->
-
-<script src="jquery-3.4.1.min.js"></script>
-<script src="bootstrap-3.4.1/js/bootstrap.min.js"></script>
-<script src="s3g.js"></script>
-
-</body>
-</html>
diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js
deleted file mode 100644
index 8b1e977..0000000
--- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-window.onload = function () {
-    var safeurl = window.location.protocol + "//" + window.location.host + window.location.pathname;
-    safeurl = safeurl.replace("static/", "");
-    document.getElementById('s3gurl').innerHTML = safeurl;
-};
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
deleted file mode 100644
index 4feaca6..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.client;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_EMPTY;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.S3_BUCKET_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
-
-/**
- * ObjectStore implementation with in-memory state.
- */
-public class ObjectStoreStub extends ObjectStore {
-
-  public ObjectStoreStub() {
-    super();
-  }
-
-  private Map<String, OzoneVolumeStub> volumes = new HashMap<>();
-  private Map<String, String> bucketVolumeMap = new HashMap<>();
-  private Map<String, Boolean> bucketEmptyStatus = new HashMap<>();
-  private Map<String, List<OzoneBucket>> userBuckets = new HashMap<>();
-
-  @Override
-  public void createVolume(String volumeName) throws IOException {
-    createVolume(volumeName,
-        VolumeArgs.newBuilder()
-            .setAdmin("root")
-            .setOwner("root")
-            .setQuota("" + Integer.MAX_VALUE)
-            .setAcls(new ArrayList<>()).build());
-  }
-
-  @Override
-  public void createVolume(String volumeName, VolumeArgs volumeArgs)
-      throws IOException {
-    OzoneVolumeStub volume =
-        new OzoneVolumeStub(volumeName,
-            volumeArgs.getAdmin(),
-            volumeArgs.getOwner(),
-            Long.parseLong(volumeArgs.getQuota()),
-            System.currentTimeMillis(),
-            volumeArgs.getAcls());
-    volumes.put(volumeName, volume);
-  }
-
-  @Override
-  public OzoneVolume getVolume(String volumeName) throws IOException {
-    if (volumes.containsKey(volumeName)) {
-      return volumes.get(volumeName);
-    } else {
-      throw new OMException("", VOLUME_NOT_FOUND);
-    }
-  }
-
-  @Override
-  public Iterator<? extends OzoneVolume> listVolumes(String volumePrefix)
-      throws IOException {
-    return volumes.values()
-        .stream()
-        .filter(volume -> volume.getName().startsWith(volumePrefix))
-        .collect(Collectors.toList())
-        .iterator();
-
-  }
-
-  @Override
-  public Iterator<? extends OzoneVolume> listVolumes(String volumePrefix,
-      String prevVolume) throws IOException {
-    return volumes.values()
-        .stream()
-        .filter(volume -> volume.getName().compareTo(prevVolume) > 0)
-        .filter(volume -> volume.getName().startsWith(volumePrefix))
-        .collect(Collectors.toList())
-        .iterator();
-  }
-
-  @Override
-  public Iterator<? extends OzoneVolume> listVolumesByUser(String user,
-      String volumePrefix, String prevVolume) throws IOException {
-    return volumes.values()
-        .stream()
-        .filter(volume -> volume.getOwner().equals(user))
-        // list volumes strictly after prevVolume, matching listVolumes above
-        .filter(volume -> volume.getName().compareTo(prevVolume) > 0)
-        .filter(volume -> volume.getName().startsWith(volumePrefix))
-        .collect(Collectors.toList())
-        .iterator();
-  }
-
-  @Override
-  public void deleteVolume(String volumeName) throws IOException {
-    volumes.remove(volumeName);
-  }
-
-  @Override
-  public void createS3Bucket(String userName, String s3BucketName) throws
-      IOException {
-    String volumeName = "s3" + userName;
-    if (bucketVolumeMap.get(s3BucketName) == null) {
-      bucketVolumeMap.put(s3BucketName, volumeName + "/" + s3BucketName);
-      bucketEmptyStatus.put(s3BucketName, true);
-      createVolume(volumeName);
-      volumes.get(volumeName).createBucket(s3BucketName);
-    } else {
-      throw new OMException("", BUCKET_ALREADY_EXISTS);
-    }
-
-    if (userBuckets.get(userName) == null) {
-      List<OzoneBucket> ozoneBuckets = new ArrayList<>();
-      ozoneBuckets.add(volumes.get(volumeName).getBucket(s3BucketName));
-      userBuckets.put(userName, ozoneBuckets);
-    } else {
-      userBuckets.get(userName).add(volumes.get(volumeName).getBucket(
-          s3BucketName));
-    }
-  }
-
-  public Iterator<? extends OzoneBucket> listS3Buckets(String userName,
-                                                       String bucketPrefix) {
-    if (userBuckets.get(userName) == null) {
-      return new ArrayList<OzoneBucket>().iterator();
-    } else {
-      return userBuckets.get(userName).parallelStream()
-          .filter(ozoneBucket -> {
-            if (bucketPrefix != null) {
-              return ozoneBucket.getName().startsWith(bucketPrefix);
-            } else {
-              return true;
-            }
-          }).collect(Collectors.toList())
-          .iterator();
-    }
-  }
-
-  public Iterator<? extends OzoneBucket> listS3Buckets(String userName,
-                                                       String bucketPrefix,
-                                                       String prevBucket) {
-
-    if (userBuckets.get(userName) == null) {
-      return new ArrayList<OzoneBucket>().iterator();
-    } else {
-      //Sort buckets lexicographically by name
-      userBuckets.get(userName).sort(
-          (bucket1, bucket2) -> bucket1.getName().compareTo(bucket2.getName()));
-      return userBuckets.get(userName).stream()
-          .filter(ozoneBucket -> {
-            if (prevBucket != null) {
-              return ozoneBucket.getName().compareTo(prevBucket) > 0;
-            } else {
-              return true;
-            }
-          })
-          .filter(ozoneBucket -> {
-            if (bucketPrefix != null) {
-              return ozoneBucket.getName().startsWith(bucketPrefix);
-            } else {
-              return true;
-            }
-          }).collect(Collectors.toList())
-          .iterator();
-    }
-  }
-
-  @Override
-  public void deleteS3Bucket(String s3BucketName) throws
-      IOException {
-    if (bucketVolumeMap.containsKey(s3BucketName)) {
-      if (bucketEmptyStatus.get(s3BucketName)) {
-        bucketVolumeMap.remove(s3BucketName);
-      } else {
-        throw new OMException("", BUCKET_NOT_EMPTY);
-      }
-    } else {
-      throw new OMException("", BUCKET_NOT_FOUND);
-    }
-  }
-
-  @Override
-  public String getOzoneBucketMapping(String s3BucketName) throws IOException {
-    if (bucketVolumeMap.get(s3BucketName) == null) {
-      throw new OMException("", S3_BUCKET_NOT_FOUND);
-    }
-    return bucketVolumeMap.get(s3BucketName);
-  }
-
-  @Override
-  @SuppressWarnings("StringSplitter")
-  public String getOzoneVolumeName(String s3BucketName) throws IOException {
-    if (bucketVolumeMap.get(s3BucketName) == null) {
-      throw new OMException("", S3_BUCKET_NOT_FOUND);
-    }
-    return bucketVolumeMap.get(s3BucketName).split("/")[0];
-  }
-
-  @Override
-  @SuppressWarnings("StringSplitter")
-  public String getOzoneBucketName(String s3BucketName) throws IOException {
-    if (bucketVolumeMap.get(s3BucketName) == null) {
-      throw new OMException("", BUCKET_NOT_FOUND);
-    }
-    return bucketVolumeMap.get(s3BucketName).split("/")[1];
-  }
-
-  public void setBucketEmptyStatus(String bucketName, boolean status) {
-    bucketEmptyStatus.put(bucketName, status);
-  }
-}
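-// Illustrative (hypothetical) test usage of the stub defined above:
-//   ObjectStore store = new ObjectStoreStub();
-//   store.createS3Bucket("testuser", "bucket1");
-//   store.getOzoneVolumeName("bucket1");   // returns "s3testuser"
-//   store.getOzoneBucketName("bucket1");   // returns "bucket1"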
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
deleted file mode 100644
index e8ebf02..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.client;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.UUID;
-import java.util.stream.Collectors;
-
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts.PartInfo;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.util.Time;
-
-/**
- * In-memory ozone bucket for testing.
- */
-public class OzoneBucketStub extends OzoneBucket {
-
-  private Map<String, OzoneKeyDetails> keyDetails = new HashMap<>();
-
-  private Map<String, byte[]> keyContents = new HashMap<>();
-
-  private Map<String, String> multipartUploadIdMap = new HashMap<>();
-
-  private Map<String, Map<Integer, Part>> partList = new HashMap<>();
-
-  /**
-   * Constructs OzoneBucket instance.
-   *
-   * @param volumeName   Name of the volume the bucket belongs to.
-   * @param bucketName   Name of the bucket.
-   * @param storageType  StorageType of the bucket.
-   * @param versioning   versioning status of the bucket.
-   * @param creationTime creation time of the bucket.
-   */
-  public OzoneBucketStub(
-      String volumeName,
-      String bucketName,
-      StorageType storageType, Boolean versioning,
-      long creationTime) {
-    super(volumeName,
-        bucketName,
-        ReplicationFactor.ONE,
-        ReplicationType.STAND_ALONE,
-        storageType,
-        versioning,
-        creationTime);
-  }
-
-  @Override
-  public OzoneOutputStream createKey(String key, long size) throws IOException {
-    return createKey(key, size, ReplicationType.STAND_ALONE,
-        ReplicationFactor.ONE, new HashMap<>());
-  }
-
-  @Override
-  public OzoneOutputStream createKey(String key, long size,
-                                     ReplicationType type,
-                                     ReplicationFactor factor,
-                                     Map<String, String> metadata)
-      throws IOException {
-    ByteArrayOutputStream byteArrayOutputStream =
-        new ByteArrayOutputStream((int) size) {
-          @Override
-          public void close() throws IOException {
-            keyContents.put(key, toByteArray());
-            keyDetails.put(key, new OzoneKeyDetails(
-                getVolumeName(),
-                getName(),
-                key,
-                size,
-                System.currentTimeMillis(),
-                System.currentTimeMillis(),
-                new ArrayList<>(), type, metadata, null,
-                factor.getValue()
-            ));
-            super.close();
-          }
-        };
-    return new OzoneOutputStream(byteArrayOutputStream);
-  }
-
-  @Override
-  public OzoneInputStream readKey(String key) throws IOException {
-    return new OzoneInputStream(new ByteArrayInputStream(keyContents.get(key)));
-  }
-
-  @Override
-  public OzoneKeyDetails getKey(String key) throws IOException {
-    if (keyDetails.containsKey(key)) {
-      return keyDetails.get(key);
-    } else {
-      throw new OMException(ResultCodes.KEY_NOT_FOUND);
-    }
-  }
-
-  @Override
-  public Iterator<? extends OzoneKey> listKeys(String keyPrefix) {
-    Map<String, OzoneKey> sortedKey = new TreeMap<String, OzoneKey>(keyDetails);
-    return sortedKey.values()
-        .stream()
-        .filter(key -> key.getName().startsWith(keyPrefix))
-        .collect(Collectors.toList())
-        .iterator();
-  }
-
-  @Override
-  public Iterator<? extends OzoneKey> listKeys(String keyPrefix,
-      String prevKey) {
-    Map<String, OzoneKey> sortedKey = new TreeMap<String, OzoneKey>(keyDetails);
-    return sortedKey.values()
-        .stream()
-        .filter(key -> key.getName().compareTo(prevKey) > 0)
-        .filter(key -> key.getName().startsWith(keyPrefix))
-        .collect(Collectors.toList())
-        .iterator();
-  }
-
-  @Override
-  public void deleteKey(String key) throws IOException {
-    keyDetails.remove(key);
-  }
-
-  @Override
-  public void renameKey(String fromKeyName, String toKeyName)
-      throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public OmMultipartInfo initiateMultipartUpload(String keyName,
-                                                 ReplicationType type,
-                                                 ReplicationFactor factor)
-      throws IOException {
-    String uploadID = UUID.randomUUID().toString();
-    multipartUploadIdMap.put(keyName, uploadID);
-    return new OmMultipartInfo(getVolumeName(), getName(), keyName, uploadID);
-  }
-
-  @Override
-  public OzoneOutputStream createMultipartKey(String key, long size,
-                                              int partNumber, String uploadID)
-      throws IOException {
-    String multipartUploadID = multipartUploadIdMap.get(key);
-    if (multipartUploadID == null || !multipartUploadID.equals(uploadID)) {
-      throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-    } else {
-      ByteArrayOutputStream byteArrayOutputStream =
-          new ByteArrayOutputStream((int) size) {
-            @Override
-            public void close() throws IOException {
-              Part part = new Part(key + size,
-                  toByteArray());
-              if (partList.get(key) == null) {
-                Map<Integer, Part> parts = new TreeMap<>();
-                parts.put(partNumber, part);
-                partList.put(key, parts);
-              } else {
-                partList.get(key).put(partNumber, part);
-              }
-            }
-          };
-      return new OzoneOutputStreamStub(byteArrayOutputStream, key + size);
-    }
-  }
-
-  @Override
-  public OmMultipartUploadCompleteInfo completeMultipartUpload(String key,
-      String uploadID, Map<Integer, String> partsMap) throws IOException {
-
-    if (multipartUploadIdMap.get(key) == null) {
-      throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-    } else {
-      final Map<Integer, Part> partsList = partList.get(key);
-
-      if (partsMap.size() != partsList.size()) {
-        throw new OMException(ResultCodes.MISMATCH_MULTIPART_LIST);
-      }
-
-      int count = 1;
-
-      ByteArrayOutputStream output = new ByteArrayOutputStream();
-
-      for (Map.Entry<Integer, String> part: partsMap.entrySet()) {
-        Part recordedPart = partsList.get(part.getKey());
-        if (part.getKey() != count) {
-          throw new OMException(ResultCodes.MISSING_UPLOAD_PARTS);
-        } else {
-          if (!part.getValue().equals(recordedPart.getPartName())) {
-            throw new OMException(ResultCodes.MISMATCH_MULTIPART_LIST);
-          } else {
-            count++;
-            output.write(recordedPart.getContent());
-          }
-        }
-      }
-      keyContents.put(key, output.toByteArray());
-    }
-
-    return new OmMultipartUploadCompleteInfo(getVolumeName(), getName(), key,
-        DigestUtils.sha256Hex(key));
-  }
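-  // Illustrative (hypothetical) flow for the method above: after uploading
-  // parts 1 and 2 via createMultipartKey, completeMultipartUpload succeeds
-  // only when partsMap maps {1 -> recordedPartName1, 2 -> recordedPartName2};
-  // any gap, reorder, or name mismatch raises MISSING_UPLOAD_PARTS or
-  // MISMATCH_MULTIPART_LIST.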
-
-  @Override
-  public void abortMultipartUpload(String keyName, String uploadID) throws
-      IOException {
-    if (multipartUploadIdMap.get(keyName) == null) {
-      throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-    } else {
-      multipartUploadIdMap.remove(keyName);
-    }
-  }
-
-  @Override
-  public OzoneMultipartUploadPartListParts listParts(String key,
-      String uploadID, int partNumberMarker, int maxParts) throws IOException {
-    if (multipartUploadIdMap.get(key) == null) {
-      throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-    }
-    List<PartInfo> partInfoList = new ArrayList<>();
-
-    if (partList.get(key) == null) {
-      return new OzoneMultipartUploadPartListParts(ReplicationType.RATIS,
-          ReplicationFactor.ONE, 0, false);
-    } else {
-      Map<Integer, Part> partMap = partList.get(key);
-      Iterator<Map.Entry<Integer, Part>> partIterator =
-          partMap.entrySet().iterator();
-
-      int count = 0;
-      int nextPartNumberMarker = 0;
-      boolean truncated = false;
-      while (count < maxParts && partIterator.hasNext()) {
-        Map.Entry<Integer, Part> partEntry = partIterator.next();
-        nextPartNumberMarker = partEntry.getKey();
-        if (partEntry.getKey() > partNumberMarker) {
-          PartInfo partInfo = new PartInfo(partEntry.getKey(),
-              partEntry.getValue().getPartName(),
-              partEntry.getValue().getContent().length, Time.now());
-          partInfoList.add(partInfo);
-          count++;
-        }
-      }
-
-      if (partIterator.hasNext()) {
-        truncated = true;
-      } else {
-        truncated = false;
-        nextPartNumberMarker = 0;
-      }
-
-      OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
-          new OzoneMultipartUploadPartListParts(ReplicationType.RATIS,
-              ReplicationFactor.ONE,
-              nextPartNumberMarker, truncated);
-      ozoneMultipartUploadPartListParts.addAllParts(partInfoList);
-
-      return ozoneMultipartUploadPartListParts;
-
-    }
-
-  }
-
-  /**
-   * Class used to hold part information in an upload part request.
-   */
-  public class Part {
-    private String partName;
-    private byte[] content;
-
-    public Part(String name, byte[] data) {
-      this.partName = name;
-      this.content = data;
-    }
-
-    public String getPartName() {
-      return partName;
-    }
-
-    public byte[] getContent() {
-      return content;
-    }
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneClientStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneClientStub.java
deleted file mode 100644
index 3c7a253..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneClientStub.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.client;
-
-import java.io.IOException;
-
-/**
- * In-memory OzoneClient for testing.
- */
-public class OzoneClientStub extends OzoneClient {
-
-  public OzoneClientStub() {
-    super(new ObjectStoreStub());
-  }
-
-  @Override
-  public void close() throws IOException {
-    //NOOP.
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java
deleted file mode 100644
index 28e377b4..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.hadoop.ozone.client;
-
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * OzoneOutputStream stub for testing.
- */
-public class OzoneOutputStreamStub extends OzoneOutputStream {
-
-  private final String partName;
-
-  /**
-   * Constructs OzoneOutputStreamStub with outputStream and partName.
-   *
-   * @param outputStream
-   * @param name - partName
-   */
-  public OzoneOutputStreamStub(OutputStream outputStream, String name) {
-    super(outputStream);
-    this.partName = name;
-  }
-
-  @Override
-  public void write(int b) throws IOException {
-    getOutputStream().write(b);
-  }
-
-  @Override
-  public void write(byte[] b, int off, int len) throws IOException {
-    getOutputStream().write(b, off, len);
-  }
-
-  @Override
-  public synchronized void flush() throws IOException {
-    getOutputStream().flush();
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    //commitKey can be done here, if needed.
-    getOutputStream().close();
-  }
-
-  @Override
-  public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() {
-    return new OmMultipartCommitUploadPartInfo(partName);
-  }
-
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java
deleted file mode 100644
index 8997260..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.client;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.OzoneAcl;
-
-/**
- * Ozone volume with in-memory state for testing.
- */
-public class OzoneVolumeStub extends OzoneVolume {
-
-  private Map<String, OzoneBucketStub> buckets = new HashMap<>();
-
-  public OzoneVolumeStub(String name, String admin, String owner,
-      long quotaInBytes,
-      long creationTime, List<OzoneAcl> acls) {
-    super(name, admin, owner, quotaInBytes, creationTime, acls);
-  }
-
-  @Override
-  public void createBucket(String bucketName) throws IOException {
-    createBucket(bucketName, new BucketArgs.Builder()
-        .setStorageType(StorageType.DEFAULT)
-        .setVersioning(false)
-        .build());
-  }
-
-  @Override
-  public void createBucket(String bucketName, BucketArgs bucketArgs)
-      throws IOException {
-    buckets.put(bucketName, new OzoneBucketStub(
-        getName(),
-        bucketName,
-        bucketArgs.getStorageType(),
-        bucketArgs.getVersioning(),
-        System.currentTimeMillis()));
-
-  }
-
-  @Override
-  public OzoneBucket getBucket(String bucketName) throws IOException {
-    if (buckets.containsKey(bucketName)) {
-      return buckets.get(bucketName);
-    } else {
-      throw new IOException("BUCKET_NOT_FOUND");
-    }
-
-  }
-
-  @Override
-  public Iterator<? extends OzoneBucket> listBuckets(String bucketPrefix) {
-    return buckets.values()
-        .stream()
-        .filter(bucket -> {
-          if (bucketPrefix != null) {
-            return bucket.getName().startsWith(bucketPrefix);
-          } else {
-            return true;
-          }
-        })
-        .collect(Collectors.toList())
-        .iterator();
-  }
-
-  @Override
-  public Iterator<? extends OzoneBucket> listBuckets(String bucketPrefix,
-      String prevBucket) {
-    return buckets.values()
-        .stream()
-        .filter(bucket -> bucket.getName().compareTo(prevBucket) > 0)
-        .filter(bucket -> bucket.getName().startsWith(bucketPrefix))
-        .collect(Collectors.toList())
-        .iterator();
-  }
-
-  @Override
-  public void deleteBucket(String bucketName) throws IOException {
-    if (buckets.containsKey(bucketName)) {
-      buckets.remove(bucketName);
-    } else {
-      throw new IOException("BUCKET_NOT_FOUND");
-    }
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/package-info.java
deleted file mode 100644
index 10e4274..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * In-memory OzoneClient implementation to test REST endpoints.
- */
-package org.apache.hadoop.ozone.client;
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
deleted file mode 100644
index 252d87b..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import javax.ws.rs.container.ContainerRequestContext;
-import javax.ws.rs.core.MultivaluedHashMap;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.core.UriInfo;
-import java.io.IOException;
-import java.net.URI;
-import java.util.Arrays;
-import java.util.Collection;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.test.LambdaTestUtils;
-
-import static org.apache.hadoop.ozone.s3.AWSAuthParser.AUTHORIZATION_HEADER;
-import static org.apache.hadoop.ozone.s3.AWSAuthParser.CONTENT_MD5;
-import static org.apache.hadoop.ozone.s3.AWSAuthParser.CONTENT_TYPE;
-import static org.apache.hadoop.ozone.s3.AWSAuthParser.HOST_HEADER;
-import static org.apache.hadoop.ozone.s3.AWSAuthParser.X_AMAZ_DATE;
-import static org.apache.hadoop.ozone.s3.AWSAuthParser.X_AMZ_CONTENT_SHA256;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.mockito.Mockito;
-
-/**
- * Test class for {@link OzoneClientProducer}.
- */
-@RunWith(Parameterized.class)
-public class TestOzoneClientProducer {
-
-  private OzoneClientProducer producer;
-  private MultivaluedMap<String, String> headerMap;
-  private MultivaluedMap<String, String> queryMap;
-  private String authHeader;
-  private String contentMd5;
-  private String host;
-  private String amzContentSha256;
-  private String date;
-  private String contentType;
-
-
-  private ContainerRequestContext context;
-  private UriInfo uriInfo;
-
-  public TestOzoneClientProducer(String authHeader, String contentMd5,
-      String host, String amzContentSha256, String date, String contentType)
-      throws Exception {
-    this.authHeader = authHeader;
-    this.contentMd5 = contentMd5;
-    this.host = host;
-    this.amzContentSha256 = amzContentSha256;
-    this.date = date;
-    this.contentType = contentType;
-    producer = new OzoneClientProducer();
-    headerMap = new MultivaluedHashMap<>();
-    queryMap = new MultivaluedHashMap<>();
-    uriInfo = Mockito.mock(UriInfo.class);
-    context = Mockito.mock(ContainerRequestContext.class);
-    OzoneConfiguration config = new OzoneConfiguration();
-    config.setBoolean(OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY, true);
-    config.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "");
-    setupContext();
-    producer.setContext(context);
-    producer.setOzoneConfiguration(config);
-  }
-
-  @Test
-  public void testGetClientFailure() throws Exception {
-    LambdaTestUtils.intercept(IOException.class, "Couldn't create",
-        () -> producer.createClient());
-  }
-
-  private void setupContext() throws Exception {
-    headerMap.putSingle(AUTHORIZATION_HEADER, authHeader);
-    headerMap.putSingle(CONTENT_MD5, contentMd5);
-    headerMap.putSingle(HOST_HEADER, host);
-    headerMap.putSingle(X_AMZ_CONTENT_SHA256, amzContentSha256);
-    headerMap.putSingle(X_AMAZ_DATE, date);
-    headerMap.putSingle(CONTENT_TYPE, contentType);
-
-    Mockito.when(uriInfo.getQueryParameters()).thenReturn(queryMap);
-    Mockito.when(uriInfo.getRequestUri()).thenReturn(new URI(""));
-
-    Mockito.when(context.getUriInfo()).thenReturn(uriInfo);
-    Mockito.when(context.getHeaders()).thenReturn(headerMap);
-    Mockito.when(context.getHeaderString(AUTHORIZATION_HEADER))
-        .thenReturn(authHeader);
-    Mockito.when(context.getUriInfo().getQueryParameters())
-        .thenReturn(queryMap);
-  }
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {
-            "AWS4-HMAC-SHA256 Credential=testuser1/20190221/us-west-1/s3" +
-                "/aws4_request, SignedHeaders=content-md5;host;" +
-                "x-amz-content-sha256;x-amz-date, " +
-                "Signature" +
-                "=56ec73ba1974f8feda8365c3caef89c5d4a688d5f9baccf47" +
-                "65f46a14cd745ad",
-            "Zi68x2nPDDXv5qfDC+ZWTg==",
-            "s3g:9878",
-            "e2bd43f11c97cde3465e0e8d1aad77af7ec7aa2ed8e213cd0e24" +
-                "1e28375860c6",
-            "20190221T002037Z",
-            ""
-        },
-        {
-            "AWS4-HMAC-SHA256 " +
-                "Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request," +
-                " SignedHeaders=content-type;host;x-amz-date, " +
-                "Signature=" +
-                "5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400" +
-                "e06b5924a6f2b5d7",
-            "",
-            "iam.amazonaws.com",
-            "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
-            "20150830T123600Z",
-            "application/x-www-form-urlencoded; charset=utf-8"
-        }
-    });
-  }
-
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java
deleted file mode 100644
index 3599c05..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.charset.Charset;
-
-import org.apache.commons.io.IOUtils;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test input stream parsing with signatures.
- */
-public class TestSignedChunksInputStream {
-
-  @Test
-  public void emptyfile() throws IOException {
-    InputStream is = fileContent("0;chunk-signature"
-        +
-        "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40");
-    String result = IOUtils.toString(is, Charset.forName("UTF-8"));
-    Assert.assertEquals("", result);
-
-    is = fileContent("0;chunk-signature"
-        +
-        "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40\r"
-        + "\n");
-    result = IOUtils.toString(is, Charset.forName("UTF-8"));
-    Assert.assertEquals("", result);
-  }
-
-  @Test
-  public void singlechunk() throws IOException {
-    //test simple read()
-    InputStream is = fileContent("0A;chunk-signature"
-        +
-        "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40\r"
-        + "\n1234567890\r\n");
-    String result = IOUtils.toString(is, Charset.forName("UTF-8"));
-    Assert.assertEquals("1234567890", result);
-
-    //test read(byte[],int,int)
-    is = fileContent("0A;chunk-signature"
-        +
-        "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40\r"
-        + "\n1234567890\r\n");
-    byte[] bytes = new byte[10];
-    IOUtils.read(is, bytes, 0, 10);
-    Assert.assertEquals("1234567890", new String(bytes));
-  }
-
-  @Test
-  public void singlechunkwithoutend() throws IOException {
-    //test simple read()
-    InputStream is = fileContent("0A;chunk-signature"
-        +
-        "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40\r"
-        + "\n1234567890");
-    String result = IOUtils.toString(is, Charset.forName("UTF-8"));
-    Assert.assertEquals("1234567890", result);
-
-    //test read(byte[],int,int)
-    is = fileContent("0A;chunk-signature"
-        +
-        "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40\r"
-        + "\n1234567890");
-    byte[] bytes = new byte[10];
-    IOUtils.read(is, bytes, 0, 10);
-    Assert.assertEquals("1234567890", new String(bytes));
-  }
-
-  @Test
-  public void multichunks() throws IOException {
-    //test simple read()
-    InputStream is = fileContent("0a;chunk-signature=signature\r\n"
-        + "1234567890\r\n"
-        + "05;chunk-signature=signature\r\n"
-        + "abcde\r\n");
-    String result = IOUtils.toString(is, Charset.forName("UTF-8"));
-    Assert.assertEquals("1234567890abcde", result);
-
-    //test read(byte[],int,int)
-    is = fileContent("0a;chunk-signature=signature\r\n"
-        + "1234567890\r\n"
-        + "05;chunk-signature=signature\r\n"
-        + "abcde\r\n");
-    byte[] bytes = new byte[15];
-    IOUtils.read(is, bytes, 0, 15);
-    Assert.assertEquals("1234567890abcde", new String(bytes));
-  }
-
-  private InputStream fileContent(String content) {
-    return new SignedChunksInputStream(
-        new ByteArrayInputStream(content.getBytes()));
-  }
-}
\ No newline at end of file
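
The frames exercised above follow the aws-chunked upload format: "<hex-size>;chunk-signature=<signature>\r\n", then the chunk payload and a trailing CRLF, with a zero-size chunk ending the stream. The deleted SignedChunksInputStream is the real implementation; the following is only a simplified sketch of the framing, assuming well-formed character input:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;

/** Simplified decoder for the aws-chunked framing shown in the tests. */
public final class SignedChunkDecoderExample {

  public static String decode(String raw) throws IOException {
    BufferedReader reader = new BufferedReader(new StringReader(raw));
    StringBuilder data = new StringBuilder();
    String header;
    while ((header = reader.readLine()) != null) {
      // Header line: "<hex-size>;chunk-signature=<signature>"
      int size = Integer.parseInt(header.substring(0, header.indexOf(';')), 16);
      if (size == 0) {
        break; // a zero-size chunk terminates the stream
      }
      char[] chunk = new char[size];
      int read = reader.read(chunk, 0, size);
      if (read > 0) {
        data.append(chunk, 0, read);
      }
      reader.readLine(); // consume the CRLF that follows the payload
    }
    return data.toString();
  }
}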
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
deleted file mode 100644
index eead447..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.s3;
-
-import org.apache.hadoop.fs.InvalidRequestException;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.s3.header.AuthenticationHeaderParser;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.glassfish.jersey.internal.PropertiesDelegate;
-import org.glassfish.jersey.server.ContainerRequest;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.SecurityContext;
-import java.net.URI;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * This class tests conversion of virtual-host-style mappings to path style.
- */
-public class TestVirtualHostStyleFilter {
-
-  private static OzoneConfiguration conf;
-  private static String s3HttpAddr;
-  private AuthenticationHeaderParser authenticationHeaderParser;
-
-  @Before
-  public void setup() {
-    conf = new OzoneConfiguration();
-    s3HttpAddr = "localhost:9878";
-    conf.set(S3GatewayConfigKeys.OZONE_S3G_HTTP_ADDRESS_KEY, s3HttpAddr);
-    s3HttpAddr = s3HttpAddr.substring(0, s3HttpAddr.lastIndexOf(":"));
-    conf.set(S3GatewayConfigKeys.OZONE_S3G_DOMAIN_NAME, s3HttpAddr);
-    authenticationHeaderParser = new AuthenticationHeaderParser();
-    authenticationHeaderParser.setAuthHeader("AWS ozone:secret");
-  }
-
-  /**
-   * Create a ContainerRequest for the given host, path and query string.
-   * @return ContainerRequest
-   * @throws Exception if the request URIs cannot be constructed
-   */
-  public ContainerRequest createContainerRequest(String host, String path,
-                                                 String queryParams,
-                                                 boolean virtualHostStyle)
-      throws Exception {
-    URI baseUri = new URI("http://" + s3HttpAddr);
-    URI virtualHostStyleUri;
-    if (path == null && queryParams == null) {
-      virtualHostStyleUri = new URI("http://" + s3HttpAddr);
-    } else if (path != null && queryParams == null) {
-      virtualHostStyleUri = new URI("http://" + s3HttpAddr + path);
-    } else if (path != null && queryParams != null) {
-      virtualHostStyleUri = new URI("http://" + s3HttpAddr + path +
-          queryParams);
-    } else {
-      virtualHostStyleUri = new URI("http://" + s3HttpAddr  + queryParams);
-    }
-    URI pathStyleUri;
-    if (queryParams == null) {
-      pathStyleUri = new URI("http://" + s3HttpAddr + path);
-    } else {
-      pathStyleUri = new URI("http://" + s3HttpAddr + path + queryParams);
-    }
-    String httpMethod = "DELETE";
-    SecurityContext securityContext = Mockito.mock(SecurityContext.class);
-    PropertiesDelegate propertiesDelegate = Mockito.mock(PropertiesDelegate
-        .class);
-    ContainerRequest containerRequest;
-    if (virtualHostStyle) {
-      containerRequest = new ContainerRequest(baseUri, virtualHostStyleUri,
-          httpMethod, securityContext, propertiesDelegate);
-      containerRequest.header(HttpHeaders.HOST, host);
-    } else {
-      containerRequest = new ContainerRequest(baseUri, pathStyleUri,
-          httpMethod, securityContext, propertiesDelegate);
-      containerRequest.header(HttpHeaders.HOST, host);
-    }
-    return containerRequest;
-  }
-
-  @Test
-  public void testVirtualHostStyle() throws Exception {
-    VirtualHostStyleFilter virtualHostStyleFilter =
-        new VirtualHostStyleFilter();
-    virtualHostStyleFilter.setConfiguration(conf);
-    virtualHostStyleFilter.setAuthenticationHeaderParser(
-        authenticationHeaderParser);
-
-    ContainerRequest containerRequest = createContainerRequest("mybucket" +
-            ".localhost:9878", "/myfile", null, true);
-    virtualHostStyleFilter.filter(containerRequest);
-    URI expected = new URI("http://" + s3HttpAddr +
-        "/mybucket/myfile");
-    Assert.assertEquals(expected, containerRequest.getRequestUri());
-  }
-
-  @Test
-  public void testPathStyle() throws Exception {
-
-    VirtualHostStyleFilter virtualHostStyleFilter =
-        new VirtualHostStyleFilter();
-    virtualHostStyleFilter.setConfiguration(conf);
-    virtualHostStyleFilter.setAuthenticationHeaderParser(
-        authenticationHeaderParser);
-
-    ContainerRequest containerRequest = createContainerRequest(s3HttpAddr,
-        "/mybucket/myfile", null, false);
-    virtualHostStyleFilter.filter(containerRequest);
-    URI expected = new URI("http://" + s3HttpAddr +
-        "/mybucket/myfile");
-    Assert.assertEquals(expected, containerRequest.getRequestUri());
-
-  }
-
-  @Test
-  public void testVirtualHostStyleWithCreateBucketRequest() throws Exception {
-
-    VirtualHostStyleFilter virtualHostStyleFilter =
-        new VirtualHostStyleFilter();
-    virtualHostStyleFilter.setConfiguration(conf);
-    virtualHostStyleFilter.setAuthenticationHeaderParser(
-        authenticationHeaderParser);
-
-    ContainerRequest containerRequest = createContainerRequest("mybucket" +
-        ".localhost:9878", null, null, true);
-    virtualHostStyleFilter.filter(containerRequest);
-    URI expected = new URI("http://" + s3HttpAddr + "/mybucket");
-    Assert.assertEquals(expected, containerRequest.getRequestUri());
-
-  }
-
-  @Test
-  public void testVirtualHostStyleWithQueryParams() throws Exception {
-
-    VirtualHostStyleFilter virtualHostStyleFilter =
-        new VirtualHostStyleFilter();
-    virtualHostStyleFilter.setConfiguration(conf);
-    virtualHostStyleFilter.setAuthenticationHeaderParser(
-        authenticationHeaderParser);
-
-    ContainerRequest containerRequest = createContainerRequest("mybucket" +
-        ".localhost:9878", null, "?prefix=bh", true);
-    virtualHostStyleFilter.filter(containerRequest);
-    URI expected = new URI("http://" + s3HttpAddr + "/mybucket?prefix=bh");
-    assertTrue(expected.toString().contains(containerRequest.getRequestUri()
-        .toString()));
-
-    containerRequest = createContainerRequest("mybucket" +
-        ".localhost:9878", null, "?prefix=bh&type=dir", true);
-    virtualHostStyleFilter.filter(containerRequest);
-    expected = new URI("http://" + s3HttpAddr +
-        "/mybucket?prefix=bh&type=dir");
-    assertTrue(expected.toString().contains(containerRequest.getRequestUri()
-        .toString()));
-
-  }
-
-  @Test
-  public void testVirtualHostStyleWithNoMatchingDomain() throws Exception {
-
-    VirtualHostStyleFilter virtualHostStyleFilter =
-        new VirtualHostStyleFilter();
-    virtualHostStyleFilter.setConfiguration(conf);
-    virtualHostStyleFilter.setAuthenticationHeaderParser(
-        authenticationHeaderParser);
-
-    ContainerRequest containerRequest = createContainerRequest("mybucket" +
-        ".myhost:9999", null, null, true);
-    try {
-      virtualHostStyleFilter.filter(containerRequest);
-      fail("testVirtualHostStyleWithNoMatchingDomain");
-    } catch (InvalidRequestException ex) {
-      GenericTestUtils.assertExceptionContains("No matching domain", ex);
-    }
-
-  }
-
-  @Test
-  public void testIncorrectVirtualHostStyle() throws
-      Exception {
-
-    VirtualHostStyleFilter virtualHostStyleFilter =
-        new VirtualHostStyleFilter();
-    virtualHostStyleFilter.setConfiguration(conf);
-    virtualHostStyleFilter.setAuthenticationHeaderParser(
-        authenticationHeaderParser);
-
-    ContainerRequest containerRequest = createContainerRequest("mybucket" +
-        "localhost:9878", null, null, true);
-    try {
-      virtualHostStyleFilter.filter(containerRequest);
-      fail("testIncorrectVirtualHostStyle failed");
-    } catch (InvalidRequestException ex) {
-      GenericTestUtils.assertExceptionContains("invalid format", ex);
-    }
-
-  }
-
-}
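
The filter under test carries the bucket name in the Host header (virtual-host style) and rewrites the request to path style before it reaches the endpoints. The deleted VirtualHostStyleFilter also validates headers and error cases; the core rewrite reduces to roughly the following sketch, assuming a single configured domain suffix:

import java.net.URI;
import java.net.URISyntaxException;

/** Illustrative virtual-host-style to path-style rewrite. */
public final class VirtualHostRewriteExample {

  /**
   * E.g. host "mybucket.localhost:9878" with path "/myfile" and domain
   * "localhost" maps to "http://localhost/mybucket/myfile".
   */
  public static URI toPathStyle(String host, String path, String domain)
      throws URISyntaxException {
    // Drop an optional port before matching the domain suffix.
    String hostname = host.contains(":")
        ? host.substring(0, host.lastIndexOf(':')) : host;
    if (!hostname.endsWith("." + domain)) {
      throw new IllegalArgumentException("No matching domain: " + host);
    }
    String bucket =
        hostname.substring(0, hostname.length() - domain.length() - 1);
    return new URI("http://" + domain + "/" + bucket
        + (path == null ? "" : path));
  }
}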
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java
deleted file mode 100644
index 912a769..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-
-
-import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.when;
-
-/**
- * This class tests the abort multipart upload request.
- */
-public class TestAbortMultipartUpload {
-
-
-  @Test
-  public void testAbortMultipartUpload() throws Exception {
-
-    String bucket = "s3bucket";
-    String key = "key1";
-    OzoneClientStub client = new OzoneClientStub();
-    client.getObjectStore().createS3Bucket("ozone", bucket);
-
-    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
-    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
-        "STANDARD");
-
-    ObjectEndpoint rest = new ObjectEndpoint();
-    rest.setHeaders(headers);
-    rest.setClient(client);
-
-    Response response = rest.initializeMultipartUpload(bucket, key);
-
-    assertEquals(200, response.getStatus());
-    MultipartUploadInitiateResponse multipartUploadInitiateResponse =
-        (MultipartUploadInitiateResponse) response.getEntity();
-    assertNotNull(multipartUploadInitiateResponse.getUploadID());
-    String uploadID = multipartUploadInitiateResponse.getUploadID();
-
-
-    // Abort multipart upload
-    response = rest.delete(bucket, key, uploadID);
-
-    assertEquals(204, response.getStatus());
-
-    // test with unknown upload Id.
-    try {
-      rest.delete(bucket, key, "random");
-    } catch (OS3Exception ex) {
-      assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode());
-      assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(),
-          ex.getErrorMessage());
-    }
-
-  }
-}
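
The test above covers the standard S3 multipart abort contract: initiate returns an upload ID, DELETE on the key with that ID aborts the upload (204 No Content), and an unknown ID maps to the NoSuchUpload error. Condensed into one helper sketch, assuming the ObjectEndpoint setup and static JUnit imports used in the test:

/** Sketch: exercise the multipart abort contract end to end. */
static void abortAndVerify(ObjectEndpoint rest, String bucket, String key)
    throws Exception {
  Response init = rest.initializeMultipartUpload(bucket, key);
  String uploadID =
      ((MultipartUploadInitiateResponse) init.getEntity()).getUploadID();

  // Abort with the real ID: 204 No Content.
  assertEquals(204, rest.delete(bucket, key, uploadID).getStatus());

  // Abort with an unknown ID: NO_SUCH_UPLOAD surfaces as OS3Exception.
  try {
    rest.delete(bucket, key, "unknown-upload-id");
    fail("Expected OS3Exception for an unknown upload ID");
  } catch (OS3Exception ex) {
    assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode());
  }
}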
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java
deleted file mode 100644
index ea574d4..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.ws.rs.core.Response;
-
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.ObjectStoreStub;
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-
-import org.apache.http.HttpStatus;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * This class tests delete bucket functionality.
- */
-public class TestBucketDelete {
-
-  private String bucketName = "myBucket";
-  private OzoneClientStub clientStub;
-  private ObjectStore objectStoreStub;
-  private BucketEndpoint bucketEndpoint;
-
-  @Before
-  public void setup() throws Exception {
-
-    //Create client stub and object store stub.
-    clientStub = new OzoneClientStub();
-    objectStoreStub = clientStub.getObjectStore();
-
-    objectStoreStub.createS3Bucket("ozone", bucketName);
-
-    // Create HeadBucket and setClient to OzoneClientStub
-    bucketEndpoint = new BucketEndpoint();
-    bucketEndpoint.setClient(clientStub);
-
-
-  }
-
-  @Test
-  public void testBucketEndpoint() throws Exception {
-    Response response = bucketEndpoint.delete(bucketName);
-    assertEquals(HttpStatus.SC_NO_CONTENT, response.getStatus());
-
-  }
-
-  @Test
-  public void testDeleteWithNoSuchBucket() throws Exception {
-    try {
-      bucketEndpoint.delete("unknownbucket");
-    } catch (OS3Exception ex) {
-      assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), ex.getCode());
-      assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getErrorMessage(),
-          ex.getErrorMessage());
-      return;
-    }
-    fail("testDeleteWithNoSuchBucket failed");
-  }
-
-
-  @Test
-  public void testDeleteWithBucketNotEmpty() throws Exception {
-    try {
-      String bucket = "nonemptybucket";
-      objectStoreStub.createS3Bucket("ozone1", bucket);
-      ObjectStoreStub stub = (ObjectStoreStub) objectStoreStub;
-      stub.setBucketEmptyStatus(bucket, false);
-      bucketEndpoint.delete(bucket);
-    } catch (OS3Exception ex) {
-      assertEquals(S3ErrorTable.BUCKET_NOT_EMPTY.getCode(), ex.getCode());
-      assertEquals(S3ErrorTable.BUCKET_NOT_EMPTY.getErrorMessage(),
-          ex.getErrorMessage());
-      return;
-    }
-    fail("testDeleteWithBucketNotEmpty failed");
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java
deleted file mode 100644
index 844f9be..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java
+++ /dev/null
@@ -1,380 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-
-import org.junit.Assert;
-import static org.junit.Assert.fail;
-import org.junit.Test;
-
-/**
- * Testing basic object list browsing.
- */
-public class TestBucketGet {
-
-  @Test
-  public void listRoot() throws OS3Exception, IOException {
-
-    BucketEndpoint getBucket = new BucketEndpoint();
-
-    OzoneClient client = createClientWithKeys("file1", "dir1/file2");
-
-    getBucket.setClient(client);
-
-    ListObjectResponse getBucketResponse =
-        (ListObjectResponse) getBucket
-            .list("b1", "/", null, null, 100, "", null, null, null, null, null)
-            .getEntity();
-
-    Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
-    Assert.assertEquals("dir1/",
-        getBucketResponse.getCommonPrefixes().get(0).getPrefix());
-
-    Assert.assertEquals(1, getBucketResponse.getContents().size());
-    Assert.assertEquals("file1",
-        getBucketResponse.getContents().get(0).getKey());
-
-  }
-
-  @Test
-  public void listDir() throws OS3Exception, IOException {
-
-    BucketEndpoint getBucket = new BucketEndpoint();
-
-    OzoneClient client = createClientWithKeys("dir1/file2", "dir1/dir2/file2");
-
-    getBucket.setClient(client);
-
-    ListObjectResponse getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", "/", null, null, 100,
-            "dir1", null, null, null, null, null).getEntity();
-
-    Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
-    Assert.assertEquals("dir1/",
-        getBucketResponse.getCommonPrefixes().get(0).getPrefix());
-
-    Assert.assertEquals(0, getBucketResponse.getContents().size());
-
-  }
-
-  @Test
-  public void listSubDir() throws OS3Exception, IOException {
-
-    BucketEndpoint getBucket = new BucketEndpoint();
-
-    OzoneClient ozoneClient =
-        createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file",
-            "dir1bha/file2");
-
-    getBucket.setClient(ozoneClient);
-
-    ListObjectResponse getBucketResponse =
-        (ListObjectResponse) getBucket
-            .list("b1", "/", null, null, 100, "dir1/", null, null,
-                null, null, null)
-            .getEntity();
-
-    Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
-    Assert.assertEquals("dir1/dir2/",
-        getBucketResponse.getCommonPrefixes().get(0).getPrefix());
-
-    Assert.assertEquals(1, getBucketResponse.getContents().size());
-    Assert.assertEquals("dir1/file2",
-        getBucketResponse.getContents().get(0).getKey());
-
-  }
-
-
-  @Test
-  public void listWithPrefixAndDelimiter() throws OS3Exception, IOException {
-
-    BucketEndpoint getBucket = new BucketEndpoint();
-
-    OzoneClient ozoneClient =
-        createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file",
-            "dir1bha/file2", "file2");
-
-    getBucket.setClient(ozoneClient);
-
-    ListObjectResponse getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", "/", null, null, 100,
-            "dir1", null, null, null, null, null).getEntity();
-
-    Assert.assertEquals(3, getBucketResponse.getCommonPrefixes().size());
-
-  }
-
-  @Test
-  public void listWithPrefixAndDelimiter1() throws OS3Exception, IOException {
-
-    BucketEndpoint getBucket = new BucketEndpoint();
-
-    OzoneClient ozoneClient =
-        createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file",
-            "dir1bha/file2", "file2");
-
-    getBucket.setClient(ozoneClient);
-
-    ListObjectResponse getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", "/", null, null, 100,
-            "", null, null, null, null, null).getEntity();
-
-    Assert.assertEquals(3, getBucketResponse.getCommonPrefixes().size());
-    Assert.assertEquals("file2", getBucketResponse.getContents().get(0)
-        .getKey());
-
-  }
-
-  @Test
-  public void listWithPrefixAndDelimiter2() throws OS3Exception, IOException {
-
-    BucketEndpoint getBucket = new BucketEndpoint();
-
-    OzoneClient ozoneClient =
-        createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file",
-            "dir1bha/file2", "file2");
-
-    getBucket.setClient(ozoneClient);
-
-    ListObjectResponse getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", "/", null, null, 100,
-            "dir1bh", null, null, "dir1/dir2/file2", null, null).getEntity();
-
-    Assert.assertEquals(2, getBucketResponse.getCommonPrefixes().size());
-
-  }
-
-  @Test
-  public void listWithContinuationToken() throws OS3Exception, IOException {
-
-    BucketEndpoint getBucket = new BucketEndpoint();
-
-    OzoneClient ozoneClient =
-        createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file",
-            "dir1bha/file2", "file2");
-
-    getBucket.setClient(ozoneClient);
-
-    int maxKeys = 2;
-    // As we have 5 keys, with max keys 2 we should call list 3 times.
-
-    // First time
-    ListObjectResponse getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", null, null, null, maxKeys,
-            "", null, null, null, null, null).getEntity();
-
-    Assert.assertTrue(getBucketResponse.isTruncated());
-    Assert.assertTrue(getBucketResponse.getContents().size() == 2);
-
-    // 2nd time
-    String continueToken = getBucketResponse.getNextToken();
-    getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", null, null, null, maxKeys,
-            "", null, continueToken, null, null, null).getEntity();
-    Assert.assertTrue(getBucketResponse.isTruncated());
-    Assert.assertTrue(getBucketResponse.getContents().size() == 2);
-
-
-    continueToken = getBucketResponse.getNextToken();
-
-    //3rd time
-    getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", null, null, null, maxKeys,
-            "", null, continueToken, null, null, null).getEntity();
-
-    Assert.assertFalse(getBucketResponse.isTruncated());
-    Assert.assertTrue(getBucketResponse.getContents().size() == 1);
-
-  }
-
-  @Test
-  public void listWithContinuationTokenDirBreak()
-      throws OS3Exception, IOException {
-
-    BucketEndpoint getBucket = new BucketEndpoint();
-
-    OzoneClient ozoneClient =
-        createClientWithKeys(
-            "test/dir1/file1",
-            "test/dir1/file2",
-            "test/dir1/file3",
-            "test/dir2/file4",
-            "test/dir2/file5",
-            "test/dir2/file6",
-            "test/dir3/file7",
-            "test/file8");
-
-    getBucket.setClient(ozoneClient);
-
-    int maxKeys = 2;
-
-    ListObjectResponse getBucketResponse;
-
-    getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys,
-            "test/", null, null, null, null, null).getEntity();
-
-    Assert.assertEquals(0, getBucketResponse.getContents().size());
-    Assert.assertEquals(2, getBucketResponse.getCommonPrefixes().size());
-    Assert.assertEquals("test/dir1/",
-        getBucketResponse.getCommonPrefixes().get(0).getPrefix());
-    Assert.assertEquals("test/dir2/",
-        getBucketResponse.getCommonPrefixes().get(1).getPrefix());
-
-    getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys,
-            "test/", null, getBucketResponse.getNextToken(), null, null, null)
-            .getEntity();
-    Assert.assertEquals(1, getBucketResponse.getContents().size());
-    Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
-    Assert.assertEquals("test/dir3/",
-        getBucketResponse.getCommonPrefixes().get(0).getPrefix());
-    Assert.assertEquals("test/file8",
-        getBucketResponse.getContents().get(0).getKey());
-
-  }
-
-  /**
-   * This test uses a prefix and delimiter and verifies continuation-token
-   * behavior.
-   */
-  @Test
-  public void listWithContinuationToken1() throws OS3Exception, IOException {
-
-    BucketEndpoint getBucket = new BucketEndpoint();
-
-    OzoneClient ozoneClient =
-        createClientWithKeys("dir1/file1", "dir1bh/file1",
-            "dir1bha/file1", "dir0/file1", "dir2/file1");
-
-    getBucket.setClient(ozoneClient);
-
-    int maxKeys = 2;
-    // As we have 5 keys, with max keys 2 we should call list 3 times.
-
-    // First time
-    ListObjectResponse getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys,
-            "dir", null, null, null, null, null).getEntity();
-
-    Assert.assertTrue(getBucketResponse.isTruncated());
-    Assert.assertTrue(getBucketResponse.getCommonPrefixes().size() == 2);
-
-    // 2nd time
-    String continueToken = getBucketResponse.getNextToken();
-    getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys,
-            "dir", null, continueToken, null, null, null).getEntity();
-    Assert.assertTrue(getBucketResponse.isTruncated());
-    Assert.assertTrue(getBucketResponse.getCommonPrefixes().size() == 2);
-
-    //3rd time
-    continueToken = getBucketResponse.getNextToken();
-    getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys,
-            "dir", null, continueToken, null, null, null).getEntity();
-
-    Assert.assertFalse(getBucketResponse.isTruncated());
-    Assert.assertTrue(getBucketResponse.getCommonPrefixes().size() == 1);
-
-  }
-
-  @Test
-  public void listWithContinuationTokenFail() throws OS3Exception, IOException {
-
-    BucketEndpoint getBucket = new BucketEndpoint();
-
-    OzoneClient ozoneClient =
-        createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file",
-            "dir1bha/file2", "dir1", "dir2", "dir3");
-
-    getBucket.setClient(ozoneClient);
-
-    try {
-      getBucket.list("b1", "/", null, null, 2,
-          "dir", null, "random", null, null, null).getEntity();
-      fail("Expected OS3Exception for an invalid continuation token");
-    } catch (OS3Exception ex) {
-      Assert.assertEquals("random", ex.getResource());
-      Assert.assertEquals("Invalid Argument", ex.getErrorMessage());
-    }
-
-  }
-
-
-  @Test
-  public void testStartAfter() throws IOException, OS3Exception {
-    BucketEndpoint getBucket = new BucketEndpoint();
-
-    OzoneClient ozoneClient =
-        createClientWithKeys("dir1/file1", "dir1bh/file1",
-            "dir1bha/file1", "dir0/file1", "dir2/file1");
-
-    getBucket.setClient(ozoneClient);
-
-    ListObjectResponse getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", null, null, null, 1000,
-            null, null, null, null, null, null).getEntity();
-
-    Assert.assertFalse(getBucketResponse.isTruncated());
-    Assert.assertTrue(getBucketResponse.getContents().size() == 5);
-
-    // As the list output is sorted, seeking to startAfter leaves 4 keys.
-    String startAfter = "dir0/file1";
-
-    getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", null, null, null,
-            1000, null, null, null, startAfter, null, null).getEntity();
-
-    Assert.assertFalse(getBucketResponse.isTruncated());
-    Assert.assertTrue(getBucketResponse.getContents().size() == 4);
-
-    getBucketResponse =
-        (ListObjectResponse) getBucket.list("b1", null, null, null,
-            1000, null, null, null, "random", null, null).getEntity();
-
-    Assert.assertFalse(getBucketResponse.isTruncated());
-    Assert.assertTrue(getBucketResponse.getContents().size() == 0);
-
-
-  }
-
-  private OzoneClient createClientWithKeys(String... keys) throws IOException {
-    OzoneClient client = new OzoneClientStub();
-
-    client.getObjectStore().createS3Bucket("bilbo", "b1");
-    String volume = client.getObjectStore().getOzoneVolumeName("b1");
-    client.getObjectStore().getVolume(volume).createBucket("b1");
-    OzoneBucket bucket =
-        client.getObjectStore().getVolume(volume).getBucket("b1");
-    for (String key : keys) {
-      bucket.createKey(key, 0).close();
-    }
-    return client;
-  }
-}
\ No newline at end of file
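
Each continuation-token test above drives pagination call by call; in client code the same contract is normally a loop that feeds getNextToken() back into list(...) until isTruncated() is false. A sketch under those assumptions (the element type of getContents() is left implicit, matching the tests' getKey() accessor):

/** Sketch: drain all keys under a prefix using continuation tokens. */
static java.util.List<String> listAllKeys(BucketEndpoint endpoint,
    String bucket, String prefix, int pageSize) throws Exception {
  java.util.List<String> keys = new java.util.ArrayList<>();
  String token = null;
  ListObjectResponse page;
  do {
    // Same 11-argument list(...) signature the tests above exercise.
    page = (ListObjectResponse) endpoint.list(bucket, "/", null, null,
        pageSize, prefix, null, token, null, null, null).getEntity();
    page.getContents().forEach(content -> keys.add(content.getKey()));
    token = page.getNextToken();
  } while (page.isTruncated());
  return keys;
}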
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java
deleted file mode 100644
index f06da70..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.ws.rs.core.Response;
-
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-
-import org.junit.Assert;
-import static org.junit.Assert.assertEquals;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * This class tests HeadBucket functionality.
- */
-public class TestBucketHead {
-
-  private String bucketName = "myBucket";
-  private String userName = "ozone";
-  private OzoneClientStub clientStub;
-  private ObjectStore objectStoreStub;
-  private BucketEndpoint bucketEndpoint;
-
-  @Before
-  public void setup() throws Exception {
-
-    //Create client stub and object store stub.
-    clientStub = new OzoneClientStub();
-    objectStoreStub = clientStub.getObjectStore();
-
-    objectStoreStub.createS3Bucket(userName, bucketName);
-
-    // Create HeadBucket and setClient to OzoneClientStub
-    bucketEndpoint = new BucketEndpoint();
-    bucketEndpoint.setClient(clientStub);
-  }
-
-  @Test
-  public void testHeadBucket() throws Exception {
-
-    Response response = bucketEndpoint.head(bucketName);
-    assertEquals(200, response.getStatus());
-
-  }
-
-  @Test
-  public void testHeadFail() throws Exception {
-    Response response = bucketEndpoint.head("unknownbucket");
-    Assert.assertEquals(400, response.getStatus());
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketResponse.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketResponse.java
deleted file mode 100644
index 7c5bfad..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketResponse.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import org.junit.Test;
-
-/**
- * Testing JAXB serialization.
- */
-public class TestBucketResponse {
-
-  @Test
-  public void serialize() throws JAXBException {
-    JAXBContext context = JAXBContext.newInstance(ListObjectResponse.class);
-    context.createMarshaller().marshal(new ListObjectResponse(), System.out);
-  }
-
-}
\ No newline at end of file
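
The serialize test only asserts that marshalling completes. When the XML itself needs inspection, the standard JAXB formatted-output property makes it readable; a small sketch with the same ListObjectResponse type:

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;

// Marshal with indentation; JAXB_FORMATTED_OUTPUT is a standard JAXB property.
JAXBContext context = JAXBContext.newInstance(ListObjectResponse.class);
Marshaller marshaller = context.createMarshaller();
marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
marshaller.marshal(new ListObjectResponse(), System.out);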
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java
deleted file mode 100644
index 212721a..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-
-import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.when;
-
-/**
- * This class tests the Initiate Multipart Upload request.
- */
-public class TestInitiateMultipartUpload {
-
-  @Test
-  public void testInitiateMultipartUpload() throws Exception {
-
-    String bucket = "s3bucket";
-    String key = "key1";
-    OzoneClientStub client = new OzoneClientStub();
-    client.getObjectStore().createS3Bucket("ozone", bucket);
-    String volumeName = client.getObjectStore().getOzoneVolumeName(bucket);
-    OzoneVolume volume = client.getObjectStore().getVolume(volumeName);
-    OzoneBucket ozoneBucket = volume.getBucket("s3bucket");
-    assertNotNull(ozoneBucket);
-
-    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
-    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
-        "STANDARD");
-
-    ObjectEndpoint rest = new ObjectEndpoint();
-    rest.setHeaders(headers);
-    rest.setClient(client);
-
-    Response response = rest.initializeMultipartUpload(bucket, key);
-
-    assertEquals(200, response.getStatus());
-    MultipartUploadInitiateResponse multipartUploadInitiateResponse =
-        (MultipartUploadInitiateResponse) response.getEntity();
-    assertNotNull(multipartUploadInitiateResponse.getUploadID());
-    String uploadID = multipartUploadInitiateResponse.getUploadID();
-
-    // Calling again should return different uploadID.
-    response = rest.initializeMultipartUpload(bucket, key);
-    assertEquals(200, response.getStatus());
-    multipartUploadInitiateResponse =
-        (MultipartUploadInitiateResponse) response.getEntity();
-    assertNotNull(multipartUploadInitiateResponse.getUploadID());
-    assertNotEquals(multipartUploadInitiateResponse.getUploadID(), uploadID);
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
deleted file mode 100644
index 21545ec..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-
-import java.io.ByteArrayInputStream;
-
-import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.when;
-
-/**
- * This class tests the list parts request.
- */
-public class TestListParts {
-
-  private static final ObjectEndpoint REST = new ObjectEndpoint();
-  private static final String BUCKET = "s3bucket";
-  private static final String KEY = "key1";
-  private static String uploadID;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-
-    OzoneClientStub client = new OzoneClientStub();
-    client.getObjectStore().createS3Bucket("ozone", BUCKET);
-
-
-    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
-    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
-        "STANDARD");
-
-    REST.setHeaders(headers);
-    REST.setClient(client);
-
-    Response response = REST.initializeMultipartUpload(BUCKET, KEY);
-    MultipartUploadInitiateResponse multipartUploadInitiateResponse =
-        (MultipartUploadInitiateResponse) response.getEntity();
-    assertNotNull(multipartUploadInitiateResponse.getUploadID());
-    uploadID = multipartUploadInitiateResponse.getUploadID();
-
-    assertEquals(200, response.getStatus());
-
-    String content = "Multipart Upload";
-    ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
-    response = REST.put(BUCKET, KEY, content.length(), 1, uploadID, body);
-
-    assertNotNull(response.getHeaderString("ETag"));
-
-    // Use a fresh stream per part; the previous one is exhausted after read.
-    body = new ByteArrayInputStream(content.getBytes());
-    response = REST.put(BUCKET, KEY, content.length(), 2, uploadID, body);
-
-    assertNotNull(response.getHeaderString("ETag"));
-
-    body = new ByteArrayInputStream(content.getBytes());
-    response = REST.put(BUCKET, KEY, content.length(), 3, uploadID, body);
-
-    assertNotNull(response.getHeaderString("ETag"));
-  }
-
-  @Test
-  public void testListParts() throws Exception {
-    Response response = REST.get(BUCKET, KEY, uploadID, 3, "0", null);
-
-    ListPartsResponse listPartsResponse =
-        (ListPartsResponse) response.getEntity();
-
-    Assert.assertFalse(listPartsResponse.getTruncated());
-    Assert.assertTrue(listPartsResponse.getPartList().size() == 3);
-
-  }
-
-  @Test
-  public void testListPartsContinuation() throws Exception {
-    Response response = REST.get(BUCKET, KEY, uploadID, 2, "0", null);
-    ListPartsResponse listPartsResponse =
-        (ListPartsResponse) response.getEntity();
-
-    Assert.assertTrue(listPartsResponse.getTruncated());
-    Assert.assertTrue(listPartsResponse.getPartList().size() == 2);
-
-    // Continue
-    response = REST.get(BUCKET, KEY, uploadID, 2,
-        Integer.toString(listPartsResponse.getNextPartNumberMarker()), null);
-    listPartsResponse = (ListPartsResponse) response.getEntity();
-
-    Assert.assertFalse(listPartsResponse.getTruncated());
-    Assert.assertTrue(listPartsResponse.getPartList().size() == 1);
-
-  }
-
-  @Test
-  public void testListPartsWithUnknownUploadID() throws Exception {
-    try {
-      REST.get(BUCKET, KEY, "random", 2, "0", null);
-      Assert.fail("Expected OS3Exception for an unknown upload ID");
-    } catch (OS3Exception ex) {
-      Assert.assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(),
-          ex.getErrorMessage());
-    }
-  }
-
-
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java
deleted file mode 100644
index c15a128..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-
-import org.junit.Assert;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import org.junit.Test;
-
-/**
- * Test custom marshalling of MultiDeleteRequest.
- */
-public class TestMultiDeleteRequestUnmarshaller {
-
-  @Test
-  public void fromStreamWithNamespace() throws IOException {
-    //GIVEN
-    ByteArrayInputStream inputBody =
-        new ByteArrayInputStream(
-            ("<Delete xmlns=\"http://s3.amazonaws"
-                + ".com/doc/2006-03-01/\"><Object>key1</Object><Object>key2"
-                + "</Object><Object>key3"
-                + "</Object></Delete>")
-                .getBytes(UTF_8));
-
-    //WHEN
-    MultiDeleteRequest multiDeleteRequest =
-        unmarshall(inputBody);
-
-    //THEN
-    Assert.assertEquals(3, multiDeleteRequest.getObjects().size());
-  }
-
-  @Test
-  public void fromStreamWithoutNamespace() throws IOException {
-    //GIVEN
-    ByteArrayInputStream inputBody =
-        new ByteArrayInputStream(
-            ("<Delete><Object>key1</Object><Object>key2"
-                + "</Object><Object>key3"
-                + "</Object></Delete>")
-                .getBytes(UTF_8));
-
-    //WHEN
-    MultiDeleteRequest multiDeleteRequest =
-        unmarshall(inputBody);
-
-    //THEN
-    Assert.assertEquals(3, multiDeleteRequest.getObjects().size());
-  }
-
-  private MultiDeleteRequest unmarshall(ByteArrayInputStream inputBody)
-      throws IOException {
-    return new MultiDeleteRequestUnmarshaller()
-        .readFrom(null, null, null, null, null, inputBody);
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
deleted file mode 100644
index b9e3885..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.when;
-
-/**
- * Class to test Multipart upload end to end.
- */
-
-public class TestMultipartUploadComplete {
-
-  private final static ObjectEndpoint REST = new ObjectEndpoint();
-  private final static String BUCKET = "s3bucket";
-  private final static String KEY = "key1";
-  private final static OzoneClientStub CLIENT = new OzoneClientStub();
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-
-    CLIENT.getObjectStore().createS3Bucket("ozone", BUCKET);
-
-
-    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
-    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
-        "STANDARD");
-
-    REST.setHeaders(headers);
-    REST.setClient(CLIENT);
-  }
-
-  private String initiateMultipartUpload(String key) throws IOException,
-      OS3Exception {
-    Response response = REST.initializeMultipartUpload(BUCKET, key);
-    MultipartUploadInitiateResponse multipartUploadInitiateResponse =
-        (MultipartUploadInitiateResponse) response.getEntity();
-    assertNotNull(multipartUploadInitiateResponse.getUploadID());
-    String uploadID = multipartUploadInitiateResponse.getUploadID();
-
-    assertEquals(response.getStatus(), 200);
-
-    return uploadID;
-
-  }
-
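-  // Upload a single part and wrap its part number and returned ETag for the
-  // final complete-multipart-upload request.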
-  private Part uploadPart(String key, String uploadID, int partNumber, String
-      content) throws IOException, OS3Exception {
-    ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
-    Response response = REST.put(BUCKET, key, content.length(), partNumber,
-        uploadID, body);
-    assertEquals(response.getStatus(), 200);
-    assertNotNull(response.getHeaderString("ETag"));
-    Part part = new Part();
-    part.seteTag(response.getHeaderString("ETag"));
-    part.setPartNumber(partNumber);
-
-    return part;
-  }
-
-  private void completeMultipartUpload(String key,
-      CompleteMultipartUploadRequest completeMultipartUploadRequest,
-      String uploadID) throws IOException, OS3Exception {
-    Response response = REST.completeMultipartUpload(BUCKET, key, uploadID,
-        completeMultipartUploadRequest);
-
-    assertEquals(response.getStatus(), 200);
-
-    CompleteMultipartUploadResponse completeMultipartUploadResponse =
-        (CompleteMultipartUploadResponse) response.getEntity();
-
-    assertEquals(completeMultipartUploadResponse.getBucket(), BUCKET);
-    assertEquals(completeMultipartUploadResponse.getKey(), key);
-    assertEquals(completeMultipartUploadResponse.getLocation(), BUCKET);
-    assertNotNull(completeMultipartUploadResponse.getETag());
-  }
-
-  @Test
-  public void testMultipart() throws Exception {
-
-    // Initiate multipart upload
-    String uploadID = initiateMultipartUpload(KEY);
-
-    List<Part> partsList = new ArrayList<>();
-
-
-    // Upload parts
-    String content = "Multipart Upload 1";
-    int partNumber = 1;
-
-    Part part1 = uploadPart(KEY, uploadID, partNumber, content);
-    partsList.add(part1);
-
-    content = "Multipart Upload 2";
-    partNumber = 2;
-    Part part2 = uploadPart(KEY, uploadID, partNumber, content);
-    partsList.add(part2);
-
-    // complete multipart upload
-    CompleteMultipartUploadRequest completeMultipartUploadRequest = new
-        CompleteMultipartUploadRequest();
-    completeMultipartUploadRequest.setPartList(partsList);
-
-
-    completeMultipartUpload(KEY, completeMultipartUploadRequest,
-        uploadID);
-
-  }
-
-
-  @Test
-  public void testMultipartInvalidPartOrderError() throws Exception {
-
-    // Initiate multipart upload
-    String key = UUID.randomUUID().toString();
-    String uploadID = initiateMultipartUpload(key);
-
-    List<Part> partsList = new ArrayList<>();
-
-    // Upload parts
-    String content = "Multipart Upload 1";
-    int partNumber = 1;
-
-    Part part1 = uploadPart(key, uploadID, partNumber, content);
-    // Set an out-of-order part number so that completion must fail
-    part1.setPartNumber(3);
-    partsList.add(part1);
-
-    content = "Multipart Upload 2";
-    partNumber = 2;
-
-    Part part2 = uploadPart(key, uploadID, partNumber, content);
-    partsList.add(part2);
-
-    // complete multipart upload
-    CompleteMultipartUploadRequest completeMultipartUploadRequest = new
-        CompleteMultipartUploadRequest();
-    completeMultipartUploadRequest.setPartList(partsList);
-    try {
-      completeMultipartUpload(key, completeMultipartUploadRequest, uploadID);
-      fail("testMultipartInvalidPartOrderError");
-    } catch (OS3Exception ex) {
-      assertEquals(ex.getCode(), S3ErrorTable.INVALID_PART_ORDER.getCode());
-    }
-
-  }
-
-  @Test
-  public void testMultipartInvalidPartError() throws Exception {
-
-    // Initiate multipart upload
-    String key = UUID.randomUUID().toString();
-    String uploadID = initiateMultipartUpload(key);
-
-    List<Part> partsList = new ArrayList<>();
-
-    // Upload parts
-    String content = "Multipart Upload 1";
-    int partNumber = 1;
-
-    Part part1 = uploadPart(key, uploadID, partNumber, content);
-    // Corrupt the eTag so that part validation must fail
-    part1.seteTag("random");
-    partsList.add(part1);
-
-    content = "Multipart Upload 2";
-    partNumber = 2;
-
-    Part part2 = uploadPart(key, uploadID, partNumber, content);
-    partsList.add(part2);
-
-    // complete multipart upload
-    CompleteMultipartUploadRequest completeMultipartUploadRequest = new
-        CompleteMultipartUploadRequest();
-    completeMultipartUploadRequest.setPartList(partsList);
-    try {
-      completeMultipartUpload(key, completeMultipartUploadRequest, uploadID);
-      fail("testMultipartInvalidPartOrderError");
-    } catch (OS3Exception ex) {
-      assertEquals(ex.getCode(), S3ErrorTable.INVALID_PART.getCode());
-    }
-
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
deleted file mode 100644
index 425bfc4..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Scanner;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-
-import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER_RANGE;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
-import org.junit.Assert;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
-import static org.mockito.Mockito.when;
-
-/**
- * Class to test Multipart upload where parts are created with copy header.
- */
-
-public class TestMultipartUploadWithCopy {
-
-  private final static ObjectEndpoint REST = new ObjectEndpoint();
-
-  private final static String BUCKET = "s3bucket";
-  private final static String KEY = "key2";
-  private final static String EXISTING_KEY = "key1";
-  private static final String EXISTING_KEY_CONTENT = "testkey";
-  private final static OzoneClientStub CLIENT = new OzoneClientStub();
-  private static final int RANGE_FROM = 2;
-  private static final int RANGE_TO = 4;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-
-    ObjectStore objectStore = CLIENT.getObjectStore();
-    objectStore.createS3Bucket("ozone", BUCKET);
-
-    OzoneBucket bucket = getOzoneBucket(objectStore, BUCKET);
-
-    byte[] keyContent = EXISTING_KEY_CONTENT.getBytes();
-    try (OutputStream stream = bucket
-        .createKey(EXISTING_KEY, keyContent.length, ReplicationType.RATIS,
-            ReplicationFactor.THREE, new HashMap<>())) {
-      stream.write(keyContent);
-    }
-
-    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
-    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
-        "STANDARD");
-
-    REST.setHeaders(headers);
-    REST.setClient(CLIENT);
-  }
-
-  @Test
-  public void testMultipart() throws Exception {
-
-    // Initiate multipart upload
-    String uploadID = initiateMultipartUpload(KEY);
-
-    List<Part> partsList = new ArrayList<>();
-
-    // Upload parts
-    String content = "Multipart Upload 1";
-    int partNumber = 1;
-
-    Part part1 = uploadPart(KEY, uploadID, partNumber, content);
-    partsList.add(part1);
-
-    partNumber = 2;
-    Part part2 =
-        uploadPartWithCopy(KEY, uploadID, partNumber,
-            BUCKET + "/" + EXISTING_KEY, null);
-    partsList.add(part2);
-
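-    // Copy only part of the existing key by also sending the copy-source
-    // range header ("bytes=FROM-TO").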
-    partNumber = 3;
-    Part part3 =
-        uploadPartWithCopy(KEY, uploadID, partNumber,
-            BUCKET + "/" + EXISTING_KEY,
-            "bytes=" + RANGE_FROM + "-" + RANGE_TO);
-    partsList.add(part3);
-
-    // complete multipart upload
-    CompleteMultipartUploadRequest completeMultipartUploadRequest = new
-        CompleteMultipartUploadRequest();
-    completeMultipartUploadRequest.setPartList(partsList);
-
-    completeMultipartUpload(KEY, completeMultipartUploadRequest,
-        uploadID);
-
-    OzoneBucket bucket = getOzoneBucket(CLIENT.getObjectStore(), BUCKET);
-    try (InputStream is = bucket.readKey(KEY)) {
-      String keyContent = new Scanner(is).useDelimiter("\\A").next();
-      Assert.assertEquals(content + EXISTING_KEY_CONTENT + EXISTING_KEY_CONTENT
-          .substring(RANGE_FROM, RANGE_TO), keyContent);
-    }
-  }
-
-  private String initiateMultipartUpload(String key) throws IOException,
-      OS3Exception {
-    setHeaders();
-    Response response = REST.initializeMultipartUpload(BUCKET, key);
-    MultipartUploadInitiateResponse multipartUploadInitiateResponse =
-        (MultipartUploadInitiateResponse) response.getEntity();
-    assertNotNull(multipartUploadInitiateResponse.getUploadID());
-    String uploadID = multipartUploadInitiateResponse.getUploadID();
-
-    assertEquals(response.getStatus(), 200);
-
-    return uploadID;
-
-  }
-
-  private Part uploadPart(String key, String uploadID, int partNumber, String
-      content) throws IOException, OS3Exception {
-    setHeaders();
-    ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
-    Response response = REST.put(BUCKET, key, content.length(), partNumber,
-        uploadID, body);
-    assertEquals(response.getStatus(), 200);
-    assertNotNull(response.getHeaderString("ETag"));
-    Part part = new Part();
-    part.seteTag(response.getHeaderString("ETag"));
-    part.setPartNumber(partNumber);
-
-    return part;
-  }
-
-  private Part uploadPartWithCopy(String key, String uploadID, int partNumber,
-      String keyOrigin, String range) throws IOException, OS3Exception {
-    Map<String, String> additionalHeaders = new HashMap<>();
-    additionalHeaders.put(COPY_SOURCE_HEADER, keyOrigin);
-    if (range != null) {
-      additionalHeaders.put(COPY_SOURCE_HEADER_RANGE, range);
-
-    }
-    setHeaders(additionalHeaders);
-
-    ByteArrayInputStream body = new ByteArrayInputStream("".getBytes());
-    Response response = REST.put(BUCKET, key, 0, partNumber,
-        uploadID, body);
-    assertEquals(response.getStatus(), 200);
-
-    CopyPartResult result = (CopyPartResult) response.getEntity();
-    assertNotNull(result.getETag());
-    assertNotNull(result.getLastModified());
-    Part part = new Part();
-    part.seteTag(result.getETag());
-    part.setPartNumber(partNumber);
-
-    return part;
-  }
-
-  private void completeMultipartUpload(String key,
-      CompleteMultipartUploadRequest completeMultipartUploadRequest,
-      String uploadID) throws IOException, OS3Exception {
-    setHeaders();
-    Response response = REST.completeMultipartUpload(BUCKET, key, uploadID,
-        completeMultipartUploadRequest);
-
-    assertEquals(response.getStatus(), 200);
-
-    CompleteMultipartUploadResponse completeMultipartUploadResponse =
-        (CompleteMultipartUploadResponse) response.getEntity();
-
-    assertEquals(completeMultipartUploadResponse.getBucket(), BUCKET);
-    assertEquals(completeMultipartUploadResponse.getKey(), key);
-    assertEquals(completeMultipartUploadResponse.getLocation(), BUCKET);
-    assertNotNull(completeMultipartUploadResponse.getETag());
-  }
-
-  private void setHeaders(Map<String, String> additionalHeaders) {
-    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
-    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
-        "STANDARD");
-
-    additionalHeaders
-        .forEach((k, v) -> when(headers.getHeaderString(k)).thenReturn(v));
-    REST.setHeaders(headers);
-  }
-
-  private void setHeaders() {
-    setHeaders(new HashMap<>());
-  }
-
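-  // Resolve the Ozone volume/bucket pair that backs the given S3 bucket.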
-  private static OzoneBucket getOzoneBucket(ObjectStore objectStore,
-      String bucketName)
-      throws IOException {
-
-    String ozoneBucketName = objectStore.getOzoneBucketName(bucketName);
-    String ozoneVolumeName = objectStore.getOzoneVolumeName(bucketName);
-
-    return objectStore.getVolume(ozoneVolumeName).getBucket(ozoneBucketName);
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java
deleted file mode 100644
index b5d0c93..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test delete object.
- */
-public class TestObjectDelete {
-
-  @Test
-  public void delete() throws IOException, OS3Exception {
-    //GIVEN
-    OzoneClient client = new OzoneClientStub();
-    client.getObjectStore().createS3Bucket("bilbo", "b1");
-
-    String volumeName = client.getObjectStore().getOzoneVolumeName("b1");
-
-    OzoneBucket bucket =
-        client.getObjectStore().getVolume(volumeName).getBucket("b1");
-
-    bucket.createKey("key1", 0).close();
-
-    ObjectEndpoint rest = new ObjectEndpoint();
-    rest.setClient(client);
-
-    //WHEN
-    rest.delete("b1", "key1", null);
-
-    //THEN
-    Assert.assertFalse("Bucket Should not contain any key after delete",
-        bucket.listKeys("").hasNext());
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java
deleted file mode 100644
index 070c827..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-
-import org.apache.commons.lang3.tuple.Pair;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test static utility methods of the ObjectEndpoint.
- */
-public class TestObjectEndpoint {
-
-  @Test
-  public void parseSourceHeader() throws OS3Exception {
-    Pair<String, String> bucketKey =
-        ObjectEndpoint.parseSourceHeader("bucket1/key1");
-
-    Assert.assertEquals("bucket1", bucketKey.getLeft());
-
-    Assert.assertEquals("key1", bucketKey.getRight());
-  }
-
-  @Test
-  public void parseSourceHeaderWithPrefix() throws OS3Exception {
-    Pair<String, String> bucketKey =
-        ObjectEndpoint.parseSourceHeader("/bucket1/key1");
-
-    Assert.assertEquals("bucket1", bucketKey.getLeft());
-
-    Assert.assertEquals("key1", bucketKey.getRight());
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
deleted file mode 100644
index fcafe31..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.nio.charset.Charset;
-import java.time.format.DateTimeFormatter;
-
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-
-import org.apache.commons.io.IOUtils;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-/**
- * Test get object.
- */
-public class TestObjectGet {
-
-  public static final String CONTENT = "0123456789";
-
-  @Test
-  public void get() throws IOException, OS3Exception {
-    //GIVEN
-    OzoneClientStub client = new OzoneClientStub();
-    client.getObjectStore().createS3Bucket("bilbo", "b1");
-    String volumeName = client.getObjectStore().getOzoneVolumeName("b1");
-    OzoneVolume volume = client.getObjectStore().getVolume(volumeName);
-    OzoneBucket bucket =
-        volume.getBucket("b1");
-    OzoneOutputStream keyStream =
-        bucket.createKey("key1", CONTENT.getBytes(UTF_8).length);
-    keyStream.write(CONTENT.getBytes(UTF_8));
-    keyStream.close();
-
-    ObjectEndpoint rest = new ObjectEndpoint();
-    rest.setClient(client);
-    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
-    rest.setHeaders(headers);
-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-
-    //WHEN
-    Response response = rest.get("b1", "key1", null, 0, null, body);
-
-    //THEN
-    OzoneInputStream ozoneInputStream =
-        volume.getBucket("b1")
-            .readKey("key1");
-    String keyContent =
-        IOUtils.toString(ozoneInputStream, Charset.forName("UTF-8"));
-
-    Assert.assertEquals(CONTENT, keyContent);
-    Assert.assertEquals("" + keyContent.length(),
-        response.getHeaderString("Content-Length"));
-
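-    // The Last-Modified header must be a valid RFC 1123 timestamp;
-    // parsing throws if it is malformed.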
-    DateTimeFormatter.RFC_1123_DATE_TIME
-        .parse(response.getHeaderString("Last-Modified"));
-
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
deleted file mode 100644
index ba39b28..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.ws.rs.core.Response;
-import java.io.IOException;
-import java.time.format.DateTimeFormatter;
-import java.util.HashMap;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-
-import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
-import static java.nio.charset.StandardCharsets.UTF_8;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Test head object.
- */
-public class TestObjectHead {
-  private String bucketName = "b1";
-  private OzoneClientStub clientStub;
-  private ObjectStore objectStoreStub;
-  private ObjectEndpoint keyEndpoint;
-  private OzoneBucket bucket;
-
-  @Before
-  public void setup() throws IOException {
-    //Create client stub and object store stub.
-    clientStub = new OzoneClientStub();
-    objectStoreStub = clientStub.getObjectStore();
-
-    // Create volume and bucket
-    objectStoreStub.createS3Bucket("bilbo", bucketName);
-    String volName = objectStoreStub.getOzoneVolumeName(bucketName);
-
-    bucket = objectStoreStub.getVolume(volName).getBucket(bucketName);
-
-    // Create the ObjectEndpoint and point it at the OzoneClientStub
-    keyEndpoint = new ObjectEndpoint();
-    keyEndpoint.setClient(clientStub);
-  }
-
-  @Test
-  public void testHeadObject() throws Exception {
-    //GIVEN
-    String value = RandomStringUtils.randomAlphanumeric(32);
-    OzoneOutputStream out = bucket.createKey("key1",
-        value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE,
-        ReplicationFactor.ONE, new HashMap<>());
-    out.write(value.getBytes(UTF_8));
-    out.close();
-
-    //WHEN
-    Response response = keyEndpoint.head(bucketName, "key1");
-
-    //THEN
-    Assert.assertEquals(200, response.getStatus());
-    Assert.assertEquals(value.getBytes(UTF_8).length,
-        Long.parseLong(response.getHeaderString("Content-Length")));
-
-    DateTimeFormatter.RFC_1123_DATE_TIME
-        .parse(response.getHeaderString("Last-Modified"));
-
-  }
-
-  @Test
-  public void testHeadFailByBadName() throws Exception {
-    // Head an object that doesn't exist; expect a 404 response or a
-    // NoSuchObject error.
-    try {
-      Response response =  keyEndpoint.head(bucketName, "badKeyName");
-      Assert.assertEquals(404, response.getStatus());
-    } catch (OS3Exception ex) {
-      Assert.assertTrue(ex.getCode().contains("NoSuchObject"));
-      Assert.assertTrue(ex.getErrorMessage().contains("object does not exist"));
-      Assert.assertEquals(HTTP_NOT_FOUND, ex.getHttpCode());
-    }
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java
deleted file mode 100644
index f4c3b94..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteRequest.DeleteObject;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-
-import com.google.common.collect.Sets;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test object multi delete.
- */
-public class TestObjectMultiDelete {
-
-  @Test
-  public void delete() throws IOException, OS3Exception, JAXBException {
-    //GIVEN
-    OzoneClient client = new OzoneClientStub();
-    OzoneBucket bucket = initTestData(client);
-
-    BucketEndpoint rest = new BucketEndpoint();
-    rest.setClient(client);
-
-    MultiDeleteRequest mdr = new MultiDeleteRequest();
-    mdr.getObjects().add(new DeleteObject("key1"));
-    mdr.getObjects().add(new DeleteObject("key2"));
-    mdr.getObjects().add(new DeleteObject("key4"));
-
-    //WHEN
-    MultiDeleteResponse response = rest.multiDelete("b1", "", mdr);
-
-    //THEN
-    Set<String> keysAtTheEnd = Sets.newHashSet(bucket.listKeys("")).stream()
-        .map(OzoneKey::getName)
-        .collect(Collectors.toSet());
-
-    Set<String> expectedResult = new HashSet<>();
-    expectedResult.add("key3");
-
-    Assert.assertEquals(expectedResult, keysAtTheEnd);
-    Assert.assertEquals(3, response.getDeletedObjects().size());
-    Assert.assertEquals(0, response.getErrors().size());
-  }
-
-  @Test
-  public void deleteQuiet() throws IOException, OS3Exception, JAXBException {
-    //GIVEN
-    OzoneClient client = new OzoneClientStub();
-    OzoneBucket bucket = initTestData(client);
-
-    BucketEndpoint rest = new BucketEndpoint();
-    rest.setClient(client);
-
-    MultiDeleteRequest mdr = new MultiDeleteRequest();
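-    // In quiet mode, S3 omits successfully deleted keys from the response.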
-    mdr.setQuiet(true);
-    mdr.getObjects().add(new DeleteObject("key1"));
-    mdr.getObjects().add(new DeleteObject("key2"));
-    mdr.getObjects().add(new DeleteObject("key4"));
-
-    //WHEN
-    MultiDeleteResponse response = rest.multiDelete("b1", "", mdr);
-
-    //THEN
-    Set<String> keysAtTheEnd = Sets.newHashSet(bucket.listKeys("")).stream()
-        .map(OzoneKey::getName)
-        .collect(Collectors.toSet());
-
-    // Quiet mode still deletes the keys; only the response body is reduced.
-    Assert.assertEquals(Sets.newHashSet("key3"), keysAtTheEnd);
-    Assert.assertEquals(0, response.getDeletedObjects().size());
-    Assert.assertEquals(0, response.getErrors().size());
-  }
-
-  private OzoneBucket initTestData(OzoneClient client) throws IOException {
-    client.getObjectStore().createS3Bucket("bilbo", "b1");
-
-    String volumeName = client.getObjectStore().getOzoneVolumeName("b1");
-
-    OzoneBucket bucket =
-        client.getObjectStore().getVolume(volumeName).getBucket("b1");
-
-    bucket.createKey("key1", 0).close();
-    bucket.createKey("key2", 0).close();
-    bucket.createKey("key3", 0).close();
-    return bucket;
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
deleted file mode 100644
index 839834c..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.nio.charset.Charset;
-
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.client.OzoneKeyDetails;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.when;
-
-/**
- * Test put object.
- */
-public class TestObjectPut {
-  public static final String CONTENT = "0123456789";
-  private String userName = "ozone";
-  private String bucketName = "b1";
-  private String keyName = "key1";
-  private String destBucket = "b2";
-  private String destkey = "key2";
-  private String nonexist = "nonexist";
-  private OzoneClientStub clientStub;
-  private ObjectStore objectStoreStub;
-  private ObjectEndpoint objectEndpoint;
-
-  @Before
-  public void setup() throws IOException {
-    //Create client stub and object store stub.
-    clientStub = new OzoneClientStub();
-    objectStoreStub = clientStub.getObjectStore();
-
-    // Create bucket
-    objectStoreStub.createS3Bucket(userName, bucketName);
-    objectStoreStub.createS3Bucket("ozone1", destBucket);
-
-    // Create the ObjectEndpoint and point it at the OzoneClientStub
-    objectEndpoint = new ObjectEndpoint();
-    objectEndpoint.setClient(clientStub);
-  }
-
-  @Test
-  public void testPutObject() throws IOException, OS3Exception {
-    //GIVEN
-    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
-    ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes());
-    objectEndpoint.setHeaders(headers);
-
-    //WHEN
-    Response response = objectEndpoint.put(bucketName, keyName, CONTENT
-        .length(), 1, null, body);
-
-
-    //THEN
-    String volumeName = clientStub.getObjectStore()
-        .getOzoneVolumeName(bucketName);
-    OzoneInputStream ozoneInputStream =
-        clientStub.getObjectStore().getVolume(volumeName).getBucket(bucketName)
-            .readKey(keyName);
-    String keyContent =
-        IOUtils.toString(ozoneInputStream, Charset.forName("UTF-8"));
-
-    Assert.assertEquals(200, response.getStatus());
-    Assert.assertEquals(CONTENT, keyContent);
-  }
-
-  @Test
-  public void testPutObjectWithSignedChunks() throws IOException, OS3Exception {
-    //GIVEN
-    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
-    objectEndpoint.setHeaders(headers);
-
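-    // AWS V4 streaming uploads frame each chunk as
-    // "<hex-size>;chunk-signature=<sig>\r\n<data>\r\n":
-    // 0x0a = 10 bytes ("1234567890"), 0x05 = 5 bytes ("abcde").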
-    String chunkedContent = "0a;chunk-signature=signature\r\n"
-        + "1234567890\r\n"
-        + "05;chunk-signature=signature\r\n"
-        + "abcde\r\n";
-
-    when(headers.getHeaderString("x-amz-content-sha256"))
-        .thenReturn("STREAMING-AWS4-HMAC-SHA256-PAYLOAD");
-
-    //WHEN
-    Response response = objectEndpoint.put(bucketName, keyName,
-        chunkedContent.length(), 1, null,
-        new ByteArrayInputStream(chunkedContent.getBytes()));
-
-    //THEN
-    String volumeName = clientStub.getObjectStore()
-        .getOzoneVolumeName(bucketName);
-    OzoneInputStream ozoneInputStream =
-        clientStub.getObjectStore().getVolume(volumeName).getBucket(bucketName)
-            .readKey(keyName);
-    String keyContent =
-        IOUtils.toString(ozoneInputStream, Charset.forName("UTF-8"));
-
-    Assert.assertEquals(200, response.getStatus());
-    Assert.assertEquals("1234567890abcde", keyContent);
-  }
-
-  @Test
-  public void testCopyObject() throws IOException, OS3Exception {
-    // Put object in to source bucket
-    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
-    ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes());
-    objectEndpoint.setHeaders(headers);
-    keyName = "sourceKey";
-
-    Response response = objectEndpoint.put(bucketName, keyName,
-        CONTENT.length(), 1, null, body);
-
-    String volumeName = clientStub.getObjectStore().getOzoneVolumeName(
-        bucketName);
-
-    OzoneInputStream ozoneInputStream = clientStub.getObjectStore().getVolume(
-        volumeName).getBucket(bucketName).readKey(keyName);
-
-    String keyContent = IOUtils.toString(ozoneInputStream, Charset.forName(
-        "UTF-8"));
-
-    Assert.assertEquals(200, response.getStatus());
-    Assert.assertEquals(CONTENT, keyContent);
-
-
-    // Add copy header, and then call put
-    when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
-        bucketName  + "/" + keyName);
-
-    response = objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1,
-        null, body);
-
-    // Check destination key and response
-    volumeName = clientStub.getObjectStore().getOzoneVolumeName(destBucket);
-    ozoneInputStream = clientStub.getObjectStore().getVolume(volumeName)
-        .getBucket(destBucket).readKey(destkey);
-
-    keyContent = IOUtils.toString(ozoneInputStream, Charset.forName("UTF-8"));
-
-    Assert.assertEquals(200, response.getStatus());
-    Assert.assertEquals(CONTENT, keyContent);
-
-    // source and dest same
-    try {
-      objectEndpoint.put(bucketName, keyName, CONTENT.length(), 1, null, body);
-      fail("test copy object failed");
-    } catch (OS3Exception ex) {
-      Assert.assertTrue(ex.getErrorMessage().contains("This copy request is " +
-          "illegal"));
-    }
-
-    // source bucket not found
-    try {
-      when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
-          nonexist + "/"  + keyName);
-      objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1, null,
-          body);
-      fail("test copy object failed");
-    } catch (OS3Exception ex) {
-      Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
-    }
-
-    // dest bucket not found
-    try {
-      when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
-          bucketName + "/" + keyName);
-      objectEndpoint.put(nonexist, destkey, CONTENT.length(), 1, null, body);
-      fail("test copy object failed");
-    } catch (OS3Exception ex) {
-      Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
-    }
-
-    //Both source and dest bucket not found
-    try {
-      when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
-          nonexist + "/" + keyName);
-      objectEndpoint.put(nonexist, destkey, CONTENT.length(), 1, null, body);
-      fail("test copy object failed");
-    } catch (OS3Exception ex) {
-      Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
-    }
-
-    // source key not found (the put also targets a missing bucket, so
-    // NoSuchBucket is still the expected error)
-    try {
-      when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
-          bucketName + "/" + nonexist);
-      objectEndpoint.put("nonexistent", keyName, CONTENT.length(), 1,
-          null, body);
-      fail("test copy object failed");
-    } catch (OS3Exception ex) {
-      Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
-    }
-
-  }
-
-  @Test
-  public void testInvalidStorageType() throws IOException {
-    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
-    ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes());
-    objectEndpoint.setHeaders(headers);
-    keyName = "sourceKey";
-    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random");
-
-    try {
-      objectEndpoint.put(bucketName, keyName,
-          CONTENT.length(), 1, null, body);
-      fail("testInvalidStorageType");
-    } catch (OS3Exception ex) {
-      assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(),
-          ex.getErrorMessage());
-      assertEquals("random", ex.getResource());
-    }
-  }
-
-  @Test
-  public void testEmptyStorageType() throws IOException, OS3Exception {
-    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
-    ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes());
-    objectEndpoint.setHeaders(headers);
-    keyName = "sourceKey";
-    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("");
-
-    Response response = objectEndpoint.put(bucketName, keyName, CONTENT
-            .length(), 1, null, body);
-
-    String volumeName = clientStub.getObjectStore()
-        .getOzoneVolumeName(bucketName);
-
-    OzoneKeyDetails key =
-        clientStub.getObjectStore().getVolume(volumeName).getBucket(bucketName)
-            .getKey(keyName);
-
-    //default type is set
-    Assert.assertEquals(ReplicationType.RATIS, key.getReplicationType());
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
deleted file mode 100644
index 3e91a77..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-
-import java.io.ByteArrayInputStream;
-
-import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
-import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.when;
-
-/**
- * This class tests the upload part request.
- */
-public class TestPartUpload {
-
-  private final static ObjectEndpoint REST = new ObjectEndpoint();
-  private final static String BUCKET = "s3bucket";
-  private final static String KEY = "key1";
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-
-    OzoneClientStub client = new OzoneClientStub();
-    client.getObjectStore().createS3Bucket("ozone", BUCKET);
-
-
-    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
-    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
-        "STANDARD");
-
-    REST.setHeaders(headers);
-    REST.setClient(client);
-  }
-
-
-  @Test
-  public void testPartUpload() throws Exception {
-
-    Response response = REST.initializeMultipartUpload(BUCKET, KEY);
-    MultipartUploadInitiateResponse multipartUploadInitiateResponse =
-        (MultipartUploadInitiateResponse) response.getEntity();
-    assertNotNull(multipartUploadInitiateResponse.getUploadID());
-    String uploadID = multipartUploadInitiateResponse.getUploadID();
-
-    assertEquals(response.getStatus(), 200);
-
-    String content = "Multipart Upload";
-    ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
-    response = REST.put(BUCKET, KEY, content.length(), 1, uploadID, body);
-
-    assertNotNull(response.getHeaderString("ETag"));
-
-  }
-
-  @Test
-  public void testPartUploadWithOverride() throws Exception {
-
-    Response response = REST.initializeMultipartUpload(BUCKET, KEY);
-    MultipartUploadInitiateResponse multipartUploadInitiateResponse =
-        (MultipartUploadInitiateResponse) response.getEntity();
-    assertNotNull(multipartUploadInitiateResponse.getUploadID());
-    String uploadID = multipartUploadInitiateResponse.getUploadID();
-
-    assertEquals(response.getStatus(), 200);
-
-    String content = "Multipart Upload";
-    ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
-    response = REST.put(BUCKET, KEY, content.length(), 1, uploadID, body);
-
-    assertNotNull(response.getHeaderString("ETag"));
-
-    String eTag = response.getHeaderString("ETag");
-
-    // Upload part again with same part Number, the ETag should be changed.
-    content = "Multipart Upload Changed";
-    response = REST.put(BUCKET, KEY, content.length(), 1, uploadID, body);
-    assertNotNull(response.getHeaderString("ETag"));
-    assertNotEquals(eTag, response.getHeaderString("ETag"));
-
-  }
-
-
-  @Test
-  public void testPartUploadWithIncorrectUploadID() throws Exception {
-    try {
-      String content = "Multipart Upload With Incorrect uploadID";
-      ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
-      REST.put(BUCKET, KEY, content.length(), 1, "random", body);
-      fail("testPartUploadWithIncorrectUploadID failed");
-    } catch (OS3Exception ex) {
-      assertEquals("NoSuchUpload", ex.getCode());
-      assertEquals(HTTP_NOT_FOUND, ex.getHttpCode());
-    }
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java
deleted file mode 100644
index b7512cb..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.hadoop.ozone.s3.endpoint;
-
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneClientStub;
-import org.apache.hadoop.ozone.s3.header.AuthenticationHeaderParser;
-
-import static org.junit.Assert.assertEquals;
-import org.apache.hadoop.ozone.s3.util.OzoneS3Util;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * This class tests the root-level bucket listing.
- */
-public class TestRootList {
-
-  private OzoneClientStub clientStub;
-  private ObjectStore objectStoreStub;
-  private RootEndpoint rootEndpoint;
-  private String userName = "ozone";
-
-  @Before
-  public void setup() throws Exception {
-
-    //Create client stub and object store stub.
-    clientStub = new OzoneClientStub();
-    objectStoreStub = clientStub.getObjectStore();
-
-    // Create the RootEndpoint and point it at the OzoneClientStub
-    rootEndpoint = new RootEndpoint();
-    rootEndpoint.setClient(clientStub);
-
-    AuthenticationHeaderParser parser = new AuthenticationHeaderParser();
-    parser.setAuthHeader("AWS " + userName +":secret");
-    rootEndpoint.setAuthenticationHeaderParser(parser);
-  }
-
-  @Test
-  public void testListBucket() throws Exception {
-
-    // List operation should succeed even when there is no bucket.
-    ListBucketResponse response =
-        (ListBucketResponse) rootEndpoint.get().getEntity();
-    assertEquals(0, response.getBucketsNum());
-    String volumeName = OzoneS3Util.getVolumeName(userName);
-
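-    // Create ten buckets in the user's volume; the root listing should
-    // then report all of them.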
-    String bucketBaseName = "bucket-" + getClass().getName();
-    for(int i = 0; i < 10; i++) {
-      objectStoreStub.createS3Bucket(volumeName, bucketBaseName + i);
-    }
-    response = (ListBucketResponse) rootEndpoint.get().getEntity();
-    assertEquals(10, response.getBucketsNum());
-  }
-
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java
deleted file mode 100644
index d320041..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Unit tests for the rest endpoint implementations.
- */
-package org.apache.hadoop.ozone.s3.endpoint;
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exception.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exception.java
deleted file mode 100644
index fa6e2c7..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exception.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.exception;
-
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * This class tests OS3Exception class.
- */
-public class TestOS3Exception {
-
-  @Test
-  public void testOS3Exception() {
-    OS3Exception ex = new OS3Exception("AccessDenied", "Access Denied",
-        403);
-    String requestId = OzoneUtils.getRequestID();
-    ex = S3ErrorTable.newError(ex, "bucket");
-    ex.setRequestId(requestId);
-    String val = ex.toXml();
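-    // The serialized error is a small XML document; rebuild the expected
-    // body from the exception fields and compare it verbatim.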
-    String formatString = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
-        "<Error>\n" +
-        "  <Code>%s</Code>\n" +
-        "  <Message>%s</Message>\n" +
-        "  <Resource>%s</Resource>\n" +
-        "  <RequestId>%s</RequestId>\n" +
-        "</Error>\n";
-    String expected = String.format(formatString, ex.getCode(),
-        ex.getErrorMessage(), ex.getResource(),
-        ex.getRequestId());
-    Assert.assertEquals(expected, val);
-  }
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/package-info.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/package-info.java
deleted file mode 100644
index 31effe4..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package tests OS3Exception.
- */
-package org.apache.hadoop.ozone.s3.exception;
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV2.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV2.java
deleted file mode 100644
index 97f7fb4..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV2.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.header;
-
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.junit.Test;
-
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-/**
- * This class tests Authorization header format v2.
- */
-public class TestAuthorizationHeaderV2 {
-
-  @Test
-  public void testAuthHeaderV2() throws OS3Exception {
-    try {
-      String auth = "AWS accessKey:signature";
-      AuthorizationHeaderV2 v2 = new AuthorizationHeaderV2(auth);
-      assertEquals(v2.getAccessKeyID(), "accessKey");
-      assertEquals(v2.getSignature(), "signature");
-    } catch (OS3Exception ex) {
-      fail("testAuthHeaderV2 failed");
-    }
-  }
-
-  @Test
-  public void testIncorrectHeader1() throws OS3Exception {
-    try {
-      String auth = "AAA accessKey:signature";
-      new AuthorizationHeaderV2(auth);
-      fail("testIncorrectHeader");
-    } catch (OS3Exception ex) {
-      assertEquals("AuthorizationHeaderMalformed", ex.getCode());
-    }
-  }
-
-  @Test
-  public void testIncorrectHeader2() throws OS3Exception {
-    try {
-      String auth = "AWS :accessKey";
-      new AuthorizationHeaderV2(auth);
-      fail("testIncorrectHeader");
-    } catch (OS3Exception ex) {
-      assertEquals("AuthorizationHeaderMalformed", ex.getCode());
-    }
-  }
-
-  @Test
-  public void testIncorrectHeader3() throws OS3Exception {
-    try {
-      String auth = "AWS :signature";
-      new AuthorizationHeaderV2(auth);
-      fail("testIncorrectHeader");
-    } catch (OS3Exception ex) {
-      assertEquals("AuthorizationHeaderMalformed", ex.getCode());
-    }
-  }
-
-  @Test
-  public void testIncorrectHeader4() throws OS3Exception {
-    try {
-      String auth = "AWS accessKey:";
-      new AuthorizationHeaderV2(auth);
-      fail("testIncorrectHeader");
-    } catch (OS3Exception ex) {
-      assertEquals("AuthorizationHeaderMalformed", ex.getCode());
-    }
-  }
-}
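
The deleted V2 tests above pin down a simple contract: the header is the literal
prefix "AWS" followed by "<accessKeyId>:<signature>", and anything else fails with
code "AuthorizationHeaderMalformed". A minimal sketch using only the accessors the
tests exercise (the header value itself is illustrative):

    // Parse a legacy AWS signature V2 authorization header.
    AuthorizationHeaderV2 v2 = new AuthorizationHeaderV2("AWS accessKey:signature");
    String key = v2.getAccessKeyID();  // "accessKey"
    String sig = v2.getSignature();    // "signature"
    // A missing key or signature throws OS3Exception
    // with code "AuthorizationHeaderMalformed".
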
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java
deleted file mode 100644
index a8cffbe..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java
+++ /dev/null
@@ -1,354 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.header;
-
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.time.LocalDate;
-
-import static java.time.temporal.ChronoUnit.DAYS;
-import static org.apache.hadoop.ozone.s3.AWSV4AuthParser.DATE_FORMATTER;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-/**
- * This class tests Authorization header format v4.
- */
-public class TestAuthorizationHeaderV4 {
-  private String curDate;
-
-  @Before
-  public void setup() {
-    LocalDate now = LocalDate.now();
-    curDate = DATE_FORMATTER.format(now);
-  }
-
-  @Test
-  public void testV4HeaderWellFormed() throws Exception {
-    String auth = "AWS4-HMAC-SHA256 " +
-        "Credential=ozone/" + curDate + "/us-east-1/s3/aws4_request, " +
-        "SignedHeaders=host;range;x-amz-date, " +
-        "Signature=fe5f80f77d5fa3beca038a248ff027";
-    AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(auth);
-    assertEquals("AWS4-HMAC-SHA256", v4.getAlgorithm());
-    assertEquals("ozone", v4.getAccessKeyID());
-    assertEquals(curDate, v4.getDate());
-    assertEquals("us-east-1", v4.getAwsRegion());
-    assertEquals("aws4_request", v4.getAwsRequest());
-    assertEquals("host;range;x-amz-date", v4.getSignedHeaderString());
-    assertEquals("fe5f80f77d5fa3beca038a248ff027", v4.getSignature());
-  }
-
-  @Test
-  public void testV4HeaderMissingParts() {
-    try {
-      String auth = "AWS4-HMAC-SHA256 " +
-          "Credential=ozone/" + curDate + "/us-east-1/s3/aws4_request, " +
-          "SignedHeaders=host;range;x-amz-date,";
-      AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(auth);
-      fail("Exception is expected in case of malformed header");
-    } catch (OS3Exception ex) {
-      assertEquals("AuthorizationHeaderMalformed", ex.getCode());
-    }
-  }
-
-  @Test
-  public void testV4HeaderInvalidCredential() {
-    try {
-      String auth = "AWS4-HMAC-SHA256 " +
-          "Credential=" + curDate + "/us-east-1/s3/aws4_request, " +
-          "SignedHeaders=host;range;x-amz-date, " +
-          "Signature=fe5f80f77d5fa3beca038a248ff027";
-      AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(auth);
-      fail("Exception is expected in case of malformed header");
-    } catch (OS3Exception ex) {
-      assertEquals("AuthorizationHeaderMalformed", ex.getCode());
-    }
-  }
-
-  @Test
-  public void testV4HeaderWithoutSpace() throws OS3Exception {
-
-    String auth =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" +
-            "/aws4_request,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(auth);
-
-    assertEquals("AWS4-HMAC-SHA256", v4.getAlgorithm());
-    assertEquals("ozone", v4.getAccessKeyID());
-    assertEquals(curDate, v4.getDate());
-    assertEquals("us-east-1", v4.getAwsRegion());
-    assertEquals("aws4_request", v4.getAwsRequest());
-    assertEquals("host;x-amz-content-sha256;x-amz-date",
-        v4.getSignedHeaderString());
-    assertEquals("fe5f80f77d5fa3beca038a248ff027", v4.getSignature());
-
-  }
-
-  @Test
-  public void testV4HeaderDateValidationSuccess() throws OS3Exception {
-    // Case 1: valid date within range.
-    LocalDate now = LocalDate.now();
-    String dateStr = DATE_FORMATTER.format(now);
-    validateResponse(dateStr);
-
-    // Case 2: valid date within range.
-    dateStr = DATE_FORMATTER.format(now.plus(1, DAYS));
-    validateResponse(dateStr);
-
-    // Case 3: valid date within range.
-    dateStr = DATE_FORMATTER.format(now.minus(1, DAYS));
-    validateResponse(dateStr);
-  }
-
-  @Test
-  public void testV4HeaderDateValidationFailure() throws Exception {
-    // Case 1: Empty date.
-    LocalDate now = LocalDate.now();
-    String dateStr = "";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> validateResponse(dateStr));
-
-    // Case 2: Date more than one day in the future.
-    String dateStr2 = DATE_FORMATTER.format(now.plus(2, DAYS));
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> validateResponse(dateStr2));
-
-    // Case 3: Date more than one day in the past.
-    String dateStr3 = DATE_FORMATTER.format(now.minus(2, DAYS));
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> validateResponse(dateStr3));
-  }
-
-  private void validateResponse(String dateStr) throws OS3Exception {
-    String auth =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + dateStr + "/us-east-1/s3" +
-            "/aws4_request,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(auth);
-
-    assertEquals("AWS4-HMAC-SHA256", v4.getAlgorithm());
-    assertEquals("ozone", v4.getAccessKeyID());
-    assertEquals(dateStr, v4.getDate());
-    assertEquals("us-east-1", v4.getAwsRegion());
-    assertEquals("aws4_request", v4.getAwsRequest());
-    assertEquals("host;x-amz-content-sha256;x-amz-date",
-        v4.getSignedHeaderString());
-    assertEquals("fe5f80f77d5fa3beca038a248ff027", v4.getSignature());
-  }
-
-  @Test
-  public void testV4HeaderRegionValidationFailure() throws Exception {
-    String auth =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "//s3/aws4_request,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027%";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth));
-    String auth2 =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "s3/aws4_request,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027%";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth2));
-  }
-
-  @Test
-  public void testV4HeaderServiceValidationFailure() throws Exception {
-    String auth =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1" +
-            "//aws4_request,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth));
-
-    String auth2 =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1" +
-            "/aws4_request,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth2));
-  }
-
-  @Test
-  public void testV4HeaderRequestValidationFailure() throws Exception {
-    String auth =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" +
-            "/   ,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth));
-
-    String auth2 =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" +
-            "/,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth2));
-
-    String auth3 =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" +
-            ","
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth3));
-  }
-
-  @Test
-  public void testV4HeaderSignedHeaderValidationFailure() throws Exception {
-    String auth =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" +
-            "/aws4_request,"
-            + "SignedHeaders=;;,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth));
-
-    String auth2 =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" +
-            "/aws4_request,"
-            + "SignedHeaders=,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth2));
-
-    String auth3 =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" +
-            "/aws4_request,"
-            + "=x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth3));
-
-    String auth4 =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" +
-            "/aws4_request,"
-            + "=,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth4));
-  }
-
-  @Test
-  public void testV4HeaderSignatureValidationFailure() throws Exception {
-    String auth =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" +
-            "/aws4_request,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027%";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth));
-
-    String auth2 =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" +
-            "/aws4_request,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth2));
-
-    String auth3 =
-        "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" +
-            "/aws4_request,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + ""
-            + "=";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth3));
-  }
-
-  @Test
-  public void testV4HeaderHashAlgoValidationFailure() throws Exception {
-    String auth =
-        "AWS4-HMAC-SHA Credential=ozone/" + curDate + "/us-east-1/s3" +
-            "/aws4_request,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth));
-
-    String auth2 =
-        "SHA-256 Credential=ozone/" + curDate + "/us-east-1/s3" +
-            "/aws4_request,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth2));
-
-    String auth3 =
-        " Credential=ozone/" + curDate + "/us-east-1/s3" +
-            "/aws4_request,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth3));
-  }
-
-  @Test
-  public void testV4HeaderCredentialValidationFailure() throws Exception {
-    String auth =
-        "AWS4-HMAC-SHA Credential=/" + curDate + "//" +
-            "/,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth));
-
-    String auth2 =
-        "AWS4-HMAC-SHA =/" + curDate + "//" +
-            "/,"
-            + "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
-            + "Signature"
-            + "=fe5f80f77d5fa3beca038a248ff027";
-    LambdaTestUtils.intercept(OS3Exception.class, "",
-        () -> new AuthorizationHeaderV4(auth2));
-  }
-
-}
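
The V4 tests above encode the full shape of an AWS signature V4 header. A sketch of
the happy path, assuming a date within one day of now (the only dates the validation
accepts); the signature value is illustrative:

    String auth = "AWS4-HMAC-SHA256 "
        + "Credential=ozone/" + DATE_FORMATTER.format(LocalDate.now())
        + "/us-east-1/s3/aws4_request, "
        + "SignedHeaders=host;range;x-amz-date, "
        + "Signature=fe5f80f77d5fa3beca038a248ff027";
    // Throws OS3Exception with code "AuthorizationHeaderMalformed" if malformed.
    AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(auth);
    // The Credential field splits into accessKeyId/date/region/service/aws4_request,
    // each of which must be non-empty.
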
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/package-info.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/package-info.java
deleted file mode 100644
index e7e04ab..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Unit tests for the bucket related rest endpoints.
- */
-package org.apache.hadoop.ozone.s3;
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestContinueToken.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestContinueToken.java
deleted file mode 100644
index a590367d..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestContinueToken.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.s3.util;
-
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test encode/decode of the continue token.
- */
-public class TestContinueToken {
-
-  @Test
-  public void encodeDecode() throws OS3Exception {
-    ContinueToken ct = new ContinueToken("key1", "dir1");
-
-    ContinueToken parsedToken =
-        ContinueToken.decodeFromString(ct.encodeToString());
-
-    Assert.assertEquals(ct, parsedToken);
-  }
-
-  @Test
-  public void encodeDecodeNullDir() throws OS3Exception {
-    ContinueToken ct = new ContinueToken("key1", null);
-
-    ContinueToken parsedToken =
-        ContinueToken.decodeFromString(ct.encodeToString());
-
-    Assert.assertEquals(ct, parsedToken);
-  }
-
-}
\ No newline at end of file
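
The continue-token tests round-trip the S3 listing cursor through its string form. A
minimal sketch, using only the two methods the tests exercise:

    ContinueToken ct = new ContinueToken("key1", "dir1");  // last key/dir of previous page
    String cookie = ct.encodeToString();     // opaque token handed back to the S3 client
    ContinueToken next = ContinueToken.decodeFromString(cookie);
    // next.equals(ct); decodeFromString declares OS3Exception,
    // presumably for corrupt or truncated tokens.
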
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java
deleted file mode 100644
index 8892a97..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.util;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.Collection;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP;
-import static org.junit.Assert.fail;
-
-/**
- * Class used to test OzoneS3Util.
- */
-public class TestOzoneS3Util {
-
-
-  private OzoneConfiguration configuration;
-  private String serviceID = "omService";
-
-  @Before
-  public void setConf() {
-    configuration = new OzoneConfiguration();
-
-    String nodeIDs = "om1,om2,om3";
-    configuration.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, serviceID);
-    configuration.set(OMConfigKeys.OZONE_OM_NODES_KEY + "." + serviceID,
-        nodeIDs);
-    configuration.setBoolean(HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, false);
-  }
-
-  @Test
-  public void testBuildServiceNameForToken() {
-
-    Collection<String> nodeIDList = OmUtils.getOMNodeIds(configuration,
-        serviceID);
-
-    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
-        serviceID, "om1"), "om1:9862");
-    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
-        serviceID, "om2"), "om2:9862");
-    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
-        serviceID, "om3"), "om3:9862");
-
-    String expectedOmServiceAddress = buildServiceAddress(nodeIDList);
-
-    SecurityUtil.setConfiguration(configuration);
-    String omserviceAddr = OzoneS3Util.buildServiceNameForToken(configuration,
-        serviceID, nodeIDList);
-
-    Assert.assertEquals(expectedOmServiceAddress, omserviceAddr);
-  }
-
-
-  @Test
-  public void testBuildServiceNameForTokenIncorrectConfig() {
-
-    Collection<String> nodeIDList = OmUtils.getOMNodeIds(configuration,
-        serviceID);
-
-    // Don't set the om3 RPC address. With one OM address missing,
-    // buildServiceNameForToken should fail.
-    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
-        serviceID, "om1"), "om1:9862");
-    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
-        serviceID, "om2"), "om2:9862");
-
-
-    SecurityUtil.setConfiguration(configuration);
-
-    try {
-      OzoneS3Util.buildServiceNameForToken(configuration,
-          serviceID, nodeIDList);
-      fail("testBuildServiceNameForTokenIncorrectConfig failed");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("Could not find rpcAddress " +
-          "for", ex);
-    }
-
-
-  }
-
-  /**
-   * Builds the service name from the list of OM node ids.
-   * @param nodeIDList OM node ids to resolve addresses for.
-   * @return service name for the token.
-   */
-  private String buildServiceAddress(Collection<String> nodeIDList) {
-    StringBuilder omServiceAddrBuilder = new StringBuilder();
-    int nodesLength = nodeIDList.size();
-    int counter = 0;
-    for (String nodeID : nodeIDList) {
-      counter++;
-      String addr = configuration.get(OmUtils.addKeySuffixes(
-          OMConfigKeys.OZONE_OM_ADDRESS_KEY, serviceID, nodeID));
-
-      if (counter != nodesLength) {
-        omServiceAddrBuilder.append(addr).append(',');
-      } else {
-        omServiceAddrBuilder.append(addr);
-      }
-    }
-
-    return omServiceAddrBuilder.toString();
-  }
-
-}
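
These tests describe how the gateway derived a delegation-token service name for an
HA Ozone Manager: one host:port per configured OM node, comma-joined, failing hard
when any node lacks an RPC address. A sketch under those assumptions ("omService"
and the addresses mirror the test fixture):

    // conf is an OzoneConfiguration with ozone.om.address.omService.* populated.
    SecurityUtil.setConfiguration(conf);
    Collection<String> nodes = OmUtils.getOMNodeIds(conf, "omService");
    String service = OzoneS3Util.buildServiceNameForToken(conf, "omService", nodes);
    // "om1:9862,om2:9862,om3:9862"; IllegalArgumentException
    // ("Could not find rpcAddress for ...") if an address is missing.
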
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRFC1123Util.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRFC1123Util.java
deleted file mode 100644
index 7576025..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRFC1123Util.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.util;
-
-import java.time.temporal.TemporalAccessor;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test for RFC1123 util.
- */
-public class TestRFC1123Util {
-
-  @Test
-  public void parse() {
-    // one-digit day
-    String dateStr = "Mon, 5 Nov 2018 15:04:05 GMT";
-
-    TemporalAccessor date = RFC1123Util.FORMAT.parse(dateStr);
-
-    String formatted = RFC1123Util.FORMAT.format(date);
-
-    // two-digit day after formatting
-    Assert.assertEquals("Mon, 05 Nov 2018 15:04:05 GMT", formatted);
-
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java
deleted file mode 100644
index 03c91bf..0000000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.s3.util;
-
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test class to test RangeHeaderParserUtil.
- */
-public class TestRangeHeaderParserUtil {
-
-  @Test
-  public void testRangeHeaderParser() {
-
-    RangeHeader rangeHeader;
-
-
-    // range is within file length
-    rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=0-8", 10);
-    assertEquals(0, rangeHeader.getStartOffset());
-    assertEquals(8, rangeHeader.getEndOffset());
-    assertEquals(false, rangeHeader.isReadFull());
-    assertEquals(false, rangeHeader.isInValidRange());
-
-    // range is within file length, start and end offset are the same
-    rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=0-0", 10);
-    assertEquals(0, rangeHeader.getStartOffset());
-    assertEquals(0, rangeHeader.getEndOffset());
-    assertEquals(false, rangeHeader.isReadFull());
-    assertEquals(false, rangeHeader.isInValidRange());
-
-    // range is not within file length: both start and end offset are
-    // greater than length
-    rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=11-10", 10);
-    assertEquals(true, rangeHeader.isInValidRange());
-
-    // start offset is greater than end offset, so the parser falls back
-    // to reading the full file
-    rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=11-8", 10);
-    assertEquals(0, rangeHeader.getStartOffset());
-    assertEquals(9, rangeHeader.getEndOffset());
-    assertEquals(true, rangeHeader.isReadFull());
-    assertEquals(false, rangeHeader.isInValidRange());
-
-    // unit is not "bytes", so the spec is ignored and the full file is read
-    rangeHeader = RangeHeaderParserUtil.parseRangeHeader("mb=11-8", 10);
-    assertEquals(0, rangeHeader.getStartOffset());
-    assertEquals(9, rangeHeader.getEndOffset());
-    assertEquals(true, rangeHeader.isReadFull());
-    assertEquals(false, rangeHeader.isInValidRange());
-
-    // malformed range spec also falls back to reading the full file
-    rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=-11-8", 10);
-    assertEquals(0, rangeHeader.getStartOffset());
-    assertEquals(9, rangeHeader.getEndOffset());
-    assertEquals(true, rangeHeader.isReadFull());
-    assertEquals(false, rangeHeader.isInValidRange());
-
-    // last n bytes (suffix range)
-    rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=-6", 10);
-    assertEquals(4, rangeHeader.getStartOffset());
-    assertEquals(9, rangeHeader.getEndOffset());
-    assertEquals(false, rangeHeader.isReadFull());
-    assertEquals(false, rangeHeader.isInValidRange());
-
-    rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=-106", 10);
-    assertEquals(0, rangeHeader.getStartOffset());
-    assertEquals(9, rangeHeader.getEndOffset());
-    assertEquals(false, rangeHeader.isInValidRange());
-
-  }
-
-}
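
Taken together, the cases above give the parser's fallback policy: a malformed or
unsatisfiable spec degrades to a full-object read, and only a range whose start and
end both lie beyond the object is reported invalid. The suffix form, for example:

    // Last 6 bytes of a 10-byte object.
    RangeHeader h = RangeHeaderParserUtil.parseRangeHeader("bytes=-6", 10);
    h.getStartOffset();   // 4
    h.getEndOffset();     // 9
    h.isReadFull();       // false
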
diff --git a/hadoop-ozone/s3gateway/src/test/resources/log4j.properties b/hadoop-ozone/s3gateway/src/test/resources/log4j.properties
deleted file mode 100644
index b8ad21d..0000000
--- a/hadoop-ozone/s3gateway/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,21 +0,0 @@
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-# log4j configuration used during build and unit tests
-
-log4j.rootLogger=info,stdout
-log4j.threshold=ALL
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
-
-log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
-log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
diff --git a/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml
deleted file mode 100644
index e6a345e..0000000
--- a/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<FindBugsFilter>
-     <Match>
-       <Package name="org.apache.hadoop.ozone.genesis.generated" />
-     </Match>
- </FindBugsFilter>
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
deleted file mode 100644
index d1ee9d5..0000000
--- a/hadoop-ozone/tools/pom.xml
+++ /dev/null
@@ -1,146 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-tools</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Tools</description>
-  <name>Apache Hadoop Ozone Tools</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-ozone-manager</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-    </dependency>
-    <!-- Genesis requires server side components -->
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-scm</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-filesystem</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-framework</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.xml.bind</groupId>
-      <artifactId>jaxb-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>javax.activation</groupId>
-      <artifactId>activation</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>io.dropwizard.metrics</groupId>
-      <artifactId>metrics-core</artifactId>
-      <version>3.2.4</version>
-    </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-core</artifactId>
-      <version>1.19</version>
-    </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-generator-annprocess</artifactId>
-      <version>1.19</version>
-    </dependency>
-    <dependency>
-      <groupId>com.amazonaws</groupId>
-      <artifactId>aws-java-sdk-s3</artifactId>
-      <version>1.11.615</version>
-    </dependency>
-    <dependency>
-      <groupId>com.github.spotbugs</groupId>
-      <artifactId>spotbugs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-integration-test</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <version>2.15.0</version>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml
-          </excludeFilterFile>
-          <fork>true</fork>
-          <maxHeap>2048</maxHeap>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java
deleted file mode 100644
index 5690296..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit.parser;
-
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.ozone.audit.parser.handler.LoadCommandHandler;
-import org.apache.hadoop.ozone.audit.parser.handler.QueryCommandHandler;
-import org.apache.hadoop.ozone.audit.parser.handler.TemplateCommandHandler;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-
-/**
- * Ozone audit parser tool.
- */
-@Command(name = "ozone auditparser",
-    description = "Shell parser for Ozone Audit Logs",
-    subcommands = {
-        LoadCommandHandler.class,
-        TemplateCommandHandler.class,
-        QueryCommandHandler.class
-    },
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true)
-public class AuditParser extends GenericCli {
-  /*
-  <.db file path> load <file>
-  <.db file path> template <template name>
-  <.db file path> query <custom sql>
-   */
-  @Parameters(arity = "1..1", description = "Existing or new .db file")
-  private String database;
-
-  public static void main(String[] argv) throws Exception {
-    new AuditParser().run(argv);
-  }
-
-  public String getDatabase() {
-    return database;
-  }
-}
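
The comment block in AuditParser doubles as its CLI grammar: a SQLite database path
followed by one of the three subcommands. A hypothetical invocation through the same
entry point main() uses (file names are illustrative; only the load/template/query
verbs come from the code):

    // Equivalent of: ozone auditparser audit.db load om-audit.log
    new AuditParser().run(new String[] {"audit.db", "load", "om-audit.log"});
    // template <name> and query "<sql>" follow the same argument shape.
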
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
deleted file mode 100644
index a7282b2..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
+++ /dev/null
@@ -1,245 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit.parser.common;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.ozone.audit.parser.model.AuditEntry;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.*;
-import java.sql.*;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.Properties;
-import java.util.stream.Collectors;
-
-/**
- * Database helper for ozone audit parser tool.
- */
-public final class DatabaseHelper {
-  private DatabaseHelper() {
-    //Never constructed
-  }
-  static {
-    loadProperties();
-  }
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DatabaseHelper.class);
-  private static Map<String, String> properties;
-
-  public static boolean setup(String dbName, String logs) {
-    if(createAuditTable(dbName)) {
-      return insertAudits(dbName, logs);
-    } else {
-      return false;
-    }
-  }
-
-  private static Connection getConnection(String dbName) {
-    Connection connection = null;
-    try {
-      Class.forName(ParserConsts.DRIVER);
-      connection = DriverManager.getConnection(
-          ParserConsts.CONNECTION_PREFIX + dbName);
-    } catch (ClassNotFoundException | SQLException e) {
-      LOG.error(e.getMessage());
-    }
-    return connection;
-  }
-
-  private static void loadProperties() {
-    Properties props = new Properties();
-    try{
-      InputStream inputStream = DatabaseHelper.class.getClassLoader()
-          .getResourceAsStream(ParserConsts.PROPS_FILE);
-      if (inputStream != null) {
-        props.load(inputStream);
-        properties = props.entrySet().stream().collect(
-            Collectors.toMap(
-                e -> e.getKey().toString(),
-                e -> e.getValue().toString()
-            )
-        );
-      } else {
-        throw new FileNotFoundException("property file '"
-            + ParserConsts.PROPS_FILE + "' not found in the classpath");
-      }
-    } catch(Exception e){
-      LOG.error(e.getMessage());
-    }
-
-  }
-
-  private static boolean createAuditTable(String dbName) {
-
-    try(Connection connection = getConnection(dbName);
-        Statement st = connection.createStatement()) {
-
-      st.executeUpdate(properties.get(ParserConsts.CREATE_AUDIT_TABLE));
-    } catch (SQLException e) {
-      LOG.error(e.getMessage());
-      return false;
-    }
-    return true;
-  }
-
-  private static boolean insertAudits(String dbName, String logs) {
-
-    try(Connection connection = getConnection(dbName);
-        PreparedStatement preparedStatement = connection.prepareStatement(
-            properties.get(ParserConsts.INSERT_AUDITS))) {
-
-      ArrayList<AuditEntry> auditEntries = parseAuditLogs(logs);
-
-      final int batchSize = 1000;
-      int count = 0;
-
-      //Insert list to db
-      for(AuditEntry audit : auditEntries) {
-        preparedStatement.setString(1, audit.getTimestamp());
-        preparedStatement.setString(2, audit.getLevel());
-        preparedStatement.setString(3, audit.getLogger());
-        preparedStatement.setString(4, audit.getUser());
-        preparedStatement.setString(5, audit.getIp());
-        preparedStatement.setString(6, audit.getOp());
-        preparedStatement.setString(7, audit.getParams());
-        preparedStatement.setString(8, audit.getResult());
-        preparedStatement.setString(9, audit.getException());
-
-        preparedStatement.addBatch();
-
-        if(++count % batchSize == 0) {
-          preparedStatement.executeBatch();
-        }
-      }
-      if(auditEntries.size() > 0) {
-        preparedStatement.executeBatch(); // insert remaining records
-      }
-    } catch (Exception e) {
-      LOG.error(e.getMessage());
-      return false;
-    }
-    return true;
-  }
-
-  private static ArrayList<AuditEntry> parseAuditLogs(String filePath)
-      throws Exception {
-    ArrayList<AuditEntry> listResult = new ArrayList<AuditEntry>();
-    try(FileInputStream fis = new FileInputStream(filePath);
-        InputStreamReader isr = new InputStreamReader(fis, "UTF-8");
-        BufferedReader bReader = new BufferedReader(isr)) {
-      String currentLine = null;
-      String[] entry = null;
-      AuditEntry tempEntry = null;
-      String nextLine = null;
-      currentLine = bReader.readLine();
-      nextLine = bReader.readLine();
-
-      while(true) {
-        if(tempEntry == null){
-          tempEntry = new AuditEntry();
-        }
-
-        if(currentLine == null) {
-          break;
-        } else {
-          if(!currentLine.matches(ParserConsts.DATE_REGEX)){
-            tempEntry.appendException(currentLine);
-          } else {
-            entry = StringUtils.stripAll(currentLine.split("\\|"));
-            String[] ops =
-                entry[5].substring(entry[5].indexOf('=') + 1).split(" ", 2);
-            tempEntry = new AuditEntry.Builder()
-                .setTimestamp(entry[0])
-                .setLevel(entry[1])
-                .setLogger(entry[2])
-                .setUser(entry[3].substring(entry[3].indexOf('=') + 1))
-                .setIp(entry[4].substring(entry[4].indexOf('=') + 1))
-                .setOp(ops[0])
-                .setParams(ops[1])
-                .setResult(entry[6].substring(entry[6].indexOf('=') + 1))
-                .build();
-            if(entry.length == 8){
-              tempEntry.setException(entry[7]);
-            }
-          }
-          if(nextLine == null || nextLine.matches(ParserConsts.DATE_REGEX)){
-            listResult.add(tempEntry);
-            tempEntry = null;
-          }
-          currentLine = nextLine;
-          nextLine = bReader.readLine();
-        }
-      }
-    }
-
-    return listResult;
-  }
-
-  public static String executeCustomQuery(String dbName, String query)
-      throws SQLException {
-    return executeStatement(dbName, query);
-  }
-
-  public static String executeTemplate(String dbName, String template)
-      throws SQLException {
-    return executeStatement(dbName,
-        properties.get(template));
-  }
-
-  private static String executeStatement(String dbName, String sql)
-      throws SQLException {
-    StringBuilder result = new StringBuilder();
-    try (Connection connection = getConnection(dbName)) {
-      if (connection != null) {
-        // try-with-resources ensures the statement and result set are
-        // closed even if the query fails part-way through.
-        try (Statement st = connection.createStatement();
-            ResultSet rs = st.executeQuery(sql)) {
-          ResultSetMetaData rsm = rs.getMetaData();
-          int cols = rsm.getColumnCount();
-          while (rs.next()) {
-            for (int index = 1; index <= cols; index++) {
-              result.append(rs.getObject(index)).append('\t');
-            }
-            result.append('\n');
-          }
-        }
-      }
-    }
-    return result.toString();
-  }
-
-  public static boolean validateTemplate(String templateName) {
-    return (properties.get(templateName) != null);
-  }
-}
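
DatabaseHelper is the whole persistence layer behind those handlers: setup() creates
the audit table in a SQLite file and bulk-inserts parsed log entries in batches of
1000, while the execute methods return tab-separated rows as a single string. A
minimal sketch (file and template names are illustrative; templates must exist in
commands.properties):

    if (DatabaseHelper.setup("audit.db", "om-audit.log")
        && DatabaseHelper.validateTemplate("top5cmds")) {
      // executeTemplate throws SQLException on query failure.
      System.out.println(DatabaseHelper.executeTemplate("audit.db", "top5cmds"));
    }
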
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/ParserConsts.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/ParserConsts.java
deleted file mode 100644
index 5259940..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/ParserConsts.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit.parser.common;
-
-/**
- * Constants used for ozone audit parser.
- */
-public final class ParserConsts {
-
-  private ParserConsts() {
-    //Never constructed
-  }
-
-  public static final String DRIVER = "org.sqlite.JDBC";
-  public static final String CONNECTION_PREFIX = "jdbc:sqlite:";
-  public static final String DATE_REGEX = "^\\d{4}-\\d{2}-\\d{2}.*$";
-  public static final String PROPS_FILE = "commands.properties";
-  public static final String INSERT_AUDITS = "insertAuditEntry";
-  public static final String CREATE_AUDIT_TABLE = "createAuditTable";
-
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/package-info.java
deleted file mode 100644
index 05c6172..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit.parser.common;
-/**
- * Classes to define constants & helpers for Ozone audit parser tool.
- */
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/LoadCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/LoadCommandHandler.java
deleted file mode 100644
index ec67bad..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/LoadCommandHandler.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit.parser.handler;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.ozone.audit.parser.AuditParser;
-import org.apache.hadoop.ozone.audit.parser.common.DatabaseHelper;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-import picocli.CommandLine.ParentCommand;
-
-import java.util.concurrent.Callable;
-
-/**
- * Load command handler for ozone audit parser.
- */
-@Command(name = "load",
-    aliases = "l",
-    description = "Load ozone audit log files",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class LoadCommandHandler implements Callable<Void> {
-
-  @Parameters(arity = "1..1", description = "Audit Log file(s)")
-  private String logs;
-
-  @ParentCommand
-  private AuditParser auditParser;
-
-  public Void call() {
-    if(DatabaseHelper.setup(auditParser.getDatabase(), logs)) {
-      System.out.println(logs + " has been loaded successfully");
-    } else {
-      System.out.println("Failed to load " + logs);
-    }
-    return null;
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/QueryCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/QueryCommandHandler.java
deleted file mode 100644
index fe083ca..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/QueryCommandHandler.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit.parser.handler;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.ozone.audit.parser.AuditParser;
-import org.apache.hadoop.ozone.audit.parser.common.DatabaseHelper;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-import picocli.CommandLine.ParentCommand;
-
-import java.sql.SQLException;
-import java.util.concurrent.Callable;
-
-/**
- * Custom query command handler for ozone audit parser.
- * The query must be enclosed within double quotes.
- */
-@Command(name = "query",
-    aliases = "q",
-    description = "Execute custom query",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class QueryCommandHandler implements Callable<Void> {
-
-  @Parameters(arity = "1..1", description = "Custom query enclosed within " +
-      "double quotes.")
-  private String query;
-
-  @ParentCommand
-  private AuditParser auditParser;
-
-  public Void call() {
-    try {
-      System.out.println(
-          DatabaseHelper.executeCustomQuery(auditParser.getDatabase(), query)
-      );
-    } catch (SQLException ex) {
-      System.err.println(ex.getMessage());
-    }
-    return null;
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java
deleted file mode 100644
index 9b97ee9..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit.parser.handler;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.ozone.audit.parser.AuditParser;
-import org.apache.hadoop.ozone.audit.parser.common.DatabaseHelper;
-import picocli.CommandLine.*;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-
-import java.sql.SQLException;
-import java.util.concurrent.Callable;
-
-/**
- * Template command handler for ozone audit parser.
- */
-@Command(name = "template",
-    aliases = "t",
-    description = "Execute template query",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class TemplateCommandHandler implements Callable<Void> {
-
-  @Parameters(arity = "1..1", description = "Template name to execute.")
-  private String template;
-
-  @ParentCommand
-  private AuditParser auditParser;
-
-  public Void call() {
-    try {
-      if (DatabaseHelper.validateTemplate(template)) {
-        System.out.println(
-            DatabaseHelper.executeTemplate(auditParser.getDatabase(),
-                template)
-        );
-      } else {
-        System.err.println("ERROR: Invalid template name - " + template);
-      }
-    } catch (SQLException ex) {
-      System.err.println(ex.getMessage());
-    }
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/package-info.java
deleted file mode 100644
index 720a7f5..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit.parser.handler;
-/**
- * Command handlers used for Ozone audit parser tool.
- */
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java
deleted file mode 100644
index c6b0b33..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit.parser.model;
-
-/**
- * POJO used for ozone audit parser tool.
- */
-public class AuditEntry {
-  private String timestamp;
-  private String level;
-  private String logger;
-  private String user;
-  private String ip;
-  private String op;
-  private String params;
-  private String result;
-  private String exception;
-
-  public AuditEntry(){}
-
-  public String getUser() {
-    return user;
-  }
-
-  public void setUser(String user) {
-    this.user = user;
-  }
-
-  public String getIp() {
-    return ip;
-  }
-
-  public void setIp(String ip) {
-    this.ip = ip;
-  }
-
-  public String getTimestamp() {
-    return timestamp;
-  }
-
-  public void setTimestamp(String timestamp) {
-    this.timestamp = timestamp;
-  }
-
-  public String getLevel() {
-    return level;
-  }
-
-  public void setLevel(String level) {
-    this.level = level;
-  }
-
-  public String getLogger() {
-    return logger;
-  }
-
-  public void setLogger(String logger) {
-    this.logger = logger;
-  }
-
-  public String getOp() {
-    return op;
-  }
-
-  public void setOp(String op) {
-    this.op = op;
-  }
-
-  public String getParams() {
-    return params;
-  }
-
-  public void setParams(String params) {
-    this.params = params;
-  }
-
-  public String getResult() {
-    return result;
-  }
-
-  public void setResult(String result) {
-    this.result = result;
-  }
-
-  public String getException() {
-    return exception;
-  }
-
-  public void setException(String exception) {
-    this.exception = exception.trim();
-  }
-
-  public void appendException(String text){
-    this.exception += "\n" + text.trim();
-  }
-
-  /**
-   * Builder for AuditEntry.
-   */
-  public static class Builder {
-    private String timestamp;
-    private String level;
-    private String logger;
-    private String user;
-    private String ip;
-    private String op;
-    private String params;
-    private String result;
-    private String exception;
-
-    public Builder() {
-
-    }
-
-    public Builder setTimestamp(String ts){
-      this.timestamp = ts;
-      return this;
-    }
-
-    public Builder setLevel(String lvl){
-      this.level = lvl;
-      return this;
-    }
-
-    public Builder setLogger(String lgr){
-      this.logger = lgr;
-      return this;
-    }
-
-    public Builder setUser(String usr){
-      this.user = usr;
-      return this;
-    }
-
-    public Builder setIp(String ipAddress){
-      this.ip = ipAddress;
-      return this;
-    }
-
-    public Builder setOp(String operation){
-      this.op = operation;
-      return this;
-    }
-
-    public Builder setParams(String prms){
-      this.params = prms;
-      return this;
-    }
-
-    public Builder setResult(String res){
-      this.result = res;
-      return this;
-    }
-
-    public Builder setException(String exp){
-      this.exception = exp;
-      return this;
-    }
-
-    public AuditEntry build() {
-      AuditEntry aentry = new AuditEntry();
-      aentry.timestamp = this.timestamp;
-      aentry.level = this.level;
-      aentry.logger = this.logger;
-      aentry.user = this.user;
-      aentry.ip = this.ip;
-      aentry.op = this.op;
-      aentry.params = this.params;
-      aentry.result = this.result;
-      aentry.exception = this.exception;
-      return aentry;
-    }
-  }
-}
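Note for reviewers: the removed AuditEntry was always assembled through the fluent Builder above. A minimal sketch of building one entry, assuming code in the same package; all field values below are illustrative, not taken from any real audit log:

    // Illustrative use of the removed AuditEntry.Builder; every value here
    // is a hypothetical example, not real audit data.
    AuditEntry entry = new AuditEntry.Builder()
        .setTimestamp("2019-10-18 12:00:00")
        .setLevel("INFO")
        .setLogger("OMAudit")
        .setUser("hadoop")
        .setIp("127.0.0.1")
        .setOp("CREATE_VOLUME")
        .setParams("{volume=vol1}")
        .setResult("SUCCESS")
        .build();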
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/package-info.java
deleted file mode 100644
index 2e6c56f..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit.parser.model;
-/**
- * POJO used for Ozone audit parser tool.
- */
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/package-info.java
deleted file mode 100644
index 0119ada..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit.parser;
-/**
- * Classes used for Ozone audit parser tool.
- */
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
deleted file mode 100644
index f9b5e03..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
+++ /dev/null
@@ -1,334 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.InetSocketAddress;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import com.codahale.metrics.ConsoleReporter;
-import com.codahale.metrics.MetricRegistry;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.ratis.protocol.ClientId;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * Base class for simplified performance tests.
- */
-public class BaseFreonGenerator {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BaseFreonGenerator.class);
-
-  private static final int CHECK_INTERVAL_MILLIS = 1000;
-
-  private static final String DIGEST_ALGORITHM = "MD5";
-
-  private static final Pattern ENV_VARIABLE_IN_PATTERN =
-      Pattern.compile("__(.+?)__");
-
-  @ParentCommand
-  private Freon freonCommand;
-
-  @Option(names = {"-n", "--number-of-tests"},
-      description = "Number of the generated objects.",
-      defaultValue = "1000")
-  private long testNo = 1000;
-
-  @Option(names = {"-t", "--threads", "--thread"},
-      description = "Number of threads used to execute",
-      defaultValue = "10")
-  private int threadNo;
-
-  @Option(names = {"-f", "--fail-at-end"},
-      description = "If turned on, all the tasks will be executed even if "
-          + "there are failures.")
-  private boolean failAtEnd;
-
-  @Option(names = {"-p", "--prefix"},
-      description = "Unique identifier of the test execution. Usually used as"
-          + " a prefix of the generated object names. If empty, a random name"
-          + " will be generated",
-      defaultValue = "")
-  private String prefix = "";
-
-  private MetricRegistry metrics = new MetricRegistry();
-
-  private ExecutorService executor;
-
-  private AtomicLong successCounter;
-
-  private AtomicLong failureCounter;
-
-  private long startTime;
-
-  private PathSchema pathSchema;
-
-  /**
-   * The main logic to execute a test generator.
-   *
-   * @param provider creates the new steps to execute.
-   */
-  public void runTests(TaskProvider provider) {
-
-    executor = Executors.newFixedThreadPool(threadNo);
-
-    ProgressBar progressBar =
-        new ProgressBar(System.out, testNo, successCounter::get);
-    progressBar.start();
-
-    startTime = System.currentTimeMillis();
-    // schedule the execution of all the tasks.
-
-    for (long i = 0; i < testNo; i++) {
-
-      final long counter = i;
-
-      executor.execute(() -> {
-        try {
-
-          // in case of another failed test, we shouldn't execute more tasks.
-          if (!failAtEnd && failureCounter.get() > 0) {
-            return;
-          }
-
-          provider.executeNextTask(counter);
-          successCounter.incrementAndGet();
-        } catch (Exception e) {
-          failureCounter.incrementAndGet();
-          LOG.error("Error on executing task", e);
-        }
-      });
-    }
-
-    // wait until all tasks are executed
-
-    while (successCounter.get() + failureCounter.get() < testNo && (
-        failureCounter.get() == 0 || failAtEnd)) {
-      try {
-        Thread.sleep(CHECK_INTERVAL_MILLIS);
-      } catch (InterruptedException e) {
-        throw new RuntimeException(e);
-      }
-    }
-
-    // shutdown everything
-    if (failureCounter.get() > 0 && !failAtEnd) {
-      progressBar.terminate();
-    } else {
-      progressBar.shutdown();
-    }
-    executor.shutdown();
-    try {
-      executor.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS);
-    } catch (Exception ex) {
-      ex.printStackTrace();
-    }
-
-    if (failureCounter.get() > 0) {
-      throw new RuntimeException("One ore more freon test is failed.");
-    }
-  }
-
-  /**
-   * Initialize internal counters and variables. Call this before runTests.
-   */
-  public void init() {
-
-    successCounter = new AtomicLong(0);
-    failureCounter = new AtomicLong(0);
-
-    if (prefix.length() == 0) {
-      prefix = RandomStringUtils.randomAlphanumeric(10);
-    } else {
-      // replace environment variables to support multi-node execution
-      prefix = resolvePrefix(prefix);
-    }
-    LOG.info("Executing test with prefix {}", prefix);
-
-    pathSchema = new PathSchema(prefix);
-
-    Runtime.getRuntime().addShutdownHook(
-        new Thread(this::printReport));
-  }
-
-  /**
-   * Resolve environment variables in the prefixes.
-   */
-  public String resolvePrefix(String inputPrefix) {
-    Matcher m = ENV_VARIABLE_IN_PATTERN.matcher(inputPrefix);
-    StringBuffer sb = new StringBuffer();
-    while (m.find()) {
-      String environment = System.getenv(m.group(1));
-      m.appendReplacement(sb, environment != null ? environment : "");
-    }
-    m.appendTail(sb);
-    return sb.toString();
-  }
-
-  /**
-   * Print out reports from the executed tests.
-   */
-  public void printReport() {
-    ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).build();
-    reporter.report();
-    System.out.println("Total execution time (sec): " + Math
-        .round((System.currentTimeMillis() - startTime) / 1000.0));
-    System.out.println("Failures: " + failureCounter.get());
-    System.out.println("Successful executions: " + successCounter.get());
-  }
-
-  /**
-   * Create the OM RPC client to use it for testing.
-   */
-  public OzoneManagerProtocolClientSideTranslatorPB createOmClient(
-      OzoneConfiguration conf) throws IOException {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    long omVersion = RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
-    InetSocketAddress omAddress = OmUtils.getOmAddressForClients(conf);
-    RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
-        ProtobufRpcEngine.class);
-    String clientId = ClientId.randomId().toString();
-    return new OzoneManagerProtocolClientSideTranslatorPB(
-        RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, omAddress,
-            ugi, conf, NetUtils.getDefaultSocketFactory(conf),
-            Client.getRpcTimeout(conf)), clientId);
-  }
-
-  /**
-   * Generate a key/file name based on the prefix and counter.
-   */
-  public String generateObjectName(long counter) {
-    return pathSchema.getPath(counter);
-  }
-
-  /**
-   * Create missing target volume/bucket.
-   */
-  public void ensureVolumeAndBucketExist(OzoneConfiguration ozoneConfiguration,
-      String volumeName, String bucketName) throws IOException {
-
-    try (OzoneClient rpcClient = OzoneClientFactory
-        .getRpcClient(ozoneConfiguration)) {
-
-      OzoneVolume volume = null;
-      try {
-        volume = rpcClient.getObjectStore().getVolume(volumeName);
-      } catch (OMException ex) {
-        if (ex.getResult() == ResultCodes.VOLUME_NOT_FOUND) {
-          rpcClient.getObjectStore().createVolume(volumeName);
-          volume = rpcClient.getObjectStore().getVolume(volumeName);
-        } else {
-          throw ex;
-        }
-      }
-
-      try {
-        volume.getBucket(bucketName);
-      } catch (OMException ex) {
-        if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-          volume.createBucket(bucketName);
-        } else {
-          throw ex;
-        }
-      }
-    }
-  }
-
-  /**
-   * Create missing target volume.
-   */
-  public void ensureVolumeExists(
-      OzoneConfiguration ozoneConfiguration,
-      String volumeName) throws IOException {
-    try (OzoneClient rpcClient = OzoneClientFactory
-        .getRpcClient(ozoneConfiguration)) {
-
-      try {
-        rpcClient.getObjectStore().getVolume(volumeName);
-      } catch (OMException ex) {
-        if (ex.getResult() == ResultCodes.VOLUME_NOT_FOUND) {
-          rpcClient.getObjectStore().createVolume(volumeName);
-        }
-      }
-
-    }
-  }
-
-  /**
-   * Calculate checksum of a byte array.
-   */
-  public byte[] getDigest(byte[] content) throws IOException {
-    DigestUtils dig = new DigestUtils(DIGEST_ALGORITHM);
-    dig.getMessageDigest().reset();
-    return dig.digest(content);
-  }
-
-  /**
-   * Calculate checksum of an input stream.
-   */
-  public byte[] getDigest(InputStream stream) throws IOException {
-    DigestUtils dig = new DigestUtils(DIGEST_ALGORITHM);
-    dig.getMessageDigest().reset();
-    return dig.digest(stream);
-  }
-
-  public String getPrefix() {
-    return prefix;
-  }
-
-  public MetricRegistry getMetrics() {
-    return metrics;
-  }
-
-  public OzoneConfiguration createOzoneConfiguration() {
-    return freonCommand.createOzoneConfiguration();
-  }
-  /**
-   * Simple contract to execute a new step during a freon test.
-   */
-  @FunctionalInterface
-  public interface TaskProvider {
-    void executeNextTask(long step) throws Exception;
-  }
-
-}
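The framework above is driven entirely through the TaskProvider contract: a concrete generator calls init(), registers a metric, and hands runTests a lambda that performs one step. A minimal hypothetical subclass, shown only to illustrate that lifecycle (it is not one of the deleted generators):

    import java.util.concurrent.Callable;
    import com.codahale.metrics.Timer;

    // Hypothetical no-op generator following the removed
    // init()/runTests() lifecycle; for illustration only.
    public class NoopGenerator extends BaseFreonGenerator
        implements Callable<Void> {

      @Override
      public Void call() throws Exception {
        init();                                  // counters, prefix, report hook
        Timer timer = getMetrics().timer("noop");
        runTests(counter -> timer.time(() -> null)); // one timed no-op per task
        return null;
      }
    }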
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java
deleted file mode 100644
index e31c709..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.charset.StandardCharsets;
-
-import org.apache.commons.lang3.RandomStringUtils;
-
-/**
- * Utility class to write random keys from a limited buffer.
- */
-public class ContentGenerator {
-
-  /**
-   * Size of the destination object (key or file).
-   */
-  private int keySize;
-
-  /**
-   * Buffer for the pre-allocated content (will be reused if less than the
-   * keySize).
-   */
-  private int bufferSize;
-
-  private final byte[] buffer;
-
-  ContentGenerator(int keySize, int bufferSize) {
-    this.keySize = keySize;
-    this.bufferSize = bufferSize;
-
-    buffer = RandomStringUtils.randomAscii(bufferSize)
-        .getBytes(StandardCharsets.UTF_8);
-
-  }
-
-  /**
-   * Write the required bytes to the output stream.
-   */
-  public void write(OutputStream outputStream) throws IOException {
-    for (long nrRemaining = keySize;
-         nrRemaining > 0; nrRemaining -= bufferSize) {
-      int curSize = (int) Math.min(bufferSize, nrRemaining);
-      outputStream.write(buffer, 0, curSize);
-    }
-  }
-}
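ContentGenerator replays one pre-allocated random buffer until keySize bytes have been written, so arbitrarily large keys cost only bufferSize bytes of memory. A small sketch of the write loop in use; since the constructor is package-private, this assumes code in the same org.apache.hadoop.ozone.freon package:

    import java.io.ByteArrayOutputStream;

    // Sketch: emit a 10 KB payload from a reusable 4 KB buffer.
    ContentGenerator generator = new ContentGenerator(10240, 4096);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    generator.write(out);        // writes min(bufferSize, remaining) per pass
    assert out.size() == 10240;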
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java
deleted file mode 100644
index 21adb0d..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-
-/**
- * Ozone data generator and performance test tool.
- */
-@Command(
-    name = "ozone freon",
-    description = "Load generator and tester tool for ozone",
-    subcommands = {
-        RandomKeyGenerator.class,
-        OzoneClientKeyGenerator.class,
-        OzoneClientKeyValidator.class,
-        OmKeyGenerator.class,
-        OmBucketGenerator.class,
-        HadoopFsGenerator.class,
-        HadoopFsValidator.class,
-        SameKeyReader.class,
-        S3KeyGenerator.class},
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true)
-public class Freon extends GenericCli {
-
-  public static final Logger LOG = LoggerFactory.getLogger(Freon.class);
-
-  @Option(names = "--server",
-      description = "Enable internal http server to provide metric "
-          + "and profile endpoint")
-  private boolean httpServer = false;
-
-  private FreonHttpServer freonHttpServer;
-
-  @Override
-  public void execute(String[] argv) {
-    HddsUtils.initializeMetrics(createOzoneConfiguration(), "ozone-freon");
-    TracingUtil.initTracing("freon");
-    super.execute(argv);
-  }
-
-  public void stopHttpServer() {
-    if (freonHttpServer != null) {
-      try {
-        freonHttpServer.stop();
-      } catch (Exception e) {
-        LOG.error("Freon http server can't be stopped", e);
-      }
-    }
-  }
-
-  public void startHttpServer() {
-    if (httpServer) {
-      try {
-        freonHttpServer = new FreonHttpServer(createOzoneConfiguration());
-        freonHttpServer.start();
-      } catch (IOException e) {
-        LOG.error("Freon http server can't be started", e);
-      }
-    }
-
-  }
-
-  public static void main(String[] args) {
-    new Freon().run(args);
-  }
-
-}
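Since Freon extends GenericCli and exposes a main method, the subcommands registered above can be launched as `ozone freon <subcommand>` from the shell or invoked programmatically. A hedged sketch of the latter; the argument values are illustrative only:

    // Roughly equivalent to `ozone freon ockg -n 100 -t 4`;
    // subcommand arguments here are illustrative, not prescriptive.
    public static void main(String[] args) {
      new Freon().run(new String[] {"ockg", "-n", "100", "-t", "4"});
    }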
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonHttpServer.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonHttpServer.java
deleted file mode 100644
index dab4889..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonHttpServer.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.server.BaseHttpServer;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-
-/**
- * HTTP server to provide metrics and profiling endpoints.
- */
-public class FreonHttpServer extends BaseHttpServer {
-  public FreonHttpServer(Configuration conf) throws IOException {
-    super(conf, "freon");
-  }
-
-
-  @Override protected String getHttpAddressKey() {
-    return OzoneConfigKeys.OZONE_FREON_HTTP_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpBindHostKey() {
-    return OzoneConfigKeys.OZONE_FREON_HTTP_BIND_HOST_KEY;
-  }
-
-  @Override protected String getHttpsAddressKey() {
-    return OzoneConfigKeys.OZONE_FREON_HTTPS_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpsBindHostKey() {
-    return OzoneConfigKeys.OZONE_FREON_HTTPS_BIND_HOST_KEY;
-  }
-
-  @Override protected String getBindHostDefault() {
-    return OzoneConfigKeys.OZONE_FREON_HTTP_BIND_HOST_DEFAULT;
-  }
-
-  @Override protected int getHttpBindPortDefault() {
-    return OzoneConfigKeys.OZONE_FREON_HTTP_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected int getHttpsBindPortDefault() {
-    return OzoneConfigKeys.OZONE_FREON_HTTPS_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected String getKeytabFile() {
-    return OzoneConfigKeys.OZONE_FREON_HTTP_KERBEROS_KEYTAB_FILE_KEY;
-  }
-
-  @Override protected String getSpnegoPrincipal() {
-    return OzoneConfigKeys.OZONE_FREON_HTTP_KERBEROS_PRINCIPAL_KEY;
-  }
-
-  @Override protected String getEnabledKey() {
-    return OzoneConfigKeys.OZONE_FREON_HTTP_ENABLED_KEY;
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java
deleted file mode 100644
index 548f829..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-import java.net.URI;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import com.codahale.metrics.Timer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-
-/**
- * Data generator tool to test OM performance.
- */
-@Command(name = "dfsg",
-    aliases = "dfs-file-generator",
-    description = "Create random files to the any dfs compatible file system.",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true,
-    showDefaultValues = true)
-public class HadoopFsGenerator extends BaseFreonGenerator
-    implements Callable<Void> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(HadoopFsGenerator.class);
-
-  @Option(names = {"--path"},
-      description = "Hadoop FS file system path",
-      defaultValue = "o3fs://bucket1.vol1")
-  private String rootPath;
-
-  @Option(names = {"-s", "--size"},
-      description = "Size of the generated files (in bytes)",
-      defaultValue = "10240")
-  private int fileSize;
-
-  @Option(names = {"--buffer"},
-      description = "Size of buffer used to generated the key content.",
-      defaultValue = "4096")
-  private int bufferSize;
-
-  private ContentGenerator contentGenerator;
-
-  private Timer timer;
-
-  private FileSystem fileSystem;
-
-  @Override
-  public Void call() throws Exception {
-
-    init();
-
-    OzoneConfiguration configuration = createOzoneConfiguration();
-
-    fileSystem = FileSystem.get(URI.create(rootPath), configuration);
-
-    contentGenerator = new ContentGenerator(fileSize, bufferSize);
-
-    timer = getMetrics().timer("file-create");
-
-    runTests(this::createFile);
-
-    return null;
-  }
-
-  private void createFile(long counter) throws Exception {
-    Path file = new Path(rootPath + "/" + generateObjectName(counter));
-    fileSystem.mkdirs(file.getParent());
-
-    timer.time(() -> {
-      try (FSDataOutputStream output = fileSystem.create(file)) {
-        contentGenerator.write(output);
-      }
-      return null;
-    });
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsValidator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsValidator.java
deleted file mode 100644
index fe160ef..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsValidator.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-import java.net.URI;
-import java.security.MessageDigest;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import com.codahale.metrics.Timer;
-import org.apache.commons.io.IOUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-
-/**
- * Data validator tool to test OM performance.
- */
-@Command(name = "dfsv",
-    aliases = "dfs-file-validator",
-    description = "Validate if the generated files have the same hash.",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true,
-    showDefaultValues = true)
-public class HadoopFsValidator extends BaseFreonGenerator
-    implements Callable<Void> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(HadoopFsValidator.class);
-
-  @Option(names = {"--path"},
-      description = "Hadoop FS file system path",
-      defaultValue = "o3fs://bucket1.vol1")
-  private String rootPath;
-
-  private ContentGenerator contentGenerator;
-
-  private Timer timer;
-
-  private FileSystem fileSystem;
-
-  private byte[] referenceDigest;
-
-  @Override
-  public Void call() throws Exception {
-
-    init();
-
-    OzoneConfiguration configuration = createOzoneConfiguration();
-
-    fileSystem = FileSystem.get(URI.create(rootPath), configuration);
-
-    Path file = new Path(rootPath + "/" + generateObjectName(0));
-    try (FSDataInputStream stream = fileSystem.open(file)) {
-      referenceDigest = getDigest(stream);
-    }
-
-    timer = getMetrics().timer("file-read");
-
-    runTests(this::validateFile);
-
-    return null;
-  }
-
-  private void validateFile(long counter) throws Exception {
-    Path file = new Path(rootPath + "/" + generateObjectName(counter));
-
-    byte[] content = timer.time(() -> {
-      try (FSDataInputStream input = fileSystem.open(file)) {
-        return IOUtils.toByteArray(input);
-      }
-    });
-
-    if (!MessageDigest.isEqual(referenceDigest, getDigest(content))) {
-      throw new IllegalStateException(
-          "Reference (=first) message digest doesn't match with digest of "
-              + file.toString());
-    }
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java
deleted file mode 100644
index 1f61d56b2..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-
-import com.codahale.metrics.Timer;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-
-/**
- * Data generator tool to test OM performance.
- */
-@Command(name = "ombg",
-    aliases = "om-bucket-generator",
-    description = "Generate ozone buckets on OM side.",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true,
-    showDefaultValues = true)
-public class OmBucketGenerator extends BaseFreonGenerator
-    implements Callable<Void> {
-
-  @Option(names = {"-v", "--volume"},
-      description = "Name of the bucket which contains the test data. Will be"
-          + " created if missing.",
-      defaultValue = "vol1")
-  private String volumeName;
-
-  private OzoneManagerProtocol ozoneManagerClient;
-
-  private Timer bucketCreationTimer;
-
-  @Override
-  public Void call() throws Exception {
-
-    init();
-
-    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
-
-    ozoneManagerClient = createOmClient(ozoneConfiguration);
-
-    ensureVolumeExists(ozoneConfiguration, volumeName);
-
-    bucketCreationTimer = getMetrics().timer("bucket-create");
-
-    runTests(this::createBucket);
-
-    return null;
-  }
-
-  private void createBucket(long index) throws Exception {
-
-    OmBucketInfo bucketInfo = new OmBucketInfo.Builder()
-        .setBucketName(getPrefix() + index)
-        .setVolumeName(volumeName)
-        .setStorageType(StorageType.DISK)
-        .build();
-
-    bucketCreationTimer.time(() -> {
-      ozoneManagerClient.createBucket(bucketInfo);
-      return null;
-    });
-  }
-
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java
deleted file mode 100644
index 81165c4..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-import java.util.ArrayList;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs.Builder;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-
-import com.codahale.metrics.Timer;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-
-/**
- * Data generator tool to test OM performance.
- */
-@Command(name = "omkg",
-    aliases = "om-key-generator",
-    description = "Create keys to the om metadata table.",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true,
-    showDefaultValues = true)
-public class OmKeyGenerator extends BaseFreonGenerator
-    implements Callable<Void> {
-
-  @Option(names = {"-v", "--volume"},
-      description = "Name of the bucket which contains the test data. Will be"
-          + " created if missing.",
-      defaultValue = "vol1")
-  private String volumeName;
-
-  @Option(names = {"-b", "--bucket"},
-      description = "Name of the bucket which contains the test data. Will be"
-          + " created if missing.",
-      defaultValue = "bucket1")
-  private String bucketName;
-
-  private OzoneManagerProtocol ozoneManagerClient;
-
-  private Timer timer;
-
-  @Override
-  public Void call() throws Exception {
-
-    init();
-
-    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
-
-    ensureVolumeAndBucketExist(ozoneConfiguration, volumeName, bucketName);
-
-    ozoneManagerClient = createOmClient(ozoneConfiguration);
-
-    timer = getMetrics().timer("key-create");
-
-    runTests(this::createKey);
-
-    return null;
-  }
-
-  private void createKey(long counter) throws Exception {
-
-    OmKeyArgs keyArgs = new Builder()
-        .setBucketName(bucketName)
-        .setVolumeName(volumeName)
-        .setType(ReplicationType.RATIS)
-        .setFactor(ReplicationFactor.THREE)
-        .setKeyName(generateObjectName(counter))
-        .setLocationInfoList(new ArrayList<>())
-        .build();
-
-    timer.time(() -> {
-      OpenKeySession openKeySession = ozoneManagerClient.openKey(keyArgs);
-
-      ozoneManagerClient.commitKey(keyArgs, openKeySession.getId());
-      return null;
-    });
-  }
-
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java
deleted file mode 100644
index 1ae691e..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-import java.io.OutputStream;
-import java.util.HashMap;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-
-import com.codahale.metrics.Timer;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-
-/**
- * Data generator tool to test OM performance.
- */
-@Command(name = "ockg",
-    aliases = "ozone-client-key-generator",
-    description = "Generate keys with the help of the ozone clients.",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true,
-    showDefaultValues = true)
-public class OzoneClientKeyGenerator extends BaseFreonGenerator
-    implements Callable<Void> {
-
-  @Option(names = {"-v", "--volume"},
-      description = "Name of the bucket which contains the test data. Will be"
-          + " created if missing.",
-      defaultValue = "vol1")
-  private String volumeName;
-
-  @Option(names = {"-b", "--bucket"},
-      description = "Name of the bucket which contains the test data. Will be"
-          + " created if missing.",
-      defaultValue = "bucket1")
-  private String bucketName;
-
-  @Option(names = {"-s", "--size"},
-      description = "Size of the generated key (in bytes)",
-      defaultValue = "10240")
-  private int keySize;
-
-  @Option(names = {"--buffer"},
-      description = "Size of buffer used to generated the key content.",
-      defaultValue = "4096")
-  private int bufferSize;
-
-  private Timer timer;
-
-  private OzoneBucket bucket;
-  private ContentGenerator contentGenerator;
-
-  @Override
-  public Void call() throws Exception {
-
-    init();
-
-    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
-
-    ensureVolumeAndBucketExist(ozoneConfiguration, volumeName, bucketName);
-
-    contentGenerator = new ContentGenerator(keySize, bufferSize);
-
-    try (OzoneClient rpcClient = OzoneClientFactory
-        .getRpcClient(ozoneConfiguration)) {
-
-      bucket =
-          rpcClient.getObjectStore().getVolume(volumeName)
-              .getBucket(bucketName);
-
-      timer = getMetrics().timer("key-create");
-
-      runTests(this::createKey);
-
-    }
-    return null;
-  }
-
-  private void createKey(long counter) throws Exception {
-
-    timer.time(() -> {
-      try (OutputStream stream = bucket
-          .createKey(generateObjectName(counter), keySize,
-              ReplicationType.RATIS,
-              ReplicationFactor.THREE,
-              new HashMap<>())) {
-        contentGenerator.write(stream);
-        stream.flush();
-      }
-      return null;
-    });
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyValidator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyValidator.java
deleted file mode 100644
index f247b33..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyValidator.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-import java.io.InputStream;
-import java.security.MessageDigest;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-
-import com.codahale.metrics.Timer;
-import org.apache.commons.io.IOUtils;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-
-/**
- * Data validator tool to test OM performance.
- */
-@Command(name = "ockv",
-    aliases = "ozone-client-key-validator",
-    description = "Validate keys with the help of the ozone clients.",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true,
-    showDefaultValues = true)
-public class OzoneClientKeyValidator extends BaseFreonGenerator
-    implements Callable<Void> {
-
-  @Option(names = {"-v", "--volume"},
-      description = "Name of the bucket which contains the test data. Will be"
-          + " created if missing.",
-      defaultValue = "vol1")
-  private String volumeName;
-
-  @Option(names = {"-b", "--bucket"},
-      description = "Name of the bucket which contains the test data.",
-      defaultValue = "bucket1")
-  private String bucketName;
-
-  private Timer timer;
-
-  private byte[] referenceDigest;
-
-  private OzoneClient rpcClient;
-
-  @Override
-  public Void call() throws Exception {
-
-    init();
-
-    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
-
-    rpcClient = OzoneClientFactory.getRpcClient(ozoneConfiguration);
-
-    try (InputStream stream = rpcClient.getObjectStore().getVolume(volumeName)
-        .getBucket(bucketName).readKey(generateObjectName(0))) {
-      referenceDigest = getDigest(stream);
-    }
-
-    timer = getMetrics().timer("key-validate");
-
-    runTests(this::validateKey);
-
-    return null;
-  }
-
-  private void validateKey(long counter) throws Exception {
-    String objectName = generateObjectName(counter);
-
-    byte[] content = timer.time(() -> {
-      try (InputStream stream = rpcClient.getObjectStore().getVolume(volumeName)
-          .getBucket(bucketName).readKey(objectName)) {
-        return IOUtils.toByteArray(stream);
-      }
-    });
-    if (!MessageDigest.isEqual(referenceDigest, getDigest(content))) {
-      throw new IllegalStateException(
-          "Reference (=first) message digest doesn't match with digest of "
-              + objectName);
-    }
-  }
-
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/PathSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/PathSchema.java
deleted file mode 100644
index 09242d5..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/PathSchema.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-/**
- * Class to generate the path based on a counter.
- */
-public class PathSchema {
-
-  private String prefix;
-
-  public PathSchema(String prefix) {
-    this.prefix = prefix;
-  }
-
-  /**
-   * Return a relative path based on the current counter.
-   * <p>
-   * A more advanced implementation could generate a deep directory hierarchy.
-   */
-  public String getPath(long counter) {
-    return prefix + "/" + counter;
-  }
-}
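PathSchema keeps the layout flat: every generated object name is simply `<prefix>/<counter>`. For example (the prefix value is illustrative):

    PathSchema schema = new PathSchema("test123");
    schema.getPath(0);   // -> "test123/0"
    schema.getPath(41);  // -> "test123/41"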
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
deleted file mode 100644
index a987eea..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.freon;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.PrintStream;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Supplier;
-
-/**
- * Creates and runs a ProgressBar in a new Thread; the bar is printed on
- * the provided PrintStream.
- */
-public class ProgressBar {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ProgressBar.class);
-  private static final long REFRESH_INTERVAL = 1000L;
-
-  private final long maxValue;
-  private final Supplier<Long> currentValue;
-  private final Thread progressBar;
-
-  private volatile boolean running;
-
-  private volatile long startTime;
-
-  /**
-   * Creates a new ProgressBar instance which prints the progress on the given
-   * PrintStream when started.
-   *
-   * @param stream to display the progress
-   * @param maxValue Maximum value of the progress
-   * @param currentValue Supplier that provides the current value
-   */
-  public ProgressBar(final PrintStream stream, final Long maxValue,
-                     final Supplier<Long> currentValue) {
-    this.maxValue = maxValue;
-    this.currentValue = currentValue;
-    this.progressBar = new Thread(getProgressBar(stream));
-    this.running = false;
-  }
-
-  /**
-   * Starts the ProgressBar in a new Thread.
-   * This is a non-blocking call.
-   */
-  public synchronized void start() {
-    if (!running) {
-      running = true;
-      startTime = System.nanoTime();
-      progressBar.start();
-    }
-  }
-
-  /**
-   * Graceful shutdown; waits for the progress bar to complete.
-   * This is a blocking call.
-   */
-  public synchronized void shutdown() {
-    if (running) {
-      try {
-        progressBar.join();
-        running = false;
-      } catch (InterruptedException e) {
-        LOG.warn("Got interrupted while waiting for the progress bar to " +
-                "complete.");
-      }
-    }
-  }
-
-  /**
-   * Terminates the progress bar early. Unlike shutdown(), it does not wait
-   * for the progress to reach the maximum value.
-   */
-  public synchronized void terminate() {
-    if (running) {
-      try {
-        running = false;
-        progressBar.join();
-      } catch (InterruptedException e) {
-        LOG.warn("Got interrupted while waiting for the progress bar to " +
-                "complete.");
-      }
-    }
-  }
-
-  private Runnable getProgressBar(final PrintStream stream) {
-    return () -> {
-      stream.println();
-      while (running && currentValue.get() < maxValue) {
-        print(stream, currentValue.get());
-        try {
-          Thread.sleep(REFRESH_INTERVAL);
-        } catch (InterruptedException e) {
-          LOG.warn("ProgressBar was interrupted.");
-        }
-      }
-      print(stream, maxValue);
-      stream.println();
-      running = false;
-    };
-  }
-
-  /**
-   * Given current value prints the progress bar.
-   *
-   * @param value current progress position
-   */
-  private void print(final PrintStream stream, final long value) {
-    stream.print('\r');
-    double percent = 100.0 * value / maxValue;
-    StringBuilder sb = new StringBuilder();
-    sb.append(" " + String.format("%.2f", percent) + "% |");
-
-    for (int i = 0; i <= percent; i++) {
-      sb.append('█');
-    }
-    for (int j = 0; j < 100 - percent; j++) {
-      sb.append(' ');
-    }
-    sb.append("|  ");
-    sb.append(value + "/" + maxValue);
-    long timeInSec = TimeUnit.SECONDS.convert(
-            System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
-    String timeToPrint = String.format("%d:%02d:%02d", timeInSec / 3600,
-            (timeInSec % 3600) / 60, timeInSec % 60);
-    sb.append(" Time: " + timeToPrint);
-    stream.print(sb.toString());
-  }
-}
\ No newline at end of file
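
For readers without the surrounding Freon sources: the ProgressBar above was driven by handing it a live counter as the Supplier. A minimal usage sketch under that assumption (the counter, total, and sleep below are illustrative, not from the deleted code):

  import java.util.concurrent.atomic.AtomicLong;

  public class ProgressBarDemo {
    public static void main(String[] args) throws InterruptedException {
      AtomicLong written = new AtomicLong();   // shared progress counter
      long total = 100L;
      // Constructor matches the deleted class: (PrintStream, Long, Supplier<Long>).
      ProgressBar bar = new ProgressBar(System.out, total, written::get);
      bar.start();                             // non-blocking; spawns the printer thread
      for (long i = 0; i < total; i++) {
        Thread.sleep(10);                      // simulated work
        written.incrementAndGet();
      }
      bar.shutdown();                          // blocks until the bar finishes printing
    }
  }
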
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
deleted file mode 100644
index 2bf79c5..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
+++ /dev/null
@@ -1,1112 +0,0 @@
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.freon;
-
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.function.Supplier;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import io.opentracing.Scope;
-import io.opentracing.util.GlobalTracer;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.client.OzoneQuota;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.VersionInfo;
-
-import com.codahale.metrics.Histogram;
-import com.codahale.metrics.Snapshot;
-import com.codahale.metrics.UniformReservoir;
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.time.DurationFormatUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.ParentCommand;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.security.MessageDigest;
-
-/**
- * Data generator tool to generate as many keys as possible.
- */
-@Command(name = "randomkeys",
-    aliases = "rk",
-    description = "Generate volumes/buckets and put generated keys.",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true,
-    showDefaultValues = true)
-public final class RandomKeyGenerator implements Callable<Void> {
-
-  @ParentCommand
-  private Freon freon;
-
-  enum FreonOps {
-    VOLUME_CREATE,
-    BUCKET_CREATE,
-    KEY_CREATE,
-    KEY_WRITE
-  }
-
-  private static final String DURATION_FORMAT = "HH:mm:ss,SSS";
-
-  private static final int QUANTILES = 10;
-
-  private static final int CHECK_INTERVAL_MILLIS = 5000;
-
-  private byte[] keyValueBuffer = null;
-
-  private static final String DIGEST_ALGORITHM = "MD5";
-  // A common initial MessageDigest for each key without its UUID
-  private MessageDigest commonInitialMD = null;
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RandomKeyGenerator.class);
-
-  private volatile boolean completed = false;
-  private volatile Exception exception = null;
-
-  @Option(names = "--numOfThreads",
-      description = "number of threads to be launched for the run",
-      defaultValue = "10")
-  private int numOfThreads = 10;
-
-  @Option(names = "--numOfVolumes",
-      description = "specifies number of Volumes to be created in offline mode",
-      defaultValue = "10")
-  private int numOfVolumes = 10;
-
-  @Option(names = "--numOfBuckets",
-      description = "specifies number of Buckets to be created per Volume",
-      defaultValue = "1000")
-  private int numOfBuckets = 1000;
-
-  @Option(
-      names = "--numOfKeys",
-      description = "specifies number of Keys to be created per Bucket",
-      defaultValue = "500000"
-  )
-  private int numOfKeys = 500000;
-
-  @Option(
-      names = "--keySize",
-      description = "Specifies the size of Key in bytes to be created",
-      defaultValue = "10240"
-  )
-  private long keySize = 10240;
-
-  @Option(
-      names = "--validateWrites",
-      description = "Specifies whether to validate keys after writing"
-  )
-  private boolean validateWrites = false;
-
-  @Option(
-      names = "--bufferSize",
-      description = "Specifies the buffer size while writing",
-      defaultValue = "4096"
-  )
-  private int bufferSize = 4096;
-
-  @Option(
-      names = "--json",
-      description = "directory where json is created."
-  )
-  private String jsonDir;
-
-  @Option(
-      names = "--replicationType",
-      description = "Replication type (STAND_ALONE, RATIS)",
-      defaultValue = "STAND_ALONE"
-  )
-  private ReplicationType type = ReplicationType.STAND_ALONE;
-
-  @Option(
-      names = "--factor",
-      description = "Replication factor (ONE, THREE)",
-      defaultValue = "ONE"
-  )
-  private ReplicationFactor factor = ReplicationFactor.ONE;
-
-  private int threadPoolSize;
-
-  private OzoneClient ozoneClient;
-  private ObjectStore objectStore;
-  private ExecutorService executor;
-
-  private long startTime;
-  private long jobStartTime;
-
-  private AtomicLong volumeCreationTime;
-  private AtomicLong bucketCreationTime;
-  private AtomicLong keyCreationTime;
-  private AtomicLong keyWriteTime;
-
-  private AtomicLong totalBytesWritten;
-
-  private int totalBucketCount;
-  private long totalKeyCount;
-  private AtomicInteger volumeCounter;
-  private AtomicInteger bucketCounter;
-  private AtomicLong keyCounter;
-  private Map<Integer, OzoneVolume> volumes;
-  private Map<Integer, OzoneBucket> buckets;
-
-  private AtomicInteger numberOfVolumesCreated;
-  private AtomicInteger numberOfBucketsCreated;
-  private AtomicLong numberOfKeysAdded;
-
-  private Long totalWritesValidated;
-  private Long writeValidationSuccessCount;
-  private Long writeValidationFailureCount;
-
-  private BlockingQueue<KeyValidate> validationQueue;
-  private ArrayList<Histogram> histograms = new ArrayList<>();
-
-  private OzoneConfiguration ozoneConfiguration;
-  private ProgressBar progressbar;
-
-  RandomKeyGenerator() {
-  }
-
-  @VisibleForTesting
-  RandomKeyGenerator(OzoneConfiguration ozoneConfiguration) {
-    this.ozoneConfiguration = ozoneConfiguration;
-  }
-
-  public void init(OzoneConfiguration configuration) throws IOException {
-    startTime = System.nanoTime();
-    jobStartTime = System.currentTimeMillis();
-    volumeCreationTime = new AtomicLong();
-    bucketCreationTime = new AtomicLong();
-    keyCreationTime = new AtomicLong();
-    keyWriteTime = new AtomicLong();
-    totalBytesWritten = new AtomicLong();
-    numberOfVolumesCreated = new AtomicInteger();
-    numberOfBucketsCreated = new AtomicInteger();
-    numberOfKeysAdded = new AtomicLong();
-    volumeCounter = new AtomicInteger();
-    bucketCounter = new AtomicInteger();
-    keyCounter = new AtomicLong();
-    volumes = new ConcurrentHashMap<>();
-    buckets = new ConcurrentHashMap<>();
-    ozoneClient = OzoneClientFactory.getClient(configuration);
-    objectStore = ozoneClient.getObjectStore();
-    for (FreonOps ops : FreonOps.values()) {
-      histograms.add(ops.ordinal(), new Histogram(new UniformReservoir()));
-    }
-    if (freon != null) {
-      freon.startHttpServer();
-    }
-  }
-
-  @Override
-  public Void call() throws Exception {
-    if (ozoneConfiguration != null) {
-      if (!ozoneConfiguration.getBoolean(
-          HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA,
-          HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA_DEFAULT)) {
-        LOG.info("Override validateWrites to false, because "
-            + HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA + " is set to false.");
-        validateWrites = false;
-      }
-      init(ozoneConfiguration);
-    } else {
-      init(freon.createOzoneConfiguration());
-    }
-
-    keyValueBuffer = DFSUtil.string2Bytes(
-        RandomStringUtils.randomAscii(bufferSize));
-
-    // Compute the common initial digest for all keys without their UUID
-    if (validateWrites) {
-      commonInitialMD = DigestUtils.getDigest(DIGEST_ALGORITHM);
-      for (long nrRemaining = keySize; nrRemaining > 0;
-          nrRemaining -= bufferSize) {
-        int curSize = (int)Math.min(bufferSize, nrRemaining);
-        commonInitialMD.update(keyValueBuffer, 0, curSize);
-      }
-    }
-
-    totalBucketCount = numOfVolumes * numOfBuckets;
-    totalKeyCount = totalBucketCount * numOfKeys;
-
-    LOG.info("Number of Threads: " + numOfThreads);
-    threadPoolSize = numOfThreads;
-    executor = Executors.newFixedThreadPool(threadPoolSize);
-    addShutdownHook();
-
-    LOG.info("Number of Volumes: {}.", numOfVolumes);
-    LOG.info("Number of Buckets per Volume: {}.", numOfBuckets);
-    LOG.info("Number of Keys per Bucket: {}.", numOfKeys);
-    LOG.info("Key size: {} bytes", keySize);
-    LOG.info("Buffer size: {} bytes", bufferSize);
-    LOG.info("validateWrites : {}", validateWrites);
-    for (int i = 0; i < numOfThreads; i++) {
-      executor.submit(new ObjectCreator());
-    }
-
-    Thread validator = null;
-    if (validateWrites) {
-      totalWritesValidated = 0L;
-      writeValidationSuccessCount = 0L;
-      writeValidationFailureCount = 0L;
-
-      validationQueue = new LinkedBlockingQueue<>();
-      validator = new Thread(new Validator());
-      validator.start();
-      LOG.info("Data validation is enabled.");
-    }
-
-    Supplier<Long> currentValue = numberOfKeysAdded::get;
-    progressbar = new ProgressBar(System.out, totalKeyCount, currentValue);
-
-    LOG.info("Starting progress bar Thread.");
-
-    progressbar.start();
-
-    // Wait until all keys are added or an exception occurs.
-    while ((numberOfKeysAdded.get() != totalKeyCount)
-           && exception == null) {
-      Thread.sleep(CHECK_INTERVAL_MILLIS);
-    }
-    executor.shutdown();
-    executor.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS);
-    completed = true;
-
-    if (exception != null) {
-      progressbar.terminate();
-    } else {
-      progressbar.shutdown();
-    }
-
-    if (validator != null) {
-      validator.join();
-    }
-    ozoneClient.close();
-    if (exception != null) {
-      throw exception;
-    }
-    return null;
-  }
-
-  /**
-   * Adds ShutdownHook to print statistics.
-   */
-  private void addShutdownHook() {
-    Runtime.getRuntime().addShutdownHook(
-        new Thread(() -> {
-          printStats(System.out);
-          if (freon != null) {
-            freon.stopHttpServer();
-          }
-        }));
-  }
-  /**
-   * Prints stats of {@link Freon} run to the PrintStream.
-   *
-   * @param out PrintStream
-   */
-  private void printStats(PrintStream out) {
-    long endTime = System.nanoTime() - startTime;
-    String execTime = DurationFormatUtils
-        .formatDuration(TimeUnit.NANOSECONDS.toMillis(endTime),
-            DURATION_FORMAT);
-
-    long volumeTime = TimeUnit.NANOSECONDS.toMillis(volumeCreationTime.get())
-        / threadPoolSize;
-    String prettyAverageVolumeTime =
-        DurationFormatUtils.formatDuration(volumeTime, DURATION_FORMAT);
-
-    long bucketTime = TimeUnit.NANOSECONDS.toMillis(bucketCreationTime.get())
-        / threadPoolSize;
-    String prettyAverageBucketTime =
-        DurationFormatUtils.formatDuration(bucketTime, DURATION_FORMAT);
-
-    long averageKeyCreationTime =
-        TimeUnit.NANOSECONDS.toMillis(keyCreationTime.get())
-            / threadPoolSize;
-    String prettyAverageKeyCreationTime = DurationFormatUtils
-        .formatDuration(averageKeyCreationTime, DURATION_FORMAT);
-
-    long averageKeyWriteTime =
-        TimeUnit.NANOSECONDS.toMillis(keyWriteTime.get()) / threadPoolSize;
-    String prettyAverageKeyWriteTime = DurationFormatUtils
-        .formatDuration(averageKeyWriteTime, DURATION_FORMAT);
-
-    out.println();
-    out.println("***************************************************");
-    out.println("Status: " + (exception != null ? "Failed" : "Success"));
-    out.println("Git Base Revision: " + VersionInfo.getRevision());
-    out.println("Number of Volumes created: " + numberOfVolumesCreated);
-    out.println("Number of Buckets created: " + numberOfBucketsCreated);
-    out.println("Number of Keys added: " + numberOfKeysAdded);
-    out.println("Ratis replication factor: " + factor.name());
-    out.println("Ratis replication type: " + type.name());
-    out.println(
-        "Average Time spent in volume creation: " + prettyAverageVolumeTime);
-    out.println(
-        "Average Time spent in bucket creation: " + prettyAverageBucketTime);
-    out.println(
-        "Average Time spent in key creation: " + prettyAverageKeyCreationTime);
-    out.println(
-        "Average Time spent in key write: " + prettyAverageKeyWriteTime);
-    out.println("Total bytes written: " + totalBytesWritten);
-    if (validateWrites) {
-      out.println("Total number of writes validated: " +
-          totalWritesValidated);
-      out.println("Writes validated: " +
-          (100.0 * totalWritesValidated / numberOfKeysAdded.get())
-          + " %");
-      out.println("Successful validation: " +
-          writeValidationSuccessCount);
-      out.println("Unsuccessful validation: " +
-          writeValidationFailureCount);
-    }
-    out.println("Total Execution time: " + execTime);
-    out.println("***************************************************");
-
-    if (jsonDir != null) {
-
-      String[][] quantileTime =
-          new String[FreonOps.values().length][QUANTILES + 1];
-      String[] deviations = new String[FreonOps.values().length];
-      String[] means = new String[FreonOps.values().length];
-      for (FreonOps ops : FreonOps.values()) {
-        Snapshot snapshot = histograms.get(ops.ordinal()).getSnapshot();
-        for (int i = 0; i <= QUANTILES; i++) {
-          quantileTime[ops.ordinal()][i] = DurationFormatUtils.formatDuration(
-              TimeUnit.NANOSECONDS
-                  .toMillis((long) snapshot.getValue((1.0 / QUANTILES) * i)),
-              DURATION_FORMAT);
-        }
-        deviations[ops.ordinal()] = DurationFormatUtils.formatDuration(
-            TimeUnit.NANOSECONDS.toMillis((long) snapshot.getStdDev()),
-            DURATION_FORMAT);
-        means[ops.ordinal()] = DurationFormatUtils.formatDuration(
-            TimeUnit.NANOSECONDS.toMillis((long) snapshot.getMean()),
-            DURATION_FORMAT);
-      }
-
-      FreonJobInfo jobInfo = new FreonJobInfo().setExecTime(execTime)
-          .setGitBaseRevision(VersionInfo.getRevision())
-          .setMeanVolumeCreateTime(means[FreonOps.VOLUME_CREATE.ordinal()])
-          .setDeviationVolumeCreateTime(
-              deviations[FreonOps.VOLUME_CREATE.ordinal()])
-          .setTenQuantileVolumeCreateTime(
-              quantileTime[FreonOps.VOLUME_CREATE.ordinal()])
-          .setMeanBucketCreateTime(means[FreonOps.BUCKET_CREATE.ordinal()])
-          .setDeviationBucketCreateTime(
-              deviations[FreonOps.BUCKET_CREATE.ordinal()])
-          .setTenQuantileBucketCreateTime(
-              quantileTime[FreonOps.BUCKET_CREATE.ordinal()])
-          .setMeanKeyCreateTime(means[FreonOps.KEY_CREATE.ordinal()])
-          .setDeviationKeyCreateTime(deviations[FreonOps.KEY_CREATE.ordinal()])
-          .setTenQuantileKeyCreateTime(
-              quantileTime[FreonOps.KEY_CREATE.ordinal()])
-          .setMeanKeyWriteTime(means[FreonOps.KEY_WRITE.ordinal()])
-          .setDeviationKeyWriteTime(deviations[FreonOps.KEY_WRITE.ordinal()])
-          .setTenQuantileKeyWriteTime(
-              quantileTime[FreonOps.KEY_WRITE.ordinal()]);
-      String jsonName =
-          new SimpleDateFormat("yyyyMMddHHmmss").format(Time.now()) + ".json";
-      String jsonPath = jsonDir + "/" + jsonName;
-      FileOutputStream os = null;
-      try {
-        os = new FileOutputStream(jsonPath);
-        ObjectMapper mapper = new ObjectMapper();
-        mapper.setVisibility(PropertyAccessor.FIELD,
-            JsonAutoDetect.Visibility.ANY);
-        ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter();
-        writer.writeValue(os, jobInfo);
-      } catch (FileNotFoundException e) {
-        out.println("Json File could not be created for the path: " + jsonPath);
-        out.println(e);
-      } catch (IOException e) {
-        out.println("Json object could not be created");
-        out.println(e);
-      } finally {
-        try {
-          if (os != null) {
-            os.close();
-          }
-        } catch (IOException e) {
-          LOG.warn("Could not close the output stream for json", e);
-        }
-      }
-    }
-  }
-
-  /**
-   * Returns the number of volumes created.
-   *
-   * @return volume count.
-   */
-  @VisibleForTesting
-  int getNumberOfVolumesCreated() {
-    return numberOfVolumesCreated.get();
-  }
-
-  /**
-   * Returns the number of buckets created.
-   *
-   * @return bucket count.
-   */
-  @VisibleForTesting
-  int getNumberOfBucketsCreated() {
-    return numberOfBucketsCreated.get();
-  }
-
-  /**
-   * Returns the number of keys added.
-   *
-   * @return keys count.
-   */
-  @VisibleForTesting
-  long getNumberOfKeysAdded() {
-    return numberOfKeysAdded.get();
-  }
-
-  /**
-   * Returns true if validation of writes is enabled.
-   *
-   * @return validateWrites
-   */
-  @VisibleForTesting
-  boolean getValidateWrites() {
-    return validateWrites;
-  }
-
-  /**
-   * Returns the number of keys validated.
-   *
-   * @return validated key count.
-   */
-  @VisibleForTesting
-  long getTotalKeysValidated() {
-    return totalWritesValidated;
-  }
-
-  /**
-   * Returns the number of successful validations.
-   *
-   * @return successful validation count.
-   */
-  @VisibleForTesting
-  long getSuccessfulValidationCount() {
-    return writeValidationSuccessCount;
-  }
-
-  /**
-   * Returns the number of unsuccessful validations.
-   *
-   * @return unsuccessful validation count.
-   */
-  @VisibleForTesting
-  long getUnsuccessfulValidationCount() {
-    return validateWrites ? writeValidationFailureCount : 0;
-  }
-
-  /**
-   * Wrapper to hold ozone keyValidate entry.
-   */
-  private static class KeyValidate {
-    /**
-     * Bucket that holds the key.
-     */
-    private OzoneBucket bucket;
-
-    /**
-     * Key name.
-     */
-    private String keyName;
-
-    /**
-     * Digest of this key's full value.
-     */
-    private byte[] digest;
-
-    /**
-     * Constructs a new ozone keyValidate.
-     *
-     * @param bucket    bucket part
-     * @param keyName   key part
-     * @param digest    digest of this key's full value
-     */
-    KeyValidate(OzoneBucket bucket, String keyName, byte[] digest) {
-      this.bucket = bucket;
-      this.keyName = keyName;
-      this.digest = digest;
-    }
-  }
-
-  private class ObjectCreator implements Runnable {
-    @Override
-    public void run() {
-      int v;
-      while ((v = volumeCounter.getAndIncrement()) < numOfVolumes) {
-        if (!createVolume(v)) {
-          return;
-        }
-      }
-
-      int b;
-      while ((b = bucketCounter.getAndIncrement()) < totalBucketCount) {
-        if (!createBucket(b)) {
-          return;
-        }
-      }
-
-      long k;
-      while ((k = keyCounter.getAndIncrement()) < totalKeyCount) {
-        if (!createKey(k)) {
-          return;
-        }
-      }
-    }
-  }
-
-  private boolean createVolume(int volumeNumber) {
-    String volumeName = "vol-" + volumeNumber + "-"
-        + RandomStringUtils.randomNumeric(5);
-    LOG.trace("Creating volume: {}", volumeName);
-    try (Scope ignored = GlobalTracer.get().buildSpan("createVolume")
-        .startActive(true)) {
-      long start = System.nanoTime();
-      objectStore.createVolume(volumeName);
-      long volumeCreationDuration = System.nanoTime() - start;
-      volumeCreationTime.getAndAdd(volumeCreationDuration);
-      histograms.get(FreonOps.VOLUME_CREATE.ordinal())
-          .update(volumeCreationDuration);
-      numberOfVolumesCreated.getAndIncrement();
-
-      OzoneVolume volume = objectStore.getVolume(volumeName);
-      volumes.put(volumeNumber, volume);
-      return true;
-    } catch (IOException e) {
-      exception = e;
-      LOG.error("Could not create volume", e);
-      return false;
-    }
-  }
-
-  private boolean createBucket(int globalBucketNumber) {
-    int volumeNumber = globalBucketNumber % numOfVolumes;
-    int bucketNumber = globalBucketNumber / numOfVolumes;
-    OzoneVolume volume = getVolume(volumeNumber);
-    if (volume == null) {
-      return false;
-    }
-    String bucketName = "bucket-" + bucketNumber + "-" +
-        RandomStringUtils.randomNumeric(5);
-    LOG.trace("Creating bucket: {} in volume: {}",
-        bucketName, volume.getName());
-    try (Scope ignored = GlobalTracer.get().buildSpan("createBucket")
-        .startActive(true)) {
-      long start = System.nanoTime();
-      volume.createBucket(bucketName);
-      long bucketCreationDuration = System.nanoTime() - start;
-      histograms.get(FreonOps.BUCKET_CREATE.ordinal())
-          .update(bucketCreationDuration);
-      bucketCreationTime.getAndAdd(bucketCreationDuration);
-      numberOfBucketsCreated.getAndIncrement();
-
-      OzoneBucket bucket = volume.getBucket(bucketName);
-      buckets.put(globalBucketNumber, bucket);
-      return true;
-    } catch (IOException e) {
-      exception = e;
-      LOG.error("Could not create bucket ", e);
-      return false;
-    }
-  }
-
-  @SuppressFBWarnings("REC_CATCH_EXCEPTION")
-  private boolean createKey(long globalKeyNumber) {
-    int globalBucketNumber = (int) (globalKeyNumber % totalBucketCount);
-    long keyNumber = globalKeyNumber / totalBucketCount;
-    OzoneBucket bucket = getBucket(globalBucketNumber);
-    if (bucket == null) {
-      return false;
-    }
-    String bucketName = bucket.getName();
-    String volumeName = bucket.getVolumeName();
-    String keyName = "key-" + keyNumber + "-"
-        + RandomStringUtils.randomNumeric(5);
-    LOG.trace("Adding key: {} in bucket: {} of volume: {}",
-        keyName, bucketName, volumeName);
-    try {
-      try (Scope scope = GlobalTracer.get().buildSpan("createKey")
-          .startActive(true)) {
-        long keyCreateStart = System.nanoTime();
-        OzoneOutputStream os = bucket.createKey(keyName, keySize, type,
-            factor, new HashMap<>());
-        long keyCreationDuration = System.nanoTime() - keyCreateStart;
-        histograms.get(FreonOps.KEY_CREATE.ordinal())
-            .update(keyCreationDuration);
-        keyCreationTime.getAndAdd(keyCreationDuration);
-
-        try (Scope writeScope = GlobalTracer.get().buildSpan("writeKeyData")
-            .startActive(true)) {
-          long keyWriteStart = System.nanoTime();
-          for (long nrRemaining = keySize;
-               nrRemaining > 0; nrRemaining -= bufferSize) {
-            int curSize = (int) Math.min(bufferSize, nrRemaining);
-            os.write(keyValueBuffer, 0, curSize);
-          }
-          os.close();
-
-          long keyWriteDuration = System.nanoTime() - keyWriteStart;
-          histograms.get(FreonOps.KEY_WRITE.ordinal())
-              .update(keyWriteDuration);
-          keyWriteTime.getAndAdd(keyWriteDuration);
-          totalBytesWritten.getAndAdd(keySize);
-          numberOfKeysAdded.getAndIncrement();
-        }
-      }
-
-      if (validateWrites) {
-        MessageDigest tmpMD = (MessageDigest) commonInitialMD.clone();
-        boolean validate = validationQueue.offer(
-            new KeyValidate(bucket, keyName, tmpMD.digest()));
-        if (validate) {
-          LOG.trace("Key {} is queued for validation.", keyName);
-        }
-      }
-
-      return true;
-    } catch (Exception e) {
-      exception = e;
-      LOG.error("Exception while adding key: {} in bucket: {}" +
-          " of volume: {}.", keyName, bucketName, volumeName, e);
-      return false;
-    }
-  }
-
-  private OzoneVolume getVolume(Integer volumeNumber) {
-    return waitUntilAddedToMap(volumes, volumeNumber);
-  }
-
-  private OzoneBucket getBucket(Integer bucketNumber) {
-    return waitUntilAddedToMap(buckets, bucketNumber);
-  }
-
-  /**
-   * Looks up volume or bucket from the cache.  Waits for it to be created if
-   * needed (can happen for the last few items depending on the number of
-   * threads).
-   *
-   * @return may return null if this thread is interrupted, or if any other
-   *   thread encounters an exception (and stores it to {@code exception})
-   */
-  private <T> T waitUntilAddedToMap(Map<Integer, T> map, Integer i) {
-    while (exception == null && !map.containsKey(i)) {
-      try {
-        Thread.sleep(10);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        return null;
-      }
-    }
-    return map.get(i);
-  }
-
-  private final class FreonJobInfo {
-
-    private String status;
-    private String gitBaseRevision;
-    private String jobStartTime;
-    private int numOfVolumes;
-    private int numOfBuckets;
-    private int numOfKeys;
-    private int numOfThreads;
-    private String dataWritten;
-    private String execTime;
-    private String replicationFactor;
-    private String replicationType;
-
-    private long keySize;
-    private int bufferSize;
-
-    private String totalThroughputPerSecond;
-
-    private String meanVolumeCreateTime;
-    private String deviationVolumeCreateTime;
-    private String[] tenQuantileVolumeCreateTime;
-
-    private String meanBucketCreateTime;
-    private String deviationBucketCreateTime;
-    private String[] tenQuantileBucketCreateTime;
-
-    private String meanKeyCreateTime;
-    private String deviationKeyCreateTime;
-    private String[] tenQuantileKeyCreateTime;
-
-    private String meanKeyWriteTime;
-    private String deviationKeyWriteTime;
-    private String[] tenQuantileKeyWriteTime;
-
-    private FreonJobInfo() {
-      this.status = exception != null ? "Failed" : "Success";
-      this.numOfVolumes = RandomKeyGenerator.this.numOfVolumes;
-      this.numOfBuckets = RandomKeyGenerator.this.numOfBuckets;
-      this.numOfKeys = RandomKeyGenerator.this.numOfKeys;
-      this.numOfThreads = RandomKeyGenerator.this.numOfThreads;
-      this.keySize = RandomKeyGenerator.this.keySize;
-      this.bufferSize = RandomKeyGenerator.this.bufferSize;
-      this.jobStartTime = Time.formatTime(RandomKeyGenerator.this.jobStartTime);
-      this.replicationFactor = RandomKeyGenerator.this.factor.name();
-      this.replicationType = RandomKeyGenerator.this.type.name();
-
-      long totalBytes =
-          (long) numOfVolumes * numOfBuckets * numOfKeys * keySize;
-      this.dataWritten = getInStorageUnits((double) totalBytes);
-      this.totalThroughputPerSecond = getInStorageUnits(
-          (totalBytes * 1.0) / TimeUnit.NANOSECONDS
-              .toSeconds(
-                  RandomKeyGenerator.this.keyWriteTime.get() / threadPoolSize));
-    }
-
-    private String getInStorageUnits(Double value) {
-      double size;
-      OzoneQuota.Units unit;
-      if ((long) (value / OzoneConsts.TB) != 0) {
-        size = value / OzoneConsts.TB;
-        unit = OzoneQuota.Units.TB;
-      } else if ((long) (value / OzoneConsts.GB) != 0) {
-        size = value / OzoneConsts.GB;
-        unit = OzoneQuota.Units.GB;
-      } else if ((long) (value / OzoneConsts.MB) != 0) {
-        size = value / OzoneConsts.MB;
-        unit = OzoneQuota.Units.MB;
-      } else if ((long) (value / OzoneConsts.KB) != 0) {
-        size = value / OzoneConsts.KB;
-        unit = OzoneQuota.Units.KB;
-      } else {
-        size = value;
-        unit = OzoneQuota.Units.BYTES;
-      }
-      return size + " " + unit;
-    }
-
-    public FreonJobInfo setGitBaseRevision(String gitBaseRevisionVal) {
-      gitBaseRevision = gitBaseRevisionVal;
-      return this;
-    }
-
-    public FreonJobInfo setExecTime(String execTimeVal) {
-      execTime = execTimeVal;
-      return this;
-    }
-
-    public FreonJobInfo setMeanKeyWriteTime(String meanKeyWriteTimeVal) {
-      this.meanKeyWriteTime = meanKeyWriteTimeVal;
-      return this;
-    }
-
-    public FreonJobInfo setDeviationKeyWriteTime(
-        String deviationKeyWriteTimeVal) {
-      this.deviationKeyWriteTime = deviationKeyWriteTimeVal;
-      return this;
-    }
-
-    public FreonJobInfo setTenQuantileKeyWriteTime(
-        String[] tenQuantileKeyWriteTimeVal) {
-      this.tenQuantileKeyWriteTime = tenQuantileKeyWriteTimeVal;
-      return this;
-    }
-
-    public FreonJobInfo setMeanKeyCreateTime(String meanKeyCreateTimeVal) {
-      this.meanKeyCreateTime = meanKeyCreateTimeVal;
-      return this;
-    }
-
-    public FreonJobInfo setDeviationKeyCreateTime(
-        String deviationKeyCreateTimeVal) {
-      this.deviationKeyCreateTime = deviationKeyCreateTimeVal;
-      return this;
-    }
-
-    public FreonJobInfo setTenQuantileKeyCreateTime(
-        String[] tenQuantileKeyCreateTimeVal) {
-      this.tenQuantileKeyCreateTime = tenQuantileKeyCreateTimeVal;
-      return this;
-    }
-
-    public FreonJobInfo setMeanBucketCreateTime(
-        String meanBucketCreateTimeVal) {
-      this.meanBucketCreateTime = meanBucketCreateTimeVal;
-      return this;
-    }
-
-    public FreonJobInfo setDeviationBucketCreateTime(
-        String deviationBucketCreateTimeVal) {
-      this.deviationBucketCreateTime = deviationBucketCreateTimeVal;
-      return this;
-    }
-
-    public FreonJobInfo setTenQuantileBucketCreateTime(
-        String[] tenQuantileBucketCreateTimeVal) {
-      this.tenQuantileBucketCreateTime = tenQuantileBucketCreateTimeVal;
-      return this;
-    }
-
-    public FreonJobInfo setMeanVolumeCreateTime(
-        String meanVolumeCreateTimeVal) {
-      this.meanVolumeCreateTime = meanVolumeCreateTimeVal;
-      return this;
-    }
-
-    public FreonJobInfo setDeviationVolumeCreateTime(
-        String deviationVolumeCreateTimeVal) {
-      this.deviationVolumeCreateTime = deviationVolumeCreateTimeVal;
-      return this;
-    }
-
-    public FreonJobInfo setTenQuantileVolumeCreateTime(
-        String[] tenQuantileVolumeCreateTimeVal) {
-      this.tenQuantileVolumeCreateTime = tenQuantileVolumeCreateTimeVal;
-      return this;
-    }
-
-    public String getJobStartTime() {
-      return jobStartTime;
-    }
-
-    public int getNumOfVolumes() {
-      return numOfVolumes;
-    }
-
-    public int getNumOfBuckets() {
-      return numOfBuckets;
-    }
-
-    public int getNumOfKeys() {
-      return numOfKeys;
-    }
-
-    public int getNumOfThreads() {
-      return numOfThreads;
-    }
-
-    public String getExecTime() {
-      return execTime;
-    }
-
-    public String getReplicationFactor() {
-      return replicationFactor;
-    }
-
-    public String getReplicationType() {
-      return replicationType;
-    }
-
-    public String getStatus() {
-      return status;
-    }
-
-    public long getKeySize() {
-      return keySize;
-    }
-
-    public int getBufferSize() {
-      return bufferSize;
-    }
-
-    public String getGitBaseRevision() {
-      return gitBaseRevision;
-    }
-
-    public String getDataWritten() {
-      return dataWritten;
-    }
-
-    public String getTotalThroughputPerSecond() {
-      return totalThroughputPerSecond;
-    }
-
-    public String getMeanVolumeCreateTime() {
-      return meanVolumeCreateTime;
-    }
-
-    public String getDeviationVolumeCreateTime() {
-      return deviationVolumeCreateTime;
-    }
-
-    public String[] getTenQuantileVolumeCreateTime() {
-      return tenQuantileVolumeCreateTime;
-    }
-
-    public String getMeanBucketCreateTime() {
-      return meanBucketCreateTime;
-    }
-
-    public String getDeviationBucketCreateTime() {
-      return deviationBucketCreateTime;
-    }
-
-    public String[] getTenQuantileBucketCreateTime() {
-      return tenQuantileBucketCreateTime;
-    }
-
-    public String getMeanKeyCreateTime() {
-      return meanKeyCreateTime;
-    }
-
-    public String getDeviationKeyCreateTime() {
-      return deviationKeyCreateTime;
-    }
-
-    public String[] getTenQuantileKeyCreateTime() {
-      return tenQuantileKeyCreateTime;
-    }
-
-    public String getMeanKeyWriteTime() {
-      return meanKeyWriteTime;
-    }
-
-    public String getDeviationKeyWriteTime() {
-      return deviationKeyWriteTime;
-    }
-
-    public String[] getTenQuantileKeyWriteTime() {
-      return tenQuantileKeyWriteTime;
-    }
-  }
-
-  /**
-   * Validates the writes done to the Ozone cluster.
-   */
-  private class Validator implements Runnable {
-    @Override
-    public void run() {
-      DigestUtils dig = new DigestUtils(DIGEST_ALGORITHM);
-
-      while (true) {
-        if (completed && validationQueue.isEmpty()) {
-          return;
-        }
-
-        try {
-          KeyValidate kv = validationQueue.poll(5, TimeUnit.SECONDS);
-          if (kv != null) {
-            OzoneInputStream is = kv.bucket.readKey(kv.keyName);
-            dig.getMessageDigest().reset();
-            byte[] curDigest = dig.digest(is);
-            totalWritesValidated++;
-            if (MessageDigest.isEqual(kv.digest, curDigest)) {
-              writeValidationSuccessCount++;
-            } else {
-              writeValidationFailureCount++;
-              LOG.warn("Data validation error for key {}/{}/{}",
-                  kv.bucket.getVolumeName(), kv.bucket.getName(), kv.keyName);
-              LOG.warn("Expected checksum: {}, Actual checksum: {}",
-                  kv.digest, curDigest);
-            }
-            is.close();
-          }
-        } catch (IOException | InterruptedException ex) {
-          LOG.error("Exception while validating write: " + ex.getMessage());
-        }
-      }
-    }
-  }
-
-  @VisibleForTesting
-  public void setNumOfVolumes(int numOfVolumes) {
-    this.numOfVolumes = numOfVolumes;
-  }
-
-  @VisibleForTesting
-  public void setNumOfBuckets(int numOfBuckets) {
-    this.numOfBuckets = numOfBuckets;
-  }
-
-  @VisibleForTesting
-  public void setNumOfKeys(int numOfKeys) {
-    this.numOfKeys = numOfKeys;
-  }
-
-  @VisibleForTesting
-  public void setNumOfThreads(int numOfThreads) {
-    this.numOfThreads = numOfThreads;
-  }
-
-  @VisibleForTesting
-  public void setKeySize(long keySize) {
-    this.keySize = keySize;
-  }
-
-  @VisibleForTesting
-  public void setType(ReplicationType type) {
-    this.type = type;
-  }
-
-  @VisibleForTesting
-  public void setFactor(ReplicationFactor factor) {
-    this.factor = factor;
-  }
-
-  @VisibleForTesting
-  public void setValidateWrites(boolean validateWrites) {
-    this.validateWrites = validateWrites;
-  }
-
-  @VisibleForTesting
-  public int getThreadPoolSize() {
-    return threadPoolSize;
-  }
-}
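
The generator above distributes work with plain modular arithmetic: global bucket b belongs to volume b % numOfVolumes, and global key k belongs to bucket k % totalBucketCount. A standalone sketch of that mapping (the counts below are illustrative, not defaults):

  public class FreonNumberingDemo {
    public static void main(String[] args) {
      int numOfVolumes = 3;
      int numOfBuckets = 2;                       // buckets per volume
      int totalBucketCount = numOfVolumes * numOfBuckets;
      for (int b = 0; b < totalBucketCount; b++) {
        // Same arithmetic as createBucket above.
        System.out.printf("global bucket %d -> volume %d, bucket index %d%n",
            b, b % numOfVolumes, b / numOfVolumes);
      }
      long totalKeyCount = totalBucketCount * 4L; // 4 keys per bucket
      for (long k = 0; k < totalKeyCount; k++) {
        // Same arithmetic as createKey above.
        System.out.printf("global key %d -> bucket %d, key index %d%n",
            k, (int) (k % totalBucketCount), k / totalBucketCount);
      }
    }
  }
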
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3KeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3KeyGenerator.java
deleted file mode 100644
index eb9a0ce..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3KeyGenerator.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-
-import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
-import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
-import com.amazonaws.regions.Regions;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.codahale.metrics.Timer;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-
-/**
- * Generate random keys via the s3 interface.
- */
-@Command(name = "s3kg",
-    aliases = "s3-key-generator",
-    description = "Create random keys via the s3 interface.",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true,
-    showDefaultValues = true)
-public class S3KeyGenerator extends BaseFreonGenerator
-    implements Callable<Void> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(S3KeyGenerator.class);
-
-  @Option(names = {"-b", "--bucket"},
-      description =
-          "Name of the (S3!) bucket which contains the test data.",
-      defaultValue = "bucket1")
-  private String bucketName;
-
-  @Option(names = {"-s", "--size"},
-      description = "Size of the generated key (in bytes)",
-      defaultValue = "10240")
-  private int fileSize;
-
-  @Option(names = {"-e", "--endpoint"},
-      description = "S3 HTTP endpoint",
-      defaultValue = "http://localhost:9878")
-  private String endpoint;
-
-  private Timer timer;
-
-  private String content;
-
-  private AmazonS3 s3;
-
-  @Override
-  public Void call() throws Exception {
-
-    init();
-
-    AmazonS3ClientBuilder amazonS3ClientBuilder =
-        AmazonS3ClientBuilder.standard()
-            .withCredentials(new EnvironmentVariableCredentialsProvider());
-
-    if (endpoint.length() > 0) {
-      amazonS3ClientBuilder
-          .withPathStyleAccessEnabled(true)
-          .withEndpointConfiguration(new EndpointConfiguration(endpoint, ""));
-
-    } else {
-      amazonS3ClientBuilder.withRegion(Regions.DEFAULT_REGION);
-    }
-
-    s3 = amazonS3ClientBuilder.build();
-
-    content = RandomStringUtils.randomAscii(fileSize);
-
-    timer = getMetrics().timer("key-create");
-
-    runTests(this::createKey);
-
-    return null;
-  }
-
-  private void createKey(long counter) throws Exception {
-    timer.time(() -> {
-
-      s3.putObject(bucketName, generateObjectName(counter),
-          content);
-      return null;
-    });
-  }
-}
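
S3KeyGenerator talks to Ozone's S3 gateway through the stock AWS SDK. A trimmed-down sketch of the same client construction outside Freon (endpoint and bucket name are the defaults from the options above; assumes AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are exported in the environment):

  import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
  import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
  import com.amazonaws.services.s3.AmazonS3;
  import com.amazonaws.services.s3.AmazonS3ClientBuilder;

  public class OzoneS3Demo {
    public static void main(String[] args) {
      AmazonS3 s3 = AmazonS3ClientBuilder.standard()
          .withCredentials(new EnvironmentVariableCredentialsProvider())
          .withPathStyleAccessEnabled(true)  // path-style URLs, not virtual-host buckets
          .withEndpointConfiguration(
              new EndpointConfiguration("http://localhost:9878", ""))
          .build();
      s3.putObject("bucket1", "key-0", "hello ozone"); // the call the generator times
    }
  }
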
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SameKeyReader.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SameKeyReader.java
deleted file mode 100644
index bafd3ec..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SameKeyReader.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-import java.io.InputStream;
-import java.security.MessageDigest;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-
-import com.codahale.metrics.Timer;
-import org.apache.commons.io.IOUtils;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-
-/**
- * Data generator tool to test OM performance.
- */
-@Command(name = "ocokr",
-    aliases = "ozone-client-one-key-reader",
-    description = "Read the same key from multiple threads.",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true,
-    showDefaultValues = true)
-public class SameKeyReader extends BaseFreonGenerator
-    implements Callable<Void> {
-
-  @Option(names = {"-v", "--volume"},
-      description = "Name of the bucket which contains the test data. Will be"
-          + " created if missing.",
-      defaultValue = "vol1")
-  private String volumeName;
-
-  @Option(names = {"-b", "--bucket"},
-      description = "Name of the bucket which contains the test data. Will be"
-          + " created if missing.",
-      defaultValue = "bucket1")
-  private String bucketName;
-
-  @Option(names = {"-k", "--key"},
-      required = true,
-      description = "Name of the key read from multiple threads")
-  private String keyName;
-
-  private Timer timer;
-
-  private byte[] referenceDigest;
-
-  private OzoneClient rpcClient;
-
-  @Override
-  public Void call() throws Exception {
-
-    init();
-
-    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
-
-    rpcClient = OzoneClientFactory.getRpcClient(ozoneConfiguration);
-
-    try (InputStream stream = rpcClient.getObjectStore().getVolume(volumeName)
-        .getBucket(bucketName).readKey(keyName)) {
-      referenceDigest = getDigest(stream);
-    }
-
-    timer = getMetrics().timer("key-create");
-
-    runTests(this::validateKey);
-
-    return null;
-  }
-
-  private void validateKey(long counter) throws Exception {
-
-    byte[] content = timer.time(() -> {
-      try (InputStream stream = rpcClient.getObjectStore().getVolume(volumeName)
-          .getBucket(bucketName).readKey(keyName)) {
-        return IOUtils.toByteArray(stream);
-      }
-    });
-    if (!MessageDigest.isEqual(referenceDigest, getDigest(content))) {
-      throw new IllegalStateException(
-          "Reference message digest doesn't match with the digest of the same"
-              + " key." + counter);
-    }
-  }
-
-}
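
SameKeyReader validates repeated reads by comparing MD5 digests with MessageDigest.isEqual. A self-contained illustration of that comparison (the content strings are made up):

  import java.nio.charset.StandardCharsets;
  import java.security.MessageDigest;

  public class DigestCompareDemo {
    public static void main(String[] args) throws Exception {
      byte[] reference = MessageDigest.getInstance("MD5")
          .digest("same-content".getBytes(StandardCharsets.UTF_8));
      byte[] reread = MessageDigest.getInstance("MD5")
          .digest("same-content".getBytes(StandardCharsets.UTF_8));
      // Time-constant comparison, as in validateKey above.
      System.out.println(MessageDigest.isEqual(reference, reread)); // true
    }
  }
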
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/package-info.java
deleted file mode 100644
index 3ef9123..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-/**
- This package contains classes used for testing and benchmarking the Ozone cluster.
- */
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/BlockIdDetails.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/BlockIdDetails.java
deleted file mode 100644
index cf15e1f..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/BlockIdDetails.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.fsck;
-
-import java.util.Objects;
-
-/**
- * Getters and setters for BlockIdDetails.
- */
-
-public class BlockIdDetails {
-
-  private String bucketName;
-  private String blockVol;
-  private String keyName;
-
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  public void setBucketName(String bucketName) {
-    this.bucketName = bucketName;
-  }
-
-  public String getBlockVol() {
-    return blockVol;
-  }
-
-  public void setBlockVol(String blockVol) {
-    this.blockVol = blockVol;
-  }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  public void setKeyName(String keyName) {
-    this.keyName = keyName;
-  }
-
-  @Override
-  public String toString() {
-    return "BlockIdDetails{" +
-        "bucketName='" + bucketName + '\'' +
-        ", blockVol='" + blockVol + '\'' +
-        ", keyName='" + keyName + '\'' +
-        '}';
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    BlockIdDetails that = (BlockIdDetails) o;
-    return Objects.equals(bucketName, that.bucketName) &&
-        Objects.equals(blockVol, that.blockVol) &&
-        Objects.equals(keyName, that.keyName);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(bucketName, blockVol, keyName);
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
deleted file mode 100644
index 81ff0ea..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.fsck;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
-
-
-/**
- * Generates the Container Id to (Block, BlockIdDetails) mapping.
- */
-
-public class ContainerMapper {
-
-
-  private static Table getMetaTable(OzoneConfiguration configuration)
-      throws IOException {
-    OmMetadataManagerImpl metadataManager =
-        new OmMetadataManagerImpl(configuration);
-    return metadataManager.getKeyTable();
-  }
-
-  public static void main(String[] args) throws IOException {
-    if (args.length == 0 || args[0] == null) {
-      throw new IOException("Path cannot be null");
-    }
-    String path = args[0];
-
-    OzoneConfiguration configuration = new OzoneConfiguration();
-    configuration.set(OZONE_OM_DB_DIRS, path);
-
-    ContainerMapper containerMapper = new ContainerMapper();
-    Map<Long, List<Map<Long, BlockIdDetails>>> dataMap =
-        containerMapper.parseOmDB(configuration);
-
-
-    ObjectMapper mapper = new ObjectMapper();
-    System.out.println(mapper.writeValueAsString(dataMap));
-
-  }
-
-  /**
-   * Generates the Container Id to (Block, BlockIdDetails) mapping.
-   * @param configuration {@link OzoneConfiguration}
-   * @return {@code Map<Long, List<Map<Long, BlockIdDetails>>>}:
-   *   Map of ContainerId -> (Block, Block info)
-   * @throws IOException if the OM DB cannot be read
-   */
-  public Map<Long, List<Map<Long, BlockIdDetails>>>
-      parseOmDB(OzoneConfiguration configuration) throws IOException {
-    String path = configuration.get(OZONE_OM_DB_DIRS);
-    if (path == null || path.isEmpty()) {
-      throw new IOException(OZONE_OM_DB_DIRS + "should be set ");
-    } else {
-      Table keyTable = getMetaTable(configuration);
-      Map<Long, List<Map<Long, BlockIdDetails>>> dataMap = new HashMap<>();
-
-      if (keyTable != null) {
-        try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
-                 keyValueTableIterator = keyTable.iterator()) {
-          while (keyValueTableIterator.hasNext()) {
-            Table.KeyValue<String, OmKeyInfo> keyValue =
-                keyValueTableIterator.next();
-            OmKeyInfo omKeyInfo = keyValue.getValue();
-            byte[] value = omKeyInfo.getProtobuf().toByteArray();
-            OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf(
-                OzoneManagerProtocolProtos.KeyInfo.parseFrom(value));
-            for (OmKeyLocationInfoGroup keyLocationInfoGroup : keyInfo
-                .getKeyLocationVersions()) {
-              List<OmKeyLocationInfo> keyLocationInfo = keyLocationInfoGroup
-                  .getLocationList();
-              for (OmKeyLocationInfo keyLocation : keyLocationInfo) {
-                BlockIdDetails blockIdDetails = new BlockIdDetails();
-                Map<Long, BlockIdDetails> innerMap = new HashMap<>();
-
-                long containerID = keyLocation.getBlockID().getContainerID();
-                long blockID = keyLocation.getBlockID().getLocalID();
-                blockIdDetails.setBucketName(keyInfo.getBucketName());
-                blockIdDetails.setBlockVol(keyInfo.getVolumeName());
-                blockIdDetails.setKeyName(keyInfo.getKeyName());
-
-                List<Map<Long, BlockIdDetails>> innerList = new ArrayList<>();
-                innerMap.put(blockID, blockIdDetails);
-
-                if (dataMap.containsKey(containerID)) {
-                  innerList = dataMap.get(containerID);
-                }
-
-                innerList.add(innerMap);
-                dataMap.put(containerID, innerList);
-              }
-            }
-          }
-        }
-      }
-
-      return dataMap;
-
-    }
-  }
-}
-
-
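
ContainerMapper prints one JSON document mapping each container ID to a list of (local block ID -> BlockIdDetails) entries. A hypothetical invocation and output shape (the path and all values below are illustrative only):

  public class ContainerMapperDemo {
    public static void main(String[] args) throws Exception {
      // args[0] must point at a copy of the OM RocksDB directory.
      ContainerMapper.main(new String[] {"/tmp/om-db"});
      // Expected output shape, pretty-printed here for readability:
      // {"1": [{"104": {"bucketName": "b1",
      //                 "blockVol": "vol1",
      //                 "keyName": "key-0"}}]}
    }
  }
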
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/package-info.java
deleted file mode 100644
index 432d65c..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/package-info.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Package info.
- * <p>
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * <p>
- * fsck tool.
- */
-
-package org.apache.hadoop.ozone.fsck;
\ No newline at end of file
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
deleted file mode 100644
index ae3a83b..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.genconf;
-
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-import picocli.CommandLine.PicocliException;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Marshaller;
-import java.io.File;
-import java.io.IOException;
-import java.net.URL;
-import java.nio.file.Files;
-import java.nio.file.InvalidPathException;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.List;
-/**
- * GenerateOzoneRequiredConfigurations - A tool to generate ozone-site.xml<br>
- * This tool generates an ozone-site.xml with minimally required configs.
- * This tool can be invoked as follows:<br>
- * <ul>
- * <li>ozone genconf {@literal <Path to output file>}</li>
- * <li>ozone genconf --help</li>
- * <li>ozone genconf -h</li>
- * </ul>
- */
-@Command(
-    name = "ozone genconf",
-    description = "Tool to generate template ozone-site.xml",
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true)
-public final class GenerateOzoneRequiredConfigurations extends GenericCli {
-
-  @Parameters(arity = "1..1",
-      description = "Directory path where ozone-site file should be generated.")
-  private String path;
-
-  /**
-   * Entry point for the genconf tool.
-   *
-   * @param args command-line arguments
-   */
-  public static void main(String[] args) throws Exception {
-    new GenerateOzoneRequiredConfigurations().run(args);
-  }
-
-  @Override
-  public Void call() throws Exception {
-    generateConfigurations(path);
-    return null;
-  }
-
-  /**
-   * Generate ozone-site.xml at the specified path.
-   *
-   * @param path directory in which the file is generated
-   * @throws PicocliException if the path is invalid or not writable
-   * @throws JAXBException if marshalling the configuration fails
-   * @throws IOException if the output file cannot be created
-   */
-  public static void generateConfigurations(String path) throws
-      PicocliException, JAXBException, IOException {
-
-    if (!isValidPath(path)) {
-      throw new PicocliException("Invalid directory path.");
-    }
-
-    if (!canWrite(path)) {
-      throw new PicocliException("Insufficient permission.");
-    }
-
-    OzoneConfiguration oc = new OzoneConfiguration();
-
-    ClassLoader cL = Thread.currentThread().getContextClassLoader();
-    if (cL == null) {
-      cL = OzoneConfiguration.class.getClassLoader();
-    }
-    URL url = cL.getResource("ozone-default.xml");
-
-    List<OzoneConfiguration.Property> allProperties =
-        oc.readPropertyFromXml(url);
-
-    List<OzoneConfiguration.Property> requiredProperties = new ArrayList<>();
-
-    for (OzoneConfiguration.Property p : allProperties) {
-      if (p.getTag() != null && p.getTag().contains("REQUIRED")) {
-        if (p.getName().equalsIgnoreCase(OzoneConfigKeys.OZONE_ENABLED)) {
-          p.setValue(String.valueOf(Boolean.TRUE));
-        } else if (p.getName().equalsIgnoreCase(
-            OzoneConfigKeys.OZONE_METADATA_DIRS)) {
-          p.setValue(System.getProperty(OzoneConsts.JAVA_TMP_DIR));
-        } else if (p.getName().equalsIgnoreCase(
-            OMConfigKeys.OZONE_OM_ADDRESS_KEY)
-            || p.getName().equalsIgnoreCase(ScmConfigKeys.OZONE_SCM_NAMES)
-            || p.getName().equalsIgnoreCase(
-              ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY)) {
-          p.setValue(OzoneConsts.LOCALHOST);
-        }
-
-        requiredProperties.add(p);
-      }
-    }
-
-    OzoneConfiguration.XMLConfiguration requiredConfig =
-        new OzoneConfiguration.XMLConfiguration();
-    requiredConfig.setProperties(requiredProperties);
-
-    File output = new File(path, "ozone-site.xml");
-    if (output.createNewFile()) {
-      JAXBContext context =
-          JAXBContext.newInstance(OzoneConfiguration.XMLConfiguration.class);
-      Marshaller m = context.createMarshaller();
-      m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
-      m.marshal(requiredConfig, output);
-
-      System.out.println("ozone-site.xml has been generated at " + path);
-    } else {
-      System.out.printf("ozone-site.xml already exists at %s and " +
-          "will not be overwritten%n", path);
-    }
-
-  }
-
-  /**
-   * Check if the path is a valid directory.
-   *
-   * @param path directory path to validate
-   * @return true if the path is a valid directory, false otherwise
-   */
-  public static boolean isValidPath(String path) {
-    try {
-      return Files.isDirectory(Paths.get(path));
-    } catch (InvalidPathException | NullPointerException ex) {
-      return Boolean.FALSE;
-    }
-  }
-
-  /**
-   * Check if the user has permission to write to the specified path.
-   *
-   * @param path directory path to check
-   * @return true if the user has write permission, false otherwise
-   */
-  public static boolean canWrite(String path) {
-    File file = new File(path);
-    return file.canWrite();
-  }
-
-}
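The tool above marshals a list of property beans into a formatted ozone-site.xml through JAXB. A self-contained sketch of the same marshalling pattern, using illustrative bean names (SiteConfig, Prop) rather than the OzoneConfiguration classes:

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import java.io.File;
import java.util.ArrayList;
import java.util.List;

// Illustrative stand-ins for OzoneConfiguration.XMLConfiguration/Property.
@XmlRootElement(name = "configuration")
public class SiteConfig {
  private List<Prop> properties = new ArrayList<>();

  @XmlElement(name = "property")
  public List<Prop> getProperties() { return properties; }
  public void setProperties(List<Prop> p) { this.properties = p; }

  public static class Prop {
    private String name;
    private String value;
    @XmlElement public String getName() { return name; }
    public void setName(String n) { this.name = n; }
    @XmlElement public String getValue() { return value; }
    public void setValue(String v) { this.value = v; }
  }

  public static void main(String[] args) throws Exception {
    SiteConfig cfg = new SiteConfig();
    Prop p = new Prop();
    p.setName("ozone.metadata.dirs");
    p.setValue("/tmp");
    cfg.getProperties().add(p);

    // Same JAXB calls as the deleted tool: formatted output to a file.
    Marshaller m = JAXBContext.newInstance(SiteConfig.class).createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    m.marshal(cfg, new File("ozone-site.xml"));
  }
}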
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/package-info.java
deleted file mode 100644
index 4817d39..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.genconf;
-
-/**
- * Command line tool to generate required Ozone configs to an ozone-site.xml.
- */
-
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
deleted file mode 100644
index 9c0b541..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.ozone.genesis;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.util.Time;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.Level;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.infra.Blackhole;
-
-import java.io.IOException;
-import java.util.UUID;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Objects;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
-
-/**
- * Benchmarks ContainerStateMap class.
- */
-@State(Scope.Thread)
-public class BenchMarkContainerStateMap {
-  private ContainerStateMap stateMap;
-  private AtomicInteger containerID;
-  private AtomicInteger runCount;
-  private static int errorFrequency = 100;
-
-  @Setup(Level.Trial)
-  public void initialize() throws IOException {
-    stateMap = new ContainerStateMap();
-    runCount = new AtomicInteger(0);
-    Pipeline pipeline = createSingleNodePipeline(UUID.randomUUID().toString());
-    Preconditions.checkNotNull(pipeline, "Pipeline cannot be null.");
-    int currentCount = 1;
-    for (int x = 1; x < 1000; x++) {
-      try {
-        ContainerInfo containerInfo = new ContainerInfo.Builder()
-            .setState(CLOSED)
-            .setPipelineID(pipeline.getId())
-            .setReplicationType(pipeline.getType())
-            .setReplicationFactor(pipeline.getFactor())
-            .setUsedBytes(0)
-            .setNumberOfKeys(0)
-            .setStateEnterTime(Time.monotonicNow())
-            .setOwner("OZONE")
-            .setContainerID(x)
-            .setDeleteTransactionId(0)
-            .build();
-        stateMap.addContainer(containerInfo);
-        currentCount++;
-      } catch (SCMException e) {
-        e.printStackTrace();
-      }
-    }
-    for (int y = currentCount; y < 50000; y++) {
-      try {
-        ContainerInfo containerInfo = new ContainerInfo.Builder()
-            .setState(OPEN)
-            .setPipelineID(pipeline.getId())
-            .setReplicationType(pipeline.getType())
-            .setReplicationFactor(pipeline.getFactor())
-            .setUsedBytes(0)
-            .setNumberOfKeys(0)
-            .setStateEnterTime(Time.monotonicNow())
-            .setOwner("OZONE")
-            .setContainerID(y)
-            .setDeleteTransactionId(0)
-            .build();
-        stateMap.addContainer(containerInfo);
-        currentCount++;
-      } catch (SCMException e) {
-        e.printStackTrace();
-      }
-    }
-    try {
-      ContainerInfo containerInfo = new ContainerInfo.Builder()
-          .setState(OPEN)
-          .setPipelineID(pipeline.getId())
-          .setReplicationType(pipeline.getType())
-          .setReplicationFactor(pipeline.getFactor())
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(currentCount++)
-          .setDeleteTransactionId(0)
-          .build();
-      stateMap.addContainer(containerInfo);
-    } catch (SCMException e) {
-      e.printStackTrace();
-    }
-
-    containerID = new AtomicInteger(currentCount++);
-
-  }
-
-  public static Pipeline createSingleNodePipeline(String containerName)
-      throws IOException {
-    return createPipeline(containerName, 1);
-  }
-
-  /**
-   * Create a pipeline with single node replica.
-   *
-   * @return Pipeline with single node in it.
-   * @throws IOException
-   */
-  public static Pipeline createPipeline(String containerName, int numNodes)
-      throws IOException {
-    Preconditions.checkArgument(numNodes >= 1);
-    final List<DatanodeDetails> ids = new ArrayList<>(numNodes);
-    for (int i = 0; i < numNodes; i++) {
-      ids.add(GenesisUtil.createDatanodeDetails(UUID.randomUUID().toString()));
-    }
-    return createPipeline(containerName, ids);
-  }
-
-  public static Pipeline createPipeline(String containerName,
-      Iterable<DatanodeDetails> ids) throws IOException {
-    Objects.requireNonNull(ids, "ids == null");
-    Preconditions.checkArgument(ids.iterator().hasNext());
-    List<DatanodeDetails> dns = new ArrayList<>();
-    ids.forEach(dns::add);
-    final Pipeline pipeline = Pipeline.newBuilder()
-        .setState(Pipeline.PipelineState.OPEN)
-        .setId(PipelineID.randomId())
-        .setType(HddsProtos.ReplicationType.STAND_ALONE)
-        .setFactor(HddsProtos.ReplicationFactor.ONE)
-        .setNodes(dns)
-        .build();
-    return pipeline;
-  }
-
-  @Benchmark
-  public void createContainerBenchMark(BenchMarkContainerStateMap state,
-      Blackhole bh) throws IOException {
-    ContainerInfo containerInfo = getContainerInfo(state);
-    state.stateMap.addContainer(containerInfo);
-  }
-
-  private ContainerInfo getContainerInfo(BenchMarkContainerStateMap state)
-      throws IOException {
-    Pipeline pipeline = createSingleNodePipeline(UUID.randomUUID().toString());
-    int cid = state.containerID.incrementAndGet();
-    return new ContainerInfo.Builder()
-        .setState(CLOSED)
-        .setPipelineID(pipeline.getId())
-        .setReplicationType(pipeline.getType())
-        .setReplicationFactor(pipeline.getFactor())
-        .setUsedBytes(0)
-        .setNumberOfKeys(0)
-        .setStateEnterTime(Time.monotonicNow())
-        .setOwner("OZONE")
-        .setContainerID(cid)
-        .setDeleteTransactionId(0)
-        .build();
-  }
-
-  @Benchmark
-  public void getMatchingContainerBenchMark(BenchMarkContainerStateMap state,
-      Blackhole bh) throws IOException {
-    if (runCount.incrementAndGet() % errorFrequency == 0) {
-      state.stateMap.addContainer(getContainerInfo(state));
-    }
-    bh.consume(state.stateMap
-        .getMatchingContainerIDs(OPEN, "OZONE", ReplicationFactor.ONE,
-            ReplicationType.STAND_ALONE));
-  }
-}
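The benchmark above follows the standard JMH shape: a @State object holds the data structure, @Setup populates it once per trial, and @Benchmark methods push results through a Blackhole so the JIT cannot eliminate them as dead code. A stripped-down skeleton of that pattern (illustrative only, not part of Genesis):

import java.util.concurrent.atomic.AtomicLong;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.infra.Blackhole;

@State(Scope.Thread)            // one instance of this state per benchmark thread
public class SkeletonBenchmark {
  private AtomicLong counter;

  @Setup(Level.Trial)           // runs once per trial, outside the measured loop
  public void initialize() {
    counter = new AtomicLong(); // expensive population would go here
  }

  @Benchmark
  public void increment(Blackhole bh) {
    bh.consume(counter.incrementAndGet()); // consumed so it cannot be elided
  }
}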
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
deleted file mode 100644
index c05ecb9..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ /dev/null
@@ -1,331 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.genesis;
-
-import com.google.common.collect.Maps;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine.DatanodeStates;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.Level;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.TearDown;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .PutBlockRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetBlockRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .WriteChunkRequestProto;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-/**
- * Benchmarks DatanodeDispatcher class.
- */
-@State(Scope.Benchmark)
-public class BenchMarkDatanodeDispatcher {
-
-  private String baseDir;
-  private String datanodeUuid;
-  private HddsDispatcher dispatcher;
-  private ByteString data;
-  private Random random;
-  private AtomicInteger containerCount;
-  private AtomicInteger keyCount;
-  private AtomicInteger chunkCount;
-
-  private static final int INIT_CONTAINERS = 100;
-  private static final int INIT_KEYS = 50;
-  private static final int INIT_CHUNKS = 100;
-
-  private List<Long> containers;
-  private List<Long> keys;
-  private List<String> chunks;
-  private VolumeSet volumeSet;
-
-  @Setup(Level.Trial)
-  public void initialize() throws IOException {
-    datanodeUuid = UUID.randomUUID().toString();
-
-    // 1 MB of data
-    data = ByteString.copyFromUtf8(RandomStringUtils.randomAscii(1048576));
-    random  = new Random();
-    Configuration conf = new OzoneConfiguration();
-    baseDir = System.getProperty("java.io.tmpdir") + File.separator +
-        datanodeUuid;
-
-    // data directory
-    conf.set("dfs.datanode.data.dir", baseDir + File.separator + "data");
-
-    ContainerSet containerSet = new ContainerSet();
-    volumeSet = new VolumeSet(datanodeUuid, conf);
-    StateContext context = new StateContext(
-        conf, DatanodeStates.RUNNING, null);
-    ContainerMetrics metrics = ContainerMetrics.create(conf);
-    Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
-    for (ContainerProtos.ContainerType containerType :
-        ContainerProtos.ContainerType.values()) {
-      handlers.put(containerType,
-          Handler.getHandlerForContainerType(
-              containerType, conf, context, containerSet, volumeSet, metrics));
-    }
-    dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers,
-        context, metrics);
-    dispatcher.init();
-
-    containerCount = new AtomicInteger();
-    keyCount = new AtomicInteger();
-    chunkCount = new AtomicInteger();
-
-    containers = new ArrayList<>();
-    keys = new ArrayList<>();
-    chunks = new ArrayList<>();
-
-    // Create containers
-    for (int x = 0; x < INIT_CONTAINERS; x++) {
-      long containerID = HddsUtils.getUtcTime() + x;
-      ContainerCommandRequestProto req = getCreateContainerCommand(containerID);
-      dispatcher.dispatch(req, null);
-      containers.add(containerID);
-      containerCount.getAndIncrement();
-    }
-
-    for (int x = 0; x < INIT_KEYS; x++) {
-      keys.add(HddsUtils.getUtcTime()+x);
-    }
-
-    for (int x = 0; x < INIT_CHUNKS; x++) {
-      chunks.add("chunk-" + x);
-    }
-
-    // Add chunk and keys to the containers
-    for (int x = 0; x < INIT_KEYS; x++) {
-      String chunkName = chunks.get(x);
-      chunkCount.getAndIncrement();
-      long key = keys.get(x);
-      keyCount.getAndIncrement();
-      for (int y = 0; y < INIT_CONTAINERS; y++) {
-        long containerID = containers.get(y);
-        BlockID  blockID = new BlockID(containerID, key);
-        dispatcher
-            .dispatch(getPutBlockCommand(blockID, chunkName), null);
-        dispatcher.dispatch(getWriteChunkCommand(blockID, chunkName), null);
-      }
-    }
-  }
-
-  @TearDown(Level.Trial)
-  public void cleanup() throws IOException {
-    volumeSet.shutdown();
-    FileUtils.deleteDirectory(new File(baseDir));
-  }
-
-  private ContainerCommandRequestProto getCreateContainerCommand(
-      long containerID) {
-    ContainerCommandRequestProto.Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.CreateContainer);
-    request.setContainerID(containerID);
-    request.setCreateContainer(
-        ContainerProtos.CreateContainerRequestProto.getDefaultInstance());
-    request.setDatanodeUuid(datanodeUuid);
-    request.setTraceID(containerID + "-trace");
-    return request.build();
-  }
-
-  private ContainerCommandRequestProto getWriteChunkCommand(
-      BlockID blockID, String chunkName) {
-    WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
-        .newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
-        .setChunkData(getChunkInfo(blockID, chunkName))
-        .setData(data);
-
-    ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
-        .newBuilder();
-    request.setCmdType(ContainerProtos.Type.WriteChunk)
-        .setContainerID(blockID.getContainerID())
-        .setTraceID(getBlockTraceID(blockID))
-        .setDatanodeUuid(datanodeUuid)
-        .setWriteChunk(writeChunkRequest);
-    return request.build();
-  }
-
-  private ContainerCommandRequestProto getReadChunkCommand(
-      BlockID blockID, String chunkName) {
-    ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto
-        .newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
-        .setChunkData(getChunkInfo(blockID, chunkName));
-
-    ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
-        .newBuilder();
-    request.setCmdType(ContainerProtos.Type.ReadChunk)
-        .setContainerID(blockID.getContainerID())
-        .setTraceID(getBlockTraceID(blockID))
-        .setDatanodeUuid(datanodeUuid)
-        .setReadChunk(readChunkRequest);
-    return request.build();
-  }
-
-  private ContainerProtos.ChunkInfo getChunkInfo(
-      BlockID blockID, String chunkName) {
-    ContainerProtos.ChunkInfo.Builder builder =
-        ContainerProtos.ChunkInfo.newBuilder()
-            .setChunkName(
-                DigestUtils.md5Hex(chunkName)
-                    + "_stream_" + blockID.getContainerID() + "_block_"
-                    + blockID.getLocalID())
-            .setOffset(0).setLen(data.size());
-    return builder.build();
-  }
-
-  private ContainerCommandRequestProto getPutBlockCommand(
-      BlockID blockID, String chunkKey) {
-    PutBlockRequestProto.Builder putBlockRequest = PutBlockRequestProto
-        .newBuilder()
-        .setBlockData(getBlockData(blockID, chunkKey));
-
-    ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
-        .newBuilder();
-    request.setCmdType(ContainerProtos.Type.PutBlock)
-        .setContainerID(blockID.getContainerID())
-        .setTraceID(getBlockTraceID(blockID))
-        .setDatanodeUuid(datanodeUuid)
-        .setPutBlock(putBlockRequest);
-    return request.build();
-  }
-
-  private ContainerCommandRequestProto getGetBlockCommand(BlockID blockID) {
-    GetBlockRequestProto.Builder readBlockRequest =
-        GetBlockRequestProto.newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf());
-    ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(ContainerProtos.Type.GetBlock)
-        .setContainerID(blockID.getContainerID())
-        .setTraceID(getBlockTraceID(blockID))
-        .setDatanodeUuid(datanodeUuid)
-        .setGetBlock(readBlockRequest);
-    return request.build();
-  }
-
-  private ContainerProtos.BlockData getBlockData(
-      BlockID blockID, String chunkKey) {
-    ContainerProtos.BlockData.Builder builder =  ContainerProtos.BlockData
-        .newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
-        .addChunks(getChunkInfo(blockID, chunkKey));
-    return builder.build();
-  }
-
-  @Benchmark
-  public void createContainer(BenchMarkDatanodeDispatcher bmdd) {
-    long containerID = RandomUtils.nextLong();
-    ContainerCommandRequestProto req = getCreateContainerCommand(containerID);
-    bmdd.dispatcher.dispatch(req, null);
-    bmdd.containers.add(containerID);
-    bmdd.containerCount.getAndIncrement();
-  }
-
-
-  @Benchmark
-  public void writeChunk(BenchMarkDatanodeDispatcher bmdd) {
-    bmdd.dispatcher.dispatch(getWriteChunkCommand(
-        getRandomBlockID(), getNewChunkToWrite()), null);
-  }
-
-  @Benchmark
-  public void readChunk(BenchMarkDatanodeDispatcher bmdd) {
-    BlockID blockID = getRandomBlockID();
-    String chunkKey = getRandomChunkToRead();
-    bmdd.dispatcher.dispatch(getReadChunkCommand(blockID, chunkKey), null);
-  }
-
-  @Benchmark
-  public void putBlock(BenchMarkDatanodeDispatcher bmdd) {
-    BlockID blockID = getRandomBlockID();
-    String chunkKey = getNewChunkToWrite();
-    bmdd.dispatcher.dispatch(getPutBlockCommand(blockID, chunkKey), null);
-  }
-
-  @Benchmark
-  public void getBlock(BenchMarkDatanodeDispatcher bmdd) {
-    BlockID blockID = getRandomBlockID();
-    bmdd.dispatcher.dispatch(getGetBlockCommand(blockID), null);
-  }
-
-  // Chunk writes from the benchmark only reach certain containers.
-  // Use INIT_CHUNKS instead of the updated counters to guarantee
-  // that keys/chunks remain readable.
-
-  private BlockID getRandomBlockID() {
-    return new BlockID(getRandomContainerID(), getRandomKeyID());
-  }
-
-  private long getRandomContainerID() {
-    return containers.get(random.nextInt(INIT_CONTAINERS));
-  }
-
-  private long getRandomKeyID() {
-    return keys.get(random.nextInt(INIT_KEYS));
-  }
-
-  private String getRandomChunkToRead() {
-    return chunks.get(random.nextInt(INIT_CHUNKS));
-  }
-
-  private String getNewChunkToWrite() {
-    return "chunk-" + chunkCount.getAndIncrement();
-  }
-
-  private String getBlockTraceID(BlockID blockID) {
-    return blockID.getContainerID() + "-" + blockID.getLocalID() +"-trace";
-  }
-}
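Benchmarks like the dispatcher one above are normally selected and executed through JMH's Runner API. A hypothetical stand-alone launcher is sketched below; Genesis ships its own CLI entry point, so treat this as an illustration of the Runner mechanics only:

import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

public final class BenchLauncher {
  private BenchLauncher() { }

  public static void main(String[] args) throws RunnerException {
    Options opts = new OptionsBuilder()
        .include("BenchMarkDatanodeDispatcher") // regex over benchmark names
        .forks(1)
        .warmupIterations(2)
        .measurementIterations(5)
        .build();
    new Runner(opts).run();                     // runs all matching @Benchmark methods
  }
}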
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java
deleted file mode 100644
index bf40ebc..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.genesis;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.Param;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.infra.Blackhole;
-
-import java.io.IOException;
-import java.nio.charset.Charset;
-
-import static org.apache.hadoop.ozone.genesis.GenesisUtil.CACHE_10MB_TYPE;
-import static org.apache.hadoop.ozone.genesis.GenesisUtil.CACHE_1GB_TYPE;
-import static org.apache.hadoop.ozone.genesis.GenesisUtil.CLOSED_TYPE;
-import static org.apache.hadoop.ozone.genesis.GenesisUtil.DEFAULT_TYPE;
-
-/**
- * Measure metadatastore read performance.
- */
-@State(Scope.Thread)
-public class BenchMarkMetadataStoreReads {
-
-  private static final int DATA_LEN = 1024;
-  private static final long MAX_KEYS = 1024 * 10;
-
-  private MetadataStore store;
-
-  @Param({DEFAULT_TYPE, CACHE_10MB_TYPE, CACHE_1GB_TYPE, CLOSED_TYPE})
-  private String type;
-
-  @Setup
-  public void initialize() throws IOException {
-    store = GenesisUtil.getMetadataStore(this.type);
-    byte[] data = RandomStringUtils.randomAlphanumeric(DATA_LEN)
-        .getBytes(Charset.forName("UTF-8"));
-    for (int x = 0; x < MAX_KEYS; x++) {
-      store.put(Long.toHexString(x).getBytes(Charset.forName("UTF-8")), data);
-    }
-    if (type.compareTo(CLOSED_TYPE) == 0) {
-      store.compactDB();
-    }
-  }
-
-  @Benchmark
-  public void test(Blackhole bh) throws IOException {
-    long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, MAX_KEYS);
-    bh.consume(
-        store.get(Long.toHexString(x).getBytes(Charset.forName("UTF-8"))));
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java
deleted file mode 100644
index aa7aedd..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.genesis;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.Param;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-
-import java.io.IOException;
-import java.nio.charset.Charset;
-
-import static org.apache.hadoop.ozone.genesis.GenesisUtil.CACHE_10MB_TYPE;
-import static org.apache.hadoop.ozone.genesis.GenesisUtil.CACHE_1GB_TYPE;
-import static org.apache.hadoop.ozone.genesis.GenesisUtil.DEFAULT_TYPE;
-
-/**
- * Measure default metadatastore put performance.
- */
-@State(Scope.Thread)
-public class BenchMarkMetadataStoreWrites {
-
-  private static final int DATA_LEN = 1024;
-  private static final long MAX_KEYS = 1024 * 10;
-
-  private MetadataStore store;
-  private byte[] data;
-
-  @Param({DEFAULT_TYPE, CACHE_10MB_TYPE, CACHE_1GB_TYPE})
-  private String type;
-
-  @Setup
-  public void initialize() throws IOException {
-    data = RandomStringUtils.randomAlphanumeric(DATA_LEN)
-        .getBytes(Charset.forName("UTF-8"));
-    store = GenesisUtil.getMetadataStore(this.type);
-  }
-
-  @Benchmark
-  public void test() throws IOException {
-    long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, MAX_KEYS);
-    store.put(Long.toHexString(x).getBytes(Charset.forName("UTF-8")), data);
-  }
-}
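Both metadata-store benchmarks above share one encoding scheme: keys are hex-encoded longs drawn uniformly from [0, MAX_KEYS) and values are fixed-length random byte arrays, so after setup every read lands on a pre-populated key. A small sketch of that round trip, with a plain HashMap standing in for the MetadataStore interface:

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;

public final class KeyEncodingDemo {
  private KeyEncodingDemo() { }

  public static void main(String[] args) {
    Map<String, byte[]> store = new HashMap<>(); // stand-in for MetadataStore
    byte[] value = new byte[1024];               // DATA_LEN-sized payload
    ThreadLocalRandom.current().nextBytes(value);

    long maxKeys = 1024 * 10;
    long x = ThreadLocalRandom.current().nextLong(0L, maxKeys);
    String key = Long.toHexString(x);            // same key encoding as above
    store.put(key, value);

    byte[] wireKey = key.getBytes(StandardCharsets.UTF_8); // bytes the store sees
    System.out.printf("key %s (%d bytes) -> %d-byte value%n",
        key, wireKey.length, store.get(key).length);
  }
}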
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOMClient.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOMClient.java
deleted file mode 100644
index cfc1e1e..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOMClient.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.ozone.genesis;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.helpers.*;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ratis.protocol.ClientId;
-import org.openjdk.jmh.annotations.*;
-import org.openjdk.jmh.infra.Blackhole;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.*;
-import java.util.concurrent.locks.ReentrantLock;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-
-/**
- * Benchmarks OM Client.
- */
-@State(Scope.Thread)
-public class BenchMarkOMClient {
-
-  private static String testDir;
-  private static ReentrantLock lock = new ReentrantLock();
-  private static String volumeName = UUID.randomUUID().toString();
-  private static String bucketName = UUID.randomUUID().toString();
-  private static List<String> keyNames = new ArrayList<>();
-  private static List<Long> clientIDs = new ArrayList<>();
-  private static OzoneManagerProtocolClientSideTranslatorPB ozoneManagerClient;
-  private static volatile boolean initialized = false;
-
-  @Setup(Level.Trial)
-  public static void initialize() throws IOException {
-    try {
-      lock.lock();
-      if (!initialized) {
-        initialized = true;
-        OzoneConfiguration conf = new OzoneConfiguration();
-        conf.setBoolean(OZONE_ENABLED, true);
-        testDir = GenesisUtil.getTempPath()
-            .resolve(RandomStringUtils.randomNumeric(7)).toString();
-        conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir);
-
-        // set the ip address and port number for the OM service
-        conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "OMADDR:PORT");
-        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-        long omVersion = RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
-        InetSocketAddress omAddress = OmUtils.getOmAddressForClients(conf);
-        RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
-            ProtobufRpcEngine.class);
-        ozoneManagerClient = new OzoneManagerProtocolClientSideTranslatorPB(
-            RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, omAddress,
-                ugi, conf, NetUtils.getDefaultSocketFactory(conf),
-                Client.getRpcTimeout(conf)), ClientId.randomId().toString());
-
-        // prepare OM
-        ozoneManagerClient.createVolume(
-            new OmVolumeArgs.Builder().setVolume(volumeName)
-                .setAdminName(UserGroupInformation.getLoginUser().getUserName())
-                .setOwnerName(UserGroupInformation.getLoginUser().getUserName())
-                .build());
-        ozoneManagerClient.createBucket(
-            new OmBucketInfo.Builder().setBucketName(bucketName)
-                .setVolumeName(volumeName).build());
-        createKeys(10);
-      }
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  private static void createKeys(int numKeys) throws IOException {
-    for (int i = 0; i < numKeys; i++) {
-      String key = UUID.randomUUID().toString();
-      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-          .setVolumeName(volumeName)
-          .setBucketName(bucketName)
-          .setKeyName(key)
-          .setDataSize(0)
-          .setFactor(HddsProtos.ReplicationFactor.ONE)
-          .setType(HddsProtos.ReplicationType.RATIS)
-          .build();
-      OpenKeySession keySession = ozoneManagerClient.openKey(omKeyArgs);
-      long clientID = keySession.getId();
-      keyNames.add(key);
-      clientIDs.add(clientID);
-    }
-  }
-
-  @TearDown(Level.Trial)
-  public static void tearDown() throws IOException {
-    try {
-      lock.lock();
-      if (ozoneManagerClient != null) {
-        ozoneManagerClient.close();
-        ozoneManagerClient = null;
-        FileUtil.fullyDelete(new File(testDir));
-      }
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  @Threads(6)
-  @Benchmark
-  public void allocateBlockBenchMark(BenchMarkOMClient state,
-      Blackhole bh) throws IOException {
-    int index = (int) (Math.random() * keyNames.size());
-    String key = keyNames.get(index);
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(key)
-        .setDataSize(50)
-        .setFactor(HddsProtos.ReplicationFactor.ONE)
-        .setType(HddsProtos.ReplicationType.RATIS)
-        .build();
-    state.ozoneManagerClient
-        .allocateBlock(omKeyArgs, clientIDs.get(index), new ExcludeList());
-  }
-}
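The client benchmark above initializes its shared static state exactly once across JMH threads by pairing a ReentrantLock with a volatile boolean flag. The same pattern in isolation (the resource name is illustrative):

import java.util.concurrent.locks.ReentrantLock;

public final class OneTimeInit {
  private static final ReentrantLock LOCK = new ReentrantLock();
  private static volatile boolean initialized = false;
  private static String sharedResource;            // illustrative shared state

  private OneTimeInit() { }

  public static void initialize() {
    LOCK.lock();                                   // acquire before the try block
    try {
      if (!initialized) {
        initialized = true;
        sharedResource = "expensive-shared-state"; // built exactly once
      }
    } finally {
      LOCK.unlock();
    }
  }
}

Acquiring the lock before entering the try block is the safer idiom: if lock() were called inside the try and failed, the finally would attempt to unlock a lock the thread never held.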
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOMKeyAllocation.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOMKeyAllocation.java
deleted file mode 100644
index fbb686a..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOMKeyAllocation.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.genesis;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.BucketManager;
-import org.apache.hadoop.ozone.om.BucketManagerImpl;
-import org.apache.hadoop.ozone.om.KeyManager;
-import org.apache.hadoop.ozone.om.KeyManagerImpl;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.VolumeManager;
-import org.apache.hadoop.ozone.om.VolumeManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.Level;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.TearDown;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-/**
- * Benchmark key creation in a bucket in OM.
- */
-@State(Scope.Thread)
-public class BenchMarkOMKeyAllocation {
-
-  private static final String TMP_DIR = "java.io.tmpdir";
-  private String volumeName = UUID.randomUUID().toString();
-  private String bucketName = UUID.randomUUID().toString();
-  private KeyManager keyManager;
-  private VolumeManager volumeManager;
-  private BucketManager bucketManager;
-  private String path = Paths.get(System.getProperty(TMP_DIR)).resolve(
-      RandomStringUtils.randomNumeric(6)).toFile()
-            .getAbsolutePath();
-
-  @Setup(Level.Trial)
-  public void setup() throws IOException {
-    OzoneConfiguration configuration = new OzoneConfiguration();
-    configuration.set(OMConfigKeys.OZONE_OM_DB_DIRS, path);
-
-    OmMetadataManagerImpl omMetadataManager =
-        new OmMetadataManagerImpl(configuration);
-    volumeManager = new VolumeManagerImpl(omMetadataManager, configuration);
-    bucketManager = new BucketManagerImpl(omMetadataManager);
-
-    volumeManager.createVolume(new OmVolumeArgs.Builder().setVolume(volumeName)
-        .setAdminName(UserGroupInformation.getLoginUser().getUserName())
-        .setOwnerName(UserGroupInformation.getLoginUser().getUserName())
-        .build());
-
-    bucketManager.createBucket(new OmBucketInfo.Builder()
-        .setBucketName(bucketName)
-        .setVolumeName(volumeName).build());
-
-    keyManager = new KeyManagerImpl(null, omMetadataManager, configuration,
-        UUID.randomUUID().toString(), null);
-  }
-
-  @TearDown(Level.Trial)
-  public void cleanup() throws IOException {
-    // stop the key manager before removing its database directory
-    keyManager.stop();
-    FileUtils.deleteDirectory(new File(path));
-  }
-
-  @Benchmark
-  public void keyCreation() throws IOException {
-    List<OmKeyLocationInfo> keyLocationInfos = getKeyInfoList();
-
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(UUID.randomUUID().toString())
-        .setDataSize(0)
-        .setFactor(HddsProtos.ReplicationFactor.THREE)
-        .setType(HddsProtos.ReplicationType.RATIS).build();
-    OpenKeySession openKeySession = keyManager.openKey(omKeyArgs);
-    // setting location info list
-    omKeyArgs.setLocationInfoList(keyLocationInfos);
-    keyManager.commitKey(omKeyArgs, openKeySession.getId());
-  }
-
-  public List<OmKeyLocationInfo> getKeyInfoList() {
-    List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
-
-    omKeyLocationInfoList.add(getKeyInfo());
-    omKeyLocationInfoList.add(getKeyInfo());
-
-    return omKeyLocationInfoList;
-  }
-
-  public OmKeyLocationInfo getKeyInfo() {
-    return new OmKeyLocationInfo.Builder().setBlockID(
-        new BlockID(RandomUtils.nextLong(0, 100000000),
-            RandomUtils.nextLong(0, 10000000)))
-        .setLength(RandomUtils.nextLong(0, 10000000))
-        .setOffset(0).build();
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java
deleted file mode 100644
index 45c90d3..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.ozone.genesis;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.helpers.*;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.openjdk.jmh.annotations.*;
-import org.openjdk.jmh.infra.Blackhole;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.locks.ReentrantLock;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-
-/**
- * Benchmarks OzoneManager.
- */
-@State(Scope.Thread)
-public class BenchMarkOzoneManager {
-
-  private static String testDir;
-  private static OzoneManager om;
-  private static StorageContainerManager scm;
-  private static ReentrantLock lock = new ReentrantLock();
-  private static String volumeName = UUID.randomUUID().toString();
-  private static String bucketName = UUID.randomUUID().toString();
-  private static List<String> keyNames = new ArrayList<>();
-  private static List<Long> clientIDs = new ArrayList<>();
-
-  private static int numPipelines = 1;
-  private static int numContainersPerPipeline = 3;
-
-  @Setup(Level.Trial)
-  public static void initialize()
-      throws IOException, AuthenticationException, InterruptedException {
-    try {
-      lock.lock();
-      if (scm == null) {
-        OzoneConfiguration conf = new OzoneConfiguration();
-        conf.setBoolean(OZONE_ENABLED, true);
-        testDir = GenesisUtil.getTempPath()
-            .resolve(RandomStringUtils.randomNumeric(7)).toString();
-        conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir);
-
-        GenesisUtil.configureSCM(conf, 10);
-        GenesisUtil.configureOM(conf, 20);
-        conf.setInt(OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
-            numContainersPerPipeline);
-        GenesisUtil.addPipelines(ReplicationFactor.THREE, numPipelines, conf);
-
-        scm = GenesisUtil.getScm(conf, new SCMConfigurator());
-        scm.start();
-        om = GenesisUtil.getOm(conf);
-        om.start();
-
-        // prepare SCM
-        PipelineManager pipelineManager = scm.getPipelineManager();
-        for (Pipeline pipeline : pipelineManager
-            .getPipelines(ReplicationType.RATIS, ReplicationFactor.THREE)) {
-          pipelineManager.openPipeline(pipeline.getId());
-        }
-        scm.getEventQueue().fireEvent(SCMEvents.SAFE_MODE_STATUS,
-            new SCMSafeModeManager.SafeModeStatus(false));
-        Thread.sleep(1000);
-
-        // prepare OM
-        om.createVolume(new OmVolumeArgs.Builder().setVolume(volumeName)
-            .setAdminName(UserGroupInformation.getLoginUser().getUserName())
-            .setOwnerName(UserGroupInformation.getLoginUser().getUserName())
-            .build());
-        om.createBucket(new OmBucketInfo.Builder().setBucketName(bucketName)
-            .setVolumeName(volumeName).build());
-        createKeys(100000);
-      }
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  private static void createKeys(int numKeys) throws IOException {
-    for (int i = 0; i < numKeys; i++) {
-      String key = UUID.randomUUID().toString();
-      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-          .setVolumeName(volumeName)
-          .setBucketName(bucketName)
-          .setKeyName(key)
-          .setDataSize(0)
-          .setFactor(HddsProtos.ReplicationFactor.THREE)
-          .setType(HddsProtos.ReplicationType.RATIS)
-          .build();
-      OpenKeySession keySession = om.getKeyManager().openKey(omKeyArgs);
-      long clientID = keySession.getId();
-      keyNames.add(key);
-      clientIDs.add(clientID);
-    }
-  }
-
-  @TearDown(Level.Trial)
-  public static void tearDown() {
-    try {
-      lock.lock();
-      if (scm != null) {
-        scm.stop();
-        scm.join();
-        scm = null;
-        om.stop();
-        om.join();
-        om = null;
-        FileUtil.fullyDelete(new File(testDir));
-      }
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  @Threads(4)
-  @Benchmark
-  public void allocateBlockBenchMark(BenchMarkOzoneManager state,
-      Blackhole bh) throws IOException {
-    int index = (int) (Math.random() * keyNames.size());
-    String key = keyNames.get(index);
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(key)
-        .setDataSize(50)
-        .setFactor(HddsProtos.ReplicationFactor.THREE)
-        .setType(HddsProtos.ReplicationType.RATIS)
-        .build();
-    state.om.allocateBlock(omKeyArgs, clientIDs.get(index), new ExcludeList());
-  }
-
-  @Threads(4)
-  @Benchmark
-  public void createAndCommitKeyBenchMark(BenchMarkOzoneManager state,
-      Blackhole bh) throws IOException {
-    String key = UUID.randomUUID().toString();
-    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(key)
-        .setDataSize(50)
-        .setFactor(HddsProtos.ReplicationFactor.THREE)
-        .setType(HddsProtos.ReplicationType.RATIS)
-        .build();
-    OpenKeySession openKeySession = state.om.openKey(omKeyArgs);
-    state.om.allocateBlock(omKeyArgs, openKeySession.getId(),
-        new ExcludeList());
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java
deleted file mode 100644
index daf44ec..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-package org.apache.hadoop.ozone.genesis;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.apache.hadoop.hdds.utils.RocksDBStore;
-import org.openjdk.jmh.annotations.*;
-import org.openjdk.jmh.infra.Blackhole;
-import org.rocksdb.*;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.charset.Charset;
-import java.nio.file.Paths;
-
-/**
- * Benchmark rocksdb store.
- */
-@State(Scope.Thread)
-public class BenchMarkRocksDbStore {
-  private static final int DATA_LEN = 1024;
-  private static final long MAX_KEYS = 1024 * 10;
-  private static final int DB_FILE_LEN = 7;
-  private static final String TMP_DIR = "java.io.tmpdir";
-
-  private MetadataStore store;
-  private byte[] data;
-  private File dbFile;
-
-  @Param(value = {"8"})
-  private String blockSize; // 4KB default
-
-  @Param(value = {"64"})
-  private String writeBufferSize; //64 MB default
-
-  @Param(value = {"16"})
-  private String maxWriteBufferNumber; // 2 default
-
-  @Param(value = {"4"})
-  private String maxBackgroundFlushes; // 1 default
-
-  @Param(value = {"512"})
-  private String maxBytesForLevelBase;
-
-  @Param(value = {"4"})
-  private String backgroundThreads;
-
-  @Param(value = {"5000"})
-  private String maxOpenFiles;
-
-  @Setup(Level.Trial)
-  public void initialize() throws IOException {
-    data = RandomStringUtils.randomAlphanumeric(DATA_LEN)
-        .getBytes(Charset.forName("UTF-8"));
-    org.rocksdb.Options opts = new org.rocksdb.Options();
-    dbFile = Paths.get(System.getProperty(TMP_DIR))
-        .resolve(RandomStringUtils.randomNumeric(DB_FILE_LEN))
-        .toFile();
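-    // Tune RocksDB for the benchmark: sized write buffers, universal
-    // compaction, and a block-based table with bloom filter and hash index.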
-    opts.setCreateIfMissing(true);
-    opts.setWriteBufferSize(
-        (long) StorageUnit.MB.toBytes(Long.parseLong(writeBufferSize)));
-    opts.setMaxWriteBufferNumber(Integer.parseInt(maxWriteBufferNumber));
-    opts.setMaxBackgroundFlushes(Integer.parseInt(maxBackgroundFlushes));
-    BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
-    tableConfig.setBlockSize(
-        (long) StorageUnit.KB.toBytes(Long.parseLong(blockSize)));
-    opts.setMaxOpenFiles(Integer.parseInt(maxOpenFiles));
-    opts.setMaxBytesForLevelBase(
-        (long) StorageUnit.MB.toBytes(Long.parseLong(maxBytesForLevelBase)));
-    opts.setCompactionStyle(CompactionStyle.UNIVERSAL);
-    opts.setLevel0FileNumCompactionTrigger(10);
-    opts.setLevel0SlowdownWritesTrigger(20);
-    opts.setLevel0StopWritesTrigger(40);
-    opts.setTargetFileSizeBase(
-        (long) StorageUnit.MB.toBytes(Long.parseLong(maxBytesForLevelBase))
-            / 10);
-    opts.setMaxBackgroundCompactions(8);
-    opts.setUseFsync(false);
-    opts.setBytesPerSync(8388608);
-    org.rocksdb.Filter bloomFilter = new org.rocksdb.BloomFilter(20);
-    tableConfig.setCacheIndexAndFilterBlocks(true);
-    tableConfig.setIndexType(IndexType.kHashSearch);
-    tableConfig.setFilter(bloomFilter);
-    opts.setTableFormatConfig(tableConfig);
-    opts.useCappedPrefixExtractor(4);
-    store = new RocksDBStore(dbFile, opts);
-  }
-
-  @TearDown(Level.Trial)
-  public void cleanup() throws IOException {
-    store.destroy();
-    FileUtils.deleteDirectory(dbFile);
-  }
-
-  @Benchmark
-  public void test(Blackhole bh) throws IOException {
-    long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, MAX_KEYS);
-    store.put(Long.toHexString(x).getBytes(Charset.forName("UTF-8")), data);
-    bh.consume(
-        store.get(Long.toHexString(x).getBytes(Charset.forName("UTF-8"))));
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java
deleted file mode 100644
index 6ec0833c..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.ozone.genesis;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.concurrent.locks.ReentrantLock;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.block.BlockManager;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.openjdk.jmh.annotations.*;
-import org.openjdk.jmh.infra.Blackhole;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-
-/**
- * Benchmarks BlockManager class.
- */
-@State(Scope.Thread)
-public class BenchMarkSCM {
-
-  private static String testDir;
-  private static StorageContainerManager scm;
-  private static BlockManager blockManager;
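-  // The SCM instance is shared by all benchmark threads; this lock guards
-  // its one-time setup and teardown.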
-  private static ReentrantLock lock = new ReentrantLock();
-
-  @Param({ "1", "10", "100", "1000", "10000", "100000" })
-  private static int numPipelines;
-  @Param({ "3", "10", "100" })
-  private static int numContainersPerPipeline;
-
-  @Setup(Level.Trial)
-  public static void initialize()
-      throws IOException, AuthenticationException, InterruptedException {
-    try {
-      lock.lock();
-      if (scm == null) {
-        OzoneConfiguration conf = new OzoneConfiguration();
-        conf.setBoolean(OZONE_ENABLED, true);
-        testDir = GenesisUtil.getTempPath()
-            .resolve(RandomStringUtils.randomNumeric(7)).toString();
-        conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir);
-
-        GenesisUtil.configureSCM(conf, 10);
-        conf.setInt(OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
-            numContainersPerPipeline);
-        GenesisUtil.addPipelines(ReplicationFactor.THREE, numPipelines, conf);
-
-        scm = GenesisUtil.getScm(conf, new SCMConfigurator());
-        scm.start();
-        blockManager = scm.getScmBlockManager();
-
-        // prepare SCM
-        PipelineManager pipelineManager = scm.getPipelineManager();
-        for (Pipeline pipeline : pipelineManager
-            .getPipelines(ReplicationType.RATIS, ReplicationFactor.THREE)) {
-          pipelineManager.openPipeline(pipeline.getId());
-        }
-        scm.getEventQueue().fireEvent(SCMEvents.SAFE_MODE_STATUS,
-            new SCMSafeModeManager.SafeModeStatus(false));
-        Thread.sleep(1000);
-      }
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  @TearDown(Level.Trial)
-  public static void tearDown() {
-    try {
-      lock.lock();
-      if (scm != null) {
-        scm.stop();
-        scm.join();
-        scm = null;
-        FileUtil.fullyDelete(new File(testDir));
-      }
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  @Threads(4)
-  @Benchmark
-  public void allocateBlockBenchMark(BenchMarkSCM state,
-      Blackhole bh) throws IOException {
-    bh.consume(state.blockManager
-        .allocateBlock(50, ReplicationType.RATIS, ReplicationFactor.THREE,
-            "Genesis", new ExcludeList()));
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
deleted file mode 100644
index 2de9b0f..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.ozone.genesis;
-
-import org.openjdk.jmh.profile.StackProfiler;
-import org.openjdk.jmh.runner.Runner;
-import org.openjdk.jmh.runner.RunnerException;
-import org.openjdk.jmh.runner.options.OptionsBuilder;
-import picocli.CommandLine;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.Command;
-
-/**
- * Main class that executes a set of HDDS/Ozone benchmarks.
- * We purposefully don't use the runner and tools classes from Hadoop.
- * There are some name collisions with the OpenJDK JMH package.
- * <p>
- * Hence, these classes do not use the Tool/Runner pattern of standard Hadoop
- * CLI.
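- * <p>
- * For example, {@code ozone genesis -benchmark BenchMarkSCM -t 8} runs the
- * BenchMarkSCM benchmark with eight threads.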
- */
-@Command(name = "ozone genesis",
-    description = "Tool for running ozone benchmarks",
-    mixinStandardHelpOptions = true)
-public final class Genesis {
-
-  // After adding benchmark in genesis package add the benchmark name in the
-  // description for this option.
-  @Option(names = "-benchmark", split = ",", description =
-      "Option used for specifying benchmarks to run.\n"
-          + "Ex. ozone genesis -benchmark BenchMarkContainerStateMap,"
-          + "BenchMarkOMKeyAllocation.\n"
-          + "Possible benchmarks which can be used are "
-          + "{BenchMarkContainerStateMap, BenchMarkOMKeyAllocation, "
-          + "BenchMarkOzoneManager, BenchMarkOMClient, "
-          + "BenchMarkSCM, BenchMarkMetadataStoreReads, "
-          + "BenchMarkMetadataStoreWrites, BenchMarkDatanodeDispatcher, "
-          + "BenchMarkRocksDbStore}")
-  private static String[] benchmarks;
-
-  @Option(names = "-t", defaultValue = "4",
-      description = "Number of threads to use for the benchmark.\n"
-          + "This option can be overridden by threads mentioned in benchmark.")
-  private static int numThreads;
-
-  private Genesis() {
-  }
-
-  public static void main(String[] args) throws RunnerException {
-    CommandLine commandLine = new CommandLine(new Genesis());
-    commandLine.parse(args);
-    if (commandLine.isUsageHelpRequested()) {
-      commandLine.usage(System.out);
-      return;
-    }
-
-    OptionsBuilder optionsBuilder = new OptionsBuilder();
-    if (benchmarks != null) {
-      // The OptionsBuilder#include method takes a regular expression as its
-      // argument, so it is important to keep benchmark names unique when
-      // selecting benchmarks to run. For example, if there are two
-      // benchmarks, BenchMarkOM and BenchMarkOMClient, and we include
-      // BenchMarkOM, then both benchmarks will be run.
-      for (String benchmark : benchmarks) {
-        optionsBuilder.include(benchmark);
-      }
-    }
-    optionsBuilder.warmupIterations(2)
-        .measurementIterations(20)
-        .addProfiler(StackProfiler.class)
-        .shouldDoGC(true)
-        .forks(1)
-        .threads(numThreads);
-
-    new Runner(optionsBuilder.build()).run();
-  }
-}
-
-
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
deleted file mode 100644
index 8ba19fc..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.ozone.genesis;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.openjdk.jmh.infra.BenchmarkParams;
-import org.openjdk.jmh.infra.IterationParams;
-import org.openjdk.jmh.profile.InternalProfiler;
-import org.openjdk.jmh.results.AggregationPolicy;
-import org.openjdk.jmh.results.IterationResult;
-import org.openjdk.jmh.results.Result;
-import org.openjdk.jmh.results.ScalarResult;
-
-import java.util.ArrayList;
-import java.util.Collection;
-
-/**
- * Max memory profiler.
- */
-public class GenesisMemoryProfiler implements InternalProfiler {
-  @Override
-  public void beforeIteration(BenchmarkParams benchmarkParams,
-      IterationParams iterationParams) {
-
-  }
-
-  @Override
-  public Collection<? extends Result> afterIteration(BenchmarkParams
-      benchmarkParams, IterationParams iterationParams, IterationResult
-      result) {
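-    // totalMemory() samples the current heap size; AggregationPolicy.MAX
-    // below turns the samples into the peak observed across iterations.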
-    long totalHeap = Runtime.getRuntime().totalMemory();
-
-    Collection<ScalarResult> samples = new ArrayList<>();
-    samples.add(new ScalarResult("Max heap",
-        StorageUnit.BYTES.toGBs(totalHeap), "GBs",
-        AggregationPolicy.MAX));
-    return samples;
-  }
-
-  @Override
-  public String getDescription() {
-    return "Genesis Memory Profiler. Computes Max Memory used by a test.";
-  }
-}
-
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
deleted file mode 100644
index 969f9f1..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
+++ /dev/null
@@ -1,200 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.genesis;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMStorage;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.ozone.OzoneConsts.SCM_PIPELINE_DB;
-
-/**
- * Utility class for benchmark test cases.
- */
-public final class GenesisUtil {
-
-  private GenesisUtil() {
-    // private constructor.
-  }
-
-  public static final String DEFAULT_TYPE = "default";
-  public static final String CACHE_10MB_TYPE = "Cache10MB";
-  public static final String CACHE_1GB_TYPE = "Cache1GB";
-  public static final String CLOSED_TYPE = "ClosedContainer";
-
-  private static final int DB_FILE_LEN = 7;
-  private static final String TMP_DIR = "java.io.tmpdir";
-
-  public static Path getTempPath() {
-    return Paths.get(System.getProperty(TMP_DIR));
-  }
-
-  public static MetadataStore getMetadataStore(String dbType)
-      throws IOException {
-    Configuration conf = new Configuration();
-    MetadataStoreBuilder builder = MetadataStoreBuilder.newBuilder();
-    builder.setConf(conf);
-    builder.setCreateIfMissing(true);
-    builder.setDbFile(
-        getTempPath().resolve(RandomStringUtils.randomNumeric(DB_FILE_LEN))
-            .toFile());
-    switch (dbType) {
-    case DEFAULT_TYPE:
-      break;
-    case CLOSED_TYPE:
-      break;
-    case CACHE_10MB_TYPE:
-      builder.setCacheSize((long) StorageUnit.MB.toBytes(10));
-      break;
-    case CACHE_1GB_TYPE:
-      builder.setCacheSize((long) StorageUnit.GB.toBytes(1));
-      break;
-    default:
-      throw new IllegalStateException("Unknown type: " + dbType);
-    }
-    return builder.build();
-  }
-
-  public static DatanodeDetails createDatanodeDetails(String uuid) {
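-    // Fabricate a datanode with a random IPv4 address; all ports are 0
-    // because these datanodes are never actually started.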
-    Random random = new Random();
-    String ipAddress =
-        random.nextInt(256) + "." + random.nextInt(256) + "." + random
-            .nextInt(256) + "." + random.nextInt(256);
-
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, 0);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, 0);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, 0);
-    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
-    builder.setUuid(uuid)
-        .setHostName("localhost")
-        .setIpAddress(ipAddress)
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort);
-    return builder.build();
-  }
-
-  static StorageContainerManager getScm(OzoneConfiguration conf,
-      SCMConfigurator configurator) throws IOException,
-      AuthenticationException {
-    SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-    if (scmStore.getState() != Storage.StorageState.INITIALIZED) {
-      String clusterId = UUID.randomUUID().toString();
-      String scmId = UUID.randomUUID().toString();
-      scmStore.setClusterId(clusterId);
-      scmStore.setScmId(scmId);
-      // writes the version file properties
-      scmStore.initialize();
-    }
-    return new StorageContainerManager(conf, configurator);
-  }
-
-  static void configureSCM(Configuration conf, int numHandlers) {
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
-    conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-    conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numHandlers);
-  }
-
-  static void addPipelines(HddsProtos.ReplicationFactor factor,
-      int numPipelines, Configuration conf) throws IOException {
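-    // Pre-populate SCM's pipeline DB on disk so that, once started, SCM
-    // already sees the requested number of RATIS pipelines.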
-    final File metaDir = ServerUtils.getScmDbDir(conf);
-    final File pipelineDBPath = new File(metaDir, SCM_PIPELINE_DB);
-    int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
-        OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-    MetadataStore pipelineStore =
-        MetadataStoreBuilder.newBuilder().setCreateIfMissing(true)
-            .setConf(conf).setDbFile(pipelineDBPath)
-            .setCacheSize(cacheSize * OzoneConsts.MB).build();
-
-    List<DatanodeDetails> nodes = new ArrayList<>();
-    for (int i = 0; i < factor.getNumber(); i++) {
-      nodes
-          .add(GenesisUtil.createDatanodeDetails(UUID.randomUUID().toString()));
-    }
-    for (int i = 0; i < numPipelines; i++) {
-      Pipeline pipeline =
-          Pipeline.newBuilder()
-              .setState(Pipeline.PipelineState.OPEN)
-              .setId(PipelineID.randomId())
-              .setType(HddsProtos.ReplicationType.RATIS)
-              .setFactor(factor)
-              .setNodes(nodes)
-              .build();
-      pipelineStore.put(pipeline.getId().getProtobuf().toByteArray(),
-          pipeline.getProtobufMessage().toByteArray());
-    }
-
-    pipelineStore.close();
-  }
-
-  static OzoneManager getOm(OzoneConfiguration conf)
-      throws IOException, AuthenticationException {
-    OMStorage omStorage = new OMStorage(conf);
-    SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-    if (omStorage.getState() != Storage.StorageState.INITIALIZED) {
-      omStorage.setClusterId(scmStore.getClusterID());
-      omStorage.setScmId(scmStore.getScmId());
-      omStorage.setOmId(UUID.randomUUID().toString());
-      omStorage.initialize();
-    }
-    return OzoneManager.createOm(conf);
-  }
-
-  static void configureOM(Configuration conf, int numHandlers) {
-    conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-    conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numHandlers);
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/package-info.java
deleted file mode 100644
index a7c8ee2..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/package-info.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-/**
- * Genesis contains a set of benchmarks for Ozone. It is a command line tool
- * that end users can run to get a sense of what kind of performance the
- * system is capable of. Since Ozone is a new system, these benchmarks give
- * us a baseline to correlate with real-world performance.
- */
-package org.apache.hadoop.ozone.genesis;
\ No newline at end of file
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
deleted file mode 100644
index cc31619..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ /dev/null
@@ -1,562 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.scm.cli;
-
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.OptionBuilder;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.ParseException;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserVolumeInfo;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.charset.Charset;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_USER_PREFIX;
-
-/**
- * This is the CLI that can be used to convert an ozone metadata DB into
- * a sqlite DB file.
- *
- * NOTE: this CLI should be used in an offline fashion. Namely, it should not
- * be used to convert a DB that is currently being used by Ozone. Instead,
- * it should be used to debug and diagnose closed DB instances.
- *
- */
-public class SQLCLI  extends Configured implements Tool {
-
-  private Options options;
-  private BasicParser parser;
-  private final Charset encoding = Charset.forName("UTF-8");
-  private final OzoneConfiguration conf;
-
-  // for container.db
-  private static final String CREATE_CONTAINER_INFO =
-      "CREATE TABLE containerInfo (" +
-          "containerID LONG PRIMARY KEY NOT NULL, " +
-          "replicationType TEXT NOT NULL," +
-          "replicationFactor TEXT NOT NULL," +
-          "usedBytes LONG NOT NULL," +
-          "owner TEXT," +
-          "numberOfKeys LONG)";
-  private static final String CREATE_DATANODE_INFO =
-      "CREATE TABLE datanodeInfo (" +
-          "hostName TEXT NOT NULL, " +
-          "datanodeUUId TEXT PRIMARY KEY NOT NULL," +
-          "ipAddress TEXT, " +
-          "containerPort INTEGER NOT NULL);";
-  private static final String INSERT_CONTAINER_INFO =
-      "INSERT INTO containerInfo (containerID, replicationType, "
-          + "replicationFactor, usedBytes, owner, "
-          + "numberOfKeys) VALUES (\"%d\", \"%s\", \"%s\", \"%d\", "
-          + "\"%s\", \"%d\")";
-  private static final String INSERT_DATANODE_INFO =
-      "INSERT INTO datanodeInfo (hostname, datanodeUUid, ipAddress, " +
-          "containerPort) " +
-          "VALUES (\"%s\", \"%s\", \"%s\", \"%d\")";
-  private static final String INSERT_CONTAINER_MEMBERS =
-      "INSERT INTO containerMembers (containerName, datanodeUUID) " +
-          "VALUES (\"%s\", \"%s\")";
-  // and reuse CREATE_DATANODE_INFO and INSERT_DATANODE_INFO
-  // for openContainer.db
-  private static final String CREATE_OPEN_CONTAINER =
-      "CREATE TABLE openContainer (" +
-          "containerName TEXT PRIMARY KEY NOT NULL, " +
-          "containerUsed INTEGER NOT NULL)";
-  private static final String INSERT_OPEN_CONTAINER =
-      "INSERT INTO openContainer (containerName, containerUsed) " +
-          "VALUES (\"%s\", \"%s\")";
-
-  // for om.db
-  private static final String CREATE_VOLUME_LIST =
-      "CREATE TABLE volumeList (" +
-          "userName TEXT NOT NULL," +
-          "volumeName TEXT NOT NULL," +
-          "PRIMARY KEY (userName, volumeName))";
-  private static final String INSERT_VOLUME_LIST =
-      "INSERT INTO volumeList (userName, volumeName) " +
-          "VALUES (\"%s\", \"%s\")";
-
-  private static final String CREATE_VOLUME_INFO =
-      "CREATE TABLE volumeInfo (" +
-          "adminName TEXT NOT NULL," +
-          "ownerName TEXT NOT NULL," +
-          "volumeName TEXT NOT NULL," +
-          "PRIMARY KEY (adminName, ownerName, volumeName))";
-  private static final String INSERT_VOLUME_INFO =
-      "INSERT INTO volumeInfo (adminName, ownerName, volumeName) " +
-          "VALUES (\"%s\", \"%s\", \"%s\")";
-
-  private static final String CREATE_ACL_INFO =
-      "CREATE TABLE aclInfo (" +
-          "adminName TEXT NOT NULL," +
-          "ownerName TEXT NOT NULL," +
-          "volumeName TEXT NOT NULL," +
-          "type TEXT NOT NULL," +
-          "userName TEXT NOT NULL," +
-          "rights TEXT NOT NULL," +
-          "FOREIGN KEY (adminName, ownerName, volumeName, userName, type)" +
-          "REFERENCES " +
-          "volumeInfo(adminName, ownerName, volumeName, userName, type)" +
-          "PRIMARY KEY (adminName, ownerName, volumeName, userName, type))";
-  private static final String INSERT_ACL_INFO =
-      "INSERT INTO aclInfo (adminName, ownerName, volumeName, type, " +
-          "userName, rights) " +
-          "VALUES (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")";
-
-  private static final String CREATE_BUCKET_INFO =
-      "CREATE TABLE bucketInfo (" +
-          "volumeName TEXT NOT NULL," +
-          "bucketName TEXT NOT NULL," +
-          "versionEnabled BOOLEAN NOT NULL," +
-          "storageType TEXT," +
-          "PRIMARY KEY (volumeName, bucketName))";
-  private static final String INSERT_BUCKET_INFO =
-      "INSERT INTO bucketInfo(volumeName, bucketName, " +
-          "versionEnabled, storageType)" +
-          "VALUES (\"%s\", \"%s\", \"%s\", \"%s\")";
-
-  private static final String CREATE_KEY_INFO =
-      "CREATE TABLE keyInfo (" +
-          "volumeName TEXT NOT NULL," +
-          "bucketName TEXT NOT NULL," +
-          "keyName TEXT NOT NULL," +
-          "dataSize INTEGER," +
-          "blockKey TEXT NOT NULL," +
-          "containerName TEXT NOT NULL," +
-          "PRIMARY KEY (volumeName, bucketName, keyName))";
-  private static final String INSERT_KEY_INFO =
-      "INSERT INTO keyInfo (volumeName, bucketName, keyName, dataSize, " +
-          "blockKey, containerName)" +
-          "VALUES (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")";
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SQLCLI.class);
-
-  public SQLCLI(OzoneConfiguration conf) {
-    this.options = getOptions();
-    this.parser = new BasicParser();
-    this.conf = conf;
-  }
-
-  @SuppressWarnings("static-access")
-  private Options getOptions() {
-    Options allOptions = new Options();
-    Option helpOpt = OptionBuilder
-        .hasArg(false)
-        .withLongOpt("help")
-        .withDescription("display help message")
-        .create("h");
-    allOptions.addOption(helpOpt);
-
-    Option dbPathOption = OptionBuilder
-        .withArgName("DB path")
-        .withLongOpt("dbPath")
-        .hasArgs(1)
-        .withDescription("specify DB path")
-        .create("p");
-    allOptions.addOption(dbPathOption);
-
-    Option outPathOption = OptionBuilder
-        .withArgName("output path")
-        .withLongOpt("outPath")
-        .hasArgs(1)
-        .withDescription("specify output DB file path")
-        .create("o");
-    allOptions.addOption(outPathOption);
-
-    return allOptions;
-  }
-
-  public void displayHelp() {
-    HelpFormatter helpFormatter = new HelpFormatter();
-    Options allOpts = getOptions();
-    helpFormatter.printHelp("hdfs oz_debug -p <DB path>"
-        + " -o <Output DB file path>", allOpts);
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    CommandLine commandLine = parseArgs(args);
-    if (commandLine.hasOption("help")) {
-      displayHelp();
-      return 0;
-    }
-    if (!commandLine.hasOption("p") || !commandLine.hasOption("o")) {
-      displayHelp();
-      return -1;
-    }
-    String value = commandLine.getOptionValue("p");
-    LOG.info("DB path {}", value);
-    // the value is supposed to be an absolute path to the metadata DB
-    Path dbPath = Paths.get(value);
-    if (!Files.exists(dbPath)) {
-      LOG.error("DB path does not exist: {}", dbPath);
-      return -1;
-    }
-    Path parentPath = dbPath.getParent();
-    Path dbName = dbPath.getFileName();
-    if (parentPath == null || dbName == null) {
-      LOG.error("Error processing db path {}", dbPath);
-      return -1;
-    }
-
-    value = commandLine.getOptionValue("o");
-    Path outPath = Paths.get(value);
-    if (outPath == null || outPath.getParent() == null) {
-      LOG.error("Error processing output path {}", outPath);
-      return -1;
-    }
-
-    if (outPath.toFile().isDirectory()) {
-      LOG.error("The db output path should be a file instead of a directory");
-      return -1;
-    }
-
-    Path outParentPath = outPath.getParent();
-    if (outParentPath != null) {
-      if (!Files.exists(outParentPath)) {
-        Files.createDirectories(outParentPath);
-      }
-    }
-    LOG.info("Parent path [{}] db name [{}]", parentPath, dbName);
-    if (dbName.toString().endsWith(CONTAINER_DB_SUFFIX)) {
-      LOG.info("Converting container DB");
-      convertContainerDB(dbPath, outPath);
-    } else if (dbName.toString().equals(OM_DB_NAME)) {
-      LOG.info("Converting om DB");
-      convertOMDB(dbPath, outPath);
-    } else {
-      LOG.error("Unrecognized db name {}", dbName);
-      return -1;
-    }
-    return 0;
-  }
-
-  private Connection connectDB(String dbPath) throws Exception {
-    Class.forName("org.sqlite.JDBC");
-    String connectPath =
-        String.format("jdbc:sqlite:%s", dbPath);
-    return DriverManager.getConnection(connectPath);
-  }
-
-  private void executeSQL(Connection conn, String sql) throws SQLException {
-    try (Statement stmt = conn.createStatement()) {
-      stmt.executeUpdate(sql);
-    }
-  }
-
-  /**
-   * Convert om.db to a sqlite db file, with the following schema.
-   * (* for primary key)
-   *
-   * 1. for key type USER, it contains a username and a list of volumes:
-   * volumeList
-   * --------------------------------
-   *   userName*     |  volumeName*
-   * --------------------------------
-   *
-   * 2. for key type VOLUME:
-   *
-   * volumeInfo
-   * ----------------------------------------------
-   * adminName | ownerName* | volumeName* | aclID
-   * ----------------------------------------------
-   *
-   * aclInfo
-   * ----------------------------------------------
-   * aclEntryID* | type* | userName* | rights
-   * ----------------------------------------------
-   *
-   * 3. for key type BUCKET
-   * bucketInfo
-   * --------------------------------------------------------
-   * volumeName* | bucketName* | versionEnabled | storageType
-   * --------------------------------------------------------
-   *
-   * TODO : the following table will be changed when key partition is added.
-   * Only has the minimum entries for test purpose now.
-   * 4. for key type KEY
-   * -----------------------------------------------
-   * volumeName* | bucketName* | keyName* | dataSize
-   * -----------------------------------------------
-   *
-   * @param dbPath path to the om metadata DB
-   * @param outPath path to the output sqlite file
-   * @throws Exception if the conversion fails
-   */
-  private void convertOMDB(Path dbPath, Path outPath) throws Exception {
-    LOG.info("Create tables for sql om db.");
-    File dbFile = dbPath.toFile();
-    try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
-        .setConf(conf).setDbFile(dbFile).build();
-         Connection conn = connectDB(outPath.toString())) {
-      executeSQL(conn, CREATE_VOLUME_LIST);
-      executeSQL(conn, CREATE_VOLUME_INFO);
-      executeSQL(conn, CREATE_ACL_INFO);
-      executeSQL(conn, CREATE_BUCKET_INFO);
-      executeSQL(conn, CREATE_KEY_INFO);
-
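-      // Walk every key/value pair in the om DB and route each one to the
-      // matching sqlite table based on its key prefix.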
-      dbStore.iterate(null, (key, value) -> {
-        String keyString = DFSUtilClient.bytes2String(key);
-        KeyType type = getKeyType(keyString);
-        try {
-          insertOMDB(conn, type, keyString, value);
-        } catch (IOException | SQLException ex) {
-          LOG.error("Exception inserting key {} type {}", keyString, type, ex);
-        }
-        return true;
-      });
-    }
-  }
-
-  private void insertOMDB(Connection conn, KeyType type, String keyName,
-                          byte[] value) throws IOException, SQLException {
-    switch (type) {
-    case USER:
-      UserVolumeInfo volumeList = UserVolumeInfo.parseFrom(value);
-      for (String volumeName : volumeList.getVolumeNamesList()) {
-        String insertVolumeList =
-            String.format(INSERT_VOLUME_LIST, keyName, volumeName);
-        executeSQL(conn, insertVolumeList);
-      }
-      break;
-    case VOLUME:
-      VolumeInfo volumeInfo = VolumeInfo.parseFrom(value);
-      String adminName = volumeInfo.getAdminName();
-      String ownerName = volumeInfo.getOwnerName();
-      String volumeName = volumeInfo.getVolume();
-      String insertVolumeInfo =
-          String.format(INSERT_VOLUME_INFO, adminName, ownerName, volumeName);
-      executeSQL(conn, insertVolumeInfo);
-      for (OzoneAclInfo aclInfo : volumeInfo.getVolumeAclsList()) {
-        String insertAclInfo =
-            String.format(INSERT_ACL_INFO, adminName, ownerName, volumeName,
-                aclInfo.getType(), aclInfo.getName(), aclInfo.getRights());
-        executeSQL(conn, insertAclInfo);
-      }
-      break;
-    case BUCKET:
-      BucketInfo bucketInfo = BucketInfo.parseFrom(value);
-      String insertBucketInfo =
-          String.format(INSERT_BUCKET_INFO, bucketInfo.getVolumeName(),
-              bucketInfo.getBucketName(), bucketInfo.getIsVersionEnabled(),
-              bucketInfo.getStorageType());
-      executeSQL(conn, insertBucketInfo);
-      break;
-    case KEY:
-      KeyInfo keyInfo = KeyInfo.parseFrom(value);
-      // TODO : the two fields container name and block id are no longer used,
-      // need to revisit this later.
-      String insertKeyInfo =
-          String.format(INSERT_KEY_INFO, keyInfo.getVolumeName(),
-              keyInfo.getBucketName(), keyInfo.getKeyName(),
-              keyInfo.getDataSize(), "EMPTY",
-              "EMPTY");
-      executeSQL(conn, insertKeyInfo);
-      break;
-    default:
-      throw new IOException("Unknown key from om.db");
-    }
-  }
-
-  // TODO: This has to be fixed.
-  // we don't have prefix anymore. now each key is written into different
-  // table. The logic has to be changed.
-  private KeyType getKeyType(String key) {
-    if (key.startsWith(OM_USER_PREFIX)) {
-      return KeyType.USER;
-    } else if (key.startsWith(OM_KEY_PREFIX)) {
-      return key.replaceFirst(OM_KEY_PREFIX, "")
-          .contains(OM_KEY_PREFIX) ? KeyType.BUCKET : KeyType.VOLUME;
-    } else {
-      return KeyType.KEY;
-    }
-  }
-
-  private enum KeyType {
-    USER,
-    VOLUME,
-    BUCKET,
-    KEY,
-    UNKNOWN
-  }
-
-  /**
-   * Convert container.db to sqlite. The schema of the sql db:
-   * three tables, containerInfo, containerMembers, datanodeInfo
-   * (* for primary key)
-   *
-   * containerInfo:
-   * ----------------------------------------------
-   * container name* | container lead datanode uuid
-   * ----------------------------------------------
-   *
-   * containerMembers:
-   * --------------------------------
-   * container name* |  datanodeUUid*
-   * --------------------------------
-   *
-   * datanodeInfo:
-   * --------------------------------------------------------------
-   * hostname | datanodeUUid* | xferPort | ipcPort | containerPort
-   * --------------------------------------------------------------
-   *
-   * @param dbPath path to container db.
-   * @param outPath path to output sqlite
-   * @throws Exception if the conversion fails.
-   */
-  private void convertContainerDB(Path dbPath, Path outPath)
-      throws Exception {
-    LOG.info("Create tables for sql container db.");
-    File dbFile = dbPath.toFile();
-    try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
-        .setConf(conf).setDbFile(dbFile).build();
-        Connection conn = connectDB(outPath.toString())) {
-      executeSQL(conn, CREATE_CONTAINER_INFO);
-
-      dbStore.iterate(null, (key, value) -> {
-        long containerID = Longs.fromByteArray(key);
-        ContainerInfo containerInfo = ContainerInfo.fromProtobuf(
-            HddsProtos.ContainerInfoProto.PARSER.parseFrom(value));
-        Preconditions.checkNotNull(containerInfo);
-        try {
-          //TODO: include container state to sqllite schema
-          insertContainerDB(conn, containerInfo, containerID);
-          return true;
-        } catch (SQLException e) {
-          throw new IOException(e);
-        }
-      });
-    }
-  }
-
-  /**
-   * Insert into the sqlite DB of container.db.
-   * @param conn the connection to the sqlite DB.
-   * @param containerInfo the container info to insert.
-   * @param containerID the ID of the container.
-   * @throws SQLException throws exception.
-   */
-  private void insertContainerDB(Connection conn, ContainerInfo containerInfo,
-      long containerID) throws SQLException {
-    LOG.info("Insert to sql container db, for container {}", containerID);
-    String insertContainerInfo = String.format(
-        INSERT_CONTAINER_INFO, containerID,
-        containerInfo.getReplicationType(),
-        containerInfo.getReplicationFactor(),
-        containerInfo.getUsedBytes(),
-        containerInfo.getOwner(),
-        containerInfo.getNumberOfKeys());
-
-    executeSQL(conn, insertContainerInfo);
-    LOG.info("Insertion completed.");
-  }
-
-
-  /**
-   * Convert openContainer.db to sqlite db file. This is rather simple db,
-   * the schema has only one table:
-   *
-   * openContainer
-   * -------------------------------
-   * containerName* | containerUsed
-   * -------------------------------
-   *
-   * @param dbPath path to container db.
-   * @param outPath path to output sqlite
-   * @throws Exception if the conversion fails.
-   */
-  private void convertOpenContainerDB(Path dbPath, Path outPath)
-      throws Exception {
-    LOG.info("Create table for open container db.");
-    File dbFile = dbPath.toFile();
-    try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
-        .setConf(conf).setDbFile(dbFile).build();
-        Connection conn = connectDB(outPath.toString())) {
-      executeSQL(conn, CREATE_OPEN_CONTAINER);
-
-      dbStore.iterate(null, (key, value) -> {
-        String containerName = DFSUtil.bytes2String(key);
-        Long containerUsed =
-            Long.parseLong(DFSUtil.bytes2String(value));
-        String insertOpenContainer = String
-            .format(INSERT_OPEN_CONTAINER, containerName, containerUsed);
-        try {
-          executeSQL(conn, insertOpenContainer);
-          return true;
-        } catch (SQLException e) {
-          throw new IOException(e);
-        }
-      });
-    }
-  }
-
-  private CommandLine parseArgs(String[] argv)
-      throws ParseException {
-    return parser.parse(options, argv);
-  }
-
-  public static void main(String[] args) {
-    Tool shell = new SQLCLI(new OzoneConfiguration());
-    int res = 0;
-    try {
-      res = ToolRunner.run(shell, args);
-    } catch (Exception ex) {
-      LOG.error(ex.toString());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Command execution failed", ex);
-      }
-      res = 1;
-    }
-    System.exit(res);
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/package-info.java
deleted file mode 100644
index 4c38ae0..0000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Command line helpers for scm management.
- */
-package org.apache.hadoop.ozone.scm.cli;
diff --git a/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 0368002..0000000
--- a/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.ozone.OzoneFileSystem
diff --git a/hadoop-ozone/tools/src/main/resources/commands.properties b/hadoop-ozone/tools/src/main/resources/commands.properties
deleted file mode 100644
index 084cacf..0000000
--- a/hadoop-ozone/tools/src/main/resources/commands.properties
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
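-# createAuditTable/insertAuditEntry back the "load" command; the top5* keys are templates for "ozone auditparser <db> template <name>".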
-createAuditTable=CREATE TABLE IF NOT EXISTS audit (datetime text,level varchar(7),logger varchar(7),user text,ip text,op text,params text,result varchar(7),exception text,UNIQUE(datetime,level,logger,user,ip,op,params,result))
-insertAuditEntry=INSERT INTO AUDIT VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
-top5users=select user,count(*) as total from audit group by user order by total DESC limit 5
-top5cmds=select op,count(*) as total from audit group by op order by total DESC limit 5
-top5activetimebyseconds=select substr(datetime,1,charindex(',',datetime)-1) as dt,count(*) as thecount from audit group by dt order by thecount DESC limit 5
diff --git a/hadoop-ozone/tools/src/main/resources/webapps/freon/.gitkeep b/hadoop-ozone/tools/src/main/resources/webapps/freon/.gitkeep
deleted file mode 100644
index 6485314..0000000
--- a/hadoop-ozone/tools/src/main/resources/webapps/freon/.gitkeep
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
\ No newline at end of file
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
deleted file mode 100644
index 31864e2..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.audit.parser;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine;
-import picocli.CommandLine.ExecutionException;
-import picocli.CommandLine.IExceptionHandler2;
-import picocli.CommandLine.ParseResult;
-import picocli.CommandLine.ParameterException;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-/**
- * Tests AuditParser.
- */
-public class TestAuditParser {
-  private static File outputBaseDir;
-  private static AuditParser parserTool;
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestAuditParser.class);
-  private static final ByteArrayOutputStream OUT = new ByteArrayOutputStream();
-  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
-  private static final PrintStream OLD_OUT = System.out;
-  private static final PrintStream OLD_ERR = System.err;
-  private static String dbName;
-  private static final String LOGS = TestAuditParser.class
-      .getClassLoader().getResource("testaudit.log").getPath();
-  /**
-   * Creates output directory which will be used by the test-cases.
-   * If a test-case needs a separate directory, it has to create a random
-   * directory inside {@code outputBaseDir}.
-   *
-   * @throws Exception In case of exception while creating output directory.
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    outputBaseDir = getRandomTempDir();
-    dbName = getRandomTempDir() + "/testAudit.db";
-    parserTool = new AuditParser();
-    String[] args = new String[]{dbName, "load", LOGS};
-    execute(args, "");
-  }
-
-  @Before
-  public void setup() {
-    System.setOut(new PrintStream(OUT));
-    System.setErr(new PrintStream(err));
-  }
-
-  @After
-  public void reset() {
-    // reset stream after each unit test
-    OUT.reset();
-    err.reset();
-
-    // restore system streams
-    System.setOut(OLD_OUT);
-    System.setErr(OLD_ERR);
-  }
-
-  /**
-   * Cleans up the output base directory.
-   */
-  @AfterClass
-  public static void cleanup() throws IOException {
-    FileUtils.deleteDirectory(outputBaseDir);
-  }
-
-  private static void execute(String[] args, String msg) {
-    List<String> arguments = new ArrayList<>(Arrays.asList(args));
-    LOG.info("Executing shell command with args {}", arguments);
-    CommandLine cmd = parserTool.getCmd();
-
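-    // Rethrow parse and execution exceptions so failures surface as test
-    // errors instead of being swallowed by picocli's default handlers.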
-    IExceptionHandler2<List<Object>> exceptionHandler =
-        new IExceptionHandler2<List<Object>>() {
-          @Override
-          public List<Object> handleParseException(ParameterException ex,
-              String[] args) {
-            throw ex;
-          }
-
-          @Override
-          public List<Object> handleExecutionException(ExecutionException ex,
-              ParseResult parseResult) {
-            throw ex;
-          }
-        };
-    cmd.parseWithHandlers(new CommandLine.RunLast(),
-        exceptionHandler, args);
-    Assert.assertTrue(OUT.toString().contains(msg));
-  }
-
-  /**
-   * Test to find top 5 commands.
-   */
-  @Test
-  public void testTemplateTop5Cmds() {
-    String[] args = new String[]{dbName, "template", "top5cmds"};
-    execute(args,
-        "DELETE_KEY\t3\t\n" +
-            "ALLOCATE_KEY\t2\t\n" +
-            "COMMIT_KEY\t2\t\n" +
-            "CREATE_BUCKET\t1\t\n" +
-            "CREATE_VOLUME\t1\t\n\n");
-  }
-
-  /**
-   * Test to find top 5 users.
-   */
-  @Test
-  public void testTemplateTop5Users() {
-    String[] args = new String[]{dbName, "template", "top5users"};
-    execute(args, "hadoop\t9\t\n");
-  }
-
-  /**
-   * Test to find top 5 active times, by seconds.
-   */
-  @Test
-  public void testTemplateTop5ActiveTimeBySeconds() {
-    String[] args = new String[]{dbName, "template", "top5activetimebyseconds"};
-    execute(args,
-        "2018-09-06 01:57:22\t3\t\n" +
-            "2018-09-06 01:58:08\t1\t\n" +
-            "2018-09-06 01:58:18\t1\t\n" +
-            "2018-09-06 01:59:36\t1\t\n" +
-            "2018-09-06 01:59:41\t1\t\n");
-  }
-
-  /**
-   * Test to execute custom query.
-   */
-  @Test
-  public void testQueryCommand() {
-    String[] args = new String[]{dbName, "query",
-        "select count(*) from audit"};
-    execute(args,
-        "9");
-  }
-
-  /**
-   * Test to check help message.
-   * @throws Exception
-   */
-  @Test
-  public void testHelp() throws Exception {
-    String[] args = new String[]{"--help"};
-    execute(args,
-        "Usage: ozone auditparser [-hV] [--verbose] " +
-            "[-conf=<configurationPath>]\n" +
-            "                         [-D=<String=String>]... <database> " +
-            "[COMMAND]");
-  }
-
-  private static File getRandomTempDir() throws IOException {
-    File tempDir = new File(outputBaseDir,
-        RandomStringUtils.randomAlphanumeric(5));
-    FileUtils.forceMkdir(tempDir);
-    return tempDir;
-  }
-}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/package-info.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/package-info.java
deleted file mode 100644
index a9b25f4..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.audit.parser;
-/**
- * Tests for AuditParser.
- */
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
deleted file mode 100644
index fdcb822..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.freon;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Tests Freon with MiniOzoneCluster and validates the written data.
- */
-public abstract class TestDataValidate {
-
-  private static MiniOzoneCluster cluster = null;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   */
-  static void startCluster(OzoneConfiguration conf) throws Exception {
-    conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms");
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(5).build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  static void shutdownCluster() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void ratisTestLargeKey() throws Exception {
-    RandomKeyGenerator randomKeyGenerator =
-        new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
-    randomKeyGenerator.setNumOfVolumes(1);
-    randomKeyGenerator.setNumOfBuckets(1);
-    randomKeyGenerator.setNumOfKeys(1);
-    randomKeyGenerator.setType(ReplicationType.RATIS);
-    randomKeyGenerator.setFactor(ReplicationFactor.THREE);
-    randomKeyGenerator.setKeySize(20971520);
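-    // 20971520 bytes = 20 MB key size.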
-    randomKeyGenerator.setValidateWrites(true);
-    randomKeyGenerator.call();
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
-    Assert.assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount());
-  }
-
-  @Test
-  public void standaloneTestLargeKey() throws Exception {
-    RandomKeyGenerator randomKeyGenerator =
-        new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
-    randomKeyGenerator.setNumOfVolumes(1);
-    randomKeyGenerator.setNumOfBuckets(1);
-    randomKeyGenerator.setNumOfKeys(1);
-    randomKeyGenerator.setKeySize(20971520);
-    randomKeyGenerator.setValidateWrites(true);
-    randomKeyGenerator.setType(ReplicationType.RATIS);
-    randomKeyGenerator.setFactor(ReplicationFactor.THREE);
-    randomKeyGenerator.call();
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
-    Assert.assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount());
-  }
-
-  @Test
-  public void validateWriteTest() throws Exception {
-    RandomKeyGenerator randomKeyGenerator =
-        new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
-    randomKeyGenerator.setNumOfVolumes(2);
-    randomKeyGenerator.setNumOfBuckets(5);
-    randomKeyGenerator.setNumOfKeys(10);
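-    // Counts multiply per parent: 2 volumes x 5 buckets = 10 buckets,
-    // and 10 buckets x 10 keys = 100 keys, matching the assertions below.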
-    randomKeyGenerator.setValidateWrites(true);
-    randomKeyGenerator.setType(ReplicationType.RATIS);
-    randomKeyGenerator.setFactor(ReplicationFactor.THREE);
-    randomKeyGenerator.call();
-    Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated());
-    Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
-    Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
-    Assert.assertTrue(randomKeyGenerator.getValidateWrites());
-    Assert.assertNotEquals(0, randomKeyGenerator.getTotalKeysValidated());
-    Assert.assertNotEquals(0, randomKeyGenerator
-        .getSuccessfulValidationCount());
-    Assert.assertEquals(0, randomKeyGenerator
-        .getUnsuccessfulValidationCount());
-  }
-}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithDummyContainers.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithDummyContainers.java
deleted file mode 100644
index b0683bd..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithDummyContainers.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.freon;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Tests Freon with MiniOzoneCluster and ChunkManagerDummyImpl.
- * Data validation is disabled in RandomKeyGenerator.
- */
-public class TestDataValidateWithDummyContainers
-    extends TestDataValidate {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestDataValidateWithDummyContainers.class);
-  /**
-   * Create a MiniOzoneCluster for testing.
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    ContainerScrubberConfiguration sc =
-        conf.getObject(ContainerScrubberConfiguration.class);
-    sc.setEnabled(false);
-    // getObject returns a detached copy; write the change back to conf.
-    conf.setFromObject(sc);
-    conf.setBoolean(HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA, false);
-    conf.setBoolean(OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED,
-        false);
-    startCluster(conf);
-  }
-
-  /**
-   * Write validation is not supported for non-persistent containers.
-   * This test is a no-op.
-   */
-  @Test
-  @Override
-  public void validateWriteTest() throws Exception {
-    LOG.info("Skipping validateWriteTest for non-persistent containers.");
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    shutdownCluster();
-  }
-}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithSafeByteOperations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithSafeByteOperations.java
deleted file mode 100644
index 745cee4..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithSafeByteOperations.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.freon;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-/**
- * Tests Freon data validation with MiniOzoneCluster, with unsafe byte
- * operations disabled.
- */
-public class TestDataValidateWithSafeByteOperations extends TestDataValidate {
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED,
-        false);
-    startCluster(conf);
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    shutdownCluster();
-  }
-}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithUnsafeByteOperations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithUnsafeByteOperations.java
deleted file mode 100644
index 5ecef9b..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithUnsafeByteOperations.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.freon;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-/**
- * Tests Freon data validation with MiniOzoneCluster, with unsafe byte
- * operations enabled.
- */
-public class TestDataValidateWithUnsafeByteOperations extends TestDataValidate {
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED,
-        true);
-    startCluster(conf);
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    shutdownCluster();
-  }
-}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
deleted file mode 100644
index 545f2b3..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.freon;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.ratis.server.protocol.TermIndex;
-import org.apache.ratis.statemachine.StateMachine;
-import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
-import org.apache.ratis.statemachine.impl.SingleFileSnapshotInfo;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-
-/**
- * Tests Freon with Datanode restarts without waiting for pipeline to close.
- */
-public class TestFreonWithDatanodeFastRestart {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf)
-      .setHbProcessorInterval(1000)
-      .setHbInterval(1000)
-      .setNumDatanodes(3)
-      .build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  @Ignore("TODO:HDDS-1160")
-  public void testRestart() throws Exception {
-    startFreon();
-    StateMachine sm = getStateMachine();
-    TermIndex termIndexBeforeRestart = sm.getLastAppliedTermIndex();
-    cluster.restartHddsDatanode(0, false);
-    sm = getStateMachine();
-    SimpleStateMachineStorage storage =
-        (SimpleStateMachineStorage)sm.getStateMachineStorage();
-    SingleFileSnapshotInfo snapshotInfo = storage.getLatestSnapshot();
-    TermIndex termInSnapshot = snapshotInfo.getTermIndex();
-    String expectedSnapFile =
-        storage.getSnapshotFile(termIndexBeforeRestart.getTerm(),
-            termIndexBeforeRestart.getIndex()).getAbsolutePath();
-    Assert.assertEquals(expectedSnapFile,
-        snapshotInfo.getFile().getPath().toString());
-    Assert.assertEquals(termInSnapshot, termIndexBeforeRestart);
-
-    // After restart the term index might have progressed to apply pending
-    // transactions.
-    TermIndex termIndexAfterRestart = sm.getLastAppliedTermIndex();
-    Assert.assertTrue(termIndexAfterRestart.getIndex() >=
-        termIndexBeforeRestart.getIndex());
-    // TODO: Remove this sleep once HDDS-830 is fixed.
-    // Give the datanode time to re-register with SCM. If the pipeline is
-    // used before the datanode has registered, the datanode state machine
-    // fails with "NullPointerException: scmId cannot be null" and the
-    // datanode crashes.
-    Thread.sleep(5000);
-    startFreon();
-  }
-
-  private void startFreon() throws Exception {
-    RandomKeyGenerator randomKeyGenerator =
-        new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
-    randomKeyGenerator.setNumOfVolumes(1);
-    randomKeyGenerator.setNumOfBuckets(1);
-    randomKeyGenerator.setNumOfKeys(1);
-    randomKeyGenerator.setType(ReplicationType.RATIS);
-    randomKeyGenerator.setFactor(ReplicationFactor.THREE);
-    randomKeyGenerator.setKeySize(20971520);
-    randomKeyGenerator.setValidateWrites(true);
-    randomKeyGenerator.call();
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
-    Assert.assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount());
-  }
-
-  private StateMachine getStateMachine() throws Exception {
-    return ContainerTestHelper.getStateMachine(cluster);
-  }
-}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
deleted file mode 100644
index 4c54e9f..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.freon;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * Tests Freon with Datanode restarts.
- */
-public class TestFreonWithDatanodeRestart {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
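-    // Shorten stale-node detection, report intervals and the container
-    // creation lease so the datanode restart is noticed quickly.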
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 5, TimeUnit.SECONDS);
-    conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1,
-        TimeUnit.SECONDS);
-    conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 1,
-        TimeUnit.SECONDS);
-    conf.setTimeDuration(OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT, 5,
-        TimeUnit.SECONDS);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-      .setHbProcessorInterval(1000)
-      .setHbInterval(1000)
-      .setNumDatanodes(3)
-      .build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  // Pipeline close does not happen yet; it requires HDDS-801 and the
-  // pipeline teardown logic. Enable this test once those are in place.
-  @Ignore("Requires HDDS-801 and pipeline teardown logic.")
-  @Test
-  public void testRestart() throws Exception {
-    startFreon();
-    cluster.restartHddsDatanode(0, true);
-    startFreon();
-  }
-
-  private void startFreon() throws Exception {
-    RandomKeyGenerator randomKeyGenerator =
-        new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
-    randomKeyGenerator.setNumOfVolumes(1);
-    randomKeyGenerator.setNumOfBuckets(1);
-    randomKeyGenerator.setNumOfKeys(1);
-    randomKeyGenerator.setType(ReplicationType.RATIS);
-    randomKeyGenerator.setFactor(ReplicationFactor.THREE);
-    randomKeyGenerator.setKeySize(20971520);
-    randomKeyGenerator.setValidateWrites(true);
-    randomKeyGenerator.call();
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
-    Assert.assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount());
-  }
-}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
deleted file mode 100644
index 13ecab6..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.freon;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Tests Freon with Pipeline destroy.
- */
-public class TestFreonWithPipelineDestroy {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf)
-      .setHbProcessorInterval(1000)
-      .setHbInterval(1000)
-      .setNumDatanodes(3)
-      .build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testRestart() throws Exception {
-    startFreon();
-    destroyPipeline();
-    startFreon();
-  }
-
-  private void startFreon() throws Exception {
-    RandomKeyGenerator randomKeyGenerator =
-        new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
-    randomKeyGenerator.setNumOfVolumes(1);
-    randomKeyGenerator.setNumOfBuckets(1);
-    randomKeyGenerator.setNumOfKeys(1);
-    randomKeyGenerator.setType(ReplicationType.RATIS);
-    randomKeyGenerator.setFactor(ReplicationFactor.THREE);
-    randomKeyGenerator.setKeySize(20971520);
-    randomKeyGenerator.setValidateWrites(true);
-    randomKeyGenerator.call();
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
-    Assert.assertEquals(0,
-        randomKeyGenerator.getUnsuccessfulValidationCount());
-  }
-
-  private void destroyPipeline() throws Exception {
-    XceiverServerSpi server =
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine().
-            getContainer().getWriteChannel();
-    StorageContainerDatanodeProtocolProtos.PipelineReport report =
-        server.getPipelineReport().get(0);
-    PipelineID id = PipelineID.getFromProtobuf(report.getPipelineID());
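-    // Resolve the reported pipeline in SCM and destroy it, so the next
-    // Freon run cannot reuse it.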
-    PipelineManager pipelineManager =
-        cluster.getStorageContainerManager().getPipelineManager();
-    Pipeline pipeline = pipelineManager.getPipeline(id);
-    pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
-  }
-}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestProgressBar.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestProgressBar.java
deleted file mode 100644
index 90366da..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestProgressBar.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.freon;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.junit.MockitoJUnitRunner;
-
-import java.io.PrintStream;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.function.Supplier;
-import java.util.stream.LongStream;
-
-import static org.mockito.Mockito.anyChar;
-import static org.mockito.Mockito.anyString;
-import static org.mockito.Mockito.atLeastOnce;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
-/**
- * Tests for the ProgressBar class for Freon, using the Mockito runner.
- */
-@RunWith(MockitoJUnitRunner.class)
-public class TestProgressBar {
-
-  private PrintStream stream;
-  private AtomicLong numberOfKeysAdded;
-  private Supplier<Long> currentValue;
-
-  @Before
-  public void setupMock() {
-    numberOfKeysAdded = new AtomicLong(0L);
-    currentValue = () -> numberOfKeysAdded.get();
-    stream = mock(PrintStream.class);
-  }
-
-  @Test
-  public void testWithRunnable() {
-
-    Long maxValue = 10L;
-
-    ProgressBar progressbar = new ProgressBar(stream, maxValue, currentValue);
-
-    Runnable task = () -> {
-      LongStream.range(0, maxValue).forEach(
-          counter -> {
-            numberOfKeysAdded.getAndIncrement();
-          }
-      );
-    };
-
-    progressbar.start();
-    task.run();
-    progressbar.shutdown();
-
-    verify(stream, atLeastOnce()).print(anyChar());
-    verify(stream, atLeastOnce()).print(anyString());
-  }
-}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
deleted file mode 100644
index 45ea23d..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.freon;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Tests Freon with MiniOzoneCluster.
- */
-public class TestRandomKeyGenerator {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms");
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void defaultTest() throws Exception {
-    RandomKeyGenerator randomKeyGenerator =
-        new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
-    randomKeyGenerator.setNumOfVolumes(2);
-    randomKeyGenerator.setNumOfBuckets(5);
-    randomKeyGenerator.setNumOfKeys(10);
-    randomKeyGenerator.setFactor(ReplicationFactor.THREE);
-    randomKeyGenerator.setType(ReplicationType.RATIS);
-    randomKeyGenerator.call();
-    Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated());
-    Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
-    Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
-  }
-
-  @Test
-  public void multiThread() throws Exception {
-    RandomKeyGenerator randomKeyGenerator =
-        new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
-    randomKeyGenerator.setNumOfVolumes(10);
-    randomKeyGenerator.setNumOfBuckets(1);
-    randomKeyGenerator.setNumOfKeys(10);
-    randomKeyGenerator.setNumOfThreads(10);
-    randomKeyGenerator.setKeySize(10240);
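-    // 10 KB keys written by 10 concurrent threads.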
-    randomKeyGenerator.setFactor(ReplicationFactor.THREE);
-    randomKeyGenerator.setType(ReplicationType.RATIS);
-    randomKeyGenerator.call();
-    Assert.assertEquals(10, randomKeyGenerator.getNumberOfVolumesCreated());
-    Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
-    Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
-  }
-
-  @Test
-  public void ratisTest3() throws Exception {
-    RandomKeyGenerator randomKeyGenerator =
-        new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
-    randomKeyGenerator.setNumOfVolumes(10);
-    randomKeyGenerator.setNumOfBuckets(1);
-    randomKeyGenerator.setNumOfKeys(10);
-    randomKeyGenerator.setNumOfThreads(10);
-    randomKeyGenerator.setKeySize(10240);
-    randomKeyGenerator.setFactor(ReplicationFactor.THREE);
-    randomKeyGenerator.setType(ReplicationType.RATIS);
-    randomKeyGenerator.call();
-    Assert.assertEquals(10, randomKeyGenerator.getNumberOfVolumesCreated());
-    Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
-    Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
-  }
-
-  @Test
-  public void bigFileThan2GB() throws Exception {
-    RandomKeyGenerator randomKeyGenerator =
-        new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
-    randomKeyGenerator.setNumOfVolumes(1);
-    randomKeyGenerator.setNumOfBuckets(1);
-    randomKeyGenerator.setNumOfKeys(1);
-    randomKeyGenerator.setNumOfThreads(1);
-    randomKeyGenerator.setKeySize(10L + Integer.MAX_VALUE);
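-    // 10L + Integer.MAX_VALUE = 2147483657 bytes, just over 2 GB.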
-    randomKeyGenerator.setFactor(ReplicationFactor.THREE);
-    randomKeyGenerator.setType(ReplicationType.RATIS);
-    randomKeyGenerator.setValidateWrites(true);
-    randomKeyGenerator.call();
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
-    Assert.assertEquals(1, randomKeyGenerator.getSuccessfulValidationCount());
-  }
-
-  @Test
-  public void fileWithSizeZero() throws Exception {
-    RandomKeyGenerator randomKeyGenerator =
-        new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
-    randomKeyGenerator.setNumOfVolumes(1);
-    randomKeyGenerator.setNumOfBuckets(1);
-    randomKeyGenerator.setNumOfKeys(1);
-    randomKeyGenerator.setNumOfThreads(1);
-    randomKeyGenerator.setKeySize(0);
-    randomKeyGenerator.setFactor(ReplicationFactor.THREE);
-    randomKeyGenerator.setType(ReplicationType.RATIS);
-    randomKeyGenerator.setValidateWrites(true);
-    randomKeyGenerator.call();
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
-    Assert.assertEquals(1, randomKeyGenerator.getSuccessfulValidationCount());
-  }
-
-  @Test
-  public void testThreadPoolSize() throws Exception {
-    RandomKeyGenerator randomKeyGenerator =
-        new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
-    randomKeyGenerator.setNumOfVolumes(1);
-    randomKeyGenerator.setNumOfBuckets(1);
-    randomKeyGenerator.setNumOfKeys(1);
-    randomKeyGenerator.setFactor(ReplicationFactor.THREE);
-    randomKeyGenerator.setType(ReplicationType.RATIS);
-    randomKeyGenerator.setNumOfThreads(10);
-    randomKeyGenerator.call();
-    Assert.assertEquals(10, randomKeyGenerator.getThreadPoolSize());
-    Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
-  }
-}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/package-info.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/package-info.java
deleted file mode 100644
index f7cb075..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- *     http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-/**
- * Freon Ozone Load Generator.
- */
-package org.apache.hadoop.ozone.freon;
\ No newline at end of file
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
deleted file mode 100644
index 112674a..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.fsck;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.util.FileUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test cases for ContainerMapper.
- */
-public class TestContainerMapper {
-
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneClient ozClient = null;
-  private static ObjectStore store = null;
-  private static OzoneManager ozoneManager;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private static final String SCM_ID = UUID.randomUUID().toString();
-  private static String volName = UUID.randomUUID().toString();
-  private static String bucketName = UUID.randomUUID().toString();
-  private static OzoneConfiguration conf;
-  private static List<String> keyList = new ArrayList<>();
-  private static String dbPath;
-
-
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    dbPath = GenericTestUtils.getRandomizedTempPath();
-    conf.set(OZONE_OM_DB_DIRS, dbPath);
-    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, "100MB");
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(1)
-        .setScmId(SCM_ID)
-        .build();
-    cluster.waitForClusterToBeReady();
-    ozClient = OzoneClientFactory.getRpcClient(conf);
-    store = ozClient.getObjectStore();
-    storageContainerLocationClient =
-        cluster.getStorageContainerLocationClient();
-    ozoneManager = cluster.getOzoneManager();
-    store.createVolume(volName);
-    OzoneVolume volume = store.getVolume(volName);
-    volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    byte[] data = generateData(10 * 1024 * 1024, (byte)98);
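-    // Each key gets the same 10 MB payload filled with the byte 'b' (98).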
-
-    for (int i = 0; i < 20; i++) {
-      String key = UUID.randomUUID().toString();
-      keyList.add(key);
-      OzoneOutputStream out = bucket.createKey(key, data.length,
-          ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
-          new HashMap<String, String>());
-      out.write(data, 0, data.length);
-      out.close();
-    }
-    cluster.stop();
-  }
-
-  @Test
-  public void testContainerMapper() throws Exception {
-    ContainerMapper containerMapper = new ContainerMapper();
-    Map<Long, List<Map<Long, BlockIdDetails>>> dataMap =
-        containerMapper.parseOmDB(conf);
-    // We created 20 keys of 10 MB each and the container max size is
-    // 100 MB, yet this yields 3 containers rather than 2 because
-    // containers close before reaching the size threshold.
-    assertEquals(3, dataMap.size());
-  }
-
-  private static byte[] generateData(int size, byte val) {
-    byte[] chars = new byte[size];
-    Arrays.fill(chars, val);
-    return chars;
-  }
-
-  @AfterClass
-  public static void shutdown() throws IOException {
-    cluster.shutdown();
-    FileUtils.deleteFully(new File(dbPath));
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/fsck/package-info.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/fsck/package-info.java
deleted file mode 100644
index 432d65c..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/fsck/package-info.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for the Ozone fsck tool.
- */
-package org.apache.hadoop.ozone.fsck;
\ No newline at end of file
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
deleted file mode 100644
index 9279d7f..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.genconf;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine;
-import picocli.CommandLine.ExecutionException;
-import picocli.CommandLine.IExceptionHandler2;
-import picocli.CommandLine.ParseResult;
-import picocli.CommandLine.ParameterException;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-/**
- * Tests GenerateOzoneRequiredConfigurations.
- */
-public class TestGenerateOzoneRequiredConfigurations {
-  private static File outputBaseDir;
-  private static GenerateOzoneRequiredConfigurations genconfTool;
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestGenerateOzoneRequiredConfigurations.class);
-  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
-  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
-  private static final PrintStream OLD_OUT = System.out;
-  private static final PrintStream OLD_ERR = System.err;
-  /**
-   * Creates output directory which will be used by the test-cases.
-   * If a test-case needs a separate directory, it has to create a random
-   * directory inside {@code outputBaseDir}.
-   *
-   * @throws Exception In case of exception while creating output directory.
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    outputBaseDir = GenericTestUtils.getTestDir();
-    FileUtils.forceMkdir(outputBaseDir);
-    genconfTool = new GenerateOzoneRequiredConfigurations();
-  }
-
-  @Before
-  public void setup() throws Exception {
-    System.setOut(new PrintStream(out));
-    System.setErr(new PrintStream(err));
-  }
-
-  @After
-  public void reset() {
-    // reset stream after each unit test
-    out.reset();
-    err.reset();
-
-    // restore system streams
-    System.setOut(OLD_OUT);
-    System.setErr(OLD_ERR);
-  }
-
-  /**
-   * Cleans up the output base directory.
-   */
-  @AfterClass
-  public static void cleanup() throws IOException {
-    FileUtils.deleteDirectory(outputBaseDir);
-  }
-
-  private void execute(String[] args, String msg) {
-    List<String> arguments = new ArrayList<>(Arrays.asList(args));
-    LOG.info("Executing shell command with args {}", arguments);
-    CommandLine cmd = genconfTool.getCmd();
-
-    IExceptionHandler2<List<Object>> exceptionHandler =
-        new IExceptionHandler2<List<Object>>() {
-          @Override
-          public List<Object> handleParseException(ParameterException ex,
-              String[] args) {
-            throw ex;
-          }
-
-          @Override
-          public List<Object> handleExecutionException(ExecutionException ex,
-              ParseResult parseResult) {
-            throw ex;
-          }
-        };
-    cmd.parseWithHandlers(new CommandLine.RunLast(),
-        exceptionHandler, args);
-    Assert.assertTrue(out.toString().contains(msg));
-  }
-
-  private void executeWithException(String[] args, String msg) {
-    List<String> arguments = new ArrayList<>(Arrays.asList(args));
-    LOG.info("Executing shell command with args {}", arguments);
-    CommandLine cmd = genconfTool.getCmd();
-
-    IExceptionHandler2<List<Object>> exceptionHandler =
-        new IExceptionHandler2<List<Object>>() {
-          @Override
-          public List<Object> handleParseException(ParameterException ex,
-              String[] args) {
-            throw ex;
-          }
-
-          @Override
-          public List<Object> handleExecutionException(ExecutionException ex,
-              ParseResult parseResult) {
-            throw ex;
-          }
-        };
-    try {
-      cmd.parseWithHandlers(new CommandLine.RunLast(),
-          exceptionHandler, args);
-    } catch (Exception ex) {
-      Assert.assertTrue(ex.getMessage().contains(msg));
-    }
-  }
-
-  /**
-   * Tests a valid path and generates ozone-site.xml by calling
-   * {@code GenerateOzoneRequiredConfigurations#generateConfigurations}.
- * Further verifies that all properties have a non-empty default value.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testGenerateConfigurations() throws Exception {
-    File tempPath = getRandomTempDir();
-    String[] args = new String[]{tempPath.getAbsolutePath()};
-    execute(args, "ozone-site.xml has been generated at " +
-        tempPath.getAbsolutePath());
-
-    // Fetch the file generated by the command above.
-    URL url = new File(tempPath.getAbsolutePath() + "/ozone-site.xml")
-        .toURI().toURL();
-    OzoneConfiguration oc = new OzoneConfiguration();
-    List<OzoneConfiguration.Property> allProperties =
-        oc.readPropertyFromXml(url);
-
-    // Assert that every property has a non-empty value.
-    for (OzoneConfiguration.Property p : allProperties) {
-      Assert.assertTrue(
-          p.getValue() != null && p.getValue().length() > 0);
-    }
-  }
-
-  /**
-   * Generates ozone-site.xml at specified path.
-   * Verify that it does not overwrite if file already exists in path.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testDoesNotOverwrite() throws Exception {
-    File tempPath = getRandomTempDir();
-    String[] args = new String[]{tempPath.getAbsolutePath()};
-    execute(args, "ozone-site.xml has been generated at " +
-        tempPath.getAbsolutePath());
-
-    //attempt overwrite
-    execute(args, "ozone-site.xml already exists at " +
-            tempPath.getAbsolutePath() + " and will not be overwritten");
-
-  }
-
-  /**
-   * Test that ozone-site.xml is not generated when permissions are
-   * insufficient.
-   * @throws Exception
-   */
-  @Test
-  public void genconfFailureByInsufficientPermissions() throws Exception {
-    File tempPath = getRandomTempDir();
-    tempPath.setReadOnly();
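-    // Generating into a read-only directory must fail with
-    // "Insufficient permission." instead of silently succeeding.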
-    String[] args = new String[]{tempPath.getAbsolutePath()};
-    executeWithException(args, "Insufficient permission.");
-  }
-
-  /**
-   * Test that ozone-site.xml is not generated when the path is invalid.
-   * @throws Exception
-   */
-  @Test
-  public void genconfFailureByInvalidPath() throws Exception {
-    String[] args = new String[]{"invalid-path"};
-    executeWithException(args, "Invalid directory path.");
-  }
-
-  /**
-   * Test that ozone-site.xml is not generated when no path is specified.
-   * @throws Exception
-   */
-  @Test
-  public void genconfPathNotSpecified() throws Exception {
-    String[] args = new String[]{};
-    executeWithException(args, "Missing required parameter: <path>");
-  }
-
-  /**
-   * Test to check help message.
-   * @throws Exception
-   */
-  @Test
-  public void genconfHelp() throws Exception {
-    String[] args = new String[]{"--help"};
-    execute(args, "Usage: ozone genconf [-hV] [--verbose]");
-  }
-
-  private File getRandomTempDir() throws IOException {
-    File tempDir = new File(outputBaseDir,
-        RandomStringUtils.randomAlphanumeric(5));
-    FileUtils.forceMkdir(tempDir);
-    return tempDir;
-  }
-}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/package-info.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/package-info.java
deleted file mode 100644
index 8f58a82..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.genconf;
-/**
- * Tests for ozone genconf tool
- */
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
deleted file mode 100644
index 2cdbf0d..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.scm.cli.SQLCLI;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
-import org.junit.After;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-/**
- * This class tests the CLI that transforms om.db into SQLite DB files.
- */
-@RunWith(Parameterized.class)
-public class TestOmSQLCli {
-  private MiniOzoneCluster cluster = null;
-
-  private OzoneConfiguration conf;
-  private SQLCLI cli;
-
-  private String userName = "userTest";
-  private String adminName = "adminTest";
-  private String volumeName0 = "volumeTest0";
-  private String volumeName1 = "volumeTest1";
-  private String bucketName0 = "bucketTest0";
-  private String bucketName1 = "bucketTest1";
-  private String bucketName2 = "bucketTest2";
-  private String keyName0 = "key0";
-  private String keyName1 = "key1";
-  private String keyName2 = "key2";
-  private String keyName3 = "key3";
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {
-        // Uncomment the line below if LevelDB support is added in the future.
-        //{OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
-        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
-    });
-  }
-
-  private String metaStoreType;
-
-  public TestOmSQLCli(String type) {
-    metaStoreType = type;
-  }
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @Before
-  public void setup() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    OzoneBucket bucket0 =
-        TestDataUtil.createVolumeAndBucket(cluster, volumeName0, bucketName0);
-    OzoneBucket bucket1 =
-        TestDataUtil.createVolumeAndBucket(cluster, volumeName1, bucketName1);
-    OzoneBucket bucket2 =
-        TestDataUtil.createVolumeAndBucket(cluster, volumeName0, bucketName2);
-
-    TestDataUtil.createKey(bucket0, keyName0, "");
-    TestDataUtil.createKey(bucket1, keyName1, "");
-    TestDataUtil.createKey(bucket2, keyName2, "");
-    TestDataUtil.createKey(bucket2, keyName3, "");
-
-    cluster.getOzoneManager().stop();
-    cluster.getStorageContainerManager().stop();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType);
-    cli = new SQLCLI(conf);
-  }
-
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  // After HDDS-357, we have to fix SQLCli.
-  // TODO: fix SQLCli
-  @Ignore
-  @Test
-  public void testOmDB() throws Exception {
-    String dbOutPath =  GenericTestUtils.getTempPath(
-        UUID.randomUUID() + "/out_sql.db");
-
-    String dbRootPath = conf.get(HddsConfigKeys.OZONE_METADATA_DIRS);
-    String dbPath = dbRootPath + "/" + OM_DB_NAME;
-    String[] args = {"-p", dbPath, "-o", dbOutPath};
-
-    cli.run(args);
-
-    Connection conn = connectDB(dbOutPath);
-    String sql = "SELECT * FROM volumeList";
-    ResultSet rs = executeQuery(conn, sql);
-    List<String> expectedValues =
-        new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
-    while (rs.next()) {
-      String userNameRs = rs.getString("userName");
-      String volumeNameRs = rs.getString("volumeName");
-      assertEquals(userName, userNameRs.substring(1));
-      assertTrue(expectedValues.remove(volumeNameRs));
-    }
-    assertEquals(0, expectedValues.size());
-
-    sql = "SELECT * FROM volumeInfo";
-    rs = executeQuery(conn, sql);
-    expectedValues =
-        new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
-    while (rs.next()) {
-      String adName = rs.getString("adminName");
-      String ownerName = rs.getString("ownerName");
-      String volumeName = rs.getString("volumeName");
-      assertEquals(adminName, adName);
-      assertEquals(userName, ownerName);
-      assertTrue(expectedValues.remove(volumeName));
-    }
-    assertEquals(0, expectedValues.size());
-
-    sql = "SELECT * FROM aclInfo";
-    rs = executeQuery(conn, sql);
-    expectedValues =
-        new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
-    while (rs.next()) {
-      String adName = rs.getString("adminName");
-      String ownerName = rs.getString("ownerName");
-      String volumeName = rs.getString("volumeName");
-      String type = rs.getString("type");
-      String uName = rs.getString("userName");
-      String rights = rs.getString("rights");
-      assertEquals(adminName, adName);
-      assertEquals(userName, ownerName);
-      assertEquals("USER", type);
-      assertEquals(userName, uName);
-      assertEquals("READ_WRITE", rights);
-      assertTrue(expectedValues.remove(volumeName));
-    }
-    assertEquals(0, expectedValues.size());
-
-    sql = "SELECT * FROM bucketInfo";
-    rs = executeQuery(conn, sql);
-    HashMap<String, String> expectedMap = new HashMap<>();
-    expectedMap.put(bucketName0, volumeName0);
-    expectedMap.put(bucketName2, volumeName0);
-    expectedMap.put(bucketName1, volumeName1);
-    while (rs.next()) {
-      String volumeName = rs.getString("volumeName");
-      String bucketName = rs.getString("bucketName");
-      boolean versionEnabled = rs.getBoolean("versionEnabled");
-      String storageType = rs.getString("storageType");
-      assertEquals(volumeName, expectedMap.remove(bucketName));
-      assertFalse(versionEnabled);
-      assertEquals("DISK", storegeType);
-    }
-    assertEquals(0, expectedMap.size());
-
-    sql = "SELECT * FROM keyInfo";
-    rs = executeQuery(conn, sql);
-    HashMap<String, List<String>> expectedMap2 = new HashMap<>();
-    // no data written, data size will be 0
-    expectedMap2.put(keyName0,
-        Arrays.asList(volumeName0, bucketName0, "0"));
-    expectedMap2.put(keyName1,
-        Arrays.asList(volumeName1, bucketName1, "0"));
-    expectedMap2.put(keyName2,
-        Arrays.asList(volumeName0, bucketName2, "0"));
-    expectedMap2.put(keyName3,
-        Arrays.asList(volumeName0, bucketName2, "0"));
-    while (rs.next()) {
-      String volumeName = rs.getString("volumeName");
-      String bucketName = rs.getString("bucketName");
-      String keyName = rs.getString("keyName");
-      int dataSize = rs.getInt("dataSize");
-      List<String> vals = expectedMap2.remove(keyName);
-      assertNotNull(vals);
-      assertEquals(vals.get(0), volumeName);
-      assertEquals(vals.get(1), bucketName);
-      assertEquals(vals.get(2), Integer.toString(dataSize));
-    }
-    assertEquals(0, expectedMap2.size());
-
-    conn.close();
-    Files.delete(Paths.get(dbOutPath));
-  }
-
-  private ResultSet executeQuery(Connection conn, String sql)
-      throws SQLException {
-    Statement stmt = conn.createStatement();
-    return stmt.executeQuery(sql);
-  }
-
-  private Connection connectDB(String dbPath) throws Exception {
-    Class.forName("org.sqlite.JDBC");
-    String connectPath =
-        String.format("jdbc:sqlite:%s", dbPath);
-    return DriverManager.getConnection(connectPath);
-  }
-}
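
The test above exercises the converted SQLite file through plain JDBC. For readers without a MiniOzoneCluster at hand, a minimal standalone sketch of the same inspection pattern — open the file via the sqlite-jdbc driver and list its tables (the class name and default path are hypothetical; assumes org.sqlite:sqlite-jdbc is on the classpath):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public final class InspectSqliteDb {
  public static void main(String[] args) throws Exception {
    // Path to a converted DB file; substitute your own.
    String dbPath = args.length > 0 ? args[0] : "/tmp/out_sql.db";
    // The org.sqlite.JDBC driver registers itself when loaded.
    Class.forName("org.sqlite.JDBC");
    try (Connection conn =
             DriverManager.getConnection("jdbc:sqlite:" + dbPath);
         Statement stmt = conn.createStatement();
         // sqlite_master lists every table stored in the file.
         ResultSet rs = stmt.executeQuery(
             "SELECT name FROM sqlite_master WHERE type = 'table'")) {
      while (rs.next()) {
        System.out.println(rs.getString("name"));
      }
    }
  }
}

Run against the dbOutPath produced by SQLCLI, this would be expected to print the tables queried in testOmDB: volumeList, volumeInfo, aclInfo, bucketInfo and keyInfo.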
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/package-info.java
deleted file mode 100644
index 595708c..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- *     http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-/**
- * OM to SQL Converter. Currently broken.
- */
-package org.apache.hadoop.ozone.om;
\ No newline at end of file
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/package-info.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/package-info.java
deleted file mode 100644
index 291fcd9..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * A tool to convert Ozone Manager metadata to a SQL DB.
- */
-package org.apache.hadoop.ozone.scm;
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java
deleted file mode 100644
index e62ba47..0000000
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test;
-
-import org.apache.hadoop.ozone.freon.Freon;
-import org.apache.hadoop.util.ProgramDriver;
-
-/**
- * Driver for Ozone tests.
- */
-public class OzoneTestDriver {
-
-  private final ProgramDriver pgd;
-
-  public OzoneTestDriver() {
-    this(new ProgramDriver());
-  }
-
-  public OzoneTestDriver(ProgramDriver pgd) {
-    this.pgd = pgd;
-    try {
-      pgd.addClass("freon", Freon.class,
-          "Populates ozone with data.");
-    } catch(Throwable e) {
-      e.printStackTrace();
-    }
-  }
-
-  public void run(String[] args) {
-    int exitCode = -1;
-    try {
-      exitCode = pgd.run(args);
-    } catch(Throwable e) {
-      e.printStackTrace();
-    }
-
-    System.exit(exitCode);
-  }
-
-  public static void main(String[] args){
-    new OzoneTestDriver().run(args);
-  }
-}
diff --git a/hadoop-ozone/tools/src/test/resources/commands.properties b/hadoop-ozone/tools/src/test/resources/commands.properties
deleted file mode 100644
index 084cacf..0000000
--- a/hadoop-ozone/tools/src/test/resources/commands.properties
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-createAuditTable=CREATE TABLE IF NOT EXISTS audit (datetime text,level varchar(7),logger varchar(7),user text,ip text,op text,params text,result varchar(7),exception text,UNIQUE(datetime,level,logger,user,ip,op,params,result))
-insertAuditEntry=INSERT INTO AUDIT VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
-top5users=select user,count(*) as total from audit group by user order by total DESC limit 5
-top5cmds=select op,count(*) as total from audit group by op order by total DESC limit 5
-top5activetimebyseconds=select substr(datetime,1,charindex(',',datetime)-1) as dt,count(*) as thecount from audit group by dt order by thecount DESC limit 5
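
The keys above pair a table-creation statement with canned report queries over an audit table. A minimal sketch of how such a query-definition properties file might be consumed (class and file paths are hypothetical; assumes an SQLite-backed audit database with the sqlite-jdbc driver on the classpath):

import java.io.FileInputStream;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

public final class AuditQueryRunner {
  public static void main(String[] args) throws Exception {
    Properties queries = new Properties();
    // The query-definition file shown above; path is hypothetical.
    try (InputStream in = new FileInputStream("commands.properties")) {
      queries.load(in);
    }
    try (Connection conn =
             DriverManager.getConnection("jdbc:sqlite:audit.db");
         Statement stmt = conn.createStatement()) {
      // Create the audit table if absent, then run one canned report.
      stmt.executeUpdate(queries.getProperty("createAuditTable"));
      try (ResultSet rs =
               stmt.executeQuery(queries.getProperty("top5users"))) {
        while (rs.next()) {
          System.out.println(
              rs.getString("user") + " -> " + rs.getInt("total"));
        }
      }
    }
  }
}

Keeping the SQL in a properties file, as here, lets the canned reports change without recompiling the tool that runs them.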
diff --git a/hadoop-ozone/tools/src/test/resources/testaudit.log b/hadoop-ozone/tools/src/test/resources/testaudit.log
deleted file mode 100644
index 4c56f35..0000000
--- a/hadoop-ozone/tools/src/test/resources/testaudit.log
+++ /dev/null
@@ -1,15 +0,0 @@
-2018-09-06 01:57:22,996 | INFO  | OMAudit | user=hadoop | ip=172.18.0.4 | op=ALLOCATE_KEY {volume=vol-7-67105, bucket=bucket-0-68911, key=key-246-29031, dataSize=10240, replicationType=STAND_ALONE, replicationFactor=ONE, keyLocationInfo=null} | ret=SUCCESS |
-2018-09-06 01:57:22,997 | INFO  | OMAudit | user=hadoop | ip=172.18.0.4 | op=COMMIT_KEY {volume=vol-4-88912, bucket=bucket-0-27678, key=key-241-42688, dataSize=10240, replicationType=null, replicationFactor=null, keyLocationInfo=[org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo@25bd7387], clientID=61987500296} | ret=SUCCESS |
-2018-09-06 01:57:22,997 | INFO  | OMAudit | user=hadoop | ip=172.18.0.4 | op=COMMIT_KEY {volume=vol-1-59303, bucket=bucket-0-47510, key=key-248-17213, dataSize=10240, replicationType=null, replicationFactor=null, keyLocationInfo=[org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo@788f5bea], clientID=61990833797} | ret=SUCCESS |
-2018-09-06 01:58:08,035 | ERROR | OMAudit | user=hadoop | ip=172.18.0.4 | op=CREATE_VOLUME {admin=hadoop, owner=tom, volume=dcv, creationTime=0, quotaInBytes=1152921504606846976} | ret=FAILURE | org.apache.hadoop.ozone.om.exceptions.OMException
-at org.apache.hadoop.ozone.om.VolumeManagerImpl.createVolume(VolumeManagerImpl.java:137)
-at org.apache.hadoop.ozone.om.OzoneManager.createVolume(OzoneManager.java:469)
-2018-09-06 01:58:18,447 | ERROR | OMAudit | user=hadoop | ip=172.18.0.4 | op=CREATE_BUCKET {volume=dcv, bucket=dcb, acls=[USER:hadoop:rw, GROUP:users:rw], isVersionEnabled=false, storageType=DISK, creationTime=0} | ret=FAILURE | org.apache.hadoop.ozone.om.exceptions.OMException: Bucket already exist
-at org.apache.hadoop.ozone.om.BucketManagerImpl.createBucket(BucketManagerImpl.java:98)
-at org.apache.hadoop.ozone.om.OzoneManager.createBucket(OzoneManager.java:694)
-2018-09-06 01:59:36,686 | INFO  | OMAudit | user=hadoop | ip=172.18.0.4 | op=DELETE_KEY {volume=dcv, bucket=dcb, key=dck1, dataSize=0, replicationType=null, replicationFactor=null, keyLocationInfo=null} | ret=SUCCESS |
-2018-09-06 01:59:41,027 | INFO  | OMAudit | user=hadoop | ip=172.18.0.4 | op=DELETE_KEY {volume=dcv, bucket=dcb, key=dck2, dataSize=0, replicationType=null, replicationFactor=null, keyLocationInfo=null} | ret=SUCCESS |
-2018-09-06 01:59:47,169 | ERROR | OMAudit | user=hadoop | ip=172.18.0.4 | op=DELETE_KEY {volume=dcv, bucket=dcb, key=dck2, dataSize=0, replicationType=null, replicationFactor=null, keyLocationInfo=null} | ret=FAILURE | org.apache.hadoop.ozone.om.exceptions.OMException: Key not found
-at org.apache.hadoop.ozone.om.KeyManagerImpl.deleteKey(KeyManagerImpl.java:448)
-at org.apache.hadoop.ozone.om.OzoneManager.deleteKey(OzoneManager.java:892)
-2018-09-06 01:60:22,900 | INFO  | OMAudit | user=hadoop | ip=172.18.0.4 | op=ALLOCATE_KEY {volume=vol-8-67105, bucket=bucket-0-68911, key=key-246-29031, dataSize=10240, replicationType=STAND_ALONE, replicationFactor=ONE, keyLocationInfo=null} | ret=SUCCESS |
diff --git a/hadoop-ozone/upgrade/pom.xml b/hadoop-ozone/upgrade/pom.xml
deleted file mode 100644
index 0a4bd7f..0000000
--- a/hadoop-ozone/upgrade/pom.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-upgrade</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone In-Place Upgrade</description>
-  <name>Apache Hadoop Ozone In-Place Upgrade</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.github.spotbugs</groupId>
-      <artifactId>spotbugs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <version>2.15.0</version>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java b/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java
deleted file mode 100644
index 1492738..0000000
--- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.upgrade;
-
-import java.util.concurrent.Callable;
-
-import picocli.CommandLine.Command;
-
-/**
- * Command to move blocks between HDFS datanodes.
- */
-@Command(name = "balance",
-    description = "Move the HDFS blocks for a better distribution "
-        + "usage.")
-public class Balance implements Callable<Void> {
-
-  @Override
-  public Void call() throws Exception {
-    System.err.println("[In-Place upgrade : balance] is not yet supported.");
-    return null;
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Execute.java b/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Execute.java
deleted file mode 100644
index 0837200..0000000
--- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Execute.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.upgrade;
-
-import java.util.concurrent.Callable;
-
-import picocli.CommandLine.Command;
-
-/**
- * Execute the in-place upgrade from HDFS to Ozone.
- */
-@Command(name = "execute",
-    description = "Start/restart upgrade from HDFS to Ozone cluster.")
-public class Execute implements Callable<Void> {
-
-  @Override
-  public Void call() throws Exception {
-    System.err.println("In-Place upgrade : execute] is not yet supported.");
-    return null;
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/InPlaceUpgrade.java b/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/InPlaceUpgrade.java
deleted file mode 100644
index b307f44..0000000
--- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/InPlaceUpgrade.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.upgrade;
-
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-
-import picocli.CommandLine.Command;
-
-/**
- * Command line interface for the In-Place upgrade utility.
- * <p>
- * In-Place upgrade can convert HDFS cluster data to Ozone data with no
- * (or minimal) data movement.
- */
-@Command(name = "ozone upgrade",
-    description = "Convert raw HDFS data to Ozone data without data movement.",
-    subcommands = {
-        Plan.class,
-        Balance.class,
-        Execute.class,
-    },
-    versionProvider = HddsVersionProvider.class,
-    mixinStandardHelpOptions = true)
-public class InPlaceUpgrade extends GenericCli {
-
-  public static void main(String[] args) {
-    new InPlaceUpgrade().run(args);
-  }
-}
diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Plan.java b/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Plan.java
deleted file mode 100644
index efd6092..0000000
--- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Plan.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.upgrade;
-
-import java.util.concurrent.Callable;
-
-import picocli.CommandLine.Command;
-
-/**
- * Command to calculate statistics and estimate the upgrade.
- */
-@Command(name = "plan",
-    description = "Plan existing HDFS block distribution and give."
-        + "estimation.")
-public class Plan implements Callable<Void> {
-
-  @Override
-  public Void call() throws Exception {
-    System.err.println("[In-Place upgrade : plan] is not yet supported.");
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java b/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java
deleted file mode 100644
index b147683..0000000
--- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.upgrade;
-
-/**
- * In-Place upgrade utility to upgrade an HDFS cluster to Ozone.
- */
\ No newline at end of file
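
All three stubs above (Plan, Balance, Execute) follow the same picocli shape: a Callable annotated with @Command and registered as a subcommand of a parent command. A minimal self-contained sketch of that wiring outside Hadoop — plain picocli 4.x in place of the GenericCli/HddsVersionProvider wrappers; every name here is hypothetical:

import java.util.concurrent.Callable;

import picocli.CommandLine;
import picocli.CommandLine.Command;

@Command(name = "upgrade",
    description = "Demo parent command.",
    subcommands = {DemoUpgrade.PlanStub.class},
    mixinStandardHelpOptions = true)
public final class DemoUpgrade implements Callable<Integer> {

  @Override
  public Integer call() {
    // No subcommand given: print usage, mirroring GenericCli behavior.
    CommandLine.usage(this, System.out);
    return 0;
  }

  @Command(name = "plan", description = "Stub subcommand.")
  static final class PlanStub implements Callable<Integer> {
    @Override
    public Integer call() {
      System.err.println("[demo : plan] is not yet supported.");
      return 0;
    }
  }

  public static void main(String[] args) {
    // execute() parses args, dispatches to the subcommand, and
    // returns its Integer result as the process exit code.
    System.exit(new CommandLine(new DemoUpgrade()).execute(args));
  }
}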
diff --git a/pom.ozone.xml b/pom.ozone.xml
deleted file mode 100644
index f83c55c..0000000
--- a/pom.ozone.xml
+++ /dev/null
@@ -1,2032 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-main-ozone</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Main</description>
-  <name>Apache Hadoop Ozone Main</name>
-  <packaging>pom</packaging>
-
-  <modules>
-    <module>hadoop-hdds</module>
-    <module>hadoop-ozone</module>
-  </modules>
-
-  <distributionManagement>
-    <repository>
-      <id>${distMgmtStagingId}</id>
-      <name>${distMgmtStagingName}</name>
-      <url>${distMgmtStagingUrl}</url>
-    </repository>
-    <snapshotRepository>
-      <id>${distMgmtSnapshotsId}</id>
-      <name>${distMgmtSnapshotsName}</name>
-      <url>${distMgmtSnapshotsUrl}</url>
-    </snapshotRepository>
-  </distributionManagement>
-
-  <repositories>
-    <repository>
-      <id>${distMgmtSnapshotsId}</id>
-      <name>${distMgmtSnapshotsName}</name>
-      <url>${distMgmtSnapshotsUrl}</url>
-    </repository>
-    <repository>
-      <id>repository.jboss.org</id>
-      <url>http://repository.jboss.org/nexus/content/groups/public/</url>
-      <snapshots>
-        <enabled>false</enabled>
-      </snapshots>
-    </repository>
-  </repositories>
-
-  <licenses>
-    <license>
-      <name>Apache License, Version 2.0</name>
-      <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
-    </license>
-  </licenses>
-
-  <organization>
-    <name>Apache Software Foundation</name>
-    <url>http://www.apache.org</url>
-  </organization>
-
-  <properties>
-    <hadoop.version>3.2.0</hadoop.version>
-
-    <distMgmtSnapshotsId>apache.snapshots.https</distMgmtSnapshotsId>
-    <distMgmtSnapshotsName>Apache Development Snapshot Repository</distMgmtSnapshotsName>
-    <distMgmtSnapshotsUrl>https://repository.apache.org/content/repositories/snapshots</distMgmtSnapshotsUrl>
-    <distMgmtStagingId>apache.staging.https</distMgmtStagingId>
-    <distMgmtStagingName>Apache Release Distribution Repository</distMgmtStagingName>
-    <distMgmtStagingUrl>https://repository.apache.org/service/local/staging/deploy/maven2</distMgmtStagingUrl>
-
-    <!-- platform encoding override -->
-    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
-
-    <maven-gpg-plugin.version>1.5</maven-gpg-plugin.version>
-    <shell-executable>bash</shell-executable>
-
-    <!-- Set the Release year during release -->
-    <release-year>2019</release-year>
-
-    <failIfNoTests>false</failIfNoTests>
-    <maven.test.redirectTestOutputToFile>true</maven.test.redirectTestOutputToFile>
-    <jetty.version>9.3.25.v20180904</jetty.version>
-    <test.exclude>_</test.exclude>
-    <test.exclude.pattern>_</test.exclude.pattern>
-
-    <!-- number of threads/forks to use when running tests in parallel, see parallel-tests profile -->
-    <testsThreadCount>4</testsThreadCount>
-
-    <!-- These 2 versions are defined here because they are used -->
-    <!-- JDIFF generation from embedded ant in the antrun plugin -->
-    <jdiff.version>1.0.9</jdiff.version>
-    <!-- Version number for xerces used by JDiff -->
-    <xerces.jdiff.version>2.11.0</xerces.jdiff.version>
-
-    <hadoop.assemblies.version>3.2.0</hadoop.assemblies.version>
-    <commons-daemon.version>1.0.13</commons-daemon.version>
-
-    <test.build.dir>${project.build.directory}/test-dir</test.build.dir>
-    <test.build.data>${test.build.dir}</test.build.data>
-
-    <!-- Used for building path to native library loaded by tests.  Projects -->
-    <!-- at different nesting levels in the source tree may need to override. -->
-    <hadoop.common.build.dir>${basedir}/../../hadoop-common-project/hadoop-common/target</hadoop.common.build.dir>
-    <java.security.egd>file:///dev/urandom</java.security.egd>
-
-    <!-- avro version -->
-    <avro.version>1.7.7</avro.version>
-
-    <!-- jersey version -->
-    <jersey.version>1.19</jersey.version>
-
-    <!-- jackson versions -->
-    <jackson.version>1.9.13</jackson.version>
-    <jackson2.version>2.9.9</jackson2.version>
-
-    <!-- jaegertracing veresion -->
-    <jaeger.version>0.34.0</jaeger.version>
-
-    <!-- httpcomponents versions -->
-    <httpclient.version>4.5.2</httpclient.version>
-    <httpcore.version>4.4.4</httpcore.version>
-
-    <!-- SLF4J version -->
-    <slf4j.version>1.7.25</slf4j.version>
-
-    <!-- com.google.re2j version -->
-    <re2j.version>1.1</re2j.version>
-
-    <!-- ProtocolBuffer version, used to verify the protoc version and -->
-    <!-- define the protobuf JAR version                               -->
-    <protobuf.version>2.5.0</protobuf.version>
-    <protoc.path>${env.HADOOP_PROTOC_PATH}</protoc.path>
-
-    <curator.version>2.12.0</curator.version>
-    <findbugs.version>3.0.0</findbugs.version>
-    <spotbugs.version>3.1.12</spotbugs.version>
-    <dnsjava.version>2.1.7</dnsjava.version>
-
-    <guava.version>11.0.2</guava.version>
-    <guice.version>4.0</guice.version>
-    <joda-time.version>2.9.9</joda-time.version>
-
-    <!-- Required for testing LDAP integration -->
-    <apacheds.version>2.0.0-M21</apacheds.version>
-    <ldap-api.version>1.0.0-M33</ldap-api.version>
-
-    <!-- Apache Ratis version -->
-    <ratis.version>0.4.0</ratis.version>
-    <jcache.version>1.0-alpha-1</jcache.version>
-    <ehcache.version>3.3.1</ehcache.version>
-    <hikari.version>2.4.12</hikari.version>
-    <mssql.version>6.2.1.jre7</mssql.version>
-    <okhttp.version>2.7.5</okhttp.version>
-
-    <!-- Maven protoc compiler -->
-    <protobuf-maven-plugin.version>0.5.1</protobuf-maven-plugin.version>
-    <protobuf-compile.version>3.5.0</protobuf-compile.version>
-    <grpc.version>1.10.0</grpc.version>
-    <os-maven-plugin.version>1.5.0.Final</os-maven-plugin.version>
-
-    <!-- define the Java language version used by the compiler -->
-    <javac.version>1.8</javac.version>
-
-    <!-- The java version enforced by the maven enforcer -->
-    <!-- more complex patterns can be used here, such as
-       [${javac.version})
-    for an open-ended enforcement
-    -->
-    <enforced.java.version>[${javac.version},)</enforced.java.version>
-    <enforced.maven.version>[3.3.0,)</enforced.maven.version>
-
-    <!-- Plugin versions and config -->
-    <maven-surefire-plugin.argLine>-Xmx2048m -XX:+HeapDumpOnOutOfMemoryError</maven-surefire-plugin.argLine>
-    <maven-surefire-plugin.version>2.21.0</maven-surefire-plugin.version>
-    <maven-surefire-report-plugin.version>${maven-surefire-plugin.version}</maven-surefire-report-plugin.version>
-    <maven-failsafe-plugin.version>${maven-surefire-plugin.version}</maven-failsafe-plugin.version>
-
-    <maven-clean-plugin.version>2.5</maven-clean-plugin.version>
-    <maven-compiler-plugin.version>3.1</maven-compiler-plugin.version>
-    <maven-install-plugin.version>2.5.1</maven-install-plugin.version>
-    <maven-resources-plugin.version>2.6</maven-resources-plugin.version>
-    <maven-shade-plugin.version>3.2.0</maven-shade-plugin.version>
-    <maven-jar-plugin.version>2.5</maven-jar-plugin.version>
-    <maven-war-plugin.version>3.1.0</maven-war-plugin.version>
-    <maven-source-plugin.version>2.3</maven-source-plugin.version>
-    <maven-pdf-plugin.version>1.2</maven-pdf-plugin.version>
-    <maven-remote-resources-plugin.version>1.5</maven-remote-resources-plugin.version>
-    <maven-enforcer-plugin.version>3.0.0-M1</maven-enforcer-plugin.version>
-    <maven-javadoc-plugin.version>3.0.1</maven-javadoc-plugin.version>
-    <maven-assembly-plugin.version>2.4</maven-assembly-plugin.version>
-    <apache-rat-plugin.version>0.12</apache-rat-plugin.version>
-    <maven-deploy-plugin.version>2.8.1</maven-deploy-plugin.version>
-    <build-helper-maven-plugin.version>1.9</build-helper-maven-plugin.version>
-    <maven-dependency-plugin.version>3.0.2</maven-dependency-plugin.version>
-    <exec-maven-plugin.version>1.3.1</exec-maven-plugin.version>
-    <make-maven-plugin.version>1.0-beta-1</make-maven-plugin.version>
-    <native-maven-plugin.version>1.0-alpha-8</native-maven-plugin.version>
-    <maven-checkstyle-plugin.version>3.0.0</maven-checkstyle-plugin.version>
-    <checkstyle.version>8.19</checkstyle.version>
-    <surefire.fork.timeout>900</surefire.fork.timeout>
-    <aws-java-sdk.version>1.11.375</aws-java-sdk.version>
-    <hsqldb.version>2.3.4</hsqldb.version>
-    <frontend-maven-plugin.version>1.5</frontend-maven-plugin.version>
-    <!-- the version of Hadoop declared in the version resources; can be overridden
-    so that Hadoop 3.x can declare itself a 2.x artifact. -->
-    <declared.hadoop.version>${hadoop.version}</declared.hadoop.version>
-
-    <swagger-annotations-version>1.5.4</swagger-annotations-version>
-    <snakeyaml.version>1.16</snakeyaml.version>
-    <hbase.one.version>1.2.6</hbase.one.version>
-    <hbase.two.version>2.0.0-beta-1</hbase.two.version>
-  </properties>
-
-
-  <dependencyManagement>
-    <dependencies>
-      <dependency>
-        <groupId>com.squareup.okhttp</groupId>
-        <artifactId>okhttp</artifactId>
-        <version>${okhttp.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.squareup.okhttp3</groupId>
-        <artifactId>mockwebserver</artifactId>
-        <version>3.7.0</version>
-        <scope>test</scope>
-      </dependency>
-      <dependency>
-        <groupId>jdiff</groupId>
-        <artifactId>jdiff</artifactId>
-        <version>${jdiff.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-assemblies</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-annotations</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-client-modules</artifactId>
-        <version>${hadoop.version}</version>
-        <type>pom</type>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-client-api</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-client-check-invariants</artifactId>
-        <version>${hadoop.version}</version>
-        <type>pom</type>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-client-check-test-invariants</artifactId>
-        <version>${hadoop.version}</version>
-        <type>pom</type>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-client-integration-tests</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-client-runtime</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-client-minicluster</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-common</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-common</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-auth</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-auth</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-nfs</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdfs</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdfs-client</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdfs-rbf</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdfs</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-mapreduce-client-app</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-mapreduce-client-app</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-mapreduce-client-common</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-api</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-client</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-mapreduce-client-core</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn</artifactId>
-        <version>${hadoop.version}</version>
-        <type>pom</type>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-web-proxy</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-common</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-common</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-tests</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-common</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-common</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-registry</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-nodemanager</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-applicationhistoryservice</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-timelineservice</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-timelineservice</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-timelineservice-hbase-client</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-timelineservice-hbase-common</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-applications-distributedshell</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-timeline-pluginstorage</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-timeline-pluginstorage</artifactId>
-        <type>test-jar</type>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-router</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-services-core</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-services-core</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-mapreduce-client-hs</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-mapreduce-examples</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-gridmix</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-streaming</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-archives</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-archive-logs</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-distcp</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-distcp</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-datajoin</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-rumen</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-extras</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-client</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-minicluster</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-minikdc</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-openstack</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-azure</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-azure-datalake</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-aws</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-aliyun</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.openjdk.jmh</groupId>
-        <artifactId>jmh-core</artifactId>
-        <version>1.19</version>
-      </dependency>
-      <dependency>
-        <groupId>org.openjdk.jmh</groupId>
-        <artifactId>jmh-generator-annprocess</artifactId>
-        <version>1.19</version>
-      </dependency>
-
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-kms</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-kms</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-
-      <dependency>
-        <groupId>com.google.guava</groupId>
-        <artifactId>guava</artifactId>
-        <version>${guava.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.google.code.gson</groupId>
-        <artifactId>gson</artifactId>
-        <version>2.2.4</version>
-      </dependency>
-      <dependency>
-        <groupId>commons-cli</groupId>
-        <artifactId>commons-cli</artifactId>
-        <version>1.2</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.commons</groupId>
-        <artifactId>commons-math3</artifactId>
-        <version>3.1.1</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.commons</groupId>
-        <artifactId>commons-compress</artifactId>
-        <version>1.4.1</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.commons</groupId>
-        <artifactId>commons-csv</artifactId>
-        <version>1.0</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.httpcomponents</groupId>
-        <artifactId>httpclient</artifactId>
-        <version>${httpclient.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.httpcomponents</groupId>
-        <artifactId>httpcore</artifactId>
-        <version>${httpcore.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>commons-codec</groupId>
-        <artifactId>commons-codec</artifactId>
-        <version>1.11</version>
-      </dependency>
-      <dependency>
-        <groupId>commons-net</groupId>
-        <artifactId>commons-net</artifactId>
-        <version>3.6</version>
-      </dependency>
-      <dependency>
-        <groupId>javax.servlet</groupId>
-        <artifactId>javax.servlet-api</artifactId>
-        <version>3.1.0</version>
-      </dependency>
-      <dependency>
-        <groupId>javax.ws.rs</groupId>
-        <artifactId>jsr311-api</artifactId>
-        <version>1.1.1</version>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-server</artifactId>
-        <version>${jetty.version}</version>
-        <exclusions>
-          <exclusion>
-            <groupId>org.eclipse.jetty</groupId>
-            <artifactId>javax.servlet-api</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-util</artifactId>
-        <version>${jetty.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-servlet</artifactId>
-        <version>${jetty.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-webapp</artifactId>
-        <version>${jetty.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-util-ajax</artifactId>
-        <version>${jetty.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>javax.servlet.jsp</groupId>
-        <artifactId>jsp-api</artifactId>
-        <version>2.1</version>
-      </dependency>
-      <dependency>
-        <groupId>org.glassfish</groupId>
-        <artifactId>javax.servlet</artifactId>
-        <version>3.1</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.codehaus.plexus</groupId>
-        <artifactId>plexus-utils</artifactId>
-        <version>2.0.5</version>
-      </dependency>
-      <dependency>
-        <groupId>org.codehaus.plexus</groupId>
-        <artifactId>plexus-component-annotations</artifactId>
-        <version>1.5.5</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.ow2.asm</groupId>
-        <artifactId>asm</artifactId>
-        <version>5.0.4</version>
-      </dependency>
-      <dependency>
-        <groupId>org.ojalgo</groupId>
-        <artifactId>ojalgo</artifactId>
-        <version>43.0</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey</groupId>
-        <artifactId>jersey-core</artifactId>
-        <version>${jersey.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey</groupId>
-        <artifactId>jersey-servlet</artifactId>
-        <version>${jersey.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey</groupId>
-        <artifactId>jersey-json</artifactId>
-        <version>${jersey.version}</version>
-        <exclusions>
-          <exclusion>
-            <groupId>stax</groupId>
-            <artifactId>stax-api</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey</groupId>
-        <artifactId>jersey-server</artifactId>
-        <version>${jersey.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>com.google.inject</groupId>
-        <artifactId>guice</artifactId>
-        <version>${guice.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>cglib</groupId>
-        <artifactId>cglib</artifactId>
-        <version>3.2.0</version>
-      </dependency>
-
-      <dependency>
-        <groupId>com.google.inject.extensions</groupId>
-        <artifactId>guice-servlet</artifactId>
-        <version>${guice.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>com.sun.jersey.contribs</groupId>
-        <artifactId>jersey-guice</artifactId>
-        <version>${jersey.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>com.sun.jersey.jersey-test-framework</groupId>
-        <artifactId>jersey-test-framework-core</artifactId>
-        <version>${jersey.version}</version>
-        <scope>test</scope>
-        <exclusions>
-          <exclusion>
-            <groupId>javax.servlet</groupId>
-            <artifactId>javax.servlet-api</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey.jersey-test-framework</groupId>
-        <artifactId>jersey-test-framework-grizzly2</artifactId>
-        <version>${jersey.version}</version>
-        <exclusions>
-          <exclusion>
-            <groupId>javax.servlet</groupId>
-            <artifactId>javax.servlet-api</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.ratis</groupId>
-        <artifactId>ratis-proto-shaded</artifactId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-common</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-client</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-server</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-netty</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-grpc</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>io.netty</groupId>
-        <artifactId>netty</artifactId>
-        <version>3.10.5.Final</version>
-      </dependency>
-
-      <dependency>
-        <groupId>io.netty</groupId>
-        <artifactId>netty-all</artifactId>
-        <version>4.0.52.Final</version>
-      </dependency>
-
-      <dependency>
-        <groupId>commons-io</groupId>
-        <artifactId>commons-io</artifactId>
-        <version>2.5</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-servlet-tester</artifactId>
-        <version>${jetty.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>commons-logging</groupId>
-        <artifactId>commons-logging</artifactId>
-        <version>1.1.3</version>
-        <exclusions>
-          <exclusion>
-            <groupId>avalon-framework</groupId>
-            <artifactId>avalon-framework</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>logkit</groupId>
-            <artifactId>logkit</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>javax.servlet</groupId>
-            <artifactId>javax.servlet-api</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>commons-logging</groupId>
-        <artifactId>commons-logging-api</artifactId>
-        <version>1.1</version>
-      </dependency>
-      <dependency>
-        <groupId>log4j</groupId>
-        <artifactId>log4j</artifactId>
-        <version>1.2.17</version>
-        <exclusions>
-          <exclusion>
-            <groupId>com.sun.jdmk</groupId>
-            <artifactId>jmxtools</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>com.sun.jmx</groupId>
-            <artifactId>jmxri</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>javax.mail</groupId>
-            <artifactId>mail</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>javax.jms</groupId>
-            <artifactId>jmx</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>javax.jms</groupId>
-            <artifactId>jms</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>com.amazonaws</groupId>
-        <artifactId>aws-java-sdk-bundle</artifactId>
-        <version>${aws-java-sdk.version}</version>
-        <exclusions>
-          <exclusion>
-            <groupId>io.netty</groupId>
-            <artifactId>*</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.mina</groupId>
-        <artifactId>mina-core</artifactId>
-        <version>2.0.16</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.sshd</groupId>
-        <artifactId>sshd-core</artifactId>
-        <version>1.6.0</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.ftpserver</groupId>
-        <artifactId>ftplet-api</artifactId>
-        <version>1.0.0</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.ftpserver</groupId>
-        <artifactId>ftpserver-core</artifactId>
-        <version>1.0.0</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.ftpserver</groupId>
-        <artifactId>ftpserver-deprecated</artifactId>
-        <version>1.0.0-M2</version>
-      </dependency>
-      <dependency>
-        <groupId>junit</groupId>
-        <artifactId>junit</artifactId>
-        <version>4.11</version>
-      </dependency>
-      <dependency>
-        <groupId>commons-collections</groupId>
-        <artifactId>commons-collections</artifactId>
-        <version>3.2.2</version>
-      </dependency>
-      <dependency>
-        <groupId>commons-beanutils</groupId>
-        <artifactId>commons-beanutils</artifactId>
-        <version>1.9.4</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.commons</groupId>
-        <artifactId>commons-configuration2</artifactId>
-        <version>2.1.1</version>
-        <exclusions>
-          <exclusion>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-lang3</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.commons</groupId>
-        <artifactId>commons-lang3</artifactId>
-        <version>3.7</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.commons</groupId>
-        <artifactId>commons-text</artifactId>
-        <version>1.4</version>
-      </dependency>
-      <dependency>
-        <groupId>org.slf4j</groupId>
-        <artifactId>slf4j-api</artifactId>
-        <version>${slf4j.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.slf4j</groupId>
-        <artifactId>slf4j-log4j12</artifactId>
-        <version>${slf4j.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.slf4j</groupId>
-        <artifactId>jul-to-slf4j</artifactId>
-        <version>${slf4j.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.jdt</groupId>
-        <artifactId>core</artifactId>
-        <version>3.1.1</version>
-      </dependency>
-      <dependency>
-        <groupId>org.codehaus.woodstox</groupId>
-        <artifactId>stax2-api</artifactId>
-        <version>3.1.4</version>
-      </dependency>
-      <dependency>
-        <groupId>com.fasterxml.woodstox</groupId>
-        <artifactId>woodstox-core</artifactId>
-        <version>5.0.3</version>
-      </dependency>
-      <dependency>
-        <groupId>org.codehaus.jackson</groupId>
-        <artifactId>jackson-mapper-asl</artifactId>
-        <version>${jackson.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.codehaus.jackson</groupId>
-        <artifactId>jackson-core-asl</artifactId>
-        <version>${jackson.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.codehaus.jackson</groupId>
-        <artifactId>jackson-jaxrs</artifactId>
-        <version>${jackson.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.codehaus.jackson</groupId>
-        <artifactId>jackson-xc</artifactId>
-        <version>${jackson.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.fasterxml.jackson.core</groupId>
-        <artifactId>jackson-core</artifactId>
-        <version>${jackson2.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.fasterxml.jackson.core</groupId>
-        <artifactId>jackson-databind</artifactId>
-        <version>${jackson2.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.fasterxml.jackson.core</groupId>
-        <artifactId>jackson-annotations</artifactId>
-        <version>${jackson2.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.fasterxml.jackson.module</groupId>
-        <artifactId>jackson-module-jaxb-annotations</artifactId>
-        <version>${jackson2.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.fasterxml.jackson.dataformat</groupId>
-        <artifactId>jackson-dataformat-cbor</artifactId>
-        <version>${jackson2.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.mockito</groupId>
-        <artifactId>mockito-all</artifactId>
-        <version>1.8.5</version>
-      </dependency>
-      <dependency>
-        <groupId>org.objenesis</groupId>
-        <artifactId>objenesis</artifactId>
-        <version>1.0</version>
-      </dependency>
-      <dependency>
-        <groupId>org.mock-server</groupId>
-        <artifactId>mockserver-netty</artifactId>
-        <version>3.9.2</version>
-        <exclusions>
-          <exclusion>
-            <groupId>javax.servlet</groupId>
-            <artifactId>javax.servlet-api</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.avro</groupId>
-        <artifactId>avro</artifactId>
-        <version>${avro.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>net.sf.kosmosfs</groupId>
-        <artifactId>kfs</artifactId>
-        <version>0.3</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.ant</groupId>
-        <artifactId>ant</artifactId>
-        <version>1.8.1</version>
-      </dependency>
-      <dependency>
-        <groupId>com.google.re2j</groupId>
-        <artifactId>re2j</artifactId>
-        <version>${re2j.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.google.protobuf</groupId>
-        <artifactId>protobuf-java</artifactId>
-        <version>${protobuf.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>commons-daemon</groupId>
-        <artifactId>commons-daemon</artifactId>
-        <version>${commons-daemon.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.jcraft</groupId>
-        <artifactId>jsch</artifactId>
-        <version>0.1.54</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.htrace</groupId>
-        <artifactId>htrace-core</artifactId>
-        <version>3.1.0-incubating</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.htrace</groupId>
-        <artifactId>htrace-core4</artifactId>
-        <version>4.1.0-incubating</version>
-      </dependency>
-      <dependency>
-        <groupId>org.jdom</groupId>
-        <artifactId>jdom</artifactId>
-        <version>1.1</version>
-      </dependency>
-      <dependency>
-        <groupId>com.googlecode.json-simple</groupId>
-        <artifactId>json-simple</artifactId>
-        <version>1.1.1</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.hsqldb</groupId>
-        <artifactId>hsqldb</artifactId>
-        <version>${hsqldb.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>io.dropwizard.metrics</groupId>
-        <artifactId>metrics-core</artifactId>
-        <version>3.2.4</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-sls</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-cloud-storage</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs</artifactId>
-        <version>${spotbugs.version}</version>
-        <scope>provided</scope>
-      </dependency>
-      <dependency>
-        <groupId>com.google.code.findbugs</groupId>
-        <artifactId>jsr305</artifactId>
-        <version>${findbugs.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>javax.xml.bind</groupId>
-        <artifactId>jaxb-api</artifactId>
-        <version>2.2.11</version>
-      </dependency>
-      <dependency>
-        <groupId>org.codehaus.jettison</groupId>
-        <artifactId>jettison</artifactId>
-        <version>1.1</version>
-        <exclusions>
-          <exclusion>
-            <groupId>stax</groupId>
-            <artifactId>stax-api</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey</groupId>
-        <artifactId>jersey-client</artifactId>
-        <version>${jersey.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.glassfish.grizzly</groupId>
-        <artifactId>grizzly-http-servlet</artifactId>
-        <version>2.2.21</version>
-      </dependency>
-      <dependency>
-        <groupId>org.glassfish.grizzly</groupId>
-        <artifactId>grizzly-http</artifactId>
-        <version>2.2.21</version>
-      </dependency>
-      <dependency>
-        <groupId>org.glassfish.grizzly</groupId>
-        <artifactId>grizzly-http-server</artifactId>
-        <version>2.2.21</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.fusesource.leveldbjni</groupId>
-        <artifactId>leveldbjni-all</artifactId>
-        <version>1.8</version>
-      </dependency>
-
-      <dependency>
-        <groupId>com.microsoft.azure</groupId>
-        <artifactId>azure-storage</artifactId>
-        <version>7.0.0</version>
-      </dependency>
-
-      <!--Wildfly openssl dependency is introduced by HADOOP-15669-->
-      <dependency>
-        <groupId>org.wildfly.openssl</groupId>
-        <artifactId>wildfly-openssl</artifactId>
-        <version>1.0.4.Final</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.threadly</groupId>
-        <artifactId>threadly</artifactId>
-        <version>4.9.0</version>
-      </dependency>
-
-      <dependency>
-        <groupId>com.aliyun.oss</groupId>
-        <artifactId>aliyun-sdk-oss</artifactId>
-        <version>2.8.3</version>
-        <exclusions>
-          <exclusion>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpclient</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>commons-beanutils</groupId>
-            <artifactId>commons-beanutils</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.curator</groupId>
-        <artifactId>curator-recipes</artifactId>
-        <version>${curator.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.curator</groupId>
-        <artifactId>curator-client</artifactId>
-        <version>${curator.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.curator</groupId>
-        <artifactId>curator-framework</artifactId>
-        <version>${curator.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.curator</groupId>
-        <artifactId>curator-test</artifactId>
-        <version>${curator.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.bouncycastle</groupId>
-        <artifactId>bcprov-jdk16</artifactId>
-        <version>1.46</version>
-        <scope>test</scope>
-      </dependency>
-
-      <dependency>
-        <groupId>joda-time</groupId>
-        <artifactId>joda-time</artifactId>
-        <version>${joda-time.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>com.nimbusds</groupId>
-        <artifactId>nimbus-jose-jwt</artifactId>
-        <version>4.41.1</version>
-        <scope>compile</scope>
-        <exclusions>
-          <exclusion>
-            <groupId>org.bouncycastle</groupId>
-            <artifactId>bcprov-jdk15on</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-
-      <dependency>
-        <groupId>dnsjava</groupId>
-        <artifactId>dnsjava</artifactId>
-        <version>${dnsjava.version}</version>
-      </dependency>
-
-      <dependency>
-        <!-- HACK.  Transitive dependency for nimbus-jose-jwt.  Needed for
-             packaging.  Please re-check this version when updating
-             nimbus-jose-jwt.  Please read HADOOP-14903 for more details.
-          -->
-        <groupId>net.minidev</groupId>
-        <artifactId>json-smart</artifactId>
-        <version>2.3</version>
-      </dependency>
-      <dependency>
-        <groupId>org.skyscreamer</groupId>
-        <artifactId>jsonassert</artifactId>
-        <version>1.3.0</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hbase</groupId>
-        <artifactId>hbase-common</artifactId>
-        <version>${hbase.version}</version>
-        <exclusions>
-          <exclusion>
-            <artifactId>jdk.tools</artifactId>
-            <groupId>jdk.tools</groupId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hbase</groupId>
-        <artifactId>hbase-common</artifactId>
-        <version>${hbase.version}</version>
-        <scope>test</scope>
-        <classifier>tests</classifier>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hbase</groupId>
-        <artifactId>hbase-client</artifactId>
-        <version>${hbase.version}</version>
-        <exclusions>
-          <!-- exclude jdk.tools (1.7) as we're not managing it -->
-          <exclusion>
-            <groupId>jdk.tools</groupId>
-            <artifactId>jdk.tools</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hbase</groupId>
-        <artifactId>hbase-server</artifactId>
-        <version>${hbase.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hbase</groupId>
-        <artifactId>hbase-server</artifactId>
-        <version>${hbase.version}</version>
-        <scope>test</scope>
-        <classifier>tests</classifier>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hbase</groupId>
-        <artifactId>hbase-testing-util</artifactId>
-        <version>${hbase.version}</version>
-        <scope>test</scope>
-        <optional>true</optional>
-        <exclusions>
-          <exclusion>
-            <groupId>org.jruby</groupId>
-            <artifactId>jruby-complete</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-hdfs</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-minicluster</artifactId>
-          </exclusion>
-          <exclusion>
-            <artifactId>jdk.tools</artifactId>
-            <groupId>jdk.tools</groupId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.kerby</groupId>
-        <artifactId>kerb-simplekdc</artifactId>
-        <version>1.0.1</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.geronimo.specs</groupId>
-        <artifactId>geronimo-jcache_1.0_spec</artifactId>
-        <version>${jcache.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.ehcache</groupId>
-        <artifactId>ehcache</artifactId>
-        <version>${ehcache.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.zaxxer</groupId>
-        <artifactId>HikariCP-java7</artifactId>
-        <version>${hikari.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.microsoft.sqlserver</groupId>
-        <artifactId>mssql-jdbc</artifactId>
-        <version>${mssql.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>io.swagger</groupId>
-        <artifactId>swagger-annotations</artifactId>
-        <version>${swagger-annotations-version}</version>
-      </dependency>
-      <dependency>
-        <groupId>com.fasterxml.jackson.jaxrs</groupId>
-        <artifactId>jackson-jaxrs-json-provider</artifactId>
-        <version>${jackson2.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.yaml</groupId>
-        <artifactId>snakeyaml</artifactId>
-        <version>${snakeyaml.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.assertj</groupId>
-        <artifactId>assertj-core</artifactId>
-        <version>3.8.0</version>
-        <scope>test</scope>
-      </dependency>
-      <dependency>
-        <groupId>org.jruby.jcodings</groupId>
-        <artifactId>jcodings</artifactId>
-        <version>1.0.13</version>
-      </dependency>
-    </dependencies>
-  </dependencyManagement>
-
-  <build>
-    <pluginManagement>
-      <plugins>
-        <plugin>
-          <groupId>org.codehaus.mojo</groupId>
-          <artifactId>build-helper-maven-plugin</artifactId>
-          <version>${build-helper-maven-plugin.version}</version>
-        </plugin>
-        <!-- We include the configuration for license-maven-plugin to correct
-             maven-shade-plugin generated poms because it's always the same. We
-             can't simply configure the plugin because we must ensure execution
-             happens in the package phase after the shade plugin runs.
-          -->
-        <plugin>
-          <groupId>org.codehaus.mojo</groupId>
-          <artifactId>license-maven-plugin</artifactId>
-          <version>1.10</version>
-          <configuration>
-            <canUpdateCopyright>false</canUpdateCopyright>
-            <roots><root>${project.basedir}</root></roots>
-          </configuration>
-          <executions>
-            <execution>
-              <id>update-pom-license</id>
-              <goals>
-                <goal>update-file-header</goal>
-              </goals>
-              <phase>package</phase>
-              <configuration>
-                <licenseName>apache_v2</licenseName>
-                <includes>
-                  <include>dependency-reduced-pom.xml</include>
-                </includes>
-              </configuration>
-            </execution>
-          </executions>
-        </plugin>
-        <plugin>
-          <artifactId>maven-clean-plugin</artifactId>
-          <version>${maven-clean-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-compiler-plugin</artifactId>
-          <version>${maven-compiler-plugin.version}</version>
-          <configuration>
-            <source>${javac.version}</source>
-            <target>${javac.version}</target>
-            <useIncrementalCompilation>false</useIncrementalCompilation>
-          </configuration>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-shade-plugin</artifactId>
-          <version>${maven-shade-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-surefire-plugin</artifactId>
-          <version>${maven-surefire-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-failsafe-plugin</artifactId>
-          <version>${maven-failsafe-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-install-plugin</artifactId>
-          <version>${maven-install-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-jar-plugin</artifactId>
-          <version>${maven-jar-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-javadoc-plugin</artifactId>
-          <version>${maven-javadoc-plugin.version}</version>
-          <configuration>
-            <additionalJOption>-Xdoclint:none</additionalJOption>
-          </configuration>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-war-plugin</artifactId>
-          <version>${maven-war-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>com.github.spotbugs</groupId>
-          <artifactId>spotbugs-maven-plugin</artifactId>
-          <version>${spotbugs.version}</version>
-          <configuration>
-            <maxHeap>1024</maxHeap>
-            <xmlOutput>true</xmlOutput>
-          </configuration>
-        </plugin>
-        <plugin>
-          <groupId>org.codehaus.mojo</groupId>
-          <artifactId>native-maven-plugin</artifactId>
-          <version>${native-maven-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.codehaus.mojo</groupId>
-          <artifactId>make-maven-plugin</artifactId>
-          <version>${make-maven-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-source-plugin</artifactId>
-          <version>${maven-source-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.avro</groupId>
-          <artifactId>avro-maven-plugin</artifactId>
-          <version>${avro.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-resources-plugin</artifactId>
-          <version>${maven-resources-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.codehaus.mojo</groupId>
-          <artifactId>exec-maven-plugin</artifactId>
-          <version>${exec-maven-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-pdf-plugin</artifactId>
-          <version>${maven-pdf-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-maven-plugins</artifactId>
-          <version>${hadoop.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-dependency-plugin</artifactId>
-          <version>${maven-dependency-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-enforcer-plugin</artifactId>
-          <version>${maven-enforcer-plugin.version}</version>
-          <configuration>
-            <rules>
-              <requireMavenVersion>
-                <version>[3.0.2,)</version>
-              </requireMavenVersion>
-              <requireJavaVersion>
-                <version>[1.8,)</version>
-              </requireJavaVersion>
-            </rules>
-          </configuration>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-assembly-plugin</artifactId>
-          <version>${maven-assembly-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-deploy-plugin</artifactId>
-          <version>${maven-deploy-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.rat</groupId>
-          <artifactId>apache-rat-plugin</artifactId>
-          <version>${apache-rat-plugin.version}</version>
-        </plugin>
-      </plugins>
-    </pluginManagement>
-
-    <plugins>
-      <plugin>
-        <artifactId>maven-clean-plugin</artifactId>
-        <configuration>
-          <filesets>
-            <fileset>
-              <directory>${project.basedir}</directory>
-              <includes>
-                <include>dependency-reduced-pom.xml</include>
-              </includes>
-            </fileset>
-          </filesets>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-remote-resources-plugin</artifactId>
-        <version>${maven-remote-resources-plugin.version}</version>
-        <configuration>
-          <resourceBundles>
-            <resourceBundle>org.apache.hadoop:hadoop-build-tools:${hadoop.version}</resourceBundle>
-          </resourceBundles>
-        </configuration>
-        <dependencies>
-          <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-build-tools</artifactId>
-            <version>${hadoop.version}</version>
-          </dependency>
-        </dependencies>
-        <executions>
-          <execution>
-            <goals>
-              <goal>process</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>animal-sniffer-maven-plugin</artifactId>
-        <version>1.16</version>
-        <executions>
-          <execution>
-            <id>signature-check</id>
-            <phase>verify</phase>
-            <goals>
-              <goal>check</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <signature>
-            <groupId>org.codehaus.mojo.signature</groupId>
-            <artifactId>java18</artifactId>
-            <version>1.0</version>
-          </signature>
-          <ignores>
-            <ignore>sun.misc.*</ignore>
-            <ignore>sun.net.*</ignore>
-            <ignore>sun.nio.ch.*</ignore>
-            <ignore>com.sun.javadoc.*</ignore>
-            <ignore>com.sun.tools.*</ignore>
-            <ignore>java.lang.invoke.*</ignore>
-          </ignores>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-antrun-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>create-testdirs</id>
-            <phase>validate</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-                <mkdir dir="${test.build.dir}"/>
-                <mkdir dir="${test.build.data}"/>
-              </target>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-surefire-plugin</artifactId>
-        <configuration>
-          <reuseForks>false</reuseForks>
-          <forkedProcessTimeoutInSeconds>${surefire.fork.timeout}</forkedProcessTimeoutInSeconds>
-          <argLine>${maven-surefire-plugin.argLine}</argLine>
-          <environmentVariables>
-            <HADOOP_COMMON_HOME>${hadoop.common.build.dir}</HADOOP_COMMON_HOME>
-            <!-- HADOOP_HOME required for tests on Windows to find winutils -->
-            <HADOOP_HOME>${hadoop.common.build.dir}</HADOOP_HOME>
-            <LD_LIBRARY_PATH>${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib</LD_LIBRARY_PATH>
-            <DYLD_LIBRARY_PATH>${env.DYLD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib</DYLD_LIBRARY_PATH>
-            <MALLOC_ARENA_MAX>4</MALLOC_ARENA_MAX>
-          </environmentVariables>
-          <trimStackTrace>false</trimStackTrace>
-          <systemPropertyVariables>
-
-            <hadoop.log.dir>${project.build.directory}/log</hadoop.log.dir>
-            <hadoop.tmp.dir>${project.build.directory}/tmp</hadoop.tmp.dir>
-
-            <!-- TODO: all references in testcases should be updated to this default -->
-            <test.build.dir>${test.build.dir}</test.build.dir>
-            <test.build.data>${test.build.data}</test.build.data>
-            <test.build.webapps>${test.build.webapps}</test.build.webapps>
-            <test.cache.data>${test.cache.data}</test.cache.data>
-            <test.build.classes>${project.build.directory}/test-classes</test.build.classes>
-
-            <java.net.preferIPv4Stack>true</java.net.preferIPv4Stack>
-            <java.security.krb5.conf>${project.build.directory}/test-classes/krb5.conf</java.security.krb5.conf>
-            <java.security.egd>${java.security.egd}</java.security.egd>
-            <require.test.libhadoop>${require.test.libhadoop}</require.test.libhadoop>
-          </systemPropertyVariables>
-          <includes>
-            <include>**/Test*.java</include>
-          </includes>
-          <excludes>
-            <exclude>**/${test.exclude}.java</exclude>
-            <exclude>${test.exclude.pattern}</exclude>
-            <exclude>**/Test*$*.java</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-pdf-plugin</artifactId>
-        <configuration>
-          <outputDirectory>${project.reporting.outputDirectory}</outputDirectory>
-          <includeReports>false</includeReports>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <version>${maven-checkstyle-plugin.version}</version>
-        <dependencies>
-          <dependency>
-            <groupId>com.puppycrawl.tools</groupId>
-            <artifactId>checkstyle</artifactId>
-            <version>${checkstyle.version}</version>
-          </dependency>
-        </dependencies>
-        <configuration>
-          <configLocation>hadoop-hdds/dev-support/checkstyle/checkstyle.xml</configLocation>
-          <suppressionsLocation>hadoop-hdds/dev-support/checkstyle/suppressions.xml</suppressionsLocation>
-          <includeTestSourceDirectory>true</includeTestSourceDirectory>
-          <failOnViolation>false</failOnViolation>
-          <format>xml</format>
-          <format>html</format>
-          <outputFile>${project.build.directory}/test/checkstyle-errors.xml</outputFile>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <profiles>
-    <profile>
-      <id>dist</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-javadoc-plugin</artifactId>
-            <executions>
-              <execution>
-                <!-- build javadoc jars per jar for publishing to maven -->
-                <id>module-javadocs</id>
-                <phase>package</phase>
-                <goals>
-                  <goal>jar</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-source-plugin</artifactId>
-            <executions>
-              <execution>
-                <!-- builds source jars and attaches them to the project for publishing -->
-                <id>hadoop-java-sources</id>
-                <phase>package</phase>
-                <goals>
-                  <goal>jar-no-fork</goal>
-                  <goal>test-jar-no-fork</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-enforcer-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>dist-enforce</id>
-                <phase>package</phase>
-                <goals>
-                  <goal>enforce</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-    <profile>
-      <id>src</id>
-      <activation>
-        <activeByDefault>false</activeByDefault>
-      </activation>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-assembly-plugin</artifactId>
-            <inherited>false</inherited>
-            <executions>
-              <execution>
-                <id>src-dist</id>
-                <phase>package</phase>
-                <goals>
-                  <goal>single</goal>
-                </goals>
-                <configuration>
-                  <appendAssemblyId>false</appendAssemblyId>
-                  <attach>false</attach>
-                  <finalName>hadoop-ozone-${project.version}-src</finalName>
-                  <outputDirectory>hadoop-ozone/dist/target</outputDirectory>
-                  <!-- Not using descriptorRef and hadoop-assembly dependency -->
-                  <!-- to avoid making hadoop-main depend on a module         -->
-                  <descriptors>
-                    <descriptor>hadoop-ozone/dist/src/main/assemblies/ozone-src.xml</descriptor>
-                  </descriptors>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
-            <inherited>false</inherited>
-            <executions>
-              <execution>
-                <id>src-dist-msg</id>
-                <phase>package</phase>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-                <configuration>
-                  <target>
-                    <echo/>
-                    <echo>Hadoop Ozone source tar available at: ${basedir}/hadoop-ozone/dist/target/hadoop-ozone-${project.version}-src.tar.gz</echo>
-                    <echo/>
-                  </target>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-
-    <profile>
-      <id>sign</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-gpg-plugin</artifactId>
-            <version>${maven-gpg-plugin.version}</version>
-            <executions>
-              <execution>
-                <id>sign-artifacts</id>
-                <phase>verify</phase>
-                <goals>
-                  <goal>sign</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-    <profile>
-      <id>clover</id>
-      <activation>
-        <activeByDefault>false</activeByDefault>
-        <property>
-          <name>clover</name>
-        </property>
-      </activation>
-      <properties>
-        <cloverLicenseLocation>${user.home}/.clover.license</cloverLicenseLocation>
-        <cloverDatabase>${project.build.directory}/clover/hadoop-coverage.db</cloverDatabase>
-        <!-- NB: These additional properties exist so that they can be
-             re-defined with "-Dk=v" Maven options. For some reason the
-             expressions declared in the Clover docs, such as
-             "${maven.clover.generateHtml}", do not work that way.
-             However, the properties below are confirmed to work: e.g.
-             -DcloverGenHtml=false switches off the HTML generation.
-             The default values provided here exactly match the Clover
-             defaults, so the behavior is 100% backwards compatible. -->
-        <cloverAlwaysReport>true</cloverAlwaysReport>
-        <cloverGenHtml>true</cloverGenHtml>
-        <cloverGenXml>true</cloverGenXml>
-        <cloverGenHistorical>false</cloverGenHistorical>
-      </properties>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>com.atlassian.maven.plugins</groupId>
-            <artifactId>maven-clover2-plugin</artifactId>
-            <configuration>
-              <includesAllSourceRoots>false</includesAllSourceRoots>
-              <includesTestSourceRoots>true</includesTestSourceRoots>
-              <licenseLocation>${cloverLicenseLocation}</licenseLocation>
-              <cloverDatabase>${cloverDatabase}</cloverDatabase>
-              <targetPercentage>50%</targetPercentage>
-              <outputDirectory>${project.build.directory}/clover</outputDirectory>
-              <alwaysReport>${cloverAlwaysReport}</alwaysReport>
-              <generateHtml>${cloverGenHtml}</generateHtml>
-              <generateXml>${cloverGenXml}</generateXml>
-              <generateHistorical>${cloverGenHistorical}</generateHistorical>
-              <excludes>
-                <exclude>**/examples/**/*.java</exclude>
-                <exclude>**/hamlet/*.java</exclude>
-                <exclude>**/ha/proto/*.java</exclude>
-                <exclude>**/protocol/proto/*.java</exclude>
-                <exclude>**/compiler/generated/*.java</exclude>
-                <exclude>**/protobuf/*.java</exclude>
-                <exclude>**/v2/proto/*.java</exclude>
-                <exclude>**/yarn/proto/*.java</exclude>
-                <exclude>**/security/proto/*.java</exclude>
-                <exclude>**/tools/proto/*.java</exclude>
-                <exclude>**/hs/proto/*.java</exclude>
-              </excludes>
-            </configuration>
-            <executions>
-              <execution>
-                <id>clover-setup</id>
-                <phase>process-sources</phase>
-                <goals>
-                  <goal>setup</goal>
-                </goals>
-              </execution>
-              <execution>
-                <id>clover</id>
-                <phase>test</phase>
-                <goals>
-                  <goal>clover</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
-</project>
diff --git a/pom.xml b/pom.xml
index 2167c8d..33a1d95 100644
--- a/pom.xml
+++ b/pom.xml
@@ -406,8 +406,6 @@
             <exclude>**/build/**</exclude>
             <exclude>**/patchprocess/**</exclude>
             <exclude>**/*.js</exclude>
-            <exclude>hadoop-hdds/**/</exclude>
-            <exclude>hadoop-ozone/**/</exclude>
             <exclude>hadoop-submarine/**/</exclude>
             <exclude>licenses/**</exclude>
             <exclude>licenses-binary/**</exclude>
@@ -742,16 +740,6 @@
       </build>
     </profile>
     <profile>
-      <id>hdds</id>
-      <activation>
-        <activeByDefault>false</activeByDefault>
-      </activation>
-      <modules>
-        <module>hadoop-hdds</module>
-        <module>hadoop-ozone</module>
-      </modules>
-    </profile>
-    <profile>
       <id>submarine</id>
       <activation>
         <activeByDefault>false</activeByDefault>